Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
The class DataStreamer, method processDatanodeOrExternalError.
/**
* If this stream has encountered any errors, shutdown threads
* and mark the stream as closed.
*
* @return true if it should sleep for a while after returning.
*/
private boolean processDatanodeOrExternalError() throws IOException {
  if (!errorState.hasDatanodeError() && !shouldHandleExternalError()) {
    return false;
  }
  LOG.debug("start process datanode/external error, {}", this);
  if (response != null) {
    LOG.info("Error Recovery for " + block + " waiting for responder to exit. ");
    return true;
  }
  closeStream();
  // move packets from ack queue to front of the data queue
  synchronized (dataQueue) {
    dataQueue.addAll(0, ackQueue);
    ackQueue.clear();
    packetSendTime.clear();
  }
  // If we had to recover the pipeline five times in a row for the same
  // packet, this client likely has corrupt data or the data is being
  // corrupted during transmission.
  if (!errorState.isRestartingNode() && ++pipelineRecoveryCount > 5) {
    LOG.warn("Error recovering pipeline for writing " + block
        + ". Already retried 5 times for the same packet.");
    lastException.set(new IOException("Failing write. Tried pipeline "
        + "recovery 5 times without success."));
    streamerClosed = true;
    return false;
  }
  setupPipelineForAppendOrRecovery();
  if (!streamerClosed && dfsClient.clientRunning) {
    if (stage == BlockConstructionStage.PIPELINE_CLOSE) {
      // The end-of-block packet will not be acked through the normal path,
      // so remove it from the queue and advance lastAckedSeqno ourselves, so
      // a client waiting on close() will be aware that the flush finished.
      synchronized (dataQueue) {
        // remove the end of block packet
        DFSPacket endOfBlockPacket = dataQueue.remove();
        // Close any trace span associated with this Packet
        TraceScope scope = endOfBlockPacket.getTraceScope();
        if (scope != null) {
          scope.reattach();
          scope.close();
          endOfBlockPacket.setTraceScope(null);
        }
        assert endOfBlockPacket.isLastPacketInBlock();
        assert lastAckedSeqno == endOfBlockPacket.getSeqno() - 1;
        lastAckedSeqno = endOfBlockPacket.getSeqno();
        pipelineRecoveryCount = 0;
        dataQueue.notifyAll();
      }
      endBlock();
    } else {
      initDataStreaming();
    }
  }
  return false;
}
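The span attached to the end-of-block packet was opened on the thread that enqueued the packet and then detached; the streamer must reattach it to the current thread before closing it. Below is a minimal standalone sketch of that detach/reattach lifecycle, not Hadoop code: the Packet class, the queue, and the tracer/sampler configuration are invented for illustration.

// Sketch of the detach/reattach pattern, assuming htrace-core4 on the classpath.
import java.util.ArrayDeque;
import java.util.Queue;
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class DetachReattachSketch {
  static class Packet {
    TraceScope scope;   // span that travels with the packet between threads
  }

  public static void main(String[] args) {
    Tracer tracer = new Tracer.Builder("sketch")
        .conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler"))   // sample every request
        .build();
    Queue<Packet> queue = new ArrayDeque<>();

    // Producer side: open a span for the packet, then detach it so another
    // thread can later take ownership of it.
    Packet p = new Packet();
    p.scope = tracer.newScope("enqueuePacket");
    p.scope.detach();
    queue.add(p);

    // Consumer (or error-recovery) side: reattach the span to this thread
    // before closing it, mirroring the endOfBlockPacket handling above.
    Packet taken = queue.remove();
    TraceScope scope = taken.scope;
    if (scope != null) {
      scope.reattach();
      scope.close();
      taken.scope = null;
    }
    tracer.close();
  }
}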
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
The class DFSStripedOutputStream, method closeImpl.
@Override
protected synchronized void closeImpl() throws IOException {
  if (isClosed()) {
    final MultipleIOException.Builder b = new MultipleIOException.Builder();
    for (int i = 0; i < streamers.size(); i++) {
      final StripedDataStreamer si = getStripedDataStreamer(i);
      try {
        si.getLastException().check(true);
      } catch (IOException e) {
        b.add(e);
      }
    }
    final IOException ioe = b.build();
    if (ioe != null) {
      throw ioe;
    }
    return;
  }
  try {
    try {
      // flush from all upper layers
      flushBuffer();
      // if the last stripe is incomplete, generate and write parity cells
      if (generateParityCellsForLastStripe()) {
        writeParityCells();
      }
      enqueueAllCurrentPackets();
      // flush all the data packets
      flushAllInternals();
      // check failures
      checkStreamerFailures();
      for (int i = 0; i < numAllBlocks; i++) {
        final StripedDataStreamer s = setCurrentStreamer(i);
        if (s.isHealthy()) {
          try {
            if (s.getBytesCurBlock() > 0) {
              setCurrentPacketToEmpty();
            }
            // flush the last "close" packet to Datanode
            flushInternal();
          } catch (Exception e) {
            // TODO for both close and endBlock, we currently do not handle
            // failures when sending the last packet. We actually do not need
            // to bump GS for this kind of failure. Thus counting the total
            // number of failures may be good enough.
          }
        }
      }
    } finally {
      // Failures may happen when flushing data/parity data out. Exceptions
      // may be thrown if more than 3 streamers fail, or updatePipeline RPC
      // fails. Streamers may keep waiting for the new block/GS information.
      // Thus need to force closing these threads.
      closeThreads(true);
    }
    try (TraceScope ignored = dfsClient.getTracer().newScope("completeFile")) {
      completeFile(currentBlockGroup);
    }
    logCorruptBlocks();
  } catch (ClosedChannelException ignored) {
  } finally {
    setClosed();
    // shutdown executor of flushAll tasks
    flushAllExecutor.shutdownNow();
  }
}
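TraceScope implements java.io.Closeable, which is why closeImpl() can wrap the completeFile RPC in try-with-resources: the span is closed even if the call throws. Below is a minimal standalone sketch of the same pattern; the tracer configuration and the completeFile() stand-in are invented for illustration and are not the Hadoop implementation.

// Sketch of wrapping one operation in a span via try-with-resources.
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class ScopedCallSketch {
  public static void main(String[] args) {
    Tracer tracer = new Tracer.Builder("sketch")
        .conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler"))
        .build();
    // The scope is closed automatically, even if the traced call throws.
    try (TraceScope ignored = tracer.newScope("completeFile")) {
      completeFile();
    }
    tracer.close();
  }

  // Stand-in for the namenode RPC that closeImpl() traces.
  private static void completeFile() {
    // ... the real work would go here ...
  }
}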
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
The class Receiver, method opRequestShortCircuitFds.
/** Receive {@link Op#REQUEST_SHORT_CIRCUIT_FDS} */
private void opRequestShortCircuitFds(DataInputStream in) throws IOException {
  final OpRequestShortCircuitAccessProto proto =
      OpRequestShortCircuitAccessProto.parseFrom(vintPrefixed(in));
  SlotId slotId = (proto.hasSlotId()) ?
      PBHelperClient.convert(proto.getSlotId()) : null;
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    requestShortCircuitFds(PBHelperClient.convert(proto.getHeader().getBlock()),
        PBHelperClient.convert(proto.getHeader().getToken()),
        slotId, proto.getMaxVersion(),
        proto.getSupportsReceiptVerification());
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
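The scope is closed in a finally block so the span ends even when the op handler throws, and the null check covers requests that arrived without a parent span id (the same shape appears again in opStripedBlockChecksum below). A small standalone sketch of that exception-safe close follows; the tracer setup and the failing handle() method are invented for illustration.

// Sketch of closing a possibly-null scope in finally.
import java.io.IOException;
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class FinallyCloseSketch {
  public static void main(String[] args) {
    Tracer tracer = new Tracer.Builder("sketch")
        .conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler"))
        .build();
    TraceScope traceScope = tracer.newScope("requestShortCircuitFds");
    try {
      handle();                  // may throw; the span must still be closed
    } catch (IOException e) {
      System.err.println("handler failed: " + e.getMessage());
    } finally {
      if (traceScope != null) {  // in Receiver the scope is null when the
        traceScope.close();      // request carried no trace info
      }
      tracer.close();
    }
  }

  private static void handle() throws IOException {
    throw new IOException("simulated datanode failure");
  }
}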
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
The class Receiver, method opStripedBlockChecksum.
/** Receive OP_STRIPED_BLOCK_CHECKSUM. */
private void opStripedBlockChecksum(DataInputStream dis) throws IOException {
  OpBlockGroupChecksumProto proto =
      OpBlockGroupChecksumProto.parseFrom(vintPrefixed(dis));
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  StripedBlockInfo stripedBlockInfo = new StripedBlockInfo(
      PBHelperClient.convert(proto.getHeader().getBlock()),
      PBHelperClient.convert(proto.getDatanodes()),
      PBHelperClient.convertTokens(proto.getBlockTokensList()),
      PBHelperClient.convertBlockIndices(proto.getBlockIndicesList()),
      PBHelperClient.convertErasureCodingPolicy(proto.getEcPolicy()));
  try {
    blockGroupChecksum(stripedBlockInfo,
        PBHelperClient.convert(proto.getHeader().getToken()),
        proto.getRequestedNumBytes());
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
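A scope obtained this way can also be enriched before it is closed: TraceScope exposes addKVAnnotation and addTimelineAnnotation, which attach details such as the requested byte count to the span. The sketch below is not Hadoop code; the scope name, annotation keys, and tracer setup are invented for illustration.

// Sketch of annotating a span while the scope is open.
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class AnnotatedScopeSketch {
  public static void main(String[] args) {
    Tracer tracer = new Tracer.Builder("sketch")
        .conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler"))
        .build();
    long requestedNumBytes = 4096;   // stand-in for proto.getRequestedNumBytes()
    try (TraceScope scope = tracer.newScope("blockGroupChecksum")) {
      // Annotations land on the span and are delivered to whatever span
      // receiver is configured.
      scope.addKVAnnotation("requestedNumBytes",
          Long.toString(requestedNumBytes));
      scope.addTimelineAnnotation("checksum computation started");
      // ... checksum work would go here ...
    }
    tracer.close();
  }
}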
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
The class Receiver, method continueTraceSpan.
private TraceScope continueTraceSpan(DataTransferTraceInfoProto proto,
    String description) {
  TraceScope scope = null;
  SpanId spanId = fromProto(proto);
  if (spanId != null) {
    scope = tracer.newScope(description, spanId);
  }
  return scope;
}
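continueTraceSpan starts a child scope from a SpanId that arrived with the request, which is what lets a datanode-side span join the trace the client started. A hedged standalone sketch of that round trip follows: the client captures the current span id as two longs, and the server rebuilds a SpanId and continues the trace. The transport of the two longs and the scope names are invented here; only the htrace calls themselves are assumed from the library.

// Sketch of carrying a parent SpanId between a client and a server thread.
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class SpanIdRoundTripSketch {
  public static void main(String[] args) {
    Tracer tracer = new Tracer.Builder("sketch")
        .conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler"))
        .build();

    long high = 0;
    long low = 0;
    // "Client" side: capture the active span id while inside a scope.
    try (TraceScope clientScope = tracer.newScope("writeBlock")) {
      SpanId current = Tracer.getCurrentSpanId();
      high = current.getHigh();   // these two longs would travel in the
      low = current.getLow();     // request header
    }

    // "Server" side: rebuild the id and continue the trace, as
    // continueTraceSpan() does above.
    SpanId parent = new SpanId(high, low);
    TraceScope scope = parent.isValid()
        ? tracer.newScope("writeBlockReceiver", parent) : null;
    if (scope != null) {
      scope.close();
    }
    tracer.close();
  }
}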