use of org.apache.htrace.core.TraceScope in project hadoop by apache.
the class DFSOutputStream method newStreamForAppend.
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes)
    throws IOException {
  if (stat.getErasureCodingPolicy() != null) {
    throw new IOException(
        "Not support appending to a striping layout file yet.");
  }
  try (TraceScope ignored =
      dfsClient.newPathTraceScope("newStreamForAppend", src)) {
    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
        progress, lastBlock, stat, checksum, favoredNodes);
    out.start();
    return out;
  }
}
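The scope is opened through a DFSClient helper rather than through the Tracer directly, so the file path can be recorded on the span. A minimal sketch of what such a helper may look like, assuming the htrace-core4 Tracer/Span API; the tracer field and the "path" annotation key are assumptions for illustration, not necessarily the exact DFSClient code:

// Hedged sketch: open a scope and, if this request is being sampled,
// attach the path as a key-value annotation on its span.
TraceScope newPathTraceScope(String description, String path) {
  TraceScope scope = tracer.newScope(description);
  if (scope.getSpan() != null) {
    scope.getSpan().addKVAnnotation("path", path);
  }
  return scope;
}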
use of org.apache.htrace.core.TraceScope in project hadoop by apache.
the class DataStreamer method waitForAckedSeqno.
/**
 * Wait for the acknowledgement of the given sequence number.
 *
 * @param seqno the sequence number to be acked
 * @throws IOException
 */
void waitForAckedSeqno(long seqno) throws IOException {
  try (TraceScope ignored =
      dfsClient.getTracer().newScope("waitForAckedSeqno")) {
    LOG.debug("{} waiting for ack for: {}", this, seqno);
    long begin = Time.monotonicNow();
    try {
      synchronized (dataQueue) {
        while (!streamerClosed) {
          checkClosed();
          if (lastAckedSeqno >= seqno) {
            break;
          }
          try {
            // when we receive an ack, we notify on dataQueue
            dataQueue.wait(1000);
          } catch (InterruptedException ie) {
            throw new InterruptedIOException(
                "Interrupted while waiting for data to be acknowledged by pipeline");
          }
        }
      }
      checkClosed();
    } catch (ClosedChannelException cce) {
      // the stream was closed without a recorded error; stop waiting
    }
    long duration = Time.monotonicNow() - begin;
    if (duration > dfsclientSlowLogThresholdMs) {
      LOG.warn("Slow waitForAckedSeqno took {}ms (threshold={}ms). File being"
              + " written: {}, block: {}, Write pipeline datanodes: {}.",
          duration, dfsclientSlowLogThresholdMs, src, block, nodes);
    }
  }
}
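The timed wait only makes progress because another thread advances lastAckedSeqno under the same monitor and notifies it. A self-contained sketch of that handshake follows; the class and method names are illustrative, not Hadoop code, only the field names mirror DataStreamer:

// Hedged sketch of the ack handshake that waitForAckedSeqno relies on.
class AckWaiter {
  private final Object dataQueue = new Object();
  private volatile boolean streamerClosed = false;
  private long lastAckedSeqno = -1;

  // Called by the response-processing thread for every ack it receives.
  void onAck(long seqno) {
    synchronized (dataQueue) {
      lastAckedSeqno = seqno;
      dataQueue.notifyAll();     // wake up any thread blocked in awaitAck()
    }
  }

  // Called by the writer thread; same structure as waitForAckedSeqno.
  void awaitAck(long seqno) throws InterruptedException {
    synchronized (dataQueue) {
      while (!streamerClosed && lastAckedSeqno < seqno) {
        dataQueue.wait(1000);    // bounded wait, then re-check the condition
      }
    }
  }
}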
use of org.apache.htrace.core.TraceScope in project hadoop by apache.
the class DataStreamer method processDatanodeOrExternalError.
/**
 * If this stream has encountered any errors, shut down threads
 * and mark the stream as closed.
 *
 * @return true if it should sleep for a while after returning.
 */
private boolean processDatanodeOrExternalError() throws IOException {
  if (!errorState.hasDatanodeError() && !shouldHandleExternalError()) {
    return false;
  }
  LOG.debug("start process datanode/external error, {}", this);
  if (response != null) {
    LOG.info("Error Recovery for " + block + " waiting for responder to exit. ");
    return true;
  }
  closeStream();
  // move packets from ack queue to front of the data queue
  synchronized (dataQueue) {
    dataQueue.addAll(0, ackQueue);
    ackQueue.clear();
    packetSendTime.clear();
  }
  // If the same packet has already triggered pipeline recovery five times,
  // give up: the data is most likely being corrupted during transmission.
  if (!errorState.isRestartingNode() && ++pipelineRecoveryCount > 5) {
    LOG.warn("Error recovering pipeline for writing " + block
        + ". Already retried 5 times for the same packet.");
    lastException.set(new IOException("Failing write. Tried pipeline "
        + "recovery 5 times without success."));
    streamerClosed = true;
    return false;
  }
  setupPipelineForAppendOrRecovery();
  if (!streamerClosed && dfsClient.clientRunning) {
    if (stage == BlockConstructionStage.PIPELINE_CLOSE) {
      // The pipeline was being closed; remove the end-of-block packet and
      // mark it as acked so that a client waiting on close() will be aware
      // that the flush finished.
      synchronized (dataQueue) {
        // remove the end of block packet
        DFSPacket endOfBlockPacket = dataQueue.remove();
        // Close any trace span associated with this Packet
        TraceScope scope = endOfBlockPacket.getTraceScope();
        if (scope != null) {
          scope.reattach();
          scope.close();
          endOfBlockPacket.setTraceScope(null);
        }
        assert endOfBlockPacket.isLastPacketInBlock();
        assert lastAckedSeqno == endOfBlockPacket.getSeqno() - 1;
        lastAckedSeqno = endOfBlockPacket.getSeqno();
        pipelineRecoveryCount = 0;
        dataQueue.notifyAll();
      }
      endBlock();
    } else {
      initDataStreaming();
    }
  }
  return false;
}
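The reattach()-before-close() above is the receiving half of a hand-off: the scope travelling with the packet was detached by the thread that created it. A hedged sketch of that idiom; the Packet holder and the span name are illustrative, only the TraceScope calls mirror the htrace-core4 API:

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

// Hedged sketch of the detach/reattach idiom for scopes that travel with
// a packet between threads.
class PacketTraceExample {
  static class Packet {
    TraceScope traceScope;
  }

  static Packet enqueue(Tracer tracer) {
    Packet p = new Packet();
    p.traceScope = tracer.newScope("writePacket");
    p.traceScope.detach();      // producer thread gives up ownership
    return p;
  }

  static void dispose(Packet p) {
    if (p.traceScope != null) {
      p.traceScope.reattach();  // consumer thread takes ownership back
      p.traceScope.close();     // a detached scope must be reattached first
      p.traceScope = null;
    }
  }
}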
use of org.apache.htrace.core.TraceScope in project hadoop by apache.
the class DFSStripedOutputStream method closeImpl.
@Override
protected synchronized void closeImpl() throws IOException {
  if (isClosed()) {
    final MultipleIOException.Builder b = new MultipleIOException.Builder();
    for (int i = 0; i < streamers.size(); i++) {
      final StripedDataStreamer si = getStripedDataStreamer(i);
      try {
        si.getLastException().check(true);
      } catch (IOException e) {
        b.add(e);
      }
    }
    final IOException ioe = b.build();
    if (ioe != null) {
      throw ioe;
    }
    return;
  }
  try {
    try {
      // flush from all upper layers
      flushBuffer();
      // if the last stripe is incomplete, generate and write parity cells
      if (generateParityCellsForLastStripe()) {
        writeParityCells();
      }
      enqueueAllCurrentPackets();
      // flush all the data packets
      flushAllInternals();
      // check failures
      checkStreamerFailures();
      for (int i = 0; i < numAllBlocks; i++) {
        final StripedDataStreamer s = setCurrentStreamer(i);
        if (s.isHealthy()) {
          try {
            if (s.getBytesCurBlock() > 0) {
              setCurrentPacketToEmpty();
            }
            // flush the last "close" packet to Datanode
            flushInternal();
          } catch (Exception e) {
            // TODO for both close and endBlock, we currently do not handle
            // failures when sending the last packet. We actually do not need
            // to bump GS for this kind of failure. Thus counting the total
            // number of failures may be good enough.
          }
        }
      }
    } finally {
      // Failures may happen when flushing data/parity data out. Exceptions
      // may be thrown if more than 3 streamers fail, or updatePipeline RPC
      // fails. Streamers may keep waiting for the new block/GS information.
      // Thus need to force closing these threads.
      closeThreads(true);
    }
    try (TraceScope ignored =
        dfsClient.getTracer().newScope("completeFile")) {
      completeFile(currentBlockGroup);
    }
    logCorruptBlocks();
  } catch (ClosedChannelException ignored) {
  } finally {
    setClosed();
    // shutdown executor of flushAll tasks
    flushAllExecutor.shutdownNow();
  }
}
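All of these snippets assume the client already holds a Tracer behind getTracer(). A hedged sketch of how an htrace-core4 tracer can be built and used; the configuration prefix below is an assumption for illustration, not necessarily what DFSClient uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

// Hedged sketch: build a Tracer and open a scope around an operation.
public class TracerSetupExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    Tracer tracer = new Tracer.Builder("DFSClient")
        .conf(TraceUtils.wrapHadoopConf("fs.client.htrace.", conf))
        .build();
    try (TraceScope ignored = tracer.newScope("completeFile")) {
      // the completeFile RPC would run here
    }
    tracer.close();
  }
}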
use of org.apache.htrace.core.TraceScope in project hadoop by apache.
the class Receiver method opRequestShortCircuitFds.
/** Receive {@link Op#REQUEST_SHORT_CIRCUIT_FDS} */
private void opRequestShortCircuitFds(DataInputStream in) throws IOException {
  final OpRequestShortCircuitAccessProto proto =
      OpRequestShortCircuitAccessProto.parseFrom(vintPrefixed(in));
  SlotId slotId = (proto.hasSlotId())
      ? PBHelperClient.convert(proto.getSlotId()) : null;
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    requestShortCircuitFds(PBHelperClient.convert(proto.getHeader().getBlock()),
        PBHelperClient.convert(proto.getHeader().getToken()), slotId,
        proto.getMaxVersion(), proto.getSupportsReceiptVerification());
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
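Here continueTraceSpan resumes, on the datanode, the span that the client started, by pulling a parent span id out of the request header. A hedged sketch of that resume-with-parent pattern with the htrace-core4 API; the surrounding class and the way the two id halves arrive are assumptions, not the exact proto fields:

import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

// Hedged sketch: resume a trace under a parent span id received over the wire.
public class ContinueSpanExample {
  static TraceScope continueSpan(Tracer tracer, String description,
      long parentIdHigh, long parentIdLow) {
    SpanId parent = new SpanId(parentIdHigh, parentIdLow);
    if (!parent.isValid()) {
      return null;            // the client did not sample this request
    }
    return tracer.newScope(description, parent);
  }
}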