
Example 16 with TraceScope

Use of org.apache.htrace.core.TraceScope in project hadoop by apache.

The class DFSOutputStream, method newStreamForAppend.

static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src, EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes) throws IOException {
    if (stat.getErasureCodingPolicy() != null) {
        throw new IOException("Not support appending to a striping layout file yet.");
    }
    try (TraceScope ignored = dfsClient.newPathTraceScope("newStreamForAppend", src)) {
        final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags, progress, lastBlock, stat, checksum, favoredNodes);
        out.start();
        return out;
    }
}
Also used: TraceScope(org.apache.htrace.core.TraceScope), InterruptedIOException(java.io.InterruptedIOException), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException)
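
The try-with-resources idiom above is the core of the htrace-core4 API: Tracer.newScope(String) opens a TraceScope, and TraceScope implements Closeable, so the scope ends on both normal return and exception. Below is a minimal sketch of the same idiom in isolation, assuming an already-configured org.apache.htrace.core.Tracer; the helper name inScope and its Callable-based signature are illustrative, not part of the Hadoop code.

import java.util.concurrent.Callable;

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public final class TracedCall {

    // Run a unit of work inside a named trace scope. try-with-resources
    // closes the scope whether work.call() returns or throws, the same
    // shape newStreamForAppend uses above.
    static <T> T inScope(Tracer tracer, String description, Callable<T> work) throws Exception {
        try (TraceScope ignored = tracer.newScope(description)) {
            return work.call();
        }
    }
}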

Example 17 with TraceScope

Use of org.apache.htrace.core.TraceScope in project hadoop by apache.

The class DataStreamer, method waitForAckedSeqno.

/**
   * Wait for the ack of the given seqno.
   *
   * @param seqno the sequence number to be acked
   * @throws IOException if the stream is closed or the wait is interrupted
   */
void waitForAckedSeqno(long seqno) throws IOException {
    try (TraceScope ignored = dfsClient.getTracer().newScope("waitForAckedSeqno")) {
        LOG.debug("{} waiting for ack for: {}", this, seqno);
        long begin = Time.monotonicNow();
        try {
            synchronized (dataQueue) {
                while (!streamerClosed) {
                    checkClosed();
                    if (lastAckedSeqno >= seqno) {
                        break;
                    }
                    try {
                        // when we receive an ack, we notify on dataQueue
                        dataQueue.wait(1000);
                    } catch (InterruptedException ie) {
                        throw new InterruptedIOException("Interrupted while waiting for data to be acknowledged by pipeline");
                    }
                }
            }
            checkClosed();
        } catch (ClosedChannelException cce) {
            // The stream was closed while waiting; the empty catch is deliberate.
        }
        long duration = Time.monotonicNow() - begin;
        if (duration > dfsclientSlowLogThresholdMs) {
            LOG.warn("Slow waitForAckedSeqno took {}ms (threshold={}ms). File being" + " written: {}, block: {}, Write pipeline datanodes: {}.", duration, dfsclientSlowLogThresholdMs, src, block, nodes);
        }
    }
}
Also used: InterruptedIOException(java.io.InterruptedIOException), ClosedChannelException(java.nio.channels.ClosedChannelException), TraceScope(org.apache.htrace.core.TraceScope)
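
The body of the scope is a classic guarded wait: the condition (lastAckedSeqno >= seqno) is re-checked in a loop under the dataQueue monitor, wait(1000) bounds each sleep so the closed flag is re-examined at least once a second, and InterruptedException is converted to InterruptedIOException so callers see an IOException subtype. A stripped-down sketch of the same idiom follows; the class and field names are illustrative.

import java.io.InterruptedIOException;

public class AckWaiter {
    private final Object lock = new Object();
    private long lastAcked = -1;
    private boolean closed = false;

    // Ack-processing thread: record the ack and wake every waiter.
    void acked(long seqno) {
        synchronized (lock) {
            lastAcked = Math.max(lastAcked, seqno);
            lock.notifyAll();
        }
    }

    // Writer thread: block until seqno is acked or the stream closes.
    void awaitAck(long seqno) throws InterruptedIOException {
        synchronized (lock) {
            while (!closed && lastAcked < seqno) {
                try {
                    // Bounded wait so the closed flag is re-checked periodically.
                    lock.wait(1000);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw new InterruptedIOException(
                        "Interrupted while waiting for ack of seqno " + seqno);
                }
            }
        }
    }
}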

Example 18 with TraceScope

Use of org.apache.htrace.core.TraceScope in project hadoop by apache.

The class DataStreamer, method processDatanodeOrExternalError.

/**
   * If this stream has encountered any errors, shut down the threads
   * and mark the stream as closed.
   *
   * @return true if it should sleep for a while after returning.
   */
private boolean processDatanodeOrExternalError() throws IOException {
    if (!errorState.hasDatanodeError() && !shouldHandleExternalError()) {
        return false;
    }
    LOG.debug("start process datanode/external error, {}", this);
    if (response != null) {
        LOG.info("Error Recovery for " + block + " waiting for responder to exit. ");
        return true;
    }
    closeStream();
    // move packets from ack queue to front of the data queue
    synchronized (dataQueue) {
        dataQueue.addAll(0, ackQueue);
        ackQueue.clear();
        packetSendTime.clear();
    }
    // If we had to recover the pipeline five times in a row for the
    // same packet, this client likely has corrupt data or corrupting
    // during transmission.
    if (!errorState.isRestartingNode() && ++pipelineRecoveryCount > 5) {
        LOG.warn("Error recovering pipeline for writing " + block + ". Already retried 5 times for the same packet.");
        lastException.set(new IOException("Failing write. Tried pipeline " + "recovery 5 times without success."));
        streamerClosed = true;
        return false;
    }
    setupPipelineForAppendOrRecovery();
    if (!streamerClosed && dfsClient.clientRunning) {
        if (stage == BlockConstructionStage.PIPELINE_CLOSE) {
            // ack the end-of-block packet and notify waiters below so that
            // a client waiting on close() will be aware that the flush finished.
            synchronized (dataQueue) {
                // remove the end of block packet
                DFSPacket endOfBlockPacket = dataQueue.remove();
                // Close any trace span associated with this Packet
                TraceScope scope = endOfBlockPacket.getTraceScope();
                if (scope != null) {
                    scope.reattach();
                    scope.close();
                    endOfBlockPacket.setTraceScope(null);
                }
                assert endOfBlockPacket.isLastPacketInBlock();
                assert lastAckedSeqno == endOfBlockPacket.getSeqno() - 1;
                lastAckedSeqno = endOfBlockPacket.getSeqno();
                pipelineRecoveryCount = 0;
                dataQueue.notifyAll();
            }
            endBlock();
        } else {
            initDataStreaming();
        }
    }
    return false;
}
Also used: TraceScope(org.apache.htrace.core.TraceScope), InterruptedIOException(java.io.InterruptedIOException), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException)
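
The reattach()-then-close() pair is the notable htrace detail here: a scope that was detach()ed when the packet was queued is no longer bound to any thread, and a detached scope must be reattached to the closing thread before close(). A minimal sketch of that hand-off lifecycle, assuming a configured Tracer; the wrapper class below is illustrative.

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class PacketSpanHandoff {
    private TraceScope scope;

    // Producer thread: open a scope for the packet, then detach it so the
    // span can travel with the packet instead of staying bound to this thread.
    void open(Tracer tracer, String description) {
        scope = tracer.newScope(description);
        scope.detach();
    }

    // Consumer thread: reattach before closing, as processDatanodeOrExternalError
    // does for the end-of-block packet above.
    void finish() {
        if (scope != null) {
            scope.reattach();
            scope.close();
            scope = null;
        }
    }
}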

Example 19 with TraceScope

Use of org.apache.htrace.core.TraceScope in project hadoop by apache.

The class DFSStripedOutputStream, method closeImpl.

@Override
protected synchronized void closeImpl() throws IOException {
    if (isClosed()) {
        final MultipleIOException.Builder b = new MultipleIOException.Builder();
        for (int i = 0; i < streamers.size(); i++) {
            final StripedDataStreamer si = getStripedDataStreamer(i);
            try {
                si.getLastException().check(true);
            } catch (IOException e) {
                b.add(e);
            }
        }
        final IOException ioe = b.build();
        if (ioe != null) {
            throw ioe;
        }
        return;
    }
    try {
        try {
            // flush from all upper layers
            flushBuffer();
            // if the last stripe is incomplete, generate and write parity cells
            if (generateParityCellsForLastStripe()) {
                writeParityCells();
            }
            enqueueAllCurrentPackets();
            // flush all the data packets
            flushAllInternals();
            // check failures
            checkStreamerFailures();
            for (int i = 0; i < numAllBlocks; i++) {
                final StripedDataStreamer s = setCurrentStreamer(i);
                if (s.isHealthy()) {
                    try {
                        if (s.getBytesCurBlock() > 0) {
                            setCurrentPacketToEmpty();
                        }
                        // flush the last "close" packet to Datanode
                        flushInternal();
                    } catch (Exception e) {
                        // TODO for both close and endBlock, we currently do not handle
                        // failures when sending the last packet. We actually do not need to
                        // bump GS for this kind of failure. Thus counting the total number
                        // of failures may be good enough.
                    }
                }
            }
        } finally {
            // Failures may happen when flushing data/parity data out. Exceptions
            // may be thrown if more than 3 streamers fail, or updatePipeline RPC
            // fails. Streamers may keep waiting for the new block/GS information.
            // Thus need to force closing these threads.
            closeThreads(true);
        }
        try (TraceScope ignored = dfsClient.getTracer().newScope("completeFile")) {
            completeFile(currentBlockGroup);
        }
        logCorruptBlocks();
    } catch (ClosedChannelException ignored) {
    } finally {
        setClosed();
        // shutdown executor of flushAll tasks
        flushAllExecutor.shutdownNow();
    }
}
Also used: ClosedChannelException(java.nio.channels.ClosedChannelException), DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), TraceScope(org.apache.htrace.core.TraceScope), InterruptedIOException(java.io.InterruptedIOException), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException), HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException), ExecutionException(java.util.concurrent.ExecutionException)
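
Besides the completeFile scope, this method shows the MultipleIOException.Builder pattern for surfacing every streamer failure instead of only the first: add() collects exceptions as the loop checks each streamer, and build() returns null when nothing was added, hence the null check before throwing. A condensed sketch of that accumulation pattern follows; the Checkable interface is illustrative.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.io.MultipleIOException;

public class FailureCollector {

    // Check every worker, collecting failures rather than stopping at the
    // first one; throw a combined exception only if something failed.
    static void checkAll(List<Checkable> workers) throws IOException {
        final MultipleIOException.Builder b = new MultipleIOException.Builder();
        for (Checkable w : workers) {
            try {
                w.check();
            } catch (IOException e) {
                b.add(e);
            }
        }
        final IOException combined = b.build();
        if (combined != null) {
            throw combined;
        }
    }

    interface Checkable {
        void check() throws IOException;
    }
}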

Example 20 with TraceScope

Use of org.apache.htrace.core.TraceScope in project hadoop by apache.

The class Receiver, method opRequestShortCircuitFds.

/** Receive {@link Op#REQUEST_SHORT_CIRCUIT_FDS} */
private void opRequestShortCircuitFds(DataInputStream in) throws IOException {
    final OpRequestShortCircuitAccessProto proto = OpRequestShortCircuitAccessProto.parseFrom(vintPrefixed(in));
    SlotId slotId = (proto.hasSlotId()) ? PBHelperClient.convert(proto.getSlotId()) : null;
    TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName());
    try {
        requestShortCircuitFds(PBHelperClient.convert(proto.getHeader().getBlock()), PBHelperClient.convert(proto.getHeader().getToken()), slotId, proto.getMaxVersion(), proto.getSupportsReceiptVerification());
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}
Also used: SlotId(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId), TraceScope(org.apache.htrace.core.TraceScope), OpRequestShortCircuitAccessProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto)
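
Here continueTraceSpan returns null when the request header carries no span information, so the receiver cannot assume a scope exists; the try/finally with a null check guarantees the scope is closed exactly when one was opened. A schematic version of that shape, assuming a configured Tracer; the handler below is illustrative and merely stands in for continueTraceSpan, which builds its scope from the request header.

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class MaybeTracedOp {

    // Open a scope only when the incoming request was traced; otherwise run
    // untraced. The finally block closes the scope iff one was created.
    void handle(Tracer tracer, boolean requestHasSpan, Runnable op) {
        TraceScope traceScope = requestHasSpan ? tracer.newScope("op") : null;
        try {
            op.run();
        } finally {
            if (traceScope != null) {
                traceScope.close();
            }
        }
    }
}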

Aggregations

TraceScope (org.apache.htrace.core.TraceScope): 54
IOException (java.io.IOException): 11
InterruptedIOException (java.io.InterruptedIOException): 7
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 6
RemoteException (org.apache.hadoop.ipc.RemoteException): 5
FileNotFoundException (java.io.FileNotFoundException): 4
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 4
UnresolvedPathException (org.apache.hadoop.hdfs.protocol.UnresolvedPathException): 4
AccessControlException (org.apache.hadoop.security.AccessControlException): 4
ClosedChannelException (java.nio.channels.ClosedChannelException): 3
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 3
ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException): 3
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException): 3
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 3
QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException): 3
Tracer (org.apache.htrace.core.Tracer): 3
ByteBuffer (java.nio.ByteBuffer): 2
List (java.util.List): 2
EventBatch (org.apache.hadoop.hdfs.inotify.EventBatch): 2
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2