Example 21 with TraceScope

use of org.apache.htrace.core.TraceScope in project hadoop by apache.

the class DFSInputStream method read.

@Override
public synchronized int read(final ByteBuffer buf) throws IOException {
    ReaderStrategy byteBufferReader = new ByteBufferStrategy(buf, readStatistics, dfsClient);
    int reqLen = buf.remaining();
    try (TraceScope scope = dfsClient.newReaderTraceScope("DFSInputStream#byteBufferRead", src, getPos(), reqLen)) {
        int retLen = readWithStrategy(byteBufferReader);
        if (retLen < reqLen) {
            dfsClient.addRetLenToReaderScope(scope, retLen);
        }
        return retLen;
    }
}
Also used : TraceScope(org.apache.htrace.core.TraceScope)
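
For orientation, this is the code path an HDFS client exercises when it calls the ByteBuffer overload of read() on a stream opened through the FileSystem API. A minimal caller-side sketch (the default Configuration and the path /tmp/example.txt are illustrative assumptions, not part of the example above):

import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ByteBufferReadSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Against an HDFS cluster, fs.open() returns a stream wrapping DFSInputStream,
        // so read(ByteBuffer) below ends up in DFSInputStream#byteBufferRead.
        try (FileSystem fs = FileSystem.get(conf);
             FSDataInputStream in = fs.open(new Path("/tmp/example.txt"))) {
            ByteBuffer buf = ByteBuffer.allocate(4096);
            int n = in.read(buf); // number of bytes read, or -1 at end of stream
            System.out.println("read " + n + " bytes");
        }
    }
}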

Example 22 with TraceScope

use of org.apache.htrace.core.TraceScope in project hadoop by apache.

the class DFSInputStream method read.

/**
   * Read bytes starting from the specified position.
   *
   * @param position start read from this position
   * @param buffer read buffer
   * @param offset offset into buffer
   * @param length number of bytes to read
   *
   * @return actual number of bytes read
   */
@Override
public int read(long position, byte[] buffer, int offset, int length) throws IOException {
    validatePositionedReadArgs(position, buffer, offset, length);
    if (length == 0) {
        return 0;
    }
    try (TraceScope scope = dfsClient.newReaderTraceScope("DFSInputStream#byteArrayPread", src, position, length)) {
        ByteBuffer bb = ByteBuffer.wrap(buffer, offset, length);
        int retLen = pread(position, bb);
        if (retLen < length) {
            dfsClient.addRetLenToReaderScope(scope, retLen);
        }
        return retLen;
    }
}
Also used : TraceScope(org.apache.htrace.core.TraceScope) ByteBuffer(java.nio.ByteBuffer)
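
The positional read above is reachable the same way through FSDataInputStream, which implements PositionedReadable. A brief sketch, assuming in is a stream opened from an HDFS FileSystem as in the previous sketch:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;

public class PreadSketch {
    // Reads up to buffer.length bytes starting at 'position' without moving the
    // stream's current offset; on HDFS this lands in DFSInputStream#byteArrayPread.
    static int preadOnce(FSDataInputStream in, long position, byte[] buffer) throws IOException {
        return in.read(position, buffer, 0, buffer.length);
    }
}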

Example 23 with TraceScope

use of org.apache.htrace.core.TraceScope in project hadoop by apache.

the class DataStreamer method run.

/*
   * The streamer thread is the only thread that opens streams to datanodes,
   * and closes them. Any error recovery is also done by this thread.
   */
@Override
public void run() {
    long lastPacket = Time.monotonicNow();
    TraceScope scope = null;
    while (!streamerClosed && dfsClient.clientRunning) {
        // if the Responder encountered an error, shutdown Responder
        if (errorState.hasError()) {
            closeResponder();
        }
        DFSPacket one;
        try {
            // process datanode IO errors if any
            boolean doSleep = processDatanodeOrExternalError();
            final int halfSocketTimeout = dfsClient.getConf().getSocketTimeout() / 2;
            synchronized (dataQueue) {
                // wait for a packet to be sent.
                long now = Time.monotonicNow();
                while ((!shouldStop() && dataQueue.size() == 0 && (stage != BlockConstructionStage.DATA_STREAMING || stage == BlockConstructionStage.DATA_STREAMING && now - lastPacket < halfSocketTimeout)) || doSleep) {
                    long timeout = halfSocketTimeout - (now - lastPacket);
                    timeout = timeout <= 0 ? 1000 : timeout;
                    timeout = (stage == BlockConstructionStage.DATA_STREAMING) ? timeout : 1000;
                    try {
                        dataQueue.wait(timeout);
                    } catch (InterruptedException e) {
                        LOG.warn("Caught exception", e);
                    }
                    doSleep = false;
                    now = Time.monotonicNow();
                }
                if (shouldStop()) {
                    continue;
                }
                // get packet to be sent.
                if (dataQueue.isEmpty()) {
                    one = createHeartbeatPacket();
                } else {
                    try {
                        backOffIfNecessary();
                    } catch (InterruptedException e) {
                        LOG.warn("Caught exception", e);
                    }
                    // regular data packet
                    one = dataQueue.getFirst();
                    SpanId[] parents = one.getTraceParents();
                    if (parents.length > 0) {
                        scope = dfsClient.getTracer().newScope("dataStreamer", parents[0]);
                        scope.getSpan().setParents(parents);
                    }
                }
            }
            // get new block from namenode.
            if (LOG.isDebugEnabled()) {
                LOG.debug("stage=" + stage + ", " + this);
            }
            if (stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
                LOG.debug("Allocating new block: {}", this);
                setPipeline(nextBlockOutputStream());
                initDataStreaming();
            } else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) {
                LOG.debug("Append to block {}", block);
                setupPipelineForAppendOrRecovery();
                if (streamerClosed) {
                    continue;
                }
                initDataStreaming();
            }
            long lastByteOffsetInBlock = one.getLastByteOffsetBlock();
            if (lastByteOffsetInBlock > stat.getBlockSize()) {
                throw new IOException("BlockSize " + stat.getBlockSize() + " < lastByteOffsetInBlock, " + this + ", " + one);
            }
            if (one.isLastPacketInBlock()) {
                // wait for all data packets to be successfully acked
                synchronized (dataQueue) {
                    while (!shouldStop() && ackQueue.size() != 0) {
                        try {
                            // wait for acks to arrive from datanodes
                            dataQueue.wait(1000);
                        } catch (InterruptedException e) {
                            LOG.warn("Caught exception", e);
                        }
                    }
                }
                if (shouldStop()) {
                    continue;
                }
                stage = BlockConstructionStage.PIPELINE_CLOSE;
            }
            // send the packet
            SpanId spanId = SpanId.INVALID;
            synchronized (dataQueue) {
                // move packet from dataQueue to ackQueue
                if (!one.isHeartbeatPacket()) {
                    if (scope != null) {
                        spanId = scope.getSpanId();
                        scope.detach();
                        one.setTraceScope(scope);
                    }
                    scope = null;
                    dataQueue.removeFirst();
                    ackQueue.addLast(one);
                    packetSendTime.put(one.getSeqno(), Time.monotonicNow());
                    dataQueue.notifyAll();
                }
            }
            LOG.debug("{} sending {}", this, one);
            // write out data to remote datanode
            try (TraceScope ignored = dfsClient.getTracer().newScope("DataStreamer#writeTo", spanId)) {
                one.writeTo(blockStream);
                blockStream.flush();
            } catch (IOException e) {
                // HDFS-3398: treat the primary DN as down, since the client is
                // unable to write to it. If a failed or restarting node has already
                // been recorded by the responder, the following call has no
                // effect. Pipeline recovery can handle only one node error at a
                // time. If the primary node fails again during the recovery, it
                // will be taken out then.
                errorState.markFirstNodeIfNotMarked();
                throw e;
            }
            lastPacket = Time.monotonicNow();
            // update bytesSent
            long tmpBytesSent = one.getLastByteOffsetBlock();
            if (bytesSent < tmpBytesSent) {
                bytesSent = tmpBytesSent;
            }
            if (shouldStop()) {
                continue;
            }
            // Is this block full?
            if (one.isLastPacketInBlock()) {
                // wait for the close packet to be acked
                synchronized (dataQueue) {
                    while (!shouldStop() && ackQueue.size() != 0) {
                        // wait for acks to arrive from datanodes
                        dataQueue.wait(1000);
                    }
                }
                if (shouldStop()) {
                    continue;
                }
                endBlock();
            }
            if (progress != null) {
                progress.progress();
            }
            // This is used by unit test to trigger race conditions.
            if (artificialSlowdown != 0 && dfsClient.clientRunning) {
                Thread.sleep(artificialSlowdown);
            }
        } catch (Throwable e) {
            // Log warning if there was a real error.
            if (!errorState.isRestartingNode()) {
                // quota exception messages are descriptive enough; skip the
                // verbose stack-trace WARN for them.
                if (e instanceof QuotaExceededException) {
                    LOG.debug("DataStreamer Quota Exception", e);
                } else {
                    LOG.warn("DataStreamer Exception", e);
                }
            }
            lastException.set(e);
            assert !(e instanceof NullPointerException);
            errorState.setInternalError();
            if (!errorState.isNodeMarked()) {
                // Not a datanode issue
                streamerClosed = true;
            }
        } finally {
            if (scope != null) {
                scope.close();
                scope = null;
            }
        }
    }
    closeInternal();
}
Also used : QuotaExceededException(org.apache.hadoop.hdfs.protocol.QuotaExceededException) TraceScope(org.apache.htrace.core.TraceScope) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) SpanId(org.apache.htrace.core.SpanId)
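
The HTrace-specific pattern worth noting here is the thread handoff: the streamer detaches the scope, stores it on the DFSPacket, and the span is reattached and closed later when the packet is acked. A minimal sketch of that detach/reattach/close pattern, assuming an already-built org.apache.htrace.core.Tracer named tracer (class and method names below are illustrative):

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class DetachedScopeSketch {
    // Starts a span on the calling thread and detaches the scope so that a
    // different thread can finish it, mirroring how DataStreamer hands the
    // scope to the packet and its responder.
    static TraceScope startDetached(Tracer tracer, String description) {
        TraceScope scope = tracer.newScope(description);
        // scope.getSpanId() could be recorded here and used as the parent of
        // follow-up spans, as the example does for "DataStreamer#writeTo".
        scope.detach(); // the scope is no longer bound to this thread
        return scope;
    }

    // Invoked on whichever thread completes the work.
    static void finish(TraceScope scope) {
        scope.reattach(); // bind the scope to the current thread again
        scope.close();    // ends the span
    }
}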

Example 24 with TraceScope

use of org.apache.htrace.core.TraceScope in project hadoop by apache.

the class DFSOutputStream method newStreamForAppend.

static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src, EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes) throws IOException {
    if (stat.getErasureCodingPolicy() != null) {
        throw new IOException("Not support appending to a striping layout file yet.");
    }
    try (TraceScope ignored = dfsClient.newPathTraceScope("newStreamForAppend", src)) {
        final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags, progress, lastBlock, stat, checksum, favoredNodes);
        out.start();
        return out;
    }
}
Also used : TraceScope(org.apache.htrace.core.TraceScope) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
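
newStreamForAppend sits behind DFSClient's append path, which in turn backs FileSystem.append() for HDFS paths. A hedged caller-side sketch (path and payload are illustrative):

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf);
             // On HDFS, append() eventually constructs a DFSOutputStream via
             // newStreamForAppend inside the "newStreamForAppend" trace scope.
             FSDataOutputStream out = fs.append(new Path("/tmp/example.log"))) {
            out.write("one more line\n".getBytes(StandardCharsets.UTF_8));
        }
    }
}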

Example 25 with TraceScope

use of org.apache.htrace.core.TraceScope in project hadoop by apache.

the class DataStreamer method waitForAckedSeqno.

/**
   * Wait until the packet with the given sequence number has been acked
   * by the datanode pipeline.
   *
   * @param seqno the sequence number to wait for
   * @throws IOException if the stream is closed or the wait is interrupted
   */
void waitForAckedSeqno(long seqno) throws IOException {
    try (TraceScope ignored = dfsClient.getTracer().newScope("waitForAckedSeqno")) {
        LOG.debug("{} waiting for ack for: {}", this, seqno);
        long begin = Time.monotonicNow();
        try {
            synchronized (dataQueue) {
                while (!streamerClosed) {
                    checkClosed();
                    if (lastAckedSeqno >= seqno) {
                        break;
                    }
                    try {
                        // when we receive an ack, we notify on dataQueue
                        dataQueue.wait(1000);
                    } catch (InterruptedException ie) {
                        throw new InterruptedIOException("Interrupted while waiting for data to be acknowledged by pipeline");
                    }
                }
            }
            checkClosed();
        } catch (ClosedChannelException cce) {
            // swallowed: fall through to the slow-write check below even if the
            // stream was closed while we were waiting
        }
        long duration = Time.monotonicNow() - begin;
        if (duration > dfsclientSlowLogThresholdMs) {
            LOG.warn("Slow waitForAckedSeqno took {}ms (threshold={}ms). File being" + " written: {}, block: {}, Write pipeline datanodes: {}.", duration, dfsclientSlowLogThresholdMs, src, block, nodes);
        }
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) ClosedChannelException(java.nio.channels.ClosedChannelException) TraceScope(org.apache.htrace.core.TraceScope)
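
waitForAckedSeqno is what makes flushes blocking: DFSOutputStream's hflush()/hsync() enqueue the current packet and then wait for its sequence number to be acked by the pipeline. A short caller-side sketch, assuming out was obtained from FileSystem.create() or FileSystem.append() on HDFS:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;

public class FlushSketch {
    static void writeAndFlush(FSDataOutputStream out, String line) throws IOException {
        out.write(line.getBytes(StandardCharsets.UTF_8));
        // On HDFS, hflush() returns only after the written bytes have been acked by
        // the datanode pipeline, i.e. after DataStreamer#waitForAckedSeqno returns.
        out.hflush();
    }
}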

Aggregations

TraceScope (org.apache.htrace.core.TraceScope): 62
IOException (java.io.IOException): 11
InterruptedIOException (java.io.InterruptedIOException): 7
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 6
RemoteException (org.apache.hadoop.ipc.RemoteException): 5
FileNotFoundException (java.io.FileNotFoundException): 4
List (java.util.List): 4
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 4
UnresolvedPathException (org.apache.hadoop.hdfs.protocol.UnresolvedPathException): 4
AccessControlException (org.apache.hadoop.security.AccessControlException): 4
ClosedChannelException (java.nio.channels.ClosedChannelException): 3
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 3
ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException): 3
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException): 3
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 3
QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException): 3
SpanId (org.apache.htrace.core.SpanId): 3
Tracer (org.apache.htrace.core.Tracer): 3
Test (org.junit.Test): 3
ByteBuffer (java.nio.ByteBuffer): 2