Example 41 with TraceScope

use of org.apache.htrace.core.TraceScope in project hadoop by apache.

the class TestTracing method readWithTracing.

private void readWithTracing(Tracer tracer) throws Exception {
    long startTime = System.currentTimeMillis();
    TraceScope ts = tracer.newScope("testReadTraceHooks");
    readTestFile("testReadTraceHooks.dat");
    ts.close();
    long endTime = System.currentTimeMillis();
    String[] expectedSpanNames = { "testReadTraceHooks", "ClientProtocol#getBlockLocations", "ClientNamenodeProtocol#getBlockLocations", "OpReadBlockProto" };
    SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
    // The trace should last about the same amount of time as the test
    Map<String, List<Span>> map = SetSpanReceiver.getMap();
    Span s = map.get("testReadTraceHooks").get(0);
    Assert.assertNotNull(s);
    long spanStart = s.getStartTimeMillis();
    long spanEnd = s.getStopTimeMillis();
    Assert.assertTrue(spanStart - startTime < 100);
    Assert.assertTrue(spanEnd - endTime < 100);
    // There should only be one trace id, as every span should be homed in the
    // top trace.
    for (Span span : SetSpanReceiver.getSpans()) {
        System.out.println(span.toJson());
    }
    for (Span span : SetSpanReceiver.getSpans()) {
        Assert.assertEquals(ts.getSpan().getSpanId().getHigh(), span.getSpanId().getHigh());
    }
    SetSpanReceiver.clear();
}
Also used : TraceScope(org.apache.htrace.core.TraceScope) List(java.util.List) Span(org.apache.htrace.core.Span)
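
For reference, a Tracer like the one passed into readWithTracing can be constructed with the htrace-core4 builder API. The sketch below is illustrative only: the tracer name, the sampler setting, and the helper buildAlwaysOnTracer are assumptions, not part of the Hadoop test.

import java.util.HashMap;
import java.util.Map;

import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class TracerExample {

    // Hypothetical helper: builds a tracer that samples every request,
    // so every newScope() call creates a real span.
    static Tracer buildAlwaysOnTracer() {
        Map<String, String> conf = new HashMap<>();
        // "sampler.classes" is the htrace-core4 sampler key; AlwaysSampler
        // traces everything, which is what a test like readWithTracing needs.
        conf.put("sampler.classes", "AlwaysSampler");
        return new Tracer.Builder("TestTracing")
            .conf(HTraceConfiguration.fromMap(conf))
            .build();
    }

    public static void main(String[] args) {
        Tracer tracer = buildAlwaysOnTracer();
        // Same pattern as the test: open a scope, do the traced work, close it.
        try (TraceScope scope = tracer.newScope("example")) {
            System.out.println("span id: " + scope.getSpan().getSpanId());
        }
        tracer.close();
    }
}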

Example 42 with TraceScope

use of org.apache.htrace.core.TraceScope in project hadoop by apache.

the class DFSOutputStream method closeImpl.

protected synchronized void closeImpl() throws IOException {
    if (isClosed()) {
        getStreamer().getLastException().check(true);
        return;
    }
    try {
        // flush from all upper layers
        flushBuffer();
        if (currentPacket != null) {
            enqueueCurrentPacket();
        }
        if (getStreamer().getBytesCurBlock() != 0) {
            setCurrentPacketToEmpty();
        }
        // flush all data to Datanodes
        flushInternal();
        // get last block before destroying the streamer
        ExtendedBlock lastBlock = getStreamer().getBlock();
        try (TraceScope ignored = dfsClient.getTracer().newScope("completeFile")) {
            completeFile(lastBlock);
        }
    } catch (ClosedChannelException ignored) {
    } finally {
        // Failures may happen when flushing data.
        // Streamers may keep waiting for the new block information.
        // Thus need to force closing these threads.
        // Don't need to call setClosed() because closeThreads(true)
        // calls setClosed() in the finally block.
        closeThreads(true);
    }
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) TraceScope(org.apache.htrace.core.TraceScope)
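
The try-with-resources block around completeFile is the standard HTrace idiom: the scope, and therefore its span, is closed even if the wrapped call throws. A minimal sketch of that idiom follows, using a hypothetical tracer and a placeholder completeFile method rather than the real NameNode RPC.

import java.io.IOException;

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class ScopedCallExample {

    // Placeholder for the RPC that DFSOutputStream#closeImpl wraps; not the real call.
    static void completeFile(String path) throws IOException {
        // ... talk to the NameNode ...
    }

    static void closeWithTracing(Tracer tracer, String path) throws IOException {
        // The scope is closed in all cases, so the "completeFile" span always
        // gets an end time, even when the call below throws.
        try (TraceScope ignored = tracer.newScope("completeFile")) {
            completeFile(path);
        }
    }
}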

Example 43 with TraceScope

use of org.apache.htrace.core.TraceScope in project hadoop by apache.

the class DFSOutputStream method newStreamForCreate.

static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src, FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, Progressable progress, DataChecksum checksum, String[] favoredNodes) throws IOException {
    try (TraceScope ignored = dfsClient.newPathTraceScope("newStreamForCreate", src)) {
        HdfsFileStatus stat = null;
        // Retry the create if we get a RetryStartFileException up to a maximum
        // number of times
        boolean shouldRetry = true;
        int retryCount = CREATE_RETRY_COUNT;
        while (shouldRetry) {
            shouldRetry = false;
            try {
                stat = dfsClient.namenode.create(src, masked, dfsClient.clientName, new EnumSetWritable<>(flag), createParent, replication, blockSize, SUPPORTED_CRYPTO_VERSIONS);
                break;
            } catch (RemoteException re) {
                IOException e = re.unwrapRemoteException(AccessControlException.class, DSQuotaExceededException.class, QuotaByStorageTypeExceededException.class, FileAlreadyExistsException.class, FileNotFoundException.class, ParentNotDirectoryException.class, NSQuotaExceededException.class, RetryStartFileException.class, SafeModeException.class, UnresolvedPathException.class, SnapshotAccessControlException.class, UnknownCryptoProtocolVersionException.class);
                if (e instanceof RetryStartFileException) {
                    if (retryCount > 0) {
                        shouldRetry = true;
                        retryCount--;
                    } else {
                        throw new IOException("Too many retries because of encryption" + " zone operations", e);
                    }
                } else {
                    throw e;
                }
            }
        }
        Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
        final DFSOutputStream out;
        if (stat.getErasureCodingPolicy() != null) {
            out = new DFSStripedOutputStream(dfsClient, src, stat, flag, progress, checksum, favoredNodes);
        } else {
            out = new DFSOutputStream(dfsClient, src, stat, flag, progress, checksum, favoredNodes, true);
        }
        out.start();
        return out;
    }
}
Also used : EnumSetWritable(org.apache.hadoop.io.EnumSetWritable) QuotaByStorageTypeExceededException(org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) TraceScope(org.apache.htrace.core.TraceScope) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) RetryStartFileException(org.apache.hadoop.hdfs.server.namenode.RetryStartFileException) ParentNotDirectoryException(org.apache.hadoop.fs.ParentNotDirectoryException) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) DSQuotaExceededException(org.apache.hadoop.hdfs.protocol.DSQuotaExceededException) NSQuotaExceededException(org.apache.hadoop.hdfs.protocol.NSQuotaExceededException) SafeModeException(org.apache.hadoop.hdfs.server.namenode.SafeModeException) RemoteException(org.apache.hadoop.ipc.RemoteException) UnresolvedPathException(org.apache.hadoop.hdfs.protocol.UnresolvedPathException)
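
newStreamForCreate is reached indirectly whenever an HDFS client creates a file through the public FileSystem API. The sketch below shows that path; the configuration key used to enable client-side tracing is an assumption (the exact key varies between Hadoop releases), so treat it as a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateFileExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed key for the client-side HTrace sampler; check your release's
        // documentation before relying on it.
        conf.set("fs.client.htrace.sampler.classes", "AlwaysSampler");

        try (FileSystem fs = FileSystem.get(conf);
             // FileSystem#create eventually reaches DFSOutputStream.newStreamForCreate,
             // which opens the "newStreamForCreate" trace scope shown above.
             FSDataOutputStream out = fs.create(new Path("/tmp/traced-file"))) {
            out.writeUTF("hello");
        }
    }
}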

Example 44 with TraceScope

use of org.apache.htrace.core.TraceScope in project hadoop by apache.

the class DFSOutputStream method close.

/**
   * Closes this output stream and releases any system
   * resources associated with this stream.
   */
@Override
public void close() throws IOException {
    final MultipleIOException.Builder b = new MultipleIOException.Builder();
    synchronized (this) {
        try (TraceScope ignored = dfsClient.newPathTraceScope("DFSOutputStream#close", src)) {
            closeImpl();
        } catch (IOException e) {
            b.add(e);
        }
    }
    final IOException ioe = b.build();
    if (ioe != null) {
        throw ioe;
    }
}
Also used : TraceScope(org.apache.htrace.core.TraceScope) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
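
close() uses MultipleIOException.Builder so that an exception from closeImpl() can be recorded inside the synchronized block and rethrown after it. The same collect-then-throw pattern works for closing several resources and reporting every failure at once; a sketch with hypothetical resources is below.

import java.io.Closeable;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.io.MultipleIOException;

public class CloseAllExample {

    // Closes every resource, collecting failures; throws one combined
    // IOException at the end if anything failed (build() returns null otherwise).
    static void closeAll(List<Closeable> resources) throws IOException {
        final MultipleIOException.Builder b = new MultipleIOException.Builder();
        for (Closeable c : resources) {
            try {
                c.close();
            } catch (IOException e) {
                b.add(e);
            }
        }
        final IOException ioe = b.build();
        if (ioe != null) {
            throw ioe;
        }
    }
}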

Example 45 with TraceScope

use of org.apache.htrace.core.TraceScope in project hadoop by apache.

the class DFSInotifyEventInputStream method poll.

/**
   * Returns the next batch of events in the stream or null if no new
   * batches are currently available.
   *
   * @throws IOException because of network error or edit log
   * corruption. Also possible if JournalNodes are unresponsive in the
   * QJM setting (even one unresponsive JournalNode is enough in rare cases),
   * so catching this exception and retrying at least a few times is
   * recommended.
   * @throws MissingEventsException if we cannot return the next batch in the
   * stream because the data for the events (and possibly some subsequent
   * events) has been deleted (generally because this stream is a very large
   * number of transactions behind the current state of the NameNode). It is
   * safe to continue reading from the stream after this exception is thrown.
   * The next available batch of events will be returned.
   */
public EventBatch poll() throws IOException, MissingEventsException {
    try (TraceScope ignored = tracer.newScope("inotifyPoll")) {
        // need to keep retrying until the NN sends us the latest committed txid
        if (lastReadTxid == -1) {
            LOG.debug("poll(): lastReadTxid is -1, reading current txid from NN");
            lastReadTxid = namenode.getCurrentEditLogTxid();
            return null;
        }
        if (!it.hasNext()) {
            EventBatchList el = namenode.getEditsFromTxid(lastReadTxid + 1);
            if (el.getLastTxid() != -1) {
                // we only want to set syncTxid when we were actually able to read some
                // edits on the NN -- otherwise it will seem like edits are being
                // generated faster than we can read them when the problem is really
                // that we are temporarily unable to read edits
                syncTxid = el.getSyncTxid();
                it = el.getBatches().iterator();
                long formerLastReadTxid = lastReadTxid;
                lastReadTxid = el.getLastTxid();
                if (el.getFirstTxid() != formerLastReadTxid + 1) {
                    throw new MissingEventsException(formerLastReadTxid + 1, el.getFirstTxid());
                }
            } else {
                LOG.debug("poll(): read no edits from the NN when requesting edits " + "after txid {}", lastReadTxid);
                return null;
            }
        }
        if (it.hasNext()) {
            // newly seen edit log ops actually got converted to events
            return it.next();
        } else {
            return null;
        }
    }
}
Also used : TraceScope(org.apache.htrace.core.TraceScope) EventBatchList(org.apache.hadoop.hdfs.inotify.EventBatchList) MissingEventsException(org.apache.hadoop.hdfs.inotify.MissingEventsException)
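
The javadoc above recommends retrying on IOException and continuing after MissingEventsException. A sketch of a polling loop that follows that advice is shown below; the NameNode URI and the one-second back-off are placeholders chosen for illustration.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.MissingEventsException;

public class InotifyPollExample {
    public static void main(String[] args) throws Exception {
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), new Configuration());
        DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
        while (true) {
            try {
                EventBatch batch = stream.poll();
                if (batch == null) {
                    // No new batches available yet; back off briefly before polling again.
                    Thread.sleep(1000);
                    continue;
                }
                System.out.println("txid " + batch.getTxid() + ": "
                    + batch.getEvents().length + " event(s)");
            } catch (IOException e) {
                // Transient network or JournalNode trouble; the javadoc suggests retrying.
                System.err.println("poll failed, retrying: " + e.getMessage());
            } catch (MissingEventsException e) {
                // Some events were lost, but it is safe to keep reading from the stream.
                System.err.println("missed events: " + e.getMessage());
            }
        }
    }
}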

Aggregations

TraceScope (org.apache.htrace.core.TraceScope)62 IOException (java.io.IOException)11 InterruptedIOException (java.io.InterruptedIOException)7 MultipleIOException (org.apache.hadoop.io.MultipleIOException)6 RemoteException (org.apache.hadoop.ipc.RemoteException)5 FileNotFoundException (java.io.FileNotFoundException)4 List (java.util.List)4 SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)4 UnresolvedPathException (org.apache.hadoop.hdfs.protocol.UnresolvedPathException)4 AccessControlException (org.apache.hadoop.security.AccessControlException)4 ClosedChannelException (java.nio.channels.ClosedChannelException)3 FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException)3 ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException)3 DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException)3 NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException)3 QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException)3 SpanId (org.apache.htrace.core.SpanId)3 Tracer (org.apache.htrace.core.Tracer)3 Test (org.junit.Test)3 ByteBuffer (java.nio.ByteBuffer)2