
Example 46 with TraceScope

Use of org.apache.htrace.core.TraceScope in project hadoop by apache.

From the class DFSInputStream, method read().

/**
   * Read the entire buffer.
   */
@Override
public synchronized int read(@Nonnull final byte[] buf, int off, int len) throws IOException {
    validatePositionedReadArgs(pos, buf, off, len);
    if (len == 0) {
        return 0;
    }
    ReaderStrategy byteArrayReader = new ByteArrayStrategy(buf, off, len, readStatistics, dfsClient);
    try (TraceScope scope = dfsClient.newReaderTraceScope("DFSInputStream#byteArrayRead", src, getPos(), len)) {
        int retLen = readWithStrategy(byteArrayReader);
        if (retLen < len) {
            dfsClient.addRetLenToReaderScope(scope, retLen);
        }
        return retLen;
    }
}
Also used: TraceScope (org.apache.htrace.core.TraceScope)
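
All five examples in this batch share the same idiom: the TraceScope is opened in a try-with-resources statement so the underlying span is closed even when the traced operation throws. Below is a minimal, self-contained sketch of that idiom against the htrace-core4 API; the tracer name "sketch" and the scope name "myOperation" are made up for illustration.

import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class TraceScopeSketch {
    public static void main(String[] args) {
        // Build a Tracer from an empty configuration; a real deployment
        // would configure a Sampler and SpanReceiver instead.
        Tracer tracer = new Tracer.Builder("sketch") // name is illustrative
                .conf(HTraceConfiguration.EMPTY)
                .build();
        // try-with-resources guarantees the scope (and the span it wraps,
        // if the sampler chose to sample) is closed on every code path.
        try (TraceScope ignored = tracer.newScope("myOperation")) {
            doTracedWork();
        }
        tracer.close();
    }

    private static void doTracedWork() {
        // Placeholder for the operation being traced.
    }
}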

Example 47 with TraceScope

Use of org.apache.htrace.core.TraceScope in project hadoop by apache.

From the class DFSClient, method getDelegationToken().

/**
   * @see ClientProtocol#getDelegationToken(Text)
   */
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) throws IOException {
    assert dtService != null;
    try (TraceScope ignored = tracer.newScope("getDelegationToken")) {
        Token<DelegationTokenIdentifier> token = namenode.getDelegationToken(renewer);
        if (token != null) {
            token.setService(this.dtService);
            LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token));
        } else {
            LOG.info("Cannot get delegation token from " + renewer);
        }
        return token;
    }
}
Also used: DelegationTokenIdentifier (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier), TraceScope (org.apache.htrace.core.TraceScope)
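
For context, application code would usually reach this method through the public FileSystem API rather than instantiating DFSClient directly. A hedged sketch of the caller's side; the renewer name "yarn" and the default-configuration setup are assumptions for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenCaller {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            // On HDFS this delegates down to DFSClient#getDelegationToken.
            Token<?> token = fs.getDelegationToken("yarn"); // renewer is made up
            if (token != null) {
                System.out.println("service=" + token.getService());
            }
        }
    }
}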

Example 48 with TraceScope

Use of org.apache.htrace.core.TraceScope in project hadoop by apache.

From the class DFSClient, method getBlockLocations().

/**
   * Get block location info about a file
   *
   * getBlockLocations() returns a list of hostnames that store
   * data for a specific file region.  It returns a set of hostnames
   * for every block within the indicated region.
   *
   * This function is very useful when writing code that considers
   * data-placement when performing operations.  For example, the
   * MapReduce system tries to schedule tasks on the same machines
   * as the data-block the task processes.
   */
public BlockLocation[] getBlockLocations(String src, long start, long length) throws IOException {
    checkOpen();
    try (TraceScope ignored = newPathTraceScope("getBlockLocations", src)) {
        LocatedBlocks blocks = getLocatedBlocks(src, start, length);
        BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
        HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
        for (int i = 0; i < locations.length; i++) {
            hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
        }
        return hdfsLocations;
    }
}
Also used: HdfsBlockLocation (org.apache.hadoop.fs.HdfsBlockLocation), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), TraceScope (org.apache.htrace.core.TraceScope), BlockLocation (org.apache.hadoop.fs.BlockLocation)
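
The data-placement point in the javadoc is easiest to see from the consumer's side: each BlockLocation carries the hostnames holding one block. A sketch using the public FileSystem#getFileBlockLocations equivalent; the path /user/example/data.txt is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationsCaller {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            Path path = new Path("/user/example/data.txt"); // made-up path
            FileStatus status = fs.getFileStatus(path);
            // One BlockLocation per block in the requested byte range.
            BlockLocation[] locations =
                fs.getFileBlockLocations(status, 0, status.getLen());
            for (BlockLocation loc : locations) {
                // A scheduler could place work on one of these hosts.
                System.out.println("offset=" + loc.getOffset()
                    + " hosts=" + String.join(",", loc.getHosts()));
            }
        }
    }
}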

Example 49 with TraceScope

Use of org.apache.htrace.core.TraceScope in project hadoop by apache.

From the class BlockReaderLocal, method fillBuffer().

/**
   * Read from the block file into a buffer.
   *
   * This function overwrites checksumBuf.  It will increment dataPos.
   *
   * @param buf   The buffer to read into.  May be dataBuf.
   *              The position and limit of this buffer should be set to
   *              multiples of the checksum size.
   * @param canSkipChecksum  True if we can skip checksumming.
   *
   * @return      Total bytes read.  0 on EOF.
   */
private synchronized int fillBuffer(ByteBuffer buf, boolean canSkipChecksum) throws IOException {
    try (TraceScope ignored = tracer.newScope("BlockReaderLocal#fillBuffer(" + block.getBlockId() + ")")) {
        int total = 0;
        long startDataPos = dataPos;
        int startBufPos = buf.position();
        while (buf.hasRemaining()) {
            int nRead = dataIn.read(buf, dataPos);
            if (nRead < 0) {
                break;
            }
            dataPos += nRead;
            total += nRead;
        }
        if (canSkipChecksum) {
            freeChecksumBufIfExists();
            return total;
        }
        if (total > 0) {
            try {
                buf.limit(buf.position());
                buf.position(startBufPos);
                createChecksumBufIfNeeded();
                int checksumsNeeded = (total + bytesPerChecksum - 1) / bytesPerChecksum;
                checksumBuf.clear();
                checksumBuf.limit(checksumsNeeded * checksumSize);
                long checksumPos = BlockMetadataHeader.getHeaderSize() + ((startDataPos / bytesPerChecksum) * checksumSize);
                while (checksumBuf.hasRemaining()) {
                    int nRead = checksumIn.read(checksumBuf, checksumPos);
                    if (nRead < 0) {
                        throw new IOException("Got unexpected checksum file EOF at " + checksumPos + ", block file position " + startDataPos + " for block " + block + " of file " + filename);
                    }
                    checksumPos += nRead;
                }
                checksumBuf.flip();
                checksum.verifyChunkedSums(buf, checksumBuf, filename, startDataPos);
            } finally {
                buf.position(buf.limit());
            }
        }
        return total;
    }
}
Also used: TraceScope (org.apache.htrace.core.TraceScope), IOException (java.io.IOException)
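
The chunk count above is a ceiling division: a trailing partial chunk still needs its checksum verified. A tiny standalone illustration of that arithmetic, with made-up values (512 is the HDFS default for dfs.bytes-per-checksum):

public class ChecksumMath {
    public static void main(String[] args) {
        int total = 1000;           // data bytes read (made-up value)
        int bytesPerChecksum = 512; // bytes covered by one checksum
        // Ceiling division: (1000 + 511) / 512 = 2, because the final
        // 488-byte partial chunk still has a checksum of its own.
        int checksumsNeeded = (total + bytesPerChecksum - 1) / bytesPerChecksum;
        System.out.println(checksumsNeeded); // prints 2
    }
}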

Example 50 with TraceScope

Use of org.apache.htrace.core.TraceScope in project hadoop by apache.

From the class BlockReaderLocalLegacy, method fillBuffer().

/**
   * Reads bytes into a buffer until EOF or the buffer's limit is reached
   */
private int fillBuffer(FileInputStream stream, ByteBuffer buf) throws IOException {
    try (TraceScope ignored = tracer.newScope("BlockReaderLocalLegacy#fillBuffer(" + blockId + ")")) {
        int bytesRead = stream.getChannel().read(buf);
        if (bytesRead < 0) {
            //EOF
            return bytesRead;
        }
        while (buf.remaining() > 0) {
            int n = stream.getChannel().read(buf);
            if (n < 0) {
                //EOF
                return bytesRead;
            }
            bytesRead += n;
        }
        return bytesRead;
    }
}
Also used: TraceScope (org.apache.htrace.core.TraceScope)
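
The loop exists because FileChannel#read may return fewer bytes than the buffer has room for, so a single call cannot be trusted to fill it. Here is the same read-until-full idiom as a standalone helper; the name readFully and the generic ReadableByteChannel parameter are mine, not Hadoop's.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

public class ReadUntilFull {
    /**
     * Read until buf is full or EOF is hit. Returns -1 on immediate EOF,
     * otherwise the number of bytes read (mirroring the method above).
     */
    static int readFully(ReadableByteChannel channel, ByteBuffer buf)
            throws IOException {
        int total = 0;
        while (buf.hasRemaining()) {
            int n = channel.read(buf);
            if (n < 0) {
                return total == 0 ? -1 : total;
            }
            total += n;
        }
        return total;
    }
}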

Aggregations

TraceScope (org.apache.htrace.core.TraceScope)62 IOException (java.io.IOException)11 InterruptedIOException (java.io.InterruptedIOException)7 MultipleIOException (org.apache.hadoop.io.MultipleIOException)6 RemoteException (org.apache.hadoop.ipc.RemoteException)5 FileNotFoundException (java.io.FileNotFoundException)4 List (java.util.List)4 SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)4 UnresolvedPathException (org.apache.hadoop.hdfs.protocol.UnresolvedPathException)4 AccessControlException (org.apache.hadoop.security.AccessControlException)4 ClosedChannelException (java.nio.channels.ClosedChannelException)3 FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException)3 ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException)3 DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException)3 NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException)3 QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException)3 SpanId (org.apache.htrace.core.SpanId)3 Tracer (org.apache.htrace.core.Tracer)3 Test (org.junit.Test)3 ByteBuffer (java.nio.ByteBuffer)2