Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
The class DFSInputStream, method read.
/**
 * Read the entire buffer.
 */
@Override
public synchronized int read(@Nonnull final byte[] buf, int off, int len)
    throws IOException {
  validatePositionedReadArgs(pos, buf, off, len);
  if (len == 0) {
    return 0;
  }
  ReaderStrategy byteArrayReader =
      new ByteArrayStrategy(buf, off, len, readStatistics, dfsClient);
  try (TraceScope scope = dfsClient.newReaderTraceScope(
      "DFSInputStream#byteArrayRead", src, getPos(), len)) {
    int retLen = readWithStrategy(byteArrayReader);
    if (retLen < len) {
      dfsClient.addRetLenToReaderScope(scope, retLen);
    }
    return retLen;
  }
}
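The method above follows the standard HTrace idiom: the TraceScope is opened in a try-with-resources block so the span is closed even when the read throws, and extra detail (here, the actual return length) is attached to the span before it closes. A minimal standalone sketch of the same pattern, assuming only a configured org.apache.htrace.core.Tracer; the DemoRead class and the "bytesRead" annotation key are illustrative, not from Hadoop:

import java.io.IOException;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class DemoRead {
  private final Tracer tracer;  // assumed to be built and configured elsewhere

  DemoRead(Tracer tracer) {
    this.tracer = tracer;
  }

  int tracedRead(byte[] buf) throws IOException {
    // try-with-resources guarantees scope.close() runs, ending the span
    // even if doRead() throws.
    try (TraceScope scope = tracer.newScope("DemoRead#tracedRead")) {
      int n = doRead(buf);
      scope.addKVAnnotation("bytesRead", Integer.toString(n));
      return n;
    }
  }

  private int doRead(byte[] buf) {
    return buf.length;  // stand-in for real I/O
  }
}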
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
The class DFSClient, method getDelegationToken.
/**
 * @see ClientProtocol#getDelegationToken(Text)
 */
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
    throws IOException {
  assert dtService != null;
  try (TraceScope ignored = tracer.newScope("getDelegationToken")) {
    Token<DelegationTokenIdentifier> token = namenode.getDelegationToken(renewer);
    if (token != null) {
      token.setService(this.dtService);
      LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token));
    } else {
      LOG.info("Cannot get delegation token from " + renewer);
    }
    return token;
  }
}
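Application code normally reaches this method through the public FileSystem API rather than by constructing a DFSClient. A hedged usage sketch; the renewer name "yarn" is illustrative, and a configuration pointing at a secured HDFS cluster is assumed (without security the token is null):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.token.Token;

public class TokenDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // On HDFS this delegates down to DFSClient#getDelegationToken.
    Token<?> token = fs.getDelegationToken("yarn");
    if (token != null) {
      System.out.println("kind=" + token.getKind()
          + " service=" + token.getService());
    }
  }
}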
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
The class DFSClient, method getBlockLocations.
/**
 * Get block location info about a file.
 *
 * getBlockLocations() returns a list of hostnames that store
 * data for a specific file region. It returns a set of hostnames
 * for every block within the indicated region.
 *
 * This function is very useful when writing code that considers
 * data placement when performing operations. For example, the
 * MapReduce system tries to schedule tasks on the same machines
 * as the data blocks the tasks process.
 */
public BlockLocation[] getBlockLocations(String src, long start, long length)
    throws IOException {
  checkOpen();
  try (TraceScope ignored = newPathTraceScope("getBlockLocations", src)) {
    LocatedBlocks blocks = getLocatedBlocks(src, start, length);
    BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
    HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
    for (int i = 0; i < locations.length; i++) {
      hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
    }
    return hdfsLocations;
  }
}
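From client code, the same placement information is available through FileSystem#getFileBlockLocations. A hedged sketch of how a scheduler-style caller might inspect where a file's blocks live; the path /data/input.txt is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PlacementDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus stat = fs.getFileStatus(new Path("/data/input.txt"));
    // One BlockLocation per block overlapping the requested byte range.
    BlockLocation[] blocks = fs.getFileBlockLocations(stat, 0, stat.getLen());
    for (BlockLocation b : blocks) {
      System.out.println("offset=" + b.getOffset() + " len=" + b.getLength()
          + " hosts=" + String.join(",", b.getHosts()));
    }
  }
}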
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
The class BlockReaderLocal, method fillBuffer.
/**
 * Read from the block file into a buffer.
 *
 * This function overwrites checksumBuf. It will increment dataPos.
 *
 * @param buf The buffer to read into. May be dataBuf.
 *            The position and limit of this buffer should be set to
 *            multiples of the checksum size.
 * @param canSkipChecksum True if we can skip checksumming.
 *
 * @return Total bytes read. 0 on EOF.
 */
private synchronized int fillBuffer(ByteBuffer buf, boolean canSkipChecksum)
    throws IOException {
  try (TraceScope ignored = tracer.newScope(
      "BlockReaderLocal#fillBuffer(" + block.getBlockId() + ")")) {
    int total = 0;
    long startDataPos = dataPos;
    int startBufPos = buf.position();
    while (buf.hasRemaining()) {
      int nRead = dataIn.read(buf, dataPos);
      if (nRead < 0) {
        break;
      }
      dataPos += nRead;
      total += nRead;
    }
    if (canSkipChecksum) {
      freeChecksumBufIfExists();
      return total;
    }
    if (total > 0) {
      try {
        buf.limit(buf.position());
        buf.position(startBufPos);
        createChecksumBufIfNeeded();
        int checksumsNeeded = (total + bytesPerChecksum - 1) / bytesPerChecksum;
        checksumBuf.clear();
        checksumBuf.limit(checksumsNeeded * checksumSize);
        long checksumPos = BlockMetadataHeader.getHeaderSize()
            + ((startDataPos / bytesPerChecksum) * checksumSize);
        while (checksumBuf.hasRemaining()) {
          int nRead = checksumIn.read(checksumBuf, checksumPos);
          if (nRead < 0) {
            throw new IOException("Got unexpected checksum file EOF at "
                + checksumPos + ", block file position " + startDataPos
                + " for block " + block + " of file " + filename);
          }
          checksumPos += nRead;
        }
        checksumBuf.flip();
        checksum.verifyChunkedSums(buf, checksumBuf, filename, startDataPos);
      } finally {
        buf.position(buf.limit());
      }
    }
    return total;
  }
}
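The checksum arithmetic above (one checksum per bytesPerChecksum-sized chunk, rounding up so a trailing partial chunk still gets a checksum) can be exercised in isolation with Hadoop's DataChecksum utility. A minimal sketch, assuming DataChecksum's documented behavior that calculateChunkedSums and verifyChunkedSums do not disturb buffer positions; the CRC32C type and 512-byte chunk size are illustrative choices:

import java.nio.ByteBuffer;
import org.apache.hadoop.util.DataChecksum;

public class ChunkedSumsDemo {
  public static void main(String[] args) throws Exception {
    int bytesPerChecksum = 512;
    DataChecksum checksum =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, bytesPerChecksum);

    ByteBuffer data = ByteBuffer.allocate(1300);  // deliberately not a multiple of 512
    for (int i = 0; i < data.capacity(); i++) {
      data.put((byte) i);
    }
    data.flip();

    // Same rounding as fillBuffer: the trailing partial chunk needs a checksum too.
    int checksumsNeeded =
        (data.remaining() + bytesPerChecksum - 1) / bytesPerChecksum;
    ByteBuffer sums =
        ByteBuffer.allocate(checksumsNeeded * checksum.getChecksumSize());

    checksum.calculateChunkedSums(data, sums);
    // Throws ChecksumException on mismatch; the base position only shapes the message.
    checksum.verifyChunkedSums(data, sums, "demo", 0);
    System.out.println("verified " + checksumsNeeded + " chunk checksums");
  }
}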
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
The class BlockReaderLocalLegacy, method fillBuffer.
/**
 * Reads bytes into a buffer until EOF or the buffer's limit is reached.
 */
private int fillBuffer(FileInputStream stream, ByteBuffer buf) throws IOException {
  try (TraceScope ignored = tracer.newScope(
      "BlockReaderLocalLegacy#fillBuffer(" + blockId + ")")) {
    int bytesRead = stream.getChannel().read(buf);
    if (bytesRead < 0) {
      // EOF before anything was read
      return bytesRead;
    }
    while (buf.remaining() > 0) {
      int n = stream.getChannel().read(buf);
      if (n < 0) {
        // EOF after a partial read; return what we have so far
        return bytesRead;
      }
      bytesRead += n;
    }
    return bytesRead;
  }
}
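The loop above is the standard way to compensate for short reads: a single channel read may return fewer bytes than the buffer has room for, so the caller keeps reading until the buffer fills or EOF. A channel-agnostic sketch of the same idea; the readFully name is ours, not Hadoop's:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

public final class ReadUtil {
  /**
   * Reads until buf is full or EOF. Returns -1 on EOF before any bytes,
   * otherwise the number of bytes read (possibly fewer than requested).
   */
  public static int readFully(ReadableByteChannel ch, ByteBuffer buf)
      throws IOException {
    int total = 0;
    while (buf.hasRemaining()) {
      int n = ch.read(buf);  // a single read may be short
      if (n < 0) {
        return total == 0 ? -1 : total;
      }
      total += n;
    }
    return total;
  }
}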