Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
In the class TestTracing, the method readWithTracing:
private void readWithTracing(Tracer tracer) throws Exception {
  long startTime = System.currentTimeMillis();
  TraceScope ts = tracer.newScope("testReadTraceHooks");
  readTestFile("testReadTraceHooks.dat");
  ts.close();
  long endTime = System.currentTimeMillis();

  String[] expectedSpanNames = {
      "testReadTraceHooks",
      "ClientProtocol#getBlockLocations",
      "ClientNamenodeProtocol#getBlockLocations",
      "OpReadBlockProto"
  };
  SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);

  // The trace should last about the same amount of time as the test
  Map<String, List<Span>> map = SetSpanReceiver.getMap();
  Span s = map.get("testReadTraceHooks").get(0);
  Assert.assertNotNull(s);

  long spanStart = s.getStartTimeMillis();
  long spanEnd = s.getStopTimeMillis();
  Assert.assertTrue(spanStart - startTime < 100);
  Assert.assertTrue(spanEnd - endTime < 100);
  // There should only be one trace id, since every span should be homed in
  // the top trace.
  for (Span span : SetSpanReceiver.getSpans()) {
    System.out.println(span.toJson());
  }
  for (Span span : SetSpanReceiver.getSpans()) {
    Assert.assertEquals(ts.getSpan().getSpanId().getHigh(),
        span.getSpanId().getHigh());
  }
  SetSpanReceiver.clear();
}
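The test closes its scope explicitly with ts.close() because it still needs ts.getSpan() afterwards to read the top span's id. When the span is not needed after the traced work, the usual pattern is try-with-resources, since TraceScope is closeable; that is the style the DFSOutputStream and DFSInotifyEventInputStream snippets below use. A minimal sketch of the same traced read, assuming the tracer and readTestFile helpers from the test above (the method name is made up):

private void readWithTracingAutoClose(Tracer tracer) throws Exception {
  // The scope is closed even if readTestFile throws, so the span is still
  // delivered to the configured span receiver.
  try (TraceScope scope = tracer.newScope("testReadTraceHooks")) {
    readTestFile("testReadTraceHooks.dat");
  }
}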
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
In the class DFSOutputStream, the method closeImpl:
protected synchronized void closeImpl() throws IOException {
  if (isClosed()) {
    getStreamer().getLastException().check(true);
    return;
  }

  try {
    // flush from all upper layers
    flushBuffer();
    if (currentPacket != null) {
      enqueueCurrentPacket();
    }
    if (getStreamer().getBytesCurBlock() != 0) {
      setCurrentPacketToEmpty();
    }
    // flush all data to Datanodes
    flushInternal();

    // get last block before destroying the streamer
    ExtendedBlock lastBlock = getStreamer().getBlock();
    try (TraceScope ignored =
             dfsClient.getTracer().newScope("completeFile")) {
      completeFile(lastBlock);
    }
  } catch (ClosedChannelException ignored) {
  } finally {
    // Failures may happen when flushing data.
    // Streamers may keep waiting for the new block information.
    // Thus need to force closing these threads.
    // Don't need to call setClosed() because closeThreads(true)
    // calls setClosed() in the finally block.
    closeThreads(true);
  }
}
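closeImpl is reached through DFSOutputStream#close (shown further below), i.e. whenever an HDFS client closes an output stream obtained from the FileSystem API. A hedged usage sketch, assuming fs.defaultFS points at an HDFS cluster and that this fragment sits in a method declaring IOException (the path and payload are made up; imports from org.apache.hadoop.conf, org.apache.hadoop.fs and java.nio.charset are assumed):

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
try (FSDataOutputStream out = fs.create(new Path("/tmp/trace-demo.dat"))) {
  out.write("hello".getBytes(StandardCharsets.UTF_8));
}
// Closing the stream (here via try-with-resources) ends up in
// DFSOutputStream#close -> closeImpl(), which flushes the remaining packets
// and completes the file inside the "completeFile" trace scope shown above.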
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
In the class DFSOutputStream, the method newStreamForCreate:
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
    FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
    short replication, long blockSize, Progressable progress,
    DataChecksum checksum, String[] favoredNodes) throws IOException {
  try (TraceScope ignored =
           dfsClient.newPathTraceScope("newStreamForCreate", src)) {
    HdfsFileStatus stat = null;

    // Retry the create if we get a RetryStartFileException up to a maximum
    // number of times
    boolean shouldRetry = true;
    int retryCount = CREATE_RETRY_COUNT;
    while (shouldRetry) {
      shouldRetry = false;
      try {
        stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
            new EnumSetWritable<>(flag), createParent, replication,
            blockSize, SUPPORTED_CRYPTO_VERSIONS);
        break;
      } catch (RemoteException re) {
        IOException e = re.unwrapRemoteException(
            AccessControlException.class,
            DSQuotaExceededException.class,
            QuotaByStorageTypeExceededException.class,
            FileAlreadyExistsException.class,
            FileNotFoundException.class,
            ParentNotDirectoryException.class,
            NSQuotaExceededException.class,
            RetryStartFileException.class,
            SafeModeException.class,
            UnresolvedPathException.class,
            SnapshotAccessControlException.class,
            UnknownCryptoProtocolVersionException.class);
        if (e instanceof RetryStartFileException) {
          if (retryCount > 0) {
            shouldRetry = true;
            retryCount--;
          } else {
            throw new IOException("Too many retries because of encryption"
                + " zone operations", e);
          }
        } else {
          throw e;
        }
      }
    }
    Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
    final DFSOutputStream out;
    if (stat.getErasureCodingPolicy() != null) {
      out = new DFSStripedOutputStream(dfsClient, src, stat, flag, progress,
          checksum, favoredNodes);
    } else {
      out = new DFSOutputStream(dfsClient, src, stat, flag, progress,
          checksum, favoredNodes, true);
    }
    out.start();
    return out;
  }
}
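The create RPC is retried only when the unwrapped exception is a RetryStartFileException (which the error message above ties to encryption zone operations), and at most CREATE_RETRY_COUNT times. Pulled out into a tiny generic helper purely for illustration, the retry shape looks like this (the helper and its names are not part of Hadoop):

// Hypothetical helper: retry a call a bounded number of times when it fails
// with a specific exception type, otherwise rethrow immediately.
static <T> T callWithRetry(java.util.concurrent.Callable<T> call,
    Class<? extends Exception> retryable, int maxRetries) throws Exception {
  int retriesLeft = maxRetries;
  while (true) {
    try {
      return call.call();
    } catch (Exception e) {
      if (retryable.isInstance(e) && retriesLeft-- > 0) {
        continue;  // same effect as shouldRetry = true in the loop above
      }
      throw e;
    }
  }
}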
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
In the class DFSOutputStream, the method close:
/**
* Closes this output stream and releases any system
* resources associated with this stream.
*/
@Override
public void close() throws IOException {
  final MultipleIOException.Builder b = new MultipleIOException.Builder();
  synchronized (this) {
    try (TraceScope ignored =
             dfsClient.newPathTraceScope("DFSOutputStream#close", src)) {
      closeImpl();
    } catch (IOException e) {
      b.add(e);
    }
  }
  final IOException ioe = b.build();
  if (ioe != null) {
    throw ioe;
  }
}
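close() funnels any IOException from closeImpl through a MultipleIOException.Builder and only throws once the synchronized block has been left. The same collect-then-throw idiom works for any group of operations that should all be attempted before failing. A sketch using only the builder calls shown above, assuming a hypothetical list of already-opened streams and a surrounding method that declares IOException:

final MultipleIOException.Builder b = new MultipleIOException.Builder();
for (java.io.OutputStream s : streams) {
  try {
    s.close();
  } catch (IOException e) {
    b.add(e);                          // remember the failure, keep closing
  }
}
final IOException ioe = b.build();     // null when nothing was added
if (ioe != null) {
  throw ioe;                           // single or combined IOException
}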
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
In the class DFSInotifyEventInputStream, the method poll:
/**
* Returns the next batch of events in the stream or null if no new
* batches are currently available.
*
* @throws IOException because of network error or edit log
* corruption. Also possible if JournalNodes are unresponsive in the
* QJM setting (even one unresponsive JournalNode is enough in rare cases),
* so catching this exception and retrying at least a few times is
* recommended.
* @throws MissingEventsException if we cannot return the next batch in the
* stream because the data for the events (and possibly some subsequent
* events) has been deleted (generally because this stream is a very large
* number of transactions behind the current state of the NameNode). It is
* safe to continue reading from the stream after this exception is thrown;
* the next available batch of events will be returned.
*/
public EventBatch poll() throws IOException, MissingEventsException {
  try (TraceScope ignored = tracer.newScope("inotifyPoll")) {
    // need to keep retrying until the NN sends us the latest committed txid
    if (lastReadTxid == -1) {
      LOG.debug("poll(): lastReadTxid is -1, reading current txid from NN");
      lastReadTxid = namenode.getCurrentEditLogTxid();
      return null;
    }
    if (!it.hasNext()) {
      EventBatchList el = namenode.getEditsFromTxid(lastReadTxid + 1);
      if (el.getLastTxid() != -1) {
        // we only want to set syncTxid when we were actually able to read some
        // edits on the NN -- otherwise it will seem like edits are being
        // generated faster than we can read them when the problem is really
        // that we are temporarily unable to read edits
        syncTxid = el.getSyncTxid();
        it = el.getBatches().iterator();
        long formerLastReadTxid = lastReadTxid;
        lastReadTxid = el.getLastTxid();
        if (el.getFirstTxid() != formerLastReadTxid + 1) {
          throw new MissingEventsException(formerLastReadTxid + 1,
              el.getFirstTxid());
        }
      } else {
        LOG.debug("poll(): read no edits from the NN when requesting edits " +
            "after txid {}", lastReadTxid);
        return null;
      }
    }

    if (it.hasNext()) {
      // newly seen edit log ops actually got converted to events
      return it.next();
    } else {
      return null;
    }
  }
}
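The javadoc above recommends catching and retrying on IOException and continuing after MissingEventsException. A hedged sketch of a consumer loop built on that advice; the stream would be a DFSInotifyEventInputStream, e.g. obtained from HdfsAdmin#getInotifyEventStream(), while handleBatch and LOG are hypothetical application-side names:

void consumeEvents(DFSInotifyEventInputStream stream)
    throws InterruptedException {
  while (true) {
    try {
      EventBatch batch = stream.poll();  // null means nothing new yet
      if (batch == null) {
        Thread.sleep(1000);              // back off before polling again
        continue;
      }
      handleBatch(batch);                // hypothetical application callback
    } catch (IOException e) {
      LOG.warn("transient error reading edits, retrying", e);
    } catch (MissingEventsException e) {
      // Some events were lost; the javadoc says it is safe to keep reading.
      LOG.warn("missed some events, continuing with the next batch", e);
    }
  }
}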