Search in sources:

Example 1 with Span

Use of org.apache.htrace.core.Span in project hadoop by apache.

The class TestTracing, method writeWithTracing.

private void writeWithTracing(Tracer tracer) throws Exception {
    long startTime = System.currentTimeMillis();
    TraceScope ts = tracer.newScope("testWriteTraceHooks");
    writeTestFile("testWriteTraceHooks.dat");
    long endTime = System.currentTimeMillis();
    ts.close();
    String[] expectedSpanNames = { "testWriteTraceHooks", "ClientProtocol#create", "ClientNamenodeProtocol#create", "ClientProtocol#fsync", "ClientNamenodeProtocol#fsync", "ClientProtocol#complete", "ClientNamenodeProtocol#complete", "newStreamForCreate", "DFSOutputStream#write", "DFSOutputStream#close", "dataStreamer", "OpWriteBlockProto", "ClientProtocol#addBlock", "ClientNamenodeProtocol#addBlock" };
    SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
    // The trace should last about the same amount of time as the test
    Map<String, List<Span>> map = SetSpanReceiver.getMap();
    Span s = map.get("testWriteTraceHooks").get(0);
    Assert.assertNotNull(s);
    long spanStart = s.getStartTimeMillis();
    long spanEnd = s.getStopTimeMillis();
    // Spans homed in the top trace should have the same trace id.
    // Spans with multiple parents (e.g. "dataStreamer", added by HDFS-7054)
    // and their children are exceptions.
    String[] spansInTopTrace = { "testWriteTraceHooks", "ClientProtocol#create", "ClientNamenodeProtocol#create", "ClientProtocol#fsync", "ClientNamenodeProtocol#fsync", "ClientProtocol#complete", "ClientNamenodeProtocol#complete", "newStreamForCreate", "DFSOutputStream#write", "DFSOutputStream#close" };
    for (String desc : spansInTopTrace) {
        for (Span span : map.get(desc)) {
            Assert.assertEquals(ts.getSpan().getSpanId().getHigh(), span.getSpanId().getHigh());
        }
    }
    // test for timeline annotation added by HADOOP-11242
    Assert.assertEquals("called", map.get("ClientProtocol#create").get(0).getTimelineAnnotations().get(0).getMessage());
    SetSpanReceiver.clear();
}
Also used: TraceScope (org.apache.htrace.core.TraceScope), List (java.util.List), Span (org.apache.htrace.core.Span)
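
SetSpanReceiver is a test-only helper from Hadoop's test tree that keeps every finished span in memory, keyed by span description. Its exact implementation is not shown here; a minimal sketch of such a collecting receiver (class and member names hypothetical) might look like this:

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanReceiver;

// Hypothetical sketch of a collecting receiver in the spirit of SetSpanReceiver.
public class CollectingSpanReceiver extends SpanReceiver {
    // Finished spans keyed by description; static so tests can inspect them
    // after the tracer under test has delivered its spans.
    private static final Map<String, List<Span>> SPANS = new ConcurrentHashMap<>();

    // htrace instantiates receivers reflectively with a configuration argument.
    public CollectingSpanReceiver(HTraceConfiguration conf) {
    }

    @Override
    public void receiveSpan(Span span) {
        SPANS.computeIfAbsent(span.getDescription(),
                k -> new CopyOnWriteArrayList<>()).add(span);
    }

    @Override
    public void close() {
    }

    public static Map<String, List<Span>> getMap() {
        return SPANS;
    }

    public static void clear() {
        SPANS.clear();
    }
}

A test would register such a receiver with the Tracer it builds, run the traced operation, and then assert against getMap(), much as writeWithTracing does above.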

Example 2 with Span

Use of org.apache.htrace.core.Span in project hadoop by apache.

The class BlockReceiver, method close.

/**
   * Close files and release the volume reference.
   */
@Override
public void close() throws IOException {
    Span span = Tracer.getCurrentSpan();
    if (span != null) {
        span.addKVAnnotation("maxWriteToDiskMs", Long.toString(maxWriteToDiskMs));
    }
    packetReceiver.close();
    IOException ioe = null;
    if (syncOnClose && (streams.getDataOut() != null || checksumOut != null)) {
        datanode.metrics.incrFsyncCount();
    }
    long flushTotalNanos = 0;
    boolean measuredFlushTime = false;
    // close checksum file
    try {
        if (checksumOut != null) {
            long flushStartNanos = System.nanoTime();
            checksumOut.flush();
            long flushEndNanos = System.nanoTime();
            if (syncOnClose) {
                long fsyncStartNanos = flushEndNanos;
                streams.syncChecksumOut();
                datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
            }
            flushTotalNanos += flushEndNanos - flushStartNanos;
            measuredFlushTime = true;
            checksumOut.close();
            checksumOut = null;
        }
    } catch (IOException e) {
        ioe = e;
    } finally {
        IOUtils.closeStream(checksumOut);
    }
    // close block file
    try {
        if (streams.getDataOut() != null) {
            long flushStartNanos = System.nanoTime();
            streams.flushDataOut();
            long flushEndNanos = System.nanoTime();
            if (syncOnClose) {
                long fsyncStartNanos = flushEndNanos;
                streams.syncDataOut();
                datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
            }
            flushTotalNanos += flushEndNanos - flushStartNanos;
            measuredFlushTime = true;
            streams.closeDataStream();
        }
    } catch (IOException e) {
        ioe = e;
    } finally {
        streams.close();
    }
    if (replicaHandler != null) {
        IOUtils.cleanup(null, replicaHandler);
        replicaHandler = null;
    }
    if (measuredFlushTime) {
        datanode.metrics.addFlushNanos(flushTotalNanos);
    }
    if (ioe != null) {
        // Volume error check moved to FileIoProvider
        throw ioe;
    }
}
Also used: IOException (java.io.IOException), Span (org.apache.htrace.core.Span)
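
The null check around Tracer.getCurrentSpan() is the usual guard: outside any active TraceScope there is no span to annotate, so the metric is simply dropped. A small sketch of that pattern as a reusable helper (class and method names hypothetical):

import org.apache.htrace.core.Span;
import org.apache.htrace.core.Tracer;

// Hypothetical helper wrapping the guard used in BlockReceiver#close.
final class SpanAnnotations {
    private SpanAnnotations() {
    }

    // Attach a key/value annotation to the active span, if tracing is on.
    static void annotateCurrent(String key, long value) {
        // getCurrentSpan() returns null when no TraceScope is active, so the
        // call is close to free when tracing is disabled.
        Span span = Tracer.getCurrentSpan();
        if (span != null) {
            span.addKVAnnotation(key, Long.toString(value));
        }
    }
}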

Example 3 with Span

Use of org.apache.htrace.core.Span in project hadoop by apache.

The class TestTracing, method readWithTracing.

private void readWithTracing(Tracer tracer) throws Exception {
    long startTime = System.currentTimeMillis();
    TraceScope ts = tracer.newScope("testReadTraceHooks");
    readTestFile("testReadTraceHooks.dat");
    ts.close();
    long endTime = System.currentTimeMillis();
    String[] expectedSpanNames = { "testReadTraceHooks", "ClientProtocol#getBlockLocations", "ClientNamenodeProtocol#getBlockLocations", "OpReadBlockProto" };
    SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
    // The trace should last about the same amount of time as the test
    Map<String, List<Span>> map = SetSpanReceiver.getMap();
    Span s = map.get("testReadTraceHooks").get(0);
    Assert.assertNotNull(s);
    long spanStart = s.getStartTimeMillis();
    long spanEnd = s.getStopTimeMillis();
    Assert.assertTrue(spanStart - startTime < 100);
    Assert.assertTrue(spanEnd - endTime < 100);
    // There should only be one trace id as it should all be homed in the
    // top trace.
    for (Span span : SetSpanReceiver.getSpans()) {
        System.out.println(span.toJson());
    }
    for (Span span : SetSpanReceiver.getSpans()) {
        Assert.assertEquals(ts.getSpan().getSpanId().getHigh(), span.getSpanId().getHigh());
    }
    SetSpanReceiver.clear();
}
Also used: TraceScope (org.apache.htrace.core.TraceScope), List (java.util.List), Span (org.apache.htrace.core.Span)
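
Both tests compare getSpanId().getHigh() values because an htrace-4 SpanId is 128 bits wide: the high 64 bits are shared by every span in a trace, while the low 64 bits identify the individual span. A sketch of that check as a standalone predicate (method name hypothetical):

import org.apache.htrace.core.Span;

// Hypothetical predicate: do all spans carry rootSpan's trace id?
static boolean allInSameTrace(Span rootSpan, Iterable<Span> spans) {
    long traceId = rootSpan.getSpanId().getHigh();
    for (Span span : spans) {
        if (span.getSpanId().getHigh() != traceId) {
            return false;
        }
    }
    return true;
}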

Example 4 with Span

Use of org.apache.htrace.core.Span in project hadoop by apache.

The class ProtoUtil, method makeRpcRequestHeader.

public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind, RpcRequestHeaderProto.OperationProto operation, int callId, int retryCount, byte[] uuid) {
    RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
    result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId).setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));
    // Add tracing info if we are currently tracing.
    Span span = Tracer.getCurrentSpan();
    if (span != null) {
        result.setTraceInfo(RPCTraceInfoProto.newBuilder().setTraceId(span.getSpanId().getHigh()).setParentId(span.getSpanId().getLow()).build());
    }
    // Add caller context if it is not null
    CallerContext callerContext = CallerContext.getCurrent();
    if (callerContext != null && callerContext.isContextValid()) {
        RPCCallerContextProto.Builder contextBuilder = RPCCallerContextProto.newBuilder().setContext(callerContext.getContext());
        if (callerContext.getSignature() != null) {
            contextBuilder.setSignature(ByteString.copyFrom(callerContext.getSignature()));
        }
        result.setCallerContext(contextBuilder);
    }
    return result.build();
}
Also used: CallerContext (org.apache.hadoop.ipc.CallerContext), Span (org.apache.htrace.core.Span)
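
makeRpcRequestHeader flattens the current SpanId into two longs: the high word travels as traceId and the low word as parentId. Assuming the receiving side mirrors that encoding, reopening the trace on the server is a matter of rebuilding the SpanId and starting a child scope; a hedged sketch (method name and scope description hypothetical):

import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

// Hypothetical server-side counterpart to the header built above.
static TraceScope scopeFromHeader(Tracer tracer, long traceId, long parentId) {
    // SpanId(high, low) reverses the getHigh()/getLow() split done by the
    // sender, making the new scope a child of the caller's span.
    return tracer.newScope("handleRpcRequest", new SpanId(traceId, parentId));
}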

Example 5 with Span

Use of org.apache.htrace.core.Span in project hadoop by apache.

The class DataStreamer, method waitAndQueuePacket.

/**
   * Wait for space in the dataQueue, then queue the packet.
   *
   * @param packet  the DFSPacket to be queued
   * @throws IOException if the stream has been closed
   */
void waitAndQueuePacket(DFSPacket packet) throws IOException {
    synchronized (dataQueue) {
        try {
            // If queue is full, then wait till we have enough space
            boolean firstWait = true;
            try {
                while (!streamerClosed && dataQueue.size() + ackQueue.size() > dfsClient.getConf().getWriteMaxPackets()) {
                    if (firstWait) {
                        Span span = Tracer.getCurrentSpan();
                        if (span != null) {
                            span.addTimelineAnnotation("dataQueue.wait");
                        }
                        firstWait = false;
                    }
                    try {
                        dataQueue.wait();
                    } catch (InterruptedException e) {
                        // If we get interrupted while waiting to queue data, we still need to get rid
                        // of the current packet. This is because we have an invariant that if
                        // currentPacket gets full, it will get queued before the next writeChunk.
                        //
                        // Rather than wait around for space in the queue, we should instead try to
                        // return to the caller as soon as possible, even though we slightly overrun
                        // the MAX_PACKETS length.
                        Thread.currentThread().interrupt();
                        break;
                    }
                }
            } finally {
                Span span = Tracer.getCurrentSpan();
                if ((span != null) && (!firstWait)) {
                    span.addTimelineAnnotation("end.wait");
                }
            }
            checkClosed();
            queuePacket(packet);
        } catch (ClosedChannelException ignored) {
        }
    }
}
Also used: ClosedChannelException (java.nio.channels.ClosedChannelException), Span (org.apache.htrace.core.Span)
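
The firstWait flag ensures the "dataQueue.wait" annotation is recorded once even if the thread loops through several waits, and the finally block guarantees a matching "end.wait" whether the wait finishes normally or by interruption. A condensed sketch of that bracketing pattern (helper name and labels hypothetical):

import org.apache.htrace.core.Span;
import org.apache.htrace.core.Tracer;

// Hypothetical helper: bracket a blocking wait with timeline annotations.
static void annotatedWait(Object monitor, String label) throws InterruptedException {
    Span span = Tracer.getCurrentSpan();
    if (span != null) {
        span.addTimelineAnnotation(label + ".wait");
    }
    try {
        synchronized (monitor) {
            monitor.wait();
        }
    } finally {
        if (span != null) {
            // Recorded even when the wait ends by interruption.
            span.addTimelineAnnotation(label + ".end.wait");
        }
    }
}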

Aggregations

Span (org.apache.htrace.core.Span): 6 usages
TraceScope (org.apache.htrace.core.TraceScope): 3 usages
List (java.util.List): 2 usages
IOException (java.io.IOException): 1 usage
ClosedChannelException (java.nio.channels.ClosedChannelException): 1 usage
CallerContext (org.apache.hadoop.ipc.CallerContext): 1 usage