Search in sources :

Example 36 with TraceScope

use of org.apache.htrace.TraceScope in project phoenix by apache.

Source: the preBatchMutate method of the class PhoenixTransactionalIndexer.

/**
 * Coprocessor hook run before a batch of mutations is applied to the region.
 * Builds the transactional index updates for the batch and writes them out,
 * wrapped in a trace span; delegates to the superclass when indexing is
 * disabled for the batch's first mutation.
 *
 * @param c coprocessor environment for the hosting region
 * @param miniBatchOp the batch of mutations about to be applied
 * @throws IOException if building or writing the index updates fails
 */
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    Mutation firstMutation = miniBatchOp.getOperation(0);
    // Indexing disabled for this batch: fall through to the default behavior.
    if (!codec.isEnabled(firstMutation)) {
        super.preBatchMutate(c, miniBatchOp);
        return;
    }
    Map<String, byte[]> updateAttributes = firstMutation.getAttributesMap();
    PhoenixIndexMetaData indexMetaData = new PhoenixIndexMetaData(c.getEnvironment(), updateAttributes);
    byte[] txRollbackAttribute = firstMutation.getAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY);
    Collection<Pair<Mutation, byte[]>> indexUpdates = null;
    // Trace the index-update build; substitute a no-op span when tracing is
    // off so the annotation calls below need no null checks.
    try (TraceScope scope = Trace.startSpan("Starting to build index updates")) {
        Span rawSpan = scope.getSpan();
        Span current = (rawSpan != null) ? rawSpan : NullSpan.INSTANCE;
        // Compute the index updates for every mutation in this batch.
        indexUpdates = getIndexUpdates(c.getEnvironment(), indexMetaData, getMutationIterator(miniBatchOp), txRollbackAttribute);
        current.addTimelineAnnotation("Built index updates, doing preStep");
        TracingUtils.addAnnotation(current, "index update count", indexUpdates.size());
        // Only touch the writer when there is actually something to write.
        if (!indexUpdates.isEmpty()) {
            this.writer.write(indexUpdates, true);
        }
    } catch (Throwable t) {
        // Surface any failure (including Errors) to HBase as an IOException,
        // logging the updates we attempted for post-mortem debugging.
        String msg = "Failed to update index with entries:" + indexUpdates;
        LOG.error(msg, t);
        ServerUtil.throwIOException(msg, t);
    }
}
Also used : TraceScope(org.apache.htrace.TraceScope) Mutation(org.apache.hadoop.hbase.client.Mutation) MultiMutation(org.apache.phoenix.hbase.index.MultiMutation) Span(org.apache.htrace.Span) NullSpan(org.apache.phoenix.trace.util.NullSpan) Pair(org.apache.hadoop.hbase.util.Pair)

Example 37 with TraceScope

use of org.apache.htrace.TraceScope in project phoenix by apache.

Source: the doPostWithExceptions method of the class Indexer.

/**
 * Completes the index writes for a batch after the WAL edit has been
 * persisted. Writes the batch's index updates exactly once, guarded by the
 * batch-finished flag on the first IndexedKeyValue in the edit.
 *
 * @param edit the WAL edit for the mutation, possibly carrying index updates
 * @param m the primary-table mutation being post-processed
 * @param durability durability of the mutation; SKIP_WAL short-circuits
 * @throws Exception if writing the index updates fails
 */
private void doPostWithExceptions(WALEdit edit, Mutation m, final Durability durability) throws Exception {
    // Short circuit: nothing to do when the WAL is skipped, indexing is off
    // for this mutation, or there is no edit (index update already done in prePut).
    if (durability == Durability.SKIP_WAL || !this.builder.isEnabled(m) || edit == null) {
        return;
    }
    // Trace the write; fall back to a no-op span to avoid null checks below.
    try (TraceScope scope = Trace.startSpan("Completing index writes")) {
        Span rawSpan = scope.getSpan();
        Span current = (rawSpan != null) ? rawSpan : NullSpan.INSTANCE;
        // There is a little bit of excess here - we iterate all the non-indexed
        // kvs for this check first and then again later when extracting the
        // index updates. That should be minor compared to the rest of the runtime.
        IndexedKeyValue ikv = getFirstIndexedKeyValue(edit);
        /*
         * Early exit - we have nothing to write, so we don't need to do anything
         * else. NOTE: we don't release the WAL Rolling lock (INDEX_UPDATE_LOCK)
         * since we never take it in doPre if there are no index updates.
         */
        if (ikv == null) {
            return;
        }
        /*
         * Only write the update if we haven't already seen this batch. We only
         * want to write the batch once (this hook gets called with the same
         * WALEdit for each Put/Delete in a batch, which can lead to writing all
         * the index updates for each Put/Delete).
         */
        if (ikv.getBatchFinished()) {
            return;
        }
        Collection<Pair<Mutation, byte[]>> indexUpdates = extractIndexUpdate(edit);
        try {
            current.addTimelineAnnotation("Actually doing index update for first time");
            writer.writeAndKillYourselfOnFailure(indexUpdates, false);
        } finally {
            // With a custom kill policy we may throw instead of killing the
            // server, so mark the batch as written in a finally block (at least
            // with the mini cluster, the region server never goes down otherwise).
            // In the single-update case this flag is never checked again, but in
            // the batch case it is (see above).
            ikv.markBatchFinished();
        }
    }
}
Also used : TraceScope(org.apache.htrace.TraceScope) Span(org.apache.htrace.Span) NullSpan(org.apache.phoenix.trace.util.NullSpan) IndexedKeyValue(org.apache.phoenix.hbase.index.wal.IndexedKeyValue) Pair(org.apache.hadoop.hbase.util.Pair)

Example 38 with TraceScope

use of org.apache.htrace.TraceScope in project phoenix by apache.

Source: the testWriteSpans method of the class PhoenixTracingEndToEndIT.

/**
 * Simple test that we can correctly write spans to the phoenix table.
 * @throws Exception on failure
 */
@Test
public void testWriteSpans() throws Exception {
    // Watch our sink so we know when commits happen.
    latch = new CountDownLatch(1);
    testTraceWriter = new TestTraceWriter(tracingTableName, defaultTracingThreadPoolForTest, defaultTracingBatchSizeForTest);
    // Write some spans: a parent with one child carrying a timeline
    // annotation and a key/value annotation.
    TraceScope traceScope = Trace.startSpan("Start write test", Sampler.ALWAYS);
    Span parent = traceScope.getSpan();
    Span child = parent.child("child 1");
    child.addTimelineAnnotation("timeline annotation");
    TracingUtils.addAnnotation(child, "test annotation", 10);
    child.stop();
    // Sleep a little bit to get some time difference between spans.
    Thread.sleep(100);
    traceScope.close();
    // Pass the trace on to the receiver.
    Tracing.getTraceSpanReceiver().receiveSpan(parent);
    // Wait for the tracer to actually do the write.
    assertTrue("Sink not flushed. commit() not called on the connection", latch.await(60, TimeUnit.SECONDS));
    // Look for the writes to make sure they were made.
    Connection conn = getConnectionWithoutTracing();
    checkStoredTraces(conn, new TraceChecker() {

        @Override
        public boolean foundTrace(TraceHolder trace, SpanInfo info) {
            // Only the "child 1" span is of interest; skip everything else.
            if (!info.description.equals("child 1")) {
                return false;
            }
            assertEquals("Not all annotations present", 1, info.annotationCount);
            assertEquals("Not all tags present", 1, info.tagCount);
            boolean foundAnnotation = false;
            for (String annotation : info.annotations) {
                if (annotation.startsWith("test annotation")) {
                    foundAnnotation = true;
                    break;
                }
            }
            assertTrue("Missing the annotations in span: " + info, foundAnnotation);
            boolean foundTag = false;
            for (String tag : info.tags) {
                if (tag.endsWith("timeline annotation")) {
                    foundTag = true;
                    break;
                }
            }
            assertTrue("Missing the tags in span: " + info, foundTag);
            return true;
        }
    });
}
Also used : TraceHolder(org.apache.phoenix.trace.TraceReader.TraceHolder) TraceScope(org.apache.htrace.TraceScope) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) SpanInfo(org.apache.phoenix.trace.TraceReader.SpanInfo) CountDownLatch(java.util.concurrent.CountDownLatch) Span(org.apache.htrace.Span) Test(org.junit.Test)

Aggregations

TraceScope (org.apache.htrace.TraceScope)38 RetryCounter (org.apache.hadoop.hbase.util.RetryCounter)11 KeeperException (org.apache.zookeeper.KeeperException)11 IOException (java.io.IOException)9 Span (org.apache.htrace.Span)7 Pair (org.apache.hadoop.hbase.util.Pair)4 InterruptedIOException (java.io.InterruptedIOException)3 LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue)3 Mutation (org.apache.hadoop.hbase.client.Mutation)3 Table (org.apache.hadoop.hbase.client.Table)3 TimeoutIOException (org.apache.hadoop.hbase.exceptions.TimeoutIOException)3 ArrayList (java.util.ArrayList)2 List (java.util.List)2 Path (org.apache.hadoop.fs.Path)2 Put (org.apache.hadoop.hbase.client.Put)2 Result (org.apache.hadoop.hbase.client.Result)2 ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable)2 ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)2 ResultIterator (org.apache.phoenix.iterate.ResultIterator)2 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)2