Example use of org.apache.htrace.TraceScope in the Apache Phoenix project: class PhoenixTransactionalIndexer, method preBatchMutate.
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    // Inspect the first mutation of the batch; if indexing is disabled for it,
    // there is nothing for this coprocessor to do beyond the default behavior.
    Mutation firstMutation = miniBatchOp.getOperation(0);
    if (!codec.isEnabled(firstMutation)) {
        super.preBatchMutate(c, miniBatchOp);
        return;
    }

    Map<String, byte[]> attributes = firstMutation.getAttributesMap();
    PhoenixIndexMetaData metaData = new PhoenixIndexMetaData(c.getEnvironment(), attributes);
    byte[] rollbackAttribute = firstMutation.getAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY);

    // Declared outside the try so the failure message below can report whatever
    // updates (if any) had been built before the error occurred.
    Collection<Pair<Mutation, byte[]>> indexUpdates = null;
    try (TraceScope scope = Trace.startSpan("Starting to build index updates")) {
        // Substitute a no-op span when tracing is off, so every annotation call
        // below works without null checks.
        Span span = scope.getSpan();
        if (span == null) {
            span = NullSpan.INSTANCE;
        }
        // Compute the index updates for every mutation in this batch.
        indexUpdates = getIndexUpdates(c.getEnvironment(), metaData, getMutationIterator(miniBatchOp), rollbackAttribute);
        span.addTimelineAnnotation("Built index updates, doing preStep");
        TracingUtils.addAnnotation(span, "index update count", indexUpdates.size());
        // Only touch the writer when there is actually something to persist.
        if (!indexUpdates.isEmpty()) {
            this.writer.write(indexUpdates, true);
        }
    } catch (Throwable t) {
        String msg = "Failed to update index with entries:" + indexUpdates;
        LOG.error(msg, t);
        // Re-throw as an IOException so HBase's coprocessor framework sees a
        // checked failure rather than an arbitrary Throwable.
        ServerUtil.throwIOException(msg, t);
    }
}
Example use of org.apache.htrace.TraceScope in the Apache Phoenix project: class Indexer, method doPostWithExceptions.
private void doPostWithExceptions(WALEdit edit, Mutation m, final Durability durability) throws Exception {
    // Fast exit: nothing to do when WAL is skipped, indexing is disabled for
    // this mutation, or there is no edit at all — prePut already handled it.
    boolean nothingToDo = durability == Durability.SKIP_WAL || !this.builder.isEnabled(m) || edit == null;
    if (nothingToDo) {
        return;
    }

    try (TraceScope scope = Trace.startSpan("Completing index writes")) {
        // Fall back to a no-op span so we never have to null-check before
        // annotating below.
        Span span = scope.getSpan();
        if (span == null) {
            span = NullSpan.INSTANCE;
        }

        // This scans the non-indexed kvs once here and again when the index
        // updates are extracted below; the duplicated work is minor relative
        // to the rest of the runtime.
        IndexedKeyValue firstIndexed = getFirstIndexedKeyValue(edit);

        /*
         * Early exit — nothing to write. NOTE: the WAL rolling lock
         * (INDEX_UPDATE_LOCK) is deliberately not released here because doPre
         * never takes it when there are no index updates.
         */
        if (firstIndexed == null) {
            return;
        }

        /*
         * Guard against writing the same batch twice: this hook fires with the
         * identical WALEdit for every Put/Delete in the batch, so without the
         * flag every mutation would replay all of the batch's index updates.
         */
        if (firstIndexed.getBatchFinished()) {
            return;
        }

        Collection<Pair<Mutation, byte[]>> indexUpdates = extractIndexUpdate(edit);
        // durability already specified on each reference
        try {
            span.addTimelineAnnotation("Actually doing index update for first time");
            writer.writeAndKillYourselfOnFailure(indexUpdates, false);
        } finally {
            // A custom kill policy may throw instead of killing the server;
            // marking the batch finished in a finally block keeps the region
            // server from hanging (observed at least with the mini cluster).
            // In the single-update case this flag is never re-checked, but in
            // the batch case it is (see the guard above).
            firstIndexed.markBatchFinished();
        }
    }
}
Example use of org.apache.htrace.TraceScope in the Apache Phoenix project: class PhoenixTracingEndToEndIT, method testWriteSpans.
/**
 * Verifies the end-to-end path: spans created via HTrace are received by the
 * Phoenix trace writer and land in the tracing table with their annotations
 * and tags intact.
 * @throws Exception on failure
 */
@Test
public void testWriteSpans() throws Exception {
    // The latch lets us observe when the sink commits to the tracing table.
    latch = new CountDownLatch(1);
    testTraceWriter = new TestTraceWriter(tracingTableName, defaultTracingThreadPoolForTest, defaultTracingBatchSizeForTest);

    // Build a small trace: a root span with one annotated child.
    TraceScope trace = Trace.startSpan("Start write test", Sampler.ALWAYS);
    Span span = trace.getSpan();
    Span child = span.child("child 1");
    child.addTimelineAnnotation("timeline annotation");
    TracingUtils.addAnnotation(child, "test annotation", 10);
    child.stop();
    // Brief pause so the parent and child get distinguishable timestamps.
    Thread.sleep(100);
    trace.close();

    // Hand the finished span to the receiver and wait for the actual write.
    Tracing.getTraceSpanReceiver().receiveSpan(span);
    assertTrue("Sink not flushed. commit() not called on the connection", latch.await(60, TimeUnit.SECONDS));

    // Read back what was stored and confirm the child span round-tripped.
    Connection conn = getConnectionWithoutTracing();
    checkStoredTraces(conn, new TraceChecker() {
        @Override
        public boolean foundTrace(TraceHolder trace, SpanInfo info) {
            if (!info.description.equals("child 1")) {
                return false;
            }
            assertEquals("Not all annotations present", 1, info.annotationCount);
            assertEquals("Not all tags present", 1, info.tagCount);

            // NOTE(review): the key/value annotation is checked against
            // info.annotations and the timeline annotation against info.tags —
            // presumably that reflects how Phoenix maps HTrace data into its
            // trace table columns; confirm against SpanInfo if this looks swapped.
            boolean matched = false;
            for (String annotation : info.annotations) {
                if (annotation.startsWith("test annotation")) {
                    matched = true;
                    break;
                }
            }
            assertTrue("Missing the annotations in span: " + info, matched);

            matched = false;
            for (String tag : info.tags) {
                if (tag.endsWith("timeline annotation")) {
                    matched = true;
                    break;
                }
            }
            assertTrue("Missing the tags in span: " + info, matched);
            return true;
        }
    });
}
Aggregations