Example usage of zipkin.Span in the openzipkin/zipkin project:
the getTraces method of the ElasticsearchSpanStore class.
@Override
public ListenableFuture<List<List<Span>>> getTraces(final QueryRequest request) {
  // Constrain every match to the [endTs - lookback, endTs] time window.
  long endMillis = request.endTs;
  long beginMillis = endMillis - request.lookback;

  BoolQueryBuilder filter =
      boolQuery().must(rangeQuery("timestamp_millis").gte(beginMillis).lte(endMillis));

  // A span belongs to a service when an endpoint bearing that service name appears in
  // either its annotations or its binary annotations.
  if (request.serviceName != null) {
    filter.must(boolQuery()
        .should(nestedQuery("annotations",
            termQuery("annotations.endpoint.serviceName", request.serviceName)))
        .should(nestedQuery("binaryAnnotations",
            termQuery("binaryAnnotations.endpoint.serviceName", request.serviceName))));
  }

  if (request.spanName != null) {
    filter.must(termQuery("name", request.spanName));
  }

  // Each requested annotation value must be present, scoped to the service when one was given.
  for (String annotation : request.annotations) {
    BoolQueryBuilder annotationMatch =
        boolQuery().must(termQuery("annotations.value", annotation));
    if (request.serviceName != null) {
      annotationMatch.must(termQuery("annotations.endpoint.serviceName", request.serviceName));
    }
    filter.must(nestedQuery("annotations", annotationMatch));
  }

  // In our index template, we make sure the binaryAnnotation value is indexed as string,
  // meaning non-string values won't even be indexed at all. This means that we can only
  // match string values here, which happens to be exactly what we want.
  for (Map.Entry<String, String> kv : request.binaryAnnotations.entrySet()) {
    BoolQueryBuilder binaryAnnotationMatch = boolQuery()
        .must(termQuery("binaryAnnotations.key", kv.getKey()))
        .must(termQuery("binaryAnnotations.value", kv.getValue()));
    if (request.serviceName != null) {
      binaryAnnotationMatch.must(
          termQuery("binaryAnnotations.endpoint.serviceName", request.serviceName));
    }
    filter.must(nestedQuery("binaryAnnotations", binaryAnnotationMatch));
  }

  // Duration is a bounded range when maxDuration was supplied, otherwise open-ended above.
  if (request.minDuration != null) {
    RangeQueryBuilder durationRange = rangeQuery("duration").gte(request.minDuration);
    if (request.maxDuration != null) {
      durationRange.lte(request.maxDuration);
    }
    filter.must(durationRange);
  }

  Set<String> indexPatterns = indexNameFormatter.indexNamePatternsForRange(beginMillis, endMillis);
  final String[] indices = indexPatterns.toArray(new String[0]);

  // We need to filter to traces that contain at least one span that matches the request,
  // but the zipkin API is supposed to order traces by first span, regardless of if it was
  // filtered or not. This is not possible without either multiple, heavyweight queries
  // or complex multiple indexing, defeating much of the elegance of using elasticsearch for this.
  // So we fudge and order on the first span among the filtered spans - in practice, there should
  // be no significant difference in user experience since span start times are usually very
  // close to each other in human time.
  ListenableFuture<List<String>> traceIds = client.collectBucketKeys(
      indices,
      boolQuery().must(matchAllQuery()).filter(filter),
      AggregationBuilders.terms("traceId_agg").field("traceId")
          .subAggregation(AggregationBuilders.min("timestamps_agg").field("timestamp_millis"))
          .order(Order.aggregation("timestamps_agg", false))
          .size(request.limit));

  // Once the matching trace IDs arrive, hydrate each trace's spans asynchronously.
  return transform(traceIds, new AsyncFunction<List<String>, List<List<Span>>>() {
    @Override
    public ListenableFuture<List<List<Span>>> apply(List<String> input) {
      return getTracesByIds(input, indices, request);
    }
  });
}
Example usage of zipkin.Span in the openzipkin/zipkin project:
the indexSpans method of the ElasticsearchHttpSpanConsumer class.
/**
 * Queues each span for bulk indexing, choosing the index from the span's own timestamp
 * when one can be guessed, and from the current wall clock otherwise.
 *
 * @return the same indexer, to allow call chaining
 */
HttpBulkSpanIndexer indexSpans(HttpBulkSpanIndexer indexer, List<Span> spans) throws IOException {
  for (Span span : spans) {
    Long micros = guessTimestamp(span);
    Long millis = null; // stays null when the span carries no usable timestamp
    String index;
    if (micros == null) {
      // Nothing to go on: file the span under the index for "now".
      index = indexNameFormatter.indexNameForTimestamp(System.currentTimeMillis());
    } else {
      millis = TimeUnit.MICROSECONDS.toMillis(micros);
      index = indexNameFormatter.indexNameForTimestamp(millis);
    }
    indexer.add(index, span, millis);
  }
  return indexer;
}
Example usage of zipkin.Span in the openzipkin/zipkin project:
the getTraces method of the ElasticsearchHttpSpanStore class.
// Asynchronously finds traces matching the query: first aggregates matching trace IDs,
// then fetches every span for those IDs and groups them into traces for the callback.
@Override
public void getTraces(QueryRequest request, Callback<List<List<Span>>> callback) {
// Bound all matches to the [endTs - lookback, endTs] window.
long beginMillis = request.endTs - request.lookback;
long endMillis = request.endTs;
SearchRequest.Filters filters = new SearchRequest.Filters();
filters.addRange("timestamp_millis", beginMillis, endMillis);
if (request.serviceName != null) {
// The service name can appear on the endpoint of either annotation type.
filters.addNestedTerms(asList("annotations.endpoint.serviceName", "binaryAnnotations.endpoint.serviceName"), request.serviceName);
}
if (request.spanName != null) {
filters.addTerm("name", request.spanName);
}
// Each requested annotation value must be present, scoped to the service when one was given.
for (String annotation : request.annotations) {
Map<String, String> nestedTerms = new LinkedHashMap<>();
nestedTerms.put("annotations.value", annotation);
if (request.serviceName != null) {
nestedTerms.put("annotations.endpoint.serviceName", request.serviceName);
}
filters.addNestedTerms(nestedTerms);
}
for (Map.Entry<String, String> kv : request.binaryAnnotations.entrySet()) {
// In our index template, we make sure the binaryAnnotation value is indexed as string,
// meaning non-string values won't even be indexed at all. This means that we can only
// match string values here, which happens to be exactly what we want.
Map<String, String> nestedTerms = new LinkedHashMap<>();
nestedTerms.put("binaryAnnotations.key", kv.getKey());
nestedTerms.put("binaryAnnotations.value", kv.getValue());
if (request.serviceName != null) {
nestedTerms.put("binaryAnnotations.endpoint.serviceName", request.serviceName);
}
filters.addNestedTerms(nestedTerms);
}
if (request.minDuration != null) {
// maxDuration may be null, leaving the duration range open-ended above.
filters.addRange("duration", request.minDuration, request.maxDuration);
}
// We need to filter to traces that contain at least one span that matches the request,
// but the zipkin API is supposed to order traces by first span, regardless of if it was
// filtered or not. This is not possible without either multiple, heavyweight queries
// or complex multiple indexing, defeating much of the elegance of using elasticsearch for this.
// So we fudge and order on the first span among the filtered spans - in practice, there should
// be no significant difference in user experience since span start times are usually very
// close to each other in human time.
Aggregation traceIdTimestamp = Aggregation.terms("traceId", request.limit).addSubAggregation(Aggregation.min("timestamp_millis")).orderBy("timestamp_millis", "desc");
List<String> indices = indexNameFormatter.indexNamePatternsForRange(beginMillis, endMillis);
SearchRequest esRequest = SearchRequest.forIndicesAndType(indices, SPAN).filters(filters).addAggregation(traceIdTimestamp);
HttpCall<List<String>> traceIdsCall = search.newCall(esRequest, BodyConverters.SORTED_KEYS);
// When we receive span results, we need to group them by trace ID
Callback<List<Span>> successCallback = new Callback<List<Span>>() {
@Override
public void onSuccess(List<Span> input) {
List<List<Span>> traces = GroupByTraceId.apply(input, strictTraceId, true);
// Due to tokenization of the trace ID, our matches are imprecise on Span.traceIdHigh
for (Iterator<List<Span>> trace = traces.iterator(); trace.hasNext(); ) {
List<Span> next = trace.next();
if (next.get(0).traceIdHigh != 0 && !request.test(next)) {
trace.remove();
}
}
callback.onSuccess(traces);
}
@Override
public void onError(Throwable t) {
callback.onError(t);
}
};
// Fire off the query to get spans once we have trace ids
traceIdsCall.submit(new Callback<List<String>>() {
@Override
public void onSuccess(@Nullable List<String> traceIds) {
// No matching trace IDs: complete immediately with an empty result.
if (traceIds == null || traceIds.isEmpty()) {
callback.onSuccess(Collections.emptyList());
return;
}
// Second round-trip: fetch every span whose traceId is in the aggregated set.
SearchRequest request = SearchRequest.forIndicesAndType(indices, SPAN).terms("traceId", traceIds);
search.newCall(request, BodyConverters.SPANS).submit(successCallback);
}
@Override
public void onError(Throwable t) {
callback.onError(t);
}
});
}
Example usage of zipkin.Span in the openzipkin/zipkin project:
the getTraces method of the MySQLSpanStore class.
/**
 * Returns traces matching either the {@code request} (when non-null) or the explicit
 * trace ID pair, joining span rows with their annotation rows in two queries.
 *
 * <p>When {@code strictTraceId} is disabled, the high 64 bits of the trace ID are
 * ignored so 128-bit and 64-bit writes of the same trace collapse together.
 *
 * @param raw when true, grouped traces are returned as stored (no adjustment on grouping)
 * @throws RuntimeException wrapping any {@link SQLException} raised while querying
 */
List<List<Span>> getTraces(@Nullable QueryRequest request, @Nullable Long traceIdHigh, @Nullable Long traceIdLow, boolean raw) {
  if (traceIdHigh != null && !strictTraceId)
    traceIdHigh = null;
  final Map<Pair<Long>, List<Span>> spansWithoutAnnotations;
  final Map<Row3<Long, Long, Long>, List<Record>> dbAnnotations;
  try (Connection conn = datasource.getConnection()) {
    Condition traceIdCondition = request != null
        ? schema.spanTraceIdCondition(toTraceIdQuery(context.get(conn), request))
        : schema.spanTraceIdCondition(traceIdHigh, traceIdLow);
    // First query: the spans themselves, grouped by (traceIdHigh, traceId).
    spansWithoutAnnotations = context.get(conn).select(schema.spanFields).from(ZIPKIN_SPANS)
        .where(traceIdCondition)
        .stream()
        .map(r -> Span.builder()
            .traceIdHigh(maybeGet(r, ZIPKIN_SPANS.TRACE_ID_HIGH, 0L))
            .traceId(r.getValue(ZIPKIN_SPANS.TRACE_ID))
            .name(r.getValue(ZIPKIN_SPANS.NAME))
            .id(r.getValue(ZIPKIN_SPANS.ID))
            .parentId(r.getValue(ZIPKIN_SPANS.PARENT_ID))
            .timestamp(r.getValue(ZIPKIN_SPANS.START_TS))
            .duration(r.getValue(ZIPKIN_SPANS.DURATION))
            .debug(r.getValue(ZIPKIN_SPANS.DEBUG))
            .build())
        .collect(groupingBy((Span s) -> Pair.create(s.traceIdHigh, s.traceId),
            LinkedHashMap::new, Collectors.<Span>toList()));
    // Second query: all annotations for those traces, keyed by (traceIdHigh, traceId, spanId).
    dbAnnotations = context.get(conn).select(schema.annotationFields).from(ZIPKIN_ANNOTATIONS)
        .where(schema.annotationsTraceIdCondition(spansWithoutAnnotations.keySet()))
        .orderBy(ZIPKIN_ANNOTATIONS.A_TIMESTAMP.asc(), ZIPKIN_ANNOTATIONS.A_KEY.asc())
        .stream()
        .collect(groupingBy(
            (Record a) -> row(maybeGet(a, ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH, 0L),
                a.getValue(ZIPKIN_ANNOTATIONS.TRACE_ID),
                a.getValue(ZIPKIN_ANNOTATIONS.SPAN_ID)),
            LinkedHashMap::new, // LinkedHashMap preserves order while grouping
            Collectors.<Record>toList()));
  } catch (SQLException e) {
    // Keep the SQLException as the cause: previously only its message survived, which
    // hid the underlying driver stack trace from callers.
    throw new RuntimeException("Error querying for " + request + ": " + e.getMessage(), e);
  }
  // Merge the annotation rows back into their spans.
  List<Span> allSpans = new ArrayList<>(spansWithoutAnnotations.size());
  for (List<Span> spans : spansWithoutAnnotations.values()) {
    for (Span s : spans) {
      Span.Builder span = s.toBuilder();
      Row3<Long, Long, Long> key = row(s.traceIdHigh, s.traceId, s.id);
      if (dbAnnotations.containsKey(key)) {
        for (Record a : dbAnnotations.get(key)) {
          Endpoint endpoint = endpoint(a);
          int type = a.getValue(ZIPKIN_ANNOTATIONS.A_TYPE);
          if (type == -1) { // -1 marks a timestamped annotation row vs. a binary annotation
            span.addAnnotation(Annotation.create(
                a.getValue(ZIPKIN_ANNOTATIONS.A_TIMESTAMP),
                a.getValue(ZIPKIN_ANNOTATIONS.A_KEY), endpoint));
          } else {
            span.addBinaryAnnotation(BinaryAnnotation.create(
                a.getValue(ZIPKIN_ANNOTATIONS.A_KEY),
                a.getValue(ZIPKIN_ANNOTATIONS.A_VALUE), Type.fromValue(type), endpoint));
          }
        }
      }
      allSpans.add(span.build());
    }
  }
  return GroupByTraceId.apply(allSpans, strictTraceId, !raw);
}
Example usage of zipkin.Span in the openzipkin/zipkin project:
the specialCharsInJson test method of the JsonAdaptersTest class.
/**
* This isn't a test of what we "should" accept as a span, rather that characters that
* trip-up json don't fail in SPAN_ADAPTER.
*/
@Test
public void specialCharsInJson() throws IOException {
// service name is surrounded by control characters
Endpoint e = Endpoint.create(new String(new char[] { 0, 'a', 1 }), 0);
Span worstSpanInTheWorld = Span.builder().traceId(1L).id(1L).name(new String(new char[] { '"', '\\', '\t', '\b', '\n', '\r', '\f' })).addAnnotation(Annotation.create(1L, "
and
", e)).addBinaryAnnotation(BinaryAnnotation.create("\"foo", "Database error: ORA-00942:
and
table or view does not exist\n", e)).build();
Buffer bytes = new Buffer();
bytes.write(Codec.JSON.writeSpan(worstSpanInTheWorld));
assertThat(SPAN_ADAPTER.fromJson(bytes)).isEqualTo(worstSpanInTheWorld);
}
Aggregations