Use of zipkin.storage.Callback in the openzipkin/zipkin project.
Class NativeClient, method bulkSpanIndexer:
// Builds a BulkSpanIndexer backed by the Elasticsearch transport (native) client.
// A single span is indexed directly; multiple spans are batched into one bulk request.
@Override
protected BulkSpanIndexer bulkSpanIndexer() {
return new SpanBytesBulkSpanIndexer() {
// Pending index operations, one per span passed to add()
final List<IndexRequestBuilder> indexRequests = new LinkedList<>();
// Indices to flush after the write completes; only populated when flushOnWrites is set
final Set<String> indicesToFlush = new LinkedHashSet<>();
@Override
protected void add(String index, byte[] spanBytes) {
indexRequests.add(client.prepareIndex(index, SPAN).setSource(spanBytes));
if (flushOnWrites)
indicesToFlush.add(index);
}
// Creates a bulk request when there is more than one span to store
@Override
public void execute(final Callback<Void> callback) {
// Raw ActionListener on purpose: the same adapter must accept both the single-index
// response and the bulk response, whose types differ.
ActionListener callbackAdapter = new ActionListener() {
@Override
public void onResponse(Object input) {
callback.onSuccess(null);
}
@Override
public void onFailure(Throwable throwable) {
callback.onError(throwable);
}
};
// Conditionally create a bulk action depending on the count of index requests
ListenableActionFuture<? extends ActionResponse> future;
if (indexRequests.size() == 1) {
future = indexRequests.get(0).execute();
} else {
BulkRequestBuilder request = client.prepareBulk();
for (IndexRequestBuilder span : indexRequests) {
request.add(span);
}
future = request.execute();
}
// Unless we are in a unit test, this should always be true
// (indicesToFlush is only populated when flushOnWrites is set — see add() above)
if (indicesToFlush.isEmpty()) {
future.addListener(callbackAdapter);
return;
}
// If we are in a unit test, we need to flush so that we can read our writes
future.addListener(new ActionListener() {
@Override
public void onResponse(Object input) {
// Flush every touched index, then complete the caller's callback
client.admin().indices().prepareFlush(indicesToFlush.toArray(new String[indicesToFlush.size()])).execute().addListener(callbackAdapter);
}
@Override
public void onFailure(Throwable throwable) {
callbackAdapter.onFailure(throwable);
}
});
}
};
}
Use of zipkin.storage.Callback in the openzipkin/zipkin project.
Class ElasticsearchHttpSpanStore, method getTraces:
/**
 * Finds traces matching the query. First searches for trace IDs whose spans match the
 * filters (aggregated by trace ID, ordered by earliest timestamp descending), then fetches
 * the spans for those IDs, groups them into traces, and invokes {@code callback}.
 *
 * <p>The zipkin API is supposed to order traces by first span, regardless of whether it was
 * filtered or not. That is not possible without multiple heavyweight queries or complex
 * indexing, so we fudge and order on the first span among the <em>filtered</em> spans — in
 * practice span start times are close enough that users see no difference.
 */
@Override
public void getTraces(QueryRequest request, Callback<List<List<Span>>> callback) {
  long beginMillis = request.endTs - request.lookback;
  long endMillis = request.endTs;

  SearchRequest.Filters filters = new SearchRequest.Filters();
  filters.addRange("timestamp_millis", beginMillis, endMillis);
  if (request.serviceName != null) {
    filters.addNestedTerms(asList("annotations.endpoint.serviceName", "binaryAnnotations.endpoint.serviceName"), request.serviceName);
  }
  if (request.spanName != null) {
    filters.addTerm("name", request.spanName);
  }
  for (String annotation : request.annotations) {
    Map<String, String> nestedTerms = new LinkedHashMap<>();
    nestedTerms.put("annotations.value", annotation);
    if (request.serviceName != null) {
      nestedTerms.put("annotations.endpoint.serviceName", request.serviceName);
    }
    filters.addNestedTerms(nestedTerms);
  }
  for (Map.Entry<String, String> kv : request.binaryAnnotations.entrySet()) {
    // In our index template, we make sure the binaryAnnotation value is indexed as string,
    // meaning non-string values won't even be indexed at all. This means that we can only
    // match string values here, which happens to be exactly what we want.
    Map<String, String> nestedTerms = new LinkedHashMap<>();
    nestedTerms.put("binaryAnnotations.key", kv.getKey());
    nestedTerms.put("binaryAnnotations.value", kv.getValue());
    if (request.serviceName != null) {
      nestedTerms.put("binaryAnnotations.endpoint.serviceName", request.serviceName);
    }
    filters.addNestedTerms(nestedTerms);
  }
  if (request.minDuration != null) {
    // NOTE(review): maxDuration may be null here — assumes addRange treats a null upper
    // bound as unbounded; confirm against SearchRequest.Filters.
    filters.addRange("duration", request.minDuration, request.maxDuration);
  }

  // Order trace IDs by the earliest timestamp among their matching spans (see class note).
  Aggregation traceIdTimestamp = Aggregation.terms("traceId", request.limit)
      .addSubAggregation(Aggregation.min("timestamp_millis"))
      .orderBy("timestamp_millis", "desc");

  List<String> indices = indexNameFormatter.indexNamePatternsForRange(beginMillis, endMillis);
  SearchRequest esRequest = SearchRequest.forIndicesAndType(indices, SPAN)
      .filters(filters)
      .addAggregation(traceIdTimestamp);
  HttpCall<List<String>> traceIdsCall = search.newCall(esRequest, BodyConverters.SORTED_KEYS);

  // When we receive span results, we need to group them by trace ID
  Callback<List<Span>> successCallback = new Callback<List<Span>>() {
    @Override
    public void onSuccess(List<Span> input) {
      List<List<Span>> traces = GroupByTraceId.apply(input, strictTraceId, true);
      // Due to tokenization of the trace ID, our matches are imprecise on Span.traceIdHigh
      for (Iterator<List<Span>> trace = traces.iterator(); trace.hasNext(); ) {
        List<Span> next = trace.next();
        if (next.get(0).traceIdHigh != 0 && !request.test(next)) {
          trace.remove();
        }
      }
      callback.onSuccess(traces);
    }

    @Override
    public void onError(Throwable t) {
      callback.onError(t);
    }
  };

  // Fire off the query to get spans once we have trace ids
  traceIdsCall.submit(new Callback<List<String>>() {
    @Override
    public void onSuccess(@Nullable List<String> traceIds) {
      if (traceIds == null || traceIds.isEmpty()) {
        callback.onSuccess(Collections.emptyList());
        return;
      }
      // Named spansRequest (not "request") to avoid shadowing the QueryRequest parameter
      SearchRequest spansRequest = SearchRequest.forIndicesAndType(indices, SPAN).terms("traceId", traceIds);
      search.newCall(spansRequest, BodyConverters.SPANS).submit(successCallback);
    }

    @Override
    public void onError(Throwable t) {
      callback.onError(t);
    }
  });
}
Use of zipkin.storage.Callback in the openzipkin/zipkin project.
Class ZipkinDispatcher, method dispatch:
/**
 * Routes mock zipkin API requests to the in-memory store/consumer.
 *
 * <p>GET endpoints read from {@code store}; POST /api/v1/spans decodes (optionally gzipped)
 * thrift or json spans and writes them via {@code consumer}. Unknown paths return 404,
 * unsupported methods 405.
 *
 * <p>Fixes in this revision: the gzip source is closed (try-with-resources), and a null
 * {@code Throwable.getMessage()} no longer causes an NPE when building the error response.
 */
@Override
public MockResponse dispatch(RecordedRequest request) {
  HttpUrl url = server.url(request.getPath());
  if (request.getMethod().equals("GET")) {
    if (url.encodedPath().equals("/health")) {
      return new MockResponse().setBody("OK\n");
    } else if (url.encodedPath().equals("/api/v1/services")) {
      return jsonResponse(Codec.JSON.writeStrings(store.getServiceNames()));
    } else if (url.encodedPath().equals("/api/v1/spans")) {
      String serviceName = url.queryParameter("serviceName");
      return jsonResponse(Codec.JSON.writeStrings(store.getSpanNames(serviceName)));
    } else if (url.encodedPath().equals("/api/v1/dependencies")) {
      Long endTs = maybeLong(url.queryParameter("endTs"));
      Long lookback = maybeLong(url.queryParameter("lookback"));
      List<DependencyLink> result = store.getDependencies(endTs, lookback);
      return jsonResponse(Codec.JSON.writeDependencyLinks(result));
    } else if (url.encodedPath().equals("/api/v1/traces")) {
      QueryRequest queryRequest = toQueryRequest(url);
      return jsonResponse(Codec.JSON.writeTraces(store.getTraces(queryRequest)));
    } else if (url.encodedPath().startsWith("/api/v1/trace/")) {
      String traceIdHex = url.encodedPath().replace("/api/v1/trace/", "");
      // 32 hex chars means a 128-bit trace id: the high 16 chars become traceIdHigh
      long traceIdHigh = traceIdHex.length() == 32 ? lowerHexToUnsignedLong(traceIdHex, 0) : 0L;
      long traceIdLow = lowerHexToUnsignedLong(traceIdHex);
      List<Span> trace = url.queryParameterNames().contains("raw")
          ? store.getRawTrace(traceIdHigh, traceIdLow)
          : store.getTrace(traceIdHigh, traceIdLow);
      if (trace != null) return jsonResponse(Codec.JSON.writeSpans(trace));
    }
  } else if (request.getMethod().equals("POST")) {
    if (url.encodedPath().equals("/api/v1/spans")) {
      metrics.incrementMessages();
      byte[] body = request.getBody().readByteArray();
      String encoding = request.getHeader("Content-Encoding");
      if (encoding != null && encoding.contains("gzip")) {
        // Close the gzip source when done (it wraps an in-memory buffer, but Source is
        // Closeable and leaving it open was a latent leak).
        try (GzipSource source = new GzipSource(new Buffer().write(body))) {
          Buffer result = new Buffer();
          while (source.read(result, Integer.MAX_VALUE) != -1) ; // drain completely
          body = result.readByteArray();
        } catch (IOException e) {
          metrics.incrementMessagesDropped();
          return new MockResponse().setResponseCode(400).setBody("Cannot gunzip spans");
        }
      }
      String type = request.getHeader("Content-Type");
      Codec codec = type != null && type.contains("/x-thrift") ? Codec.THRIFT : Codec.JSON;
      final MockResponse result = new MockResponse();
      // NOTE(review): assumes consumer.acceptSpans invokes the callback synchronously;
      // otherwise result could be returned before the response code is set — confirm.
      consumer.acceptSpans(body, codec, new Callback<Void>() {
        @Override
        public void onSuccess(Void value) {
          result.setResponseCode(202);
        }

        @Override
        public void onError(Throwable t) {
          // getMessage() may be null (e.g. a bare RuntimeException); fall back to
          // toString() so the startsWith check below cannot NPE.
          String message = t.getMessage() != null ? t.getMessage() : t.toString();
          result.setBody(message).setResponseCode(message.startsWith("Cannot store") ? 500 : 400);
        }
      });
      return result;
    }
  } else {
    // unsupported method
    return new MockResponse().setResponseCode(405);
  }
  return new MockResponse().setResponseCode(404);
}
Use of zipkin.storage.Callback in the openzipkin/zipkin project.
Class HttpBulkIndexer, method execute:
/** Submits the buffered operations as a single bulk request and reports via the callback. */
void execute(Callback<Void> callback) {
  // Route through an ingest pipeline only when one was configured.
  HttpUrl url;
  if (pipeline != null) {
    url = http.baseUrl.newBuilder("_bulk").addQueryParameter("pipeline", pipeline).build();
  } else {
    url = http.baseUrl.resolve("_bulk");
  }
  RequestBody jsonBody = RequestBody.create(APPLICATION_JSON, body.readByteString());
  Request request = new Request.Builder().url(url).tag(tag).post(jsonBody).build();
  http.<Void>newCall(request, response -> {
    // When indices were recorded, flush so that subsequent reads see these writes.
    if (!indices.isEmpty()) {
      ElasticsearchHttpStorage.flush(http, join(indices));
    }
    return null;
  }).submit(callback);
}
Aggregations