use of zipkin.Endpoint in project zipkin by openzipkin.
the class ZipkinServerTest method readsBackSpanName.
@Test
public void readsBackSpanName() throws Exception {
  String service = "web";
  Endpoint endpoint = Endpoint.create(service, 127 << 24 | 1, 80);
  Annotation ann = Annotation.create(System.currentTimeMillis() * 1000, SERVER_RECV, endpoint);
  Span span = Span.builder().id(1L).traceId(1L).name("get").addAnnotation(ann).build();

  // write the span to the server
  performAsync(post("/api/v1/spans").content(Codec.JSON.writeSpans(asList(span))))
      .andExpect(status().isAccepted());

  // sleep as the storage operation is async
  Thread.sleep(1000);

  // read back the span name, given its service
  mockMvc.perform(get("/api/v1/spans?serviceName=" + service))
      .andExpect(status().isOk())
      .andExpect(content().string("[\"" + span.name + "\"]"));
}
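The int 127 << 24 | 1 passed to Endpoint.create above is the IPv4 address 127.0.0.1 packed big-endian into a single int. A minimal sketch of that packing as a plain-Java helper (ipv4ToInt is a hypothetical name, not a zipkin API):

// Hypothetical helper, not part of zipkin: pack a dotted-quad IPv4 string into
// the big-endian int form used above, so "127.0.0.1" becomes 127 << 24 | 1.
static int ipv4ToInt(String dottedQuad) {
  int packed = 0;
  for (String octet : dottedQuad.split("\\.")) {
    packed = (packed << 8) | Integer.parseInt(octet);
  }
  return packed;
}

// usage: Endpoint endpoint = Endpoint.create("web", ipv4ToInt("127.0.0.1"), 80);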
use of zipkin.Endpoint in project zipkin by openzipkin.
the class CassandraSpanStoreTest method searchingByAnnotationShouldFilterBeforeLimiting.
@Test
public void searchingByAnnotationShouldFilterBeforeLimiting() {
  long now = System.currentTimeMillis();
  int queryLimit = 2;
  Endpoint endpoint = TestObjects.LOTS_OF_SPANS[0].annotations.get(0).endpoint;
  BinaryAnnotation ba = BinaryAnnotation.create("host.name", "host1", endpoint);

  int nbTraceFetched = queryLimit * storage.indexFetchMultiplier;
  IntStream.range(0, nbTraceFetched)
      .forEach(i -> accept(TestObjects.LOTS_OF_SPANS[i++].toBuilder().timestamp(now - (i * 1000)).build()));

  // Add two traces with the binary annotation we're looking for
  IntStream.range(nbTraceFetched, nbTraceFetched + 2)
      .forEach(i -> accept(TestObjects.LOTS_OF_SPANS[i++].toBuilder()
          .timestamp(now - (i * 1000))
          .addBinaryAnnotation(ba)
          .build()));

  QueryRequest queryRequest = QueryRequest.builder()
      .addBinaryAnnotation(ba.key, new String(ba.value, Util.UTF_8))
      .serviceName(endpoint.serviceName)
      .limit(queryLimit)
      .build();
  assertThat(store().getTraces(queryRequest)).hasSize(queryLimit);
}
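The point of seeding queryLimit * indexFetchMultiplier unannotated traces is that the store must apply the annotation filter before the row limit. A plain-Java sketch of the difference (assuming, hypothetically, that only the last two indexed traces carry the annotation, and the usual java.util and java.util.stream imports):

// Illustration only, not CassandraSpanStore code: with a limit of 2,
// limiting before filtering drops the annotated traces entirely,
// while filtering before limiting returns exactly the two matches.
List<String> indexed = Arrays.asList("t1", "t2", "t3", "t4", "match1", "match2");

List<String> limitThenFilter = indexed.stream()
    .limit(2)                              // keeps t1, t2
    .filter(t -> t.startsWith("match"))
    .collect(Collectors.toList());         // [] -- the matches were cut off

List<String> filterThenLimit = indexed.stream()
    .filter(t -> t.startsWith("match"))    // keeps match1, match2
    .limit(2)
    .collect(Collectors.toList());         // [match1, match2]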
use of zipkin.Endpoint in project zipkin by openzipkin.
the class DependenciesTest method noEmptyLinks.
/** Some use empty string for the {@link Constants#CLIENT_ADDR} to defer naming to the server. */
@Test
public void noEmptyLinks() {
  Endpoint someClient = Endpoint.create("", 172 << 24 | 17 << 16 | 4);
  List<Span> trace = asList(
      Span.builder().traceId(20L).id(20L).name("get").timestamp(TODAY * 1000).duration(350L * 1000)
          .addBinaryAnnotation(BinaryAnnotation.address(CLIENT_ADDR, someClient))
          .addBinaryAnnotation(BinaryAnnotation.address(SERVER_ADDR, WEB_ENDPOINT)).build(),
      Span.builder().traceId(20L).parentId(20L).id(21L).name("get").timestamp((TODAY + 50) * 1000).duration(250L * 1000)
          .addBinaryAnnotation(BinaryAnnotation.address(CLIENT_ADDR, WEB_ENDPOINT))
          .addBinaryAnnotation(BinaryAnnotation.address(SERVER_ADDR, APP_ENDPOINT)).build(),
      Span.builder().traceId(20L).parentId(21L).id(22L).name("get").timestamp((TODAY + 150) * 1000).duration(50L * 1000)
          .addBinaryAnnotation(BinaryAnnotation.address(CLIENT_ADDR, APP_ENDPOINT))
          .addBinaryAnnotation(BinaryAnnotation.address(SERVER_ADDR, DB_ENDPOINT)).build());

  processDependencies(trace);

  assertThat(store().getDependencies(TODAY + 1000, null))
      .containsOnly(DependencyLink.create("web", "app", 1), DependencyLink.create("app", "db", 1));
}
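The containsOnly assertion pins the expected pair of links; the property named by the test is that the empty client-address service never appears as a link node. A hedged follow-up check reusing the test's store() and TODAY (this loop is illustrative, not part of DependenciesTest):

// Illustrative check, not DependenciesTest code: every aggregated link
// should name both sides, i.e. the empty CLIENT_ADDR service name never
// leaks into the dependency graph.
for (DependencyLink link : store().getDependencies(TODAY + 1000, null)) {
  assertThat(link.parent).isNotEmpty();
  assertThat(link.child).isNotEmpty();
}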
use of zipkin.Endpoint in project zipkin by openzipkin.
the class SpanStoreTest method correctsClockSkew.
/**
* Basic clock skew correction is something span stores should support, until the UI supports
* happens-before without using timestamps. The easiest clock skew to correct is where a child
* appears to happen before the parent.
*
* <p>It doesn't matter if clock-skew correction happens at store or query time, as long as it
* occurs by the time results are returned.
*
* <p>Span stores who don't support this can override and disable this test, noting in the README
* the limitation.
*/
@Test
public void correctsClockSkew() {
  Endpoint client = Endpoint.create("client", 192 << 24 | 168 << 16 | 1);
  Endpoint frontend = Endpoint.create("frontend", 192 << 24 | 168 << 16 | 2);
  Endpoint backend = Endpoint.create("backend", 192 << 24 | 168 << 16 | 3);

  /** Intentionally not setting span.timestamp, duration */
  Span parent = Span.builder().traceId(1).name("method1").id(666)
      .addAnnotation(Annotation.create((today + 100) * 1000, CLIENT_SEND, client))
      .addAnnotation(Annotation.create((today + 95) * 1000, SERVER_RECV, frontend)) // before client sends
      .addAnnotation(Annotation.create((today + 120) * 1000, SERVER_SEND, frontend)) // before client receives
      .addAnnotation(Annotation.create((today + 135) * 1000, CLIENT_RECV, client)).build();

  /** Intentionally not setting span.timestamp, duration */
  Span remoteChild = Span.builder().traceId(1).name("method2").id(777).parentId(666L)
      .addAnnotation(Annotation.create((today + 100) * 1000, CLIENT_SEND, frontend))
      .addAnnotation(Annotation.create((today + 115) * 1000, SERVER_RECV, backend))
      .addAnnotation(Annotation.create((today + 120) * 1000, SERVER_SEND, backend))
      .addAnnotation(Annotation.create((today + 115) * 1000, CLIENT_RECV, frontend)) // before server sent
      .build();

  /** Local spans must explicitly set timestamp */
  Span localChild = Span.builder().traceId(1).name("local").id(778).parentId(666L)
      .timestamp((today + 101) * 1000).duration(50L)
      .addBinaryAnnotation(BinaryAnnotation.create(LOCAL_COMPONENT, "framey", frontend)).build();

  List<Span> skewed = asList(parent, remoteChild, localChild);

  // There's clock skew when the child doesn't happen after the parent
  assertThat(skewed.get(0).annotations.get(0).timestamp)
      .isLessThanOrEqualTo(skewed.get(1).annotations.get(0).timestamp)
      .isLessThanOrEqualTo(skewed.get(2).timestamp); // local span

  // Regardless of when clock skew is corrected, it should be corrected before traces return
  accept(parent, remoteChild, localChild);
  List<Span> adjusted = store().getTrace(localChild.traceIdHigh, localChild.traceId);

  // After correction, children happen after their parent
  assertThat(adjusted.get(0).timestamp)
      .isLessThanOrEqualTo(adjusted.get(1).timestamp)
      .isLessThanOrEqualTo(adjusted.get(2).timestamp);

  // And we do not change the parent (client) duration, due to skew in the child (server)
  assertThat(adjusted.get(0).duration).isEqualTo(clientDuration(skewed.get(0)));
  assertThat(adjusted.get(1).duration).isEqualTo(clientDuration(skewed.get(1)));
  assertThat(adjusted.get(2).duration).isEqualTo(skewed.get(2).duration);
}
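For context, here is a rough sketch of how skew could be estimated from those four annotations, assuming symmetric network latency. It mirrors the parent/remoteChild timestamps above but is not zipkin's CorrectForClockSkew implementation:

// Rough sketch, assuming symmetric network latency; not zipkin's actual
// skew-correction code. Values mirror the parent span's annotations above.
long today = System.currentTimeMillis();
long clientSend = (today + 100) * 1000;
long clientRecv = (today + 135) * 1000;
long serverRecv = (today + 95) * 1000;   // skewed: appears before the client sent
long serverSend = (today + 120) * 1000;

long serverDuration = serverSend - serverRecv;                   // unchanged by correction
long oneWayLatency = ((clientRecv - clientSend) - serverDuration) / 2;
long skew = serverRecv - (clientSend + oneWayLatency);           // negative: server clock is behind
long correctedServerRecv = serverRecv - skew;                    // now >= clientSend
long correctedServerSend = serverSend - skew;                    // still serverDuration later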