Use of com.amazon.dataprepper.model.trace.Span in project data-prepper by opensearch-project: class PeerForwarderTest, method testSingleRemoteIpForwardRequestEncodeError.
@Test
public void testSingleRemoteIpForwardRequestEncodeError() throws NoSuchFieldException, IllegalAccessException, DecoderException, UnsupportedEncodingException {
    final List<String> testIps = generateTestIps(2);
    final String peerIp = testIps.get(1);
    when(peerClientPool.getClient(peerIp)).thenReturn(client);
    MetricsTestUtil.initMetrics();
    final PeerForwarder testPeerForwarder = generatePeerForwarder(testIps, 3);
    // Force every encode attempt to fail so no request can be forwarded to the peer.
    when(oTelProtoEncoder.convertToResourceSpans(any(Span.class))).thenThrow(new DecoderException());
    reflectivelySetEncoder(testPeerForwarder, oTelProtoEncoder);
    final List<Record<Object>> exportedRecords = testPeerForwarder.doExecute(
            TEST_SPANS_B.stream().map(span -> new Record<Object>(span)).collect(Collectors.toList()));
    // The peer client must never be called; all spans fall back to local processing.
    verifyNoInteractions(client);
    Assert.assertEquals(3, exportedRecords.size());
    final List<Span> exportedSpans = exportedRecords.stream().map(record -> (Span) record.getData()).collect(Collectors.toList());
    assertTrue(exportedSpans.containsAll(TEST_SPANS_B));
    assertTrue(TEST_SPANS_B.containsAll(exportedSpans));
}
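The mocked encoder is injected through reflectivelySetEncoder, whose body is not part of this snippet. A minimal sketch of what such a helper could look like, assuming the encoder lives in a private PeerForwarder field named oTelProtoEncoder (the field name and signature here are assumptions, not the project's actual helper):

// Hypothetical reflection helper; the field name "oTelProtoEncoder" is an assumption.
private static void reflectivelySetEncoder(final PeerForwarder peerForwarder, final Object encoder)
        throws NoSuchFieldException, IllegalAccessException {
    // Look up the private encoder field on the PeerForwarder instance.
    final java.lang.reflect.Field encoderField = PeerForwarder.class.getDeclaredField("oTelProtoEncoder");
    encoderField.setAccessible(true);          // bypass private access for the test
    encoderField.set(peerForwarder, encoder);  // swap in the mocked encoder
}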
Use of com.amazon.dataprepper.model.trace.Span in project data-prepper by opensearch-project: class PeerForwarderTest, method testSingleRemoteIpForwardRequestFutureError.
@Test
public void testSingleRemoteIpForwardRequestFutureError() throws ExecutionException, InterruptedException {
    try (final MockedStatic<CompletableFuture> completableFutureMockedStatic = mockStatic(CompletableFuture.class)) {
        // Make the async export future fail so the spans targeted at the peer are handled locally instead.
        completableFutureMockedStatic.when(() -> CompletableFuture.supplyAsync(
                ArgumentMatchers.<Supplier<ExportTraceServiceRequest>>any(), any(ExecutorService.class)))
                .thenReturn(completableFuture);
        when(completableFuture.get()).thenThrow(new InterruptedException());
        final List<String> testIps = generateTestIps(2);
        final Channel channel = mock(Channel.class);
        final String peerIp = testIps.get(1);
        final String fullPeerIp = String.format("%s:21890", peerIp);
        when(channel.authority()).thenReturn(fullPeerIp);
        when(peerClientPool.getClient(peerIp)).thenReturn(client);
        when(client.getChannel()).thenReturn(channel);
        MetricsTestUtil.initMetrics();
        final PeerForwarder testPeerForwarder = generatePeerForwarder(testIps, 3);
        final List<Record<Object>> exportedRecords = testPeerForwarder.doExecute(
                TEST_SPANS_B.stream().map(span -> new Record<Object>(span)).collect(Collectors.toList()));
        verify(completableFuture, times(1)).get();
        Assert.assertEquals(3, exportedRecords.size());
        final List<Span> exportedSpans = exportedRecords.stream().map(record -> (Span) record.getData()).collect(Collectors.toList());
        assertTrue(exportedSpans.containsAll(TEST_SPANS_B));
        assertTrue(TEST_SPANS_B.containsAll(exportedSpans));
    }
}
Use of com.amazon.dataprepper.model.trace.Span in project data-prepper by opensearch-project: class PeerForwarderTest, method testSingleRemoteIpBothLocalAndForwardedRequestWithEventRecordData.
@Test
public void testSingleRemoteIpBothLocalAndForwardedRequestWithEventRecordData() throws DecoderException {
    final List<String> testIps = generateTestIps(2);
    final Channel channel = mock(Channel.class);
    final String peerIp = testIps.get(1);
    when(channel.authority()).thenReturn(String.format("%s:21890", peerIp));
    when(peerClientPool.getClient(peerIp)).thenReturn(client);
    when(client.getChannel()).thenReturn(channel);
    // Capture every request exported to the remote peer, keyed by peer IP.
    final Map<String, List<ExportTraceServiceRequest>> requestsByIp =
            testIps.stream().collect(Collectors.toMap(ip -> ip, ip -> new ArrayList<>()));
    doAnswer(invocation -> {
        final ExportTraceServiceRequest exportTraceServiceRequest = invocation.getArgument(0);
        requestsByIp.get(peerIp).add(exportTraceServiceRequest);
        return null;
    }).when(client).export(any(ExportTraceServiceRequest.class));
    MetricsTestUtil.initMetrics();
    final PeerForwarder testPeerForwarder = generatePeerForwarder(testIps, 3);
    final List<Record<Object>> exportedRecords = testPeerForwarder.doExecute(
            TEST_SPANS_ALL.stream().map(span -> new Record<Object>(span)).collect(Collectors.toList()));
    // Spans owned by this node are emitted locally...
    final List<Span> expectedLocalSpans = Arrays.asList(SPAN_1, SPAN_2, SPAN_3);
    Assert.assertEquals(3, exportedRecords.size());
    final List<Span> localSpans = exportedRecords.stream().map(record -> (Span) record.getData()).collect(Collectors.toList());
    assertTrue(localSpans.containsAll(expectedLocalSpans));
    assertTrue(expectedLocalSpans.containsAll(localSpans));
    // ...while the remaining spans are forwarded to the peer in a single request.
    Assert.assertEquals(1, requestsByIp.get(peerIp).size());
    final ExportTraceServiceRequest forwardedRequest = requestsByIp.get(peerIp).get(0);
    final List<ResourceSpans> forwardedResourceSpans = forwardedRequest.getResourceSpansList();
    assertEquals(3, forwardedResourceSpans.size());
    forwardedResourceSpans.forEach(rs -> {
        assertEquals(TEST_SERVICE_B, extractServiceName(rs));
        assertEquals(1, rs.getInstrumentationLibrarySpansCount());
        final InstrumentationLibrarySpans ils = rs.getInstrumentationLibrarySpans(0);
        assertEquals(1, ils.getSpansCount());
        final io.opentelemetry.proto.trace.v1.Span sp = ils.getSpans(0);
        assertEquals(TEST_TRACE_ID_2, Hex.encodeHexString(sp.getTraceId().toByteArray()));
    });
}
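The service-name assertion goes through extractServiceName, a test helper not shown in this snippet. A minimal sketch of such a helper, assuming the service name is carried in the standard OTLP service.name resource attribute (the helper's actual implementation in the project may differ):

// Hypothetical helper: pull the "service.name" resource attribute out of a ResourceSpans message.
private static String extractServiceName(final ResourceSpans resourceSpans) {
    return resourceSpans.getResource().getAttributesList().stream()
            .filter(keyValue -> "service.name".equals(keyValue.getKey()))
            .map(keyValue -> keyValue.getValue().getStringValue())
            .findFirst()
            .orElse(null);
}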
Use of com.amazon.dataprepper.model.trace.Span in project data-prepper by opensearch-project: class ServiceMapStatefulPrepperTest, method testPrepareForShutdownWithEventRecordData.
@Test
public void testPrepareForShutdownWithEventRecordData() {
    final File path = new File(ServiceMapPrepperConfig.DEFAULT_DB_PATH);
    final ServiceMapStatefulPrepper serviceMapStateful = new ServiceMapStatefulPrepper(100, path, Clock.systemUTC(), 1, PLUGIN_SETTING);
    final byte[] rootSpanId1Bytes = ServiceMapTestUtils.getRandomBytes(8);
    final byte[] traceId1Bytes = ServiceMapTestUtils.getRandomBytes(16);
    final String rootSpanId1 = Hex.encodeHexString(rootSpanId1Bytes);
    final String traceId1 = Hex.encodeHexString(traceId1Bytes);
    final String traceGroup1 = "reset_password";
    final Span frontendSpans1 = ServiceMapTestUtils.getSpan(FRONTEND_SERVICE, traceGroup1, rootSpanId1, "", traceId1,
            io.opentelemetry.proto.trace.v1.Span.SpanKind.SPAN_KIND_CLIENT);
    final Span authenticationSpansServer = ServiceMapTestUtils.getSpan(AUTHENTICATION_SERVICE, "reset",
            Hex.encodeHexString(ServiceMapTestUtils.getRandomBytes(8)), frontendSpans1.getSpanId(), traceId1,
            io.opentelemetry.proto.trace.v1.Span.SpanKind.SPAN_KIND_SERVER);
    serviceMapStateful.execute(Arrays.asList(new Record<>(frontendSpans1), new Record<>(authenticationSpansServer)));
    // With buffered state the prepper is not yet ready to shut down...
    assertFalse(serviceMapStateful.isReadyForShutdown());
    serviceMapStateful.prepareForShutdown();
    // ...until a further execute() call, even with no records, lets it finish flushing its state.
    serviceMapStateful.execute(Collections.emptyList());
    assertTrue(serviceMapStateful.isReadyForShutdown());
    serviceMapStateful.shutdown();
}
Use of com.amazon.dataprepper.model.trace.Span in project data-prepper by opensearch-project: class OTelTraceRawProcessor, method getTracesToFlushByGarbageCollection.
/**
 * Periodically flushes spans from memory. Typically, all spans of a trace are written
 * once the trace's root span arrives; however, some child spans may arrive after the root span.
 * This method ensures such "orphaned" child spans are eventually flushed from memory.
 * @return List of spans to be sent down the pipeline
 */
private List<Span> getTracesToFlushByGarbageCollection() {
    final List<Span> recordsToFlush = new LinkedList<>();
    if (shouldGarbageCollect()) {
        final boolean isLockAcquired = traceFlushLock.tryLock();
        if (isLockAcquired) {
            try {
                final long now = System.currentTimeMillis();
                lastTraceFlushTime = now;
                final Iterator<Map.Entry<String, SpanSet>> entryIterator = traceIdSpanSetMap.entrySet().iterator();
                while (entryIterator.hasNext()) {
                    final Map.Entry<String, SpanSet> entry = entryIterator.next();
                    final String traceId = entry.getKey();
                    final TraceGroup traceGroup = traceIdTraceGroupCache.getIfPresent(traceId);
                    final SpanSet spanSet = entry.getValue();
                    final long traceTime = spanSet.getTimeSeen();
                    // Flush traces held longer than the flush interval, or everything when shutting down.
                    if (now - traceTime >= traceFlushInterval || isShuttingDown) {
                        final Set<Span> spans = spanSet.getSpans();
                        if (traceGroup != null) {
                            spans.forEach(span -> {
                                fillInTraceGroupInfo(span, traceGroup);
                                recordsToFlush.add(span);
                            });
                        } else {
                            spans.forEach(span -> {
                                recordsToFlush.add(span);
                                LOG.warn("Missing trace group for SpanId: {}", span.getSpanId());
                            });
                        }
                        entryIterator.remove();
                    }
                }
                if (recordsToFlush.size() > 0) {
                    LOG.info("Flushing {} records due to GC", recordsToFlush.size());
                }
            } finally {
                traceFlushLock.unlock();
            }
        }
    }
    return recordsToFlush;
}
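The flush is gated by shouldGarbageCollect(), which is not shown above. A plausible sketch, assuming the gate only compares the time elapsed since lastTraceFlushTime against traceFlushInterval and always allows flushing during shutdown (the actual condition in data-prepper may differ):

// Hypothetical sketch of the gate used above; the real check in the project may differ.
private boolean shouldGarbageCollect() {
    return System.currentTimeMillis() - lastTraceFlushTime >= traceFlushInterval || isShuttingDown;
}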