Use of com.google.cloud.servicedirectory.v1.Endpoint in project wavefront-proxy by wavefrontHQ.
In class ZipkinPortUnificationHandlerTest, method testZipkinPreprocessedDerivedMetrics:
/**
 * Test for derived metrics emitted from Zipkin trace listeners. Derived metrics should report
 * tag values after preprocessing rules have been applied to the span.
 */
@Test
public void testZipkinPreprocessedDerivedMetrics() throws Exception {
    Supplier<ReportableEntityPreprocessor> preprocessorSupplier = () -> {
        ReportableEntityPreprocessor preprocessor = new ReportableEntityPreprocessor();
        PreprocessorRuleMetrics preprocessorRuleMetrics = new PreprocessorRuleMetrics(null, null, null);
        preprocessor.forSpan().addTransformer(new SpanReplaceRegexTransformer(APPLICATION_TAG_KEY, "^Zipkin.*",
                PREPROCESSED_APPLICATION_TAG_VALUE, null, null, false, x -> true, preprocessorRuleMetrics));
        preprocessor.forSpan().addTransformer(new SpanReplaceRegexTransformer(SERVICE_TAG_KEY, "^test.*",
                PREPROCESSED_SERVICE_TAG_VALUE, null, null, false, x -> true, preprocessorRuleMetrics));
        preprocessor.forSpan().addTransformer(new SpanReplaceRegexTransformer("sourceName", "^zipkin.*",
                PREPROCESSED_SOURCE_VALUE, null, null, false, x -> true, preprocessorRuleMetrics));
        preprocessor.forSpan().addTransformer(new SpanReplaceRegexTransformer(CLUSTER_TAG_KEY, "^none.*",
                PREPROCESSED_CLUSTER_TAG_VALUE, null, null, false, x -> true, preprocessorRuleMetrics));
        preprocessor.forSpan().addTransformer(new SpanReplaceRegexTransformer(SHARD_TAG_KEY, "^none.*",
                PREPROCESSED_SHARD_TAG_VALUE, null, null, false, x -> true, preprocessorRuleMetrics));
        return preprocessor;
    };
    ZipkinPortUnificationHandler handler = new ZipkinPortUnificationHandler("9411", new NoopHealthCheckManager(),
            mockTraceHandler, mockTraceSpanLogsHandler, mockWavefrontSender, () -> false, () -> false,
            preprocessorSupplier, new SpanSampler(new RateSampler(1.0D), () -> null), null, null);
    Endpoint localEndpoint1 = Endpoint.newBuilder().serviceName("testService").ip("10.0.0.1").build();
    zipkin2.Span spanServer1 = zipkin2.Span.newBuilder().traceId("2822889fe47043bd").id("2822889fe47043bd")
            .kind(zipkin2.Span.Kind.SERVER).name("getservice").timestamp(startTime * 1000)
            .duration(1234 * 1000).localEndpoint(localEndpoint1).build();
    List<zipkin2.Span> zipkinSpanList = ImmutableList.of(spanServer1);
    // Reset mock
    reset(mockTraceHandler, mockWavefrontSender);
    // Set Expectation
    mockTraceHandler.report(Span.newBuilder().setCustomer("dummy").setStartMillis(startTime).setDuration(1234)
            .setName("getservice").setSource(PREPROCESSED_SOURCE_VALUE)
            .setSpanId("00000000-0000-0000-2822-889fe47043bd").setTraceId("00000000-0000-0000-2822-889fe47043bd")
            .setAnnotations(ImmutableList.of(
                    new Annotation("zipkinSpanId", "2822889fe47043bd"), new Annotation("zipkinTraceId", "2822889fe47043bd"),
                    new Annotation("span.kind", "server"), new Annotation("service", PREPROCESSED_SERVICE_TAG_VALUE),
                    new Annotation("application", PREPROCESSED_APPLICATION_TAG_VALUE),
                    new Annotation("cluster", PREPROCESSED_CLUSTER_TAG_VALUE),
                    new Annotation("shard", PREPROCESSED_SHARD_TAG_VALUE), new Annotation("ipv4", "10.0.0.1")))
            .build());
    expectLastCall();
    Capture<HashMap<String, String>> tagsCapture = EasyMock.newCapture();
    mockWavefrontSender.sendMetric(eq(HEART_BEAT_METRIC), eq(1.0), anyLong(), eq(PREPROCESSED_SOURCE_VALUE),
            EasyMock.capture(tagsCapture));
    expectLastCall().anyTimes();
    replay(mockTraceHandler, mockWavefrontSender);
    ChannelHandlerContext mockCtx = createNiceMock(ChannelHandlerContext.class);
    doMockLifecycle(mockCtx);
    ByteBuf content = Unpooled.copiedBuffer(SpanBytesEncoder.JSON_V2.encodeList(zipkinSpanList));
    FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST,
            "http://localhost:9411/api/v2/spans", content, true);
    handler.handleHttpMessage(mockCtx, httpRequest);
    handler.run();
    verifyWithTimeout(500, mockTraceHandler, mockWavefrontSender);
    HashMap<String, String> tagsReturned = tagsCapture.getValue();
    assertEquals(PREPROCESSED_APPLICATION_TAG_VALUE, tagsReturned.get(APPLICATION_TAG_KEY));
    assertEquals(PREPROCESSED_SERVICE_TAG_VALUE, tagsReturned.get(SERVICE_TAG_KEY));
    assertEquals(PREPROCESSED_CLUSTER_TAG_VALUE, tagsReturned.get(CLUSTER_TAG_KEY));
    assertEquals(PREPROCESSED_SHARD_TAG_VALUE, tagsReturned.get(SHARD_TAG_KEY));
}
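For reference, each preprocessing rule above is a regex find-and-replace on one span tag (or on the span source). A minimal sketch of what the first rule does to the application tag, assuming plain String.replaceAll semantics (the real SpanReplaceRegexTransformer supports additional options such as match filters and iteration limits, which are not shown here):

    // Illustration only: before preprocessing the proxy derives application = "Zipkin" for Zipkin spans,
    // so the rule's "^Zipkin.*" pattern matches and the whole value is replaced.
    String applicationTag = "Zipkin";
    String rewritten = applicationTag.replaceAll("^Zipkin.*", PREPROCESSED_APPLICATION_TAG_VALUE);
    // The derived heartbeat metric is then expected to carry the rewritten tag values,
    // which is what the tagsCapture assertions at the end of the test verify.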
Use of com.google.cloud.servicedirectory.v1.Endpoint in project wavefront-proxy by wavefrontHQ.
In class ZipkinPortUnificationHandlerTest, method testZipkinDurationSampler:
@Test
public void testZipkinDurationSampler() throws Exception {
    ZipkinPortUnificationHandler handler = new ZipkinPortUnificationHandler("9411", new NoopHealthCheckManager(),
            mockTraceHandler, mockTraceSpanLogsHandler, null, () -> false, () -> false, null,
            new SpanSampler(new DurationSampler(5), () -> null), null, null);
    Endpoint localEndpoint1 = Endpoint.newBuilder().serviceName("frontend").ip("10.0.0.1").build();
    zipkin2.Span spanServer1 = zipkin2.Span.newBuilder().traceId("2822889fe47043bd").id("2822889fe47043bd")
            .kind(zipkin2.Span.Kind.SERVER).name("getservice").timestamp(startTime * 1000).duration(4 * 1000)
            .localEndpoint(localEndpoint1).putTag("http.method", "GET").putTag("http.url", "none+h1c://localhost:8881/")
            .putTag("http.status_code", "200").addAnnotation(startTime * 1000, "start processing").build();
    zipkin2.Span spanServer2 = zipkin2.Span.newBuilder().traceId("3822889fe47043bd").id("3822889fe47043bd")
            .kind(zipkin2.Span.Kind.SERVER).name("getservice").timestamp(startTime * 1000).duration(9 * 1000)
            .localEndpoint(localEndpoint1).putTag("http.method", "GET").putTag("http.url", "none+h1c://localhost:8881/")
            .putTag("http.status_code", "200").addAnnotation(startTime * 1000, "start processing").build();
    List<zipkin2.Span> zipkinSpanList = ImmutableList.of(spanServer1, spanServer2);
    SpanBytesEncoder encoder = SpanBytesEncoder.values()[1];
    ByteBuf content = Unpooled.copiedBuffer(encoder.encodeList(zipkinSpanList));
    // take care of mocks.
    // Reset mock
    reset(mockTraceHandler, mockTraceSpanLogsHandler);
    // Set Expectation
    Span expectedSpan2 = Span.newBuilder().setCustomer("dummy").setStartMillis(startTime).setDuration(9)
            .setName("getservice").setSource(DEFAULT_SOURCE)
            .setSpanId("00000000-0000-0000-3822-889fe47043bd").setTraceId("00000000-0000-0000-3822-889fe47043bd")
            .setAnnotations(ImmutableList.of(
                    new Annotation("zipkinSpanId", "3822889fe47043bd"), new Annotation("zipkinTraceId", "3822889fe47043bd"),
                    new Annotation("span.kind", "server"), new Annotation("_spanSecondaryId", "server"),
                    new Annotation("service", "frontend"), new Annotation("http.method", "GET"),
                    new Annotation("http.status_code", "200"), new Annotation("http.url", "none+h1c://localhost:8881/"),
                    new Annotation("application", "Zipkin"), new Annotation("cluster", "none"),
                    new Annotation("shard", "none"), new Annotation("ipv4", "10.0.0.1"),
                    new Annotation("_spanLogs", "true")))
            .build();
    mockTraceHandler.report(expectedSpan2);
    expectLastCall();
    mockTraceSpanLogsHandler.report(SpanLogs.newBuilder().setCustomer("default")
            .setTraceId("00000000-0000-0000-3822-889fe47043bd").setSpanId("00000000-0000-0000-3822-889fe47043bd")
            .setSpanSecondaryId("server")
            .setLogs(ImmutableList.of(SpanLog.newBuilder().setTimestamp(startTime * 1000)
                    .setFields(ImmutableMap.of("annotation", "start processing")).build()))
            .build());
    expectLastCall();
    replay(mockTraceHandler, mockTraceSpanLogsHandler);
    ChannelHandlerContext mockCtx = createNiceMock(ChannelHandlerContext.class);
    doMockLifecycle(mockCtx);
    FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST,
            "http://localhost:9411/api/v1/spans", content, true);
    handler.handleHttpMessage(mockCtx, httpRequest);
    verify(mockTraceHandler, mockTraceSpanLogsHandler);
}
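The only difference between the two spans posted above is their duration (4 ms vs. 9 ms), so with new DurationSampler(5) only the second span is expected to reach the trace handler. A minimal sketch of that decision, assuming a simple threshold comparison (the exact operator used inside DurationSampler is an assumption here):

    // Illustration only: a duration-threshold check matching the expectations above.
    static boolean sampleByDuration(long spanDurationMillis, long thresholdMillis) {
        return spanDurationMillis > thresholdMillis; // assumed strictly-greater: the 4 ms span is dropped, the 9 ms span is kept
    }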
Use of com.google.cloud.servicedirectory.v1.Endpoint in project wildfly-core by wildfly.
In class RemotingConnectorAdd, method performRuntime:
@Override
protected void performRuntime(OperationContext context, ModelNode operation, ModelNode model) throws OperationFailedException {
    boolean useManagementEndpoint = RemotingConnectorResource.USE_MANAGEMENT_ENDPOINT
            .resolveModelAttribute(context, model).asBoolean();
    ServiceName remotingCapability;
    if (!useManagementEndpoint) {
        // Use the remoting capability
        // if (context.getProcessType() == ProcessType.DOMAIN_SERVER) then DomainServerCommunicationServices
        // installed the "remoting subsystem" endpoint and we don't even necessarily *have to* have a remoting
        // subsystem and possibly we could skip adding the requirement for its capability. But really, specifying
        // not to use the management endpoint and then not configuring a remoting subsystem is a misconfiguration,
        // and we should treat it as such. So, we add the requirement no matter what.
        context.requireOptionalCapability(RemotingConnectorResource.REMOTING_CAPABILITY,
                RemotingConnectorResource.REMOTE_JMX_CAPABILITY.getName(),
                RemotingConnectorResource.USE_MANAGEMENT_ENDPOINT.getName());
        remotingCapability = context.getCapabilityServiceName(RemotingConnectorResource.REMOTING_CAPABILITY, Endpoint.class);
    } else {
        remotingCapability = ManagementRemotingServices.MANAGEMENT_ENDPOINT;
    }
    // Read the model for the JMX subsystem to find the domain name for the resolved/expressions models (if they are exposed).
    PathAddress address = PathAddress.pathAddress(operation.get(ModelDescriptionConstants.OP_ADDR));
    PathAddress parentAddress = address.subAddress(0, address.size() - 1);
    ModelNode jmxSubsystemModel = Resource.Tools.readModel(context.readResourceFromRoot(parentAddress, true));
    String resolvedDomain = getDomainName(context, jmxSubsystemModel, CommonAttributes.RESOLVED);
    String expressionsDomain = getDomainName(context, jmxSubsystemModel, CommonAttributes.EXPRESSION);
    RemotingConnectorService.addService(context.getServiceTarget(), remotingCapability, resolvedDomain, expressionsDomain);
}
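For context, the parent address is obtained by dropping the last element of the operation's address so the handler can read the model of the enclosing JMX subsystem. The concrete path elements in the sketch below are hypothetical examples, not taken from this snippet:

    // Hypothetical illustration of the subAddress(0, size - 1) computation above.
    PathAddress address = PathAddress.pathAddress(
            PathElement.pathElement("subsystem", "jmx"),
            PathElement.pathElement("remoting-connector", "jmx"));   // example address, 2 elements
    PathAddress parentAddress = address.subAddress(0, address.size() - 1); // keeps only /subsystem=jmx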
Use of com.google.cloud.servicedirectory.v1.Endpoint in project wildfly-core by wildfly.
In class ChannelServer, method create:
public static ChannelServer create(final Configuration configuration) throws IOException {
    checkNotNullParam("configuration", configuration);
    configuration.validate();
    // Hack WFCORE-3302/REM3-303 workaround
    if (firstCreate) {
        firstCreate = false;
    } else {
        try {
            // wait in case the previous socket has not closed
            Thread.sleep(100);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
    // TODO WFCORE-3302 -- Endpoint.getCurrent() should be ok
    final Endpoint endpoint = Endpoint.builder().setEndpointName(configuration.getEndpointName()).build();
    final NetworkServerProvider networkServerProvider =
            endpoint.getConnectionProviderInterface(configuration.getUriScheme(), NetworkServerProvider.class);
    final SecurityDomain.Builder domainBuilder = SecurityDomain.builder();
    final SimpleMapBackedSecurityRealm realm = new SimpleMapBackedSecurityRealm();
    realm.setPasswordMap("bob", ClearPassword.createRaw(ClearPassword.ALGORITHM_CLEAR, "pass".toCharArray()));
    domainBuilder.addRealm("default", realm).build();
    domainBuilder.setDefaultRealmName("default");
    domainBuilder.setPermissionMapper((permissionMappable, roles) -> PermissionVerifier.ALL);
    SecurityDomain testDomain = domainBuilder.build();
    SaslAuthenticationFactory saslAuthenticationFactory = SaslAuthenticationFactory.builder()
            .setSecurityDomain(testDomain)
            .setMechanismConfigurationSelector(mechanismInformation -> {
                switch (mechanismInformation.getMechanismName()) {
                    case "ANONYMOUS":
                    case "PLAIN": {
                        return MechanismConfiguration.EMPTY;
                    }
                    default:
                        return null;
                }
            })
            .setFactory(SaslFactories.getElytronSaslServerFactory())
            .build();
    AcceptingChannel<StreamConnection> streamServer = networkServerProvider.createServer(
            configuration.getBindAddress(), OptionMap.EMPTY, saslAuthenticationFactory, null);
    return new ChannelServer(endpoint, streamServer);
}
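A possible caller would look like the sketch below. The Configuration setter names are assumptions inferred from the getters used in create() (getEndpointName, getUriScheme, getBindAddress) and may not match the real test helper exactly:

    // Hypothetical usage sketch; setter names, scheme, and port are assumptions, not verified API.
    ChannelServer.Configuration configuration = new ChannelServer.Configuration();
    configuration.setEndpointName("test-endpoint");                          // assumed counterpart of getEndpointName()
    configuration.setUriScheme("remote");                                    // assumed counterpart of getUriScheme()
    configuration.setBindAddress(new InetSocketAddress("localhost", 12345)); // assumed counterpart of getBindAddress()
    ChannelServer channelServer = ChannelServer.create(configuration);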
Use of com.google.cloud.servicedirectory.v1.Endpoint in project wildfly-core by wildfly.
In class TestControllerUtils, method create:
static TestControllerUtils create(URI uri, CallbackHandler callbackHandler) throws IOException {
    final Endpoint endpoint = Endpoint.getCurrent();
    final ProtocolConnectionConfiguration configuration = ProtocolConnectionConfiguration.create(endpoint, uri);
    configuration.setCallbackHandler(callbackHandler);
    return new TestControllerUtils(endpoint, configuration, createDefaultExecutor());
}
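A hedged usage sketch follows; the URI scheme and port are illustrative assumptions, and the callback handler simply supplies no credentials:

    // Hypothetical usage; the URI and the no-op handler are assumptions for illustration only.
    URI uri = URI.create("remote://localhost:9999");
    CallbackHandler callbackHandler = callbacks -> { /* anonymous access: nothing to fill in */ };
    TestControllerUtils utils = TestControllerUtils.create(uri, callbackHandler);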