Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Metadata in project pinpoint by naver.
The class SimpleRejectedExecutionListener, method idleTimeout.
private void idleTimeout() {
    logger.info("stream idle timeout applicationName:{} agentId:{} {}",
            serverCall.getApplicationName(), serverCall.getAgentId(), this.name);
    serverCall.cancel(STREAM_IDLE_TIMEOUT, new Metadata());
}
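The STREAM_IDLE_TIMEOUT constant is not shown above. A minimal sketch of how such a Status could be defined, assuming the io.grpc Status API; the actual code and description used by Pinpoint may differ:

import io.grpc.Status;

// Hypothetical definition: CANCELLED signals that the server is tearing the
// stream down; the description surfaces in the client-side error.
private static final Status STREAM_IDLE_TIMEOUT =
        Status.CANCELLED.withDescription("stream idle timeout");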
Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Metadata in project pinpoint by naver.
The class HeaderPropagationInterceptor, method interceptCall.
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
        final ServerCall<ReqT, RespT> call, Metadata headers, ServerCallHandler<ReqT, RespT> next) {
    Header headerObject;
    try {
        headerObject = headerReader.extract(headers);
    } catch (Exception e) {
        if (logger.isInfoEnabled()) {
            logger.info("Header extract fail cause={}, method={} headers={}, attr={}",
                    e.getMessage(), call.getMethodDescriptor().getFullMethodName(),
                    headers, call.getAttributes(), e);
        }
        call.close(Status.INVALID_ARGUMENT.withDescription(e.getMessage()), new Metadata());
        return new ServerCall.Listener<ReqT>() {
        };
    }
    final Context currentContext = Context.current();
    final Context newContext = currentContext.withValue(contextKey, headerObject);
    if (logger.isDebugEnabled()) {
        logger.debug("headerPropagation method={}, headers={}, attr={}",
                call.getMethodDescriptor().getFullMethodName(), headers, call.getAttributes());
    }
    return Contexts.interceptCall(newContext, call, headers, next);
}
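To use the interceptor, bind it to a service and read the propagated Header from the gRPC Context downstream. A minimal wiring sketch using standard io.grpc APIs; the key name and the interceptor's constructor signature are assumptions, not Pinpoint's actual code:

import io.grpc.Context;
import io.grpc.ServerInterceptors;
import io.grpc.ServerServiceDefinition;

// Hypothetical key name and constructor; Pinpoint's real wiring may differ.
Context.Key<Header> contextKey = Context.key("agent-header");
HeaderPropagationInterceptor interceptor =
        new HeaderPropagationInterceptor(headerReader, contextKey); // assumed constructor

// Wrap a service so every call runs through interceptCall first.
ServerServiceDefinition intercepted = ServerInterceptors.intercept(myService, interceptor);

// Inside any handler on the same call, the extracted header is available:
Header header = contextKey.get(); // set via Context.withValue in interceptCall

Because Contexts.interceptCall attaches the new Context for all listener callbacks, downstream code never touches raw Metadata; it reads the typed Header from the Context key instead.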
Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Metadata in project beam by apache.
The class KafkaIOExternalTest, method testConstructKafkaReadWithoutMetadata.
@Test
public void testConstructKafkaReadWithoutMetadata() throws Exception {
    List<String> topics = ImmutableList.of("topic1", "topic2");
    String keyDeserializer = "org.apache.kafka.common.serialization.ByteArrayDeserializer";
    String valueDeserializer = "org.apache.kafka.common.serialization.LongDeserializer";
    ImmutableMap<String, String> consumerConfig =
            ImmutableMap.<String, String>builder()
                    .put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "server1:port,server2:port")
                    .put("key2", "value2")
                    .put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer)
                    .put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer)
                    .build();
    Long startReadTime = 100L;
    ExternalTransforms.ExternalConfigurationPayload payload =
            encodeRow(
                    Row.withSchema(
                                    Schema.of(
                                            Field.of("topics", FieldType.array(FieldType.STRING)),
                                            Field.of("consumer_config", FieldType.map(FieldType.STRING, FieldType.STRING)),
                                            Field.of("key_deserializer", FieldType.STRING),
                                            Field.of("value_deserializer", FieldType.STRING),
                                            Field.of("start_read_time", FieldType.INT64),
                                            Field.of("commit_offset_in_finalize", FieldType.BOOLEAN),
                                            Field.of("timestamp_policy", FieldType.STRING)))
                            .withFieldValue("topics", topics)
                            .withFieldValue("consumer_config", consumerConfig)
                            .withFieldValue("key_deserializer", keyDeserializer)
                            .withFieldValue("value_deserializer", valueDeserializer)
                            .withFieldValue("start_read_time", startReadTime)
                            .withFieldValue("commit_offset_in_finalize", false)
                            .withFieldValue("timestamp_policy", "ProcessingTime")
                            .build());
    RunnerApi.Components defaultInstance = RunnerApi.Components.getDefaultInstance();
    ExpansionApi.ExpansionRequest request =
            ExpansionApi.ExpansionRequest.newBuilder()
                    .setComponents(defaultInstance)
                    .setTransform(
                            RunnerApi.PTransform.newBuilder()
                                    .setUniqueName("test")
                                    .setSpec(
                                            RunnerApi.FunctionSpec.newBuilder()
                                                    .setUrn(org.apache.beam.sdk.io.kafka.KafkaIO.Read.External.URN_WITHOUT_METADATA)
                                                    .setPayload(payload.toByteString())))
                    .setNamespace("test_namespace")
                    .build();
    ExpansionService expansionService = new ExpansionService();
    TestStreamObserver<ExpansionApi.ExpansionResponse> observer = new TestStreamObserver<>();
    expansionService.expand(request, observer);
    ExpansionApi.ExpansionResponse result = observer.result;
    RunnerApi.PTransform transform = result.getTransform();
    assertThat(transform.getSubtransformsList(), Matchers.hasItem(MatchesPattern.matchesPattern(".*KafkaIO-Read.*")));
    assertThat(transform.getSubtransformsList(), Matchers.hasItem(MatchesPattern.matchesPattern(".*Remove-Kafka-Metadata.*")));
    assertThat(transform.getInputsCount(), Matchers.is(0));
    assertThat(transform.getOutputsCount(), Matchers.is(1));
    RunnerApi.PTransform kafkaReadComposite =
            result.getComponents().getTransformsOrThrow(transform.getSubtransforms(0));
    verifyKafkaReadComposite(
            result.getComponents().getTransformsOrThrow(kafkaReadComposite.getSubtransforms(0)), result);
}
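TestStreamObserver is a small helper defined in the test class and is not shown here. A minimal sketch of what it might look like, assuming it implements the vendored StreamObserver interface and exposes the last response through a result field, as the test's observer.result access suggests:

import org.apache.beam.vendor.grpc.v1p43p2.io.grpc.stub.StreamObserver;

// Hypothetical helper; the real test class may record errors differently.
private static class TestStreamObserver<T> implements StreamObserver<T> {
    T result;

    @Override
    public void onNext(T value) {
        result = value; // keep the most recent response
    }

    @Override
    public void onError(Throwable t) {
        throw new RuntimeException("expansion failed", t);
    }

    @Override
    public void onCompleted() {}
}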
Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Metadata in project beam by apache.
The class BigQueryServicesImplTest, method testRetryAttemptCounter.
@Test
public void testRetryAttemptCounter() {
    BigQueryServicesImpl.StorageClientImpl.RetryAttemptCounter counter =
            new BigQueryServicesImpl.StorageClientImpl.RetryAttemptCounter();
    RetryInfo retryInfo =
            RetryInfo.newBuilder()
                    .setRetryDelay(
                            com.google.protobuf.Duration.newBuilder().setSeconds(123).setNanos(456000000).build())
                    .build();
    Metadata metadata = new Metadata();
    metadata.put(
            Metadata.Key.of(
                    "google.rpc.retryinfo-bin",
                    new Metadata.BinaryMarshaller<RetryInfo>() {
                        @Override
                        public byte[] toBytes(RetryInfo value) {
                            return value.toByteArray();
                        }

                        @Override
                        public RetryInfo parseBytes(byte[] serialized) {
                            try {
                                Parser<RetryInfo> parser = RetryInfo.newBuilder().build().getParserForType();
                                return parser.parseFrom(serialized);
                            } catch (Exception e) {
                                return null;
                            }
                        }
                    }),
            retryInfo);
    MetricName metricName =
            MetricName.named(
                    "org.apache.beam.sdk.io.gcp.bigquery.BigQueryServicesImpl$StorageClientImpl",
                    "throttling-msecs");
    MetricsContainerImpl container = (MetricsContainerImpl) MetricsEnvironment.getCurrentContainer();

    // Nulls don't bump the counter.
    counter.onRetryAttempt(null, null);
    assertEquals(0, (long) container.getCounter(metricName).getCumulative());

    // Resource exhausted with empty metadata doesn't bump the counter.
    counter.onRetryAttempt(
            Status.RESOURCE_EXHAUSTED.withDescription("You have consumed some quota"), new Metadata());
    assertEquals(0, (long) container.getCounter(metricName).getCumulative());

    // Resource exhausted with retry info bumps the counter.
    counter.onRetryAttempt(Status.RESOURCE_EXHAUSTED.withDescription("Stop for a while"), metadata);
    assertEquals(123456, (long) container.getCounter(metricName).getCumulative());

    // Other errors with retry info don't bump the counter.
    counter.onRetryAttempt(Status.UNAVAILABLE.withDescription("Server is gone"), metadata);
    assertEquals(123456, (long) container.getCounter(metricName).getCumulative());
}
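The assertions pin down what onRetryAttempt must do: only RESOURCE_EXHAUSTED statuses carrying google.rpc.retryinfo-bin trailers count toward throttled milliseconds. A hedged sketch of logic consistent with this test, not Beam's actual implementation; RETRY_INFO_KEY and retryInfoMarshaller() are assumed helpers:

import org.apache.beam.sdk.metrics.Metrics;

// Assumed key; retryInfoMarshaller() stands in for the same
// BinaryMarshaller<RetryInfo> shown in the test above.
private static final Metadata.Key<RetryInfo> RETRY_INFO_KEY =
        Metadata.Key.of("google.rpc.retryinfo-bin", retryInfoMarshaller());

public void onRetryAttempt(Status status, Metadata metadata) {
    if (status != null
            && status.getCode() == Status.Code.RESOURCE_EXHAUSTED
            && metadata != null
            && metadata.containsKey(RETRY_INFO_KEY)) {
        RetryInfo retryInfo = metadata.get(RETRY_INFO_KEY);
        // 123s + 456000000ns -> 123456ms, matching the assertion above.
        long millis =
                retryInfo.getRetryDelay().getSeconds() * 1000
                        + retryInfo.getRetryDelay().getNanos() / 1_000_000;
        Metrics.counter(BigQueryServicesImpl.StorageClientImpl.class, "throttling-msecs").inc(millis);
    }
}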
Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Metadata in project beam by apache.
The class AddHarnessIdInterceptor, method create.
public static ClientInterceptor create(String harnessId) {
    checkArgument(harnessId != null, "harnessId must not be null");
    Metadata md = new Metadata();
    md.put(ID_KEY, harnessId);
    return MetadataUtils.newAttachHeadersInterceptor(md);
}
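ID_KEY is defined elsewhere in the class. A sketch of the presumed key definition and of attaching the interceptor to a channel; the header name and target are assumptions:

import org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel;
import org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannelBuilder;
import org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Metadata;

// Assumed definition; the actual header name in Beam may differ.
private static final Metadata.Key<String> ID_KEY =
        Metadata.Key.of("worker_id", Metadata.ASCII_STRING_MARSHALLER);

// Usage sketch: every RPC on this channel carries the harness id header.
ManagedChannel channel =
        ManagedChannelBuilder.forTarget("localhost:8765") // hypothetical target
                .usePlaintext()
                .intercept(AddHarnessIdInterceptor.create("harness-1"))
                .build();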