Use of org.apache.kafka.clients.producer.RecordMetadata in project brave by openzipkin.
The class TracingCallbackTest, method on_completion_should_forward_then_finish_span.
@Test
public void on_completion_should_forward_then_finish_span() {
  Span span = tracing.tracer().nextSpan().start();
  Callback delegate = mock(Callback.class);
  Callback tracingCallback = TracingCallback.create(delegate, span, currentTraceContext);
  RecordMetadata md = createRecordMetadata();

  tracingCallback.onCompletion(md, null);

  verify(delegate).onCompletion(md, null);
  assertThat(spans.get(0).finishTimestamp()).isNotZero();
}
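The test relies on a createRecordMetadata() helper that the snippet does not show. A minimal sketch, assuming a pre-3.0 kafka-clients version where the constructor taking a checksum is still available (topic name, offsets, and sizes are placeholder values):

static RecordMetadata createRecordMetadata() {
    TopicPartition tp = new TopicPartition("test-topic", 0);
    // base offset, relative offset, timestamp, checksum, serialized key/value sizes
    return new RecordMetadata(tp, -1L, -1L, 1L, 2L, 3, 4);
}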
Use of org.apache.kafka.clients.producer.RecordMetadata in project hono by eclipse.
The class KafkaBasedCommandSenderTest, method sendCommandAndReceiveResponse.
private void sendCommandAndReceiveResponse(
        final VertxTestContext ctx,
        final String correlationId,
        final Integer responseStatus,
        final String responsePayload,
        final boolean expectSuccess,
        final int expectedStatusCode) {
    final Context context = vertx.getOrCreateContext();
    final Promise<Void> onProducerRecordSentPromise = Promise.promise();
    mockProducer = new MockProducer<>(true, new StringSerializer(), new BufferSerializer()) {
        @Override
        public synchronized java.util.concurrent.Future<RecordMetadata> send(
                final ProducerRecord<String, Buffer> record, final Callback callback) {
            return super.send(record, (metadata, exception) -> {
                callback.onCompletion(metadata, exception);
                context.runOnContext(v -> {
                    // decouple from current execution in order to run after the "send" result handler
                    onProducerRecordSentPromise.complete();
                });
            });
        }
    };
    final var producerFactory = CachingKafkaProducerFactory.testFactory(
            vertx, (n, c) -> KafkaClientUnitTestHelper.newKafkaProducer(mockProducer));
    commandSender = new KafkaBasedCommandSender(
            vertx, consumerConfig, producerFactory, producerConfig, NoopTracerFactory.create());
    final Map<String, Object> headerProperties = new HashMap<>();
    headerProperties.put("appKey", "appValue");
    final String command = "setVolume";
    final ConsumerRecord<String, Buffer> commandResponseRecord = commandResponseRecord(
            tenantId, deviceId, correlationId, responseStatus, Buffer.buffer(responsePayload));
    final String responseTopic = new HonoTopic(HonoTopic.Type.COMMAND_RESPONSE, tenantId).toString();
    final TopicPartition responseTopicPartition = new TopicPartition(responseTopic, 0);
    mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(responseTopicPartition));
    mockConsumer.updatePartitions(responseTopicPartition, KafkaMockConsumer.DEFAULT_NODE);
    mockConsumer.updateBeginningOffsets(Map.of(responseTopicPartition, 0L));
    mockConsumer.updateEndOffsets(Map.of(responseTopicPartition, 0L));
    onProducerRecordSentPromise.future().onComplete(ar -> {
        LOG.debug("producer record sent, add command response record to mockConsumer");
        // Send a command response with the same correlation id as that of the command.
        mockConsumer.addRecord(commandResponseRecord);
    });
    // This correlation id is used for both the command and its response.
    commandSender.setCorrelationIdSupplier(() -> correlationId);
    commandSender.setKafkaConsumerSupplier(() -> mockConsumer);
    context.runOnContext(v -> {
        // Send a command to the device.
        commandSender.sendCommand(tenantId, deviceId, command, "text/plain", Buffer.buffer("test"), headerProperties)
                .onComplete(ar -> {
                    ctx.verify(() -> {
                        if (expectSuccess) {
                            // assert that the send operation succeeded
                            assertThat(ar.succeeded()).isTrue();
                            // verify the command response that has been received
                            final DownstreamMessage<KafkaMessageContext> response = ar.result();
                            assertThat(response.getDeviceId()).isEqualTo(deviceId);
                            assertThat(response.getStatus()).isEqualTo(responseStatus);
                            assertThat(response.getPayload().toString()).isEqualTo(responsePayload);
                        } else {
                            // assert that the send operation failed
                            assertThat(ar.succeeded()).isFalse();
                            assertThat(ar.cause()).isInstanceOf(ServiceInvocationException.class);
                            assertThat(((ServiceInvocationException) ar.cause()).getErrorCode())
                                    .isEqualTo(expectedStatusCode);
                            assertThat(ar.cause().getMessage()).isEqualTo(responsePayload);
                        }
                    });
                    ctx.completeNow();
                    mockConsumer.close();
                    commandSender.stop();
                });
    });
}
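For reference, the helper might be driven like this from a test method (correlation id, payloads, and status codes are illustrative):

// Success path: the device answers with status 200 and the payload is returned to the caller.
sendCommandAndReceiveResponse(ctx, "my-correlation-id", 200, "ok", true, 200);
// Failure path: a 400 response is mapped to a ServiceInvocationException with that error code.
sendCommandAndReceiveResponse(ctx, "my-correlation-id", 400, "malformed command", false, 400);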
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
The class RecordCollectorImpl, method send.
@Override
public <K, V> void send(final String topic,
                        K key,
                        V value,
                        Integer partition,
                        Long timestamp,
                        Serializer<K> keySerializer,
                        Serializer<V> valueSerializer,
                        StreamPartitioner<? super K, ? super V> partitioner) {
    checkForException();
    byte[] keyBytes = keySerializer.serialize(topic, key);
    byte[] valBytes = valueSerializer.serialize(topic, value);
    if (partition == null && partitioner != null) {
        List<PartitionInfo> partitions = this.producer.partitionsFor(topic);
        if (partitions != null && partitions.size() > 0)
            partition = partitioner.partition(key, value, partitions.size());
    }
    ProducerRecord<byte[], byte[]> serializedRecord = new ProducerRecord<>(topic, partition, timestamp, keyBytes, valBytes);
    for (int attempt = 1; attempt <= MAX_SEND_ATTEMPTS; attempt++) {
        try {
            this.producer.send(serializedRecord, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception == null) {
                        if (sendException != null) {
                            return;
                        }
                        TopicPartition tp = new TopicPartition(metadata.topic(), metadata.partition());
                        offsets.put(tp, metadata.offset());
                    } else {
                        sendException = exception;
                        log.error("{} Error sending record to topic {}. No more offsets will be recorded for this task and the exception will eventually be thrown", logPrefix, topic, exception);
                    }
                }
            });
            return;
        } catch (TimeoutException e) {
            if (attempt == MAX_SEND_ATTEMPTS) {
                throw new StreamsException(String.format("%s Failed to send record to topic %s after %d attempts", logPrefix, topic, attempt));
            }
            log.warn("{} Timeout exception caught when sending record to topic {} attempt {}", logPrefix, topic, attempt);
            Utils.sleep(SEND_RETRY_BACKOFF);
        }
    }
}
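The callback above records failures in a sendException field, and checkForException() rethrows them on the caller's thread during the next send. A minimal sketch of that mechanism (field declaration and message wording are assumptions, not the exact Kafka source):

private volatile Exception sendException;

private void checkForException() {
    // Rethrow an asynchronous send failure before accepting further records,
    // so the error eventually surfaces to the stream thread.
    if (sendException != null) {
        throw new StreamsException(logPrefix + " exception caught when producing", sendException);
    }
}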
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
The class ProducerBatch, method done.
/**
 * Complete the request.
 *
 * @param baseOffset The base offset of the messages assigned by the server
 * @param logAppendTime The log append time or -1 if CreateTime is being used
 * @param exception The exception that occurred (or null if the request was successful)
 */
public void done(long baseOffset, long logAppendTime, RuntimeException exception) {
    log.trace("Produced messages to topic-partition {} with base offset {} and error: {}.", topicPartition, baseOffset, exception);
    if (completed.getAndSet(true))
        throw new IllegalStateException("Batch has already been completed");
    // Set the future before invoking the callbacks as we rely on its state for the `onCompletion` call
    produceFuture.set(baseOffset, logAppendTime, exception);
    // execute callbacks
    for (Thunk thunk : thunks) {
        try {
            if (exception == null) {
                RecordMetadata metadata = thunk.future.value();
                thunk.callback.onCompletion(metadata, null);
            } else {
                thunk.callback.onCompletion(null, exception);
            }
        } catch (Exception e) {
            log.error("Error executing user-provided callback on message for topic-partition '{}'", topicPartition, e);
        }
    }
    produceFuture.done();
}
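A Thunk simply pairs a user callback with the per-record future that produceFuture completes; roughly:

// One entry per record appended to the batch: the user's callback plus
// the FutureRecordMetadata that yields the record's metadata once the
// batch-level future has been completed.
private static final class Thunk {
    final Callback callback;
    final FutureRecordMetadata future;

    Thunk(Callback callback, FutureRecordMetadata future) {
        this.callback = callback;
        this.future = future;
    }
}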
Use of org.apache.kafka.clients.producer.RecordMetadata in project hadoop by apache.
The class KafkaSink, method putMetrics.
@Override
public void putMetrics(MetricsRecord record) {
    if (producer == null) {
        throw new MetricsException("Producer in KafkaSink is null!");
    }
    // Create the json object.
    StringBuilder jsonLines = new StringBuilder();
    long timestamp = record.timestamp();
    Instant instant = Instant.ofEpochMilli(timestamp);
    LocalDateTime ldt = LocalDateTime.ofInstant(instant, zoneId);
    String date = ldt.format(dateFormat);
    String time = ldt.format(timeFormat);
    // Collect datapoints and populate the json object.
    jsonLines.append("{\"hostname\": \"" + hostname);
    jsonLines.append("\", \"timestamp\": " + timestamp);
    jsonLines.append(", \"date\": \"" + date);
    jsonLines.append("\",\"time\": \"" + time);
    jsonLines.append("\",\"name\": \"" + record.name() + "\" ");
    for (MetricsTag tag : record.tags()) {
        jsonLines.append(", \"" + tag.name().toString().replaceAll("[\\p{Cc}]", "") + "\": ");
        jsonLines.append(" \"" + tag.value().toString() + "\"");
    }
    for (AbstractMetric metric : record.metrics()) {
        jsonLines.append(", \"" + metric.name().toString().replaceAll("[\\p{Cc}]", "") + "\": ");
        jsonLines.append(" \"" + metric.value().toString() + "\"");
    }
    jsonLines.append("}");
    LOG.debug("kafka message: " + jsonLines.toString());
    // Create the record to be sent from the json.
    ProducerRecord<Integer, byte[]> data = new ProducerRecord<Integer, byte[]>(
        topic, jsonLines.toString().getBytes(Charset.forName("UTF-8")));
    // Send the data to the Kafka broker. Here is an example of this data:
    // {"hostname": "...", "timestamp": 1436913651516,
    // "date": "2015-6-14", "time": "22:40:51", "context": "yarn", "name":
    // "QueueMetrics", "running_0": "1", "running_60": "0", "running_300": "0",
    // "running_1440": "0", "AppsSubmitted": "1", "AppsRunning": "1",
    // "AppsPending": "0", "AppsCompleted": "0", "AppsKilled": "0",
    // "AppsFailed": "0", "AllocatedMB": "134656", "AllocatedVCores": "132",
    // "AllocatedContainers": "132", "AggregateContainersAllocated": "132",
    // "AggregateContainersReleased": "0", "AvailableMB": "0",
    // "AvailableVCores": "0", "PendingMB": "275456", "PendingVCores": "269",
    // "PendingContainers": "269", "ReservedMB": "0", "ReservedVCores": "0",
    // "ReservedContainers": "0", "ActiveUsers": "1", "ActiveApplications": "1"}
    Future<RecordMetadata> future = producer.send(data);
    jsonLines.setLength(0);
    try {
        future.get();
    } catch (InterruptedException e) {
        throw new MetricsException("Error sending data", e);
    } catch (ExecutionException e) {
        throw new MetricsException("Error sending data", e);
    }
}
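The producer itself is set up during sink initialization; a minimal sketch of a compatible configuration (the broker address is a placeholder, and the real sink reads these settings from the Hadoop metrics2 configuration):

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
    "org.apache.kafka.common.serialization.IntegerSerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
    "org.apache.kafka.common.serialization.ByteArraySerializer");
Producer<Integer, byte[]> producer = new KafkaProducer<>(props);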