Search in sources :

Example 71 with Callback

use of org.apache.kafka.clients.producer.Callback in project storm by apache.

In the class KafkaBolt, the method process:

@Override
protected void process(final Tuple input) {
    // Extracts key/message/topic from the tuple and publishes to Kafka.
    // Ack/fail semantics depend on the async and fireAndForget flags:
    //  - sync (!async): block on the send future, then ack or fail here;
    //  - async + !fireAndForget: the producer callback acks/fails later;
    //  - async + fireAndForget: ack immediately without waiting for the broker.
    K key = null;
    V message = null;
    String topic = null;
    try {
        key = mapper.getKeyFromTuple(input);
        message = mapper.getMessageFromTuple(input);
        topic = topicSelector.getTopic(input);
        if (topic != null) {
            Callback callback = null;
            if (!fireAndForget && async) {
                callback = createProducerCallback(input);
            } else if (providedCallback != null) {
                callback = providedCallback;
            }
            Future<RecordMetadata> result = producer.send(new ProducerRecord<>(topic, key, message), callback);
            if (!async) {
                try {
                    // Block until the broker confirms (or rejects) the record.
                    result.get();
                    collector.ack(input);
                } catch (ExecutionException err) {
                    collector.reportError(err);
                    collector.fail(input);
                }
            } else if (fireAndForget) {
                collector.ack(input);
            }
        } else {
            // Parameterized logging avoids eager string concatenation (SLF4J idiom).
            LOG.warn("skipping key = {}, topic selector returned null.", key);
            collector.ack(input);
        }
    } catch (Exception ex) {
        collector.reportError(ex);
        collector.fail(input);
    }
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Callback(org.apache.kafka.clients.producer.Callback) ExecutionException(java.util.concurrent.ExecutionException)

Example 72 with Callback

use of org.apache.kafka.clients.producer.Callback in project kafka by apache.

In the class KafkaStatusBackingStoreFormatTest, the method putTopicStateShouldOverridePreviousState:

@Test
public void putTopicStateShouldOverridePreviousState() {
    // Two statuses for the same connector; the second is created 1s later for a different topic.
    TopicStatus olderStatus = new TopicStatus(FOO_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());
    time.sleep(1000);
    TopicStatus newerStatus = new TopicStatus(BAR_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());
    String olderKey = TOPIC_STATUS_PREFIX + FOO_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    String newerKey = TOPIC_STATUS_PREFIX + BAR_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    ArgumentCaptor<byte[]> serializedValue = ArgumentCaptor.forClass(byte[].class);
    // When the store sends the newer key, complete the producer callback successfully and
    // feed the written record straight back into the store, as the log reader would.
    doAnswer(invocation -> {
        ((Callback) invocation.getArgument(2)).onCompletion(null, null);
        store.read(new ConsumerRecord<>(STATUS_TOPIC, 0, 0, newerKey, serializedValue.getValue()));
        return null;
    }).when(kafkaBasedLog).send(eq(newerKey), serializedValue.capture(), any(Callback.class));
    byte[] olderPayload = store.serializeTopicStatus(olderStatus);
    store.read(new ConsumerRecord<>(STATUS_TOPIC, 0, 0, olderKey, olderPayload));
    store.put(newerStatus);
    // The captured payload round-trips to the newer status, and both statuses are visible.
    assertEquals(newerStatus, store.parseTopicStatus(serializedValue.getValue()));
    assertEquals(olderStatus, store.getTopic(FOO_CONNECTOR, FOO_TOPIC));
    assertEquals(newerStatus, store.getTopic(FOO_CONNECTOR, BAR_TOPIC));
    assertEquals(new HashSet<>(Arrays.asList(olderStatus, newerStatus)), new HashSet<>(store.getAllTopics(FOO_CONNECTOR)));
}
Also used : Callback(org.apache.kafka.clients.producer.Callback) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) TopicStatus(org.apache.kafka.connect.runtime.TopicStatus) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Test(org.junit.Test)

Example 73 with Callback

use of org.apache.kafka.clients.producer.Callback in project kafka by apache.

In the class KafkaStatusBackingStoreFormatTest, the method putTopicState:

@Test
public void putTopicState() {
    TopicStatus status = new TopicStatus(FOO_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());
    String statusKey = TOPIC_STATUS_PREFIX + FOO_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    ArgumentCaptor<byte[]> payload = ArgumentCaptor.forClass(byte[].class);
    // Complete the producer callback successfully as soon as the store writes.
    doAnswer(invocation -> {
        ((Callback) invocation.getArgument(2)).onCompletion(null, null);
        return null;
    }).when(kafkaBasedLog).send(eq(statusKey), payload.capture(), any(Callback.class));
    store.put(status);
    // The serialized payload must round-trip back to the original status.
    assertEquals(status, store.parseTopicStatus(payload.getValue()));
    // A put alone does not make the status visible; it must be read back from the log.
    assertNull(store.getTopic(FOO_CONNECTOR, FOO_TOPIC));
    store.read(new ConsumerRecord<>(STATUS_TOPIC, 0, 0, statusKey, payload.getValue()));
    assertEquals(status, store.getTopic(FOO_CONNECTOR, FOO_TOPIC));
    assertEquals(Collections.singleton(status), new HashSet<>(store.getAllTopics(FOO_CONNECTOR)));
}
Also used : Callback(org.apache.kafka.clients.producer.Callback) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) TopicStatus(org.apache.kafka.connect.runtime.TopicStatus) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Test(org.junit.Test)

Example 74 with Callback

use of org.apache.kafka.clients.producer.Callback in project kafka by apache.

In the class KafkaStatusBackingStoreFormatTest, the method putTopicStateNonRetriableFailure:

@Test
public void putTopicStateNonRetriableFailure() {
    TopicStatus status = new TopicStatus(FOO_TOPIC, new ConnectorTaskId(FOO_CONNECTOR, 0), time.milliseconds());
    String statusKey = TOPIC_STATUS_PREFIX + FOO_TOPIC + TOPIC_STATUS_SEPARATOR + FOO_CONNECTOR;
    ArgumentCaptor<byte[]> payload = ArgumentCaptor.forClass(byte[].class);
    // Fail the producer callback with a non-retriable error.
    doAnswer(invocation -> {
        ((Callback) invocation.getArgument(2)).onCompletion(null, new UnknownServerException());
        return null;
    }).when(kafkaBasedLog).send(eq(statusKey), payload.capture(), any(Callback.class));
    // The store logs and swallows the failure rather than throwing.
    store.put(status);
    // The value was still serialized and handed to the log...
    assertEquals(status, store.parseTopicStatus(payload.getValue()));
    // ...but the failed write never becomes visible in the store.
    assertNull(store.getTopic(FOO_CONNECTOR, FOO_TOPIC));
}
Also used : Callback(org.apache.kafka.clients.producer.Callback) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) TopicStatus(org.apache.kafka.connect.runtime.TopicStatus) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) Test(org.junit.Test)

Example 75 with Callback

use of org.apache.kafka.clients.producer.Callback in project kafka by apache.

In the class ProducerManager, the method publishMessage:

/**
 * Returns {@link CompletableFuture} which will complete only after publishing of the given {@code remoteLogMetadata}
 * is considered complete.
 *
 * @param remoteLogMetadata RemoteLogMetadata to be published
 * @return a future completed with the {@link RecordMetadata} of the produced record,
 *         or completed exceptionally if the send fails or cannot be attempted
 */
public CompletableFuture<RecordMetadata> publishMessage(RemoteLogMetadata remoteLogMetadata) {
    CompletableFuture<RecordMetadata> future = new CompletableFuture<>();
    TopicIdPartition topicIdPartition = remoteLogMetadata.topicIdPartition();
    int metadataPartitionNum = topicPartitioner.metadataPartition(topicIdPartition);
    log.debug("Publishing metadata message of partition:[{}] into metadata topic partition:[{}] with payload: [{}]", topicIdPartition, metadataPartitionNum, remoteLogMetadata);
    if (metadataPartitionNum >= rlmmConfig.metadataTopicPartitionsCount()) {
        // This should never occur as long as metadata partitions always remain the same.
        throw new KafkaException("Chosen partition no " + metadataPartitionNum + " must be less than the partition count: " + rlmmConfig.metadataTopicPartitionsCount());
    }
    try {
        // Callback is a functional interface; a lambda is clearer than an anonymous class.
        Callback callback = (metadata, exception) -> {
            if (exception != null) {
                future.completeExceptionally(exception);
            } else {
                future.complete(metadata);
            }
        };
        producer.send(new ProducerRecord<>(rlmmConfig.remoteLogMetadataTopicName(), metadataPartitionNum, null, serde.serialize(remoteLogMetadata)), callback);
    } catch (Exception ex) {
        // Serialization or producer failures surface through the returned future.
        future.completeExceptionally(ex);
    }
    return future;
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) CompletableFuture(java.util.concurrent.CompletableFuture) Callback(org.apache.kafka.clients.producer.Callback) KafkaException(org.apache.kafka.common.KafkaException) TopicIdPartition(org.apache.kafka.common.TopicIdPartition)

Aggregations

Callback (org.apache.kafka.clients.producer.Callback)81 Test (org.junit.Test)47 ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)39 RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata)37 KafkaException (org.apache.kafka.common.KafkaException)21 Future (java.util.concurrent.Future)18 TimeoutException (org.apache.kafka.common.errors.TimeoutException)18 ExecutionException (java.util.concurrent.ExecutionException)15 ArrayList (java.util.ArrayList)14 List (java.util.List)13 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)13 MockProducer (org.apache.kafka.clients.producer.MockProducer)13 HashMap (java.util.HashMap)12 Properties (java.util.Properties)12 DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner)12 TopicPartition (org.apache.kafka.common.TopicPartition)12 Schema (org.apache.kafka.connect.data.Schema)12 Struct (org.apache.kafka.connect.data.Struct)12 KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer)11 StreamsException (org.apache.kafka.streams.errors.StreamsException)11