Example 56 with ProducerRecord

use of org.apache.kafka.clients.producer.ProducerRecord in project nakadi by zalando.

the class KafkaTopicRepository method publishItem.

private static CompletableFuture<Exception> publishItem(final Producer<String, String> producer, final String topicId, final BatchItem item, final HystrixKafkaCircuitBreaker circuitBreaker) throws EventPublishingException {
    try {
        final CompletableFuture<Exception> result = new CompletableFuture<>();
        final ProducerRecord<String, String> kafkaRecord = new ProducerRecord<>(topicId, KafkaCursor.toKafkaPartition(item.getPartition()), item.getPartition(), item.dumpEventToString());
        circuitBreaker.markStart();
        producer.send(kafkaRecord, ((metadata, exception) -> {
            if (null != exception) {
                LOG.warn("Failed to publish to kafka topic {}", topicId, exception);
                item.updateStatusAndDetail(EventPublishingStatus.FAILED, "internal error");
                if (hasKafkaConnectionException(exception)) {
                    circuitBreaker.markFailure();
                } else {
                    circuitBreaker.markSuccessfully();
                }
                result.complete(exception);
            } else {
                item.updateStatusAndDetail(EventPublishingStatus.SUBMITTED, "");
                circuitBreaker.markSuccessfully();
                result.complete(null);
            }
        }));
        return result;
    } catch (final InterruptException e) {
        Thread.currentThread().interrupt();
        circuitBreaker.markSuccessfully();
        item.updateStatusAndDetail(EventPublishingStatus.FAILED, "internal error");
        throw new EventPublishingException("Error publishing message to kafka", e);
    } catch (final RuntimeException e) {
        circuitBreaker.markSuccessfully();
        item.updateStatusAndDetail(EventPublishingStatus.FAILED, "internal error");
        throw new EventPublishingException("Error publishing message to kafka", e);
    }
}
Also used : EventPublishingException(org.zalando.nakadi.exceptions.EventPublishingException) NotLeaderForPartitionException(org.apache.kafka.common.errors.NotLeaderForPartitionException) Collections.unmodifiableList(java.util.Collections.unmodifiableList) LoggerFactory(org.slf4j.LoggerFactory) TimeoutException(java.util.concurrent.TimeoutException) TopicRepositoryException(org.zalando.nakadi.exceptions.runtime.TopicRepositoryException) PARTITION_NOT_FOUND(org.zalando.nakadi.domain.CursorError.PARTITION_NOT_FOUND) ServiceUnavailableException(org.zalando.nakadi.exceptions.ServiceUnavailableException) Map(java.util.Map) RetryForSpecifiedTimeStrategy(org.echocat.jomon.runtime.concurrent.RetryForSpecifiedTimeStrategy) Consumer(org.apache.kafka.clients.consumer.Consumer) ZooKeeperHolder(org.zalando.nakadi.repository.zookeeper.ZooKeeperHolder) TopicPartition(org.apache.kafka.common.TopicPartition) TopicRepository(org.zalando.nakadi.repository.TopicRepository) Retryer(org.echocat.jomon.runtime.concurrent.Retryer) Collection(java.util.Collection) PartitionStatistics(org.zalando.nakadi.domain.PartitionStatistics) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ConfigType(kafka.server.ConfigType) PartitionInfo(org.apache.kafka.common.PartitionInfo) InvalidCursorException(org.zalando.nakadi.exceptions.InvalidCursorException) Collectors(java.util.stream.Collectors) TopicDeletionException(org.zalando.nakadi.exceptions.TopicDeletionException) Objects(java.util.Objects) ZkUtils(kafka.utils.ZkUtils) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) List(java.util.List) Stream(java.util.stream.Stream) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) Timeline(org.zalando.nakadi.domain.Timeline) ZookeeperSettings(org.zalando.nakadi.repository.zookeeper.ZookeeperSettings) NULL_OFFSET(org.zalando.nakadi.domain.CursorError.NULL_OFFSET) BatchItem(org.zalando.nakadi.domain.BatchItem) Optional(java.util.Optional) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) AdminUtils(kafka.admin.AdminUtils) IntStream(java.util.stream.IntStream) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) NetworkException(org.apache.kafka.common.errors.NetworkException) NakadiCursor(org.zalando.nakadi.domain.NakadiCursor) NakadiSettings(org.zalando.nakadi.config.NakadiSettings) TopicCreationException(org.zalando.nakadi.exceptions.TopicCreationException) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) TopicConfigException(org.zalando.nakadi.exceptions.runtime.TopicConfigException) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) ArrayList(java.util.ArrayList) ConcurrentMap(java.util.concurrent.ConcurrentMap) UUIDGenerator(org.zalando.nakadi.util.UUIDGenerator) InterruptException(org.apache.kafka.common.errors.InterruptException) EventPublishingStep(org.zalando.nakadi.domain.EventPublishingStep) Nullable(javax.annotation.Nullable) UNAVAILABLE(org.zalando.nakadi.domain.CursorError.UNAVAILABLE) NULL_PARTITION(org.zalando.nakadi.domain.CursorError.NULL_PARTITION) Logger(org.slf4j.Logger) Properties(java.util.Properties) Producer(org.apache.kafka.clients.producer.Producer) PartitionEndStatistics(org.zalando.nakadi.domain.PartitionEndStatistics) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) EventConsumer(org.zalando.nakadi.repository.EventConsumer) 
Collectors.toList(java.util.stream.Collectors.toList) EventPublishingStatus(org.zalando.nakadi.domain.EventPublishingStatus) Preconditions(com.google.common.base.Preconditions) Collections(java.util.Collections) RackAwareMode(kafka.admin.RackAwareMode)
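
A minimal, standalone sketch of the same idea: adapting Kafka's callback-based Producer.send into a CompletableFuture, without the Nakadi-specific circuit breaker and batch-item types. The class and method names here are illustrative, not part of Nakadi.

import java.util.concurrent.CompletableFuture;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

final class FutureSend {

    // Wraps the callback-style send in a CompletableFuture that completes
    // with the record metadata on success, or exceptionally on failure.
    static CompletableFuture<RecordMetadata> sendAsync(final Producer<String, String> producer, final ProducerRecord<String, String> record) {
        final CompletableFuture<RecordMetadata> result = new CompletableFuture<>();
        producer.send(record, (metadata, exception) -> {
            if (exception != null) {
                result.completeExceptionally(exception);
            } else {
                result.complete(metadata);
            }
        });
        return result;
    }
}

Note that publishItem above deliberately completes its future with the exception as a value rather than exceptionally, so the caller can collect per-item outcomes across a whole batch.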

Example 57 with ProducerRecord

use of org.apache.kafka.clients.producer.ProducerRecord in project hazelcast-jet by hazelcast.

the class KafkaSinkTest method testWriteToSpecificPartitions.

@Test
public void testWriteToSpecificPartitions() throws Exception {
    String localTopic = topic;
    Pipeline p = Pipeline.create();
    p.drawFrom(Sources.<String, String>map(SOURCE_IMAP_NAME)).drainTo(KafkaSinks.kafka(properties, e -> new ProducerRecord<>(localTopic, Integer.valueOf(e.getKey()), e.getKey(), e.getValue())));
    instance.newJob(p).join();
    assertTopicContentsEventually(sourceIMap, true);
}
Also used : AbstractProcessor(com.hazelcast.jet.core.AbstractProcessor) Traverser(com.hazelcast.jet.Traverser) JetInstance(com.hazelcast.jet.JetInstance) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) RunWith(org.junit.runner.RunWith) IMapJet(com.hazelcast.jet.IMapJet) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) HazelcastSerialClassRunner(com.hazelcast.test.HazelcastSerialClassRunner) KafkaTestSupport(com.hazelcast.jet.kafka.impl.KafkaTestSupport) Util.entry(com.hazelcast.jet.Util.entry) Map(java.util.Map) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Collections.singletonMap(java.util.Collections.singletonMap) Job(com.hazelcast.jet.Job) Before(org.junit.Before) Properties(java.util.Properties) Pipeline(com.hazelcast.jet.pipeline.Pipeline) ProcessorMetaSupplier(com.hazelcast.jet.core.ProcessorMetaSupplier) JobConfig(com.hazelcast.jet.config.JobConfig) IOException(java.io.IOException) Test(org.junit.Test) Sources(com.hazelcast.jet.pipeline.Sources) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Entry(java.util.Map.Entry) ProcessingGuarantee(com.hazelcast.jet.config.ProcessingGuarantee) SECONDS(java.util.concurrent.TimeUnit.SECONDS) Assert.assertEquals(org.junit.Assert.assertEquals) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer)
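
The sink above relies on the four-argument ProducerRecord constructor to pin every record to the partition derived from its key. A minimal sketch of just that constructor, with an illustrative topic name and values (the partition must exist on the topic):

import org.apache.kafka.clients.producer.ProducerRecord;

public class ExplicitPartitionExample {

    public static void main(String[] args) {
        // (topic, partition, key, value): supplying the Integer partition
        // explicitly bypasses the partitioner entirely.
        ProducerRecord<String, String> record = new ProducerRecord<>("my-topic", 0, "key-0", "value-0");
        System.out.println(record.partition() + " <- " + record.key());
    }
}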

Example 58 with ProducerRecord

use of org.apache.kafka.clients.producer.ProducerRecord in project debezium by debezium.

the class KafkaDatabaseHistoryTest method shouldIgnoreUnparseableMessages.

@Test
public void shouldIgnoreUnparseableMessages() throws Exception {
    // Create the empty topic ...
    kafka.createTopic(topicName, 1, 1);
    // Create invalid records
    final ProducerRecord<String, String> nullRecord = new ProducerRecord<>(topicName, PARTITION_NO, null, null);
    final ProducerRecord<String, String> emptyRecord = new ProducerRecord<>(topicName, PARTITION_NO, null, "");
    final ProducerRecord<String, String> noSourceRecord = new ProducerRecord<>(topicName, PARTITION_NO, null, "{\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"DROP TABLE foo;\"}");
    final ProducerRecord<String, String> noPositionRecord = new ProducerRecord<>(topicName, PARTITION_NO, null, "{\"source\":{\"server\":\"my-server\"},\"databaseName\":\"db1\",\"ddl\":\"DROP TABLE foo;\"}");
    final ProducerRecord<String, String> invalidJSONRecord1 = new ProducerRecord<>(topicName, PARTITION_NO, null, "{\"source\":{\"server\":\"my-server\"},\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"DROP TABLE foo;\"");
    final ProducerRecord<String, String> invalidJSONRecord2 = new ProducerRecord<>(topicName, PARTITION_NO, null, "\"source\":{\"server\":\"my-server\"},\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"DROP TABLE foo;\"}");
    final ProducerRecord<String, String> invalidSQL = new ProducerRecord<>(topicName, PARTITION_NO, null, "{\"source\":{\"server\":\"my-server\"},\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"xxxDROP TABLE foo;\"}");
    final Configuration intruderConfig = Configuration.create().withDefault(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.brokerList()).withDefault(ProducerConfig.CLIENT_ID_CONFIG, "intruder").withDefault(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class).withDefault(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class).build();
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(intruderConfig.asProperties())) {
        producer.send(nullRecord).get();
        producer.send(emptyRecord).get();
        producer.send(noSourceRecord).get();
        producer.send(noPositionRecord).get();
        producer.send(invalidJSONRecord1).get();
        producer.send(invalidJSONRecord2).get();
        producer.send(invalidSQL).get();
    }
    testHistoryTopicContent(true);
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) Configuration(io.debezium.config.Configuration) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Test(org.junit.Test)
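
Each producer.send(...).get() call above blocks until the broker acknowledges the record, which keeps the test deterministic. A minimal sketch of that synchronous-send idiom under try-with-resources; the broker address and topic name are placeholders:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class SyncSendExample {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // placeholder broker address
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // get() turns the asynchronous send into a blocking call and
            // surfaces broker-side failures as an ExecutionException.
            RecordMetadata meta = producer.send(new ProducerRecord<>("history-topic", null, "{}")).get();
            System.out.println("written at offset " + meta.offset());
        }
    }
}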

Example 59 with ProducerRecord

use of org.apache.kafka.clients.producer.ProducerRecord in project debezium by debezium.

the class KafkaDatabaseHistoryTest method shouldStopOnUnparseableSQL.

@Test(expected = ParsingException.class)
public void shouldStopOnUnparseableSQL() throws Exception {
    // Create the empty topic ...
    kafka.createTopic(topicName, 1, 1);
    // Create invalid records
    final ProducerRecord<String, String> invalidSQL = new ProducerRecord<>(topicName, PARTITION_NO, null, "{\"source\":{\"server\":\"my-server\"},\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"xxxDROP TABLE foo;\"}");
    final Configuration intruderConfig = Configuration.create().withDefault(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.brokerList()).withDefault(ProducerConfig.CLIENT_ID_CONFIG, "intruder").withDefault(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class).withDefault(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class).build();
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(intruderConfig.asProperties())) {
        producer.send(invalidSQL).get();
    }
    testHistoryTopicContent(false);
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) Configuration(io.debezium.config.Configuration) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Test(org.junit.Test)
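
The intruder producer is configured through Debezium's Configuration builder and then handed to the Kafka client as plain java.util.Properties. A minimal sketch of just that bridge, using only the calls visible in the test above; the broker address is a placeholder:

import io.debezium.config.Configuration;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

public class IntruderConfigExample {

    public static void main(String[] args) {
        // Build a Debezium Configuration, then convert it for the Kafka
        // client with asProperties().
        Configuration config = Configuration.create().withDefault(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092").withDefault(ProducerConfig.CLIENT_ID_CONFIG, "intruder").withDefault(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class).withDefault(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class).build();
        System.out.println(config.asProperties());
    }
}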

Example 60 with ProducerRecord

use of org.apache.kafka.clients.producer.ProducerRecord in project mist by snuspl.

the class KafkaSourceTest method testKafkaDataGenerator.

/**
 * Test whether KafkaDataGenerator fetches the input stream
 * from the Kafka server and generates data correctly.
 * @throws Exception
 */
@Test(timeout = 30000L)
public void testKafkaDataGenerator() throws Exception {
    final Map<String, String> inputStream = new HashMap<>();
    inputStream.put("0", "Lorem ipsum dolor sit amet, consectetur adipiscing elit.");
    inputStream.put("1", "In in leo nec erat fringilla mattis eu non massa.");
    inputStream.put("2", "Cras quis diam suscipit, commodo enim id, pulvinar nunc.");
    final CountDownLatch dataCountDownLatch = new CountDownLatch(inputStream.size());
    final Map<String, String> result = new HashMap<>();
    // create local kafka broker
    KafkaLocalBroker kafkaLocalBroker = new KafkaLocalBroker(KAFKA_PORT, KAFKA_ADDRESS, ZK_PORT, ZK_ADDRESS);
    kafkaLocalBroker.start();
    // define kafka consumer configuration
    final HashMap<String, Object> kafkaConsumerConf = new HashMap<>();
    kafkaConsumerConf.put("bootstrap.servers", kafkaLocalBroker.getLocalhostBroker());
    kafkaConsumerConf.put("group.id", "SourceTestGroup");
    kafkaConsumerConf.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    kafkaConsumerConf.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    kafkaConsumerConf.put("auto.offset.reset", "earliest");
    // define kafka producer configuration
    final HashMap<String, Object> kafkaProducerConf = new HashMap<>();
    kafkaProducerConf.put("bootstrap.servers", kafkaLocalBroker.getLocalhostBroker());
    kafkaProducerConf.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    kafkaProducerConf.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    // create kafka source
    final KafkaDataGenerator<Integer, String> kafkaDataGenerator = new KafkaDataGenerator<>(KAFKA_TOPIC, kafkaConsumerConf, kafkaSharedResource);
    final SourceTestEventGenerator<String, String> eventGenerator = new SourceTestEventGenerator<>(result, dataCountDownLatch);
    kafkaDataGenerator.setEventGenerator(eventGenerator);
    kafkaDataGenerator.start();
    // create kafka producer
    final KafkaProducer<String, String> producer = new KafkaProducer<>(kafkaProducerConf);
    final ProducerRecord<String, String> record1 = new ProducerRecord<>(KAFKA_TOPIC, "0", inputStream.get("0"));
    final ProducerRecord<String, String> record2 = new ProducerRecord<>(KAFKA_TOPIC, "1", inputStream.get("1"));
    final ProducerRecord<String, String> record3 = new ProducerRecord<>(KAFKA_TOPIC, "2", inputStream.get("2"));
    producer.send(record1);
    producer.send(record2);
    producer.send(record3);
    producer.close();
    // wait for the consumer to receive the inputs.
    dataCountDownLatch.await();
    kafkaDataGenerator.close();
    // KafkaDataGenerator will wait until its pollTimeout elapses before it is closed.
    // Therefore, we need to wait a bit for KafkaDataGenerator.
    // TODO: [MIST-369] Removing sleep in the `KafkaSourceTest`
    sleep(2000);
    kafkaLocalBroker.stop();
    Assert.assertEquals(inputStream, result);
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) HashMap(java.util.HashMap) CountDownLatch(java.util.concurrent.CountDownLatch) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Test(org.junit.Test)
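
The test configures its producer with a plain Map<String, Object> rather than Properties, which the KafkaProducer constructor also accepts. A minimal sketch of that map-based setup with fire-and-forget sends; the broker address and topic are placeholders:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class MapConfigProducerExample {

    public static void main(String[] args) {
        final Map<String, Object> conf = new HashMap<>();
        // placeholder broker address
        conf.put("bootstrap.servers", "localhost:9092");
        conf.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        conf.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(conf)) {
            // send() is asynchronous; the implicit close() flushes any
            // records still buffered, so nothing is lost on exit.
            producer.send(new ProducerRecord<>("test-topic", "0", "hello"));
        }
    }
}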

Aggregations

ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 193
Test (org.junit.Test): 90
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 57
Properties (java.util.Properties): 50
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 40
ArrayList (java.util.ArrayList): 39
Callback (org.apache.kafka.clients.producer.Callback): 30
Future (java.util.concurrent.Future): 26
TopicPartition (org.apache.kafka.common.TopicPartition): 24
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 21
HashMap (java.util.HashMap): 20
Random (java.util.Random): 19
IOException (java.io.IOException): 16
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 16
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 16
KafkaException (org.apache.kafka.common.KafkaException): 16
List (java.util.List): 13
MockProducer (org.apache.kafka.clients.producer.MockProducer): 13
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner): 12
StreamsException (org.apache.kafka.streams.errors.StreamsException): 12
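
For reference, the three ProducerRecord constructor overloads that the examples above cycle through, in one self-contained sketch (topic name and values are illustrative):

import org.apache.kafka.clients.producer.ProducerRecord;

public class ConstructorOverloads {

    public static void main(String[] args) {
        // (topic, value): key is null, partition chosen by the partitioner.
        ProducerRecord<String, String> r1 = new ProducerRecord<>("t", "v");
        // (topic, key, value): partition derived from the key by the default partitioner.
        ProducerRecord<String, String> r2 = new ProducerRecord<>("t", "k", "v");
        // (topic, partition, key, value): the explicit partition wins.
        ProducerRecord<String, String> r3 = new ProducerRecord<>("t", 0, "k", "v");
        System.out.println(r1 + "\n" + r2 + "\n" + r3);
    }
}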