Example 81 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.

The class WorkerSourceTaskTest, method testHeadersWithCustomConverter.

@Test
public void testHeadersWithCustomConverter() throws Exception {
    StringConverter stringConverter = new StringConverter();
    TestConverterWithHeaders testConverter = new TestConverterWithHeaders();
    createWorkerTask(TargetState.STARTED, stringConverter, testConverter, stringConverter);
    List<SourceRecord> records = new ArrayList<>();
    String stringA = "Árvíztűrő tükörfúrógép";
    org.apache.kafka.connect.header.Headers headersA = new ConnectHeaders();
    String encodingA = "latin2";
    headersA.addString("encoding", encodingA);
    records.add(new SourceRecord(PARTITION, OFFSET, "topic", null, Schema.STRING_SCHEMA, "a", Schema.STRING_SCHEMA, stringA, null, headersA));
    String stringB = "Тестовое сообщение";
    org.apache.kafka.connect.header.Headers headersB = new ConnectHeaders();
    String encodingB = "koi8_r";
    headersB.addString("encoding", encodingB);
    records.add(new SourceRecord(PARTITION, OFFSET, "topic", null, Schema.STRING_SCHEMA, "b", Schema.STRING_SCHEMA, stringB, null, headersB));
    expectTopicCreation(TOPIC);
    Capture<ProducerRecord<byte[], byte[]>> sentRecordA = expectSendRecord(TOPIC, false, true, true, false, null);
    Capture<ProducerRecord<byte[], byte[]>> sentRecordB = expectSendRecord(TOPIC, false, true, true, false, null);
    PowerMock.replayAll();
    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(ByteBuffer.wrap("a".getBytes()), ByteBuffer.wrap(sentRecordA.getValue().key()));
    assertEquals(ByteBuffer.wrap(stringA.getBytes(encodingA)), ByteBuffer.wrap(sentRecordA.getValue().value()));
    assertEquals(encodingA, new String(sentRecordA.getValue().headers().lastHeader("encoding").value()));
    assertEquals(ByteBuffer.wrap("b".getBytes()), ByteBuffer.wrap(sentRecordB.getValue().key()));
    assertEquals(ByteBuffer.wrap(stringB.getBytes(encodingB)), ByteBuffer.wrap(sentRecordB.getValue().value()));
    assertEquals(encodingB, new String(sentRecordB.getValue().headers().lastHeader("encoding").value()));
    PowerMock.verifyAll();
}
Also used : ArrayList(java.util.ArrayList) StringConverter(org.apache.kafka.connect.storage.StringConverter) SourceRecord(org.apache.kafka.connect.source.SourceRecord) ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) ThreadedTest(org.apache.kafka.connect.util.ThreadedTest) RetryWithToleranceOperatorTest(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest) ParameterizedTest(org.apache.kafka.connect.util.ParameterizedTest) Test(org.junit.Test)
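For reference, a minimal sketch of attaching the same kind of header directly to a ProducerRecord with the plain producer API, outside the Connect worker. The topic name, payload, and charset choice below are illustrative assumptions, not taken from the test above.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.producer.ProducerRecord;

public class ProducerRecordHeaderSketch {
    public static void main(String[] args) {
        // Hypothetical topic and payload, mirroring the "encoding" header idea from the test.
        ProducerRecord<String, String> record = new ProducerRecord<>("topic", "a", "Árvíztűrő tükörfúrógép");
        // Headers can be added after construction; header values are raw bytes.
        record.headers().add("encoding", "latin2".getBytes(StandardCharsets.UTF_8));
        System.out.println(new String(record.headers().lastHeader("encoding").value(), StandardCharsets.UTF_8));
    }
}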

Example 82 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.

The class WorkerSourceTaskTest, method testSendRecordsCorruptTimestamp.

@Test
public void testSendRecordsCorruptTimestamp() throws Exception {
    final Long timestamp = -3L;
    createWorkerTask();
    List<SourceRecord> records = Collections.singletonList(new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp));
    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecordAnyTimes();
    PowerMock.replayAll();
    Whitebox.setInternalState(workerTask, "toSend", records);
    assertThrows(InvalidRecordException.class, () -> Whitebox.invokeMethod(workerTask, "sendRecords"));
    assertFalse(sent.hasCaptured());
    PowerMock.verifyAll();
}
Also used : ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) SourceRecord(org.apache.kafka.connect.source.SourceRecord) ThreadedTest(org.apache.kafka.connect.util.ThreadedTest) RetryWithToleranceOperatorTest(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest) ParameterizedTest(org.apache.kafka.connect.util.ParameterizedTest) Test(org.junit.Test)
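For comparison, the plain producer client also rejects negative timestamps, although it does so in the ProducerRecord constructor and, to the best of my understanding, with an IllegalArgumentException rather than the Connect-side InvalidRecordException expected above. A minimal sketch with placeholder topic, key, and value:

import org.apache.kafka.clients.producer.ProducerRecord;

public class NegativeTimestampSketch {
    public static void main(String[] args) {
        try {
            // -3L mirrors the corrupt timestamp used in the test above; topic/key/value are placeholders.
            new ProducerRecord<String, String>("topic", null, -3L, "key", "value");
        } catch (IllegalArgumentException e) {
            // Expected, assuming the constructor validates that timestamps are non-negative or null.
            System.out.println("Rejected: " + e.getMessage());
        }
    }
}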

Example 83 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.

The class RecordCollectorImpl, method send.

@Override
public <K, V> void send(final String topic, final K key, final V value, final Headers headers, final Integer partition, final Long timestamp, final Serializer<K> keySerializer, final Serializer<V> valueSerializer) {
    checkForException();
    final byte[] keyBytes;
    final byte[] valBytes;
    try {
        keyBytes = keySerializer.serialize(topic, headers, key);
        valBytes = valueSerializer.serialize(topic, headers, value);
    } catch (final ClassCastException exception) {
        final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName();
        final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName();
        throw new StreamsException(String.format("ClassCastException while producing data to topic %s. " + "A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). " + "Change the default Serdes in StreamConfig or provide correct Serdes via method parameters " + "(for example if using the DSL, `#to(String topic, Produced<K, V> produced)` with " + "`Produced.keySerde(WindowedSerdes.timeWindowedSerdeFrom(String.class))`).", topic, keySerializer.getClass().getName(), valueSerializer.getClass().getName(), keyClass, valueClass), exception);
    } catch (final RuntimeException exception) {
        final String errorMessage = String.format(SEND_EXCEPTION_MESSAGE, topic, taskId, exception.toString());
        throw new StreamsException(errorMessage, exception);
    }
    final ProducerRecord<byte[], byte[]> serializedRecord = new ProducerRecord<>(topic, partition, timestamp, keyBytes, valBytes, headers);
    streamsProducer.send(serializedRecord, (metadata, exception) -> {
        // if there's already an exception record, skip logging offsets or new exceptions
        if (sendException.get() != null) {
            return;
        }
        if (exception == null) {
            final TopicPartition tp = new TopicPartition(metadata.topic(), metadata.partition());
            if (metadata.offset() >= 0L) {
                offsets.put(tp, metadata.offset());
            } else {
                log.warn("Received offset={} in produce response for {}", metadata.offset(), tp);
            }
        } else {
            recordSendError(topic, exception, serializedRecord);
            // KAFKA-7510 only put message key and value in TRACE level log so we don't leak data by default
            log.trace("Failed record: (key {} value {} timestamp {}) topic=[{}] partition=[{}]", key, value, timestamp, topic, partition);
        }
    });
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) StreamsException(org.apache.kafka.streams.errors.StreamsException) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord)
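The same success/failure handling can be sketched against the plain producer API. This is a simplified illustration, not the Streams implementation: the broker address, topic, and logging are placeholders, and it omits the sendException bookkeeping that RecordCollectorImpl adds above.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringSerializer;

public class SendCallbackSketch {
    public static void main(String[] args) {
        Properties config = new Properties();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (KafkaProducer<String, String> producer =
                new KafkaProducer<>(config, new StringSerializer(), new StringSerializer())) {
            ProducerRecord<String, String> record = new ProducerRecord<>("topic", "key", "value");
            producer.send(record, (metadata, exception) -> {
                if (exception == null) {
                    // Track the acknowledged offset per partition, as the collector above does.
                    TopicPartition tp = new TopicPartition(metadata.topic(), metadata.partition());
                    System.out.printf("acked %s at offset %d%n", tp, metadata.offset());
                } else {
                    // Keep key/value out of default-level logs (cf. the KAFKA-7510 note above).
                    System.err.println("send failed: " + exception);
                }
            });
            producer.flush();
        }
    }
}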

Example 84 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.

The class StateDirectoryIntegrationTest, method testNotCleanUpStateDirIfNotEmpty.

@Test
public void testNotCleanUpStateDirIfNotEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);
    // Create Topic
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);
    final Properties producerConfig = mkProperties(mkMap(mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()), mkEntry(ProducerConfig.ACKS_CONFIG, "all"), mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()), mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())));
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create Test Records
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));
        // Create Topology
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(input, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName).withKeySerde(Serdes.String()).withValueSerde(Serdes.String()));
        final Topology topology = builder.build();
        // State Store Directory
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();
        // Create KafkaStreams instance
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId), mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir), mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);
        // Create StateListener
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);
        // Application state directory
        final File appDir = new File(stateDir, applicationId);
        // Validate application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }
        // State directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory Exists
        assertTrue(appDir.exists());
        try {
            assertTrue((new File(appDir, "dummy")).createNewFile());
        } catch (final IOException e) {
            throw new RuntimeException("Failed to create dummy file.", e);
        }
        // Validate StateStore directory is deleted.
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't cleaned up in time.", e);
        }
        streams.cleanUp();
        // Root state store exists
        assertTrue((new File(stateDir)).exists());
        // Application state store exists
        assertTrue(appDir.exists());
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) BeforeClass(org.junit.BeforeClass) IntegrationTest(org.apache.kafka.test.IntegrationTest) Utils.mkProperties(org.apache.kafka.common.utils.Utils.mkProperties) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AfterClass(org.junit.AfterClass) Properties(java.util.Properties) TestUtils(org.apache.kafka.test.TestUtils) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) File(java.io.File) Bytes(org.apache.kafka.common.utils.Bytes) TimeUnit(java.util.concurrent.TimeUnit) CountDownLatch(java.util.concurrent.CountDownLatch) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) Rule(org.junit.Rule) Utils.mkEntry(org.apache.kafka.common.utils.Utils.mkEntry) Materialized(org.apache.kafka.streams.kstream.Materialized) KafkaStreams(org.apache.kafka.streams.KafkaStreams) Topology(org.apache.kafka.streams.Topology)
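The latch-plus-StateListener pattern used above can be pulled out into a small helper. This is a sketch under the assumption that the listener is registered before streams.start(), since KafkaStreams only accepts a state listener before the instance has started; the class and field names below are made up for illustration.

import java.util.concurrent.CountDownLatch;
import org.apache.kafka.streams.KafkaStreams;

final class StateTransitionLatches {
    final CountDownLatch running = new CountDownLatch(1);
    final CountDownLatch notRunning = new CountDownLatch(1);

    // Call before streams.start(); afterwards await the latches with a timeout,
    // as the integration test above does with IntegrationTestUtils.DEFAULT_TIMEOUT.
    void register(final KafkaStreams streams) {
        streams.setStateListener((newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                running.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunning.countDown();
            }
        });
    }
}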

Example 85 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.

The class StateDirectoryIntegrationTest, method testCleanUpStateDirIfEmpty.

@Test
public void testCleanUpStateDirIfEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);
    // Create Topic
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);
    final Properties producerConfig = mkProperties(mkMap(mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()), mkEntry(ProducerConfig.ACKS_CONFIG, "all"), mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()), mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())));
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create Test Records
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));
        // Create Topology
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(input, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName).withKeySerde(Serdes.String()).withValueSerde(Serdes.String()));
        final Topology topology = builder.build();
        // State Store Directory
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();
        // Create KafkaStreams instance
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId), mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir), mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);
        // Create StateListener
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);
        // Application state directory
        final File appDir = new File(stateDir, applicationId);
        // Validate application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }
        // State directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory Exists
        assertTrue(appDir.exists());
        // Validate StateStore directory is deleted.
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't cleaned up in time.", e);
        }
        streams.cleanUp();
        // Root state store exists
        assertTrue((new File(stateDir)).exists());
        // case 1: the state directory is cleaned up without any problems.
        // case 2: The state directory is not cleaned up, for it does not include any checkpoint file.
        // case 3: The state directory is not cleaned up, for it includes a checkpoint file but it is empty.
        assertTrue(appDir.exists()
            || Arrays.stream(appDir.listFiles())
                .filter((File f) -> f.isDirectory() && f.listFiles().length > 0 && !(new File(f, ".checkpoint")).exists())
                .findFirst()
                .isPresent()
            || Arrays.stream(appDir.listFiles())
                .filter((File f) -> f.isDirectory() && (new File(f, ".checkpoint")).length() == 0L)
                .findFirst()
                .isPresent());
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) BeforeClass(org.junit.BeforeClass) IntegrationTest(org.apache.kafka.test.IntegrationTest) Utils.mkProperties(org.apache.kafka.common.utils.Utils.mkProperties) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AfterClass(org.junit.AfterClass) Properties(java.util.Properties) TestUtils(org.apache.kafka.test.TestUtils) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) File(java.io.File) Bytes(org.apache.kafka.common.utils.Bytes) TimeUnit(java.util.concurrent.TimeUnit) CountDownLatch(java.util.concurrent.CountDownLatch) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) Rule(org.junit.Rule) Utils.mkEntry(org.apache.kafka.common.utils.Utils.mkEntry) Materialized(org.apache.kafka.streams.kstream.Materialized) KafkaStreams(org.apache.kafka.streams.KafkaStreams) Topology(org.apache.kafka.streams.Topology)

Aggregations

ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 193
Test (org.junit.Test): 90
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 57
Properties (java.util.Properties): 50
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 40
ArrayList (java.util.ArrayList): 39
Callback (org.apache.kafka.clients.producer.Callback): 30
Future (java.util.concurrent.Future): 26
TopicPartition (org.apache.kafka.common.TopicPartition): 24
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 21
HashMap (java.util.HashMap): 20
Random (java.util.Random): 19
IOException (java.io.IOException): 16
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 16
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 16
KafkaException (org.apache.kafka.common.KafkaException): 16
List (java.util.List): 13
MockProducer (org.apache.kafka.clients.producer.MockProducer): 13
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner): 12
StreamsException (org.apache.kafka.streams.errors.StreamsException): 12
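Taken together, these co-occurrence counts suggest the most common shape of the examples above: a KafkaProducer configured via Properties, sending ProducerRecords and inspecting the returned Future of RecordMetadata. A minimal sketch of that shape; the broker address and topic name are placeholders, not taken from any example above.

import java.util.Properties;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerRecordBasicsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Value-only record: the partition is chosen by the partitioner.
            producer.send(new ProducerRecord<>("topic", "value-only"));
            // Keyed record: the synchronous get() surfaces the assigned partition and offset.
            Future<RecordMetadata> ack = producer.send(new ProducerRecord<>("topic", "key", "value"));
            RecordMetadata metadata = ack.get();
            System.out.printf("wrote to %s-%d at offset %d%n",
                    metadata.topic(), metadata.partition(), metadata.offset());
        }
    }
}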