Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
From the class WorkerSourceTaskTest, method testHeadersWithCustomConverter.
@Test
public void testHeadersWithCustomConverter() throws Exception {
    StringConverter stringConverter = new StringConverter();
    TestConverterWithHeaders testConverter = new TestConverterWithHeaders();
    createWorkerTask(TargetState.STARTED, stringConverter, testConverter, stringConverter);

    List<SourceRecord> records = new ArrayList<>();

    String stringA = "Árvíztűrő tükörfúrógép";
    org.apache.kafka.connect.header.Headers headersA = new ConnectHeaders();
    String encodingA = "latin2";
    headersA.addString("encoding", encodingA);
    records.add(new SourceRecord(PARTITION, OFFSET, "topic", null, Schema.STRING_SCHEMA, "a", Schema.STRING_SCHEMA, stringA, null, headersA));

    String stringB = "Тестовое сообщение";
    org.apache.kafka.connect.header.Headers headersB = new ConnectHeaders();
    String encodingB = "koi8_r";
    headersB.addString("encoding", encodingB);
    records.add(new SourceRecord(PARTITION, OFFSET, "topic", null, Schema.STRING_SCHEMA, "b", Schema.STRING_SCHEMA, stringB, null, headersB));

    expectTopicCreation(TOPIC);
    Capture<ProducerRecord<byte[], byte[]>> sentRecordA = expectSendRecord(TOPIC, false, true, true, false, null);
    Capture<ProducerRecord<byte[], byte[]>> sentRecordB = expectSendRecord(TOPIC, false, true, true, false, null);
    PowerMock.replayAll();

    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");

    assertEquals(ByteBuffer.wrap("a".getBytes()), ByteBuffer.wrap(sentRecordA.getValue().key()));
    assertEquals(ByteBuffer.wrap(stringA.getBytes(encodingA)), ByteBuffer.wrap(sentRecordA.getValue().value()));
    assertEquals(encodingA, new String(sentRecordA.getValue().headers().lastHeader("encoding").value()));

    assertEquals(ByteBuffer.wrap("b".getBytes()), ByteBuffer.wrap(sentRecordB.getValue().key()));
    assertEquals(ByteBuffer.wrap(stringB.getBytes(encodingB)), ByteBuffer.wrap(sentRecordB.getValue().value()));
    assertEquals(encodingB, new String(sentRecordB.getValue().headers().lastHeader("encoding").value()));

    PowerMock.verifyAll();
}
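The test above drives the full Connect conversion path; the producer-side header API that the captured ProducerRecord exposes is much smaller. Below is a minimal, self-contained sketch of attaching an "encoding" header to a ProducerRecord and reading it back the same way the assertions do. The topic name, key, and the use of ISO-8859-2 (the canonical charset behind the "latin2" alias) are illustrative choices, not taken from the test.

import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.producer.ProducerRecord;

public class HeaderRoundTripSketch {
    public static void main(String[] args) {
        // Encode the value with ISO-8859-2, i.e. the "latin2" alias used by the test.
        byte[] value = "Árvíztűrő tükörfúrógép".getBytes(Charset.forName("ISO-8859-2"));

        ProducerRecord<byte[], byte[]> record =
                new ProducerRecord<>("topic", "a".getBytes(StandardCharsets.UTF_8), value);
        // Attach the encoding as a record header, mirroring what the custom converter adds.
        record.headers().add("encoding", "latin2".getBytes(StandardCharsets.UTF_8));

        // Read the header back, just as the assertions do on the captured record.
        String encoding = new String(
                record.headers().lastHeader("encoding").value(), StandardCharsets.UTF_8);
        System.out.println(encoding); // prints "latin2"
    }
}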
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
From the class WorkerSourceTaskTest, method testSendRecordsCorruptTimestamp.
@Test
public void testSendRecordsCorruptTimestamp() throws Exception {
    final Long timestamp = -3L;
    createWorkerTask();

    List<SourceRecord> records = Collections.singletonList(
        new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp));
    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecordAnyTimes();
    PowerMock.replayAll();

    Whitebox.setInternalState(workerTask, "toSend", records);
    assertThrows(InvalidRecordException.class, () -> Whitebox.invokeMethod(workerTask, "sendRecords"));
    assertFalse(sent.hasCaptured());

    PowerMock.verifyAll();
}
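For context: the InvalidRecordException here is raised while the Connect worker validates the source record's timestamp, before any ProducerRecord is handed to the producer, which is why the capture stays empty. The plain producer applies the same rule itself: the ProducerRecord constructor rejects a non-null negative timestamp. A minimal sketch with an illustrative topic and payload:

import org.apache.kafka.clients.producer.ProducerRecord;

public class CorruptTimestampSketch {
    public static void main(String[] args) {
        try {
            // The constructor validates the timestamp: any negative value
            // (other than a null timestamp) is rejected outright.
            new ProducerRecord<>("topic", null, -3L, "key".getBytes(), "value".getBytes());
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}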
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
From the class RecordCollectorImpl, method send.
@Override
public <K, V> void send(final String topic,
                        final K key,
                        final V value,
                        final Headers headers,
                        final Integer partition,
                        final Long timestamp,
                        final Serializer<K> keySerializer,
                        final Serializer<V> valueSerializer) {
    checkForException();

    final byte[] keyBytes;
    final byte[] valBytes;
    try {
        keyBytes = keySerializer.serialize(topic, headers, key);
        valBytes = valueSerializer.serialize(topic, headers, value);
    } catch (final ClassCastException exception) {
        final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName();
        final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName();
        throw new StreamsException(
            String.format(
                "ClassCastException while producing data to topic %s. " +
                    "A serializer (key: %s / value: %s) is not compatible to the actual key or value type " +
                    "(key type: %s / value type: %s). " +
                    "Change the default Serdes in StreamConfig or provide correct Serdes via method parameters " +
                    "(for example if using the DSL, `#to(String topic, Produced<K, V> produced)` with " +
                    "`Produced.keySerde(WindowedSerdes.timeWindowedSerdeFrom(String.class))`).",
                topic,
                keySerializer.getClass().getName(),
                valueSerializer.getClass().getName(),
                keyClass,
                valueClass),
            exception);
    } catch (final RuntimeException exception) {
        final String errorMessage = String.format(SEND_EXCEPTION_MESSAGE, topic, taskId, exception.toString());
        throw new StreamsException(errorMessage, exception);
    }

    final ProducerRecord<byte[], byte[]> serializedRecord =
        new ProducerRecord<>(topic, partition, timestamp, keyBytes, valBytes, headers);

    streamsProducer.send(serializedRecord, (metadata, exception) -> {
        // if there's already an exception record, skip logging offsets or new exceptions
        if (sendException.get() != null) {
            return;
        }
        if (exception == null) {
            final TopicPartition tp = new TopicPartition(metadata.topic(), metadata.partition());
            if (metadata.offset() >= 0L) {
                offsets.put(tp, metadata.offset());
            } else {
                log.warn("Received offset={} in produce response for {}", metadata.offset(), tp);
            }
        } else {
            recordSendError(topic, exception, serializedRecord);
            // KAFKA-7510 only put message key and value in TRACE level log so we don't leak data by default
            log.trace("Failed record: (key {} value {} timestamp {}) topic=[{}] partition=[{}]", key, value, timestamp, topic, partition);
        }
    });
}
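Outside of Streams, the same callback pattern can be used with a plain KafkaProducer: build a ProducerRecord from pre-serialized bytes, then record acknowledged offsets or handle errors in the send callback. The sketch below is only an illustration of that pattern; the broker address and topic are placeholders, and the catch-all error handling stands in for recordSendError(...).

import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class CallbackOffsetTrackingSketch {
    public static void main(String[] args) {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // illustrative address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());

        // The callback runs on the producer's I/O thread, hence a concurrent map.
        final Map<TopicPartition, Long> offsets = new ConcurrentHashMap<>();

        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            final ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
                    "topic", null, System.currentTimeMillis(), "key".getBytes(), "value".getBytes());
            producer.send(record, (metadata, exception) -> {
                if (exception == null) {
                    // Remember the acknowledged offset per partition, as the collector above does.
                    offsets.put(new TopicPartition(metadata.topic(), metadata.partition()),
                            metadata.offset());
                } else {
                    // Real error handling would go here; note how RecordCollectorImpl keeps
                    // key and value out of non-TRACE logs (KAFKA-7510).
                    exception.printStackTrace();
                }
            });
            producer.flush();
        }
    }
}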
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
From the class StateDirectoryIntegrationTest, method testNotCleanUpStateDirIfNotEmpty.
@Test
public void testNotCleanUpStateDirIfNotEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);

    // Create topic
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);

    final Properties producerConfig = mkProperties(mkMap(
        mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
        mkEntry(ProducerConfig.ACKS_CONFIG, "all"),
        mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()),
        mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())));

    try (final KafkaProducer<String, String> producer =
             new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create test records
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));

        // Create topology
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(
            input,
            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName)
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String()));
        final Topology topology = builder.build();

        // State store directory
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();

        // Create KafkaStreams instance
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(
            mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId),
            mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir),
            mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);

        // Create StateListener
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);

        // Application state directory
        final File appDir = new File(stateDir, applicationId);

        // Validate that the application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }
        // State directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory exists
        assertTrue(appDir.exists());

        try {
            assertTrue((new File(appDir, "dummy")).createNewFile());
        } catch (final IOException e) {
            throw new RuntimeException("Failed to create dummy file.", e);
        }

        // Validate that the non-empty state directory is NOT deleted by cleanUp().
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't clean up in time.", e);
        }
        streams.cleanUp();

        // Root state directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory still exists
        assertTrue(appDir.exists());
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
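The records fed into the input topic above use the value-only ProducerRecord constructor, so the key is null and the partitioner picks the partition. A minimal sketch of the keyless and keyed variants follows; the broker address and topic are illustrative. With the default partitioner, records that share a key always land in the same partition, which matters when the topic backs a table as in this test.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ValueOnlyRecordSketch {
    public static void main(String[] args) {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // illustrative
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Value-only record: the key is null, so the partitioner chooses the partition.
            producer.send(new ProducerRecord<>("input-topic", "a"));
            // Keyed record: with the default partitioner, the same key always maps
            // to the same partition, preserving per-key ordering for table updates.
            producer.send(new ProducerRecord<>("input-topic", "user-1", "a"));
            producer.flush();
        }
    }
}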
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
From the class StateDirectoryIntegrationTest, method testCleanUpStateDirIfEmpty.
@Test
public void testCleanUpStateDirIfEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);

    // Create topic
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);

    final Properties producerConfig = mkProperties(mkMap(
        mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
        mkEntry(ProducerConfig.ACKS_CONFIG, "all"),
        mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()),
        mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())));

    try (final KafkaProducer<String, String> producer =
             new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create test records
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));

        // Create topology
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(
            input,
            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName)
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String()));
        final Topology topology = builder.build();

        // State store directory
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();

        // Create KafkaStreams instance
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(
            mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId),
            mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir),
            mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);

        // Create StateListener
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);

        // Application state directory
        final File appDir = new File(stateDir, applicationId);

        // Validate that the application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }
        // State directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory exists
        assertTrue(appDir.exists());

        // Validate the state directory after close() and cleanUp().
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't clean up in time.", e);
        }
        streams.cleanUp();

        // Root state directory exists
        assertTrue((new File(stateDir)).exists());

        // case 1: the state directory is cleaned up without any problems.
        // case 2: the state directory is not cleaned up, because it does not include any checkpoint file.
        // case 3: the state directory is not cleaned up, because it includes a checkpoint file, but it is empty.
        assertTrue(
            appDir.exists()
                || Arrays.stream(appDir.listFiles())
                    .filter((File f) -> f.isDirectory() && f.listFiles().length > 0 && !(new File(f, ".checkpoint")).exists())
                    .findFirst()
                    .isPresent()
                || Arrays.stream(appDir.listFiles())
                    .filter((File f) -> f.isDirectory() && (new File(f, ".checkpoint")).length() == 0L)
                    .findFirst()
                    .isPresent());
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
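Both state directory tests fire producer.send(...) asynchronously and rely on the Streams application to consume the records later. When a test needs confirmation that a ProducerRecord has actually been written before it proceeds, the Future returned by send() can be awaited, or flush() called. A minimal sketch with illustrative broker address and topic:

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class AwaitedSendSketch {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // illustrative
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // send() returns a Future<RecordMetadata>; get() blocks until the broker acknowledges.
            RecordMetadata metadata =
                    producer.send(new ProducerRecord<>("input-topic", "a")).get();
            System.out.printf("written to %s-%d at offset %d%n",
                    metadata.topic(), metadata.partition(), metadata.offset());
            // Alternatively, flush() blocks until all buffered sends have completed.
            producer.flush();
        }
    }
}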