Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class RecordCollectorTest, method shouldSendToSpecificPartition.
@Test
public void shouldSendToSpecificPartition() {
    final Headers headers = new RecordHeaders(new Header[] {new RecordHeader("key", "value".getBytes())});

    // Three records to partition 0, two to partition 1, one to partition 2.
    collector.send(topic, "999", "0", null, 0, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", null, 0, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", null, 0, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", headers, 1, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", headers, 1, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", headers, 2, null, stringSerializer, stringSerializer);

    // offsets() reports the highest acked offset per partition (zero-based).
    Map<TopicPartition, Long> offsets = collector.offsets();

    assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 0)));
    assertEquals(1L, (long) offsets.get(new TopicPartition(topic, 1)));
    assertEquals(0L, (long) offsets.get(new TopicPartition(topic, 2)));
    assertEquals(6, mockProducer.history().size());

    // One more record per partition advances each offset by one.
    collector.send(topic, "999", "0", null, 0, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", null, 1, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", headers, 2, null, stringSerializer, stringSerializer);

    offsets = collector.offsets();

    assertEquals(3L, (long) offsets.get(new TopicPartition(topic, 0)));
    assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 1)));
    assertEquals(1L, (long) offsets.get(new TopicPartition(topic, 2)));
    assertEquals(9, mockProducer.history().size());
}
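The mockProducer referenced by the assertions is Kafka's MockProducer test utility, which records every send (headers included) in order. A minimal standalone sketch of that mechanism; the class name, topic, and header values are illustrative, not taken from the test:

import java.util.List;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.serialization.StringSerializer;

public class MockProducerHeadersSketch {
    public static void main(String[] args) {
        // autoComplete = true: every send is acked immediately.
        MockProducer<String, String> producer =
            new MockProducer<>(true, new StringSerializer(), new StringSerializer());

        Header header = new RecordHeader("key", "value".getBytes());
        producer.send(new ProducerRecord<>("some-topic", 0, "999", "0",
            new RecordHeaders(new Header[] {header})));

        // history() returns every record handed to send(), headers intact.
        List<ProducerRecord<String, String>> history = producer.history();
        System.out.println(history.get(0).headers().lastHeader("key"));
    }
}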
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class RecordCollectorTest, method shouldPassThroughRecordHeaderToSerializer.
@Test
public void shouldPassThroughRecordHeaderToSerializer() {
    final CustomStringSerializer keySerializer = new CustomStringSerializer();
    final CustomStringSerializer valueSerializer = new CustomStringSerializer();
    keySerializer.configure(Collections.emptyMap(), true);

    collector.send(topic, "3", "0", new RecordHeaders(), null, keySerializer, valueSerializer, streamPartitioner);

    final List<ProducerRecord<byte[], byte[]>> recordHistory = mockProducer.history();
    for (final ProducerRecord<byte[], byte[]> sentRecord : recordHistory) {
        final Headers headers = sentRecord.headers();
        // Each custom serializer appended one header during serialization.
        assertEquals(2, headers.toArray().length);
        assertEquals(new RecordHeader("key", "key".getBytes()), headers.lastHeader("key"));
        assertEquals(new RecordHeader("value", "value".getBytes()), headers.lastHeader("value"));
    }
}
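The CustomStringSerializer used above is not shown on this page. One plausible shape, assuming it builds on the header-aware Serializer#serialize(String, Headers, T) overload that Kafka provides, would be a serializer that tags each record with its own header (the class and header names here are hypothetical):

import java.util.Map;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringSerializer;

// Hypothetical header-aware serializer; not the test's actual implementation.
public class HeaderTaggingStringSerializer implements Serializer<String> {
    private final StringSerializer delegate = new StringSerializer();
    private boolean isKey;

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        this.isKey = isKey;
    }

    // The three-argument overload receives the record's Headers and may mutate them.
    @Override
    public byte[] serialize(String topic, Headers headers, String data) {
        String name = isKey ? "key" : "value";
        headers.add(name, name.getBytes());
        return delegate.serialize(topic, data);
    }

    @Override
    public byte[] serialize(String topic, String data) {
        return delegate.serialize(topic, data);
    }
}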
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class RecordQueueTest, method shouldThrowStreamsExceptionWhenValueDeserializationFails.
@Test
public void shouldThrowStreamsExceptionWhenValueDeserializationFails() {
    final byte[] value = Serdes.Long().serializer().serialize("foo", 1L);
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(
        new ConsumerRecord<>("topic", 1, 1, 0L, TimestampType.CREATE_TIME, 0, 0, recordKey, value, new RecordHeaders(), Optional.empty()));

    // The queue's value deserializer cannot decode a serialized Long, so addRawRecords fails.
    final StreamsException exception = assertThrows(StreamsException.class, () -> queue.addRawRecords(records));
    assertThat(exception.getCause(), instanceOf(SerializationException.class));
}
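The StreamsException wraps the underlying SerializationException. Assuming the queue's source node deserializes values as integers (the fixture is not shown on this page, so that is an assumption), the mismatch can be reproduced in isolation: an IntegerDeserializer requires exactly four bytes, while a Long serializes to eight.

import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.Serdes;

public class DeserializationMismatchSketch {
    public static void main(String[] args) {
        byte[] eightBytes = Serdes.Long().serializer().serialize("topic", 1L);
        try {
            // IntegerDeserializer rejects any payload whose length is not 4.
            new IntegerDeserializer().deserialize("topic", eightBytes);
        } catch (SerializationException e) {
            System.out.println("Deserialization failed as expected: " + e.getMessage());
        }
    }
}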
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class RecordQueueTest, method shouldNotThrowStreamsExceptionWhenValueDeserializationFailsWithSkipHandler.
@Test
public void shouldNotThrowStreamsExceptionWhenValueDeserializationFailsWithSkipHandler() {
    final byte[] value = Serdes.Long().serializer().serialize("foo", 1L);
    final ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(
        "topic", 1, 1, 0L, TimestampType.CREATE_TIME, 0, 0, recordKey, value, new RecordHeaders(), Optional.empty());
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(record);

    // With a skipping handler the record is retained as a CorruptedRecord instead of throwing.
    queueThatSkipsDeserializeErrors.addRawRecords(records);
    assertEquals(1, queueThatSkipsDeserializeErrors.size());
    assertEquals(new CorruptedRecord(record), queueThatSkipsDeserializeErrors.poll());
}
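The queueThatSkipsDeserializeErrors fixture is not shown here. In an application, comparable skip-on-error behavior comes from configuring a deserialization exception handler such as Kafka Streams' LogAndContinueExceptionHandler; a minimal configuration sketch, where the application id and bootstrap address are placeholders:

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;

public class SkipOnDeserializationErrorConfig {
    public static Properties props() {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");            // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");      // placeholder
        // Log records that fail deserialization and continue, instead of failing the task.
        props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                  LogAndContinueExceptionHandler.class);
        return props;
    }
}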
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class RecordQueueTest, method shouldNotThrowStreamsExceptionWhenKeyDeserializationFailsWithSkipHandler.
@Test
public void shouldNotThrowStreamsExceptionWhenKeyDeserializationFailsWithSkipHandler() {
    final byte[] key = Serdes.Long().serializer().serialize("foo", 1L);
    final ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(
        "topic", 1, 1, 0L, TimestampType.CREATE_TIME, 0, 0, key, recordValue, new RecordHeaders(), Optional.empty());
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(record);

    // A key that fails to deserialize is likewise wrapped as a CorruptedRecord and kept.
    queueThatSkipsDeserializeErrors.addRawRecords(records);
    assertEquals(1, queueThatSkipsDeserializeErrors.size());
    assertEquals(new CorruptedRecord(record), queueThatSkipsDeserializeErrors.poll());
}
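All of the tests above construct RecordHeaders directly; its core mutable API is small. A self-contained sketch with illustrative header names and values:

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class RecordHeadersSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add("trace-id", "abc123".getBytes());                   // add(String, byte[])
        headers.add(new RecordHeader("trace-id", "def456".getBytes())); // add(Header)

        // lastHeader returns the most recently added header with that key.
        System.out.println(new String(headers.lastHeader("trace-id").value())); // def456

        // headers(key) iterates every header with that key, in insertion order.
        for (Header h : headers.headers("trace-id")) {
            System.out.println(new String(h.value())); // abc123, then def456
        }

        // toArray snapshots all headers.
        System.out.println(headers.toArray().length); // 2
    }
}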