Example use of org.apache.kafka.clients.producer.internals.DefaultPartitioner in the project apache-kafka-on-k8s by banzaicloud:
class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnCloseIfASendFailedWithContinueExceptionHandler.
@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnCloseIfASendFailedWithContinueExceptionHandler() {
final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
@Override
public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
callback.onCompletion(null, new Exception());
return null;
}
}, "test", logContext, new AlwaysContinueProductionExceptionHandler());
collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.close();
}
Another use of org.apache.kafka.clients.producer.internals.DefaultPartitioner in the same project:
class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler.
@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() {
final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
@Override
public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
callback.onCompletion(null, new Exception());
return null;
}
}, "test", logContext, new AlwaysContinueProductionExceptionHandler());
collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
}
Another use of org.apache.kafka.clients.producer.internals.DefaultPartitioner in the same project:
class RecordCollectorTest, method testStreamPartitioner.
@Test
public void testStreamPartitioner() {
final RecordCollectorImpl collector = new RecordCollectorImpl(new MockProducer<>(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer), "RecordCollectorTest-TestStreamPartitioner", new LogContext("RecordCollectorTest-TestStreamPartitioner "), new DefaultProductionExceptionHandler());
collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.send("topic1", "9", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.send("topic1", "27", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.send("topic1", "81", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.send("topic1", "243", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.send("topic1", "28", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.send("topic1", "82", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.send("topic1", "244", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.send("topic1", "245", "0", null, stringSerializer, stringSerializer, streamPartitioner);
final Map<TopicPartition, Long> offsets = collector.offsets();
assertEquals((Long) 4L, offsets.get(new TopicPartition("topic1", 0)));
assertEquals((Long) 2L, offsets.get(new TopicPartition("topic1", 1)));
assertEquals((Long) 0L, offsets.get(new TopicPartition("topic1", 2)));
}
Another use of org.apache.kafka.clients.producer.internals.DefaultPartitioner in the same project:
class RecordCollectorTest, method shouldThrowIfTopicIsUnknownWithDefaultExceptionHandler.
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowIfTopicIsUnknownWithDefaultExceptionHandler() {
final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
@Override
public List<PartitionInfo> partitionsFor(final String topic) {
return Collections.EMPTY_LIST;
}
}, "test", logContext, new DefaultProductionExceptionHandler());
collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
}
Another use of org.apache.kafka.clients.producer.internals.DefaultPartitioner in the same project:
class RecordCollectorTest, method testSpecificPartition.
@Test
public void testSpecificPartition() {
final RecordCollectorImpl collector = new RecordCollectorImpl(new MockProducer<>(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer), "RecordCollectorTest-TestSpecificPartition", new LogContext("RecordCollectorTest-TestSpecificPartition "), new DefaultProductionExceptionHandler());
collector.send("topic1", "999", "0", 0, null, stringSerializer, stringSerializer);
collector.send("topic1", "999", "0", 0, null, stringSerializer, stringSerializer);
collector.send("topic1", "999", "0", 0, null, stringSerializer, stringSerializer);
collector.send("topic1", "999", "0", 1, null, stringSerializer, stringSerializer);
collector.send("topic1", "999", "0", 1, null, stringSerializer, stringSerializer);
collector.send("topic1", "999", "0", 2, null, stringSerializer, stringSerializer);
final Map<TopicPartition, Long> offsets = collector.offsets();
assertEquals((Long) 2L, offsets.get(new TopicPartition("topic1", 0)));
assertEquals((Long) 1L, offsets.get(new TopicPartition("topic1", 1)));
assertEquals((Long) 0L, offsets.get(new TopicPartition("topic1", 2)));
// ignore StreamPartitioner
collector.send("topic1", "999", "0", 0, null, stringSerializer, stringSerializer);
collector.send("topic1", "999", "0", 1, null, stringSerializer, stringSerializer);
collector.send("topic1", "999", "0", 2, null, stringSerializer, stringSerializer);
assertEquals((Long) 3L, offsets.get(new TopicPartition("topic1", 0)));
assertEquals((Long) 2L, offsets.get(new TopicPartition("topic1", 1)));
assertEquals((Long) 1L, offsets.get(new TopicPartition("topic1", 2)));
}
Aggregations