Usage of org.apache.kafka.clients.admin.Admin in project kafka by apache — class ClientUtilsTest, method fetchEndOffsetsShouldRethrowRuntimeExceptionAsStreamsException:
@Test
public void fetchEndOffsetsShouldRethrowRuntimeExceptionAsStreamsException() throws Exception {
// Stub an admin client whose ListOffsets future fails with an unchecked exception.
final Admin admin = EasyMock.createMock(AdminClient.class);
final ListOffsetsResult offsetsResult = EasyMock.createNiceMock(ListOffsetsResult.class);
final KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> offsetsFuture = EasyMock.createMock(KafkaFuture.class);
EasyMock.expect(admin.listOffsets(EasyMock.anyObject())).andStubReturn(offsetsResult);
EasyMock.expect(offsetsResult.all()).andStubReturn(offsetsFuture);
EasyMock.expect(offsetsFuture.get()).andThrow(new RuntimeException());
replay(admin, offsetsResult, offsetsFuture);
// fetchEndOffsets is expected to wrap the RuntimeException in a StreamsException.
assertThrows(StreamsException.class, () -> fetchEndOffsets(PARTITIONS, admin));
verify(admin);
}
Usage of org.apache.kafka.clients.admin.Admin in project kafka by apache — class ClientUtilsTest, method fetchEndOffsetsShouldRethrowExecutionExceptionAsStreamsException:
@Test
public void fetchEndOffsetsShouldRethrowExecutionExceptionAsStreamsException() throws Exception {
// Stub an admin client whose ListOffsets future fails with an ExecutionException.
final Admin admin = EasyMock.createMock(AdminClient.class);
final ListOffsetsResult offsetsResult = EasyMock.createNiceMock(ListOffsetsResult.class);
final KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> offsetsFuture = EasyMock.createMock(KafkaFuture.class);
EasyMock.expect(admin.listOffsets(EasyMock.anyObject())).andStubReturn(offsetsResult);
EasyMock.expect(offsetsResult.all()).andStubReturn(offsetsFuture);
EasyMock.expect(offsetsFuture.get()).andThrow(new ExecutionException(new RuntimeException()));
replay(admin, offsetsResult, offsetsFuture);
// fetchEndOffsets is expected to wrap the ExecutionException in a StreamsException.
assertThrows(StreamsException.class, () -> fetchEndOffsets(PARTITIONS, admin));
verify(admin);
}
Usage of org.apache.kafka.clients.admin.Admin in project kafka by apache — class EosTestDriver, method verify:
/**
 * Verifies end-to-end exactly-once processing: reads back all input and output topics with a
 * READ_COMMITTED consumer and checks that every record was delivered and aggregated correctly.
 * Prints "ALL-RECORDS-DELIVERED" on success; on any failure prints a stack trace followed by
 * "FAILED" and returns early.
 *
 * @param kafka              bootstrap servers for the cluster under test
 * @param withRepartitioning whether the Streams app under test used a repartition topic
 */
public static void verify(final String kafka, final boolean withRepartitioning) {
final Properties props = new Properties();
props.put(ConsumerConfig.CLIENT_ID_CONFIG, "verifier");
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
// READ_COMMITTED hides records from aborted transactions, which is the whole point of EOS verification.
props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT));
try (final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
verifyAllTransactionFinished(consumer, kafka, withRepartitioning);
} catch (final Exception e) {
e.printStackTrace(System.err);
System.out.println("FAILED");
return;
}
final Map<TopicPartition, Long> committedOffsets;
try (final Admin adminClient = Admin.create(props)) {
ensureStreamsApplicationDown(adminClient);
committedOffsets = getCommittedOffsets(adminClient, withRepartitioning);
}
final String[] allInputTopics;
final String[] allOutputTopics;
if (withRepartitioning) {
allInputTopics = new String[] { "data", "repartition" };
allOutputTopics = new String[] { "echo", "min", "sum", "repartition", "max", "cnt" };
} else {
allInputTopics = new String[] { "data" };
allOutputTopics = new String[] { "echo", "min", "sum" };
}
// Input topics are read up to the app's committed offsets; output topics up to broker end offsets.
final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> inputRecordsPerTopicPerPartition =
    readRecords(props, allInputTopics, committedOffsets, withRepartitioning, true);
if (inputRecordsPerTopicPerPartition == null) {
return;
}
final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> outputRecordsPerTopicPerPartition =
    readRecords(props, allOutputTopics, committedOffsets, withRepartitioning, false);
if (outputRecordsPerTopicPerPartition == null) {
return;
}
verifyReceivedAllRecords(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("echo"));
if (withRepartitioning) {
verifyReceivedAllRecords(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("repartition"));
}
verifyMin(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("min"));
verifySum(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("sum"));
if (withRepartitioning) {
verifyMax(inputRecordsPerTopicPerPartition.get("repartition"), outputRecordsPerTopicPerPartition.get("max"));
verifyCnt(inputRecordsPerTopicPerPartition.get("repartition"), outputRecordsPerTopicPerPartition.get("cnt"));
}
// do not modify: required test output
System.out.println("ALL-RECORDS-DELIVERED");
System.out.flush();
}

/**
 * Reads all records of the given topics from the beginning. For input topics the read stops at
 * {@code committedOffsets}; for output topics it stops at the broker's current end offsets.
 * On failure, prints a stack trace followed by "FAILED" (matching the original inline handling)
 * and returns {@code null} so the caller can abort.
 */
private static Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> readRecords(
        final Properties props,
        final String[] topics,
        final Map<TopicPartition, Long> committedOffsets,
        final boolean withRepartitioning,
        final boolean inputTopics) {
try (final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
final List<TopicPartition> partitions = getAllPartitions(consumer, topics);
consumer.assign(partitions);
consumer.seekToBeginning(partitions);
final Map<TopicPartition, Long> readEndOffsets =
    inputTopics ? committedOffsets : consumer.endOffsets(partitions);
return getRecords(consumer, readEndOffsets, withRepartitioning, inputTopics);
} catch (final Exception e) {
e.printStackTrace(System.err);
System.out.println("FAILED");
return null;
}
}
Usage of org.apache.kafka.clients.admin.Admin in project druid by druid-io — class KafkaSupervisorTest, method addSomeEvents:
// Creates the test topic up front (so its partition count is deterministic) and then
// transactionally publishes "event-0" .. "event-(n-1)" to every partition.
private void addSomeEvents(int numEventsPerPartition) throws Exception {
// create topic manually
try (Admin admin = kafkaServer.newAdminClient()) {
admin.createTopics(Collections.singletonList(new NewTopic(topic, NUM_PARTITIONS, (short) 1))).all().get();
}
// All events go out inside one committed transaction.
try (final KafkaProducer<byte[], byte[]> producer = kafkaServer.newProducer()) {
producer.initTransactions();
producer.beginTransaction();
for (int partition = 0; partition < NUM_PARTITIONS; partition++) {
for (int eventIdx = 0; eventIdx < numEventsPerPartition; eventIdx++) {
final byte[] payload = StringUtils.toUtf8(StringUtils.format("event-%d", eventIdx));
producer.send(new ProducerRecord<>(topic, partition, null, payload)).get();
}
}
producer.commitTransaction();
}
}
Usage of org.apache.kafka.clients.admin.Admin in project druid by druid-io — class KafkaSupervisorTest, method addMoreEvents:
/**
 * Grows {@code topic} to {@code numPartitions} partitions, then transactionally writes
 * {@code numEventsPerPartition} events ("event-0" .. "event-(n-1)") to each of the newly
 * added partitions (indices {@code NUM_PARTITIONS} .. {@code numPartitions - 1}).
 *
 * @param numEventsPerPartition number of events to write to each new partition
 * @param numPartitions         total partition count to grow the topic to
 */
private void addMoreEvents(final int numEventsPerPartition, final int numPartitions) throws Exception {
try (Admin admin = kafkaServer.newAdminClient()) {
admin.createPartitions(Collections.singletonMap(topic, NewPartitions.increaseTo(numPartitions))).all().get();
}
try (final KafkaProducer<byte[], byte[]> kafkaProducer = kafkaServer.newProducer()) {
kafkaProducer.initTransactions();
kafkaProducer.beginTransaction();
// Only the newly created partitions receive events; existing ones are left untouched.
for (int i = NUM_PARTITIONS; i < numPartitions; i++) {
for (int j = 0; j < numEventsPerPartition; j++) {
// send(...).get() blocks so any broker-side failure surfaces immediately.
kafkaProducer.send(new ProducerRecord<>(topic, i, null, StringUtils.toUtf8(StringUtils.format("event-%d", j)))).get();
}
}
kafkaProducer.commitTransaction();
}
}
Aggregations