Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
Class KafkaOffsetBackingStoreTest, method testGetSet.
@Test
public void testGetSet() throws Exception {
    expectConfigure();
    expectStart(Collections.EMPTY_LIST);
    expectStop();

    // First get() against an empty store
    final Capture<Callback<Void>> firstGetReadToEndCallback = EasyMock.newCapture();
    storeLog.readToEnd(EasyMock.capture(firstGetReadToEndCallback));
    PowerMock.expectLastCall().andAnswer(new IAnswer<Object>() {
        @Override
        public Object answer() throws Throwable {
            firstGetReadToEndCallback.getValue().onCompletion(null, null);
            return null;
        }
    });

    // Set offsets
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    storeLog.send(EasyMock.aryEq(TP0_KEY.array()), EasyMock.aryEq(TP0_VALUE.array()), EasyMock.capture(callback0));
    PowerMock.expectLastCall();
    Capture<org.apache.kafka.clients.producer.Callback> callback1 = EasyMock.newCapture();
    storeLog.send(EasyMock.aryEq(TP1_KEY.array()), EasyMock.aryEq(TP1_VALUE.array()), EasyMock.capture(callback1));
    PowerMock.expectLastCall();

    // Second get() should get the produced data and return the new values
    final Capture<Callback<Void>> secondGetReadToEndCallback = EasyMock.newCapture();
    storeLog.readToEnd(EasyMock.capture(secondGetReadToEndCallback));
    PowerMock.expectLastCall().andAnswer(new IAnswer<Object>() {
        @Override
        public Object answer() throws Throwable {
            capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY.array(), TP0_VALUE.array()));
            capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY.array(), TP1_VALUE.array()));
            secondGetReadToEndCallback.getValue().onCompletion(null, null);
            return null;
        }
    });

    // Third get() should pick up data produced by someone else and return those values
    final Capture<Callback<Void>> thirdGetReadToEndCallback = EasyMock.newCapture();
    storeLog.readToEnd(EasyMock.capture(thirdGetReadToEndCallback));
    PowerMock.expectLastCall().andAnswer(new IAnswer<Object>() {
        @Override
        public Object answer() throws Throwable {
            capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY.array(), TP0_VALUE_NEW.array()));
            capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY.array(), TP1_VALUE_NEW.array()));
            thirdGetReadToEndCallback.getValue().onCompletion(null, null);
            return null;
        }
    });

    PowerMock.replayAll();

    store.configure(DEFAULT_DISTRIBUTED_CONFIG);
    store.start();

    // Getting from empty store should return nulls
    final AtomicBoolean getInvokedAndPassed = new AtomicBoolean(false);
    store.get(Arrays.asList(TP0_KEY, TP1_KEY), new Callback<Map<ByteBuffer, ByteBuffer>>() {
        @Override
        public void onCompletion(Throwable error, Map<ByteBuffer, ByteBuffer> result) {
            // Since we didn't read them yet, these will be null
            assertEquals(null, result.get(TP0_KEY));
            assertEquals(null, result.get(TP1_KEY));
            getInvokedAndPassed.set(true);
        }
    }).get(10000, TimeUnit.MILLISECONDS);
    assertTrue(getInvokedAndPassed.get());

    // Set some offsets
    Map<ByteBuffer, ByteBuffer> toSet = new HashMap<>();
    toSet.put(TP0_KEY, TP0_VALUE);
    toSet.put(TP1_KEY, TP1_VALUE);
    final AtomicBoolean invoked = new AtomicBoolean(false);
    Future<Void> setFuture = store.set(toSet, new Callback<Void>() {
        @Override
        public void onCompletion(Throwable error, Void result) {
            invoked.set(true);
        }
    });
    assertFalse(setFuture.isDone());
    // Out-of-order callbacks shouldn't matter; all of them must still be invoked before
    // the store's set callback is invoked
    callback1.getValue().onCompletion(null, null);
    assertFalse(invoked.get());
    callback0.getValue().onCompletion(null, null);
    setFuture.get(10000, TimeUnit.MILLISECONDS);
    assertTrue(invoked.get());

    // Getting data should read to end of our published data and return it
    final AtomicBoolean secondGetInvokedAndPassed = new AtomicBoolean(false);
    store.get(Arrays.asList(TP0_KEY, TP1_KEY), new Callback<Map<ByteBuffer, ByteBuffer>>() {
        @Override
        public void onCompletion(Throwable error, Map<ByteBuffer, ByteBuffer> result) {
            assertEquals(TP0_VALUE, result.get(TP0_KEY));
            assertEquals(TP1_VALUE, result.get(TP1_KEY));
            secondGetInvokedAndPassed.set(true);
        }
    }).get(10000, TimeUnit.MILLISECONDS);
    assertTrue(secondGetInvokedAndPassed.get());

    // Third get() should read to end again and return the new values produced by someone else
    final AtomicBoolean thirdGetInvokedAndPassed = new AtomicBoolean(false);
    store.get(Arrays.asList(TP0_KEY, TP1_KEY), new Callback<Map<ByteBuffer, ByteBuffer>>() {
        @Override
        public void onCompletion(Throwable error, Map<ByteBuffer, ByteBuffer> result) {
            assertEquals(TP0_VALUE_NEW, result.get(TP0_KEY));
            assertEquals(TP1_VALUE_NEW, result.get(TP1_KEY));
            thirdGetInvokedAndPassed.set(true);
        }
    }).get(10000, TimeUnit.MILLISECONDS);
    assertTrue(thirdGetInvokedAndPassed.get());

    store.stop();

    PowerMock.verifyAll();
}
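The ConsumerRecord instances handed to capturedConsumedCallback in the mocked read-to-end answers above use the long positional constructor of the Kafka 1.x client this fork is based on, whose argument order is easy to misread. A minimal sketch, assuming that constructor, with each position labeled; TOPIC, TP0_KEY and TP0_VALUE are fixtures of the surrounding test class:

// Sketch only: labels the positional arguments of the Kafka 1.x ConsumerRecord constructor used above.
ConsumerRecord<byte[], byte[]> labeled = new ConsumerRecord<>(
        TOPIC,                     // topic
        0,                         // partition
        0L,                        // offset within that partition
        0L,                        // record timestamp
        TimestampType.CREATE_TIME, // timestamp type
        0L,                        // checksum (legacy field)
        0,                         // serialized key size
        0,                         // serialized value size
        TP0_KEY.array(),           // key bytes
        TP0_VALUE.array());        // value bytes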
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
Class TopologyTestDriverTest, method shouldProcessConsumerRecordList.
@Test
public void shouldProcessConsumerRecordList() {
    testDriver = new TopologyTestDriver(setupMultipleSourceTopology(SOURCE_TOPIC_1, SOURCE_TOPIC_2), config);
    final List<Record> processedRecords1 = mockProcessors.get(0).processedRecords;
    final List<Record> processedRecords2 = mockProcessors.get(1).processedRecords;

    final List<ConsumerRecord<byte[], byte[]>> testRecords = new ArrayList<>(2);
    testRecords.add(consumerRecord1);
    testRecords.add(consumerRecord2);
    testDriver.pipeInput(testRecords);

    assertEquals(1, processedRecords1.size());
    assertEquals(1, processedRecords2.size());

    Record record = processedRecords1.get(0);
    Record expectedResult = new Record(consumerRecord1);
    expectedResult.offset = 0L;
    assertThat(record, equalTo(expectedResult));

    record = processedRecords2.get(0);
    expectedResult = new Record(consumerRecord2);
    expectedResult.offset = 0L;
    assertThat(record, equalTo(expectedResult));
}
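The fixtures consumerRecord1 and consumerRecord2 are created elsewhere in the test class. As a minimal sketch, assuming string payloads, the five-argument ConsumerRecord constructor, and org.apache.kafka.common.serialization.StringSerializer plus java.util.Collections on the classpath, an equivalent record for SOURCE_TOPIC_1 could be built and piped in like this:

// Sketch only: builds a byte[]-keyed record comparable to the test fixtures and feeds it to the driver.
final StringSerializer serializer = new StringSerializer();
final ConsumerRecord<byte[], byte[]> sketchRecord = new ConsumerRecord<>(
        SOURCE_TOPIC_1,                                  // topic
        0,                                               // partition
        0L,                                              // offset
        serializer.serialize(SOURCE_TOPIC_1, "key1"),    // serialized key
        serializer.serialize(SOURCE_TOPIC_1, "value1")); // serialized value
testDriver.pipeInput(Collections.singletonList(sketchRecord));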
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
Class EosTestDriver, method getRecords.
private static Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> getRecords(final KafkaConsumer<byte[], byte[]> consumer,
                                                                                                 final Map<TopicPartition, Long> readEndOffsets,
                                                                                                 final boolean withRepartitioning,
                                                                                                 final boolean isInputTopic) {
    System.err.println("read end offset: " + readEndOffsets);
    final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> recordPerTopicPerPartition = new HashMap<>();
    final Map<TopicPartition, Long> maxReceivedOffsetPerPartition = new HashMap<>();
    final Map<TopicPartition, Long> maxConsumerPositionPerPartition = new HashMap<>();

    long maxWaitTime = System.currentTimeMillis() + MAX_IDLE_TIME_MS;
    boolean allRecordsReceived = false;
    while (!allRecordsReceived && System.currentTimeMillis() < maxWaitTime) {
        final ConsumerRecords<byte[], byte[]> receivedRecords = consumer.poll(100);
        for (final ConsumerRecord<byte[], byte[]> record : receivedRecords) {
            maxWaitTime = System.currentTimeMillis() + MAX_IDLE_TIME_MS;
            final TopicPartition tp = new TopicPartition(record.topic(), record.partition());
            maxReceivedOffsetPerPartition.put(tp, record.offset());
            final long readEndOffset = readEndOffsets.get(tp);
            if (record.offset() < readEndOffset) {
                addRecord(record, recordPerTopicPerPartition, withRepartitioning);
            } else if (!isInputTopic) {
                throw new RuntimeException("FAIL: did receive more records than expected for " + tp + " (expected EOL offset: " + readEndOffset + "; current offset: " + record.offset() + ")");
            }
        }
        for (final TopicPartition tp : readEndOffsets.keySet()) {
            maxConsumerPositionPerPartition.put(tp, consumer.position(tp));
            if (consumer.position(tp) >= readEndOffsets.get(tp)) {
                consumer.pause(Collections.singletonList(tp));
            }
        }
        allRecordsReceived = consumer.paused().size() == readEndOffsets.keySet().size();
    }

    if (!allRecordsReceived) {
        System.err.println("Paused partitions (i.e., all data received): " + consumer.paused());
        System.err.println("Max received offset per partition: " + maxReceivedOffsetPerPartition);
        System.err.println("Max consumer position per partition: " + maxConsumerPositionPerPartition);
        throw new RuntimeException("FAIL: did not receive all records after " + (MAX_IDLE_TIME_MS / 1000) + " sec idle time.");
    }
    return recordPerTopicPerPartition;
}
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
Class EosTestDriver, method verify.
public static void verify(final String kafka, final boolean withRepartitioning) {
    ensureStreamsApplicationDown(kafka);
    final Map<TopicPartition, Long> committedOffsets = getCommittedOffsets(kafka, withRepartitioning);

    final Properties props = new Properties();
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "verifier");
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT));

    final String[] allInputTopics;
    final String[] allOutputTopics;
    if (withRepartitioning) {
        allInputTopics = new String[] { "data", "repartition" };
        allOutputTopics = new String[] { "echo", "min", "sum", "repartition", "max", "cnt" };
    } else {
        allInputTopics = new String[] { "data" };
        allOutputTopics = new String[] { "echo", "min", "sum" };
    }

    final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> inputRecordsPerTopicPerPartition;
    try (final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
        final List<TopicPartition> partitions = getAllPartitions(consumer, allInputTopics);
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        inputRecordsPerTopicPerPartition = getRecords(consumer, committedOffsets, withRepartitioning, true);
    } catch (final Exception e) {
        e.printStackTrace(System.err);
        System.out.println("FAILED");
        return;
    }

    final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> outputRecordsPerTopicPerPartition;
    try (final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
        final List<TopicPartition> partitions = getAllPartitions(consumer, allOutputTopics);
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        outputRecordsPerTopicPerPartition = getRecords(consumer, consumer.endOffsets(partitions), withRepartitioning, false);
    } catch (final Exception e) {
        e.printStackTrace(System.err);
        System.out.println("FAILED");
        return;
    }

    verifyReceivedAllRecords(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("echo"));
    if (withRepartitioning) {
        verifyReceivedAllRecords(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("repartition"));
    }
    verifyMin(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("min"));
    verifySum(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("sum"));
    if (withRepartitioning) {
        verifyMax(inputRecordsPerTopicPerPartition.get("repartition"), outputRecordsPerTopicPerPartition.get("max"));
        verifyCnt(inputRecordsPerTopicPerPartition.get("repartition"), outputRecordsPerTopicPerPartition.get("cnt"));
    }

    try (final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
        final List<TopicPartition> partitions = getAllPartitions(consumer, allOutputTopics);
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        verifyAllTransactionFinished(consumer, kafka, withRepartitioning);
    } catch (final Exception e) {
        e.printStackTrace(System.err);
        System.out.println("FAILED");
        return;
    }

    // do not modify: required test output
    System.out.println("ALL-RECORDS-DELIVERED");
    System.out.flush();
}
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
Class EosTestDriver, method verifyMin.
private static void verifyMin(final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> inputPerTopicPerPartition,
                              final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> minPerTopicPerPartition) {
    final StringDeserializer stringDeserializer = new StringDeserializer();
    final IntegerDeserializer integerDeserializer = new IntegerDeserializer();
    final HashMap<String, Integer> currentMinPerKey = new HashMap<>();

    for (final Map.Entry<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords : minPerTopicPerPartition.entrySet()) {
        final TopicPartition inputTopicPartition = new TopicPartition("data", partitionRecords.getKey().partition());
        final List<ConsumerRecord<byte[], byte[]>> partitionInput = inputPerTopicPerPartition.get(inputTopicPartition);
        final List<ConsumerRecord<byte[], byte[]>> partitionMin = partitionRecords.getValue();
        if (partitionInput.size() != partitionMin.size()) {
            throw new RuntimeException("Result verification failed: expected " + partitionInput.size() + " records for " + partitionRecords.getKey() + " but received " + partitionMin.size());
        }

        final Iterator<ConsumerRecord<byte[], byte[]>> inputRecords = partitionInput.iterator();
        for (final ConsumerRecord<byte[], byte[]> receivedRecord : partitionMin) {
            final ConsumerRecord<byte[], byte[]> input = inputRecords.next();
            final String receivedKey = stringDeserializer.deserialize(receivedRecord.topic(), receivedRecord.key());
            final int receivedValue = integerDeserializer.deserialize(receivedRecord.topic(), receivedRecord.value());
            final String key = stringDeserializer.deserialize(input.topic(), input.key());
            final int value = integerDeserializer.deserialize(input.topic(), input.value());

            Integer min = currentMinPerKey.get(key);
            if (min == null) {
                min = value;
            } else {
                min = Math.min(min, value);
            }
            currentMinPerKey.put(key, min);

            if (!receivedKey.equals(key) || receivedValue != min) {
                throw new RuntimeException("Result verification failed for " + receivedRecord + " expected <" + key + "," + min + "> but was <" + receivedKey + "," + receivedValue + ">");
            }
        }
    }
}
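A small note on the running-minimum bookkeeping above: in Java 8+ the null check and the put can be collapsed into a single Map.merge call. This is only an equivalent alternative sketch, not how the project's code is written; key and value refer to the deserialized input key and value from the inner loop above:

// Equivalent to the if/else block above: keep the smallest value seen so far per key.
final int runningMin = currentMinPerKey.merge(key, value, Math::min);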