Use of org.apache.kafka.clients.consumer.ConsumerRecords in project open-kilda by telstra.
Class VersioningConsumerInterceptor, method onConsume:
@Override
public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
    if (!watchDog.isConnectedAndValidated()) {
        if (isZooKeeperConnectTimeoutPassed()) {
            log.error("Component {} with id {} tries to reconnect to ZooKeeper with connection string: {}",
                    componentName, runId, connectionString);
            cantConnectToZooKeeperTimestamp = Instant.now();
        }
        watchDog.safeRefreshConnection();
    }
    Map<TopicPartition, List<ConsumerRecord<K, V>>> filteredRecordMap = new HashMap<>();
    for (TopicPartition partition : records.partitions()) {
        List<ConsumerRecord<K, V>> filteredRecords = new ArrayList<>();
        for (ConsumerRecord<K, V> record : records.records(partition)) {
            if (!checkRecordVersion(record)) {
                continue;
            }
            if (!checkDeserializationError(record.value())) {
                filteredRecords.add(record);
            }
        }
        if (!filteredRecords.isEmpty()) {
            filteredRecordMap.put(partition, filteredRecords);
        }
    }
    return new ConsumerRecords<>(filteredRecordMap);
}
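The interceptor only runs once it is registered on the consumer. Below is a minimal sketch, assuming standard Kafka client wiring, of how an interceptor like this is plugged in through the interceptor.classes property; the bootstrap servers, group id, and deserializers are placeholder values, and open-kilda's own configure() keys (ZooKeeper connection string, component name, run id) are not shown.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class InterceptorWiringExample {
    public static KafkaConsumer<String, String> buildConsumer() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");             // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Kafka instantiates the listed classes and invokes onConsume() on every batch returned by poll(),
        // which is where the version filtering shown above takes effect.
        props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, VersioningConsumerInterceptor.class.getName());
        return new KafkaConsumer<>(props);
    }
}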
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project open-kilda by telstra.
Class VersioningConsumerInterceptorTest, method severalHeadersTest:
@Test
public void severalHeadersTest() {
    VersioningConsumerInterceptor<String, String> interceptor = createInterceptor();
    interceptor.handle(VERSION_1);
    // a record with two identical version headers
    ConsumerRecord<String, String> record = new ConsumerRecord<>(TOPIC, PARTITION_1, 0L, KEY_1, VALUE_1);
    addVersion(record, VERSION_1);
    addVersion(record, VERSION_1);
    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    recordMap.put(new TopicPartition(TOPIC, PARTITION_1), Lists.newArrayList(record));
    ConsumerRecords<String, String> records = new ConsumerRecords<>(recordMap);
    ConsumerRecords<String, String> result = interceptor.onConsume(records);
    // the interceptor must skip the record because it carries two version headers instead of exactly one
    Assert.assertEquals(0, result.count());
}
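The addVersion helper is not shown in the snippet above. A plausible sketch, assuming it simply appends a version header through the record's public Headers API; the header key name used here is a placeholder, not necessarily the key open-kilda uses.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.consumer.ConsumerRecord;

final class VersionHeaderHelper {
    // Placeholder header key; the real key checked by the interceptor may differ.
    private static final String VERSION_HEADER_KEY = "kafka.messaging.version";

    static void addVersion(ConsumerRecord<String, String> record, String version) {
        // ConsumerRecord exposes mutable Headers, so a test can attach headers after construction.
        record.headers().add(VERSION_HEADER_KEY, version.getBytes(StandardCharsets.UTF_8));
    }
}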
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project apache-kafka-on-k8s by banzaicloud.
Class WorkerSinkTaskThreadedTest, method expectRebalanceDuringPoll:
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectRebalanceDuringPoll() throws Exception {
    final List<TopicPartition> partitions = Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);
    final long startOffset = 40L;
    final Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, startOffset);
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(1L);
            sinkTaskContext.getValue().offset(offsets);
            rebalanceListener.getValue().onPartitionsAssigned(partitions);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                            TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(partitions);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION, startOffset);
    EasyMock.expectLastCall();
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
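The helper returns the IExpectationSetters of the final sinkTask.put() expectation, so the calling test decides how the task reacts to the record delivered around the rebalance. A minimal, illustrative usage sketch, not taken from the actual test class:

// Illustrative only: chain the desired put() behavior onto the setters returned by the helper,
// e.g. pause the reassigned partition from inside put().
expectRebalanceDuringPoll().andAnswer(new IAnswer<Object>() {
    @Override
    public Object answer() {
        sinkTaskContext.getValue().pause(TOPIC_PARTITION);
        return null;
    }
});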
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project apache-kafka-on-k8s by banzaicloud.
Class WorkerSinkTaskThreadedTest, method expectPolls:
// Note that this can only be called once per test currently
private Capture<Collection<SinkRecord>> expectPolls(final long pollDelayMs) throws Exception {
    // Stub out all the consumer stream/iterator responses, which we just want to verify occur,
    // but don't care about the exact details here.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andStubAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(pollDelayMs);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                            TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY)).anyTimes();
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE)).anyTimes();
    final Capture<SinkRecord> recordCapture = EasyMock.newCapture();
    EasyMock.expect(transformationChain.apply(EasyMock.capture(recordCapture))).andAnswer(new IAnswer<SinkRecord>() {
        @Override
        public SinkRecord answer() {
            return recordCapture.getValue();
        }
    }).anyTimes();
    Capture<Collection<SinkRecord>> capturedRecords = EasyMock.newCapture(CaptureType.ALL);
    sinkTask.put(EasyMock.capture(capturedRecords));
    EasyMock.expectLastCall().anyTimes();
    return capturedRecords;
}
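Because the capture is created with CaptureType.ALL, every put() invocation is recorded, so a test can inspect all delivered batches after the worker loop has run. A minimal, illustrative usage sketch; the assertions are assumptions about what a caller might verify.

// Illustrative only: examine everything the task received across all put() calls.
Capture<Collection<SinkRecord>> captured = expectPolls(1L);
// ... replay the mocks, let the worker iterate, then verify ...
for (Collection<SinkRecord> batch : captured.getValues()) {
    for (SinkRecord sinkRecord : batch) {
        Assert.assertEquals(TOPIC, sinkRecord.topic());
        Assert.assertEquals(Integer.valueOf(PARTITION), sinkRecord.kafkaPartition());
    }
}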
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project apache-kafka-on-k8s by banzaicloud.
Class WorkerSinkTaskThreadedTest, method expectOnePoll:
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectOnePoll() {
    // Currently the SinkTask's put() method will not be invoked unless we provide some data, so instead of
    // returning empty data, we return one record. The expectation is that the data will be ignored by the
    // response behavior specified using the return value of this method.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(1L);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                    new TopicPartition(TOPIC, PARTITION),
                    Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                            TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
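As the comment inside the method explains, the caller supplies the response behavior by chaining onto the returned setters. A minimal, illustrative sketch; the exception type and message are assumptions about what a caller might choose.

// Illustrative only: make the single put() call fail with a retriable error so the test can
// verify that the worker retries the batch instead of committing its offset.
expectOnePoll().andThrow(new RetriableException("simulated put() failure"));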