Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
From the class WorkerSinkTaskThreadedTest, the method expectOnePoll:
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectOnePoll() {
    // Currently the SinkTask's put() method will not be invoked unless we provide some data, so instead of
    // returning empty data, we return one record. The expectation is that the data will be ignored by the
    // response behavior specified using the return value of this method.
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        // "Sleep" so time will progress
        time.sleep(1L);
        ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(
            Collections.singletonMap(
                new TopicPartition(TOPIC, PARTITION),
                Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned, TIMESTAMP, TIMESTAMP_TYPE,
                    0, 0, RAW_KEY, RAW_VALUE, new RecordHeaders(), Optional.empty()))));
        recordsReturned++;
        return records;
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, emptyHeaders(), RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, emptyHeaders(), RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
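The single-record ConsumerRecords built inside the andAnswer() callback is the same pattern the other poll expectations in this test rely on. As a rough standalone sketch (not the test's own helper), the construction can be factored out as follows, reusing the eleven-argument ConsumerRecord constructor from recent client versions, where the leader epoch is passed as an Optional:

import java.util.Collections;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;

final class SingleRecordFixtures {
    // Wraps exactly one record for the given partition into a ConsumerRecords instance,
    // mirroring what the mocked poll() above returns on each invocation.
    static ConsumerRecords<byte[], byte[]> singleRecord(String topic, int partition, long offset,
                                                        byte[] key, byte[] value) {
        ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(
            topic, partition, offset,
            0L, TimestampType.CREATE_TIME,          // timestamp and timestamp type (test values)
            key.length, value.length,               // serialized key/value sizes
            key, value,
            new RecordHeaders(), Optional.empty()); // no headers, unknown leader epoch
        return new ConsumerRecords<>(Collections.singletonMap(
            new TopicPartition(topic, partition), Collections.singletonList(record)));
    }
}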
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
From the class WorkerSinkTaskThreadedTest, the method expectRebalanceDuringPoll:
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectRebalanceDuringPoll() {
    final List<TopicPartition> partitions = Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);
    final long startOffset = 40L;
    final Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, startOffset);
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        // "Sleep" so time will progress
        time.sleep(1L);
        sinkTaskContext.getValue().offset(offsets);
        rebalanceListener.getValue().onPartitionsAssigned(partitions);
        ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(
            Collections.singletonMap(
                new TopicPartition(TOPIC, PARTITION),
                Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned, TIMESTAMP, TIMESTAMP_TYPE,
                    0, 0, RAW_KEY, RAW_VALUE, new RecordHeaders(), Optional.empty()))));
        recordsReturned++;
        return records;
    });
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(partitions);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION, startOffset);
    EasyMock.expectLastCall();
    EasyMock.expect(keyConverter.toConnectData(TOPIC, emptyHeaders(), RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, emptyHeaders(), RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
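The rebalanceListener.getValue() and sinkTaskContext.getValue() calls imply EasyMock Capture fields that are filled in elsewhere in the test class; that wiring is not part of this excerpt. A hedged sketch of how such a capture is typically attached to the subscribe expectation (the consumer mock and TOPIC constant are assumed from the surrounding test; imports would come from org.easymock.Capture, org.easymock.EasyMock, and org.apache.kafka.clients.consumer.ConsumerRebalanceListener):

// Assumed field on the test class; EasyMock populates it when subscribe() is called on the mock.
private final Capture<ConsumerRebalanceListener> rebalanceListener = EasyMock.newCapture();

private void expectSubscription() {
    // Capturing the listener lets the mocked poll() above invoke onPartitionsAssigned()
    // mid-poll, which is how the test simulates a rebalance completing during poll.
    consumer.subscribe(EasyMock.eq(Collections.singletonList(TOPIC)), EasyMock.capture(rebalanceListener));
    EasyMock.expectLastCall();
}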
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
From the class MirrorSourceTaskTest, the method testPoll:
@Test
public void testPoll() {
    // Create a consumer mock
    byte[] key1 = "abc".getBytes();
    byte[] value1 = "fgh".getBytes();
    byte[] key2 = "123".getBytes();
    byte[] value2 = "456".getBytes();
    List<ConsumerRecord<byte[], byte[]>> consumerRecordsList = new ArrayList<>();
    String topicName = "test";
    String headerKey = "key";
    RecordHeaders headers = new RecordHeaders(new Header[] {
        new RecordHeader(headerKey, "value".getBytes())
    });
    consumerRecordsList.add(new ConsumerRecord<>(topicName, 0, 0, System.currentTimeMillis(), TimestampType.CREATE_TIME,
        key1.length, value1.length, key1, value1, headers, Optional.empty()));
    consumerRecordsList.add(new ConsumerRecord<>(topicName, 1, 1, System.currentTimeMillis(), TimestampType.CREATE_TIME,
        key2.length, value2.length, key2, value2, headers, Optional.empty()));
    ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(
        Collections.singletonMap(new TopicPartition(topicName, 0), consumerRecordsList));
    @SuppressWarnings("unchecked")
    KafkaConsumer<byte[], byte[]> consumer = mock(KafkaConsumer.class);
    when(consumer.poll(any())).thenReturn(consumerRecords);
    MirrorMetrics metrics = mock(MirrorMetrics.class);
    String sourceClusterName = "cluster1";
    ReplicationPolicy replicationPolicy = new DefaultReplicationPolicy();
    MirrorSourceTask mirrorSourceTask = new MirrorSourceTask(consumer, metrics, sourceClusterName, replicationPolicy, 50);
    List<SourceRecord> sourceRecords = mirrorSourceTask.poll();
    assertEquals(2, sourceRecords.size());
    for (int i = 0; i < sourceRecords.size(); i++) {
        SourceRecord sourceRecord = sourceRecords.get(i);
        ConsumerRecord<byte[], byte[]> consumerRecord = consumerRecordsList.get(i);
        assertEquals(consumerRecord.key(), sourceRecord.key(), "consumerRecord key does not equal sourceRecord key");
        assertEquals(consumerRecord.value(), sourceRecord.value(), "consumerRecord value does not equal sourceRecord value");
        // We expect that the topic name will be based on the replication policy currently used
        assertEquals(replicationPolicy.formatRemoteTopic(sourceClusterName, topicName), sourceRecord.topic(),
            "topicName not the same as the current replicationPolicy");
        // We expect that MirrorMaker will keep the same partition assignment
        assertEquals(consumerRecord.partition(), sourceRecord.kafkaPartition().intValue(),
            "partition assignment not the same as the current replicationPolicy");
        // Check header values
        List<Header> expectedHeaders = new ArrayList<>();
        consumerRecord.headers().forEach(expectedHeaders::add);
        List<org.apache.kafka.connect.header.Header> taskHeaders = new ArrayList<>();
        sourceRecord.headers().forEach(taskHeaders::add);
        compareHeaders(expectedHeaders, taskHeaders);
    }
}
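compareHeaders is a private helper of the test class that is not included in this excerpt. A plausible sketch, assuming the source task forwards the raw header bytes unchanged into the Connect headers (the assertion messages below are illustrative, not the originals):

private void compareHeaders(List<Header> expectedHeaders, List<org.apache.kafka.connect.header.Header> taskHeaders) {
    assertEquals(expectedHeaders.size(), taskHeaders.size(), "unexpected header count");
    for (int i = 0; i < expectedHeaders.size(); i++) {
        Header expected = expectedHeaders.get(i);
        org.apache.kafka.connect.header.Header actual = taskHeaders.get(i);
        assertEquals(expected.key(), actual.key(), "header key mismatch");
        // Compare the payloads as byte arrays, since Header.value() and the Connect
        // header value are both expected to hold the raw bytes here.
        assertArrayEquals(expected.value(), (byte[]) actual.value(), "header value mismatch");
    }
}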
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
From the class TransactionalMessageCopier, the method runEventLoop:
public static void runEventLoop(Namespace parsedArgs) {
    final String transactionalId = parsedArgs.getString("transactionalId");
    final String outputTopic = parsedArgs.getString("outputTopic");
    String consumerGroup = parsedArgs.getString("consumerGroup");
    final KafkaProducer<String, String> producer = createProducer(parsedArgs);
    final KafkaConsumer<String, String> consumer = createConsumer(parsedArgs);
    final AtomicLong remainingMessages = new AtomicLong(
        parsedArgs.getInt("maxMessages") == -1 ? Long.MAX_VALUE : parsedArgs.getInt("maxMessages"));
    boolean groupMode = parsedArgs.getBoolean("groupMode");
    String topicName = parsedArgs.getString("inputTopic");
    final AtomicLong numMessagesProcessedSinceLastRebalance = new AtomicLong(0);
    final AtomicLong totalMessageProcessed = new AtomicLong(0);
    if (groupMode) {
        consumer.subscribe(Collections.singleton(topicName), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                remainingMessages.set(partitions.stream()
                    .mapToLong(partition -> messagesRemaining(consumer, partition)).sum());
                numMessagesProcessedSinceLastRebalance.set(0);
                // We use message cap for remaining here as the remainingMessages are not set yet.
                System.out.println(statusAsJson(totalMessageProcessed.get(),
                    numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId, "RebalanceComplete"));
            }
        });
    } else {
        TopicPartition inputPartition = new TopicPartition(topicName, parsedArgs.getInt("inputPartition"));
        consumer.assign(singleton(inputPartition));
        remainingMessages.set(Math.min(messagesRemaining(consumer, inputPartition), remainingMessages.get()));
    }
    final boolean enableRandomAborts = parsedArgs.getBoolean("enableRandomAborts");
    producer.initTransactions();
    final AtomicBoolean isShuttingDown = new AtomicBoolean(false);
    Exit.addShutdownHook("transactional-message-copier-shutdown-hook", () -> {
        isShuttingDown.set(true);
        consumer.wakeup();
        System.out.println(shutDownString(totalMessageProcessed.get(),
            numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId));
    });
    final boolean useGroupMetadata = parsedArgs.getBoolean("useGroupMetadata");
    try {
        Random random = new Random();
        while (!isShuttingDown.get() && remainingMessages.get() > 0) {
            System.out.println(statusAsJson(totalMessageProcessed.get(),
                numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId, "ProcessLoop"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                try {
                    producer.beginTransaction();
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(producerRecordFromConsumerRecord(outputTopic, record));
                    }
                    long messagesSentWithinCurrentTxn = records.count();
                    ConsumerGroupMetadata groupMetadata = useGroupMetadata
                        ? consumer.groupMetadata()
                        : new ConsumerGroupMetadata(consumerGroup);
                    producer.sendOffsetsToTransaction(consumerPositions(consumer), groupMetadata);
                    if (enableRandomAborts && random.nextInt() % 3 == 0) {
                        abortTransactionAndResetPosition(producer, consumer);
                    } else {
                        producer.commitTransaction();
                        remainingMessages.getAndAdd(-messagesSentWithinCurrentTxn);
                        numMessagesProcessedSinceLastRebalance.getAndAdd(messagesSentWithinCurrentTxn);
                        totalMessageProcessed.getAndAdd(messagesSentWithinCurrentTxn);
                    }
                } catch (ProducerFencedException e) {
                    throw new KafkaException(String.format("The transactional.id %s has been claimed by another process", transactionalId), e);
                } catch (KafkaException e) {
                    log.debug("Aborting transaction after catching exception", e);
                    abortTransactionAndResetPosition(producer, consumer);
                }
            }
        }
    } catch (WakeupException e) {
        if (!isShuttingDown.get()) {
            // Let the exception propagate if it was not raised as part of shutdown.
            throw e;
        }
    } finally {
        Utils.closeQuietly(producer, "producer");
        Utils.closeQuietly(consumer, "consumer");
    }
}
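consumerPositions is a helper of TransactionalMessageCopier that is not shown above. Following the usual pattern for sendOffsetsToTransaction, it would collect the consumer's current position for every assigned partition; a sketch along those lines:

// Sketch of a positions helper: for each assigned partition, record the offset of the next
// record the consumer would read, which is the value sendOffsetsToTransaction expects to commit.
private static Map<TopicPartition, OffsetAndMetadata> consumerPositions(KafkaConsumer<String, String> consumer) {
    Map<TopicPartition, OffsetAndMetadata> positions = new HashMap<>();
    for (TopicPartition topicPartition : consumer.assignment()) {
        positions.put(topicPartition, new OffsetAndMetadata(consumer.position(topicPartition)));
    }
    return positions;
}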
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
From the class EmbeddedKafkaCluster, the method consume:
/**
* Consume at least n records in a given duration or throw an exception.
*
* @param n the number of expected records in this topic.
* @param maxDuration the max duration to wait for these records (in milliseconds).
* @param topics the topics to subscribe and consume records from.
* @return a {@link ConsumerRecords} collection containing at least n records.
*/
public ConsumerRecords<byte[], byte[]> consume(int n, long maxDuration, String... topics) {
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
    int consumedRecords = 0;
    try (KafkaConsumer<byte[], byte[]> consumer = createConsumerAndSubscribeTo(Collections.emptyMap(), topics)) {
        final long startMillis = System.currentTimeMillis();
        long allowedDuration = maxDuration;
        while (allowedDuration > 0) {
            log.debug("Consuming from {} for {} millis.", Arrays.toString(topics), allowedDuration);
            ConsumerRecords<byte[], byte[]> rec = consumer.poll(Duration.ofMillis(allowedDuration));
            if (rec.isEmpty()) {
                allowedDuration = maxDuration - (System.currentTimeMillis() - startMillis);
                continue;
            }
            for (TopicPartition partition : rec.partitions()) {
                final List<ConsumerRecord<byte[], byte[]>> r = rec.records(partition);
                records.computeIfAbsent(partition, t -> new ArrayList<>()).addAll(r);
                consumedRecords += r.size();
            }
            if (consumedRecords >= n) {
                return new ConsumerRecords<>(records);
            }
            allowedDuration = maxDuration - (System.currentTimeMillis() - startMillis);
        }
    }
    throw new RuntimeException("Could not find enough records. found " + consumedRecords + ", expected " + n);
}
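A typical call from a Connect integration test might look like the following hedged usage sketch; kafkaCluster, NUM_RECORDS, CONSUME_MAX_DURATION_MS, and the topic name are placeholders for whatever the surrounding test defines:

// Block until at least NUM_RECORDS have been consumed from "test-topic", or fail after the timeout.
ConsumerRecords<byte[], byte[]> records = kafkaCluster.consume(NUM_RECORDS, CONSUME_MAX_DURATION_MS, "test-topic");
assertTrue(records.count() >= NUM_RECORDS, "expected at least " + NUM_RECORDS + " records");
for (ConsumerRecord<byte[], byte[]> record : records) {
    assertNotNull(record.value(), "record value should not be null");
}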