Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
The class SuppressionIntegrationTest, method waitForAnyRecord.
private static boolean waitForAnyRecord(final String topic) {
    final Properties properties = new Properties();
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
    try (final Consumer<Object, Object> consumer = new KafkaConsumer<>(properties)) {
        final List<TopicPartition> partitions = consumer.partitionsFor(topic)
            .stream()
            .map(pi -> new TopicPartition(pi.topic(), pi.partition()))
            .collect(Collectors.toList());
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        final long start = System.currentTimeMillis();
        while ((System.currentTimeMillis() - start) < DEFAULT_TIMEOUT) {
            final ConsumerRecords<Object, Object> records = consumer.poll(ofMillis(500));
            if (!records.isEmpty()) {
                return true;
            }
        }
        return false;
    }
}
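For the helper above to return true, some record has to reach the topic first. Below is a minimal, self-contained producer sketch that would satisfy the wait; the bootstrap address and topic name are placeholders, not taken from the test (which uses CLUSTER.bootstrapServers() and its own output topics):
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProduceOneRecord {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        // placeholder broker address; the test uses its embedded cluster instead
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        try (final KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // a single record is enough for waitForAnyRecord to return true
            producer.send(new ProducerRecord<>("some-output-topic", "key", "value"));
            producer.flush();
        }
    }
}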
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
The class StoreChangelogReader, method restore.
// 1. if there are any registered changelogs that need initialization, try to initialize them first;
// 2. if all changelogs have finished, return early;
// 3. if there are any restoring changelogs, try to read from the restore consumer and process them.
@Override
public void restore(final Map<TaskId, Task> tasks) {
    initializeChangelogs(tasks, registeredChangelogs());
    if (!activeRestoringChangelogs().isEmpty() && state == ChangelogReaderState.STANDBY_UPDATING) {
        throw new IllegalStateException("Should not be in standby updating state if there are still un-completed active changelogs");
    }
    if (allChangelogsCompleted()) {
        log.debug("Finished restoring all changelogs {}", changelogs.keySet());
        return;
    }
    final Set<TopicPartition> restoringChangelogs = restoringChangelogs();
    if (!restoringChangelogs.isEmpty()) {
        final ConsumerRecords<byte[], byte[]> polledRecords;
        try {
            // for restoring active tasks and updating standby tasks we may prefer different poll times,
            // in order to make sure we call the main consumer#poll in time.
            // TODO: once we move ChangelogReader to a separate thread this may no longer be a concern
            polledRecords = restoreConsumer.poll(state == ChangelogReaderState.STANDBY_UPDATING ? Duration.ZERO : pollTime);
            // TODO (?) If we cannot fetch records during restore, should we trigger `task.timeout.ms`?
            // TODO (?) If we cannot fetch records for a standby task, should we trigger `task.timeout.ms`?
        } catch (final InvalidOffsetException e) {
            log.warn("Encountered " + e.getClass().getName() + " fetching records from restore consumer for partitions " + e.partitions()
                + "; it is likely that the consumer's position has fallen out of the topic partition offset range because the topic was "
                + "truncated or compacted on the broker. Marking the corresponding tasks as corrupted and re-initializing them later.", e);
            final Set<TaskId> corruptedTasks = new HashSet<>();
            e.partitions().forEach(partition -> corruptedTasks.add(changelogs.get(partition).stateManager.taskId()));
            throw new TaskCorruptedException(corruptedTasks, e);
        } catch (final KafkaException e) {
            throw new StreamsException("Restore consumer got an unexpected error while polling records.", e);
        }
        // buffer the polled records by partition before applying them
        for (final TopicPartition partition : polledRecords.partitions()) {
            bufferChangelogRecords(restoringChangelogByPartition(partition), polledRecords.records(partition));
        }
        for (final TopicPartition partition : restoringChangelogs) {
            // even if some partitions do not have any accumulated data, we still trigger
            // restoring, since some changelogs may not need to restore anything at all, and the
            // restored-to-end check still needs to be executed.
            // TODO: we always try to restore as a batch when some records are accumulated, which may result in
            // small batches; this can be optimized in the future, e.g. wait longer for larger batches.
            final TaskId taskId = changelogs.get(partition).stateManager.taskId();
            try {
                if (restoreChangelog(changelogs.get(partition))) {
                    tasks.get(taskId).clearTaskTimeout();
                }
            } catch (final TimeoutException timeoutException) {
                tasks.get(taskId).maybeInitTaskTimeoutOrThrow(time.milliseconds(), timeoutException);
            }
        }
        maybeUpdateLimitOffsetsForStandbyChangelogs(tasks);
        maybeLogRestorationProgress();
    }
}
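The per-partition access pattern above (partitions() followed by records(partition)) is plain ConsumerRecords API rather than anything specific to the changelog reader. A minimal self-contained sketch of the same pattern; the broker address, group id, and topic name are placeholders:
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PerPartitionPoll {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "per-partition-demo");      // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        try (final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("some-changelog-topic")); // placeholder
            final ConsumerRecords<byte[], byte[]> polled = consumer.poll(Duration.ofMillis(500));
            // group the batch by partition, mirroring how the changelog reader buffers records
            for (final TopicPartition partition : polled.partitions()) {
                final List<ConsumerRecord<byte[], byte[]>> batch = polled.records(partition);
                System.out.println(partition + " -> " + batch.size() + " records");
            }
        }
    }
}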
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
The class WorkerSinkTaskTest, method testWakeupNotThrownDuringShutdown.
@Test
public void testWakeupNotThrownDuringShutdown() throws Exception {
    createTask(initialState);
    expectInitializeTask();
    expectTaskGetTopic(true);
    expectPollInitialAssignment();
    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        // stop the task during its second iteration
        workerTask.stop();
        return new ConsumerRecords<>(Collections.emptyMap());
    });
    consumer.wakeup();
    EasyMock.expectLastCall();
    sinkTask.put(EasyMock.eq(Collections.emptyList()));
    EasyMock.expectLastCall();
    EasyMock.expect(consumer.assignment()).andReturn(INITIAL_ASSIGNMENT).times(1);
    final Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    offsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    sinkTask.preCommit(offsets);
    EasyMock.expectLastCall().andReturn(offsets);
    sinkTask.close(EasyMock.anyObject());
    PowerMock.expectLastCall();
    // fail the first time
    consumer.commitSync(EasyMock.eq(offsets));
    EasyMock.expectLastCall().andThrow(new WakeupException());
    // and succeed the second time
    consumer.commitSync(EasyMock.eq(offsets));
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    workerTask.execute();
    assertEquals(0, workerTask.commitFailures());
    PowerMock.verifyAll();
}
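The stubbed poll above returns an empty batch via new ConsumerRecords<>(Collections.emptyMap()); the static factory ConsumerRecords.empty() yields the same result. A minimal sketch of stubbing an empty poll, shown here with Mockito purely for illustration (the test itself uses EasyMock and PowerMock):
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.time.Duration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;

public class EmptyPollStub {
    @SuppressWarnings("unchecked")
    public static Consumer<byte[], byte[]> emptyPollingConsumer() {
        final Consumer<byte[], byte[]> consumer = mock(Consumer.class);
        // every poll returns an empty batch, equivalent to new ConsumerRecords<>(Collections.emptyMap())
        when(consumer.poll(any(Duration.class))).thenReturn(ConsumerRecords.empty());
        return consumer;
    }
}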
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project pinpoint by naver.
The class ConsumerPollInterceptor, method after.
@Override
public void after(Object target, Object[] args, Object result, Throwable throwable) {
    if (isDebug) {
        logger.afterInterceptor(target, args, result, throwable);
    }
    if (!(target instanceof RemoteAddressFieldAccessor)) {
        return;
    }
    String remoteAddress = ((RemoteAddressFieldAccessor) target)._$PINPOINT$_getRemoteAddress();
    remoteAddress = StringUtils.defaultIfEmpty(remoteAddress, KafkaConstants.UNKNOWN);
    if (result instanceof ConsumerRecords) {
        // propagate the consumer's remote broker address to every record in the polled batch
        Iterator consumerRecordIterator = ((ConsumerRecords) result).iterator();
        while (consumerRecordIterator.hasNext()) {
            Object consumerRecord = consumerRecordIterator.next();
            if (consumerRecord instanceof RemoteAddressFieldAccessor) {
                ((RemoteAddressFieldAccessor) consumerRecord)._$PINPOINT$_setRemoteAddress(remoteAddress);
            }
        }
    }
}
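ConsumerRecords implements Iterable<ConsumerRecord<K, V>>, which is why the interceptor can walk the whole polled batch with a single iterator regardless of partition. A minimal typed sketch of the same traversal; the String/String record types and helper class are placeholders:
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

public final class RecordWalker {
    private RecordWalker() {
    }

    // walks every record in the batch across all partitions, just as the interceptor does
    public static void printAll(final ConsumerRecords<String, String> records) {
        for (final ConsumerRecord<String, String> record : records) {
            System.out.println(record.topic() + "-" + record.partition() + "@" + record.offset() + ": " + record.value());
        }
    }
}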
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project heron by twitter.
The class KafkaSpoutTest, method ack.
@Test
public void ack() {
    when(kafkaConsumerFactory.create()).thenReturn(consumer);
    TopicPartition topicPartition = new TopicPartition(DUMMY_TOPIC_NAME, 0);
    List<ConsumerRecord<String, byte[]>> recordList = new ArrayList<>();
    byte[] randomBytes = new byte[1];
    for (int i = 0; i < 5; i++) {
        RANDOM.nextBytes(randomBytes);
        recordList.add(new ConsumerRecord<>(DUMMY_TOPIC_NAME, 0, i, "key", Arrays.copyOf(randomBytes, randomBytes.length)));
    }
    ConsumerRecords<String, byte[]> consumerRecords = new ConsumerRecords<>(Collections.singletonMap(topicPartition, recordList));
    when(consumer.poll(any(Duration.class))).thenReturn(consumerRecords);
    kafkaSpout.open(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE, ATLEAST_ONCE.name()), topologyContext, collector);
    verify(consumer).subscribe(eq(Collections.singleton(DUMMY_TOPIC_NAME)), consumerRebalanceListenerArgumentCaptor.capture());
    ConsumerRebalanceListener consumerRebalanceListener = consumerRebalanceListenerArgumentCaptor.getValue();
    consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(topicPartition));
    // poll the topic
    kafkaSpout.nextTuple();
    // emit all five records
    for (int i = 0; i < 5; i++) {
        kafkaSpout.nextTuple();
    }
    // acks arrive out of order, and the third record (offset 2) is never acknowledged
    kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 4));
    kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 0));
    kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 1));
    kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 3));
    // commit and poll: only offsets 0 and 1 are contiguously acknowledged,
    // so the spout commits offset 2, the next offset the consumer should read
    kafkaSpout.nextTuple();
    verify(consumer).commitAsync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(2)), null);
}
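The expected OffsetAndMetadata(2) reflects Kafka's commit convention: the committed offset is the next offset to consume, not the last one processed, so only the contiguously acknowledged records 0 and 1 are covered. A minimal sketch of a manual commit that follows the same convention (the helper class and names are illustrative):
import java.util.Collections;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public final class CommitAfterProcessing {
    private CommitAfterProcessing() {
    }

    // commits record.offset() + 1 so that a restarted consumer resumes after this record
    public static void commit(final Consumer<String, String> consumer, final ConsumerRecord<String, String> record) {
        final TopicPartition tp = new TopicPartition(record.topic(), record.partition());
        consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(record.offset() + 1)));
    }
}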