Use of org.apache.kafka.clients.consumer.OffsetAndMetadata in project kafka by apache.
The class ConsumerCoordinator, method refreshCommittedOffsetsIfNeeded.
/**
 * Refresh the committed offsets for provided partitions.
 */
public void refreshCommittedOffsetsIfNeeded() {
    if (subscriptions.refreshCommitsNeeded()) {
        Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions());
        for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
            TopicPartition tp = entry.getKey();
            // verify assignment is still active
            if (subscriptions.isAssigned(tp))
                this.subscriptions.committed(tp, entry.getValue());
        }
        this.subscriptions.commitsRefreshed();
    }
}
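For context, a plain KafkaConsumer exposes the same committed-offset lookup through its public API. Below is a minimal sketch, assuming a broker at localhost:9092 and a topic named orders (both hypothetical); the single-partition committed() call matches the older client API used in this codebase:

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class CommittedOffsetLookup {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // hypothetical broker
        props.put("group.id", "example-group");           // hypothetical group
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("orders", 0); // hypothetical topic
            consumer.assign(Collections.singletonList(tp));

            // committed() returns null if the group has never committed for this partition
            OffsetAndMetadata committed = consumer.committed(tp);
            long position = committed != null ? committed.offset() : 0L;
            consumer.seek(tp, position);
        }
    }
}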
Use of org.apache.kafka.clients.consumer.OffsetAndMetadata in project kafka by apache.
The class StandbyTaskTest, method shouldCheckpointStoreOffsetsOnCommit.
@Test
public void shouldCheckpointStoreOffsetsOnCommit() throws Exception {
    consumer.assign(Utils.mkList(ktable));
    // seed a committed offset of 100 for the ktable partition
    final Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(100L));
    consumer.commitSync(committedOffsets);
    restoreStateConsumer.updatePartitions("ktable1", Utils.mkList(new PartitionInfo("ktable1", 0, Node.noNode(), new Node[0], new Node[0])));
    final TaskId taskId = new TaskId(0, 0);
    final MockTime time = new MockTime();
    final StreamsConfig config = createConfig(baseDir);
    final StandbyTask task = new StandbyTask(taskId, applicationId, ktablePartitions, ktableTopology, consumer, changelogReader, config, null, stateDirectory);
    restoreStateConsumer.assign(new ArrayList<>(task.changeLogPartitions()));
    final byte[] serializedValue = Serdes.Integer().serializer().serialize("", 1);
    // feed a single record at offset 50, then commit
    task.update(ktable, Collections.singletonList(new ConsumerRecord<>(ktable.topic(), ktable.partition(), 50L, serializedValue, serializedValue)));
    time.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG));
    task.commit();
    // the checkpoint records the next offset to read: 50 + 1 = 51
    final Map<TopicPartition, Long> checkpoint = new OffsetCheckpoint(new File(stateDirectory.directoryForTask(taskId), ProcessorStateManager.CHECKPOINT_FILE_NAME)).read();
    assertThat(checkpoint, equalTo(Collections.singletonMap(ktable, 51L)));
}
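The assertion above encodes the usual Kafka convention: a record consumed at offset 50 checkpoints as 51, i.e. the next offset to read. As a minimal sketch of the checkpoint-file round trip, assuming the internal org.apache.kafka.streams.state.internals.OffsetCheckpoint class (not public API, and its location may differ across versions) and a hypothetical file path:

import java.io.File;
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.state.internals.OffsetCheckpoint;

public class CheckpointRoundTrip {
    public static void main(String[] args) throws Exception {
        File file = new File("/tmp/example-task/.checkpoint"); // hypothetical path
        file.getParentFile().mkdirs();
        OffsetCheckpoint checkpoint = new OffsetCheckpoint(file);

        // last processed offset was 50, so the checkpointed offset is 51
        TopicPartition ktable = new TopicPartition("ktable1", 0);
        checkpoint.write(Collections.singletonMap(ktable, 51L));

        Map<TopicPartition, Long> restored = checkpoint.read();
        System.out.println(restored); // {ktable1-0=51}
    }
}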
Use of org.apache.kafka.clients.consumer.OffsetAndMetadata in project kafka by apache.
The class StandbyTaskTest, method shouldNotThrowUnsupportedOperationExceptionWhenInitializingStateStores.
@Test
public void shouldNotThrowUnsupportedOperationExceptionWhenInitializingStateStores() throws Exception {
    final String changelogName = "test-application-my-store-changelog";
    final List<TopicPartition> partitions = Utils.mkList(new TopicPartition(changelogName, 0));
    consumer.assign(partitions);
    // seed a committed offset of 0 for the changelog partition
    final Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
    committedOffsets.put(new TopicPartition(changelogName, 0), new OffsetAndMetadata(0L));
    consumer.commitSync(committedOffsets);
    restoreStateConsumer.updatePartitions(changelogName, Utils.mkList(new PartitionInfo(changelogName, 0, Node.noNode(), new Node[0], new Node[0])));
    final KStreamBuilder builder = new KStreamBuilder();
    builder.stream("topic").groupByKey().count("my-store");
    final ProcessorTopology topology = builder.setApplicationId(applicationId).build(0);
    final StreamsConfig config = createConfig(baseDir);
    // constructing the StandbyTask initializes its state stores; no exception expected
    new StandbyTask(taskId, applicationId, partitions, topology, consumer, changelogReader, config, new MockStreamsMetrics(new Metrics()), stateDirectory);
}
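The test seeds committed offsets through a test double rather than a live broker. A minimal sketch of that kind of setup with kafka-clients' org.apache.kafka.clients.consumer.MockConsumer (topic name taken from the test; the surrounding class is hypothetical):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerSetup {
    public static void main(String[] args) {
        MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

        TopicPartition changelog = new TopicPartition("test-application-my-store-changelog", 0);
        consumer.assign(Collections.singletonList(changelog));

        // seed a committed offset of 0 for the changelog partition, as the test does
        Map<TopicPartition, OffsetAndMetadata> committed = new HashMap<>();
        committed.put(changelog, new OffsetAndMetadata(0L));
        consumer.commitSync(committed);

        // prints the committed OffsetAndMetadata seeded above
        System.out.println(consumer.committed(changelog));
    }
}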
Use of org.apache.kafka.clients.consumer.OffsetAndMetadata in project kafka by apache.
The class StreamTask, method commitOffsets.
/**
 * Commit consumed offsets if needed.
 */
@Override
public void commitOffsets() {
    if (commitOffsetNeeded) {
        Map<TopicPartition, OffsetAndMetadata> consumedOffsetsAndMetadata = new HashMap<>(consumedOffsets.size());
        for (Map.Entry<TopicPartition, Long> entry : consumedOffsets.entrySet()) {
            TopicPartition partition = entry.getKey();
            // commit the next offset to consume, i.e. the last consumed offset plus one
            long offset = entry.getValue() + 1;
            consumedOffsetsAndMetadata.put(partition, new OffsetAndMetadata(offset));
            stateMgr.putOffsetLimit(partition, offset);
        }
        try {
            consumer.commitSync(consumedOffsetsAndMetadata);
        } catch (final CommitFailedException cfe) {
            log.warn("{} Failed offset commits: {} ", logPrefix, consumedOffsetsAndMetadata);
            throw cfe;
        }
        commitOffsetNeeded = false;
    }
    commitRequested = false;
}
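The + 1 above follows the standard Kafka commit convention: the committed offset is the position of the next record to consume, not the last one processed. A minimal sketch of the same pattern in a plain consumer loop (broker address and topic are hypothetical):

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class CommitNextOffset {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // hypothetical broker
        props.put("group.id", "example-group");           // hypothetical group
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("orders")); // hypothetical topic
            ConsumerRecords<byte[], byte[]> records = consumer.poll(1000);
            for (ConsumerRecord<byte[], byte[]> record : records) {
                TopicPartition tp = new TopicPartition(record.topic(), record.partition());
                // commit record.offset() + 1: the next offset the group should read
                consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(record.offset() + 1)));
            }
        }
    }
}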
Use of org.apache.kafka.clients.consumer.OffsetAndMetadata in project kafka by apache.
The class AbstractTask, method initializeOffsetLimits.
protected void initializeOffsetLimits() {
    for (TopicPartition partition : partitions) {
        try {
            // TODO: batch API?
            OffsetAndMetadata metadata = consumer.committed(partition);
            // a null result means the group has never committed an offset for this partition
            stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L);
        } catch (AuthorizationException e) {
            throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e);
        } catch (WakeupException e) {
            throw e;
        } catch (KafkaException e) {
            throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e);
        }
    }
}
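The TODO above asks about a batch API. Newer kafka-clients releases (2.4+) add a committed(Set<TopicPartition>) overload that fetches all committed offsets in one round trip; a sketch of a batched variant of the loop above, assuming such a client version (the helper class and method are hypothetical):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class BatchCommittedLookup {
    // a hypothetical batched variant of initializeOffsetLimits
    static Map<TopicPartition, Long> fetchOffsetLimits(Consumer<?, ?> consumer, Set<TopicPartition> partitions) {
        // one round trip to the coordinator instead of one per partition
        Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(partitions);
        Map<TopicPartition, Long> limits = new HashMap<>();
        for (TopicPartition tp : partitions) {
            OffsetAndMetadata metadata = committed.get(tp);
            // null means the group has never committed for this partition
            limits.put(tp, metadata != null ? metadata.offset() : 0L);
        }
        return limits;
    }
}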