Use of org.apache.kafka.streams.processor.BatchingStateRestoreCallback in project apache-kafka-on-k8s by banzaicloud.
Class InternalMockProcessorContext, method restore.
public void restore(final String storeName, final Iterable<KeyValue<byte[], byte[]>> changeLog) {
    final BatchingStateRestoreCallback restoreCallback = getBatchingRestoreCallback(restoreFuncs.get(storeName));
    final StateRestoreListener restoreListener = getStateRestoreListener(restoreCallback);

    restoreListener.onRestoreStart(null, storeName, 0L, 0L);

    final List<KeyValue<byte[], byte[]>> records = new ArrayList<>();
    for (final KeyValue<byte[], byte[]> keyValue : changeLog) {
        records.add(keyValue);
    }
    restoreCallback.restoreAll(records);

    restoreListener.onRestoreEnd(null, storeName, 0L);
}
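For context, here is a minimal sketch of the kind of callback this test helper drives: a hypothetical BatchingStateRestoreCallback (not taken from either project) that materializes the restored changelog into an in-memory map, treating a null value as a tombstone. Bytes.wrap is used so that byte-array keys compare by content rather than identity.

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.processor.BatchingStateRestoreCallback;

// Illustrative only: collects restored changelog records into an in-memory map.
public class InMemoryRestoreCallback implements BatchingStateRestoreCallback {

    private final Map<Bytes, byte[]> store = new HashMap<>();

    @Override
    public void restoreAll(final Collection<KeyValue<byte[], byte[]>> records) {
        for (final KeyValue<byte[], byte[]> record : records) {
            restore(record.key, record.value);
        }
    }

    @Override
    public void restore(final byte[] key, final byte[] value) {
        if (value == null) {
            store.remove(Bytes.wrap(key)); // tombstone: delete the key
        } else {
            store.put(Bytes.wrap(key), value);
        }
    }

    public Map<Bytes, byte[]> restoredEntries() {
        return store;
    }
}

With a callback like this registered under storeName, the restore helper above would deliver the entire changelog to restoreAll in a single batch.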
Use of org.apache.kafka.streams.processor.BatchingStateRestoreCallback in project apache-kafka-on-k8s by banzaicloud.
Class ProcessorStateManager, method updateStandbyStates.
List<ConsumerRecord<byte[], byte[]>> updateStandbyStates(final TopicPartition storePartition,
                                                         final List<ConsumerRecord<byte[], byte[]>> records) {
    final long limit = offsetLimit(storePartition);
    List<ConsumerRecord<byte[], byte[]>> remainingRecords = null;
    final List<KeyValue<byte[], byte[]>> restoreRecords = new ArrayList<>();

    // restore states from changelog records
    final BatchingStateRestoreCallback restoreCallback = getBatchingRestoreCallback(restoreCallbacks.get(storePartition.topic()));

    long lastOffset = -1L;
    int count = 0;
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        if (record.offset() < limit) {
            restoreRecords.add(KeyValue.pair(record.key(), record.value()));
            lastOffset = record.offset();
        } else {
            // past the offset limit: keep the remainder for a later pass
            if (remainingRecords == null) {
                remainingRecords = new ArrayList<>(records.size() - count);
            }
            remainingRecords.add(record);
        }
        count++;
    }

    if (!restoreRecords.isEmpty()) {
        try {
            restoreCallback.restoreAll(restoreRecords);
        } catch (final Exception e) {
            throw new ProcessorStateException(
                String.format("%sException caught while trying to restore state from %s", logPrefix, storePartition), e);
        }
    }

    // record the restored offset for its changelog partition
    restoredOffsets.put(storePartition, lastOffset + 1);

    return remainingRecords;
}
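updateStandbyStates obtains its callback through getBatchingRestoreCallback, which hands back the registered callback directly if it already implements the batching interface and otherwise wraps a record-at-a-time StateRestoreCallback. A minimal sketch of such a wrapper, consistent with the WrappedBatchingStateRestoreCallback used in the GlobalStateManagerImpl example further below (the class name BatchingWrapper here is illustrative):

import java.util.Collection;

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.processor.BatchingStateRestoreCallback;
import org.apache.kafka.streams.processor.StateRestoreCallback;

// Sketch: adapts a record-at-a-time StateRestoreCallback to the batching interface.
public class BatchingWrapper implements BatchingStateRestoreCallback {

    private final StateRestoreCallback inner;

    public BatchingWrapper(final StateRestoreCallback inner) {
        this.inner = inner;
    }

    @Override
    public void restoreAll(final Collection<KeyValue<byte[], byte[]>> records) {
        // a batch degrades to one inner call per record
        for (final KeyValue<byte[], byte[]> record : records) {
            inner.restore(record.key, record.value);
        }
    }

    @Override
    public void restore(final byte[] key, final byte[] value) {
        inner.restore(key, value);
    }
}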
Use of org.apache.kafka.streams.processor.BatchingStateRestoreCallback in project kafka by apache.
Class StateRestoreCallbackAdapterTest, method shouldConvertToKeyValueBatches.
@Test
public void shouldConvertToKeyValueBatches() {
    final ArrayList<KeyValue<byte[], byte[]>> actual = new ArrayList<>();
    final BatchingStateRestoreCallback callback = new BatchingStateRestoreCallback() {
        @Override
        public void restoreAll(final Collection<KeyValue<byte[], byte[]>> records) {
            actual.addAll(records);
        }

        @Override
        public void restore(final byte[] key, final byte[] value) {
            // unreachable
        }
    };
    final RecordBatchingStateRestoreCallback adapted = adapt(callback);

    final byte[] key1 = {1};
    final byte[] value1 = {2};
    final byte[] key2 = {3};
    final byte[] value2 = {4};

    adapted.restoreBatch(asList(
        new ConsumerRecord<>("topic1", 0, 0L, key1, value1),
        new ConsumerRecord<>("topic2", 1, 1L, key2, value2)
    ));

    assertThat(actual, is(asList(new KeyValue<>(key1, value1), new KeyValue<>(key2, value2))));
}
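The adapter under test bridges raw consumer records to key-value batches. A rough standalone sketch of the conversion that restoreBatch performs, assuming that only key and value survive the translation (RestoreBatchSketch and its static method are illustrative, not the project's API):

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.processor.BatchingStateRestoreCallback;

// Sketch of the adaptation the test exercises: a batch of ConsumerRecords is
// mapped to KeyValue pairs and handed to restoreAll in a single call.
public final class RestoreBatchSketch {

    public static void restoreBatch(final BatchingStateRestoreCallback callback,
                                    final Collection<ConsumerRecord<byte[], byte[]>> records) {
        final List<KeyValue<byte[], byte[]>> converted = new ArrayList<>(records.size());
        for (final ConsumerRecord<byte[], byte[]> record : records) {
            // topic, partition, and offset are dropped; only key/value survive
            converted.add(KeyValue.pair(record.key(), record.value()));
        }
        callback.restoreAll(converted);
    }
}

This is why the assertion above checks only keys and values: the differing topics and partitions of the two input records are irrelevant after adaptation.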
Use of org.apache.kafka.streams.processor.BatchingStateRestoreCallback in project apache-kafka-on-k8s by banzaicloud.
Class GlobalStateManagerImpl, method restoreState.
private void restoreState(final StateRestoreCallback stateRestoreCallback,
                          final List<TopicPartition> topicPartitions,
                          final Map<TopicPartition, Long> highWatermarks,
                          final String storeName) {
    for (final TopicPartition topicPartition : topicPartitions) {
        globalConsumer.assign(Collections.singletonList(topicPartition));
        final Long checkpoint = checkpointableOffsets.get(topicPartition);
        if (checkpoint != null) {
            globalConsumer.seek(topicPartition, checkpoint);
        } else {
            globalConsumer.seekToBeginning(Collections.singletonList(topicPartition));
        }

        long offset = globalConsumer.position(topicPartition);
        final Long highWatermark = highWatermarks.get(topicPartition);
        final BatchingStateRestoreCallback stateRestoreAdapter =
            (stateRestoreCallback instanceof BatchingStateRestoreCallback)
                ? (BatchingStateRestoreCallback) stateRestoreCallback
                : new WrappedBatchingStateRestoreCallback(stateRestoreCallback);

        stateRestoreListener.onRestoreStart(topicPartition, storeName, offset, highWatermark);
        long restoreCount = 0L;

        while (offset < highWatermark) {
            try {
                final ConsumerRecords<byte[], byte[]> records = globalConsumer.poll(100);
                final List<KeyValue<byte[], byte[]>> restoreRecords = new ArrayList<>();
                for (final ConsumerRecord<byte[], byte[]> record : records) {
                    if (record.key() != null) {
                        restoreRecords.add(KeyValue.pair(record.key(), record.value()));
                    }
                    offset = globalConsumer.position(topicPartition);
                }
                stateRestoreAdapter.restoreAll(restoreRecords);
                stateRestoreListener.onBatchRestored(topicPartition, storeName, offset, restoreRecords.size());
                restoreCount += restoreRecords.size();
            } catch (final InvalidOffsetException recoverableException) {
                log.warn("Restoring GlobalStore {} failed due to: {}. Deleting global store to recreate from scratch.",
                         storeName, recoverableException.getMessage());
                reinitializeStateStoresForPartitions(recoverableException.partitions(), processorContext);
                stateRestoreListener.onRestoreStart(topicPartition, storeName, offset, highWatermark);
                restoreCount = 0L;
            }
        }

        stateRestoreListener.onRestoreEnd(topicPartition, storeName, restoreCount);
        checkpointableOffsets.put(topicPartition, offset);
    }
}
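restoreState reports progress through the StateRestoreListener hooks it invokes: onRestoreStart before the poll loop, onBatchRestored after each restored batch, and onRestoreEnd once the high watermark is reached. A hypothetical listener, purely for illustration, that logs those callbacks:

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.processor.StateRestoreListener;

// Illustrative listener that prints the progress callbacks restoreState emits.
public class LoggingRestoreListener implements StateRestoreListener {

    @Override
    public void onRestoreStart(final TopicPartition partition, final String storeName,
                               final long startingOffset, final long endingOffset) {
        System.out.printf("restore of %s (%s) starting at offset %d, target %d%n",
                          storeName, partition, startingOffset, endingOffset);
    }

    @Override
    public void onBatchRestored(final TopicPartition partition, final String storeName,
                                final long batchEndOffset, final long numRestored) {
        System.out.printf("restored batch of %d records for %s, now at offset %d%n",
                          numRestored, storeName, batchEndOffset);
    }

    @Override
    public void onRestoreEnd(final TopicPartition partition, final String storeName,
                             final long totalRestored) {
        System.out.printf("finished restoring %s: %d records%n", storeName, totalRestored);
    }
}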