Use of org.apache.kafka.streams.processor.StateRestoreCallback in the Apache Kafka project:
the checkpointed method of the ProcessorStateManager class.
/**
 * Returns the checkpointed offset for every registered changelog partition.
 * Partitions that have no checkpointed offset are reported with {@code -1L},
 * so callers can distinguish "never checkpointed" without a separate lookup.
 *
 * @return a fresh map from changelog partition to its checkpointed offset (or -1)
 */
public Map<TopicPartition, Long> checkpointed() {
    Map<TopicPartition, Long> partitionsAndOffsets = new HashMap<>();
    // Only the registered topic names are needed here; the callbacks themselves are unused.
    for (String topicName : restoreCallbacks.keySet()) {
        TopicPartition storePartition = new TopicPartition(topicName, getPartition(topicName));
        // getOrDefault avoids the containsKey + get double lookup; -1 marks "no checkpoint yet".
        partitionsAndOffsets.put(storePartition, checkpointedOffsets.getOrDefault(storePartition, -1L));
    }
    return partitionsAndOffsets;
}
Use of org.apache.kafka.streams.processor.StateRestoreCallback in the Apache Kafka project:
the updateStandbyStates method of the ProcessorStateManager class.
/**
 * Applies changelog records to this standby task's state store, restoring only
 * records whose offset is below the partition's offset limit. Records at or
 * beyond the limit are returned to the caller for a later pass.
 *
 * @param storePartition the changelog partition being restored
 * @param records the fetched changelog records, in offset order
 * @return the records that were not applied because they reached the offset
 *         limit, or {@code null} if every record was applied
 * @throws ProcessorStateException if the store's restore callback fails
 */
public List<ConsumerRecord<byte[], byte[]>> updateStandbyStates(TopicPartition storePartition, List<ConsumerRecord<byte[], byte[]>> records) {
    final long limit = offsetLimit(storePartition);
    final StateRestoreCallback restoreCallback = restoreCallbacks.get(storePartition.topic());

    List<ConsumerRecord<byte[], byte[]>> remainingRecords = null;
    long lastRestoredOffset = -1L;
    int consumed = 0;
    for (ConsumerRecord<byte[], byte[]> record : records) {
        if (record.offset() >= limit) {
            // At or past the limit: hand the record back instead of restoring it.
            if (remainingRecords == null)
                remainingRecords = new ArrayList<>(records.size() - consumed);
            remainingRecords.add(record);
        } else {
            try {
                restoreCallback.restore(record.key(), record.value());
            } catch (Exception e) {
                throw new ProcessorStateException(String.format("%s exception caught while trying to restore state from %s", logPrefix, storePartition), e);
            }
            lastRestoredOffset = record.offset();
        }
        consumed++;
    }

    // Record the next offset to fetch for this changelog partition
    // (-1 + 1 == 0 when nothing was restored).
    restoredOffsets.put(storePartition, lastRestoredOffset + 1);
    return remainingRecords;
}
Use of org.apache.kafka.streams.processor.StateRestoreCallback in the Apache Kafka project:
the init method of the InMemoryKeyValueStore class.
/**
 * Initializes the store: builds the key/value serdes (falling back to the
 * context-wide defaults when none were configured) and, when a root store is
 * supplied, registers it with a restore callback that replays changelog
 * entries back into this store.
 */
@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context, StateStore root) {
    // Pick explicit serdes when configured, otherwise fall back to the context defaults.
    final Serde<K> keys = keySerde == null ? (Serde<K>) context.keySerde() : keySerde;
    final Serde<V> values = valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde;
    this.serdes = new StateSerdes<>(name, keys, values);

    if (root != null) {
        // Register the store so the changelog can be replayed into it on restore.
        context.register(root, true, new StateRestoreCallback() {
            @Override
            public void restore(byte[] key, byte[] value) {
                // A null value is a delete marker; skip deserialization to avoid errors.
                put(serdes.keyFrom(key), value == null ? null : serdes.valueFrom(value));
            }
        });
    }
    this.open = true;
}
Use of org.apache.kafka.streams.processor.StateRestoreCallback in the Apache Kafka project:
the init method of the MemoryLRUCache class.
/**
 * Initializes the cache: constructs the serdes (using the context defaults
 * for any serde not explicitly configured) and registers the root store with
 * a restore callback that replays changelog entries into the cache.
 */
@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context, StateStore root) {
    // Build the serdes; a null configured serde means "use the context default".
    this.serdes = new StateSerdes<>(
        name,
        keySerde == null ? (Serde<K>) context.keySerde() : keySerde,
        valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde);

    // Register the store so changelog records can be replayed into it.
    context.register(root, true, new StateRestoreCallback() {
        @Override
        public void restore(byte[] key, byte[] value) {
            if (value != null) {
                put(serdes.keyFrom(key), serdes.valueFrom(value));
            } else {
                // Null value is a delete marker; don't deserialize it.
                put(serdes.keyFrom(key), null);
            }
        }
    });
}
Use of org.apache.kafka.streams.processor.StateRestoreCallback in the Apache Kafka project:
the init method of the RocksDBStore class.
/**
 * Initializes the store: opens the underlying RocksDB directory and registers
 * the root store with a restore callback that writes changelog entries
 * directly into RocksDB.
 */
@Override  // added for consistency with the other StateStore.init implementations
public void init(ProcessorContext context, StateStore root) {
    // Open the DB dir.
    openDB(context);

    // Value getter should always read directly from rocksDB,
    // since it is only for values that are already flushed.
    context.register(root, false, new StateRestoreCallback() {
        @Override
        public void restore(byte[] key, byte[] value) {
            // Raw bytes go straight into RocksDB; no serde round-trip on restore.
            putInternal(key, value);
        }
    });
    open = true;
}
Aggregations