Use of org.rocksdb.WriteBatch in project kafka by apache.
The getWriteBatches method of the class AbstractRocksDBSegmentedBytesStore:
// Visible for testing
Map<S, WriteBatch> getWriteBatches(final Collection<ConsumerRecord<byte[], byte[]>> records) {
    // advance stream time to the max timestamp in the batch
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        final long timestamp = keySchema.segmentTimestamp(Bytes.wrap(record.key()));
        observedStreamTime = Math.max(observedStreamTime, timestamp);
    }
    final Map<S, WriteBatch> writeBatchMap = new HashMap<>();
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        final long timestamp = keySchema.segmentTimestamp(Bytes.wrap(record.key()));
        final long segmentId = segments.segmentId(timestamp);
        // only restore into segments that are still live; expired segments are skipped
        final S segment = segments.getOrCreateSegmentIfLive(segmentId, context, observedStreamTime);
        if (segment != null) {
            ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(record, consistencyEnabled, position);
            try {
                // group records by segment: one WriteBatch per live segment
                final WriteBatch batch = writeBatchMap.computeIfAbsent(segment, s -> new WriteBatch());
                segment.addToBatch(new KeyValue<>(record.key(), record.value()), batch);
            } catch (final RocksDBException e) {
                throw new ProcessorStateException("Error restoring batch to store " + this.name, e);
            }
        }
    }
    return writeBatchMap;
}
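Once the batches are built, the caller is expected to write each one atomically into its segment's RocksDB instance and then release it. A minimal sketch of such a restore loop, assuming a segment.write(batch) helper that forwards to RocksDB's db.write (the helper name is an assumption, not shown in the snippet above):

// Hypothetical restore loop over the result of getWriteBatches.
// segment.write(batch) is assumed to wrap RocksDB's db.write(writeOptions, batch).
final Map<S, WriteBatch> writeBatchMap = getWriteBatches(records);
for (final Map.Entry<S, WriteBatch> entry : writeBatchMap.entrySet()) {
    try (final WriteBatch batch = entry.getValue()) { // WriteBatch is AutoCloseable; frees native memory
        entry.getKey().write(batch);                  // apply the whole batch atomically to the segment
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error restoring batch to store " + this.name, e);
    }
}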
Use of org.rocksdb.WriteBatch in project kafka by apache.
The shouldCreateWriteBatches test in the class AbstractRocksDBSegmentedBytesStoreTest:
@Test
public void shouldCreateWriteBatches() {
    final String key = "a";
    final Collection<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
    records.add(new ConsumerRecord<>("", 0, 0L, serializeKey(new Windowed<>(key, windows[0])).get(), serializeValue(50L)));
    records.add(new ConsumerRecord<>("", 0, 0L, serializeKey(new Windowed<>(key, windows[3])).get(), serializeValue(100L)));
    final Map<S, WriteBatch> writeBatchMap = bytesStore.getWriteBatches(records);
    // the two windows fall into different segments, so we expect one batch per segment
    assertEquals(2, writeBatchMap.size());
    for (final WriteBatch batch : writeBatchMap.values()) {
        assertEquals(1, batch.count());
    }
}
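For context, WriteBatch itself is a plain RocksDB building block: updates are buffered in memory until db.write applies them atomically, and count() reports how many operations have been buffered, which is what the test above asserts per segment. A self-contained sketch against the org.rocksdb API (the class name, database path, and key/value contents are illustrative):

import java.nio.charset.StandardCharsets;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class WriteBatchDemo {
    public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final Options options = new Options().setCreateIfMissing(true);
             final RocksDB db = RocksDB.open(options, "/tmp/writebatch-demo");
             final WriteBatch batch = new WriteBatch();
             final WriteOptions writeOptions = new WriteOptions()) {
            // accumulate several updates in memory; nothing hits the DB yet
            batch.put("a".getBytes(StandardCharsets.UTF_8), "50".getBytes(StandardCharsets.UTF_8));
            batch.put("b".getBytes(StandardCharsets.UTF_8), "100".getBytes(StandardCharsets.UTF_8));
            System.out.println(batch.count()); // prints 2
            // apply all buffered updates atomically
            db.write(writeOptions, batch);
        }
    }
}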