Use of org.rocksdb.WriteBatch in project bookkeeper by apache.
The class RocksdbKVStore, method putRaw:
private void putRaw(K key, byte[] keyBytes, V value, long revision) {
    WriteBatch batch = new WriteBatch();
    if (revision > 0) {
        // last revision has been set to revision bytes
        batch.put(metaCfHandle, LAST_REVISION, lastRevisionBytes);
    }
    if (null == value) {
        // delete a key if value is null
        batch.remove(dataCfHandle, keyBytes);
    } else {
        byte[] valBytes = valCoder.encode(value);
        batch.put(dataCfHandle, keyBytes, valBytes);
    }
    try {
        db.write(writeOpts, batch);
    } catch (RocksDBException e) {
        throw new StateStoreRuntimeException(
            "Error while updating key " + key + " to value " + value + " from store " + name, e);
    }
}
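putRaw stages at most three mutations (a revision marker, then a put or a delete) and commits them with a single db.write, but it never closes the WriteBatch, so the native handle is only reclaimed by the finalizer; remove() is also the older spelling that current RocksJava deprecates in favor of delete(). Below is a minimal standalone sketch of the same null-means-delete pattern using try-with-resources (WriteBatch is AutoCloseable); the path and helper names are hypothetical, not BookKeeper's:

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class PutOrDeleteSketch {
    // Mirrors putRaw's convention: a null value deletes the key.
    static void putOrDelete(RocksDB db, WriteOptions writeOpts,
                            byte[] keyBytes, byte[] valBytes) throws RocksDBException {
        try (WriteBatch batch = new WriteBatch()) {
            if (valBytes == null) {
                batch.delete(keyBytes); // delete() supersedes the deprecated remove()
            } else {
                batch.put(keyBytes, valBytes);
            }
            db.write(writeOpts, batch); // the whole batch is applied atomically
        }
    }

    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/writebatch-demo"); // hypothetical path
             WriteOptions writeOpts = new WriteOptions()) {
            putOrDelete(db, writeOpts, "k1".getBytes(), "v1".getBytes()); // upsert
            putOrDelete(db, writeOpts, "k1".getBytes(), null);            // delete
        }
    }
}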
Use of org.rocksdb.WriteBatch in project bookkeeper by apache.
The class MVCCStoreImpl, method processTxn:
synchronized TxnResult<K, V> processTxn(long revision, TxnOp<K, V> op) {
    checkStoreOpen();
    // 1. process the compares
    boolean success = processCompares(op);
    // 2. prepare the response list
    List<Op<K, V>> operations;
    List<Result<K, V>> results;
    if (success) {
        operations = op.successOps();
    } else {
        operations = op.failureOps();
    }
    if (operations == null) {
        operations = Collections.emptyList();
    }
    results = Lists.newArrayListWithExpectedSize(operations.size());
    // 3. process the operations
    try (WriteBatch batch = new WriteBatch()) {
        for (Op<K, V> o : operations) {
            results.add(executeOp(revision, batch, o));
        }
        executeBatch(batch);
        // 4. prepare the result
        TxnResultImpl<K, V> txnResult = resultFactory.newTxnResult(revision);
        txnResult.isSuccess(success);
        txnResult.results(results);
        txnResult.code(Code.OK);
        return txnResult;
    } catch (StateStoreRuntimeException e) {
        results.forEach(Result::close);
        throw e;
    }
}
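The shape of processTxn, namely evaluate the compares, pick the success or failure branch, stage every resulting op into one WriteBatch, commit once, can be reduced to a small sketch. The guard key, expected value, and byte[][] op lists below are hypothetical stand-ins for the MVCCStoreImpl types, and the db can be opened as in the previous sketch:

import java.util.Arrays;
import java.util.List;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class MiniTxnSketch {
    // Applies successOps if guardKey currently holds `expected`,
    // otherwise failureOps; either branch commits atomically.
    static boolean compareAndBatch(RocksDB db, WriteOptions writeOpts,
                                   byte[] guardKey, byte[] expected,
                                   List<byte[][]> successOps,
                                   List<byte[][]> failureOps) throws RocksDBException {
        boolean success = Arrays.equals(db.get(guardKey), expected); // 1. compare
        List<byte[][]> ops = success ? successOps : failureOps;      // 2. pick a branch
        try (WriteBatch batch = new WriteBatch()) {
            for (byte[][] kv : ops) {
                batch.put(kv[0], kv[1]);                             // 3. stage each op
            }
            db.write(writeOpts, batch);                              // 4. commit once
        }
        return success;
    }
}

As in processTxn, the compare-then-write sequence is isolated only if callers serialize access, which is what the synchronized modifier provides in MVCCStoreImpl.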
Use of org.rocksdb.WriteBatch in project bookkeeper by apache.
The class MVCCStoreImpl, method processIncrement:
synchronized IncrementResult<K, V> processIncrement(long revision, IncrementOp<K, V> op) {
    checkStoreOpen();
    WriteBatch batch = new WriteBatch();
    IncrementResult<K, V> result = null;
    try {
        result = increment(revision, batch, op);
        executeBatch(batch);
        return result;
    } catch (StateStoreRuntimeException e) {
        if (null != result) {
            result.close();
        }
        throw e;
    } finally {
        RocksUtils.close(batch);
    }
}
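Here the batch is released via RocksUtils.close in a finally block rather than try-with-resources, to the same effect. The increment(revision, batch, op) helper is internal to MVCCStoreImpl; the following is a hedged sketch of the underlying read-modify-write idea, assuming counters are stored as big-endian longs (an assumption for illustration, not the store's actual codec):

import java.nio.ByteBuffer;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class IncrementSketch {
    // Reads the current 8-byte counter, adds `amount`, and stages the new
    // value in a batch. The write itself is atomic, but the read-modify-write
    // cycle is not, so callers must serialize access, just as the
    // synchronized processIncrement method does.
    static long increment(RocksDB db, WriteOptions writeOpts,
                          byte[] key, long amount) throws RocksDBException {
        byte[] current = db.get(key);
        long next = (current == null ? 0L : ByteBuffer.wrap(current).getLong()) + amount;
        try (WriteBatch batch = new WriteBatch()) {
            batch.put(key, ByteBuffer.allocate(Long.BYTES).putLong(next).array());
            db.write(writeOpts, batch);
        }
        return next;
    }
}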
Use of org.rocksdb.WriteBatch in project kafka by apache.
The class AbstractRocksDBSegmentedBytesStore, method restoreAllInternal:
// Visible for testing
void restoreAllInternal(final Collection<ConsumerRecord<byte[], byte[]>> records) {
    try {
        final Map<S, WriteBatch> writeBatchMap = getWriteBatches(records);
        for (final Map.Entry<S, WriteBatch> entry : writeBatchMap.entrySet()) {
            final S segment = entry.getKey();
            final WriteBatch batch = entry.getValue();
            segment.write(batch);
            batch.close();
        }
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error restoring batch to store " + this.name, e);
    }
}
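getWriteBatches is internal to the Kafka store (it groups the restore records into one batch per segment), and note that if segment.write throws here, the remaining batches are never closed. Below is a standalone sketch of the same write-then-close loop, with two plain RocksDB instances standing in for segments (paths and data are hypothetical) and the close moved into a finally block so every native handle is released even on failure:

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class PerSegmentBatchRestore {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB segment1 = RocksDB.open(options, "/tmp/segment-1"); // hypothetical paths
             RocksDB segment2 = RocksDB.open(options, "/tmp/segment-2");
             WriteOptions writeOpts = new WriteOptions()) {
            Map<RocksDB, WriteBatch> writeBatchMap = new HashMap<>();
            writeBatchMap.put(segment1, new WriteBatch());
            writeBatchMap.put(segment2, new WriteBatch());
            try {
                writeBatchMap.get(segment1).put(bytes("a"), bytes("1")); // stage per-segment edits
                writeBatchMap.get(segment2).put(bytes("b"), bytes("2"));
                for (Map.Entry<RocksDB, WriteBatch> entry : writeBatchMap.entrySet()) {
                    entry.getKey().write(writeOpts, entry.getValue());
                }
            } finally {
                // close every batch even if a write fails, releasing native memory
                writeBatchMap.values().forEach(WriteBatch::close);
            }
        }
    }

    private static byte[] bytes(String s) {
        return s.getBytes(StandardCharsets.UTF_8);
    }
}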
Use of org.rocksdb.WriteBatch in project kafka by apache.
The class RocksDBStore, method putAll:
@Override
public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
    try (final WriteBatch batch = new WriteBatch()) {
        dbAccessor.prepareBatch(entries, batch);
        StoreQueryUtils.updatePosition(position, context);
        write(batch);
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error while batch writing to store " + name, e);
    }
}
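dbAccessor.prepareBatch and StoreQueryUtils.updatePosition are Kafka Streams internals; stripped of them, putAll is a plain all-or-nothing bulk write. A minimal sketch of that pattern against the raw RocksDB API (path and data hypothetical):

import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class PutAllSketch {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/putall-demo"); // hypothetical path
             WriteOptions writeOpts = new WriteOptions();
             WriteBatch batch = new WriteBatch()) {
            List<Map.Entry<byte[], byte[]>> entries = List.of(
                    Map.entry(bytes("k1"), bytes("v1")),
                    Map.entry(bytes("k2"), bytes("v2")));
            for (Map.Entry<byte[], byte[]> entry : entries) {
                batch.put(entry.getKey(), entry.getValue()); // stage every entry
            }
            db.write(writeOpts, batch); // all entries become visible together, or none do
        }
    }

    private static byte[] bytes(String s) {
        return s.getBytes(StandardCharsets.UTF_8);
    }
}

One related knob: WriteOptions.setDisableWAL(true) skips the RocksDB write-ahead log for such batches. Kafka Streams can generally afford that because its changelog topics provide durability, though whether this particular store enables it is not shown in the snippet above.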