Use of org.rocksdb.WriteBatch in project aion by aionnetwork.
The class RocksDBWrapper, method putBatchInternal.
@Override
public void putBatchInternal(Map<byte[], byte[]> input) {
    // try-with-resources will automatically close the batch object
    try (WriteBatch batch = new WriteBatch()) {
        // add put and delete operations to the batch; a null value marks a delete
        for (Map.Entry<byte[], byte[]> e : input.entrySet()) {
            byte[] key = e.getKey();
            byte[] value = e.getValue();
            if (value == null) {
                batch.delete(key);
            } else {
                batch.put(key, value);
            }
        }
        // bulk atomic update
        db.write(writeOptions, batch);
    } catch (RocksDBException e) {
        LOG.error("Unable to execute batch put/update operation on " + this.toString() + ".", e);
    }
}
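For context, here is a minimal, self-contained sketch of the same batch-put pattern against the plain RocksDB Java API. This is not aion code: the database path and sample keys are arbitrary placeholders, and the try-with-resources block assumes a recent rocksdbjni where Options, WriteOptions, and WriteBatch all implement AutoCloseable.

import java.util.HashMap;
import java.util.Map;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class BatchPutExample {
    public static void main(String[] args) {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/batch-put-example");
             WriteOptions writeOptions = new WriteOptions();
             WriteBatch batch = new WriteBatch()) {
            Map<byte[], byte[]> input = new HashMap<>();
            input.put("k1".getBytes(), "v1".getBytes());
            input.put("k2".getBytes(), "v2".getBytes());
            // stage all puts in the batch; nothing is written yet
            for (Map.Entry<byte[], byte[]> e : input.entrySet()) {
                batch.put(e.getKey(), e.getValue());
            }
            // the whole batch becomes visible atomically, or not at all
            db.write(writeOptions, batch);
        } catch (RocksDBException e) {
            e.printStackTrace();
        }
    }
}

That atomicity is the point of the wrapper above: either every entry in input lands, or the write fails as a unit.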
Use of org.rocksdb.WriteBatch in project storm by apache.
The class RocksDbStore, method deleteMetadataBefore.
// deletes metadata strings before the provided timestamp
void deleteMetadataBefore(long firstValidTimestamp) throws MetricException {
    if (firstValidTimestamp < 1L) {
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
        throw new MetricException("Invalid timestamp for deleting metadata: " + firstValidTimestamp);
    }
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOps = new WriteOptions()) {
        // search all metadata strings
        RocksDbKey topologyMetadataPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_START);
        RocksDbKey lastPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_END);
        try {
            scanRange(topologyMetadataPrefix, lastPrefix, (key, value) -> {
                // assume the metadata was recently used if it is still in the cache
                if (!readOnlyStringMetadataCache.contains(key.getMetadataStringId())) {
                    if (value.getLastTimestamp() < firstValidTimestamp) {
                        writeBatch.delete(key.getRaw());
                    }
                }
                return true;
            });
        } catch (RocksDBException e) {
            throw new MetricException("Error reading metric data", e);
        }
        if (writeBatch.count() > 0) {
            LOG.info("Deleting {} metadata strings", writeBatch.count());
            try {
                db.write(writeOps, writeBatch);
            } catch (Exception e) {
                String message = "Failed to delete metadata strings";
                LOG.error(message, e);
                if (this.failureMeter != null) {
                    this.failureMeter.mark();
                }
                throw new MetricException(message, e);
            }
        }
    }
}
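scanRange is internal to storm's RocksDbStore. A rough equivalent of this scan-then-batch-delete pattern using only the public RocksDB Java API might look like the sketch below; shouldDelete is a hypothetical stand-in for the timestamp-and-cache checks storm performs per entry, and Arrays.compareUnsigned (Java 9+) matches RocksDB's default bytewise key order.

import java.util.Arrays;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

static void deleteMatching(RocksDB db, byte[] startKey, byte[] endKeyExclusive) throws RocksDBException {
    try (WriteBatch batch = new WriteBatch();
         WriteOptions writeOps = new WriteOptions();
         RocksIterator it = db.newIterator()) {
        for (it.seek(startKey); it.isValid(); it.next()) {
            byte[] key = it.key();
            if (Arrays.compareUnsigned(key, endKeyExclusive) >= 0) {
                break; // walked past the end of the range
            }
            if (shouldDelete(key, it.value())) {
                batch.delete(key); // staged only; nothing is removed yet
            }
        }
        if (batch.count() > 0) {
            db.write(writeOps, batch); // apply all staged deletes in one atomic write
        }
    }
}

// hypothetical predicate standing in for storm's cache and timestamp checks
static boolean shouldDelete(byte[] key, byte[] value) {
    return true;
}

Collecting deletes in the batch while iterating, then writing once, avoids mutating the database underneath a live iterator.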
Use of org.rocksdb.WriteBatch in project storm by apache.
The class RocksDbMetricsWriter, method processBatchInsert.
// writes multiple metric values into the database as a batch operation. The TreeMap keeps the keys
// sorted for faster insertion into RocksDB.
private void processBatchInsert(TreeMap<RocksDbKey, RocksDbValue> batchMap) throws MetricException {
    try (WriteBatch writeBatch = new WriteBatch()) {
        // take the batched metric data and write it to the database
        for (RocksDbKey k : batchMap.keySet()) {
            RocksDbValue v = batchMap.get(k);
            writeBatch.put(k.getRaw(), v.getRaw());
        }
        store.db.write(writeOpts, writeBatch);
    } catch (Exception e) {
        String message = "Failed to store data to RocksDB";
        LOG.error(message, e);
        throw new MetricException(message, e);
    }
}
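The TreeMap comment is worth unpacking: handing keys to RocksDB in its own key order tends to be cheaper than random order, since inserts land near each other in the memtable. A generic version of the idea, independent of storm's RocksDbKey type, might look like this sketch (Arrays::compareUnsigned, Java 9+, matches RocksDB's default bytewise comparator; the method and parameter names are illustrative):

import java.util.Arrays;
import java.util.Map;
import java.util.TreeMap;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

static void sortedBatchInsert(RocksDB db, Map<byte[], byte[]> unordered) throws RocksDBException {
    // order entries the same way RocksDB's default bytewise comparator does
    TreeMap<byte[], byte[]> sorted = new TreeMap<>(Arrays::compareUnsigned);
    sorted.putAll(unordered);
    try (WriteBatch batch = new WriteBatch();
         WriteOptions opts = new WriteOptions()) {
        for (Map.Entry<byte[], byte[]> e : sorted.entrySet()) {
            batch.put(e.getKey(), e.getValue());
        }
        db.write(opts, batch);
    }
}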
Use of org.rocksdb.WriteBatch in project jstorm by alibaba.
The class RocksTTLDBCache, method putBatch.
protected void putBatch(Map<String, Object> map, Entry<Integer, ColumnFamilyHandle> putEntry) {
    WriteOptions writeOpts = null;
    WriteBatch writeBatch = null;
    Set<byte[]> putKeys = new HashSet<>();
    try {
        writeOpts = new WriteOptions();
        writeBatch = new WriteBatch();
        for (Entry<String, Object> entry : map.entrySet()) {
            String key = entry.getKey();
            Object value = entry.getValue();
            byte[] data = Utils.javaSerialize(value);
            if (StringUtils.isBlank(key) || data == null || data.length == 0) {
                continue;
            }
            byte[] keyByte = key.getBytes();
            writeBatch.put(putEntry.getValue(), keyByte, data);
            putKeys.add(keyByte);
        }
        ttlDB.write(writeOpts, writeBatch);
    } catch (Exception e) {
        LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
    } finally {
        // legacy RocksDB Java API: native handles are released with dispose() rather than close()
        if (writeOpts != null) {
            writeOpts.dispose();
        }
        if (writeBatch != null) {
            writeBatch.dispose();
        }
    }
    // drop the keys from every other window's column family so each key lives in exactly one window
    for (Entry<Integer, ColumnFamilyHandle> entry : windowHandlers.entrySet()) {
        if (entry.getKey().equals(putEntry.getKey())) {
            continue;
        }
        for (byte[] keyByte : putKeys) {
            try {
                ttlDB.remove(entry.getValue(), keyByte);
            } catch (Exception e) {
                LOG.error("Failed to remove key " + new String(keyByte) + " from other windows", e);
            }
        }
    }
}
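The dispose() calls mark this as the legacy RocksDB Java API; in current rocksdbjni releases the native handles implement AutoCloseable, so no finally block is needed. A sketch of the same write path in the modern style, keeping jstorm's names (map, putEntry, ttlDB, Utils, and StringUtils are assumed from the surrounding class):

try (WriteOptions writeOpts = new WriteOptions();
     WriteBatch writeBatch = new WriteBatch()) {
    // WriteOptions and WriteBatch are AutoCloseable here, so no finally/dispose() is needed
    for (Entry<String, Object> entry : map.entrySet()) {
        byte[] data = Utils.javaSerialize(entry.getValue());
        if (StringUtils.isBlank(entry.getKey()) || data == null || data.length == 0) {
            continue;
        }
        writeBatch.put(putEntry.getValue(), entry.getKey().getBytes(), data);
    }
    ttlDB.write(writeOpts, writeBatch);
} catch (Exception e) {
    LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
}

The cross-window cleanup would likewise swap the deprecated ttlDB.remove(handle, key) for ttlDB.delete(handle, key).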
Use of org.rocksdb.WriteBatch in project kafka by apache.
The class RocksDBStore, method restoreBatch.
void restoreBatch(final Collection<ConsumerRecord<byte[], byte[]>> records) {
    try (final WriteBatch batch = new WriteBatch()) {
        final List<KeyValue<byte[], byte[]>> keyValues = new ArrayList<>();
        for (final ConsumerRecord<byte[], byte[]> record : records) {
            ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(record, consistencyEnabled, position);
            // if version headers are not present or the version is V0, the record is restored as-is
            keyValues.add(new KeyValue<>(record.key(), record.value()));
        }
        dbAccessor.prepareBatchForRestore(keyValues, batch);
        write(batch);
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error restoring batch to store " + name, e);
    }
}
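prepareBatchForRestore is internal to Kafka Streams' RocksDB accessor. Conceptually it moves each restored key-value pair into the batch, and a null value from the changelog acts as a tombstone. A hedged sketch of that kind of logic, not the actual Kafka implementation (the method name here is hypothetical; KeyValue is org.apache.kafka.streams.KeyValue):

import java.util.List;
import org.apache.kafka.streams.KeyValue;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;

// sketch only: assumes changelog tombstone semantics, where a null value deletes the key
static void prepareBatchForRestoreSketch(List<KeyValue<byte[], byte[]>> keyValues, WriteBatch batch) throws RocksDBException {
    for (KeyValue<byte[], byte[]> kv : keyValues) {
        if (kv.value == null) {
            batch.delete(kv.key); // tombstone: drop the key from the store
        } else {
            batch.put(kv.key, kv.value);
        }
    }
}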