Use of org.rocksdb.WriteOptions in project bookkeeper by Apache.
The class RocksdbKVStore, method openRocksdb.
protected void openRocksdb(StateStoreSpec spec) throws StateStoreException {
    // initialize the db options
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);
    tableConfig.setChecksumType(DEFAULT_CHECKSUM_TYPE);
    dbOpts = new DBOptions();
    dbOpts.setCreateIfMissing(true);
    dbOpts.setErrorIfExists(false);
    dbOpts.setInfoLogLevel(DEFAULT_LOG_LEVEL);
    dbOpts.setIncreaseParallelism(DEFAULT_PARALLELISM);
    dbOpts.setCreateMissingColumnFamilies(true);
    cfOpts = new ColumnFamilyOptions();
    cfOpts.setTableFormatConfig(tableConfig);
    cfOpts.setWriteBufferSize(WRITE_BUFFER_SIZE);
    cfOpts.setCompressionType(DEFAULT_COMPRESSION_TYPE);
    cfOpts.setCompactionStyle(DEFAULT_COMPACTION_STYLE);
    cfOpts.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    // initialize the write options
    writeOpts = new WriteOptions();
    // disable wal, since the source of truth will be on distributedlog
    writeOpts.setDisableWAL(true);
    // initialize the flush options
    flushOpts = new FlushOptions();
    flushOpts.setWaitForFlush(true);
    // open the rocksdb
    this.dbDir = spec.getLocalStateStoreDir();
    Pair<RocksDB, List<ColumnFamilyHandle>> dbPair = openLocalDB(dbDir, dbOpts, cfOpts);
    this.db = dbPair.getLeft();
    this.metaCfHandle = dbPair.getRight().get(0);
    this.dataCfHandle = dbPair.getRight().get(1);
}
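The options above are stored as fields and reused for every operation. A minimal sketch (not part of the BookKeeper source; the method name putAndFlush is hypothetical) of how the shared WriteOptions and FlushOptions might be exercised against the data column family opened above:

protected void putAndFlush(byte[] key, byte[] value) throws RocksDBException {
    // write through the shared WriteOptions (WAL disabled, journaling handled externally)
    db.put(dataCfHandle, writeOpts, key, value);
    // flush the memtable to disk, blocking until the flush completes
    db.flush(flushOpts, dataCfHandle);
}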
Use of org.rocksdb.WriteOptions in project storm by Apache.
The class RocksDbStore, method deleteMetadataBefore.
// deletes metadata strings before the provided timestamp
void deleteMetadataBefore(long firstValidTimestamp) throws MetricException {
    if (firstValidTimestamp < 1L) {
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
        throw new MetricException("Invalid timestamp for deleting metadata: " + firstValidTimestamp);
    }
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOps = new WriteOptions()) {
        // search all metadata strings
        RocksDbKey topologyMetadataPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_START);
        RocksDbKey lastPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_END);
        try {
            scanRange(topologyMetadataPrefix, lastPrefix, (key, value) -> {
                // we'll assume the metadata was recently used if still in the cache.
                if (!readOnlyStringMetadataCache.contains(key.getMetadataStringId())) {
                    if (value.getLastTimestamp() < firstValidTimestamp) {
                        writeBatch.delete(key.getRaw());
                    }
                }
                return true;
            });
        } catch (RocksDBException e) {
            throw new MetricException("Error reading metric data", e);
        }
        if (writeBatch.count() > 0) {
            LOG.info("Deleting {} metadata strings", writeBatch.count());
            try {
                db.write(writeOps, writeBatch);
            } catch (Exception e) {
                String message = "Failed to delete metadata strings";
                LOG.error(message, e);
                if (this.failureMeter != null) {
                    this.failureMeter.mark();
                }
                throw new MetricException(message, e);
            }
        }
    }
}
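The same WriteBatch-plus-WriteOptions pattern works outside this class. A hedged standalone sketch (db and keys are assumed inputs, not Storm code) that queues deletes and commits them in a single atomic write:

static void deleteAll(RocksDB db, List<byte[]> keys) throws RocksDBException {
    // try-with-resources releases the native handles of both objects
    try (WriteBatch batch = new WriteBatch();
         WriteOptions opts = new WriteOptions()) {
        for (byte[] key : keys) {
            batch.delete(key);
        }
        if (batch.count() > 0) {
            // one write applies every queued delete atomically
            db.write(opts, batch);
        }
    }
}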
Use of org.rocksdb.WriteOptions in project samza by Apache.
The class TestRocksDbKeyValueStoreJava, method testIterate.
@Test
public void testIterate() throws Exception {
    Config config = new MapConfig();
    Options options = new Options();
    options.setCreateIfMissing(true);
    File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
    RocksDbKeyValueStore store = new RocksDbKeyValueStore(dbDir, options, config, false, "dbStore",
        new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    String prefix = "prefix";
    for (int i = 0; i < 100; i++) {
        store.put(genKey(outputStream, prefix, i), genValue());
    }
    byte[] firstKey = genKey(outputStream, prefix, 0);
    byte[] lastKey = genKey(outputStream, prefix, 1000);
    KeyValueSnapshot<byte[], byte[]> snapshot = store.snapshot(firstKey, lastKey);
    // Make sure the cached Iterable won't change when new elements are added
    store.put(genKey(outputStream, prefix, 200), genValue());
    KeyValueIterator<byte[], byte[]> iterator = snapshot.iterator();
    assertTrue(Iterators.size(iterator) == 100);
    iterator.close();
    List<Integer> keys = new ArrayList<>();
    KeyValueIterator<byte[], byte[]> iterator2 = snapshot.iterator();
    while (iterator2.hasNext()) {
        Entry<byte[], byte[]> entry = iterator2.next();
        int key = Ints.fromByteArray(Arrays.copyOfRange(entry.getKey(), prefix.getBytes().length, entry.getKey().length));
        keys.add(key);
    }
    assertEquals(keys, IntStream.rangeClosed(0, 99).boxed().collect(Collectors.toList()));
    iterator2.close();
    outputStream.close();
    snapshot.close();
    store.close();
}
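The test relies on genKey and genValue helpers that are not shown in this snippet. A hedged reconstruction, inferred only from how the keys are parsed back (prefix bytes followed by a 4-byte big-endian int); the actual Samza helpers may differ:

private byte[] genKey(ByteArrayOutputStream outputStream, String prefix, int i) throws IOException {
    // key layout assumed by the parsing code above: prefix bytes + big-endian int
    outputStream.reset();
    outputStream.write(prefix.getBytes());
    outputStream.write(Ints.toByteArray(i));
    return outputStream.toByteArray();
}

private byte[] genValue() {
    // any non-empty payload is sufficient for the iteration assertions
    return Ints.toByteArray(ThreadLocalRandom.current().nextInt());
}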
Use of org.rocksdb.WriteOptions in project jstorm by Alibaba.
The class RocksTTLDBCache, method putBatch.
protected void putBatch(Map<String, Object> map, Entry<Integer, ColumnFamilyHandle> putEntry) {
    WriteOptions writeOpts = null;
    WriteBatch writeBatch = null;
    Set<byte[]> putKeys = new HashSet<>();
    try {
        writeOpts = new WriteOptions();
        writeBatch = new WriteBatch();
        for (Entry<String, Object> entry : map.entrySet()) {
            String key = entry.getKey();
            Object value = entry.getValue();
            byte[] data = Utils.javaSerialize(value);
            if (StringUtils.isBlank(key) || data == null || data.length == 0) {
                continue;
            }
            byte[] keyByte = key.getBytes();
            writeBatch.put(putEntry.getValue(), keyByte, data);
            putKeys.add(keyByte);
        }
        ttlDB.write(writeOpts, writeBatch);
    } catch (Exception e) {
        LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
    } finally {
        if (writeOpts != null) {
            writeOpts.dispose();
        }
        if (writeBatch != null) {
            writeBatch.dispose();
        }
    }
    // remove the keys just written from every other window's column family
    for (Entry<Integer, ColumnFamilyHandle> entry : windowHandlers.entrySet()) {
        if (entry.getKey().equals(putEntry.getKey())) {
            continue;
        }
        for (byte[] keyByte : putKeys) {
            try {
                ttlDB.remove(entry.getValue(), keyByte);
            } catch (Exception e) {
                LOG.error("Failed to remove key " + new String(keyByte) + " from other column families", e);
            }
        }
    }
}
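In newer RocksDB Java bindings both WriteOptions and WriteBatch implement AutoCloseable, so the dispose-in-finally pattern above can be collapsed into try-with-resources. A hedged sketch of the same batched write; ttlDB and putEntry come from the surrounding class, while serialized (a hypothetical Map<String, byte[]> of already-serialized entries) stands in for the serialization loop:

try (WriteOptions writeOpts = new WriteOptions();
     WriteBatch writeBatch = new WriteBatch()) {
    for (Entry<String, byte[]> entry : serialized.entrySet()) {
        // queue each key/value into the batch for the target column family
        writeBatch.put(putEntry.getValue(), entry.getKey().getBytes(), entry.getValue());
    }
    // one atomic write for the whole batch
    ttlDB.write(writeOpts, writeBatch);
} catch (Exception e) {
    LOG.error("Failed to putBatch into DB", e);
}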
Use of org.rocksdb.WriteOptions in project alluxio by Alluxio.
The class RocksStoreTest, method backupRestore.
@Test
public void backupRestore() throws Exception {
    ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setMemTableConfig(new HashLinkedListMemTableConfig())
        .setCompressionType(CompressionType.NO_COMPRESSION)
        // We always search using the initial long key
        .useFixedLengthPrefixExtractor(Longs.BYTES);
    List<ColumnFamilyDescriptor> columnDescriptors =
        Arrays.asList(new ColumnFamilyDescriptor("test".getBytes(), cfOpts));
    String dbDir = mFolder.newFolder("rocks").getAbsolutePath();
    String backupsDir = mFolder.newFolder("rocks-backups").getAbsolutePath();
    AtomicReference<ColumnFamilyHandle> testColumn = new AtomicReference<>();
    RocksStore store = new RocksStore("test", dbDir, backupsDir, columnDescriptors, Arrays.asList(testColumn));
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    RocksDB db = store.getDb();
    int count = 10;
    for (int i = 0; i < count; i++) {
        db.put(testColumn.get(), new WriteOptions().setDisableWAL(true), ("a" + i).getBytes(), "b".getBytes());
    }
    store.writeToCheckpoint(baos);
    String newBbDir = mFolder.newFolder("rocks-new").getAbsolutePath();
    store = new RocksStore("test-new", newBbDir, backupsDir, columnDescriptors, Arrays.asList(testColumn));
    store.restoreFromCheckpoint(new CheckpointInputStream(new ByteArrayInputStream(baos.toByteArray())));
    db = store.getDb();
    for (int i = 0; i < count; i++) {
        assertArrayEquals("b".getBytes(), db.get(testColumn.get(), ("a" + i).getBytes()));
    }
}
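The write loop above allocates a fresh WriteOptions per put and never releases it. A hedged alternative (same test data, not the Alluxio code; it assumes it sits inside the test body so RocksDBException can propagate) that reuses one instance and closes its native handle:

try (WriteOptions disableWal = new WriteOptions().setDisableWAL(true)) {
    for (int i = 0; i < count; i++) {
        // WAL is skipped; durability here comes from the checkpoint written afterwards
        db.put(testColumn.get(), disableWal, ("a" + i).getBytes(), "b".getBytes());
    }
}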