Use of org.rocksdb.DBOptions in the Apache Kafka project:
class RocksDBTimestampedStoreTest, method verifyOldAndNewColumnFamily.
/**
 * Verifies the physical layout of the store while it is mid-upgrade from the plain
 * key-value format to the timestamped format: some keys still live in the default
 * (no-timestamp) column family while others have been migrated to the
 * "keyValueWithTimestamp" column family. Afterwards it confirms the store still
 * opens in "upgrade mode", deletes the last un-migrated key, and confirms the
 * store then opens in "regular mode".
 *
 * <p>Expected value lengths encode "payload + 8-byte timestamp" for migrated keys
 * (e.g. {@code 8 + 1} for "key1"); un-migrated "key7" keeps its raw 7-byte value.
 */
private void verifyOldAndNewColumnFamily() throws Exception {
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions),
        new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    RocksDB db = null;
    ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null;
    boolean errorOccurred = false;
    try {
        db = RocksDB.open(dbOptions, new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        withTimestampColumnFamily = columnFamilies.get(1);
        // Old column family: everything has been migrated out except "key7".
        // (getBytes is pinned to UTF-8 so the test is independent of the platform charset.)
        assertThat(db.get(noTimestampColumnFamily, "unknown".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key1".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key2".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key3".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key4".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key5".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key6".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key7".getBytes(StandardCharsets.UTF_8)).length, is(7));
        assertThat(db.get(noTimestampColumnFamily, "key8".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key11".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key12".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        // New column family: migrated keys carry an 8-byte timestamp prefix in front
        // of the original payload; absent keys were deleted or never migrated.
        assertThat(db.get(withTimestampColumnFamily, "unknown".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key1".getBytes(StandardCharsets.UTF_8)).length, is(8 + 1));
        assertThat(db.get(withTimestampColumnFamily, "key2".getBytes(StandardCharsets.UTF_8)).length, is(12));
        assertThat(db.get(withTimestampColumnFamily, "key3".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key4".getBytes(StandardCharsets.UTF_8)).length, is(8 + 4));
        assertThat(db.get(withTimestampColumnFamily, "key5".getBytes(StandardCharsets.UTF_8)).length, is(8 + 5));
        assertThat(db.get(withTimestampColumnFamily, "key6".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key7".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key8".getBytes(StandardCharsets.UTF_8)).length, is(18));
        assertThat(db.get(withTimestampColumnFamily, "key11".getBytes(StandardCharsets.UTF_8)).length, is(21));
        assertThat(db.get(withTimestampColumnFamily, "key12".getBytes(StandardCharsets.UTF_8)), new IsNull<>());
    } catch (final RuntimeException fatal) {
        // BUGFIX: this failure used to be swallowed, so a broken store let the test
        // continue — and the code below would then re-open the DB with the
        // already-closed dbOptions/columnFamilyOptions. Record the error so the
        // finally block releases the options (the normal close below won't run),
        // then propagate the failure.
        errorOccurred = true;
        throw fatal;
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (withTimestampColumnFamily != null) {
            withTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        // On success the options are still needed for the second open below,
        // so they are only released here on the error path.
        if (errorOccurred) {
            dbOptions.close();
            columnFamilyOptions.close();
        }
    }
    // check that still in upgrade mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode"));
    } finally {
        rocksDBStore.close();
    }
    // clear old CF: deleting the last un-migrated key ("key7") completes the upgrade
    columnFamilies.clear();
    db = null;
    noTimestampColumnFamily = null;
    try {
        db = RocksDB.open(dbOptions, new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        db.delete(noTimestampColumnFamily, "key7".getBytes(StandardCharsets.UTF_8));
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        columnFamilyOptions.close();
    }
    // check that still in regular mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    }
}
Use of org.rocksdb.DBOptions in the Apache Kafka project:
class RocksDBStore, method openDB.
/**
 * Opens the RocksDB instance backing this store.
 *
 * <p>Builds the default DB and column-family options (block cache, block size,
 * bloom filter, write buffers, compression, compaction style), applies any
 * user-supplied {@code RocksDBConfigSetter} on top of those defaults, creates the
 * on-disk directory, sets up statistics, and finally opens the database.
 *
 * @param configs  the store/streams configuration map; read for
 *                 {@code rocksdb.config.setter}
 * @param stateDir the task state directory under which the DB directory is created
 * @throws ProcessorStateException if the DB directories cannot be created
 */
@SuppressWarnings("unchecked")
void openDB(final Map<String, Object> configs, final File stateDir) {
    // initialize the default rocksdb options
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    // Adapter exposes both option objects through a single Options-like facade,
    // so the user config setter below can mutate either.
    userSpecifiedOptions = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions);
    final BlockBasedTableConfigWithAccessibleCache tableConfig = new BlockBasedTableConfigWithAccessibleCache();
    // Block cache and bloom filter are kept in fields so they can be closed with the store.
    cache = new LRUCache(BLOCK_CACHE_SIZE);
    tableConfig.setBlockCache(cache);
    tableConfig.setBlockSize(BLOCK_SIZE);
    filter = new BloomFilter();
    tableConfig.setFilterPolicy(filter);
    userSpecifiedOptions.optimizeFiltersForHits();
    userSpecifiedOptions.setTableFormatConfig(tableConfig);
    userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE);
    userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE);
    userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE);
    userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    userSpecifiedOptions.setCreateIfMissing(true);
    userSpecifiedOptions.setErrorIfExists(false);
    userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDb
    // note that the current implementation of setIncreaseParallelism affects the number
    // of compaction threads but not flush threads (the latter remains one). Also,
    // the parallelism value needs to be at least two because of the code in
    // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
    // subtracts one from the value passed to determine the number of compaction threads
    // (this could be a bug in the RocksDB code and their devs have been contacted).
    userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));
    wOptions = new WriteOptions();
    // WAL is disabled for writes; durability is presumably provided elsewhere
    // (e.g. a changelog topic) — not visible from this method.
    wOptions.setDisableWAL(true);
    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);
    // Apply the user's config setter last so it can override any default above.
    final Class<RocksDBConfigSetter> configSetterClass = (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (configSetterClass != null) {
        configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, userSpecifiedOptions, configs);
    }
    dbDir = new File(new File(stateDir, parentDir), name);
    try {
        Files.createDirectories(dbDir.getParentFile().toPath());
        Files.createDirectories(dbDir.getAbsoluteFile().toPath());
    } catch (final IOException fatal) {
        throw new ProcessorStateException(fatal);
    }
    // Setup statistics before the database is opened, otherwise the statistics are not updated
    // with the measurements from Rocks DB
    maybeSetUpStatistics(configs);
    openRocksDB(dbOptions, columnFamilyOptions);
    open = true;
    addValueProvidersToMetricsRecorder();
}
Use of org.rocksdb.DBOptions in the Voldemort project:
class RocksDbStorageConfiguration, method getStore.
/**
 * Returns the RocksDB-backed storage engine for the given store definition,
 * creating and caching it on first request.
 *
 * <p>DB and column-family options are parsed from the server configuration
 * properties; the store's data lives in a dedicated (non-default) column family
 * named after the store.
 *
 * @param storeDef the store definition (only the name is used here)
 * @param strategy the routing strategy, used when partition-prefixed keys are enabled
 * @return the cached or newly created storage engine for {@code storeDef}
 * @throws StorageInitializationException if options cannot be parsed or the DB fails to open
 */
@Override
public StorageEngine<ByteArray, byte[], byte[]> getStore(StoreDefinition storeDef, RoutingStrategy strategy) {
    String storeName = storeDef.getName();
    // NOTE(review): containsKey-then-put is not atomic; presumably callers are
    // single-threaded or externally synchronized — verify before concurrent use.
    if (!stores.containsKey(storeName)) {
        String dataDir = this.voldemortconfig.getRdbDataDirectory() + "/" + storeName;
        new File(dataDir).mkdirs();
        Properties dbProperties = parseProperties(VoldemortConfig.ROCKSDB_DB_OPTIONS);
        DBOptions dbOptions = (dbProperties.size() > 0) ? DBOptions.getDBOptionsFromProps(dbProperties) : new DBOptions();
        // getDBOptionsFromProps returns null when the properties cannot be parsed.
        if (dbOptions == null) {
            throw new StorageInitializationException("Unable to parse Data Base Options.");
        }
        dbOptions.setCreateIfMissing(true);
        dbOptions.setCreateMissingColumnFamilies(true);
        dbOptions.createStatistics();
        Properties cfProperties = parseProperties(VoldemortConfig.ROCKSDB_CF_OPTIONS);
        if (this.voldemortconfig.getRocksdbPrefixKeysWithPartitionId()) {
            // Fixed-length prefix extractor so keys can be grouped by partition id.
            cfProperties.setProperty("prefix_extractor", "fixed:" + StoreBinaryFormat.PARTITIONID_PREFIX_SIZE);
        }
        ColumnFamilyOptions cfOptions = (cfProperties.size() > 0) ? ColumnFamilyOptions.getColumnFamilyOptionsFromProps(cfProperties) : new ColumnFamilyOptions();
        // getColumnFamilyOptionsFromProps returns null when the properties cannot be parsed.
        if (cfOptions == null) {
            throw new StorageInitializationException("Unable to parse Column Family Options.");
        }
        // Create a non default Column Family to hold the store data.
        List<ColumnFamilyDescriptor> descriptors = new ArrayList<ColumnFamilyDescriptor>();
        descriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions));
        // NOTE(review): getBytes() uses the platform default charset; store names are
        // presumably ASCII — confirm, or pin to UTF-8 if non-ASCII names are possible.
        descriptors.add(new ColumnFamilyDescriptor(storeName.getBytes(), cfOptions));
        List<ColumnFamilyHandle> handles = new ArrayList<ColumnFamilyHandle>();
        try {
            RocksDB rdbStore = RocksDB.open(dbOptions, dataDir, descriptors, handles);
            // Dispose of the default Column Family immediately. We don't use it and if it has not been disposed
            // by the time the DB is closed then the RocksDB code can terminate abnormally (if the RocksDB code is
            // built with assertions enabled). The handle will go out of scope on its own and the Java finalizer
            // will (eventually) do this for us, but, that is not fast enough for the unit tests.
            handles.get(0).dispose();
            ColumnFamilyHandle storeHandle = handles.get(1);
            RocksDbStorageEngine rdbStorageEngine;
            if (this.voldemortconfig.getRocksdbPrefixKeysWithPartitionId()) {
                rdbStorageEngine = new PartitionPrefixedRocksDbStorageEngine(storeName, rdbStore, storeHandle, cfOptions, lockStripes, strategy, voldemortconfig.isRocksdbEnableReadLocks());
            } else {
                rdbStorageEngine = new RocksDbStorageEngine(storeName, rdbStore, storeHandle, cfOptions, lockStripes, voldemortconfig.isRocksdbEnableReadLocks());
            }
            stores.put(storeName, rdbStorageEngine);
        } catch (Exception e) {
            throw new StorageInitializationException(e);
        }
    }
    return stores.get(storeName);
}
Use of org.rocksdb.DBOptions in the Alibaba JStorm project:
class RocksTTLDBCache, method initDb.
/**
 * Opens (or creates) the TTL-enabled RocksDB under {@code rootDir} with one column
 * family per requested TTL, plus the default column family with no expiration.
 * The resulting handles are registered in {@code windowHandlers}, keyed by TTL.
 *
 * @param list TTL values (seconds); each gets its own column family named after the value
 * @throws Exception if the database cannot be opened
 */
public void initDb(List<Integer> list) throws Exception {
    LOG.info("Begin to init rocksDB of {}", rootDir);
    // Descriptors and TTLs are built in lock-step: index 0 is the default column
    // family, which RocksDB.java requires to have TTL 0 (i.e. never expire).
    final List<ColumnFamilyDescriptor> descriptors = new ArrayList<>();
    final List<Integer> ttls = new ArrayList<>();
    descriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
    ttls.add(0);
    for (final Integer timeout : list) {
        descriptors.add(new ColumnFamilyDescriptor(String.valueOf(timeout).getBytes()));
        ttls.add(timeout);
    }
    DBOptions options = null;
    try {
        options = new DBOptions().setCreateMissingColumnFamilies(true).setCreateIfMissing(true);
        final List<ColumnFamilyHandle> handles = new ArrayList<>();
        ttlDB = TtlDB.open(options, rootDir, descriptors, handles, ttls, false);
        // Register every handle under its TTL so later reads/writes can pick
        // the matching column family.
        int index = 0;
        for (final Integer ttl : ttls) {
            windowHandlers.put(ttl, handles.get(index));
            index++;
        }
        LOG.info("Successfully init rocksDB of {}", rootDir);
    } finally {
        // Release the Java options wrapper once open has returned (or failed);
        // the opened DB itself is kept in the ttlDB field.
        if (options != null) {
            options.dispose();
        }
    }
}
Use of org.rocksdb.DBOptions in the Alibaba JStorm project:
class RocksDbHdfsState, method initRocksDb.
/**
 * Initializes the local RocksDB instance for this state, opening it as a TtlDB
 * when a state TTL is configured and as a plain RocksDB otherwise, then triggers
 * an initial compaction and resets the checkpoint bookkeeping fields.
 *
 * <p>Options come from the default {@link RocksDbOptionsFactory}, optionally
 * replaced by a user-supplied factory class from the topology configuration.
 *
 * @throws RuntimeException if the database cannot be opened
 */
protected void initRocksDb() {
    RocksDbOptionsFactory optionFactory = new RocksDbOptionsFactory.Defaults();
    Options options = optionFactory.createOptions(null);
    // NOTE(review): dbOptions/cfOptions are created (and possibly customized by the
    // user factory, which may have side effects) but never passed to open() below,
    // and are never closed — looks like dead code and a native-memory leak; confirm
    // whether the factory calls are needed before removing them.
    DBOptions dbOptions = optionFactory.createDbOptions(null);
    ColumnFamilyOptions cfOptions = optionFactory.createColumnFamilyOptions(null);
    String optionsFactoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
    if (optionsFactoryClass != null) {
        // A user-defined factory may wrap/replace each options object.
        RocksDbOptionsFactory udfOptionFactory = (RocksDbOptionsFactory) Utils.newInstance(optionsFactoryClass);
        options = udfOptionFactory.createOptions(options);
        dbOptions = udfOptionFactory.createDbOptions(dbOptions);
        cfOptions = udfOptionFactory.createColumnFamilyOptions(cfOptions);
    }
    try {
        ttlTimeSec = ConfigExtension.getStateTtlTime(conf);
        if (ttlTimeSec > 0)
            rocksDb = TtlDB.open(options, rocksDbDir, ttlTimeSec, false);
        else
            rocksDb = RocksDB.open(options, rocksDbDir);
        // enable compaction
        rocksDb.compactRange();
        LOG.info("Finish the initialization of RocksDB");
    } catch (RocksDBException e) {
        LOG.error("Failed to open rocksdb located at " + rocksDbDir, e);
        // BUGFIX: keep the original message but attach the cause — the previous
        // `new RuntimeException(e.getMessage())` dropped the stack trace of the
        // underlying RocksDBException.
        throw new RuntimeException(e.getMessage(), e);
    }
    lastCheckpointFiles = new HashSet<String>();
    lastCleanTime = System.currentTimeMillis();
    lastSuccessBatchId = -1;
}
Aggregations