Use of org.rocksdb.DBOptions in project jstorm by alibaba.
The class WindowedRocksDbHdfsState, method initRocksDb.
@Override
protected void initRocksDb() {
    windowToCFHandler = new HashMap<>();
    RocksDbOptionsFactory optionFactory = new RocksDbOptionsFactory.Defaults();
    Options options = optionFactory.createOptions(null);
    DBOptions dbOptions = optionFactory.createDbOptions(null);
    ColumnFamilyOptions cfOptions = optionFactory.createColumnFamilyOptions(null);
    String optionsFactoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
    if (optionsFactoryClass != null) {
        RocksDbOptionsFactory udfOptionFactory = (RocksDbOptionsFactory) Utils.newInstance(optionsFactoryClass);
        options = udfOptionFactory.createOptions(options);
        dbOptions = udfOptionFactory.createDbOptions(dbOptions);
        cfOptions = udfOptionFactory.createColumnFamilyOptions(cfOptions);
    }
    try {
        ttlTimeSec = ConfigExtension.getStateTtlTime(conf);
        List<Integer> ttlValues = new ArrayList<>();
        List<byte[]> families = RocksDB.listColumnFamilies(options, rocksDbDir);
        List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
        List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
        if (families != null) {
            for (byte[] bytes : families) {
                columnFamilyDescriptors.add(new ColumnFamilyDescriptor(bytes, cfOptions));
                LOG.debug("Load colum family of {}", new String(bytes));
                if (ttlTimeSec > 0)
                    ttlValues.add(ttlTimeSec);
            }
        }
        if (columnFamilyDescriptors.size() > 0) {
            if (ttlTimeSec > 0)
                rocksDb = TtlDB.open(dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles, ttlValues, false);
            else
                rocksDb = RocksDB.open(dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles);
            int n = Math.min(columnFamilyDescriptors.size(), columnFamilyHandles.size());
            LOG.info("Try to load RocksDB with column family, desc_num={}, handler_num={}", columnFamilyDescriptors.size(), columnFamilyHandles.size());
            // skip default column
            for (int i = 1; i < n; i++) {
                windowToCFHandler.put((TimeWindow) serializer.deserialize(columnFamilyDescriptors.get(i).columnFamilyName()), columnFamilyHandles.get(i));
            }
        } else {
            rocksDb = RocksDB.open(options, rocksDbDir);
        }
        rocksDb.compactRange();
        LOG.info("Finish the initialization of RocksDB");
    } catch (RocksDBException e) {
        LOG.error("Failed to open rocksdb located at " + rocksDbDir, e);
        throw new RuntimeException(e.getMessage());
    }
    lastCheckpointFiles = new HashSet<String>();
    lastCleanTime = System.currentTimeMillis();
    lastSuccessBatchId = -1;
}
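For reference, the options factory configured through ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS only needs a no-argument constructor (it is created via Utils.newInstance) and the three create* methods invoked above. The class below is a hypothetical sketch of such a factory; its name and tuning values are illustrative, and the RocksDbOptionsFactory signatures are assumed to be exactly those implied by the calls in initRocksDb.

import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.CompressionType;
import org.rocksdb.DBOptions;
import org.rocksdb.Options;

// Hypothetical user-defined factory; RocksDbOptionsFactory is the jstorm interface
// used above (its import is omitted here), and its shape is inferred from initRocksDb.
public class CompressionTuningOptionsFactory implements RocksDbOptionsFactory {

    @Override
    public Options createOptions(Options current) {
        Options options = (current != null) ? current : new Options();
        // used for listColumnFamilies() and for opening a db without extra column families
        options.setCreateIfMissing(true);
        return options;
    }

    @Override
    public DBOptions createDbOptions(DBOptions current) {
        DBOptions dbOptions = (current != null) ? current : new DBOptions();
        // used when the db is opened together with its column family descriptors
        dbOptions.setCreateIfMissing(true);
        dbOptions.setCreateMissingColumnFamilies(true);
        return dbOptions;
    }

    @Override
    public ColumnFamilyOptions createColumnFamilyOptions(ColumnFamilyOptions current) {
        ColumnFamilyOptions cfOptions = (current != null) ? current : new ColumnFamilyOptions();
        // illustrative tuning value, not taken from jstorm
        cfOptions.setCompressionType(CompressionType.SNAPPY_COMPRESSION);
        return cfOptions;
    }
}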
Use of org.rocksdb.DBOptions in project jstorm by alibaba.
The class RocksDBTest, method ttlDbOpenWithColumnFamilies.
public void ttlDbOpenWithColumnFamilies() throws RocksDBException, InterruptedException {
    DBOptions dbOptions = null;
    TtlDB ttlDB = null;
    List<ColumnFamilyDescriptor> cfNames = new ArrayList<ColumnFamilyDescriptor>();
    List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<ColumnFamilyHandle>();
    cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
    cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes()));
    List<Integer> ttlValues = new ArrayList<Integer>();
    // Default column family with infinite lifetime
    ttlValues.add(0);
    // new column family with 1 second ttl
    ttlValues.add(1);
    try {
        System.out.println("Begin to open db");
        dbOptions = new DBOptions().setCreateMissingColumnFamilies(true).setCreateIfMissing(true);
        ttlDB = TtlDB.open(dbOptions, rootDir, cfNames, columnFamilyHandleList, ttlValues, false);
        System.out.println("Successfully open db " + rootDir);
        ttlDB.put("key".getBytes(), "value".getBytes());
        assertThat(ttlDB.get("key".getBytes())).isEqualTo("value".getBytes());
        ttlDB.put(columnFamilyHandleList.get(1), "key".getBytes(), "value".getBytes());
        assertThat(ttlDB.get(columnFamilyHandleList.get(1), "key".getBytes())).isEqualTo("value".getBytes());
        TimeUnit.SECONDS.sleep(2);
        ttlDB.compactRange();
        ttlDB.compactRange(columnFamilyHandleList.get(1));
        assertThat(ttlDB.get("key".getBytes())).isNotNull();
        assertThat(ttlDB.get(columnFamilyHandleList.get(1), "key".getBytes())).isNull();
    } finally {
        for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
            columnFamilyHandle.dispose();
        }
        if (ttlDB != null) {
            ttlDB.close();
        }
        if (dbOptions != null) {
            dbOptions.dispose();
        }
    }
}
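TTL expiry in RocksDB is lazy: expired entries are only dropped during compaction, which is why the test sleeps past the one-second TTL and calls compactRange() before asserting that the value in new_cf is gone. For a database with only the default column family, TtlDB also offers a simpler open overload; the snippet below is a minimal sketch, assuming a recent RocksJava where Options and TtlDB are AutoCloseable, with a placeholder path and TTL.

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.TtlDB;

public class TtlDbQuickStart {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             // 30 second ttl on the default column family, opened read-write
             TtlDB ttlDB = TtlDB.open(options, "/tmp/ttl-db", 30, false)) {
            ttlDB.put("key".getBytes(), "value".getBytes());
            // expired entries are removed lazily, during compaction
            ttlDB.compactRange();
        }
    }
}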
Use of org.rocksdb.DBOptions in project flink by apache.
The class KVStateRequestSerializerRocksDBTest, method testMapSerialization.
/**
 * Tests map serialization and deserialization match.
 *
 * @see KvStateRequestSerializerTest#testMapSerialization()
 *      KvStateRequestSerializerTest#testMapSerialization() using the heap state back-end test
 */
@Test
public void testMapSerialization() throws Exception {
    final long key = 0L;
    // objects for RocksDB state list serialisation
    DBOptions dbOptions = PredefinedOptions.DEFAULT.createDBOptions();
    dbOptions.setCreateIfMissing(true);
    ColumnFamilyOptions columnFamilyOptions = PredefinedOptions.DEFAULT.createColumnOptions();
    final RocksDBKeyedStateBackend<Long> longHeapKeyedStateBackend = new RocksDBKeyedStateBackend<>(
            new JobID(), "no-op", ClassLoader.getSystemClassLoader(), temporaryFolder.getRoot(),
            dbOptions, columnFamilyOptions, mock(TaskKvStateRegistry.class), LongSerializer.INSTANCE,
            1, new KeyGroupRange(0, 0), new ExecutionConfig());
    longHeapKeyedStateBackend.setCurrentKey(key);
    final InternalMapState<VoidNamespace, Long, String> mapState =
            (InternalMapState<VoidNamespace, Long, String>) longHeapKeyedStateBackend.getPartitionedState(
                    VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE,
                    new MapStateDescriptor<>("test", LongSerializer.INSTANCE, StringSerializer.INSTANCE));
    KvStateRequestSerializerTest.testMapSerialization(key, mapState);
}
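This test wires DBOptions and ColumnFamilyOptions into the backend constructor directly. In application code the same customization is normally done through the RocksDB state backend's options hook; the snippet below is a minimal sketch assuming the Flink 1.x flink-statebackend-rocksdb API (RocksDBStateBackend, PredefinedOptions, OptionsFactory), with a placeholder checkpoint URI and illustrative tuning values.

import java.io.IOException;

import org.apache.flink.contrib.streaming.state.OptionsFactory;
import org.apache.flink.contrib.streaming.state.PredefinedOptions;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;

public class RocksDbBackendOptionsExample {

    public static RocksDBStateBackend configuredBackend() throws IOException {
        // checkpoint URI is a placeholder for illustration
        RocksDBStateBackend backend = new RocksDBStateBackend("file:///tmp/checkpoints");
        backend.setPredefinedOptions(PredefinedOptions.SPINNING_DISK_OPTIMIZED);
        backend.setOptions(new OptionsFactory() {
            @Override
            public DBOptions createDBOptions(DBOptions currentOptions) {
                return currentOptions.setCreateIfMissing(true);
            }

            @Override
            public ColumnFamilyOptions createColumnOptions(ColumnFamilyOptions currentOptions) {
                // illustrative tuning value
                return currentOptions.setMaxWriteBufferNumber(4);
            }
        });
        return backend;
    }
}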
Use of org.rocksdb.DBOptions in project bookkeeper by apache.
The class RocksdbKVStore, method openRocksdb.
protected void openRocksdb(StateStoreSpec spec) throws StateStoreException {
    // initialize the db options
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);
    tableConfig.setChecksumType(DEFAULT_CHECKSUM_TYPE);
    dbOpts = new DBOptions();
    dbOpts.setCreateIfMissing(true);
    dbOpts.setErrorIfExists(false);
    dbOpts.setInfoLogLevel(DEFAULT_LOG_LEVEL);
    dbOpts.setIncreaseParallelism(DEFAULT_PARALLELISM);
    dbOpts.setCreateMissingColumnFamilies(true);
    cfOpts = new ColumnFamilyOptions();
    cfOpts.setTableFormatConfig(tableConfig);
    cfOpts.setWriteBufferSize(WRITE_BUFFER_SIZE);
    cfOpts.setCompressionType(DEFAULT_COMPRESSION_TYPE);
    cfOpts.setCompactionStyle(DEFAULT_COMPACTION_STYLE);
    cfOpts.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    // initialize the write options
    writeOpts = new WriteOptions();
    // disable wal, since the source of truth will be on distributedlog
    writeOpts.setDisableWAL(true);
    // initialize the flush options
    flushOpts = new FlushOptions();
    flushOpts.setWaitForFlush(true);
    // open the rocksdb
    this.dbDir = spec.getLocalStateStoreDir();
    Pair<RocksDB, List<ColumnFamilyHandle>> dbPair = openLocalDB(dbDir, dbOpts, cfOpts);
    this.db = dbPair.getLeft();
    this.metaCfHandle = dbPair.getRight().get(0);
    this.dataCfHandle = dbPair.getRight().get(1);
}
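openLocalDB is a helper of RocksdbKVStore that is not part of this listing. The sketch below is a hypothetical reconstruction of what it has to produce for the code above to work: a db handle plus one column family handle for metadata (index 0) and one for data (index 1). The class name, the "data" family name, and the File parameter type are assumptions, not the actual bookkeeper code.

import static java.nio.charset.StandardCharsets.UTF_8;

import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class LocalDbOpener {

    // Hypothetical sketch of the openLocalDB helper called above; the real
    // bookkeeper implementation may name and order its column families differently.
    static Pair<RocksDB, List<ColumnFamilyHandle>> openLocalDB(
            File dir, DBOptions dbOpts, ColumnFamilyOptions cfOpts) throws RocksDBException {
        // two column families: the default family for store metadata and a "data"
        // family for the key/value pairs; createMissingColumnFamilies(true) in the
        // DBOptions lets the second family be created on first open
        List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
                new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts),
                new ColumnFamilyDescriptor("data".getBytes(UTF_8), cfOpts));
        List<ColumnFamilyHandle> cfHandles = new ArrayList<>(cfDescs.size());
        RocksDB db = RocksDB.open(dbOpts, dir.getAbsolutePath(), cfDescs, cfHandles);
        return Pair.of(db, cfHandles);
    }
}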
Use of org.rocksdb.DBOptions in project jstorm by alibaba.
The class RocksDbUnitTest, method main.
public static void main(String[] args) {
    Map conf = JStormHelper.LoadConf(args[0]);
    putNum = JStormUtils.parseInt(conf.get("put.number"), 100);
    isFlush = JStormUtils.parseBoolean(conf.get("is.flush"), true);
    isCheckpoint = JStormUtils.parseBoolean(conf.get("is.checkpoint"), true);
    sleepTime = JStormUtils.parseInt(conf.get("sleep.time"), 5000);
    compactionInterval = JStormUtils.parseInt(conf.get("compaction.interval"), 30000);
    flushInterval = JStormUtils.parseInt(conf.get("flush.interval"), 3000);
    isCompaction = JStormUtils.parseBoolean(conf.get("is.compaction"), true);
    fileSizeBase = JStormUtils.parseLong(conf.get("file.size.base"), 10 * SizeUnit.KB);
    levelNum = JStormUtils.parseInt(conf.get("db.level.num"), 1);
    compactionTriggerNum = JStormUtils.parseInt(conf.get("db.compaction.trigger.num"), 4);
    LOG.info("Conf={}", conf);
    RocksDB db;
    File file = new File(cpPath);
    file.mkdirs();
    List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
    try {
        Options options = new Options();
        options.setCreateMissingColumnFamilies(true);
        options.setCreateIfMissing(true);
        options.setTargetFileSizeBase(fileSizeBase);
        options.setMaxBackgroundFlushes(2);
        options.setMaxBackgroundCompactions(2);
        options.setCompactionStyle(CompactionStyle.LEVEL);
        options.setNumLevels(levelNum);
        options.setLevelZeroFileNumCompactionTrigger(compactionTriggerNum);
        DBOptions dbOptions = new DBOptions();
        dbOptions.setCreateMissingColumnFamilies(true);
        dbOptions.setCreateIfMissing(true);
        dbOptions.setMaxBackgroundFlushes(2);
        dbOptions.setMaxBackgroundCompactions(2);
        ColumnFamilyOptions familyOptions = new ColumnFamilyOptions();
        familyOptions.setTargetFileSizeBase(fileSizeBase);
        familyOptions.setCompactionStyle(CompactionStyle.LEVEL);
        familyOptions.setNumLevels(levelNum);
        familyOptions.setLevelZeroFileNumCompactionTrigger(compactionTriggerNum);
        List<byte[]> families = RocksDB.listColumnFamilies(options, dbPath);
        List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
        if (families != null) {
            for (byte[] bytes : families) {
                columnFamilyDescriptors.add(new ColumnFamilyDescriptor(bytes, familyOptions));
                LOG.info("Load colum family of {}", new String(bytes));
            }
        }
        if (columnFamilyDescriptors.size() > 0) {
            db = RocksDB.open(dbOptions, dbPath, columnFamilyDescriptors, columnFamilyHandles);
        } else {
            db = RocksDB.open(options, dbPath);
        }
    } catch (RocksDBException e) {
        LOG.error("Failed to open db", e);
        return;
    }
    rocksDbTest(db, columnFamilyHandles);
    db.close();
}
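rocksDbTest is likewise not shown in this listing. The sketch below is a hypothetical driver loop wired to the put/flush/checkpoint knobs read from the configuration above; it assumes a recent RocksJava where FlushOptions and Checkpoint are AutoCloseable, and its names and structure are illustrative rather than the actual jstorm test body.

import java.util.List;

import org.rocksdb.Checkpoint;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.FlushOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class RocksDbTestDriver {

    // Hypothetical driver loop, not the actual jstorm rocksDbTest implementation.
    static void rocksDbTest(RocksDB db, List<ColumnFamilyHandle> handles,
                            int putNum, boolean isFlush, boolean isCheckpoint,
                            String cpPath) throws RocksDBException {
        for (int i = 0; i < putNum; i++) {
            byte[] key = ("key-" + i).getBytes();
            byte[] value = ("value-" + i).getBytes();
            if (handles.isEmpty()) {
                db.put(key, value);
            } else {
                // write to the first (default) column family when families were loaded
                db.put(handles.get(0), key, value);
            }
        }
        if (isFlush) {
            try (FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
                db.flush(flushOptions);
            }
        }
        if (isCheckpoint) {
            try (Checkpoint checkpoint = Checkpoint.create(db)) {
                // each checkpoint directory must not exist yet
                checkpoint.createCheckpoint(cpPath + "/" + System.currentTimeMillis());
            }
        }
    }
}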