Use of org.rocksdb.Options in project aion by aionnetwork.
The class RocksDBWrapper, method setupRocksDbOptions.
private Options setupRocksDbOptions() {
    Options options = new Options();
    options.setCreateIfMissing(true);
    options.setUseFsync(false);
    // LZ4 for the upper levels when compression is enabled; the bottommost level is configured with ZLIB
    options.setCompressionType(
        enableDbCompression ? CompressionType.LZ4_COMPRESSION : CompressionType.NO_COMPRESSION);
    options.setBottommostCompressionType(CompressionType.ZLIB_COMPRESSION);
    // write buffer and level-0 write-stall tuning
    options.setMinWriteBufferNumberToMerge(MIN_WRITE_BUFFER_NUMBER_TOMERGE);
    options.setLevel0StopWritesTrigger(LEVEL0_STOP_WRITES_TRIGGER);
    options.setLevel0SlowdownWritesTrigger(LEVEL0_SLOWDOWN_WRITES_TRIGGER);
    options.setAtomicFlush(true);
    options.setWriteBufferSize(this.writeBufferSize);
    options.setRandomAccessMaxBufferSize(this.readBufferSize);
    options.setParanoidChecks(true);
    options.setMaxOpenFiles(this.maxOpenFiles);
    options.setTableFormatConfig(setupBlockBasedTableConfig());
    // background compaction and flush configuration
    options.setDisableAutoCompactions(false);
    options.setIncreaseParallelism(max(1, Runtime.getRuntime().availableProcessors() / 2));
    options.setLevelCompactionDynamicLevelBytes(true);
    options.setMaxBackgroundCompactions(MAX_BACKGROUND_COMPACTIONS);
    options.setMaxBackgroundFlushes(MAX_BACKGROUND_FLUSHES);
    options.setBytesPerSync(BYTES_PER_SYNC);
    options.setCompactionPriority(CompactionPriority.MinOverlappingRatio);
    options.optimizeLevelStyleCompaction(OPTIMIZE_LEVEL_STYLE_COMPACTION);
    return options;
}
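The method above only builds the configuration; a minimal sketch of how such an Options instance might be consumed (the path, try-with-resources structure, and error handling are illustrative, not taken from the aion source) is:

    // Sketch only: opening a database with the options built above.
    // The path is a placeholder; RocksDB.loadLibrary() must run once per JVM.
    RocksDB.loadLibrary();
    try (Options options = setupRocksDbOptions();
            RocksDB db = RocksDB.open(options, "/tmp/aion-example-db")) {
        db.put("key".getBytes(), "value".getBytes());
    } catch (RocksDBException e) {
        // handle open/write failure
    }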
Use of org.rocksdb.Options in project storm by apache.
The class RocksDbStore, method prepare.
/**
 * Create metric store instance using the configurations provided via the config map.
 *
 * @param config Storm config map
 * @param metricsRegistry The Nimbus daemon metrics registry
 * @throws MetricException on preparation error
 */
@Override
public void prepare(Map<String, Object> config, StormMetricsRegistry metricsRegistry) throws MetricException {
    validateConfig(config);
    this.failureMeter = metricsRegistry.registerMeter("RocksDB:metric-failures");
    RocksDB.loadLibrary();
    boolean createIfMissing = ObjectReader.getBoolean(config.get(DaemonConfig.STORM_ROCKSDB_CREATE_IF_MISSING), false);
    try (Options options = new Options().setCreateIfMissing(createIfMissing)) {
        // use the hash index for prefix searches
        BlockBasedTableConfig tfc = new BlockBasedTableConfig();
        tfc.setIndexType(IndexType.kHashSearch);
        options.setTableFormatConfig(tfc);
        options.useCappedPrefixExtractor(RocksDbKey.KEY_SIZE);
        String path = getRocksDbAbsoluteDir(config);
        LOG.info("Opening RocksDB from {}", path);
        db = RocksDB.open(options, path);
    } catch (RocksDBException e) {
        String message = "Error opening RockDB database";
        LOG.error(message, e);
        throw new MetricException(message, e);
    }
    // create thread to delete old metrics and metadata
    Integer retentionHours = Integer.parseInt(config.get(DaemonConfig.STORM_ROCKSDB_METRIC_RETENTION_HOURS).toString());
    Integer deletionPeriod = 0;
    if (config.containsKey(DaemonConfig.STORM_ROCKSDB_METRIC_DELETION_PERIOD_HOURS)) {
        deletionPeriod = Integer.parseInt(config.get(DaemonConfig.STORM_ROCKSDB_METRIC_DELETION_PERIOD_HOURS).toString());
    }
    metricsCleaner = new MetricsCleaner(this, retentionHours, deletionPeriod, failureMeter, metricsRegistry);
    // create thread to process insertion of all metrics
    metricsWriter = new RocksDbMetricsWriter(this, this.queue, this.failureMeter);
    int cacheCapacity = Integer.parseInt(config.get(DaemonConfig.STORM_ROCKSDB_METADATA_STRING_CACHE_CAPACITY).toString());
    StringMetadataCache.init(metricsWriter, cacheCapacity);
    readOnlyStringMetadataCache = StringMetadataCache.getReadOnlyStringMetadataCache();
    // init the writer once the cache is setup
    metricsWriter.init();
    // start threads after metadata cache created
    Thread thread = new Thread(metricsCleaner, "RocksDbMetricsCleaner");
    thread.setDaemon(true);
    thread.start();
    thread = new Thread(metricsWriter, "RocksDbMetricsWriter");
    thread.setDaemon(true);
    thread.start();
}
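The kHashSearch index and capped prefix extractor configured above only pay off when reads are prefix scans. A minimal sketch of such a scan (db and prefixBytes are placeholders, not Storm code) might look like:

    // Sketch only: prefix iteration against a DB opened with a hash index
    // and a capped prefix extractor. 'db' and 'prefixBytes' are placeholders.
    try (ReadOptions readOptions = new ReadOptions().setPrefixSameAsStart(true);
            RocksIterator it = db.newIterator(readOptions)) {
        for (it.seek(prefixBytes); it.isValid(); it.next()) {
            byte[] key = it.key();
            byte[] value = it.value();
            // process one entry
        }
    }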
Use of org.rocksdb.Options in project samza by apache.
The class RocksDbOptionsHelper, method options.
public static Options options(Config storeConfig, int numTasksForContainer, File storeDir, StorageEngineFactory.StoreMode storeMode) {
    Options options = new Options();
    if (storeConfig.getBoolean(ROCKSDB_WAL_ENABLED, false)) {
        // store.flush() will flushWAL(sync = true) instead
        options.setManualWalFlush(true);
        options.setWalRecoveryMode(WALRecoveryMode.AbsoluteConsistency);
    }
    Long writeBufSize = storeConfig.getLong("container.write.buffer.size.bytes", 32 * 1024 * 1024);
    // Cache size and write buffer size are specified on a per-container basis.
    options.setWriteBufferSize((int) (writeBufSize / numTasksForContainer));
    CompressionType compressionType = CompressionType.SNAPPY_COMPRESSION;
    String compressionInConfig = storeConfig.get(ROCKSDB_COMPRESSION, "snappy");
    switch (compressionInConfig) {
        case "snappy":
            compressionType = CompressionType.SNAPPY_COMPRESSION;
            break;
        case "bzip2":
            compressionType = CompressionType.BZLIB2_COMPRESSION;
            break;
        case "zlib":
            compressionType = CompressionType.ZLIB_COMPRESSION;
            break;
        case "lz4":
            compressionType = CompressionType.LZ4_COMPRESSION;
            break;
        case "lz4hc":
            compressionType = CompressionType.LZ4HC_COMPRESSION;
            break;
        case "none":
            compressionType = CompressionType.NO_COMPRESSION;
            break;
        default:
            log.warn("Unknown rocksdb.compression codec " + compressionInConfig + ", overwriting to " + compressionType.name());
    }
    options.setCompressionType(compressionType);
    long blockCacheSize = getBlockCacheSize(storeConfig, numTasksForContainer);
    int blockSize = storeConfig.getInt(ROCKSDB_BLOCK_SIZE_BYTES, 4096);
    BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
    tableOptions.setBlockCacheSize(blockCacheSize).setBlockSize(blockSize);
    options.setTableFormatConfig(tableOptions);
    setCompactionOptions(storeConfig, options);
    options.setMaxWriteBufferNumber(storeConfig.getInt(ROCKSDB_NUM_WRITE_BUFFERS, 3));
    options.setCreateIfMissing(true);
    options.setErrorIfExists(false);
    options.setMaxLogFileSize(storeConfig.getLong(ROCKSDB_MAX_LOG_FILE_SIZE_BYTES, 64 * 1024 * 1024L));
    options.setKeepLogFileNum(storeConfig.getLong(ROCKSDB_KEEP_LOG_FILE_NUM, 2));
    options.setDeleteObsoleteFilesPeriodMicros(storeConfig.getLong(ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS, 21600000000L));
    options.setMaxOpenFiles(storeConfig.getInt(ROCKSDB_MAX_OPEN_FILES, -1));
    options.setMaxFileOpeningThreads(storeConfig.getInt(ROCKSDB_MAX_FILE_OPENING_THREADS, 16));
    // The default for rocksdb is 18446744073709551615, which is larger than java Long.MAX_VALUE. Hence setting it only if it's passed.
    if (storeConfig.containsKey(ROCKSDB_MAX_MANIFEST_FILE_SIZE)) {
        options.setMaxManifestFileSize(storeConfig.getLong(ROCKSDB_MAX_MANIFEST_FILE_SIZE));
    }
    // use prepareForBulkLoad only when i. the store is being requested in BulkLoad mode
    // and ii. the storeDirectory does not exist (fresh restore), because bulk load does not work seamlessly with
    // existing stores : https://github.com/facebook/rocksdb/issues/2734
    StorageManagerUtil storageManagerUtil = new StorageManagerUtil();
    if (storeMode.equals(StorageEngineFactory.StoreMode.BulkLoad) && !storageManagerUtil.storeExists(storeDir)) {
        log.info("Using prepareForBulkLoad for restore to " + storeDir);
        options.prepareForBulkLoad();
    }
    return options;
}
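A call site for this helper (hypothetical values; storeConfig, numTasks, and the directory are placeholders, and ReadWrite is assumed as the non-bulk-load store mode) might look like:

    // Sketch only: consuming RocksDbOptionsHelper.options(...).
    File storeDir = new File("/tmp/my-store");
    try (Options options = RocksDbOptionsHelper.options(
                storeConfig, numTasks, storeDir, StorageEngineFactory.StoreMode.ReadWrite);
            RocksDB db = RocksDB.open(options, storeDir.toString())) {
        // read from / write to db
    } catch (RocksDBException e) {
        // handle open failure
    }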
Use of org.rocksdb.Options in project samza by apache.
The class TestRocksDbKeyValueStoreJava, method testIterate.
@Test
public void testIterate() throws Exception {
    Config config = new MapConfig();
    Options options = new Options();
    options.setCreateIfMissing(true);
    File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
    RocksDbKeyValueStore store = new RocksDbKeyValueStore(dbDir, options, config, false, "dbStore",
        new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    String prefix = "prefix";
    for (int i = 0; i < 100; i++) {
        store.put(genKey(outputStream, prefix, i), genValue());
    }
    byte[] firstKey = genKey(outputStream, prefix, 0);
    byte[] lastKey = genKey(outputStream, prefix, 1000);
    KeyValueSnapshot<byte[], byte[]> snapshot = store.snapshot(firstKey, lastKey);
    // Make sure the cached Iterable won't change when new elements are added
    store.put(genKey(outputStream, prefix, 200), genValue());
    KeyValueIterator<byte[], byte[]> iterator = snapshot.iterator();
    assertTrue(Iterators.size(iterator) == 100);
    iterator.close();
    List<Integer> keys = new ArrayList<>();
    KeyValueIterator<byte[], byte[]> iterator2 = snapshot.iterator();
    while (iterator2.hasNext()) {
        Entry<byte[], byte[]> entry = iterator2.next();
        int key = Ints.fromByteArray(Arrays.copyOfRange(entry.getKey(), prefix.getBytes().length, entry.getKey().length));
        keys.add(key);
    }
    assertEquals(keys, IntStream.rangeClosed(0, 99).boxed().collect(Collectors.toList()));
    iterator2.close();
    outputStream.close();
    snapshot.close();
    store.close();
}
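The final assertion only holds if genKey produces the prefix bytes followed by a 4-byte big-endian integer, which is how the key is decoded above. A plausible sketch of those helpers (hypothetical; the real implementations are not shown in this excerpt) is:

    // Hypothetical helpers, consistent with how the test decodes keys;
    // not the actual genKey/genValue bodies from the test class.
    private byte[] genKey(ByteArrayOutputStream outputStream, String prefix, int i) throws IOException {
        outputStream.reset();
        outputStream.write(prefix.getBytes());
        outputStream.write(Ints.toByteArray(i)); // 4-byte big-endian int keeps byte order aligned with numeric order
        return outputStream.toByteArray();
    }

    private byte[] genValue() {
        return Ints.toByteArray(new Random().nextInt());
    }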
Use of org.rocksdb.Options in project samza by apache.
The class TestRocksDbKeyValueReader, method createRocksDb.
@BeforeClass
public static void createRocksDb() throws IOException, RocksDBException {
    if (Files.exists(dirPath)) {
        removeRecursiveDirectory(dirPath);
    }
    Files.createDirectories(dirPath);
    Options options = new Options().setCreateIfMissing(true);
    db = RocksDB.open(options, dirPath.toString());
    db.put("testString".getBytes(), "this is string".getBytes());
    db.put(ByteBuffer.allocate(4).putInt(123).array(), ByteBuffer.allocate(4).putInt(456).array());
}
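For reference, the two entries written above can be read back with the plain byte[] get API; a short sketch (not part of the original test class) is:

    // Sketch only: reading back the fixture data written in createRocksDb().
    // Both get calls throw RocksDBException, so they belong in a method that declares it.
    byte[] stringValue = db.get("testString".getBytes()); // "this is string"
    byte[] intValue = db.get(ByteBuffer.allocate(4).putInt(123).array());
    int decoded = ByteBuffer.wrap(intValue).getInt(); // 456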