
Example 1 with Options

Use of org.rocksdb.Options in project kafka by apache.

The class RocksDBStore, method openDB.

@SuppressWarnings("unchecked")
public void openDB(ProcessorContext context) {
    // initialize the default rocksdb options
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);
    options = new Options();
    options.setTableFormatConfig(tableConfig);
    options.setWriteBufferSize(WRITE_BUFFER_SIZE);
    options.setCompressionType(COMPRESSION_TYPE);
    options.setCompactionStyle(COMPACTION_STYLE);
    options.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    options.setCreateIfMissing(true);
    options.setErrorIfExists(false);
    options.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDB;
    // note that the current implementation increases the number of compaction threads
    // but not flush threads.
    options.setIncreaseParallelism(Runtime.getRuntime().availableProcessors());
    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);
    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);
    final Map<String, Object> configs = context.appConfigs();
    final Class<RocksDBConfigSetter> configSetterClass = (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (configSetterClass != null) {
        final RocksDBConfigSetter configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, options, configs);
    }
    // we need to construct the serde while opening DB since
    // it is also triggered by windowed DB segments without initialization
    this.serdes = new StateSerdes<>(name, keySerde == null ? (Serde<K>) context.keySerde() : keySerde, valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde);
    this.dbDir = new File(new File(context.stateDir(), parentDir), this.name);
    try {
        this.db = openDB(this.dbDir, this.options, TTL_SECONDS);
    } catch (IOException e) {
        throw new StreamsException(e);
    }
}
Also used : FlushOptions(org.rocksdb.FlushOptions) WriteOptions(org.rocksdb.WriteOptions) Options(org.rocksdb.Options) StreamsException(org.apache.kafka.streams.errors.StreamsException) BlockBasedTableConfig(org.rocksdb.BlockBasedTableConfig) IOException(java.io.IOException) RocksDBConfigSetter(org.apache.kafka.streams.state.RocksDBConfigSetter) File(java.io.File)
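
The config-setter hook above instantiates whatever class is registered under StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG and lets it adjust the Options after the defaults are applied. A minimal sketch of such a setter; the class name and the particular tweak are illustrative, not from the source:

import java.util.Map;
import org.apache.kafka.streams.state.RocksDBConfigSetter;
import org.rocksdb.Options;

// Hypothetical config setter, registered via StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG.
public class CustomRocksDBConfig implements RocksDBConfigSetter {
    @Override
    public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
        // Illustrative tweak: cap the number of in-memory write buffers for every store.
        options.setMaxWriteBufferNumber(2);
    }
}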

Example 2 with Options

Use of org.rocksdb.Options in project samza by apache.

The class RocksDbOptionsHelper, method options.

public static Options options(Config storeConfig, SamzaContainerContext containerContext) {
    Options options = new Options();
    long writeBufSize = storeConfig.getLong("container.write.buffer.size.bytes", 32 * 1024 * 1024);
    // Cache size and write buffer size are configured per container,
    // so divide them evenly among the container's tasks.
    int numTasks = containerContext.taskNames.size();
    options.setWriteBufferSize((int) (writeBufSize / numTasks));
    CompressionType compressionType = CompressionType.SNAPPY_COMPRESSION;
    String compressionInConfig = storeConfig.get(ROCKSDB_COMPRESSION, "snappy");
    switch (compressionInConfig) {
        case "snappy":
            compressionType = CompressionType.SNAPPY_COMPRESSION;
            break;
        case "bzip2":
            compressionType = CompressionType.BZLIB2_COMPRESSION;
            break;
        case "zlib":
            compressionType = CompressionType.ZLIB_COMPRESSION;
            break;
        case "lz4":
            compressionType = CompressionType.LZ4_COMPRESSION;
            break;
        case "lz4hc":
            compressionType = CompressionType.LZ4HC_COMPRESSION;
            break;
        case "none":
            compressionType = CompressionType.NO_COMPRESSION;
            break;
        default:
            log.warn("Unknown rocksdb.compression codec " + compressionInConfig + ", overwriting to " + compressionType.name());
    }
    options.setCompressionType(compressionType);
    long cacheSize = storeConfig.getLong("container.cache.size.bytes", 100 * 1024 * 1024L);
    // The configured cache is also per container; each task's store gets an equal share.
    long cacheSizePerTask = cacheSize / numTasks;
    int blockSize = storeConfig.getInt(ROCKSDB_BLOCK_SIZE_BYTES, 4096);
    BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
    tableOptions.setBlockCacheSize(cacheSizePerTask).setBlockSize(blockSize);
    options.setTableFormatConfig(tableOptions);
    CompactionStyle compactionStyle = CompactionStyle.UNIVERSAL;
    String compactionStyleInConfig = storeConfig.get(ROCKSDB_COMPACTION_STYLE, "universal");
    switch (compactionStyleInConfig) {
        case "universal":
            compactionStyle = CompactionStyle.UNIVERSAL;
            break;
        case "fifo":
            compactionStyle = CompactionStyle.FIFO;
            break;
        case "level":
            compactionStyle = CompactionStyle.LEVEL;
            break;
        default:
            log.warn("Unknown rocksdb.compaction.style " + compactionStyleInConfig + ", overwriting to " + compactionStyle.name());
    }
    options.setCompactionStyle(compactionStyle);
    options.setMaxWriteBufferNumber(storeConfig.getInt(ROCKSDB_NUM_WRITE_BUFFERS, 3));
    options.setCreateIfMissing(true);
    options.setErrorIfExists(false);
    options.setMaxLogFileSize(storeConfig.getLong(ROCKSDB_MAX_LOG_FILE_SIZE_BYTES, 64 * 1024 * 1024L));
    options.setKeepLogFileNum(storeConfig.getLong(ROCKSDB_KEEP_LOG_FILE_NUM, 2));
    return options;
}
Also used : Options(org.rocksdb.Options) BlockBasedTableConfig(org.rocksdb.BlockBasedTableConfig) CompactionStyle(org.rocksdb.CompactionStyle) CompressionType(org.rocksdb.CompressionType)
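
A short usage sketch for the helper above; the wrapper method, variable names, and store directory are assumptions for illustration:

import java.io.File;
import org.apache.samza.config.Config;
import org.apache.samza.container.SamzaContainerContext;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

// Hypothetical caller: build per-store options with the helper above, then open the DB.
static RocksDB openStoreDb(Config storeConfig, SamzaContainerContext containerContext, File storeDir)
        throws RocksDBException {
    Options options = RocksDbOptionsHelper.options(storeConfig, containerContext);
    return RocksDB.open(options, storeDir.getAbsolutePath());
}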

Example 3 with Options

Use of org.rocksdb.Options in project flink by apache.

The class ListViaMergeSpeedMiniBenchmark, method main.

public static void main(String[] args) throws Exception {
    final File rocksDir = new File("/tmp/rdb");
    FileUtils.deleteDirectory(rocksDir);
    final Options options = new Options()
            .setCompactionStyle(CompactionStyle.LEVEL)
            .setLevelCompactionDynamicLevelBytes(true)
            .setIncreaseParallelism(4)
            .setUseFsync(false)
            .setMaxOpenFiles(-1)
            .setDisableDataSync(true)
            .setCreateIfMissing(true)
            .setMergeOperator(new StringAppendOperator());
    final WriteOptions writeOptions = new WriteOptions().setSync(false).setDisableWAL(true);
    final RocksDB rocksDB = RocksDB.open(options, rocksDir.getAbsolutePath());
    final String key = "key";
    final String value = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ7890654321";
    final byte[] keyBytes = key.getBytes(StandardCharsets.UTF_8);
    final byte[] valueBytes = value.getBytes(StandardCharsets.UTF_8);
    final int num = 50000;
    // ----- insert -----
    System.out.println("begin insert");
    final long beginInsert = System.nanoTime();
    for (int i = 0; i < num; i++) {
        rocksDB.merge(writeOptions, keyBytes, valueBytes);
    }
    final long endInsert = System.nanoTime();
    System.out.println("end insert - duration: " + ((endInsert - beginInsert) / 1_000_000) + " ms");
    // ----- read (attempt 1) -----
    final byte[] resultHolder = new byte[num * (valueBytes.length + 2)];
    final long beginGet1 = System.nanoTime();
    rocksDB.get(keyBytes, resultHolder);
    final long endGet1 = System.nanoTime();
    System.out.println("end get - duration: " + ((endGet1 - beginGet1) / 1_000_000) + " ms");
    // ----- read (attempt 2) -----
    final long beginGet2 = System.nanoTime();
    rocksDB.get(keyBytes, resultHolder);
    final long endGet2 = System.nanoTime();
    System.out.println("end get - duration: " + ((endGet2 - beginGet2) / 1_000_000) + " ms");
    // ----- compact -----
    System.out.println("compacting...");
    final long beginCompact = System.nanoTime();
    rocksDB.compactRange();
    final long endCompact = System.nanoTime();
    System.out.println("end compaction - duration: " + ((endCompact - beginCompact) / 1_000_000) + " ms");
    // ----- read (attempt 3) -----
    final long beginGet3 = System.nanoTime();
    rocksDB.get(keyBytes, resultHolder);
    final long endGet3 = System.nanoTime();
    System.out.println("end get - duration: " + ((endGet3 - beginGet3) / 1_000_000) + " ms");
}
Also used : Options(org.rocksdb.Options) WriteOptions(org.rocksdb.WriteOptions) RocksDB(org.rocksdb.RocksDB) StringAppendOperator(org.rocksdb.StringAppendOperator) File(java.io.File)
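
Because the benchmark installs StringAppendOperator as the merge operator, each merge call appends the new value to the stored entry behind a delimiter (',' with the default constructor, to the best of my knowledge; treat that as an assumption). A small sketch, meant to sit inside the same main method, of decoding the merged entry:

    // Fetch the merged entry and split it back into the individual appended values.
    final byte[] merged = rocksDB.get(keyBytes);
    final String joined = new String(merged, StandardCharsets.UTF_8);
    // StringAppendOperator joins values with a single-character delimiter (',' by default).
    final String[] values = joined.split(",");
    System.out.println("merged values: " + values.length);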

Example 4 with Options

Use of org.rocksdb.Options in project apache-kafka-on-k8s by banzaicloud.

The class RocksDBStore, method openDB.

@SuppressWarnings("unchecked")
public void openDB(final ProcessorContext context) {
    // initialize the default rocksdb options
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);
    options = new Options();
    options.setTableFormatConfig(tableConfig);
    options.setWriteBufferSize(WRITE_BUFFER_SIZE);
    options.setCompressionType(COMPRESSION_TYPE);
    options.setCompactionStyle(COMPACTION_STYLE);
    options.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    options.setCreateIfMissing(true);
    options.setErrorIfExists(false);
    options.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDB;
    // note that the current implementation of setIncreaseParallelism affects the number
    // of compaction threads but not flush threads (the latter remains one). Also,
    // the parallelism value needs to be at least two because the code at
    // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
    // subtracts one from the value passed in to determine the number of compaction threads
    // (this could be a bug in the RocksDB code; the RocksDB devs have been contacted).
    options.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));
    if (prepareForBulkload) {
        options.prepareForBulkLoad();
    }
    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);
    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);
    final Map<String, Object> configs = context.appConfigs();
    final Class<RocksDBConfigSetter> configSetterClass = (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (configSetterClass != null) {
        final RocksDBConfigSetter configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, options, configs);
    }
    this.dbDir = new File(new File(context.stateDir(), parentDir), this.name);
    try {
        this.db = openDB(this.dbDir, this.options, TTL_SECONDS);
    } catch (IOException e) {
        throw new ProcessorStateException(e);
    }
    open = true;
}
Also used : FlushOptions(org.rocksdb.FlushOptions) WriteOptions(org.rocksdb.WriteOptions) Options(org.rocksdb.Options) BlockBasedTableConfig(org.rocksdb.BlockBasedTableConfig) IOException(java.io.IOException) RocksDBConfigSetter(org.apache.kafka.streams.state.RocksDBConfigSetter) File(java.io.File) ProcessorStateException(org.apache.kafka.streams.errors.ProcessorStateException)
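
The prepareForBulkload branch above flips the options into RocksDB's bulk-load mode via prepareForBulkLoad(). A standalone sketch of the usual load-then-compact cycle, assuming a RocksDB version where Options and RocksDB implement AutoCloseable; the path and keys are illustrative:

import java.nio.charset.StandardCharsets;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class BulkLoadSketch {
    public static void main(String[] args) throws RocksDBException {
        try (Options opts = new Options().setCreateIfMissing(true)) {
            // Tune for a large one-off write burst (bigger buffers, relaxed compaction).
            opts.prepareForBulkLoad();
            try (RocksDB db = RocksDB.open(opts, "/tmp/bulk-load-db")) {
                for (int i = 0; i < 100_000; i++) {
                    byte[] kv = ("key-" + i).getBytes(StandardCharsets.UTF_8);
                    db.put(kv, kv);
                }
                // Compact once after loading so subsequent reads are not penalized.
                db.compactRange();
            }
        }
    }
}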

Example 5 with Options

Use of org.rocksdb.Options in project samza by apache.

The class TestRocksDbKeyValueStoreJava, method testPerf.

@Test
public void testPerf() throws Exception {
    Config config = new MapConfig();
    Options options = new Options();
    options.setCreateIfMissing(true);
    File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
    RocksDbKeyValueStore store = new RocksDbKeyValueStore(dbDir, options, config, false, "dbStore", new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    String prefix = "this is the key prefix";
    Random r = new Random();
    for (int i = 0; i < 100000; i++) {
        store.put(genKey(outputStream, prefix, r.nextInt()), genValue());
    }
    byte[] firstKey = genKey(outputStream, prefix, 0);
    byte[] lastKey = genKey(outputStream, prefix, Integer.MAX_VALUE);
    long start;
    start = System.currentTimeMillis();
    KeyValueIterator<byte[], byte[]> iterator1 = store.range(firstKey, lastKey);
    long rangeTime = System.currentTimeMillis() - start;
    start = System.currentTimeMillis();
    Iterators.size(iterator1);
    long rangeIterTime = System.currentTimeMillis() - start;
    System.out.println("range iter create time: " + rangeTime + ", iterate time: " + rangeIterTime);
    iterator1.close();
    // To benchmark the snapshot accurately, comment out the range-query section above.
    start = System.currentTimeMillis();
    KeyValueSnapshot<byte[], byte[]> snapshot = store.snapshot(firstKey, lastKey);
    KeyValueIterator<byte[], byte[]> iterator2 = snapshot.iterator();
    long snapshotTime = System.currentTimeMillis() - start;
    start = System.currentTimeMillis();
    Iterators.size(iterator2);
    long snapshotIterTime = System.currentTimeMillis() - start;
    System.out.println("snapshot iter create time: " + snapshotTime + ", iterate time: " + snapshotIterTime);
    iterator2.close();
    snapshot.close();
    store.close();
}
Also used : FlushOptions(org.rocksdb.FlushOptions) WriteOptions(org.rocksdb.WriteOptions) Options(org.rocksdb.Options) Config(org.apache.samza.config.Config) MapConfig(org.apache.samza.config.MapConfig) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Random(java.util.Random) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) MetricsRegistryMap(org.apache.samza.metrics.MetricsRegistryMap) File(java.io.File) Test(org.junit.Test)
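
The genKey and genValue helpers are defined elsewhere in the test class; a plausible sketch of their behavior (these implementations are assumptions, not taken from the source):

import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Hypothetical helper: prefix + big-endian int suffix, so byte-wise key ordering
// matches numeric ordering for non-negative suffixes.
private static byte[] genKey(ByteArrayOutputStream outputStream, String prefix, int suffix) {
    outputStream.reset();
    byte[] prefixBytes = prefix.getBytes(StandardCharsets.UTF_8);
    outputStream.write(prefixBytes, 0, prefixBytes.length);
    byte[] suffixBytes = ByteBuffer.allocate(Integer.BYTES).putInt(suffix).array();
    outputStream.write(suffixBytes, 0, suffixBytes.length);
    return outputStream.toByteArray();
}

private static byte[] genValue() {
    // Arbitrary fixed-size payload; the contents do not matter for the timing test.
    return new byte[128];
}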

Aggregations

Options (org.rocksdb.Options): 18
WriteOptions (org.rocksdb.WriteOptions): 13
File (java.io.File): 9
FlushOptions (org.rocksdb.FlushOptions): 8
RocksDBException (org.rocksdb.RocksDBException): 6
BlockBasedTableConfig (org.rocksdb.BlockBasedTableConfig): 5
MapConfig (org.apache.samza.config.MapConfig): 4
MetricsRegistryMap (org.apache.samza.metrics.MetricsRegistryMap): 4
RocksDB (org.rocksdb.RocksDB): 4
IOException (java.io.IOException): 3
ArrayList (java.util.ArrayList): 3
Config (org.apache.samza.config.Config): 3
ColumnFamilyOptions (org.rocksdb.ColumnFamilyOptions): 3
DBOptions (org.rocksdb.DBOptions): 3
ReadOptions (org.rocksdb.ReadOptions): 3
RocksDbOptionsFactory (com.alibaba.jstorm.cache.rocksdb.RocksDbOptionsFactory): 2
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 2
RocksDBConfigSetter (org.apache.kafka.streams.state.RocksDBConfigSetter): 2
Test (org.junit.Test): 2
ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor): 2