Search in sources:

Example 1 with WriteOptions

use of org.rocksdb.WriteOptions in project kafka by apache.

the class RocksDBStore method openDB.

@SuppressWarnings("unchecked")
public void openDB(ProcessorContext context) {
    // initialize the default rocksdb options
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);
    options = new Options();
    options.setTableFormatConfig(tableConfig);
    options.setWriteBufferSize(WRITE_BUFFER_SIZE);
    options.setCompressionType(COMPRESSION_TYPE);
    options.setCompactionStyle(COMPACTION_STYLE);
    options.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    options.setCreateIfMissing(true);
    options.setErrorIfExists(false);
    options.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDB;
    // note that the current implementation increases the number of compaction threads
    // but not flush threads.
    options.setIncreaseParallelism(Runtime.getRuntime().availableProcessors());
    // disable the write-ahead log: Kafka Streams restores state from its changelog topic,
    // so the WAL's durability guarantee is redundant here
    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);
    // wait for memtable flushes to complete so data is on disk when flush() returns
    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);
    final Map<String, Object> configs = context.appConfigs();
    final Class<RocksDBConfigSetter> configSetterClass = (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (configSetterClass != null) {
        final RocksDBConfigSetter configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, options, configs);
    }
    // we need to construct the serde while opening DB since
    // it is also triggered by windowed DB segments without initialization
    this.serdes = new StateSerdes<>(name, keySerde == null ? (Serde<K>) context.keySerde() : keySerde, valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde);
    this.dbDir = new File(new File(context.stateDir(), parentDir), this.name);
    try {
        this.db = openDB(this.dbDir, this.options, TTL_SECONDS);
    } catch (IOException e) {
        throw new StreamsException(e);
    }
}
Also used: FlushOptions (org.rocksdb.FlushOptions), WriteOptions (org.rocksdb.WriteOptions), Options (org.rocksdb.Options), StreamsException (org.apache.kafka.streams.errors.StreamsException), BlockBasedTableConfig (org.rocksdb.BlockBasedTableConfig), IOException (java.io.IOException), RocksDBConfigSetter (org.apache.kafka.streams.state.RocksDBConfigSetter), File (java.io.File)
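
The interesting hook above is the RocksDBConfigSetter lookup: if StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG names a class, Streams instantiates it and lets it override the defaults just built. Below is a minimal sketch of such a setter; the class name and tuning values are illustrative assumptions, only the RocksDBConfigSetter interface itself comes from Kafka Streams.

import java.util.Map;
import org.apache.kafka.streams.state.RocksDBConfigSetter;
import org.rocksdb.Options;

// hypothetical setter; register it under StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG
public class CustomRocksDBConfig implements RocksDBConfigSetter {
    @Override
    public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
        // illustrative tuning: give write-heavy stores a larger write buffer
        options.setWriteBufferSize(32 * 1024 * 1024L);
        options.setMaxWriteBufferNumber(4);
    }
}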

Example 2 with WriteOptions

use of org.rocksdb.WriteOptions in project flink by apache.

the class ListViaMergeSpeedMiniBenchmark method main.

public static void main(String[] args) throws Exception {
    final File rocksDir = new File("/tmp/rdb");
    FileUtils.deleteDirectory(rocksDir);
    final Options options = new Options()
            .setCompactionStyle(CompactionStyle.LEVEL)
            .setLevelCompactionDynamicLevelBytes(true)
            .setIncreaseParallelism(4)
            .setUseFsync(false)
            .setMaxOpenFiles(-1)
            .setDisableDataSync(true)
            .setCreateIfMissing(true)
            .setMergeOperator(new StringAppendOperator());
    final WriteOptions writeOptions = new WriteOptions().setSync(false).setDisableWAL(true);
    final RocksDB rocksDB = RocksDB.open(options, rocksDir.getAbsolutePath());
    final String key = "key";
    final String value = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ7890654321";
    final byte[] keyBytes = key.getBytes(StandardCharsets.UTF_8);
    final byte[] valueBytes = value.getBytes(StandardCharsets.UTF_8);
    final int num = 50000;
    // ----- insert -----
    System.out.println("begin insert");
    final long beginInsert = System.nanoTime();
    for (int i = 0; i < num; i++) {
        rocksDB.merge(writeOptions, keyBytes, valueBytes);
    }
    final long endInsert = System.nanoTime();
    System.out.println("end insert - duration: " + ((endInsert - beginInsert) / 1_000_000) + " ms");
    // ----- read (attempt 1) -----
    final byte[] resultHolder = new byte[num * (valueBytes.length + 2)];
    final long beginGet1 = System.nanoTime();
    rocksDB.get(keyBytes, resultHolder);
    final long endGet1 = System.nanoTime();
    System.out.println("end get - duration: " + ((endGet1 - beginGet1) / 1_000_000) + " ms");
    // ----- read (attempt 2) -----
    final long beginGet2 = System.nanoTime();
    rocksDB.get(keyBytes, resultHolder);
    final long endGet2 = System.nanoTime();
    System.out.println("end get - duration: " + ((endGet2 - beginGet2) / 1_000_000) + " ms");
    // ----- compact -----
    System.out.println("compacting...");
    final long beginCompact = System.nanoTime();
    rocksDB.compactRange();
    final long endCompact = System.nanoTime();
    System.out.println("end compaction - duration: " + ((endCompact - beginCompact) / 1_000_000) + " ms");
    // ----- read (attempt 3) -----
    final long beginGet3 = System.nanoTime();
    rocksDB.get(keyBytes, resultHolder);
    final long endGet3 = System.nanoTime();
    System.out.println("end get - duration: " + ((endGet3 - beginGet3) / 1_000_000) + " ms");
}
Also used: Options (org.rocksdb.Options), WriteOptions (org.rocksdb.WriteOptions), RocksDB (org.rocksdb.RocksDB), StringAppendOperator (org.rocksdb.StringAppendOperator), File (java.io.File)
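
One thing worth noting: the benchmark never closes the RocksDB handle or the two options objects, which is harmless in a throwaway main() but leaks native memory in long-lived code. The RocksDB Java objects implement AutoCloseable, so a sketch of the same setup with deterministic cleanup looks roughly like this (path and payload are placeholders):

import java.io.File;
import java.nio.charset.StandardCharsets;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.StringAppendOperator;
import org.rocksdb.WriteOptions;

public final class MergeWithCleanup {
    public static void main(String[] args) throws Exception {
        final File dir = new File("/tmp/rdb-cleanup"); // placeholder path
        // Options, WriteOptions and RocksDB all wrap native resources and implement AutoCloseable
        try (final Options options = new Options()
                     .setCreateIfMissing(true)
                     .setMergeOperator(new StringAppendOperator());
             final WriteOptions writeOptions = new WriteOptions().setSync(false).setDisableWAL(true);
             final RocksDB db = RocksDB.open(options, dir.getAbsolutePath())) {
            // same merge-based append as the benchmark, minus the timing loops
            db.merge(writeOptions, "key".getBytes(StandardCharsets.UTF_8),
                    "value".getBytes(StandardCharsets.UTF_8));
        } // native handles released here, even if an exception is thrown
    }
}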

Example 3 with WriteOptions

use of org.rocksdb.WriteOptions in project apache-kafka-on-k8s by banzaicloud.

the class RocksDBStore method openDB.

@SuppressWarnings("unchecked")
public void openDB(final ProcessorContext context) {
    // initialize the default rocksdb options
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);
    options = new Options();
    options.setTableFormatConfig(tableConfig);
    options.setWriteBufferSize(WRITE_BUFFER_SIZE);
    options.setCompressionType(COMPRESSION_TYPE);
    options.setCompactionStyle(COMPACTION_STYLE);
    options.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    options.setCreateIfMissing(true);
    options.setErrorIfExists(false);
    options.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDB;
    // note that the current implementation of setIncreaseParallelism affects the number
    // of compaction threads but not flush threads (the latter remains one). Also,
    // the parallelism value needs to be at least two because the code at
    // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
    // subtracts one from the value passed in to determine the number of compaction threads
    // (this could be a bug in the RocksDB code, and its developers have been contacted).
    options.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));
    if (prepareForBulkload) {
        options.prepareForBulkLoad();
    }
    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);
    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);
    final Map<String, Object> configs = context.appConfigs();
    final Class<RocksDBConfigSetter> configSetterClass = (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (configSetterClass != null) {
        final RocksDBConfigSetter configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, options, configs);
    }
    this.dbDir = new File(new File(context.stateDir(), parentDir), this.name);
    try {
        this.db = openDB(this.dbDir, this.options, TTL_SECONDS);
    } catch (IOException e) {
        throw new ProcessorStateException(e);
    }
    open = true;
}
Also used: FlushOptions (org.rocksdb.FlushOptions), WriteOptions (org.rocksdb.WriteOptions), Options (org.rocksdb.Options), BlockBasedTableConfig (org.rocksdb.BlockBasedTableConfig), IOException (java.io.IOException), RocksDBConfigSetter (org.apache.kafka.streams.state.RocksDBConfigSetter), File (java.io.File), ProcessorStateException (org.apache.kafka.streams.errors.ProcessorStateException)
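
Both Kafka Streams variants pair setDisableWAL(true) with a FlushOptions that waits for completion: writes skip the write-ahead log for speed, and durability comes from explicit memtable flushes plus changelog replay on restore. A minimal sketch of that write-then-flush pattern against a raw RocksDB handle, with placeholder path and data:

import java.nio.charset.StandardCharsets;
import org.rocksdb.FlushOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.WriteOptions;

public final class NoWalWithExplicitFlush {
    public static void main(String[] args) throws Exception {
        try (final Options options = new Options().setCreateIfMissing(true);
             final RocksDB db = RocksDB.open(options, "/tmp/rdb-nowal"); // placeholder path
             final WriteOptions writeOptions = new WriteOptions().setDisableWAL(true);
             final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
            // fast path: the write bypasses the WAL entirely
            db.put(writeOptions, "k".getBytes(StandardCharsets.UTF_8),
                    "v".getBytes(StandardCharsets.UTF_8));
            // durability point: block until the memtable is persisted to an SST file
            db.flush(flushOptions);
        }
    }
}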

Example 4 with WriteOptions

use of org.rocksdb.WriteOptions in project samza by apache.

the class TestRocksDbKeyValueStoreJava method testPerf.

@Test
public void testPerf() throws Exception {
    Config config = new MapConfig();
    Options options = new Options();
    options.setCreateIfMissing(true);
    File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
    RocksDbKeyValueStore store = new RocksDbKeyValueStore(dbDir, options, config, false, "dbStore", new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    String prefix = "this is the key prefix";
    Random r = new Random();
    for (int i = 0; i < 100000; i++) {
        store.put(genKey(outputStream, prefix, r.nextInt()), genValue());
    }
    byte[] firstKey = genKey(outputStream, prefix, 0);
    byte[] lastKey = genKey(outputStream, prefix, Integer.MAX_VALUE);
    long start;
    start = System.currentTimeMillis();
    KeyValueIterator<byte[], byte[]> iterator1 = store.range(firstKey, lastKey);
    long rangeTime = System.currentTimeMillis() - start;
    start = System.currentTimeMillis();
    Iterators.size(iterator1);
    long rangeIterTime = System.currentTimeMillis() - start;
    System.out.println("range iter create time: " + rangeTime + ", iterate time: " + rangeIterTime);
    iterator1.close();
    // to get an accurate perf number for the snapshot path, comment out the range-query section above
    start = System.currentTimeMillis();
    KeyValueSnapshot<byte[], byte[]> snapshot = store.snapshot(firstKey, lastKey);
    KeyValueIterator<byte[], byte[]> iterator2 = snapshot.iterator();
    long snapshotTime = System.currentTimeMillis() - start;
    start = System.currentTimeMillis();
    Iterators.size(iterator2);
    long snapshotIterTime = System.currentTimeMillis() - start;
    System.out.println("snapshot iter create time: " + snapshotTime + ", iterate time: " + snapshotIterTime);
    iterator2.close();
    snapshot.close();
    store.close();
}
Also used: FlushOptions (org.rocksdb.FlushOptions), WriteOptions (org.rocksdb.WriteOptions), Options (org.rocksdb.Options), Config (org.apache.samza.config.Config), MapConfig (org.apache.samza.config.MapConfig), ByteArrayOutputStream (java.io.ByteArrayOutputStream), Random (java.util.Random), ThreadLocalRandom (java.util.concurrent.ThreadLocalRandom), MetricsRegistryMap (org.apache.samza.metrics.MetricsRegistryMap), File (java.io.File), Test (org.junit.Test)
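
The genKey and genValue helpers are not part of this snippet. A plausible reconstruction is sketched below, as it might appear inside the test class; it is hypothetical, and the real TestRocksDbKeyValueStoreJava implementations may differ. The big-endian int encoding matters because range(firstKey, lastKey) compares keys byte-wise, and big-endian keeps that order consistent with numeric order for the non-negative ints used as the range bounds.

import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// hypothetical helpers; the actual test's implementations may differ
private static byte[] genKey(ByteArrayOutputStream outputStream, String prefix, int n) throws Exception {
    outputStream.reset();
    outputStream.write(prefix.getBytes(StandardCharsets.UTF_8));
    // big-endian int: byte-wise order matches numeric order for non-negative values
    outputStream.write(ByteBuffer.allocate(4).putInt(n).array());
    return outputStream.toByteArray();
}

private static byte[] genValue() {
    // placeholder fixed payload; any deterministic byte array works for the perf loop
    return "test-value".getBytes(StandardCharsets.UTF_8);
}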

Example 5 with WriteOptions

use of org.rocksdb.WriteOptions in project samza by apache.

the class TestKeyValueSizeHistogramMetric method setup.

@Before
public void setup() {
    Config config = new MapConfig();
    Options options = new Options();
    options.setCreateIfMissing(true);
    File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
    RocksDbKeyValueStore kvStore = new RocksDbKeyValueStore(dbDir, options, config, false, "dbStore", new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));
    KeyValueStore<String, String> serializedStore = new SerializedKeyValueStore<>(kvStore, stringSerde, stringSerde, serializedKeyValueStoreMetrics);
    store = new NullSafeKeyValueStore<>(serializedStore);
}
Also used: FlushOptions (org.rocksdb.FlushOptions), WriteOptions (org.rocksdb.WriteOptions), Options (org.rocksdb.Options), Config (org.apache.samza.config.Config), MapConfig (org.apache.samza.config.MapConfig), MetricsRegistryMap (org.apache.samza.metrics.MetricsRegistryMap), File (java.io.File), Before (org.junit.Before)
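
The setup stacks three layers: RocksDbKeyValueStore handles raw bytes, SerializedKeyValueStore applies the String serdes, and NullSafeKeyValueStore rejects null keys and values. The tests then talk only to the outermost layer, roughly like this (keys and values are placeholders):

// illustrative use of the layered store built in setup()
store.put("some-key", "some-value");  // serialized to bytes, then written to RocksDB
String value = store.get("some-key"); // bytes read back and deserialized to a String
store.delete("some-key");             // a null key here would throw in NullSafeKeyValueStore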

Aggregations (usage counts across all matches)

WriteOptions (org.rocksdb.WriteOptions): 21
File (java.io.File): 10
FlushOptions (org.rocksdb.FlushOptions): 9
Options (org.rocksdb.Options): 8
Test (org.junit.Test): 7
RocksDB (org.rocksdb.RocksDB): 6
MapConfig (org.apache.samza.config.MapConfig): 5
MetricsRegistryMap (org.apache.samza.metrics.MetricsRegistryMap): 5
ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle): 5
IOException (java.io.IOException): 4
ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor): 4
ColumnFamilyOptions (org.rocksdb.ColumnFamilyOptions): 4
WriteBatch (org.rocksdb.WriteBatch): 4
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 3
RocksDBConfigSetter (org.apache.kafka.streams.state.RocksDBConfigSetter): 3
Config (org.apache.samza.config.Config): 3
BlockBasedTableConfig (org.rocksdb.BlockBasedTableConfig): 3
DBOptions (org.rocksdb.DBOptions): 3
RocksDBException (org.rocksdb.RocksDBException): 3
ArrayList (java.util.ArrayList): 2