
Example 6 with FlushOptions

Use of org.rocksdb.FlushOptions in project storm by apache.

From the class RocksDbMetricsWriter, method close().

@Override
public void close() {
    this.shutdown = true;
    // get all metadata from the cache to put into the database
    // use a new map to prevent threading issues with writer thread
    TreeMap<RocksDbKey, RocksDbValue> batchMap = new TreeMap<>();
    for (Map.Entry entry : stringMetadataCache.entrySet()) {
        String metadataString = (String) entry.getKey();
        StringMetadata val = (StringMetadata) entry.getValue();
        RocksDbValue rval = new RocksDbValue(val.getLastTimestamp(), metadataString);
        for (KeyType type : val.getMetadataTypes()) {
            // save the metadata for all types of strings it matches
            RocksDbKey rkey = new RocksDbKey(type, val.getStringId());
            batchMap.put(rkey, rval);
        }
    }
    try {
        processBatchInsert(batchMap);
    } catch (MetricException e) {
        LOG.error("Failed to insert all metadata", e);
    }
    // flush db to disk
    try (FlushOptions flushOps = new FlushOptions()) {
        flushOps.setWaitForFlush(true);
        store.db.flush(flushOps);
    } catch (RocksDBException e) {
        LOG.error("Failed ot flush RocksDB", e);
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
    }
}
Also used: RocksDBException(org.rocksdb.RocksDBException) TreeMap(java.util.TreeMap) FlushOptions(org.rocksdb.FlushOptions) Map(java.util.Map) MetricException(org.apache.storm.metricstore.MetricException)
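
For quick reference, the pattern above reduces to a few lines. A minimal sketch, assuming db is an already-open org.rocksdb.RocksDB handle (FlushOptions is AutoCloseable, so try-with-resources releases its native handle):

import org.rocksdb.FlushOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

static void flushBeforeShutdown(RocksDB db) {
    try (FlushOptions flushOptions = new FlushOptions()) {
        flushOptions.setWaitForFlush(true); // block until the memtable is persisted
        db.flush(flushOptions);
    } catch (RocksDBException e) {
        // the example above logs this and marks a failure meter
    }
}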

Example 7 with FlushOptions

Use of org.rocksdb.FlushOptions in project kafka by apache.

From the class RocksDBStore, method openDB().

@SuppressWarnings("unchecked")
void openDB(final Map<String, Object> configs, final File stateDir) {
    // initialize the default rocksdb options
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    userSpecifiedOptions = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions);
    final BlockBasedTableConfigWithAccessibleCache tableConfig = new BlockBasedTableConfigWithAccessibleCache();
    cache = new LRUCache(BLOCK_CACHE_SIZE);
    tableConfig.setBlockCache(cache);
    tableConfig.setBlockSize(BLOCK_SIZE);
    filter = new BloomFilter();
    tableConfig.setFilterPolicy(filter);
    userSpecifiedOptions.optimizeFiltersForHits();
    userSpecifiedOptions.setTableFormatConfig(tableConfig);
    userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE);
    userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE);
    userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE);
    userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    userSpecifiedOptions.setCreateIfMissing(true);
    userSpecifiedOptions.setErrorIfExists(false);
    userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDb
    // note that the current implementation of setIncreaseParallelism affects the number
    // of compaction threads but not flush threads (the latter remains one). Also,
    // the parallelism value needs to be at least two because the code at
    // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
    // subtracts one from the value passed in to determine the number of compaction threads
    // (this could be a bug in the RocksDB code, and their devs have been contacted).
    userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));
    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);
    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);
    final Class<RocksDBConfigSetter> configSetterClass = (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (configSetterClass != null) {
        configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, userSpecifiedOptions, configs);
    }
    dbDir = new File(new File(stateDir, parentDir), name);
    try {
        Files.createDirectories(dbDir.getParentFile().toPath());
        Files.createDirectories(dbDir.getAbsoluteFile().toPath());
    } catch (final IOException fatal) {
        throw new ProcessorStateException(fatal);
    }
    // Setup statistics before the database is opened, otherwise the statistics are not updated
    // with the measurements from Rocks DB
    maybeSetUpStatistics(configs);
    openRocksDB(dbOptions, columnFamilyOptions);
    open = true;
    addValueProvidersToMetricsRecorder();
}
Also used: IOException(java.io.IOException) FlushOptions(org.rocksdb.FlushOptions) BloomFilter(org.rocksdb.BloomFilter) ColumnFamilyOptions(org.rocksdb.ColumnFamilyOptions) WriteOptions(org.rocksdb.WriteOptions) LRUCache(org.rocksdb.LRUCache) RocksDBConfigSetter(org.apache.kafka.streams.state.RocksDBConfigSetter) DBOptions(org.rocksdb.DBOptions) File(java.io.File) ProcessorStateException(org.apache.kafka.streams.errors.ProcessorStateException)
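
The configSetterClass lookup above is the hook Kafka Streams exposes for overriding these RocksDB defaults. A minimal sketch of such a setter, using the public org.apache.kafka.streams.state.RocksDBConfigSetter interface (the buffer size chosen here is purely illustrative):

import java.util.Map;
import org.apache.kafka.streams.state.RocksDBConfigSetter;
import org.rocksdb.Options;

public class CustomRocksDBConfig implements RocksDBConfigSetter {
    @Override
    public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
        // e.g. enlarge the write buffer for a store known to be write-heavy
        options.setWriteBufferSize(32 * 1024 * 1024L);
    }

    @Override
    public void close(final String storeName, final Options options) {
        // release any RocksObject instances allocated in setConfig
    }
}

The class is registered under StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, the same key read in the example.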

Example 8 with FlushOptions

Use of org.rocksdb.FlushOptions in project jstorm by alibaba.

From the class RocksDbUnitTest, method rocksDbTest().

private static void rocksDbTest(RocksDB db, List<ColumnFamilyHandle> handlers) {
    try {
        ColumnFamilyHandle handler1 = null;
        ColumnFamilyHandle handler2 = null;
        if (handlers.size() > 0) {
            // skip default column family
            handler1 = handlers.get(1);
            handler2 = handlers.get(2);
        } else {
            handler1 = db.createColumnFamily(new ColumnFamilyDescriptor("test1".getBytes()));
            handler2 = db.createColumnFamily(new ColumnFamilyDescriptor("test2".getBytes()));
        }
        int startValue1 = getStartValue(db, handler1);
        int startValue2 = getStartValue(db, handler2);
        Checkpoint cp = Checkpoint.create(db);
        if (isCompaction) {
            db.compactRange();
            LOG.info("Compaction!");
        }
        long flushWaitTime = System.currentTimeMillis() + flushInterval;
        for (int i = 0; i < putNum || putNum == -1; i++) {
            db.put(handler1, String.valueOf(i % 1000).getBytes(), String.valueOf(startValue1 + i).getBytes());
            db.put(handler2, String.valueOf(i % 1000).getBytes(), String.valueOf(startValue2 + i).getBytes());
            if (isFlush && flushWaitTime <= System.currentTimeMillis()) {
                // FlushOptions wraps a native handle; close it after use
                try (FlushOptions flushOptions = new FlushOptions()) {
                    db.flush(flushOptions);
                }
                if (isCheckpoint) {
                    cp.createCheckpoint(cpPath + "/" + i);
                }
                flushWaitTime = System.currentTimeMillis() + flushInterval;
            }
        }
    } catch (RocksDBException e) {
        LOG.error("Failed to put or flush", e);
    }
}
Also used: RocksDBException(org.rocksdb.RocksDBException) Checkpoint(org.rocksdb.Checkpoint) ColumnFamilyDescriptor(org.rocksdb.ColumnFamilyDescriptor) FlushOptions(org.rocksdb.FlushOptions) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle)
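
Since FlushOptions wraps a native handle, repeated flushing is cheaper when one instance is created up front and closed on shutdown, the same pattern Example 7 uses with fOptions. A sketch, assuming db is an open RocksDB:

import org.rocksdb.FlushOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

static void writeAndFlushInRounds(RocksDB db, int rounds) throws RocksDBException {
    // one native options object for the whole loop, released at the end
    try (FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
        for (int i = 0; i < rounds; i++) {
            db.put(String.valueOf(i).getBytes(), "value".getBytes());
            db.flush(flushOptions);
        }
    }
}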

Example 9 with FlushOptions

Use of org.rocksdb.FlushOptions in project jstorm by alibaba.

From the class RocksDbHdfsState, method checkpoint().

/**
 * Flush the RocksDB memtable to disk, then create a checkpoint.
 *
 * @param batchId the id of the batch to checkpoint
 */
@Override
public void checkpoint(long batchId) {
    long startTime = System.currentTimeMillis();
    try {
        try (FlushOptions flushOptions = new FlushOptions()) {
            rocksDb.flush(flushOptions);
        }
        Checkpoint cp = Checkpoint.create(rocksDb);
        cp.createCheckpoint(getLocalCheckpointPath(batchId));
    } catch (RocksDBException e) {
        LOG.error("Failed to create checkpoint for batch-" + batchId, e);
        throw new RuntimeException(e); // keep the cause, not just its message
    }
    if (JStormMetrics.enabled)
        rocksDbFlushAndCpLatency.update(System.currentTimeMillis() - startTime);
}
Also used: RocksDBException(org.rocksdb.RocksDBException) Checkpoint(org.rocksdb.Checkpoint) FlushOptions(org.rocksdb.FlushOptions)
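
Checkpoint.createCheckpoint fails if the target directory already exists, and flushing first matters here: data still in the memtable (especially with the WAL disabled) would not be captured by the checkpoint's hard links. A sketch of the same flush-then-checkpoint sequence, with the checkpoint path purely illustrative:

import org.rocksdb.Checkpoint;
import org.rocksdb.FlushOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

static void checkpointBatch(RocksDB db, String checkpointDir) throws RocksDBException {
    try (FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
        db.flush(flushOptions); // persist the memtable before snapshotting
    }
    try (Checkpoint checkpoint = Checkpoint.create(db)) {
        checkpoint.createCheckpoint(checkpointDir); // directory must not exist yet
    }
}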

Example 10 with FlushOptions

Use of org.rocksdb.FlushOptions in project bookkeeper by apache.

From the class RocksdbKVStore, method openRocksdb().

protected void openRocksdb(StateStoreSpec spec) throws StateStoreException {
    // initialize the db options
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);
    tableConfig.setChecksumType(DEFAULT_CHECKSUM_TYPE);
    dbOpts = new DBOptions();
    dbOpts.setCreateIfMissing(true);
    dbOpts.setErrorIfExists(false);
    dbOpts.setInfoLogLevel(DEFAULT_LOG_LEVEL);
    dbOpts.setIncreaseParallelism(DEFAULT_PARALLELISM);
    dbOpts.setCreateMissingColumnFamilies(true);
    cfOpts = new ColumnFamilyOptions();
    cfOpts.setTableFormatConfig(tableConfig);
    cfOpts.setWriteBufferSize(WRITE_BUFFER_SIZE);
    cfOpts.setCompressionType(DEFAULT_COMPRESSION_TYPE);
    cfOpts.setCompactionStyle(DEFAULT_COMPACTION_STYLE);
    cfOpts.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    // initialize the write options
    writeOpts = new WriteOptions();
    // disable wal, since the source of truth will be on distributedlog
    writeOpts.setDisableWAL(true);
    // initialize the flush options
    flushOpts = new FlushOptions();
    flushOpts.setWaitForFlush(true);
    // open the rocksdb
    this.dbDir = spec.getLocalStateStoreDir();
    Pair<RocksDB, List<ColumnFamilyHandle>> dbPair = openLocalDB(dbDir, dbOpts, cfOpts);
    this.db = dbPair.getLeft();
    this.metaCfHandle = dbPair.getRight().get(0);
    this.dataCfHandle = dbPair.getRight().get(1);
}
Also used: ColumnFamilyOptions(org.rocksdb.ColumnFamilyOptions) WriteOptions(org.rocksdb.WriteOptions) RocksDB(org.rocksdb.RocksDB) BlockBasedTableConfig(org.rocksdb.BlockBasedTableConfig) DBOptions(org.rocksdb.DBOptions) List(java.util.List) FlushOptions(org.rocksdb.FlushOptions)
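
openLocalDB is not shown in this excerpt; given setCreateMissingColumnFamilies(true), it presumably wraps the column-family variant of RocksDB.open. A sketch under that assumption (the meta and data family names are guesses for illustration):

import java.util.Arrays;
import java.util.List;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

static RocksDB openWithColumnFamilies(String dbPath, DBOptions dbOpts, ColumnFamilyOptions cfOpts,
                                      List<ColumnFamilyHandle> handles) throws RocksDBException {
    // one descriptor per family; the default family must always be listed,
    // and handles is filled in the same order as the descriptors
    List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts),
        new ColumnFamilyDescriptor("meta".getBytes(), cfOpts),  // assumed family name
        new ColumnFamilyDescriptor("data".getBytes(), cfOpts)); // assumed family name
    return RocksDB.open(dbOpts, dbPath, descriptors, handles);
}

Note that the example indexes dbPair.getRight() from 0 for the meta family, so the real openLocalDB may strip or reorder the default handle; that detail is also an assumption here.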

Aggregations

FlushOptions (org.rocksdb.FlushOptions): 12 usages
WriteOptions (org.rocksdb.WriteOptions): 9 usages
File (java.io.File): 7 usages
Options (org.rocksdb.Options): 6 usages
MapConfig (org.apache.samza.config.MapConfig): 5 usages
MetricsRegistryMap (org.apache.samza.metrics.MetricsRegistryMap): 5 usages
IOException (java.io.IOException): 3 usages
RocksDBConfigSetter (org.apache.kafka.streams.state.RocksDBConfigSetter): 3 usages
Config (org.apache.samza.config.Config): 3 usages
Test (org.junit.Test): 3 usages
BlockBasedTableConfig (org.rocksdb.BlockBasedTableConfig): 3 usages
RocksDBException (org.rocksdb.RocksDBException): 3 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 2 usages
ArrayList (java.util.ArrayList): 2 usages
List (java.util.List): 2 usages
Random (java.util.Random): 2 usages
ProcessorStateException (org.apache.kafka.streams.errors.ProcessorStateException): 2 usages
Before (org.junit.Before): 2 usages
Checkpoint (org.rocksdb.Checkpoint): 2 usages
Arrays (java.util.Arrays): 1 usage