Example 16 with WriteOptions

use of org.rocksdb.WriteOptions in project bookkeeper by apache.

the class RocksdbKVStore method openRocksdb.

protected void openRocksdb(StateStoreSpec spec) throws StateStoreException {
    // initialize the db options
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);
    tableConfig.setChecksumType(DEFAULT_CHECKSUM_TYPE);
    dbOpts = new DBOptions();
    dbOpts.setCreateIfMissing(true);
    dbOpts.setErrorIfExists(false);
    dbOpts.setInfoLogLevel(DEFAULT_LOG_LEVEL);
    dbOpts.setIncreaseParallelism(DEFAULT_PARALLELISM);
    dbOpts.setCreateMissingColumnFamilies(true);
    cfOpts = new ColumnFamilyOptions();
    cfOpts.setTableFormatConfig(tableConfig);
    cfOpts.setWriteBufferSize(WRITE_BUFFER_SIZE);
    cfOpts.setCompressionType(DEFAULT_COMPRESSION_TYPE);
    cfOpts.setCompactionStyle(DEFAULT_COMPACTION_STYLE);
    cfOpts.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    // initialize the write options
    writeOpts = new WriteOptions();
    // disable wal, since the source of truth will be on distributedlog
    writeOpts.setDisableWAL(true);
    // initialize the flush options
    flushOpts = new FlushOptions();
    flushOpts.setWaitForFlush(true);
    // open the rocksdb
    this.dbDir = spec.getLocalStateStoreDir();
    Pair<RocksDB, List<ColumnFamilyHandle>> dbPair = openLocalDB(dbDir, dbOpts, cfOpts);
    this.db = dbPair.getLeft();
    this.metaCfHandle = dbPair.getRight().get(0);
    this.dataCfHandle = dbPair.getRight().get(1);
}
Also used : ColumnFamilyOptions(org.rocksdb.ColumnFamilyOptions) WriteOptions(org.rocksdb.WriteOptions) RocksDB(org.rocksdb.RocksDB) BlockBasedTableConfig(org.rocksdb.BlockBasedTableConfig) DBOptions(org.rocksdb.DBOptions) List(java.util.List) FlushOptions(org.rocksdb.FlushOptions)
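
The snippet allocates several native-backed objects (DBOptions, ColumnFamilyOptions, WriteOptions, FlushOptions) that it never releases; in the RocksDB Java API these wrap off-heap memory and implement AutoCloseable. A minimal cleanup sketch, assuming the same fields as above; the method name is hypothetical and the store's real close path is not shown on this page:

protected void closeRocksdb() {
    // release column family handles before the db itself
    if (null != metaCfHandle) {
        metaCfHandle.close();
    }
    if (null != dataCfHandle) {
        dataCfHandle.close();
    }
    if (null != db) {
        db.close();
    }
    // option objects can be closed once the db no longer needs them
    if (null != dbOpts) {
        dbOpts.close();
    }
    if (null != cfOpts) {
        cfOpts.close();
    }
    if (null != writeOpts) {
        writeOpts.close();
    }
    if (null != flushOpts) {
        flushOpts.close();
    }
}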

Example 17 with WriteOptions

use of org.rocksdb.WriteOptions in project storm by apache.

the class RocksDbStore method deleteMetadataBefore.

// deletes metadata strings before the provided timestamp
void deleteMetadataBefore(long firstValidTimestamp) throws MetricException {
    if (firstValidTimestamp < 1L) {
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
        throw new MetricException("Invalid timestamp for deleting metadata: " + firstValidTimestamp);
    }
    try (WriteBatch writeBatch = new WriteBatch();
        WriteOptions writeOps = new WriteOptions()) {
        // search all metadata strings
        RocksDbKey topologyMetadataPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_START);
        RocksDbKey lastPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_END);
        try {
            scanRange(topologyMetadataPrefix, lastPrefix, (key, value) -> {
                // we'll assume the metadata was recently used if still in the cache.
                if (!readOnlyStringMetadataCache.contains(key.getMetadataStringId())) {
                    if (value.getLastTimestamp() < firstValidTimestamp) {
                        writeBatch.delete(key.getRaw());
                    }
                }
                return true;
            });
        } catch (RocksDBException e) {
            throw new MetricException("Error reading metric data", e);
        }
        if (writeBatch.count() > 0) {
            LOG.info("Deleting {} metadata strings", writeBatch.count());
            try {
                db.write(writeOps, writeBatch);
            } catch (Exception e) {
                String message = "Failed to delete metadata strings";
                LOG.error(message, e);
                if (this.failureMeter != null) {
                    this.failureMeter.mark();
                }
                throw new MetricException(message, e);
            }
        }
    }
}
Also used : WriteOptions(org.rocksdb.WriteOptions) RocksDBException(org.rocksdb.RocksDBException) WriteBatch(org.rocksdb.WriteBatch) MetricException(org.apache.storm.metricstore.MetricException)
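
db.write applies the whole batch atomically, but by default the write is buffered rather than synced to disk before returning. Where deletes must survive a process crash, WriteOptions can request an fsync. A small variant of the pattern above, as a sketch reusing the same db handle; the key literal is illustrative only:

try (WriteBatch batch = new WriteBatch();
    WriteOptions syncOpts = new WriteOptions().setSync(true)) {
    // illustrative key; real callers would delete keys found by a scan
    batch.delete("stale-metadata-key".getBytes());
    // setSync(true) forces the write-ahead log to be fsync'd before write() returns
    db.write(syncOpts, batch);
} catch (RocksDBException e) {
    throw new MetricException("Error deleting metric data", e);
}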

Example 18 with WriteOptions

use of org.rocksdb.WriteOptions in project samza by apache.

the class TestRocksDbKeyValueStoreJava method testIterate.

@Test
public void testIterate() throws Exception {
    Config config = new MapConfig();
    Options options = new Options();
    options.setCreateIfMissing(true);
    File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
    RocksDbKeyValueStore store = new RocksDbKeyValueStore(dbDir, options, config, false, "dbStore", new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    String prefix = "prefix";
    for (int i = 0; i < 100; i++) {
        store.put(genKey(outputStream, prefix, i), genValue());
    }
    byte[] firstKey = genKey(outputStream, prefix, 0);
    byte[] lastKey = genKey(outputStream, prefix, 1000);
    KeyValueSnapshot<byte[], byte[]> snapshot = store.snapshot(firstKey, lastKey);
    // Make sure the cached Iterable won't change when new elements are added
    store.put(genKey(outputStream, prefix, 200), genValue());
    KeyValueIterator<byte[], byte[]> iterator = snapshot.iterator();
    assertTrue(Iterators.size(iterator) == 100);
    iterator.close();
    List<Integer> keys = new ArrayList<>();
    KeyValueIterator<byte[], byte[]> iterator2 = snapshot.iterator();
    while (iterator2.hasNext()) {
        Entry<byte[], byte[]> entry = iterator2.next();
        int key = Ints.fromByteArray(Arrays.copyOfRange(entry.getKey(), prefix.getBytes().length, entry.getKey().length));
        keys.add(key);
    }
    assertEquals(keys, IntStream.rangeClosed(0, 99).boxed().collect(Collectors.toList()));
    iterator2.close();
    outputStream.close();
    snapshot.close();
    store.close();
}
Also used : FlushOptions(org.rocksdb.FlushOptions) WriteOptions(org.rocksdb.WriteOptions) Options(org.rocksdb.Options) Config(org.apache.samza.config.Config) MapConfig(org.apache.samza.config.MapConfig) ArrayList(java.util.ArrayList) ByteArrayOutputStream(java.io.ByteArrayOutputStream) MetricsRegistryMap(org.apache.samza.metrics.MetricsRegistryMap) File(java.io.File) Test(org.junit.Test)
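
The genKey and genValue test helpers are not included on this page. The sketches below are consistent with how the keys are parsed back in the loop above (prefix bytes followed by a 4-byte big-endian integer, per Ints.fromByteArray); the real helpers in the Samza test may differ:

private byte[] genKey(ByteArrayOutputStream outputStream, String prefix, int i) throws Exception {
    outputStream.reset();
    outputStream.write(prefix.getBytes());
    // Guava's Ints.toByteArray is big-endian, matching Ints.fromByteArray above
    outputStream.write(Ints.toByteArray(i));
    return outputStream.toByteArray();
}

private byte[] genValue() {
    // payload contents are irrelevant to the iteration assertions
    return "value".getBytes();
}

This layout also explains the assertions: the snapshot covers the range from key 0 up to key 1000, so the put of key 200 after the snapshot is taken is invisible to it, and both iterators see exactly the original 100 entries.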

Example 19 with WriteOptions

use of org.rocksdb.WriteOptions in project jstorm by alibaba.

the class RocksTTLDBCache method putBatch.

protected void putBatch(Map<String, Object> map, Entry<Integer, ColumnFamilyHandle> putEntry) {
    WriteOptions writeOpts = null;
    WriteBatch writeBatch = null;
    Set<byte[]> putKeys = new HashSet<>();
    try {
        writeOpts = new WriteOptions();
        writeBatch = new WriteBatch();
        for (Entry<String, Object> entry : map.entrySet()) {
            String key = entry.getKey();
            Object value = entry.getValue();
            byte[] data = Utils.javaSerialize(value);
            if (StringUtils.isBlank(key) || data == null || data.length == 0) {
                continue;
            }
            byte[] keyByte = key.getBytes();
            writeBatch.put(putEntry.getValue(), keyByte, data);
            putKeys.add(keyByte);
        }
        ttlDB.write(writeOpts, writeBatch);
    } catch (Exception e) {
        LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
    } finally {
        if (writeOpts != null) {
            writeOpts.dispose();
        }
        if (writeBatch != null) {
            writeBatch.dispose();
        }
    }
    for (Entry<Integer, ColumnFamilyHandle> entry : windowHandlers.entrySet()) {
        if (entry.getKey().equals(putEntry.getKey())) {
            continue;
        }
        for (byte[] keyByte : putKeys) {
            try {
                ttlDB.remove(entry.getValue(), keyByte);
            } catch (Exception e) {
                LOG.error("Failed to remove other's " + new String(keyByte));
            }
        }
    }
}
Also used : WriteOptions(org.rocksdb.WriteOptions) WriteBatch(org.rocksdb.WriteBatch) IOException(java.io.IOException) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) HashSet(java.util.HashSet)
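
The dispose() calls date this snippet to an older RocksDB Java API; newer releases deprecate dispose() in favor of AutoCloseable.close(), which makes try-with-resources the idiomatic shape. A condensed sketch of the same batch write under the current API, assuming the same ttlDB handle and column family and omitting the blank-key filtering above:

try (WriteOptions writeOpts = new WriteOptions();
    WriteBatch writeBatch = new WriteBatch()) {
    for (Entry<String, Object> entry : map.entrySet()) {
        byte[] data = Utils.javaSerialize(entry.getValue());
        writeBatch.put(putEntry.getValue(), entry.getKey().getBytes(), data);
    }
    ttlDB.write(writeOpts, writeBatch);
} catch (Exception e) {
    LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
}

For the same reason, the remove call in the cleanup loop is named delete in current RocksDB releases.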

Example 20 with WriteOptions

use of org.rocksdb.WriteOptions in project alluxio by Alluxio.

the class RocksStoreTest method backupRestore.

@Test
public void backupRestore() throws Exception {
    ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setMemTableConfig(new HashLinkedListMemTableConfig())
        .setCompressionType(CompressionType.NO_COMPRESSION)
        // we always search using the initial long key
        .useFixedLengthPrefixExtractor(Longs.BYTES);
    List<ColumnFamilyDescriptor> columnDescriptors = Arrays.asList(new ColumnFamilyDescriptor("test".getBytes(), cfOpts));
    String dbDir = mFolder.newFolder("rocks").getAbsolutePath();
    String backupsDir = mFolder.newFolder("rocks-backups").getAbsolutePath();
    AtomicReference<ColumnFamilyHandle> testColumn = new AtomicReference<>();
    RocksStore store = new RocksStore("test", dbDir, backupsDir, columnDescriptors, Arrays.asList(testColumn));
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    RocksDB db = store.getDb();
    int count = 10;
    for (int i = 0; i < count; i++) {
        db.put(testColumn.get(), new WriteOptions().setDisableWAL(true), ("a" + i).getBytes(), "b".getBytes());
    }
    store.writeToCheckpoint(baos);
    String newDbDir = mFolder.newFolder("rocks-new").getAbsolutePath();
    store = new RocksStore("test-new", newDbDir, backupsDir, columnDescriptors, Arrays.asList(testColumn));
    store.restoreFromCheckpoint(new CheckpointInputStream(new ByteArrayInputStream(baos.toByteArray())));
    db = store.getDb();
    for (int i = 0; i < count; i++) {
        assertArrayEquals("b".getBytes(), db.get(testColumn.get(), ("a" + i).getBytes()));
    }
}
Also used : RocksDB(org.rocksdb.RocksDB) AtomicReference(java.util.concurrent.atomic.AtomicReference) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ColumnFamilyDescriptor(org.rocksdb.ColumnFamilyDescriptor) CheckpointInputStream(alluxio.master.journal.checkpoint.CheckpointInputStream) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) ColumnFamilyOptions(org.rocksdb.ColumnFamilyOptions) WriteOptions(org.rocksdb.WriteOptions) ByteArrayInputStream(java.io.ByteArrayInputStream) HashLinkedListMemTableConfig(org.rocksdb.HashLinkedListMemTableConfig) Test(org.junit.Test)
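
Two details worth noting in this test: the puts disable the WAL because the checkpoint/restore cycle itself provides durability, and each loop iteration allocates a new WriteOptions that is never closed. A tighter variant hoists a single instance into try-with-resources; a sketch against the same db handle:

try (WriteOptions disableWal = new WriteOptions().setDisableWAL(true)) {
    for (int i = 0; i < count; i++) {
        db.put(testColumn.get(), disableWal, ("a" + i).getBytes(), "b".getBytes());
    }
}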

Aggregations

WriteOptions (org.rocksdb.WriteOptions): 23
File (java.io.File): 11
FlushOptions (org.rocksdb.FlushOptions): 9
Options (org.rocksdb.Options): 9
Test (org.junit.Test): 7
RocksDB (org.rocksdb.RocksDB): 6
MapConfig (org.apache.samza.config.MapConfig): 5
MetricsRegistryMap (org.apache.samza.metrics.MetricsRegistryMap): 5
ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle): 5
IOException (java.io.IOException): 4
ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor): 4
ColumnFamilyOptions (org.rocksdb.ColumnFamilyOptions): 4
RocksDBException (org.rocksdb.RocksDBException): 4
WriteBatch (org.rocksdb.WriteBatch): 4
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 3
RocksDBConfigSetter (org.apache.kafka.streams.state.RocksDBConfigSetter): 3
Config (org.apache.samza.config.Config): 3
BlockBasedTableConfig (org.rocksdb.BlockBasedTableConfig): 3
DBOptions (org.rocksdb.DBOptions): 3
ReadOptions (org.rocksdb.ReadOptions): 3