Use of org.rocksdb.FlushOptions in project kafka by apache.
The class RocksDBStore, method openDB.
@SuppressWarnings("unchecked")
public void openDB(ProcessorContext context) {
    // initialize the default rocksdb options
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);

    options = new Options();
    options.setTableFormatConfig(tableConfig);
    options.setWriteBufferSize(WRITE_BUFFER_SIZE);
    options.setCompressionType(COMPRESSION_TYPE);
    options.setCompactionStyle(COMPACTION_STYLE);
    options.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    options.setCreateIfMissing(true);
    options.setErrorIfExists(false);
    options.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDB;
    // note that the current implementation increases the number of compaction threads
    // but not flush threads.
    options.setIncreaseParallelism(Runtime.getRuntime().availableProcessors());

    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);

    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);

    final Map<String, Object> configs = context.appConfigs();
    final Class<RocksDBConfigSetter> configSetterClass =
        (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (configSetterClass != null) {
        final RocksDBConfigSetter configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, options, configs);
    }

    // we need to construct the serde while opening DB since
    // it is also triggered by windowed DB segments without initialization
    this.serdes = new StateSerdes<>(
        name,
        keySerde == null ? (Serde<K>) context.keySerde() : keySerde,
        valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde);

    this.dbDir = new File(new File(context.stateDir(), parentDir), this.name);
    try {
        this.db = openDB(this.dbDir, this.options, TTL_SECONDS);
    } catch (IOException e) {
        throw new StreamsException(e);
    }
}
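The flush path that consumes fOptions is not part of this excerpt. A minimal sketch of how such a store typically flushes, assuming a db field of type org.rocksdb.RocksDB and the fOptions field initialized above (the exception message and wrapping are illustrative, not taken from the project):

public void flush() {
    if (db == null) {
        return;
    }
    try {
        // setWaitForFlush(true) makes this call block until the
        // memtable contents have been persisted to disk
        db.flush(fOptions);
    } catch (final RocksDBException e) {
        throw new StreamsException("Error while flushing store " + name, e);
    }
}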
Use of org.rocksdb.FlushOptions in project apache-kafka-on-k8s by banzaicloud.
The class RocksDBStore, method openDB.
@SuppressWarnings("unchecked")
public void openDB(final ProcessorContext context) {
    // initialize the default rocksdb options
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);

    options = new Options();
    options.setTableFormatConfig(tableConfig);
    options.setWriteBufferSize(WRITE_BUFFER_SIZE);
    options.setCompressionType(COMPRESSION_TYPE);
    options.setCompactionStyle(COMPACTION_STYLE);
    options.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    options.setCreateIfMissing(true);
    options.setErrorIfExists(false);
    options.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDB;
    // note that the current implementation of setIncreaseParallelism affects the number
    // of compaction threads but not flush threads (the latter remains one). Also,
    // the parallelism value needs to be at least two because the code in
    // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
    // subtracts one from the value passed in to determine the number of compaction threads
    // (this could be a bug in the RocksDB code and their devs have been contacted).
    options.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));

    if (prepareForBulkload) {
        options.prepareForBulkLoad();
    }

    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);

    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);

    final Map<String, Object> configs = context.appConfigs();
    final Class<RocksDBConfigSetter> configSetterClass =
        (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (configSetterClass != null) {
        final RocksDBConfigSetter configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, options, configs);
    }

    this.dbDir = new File(new File(context.stateDir(), parentDir), this.name);
    try {
        this.db = openDB(this.dbDir, this.options, TTL_SECONDS);
    } catch (IOException e) {
        throw new ProcessorStateException(e);
    }
    open = true;
}
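Both openDB variants look up StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG so that applications can adjust the defaults chosen above. A hedged sketch of such a setter; the class name, store name, and tuning value are illustrative, not taken from either project:

import java.util.Map;
import org.apache.kafka.streams.state.RocksDBConfigSetter;
import org.rocksdb.Options;

public class CustomRocksDBConfigSetter implements RocksDBConfigSetter {
    @Override
    public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
        // illustrative override: a larger write buffer for one specific store
        if ("my-store".equals(storeName)) { // hypothetical store name
            options.setWriteBufferSize(32L * 1024 * 1024);
        }
    }
}

It would be registered in the application configuration, e.g. props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, CustomRocksDBConfigSetter.class).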
Use of org.rocksdb.FlushOptions in project samza by apache.
The class TestRocksDbKeyValueStoreJava, method testPerf.
@Test
public void testPerf() throws Exception {
    Config config = new MapConfig();
    Options options = new Options();
    options.setCreateIfMissing(true);
    File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
    RocksDbKeyValueStore store = new RocksDbKeyValueStore(
        dbDir, options, config, false, "dbStore",
        new WriteOptions(), new FlushOptions(),
        new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));

    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    String prefix = "this is the key prefix";
    Random r = new Random();
    for (int i = 0; i < 100000; i++) {
        store.put(genKey(outputStream, prefix, r.nextInt()), genValue());
    }

    byte[] firstKey = genKey(outputStream, prefix, 0);
    byte[] lastKey = genKey(outputStream, prefix, Integer.MAX_VALUE);

    long start;
    start = System.currentTimeMillis();
    KeyValueIterator<byte[], byte[]> iterator1 = store.range(firstKey, lastKey);
    long rangeTime = System.currentTimeMillis() - start;
    start = System.currentTimeMillis();
    Iterators.size(iterator1);
    long rangeIterTime = System.currentTimeMillis() - start;
    System.out.println("range iter create time: " + rangeTime + ", iterate time: " + rangeIterTime);
    iterator1.close();

    // comment out the range-query part above to get an accurate perf measurement for snapshot
    start = System.currentTimeMillis();
    KeyValueSnapshot<byte[], byte[]> snapshot = store.snapshot(firstKey, lastKey);
    KeyValueIterator<byte[], byte[]> iterator2 = snapshot.iterator();
    long snapshotTime = System.currentTimeMillis() - start;
    start = System.currentTimeMillis();
    Iterators.size(iterator2);
    long snapshotIterTime = System.currentTimeMillis() - start;
    System.out.println("snapshot iter create time: " + snapshotTime + ", iterate time: " + snapshotIterTime);
    iterator2.close();
    snapshot.close();
    store.close();
}
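FlushOptions, like the other RocksDB option objects above, wraps a native handle; outside a wrapper such as RocksDbKeyValueStore it is usually managed with try-with-resources so the handle is released. A minimal standalone sketch; the database path and the key/value bytes are illustrative:

import java.nio.charset.StandardCharsets;
import org.rocksdb.FlushOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class FlushOptionsExample {
    public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/flush-example"); // illustrative path
             FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
            db.put("key".getBytes(StandardCharsets.UTF_8), "value".getBytes(StandardCharsets.UTF_8));
            db.flush(flushOptions); // blocks until the memtable is written out as an SST file
        }
    }
}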
Use of org.rocksdb.FlushOptions in project samza by apache.
The class TestKeyValueSizeHistogramMetric, method setup.
@Before
public void setup() {
    Config config = new MapConfig();
    Options options = new Options();
    options.setCreateIfMissing(true);
    File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
    RocksDbKeyValueStore kvStore = new RocksDbKeyValueStore(
        dbDir, options, config, false, "dbStore",
        new WriteOptions(), new FlushOptions(),
        new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));
    KeyValueStore<String, String> serializedStore =
        new SerializedKeyValueStore<>(kvStore, stringSerde, stringSerde, serializedKeyValueStoreMetrics);
    store = new NullSafeKeyValueStore<>(serializedStore);
}
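The resulting store field is a string-typed view over RocksDB: SerializedKeyValueStore handles the string/byte[] conversion and NullSafeKeyValueStore rejects null keys and values. A hedged usage sketch against the composed store; the key and value are illustrative:

// hypothetical interaction with the store assembled in setup()
store.put("metric-key", "metric-value"); // serialized sizes feed the key/value size histograms
final String value = store.get("metric-key");
store.delete("metric-key");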
Use of org.rocksdb.FlushOptions in project samza by apache.
The class TestLargeMessageSafeKeyValueStores, method setup.
@Before
public void setup() {
    KeyValueStore<byte[], byte[]> kvStore;
    switch (typeOfStore) {
        case "inmemory": {
            kvStore = new InMemoryKeyValueStore(keyValueStoreMetrics);
            break;
        }
        case "rocksdb": {
            kvStore = new RocksDbKeyValueStore(
                dir,
                new org.rocksdb.Options()
                    .setCreateIfMissing(true)
                    .setCompressionType(org.rocksdb.CompressionType.SNAPPY_COMPRESSION),
                new MapConfig(), false, storeName,
                new WriteOptions(), new FlushOptions(), keyValueStoreMetrics);
            break;
        }
        default:
            throw new IllegalArgumentException("Type of store undefined: " + typeOfStore);
    }

    MessageCollector collector = envelope -> {
        int messageLength = ((byte[]) envelope.getMessage()).length;
        if (messageLength > maxMessageSize) {
            throw new SamzaException("Logged store message size " + messageLength + " for store " + storeName
                + " was larger than the maximum allowed message size " + maxMessageSize + ".");
        }
    };
    loggedStore = new LoggedStore<>(kvStore, systemStreamPartition, collector, loggedStoreMetrics);

    switch (storeConfig) {
        case "serde": {
            KeyValueStore<byte[], byte[]> largeMessageSafeStore =
                new LargeMessageSafeStore(loggedStore, storeName, dropLargeMessage, maxMessageSize);
            store = new SerializedKeyValueStore<>(largeMessageSafeStore, stringSerde, stringSerde, serializedKeyValueStoreMetrics);
            break;
        }
        case "cache-then-serde": {
            KeyValueStore<byte[], byte[]> toBeSerializedStore = loggedStore;
            if (dropLargeMessage) {
                toBeSerializedStore = new LargeMessageSafeStore(loggedStore, storeName, dropLargeMessage, maxMessageSize);
            }
            KeyValueStore<String, String> serializedStore =
                new SerializedKeyValueStore<>(toBeSerializedStore, stringSerde, stringSerde, serializedKeyValueStoreMetrics);
            store = new CachedStore<>(serializedStore, cacheSize, batchSize, cachedStoreMetrics);
            break;
        }
        // large messages are expected and StorageConfig.DISALLOW_LARGE_MESSAGES is true.
        case "serde-then-cache": {
            KeyValueStore<byte[], byte[]> cachedStore =
                new CachedStore<>(loggedStore, cacheSize, batchSize, cachedStoreMetrics);
            KeyValueStore<byte[], byte[]> largeMessageSafeStore =
                new LargeMessageSafeStore(cachedStore, storeName, dropLargeMessage, maxMessageSize);
            store = new SerializedKeyValueStore<>(largeMessageSafeStore, stringSerde, stringSerde, serializedKeyValueStoreMetrics);
            break;
        }
        default:
            throw new IllegalArgumentException("Store config undefined: " + storeConfig);
    }
    store = new NullSafeKeyValueStore<>(store);
}
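The point of the layering above is where LargeMessageSafeStore sits relative to serialization and caching. A hedged sketch of the behavior these fixtures exercise; the oversized value and the exception handling are illustrative, not taken from the test:

// hypothetical use of the store assembled in setup()
final String oversized = new String(new char[maxMessageSize + 1]).replace('\0', 'x');
try {
    store.put("some-key", oversized);
    // when dropLargeMessage is true the oversized entry is expected to be dropped silently
} catch (final SamzaException e) {
    // when dropLargeMessage is false the oversized entry is expected to be rejected
}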