Use of org.rocksdb.WriteOptions in project samza by Apache.
The class TestLargeMessageSafeKeyValueStores, method setup:
@Before
public void setup() {
    KeyValueStore<byte[], byte[]> kvStore;
    switch (typeOfStore) {
        case "inmemory": {
            kvStore = new InMemoryKeyValueStore(keyValueStoreMetrics);
            break;
        }
        case "rocksdb": {
            kvStore = new RocksDbKeyValueStore(dir,
                new org.rocksdb.Options()
                    .setCreateIfMissing(true)
                    .setCompressionType(org.rocksdb.CompressionType.SNAPPY_COMPRESSION),
                new MapConfig(), false, storeName,
                new WriteOptions(), new FlushOptions(), keyValueStoreMetrics);
            break;
        }
        default:
            throw new IllegalArgumentException("Type of store undefined: " + typeOfStore);
    }
    MessageCollector collector = envelope -> {
        int messageLength = ((byte[]) envelope.getMessage()).length;
        if (messageLength > maxMessageSize) {
            throw new SamzaException("Logged store message size " + messageLength + " for store " + storeName
                + " was larger than the maximum allowed message size " + maxMessageSize + ".");
        }
    };
    loggedStore = new LoggedStore<>(kvStore, systemStreamPartition, collector, loggedStoreMetrics);
    switch (storeConfig) {
        case "serde": {
            KeyValueStore<byte[], byte[]> largeMessageSafeStore =
                new LargeMessageSafeStore(loggedStore, storeName, dropLargeMessage, maxMessageSize);
            store = new SerializedKeyValueStore<>(largeMessageSafeStore, stringSerde, stringSerde, serializedKeyValueStoreMetrics);
            break;
        }
        case "cache-then-serde": {
            KeyValueStore<byte[], byte[]> toBeSerializedStore = loggedStore;
            if (dropLargeMessage) {
                toBeSerializedStore = new LargeMessageSafeStore(loggedStore, storeName, dropLargeMessage, maxMessageSize);
            }
            KeyValueStore<String, String> serializedStore =
                new SerializedKeyValueStore<>(toBeSerializedStore, stringSerde, stringSerde, serializedKeyValueStoreMetrics);
            store = new CachedStore<>(serializedStore, cacheSize, batchSize, cachedStoreMetrics);
            break;
        }
        // large messages are expected and StorageConfig.DISALLOW_LARGE_MESSAGES is true.
        case "serde-then-cache": {
            KeyValueStore<byte[], byte[]> cachedStore = new CachedStore<>(loggedStore, cacheSize, batchSize, cachedStoreMetrics);
            KeyValueStore<byte[], byte[]> largeMessageSafeStore =
                new LargeMessageSafeStore(cachedStore, storeName, dropLargeMessage, maxMessageSize);
            store = new SerializedKeyValueStore<>(largeMessageSafeStore, stringSerde, stringSerde, serializedKeyValueStoreMetrics);
            break;
        }
        default:
            throw new IllegalArgumentException("Store config undefined: " + storeConfig);
    }
    store = new NullSafeKeyValueStore<>(store);
}
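The test wires LargeMessageSafeStore at different points in the store chain, with dropLargeMessage deciding whether oversized messages are silently dropped or rejected. As a rough illustration of that guard (a sketch only, not the actual Samza LargeMessageSafeStore code; the delegate field is hypothetical), a size-checking put might look like this:

// Hypothetical sketch of a size-guarding put; names mirror the test above,
// but this is not Samza's implementation.
public void put(byte[] key, byte[] value) {
    if (value != null && value.length > maxMessageSize) {
        if (dropLargeMessage) {
            return; // drop the oversized message silently
        }
        throw new SamzaException("Message size " + value.length
            + " exceeds the maximum allowed size " + maxMessageSize + ".");
    }
    delegate.put(key, value); // delegate: the wrapped KeyValueStore (hypothetical name)
}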
Use of org.rocksdb.WriteOptions in project flink by Apache.
The class RocksDBResourceContainer, method getWriteOptions:
/**
 * Gets the RocksDB {@link WriteOptions} to be used for write operations.
 */
public WriteOptions getWriteOptions() {
    // Disable WAL by default
    WriteOptions opt = new WriteOptions().setDisableWAL(true);
    handlesToClose.add(opt);
    // add user-defined options factory, if specified
    if (optionsFactory != null) {
        opt = optionsFactory.createWriteOptions(opt, handlesToClose);
    }
    return opt;
}
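Because the factory hook runs last, a user-defined factory can override the WAL default shown above. A minimal sketch, assuming Flink's RocksDBOptionsFactory contract in which each create method receives the current options plus the handlesToClose collection:

// Sketch of an options factory that re-enables the WAL; assumes Flink's
// RocksDBOptionsFactory interface with its createWriteOptions hook.
public class WalEnabledOptionsFactory implements RocksDBOptionsFactory {

    @Override
    public DBOptions createDBOptions(DBOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
        return currentOptions; // keep the container's defaults
    }

    @Override
    public ColumnFamilyOptions createColumnOptions(ColumnFamilyOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
        return currentOptions; // keep the container's defaults
    }

    @Override
    public WriteOptions createWriteOptions(WriteOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
        // getWriteOptions() already registered currentOptions in handlesToClose,
        // so mutating it in place is safe here
        return currentOptions.setDisableWAL(false);
    }
}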
Use of org.rocksdb.WriteOptions in project flink by Apache.
The class RocksDBWriteBatchWrapperTest, method testWriteBatchWrapperFlushAfterMemorySizeExceed:
/**
 * Tests that {@link RocksDBWriteBatchWrapper} flushes after the memory consumed exceeds the
 * preconfigured value.
 */
@Test
public void testWriteBatchWrapperFlushAfterMemorySizeExceed() throws Exception {
    try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
         WriteOptions options = new WriteOptions().setDisableWAL(true);
         ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
         RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, options, 200, 50)) {
        long initBatchSize = writeBatchWrapper.getDataSize();
        byte[] dummy = new byte[6];
        ThreadLocalRandom.current().nextBytes(dummy);
        // each put adds 1 + 1 + 1 + 6 + 1 + 6 = 16 bytes per KV pair;
        // the record format is [handleType|kvType|keyLen|key|valueLen|value].
        // For more details, see write_batch.cc in RocksDB.
        writeBatchWrapper.put(handle, dummy, dummy);
        assertEquals(initBatchSize + 16, writeBatchWrapper.getDataSize());
        writeBatchWrapper.put(handle, dummy, dummy);
        assertEquals(initBatchSize + 32, writeBatchWrapper.getDataSize());
        writeBatchWrapper.put(handle, dummy, dummy);
        // the third put pushes the batch past the preconfigured 50-byte limit,
        // triggering a flush that leaves an empty write batch
        assertEquals(initBatchSize, writeBatchWrapper.getDataSize());
    }
}
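The 16-byte increment asserted above follows directly from the record layout in the comment: four single-byte fields plus the 6-byte key and 6-byte value. A hypothetical helper making that arithmetic explicit (assuming single-byte varints, i.e. lengths under 128):

// Hypothetical helper mirroring the byte accounting in the test above;
// not part of the Flink or RocksDB API.
static long estimatePutRecordSize(byte[] key, byte[] value) {
    return 1                    // handleType: which column family the record targets
         + 1                    // kvType: put/delete/merge tag
         + 1 + key.length       // keyLen (single-byte varint here) + key bytes
         + 1 + value.length;    // valueLen (single-byte varint here) + value bytes
}

With the test's 6-byte key and value, this yields 16, matching each assertion.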
Use of org.rocksdb.WriteOptions in project flink by Apache.
The class RocksDBResource, method before:
@Override
protected void before() throws Throwable {
    this.temporaryFolder = new TemporaryFolder();
    this.temporaryFolder.create();
    final File rocksFolder = temporaryFolder.newFolder();
    this.dbOptions = optionsFactory.createDBOptions(
        new DBOptions()
            .setUseFsync(false)
            .setInfoLogLevel(InfoLogLevel.HEADER_LEVEL)
            .setStatsDumpPeriodSec(0),
        handlesToClose).setCreateIfMissing(true);
    this.columnFamilyOptions = optionsFactory.createColumnOptions(new ColumnFamilyOptions(), handlesToClose);
    this.writeOptions = new WriteOptions();
    this.writeOptions.disableWAL();
    this.readOptions = new ReadOptions();
    this.columnFamilyHandles = new ArrayList<>(1);
    this.rocksDB = RocksDB.open(
        dbOptions,
        rocksFolder.getAbsolutePath(),
        Collections.singletonList(new ColumnFamilyDescriptor("default".getBytes(), columnFamilyOptions)),
        columnFamilyHandles);
    this.batchWrapper = new RocksDBWriteBatchWrapper(rocksDB, writeOptions);
}
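Every native object created in before() holds off-heap memory and must be released when the rule tears down. A sketch of the matching after() hook, assuming handles are closed in roughly reverse order of creation and that Flink's IOUtils.closeQuietly is available; the actual RocksDBResource may differ in detail:

// Hypothetical teardown counterpart to before(); not the actual Flink code.
@Override
protected void after() {
    // close in reverse order of creation: batch first, then column families,
    // the database itself, the options objects, and finally the temp directory
    IOUtils.closeQuietly(batchWrapper);
    columnFamilyHandles.forEach(IOUtils::closeQuietly);
    IOUtils.closeQuietly(rocksDB);
    IOUtils.closeQuietly(readOptions);
    IOUtils.closeQuietly(writeOptions);
    IOUtils.closeQuietly(columnFamilyOptions);
    IOUtils.closeQuietly(dbOptions);
    handlesToClose.forEach(IOUtils::closeQuietly);
    temporaryFolder.delete();
}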
Use of org.rocksdb.WriteOptions in project kafka by Apache.
The class RocksDBStore, method openDB:
@SuppressWarnings("unchecked")
void openDB(final Map<String, Object> configs, final File stateDir) {
    // initialize the default rocksdb options
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    userSpecifiedOptions = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions);
    final BlockBasedTableConfigWithAccessibleCache tableConfig = new BlockBasedTableConfigWithAccessibleCache();
    cache = new LRUCache(BLOCK_CACHE_SIZE);
    tableConfig.setBlockCache(cache);
    tableConfig.setBlockSize(BLOCK_SIZE);
    filter = new BloomFilter();
    tableConfig.setFilterPolicy(filter);
    userSpecifiedOptions.optimizeFiltersForHits();
    userSpecifiedOptions.setTableFormatConfig(tableConfig);
    userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE);
    userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE);
    userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE);
    userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    userSpecifiedOptions.setCreateIfMissing(true);
    userSpecifiedOptions.setErrorIfExists(false);
    userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDB.
    // note that the current implementation of setIncreaseParallelism affects the number
    // of compaction threads but not flush threads (the latter remains one). Also,
    // the parallelism value needs to be at least two because the code in
    // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
    // subtracts one from the value passed in to determine the number of compaction threads
    // (this could be a bug in the RocksDB code; their devs have been contacted).
    userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));
    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);
    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);
    final Class<RocksDBConfigSetter> configSetterClass =
        (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (configSetterClass != null) {
        configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, userSpecifiedOptions, configs);
    }
    dbDir = new File(new File(stateDir, parentDir), name);
    try {
        Files.createDirectories(dbDir.getParentFile().toPath());
        Files.createDirectories(dbDir.getAbsoluteFile().toPath());
    } catch (final IOException fatal) {
        throw new ProcessorStateException(fatal);
    }
    // set up statistics before the database is opened, otherwise the statistics are not
    // updated with the measurements from RocksDB
    maybeSetUpStatistics(configs);
    openRocksDB(dbOptions, columnFamilyOptions);
    open = true;
    addValueProvidersToMetricsRecorder();
}
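The configSetter hook near the end is how applications override these defaults, including the WAL-disabled WriteOptions' companion settings. A minimal sketch of a custom setter using Kafka Streams' documented RocksDBConfigSetter interface; the memtable values here are illustrative, not recommendations:

// Example config setter, registered via StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG;
// the numbers below are illustrative only.
public static class CustomRocksDBConfig implements RocksDBConfigSetter {

    @Override
    public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
        options.setWriteBufferSize(8 * 1024 * 1024L); // 8 MiB memtables
        options.setMaxWriteBufferNumber(2);
    }

    @Override
    public void close(final String storeName, final Options options) {
        // release any RocksObjects created in setConfig; none were created here
    }
}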