Use of org.rocksdb.ColumnFamilyDescriptor in project flink by apache.
The class RocksDBOperationUtils, method openDB.
public static RocksDB openDB(
        String path,
        List<ColumnFamilyDescriptor> stateColumnFamilyDescriptors,
        List<ColumnFamilyHandle> stateColumnFamilyHandles,
        ColumnFamilyOptions columnFamilyOptions,
        DBOptions dbOptions) throws IOException {
    List<ColumnFamilyDescriptor> columnFamilyDescriptors =
            new ArrayList<>(1 + stateColumnFamilyDescriptors.size());
    // we add the required descriptor for the default CF in FIRST position, see
    // https://github.com/facebook/rocksdb/wiki/RocksJava-Basics#opening-a-database-with-column-families
    columnFamilyDescriptors.add(
            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions));
    columnFamilyDescriptors.addAll(stateColumnFamilyDescriptors);
    RocksDB dbRef;
    try {
        dbRef = RocksDB.open(
                Preconditions.checkNotNull(dbOptions),
                Preconditions.checkNotNull(path),
                columnFamilyDescriptors,
                stateColumnFamilyHandles);
    } catch (RocksDBException e) {
        IOUtils.closeQuietly(columnFamilyOptions);
        columnFamilyDescriptors.forEach(cfd -> IOUtils.closeQuietly(cfd.getOptions()));
        // improve error reporting on Windows
        throwExceptionIfPathLengthExceededOnWindows(path, e);
        throw new IOException("Error while opening RocksDB instance.", e);
    }
    // requested + default CF
    Preconditions.checkState(
            1 + stateColumnFamilyDescriptors.size() == stateColumnFamilyHandles.size(),
            "Not all requested column family handles have been created");
    return dbRef;
}
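A minimal caller sketch of this utility. The path, the "valueState" column family name, and the setCreateMissingColumnFamilies flag are assumptions of this sketch, not part of the Flink method above; note that openDB fills the handles list with the default CF first, followed by the requested ones.

List<ColumnFamilyDescriptor> stateDescriptors = Collections.singletonList(
        new ColumnFamilyDescriptor("valueState".getBytes(StandardCharsets.UTF_8)));
List<ColumnFamilyHandle> handles = new ArrayList<>(2);
ColumnFamilyOptions cfOptions = new ColumnFamilyOptions();
DBOptions dbOptions = new DBOptions()
        .setCreateIfMissing(true)
        .setCreateMissingColumnFamilies(true); // assumption: CFs may not exist yet
RocksDB db = RocksDBOperationUtils.openDB(
        "/tmp/rocksdb-example", stateDescriptors, handles, cfOptions, dbOptions);
try {
    // handles.get(0) is the default CF (added first by openDB), handles.get(1) is "valueState"
    db.put(handles.get(1), "key".getBytes(), "value".getBytes());
} finally {
    // close native handles before the DB, and the DB before its options
    handles.forEach(ColumnFamilyHandle::close);
    db.close();
    dbOptions.close();
    cfOptions.close();
}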
Use of org.rocksdb.ColumnFamilyDescriptor in project flink by apache.
The class RocksDBWriteBatchWrapperTest, method testWriteBatchWrapperFlushAfterMemorySizeExceed.
/**
* Tests that {@link RocksDBWriteBatchWrapper} flushes after the memory consumed exceeds the
* preconfigured value.
*/
@Test
public void testWriteBatchWrapperFlushAfterMemorySizeExceed() throws Exception {
    try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
            WriteOptions options = new WriteOptions().setDisableWAL(true);
            ColumnFamilyHandle handle =
                    db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
            RocksDBWriteBatchWrapper writeBatchWrapper =
                    new RocksDBWriteBatchWrapper(db, options, 200, 50)) {

        long initBatchSize = writeBatchWrapper.getDataSize();
        byte[] dummy = new byte[6];
        ThreadLocalRandom.current().nextBytes(dummy);
        // each KV adds 1 + 1 + 1 + 6 + 1 + 6 = 16 bytes;
        // the record format is [handleType|kvType|keyLen|key|valueLen|value];
        // for more information, see write_batch.cc in RocksDB
        writeBatchWrapper.put(handle, dummy, dummy);
        assertEquals(initBatchSize + 16, writeBatchWrapper.getDataSize());
        writeBatchWrapper.put(handle, dummy, dummy);
        assertEquals(initBatchSize + 32, writeBatchWrapper.getDataSize());
        writeBatchWrapper.put(handle, dummy, dummy);
        // the third put exceeds the configured size limit, so everything is flushed,
        // leaving an empty write batch
        assertEquals(initBatchSize, writeBatchWrapper.getDataSize());
    }
}
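The same accounting can be observed on a bare RocksJava WriteBatch. A small sketch; the 12-byte batch header and the 15-byte record size (one byte less than above, since a put against the default column family carries no CF id) are assumptions about current RocksDB versions, not values taken from the test:

try (WriteBatch batch = new WriteBatch()) {
    long headerSize = batch.getDataSize(); // fixed batch header (12 bytes in current versions)
    byte[] six = "abcdef".getBytes();
    batch.put(six, six); // default CF: no CF-id byte in the record
    // 1 (kvType) + 1 (keyLen) + 6 (key) + 1 (valueLen) + 6 (value) = 15 bytes
    System.out.println(batch.getDataSize() - headerSize); // expected: 15
}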
Use of org.rocksdb.ColumnFamilyDescriptor in project flink by apache.
The class RocksDBResource, method before.
@Override
protected void before() throws Throwable {
    this.temporaryFolder = new TemporaryFolder();
    this.temporaryFolder.create();
    final File rocksFolder = temporaryFolder.newFolder();
    this.dbOptions = optionsFactory.createDBOptions(
                    new DBOptions()
                            .setUseFsync(false)
                            .setInfoLogLevel(InfoLogLevel.HEADER_LEVEL)
                            .setStatsDumpPeriodSec(0),
                    handlesToClose)
            .setCreateIfMissing(true);
    this.columnFamilyOptions =
            optionsFactory.createColumnOptions(new ColumnFamilyOptions(), handlesToClose);
    this.writeOptions = new WriteOptions();
    this.writeOptions.setDisableWAL(true);
    this.readOptions = new ReadOptions();
    this.columnFamilyHandles = new ArrayList<>(1);
    this.rocksDB = RocksDB.open(
            dbOptions,
            rocksFolder.getAbsolutePath(),
            Collections.singletonList(
                    new ColumnFamilyDescriptor("default".getBytes(), columnFamilyOptions)),
            columnFamilyHandles);
    this.batchWrapper = new RocksDBWriteBatchWrapper(rocksDB, writeOptions);
}
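The matching after() hook is not shown here. A minimal sketch of what such a teardown could look like, assuming the fields above and Flink's IOUtils.closeQuietly; this is not RocksDBResource's actual implementation, but it follows the usual RocksJava rule of closing handles before the DB and the DB before its options:

@Override
protected void after() {
    // close native resources in reverse acquisition order
    IOUtils.closeQuietly(batchWrapper);
    columnFamilyHandles.forEach(IOUtils::closeQuietly);
    IOUtils.closeQuietly(rocksDB);
    IOUtils.closeQuietly(readOptions);
    IOUtils.closeQuietly(writeOptions);
    IOUtils.closeQuietly(columnFamilyOptions);
    IOUtils.closeQuietly(dbOptions);
    handlesToClose.forEach(IOUtils::closeQuietly);
    temporaryFolder.delete();
}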
Use of org.rocksdb.ColumnFamilyDescriptor in project flink by apache.
The class RocksKeyGroupsRocksSingleStateIteratorTest, method testMergeIterator.
public void testMergeIterator(int maxParallelism) throws Exception {
    Random random = new Random(1234);
    try (ReadOptions readOptions = new ReadOptions();
            RocksDB rocksDB = RocksDB.open(tempFolder.getRoot().getAbsolutePath())) {

        List<Tuple2<RocksIteratorWrapper, Integer>> rocksIteratorsWithKVStateId = new ArrayList<>();
        List<Tuple2<ColumnFamilyHandle, Integer>> columnFamilyHandlesWithKeyCount = new ArrayList<>();

        int totalKeysExpected = 0;
        for (int c = 0; c < NUM_KEY_VAL_STATES; ++c) {
            ColumnFamilyHandle handle = rocksDB.createColumnFamily(
                    new ColumnFamilyDescriptor(("column-" + c).getBytes(ConfigConstants.DEFAULT_CHARSET)));
            ByteArrayOutputStreamWithPos bos = new ByteArrayOutputStreamWithPos();
            DataOutputStream dos = new DataOutputStream(bos);
            int numKeys = random.nextInt(MAX_NUM_KEYS + 1);
            for (int i = 0; i < numKeys; ++i) {
                // key layout: [key-group prefix (1 or 2 bytes) | 4-byte key]
                if (maxParallelism <= Byte.MAX_VALUE) {
                    dos.writeByte(i);
                } else {
                    dos.writeShort(i);
                }
                dos.writeInt(i);
                byte[] key = bos.toByteArray();
                byte[] val = new byte[] {42};
                rocksDB.put(handle, key, val);
                bos.reset();
            }
            columnFamilyHandlesWithKeyCount.add(new Tuple2<>(handle, numKeys));
            totalKeysExpected += numKeys;
        }

        CloseableRegistry closeableRegistry = new CloseableRegistry();
        int id = 0;
        for (Tuple2<ColumnFamilyHandle, Integer> columnFamilyHandle : columnFamilyHandlesWithKeyCount) {
            RocksIteratorWrapper rocksIterator =
                    RocksDBOperationUtils.getRocksIterator(rocksDB, columnFamilyHandle.f0, readOptions);
            closeableRegistry.registerCloseable(rocksIterator);
            rocksIteratorsWithKVStateId.add(new Tuple2<>(rocksIterator, id));
            ++id;
        }

        try (RocksStatesPerKeyGroupMergeIterator mergeIterator =
                new RocksStatesPerKeyGroupMergeIterator(
                        closeableRegistry,
                        rocksIteratorsWithKVStateId,
                        Collections.emptyList(),
                        maxParallelism <= Byte.MAX_VALUE ? 1 : 2)) {

            int prevKVState = -1;
            int prevKey = -1;
            int prevKeyGroup = -1;
            int totalKeysActual = 0;
            while (mergeIterator.isValid()) {
                ByteBuffer bb = ByteBuffer.wrap(mergeIterator.key());
                int keyGroup = maxParallelism > Byte.MAX_VALUE ? bb.getShort() : bb.get();
                int key = bb.getInt();
                Assert.assertTrue(keyGroup >= prevKeyGroup);
                Assert.assertTrue(key >= prevKey);
                Assert.assertEquals(prevKeyGroup != keyGroup, mergeIterator.isNewKeyGroup());
                Assert.assertEquals(prevKVState != mergeIterator.kvStateId(), mergeIterator.isNewKeyValueState());
                prevKeyGroup = keyGroup;
                prevKey = key;
                prevKVState = mergeIterator.kvStateId();
                mergeIterator.next();
                ++totalKeysActual;
            }
            Assert.assertEquals(totalKeysExpected, totalKeysActual);
        }

        IOUtils.closeQuietly(rocksDB.getDefaultColumnFamily());
        for (Tuple2<ColumnFamilyHandle, Integer> handleWithCount : columnFamilyHandlesWithKeyCount) {
            IOUtils.closeQuietly(handleWithCount.f0);
        }
    }
}
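This helper is parameterized by maxParallelism, so it is presumably driven by small @Test entry points along these lines (a sketch of plausible drivers, not necessarily the exact tests in Flink):

@Test
public void testMergeIteratorByteKeyGroups() throws Exception {
    testMergeIterator(Byte.MAX_VALUE); // 1-byte key-group prefix
}

@Test
public void testMergeIteratorShortKeyGroups() throws Exception {
    testMergeIterator(Short.MAX_VALUE); // 2-byte key-group prefix
}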
Use of org.rocksdb.ColumnFamilyDescriptor in project kafka by apache.
The class RocksDBTimestampedStoreTest, method verifyOldAndNewColumnFamily.
private void verifyOldAndNewColumnFamily() throws Exception {
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(
            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions),
            new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());

    RocksDB db = null;
    ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null;
    boolean errorOccurred = false;
    try {
        db = RocksDB.open(
                dbOptions,
                new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(),
                columnFamilyDescriptors,
                columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        withTimestampColumnFamily = columnFamilies.get(1);

        // old CF: only key7 is still stored without a timestamp
        assertThat(db.get(noTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key1".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key2".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key4".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key5".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key7".getBytes()).length, is(7));
        assertThat(db.get(noTimestampColumnFamily, "key8".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key11".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key12".getBytes()), new IsNull<>());

        // new CF: migrated values carry an 8-byte timestamp prefix before the payload
        assertThat(db.get(withTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key1".getBytes()).length, is(8 + 1));
        assertThat(db.get(withTimestampColumnFamily, "key2".getBytes()).length, is(12));
        assertThat(db.get(withTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key4".getBytes()).length, is(8 + 4));
        assertThat(db.get(withTimestampColumnFamily, "key5".getBytes()).length, is(8 + 5));
        assertThat(db.get(withTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key7".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key8".getBytes()).length, is(18));
        assertThat(db.get(withTimestampColumnFamily, "key11".getBytes()).length, is(21));
        assertThat(db.get(withTimestampColumnFamily, "key12".getBytes()), new IsNull<>());
    } catch (final RuntimeException fatal) {
        errorOccurred = true;
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (withTimestampColumnFamily != null) {
            withTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        if (errorOccurred) {
            dbOptions.close();
            columnFamilyOptions.close();
        }
    }

    // check that the store is still in upgrade mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode"));
    } finally {
        rocksDBStore.close();
    }

    // clear the old CF by deleting its last remaining entry
    columnFamilies.clear();
    db = null;
    noTimestampColumnFamily = null;
    try {
        db = RocksDB.open(
                dbOptions,
                new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(),
                columnFamilyDescriptors,
                columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        db.delete(noTimestampColumnFamily, "key7".getBytes());
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        columnFamilyOptions.close();
    }

    // check that the store is now in regular mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    }
}
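For context, the dual-CF layout this test verifies supports a lazy migration read path. The following is only an illustrative sketch of how such a store could surface and migrate old-format entries on read; the method shape, the sentinel timestamp, and the eager put/delete are assumptions of this sketch, not Kafka's actual RocksDBTimestampedStore code:

byte[] get(final byte[] key) throws RocksDBException {
    final byte[] valueWithTimestamp = db.get(withTimestampColumnFamily, key);
    if (valueWithTimestamp != null) {
        return valueWithTimestamp;
    }
    final byte[] plainValue = db.get(noTimestampColumnFamily, key);
    if (plainValue != null) {
        // migrate on read: prepend an 8-byte sentinel timestamp and move the entry
        final byte[] migrated = ByteBuffer.allocate(8 + plainValue.length)
                .putLong(-1L) // sentinel for "timestamp unknown" (an assumption of this sketch)
                .put(plainValue)
                .array();
        db.put(withTimestampColumnFamily, key, migrated);
        db.delete(noTimestampColumnFamily, key);
        return migrated;
    }
    return null;
}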