Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.
From the class RocksDBRocksStateKeysAndNamespacesIteratorTest, the method testIteratorHelper:
@SuppressWarnings("unchecked")
<K> void testIteratorHelper(
        TypeSerializer<K> keySerializer, int maxKeyGroupNumber, Function<Integer, K> getKeyFunc)
        throws Exception {
    String testStateName = "aha";
    String namespace = "ns";
    try (RocksDBKeyedStateBackendTestFactory factory = new RocksDBKeyedStateBackendTestFactory()) {
        RocksDBKeyedStateBackend<K> keyedStateBackend =
                factory.create(tmp, keySerializer, maxKeyGroupNumber);
        ValueState<String> testState =
                keyedStateBackend.getPartitionedState(
                        namespace,
                        StringSerializer.INSTANCE,
                        new ValueStateDescriptor<>(testStateName, String.class));
        // insert 1000 records, one per key
        for (int i = 0; i < 1000; ++i) {
            keyedStateBackend.setCurrentKey(getKeyFunc.apply(i));
            testState.update(String.valueOf(i));
        }
        DataOutputSerializer outputStream = new DataOutputSerializer(8);
        boolean ambiguousKeyPossible =
                CompositeKeySerializationUtils.isAmbiguousKeyPossible(
                        keySerializer, StringSerializer.INSTANCE);
        CompositeKeySerializationUtils.writeNameSpace(
                namespace, StringSerializer.INSTANCE, outputStream, ambiguousKeyPossible);
        // the column family was already created with the state, so it is closed with the backend
        ColumnFamilyHandle handle = keyedStateBackend.getColumnFamilyHandle(testStateName);
        try (RocksIteratorWrapper iterator =
                        RocksDBOperationUtils.getRocksIterator(
                                keyedStateBackend.db, handle, keyedStateBackend.getReadOptions());
                RocksStateKeysAndNamespaceIterator<K, String> iteratorWrapper =
                        new RocksStateKeysAndNamespaceIterator<>(
                                iterator,
                                testStateName,
                                keySerializer,
                                StringSerializer.INSTANCE,
                                keyedStateBackend.getKeyGroupPrefixBytes(),
                                ambiguousKeyPossible)) {
            iterator.seekToFirst();
            // collect every (key, namespace) pair the iterator yields
            List<Tuple2<Integer, String>> fetchedKeys = new ArrayList<>(1000);
            while (iteratorWrapper.hasNext()) {
                Tuple2 entry = iteratorWrapper.next();
                // normalize the key to an Integer so the entries can be sorted and compared
                entry.f0 = Integer.parseInt(entry.f0.toString());
                fetchedKeys.add((Tuple2<Integer, String>) entry);
            }
            fetchedKeys.sort(Comparator.comparingInt(a -> a.f0));
            Assert.assertEquals(1000, fetchedKeys.size());
            for (int i = 0; i < 1000; ++i) {
                Assert.assertEquals(i, fetchedKeys.get(i).f0.intValue());
                Assert.assertEquals(namespace, fetchedKeys.get(i).f1);
            }
        }
    }
}
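For orientation, the RocksIteratorWrapper used above wraps a plain org.rocksdb.RocksIterator; Flink's wrapper mainly surfaces iterator errors, which the raw iterator reports only through status(). A minimal standalone sketch of scanning one column family with nothing but the RocksJava API (the path /tmp/cf-scan and the column family name "state" are illustrative, not taken from the test):

import java.nio.charset.StandardCharsets;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;

public class ColumnFamilyScan {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
                RocksDB db = RocksDB.open(options, "/tmp/cf-scan")) {
            ColumnFamilyHandle handle =
                    db.createColumnFamily(
                            new ColumnFamilyDescriptor("state".getBytes(StandardCharsets.UTF_8)));
            db.put(handle, "k1".getBytes(StandardCharsets.UTF_8), "v1".getBytes(StandardCharsets.UTF_8));
            // a RocksIterator scans exactly one column family, in key order
            try (RocksIterator it = db.newIterator(handle)) {
                for (it.seekToFirst(); it.isValid(); it.next()) {
                    System.out.println(new String(it.key(), StandardCharsets.UTF_8));
                }
            }
            // close the handle before the database that owns it
            handle.close();
        }
    }
}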
Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.
From the class RocksDBPropertyTest, the method testRocksDBPropertiesValid:
@Test
public void testRocksDBPropertiesValid() throws RocksDBException {
    RocksDB db = rocksDBResource.getRocksDB();
    ColumnFamilyHandle handle = rocksDBResource.getDefaultColumnFamily();
    // every property enumerated by RocksDBProperty must be accepted by RocksDB itself
    for (RocksDBProperty property : RocksDBProperty.values()) {
        try {
            db.getLongProperty(handle, property.getRocksDBProperty());
        } catch (RocksDBException e) {
            throw new AssertionError(
                    String.format("Invalid RocksDB property %s", property.getRocksDBProperty()), e);
        }
    }
}
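RocksDBProperty here only wraps property strings that RocksDB defines itself, so the same call works outside Flink. A minimal sketch against raw RocksJava ("rocksdb.estimate-num-keys" is one of RocksDB's built-in properties; the path is illustrative):

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class PropertyProbe {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
                RocksDB db = RocksDB.open(options, "/tmp/property-probe")) {
            // handle to the default column family, owned by the database object
            ColumnFamilyHandle handle = db.getDefaultColumnFamily();
            long estimatedKeys = db.getLongProperty(handle, "rocksdb.estimate-num-keys");
            System.out.println("estimated keys: " + estimatedKeys);
        }
    }
}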
Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.
From the class RocksDBRocksStateKeysIteratorTest, the method testIteratorHelper:
<K> void testIteratorHelper(
        TypeSerializer<K> keySerializer, int maxKeyGroupNumber, Function<Integer, K> getKeyFunc)
        throws Exception {
    String testStateName = "aha";
    String namespace = "ns";
    try (RocksDBKeyedStateBackendTestFactory factory = new RocksDBKeyedStateBackendTestFactory()) {
        RocksDBKeyedStateBackend<K> keyedStateBackend =
                factory.create(tmp, keySerializer, maxKeyGroupNumber);
        ValueState<String> testState =
                keyedStateBackend.getPartitionedState(
                        namespace,
                        StringSerializer.INSTANCE,
                        new ValueStateDescriptor<>(testStateName, String.class));
        // insert 1000 records, one per key
        for (int i = 0; i < 1000; ++i) {
            keyedStateBackend.setCurrentKey(getKeyFunc.apply(i));
            testState.update(String.valueOf(i));
        }
        // serialize the namespace once; the iterator matches it against each entry's trailing bytes
        DataOutputSerializer outputStream = new DataOutputSerializer(8);
        boolean ambiguousKeyPossible =
                CompositeKeySerializationUtils.isAmbiguousKeyPossible(
                        keySerializer, StringSerializer.INSTANCE);
        CompositeKeySerializationUtils.writeNameSpace(
                namespace, StringSerializer.INSTANCE, outputStream, ambiguousKeyPossible);
        byte[] nameSpaceBytes = outputStream.getCopyOfBuffer();
        // the column family was already created with the state, so it is closed with the backend
        ColumnFamilyHandle handle = keyedStateBackend.getColumnFamilyHandle(testStateName);
        try (RocksIteratorWrapper iterator =
                        RocksDBOperationUtils.getRocksIterator(
                                keyedStateBackend.db, handle, keyedStateBackend.getReadOptions());
                RocksStateKeysIterator<K> iteratorWrapper =
                        new RocksStateKeysIterator<>(
                                iterator,
                                testStateName,
                                keySerializer,
                                keyedStateBackend.getKeyGroupPrefixBytes(),
                                ambiguousKeyPossible,
                                nameSpaceBytes)) {
            iterator.seekToFirst();
            // collect every key the iterator yields
            List<Integer> fetchedKeys = new ArrayList<>(1000);
            while (iteratorWrapper.hasNext()) {
                fetchedKeys.add(Integer.parseInt(iteratorWrapper.next().toString()));
            }
            fetchedKeys.sort(Comparator.comparingInt(a -> a));
            Assert.assertEquals(1000, fetchedKeys.size());
            for (int i = 0; i < 1000; ++i) {
                Assert.assertEquals(i, fetchedKeys.get(i).intValue());
            }
        }
    }
}
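Both iterator tests lean on the composite key layout the backend writes: a fixed-length key-group prefix, then the serialized key, then the serialized namespace. As a sketch of producing such a composite key by hand, assuming the CompositeKeySerializationUtils helpers writeKeyGroup and writeKey carry the signatures suggested by the calls above (IntSerializer.INSTANCE and the literal values are purely illustrative):

// hypothetical usage sketch; the writeKeyGroup/writeKey signatures are assumed, not shown above
DataOutputSerializer out = new DataOutputSerializer(32);
boolean ambiguousKeyPossible =
        CompositeKeySerializationUtils.isAmbiguousKeyPossible(
                IntSerializer.INSTANCE, StringSerializer.INSTANCE);
// key-group prefix first (1 or 2 bytes), then the key, then the namespace
CompositeKeySerializationUtils.writeKeyGroup(42, 1, out);
CompositeKeySerializationUtils.writeKey(17, IntSerializer.INSTANCE, out, ambiguousKeyPossible);
CompositeKeySerializationUtils.writeNameSpace("ns", StringSerializer.INSTANCE, out, ambiguousKeyPossible);
byte[] compositeKey = out.getCopyOfBuffer();

This layout is what lets RocksStateKeysIterator strip the key-group prefix, deserialize the key, and compare the trailing bytes against nameSpaceBytes.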
Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.
From the class RocksKeyGroupsRocksSingleStateIteratorTest, the method testMergeIterator:
public void testMergeIterator(int maxParallelism) throws Exception {
    Random random = new Random(1234);
    try (ReadOptions readOptions = new ReadOptions();
            RocksDB rocksDB = RocksDB.open(tempFolder.getRoot().getAbsolutePath())) {
        List<Tuple2<RocksIteratorWrapper, Integer>> rocksIteratorsWithKVStateId = new ArrayList<>();
        List<Tuple2<ColumnFamilyHandle, Integer>> columnFamilyHandlesWithKeyCount = new ArrayList<>();
        int totalKeysExpected = 0;
        for (int c = 0; c < NUM_KEY_VAL_STATES; ++c) {
            ColumnFamilyHandle handle =
                    rocksDB.createColumnFamily(
                            new ColumnFamilyDescriptor(
                                    ("column-" + c).getBytes(ConfigConstants.DEFAULT_CHARSET)));
            ByteArrayOutputStreamWithPos bos = new ByteArrayOutputStreamWithPos();
            DataOutputStream dos = new DataOutputStream(bos);
            int numKeys = random.nextInt(MAX_NUM_KEYS + 1);
            for (int i = 0; i < numKeys; ++i) {
                // write the key-group prefix: 1 byte for small max parallelism, 2 bytes otherwise
                if (maxParallelism <= Byte.MAX_VALUE) {
                    dos.writeByte(i);
                } else {
                    dos.writeShort(i);
                }
                dos.writeInt(i);
                byte[] key = bos.toByteArray();
                byte[] val = new byte[] {42};
                rocksDB.put(handle, key, val);
                bos.reset();
            }
            columnFamilyHandlesWithKeyCount.add(new Tuple2<>(handle, numKeys));
            totalKeysExpected += numKeys;
        }
        CloseableRegistry closeableRegistry = new CloseableRegistry();
        int id = 0;
        for (Tuple2<ColumnFamilyHandle, Integer> columnFamilyHandle : columnFamilyHandlesWithKeyCount) {
            RocksIteratorWrapper rocksIterator =
                    RocksDBOperationUtils.getRocksIterator(rocksDB, columnFamilyHandle.f0, readOptions);
            closeableRegistry.registerCloseable(rocksIterator);
            rocksIteratorsWithKVStateId.add(new Tuple2<>(rocksIterator, id));
            ++id;
        }
        try (RocksStatesPerKeyGroupMergeIterator mergeIterator =
                new RocksStatesPerKeyGroupMergeIterator(
                        closeableRegistry,
                        rocksIteratorsWithKVStateId,
                        Collections.emptyList(),
                        maxParallelism <= Byte.MAX_VALUE ? 1 : 2)) {
            int prevKVState = -1;
            int prevKey = -1;
            int prevKeyGroup = -1;
            int totalKeysActual = 0;
            while (mergeIterator.isValid()) {
                ByteBuffer bb = ByteBuffer.wrap(mergeIterator.key());
                int keyGroup = maxParallelism > Byte.MAX_VALUE ? bb.getShort() : bb.get();
                int key = bb.getInt();
                // entries must come out in ascending key-group (and, with this test data, key) order
                Assert.assertTrue(keyGroup >= prevKeyGroup);
                Assert.assertTrue(key >= prevKey);
                Assert.assertEquals(prevKeyGroup != keyGroup, mergeIterator.isNewKeyGroup());
                Assert.assertEquals(
                        prevKVState != mergeIterator.kvStateId(), mergeIterator.isNewKeyValueState());
                prevKeyGroup = keyGroup;
                prevKVState = mergeIterator.kvStateId();
                // track the previous key so the monotonicity assertion above is meaningful
                prevKey = key;
                mergeIterator.next();
                ++totalKeysActual;
            }
            Assert.assertEquals(totalKeysExpected, totalKeysActual);
        }
        IOUtils.closeQuietly(rocksDB.getDefaultColumnFamily());
        for (Tuple2<ColumnFamilyHandle, Integer> handleWithCount : columnFamilyHandlesWithKeyCount) {
            IOUtils.closeQuietly(handleWithCount.f0);
        }
    }
}
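The assertions in testMergeIterator depend on the byte layout written in the setup loop: a 1-byte (or, above 127 key groups, 2-byte) key group followed by a 4-byte big-endian int key. A self-contained round trip of that layout using only java.nio, mirroring the 1-byte branch of the test:

import java.nio.ByteBuffer;

public class KeyGroupLayout {
    // encode (keyGroup, key) the way the test writes it in the maxParallelism <= 127 branch
    static byte[] encode(int keyGroup, int key) {
        return ByteBuffer.allocate(1 + 4).put((byte) keyGroup).putInt(key).array();
    }

    public static void main(String[] args) {
        ByteBuffer bb = ByteBuffer.wrap(encode(5, 5));
        int keyGroup = bb.get();  // 1-byte key-group prefix, as written by dos.writeByte(i)
        int key = bb.getInt();    // 4-byte key, as written by dos.writeInt(i)
        System.out.println("keyGroup=" + keyGroup + ", key=" + key);
    }
}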
Use of org.rocksdb.ColumnFamilyHandle in project kafka by apache.
From the class RocksDBTimestampedStoreTest, the method verifyOldAndNewColumnFamily:
private void verifyOldAndNewColumnFamily() throws Exception {
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
            asList(
                    new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions),
                    new ColumnFamilyDescriptor(
                            "keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8),
                            columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    RocksDB db = null;
    ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null;
    boolean errorOccurred = false;
    try {
        db = RocksDB.open(
                dbOptions,
                new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(),
                columnFamilyDescriptors,
                columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        withTimestampColumnFamily = columnFamilies.get(1);
        // after the partial upgrade, only "key7" should remain in the plain column family
        assertThat(db.get(noTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key1".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key2".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key4".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key5".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key7".getBytes()).length, is(7));
        assertThat(db.get(noTimestampColumnFamily, "key8".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key11".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key12".getBytes()), new IsNull<>());
        // migrated values carry an 8-byte timestamp prefix in front of the plain value
        assertThat(db.get(withTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key1".getBytes()).length, is(8 + 1));
        assertThat(db.get(withTimestampColumnFamily, "key2".getBytes()).length, is(12));
        assertThat(db.get(withTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key4".getBytes()).length, is(8 + 4));
        assertThat(db.get(withTimestampColumnFamily, "key5".getBytes()).length, is(8 + 5));
        assertThat(db.get(withTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key7".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key8".getBytes()).length, is(18));
        assertThat(db.get(withTimestampColumnFamily, "key11".getBytes()).length, is(21));
        assertThat(db.get(withTimestampColumnFamily, "key12".getBytes()), new IsNull<>());
    } catch (final RuntimeException fatal) {
        errorOccurred = true;
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions.
        // The options are reused for the second open below, so they are only closed early on error.
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (withTimestampColumnFamily != null) {
            withTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        if (errorOccurred) {
            dbOptions.close();
            columnFamilyOptions.close();
        }
    }
    // check that the store is still in upgrade mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode"));
    } finally {
        rocksDBStore.close();
    }
    // delete the last entry from the old column family so the upgrade can complete
    columnFamilies.clear();
    db = null;
    noTimestampColumnFamily = null;
    try {
        db = RocksDB.open(
                dbOptions,
                new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(),
                columnFamilyDescriptors,
                columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        db.delete(noTimestampColumnFamily, "key7".getBytes());
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        columnFamilyOptions.close();
    }
    // check that the store now opens in regular mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    }
}
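The close-order comments in the finally blocks apply to any RocksJava code that opens explicit column families, since the handles reference native state owned by the database and the options. A minimal standalone sketch of the same discipline (the path /tmp/close-order and the column family name "extra" are illustrative):

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CloseOrder {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        final DBOptions dbOptions =
                new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true);
        final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions();
        final List<ColumnFamilyDescriptor> descriptors =
                Arrays.asList(
                        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions),
                        new ColumnFamilyDescriptor("extra".getBytes(StandardCharsets.UTF_8), cfOptions));
        final List<ColumnFamilyHandle> handles = new ArrayList<>(descriptors.size());
        final RocksDB db = RocksDB.open(dbOptions, "/tmp/close-order", descriptors, handles);
        try {
            db.put(handles.get(1), "k".getBytes(StandardCharsets.UTF_8), "v".getBytes(StandardCharsets.UTF_8));
        } finally {
            // ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
            for (final ColumnFamilyHandle handle : handles) {
                handle.close();
            }
            db.close();
            dbOptions.close();
            cfOptions.close();
        }
    }
}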