Example 16 with ColumnFamilyHandle

Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.

The class RocksDBRocksStateKeysAndNamespacesIteratorTest, method testIteratorHelper.

@SuppressWarnings("unchecked")
<K> void testIteratorHelper(TypeSerializer<K> keySerializer, int maxKeyGroupNumber, Function<Integer, K> getKeyFunc) throws Exception {
    String testStateName = "aha";
    String namespace = "ns";
    try (RocksDBKeyedStateBackendTestFactory factory = new RocksDBKeyedStateBackendTestFactory()) {
        RocksDBKeyedStateBackend<K> keyedStateBackend = factory.create(tmp, keySerializer, maxKeyGroupNumber);
        ValueState<String> testState = keyedStateBackend.getPartitionedState(namespace, StringSerializer.INSTANCE, new ValueStateDescriptor<>(testStateName, String.class));
        // insert 1000 records
        for (int i = 0; i < 1000; ++i) {
            keyedStateBackend.setCurrentKey(getKeyFunc.apply(i));
            testState.update(String.valueOf(i));
        }
        DataOutputSerializer outputStream = new DataOutputSerializer(8);
        boolean ambiguousKeyPossible = CompositeKeySerializationUtils.isAmbiguousKeyPossible(keySerializer, StringSerializer.INSTANCE);
        CompositeKeySerializationUtils.writeNameSpace(namespace, StringSerializer.INSTANCE, outputStream, ambiguousKeyPossible);
        // already created with the state, should be closed with the backend
        ColumnFamilyHandle handle = keyedStateBackend.getColumnFamilyHandle(testStateName);
        try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(keyedStateBackend.db, handle, keyedStateBackend.getReadOptions());
            RocksStateKeysAndNamespaceIterator<K, String> iteratorWrapper = new RocksStateKeysAndNamespaceIterator<>(iterator, testStateName, keySerializer, StringSerializer.INSTANCE, keyedStateBackend.getKeyGroupPrefixBytes(), ambiguousKeyPossible)) {
            iterator.seekToFirst();
            // collect all (key, namespace) pairs read back through the iterator
            List<Tuple2<Integer, String>> fetchedKeys = new ArrayList<>(1000);
            while (iteratorWrapper.hasNext()) {
                Tuple2 entry = iteratorWrapper.next();
                entry.f0 = Integer.parseInt(entry.f0.toString());
                fetchedKeys.add((Tuple2<Integer, String>) entry);
            }
            fetchedKeys.sort(Comparator.comparingInt(a -> a.f0));
            Assert.assertEquals(1000, fetchedKeys.size());
            for (int i = 0; i < 1000; ++i) {
                Assert.assertEquals(i, fetchedKeys.get(i).f0.intValue());
                Assert.assertEquals(namespace, fetchedKeys.get(i).f1);
            }
        }
    }
}
Also used : TypeSerializer(org.apache.flink.api.common.typeutils.TypeSerializer) Tuple2(org.apache.flink.api.java.tuple.Tuple2) CompositeKeySerializationUtils(org.apache.flink.runtime.state.CompositeKeySerializationUtils) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) Test(org.junit.Test) StringSerializer(org.apache.flink.api.common.typeutils.base.StringSerializer) Function(java.util.function.Function) RocksStateKeysAndNamespaceIterator(org.apache.flink.contrib.streaming.state.iterator.RocksStateKeysAndNamespaceIterator) ArrayList(java.util.ArrayList) IntSerializer(org.apache.flink.api.common.typeutils.base.IntSerializer) List(java.util.List) Rule(org.junit.Rule) ValueState(org.apache.flink.api.common.state.ValueState) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) DataOutputSerializer(org.apache.flink.core.memory.DataOutputSerializer) Assert(org.junit.Assert) Comparator(java.util.Comparator) TemporaryFolder(org.junit.rules.TemporaryFolder)
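
A note on the ambiguousKeyPossible flag used above: when both the key and the namespace serialize to variable-length byte strings, their concatenation cannot be split back apart unambiguously, which is why CompositeKeySerializationUtils adds length prefixes in that case. A minimal standalone sketch of the ambiguity itself, using plain Java and illustrative string values rather than Flink serializers:

import java.util.Arrays;

public class AmbiguousKeyDemo {
    public static void main(String[] args) {
        // Two different (key, namespace) pairs...
        byte[] first = concat("ab".getBytes(), "c".getBytes());
        byte[] second = concat("a".getBytes(), "bc".getBytes());
        // ...collapse to identical composite bytes, so the split point is lost.
        System.out.println(Arrays.equals(first, second)); // prints: true
    }

    private static byte[] concat(byte[] key, byte[] namespace) {
        byte[] out = Arrays.copyOf(key, key.length + namespace.length);
        System.arraycopy(namespace, 0, out, key.length, namespace.length);
        return out;
    }
}

Running it prints true: once the length information is gone, two distinct (key, namespace) pairs are indistinguishable, so fixed-length serializers (where no lengths are needed) and variable-length ones must be handled differently.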

Example 17 with ColumnFamilyHandle

Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.

The class RocksDBPropertyTest, method testRocksDBPropertiesValid.

@Test
public void testRocksDBPropertiesValid() throws RocksDBException {
    RocksDB db = rocksDBResource.getRocksDB();
    ColumnFamilyHandle handle = rocksDBResource.getDefaultColumnFamily();
    for (RocksDBProperty property : RocksDBProperty.values()) {
        try {
            db.getLongProperty(handle, property.getRocksDBProperty());
        } catch (RocksDBException e) {
            throw new AssertionError(String.format("Invalid RocksDB property %s", property.getRocksDBProperty()), e);
        }
    }
}
Also used : RocksDBException(org.rocksdb.RocksDBException) RocksDB(org.rocksdb.RocksDB) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) Test(org.junit.Test)
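
The same lookup works outside the test harness against any open database; Flink's RocksDBProperty enum is just a wrapper around RocksDB's documented property-name strings. A minimal sketch, assuming a throwaway path under /tmp and using two property names from RocksDB's public property set directly:

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class PropertyProbe {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/property-probe")) {
            // The default column family handle is owned by the database itself.
            ColumnFamilyHandle handle = db.getDefaultColumnFamily();
            // Both names are part of RocksDB's documented property set.
            System.out.println("estimated keys: "
                    + db.getLongProperty(handle, "rocksdb.estimate-num-keys"));
            System.out.println("memtable bytes: "
                    + db.getLongProperty(handle, "rocksdb.cur-size-all-mem-tables"));
        }
    }
}

An unknown property name surfaces as a RocksDBException, which is exactly the failure mode the test above converts into an AssertionError.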

Example 18 with ColumnFamilyHandle

Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.

The class RocksDBRocksStateKeysIteratorTest, method testIteratorHelper.

<K> void testIteratorHelper(TypeSerializer<K> keySerializer, int maxKeyGroupNumber, Function<Integer, K> getKeyFunc) throws Exception {
    String testStateName = "aha";
    String namespace = "ns";
    try (RocksDBKeyedStateBackendTestFactory factory = new RocksDBKeyedStateBackendTestFactory()) {
        RocksDBKeyedStateBackend<K> keyedStateBackend = factory.create(tmp, keySerializer, maxKeyGroupNumber);
        ValueState<String> testState = keyedStateBackend.getPartitionedState(namespace, StringSerializer.INSTANCE, new ValueStateDescriptor<>(testStateName, String.class));
        // insert 1000 records
        for (int i = 0; i < 1000; ++i) {
            keyedStateBackend.setCurrentKey(getKeyFunc.apply(i));
            testState.update(String.valueOf(i));
        }
        DataOutputSerializer outputStream = new DataOutputSerializer(8);
        boolean ambiguousKeyPossible = CompositeKeySerializationUtils.isAmbiguousKeyPossible(keySerializer, StringSerializer.INSTANCE);
        CompositeKeySerializationUtils.writeNameSpace(namespace, StringSerializer.INSTANCE, outputStream, ambiguousKeyPossible);
        byte[] nameSpaceBytes = outputStream.getCopyOfBuffer();
        // already created with the state, should be closed with the backend
        ColumnFamilyHandle handle = keyedStateBackend.getColumnFamilyHandle(testStateName);
        try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(keyedStateBackend.db, handle, keyedStateBackend.getReadOptions());
            RocksStateKeysIterator<K> iteratorWrapper = new RocksStateKeysIterator<>(iterator, testStateName, keySerializer, keyedStateBackend.getKeyGroupPrefixBytes(), ambiguousKeyPossible, nameSpaceBytes)) {
            iterator.seekToFirst();
            // collect all keys read back through the iterator
            List<Integer> fetchedKeys = new ArrayList<>(1000);
            while (iteratorWrapper.hasNext()) {
                fetchedKeys.add(Integer.parseInt(iteratorWrapper.next().toString()));
            }
            fetchedKeys.sort(Comparator.comparingInt(a -> a));
            Assert.assertEquals(1000, fetchedKeys.size());
            for (int i = 0; i < 1000; ++i) {
                Assert.assertEquals(i, fetchedKeys.get(i).intValue());
            }
        }
    }
}
Also used : TypeSerializer(org.apache.flink.api.common.typeutils.TypeSerializer) CompositeKeySerializationUtils(org.apache.flink.runtime.state.CompositeKeySerializationUtils) RocksStateKeysIterator(org.apache.flink.contrib.streaming.state.iterator.RocksStateKeysIterator) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) Test(org.junit.Test) StringSerializer(org.apache.flink.api.common.typeutils.base.StringSerializer) Function(java.util.function.Function) ArrayList(java.util.ArrayList) IntSerializer(org.apache.flink.api.common.typeutils.base.IntSerializer) List(java.util.List) Rule(org.junit.Rule) ValueState(org.apache.flink.api.common.state.ValueState) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) DataOutputSerializer(org.apache.flink.core.memory.DataOutputSerializer) Assert(org.junit.Assert) Comparator(java.util.Comparator) TemporaryFolder(org.junit.rules.TemporaryFolder)
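
Both iterator tests above wrap a plain RocksIterator; stripped of the Flink wrappers, the underlying traversal is the standard seekToFirst/isValid/next loop. A minimal sketch against a scratch database, with an illustrative path and keys:

import org.rocksdb.Options;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;

public class IterateDemo {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/iterate-demo")) {
            db.put("key1".getBytes(), "value1".getBytes());
            db.put("key2".getBytes(), "value2".getBytes());
            // Open the iterator after the writes so the snapshot includes them.
            try (ReadOptions readOptions = new ReadOptions();
                 RocksIterator iterator =
                         db.newIterator(db.getDefaultColumnFamily(), readOptions)) {
                // Same traversal contract the RocksIteratorWrapper builds on.
                for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
                    System.out.println(new String(iterator.key())
                            + " -> " + new String(iterator.value()));
                }
            }
        }
    }
}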

Example 19 with ColumnFamilyHandle

Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.

The class RocksKeyGroupsRocksSingleStateIteratorTest, method testMergeIterator.

public void testMergeIterator(int maxParallelism) throws Exception {
    Random random = new Random(1234);
    try (ReadOptions readOptions = new ReadOptions();
        RocksDB rocksDB = RocksDB.open(tempFolder.getRoot().getAbsolutePath())) {
        List<Tuple2<RocksIteratorWrapper, Integer>> rocksIteratorsWithKVStateId = new ArrayList<>();
        List<Tuple2<ColumnFamilyHandle, Integer>> columnFamilyHandlesWithKeyCount = new ArrayList<>();
        int totalKeysExpected = 0;
        for (int c = 0; c < NUM_KEY_VAL_STATES; ++c) {
            ColumnFamilyHandle handle = rocksDB.createColumnFamily(new ColumnFamilyDescriptor(("column-" + c).getBytes(ConfigConstants.DEFAULT_CHARSET)));
            ByteArrayOutputStreamWithPos bos = new ByteArrayOutputStreamWithPos();
            DataOutputStream dos = new DataOutputStream(bos);
            int numKeys = random.nextInt(MAX_NUM_KEYS + 1);
            for (int i = 0; i < numKeys; ++i) {
                if (maxParallelism <= Byte.MAX_VALUE) {
                    dos.writeByte(i);
                } else {
                    dos.writeShort(i);
                }
                dos.writeInt(i);
                byte[] key = bos.toByteArray();
                byte[] val = new byte[] { 42 };
                rocksDB.put(handle, key, val);
                bos.reset();
            }
            columnFamilyHandlesWithKeyCount.add(new Tuple2<>(handle, numKeys));
            totalKeysExpected += numKeys;
        }
        CloseableRegistry closeableRegistry = new CloseableRegistry();
        int id = 0;
        for (Tuple2<ColumnFamilyHandle, Integer> columnFamilyHandle : columnFamilyHandlesWithKeyCount) {
            RocksIteratorWrapper rocksIterator = RocksDBOperationUtils.getRocksIterator(rocksDB, columnFamilyHandle.f0, readOptions);
            closeableRegistry.registerCloseable(rocksIterator);
            rocksIteratorsWithKVStateId.add(new Tuple2<>(rocksIterator, id));
            ++id;
        }
        try (RocksStatesPerKeyGroupMergeIterator mergeIterator = new RocksStatesPerKeyGroupMergeIterator(closeableRegistry, rocksIteratorsWithKVStateId, Collections.emptyList(), maxParallelism <= Byte.MAX_VALUE ? 1 : 2)) {
            int prevKVState = -1;
            int prevKey = -1;
            int prevKeyGroup = -1;
            int totalKeysActual = 0;
            while (mergeIterator.isValid()) {
                ByteBuffer bb = ByteBuffer.wrap(mergeIterator.key());
                int keyGroup = maxParallelism > Byte.MAX_VALUE ? bb.getShort() : bb.get();
                int key = bb.getInt();
                Assert.assertTrue(keyGroup >= prevKeyGroup);
                Assert.assertTrue(key >= prevKey);
                Assert.assertEquals(prevKeyGroup != keyGroup, mergeIterator.isNewKeyGroup());
                Assert.assertEquals(prevKVState != mergeIterator.kvStateId(), mergeIterator.isNewKeyValueState());
                prevKeyGroup = keyGroup;
                // advance prevKey as well; without this the key-ordering assertion above
                // compares against the initial -1 forever and can never fail
                prevKey = key;
                prevKVState = mergeIterator.kvStateId();
                mergeIterator.next();
                ++totalKeysActual;
            }
            Assert.assertEquals(totalKeysExpected, totalKeysActual);
        }
        IOUtils.closeQuietly(rocksDB.getDefaultColumnFamily());
        for (Tuple2<ColumnFamilyHandle, Integer> handleWithCount : columnFamilyHandlesWithKeyCount) {
            IOUtils.closeQuietly(handleWithCount.f0);
        }
    }
}
Also used : RocksDB(org.rocksdb.RocksDB) RocksStatesPerKeyGroupMergeIterator(org.apache.flink.contrib.streaming.state.iterator.RocksStatesPerKeyGroupMergeIterator) DataOutputStream(java.io.DataOutputStream) ArrayList(java.util.ArrayList) ColumnFamilyDescriptor(org.rocksdb.ColumnFamilyDescriptor) CloseableRegistry(org.apache.flink.core.fs.CloseableRegistry) ByteBuffer(java.nio.ByteBuffer) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) Random(java.util.Random) ReadOptions(org.rocksdb.ReadOptions) Tuple2(org.apache.flink.api.java.tuple.Tuple2) ByteArrayOutputStreamWithPos(org.apache.flink.core.memory.ByteArrayOutputStreamWithPos)
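
The writeByte/writeShort branch above reflects how the key-group prefix is sized: one byte when maxParallelism fits in a signed byte, two bytes otherwise, matching the 1-or-2 argument passed to the merge iterator. A self-contained sketch of writing and re-reading one such composite entry, with illustrative values:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public class KeyGroupPrefixDemo {
    public static void main(String[] args) throws IOException {
        int maxParallelism = 4096; // > Byte.MAX_VALUE, so a 2-byte prefix is needed
        int prefixBytes = maxParallelism <= Byte.MAX_VALUE ? 1 : 2;

        // Write the key-group prefix first, then the key, as the test does.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);
        int keyGroup = 300;
        int key = 300;
        if (prefixBytes == 1) {
            dos.writeByte(keyGroup);
        } else {
            dos.writeShort(keyGroup);
        }
        dos.writeInt(key);

        // Read it back the way the merge-iterator loop does.
        ByteBuffer bb = ByteBuffer.wrap(bos.toByteArray());
        int readKeyGroup = prefixBytes == 1 ? bb.get() : bb.getShort();
        System.out.println("keyGroup=" + readKeyGroup + ", key=" + bb.getInt());
    }
}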

Example 20 with ColumnFamilyHandle

Use of org.rocksdb.ColumnFamilyHandle in project kafka by apache.

The class RocksDBTimestampedStoreTest, method verifyOldAndNewColumnFamily.

private void verifyOldAndNewColumnFamily() throws Exception {
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions), new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    RocksDB db = null;
    ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null;
    boolean errorOccurred = false;
    try {
        db = RocksDB.open(dbOptions, new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        withTimestampColumnFamily = columnFamilies.get(1);
        assertThat(db.get(noTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key1".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key2".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key4".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key5".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key7".getBytes()).length, is(7));
        assertThat(db.get(noTimestampColumnFamily, "key8".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key11".getBytes()), new IsNull<>());
        assertThat(db.get(noTimestampColumnFamily, "key12".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "unknown".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key1".getBytes()).length, is(8 + 1));
        assertThat(db.get(withTimestampColumnFamily, "key2".getBytes()).length, is(12));
        assertThat(db.get(withTimestampColumnFamily, "key3".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key4".getBytes()).length, is(8 + 4));
        assertThat(db.get(withTimestampColumnFamily, "key5".getBytes()).length, is(8 + 5));
        assertThat(db.get(withTimestampColumnFamily, "key6".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key7".getBytes()), new IsNull<>());
        assertThat(db.get(withTimestampColumnFamily, "key8".getBytes()).length, is(18));
        assertThat(db.get(withTimestampColumnFamily, "key11".getBytes()).length, is(21));
        assertThat(db.get(withTimestampColumnFamily, "key12".getBytes()), new IsNull<>());
    } catch (final RuntimeException fatal) {
        errorOccurred = true;
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (withTimestampColumnFamily != null) {
            withTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        if (errorOccurred) {
            dbOptions.close();
            columnFamilyOptions.close();
        }
    }
    // check that the store still opens in upgrade mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode"));
    } finally {
        rocksDBStore.close();
    }
    // clear old CF
    columnFamilies.clear();
    db = null;
    noTimestampColumnFamily = null;
    try {
        db = RocksDB.open(dbOptions, new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        db.delete(noTimestampColumnFamily, "key7".getBytes());
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        columnFamilyOptions.close();
    }
    // check that the store now opens in regular mode
    try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    }
}
Also used : RocksDB(org.rocksdb.RocksDB) ArrayList(java.util.ArrayList) ColumnFamilyDescriptor(org.rocksdb.ColumnFamilyDescriptor) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) ColumnFamilyOptions(org.rocksdb.ColumnFamilyOptions) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) DBOptions(org.rocksdb.DBOptions) File(java.io.File)
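
The closing-order comment in the finally blocks generalizes to any code that opens RocksDB with explicit column families: handles depend on the database, and the database depends on its options, so teardown runs in reverse dependency order. A minimal sketch of the same pattern in isolation, with an illustrative path:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CloseOrderSketch {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true);
        final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions();
        final List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
                new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions));
        final List<ColumnFamilyHandle> handles = new ArrayList<>(descriptors.size());
        RocksDB db = null;
        try {
            db = RocksDB.open(dbOptions, "/tmp/close-order-demo", descriptors, handles);
            // ... use the column families ...
        } finally {
            // Close in dependency order: handles, then the DB, then the options.
            for (ColumnFamilyHandle handle : handles) {
                handle.close();
            }
            if (db != null) {
                db.close();
            }
            dbOptions.close();
            cfOptions.close();
        }
    }
}

The Kafka test above defers closing dbOptions and columnFamilyOptions on the success path only because it reopens the database with the same objects later in the method; once they are no longer needed, they are closed in the same order.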

Aggregations

ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle): 55 uses
ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor): 27 uses
ArrayList (java.util.ArrayList): 21 uses
RocksDBException (org.rocksdb.RocksDBException): 19 uses
RocksDB (org.rocksdb.RocksDB): 16 uses
Test (org.junit.Test): 10 uses
DBOptions (org.rocksdb.DBOptions): 9 uses
ColumnFamilyOptions (org.rocksdb.ColumnFamilyOptions): 8 uses
File (java.io.File): 7 uses
HashMap (java.util.HashMap): 6 uses
Map (java.util.Map): 6 uses
WriteOptions (org.rocksdb.WriteOptions): 6 uses
IOException (java.io.IOException): 5 uses
ValueStateDescriptor (org.apache.flink.api.common.state.ValueStateDescriptor): 5 uses
ReadOptions (org.rocksdb.ReadOptions): 5 uses
List (java.util.List): 4 uses
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 4 uses
KeyGroup (org.apache.flink.runtime.state.restore.KeyGroup): 4 uses
LinkedHashMap (java.util.LinkedHashMap): 3 uses
SortedMap (java.util.SortedMap): 3 uses