
Example 1 with RocksStatesPerKeyGroupMergeIterator

Use of org.apache.flink.contrib.streaming.state.iterator.RocksStatesPerKeyGroupMergeIterator in project flink by apache.

From the class RocksDBFullSnapshotResources, method createKVStateIterator:

@Override
public KeyValueStateIterator createKVStateIterator() throws IOException {
    CloseableRegistry closeableRegistry = new CloseableRegistry();
    try {
        ReadOptions readOptions = new ReadOptions();
        closeableRegistry.registerCloseable(readOptions::close);
        readOptions.setSnapshot(snapshot);
        List<Tuple2<RocksIteratorWrapper, Integer>> kvStateIterators = createKVStateIterators(closeableRegistry, readOptions);
        List<SingleStateIterator> heapPriorityQueueIterators = createHeapPriorityQueueIterators();
        // merge the per-column-family RocksDB iterators and the heap priority queue
        // iterators into a single iterator ordered by key group
        return new RocksStatesPerKeyGroupMergeIterator(closeableRegistry, kvStateIterators, heapPriorityQueueIterators, keyGroupPrefixBytes);
    } catch (Throwable t) {
        // If anything goes wrong, clean up our stuff. If things went smoothly the
        // merging iterator is now responsible for closing the resources
        IOUtils.closeQuietly(closeableRegistry);
        throw new IOException("Error creating merge iterator", t);
    }
}
Also used:
ReadOptions (org.rocksdb.ReadOptions)
RocksStatesPerKeyGroupMergeIterator (org.apache.flink.contrib.streaming.state.iterator.RocksStatesPerKeyGroupMergeIterator)
Tuple2 (org.apache.flink.api.java.tuple.Tuple2)
SingleStateIterator (org.apache.flink.contrib.streaming.state.iterator.SingleStateIterator)
IOException (java.io.IOException)
CloseableRegistry (org.apache.flink.core.fs.CloseableRegistry)
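For context, the KeyValueStateIterator built here is normally walked entry by entry while writing a full snapshot. The sketch below is not Flink code: the class and method names are invented, and it assumes that the org.apache.flink.runtime.state.KeyValueStateIterator interface exposes keyGroup() and value() alongside the accessors exercised in the test of Example 2.

import org.apache.flink.runtime.state.KeyValueStateIterator;

// Minimal consumption sketch (illustrative only, not Flink code).
public final class KVStateIteratorSketch {

    static long dump(KeyValueStateIterator iterator) throws Exception {
        long entries = 0;
        try (KeyValueStateIterator it = iterator) {
            while (it.isValid()) {
                if (it.isNewKeyGroup()) {
                    // entries come out ordered by key group first
                    System.out.println("key group " + it.keyGroup());
                }
                if (it.isNewKeyValueState()) {
                    // the current entry belongs to a different registered state than the previous one
                    System.out.println("  now in state id " + it.kvStateId());
                }
                // key() still carries the key-group prefix; value() is the serialized state value
                System.out.println("    key bytes: " + it.key().length
                        + ", value bytes: " + it.value().length);
                entries++;
                it.next();
            }
        }
        return entries;
    }
}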

Example 2 with RocksStatesPerKeyGroupMergeIterator

Use of org.apache.flink.contrib.streaming.state.iterator.RocksStatesPerKeyGroupMergeIterator in project flink by apache.

From the class RocksKeyGroupsRocksSingleStateIteratorTest, the parameterized helper testMergeIterator:

public void testMergeIterator(int maxParallelism) throws Exception {
    Random random = new Random(1234);
    try (ReadOptions readOptions = new ReadOptions();
        RocksDB rocksDB = RocksDB.open(tempFolder.getRoot().getAbsolutePath())) {
        List<Tuple2<RocksIteratorWrapper, Integer>> rocksIteratorsWithKVStateId = new ArrayList<>();
        List<Tuple2<ColumnFamilyHandle, Integer>> columnFamilyHandlesWithKeyCount = new ArrayList<>();
        int totalKeysExpected = 0;
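        // create NUM_KEY_VAL_STATES column families, each filled with a random number of keys;
        // every key is a key-group prefix (1 or 2 bytes, depending on maxParallelism) followed by an int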
        for (int c = 0; c < NUM_KEY_VAL_STATES; ++c) {
            ColumnFamilyHandle handle = rocksDB.createColumnFamily(new ColumnFamilyDescriptor(("column-" + c).getBytes(ConfigConstants.DEFAULT_CHARSET)));
            ByteArrayOutputStreamWithPos bos = new ByteArrayOutputStreamWithPos();
            DataOutputStream dos = new DataOutputStream(bos);
            int numKeys = random.nextInt(MAX_NUM_KEYS + 1);
            for (int i = 0; i < numKeys; ++i) {
                if (maxParallelism <= Byte.MAX_VALUE) {
                    dos.writeByte(i);
                } else {
                    dos.writeShort(i);
                }
                dos.writeInt(i);
                byte[] key = bos.toByteArray();
                byte[] val = new byte[] { 42 };
                rocksDB.put(handle, key, val);
                bos.reset();
            }
            columnFamilyHandlesWithKeyCount.add(new Tuple2<>(handle, numKeys));
            totalKeysExpected += numKeys;
        }
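        // open one RocksDB iterator per column family, register it for cleanup,
        // and tag it with an ascending k/v state id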
        CloseableRegistry closeableRegistry = new CloseableRegistry();
        int id = 0;
        for (Tuple2<ColumnFamilyHandle, Integer> columnFamilyHandle : columnFamilyHandlesWithKeyCount) {
            RocksIteratorWrapper rocksIterator = RocksDBOperationUtils.getRocksIterator(rocksDB, columnFamilyHandle.f0, readOptions);
            closeableRegistry.registerCloseable(rocksIterator);
            rocksIteratorsWithKVStateId.add(new Tuple2<>(rocksIterator, id));
            ++id;
        }
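        // the last constructor argument is the key-group prefix width in bytes:
        // 1 if maxParallelism fits into a byte, otherwise 2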
        try (RocksStatesPerKeyGroupMergeIterator mergeIterator = new RocksStatesPerKeyGroupMergeIterator(closeableRegistry, rocksIteratorsWithKVStateId, Collections.emptyList(), maxParallelism <= Byte.MAX_VALUE ? 1 : 2)) {
            int prevKVState = -1;
            int prevKey = -1;
            int prevKeyGroup = -1;
            int totalKeysActual = 0;
            while (mergeIterator.isValid()) {
                ByteBuffer bb = ByteBuffer.wrap(mergeIterator.key());
                int keyGroup = maxParallelism > Byte.MAX_VALUE ? bb.getShort() : bb.get();
                int key = bb.getInt();
                Assert.assertTrue(keyGroup >= prevKeyGroup);
                Assert.assertTrue(key >= prevKey);
                Assert.assertEquals(prevKeyGroup != keyGroup, mergeIterator.isNewKeyGroup());
                Assert.assertEquals(prevKVState != mergeIterator.kvStateId(), mergeIterator.isNewKeyValueState());
                prevKeyGroup = keyGroup;
                prevKVState = mergeIterator.kvStateId();
                mergeIterator.next();
                ++totalKeysActual;
            }
            Assert.assertEquals(totalKeysExpected, totalKeysActual);
        }
        IOUtils.closeQuietly(rocksDB.getDefaultColumnFamily());
        for (Tuple2<ColumnFamilyHandle, Integer> handleWithCount : columnFamilyHandlesWithKeyCount) {
            IOUtils.closeQuietly(handleWithCount.f0);
        }
    }
}
Also used:
RocksDB (org.rocksdb.RocksDB)
RocksStatesPerKeyGroupMergeIterator (org.apache.flink.contrib.streaming.state.iterator.RocksStatesPerKeyGroupMergeIterator)
DataOutputStream (java.io.DataOutputStream)
ArrayList (java.util.ArrayList)
ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor)
CloseableRegistry (org.apache.flink.core.fs.CloseableRegistry)
ByteBuffer (java.nio.ByteBuffer)
ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle)
Random (java.util.Random)
ReadOptions (org.rocksdb.ReadOptions)
Tuple2 (org.apache.flink.api.java.tuple.Tuple2)
ByteArrayOutputStreamWithPos (org.apache.flink.core.memory.ByteArrayOutputStreamWithPos)
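The final constructor argument above is the key-group prefix width in bytes, chosen to match how the test serialized its keys. The helpers below are a standalone sketch of that relationship; they are not Flink API, the names are invented, and the logic simply mirrors what the test does.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative helpers only: they reproduce how the test sizes and writes the
// key-group prefix that the merge iterator later reads back from the leading
// bytes of every key.
public final class KeyGroupPrefixSketch {

    // 1 prefix byte if the maximum parallelism fits into a byte, otherwise 2 (as in the test)
    static int keyGroupPrefixBytes(int maxParallelism) {
        return maxParallelism <= Byte.MAX_VALUE ? 1 : 2;
    }

    // serialize a key the same way the test does: key-group prefix followed by the int key
    static byte[] composeKey(int keyGroup, int key, int maxParallelism) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (DataOutputStream dos = new DataOutputStream(bos)) {
            if (keyGroupPrefixBytes(maxParallelism) == 1) {
                dos.writeByte(keyGroup);
            } else {
                dos.writeShort(keyGroup);
            }
            dos.writeInt(key);
        }
        return bos.toByteArray();
    }
}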

Example 3 with RocksStatesPerKeyGroupMergeIterator

Use of org.apache.flink.contrib.streaming.state.iterator.RocksStatesPerKeyGroupMergeIterator in project flink by apache.

From the class RocksKeyGroupsRocksSingleStateIteratorTest, method testEmptyMergeIterator:

@Test
public void testEmptyMergeIterator() throws Exception {
    RocksStatesPerKeyGroupMergeIterator emptyIterator = new RocksStatesPerKeyGroupMergeIterator(new CloseableRegistry(), Collections.emptyList(), Collections.emptyList(), 2);
    Assert.assertFalse(emptyIterator.isValid());
}
Also used:
RocksStatesPerKeyGroupMergeIterator (org.apache.flink.contrib.streaming.state.iterator.RocksStatesPerKeyGroupMergeIterator)
CloseableRegistry (org.apache.flink.core.fs.CloseableRegistry)
Test (org.junit.Test)
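The catch-block comment in Example 1 says that, once construction succeeds, the merge iterator becomes responsible for the resources registered in the CloseableRegistry. The sketch below is a hedged follow-up test, not Flink code: it assumes (as that comment suggests) that close() also closes the registry handed to the constructor, and the flag-based dummy resource is purely illustrative.

import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.flink.contrib.streaming.state.iterator.RocksStatesPerKeyGroupMergeIterator;
import org.apache.flink.core.fs.CloseableRegistry;
import org.junit.Assert;
import org.junit.Test;

public class EmptyMergeIteratorOwnershipSketch {

    @Test
    public void closingEmptyIteratorReleasesRegisteredResources() throws Exception {
        CloseableRegistry registry = new CloseableRegistry();
        AtomicBoolean released = new AtomicBoolean(false);
        // dummy resource; the real code registers RocksIteratorWrapper and ReadOptions instances
        registry.registerCloseable(() -> released.set(true));

        RocksStatesPerKeyGroupMergeIterator emptyIterator =
                new RocksStatesPerKeyGroupMergeIterator(
                        registry, Collections.emptyList(), Collections.emptyList(), 2);
        Assert.assertFalse(emptyIterator.isValid());

        // assumption: closing the iterator closes the registry, which closes the dummy resource
        emptyIterator.close();
        Assert.assertTrue(released.get());
    }
}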

Aggregations

RocksStatesPerKeyGroupMergeIterator (org.apache.flink.contrib.streaming.state.iterator.RocksStatesPerKeyGroupMergeIterator): 3 uses
CloseableRegistry (org.apache.flink.core.fs.CloseableRegistry): 3 uses
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 2 uses
ReadOptions (org.rocksdb.ReadOptions): 2 uses
DataOutputStream (java.io.DataOutputStream): 1 use
IOException (java.io.IOException): 1 use
ByteBuffer (java.nio.ByteBuffer): 1 use
ArrayList (java.util.ArrayList): 1 use
Random (java.util.Random): 1 use
SingleStateIterator (org.apache.flink.contrib.streaming.state.iterator.SingleStateIterator): 1 use
ByteArrayOutputStreamWithPos (org.apache.flink.core.memory.ByteArrayOutputStreamWithPos): 1 use
Test (org.junit.Test): 1 use
ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor): 1 use
ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle): 1 use
RocksDB (org.rocksdb.RocksDB): 1 use