Example 1 with ColumnFamilyHandle

Use of org.rocksdb.ColumnFamilyHandle in project jstorm by alibaba, from the class RocksTTLDBCache, method getHandler.

protected Entry<Integer, ColumnFamilyHandle> getHandler(int timeoutSecond) {
    // pick the column family whose TTL window is the smallest one that still covers timeoutSecond
    Entry<Integer, ColumnFamilyHandle> ceilingEntry = windowHandlers.ceilingEntry(timeoutSecond);
    if (ceilingEntry != null) {
        return ceilingEntry;
    }
    // no window is large enough, so fall back to the smallest window
    return windowHandlers.firstEntry();
}
Also used: ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle)
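
The lookup that getHandler relies on is TreeMap.ceilingEntry, which returns the entry with the smallest key greater than or equal to the argument. A minimal standalone sketch of that behavior (the window sizes and names are hypothetical, and plain strings stand in for the ColumnFamilyHandle values):

import java.util.TreeMap;

public class CeilingLookupDemo {

    public static void main(String[] args) {
        // TTL windows in seconds, mapped to stand-ins for the real handles
        TreeMap<Integer, String> windows = new TreeMap<>();
        windows.put(60, "cf-ttl-60");
        windows.put(300, "cf-ttl-300");
        windows.put(3600, "cf-ttl-3600");

        // the smallest window that still covers 120 seconds is the 300s window
        System.out.println(windows.ceilingEntry(120));  // 300=cf-ttl-300
        // no window covers 7200 seconds, so getHandler falls back to firstEntry()
        System.out.println(windows.ceilingEntry(7200)); // null
        System.out.println(windows.firstEntry());       // 60=cf-ttl-60
    }
}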

Example 2 with ColumnFamilyHandle

Use of org.rocksdb.ColumnFamilyHandle in project flink by apache, from the class RocksDBKeyedStateBackend, method getColumnFamily.

// ------------------------------------------------------------------------
//  State factories
// ------------------------------------------------------------------------
/**
 * Creates a column family handle for use with a k/v state. When restoring from a snapshot
 * we don't restore the individual k/v states, just the global RocksDB database and the
 * list of column families. When a k/v state is first requested, we check here whether we
 * already have a column family for it and return it, or create a new one if it doesn't exist.
 *
 * <p>This also checks whether the {@link StateDescriptor} for a state matches the one
 * that we checkpointed, i.e. is already in the map of column families.
 */
@SuppressWarnings({"rawtypes", "unchecked"})
protected <N, S> ColumnFamilyHandle getColumnFamily(StateDescriptor<?, S> descriptor, TypeSerializer<N> namespaceSerializer) throws IOException {
    Tuple2<ColumnFamilyHandle, RegisteredBackendStateMetaInfo<?, ?>> stateInfo = kvStateInformation.get(descriptor.getName());
    RegisteredBackendStateMetaInfo<N, S> newMetaInfo = new RegisteredBackendStateMetaInfo<>(descriptor.getType(), descriptor.getName(), namespaceSerializer, descriptor.getSerializer());
    if (stateInfo != null) {
        if (newMetaInfo.isCompatibleWith(stateInfo.f1)) {
            stateInfo.f1 = newMetaInfo;
            return stateInfo.f0;
        } else {
            throw new IOException("Trying to access state using wrong meta info, was " + stateInfo.f1 + " trying access with " + newMetaInfo);
        }
    }
    ColumnFamilyDescriptor columnDescriptor = new ColumnFamilyDescriptor(descriptor.getName().getBytes(ConfigConstants.DEFAULT_CHARSET), columnOptions);
    try {
        ColumnFamilyHandle columnFamily = db.createColumnFamily(columnDescriptor);
        Tuple2<ColumnFamilyHandle, RegisteredBackendStateMetaInfo<N, S>> tuple = new Tuple2<>(columnFamily, newMetaInfo);
        // the raw Map type deliberately sidesteps the wildcard generics on kvStateInformation
        Map rawAccess = kvStateInformation;
        rawAccess.put(descriptor.getName(), tuple);
        return columnFamily;
    } catch (RocksDBException e) {
        throw new IOException("Error creating ColumnFamilyHandle.", e);
    }
}
Also used: RocksDBException (org.rocksdb.RocksDBException), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), RegisteredBackendStateMetaInfo (org.apache.flink.runtime.state.RegisteredBackendStateMetaInfo), IOException (java.io.IOException), ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor), Map (java.util.Map), HashMap (java.util.HashMap), ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle)
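
Stripped of Flink's metadata bookkeeping, the create-then-use pattern in getColumnFamily reduces to the plain RocksJava API. A minimal sketch, assuming a recent RocksJava version in which Options, RocksDB, and ColumnFamilyHandle are AutoCloseable (the path and column family name are hypothetical):

import java.nio.charset.StandardCharsets;

import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CreateColumnFamilySketch {

    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/cf-sketch")) {

            // one column family per logical state, named after the state descriptor
            ColumnFamilyHandle handle = db.createColumnFamily(
                    new ColumnFamilyDescriptor("my-state".getBytes(StandardCharsets.UTF_8)));

            // all reads and writes for that state go through the handle
            db.put(handle, "key".getBytes(StandardCharsets.UTF_8),
                    "value".getBytes(StandardCharsets.UTF_8));
            byte[] value = db.get(handle, "key".getBytes(StandardCharsets.UTF_8));
            System.out.println(new String(value, StandardCharsets.UTF_8));

            handle.close();
        }
    }
}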

Example 3 with ColumnFamilyHandle

Use of org.rocksdb.ColumnFamilyHandle in project flink by apache, from the class RocksDBKeyedStateBackend, method restoreOldSavepointKeyedState.

/**
	 * For backwards compatibility, remove again later!
	 */
@Deprecated
private void restoreOldSavepointKeyedState(Collection<KeyGroupsStateHandle> restoreState) throws Exception {
    if (restoreState.isEmpty()) {
        return;
    }
    Preconditions.checkState(1 == restoreState.size(), "Only one element expected here.");
    HashMap<String, RocksDBStateBackend.FinalFullyAsyncSnapshot> namedStates;
    try (FSDataInputStream inputStream = restoreState.iterator().next().openInputStream()) {
        namedStates = InstantiationUtil.deserializeObject(inputStream, userCodeClassLoader);
    }
    Preconditions.checkState(1 == namedStates.size(), "Only one element expected here.");
    DataInputView inputView = namedStates.values().iterator().next().stateHandle.getState(userCodeClassLoader);
    // clear k/v state information before filling it
    kvStateInformation.clear();
    // first get the column family mapping
    int numColumns = inputView.readInt();
    Map<Byte, StateDescriptor<?, ?>> columnFamilyMapping = new HashMap<>(numColumns);
    for (int i = 0; i < numColumns; i++) {
        byte mappingByte = inputView.readByte();
        ObjectInputStream ooIn = new InstantiationUtil.ClassLoaderObjectInputStream(new DataInputViewStream(inputView), userCodeClassLoader);
        StateDescriptor stateDescriptor = (StateDescriptor) ooIn.readObject();
        columnFamilyMapping.put(mappingByte, stateDescriptor);
        // this will fill in the k/v state information
        getColumnFamily(stateDescriptor, MigrationNamespaceSerializerProxy.INSTANCE);
    }
    // try and read until EOF
    try {
        // the EOFException will get us out of this...
        while (true) {
            byte mappingByte = inputView.readByte();
            ColumnFamilyHandle handle = getColumnFamily(columnFamilyMapping.get(mappingByte), MigrationNamespaceSerializerProxy.INSTANCE);
            byte[] keyAndNamespace = BytePrimitiveArraySerializer.INSTANCE.deserialize(inputView);
            ByteArrayInputStreamWithPos bis = new ByteArrayInputStreamWithPos(keyAndNamespace);
            K reconstructedKey = keySerializer.deserialize(new DataInputViewStreamWrapper(bis));
            int len = bis.getPosition();
            // note: key groups can exceed one byte when maxParallelism > 128, so no byte cast here
            int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(reconstructedKey, numberOfKeyGroups);
            if (keyGroupPrefixBytes == 1) {
                // copy and override one byte (42) between key and namespace
                System.arraycopy(keyAndNamespace, 0, keyAndNamespace, 1, len);
                keyAndNamespace[0] = (byte) keyGroup;
            } else {
                byte[] largerKey = new byte[1 + keyAndNamespace.length];
                // write key-group
                largerKey[0] = (byte) ((keyGroup >> 8) & 0xFF);
                largerKey[1] = (byte) (keyGroup & 0xFF);
                // write key
                System.arraycopy(keyAndNamespace, 0, largerKey, 2, len);
                //skip one byte (42), write namespace
                System.arraycopy(keyAndNamespace, 1 + len, largerKey, 2 + len, keyAndNamespace.length - len - 1);
                keyAndNamespace = largerKey;
            }
            byte[] value = BytePrimitiveArraySerializer.INSTANCE.deserialize(inputView);
            db.put(handle, keyAndNamespace, value);
        }
    } catch (EOFException e) {
        // expected
    }
}
Also used: HashMap (java.util.HashMap), DataInputView (org.apache.flink.core.memory.DataInputView), DataInputViewStreamWrapper (org.apache.flink.core.memory.DataInputViewStreamWrapper), ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle), ByteArrayInputStreamWithPos (org.apache.flink.core.memory.ByteArrayInputStreamWithPos), ReducingStateDescriptor (org.apache.flink.api.common.state.ReducingStateDescriptor), ListStateDescriptor (org.apache.flink.api.common.state.ListStateDescriptor), MapStateDescriptor (org.apache.flink.api.common.state.MapStateDescriptor), AggregatingStateDescriptor (org.apache.flink.api.common.state.AggregatingStateDescriptor), StateDescriptor (org.apache.flink.api.common.state.StateDescriptor), ValueStateDescriptor (org.apache.flink.api.common.state.ValueStateDescriptor), FoldingStateDescriptor (org.apache.flink.api.common.state.FoldingStateDescriptor), EOFException (java.io.EOFException), FSDataInputStream (org.apache.flink.core.fs.FSDataInputStream), DataInputViewStream (org.apache.flink.api.java.typeutils.runtime.DataInputViewStream), ObjectInputStream (java.io.ObjectInputStream)
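
The byte shuffling in the restore loop is easy to misread: the old savepoint stored each entry as [key | separator byte 42 | namespace], while the new format is [key-group prefix (1 or 2 bytes) | key | namespace]. A standalone helper (hypothetical, not part of the Flink source) that expresses the same transformation for both prefix sizes:

// Rewrites [key | 42 | namespace] into [key group | key | namespace].
// keyLen is the length of the serialized key, i.e. the stream position
// right after deserializing the key.
static byte[] prependKeyGroup(byte[] keyAndNamespace, int keyLen, int keyGroup, int prefixBytes) {
    byte[] result = new byte[prefixBytes + keyAndNamespace.length - 1];
    if (prefixBytes == 1) {
        result[0] = (byte) keyGroup;
    } else {
        // big-endian, matching the two-byte branch above
        result[0] = (byte) ((keyGroup >> 8) & 0xFF);
        result[1] = (byte) (keyGroup & 0xFF);
    }
    // copy the key
    System.arraycopy(keyAndNamespace, 0, result, prefixBytes, keyLen);
    // skip the separator byte (42), then copy the namespace
    System.arraycopy(keyAndNamespace, keyLen + 1, result, prefixBytes + keyLen,
            keyAndNamespace.length - keyLen - 1);
    return result;
}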

Example 4 with ColumnFamilyHandle

Use of org.rocksdb.ColumnFamilyHandle in project flink by apache, from the class RocksDBMergeIteratorTest, method testMergeIterator.

public void testMergeIterator(int maxParallelism) throws Exception {
    Random random = new Random(1234);
    File tmpDir = CommonTestUtils.createTempDirectory();
    RocksDB rocksDB = RocksDB.open(tmpDir.getAbsolutePath());
    try {
        List<Tuple2<RocksIterator, Integer>> rocksIteratorsWithKVStateId = new ArrayList<>();
        List<Tuple2<ColumnFamilyHandle, Integer>> columnFamilyHandlesWithKeyCount = new ArrayList<>();
        int totalKeysExpected = 0;
        for (int c = 0; c < NUM_KEY_VAL_STATES; ++c) {
            ColumnFamilyHandle handle = rocksDB.createColumnFamily(new ColumnFamilyDescriptor(("column-" + c).getBytes(ConfigConstants.DEFAULT_CHARSET)));
            ByteArrayOutputStreamWithPos bos = new ByteArrayOutputStreamWithPos();
            DataOutputStream dos = new DataOutputStream(bos);
            int numKeys = random.nextInt(MAX_NUM_KEYS + 1);
            for (int i = 0; i < numKeys; ++i) {
                if (maxParallelism <= Byte.MAX_VALUE) {
                    dos.writeByte(i);
                } else {
                    dos.writeShort(i);
                }
                dos.writeInt(i);
                byte[] key = bos.toByteArray();
                byte[] val = new byte[] { 42 };
                rocksDB.put(handle, key, val);
                bos.reset();
            }
            columnFamilyHandlesWithKeyCount.add(new Tuple2<>(handle, numKeys));
            totalKeysExpected += numKeys;
        }
        int id = 0;
        for (Tuple2<ColumnFamilyHandle, Integer> columnFamilyHandle : columnFamilyHandlesWithKeyCount) {
            rocksIteratorsWithKVStateId.add(new Tuple2<>(rocksDB.newIterator(columnFamilyHandle.f0), id));
            ++id;
        }
        RocksDBKeyedStateBackend.RocksDBMergeIterator mergeIterator = new RocksDBKeyedStateBackend.RocksDBMergeIterator(rocksIteratorsWithKVStateId, maxParallelism <= Byte.MAX_VALUE ? 1 : 2);
        int prevKVState = -1;
        int prevKey = -1;
        int prevKeyGroup = -1;
        int totalKeysActual = 0;
        while (mergeIterator.isValid()) {
            ByteBuffer bb = ByteBuffer.wrap(mergeIterator.key());
            int keyGroup = maxParallelism > Byte.MAX_VALUE ? bb.getShort() : bb.get();
            int key = bb.getInt();
            Assert.assertTrue(keyGroup >= prevKeyGroup);
            Assert.assertTrue(key >= prevKey);
            Assert.assertEquals(prevKeyGroup != keyGroup, mergeIterator.isNewKeyGroup());
            Assert.assertEquals(prevKVState != mergeIterator.kvStateId(), mergeIterator.isNewKeyValueState());
            prevKeyGroup = keyGroup;
            prevKVState = mergeIterator.kvStateId();
            //System.out.println(keyGroup + " " + key + " " + mergeIterator.kvStateId());
            mergeIterator.next();
            ++totalKeysActual;
        }
        Assert.assertEquals(totalKeysExpected, totalKeysActual);
        for (Tuple2<ColumnFamilyHandle, Integer> handleWithCount : columnFamilyHandlesWithKeyCount) {
            rocksDB.dropColumnFamily(handleWithCount.f0);
        }
    } finally {
        rocksDB.close();
    }
}
Also used: RocksDB (org.rocksdb.RocksDB), DataOutputStream (java.io.DataOutputStream), ArrayList (java.util.ArrayList), ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor), ByteBuffer (java.nio.ByteBuffer), ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle), Random (java.util.Random), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), File (java.io.File), ByteArrayOutputStreamWithPos (org.apache.flink.core.memory.ByteArrayOutputStreamWithPos)
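
The test above creates its column families inside a freshly opened database. Reopening a database that already contains column families works differently: every existing family, including "default", must be declared at open time, and RocksDB fills a handle list in the same order as the descriptors. A minimal sketch against the standard RocksJava API (the path is hypothetical):

import java.util.ArrayList;
import java.util.List;

import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.DBOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class ReopenWithColumnFamilies {

    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        String path = "/tmp/merge-iterator-db";

        // discover the families persisted in the database
        List<ColumnFamilyDescriptor> descriptors = new ArrayList<>();
        try (Options listOptions = new Options()) {
            for (byte[] name : RocksDB.listColumnFamilies(listOptions, path)) {
                descriptors.add(new ColumnFamilyDescriptor(name));
            }
        }
        if (descriptors.isEmpty()) {
            // a brand-new database only has the default family
            descriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
        }

        List<ColumnFamilyHandle> handles = new ArrayList<>();
        try (DBOptions dbOptions = new DBOptions().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(dbOptions, path, descriptors, handles)) {
            // handles.get(i) corresponds to descriptors.get(i)
            for (ColumnFamilyHandle handle : handles) {
                handle.close();
            }
        }
    }
}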

Example 5 with ColumnFamilyHandle

Use of org.rocksdb.ColumnFamilyHandle in project flink by apache, from the class RocksDBStateBackendTest, method setupRocksKeyedStateBackend.

public void setupRocksKeyedStateBackend() throws Exception {
    blocker = new OneShotLatch();
    waiter = new OneShotLatch();
    testStreamFactory = new BlockerCheckpointStreamFactory(1024 * 1024);
    testStreamFactory.setBlockerLatch(blocker);
    testStreamFactory.setWaiterLatch(waiter);
    testStreamFactory.setAfterNumberInvocations(100);
    RocksDBStateBackend backend = getStateBackend();
    Environment env = new DummyEnvironment("TestTask", 1, 0);
    keyedStateBackend = (RocksDBKeyedStateBackend<Integer>) backend.createKeyedStateBackend(env, new JobID(), "Test", IntSerializer.INSTANCE, 2, new KeyGroupRange(0, 1), mock(TaskKvStateRegistry.class));
    testState1 = keyedStateBackend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, new ValueStateDescriptor<>("TestState-1", Integer.class, 0));
    testState2 = keyedStateBackend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, new ValueStateDescriptor<>("TestState-2", String.class, ""));
    allCreatedCloseables = new ArrayList<>();
    keyedStateBackend.db = spy(keyedStateBackend.db);
    doAnswer(new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            RocksIterator rocksIterator = spy((RocksIterator) invocationOnMock.callRealMethod());
            allCreatedCloseables.add(rocksIterator);
            return rocksIterator;
        }
    }).when(keyedStateBackend.db).newIterator(any(ColumnFamilyHandle.class), any(ReadOptions.class));
    doAnswer(new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            Snapshot snapshot = spy((Snapshot) invocationOnMock.callRealMethod());
            allCreatedCloseables.add(snapshot);
            return snapshot;
        }
    }).when(keyedStateBackend.db).getSnapshot();
    doAnswer(new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            ColumnFamilyHandle columnFamily = spy((ColumnFamilyHandle) invocationOnMock.callRealMethod());
            allCreatedCloseables.add(columnFamily);
            return columnFamily;
        }
    }).when(keyedStateBackend.db).createColumnFamily(any(ColumnFamilyDescriptor.class));
    for (int i = 0; i < 100; ++i) {
        keyedStateBackend.setCurrentKey(i);
        testState1.update(4200 + i);
        testState2.update("S-" + (4200 + i));
    }
}
Also used: KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange), TaskKvStateRegistry (org.apache.flink.runtime.query.TaskKvStateRegistry), DummyEnvironment (org.apache.flink.runtime.operators.testutils.DummyEnvironment), RocksIterator (org.rocksdb.RocksIterator), ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor), ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle), ValueStateDescriptor (org.apache.flink.api.common.state.ValueStateDescriptor), Snapshot (org.rocksdb.Snapshot), ReadOptions (org.rocksdb.ReadOptions), InvocationOnMock (org.mockito.invocation.InvocationOnMock), OneShotLatch (org.apache.flink.core.testutils.OneShotLatch), BlockerCheckpointStreamFactory (org.apache.flink.runtime.util.BlockerCheckpointStreamFactory), Environment (org.apache.flink.runtime.execution.Environment), RocksObject (org.rocksdb.RocksObject), JobID (org.apache.flink.api.common.JobID)
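
The spies above exist to verify that every native resource the backend creates (iterators, snapshots, column family handles) is eventually closed. In plain RocksJava code the same discipline means releasing those objects before closing the database itself. A minimal sketch, assuming a recent RocksJava version with AutoCloseable resources (the path is hypothetical):

import org.rocksdb.Options;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;
import org.rocksdb.Snapshot;

public class CleanupOrderSketch {

    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/cleanup-sketch")) {

            // pin a consistent view of the database
            Snapshot snapshot = db.getSnapshot();
            try (ReadOptions readOptions = new ReadOptions().setSnapshot(snapshot);
                 RocksIterator iterator = db.newIterator(readOptions)) {
                for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
                    // read iterator.key() / iterator.value() here
                }
            }
            // release native resources before db.close() runs
            db.releaseSnapshot(snapshot);
        }
    }
}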

Aggregations

ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle): 55
ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor): 27
ArrayList (java.util.ArrayList): 21
RocksDBException (org.rocksdb.RocksDBException): 19
RocksDB (org.rocksdb.RocksDB): 16
Test (org.junit.Test): 10
DBOptions (org.rocksdb.DBOptions): 9
ColumnFamilyOptions (org.rocksdb.ColumnFamilyOptions): 8
File (java.io.File): 7
HashMap (java.util.HashMap): 6
Map (java.util.Map): 6
WriteOptions (org.rocksdb.WriteOptions): 6
IOException (java.io.IOException): 5
ValueStateDescriptor (org.apache.flink.api.common.state.ValueStateDescriptor): 5
ReadOptions (org.rocksdb.ReadOptions): 5
List (java.util.List): 4
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 4
KeyGroup (org.apache.flink.runtime.state.restore.KeyGroup): 4
LinkedHashMap (java.util.LinkedHashMap): 3
SortedMap (java.util.SortedMap): 3