Use of org.apache.flink.contrib.streaming.state.iterator.RocksStateKeysIterator in project flink by apache.
From the class RocksDBRocksStateKeysIteratorTest, method testIteratorHelper:
<K> void testIteratorHelper(
        TypeSerializer<K> keySerializer, int maxKeyGroupNumber, Function<Integer, K> getKeyFunc)
        throws Exception {
    String testStateName = "aha";
    String namespace = "ns";

    try (RocksDBKeyedStateBackendTestFactory factory = new RocksDBKeyedStateBackendTestFactory()) {
        RocksDBKeyedStateBackend<K> keyedStateBackend =
                factory.create(tmp, keySerializer, maxKeyGroupNumber);
        ValueState<String> testState =
                keyedStateBackend.getPartitionedState(
                        namespace,
                        StringSerializer.INSTANCE,
                        new ValueStateDescriptor<>(testStateName, String.class));

        // insert records
        for (int i = 0; i < 1000; ++i) {
            keyedStateBackend.setCurrentKey(getKeyFunc.apply(i));
            testState.update(String.valueOf(i));
        }

        DataOutputSerializer outputStream = new DataOutputSerializer(8);
        boolean ambiguousKeyPossible =
                CompositeKeySerializationUtils.isAmbiguousKeyPossible(
                        keySerializer, StringSerializer.INSTANCE);
        CompositeKeySerializationUtils.writeNameSpace(
                namespace, StringSerializer.INSTANCE, outputStream, ambiguousKeyPossible);
        byte[] nameSpaceBytes = outputStream.getCopyOfBuffer();

        // already created with the state, should be closed with the backend
        ColumnFamilyHandle handle = keyedStateBackend.getColumnFamilyHandle(testStateName);

        try (RocksIteratorWrapper iterator =
                        RocksDBOperationUtils.getRocksIterator(
                                keyedStateBackend.db, handle, keyedStateBackend.getReadOptions());
                RocksStateKeysIterator<K> iteratorWrapper =
                        new RocksStateKeysIterator<>(
                                iterator, testStateName, keySerializer,
                                keyedStateBackend.getKeyGroupPrefixBytes(),
                                ambiguousKeyPossible, nameSpaceBytes)) {
            iterator.seekToFirst();

            // validate the fetched records
            List<Integer> fetchedKeys = new ArrayList<>(1000);
            while (iteratorWrapper.hasNext()) {
                fetchedKeys.add(Integer.parseInt(iteratorWrapper.next().toString()));
            }
            fetchedKeys.sort(Comparator.comparingInt(a -> a));
            Assert.assertEquals(1000, fetchedKeys.size());
            for (int i = 0; i < 1000; ++i) {
                Assert.assertEquals(i, fetchedKeys.get(i).intValue());
            }
        }
    }
}
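For context, the helper above is generic over the key type, so it can be driven from several @Test methods. The sketch below shows how such invocations might look; the serializer choices, the key-group count of 128, and the test method names are illustrative assumptions, not copied from the Flink test.

// Hypothetical invocations of testIteratorHelper; serializers, the key-group
// count (128) and the method names are illustrative assumptions.
@Test
public void testIteratorWithIntKeys() throws Exception {
    // fixed-length int keys: the ambiguous-key encoding is not needed
    testIteratorHelper(IntSerializer.INSTANCE, 128, i -> i);
}

@Test
public void testIteratorWithStringKeys() throws Exception {
    // variable-length String keys exercise the ambiguous-key encoding path
    testIteratorHelper(StringSerializer.INSTANCE, 128, String::valueOf);
}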
Use of org.apache.flink.contrib.streaming.state.iterator.RocksStateKeysIterator in project flink by apache.
From the class RocksDBKeyedStateBackend, method getKeys:
@SuppressWarnings("unchecked")
@Override
public <N> Stream<K> getKeys(String state, N namespace) {
    RocksDbKvStateInfo columnInfo = kvStateInformation.get(state);
    if (columnInfo == null
            || !(columnInfo.metaInfo instanceof RegisteredKeyValueStateBackendMetaInfo)) {
        return Stream.empty();
    }

    RegisteredKeyValueStateBackendMetaInfo<N, ?> registeredKeyValueStateBackendMetaInfo =
            (RegisteredKeyValueStateBackendMetaInfo<N, ?>) columnInfo.metaInfo;

    final TypeSerializer<N> namespaceSerializer =
            registeredKeyValueStateBackendMetaInfo.getNamespaceSerializer();
    final DataOutputSerializer namespaceOutputView = new DataOutputSerializer(8);
    boolean ambiguousKeyPossible =
            CompositeKeySerializationUtils.isAmbiguousKeyPossible(
                    getKeySerializer(), namespaceSerializer);
    final byte[] nameSpaceBytes;
    try {
        CompositeKeySerializationUtils.writeNameSpace(
                namespace, namespaceSerializer, namespaceOutputView, ambiguousKeyPossible);
        nameSpaceBytes = namespaceOutputView.getCopyOfBuffer();
    } catch (IOException ex) {
        throw new FlinkRuntimeException("Failed to get keys from RocksDB state backend.", ex);
    }

    RocksIteratorWrapper iterator =
            RocksDBOperationUtils.getRocksIterator(db, columnInfo.columnFamilyHandle, readOptions);
    iterator.seekToFirst();

    final RocksStateKeysIterator<K> iteratorWrapper =
            new RocksStateKeysIterator<>(
                    iterator, state, getKeySerializer(), keyGroupPrefixBytes,
                    ambiguousKeyPossible, nameSpaceBytes);

    Stream<K> targetStream =
            StreamSupport.stream(
                    Spliterators.spliteratorUnknownSize(iteratorWrapper, Spliterator.ORDERED),
                    false);
    return targetStream.onClose(iteratorWrapper::close);
}
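Note that the returned stream registers iteratorWrapper::close as its close handler, so the caller is responsible for closing the stream to release the underlying RocksDB iterator. A minimal consumption sketch follows, assuming a backend keyed by Integer; the variable name, state name, and namespace value are illustrative.

// Hypothetical caller of getKeys; "backend", the state name and the namespace
// value are illustrative. Closing the stream releases the RocksDB iterator
// through the onClose handler registered above.
try (Stream<Integer> activeKeys = backend.getKeys("my-value-state", "ns")) {
    activeKeys.forEach(key -> System.out.println("active key: " + key));
}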