Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.
The class RocksDBKeyedStateBackend, method restoreOldSavepointKeyedState.
/**
 * For backwards compatibility, remove again later!
 */
@Deprecated
private void restoreOldSavepointKeyedState(Collection<KeyGroupsStateHandle> restoreState) throws Exception {
    if (restoreState.isEmpty()) {
        return;
    }
    Preconditions.checkState(1 == restoreState.size(), "Only one element expected here.");
    HashMap<String, RocksDBStateBackend.FinalFullyAsyncSnapshot> namedStates;
    try (FSDataInputStream inputStream = restoreState.iterator().next().openInputStream()) {
        namedStates = InstantiationUtil.deserializeObject(inputStream, userCodeClassLoader);
    }
    Preconditions.checkState(1 == namedStates.size(), "Only one element expected here.");
    DataInputView inputView = namedStates.values().iterator().next().stateHandle.getState(userCodeClassLoader);
    // clear k/v state information before filling it
    kvStateInformation.clear();
    // first read the column family mapping
    int numColumns = inputView.readInt();
    Map<Byte, StateDescriptor<?, ?>> columnFamilyMapping = new HashMap<>(numColumns);
    for (int i = 0; i < numColumns; i++) {
        byte mappingByte = inputView.readByte();
        ObjectInputStream ooIn = new InstantiationUtil.ClassLoaderObjectInputStream(new DataInputViewStream(inputView), userCodeClassLoader);
        StateDescriptor stateDescriptor = (StateDescriptor) ooIn.readObject();
        columnFamilyMapping.put(mappingByte, stateDescriptor);
        // this will fill in the k/v state information
        getColumnFamily(stateDescriptor, MigrationNamespaceSerializerProxy.INSTANCE);
    }
    // read entries until EOF; the expected EOFException terminates the loop
    try {
        while (true) {
            byte mappingByte = inputView.readByte();
            ColumnFamilyHandle handle = getColumnFamily(columnFamilyMapping.get(mappingByte), MigrationNamespaceSerializerProxy.INSTANCE);
            byte[] keyAndNamespace = BytePrimitiveArraySerializer.INSTANCE.deserialize(inputView);
            ByteArrayInputStreamWithPos bis = new ByteArrayInputStreamWithPos(keyAndNamespace);
            K reconstructedKey = keySerializer.deserialize(new DataInputViewStreamWrapper(bis));
            int len = bis.getPosition();
            int keyGroup = (byte) KeyGroupRangeAssignment.assignToKeyGroup(reconstructedKey, numberOfKeyGroups);
            if (keyGroupPrefixBytes == 1) {
                // shift the key one byte right (overwriting the separator byte 42
                // between key and namespace) and prepend the key group
                System.arraycopy(keyAndNamespace, 0, keyAndNamespace, 1, len);
                keyAndNamespace[0] = (byte) keyGroup;
            } else {
                byte[] largerKey = new byte[1 + keyAndNamespace.length];
                // write the two-byte key group
                largerKey[0] = (byte) ((keyGroup >> 8) & 0xFF);
                largerKey[1] = (byte) (keyGroup & 0xFF);
                // write the key
                System.arraycopy(keyAndNamespace, 0, largerKey, 2, len);
                // skip the separator byte (42), then write the namespace
                System.arraycopy(keyAndNamespace, 1 + len, largerKey, 2 + len, keyAndNamespace.length - len - 1);
                keyAndNamespace = largerKey;
            }
            byte[] value = BytePrimitiveArraySerializer.INSTANCE.deserialize(inputView);
            db.put(handle, keyAndNamespace, value);
        }
    } catch (EOFException e) {
        // expected: signals the end of the state stream
    }
}
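The interesting part of this migration is the key layout: every entry written to a column family is prefixed with its key group, encoded in a single byte when the backend uses one prefix byte and in two big-endian bytes otherwise, followed by the serialized key and namespace. A minimal sketch of that layout in isolation (buildPrefixedKey is a hypothetical helper, not Flink API; all inputs are plain byte arrays):

// Hypothetical helper illustrating the RocksDB key layout used above:
// [key-group prefix (1 or 2 bytes)][serialized key][serialized namespace]
static byte[] buildPrefixedKey(int keyGroup, int prefixBytes, byte[] key, byte[] namespace) {
    byte[] out = new byte[prefixBytes + key.length + namespace.length];
    if (prefixBytes == 1) {
        out[0] = (byte) keyGroup;
    } else {
        // two bytes, big-endian, matching the else-branch above
        out[0] = (byte) ((keyGroup >> 8) & 0xFF);
        out[1] = (byte) (keyGroup & 0xFF);
    }
    System.arraycopy(key, 0, out, prefixBytes, key.length);
    System.arraycopy(namespace, 0, out, prefixBytes + key.length, namespace.length);
    return out;
}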
Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.
The class RocksDBMergeIteratorTest, method testMergeIterator.
public void testMergeIterator(int maxParallelism) throws Exception {
    Random random = new Random(1234);
    File tmpDir = CommonTestUtils.createTempDirectory();
    RocksDB rocksDB = RocksDB.open(tmpDir.getAbsolutePath());
    try {
        List<Tuple2<RocksIterator, Integer>> rocksIteratorsWithKVStateId = new ArrayList<>();
        List<Tuple2<ColumnFamilyHandle, Integer>> columnFamilyHandlesWithKeyCount = new ArrayList<>();
        int totalKeysExpected = 0;
        for (int c = 0; c < NUM_KEY_VAL_STATES; ++c) {
            ColumnFamilyHandle handle = rocksDB.createColumnFamily(new ColumnFamilyDescriptor(("column-" + c).getBytes(ConfigConstants.DEFAULT_CHARSET)));
            ByteArrayOutputStreamWithPos bos = new ByteArrayOutputStreamWithPos();
            DataOutputStream dos = new DataOutputStream(bos);
            int numKeys = random.nextInt(MAX_NUM_KEYS + 1);
            for (int i = 0; i < numKeys; ++i) {
                if (maxParallelism <= Byte.MAX_VALUE) {
                    dos.writeByte(i);
                } else {
                    dos.writeShort(i);
                }
                dos.writeInt(i);
                byte[] key = bos.toByteArray();
                byte[] val = new byte[] { 42 };
                rocksDB.put(handle, key, val);
                bos.reset();
            }
            columnFamilyHandlesWithKeyCount.add(new Tuple2<>(handle, numKeys));
            totalKeysExpected += numKeys;
        }
        int id = 0;
        for (Tuple2<ColumnFamilyHandle, Integer> columnFamilyHandle : columnFamilyHandlesWithKeyCount) {
            rocksIteratorsWithKVStateId.add(new Tuple2<>(rocksDB.newIterator(columnFamilyHandle.f0), id));
            ++id;
        }
        RocksDBKeyedStateBackend.RocksDBMergeIterator mergeIterator = new RocksDBKeyedStateBackend.RocksDBMergeIterator(rocksIteratorsWithKVStateId, maxParallelism <= Byte.MAX_VALUE ? 1 : 2);
        int prevKVState = -1;
        int prevKey = -1;
        int prevKeyGroup = -1;
        int totalKeysActual = 0;
        while (mergeIterator.isValid()) {
            ByteBuffer bb = ByteBuffer.wrap(mergeIterator.key());
            int keyGroup = maxParallelism > Byte.MAX_VALUE ? bb.getShort() : bb.get();
            int key = bb.getInt();
            Assert.assertTrue(keyGroup >= prevKeyGroup);
            Assert.assertTrue(key >= prevKey);
            Assert.assertEquals(prevKeyGroup != keyGroup, mergeIterator.isNewKeyGroup());
            Assert.assertEquals(prevKVState != mergeIterator.kvStateId(), mergeIterator.isNewKeyValueState());
            prevKeyGroup = keyGroup;
            prevKVState = mergeIterator.kvStateId();
            mergeIterator.next();
            ++totalKeysActual;
        }
        Assert.assertEquals(totalKeysExpected, totalKeysActual);
        for (Tuple2<ColumnFamilyHandle, Integer> handleWithCount : columnFamilyHandlesWithKeyCount) {
            rocksDB.dropColumnFamily(handleWithCount.f0);
        }
    } finally {
        rocksDB.close();
    }
}
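The test builds one RocksIterator per column family and lets the merge iterator interleave them in key-group order. Stripped of the test harness, the per-handle pattern looks like the following sketch (the path and column family name are illustrative, and try-with-resources assumes a RocksDB Java version where these handles implement AutoCloseable; older versions call dispose() instead):

import org.rocksdb.*;

public class ColumnFamilyScanDemo {

    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/cf-scan-demo")) {
            // create a named column family and write one entry into it
            ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("demo".getBytes()));
            db.put(handle, new byte[] { 0, 1 }, new byte[] { 42 });
            // scan the column family, as the test does before merging
            try (RocksIterator it = db.newIterator(handle)) {
                for (it.seekToFirst(); it.isValid(); it.next()) {
                    System.out.println(it.key().length + " key bytes -> " + it.value().length + " value bytes");
                }
            }
            db.dropColumnFamily(handle);
        }
    }
}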
Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.
The class RocksDBStateBackendTest, method setupRocksKeyedStateBackend.
public void setupRocksKeyedStateBackend() throws Exception {
    blocker = new OneShotLatch();
    waiter = new OneShotLatch();
    testStreamFactory = new BlockerCheckpointStreamFactory(1024 * 1024);
    testStreamFactory.setBlockerLatch(blocker);
    testStreamFactory.setWaiterLatch(waiter);
    testStreamFactory.setAfterNumberInvocations(100);
    RocksDBStateBackend backend = getStateBackend();
    Environment env = new DummyEnvironment("TestTask", 1, 0);
    keyedStateBackend = (RocksDBKeyedStateBackend<Integer>) backend.createKeyedStateBackend(env, new JobID(), "Test", IntSerializer.INSTANCE, 2, new KeyGroupRange(0, 1), mock(TaskKvStateRegistry.class));
    testState1 = keyedStateBackend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, new ValueStateDescriptor<>("TestState-1", Integer.class, 0));
    testState2 = keyedStateBackend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, new ValueStateDescriptor<>("TestState-2", String.class, ""));
    allCreatedCloseables = new ArrayList<>();
    // wrap the RocksDB instance in a spy so every native resource it hands out can be tracked
    keyedStateBackend.db = spy(keyedStateBackend.db);
    doAnswer(new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            RocksIterator rocksIterator = spy((RocksIterator) invocationOnMock.callRealMethod());
            allCreatedCloseables.add(rocksIterator);
            return rocksIterator;
        }
    }).when(keyedStateBackend.db).newIterator(any(ColumnFamilyHandle.class), any(ReadOptions.class));
    doAnswer(new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            Snapshot snapshot = spy((Snapshot) invocationOnMock.callRealMethod());
            allCreatedCloseables.add(snapshot);
            return snapshot;
        }
    }).when(keyedStateBackend.db).getSnapshot();
    doAnswer(new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            ColumnFamilyHandle columnFamilyHandle = spy((ColumnFamilyHandle) invocationOnMock.callRealMethod());
            allCreatedCloseables.add(columnFamilyHandle);
            return columnFamilyHandle;
        }
    }).when(keyedStateBackend.db).createColumnFamily(any(ColumnFamilyDescriptor.class));
    for (int i = 0; i < 100; ++i) {
        keyedStateBackend.setCurrentKey(i);
        testState1.update(4200 + i);
        testState2.update("S-" + (4200 + i));
    }
}
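Because every iterator, snapshot, and column family handle ends up in allCreatedCloseables as a Mockito spy, a companion check can later assert that the backend released each native resource exactly once. A hedged sketch of such a check (the method name is hypothetical; dispose() matches the RocksDB version of this era, while newer versions expose close()):

// Hypothetical companion check for the setup above: every native handle
// the spied RocksDB instance handed out must have been released once.
private void verifyRocksObjectsReleased(List<RocksObject> allCreatedCloseables) {
    for (RocksObject rocksCloseable : allCreatedCloseables) {
        verify(rocksCloseable, times(1)).dispose(); // close() in newer RocksDB versions
    }
}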
Use of org.rocksdb.ColumnFamilyHandle in project voldemort by voldemort.
The class RocksDbStorageConfiguration, method getStore.
@Override
public StorageEngine<ByteArray, byte[], byte[]> getStore(StoreDefinition storeDef, RoutingStrategy strategy) {
    String storeName = storeDef.getName();
    if (!stores.containsKey(storeName)) {
        String dataDir = this.voldemortconfig.getRdbDataDirectory() + "/" + storeName;
        new File(dataDir).mkdirs();
        Properties dbProperties = parseProperties(VoldemortConfig.ROCKSDB_DB_OPTIONS);
        DBOptions dbOptions = (dbProperties.size() > 0) ? DBOptions.getDBOptionsFromProps(dbProperties) : new DBOptions();
        if (dbOptions == null) {
            throw new StorageInitializationException("Unable to parse Data Base Options.");
        }
        dbOptions.setCreateIfMissing(true);
        dbOptions.setCreateMissingColumnFamilies(true);
        dbOptions.createStatistics();
        Properties cfProperties = parseProperties(VoldemortConfig.ROCKSDB_CF_OPTIONS);
        if (this.voldemortconfig.getRocksdbPrefixKeysWithPartitionId()) {
            cfProperties.setProperty("prefix_extractor", "fixed:" + StoreBinaryFormat.PARTITIONID_PREFIX_SIZE);
        }
        ColumnFamilyOptions cfOptions = (cfProperties.size() > 0) ? ColumnFamilyOptions.getColumnFamilyOptionsFromProps(cfProperties) : new ColumnFamilyOptions();
        if (cfOptions == null) {
            throw new StorageInitializationException("Unable to parse Column Family Options.");
        }
        // Create a non-default column family to hold the store data.
        List<ColumnFamilyDescriptor> descriptors = new ArrayList<ColumnFamilyDescriptor>();
        descriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions));
        descriptors.add(new ColumnFamilyDescriptor(storeName.getBytes(), cfOptions));
        List<ColumnFamilyHandle> handles = new ArrayList<ColumnFamilyHandle>();
        try {
            RocksDB rdbStore = RocksDB.open(dbOptions, dataDir, descriptors, handles);
            // Dispose of the default column family immediately. We don't use it, and if it has not been
            // disposed by the time the DB is closed, the RocksDB code can terminate abnormally (if RocksDB
            // was built with assertions enabled). The handle would eventually be disposed by the Java
            // finalizer once it goes out of scope, but that is not fast enough for the unit tests.
            handles.get(0).dispose();
            ColumnFamilyHandle storeHandle = handles.get(1);
            RocksDbStorageEngine rdbStorageEngine;
            if (this.voldemortconfig.getRocksdbPrefixKeysWithPartitionId()) {
                rdbStorageEngine = new PartitionPrefixedRocksDbStorageEngine(storeName, rdbStore, storeHandle, cfOptions, lockStripes, strategy, voldemortconfig.isRocksdbEnableReadLocks());
            } else {
                rdbStorageEngine = new RocksDbStorageEngine(storeName, rdbStore, storeHandle, cfOptions, lockStripes, voldemortconfig.isRocksdbEnableReadLocks());
            }
            stores.put(storeName, rdbStorageEngine);
        } catch (Exception e) {
            throw new StorageInitializationException(e);
        }
    }
    return stores.get(storeName);
}
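Opening a database with explicit column families follows a fixed contract in the RocksDB Java API: the descriptor list must contain the default column family, and open() fills the handle list positionally, one handle per descriptor. A minimal sketch of that contract outside Voldemort (the path and store name are illustrative; try-with-resources assumes a RocksDB version with AutoCloseable handles):

List<ColumnFamilyDescriptor> descriptors = new ArrayList<>();
descriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
descriptors.add(new ColumnFamilyDescriptor("my-store".getBytes()));
List<ColumnFamilyHandle> handles = new ArrayList<>();
try (DBOptions options = new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true);
     RocksDB db = RocksDB.open(options, "/tmp/cf-open-demo", descriptors, handles)) {
    // handles.get(0) is the default column family, handles.get(1) is "my-store"
    db.put(handles.get(1), "k".getBytes(), "v".getBytes());
} catch (RocksDBException e) {
    throw new RuntimeException(e);
}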
Use of org.rocksdb.ColumnFamilyHandle in project jstorm by alibaba.
The class RocksTTLDBCache, method putBatch.
protected void putBatch(Map<String, Object> map, Entry<Integer, ColumnFamilyHandle> putEntry) {
    WriteOptions writeOpts = null;
    WriteBatch writeBatch = null;
    Set<byte[]> putKeys = new HashSet<byte[]>();
    try {
        writeOpts = new WriteOptions();
        writeBatch = new WriteBatch();
        for (Entry<String, Object> entry : map.entrySet()) {
            String key = entry.getKey();
            Object value = entry.getValue();
            byte[] data = Utils.javaSerialize(value);
            if (StringUtils.isBlank(key) || data == null || data.length == 0) {
                continue;
            }
            byte[] keyByte = key.getBytes();
            writeBatch.put(putEntry.getValue(), keyByte, data);
            putKeys.add(keyByte);
        }
        // apply all puts atomically to the target window's column family
        ttlDB.write(writeOpts, writeBatch);
    } catch (Exception e) {
        LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
    } finally {
        if (writeOpts != null) {
            writeOpts.dispose();
        }
        if (writeBatch != null) {
            writeBatch.dispose();
        }
    }
    // remove the keys from every other window's column family so that
    // each key lives in exactly one TTL window
    for (Entry<Integer, ColumnFamilyHandle> entry : windowHandlers.entrySet()) {
        if (entry.getKey().equals(putEntry.getKey())) {
            continue;
        }
        for (byte[] keyByte : putKeys) {
            try {
                ttlDB.remove(entry.getValue(), keyByte);
            } catch (Exception e) {
                LOG.error("Failed to remove other's " + new String(keyByte), e);
            }
        }
    }
}
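The write goes through a WriteBatch so all puts for the window become visible atomically. On RocksDB Java versions where native handles implement AutoCloseable, the same pattern can be written with try-with-resources instead of the explicit dispose() calls above; a sketch under that assumption (db, cfHandle, entries, and LOG stand in for the surrounding fields):

// Sketch: atomic multi-put into one column family via WriteBatch.
// db, cfHandle, entries and LOG are assumed from the enclosing class.
try (WriteOptions writeOpts = new WriteOptions();
     WriteBatch batch = new WriteBatch()) {
    for (Map.Entry<String, byte[]> entry : entries.entrySet()) {
        batch.put(cfHandle, entry.getKey().getBytes(), entry.getValue());
    }
    db.write(writeOpts, batch); // all-or-nothing
} catch (RocksDBException e) {
    LOG.error("Failed to write batch", e);
}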