Use of org.rocksdb.ColumnFamilyHandle in project kafka by apache.
The class RocksDBTimestampedStore, method openRocksDB.
@Override
void openRocksDB(final DBOptions dbOptions, final ColumnFamilyOptions columnFamilyOptions) {
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions),
        new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    try {
        db = RocksDB.open(dbOptions, dbDir.getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        setDbAccessor(columnFamilies.get(0), columnFamilies.get(1));
    } catch (final RocksDBException e) {
        if ("Column family not found: keyValueWithTimestamp".equals(e.getMessage())) {
            // The store was written by an older, non-timestamped version: reopen it with
            // only the default column family and create the missing timestamp column family.
            try {
                db = RocksDB.open(dbOptions, dbDir.getAbsolutePath(), columnFamilyDescriptors.subList(0, 1), columnFamilies);
                columnFamilies.add(db.createColumnFamily(columnFamilyDescriptors.get(1)));
            } catch (final RocksDBException fatal) {
                throw new ProcessorStateException("Error opening store " + name + " at location " + dbDir.toString(), fatal);
            }
            setDbAccessor(columnFamilies.get(0), columnFamilies.get(1));
        } else {
            throw new ProcessorStateException("Error opening store " + name + " at location " + dbDir.toString(), e);
        }
    }
}
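The catch branch above is the upgrade path: a store written before timestamps were introduced has no "keyValueWithTimestamp" column family, so the store is reopened with only the default column family and the missing family is created. As a rough comparison (not the Kafka Streams code), a minimal sketch of answering the same question up front with RocksDB.listColumnFamilies, assuming the store path is known; the class and method names here are made up for illustration:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class ColumnFamilyProbe {
    // Illustrative only: check up front whether the timestamp column family exists.
    static boolean hasTimestampColumnFamily(final String dbPath) throws RocksDBException {
        try (Options options = new Options()) {
            final List<byte[]> families = RocksDB.listColumnFamilies(options, dbPath);
            final byte[] wanted = "keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8);
            return families.stream().anyMatch(name -> Arrays.equals(name, wanted));
        }
    }
}

Catching the exception, as the method above does, keeps the common case to a single open call instead of listing the column families first.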
Use of org.rocksdb.ColumnFamilyHandle in project kafka by apache.
The class RocksDBTimestampedStoreTest, method shouldOpenExistingStoreInRegularMode.
@Test
public void shouldOpenExistingStoreInRegularMode() throws Exception {
    // prepare store
    rocksDBStore.init((StateStoreContext) context, rocksDBStore);
    rocksDBStore.put(new Bytes("key".getBytes()), "timestamped".getBytes());
    rocksDBStore.close();

    // re-open store
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    } finally {
        rocksDBStore.close();
    }

    // verify store
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions),
        new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    RocksDB db = null;
    ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null;
    try {
        db = RocksDB.open(dbOptions, new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        withTimestampColumnFamily = columnFamilies.get(1);
        assertThat(db.get(noTimestampColumnFamily, "key".getBytes()), new IsNull<>());
        assertThat(db.getLongProperty(noTimestampColumnFamily, "rocksdb.estimate-num-keys"), is(0L));
        assertThat(db.get(withTimestampColumnFamily, "key".getBytes()).length, is(11));
        assertThat(db.getLongProperty(withTimestampColumnFamily, "rocksdb.estimate-num-keys"), is(1L));
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (withTimestampColumnFamily != null) {
            withTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        columnFamilyOptions.close();
    }
}
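The finally block spells out the close order noted in its comment: column family handles first, then the RocksDB instance, then the option objects. A minimal sketch of a helper capturing that order (the class and method names are illustrative, not part of the Kafka test):

import java.util.List;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;

final class RocksResources {
    // Illustrative only: handles first, then the DB, then the option objects.
    static void closeInOrder(final List<ColumnFamilyHandle> handles,
                             final RocksDB db,
                             final DBOptions dbOptions,
                             final ColumnFamilyOptions columnFamilyOptions) {
        handles.forEach(ColumnFamilyHandle::close);
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        columnFamilyOptions.close();
    }
}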
Use of org.rocksdb.ColumnFamilyHandle in project alluxio by Alluxio.
The class RocksStoreTest, method backupRestore.
@Test
public void backupRestore() throws Exception {
    ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setMemTableConfig(new HashLinkedListMemTableConfig())
        .setCompressionType(CompressionType.NO_COMPRESSION)
        // We always search using the initial long key
        .useFixedLengthPrefixExtractor(Longs.BYTES);
    List<ColumnFamilyDescriptor> columnDescriptors = Arrays.asList(new ColumnFamilyDescriptor("test".getBytes(), cfOpts));
    String dbDir = mFolder.newFolder("rocks").getAbsolutePath();
    String backupsDir = mFolder.newFolder("rocks-backups").getAbsolutePath();
    AtomicReference<ColumnFamilyHandle> testColumn = new AtomicReference<>();
    RocksStore store = new RocksStore("test", dbDir, backupsDir, columnDescriptors, Arrays.asList(testColumn));
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    RocksDB db = store.getDb();
    int count = 10;
    for (int i = 0; i < count; i++) {
        db.put(testColumn.get(), new WriteOptions().setDisableWAL(true), ("a" + i).getBytes(), "b".getBytes());
    }
    store.writeToCheckpoint(baos);

    String newBbDir = mFolder.newFolder("rocks-new").getAbsolutePath();
    store = new RocksStore("test-new", newBbDir, backupsDir, columnDescriptors, Arrays.asList(testColumn));
    store.restoreFromCheckpoint(new CheckpointInputStream(new ByteArrayInputStream(baos.toByteArray())));
    db = store.getDb();
    for (int i = 0; i < count; i++) {
        assertArrayEquals("b".getBytes(), db.get(testColumn.get(), ("a" + i).getBytes()));
    }
}
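RocksStore, writeToCheckpoint, and restoreFromCheckpoint are Alluxio-specific classes and methods. For orientation only, a minimal sketch of the plain org.rocksdb checkpoint primitive that this kind of backup is typically built from (this is not Alluxio's implementation, and the directory argument is a placeholder):

import org.rocksdb.Checkpoint;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class CheckpointSketch {
    // Writes a consistent, openable copy of the live DB into checkpointDir.
    static void checkpointTo(final RocksDB db, final String checkpointDir) throws RocksDBException {
        try (Checkpoint checkpoint = Checkpoint.create(db)) {
            checkpoint.createCheckpoint(checkpointDir);
        }
    }
}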
Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.
The class RocksDBIncrementalCheckpointUtils, method deleteRange.
/**
 * Deletes the records that fall into [beginKeyBytes, endKeyBytes) of the db.
 *
 * @param db the target db that needs to be clipped.
 * @param columnFamilyHandles the column families that need to be clipped.
 * @param beginKeyBytes the begin key bytes
 * @param endKeyBytes the end key bytes
 * @param writeBatchSize the maximum size (in bytes) of the write batch used for the deletes
 */
private static void deleteRange(RocksDB db, List<ColumnFamilyHandle> columnFamilyHandles, byte[] beginKeyBytes, byte[] endKeyBytes, @Nonnegative long writeBatchSize) throws RocksDBException {
    for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
        try (ReadOptions readOptions = new ReadOptions();
                RocksIteratorWrapper iteratorWrapper = RocksDBOperationUtils.getRocksIterator(db, columnFamilyHandle, readOptions);
                RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, writeBatchSize)) {
            iteratorWrapper.seek(beginKeyBytes);
            while (iteratorWrapper.isValid()) {
                final byte[] currentKey = iteratorWrapper.key();
                if (beforeThePrefixBytes(currentKey, endKeyBytes)) {
                    writeBatchWrapper.remove(columnFamilyHandle, currentKey);
                } else {
                    break;
                }
                iteratorWrapper.next();
            }
        }
    }
}
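The loop above deletes key by key through a size-capped write batch. For comparison, RocksDB also exposes a native range delete; a minimal sketch using db.deleteRange (not the Flink code shown above, which instead iterates and batches individual deletes; the class and method names here are illustrative):

import java.util.List;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class RangeDeleteSketch {
    // Writes a single range tombstone covering [beginKeyBytes, endKeyBytes) per column family.
    static void deleteRangeNative(final RocksDB db,
                                  final List<ColumnFamilyHandle> columnFamilyHandles,
                                  final byte[] beginKeyBytes,
                                  final byte[] endKeyBytes) throws RocksDBException {
        for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
            db.deleteRange(columnFamilyHandle, beginKeyBytes, endKeyBytes);
        }
    }
}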
Use of org.rocksdb.ColumnFamilyHandle in project flink by apache.
The class RocksDBHeapTimersFullRestoreOperation, method restoreKVStateData.
/**
* Restore the KV-state / ColumnFamily data for all key-groups referenced by the current state
* handle.
*/
private void restoreKVStateData(ThrowingIterator<KeyGroup> keyGroups, Map<Integer, ColumnFamilyHandle> columnFamilies, Map<Integer, HeapPriorityQueueSnapshotRestoreWrapper<?>> restoredPQStates) throws IOException, RocksDBException, StateMigrationException {
    // for all key-groups in the current state handle...
    try (RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.rocksHandle.getDb(), writeBatchSize)) {
        HeapPriorityQueueSnapshotRestoreWrapper<HeapPriorityQueueElement> restoredPQ = null;
        ColumnFamilyHandle handle = null;
        while (keyGroups.hasNext()) {
            KeyGroup keyGroup = keyGroups.next();
            try (ThrowingIterator<KeyGroupEntry> groupEntries = keyGroup.getKeyGroupEntries()) {
                int oldKvStateId = -1;
                while (groupEntries.hasNext()) {
                    KeyGroupEntry groupEntry = groupEntries.next();
                    int kvStateId = groupEntry.getKvStateId();
                    if (kvStateId != oldKvStateId) {
                        oldKvStateId = kvStateId;
                        handle = columnFamilies.get(kvStateId);
                        restoredPQ = getRestoredPQ(restoredPQStates, kvStateId);
                    }
                    if (restoredPQ != null) {
                        restoreQueueElement(restoredPQ, groupEntry);
                    } else if (handle != null) {
                        writeBatchWrapper.put(handle, groupEntry.getKey(), groupEntry.getValue());
                    } else {
                        throw new IllegalStateException("Unknown state id: " + kvStateId);
                    }
                }
            }
        }
    }
}
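The puts above go through Flink's RocksDBWriteBatchWrapper, which collects writes and flushes them once a configured batch size is reached. A minimal sketch of the plain-RocksDB batching underneath (illustrative only; the class, the method name putAll, and the disabled-WAL choice are assumptions made here, not Flink's code):

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

final class WriteBatchSketch {
    // Collects puts into one batch and applies them with a single atomic write.
    static void putAll(final RocksDB db,
                       final ColumnFamilyHandle handle,
                       final byte[][] keys,
                       final byte[][] values) throws RocksDBException {
        try (WriteBatch batch = new WriteBatch();
                WriteOptions options = new WriteOptions().setDisableWAL(true)) {
            for (int i = 0; i < keys.length; i++) {
                batch.put(handle, keys[i], values[i]);
            }
            db.write(options, batch);
        }
    }
}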