Example 41 with ColumnFamilyHandle

use of org.rocksdb.ColumnFamilyHandle in project kafka by apache.

the class RocksDBTimestampedStore method openRocksDB.

@Override
void openRocksDB(final DBOptions dbOptions, final ColumnFamilyOptions columnFamilyOptions) {
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions),
        new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    try {
        // regular case: the store already contains both column families
        db = RocksDB.open(dbOptions, dbDir.getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        setDbAccessor(columnFamilies.get(0), columnFamilies.get(1));
    } catch (final RocksDBException e) {
        if ("Column family not found: keyValueWithTimestamp".equals(e.getMessage())) {
            // upgrade case: an existing store predates the timestamped format and lacks
            // the column family; reopen with the default family only, then create it
            try {
                db = RocksDB.open(dbOptions, dbDir.getAbsolutePath(), columnFamilyDescriptors.subList(0, 1), columnFamilies);
                columnFamilies.add(db.createColumnFamily(columnFamilyDescriptors.get(1)));
            } catch (final RocksDBException fatal) {
                throw new ProcessorStateException("Error opening store " + name + " at location " + dbDir.toString(), fatal);
            }
            setDbAccessor(columnFamilies.get(0), columnFamilies.get(1));
        } else {
            throw new ProcessorStateException("Error opening store " + name + " at location " + dbDir.toString(), e);
        }
    }
}
Also used: RocksDBException (org.rocksdb.RocksDBException), ArrayList (java.util.ArrayList), ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor), ProcessorStateException (org.apache.kafka.streams.errors.ProcessorStateException), ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle)
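
The message check in the catch block is how the store detects a first-time upgrade: a DB written by an older, non-timestamped store lacks the keyValueWithTimestamp column family, so the first open fails and the store reopens with the default family alone before creating the missing one. Below is a minimal, self-contained sketch of the same open-with-fallback pattern; the path /tmp/fallback-demo, the family name "extra", and the blanket catch (production code should inspect the error, as Kafka does) are illustrative assumptions, not part of the original.

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class OpenWithFallback {
    public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (DBOptions dbOptions = new DBOptions().setCreateIfMissing(true);
             ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) {
            final List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
                new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions),
                new ColumnFamilyDescriptor("extra".getBytes(StandardCharsets.UTF_8), cfOptions));
            final List<ColumnFamilyHandle> handles = new ArrayList<>(descriptors.size());
            RocksDB db;
            try {
                // regular case: the DB already contains both column families
                db = RocksDB.open(dbOptions, "/tmp/fallback-demo", descriptors, handles);
            } catch (final RocksDBException e) {
                // upgrade case: reopen with the default family only, then create the new one
                db = RocksDB.open(dbOptions, "/tmp/fallback-demo", descriptors.subList(0, 1), handles);
                handles.add(db.createColumnFamily(descriptors.get(1)));
            }
            // column family handles must be closed before the DB itself
            for (final ColumnFamilyHandle handle : handles) {
                handle.close();
            }
            db.close();
        }
    }
}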

Example 42 with ColumnFamilyHandle

use of org.rocksdb.ColumnFamilyHandle in project kafka by apache.

the class RocksDBTimestampedStoreTest method shouldOpenExistingStoreInRegularMode.

@Test
public void shouldOpenExistingStoreInRegularMode() throws Exception {
    // prepare store
    rocksDBStore.init((StateStoreContext) context, rocksDBStore);
    rocksDBStore.put(new Bytes("key".getBytes()), "timestamped".getBytes());
    rocksDBStore.close();
    // re-open store
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) {
        rocksDBStore.init((StateStoreContext) context, rocksDBStore);
        assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode"));
    } finally {
        rocksDBStore.close();
    }
    // verify store
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions),
        new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    RocksDB db = null;
    ColumnFamilyHandle noTimestampColumnFamily = null;
    ColumnFamilyHandle withTimestampColumnFamily = null;
    try {
        db = RocksDB.open(dbOptions, new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        noTimestampColumnFamily = columnFamilies.get(0);
        withTimestampColumnFamily = columnFamilies.get(1);
        // the value must live in the timestamped column family only: absent from the
        // plain family, and 11 bytes ("timestamped") in the timestamped one
        assertThat(db.get(noTimestampColumnFamily, "key".getBytes()), new IsNull<>());
        assertThat(db.getLongProperty(noTimestampColumnFamily, "rocksdb.estimate-num-keys"), is(0L));
        assertThat(db.get(withTimestampColumnFamily, "key".getBytes()).length, is(11));
        assertThat(db.getLongProperty(withTimestampColumnFamily, "rocksdb.estimate-num-keys"), is(1L));
    } finally {
        // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
        if (noTimestampColumnFamily != null) {
            noTimestampColumnFamily.close();
        }
        if (withTimestampColumnFamily != null) {
            withTimestampColumnFamily.close();
        }
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        columnFamilyOptions.close();
    }
}
Also used: ColumnFamilyOptions (org.rocksdb.ColumnFamilyOptions), Bytes (org.apache.kafka.common.utils.Bytes), RocksDB (org.rocksdb.RocksDB), ArrayList (java.util.ArrayList), LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender), DBOptions (org.rocksdb.DBOptions), ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor), File (java.io.File), ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle), Test (org.junit.Test)
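
The ordering comment in the finally block generalizes beyond this test: a ColumnFamilyHandle borrows native memory owned by its RocksDB instance, and the options must outlive the DB that was opened with them. A small helper capturing that order as a sketch; RocksCleanup and closeAll are placeholder names, not part of the original test.

import java.util.List;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;

final class RocksCleanup {
    // Close order: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions
    static void closeAll(final List<ColumnFamilyHandle> handles, final RocksDB db,
                         final DBOptions dbOptions, final ColumnFamilyOptions cfOptions) {
        for (final ColumnFamilyHandle handle : handles) {
            handle.close();
        }
        if (db != null) {
            db.close();
        }
        dbOptions.close();
        cfOptions.close();
    }
}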

Example 43 with ColumnFamilyHandle

use of org.rocksdb.ColumnFamilyHandle in project alluxio by Alluxio.

the class RocksStoreTest method backupRestore.

@Test
public void backupRestore() throws Exception {
    ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setMemTableConfig(new HashLinkedListMemTableConfig())
        .setCompressionType(CompressionType.NO_COMPRESSION)
        // We always search using the initial long key
        .useFixedLengthPrefixExtractor(Longs.BYTES);
    List<ColumnFamilyDescriptor> columnDescriptors = Arrays.asList(new ColumnFamilyDescriptor("test".getBytes(), cfOpts));
    String dbDir = mFolder.newFolder("rocks").getAbsolutePath();
    String backupsDir = mFolder.newFolder("rocks-backups").getAbsolutePath();
    AtomicReference<ColumnFamilyHandle> testColumn = new AtomicReference<>();
    RocksStore store = new RocksStore("test", dbDir, backupsDir, columnDescriptors, Arrays.asList(testColumn));
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    RocksDB db = store.getDb();
    int count = 10;
    for (int i = 0; i < count; i++) {
        db.put(testColumn.get(), new WriteOptions().setDisableWAL(true), ("a" + i).getBytes(), "b".getBytes());
    }
    store.writeToCheckpoint(baos);
    String newDbDir = mFolder.newFolder("rocks-new").getAbsolutePath();
    store = new RocksStore("test-new", newDbDir, backupsDir, columnDescriptors, Arrays.asList(testColumn));
    store.restoreFromCheckpoint(new CheckpointInputStream(new ByteArrayInputStream(baos.toByteArray())));
    db = store.getDb();
    for (int i = 0; i < count; i++) {
        assertArrayEquals("b".getBytes(), db.get(testColumn.get(), ("a" + i).getBytes()));
    }
}
Also used: RocksDB (org.rocksdb.RocksDB), AtomicReference (java.util.concurrent.atomic.AtomicReference), ByteArrayOutputStream (java.io.ByteArrayOutputStream), ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor), CheckpointInputStream (alluxio.master.journal.checkpoint.CheckpointInputStream), ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle), ColumnFamilyOptions (org.rocksdb.ColumnFamilyOptions), WriteOptions (org.rocksdb.WriteOptions), ByteArrayInputStream (java.io.ByteArrayInputStream), HashLinkedListMemTableConfig (org.rocksdb.HashLinkedListMemTableConfig), Test (org.junit.Test)
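
The writes above pass setDisableWAL(true), so RocksDB logs nothing for crash recovery; durability here comes entirely from the checkpoint the test writes afterwards. Outside a checkpointing store, a WAL-free bulk load needs an explicit flush instead. A sketch of that pattern, assuming an already-open db and column family handle; WalFreeLoad, the key scheme, and the count parameter are illustrative.

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.FlushOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteOptions;

final class WalFreeLoad {
    // Bulk-load one column family with the WAL disabled, then flush so the writes
    // reach SST files; without the WAL, unflushed writes are lost on a crash.
    static void load(final RocksDB db, final ColumnFamilyHandle column, final int count)
            throws RocksDBException {
        try (WriteOptions writeOptions = new WriteOptions().setDisableWAL(true);
             FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
            for (int i = 0; i < count; i++) {
                db.put(column, writeOptions, ("a" + i).getBytes(), "b".getBytes());
            }
            db.flush(flushOptions, column);
        }
    }
}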

Example 44 with ColumnFamilyHandle

use of org.rocksdb.ColumnFamilyHandle in project flink by apache.

the class RocksDBIncrementalCheckpointUtils method deleteRange.

/**
 * Deletes the records that fall into [beginKeyBytes, endKeyBytes) from the db.
 *
 * @param db the target db to be clipped.
 * @param columnFamilyHandles the column families to be clipped.
 * @param beginKeyBytes the begin key bytes (inclusive).
 * @param endKeyBytes the end key bytes (exclusive).
 * @param writeBatchSize the maximum size of a write batch before it is flushed.
 */
private static void deleteRange(RocksDB db, List<ColumnFamilyHandle> columnFamilyHandles, byte[] beginKeyBytes, byte[] endKeyBytes, @Nonnegative long writeBatchSize) throws RocksDBException {
    for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
        try (ReadOptions readOptions = new ReadOptions();
            RocksIteratorWrapper iteratorWrapper = RocksDBOperationUtils.getRocksIterator(db, columnFamilyHandle, readOptions);
            RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, writeBatchSize)) {
            iteratorWrapper.seek(beginKeyBytes);
            while (iteratorWrapper.isValid()) {
                final byte[] currentKey = iteratorWrapper.key();
                if (beforeThePrefixBytes(currentKey, endKeyBytes)) {
                    writeBatchWrapper.remove(columnFamilyHandle, currentKey);
                } else {
                    break;
                }
                iteratorWrapper.next();
            }
        }
    }
}
Also used: ReadOptions (org.rocksdb.ReadOptions), ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle)
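
This helper clips the range by iterating and staging per-key deletes in a write batch. For comparison, RocksDB itself exposes a native range delete that writes a single range tombstone covering [beginKey, endKey); which variant is faster depends on the workload, since range tombstones shift work onto subsequent reads. A minimal sketch (RangeDelete and clip are illustrative names):

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class RangeDelete {
    // One range tombstone covering [beginKey, endKey) instead of one delete per key.
    static void clip(final RocksDB db, final ColumnFamilyHandle handle,
                     final byte[] beginKey, final byte[] endKey) throws RocksDBException {
        db.deleteRange(handle, beginKey, endKey);
    }
}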

Example 45 with ColumnFamilyHandle

use of org.rocksdb.ColumnFamilyHandle in project flink by apache.

the class RocksDBHeapTimersFullRestoreOperation method restoreKVStateData.

/**
 * Restore the KV-state / ColumnFamily data for all key-groups referenced by the current state
 * handle.
 */
private void restoreKVStateData(
        ThrowingIterator<KeyGroup> keyGroups,
        Map<Integer, ColumnFamilyHandle> columnFamilies,
        Map<Integer, HeapPriorityQueueSnapshotRestoreWrapper<?>> restoredPQStates)
        throws IOException, RocksDBException, StateMigrationException {
    // for all key-groups in the current state handle...
    try (RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.rocksHandle.getDb(), writeBatchSize)) {
        HeapPriorityQueueSnapshotRestoreWrapper<HeapPriorityQueueElement> restoredPQ = null;
        ColumnFamilyHandle handle = null;
        while (keyGroups.hasNext()) {
            KeyGroup keyGroup = keyGroups.next();
            try (ThrowingIterator<KeyGroupEntry> groupEntries = keyGroup.getKeyGroupEntries()) {
                int oldKvStateId = -1;
                while (groupEntries.hasNext()) {
                    KeyGroupEntry groupEntry = groupEntries.next();
                    int kvStateId = groupEntry.getKvStateId();
                    // entries arrive grouped by state id, so the target column family
                    // or priority queue is re-resolved only when the id changes
                    if (kvStateId != oldKvStateId) {
                        oldKvStateId = kvStateId;
                        handle = columnFamilies.get(kvStateId);
                        restoredPQ = getRestoredPQ(restoredPQStates, kvStateId);
                    }
                    if (restoredPQ != null) {
                        restoreQueueElement(restoredPQ, groupEntry);
                    } else if (handle != null) {
                        writeBatchWrapper.put(handle, groupEntry.getKey(), groupEntry.getValue());
                    } else {
                        throw new IllegalStateException("Unknown state id: " + kvStateId);
                    }
                }
            }
        }
    }
}
Also used: KeyGroupEntry (org.apache.flink.runtime.state.restore.KeyGroupEntry), KeyGroup (org.apache.flink.runtime.state.restore.KeyGroup), RocksDBWriteBatchWrapper (org.apache.flink.contrib.streaming.state.RocksDBWriteBatchWrapper), HeapPriorityQueueElement (org.apache.flink.runtime.state.heap.HeapPriorityQueueElement), ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle)
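
RocksDBWriteBatchWrapper is Flink-internal; as the two Flink examples suggest, it buffers put and remove calls and applies them in batches bounded by writeBatchSize. A rough plain-RocksDB equivalent of that buffering, using WriteBatch directly; BatchedPut, sizeLimitBytes, and the disabled WAL are illustrative choices, not Flink's exact implementation.

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

final class BatchedPut {
    // Buffer puts in a WriteBatch and apply them in one write per size-limited chunk.
    static void putAll(final RocksDB db, final ColumnFamilyHandle handle,
                       final byte[][] keys, final byte[][] values,
                       final long sizeLimitBytes) throws RocksDBException {
        try (WriteBatch batch = new WriteBatch();
             WriteOptions writeOptions = new WriteOptions().setDisableWAL(true)) {
            for (int i = 0; i < keys.length; i++) {
                batch.put(handle, keys[i], values[i]);
                if (batch.getDataSize() >= sizeLimitBytes) {
                    db.write(writeOptions, batch);
                    batch.clear();
                }
            }
            if (batch.count() > 0) {
                db.write(writeOptions, batch);
            }
        }
    }
}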

Aggregations

ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle)55 ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor)27 ArrayList (java.util.ArrayList)21 RocksDBException (org.rocksdb.RocksDBException)19 RocksDB (org.rocksdb.RocksDB)16 Test (org.junit.Test)10 DBOptions (org.rocksdb.DBOptions)9 ColumnFamilyOptions (org.rocksdb.ColumnFamilyOptions)8 File (java.io.File)7 HashMap (java.util.HashMap)6 Map (java.util.Map)6 WriteOptions (org.rocksdb.WriteOptions)6 IOException (java.io.IOException)5 ValueStateDescriptor (org.apache.flink.api.common.state.ValueStateDescriptor)5 ReadOptions (org.rocksdb.ReadOptions)5 List (java.util.List)4 Tuple2 (org.apache.flink.api.java.tuple.Tuple2)4 KeyGroup (org.apache.flink.runtime.state.restore.KeyGroup)4 LinkedHashMap (java.util.LinkedHashMap)3 SortedMap (java.util.SortedMap)3