Search in sources :

Example 51 with ColumnFamilyHandle

use of org.rocksdb.ColumnFamilyHandle in project flink by apache.

The class RocksDBIncrementalCheckpointUtilsTest defines the method testClipDBWithKeyGroupRangeHelper.

/**
 * Fills a fresh RocksDB instance with 100 entries per key group of {@code currentGroupRange},
 * clips the DB down to {@code targetGroupRange}, and verifies that exactly the entries of the
 * retained key groups survive while all others were deleted.
 *
 * @param targetGroupRange the key-group range that should remain after clipping
 * @param currentGroupRange the key-group range initially written to the DB
 * @param keyGroupPrefixBytes number of bytes used for the serialized key-group prefix
 * @throws RocksDBException on RocksDB read/write failures
 * @throws IOException on key serialization failures
 */
private void testClipDBWithKeyGroupRangeHelper(KeyGroupRange targetGroupRange, KeyGroupRange currentGroupRange, int keyGroupPrefixBytes) throws RocksDBException, IOException {
    try (RocksDB rocksDB = RocksDB.open(tmp.newFolder().getAbsolutePath());
        ColumnFamilyHandle columnFamilyHandle = rocksDB.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()))) {
        int currentGroupRangeStart = currentGroupRange.getStartKeyGroup();
        int currentGroupRangeEnd = currentGroupRange.getEndKeyGroup();
        // Reused scratch buffer; each serialization call clears it first.
        DataOutputSerializer outputView = new DataOutputSerializer(32);
        // Populate: 100 entries per key group, value = string form of the key.
        for (int i = currentGroupRangeStart; i <= currentGroupRangeEnd; ++i) {
            for (int j = 0; j < 100; ++j) {
                byte[] key = serializeCompositeKey(outputView, i, j, keyGroupPrefixBytes);
                rocksDB.put(columnFamilyHandle, key, String.valueOf(j).getBytes());
            }
        }
        // Sanity check: everything we wrote is readable before clipping.
        for (int i = currentGroupRangeStart; i <= currentGroupRangeEnd; ++i) {
            for (int j = 0; j < 100; ++j) {
                byte[] value = rocksDB.get(columnFamilyHandle, serializeCompositeKey(outputView, i, j, keyGroupPrefixBytes));
                Assert.assertEquals(String.valueOf(j), new String(value));
            }
        }
        RocksDBIncrementalCheckpointUtils.clipDBWithKeyGroupRange(rocksDB, Collections.singletonList(columnFamilyHandle), targetGroupRange, currentGroupRange, keyGroupPrefixBytes, RocksDBConfigurableOptions.WRITE_BATCH_SIZE.defaultValue().getBytes());
        // After clipping, only key groups inside the target range must remain.
        for (int i = currentGroupRangeStart; i <= currentGroupRangeEnd; ++i) {
            for (int j = 0; j < 100; ++j) {
                byte[] value = rocksDB.get(columnFamilyHandle, serializeCompositeKey(outputView, i, j, keyGroupPrefixBytes));
                if (targetGroupRange.contains(i)) {
                    Assert.assertEquals(String.valueOf(j), new String(value));
                } else {
                    Assert.assertNull(value);
                }
            }
        }
    }
}

/**
 * Serializes a composite RocksDB key (key-group prefix followed by the int key) into the given
 * scratch buffer and returns a copy of the serialized bytes.
 *
 * @param outputView reusable serialization buffer; cleared before use
 * @param keyGroup the key group to encode as prefix
 * @param key the int key to encode after the prefix
 * @param keyGroupPrefixBytes number of bytes used for the key-group prefix
 * @return a fresh byte array holding the serialized composite key
 * @throws IOException on serialization failure
 */
private static byte[] serializeCompositeKey(DataOutputSerializer outputView, int keyGroup, int key, int keyGroupPrefixBytes) throws IOException {
    outputView.clear();
    CompositeKeySerializationUtils.writeKeyGroup(keyGroup, keyGroupPrefixBytes, outputView);
    CompositeKeySerializationUtils.writeKey(key, IntSerializer.INSTANCE, outputView, false);
    return outputView.getCopyOfBuffer();
}
Also used : RocksDB(org.rocksdb.RocksDB) DataOutputSerializer(org.apache.flink.core.memory.DataOutputSerializer) ColumnFamilyDescriptor(org.rocksdb.ColumnFamilyDescriptor) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle)

Example 52 with ColumnFamilyHandle

use of org.rocksdb.ColumnFamilyHandle in project flink by apache.

The class RocksDBResource defines the method createNewColumnFamily.

/**
 * Creates and returns a new column family with the given name.
 */
/**
 * Creates a new column family with the given name, registers its handle for later cleanup,
 * and returns it.
 *
 * @param name the name of the column family to create
 * @return the handle of the newly created column family
 * @throws FlinkRuntimeException if RocksDB fails to create the column family
 */
public ColumnFamilyHandle createNewColumnFamily(String name) {
    try {
        ColumnFamilyDescriptor descriptor = new ColumnFamilyDescriptor(name.getBytes(), columnFamilyOptions);
        ColumnFamilyHandle handle = rocksDB.createColumnFamily(descriptor);
        // Track the handle so the resource can close it during teardown.
        columnFamilyHandles.add(handle);
        return handle;
    } catch (Exception ex) {
        throw new FlinkRuntimeException("Could not create column family.", ex);
    }
}
Also used : FlinkRuntimeException(org.apache.flink.util.FlinkRuntimeException) ColumnFamilyDescriptor(org.rocksdb.ColumnFamilyDescriptor) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) FlinkRuntimeException(org.apache.flink.util.FlinkRuntimeException)

Example 53 with ColumnFamilyHandle

use of org.rocksdb.ColumnFamilyHandle in project flink by apache.

The class RocksDBNativeMetricMonitorTest defines the method testClosedGaugesDontRead.

@Test
public void testClosedGaugesDontRead() {
    // Set up a monitor backed by the shared test RocksDB instance.
    SimpleMetricRegistry metricRegistry = new SimpleMetricRegistry();
    GenericMetricGroup metricGroup = new GenericMetricGroup(metricRegistry, UnregisteredMetricGroups.createUnregisteredTaskMetricGroup(), OPERATOR_NAME);
    RocksDBNativeMetricOptions metricOptions = new RocksDBNativeMetricOptions();
    metricOptions.enableSizeAllMemTables();
    RocksDBNativeMetricMonitor nativeMonitor = new RocksDBNativeMetricMonitor(metricOptions, metricGroup, rocksDBResource.getRocksDB());
    ColumnFamilyHandle columnFamily = rocksDBResource.createNewColumnFamily(COLUMN_FAMILY_NAME);
    nativeMonitor.registerColumnFamily(COLUMN_FAMILY_NAME, columnFamily);
    // Once closed, updating the gauge must not touch RocksDB; its value stays zero.
    RocksDBNativeMetricMonitor.RocksDBNativeMetricView gauge = metricRegistry.metrics.get(0);
    gauge.close();
    gauge.update();
    Assert.assertEquals("Closed gauge still queried RocksDB", BigInteger.ZERO, gauge.getValue());
}
Also used : GenericMetricGroup(org.apache.flink.runtime.metrics.groups.GenericMetricGroup) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) Test(org.junit.Test)

Example 54 with ColumnFamilyHandle

use of org.rocksdb.ColumnFamilyHandle in project flink by apache.

The class RocksDBNativeMetricMonitorTest defines the method testMetricMonitorLifecycle.

@Test
public void testMetricMonitorLifecycle() throws Throwable {
    // A locally managed RocksDB resource lets this test drive the full
    // life-cycle by hand and prove that gauges stop touching RocksDB
    // once the monitor has been closed.
    RocksDBResource dbResource = new RocksDBResource();
    dbResource.before();
    SimpleMetricRegistry metricRegistry = new SimpleMetricRegistry();
    GenericMetricGroup metricGroup = new GenericMetricGroup(metricRegistry, UnregisteredMetricGroups.createUnregisteredTaskMetricGroup(), OPERATOR_NAME);
    RocksDBNativeMetricOptions metricOptions = new RocksDBNativeMetricOptions();
    // size-all-mem-tables is never zero: even empty memtables carry overhead,
    // so a successful read is distinguishable from a zeroed gauge.
    metricOptions.enableSizeAllMemTables();
    RocksDBNativeMetricMonitor nativeMonitor = new RocksDBNativeMetricMonitor(metricOptions, metricGroup, dbResource.getRocksDB());
    ColumnFamilyHandle columnFamily = dbResource.createNewColumnFamily(COLUMN_FAMILY_NAME);
    nativeMonitor.registerColumnFamily(COLUMN_FAMILY_NAME, columnFamily);
    Assert.assertEquals("Failed to register metrics for column family", 1, metricRegistry.metrics.size());
    RocksDBNativeMetricMonitor.RocksDBNativeMetricView gauge = metricRegistry.metrics.get(0);
    gauge.update();
    Assert.assertNotEquals("Failed to pull metric from RocksDB", BigInteger.ZERO, gauge.getValue());
    gauge.setValue(0L);
    // Once the monitor is closed, no gauge may reach into RocksDB again;
    // a violation typically surfaces as a segmentation fault here.
    nativeMonitor.close();
    dbResource.after();
    gauge.update();
    Assert.assertEquals("Failed to release RocksDB reference", BigInteger.ZERO, gauge.getValue());
}
Also used : GenericMetricGroup(org.apache.flink.runtime.metrics.groups.GenericMetricGroup) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle) Test(org.junit.Test)

Example 55 with ColumnFamilyHandle

use of org.rocksdb.ColumnFamilyHandle in project flink by apache.

The class RocksDBIncrementalRestoreOperation defines the method restoreDBInstanceFromStateHandle.

/**
 * Downloads the state files of the given handle into a temporary directory, reads the state
 * meta data, and opens a temporary RocksDB instance over the downloaded files.
 *
 * @param restoreStateHandle the remote incremental state handle to restore from
 * @param temporaryRestoreInstancePath local directory that receives the downloaded state
 * @return a wrapper around the opened temporary DB, its handles, descriptors and meta info
 * @throws Exception on download, meta-data read, or DB-open failures
 */
private RestoredDBInstance restoreDBInstanceFromStateHandle(IncrementalRemoteKeyedStateHandle restoreStateHandle, Path temporaryRestoreInstancePath) throws Exception {
    // Pull all state files to the local temporary directory first.
    try (RocksDBStateDownloader downloader = new RocksDBStateDownloader(numberOfTransferringThreads)) {
        downloader.transferAllStateDataToDirectory(restoreStateHandle, temporaryRestoreInstancePath, cancelStreamRegistry);
    }
    // The meta data defines which column families the downloaded DB contains.
    KeyedBackendSerializationProxy<K> proxy = readMetaData(restoreStateHandle.getMetaStateHandle());
    List<StateMetaInfoSnapshot> metaInfoSnapshots = proxy.getStateMetaInfoSnapshots();
    List<ColumnFamilyDescriptor> descriptors = createColumnFamilyDescriptors(metaInfoSnapshots, false);
    // +1 reserves room for the default column family handle added by openDB.
    List<ColumnFamilyHandle> handles = new ArrayList<>(metaInfoSnapshots.size() + 1);
    RocksDB db = RocksDBOperationUtils.openDB(temporaryRestoreInstancePath.toString(), descriptors, handles, RocksDBOperationUtils.createColumnFamilyOptions(this.rocksHandle.getColumnFamilyOptionsFactory(), "default"), this.rocksHandle.getDbOptions());
    return new RestoredDBInstance(db, handles, descriptors, metaInfoSnapshots);
}
Also used : RocksDB(org.rocksdb.RocksDB) ArrayList(java.util.ArrayList) StateMetaInfoSnapshot(org.apache.flink.runtime.state.metainfo.StateMetaInfoSnapshot) RocksDBStateDownloader(org.apache.flink.contrib.streaming.state.RocksDBStateDownloader) ColumnFamilyDescriptor(org.rocksdb.ColumnFamilyDescriptor) ColumnFamilyHandle(org.rocksdb.ColumnFamilyHandle)

Aggregations

ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle)55 ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor)27 ArrayList (java.util.ArrayList)21 RocksDBException (org.rocksdb.RocksDBException)19 RocksDB (org.rocksdb.RocksDB)16 Test (org.junit.Test)10 DBOptions (org.rocksdb.DBOptions)9 ColumnFamilyOptions (org.rocksdb.ColumnFamilyOptions)8 File (java.io.File)7 HashMap (java.util.HashMap)6 Map (java.util.Map)6 WriteOptions (org.rocksdb.WriteOptions)6 IOException (java.io.IOException)5 ValueStateDescriptor (org.apache.flink.api.common.state.ValueStateDescriptor)5 ReadOptions (org.rocksdb.ReadOptions)5 List (java.util.List)4 Tuple2 (org.apache.flink.api.java.tuple.Tuple2)4 KeyGroup (org.apache.flink.runtime.state.restore.KeyGroup)4 LinkedHashMap (java.util.LinkedHashMap)3 SortedMap (java.util.SortedMap)3