Use of org.rocksdb.ColumnFamilyHandle in project jstorm by alibaba.
The class WindowedRocksDbHdfsState, method clear().
@Override
public void clear() {
    Iterator<Entry<TimeWindow, ColumnFamilyHandle>> itr = windowToCFHandler.entrySet().iterator();
    while (itr.hasNext()) {
        // Read the entry once per iteration; calling itr.next() twice would skip
        // every other entry and can throw NoSuchElementException.
        Entry<TimeWindow, ColumnFamilyHandle> entry = itr.next();
        TimeWindow window = entry.getKey();
        ColumnFamilyHandle handler = entry.getValue();
        try {
            rocksDb.dropColumnFamily(handler);
        } catch (Exception e) {
            LOG.error("Failed to remove timeWindow={}", window, e);
        }
    }
    windowToCFHandler.clear();
}
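The method above drops one column family per TimeWindow. Its counterpart, getColumnFamilyHandle (used by getAllKeys below but not shown in this excerpt), presumably looks up or lazily creates the handle for a window. A minimal sketch under that assumption, using the serialized window as the column family name; the real jstorm implementation may differ:

// Hedged sketch: look up or lazily create the column family for a window.
// Assumes the rocksDb, windowToCFHandler and serializer fields of the class above.
private ColumnFamilyHandle getColumnFamilyHandle(TimeWindow window) throws RocksDBException {
    ColumnFamilyHandle handler = windowToCFHandler.get(window);
    if (handler == null) {
        // Name the column family after the serialized window
        ColumnFamilyDescriptor descriptor = new ColumnFamilyDescriptor(serializer.serialize(window));
        handler = rocksDb.createColumnFamily(descriptor);
        windowToCFHandler.put(window, handler);
    }
    return handler;
}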
Use of org.rocksdb.ColumnFamilyHandle in project jstorm by alibaba.
The class WindowedRocksDbHdfsState, method getAllKeys().
@Override
public Collection<K> getAllKeys(TimeWindow window) {
    try {
        ColumnFamilyHandle handler = getColumnFamilyHandle(window);
        Collection<K> keys = new ArrayList<K>();
        RocksIterator itr = rocksDb.newIterator(handler);
        itr.seekToFirst();
        while (itr.isValid()) {
            keys.add((K) serializer.deserialize(itr.key()));
            itr.next();
        }
        return keys;
    } catch (RocksDBException e) {
        LOG.error("Failed to get all keys for timeWindow={}", window);
        throw new RuntimeException(e.getMessage());
    }
}
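The RocksIterator above is never released, which can leak a native handle. A hedged variant that also deserializes the values and releases the iterator via try-with-resources (RocksIterator is AutoCloseable in recent RocksJava releases; older releases expose dispose() instead), assuming the state class also carries a value type V:

// Hedged sketch: same traversal, returning key/value pairs and releasing the iterator.
public Map<K, V> getAllKeyValues(TimeWindow window) {
    try {
        ColumnFamilyHandle handler = getColumnFamilyHandle(window);
        Map<K, V> result = new HashMap<>();
        try (RocksIterator itr = rocksDb.newIterator(handler)) {
            for (itr.seekToFirst(); itr.isValid(); itr.next()) {
                result.put((K) serializer.deserialize(itr.key()),
                           (V) serializer.deserialize(itr.value()));
            }
        }
        return result;
    } catch (RocksDBException e) {
        LOG.error("Failed to get key/values for timeWindow={}", window, e);
        throw new RuntimeException(e);
    }
}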
Use of org.rocksdb.ColumnFamilyHandle in project jstorm by alibaba.
The class RocksDbUnitTest, method main().
public static void main(String[] args) {
    Map conf = JStormHelper.LoadConf(args[0]);
    putNum = JStormUtils.parseInt(conf.get("put.number"), 100);
    isFlush = JStormUtils.parseBoolean(conf.get("is.flush"), true);
    isCheckpoint = JStormUtils.parseBoolean(conf.get("is.checkpoint"), true);
    sleepTime = JStormUtils.parseInt(conf.get("sleep.time"), 5000);
    compactionInterval = JStormUtils.parseInt(conf.get("compaction.interval"), 30000);
    flushInterval = JStormUtils.parseInt(conf.get("flush.interval"), 3000);
    isCompaction = JStormUtils.parseBoolean(conf.get("is.compaction"), true);
    fileSizeBase = JStormUtils.parseLong(conf.get("file.size.base"), 10 * SizeUnit.KB);
    levelNum = JStormUtils.parseInt(conf.get("db.level.num"), 1);
    compactionTriggerNum = JStormUtils.parseInt(conf.get("db.compaction.trigger.num"), 4);
    LOG.info("Conf={}", conf);
    RocksDB db;
    File file = new File(cpPath);
    file.mkdirs();
    List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
    try {
        Options options = new Options();
        options.setCreateMissingColumnFamilies(true);
        options.setCreateIfMissing(true);
        options.setTargetFileSizeBase(fileSizeBase);
        options.setMaxBackgroundFlushes(2);
        options.setMaxBackgroundCompactions(2);
        options.setCompactionStyle(CompactionStyle.LEVEL);
        options.setNumLevels(levelNum);
        options.setLevelZeroFileNumCompactionTrigger(compactionTriggerNum);
        DBOptions dbOptions = new DBOptions();
        dbOptions.setCreateMissingColumnFamilies(true);
        dbOptions.setCreateIfMissing(true);
        dbOptions.setMaxBackgroundFlushes(2);
        dbOptions.setMaxBackgroundCompactions(2);
        ColumnFamilyOptions familyOptions = new ColumnFamilyOptions();
        familyOptions.setTargetFileSizeBase(fileSizeBase);
        familyOptions.setCompactionStyle(CompactionStyle.LEVEL);
        familyOptions.setNumLevels(levelNum);
        familyOptions.setLevelZeroFileNumCompactionTrigger(compactionTriggerNum);
        List<byte[]> families = RocksDB.listColumnFamilies(options, dbPath);
        List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
        if (families != null) {
            for (byte[] bytes : families) {
                columnFamilyDescriptors.add(new ColumnFamilyDescriptor(bytes, familyOptions));
                LOG.info("Load column family of {}", new String(bytes));
            }
        }
        if (columnFamilyDescriptors.size() > 0) {
            db = RocksDB.open(dbOptions, dbPath, columnFamilyDescriptors, columnFamilyHandles);
        } else {
            db = RocksDB.open(options, dbPath);
        }
    } catch (RocksDBException e) {
        LOG.error("Failed to open db", e);
        return;
    }
    rocksDbTest(db, columnFamilyHandles);
    db.close();
}
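The body of rocksDbTest is not part of this excerpt. A minimal sketch of what a put/get loop over the opened handles could look like, reusing the putNum and isFlush settings parsed above; the method name and behavior here are assumptions, not the actual jstorm test:

// Hedged sketch: write putNum entries into each column family, read one back, optionally flush.
private static void rocksDbTestSketch(RocksDB db, List<ColumnFamilyHandle> handles) {
    try {
        if (handles.isEmpty()) {
            // No explicit column families were loaded; exercise the default one.
            db.put("key-0".getBytes(), "value-0".getBytes());
        }
        for (ColumnFamilyHandle handle : handles) {
            for (int i = 0; i < putNum; i++) {
                db.put(handle, ("key-" + i).getBytes(), ("value-" + i).getBytes());
            }
            byte[] value = db.get(handle, "key-0".getBytes());
            LOG.info("Read back {}", value == null ? "null" : new String(value));
        }
        if (isFlush) {
            // Flush memtables so the data becomes visible in SST files on disk.
            db.flush(new FlushOptions().setWaitForFlush(true));
        }
    } catch (RocksDBException e) {
        LOG.error("RocksDB test failed", e);
    }
}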
Use of org.rocksdb.ColumnFamilyHandle in project jstorm by alibaba.
The class RocksDBTest, method visitorAccross().
public void visitorAccross() throws RocksDBException, InterruptedException {
    DBOptions dbOptions = null;
    TtlDB ttlDB = null;
    List<ColumnFamilyDescriptor> cfNames = new ArrayList<ColumnFamilyDescriptor>();
    List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<ColumnFamilyHandle>();
    cfNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
    cfNames.add(new ColumnFamilyDescriptor("new_cf".getBytes()));
    // TTL values are matched to cfNames by position: default column family first, "new_cf" second.
    List<Integer> ttlValues = new ArrayList<Integer>();
    // Default column family with infinite lifetime
    ttlValues.add(0);
    // New column family with 1 second ttl
    ttlValues.add(1);
    try {
        System.out.println("Begin to open db");
        dbOptions = new DBOptions().setCreateMissingColumnFamilies(true).setCreateIfMissing(true);
        ttlDB = TtlDB.open(dbOptions, rootDir, cfNames, columnFamilyHandleList, ttlValues, false);
        System.out.println("Successfully open db " + rootDir);
        List<String> keys = new ArrayList<String>();
        keys.add("key");
        ttlDB.put("key".getBytes(), "key".getBytes());
        for (int i = 0; i < 2; i++) {
            String key = "key" + i;
            keys.add(key);
            ttlDB.put(columnFamilyHandleList.get(i), key.getBytes(), key.getBytes());
        }
        try {
            byte[] value = ttlDB.get("others".getBytes());
            if (value != null) {
                System.out.println("Raw get :" + new String(value));
            } else {
                System.out.println("No value of other");
            }
        } catch (Exception e) {
            System.out.println("Occur exception other");
        }
        for (String key : keys) {
            try {
                byte[] value = ttlDB.get(key.getBytes());
                if (value != null) {
                    System.out.println("Raw get :" + new String(value));
                } else {
                    System.out.println("No value of " + key);
                }
            } catch (Exception e) {
                System.out.println("Occur exception " + key + ", Raw");
            }
            for (int i = 0; i < 2; i++) {
                try {
                    byte[] value = ttlDB.get(columnFamilyHandleList.get(i), key.getBytes());
                    if (value != null) {
                        System.out.println("handler index" + i + " get :" + new String(value));
                    } else {
                        System.out.println("No value of index" + i + " get :" + key);
                    }
                } catch (Exception e) {
                    System.out.println("Occur exception " + key + ", handler index:" + i);
                }
            }
        }
    } finally {
        for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
            columnFamilyHandle.dispose();
        }
        if (ttlDB != null) {
            ttlDB.close();
        }
        if (dbOptions != null) {
            dbOptions.dispose();
        }
    }
}
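TtlDB removes expired entries lazily, during compaction, so a read shortly after the TTL has elapsed may still return the old value. If one wanted to make the 1-second TTL on "new_cf" observable in a test like this, a hedged sketch of the extra steps that could go at the end of the try block above (the sleep length and the compactRange call are assumptions, not part of the jstorm test):

// Hedged sketch: wait past the 1-second TTL, force a compaction, then re-read "key1" from new_cf.
Thread.sleep(2000);
ttlDB.compactRange(columnFamilyHandleList.get(1));
byte[] expired = ttlDB.get(columnFamilyHandleList.get(1), "key1".getBytes());
System.out.println(expired == null ? "key1 expired as expected" : "key1 still visible: " + new String(expired));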
Use of org.rocksdb.ColumnFamilyHandle in project kafka by apache.
The class RocksDBStore, method openRocksDB().
void openRocksDB(final DBOptions dbOptions, final ColumnFamilyOptions columnFamilyOptions) {
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
            Collections.singletonList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    try {
        db = RocksDB.open(dbOptions, dbDir.getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        dbAccessor = new SingleColumnFamilyAccessor(columnFamilies.get(0));
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error opening store " + name + " at location " + dbDir.toString(), e);
    }
}
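Kafka Streams routes all subsequent reads and writes through the accessor, so the rest of RocksDBStore never handles ColumnFamilyHandle directly. A minimal sketch of the idea behind such a single-column-family accessor; the class and method names here are illustrative, not the actual Kafka Streams code:

// Hedged sketch: an accessor that pins one ColumnFamilyHandle and routes all I/O through it.
class ColumnFamilyAccessorSketch {
    private final RocksDB db;
    private final ColumnFamilyHandle columnFamily;

    ColumnFamilyAccessorSketch(final RocksDB db, final ColumnFamilyHandle columnFamily) {
        this.db = db;
        this.columnFamily = columnFamily;
    }

    void put(final byte[] key, final byte[] value) throws RocksDBException {
        if (value == null) {
            // Treat a null value as a delete, a common key-value store convention.
            db.delete(columnFamily, key);
        } else {
            db.put(columnFamily, key, value);
        }
    }

    byte[] get(final byte[] key) throws RocksDBException {
        return db.get(columnFamily, key);
    }
}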