Use of org.rocksdb.RocksDBException in project samza (by apache):
class TestRocksDbKeyValueReader, method testReadCorrectDbValue.
@Test
public void testReadCorrectDbValue() throws RocksDBException {
// Build a config pointing the reader at the pre-populated test store
// with plain string serdes for both key and message.
HashMap<String, String> configMap = new HashMap<String, String>();
configMap.put("stores." + DB_NAME + ".factory", "mockFactory");
configMap.put("stores." + DB_NAME + ".key.serde", "string");
configMap.put("stores." + DB_NAME + ".msg.serde", "string");
Config stringSerdeConfig = new MapConfig(configMap);
RocksDbKeyValueReader stringReader = new RocksDbKeyValueReader(DB_NAME, dirPath.toString(), stringSerdeConfig);
assertEquals("this is string", stringReader.get("testString"));
// A non-string key must be rejected by the string serde with a
// ClassCastException; record whether that is what we caught.
boolean sawClassCastException = false;
try {
stringReader.get(123);
} catch (Exception e) {
sawClassCastException = e instanceof ClassCastException;
}
assertTrue(sawClassCastException);
stringReader.stop();
// Re-run the lookup path with a user-registered integer serde.
configMap.put("serializers.registry.mock.class", IntegerSerdeFactory.class.getCanonicalName());
configMap.put("stores." + DB_NAME + ".key.serde", "mock");
configMap.put("stores." + DB_NAME + ".msg.serde", "mock");
Config intSerdeConfig = new MapConfig(configMap);
RocksDbKeyValueReader intReader = new RocksDbKeyValueReader(DB_NAME, dirPath.toString(), intSerdeConfig);
assertEquals(456, intReader.get(123));
assertNull(intReader.get(789));
intReader.stop();
}
Use of org.rocksdb.RocksDBException in project jstorm (by alibaba):
class WindowedRocksDbHdfsState, method get.
@Override
public V get(TimeWindow window, K key) {
try {
// Each time window is stored in its own column family.
ColumnFamilyHandle handler = getColumnFamilyHandle(window);
V ret = null;
if (key != null) {
byte[] rawKey = serializer.serialize(key);
byte[] rawData = rocksDb.get(handler, rawKey);
// A null byte array from RocksDB means the key is absent.
ret = rawData != null ? (V) serializer.deserialize(rawData) : null;
}
return ret;
} catch (RocksDBException e) {
// Log the full exception and preserve it as the cause so the
// stack trace is not lost (previously only the message was kept).
LOG.error("Failed to get value by key-{} for timeWindow={}", key, window, e);
throw new RuntimeException("Failed to get value by key-" + key + " for timeWindow=" + window, e);
}
}
Use of org.rocksdb.RocksDBException in project jstorm (by alibaba):
class WindowedRocksDbHdfsState, method remove.
@Override
public void remove(TimeWindow window, Object key) {
try {
// Each time window is stored in its own column family.
ColumnFamilyHandle handler = getColumnFamilyHandle(window);
rocksDb.remove(handler, serializer.serialize(key));
} catch (RocksDBException e) {
// Best-effort removal: warn but do not propagate. Use parameterized
// logging; the old concatenation also lacked a space before "for".
LOG.warn("Failed to remove {} for timeWindow={}", key, window, e);
}
}
Use of org.rocksdb.RocksDBException in project jstorm (by alibaba):
class WindowedRocksDbHdfsState, method getAllKeys.
@Override
public Collection<K> getAllKeys(TimeWindow window) {
try {
// Each time window is stored in its own column family.
ColumnFamilyHandle handler = getColumnFamilyHandle(window);
Collection<K> keys = new ArrayList<K>();
// RocksIterator wraps a native resource; close it in a finally
// block so it is released even if deserialization throws
// (previously it was leaked on every call).
RocksIterator itr = rocksDb.newIterator(handler);
try {
itr.seekToFirst();
while (itr.isValid()) {
keys.add((K) serializer.deserialize(itr.key()));
itr.next();
}
} finally {
itr.close();
}
return keys;
} catch (RocksDBException e) {
// Preserve the original exception as the cause instead of
// keeping only its message.
LOG.error("Failed to get all keys for timeWindow={}", window, e);
throw new RuntimeException("Failed to get all keys for timeWindow=" + window, e);
}
}
Use of org.rocksdb.RocksDBException in project jstorm (by alibaba):
class RocksDbUnitTest, method main.
// Entry point for the RocksDB unit-test driver: loads tuning knobs from a
// YAML/properties conf file (args[0]), opens (or creates) a RocksDB at
// dbPath — with column families when any already exist — then runs
// rocksDbTest against it.
// NOTE(review): Options/DBOptions/ColumnFamilyOptions are native resources
// that are never explicitly closed here; familyOptions in particular is
// still referenced by the open column families, so it must not be closed
// before db.close(). Left as-is because the correct close ordering depends
// on the rocksdbjni version in use — confirm before tightening.
public static void main(String[] args) {
Map conf = JStormHelper.LoadConf(args[0]);
// All knobs fall back to defaults when absent from the conf file.
putNum = JStormUtils.parseInt(conf.get("put.number"), 100);
isFlush = JStormUtils.parseBoolean(conf.get("is.flush"), true);
isCheckpoint = JStormUtils.parseBoolean(conf.get("is.checkpoint"), true);
sleepTime = JStormUtils.parseInt(conf.get("sleep.time"), 5000);
compactionInterval = JStormUtils.parseInt(conf.get("compaction.interval"), 30000);
flushInterval = JStormUtils.parseInt(conf.get("flush.interval"), 3000);
isCompaction = JStormUtils.parseBoolean(conf.get("is.compaction"), true);
fileSizeBase = JStormUtils.parseLong(conf.get("file.size.base"), 10 * SizeUnit.KB);
levelNum = JStormUtils.parseInt(conf.get("db.level.num"), 1);
compactionTriggerNum = JStormUtils.parseInt(conf.get("db.compaction.trigger.num"), 4);
LOG.info("Conf={}", conf);
RocksDB db;
// Ensure the checkpoint directory exists before the test writes to it.
File file = new File(cpPath);
file.mkdirs();
List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
try {
// Options used when opening the DB without column families (and for
// listColumnFamilies below).
Options options = new Options();
options.setCreateMissingColumnFamilies(true);
options.setCreateIfMissing(true);
options.setTargetFileSizeBase(fileSizeBase);
options.setMaxBackgroundFlushes(2);
options.setMaxBackgroundCompactions(2);
options.setCompactionStyle(CompactionStyle.LEVEL);
options.setNumLevels(levelNum);
options.setLevelZeroFileNumCompactionTrigger(compactionTriggerNum);
// DB-wide options for the column-family open path; per-family tuning
// (file size, compaction style, levels) goes into familyOptions.
DBOptions dbOptions = new DBOptions();
dbOptions.setCreateMissingColumnFamilies(true);
dbOptions.setCreateIfMissing(true);
dbOptions.setMaxBackgroundFlushes(2);
dbOptions.setMaxBackgroundCompactions(2);
ColumnFamilyOptions familyOptions = new ColumnFamilyOptions();
familyOptions.setTargetFileSizeBase(fileSizeBase);
familyOptions.setCompactionStyle(CompactionStyle.LEVEL);
familyOptions.setNumLevels(levelNum);
familyOptions.setLevelZeroFileNumCompactionTrigger(compactionTriggerNum);
// Discover column families persisted by a previous run.
List<byte[]> families = RocksDB.listColumnFamilies(options, dbPath);
List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
if (families != null) {
for (byte[] bytes : families) {
columnFamilyDescriptors.add(new ColumnFamilyDescriptor(bytes, familyOptions));
LOG.info("Load colum family of {}", new String(bytes));
}
}
// Existing families (including "default") force the descriptor-based
// open; a fresh DB is opened with the simple single-options form.
if (columnFamilyDescriptors.size() > 0) {
db = RocksDB.open(dbOptions, dbPath, columnFamilyDescriptors, columnFamilyHandles);
} else {
db = RocksDB.open(options, dbPath);
}
} catch (RocksDBException e) {
// Cannot proceed without a DB; log and bail out of the test driver.
LOG.error("Failed to open db", e);
return;
}
rocksDbTest(db, columnFamilyHandles);
db.close();
}
Aggregations