Use of org.rocksdb.RocksDBException in project kafka by apache.
In the class RocksDBStore, method putAll:
@Override
public void putAll(List<KeyValue<K, V>> entries) {
    try (WriteBatch batch = new WriteBatch()) {
        for (KeyValue<K, V> entry : entries) {
            final byte[] rawKey = serdes.rawKey(entry.key);
            if (entry.value == null) {
                // stage the tombstone in the batch so the delete is applied
                // atomically and in order with the puts
                batch.delete(rawKey);
            } else {
                final byte[] value = serdes.rawValue(entry.value);
                batch.put(rawKey, value);
            }
        }
        db.write(wOptions, batch);
    } catch (RocksDBException e) {
        throw new ProcessorStateException("Error while batch writing to store " + this.name, e);
    }
}
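For reference, the same batching pattern works against a bare RocksDB handle. A minimal, self-contained sketch (the database path and key/value bytes are placeholders, not from the Kafka code):

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class WriteBatchExample {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/rocksdb-batch-demo");
             WriteBatch batch = new WriteBatch();
             WriteOptions writeOptions = new WriteOptions()) {
            // stage mutations; nothing reaches the database yet
            batch.put("k1".getBytes(), "v1".getBytes());
            batch.put("k2".getBytes(), "v2".getBytes());
            batch.delete("k2".getBytes());
            // apply every staged mutation atomically
            db.write(writeOptions, batch);
        }
    }
}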
Use of org.rocksdb.RocksDBException in project flink by apache.
In the class RocksDBAggregatingState, method get:
@Override
public R get() throws IOException {
    try {
        // prepare the current key and namespace for RocksDB lookup
        writeCurrentKeyWithGroupAndNamespace();
        final byte[] key = keySerializationStream.toByteArray();
        // get the current value
        final byte[] valueBytes = backend.db.get(columnFamily, key);
        if (valueBytes == null) {
            return null;
        }
        ACC accumulator = valueSerializer.deserialize(
                new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));
        return aggFunction.getResult(accumulator);
    } catch (IOException | RocksDBException e) {
        throw new IOException("Error while retrieving value from RocksDB", e);
    }
}
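The lookup half of this pattern can be reproduced outside the state backend. A hypothetical sketch using a plain RocksDB handle and Flink's LongSerializer in place of the state's valueSerializer (the lookup helper and its names are illustrative, not Flink internals):

import java.io.IOException;

import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.core.memory.ByteArrayInputStreamWithPos;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class SerializedLookup {

    // returns the Long stored under key, or null when the key is absent
    static Long lookup(RocksDB db, byte[] key) throws IOException {
        try {
            byte[] valueBytes = db.get(key);
            if (valueBytes == null) {
                return null; // RocksDB signals a missing key with null
            }
            return LongSerializer.INSTANCE.deserialize(
                    new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));
        } catch (RocksDBException e) {
            throw new IOException("Error while retrieving value from RocksDB", e);
        }
    }
}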
Use of org.rocksdb.RocksDBException in project flink by apache.
In the class RocksDBAggregatingState, method add:
@Override
public void add(T value) throws IOException {
    try {
        // prepare the current key and namespace for RocksDB lookup
        writeCurrentKeyWithGroupAndNamespace();
        final byte[] key = keySerializationStream.toByteArray();
        keySerializationStream.reset();
        // get the current value
        final byte[] valueBytes = backend.db.get(columnFamily, key);
        // deserialize the current accumulator, or create a blank one
        final ACC accumulator = valueBytes == null
                ? aggFunction.createAccumulator()
                : valueSerializer.deserialize(
                        new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));
        // aggregate the value into the accumulator
        aggFunction.add(value, accumulator);
        // serialize the new accumulator, reusing the freshly reset key stream as a scratch buffer
        final DataOutputViewStreamWrapper out = new DataOutputViewStreamWrapper(keySerializationStream);
        valueSerializer.serialize(accumulator, out);
        // write the new value to RocksDB
        backend.db.put(columnFamily, writeOptions, key, keySerializationStream.toByteArray());
    } catch (IOException | RocksDBException e) {
        throw new IOException("Error while adding value to RocksDB", e);
    }
}
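The method above does a full read-modify-write round trip per added value. For aggregations that RocksDB can fold itself, a merge operator pushes that work into the store. A minimal sketch assuming string-append semantics (an alternative technique, not what Flink does here):

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.StringAppendOperator;

public class MergeExample {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (StringAppendOperator append = new StringAppendOperator();
             Options options = new Options()
                     .setCreateIfMissing(true)
                     .setMergeOperator(append);
             RocksDB db = RocksDB.open(options, "/tmp/rocksdb-merge-demo")) {
            // each merge appends to the stored value instead of replacing it,
            // avoiding an explicit get-modify-put cycle in application code
            db.merge("events".getBytes(), "a".getBytes());
            db.merge("events".getBytes(), "b".getBytes());
            System.out.println(new String(db.get("events".getBytes()))); // "a,b"
        }
    }
}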
Use of org.rocksdb.RocksDBException in project flink by apache.
In the class RocksDBFoldingState, method get:
@Override
public ACC get() {
    try {
        writeCurrentKeyWithGroupAndNamespace();
        byte[] key = keySerializationStream.toByteArray();
        byte[] valueBytes = backend.db.get(columnFamily, key);
        if (valueBytes == null) {
            return null;
        }
        return valueSerializer.deserialize(
                new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));
    } catch (IOException | RocksDBException e) {
        throw new RuntimeException("Error while retrieving data from RocksDB", e);
    }
}
Use of org.rocksdb.RocksDBException in project flink by apache.
In the class RocksDBReducingState, method get:
@Override
public V get() {
    try {
        writeCurrentKeyWithGroupAndNamespace();
        byte[] key = keySerializationStream.toByteArray();
        byte[] valueBytes = backend.db.get(columnFamily, key);
        if (valueBytes == null) {
            return null;
        }
        return valueSerializer.deserialize(
                new DataInputViewStreamWrapper(new ByteArrayInputStream(valueBytes)));
    } catch (IOException | RocksDBException e) {
        throw new RuntimeException("Error while retrieving data from RocksDB", e);
    }
}
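All of the Flink snippets above read through a columnFamily handle, one per registered state. A minimal sketch of opening a database with an extra column family and reading through its handle (path and family name are placeholders):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class ColumnFamilyExample {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
                new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
                new ColumnFamilyDescriptor("my-state".getBytes()));
        List<ColumnFamilyHandle> handles = new ArrayList<>();
        try (DBOptions options = new DBOptions()
                     .setCreateIfMissing(true)
                     .setCreateMissingColumnFamilies(true);
             RocksDB db = RocksDB.open(options, "/tmp/rocksdb-cf-demo", descriptors, handles)) {
            ColumnFamilyHandle stateFamily = handles.get(1);
            db.put(stateFamily, "key".getBytes(), "value".getBytes());
            // reads through the handle are scoped to that column family
            byte[] value = db.get(stateFamily, "key".getBytes());
            System.out.println(new String(value));
            // column family handles must be closed before the database itself
            for (ColumnFamilyHandle handle : handles) {
                handle.close();
            }
        }
    }
}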