Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class RocksDBValueState, method value().
@Override
public V value() {
    try {
        byte[] valueBytes =
                backend.db.get(columnFamily, serializeCurrentKeyWithGroupAndNamespace());
        if (valueBytes == null) {
            return getDefaultValue();
        }
        dataInputView.setBuffer(valueBytes);
        return valueSerializer.deserialize(dataInputView);
    } catch (IOException | RocksDBException e) {
        throw new FlinkRuntimeException("Error while retrieving data from RocksDB.", e);
    }
}
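For orientation, here is a minimal standalone sketch of the same read-or-default pattern against a plain RocksDB instance, outside of Flink. The class name, database path, and default value are illustrative assumptions; only the shape (get, null check, unchecked rethrow of the checked exception) mirrors the method above.

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class ValueLookupSketch {
    // Illustrative stand-in for Flink's getDefaultValue().
    private static final String DEFAULT_VALUE = "<default>";

    static String value(RocksDB db, byte[] key) {
        try {
            byte[] valueBytes = db.get(key);
            if (valueBytes == null) {
                // Absent key falls back to the default, as in RocksDBValueState.
                return DEFAULT_VALUE;
            }
            return new String(valueBytes);
        } catch (RocksDBException e) {
            // Checked RocksDB errors are rethrown unchecked, analogous to
            // the FlinkRuntimeException wrapping above.
            throw new RuntimeException("Error while retrieving data from RocksDB.", e);
        }
    }

    public static void main(String[] args) throws Exception {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
                RocksDB db = RocksDB.open(options, "/tmp/value-lookup-sketch")) {
            db.put("k".getBytes(), "v".getBytes());
            System.out.println(value(db, "k".getBytes()));       // v
            System.out.println(value(db, "missing".getBytes())); // <default>
        }
    }
}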
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class RocksDBReducingState, method mergeNamespaces().
@Override
public void mergeNamespaces(N target, Collection<N> sources) {
    if (sources == null || sources.isEmpty()) {
        return;
    }
    try {
        V current = null;
        // merge the sources to the target
        for (N source : sources) {
            if (source != null) {
                setCurrentNamespace(source);
                final byte[] sourceKey = serializeCurrentKeyWithGroupAndNamespace();
                final byte[] valueBytes = backend.db.get(columnFamily, sourceKey);
                if (valueBytes != null) {
                    backend.db.delete(columnFamily, writeOptions, sourceKey);
                    dataInputView.setBuffer(valueBytes);
                    V value = valueSerializer.deserialize(dataInputView);
                    if (current != null) {
                        current = reduceFunction.reduce(current, value);
                    } else {
                        current = value;
                    }
                }
            }
        }
        // if something came out of merging the sources, merge it or write it to the target
        if (current != null) {
            // create the target full-binary-key
            setCurrentNamespace(target);
            final byte[] targetKey = serializeCurrentKeyWithGroupAndNamespace();
            final byte[] targetValueBytes = backend.db.get(columnFamily, targetKey);
            if (targetValueBytes != null) {
                // target also had a value, merge
                dataInputView.setBuffer(targetValueBytes);
                V value = valueSerializer.deserialize(dataInputView);
                current = reduceFunction.reduce(current, value);
            }
            // serialize the resulting value
            dataOutputView.clear();
            valueSerializer.serialize(current, dataOutputView);
            // write the resulting value
            backend.db.put(columnFamily, writeOptions, targetKey, dataOutputView.getCopyOfBuffer());
        }
    } catch (Exception e) {
        throw new FlinkRuntimeException("Error while merging state in RocksDB", e);
    }
}
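The method above follows a read-delete-reduce-write shape: drain each source key, fold the values together, fold in any existing target value, then write the result back. A hedged, Flink-free sketch of that shape over raw byte[] keys; the MergeSketch class and the BinaryOperator-based reduce function are assumptions made for illustration.

import java.util.Arrays;
import java.util.Collection;
import java.util.function.BinaryOperator;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class MergeSketch {
    static void merge(RocksDB db, Collection<byte[]> sourceKeys, byte[] targetKey,
                      BinaryOperator<byte[]> reduce) {
        try {
            byte[] current = null;
            // Drain every source key, reducing values as they are consumed.
            for (byte[] sourceKey : sourceKeys) {
                byte[] valueBytes = db.get(sourceKey);
                if (valueBytes != null) {
                    db.delete(sourceKey);
                    current = (current == null) ? valueBytes : reduce.apply(current, valueBytes);
                }
            }
            if (current != null) {
                // If the target already holds a value, fold it in before writing back.
                byte[] targetValueBytes = db.get(targetKey);
                if (targetValueBytes != null) {
                    current = reduce.apply(current, targetValueBytes);
                }
                db.put(targetKey, current);
            }
        } catch (RocksDBException e) {
            throw new RuntimeException("Error while merging state in RocksDB", e);
        }
    }

    public static void main(String[] args) throws Exception {
        RocksDB.loadLibrary();
        try (Options opts = new Options().setCreateIfMissing(true);
                RocksDB db = RocksDB.open(opts, "/tmp/merge-sketch")) {
            db.put("a".getBytes(), "1".getBytes());
            db.put("b".getBytes(), "2".getBytes());
            merge(db, Arrays.asList("a".getBytes(), "b".getBytes()), "t".getBytes(),
                    (x, y) -> (new String(x) + "+" + new String(y)).getBytes());
            System.out.println(new String(db.get("t".getBytes()))); // 1+2
        }
    }
}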
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class AbstractRocksDBAppendingState, method getInternal().
SV getInternal(byte[] key) {
    try {
        byte[] valueBytes = backend.db.get(columnFamily, key);
        if (valueBytes == null) {
            return null;
        }
        dataInputView.setBuffer(valueBytes);
        return valueSerializer.deserialize(dataInputView);
    } catch (IOException | RocksDBException e) {
        throw new FlinkRuntimeException("Error while retrieving data from RocksDB", e);
    }
}
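Note that, unlike value() above, this lookup returns null rather than a default, so subclasses can build read-modify-write appends on top of it. A simplified sketch of that usage pattern with plain strings in place of Flink serializers; the AppendSketch class and the comma-joined encoding are illustrative, not Flink code.

import java.nio.charset.StandardCharsets;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class AppendSketch {
    static void append(RocksDB db, byte[] key, String element) {
        try {
            // getInternal analogue: a missing key yields null, not a default.
            byte[] valueBytes = db.get(key);
            String current = (valueBytes == null)
                    ? element
                    : new String(valueBytes, StandardCharsets.UTF_8) + "," + element;
            db.put(key, current.getBytes(StandardCharsets.UTF_8));
        } catch (RocksDBException e) {
            throw new RuntimeException("Error while retrieving data from RocksDB", e);
        }
    }

    public static void main(String[] args) throws Exception {
        RocksDB.loadLibrary();
        try (Options opts = new Options().setCreateIfMissing(true);
                RocksDB db = RocksDB.open(opts, "/tmp/append-sketch")) {
            append(db, "k".getBytes(), "a");
            append(db, "k".getBytes(), "b");
            System.out.println(new String(db.get("k".getBytes()))); // a,b
        }
    }
}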
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class RocksDBResource, method createNewColumnFamily().
/** Creates and returns a new column family with the given name. */
public ColumnFamilyHandle createNewColumnFamily(String name) {
    try {
        final ColumnFamilyHandle columnFamily =
                rocksDB.createColumnFamily(
                        new ColumnFamilyDescriptor(name.getBytes(), columnFamilyOptions));
        columnFamilyHandles.add(columnFamily);
        return columnFamily;
    } catch (Exception ex) {
        throw new FlinkRuntimeException("Could not create column family.", ex);
    }
}
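A self-contained usage sketch of the same pattern: create a column family, track the handle for later cleanup, and read and write through it. The ColumnFamilySketch class, database path, and key/value bytes are invented for the example; only the createColumnFamily call and handle bookkeeping mirror the test resource above.

import java.util.ArrayList;
import java.util.List;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class ColumnFamilySketch {
    private final RocksDB rocksDB;
    private final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    // Handles are tracked so a test resource can close them on teardown.
    private final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();

    ColumnFamilySketch(RocksDB rocksDB) {
        this.rocksDB = rocksDB;
    }

    public ColumnFamilyHandle createNewColumnFamily(String name) {
        try {
            ColumnFamilyHandle columnFamily = rocksDB.createColumnFamily(
                    new ColumnFamilyDescriptor(name.getBytes(), columnFamilyOptions));
            columnFamilyHandles.add(columnFamily);
            return columnFamily;
        } catch (Exception ex) {
            throw new RuntimeException("Could not create column family.", ex);
        }
    }

    public static void main(String[] args) throws Exception {
        RocksDB.loadLibrary();
        try (Options opts = new Options().setCreateIfMissing(true);
                RocksDB db = RocksDB.open(opts, "/tmp/cf-sketch")) {
            ColumnFamilyHandle cf = new ColumnFamilySketch(db).createNewColumnFamily("test-cf");
            db.put(cf, "k".getBytes(), "v".getBytes());
            System.out.println(new String(db.get(cf, "k".getBytes()))); // v
        }
    }
}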
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
The class RocksDBTtlStateTestBase, method createStateBackend().
StateBackend createStateBackend(TernaryBoolean enableIncrementalCheckpointing) {
    String dbPath;
    String checkpointPath;
    try {
        dbPath = tempFolder.newFolder().getAbsolutePath();
        checkpointPath = tempFolder.newFolder().toURI().toString();
    } catch (IOException e) {
        // Pass the IOException along as the cause so the original failure is not swallowed.
        throw new FlinkRuntimeException("Failed to init rocksdb test state backend", e);
    }
    RocksDBStateBackend backend =
            new RocksDBStateBackend(new FsStateBackend(checkpointPath), enableIncrementalCheckpointing);
    Configuration config = new Configuration();
    backend = backend.configure(config, Thread.currentThread().getContextClassLoader());
    backend.setDbStoragePath(dbPath);
    return backend;
}