Use of org.apache.flink.core.memory.DataOutputViewStreamWrapper in project flink by apache.
The class RocksDBAggregatingState, method add:
@Override
public void add(T value) throws IOException {
    try {
        // prepare the current key and namespace for RocksDB lookup
        writeCurrentKeyWithGroupAndNamespace();
        final byte[] key = keySerializationStream.toByteArray();
        keySerializationStream.reset();

        // get the current value
        final byte[] valueBytes = backend.db.get(columnFamily, key);

        // deserialize the current accumulator, or create a blank one
        final ACC accumulator = valueBytes == null
            ? aggFunction.createAccumulator()
            : valueSerializer.deserialize(
                new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));

        // aggregate the value into the accumulator
        aggFunction.add(value, accumulator);

        // serialize the new accumulator
        final DataOutputViewStreamWrapper out = new DataOutputViewStreamWrapper(keySerializationStream);
        valueSerializer.serialize(accumulator, out);

        // write the new value to RocksDB
        backend.db.put(columnFamily, writeOptions, key, keySerializationStream.toByteArray());
    } catch (IOException | RocksDBException e) {
        throw new IOException("Error while adding value to RocksDB", e);
    }
}
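For context, the accumulator handling above is a plain serializer round trip: write through a DataOutputViewStreamWrapper, read back through a DataInputViewStreamWrapper. A minimal standalone sketch of that round trip, using LongSerializer as a stand-in for the state's valueSerializer (the class name SerializerRoundTrip is illustrative):

```java
import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.core.memory.ByteArrayInputStreamWithPos;
import org.apache.flink.core.memory.ByteArrayOutputStreamWithPos;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;

public class SerializerRoundTrip {

    public static void main(String[] args) throws Exception {
        // LongSerializer stands in for the state's valueSerializer.
        LongSerializer serializer = LongSerializer.INSTANCE;

        // Serialize a value into a byte array, as the state backends do
        // before handing the bytes to RocksDB.
        ByteArrayOutputStreamWithPos outStream = new ByteArrayOutputStreamWithPos(32);
        DataOutputViewStreamWrapper out = new DataOutputViewStreamWrapper(outStream);
        serializer.serialize(42L, out);
        byte[] bytes = outStream.toByteArray();

        // Deserialize the bytes again, as the backends do after a db.get().
        Long restored = serializer.deserialize(
            new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(bytes)));

        System.out.println(restored); // prints 42
    }
}
```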
Use of org.apache.flink.core.memory.DataOutputViewStreamWrapper in project flink by apache.
The class RocksDBFoldingState, method add:
@Override
public void add(T value) throws IOException {
    try {
        // prepare the current key and namespace for RocksDB lookup
        writeCurrentKeyWithGroupAndNamespace();
        byte[] key = keySerializationStream.toByteArray();

        // read the currently folded value, if any
        byte[] valueBytes = backend.db.get(columnFamily, key);

        DataOutputViewStreamWrapper out = new DataOutputViewStreamWrapper(keySerializationStream);
        if (valueBytes == null) {
            // no previous value: fold the new value into the default value
            keySerializationStream.reset();
            valueSerializer.serialize(foldFunction.fold(stateDesc.getDefaultValue(), value), out);
            backend.db.put(columnFamily, writeOptions, key, keySerializationStream.toByteArray());
        } else {
            // fold the new value into the deserialized previous value
            ACC oldValue = valueSerializer.deserialize(
                new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));
            ACC newValue = foldFunction.fold(oldValue, value);
            keySerializationStream.reset();
            valueSerializer.serialize(newValue, out);
            backend.db.put(columnFamily, writeOptions, key, keySerializationStream.toByteArray());
        }
    } catch (Exception e) {
        throw new RuntimeException("Error while adding data to RocksDB", e);
    }
}
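On the Flink versions where RocksDBFoldingState exists (FoldingState was deprecated later), a job reaches this method through a FoldingStateDescriptor registered on the runtime context. A hedged sketch of such a declaration; the class name RunningSum and state name "running-sum" are illustrative, not from the source:

```java
import org.apache.flink.api.common.functions.FoldFunction;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.FoldingState;
import org.apache.flink.api.common.state.FoldingStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

public class RunningSum extends RichFlatMapFunction<Long, Long> {

    private transient FoldingState<Long, Long> sum;

    @Override
    public void open(Configuration parameters) {
        FoldingStateDescriptor<Long, Long> descriptor = new FoldingStateDescriptor<>(
            "running-sum",                      // state name
            0L,                                 // default value handed to the first fold()
            new FoldFunction<Long, Long>() {
                @Override
                public Long fold(Long accumulator, Long value) {
                    return accumulator + value;
                }
            },
            Long.class);
        sum = getRuntimeContext().getFoldingState(descriptor);
    }

    @Override
    public void flatMap(Long value, Collector<Long> out) throws Exception {
        // With the RocksDB backend this call ends up in RocksDBFoldingState#add.
        sum.add(value);
        out.collect(sum.get());
    }
}
```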
Use of org.apache.flink.core.memory.DataOutputViewStreamWrapper in project flink by apache.
The class RocksDBValueState, method update:
@Override
public void update(V value) throws IOException {
    // updating with null removes the entry, consistent with the other backends
    if (value == null) {
        clear();
        return;
    }
    DataOutputViewStreamWrapper out = new DataOutputViewStreamWrapper(keySerializationStream);
    try {
        // serialize the current key and namespace, then reuse the stream for the value
        writeCurrentKeyWithGroupAndNamespace();
        byte[] key = keySerializationStream.toByteArray();
        keySerializationStream.reset();

        // serialize the new value and write it to RocksDB
        valueSerializer.serialize(value, out);
        backend.db.put(columnFamily, writeOptions, key, keySerializationStream.toByteArray());
    } catch (Exception e) {
        throw new RuntimeException("Error while adding data to RocksDB", e);
    }
}
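From user code, this path is hit whenever a keyed ValueState is updated. A minimal sketch, assuming the job runs with the RocksDB state backend; the class name EventCounter and state name "event-count" are illustrative:

```java
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

public class EventCounter extends RichFlatMapFunction<String, Long> {

    private transient ValueState<Long> count;

    @Override
    public void open(Configuration parameters) {
        count = getRuntimeContext().getState(
            new ValueStateDescriptor<>("event-count", Long.class));
    }

    @Override
    public void flatMap(String event, Collector<Long> out) throws Exception {
        Long current = count.value();          // a db.get() under the RocksDB backend
        long next = (current == null) ? 1L : current + 1L;
        count.update(next);                    // RocksDBValueState#update, as shown above
        out.collect(next);
    }
}
```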
Use of org.apache.flink.core.memory.DataOutputViewStreamWrapper in project flink by apache.
The class RocksDBListState, method add:
@Override
public void add(V value) throws IOException {
    try {
        // serialize the current key and namespace, then reuse the stream for the element
        writeCurrentKeyWithGroupAndNamespace();
        byte[] key = keySerializationStream.toByteArray();
        keySerializationStream.reset();

        // serialize the new element and append it via RocksDB's merge operator
        DataOutputViewStreamWrapper out = new DataOutputViewStreamWrapper(keySerializationStream);
        valueSerializer.serialize(value, out);
        backend.db.merge(columnFamily, writeOptions, key, keySerializationStream.toByteArray());
    } catch (Exception e) {
        throw new RuntimeException("Error while adding data to RocksDB", e);
    }
}
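Because the element is written with db.merge rather than db.put, each ListState.add appends one serialized element without reading or rewriting the existing list. A hedged usage sketch under the RocksDB backend; the class name RecentEvents and state name "events" are illustrative:

```java
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

public class RecentEvents extends RichFlatMapFunction<String, String> {

    private transient ListState<String> events;

    @Override
    public void open(Configuration parameters) {
        events = getRuntimeContext().getListState(
            new ListStateDescriptor<>("events", String.class));
    }

    @Override
    public void flatMap(String event, Collector<String> out) throws Exception {
        // Each add() becomes a RocksDB merge of one serialized element.
        events.add(event);
        for (String e : events.get()) {
            out.collect(e);
        }
    }
}
```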
Use of org.apache.flink.core.memory.DataOutputViewStreamWrapper in project flink by apache.
The class RocksDBMapState, method getSerializedValue:
@Override
@SuppressWarnings("unchecked")
public byte[] getSerializedValue(byte[] serializedKeyAndNamespace) throws Exception {
    Preconditions.checkNotNull(serializedKeyAndNamespace, "Serialized key and namespace");

    //TODO make KvStateRequestSerializer key-group aware to save this round trip and key-group computation
    Tuple2<K, N> des = KvStateRequestSerializer.deserializeKeyAndNamespace(
        serializedKeyAndNamespace, backend.getKeySerializer(), namespaceSerializer);
    int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(des.f0, backend.getNumberOfKeyGroups());

    // re-serialize key group, key, and namespace as the common prefix of all map entries for this key
    ByteArrayOutputStreamWithPos outputStream = new ByteArrayOutputStreamWithPos(128);
    DataOutputViewStreamWrapper outputView = new DataOutputViewStreamWrapper(outputStream);
    writeKeyWithGroupAndNamespace(keyGroup, des.f0, des.f1, outputStream, outputView);
    final byte[] keyPrefixBytes = outputStream.toByteArray();

    // iterate over all RocksDB entries that share the key prefix
    final Iterator<Map.Entry<UK, UV>> iterator = new RocksDBMapIterator<Map.Entry<UK, UV>>(backend.db, keyPrefixBytes) {
        @Override
        public Map.Entry<UK, UV> next() {
            return nextEntry();
        }
    };

    // Return null to make the behavior consistent with other backends
    if (!iterator.hasNext()) {
        return null;
    }

    return KvStateRequestSerializer.serializeMap(new Iterable<Map.Entry<UK, UV>>() {
        @Override
        public Iterator<Map.Entry<UK, UV>> iterator() {
            return iterator;
        }
    }, userKeySerializer, userValueSerializer);
}
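getSerializedValue is the queryable-state path: it is invoked when an external client queries the state, not by the keyed operator itself. A hedged sketch of registering a MapState so that this path becomes reachable; the class name PerKeyCounts and the names "counts" and "counts-query" are illustrative:

```java
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

public class PerKeyCounts extends RichFlatMapFunction<String, String> {

    private transient MapState<String, Long> counts;

    @Override
    public void open(Configuration parameters) {
        MapStateDescriptor<String, Long> descriptor =
            new MapStateDescriptor<>("counts", String.class, Long.class);
        // Registering the state as queryable is what makes the
        // getSerializedValue() path above reachable from a query client.
        descriptor.setQueryable("counts-query");
        counts = getRuntimeContext().getMapState(descriptor);
    }

    @Override
    public void flatMap(String word, Collector<String> out) throws Exception {
        Long current = counts.get(word);
        counts.put(word, current == null ? 1L : current + 1L);
        out.collect(word);
    }
}
```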