Use of org.apache.ignite.internal.storage.basic.DelegatingDataRow in the Apache ignite-3 project.
Class RocksDbPartitionStorage, method invoke:
/**
 * {@inheritDoc}
 */
@Nullable
@Override
public <T> T invoke(SearchRow key, InvokeClosure<T> clo) throws StorageException {
    try {
        byte[] keyBytes = partitionKey(key);

        // Read the current value (may be absent) and hand it to the closure,
        // which decides what operation to perform.
        byte[] existingValueBytes = data.get(keyBytes);

        clo.call(existingValueBytes == null ? null : new DelegatingDataRow(key, existingValueBytes));

        switch (clo.operationType()) {
            case WRITE: {
                DataRow updatedRow = clo.newRow();
                assert updatedRow != null;

                byte[] updatedValueBytes = updatedRow.valueBytes();
                assert updatedValueBytes != null;

                data.put(keyBytes, updatedValueBytes);

                break;
            }

            case REMOVE:
                data.delete(keyBytes);

                break;

            case NOOP:
                break;

            default:
                throw new UnsupportedOperationException(String.valueOf(clo.operationType()));
        }

        return clo.result();
    } catch (RocksDBException e) {
        throw new StorageException("Failed to access data in the storage", e);
    }
}
Use of org.apache.ignite.internal.storage.basic.DelegatingDataRow in the Apache ignite-3 project.
Class VersionedRowStore, method pack:
/**
 * Packs a multi-versioned value into a single data row.
 *
 * <p>Serialized layout: [len(newRow)][newRow bytes][len(oldRow)][oldRow bytes][timestamp][nodeId],
 * where each length prefix is a 4-byte int and the two trailing timestamp fields are 8-byte longs.
 * A {@code null} row is encoded as a zero length with no payload bytes.
 *
 * @param key The key the packed value is associated with.
 * @param value The multi-versioned value (new row, old row, timestamp).
 * @return Data row wrapping the key and the serialized value bytes.
 */
private static DataRow pack(SearchRow key, Value value) {
    byte[] newRowBytes = value.newRow == null ? null : value.newRow.bytes();
    byte[] oldRowBytes = value.oldRow == null ? null : value.oldRow.bytes();

    int newRowLen = newRowBytes == null ? 0 : newRowBytes.length;
    int oldRowLen = oldRowBytes == null ? 0 : oldRowBytes.length;

    // TODO asch write only values.
    // Two 4-byte length prefixes, the payloads, and 16 bytes for the two trailing longs.
    ByteBuffer buf = ByteBuffer.allocate(4 + newRowLen + 4 + oldRowLen + 16);

    // Relative putInt() writes the same big-endian bytes and advances the position,
    // replacing the original asIntBuffer() view plus manual position() bookkeeping.
    buf.putInt(newRowLen);

    if (newRowLen > 0) {
        buf.put(newRowBytes);
    }

    buf.putInt(oldRowLen);

    if (oldRowLen > 0) {
        buf.put(oldRowBytes);
    }

    buf.putLong(value.timestamp.getTimestamp());
    buf.putLong(value.timestamp.getNodeId());

    return new DelegatingDataRow(key, buf.array());
}
Use of org.apache.ignite.internal.storage.basic.DelegatingDataRow in the Apache ignite-3 project.
Class RocksDbPartitionStorage, method readAll:
/**
 * {@inheritDoc}
 */
@Override
public Collection<DataRow> readAll(List<? extends SearchRow> keys) throws StorageException {
    int keysCount = keys.size();

    List<byte[]> valuesList;

    try {
        // Batch lookup: one column-family handle per requested key.
        valuesList = db.multiGetAsList(nCopies(keysCount, data.handle()), getKeys(keys));
    } catch (RocksDBException e) {
        throw new StorageException("Failed to read data from the storage", e);
    }

    assert keysCount == valuesList.size();

    List<DataRow> rows = new ArrayList<>(keysCount);

    for (int idx = 0; idx < keysCount; idx++) {
        byte[] valueBytes = valuesList.get(idx);

        // Keys missing from the storage come back as null and are skipped.
        if (valueBytes != null) {
            rows.add(new DelegatingDataRow(keys.get(idx), valueBytes));
        }
    }

    return rows;
}
Aggregations