use of com.aerospike.client.Operation in project aerospike-client-java by aerospike.
The class ExpOperation, method createOperation.
private static Operation createOperation(Operation.Type type, String name, Expression exp, int flags) {
    Packer packer = new Packer();
    packer.packArrayBegin(2);

    byte[] b = exp.getBytes();
    packer.packByteArray(b, 0, b.length);
    packer.packInt(flags);

    return new Operation(type, name, Value.get(packer.toByteArray()));
}
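The private helper above is what the public ExpOperation factory methods delegate to. Below is a minimal usage sketch, assuming an already-connected AerospikeClient named client and an existing Key named key; those names, the bin names, and the expression are illustrative and not part of the snippet above.

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.Key;
import com.aerospike.client.Operation;
import com.aerospike.client.Record;
import com.aerospike.client.exp.Exp;
import com.aerospike.client.exp.ExpOperation;
import com.aerospike.client.exp.ExpReadFlags;
import com.aerospike.client.exp.Expression;

static Record readExpressionResult(AerospikeClient client, Key key) {
    // Expression evaluated on the server: intBin("a") + 1.
    Expression exp = Exp.build(Exp.add(Exp.intBin("a"), Exp.val(1)));

    // ExpOperation.read packs the expression through the helper shown above and
    // returns its result under the virtual bin name "result".
    return client.operate(null, key,
        ExpOperation.read("result", exp, ExpReadFlags.DEFAULT),
        Operation.get("a"));
}

The returned Record then exposes both the real bin "a" and the computed "result" entry.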
use of com.aerospike.client.Operation in project aerospike-client-java by aerospike.
The class Command, method writeOperations.
private final int writeOperations(Operation[] ops, int readAttr) {
    boolean readBin = false;
    boolean readHeader = false;

    for (Operation op : ops) {
        switch (op.type) {
            case READ:
                // Read all bins if no bin is specified.
                if (op.binName == null) {
                    readAttr |= Command.INFO1_GET_ALL;
                }
                readBin = true;
                break;

            case READ_HEADER:
                readHeader = true;
                break;

            default:
                break;
        }
        writeOperation(op);
    }

    if (readHeader && !readBin) {
        readAttr |= Command.INFO1_NOBINDATA;
    }
    return readAttr;
}
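writeOperations is an internal step of building a multi-operation (operate) command; the read attribute it returns depends on whether any READ operation names a bin. A hedged sketch of a client call that exercises this path, with client and key as hypothetical placeholders:

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.Bin;
import com.aerospike.client.Key;
import com.aerospike.client.Operation;
import com.aerospike.client.Record;

static Record addAndRead(AerospikeClient client, Key key) {
    // Operation.get() carries no bin name, so the command is written with
    // INFO1_GET_ALL; using only Operation.getHeader() would instead lead
    // writeOperations to set INFO1_NOBINDATA.
    return client.operate(null, key,
        Operation.add(new Bin("count", 1)),
        Operation.get());
}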
use of com.aerospike.client.Operation in project aerospike-client-java by aerospike.
The class Command, method setBatchRead (List&lt;BatchRead&gt; overload).
public final void setBatchRead(BatchPolicy policy, List<BatchRead> records, BatchNode batch) {
    // Estimate full row size.
    final int[] offsets = batch.offsets;
    final int max = batch.offsetsSize;
    final int fieldCountRow = policy.sendSetName ? 2 : 1;
    BatchRead prev = null;

    begin();

    int fieldCount = 1;
    CommandExp exp = getCommandExp(policy);

    if (exp != null) {
        dataOffset += exp.size();
        fieldCount++;
    }

    dataOffset += FIELD_HEADER_SIZE + 5;

    for (int i = 0; i < max; i++) {
        final BatchRead record = records.get(offsets[i]);
        final Key key = record.key;
        final String[] binNames = record.binNames;
        final Operation[] ops = record.ops;

        dataOffset += key.digest.length + 4;

        // Use reference equality; a missed match only results in more space used.
        // The batch will still be correct.
        if (prev != null && prev.key.namespace == key.namespace &&
            (!policy.sendSetName || prev.key.setName == key.setName) &&
            prev.binNames == binNames && prev.readAllBins == record.readAllBins &&
            prev.ops == ops) {
            // Can repeat previous namespace/bin names to save space.
            dataOffset++;
        }
        else {
            // Estimate full header, namespace and bin names.
            dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE + 6;

            if (policy.sendSetName) {
                dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
            }

            if (binNames != null) {
                for (String binName : binNames) {
                    estimateOperationSize(binName);
                }
            }
            else if (ops != null) {
                for (Operation op : ops) {
                    estimateReadOperationSize(op);
                }
            }
            prev = record;
        }
    }

    sizeBuffer();

    int readAttr = Command.INFO1_READ;

    if (policy.readModeAP == ReadModeAP.ALL) {
        readAttr |= Command.INFO1_READ_MODE_AP_ALL;
    }

    writeHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, fieldCount, 0);

    if (exp != null) {
        dataOffset = exp.write(this);
    }

    final int fieldSizeOffset = dataOffset;
    // Field size is updated at the end.
    writeFieldHeader(0, policy.sendSetName ? FieldType.BATCH_INDEX_WITH_SET : FieldType.BATCH_INDEX);

    Buffer.intToBytes(max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (policy.allowInline) ? (byte) 1 : (byte) 0;

    prev = null;

    for (int i = 0; i < max; i++) {
        final int index = offsets[i];
        Buffer.intToBytes(index, dataBuffer, dataOffset);
        dataOffset += 4;

        final BatchRead record = records.get(index);
        final Key key = record.key;
        final String[] binNames = record.binNames;
        final Operation[] ops = record.ops;
        final byte[] digest = key.digest;
        System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length);
        dataOffset += digest.length;

        // Use reference equality; a missed match only results in more space used.
        // The batch will still be correct.
        if (prev != null && prev.key.namespace == key.namespace &&
            (!policy.sendSetName || prev.key.setName == key.setName) &&
            prev.binNames == binNames && prev.readAllBins == record.readAllBins &&
            prev.ops == ops) {
            // Repeat previous namespace/bin names to save space.
            dataBuffer[dataOffset++] = 1;  // repeat
        }
        else {
            // Write full header, namespace and bin names.
            dataBuffer[dataOffset++] = 0;  // do not repeat

            if (binNames != null && binNames.length != 0) {
                dataBuffer[dataOffset++] = (byte) readAttr;
                writeBatchFields(policy, key, fieldCountRow, binNames.length);

                for (String binName : binNames) {
                    writeOperation(binName, Operation.Type.READ);
                }
            }
            else if (ops != null) {
                int offset = dataOffset++;
                writeBatchFields(policy, key, fieldCountRow, ops.length);
                dataBuffer[offset] = (byte) writeOperations(ops, readAttr);
            }
            else {
                dataBuffer[dataOffset++] = (byte) (readAttr | (record.readAllBins ? Command.INFO1_GET_ALL : Command.INFO1_NOBINDATA));
                writeBatchFields(policy, key, fieldCountRow, 0);
            }
            prev = record;
        }
    }

    // Write real field size.
    Buffer.intToBytes(dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset);
    end();
    compress(policy);
}
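This overload serializes a per-node batch in which every BatchRead can carry its own bin names, operations, or read-all-bins flag. A minimal sketch of the public API that reaches it, assuming a hypothetical connected client; the namespace, set, and user keys are illustrative:

import java.util.ArrayList;
import java.util.List;

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.BatchRead;
import com.aerospike.client.Key;
import com.aerospike.client.policy.BatchPolicy;

static List<BatchRead> batchReadMixed(AerospikeClient client) {
    List<BatchRead> records = new ArrayList<>();
    // Per-record bin selection: read only "name" for the first key.
    records.add(new BatchRead(new Key("test", "demo", 1), new String[] {"name"}));
    // Read all bins for the second key.
    records.add(new BatchRead(new Key("test", "demo", 2), true));

    // The client groups records by node and builds one batch command per group;
    // results are written back into each BatchRead.record.
    client.get(new BatchPolicy(), records);
    return records;
}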
use of com.aerospike.client.Operation in project aerospike-client-java by aerospike.
The class Command, method setBatchRead (Key[] overload).
public final void setBatchRead(BatchPolicy policy, Key[] keys, BatchNode batch, String[] binNames, Operation[] ops, int readAttr) {
    // Estimate full row size.
    final int[] offsets = batch.offsets;
    final int max = batch.offsetsSize;
    final int fieldCountRow = policy.sendSetName ? 2 : 1;

    // Estimate buffer size.
    begin();

    int fieldCount = 1;
    CommandExp exp = getCommandExp(policy);

    if (exp != null) {
        dataOffset += exp.size();
        fieldCount++;
    }

    dataOffset += FIELD_HEADER_SIZE + 5;

    Key prev = null;

    for (int i = 0; i < max; i++) {
        Key key = keys[offsets[i]];
        dataOffset += key.digest.length + 4;

        // Try reference equality in hope that namespace/set for all keys is set from fixed variables.
        if (prev != null && prev.namespace == key.namespace &&
            (!policy.sendSetName || prev.setName == key.setName)) {
            // Can repeat previous namespace/bin names to save space.
            dataOffset++;
        }
        else {
            // Must write full header and namespace/set/bin names.
            dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE + 6;

            if (policy.sendSetName) {
                dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
            }

            if (binNames != null) {
                for (String binName : binNames) {
                    estimateOperationSize(binName);
                }
            }
            else if (ops != null) {
                for (Operation op : ops) {
                    estimateReadOperationSize(op);
                }
            }
            prev = key;
        }
    }

    sizeBuffer();

    if (policy.readModeAP == ReadModeAP.ALL) {
        readAttr |= Command.INFO1_READ_MODE_AP_ALL;
    }

    writeHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, fieldCount, 0);

    if (exp != null) {
        dataOffset = exp.write(this);
    }

    int fieldSizeOffset = dataOffset;
    // Field size is updated at the end.
    writeFieldHeader(0, policy.sendSetName ? FieldType.BATCH_INDEX_WITH_SET : FieldType.BATCH_INDEX);

    Buffer.intToBytes(max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (policy.allowInline) ? (byte) 1 : (byte) 0;

    prev = null;

    for (int i = 0; i < max; i++) {
        int index = offsets[i];
        Buffer.intToBytes(index, dataBuffer, dataOffset);
        dataOffset += 4;

        Key key = keys[index];
        byte[] digest = key.digest;
        System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length);
        dataOffset += digest.length;

        // Try reference equality in hope that namespace/set for all keys is set from fixed variables.
        if (prev != null && prev.namespace == key.namespace &&
            (!policy.sendSetName || prev.setName == key.setName)) {
            // Can repeat previous namespace/bin names to save space.
            dataBuffer[dataOffset++] = 1;  // repeat
        }
        else {
            // Write full header, namespace and bin names.
            dataBuffer[dataOffset++] = 0;  // do not repeat

            if (binNames != null && binNames.length != 0) {
                dataBuffer[dataOffset++] = (byte) readAttr;
                writeBatchFields(policy, key, fieldCountRow, binNames.length);

                for (String binName : binNames) {
                    writeOperation(binName, Operation.Type.READ);
                }
            }
            else if (ops != null) {
                int offset = dataOffset++;
                writeBatchFields(policy, key, fieldCountRow, ops.length);
                dataBuffer[offset] = (byte) writeOperations(ops, readAttr);
            }
            else {
                dataBuffer[dataOffset++] = (byte) readAttr;
                writeBatchFields(policy, key, fieldCountRow, 0);
            }
            prev = key;
        }
    }

    // Write real field size.
    Buffer.intToBytes(dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset);
    end();
    compress(policy);
}
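This overload covers the case where all keys share the same bin names, operations, and read attributes. A short sketch of the public call that takes this path, again with a hypothetical client and illustrative keys:

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.Key;
import com.aerospike.client.Record;
import com.aerospike.client.policy.BatchPolicy;

static Record[] batchReadSameBins(AerospikeClient client) {
    Key[] keys = new Key[] {
        new Key("test", "demo", 1),
        new Key("test", "demo", 2)
    };
    // Every key reads the same two bins, so the repeat flag can compress rows
    // that share namespace/set; records come back in the same order as keys.
    return client.get(new BatchPolicy(), keys, "name", "age");
}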
use of com.aerospike.client.Operation in project aerospike-client-java by aerospike.
The class MapOperation, method put.
/**
* Create map put operation.
* Server writes key/value item to map bin and returns map size.
* <p>
* The required map policy dictates the type of map to create when it does not exist.
* The map policy also specifies the flags used when writing items to the map.
* See policy {@link com.aerospike.client.cdt.MapPolicy}.
*/
public static Operation put(MapPolicy policy, String binName, Value key, Value value, CTX... ctx) {
    Packer packer = new Packer();

    if (policy.flags != 0) {
        Pack.init(packer, ctx);
        packer.packArrayBegin(5);
        packer.packInt(MapOperation.PUT);
        key.pack(packer);
        value.pack(packer);
        packer.packInt(policy.attributes);
        packer.packInt(policy.flags);
    }
    else {
        if (policy.itemCommand == REPLACE) {
            // Replace doesn't allow map attributes because it does not create on non-existing key.
            Pack.init(packer, ctx);
            packer.packArrayBegin(3);
            packer.packInt(policy.itemCommand);
            key.pack(packer);
            value.pack(packer);
        }
        else {
            Pack.init(packer, ctx);
            packer.packArrayBegin(4);
            packer.packInt(policy.itemCommand);
            key.pack(packer);
            value.pack(packer);
            packer.packInt(policy.attributes);
        }
    }
    byte[] bytes = packer.toByteArray();
    return new Operation(Operation.Type.MAP_MODIFY, binName, Value.get(bytes));
}
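A short usage sketch for the factory method above; the bin name, map key, and value are illustrative, and client and key are assumed to already exist:

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.Key;
import com.aerospike.client.Record;
import com.aerospike.client.Value;
import com.aerospike.client.cdt.MapOperation;
import com.aerospike.client.cdt.MapOrder;
import com.aerospike.client.cdt.MapPolicy;
import com.aerospike.client.cdt.MapWriteFlags;

static Record putMapEntry(AerospikeClient client, Key key) {
    // Key-ordered map with default write semantics (create or update the entry).
    MapPolicy policy = new MapPolicy(MapOrder.KEY_ORDERED, MapWriteFlags.DEFAULT);

    // Per the javadoc above, the returned Record contains the map size after the write.
    return client.operate(null, key,
        MapOperation.put(policy, "scores", Value.get("alice"), Value.get(95)));
}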