Example 1 with CommandExp

Use of com.aerospike.client.exp.CommandExp in project aerospike-client-java by aerospike.

From the Command class, method setRead.

public final void setRead(Policy policy, Key key) {
    begin();
    int fieldCount = estimateKeySize(policy, key);
    CommandExp exp = getCommandExp(policy);
    if (exp != null) {
        dataOffset += exp.size();
        fieldCount++;
    }
    sizeBuffer();
    writeHeaderRead(policy, serverTimeout, Command.INFO1_READ | Command.INFO1_GET_ALL, fieldCount, 0);
    writeKey(policy, key);
    if (exp != null) {
        dataOffset = exp.write(this);
    }
    end();
}
Also used : CommandExp(com.aerospike.client.exp.CommandExp)
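
For orientation, here is a minimal caller-side sketch (not part of the listing above) of how this path is typically reached: a single-record get with a filter expression set on the policy, which getCommandExp(policy) turns into the CommandExp that setRead sizes and writes. The host, namespace, set and bin names are placeholders.

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.Key;
import com.aerospike.client.Record;
import com.aerospike.client.exp.Exp;
import com.aerospike.client.policy.Policy;

public class ReadWithFilterExp {
    public static void main(String[] args) {
        // Placeholder host and port; adjust for your cluster.
        AerospikeClient client = new AerospikeClient("localhost", 3000);
        try {
            Policy policy = new Policy();
            // The filter expression is what getCommandExp(policy) picks up
            // and setRead writes as an extra field.
            policy.filterExp = Exp.build(Exp.gt(Exp.intBin("age"), Exp.val(21)));

            Key key = new Key("test", "users", "user1");
            Record record = client.get(policy, key);
            System.out.println(record);
        }
        finally {
            client.close();
        }
    }
}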

Example 2 with CommandExp

Use of com.aerospike.client.exp.CommandExp in project aerospike-client-java by aerospike.

From the Command class, method setExists.

public final void setExists(Policy policy, Key key) {
    begin();
    int fieldCount = estimateKeySize(policy, key);
    CommandExp exp = getCommandExp(policy);
    if (exp != null) {
        dataOffset += exp.size();
        fieldCount++;
    }
    sizeBuffer();
    writeHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0);
    writeKey(policy, key);
    if (exp != null) {
        dataOffset = exp.write(this);
    }
    end();
}
Also used : CommandExp(com.aerospike.client.exp.CommandExp)
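
A similar hedged sketch for the exists path: client.exists(policy, key) builds this command, and a filter expression on the policy again adds one field. All names below are illustrative.

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.Key;
import com.aerospike.client.exp.Exp;
import com.aerospike.client.policy.Policy;

public class ExistsWithFilterExp {
    public static void main(String[] args) {
        AerospikeClient client = new AerospikeClient("localhost", 3000);  // placeholder host
        try {
            Policy policy = new Policy();
            // Filter expression applied server-side before the existence check.
            policy.filterExp = Exp.build(Exp.binExists("status"));

            boolean found = client.exists(policy, new Key("test", "users", "user1"));
            System.out.println("exists: " + found);
        }
        finally {
            client.close();
        }
    }
}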

Example 3 with CommandExp

Use of com.aerospike.client.exp.CommandExp in project aerospike-client-java by aerospike.

From the Command class, method setUdf.

public final void setUdf(WritePolicy policy, Key key, String packageName, String functionName, Value[] args) {
    begin();
    int fieldCount = estimateKeySize(policy, key);
    CommandExp exp = getCommandExp(policy);
    if (exp != null) {
        dataOffset += exp.size();
        fieldCount++;
    }
    byte[] argBytes = Packer.pack(args);
    fieldCount += estimateUdfSize(packageName, functionName, argBytes);
    sizeBuffer();
    writeHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 0);
    writeKey(policy, key);
    if (exp != null) {
        dataOffset = exp.write(this);
    }
    writeField(packageName, FieldType.UDF_PACKAGE_NAME);
    writeField(functionName, FieldType.UDF_FUNCTION);
    writeField(argBytes, FieldType.UDF_ARGLIST);
    end();
    compress(policy);
}
Also used : CommandExp(com.aerospike.client.exp.CommandExp)
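
For the UDF path, a minimal sketch of the calling side: client.execute() with a WritePolicy that carries a filter expression. The module and function names ("example_udf", "add_value") are hypothetical and would have to be registered on the server beforehand.

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.Key;
import com.aerospike.client.Value;
import com.aerospike.client.exp.Exp;
import com.aerospike.client.policy.WritePolicy;

public class UdfWithFilterExp {
    public static void main(String[] args) {
        AerospikeClient client = new AerospikeClient("localhost", 3000);  // placeholder host
        try {
            WritePolicy policy = new WritePolicy();
            // Skip records whose "status" bin is not "active".
            policy.filterExp = Exp.build(Exp.eq(Exp.stringBin("status"), Exp.val("active")));

            Key key = new Key("test", "users", "user1");
            // "example_udf" and "add_value" are hypothetical; the Lua module
            // must already be registered with client.register(...).
            Object result = client.execute(policy, key, "example_udf", "add_value",
                Value.get("counter"), Value.get(1));
            System.out.println(result);
        }
        finally {
            client.close();
        }
    }
}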

Example 4 with CommandExp

Use of com.aerospike.client.exp.CommandExp in project aerospike-client-java by aerospike.

From the Command class, method setScan.

public final void setScan(ScanPolicy policy, String namespace, String setName, String[] binNames, long taskId, NodePartitions nodePartitions) {
    begin();
    int fieldCount = 0;
    int partsFullSize = 0;
    int partsPartialSize = 0;
    long maxRecords = 0;
    if (nodePartitions != null) {
        partsFullSize = nodePartitions.partsFull.size() * 2;
        partsPartialSize = nodePartitions.partsPartial.size() * 20;
        maxRecords = nodePartitions.recordMax;
    }
    if (namespace != null) {
        dataOffset += Buffer.estimateSizeUtf8(namespace) + FIELD_HEADER_SIZE;
        fieldCount++;
    }
    if (setName != null) {
        dataOffset += Buffer.estimateSizeUtf8(setName) + FIELD_HEADER_SIZE;
        fieldCount++;
    }
    if (partsFullSize > 0) {
        dataOffset += partsFullSize + FIELD_HEADER_SIZE;
        fieldCount++;
    }
    if (partsPartialSize > 0) {
        dataOffset += partsPartialSize + FIELD_HEADER_SIZE;
        fieldCount++;
    }
    if (maxRecords > 0) {
        dataOffset += 8 + FIELD_HEADER_SIZE;
        fieldCount++;
    }
    if (policy.recordsPerSecond > 0) {
        dataOffset += 4 + FIELD_HEADER_SIZE;
        fieldCount++;
    }
    CommandExp exp = getCommandExp(policy);
    if (exp != null) {
        dataOffset += exp.size();
        fieldCount++;
    }
    // Estimate scan timeout size.
    dataOffset += 4 + FIELD_HEADER_SIZE;
    fieldCount++;
    // Estimate taskId size.
    dataOffset += 8 + FIELD_HEADER_SIZE;
    fieldCount++;
    if (binNames != null) {
        for (String binName : binNames) {
            estimateOperationSize(binName);
        }
    }
    sizeBuffer();
    byte readAttr = Command.INFO1_READ;
    if (!policy.includeBinData) {
        readAttr |= Command.INFO1_NOBINDATA;
    }
    int operationCount = (binNames == null) ? 0 : binNames.length;
    writeHeaderRead(policy, totalTimeout, readAttr, fieldCount, operationCount);
    if (namespace != null) {
        writeField(namespace, FieldType.NAMESPACE);
    }
    if (setName != null) {
        writeField(setName, FieldType.TABLE);
    }
    if (partsFullSize > 0) {
        writeFieldHeader(partsFullSize, FieldType.PID_ARRAY);
        for (PartitionStatus part : nodePartitions.partsFull) {
            Buffer.shortToLittleBytes(part.id, dataBuffer, dataOffset);
            dataOffset += 2;
        }
    }
    if (partsPartialSize > 0) {
        writeFieldHeader(partsPartialSize, FieldType.DIGEST_ARRAY);
        for (PartitionStatus part : nodePartitions.partsPartial) {
            System.arraycopy(part.digest, 0, dataBuffer, dataOffset, 20);
            dataOffset += 20;
        }
    }
    if (maxRecords > 0) {
        writeField(maxRecords, FieldType.SCAN_MAX_RECORDS);
    }
    if (policy.recordsPerSecond > 0) {
        writeField(policy.recordsPerSecond, FieldType.RECORDS_PER_SECOND);
    }
    if (exp != null) {
        dataOffset = exp.write(this);
    }
    // Write scan socket idle timeout.
    writeField(policy.socketTimeout, FieldType.SCAN_TIMEOUT);
    // Write taskId field
    writeField(taskId, FieldType.TRAN_ID);
    if (binNames != null) {
        for (String binName : binNames) {
            writeOperation(binName, Operation.Type.READ);
        }
    }
    end();
}
Also used : PartitionStatus(com.aerospike.client.query.PartitionStatus) CommandExp(com.aerospike.client.exp.CommandExp)
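
A hedged sketch of the scan call that drives this method. ScanPolicy.recordsPerSecond and maxRecords map to the RECORDS_PER_SECOND and SCAN_MAX_RECORDS fields written above (the per-node share of maxRecords is what arrives here as nodePartitions.recordMax), and the filter expression becomes the CommandExp field. Namespace, set and bin names are placeholders.

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.Key;
import com.aerospike.client.Record;
import com.aerospike.client.ScanCallback;
import com.aerospike.client.exp.Exp;
import com.aerospike.client.policy.ScanPolicy;

public class ScanWithFilterExp {
    public static void main(String[] args) {
        AerospikeClient client = new AerospikeClient("localhost", 3000);  // placeholder host
        try {
            ScanPolicy policy = new ScanPolicy();
            policy.recordsPerSecond = 5000;   // written as the RECORDS_PER_SECOND field
            policy.maxRecords = 100000;       // per-node share becomes nodePartitions.recordMax
            policy.filterExp = Exp.build(Exp.gt(Exp.intBin("age"), Exp.val(21)));

            client.scanAll(policy, "test", "users", new ScanCallback() {
                public void scanCallback(Key key, Record record) {
                    System.out.println(key + " -> " + record.bins);
                }
            }, "name", "age");
        }
        finally {
            client.close();
        }
    }
}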

Example 5 with CommandExp

Use of com.aerospike.client.exp.CommandExp in project aerospike-client-java by aerospike.

From the Command class, method setBatchRead.

public final void setBatchRead(BatchPolicy policy, List<BatchRead> records, BatchNode batch) {
    // Estimate full row size
    final int[] offsets = batch.offsets;
    final int max = batch.offsetsSize;
    final int fieldCountRow = policy.sendSetName ? 2 : 1;
    BatchRead prev = null;
    begin();
    int fieldCount = 1;
    CommandExp exp = getCommandExp(policy);
    if (exp != null) {
        dataOffset += exp.size();
        fieldCount++;
    }
    dataOffset += FIELD_HEADER_SIZE + 5;
    for (int i = 0; i < max; i++) {
        final BatchRead record = records.get(offsets[i]);
        final Key key = record.key;
        final String[] binNames = record.binNames;
        final Operation[] ops = record.ops;
        dataOffset += key.digest.length + 4;
        // Avoid relatively expensive full equality checks for performance reasons.
        // Use reference equality only, in the hope that common namespaces and bin names
        // are set from fixed variables. It's fine if equality is not determined correctly,
        // because that just results in more space used. The batch will still be correct.
        if (prev != null && prev.key.namespace == key.namespace && (!policy.sendSetName || prev.key.setName == key.setName) && prev.binNames == binNames && prev.readAllBins == record.readAllBins && prev.ops == ops) {
            // Can set repeat previous namespace/bin names to save space.
            dataOffset++;
        } else {
            // Estimate full header, namespace and bin names.
            dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE + 6;
            if (policy.sendSetName) {
                dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
            }
            if (binNames != null) {
                for (String binName : binNames) {
                    estimateOperationSize(binName);
                }
            } else if (ops != null) {
                for (Operation op : ops) {
                    estimateReadOperationSize(op);
                }
            }
            prev = record;
        }
    }
    sizeBuffer();
    int readAttr = Command.INFO1_READ;
    if (policy.readModeAP == ReadModeAP.ALL) {
        readAttr |= Command.INFO1_READ_MODE_AP_ALL;
    }
    writeHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, fieldCount, 0);
    if (exp != null) {
        dataOffset = exp.write(this);
    }
    final int fieldSizeOffset = dataOffset;
    // Need to update size at end
    writeFieldHeader(0, policy.sendSetName ? FieldType.BATCH_INDEX_WITH_SET : FieldType.BATCH_INDEX);
    Buffer.intToBytes(max, dataBuffer, dataOffset);
    dataOffset += 4;
    dataBuffer[dataOffset++] = (policy.allowInline) ? (byte) 1 : (byte) 0;
    prev = null;
    for (int i = 0; i < max; i++) {
        final int index = offsets[i];
        Buffer.intToBytes(index, dataBuffer, dataOffset);
        dataOffset += 4;
        final BatchRead record = records.get(index);
        final Key key = record.key;
        final String[] binNames = record.binNames;
        final Operation[] ops = record.ops;
        final byte[] digest = key.digest;
        System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length);
        dataOffset += digest.length;
        // Avoid relatively expensive full equality checks for performance reasons.
        // Use reference equality only, in the hope that common namespaces and bin names
        // are set from fixed variables. It's fine if equality is not determined correctly,
        // because that just results in more space used. The batch will still be correct.
        if (prev != null && prev.key.namespace == key.namespace && (!policy.sendSetName || prev.key.setName == key.setName) && prev.binNames == binNames && prev.readAllBins == record.readAllBins && prev.ops == ops) {
            // Can set repeat previous namespace/bin names to save space.
            // repeat
            dataBuffer[dataOffset++] = 1;
        } else {
            // Write full header, namespace and bin names.
            // do not repeat
            dataBuffer[dataOffset++] = 0;
            if (binNames != null && binNames.length != 0) {
                dataBuffer[dataOffset++] = (byte) readAttr;
                writeBatchFields(policy, key, fieldCountRow, binNames.length);
                for (String binName : binNames) {
                    writeOperation(binName, Operation.Type.READ);
                }
            } else if (ops != null) {
                int offset = dataOffset++;
                writeBatchFields(policy, key, fieldCountRow, ops.length);
                dataBuffer[offset] = (byte) writeOperations(ops, readAttr);
            } else {
                dataBuffer[dataOffset++] = (byte) (readAttr | (record.readAllBins ? Command.INFO1_GET_ALL : Command.INFO1_NOBINDATA));
                writeBatchFields(policy, key, fieldCountRow, 0);
            }
            prev = record;
        }
    }
    // Write real field size.
    Buffer.intToBytes(dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset);
    end();
    compress(policy);
}
Also used : BatchRead(com.aerospike.client.BatchRead) Operation(com.aerospike.client.Operation) CommandExp(com.aerospike.client.exp.CommandExp) Key(com.aerospike.client.Key)
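
Finally, a caller-side sketch for the batch read path. The listing above compares namespaces, set names and bin-name arrays by reference to decide whether a row can emit the "repeat" byte instead of a full header; sharing one String[] instance across BatchRead entries, as below, is what lets that optimization kick in. All names are placeholders.

import java.util.ArrayList;
import java.util.List;

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.BatchRead;
import com.aerospike.client.Key;
import com.aerospike.client.exp.Exp;
import com.aerospike.client.policy.BatchPolicy;

public class BatchReadWithFilterExp {
    public static void main(String[] args) {
        AerospikeClient client = new AerospikeClient("localhost", 3000);  // placeholder host
        try {
            BatchPolicy policy = new BatchPolicy();
            policy.filterExp = Exp.build(Exp.binExists("name"));

            // One shared array so consecutive rows can reuse the previous header.
            String[] binNames = new String[] {"name", "age"};

            List<BatchRead> records = new ArrayList<BatchRead>();
            records.add(new BatchRead(new Key("test", "users", "user1"), binNames));
            records.add(new BatchRead(new Key("test", "users", "user2"), binNames));
            records.add(new BatchRead(new Key("test", "users", "user3"), true));  // read all bins

            client.get(policy, records);

            for (BatchRead record : records) {
                System.out.println(record.key.userKey + " -> " + record.record);
            }
        }
        finally {
            client.close();
        }
    }
}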

Aggregations

CommandExp (com.aerospike.client.exp.CommandExp): 13
Operation (com.aerospike.client.Operation): 4
Key (com.aerospike.client.Key): 2
PartitionStatus (com.aerospike.client.query.PartitionStatus): 2
BatchRead (com.aerospike.client.BatchRead): 1
Bin (com.aerospike.client.Bin): 1
QueryPolicy (com.aerospike.client.policy.QueryPolicy): 1
Filter (com.aerospike.client.query.Filter): 1
IndexCollectionType (com.aerospike.client.query.IndexCollectionType): 1
PredExp (com.aerospike.client.query.PredExp): 1