Use of com.aerospike.client.command.MultiCommand in project aerospike-client-java by aerospike.
From the class AerospikeClient, method get.
// -------------------------------------------------------
// Batch Read Operations
// -------------------------------------------------------
/**
 * Read multiple records for specified batch keys in one batch call.
 * This method allows different namespaces/bins to be requested for each key in the batch.
 * The returned records are located in the same list.
 * If the BatchRead key field is not found, the corresponding record field will be null.
 * <p>
 * If a batch request to a node fails, the entire batch is cancelled.
 *
 * @param policy   batch configuration parameters, pass in null for defaults
 * @param records  list of unique record identifiers and the bins to retrieve.
 *                 The returned records are located in the same list.
 * @throws AerospikeException if read fails
 */
public final void get(BatchPolicy policy, List<BatchRead> records) throws AerospikeException {
    if (records.size() == 0) {
        return;
    }

    if (policy == null) {
        policy = batchPolicyDefault;
    }

    List<BatchNode> batchNodes = BatchNodeList.generate(cluster, policy, records);

    if (policy.maxConcurrentThreads == 1 || batchNodes.size() <= 1) {
        // Run batch requests sequentially in same thread.
        for (BatchNode batchNode : batchNodes) {
            MultiCommand command = new Batch.ReadListCommand(cluster, null, batchNode, policy, records);
            command.execute();
        }
    }
    else {
        // Run batch requests in parallel in separate threads.
        //
        // Multiple threads write to the record list, so one might think that
        // volatile or memory barriers are needed on the write threads and this read thread.
        // This should not be necessary here because it happens in Executor which does a
        // volatile write (completedCount.incrementAndGet()) at the end of write threads
        // and a synchronized waitTillComplete() in this thread.
        Executor executor = new Executor(cluster, batchNodes.size());

        for (BatchNode batchNode : batchNodes) {
            MultiCommand command = new Batch.ReadListCommand(cluster, executor, batchNode, policy, records);
            executor.addCommand(command);
        }
        executor.execute(policy.maxConcurrentThreads);
    }
}
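A minimal usage sketch of this overload, assuming a reachable cluster on localhost:3000 and a namespace/set named "test"/"demo" (all placeholders); each per-key bin selection is dispatched through Batch.ReadListCommand as shown above:

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.BatchRead;
import com.aerospike.client.Key;
import java.util.ArrayList;
import java.util.List;

public class BatchReadExample {
    public static void main(String[] args) {
        // Placeholder host/port; adjust to your cluster.
        AerospikeClient client = new AerospikeClient("localhost", 3000);

        List<BatchRead> records = new ArrayList<BatchRead>();
        // Request specific bins for one key and all bins for another.
        records.add(new BatchRead(new Key("test", "demo", "key1"), new String[] {"bin1"}));
        records.add(new BatchRead(new Key("test", "demo", "key2"), true));

        // Null policy falls back to batchPolicyDefault, as in the method above.
        client.get(null, records);

        for (BatchRead br : records) {
            // record is null when the key was not found.
            System.out.println(br.key.userKey + " -> " + br.record);
        }
        client.close();
    }
}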
Use of com.aerospike.client.command.MultiCommand in project aerospike-client-java by aerospike.
From the class QueryExecutor, method initializeThreads.
protected final void initializeThreads() {
    // Detect cluster migrations when performing scan.
    long clusterKey = policy.failOnClusterChange ? QueryValidate.validateBegin(nodes[0], statement.namespace) : 0;
    boolean first = true;

    // Initialize threads.
    for (int i = 0; i < nodes.length; i++) {
        MultiCommand command = createCommand(nodes[i], clusterKey, first);
        threads[i] = new QueryThread(command);
        first = false;
    }
}
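For context, a hedged sketch of a query that would exercise this path, assuming a client version where QueryPolicy.failOnClusterChange is available (as referenced above) and a secondary index on the placeholder bin "bin1"; the client variable is an already connected AerospikeClient:

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.policy.QueryPolicy;
import com.aerospike.client.query.Filter;
import com.aerospike.client.query.RecordSet;
import com.aerospike.client.query.Statement;

public class QueryExample {
    public static void main(String[] args) {
        AerospikeClient client = new AerospikeClient("localhost", 3000);

        QueryPolicy queryPolicy = new QueryPolicy();
        // Enables the QueryValidate.validateBegin() cluster key check above.
        queryPolicy.failOnClusterChange = true;

        Statement stmt = new Statement();
        stmt.setNamespace("test");
        stmt.setSetName("demo");
        stmt.setFilter(Filter.range("bin1", 1, 100));

        RecordSet rs = client.query(queryPolicy, stmt);
        try {
            while (rs.next()) {
                System.out.println(rs.getKey() + " -> " + rs.getRecord());
            }
        }
        finally {
            rs.close();
            client.close();
        }
    }
}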
Use of com.aerospike.client.command.MultiCommand in project aerospike-client-java by aerospike.
From the class QueryPartitionExecutor, method execute.
private void execute() {
    while (true) {
        statement.taskId = RandomShift.instance().nextLong();

        List<NodePartitions> list = tracker.assignPartitionsToNodes(cluster, statement.namespace);

        // Initialize maximum number of nodes to query in parallel.
        maxConcurrentThreads = (policy.maxConcurrentNodes == 0 || policy.maxConcurrentNodes >= list.size()) ? list.size() : policy.maxConcurrentNodes;

        boolean parallel = maxConcurrentThreads > 1 && list.size() > 1;

        synchronized (threads) {
            // RecordSet thread may have aborted query, so check done under lock.
            if (done.get()) {
                break;
            }

            threads.clear();

            if (parallel) {
                for (NodePartitions nodePartitions : list) {
                    MultiCommand command = new QueryPartitionCommand(cluster, nodePartitions.node, policy, statement, recordSet, tracker, nodePartitions);
                    threads.add(new QueryThread(command));
                }

                for (int i = 0; i < maxConcurrentThreads; i++) {
                    threadPool.execute(threads.get(i));
                }
            }
        }

        if (parallel) {
            waitTillComplete();
        }
        else {
            for (NodePartitions nodePartitions : list) {
                MultiCommand command = new QueryPartitionCommand(cluster, nodePartitions.node, policy, statement, recordSet, tracker, nodePartitions);
                command.execute();
            }
        }

        if (exception != null) {
            break;
        }

        if (tracker.isComplete(policy)) {
            // All partitions received.
            recordSet.put(RecordSet.END);
            break;
        }

        // Set done to false so RecordSet thread has chance to close early again.
        done.set(false);

        if (policy.sleepBetweenRetries > 0) {
            // Sleep before trying again.
            Util.sleep(policy.sleepBetweenRetries);
        }

        completedCount.set(0);
        threadsComplete = false;
        exception = null;
    }
}
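A hedged sketch of the public entry point that drives this partition retry loop, assuming a client version that provides queryPartitions and PartitionFilter (namespace, set, and host are placeholders). Closing the returned RecordSet early is what the "RecordSet thread may have aborted query" comment above refers to:

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.policy.QueryPolicy;
import com.aerospike.client.query.PartitionFilter;
import com.aerospike.client.query.RecordSet;
import com.aerospike.client.query.Statement;

public class PartitionQueryExample {
    public static void main(String[] args) {
        AerospikeClient client = new AerospikeClient("localhost", 3000);

        QueryPolicy queryPolicy = new QueryPolicy();
        // 0 means query all target nodes in parallel; this value feeds
        // maxConcurrentThreads in the executor loop above.
        queryPolicy.maxConcurrentNodes = 0;

        Statement stmt = new Statement();
        stmt.setNamespace("test");
        stmt.setSetName("demo");

        RecordSet rs = client.queryPartitions(queryPolicy, stmt, PartitionFilter.all());
        try {
            while (rs.next()) {
                System.out.println(rs.getRecord());
            }
        }
        finally {
            rs.close();
            client.close();
        }
    }
}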