Use of org.elasticsearch.common.collect.Tuple in project elasticsearch by elastic.
The class RestSimulatePipelineAction, method prepareRequest.
@Override
public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
    Tuple<XContentType, BytesReference> sourceTuple = restRequest.contentOrSourceParam();
    SimulatePipelineRequest request = new SimulatePipelineRequest(sourceTuple.v2(), sourceTuple.v1());
    request.setId(restRequest.param("id"));
    request.setVerbose(restRequest.paramAsBoolean("verbose", false));
    return channel -> client.admin().cluster().simulatePipeline(request, new RestToXContentListener<>(channel));
}
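For reference, Tuple here is Elasticsearch's minimal immutable pair: the constructor takes the two values and v1()/v2() read them back. A minimal sketch of that API with illustrative values (the class and value names below are not from the snippet above):

import org.elasticsearch.common.collect.Tuple;

public class TupleApiSketch {
    public static void main(String[] args) {
        // Tuple is an immutable pair; v1() and v2() return the two elements.
        Tuple<String, Integer> pair = new Tuple<>("verbose", 1);
        System.out.println(pair.v1()); // "verbose"
        System.out.println(pair.v2()); // 1
    }
}

In the snippet above, contentOrSourceParam() uses this shape to hand back the detected XContentType (v1) together with the raw request body (v2) from a single call.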
Use of org.elasticsearch.common.collect.Tuple in project crate by crate.
The class FileReadingIterator, method initCollectorState.
private void initCollectorState() {
    lineContext = new LineContext();
    for (LineCollectorExpression<?> collectorExpression : collectorExpressions) {
        collectorExpression.startCollect(lineContext);
    }
    List<Tuple<FileInput, UriWithGlob>> fileInputs = new ArrayList<>(urisWithGlob.size());
    for (UriWithGlob fileUri : urisWithGlob) {
        try {
            FileInput fileInput = getFileInput(fileUri.uri);
            fileInputs.add(new Tuple<>(fileInput, fileUri));
        } catch (IOException e) {
            rethrowUnchecked(e);
        }
    }
    fileInputsIterator = fileInputs.iterator();
}
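Pairing each FileInput with the UriWithGlob it was built from lets the consumer of fileInputsIterator unpack both sides together. A self-contained sketch of that pair-and-iterate pattern, with String/Integer standing in for the CrateDB-internal FileInput and UriWithGlob types:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.elasticsearch.common.collect.Tuple;

public class PairIterationSketch {
    public static void main(String[] args) {
        // Keep each value paired with the source it came from, as
        // initCollectorState does for FileInput and UriWithGlob.
        List<Tuple<String, Integer>> pairs = new ArrayList<>();
        pairs.add(new Tuple<>("file:///a.json", 1));
        pairs.add(new Tuple<>("file:///b.json", 2));
        Iterator<Tuple<String, Integer>> it = pairs.iterator();
        while (it.hasNext()) {
            Tuple<String, Integer> entry = it.next();
            System.out.println(entry.v1() + " -> " + entry.v2());
        }
    }
}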
Use of org.elasticsearch.common.collect.Tuple in project crate by crate.
The class TransportDeleteBlobAction, method shardOperationOnPrimary.
@Override
protected Tuple<DeleteBlobResponse, DeleteBlobRequest> shardOperationOnPrimary(MetaData metaData, DeleteBlobRequest request) throws Throwable {
    logger.trace("shardOperationOnPrimary {}", request);
    BlobShard blobShard = blobIndicesService.blobShardSafe(request.shardId());
    boolean deleted = blobShard.delete(request.id());
    final DeleteBlobResponse response = new DeleteBlobResponse(deleted);
    return new Tuple<>(response, request);
}
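The returned pair reflects the primary-replica contract of the transport action: v1() carries the response for the client, v2() the request that continues on to replica shards. A compact sketch of that shape with stand-in types (Response and Request below are illustrative, not the CrateDB classes):

import org.elasticsearch.common.collect.Tuple;

public class PrimaryResultSketch {
    static class Request { final String id = "blob-1"; }
    static class Response { final boolean deleted = true; }

    // Mirrors the shape of shardOperationOnPrimary: one call yields both
    // the client response and the request forwarded for replication.
    static Tuple<Response, Request> onPrimary(Request request) {
        return new Tuple<>(new Response(), request);
    }

    public static void main(String[] args) {
        Tuple<Response, Request> result = onPrimary(new Request());
        System.out.println(result.v1().deleted); // response for the client
        System.out.println(result.v2().id);      // request for the replicas
    }
}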
Use of org.elasticsearch.common.collect.Tuple in project crate by crate.
The class ExecutionPhasesTask, method setupContext.
private void setupContext(Map<String, Collection<NodeOperation>> operationByServer, List<ExecutionPhase> handlerPhases, List<BatchConsumer> handlerConsumers) throws Throwable {
    assert handlerPhases.size() == handlerConsumers.size() : "handlerPhases size must match handlerConsumers size";
    String localNodeId = clusterService.localNode().getId();
    Collection<NodeOperation> localNodeOperations = operationByServer.remove(localNodeId);
    if (localNodeOperations == null) {
        localNodeOperations = Collections.emptyList();
    }
    // + 1 for localJobContext which is always created
    InitializationTracker initializationTracker = new InitializationTracker(operationByServer.size() + 1);
    List<Tuple<ExecutionPhase, BatchConsumer>> handlerPhaseAndReceiver = createHandlerPhaseAndReceivers(handlerPhases, handlerConsumers, initializationTracker);
    JobExecutionContext.Builder builder = jobContextService.newBuilder(jobId(), localNodeId, operationByServer.keySet());
    List<CompletableFuture<Bucket>> directResponseFutures = contextPreparer.prepareOnHandler(localNodeOperations, builder, handlerPhaseAndReceiver, new SharedShardContexts(indicesService));
    JobExecutionContext localJobContext = jobContextService.createContext(builder);
    List<PageBucketReceiver> pageBucketReceivers = getHandlerBucketReceivers(localJobContext, handlerPhaseAndReceiver);
    int bucketIdx = 0;
    /*
     * If you touch anything here make sure the following tests pass with > 1k iterations:
     *
     * Seed: 112E1807417E925A - testInvalidPatternSyntax
     * Seed: Any - testRegularSelectWithFewAvailableThreadsShouldNeverGetStuck
     * Seed: CC456FF5004F35D3 - testFailureOfJoinDownstream
     */
    if (!localNodeOperations.isEmpty() && !directResponseFutures.isEmpty()) {
        CompletableFutures.allAsList(directResponseFutures).whenComplete(new SetBucketCallback(pageBucketReceivers, bucketIdx, initializationTracker));
        bucketIdx++;
        try {
            // initializationTracker for localNodeOperations is triggered via SetBucketCallback
            localJobContext.start();
        } catch (Throwable t) {
            accountFailureForRemoteOperations(operationByServer, initializationTracker, handlerPhaseAndReceiver, t);
            return;
        }
    } else {
        try {
            localJobContext.start();
            initializationTracker.jobInitialized();
        } catch (Throwable t) {
            initializationTracker.jobInitializationFailed(t);
            accountFailureForRemoteOperations(operationByServer, initializationTracker, handlerPhaseAndReceiver, t);
            return;
        }
    }
    sendJobRequests(localNodeId, operationByServer, pageBucketReceivers, handlerPhaseAndReceiver, bucketIdx, initializationTracker);
}
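The assert at the top guarantees handlerPhases and handlerConsumers line up index by index, so createHandlerPhaseAndReceivers can combine them into one list of pairs. A hedged sketch of such a zip step (the helper below is hypothetical and ignores the initializationTracker wiring; only the Tuple type is from Elasticsearch):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.elasticsearch.common.collect.Tuple;

public class ZipSketch {
    // Pairs two equal-length lists element-wise; hypothetical stand-in for
    // the phase/consumer pairing done by createHandlerPhaseAndReceivers.
    static <A, B> List<Tuple<A, B>> zip(List<A> as, List<B> bs) {
        assert as.size() == bs.size() : "lists must match in size"; // mirrors setupContext's assert
        List<Tuple<A, B>> result = new ArrayList<>(as.size());
        for (int i = 0; i < as.size(); i++) {
            result.add(new Tuple<>(as.get(i), bs.get(i)));
        }
        return result;
    }

    public static void main(String[] args) {
        List<Tuple<String, Integer>> zipped = zip(Arrays.asList("a", "b"), Arrays.asList(1, 2));
        System.out.println(zipped); // element-wise pairs
    }
}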
Use of org.elasticsearch.common.collect.Tuple in project crate by crate.
The class Assignments, method convert.
/**
 * Converts assignments into a tuple of fqn column names and the corresponding symbols.
 * <p>
 * <pre>
 * {
 *     users.age: users.age + 1,
 *     users.name: users.name || 'foo'
 * }
 * </pre>
 * becomes
 * <pre>
 * ( [users.age, users.name], [users.age + 1, users.name || 'foo'] )
 * </pre>
 *
 * @return a tuple of fqn column names and symbols; never null, since the input is annotated @Nonnull.
 */
public static Tuple<String[], Symbol[]> convert(@Nonnull Map<Reference, ? extends Symbol> assignments) {
    String[] assignmentColumns = new String[assignments.size()];
    Symbol[] assignmentSymbols = new Symbol[assignments.size()];
    int i = 0;
    for (Map.Entry<Reference, ? extends Symbol> entry : assignments.entrySet()) {
        Reference key = entry.getKey();
        assignmentColumns[i] = key.ident().columnIdent().fqn();
        assignmentSymbols[i] = entry.getValue();
        i++;
    }
    return new Tuple<>(assignmentColumns, assignmentSymbols);
}
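Callers receive two parallel arrays and index them in lockstep. A small sketch of that unpacking, with Integer standing in for the CrateDB-internal Symbol type and hard-coded values in place of a real convert() call:

import org.elasticsearch.common.collect.Tuple;

public class UnpackSketch {
    public static void main(String[] args) {
        // Stand-in for the Tuple<String[], Symbol[]> returned by convert.
        Tuple<String[], Integer[]> converted =
            new Tuple<>(new String[]{"users.age", "users.name"}, new Integer[]{1, 2});
        String[] columns = converted.v1();  // fqn column names
        Integer[] symbols = converted.v2(); // assignment values, same order
        for (int i = 0; i < columns.length; i++) {
            System.out.println(columns[i] + " = " + symbols[i]);
        }
    }
}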