Usage of io.crate.execution.dml.upsert.ShardUpsertRequest in the crate project (crate/crate).
From the class InsertFromValues, method execute:
private CompletableFuture<ShardResponse.CompressedResult> execute(NodeLimits nodeLimits, ClusterState state, Collection<ShardUpsertRequest> shardUpsertRequests, TransportShardUpsertAction shardUpsertAction, ScheduledExecutorService scheduler) {
ShardResponse.CompressedResult compressedResult = new ShardResponse.CompressedResult();
if (shardUpsertRequests.isEmpty()) {
return CompletableFuture.completedFuture(compressedResult);
}
CompletableFuture<ShardResponse.CompressedResult> result = new CompletableFuture<>();
AtomicInteger numRequests = new AtomicInteger(shardUpsertRequests.size());
AtomicReference<Throwable> lastFailure = new AtomicReference<>(null);
Consumer<ShardUpsertRequest> countdown = request -> {
if (numRequests.decrementAndGet() == 0) {
Throwable throwable = lastFailure.get();
if (throwable == null) {
result.complete(compressedResult);
} else {
throwable = SQLExceptions.unwrap(throwable, t -> t instanceof RuntimeException);
// we want to report duplicate key exceptions
if (!SQLExceptions.isDocumentAlreadyExistsException(throwable) && (partitionWasDeleted(throwable, request.index()) || partitionClosed(throwable, request.index()) || mixedArgumentTypesFailure(throwable))) {
result.complete(compressedResult);
} else {
result.completeExceptionally(throwable);
}
}
}
};
for (ShardUpsertRequest request : shardUpsertRequests) {
String nodeId;
try {
nodeId = state.routingTable().shardRoutingTable(request.shardId()).primaryShard().currentNodeId();
} catch (IndexNotFoundException e) {
lastFailure.set(e);
if (!IndexParts.isPartitioned(request.index())) {
synchronized (compressedResult) {
compressedResult.markAsFailed(request.items());
}
}
countdown.accept(request);
continue;
}
final ConcurrencyLimit nodeLimit = nodeLimits.get(nodeId);
final long startTime = nodeLimit.startSample();
ActionListener<ShardResponse> listener = new ActionListener<>() {
@Override
public void onResponse(ShardResponse shardResponse) {
Throwable throwable = shardResponse.failure();
if (throwable == null) {
nodeLimit.onSample(startTime, false);
synchronized (compressedResult) {
compressedResult.update(shardResponse);
}
} else {
nodeLimit.onSample(startTime, true);
lastFailure.set(throwable);
}
countdown.accept(request);
}
@Override
public void onFailure(Exception e) {
nodeLimit.onSample(startTime, true);
Throwable t = SQLExceptions.unwrap(e);
if (!partitionWasDeleted(t, request.index())) {
synchronized (compressedResult) {
compressedResult.markAsFailed(request.items());
}
}
lastFailure.set(t);
countdown.accept(request);
}
};
shardUpsertAction.execute(request, new RetryListener<>(scheduler, l -> shardUpsertAction.execute(request, l), listener, BackoffPolicy.limitedDynamic(nodeLimit)));
}
return result;
}
Usage of io.crate.execution.dml.upsert.ShardUpsertRequest in the crate project (crate/crate).
From the class InsertFromValues, method createRowsByShardGrouper:
/**
 * Builds a {@link GroupRowsByShard} that buckets incoming rows into
 * {@link ShardUpsertRequest.Item}s keyed by their target shard.
 *
 * @param assignmentSources per-column assignment symbols attached to each item
 * @param insertInputs      inputs producing the values of a single insert row
 * @param indexNameResolver resolves the target index name for the current row
 * @param collectContext    supplies the collect expressions feeding the inputs
 * @param plannerContext    provides transaction/node context for shard routing
 * @param clusterService    cluster service used by the grouper for routing
 * @return a grouper that materializes rows into shard-bucketed upsert items
 */
private GroupRowsByShard<ShardUpsertRequest, ShardUpsertRequest.Item> createRowsByShardGrouper(Symbol[] assignmentSources, ArrayList<Input<?>> insertInputs, Supplier<String> indexNameResolver, InputFactory.Context<CollectExpression<Row, ?>> collectContext, PlannerContext plannerContext, ClusterService clusterService) {
    // Resolves the routing/shard for each row based on the projection's
    // primary keys and clustered-by column.
    var shardResolver = new RowShardResolver(
        plannerContext.transactionContext(),
        plannerContext.nodeContext(),
        writerProjection.primaryKeys(),
        writerProjection.ids(),
        writerProjection.clusteredByIdent(),
        writerProjection.clusteredBy());
    // View over the insert inputs; materialized per row when an item is built.
    InputRow row = new InputRow(insertInputs);
    Function<String, ShardUpsertRequest.Item> makeItem =
        id -> new ShardUpsertRequest.Item(id, assignmentSources, row.materialize(), null, null, null);
    return new GroupRowsByShard<>(
        clusterService,
        shardResolver,
        new TypeGuessEstimateRowSize(),
        indexNameResolver,
        collectContext.expressions(),
        makeItem,
        true);
}
Aggregations