Use of io.crate.execution.jobs.NodeLimits in project crate by crate.
The class ProjectionToProjectorVisitorTest, method prepare:
@Before
public void prepare() {
    nodeCtx = createNodeContext();
    MockitoAnnotations.initMocks(this);
    visitor = new ProjectionToProjectorVisitor(
        clusterService,
        new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        new NoneCircuitBreakerService(),
        nodeCtx,
        THREAD_POOL,
        Settings.EMPTY,
        mock(TransportActionProvider.class, Answers.RETURNS_DEEP_STUBS),
        new InputFactory(nodeCtx),
        EvaluatingNormalizer.functionOnlyNormalizer(nodeCtx),
        t -> null,
        t -> null);
    memoryManager = new OnHeapMemoryManager(usedBytes -> {
    });
    avgSignature = Signature.aggregate(
        "avg",
        DataTypes.INTEGER.getTypeSignature(),
        DataTypes.DOUBLE.getTypeSignature());
}
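Every snippet on this page builds its NodeLimits the same way. As a minimal sketch, assuming only the constructor and the per-node lookup visible in these examples (the node id is a placeholder):

// A minimal sketch using only the NodeLimits API shown on this page:
// build the instance from the default cluster settings, then look up the
// per-node ConcurrencyLimit that throttles requests to a single node.
ClusterSettings clusterSettings =
    new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
NodeLimits nodeLimits = new NodeLimits(clusterSettings);
ConcurrencyLimit nodeLimit = nodeLimits.get("node-1"); // "node-1" is a placeholder id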
Use of io.crate.execution.jobs.NodeLimits in project crate by crate.
The class ProjectorsTest, method prepare:
@Before
public void prepare() throws Exception {
    nodeCtx = createNodeContext();
    memoryManager = new OnHeapMemoryManager(bytes -> {
    });
    projectorFactory = new ProjectionToProjectorVisitor(
        clusterService,
        new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        new NoneCircuitBreakerService(),
        nodeCtx,
        THREAD_POOL,
        Settings.EMPTY,
        mock(TransportActionProvider.class, Answers.RETURNS_DEEP_STUBS),
        new InputFactory(nodeCtx),
        new EvaluatingNormalizer(
            nodeCtx,
            RowGranularity.SHARD,
            r -> Literal.ofUnchecked(r.valueType(), r.valueType().sanitizeValue("1")),
            null),
        t -> null,
        t -> null,
        Version.CURRENT,
        new ShardId("dummy", UUID.randomUUID().toString(), 0),
        null);
}
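Unlike the functionOnlyNormalizer in the previous snippet, this normalizer is given a reference resolver so shard-level references can be folded to constants. Restated in isolation as a sketch; modelling it as a plain java.util.function.Function is an illustration only, the actual parameter type expected by the EvaluatingNormalizer constructor may differ:

// Sketch: the resolver lambda from the constructor call above, isolated.
// For any reference it produces the literal "1", sanitized to the
// reference's own value type (so an integer reference yields the int 1).
Function<Reference, Literal<?>> resolveToOne =
    r -> Literal.ofUnchecked(r.valueType(), r.valueType().sanitizeValue("1"));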
Use of io.crate.execution.jobs.NodeLimits in project crate by crate.
The class InsertFromValues, method execute:
private CompletableFuture<ShardResponse.CompressedResult> execute(NodeLimits nodeLimits,
                                                                  ClusterState state,
                                                                  Collection<ShardUpsertRequest> shardUpsertRequests,
                                                                  TransportShardUpsertAction shardUpsertAction,
                                                                  ScheduledExecutorService scheduler) {
    ShardResponse.CompressedResult compressedResult = new ShardResponse.CompressedResult();
    if (shardUpsertRequests.isEmpty()) {
        return CompletableFuture.completedFuture(compressedResult);
    }
    CompletableFuture<ShardResponse.CompressedResult> result = new CompletableFuture<>();
    AtomicInteger numRequests = new AtomicInteger(shardUpsertRequests.size());
    AtomicReference<Throwable> lastFailure = new AtomicReference<>(null);
    Consumer<ShardUpsertRequest> countdown = request -> {
        if (numRequests.decrementAndGet() == 0) {
            Throwable throwable = lastFailure.get();
            if (throwable == null) {
                result.complete(compressedResult);
            } else {
                throwable = SQLExceptions.unwrap(throwable, t -> t instanceof RuntimeException);
                // Deleted or closed partitions and mixed argument types are tolerated
                // and complete normally; duplicate key exceptions must still be
                // reported, so they are excluded from that tolerance.
                if (!SQLExceptions.isDocumentAlreadyExistsException(throwable)
                        && (partitionWasDeleted(throwable, request.index())
                            || partitionClosed(throwable, request.index())
                            || mixedArgumentTypesFailure(throwable))) {
                    result.complete(compressedResult);
                } else {
                    result.completeExceptionally(throwable);
                }
            }
        }
    };
    for (ShardUpsertRequest request : shardUpsertRequests) {
        String nodeId;
        try {
            nodeId = state.routingTable().shardRoutingTable(request.shardId()).primaryShard().currentNodeId();
        } catch (IndexNotFoundException e) {
            lastFailure.set(e);
            if (!IndexParts.isPartitioned(request.index())) {
                synchronized (compressedResult) {
                    compressedResult.markAsFailed(request.items());
                }
            }
            countdown.accept(request);
            continue;
        }
        final ConcurrencyLimit nodeLimit = nodeLimits.get(nodeId);
        final long startTime = nodeLimit.startSample();
        ActionListener<ShardResponse> listener = new ActionListener<>() {

            @Override
            public void onResponse(ShardResponse shardResponse) {
                Throwable throwable = shardResponse.failure();
                if (throwable == null) {
                    nodeLimit.onSample(startTime, false);
                    synchronized (compressedResult) {
                        compressedResult.update(shardResponse);
                    }
                } else {
                    nodeLimit.onSample(startTime, true);
                    lastFailure.set(throwable);
                }
                countdown.accept(request);
            }

            @Override
            public void onFailure(Exception e) {
                nodeLimit.onSample(startTime, true);
                Throwable t = SQLExceptions.unwrap(e);
                if (!partitionWasDeleted(t, request.index())) {
                    synchronized (compressedResult) {
                        compressedResult.markAsFailed(request.items());
                    }
                }
                lastFailure.set(t);
                countdown.accept(request);
            }
        };
        shardUpsertAction.execute(
            request,
            new RetryListener<>(
                scheduler,
                l -> shardUpsertAction.execute(request, l),
                listener,
                BackoffPolicy.limitedDynamic(nodeLimit)));
    }
    return result;
}
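The adaptive per-node throttling at the heart of this method can be distilled into the sketch below. It uses only the NodeLimits and ConcurrencyLimit calls that appear above; `dispatch` and the surrounding names are placeholders for illustration, not CrateDB API:

// Sketch of the per-node adaptive limit pattern, built from the calls
// used in the method above; `dispatch` is a placeholder.
ConcurrencyLimit nodeLimit = nodeLimits.get(nodeId); // one limit per target node
long startTime = nodeLimit.startSample();            // start a latency sample
dispatch(request, new ActionListener<ShardResponse>() {
    @Override
    public void onResponse(ShardResponse response) {
        nodeLimit.onSample(startTime, false); // success: latency feeds the limit
    }

    @Override
    public void onFailure(Exception e) {
        nodeLimit.onSample(startTime, true);  // failure: counts as a dropped sample
    }
});
// Retries of the same request back off according to the live limit:
// BackoffPolicy.limitedDynamic(nodeLimit)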
Use of io.crate.execution.jobs.NodeLimits in project crate by crate.
The class IndexWriterProjectorTest, method testIndexWriter:
@Test
public void testIndexWriter() throws Throwable {
    execute("create table bulk_import (id int primary key, name string) with (number_of_replicas=0)");
    ensureGreen();
    InputCollectExpression sourceInput = new InputCollectExpression(1);
    List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);
    RelationName bulkImportIdent = new RelationName(sqlExecutor.getCurrentSchema(), "bulk_import");
    ClusterState state = clusterService().state();
    Settings tableSettings = TableSettingsResolver.get(state.getMetadata(), bulkImportIdent, false);
    ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class);
    IndexWriterProjector writerProjector = new IndexWriterProjector(
        clusterService(),
        new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        new NoopCircuitBreaker("dummy"),
        RamAccounting.NO_ACCOUNTING,
        threadPool.scheduler(),
        threadPool.executor(ThreadPool.Names.SEARCH),
        CoordinatorTxnCtx.systemTransactionContext(),
        new NodeContext(internalCluster().getInstance(Functions.class)),
        Settings.EMPTY,
        IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(tableSettings),
        NumberOfReplicas.fromSettings(tableSettings, state.getNodes().getSize()),
        internalCluster().getInstance(TransportCreatePartitionsAction.class),
        internalCluster().getInstance(TransportShardUpsertAction.class)::execute,
        IndexNameResolver.forTable(bulkImportIdent),
        new Reference(new ReferenceIdent(bulkImportIdent, DocSysColumns.RAW), RowGranularity.DOC, DataTypes.STRING, 0, null),
        Collections.singletonList(ID_IDENT),
        Collections.<Symbol>singletonList(new InputColumn(0)),
        null,
        null,
        sourceInput,
        collectExpressions,
        20,
        null,
        null,
        false,
        false,
        UUID.randomUUID(),
        UpsertResultContext.forRowCount(),
        false);
    BatchIterator rowsIterator = InMemoryBatchIterator.of(
        IntStream.range(0, 100)
            .mapToObj(i -> new RowN(new Object[] { i, "{\"id\": " + i + ", \"name\": \"Arthur\"}" }))
            .collect(Collectors.toList()),
        SENTINEL,
        true);
    TestingRowConsumer consumer = new TestingRowConsumer();
    consumer.accept(writerProjector.apply(rowsIterator), null);
    Bucket objects = consumer.getBucket();
    assertThat(objects, contains(isRow(100L)));
    execute("refresh table bulk_import");
    execute("select count(*) from bulk_import");
    assertThat(response.rowCount(), is(1L));
    assertThat(response.rows()[0][0], is(100L));
}
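Stripped of the construction boilerplate, the test drives the projector with the same three-step pattern used by both projector tests on this page. A sketch, where `rows` stands for the prepared input rows:

// Sketch of the drive-and-assert pattern: wrap the input rows in a
// BatchIterator, apply the projector, then drain the projected iterator
// with a consumer to obtain the single-row result.
BatchIterator<Row> input = InMemoryBatchIterator.of(rows, SENTINEL, true);
TestingRowConsumer consumer = new TestingRowConsumer();
consumer.accept(writerProjector.apply(input), null); // run to completion
Bucket objects = consumer.getBucket();               // one row holding the upsert count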
Use of io.crate.execution.jobs.NodeLimits in project crate by crate.
The class IndexWriterProjectorUnitTest, method testNullPKValue:
@Test
public void testNullPKValue() throws Throwable {
    InputCollectExpression sourceInput = new InputCollectExpression(0);
    List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);
    TransportCreatePartitionsAction transportCreatePartitionsAction = mock(TransportCreatePartitionsAction.class);
    IndexWriterProjector indexWriter = new IndexWriterProjector(
        clusterService,
        new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        new NoopCircuitBreaker("dummy"),
        RamAccounting.NO_ACCOUNTING,
        scheduler,
        executor,
        CoordinatorTxnCtx.systemTransactionContext(),
        createNodeContext(),
        Settings.EMPTY,
        5,
        1,
        transportCreatePartitionsAction,
        (request, listener) -> {
        },
        IndexNameResolver.forTable(BULK_IMPORT_IDENT),
        RAW_SOURCE_REFERENCE,
        Collections.singletonList(ID_IDENT),
        Collections.<Symbol>singletonList(new InputColumn(1)),
        null,
        null,
        sourceInput,
        collectExpressions,
        20,
        null,
        null,
        false,
        false,
        UUID.randomUUID(),
        UpsertResultContext.forRowCount(),
        false);
    RowN rowN = new RowN(new Object[] { new BytesRef("{\"y\": \"x\"}"), null });
    BatchIterator<Row> batchIterator = InMemoryBatchIterator.of(Collections.singletonList(rowN), SENTINEL, true);
    batchIterator = indexWriter.apply(batchIterator);
    TestingRowConsumer testingBatchConsumer = new TestingRowConsumer();
    testingBatchConsumer.accept(batchIterator, null);
    List<Object[]> result = testingBatchConsumer.getResult();
    // Zero affected rows: a NULL PK value results in an exception for that row,
    // but it must never bubble up, as other rows might already have been written.
    assertThat(result.get(0)[0], is(0L));
}
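The no-op lambda passed for the shard upsert action keeps this unit test from touching any transport layer. Isolated as a sketch; the exact functional interface is inferred from the InsertFromValues snippet above and is an assumption:

// Sketch: the stubbed upsert action from the constructor call above.
// The java.util.function.BiConsumer shape and parameter types are an
// assumption; the point is that requests are silently dropped, so the
// projector's own error handling produces the 0-row result asserted above.
BiConsumer<ShardUpsertRequest, ActionListener<ShardResponse>> noOpUpsert =
    (request, listener) -> {
    };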