Use of org.elasticsearch.cluster.metadata.IndexNameExpressionResolver in project crate by crate.

Class BulkShardProcessorTest, method testNonEsRejectedExceptionDoesNotResultInRetryButAborts:
@Test
public void testNonEsRejectedExceptionDoesNotResultInRetryButAborts() throws Throwable {
    expectedException.expect(RuntimeException.class);
    expectedException.expectMessage("a random exception");

    // capture the listener that BulkShardProcessor hands to the (mocked) shard bulk action
    final AtomicReference<ActionListener<ShardResponse>> ref = new AtomicReference<>();
    BulkRequestExecutor<ShardUpsertRequest> transportShardBulkAction = (request, listener) -> ref.set(listener);

    BulkRetryCoordinator bulkRetryCoordinator = new BulkRetryCoordinator(threadPool);
    BulkRetryCoordinatorPool coordinatorPool = mock(BulkRetryCoordinatorPool.class);
    when(coordinatorPool.coordinator(any(ShardId.class))).thenReturn(bulkRetryCoordinator);

    ShardUpsertRequest.Builder builder = new ShardUpsertRequest.Builder(
        TimeValue.timeValueMillis(10), false, false, null, new Reference[] { fooRef }, UUID.randomUUID());
    final BulkShardProcessor<ShardUpsertRequest> bulkShardProcessor = new BulkShardProcessor<>(
        clusterService,
        mock(TransportBulkCreateIndicesAction.class),
        new IndexNameExpressionResolver(Settings.EMPTY),
        Settings.EMPTY,
        coordinatorPool,
        false,
        1,
        builder,
        transportShardBulkAction,
        UUID.randomUUID());

    bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("1", null, new Object[] { "bar1" }, null), null);
    ActionListener<ShardResponse> listener = ref.get();
    // a failure that is not an EsRejectedExecutionException must abort the processor instead of being retried
    listener.onFailure(new RuntimeException("a random exception"));
    assertFalse(bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("2", null, new Object[] { "bar2" }, null), null));
    try {
        bulkShardProcessor.result().get();
    } catch (ExecutionException e) {
        throw e.getCause();
    } finally {
        bulkShardProcessor.close();
    }
}
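The lambdas in these tests rely on BulkRequestExecutor being a functional interface that takes the request plus a response listener. Its actual declaration in crate is not shown on this page; a minimal sketch of the shape the tests assume (inferred from the (request, listener) -> ... lambdas, so treat it as an assumption) could look like this:

    // Sketch only: the real interface in crate may constrain TRequest differently
    // or live in another package; the two-argument execute(...) shape is what the
    // lambdas above depend on.
    @FunctionalInterface
    public interface BulkRequestExecutor<TRequest> {
        void execute(TRequest request, ActionListener<ShardResponse> listener);
    }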
Class BulkShardProcessorTest, method testThatAddAfterFailureBlocksDueToRetry:
@Test
public void testThatAddAfterFailureBlocksDueToRetry() throws Exception {
    ClusterService clusterService = mock(ClusterService.class);
    OperationRouting operationRouting = mock(OperationRouting.class);
    mockShard(operationRouting, 1);
    mockShard(operationRouting, 2);
    mockShard(operationRouting, 3);
    when(clusterService.operationRouting()).thenReturn(operationRouting);

    // the executor is invoked two times: once for the successfully added row and once more
    // when the request is retried after the failure
    final CountDownLatch listenerLatch = new CountDownLatch(2);
    final AtomicReference<ActionListener<ShardResponse>> ref = new AtomicReference<>();
    BulkRequestExecutor<ShardUpsertRequest> transportShardBulkAction = (request, listener) -> {
        ref.set(listener);
        listenerLatch.countDown();
    };

    BulkRetryCoordinator bulkRetryCoordinator = new BulkRetryCoordinator(threadPool);
    BulkRetryCoordinatorPool coordinatorPool = mock(BulkRetryCoordinatorPool.class);
    when(coordinatorPool.coordinator(any(ShardId.class))).thenReturn(bulkRetryCoordinator);

    ShardUpsertRequest.Builder builder = new ShardUpsertRequest.Builder(
        TimeValue.timeValueMillis(10), false, false, null, new Reference[] { fooRef }, UUID.randomUUID());
    final BulkShardProcessor<ShardUpsertRequest> bulkShardProcessor = new BulkShardProcessor<>(
        clusterService,
        mock(TransportBulkCreateIndicesAction.class),
        new IndexNameExpressionResolver(Settings.EMPTY),
        Settings.EMPTY,
        coordinatorPool,
        false,
        1,
        builder,
        transportShardBulkAction,
        UUID.randomUUID());

    bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("1", null, new Object[] { "bar1" }, null), null);
    final ActionListener<ShardResponse> listener = ref.get();
    listener.onFailure(new EsRejectedExecutionException());
    // wait; the retry lock after a rejection is acquired in a separate thread
    listenerLatch.await(10, TimeUnit.SECONDS);

    final ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(2);
    try {
        final AtomicBoolean hadBlocked = new AtomicBoolean(false);
        final AtomicBoolean hasBlocked = new AtomicBoolean(true);
        final CountDownLatch latch = new CountDownLatch(1);
        scheduledExecutorService.execute(() -> {
            // sample hasBlocked 10ms later; if the add() below is still blocked, it is still true
            scheduledExecutorService.schedule(() -> {
                hadBlocked.set(hasBlocked.get());
                latch.countDown();
            }, 10, TimeUnit.MILLISECONDS);
            bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("2", null, new Object[] { "bar2" }, null), null);
            hasBlocked.set(false);
        });
        latch.await();
        assertTrue(hadBlocked.get());
    } finally {
        scheduledExecutorService.shutdownNow();
    }
}
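The mockShard(...) helper called in this test and in testKill is not part of this excerpt. As a rough, hypothetical reconstruction (the OperationRouting#indexShards signature and the ShardId constructor differ between Elasticsearch versions, so the calls below are assumptions rather than the actual helper):

    // Hypothetical sketch of mockShard; all it needs to do is make operationRouting
    // return a ShardIterator whose shardId() points at the given shard of index "foo".
    private void mockShard(OperationRouting operationRouting, Integer shardId) {
        ShardIterator shardIterator = mock(ShardIterator.class);
        when(operationRouting.indexShards(
            any(ClusterState.class), anyString(), anyString(), anyString()))
            .thenReturn(shardIterator);
        when(shardIterator.shardId()).thenReturn(new ShardId("foo", "_na_", shardId));
    }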
Class BulkShardProcessorTest, method testKill:
@Test
public void testKill() throws Exception {
    ClusterService clusterService = mock(ClusterService.class);
    OperationRouting operationRouting = mock(OperationRouting.class);
    mockShard(operationRouting, 1);
    mockShard(operationRouting, 2);
    mockShard(operationRouting, 3);
    when(clusterService.operationRouting()).thenReturn(operationRouting);

    final AtomicReference<ActionListener<ShardResponse>> ref = new AtomicReference<>();
    BulkRequestExecutor<ShardUpsertRequest> transportShardBulkAction = (request, listener) -> ref.set(listener);
    BulkRetryCoordinator bulkRetryCoordinator = new BulkRetryCoordinator(threadPool);
    BulkRetryCoordinatorPool coordinatorPool = mock(BulkRetryCoordinatorPool.class);
    when(coordinatorPool.coordinator(any(ShardId.class))).thenReturn(bulkRetryCoordinator);

    ShardUpsertRequest.Builder builder = new ShardUpsertRequest.Builder(
        TimeValue.timeValueMillis(10), false, false, null, new Reference[] { fooRef }, UUID.randomUUID());
    final BulkShardProcessor<ShardUpsertRequest> bulkShardProcessor = new BulkShardProcessor<>(
        clusterService,
        mock(TransportBulkCreateIndicesAction.class),
        new IndexNameExpressionResolver(Settings.EMPTY),
        Settings.EMPTY,
        coordinatorPool,
        false,
        1,
        builder,
        transportShardBulkAction,
        UUID.randomUUID());

    assertThat(bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("1", null, new Object[] { "bar1" }, null), null), is(true));
    bulkShardProcessor.kill(new InterruptedException());

    // the result future fails with the InterruptedException as its cause
    expectedException.expect(ExecutionException.class);
    expectedException.expectCause(isA(InterruptedException.class));
    bulkShardProcessor.result().get();
    // after kill it is no longer possible to add more items
    assertThat(bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("1", null, new Object[] { "bar1" }, null), null), is(false));
}
Class IndexWriterCountBatchIteratorTest, method getBulkShardProcessor:
private BulkShardProcessor<ShardUpsertRequest> getBulkShardProcessor() {
    UUID jobId = UUID.randomUUID();
    Reference rawSourceRef = new Reference(
        new ReferenceIdent(bulkImportIdent, DocSysColumns.RAW), RowGranularity.DOC, DataTypes.STRING);
    ShardUpsertRequest.Builder builder = new ShardUpsertRequest.Builder(
        CrateSettings.BULK_REQUEST_TIMEOUT.extractTimeValue(Settings.EMPTY),
        false, true, null, new Reference[] { rawSourceRef }, jobId, false);
    IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY);
    return new BulkShardProcessor<>(
        internalCluster().getInstance(ClusterService.class),
        internalCluster().getInstance(TransportBulkCreateIndicesAction.class),
        indexNameExpressionResolver, Settings.EMPTY,
        internalCluster().getInstance(BulkRetryCoordinatorPool.class), false, 2, builder,
        internalCluster().getInstance(TransportShardUpsertAction.class)::execute, jobId);
}
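A hedged usage sketch for this helper, assuming result() exposes a future of a per-item success BitSet (as suggested by the result().get() calls in the tests above) and using a placeholder table name and row payload:

    // Usage sketch only; "bulk_import" and the raw-source JSON string are placeholders.
    BulkShardProcessor<ShardUpsertRequest> processor = getBulkShardProcessor();
    processor.add("bulk_import", new ShardUpsertRequest.Item("1", null, new Object[] { "{\"id\": 1}" }, null), null);
    processor.close();                            // no further items; flush pending requests
    BitSet responses = processor.result().get();  // assumed to yield one bit per added item
    assertThat(responses.get(0), is(true));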
Class IndexWriterProjectorUnitTest, method testNullPKValue:
@Test
public void testNullPKValue() throws Throwable {
    InputCollectExpression sourceInput = new InputCollectExpression(0);
    List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);
    final IndexWriterProjector indexWriter = new IndexWriterProjector(
        clusterService, TestingHelpers.getFunctions(),
        new IndexNameExpressionResolver(Settings.EMPTY), Settings.EMPTY,
        mock(TransportBulkCreateIndicesAction.class), mock(BulkRequestExecutor.class),
        () -> "foo", mock(BulkRetryCoordinatorPool.class, Answers.RETURNS_DEEP_STUBS.get()),
        rawSourceReference, ImmutableList.of(ID_IDENT), Arrays.<Symbol>asList(new InputColumn(1)),
        null, null, sourceInput, collectExpressions, 20, null, null, false, false, UUID.randomUUID());

    // the second column (the primary key, InputColumn(1)) is null for this row
    RowN rowN = new RowN(new Object[] { new BytesRef("{\"y\": \"x\"}"), null });
    BatchIterator batchIterator = RowsBatchIterator.newInstance(Collections.singletonList(rowN), rowN.numColumns());
    batchIterator = indexWriter.apply(batchIterator);

    TestingBatchConsumer testingBatchConsumer = new TestingBatchConsumer();
    testingBatchConsumer.accept(batchIterator, null);

    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage("A primary key value must not be NULL");
    testingBatchConsumer.getResult();
}