Use of io.crate.metadata.Reference in project crate by crate.
In class OrderedLuceneBatchIteratorBenchmark, the method createLuceneBatchIterator:
@Setup
public void createLuceneBatchIterator() throws Exception {
    IndexWriter iw = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
    dummyShardId = new ShardId("dummy", 1);
    columnName = "x";
    // index 10 million documents with a numeric doc value so there is something to order by
    for (int i = 0; i < 10_000_000; i++) {
        Document doc = new Document();
        doc.add(new NumericDocValuesField(columnName, i));
        iw.addDocument(doc);
    }
    iw.commit();
    iw.forceMerge(1, true);
    indexSearcher = new IndexSearcher(DirectoryReader.open(iw, true));
    collectorContext = new CollectorContext(mock(IndexFieldDataService.class), new CollectorFieldsVisitor(0));
    // resolve every column to an integer field type so the doc values can be read as ints
    fieldTypeLookup = column -> {
        IntegerFieldMapper.IntegerFieldType integerFieldType = new IntegerFieldMapper.IntegerFieldType();
        integerFieldType.setNames(new MappedFieldType.Names(column));
        return integerFieldType;
    };
    reference = new Reference(new ReferenceIdent(new TableIdent(null, "dummyTable"), columnName), RowGranularity.DOC, DataTypes.INTEGER);
    orderBy = new OrderBy(Collections.singletonList(reference), reverseFlags, nullsFirst);
}
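The setup assigns several instance fields that the listing does not show. A minimal sketch of how they might be declared on the benchmark class; the FieldTypeLookup type and the concrete reverseFlags/nullsFirst values are assumptions, not taken from the original source:

private ShardId dummyShardId;
private String columnName;
private IndexSearcher indexSearcher;
private CollectorContext collectorContext;
private FieldTypeLookup fieldTypeLookup;
private Reference reference;
private OrderBy orderBy;
// assumed ordering flags: a single column, ascending, nulls position unspecified
private boolean[] reverseFlags = new boolean[]{false};
private Boolean[] nullsFirst = new Boolean[]{null};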
Use of io.crate.metadata.Reference in project crate by crate.
In class BulkShardProcessorTest, the method testNonEsRejectedExceptionDoesNotResultInRetryButAborts:
@Test
public void testNonEsRejectedExceptionDoesNotResultInRetryButAborts() throws Throwable {
    expectedException.expect(RuntimeException.class);
    expectedException.expectMessage("a random exception");
    // capture the listener so the shard response can be failed manually
    final AtomicReference<ActionListener<ShardResponse>> ref = new AtomicReference<>();
    BulkRequestExecutor<ShardUpsertRequest> transportShardBulkAction = (request, listener) -> ref.set(listener);
    BulkRetryCoordinator bulkRetryCoordinator = new BulkRetryCoordinator(threadPool);
    BulkRetryCoordinatorPool coordinatorPool = mock(BulkRetryCoordinatorPool.class);
    when(coordinatorPool.coordinator(any(ShardId.class))).thenReturn(bulkRetryCoordinator);
    ShardUpsertRequest.Builder builder = new ShardUpsertRequest.Builder(TimeValue.timeValueMillis(10), false, false, null, new Reference[] { fooRef }, UUID.randomUUID());
    final BulkShardProcessor<ShardUpsertRequest> bulkShardProcessor = new BulkShardProcessor<>(clusterService, mock(TransportBulkCreateIndicesAction.class), new IndexNameExpressionResolver(Settings.EMPTY), Settings.EMPTY, coordinatorPool, false, 1, builder, transportShardBulkAction, UUID.randomUUID());
    bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("1", null, new Object[] { "bar1" }, null), null);
    ActionListener<ShardResponse> listener = ref.get();
    // a failure that is not an EsRejectedExecutionException must abort instead of being retried
    listener.onFailure(new RuntimeException("a random exception"));
    assertFalse(bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("2", null, new Object[] { "bar2" }, null), null));
    try {
        bulkShardProcessor.result().get();
    } catch (ExecutionException e) {
        throw e.getCause();
    } finally {
        bulkShardProcessor.close();
    }
}
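These BulkShardProcessorTest snippets rely on a few fixtures that the listing omits. A hedged sketch of how they might be declared; the table and column names behind fooRef are assumptions, and the Reference constructor simply mirrors the one shown in the benchmark snippet above:

@Rule
public ExpectedException expectedException = ExpectedException.none();

// threadPool and clusterService are assumed to be set up elsewhere in the test class
private ThreadPool threadPool;
private ClusterService clusterService;

// assumed column reference for the "foo" table used by the upsert requests
private static final Reference fooRef = new Reference(
    new ReferenceIdent(new TableIdent(null, "foo"), "foo"),
    RowGranularity.DOC,
    DataTypes.STRING);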
Use of io.crate.metadata.Reference in project crate by crate.
In class BulkShardProcessorTest, the method testThatAddAfterFailureBlocksDueToRetry:
@Test
public void testThatAddAfterFailureBlocksDueToRetry() throws Exception {
    ClusterService clusterService = mock(ClusterService.class);
    OperationRouting operationRouting = mock(OperationRouting.class);
    mockShard(operationRouting, 1);
    mockShard(operationRouting, 2);
    mockShard(operationRouting, 3);
    when(clusterService.operationRouting()).thenReturn(operationRouting);
    // the listener is executed twice: once for the successfully added row and once for the failure
    final CountDownLatch listenerLatch = new CountDownLatch(2);
    final AtomicReference<ActionListener<ShardResponse>> ref = new AtomicReference<>();
    BulkRequestExecutor<ShardUpsertRequest> transportShardBulkAction = (request, listener) -> {
        ref.set(listener);
        listenerLatch.countDown();
    };
    BulkRetryCoordinator bulkRetryCoordinator = new BulkRetryCoordinator(threadPool);
    BulkRetryCoordinatorPool coordinatorPool = mock(BulkRetryCoordinatorPool.class);
    when(coordinatorPool.coordinator(any(ShardId.class))).thenReturn(bulkRetryCoordinator);
    ShardUpsertRequest.Builder builder = new ShardUpsertRequest.Builder(TimeValue.timeValueMillis(10), false, false, null, new Reference[] { fooRef }, UUID.randomUUID());
    final BulkShardProcessor<ShardUpsertRequest> bulkShardProcessor = new BulkShardProcessor<>(clusterService, mock(TransportBulkCreateIndicesAction.class), new IndexNameExpressionResolver(Settings.EMPTY), Settings.EMPTY, coordinatorPool, false, 1, builder, transportShardBulkAction, UUID.randomUUID());
    bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("1", null, new Object[] { "bar1" }, null), null);
    final ActionListener<ShardResponse> listener = ref.get();
    listener.onFailure(new EsRejectedExecutionException());
    // wait; the failure/retry lock is acquired in a separate thread
    listenerLatch.await(10, TimeUnit.SECONDS);
    final ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(2);
    try {
        final AtomicBoolean hadBlocked = new AtomicBoolean(false);
        final AtomicBoolean hasBlocked = new AtomicBoolean(true);
        final CountDownLatch latch = new CountDownLatch(1);
        scheduledExecutorService.execute(new Runnable() {

            @Override
            public void run() {
                scheduledExecutorService.schedule(new Runnable() {

                    @Override
                    public void run() {
                        hadBlocked.set(hasBlocked.get());
                        latch.countDown();
                    }
                }, 10, TimeUnit.MILLISECONDS);
                bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("2", null, new Object[] { "bar2" }, null), null);
                hasBlocked.set(false);
            }
        });
        latch.await();
        assertTrue(hadBlocked.get());
    } finally {
        scheduledExecutorService.shutdownNow();
    }
}
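The mockShard helper used in these tests is not part of the listing. A rough sketch of what it presumably does, stubbing OperationRouting so every routed request resolves to a shard iterator with the given shard id; the exact signature is an assumption:

private void mockShard(OperationRouting operationRouting, Integer shardId) {
    // assumption: route every (index, type, id, routing) lookup to a mocked shard iterator
    ShardIterator shardIterator = mock(ShardIterator.class);
    when(operationRouting.indexShards(
        any(ClusterState.class),
        anyString(),
        anyString(),
        anyString(),
        anyString())).thenReturn(shardIterator);
    when(shardIterator.shardId()).thenReturn(new ShardId("foo", shardId));
}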
Use of io.crate.metadata.Reference in project crate by crate.
In class BulkShardProcessorTest, the method testKill:
@Test
public void testKill() throws Exception {
    ClusterService clusterService = mock(ClusterService.class);
    OperationRouting operationRouting = mock(OperationRouting.class);
    mockShard(operationRouting, 1);
    mockShard(operationRouting, 2);
    mockShard(operationRouting, 3);
    when(clusterService.operationRouting()).thenReturn(operationRouting);
    final AtomicReference<ActionListener<ShardResponse>> ref = new AtomicReference<>();
    BulkRequestExecutor<ShardUpsertRequest> transportShardBulkAction = (request, listener) -> ref.set(listener);
    BulkRetryCoordinator bulkRetryCoordinator = new BulkRetryCoordinator(threadPool);
    BulkRetryCoordinatorPool coordinatorPool = mock(BulkRetryCoordinatorPool.class);
    when(coordinatorPool.coordinator(any(ShardId.class))).thenReturn(bulkRetryCoordinator);
    ShardUpsertRequest.Builder builder = new ShardUpsertRequest.Builder(TimeValue.timeValueMillis(10), false, false, null, new Reference[] { fooRef }, UUID.randomUUID());
    final BulkShardProcessor<ShardUpsertRequest> bulkShardProcessor = new BulkShardProcessor<>(clusterService, mock(TransportBulkCreateIndicesAction.class), new IndexNameExpressionResolver(Settings.EMPTY), Settings.EMPTY, coordinatorPool, false, 1, builder, transportShardBulkAction, UUID.randomUUID());
    assertThat(bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("1", null, new Object[] { "bar1" }, null), null), is(true));
    bulkShardProcessor.kill(new InterruptedException());
    // the kill surfaces as an ExecutionException caused by the InterruptedException
    expectedException.expect(ExecutionException.class);
    expectedException.expectCause(isA(InterruptedException.class));
    bulkShardProcessor.result().get();
    // after the kill it is no longer possible to add more items
    assertThat(bulkShardProcessor.add("foo", new ShardUpsertRequest.Item("1", null, new Object[] { "bar1" }, null), null), is(false));
}
Use of io.crate.metadata.Reference in project crate by crate.
In class ValueNormalizer, the method normalizeObjectValue:
@SuppressWarnings("unchecked")
private void normalizeObjectValue(Map<String, Object> value, Reference info) {
    for (Map.Entry<String, Object> entry : value.entrySet()) {
        AnalyzedColumnDefinition.validateName(entry.getKey());
        ColumnIdent nestedIdent = ColumnIdent.getChild(info.ident().columnIdent(), entry.getKey());
        TableInfo tableInfo = schemas.getTableInfo(info.ident().tableIdent());
        Reference nestedInfo = tableInfo.getReference(nestedIdent);
        if (nestedInfo == null) {
            // unknown nested column: skip it for IGNORED object columns,
            // otherwise try to resolve a dynamic reference with a guessed type
            if (info.columnPolicy() == ColumnPolicy.IGNORED) {
                continue;
            }
            DynamicReference dynamicReference = null;
            if (tableInfo instanceof DocTableInfo) {
                dynamicReference = ((DocTableInfo) tableInfo).getDynamic(nestedIdent, true);
            }
            if (dynamicReference == null) {
                throw new ColumnUnknownException(nestedIdent.sqlFqn());
            }
            DataType type = DataTypes.guessType(entry.getValue());
            if (type == null) {
                throw new ColumnValidationException(info.ident().columnIdent().sqlFqn(), "Invalid value");
            }
            dynamicReference.valueType(type);
            nestedInfo = dynamicReference;
        } else {
            if (entry.getValue() == null) {
                continue;
            }
        }
        // recurse into nested objects and object arrays; normalize everything else as a primitive value
        if (nestedInfo.valueType() == DataTypes.OBJECT && entry.getValue() instanceof Map) {
            normalizeObjectValue((Map<String, Object>) entry.getValue(), nestedInfo);
        } else if (isObjectArray(nestedInfo.valueType()) && entry.getValue() instanceof Object[]) {
            normalizeObjectArrayValue((Object[]) entry.getValue(), nestedInfo);
        } else {
            entry.setValue(normalizePrimitiveValue(entry.getValue(), nestedInfo));
        }
    }
}
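The isObjectArray check used above is a small helper that the listing does not include. A hedged sketch of what it plausibly tests, namely an array type whose inner type is OBJECT; this is an assumption, not the original Crate implementation:

// assumption: a value type counts as an object array when it is an ArrayType wrapping OBJECT
private static boolean isObjectArray(DataType type) {
    return type instanceof ArrayType
        && ((ArrayType) type).innerType().equals(DataTypes.OBJECT);
}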