Use of io.crate.testing.TestingRowConsumer in project crate by crate.
In the class OrderedLuceneBatchIteratorFactoryTest, the method test_ensure_lucene_ordered_collector_propagates_kill.
@Test
public void test_ensure_lucene_ordered_collector_propagates_kill() throws Exception {
    LuceneOrderedDocCollector luceneOrderedDocCollector = createOrderedCollector(searcher1, 1);
    AtomicReference<Thread> collectThread = new AtomicReference<>();
    CountDownLatch latch = new CountDownLatch(1);
    BatchIterator<Row> rowBatchIterator = OrderedLuceneBatchIteratorFactory.newInstance(
        Collections.singletonList(luceneOrderedDocCollector),
        OrderingByPosition.rowOrdering(new int[] { 0 }, reverseFlags, nullsFirst),
        mock(RowAccounting.class),
        c -> {
            var t = new Thread(c);
            collectThread.set(t);
            t.start();
            latch.countDown();
        },
        () -> 1,
        true
    );
    TestingRowConsumer consumer = new TestingRowConsumer();
    consumer.accept(rowBatchIterator, null);
    // Ensure that the collect thread is running
    latch.await(1, TimeUnit.SECONDS);
    assertThat(collectThread.get().isAlive(), is(true));
    rowBatchIterator.kill(new InterruptedException("killed"));
    expectedException.expect(InterruptedException.class);
    consumer.getResult();
}
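The kill issued on the iterator is expected to resurface when the consumer's result is read. As a minimal sketch of the accept-and-read pattern that all of the examples on this page share, assuming an in-memory source (the rows are made up; SENTINEL is the sentinel row constant that also appears in the IndexWriterProjectorTest example further down):

// Sketch only: drive a BatchIterator with TestingRowConsumer and wait for the result.
BatchIterator<Row> source = InMemoryBatchIterator.of(
    List.of(new RowN(new Object[] { 1 }), new RowN(new Object[] { 2 })),
    SENTINEL,
    true);
TestingRowConsumer consumer = new TestingRowConsumer();
consumer.accept(source, null);        // starts consuming; failures are captured rather than thrown here
Bucket rows = consumer.getBucket();   // blocks until the iterator is exhausted and rethrows any captured failure
// In the kill test above, getResult() is the call that surfaces the InterruptedException.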
Use of io.crate.testing.TestingRowConsumer in project crate by crate.
In the class FileReadingCollectorTest, the method testCollectFromDirectory.
@Test
public void testCollectFromDirectory() throws Throwable {
    TestingRowConsumer projector = getObjects(
        Paths.get(tmpFile.getParentFile().toURI()).toUri().toString() + "*");
    assertCorrectResult(projector.getBucket());
}
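getObjects is a helper of this test class (not shown here) that feeds the given URI into a file-reading collector and returns the TestingRowConsumer. The interesting part is the URI itself: the parent directory of the temporary file plus a trailing * glob. A sketch of how that string comes together (the path is hypothetical; the test's setup creates the real tmpFile):

// Sketch only: the temporary path is made up for illustration.
File tmpFile = new File("/tmp/junit4711/import.json");
String fileUriGlob = Paths.get(tmpFile.getParentFile().toURI())
    .toUri()
    .toString() + "*";
// e.g. "file:///tmp/junit4711/*", i.e. every file in the directory containing tmpFile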
Use of io.crate.testing.TestingRowConsumer in project crate by crate.
In the class FileReadingCollectorTest, the method testDoCollectRawFromCompressed.
@Test
public void testDoCollectRawFromCompressed() throws Throwable {
    TestingRowConsumer consumer = getObjects(
        Collections.singletonList(Paths.get(tmpFileGz.toURI()).toUri().toString()),
        "gzip");
    assertCorrectResult(consumer.getBucket());
}
Use of io.crate.testing.TestingRowConsumer in project crate by crate.
In the class FileWriterProjectorTest, the method testFileAsDirectory.
@Test
public void testFileAsDirectory() throws Exception {
    expectedException.expect(UnhandledServerException.class);
    expectedException.expectMessage("Failed to open output");
    String uri = Paths.get(folder.newFile().toURI()).resolve("out.json").toUri().toString();
    FileWriterProjector fileWriterProjector = new FileWriterProjector(
        executorService,
        uri,
        null,
        null,
        Set.of(),
        new HashMap<>(),
        null,
        WriterProjection.OutputFormat.JSON_OBJECT,
        Map.of(LocalFsFileOutputFactory.NAME, new LocalFsFileOutputFactory()),
        Settings.EMPTY
    );
    new TestingRowConsumer().accept(fileWriterProjector.apply(sourceSupplier.get()), null);
}
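The expected failure comes from the shape of the target URI rather than from the projector itself: folder.newFile() creates a regular file, and out.json is resolved underneath it, so the LocalFsFileOutputFactory cannot open the output. A sketch of the path involved (the temporary names are hypothetical; TemporaryFolder picks the real ones at runtime):

// Sketch only: illustrative temporary names.
File existingFile = folder.newFile();              // e.g. /tmp/junit0815/file1.tmp (a regular file)
String uri = Paths.get(existingFile.toURI())
    .resolve("out.json")                           // .../file1.tmp/out.json
    .toUri()
    .toString();
// The parent of out.json is a file, not a directory, so opening the output fails,
// which the test expects to surface as UnhandledServerException("Failed to open output").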
Use of io.crate.testing.TestingRowConsumer in project crate by crate.
In the class IndexWriterProjectorTest, the method testIndexWriter.
@Test
public void testIndexWriter() throws Throwable {
    execute("create table bulk_import (id int primary key, name string) with (number_of_replicas=0)");
    ensureGreen();
    InputCollectExpression sourceInput = new InputCollectExpression(1);
    List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);
    RelationName bulkImportIdent = new RelationName(sqlExecutor.getCurrentSchema(), "bulk_import");
    ClusterState state = clusterService().state();
    Settings tableSettings = TableSettingsResolver.get(state.getMetadata(), bulkImportIdent, false);
    ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class);
    IndexWriterProjector writerProjector = new IndexWriterProjector(
        clusterService(),
        new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        new NoopCircuitBreaker("dummy"),
        RamAccounting.NO_ACCOUNTING,
        threadPool.scheduler(),
        threadPool.executor(ThreadPool.Names.SEARCH),
        CoordinatorTxnCtx.systemTransactionContext(),
        new NodeContext(internalCluster().getInstance(Functions.class)),
        Settings.EMPTY,
        IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(tableSettings),
        NumberOfReplicas.fromSettings(tableSettings, state.getNodes().getSize()),
        internalCluster().getInstance(TransportCreatePartitionsAction.class),
        internalCluster().getInstance(TransportShardUpsertAction.class)::execute,
        IndexNameResolver.forTable(bulkImportIdent),
        new Reference(new ReferenceIdent(bulkImportIdent, DocSysColumns.RAW), RowGranularity.DOC, DataTypes.STRING, 0, null),
        Collections.singletonList(ID_IDENT),
        Collections.<Symbol>singletonList(new InputColumn(0)),
        null,
        null,
        sourceInput,
        collectExpressions,
        20,
        null,
        null,
        false,
        false,
        UUID.randomUUID(),
        UpsertResultContext.forRowCount(),
        false
    );
    BatchIterator rowsIterator = InMemoryBatchIterator.of(
        IntStream.range(0, 100)
            .mapToObj(i -> new RowN(new Object[] { i, "{\"id\": " + i + ", \"name\": \"Arthur\"}" }))
            .collect(Collectors.toList()),
        SENTINEL,
        true
    );
    TestingRowConsumer consumer = new TestingRowConsumer();
    consumer.accept(writerProjector.apply(rowsIterator), null);
    Bucket objects = consumer.getBucket();
    assertThat(objects, contains(isRow(100L)));
    execute("refresh table bulk_import");
    execute("select count(*) from bulk_import");
    assertThat(response.rowCount(), is(1L));
    assertThat(response.rows()[0][0], is(100L));
}
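The single row asserted on the consumer is the summary the projector emits once all upserts have completed: 100 generated source rows go in, the UpsertResultContext.forRowCount() result yields one row containing the written row count (100L), and the subsequent count(*) query cross-checks that all 100 documents actually landed in the table.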