Use of org.apache.druid.data.input.Firehose in project druid by druid-io.
From the class BatchDeltaIngestionTest, method testIngestion:
private void testIngestion(
    HadoopDruidIndexerConfig config,
    List<ImmutableMap<String, Object>> expectedRowsGenerated,
    WindowedDataSegment windowedDataSegment,
    List<String> expectedDimensions,
    List<String> expectedMetrics
) throws Exception {
  // Run the Hadoop index-generator job and publish the resulting segments.
  IndexGeneratorJob job = new IndexGeneratorJob(config);
  Assert.assertTrue(JobHelper.runJobs(ImmutableList.of(job)));
  List<DataSegmentAndIndexZipFilePath> dataSegmentAndIndexZipFilePaths =
      IndexGeneratorJob.getPublishedSegmentAndIndexZipFilePaths(config);
  JobHelper.renameIndexFilesForSegments(config.getSchema(), dataSegmentAndIndexZipFilePaths);
  // The intermediate working path should be deleted once the job finishes.
  JobHelper.maybeDeleteIntermediatePath(true, config.getSchema());
  File workingPath = new File(config.makeIntermediatePath().toUri().getPath());
  Assert.assertFalse(workingPath.exists());
  // The segment folder and its index.zip should exist under the segment output path.
  File segmentFolder = new File(StringUtils.format(
      "%s/%s/%s_%s/%s/0",
      config.getSchema().getIOConfig().getSegmentOutputPath(),
      config.getSchema().getDataSchema().getDataSource(),
      INTERVAL_FULL.getStart().toString(),
      INTERVAL_FULL.getEnd().toString(),
      config.getSchema().getTuningConfig().getVersion()
  ));
  Assert.assertTrue(segmentFolder.exists());
  File indexZip = new File(segmentFolder, "index.zip");
  Assert.assertTrue(indexZip.exists());
  // Unzip the segment, load it, and read it back through an IngestSegmentFirehose.
  File tmpUnzippedSegmentDir = temporaryFolder.newFolder();
  new LocalDataSegmentPuller().getSegmentFiles(indexZip, tmpUnzippedSegmentDir);
  QueryableIndex index = INDEX_IO.loadIndex(tmpUnzippedSegmentDir);
  StorageAdapter adapter = new QueryableIndexStorageAdapter(index);
  Firehose firehose = new IngestSegmentFirehose(
      ImmutableList.of(new WindowedStorageAdapter(adapter, windowedDataSegment.getInterval())),
      TransformSpec.NONE, expectedDimensions, expectedMetrics, null
  );
  List<InputRow> rows = new ArrayList<>();
  while (firehose.hasMore()) {
    rows.add(firehose.nextRow());
  }
  verifyRows(expectedRowsGenerated, rows, expectedDimensions, expectedMetrics);
}
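Note that testIngestion never closes the firehose; in a test the leak is harmless, but Firehose implements Closeable, and the SqlFirehoseFactoryTest examples further below wrap the same drain loop in try-with-resources. A minimal sketch of that pattern as a reusable helper (drainFirehose is an illustrative name, not a Druid API):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.druid.data.input.Firehose;
import org.apache.druid.data.input.InputRow;

public final class FirehoseExamples {
  private FirehoseExamples() {}

  // Drains every remaining row from the firehose into a list, then closes it.
  // try-with-resources guarantees close() even if nextRow() throws.
  public static List<InputRow> drainFirehose(Firehose firehose) throws IOException {
    final List<InputRow> rows = new ArrayList<>();
    try (Firehose f = firehose) {
      while (f.hasMore()) {
        rows.add(f.nextRow());
      }
    }
    return rows;
  }
}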
Use of org.apache.druid.data.input.Firehose in project druid by druid-io.
From the class CombiningFirehoseFactoryTest, method testCombiningfirehose:
@Test
public void testCombiningfirehose() throws IOException {
  final Firehose firehose = combiningFirehoseFactory.connect(null, null);
  // Each of the five expected rows arrives in order across the delegate firehoses.
  for (int i = 1; i < 6; i++) {
    Assert.assertTrue(firehose.hasMore());
    final InputRow inputRow = firehose.nextRow();
    Assert.assertEquals(i, inputRow.getTimestampFromEpoch());
    Assert.assertEquals(i, inputRow.getMetric("test").floatValue(), 0);
  }
  Assert.assertFalse(firehose.hasMore());
}
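The fixture combiningFirehoseFactory concatenates several delegate firehoses, which is where the consecutive timestamps 1 through 5 come from. A minimal construction sketch, assuming CombiningFirehoseFactory's public delegates-list constructor in org.apache.druid.segment.realtime.firehose (the combine helper is illustrative, not a Druid API):

import java.util.List;
import org.apache.druid.data.input.FirehoseFactory;
import org.apache.druid.segment.realtime.firehose.CombiningFirehoseFactory;

public final class CombiningExample {
  private CombiningExample() {}

  // The combined firehose exhausts each delegate's rows in list order before
  // opening the next delegate, so row order follows the delegate order.
  public static FirehoseFactory combine(List<FirehoseFactory> delegates) {
    return new CombiningFirehoseFactory(delegates);
  }
}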
Use of org.apache.druid.data.input.Firehose in project druid by druid-io.
From the class InlineFirehoseFactoryTest, method testConnect:
@Test
public void testConnect() throws IOException {
  Firehose firehose = target.connect(PARSER, NO_TEMP_DIR);
  // Read the first row and check its DIMENSION_1 value.
  InputRow row = firehose.nextRow();
  Assert.assertNotNull(row);
  List<String> values = row.getDimension(DIMENSION_1);
  Assert.assertNotNull(values);
  Assert.assertEquals(1, values.size());
  Assert.assertEquals(VALUE, values.get(0));
}
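InlineFirehoseFactory carries its input data directly in the spec, which makes it convenient for tests. Outside of unit tests it is usually built from JSON rather than constructed directly; a hedged sketch, assuming the firehose subtype is registered under the type name "inline" on the supplied ObjectMapper:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import org.apache.druid.data.input.FirehoseFactory;

public final class InlineFirehoseExample {
  private InlineFirehoseExample() {}

  // Deserializes an inline firehose spec. The mapper must have Druid's
  // firehose subtypes registered, or the "inline" type will not resolve.
  public static FirehoseFactory fromSpec(ObjectMapper mapper, String data) throws IOException {
    final String spec = "{\"type\":\"inline\",\"data\":" + mapper.writeValueAsString(data) + "}";
    return mapper.readValue(spec, FirehoseFactory.class);
  }
}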
Use of org.apache.druid.data.input.Firehose in project druid by druid-io.
From the class SqlFirehoseFactoryTest, method testWithoutCache:
@Test
public void testWithoutCache() throws IOException {
  derbyConnector = derbyConnectorRule.getConnector();
  SqlTestUtils testUtils = new SqlTestUtils(derbyConnector);
  testUtils.createAndUpdateTable(TABLE_NAME_1, 10);
  final SqlFirehoseFactory factory = new SqlFirehoseFactory(
      SQLLIST1, 0L, null, null, null, true, testUtils.getDerbyFirehoseConnector(), mapper
  );
  final List<Row> rows = new ArrayList<>();
  final File firehoseTmpDir = createFirehoseTmpDir("testWithoutCache");
  // try-with-resources closes the firehose once all rows are drained.
  try (Firehose firehose = factory.connect(parser, firehoseTmpDir)) {
    while (firehose.hasMore()) {
      rows.add(firehose.nextRow());
    }
  }
  assertResult(rows, SQLLIST1);
  // Caching is disabled in this factory, so no cache files should remain.
  assertNumRemainingCacheFiles(firehoseTmpDir, 0);
  testUtils.dropTable(TABLE_NAME_1);
}
Use of org.apache.druid.data.input.Firehose in project druid by druid-io.
From the class SqlFirehoseFactoryTest, method testWithCacheAndFetch:
@Test
public void testWithCacheAndFetch() throws IOException {
  derbyConnector = derbyConnectorRule.getConnector();
  SqlTestUtils testUtils = new SqlTestUtils(derbyConnector);
  testUtils.createAndUpdateTable(TABLE_NAME_1, 10);
  testUtils.createAndUpdateTable(TABLE_NAME_2, 10);
  final SqlFirehoseFactory factory = new SqlFirehoseFactory(
      SQLLIST2, null, null, 0L, null, true, testUtils.getDerbyFirehoseConnector(), mapper
  );
  final List<Row> rows = new ArrayList<>();
  final File firehoseTmpDir = createFirehoseTmpDir("testWithCacheAndFetch");
  try (Firehose firehose = factory.connect(parser, firehoseTmpDir)) {
    while (firehose.hasMore()) {
      rows.add(firehose.nextRow());
    }
  }
  assertResult(rows, SQLLIST2);
  // With caching enabled, one cache file per query in SQLLIST2 remains.
  assertNumRemainingCacheFiles(firehoseTmpDir, 2);
  testUtils.dropTable(TABLE_NAME_1);
  testUtils.dropTable(TABLE_NAME_2);
}
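The two SqlFirehoseFactoryTest examples differ only in which positional argument receives 0L. A hedged comparison sketch: the package locations and parameter-name mapping below are assumptions drawn from Druid's PrefetchSqlFirehoseFactory base class (sqls, maxCacheCapacityBytes, maxFetchCapacityBytes, prefetchTriggerBytes, fetchTimeout, foldCase, connector, objectMapper), and the helper names are illustrative:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.List;
import org.apache.druid.firehose.sql.SqlFirehoseFactory;
import org.apache.druid.metadata.SQLFirehoseDatabaseConnector;

public final class SqlFirehoseExamples {
  private SqlFirehoseExamples() {}

  // Mirrors testWithoutCache: maxCacheCapacityBytes = 0L disables the local
  // cache, matching assertNumRemainingCacheFiles(firehoseTmpDir, 0).
  public static SqlFirehoseFactory withoutCache(
      List<String> sqls, SQLFirehoseDatabaseConnector connector, ObjectMapper mapper) {
    return new SqlFirehoseFactory(sqls, 0L, null, null, null, true, connector, mapper);
  }

  // Mirrors testWithCacheAndFetch: prefetchTriggerBytes = 0L forces fetching
  // while caching stays at its (enabled) default, leaving one cache file per
  // query, matching assertNumRemainingCacheFiles(firehoseTmpDir, 2).
  public static SqlFirehoseFactory withCacheAndFetch(
      List<String> sqls, SQLFirehoseDatabaseConnector connector, ObjectMapper mapper) {
    return new SqlFirehoseFactory(sqls, null, null, 0L, null, true, connector, mapper);
  }
}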