Use of org.apache.druid.segment.QueryableIndexSegment in project druid by druid-io.
The class GroupByLimitPushDownInsufficientBufferTest, method getRunner2.
private List<QueryRunner<ResultRow>> getRunner2()
{
  List<QueryRunner<ResultRow>> runners = new ArrayList<>();
  QueryableIndex index2 = groupByIndices.get(1);
  // Wrap the mapped index in a QueryableIndexSegment and build a per-segment runner.
  QueryRunner<ResultRow> tooSmallRunner = makeQueryRunner(
      tooSmallGroupByFactory,
      SegmentId.dummy(index2.toString()),
      new QueryableIndexSegment(index2, SegmentId.dummy(index2.toString()))
  );
  // Apply the toolchest's pre-merge decoration, as done on a data node before merging.
  runners.add(tooSmallGroupByFactory.getToolchest().preMergeQueryDecoration(tooSmallRunner));
  return runners;
}
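These per-segment runners are not queried directly; the test merges them through the factory and finalizes through the toolchest before running a query. A minimal sketch of that downstream pattern, assuming the test's executorService and query fields (names are illustrative; the bare-ExecutorService overload of mergeRunners is an assumption, and recent Druid versions take a QueryProcessingPool instead):
QueryToolChest<ResultRow, GroupByQuery> toolChest = tooSmallGroupByFactory.getToolchest();
// Merge the per-segment runners, then finalize results as a broker would.
QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
    toolChest.mergeResults(tooSmallGroupByFactory.mergeRunners(executorService, getRunner2())),
    (QueryToolChest) toolChest
);
Sequence<ResultRow> queryResult = theRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
List<ResultRow> results = queryResult.toList();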
Use of org.apache.druid.segment.QueryableIndexSegment in project druid by druid-io.
The class GroupByLimitPushDownMultiNodeMergeTest, method getRunner1.
private List<QueryRunner<ResultRow>> getRunner1(int qIndexNumber)
{
  List<QueryRunner<ResultRow>> runners = new ArrayList<>();
  QueryableIndex index = groupByIndices.get(qIndexNumber);
  // Same pattern as above, but parameterized on which mapped index to serve.
  QueryRunner<ResultRow> runner = makeQueryRunner(
      groupByFactory,
      SegmentId.dummy(index.toString()),
      new QueryableIndexSegment(index, SegmentId.dummy(index.toString()))
  );
  runners.add(groupByFactory.getToolchest().preMergeQueryDecoration(runner));
  return runners;
}
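getRunner1 takes an index number because this test simulates a multi-node merge: each simulated data node builds runners over a different QueryableIndex. A hedged sketch of how the two node-level runners could be assembled (groupByFactory2 and executorService are assumed fields, mirroring the naming above; the actual test wiring may differ):
QueryToolChest<ResultRow, GroupByQuery> toolChest = groupByFactory.getToolchest();
// Each simulated "historical" merges its own per-segment runners.
QueryRunner<ResultRow> node1 = toolChest.mergeResults(groupByFactory.mergeRunners(executorService, getRunner1(0)));
QueryRunner<ResultRow> node2 = toolChest.mergeResults(groupByFactory2.mergeRunners(executorService, getRunner1(1)));
// A broker-level merge over node1 and node2 would then be applied before
// finalizing, so limit push-down happens beneath both merge layers.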
Use of org.apache.druid.segment.QueryableIndexSegment in project druid by druid-io.
The class FireHydrantTest, method setup.
@Before
public void setup()
{
  incrementalIndexSegment = new IncrementalIndexSegment(TestIndex.getIncrementalTestIndex(), SegmentId.dummy("test"));
  queryableIndexSegment = new QueryableIndexSegment(TestIndex.getMMappedTestIndex(), SegmentId.dummy("test"));
  // The hydrant starts out with the incremental segment loaded.
  hydrant = new FireHydrant(incrementalIndexSegment, 0);
}
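The transition FireHydrant exists to manage is the swap from the incremental segment to the mmapped one, the same move persistHydrant makes further down this page. A minimal sketch of exercising it, using only methods that appear elsewhere on this page (the actual assertions in FireHydrantTest may differ):
// Swap in the mmapped segment, as an appenderator does after a persist.
hydrant.swapSegment(queryableIndexSegment);
Assert.assertTrue(hydrant.hasSwapped());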
Use of org.apache.druid.segment.QueryableIndexSegment in project druid by druid-io.
The class SegmentAnalyzerTest, method testAnalyzingSegmentWithNonExistentAggregator.
/**
 * This test verifies that if a segment was created using an unknown/invalid aggregator
 * (which can happen if an aggregator is removed in a later version), then analyzing
 * the segment doesn't fail, and the analysis of the complex column is reported as an error.
 *
 * @throws IOException if persisting or reloading the segment fails
 */
@Test
public void testAnalyzingSegmentWithNonExistentAggregator() throws IOException
{
  final URL resource = SegmentAnalyzerTest.class.getClassLoader().getResource("druid.sample.numeric.tsv");
  final CharSource source = Resources.asByteSource(resource).asCharSource(StandardCharsets.UTF_8);
  final String invalid_aggregator = "invalid_aggregator";
  final AggregatorFactory[] metrics = new AggregatorFactory[]{
      new DoubleSumAggregatorFactory(TestIndex.DOUBLE_METRICS[0], "index"),
      new HyperUniquesAggregatorFactory("quality_uniques", "quality"),
      new InvalidAggregatorFactory(invalid_aggregator, "quality")
  };
  final IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
      .withMinTimestamp(DateTimes.of("2011-01-12T00:00:00.000Z").getMillis())
      .withTimestampSpec(new TimestampSpec("ds", "auto", null))
      .withDimensionsSpec(TestIndex.DIMENSIONS_SPEC)
      .withMetrics(metrics)
      .withRollup(true)
      .build();
  final IncrementalIndex retVal = new OnheapIncrementalIndex.Builder()
      .setIndexSchema(schema)
      .setMaxRowCount(10000)
      .build();
  final IncrementalIndex incrementalIndex = TestIndex.loadIncrementalIndex(retVal, source);

  // Analyze the in-memory segment.
  {
    final SegmentAnalyzer analyzer = new SegmentAnalyzer(EnumSet.of(SegmentMetadataQuery.AnalysisType.SIZE));
    final IncrementalIndexSegment segment = new IncrementalIndexSegment(incrementalIndex, SegmentId.dummy("ds"));
    final Map<String, ColumnAnalysis> analyses = analyzer.analyze(segment);
    final ColumnAnalysis columnAnalysis = analyses.get(invalid_aggregator);
    // The invalid aggregator's serde is still registered at this point, so analysis succeeds.
    Assert.assertFalse(columnAnalysis.isError());
    Assert.assertEquals("invalid_complex_column_type", columnAnalysis.getType());
    Assert.assertEquals(ColumnType.ofComplex("invalid_complex_column_type"), columnAnalysis.getTypeSignature());
  }

  // Persist the index.
  final File segmentFile = TestIndex.INDEX_MERGER.persist(
      incrementalIndex,
      temporaryFolder.newFolder(),
      TestIndex.INDEX_SPEC,
      null
  );

  // Unload the complex serde, then analyze the persisted segment.
  ComplexMetrics.unregisterSerde(InvalidAggregatorFactory.TYPE);
  {
    final SegmentAnalyzer analyzer = new SegmentAnalyzer(EnumSet.of(SegmentMetadataQuery.AnalysisType.SIZE));
    final QueryableIndexSegment segment = new QueryableIndexSegment(
        TestIndex.INDEX_IO.loadIndex(segmentFile),
        SegmentId.dummy("ds")
    );
    final Map<String, ColumnAnalysis> analyses = analyzer.analyze(segment);
    final ColumnAnalysis invalidColumnAnalysis = analyses.get(invalid_aggregator);
    // With the serde unregistered, the complex column can no longer be understood.
    Assert.assertTrue(invalidColumnAnalysis.isError());
    Assert.assertEquals("error:unknown_complex_invalid_complex_column_type", invalidColumnAnalysis.getErrorMessage());

    // Also run a segment metadata query to verify it doesn't break.
    final List<SegmentAnalysis> results = getSegmentAnalysises(segment, EnumSet.of(SegmentMetadataQuery.AnalysisType.SIZE));
    for (SegmentAnalysis result : results) {
      Assert.assertTrue(result.getColumns().get(invalid_aggregator).isError());
    }
  }
}
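The getSegmentAnalysises helper is not shown; it presumably builds and runs a SegmentMetadataQuery over the segment. A hedged sketch of constructing such a query with the Druids builder (the dataSource name is illustrative, and the interval is left at the query's default):
SegmentMetadataQuery query = Druids.newSegmentMetadataQueryBuilder()
    .dataSource("ds")
    .analysisTypes(SegmentMetadataQuery.AnalysisType.SIZE)
    .merge(true)
    .build();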
Use of org.apache.druid.segment.QueryableIndexSegment in project druid by druid-io.
The class StreamAppenderator, method persistHydrant.
/**
* Persists the given hydrant and returns the number of rows persisted. Must only be called in the single-threaded
* persistExecutor.
*
* @param indexToPersist hydrant to persist
* @param identifier the segment this hydrant is going to be part of
*
* @return the number of rows persisted
*/
private int persistHydrant(FireHydrant indexToPersist, SegmentIdWithShardSpec identifier)
{
  synchronized (indexToPersist) {
    if (indexToPersist.hasSwapped()) {
      log.info("Segment[%s] hydrant[%s] already swapped. Ignoring request to persist.", identifier, indexToPersist);
      return 0;
    }
    log.debug("Segment[%s], persisting Hydrant[%s]", identifier, indexToPersist);
    try {
      final long startTime = System.nanoTime();
      final int numRows = indexToPersist.getIndex().size();
      final File persistDir = createPersistDirIfNeeded(identifier);
      final File persistedFile = indexMerger.persist(
          indexToPersist.getIndex(),
          identifier.getInterval(),
          new File(persistDir, String.valueOf(indexToPersist.getCount())),
          tuningConfig.getIndexSpecForIntermediatePersists(),
          tuningConfig.getSegmentWriteOutMediumFactory()
      );
      log.info(
          "Flushed in-memory data for segment[%s] spill[%s] to disk in [%,d] ms (%,d rows).",
          indexToPersist.getSegmentId(),
          indexToPersist.getCount(),
          (System.nanoTime() - startTime) / 1000000,
          numRows
      );
      // Swap the in-memory incremental segment for the persisted, memory-mapped one.
      indexToPersist.swapSegment(new QueryableIndexSegment(
          indexIO.loadIndex(persistedFile),
          indexToPersist.getSegmentId()
      ));
      return numRows;
    }
    catch (IOException e) {
      log.makeAlert("Incremental persist failed")
         .addData("segment", identifier.toString())
         .addData("dataSource", schema.getDataSource())
         .addData("count", indexToPersist.getCount())
         .emit();
      throw new RuntimeException(e);
    }
  }
}
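For context on the single-threaded contract in the Javadoc above: StreamAppenderator invokes this method from its dedicated persist executor and consumes the returned row count through a future. A minimal sketch of that call pattern (simplified; persistExecutor is assumed to be the class's ListeningExecutorService, and error handling is omitted):
// Runs on the dedicated persist thread, so persistHydrant never races with itself.
final ListenableFuture<Integer> rowsPersisted = persistExecutor.submit(
    () -> persistHydrant(indexToPersist, identifier)
);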