Use of io.druid.segment.QueryableIndexSegment in project druid by druid-io.
The class SpatialFilterBonusTest, method constructorFeeder:
@Parameterized.Parameters
public static Collection<?> constructorFeeder() throws IOException {
  final IndexSpec indexSpec = new IndexSpec();
  final IncrementalIndex rtIndex = makeIncrementalIndex();
  final QueryableIndex mMappedTestIndex = makeQueryableIndex(indexSpec);
  final QueryableIndex mergedRealtimeIndex = makeMergedQueryableIndex(indexSpec);
  // One parameter set per Segment implementation: realtime (incremental),
  // memory-mapped, and merged realtime.
  return Arrays.asList(
      new Object[][]{
          {new IncrementalIndexSegment(rtIndex, null)},
          {new QueryableIndexSegment(null, mMappedTestIndex)},
          {new QueryableIndexSegment(null, mergedRealtimeIndex)}
      }
  );
}
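Each inner Object[] above becomes one constructor invocation of the JUnit parameterized test, so every test method runs once per Segment implementation. A minimal sketch of the receiving side (the field name and constructor body are illustrative assumptions, not copied from the source):

@RunWith(Parameterized.class)
public class SpatialFilterBonusTest {
  private final Segment segment; // hypothetical field name

  // JUnit passes each Object[] row from constructorFeeder() here
  public SpatialFilterBonusTest(Segment segment) {
    this.segment = segment;
  }
}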
Use of io.druid.segment.QueryableIndexSegment in project druid by druid-io.
The class SpatialFilterTest, method constructorFeeder:
@Parameterized.Parameters
public static Collection<?> constructorFeeder() throws IOException {
  final IndexSpec indexSpec = new IndexSpec();
  final IncrementalIndex rtIndex = makeIncrementalIndex();
  final QueryableIndex mMappedTestIndex = makeQueryableIndex(indexSpec);
  final QueryableIndex mergedRealtimeIndex = makeMergedQueryableIndex(indexSpec);
  return Arrays.asList(
      new Object[][]{
          {new IncrementalIndexSegment(rtIndex, null)},
          {new QueryableIndexSegment(null, mMappedTestIndex)},
          {new QueryableIndexSegment(null, mergedRealtimeIndex)}
      }
  );
}
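The mMappedTestIndex variant exercises the persisted, memory-mapped code path. A plausible sketch of what a helper like makeQueryableIndex does, assuming an indexMerger and indexIO are in scope and that IncrementalIndex.getInterval() is available (the temp-dir handling is an assumption; it mirrors the persist(index, interval, outDir, indexSpec) call used by RealtimePlumber.persistHydrant further down):

private static QueryableIndex makeQueryableIndex(IndexSpec indexSpec) throws IOException {
  IncrementalIndex incremental = makeIncrementalIndex();
  File tmpDir = Files.createTempDir(); // Guava temp-dir helper; cleanup omitted
  // Persist the in-heap index, then memory-map the persisted segment back in
  File persisted = indexMerger.persist(incremental, incremental.getInterval(), tmpDir, indexSpec);
  return indexIO.loadIndex(persisted);
}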
Use of io.druid.segment.QueryableIndexSegment in project druid by druid-io.
The class TopNQueryRunnerBenchmark, method setUp:
@BeforeClass
public static void setUp() throws Exception {
  QueryRunnerFactory factory = new TopNQueryRunnerFactory(
      new StupidPool<ByteBuffer>(
          "TopNQueryRunnerFactory-directBufferPool",
          new Supplier<ByteBuffer>() {
            @Override
            public ByteBuffer get() {
              // Instead of causing a circular dependency, we simply mimic its behavior
              return ByteBuffer.allocateDirect(2000);
            }
          }
      ),
      new TopNQueryQueryToolChest(
          new TopNQueryConfig(),
          QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()
      ),
      QueryRunnerTestHelper.NOOP_QUERYWATCHER
  );
  testCaseMap.put(
      TestCases.rtIndex,
      QueryRunnerTestHelper.makeQueryRunner(
          factory,
          new IncrementalIndexSegment(TestIndex.getIncrementalTestIndex(), segmentId),
          null
      )
  );
  testCaseMap.put(
      TestCases.mMappedTestIndex,
      QueryRunnerTestHelper.makeQueryRunner(
          factory,
          new QueryableIndexSegment(segmentId, TestIndex.getMMappedTestIndex()),
          null
      )
  );
  testCaseMap.put(
      TestCases.mergedRealtimeIndex,
      QueryRunnerTestHelper.makeQueryRunner(
          factory,
          new QueryableIndexSegment(segmentId, TestIndex.mergedRealtimeIndex()),
          null
      )
  );
  //Thread.sleep(10000);
}
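With the map populated, each benchmark method runs the same TopN query against one of the three segment flavors. A hedged sketch of what such a method might look like (the method name and the prebuilt topNQuery are assumptions; the Sequence must be drained for the query to actually execute):

@Test
public void testmMappedTestIndex() {
  QueryRunner runner = testCaseMap.get(TestCases.mMappedTestIndex);
  // run() returns a lazy Sequence; materialize it to force execution
  Sequence results = runner.run(topNQuery, new HashMap<String, Object>());
  Sequences.toList(results, Lists.newArrayList());
}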
Use of io.druid.segment.QueryableIndexSegment in project druid by druid-io.
The class RealtimePlumber, method bootstrapSinksFromDisk:
protected Object bootstrapSinksFromDisk() {
  final VersioningPolicy versioningPolicy = config.getVersioningPolicy();
  File baseDir = computeBaseDir(schema);
  if (baseDir == null || !baseDir.exists()) {
    return null;
  }
  File[] files = baseDir.listFiles();
  if (files == null) {
    return null;
  }
  Object metadata = null;
  long latestCommitTime = 0;
  for (File sinkDir : files) {
    final Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/"));
    //final File[] sinkFiles = sinkDir.listFiles();
    // To avoid reading and listing of "merged" dir
    final File[] sinkFiles = sinkDir.listFiles(new FilenameFilter() {
      @Override
      public boolean accept(File dir, String fileName) {
        // Only accept numerically named hydrant directories
        return Ints.tryParse(fileName) != null;
      }
    });
    Arrays.sort(sinkFiles, new Comparator<File>() {
      @Override
      public int compare(File o1, File o2) {
        try {
          return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName()));
        } catch (NumberFormatException e) {
          log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2);
          return o1.compareTo(o2);
        }
      }
    });
    boolean isCorrupted = false;
    List<FireHydrant> hydrants = Lists.newArrayList();
    for (File segmentDir : sinkFiles) {
      log.info("Loading previously persisted segment at [%s]", segmentDir);
      // If 100% sure that this is not needed, this check can be removed.
      if (Ints.tryParse(segmentDir.getName()) == null) {
        continue;
      }
      QueryableIndex queryableIndex = null;
      try {
        queryableIndex = indexIO.loadIndex(segmentDir);
      } catch (IOException e) {
        log.error(e, "Problem loading segmentDir from disk.");
        isCorrupted = true;
      }
      if (isCorrupted) {
        try {
          File corruptSegmentDir = computeCorruptedFileDumpDir(segmentDir, schema);
          log.info("Renaming %s to %s", segmentDir.getAbsolutePath(), corruptSegmentDir.getAbsolutePath());
          FileUtils.copyDirectory(segmentDir, corruptSegmentDir);
          FileUtils.deleteDirectory(segmentDir);
        } catch (Exception e1) {
          log.error(e1, "Failed to rename %s", segmentDir.getAbsolutePath());
        }
        // Skip the corrupted segment; the dumped copy can be inspected at some point.
        continue;
      }
      Metadata segmentMetadata = queryableIndex.getMetadata();
      if (segmentMetadata != null) {
        Object timestampObj = segmentMetadata.get(COMMIT_METADATA_TIMESTAMP_KEY);
        if (timestampObj != null) {
          long timestamp = ((Long) timestampObj).longValue();
          if (timestamp > latestCommitTime) {
            log.info(
                "Found metaData [%s] with latestCommitTime [%s] greater than previous recorded [%s]",
                queryableIndex.getMetadata(), timestamp, latestCommitTime
            );
            latestCommitTime = timestamp;
            metadata = queryableIndex.getMetadata().get(COMMIT_METADATA_KEY);
          }
        }
      }
      hydrants.add(
          new FireHydrant(
              new QueryableIndexSegment(
                  DataSegment.makeDataSegmentIdentifier(
                      schema.getDataSource(),
                      sinkInterval.getStart(),
                      sinkInterval.getEnd(),
                      versioningPolicy.getVersion(sinkInterval),
                      config.getShardSpec()
                  ),
                  queryableIndex
              ),
              Integer.parseInt(segmentDir.getName())
          )
      );
    }
    if (hydrants.isEmpty()) {
      // Probably encountered a corrupt sink directory
      log.warn(
          "Found persisted segment directory with no intermediate segments present at %s, skipping sink creation.",
          sinkDir.getAbsolutePath()
      );
      continue;
    }
    final Sink currSink = new Sink(
        sinkInterval,
        schema,
        config.getShardSpec(),
        versioningPolicy.getVersion(sinkInterval),
        config.getMaxRowsInMemory(),
        config.isReportParseExceptions(),
        hydrants
    );
    addSink(currSink);
  }
  return metadata;
}
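The sink directory name encodes the segment interval, with an underscore standing in for the slash of Joda-Time's ISO interval form (a '/' cannot appear in a file name). A small illustration of the convention the replace("_", "/") call above relies on; the sample timestamps are made up:

String dirName = "2017-01-01T00:00:00.000Z_2017-01-02T00:00:00.000Z";
// Joda's Interval(String) parses the ISO "start/end" form
Interval sinkInterval = new Interval(dirName.replace("_", "/"));
// sinkInterval.toString() -> "2017-01-01T00:00:00.000Z/2017-01-02T00:00:00.000Z"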
Use of io.druid.segment.QueryableIndexSegment in project druid by druid-io.
The class RealtimePlumber, method persistHydrant:
/**
 * Persists the given hydrant and returns the number of rows persisted.
 *
 * @param indexToPersist hydrant to persist
 * @param schema         datasource schema
 * @param interval       interval to persist
 * @param metadataElems  commit metadata to attach to the persisted index
 *
 * @return the number of rows persisted
 */
protected int persistHydrant(FireHydrant indexToPersist, DataSchema schema, Interval interval, Map<String, Object> metadataElems) {
  synchronized (indexToPersist) {
    if (indexToPersist.hasSwapped()) {
      log.info(
          "DataSource[%s], Interval[%s], Hydrant[%s] already swapped. Ignoring request to persist.",
          schema.getDataSource(), interval, indexToPersist
      );
      return 0;
    }
    log.info(
        "DataSource[%s], Interval[%s], Metadata [%s] persisting Hydrant[%s]",
        schema.getDataSource(), interval, metadataElems, indexToPersist
    );
    try {
      int numRows = indexToPersist.getIndex().size();
      final IndexSpec indexSpec = config.getIndexSpec();
      indexToPersist.getIndex().getMetadata().putAll(metadataElems);
      final File persistedFile = indexMerger.persist(
          indexToPersist.getIndex(),
          interval,
          new File(computePersistDir(schema, interval), String.valueOf(indexToPersist.getCount())),
          indexSpec
      );
      // Swap the in-heap incremental index for the persisted, memory-mapped one
      indexToPersist.swapSegment(
          new QueryableIndexSegment(indexToPersist.getSegment().getIdentifier(), indexIO.loadIndex(persistedFile))
      );
      return numRows;
    } catch (IOException e) {
      log.makeAlert("dataSource[%s] -- incremental persist failed", schema.getDataSource())
         .addData("interval", interval)
         .addData("count", indexToPersist.getCount())
         .emit();
      throw Throwables.propagate(e);
    }
  }
}
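A persist typically runs once per hydrant in a sink, accumulating the returned row counts. A hedged sketch of a caller (the loop, the metadata map contents, and the totalRows accumulator are assumptions, not the project's code; Sink is iterable over its FireHydrants):

int totalRows = 0;
Map<String, Object> commitMetadata = Maps.newHashMap();
commitMetadata.put(COMMIT_METADATA_TIMESTAMP_KEY, System.currentTimeMillis());
for (FireHydrant hydrant : sink) {
  totalRows += persistHydrant(hydrant, schema, sink.getInterval(), commitMetadata);
}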