Example 31 with QueryableIndex

Use of io.druid.segment.QueryableIndex in project druid by druid-io.

In class DruidSchemaTest, method setUp:

@Before
public void setUp() throws Exception {
    Calcites.setSystemProperties();
    final File tmpDir = temporaryFolder.newFolder();
    final QueryableIndex index1 = IndexBuilder.create()
        .tmpDir(new File(tmpDir, "1"))
        .indexMerger(TestHelper.getTestIndexMergerV9())
        .schema(
            new IncrementalIndexSchema.Builder()
                .withMetrics(new AggregatorFactory[]{
                    new CountAggregatorFactory("cnt"),
                    new DoubleSumAggregatorFactory("m1", "m1"),
                    new HyperUniquesAggregatorFactory("unique_dim1", "dim1")
                })
                .withRollup(false)
                .build()
        )
        .rows(ROWS1)
        .buildMMappedIndex();
    final QueryableIndex index2 = IndexBuilder.create()
        .tmpDir(new File(tmpDir, "2"))
        .indexMerger(TestHelper.getTestIndexMergerV9())
        .schema(
            new IncrementalIndexSchema.Builder()
                .withMetrics(new AggregatorFactory[]{new LongSumAggregatorFactory("m1", "m1")})
                .withRollup(false)
                .build()
        )
        .rows(ROWS2)
        .buildMMappedIndex();
    walker = new SpecificSegmentsQuerySegmentWalker(CalciteTests.queryRunnerFactoryConglomerate())
        .add(
            DataSegment.builder()
                .dataSource(CalciteTests.DATASOURCE1)
                .interval(new Interval("2000/P1Y"))
                .version("1")
                .shardSpec(new LinearShardSpec(0))
                .build(),
            index1
        )
        .add(
            DataSegment.builder()
                .dataSource(CalciteTests.DATASOURCE1)
                .interval(new Interval("2001/P1Y"))
                .version("1")
                .shardSpec(new LinearShardSpec(0))
                .build(),
            index2
        )
        .add(
            DataSegment.builder()
                .dataSource(CalciteTests.DATASOURCE2)
                .interval(index2.getDataInterval())
                .version("1")
                .shardSpec(new LinearShardSpec(0))
                .build(),
            index2
        );
    schema = new DruidSchema(walker, new TestServerInventoryView(walker.getSegments()), PLANNER_CONFIG_DEFAULT);
    schema.start();
    schema.awaitInitialization();
}
Also used: DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) TestServerInventoryView(io.druid.sql.calcite.util.TestServerInventoryView) LinearShardSpec(io.druid.timeline.partition.LinearShardSpec) IndexBuilder(io.druid.segment.IndexBuilder) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) HyperUniquesAggregatorFactory(io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) SpecificSegmentsQuerySegmentWalker(io.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) QueryableIndex(io.druid.segment.QueryableIndex) File(java.io.File) Interval(org.joda.time.Interval) Before(org.junit.Before)
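
The IndexBuilder chain above is the standard test recipe for producing an on-disk, memory-mapped QueryableIndex. As a minimal sketch of the same recipe in isolation (assuming the ROWS1 fixture, the temporaryFolder rule, and the test helpers available in this class; the test name and assertion are illustrative only):

@Test
public void buildSingleMetricIndexSketch() throws Exception {
    // Build a mmapped index with a single "cnt" metric, mirroring index1 above.
    final File tmpDir = temporaryFolder.newFolder();
    final QueryableIndex index = IndexBuilder.create()
        .tmpDir(new File(tmpDir, "sketch"))
        .indexMerger(TestHelper.getTestIndexMergerV9())
        .schema(
            new IncrementalIndexSchema.Builder()
                .withMetrics(new AggregatorFactory[]{new CountAggregatorFactory("cnt")})
                .withRollup(false)
                .build()
        )
        .rows(ROWS1)
        .buildMMappedIndex();
    // A mmapped index knows its own time interval, as used by the walker above.
    Assert.assertNotNull(index.getDataInterval());
}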

Example 32 with QueryableIndex

Use of io.druid.segment.QueryableIndex in project druid by druid-io.

In class CalciteTests, method createMockWalker:

public static SpecificSegmentsQuerySegmentWalker createMockWalker(final File tmpDir) {
    final QueryableIndex index1 = IndexBuilder.create()
        .tmpDir(new File(tmpDir, "1"))
        .indexMerger(TestHelper.getTestIndexMergerV9())
        .schema(INDEX_SCHEMA)
        .rows(ROWS1)
        .buildMMappedIndex();
    final QueryableIndex index2 = IndexBuilder.create()
        .tmpDir(new File(tmpDir, "2"))
        .indexMerger(TestHelper.getTestIndexMergerV9())
        .schema(INDEX_SCHEMA)
        .rows(ROWS2)
        .buildMMappedIndex();
    return new SpecificSegmentsQuerySegmentWalker(queryRunnerFactoryConglomerate())
        .add(
            DataSegment.builder()
                .dataSource(DATASOURCE1)
                .interval(index1.getDataInterval())
                .version("1")
                .shardSpec(new LinearShardSpec(0))
                .build(),
            index1
        )
        .add(
            DataSegment.builder()
                .dataSource(DATASOURCE2)
                .interval(index2.getDataInterval())
                .version("1")
                .shardSpec(new LinearShardSpec(0))
                .build(),
            index2
        );
}
Also used: QueryableIndex(io.druid.segment.QueryableIndex) LinearShardSpec(io.druid.timeline.partition.LinearShardSpec) File(java.io.File)
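
createMockWalker is typically paired with a per-test lifecycle so the memory-mapped segments are released afterwards. A minimal sketch, assuming a JUnit TemporaryFolder rule and assuming SpecificSegmentsQuerySegmentWalker exposes a close() method (verify against the actual test utility):

private SpecificSegmentsQuerySegmentWalker walker = null;

@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();

@Before
public void setUp() throws Exception {
    // Two test datasources backed by the two mmapped indexes built above.
    walker = CalciteTests.createMockWalker(temporaryFolder.newFolder());
}

@After
public void tearDown() throws Exception {
    // Assumed to release the mmapped segments; see the walker class for the real API.
    walker.close();
}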

Example 33 with QueryableIndex

Use of io.druid.segment.QueryableIndex in project druid by druid-io.

In class SegmentAnalyzer, method analyze:

public Map<String, ColumnAnalysis> analyze(Segment segment) {
    Preconditions.checkNotNull(segment, "segment");
    // index is null for incremental-index-based segments, but storageAdapter is always available
    final QueryableIndex index = segment.asQueryableIndex();
    final StorageAdapter storageAdapter = segment.asStorageAdapter();
    // get length and column names from storageAdapter
    final int length = storageAdapter.getNumRows();
    final Set<String> columnNames = Sets.newHashSet();
    Iterables.addAll(columnNames, storageAdapter.getAvailableDimensions());
    Iterables.addAll(columnNames, storageAdapter.getAvailableMetrics());
    Map<String, ColumnAnalysis> columns = Maps.newTreeMap();
    for (String columnName : columnNames) {
        final Column column = index == null ? null : index.getColumn(columnName);
        final ColumnCapabilities capabilities = column != null ? column.getCapabilities() : storageAdapter.getColumnCapabilities(columnName);
        final ColumnAnalysis analysis;
        final ValueType type = capabilities.getType();
        switch (type) {
            case LONG:
                analysis = analyzeNumericColumn(capabilities, length, Longs.BYTES);
                break;
            case FLOAT:
                analysis = analyzeNumericColumn(capabilities, length, NUM_BYTES_IN_TEXT_FLOAT);
                break;
            case STRING:
                if (index != null) {
                    analysis = analyzeStringColumn(capabilities, column);
                } else {
                    analysis = analyzeStringColumn(capabilities, storageAdapter, columnName);
                }
                break;
            case COMPLEX:
                analysis = analyzeComplexColumn(capabilities, column, storageAdapter.getColumnTypeName(columnName));
                break;
            default:
                log.warn("Unknown column type[%s].", type);
                analysis = ColumnAnalysis.error(String.format("unknown_type_%s", type));
        }
        columns.put(columnName, analysis);
    }
    // Add time column too
    ColumnCapabilities timeCapabilities = storageAdapter.getColumnCapabilities(Column.TIME_COLUMN_NAME);
    if (timeCapabilities == null) {
        timeCapabilities = new ColumnCapabilitiesImpl().setType(ValueType.LONG).setHasMultipleValues(false);
    }
    columns.put(Column.TIME_COLUMN_NAME, analyzeNumericColumn(timeCapabilities, length, NUM_BYTES_IN_TIMESTAMP));
    return columns;
}
Also used: ComplexColumn(io.druid.segment.column.ComplexColumn) Column(io.druid.segment.column.Column) ValueType(io.druid.segment.column.ValueType) QueryableIndex(io.druid.segment.QueryableIndex) ColumnAnalysis(io.druid.query.metadata.metadata.ColumnAnalysis) StorageAdapter(io.druid.segment.StorageAdapter) ColumnCapabilities(io.druid.segment.column.ColumnCapabilities) ColumnCapabilitiesImpl(io.druid.segment.column.ColumnCapabilitiesImpl)
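
The returned map has one entry per dimension and metric, plus the implicit __time column added at the end. A sketch of a consumer, assuming ColumnAnalysis exposes isError(), getType(), and getSize() accessors (hedged assumption; check the actual class for the exact names), with segmentAnalyzer, segment, and log standing in for surrounding context:

// Hypothetical consumer of analyze(); the accessor names are assumptions.
final Map<String, ColumnAnalysis> columns = segmentAnalyzer.analyze(segment);
for (Map.Entry<String, ColumnAnalysis> entry : columns.entrySet()) {
    final ColumnAnalysis analysis = entry.getValue();
    if (analysis.isError()) {
        log.warn("Column[%s] could not be analyzed.", entry.getKey());
    } else {
        log.info("Column[%s] has type[%s] and size[%d] bytes.", entry.getKey(), analysis.getType(), analysis.getSize());
    }
}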

Example 34 with QueryableIndex

Use of io.druid.segment.QueryableIndex in project druid by druid-io.

In class RealtimePlumber, method persistAndMerge:

// Submits a persist-n-merge task for the given Sink to the mergeExecutor
private void persistAndMerge(final long truncatedTime, final Sink sink) {
    final String threadName = String.format("%s-%s-persist-n-merge", schema.getDataSource(), new DateTime(truncatedTime));
    mergeExecutor.execute(new ThreadRenamingRunnable(threadName) {

        final Interval interval = sink.getInterval();

        Stopwatch mergeStopwatch = null;

        @Override
        public void doRun() {
            try {
                // Bail out if this sink has been abandoned by a previously-executed task.
                if (sinks.get(truncatedTime) != sink) {
                    log.info("Sink[%s] was abandoned, bailing out of persist-n-merge.", sink);
                    return;
                }
                // Use a file to indicate that pushing has completed.
                final File persistDir = computePersistDir(schema, interval);
                final File mergedTarget = new File(persistDir, "merged");
                final File isPushedMarker = new File(persistDir, "isPushedMarker");
                if (!isPushedMarker.exists()) {
                    removeSegment(sink, mergedTarget);
                    if (mergedTarget.exists()) {
                        log.wtf("Merged target[%s] exists?!", mergedTarget);
                        return;
                    }
                } else {
                    log.info("Already pushed sink[%s]", sink);
                    return;
                }
                /*
                 * Note: if the plumber crashes after persisting a subset of hydrants, we might duplicate data,
                 * because those hydrants will be re-read while the older commitMetadata is used. Fixing this
                 * probably requires structural changes to the plumber.
                 */
                for (FireHydrant hydrant : sink) {
                    synchronized (hydrant) {
                        if (!hydrant.hasSwapped()) {
                            log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink);
                            final int rowCount = persistHydrant(hydrant, schema, interval, null);
                            metrics.incrementRowOutputCount(rowCount);
                        }
                    }
                }
                final long mergeThreadCpuTime = VMUtils.safeGetThreadCpuTime();
                mergeStopwatch = Stopwatch.createStarted();
                List<QueryableIndex> indexes = Lists.newArrayList();
                for (FireHydrant fireHydrant : sink) {
                    Segment segment = fireHydrant.getSegment();
                    final QueryableIndex queryableIndex = segment.asQueryableIndex();
                    log.info("Adding hydrant[%s]", fireHydrant);
                    indexes.add(queryableIndex);
                }
                final File mergedFile = indexMerger.mergeQueryableIndex(
                    indexes,
                    schema.getGranularitySpec().isRollup(),
                    schema.getAggregators(),
                    mergedTarget,
                    config.getIndexSpec()
                );
                // emit merge metrics before publishing segment
                metrics.incrementMergeCpuTime(VMUtils.safeGetThreadCpuTime() - mergeThreadCpuTime);
                metrics.incrementMergeTimeMillis(mergeStopwatch.elapsed(TimeUnit.MILLISECONDS));
                QueryableIndex index = indexIO.loadIndex(mergedFile);
                log.info("Pushing [%s] to deep storage", sink.getSegment().getIdentifier());
                DataSegment segment = dataSegmentPusher.push(
                    mergedFile,
                    sink.getSegment().withDimensions(Lists.newArrayList(index.getAvailableDimensions()))
                );
                log.info("Inserting [%s] to the metadata store", sink.getSegment().getIdentifier());
                segmentPublisher.publishSegment(segment);
                if (!isPushedMarker.createNewFile()) {
                    log.makeAlert("Failed to create marker file for [%s]", schema.getDataSource())
                       .addData("interval", sink.getInterval())
                       .addData("partitionNum", segment.getShardSpec().getPartitionNum())
                       .addData("marker", isPushedMarker)
                       .emit();
                }
            } catch (Exception e) {
                metrics.incrementFailedHandoffs();
                log.makeAlert(e, "Failed to persist merged index[%s]", schema.getDataSource()).addData("interval", interval).emit();
                if (shuttingDown) {
                    // We're trying to shut down, and this segment failed to push. Let's just get rid of it.
                    // This call will also delete possibly-partially-written files, so we don't need to do it explicitly.
                    cleanShutdown = false;
                    abandonSegment(truncatedTime, sink);
                }
            } finally {
                if (mergeStopwatch != null) {
                    mergeStopwatch.stop();
                }
            }
        }
    });
    handoffNotifier.registerSegmentHandoffCallback(
        new SegmentDescriptor(sink.getInterval(), sink.getVersion(), config.getShardSpec().getPartitionNum()),
        mergeExecutor,
        new Runnable() {

        @Override
        public void run() {
            abandonSegment(sink.getInterval().getStartMillis(), sink);
            metrics.incrementHandOffCount();
        }
    });
}
Also used: Stopwatch(com.google.common.base.Stopwatch) DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) QueryableIndexSegment(io.druid.segment.QueryableIndexSegment) Segment(io.druid.segment.Segment) IndexSizeExceededException(io.druid.segment.incremental.IndexSizeExceededException) IOException(java.io.IOException) QueryableIndex(io.druid.segment.QueryableIndex) SegmentDescriptor(io.druid.query.SegmentDescriptor) ThreadRenamingRunnable(io.druid.common.guava.ThreadRenamingRunnable) List(java.util.List) FireHydrant(io.druid.segment.realtime.FireHydrant) File(java.io.File) Interval(org.joda.time.Interval)
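
The isPushedMarker file gives persist-n-merge at-most-once push semantics across task re-runs: the marker is created only after a successful push, so a crash before that point simply causes the next run to push again. The same guard in isolation, as a self-contained JDK-only sketch (PushOnceExample, pushOnce, and doPush are hypothetical names, not druid API):

import java.io.File;
import java.io.IOException;

public final class PushOnceExample {

    public static void pushOnce(File persistDir) throws IOException {
        final File marker = new File(persistDir, "isPushedMarker");
        if (marker.exists()) {
            // A previous run already completed the push; bail out.
            return;
        }
        doPush(persistDir);
        // Record completion only after the push succeeds; a crash before this
        // line means the next run pushes again rather than losing the segment.
        if (!marker.createNewFile()) {
            throw new IOException("Failed to create marker " + marker);
        }
    }

    private static void doPush(File persistDir) {
        // Placeholder for the real "merge and push to deep storage" work.
    }
}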

Example 35 with QueryableIndex

Use of io.druid.segment.QueryableIndex in project druid by druid-io.

In class RealtimePlumber, method bootstrapSinksFromDisk:

protected Object bootstrapSinksFromDisk() {
    final VersioningPolicy versioningPolicy = config.getVersioningPolicy();
    File baseDir = computeBaseDir(schema);
    if (baseDir == null || !baseDir.exists()) {
        return null;
    }
    File[] files = baseDir.listFiles();
    if (files == null) {
        return null;
    }
    Object metadata = null;
    long latestCommitTime = 0;
    for (File sinkDir : files) {
        final Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/"));
        // List only numerically-named hydrant dirs, to avoid reading the "merged" dir.
        final File[] sinkFiles = sinkDir.listFiles(new FilenameFilter() {

            @Override
            public boolean accept(File dir, String fileName) {
                return Ints.tryParse(fileName) != null;
            }
        });
        Arrays.sort(sinkFiles, new Comparator<File>() {

            @Override
            public int compare(File o1, File o2) {
                try {
                    return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName()));
                } catch (NumberFormatException e) {
                    log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2);
                    return o1.compareTo(o2);
                }
            }
        });
        boolean isCorrupted = false;
        List<FireHydrant> hydrants = Lists.newArrayList();
        for (File segmentDir : sinkFiles) {
            log.info("Loading previously persisted segment at [%s]", segmentDir);
            // Re-checks the numeric-name filter above; if that filter is guaranteed sufficient, this check can be removed.
            if (Ints.tryParse(segmentDir.getName()) == null) {
                continue;
            }
            QueryableIndex queryableIndex = null;
            try {
                queryableIndex = indexIO.loadIndex(segmentDir);
            } catch (IOException e) {
                log.error(e, "Problem loading segmentDir from disk.");
                isCorrupted = true;
            }
            if (isCorrupted) {
                try {
                    File corruptSegmentDir = computeCorruptedFileDumpDir(segmentDir, schema);
                    log.info("Renaming %s to %s", segmentDir.getAbsolutePath(), corruptSegmentDir.getAbsolutePath());
                    FileUtils.copyDirectory(segmentDir, corruptSegmentDir);
                    FileUtils.deleteDirectory(segmentDir);
                } catch (Exception e1) {
                    log.error(e1, "Failed to rename %s", segmentDir.getAbsolutePath());
                }
                // Note: skipping a corrupted segment may drop data; this recovery strategy should be revisited at some point.
                continue;
            }
            Metadata segmentMetadata = queryableIndex.getMetadata();
            if (segmentMetadata != null) {
                Object timestampObj = segmentMetadata.get(COMMIT_METADATA_TIMESTAMP_KEY);
                if (timestampObj != null) {
                    long timestamp = ((Long) timestampObj).longValue();
                    if (timestamp > latestCommitTime) {
                        log.info("Found metaData [%s] with latestCommitTime [%s] greater than previous recorded [%s]", queryableIndex.getMetadata(), timestamp, latestCommitTime);
                        latestCommitTime = timestamp;
                        metadata = queryableIndex.getMetadata().get(COMMIT_METADATA_KEY);
                    }
                }
            }
            hydrants.add(
                new FireHydrant(
                    new QueryableIndexSegment(
                        DataSegment.makeDataSegmentIdentifier(
                            schema.getDataSource(),
                            sinkInterval.getStart(),
                            sinkInterval.getEnd(),
                            versioningPolicy.getVersion(sinkInterval),
                            config.getShardSpec()
                        ),
                        queryableIndex
                    ),
                    Integer.parseInt(segmentDir.getName())
                )
            );
        }
        if (hydrants.isEmpty()) {
            // Probably encountered a corrupt sink directory
            log.warn("Found persisted segment directory with no intermediate segments present at %s, skipping sink creation.", sinkDir.getAbsolutePath());
            continue;
        }
        final Sink currSink = new Sink(
            sinkInterval,
            schema,
            config.getShardSpec(),
            versioningPolicy.getVersion(sinkInterval),
            config.getMaxRowsInMemory(),
            config.isReportParseExceptions(),
            hydrants
        );
        addSink(currSink);
    }
    return metadata;
}
Also used: QueryableIndexSegment(io.druid.segment.QueryableIndexSegment) Metadata(io.druid.segment.Metadata) IOException(java.io.IOException) IndexSizeExceededException(io.druid.segment.incremental.IndexSizeExceededException) FilenameFilter(java.io.FilenameFilter) QueryableIndex(io.druid.segment.QueryableIndex) FireHydrant(io.druid.segment.realtime.FireHydrant) File(java.io.File) Interval(org.joda.time.Interval)
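
Bootstrapping relies on persisted hydrant directories being named by integer index so they can be replayed in order. The listing-and-sorting step in isolation, as a self-contained JDK-only sketch (HydrantDirs and listInOrder are hypothetical names; Java 8 lambdas replace the anonymous classes above for brevity):

import java.io.File;
import java.util.Arrays;
import java.util.Comparator;

public final class HydrantDirs {

    // Lists only numerically-named children and sorts them by integer value,
    // mirroring the FilenameFilter and comparator in bootstrapSinksFromDisk.
    public static File[] listInOrder(File sinkDir) {
        final File[] dirs = sinkDir.listFiles((dir, name) -> {
            try {
                Integer.parseInt(name);
                return true;
            } catch (NumberFormatException e) {
                return false; // skips "merged", "isPushedMarker", etc.
            }
        });
        if (dirs == null) {
            return new File[0]; // sinkDir missing or not a directory
        }
        Arrays.sort(dirs, Comparator.comparingInt((File f) -> Integer.parseInt(f.getName())));
        return dirs;
    }
}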

Aggregations

QueryableIndex (io.druid.segment.QueryableIndex): 35 usages
File (java.io.File): 23 usages
IncrementalIndex (io.druid.segment.incremental.IncrementalIndex): 16 usages
OnheapIncrementalIndex (io.druid.segment.incremental.OnheapIncrementalIndex): 12 usages
IndexSpec (io.druid.segment.IndexSpec): 11 usages
InputRow (io.druid.data.input.InputRow): 10 usages
QueryableIndexSegment (io.druid.segment.QueryableIndexSegment): 8 usages
IOException (java.io.IOException): 8 usages
BenchmarkDataGenerator (io.druid.benchmark.datagen.BenchmarkDataGenerator): 7 usages
HyperUniquesSerde (io.druid.query.aggregation.hyperloglog.HyperUniquesSerde): 7 usages
Setup (org.openjdk.jmh.annotations.Setup): 7 usages
FireHydrant (io.druid.segment.realtime.FireHydrant): 6 usages
DataSegment (io.druid.timeline.DataSegment): 6 usages
QueryableIndexStorageAdapter (io.druid.segment.QueryableIndexStorageAdapter): 5 usages
StorageAdapter (io.druid.segment.StorageAdapter): 5 usages
DateTime (org.joda.time.DateTime): 5 usages
Interval (org.joda.time.Interval): 5 usages
IncrementalIndexSegment (io.druid.segment.IncrementalIndexSegment): 4 usages
IndexSizeExceededException (io.druid.segment.incremental.IndexSizeExceededException): 4 usages
ImmutableList (com.google.common.collect.ImmutableList): 3 usages