Usage of io.druid.java.util.common.granularity.Granularity in project druid by druid-io.
Class AppenderatorPlumber, method getSegmentIdentifier:
/**
 * Maps an event timestamp to the segment that should receive it, lazily creating
 * and registering a new segment for the time bucket when none exists yet.
 *
 * @param timestamp event time in milliseconds since the epoch
 * @return the identifier of the owning segment, or null if the rejection policy drops the timestamp
 */
private SegmentIdentifier getSegmentIdentifier(long timestamp)
{
  // Events outside the acceptable window are dropped entirely.
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }

  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final VersioningPolicy versioningPolicy = config.getVersioningPolicy();

  // Truncate the timestamp to the start of its segment-granularity bucket; this is the map key.
  final long bucketStart = segmentGranularity.bucketStart(new DateTime(timestamp)).getMillis();

  SegmentIdentifier identifier = segments.get(bucketStart);
  if (identifier != null) {
    return identifier;
  }

  // No segment for this bucket yet: build one spanning exactly one granularity bucket.
  final DateTime bucketStartTime = new DateTime(bucketStart);
  final Interval interval = new Interval(bucketStartTime, segmentGranularity.increment(bucketStartTime));
  identifier = new SegmentIdentifier(
      schema.getDataSource(),
      interval,
      versioningPolicy.getVersion(interval),
      config.getShardSpec()
  );
  addSegment(identifier);
  return identifier;
}
Usage of io.druid.java.util.common.granularity.Granularity in project druid by druid-io.
Class AppenderatorPlumber, method mergeAndPush:
/**
 * Finds segments eligible for hand-off — those whose interval starts before the hand-off
 * cutoff (current max time minus the window period, bucketed by segment granularity), or
 * all segments when shutting down — pushes them through the appenderator, and publishes
 * each pushed segment. Push/publish failures go through a shared error handler that emits
 * an alert and, during shutdown, drops the failed segments so shutdown can proceed.
 */
private void mergeAndPush()
{
  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final Period windowPeriod = config.getWindowPeriod();
  final long windowMillis = windowPeriod.toStandardDuration().getMillis();

  log.info("Starting merge and push.");

  // Segments whose bucket starts before this timestamp are safe to hand off: the window
  // period has fully elapsed relative to the rejection policy's current max time.
  DateTime minTimestampAsDate = segmentGranularity.bucketStart(
      new DateTime(Math.max(windowMillis, rejectionPolicy.getCurrMaxTime().getMillis()) - windowMillis)
  );
  long minTimestamp = minTimestampAsDate.getMillis();

  final List<SegmentIdentifier> appenderatorSegments = appenderator.getSegments();
  final List<SegmentIdentifier> segmentsToPush = Lists.newArrayList();

  if (shuttingDown) {
    log.info("Found [%,d] segments. Attempting to hand off all of them.", appenderatorSegments.size());
    segmentsToPush.addAll(appenderatorSegments);
  } else {
    log.info(
        "Found [%,d] segments. Attempting to hand off segments that start before [%s].",
        appenderatorSegments.size(),
        minTimestampAsDate
    );

    for (SegmentIdentifier segment : appenderatorSegments) {
      // Primitive long: the original boxed to Long only to immediately unbox in the comparison.
      final long intervalStart = segment.getInterval().getStartMillis();
      if (intervalStart < minTimestamp) {
        log.info("Adding entry [%s] for merge and push.", segment);
        segmentsToPush.add(segment);
      } else {
        log.info(
            "Skipping persist and merge for entry [%s] : Start time [%s] >= [%s] min timestamp required in this run. Segment will be picked up in a future run.",
            segment,
            new DateTime(intervalStart),
            minTimestampAsDate
        );
      }
    }
  }

  log.info("Found [%,d] segments to persist and merge", segmentsToPush.size());

  // Shared failure path: alert with the affected segment ids; during shutdown additionally
  // drop the failed segments (which also cleans up partially-written files).
  final Function<Throwable, Void> errorHandler = new Function<Throwable, Void>()
  {
    @Override
    public Void apply(Throwable throwable)
    {
      final List<String> segmentIdentifierStrings = Lists.transform(
          segmentsToPush,
          new Function<SegmentIdentifier, String>()
          {
            @Override
            public String apply(SegmentIdentifier input)
            {
              return input.getIdentifierAsString();
            }
          }
      );

      log.makeAlert(throwable, "Failed to publish merged indexes[%s]", schema.getDataSource())
         .addData("segments", segmentIdentifierStrings)
         .emit();

      if (shuttingDown) {
        // We're trying to shut down, and these segments failed to push. Let's just get rid of them.
        // This call will also delete possibly-partially-written files, so we don't need to do it explicitly.
        cleanShutdown = false;
        for (SegmentIdentifier identifier : segmentsToPush) {
          dropSegment(identifier);
        }
      }

      return null;
    }
  };

  // WARNING: Committers.nil() here means that on-disk data can get out of sync with committing.
  Futures.addCallback(
      appenderator.push(segmentsToPush, Committers.nil()),
      new FutureCallback<SegmentsAndMetadata>()
      {
        @Override
        public void onSuccess(SegmentsAndMetadata result)
        {
          // Immediately publish after pushing
          for (DataSegment pushedSegment : result.getSegments()) {
            try {
              segmentPublisher.publishSegment(pushedSegment);
            }
            catch (Exception e) {
              errorHandler.apply(e);
            }
          }
          log.info("Published [%,d] sinks.", segmentsToPush.size());
        }

        @Override
        public void onFailure(Throwable e)
        {
          log.warn(e, "Failed to push [%,d] segments.", segmentsToPush.size());
          errorHandler.apply(e);
        }
      }
  );
}
Usage of io.druid.java.util.common.granularity.Granularity in project druid by druid-io.
Class AppenderatorPlumber, method startPersistThread:
/**
 * Schedules the recurring merge-and-push overseer task. The first run is delayed until the
 * current granularity bucket closes plus the window period; subsequent runs repeat once per
 * segment-granularity bucket. The task stops itself once {@code stopped} is observed.
 */
private void startPersistThread()
{
  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final Period windowPeriod = config.getWindowPeriod();

  final DateTime truncatedNow = segmentGranularity.bucketStart(new DateTime());
  final long windowMillis = windowPeriod.toStandardDuration().getMillis();

  // Compute the initial delay exactly once. The original built this Duration twice (once for
  // the log line, once for scheduling) with two separate System.currentTimeMillis() calls,
  // so the logged ETA could differ slightly from the actually scheduled time.
  final Duration initialDelay = new Duration(
      System.currentTimeMillis(),
      segmentGranularity.increment(truncatedNow).getMillis() + windowMillis
  );

  log.info("Expect to run at [%s]", new DateTime().plus(initialDelay));

  ScheduledExecutors.scheduleAtFixedRate(
      scheduledExecutor,
      initialDelay,
      // Repeat once per segment-granularity bucket.
      new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
      new ThreadRenamingCallable<ScheduledExecutors.Signal>(
          String.format("%s-overseer-%d", schema.getDataSource(), config.getShardSpec().getPartitionNum())
      )
      {
        @Override
        public ScheduledExecutors.Signal doCall()
        {
          if (stopped) {
            log.info("Stopping merge-n-push overseer thread");
            return ScheduledExecutors.Signal.STOP;
          }

          mergeAndPush();

          // Re-check after the (potentially long) mergeAndPush call.
          if (stopped) {
            log.info("Stopping merge-n-push overseer thread");
            return ScheduledExecutors.Signal.STOP;
          } else {
            return ScheduledExecutors.Signal.REPEAT;
          }
        }
      }
  );
}
Usage of io.druid.java.util.common.granularity.Granularity in project druid by druid-io.
Class RealtimePlumber, method startPersistThread:
/**
 * Schedules the recurring merge-and-push overseer task. The first run is delayed until the
 * current granularity bucket closes plus the window period; subsequent runs repeat once per
 * segment-granularity bucket. The task stops itself once {@code stopped} is observed.
 */
protected void startPersistThread()
{
  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final Period windowPeriod = config.getWindowPeriod();

  final DateTime truncatedNow = segmentGranularity.bucketStart(new DateTime());
  final long windowMillis = windowPeriod.toStandardDuration().getMillis();

  // Compute the initial delay exactly once. The original built this Duration twice (once for
  // the log line, once for scheduling) with two separate System.currentTimeMillis() calls,
  // so the logged ETA could differ slightly from the actually scheduled time.
  final Duration initialDelay = new Duration(
      System.currentTimeMillis(),
      segmentGranularity.increment(truncatedNow).getMillis() + windowMillis
  );

  log.info("Expect to run at [%s]", new DateTime().plus(initialDelay));

  ScheduledExecutors.scheduleAtFixedRate(
      scheduledExecutor,
      initialDelay,
      // Repeat once per segment-granularity bucket.
      new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
      new ThreadRenamingCallable<ScheduledExecutors.Signal>(
          String.format("%s-overseer-%d", schema.getDataSource(), config.getShardSpec().getPartitionNum())
      )
      {
        @Override
        public ScheduledExecutors.Signal doCall()
        {
          if (stopped) {
            log.info("Stopping merge-n-push overseer thread");
            return ScheduledExecutors.Signal.STOP;
          }

          mergeAndPush();

          // Re-check after the (potentially long) mergeAndPush call.
          if (stopped) {
            log.info("Stopping merge-n-push overseer thread");
            return ScheduledExecutors.Signal.STOP;
          } else {
            return ScheduledExecutors.Signal.REPEAT;
          }
        }
      }
  );
}
Usage of io.druid.java.util.common.granularity.Granularity in project druid by druid-io.
Class GroupByQueryHelper, method createIndexAccumulatorPair:
/**
 * Builds the incremental index that will hold groupBy results, plus an accumulator
 * that folds {@link MapBasedRow} results into it.
 *
 * @param query      the groupBy query supplying granularity, intervals, dimensions, and aggregators
 * @param config     base config; overridden per-query via {@code withOverrides}
 * @param bufferPool buffer pool used when the "useOffheap" context flag is set
 * @param combine    when true, aggregators are replaced by their combining factories
 * @return the index and an accumulator over it; the accumulator throws ISE for non-MapBasedRow input
 *         and ResourceLimitExceededException when the index's max-results limit is exceeded
 */
public static <T> Pair<IncrementalIndex, Accumulator<IncrementalIndex, T>> createIndexAccumulatorPair(
    final GroupByQuery query,
    final GroupByQueryConfig config,
    StupidPool<ByteBuffer> bufferPool,
    final boolean combine
)
{
  final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
  final Granularity gran = query.getGranularity();
  final long timeStart = query.getIntervals().get(0).getStartMillis();

  // Align the index's minimum timestamp to the query granularity, unless it is ALL.
  final long granTimeStart = Granularities.ALL.equals(gran)
                             ? timeStart
                             : gran.bucketStart(new DateTime(timeStart)).getMillis();

  // When combining partial results, swap each aggregator for its combining factory.
  // Note: Lists.transform returns a lazy view, deliberately kept (not eagerly copied).
  final List<AggregatorFactory> aggs;
  if (combine) {
    aggs = Lists.transform(
        query.getAggregatorSpecs(),
        new Function<AggregatorFactory, AggregatorFactory>()
        {
          @Override
          public AggregatorFactory apply(AggregatorFactory input)
          {
            return input.getCombiningFactory();
          }
        }
    );
  } else {
    aggs = query.getAggregatorSpecs();
  }

  final List<String> dimensions = Lists.transform(
      query.getDimensions(),
      new Function<DimensionSpec, String>()
      {
        @Override
        public String apply(DimensionSpec input)
        {
          return input.getOutputName();
        }
      }
  );

  final boolean sortResults = query.getContextValue(CTX_KEY_SORT_RESULTS, true);

  // All groupBy dimensions are strings, for now.
  final List<DimensionSchema> dimensionSchemas = Lists.newArrayList();
  for (DimensionSpec dimension : query.getDimensions()) {
    dimensionSchemas.add(new StringDimensionSchema(dimension.getOutputName()));
  }

  final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
      .withDimensionsSpec(new DimensionsSpec(dimensionSchemas, null, null))
      .withMetrics(aggs.toArray(new AggregatorFactory[aggs.size()]))
      .withQueryGranularity(gran)
      .withMinTimestamp(granTimeStart)
      .build();

  final IncrementalIndex index;
  if (query.getContextValue("useOffheap", false)) {
    index = new OffheapIncrementalIndex(
        indexSchema, false, true, sortResults, querySpecificConfig.getMaxResults(), bufferPool
    );
  } else {
    index = new OnheapIncrementalIndex(
        indexSchema, false, true, sortResults, querySpecificConfig.getMaxResults()
    );
  }

  Accumulator<IncrementalIndex, T> accumulator = new Accumulator<IncrementalIndex, T>()
  {
    @Override
    public IncrementalIndex accumulate(IncrementalIndex accumulated, T in)
    {
      if (!(in instanceof MapBasedRow)) {
        throw new ISE("Unable to accumulate something of type [%s]", in.getClass());
      }
      final MapBasedRow mapRow = (MapBasedRow) in;
      try {
        accumulated.add(new MapBasedInputRow(mapRow.getTimestamp(), dimensions, mapRow.getEvent()));
      }
      catch (IndexSizeExceededException e) {
        // NOTE(review): only the message survives here — the cause is dropped. Worth confirming
        // whether ResourceLimitExceededException offers a cause-preserving constructor.
        throw new ResourceLimitExceededException(e.getMessage());
      }
      return accumulated;
    }
  };

  return new Pair<>(index, accumulator);
}
Aggregations