Use of io.druid.java.util.common.granularity.Granularity in the project druid by druid-io.
From the class GroupByStrategyV2, method getUniversalTimestamp:
/**
 * Returns the single universal timestamp of "query" if one exists, otherwise null. This is useful
 * for keeping timestamps in sync across partial queries that may have different intervals.
 *
 * @param query the query
 *
 * @return universal timestamp, or null
 */
public static DateTime getUniversalTimestamp(final GroupByQuery query) {
  // An explicit fudge timestamp supplied via the query context always wins.
  final String fudgeTimestamp = query.getContextValue(CTX_KEY_FUDGE_TIMESTAMP, "");
  if (!fudgeTimestamp.isEmpty()) {
    return new DateTime(Long.parseLong(fudgeTimestamp));
  }
  final Granularity granularity = query.getGranularity();
  if (Granularities.ALL.equals(granularity)) {
    // With "ALL" granularity every row falls into one bucket, so the bucket containing the
    // start of the first interval yields the one universal timestamp.
    final long intervalStart = query.getIntervals().get(0).getStartMillis();
    return granularity.getIterable(new Interval(intervalStart, intervalStart + 1))
                      .iterator()
                      .next()
                      .getStart();
  }
  // Finer granularities produce multiple buckets; no single timestamp applies.
  return null;
}
Use of io.druid.java.util.common.granularity.Granularity in the project druid by druid-io.
From the class FlushingPlumber, method startFlushThread:
/**
 * Starts the periodic flusher task: on every segment-granularity tick it collects all sinks
 * whose interval start falls before the rejection-policy watermark (minus the window period)
 * and schedules them for flushing.
 */
private void startFlushThread() {
  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final DateTime truncatedNow = segmentGranularity.bucketStart(new DateTime());
  final long windowMillis = config.getWindowPeriod().toStandardDuration().getMillis();
  // Compute the initial delay exactly once. The original computed it twice (with two separate
  // System.currentTimeMillis() calls and a redundant re-derivation of the segment granularity
  // through schema.getGranularitySpec()), so the logged "expect to run" time could differ from
  // the actually scheduled time.
  final Duration initialDelay = new Duration(
      System.currentTimeMillis(),
      segmentGranularity.increment(truncatedNow).getMillis() + windowMillis
  );
  log.info("Expect to run at [%s]", new DateTime().plus(initialDelay));
  ScheduledExecutors.scheduleAtFixedRate(
      flushScheduledExec,
      initialDelay,
      // Repeat once per segment-granularity bucket.
      new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
      new ThreadRenamingCallable<ScheduledExecutors.Signal>(
          String.format("%s-flusher-%d", getSchema().getDataSource(), getConfig().getShardSpec().getPartitionNum())
      ) {
        @Override
        public ScheduledExecutors.Signal doCall() {
          // Check for shutdown both before and after the flush pass so a stop request is
          // honored promptly even when it arrives mid-iteration.
          if (stopped) {
            log.info("Stopping flusher thread");
            return ScheduledExecutors.Signal.STOP;
          }
          // Sinks whose bucket start precedes this watermark are old enough to flush.
          final long minTimestamp = segmentGranularity.bucketStart(
              getRejectionPolicy().getCurrMaxTime().minus(windowMillis)
          ).getMillis();
          final List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
          for (Map.Entry<Long, Sink> entry : getSinks().entrySet()) {
            final Long intervalStart = entry.getKey();
            if (intervalStart < minTimestamp) {
              log.info("Adding entry[%s] to flush.", entry);
              sinksToPush.add(entry);
            }
          }
          // Flush outside the collection loop to avoid mutating getSinks() while iterating it.
          for (final Map.Entry<Long, Sink> entry : sinksToPush) {
            flushAfterDuration(entry.getKey(), entry.getValue());
          }
          if (stopped) {
            log.info("Stopping flusher thread");
            return ScheduledExecutors.Signal.STOP;
          } else {
            return ScheduledExecutors.Signal.REPEAT;
          }
        }
      }
  );
}
Use of io.druid.java.util.common.granularity.Granularity in the project druid by druid-io.
From the class RealtimePlumber, method mergeAndPush:
/**
 * Selects every sink whose interval starts before the hand-off watermark
 * (currMaxTime minus the window period, truncated to segment granularity)
 * and persists/merges each one.
 */
private void mergeAndPush() {
  final Granularity granularity = schema.getGranularitySpec().getSegmentGranularity();
  final long windowMillis = config.getWindowPeriod().toStandardDuration().getMillis();
  log.info("Starting merge and push.");
  // Clamp so the watermark input never goes negative before subtracting the window.
  final long watermarkInput = Math.max(windowMillis, rejectionPolicy.getCurrMaxTime().getMillis()) - windowMillis;
  final DateTime minTimestampAsDate = granularity.bucketStart(new DateTime(watermarkInput));
  final long minTimestamp = minTimestampAsDate.getMillis();
  log.info("Found [%,d] segments. Attempting to hand off segments that start before [%s].", sinks.size(), minTimestampAsDate);
  final List<Map.Entry<Long, Sink>> eligible = Lists.newArrayList();
  for (final Map.Entry<Long, Sink> candidate : sinks.entrySet()) {
    final Long intervalStart = candidate.getKey();
    if (intervalStart < minTimestamp) {
      log.info("Adding entry [%s] for merge and push.", candidate);
      eligible.add(candidate);
    } else {
      log.info("Skipping persist and merge for entry [%s] : Start time [%s] >= [%s] min timestamp required in this run. Segment will be picked up in a future run.", candidate, new DateTime(intervalStart), minTimestampAsDate);
    }
  }
  log.info("Found [%,d] sinks to persist and merge", eligible.size());
  // Persist outside the selection loop so we never mutate sinks while iterating it.
  for (final Map.Entry<Long, Sink> candidate : eligible) {
    persistAndMerge(candidate.getKey(), candidate.getValue());
  }
}
Use of io.druid.java.util.common.granularity.Granularity in the project druid by druid-io.
From the class RealtimePlumber, method getSink:
/**
 * Returns the sink covering the segment-granularity bucket that contains {@code timestamp},
 * lazily creating and registering one if none exists yet. Returns null when the rejection
 * policy does not accept the timestamp.
 */
private Sink getSink(long timestamp) {
  // Rows the rejection policy refuses get no sink at all.
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }
  final Granularity granularity = schema.getGranularitySpec().getSegmentGranularity();
  final long bucketStartMillis = granularity.bucketStart(new DateTime(timestamp)).getMillis();
  Sink sink = sinks.get(bucketStartMillis);
  if (sink != null) {
    return sink;
  }
  // No sink yet for this bucket: build one spanning exactly one granularity bucket.
  final DateTime bucketStart = new DateTime(bucketStartMillis);
  final Interval sinkInterval = new Interval(bucketStart, granularity.increment(bucketStart));
  final VersioningPolicy versioningPolicy = config.getVersioningPolicy();
  sink = new Sink(
      sinkInterval,
      schema,
      config.getShardSpec(),
      versioningPolicy.getVersion(sinkInterval),
      config.getMaxRowsInMemory(),
      config.isReportParseExceptions()
  );
  addSink(sink);
  return sink;
}
Aggregations