Use of io.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.
In the class HadoopIngestionSpecTest, the method testPeriodSegmentGranularitySpec:
@Test
public void testPeriodSegmentGranularitySpec()
{
  final HadoopIngestionSpec schema;

  try {
    schema = jsonReadWriteRead(
        "{\n"
        + " \"dataSchema\": {\n"
        + " \"dataSource\": \"foo\",\n"
        + " \"metricsSpec\": [],\n"
        + " \"granularitySpec\": {\n"
        + " \"type\": \"uniform\",\n"
        + " \"segmentGranularity\": {\"type\": \"period\", \"period\":\"PT1H\", \"timeZone\":\"America/Los_Angeles\"},\n"
        + " \"intervals\": [\"2012-01-01/P1D\"]\n"
        + " }\n"
        + " }\n"
        + "}",
        HadoopIngestionSpec.class
    );
  }
  catch (Exception e) {
    throw Throwables.propagate(e);
  }

  final UniformGranularitySpec granularitySpec =
      (UniformGranularitySpec) schema.getDataSchema().getGranularitySpec();

  // The {"type": "period", ...} JSON should deserialize to an equivalent PeriodGranularity.
  Assert.assertEquals(
      "getSegmentGranularity",
      new PeriodGranularity(new Period("PT1H"), null, DateTimeZone.forID("America/Los_Angeles")),
      granularitySpec.getSegmentGranularity()
  );
}
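For reference, here is a minimal, hypothetical sketch (not part of the test above) of what the deserialized segmentGranularity computes. It assumes only the PeriodGranularity constructor and the bucketStart/bucketEnd methods that appear in the snippets on this page; the demo class and its values are made up.

// Hypothetical demo; assumes Druid's PeriodGranularity and Joda-Time on the classpath.
import io.druid.java.util.common.granularity.PeriodGranularity;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;

public class PeriodGranularityDemo
{
  public static void main(String[] args)
  {
    final DateTimeZone tz = DateTimeZone.forID("America/Los_Angeles");
    // The same granularity the JSON spec above deserializes to: hourly buckets in LA time.
    final PeriodGranularity hourly = new PeriodGranularity(new Period("PT1H"), null, tz);

    final DateTime t = new DateTime("2012-01-01T10:37:15", tz);
    // Truncation to hour boundaries is computed in the granularity's time zone.
    System.out.println(hourly.bucketStart(t)); // expected: 2012-01-01T10:00:00.000-08:00
    System.out.println(hourly.bucketEnd(t));   // expected: 2012-01-01T11:00:00.000-08:00
  }
}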
Use of io.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.
In the class GranularityPathSpecTest, the method testBackwardCompatiblePeriodSegmentGranularitySerialization:
@Test
public void testBackwardCompatiblePeriodSegmentGranularitySerialization() throws JsonProcessingException
{
  // A 2-second period has no predefined name, so it must NOT collapse to the
  // simple string form on serialization...
  final PeriodGranularity pt2S = new PeriodGranularity(new Period("PT2S"), null, DateTimeZone.UTC);
  Assert.assertNotEquals("\"SECOND\"", jsonMapper.writeValueAsString(pt2S));

  // ...while the predefined SECOND granularity still serializes to the plain string.
  final Granularity pt1S = Granularities.SECOND;
  Assert.assertEquals("\"SECOND\"", jsonMapper.writeValueAsString(pt1S));
}
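To see the asymmetry the test relies on, a hedged sketch: it reuses the imports from the earlier sketch, assumes Druid's DefaultObjectMapper (which Druid's own tests commonly use as jsonMapper), and the exact object-form output shown in the comment is an assumption, not verified output.

ObjectMapper mapper = new DefaultObjectMapper();

// PT1S matches the predefined SECOND granularity, so Granularities.SECOND
// serializes to the plain string "SECOND"...
System.out.println(mapper.writeValueAsString(Granularities.SECOND));

// ...while PT2S has no predefined name and serializes as a full object,
// presumably something like {"type":"period","period":"PT2S","timeZone":"UTC","origin":null}.
System.out.println(mapper.writeValueAsString(
    new PeriodGranularity(new Period("PT2S"), null, DateTimeZone.UTC)
));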
Use of io.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.
In the class TimeseriesQueryRunnerTest, the method testTimeseriesWithVaryingGran:
@Test
public void testTimeseriesWithVaryingGran()
{
  TimeseriesQuery query1 = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.dataSource)
      .granularity(new PeriodGranularity(new Period("P1M"), null, null))
      .intervals(Arrays.asList(new Interval("2011-04-02T00:00:00.000Z/2011-04-03T00:00:00.000Z")))
      .aggregators(
          Arrays.<AggregatorFactory>asList(
              QueryRunnerTestHelper.rowsCount,
              new LongSumAggregatorFactory("idx", "index"),
              QueryRunnerTestHelper.qualityUniques
          )
      )
      .descending(descending)
      .build();

  // With P1M granularity, the single result bucket is stamped at the start of the month.
  List<Result<TimeseriesResultValue>> expectedResults1 = Arrays.asList(
      new Result<>(
          new DateTime("2011-04-01"),
          new TimeseriesResultValue(
              ImmutableMap.<String, Object>of("rows", 13L, "idx", 5827L, "uniques", QueryRunnerTestHelper.UNIQUES_9)
          )
      )
  );
  Iterable<Result<TimeseriesResultValue>> results1 = Sequences.toList(
      runner.run(query1, CONTEXT),
      Lists.<Result<TimeseriesResultValue>>newArrayList()
  );
  assertExpectedResults(expectedResults1, results1);

  TimeseriesQuery query2 = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.dataSource)
      .granularity("DAY")
      .intervals(Arrays.asList(new Interval("2011-04-02T00:00:00.000Z/2011-04-03T00:00:00.000Z")))
      .aggregators(
          Arrays.<AggregatorFactory>asList(
              QueryRunnerTestHelper.rowsCount,
              new LongSumAggregatorFactory("idx", "index"),
              QueryRunnerTestHelper.qualityUniques
          )
      )
      .build();

  // With DAY granularity, the same data is stamped at the start of the queried day.
  List<Result<TimeseriesResultValue>> expectedResults2 = Arrays.asList(
      new Result<>(
          new DateTime("2011-04-02"),
          new TimeseriesResultValue(
              ImmutableMap.<String, Object>of("rows", 13L, "idx", 5827L, "uniques", QueryRunnerTestHelper.UNIQUES_9)
          )
      )
  );
  Iterable<Result<TimeseriesResultValue>> results2 = Sequences.toList(
      runner.run(query2, CONTEXT),
      Lists.<Result<TimeseriesResultValue>>newArrayList()
  );
  assertExpectedResults(expectedResults2, results2);
}
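Why does query1 report its bucket as 2011-04-01 when the query interval starts on 2011-04-02? A small sketch of the truncation P1M performs, under the same assumptions as the first sketch:

// P1M truncates any instant in April to the first of the month (UTC here,
// since no time zone was supplied to the granularity), regardless of the
// query interval's start.
PeriodGranularity monthly = new PeriodGranularity(new Period("P1M"), null, null);
DateTime inInterval = new DateTime("2011-04-02T12:00:00Z", DateTimeZone.UTC);
System.out.println(monthly.bucketStart(inInterval)); // expected: 2011-04-01T00:00:00.000Z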
Use of io.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.
In the class TimeseriesQueryRunnerTest, the method testTimeseriesGranularityNotAlignedOnSegmentBoundariesWithFilter:
@Test
public void testTimeseriesGranularityNotAlignedOnSegmentBoundariesWithFilter()
{
  TimeseriesQuery query1 = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.dataSource)
      .filters(QueryRunnerTestHelper.marketDimension, "spot", "upfront", "total_market")
      .granularity(new PeriodGranularity(new Period("P7D"), null, DateTimeZone.forID("America/Los_Angeles")))
      .intervals(Arrays.asList(new Interval("2011-01-12T00:00:00.000-08:00/2011-01-20T00:00:00.000-08:00")))
      .aggregators(
          Arrays.<AggregatorFactory>asList(
              QueryRunnerTestHelper.rowsCount,
              new LongSumAggregatorFactory("idx", "index")
          )
      )
      .descending(descending)
      .build();

  // The 7-day buckets start on 2011-01-06 and 2011-01-13, i.e. they are not
  // aligned with the start of the query interval (2011-01-12).
  List<Result<TimeseriesResultValue>> expectedResults1 = Arrays.asList(
      new Result<>(
          new DateTime("2011-01-06T00:00:00.000-08:00", DateTimeZone.forID("America/Los_Angeles")),
          new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 13L, "idx", 6071L))
      ),
      new Result<>(
          new DateTime("2011-01-13T00:00:00.000-08:00", DateTimeZone.forID("America/Los_Angeles")),
          new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 91L, "idx", 33382L))
      )
  );
  Iterable<Result<TimeseriesResultValue>> results1 = Sequences.toList(
      runner.run(query1, CONTEXT),
      Lists.<Result<TimeseriesResultValue>>newArrayList()
  );
  assertExpectedResults(expectedResults1, results1);
}
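The bucket stamps of 2011-01-06 and 2011-01-13 come from the granularity, not the query interval: with no explicit origin, P7D buckets appear to be anchored at the epoch (1970-01-01 was a Thursday, and both stamps are Thursdays). A sketch under the same assumptions as the earlier sketches:

DateTimeZone la = DateTimeZone.forID("America/Los_Angeles");
PeriodGranularity weekly = new PeriodGranularity(new Period("P7D"), null, la);

// The interval start (a Wednesday) truncates back to the enclosing bucket's
// Thursday start, which is why the first result above is stamped 2011-01-06.
DateTime intervalStart = new DateTime("2011-01-12T00:00:00.000-08:00", la);
System.out.println(weekly.bucketStart(intervalStart)); // expected: 2011-01-06T00:00:00.000-08:00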
Use of io.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.
In the class AbstractWorkerResourceManagementStrategy, the method startManagement:
@Override
public void startManagement(final WorkerTaskRunner runner)
{
  if (!lifecycleLock.canStart()) {
    return;
  }
  try {
    log.info("Started Resource Management Scheduler");

    ScheduledExecutors.scheduleAtFixedRate(
        exec,
        resourceManagementSchedulerConfig.getProvisionPeriod().toStandardDuration(),
        new Runnable()
        {
          @Override
          public void run()
          {
            // Any Errors are caught by ScheduledExecutors
            doProvision(runner);
          }
        }
    );

    // Schedule termination of worker nodes periodically
    Period period = resourceManagementSchedulerConfig.getTerminatePeriod();
    PeriodGranularity granularity =
        new PeriodGranularity(period, resourceManagementSchedulerConfig.getOriginTime(), null);
    final long startTime = granularity.bucketEnd(new DateTime()).getMillis();

    ScheduledExecutors.scheduleAtFixedRate(
        exec,
        new Duration(System.currentTimeMillis(), startTime),
        resourceManagementSchedulerConfig.getTerminatePeriod().toStandardDuration(),
        new Runnable()
        {
          @Override
          public void run()
          {
            // Any Errors are caught by ScheduledExecutors
            doTerminate(runner);
          }
        }
    );

    lifecycleLock.started();
  }
  finally {
    lifecycleLock.exitStart();
  }
}
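The initial-delay computation above can be illustrated in isolation. In this sketch the terminate period and origin are hypothetical values (same assumptions as the earlier sketches): bucketEnd(now) yields the end of the current bucket relative to the origin, so the first doTerminate run lands on a bucket boundary rather than firing immediately.

// Hypothetical config: terminate every hour, origin at 15 minutes past the hour.
PeriodGranularity g = new PeriodGranularity(
    new Period("PT1H"),
    new DateTime("2017-01-01T00:15:00Z"),
    null
);
DateTime now = new DateTime("2017-06-15T10:00:00Z");
// The current bucket is 09:15-10:15, so the first run is scheduled for 10:15.
System.out.println(g.bucketEnd(now)); // expected: 2017-06-15T10:15:00.000Z
long initialDelayMillis = g.bucketEnd(now).getMillis() - now.getMillis(); // 15 minutes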