
Example 56 with Period

Use of org.joda.time.Period in project druid by druid-io.

From the class KafkaSupervisorTuningConfigTest, method testSerdeWithNonDefaults.

@Test
public void testSerdeWithNonDefaults() throws Exception {
    String jsonStr = "{\n"
        + "  \"type\": \"kafka\",\n"
        + "  \"basePersistDirectory\": \"/tmp/xxx\",\n"
        + "  \"maxRowsInMemory\": 100,\n"
        + "  \"maxRowsPerSegment\": 100,\n"
        + "  \"intermediatePersistPeriod\": \"PT1H\",\n"
        + "  \"maxPendingPersists\": 100,\n"
        + "  \"buildV9Directly\": false,\n"
        + "  \"reportParseExceptions\": true,\n"
        + "  \"handoffConditionTimeout\": 100,\n"
        + "  \"workerThreads\": 12,\n"
        + "  \"chatThreads\": 13,\n"
        + "  \"chatRetries\": 14,\n"
        + "  \"httpTimeout\": \"PT15S\",\n"
        + "  \"shutdownTimeout\": \"PT95S\"\n"
        + "}";
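    // Round-trip: parse the JSON, serialize the result, and parse it again so
    // that both serialization and deserialization of each field are exercised.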
    KafkaSupervisorTuningConfig config = (KafkaSupervisorTuningConfig) mapper.readValue(mapper.writeValueAsString(mapper.readValue(jsonStr, TuningConfig.class)), TuningConfig.class);
    Assert.assertEquals(new File("/tmp/xxx"), config.getBasePersistDirectory());
    Assert.assertEquals(100, config.getMaxRowsInMemory());
    Assert.assertEquals(100, config.getMaxRowsPerSegment());
    Assert.assertEquals(new Period("PT1H"), config.getIntermediatePersistPeriod());
    Assert.assertEquals(100, config.getMaxPendingPersists());
    Assert.assertEquals(false, config.getBuildV9Directly());
    Assert.assertEquals(true, config.isReportParseExceptions());
    Assert.assertEquals(100, config.getHandoffConditionTimeout());
    Assert.assertEquals(12, (int) config.getWorkerThreads());
    Assert.assertEquals(13, (int) config.getChatThreads());
    Assert.assertEquals(14L, (long) config.getChatRetries());
    Assert.assertEquals(Duration.standardSeconds(15), config.getHttpTimeout());
    Assert.assertEquals(Duration.standardSeconds(95), config.getShutdownTimeout());
}
Also used: TuningConfig(io.druid.segment.indexing.TuningConfig) Period(org.joda.time.Period) Duration(org.joda.time.Duration) File(java.io.File) Test(org.junit.Test)
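
The timeout assertions above rely on two Joda-Time conversions: Period parses ISO-8601 duration strings, and a seconds-only Period maps onto a fixed-length Duration. A minimal standalone sketch (the class name is mine, not part of the test):

import org.joda.time.Duration;
import org.joda.time.Period;

public class PeriodParsingSketch {
    public static void main(String[] args) {
        // "PT1H" is an ISO-8601 duration string meaning one hour.
        Period oneHour = new Period("PT1H");
        System.out.println(oneHour.getHours()); // 1

        // A seconds-only Period converts to a fixed-length Duration, which is
        // what the httpTimeout and shutdownTimeout assertions compare against.
        Duration httpTimeout = new Period("PT15S").toStandardDuration();
        System.out.println(httpTimeout.equals(Duration.standardSeconds(15))); // true
    }
}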

Example 57 with Period

Use of org.joda.time.Period in project druid by druid-io.

From the class KafkaTuningConfigTest, method testSerdeWithDefaults.

@Test
public void testSerdeWithDefaults() throws Exception {
    String jsonStr = "{\"type\": \"kafka\"}";
    KafkaTuningConfig config = (KafkaTuningConfig) mapper.readValue(mapper.writeValueAsString(mapper.readValue(jsonStr, TuningConfig.class)), TuningConfig.class);
    Assert.assertNotNull(config.getBasePersistDirectory());
    Assert.assertEquals(75000, config.getMaxRowsInMemory());
    Assert.assertEquals(5_000_000, config.getMaxRowsPerSegment());
    Assert.assertEquals(new Period("PT10M"), config.getIntermediatePersistPeriod());
    Assert.assertEquals(0, config.getMaxPendingPersists());
    Assert.assertEquals(new IndexSpec(), config.getIndexSpec());
    Assert.assertEquals(true, config.getBuildV9Directly());
    Assert.assertEquals(false, config.isReportParseExceptions());
    Assert.assertEquals(0, config.getHandoffConditionTimeout());
}
Also used: TuningConfig(io.druid.segment.indexing.TuningConfig) IndexSpec(io.druid.segment.IndexSpec) Period(org.joda.time.Period) Test(org.junit.Test)
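
Stripped of the Druid specifics, the round-trip idiom these serde tests use looks like the sketch below, where SampleConfig is a hypothetical stand-in for TuningConfig:

import com.fasterxml.jackson.databind.ObjectMapper;

public class RoundTripSketch {
    // Hypothetical stand-in for a config class with one defaulted field.
    public static class SampleConfig {
        public int maxRowsInMemory = 75000;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Deserialize, re-serialize, deserialize: a field the serializer drops
        // or renames would come back with the wrong value and fail the asserts.
        SampleConfig once = mapper.readValue("{}", SampleConfig.class);
        SampleConfig twice = mapper.readValue(mapper.writeValueAsString(once), SampleConfig.class);
        System.out.println(twice.maxRowsInMemory); // 75000
    }
}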

Example 58 with Period

Use of org.joda.time.Period in project druid by druid-io.

From the class KafkaTuningConfigTest, method testSerdeWithNonDefaults.

@Test
public void testSerdeWithNonDefaults() throws Exception {
    String jsonStr = "{\n"
        + "  \"type\": \"kafka\",\n"
        + "  \"basePersistDirectory\": \"/tmp/xxx\",\n"
        + "  \"maxRowsInMemory\": 100,\n"
        + "  \"maxRowsPerSegment\": 100,\n"
        + "  \"intermediatePersistPeriod\": \"PT1H\",\n"
        + "  \"maxPendingPersists\": 100,\n"
        + "  \"buildV9Directly\": true,\n"
        + "  \"reportParseExceptions\": true,\n"
        + "  \"handoffConditionTimeout\": 100\n"
        + "}";
    KafkaTuningConfig config = (KafkaTuningConfig) mapper.readValue(mapper.writeValueAsString(mapper.readValue(jsonStr, TuningConfig.class)), TuningConfig.class);
    Assert.assertEquals(new File("/tmp/xxx"), config.getBasePersistDirectory());
    Assert.assertEquals(100, config.getMaxRowsInMemory());
    Assert.assertEquals(100, config.getMaxRowsPerSegment());
    Assert.assertEquals(new Period("PT1H"), config.getIntermediatePersistPeriod());
    Assert.assertEquals(100, config.getMaxPendingPersists());
    Assert.assertEquals(true, config.getBuildV9Directly());
    Assert.assertEquals(true, config.isReportParseExceptions());
    Assert.assertEquals(100, config.getHandoffConditionTimeout());
}
Also used: TuningConfig(io.druid.segment.indexing.TuningConfig) Period(org.joda.time.Period) File(java.io.File) Test(org.junit.Test)
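
Both tests deserialize to the TuningConfig interface and rely on the "type": "kafka" discriminator to select the concrete class. Druid wires that name up through its extension modules; a self-contained sketch of the underlying Jackson mechanism, with hypothetical class names, might look like:

import com.fasterxml.jackson.annotation.JsonSubTypes;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.databind.ObjectMapper;

public class PolymorphicSketch {
    // Hypothetical stand-ins for TuningConfig and KafkaTuningConfig.
    @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
    @JsonSubTypes(@JsonSubTypes.Type(value = KafkaLike.class, name = "kafka"))
    interface ConfigLike {
    }

    static class KafkaLike implements ConfigLike {
        public int maxRowsInMemory = 75000;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // The "type" field selects the concrete subclass, which is why the
        // tests can read into the interface and cast to the Kafka config.
        ConfigLike config = mapper.readValue("{\"type\": \"kafka\"}", ConfigLike.class);
        System.out.println(config instanceof KafkaLike); // true
    }
}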

Example 59 with Period

Use of org.joda.time.Period in project druid by druid-io.

From the class KafkaTuningConfigTest, method testCopyOf.

@Test
public void testCopyOf() throws Exception {
    KafkaTuningConfig original = new KafkaTuningConfig(1, 2, new Period("PT3S"), new File("/tmp/xxx"), 4, new IndexSpec(), true, true, 5L, null);
    KafkaTuningConfig copy = KafkaTuningConfig.copyOf(original);
    Assert.assertEquals(1, copy.getMaxRowsInMemory());
    Assert.assertEquals(2, copy.getMaxRowsPerSegment());
    Assert.assertEquals(new Period("PT3S"), copy.getIntermediatePersistPeriod());
    Assert.assertEquals(new File("/tmp/xxx"), copy.getBasePersistDirectory());
    Assert.assertEquals(4, copy.getMaxPendingPersists());
    Assert.assertEquals(new IndexSpec(), copy.getIndexSpec());
    Assert.assertEquals(true, copy.getBuildV9Directly());
    Assert.assertEquals(true, copy.isReportParseExceptions());
    Assert.assertEquals(5L, copy.getHandoffConditionTimeout());
}
Also used: IndexSpec(io.druid.segment.IndexSpec) Period(org.joda.time.Period) File(java.io.File) Test(org.junit.Test)
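
copyOf works as a field-by-field copy, which is safe because every field of the config is immutable. A minimal sketch of the idiom, using a hypothetical two-field class:

import org.joda.time.Period;

public class CopyOfSketch {
    // Hypothetical cut-down config; the real KafkaTuningConfig copies every field.
    static final class MiniConfig {
        final int maxRowsInMemory;
        final Period intermediatePersistPeriod;

        MiniConfig(int maxRowsInMemory, Period intermediatePersistPeriod) {
            this.maxRowsInMemory = maxRowsInMemory;
            this.intermediatePersistPeriod = intermediatePersistPeriod;
        }

        // Every field is immutable, so this shallow copy behaves like a deep copy.
        static MiniConfig copyOf(MiniConfig other) {
            return new MiniConfig(other.maxRowsInMemory, other.intermediatePersistPeriod);
        }
    }

    public static void main(String[] args) {
        MiniConfig original = new MiniConfig(1, new Period("PT3S"));
        MiniConfig copy = MiniConfig.copyOf(original);
        System.out.println(copy.maxRowsInMemory); // 1
    }
}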

Example 60 with Period

Use of org.joda.time.Period in project druid by druid-io.

From the class AppenderatorPlumber, method mergeAndPush.

private void mergeAndPush() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final Period windowPeriod = config.getWindowPeriod();
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();
    log.info("Starting merge and push.");
    DateTime minTimestampAsDate = segmentGranularity.bucketStart(new DateTime(Math.max(windowMillis, rejectionPolicy.getCurrMaxTime().getMillis()) - windowMillis));
    long minTimestamp = minTimestampAsDate.getMillis();
    final List<SegmentIdentifier> appenderatorSegments = appenderator.getSegments();
    final List<SegmentIdentifier> segmentsToPush = Lists.newArrayList();
    if (shuttingDown) {
        log.info("Found [%,d] segments. Attempting to hand off all of them.", appenderatorSegments.size());
        segmentsToPush.addAll(appenderatorSegments);
    } else {
        log.info("Found [%,d] segments. Attempting to hand off segments that start before [%s].", appenderatorSegments.size(), minTimestampAsDate);
        for (SegmentIdentifier segment : appenderatorSegments) {
            final long intervalStart = segment.getInterval().getStartMillis();
            if (intervalStart < minTimestamp) {
                log.info("Adding entry [%s] for merge and push.", segment);
                segmentsToPush.add(segment);
            } else {
                log.info("Skipping persist and merge for entry [%s] : Start time [%s] >= [%s] min timestamp required in this run. Segment will be picked up in a future run.", segment, new DateTime(intervalStart), minTimestampAsDate);
            }
        }
    }
    log.info("Found [%,d] segments to persist and merge", segmentsToPush.size());
    final Function<Throwable, Void> errorHandler = new Function<Throwable, Void>() {

        @Override
        public Void apply(Throwable throwable) {
            final List<String> segmentIdentifierStrings = Lists.transform(segmentsToPush, new Function<SegmentIdentifier, String>() {

                @Override
                public String apply(SegmentIdentifier input) {
                    return input.getIdentifierAsString();
                }
            });
            log.makeAlert(throwable, "Failed to publish merged indexes[%s]", schema.getDataSource()).addData("segments", segmentIdentifierStrings).emit();
            if (shuttingDown) {
                // We're trying to shut down, and these segments failed to push. Let's just get rid of them.
                // This call will also delete possibly-partially-written files, so we don't need to do it explicitly.
                cleanShutdown = false;
                for (SegmentIdentifier identifier : segmentsToPush) {
                    dropSegment(identifier);
                }
            }
            return null;
        }
    };
    // WARNING: Committers.nil() here means that on-disk data can get out of sync with committing.
    Futures.addCallback(appenderator.push(segmentsToPush, Committers.nil()), new FutureCallback<SegmentsAndMetadata>() {

        @Override
        public void onSuccess(SegmentsAndMetadata result) {
            // Immediately publish after pushing
            for (DataSegment pushedSegment : result.getSegments()) {
                try {
                    segmentPublisher.publishSegment(pushedSegment);
                } catch (Exception e) {
                    errorHandler.apply(e);
                }
            }
            log.info("Published [%,d] sinks.", segmentsToPush.size());
        }

        @Override
        public void onFailure(Throwable e) {
            log.warn(e, "Failed to push [%,d] segments.", segmentsToPush.size());
            errorHandler.apply(e);
        }
    });
}
Also used: Period(org.joda.time.Period) Granularity(io.druid.java.util.common.granularity.Granularity) DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) IndexSizeExceededException(io.druid.segment.incremental.IndexSizeExceededException) IOException(java.io.IOException) Function(com.google.common.base.Function)
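
The cutoff arithmetic at the top of mergeAndPush is easy to verify in isolation. A standalone sketch, assuming an hourly segment granularity (hourStart is a stand-in for segmentGranularity.bucketStart):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;

public class WindowCutoffSketch {
    // Stand-in for Granularity.bucketStart with an hourly segment granularity.
    static DateTime hourStart(DateTime t) {
        return t.hourOfDay().roundFloorCopy();
    }

    public static void main(String[] args) {
        Period windowPeriod = new Period("PT10M");
        long windowMillis = windowPeriod.toStandardDuration().getMillis();
        DateTime currMaxTime = new DateTime("2017-01-01T12:34:56Z", DateTimeZone.UTC);

        // Segments whose interval starts before this instant fall outside the
        // window period and are eligible for merge and push.
        DateTime cutoff = hourStart(new DateTime(
            Math.max(windowMillis, currMaxTime.getMillis()) - windowMillis,
            DateTimeZone.UTC));
        System.out.println(cutoff); // 2017-01-01T12:00:00.000Z
    }
}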

Aggregations

Types appearing in these examples, with the number of indexed examples using each:

Period (org.joda.time.Period): 273
Test (org.junit.Test): 102
DateTime (org.joda.time.DateTime): 54
PeriodGranularity (io.druid.java.util.common.granularity.PeriodGranularity): 40
Interval (org.joda.time.Interval): 30
LongSumAggregatorFactory (io.druid.query.aggregation.LongSumAggregatorFactory): 29
DefaultDimensionSpec (io.druid.query.dimension.DefaultDimensionSpec): 20
Row (io.druid.data.input.Row): 19
File (java.io.File): 15
DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper): 12
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 10
Result (io.druid.query.Result): 10
CountAggregatorFactory (io.druid.query.aggregation.CountAggregatorFactory): 10
FinalizeResultsQueryRunner (io.druid.query.FinalizeResultsQueryRunner): 8
QueryRunner (io.druid.query.QueryRunner): 8
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 8
DimensionSpec (io.druid.query.dimension.DimensionSpec): 8
MutablePeriod (org.joda.time.MutablePeriod): 8
ExtractionDimensionSpec (io.druid.query.dimension.ExtractionDimensionSpec): 7
DimFilterHavingSpec (io.druid.query.groupby.having.DimFilterHavingSpec): 7