Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class RealtimeTuningConfigTest, method testSerdeWithDefaults.
@Test
public void testSerdeWithDefaults() throws Exception
{
  String jsonStr = "{\"type\":\"realtime\"}";

  ObjectMapper mapper = TestHelper.makeJsonMapper();
  // Round-trip: deserialize, reserialize, deserialize again, so defaults must survive serde.
  RealtimeTuningConfig config = (RealtimeTuningConfig) mapper.readValue(
      mapper.writeValueAsString(mapper.readValue(jsonStr, TuningConfig.class)),
      TuningConfig.class
  );

  Assert.assertNotNull(config.getBasePersistDirectory());
  Assert.assertEquals(new OnheapIncrementalIndex.Spec(), config.getAppendableIndexSpec());
  Assert.assertEquals(0, config.getHandoffConditionTimeout());
  Assert.assertEquals(0, config.getAlertTimeout());
  Assert.assertEquals(new IndexSpec(), config.getIndexSpec());
  Assert.assertEquals(new IndexSpec(), config.getIndexSpecForIntermediatePersists());
  Assert.assertEquals(new Period("PT10M"), config.getIntermediatePersistPeriod());
  Assert.assertEquals(new NumberedShardSpec(0, 1), config.getShardSpec());
  Assert.assertEquals(0, config.getMaxPendingPersists());
  Assert.assertEquals(1000000, config.getMaxRowsInMemory());
  Assert.assertEquals(0, config.getMergeThreadPriority());
  Assert.assertEquals(0, config.getPersistThreadPriority());
  Assert.assertEquals(new Period("PT10M"), config.getWindowPeriod());
  Assert.assertEquals(false, config.isReportParseExceptions());
}
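The default shardSpec asserted above is NumberedShardSpec(0, 1): partition number 0 in a core partition set of size 1. A minimal standalone sketch of those two constructor arguments and their accessors (the class name NumberedShardSpecSketch is made up for illustration; the spec API comes from the assertions above):

import org.apache.druid.timeline.partition.NumberedShardSpec;

public class NumberedShardSpecSketch
{
  public static void main(String[] args)
  {
    // NumberedShardSpec(partitionNum, partitions): which chunk this segment is,
    // and how many chunks make up the atomically-visible core set.
    NumberedShardSpec spec = new NumberedShardSpec(0, 1);
    System.out.println(spec.getPartitionNum());       // 0
    System.out.println(spec.getNumCorePartitions());  // 1
  }
}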
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class RealtimeTuningConfigTest, method testSerdeWithNonDefaults.
@Test
public void testSerdeWithNonDefaults() throws Exception
{
  String jsonStr = "{\n"
      + " \"type\": \"realtime\",\n"
      + " \"maxRowsInMemory\": 100,\n"
      + " \"intermediatePersistPeriod\": \"PT1H\",\n"
      + " \"windowPeriod\": \"PT1H\",\n"
      + " \"basePersistDirectory\": \"/tmp/xxx\",\n"
      + " \"maxPendingPersists\": 100,\n"
      + " \"persistThreadPriority\": 100,\n"
      + " \"mergeThreadPriority\": 100,\n"
      + " \"reportParseExceptions\": true,\n"
      + " \"handoffConditionTimeout\": 100,\n"
      + " \"alertTimeout\": 70,\n"
      + " \"indexSpec\": { \"metricCompression\" : \"NONE\" },\n"
      + " \"indexSpecForIntermediatePersists\": { \"dimensionCompression\" : \"uncompressed\" },\n"
      + " \"appendableIndexSpec\": { \"type\" : \"onheap\" }\n"
      + "}";

  ObjectMapper mapper = TestHelper.makeJsonMapper();
  RealtimeTuningConfig config = (RealtimeTuningConfig) mapper.readValue(
      mapper.writeValueAsString(mapper.readValue(jsonStr, TuningConfig.class)),
      TuningConfig.class
  );

  Assert.assertEquals("/tmp/xxx", config.getBasePersistDirectory().toString());
  Assert.assertEquals(new OnheapIncrementalIndex.Spec(), config.getAppendableIndexSpec());
  Assert.assertEquals(100, config.getHandoffConditionTimeout());
  Assert.assertEquals(70, config.getAlertTimeout());
  Assert.assertEquals(new Period("PT1H"), config.getIntermediatePersistPeriod());
  Assert.assertEquals(new NumberedShardSpec(0, 1), config.getShardSpec());
  Assert.assertEquals(100, config.getMaxPendingPersists());
  Assert.assertEquals(100, config.getMaxRowsInMemory());
  Assert.assertEquals(100, config.getMergeThreadPriority());
  Assert.assertEquals(100, config.getPersistThreadPriority());
  Assert.assertEquals(new Period("PT1H"), config.getWindowPeriod());
  Assert.assertEquals(true, config.isReportParseExceptions());
  Assert.assertEquals(new IndexSpec(null, null, CompressionStrategy.NONE, null), config.getIndexSpec());
  Assert.assertEquals(
      new IndexSpec(null, CompressionStrategy.UNCOMPRESSED, null, null),
      config.getIndexSpecForIntermediatePersists()
  );
}
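The two IndexSpec assertions reveal the positional constructor used here: matching the JSON keys against the expected values, the four arguments appear to be (bitmapSerdeFactory, dimensionCompression, metricCompression, longEncoding), with null meaning "use the default". A hedged sketch of that correspondence (the class name IndexSpecSketch is made up):

import org.apache.druid.segment.IndexSpec;
import org.apache.druid.segment.data.CompressionStrategy;

public class IndexSpecSketch
{
  public static void main(String[] args)
  {
    // "indexSpec": { "metricCompression": "NONE" } becomes:
    IndexSpec forPublished = new IndexSpec(
        null,                     // bitmapSerdeFactory: default
        null,                     // dimensionCompression: default
        CompressionStrategy.NONE, // metricCompression, overridden by the JSON
        null                      // longEncoding: default
    );
    // "indexSpecForIntermediatePersists": { "dimensionCompression": "uncompressed" } becomes:
    IndexSpec forIntermediate = new IndexSpec(null, CompressionStrategy.UNCOMPRESSED, null, null);
    System.out.println(forPublished.equals(new IndexSpec()));  // false: one field differs
  }
}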
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class NewestSegmentFirstPolicyTest, method createTimeline.
private static VersionedIntervalTimeline<String, DataSegment> createTimeline(SegmentGenerateSpec... specs)
{
  List<DataSegment> segments = new ArrayList<>();
  final String version = DateTimes.nowUtc().toString();

  // Process specs newest-first so segments are generated from the end of the timeline backwards.
  final List<SegmentGenerateSpec> orderedSpecs = Arrays.asList(specs);
  orderedSpecs.sort(Comparator.comparing(s -> s.totalInterval, Comparators.intervalsByStartThenEnd().reversed()));

  for (SegmentGenerateSpec spec : orderedSpecs) {
    Interval remainingInterval = spec.totalInterval;

    while (!Intervals.isEmpty(remainingInterval)) {
      final Interval segmentInterval;
      if (remainingInterval.toDuration().isLongerThan(spec.segmentPeriod.toStandardDuration())) {
        // Joda's Interval(period, end): carve one segmentPeriod-long interval off the end of what remains.
        segmentInterval = new Interval(spec.segmentPeriod, remainingInterval.getEnd());
      } else {
        segmentInterval = remainingInterval;
      }

      // One complete partition set per interval chunk: partition i of numSegmentsPerShard.
      for (int i = 0; i < spec.numSegmentsPerShard; i++) {
        final ShardSpec shardSpec = new NumberedShardSpec(i, spec.numSegmentsPerShard);
        final DataSegment segment = new DataSegment(
            DATA_SOURCE,
            segmentInterval,
            spec.version == null ? version : spec.version,
            null,
            ImmutableList.of(),
            ImmutableList.of(),
            shardSpec,
            spec.lastCompactionState,
            0,
            spec.segmentSize
        );
        segments.add(segment);
      }

      remainingInterval = SegmentCompactionUtil.removeIntervalFromEnd(remainingInterval, segmentInterval);
    }
  }

  return VersionedIntervalTimeline.forSegments(segments);
}
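Each pass of the inner loop creates numSegmentsPerShard segments that share an interval and version but differ in partition number, so together they form one complete partition set. A hedged standalone sketch of that idea, reusing the DataSegment constructor shape from the last example on this page ("wiki", the interval, and the sizes are made-up values):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.VersionedIntervalTimeline;
import org.apache.druid.timeline.partition.NumberedShardSpec;
import java.util.ArrayList;
import java.util.List;

public class TimelineSketch
{
  public static void main(String[] args)
  {
    List<DataSegment> segments = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      segments.add(new DataSegment(
          "wiki",                                  // hypothetical datasource
          Intervals.of("2020-01-01/2020-01-02"),
          "v1",
          ImmutableMap.of(),
          ImmutableList.of(),
          ImmutableList.of(),
          new NumberedShardSpec(i, 3),             // partition i of a core set of 3
          0,
          10L
      ));
    }
    VersionedIntervalTimeline<String, DataSegment> timeline =
        VersionedIntervalTimeline.forSegments(segments);
    // One holder covers the interval; all three partitions sit inside it.
    System.out.println(timeline.lookup(Intervals.of("2020-01-01/2020-01-02")).size());
  }
}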
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class SegmentPublisherHelperTest, method testAnnotateCorePartitionSetSizeForNumberedShardSpec.
@Test
public void testAnnotateCorePartitionSetSizeForNumberedShardSpec()
{
  final Set<DataSegment> segments = ImmutableSet.of(
      newSegment(new BuildingNumberedShardSpec(0)),
      newSegment(new BuildingNumberedShardSpec(1)),
      newSegment(new BuildingNumberedShardSpec(2))
  );
  final Set<DataSegment> annotated = SegmentPublisherHelper.annotateShardSpec(segments);
  for (DataSegment segment : annotated) {
    Assert.assertSame(NumberedShardSpec.class, segment.getShardSpec().getClass());
    final NumberedShardSpec shardSpec = (NumberedShardSpec) segment.getShardSpec();
    // Each building spec is finalized with the size of the complete set (3 segments).
    Assert.assertEquals(3, shardSpec.getNumCorePartitions());
  }
}
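BuildingNumberedShardSpec carries only a partition id while segments are still being created, because the core partition set size is unknown until the whole set exists; annotateShardSpec fills it in once it can count the set. A hedged sketch of that finalization, assuming BuildingShardSpec's convert(int) hook is what produces the finished spec (the helper's internals are not shown on this page):

import org.apache.druid.timeline.partition.BuildingNumberedShardSpec;
import org.apache.druid.timeline.partition.NumberedShardSpec;

public class AnnotateSketch
{
  public static void main(String[] args)
  {
    // Hypothetical stand-in for what annotateShardSpec does per segment:
    BuildingNumberedShardSpec building = new BuildingNumberedShardSpec(2); // partition id only
    NumberedShardSpec finished = building.convert(3);  // assumed hook: core set size = 3
    System.out.println(finished.getPartitionNum());       // 2
    System.out.println(finished.getNumCorePartitions());  // 3
  }
}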
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class StreamAppenderatorDriverFailTest, method testFailDuringPublishInternal.
private void testFailDuringPublishInternal(boolean failWithException) throws Exception
{
  driver = new StreamAppenderatorDriver(
      new FailableAppenderator(), allocator, segmentHandoffNotifierFactory,
      new NoopUsedSegmentChecker(), dataSegmentKiller, OBJECT_MAPPER, new FireDepartmentMetrics()
  );
  driver.startJob(null);

  final TestCommitterSupplier<Integer> committerSupplier = new TestCommitterSupplier<>();
  segmentHandoffNotifierFactory.setHandoffDelay(100);
  Assert.assertNull(driver.startJob(null));

  for (int i = 0; i < ROWS.size(); i++) {
    committerSupplier.setMetadata(i + 1);
    Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk());
  }

  if (!failWithException) {
    // Should only kill segments if there was _no_ exception.
    dataSegmentKiller.killQuietly(new DataSegment(
        "foo",
        Intervals.of("2000-01-01T00:00:00.000Z/2000-01-01T01:00:00.000Z"),
        "abc123",
        ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(),
        new NumberedShardSpec(0, 0), 0, 0
    ));
    EasyMock.expectLastCall().once();
    dataSegmentKiller.killQuietly(new DataSegment(
        "foo",
        Intervals.of("2000-01-01T01:00:00.000Z/2000-01-01T02:00:00.000Z"),
        "abc123",
        ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(),
        new NumberedShardSpec(0, 0), 0, 0
    ));
    EasyMock.expectLastCall().once();
  }

  EasyMock.replay(dataSegmentKiller);
  try {
    driver.publish(
        StreamAppenderatorDriverTest.makeFailingPublisher(failWithException),
        committerSupplier.get(),
        ImmutableList.of("dummy")
    ).get(PUBLISH_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
  } catch (Exception e) {
    // Rethrow so callers can assert on the failure mode; verification still runs below.
    throw e;
  } finally {
    EasyMock.verify(dataSegmentKiller);
  }
}
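The mock choreography above is EasyMock's record/replay/verify lifecycle for void methods: invoking killQuietly on the mock records an expectation, expectLastCall().once() pins its cardinality, replay() arms the mock, and verify() in the finally block fails the test if the expected kills did not happen. A minimal hedged sketch of that lifecycle in isolation (Runnable is a hypothetical stand-in for DataSegmentKiller):

import org.easymock.EasyMock;

public class EasyMockLifecycleSketch
{
  public static void main(String[] args)
  {
    Runnable mock = EasyMock.createMock(Runnable.class);
    mock.run();                        // record phase: expect one call to run()
    EasyMock.expectLastCall().once();  // cardinality: exactly once
    EasyMock.replay(mock);             // switch from record to replay mode
    mock.run();                        // code under test invokes the mock
    EasyMock.verify(mock);             // throws if expectations were not met
  }
}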