Example usage of org.apache.druid.indexer.partitions.HashedPartitionsSpec from the druid-io/druid project.
Taken from the testBestEffortRollupWithHashedPartitionsSpec method of the IndexTaskSerdeTest class.
@Test
public void testBestEffortRollupWithHashedPartitionsSpec() {
  // Best-effort rollup only accepts DynamicPartitionsSpec; a hash-based spec
  // must make the tuning-config constructor throw.
  expectedException.expect(IllegalArgumentException.class);
  expectedException.expectMessage("DynamicPartitionsSpec must be used for best-effort rollup");

  final HashedPartitionsSpec hashSpec = new HashedPartitionsSpec(null, 10, ImmutableList.of("dim1", "dim2"));
  final IndexSpec indexSpec = new IndexSpec(
      new RoaringBitmapSerdeFactory(false),
      CompressionStrategy.LZ4,
      CompressionStrategy.LZF,
      LongEncodingStrategy.LONGS
  );
  // Construction itself is expected to fail validation, so the result is never used.
  final IndexTuningConfig tuningConfig = new IndexTuningConfig(
      null, null, null, 100, 2000L, null, null, null, null, null,
      hashSpec,
      indexSpec,
      null, null, false, null, null, 100L,
      OffHeapMemorySegmentWriteOutMediumFactory.instance(),
      true, 10, 100, null, null
  );
}
Example usage of org.apache.druid.indexer.partitions.HashedPartitionsSpec from the druid-io/druid project.
Taken from the failsInSecondPhase method of the HashPartitionTaskKillTest class.
@Test(timeout = 5000L)
public void failsInSecondPhase() throws Exception {
  // Hash-partitioned parallel ingestion over two dimensions, two tasks per phase.
  final HashedPartitionsSpec partitionsSpec =
      new HashedPartitionsSpec(null, 3, ImmutableList.of("dim1", "dim2"));
  final ParallelIndexSupervisorTask supervisorTask = createTestTask(
      TIMESTAMP_SPEC, DIMENSIONS_SPEC, INPUT_FORMAT, null, INTERVAL_TO_INDEX,
      inputDir, "test_*", partitionsSpec, 2, false, true, 0
  );
  final TaskActionClient client = createActionClient(supervisorTask);
  final TaskToolbox toolbox = createTaskToolbox(supervisorTask, client);

  prepareTaskForLocking(supervisorTask);
  Assert.assertTrue(supervisorTask.isReady(client));
  // Stop before running so the multi-phase run fails in the second phase.
  supervisorTask.stopGracefully(null);

  final TaskStatus status = supervisorTask.runHashPartitionMultiPhaseParallel(toolbox);
  Assert.assertTrue(status.isFailure());
  Assert.assertEquals("Failed in phase[PHASE-2]. See task logs for details.", status.getErrorMsg());
}
Example usage of org.apache.druid.indexer.partitions.HashedPartitionsSpec from the druid-io/druid project.
Taken from the requiresMultiDimensionPartitions method of the PartialRangeSegmentGenerateTaskTest class.
@Test
public void requiresMultiDimensionPartitions() {
  // A hash-based spec is neither range nor single_dim, so building the task must fail.
  exception.expect(IllegalArgumentException.class);
  exception.expectMessage("range or single_dim partitionsSpec required");

  final PartitionsSpec hashSpec = new HashedPartitionsSpec(null, 1, null);
  final ParallelIndexTuningConfig config = new ParallelIndexTestingFactory.TuningConfigBuilder()
      .partitionsSpec(hashSpec)
      .build();
  new PartialRangeSegmentGenerateTaskBuilder().tuningConfig(config).build();
}
Example usage of org.apache.druid.indexer.partitions.HashedPartitionsSpec from the druid-io/druid project.
Taken from the setup method of the PartialSegmentMergeIngestionSpecTest class.
@Before
public void setup() {
  // IO config wrapping the single partition location used by this test.
  ioConfig = new PartialSegmentMergeIOConfig(Collections.singletonList(partitionLocation));
  // One shard, no explicit partition dimensions.
  partitionsSpec = new HashedPartitionsSpec(null, 1, Collections.emptyList());
  target = new PartialSegmentMergeIngestionSpec(
      ParallelIndexTestingFactory.createDataSchema(ParallelIndexTestingFactory.INPUT_INTERVALS),
      ioConfig,
      new ParallelIndexTestingFactory.TuningConfigBuilder()
          .partitionsSpec(partitionsSpec)
          .build()
  );
}
Example usage of org.apache.druid.indexer.partitions.HashedPartitionsSpec from the druid-io/druid project.
Taken from the forceGuaranteedRollupWithHashPartitionsValid method of the ParallelIndexSupervisorTaskSerdeTest class.
@Test
public void forceGuaranteedRollupWithHashPartitionsValid() {
  // Guaranteed rollup with an explicit shard count is a valid combination.
  final Integer numShards = 2;
  final ParallelIndexSupervisorTask supervisorTask = new ParallelIndexSupervisorTaskBuilder()
      .ingestionSpec(
          new ParallelIndexIngestionSpecBuilder()
              .forceGuaranteedRollup(true)
              .partitionsSpec(new HashedPartitionsSpec(null, numShards, null))
              .inputIntervals(INTERVALS)
              .build()
      )
      .build();
  final PartitionsSpec actual = supervisorTask.getIngestionSchema().getTuningConfig().getPartitionsSpec();
  Assert.assertThat(actual, CoreMatchers.instanceOf(HashedPartitionsSpec.class));
}
Aggregations