Uses of org.apache.druid.indexing.common.TaskToolbox in project druid by druid-io.
From the class RangePartitionTaskKillTest, method failsThirdPhase:
@Test(timeout = 5000L)
public void failsThirdPhase() throws Exception {
    int targetRowsPerSegment = NUM_ROW * 2 / DIM_FILE_CARDINALITY / NUM_PARTITION;
    // Build a parallel index task that range-partitions on DIM1.
    final ParallelIndexSupervisorTask task = newTask(
        TIMESTAMP_SPEC, DIMENSIONS_SPEC, INPUT_FORMAT, null, INTERVAL_TO_INDEX,
        inputDir, TEST_FILE_NAME_PREFIX + "*",
        new SingleDimensionPartitionsSpec(targetRowsPerSegment, null, DIM1, false),
        2, false, 2
    );
    // Wire the task to an action client and toolbox, and prepare it for locking.
    final TaskActionClient actionClient = createActionClient(task);
    final TaskToolbox toolbox = createTaskToolbox(task, actionClient);
    prepareTaskForLocking(task);
    Assert.assertTrue(task.isReady(actionClient));
    // Stop the task gracefully, then run; the multi-phase run is expected to fail in the third phase.
    task.stopGracefully(null);
    TaskStatus taskStatus = task.runRangePartitionMultiPhaseParallel(toolbox);
    Assert.assertTrue(taskStatus.isFailure());
    Assert.assertEquals("Failed in phase[PHASE-3]. See task logs for details.", taskStatus.getErrorMsg());
}
From the class RangePartitionTaskKillTest, method failsSecondPhase:
@Test(timeout = 5000L)
public void failsSecondPhase() throws Exception {
    int targetRowsPerSegment = NUM_ROW * 2 / DIM_FILE_CARDINALITY / NUM_PARTITION;
    // Same setup as failsThirdPhase, but the task is configured to fail one phase earlier.
    final ParallelIndexSupervisorTask task = newTask(
        TIMESTAMP_SPEC, DIMENSIONS_SPEC, INPUT_FORMAT, null, INTERVAL_TO_INDEX,
        inputDir, TEST_FILE_NAME_PREFIX + "*",
        new SingleDimensionPartitionsSpec(targetRowsPerSegment, null, DIM1, false),
        2, false, 1
    );
    final TaskActionClient actionClient = createActionClient(task);
    final TaskToolbox toolbox = createTaskToolbox(task, actionClient);
    prepareTaskForLocking(task);
    Assert.assertTrue(task.isReady(actionClient));
    // Stop the task gracefully, then run; this time the failure is expected in the second phase.
    task.stopGracefully(null);
    TaskStatus taskStatus = task.runRangePartitionMultiPhaseParallel(toolbox);
    Assert.assertTrue(taskStatus.isFailure());
    Assert.assertEquals("Failed in phase[PHASE-2]. See task logs for details.", taskStatus.getErrorMsg());
}
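The two range-partition tests above differ only in the trailing integer passed to newTask and in the phase named by the expected error message. If that flow were factored out, a shared assertion helper might look like the sketch below; assertFailsInPhase is a hypothetical name, and the sketch assumes the newTask, createActionClient, createTaskToolbox, and prepareTaskForLocking fixtures of RangePartitionTaskKillTest shown above.
// Hypothetical helper (sketch only): runs the range-partition task and checks that it
// fails in the expected phase, mirroring the assertions of the tests above.
private void assertFailsInPhase(ParallelIndexSupervisorTask task, int expectedPhase) throws Exception {
    final TaskActionClient actionClient = createActionClient(task);
    final TaskToolbox toolbox = createTaskToolbox(task, actionClient);
    prepareTaskForLocking(task);
    Assert.assertTrue(task.isReady(actionClient));
    task.stopGracefully(null);
    TaskStatus taskStatus = task.runRangePartitionMultiPhaseParallel(toolbox);
    Assert.assertTrue(taskStatus.isFailure());
    Assert.assertEquals(
        "Failed in phase[PHASE-" + expectedPhase + "]. See task logs for details.",
        taskStatus.getErrorMsg()
    );
}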
From the class HashPartitionCachingLocalSegmentAllocatorTest, method setup:
@Before
public void setup() throws IOException {
    TaskToolbox toolbox = createToolbox();
    // Pre-compute the hash partition analysis: NUM_PARTITONS buckets for the test interval.
    HashPartitionAnalysis partitionAnalysis = new HashPartitionAnalysis(PARTITIONS_SPEC);
    partitionAnalysis.updateBucket(INTERVAL, NUM_PARTITONS);
    // Build the segment allocator under test and grab its sequence-name function.
    target = SegmentAllocators.forNonLinearPartitioning(
        toolbox, DATASOURCE, TASKID,
        new UniformGranularitySpec(Granularities.HOUR, Granularities.NONE, ImmutableList.of()),
        new SupervisorTaskAccessWithNullClient(SUPERVISOR_TASKID),
        partitionAnalysis
    );
    sequenceNameFunction = ((CachingLocalSegmentAllocator) target).getSequenceNameFunction();
}
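A test built on this setup could then allocate a segment for a row. The sketch below is an assumption rather than code from the project: createInputRow() is a hypothetical helper, and the SequenceNameFunction.getSequenceName(interval, row) and SegmentAllocator.allocate(row, sequenceName, previousSegmentId, skipSegmentLineageCheck) signatures are assumed from Druid's allocator interfaces.
// Sketch only: exercises the allocator prepared in setup(); createInputRow() is hypothetical.
@Test
public void allocatesSegmentForRow() throws IOException {
    final InputRow row = createInputRow();
    final String sequenceName = sequenceNameFunction.getSequenceName(INTERVAL, row);
    final SegmentIdWithShardSpec segmentId = target.allocate(row, sequenceName, null, true);
    Assert.assertEquals(DATASOURCE, segmentId.getDataSource());
    Assert.assertEquals(INTERVAL, segmentId.getInterval());
}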
From the class HashPartitionTaskKillTest, method failsInSecondPhase:
@Test(timeout = 5000L)
public void failsInSecondPhase() throws Exception {
    // Build a parallel index task that hash-partitions on dim1 and dim2 into three shards.
    final ParallelIndexSupervisorTask task = createTestTask(
        TIMESTAMP_SPEC, DIMENSIONS_SPEC, INPUT_FORMAT, null, INTERVAL_TO_INDEX,
        inputDir, "test_*",
        new HashedPartitionsSpec(null, 3, ImmutableList.of("dim1", "dim2")),
        2, false, true, 0
    );
    final TaskActionClient actionClient = createActionClient(task);
    final TaskToolbox toolbox = createTaskToolbox(task, actionClient);
    prepareTaskForLocking(task);
    Assert.assertTrue(task.isReady(actionClient));
    // Stop the task gracefully, then run; the hash-partition run is expected to fail in the second phase.
    task.stopGracefully(null);
    TaskStatus taskStatus = task.runHashPartitionMultiPhaseParallel(toolbox);
    Assert.assertTrue(taskStatus.isFailure());
    Assert.assertEquals("Failed in phase[PHASE-2]. See task logs for details.", taskStatus.getErrorMsg());
}
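This hash-partition variant follows the same stop-then-run-then-assert pattern as the range-partition kill tests above; only the partitions spec (a HashedPartitionsSpec with an explicit shard count) and the run method, runHashPartitionMultiPhaseParallel, differ.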
From the class ParallelIndexPhaseRunnerTest, method testSmallEstimatedNumSplits:
@Test
public void testSmallEstimatedNumSplits() throws Exception {
    final NoopTask task = NoopTask.create();
    final TaskActionClient actionClient = createActionClient(task);
    final TaskToolbox toolbox = createTaskToolbox(task, actionClient);
    // Run a test phase with a small estimated number of splits (per the test name) and expect success.
    final TestPhaseRunner runner = new TestPhaseRunner(
        toolbox, "supervisorTaskId", "groupId",
        AbstractParallelIndexSupervisorTaskTest.DEFAULT_TUNING_CONFIG_FOR_PARALLEL_INDEXING,
        10, 8
    );
    Assert.assertEquals(TaskState.SUCCESS, runner.run());
}
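A complementary case would flip the relationship between the last two TestPhaseRunner arguments so the estimate exceeds the actual count. The sketch below is an assumption: testLargeEstimatedNumSplits is a hypothetical name, and the meaning of the two trailing integers (actual versus estimated sub-task counts) is inferred only from the test name above.
// Sketch only: same wiring as above, with the estimated split count (assumed to be the
// final argument) larger than the actual count.
@Test
public void testLargeEstimatedNumSplits() throws Exception {
    final NoopTask task = NoopTask.create();
    final TaskActionClient actionClient = createActionClient(task);
    final TaskToolbox toolbox = createTaskToolbox(task, actionClient);
    final TestPhaseRunner runner = new TestPhaseRunner(
        toolbox, "supervisorTaskId", "groupId",
        AbstractParallelIndexSupervisorTaskTest.DEFAULT_TUNING_CONFIG_FOR_PARALLEL_INDEXING,
        10, 12
    );
    Assert.assertEquals(TaskState.SUCCESS, runner.run());
}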