Example usage of org.apache.druid.indexing.common.actions.TaskActionClient from the druid-io/druid project.
From class TaskLockHelper, method tryLockSegments.
/**
 * Attempts to acquire an EXCLUSIVE segment lock for every given segment, one lock request per
 * interval.
 * <p>
 * Segments are grouped by interval; all segments in an interval must share the same version
 * (enforced via {@link Preconditions#checkState}). Successfully locked segments are accumulated
 * in {@code lockedExistingSegments} and passed to
 * {@code verifyAndFindRootPartitionRangeAndMinorVersion} as each interval is processed.
 *
 * @param actionClient client used to submit the {@link SegmentLockTryAcquireAction}
 * @param segments     segments to lock; may span multiple intervals
 * @return true if every lock was acquired, false as soon as any lock attempt fails
 *         (NOTE(review): locks acquired for earlier intervals are not released here —
 *         presumably the caller handles cleanup on failure; confirm)
 * @throws IOException if submitting the lock action fails
 * @throws IllegalStateException if segments within one interval have different versions
 */
private boolean tryLockSegments(TaskActionClient actionClient, List<DataSegment> segments) throws IOException
{
  final Map<Interval, List<DataSegment>> intervalToSegments = SegmentUtils.groupSegmentsByInterval(segments);
  for (Entry<Interval, List<DataSegment>> entry : intervalToSegments.entrySet()) {
    final Interval interval = entry.getKey();
    final List<DataSegment> segmentsInInterval = entry.getValue();
    // Hoist the reference version: it is loop-invariant, so there is no need to re-read
    // segmentsInInterval.get(0) inside the allMatch predicate for every segment.
    final String version = segmentsInInterval.get(0).getVersion();
    final boolean hasSameVersion = segmentsInInterval
        .stream()
        .allMatch(segment -> segment.getVersion().equals(version));
    Preconditions.checkState(
        hasSameVersion,
        "Segments %s should have same version",
        SegmentUtils.commaSeparatedIdentifiers(segmentsInInterval)
    );
    final List<LockResult> lockResults = actionClient.submit(
        new SegmentLockTryAcquireAction(
            TaskLockType.EXCLUSIVE,
            interval,
            version,
            segmentsInInterval
                .stream()
                .map(segment -> segment.getShardSpec().getPartitionNum())
                .collect(Collectors.toSet())
        )
    );
    // A single failed partition lock fails the whole attempt.
    if (lockResults.stream().anyMatch(result -> !result.isOk())) {
      return false;
    }
    lockedExistingSegments.addAll(segmentsInInterval);
    verifyAndFindRootPartitionRangeAndMinorVersion(segmentsInInterval);
  }
  return true;
}
Example usage of org.apache.druid.indexing.common.actions.TaskActionClient from the druid-io/druid project.
From class ActionBasedUsedSegmentCheckerTest, method testBasic.
/**
 * Verifies that {@code ActionBasedUsedSegmentChecker.findUsedSegments} returns only the
 * segments whose identifier (dataSource, interval, version, partition) matches one of the
 * requested {@code SegmentIdWithShardSpec}s, using mocked RetrieveUsedSegmentsAction results.
 */
@Test
public void testBasic() throws IOException
{
  final TaskActionClient client = EasyMock.createMock(TaskActionClient.class);
  // Mocked response for the "bar" dataSource, queried by a single interval.
  EasyMock
      .expect(client.submit(new RetrieveUsedSegmentsAction("bar", Intervals.of("2002/P1D"), null, Segments.ONLY_VISIBLE)))
      .andReturn(ImmutableList.of(
          segment("bar", "2002/P1D", 0, "b"),
          segment("bar", "2002/P1D", 1, "b")
      ));
  // Mocked response for the "foo" dataSource, queried by an interval list.
  EasyMock
      .expect(client.submit(new RetrieveUsedSegmentsAction("foo", null, ImmutableList.of(Intervals.of("2000/P1D"), Intervals.of("2001/P1D")), Segments.ONLY_VISIBLE)))
      .andReturn(ImmutableList.of(
          segment("foo", "2000/P1D", 0, "a"),
          segment("foo", "2000/P1D", 1, "a"),
          segment("foo", "2001/P1D", 1, "b"),
          segment("foo", "2002/P1D", 1, "b")
      ));
  EasyMock.replay(client);

  final UsedSegmentChecker checker = new ActionBasedUsedSegmentChecker(client);
  final Set<DataSegment> segments = checker.findUsedSegments(ImmutableSet.of(
      new SegmentIdWithShardSpec("foo", Intervals.of("2000/P1D"), "a", new LinearShardSpec(1)),
      new SegmentIdWithShardSpec("foo", Intervals.of("2001/P1D"), "b", new LinearShardSpec(0)),
      new SegmentIdWithShardSpec("bar", Intervals.of("2002/P1D"), "b", new LinearShardSpec(0))
  ));

  // Only the (foo, 2000, a, 1) and (bar, 2002, b, 0) ids have exact matches in the mock data.
  Assert.assertEquals(
      ImmutableSet.of(
          segment("foo", "2000/P1D", 1, "a"),
          segment("bar", "2002/P1D", 0, "b")
      ),
      segments
  );
  EasyMock.verify(client);
}

/** Builds a zero-size test segment with a linear shard spec. */
private static DataSegment segment(String dataSource, String interval, int partitionNum, String version)
{
  return DataSegment.builder()
                    .dataSource(dataSource)
                    .interval(Intervals.of(interval))
                    .shardSpec(new LinearShardSpec(partitionNum))
                    .version(version)
                    .size(0)
                    .build();
}
Example usage of org.apache.druid.indexing.common.actions.TaskActionClient from the druid-io/druid project.
From class RangePartitionTaskKillTest, method failsFirstPhase.
/**
 * Verifies that a range-partitioning parallel task stopped before the first phase reports
 * failure with the PHASE-1 error message.
 */
@Test(timeout = 5000L)
public void failsFirstPhase() throws Exception
{
  final int rowsPerSegment = NUM_ROW * 2 / DIM_FILE_CARDINALITY / NUM_PARTITION;
  final SingleDimensionPartitionsSpec partitionsSpec =
      new SingleDimensionPartitionsSpec(rowsPerSegment, null, DIM1, false);
  // NOTE(review): the trailing 0 presumably selects the phase at which the injected
  // failure fires — confirm against newTask's signature.
  final ParallelIndexSupervisorTask task = newTask(
      TIMESTAMP_SPEC,
      DIMENSIONS_SPEC,
      INPUT_FORMAT,
      null,
      INTERVAL_TO_INDEX,
      inputDir,
      TEST_FILE_NAME_PREFIX + "*",
      partitionsSpec,
      2,
      false,
      0
  );

  final TaskActionClient actionClient = createActionClient(task);
  final TaskToolbox toolbox = createTaskToolbox(task, actionClient);
  prepareTaskForLocking(task);
  Assert.assertTrue(task.isReady(actionClient));

  // Stop the task before running; the run must then fail in the first phase.
  task.stopGracefully(null);
  final TaskStatus status = task.runRangePartitionMultiPhaseParallel(toolbox);

  Assert.assertTrue(status.isFailure());
  Assert.assertEquals("Failed in phase[PHASE-1]. See task logs for details.", status.getErrorMsg());
}
Example usage of org.apache.druid.indexing.common.actions.TaskActionClient from the druid-io/druid project.
From class RangePartitionTaskKillTest, method failsThirdPhase.
/**
 * Verifies that a range-partitioning parallel task configured to fail in the third phase
 * reports failure with the PHASE-3 error message.
 */
@Test(timeout = 5000L)
public void failsThirdPhase() throws Exception
{
  final int rowsPerSegment = NUM_ROW * 2 / DIM_FILE_CARDINALITY / NUM_PARTITION;
  final SingleDimensionPartitionsSpec partitionsSpec =
      new SingleDimensionPartitionsSpec(rowsPerSegment, null, DIM1, false);
  // NOTE(review): the trailing 2 presumably selects the phase at which the injected
  // failure fires — confirm against newTask's signature.
  final ParallelIndexSupervisorTask task = newTask(
      TIMESTAMP_SPEC,
      DIMENSIONS_SPEC,
      INPUT_FORMAT,
      null,
      INTERVAL_TO_INDEX,
      inputDir,
      TEST_FILE_NAME_PREFIX + "*",
      partitionsSpec,
      2,
      false,
      2
  );

  final TaskActionClient actionClient = createActionClient(task);
  final TaskToolbox toolbox = createTaskToolbox(task, actionClient);
  prepareTaskForLocking(task);
  Assert.assertTrue(task.isReady(actionClient));

  // Stop the task before running; the run must then fail in the third phase.
  task.stopGracefully(null);
  final TaskStatus status = task.runRangePartitionMultiPhaseParallel(toolbox);

  Assert.assertTrue(status.isFailure());
  Assert.assertEquals("Failed in phase[PHASE-3]. See task logs for details.", status.getErrorMsg());
}
Example usage of org.apache.druid.indexing.common.actions.TaskActionClient from the druid-io/druid project.
From class RangePartitionTaskKillTest, method failsSecondPhase.
/**
 * Verifies that a range-partitioning parallel task configured to fail in the second phase
 * reports failure with the PHASE-2 error message.
 */
@Test(timeout = 5000L)
public void failsSecondPhase() throws Exception
{
  final int rowsPerSegment = NUM_ROW * 2 / DIM_FILE_CARDINALITY / NUM_PARTITION;
  final SingleDimensionPartitionsSpec partitionsSpec =
      new SingleDimensionPartitionsSpec(rowsPerSegment, null, DIM1, false);
  // NOTE(review): the trailing 1 presumably selects the phase at which the injected
  // failure fires — confirm against newTask's signature.
  final ParallelIndexSupervisorTask task = newTask(
      TIMESTAMP_SPEC,
      DIMENSIONS_SPEC,
      INPUT_FORMAT,
      null,
      INTERVAL_TO_INDEX,
      inputDir,
      TEST_FILE_NAME_PREFIX + "*",
      partitionsSpec,
      2,
      false,
      1
  );

  final TaskActionClient actionClient = createActionClient(task);
  final TaskToolbox toolbox = createTaskToolbox(task, actionClient);
  prepareTaskForLocking(task);
  Assert.assertTrue(task.isReady(actionClient));

  // Stop the task before running; the run must then fail in the second phase.
  task.stopGracefully(null);
  final TaskStatus status = task.runRangePartitionMultiPhaseParallel(toolbox);

  Assert.assertTrue(status.isFailure());
  Assert.assertEquals("Failed in phase[PHASE-2]. See task logs for details.", status.getErrorMsg());
}
Aggregations