Use of io.trino.execution.scheduler.TestingExchange.TestingExchangeSourceHandle in project trino by trinodb.
Example from the class TestStageTaskSourceFactory, method testHashDistributionTaskSource.
@Test
public void testHashDistributionTaskSource() {
    // no splits and no exchange source handles: the source produces no tasks
    TaskSource taskSource = createHashDistributionTaskSource(
            ImmutableMap.of(),
            ImmutableListMultimap.of(),
            ImmutableListMultimap.of(),
            1,
            new int[] { 0, 1, 2, 3 },
            Optional.empty(),
            0,
            DataSize.of(3, BYTE));
    assertFalse(taskSource.isFinished());
    assertEquals(taskSource.getMoreTasks(), ImmutableList.of());
    assertTrue(taskSource.isFinished());
    // partitioned handles for PLAN_NODE_1 and PLAN_NODE_2; the replicated PLAN_NODE_3 handle is expected in every task
    taskSource = createHashDistributionTaskSource(
            ImmutableMap.of(),
            ImmutableListMultimap.of(
                    PLAN_NODE_1, new TestingExchangeSourceHandle(0, 1),
                    PLAN_NODE_1, new TestingExchangeSourceHandle(1, 1),
                    PLAN_NODE_2, new TestingExchangeSourceHandle(0, 1),
                    PLAN_NODE_2, new TestingExchangeSourceHandle(3, 1)),
            ImmutableListMultimap.of(PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
            1,
            new int[] { 0, 1, 2, 3 },
            Optional.empty(),
            0,
            DataSize.of(0, BYTE));
    assertFalse(taskSource.isFinished());
    assertEquals(taskSource.getMoreTasks(), ImmutableList.of(
            new TaskDescriptor(
                    0,
                    ImmutableListMultimap.of(),
                    ImmutableListMultimap.of(
                            PLAN_NODE_1, new TestingExchangeSourceHandle(0, 1),
                            PLAN_NODE_2, new TestingExchangeSourceHandle(0, 1),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    1,
                    ImmutableListMultimap.of(),
                    ImmutableListMultimap.of(
                            PLAN_NODE_1, new TestingExchangeSourceHandle(1, 1),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    2,
                    ImmutableListMultimap.of(),
                    ImmutableListMultimap.of(
                            PLAN_NODE_2, new TestingExchangeSourceHandle(3, 1),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE)))));
    assertTrue(taskSource.isFinished());
    // four bucketed splits
    Split bucketedSplit1 = createBucketedSplit(0, 0);
    Split bucketedSplit2 = createBucketedSplit(0, 2);
    Split bucketedSplit3 = createBucketedSplit(0, 3);
    Split bucketedSplit4 = createBucketedSplit(0, 1);
    taskSource = createHashDistributionTaskSource(
            ImmutableMap.of(
                    PLAN_NODE_4, new TestingSplitSource(CATALOG, ImmutableList.of(bucketedSplit1, bucketedSplit2, bucketedSplit3)),
                    PLAN_NODE_5, new TestingSplitSource(CATALOG, ImmutableList.of(bucketedSplit4))),
            ImmutableListMultimap.of(),
            ImmutableListMultimap.of(PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
            1,
            new int[] { 0, 1, 2, 3 },
            Optional.of(getTestingBucketNodeMap(4)),
            0,
            DataSize.of(0, BYTE));
    assertFalse(taskSource.isFinished());
    assertEquals(taskSource.getMoreTasks(), ImmutableList.of(
            new TaskDescriptor(
                    0,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit1),
                    ImmutableListMultimap.of(PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    1,
                    ImmutableListMultimap.of(PLAN_NODE_5, bucketedSplit4),
                    ImmutableListMultimap.of(PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    2,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit2),
                    ImmutableListMultimap.of(PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    3,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit3),
                    ImmutableListMultimap.of(PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE)))));
    assertTrue(taskSource.isFinished());
    taskSource = createHashDistributionTaskSource(
            ImmutableMap.of(
                    PLAN_NODE_4, new TestingSplitSource(CATALOG, ImmutableList.of(bucketedSplit1, bucketedSplit2, bucketedSplit3)),
                    PLAN_NODE_5, new TestingSplitSource(CATALOG, ImmutableList.of(bucketedSplit4))),
            ImmutableListMultimap.of(
                    PLAN_NODE_1, new TestingExchangeSourceHandle(0, 1),
                    PLAN_NODE_1, new TestingExchangeSourceHandle(1, 1),
                    PLAN_NODE_2, new TestingExchangeSourceHandle(0, 1),
                    PLAN_NODE_2, new TestingExchangeSourceHandle(3, 1)),
            ImmutableListMultimap.of(PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
            1,
            new int[] { 0, 1, 2, 3 },
            Optional.of(getTestingBucketNodeMap(4)),
            0,
            DataSize.of(0, BYTE));
    assertFalse(taskSource.isFinished());
    assertEquals(taskSource.getMoreTasks(), ImmutableList.of(
            new TaskDescriptor(
                    0,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit1),
                    ImmutableListMultimap.of(
                            PLAN_NODE_1, new TestingExchangeSourceHandle(0, 1),
                            PLAN_NODE_2, new TestingExchangeSourceHandle(0, 1),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    1,
                    ImmutableListMultimap.of(PLAN_NODE_5, bucketedSplit4),
                    ImmutableListMultimap.of(
                            PLAN_NODE_1, new TestingExchangeSourceHandle(1, 1),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    2,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit2),
                    ImmutableListMultimap.of(PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    3,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit3),
                    ImmutableListMultimap.of(
                            PLAN_NODE_2, new TestingExchangeSourceHandle(3, 1),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE)))));
    assertTrue(taskSource.isFinished());
    taskSource = createHashDistributionTaskSource(
            ImmutableMap.of(
                    PLAN_NODE_4, new TestingSplitSource(CATALOG, ImmutableList.of(bucketedSplit1, bucketedSplit2, bucketedSplit3)),
                    PLAN_NODE_5, new TestingSplitSource(CATALOG, ImmutableList.of(bucketedSplit4))),
            ImmutableListMultimap.of(
                    PLAN_NODE_1, new TestingExchangeSourceHandle(0, 1),
                    PLAN_NODE_1, new TestingExchangeSourceHandle(1, 1),
                    PLAN_NODE_2, new TestingExchangeSourceHandle(0, 1)),
            ImmutableListMultimap.of(PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
            2,
            new int[] { 0, 1, 0, 1 },
            Optional.of(getTestingBucketNodeMap(4)),
            0,
            DataSize.of(0, BYTE));
    assertFalse(taskSource.isFinished());
    assertEquals(taskSource.getMoreTasks(), ImmutableList.of(
            new TaskDescriptor(
                    0,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit1, PLAN_NODE_4, bucketedSplit2),
                    ImmutableListMultimap.of(
                            PLAN_NODE_1, new TestingExchangeSourceHandle(0, 1),
                            PLAN_NODE_2, new TestingExchangeSourceHandle(0, 1),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    1,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit3, PLAN_NODE_5, bucketedSplit4),
                    ImmutableListMultimap.of(
                            PLAN_NODE_1, new TestingExchangeSourceHandle(1, 1),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(0, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE)))));
    assertTrue(taskSource.isFinished());
    // join based on split target split weight
    taskSource = createHashDistributionTaskSource(
            ImmutableMap.of(
                    PLAN_NODE_4, new TestingSplitSource(CATALOG, ImmutableList.of(bucketedSplit1, bucketedSplit2, bucketedSplit3)),
                    PLAN_NODE_5, new TestingSplitSource(CATALOG, ImmutableList.of(bucketedSplit4))),
            ImmutableListMultimap.of(
                    PLAN_NODE_1, new TestingExchangeSourceHandle(0, 1),
                    PLAN_NODE_1, new TestingExchangeSourceHandle(1, 1),
                    PLAN_NODE_2, new TestingExchangeSourceHandle(1, 1),
                    PLAN_NODE_2, new TestingExchangeSourceHandle(2, 1),
                    PLAN_NODE_2, new TestingExchangeSourceHandle(3, 1)),
            ImmutableListMultimap.of(PLAN_NODE_3, new TestingExchangeSourceHandle(17, 1)),
            2,
            new int[] { 0, 1, 2, 3 },
            Optional.of(getTestingBucketNodeMap(4)),
            2 * STANDARD_WEIGHT,
            DataSize.of(100, GIGABYTE));
    assertFalse(taskSource.isFinished());
    assertEquals(taskSource.getMoreTasks(), ImmutableList.of(
            new TaskDescriptor(
                    0,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit1, PLAN_NODE_5, bucketedSplit4),
                    ImmutableListMultimap.of(
                            PLAN_NODE_1, new TestingExchangeSourceHandle(0, 1),
                            PLAN_NODE_1, new TestingExchangeSourceHandle(1, 1),
                            PLAN_NODE_2, new TestingExchangeSourceHandle(1, 1),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(17, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    1,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit2, PLAN_NODE_4, bucketedSplit3),
                    ImmutableListMultimap.of(
                            PLAN_NODE_2, new TestingExchangeSourceHandle(2, 1),
                            PLAN_NODE_2, new TestingExchangeSourceHandle(3, 1),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(17, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE)))));
    assertTrue(taskSource.isFinished());
    // join based on target exchange size
    taskSource = createHashDistributionTaskSource(
            ImmutableMap.of(
                    PLAN_NODE_4, new TestingSplitSource(CATALOG, ImmutableList.of(bucketedSplit1, bucketedSplit2, bucketedSplit3)),
                    PLAN_NODE_5, new TestingSplitSource(CATALOG, ImmutableList.of(bucketedSplit4))),
            ImmutableListMultimap.of(
                    PLAN_NODE_1, new TestingExchangeSourceHandle(0, 20),
                    PLAN_NODE_1, new TestingExchangeSourceHandle(1, 30),
                    PLAN_NODE_2, new TestingExchangeSourceHandle(1, 20),
                    PLAN_NODE_2, new TestingExchangeSourceHandle(2, 99),
                    PLAN_NODE_2, new TestingExchangeSourceHandle(3, 30)),
            ImmutableListMultimap.of(PLAN_NODE_3, new TestingExchangeSourceHandle(17, 1)),
            2,
            new int[] { 0, 1, 2, 3 },
            Optional.of(getTestingBucketNodeMap(4)),
            100 * STANDARD_WEIGHT,
            DataSize.of(100, BYTE));
    assertFalse(taskSource.isFinished());
    assertEquals(taskSource.getMoreTasks(), ImmutableList.of(
            new TaskDescriptor(
                    0,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit1, PLAN_NODE_5, bucketedSplit4),
                    ImmutableListMultimap.of(
                            PLAN_NODE_1, new TestingExchangeSourceHandle(0, 20),
                            PLAN_NODE_1, new TestingExchangeSourceHandle(1, 30),
                            PLAN_NODE_2, new TestingExchangeSourceHandle(1, 20),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(17, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    1,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit2),
                    ImmutableListMultimap.of(
                            PLAN_NODE_2, new TestingExchangeSourceHandle(2, 99),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(17, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE))),
            new TaskDescriptor(
                    2,
                    ImmutableListMultimap.of(PLAN_NODE_4, bucketedSplit3),
                    ImmutableListMultimap.of(
                            PLAN_NODE_2, new TestingExchangeSourceHandle(3, 30),
                            PLAN_NODE_3, new TestingExchangeSourceHandle(17, 1)),
                    new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE)))));
    assertTrue(taskSource.isFinished());
}
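The test above leans on the fact that Guava's ImmutableListMultimap.of accepts the same key several times and keeps every value, which is how PLAN_NODE_1 and PLAN_NODE_2 each end up associated with multiple exchange source handles. A minimal, self-contained sketch of that behavior (plain Guava, no Trino test helpers, using placeholder string keys and integer values) looks like this:

import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ListMultimap;

public class ListMultimapDuplicateKeysDemo {
    public static void main(String[] args) {
        // ImmutableListMultimap.of keeps every value supplied for a key, in insertion order,
        // so a single plan node can be mapped to several source handles at once.
        ListMultimap<String, Integer> handlesByNode = ImmutableListMultimap.of(
                "PLAN_NODE_1", 0,
                "PLAN_NODE_1", 1,
                "PLAN_NODE_2", 3);
        System.out.println(handlesByNode.get("PLAN_NODE_1")); // [0, 1]
        System.out.println(handlesByNode.get("PLAN_NODE_2")); // [3]
    }
}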
Use of io.trino.execution.scheduler.TestingExchange.TestingExchangeSourceHandle in project trino by trinodb.
Example from the class TestStageTaskSourceFactory, method testSingleDistributionTaskSource.
@Test
public void testSingleDistributionTaskSource() {
    ListMultimap<PlanNodeId, ExchangeSourceHandle> sources = ImmutableListMultimap.<PlanNodeId, ExchangeSourceHandle>builder()
            .put(PLAN_NODE_1, new TestingExchangeSourceHandle(0, 123))
            .put(PLAN_NODE_2, new TestingExchangeSourceHandle(0, 321))
            .put(PLAN_NODE_1, new TestingExchangeSourceHandle(0, 222))
            .build();
    TaskSource taskSource = new SingleDistributionTaskSource(sources, DataSize.of(4, GIGABYTE));
    assertFalse(taskSource.isFinished());
    List<TaskDescriptor> tasks = taskSource.getMoreTasks();
    assertThat(tasks).hasSize(1);
    assertTrue(taskSource.isFinished());
    TaskDescriptor task = tasks.get(0);
    assertThat(task.getNodeRequirements().getCatalogName()).isEmpty();
    assertThat(task.getNodeRequirements().getAddresses()).isEmpty();
    assertEquals(task.getPartitionId(), 0);
    assertEquals(task.getExchangeSourceHandles(), sources);
    assertEquals(task.getSplits(), ImmutableListMultimap.of());
}
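The assertions spell out the contract of a single-distribution source: one call to getMoreTasks() yields a single partition that carries every exchange source handle and no splits, after which the source reports itself finished. The sketch below illustrates that contract with hypothetical, simplified stand-in types (plain strings and integers rather than Trino's PlanNodeId and ExchangeSourceHandle); it is not the real SingleDistributionTaskSource implementation.

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ListMultimap;
import java.util.List;

// Hypothetical, simplified stand-ins for the types used in the test.
record SimpleTaskDescriptor(int partitionId, ListMultimap<String, Integer> exchangeSourceHandles) {}

class SingleBatchTaskSource {
    private final ListMultimap<String, Integer> sources;
    private boolean finished;

    SingleBatchTaskSource(ListMultimap<String, Integer> sources) {
        this.sources = ImmutableListMultimap.copyOf(sources);
    }

    // Everything goes into one task with partition id 0; afterwards the source is finished.
    public List<SimpleTaskDescriptor> getMoreTasks() {
        if (finished) {
            return ImmutableList.of();
        }
        finished = true;
        return ImmutableList.of(new SimpleTaskDescriptor(0, sources));
    }

    public boolean isFinished() {
        return finished;
    }
}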
Use of io.trino.execution.scheduler.TestingExchange.TestingExchangeSourceHandle in project trino by trinodb.
Example from the class TestFaultTolerantStageScheduler, method testHappyPath.
@Test
public void testHappyPath() throws Exception {
    TestingRemoteTaskFactory remoteTaskFactory = new TestingRemoteTaskFactory();
    TestingTaskSourceFactory taskSourceFactory = createTaskSourceFactory(5, 2);
    TestingNodeSupplier nodeSupplier = TestingNodeSupplier.create(ImmutableMap.of(
            NODE_1, ImmutableList.of(CATALOG),
            NODE_2, ImmutableList.of(CATALOG),
            NODE_3, ImmutableList.of(CATALOG)));
    setupNodeAllocatorService(nodeSupplier);
    TestingExchange sinkExchange = new TestingExchange(false);
    TestingExchange sourceExchange1 = new TestingExchange(false);
    TestingExchange sourceExchange2 = new TestingExchange(false);
    try (NodeAllocator nodeAllocator = nodeAllocatorService.getNodeAllocator(SESSION, 1)) {
        FaultTolerantStageScheduler scheduler = createFaultTolerantTaskScheduler(
                remoteTaskFactory,
                taskSourceFactory,
                nodeAllocator,
                TaskLifecycleListener.NO_OP,
                Optional.of(sinkExchange),
                ImmutableMap.of(SOURCE_FRAGMENT_ID_1, sourceExchange1, SOURCE_FRAGMENT_ID_2, sourceExchange2),
                2);
        ListenableFuture<Void> blocked = scheduler.isBlocked();
        assertUnblocked(blocked);
        scheduler.schedule();
        blocked = scheduler.isBlocked();
        // blocked on first source exchange
        assertBlocked(blocked);
        sourceExchange1.setSourceHandles(ImmutableList.of(new TestingExchangeSourceHandle(0, 1)));
        // still blocked on the second source exchange
        assertBlocked(blocked);
        assertFalse(scheduler.isBlocked().isDone());
        sourceExchange2.setSourceHandles(ImmutableList.of(new TestingExchangeSourceHandle(0, 1)));
        // now unblocked
        assertUnblocked(blocked);
        assertUnblocked(scheduler.isBlocked());
        scheduler.schedule();
        blocked = scheduler.isBlocked();
        // blocked on node allocation
        assertBlocked(blocked);
        // not all tasks have been enumerated yet
        assertFalse(sinkExchange.isNoMoreSinks());
        Map<TaskId, TestingRemoteTask> tasks = remoteTaskFactory.getTasks();
        // one task per node
        assertThat(tasks).hasSize(3);
        assertThat(tasks).containsKey(getTaskId(0, 0));
        assertThat(tasks).containsKey(getTaskId(1, 0));
        assertThat(tasks).containsKey(getTaskId(2, 0));
        TestingRemoteTask task = tasks.get(getTaskId(0, 0));
        // fail task for partition 0
        task.fail(new RuntimeException("some failure"));
        assertUnblocked(blocked);
        assertUnblocked(scheduler.isBlocked());
        // schedule more tasks
        scheduler.schedule();
        tasks = remoteTaskFactory.getTasks();
        assertThat(tasks).hasSize(4);
        assertThat(tasks).containsKey(getTaskId(3, 0));
        blocked = scheduler.isBlocked();
        // blocked on task scheduling
        assertBlocked(blocked);
        // finish some task
        assertThat(tasks).containsKey(getTaskId(1, 0));
        tasks.get(getTaskId(1, 0)).finish();
        assertUnblocked(blocked);
        assertUnblocked(scheduler.isBlocked());
        assertThat(sinkExchange.getFinishedSinkHandles()).contains(new TestingExchangeSinkHandle(1));
        // this will schedule failed task
        scheduler.schedule();
        blocked = scheduler.isBlocked();
        // blocked on task scheduling
        assertBlocked(blocked);
        tasks = remoteTaskFactory.getTasks();
        assertThat(tasks).hasSize(5);
        assertThat(tasks).containsKey(getTaskId(0, 1));
        // finish some task
        tasks = remoteTaskFactory.getTasks();
        assertThat(tasks).containsKey(getTaskId(3, 0));
        tasks.get(getTaskId(3, 0)).finish();
        assertThat(sinkExchange.getFinishedSinkHandles()).contains(new TestingExchangeSinkHandle(1), new TestingExchangeSinkHandle(3));
        assertUnblocked(blocked);
        // schedule the last task
        scheduler.schedule();
        tasks = remoteTaskFactory.getTasks();
        assertThat(tasks).hasSize(6);
        assertThat(tasks).containsKey(getTaskId(4, 0));
        // not finished yet, will be finished when all tasks succeed
        assertFalse(scheduler.isFinished());
        blocked = scheduler.isBlocked();
        // blocked on task scheduling
        assertBlocked(blocked);
        tasks = remoteTaskFactory.getTasks();
        assertThat(tasks).containsKey(getTaskId(4, 0));
        // finish remaining tasks
        tasks.get(getTaskId(0, 1)).finish();
        tasks.get(getTaskId(2, 0)).finish();
        tasks.get(getTaskId(4, 0)).finish();
        // now it's not blocked and finished
        assertUnblocked(blocked);
        assertUnblocked(scheduler.isBlocked());
        assertThat(sinkExchange.getFinishedSinkHandles()).contains(
                new TestingExchangeSinkHandle(0),
                new TestingExchangeSinkHandle(1),
                new TestingExchangeSinkHandle(2),
                new TestingExchangeSinkHandle(3),
                new TestingExchangeSinkHandle(4));
        assertTrue(scheduler.isFinished());
    }
}
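Throughout the test, scheduler.isBlocked() returns a Guava ListenableFuture<Void>, and the assertBlocked/assertUnblocked helpers inspect whether that future has completed. Those helpers are not shown on this page; the sketch below is an assumed implementation (the real test class may, for example, wait briefly before declaring a future blocked). The assertEquals(actual, expected) argument order used elsewhere on the page suggests TestNG assertions, which is what this sketch uses.

import com.google.common.util.concurrent.ListenableFuture;

import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;

// Assumed helpers for readability of the test above; not the actual implementation.
final class BlockingAssertions {
    private BlockingAssertions() {}

    static void assertUnblocked(ListenableFuture<?> future) {
        assertTrue(future.isDone(), "expected the future to be unblocked");
    }

    static void assertBlocked(ListenableFuture<?> future) {
        assertFalse(future.isDone(), "expected the future to be blocked");
    }
}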
Use of io.trino.execution.scheduler.TestingExchange.TestingExchangeSourceHandle in project trino by trinodb.
Example from the class TestFaultTolerantStageScheduler, method testCancellation.
private void testCancellation(boolean abort) throws Exception {
    TestingRemoteTaskFactory remoteTaskFactory = new TestingRemoteTaskFactory();
    TestingTaskSourceFactory taskSourceFactory = createTaskSourceFactory(3, 1);
    TestingNodeSupplier nodeSupplier = TestingNodeSupplier.create(ImmutableMap.of(
            NODE_1, ImmutableList.of(CATALOG),
            NODE_2, ImmutableList.of(CATALOG)));
    setupNodeAllocatorService(nodeSupplier);
    TestingExchange sourceExchange1 = new TestingExchange(false);
    TestingExchange sourceExchange2 = new TestingExchange(false);
    try (NodeAllocator nodeAllocator = nodeAllocatorService.getNodeAllocator(SESSION, 1)) {
        FaultTolerantStageScheduler scheduler = createFaultTolerantTaskScheduler(
                remoteTaskFactory,
                taskSourceFactory,
                nodeAllocator,
                TaskLifecycleListener.NO_OP,
                Optional.empty(),
                ImmutableMap.of(SOURCE_FRAGMENT_ID_1, sourceExchange1, SOURCE_FRAGMENT_ID_2, sourceExchange2),
                0);
        sourceExchange1.setSourceHandles(ImmutableList.of(new TestingExchangeSourceHandle(0, 1)));
        sourceExchange2.setSourceHandles(ImmutableList.of(new TestingExchangeSourceHandle(0, 1)));
        assertUnblocked(scheduler.isBlocked());
        scheduler.schedule();
        ListenableFuture<Void> blocked = scheduler.isBlocked();
        // waiting on node acquisition
        assertBlocked(blocked);
        NodeAllocator.NodeLease acquireNode1 = nodeAllocator.acquire(
                new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE)));
        NodeAllocator.NodeLease acquireNode2 = nodeAllocator.acquire(
                new NodeRequirements(Optional.of(CATALOG), ImmutableSet.of(), DataSize.of(4, GIGABYTE)));
        if (abort) {
            scheduler.abort();
        } else {
            scheduler.cancel();
        }
        assertUnblocked(blocked);
        assertUnblocked(acquireNode1.getNode());
        assertUnblocked(acquireNode2.getNode());
        scheduler.schedule();
        assertUnblocked(scheduler.isBlocked());
        assertFalse(scheduler.isFinished());
    }
}
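testCancellation is a private helper parameterized on whether the stage is aborted or cancelled, so it is presumably driven by two public @Test entry points that are not included on this page. Hypothetical wrappers (the method names below are assumptions, not code from the Trino repository) would look like:

@Test
public void testCancellationWithCancel() throws Exception {
    // exercises the scheduler.cancel() path
    testCancellation(false);
}

@Test
public void testCancellationWithAbort() throws Exception {
    // exercises the scheduler.abort() path
    testCancellation(true);
}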
Use of io.trino.execution.scheduler.TestingExchange.TestingExchangeSourceHandle in project trino by trinodb.
Example from the class TestFaultTolerantStageScheduler, method testTaskLifecycleListener.
@Test
public void testTaskLifecycleListener() throws Exception {
    TestingRemoteTaskFactory remoteTaskFactory = new TestingRemoteTaskFactory();
    TestingTaskSourceFactory taskSourceFactory = createTaskSourceFactory(2, 1);
    TestingNodeSupplier nodeSupplier = TestingNodeSupplier.create(ImmutableMap.of(
            NODE_1, ImmutableList.of(CATALOG),
            NODE_2, ImmutableList.of(CATALOG)));
    setupNodeAllocatorService(nodeSupplier);
    TestingTaskLifecycleListener taskLifecycleListener = new TestingTaskLifecycleListener();
    TestingExchange sourceExchange1 = new TestingExchange(false);
    TestingExchange sourceExchange2 = new TestingExchange(false);
    try (NodeAllocator nodeAllocator = nodeAllocatorService.getNodeAllocator(SESSION, 1)) {
        FaultTolerantStageScheduler scheduler = createFaultTolerantTaskScheduler(
                remoteTaskFactory,
                taskSourceFactory,
                nodeAllocator,
                taskLifecycleListener,
                Optional.empty(),
                ImmutableMap.of(SOURCE_FRAGMENT_ID_1, sourceExchange1, SOURCE_FRAGMENT_ID_2, sourceExchange2),
                2);
        sourceExchange1.setSourceHandles(ImmutableList.of(new TestingExchangeSourceHandle(0, 1)));
        sourceExchange2.setSourceHandles(ImmutableList.of(new TestingExchangeSourceHandle(0, 1)));
        assertUnblocked(scheduler.isBlocked());
        scheduler.schedule();
        assertBlocked(scheduler.isBlocked());
        assertThat(taskLifecycleListener.getTasks().get(FRAGMENT_ID)).contains(getTaskId(0, 0), getTaskId(1, 0));
        remoteTaskFactory.getTasks().get(getTaskId(0, 0)).fail(new RuntimeException("some exception"));
        assertUnblocked(scheduler.isBlocked());
        scheduler.schedule();
        assertBlocked(scheduler.isBlocked());
        assertThat(taskLifecycleListener.getTasks().get(FRAGMENT_ID)).contains(getTaskId(0, 0), getTaskId(1, 0), getTaskId(0, 1));
    }
}
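The listener's role in this test is only to record which task IDs were created for each fragment, so the retry of partition 0 appears as getTaskId(0, 1) alongside the original attempts. A minimal recording listener in that spirit is sketched below using plain string IDs; it is a simplified illustration, not Trino's TaskLifecycleListener interface or the TestingTaskLifecycleListener used above.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Multimaps;

// Simplified sketch: fragment IDs and task IDs are plain strings here,
// whereas the real listener works with Trino's planner and task types.
class RecordingTaskLifecycleListener {
    private final ListMultimap<String, String> tasks =
            Multimaps.synchronizedListMultimap(ArrayListMultimap.<String, String>create());

    // Called once per created task attempt, so a retried partition shows up twice
    // under the same fragment (for example "0.0" and then "0.1").
    public void taskCreated(String fragmentId, String taskId) {
        tasks.put(fragmentId, taskId);
    }

    public ListMultimap<String, String> getTasks() {
        return ImmutableListMultimap.copyOf(tasks);
    }
}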