use of org.apache.flink.runtime.executiongraph.utils.SimpleAckingTaskManagerGateway in project flink by apache.
the class DefaultSchedulerTest method getCheckpointTriggeredLatch.
/**
 * Since checkpoints are triggered asynchronously, we need to figure out when a checkpoint has
 * actually been triggered. Note that this must be invoked before the scheduler is initialized.
 *
 * @return a latch that is counted down once a checkpoint has actually been triggered
*/
private CountDownLatch getCheckpointTriggeredLatch() {
    final CountDownLatch checkpointTriggeredLatch = new CountDownLatch(1);
    final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
    testExecutionSlotAllocator.getLogicalSlotBuilder().setTaskManagerGateway(taskManagerGateway);
    taskManagerGateway.setCheckpointConsumer(
            (executionAttemptID, jobId, checkpointId, timestamp, checkpointOptions) ->
                    checkpointTriggeredLatch.countDown());
    return checkpointTriggeredLatch;
}
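In a test, the latch is obtained before the scheduler is built and awaited after a checkpoint has been requested. A minimal sketch of that flow, assuming illustrative helper names (createSchedulerAndStartScheduling and triggerCheckpoint are placeholders, not the exact utilities of DefaultSchedulerTest):

// Hypothetical flow; the helper names below stand in for the test's own utilities.
final CountDownLatch checkpointTriggeredLatch = getCheckpointTriggeredLatch(); // install the hook first
final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph); // then build and start the scheduler
triggerCheckpoint(scheduler);        // request a checkpoint; the trigger happens asynchronously
checkpointTriggeredLatch.await();    // returns once the gateway has actually observed the trigger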
use of org.apache.flink.runtime.executiongraph.utils.SimpleAckingTaskManagerGateway in project flink by apache.
the class SharedSlotTest method testLogicalSlotAllocation.
@Test
public void testLogicalSlotAllocation() {
    CompletableFuture<PhysicalSlot> slotContextFuture = new CompletableFuture<>();
    CompletableFuture<ExecutionSlotSharingGroup> released = new CompletableFuture<>();
    SharedSlot sharedSlot =
            SharedSlotBuilder.newBuilder()
                    .withSlotContextFuture(slotContextFuture)
                    .slotWillBeOccupiedIndefinitely()
                    .withExternalReleaseCallback(released::complete)
                    .build();
    CompletableFuture<LogicalSlot> logicalSlotFuture = sharedSlot.allocateLogicalSlot(EV1);
    assertThat(logicalSlotFuture.isDone(), is(false));

    AllocationID allocationId = new AllocationID();
    LocalTaskManagerLocation taskManagerLocation = new LocalTaskManagerLocation();
    SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
    slotContextFuture.complete(
            new TestingPhysicalSlot(allocationId, taskManagerLocation, 3, taskManagerGateway, RP));

    assertThat(sharedSlot.isEmpty(), is(false));
    assertThat(released.isDone(), is(false));
    assertThat(logicalSlotFuture.isDone(), is(true));

    LogicalSlot logicalSlot = logicalSlotFuture.join();
    assertThat(logicalSlot.getAllocationId(), is(allocationId));
    assertThat(logicalSlot.getTaskManagerLocation(), is(taskManagerLocation));
    assertThat(logicalSlot.getTaskManagerGateway(), is(taskManagerGateway));
    assertThat(logicalSlot.getLocality(), is(Locality.UNKNOWN));
}
use of org.apache.flink.runtime.executiongraph.utils.SimpleAckingTaskManagerGateway in project flink by apache.
the class DefaultExecutionGraphDeploymentWithSmallBlobCacheSizeLimitTest method testDeployMultipleTasksWithSmallBlobCacheSizeLimit.
/**
 * Tests that deployment works correctly even when the size limit of {@link BlobCacheSizeTracker}
 * in {@link PermanentBlobCache} is set to the minimum value.
 *
 * <p>In this extreme case, since the size limit is 1, every time a task is deployed, all the
 * existing **tracked** BLOBs in the cache must be untracked and deleted before the new BLOB is
 * stored in the cache.
 *
 * <p>This extreme case also covers the normal case, where the size limit is much larger than 1
 * and deletions happen far less frequently.
*/
@Test
public void testDeployMultipleTasksWithSmallBlobCacheSizeLimit() throws Exception {
    final int numberOfVertices = 4;
    final int parallelism = 10;
    final ExecutionGraph eg = createAndSetupExecutionGraph(numberOfVertices, parallelism);

    final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
    final BlockingQueue<TaskDeploymentDescriptor> tdds =
            new ArrayBlockingQueue<>(numberOfVertices * parallelism);
    taskManagerGateway.setSubmitConsumer(
            FunctionUtils.uncheckedConsumer(
                    taskDeploymentDescriptor -> {
                        taskDeploymentDescriptor.loadBigData(blobCache);
                        tdds.offer(taskDeploymentDescriptor);
                    }));

    for (ExecutionJobVertex ejv : eg.getVerticesTopologically()) {
        for (ExecutionVertex ev : ejv.getTaskVertices()) {
            assertEquals(ExecutionState.CREATED, ev.getExecutionState());

            LogicalSlot slot =
                    new TestingLogicalSlotBuilder()
                            .setTaskManagerGateway(taskManagerGateway)
                            .createTestingLogicalSlot();
            final Execution execution = ev.getCurrentExecutionAttempt();
            execution.transitionState(ExecutionState.SCHEDULED);
            execution.registerProducedPartitions(slot.getTaskManagerLocation(), true).get();
            ev.deployToSlot(slot);
            assertEquals(ExecutionState.DEPLOYING, ev.getExecutionState());

            TaskDeploymentDescriptor tdd = tdds.take();
            assertNotNull(tdd);

            List<InputGateDeploymentDescriptor> igdds = tdd.getInputGates();
            assertEquals(ev.getAllConsumedPartitionGroups().size(), igdds.size());
            if (igdds.size() > 0) {
                checkShuffleDescriptors(igdds.get(0), ev.getConsumedPartitionGroup(0));
            }
        }
    }
}
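The tracked/untracked eviction the javadoc describes behaves like a size-bounded LRU: with a limit of 1, storing a new BLOB first evicts whatever is currently tracked. A minimal, self-contained sketch of that behavior, using a hypothetical tracker class (it illustrates only the eviction order and is not the real BlobCacheSizeTracker API, which tracks byte sizes per blob key):

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative tracker: keeps at most 'limit' entries and evicts the least recently used one.
final class TinyBlobTracker<K, V> extends LinkedHashMap<K, V> {
    private final int limit;

    TinyBlobTracker(int limit) {
        super(16, 0.75f, true); // access-order, so the eldest entry is the least recently used
        this.limit = limit;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        // In the real cache, the evicted BLOB would also be deleted from storage at this point.
        return size() > limit;
    }
}

// With limit = 1, every newly deployed task's BLOB evicts the previously tracked one:
// TinyBlobTracker<String, byte[]> tracker = new TinyBlobTracker<>(1);
// tracker.put("blob-of-task-1", new byte[0]);
// tracker.put("blob-of-task-2", new byte[0]); // "blob-of-task-1" is evicted here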
use of org.apache.flink.runtime.executiongraph.utils.SimpleAckingTaskManagerGateway in project flink by apache.
the class ExecutionTest method testCanceledExecutionReturnsSlot.
@Test
public void testCanceledExecutionReturnsSlot() throws Exception {
    final JobVertex jobVertex = createNoOpJobVertex();
    final JobVertexID jobVertexId = jobVertex.getID();
    final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
    TestingPhysicalSlotProvider physicalSlotProvider =
            TestingPhysicalSlotProvider.create(
                    (resourceProfile) ->
                            CompletableFuture.completedFuture(
                                    TestingPhysicalSlot.builder()
                                            .withTaskManagerGateway(taskManagerGateway)
                                            .build()));
    final SchedulerBase scheduler =
            SchedulerTestingUtils.newSchedulerBuilder(
                            JobGraphTestUtils.streamingJobGraph(jobVertex),
                            testMainThreadUtil.getMainThreadExecutor())
                    .setExecutionSlotAllocatorFactory(
                            SchedulerTestingUtils.newSlotSharingExecutionSlotAllocatorFactory(
                                    physicalSlotProvider))
                    .build();

    ExecutionJobVertex executionJobVertex = scheduler.getExecutionJobVertex(jobVertexId);
    ExecutionVertex executionVertex = executionJobVertex.getTaskVertices()[0];
    final Execution execution = executionVertex.getCurrentExecutionAttempt();

    taskManagerGateway.setCancelConsumer(
            executionAttemptID -> {
                if (execution.getAttemptId().equals(executionAttemptID)) {
                    execution.completeCancelling();
                }
            });

    testMainThreadUtil.execute(scheduler::startScheduling);
    // cancel the execution in case it could be scheduled
    testMainThreadUtil.execute(execution::cancel);

    assertThat(physicalSlotProvider.getRequests().keySet(), is(physicalSlotProvider.getCancellations().keySet()));
}
use of org.apache.flink.runtime.executiongraph.utils.SimpleAckingTaskManagerGateway in project flink by apache.
the class UpdatePartitionConsumersTest method testUpdatePartitionConsumers.
/**
 * Tests that BLOCKING partition information is properly updated to consumers when its producer
 * finishes.
*/
@Test
public void testUpdatePartitionConsumers() throws Exception {
    final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
    final SchedulerBase scheduler =
            SchedulerTestingUtils.newSchedulerBuilder(
                            jobGraph, ComponentMainThreadExecutorServiceAdapter.forMainThread())
                    .setExecutionSlotAllocatorFactory(
                            new TestExecutionSlotAllocatorFactory(taskManagerGateway))
                    .build();

    final ExecutionVertex ev1 = scheduler.getExecutionVertex(new ExecutionVertexID(v1.getID(), 0));
    final ExecutionVertex ev2 = scheduler.getExecutionVertex(new ExecutionVertexID(v2.getID(), 0));
    final ExecutionVertex ev3 = scheduler.getExecutionVertex(new ExecutionVertexID(v3.getID(), 0));
    final ExecutionVertex ev4 = scheduler.getExecutionVertex(new ExecutionVertexID(v4.getID(), 0));

    final CompletableFuture<TaskDeploymentDescriptor> ev4TddFuture = new CompletableFuture<>();
    taskManagerGateway.setSubmitConsumer(
            tdd -> {
                if (tdd.getExecutionAttemptId()
                        .equals(ev4.getCurrentExecutionAttempt().getAttemptId())) {
                    ev4TddFuture.complete(tdd);
                }
            });

    scheduler.startScheduling();

    assertThat(ev1.getExecutionState(), is(ExecutionState.DEPLOYING));
    assertThat(ev2.getExecutionState(), is(ExecutionState.DEPLOYING));
    assertThat(ev3.getExecutionState(), is(ExecutionState.DEPLOYING));
    assertThat(ev4.getExecutionState(), is(ExecutionState.DEPLOYING));

    updateState(scheduler, ev1, ExecutionState.INITIALIZING);
    updateState(scheduler, ev1, ExecutionState.RUNNING);
    updateState(scheduler, ev2, ExecutionState.INITIALIZING);
    updateState(scheduler, ev2, ExecutionState.RUNNING);
    updateState(scheduler, ev3, ExecutionState.INITIALIZING);
    updateState(scheduler, ev3, ExecutionState.RUNNING);
    updateState(scheduler, ev4, ExecutionState.INITIALIZING);
    updateState(scheduler, ev4, ExecutionState.RUNNING);

    final InputGateDeploymentDescriptor ev4Igdd2 =
            ev4TddFuture.get(TIMEOUT, TimeUnit.MILLISECONDS).getInputGates().get(1);
    assertThat(ev4Igdd2.getShuffleDescriptors()[0], instanceOf(UnknownShuffleDescriptor.class));

    final CompletableFuture<Void> updatePartitionFuture = new CompletableFuture<>();
    taskManagerGateway.setUpdatePartitionsConsumer(
            (attemptId, partitionInfos, time) -> {
                assertThat(attemptId, equalTo(ev4.getCurrentExecutionAttempt().getAttemptId()));
                final List<PartitionInfo> partitionInfoList =
                        IterableUtils.toStream(partitionInfos).collect(Collectors.toList());
                assertThat(partitionInfoList, hasSize(1));
                final PartitionInfo partitionInfo = partitionInfoList.get(0);
                assertThat(
                        partitionInfo.getIntermediateDataSetID(),
                        equalTo(v3.getProducedDataSets().get(0).getId()));
                assertThat(
                        partitionInfo.getShuffleDescriptor(),
                        instanceOf(NettyShuffleDescriptor.class));
                updatePartitionFuture.complete(null);
            });

    updateState(scheduler, ev1, ExecutionState.FINISHED);
    updateState(scheduler, ev3, ExecutionState.FINISHED);

    updatePartitionFuture.get(TIMEOUT, TimeUnit.MILLISECONDS);
}
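The synchronization here follows the same hook-then-await pattern as the checkpoint latch above: install the consumer on the gateway before the event that should fire it, then block on a future with a timeout so a missing partition update fails the test instead of hanging it. Condensed to just that pattern, using only the gateway API and constants shown above:

// Condensed view of the synchronization pattern used in this test.
final CompletableFuture<Void> updatePartitionFuture = new CompletableFuture<>();
taskManagerGateway.setUpdatePartitionsConsumer(
        (attemptId, partitionInfos, time) -> updatePartitionFuture.complete(null)); // hook first
updateState(scheduler, ev1, ExecutionState.FINISHED); // then trigger the producer-finished events
updateState(scheduler, ev3, ExecutionState.FINISHED);
updatePartitionFuture.get(TIMEOUT, TimeUnit.MILLISECONDS); // fails fast if no update ever arrives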