Use of org.apache.flink.runtime.io.network.partition.TaskExecutorPartitionTracker in project flink by apache.
From the class TaskExecutorPartitionLifecycleTest, method internalTestPartitionRelease.
private void internalTestPartitionRelease(
        TaskExecutorPartitionTracker partitionTracker,
        ShuffleEnvironment<?, ?> shuffleEnvironment,
        CompletableFuture<ResultPartitionID> startTrackingFuture,
        TestAction testAction)
        throws Exception {
    final ResultPartitionDeploymentDescriptor taskResultPartitionDescriptor =
            PartitionTestUtils.createPartitionDeploymentDescriptor(ResultPartitionType.BLOCKING);
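    // derive the execution attempt from the partition's producer, so the task deployed
    // below is exactly the one producing the tracked partition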
    final ExecutionAttemptID eid1 =
            taskResultPartitionDescriptor.getShuffleDescriptor().getResultPartitionID().getProducerId();
    final TaskDeploymentDescriptor taskDeploymentDescriptor =
            TaskExecutorSubmissionTest.createTaskDeploymentDescriptor(
                    jobId,
                    "job",
                    eid1,
                    new SerializedValue<>(new ExecutionConfig()),
                    "Sender",
                    1,
                    0,
                    1,
                    0,
                    new Configuration(),
                    new Configuration(),
                    TestingInvokable.class.getName(),
                    Collections.singletonList(taskResultPartitionDescriptor),
                    Collections.emptyList(),
                    Collections.emptyList(),
                    Collections.emptyList());
    final TaskSlotTable<Task> taskSlotTable = createTaskSlotTable();
    final TaskExecutorLocalStateStoresManager localStateStoresManager =
            new TaskExecutorLocalStateStoresManager(
                    false, Reference.owned(new File[] {tmp.newFolder()}), Executors.directExecutor());
    final TaskManagerServices taskManagerServices =
            new TaskManagerServicesBuilder()
                    .setTaskSlotTable(taskSlotTable)
                    .setTaskStateManager(localStateStoresManager)
                    .setShuffleEnvironment(shuffleEnvironment)
                    .build();
    final CompletableFuture<Void> taskFinishedFuture = new CompletableFuture<>();
    final OneShotLatch slotOfferedLatch = new OneShotLatch();
    final TestingJobMasterGateway jobMasterGateway =
            new TestingJobMasterGatewayBuilder()
                    .setRegisterTaskManagerFunction(
                            (ignoredJobId, ignoredTaskManagerRegistrationInformation) ->
                                    CompletableFuture.completedFuture(
                                            new JMTMRegistrationSuccess(ResourceID.generate())))
                    .setOfferSlotsFunction(
                            (resourceID, slotOffers) -> {
                                slotOfferedLatch.trigger();
                                return CompletableFuture.completedFuture(slotOffers);
                            })
                    .setUpdateTaskExecutionStateFunction(
                            taskExecutionState -> {
                                if (taskExecutionState.getExecutionState() == ExecutionState.FINISHED) {
                                    taskFinishedFuture.complete(null);
                                }
                                return CompletableFuture.completedFuture(Acknowledge.get());
                            })
                    .build();
    final TestingTaskExecutor taskExecutor = createTestingTaskExecutor(taskManagerServices, partitionTracker);
    final CompletableFuture<SlotReport> initialSlotReportFuture = new CompletableFuture<>();
    final TestingResourceManagerGateway testingResourceManagerGateway = new TestingResourceManagerGateway();
    testingResourceManagerGateway.setSendSlotReportFunction(
            resourceIDInstanceIDSlotReportTuple3 -> {
                initialSlotReportFuture.complete(resourceIDInstanceIDSlotReportTuple3.f2);
                return CompletableFuture.completedFuture(Acknowledge.get());
            });
    testingResourceManagerGateway.setRegisterTaskExecutorFunction(
            input ->
                    CompletableFuture.completedFuture(
                            new TaskExecutorRegistrationSuccess(
                                    new InstanceID(),
                                    testingResourceManagerGateway.getOwnResourceId(),
                                    new ClusterInformation("blobServerHost", 55555))));
    try {
        taskExecutor.start();
        taskExecutor.waitUntilStarted();
        final TaskExecutorGateway taskExecutorGateway = taskExecutor.getSelfGateway(TaskExecutorGateway.class);
        final String jobMasterAddress = "jm";
        rpc.registerGateway(jobMasterAddress, jobMasterGateway);
        rpc.registerGateway(testingResourceManagerGateway.getAddress(), testingResourceManagerGateway);

        // inform the task manager about the job leader
        taskManagerServices.getJobLeaderService().addJob(jobId, jobMasterAddress);
        jobManagerLeaderRetriever.notifyListener(jobMasterAddress, UUID.randomUUID());
        resourceManagerLeaderRetriever.notifyListener(
                testingResourceManagerGateway.getAddress(),
                testingResourceManagerGateway.getFencingToken().toUUID());

        final Optional<SlotStatus> slotStatusOptional =
                StreamSupport.stream(initialSlotReportFuture.get().spliterator(), false).findAny();
        assertTrue(slotStatusOptional.isPresent());
        final SlotStatus slotStatus = slotStatusOptional.get();

        while (true) {
            try {
                taskExecutorGateway
                        .requestSlot(
                                slotStatus.getSlotID(),
                                jobId,
                                taskDeploymentDescriptor.getAllocationId(),
                                ResourceProfile.ZERO,
                                jobMasterAddress,
                                testingResourceManagerGateway.getFencingToken(),
                                timeout)
                        .get();
                break;
            } catch (Exception e) {
                // the proper establishment of the RM connection is tracked asynchronously,
                // so we have to poll here until it has gone through; until then, slot
                // requests will fail with an exception
                Thread.sleep(50);
            }
        }
        TestingInvokable.sync = new BlockerSync();

        // Wait until the slot has been successfully offered before submitting the task.
        // This ensures the TM has been successfully registered with the JM.
        slotOfferedLatch.await();
        taskExecutorGateway
                .submitTask(taskDeploymentDescriptor, jobMasterGateway.getFencingToken(), timeout)
                .get();
        TestingInvokable.sync.awaitBlocker();

        // the task is still running => the partition is in progress and should be tracked
        assertThat(
                startTrackingFuture.get(),
                equalTo(taskResultPartitionDescriptor.getShuffleDescriptor().getResultPartitionID()));
        TestingInvokable.sync.releaseBlocker();
        taskFinishedFuture.get(timeout.getSize(), timeout.getUnit());
        testAction.accept(jobId, taskResultPartitionDescriptor, taskExecutor, taskExecutorGateway);
    } finally {
        RpcUtils.terminateRpcEndpoint(taskExecutor, timeout);
    }

    // the shutdown of the backing shuffle environment releases all partitions,
    // but the book-keeping is not aware of this
    assertTrue(shuffleEnvironment.getPartitionsOccupyingLocalResources().isEmpty());
}
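For reference, a minimal sketch of the TestAction callback this helper accepts, reconstructed from the accept(...) call above; the interface name comes from the method signature, but the exact declaration in the real test may differ:

@FunctionalInterface
interface TestAction {
    // invoked once the task has finished, with everything needed to trigger
    // and observe a partition release
    void accept(
            JobID jobId,
            ResultPartitionDeploymentDescriptor descriptor,
            TaskExecutor taskExecutor,
            TaskExecutorGateway taskExecutorGateway)
            throws Exception;
}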
Use of org.apache.flink.runtime.io.network.partition.TaskExecutorPartitionTracker in project flink by apache.
From the class TaskExecutorTest, method testHeartbeatReporting.
/**
 * Tests that the correct partition/slot report is sent as part of the heartbeat response.
 */
@Test
public void testHeartbeatReporting() throws Exception {
    final String rmAddress = "rm";
    final UUID rmLeaderId = UUID.randomUUID();

    // register the mock resource manager gateway
    final TestingResourceManagerGateway rmGateway = new TestingResourceManagerGateway();
    final CompletableFuture<ResourceID> taskExecutorRegistrationFuture = new CompletableFuture<>();
    final ResourceID rmResourceId = rmGateway.getOwnResourceId();
    final CompletableFuture<RegistrationResponse> registrationResponse =
            CompletableFuture.completedFuture(
                    new TaskExecutorRegistrationSuccess(
                            new InstanceID(), rmResourceId, new ClusterInformation("localhost", 1234)));
    rmGateway.setRegisterTaskExecutorFunction(
            taskExecutorRegistration -> {
                taskExecutorRegistrationFuture.complete(taskExecutorRegistration.getResourceId());
                return registrationResponse;
            });
    final CompletableFuture<SlotReport> initialSlotReportFuture = new CompletableFuture<>();
    rmGateway.setSendSlotReportFunction(
            resourceIDInstanceIDSlotReportTuple3 -> {
                initialSlotReportFuture.complete(resourceIDInstanceIDSlotReportTuple3.f2);
                return CompletableFuture.completedFuture(Acknowledge.get());
            });
    final CompletableFuture<TaskExecutorHeartbeatPayload> heartbeatPayloadCompletableFuture =
            new CompletableFuture<>();
    rmGateway.setTaskExecutorHeartbeatFunction(
            (resourceID, heartbeatPayload) -> {
                heartbeatPayloadCompletableFuture.complete(heartbeatPayload);
                return FutureUtils.completedVoidFuture();
            });
    rpc.registerGateway(rmAddress, rmGateway);

    final SlotID slotId = buildSlotID(0);
    final ResourceProfile resourceProfile = ResourceProfile.fromResources(1.0, 1);
    final SlotReport slotReport1 = new SlotReport(new SlotStatus(slotId, resourceProfile));
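    // the second report marks the slot as allocated (job ID + allocation ID), simulating
    // an allocation that happens between the initial report and the heartbeat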
    final SlotReport slotReport2 =
            new SlotReport(new SlotStatus(slotId, resourceProfile, new JobID(), new AllocationID()));
    final Queue<SlotReport> reports = new ArrayDeque<>(Arrays.asList(slotReport1, slotReport2));
    final TaskSlotTable<Task> taskSlotTable =
            TestingTaskSlotTable.<Task>newBuilder()
                    .createSlotReportSupplier(reports::poll)
                    .closeAsyncReturns(CompletableFuture.completedFuture(null))
                    .build();
    final TaskExecutorLocalStateStoresManager localStateStoresManager =
            createTaskExecutorLocalStateStoresManager();
    final TaskManagerServices taskManagerServices =
            new TaskManagerServicesBuilder()
                    .setUnresolvedTaskManagerLocation(unresolvedTaskManagerLocation)
                    .setTaskSlotTable(taskSlotTable)
                    .setTaskStateManager(localStateStoresManager)
                    .build();
    final TaskExecutorPartitionTracker partitionTracker =
            createPartitionTrackerWithFixedPartitionReport(taskManagerServices.getShuffleEnvironment());
    final TaskExecutor taskManager = createTaskExecutor(taskManagerServices, HEARTBEAT_SERVICES, partitionTracker);
    try {
        taskManager.start();

        // define a leader and see that a registration happens
        resourceManagerLeaderRetriever.notifyListener(rmAddress, rmLeaderId);

        // a successful resource manager registration triggers heartbeat monitoring
        // between the TM and the RM
        assertThat(taskExecutorRegistrationFuture.get(), equalTo(unresolvedTaskManagerLocation.getResourceID()));
        assertThat(initialSlotReportFuture.get(), equalTo(slotReport1));

        TaskExecutorGateway taskExecutorGateway = taskManager.getSelfGateway(TaskExecutorGateway.class);

        // trigger the heartbeat asynchronously
        taskExecutorGateway.heartbeatFromResourceManager(rmResourceId);

        // wait for the heartbeat response; the new slot report should be reported
        SlotReport actualSlotReport = heartbeatPayloadCompletableFuture.get().getSlotReport();
        assertEquals(slotReport2, actualSlotReport);

        ClusterPartitionReport actualClusterPartitionReport =
                heartbeatPayloadCompletableFuture.get().getClusterPartitionReport();
        assertEquals(partitionTracker.createClusterPartitionReport(), actualClusterPartitionReport);
    } finally {
        RpcUtils.terminateRpcEndpoint(taskManager, timeout);
    }
}
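The helper createPartitionTrackerWithFixedPartitionReport is not shown on this page. A minimal sketch of how such a helper could look, assuming it overrides createClusterPartitionReport() so the heartbeat payload carries a deterministic report for the final assertion; the empty report below is an assumption, and the real helper may populate entries:

private static TaskExecutorPartitionTracker createPartitionTrackerWithFixedPartitionReport(
        ShuffleEnvironment<?, ?> shuffleEnvironment) {
    // a fixed (here: empty) report; returning the same instance on every call makes
    // the assertEquals comparison in the test deterministic
    final ClusterPartitionReport fixedReport = new ClusterPartitionReport(Collections.emptyList());
    return new TaskExecutorPartitionTrackerImpl(shuffleEnvironment) {
        @Override
        public ClusterPartitionReport createClusterPartitionReport() {
            return fixedReport;
        }
    };
}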
Use of org.apache.flink.runtime.io.network.partition.TaskExecutorPartitionTracker in project flink by apache.
From the class TaskExecutorTest, method testReleaseOfJobResourcesIfJobMasterIsNotCorrect.
/**
 * Tests that the TaskExecutor releases all of its job resources if the JobMaster is not running
 * the specified job. See FLINK-21606.
 */
@Test
public void testReleaseOfJobResourcesIfJobMasterIsNotCorrect() throws Exception {
    final TaskManagerServices taskManagerServices =
            new TaskManagerServicesBuilder().setTaskSlotTable(TaskSlotUtils.createTaskSlotTable(1)).build();
    final TestingTaskExecutorPartitionTracker taskExecutorPartitionTracker =
            new TestingTaskExecutorPartitionTracker();
    final CompletableFuture<JobID> jobPartitionsReleaseFuture = new CompletableFuture<>();

    // simulate that we have some partitions tracked
    taskExecutorPartitionTracker.setIsTrackingPartitionsForFunction(ignored -> true);
    taskExecutorPartitionTracker.setStopTrackingAndReleaseAllPartitionsConsumer(
            jobPartitionsReleaseFuture::complete);

    final TaskExecutor taskExecutor =
            createTaskExecutor(taskManagerServices, HEARTBEAT_SERVICES, taskExecutorPartitionTracker);
    final TestingJobMasterGateway jobMasterGateway =
            new TestingJobMasterGatewayBuilder()
                    .setRegisterTaskManagerFunction(
                            (ignoredJobId, ignoredTaskManagerRegistrationInformation) ->
                                    CompletableFuture.completedFuture(new JMTMRegistrationRejection("foobar")))
                    .build();
    rpc.registerGateway(jobMasterGateway.getAddress(), jobMasterGateway);

    final InstanceID registrationId = new InstanceID();
    final OneShotLatch taskExecutorIsRegistered = new OneShotLatch();
    final CompletableFuture<Tuple3<InstanceID, SlotID, AllocationID>> availableSlotFuture =
            new CompletableFuture<>();
    final TestingResourceManagerGateway resourceManagerGateway =
            createRmWithTmRegisterAndNotifySlotHooks(registrationId, taskExecutorIsRegistered, availableSlotFuture);
    rpc.registerGateway(resourceManagerGateway.getAddress(), resourceManagerGateway);
    resourceManagerLeaderRetriever.notifyListener(
            resourceManagerGateway.getAddress(), resourceManagerGateway.getFencingToken().toUUID());
    try {
        taskExecutor.start();
        final TaskExecutorGateway taskExecutorGateway = taskExecutor.getSelfGateway(TaskExecutorGateway.class);
        taskExecutorIsRegistered.await();

        final AllocationID allocationId = new AllocationID();
        final SlotID slotId = new SlotID(taskExecutor.getResourceID(), 0);
        requestSlot(
                taskExecutorGateway,
                jobId,
                allocationId,
                slotId,
                ResourceProfile.UNKNOWN,
                jobMasterGateway.getAddress(),
                resourceManagerGateway.getFencingToken());

        // The JobManager should reject the registration, which should release all job
        // resources on the TaskExecutor.
        jobManagerLeaderRetriever.notifyListener(
                jobMasterGateway.getAddress(), jobMasterGateway.getFencingToken().toUUID());

        // the slot should be freed
        assertThat(availableSlotFuture.get().f1, is(slotId));
        assertThat(availableSlotFuture.get().f2, is(allocationId));

        // all job partitions should be released
        assertThat(jobPartitionsReleaseFuture.get(), is(jobId));
    } finally {
        RpcUtils.terminateRpcEndpoint(taskExecutor, timeout);
    }
}
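The requestSlot helper is not shown on this page. A minimal sketch under the assumption that it simply forwards to TaskExecutorGateway#requestSlot (whose parameter order appears in internalTestPartitionRelease above) with the shared test timeout and waits for the acknowledgement:

private void requestSlot(
        TaskExecutorGateway taskExecutorGateway,
        JobID jobId,
        AllocationID allocationId,
        SlotID slotId,
        ResourceProfile resourceProfile,
        String jobMasterAddress,
        ResourceManagerId resourceManagerId)
        throws Exception {
    // forward to the gateway and block until the slot request is acknowledged
    taskExecutorGateway
            .requestSlot(slotId, jobId, allocationId, resourceProfile, jobMasterAddress, resourceManagerId, timeout)
            .get();
}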
Use of org.apache.flink.runtime.io.network.partition.TaskExecutorPartitionTracker in project flink by apache.
From the class TaskExecutorPartitionLifecycleTest, method testBlockingLocalPartitionReleaseDoesNotBlockTaskExecutor.
@Test
public void testBlockingLocalPartitionReleaseDoesNotBlockTaskExecutor() throws Exception {
    BlockerSync sync = new BlockerSync();
    ResultPartitionManager blockingResultPartitionManager =
            new ResultPartitionManager() {
                @Override
                public void releasePartition(ResultPartitionID partitionId, Throwable cause) {
                    sync.blockNonInterruptible();
                    super.releasePartition(partitionId, cause);
                }
            };
    NettyShuffleEnvironment shuffleEnvironment =
            new NettyShuffleEnvironmentBuilder()
                    .setResultPartitionManager(blockingResultPartitionManager)
                    .setIoExecutor(TEST_EXECUTOR_SERVICE_RESOURCE.getExecutor())
                    .build();
    final CompletableFuture<ResultPartitionID> startTrackingFuture = new CompletableFuture<>();
    final TaskExecutorPartitionTracker partitionTracker =
            new TaskExecutorPartitionTrackerImpl(shuffleEnvironment) {
                @Override
                public void startTrackingPartition(JobID producingJobId, TaskExecutorPartitionInfo partitionInfo) {
                    super.startTrackingPartition(producingJobId, partitionInfo);
                    startTrackingFuture.complete(partitionInfo.getResultPartitionId());
                }
            };
    try {
        internalTestPartitionRelease(
                partitionTracker,
                shuffleEnvironment,
                startTrackingFuture,
                (jobId, resultPartitionDeploymentDescriptor, taskExecutor, taskExecutorGateway) -> {
                    final IntermediateDataSetID dataSetId = resultPartitionDeploymentDescriptor.getResultId();
                    taskExecutorGateway.releaseClusterPartitions(Collections.singleton(dataSetId), timeout);

                    // execute some operation to check whether the TaskExecutor is blocked
                    taskExecutorGateway.canBeReleased().get(5, TimeUnit.SECONDS);
                });
    } finally {
        sync.releaseBlocker();
    }
}
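As an aside, the BlockerSync coordination pattern used throughout these tests, shown in isolation. This is a minimal sketch with a hypothetical worker thread, not code from the Flink tests; it only uses the BlockerSync methods that appear above (blockNonInterruptible, awaitBlocker, releaseBlocker):

static void demonstrateBlockerSync() throws InterruptedException {
    BlockerSync sync = new BlockerSync();
    Thread worker = new Thread(() -> {
        sync.blockNonInterruptible(); // parks here until releaseBlocker() is called
        // ... the guarded work (e.g. the blocking partition release) would run here ...
    });
    worker.start();

    sync.awaitBlocker();   // wait until the worker is actually parked
    // at this point a test can assert that other components are not stuck
    sync.releaseBlocker(); // let the worker proceed
    worker.join();
}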