Use of org.apache.flink.runtime.taskexecutor.partition.ClusterPartitionReport in project flink by apache.
From the class ResourceManagerPartitionTrackerImpl, the method processTaskExecutorShutdown:
@Override
public void processTaskExecutorShutdown(ResourceID taskExecutorId) {
    Preconditions.checkNotNull(taskExecutorId);
    LOG.debug("Processing shutdown of task executor {}.", taskExecutorId);
    internalProcessClusterPartitionReport(
            taskExecutorId, new ClusterPartitionReport(Collections.emptyList()));
}
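A task executor shutdown is thus handled as if the executor had sent a report containing no cluster partitions. The lines below are a minimal sketch of what such an empty report looks like next to a populated one, using only the ClusterPartitionReport constructors and getters that appear in the examples on this page; the ids are illustrative placeholders, not values from the Flink sources.

// Sketch (illustrative ids): an empty report models a task executor that hosts no
// cluster partitions; a populated report carries one entry per hosted data set.
ClusterPartitionReport emptyReport = new ClusterPartitionReport(Collections.emptyList());
assert emptyReport.getEntries().isEmpty();

ClusterPartitionReport.ClusterPartitionReportEntry entry =
        new ClusterPartitionReport.ClusterPartitionReportEntry(
                new IntermediateDataSetID(),
                Collections.singleton(new ResultPartitionID()),
                4); // total number of partitions in the data set
ClusterPartitionReport populatedReport =
        new ClusterPartitionReport(Collections.singletonList(entry));
assert populatedReport.getEntries().size() == 1;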
Use of org.apache.flink.runtime.taskexecutor.partition.ClusterPartitionReport in project flink by apache.
From the class TaskExecutorTest, the method testHeartbeatReporting:
/** Tests that the correct partition/slot report is sent as part of the heartbeat response. */
@Test
public void testHeartbeatReporting() throws Exception {
    final String rmAddress = "rm";
    final UUID rmLeaderId = UUID.randomUUID();

    // register the mock resource manager gateway
    final TestingResourceManagerGateway rmGateway = new TestingResourceManagerGateway();
    final CompletableFuture<ResourceID> taskExecutorRegistrationFuture = new CompletableFuture<>();
    final ResourceID rmResourceId = rmGateway.getOwnResourceId();
    final CompletableFuture<RegistrationResponse> registrationResponse =
            CompletableFuture.completedFuture(
                    new TaskExecutorRegistrationSuccess(
                            new InstanceID(), rmResourceId, new ClusterInformation("localhost", 1234)));
    rmGateway.setRegisterTaskExecutorFunction(taskExecutorRegistration -> {
        taskExecutorRegistrationFuture.complete(taskExecutorRegistration.getResourceId());
        return registrationResponse;
    });

    final CompletableFuture<SlotReport> initialSlotReportFuture = new CompletableFuture<>();
    rmGateway.setSendSlotReportFunction(resourceIDInstanceIDSlotReportTuple3 -> {
        initialSlotReportFuture.complete(resourceIDInstanceIDSlotReportTuple3.f2);
        return CompletableFuture.completedFuture(Acknowledge.get());
    });

    final CompletableFuture<TaskExecutorHeartbeatPayload> heartbeatPayloadCompletableFuture =
            new CompletableFuture<>();
    rmGateway.setTaskExecutorHeartbeatFunction((resourceID, heartbeatPayload) -> {
        heartbeatPayloadCompletableFuture.complete(heartbeatPayload);
        return FutureUtils.completedVoidFuture();
    });

    rpc.registerGateway(rmAddress, rmGateway);

    final SlotID slotId = buildSlotID(0);
    final ResourceProfile resourceProfile = ResourceProfile.fromResources(1.0, 1);
    final SlotReport slotReport1 = new SlotReport(new SlotStatus(slotId, resourceProfile));
    final SlotReport slotReport2 =
            new SlotReport(new SlotStatus(slotId, resourceProfile, new JobID(), new AllocationID()));
    final Queue<SlotReport> reports = new ArrayDeque<>(Arrays.asList(slotReport1, slotReport2));
    final TaskSlotTable<Task> taskSlotTable =
            TestingTaskSlotTable.<Task>newBuilder()
                    .createSlotReportSupplier(reports::poll)
                    .closeAsyncReturns(CompletableFuture.completedFuture(null))
                    .build();

    final TaskExecutorLocalStateStoresManager localStateStoresManager =
            createTaskExecutorLocalStateStoresManager();
    final TaskManagerServices taskManagerServices =
            new TaskManagerServicesBuilder()
                    .setUnresolvedTaskManagerLocation(unresolvedTaskManagerLocation)
                    .setTaskSlotTable(taskSlotTable)
                    .setTaskStateManager(localStateStoresManager)
                    .build();
    final TaskExecutorPartitionTracker partitionTracker =
            createPartitionTrackerWithFixedPartitionReport(taskManagerServices.getShuffleEnvironment());

    final TaskExecutor taskManager = createTaskExecutor(taskManagerServices, HEARTBEAT_SERVICES, partitionTracker);

    try {
        taskManager.start();

        // define a leader and see that a registration happens
        resourceManagerLeaderRetriever.notifyListener(rmAddress, rmLeaderId);

        // a successful registration at the resource manager triggers heartbeat monitoring
        // between the task manager and the resource manager
        assertThat(taskExecutorRegistrationFuture.get(), equalTo(unresolvedTaskManagerLocation.getResourceID()));
        assertThat(initialSlotReportFuture.get(), equalTo(slotReport1));

        TaskExecutorGateway taskExecutorGateway = taskManager.getSelfGateway(TaskExecutorGateway.class);

        // trigger the heartbeat asynchronously
        taskExecutorGateway.heartbeatFromResourceManager(rmResourceId);

        // wait for the heartbeat response
        SlotReport actualSlotReport = heartbeatPayloadCompletableFuture.get().getSlotReport();

        // the new slot report should be reported
        assertEquals(slotReport2, actualSlotReport);

        ClusterPartitionReport actualClusterPartitionReport =
                heartbeatPayloadCompletableFuture.get().getClusterPartitionReport();
        assertEquals(partitionTracker.createClusterPartitionReport(), actualClusterPartitionReport);
    } finally {
        RpcUtils.terminateRpcEndpoint(taskManager, timeout);
    }
}
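Because the tracker injected above always returns the same fixed report, the final assertion compares the heartbeat payload against that constant. If one wanted to check individual fields instead, a sketch along the following lines would work; it reuses only the entry getters shown in the TaskExecutorPartitionTrackerImplTest example further down and assumes the payload future has completed, as asserted in the test.

// Sketch: entry-level checks on the report carried by the heartbeat payload.
ClusterPartitionReport receivedReport =
        heartbeatPayloadCompletableFuture.get().getClusterPartitionReport();
ClusterPartitionReport.ClusterPartitionReportEntry onlyEntry =
        Iterables.getOnlyElement(receivedReport.getEntries());
assertEquals(4, onlyEntry.getNumTotalPartitions()); // 4 is hard-coded in createPartitionTrackerWithFixedPartitionReport
assertEquals(1, onlyEntry.getHostedPartitions().size());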
Use of org.apache.flink.runtime.taskexecutor.partition.ClusterPartitionReport in project flink by apache.
From the class TaskExecutorTest, the method createPartitionTrackerWithFixedPartitionReport:
private static TaskExecutorPartitionTracker createPartitionTrackerWithFixedPartitionReport(
        ShuffleEnvironment<?, ?> shuffleEnvironment) {
    final ClusterPartitionReport.ClusterPartitionReportEntry clusterPartitionReportEntry =
            new ClusterPartitionReport.ClusterPartitionReportEntry(
                    new IntermediateDataSetID(), Collections.singleton(new ResultPartitionID()), 4);
    final ClusterPartitionReport clusterPartitionReport =
            new ClusterPartitionReport(Collections.singletonList(clusterPartitionReportEntry));
    return new TaskExecutorPartitionTrackerImpl(shuffleEnvironment) {
        @Override
        public ClusterPartitionReport createClusterPartitionReport() {
            return clusterPartitionReport;
        }
    };
}
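Since createClusterPartitionReport() is overridden to return a constant, the report is independent of whatever the tracker is actually asked to track. The lines below are a minimal sketch of that property, assuming a shuffle environment built with NettyShuffleEnvironmentBuilder as in the next example and assuming startTrackingPartition is callable on the tracker as it is on the Impl below; variable names are illustrative.

// Sketch: the overriding tracker hands out the same fixed report no matter what is tracked.
TaskExecutorPartitionTracker fixedTracker =
        createPartitionTrackerWithFixedPartitionReport(new NettyShuffleEnvironmentBuilder().build());
ClusterPartitionReport before = fixedTracker.createClusterPartitionReport();
fixedTracker.startTrackingPartition(
        new JobID(),
        new TaskExecutorPartitionInfo(new ResultPartitionID(), new IntermediateDataSetID(), 1));
ClusterPartitionReport after = fixedTracker.createClusterPartitionReport();
assert before == after; // same fixed instance both times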
Use of org.apache.flink.runtime.taskexecutor.partition.ClusterPartitionReport in project flink by apache.
From the class TaskExecutorPartitionTrackerImplTest, the method createClusterPartitionReport:
@Test
public void createClusterPartitionReport() {
    final TaskExecutorPartitionTrackerImpl partitionTracker =
            new TaskExecutorPartitionTrackerImpl(new NettyShuffleEnvironmentBuilder().build());
    assertThat(partitionTracker.createClusterPartitionReport().getEntries(), is(empty()));

    final IntermediateDataSetID dataSetId = new IntermediateDataSetID();
    final JobID jobId = new JobID();
    final ResultPartitionID clusterPartitionId = new ResultPartitionID();
    final ResultPartitionID jobPartitionId = new ResultPartitionID();
    final int numberOfPartitions = 1;

    partitionTracker.startTrackingPartition(
            jobId, new TaskExecutorPartitionInfo(clusterPartitionId, dataSetId, numberOfPartitions));
    partitionTracker.startTrackingPartition(
            jobId, new TaskExecutorPartitionInfo(jobPartitionId, dataSetId, numberOfPartitions + 1));
    partitionTracker.promoteJobPartitions(Collections.singleton(clusterPartitionId));

    final ClusterPartitionReport clusterPartitionReport = partitionTracker.createClusterPartitionReport();
    final ClusterPartitionReport.ClusterPartitionReportEntry reportEntry =
            Iterables.getOnlyElement(clusterPartitionReport.getEntries());
    assertThat(reportEntry.getDataSetId(), is(dataSetId));
    assertThat(reportEntry.getNumTotalPartitions(), is(numberOfPartitions));
    assertThat(reportEntry.getHostedPartitions(), hasItems(clusterPartitionId));
}