Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.
The class JobMasterTest, method testRequestPartitionState.
/**
* Tests the {@link JobMaster#requestPartitionState(IntermediateDataSetID, ResultPartitionID)}
* call for a finished result partition.
*/
@Test
public void testRequestPartitionState() throws Exception {
    final JobGraph producerConsumerJobGraph = producerConsumerJobGraph();
    final JobMaster jobMaster =
            new JobMasterBuilder(producerConsumerJobGraph, rpcService)
                    .withConfiguration(configuration)
                    .withHighAvailabilityServices(haServices)
                    .withHeartbeatServices(heartbeatServices)
                    .createJobMaster();
    jobMaster.start();
    try {
        final CompletableFuture<TaskDeploymentDescriptor> tddFuture = new CompletableFuture<>();
        final TestingTaskExecutorGateway testingTaskExecutorGateway =
                new TestingTaskExecutorGatewayBuilder()
                        .setSubmitTaskConsumer(
                                (taskDeploymentDescriptor, jobMasterId) -> {
                                    tddFuture.complete(taskDeploymentDescriptor);
                                    return CompletableFuture.completedFuture(Acknowledge.get());
                                })
                        .createTestingTaskExecutorGateway();
        final LocalUnresolvedTaskManagerLocation taskManagerLocation =
                new LocalUnresolvedTaskManagerLocation();
        final JobMasterGateway jobMasterGateway = jobMaster.getSelfGateway(JobMasterGateway.class);
        final Collection<SlotOffer> slotOffers =
                registerSlotsAtJobMaster(
                        1,
                        jobMasterGateway,
                        producerConsumerJobGraph.getJobID(),
                        testingTaskExecutorGateway,
                        taskManagerLocation);
        assertThat(slotOffers, hasSize(1));

        // obtain tdd for the result partition ids
        final TaskDeploymentDescriptor tdd = tddFuture.get();
        assertThat(tdd.getProducedPartitions(), hasSize(1));
        final ResultPartitionDeploymentDescriptor partition =
                tdd.getProducedPartitions().iterator().next();
        final ExecutionAttemptID executionAttemptId = tdd.getExecutionAttemptId();
        final ExecutionAttemptID copiedExecutionAttemptId =
                new ExecutionAttemptID(executionAttemptId);

        // finish the producer task
        jobMasterGateway
                .updateTaskExecutionState(
                        new TaskExecutionState(executionAttemptId, ExecutionState.FINISHED))
                .get();

        // request the state of the result partition of the producer
        final ResultPartitionID partitionId =
                new ResultPartitionID(partition.getPartitionId(), copiedExecutionAttemptId);
        CompletableFuture<ExecutionState> partitionStateFuture =
                jobMasterGateway.requestPartitionState(partition.getResultId(), partitionId);
        assertThat(partitionStateFuture.get(), equalTo(ExecutionState.FINISHED));

        // ask for unknown result partition
        partitionStateFuture =
                jobMasterGateway.requestPartitionState(partition.getResultId(), new ResultPartitionID());
        try {
            partitionStateFuture.get();
            fail("Expected failure.");
        } catch (ExecutionException e) {
            assertThat(ExceptionUtils.findThrowable(e, IllegalArgumentException.class).isPresent(), is(true));
        }

        // ask for wrong intermediate data set id
        partitionStateFuture =
                jobMasterGateway.requestPartitionState(new IntermediateDataSetID(), partitionId);
        try {
            partitionStateFuture.get();
            fail("Expected failure.");
        } catch (ExecutionException e) {
            assertThat(ExceptionUtils.findThrowable(e, IllegalArgumentException.class).isPresent(), is(true));
        }

        // ask for "old" execution
        partitionStateFuture =
                jobMasterGateway.requestPartitionState(
                        partition.getResultId(),
                        new ResultPartitionID(partition.getPartitionId(), new ExecutionAttemptID()));
        try {
            partitionStateFuture.get();
            fail("Expected failure.");
        } catch (ExecutionException e) {
            assertThat(ExceptionUtils.findThrowable(e, PartitionProducerDisposedException.class).isPresent(), is(true));
        }
    } finally {
        RpcUtils.terminateRpcEndpoint(jobMaster, testingTimeout);
    }
}
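The FINISHED lookup above is made with a copy of the producer's attempt id, so it implicitly relies on ExecutionAttemptID value equality. A minimal standalone sketch of that assumption (hypothetical test class, not part of JobMasterTest; it assumes the copy constructor used above preserves the underlying id):

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;

import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.junit.Test;

public class ExecutionAttemptIdEqualityTest {

    @Test
    public void copiedAttemptIdIsEqualToOriginal() {
        // Randomly generated attempt id.
        ExecutionAttemptID original = new ExecutionAttemptID();
        // Copy constructor, as used in testRequestPartitionState above.
        ExecutionAttemptID copy = new ExecutionAttemptID(original);

        // Value equality is what lets the copied id resolve the same partition producer.
        assertThat(copy, equalTo(original));
        assertThat(copy.hashCode(), equalTo(original.hashCode()));
    }
}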
Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.
The class CheckpointMessagesTest, method testConfirmTaskCheckpointed.
@Test
public void testConfirmTaskCheckpointed() {
    final Random rnd = new Random();
    try {
        AcknowledgeCheckpoint noState =
                new AcknowledgeCheckpoint(new JobID(), new ExecutionAttemptID(), 569345L);

        KeyGroupRange keyGroupRange = KeyGroupRange.of(42, 42);

        TaskStateSnapshot checkpointStateHandles = new TaskStateSnapshot();
        OperatorSubtaskState subtaskState =
                OperatorSubtaskState.builder()
                        .setManagedOperatorState(
                                generatePartitionableStateHandle(new JobVertexID(), 0, 2, 8, false))
                        .setManagedKeyedState(
                                generateKeyGroupState(keyGroupRange, Collections.singletonList(new MyHandle())))
                        .setInputChannelState(singleton(createNewInputChannelStateHandle(10, rnd)))
                        .setResultSubpartitionState(singleton(createNewResultSubpartitionStateHandle(10, rnd)))
                        .build();
        checkpointStateHandles.putSubtaskStateByOperatorID(new OperatorID(), subtaskState);

        AcknowledgeCheckpoint withState =
                new AcknowledgeCheckpoint(
                        new JobID(),
                        new ExecutionAttemptID(),
                        87658976143L,
                        new CheckpointMetrics(),
                        checkpointStateHandles);

        testSerializabilityEqualsHashCode(noState);
        testSerializabilityEqualsHashCode(withState);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
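The helper testSerializabilityEqualsHashCode is not shown in this excerpt; the messages it checks carry an ExecutionAttemptID, which itself must survive serialization for the round trip to hold. A hedged standalone sketch of that smaller property (hypothetical test class, assuming ExecutionAttemptID is java.io.Serializable, which its use in RPC messages suggests):

import static org.junit.Assert.assertEquals;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.junit.Test;

public class ExecutionAttemptIdSerializationTest {

    @Test
    public void attemptIdSurvivesJavaSerialization() throws Exception {
        ExecutionAttemptID id = new ExecutionAttemptID();

        // Write the id with plain Java serialization ...
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(id);
        }

        // ... and read it back.
        ExecutionAttemptID restored;
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            restored = (ExecutionAttemptID) in.readObject();
        }

        // equals/hashCode must treat the deserialized copy as the same attempt.
        assertEquals(id, restored);
        assertEquals(id.hashCode(), restored.hashCode());
    }
}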
Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.
The class InternalOperatorGroupTest, method testCreateQueryServiceMetricInfo.
@Test
public void testCreateQueryServiceMetricInfo() {
    JobID jid = new JobID();
    JobVertexID vid = new JobVertexID();
    ExecutionAttemptID eid = new ExecutionAttemptID();
    OperatorID oid = new OperatorID();
    TaskManagerMetricGroup tm =
            TaskManagerMetricGroup.createTaskManagerMetricGroup(registry, "host", new ResourceID("id"));
    TaskMetricGroup task = tm.addJob(jid, "jobname").addTask(vid, eid, "taskName", 4, 5);
    InternalOperatorMetricGroup operator = task.getOrAddOperator(oid, "operator");

    QueryScopeInfo.OperatorQueryScopeInfo info =
            operator.createQueryServiceMetricInfo(new DummyCharacterFilter());
    assertEquals("", info.scope);
    assertEquals(jid.toString(), info.jobID);
    assertEquals(vid.toString(), info.vertexID);
    assertEquals(4, info.subtaskIndex);
    assertEquals("operator", info.operatorName);
}
Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.
The class InternalOperatorGroupTest, method testIOMetricGroupInstantiation.
@Test
public void testIOMetricGroupInstantiation() throws Exception {
    TaskManagerMetricGroup tmGroup =
            TaskManagerMetricGroup.createTaskManagerMetricGroup(
                    registry, "theHostName", new ResourceID("test-tm-id"));
    TaskMetricGroup taskGroup =
            tmGroup.addJob(new JobID(), "myJobName")
                    .addTask(new JobVertexID(), new ExecutionAttemptID(), "aTaskName", 11, 0);
    InternalOperatorMetricGroup opGroup = taskGroup.getOrAddOperator(new OperatorID(), "myOpName");

    assertNotNull(opGroup.getIOMetricGroup());
    assertNotNull(opGroup.getIOMetricGroup().getNumRecordsInCounter());
    assertNotNull(opGroup.getIOMetricGroup().getNumRecordsOutCounter());
}
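For context, a hedged fragment that could be appended inside the same test to exercise the counters it instantiates (hypothetical extension, assuming the standard org.apache.flink.metrics.Counter API with inc() and getCount()):

// Continuing with the opGroup from above; not part of the original test.
Counter numRecordsIn = opGroup.getIOMetricGroup().getNumRecordsInCounter();
numRecordsIn.inc();
numRecordsIn.inc(5);
assertEquals(6, numRecordsIn.getCount());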
Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.
The class InternalOperatorGroupTest, method testVariables.
@Test
public void testVariables() {
    JobID jid = new JobID();
    JobVertexID tid = new JobVertexID();
    ExecutionAttemptID eid = new ExecutionAttemptID();
    OperatorID oid = new OperatorID();
    TaskManagerMetricGroup tmGroup =
            TaskManagerMetricGroup.createTaskManagerMetricGroup(
                    registry, "theHostName", new ResourceID("test-tm-id"));
    TaskMetricGroup taskGroup = tmGroup.addJob(jid, "myJobName").addTask(tid, eid, "aTaskName", 11, 0);
    InternalOperatorMetricGroup opGroup = taskGroup.getOrAddOperator(oid, "myOpName");

    Map<String, String> variables = opGroup.getAllVariables();

    testVariable(variables, ScopeFormat.SCOPE_HOST, "theHostName");
    testVariable(variables, ScopeFormat.SCOPE_TASKMANAGER_ID, "test-tm-id");
    testVariable(variables, ScopeFormat.SCOPE_JOB_ID, jid.toString());
    testVariable(variables, ScopeFormat.SCOPE_JOB_NAME, "myJobName");
    testVariable(variables, ScopeFormat.SCOPE_TASK_VERTEX_ID, tid.toString());
    testVariable(variables, ScopeFormat.SCOPE_TASK_NAME, "aTaskName");
    testVariable(variables, ScopeFormat.SCOPE_TASK_ATTEMPT_ID, eid.toString());
    testVariable(variables, ScopeFormat.SCOPE_TASK_SUBTASK_INDEX, "11");
    testVariable(variables, ScopeFormat.SCOPE_TASK_ATTEMPT_NUM, "0");
    testVariable(variables, ScopeFormat.SCOPE_OPERATOR_ID, oid.toString());
    testVariable(variables, ScopeFormat.SCOPE_OPERATOR_NAME, "myOpName");
}
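The testVariable helper is not shown in this excerpt; presumably it asserts that the variables map contains the expected value under the given key. A hedged direct equivalent for the attempt-id entry (hypothetical, written against the variables map above):

// Direct lookup of the attempt id variable; mirrors what testVariable presumably checks.
assertEquals(eid.toString(), variables.get(ScopeFormat.SCOPE_TASK_ATTEMPT_ID));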