Use of org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor in project flink by apache.
In class TaskManagerTest, method testCancellingDependentAndStateUpdateFails.
@Test
public void testCancellingDependentAndStateUpdateFails() {
// This test creates two tasks. The sender sends data and fails to send the
// state update back to the job manager. The receiver blocks until it is canceled.
new JavaTestKit(system) {
{
ActorGateway jobManager = null;
ActorGateway taskManager = null;
final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);
try {
final JobID jid = new JobID();
JobVertexID vid1 = new JobVertexID();
JobVertexID vid2 = new JobVertexID();
final ExecutionAttemptID eid1 = new ExecutionAttemptID();
final ExecutionAttemptID eid2 = new ExecutionAttemptID();
ActorRef jm = system.actorOf(Props.create(new SimpleLookupFailingUpdateJobManagerCreator(leaderSessionID, eid2)));
jobManager = new AkkaActorGateway(jm, leaderSessionID);
taskManager = TestingUtils.createTaskManager(system, jobManager, new Configuration(), true, true);
final ActorGateway tm = taskManager;
IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();
List<ResultPartitionDeploymentDescriptor> irpdd = new ArrayList<ResultPartitionDeploymentDescriptor>();
irpdd.add(new ResultPartitionDeploymentDescriptor(new IntermediateDataSetID(), partitionId, ResultPartitionType.PIPELINED, 1, 1, true));
InputGateDeploymentDescriptor ircdd = new InputGateDeploymentDescriptor(
    new IntermediateDataSetID(), ResultPartitionType.PIPELINED, 0,
    new InputChannelDeploymentDescriptor[] {
        new InputChannelDeploymentDescriptor(
            new ResultPartitionID(partitionId, eid1),
            ResultPartitionLocation.createLocal())
    });
final TaskDeploymentDescriptor tdd1 = createTaskDeploymentDescriptor(
    jid, "TestJob", vid1, eid1, new SerializedValue<>(new ExecutionConfig()),
    "Sender", 1, 0, 1, 0, new Configuration(), new Configuration(),
    Tasks.Sender.class.getName(), irpdd,
    Collections.<InputGateDeploymentDescriptor>emptyList(),
    new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
final TaskDeploymentDescriptor tdd2 = createTaskDeploymentDescriptor(
    jid, "TestJob", vid2, eid2, new SerializedValue<>(new ExecutionConfig()),
    "Receiver", 7, 2, 7, 0, new Configuration(), new Configuration(),
    Tasks.BlockingReceiver.class.getName(),
    Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
    Collections.singletonList(ircdd),
    new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
new Within(d) {
@Override
protected void run() {
try {
Future<Object> t1Running = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid1), timeout);
Future<Object> t2Running = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid2), timeout);
tm.tell(new SubmitTask(tdd2), testActorGateway);
tm.tell(new SubmitTask(tdd1), testActorGateway);
expectMsgEquals(Acknowledge.get());
expectMsgEquals(Acknowledge.get());
Await.ready(t1Running, d);
Await.ready(t2Running, d);
tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
Map<ExecutionAttemptID, Task> tasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
Task t1 = tasks.get(eid1);
Task t2 = tasks.get(eid2);
tm.tell(new CancelTask(eid2), testActorGateway);
expectMsgEquals(Acknowledge.get());
if (t2 != null) {
Future<Object> response = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid2), timeout);
Await.ready(response, d);
}
if (t1 != null) {
if (t1.getExecutionState() == ExecutionState.RUNNING) {
tm.tell(new CancelTask(eid1), testActorGateway);
expectMsgEquals(Acknowledge.get());
}
Future<Object> response = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid1), timeout);
Await.ready(response, d);
}
tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
tasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
assertEquals(0, tasks.size());
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
};
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
} finally {
// shut down the actors
TestingUtils.stopActor(taskManager);
TestingUtils.stopActor(jobManager);
}
}
};
}
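For orientation, the descriptor wiring in the test above reduces to a small pattern: the producer side registers a ResultPartitionDeploymentDescriptor, and the consumer side gets an InputGateDeploymentDescriptor whose single local channel points back at that partition. A minimal sketch, using the same constructors as the test; all IDs below are freshly generated placeholders, not values from a real job:

// Minimal sketch of the producer/consumer wiring used above; all IDs are
// synthetic placeholders created on the spot.
IntermediateDataSetID dataSetId = new IntermediateDataSetID();
IntermediateResultPartitionID partition = new IntermediateResultPartitionID();
ExecutionAttemptID producerAttempt = new ExecutionAttemptID();
// Producer side: one pipelined partition with a single subpartition.
ResultPartitionDeploymentDescriptor produced = new ResultPartitionDeploymentDescriptor(
    dataSetId, partition, ResultPartitionType.PIPELINED, 1, 1, true);
// Consumer side: one gate reading subpartition 0 through a single local
// channel that references the producer's partition.
InputGateDeploymentDescriptor consumed = new InputGateDeploymentDescriptor(
    dataSetId, ResultPartitionType.PIPELINED, 0,
    new InputChannelDeploymentDescriptor[] {
        new InputChannelDeploymentDescriptor(
            new ResultPartitionID(partition, producerAttempt),
            ResultPartitionLocation.createLocal())
    });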
Use of org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor in project flink by apache.
In class TaskManagerTest, method testRemotePartitionNotFound.
/**
* Tests that repeated remote {@link PartitionNotFoundException}s ultimately fail the receiver.
*/
@Test
public void testRemotePartitionNotFound() throws Exception {
new JavaTestKit(system) {
{
ActorGateway jobManager = null;
ActorGateway taskManager = null;
final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);
try {
final IntermediateDataSetID resultId = new IntermediateDataSetID();
// Create the JM
ActorRef jm = system.actorOf(Props.create(new SimplePartitionStateLookupJobManagerCreator(leaderSessionID, getTestActor())));
jobManager = new AkkaActorGateway(jm, leaderSessionID);
final int dataPort = NetUtils.getAvailablePort();
Configuration config = new Configuration();
config.setInteger(ConfigConstants.TASK_MANAGER_DATA_PORT_KEY, dataPort);
config.setInteger(TaskManagerOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
config.setInteger(TaskManagerOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);
taskManager = TestingUtils.createTaskManager(system, jobManager, config, false, true);
// ---------------------------------------------------------------------------------
final ActorGateway tm = taskManager;
final JobID jid = new JobID();
final JobVertexID vid = new JobVertexID();
final ExecutionAttemptID eid = new ExecutionAttemptID();
final ResultPartitionID partitionId = new ResultPartitionID();
// Remote location (on the same TM though) for the partition
final ResultPartitionLocation loc = ResultPartitionLocation.createRemote(new ConnectionID(new InetSocketAddress("localhost", dataPort), 0));
final InputChannelDeploymentDescriptor[] icdd = new InputChannelDeploymentDescriptor[] { new InputChannelDeploymentDescriptor(partitionId, loc) };
final InputGateDeploymentDescriptor igdd = new InputGateDeploymentDescriptor(resultId, ResultPartitionType.PIPELINED, 0, icdd);
final TaskDeploymentDescriptor tdd = createTaskDeploymentDescriptor(
    jid, "TestJob", vid, eid, new SerializedValue<>(new ExecutionConfig()),
    "Receiver", 1, 0, 1, 0, new Configuration(), new Configuration(),
    Tasks.AgnosticReceiver.class.getName(),
    Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
    Collections.singletonList(igdd),
    Collections.<BlobKey>emptyList(), Collections.<URL>emptyList(), 0);
new Within(d) {
@Override
protected void run() {
// Submit the task
tm.tell(new SubmitTask(tdd), testActorGateway);
expectMsgClass(Acknowledge.get().getClass());
// Wait to be notified about the final execution state by the mock JM
TaskExecutionState msg = expectMsgClass(TaskExecutionState.class);
// The task should fail after repeated requests
assertEquals(ExecutionState.FAILED, msg.getExecutionState());
Throwable t = msg.getError(ClassLoader.getSystemClassLoader());
assertEquals("Thrown exception was not a PartitionNotFoundException: " + t.getMessage(), PartitionNotFoundException.class, t.getClass());
}
};
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
} finally {
TestingUtils.stopActor(taskManager);
TestingUtils.stopActor(jobManager);
}
}
};
}
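The two NETWORK_REQUEST_BACKOFF_* settings above bound the retry window: the remote input channel re-requests the missing partition with exponentially increasing backoff, and once the backoff can no longer grow past the configured maximum, the failure is surfaced as the PartitionNotFoundException the test asserts on. A rough sketch of that retry arithmetic under those assumptions; requestPartition is a hypothetical stand-in for the channel's internal request call, not Flink's actual code:

// Illustrative backoff loop mirroring the configuration above
// (initial = 100 ms, max = 200 ms). Not Flink's actual channel code.
int backoff = 100;          // NETWORK_REQUEST_BACKOFF_INITIAL
final int maxBackoff = 200; // NETWORK_REQUEST_BACKOFF_MAX
while (!requestPartition()) { // hypothetical helper; false = partition not found
    if (backoff > maxBackoff) {
        // Retries exhausted: surface the failure, the task transitions to FAILED.
        throw new PartitionNotFoundException(partitionId);
    }
    Thread.sleep(backoff);
    backoff *= 2; // exponential growth until the maximum is exceeded
}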
Use of org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor in project flink by apache.
In class TaskStopTest, method doMocking.
public void doMocking(AbstractInvokable taskMock) throws Exception {
TaskInfo taskInfoMock = mock(TaskInfo.class);
when(taskInfoMock.getTaskNameWithSubtasks()).thenReturn("dummyName");
TaskManagerRuntimeInfo tmRuntimeInfo = mock(TaskManagerRuntimeInfo.class);
when(tmRuntimeInfo.getConfiguration()).thenReturn(new Configuration());
task = new Task(
    mock(JobInformation.class),
    new TaskInformation(new JobVertexID(), "test task name", 1, 1, "foobar", new Configuration()),
    mock(ExecutionAttemptID.class), mock(AllocationID.class), 0, 0,
    Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
    Collections.<InputGateDeploymentDescriptor>emptyList(), 0,
    mock(TaskStateHandles.class), mock(MemoryManager.class), mock(IOManager.class),
    mock(NetworkEnvironment.class), mock(BroadcastVariableManager.class),
    mock(TaskManagerActions.class), mock(InputSplitProvider.class),
    mock(CheckpointResponder.class), mock(LibraryCacheManager.class),
    mock(FileCache.class), tmRuntimeInfo, mock(TaskMetricGroup.class),
    mock(ResultPartitionConsumableNotifier.class),
    mock(PartitionProducerStateChecker.class), mock(Executor.class));
Field f = task.getClass().getDeclaredField("invokable");
f.setAccessible(true);
f.set(task, taskMock);
Field f2 = task.getClass().getDeclaredField("executionState");
f2.setAccessible(true);
f2.set(task, ExecutionState.RUNNING);
}
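A typical call site for this helper looks roughly like the sketch below. This is hedged: the exact mock setup in TaskStopTest may differ, though withSettings().extraInterfaces(...) is standard Mockito:

// Sketch of how the helper above can be used: inject a mocked invokable
// that is also stoppable, then stop the already-RUNNING task.
AbstractInvokable invokable = mock(AbstractInvokable.class,
    withSettings().extraInterfaces(StoppableTask.class));
doMocking(invokable);
task.stopExecution(); // expected to forward the stop signal to the injected invokable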
Use of org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor in project flink by apache.
In class ExecutionGraphDeploymentTest, method testBuildDeploymentDescriptor.
@Test
public void testBuildDeploymentDescriptor() {
try {
final JobID jobId = new JobID();
final JobVertexID jid1 = new JobVertexID();
final JobVertexID jid2 = new JobVertexID();
final JobVertexID jid3 = new JobVertexID();
final JobVertexID jid4 = new JobVertexID();
JobVertex v1 = new JobVertex("v1", jid1);
JobVertex v2 = new JobVertex("v2", jid2);
JobVertex v3 = new JobVertex("v3", jid3);
JobVertex v4 = new JobVertex("v4", jid4);
v1.setParallelism(10);
v2.setParallelism(10);
v3.setParallelism(10);
v4.setParallelism(10);
v1.setInvokableClass(BatchTask.class);
v2.setInvokableClass(BatchTask.class);
v3.setInvokableClass(BatchTask.class);
v4.setInvokableClass(BatchTask.class);
v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
v3.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
ExecutionGraph eg = new ExecutionGraph(
    TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), jobId, "some job",
    new Configuration(), new SerializedValue<>(new ExecutionConfig()),
    AkkaUtils.getDefaultTimeout(), new NoRestartStrategy(),
    new Scheduler(TestingUtils.defaultExecutionContext()));
List<JobVertex> ordered = Arrays.asList(v1, v2, v3, v4);
eg.attachJobGraph(ordered);
ExecutionJobVertex ejv = eg.getAllVertices().get(jid2);
ExecutionVertex vertex = ejv.getTaskVertices()[3];
ExecutionGraphTestUtils.SimpleActorGateway instanceGateway = new ExecutionGraphTestUtils.SimpleActorGateway(TestingUtils.directExecutionContext());
final Instance instance = getInstance(new ActorTaskManagerGateway(instanceGateway));
final SimpleSlot slot = instance.allocateSimpleSlot(jobId);
assertEquals(ExecutionState.CREATED, vertex.getExecutionState());
vertex.deployToSlot(slot);
assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());
TaskDeploymentDescriptor descr = instanceGateway.lastTDD;
assertNotNull(descr);
JobInformation jobInformation = descr.getSerializedJobInformation().deserializeValue(getClass().getClassLoader());
TaskInformation taskInformation = descr.getSerializedTaskInformation().deserializeValue(getClass().getClassLoader());
assertEquals(jobId, jobInformation.getJobId());
assertEquals(jid2, taskInformation.getJobVertexId());
assertEquals(3, descr.getSubtaskIndex());
assertEquals(10, taskInformation.getNumberOfSubtasks());
assertEquals(BatchTask.class.getName(), taskInformation.getInvokableClassName());
assertEquals("v2", taskInformation.getTaskName());
Collection<ResultPartitionDeploymentDescriptor> producedPartitions = descr.getProducedPartitions();
Collection<InputGateDeploymentDescriptor> consumedPartitions = descr.getInputGates();
assertEquals(2, producedPartitions.size());
assertEquals(1, consumedPartitions.size());
Iterator<ResultPartitionDeploymentDescriptor> iteratorProducedPartitions = producedPartitions.iterator();
Iterator<InputGateDeploymentDescriptor> iteratorConsumedPartitions = consumedPartitions.iterator();
assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
assertEquals(10, iteratorConsumedPartitions.next().getInputChannelDeploymentDescriptors().length);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
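The expected counts follow directly from the topology: v2 consumes one data set (produced by v1) and produces two data sets (consumed by v3 and v4), so the descriptor for a v2 subtask carries two produced partitions and one input gate; with ALL_TO_ALL wiring at parallelism 10, each produced partition is split into 10 subpartitions and the gate reads from 10 channels. A worked restatement of that arithmetic with example values, not library code:

// Worked check of the ALL_TO_ALL counts for one v2 subtask (example values only).
int upstreamParallelism = 10;   // v1
int downstreamParallelism = 10; // v3 (and likewise v4)
// Each partition v2 produces is split once per downstream subtask:
int subpartitionsPerProducedPartition = downstreamParallelism; // == 10
// The gate consuming v1's result has one channel per upstream subtask:
int channelsPerInputGate = upstreamParallelism;                // == 10
System.out.println(subpartitionsPerProducedPartition + " subpartitions, "
    + channelsPerInputGate + " input channels");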
Use of org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor in project flink by apache.
In class ExecutionVertex, method createDeploymentDescriptor.
/**
* Creates a task deployment descriptor to deploy a subtask to the given target slot.
*
* TODO: This should actually be in the EXECUTION
*/
TaskDeploymentDescriptor createDeploymentDescriptor(ExecutionAttemptID executionId, SimpleSlot targetSlot, TaskStateHandles taskStateHandles, int attemptNumber) throws ExecutionGraphException {
// Produced intermediate results
List<ResultPartitionDeploymentDescriptor> producedPartitions = new ArrayList<>(resultPartitions.size());
// Consumed intermediate results
List<InputGateDeploymentDescriptor> consumedPartitions = new ArrayList<>(inputEdges.length);
boolean lazyScheduling = getExecutionGraph().getScheduleMode().allowLazyDeployment();
for (IntermediateResultPartition partition : resultPartitions.values()) {
List<List<ExecutionEdge>> consumers = partition.getConsumers();
if (consumers.isEmpty()) {
// TODO This case only exists for tests; currently there has to be exactly one consumer in real jobs!
producedPartitions.add(ResultPartitionDeploymentDescriptor.from(partition, ExecutionConfig.UPPER_BOUND_MAX_PARALLELISM, lazyScheduling));
} else {
Preconditions.checkState(1 == consumers.size(), "Only one consumer supported in the current implementation! Found: " + consumers.size());
List<ExecutionEdge> consumer = consumers.get(0);
ExecutionJobVertex vertex = consumer.get(0).getTarget().getJobVertex();
int maxParallelism = vertex.getMaxParallelism();
producedPartitions.add(ResultPartitionDeploymentDescriptor.from(partition, maxParallelism, lazyScheduling));
}
}
for (ExecutionEdge[] edges : inputEdges) {
InputChannelDeploymentDescriptor[] partitions = InputChannelDeploymentDescriptor.fromEdges(edges, targetSlot, lazyScheduling);
// If the produced partition has multiple consumers registered, we
// need to request the one matching our sub task index.
// TODO Refactor after removing the consumers from the intermediate result partitions
int numConsumerEdges = edges[0].getSource().getConsumers().get(0).size();
int queueToRequest = subTaskIndex % numConsumerEdges;
IntermediateResult consumedIntermediateResult = edges[0].getSource().getIntermediateResult();
final IntermediateDataSetID resultId = consumedIntermediateResult.getId();
final ResultPartitionType partitionType = consumedIntermediateResult.getResultType();
consumedPartitions.add(new InputGateDeploymentDescriptor(resultId, partitionType, queueToRequest, partitions));
}
SerializedValue<JobInformation> serializedJobInformation = getExecutionGraph().getSerializedJobInformation();
SerializedValue<TaskInformation> serializedJobVertexInformation = null;
try {
serializedJobVertexInformation = jobVertex.getSerializedTaskInformation();
} catch (IOException e) {
throw new ExecutionGraphException("Could not create a serialized JobVertexInformation for " + jobVertex.getJobVertexId(), e);
}
return new TaskDeploymentDescriptor(
    serializedJobInformation, serializedJobVertexInformation, executionId,
    targetSlot.getAllocatedSlot().getSlotAllocationId(), subTaskIndex, attemptNumber,
    targetSlot.getRoot().getSlotNumber(), taskStateHandles,
    producedPartitions, consumedPartitions);
}
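The queueToRequest computation above distributes consumer subtasks across the registered consumer edges by a simple modulo. A standalone sketch of that mapping, with example values only:

// Standalone sketch of the subtask-to-queue mapping used above.
// With numConsumerEdges consumers registered on a partition, consumer
// subtask i requests subpartition (i % numConsumerEdges).
int numConsumerEdges = 2; // example value
for (int subTaskIndex = 0; subTaskIndex < 4; subTaskIndex++) {
    int queueToRequest = subTaskIndex % numConsumerEdges;
    System.out.println("subtask " + subTaskIndex + " -> queue " + queueToRequest);
}
// prints: subtask 0 -> queue 0, subtask 1 -> queue 1,
//         subtask 2 -> queue 0, subtask 3 -> queue 1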