Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.
Class PartialInputChannelDeploymentDescriptor, method fromEdge.
// ------------------------------------------------------------------------
/**
* Creates a partial input channel for the given partition and producing task.
*/
public static PartialInputChannelDeploymentDescriptor fromEdge(
        IntermediateResultPartition partition, Execution producer) {

    final ResultPartitionID partitionId = new ResultPartitionID(
            partition.getPartitionId(), producer.getAttemptId());

    final IntermediateResult result = partition.getIntermediateResult();
    final IntermediateDataSetID resultId = result.getId();

    final TaskManagerLocation partitionConnectionInfo = producer.getAssignedResourceLocation();
    final int partitionConnectionIndex = result.getConnectionIndex();

    return new PartialInputChannelDeploymentDescriptor(
            resultId, partitionId, partitionConnectionInfo, partitionConnectionIndex);
}
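Note how the ResultPartitionID pairs the partition's static ID with the producer's execution attempt ID, so a restarted producer yields a distinct ResultPartitionID for the same logical partition. Below is a minimal, self-contained sketch of that pairing, using plain UUIDs in place of Flink's ID classes (all names are illustrative, not Flink API):

import java.util.Objects;
import java.util.UUID;

// Toy model of the (partition ID, producer attempt ID) pairing above.
final class ResultPartitionIdSketch {

    final UUID partitionId;       // stable across producer restarts
    final UUID producerAttemptId; // changes on every execution attempt

    ResultPartitionIdSketch(UUID partitionId, UUID producerAttemptId) {
        this.partitionId = partitionId;
        this.producerAttemptId = producerAttemptId;
    }

    @Override
    public boolean equals(Object o) {
        if (!(o instanceof ResultPartitionIdSketch)) {
            return false;
        }
        ResultPartitionIdSketch other = (ResultPartitionIdSketch) o;
        return partitionId.equals(other.partitionId)
                && producerAttemptId.equals(other.producerAttemptId);
    }

    @Override
    public int hashCode() {
        return Objects.hash(partitionId, producerAttemptId);
    }

    public static void main(String[] args) {
        UUID partition = UUID.randomUUID();
        UUID firstAttempt = UUID.randomUUID();
        UUID restartAttempt = UUID.randomUUID();

        // Same partition, different producer attempts: distinct IDs.
        System.out.println(new ResultPartitionIdSketch(partition, firstAttempt)
                .equals(new ResultPartitionIdSketch(partition, restartAttempt))); // false
    }
}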
Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.
Class ResultPartitionDeploymentDescriptor, method from.
// ------------------------------------------------------------------------
public static ResultPartitionDeploymentDescriptor from(
        IntermediateResultPartition partition, int maxParallelism, boolean lazyScheduling) {

    final IntermediateDataSetID resultId = partition.getIntermediateResult().getId();
    final IntermediateResultPartitionID partitionId = partition.getPartitionId();
    final ResultPartitionType partitionType = partition.getIntermediateResult().getResultType();

    // The produced data is partitioned among a number of subpartitions.
    //
    // If no consumers are known at this point, we use a single subpartition, otherwise we have
    // one for each consuming sub task.
    int numberOfSubpartitions = 1;

    if (!partition.getConsumers().isEmpty() && !partition.getConsumers().get(0).isEmpty()) {
        if (partition.getConsumers().size() > 1) {
            throw new IllegalStateException(
                    "Currently, only a single consumer group per partition is supported.");
        }
        numberOfSubpartitions = partition.getConsumers().get(0).size();
    }

    return new ResultPartitionDeploymentDescriptor(
            resultId, partitionId, partitionType, numberOfSubpartitions, maxParallelism, lazyScheduling);
}
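The only branching in this method is the subpartition count. Below is a minimal, runnable sketch of that rule, with plain lists standing in for the execution graph's consumer structures (names are illustrative, not Flink API):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Toy model of the subpartition-count rule in from(...) above.
final class SubpartitionCountSketch {

    static int numberOfSubpartitions(List<List<String>> consumers) {
        // No consumers known yet: fall back to a single subpartition.
        if (consumers.isEmpty() || consumers.get(0).isEmpty()) {
            return 1;
        }
        if (consumers.size() > 1) {
            throw new IllegalStateException(
                    "Currently, only a single consumer group per partition is supported.");
        }
        // One subpartition per consuming sub task.
        return consumers.get(0).size();
    }

    public static void main(String[] args) {
        System.out.println(numberOfSubpartitions(Collections.emptyList())); // 1
        System.out.println(numberOfSubpartitions(Collections.singletonList(
                Arrays.asList("subtask-0", "subtask-1", "subtask-2"))));    // 3
    }
}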
Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.
Class SingleInputGate, method create.
// ------------------------------------------------------------------------
/**
* Creates an input gate and all of its input channels.
*/
public static SingleInputGate create(
        String owningTaskName, JobID jobId, ExecutionAttemptID executionId,
        InputGateDeploymentDescriptor igdd, NetworkEnvironment networkEnvironment,
        TaskActions taskActions, TaskIOMetricGroup metrics) {

    final IntermediateDataSetID consumedResultId = checkNotNull(igdd.getConsumedResultId());
    final ResultPartitionType consumedPartitionType = checkNotNull(igdd.getConsumedPartitionType());

    final int consumedSubpartitionIndex = igdd.getConsumedSubpartitionIndex();
    checkArgument(consumedSubpartitionIndex >= 0);

    final InputChannelDeploymentDescriptor[] icdd = checkNotNull(igdd.getInputChannelDeploymentDescriptors());

    final SingleInputGate inputGate = new SingleInputGate(
            owningTaskName, jobId, consumedResultId, consumedPartitionType,
            consumedSubpartitionIndex, icdd.length, taskActions, metrics);

    // Create the input channels. There is one input channel for each consumed partition.
    final InputChannel[] inputChannels = new InputChannel[icdd.length];

    int numLocalChannels = 0;
    int numRemoteChannels = 0;
    int numUnknownChannels = 0;

    for (int i = 0; i < inputChannels.length; i++) {
        final ResultPartitionID partitionId = icdd[i].getConsumedPartitionId();
        final ResultPartitionLocation partitionLocation = icdd[i].getConsumedPartitionLocation();

        if (partitionLocation.isLocal()) {
            inputChannels[i] = new LocalInputChannel(inputGate, i, partitionId,
                    networkEnvironment.getResultPartitionManager(),
                    networkEnvironment.getTaskEventDispatcher(),
                    networkEnvironment.getPartitionRequestInitialBackoff(),
                    networkEnvironment.getPartitionRequestMaxBackoff(), metrics);
            numLocalChannels++;
        } else if (partitionLocation.isRemote()) {
            inputChannels[i] = new RemoteInputChannel(inputGate, i, partitionId,
                    partitionLocation.getConnectionId(),
                    networkEnvironment.getConnectionManager(),
                    networkEnvironment.getPartitionRequestInitialBackoff(),
                    networkEnvironment.getPartitionRequestMaxBackoff(), metrics);
            numRemoteChannels++;
        } else if (partitionLocation.isUnknown()) {
            inputChannels[i] = new UnknownInputChannel(inputGate, i, partitionId,
                    networkEnvironment.getResultPartitionManager(),
                    networkEnvironment.getTaskEventDispatcher(),
                    networkEnvironment.getConnectionManager(),
                    networkEnvironment.getPartitionRequestInitialBackoff(),
                    networkEnvironment.getPartitionRequestMaxBackoff(), metrics);
            numUnknownChannels++;
        } else {
            throw new IllegalStateException("Unexpected partition location.");
        }

        inputGate.setInputChannel(partitionId.getPartitionId(), inputChannels[i]);
    }

    LOG.debug("Created {} input channels (local: {}, remote: {}, unknown: {}).",
            inputChannels.length, numLocalChannels, numRemoteChannels, numUnknownChannels);

    return inputGate;
}
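The dispatch above maps each ResultPartitionLocation state to exactly one channel type, with UnknownInputChannel acting as a placeholder until the producer's location is resolved. A compact, self-contained sketch of just that dispatch rule, with the three states modeled as an enum (illustrative names, not Flink API):

// Toy model of the location-based channel dispatch in create(...) above.
final class ChannelDispatchSketch {

    enum Location { LOCAL, REMOTE, UNKNOWN }

    static String channelFor(Location location) {
        switch (location) {
            case LOCAL:
                return "LocalInputChannel";   // same TaskManager: read via the partition manager
            case REMOTE:
                return "RemoteInputChannel";  // different TaskManager: read via the network stack
            case UNKNOWN:
                return "UnknownInputChannel"; // producer location not known yet: placeholder
            default:
                throw new IllegalStateException("Unexpected partition location.");
        }
    }

    public static void main(String[] args) {
        for (Location location : Location.values()) {
            System.out.println(location + " -> " + channelFor(location));
        }
    }
}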
Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.
Class TaskManagerTest, method testUpdateTaskInputPartitionsFailure.
/**
 * Tests that the TaskManager sends a proper exception back to the sender if the update of
 * a task's input partitions fails.
 */
@Test
public void testUpdateTaskInputPartitionsFailure() throws Exception {
    ActorGateway jobManager = null;
    ActorGateway taskManager = null;

    try {
        final ExecutionAttemptID executionAttemptId = new ExecutionAttemptID();

        ActorRef jm = system.actorOf(Props.create(SimpleJobManager.class, leaderSessionID));
        jobManager = new AkkaActorGateway(jm, leaderSessionID);

        taskManager = TestingUtils.createTaskManager(system, jobManager, new Configuration(), true, true);

        TaskDeploymentDescriptor tdd = createTaskDeploymentDescriptor(
                new JobID(), "test job", new JobVertexID(), executionAttemptId,
                new SerializedValue<>(new ExecutionConfig()), "test task", 1, 0, 1, 0,
                new Configuration(), new Configuration(), BlockingNoOpInvokable.class.getName(),
                Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
                Collections.<InputGateDeploymentDescriptor>emptyList(),
                Collections.<BlobKey>emptyList(), Collections.<URL>emptyList(), 0);

        Future<Object> submitResponse = taskManager.ask(new SubmitTask(tdd), timeout);
        Await.result(submitResponse, timeout);

        Future<Object> partitionUpdateResponse = taskManager.ask(
                new TaskMessages.UpdateTaskSinglePartitionInfo(
                        executionAttemptId,
                        new IntermediateDataSetID(),
                        new InputChannelDeploymentDescriptor(
                                new ResultPartitionID(), ResultPartitionLocation.createLocal())),
                timeout);

        try {
            Await.result(partitionUpdateResponse, timeout);
            fail("The update task input partitions message should have failed.");
        } catch (Exception e) {
            // expected
        }
    } finally {
        TestingUtils.stopActor(jobManager);
        TestingUtils.stopActor(taskManager);
    }
}
Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.
Class TaskManagerTest, method testRunJobWithForwardChannel.
@Test
public void testRunJobWithForwardChannel() {
    new JavaTestKit(system) {
        {
            ActorGateway jobManager = null;
            ActorGateway taskManager = null;

            final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);

            try {
                final JobID jid = new JobID();

                JobVertexID vid1 = new JobVertexID();
                JobVertexID vid2 = new JobVertexID();

                final ExecutionAttemptID eid1 = new ExecutionAttemptID();
                final ExecutionAttemptID eid2 = new ExecutionAttemptID();

                ActorRef jm = system.actorOf(Props.create(new SimpleLookupJobManagerCreator(leaderSessionID)));
                jobManager = new AkkaActorGateway(jm, leaderSessionID);

                taskManager = TestingUtils.createTaskManager(system, jobManager, new Configuration(), true, true);

                final ActorGateway tm = taskManager;

                IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();

                List<ResultPartitionDeploymentDescriptor> irpdd = new ArrayList<ResultPartitionDeploymentDescriptor>();
                irpdd.add(new ResultPartitionDeploymentDescriptor(
                        new IntermediateDataSetID(), partitionId, ResultPartitionType.PIPELINED, 1, 1, true));

                InputGateDeploymentDescriptor ircdd = new InputGateDeploymentDescriptor(
                        new IntermediateDataSetID(), ResultPartitionType.PIPELINED, 0,
                        new InputChannelDeploymentDescriptor[] {
                                new InputChannelDeploymentDescriptor(
                                        new ResultPartitionID(partitionId, eid1),
                                        ResultPartitionLocation.createLocal())
                        });

                final TaskDeploymentDescriptor tdd1 = createTaskDeploymentDescriptor(
                        jid, "TestJob", vid1, eid1, new SerializedValue<>(new ExecutionConfig()),
                        "Sender", 1, 0, 1, 0, new Configuration(), new Configuration(),
                        Tasks.Sender.class.getName(), irpdd,
                        Collections.<InputGateDeploymentDescriptor>emptyList(),
                        new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);

                final TaskDeploymentDescriptor tdd2 = createTaskDeploymentDescriptor(
                        jid, "TestJob", vid2, eid2, new SerializedValue<>(new ExecutionConfig()),
                        "Receiver", 7, 2, 7, 0, new Configuration(), new Configuration(),
                        Tasks.Receiver.class.getName(),
                        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
                        Collections.singletonList(ircdd),
                        new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);

                new Within(d) {
                    @Override
                    protected void run() {
                        try {
                            Future<Object> t1Running = tm.ask(
                                    new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid1), timeout);
                            Future<Object> t2Running = tm.ask(
                                    new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid2), timeout);

                            // submit the sender task
                            tm.tell(new SubmitTask(tdd1), testActorGateway);
                            expectMsgEquals(Acknowledge.get());

                            // wait until the sender task is running
                            Await.ready(t1Running, d);

                            // only now (after the sender is running), submit the receiver task
                            tm.tell(new SubmitTask(tdd2), testActorGateway);
                            expectMsgEquals(Acknowledge.get());

                            // wait until the receiver task is running
                            Await.ready(t2Running, d);

                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            Map<ExecutionAttemptID, Task> tasks = expectMsgClass(
                                    TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();

                            Task t1 = tasks.get(eid1);
                            Task t2 = tasks.get(eid2);

                            // the tasks may already have finished and been removed,
                            // so only wait for removal if they are still registered
                            if (t1 != null) {
                                Future<Object> response = tm.ask(
                                        new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid1), timeout);
                                Await.ready(response, d);
                            }

                            if (t2 != null) {
                                Future<Object> response = tm.ask(
                                        new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid2), timeout);
                                Await.ready(response, d);

                                assertEquals(ExecutionState.FINISHED, t2.getExecutionState());
                            }

                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            tasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();

                            assertEquals(0, tasks.size());
                        } catch (Exception e) {
                            e.printStackTrace();
                            fail(e.getMessage());
                        }
                    }
                };
            } catch (Exception e) {
                e.printStackTrace();
                fail(e.getMessage());
            } finally {
                // shut down the actors
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}