Use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.
In class TaskManagerTest, method testRunJobWithForwardChannel:
@Test
public void testRunJobWithForwardChannel() {
    new JavaTestKit(system) {
        {
            ActorGateway jobManager = null;
            ActorGateway taskManager = null;
            final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);
            try {
                final JobID jid = new JobID();
                JobVertexID vid1 = new JobVertexID();
                JobVertexID vid2 = new JobVertexID();
                final ExecutionAttemptID eid1 = new ExecutionAttemptID();
                final ExecutionAttemptID eid2 = new ExecutionAttemptID();

                ActorRef jm = system.actorOf(Props.create(new SimpleLookupJobManagerCreator(leaderSessionID)));
                jobManager = new AkkaActorGateway(jm, leaderSessionID);
                taskManager = TestingUtils.createTaskManager(system, jobManager, new Configuration(), true, true);
                final ActorGateway tm = taskManager;

                IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();

                List<ResultPartitionDeploymentDescriptor> irpdd = new ArrayList<ResultPartitionDeploymentDescriptor>();
                irpdd.add(new ResultPartitionDeploymentDescriptor(
                        new IntermediateDataSetID(), partitionId, ResultPartitionType.PIPELINED, 1, 1, true));

                InputGateDeploymentDescriptor ircdd = new InputGateDeploymentDescriptor(
                        new IntermediateDataSetID(),
                        ResultPartitionType.PIPELINED,
                        0,
                        new InputChannelDeploymentDescriptor[] {
                                new InputChannelDeploymentDescriptor(
                                        new ResultPartitionID(partitionId, eid1),
                                        ResultPartitionLocation.createLocal())
                        });

                final TaskDeploymentDescriptor tdd1 = createTaskDeploymentDescriptor(
                        jid, "TestJob", vid1, eid1,
                        new SerializedValue<>(new ExecutionConfig()),
                        "Sender", 1, 0, 1, 0,
                        new Configuration(), new Configuration(),
                        Tasks.Sender.class.getName(),
                        irpdd,
                        Collections.<InputGateDeploymentDescriptor>emptyList(),
                        new ArrayList<BlobKey>(),
                        Collections.<URL>emptyList(),
                        0);

                final TaskDeploymentDescriptor tdd2 = createTaskDeploymentDescriptor(
                        jid, "TestJob", vid2, eid2,
                        new SerializedValue<>(new ExecutionConfig()),
                        "Receiver", 7, 2, 7, 0,
                        new Configuration(), new Configuration(),
                        Tasks.Receiver.class.getName(),
                        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
                        Collections.singletonList(ircdd),
                        new ArrayList<BlobKey>(),
                        Collections.<URL>emptyList(),
                        0);

                // 'd' is the timeout duration defined in the enclosing test class
                new Within(d) {

                    @Override
                    protected void run() {
                        try {
                            Future<Object> t1Running = tm.ask(
                                    new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid1), timeout);
                            Future<Object> t2Running = tm.ask(
                                    new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid2), timeout);

                            // submit the sender task
                            tm.tell(new SubmitTask(tdd1), testActorGateway);
                            expectMsgEquals(Acknowledge.get());

                            // wait until the sender task is running
                            Await.ready(t1Running, d);

                            // only now (after the sender is running), submit the receiver task
                            tm.tell(new SubmitTask(tdd2), testActorGateway);
                            expectMsgEquals(Acknowledge.get());

                            // wait until the receiver task is running
                            Await.ready(t2Running, d);

                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            Map<ExecutionAttemptID, Task> tasks =
                                    expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();

                            Task t1 = tasks.get(eid1);
                            Task t2 = tasks.get(eid2);

                            // both tasks may already have finished and been removed before
                            // we get to the check, so we need to guard the check
                            if (t1 != null) {
                                Future<Object> response = tm.ask(
                                        new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid1), timeout);
                                Await.ready(response, d);
                            }
                            if (t2 != null) {
                                Future<Object> response = tm.ask(
                                        new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid2), timeout);
                                Await.ready(response, d);
                                assertEquals(ExecutionState.FINISHED, t2.getExecutionState());
                            }

                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            tasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
                            assertEquals(0, tasks.size());
                        } catch (Exception e) {
                            e.printStackTrace();
                            fail(e.getMessage());
                        }
                    }
                };
            } catch (Exception e) {
                e.printStackTrace();
                fail(e.getMessage());
            } finally {
                // shut down the actors
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}
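The key wiring in this test is that the same IntermediateResultPartitionID appears on both sides of the forward channel: in the sender's ResultPartitionDeploymentDescriptor, and, paired with the sender's ExecutionAttemptID, in the receiver's InputChannelDeploymentDescriptor. A minimal sketch of just that linkage, with all deployment plumbing omitted (class names as in the test above):

// Producer side: a partition is announced under a fresh IntermediateResultPartitionID.
IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();
ExecutionAttemptID producerAttempt = new ExecutionAttemptID();

// Consumer side: a ResultPartitionID is the pair
// (IntermediateResultPartitionID, producer ExecutionAttemptID),
// so the input channel addresses exactly the partition announced above.
ResultPartitionID consumedPartition = new ResultPartitionID(partitionId, producerAttempt);

// Both sides now refer to the same logical partition instance.
assert consumedPartition.getPartitionId().equals(partitionId);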
Use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.
In class InputGateConcurrentTest, method testConsumptionWithMixedChannels:
@Test
public void testConsumptionWithMixedChannels() throws Exception {
    final int numChannels = 61;
    final int numLocalChannels = 20;
    final int buffersPerChannel = 1000;

    // fill the local/remote decision
    List<Boolean> localOrRemote = new ArrayList<>(numChannels);
    for (int i = 0; i < numChannels; i++) {
        localOrRemote.add(i < numLocalChannels);
    }
    Collections.shuffle(localOrRemote);

    final ConnectionManager connManager = createDummyConnectionManager();
    final ResultPartition resultPartition = mock(ResultPartition.class);
    final PipelinedSubpartition[] localPartitions = new PipelinedSubpartition[numLocalChannels];
    final ResultPartitionManager resultPartitionManager = createResultPartitionManager(localPartitions);
    final Source[] sources = new Source[numChannels];

    final SingleInputGate gate = new SingleInputGate(
            "Test Task Name",
            new JobID(),
            new IntermediateDataSetID(),
            ResultPartitionType.PIPELINED,
            0,
            numChannels,
            mock(TaskActions.class),
            new UnregisteredTaskMetricsGroup.DummyTaskIOMetricGroup());

    for (int i = 0, local = 0; i < numChannels; i++) {
        if (localOrRemote.get(i)) {
            // local channel
            PipelinedSubpartition psp = new PipelinedSubpartition(0, resultPartition);
            localPartitions[local++] = psp;
            sources[i] = new PipelinedSubpartitionSource(psp);

            LocalInputChannel channel = new LocalInputChannel(
                    gate, i, new ResultPartitionID(), resultPartitionManager,
                    mock(TaskEventDispatcher.class),
                    new UnregisteredTaskMetricsGroup.DummyTaskIOMetricGroup());
            gate.setInputChannel(new IntermediateResultPartitionID(), channel);
        } else {
            // remote channel
            RemoteInputChannel channel = new RemoteInputChannel(
                    gate, i, new ResultPartitionID(), mock(ConnectionID.class), connManager, 0, 0,
                    new UnregisteredTaskMetricsGroup.DummyTaskIOMetricGroup());
            gate.setInputChannel(new IntermediateResultPartitionID(), channel);
            sources[i] = new RemoteChannelSource(channel);
        }
    }

    ProducerThread producer = new ProducerThread(sources, numChannels * buffersPerChannel, 4, 10);
    ConsumerThread consumer = new ConsumerThread(gate, numChannels * buffersPerChannel);
    producer.start();
    consumer.start();

    // the 'sync()' call checks for exceptions and failed assertions
    producer.sync();
    consumer.sync();
}
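Note that setInputChannel keys each channel by an IntermediateResultPartitionID. This test registers every channel under a fresh random ID because nothing here looks channels up by partition; in a real deployment the ID would come from the InputChannelDeploymentDescriptor. A minimal sketch of that registration pattern (gate construction elided; createChannel is a hypothetical helper for this sketch):

// Register each input channel under its producing partition's ID. Fresh random
// IDs suffice here because the test never routes by partition; real code uses
// the ID carried in the deployment descriptor.
for (int i = 0; i < numChannels; i++) {
    IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();
    InputChannel channel = createChannel(gate, i); // hypothetical helper
    gate.setInputChannel(partitionId, channel);
}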
Use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.
In class SingleInputGateTest, method testBasicGetNextLogic:
/**
 * Tests basic correctness of buffer-or-event interleaving and correct <code>null</code> return
 * value after receiving all end-of-partition events.
 */
@Test(timeout = 120 * 1000)
public void testBasicGetNextLogic() throws Exception {
    // Setup
    final SingleInputGate inputGate = new SingleInputGate(
            "Test Task Name",
            new JobID(),
            new IntermediateDataSetID(),
            ResultPartitionType.PIPELINED,
            0,
            2,
            mock(TaskActions.class),
            new UnregisteredTaskMetricsGroup.DummyTaskIOMetricGroup());
    assertEquals(ResultPartitionType.PIPELINED, inputGate.getConsumedPartitionType());

    final TestInputChannel[] inputChannels = new TestInputChannel[] {
            new TestInputChannel(inputGate, 0),
            new TestInputChannel(inputGate, 1)
    };
    inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannels[0].getInputChannel());
    inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannels[1].getInputChannel());

    // Test
    inputChannels[0].readBuffer();
    inputChannels[0].readBuffer();
    inputChannels[1].readBuffer();
    inputChannels[1].readEndOfPartitionEvent();
    inputChannels[0].readEndOfPartitionEvent();

    inputGate.notifyChannelNonEmpty(inputChannels[0].getInputChannel());
    inputGate.notifyChannelNonEmpty(inputChannels[1].getInputChannel());

    verifyBufferOrEvent(inputGate, true, 0);
    verifyBufferOrEvent(inputGate, true, 1);
    verifyBufferOrEvent(inputGate, true, 0);
    verifyBufferOrEvent(inputGate, false, 1);
    verifyBufferOrEvent(inputGate, false, 0);

    // Return null when the input gate has received all end-of-partition events
    assertTrue(inputGate.isFinished());
    assertNull(inputGate.getNextBufferOrEvent());
}
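verifyBufferOrEvent is a helper defined elsewhere in SingleInputGateTest and not shown in this excerpt. A plausible sketch, inferred from the call sites above (the signature and assertions are assumptions): it pulls the next BufferOrEvent from the gate and asserts its kind and originating channel index.

// Sketch of the helper assumed by the test above: drain one element from the
// gate and check whether it is a buffer (vs. an event) and which channel it
// came from.
static void verifyBufferOrEvent(
        InputGate inputGate,
        boolean expectedIsBuffer,
        int expectedChannelIndex) throws Exception {
    BufferOrEvent boe = inputGate.getNextBufferOrEvent();
    assertEquals(expectedIsBuffer, boe.isBuffer());
    assertEquals(expectedChannelIndex, boe.getChannelIndex());
}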
Use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.
In class DefaultExecutionGraph, method createResultPartitionId:
ResultPartitionID createResultPartitionId(final IntermediateResultPartitionID resultPartitionId) {
    final SchedulingResultPartition schedulingResultPartition =
            getSchedulingTopology().getResultPartition(resultPartitionId);
    final SchedulingExecutionVertex producer = schedulingResultPartition.getProducer();
    final ExecutionVertexID producerId = producer.getId();
    final JobVertexID jobVertexId = producerId.getJobVertexId();
    final ExecutionJobVertex jobVertex = getJobVertex(jobVertexId);
    checkNotNull(jobVertex, "Unknown job vertex %s", jobVertexId);

    final ExecutionVertex[] taskVertices = jobVertex.getTaskVertices();
    final int subtaskIndex = producerId.getSubtaskIndex();
    checkState(
            subtaskIndex < taskVertices.length,
            "Invalid subtask index %d for job vertex %s",
            subtaskIndex,
            jobVertexId);

    final ExecutionVertex taskVertex = taskVertices[subtaskIndex];
    final Execution execution = taskVertex.getCurrentExecutionAttempt();
    return new ResultPartitionID(resultPartitionId, execution.getAttemptId());
}
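The method resolves a job-graph-level partition ID, which is stable across restarts, into the runtime ResultPartitionID of the partition's current producing attempt. A hypothetical caller, for illustration only (the variable names are assumptions; getPartitionId is the accessor used in the test below):

// Hypothetical usage: turn the static partition ID into the runtime ID of the
// current producing execution attempt.
IntermediateResultPartitionID staticId = intermediateResultPartition.getPartitionId();
ResultPartitionID runtimeId = executionGraph.createResultPartitionId(staticId);
// runtimeId pairs staticId with the producer's current ExecutionAttemptID, so
// the same logical partition gets a fresh runtime ID after a producer restart.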
Use of org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID in project flink by apache.
In class DefaultExecutionGraphConstructionTest, method testRegisterConsumedPartitionGroupToEdgeManager:
@Test
public void testRegisterConsumedPartitionGroupToEdgeManager() throws Exception {
    JobVertex v1 = new JobVertex("source");
    JobVertex v2 = new JobVertex("sink");
    v1.setParallelism(2);
    v2.setParallelism(2);
    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

    List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2));
    ExecutionGraph eg = createDefaultExecutionGraph(ordered);
    eg.attachJobGraph(ordered);

    IntermediateResult result =
            Objects.requireNonNull(eg.getJobVertex(v1.getID())).getProducedDataSets()[0];
    IntermediateResultPartition partition1 = result.getPartitions()[0];
    IntermediateResultPartition partition2 = result.getPartitions()[1];
    assertEquals(
            partition1.getConsumedPartitionGroups().get(0),
            partition2.getConsumedPartitionGroups().get(0));

    ConsumedPartitionGroup consumedPartitionGroup = partition1.getConsumedPartitionGroups().get(0);
    Set<IntermediateResultPartitionID> partitionIds = new HashSet<>();
    for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
        partitionIds.add(partitionId);
    }
    assertThat(partitionIds, containsInAnyOrder(partition1.getPartitionId(), partition2.getPartitionId()));
}
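Since ConsumedPartitionGroup is iterable over IntermediateResultPartitionID, the collection loop above can also be written with streams. A sketch of the equivalent assertion, behavior unchanged (requires java.util.stream.Collectors and java.util.stream.StreamSupport):

// Same check as above: both partitions of the ALL_TO_ALL result belong to the
// single ConsumedPartitionGroup shared by the consumers.
Set<IntermediateResultPartitionID> partitionIds =
        StreamSupport.stream(consumedPartitionGroup.spliterator(), false)
                .collect(Collectors.toSet());
assertThat(partitionIds, containsInAnyOrder(
        partition1.getPartitionId(), partition2.getPartitionId()));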