Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.
In class TaskManagerTest, method testLocalPartitionNotFound:
/**
* Tests that repeated local {@link PartitionNotFoundException}s ultimately fail the receiver.
*/
@Test
public void testLocalPartitionNotFound() throws Exception {
new JavaTestKit(system) {
{
ActorGateway jobManager = null;
ActorGateway taskManager = null;
final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);
try {
final IntermediateDataSetID resultId = new IntermediateDataSetID();
// Create the JM
ActorRef jm = system.actorOf(Props.create(new SimplePartitionStateLookupJobManagerCreator(leaderSessionID, getTestActor())));
jobManager = new AkkaActorGateway(jm, leaderSessionID);
final Configuration config = new Configuration();
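// Exponential backoff for partition requests: start at 100 ms, cap at 200 ms.
// The small cap makes the gate exhaust its retries (and fail the task) quickly.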
config.setInteger(TaskManagerOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
config.setInteger(TaskManagerOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);
taskManager = TestingUtils.createTaskManager(system, jobManager, config, true, true);
// ---------------------------------------------------------------------------------
final ActorGateway tm = taskManager;
final JobID jid = new JobID();
final JobVertexID vid = new JobVertexID();
final ExecutionAttemptID eid = new ExecutionAttemptID();
final ResultPartitionID partitionId = new ResultPartitionID();
// Local location (on the same TM though) for the partition
final ResultPartitionLocation loc = ResultPartitionLocation.createLocal();
final InputChannelDeploymentDescriptor[] icdd = new InputChannelDeploymentDescriptor[] { new InputChannelDeploymentDescriptor(partitionId, loc) };
final InputGateDeploymentDescriptor igdd = new InputGateDeploymentDescriptor(resultId, ResultPartitionType.PIPELINED, 0, icdd);
final TaskDeploymentDescriptor tdd = createTaskDeploymentDescriptor(
        jid, "TestJob", vid, eid,
        new SerializedValue<>(new ExecutionConfig()),
        "Receiver", 1, 0, 1, 0,
        new Configuration(), new Configuration(),
        Tasks.AgnosticReceiver.class.getName(),
        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
        Collections.singletonList(igdd),
        Collections.<BlobKey>emptyList(),
        Collections.<URL>emptyList(),
        0);
new Within(new FiniteDuration(120, TimeUnit.SECONDS)) {
@Override
protected void run() {
// Submit the task
tm.tell(new SubmitTask(tdd), testActorGateway);
expectMsgClass(Acknowledge.get().getClass());
// Wait to be notified about the final execution state by the mock JM
TaskExecutionState msg = expectMsgClass(TaskExecutionState.class);
// The task should fail after repeated requests
assertEquals(ExecutionState.FAILED, msg.getExecutionState());
Throwable error = msg.getError(getClass().getClassLoader());
if (error.getClass() != PartitionNotFoundException.class) {
error.printStackTrace();
fail("Wrong exception: " + error.getMessage());
}
}
};
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
} finally {
TestingUtils.stopActor(taskManager);
TestingUtils.stopActor(jobManager);
}
}
};
}
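What drives the failure is the input gate's bounded exponential backoff between partition requests, configured through the two options set above. Below is a minimal sketch of such a retry policy; the class and method names are illustrative, not Flink's actual internals, which live inside the input channels and differ in detail.
// Sketch of a bounded exponential backoff as configured above:
// initial = 100 ms, max = 200 ms. Names are illustrative, not Flink's.
final class PartitionRequestBackoff {

    private final int maxBackoffMillis;
    private int currentBackoffMillis;

    PartitionRequestBackoff(int initialBackoffMillis, int maxBackoffMillis) {
        this.currentBackoffMillis = initialBackoffMillis;
        this.maxBackoffMillis = maxBackoffMillis;
    }

    /** Returns the next wait in milliseconds, or -1 once the retries are exhausted. */
    int nextBackoffMillis() {
        if (currentBackoffMillis > maxBackoffMillis) {
            // Give up: at this point the PartitionNotFoundException becomes fatal
            // and the task transitions to ExecutionState.FAILED, as asserted above.
            return -1;
        }
        int backoff = currentBackoffMillis;
        currentBackoffMillis *= 2; // exponential growth, bounded by the max
        return backoff;
    }
}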
Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.
In class TaskTest, method setInputGate:
// ------------------------------------------------------------------------
private void setInputGate(Task task, SingleInputGate inputGate) {
try {
Field f = Task.class.getDeclaredField("inputGates");
f.setAccessible(true);
f.set(task, new SingleInputGate[] { inputGate });
Map<IntermediateDataSetID, SingleInputGate> byId = new HashMap<>(1);
byId.put(inputGate.getConsumedResultId(), inputGate);
f = Task.class.getDeclaredField("inputGatesById");
f.setAccessible(true);
f.set(task, byId);
} catch (Exception e) {
throw new RuntimeException("Modifying the task state failed", e);
}
}
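A typical call site for this helper might look like the sketch below. The mocked gate only needs to answer getConsumedResultId(), since that is the key the helper uses for the inputGatesById map; the task instance is assumed to come from the test's usual setup.
// Hypothetical usage of setInputGate with a Mockito mock. Assumes static
// imports of org.mockito.Mockito.* and org.junit.Assert.*, and a `task`
// created by the surrounding test.
IntermediateDataSetID resultId = new IntermediateDataSetID();
SingleInputGate inputGate = mock(SingleInputGate.class);
when(inputGate.getConsumedResultId()).thenReturn(resultId);

setInputGate(task, inputGate);

// The task now resolves the mocked gate by its IntermediateDataSetID.
assertSame(inputGate, task.getInputGateById(resultId));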
Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.
In class TaskExecutor, method updatePartitions:
// ----------------------------------------------------------------------
// Partition lifecycle RPCs
// ----------------------------------------------------------------------
@RpcMethod
public Acknowledge updatePartitions(final ExecutionAttemptID executionAttemptID, Iterable<PartitionInfo> partitionInfos) throws PartitionException {
final Task task = taskSlotTable.getTask(executionAttemptID);
if (task != null) {
for (final PartitionInfo partitionInfo : partitionInfos) {
IntermediateDataSetID intermediateDataSetID = partitionInfo.getIntermediateDataSetID();
final SingleInputGate singleInputGate = task.getInputGateById(intermediateDataSetID);
if (singleInputGate != null) {
// Run asynchronously because it might be blocking
getRpcService().execute(new Runnable() {
@Override
public void run() {
try {
singleInputGate.updateInputChannel(partitionInfo.getInputChannelDeploymentDescriptor());
} catch (IOException | InterruptedException e) {
log.error("Could not update input data location for task {}. Trying to fail task.", task.getTaskInfo().getTaskName(), e);
try {
task.failExternally(e);
} catch (RuntimeException re) {
// TODO: Check whether we need this or make the exception in failExternally checked
log.error("Failed canceling task with execution ID {} after task update failure.", executionAttemptID, re);
}
}
}
});
} else {
throw new PartitionException("No reader with ID " + intermediateDataSetID + " for task " + executionAttemptID + " was found.");
}
}
return Acknowledge.get();
} else {
log.debug("Discard update for input partitions of task {}. Task is no longer running.", executionAttemptID);
return Acknowledge.get();
}
}
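On the caller's side, a PartitionInfo simply pairs the consumed result's IntermediateDataSetID with the new channel location. The sketch below drives the RPC method directly; it assumes a PartitionInfo constructor matching the two getters used above, and the `taskExecutor`, `executionAttemptID`, and `consumedResultId` variables stand in for the usual test setup.
// Hedged sketch: build a PartitionInfo for the result consumed by the task's
// input gate and push it through updatePartitions. Local variable names are
// illustrative; taskExecutor and executionAttemptID come from test setup.
InputChannelDeploymentDescriptor descriptor = new InputChannelDeploymentDescriptor(
        new ResultPartitionID(), ResultPartitionLocation.createLocal());
PartitionInfo partitionInfo = new PartitionInfo(consumedResultId, descriptor);

Acknowledge ack = taskExecutor.updatePartitions(
        executionAttemptID, Collections.singletonList(partitionInfo));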
Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.
In class ExecutionGraphConstructionTest, method testCannotConnectMissingId:
@Test
public void testCannotConnectMissingId() throws Exception {
final JobID jobId = new JobID();
final String jobName = "Test Job Sample Name";
final Configuration cfg = new Configuration();
// construct part one of the execution graph
JobVertex v1 = new JobVertex("vertex1");
v1.setParallelism(7);
v1.setInvokableClass(AbstractInvokable.class);
List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1));
ExecutionGraph eg = new ExecutionGraph(
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        jobId, jobName, cfg,
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new Scheduler(TestingUtils.defaultExecutionContext()));
try {
eg.attachJobGraph(ordered);
} catch (JobException e) {
e.printStackTrace();
fail("Job failed with exception: " + e.getMessage());
}
// attach the second part of the graph
JobVertex v2 = new JobVertex("vertex2");
v2.setInvokableClass(AbstractInvokable.class);
v2.connectIdInput(new IntermediateDataSetID(), DistributionPattern.ALL_TO_ALL);
List<JobVertex> ordered2 = new ArrayList<JobVertex>(Arrays.asList(v2));
try {
eg.attachJobGraph(ordered2);
fail("Attached wrong jobgraph");
} catch (JobException e) {
// expected
}
}
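For contrast, the same connection succeeds when the ID is registered as one of the producer's result data sets before that part of the graph is attached. The following standalone sketch of the happy path assumes JobVertex#createAndAddResultDataSet(IntermediateDataSetID, ResultPartitionType) is available in this version of Flink, as used by the sibling test testAttachViaIds.
// Sketch of the matching happy path. The data set must be registered on the
// producer *before* the first attachJobGraph call, so the ExecutionGraph
// knows the ID when the consumer is attached later.
IntermediateDataSetID knownId = new IntermediateDataSetID();
JobVertex producer = new JobVertex("producer");
producer.setInvokableClass(AbstractInvokable.class);
producer.createAndAddResultDataSet(knownId, ResultPartitionType.PIPELINED);
eg.attachJobGraph(new ArrayList<>(Arrays.asList(producer)));

JobVertex consumer = new JobVertex("consumer");
consumer.setInvokableClass(AbstractInvokable.class);
consumer.connectIdInput(knownId, DistributionPattern.ALL_TO_ALL);
eg.attachJobGraph(new ArrayList<>(Arrays.asList(consumer))); // succeeds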
Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.
In class InputGateConcurrentTest, method testConsumptionWithMixedChannels:
@Test
public void testConsumptionWithMixedChannels() throws Exception {
final int numChannels = 61;
final int numLocalChannels = 20;
final int buffersPerChannel = 1000;
// fill the local/remote decision
List<Boolean> localOrRemote = new ArrayList<>(numChannels);
for (int i = 0; i < numChannels; i++) {
localOrRemote.add(i < numLocalChannels);
}
Collections.shuffle(localOrRemote);
final ConnectionManager connManager = createDummyConnectionManager();
final ResultPartition resultPartition = mock(ResultPartition.class);
final PipelinedSubpartition[] localPartitions = new PipelinedSubpartition[numLocalChannels];
final ResultPartitionManager resultPartitionManager = createResultPartitionManager(localPartitions);
final Source[] sources = new Source[numChannels];
final SingleInputGate gate = new SingleInputGate(
        "Test Task Name", new JobID(), new IntermediateDataSetID(),
        ResultPartitionType.PIPELINED, 0, numChannels,
        mock(TaskActions.class),
        new UnregisteredTaskMetricsGroup.DummyTaskIOMetricGroup());
for (int i = 0, local = 0; i < numChannels; i++) {
if (localOrRemote.get(i)) {
// local channel
PipelinedSubpartition psp = new PipelinedSubpartition(0, resultPartition);
localPartitions[local++] = psp;
sources[i] = new PipelinedSubpartitionSource(psp);
LocalInputChannel channel = new LocalInputChannel(
        gate, i, new ResultPartitionID(), resultPartitionManager,
        mock(TaskEventDispatcher.class),
        new UnregisteredTaskMetricsGroup.DummyTaskIOMetricGroup());
gate.setInputChannel(new IntermediateResultPartitionID(), channel);
} else {
// remote channel
RemoteInputChannel channel = new RemoteInputChannel(
        gate, i, new ResultPartitionID(), mock(ConnectionID.class),
        connManager, 0, 0,
        new UnregisteredTaskMetricsGroup.DummyTaskIOMetricGroup());
gate.setInputChannel(new IntermediateResultPartitionID(), channel);
sources[i] = new RemoteChannelSource(channel);
}
}
ProducerThread producer = new ProducerThread(sources, numChannels * buffersPerChannel, 4, 10);
ConsumerThread consumer = new ConsumerThread(gate, numChannels * buffersPerChannel);
producer.start();
consumer.start();
// the 'sync()' call checks for exceptions and failed assertions
producer.sync();
consumer.sync();
}
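The consumer half of the test presumably drains the gate with a loop along the lines sketched below; this is not the actual ConsumerThread code. getNextBufferOrEvent() blocks until any of the 61 channels, local or remote, has data, so every produced buffer surfaces through the same gate.
// Sketch of the drain loop: every produced buffer arrives through the gate,
// regardless of whether its channel is backed by a local view or the network.
int received = 0;
while (received < numChannels * buffersPerChannel) {
    BufferOrEvent boe = gate.getNextBufferOrEvent();
    if (boe.isBuffer()) {
        boe.getBuffer().recycle(); // release the buffer back to its pool
        received++;
    }
}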