use of org.apache.flink.runtime.executiongraph.ExecutionVertex in project flink by apache.
the class StackTraceSampleCoordinatorTest method testCollectStackTraceForUnknownTask.
/** Tests that collecting for an unknown task fails. */
@Test(expected = IllegalArgumentException.class)
public void testCollectStackTraceForUnknownTask() throws Exception {
    ExecutionVertex[] vertices = new ExecutionVertex[] {
        mockExecutionVertex(new ExecutionAttemptID(), ExecutionState.RUNNING, true)
    };
    coord.triggerStackTraceSample(vertices, 1, Time.milliseconds(100L), 0);
    // Collect with a fresh ExecutionAttemptID that was never part of the triggered sample.
    coord.collectStackTraces(0, new ExecutionAttemptID(), new ArrayList<StackTraceElement[]>());
}
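The snippets in this class rely on a shared actor system and a coord field (the StackTraceSampleCoordinator under test) that the listing does not show. Below is a minimal fixture sketch, assuming the same constructor the timeout test further down uses (a dispatcher plus a collection timeout in milliseconds); the field names, the AkkaUtils-based setup, and the 60 000 ms default are illustrative guesses rather than the actual Flink test source.

import akka.actor.ActorSystem;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.akka.AkkaUtils;
import org.junit.After;
import org.junit.Before;

// Hypothetical fixture sketch; not the actual Flink test source.
public class StackTraceSampleCoordinatorTest {

    private ActorSystem system;
    private StackTraceSampleCoordinator coord;

    @Before
    public void setUp() throws Exception {
        // Local actor system whose dispatcher drives the coordinator's asynchronous work.
        system = AkkaUtils.createLocalActorSystem(new Configuration());
        // Generous collection timeout so the regular tests never hit it; the timeout
        // test below re-creates coord with a 100 ms timeout instead.
        coord = new StackTraceSampleCoordinator(system.dispatcher(), 60000);
    }

    @After
    public void tearDown() {
        if (system != null) {
            system.shutdown();
        }
    }
}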
use of org.apache.flink.runtime.executiongraph.ExecutionVertex in project flink by apache.
the class StackTraceSampleCoordinatorTest method testCancelStackTraceSample.
/** Tests cancelling of a pending sample. */
@Test
public void testCancelStackTraceSample() throws Exception {
    ExecutionVertex[] vertices = new ExecutionVertex[] {
        mockExecutionVertex(new ExecutionAttemptID(), ExecutionState.RUNNING, true)
    };
    Future<StackTraceSample> sampleFuture = coord.triggerStackTraceSample(vertices, 1, Time.milliseconds(100L), 0);
    assertFalse(sampleFuture.isDone());
    // Cancel
    coord.cancelStackTraceSample(0, null);
    // Verify completed
    assertTrue(sampleFuture.isDone());
    // Verify no more pending samples
    assertEquals(0, coord.getNumberOfPendingSamples());
}
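All of these tests build their vertices through a mockExecutionVertex(executionId, state, sendSuccess) helper that is not part of the listing. A rough Mockito-based sketch of what such a helper could look like is shown below, limited to the getCurrentExecutionAttempt(), getAttemptId() and getState() calls the tests actually make; the real helper presumably also stubs requestStackTraceSample(...) according to the sendSuccess flag, which is omitted here.

// Illustrative sketch only; the actual helper in Flink's test additionally stubs
// Execution#requestStackTraceSample so that triggering succeeds or fails
// depending on the sendSuccess flag.
private static ExecutionVertex mockExecutionVertex(
        ExecutionAttemptID executionId,
        ExecutionState state,
        boolean sendSuccess) {

    Execution execution = Mockito.mock(Execution.class);
    Mockito.when(execution.getAttemptId()).thenReturn(executionId);
    Mockito.when(execution.getState()).thenReturn(state);

    ExecutionVertex vertex = Mockito.mock(ExecutionVertex.class);
    Mockito.when(vertex.getCurrentExecutionAttempt()).thenReturn(execution);

    // sendSuccess is accepted but unused in this sketch (see comment above).
    return vertex;
}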
use of org.apache.flink.runtime.executiongraph.ExecutionVertex in project flink by apache.
the class StackTraceSampleCoordinatorTest method testTriggerStackTraceSample.
/** Tests simple trigger and collect of stack trace samples. */
@Test
public void testTriggerStackTraceSample() throws Exception {
    ExecutionVertex[] vertices = new ExecutionVertex[] {
        mockExecutionVertex(new ExecutionAttemptID(), ExecutionState.RUNNING, true),
        mockExecutionVertex(new ExecutionAttemptID(), ExecutionState.RUNNING, true),
        mockExecutionVertex(new ExecutionAttemptID(), ExecutionState.RUNNING, true),
        mockExecutionVertex(new ExecutionAttemptID(), ExecutionState.RUNNING, true)
    };
    int numSamples = 1;
    Time delayBetweenSamples = Time.milliseconds(100L);
    int maxStackTraceDepth = 0;
    Future<StackTraceSample> sampleFuture = coord.triggerStackTraceSample(vertices, numSamples, delayBetweenSamples, maxStackTraceDepth);
    // Verify messages have been sent
    for (ExecutionVertex vertex : vertices) {
        ExecutionAttemptID expectedExecutionId = vertex.getCurrentExecutionAttempt().getAttemptId();
        TriggerStackTraceSample expectedMsg = new TriggerStackTraceSample(
            0, expectedExecutionId, numSamples, delayBetweenSamples, maxStackTraceDepth);
        verify(vertex.getCurrentExecutionAttempt()).requestStackTraceSample(
            eq(0), eq(numSamples), eq(delayBetweenSamples), eq(maxStackTraceDepth), any(Time.class));
    }
    assertFalse(sampleFuture.isDone());
    StackTraceElement[] stackTraceSample = Thread.currentThread().getStackTrace();
    List<StackTraceElement[]> traces = new ArrayList<>();
    traces.add(stackTraceSample);
    traces.add(stackTraceSample);
    traces.add(stackTraceSample);
    // Collect stack traces
    for (int i = 0; i < vertices.length; i++) {
        ExecutionAttemptID executionId = vertices[i].getCurrentExecutionAttempt().getAttemptId();
        coord.collectStackTraces(0, executionId, traces);
        if (i == vertices.length - 1) {
            assertTrue(sampleFuture.isDone());
        } else {
            assertFalse(sampleFuture.isDone());
        }
    }
    // Verify completed stack trace sample
    StackTraceSample sample = sampleFuture.get();
    assertEquals(0, sample.getSampleId());
    assertTrue(sample.getEndTime() >= sample.getStartTime());
    Map<ExecutionAttemptID, List<StackTraceElement[]>> tracesByTask = sample.getStackTraces();
    for (ExecutionVertex vertex : vertices) {
        ExecutionAttemptID executionId = vertex.getCurrentExecutionAttempt().getAttemptId();
        List<StackTraceElement[]> sampleTraces = tracesByTask.get(executionId);
        assertNotNull("Task not found", sampleTraces);
        assertEquals(traces, sampleTraces);
    }
    // Verify no more pending sample
    assertEquals(0, coord.getNumberOfPendingSamples());
    // Verify no error on late collect
    coord.collectStackTraces(0, vertices[0].getCurrentExecutionAttempt().getAttemptId(), traces);
}
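Once all tasks have reported, the completed StackTraceSample is just a sample id, a start/end timestamp, and a map from ExecutionAttemptID to the traces collected for that attempt. The small hypothetical consumer below only illustrates the getters exercised by the assertions above; it is not part of the Flink test.

// Hypothetical helper for illustration; uses only the StackTraceSample getters
// exercised above (getSampleId, getStartTime, getEndTime, getStackTraces).
static void printSample(StackTraceSample sample) {
    System.out.printf("Sample %d took %d ms%n",
        sample.getSampleId(), sample.getEndTime() - sample.getStartTime());

    for (Map.Entry<ExecutionAttemptID, List<StackTraceElement[]>> entry
            : sample.getStackTraces().entrySet()) {
        System.out.println("Task " + entry.getKey() + ": " + entry.getValue().size() + " traces");
        for (StackTraceElement[] trace : entry.getValue()) {
            for (StackTraceElement frame : trace) {
                System.out.println("    at " + frame);
            }
        }
    }
}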
use of org.apache.flink.runtime.executiongraph.ExecutionVertex in project flink by apache.
the class StackTraceSampleCoordinatorTest method testTriggerStackTraceSampleTimeout.
/** Tests that samples time out if they don't finish in time. */
@Test(timeout = 1000L)
public void testTriggerStackTraceSampleTimeout() throws Exception {
    int timeout = 100;
    coord = new StackTraceSampleCoordinator(system.dispatcher(), timeout);
    final ScheduledExecutorService scheduledExecutorService = new ScheduledThreadPoolExecutor(1);
    try {
        ExecutionVertex[] vertices = new ExecutionVertex[] {
            mockExecutionVertexWithTimeout(new ExecutionAttemptID(), ExecutionState.RUNNING, scheduledExecutorService, timeout)
        };
        Future<StackTraceSample> sampleFuture = coord.triggerStackTraceSample(vertices, 1, Time.milliseconds(100L), 0);
        // Wait for the timeout
        Thread.sleep(timeout * 2);
        boolean success = false;
        for (int i = 0; i < 10; i++) {
            if (sampleFuture.isDone()) {
                success = true;
                break;
            }
            Thread.sleep(timeout);
        }
        assertTrue("Sample did not time out", success);
        try {
            sampleFuture.get();
            fail("Expected exception.");
        } catch (ExecutionException e) {
            assertTrue(e.getCause().getCause().getMessage().contains("Timeout"));
        }
        // Collect after the timeout (should be ignored)
        ExecutionAttemptID executionId = vertices[0].getCurrentExecutionAttempt().getAttemptId();
        coord.collectStackTraces(0, executionId, new ArrayList<StackTraceElement[]>());
    } finally {
        scheduledExecutorService.shutdownNow();
    }
}
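The sleep-and-poll loop above could be factored into a small helper; the sketch below is not part of the Flink test and relies only on the isDone() call the test already makes on the sample future.

// Hypothetical utility: poll a sample future until it completes or a deadline passes.
// Usage in the test above could be:
//   assertTrue("Sample did not time out", awaitCompletion(sampleFuture, 10 * timeout, timeout));
static boolean awaitCompletion(Future<StackTraceSample> future, long timeoutMillis, long pollMillis)
        throws InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!future.isDone() && System.currentTimeMillis() < deadline) {
        Thread.sleep(pollMillis);
    }
    return future.isDone();
}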
use of org.apache.flink.runtime.executiongraph.ExecutionVertex in project flink by apache.
the class JobManagerTest method testRequestPartitionStateUnregisteredExecution.
/**
* Tests the JobManager response when the execution is not registered with
* the ExecutionGraph.
*/
@Test
public void testRequestPartitionStateUnregisteredExecution() throws Exception {
    new JavaTestKit(system) {
        {
            new Within(duration("15 seconds")) {

                @Override
                protected void run() {
                    // Setup
                    TestingCluster cluster = null;
                    try {
                        cluster = startTestingCluster(4, 1, DEFAULT_AKKA_ASK_TIMEOUT());
                        final IntermediateDataSetID rid = new IntermediateDataSetID();
                        // Create a task
                        final JobVertex sender = new JobVertex("Sender");
                        sender.setParallelism(1);
                        // just finish
                        sender.setInvokableClass(NoOpInvokable.class);
                        sender.createAndAddResultDataSet(rid, PIPELINED);
                        final JobVertex sender2 = new JobVertex("Blocking Sender");
                        sender2.setParallelism(1);
                        // just block
                        sender2.setInvokableClass(BlockingNoOpInvokable.class);
                        sender2.createAndAddResultDataSet(new IntermediateDataSetID(), PIPELINED);
                        final JobGraph jobGraph = new JobGraph("Fast finishing producer test job", sender, sender2);
                        final JobID jid = jobGraph.getJobID();
                        final ActorGateway jobManagerGateway = cluster.getLeaderGateway(TestingUtils.TESTING_DURATION());
                        // we can set the leader session ID to None because we don't use this gateway to send messages
                        final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), null);
                        // Submit the job and wait for all vertices to be running
                        jobManagerGateway.tell(new SubmitJob(jobGraph, ListeningBehaviour.EXECUTION_RESULT), testActorGateway);
                        expectMsgClass(JobSubmitSuccess.class);
                        jobManagerGateway.tell(new WaitForAllVerticesToBeRunningOrFinished(jid), testActorGateway);
                        expectMsgClass(AllVerticesRunning.class);
                        Future<Object> egFuture = jobManagerGateway.ask(new RequestExecutionGraph(jobGraph.getJobID()), remaining());
                        ExecutionGraphFound egFound = (ExecutionGraphFound) Await.result(egFuture, remaining());
                        ExecutionGraph eg = (ExecutionGraph) egFound.executionGraph();
                        ExecutionVertex vertex = eg.getJobVertex(sender.getID()).getTaskVertices()[0];
                        while (vertex.getExecutionState() != ExecutionState.FINISHED) {
                            Thread.sleep(1);
                        }
                        IntermediateResultPartition partition = vertex.getProducedPartitions().values().iterator().next();
                        ResultPartitionID partitionId = new ResultPartitionID(partition.getPartitionId(), vertex.getCurrentExecutionAttempt().getAttemptId());
                        // Producer finished, request state
                        Object request = new RequestPartitionProducerState(jid, rid, partitionId);
                        Future<ExecutionState> producerStateFuture = jobManagerGateway.ask(request, getRemainingTime())
                            .mapTo(ClassTag$.MODULE$.<ExecutionState>apply(ExecutionState.class));
                        assertEquals(ExecutionState.FINISHED, Await.result(producerStateFuture, getRemainingTime()));
                    } catch (Exception e) {
                        e.printStackTrace();
                        fail(e.getMessage());
                    } finally {
                        if (cluster != null) {
                            cluster.shutdown();
                        }
                    }
                }
            };
        }
    };
}
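The detail this test pins down is that the producing execution has already reached FINISHED (the blocking second vertex keeps the job itself alive), so its attempt is presumably no longer held in the ExecutionGraph's registry of currently running executions; the JobManager is still expected to resolve the RequestPartitionProducerState message through the intermediate result partition and answer FINISHED rather than fail the request.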