Use of org.apache.flink.runtime.executiongraph.Execution in project flink by apache.
The class DeployingDownstreamTasksInBatchJobBenchmark, method setup:
@Override
public void setup(JobConfiguration jobConfiguration) throws Exception {
    super.setup(jobConfiguration);
    // Deploy all source tasks up front so that only the downstream
    // deployment is measured by the benchmark.
    final JobVertex source = jobVertices.get(0);
    for (ExecutionVertex ev : executionGraph.getJobVertex(source.getID()).getTaskVertices()) {
        Execution execution = ev.getCurrentExecutionAttempt();
        execution.transitionState(ExecutionState.SCHEDULED);
        execution.deploy();
    }
    // Keep the sink vertices; they are the tasks the benchmark deploys later.
    final JobVertex sink = jobVertices.get(1);
    vertices = executionGraph.getJobVertex(sink.getID()).getTaskVertices();
}
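A minimal sketch of the benchmark step this setup prepares, assuming the saved sink vertices are the ones to deploy; the method name deployDownstreamTasks is hypothetical, and the loop reuses only the calls shown above.

public void deployDownstreamTasks() throws Exception {
    for (ExecutionVertex ev : vertices) {
        Execution execution = ev.getCurrentExecutionAttempt();
        execution.transitionState(ExecutionState.SCHEDULED);
        // Deploying the downstream (sink) tasks is the measured operation.
        execution.deploy();
    }
}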
Use of org.apache.flink.runtime.executiongraph.Execution in project flink by apache.
The class FailureHandlingResultSnapshotTest, method testFailureHandlingWithRootCauseExecutionBeingPartOfConcurrentlyFailedExecutions:
@Test(expected = IllegalArgumentException.class)
public void testFailureHandlingWithRootCauseExecutionBeingPartOfConcurrentlyFailedExecutions() {
    final Execution rootCauseExecution = extractExecutionVertex(0).getCurrentExecutionAttempt();
    // The constructor must reject a root-cause execution that also appears
    // in the set of concurrently failed executions.
    new FailureHandlingResultSnapshot(
            rootCauseExecution,
            new RuntimeException("Expected exception"),
            System.currentTimeMillis(),
            Collections.singleton(rootCauseExecution));
}
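By contrast, a snapshot whose concurrently failed set does not contain the root-cause execution should construct without error. A hedged sketch under the same test fixture, assuming at least two execution vertices are available (extractExecutionVertex(1) is illustrative):

final Execution rootCauseExecution = extractExecutionVertex(0).getCurrentExecutionAttempt();
// Assumption: the fixture provides a second, distinct execution vertex.
final Execution concurrentlyFailed = extractExecutionVertex(1).getCurrentExecutionAttempt();
// Valid: the root-cause execution is not part of the concurrently failed set.
new FailureHandlingResultSnapshot(
        rootCauseExecution,
        new RuntimeException("Expected exception"),
        System.currentTimeMillis(),
        Collections.singleton(concurrentlyFailed));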
Use of org.apache.flink.runtime.executiongraph.Execution in project flink by apache.
The class DeployingTasksInStreamingJobBenchmark, method deployAllTasks:
public void deployAllTasks() throws Exception {
    // Walk the execution graph in topological order and deploy every
    // current execution attempt.
    for (ExecutionJobVertex ejv : executionGraph.getVerticesTopologically()) {
        for (ExecutionVertex ev : ejv.getTaskVertices()) {
            Execution execution = ev.getCurrentExecutionAttempt();
            execution.transitionState(ExecutionState.SCHEDULED);
            execution.deploy();
        }
    }
}
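A sketch of how such a benchmark might be driven; the class name follows this page, while the harness and jobConfiguration variable are illustrative. The setup comes from the base class shown next.

// Hypothetical driver code, not part of the Flink sources.
DeployingTasksInStreamingJobBenchmark benchmark = new DeployingTasksInStreamingJobBenchmark();
benchmark.setup(jobConfiguration); // builds the graph and assigns slots (see the base class below)
benchmark.deployAllTasks();        // the measured operation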
Use of org.apache.flink.runtime.executiongraph.Execution in project flink by apache.
The class DeployingTasksBenchmarkBase, method setup:
public void setup(JobConfiguration jobConfiguration) throws Exception {
    super.setup();
    jobVertices = createDefaultJobVertices(jobConfiguration);
    executionGraph = createAndInitExecutionGraph(jobVertices, jobConfiguration, scheduledExecutorService);
    final TestingLogicalSlotBuilder slotBuilder = new TestingLogicalSlotBuilder();
    for (ExecutionJobVertex ejv : executionGraph.getVerticesTopologically()) {
        for (ExecutionVertex ev : ejv.getTaskVertices()) {
            final LogicalSlot slot = slotBuilder.createTestingLogicalSlot();
            final Execution execution = ev.getCurrentExecutionAttempt();
            // Register the produced partitions and block until registration completes.
            execution.registerProducedPartitions(slot.getTaskManagerLocation(), true).get();
            // Every execution needs an assigned slot before it can be deployed.
            if (!execution.tryAssignResource(slot)) {
                throw new RuntimeException("Error when assigning slot to execution.");
            }
        }
    }
}
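Putting the base class together with the subclasses above, the per-execution lifecycle these benchmarks rely on looks roughly like this. A sketch for orientation, not Flink's scheduler; every call appears in the snippets on this page, and the return value of tryAssignResource is checked in the real setup above.

LogicalSlot slot = slotBuilder.createTestingLogicalSlot();
Execution execution = ev.getCurrentExecutionAttempt();
execution.registerProducedPartitions(slot.getTaskManagerLocation(), true).get(); // 1. register result partitions
execution.tryAssignResource(slot);                                               // 2. assign a slot
execution.transitionState(ExecutionState.SCHEDULED);                             // 3. mark as scheduled
execution.deploy();                                                              // 4. ship the task to the TaskManager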
Use of org.apache.flink.runtime.executiongraph.Execution in project flink by apache.
The class StackTraceSampleCoordinator, method triggerStackTraceSample:
/**
 * Triggers a stack trace sample to all tasks.
 *
 * @param tasksToSample Tasks to sample.
 * @param numSamples Number of stack trace samples to collect.
 * @param delayBetweenSamples Delay between consecutive samples.
 * @param maxStackTraceDepth Maximum depth of the stack trace. 0 indicates
 *                           no maximum and keeps the complete stack trace.
 * @return A future of the completed stack trace sample.
 */
@SuppressWarnings("unchecked")
public Future<StackTraceSample> triggerStackTraceSample(
        ExecutionVertex[] tasksToSample,
        int numSamples,
        Time delayBetweenSamples,
        int maxStackTraceDepth) {
    checkNotNull(tasksToSample, "Tasks to sample");
    checkArgument(tasksToSample.length >= 1, "No tasks to sample");
    checkArgument(numSamples >= 1, "No number of samples");
    checkArgument(maxStackTraceDepth >= 0, "Negative maximum stack trace depth");

    // Execution IDs of running tasks.
    ExecutionAttemptID[] triggerIds = new ExecutionAttemptID[tasksToSample.length];
    Execution[] executions = new Execution[tasksToSample.length];

    // Check that all tasks are RUNNING before triggering anything. The
    // triggering can still fail.
    for (int i = 0; i < triggerIds.length; i++) {
        Execution execution = tasksToSample[i].getCurrentExecutionAttempt();
        if (execution != null && execution.getState() == ExecutionState.RUNNING) {
            executions[i] = execution;
            triggerIds[i] = execution.getAttemptId();
        } else {
            return FlinkCompletableFuture.completedExceptionally(
                    new IllegalStateException(
                            "Task " + tasksToSample[i].getTaskNameWithSubtaskIndex() + " is not running."));
        }
    }

    synchronized (lock) {
        if (isShutDown) {
            return FlinkCompletableFuture.completedExceptionally(new IllegalStateException("Shut down"));
        }
        final int sampleId = sampleIdCounter++;
        LOG.debug("Triggering stack trace sample {}", sampleId);
        final PendingStackTraceSample pending = new PendingStackTraceSample(sampleId, triggerIds);

        // Discard the sample if it takes too long. We don't send cancel
        // messages to the task managers, but only wait for the responses
        // and then ignore them.
        long expectedDuration = numSamples * delayBetweenSamples.toMilliseconds();
        Time timeout = Time.milliseconds(expectedDuration + sampleTimeout);

        // Add the pending sample before scheduling the discard task to
        // prevent races with removing it again.
        pendingSamples.put(sampleId, pending);

        // Trigger all samples.
        for (Execution execution : executions) {
            final Future<StackTraceSampleResponse> stackTraceSampleFuture =
                    execution.requestStackTraceSample(sampleId, numSamples, delayBetweenSamples, maxStackTraceDepth, timeout);
            stackTraceSampleFuture.handleAsync(
                    new BiFunction<StackTraceSampleResponse, Throwable, Void>() {
                        @Override
                        public Void apply(StackTraceSampleResponse stackTraceSampleResponse, Throwable throwable) {
                            if (stackTraceSampleResponse != null) {
                                collectStackTraces(
                                        stackTraceSampleResponse.getSampleId(),
                                        stackTraceSampleResponse.getExecutionAttemptID(),
                                        stackTraceSampleResponse.getSamples());
                            } else {
                                // No response: fail the whole pending sample.
                                cancelStackTraceSample(sampleId, throwable);
                            }
                            return null;
                        }
                    },
                    executor);
        }
        return pending.getStackTraceSampleFuture();
    }
}
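A hedged usage sketch of this API, assuming a StackTraceSampleCoordinator instance named coordinator and an array of RUNNING execution vertices named tasksToSample; the parameter values are illustrative.

// coordinator and tasksToSample are assumed to exist; values are illustrative.
Future<StackTraceSample> sample = coordinator.triggerStackTraceSample(
        tasksToSample,          // every task must be in state RUNNING
        10,                     // collect 10 stack traces per task
        Time.milliseconds(100), // wait 100 ms between consecutive samples
        0);                     // 0 = no depth limit, keep complete stack traces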