use of org.apache.flink.api.common.TaskInfo in project flink by apache.
the class CollectionExecutor method executeUnaryOperator.
private <IN, OUT> List<OUT> executeUnaryOperator(SingleInputOperator<?, ?, ?> operator, int superStep) throws Exception {
    Operator<?> inputOp = operator.getInput();
    if (inputOp == null) {
        throw new InvalidProgramException("The unary operation " + operator.getName() + " has no input.");
    }
    @SuppressWarnings("unchecked")
    List<IN> inputData = (List<IN>) execute(inputOp, superStep);
    @SuppressWarnings("unchecked")
    SingleInputOperator<IN, OUT, ?> typedOp = (SingleInputOperator<IN, OUT, ?>) operator;
    // build the runtime context and compute broadcast variables, if necessary
    TaskInfo taskInfo = new TaskInfo(typedOp.getName(), 1, 0, 1, 0);
    RuntimeUDFContext ctx;
    MetricGroup metrics = new UnregisteredMetricsGroup();
    if (RichFunction.class.isAssignableFrom(typedOp.getUserCodeWrapper().getUserCodeClass())) {
        ctx = superStep == 0
                ? new RuntimeUDFContext(taskInfo, classLoader, executionConfig, cachedFiles, accumulators, metrics)
                : new IterationRuntimeUDFContext(taskInfo, classLoader, executionConfig, cachedFiles, accumulators, metrics);
        for (Map.Entry<String, Operator<?>> bcInputs : operator.getBroadcastInputs().entrySet()) {
            List<?> bcData = execute(bcInputs.getValue());
            ctx.setBroadcastVariable(bcInputs.getKey(), bcData);
        }
    } else {
        ctx = null;
    }
    return typedOp.executeOnCollections(inputData, ctx, executionConfig);
}
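The TaskInfo built above is what a RichFunction later sees through its RuntimeContext; the RuntimeUDFContext simply wraps it. As a minimal, self-contained sketch of the constructor arguments used at this call site (task name, max parallelism, subtask index, parallelism, attempt number) and the matching accessors. The class name and printed values are illustrative; check the accessor names against your Flink version:

import org.apache.flink.api.common.TaskInfo;

public class TaskInfoSketch {
    public static void main(String[] args) {
        // same argument order as new TaskInfo(typedOp.getName(), 1, 0, 1, 0) above
        TaskInfo info = new TaskInfo("MyOperator", 1, 0, 1, 0);
        System.out.println(info.getTaskName());                 // "MyOperator"
        System.out.println(info.getIndexOfThisSubtask());       // 0
        System.out.println(info.getNumberOfParallelSubtasks()); // 1
        System.out.println(info.getAttemptNumber());            // 0
        System.out.println(info.getTaskNameWithSubtasks());     // something like "MyOperator (1/1)"
    }
}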
use of org.apache.flink.api.common.TaskInfo in project flink by apache.
the class TaskStopTest method doMocking.
public void doMocking(AbstractInvokable taskMock) throws Exception {
    TaskInfo taskInfoMock = mock(TaskInfo.class);
    when(taskInfoMock.getTaskNameWithSubtasks()).thenReturn("dummyName");
    TaskManagerRuntimeInfo tmRuntimeInfo = mock(TaskManagerRuntimeInfo.class);
    when(tmRuntimeInfo.getConfiguration()).thenReturn(new Configuration());
    task = new Task(
            mock(JobInformation.class),
            new TaskInformation(new JobVertexID(), "test task name", 1, 1, "foobar", new Configuration()),
            mock(ExecutionAttemptID.class), mock(AllocationID.class), 0, 0,
            Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
            Collections.<InputGateDeploymentDescriptor>emptyList(), 0,
            mock(TaskStateHandles.class), mock(MemoryManager.class), mock(IOManager.class),
            mock(NetworkEnvironment.class), mock(BroadcastVariableManager.class),
            mock(TaskManagerActions.class), mock(InputSplitProvider.class),
            mock(CheckpointResponder.class), mock(LibraryCacheManager.class), mock(FileCache.class),
            tmRuntimeInfo, mock(TaskMetricGroup.class), mock(ResultPartitionConsumableNotifier.class),
            mock(PartitionProducerStateChecker.class), mock(Executor.class));
    Field f = task.getClass().getDeclaredField("invokable");
    f.setAccessible(true);
    f.set(task, taskMock);
    Field f2 = task.getClass().getDeclaredField("executionState");
    f2.setAccessible(true);
    f2.set(task, ExecutionState.RUNNING);
}
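Only getTaskNameWithSubtasks() is stubbed above because that is all this test path touches. If the invokable under test also read the subtask coordinates, the mock could be extended along these lines (a sketch; the stubbed values are illustrative):

TaskInfo taskInfoMock = mock(TaskInfo.class);
when(taskInfoMock.getTaskNameWithSubtasks()).thenReturn("dummyName (1/1)");
when(taskInfoMock.getIndexOfThisSubtask()).thenReturn(0);
when(taskInfoMock.getNumberOfParallelSubtasks()).thenReturn(1);
when(taskInfoMock.getAttemptNumber()).thenReturn(0);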
use of org.apache.flink.api.common.TaskInfo in project flink by apache.
the class AsyncWaitOperatorTest method testTimeoutCleanup.
/**
 * FLINK-5652
 * Tests that registered timers are properly canceled upon completion of a
 * {@link StreamRecordQueueEntry}, in order to avoid resource leaks, because TriggerTasks
 * hold a reference on the StreamRecordQueueEntry.
 */
@Test
public void testTimeoutCleanup() throws Exception {
    final Object lock = new Object();
    final long timeout = 100000L;
    final long timestamp = 1L;

    Environment environment = mock(Environment.class);
    when(environment.getMetricGroup()).thenReturn(new UnregisteredTaskMetricsGroup());
    when(environment.getTaskManagerInfo()).thenReturn(new TestingTaskManagerRuntimeInfo());
    when(environment.getUserClassLoader()).thenReturn(getClass().getClassLoader());
    when(environment.getTaskInfo()).thenReturn(new TaskInfo("testTask", 1, 0, 1, 0));

    ScheduledFuture<?> scheduledFuture = mock(ScheduledFuture.class);
    ProcessingTimeService processingTimeService = mock(ProcessingTimeService.class);
    when(processingTimeService.getCurrentProcessingTime()).thenReturn(timestamp);
    doReturn(scheduledFuture).when(processingTimeService).registerTimer(anyLong(), any(ProcessingTimeCallback.class));

    StreamTask<?, ?> containingTask = mock(StreamTask.class);
    when(containingTask.getEnvironment()).thenReturn(environment);
    when(containingTask.getCheckpointLock()).thenReturn(lock);
    when(containingTask.getProcessingTimeService()).thenReturn(processingTimeService);

    StreamConfig streamConfig = mock(StreamConfig.class);
    doReturn(IntSerializer.INSTANCE).when(streamConfig).getTypeSerializerIn1(any(ClassLoader.class));

    Output<StreamRecord<Integer>> output = mock(Output.class);

    AsyncWaitOperator<Integer, Integer> operator = new AsyncWaitOperator<>(
            new AsyncFunction<Integer, Integer>() {

                private static final long serialVersionUID = -3718276118074877073L;

                @Override
                public void asyncInvoke(Integer input, AsyncCollector<Integer> collector) throws Exception {
                    collector.collect(Collections.singletonList(input));
                }
            },
            timeout,
            1,
            AsyncDataStream.OutputMode.UNORDERED);

    operator.setup(containingTask, streamConfig, output);
    operator.open();

    final StreamRecord<Integer> streamRecord = new StreamRecord<>(42, timestamp);

    synchronized (lock) {
        // processing an element will register a timeout
        operator.processElement(streamRecord);
    }

    synchronized (lock) {
        // closing the operator waits until all inputs have been processed
        operator.close();
    }

    // check that we actually emitted the result of the single input
    verify(output).collect(eq(streamRecord));

    // check that the timeout timer was registered with the expected deadline
    verify(processingTimeService).registerTimer(eq(processingTimeService.getCurrentProcessingTime() + timeout), any(ProcessingTimeCallback.class));

    // check that we have cancelled our registered timeout
    verify(scheduledFuture).cancel(eq(true));
}
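What the test verifies is the general pattern behind FLINK-5652: when the asynchronous result arrives first, the timeout timer must be cancelled so it stops holding a reference to the queue entry. A self-contained sketch of that pattern using plain JDK types; this is an illustration of the idea, not Flink's actual implementation:

import java.util.concurrent.*;

public class TimeoutCleanupSketch {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService timerService = Executors.newSingleThreadScheduledExecutor();
        CompletableFuture<Integer> result = new CompletableFuture<>();

        // register a timeout that fails the pending result if nothing arrives in time
        ScheduledFuture<?> timeout = timerService.schedule(
                () -> { result.completeExceptionally(new TimeoutException("async op timed out")); },
                100_000, TimeUnit.MILLISECONDS);

        // when the result completes first, cancel the timer so the scheduled task
        // no longer keeps the (potentially large) entry reachable
        result.whenComplete((value, error) -> timeout.cancel(true));

        result.complete(42);
        System.out.println(result.get()); // 42; the timeout task has been cancelled
        timerService.shutdown();
    }
}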
use of org.apache.flink.api.common.TaskInfo in project flink by apache.
the class AsyncWaitOperatorTest method testAsyncTimeout.
@Test
public void testAsyncTimeout() throws Exception {
    final long timeout = 10L;

    final AsyncWaitOperator<Integer, Integer> operator =
            new AsyncWaitOperator<>(new LazyAsyncFunction(), timeout, 2, AsyncDataStream.OutputMode.ORDERED);

    final Environment mockEnvironment = mock(Environment.class);

    final Configuration taskConfiguration = new Configuration();
    final ExecutionConfig executionConfig = new ExecutionConfig();
    final TaskMetricGroup metricGroup = new UnregisteredTaskMetricsGroup();
    final TaskManagerRuntimeInfo taskManagerRuntimeInfo = new TestingTaskManagerRuntimeInfo();
    final TaskInfo taskInfo = new TaskInfo("foobarTask", 1, 0, 1, 1);

    when(mockEnvironment.getTaskConfiguration()).thenReturn(taskConfiguration);
    when(mockEnvironment.getExecutionConfig()).thenReturn(executionConfig);
    when(mockEnvironment.getMetricGroup()).thenReturn(metricGroup);
    when(mockEnvironment.getTaskManagerInfo()).thenReturn(taskManagerRuntimeInfo);
    when(mockEnvironment.getTaskInfo()).thenReturn(taskInfo);
    when(mockEnvironment.getUserClassLoader()).thenReturn(AsyncWaitOperatorTest.class.getClassLoader());

    final OneInputStreamOperatorTestHarness<Integer, Integer> testHarness =
            new OneInputStreamOperatorTestHarness<>(operator, IntSerializer.INSTANCE, mockEnvironment);

    final long initialTime = 0L;
    final ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

    testHarness.open();
    testHarness.setProcessingTime(initialTime);

    synchronized (testHarness.getCheckpointLock()) {
        testHarness.processElement(new StreamRecord<>(1, initialTime));
        testHarness.setProcessingTime(initialTime + 5L);
        testHarness.processElement(new StreamRecord<>(2, initialTime + 5L));
    }

    // trigger the timeout of the first stream record
    testHarness.setProcessingTime(initialTime + timeout + 1L);

    // allow the second async stream record to be processed
    LazyAsyncFunction.countDown();

    // wait until all async collectors in the buffer have been emitted out.
    synchronized (testHarness.getCheckpointLock()) {
        testHarness.close();
    }

    expectedOutput.add(new StreamRecord<>(2, initialTime + 5L));

    TestHarnessUtil.assertOutputEquals("Output with watermark was not correct.", expectedOutput, testHarness.getOutput());

    ArgumentCaptor<Throwable> argumentCaptor = ArgumentCaptor.forClass(Throwable.class);
    verify(mockEnvironment).failExternally(argumentCaptor.capture());

    Throwable failureCause = argumentCaptor.getValue();
    Assert.assertNotNull(failureCause.getCause());
    Assert.assertTrue(failureCause.getCause() instanceof ExecutionException);
    Assert.assertNotNull(failureCause.getCause().getCause());
    Assert.assertTrue(failureCause.getCause().getCause() instanceof TimeoutException);
}
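The two AsyncWaitOperator tests above stub the Environment in almost the same way; a possible helper that factors out that setup. The method name and its placement inside the test class are assumptions, and it relies on the same static Mockito imports the tests already use:

// builds a mocked Environment that hands out the given TaskInfo, mirroring the stubbing above
private static Environment createStubbedEnvironment(TaskInfo taskInfo) {
    Environment env = mock(Environment.class);
    when(env.getTaskInfo()).thenReturn(taskInfo);
    when(env.getExecutionConfig()).thenReturn(new ExecutionConfig());
    when(env.getTaskConfiguration()).thenReturn(new Configuration());
    when(env.getMetricGroup()).thenReturn(new UnregisteredTaskMetricsGroup());
    when(env.getTaskManagerInfo()).thenReturn(new TestingTaskManagerRuntimeInfo());
    when(env.getUserClassLoader()).thenReturn(AsyncWaitOperatorTest.class.getClassLoader());
    return env;
}

With such a helper, testAsyncTimeout could obtain its environment via createStubbedEnvironment(new TaskInfo("foobarTask", 1, 0, 1, 1)).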
use of org.apache.flink.api.common.TaskInfo in project flink by apache.
the class StreamingRuntimeContextTest method testFoldingStateInstantiation.
@Test
public void testFoldingStateInstantiation() throws Exception {
final ExecutionConfig config = new ExecutionConfig();
config.registerKryoType(Path.class);
final AtomicReference<Object> descriptorCapture = new AtomicReference<>();
StreamingRuntimeContext context = new StreamingRuntimeContext(createDescriptorCapturingMockOp(descriptorCapture, config), createMockEnvironment(), Collections.<String, Accumulator<?, ?>>emptyMap());
@SuppressWarnings("unchecked") FoldFunction<String, TaskInfo> folder = (FoldFunction<String, TaskInfo>) mock(FoldFunction.class);
FoldingStateDescriptor<String, TaskInfo> descr = new FoldingStateDescriptor<>("name", null, folder, TaskInfo.class);
context.getFoldingState(descr);
FoldingStateDescriptor<?, ?> descrIntercepted = (FoldingStateDescriptor<?, ?>) descriptorCapture.get();
TypeSerializer<?> serializer = descrIntercepted.getSerializer();
// check that the Path class is really registered, i.e., the execution config was applied
assertTrue(serializer instanceof KryoSerializer);
assertTrue(((KryoSerializer<?>) serializer).getKryo().getRegistration(Path.class).getId() > 0);
}
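The assertions at the end only make sense once the descriptor's serializer has been initialized against the ExecutionConfig, which the mocked operator does internally. A standalone sketch of that step, reusing the folder mock from the test above; treating initializeSerializerUnlessSet as the relevant StateDescriptor call is an assumption here:

ExecutionConfig config = new ExecutionConfig();
config.registerKryoType(Path.class);

FoldingStateDescriptor<String, TaskInfo> descr =
        new FoldingStateDescriptor<>("name", null, folder, TaskInfo.class);
descr.initializeSerializerUnlessSet(config);

// TaskInfo has no dedicated Flink serializer, so it falls back to Kryo, and the
// Path registration made on the ExecutionConfig is visible on that Kryo instance
TypeSerializer<TaskInfo> serializer = descr.getSerializer();
assertTrue(serializer instanceof KryoSerializer);
assertTrue(((KryoSerializer<?>) serializer).getKryo().getRegistration(Path.class).getId() > 0);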