Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
The class String2HashJoinOperatorTest, method init: builds a HashJoinOperator, wires it into a TwoInputStreamTaskTestHarness, and tags it with a fresh OperatorID before invoking the task.
private void init(boolean leftOut, boolean rightOut, boolean buildLeft) throws Exception {
    HashJoinType type = HashJoinType.of(buildLeft, leftOut, rightOut);
    HashJoinOperator operator = newOperator(33 * 32 * 1024, type, !buildLeft);

    testHarness = new TwoInputStreamTaskTestHarness<>(
            TwoInputStreamTask::new, 2, 2, new int[] { 1, 2 },
            typeInfo, (TypeInformation) typeInfo, joinedInfo);
    testHarness.memorySize = 36 * 1024 * 1024;
    testHarness.getExecutionConfig().enableObjectReuse();
    testHarness.setupOutputForSingletonOperatorChain();
    testHarness.getStreamConfig().setStreamOperator(operator);
    testHarness.getStreamConfig().setOperatorID(new OperatorID());
    testHarness.getStreamConfig().setManagedMemoryFractionOperatorOfUseCase(
            ManagedMemoryUseCase.OPERATOR, 0.99);

    testHarness.invoke();
    testHarness.waitForTaskRunning();
}
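For context, a minimal standalone sketch (not taken from the Flink test suite; the class name is ours) of how an OperatorID behaves on its own: the no-argument constructor used above draws a random 128-bit identifier, while fromJobVertexID derives one deterministically from a vertex ID.

import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.jobgraph.OperatorID;

public class OperatorIdSketch {
    public static void main(String[] args) {
        // new OperatorID() draws a random 128-bit identifier
        OperatorID random = new OperatorID();
        // fromJobVertexID derives the ID from the vertex ID's bytes
        OperatorID derived = OperatorID.fromJobVertexID(new JobVertexID());
        System.out.println(random);   // prints as a 32-character hex string
        System.out.println(derived);
    }
}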
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
The class CoordinatorEventsExactlyOnceITCase, method buildJobVertex: derives a deterministic OperatorID from the vertex ID and registers an OperatorCoordinator provider under it.
// ------------------------------------------------------------------------
//  test setup helpers
// ------------------------------------------------------------------------

private static JobVertex buildJobVertex(String name, int numEvents, int delay) throws IOException {
    final JobVertex vertex = new JobVertex(name);
    final OperatorID opId = OperatorID.fromJobVertexID(vertex.getID());

    vertex.setParallelism(1);
    vertex.setInvokableClass(EventCollectingTask.class);
    vertex.getConfiguration().setString(ACC_NAME, name);

    final OperatorCoordinator.Provider provider = new OperatorCoordinator.Provider() {

        @Override
        public OperatorID getOperatorId() {
            return opId;
        }

        @Override
        public OperatorCoordinator create(OperatorCoordinator.Context context) {
            return new EventSendingCoordinator(context, name, numEvents, delay);
        }
    };

    vertex.addOperatorCoordinator(new SerializedValue<>(provider));
    return vertex;
}
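Because fromJobVertexID derives the OperatorID from the vertex ID's bytes, the result is stable: recomputing it for the same vertex yields an equal ID, which is what lets the coordinator above be matched back to its operator later. A small standalone sketch (class name is ours) illustrating this:

import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.jobgraph.OperatorID;

public class DerivedIdSketch {
    public static void main(String[] args) {
        JobVertex vertex = new JobVertex("my-vertex");
        OperatorID first = OperatorID.fromJobVertexID(vertex.getID());
        OperatorID second = OperatorID.fromJobVertexID(vertex.getID());
        // equals/hashCode come from AbstractID, so the two IDs compare equal
        System.out.println(first.equals(second)); // true
    }
}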
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
The class StreamingJobGraphGeneratorTest, method testInputOutputFormat: verifies that the input and output formats of a chained vertex are stored in the format container and retrievable by OperatorID.
@Test
public void testInputOutputFormat() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Long> source = env.addSource(
            new InputFormatSourceFunction<>(
                    new TypeSerializerInputFormat<>(TypeInformation.of(Long.class)),
                    TypeInformation.of(Long.class)),
            TypeInformation.of(Long.class)).name("source");
    source.writeUsingOutputFormat(new DiscardingOutputFormat<>()).name("sink1");
    source.writeUsingOutputFormat(new DiscardingOutputFormat<>()).name("sink2");

    StreamGraph streamGraph = env.getStreamGraph();
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    assertEquals(1, jobGraph.getNumberOfVertices());

    JobVertex jobVertex = jobGraph.getVertices().iterator().next();
    assertTrue(jobVertex instanceof InputOutputFormatVertex);

    InputOutputFormatContainer formatContainer = new InputOutputFormatContainer(
            new TaskConfig(jobVertex.getConfiguration()),
            Thread.currentThread().getContextClassLoader());
    Map<OperatorID, UserCodeWrapper<? extends InputFormat<?, ?>>> inputFormats =
            formatContainer.getInputFormats();
    Map<OperatorID, UserCodeWrapper<? extends OutputFormat<?>>> outputFormats =
            formatContainer.getOutputFormats();
    assertEquals(1, inputFormats.size());
    assertEquals(2, outputFormats.size());

    Map<String, OperatorID> nameToOperatorIds = new HashMap<>();
    StreamConfig headConfig = new StreamConfig(jobVertex.getConfiguration());
    nameToOperatorIds.put(headConfig.getOperatorName(), headConfig.getOperatorID());

    Map<Integer, StreamConfig> chainedConfigs =
            headConfig.getTransitiveChainedTaskConfigs(Thread.currentThread().getContextClassLoader());
    for (StreamConfig config : chainedConfigs.values()) {
        nameToOperatorIds.put(config.getOperatorName(), config.getOperatorID());
    }

    InputFormat<?, ?> sourceFormat =
            inputFormats.get(nameToOperatorIds.get("Source: source")).getUserCodeObject();
    assertTrue(sourceFormat instanceof TypeSerializerInputFormat);

    OutputFormat<?> sinkFormat1 =
            outputFormats.get(nameToOperatorIds.get("Sink: sink1")).getUserCodeObject();
    assertTrue(sinkFormat1 instanceof DiscardingOutputFormat);

    OutputFormat<?> sinkFormat2 =
            outputFormats.get(nameToOperatorIds.get("Sink: sink2")).getUserCodeObject();
    assertTrue(sinkFormat2 instanceof DiscardingOutputFormat);
}
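The lookups above rely on OperatorID being usable as a hash-map key: equals and hashCode are inherited from AbstractID and compare by value, so an ID reconstructed from the same bytes finds the original entry. A minimal standalone sketch (not from the test; class name is ours) of that property:

import java.util.HashMap;
import java.util.Map;
import org.apache.flink.runtime.jobgraph.OperatorID;

public class MapKeySketch {
    public static void main(String[] args) {
        OperatorID original = new OperatorID();
        Map<OperatorID, String> formatsById = new HashMap<>();
        formatsById.put(original, "source-format");
        // a second instance built from the same bytes is a distinct object
        // but an equal key, so the lookup succeeds
        OperatorID copy = new OperatorID(original.getBytes());
        System.out.println(formatsById.get(copy)); // source-format
    }
}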
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
The class AbstractUdfStreamOperatorLifecycleTest, method testLifeCycleCancel: assigns a fresh OperatorID to a lifecycle-tracking source operator, then checks the observed call order when the task is cancelled mid-run.
@Test
public void testLifeCycleCancel() throws Exception {
    ACTUAL_ORDER_TRACKING.clear();

    Configuration taskManagerConfig = new Configuration();
    StreamConfig cfg = new StreamConfig(new Configuration());
    MockSourceFunction srcFun = new MockSourceFunction();
    cfg.setStreamOperator(new LifecycleTrackingStreamSource<>(srcFun, false));
    cfg.setOperatorID(new OperatorID());
    cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    try (ShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build()) {
        Task task = StreamTaskTest.createTask(SourceStreamTask.class, shuffleEnvironment, cfg, taskManagerConfig);
        task.startTaskThread();

        LifecycleTrackingStreamSource.runStarted.await();

        // this should cancel the task even though it is blocked on runFinished
        task.cancelExecution();

        // wait for clean termination
        task.getExecutingThread().join();

        assertEquals(ExecutionState.CANCELED, task.getExecutionState());
        assertEquals(EXPECTED_CALL_ORDER_CANCEL_RUNNING, ACTUAL_ORDER_TRACKING);
    }
}
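The setStreamOperator / setOperatorID pair at the top of the test is the recurring wiring pattern in these examples: every operator registered on a StreamConfig is expected to carry an OperatorID before the task starts. A condensed sketch of just that wiring (class and method names are ours; the operator parameter stands in for any StreamOperator):

import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.jobgraph.OperatorID;
import org.apache.flink.streaming.api.graph.StreamConfig;
import org.apache.flink.streaming.api.operators.StreamOperator;

public class StreamConfigSketch {
    // Tag any operator with a fresh OperatorID on its StreamConfig,
    // mirroring the cfg.setStreamOperator / cfg.setOperatorID pair above.
    static StreamConfig configure(StreamOperator<?> operator) {
        StreamConfig cfg = new StreamConfig(new Configuration());
        cfg.setStreamOperator(operator);
        cfg.setOperatorID(new OperatorID()); // random ID; enough for single-operator tests
        return cfg;
    }
}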
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache.
The class CheckpointMessagesTest, method testConfirmTaskCheckpointed: keys subtask state in a TaskStateSnapshot by OperatorID and round-trips the acknowledge message through serialization.
@Test
public void testConfirmTaskCheckpointed() {
    final Random rnd = new Random();
    try {
        AcknowledgeCheckpoint noState =
                new AcknowledgeCheckpoint(new JobID(), new ExecutionAttemptID(), 569345L);

        KeyGroupRange keyGroupRange = KeyGroupRange.of(42, 42);

        TaskStateSnapshot checkpointStateHandles = new TaskStateSnapshot();
        OperatorSubtaskState subtaskState = OperatorSubtaskState.builder()
                .setManagedOperatorState(
                        generatePartitionableStateHandle(new JobVertexID(), 0, 2, 8, false))
                .setManagedKeyedState(
                        generateKeyGroupState(keyGroupRange, Collections.singletonList(new MyHandle())))
                .setInputChannelState(singleton(createNewInputChannelStateHandle(10, rnd)))
                .setResultSubpartitionState(singleton(createNewResultSubpartitionStateHandle(10, rnd)))
                .build();
        checkpointStateHandles.putSubtaskStateByOperatorID(new OperatorID(), subtaskState);

        AcknowledgeCheckpoint withState = new AcknowledgeCheckpoint(
                new JobID(), new ExecutionAttemptID(), 87658976143L,
                new CheckpointMetrics(), checkpointStateHandles);

        testSerializabilityEqualsHashCode(noState);
        testSerializabilityEqualsHashCode(withState);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
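TaskStateSnapshot keys each operator's state by OperatorID, and retrieval uses the same value equality, so state stored under one ID instance can be fetched with an equal one. A short standalone sketch of that round trip (class name is ours; an empty OperatorSubtaskState serves as a placeholder):

import org.apache.flink.runtime.checkpoint.OperatorSubtaskState;
import org.apache.flink.runtime.checkpoint.TaskStateSnapshot;
import org.apache.flink.runtime.jobgraph.OperatorID;

public class SnapshotSketch {
    public static void main(String[] args) {
        TaskStateSnapshot snapshot = new TaskStateSnapshot();
        OperatorID id = new OperatorID();
        // an empty builder yields a state object with no handles; placeholder only
        snapshot.putSubtaskStateByOperatorID(id, OperatorSubtaskState.builder().build());
        // lookup with a value-equal ID reconstructed from the same bytes
        OperatorSubtaskState state =
                snapshot.getSubtaskStateByOperatorID(new OperatorID(id.getBytes()));
        System.out.println(state != null); // true
    }
}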