Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache: class OperatorChainTest, method setupOperatorChain.
// ------------------------------------------------------------------------
//  Operator Chain Setup Utils
// ------------------------------------------------------------------------

@SafeVarargs
public static <T, OP extends StreamOperator<T>> OperatorChain<T, OP> setupOperatorChain(
        OneInputStreamOperator<T, T>... operators) throws Exception {
    checkNotNull(operators);
    checkArgument(operators.length > 0);

    try (MockEnvironment env = MockEnvironment.builder().build()) {
        final StreamTask<?, ?> containingTask = new MockStreamTaskBuilder(env).build();
        final StreamConfig cfg = new StreamConfig(new Configuration());
        cfg.setOperatorID(new OperatorID());
        cfg.setStateKeySerializer(new StringSerializer());

        final List<StreamOperatorWrapper<?, ?>> operatorWrappers = new ArrayList<>();

        // initial output goes nowhere
        @SuppressWarnings({"unchecked", "rawtypes"})
        WatermarkGaugeExposingOutput<StreamRecord<T>> lastWriter =
                new BroadcastingOutputCollector<>(new Output[0]);

        // wire the operators in reverse order, so that each operator's output
        // is the previously created writer (i.e. the next operator downstream)
        for (int i = 0; i < operators.length; i++) {
            int operatorIndex = operators.length - i - 1;
            OneInputStreamOperator<T, T> op = operators[operatorIndex];
            if (op instanceof SetupableStreamOperator) {
                ((SetupableStreamOperator) op).setup(containingTask, cfg, lastWriter);
            }
            lastWriter = new ChainingOutput<>(op, null);

            ProcessingTimeService processingTimeService = null;
            if (op instanceof AbstractStreamOperator) {
                processingTimeService = ((AbstractStreamOperator) op).getProcessingTimeService();
            }
            operatorWrappers.add(
                    new StreamOperatorWrapper<>(
                            op,
                            Optional.ofNullable(processingTimeService),
                            containingTask.getMailboxExecutorFactory().createExecutor(i),
                            operatorIndex == 0));
        }

        @SuppressWarnings("unchecked")
        final StreamOperatorWrapper<T, OP> headOperatorWrapper =
                (StreamOperatorWrapper<T, OP>) operatorWrappers.get(operatorWrappers.size() - 1);

        return new RegularOperatorChain<>(
                operatorWrappers, new RecordWriterOutput<?>[0], lastWriter, headOperatorWrapper);
    }
}
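For context, a minimal sketch of how a test might call this helper. The operator choices (Flink's StreamMap with simple string functions) are illustrative assumptions, not part of the original snippet.

// Hypothetical usage: build a two-operator chain for a unit test.
// The functions passed to StreamMap are arbitrary examples.
OperatorChain<String, OneInputStreamOperator<String, String>> chain =
        setupOperatorChain(
                new StreamMap<>(String::trim),          // head operator
                new StreamMap<>(String::toUpperCase));  // chained downstream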
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache: class AsyncCheckpointRunnableTest, method testDeclineAsyncCheckpoint.
@Test
public void testDeclineAsyncCheckpoint() {
    CheckpointFailureReason originalReason =
            CheckpointFailureReason.CHECKPOINT_DECLINED_INPUT_END_OF_STREAM;

    final Map<OperatorID, OperatorSnapshotFutures> snapshotsInProgress = new HashMap<>();
    snapshotsInProgress.put(
            new OperatorID(),
            new OperatorSnapshotFutures(
                    DoneFuture.of(SnapshotResult.empty()),
                    DoneFuture.of(SnapshotResult.empty()),
                    DoneFuture.of(SnapshotResult.empty()),
                    DoneFuture.of(SnapshotResult.empty()),
                    // the failing future carries the decline reason under test
                    ExceptionallyDoneFuture.of(new CheckpointException(originalReason)),
                    DoneFuture.of(SnapshotResult.empty())));

    final TestEnvironment environment = new TestEnvironment();
    final AsyncCheckpointRunnable runnable =
            createAsyncRunnable(snapshotsInProgress, environment, false, true);
    runnable.run();

    // assertSame(expected, actual): the original failure reason must survive
    Assert.assertSame(originalReason, environment.getCause().getCheckpointFailureReason());
}
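As an aside, OperatorID extends Flink's AbstractID, so tests can choose between a random id and a deterministic one. A short sketch, where the constructor arguments shown are arbitrary example values:

// Random id, unique per call: what the test above uses as a map key.
OperatorID randomId = new OperatorID();

// Deterministic id from explicit (lowerPart, upperPart) longs, useful when a
// test must reference the same operator across runs. Values are arbitrary.
OperatorID fixedId = new OperatorID(0L, 42L);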
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache: class StreamTaskCancellationBarrierTest, method testDeclineCallOnCancelBarrierTwoInputs.
/**
 * This test verifies, for two-input tasks, that stream tasks react to receiving a
 * checkpoint cancellation barrier by:
 *
 * <ul>
 *   <li>sending a "decline checkpoint" notification to the JobManager, and
 *   <li>emitting a cancellation barrier downstream.
 * </ul>
 */
@Test
public void testDeclineCallOnCancelBarrierTwoInputs() throws Exception {
    TwoInputStreamTaskTestHarness<String, String, String> testHarness =
            new TwoInputStreamTaskTestHarness<>(
                    TwoInputStreamTask::new,
                    BasicTypeInfo.STRING_TYPE_INFO,
                    BasicTypeInfo.STRING_TYPE_INFO,
                    BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();

    StreamConfig streamConfig = testHarness.getStreamConfig();
    CoStreamMap<String, String, String> op = new CoStreamMap<>(new UnionCoMap());
    streamConfig.setStreamOperator(op);
    streamConfig.setOperatorID(new OperatorID());

    StreamMockEnvironment environment = spy(testHarness.createEnvironment());

    // start the task
    testHarness.invoke(environment);
    testHarness.waitForTaskRunning();

    // emit cancellation barriers on both inputs
    testHarness.processEvent(new CancelCheckpointMarker(2L), 0, 0);
    testHarness.processEvent(new CancelCheckpointMarker(2L), 1, 0);
    testHarness.waitForInputProcessing();

    // the decline call should go to the coordinator
    verify(environment, times(1))
            .declineCheckpoint(
                    eq(2L),
                    argThat(
                            new AlignedCheckpointsTest.CheckpointExceptionMatcher(
                                    CheckpointFailureReason.CHECKPOINT_DECLINED_ON_CANCELLATION_BARRIER)));

    // a cancellation barrier should be forwarded downstream
    Object result = testHarness.getOutput().poll();
    assertNotNull("nothing emitted", result);
    assertTrue("wrong type emitted", result instanceof CancelCheckpointMarker);
    assertEquals("wrong checkpoint id", 2L, ((CancelCheckpointMarker) result).getCheckpointId());

    // cancel and shutdown
    testHarness.endInput();
    testHarness.waitForTaskCompletion();
}
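The test references UnionCoMap without showing it. A plausible minimal implementation, consistent with how it is used above (this definition is an assumption, not copied from the source):

// Hypothetical: a CoMapFunction that forwards both inputs unchanged, so the
// two-input task behaves like a union of its inputs.
private static class UnionCoMap implements CoMapFunction<String, String, String> {
    @Override
    public String map1(String value) {
        return value;
    }

    @Override
    public String map2(String value) {
        return value;
    }
}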
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache: class RandomSortMergeInnerJoinTest, method join.
public static LinkedBlockingQueue<Object> join(
        StreamOperator operator,
        MutableObjectIterator<Tuple2<Integer, String>> input1,
        MutableObjectIterator<Tuple2<Integer, String>> input2,
        boolean input1First) throws Exception {
    InternalTypeInfo<RowData> typeInfo =
            InternalTypeInfo.ofFields(new IntType(), VarCharType.STRING_TYPE);
    InternalTypeInfo<RowData> joinedInfo =
            InternalTypeInfo.ofFields(
                    new IntType(), VarCharType.STRING_TYPE, new IntType(), VarCharType.STRING_TYPE);
    final TwoInputStreamTaskTestHarness<BinaryRowData, BinaryRowData, JoinedRowData> testHarness =
            new TwoInputStreamTaskTestHarness<>(
                    TwoInputStreamTask::new,
                    2,
                    1,
                    new int[] {1, 2},
                    typeInfo,
                    (TypeInformation) typeInfo,
                    joinedInfo);

    // Pitfall: TwoInputStreamTaskTestHarness allocates one buffer per record by
    // default, so enlarge the buffer size here.
    testHarness.bufferSize = 10 * 1024;
    testHarness.getExecutionConfig().enableObjectReuse();
    testHarness.memorySize = 36 * 1024 * 1024;

    testHarness.setupOutputForSingletonOperatorChain();
    testHarness.getStreamConfig().setStreamOperator(operator);
    testHarness.getStreamConfig().setOperatorID(new OperatorID());
    testHarness.getStreamConfig()
            .setManagedMemoryFractionOperatorOfUseCase(ManagedMemoryUseCase.OPERATOR, 0.99);

    long initialTime = 0L;
    testHarness.invoke();
    testHarness.waitForTaskRunning();

    if (input1First) {
        Tuple2<Integer, String> tuple2 = new Tuple2<>();
        while ((tuple2 = input1.next(tuple2)) != null) {
            testHarness.processElement(
                    new StreamRecord<>(newRow(tuple2.f0, tuple2.f1), initialTime), 0, 0);
        }
        testHarness.waitForInputProcessing();

        tuple2 = new Tuple2<>();
        while ((tuple2 = input2.next(tuple2)) != null) {
            testHarness.processElement(
                    new StreamRecord<>(newRow(tuple2.f0, tuple2.f1), initialTime), 1, 0);
        }
        testHarness.waitForInputProcessing();
    } else {
        Tuple2<Integer, String> tuple2 = new Tuple2<>();
        while ((tuple2 = input2.next(tuple2)) != null) {
            testHarness.processElement(
                    new StreamRecord<>(newRow(tuple2.f0, tuple2.f1), initialTime), 1, 0);
        }
        testHarness.waitForInputProcessing();

        tuple2 = new Tuple2<>();
        while ((tuple2 = input1.next(tuple2)) != null) {
            testHarness.processElement(
                    new StreamRecord<>(newRow(tuple2.f0, tuple2.f1), initialTime), 0, 0);
        }
        testHarness.waitForInputProcessing();
    }

    testHarness.endInput();
    testHarness.waitForTaskCompletion();
    return testHarness.getOutput();
}
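The join method relies on a newRow helper that is not shown here. A plausible implementation, inferred as an assumption from how it is called above, would pack the (int, string) pair into Flink's binary row format:

// Hypothetical helper: serialize an (int, string) pair into a BinaryRowData.
public static BinaryRowData newRow(int key, String value) {
    BinaryRowData row = new BinaryRowData(2);
    BinaryRowWriter writer = new BinaryRowWriter(row);
    writer.writeInt(0, key);
    writer.writeString(1, StringData.fromString(value));
    writer.complete();
    return row;
}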
Use of org.apache.flink.runtime.jobgraph.OperatorID in project flink by apache: class Int2HashJoinOperatorTest, method joinAndAssert.
@SuppressWarnings("unchecked")
static void joinAndAssert(
        Object operator,
        MutableObjectIterator<BinaryRowData> input1,
        MutableObjectIterator<BinaryRowData> input2,
        int expectOutSize,
        int expectOutKeySize,
        int expectOutVal,
        boolean semiJoin) throws Exception {
    InternalTypeInfo<RowData> typeInfo = InternalTypeInfo.ofFields(new IntType(), new IntType());
    InternalTypeInfo<RowData> rowDataTypeInfo =
            InternalTypeInfo.ofFields(new IntType(), new IntType(), new IntType(), new IntType());
    TwoInputStreamTaskTestHarness<BinaryRowData, BinaryRowData, JoinedRowData> testHarness =
            new TwoInputStreamTaskTestHarness<>(
                    TwoInputStreamTask::new,
                    2,
                    1,
                    new int[] {1, 2},
                    typeInfo,
                    (TypeInformation) typeInfo,
                    rowDataTypeInfo);
    testHarness.memorySize = 36 * 1024 * 1024;
    testHarness.getExecutionConfig().enableObjectReuse();
    testHarness.setupOutputForSingletonOperatorChain();
    if (operator instanceof StreamOperator) {
        testHarness.getStreamConfig().setStreamOperator((StreamOperator<?>) operator);
    } else {
        testHarness.getStreamConfig().setStreamOperatorFactory((StreamOperatorFactory<?>) operator);
    }
    testHarness.getStreamConfig().setOperatorID(new OperatorID());
    testHarness.getStreamConfig()
            .setManagedMemoryFractionOperatorOfUseCase(ManagedMemoryUseCase.OPERATOR, 0.99);
    testHarness.invoke();
    testHarness.waitForTaskRunning();

    // interleave the two inputs in random order until both are exhausted
    Random random = new Random();
    do {
        BinaryRowData row1 = null;
        BinaryRowData row2 = null;
        if (random.nextInt(2) == 0) {
            row1 = input1.next();
            if (row1 == null) {
                row2 = input2.next();
            }
        } else {
            row2 = input2.next();
            if (row2 == null) {
                row1 = input1.next();
            }
        }
        if (row1 == null && row2 == null) {
            break;
        }
        if (row1 != null) {
            testHarness.processElement(new StreamRecord<>(row1), 0, 0);
        } else {
            testHarness.processElement(new StreamRecord<>(row2), 1, 0);
        }
    } while (true);

    testHarness.endInput(0, 0);
    testHarness.endInput(1, 0);
    testHarness.waitForInputProcessing();
    testHarness.waitForTaskCompletion();

    Queue<Object> actual = testHarness.getOutput();
    Assert.assertEquals("Output was not correct.", expectOutSize, actual.size());

    // don't verify the output values when expectOutVal is -1
    if (expectOutVal != -1) {
        if (semiJoin) {
            // sum the values per key, then validate the key count and per-key sums
            HashMap<Integer, Long> map = new HashMap<>(expectOutKeySize);
            for (Object o : actual) {
                StreamRecord<RowData> record = (StreamRecord<RowData>) o;
                RowData row = record.getValue();
                int key = row.getInt(0);
                int val = row.getInt(1);
                Long contained = map.get(key);
                if (contained == null) {
                    contained = (long) val;
                } else {
                    contained = valueOf(contained + val);
                }
                map.put(key, contained);
            }
            Assert.assertEquals("Wrong number of keys", expectOutKeySize, map.size());
            for (Map.Entry<Integer, Long> entry : map.entrySet()) {
                long val = entry.getValue();
                int key = entry.getKey();
                Assert.assertEquals(
                        "Wrong number of values in per-key cross product for key " + key,
                        expectOutVal,
                        val);
            }
        } else {
            // create the map for validating the results; either side of the
            // joined row may be null for outer joins
            HashMap<Integer, Long> map = new HashMap<>(expectOutKeySize);
            for (Object o : actual) {
                StreamRecord<RowData> record = (StreamRecord<RowData>) o;
                RowData row = record.getValue();
                int key = row.isNullAt(0) ? row.getInt(2) : row.getInt(0);
                int val1 = 0;
                int val2 = 0;
                if (!row.isNullAt(1)) {
                    val1 = row.getInt(1);
                }
                if (!row.isNullAt(3)) {
                    val2 = row.getInt(3);
                }
                int val = val1 + val2;
                Long contained = map.get(key);
                if (contained == null) {
                    contained = (long) val;
                } else {
                    contained = valueOf(contained + val);
                }
                map.put(key, contained);
            }
            Assert.assertEquals("Wrong number of keys", expectOutKeySize, map.size());
            for (Map.Entry<Integer, Long> entry : map.entrySet()) {
                long val = entry.getValue();
                int key = entry.getKey();
                Assert.assertEquals(
                        "Wrong number of values in per-key cross product for key " + key,
                        expectOutVal,
                        val);
            }
        }
    }
}
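A side note on the accumulation idiom above: the null-check-then-put pattern (which relies on a static import of Long.valueOf) can be written more compactly with Map.merge. This is a stylistic alternative, not what the Flink test uses:

// Equivalent per-key accumulation in one call:
map.merge(key, (long) val, Long::sum);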