Use of org.apache.flink.streaming.api.graph.StreamConfig in project flink by apache.
From class ForwardForUnspecifiedPartitionerTest, method testConvertToForwardPartitioner:
@Test
public void testConvertToForwardPartitioner() {
    JobGraph jobGraph =
            StreamPartitionerTestUtils.createJobGraph(
                    "group1", "group1", new ForwardForUnspecifiedPartitioner<>());
    List<JobVertex> jobVertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    assertThat(jobVertices.size(), is(1));

    // Both operators are placed in the same group, so they chain into a single
    // job vertex and the placeholder partitioner is resolved to FORWARD.
    JobVertex vertex = jobVertices.get(0);
    StreamConfig sourceConfig = new StreamConfig(vertex.getConfiguration());
    StreamEdge edge = sourceConfig.getChainedOutputs(getClass().getClassLoader()).get(0);
    assertThat(edge.getPartitioner(), instanceOf(ForwardPartitioner.class));
}
Use of org.apache.flink.streaming.api.graph.StreamConfig in project flink by apache.
From class ForwardForUnspecifiedPartitionerTest, method testConvertToRescalePartitioner:
@Test
public void testConvertToRescalePartitioner() {
    JobGraph jobGraph =
            StreamPartitionerTestUtils.createJobGraph(
                    "group1", "group2", new ForwardForUnspecifiedPartitioner<>());
    List<JobVertex> jobVertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    assertThat(jobVertices.size(), is(2));

    // Different groups prevent chaining: the edge is non-chained, and the
    // placeholder partitioner falls back to RESCALE instead.
    JobVertex sourceVertex = jobVertices.get(0);
    StreamConfig sourceConfig = new StreamConfig(sourceVertex.getConfiguration());
    StreamEdge edge = sourceConfig.getNonChainedOutputs(getClass().getClassLoader()).get(0);
    assertThat(edge.getPartitioner(), instanceOf(RescalePartitioner.class));
}
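Taken together, the two tests pin down how the placeholder is resolved while the JobGraph is built: on a chainable edge a ForwardForUnspecifiedPartitioner becomes a plain ForwardPartitioner, and on a non-chainable edge it falls back to a RescalePartitioner. Below is a minimal sketch of that decision rule, not Flink's actual conversion code; it assumes chainability has already been decided elsewhere.
import org.apache.flink.streaming.runtime.partitioner.ForwardForUnspecifiedPartitioner;
import org.apache.flink.streaming.runtime.partitioner.ForwardPartitioner;
import org.apache.flink.streaming.runtime.partitioner.RescalePartitioner;
import org.apache.flink.streaming.runtime.partitioner.StreamPartitioner;

final class PartitionerResolutionSketch {

    // Illustrative only: resolve the "unspecified" placeholder once we know
    // whether the edge ended up chained into a single job vertex.
    static <T> StreamPartitioner<T> resolve(StreamPartitioner<T> declared, boolean chainable) {
        if (declared instanceof ForwardForUnspecifiedPartitioner) {
            return chainable ? new ForwardPartitioner<>() : new RescalePartitioner<>();
        }
        return declared;
    }
}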
Use of org.apache.flink.streaming.api.graph.StreamConfig in project flink by apache.
From class OneInputStreamTaskTest, method testOpenCloseAndTimestamps:
/**
 * This test verifies that open() and close() are correctly called, and that
 * timestamps of emitted elements are correct. {@link StreamMap} assigns the
 * input timestamp to emitted elements.
 */
@Test
public void testOpenCloseAndTimestamps() throws Exception {
    final OneInputStreamTaskTestHarness<String, String> testHarness =
            new OneInputStreamTaskTestHarness<>(
                    OneInputStreamTask::new,
                    BasicTypeInfo.STRING_TYPE_INFO,
                    BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();

    StreamConfig streamConfig = testHarness.getStreamConfig();
    StreamMap<String, String> mapOperator = new StreamMap<>(new TestOpenCloseMapFunction());
    streamConfig.setStreamOperator(mapOperator);
    streamConfig.setOperatorID(new OperatorID());

    long initialTime = 0L;
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    testHarness.invoke();
    testHarness.waitForTaskRunning();

    testHarness.processElement(new StreamRecord<>("Hello", initialTime + 1));
    testHarness.processElement(new StreamRecord<>("Ciao", initialTime + 2));
    expectedOutput.add(new StreamRecord<>("Hello", initialTime + 1));
    expectedOutput.add(new StreamRecord<>("Ciao", initialTime + 2));

    testHarness.waitForInputProcessing();
    testHarness.endInput();
    testHarness.waitForTaskCompletion();

    assertTrue("RichFunction methods were not called.", TestOpenCloseMapFunction.closeCalled);
    TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
}
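TestOpenCloseMapFunction itself is not shown in the snippet. A plausible sketch of such a helper, assuming it records lifecycle calls in static flags (the field closeCalled is taken from the assertion above; everything else is an assumption):
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;

// Hypothetical reconstruction of the helper used by the test above.
class TestOpenCloseMapFunction extends RichMapFunction<String, String> {

    static volatile boolean openCalled = false;   // assumed field
    static volatile boolean closeCalled = false;  // referenced by the test's assertion

    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);
        openCalled = true;
    }

    @Override
    public void close() throws Exception {
        super.close();
        if (!openCalled) {
            throw new IllegalStateException("close() was called before open()");
        }
        closeCalled = true;
    }

    @Override
    public String map(String value) {
        // StreamMap forwards the input timestamp, so the function itself only
        // transforms the value (identity here).
        return value;
    }
}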
Use of org.apache.flink.streaming.api.graph.StreamConfig in project flink by apache.
From class InterruptSensitiveRestoreTest, method testRestoreWithInterrupt:
private void testRestoreWithInterrupt(int mode) throws Exception {
    IN_RESTORE_LATCH.reset();
    Configuration taskConfig = new Configuration();
    StreamConfig cfg = new StreamConfig(taskConfig);
    cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    switch (mode) {
        case OPERATOR_MANAGED:
        case OPERATOR_RAW:
        case KEYED_MANAGED:
        case KEYED_RAW:
            cfg.setStateKeySerializer(IntSerializer.INSTANCE);
            cfg.setStreamOperator(new StreamSource<>(new TestSource(mode)));
            break;
        default:
            throw new IllegalArgumentException();
    }
    StreamStateHandle lockingHandle = new InterruptLockingStateHandle();
    Task task = createTask(cfg, taskConfig, lockingHandle, mode);

    // start the task and wait until it is in "restore"
    task.startTaskThread();
    IN_RESTORE_LATCH.await();

    // trigger cancellation and signal to continue
    task.cancelExecution();
    task.getExecutingThread().join(30000);

    if (task.getExecutionState() == ExecutionState.CANCELING) {
        fail("Task is stuck and not canceling");
    }
    assertEquals(ExecutionState.CANCELED, task.getExecutionState());
    assertNull(task.getFailureCause());
}
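The snippet relies on two helpers that are not shown: IN_RESTORE_LATCH and TestSource. A rough sketch of how such a source can signal that restore has begun, assuming it participates in state initialization via CheckpointedFunction (the real test class may differ):
import org.apache.flink.core.testutils.OneShotLatch;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

// Hypothetical shape of the TestSource used above: it trips the shared latch
// once the task enters state restoration, so the test thread knows it is safe
// to cancel. Reading the injected InterruptLockingStateHandle then blocks
// until the cancellation interrupt arrives.
class LatchedRestoreSource implements SourceFunction<Object>, CheckpointedFunction {

    static final OneShotLatch IN_RESTORE_LATCH = new OneShotLatch();

    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {
        IN_RESTORE_LATCH.trigger(); // the test thread await()s on this latch
        // ... touch the restored state here; with the blocking handle this
        // does not return until the task is interrupted
    }

    @Override
    public void snapshotState(FunctionSnapshotContext context) {
        // not exercised by this test
    }

    @Override
    public void run(SourceContext<Object> ctx) {
        // never reached: the task is cancelled while still restoring
    }

    @Override
    public void cancel() {}
}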
Use of org.apache.flink.streaming.api.graph.StreamConfig in project flink by apache.
From class RocksDBAsyncSnapshotTest, method testCancelFullyAsyncCheckpoints:
/**
 * This test ensures that canceling asynchronous snapshots works as expected and does not
 * block.
 */
@Test
public void testCancelFullyAsyncCheckpoints() throws Exception {
    final OneInputStreamTaskTestHarness<String, String> testHarness =
            new OneInputStreamTaskTestHarness<>(
                    OneInputStreamTask::new,
                    BasicTypeInfo.STRING_TYPE_INFO,
                    BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();
    testHarness.configureForKeyedStream(value -> value, BasicTypeInfo.STRING_TYPE_INFO);

    StreamConfig streamConfig = testHarness.getStreamConfig();
    File dbDir = temporaryFolder.newFolder();

    final EmbeddedRocksDBStateBackend.PriorityQueueStateType timerServicePriorityQueueType =
            RocksDBOptions.TIMER_SERVICE_FACTORY.defaultValue();
    final int skipStreams;
    if (timerServicePriorityQueueType == EmbeddedRocksDBStateBackend.PriorityQueueStateType.HEAP) {
        // we skip the first created stream, because it is used to checkpoint
        // the timer service, which is currently not asynchronous
        skipStreams = 1;
    } else if (timerServicePriorityQueueType
            == EmbeddedRocksDBStateBackend.PriorityQueueStateType.ROCKSDB) {
        skipStreams = 0;
    } else {
        throw new AssertionError(
                String.format(
                        "Unknown timer service priority queue type %s.",
                        timerServicePriorityQueueType));
    }

    // this is the proper instance that we need to call
    BlockerCheckpointStreamFactory blockerCheckpointStreamFactory =
            new BlockerCheckpointStreamFactory(4 * 1024 * 1024) {

                int count = skipStreams;

                @Override
                public CheckpointStateOutputStream createCheckpointStateOutputStream(
                        CheckpointedStateScope scope) throws IOException {
                    if (count > 0) {
                        --count;
                        return new BlockingCheckpointOutputStream(
                                new MemCheckpointStreamFactory.MemoryCheckpointOutputStream(maxSize),
                                null,
                                null,
                                Integer.MAX_VALUE);
                    } else {
                        return super.createCheckpointStateOutputStream(scope);
                    }
                }
            };

    // to avoid serialization of the above factory instance, we need to pass it in
    // through a static variable
    StateBackend stateBackend =
            new BackendForTestStream(new StaticForwardFactory(blockerCheckpointStreamFactory));
    RocksDBStateBackend backend = new RocksDBStateBackend(stateBackend);
    backend.setDbStoragePath(dbDir.getAbsolutePath());

    streamConfig.setStateBackend(backend);
    streamConfig.setStreamOperator(new AsyncCheckpointOperator());
    streamConfig.setOperatorID(new OperatorID());

    TestTaskStateManager taskStateManagerTestMock = new TestTaskStateManager();
    StreamMockEnvironment mockEnv =
            new StreamMockEnvironment(
                    testHarness.jobConfig,
                    testHarness.taskConfig,
                    testHarness.memorySize,
                    new MockInputSplitProvider(),
                    testHarness.bufferSize,
                    taskStateManagerTestMock);

    blockerCheckpointStreamFactory.setBlockerLatch(new OneShotLatch());
    blockerCheckpointStreamFactory.setWaiterLatch(new OneShotLatch());

    testHarness.invoke(mockEnv);
    testHarness.waitForTaskRunning();

    final OneInputStreamTask<String, String> task = testHarness.getTask();
    task.triggerCheckpointAsync(
                    new CheckpointMetaData(42, 17),
                    CheckpointOptions.forCheckpointWithDefaultLocation())
            .get();

    testHarness.processElement(new StreamRecord<>("Wohoo", 0));
    blockerCheckpointStreamFactory.getWaiterLatch().await();

    task.cancel();
    blockerCheckpointStreamFactory.getBlockerLatch().trigger();
    testHarness.endInput();

    ExecutorService threadPool = task.getAsyncOperationsThreadPool();
    threadPool.shutdown();
    Assert.assertTrue(threadPool.awaitTermination(60_000, TimeUnit.MILLISECONDS));

    Set<BlockingCheckpointOutputStream> createdStreams =
            blockerCheckpointStreamFactory.getAllCreatedStreams();
    for (BlockingCheckpointOutputStream stream : createdStreams) {
        Assert.assertTrue(
                "Not all of the " + createdStreams.size() + " created streams have been closed.",
                stream.isClosed());
    }

    try {
        testHarness.waitForTaskCompletion();
        fail("Operation completed. Cancel failed.");
    } catch (Exception expected) {
        Throwable cause = expected.getCause();
        if (!(cause instanceof CancelTaskException)) {
            fail("Unexpected exception: " + expected);
        }
    }
}
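The StaticForwardFactory referenced in the comment above is not shown. The trick it implements is generic: anything placed in the task's configuration is serialized, so a non-serializable collaborator is parked in a static field and only a small serializable forwarder is shipped. A minimal, generic sketch of that pattern (all names here are illustrative, not Flink's actual test classes):
import java.io.Serializable;
import java.util.function.Supplier;

final class StaticForwardSketch {

    // Set by the test thread before the task starts; never serialized.
    static volatile Object shared;

    // The only thing that travels through (de)serialization.
    static final class Forwarder implements Supplier<Object>, Serializable {
        private static final long serialVersionUID = 1L;

        @Override
        public Object get() {
            // Resolved on the task side, after the config was deserialized.
            return shared;
        }
    }
}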