Use of org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness in project flink by apache.
From class ContinuousFileProcessingMigrationTest, method writeMonitoringSourceSnapshot.
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeMonitoringSourceSnapshot() throws Exception {
    File testFolder = tempFolder.newFolder();
    long fileModTime = Long.MIN_VALUE;
    for (int i = 0; i < 1; i++) {
        Tuple2<File, String> file = createFileAndFillWithData(testFolder, "file", i, "This is test line.");
        fileModTime = file.f0.lastModified();
    }
    TextInputFormat format = new TextInputFormat(new Path(testFolder.getAbsolutePath()));
    final ContinuousFileMonitoringFunction<String> monitoringFunction =
            new ContinuousFileMonitoringFunction<>(format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);
    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
            new StreamSource<>(monitoringFunction);
    final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
            new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
    testHarness.open();
    final Throwable[] error = new Throwable[1];
    final OneShotLatch latch = new OneShotLatch();
    // run the source asynchronously
    Thread runner = new Thread() {
        @Override
        public void run() {
            try {
                monitoringFunction.run(new DummySourceContext() {
                    @Override
                    public void collect(TimestampedFileInputSplit element) {
                        latch.trigger();
                    }

                    @Override
                    public void markAsTemporarilyIdle() {
                    }
                });
            } catch (Throwable t) {
                t.printStackTrace();
                error[0] = t;
            }
        }
    };
    runner.start();
    // wait until the monitoring function has emitted at least one split
    if (!latch.isTriggered()) {
        latch.await();
    }
    final OperatorSubtaskState snapshot;
    // take the snapshot under the checkpoint lock so it cannot interleave with emission
    synchronized (testHarness.getCheckpointLock()) {
        snapshot = testHarness.snapshot(0L, 0L);
    }
    OperatorSnapshotUtil.writeStateHandle(snapshot,
            "src/test/resources/monitoring-function-migration-test-" + fileModTime
                    + "-flink" + flinkGenerateSavepointVersion + "-snapshot");
    monitoringFunction.cancel();
    runner.join();
    testHarness.close();
}
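Two details in the snippet above generalize to any harness-based snapshot test: the snapshot must be taken while holding the harness's checkpoint lock, because the source thread emits under that same lock, and the resulting OperatorSubtaskState is persisted with OperatorSnapshotUtil. A condensed sketch of just those two steps (targetPath is a hypothetical placeholder, not from the test):

final OperatorSubtaskState snapshot;
synchronized (testHarness.getCheckpointLock()) {
    // arguments are (checkpointId, timestamp)
    snapshot = testHarness.snapshot(0L, 0L);
}
// persist the state handle to disk for later migration tests; targetPath is hypothetical
OperatorSnapshotUtil.writeStateHandle(snapshot, targetPath);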
Use of org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness in project flink by apache.
From class ContinuousFileProcessingMigrationTest, method testMonitoringSourceRestore.
@Test
public void testMonitoringSourceRestore() throws Exception {
    File testFolder = tempFolder.newFolder();
    TextInputFormat format = new TextInputFormat(new Path(testFolder.getAbsolutePath()));
    final ContinuousFileMonitoringFunction<String> monitoringFunction =
            new ContinuousFileMonitoringFunction<>(format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);
    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
            new StreamSource<>(monitoringFunction);
    final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
            new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
    testHarness.setup();
    // restore from the binary snapshot written by writeMonitoringSourceSnapshot
    testHarness.initializeState(
            OperatorSnapshotUtil.getResourceFilename(
                    "monitoring-function-migration-test-" + expectedModTime + "-flink" + testMigrateVersion + "-snapshot"));
    testHarness.open();
    // the restored global modification time must match the one recorded when the snapshot was written
    Assert.assertEquals((long) expectedModTime, monitoringFunction.getGlobalModificationTime());
}
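The restore half depends on the harness lifecycle methods being called in a fixed order. A generic template distilled from the test above (operator, snapshotPath, and the parallelism arguments are placeholders):

// setup() wires the operator into the harness, initializeState(...) hands it the
// previously written snapshot, and open() comes last so the operator starts with
// the restored state already in place. The constructor arguments are
// (operator, maxParallelism, parallelism, subtaskIndex).
AbstractStreamOperatorTestHarness<OUT> harness =
        new AbstractStreamOperatorTestHarness<>(operator, maxParallelism, parallelism, subtaskIndex);
harness.setup();
harness.initializeState(snapshotPath);
harness.open();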
Use of org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness in project flink by apache.
From class ArrowSourceFunctionTestBase, method testRestore.
@Test
public void testRestore() throws Exception {
    Tuple2<List<RowData>, Integer> testData = getTestData();
    final ArrowSourceFunction arrowSourceFunction = createTestArrowSourceFunction(testData.f0, testData.f1);
    final AbstractStreamOperatorTestHarness<RowData> testHarness =
            new AbstractStreamOperatorTestHarness<>(new StreamSource<>(arrowSourceFunction), 1, 1, 0);
    testHarness.open();
    final Throwable[] error = new Throwable[1];
    final MultiShotLatch latch = new MultiShotLatch();
    final AtomicInteger numOfEmittedElements = new AtomicInteger(0);
    final List<RowData> results = new ArrayList<>();
    final DummySourceContext<RowData> sourceContext = new DummySourceContext<RowData>() {
        @Override
        public void collect(RowData element) {
            if (numOfEmittedElements.get() == 2) {
                latch.trigger();
                // fail the source function once two elements have been emitted
                throw new RuntimeException("Fail the arrow source");
            }
            results.add(typeSerializer.copy(element));
            numOfEmittedElements.incrementAndGet();
        }
    };
    // run the source asynchronously
    Thread runner = new Thread(() -> {
        try {
            arrowSourceFunction.run(sourceContext);
        } catch (Throwable t) {
            // the failure thrown above is expected; anything else is a real error
            if (!"Fail the arrow source".equals(t.getMessage())) {
                error[0] = t;
            }
        }
    });
    runner.start();
    if (!latch.isTriggered()) {
        latch.await();
    }
    OperatorSubtaskState snapshot;
    synchronized (sourceContext.getCheckpointLock()) {
        snapshot = testHarness.snapshot(0, 0);
    }
    runner.join();
    testHarness.close();
    // restore a fresh source function from the snapshot and let it emit the remaining elements
    final ArrowSourceFunction arrowSourceFunction2 = createTestArrowSourceFunction(testData.f0, testData.f1);
    AbstractStreamOperatorTestHarness<RowData> testHarnessCopy =
            new AbstractStreamOperatorTestHarness<>(new StreamSource<>(arrowSourceFunction2), 1, 1, 0);
    testHarnessCopy.initializeState(snapshot);
    testHarnessCopy.open();
    // run the restored source asynchronously
    Thread runner2 = new Thread(() -> {
        try {
            arrowSourceFunction2.run(new DummySourceContext<RowData>() {
                @Override
                public void collect(RowData element) {
                    results.add(typeSerializer.copy(element));
                    if (numOfEmittedElements.incrementAndGet() == testData.f0.size()) {
                        latch.trigger();
                    }
                }
            });
        } catch (Throwable t) {
            error[0] = t;
        }
    });
    runner2.start();
    if (!latch.isTriggered()) {
        latch.await();
    }
    runner2.join();
    Assert.assertNull(error[0]);
    Assert.assertEquals(testData.f0.size(), numOfEmittedElements.get());
    checkElementsEquals(results, testData.f0);
}
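DummySourceContext here is a test helper rather than public Flink API. A minimal sketch of the shape such a helper might have, implementing SourceFunction.SourceContext with stubbed defaults (an assumption about the helper, not the actual Flink test class):

// Hypothetical reconstruction of a DummySourceContext test helper: collect(T)
// is left abstract so each test supplies its own behavior; everything else is a stub.
private abstract static class DummySourceContext<T> implements SourceFunction.SourceContext<T> {

    private final Object lock = new Object();

    @Override
    public void collectWithTimestamp(T element, long timestamp) {
    }

    @Override
    public void emitWatermark(Watermark mark) {
    }

    @Override
    public void markAsTemporarilyIdle() {
    }

    @Override
    public Object getCheckpointLock() {
        return lock;
    }

    @Override
    public void close() {
    }
}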
Use of org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness in project flink by apache.
From class FlinkKafkaConsumerBaseMigrationTest, method testRestoreFromFlink11WithEmptyStateWithPartitions.
/**
 * Test restoring from an empty state snapshot taken with Flink 1.1, when partitions could be found for the topics.
 */
@Test
public void testRestoreFromFlink11WithEmptyStateWithPartitions() throws Exception {
    final List<KafkaTopicPartition> partitions = new ArrayList<>();
    partitions.add(new KafkaTopicPartition("abc", 13));
    partitions.add(new KafkaTopicPartition("def", 7));
    final DummyFlinkKafkaConsumer<String> consumerFunction = new DummyFlinkKafkaConsumer<>(partitions);
    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);
    final AbstractStreamOperatorTestHarness<String> testHarness =
            new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);
    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    testHarness.setup();
    // restore state from the binary snapshot file using the legacy method
    testHarness.initializeStateFromLegacyCheckpoint(getResourceFilename("kafka-consumer-migration-test-flink1.1-snapshot-empty-state"));
    testHarness.open();
    // the state in "kafka-consumer-migration-test-flink1.1-snapshot-empty-state" is empty,
    // so the consumer should fall back to the startup mode to determine start offsets
    final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
    expectedSubscribedPartitionsWithStartOffsets.put(new KafkaTopicPartition("abc", 13), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    expectedSubscribedPartitionsWithStartOffsets.put(new KafkaTopicPartition("def", 7), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    // assert that the subscribed partitions are present and identical to the expected list
    Assert.assertNotNull(consumerFunction.getSubscribedPartitionsToStartOffsets());
    Assert.assertFalse(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
    Assert.assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());
    // assert that no state was restored
    Assert.assertNull(consumerFunction.getRestoredState());
    consumerOperator.close();
    consumerOperator.cancel();
}
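initializeStateFromLegacyCheckpoint is only needed for savepoints written by Flink 1.1 and older; snapshots produced by later versions are restored through the regular initializeState path shown in the monitoring-source test above, e.g.:

// for non-legacy snapshots, restore through the standard method;
// the resource name below is a hypothetical example, not a real test file
testHarness.initializeState(
        OperatorSnapshotUtil.getResourceFilename("kafka-consumer-migration-test-flink1.2-snapshot"));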
Use of org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness in project flink by apache.
From class ListCheckpointedTest, method testUDFReturningEmpty.
@Test
public void testUDFReturningEmpty() throws Exception {
    TestUserFunction userFunction = new TestUserFunction(Collections.<Integer>emptyList());
    AbstractStreamOperatorTestHarness<Integer> testHarness =
            new AbstractStreamOperatorTestHarness<>(new StreamMap<>(userFunction), 1, 1, 0);
    testHarness.open();
    // snapshot the (empty) list state and immediately restore from it
    OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);
    testHarness.initializeState(snapshot);
    Assert.assertTrue(userFunction.isRestored());
}
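TestUserFunction is defined elsewhere in ListCheckpointedTest. A minimal sketch of what a function like it could look like, combining MapFunction with the (since deprecated) org.apache.flink.streaming.api.checkpoint.ListCheckpointed interface; this is a hypothetical reconstruction, not the actual Flink class:

// Hypothetical user function: maps values through unchanged, snapshots a fixed
// list, and records whether restoreState was ever invoked.
private static class TestUserFunction implements MapFunction<Integer, Integer>, ListCheckpointed<Integer> {

    private final List<Integer> expected;
    private boolean restored;

    TestUserFunction(List<Integer> expected) {
        this.expected = expected;
    }

    boolean isRestored() {
        return restored;
    }

    @Override
    public Integer map(Integer value) {
        return value;
    }

    @Override
    public List<Integer> snapshotState(long checkpointId, long timestamp) {
        return expected;
    }

    @Override
    public void restoreState(List<Integer> state) {
        restored = true;
    }
}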