Use of org.apache.flink.testutils.junit.SharedReference in project flink by apache.
From the class BufferTimeoutITCase, method testDisablingBufferTimeout.
/**
 * The test verifies that it is possible to disable explicit buffer flushing. It checks that
 * the OutputFlusher thread is not started while the task is running. This does not
 * guarantee, however, that unfinished buffers cannot be flushed by other events.
 */
@Test
public void testDisablingBufferTimeout() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.setBufferTimeout(-1);
    final SharedReference<ArrayList<Integer>> results = sharedObjects.add(new ArrayList<>());
    env.addSource(new SourceFunction<Integer>() {

        @Override
        public void run(SourceContext<Integer> ctx) throws Exception {
            ctx.collect(1);
            // just sleep forever
            Thread.sleep(Long.MAX_VALUE);
        }

        @Override
        public void cancel() {
        }
    }).slotSharingGroup("source").addSink(new SinkFunction<Integer>() {

        @Override
        public void invoke(Integer value, Context context) {
            results.get().add(value);
        }
    }).slotSharingGroup("sink");
    final JobClient jobClient = env.executeAsync();
    CommonTestUtils.waitForAllTaskRunning(
            MINI_CLUSTER_RESOURCE.getMiniCluster(), jobClient.getJobID(), false);
    assertTrue(
            RecordWriter.DEFAULT_OUTPUT_FLUSH_THREAD_NAME + " thread is unexpectedly running",
            Thread.getAllStackTraces().keySet().stream()
                    .noneMatch(
                            thread ->
                                    thread.getName()
                                            .startsWith(RecordWriter.DEFAULT_OUTPUT_FLUSH_THREAD_NAME)));
}
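Every snippet on this page refers to a sharedObjects field that is not part of the excerpts. A minimal sketch of the JUnit rule declaration these tests rely on, based on the SharedObjects companion class from the same org.apache.flink.testutils.junit package (the test class name is illustrative):

import org.apache.flink.testutils.junit.SharedObjects;
import org.apache.flink.testutils.junit.SharedReference;

import org.junit.Rule;

public class SomeITCase {

    // The rule owns objects that live in the test JVM but must be reachable from
    // user functions executed on the MiniCluster; add(...) hands out a serializable
    // SharedReference handle that resolves back to the original object at runtime.
    @Rule
    public final SharedObjects sharedObjects = SharedObjects.create();
}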
Use of org.apache.flink.testutils.junit.SharedReference in project flink by apache.
From the class SavepointITCase, method testTriggerSavepointAndResumeWithNoClaim.
@Test
@Ignore("Disabling this test because it regularly fails on AZP. See FLINK-25427.")
public void testTriggerSavepointAndResumeWithNoClaim() throws Exception {
    final int numTaskManagers = 2;
    final int numSlotsPerTaskManager = 2;
    final int parallelism = numTaskManagers * numSlotsPerTaskManager;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(new EmbeddedRocksDBStateBackend(true));
    env.getCheckpointConfig()
            .enableExternalizedCheckpoints(
                    CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
    env.getCheckpointConfig().setCheckpointStorage(folder.newFolder().toURI());
    env.setParallelism(parallelism);
    final SharedReference<CountDownLatch> counter = sharedObjects.add(new CountDownLatch(10_000));
    env.fromSequence(1, Long.MAX_VALUE)
            .keyBy(i -> i % parallelism)
            .process(new KeyedProcessFunction<Long, Long, Long>() {

                private ListState<Long> last;

                @Override
                public void open(Configuration parameters) {
                    // we use list state here to create sst files of a significant size
                    // if sst files do not reach certain thresholds they are not stored
                    // in files, but as a byte stream in checkpoints metadata
                    last = getRuntimeContext()
                            .getListState(
                                    new ListStateDescriptor<>("last", BasicTypeInfo.LONG_TYPE_INFO));
                }

                @Override
                public void processElement(
                        Long value,
                        KeyedProcessFunction<Long, Long, Long>.Context ctx,
                        Collector<Long> out) throws Exception {
                    last.add(value);
                    out.collect(value);
                }
            })
            .addSink(new SinkFunction<Long>() {

                @Override
                public void invoke(Long value) {
                    counter.consumeSync(CountDownLatch::countDown);
                }
            })
            .setParallelism(1);
    final JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    MiniClusterWithClientResource cluster =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setNumberTaskManagers(numTaskManagers)
                            .setNumberSlotsPerTaskManager(numSlotsPerTaskManager)
                            .build());
    cluster.before();
    try {
        final JobID jobID1 = new JobID();
        jobGraph.setJobID(jobID1);
        cluster.getClusterClient().submitJob(jobGraph).get();
        CommonTestUtils.waitForAllTaskRunning(cluster.getMiniCluster(), jobID1, false);
        // wait for some records to be processed before taking the checkpoint
        counter.get().await();
        final String firstCheckpoint = cluster.getMiniCluster().triggerCheckpoint(jobID1).get();
        cluster.getClusterClient().cancel(jobID1).get();
        jobGraph.setSavepointRestoreSettings(
                SavepointRestoreSettings.forPath(firstCheckpoint, false, RestoreMode.NO_CLAIM));
        final JobID jobID2 = new JobID();
        jobGraph.setJobID(jobID2);
        cluster.getClusterClient().submitJob(jobGraph).get();
        CommonTestUtils.waitForAllTaskRunning(cluster.getMiniCluster(), jobID2, false);
        String secondCheckpoint = cluster.getMiniCluster().triggerCheckpoint(jobID2).get();
        cluster.getClusterClient().cancel(jobID2).get();
        // delete the checkpoint we restored from
        FileUtils.deleteDirectory(Paths.get(new URI(firstCheckpoint)).getParent().toFile());
        // we should be able to restore from the second checkpoint even though it has been built
        // on top of the first checkpoint
        jobGraph.setSavepointRestoreSettings(
                SavepointRestoreSettings.forPath(secondCheckpoint, false, RestoreMode.NO_CLAIM));
        final JobID jobID3 = new JobID();
        jobGraph.setJobID(jobID3);
        cluster.getClusterClient().submitJob(jobGraph).get();
        CommonTestUtils.waitForAllTaskRunning(cluster.getMiniCluster(), jobID3, false);
    } finally {
        cluster.after();
    }
}
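The third argument of SavepointRestoreSettings.forPath selects the restore mode, which determines who owns the restored snapshot. A hedged summary of the variants, reusing the jobGraph and firstCheckpoint names from the test above (semantics paraphrased from the Flink documentation):

// NO_CLAIM (used above): Flink does not take ownership of the restored snapshot and
// never builds new checkpoints on its files, so the snapshot can be deleted once a
// later checkpoint completes, which is exactly what the test verifies.
jobGraph.setSavepointRestoreSettings(
        SavepointRestoreSettings.forPath(firstCheckpoint, false, RestoreMode.NO_CLAIM));

// CLAIM: Flink takes ownership of the snapshot and may delete it once it is subsumed.
jobGraph.setSavepointRestoreSettings(
        SavepointRestoreSettings.forPath(firstCheckpoint, false, RestoreMode.CLAIM));

// LEGACY: pre-1.15 behavior; new checkpoints may reference the restored snapshot
// without either side clearly owning it.
jobGraph.setSavepointRestoreSettings(
        SavepointRestoreSettings.forPath(firstCheckpoint, false, RestoreMode.LEGACY));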
Use of org.apache.flink.testutils.junit.SharedReference in project flink by apache.
From the class FileSinkMigrationITCase, method test.
@Test
public void test() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    SharedReference<Collection<Long>> list = sharedObjects.add(new ArrayList<>());
    int n = 10000;
    env.setParallelism(100);
    env.fromSequence(0, n).map(i -> list.applySync(l -> l.add(i)));
    env.execute();
    assertEquals(n + 1, list.get().size());
    assertEquals(
            LongStream.rangeClosed(0, n).boxed().collect(Collectors.toList()),
            list.get().stream().sorted().collect(Collectors.toList()));
}
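A note on the access methods used above: because the pipeline runs with parallelism 100, many map subtasks append to the same shared ArrayList concurrently, which is why the example goes through applySync instead of dereferencing the list directly. A short sketch of the distinction, using calls that appear elsewhere on this page (the synchronization claim is my reading of SharedReference and should be verified against the version in use):

// get() hands back the shared object without synchronization; fine when only a
// single subtask writes, as in the parallelism-1 sinks above.
results.get().add(value);

// applySync(...) and consumeSync(...) run the lambda while synchronizing on the
// shared object, so concurrent subtasks do not corrupt it.
list.applySync(l -> l.add(i));
counter.consumeSync(CountDownLatch::countDown);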
Use of org.apache.flink.testutils.junit.SharedReference in project flink by apache.
From the class CommonExecSinkITCase, method testFromValuesWatermarkPropagation.
@Test
public void testFromValuesWatermarkPropagation() throws Exception {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final SharedReference<List<Long>> watermarks = sharedObjects.add(new ArrayList<>());
    final SinkFunction<RowData> sinkFunction = new SinkFunction<RowData>() {

        @Override
        public void writeWatermark(org.apache.flink.api.common.eventtime.Watermark watermark) {
            addElement(watermarks, watermark.getTimestamp());
        }
    };
    final TableDescriptor sinkDescriptor =
            TableFactoryHarness.newBuilder()
                    .sink(new TableFactoryHarness.SinkBase() {

                        @Override
                        public DataStreamSinkProvider getSinkRuntimeProvider(DynamicTableSink.Context context) {
                            return (providerContext, dataStream) -> dataStream.addSink(sinkFunction);
                        }
                    })
                    .build();
    final Table source =
            tableEnv.fromValues(
                    DataTypes.ROW(DataTypes.FIELD("a", DataTypes.INT())),
                    Row.of(1), Row.of(2), Row.of(3));
    source.executeInsert(sinkDescriptor).await();
    assertThat(watermarks.get().size()).isEqualTo(env.getParallelism());
    for (Long watermark : watermarks.get()) {
        assertThat(watermark).isEqualTo(Watermark.MAX_WATERMARK.getTimestamp());
    }
}
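The addElement helper is defined elsewhere in CommonExecSinkITCase and is not part of this excerpt. A plausible reconstruction, consistent with how SharedReference is used in the other examples on this page (hypothetical, not the verified original):

// Hypothetical helper: append an element to the shared list under the
// reference's synchronized access.
private static <T> void addElement(SharedReference<List<T>> elements, T element) {
    elements.applySync(list -> list.add(element));
}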
Use of org.apache.flink.testutils.junit.SharedReference in project flink by apache.
From the class CommonExecSinkITCase, method testStreamRecordTimestampInserterDataStreamSinkProvider.
@Test
public void testStreamRecordTimestampInserterDataStreamSinkProvider()
        throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final SharedReference<List<Long>> timestamps = sharedObjects.add(new ArrayList<>());
    final List<Row> rows =
            Arrays.asList(
                    Row.of(1, "foo", Instant.parse("2020-11-10T11:34:56.123Z")),
                    Row.of(2, "foo", Instant.parse("2020-11-10T12:34:56.789Z")),
                    Row.of(3, "foo", Instant.parse("2020-11-11T10:11:22.777Z")),
                    Row.of(4, "foo", Instant.parse("2020-11-11T10:11:23.888Z")));
    final SinkFunction<RowData> sinkFunction = new SinkFunction<RowData>() {

        @Override
        public void invoke(RowData value, Context context) {
            addElement(timestamps, context.timestamp());
        }
    };
    final TableDescriptor sourceDescriptor =
            TableFactoryHarness.newBuilder()
                    .schema(schemaStreamRecordTimestampInserter(true))
                    .source(new TestSource(rows))
                    .sink(new TableFactoryHarness.SinkBase() {

                        @Override
                        public DataStreamSinkProvider getSinkRuntimeProvider(DynamicTableSink.Context context) {
                            return (providerContext, dataStream) -> dataStream.addSink(sinkFunction);
                        }
                    })
                    .build();
    tableEnv.createTable("T1", sourceDescriptor);
    final String sqlStmt = "INSERT INTO T1 SELECT * FROM T1";
    assertPlan(tableEnv, sqlStmt, true);
    tableEnv.executeSql(sqlStmt).await();
    Collections.sort(timestamps.get());
    assertTimestampResults(timestamps, rows);
}
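assertPlan, schemaStreamRecordTimestampInserter, TestSource and assertTimestampResults are further helpers of CommonExecSinkITCase that lie outside this excerpt. A hypothetical sketch of the final assertion, assuming it merely compares the sorted timestamps with the Instant column of the input rows:

// Hypothetical helper: every timestamp observed by the sink should equal the
// timestamp column (index 2) of the corresponding input row.
private static void assertTimestampResults(SharedReference<List<Long>> timestamps, List<Row> rows) {
    assertThat(timestamps.get()).hasSize(rows.size());
    for (int i = 0; i < rows.size(); i++) {
        assertThat(Instant.ofEpochMilli(timestamps.get().get(i)))
                .isEqualTo(rows.get(i).getField(2));
    }
}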