Example 1 with SharedReference

Use of org.apache.flink.testutils.junit.SharedReference in the Apache Flink project.
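All five examples rely on a SharedObjects JUnit rule named sharedObjects declared on the surrounding test class; some also reference fixtures that the excerpts do not show (for instance MINI_CLUSTER_RESOURCE from AbstractTestBase, a TemporaryFolder rule named folder, and an env field). A minimal sketch of that shared setup, with the class name invented for illustration:

import java.util.ArrayList;

import org.apache.flink.testutils.junit.SharedObjects;
import org.apache.flink.testutils.junit.SharedReference;
import org.junit.Rule;
import org.junit.Test;

public class SharedReferenceSetupSketch {

    // The rule scopes shared objects to a single test. A SharedReference
    // returned by add() resolves to the same backing instance everywhere in
    // the test JVM, even after the reference travels through a serialized
    // user-function closure.
    @Rule
    public final SharedObjects sharedObjects = SharedObjects.create();

    @Test
    public void example() {
        SharedReference<ArrayList<Integer>> results = sharedObjects.add(new ArrayList<>());
        results.get().add(1); // plain access to the backing list
        results.consumeSync(list -> list.add(2)); // mutation under the object's monitor
    }
}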

From the class BufferTimeoutITCase, method testDisablingBufferTimeout:

/**
 * The test verifies that it is possible to disable explicit buffer flushing. It checks that
 * the OutputFlusher thread is not started while the task is running. This does not, however,
 * guarantee that unfinished buffers cannot be flushed by other events.
 */
@Test
public void testDisablingBufferTimeout() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.setBufferTimeout(-1);
    final SharedReference<ArrayList<Integer>> results = sharedObjects.add(new ArrayList<>());
    env.addSource(new SourceFunction<Integer>() {

        @Override
        public void run(SourceContext<Integer> ctx) throws Exception {
            ctx.collect(1);
            // just sleep forever
            Thread.sleep(Long.MAX_VALUE);
        }

        @Override
        public void cancel() {
        }
    }).slotSharingGroup("source").addSink(new SinkFunction<Integer>() {

        @Override
        public void invoke(Integer value, Context context) {
            results.get().add(value);
        }
    }).slotSharingGroup("sink");
    final JobClient jobClient = env.executeAsync();
    CommonTestUtils.waitForAllTaskRunning(MINI_CLUSTER_RESOURCE.getMiniCluster(), jobClient.getJobID(), false);
    assertTrue(
            RecordWriter.DEFAULT_OUTPUT_FLUSH_THREAD_NAME + " thread is unexpectedly running",
            Thread.getAllStackTraces().keySet().stream()
                    .noneMatch(thread -> thread.getName()
                            .startsWith(RecordWriter.DEFAULT_OUTPUT_FLUSH_THREAD_NAME)));
}
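For context on the -1 above: setBufferTimeout controls how long a partially filled network buffer may be held back before it is flushed downstream. A minimal sketch of the three kinds of values (semantics as described in the Flink documentation):

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

env.setBufferTimeout(100); // flush unfinished buffers every 100 ms (the default)
env.setBufferTimeout(0);   // flush after every record: minimal latency, reduced throughput
env.setBufferTimeout(-1);  // disable periodic flushing: buffers are sent only when full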

Example 2 with SharedReference

Use of org.apache.flink.testutils.junit.SharedReference in the Apache Flink project.

From the class SavepointITCase, method testTriggerSavepointAndResumeWithNoClaim:

@Test
@Ignore("Disabling this test because it regularly fails on AZP. See FLINK-25427.")
public void testTriggerSavepointAndResumeWithNoClaim() throws Exception {
    final int numTaskManagers = 2;
    final int numSlotsPerTaskManager = 2;
    final int parallelism = numTaskManagers * numSlotsPerTaskManager;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(new EmbeddedRocksDBStateBackend(true));
    env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
    env.getCheckpointConfig().setCheckpointStorage(folder.newFolder().toURI());
    env.setParallelism(parallelism);
    final SharedReference<CountDownLatch> counter = sharedObjects.add(new CountDownLatch(10_000));
    env.fromSequence(1, Long.MAX_VALUE)
            .keyBy(i -> i % parallelism)
            .process(new KeyedProcessFunction<Long, Long, Long>() {

        private ListState<Long> last;

        @Override
        public void open(Configuration parameters) {
            // we use list state here to create sst files of a significant size;
            // if sst files do not reach a certain threshold, they are not stored
            // as separate files but inlined as a byte stream in the checkpoint metadata
            last = getRuntimeContext().getListState(new ListStateDescriptor<>("last", BasicTypeInfo.LONG_TYPE_INFO));
        }

        @Override
        public void processElement(Long value, KeyedProcessFunction<Long, Long, Long>.Context ctx, Collector<Long> out) throws Exception {
            last.add(value);
            out.collect(value);
        }
    }).addSink(new SinkFunction<Long>() {

        @Override
        public void invoke(Long value) {
            counter.consumeSync(CountDownLatch::countDown);
        }
    }).setParallelism(1);
    final JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    MiniClusterWithClientResource cluster =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setNumberTaskManagers(numTaskManagers)
                            .setNumberSlotsPerTaskManager(numSlotsPerTaskManager)
                            .build());
    cluster.before();
    try {
        final JobID jobID1 = new JobID();
        jobGraph.setJobID(jobID1);
        cluster.getClusterClient().submitJob(jobGraph).get();
        CommonTestUtils.waitForAllTaskRunning(cluster.getMiniCluster(), jobID1, false);
        // wait for some records to be processed before taking the checkpoint
        counter.get().await();
        final String firstCheckpoint = cluster.getMiniCluster().triggerCheckpoint(jobID1).get();
        cluster.getClusterClient().cancel(jobID1).get();
        jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(firstCheckpoint, false, RestoreMode.NO_CLAIM));
        final JobID jobID2 = new JobID();
        jobGraph.setJobID(jobID2);
        cluster.getClusterClient().submitJob(jobGraph).get();
        CommonTestUtils.waitForAllTaskRunning(cluster.getMiniCluster(), jobID2, false);
        String secondCheckpoint = cluster.getMiniCluster().triggerCheckpoint(jobID2).get();
        cluster.getClusterClient().cancel(jobID2).get();
        // delete the checkpoint we restored from
        FileUtils.deleteDirectory(Paths.get(new URI(firstCheckpoint)).getParent().toFile());
        // we should be able to restore from the second checkpoint even though it has been built
        // on top of the first checkpoint
        jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(secondCheckpoint, false, RestoreMode.NO_CLAIM));
        final JobID jobID3 = new JobID();
        jobGraph.setJobID(jobID3);
        cluster.getClusterClient().submitJob(jobGraph).get();
        CommonTestUtils.waitForAllTaskRunning(cluster.getMiniCluster(), jobID3, false);
    } finally {
        cluster.after();
    }
}
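For context on RestoreMode.NO_CLAIM: Flink 1.15 introduced three restore modes that determine who owns the snapshot a job is restored from. A minimal sketch of configuring each (semantics as described in the Flink documentation):

// NO_CLAIM (the default): Flink takes no ownership and forces the first
// checkpoint after the restore to be a full checkpoint, so the restored
// snapshot can later be deleted safely; this is what the test above verifies.
jobGraph.setSavepointRestoreSettings(
        SavepointRestoreSettings.forPath(path, false, RestoreMode.NO_CLAIM));

// CLAIM: Flink takes ownership and may delete the snapshot once it is subsumed.
jobGraph.setSavepointRestoreSettings(
        SavepointRestoreSettings.forPath(path, false, RestoreMode.CLAIM));

// LEGACY: the pre-1.15 behavior, which leaves ownership undefined.
jobGraph.setSavepointRestoreSettings(
        SavepointRestoreSettings.forPath(path, false, RestoreMode.LEGACY));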

Example 3 with SharedReference

Use of org.apache.flink.testutils.junit.SharedReference in the Apache Flink project.

From the class FileSinkMigrationITCase, method test:

@Test
public void test() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    SharedReference<Collection<Long>> list = sharedObjects.add(new ArrayList<>());
    int n = 10000;
    env.setParallelism(100);
    env.fromSequence(0, n).map(i -> list.applySync(l -> l.add(i)));
    env.execute();
    assertEquals(n + 1, list.get().size());
    assertEquals(
            LongStream.rangeClosed(0, n).boxed().collect(Collectors.toList()),
            list.get().stream().sorted().collect(Collectors.toList()));
}
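Note how the map function publishes results through applySync rather than touching the list directly. SharedReference exposes three access patterns; a short sketch (locking behavior inferred from how the examples use it):

SharedReference<List<Long>> list = sharedObjects.add(new ArrayList<>());

list.get().size();                              // plain access, no synchronization
list.consumeSync(l -> l.add(1L));               // run a Consumer while holding the object's lock
Boolean added = list.applySync(l -> l.add(2L)); // same, but return the Function's result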

Example 4 with SharedReference

Use of org.apache.flink.testutils.junit.SharedReference in the Apache Flink project.

From the class CommonExecSinkITCase, method testFromValuesWatermarkPropagation:

@Test
public void testFromValuesWatermarkPropagation() throws Exception {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final SharedReference<List<Long>> watermarks = sharedObjects.add(new ArrayList<>());
    final SinkFunction<RowData> sinkFunction = new SinkFunction<RowData>() {

        @Override
        public void writeWatermark(org.apache.flink.api.common.eventtime.Watermark watermark) {
            addElement(watermarks, watermark.getTimestamp());
        }
    };
    final TableDescriptor sinkDescriptor = TableFactoryHarness.newBuilder()
            .sink(new TableFactoryHarness.SinkBase() {

        @Override
        public DataStreamSinkProvider getSinkRuntimeProvider(DynamicTableSink.Context context) {
            return (providerContext, dataStream) -> dataStream.addSink(sinkFunction);
        }
    }).build();
    final Table source = tableEnv.fromValues(
            DataTypes.ROW(DataTypes.FIELD("a", DataTypes.INT())),
            Row.of(1), Row.of(2), Row.of(3));
    source.executeInsert(sinkDescriptor).await();
    assertThat(watermarks.get().size()).isEqualTo(env.getParallelism());
    for (Long watermark : watermarks.get()) {
        assertThat(watermark).isEqualTo(Watermark.MAX_WATERMARK.getTimestamp());
    }
}
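addElement is a helper defined elsewhere in CommonExecSinkITCase and not shown in this excerpt. A plausible sketch, assuming it follows the applySync pattern from Example 3:

private static <T> void addElement(SharedReference<List<T>> elements, T element) {
    // append under the shared object's lock, since sink subtasks may write concurrently
    elements.applySync(list -> list.add(element));
}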

Example 5 with SharedReference

Use of org.apache.flink.testutils.junit.SharedReference in the Apache Flink project.

From the class CommonExecSinkITCase, method testStreamRecordTimestampInserterDataStreamSinkProvider:

@Test
public void testStreamRecordTimestampInserterDataStreamSinkProvider() throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final SharedReference<List<Long>> timestamps = sharedObjects.add(new ArrayList<>());
    final List<Row> rows = Arrays.asList(
            Row.of(1, "foo", Instant.parse("2020-11-10T11:34:56.123Z")),
            Row.of(2, "foo", Instant.parse("2020-11-10T12:34:56.789Z")),
            Row.of(3, "foo", Instant.parse("2020-11-11T10:11:22.777Z")),
            Row.of(4, "foo", Instant.parse("2020-11-11T10:11:23.888Z")));
    final SinkFunction<RowData> sinkFunction = new SinkFunction<RowData>() {

        @Override
        public void invoke(RowData value, Context context) {
            addElement(timestamps, context.timestamp());
        }
    };
    final TableDescriptor sourceDescriptor = TableFactoryHarness.newBuilder()
            .schema(schemaStreamRecordTimestampInserter(true))
            .source(new TestSource(rows))
            .sink(new TableFactoryHarness.SinkBase() {

        @Override
        public DataStreamSinkProvider getSinkRuntimeProvider(DynamicTableSink.Context context) {
            return (providerContext, dataStream) -> dataStream.addSink(sinkFunction);
        }
    }).build();
    tableEnv.createTable("T1", sourceDescriptor);
    final String sqlStmt = "INSERT INTO T1 SELECT * FROM T1";
    assertPlan(tableEnv, sqlStmt, true);
    tableEnv.executeSql(sqlStmt).await();
    Collections.sort(timestamps.get());
    assertTimestampResults(timestamps, rows);
}
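assertPlan and assertTimestampResults are likewise helpers from the test class that the excerpt omits. A hypothetical sketch of the latter, assuming it compares each recorded StreamRecord timestamp with the Instant in the corresponding row (the timestamps were sorted above, and the rows are listed in ascending time order):

private static void assertTimestampResults(SharedReference<List<Long>> timestamps, List<Row> rows) {
    assertThat(timestamps.get()).hasSize(rows.size());
    for (int i = 0; i < rows.size(); i++) {
        Instant expected = (Instant) rows.get(i).getField(2);
        assertThat(timestamps.get().get(i)).isEqualTo(expected.toEpochMilli());
    }
}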

Aggregations

Classes used together with SharedReference, with the number of examples that use each:

org.apache.flink.streaming.api.environment.StreamExecutionEnvironment (5)
org.apache.flink.testutils.junit.SharedObjects (5)
org.apache.flink.testutils.junit.SharedReference (5)
org.junit.Rule (5)
org.junit.Test (5)
java.util.ArrayList (4)
java.util.Collection (4)
java.util.Collections (4)
java.util.stream.Collectors (4)
org.apache.flink.streaming.api.functions.sink.SinkFunction (4)
org.apache.flink.streaming.api.functions.source.SourceFunction (4)
java.util.Arrays (3)
java.util.List (3)
java.util.concurrent.ExecutionException (3)
org.junit.Assert.assertEquals (3)
org.junit.Before (3)
java.time.Instant (2)
org.apache.flink.runtime.testutils.CommonTestUtils (2)
org.apache.flink.streaming.api.transformations.SinkV1Adapter (2)
org.apache.flink.streaming.api.watermark.Watermark (2)