Example 1 with CheckedThread

Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.

From class FlinkKafkaProducerBaseTest, method testAtLeastOnceProducer.

/**
 * Test ensuring that the producer is not dropping buffered records; we set a timeout because
 * the test will not finish if the logic is broken.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 10000)
public void testAtLeastOnceProducer() throws Throwable {
    final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
    producer.setFlushOnCheckpoint(true);
    final KafkaProducer<?, ?> mockProducer = producer.getMockKafkaProducer();
    final OneInputStreamOperatorTestHarness<String, Object> testHarness = new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));
    testHarness.open();
    testHarness.processElement(new StreamRecord<>("msg-1"));
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));
    verify(mockProducer, times(3)).send(any(ProducerRecord.class), any(Callback.class));
    Assert.assertEquals(3, producer.getPendingSize());
    // start a thread to perform checkpointing
    CheckedThread snapshotThread = new CheckedThread() {

        @Override
        public void go() throws Exception {
            // this should block until all records are flushed;
            // if the snapshot implementation returns before pending records are
            // flushed, the assertions below will fail
            testHarness.snapshot(123L, 123L);
        }
    };
    snapshotThread.start();
    // before proceeding, make sure that flushing has started and that the snapshot
    // is still blocked; this would block forever if the snapshot didn't perform a flush
    producer.waitUntilFlushStarted();
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());
    // now, complete the callbacks
    producer.getPendingCallbacks().get(0).onCompletion(null, null);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());
    Assert.assertEquals(2, producer.getPendingSize());
    producer.getPendingCallbacks().get(1).onCompletion(null, null);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());
    Assert.assertEquals(1, producer.getPendingSize());
    producer.getPendingCallbacks().get(2).onCompletion(null, null);
    Assert.assertEquals(0, producer.getPendingSize());
    // this would fail with an exception if flushing wasn't completed before the snapshot method
    // returned
    snapshotThread.sync();
    testHarness.close();
}
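
All five examples lean on the same contract: go() contains the test logic that runs on the spawned thread, and sync() joins the thread and rethrows anything go() threw, so assertion failures from the background thread surface in the calling test. A minimal sketch of that contract, reconstructed from how the class is used in these examples rather than copied from Flink's source:

// Sketch of the CheckedThread contract as used in these examples; reconstructed,
// not Flink's actual code -- see org.apache.flink.core.testutils.CheckedThread.
public abstract class CheckedThread extends Thread {

    // holds whatever go() threw, so sync() can rethrow it on the calling thread
    private volatile Throwable error;

    // the test logic to run on this thread; may throw freely
    public abstract void go() throws Exception;

    @Override
    public final void run() {
        try {
            go();
        } catch (Throwable t) {
            error = t;
        }
    }

    // joins the thread and rethrows any error captured from go()
    public void sync() throws Exception {
        join();
        if (error instanceof Exception) {
            throw (Exception) error;
        } else if (error != null) {
            throw new Exception(error);
        }
    }
}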

Example 2 with CheckedThread

Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.

From class FlinkKafkaProducerBaseTest, method testAsyncErrorRethrownOnCheckpointAfterFlush.

/**
 * Test ensuring that if an async exception is caught for one of the flushed requests on
 * checkpoint, it should be rethrown; we set a timeout because the test will not finish if the
 * logic is broken.
 *
 * <p>Note that this test does not verify that the snapshot method blocks correctly
 * when there are pending records; that case is covered by testAtLeastOnceProducer.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 5000)
public void testAsyncErrorRethrownOnCheckpointAfterFlush() throws Throwable {
    final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
    producer.setFlushOnCheckpoint(true);
    final KafkaProducer<?, ?> mockProducer = producer.getMockKafkaProducer();
    final OneInputStreamOperatorTestHarness<String, Object> testHarness = new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));
    testHarness.open();
    testHarness.processElement(new StreamRecord<>("msg-1"));
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));
    verify(mockProducer, times(3)).send(any(ProducerRecord.class), any(Callback.class));
    // only let the first callback succeed for now
    producer.getPendingCallbacks().get(0).onCompletion(null, null);
    CheckedThread snapshotThread = new CheckedThread() {

        @Override
        public void go() throws Exception {
            // this should block at first, since there are still two pending records
            // that need to be flushed
            testHarness.snapshot(123L, 123L);
        }
    };
    snapshotThread.start();
    // let the 2nd message fail with an async exception
    producer.getPendingCallbacks().get(1).onCompletion(null, new Exception("artificial async failure for 2nd message"));
    producer.getPendingCallbacks().get(2).onCompletion(null, null);
    try {
        snapshotThread.sync();
    } catch (Exception e) {
        // the snapshot should have failed with the async exception
        Assert.assertTrue(e.getCause().getMessage().contains("artificial async failure for 2nd message"));
        // test succeeded
        return;
    }
    Assert.fail("Expected the snapshot to rethrow the async send failure");
}
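
On JUnit 4.13 or newer, the try/catch/return/fail pattern above can be expressed more compactly with Assert.assertThrows; a sketch under that version assumption, using the same snapshotThread:

// requires JUnit 4.13+ for Assert.assertThrows; otherwise keep the
// try/catch pattern from the example above
Exception e = Assert.assertThrows(Exception.class, snapshotThread::sync);
Assert.assertTrue(
        "unexpected failure cause: " + e.getCause(),
        e.getCause().getMessage().contains("artificial async failure for 2nd message"));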

Example 3 with CheckedThread

Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.

From class AbstractFetcherTest, method testConcurrentPartitionsDiscoveryAndLoopFetching.

@Test
public void testConcurrentPartitionsDiscoveryAndLoopFetching() throws Exception {
    // test data
    final KafkaTopicPartition testPartition = new KafkaTopicPartition("test", 42);
    // ----- create the test fetcher -----
    SourceContext<String> sourceContext = new TestSourceContext<>();
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(testPartition, KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    final OneShotLatch fetchLoopWaitLatch = new OneShotLatch();
    final OneShotLatch stateIterationBlockLatch = new OneShotLatch();
    final TestFetcher<String> fetcher = new TestFetcher<>(
            sourceContext,
            partitionsWithInitialOffsets,
            null, /* watermark strategy */
            new TestProcessingTimeService(),
            10,
            fetchLoopWaitLatch,
            stateIterationBlockLatch);
    // ----- run the fetcher -----
    final CheckedThread checkedThread = new CheckedThread() {

        @Override
        public void go() throws Exception {
            fetcher.runFetchLoop();
        }
    };
    checkedThread.start();
    // wait until state iteration begins before adding discovered partitions
    fetchLoopWaitLatch.await();
    fetcher.addDiscoveredPartitions(Collections.singletonList(testPartition));
    // unblock the fetch loop's state iteration; if the iteration is not safe against
    // concurrent partition discovery, sync() rethrows the resulting exception here
    stateIterationBlockLatch.trigger();
    checkedThread.sync();
}
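
The two OneShotLatch instances form a common handshake for making a race deterministic: the fetch loop signals when it has entered the critical section, then blocks until the test has performed the concurrent mutation. A generic sketch of the pattern (the latch and variable names here are illustrative, not taken from the Flink test):

final OneShotLatch reachedCriticalSection = new OneShotLatch();
final OneShotLatch resumeWork = new OneShotLatch();
final CheckedThread worker = new CheckedThread() {

    @Override
    public void go() throws Exception {
        // signal the test thread that we are inside the critical section
        reachedCriticalSection.trigger();
        // block until the test thread has performed the concurrent mutation
        resumeWork.await();
    }
};
worker.start();
reachedCriticalSection.await();
// ... mutate shared state concurrently here ...
resumeWork.trigger();
// rethrows any exception the race may have provoked in go()
worker.sync();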

Example 4 with CheckedThread

Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.

From class LimitedConnectionsFileSystemTest, method testLimitingMixedStreams.

@Test
public void testLimitingMixedStreams() throws Exception {
    final int maxConcurrentOpen = 2;
    final int numThreads = 61;
    final LimitedConnectionsFileSystem limitedFs =
            new LimitedConnectionsFileSystem(
                    LocalFileSystem.getSharedInstance(),
                    maxConcurrentOpen); // limit on the total number of concurrently open streams
    final Random rnd = new Random();
    final CheckedThread[] threads = new CheckedThread[numThreads];
    for (int i = 0; i < numThreads; i++) {
        File file = tempFolder.newFile();
        Path path = new Path(file.toURI());
        if (rnd.nextBoolean()) {
            // reader thread
            createRandomContents(file, rnd);
            threads[i] = new ReaderThread(limitedFs, path, Integer.MAX_VALUE, maxConcurrentOpen);
        } else {
            threads[i] = new WriterThread(limitedFs, path, Integer.MAX_VALUE, maxConcurrentOpen);
        }
    }
    for (CheckedThread t : threads) {
        t.start();
    }
    for (CheckedThread t : threads) {
        t.sync();
    }
}
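
Starting every thread before syncing any of them is what makes the streams contend for the two available slots; syncing inside the first loop would run the threads one at a time. A hypothetical helper (not part of Flink's test utilities) that captures the pattern:

// starts all threads before joining any, so they genuinely run concurrently;
// the first exception captured by any thread is rethrown and fails the test
private static void startAndSyncAll(CheckedThread[] threads) throws Exception {
    for (CheckedThread t : threads) {
        t.start();
    }
    for (CheckedThread t : threads) {
        t.sync();
    }
}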

Example 5 with CheckedThread

Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.

From class LimitedConnectionsFileSystemTest, method testTerminateStalledMixedStreams.

@Test
public void testTerminateStalledMixedStreams() throws Exception {
    final int maxConcurrentOpen = 2;
    final int numThreads = 20;
    final LimitedConnectionsFileSystem limitedFs =
            new LimitedConnectionsFileSystem(
                    LocalFileSystem.getSharedInstance(),
                    maxConcurrentOpen, // limited total number of open streams
                    0L, // no opening timeout
                    50L); // inactivity timeout of 50 ms
    final Random rnd = new Random();
    final CheckedThread[] threads = new CheckedThread[numThreads];
    final BlockingThread[] blockers = new BlockingThread[numThreads];
    for (int i = 0; i < numThreads; i++) {
        File file1 = tempFolder.newFile();
        File file2 = tempFolder.newFile();
        Path path1 = new Path(file1.toURI());
        Path path2 = new Path(file2.toURI());
        if (rnd.nextBoolean()) {
            createRandomContents(file1, rnd);
            createRandomContents(file2, rnd);
            threads[i] = new ReaderThread(limitedFs, path1, maxConcurrentOpen, Integer.MAX_VALUE);
            blockers[i] = new BlockingReaderThread(limitedFs, path2, maxConcurrentOpen, Integer.MAX_VALUE);
        } else {
            threads[i] = new WriterThread(limitedFs, path1, maxConcurrentOpen, Integer.MAX_VALUE);
            blockers[i] = new BlockingWriterThread(limitedFs, path2, maxConcurrentOpen, Integer.MAX_VALUE);
        }
    }
    // start normal and blocker threads
    for (int i = 0; i < numThreads; i++) {
        blockers[i].start();
        threads[i].start();
    }
    // wait for the regular threads; they can finish because the blockers' idle
    // streams eventually hit the inactivity timeout and are closed
    for (CheckedThread t : threads) {
        try {
            t.sync();
        } catch (LimitedConnectionsFileSystem.StreamTimeoutException e) {
            // also the regular threads may occasionally get a timeout on
            // slower test machines because we set very aggressive timeouts
            // to reduce the test time
        }
    }
    // unblock all the blocking threads
    for (BlockingThread t : blockers) {
        t.wakeup();
    }
    for (BlockingThread t : blockers) {
        try {
            t.sync();
        } catch (LimitedConnectionsFileSystem.StreamTimeoutException ignored) {
        }
    }
}
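
BlockingReaderThread and BlockingWriterThread are private helpers of this test class; their key property is that they open a stream and then hold it idle until wakeup() is called, which is what trips the 50 ms inactivity timeout. A hypothetical reduction of the idea (illustrative only, not Flink's actual implementation):

// opens a stream, then idles on a latch so the inactivity timeout can fire;
// the timeout may close the stream underneath us and surface later as a
// LimitedConnectionsFileSystem.StreamTimeoutException
abstract static class IdlingThread extends CheckedThread {

    private final OneShotLatch wakeup = new OneShotLatch();

    void wakeup() {
        wakeup.trigger();
    }

    @Override
    public void go() throws Exception {
        try (FSDataInputStream in = openStream()) {
            // hold the stream open without making progress
            wakeup.await();
            in.read();
        }
    }

    abstract FSDataInputStream openStream() throws IOException;
}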

Aggregations

Classes most frequently used together with CheckedThread across the 45 examples in this project:

CheckedThread (org.apache.flink.core.testutils.CheckedThread): 45
Test (org.junit.Test): 41
SimpleStringSchema (org.apache.flink.api.common.serialization.SimpleStringSchema): 12
HashMap (java.util.HashMap): 8
LinkedList (java.util.LinkedList): 8
OneInputStreamOperatorTestHarness (org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness): 8
TestableKinesisDataFetcher (org.apache.flink.streaming.connectors.kinesis.testutils.TestableKinesisDataFetcher): 7
KinesisStreamShardState (org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState): 6
IOException (java.io.IOException): 5
Map (java.util.Map): 5
CountDownLatch (java.util.concurrent.CountDownLatch): 5
OneShotLatch (org.apache.flink.core.testutils.OneShotLatch): 5
SequenceNumber (org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber): 5
Shard (com.amazonaws.services.kinesis.model.Shard): 4
File (java.io.File): 4
Random (java.util.Random): 4
UserRecordResult (com.amazonaws.services.kinesis.producer.UserRecordResult): 3
ArrayList (java.util.ArrayList): 3
CompletableFuture (java.util.concurrent.CompletableFuture): 3
Configuration (org.apache.flink.configuration.Configuration): 3