
Example 76 with OneShotLatch

Use of org.apache.flink.core.testutils.OneShotLatch in project flink by apache.

In the class CliFrontendStopWithSavepointTest, the method testStopWithMaxWMAndDefaultSavepointDir:

@Test
public void testStopWithMaxWMAndDefaultSavepointDir() throws Exception {
    JobID jid = new JobID();
    String[] parameters = { "-p", "-d", jid.toString() };
    OneShotLatch stopWithSavepointLatch = new OneShotLatch();
    TestingClusterClient<String> clusterClient = new TestingClusterClient<>();
    clusterClient.setStopWithSavepointFunction((jobID, advanceToEndOfEventTime, savepointDirectory, formatType) -> {
        assertThat(jobID, is(jid));
        assertThat(advanceToEndOfEventTime, is(true));
        assertNull(savepointDirectory);
        stopWithSavepointLatch.trigger();
        return CompletableFuture.completedFuture(savepointDirectory);
    });
    MockedCliFrontend testFrontend = new MockedCliFrontend(clusterClient);
    testFrontend.stop(parameters);
    stopWithSavepointLatch.await();
}
Also used : MockedCliFrontend(org.apache.flink.client.cli.util.MockedCliFrontend) TestingClusterClient(org.apache.flink.client.program.TestingClusterClient) OneShotLatch(org.apache.flink.core.testutils.OneShotLatch) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
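
OneShotLatch is a small single-use synchronization aid: trigger() fires it exactly once, await() blocks until it has fired, and isTriggered() polls its state (all three appear in the examples on this page). A minimal, self-contained sketch of the handshake, assuming only those three methods:

import org.apache.flink.core.testutils.OneShotLatch;

public class OneShotLatchSketch {

    public static void main(String[] args) throws Exception {
        final OneShotLatch latch = new OneShotLatch();
        Thread worker = new Thread(() -> {
            // ... asynchronous work happens here ...
            // fire the latch; any thread blocked in await() is released
            latch.trigger();
        });
        worker.start();
        // block until the worker has called trigger()
        latch.await();
        worker.join();
    }
}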

Example 77 with OneShotLatch

Use of org.apache.flink.core.testutils.OneShotLatch in project flink by apache.

In the class InitOutputPathTest, the method runTest:

private void runTest(final boolean useAwaits) throws Exception {
    final File tempFile = tempDir.newFile();
    final Path path1 = new Path(tempFile.getAbsolutePath(), "1");
    final Path path2 = new Path(tempFile.getAbsolutePath(), "2");
    final OneShotLatch deleteAwaitLatch1 = new OneShotLatch();
    final OneShotLatch deleteAwaitLatch2 = new OneShotLatch();
    final OneShotLatch mkdirsAwaitLatch1 = new OneShotLatch();
    final OneShotLatch mkdirsAwaitLatch2 = new OneShotLatch();
    final OneShotLatch deleteTriggerLatch1 = new OneShotLatch();
    final OneShotLatch deleteTriggerLatch2 = new OneShotLatch();
    final OneShotLatch mkdirsTriggerLatch1 = new OneShotLatch();
    final OneShotLatch mkdirsTriggerLatch2 = new OneShotLatch();
    final OneShotLatch createAwaitLatch = new OneShotLatch();
    final OneShotLatch createTriggerLatch = new OneShotLatch();
    // this "new LocalDataOutputStream()" is in the end called by the async threads
    whenNew(LocalDataOutputStream.class).withAnyArguments().thenAnswer(new Answer<LocalDataOutputStream>() {

        @Override
        public LocalDataOutputStream answer(InvocationOnMock invocation) throws Throwable {
            createAwaitLatch.trigger();
            createTriggerLatch.await();
            final File file = (File) invocation.getArguments()[0];
            return new LocalDataOutputStream(file);
        }
    });
    final LocalFileSystem fs1 = new SyncedFileSystem(deleteAwaitLatch1, mkdirsAwaitLatch1, deleteTriggerLatch1, mkdirsTriggerLatch1);
    final LocalFileSystem fs2 = new SyncedFileSystem(deleteAwaitLatch2, mkdirsAwaitLatch2, deleteTriggerLatch2, mkdirsTriggerLatch2);
    // start the concurrent file creators
    FileCreator thread1 = new FileCreator(fs1, path1);
    FileCreator thread2 = new FileCreator(fs2, path2);
    thread1.start();
    thread2.start();
    // wait until they both decide to delete the directory
    if (useAwaits) {
        deleteAwaitLatch1.await();
        deleteAwaitLatch2.await();
    } else {
        Thread.sleep(5);
    }
    // now release #1: it deletes the directory, passes 'mkdirs' immediately, and then
    // blocks at file creation
    mkdirsTriggerLatch1.trigger();
    deleteTriggerLatch1.trigger();
    if (useAwaits) {
        createAwaitLatch.await();
    } else {
        // this needs a bit more sleep time, because Mockito instrumentation is at work here
        Thread.sleep(100);
    }
    // now send off #2 to delete the directory - it waits at 'mkdirs'
    deleteTriggerLatch2.trigger();
    if (useAwaits) {
        mkdirsAwaitLatch2.await();
    } else {
        Thread.sleep(5);
    }
    // let #1 try to create the file and see if it succeeded
    createTriggerLatch.trigger();
    if (useAwaits) {
        thread1.sync();
    } else {
        Thread.sleep(5);
    }
    // now let #2 finish up
    mkdirsTriggerLatch2.trigger();
    thread1.sync();
    thread2.sync();
}
Also used : LocalDataOutputStream(org.apache.flink.core.fs.local.LocalDataOutputStream) InvocationOnMock(org.mockito.invocation.InvocationOnMock) LocalFileSystem(org.apache.flink.core.fs.local.LocalFileSystem) OneShotLatch(org.apache.flink.core.testutils.OneShotLatch) File(java.io.File)
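
SyncedFileSystem is defined elsewhere in this test class. A plausible sketch, assuming it merely wraps LocalFileSystem's delete() and mkdirs() in an announce-then-wait handshake; the four-latch constructor order is taken from the calls above, while the method bodies are illustrative (imports: org.apache.flink.core.fs.Path and java.io.IOException in addition to the list above):

private static class SyncedFileSystem extends LocalFileSystem {

    private final OneShotLatch deleteAwaitLatch;
    private final OneShotLatch mkdirsAwaitLatch;
    private final OneShotLatch deleteTriggerLatch;
    private final OneShotLatch mkdirsTriggerLatch;

    SyncedFileSystem(
            OneShotLatch deleteAwaitLatch,
            OneShotLatch mkdirsAwaitLatch,
            OneShotLatch deleteTriggerLatch,
            OneShotLatch mkdirsTriggerLatch) {
        this.deleteAwaitLatch = deleteAwaitLatch;
        this.mkdirsAwaitLatch = mkdirsAwaitLatch;
        this.deleteTriggerLatch = deleteTriggerLatch;
        this.mkdirsTriggerLatch = mkdirsTriggerLatch;
    }

    @Override
    public boolean delete(Path path, boolean recursive) throws IOException {
        // announce that this thread reached delete(), then wait for the test to release it
        deleteAwaitLatch.trigger();
        try {
            deleteTriggerLatch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException("interrupted while blocking on the latch", e);
        }
        return super.delete(path, recursive);
    }

    @Override
    public boolean mkdirs(Path path) throws IOException {
        // same announce-then-wait handshake for mkdirs()
        mkdirsAwaitLatch.trigger();
        try {
            mkdirsTriggerLatch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException("interrupted while blocking on the latch", e);
        }
        return super.mkdirs(path);
    }
}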

Example 78 with OneShotLatch

Use of org.apache.flink.core.testutils.OneShotLatch in project flink by apache.

In the class ContinuousFileProcessingTest, the method testReaderSnapshotRestore:

@Test
public void testReaderSnapshotRestore() throws Exception {
    String testBasePath = hdfsURI + "/" + UUID.randomUUID() + "/";
    TimestampedFileInputSplit split1 = new TimestampedFileInputSplit(0, 3, new Path("test/test1"), 0, 100, null);
    TimestampedFileInputSplit split2 = new TimestampedFileInputSplit(10, 2, new Path("test/test2"), 101, 200, null);
    TimestampedFileInputSplit split3 = new TimestampedFileInputSplit(10, 1, new Path("test/test2"), 0, 100, null);
    TimestampedFileInputSplit split4 = new TimestampedFileInputSplit(11, 0, new Path("test/test3"), 0, 100, null);
    final OneShotLatch latch = new OneShotLatch();
    BlockingFileInputFormat format = new BlockingFileInputFormat(latch, new Path(testBasePath));
    TypeInformation<FileInputSplit> typeInfo = TypeExtractor.getInputFormatTypes(format);
    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, FileInputSplit> initTestInstance = createHarness(format);
    initTestInstance.setTimeCharacteristic(TimeCharacteristic.EventTime);
    initTestInstance.open();
    // create some state in the reader
    initTestInstance.processElement(new StreamRecord<>(split1));
    initTestInstance.processElement(new StreamRecord<>(split2));
    initTestInstance.processElement(new StreamRecord<>(split3));
    initTestInstance.processElement(new StreamRecord<>(split4));
    // take a snapshot of the operator's state. This will be used
    // to initialize another reader and compare the results of the
    // two operators.
    final OperatorSubtaskState snapshot;
    synchronized (initTestInstance.getCheckpointLock()) {
        snapshot = initTestInstance.snapshot(0L, 0L);
    }
    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, FileInputSplit> restoredTestInstance = createHarness(new BlockingFileInputFormat(latch, new Path(testBasePath)));
    restoredTestInstance.setTimeCharacteristic(TimeCharacteristic.EventTime);
    restoredTestInstance.initializeState(snapshot);
    restoredTestInstance.open();
    // now let computation start
    latch.trigger();
    synchronized (initTestInstance.getCheckpointLock()) {
        initTestInstance.close();
    }
    synchronized (restoredTestInstance.getCheckpointLock()) {
        restoredTestInstance.close();
    }
    FileInputSplit fsSplit1 = createSplitFromTimestampedSplit(split1);
    FileInputSplit fsSplit2 = createSplitFromTimestampedSplit(split2);
    FileInputSplit fsSplit3 = createSplitFromTimestampedSplit(split3);
    FileInputSplit fsSplit4 = createSplitFromTimestampedSplit(split4);
    // verify that the output contains the expected splits and that the
    // original and restored operators produced identical output
    Assert.assertTrue(initTestInstance.getOutput().contains(new StreamRecord<>(fsSplit1)));
    Assert.assertTrue(initTestInstance.getOutput().contains(new StreamRecord<>(fsSplit2)));
    Assert.assertTrue(initTestInstance.getOutput().contains(new StreamRecord<>(fsSplit3)));
    Assert.assertTrue(initTestInstance.getOutput().contains(new StreamRecord<>(fsSplit4)));
    Assert.assertArrayEquals(initTestInstance.getOutput().toArray(), restoredTestInstance.getOutput().toArray());
}
Also used : Path(org.apache.flink.core.fs.Path) TimestampedFileInputSplit(org.apache.flink.streaming.api.functions.source.TimestampedFileInputSplit) FileInputSplit(org.apache.flink.core.fs.FileInputSplit) StreamRecord(org.apache.flink.streaming.runtime.streamrecord.StreamRecord) TimestampedFileInputSplit(org.apache.flink.streaming.api.functions.source.TimestampedFileInputSplit) OneShotLatch(org.apache.flink.core.testutils.OneShotLatch) OperatorSubtaskState(org.apache.flink.runtime.checkpoint.OperatorSubtaskState) Test(org.junit.Test)
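
BlockingFileInputFormat is likewise defined elsewhere in the test. A minimal sketch, assuming its job is to hold every reader on the shared latch so that no split is consumed before latch.trigger() is called; the constructor shape matches the calls above, everything else is illustrative:

private static class BlockingFileInputFormat extends FileInputFormat<FileInputSplit> {

    private final OneShotLatch latch;

    private FileInputSplit split;

    private boolean reachedEnd;

    BlockingFileInputFormat(OneShotLatch latch, Path filePath) {
        super(filePath);
        this.latch = latch;
    }

    @Override
    public void open(FileInputSplit fileSplit) throws IOException {
        // do not open a real stream; the split paths used in the test do not exist
        this.split = fileSplit;
        this.reachedEnd = false;
    }

    @Override
    public boolean reachedEnd() throws IOException {
        // park the reader here until the test releases all readers at once
        if (!latch.isTriggered()) {
            try {
                latch.await();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new IOException("interrupted while blocking on the latch", e);
            }
        }
        return reachedEnd;
    }

    @Override
    public FileInputSplit nextRecord(FileInputSplit reuse) throws IOException {
        // emit the split itself as the single record of this split
        this.reachedEnd = true;
        return split;
    }
}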

Example 79 with OneShotLatch

Use of org.apache.flink.core.testutils.OneShotLatch in project flink by apache.

In the class ContinuousFileProcessingTest, the method testProcessContinuously:

@Test
public void testProcessContinuously() throws Exception {
    String testBasePath = hdfsURI + "/" + UUID.randomUUID() + "/";
    final OneShotLatch latch = new OneShotLatch();
    // create a single file in the directory
    Tuple2<org.apache.hadoop.fs.Path, String> bootstrap = createFileAndFillWithData(testBasePath, "file", NO_OF_FILES + 1, "This is test line.");
    Assert.assertTrue(hdfs.exists(bootstrap.f0));
    final Set<String> filesToBeRead = new TreeSet<>();
    filesToBeRead.add(bootstrap.f0.getName());
    TextInputFormat format = new TextInputFormat(new Path(testBasePath));
    format.setFilesFilter(FilePathFilter.createDefaultFilter());
    final ContinuousFileMonitoringFunction<String> monitoringFunction = createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_CONTINUOUSLY);
    // 1 for the bootstrap + NO_OF_FILES
    final int totalNoOfFilesToBeRead = NO_OF_FILES + 1;
    final FileVerifyingSourceContext context = new FileVerifyingSourceContext(latch, monitoringFunction, 1, totalNoOfFilesToBeRead);
    final Thread t = new Thread() {

        @Override
        public void run() {
            try {
                monitoringFunction.open(new Configuration());
                monitoringFunction.run(context);
            } catch (Exception e) {
                Assert.fail(e.getMessage());
            }
        }
    };
    t.start();
    if (!latch.isTriggered()) {
        latch.await();
    }
    // create some additional files that will be processed in the case of PROCESS_CONTINUOUSLY
    final org.apache.hadoop.fs.Path[] filesCreated = new org.apache.hadoop.fs.Path[NO_OF_FILES];
    for (int i = 0; i < NO_OF_FILES; i++) {
        Tuple2<org.apache.hadoop.fs.Path, String> file = createFileAndFillWithData(testBasePath, "file", i, "This is test line.");
        filesCreated[i] = file.f0;
        filesToBeRead.add(file.f0.getName());
    }
    // wait until the monitoring thread exits
    t.join();
    Assert.assertArrayEquals(filesToBeRead.toArray(), context.getSeenFiles().toArray());
    // finally delete the files created for the test.
    hdfs.delete(bootstrap.f0, false);
    for (org.apache.hadoop.fs.Path path : filesCreated) {
        hdfs.delete(path, false);
    }
}
Also used : Path(org.apache.flink.core.fs.Path) Configuration(org.apache.flink.configuration.Configuration) FileNotFoundException(java.io.FileNotFoundException) RunnableWithException(org.apache.flink.util.function.RunnableWithException) IOException(java.io.IOException) TextInputFormat(org.apache.flink.api.java.io.TextInputFormat) TreeSet(java.util.TreeSet) OneShotLatch(org.apache.flink.core.testutils.OneShotLatch) Test(org.junit.Test)
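
FileVerifyingSourceContext is another helper defined elsewhere in the test. A plausible sketch, assuming it records the file name behind each emitted split, fires the latch after a given number of elements, and cancels the monitoring function once all expected files have been seen; only the constructor shape and getSeenFiles() are visible above, the rest is an assumption (imports: org.apache.flink.streaming.api.functions.source.SourceFunction and org.apache.flink.streaming.api.watermark.Watermark):

private static class FileVerifyingSourceContext
        implements SourceFunction.SourceContext<TimestampedFileInputSplit> {

    private final OneShotLatch latch;
    private final ContinuousFileMonitoringFunction<String> src;
    private final int elementsBeforeLatch;
    private final int elementsBeforeCanceling;
    private final Set<String> seenFiles = new TreeSet<>();
    private final Object lock = new Object();
    private int counter = 0;

    FileVerifyingSourceContext(
            OneShotLatch latch,
            ContinuousFileMonitoringFunction<String> src,
            int elementsBeforeLatch,
            int elementsBeforeCanceling) {
        this.latch = latch;
        this.src = src;
        this.elementsBeforeLatch = elementsBeforeLatch;
        this.elementsBeforeCanceling = elementsBeforeCanceling;
    }

    Set<String> getSeenFiles() {
        return seenFiles;
    }

    @Override
    public void collect(TimestampedFileInputSplit element) {
        seenFiles.add(element.getPath().getName());
        counter++;
        if (counter == elementsBeforeLatch) {
            // release the test thread so it can create the additional files
            latch.trigger();
        }
        if (counter == elementsBeforeCanceling) {
            // stop the monitoring function once every expected file was seen
            src.cancel();
        }
    }

    @Override
    public void collectWithTimestamp(TimestampedFileInputSplit element, long timestamp) {
        collect(element);
    }

    @Override
    public void emitWatermark(Watermark mark) {}

    @Override
    public void markAsTemporarilyIdle() {}

    @Override
    public Object getCheckpointLock() {
        return lock;
    }

    @Override
    public void close() {}
}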

Example 80 with OneShotLatch

Use of org.apache.flink.core.testutils.OneShotLatch in project flink by apache.

In the class ContinuousFileProcessingMigrationTest, the method testReaderRestore:

@Test
public void testReaderRestore() throws Exception {
    File testFolder = tempFolder.newFolder();
    final OneShotLatch latch = new OneShotLatch();
    BlockingFileInputFormat format = new BlockingFileInputFormat(latch, new Path(testFolder.getAbsolutePath()));
    TypeInformation<FileInputSplit> typeInfo = TypeExtractor.getInputFormatTypes(format);
    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, FileInputSplit> testHarness = createHarness(format);
    testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
    testHarness.setup();
    testHarness.initializeState(OperatorSnapshotUtil.getResourceFilename("reader-migration-test-flink" + testMigrateVersion + "-snapshot"));
    testHarness.open();
    latch.trigger();
    synchronized (testHarness.getCheckpointLock()) {
        testHarness.close();
    }
    TimestampedFileInputSplit split1 = new TimestampedFileInputSplit(0, 3, new Path("test/test1"), 0, 100, null);
    TimestampedFileInputSplit split2 = new TimestampedFileInputSplit(10, 2, new Path("test/test2"), 101, 200, null);
    TimestampedFileInputSplit split3 = new TimestampedFileInputSplit(10, 1, new Path("test/test2"), 0, 100, null);
    TimestampedFileInputSplit split4 = new TimestampedFileInputSplit(11, 0, new Path("test/test3"), 0, 100, null);
    // verify that the restored operator's output contains the expected splits
    Assert.assertTrue(testHarness.getOutput().contains(new StreamRecord<>(split1)));
    Assert.assertTrue(testHarness.getOutput().contains(new StreamRecord<>(split2)));
    Assert.assertTrue(testHarness.getOutput().contains(new StreamRecord<>(split3)));
    Assert.assertTrue(testHarness.getOutput().contains(new StreamRecord<>(split4)));
}
Also used : Path(org.apache.flink.core.fs.Path) FileInputSplit(org.apache.flink.core.fs.FileInputSplit) TimestampedFileInputSplit(org.apache.flink.streaming.api.functions.source.TimestampedFileInputSplit) StreamRecord(org.apache.flink.streaming.runtime.streamrecord.StreamRecord) TimestampedFileInputSplit(org.apache.flink.streaming.api.functions.source.TimestampedFileInputSplit) OneShotLatch(org.apache.flink.core.testutils.OneShotLatch) File(java.io.File) Test(org.junit.Test)

Aggregations

OneShotLatch (org.apache.flink.core.testutils.OneShotLatch): 138
Test (org.junit.Test): 118
JobID (org.apache.flink.api.common.JobID): 41
CompletableFuture (java.util.concurrent.CompletableFuture): 38
ExecutionException (java.util.concurrent.ExecutionException): 27
Configuration (org.apache.flink.configuration.Configuration): 26
IOException (java.io.IOException): 24
Before (org.junit.Before): 24
FlinkException (org.apache.flink.util.FlinkException): 23
TestLogger (org.apache.flink.util.TestLogger): 21
File (java.io.File): 20
UUID (java.util.UUID): 18
TimeoutException (java.util.concurrent.TimeoutException): 18
TestingResourceManagerGateway (org.apache.flink.runtime.resourcemanager.utils.TestingResourceManagerGateway): 18
Time (org.apache.flink.api.common.time.Time): 17
TestingJobMasterGateway (org.apache.flink.runtime.jobmaster.utils.TestingJobMasterGateway): 17
Rule (org.junit.Rule): 17
Collections (java.util.Collections): 16
ArrayBlockingQueue (java.util.concurrent.ArrayBlockingQueue): 16
RpcUtils (org.apache.flink.runtime.rpc.RpcUtils): 16