Example 51 with CompletionException

Use of java.util.concurrent.CompletionException in project flink by apache.

From the class EmbeddedExecutor, method submitJob:

private static CompletableFuture<JobID> submitJob(final Configuration configuration, final DispatcherGateway dispatcherGateway, final JobGraph jobGraph, final Time rpcTimeout) {
    checkNotNull(jobGraph);
    LOG.info("Submitting Job with JobId={}.", jobGraph.getJobID());
    return dispatcherGateway
            .getBlobServerPort(rpcTimeout)
            .thenApply(
                    blobServerPort ->
                            new InetSocketAddress(dispatcherGateway.getHostname(), blobServerPort))
            .thenCompose(blobServerAddress -> {
        try {
            ClientUtils.extractAndUploadJobGraphFiles(jobGraph, () -> new BlobClient(blobServerAddress, configuration));
        } catch (FlinkException e) {
            throw new CompletionException(e);
        }
        return dispatcherGateway.submitJob(jobGraph, rpcTimeout);
    }).thenApply(ack -> jobGraph.getJobID());
}
Also used : FlinkException(org.apache.flink.util.FlinkException) PipelineExecutorUtils(org.apache.flink.client.deployment.executors.PipelineExecutorUtils) Pipeline(org.apache.flink.api.dag.Pipeline) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) LoggerFactory(org.slf4j.LoggerFactory) CompletableFuture(java.util.concurrent.CompletableFuture) DispatcherGateway(org.apache.flink.runtime.dispatcher.DispatcherGateway) PipelineExecutor(org.apache.flink.core.execution.PipelineExecutor) PipelineOptionsInternal(org.apache.flink.configuration.PipelineOptionsInternal) FunctionUtils(org.apache.flink.util.function.FunctionUtils) Preconditions.checkNotNull(org.apache.flink.util.Preconditions.checkNotNull) ClientOptions(org.apache.flink.client.cli.ClientOptions) Logger(org.slf4j.Logger) MalformedURLException(java.net.MalformedURLException) Collection(java.util.Collection) Configuration(org.apache.flink.configuration.Configuration) CompletionException(java.util.concurrent.CompletionException) ClientUtils(org.apache.flink.runtime.client.ClientUtils) InetSocketAddress(java.net.InetSocketAddress) JobClient(org.apache.flink.core.execution.JobClient) JobID(org.apache.flink.api.common.JobID) Optional(java.util.Optional) Internal(org.apache.flink.annotation.Internal) BlobClient(org.apache.flink.runtime.blob.BlobClient) Time(org.apache.flink.api.common.time.Time)
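
Note the recurring pattern here: the checked FlinkException cannot escape the thenCompose lambda, so it is rethrown wrapped in the unchecked CompletionException, and the returned future completes exceptionally. Below is a minimal caller-side sketch of unwrapping that failure; the reportFailures helper is hypothetical and not part of Flink:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

final class SubmissionFailureLogging {

    private SubmissionFailureLogging() {}

    // Hypothetical helper: logs the original cause of a failed submission future.
    static <T> CompletableFuture<T> reportFailures(CompletableFuture<T> future) {
        return future.whenComplete((value, throwable) -> {
            if (throwable != null) {
                // Async stages surface failures wrapped in CompletionException;
                // the FlinkException thrown above sits one level below it.
                Throwable cause =
                        throwable instanceof CompletionException && throwable.getCause() != null
                                ? throwable.getCause()
                                : throwable;
                System.err.println("Job submission failed: " + cause);
            }
        });
    }
}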

Example 52 with CompletionException

Use of java.util.concurrent.CompletionException in project flink by apache.

From the class BlobServerPutTest, method testConcurrentPutOperations:

/**
 * [FLINK-6020] Tests that concurrent put operations will only upload the file once to the
 * {@link BlobStore} and that the files are not corrupt at any time.
 *
 * @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType) throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    final int concurrentPutOperations = 2;
    final int dataSize = 1024;
    Collection<BlobKey> persistedBlobs = ConcurrentHashMap.newKeySet();
    TestingBlobStore blobStore = new TestingBlobStoreBuilder().setPutFunction((file, jobID, blobKey) -> {
        persistedBlobs.add(blobKey);
        return true;
    }).createTestingBlobStore();
    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];
    ArrayList<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);
    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);
    try (final BlobServer server = new BlobServer(config, temporaryFolder.newFolder(), blobStore)) {
        server.start();
        for (int i = 0; i < concurrentPutOperations; i++) {
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(() -> {
                try {
                    BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                    BlobKey uploadedKey = put(server, jobId, inputStream, blobType);
                    // check the uploaded file's contents (concurrently)
                    verifyContents(server, jobId, uploadedKey, data);
                    return uploadedKey;
                } catch (IOException e) {
                    throw new CompletionException(new FlinkException("Could not upload blob.", e));
                }
            }, executor);
            allFutures.add(putFuture);
        }
        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);
        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();
        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();
        assertTrue(blobKeyIterator.hasNext());
        BlobKey blobKey = blobKeyIterator.next();
        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }
        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);
        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            assertThat(persistedBlobs).hasSameElementsAs(blobKeys);
        } else {
            // can't really verify much in the other cases other than that the put
            // operations should work and not corrupt files
            assertThat(persistedBlobs).isEmpty();
        }
    } finally {
        executor.shutdownNow();
    }
}
Also used : Arrays(java.util.Arrays) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) Random(java.util.Random) ByteArrayInputStream(java.io.ByteArrayInputStream) Path(org.apache.flink.core.fs.Path) TestLogger(org.apache.flink.util.TestLogger) Assert.fail(org.junit.Assert.fail) TRANSIENT_BLOB(org.apache.flink.runtime.blob.BlobKey.BlobType.TRANSIENT_BLOB) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) CompletionException(java.util.concurrent.CompletionException) Preconditions(org.apache.flink.util.Preconditions) InetSocketAddress(java.net.InetSocketAddress) Executors(java.util.concurrent.Executors) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Assume.assumeTrue(org.junit.Assume.assumeTrue) AccessDeniedException(java.nio.file.AccessDeniedException) FlinkException(org.apache.flink.util.FlinkException) BlobClientTest.validateGetAndClose(org.apache.flink.runtime.blob.BlobClientTest.validateGetAndClose) CompletableFuture(java.util.concurrent.CompletableFuture) ArrayList(java.util.ArrayList) BlobServerGetTest.get(org.apache.flink.runtime.blob.BlobServerGetTest.get) CheckedThread(org.apache.flink.core.testutils.CheckedThread) FutureUtils(org.apache.flink.util.concurrent.FutureUtils) Assert.assertArrayEquals(org.junit.Assert.assertArrayEquals) ExpectedException(org.junit.rules.ExpectedException) Nullable(javax.annotation.Nullable) ExecutorService(java.util.concurrent.ExecutorService) Iterator(java.util.Iterator) Files(java.nio.file.Files) Assert.assertNotNull(org.junit.Assert.assertNotNull) Configuration(org.apache.flink.configuration.Configuration) Assert.assertTrue(org.junit.Assert.assertTrue) FileUtils(org.apache.commons.io.FileUtils) Test(org.junit.Test) IOException(java.io.IOException) FileInputStream(java.io.FileInputStream) BlobKeyTest.verifyKeyDifferentHashEquals(org.apache.flink.runtime.blob.BlobKeyTest.verifyKeyDifferentHashEquals) OperatingSystem(org.apache.flink.util.OperatingSystem) File(java.io.File) ExecutionException(java.util.concurrent.ExecutionException) JobID(org.apache.flink.api.common.JobID) Rule(org.junit.Rule) Collections(java.util.Collections) TemporaryFolder(org.junit.rules.TemporaryFolder) PERMANENT_BLOB(org.apache.flink.runtime.blob.BlobKey.BlobType.PERMANENT_BLOB) Assert.assertEquals(org.junit.Assert.assertEquals) InputStream(java.io.InputStream)
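
The concurrency in this test hinges on BlockingInputStream, which is defined elsewhere in BlobServerPutTest: it holds back the payload until all concurrent put operations have started, so the uploads genuinely overlap. A sketch of such a stream under that assumed contract (the real class may differ in detail):

import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.CountDownLatch;

// Sketch only; the real BlockingInputStream lives in BlobServerPutTest.
class GatedInputStream extends InputStream {

    private final CountDownLatch latch;
    private final byte[] data;
    private int index;
    private boolean released;

    GatedInputStream(CountDownLatch latch, byte[] data) {
        this.latch = latch;
        this.data = data;
    }

    @Override
    public int read() throws IOException {
        if (!released) {
            // Announce this reader, then block until every concurrent put has
            // started, forcing the uploads to run in parallel.
            latch.countDown();
            try {
                latch.await();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new IOException("Interrupted while waiting for concurrent puts.", e);
            }
            released = true;
        }
        return index < data.length ? (data[index++] & 0xFF) : -1;
    }
}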

Example 53 with CompletionException

Use of java.util.concurrent.CompletionException in project flink by apache.

From the class BlobServerDeleteTest, method testConcurrentDeleteOperations:

/**
 * [FLINK-6020] Tests that concurrent delete operations don't interfere with each other.
 *
 * <p>Note: This test checks that there cannot be two threads which have both seen that a given
 * blob file exists and of which one then fails to delete it. Without the introduced lock, this
 * situation would occasionally occur and make this test fail. Thus, if this test becomes
 * "unstable", the delete atomicity is most likely broken.
 *
 * @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
 */
private void testConcurrentDeleteOperations(@Nullable final JobID jobId) throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    final int concurrentDeleteOperations = 3;
    final ExecutorService executor = Executors.newFixedThreadPool(concurrentDeleteOperations);
    final List<CompletableFuture<Void>> deleteFutures = new ArrayList<>(concurrentDeleteOperations);
    final byte[] data = { 1, 2, 3 };
    try (final BlobServer server = new BlobServer(config, temporaryFolder.newFolder(), new VoidBlobStore())) {
        server.start();
        final TransientBlobKey blobKey = (TransientBlobKey) put(server, jobId, data, TRANSIENT_BLOB);
        assertTrue(server.getStorageLocation(jobId, blobKey).exists());
        for (int i = 0; i < concurrentDeleteOperations; i++) {
            CompletableFuture<Void> deleteFuture = CompletableFuture.supplyAsync(() -> {
                try {
                    assertTrue(delete(server, jobId, blobKey));
                    assertFalse(server.getStorageLocation(jobId, blobKey).exists());
                    return null;
                } catch (IOException e) {
                    throw new CompletionException(new FlinkException("Could not delete the given blob key " + blobKey + '.'));
                }
            }, executor);
            deleteFutures.add(deleteFuture);
        }
        CompletableFuture<Void> waitFuture = FutureUtils.waitForAll(deleteFutures);
        // make sure all delete operations have completed successfully
        // in case of no lock, one of the delete operations should eventually fail
        waitFuture.get();
        assertFalse(server.getStorageLocation(jobId, blobKey).exists());
    } finally {
        executor.shutdownNow();
    }
}
Also used : Configuration(org.apache.flink.configuration.Configuration) ArrayList(java.util.ArrayList) IOException(java.io.IOException) FlinkException(org.apache.flink.util.FlinkException) CompletableFuture(java.util.concurrent.CompletableFuture) CompletionException(java.util.concurrent.CompletionException) ExecutorService(java.util.concurrent.ExecutorService)
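
Because the supplier rethrows a CompletionException, each delete future completes exceptionally with the wrapped FlinkException as its cause, and waitFuture.get() would surface it as an ExecutionException. A self-contained illustration of that propagation (plain JDK, not Flink code):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;

public class PropagationDemo {

    public static void main(String[] args) throws InterruptedException {
        CompletableFuture<Void> future =
                CompletableFuture.runAsync(() -> {
                    // Checked exceptions cannot be thrown from this lambda, hence
                    // the CompletionException wrapper used throughout these tests.
                    throw new CompletionException(new IllegalStateException("delete failed"));
                });
        try {
            future.get();
        } catch (ExecutionException e) {
            // get() peels off the CompletionException and reports its cause:
            System.out.println(e.getCause().getMessage()); // prints "delete failed"
        }
        // future.join() would instead rethrow the CompletionException itself.
    }
}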

Example 54 with CompletionException

Use of java.util.concurrent.CompletionException in project flink by apache.

From the class BlobCacheDeleteTest, method testConcurrentDeleteOperations:

/**
 * [FLINK-6020] Tests that concurrent delete operations don't interfere with each other.
 *
 * <p>Note: This test checks that there cannot be two threads which have both seen that a given
 * blob file exists and of which one then fails to delete it. Without the introduced lock, this
 * situation would occasionally occur and make this test fail. Thus, if this test becomes
 * "unstable", the delete atomicity is most likely broken.
 *
 * @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
 */
private void testConcurrentDeleteOperations(@Nullable final JobID jobId) throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    final int concurrentDeleteOperations = 3;
    final ExecutorService executor = Executors.newFixedThreadPool(concurrentDeleteOperations);
    final List<CompletableFuture<Void>> deleteFutures = new ArrayList<>(concurrentDeleteOperations);
    final byte[] data = { 1, 2, 3 };
    try (BlobServer server = new BlobServer(config, temporaryFolder.newFolder(), new VoidBlobStore());
        BlobCacheService cache = new BlobCacheService(config, temporaryFolder.newFolder(), new VoidBlobStore(), new InetSocketAddress("localhost", server.getPort()))) {
        server.start();
        final TransientBlobKey blobKey = (TransientBlobKey) put(server, jobId, data, TRANSIENT_BLOB);
        assertTrue(server.getStorageLocation(jobId, blobKey).exists());
        for (int i = 0; i < concurrentDeleteOperations; i++) {
            CompletableFuture<Void> deleteFuture = CompletableFuture.supplyAsync(() -> {
                try {
                    assertTrue(delete(cache, jobId, blobKey));
                    assertFalse(cache.getTransientBlobService().getStorageLocation(jobId, blobKey).exists());
                    // delete only works on local cache!
                    assertTrue(server.getStorageLocation(jobId, blobKey).exists());
                    return null;
                } catch (IOException e) {
                    throw new CompletionException(new FlinkException("Could not upload blob.", e));
                }
            }, executor);
            deleteFutures.add(deleteFuture);
        }
        CompletableFuture<Void> waitFuture = FutureUtils.waitForAll(deleteFutures);
        // make sure all delete operations have completed successfully
        // in case of no lock, one of the delete operations should eventually fail
        waitFuture.get();
        // delete only works on local cache!
        assertTrue(server.getStorageLocation(jobId, blobKey).exists());
    } finally {
        executor.shutdownNow();
    }
}
Also used : Configuration(org.apache.flink.configuration.Configuration) InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) IOException(java.io.IOException) FlinkException(org.apache.flink.util.FlinkException) CompletableFuture(java.util.concurrent.CompletableFuture) CompletionException(java.util.concurrent.CompletionException) ExecutorService(java.util.concurrent.ExecutorService)
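
FutureUtils.waitForAll collapses the delete futures into a single gate that fails if any participant failed. With only the JDK, roughly the same gate can be built from CompletableFuture.allOf; this is an approximation, and Flink's utility may differ in details such as how eagerly failures are surfaced:

import java.util.List;
import java.util.concurrent.CompletableFuture;

final class WaitForAllSketch {

    private WaitForAllSketch() {}

    // Completes once every given future has completed, and completes
    // exceptionally if any of them failed.
    static CompletableFuture<Void> waitForAll(List<? extends CompletableFuture<?>> futures) {
        return CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0]));
    }
}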

Example 55 with CompletionException

Use of java.util.concurrent.CompletionException in project flink by apache.

From the class BlobCachePutTest, method testConcurrentPutOperations:

/**
 * [FLINK-6020] Tests that concurrent put operations will only upload the file once to the
 * {@link BlobStore} and that the files are not corrupt at any time.
 *
 * @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType) throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);
    int concurrentPutOperations = 2;
    int dataSize = 1024;
    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];
    final List<Path> jars;
    if (blobType == PERMANENT_BLOB) {
        // implement via JAR file upload instead:
        File tmpFile = temporaryFolder.newFile();
        FileUtils.writeByteArrayToFile(tmpFile, data);
        jars = Collections.singletonList(new Path(tmpFile.getAbsolutePath()));
    } else {
        jars = null;
    }
    Collection<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);
    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);
    try (final BlobServer server = new BlobServer(config, temporaryFolder.newFolder(), blobStoreServer);
        final BlobCacheService cache = new BlobCacheService(config, temporaryFolder.newFolder(), blobStoreCache, new InetSocketAddress("localhost", server.getPort()))) {
        server.start();
        // for highAvailability
        final InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
        for (int i = 0; i < concurrentPutOperations; i++) {
            final Supplier<BlobKey> callable;
            if (blobType == PERMANENT_BLOB) {
                // cannot use a blocking stream here (upload only possible via files)
                callable = () -> {
                    try {
                        List<PermanentBlobKey> keys = BlobClient.uploadFiles(serverAddress, config, jobId, jars);
                        assertEquals(1, keys.size());
                        BlobKey uploadedKey = keys.get(0);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };
            } else {
                callable = () -> {
                    try {
                        BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                        BlobKey uploadedKey = put(cache, jobId, inputStream, blobType);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };
            }
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(callable, executor);
            allFutures.add(putFuture);
        }
        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);
        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();
        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();
        assertTrue(blobKeyIterator.hasNext());
        BlobKey blobKey = blobKeyIterator.next();
        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            // check for unique BlobKey, but should have same hash
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }
        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);
        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            verify(blobStoreServer, times(1)).put(any(File.class), eq(jobId), eq(blobKey));
        } else {
            // can't really verify much in the other cases other than that the put
            // operations should work and not corrupt files
            verify(blobStoreServer, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
        }
        // caches must not access the blob store (they are not allowed to write there)
        verify(blobStoreCache, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
    } finally {
        executor.shutdownNow();
    }
}
Also used : Configuration(org.apache.flink.configuration.Configuration) InetSocketAddress(java.net.InetSocketAddress) FutureUtils(org.apache.flink.util.concurrent.FutureUtils) ArrayList(java.util.ArrayList) CompletableFuture(java.util.concurrent.CompletableFuture) Path(org.apache.flink.core.fs.Path) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) FlinkException(org.apache.flink.util.FlinkException) CompletionException(java.util.concurrent.CompletionException) ExecutorService(java.util.concurrent.ExecutorService) Collection(java.util.Collection) File(java.io.File) BlockingInputStream(org.apache.flink.runtime.blob.BlobServerPutTest.BlockingInputStream)
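
All five examples repeat the same idiom: catch the checked exception inside the async lambda and rethrow it as a CompletionException. A generic helper capturing that pattern might look as follows; it is hypothetical, not a Flink or JDK API:

import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.Executor;

final class UncheckedFutures {

    private UncheckedFutures() {}

    // Runs the callable asynchronously; any checked exception is rethrown as a
    // CompletionException so it propagates through the future chain as above.
    static <T> CompletableFuture<T> supplyChecked(Callable<T> callable, Executor executor) {
        return CompletableFuture.supplyAsync(() -> {
            try {
                return callable.call();
            } catch (Exception e) {
                throw new CompletionException(e);
            }
        }, executor);
    }
}

With such a helper, the put lambdas above would reduce to, for example, supplyChecked(() -> put(cache, jobId, inputStream, blobType), executor).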

Aggregations

CompletionException (java.util.concurrent.CompletionException): 199
Test (org.junit.Test): 80
CompletableFuture (java.util.concurrent.CompletableFuture): 62
List (java.util.List): 52
ArrayList (java.util.ArrayList): 51
IOException (java.io.IOException): 45
Map (java.util.Map): 39
Collection (java.util.Collection): 31
ExecutionException (java.util.concurrent.ExecutionException): 31
HashMap (java.util.HashMap): 30
Collections (java.util.Collections): 24
TimeUnit (java.util.concurrent.TimeUnit): 22
Collectors (java.util.stream.Collectors): 22
FlinkException (org.apache.flink.util.FlinkException): 22
Before (org.junit.Before): 21
Duration (java.time.Duration): 19
Arrays (java.util.Arrays): 19
BeforeClass (org.junit.BeforeClass): 19
ExecutorService (java.util.concurrent.ExecutorService): 18
Nonnull (javax.annotation.Nonnull): 17