Use of java.util.concurrent.CompletionException in project Flink by Apache.
The class EmbeddedExecutor, method submitJob.
private static CompletableFuture<JobID> submitJob(
        final Configuration configuration,
        final DispatcherGateway dispatcherGateway,
        final JobGraph jobGraph,
        final Time rpcTimeout) {
    checkNotNull(jobGraph);
    LOG.info("Submitting Job with JobId={}.", jobGraph.getJobID());
    return dispatcherGateway
            .getBlobServerPort(rpcTimeout)
            .thenApply(blobServerPort -> new InetSocketAddress(dispatcherGateway.getHostname(), blobServerPort))
            .thenCompose(blobServerAddress -> {
                try {
                    ClientUtils.extractAndUploadJobGraphFiles(
                            jobGraph, () -> new BlobClient(blobServerAddress, configuration));
                } catch (FlinkException e) {
                    throw new CompletionException(e);
                }
                return dispatcherGateway.submitJob(jobGraph, rpcTimeout);
            })
            .thenApply(ack -> jobGraph.getJobID());
}
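The try/catch inside thenCompose above is the standard idiom for checked exceptions in CompletableFuture stages: the lambda cannot declare them, so the code wraps the checked FlinkException in a CompletionException, which completes the stage exceptionally while preserving the original cause. Below is a minimal, self-contained sketch of the idiom; UploadException and upload are hypothetical stand-ins for FlinkException and the upload call, not Flink APIs.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class CompletionExceptionSketch {

    // hypothetical checked exception standing in for FlinkException
    static class UploadException extends Exception {
        UploadException(String message) {
            super(message);
        }
    }

    static void upload(String files) throws UploadException {
        throw new UploadException("Could not upload " + files);
    }

    public static void main(String[] args) {
        CompletableFuture<String> future = CompletableFuture
                .supplyAsync(() -> "job-files")
                .thenCompose(files -> {
                    try {
                        upload(files); // may throw a checked exception
                    } catch (UploadException e) {
                        // lambdas cannot throw checked exceptions, so wrap and rethrow;
                        // the stage then completes exceptionally with this cause
                        throw new CompletionException(e);
                    }
                    return CompletableFuture.completedFuture("submitted");
                });

        try {
            future.join(); // join() rethrows the CompletionException unchanged
        } catch (CompletionException e) {
            System.out.println("cause: " + e.getCause()); // the original UploadException
        }
    }
}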
Use of java.util.concurrent.CompletionException in project Flink by Apache.
The class BlobServerPutTest, method testConcurrentPutOperations.
/**
* [FLINK-6020] Tests that concurrent put operations will only upload the file once to the
* {@link BlobStore} and that the files are not corrupt at any time.
*
* @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
* @param blobType whether the BLOB should become permanent or transient
*/
private void testConcurrentPutOperations(
        @Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    final int concurrentPutOperations = 2;
    final int dataSize = 1024;
    Collection<BlobKey> persistedBlobs = ConcurrentHashMap.newKeySet();
    TestingBlobStore blobStore =
            new TestingBlobStoreBuilder()
                    .setPutFunction((file, jobID, blobKey) -> {
                        persistedBlobs.add(blobKey);
                        return true;
                    })
                    .createTestingBlobStore();
    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];
    ArrayList<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);
    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);
    try (final BlobServer server = new BlobServer(config, temporaryFolder.newFolder(), blobStore)) {
        server.start();
        for (int i = 0; i < concurrentPutOperations; i++) {
            CompletableFuture<BlobKey> putFuture =
                    CompletableFuture.supplyAsync(() -> {
                        try {
                            BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                            BlobKey uploadedKey = put(server, jobId, inputStream, blobType);
                            // check the uploaded file's contents (concurrently)
                            verifyContents(server, jobId, uploadedKey, data);
                            return uploadedKey;
                        } catch (IOException e) {
                            throw new CompletionException(new FlinkException("Could not upload blob.", e));
                        }
                    }, executor);
            allFutures.add(putFuture);
        }
        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);
        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();
        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();
        assertTrue(blobKeyIterator.hasNext());
        BlobKey blobKey = blobKeyIterator.next();
        // make sure that all blob keys are distinct but refer to the same content (equal hashes)
        while (blobKeyIterator.hasNext()) {
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }
        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);
        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            assertThat(persistedBlobs).hasSameElementsAs(blobKeys);
        } else {
            // can't really verify much in the other cases other than that the put
            // operations should work and not corrupt files
            assertThat(persistedBlobs).isEmpty();
        }
    } finally {
        executor.shutdownNow();
    }
}
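The latch-based setup above only works because every upload blocks inside BlockingInputStream until all participants have arrived, so the put operations genuinely overlap. BlockingInputStream is a test helper whose implementation is not shown on this page; the following is a rough sketch of how such a latch-gated stream could look, assuming it simply delays the first read until the latch opens.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.CountDownLatch;

// hypothetical sketch in the spirit of the test's BlockingInputStream helper
class LatchedInputStream extends InputStream {

    private final CountDownLatch latch;
    private final InputStream delegate;
    private boolean released;

    LatchedInputStream(CountDownLatch latch, byte[] data) {
        this.latch = latch;
        this.delegate = new ByteArrayInputStream(data);
    }

    @Override
    public int read() throws IOException {
        if (!released) {
            // signal arrival, then wait for all peers so that every
            // concurrent put starts reading at the same moment
            latch.countDown();
            try {
                latch.await();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new IOException("Interrupted while waiting for peers.", e);
            }
            released = true;
        }
        return delegate.read();
    }
}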
Use of java.util.concurrent.CompletionException in project Flink by Apache.
The class BlobServerDeleteTest, method testConcurrentDeleteOperations.
/**
* [FLINK-6020] Tests that concurrent delete operations don't interfere with each other.
*
* <p>Note: This test checks that two threads cannot both observe that a given blob file
* exists and then have one of them fail to delete it. Without the introduced lock, this
* situation would occur only rarely, making the test fail intermittently. Thus, if this
* test becomes "unstable", the delete atomicity is most likely broken.
*
* @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
*/
private void testConcurrentDeleteOperations(@Nullable final JobID jobId)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    final int concurrentDeleteOperations = 3;
    final ExecutorService executor = Executors.newFixedThreadPool(concurrentDeleteOperations);
    final List<CompletableFuture<Void>> deleteFutures = new ArrayList<>(concurrentDeleteOperations);
    final byte[] data = { 1, 2, 3 };
    try (final BlobServer server = new BlobServer(config, temporaryFolder.newFolder(), new VoidBlobStore())) {
        server.start();
        final TransientBlobKey blobKey = (TransientBlobKey) put(server, jobId, data, TRANSIENT_BLOB);
        assertTrue(server.getStorageLocation(jobId, blobKey).exists());
        for (int i = 0; i < concurrentDeleteOperations; i++) {
            CompletableFuture<Void> deleteFuture =
                    CompletableFuture.supplyAsync(() -> {
                        try {
                            assertTrue(delete(server, jobId, blobKey));
                            assertFalse(server.getStorageLocation(jobId, blobKey).exists());
                            return null;
                        } catch (IOException e) {
                            // preserve the cause so that failures stay diagnosable
                            throw new CompletionException(
                                    new FlinkException("Could not delete the given blob key " + blobKey + '.', e));
                        }
                    }, executor);
            deleteFutures.add(deleteFuture);
        }
        CompletableFuture<Void> waitFuture = FutureUtils.waitForAll(deleteFutures);
        // make sure all delete operations have completed successfully;
        // without the lock, one of them would eventually fail
        waitFuture.get();
        assertFalse(server.getStorageLocation(jobId, blobKey).exists());
    } finally {
        executor.shutdownNow();
    }
}
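FutureUtils.waitForAll is a Flink-internal combinator; the closest JDK equivalent is CompletableFuture.allOf. The sketch below (class name and messages are illustrative, not from Flink) shows why the single waitFuture.get() call is enough to detect a failing delete: get() unwraps the CompletionException thrown inside supplyAsync and rethrows its cause wrapped in a checked ExecutionException.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;

public class WaitForAllSketch {

    public static void main(String[] args) throws InterruptedException {
        CompletableFuture<Void> ok = CompletableFuture.completedFuture(null);
        CompletableFuture<Void> failing = CompletableFuture.supplyAsync(() -> {
            // a failing delete wraps its error, just like the test above
            throw new CompletionException(new IllegalStateException("blob already gone"));
        });

        // roughly what FutureUtils.waitForAll provides: a future that completes
        // once every input has completed, exceptionally if any of them failed
        CompletableFuture<Void> all = CompletableFuture.allOf(ok, failing);

        try {
            all.get();
        } catch (ExecutionException e) {
            // get() unwraps the CompletionException: the cause reported here
            // is the IllegalStateException thrown by the failing delete
            System.out.println("first failure: " + e.getCause());
        }
    }
}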
Use of java.util.concurrent.CompletionException in project Flink by Apache.
The class BlobCacheDeleteTest, method testConcurrentDeleteOperations.
/**
* [FLINK-6020] Tests that concurrent delete operations don't interfere with each other.
*
* <p>Note: This test checks that two threads cannot both observe that a given blob file
* exists and then have one of them fail to delete it. Without the introduced lock, this
* situation would occur only rarely, making the test fail intermittently. Thus, if this
* test becomes "unstable", the delete atomicity is most likely broken.
*
* @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
*/
private void testConcurrentDeleteOperations(@Nullable final JobID jobId)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    final int concurrentDeleteOperations = 3;
    final ExecutorService executor = Executors.newFixedThreadPool(concurrentDeleteOperations);
    final List<CompletableFuture<Void>> deleteFutures = new ArrayList<>(concurrentDeleteOperations);
    final byte[] data = { 1, 2, 3 };
    try (BlobServer server = new BlobServer(config, temporaryFolder.newFolder(), new VoidBlobStore());
            BlobCacheService cache = new BlobCacheService(
                    config,
                    temporaryFolder.newFolder(),
                    new VoidBlobStore(),
                    new InetSocketAddress("localhost", server.getPort()))) {
        server.start();
        final TransientBlobKey blobKey = (TransientBlobKey) put(server, jobId, data, TRANSIENT_BLOB);
        assertTrue(server.getStorageLocation(jobId, blobKey).exists());
        for (int i = 0; i < concurrentDeleteOperations; i++) {
            CompletableFuture<Void> deleteFuture =
                    CompletableFuture.supplyAsync(() -> {
                        try {
                            assertTrue(delete(cache, jobId, blobKey));
                            assertFalse(cache.getTransientBlobService().getStorageLocation(jobId, blobKey).exists());
                            // delete only works on the local cache!
                            assertTrue(server.getStorageLocation(jobId, blobKey).exists());
                            return null;
                        } catch (IOException e) {
                            throw new CompletionException(new FlinkException("Could not delete blob.", e));
                        }
                    }, executor);
            deleteFutures.add(deleteFuture);
        }
        CompletableFuture<Void> waitFuture = FutureUtils.waitForAll(deleteFutures);
        // make sure all delete operations have completed successfully;
        // without the lock, one of them would eventually fail
        waitFuture.get();
        // delete only works on the local cache!
        assertTrue(server.getStorageLocation(jobId, blobKey).exists());
    } finally {
        executor.shutdownNow();
    }
}
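A side note on waitFuture.get() versus join(): both surface an exceptional completion, but they report it differently, which determines what a catch block sees. A small illustrative sketch:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;

public class JoinVersusGetSketch {

    public static void main(String[] args) throws InterruptedException {
        CompletableFuture<Void> failed = new CompletableFuture<>();
        failed.completeExceptionally(
                new CompletionException(new IllegalStateException("delete failed")));

        // join() rethrows the CompletionException as-is (unchecked)
        try {
            failed.join();
        } catch (CompletionException e) {
            System.out.println("join cause: " + e.getCause());
        }

        // get() unwraps it and throws a checked ExecutionException instead
        try {
            failed.get();
        } catch (ExecutionException e) {
            System.out.println("get cause: " + e.getCause());
        }
    }
}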
Use of java.util.concurrent.CompletionException in project Flink by Apache.
The class BlobCachePutTest, method testConcurrentPutOperations.
/**
* [FLINK-6020] Tests that concurrent put operations will only upload the file once to the
* {@link BlobStore} and that the files are not corrupt at any time.
*
* @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
* @param blobType whether the BLOB should become permanent or transient
*/
private void testConcurrentPutOperations(
        @Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);
    int concurrentPutOperations = 2;
    int dataSize = 1024;
    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];
    final List<Path> jars;
    if (blobType == PERMANENT_BLOB) {
        // implement via JAR file upload instead:
        File tmpFile = temporaryFolder.newFile();
        FileUtils.writeByteArrayToFile(tmpFile, data);
        jars = Collections.singletonList(new Path(tmpFile.getAbsolutePath()));
    } else {
        jars = null;
    }
    Collection<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);
    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);
    try (final BlobServer server = new BlobServer(config, temporaryFolder.newFolder(), blobStoreServer);
            final BlobCacheService cache = new BlobCacheService(
                    config,
                    temporaryFolder.newFolder(),
                    blobStoreCache,
                    new InetSocketAddress("localhost", server.getPort()))) {
        server.start();
        // for highAvailability
        final InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
        for (int i = 0; i < concurrentPutOperations; i++) {
            final Supplier<BlobKey> callable;
            if (blobType == PERMANENT_BLOB) {
                // cannot use a blocking stream here (upload only possible via files)
                callable = () -> {
                    try {
                        List<PermanentBlobKey> keys = BlobClient.uploadFiles(serverAddress, config, jobId, jars);
                        assertEquals(1, keys.size());
                        BlobKey uploadedKey = keys.get(0);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };
            } else {
                callable = () -> {
                    try {
                        BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                        BlobKey uploadedKey = put(cache, jobId, inputStream, blobType);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };
            }
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(callable, executor);
            allFutures.add(putFuture);
        }
        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);
        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();
        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();
        assertTrue(blobKeyIterator.hasNext());
        BlobKey blobKey = blobKeyIterator.next();
        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            // check for unique BlobKey, but should have same hash
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }
        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);
        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            verify(blobStoreServer, times(1)).put(any(File.class), eq(jobId), eq(blobKey));
        } else {
            // can't really verify much in the other cases other than that the put
            // operations should work and not corrupt files
            verify(blobStoreServer, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
        }
        // caches must not access the blob store (they are not allowed to write there)
        verify(blobStoreCache, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
    } finally {
        executor.shutdownNow();
    }
}
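FutureUtils.combineAll, which gathers all uploaded keys above, is also Flink-internal. A minimal stand-in built on CompletableFuture.allOf might look like the sketch below (the class and method names are hypothetical): if any put fails, the returned future completes exceptionally instead of producing the collection.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public final class CombineAllSketch {

    // roughly what FutureUtils.combineAll offers: complete with all results
    // once every input succeeds, or complete exceptionally on the first error
    static <T> CompletableFuture<Collection<T>> combineAll(
            Collection<CompletableFuture<T>> futures) {
        CompletableFuture<Void> all =
                CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0]));
        return all.thenApply(ignored -> {
            List<T> results = new ArrayList<>(futures.size());
            for (CompletableFuture<T> future : futures) {
                results.add(future.join()); // safe: allOf succeeded, so every future did
            }
            return results;
        });
    }

    public static void main(String[] args) {
        List<CompletableFuture<String>> puts = List.of(
                CompletableFuture.completedFuture("key-1"),
                CompletableFuture.completedFuture("key-2"));
        combineAll(puts).thenAccept(keys -> System.out.println("all keys: " + keys));
    }
}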