Use of org.apache.flink.runtime.blob.BlobServerPutTest.BlockingInputStream in project flink by apache.
From class BlobCachePutTest, method testConcurrentPutOperations:
/**
* [FLINK-6020] Tests that concurrent put operations will only upload the file once to the
* {@link BlobStore} and that the files are not corrupt at any time.
*
* @param jobId job ID to use (or <tt>null</tt> if job-unrelated)
* @param blobType whether the BLOB should become permanent or transient
*/
private void testConcurrentPutOperations(
        @Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);

    int concurrentPutOperations = 2;
    int dataSize = 1024;

    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];

    final List<Path> jars;
    if (blobType == PERMANENT_BLOB) {
        // implement via JAR file upload instead:
        File tmpFile = temporaryFolder.newFile();
        FileUtils.writeByteArrayToFile(tmpFile, data);
        jars = Collections.singletonList(new Path(tmpFile.getAbsolutePath()));
    } else {
        jars = null;
    }

    Collection<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);

    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);

    try (final BlobServer server =
                    new BlobServer(config, temporaryFolder.newFolder(), blobStoreServer);
            final BlobCacheService cache =
                    new BlobCacheService(
                            config,
                            temporaryFolder.newFolder(),
                            blobStoreCache,
                            new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // for highAvailability
        final InetSocketAddress serverAddress =
                new InetSocketAddress("localhost", server.getPort());

        for (int i = 0; i < concurrentPutOperations; i++) {
            final Supplier<BlobKey> callable;
            if (blobType == PERMANENT_BLOB) {
                // cannot use a blocking stream here (upload only possible via files)
                callable =
                        () -> {
                            try {
                                List<PermanentBlobKey> keys =
                                        BlobClient.uploadFiles(serverAddress, config, jobId, jars);
                                assertEquals(1, keys.size());
                                BlobKey uploadedKey = keys.get(0);
                                // check the uploaded file's contents (concurrently)
                                verifyContents(server, jobId, uploadedKey, data);
                                return uploadedKey;
                            } catch (IOException e) {
                                throw new CompletionException(
                                        new FlinkException("Could not upload blob.", e));
                            }
                        };
            } else {
                callable =
                        () -> {
                            try {
                                BlockingInputStream inputStream =
                                        new BlockingInputStream(countDownLatch, data);
                                BlobKey uploadedKey = put(cache, jobId, inputStream, blobType);
                                // check the uploaded file's contents (concurrently)
                                verifyContents(server, jobId, uploadedKey, data);
                                return uploadedKey;
                            } catch (IOException e) {
                                throw new CompletionException(
                                        new FlinkException("Could not upload blob.", e));
                            }
                        };
            }
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(callable, executor);
            allFutures.add(putFuture);
        }

        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture =
                FutureUtils.combineAll(allFutures);

        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();

        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();
        assertTrue(blobKeyIterator.hasNext());
        BlobKey blobKey = blobKeyIterator.next();

        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            // check for unique BlobKey, but should have same hash
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }

        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);

        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            verify(blobStoreServer, times(1)).put(any(File.class), eq(jobId), eq(blobKey));
        } else {
            // can't really verify much in the other cases other than that the put
            // operations should work and not corrupt files
            verify(blobStoreServer, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
        }

        // caches must not access the blob store (they are not allowed to write there)
        verify(blobStoreCache, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
    } finally {
        executor.shutdownNow();
    }
}
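
For context, BlockingInputStream is a small test helper defined in BlobServerPutTest. The transient-BLOB branch above relies on it to make both put operations overlap: each stream counts down the shared latch and waits until every participant has started reading before it serves any bytes. Below is a minimal sketch of such a helper; the field names and the exact blocking point are assumptions, and the real class in BlobServerPutTest may differ in detail.

import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.CountDownLatch;

/** Sketch of a stream that blocks until all concurrent readers have started (assumed shape). */
final class BlockingInputStream extends InputStream {

    private final CountDownLatch countDownLatch;
    private final byte[] data;
    private int index = 0;

    BlockingInputStream(CountDownLatch countDownLatch, byte[] data) {
        this.countDownLatch = countDownLatch;
        this.data = data;
    }

    @Override
    public int read() throws IOException {
        // signal that this reader has started ...
        countDownLatch.countDown();
        // ... and wait until all other readers have done the same
        // (once the latch reaches zero, await() returns immediately on later calls)
        try {
            countDownLatch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException("Blocking read was interrupted.", e);
        }
        return index < data.length ? (data[index++] & 0xFF) : -1;
    }
}

Gating the reads on the latch guarantees that the two put operations are in flight at the same time, which is exactly the interleaving the test needs to check that concurrent uploads neither corrupt the stored file nor trigger duplicate uploads to the BlobStore.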