Use of alluxio.collections.ConcurrentHashSet in the Alluxio project.
Source: class ConcurrentFileSystemMasterTest, method concurrentRename.
/**
 * Helper for renaming a list of paths concurrently. Assumes the srcs are already created and
 * dsts do not exist. Enforces that the run time of this method is not greater than twice the
 * sleep time (to infer concurrent operations). Injects an artificial sleep time to the
 * sleeping under file system and resets it after the renames are complete.
 *
 * @param src list of source paths
 * @param dst list of destination paths
 * @return how many errors occurred
 */
private int concurrentRename(final AlluxioURI[] src, final AlluxioURI[] dst) throws Exception {
  final int numFiles = src.length;
  // Every worker waits on this barrier so all renames fire as close together as possible.
  final CyclicBarrier barrier = new CyclicBarrier(numFiles);
  List<Thread> threads = new ArrayList<>(numFiles);
  // If there are exceptions, we will store them here.
  final ConcurrentHashSet<Throwable> errors = new ConcurrentHashSet<>();
  Thread.UncaughtExceptionHandler exceptionHandler = new Thread.UncaughtExceptionHandler() {
    @Override
    public void uncaughtException(Thread th, Throwable ex) {
      errors.add(ex);
    }
  };
  for (int i = 0; i < numFiles; i++) {
    final int iteration = i;
    Thread t = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          AuthenticatedClientUser.set(TEST_USER);
          barrier.await();
          mFileSystem.rename(src[iteration], dst[iteration]);
        } catch (Exception e) {
          // Rethrow unchecked so the uncaught-exception handler records the failure.
          // (Throwables.propagate is deprecated in Guava; wrap explicitly instead.)
          throw new RuntimeException(e);
        }
      }
    });
    t.setUncaughtExceptionHandler(exceptionHandler);
    threads.add(t);
  }
  // Randomize start order so results do not depend on creation order.
  Collections.shuffle(threads);
  long startMs = CommonUtils.getCurrentMs();
  for (Thread t : threads) {
    t.start();
  }
  for (Thread t : threads) {
    t.join();
  }
  long durationMs = CommonUtils.getCurrentMs() - startMs;
  Assert.assertTrue("Execution duration " + durationMs + " took longer than expected " + LIMIT_MS,
      durationMs < LIMIT_MS);
  return errors.size();
}
Use of alluxio.collections.ConcurrentHashSet in the Alluxio project.
Source: class SharedGrpcDataReaderTest, method MultiThreadConcurrentRead.
/**
 * 10 threads read from the same file with the shared cache concurrently.
 */
@Test(timeout = 1000 * 60)
public void MultiThreadConcurrentRead() throws Exception {
  int concurrency = 10;
  List<Thread> threads = new ArrayList<>(concurrency);
  // If there are exceptions, we will store them here
  final ConcurrentHashSet<Throwable> errors = new ConcurrentHashSet<>();
  Thread.UncaughtExceptionHandler exceptionHandler = (th, ex) -> errors.add(ex);
  for (int i = 0; i < concurrency; i++) {
    Thread t = new Thread(() -> {
      Random random = new Random();
      // Each thread reads a random sub-range of the block. offset is in
      // [0, mBlockSize), so mBlockSize - offset >= 1 and nextInt is legal.
      int offset = random.nextInt(mBlockSize);
      int len = random.nextInt(mBlockSize - offset);
      ReadRequest partialReadRequest = ReadRequest.newBuilder().setOffset(offset).setLength(len)
          .setBlockId(mReadRequest.getBlockId()).setChunkSize(mReadRequest.getChunkSize()).build();
      try (SharedGrpcDataReader reader =
          new SharedGrpcDataReader(partialReadRequest, mBufferCachingDataReader)) {
        // validateRead advances the offset; -1 signals the range is fully consumed.
        while (offset != -1) {
          offset = validateRead(reader, offset, getChunkNum(len));
        }
      } catch (Exception e) {
        // BUG FIX: throwIfUnchecked alone silently swallows checked exceptions,
        // so failures never reached the handler. Wrap checked ones explicitly.
        Throwables.throwIfUnchecked(e);
        throw new RuntimeException(e);
      }
    });
    t.setUncaughtExceptionHandler(exceptionHandler);
    threads.add(t);
  }
  Collections.shuffle(threads);
  for (Thread t : threads) {
    t.start();
  }
  for (Thread t : threads) {
    t.join();
  }
  // BUG FIX: the collected errors were never checked, so the test could pass even
  // when reader threads failed. AssertionError requires no additional import.
  if (!errors.isEmpty()) {
    throw new AssertionError("Concurrent read failed with errors: " + errors);
  }
}
Use of alluxio.collections.ConcurrentHashSet in the Alluxio project.
Source: class ConcurrentFileSystemMasterSetTtlIntegrationTest, method concurrentSetTtl.
/**
 * Helper for setting the TTL on a list of paths concurrently, one thread per path.
 * All threads rendezvous on a barrier before issuing their setAttribute call so the
 * operations overlap, and the total run time is asserted to stay under {@code LIMIT_MS}
 * to infer that the operations actually ran concurrently.
 *
 * @param paths the paths whose TTL should be set; assumed to already exist
 * @param ttls the TTL value to apply to the path at the same index (TtlAction.DELETE)
 * @return the set of errors raised by the worker threads (empty on full success)
 */
private ConcurrentHashSet<Throwable> concurrentSetTtl(final AlluxioURI[] paths, final long[] ttls) throws Exception {
  final int numFiles = paths.length;
  // Every worker waits on this barrier so the setAttribute calls start together.
  final CyclicBarrier barrier = new CyclicBarrier(numFiles);
  List<Thread> threads = new ArrayList<>(numFiles);
  // If there are exceptions, we will store them here.
  final ConcurrentHashSet<Throwable> errors = new ConcurrentHashSet<>();
  Thread.UncaughtExceptionHandler exceptionHandler = new Thread.UncaughtExceptionHandler() {
    public void uncaughtException(Thread th, Throwable ex) {
      errors.add(ex);
    }
  };
  for (int i = 0; i < numFiles; i++) {
    final int iteration = i;
    Thread t = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          AuthenticatedClientUser.set(TEST_USER);
          barrier.await();
          mFileSystem.setAttribute(paths[iteration], SetAttributePOptions.newBuilder().setCommonOptions(FileSystemMasterCommonPOptions.newBuilder().setTtl(ttls[iteration]).setTtlAction(TtlAction.DELETE)).build());
        } catch (Exception e) {
          // Rethrow unchecked so the uncaught-exception handler records the failure.
          throw new RuntimeException(e);
        }
      }
    });
    t.setUncaughtExceptionHandler(exceptionHandler);
    threads.add(t);
  }
  // Randomize start order so results do not depend on creation order.
  Collections.shuffle(threads);
  long startMs = CommonUtils.getCurrentMs();
  for (Thread t : threads) {
    t.start();
  }
  for (Thread t : threads) {
    t.join();
  }
  long durationMs = CommonUtils.getCurrentMs() - startMs;
  Assert.assertTrue("Execution duration " + durationMs + " took longer than expected " + LIMIT_MS, durationMs < LIMIT_MS);
  return errors;
}
Use of alluxio.collections.ConcurrentHashSet in the Alluxio project.
Source: class ConcurrentRenameIntegrationTest, method concurrentRename.
/**
 * Helper for renaming a list of paths concurrently. Assumes the srcs are already created and
 * dsts do not exist. Enforces that the run time of this method is not greater than twice the
 * sleep time (to infer concurrent operations). Injects an artificial sleep time to the
 * sleeping under file system and resets it after the renames are complete.
 *
 * @param src list of source paths
 * @param dst list of destination paths
 * @return the occurred errors
 */
private ConcurrentHashSet<Throwable> concurrentRename(final AlluxioURI[] src, final AlluxioURI[] dst) throws Exception {
  final int numFiles = src.length;
  // Every worker waits on this barrier so all renames fire as close together as possible.
  final CyclicBarrier barrier = new CyclicBarrier(numFiles);
  List<Thread> threads = new ArrayList<>(numFiles);
  // If there are exceptions, we will store them here.
  final ConcurrentHashSet<Throwable> errors = new ConcurrentHashSet<>();
  Thread.UncaughtExceptionHandler exceptionHandler = new Thread.UncaughtExceptionHandler() {
    @Override
    public void uncaughtException(Thread th, Throwable ex) {
      errors.add(ex);
    }
  };
  for (int i = 0; i < numFiles; i++) {
    final int iteration = i;
    Thread t = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          AuthenticatedClientUser.set(TEST_USER);
          barrier.await();
          mFileSystem.rename(src[iteration], dst[iteration]);
        } catch (Exception e) {
          // Rethrow unchecked so the uncaught-exception handler records the failure.
          // (Throwables.propagate is deprecated in Guava; wrap explicitly instead.)
          throw new RuntimeException(e);
        }
      }
    });
    t.setUncaughtExceptionHandler(exceptionHandler);
    threads.add(t);
  }
  // Randomize start order so results do not depend on creation order.
  Collections.shuffle(threads);
  long startMs = CommonUtils.getCurrentMs();
  for (Thread t : threads) {
    t.start();
  }
  for (Thread t : threads) {
    t.join();
  }
  long durationMs = CommonUtils.getCurrentMs() - startMs;
  Assert.assertTrue("Execution duration " + durationMs + " took longer than expected " + LIMIT_MS,
      durationMs < LIMIT_MS);
  return errors;
}
Use of alluxio.collections.ConcurrentHashSet in the Alluxio project.
Source: class ConcurrentRenameIntegrationTest, method sameDstConcurrentRename.
/**
 * Tests that when many threads race to rename distinct files to the same destination,
 * exactly one rename succeeds and every other attempt reports an error.
 */
@Test
public void sameDstConcurrentRename() throws Exception {
  int numThreads = CONCURRENCY_FACTOR;
  final AlluxioURI[] srcs = new AlluxioURI[numThreads];
  final AlluxioURI[] dsts = new AlluxioURI[numThreads];
  for (int i = 0; i < numThreads; i++) {
    AlluxioURI source = new AlluxioURI("/file" + i);
    mFileSystem.createFile(source, sCreatePersistedFileOptions).close();
    srcs[i] = source;
    // Every thread targets the identical destination path.
    dsts[i] = new AlluxioURI("/renamed");
  }
  ConcurrentHashSet<Throwable> errors = concurrentRename(srcs, dsts);
  // Exactly one rename may win the race, so all but one must have failed.
  assertErrorsSizeEquals(errors, numThreads - 1);
  List<URIStatus> files = mFileSystem.listStatus(new AlluxioURI("/"));
  // Bucket the surviving entries by prefix; sets keep the names unique.
  Set<String> renamedFiles = new HashSet<>();
  Set<String> originalFiles = new HashSet<>();
  for (URIStatus file : files) {
    String name = file.getName();
    if (name.startsWith("renamed")) {
      renamedFiles.add(name);
    }
    if (name.startsWith("file")) {
      originalFiles.add(name);
    }
  }
  // Exactly one renamed file remains, alongside the numThreads - 1 untouched sources.
  Assert.assertEquals(numThreads, files.size());
  Assert.assertEquals(1, renamedFiles.size());
  Assert.assertEquals(numThreads - 1, originalFiles.size());
}
Aggregations