Use of org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser in project jackrabbit-oak by apache.
Class ConsolidatedDataStoreStatsTest, method init.
private void init(int i) throws Exception {
    testFile = folder.newFile();
    copyInputStreamToFile(randomStream(0, 16384), testFile);
    String testNodeId = getIdForInputStream(new FileInputStream(testFile));
    // mock a blob whose content identity resolves to the test file's id
    mockBlob = mock(Blob.class);
    when(mockBlob.getContentIdentity()).thenReturn(testNodeId);
    nodeStore = initNodeStore(Optional.of(mockBlob), Optional.<Blob>absent(), Optional.<String>absent(),
        Optional.<Integer>absent(), Optional.<List<Blob>>absent());
    // create executor
    taskLatch = new CountDownLatch(1);
    callbackLatch = new CountDownLatch(1);
    afterExecuteLatch = new CountDownLatch(i);
    executor = new TestExecutor(1, taskLatch, callbackLatch, afterExecuteLatch);
    // stats
    ScheduledExecutorService statsExecutor = Executors.newSingleThreadScheduledExecutor();
    closer.register(new ExecutorCloser(statsExecutor, 500, TimeUnit.MILLISECONDS));
    statsProvider = new DefaultStatisticsProvider(statsExecutor);
    scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
    closer.register(new ExecutorCloser(scheduledExecutor, 500, TimeUnit.MILLISECONDS));
    // caching data store backed by an in-memory test backend
    final File datastoreRoot = folder.newFolder();
    dataStore = new AbstractSharedCachingDataStore() {

        @Override
        protected AbstractSharedBackend createBackend() {
            return new TestMemoryBackend(datastoreRoot);
        }

        @Override
        public int getMinRecordLength() {
            return 0;
        }
    };
    dataStore.setStatisticsProvider(statsProvider);
    dataStore.listeningExecutor = executor;
    dataStore.schedulerExecutor = scheduledExecutor;
    dataStore.init(root.getAbsolutePath());
    stats = new ConsolidatedDataStoreCacheStats();
    stats.nodeStore = nodeStore;
    stats.cachingDataStore = dataStore;
}
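The snippet relies on a folder rule and a closer field that the excerpt does not show. A minimal sketch of the assumed fixtures follows; the field names and the teardown method are assumptions, but closing a Guava Closer is what triggers every registered ExecutorCloser, shutting each executor down within the 500 ms timeout configured above.

// Assumed surrounding fixtures (not part of the snippet above): a temporary folder
// for test files and a Closer whose close() runs the registered ExecutorCloser
// instances in reverse registration order.
@Rule
public TemporaryFolder folder = new TemporaryFolder();

private final Closer closer = Closer.create();

@After
public void tearDown() throws IOException {
    closer.close();
}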
Use of org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser in project jackrabbit-oak by apache.
Class CompositeDataStoreCacheTest, method concurrentGetFromStagedAndCached.
/**
 * Concurrently retrieves 2 different files from the cache.
 * One is staged and the other is in the download cache.
 * @throws Exception
 */
@Test
public void concurrentGetFromStagedAndCached() throws Exception {
    LOG.info("Starting concurrentGetFromStagedAndCached");
    // Add 1 to backend
    // Add 2 to upload area
    // Stop upload execution
    // Concurrently get 1 & 2
    // Continue upload execution
    ListeningExecutorService executorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
    closer.register(new ExecutorCloser(executorService, 5, TimeUnit.MILLISECONDS));
    // Add file to backend
    File f2 = copyToFile(randomStream(1, 4 * 1024), folder.newFile());
    loader.write(ID_PREFIX + 1, f2);
    assertTrue(f2.exists());
    // Stage for upload
    File f = copyToFile(randomStream(0, 4 * 1024), folder.newFile());
    boolean accepted = cache.stage(ID_PREFIX + 0, f);
    assertTrue(accepted);
    // Would hit the staging cache
    CountDownLatch thread1Start = new CountDownLatch(1);
    SettableFuture<File> future1 = retrieveThread(executorService, ID_PREFIX + 0, cache, thread1Start);
    // Would hit the download cache and load
    CountDownLatch thread2Start = new CountDownLatch(1);
    SettableFuture<File> future2 = retrieveThread(executorService, ID_PREFIX + 1, cache, thread2Start);
    thread1Start.countDown();
    thread2Start.countDown();
    File cached = future1.get();
    File cached2 = future2.get();
    LOG.info("Async tasks finished");
    assertFile(cached, 0, folder);
    assertTrue(Files.equal(f2, cached2));
    // Start the original upload
    taskLatch.countDown();
    callbackLatch.countDown();
    waitFinish();
    assertCacheStats(cache.getStagingCacheStats(), 0, 0, 1, 1);
    assertEquals(2, cache.getStagingCacheStats().getLoadCount());
    assertEquals(1, cache.getStagingCacheStats().getLoadSuccessCount());
    assertCacheStats(cache.getCacheStats(), 2, 8 * 1024, 0, 2);
    assertEquals(1, cache.getCacheStats().getLoadCount());
    assertEquals(1, cache.getCacheStats().getLoadSuccessCount());
    LOG.info("Finished concurrentGetFromStagedAndCached");
}
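The retrieveThread helper used above is not included in this excerpt. A plausible sketch follows, assuming a cache.get(String) call that serves staged entries from the staging cache and loads everything else into the download cache; the structure and logging are illustrative, not the actual helper.

// Hypothetical reconstruction of the retrieveThread helper (not shown in the excerpt):
// submits a task that blocks on the start latch, then resolves the future with the
// file returned by the cache.
private static SettableFuture<File> retrieveThread(ListeningExecutorService executor,
        final String id, final CompositeDataStoreCache cache, final CountDownLatch start) {
    final SettableFuture<File> future = SettableFuture.create();
    executor.submit(new Runnable() {
        @Override
        public void run() {
            try {
                start.await();              // wait until the test releases the thread
                future.set(cache.get(id));  // staged ids hit the staging cache, others load
            } catch (Exception e) {
                future.setException(e);
            }
        }
    });
    return future;
}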
Use of org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser in project jackrabbit-oak by apache.
Class CompositeDataStoreCacheTest, method concurrentAddGet.
/**
 * Concurrently stages and gets a file, then uploads it.
 * Uses the retrieved file to read the contents.
 * @throws Exception
 */
@Test
public void concurrentAddGet() throws Exception {
    LOG.info("Starting concurrentAddGet");
    // Add to the upload area
    // Stop upload execution
    // Same as above but concurrently
    // Get
    // Continue upload execution
    ListeningExecutorService executorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
    closer.register(new ExecutorCloser(executorService, 5, TimeUnit.MILLISECONDS));
    // Stage for upload
    File f = copyToFile(randomStream(0, 4 * 1024), folder.newFile());
    boolean accepted = cache.stage(ID_PREFIX + 0, f);
    assertTrue(accepted);
    // Would hit the staging cache
    CountDownLatch thread1Start = new CountDownLatch(1);
    SettableFuture<File> future1 = retrieveThread(executorService, ID_PREFIX + 0, cache, thread1Start);
    // Get a handle to the file and open a stream
    File fileOnUpload = cache.getIfPresent(ID_PREFIX + 0);
    assertNotNull(fileOnUpload);
    final InputStream fStream = Files.asByteSource(fileOnUpload).openStream();
    thread1Start.countDown();
    // Start the original upload
    taskLatch.countDown();
    callbackLatch.countDown();
    future1.get();
    waitFinish();
    LOG.info("Async tasks finished");
    File gold = copyToFile(randomStream(0, 4 * 1024), folder.newFile());
    File fromUploadStream = copyToFile(fStream, folder.newFile());
    assertTrue(Files.equal(gold, fromUploadStream));
    assertEquals(2, cache.getStagingCacheStats().getLoadCount());
    assertEquals(0, cache.getCacheStats().getLoadCount());
    assertEquals(0, cache.getCacheStats().getLoadSuccessCount());
    LOG.info("Finished concurrentAddGet");
}
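The waitFinish helper is also outside this excerpt. At minimum it must block until the staged upload task has run, which the afterExecuteLatch created in setup makes possible; a minimal sketch under that assumption is shown below, though the real helper may do more (for example triggering the staging cache's cleanup).

// Minimal sketch of waitFinish (assumed): block until the upload task has completed
// so that the cache stats asserted afterwards are stable.
private void waitFinish() throws InterruptedException {
    afterExecuteLatch.await();
}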
Use of org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser in project jackrabbit-oak by apache.
Class CompositeDataStoreCacheTest, method setup.
@Before
public void setup() throws Exception {
    LOG.info("Starting setup");
    root = folder.newFolder();
    loader = new TestCacheLoader<String, InputStream>(folder.newFolder());
    uploader = new TestStagingUploader(folder.newFolder());
    // create executor
    taskLatch = new CountDownLatch(1);
    callbackLatch = new CountDownLatch(1);
    afterExecuteLatch = new CountDownLatch(1);
    executor = new TestExecutor(1, taskLatch, callbackLatch, afterExecuteLatch);
    // stats
    ScheduledExecutorService statsExecutor = Executors.newSingleThreadScheduledExecutor();
    closer.register(new ExecutorCloser(statsExecutor, 500, TimeUnit.MILLISECONDS));
    statsProvider = new DefaultStatisticsProvider(statsExecutor);
    scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
    closer.register(new ExecutorCloser(scheduledExecutor, 500, TimeUnit.MILLISECONDS));
    fileCacheExecutor = sameThreadExecutor();
    // cache instance
    cache = new CompositeDataStoreCache(root.getAbsolutePath(), null, 80 * 1024, /* bytes */
        10, 1, /* threads */
        loader, uploader, statsProvider, executor, scheduledExecutor, fileCacheExecutor, 3000, 6000);
    closer.register(cache);
    LOG.info("Finished setup");
}
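TestCacheLoader and TestStagingUploader are test doubles whose sources are not part of this excerpt. Purely as an illustration of what such a staging-uploader stub has to do, a hypothetical stand-in is sketched below: it copies a staged file into a backing directory so the simulated upload can later be read back and compared. The class and method names here are inventions for illustration, not the real Oak test classes.

// Illustrative stand-in for a staging-uploader test double (hypothetical, not the
// real TestStagingUploader): writes staged files into a backing directory.
class FakeStagingUploader {
    private final File dir;

    FakeStagingUploader(File dir) {
        this.dir = dir;
    }

    public void write(String id, File f) throws IOException {
        File target = new File(dir, id);
        com.google.common.io.Files.createParentDirs(target);
        com.google.common.io.Files.copy(f, target);
    }
}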
Use of org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser in project jackrabbit-oak by apache.
Class DocumentNodeStoreStatsTest, method shutDown.
@After
public void shutDown() {
    statsProvider.close();
    new ExecutorCloser(executor).close();
}
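For reference, the pattern that ExecutorCloser encapsulates in all of the snippets above is the standard orderly-then-forced executor shutdown. The sketch below is a rough equivalent under that assumption, not the actual Oak implementation; the timeout is passed in just as the snippets pass 5 or 500 milliseconds to the constructor.

// Rough equivalent of what an ExecutorCloser.close() performs (illustrative sketch):
// orderly shutdown, bounded wait for running tasks, then forced shutdown.
static void closeExecutor(ExecutorService executor, long timeout, TimeUnit unit) {
    if (executor == null) {
        return;
    }
    try {
        executor.shutdown();                            // stop accepting new tasks
        if (!executor.awaitTermination(timeout, unit)) {
            executor.shutdownNow();                     // cancel tasks still running
        }
    } catch (InterruptedException e) {
        executor.shutdownNow();
        Thread.currentThread().interrupt();
    }
}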