Use of java.util.concurrent.ScheduledExecutorService in project jackrabbit-oak by apache.
Class CompactionAndCleanupIT, method offlineCompactionBinC2:
/**
 * Create two binary nodes with the same content but distinct blob references.
 * Reduce the maximum size of de-duplicated binaries below the binary length.
 * Verify the de-duplication capabilities of compaction.
 */
@Test
public void offlineCompactionBinC2() throws Exception {
    int blobSize = 5 * 1024 * 1024;
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline()
            .withBinaryDeduplication()
            .setBinaryDeduplicationMaxSize(blobSize / 2);
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        byte[] data = new byte[blobSize];
        new Random().nextBytes(data);
        NodeBuilder c1 = content.child("c1");
        Blob b1 = nodeStore.createBlob(new ByteArrayInputStream(data));
        c1.setProperty("blob1", b1);
        NodeBuilder c2 = content.child("c2");
        Blob b2 = nodeStore.createBlob(new ByteArrayInputStream(data));
        c2.setProperty("blob2", b2);
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }
        long size1 = fileStore.getStats().getApproximateSize();
        fileStore.compact();
        fileStore.cleanup();
        long size2 = fileStore.getStats().getApproximateSize();
        // not expected to reduce the size too much, as the binaries are
        // above the threshold
        assertSize("with compacted binaries", size2, size1 * 9 / 10, size1 * 11 / 10);
    } finally {
        fileStore.close();
        // also stop the statistics executor so its thread does not leak
        executor.shutdown();
    }
}
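
All three jackrabbit-oak examples follow the same lifecycle: a single-thread scheduled executor is created, handed to a DefaultStatisticsProvider, and must be stopped once the store it serves is closed. A minimal, self-contained sketch of that lifecycle (the class name and the printed stand-in task are illustrative; only the executor API is taken from the snippets):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class StatsExecutorLifecycle {
    public static void main(String[] args) throws InterruptedException {
        // Single worker thread for periodic statistics tasks.
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        try {
            // In the tests above this executor is passed to
            // new DefaultStatisticsProvider(executor); here we schedule a
            // stand-in periodic task just to show the lifecycle.
            executor.scheduleWithFixedDelay(
                    () -> System.out.println("collecting stats..."),
                    0, 100, TimeUnit.MILLISECONDS);
            Thread.sleep(350);
        } finally {
            // Stop accepting new tasks and let in-flight ones finish.
            executor.shutdown();
            executor.awaitTermination(1, TimeUnit.SECONDS);
        }
    }
}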
Use of java.util.concurrent.ScheduledExecutorService in project jackrabbit-oak by apache.
Class CachingDataStoreTest, method init:
private void init(int i, int cacheSize, int uploadSplit) throws Exception {
    LOG.info("Starting init");
    // create executor
    taskLatch = new CountDownLatch(1);
    callbackLatch = new CountDownLatch(1);
    afterExecuteLatch = new CountDownLatch(i);
    TestExecutor listeningExecutor = new TestExecutor(1, taskLatch, callbackLatch, afterExecuteLatch);
    // stats
    ScheduledExecutorService statsExecutor = Executors.newSingleThreadScheduledExecutor();
    closer.register(new ExecutorCloser(statsExecutor, 500, TimeUnit.MILLISECONDS));
    StatisticsProvider statsProvider = new DefaultStatisticsProvider(statsExecutor);
    scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
    closer.register(new ExecutorCloser(scheduledExecutor, 500, TimeUnit.MILLISECONDS));
    final File datastoreRoot = folder.newFolder();
    final TestMemoryBackend testBackend = new TestMemoryBackend(datastoreRoot);
    this.backend = testBackend;
    dataStore = new AbstractSharedCachingDataStore() {

        @Override
        protected AbstractSharedBackend createBackend() {
            return testBackend;
        }

        @Override
        public int getMinRecordLength() {
            return 0;
        }
    };
    dataStore.setStatisticsProvider(statsProvider);
    dataStore.setCacheSize(cacheSize);
    dataStore.setStagingSplitPercentage(uploadSplit);
    dataStore.listeningExecutor = listeningExecutor;
    dataStore.schedulerExecutor = scheduledExecutor;
    dataStore.executor = sameThreadExecutor();
    dataStore.init(root.getAbsolutePath());
    LOG.info("Finished init");
}
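
The ExecutorCloser instances registered with the Closer above bound how long teardown waits for each executor (500 ms here). A plain-JDK sketch of the same bounded-shutdown idea, assuming nothing about Oak's actual ExecutorCloser implementation (class and method names here are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public final class BoundedShutdown {
    // Roughly what an "executor closer" must do: request an orderly shutdown
    // first, then force a shutdown if the timeout elapses.
    public static void close(ExecutorService executor, long timeout, TimeUnit unit) {
        executor.shutdown();
        try {
            if (!executor.awaitTermination(timeout, unit)) {
                executor.shutdownNow();  // cancel queued tasks, interrupt running ones
            }
        } catch (InterruptedException e) {
            executor.shutdownNow();
            Thread.currentThread().interrupt();  // preserve the interrupt status
        }
    }
}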
Use of java.util.concurrent.ScheduledExecutorService in project jackrabbit-oak by apache.
Class SegmentDataStoreBlobGCIT, method getNodeStore:
private SegmentNodeStore getNodeStore(BlobStore blobStore) throws Exception {
    if (nodeStore == null) {
        // single-thread executor backing the statistics provider; it lives as
        // long as the store and should be shut down at test teardown
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        FileStoreBuilder builder = fileStoreBuilder(getWorkDir())
                .withNodeDeduplicationCacheSize(16384)
                .withBlobStore(blobStore)
                .withMaxFileSize(256)
                .withMemoryMapping(false)
                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                .withGCOptions(gcOptions);
        store = builder.build();
        nodeStore = SegmentNodeStoreBuilders.builder(store).build();
    }
    return nodeStore;
}
Use of java.util.concurrent.ScheduledExecutorService in project wildfly by wildfly.
Class RemoteFailoverTestCase, method testConcurrentFailover:
public void testConcurrentFailover(Lifecycle lifecycle) throws Exception {
    // TODO Elytron: Once support for legacy EJB properties has been added back, actually set the EJB properties
    // that should be used for this test using CLIENT_PROPERTIES and ensure the EJB client context is reset
    // to its original state at the end of the test
    EJBClientContextSelector.setup(CLIENT_PROPERTIES);
    try (EJBDirectory directory = new RemoteEJBDirectory(MODULE_NAME)) {
        Incrementor bean = directory.lookupStateful(SlowToDestroyStatefulIncrementorBean.class, Incrementor.class);
        AtomicInteger count = new AtomicInteger();
        // Allow sufficient time for client to receive full topology
        Thread.sleep(CLIENT_TOPOLOGY_UPDATE_WAIT);
        String target = bean.increment().getNode();
        count.incrementAndGet();
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        try {
            CountDownLatch latch = new CountDownLatch(1);
            Future<?> future = executor.scheduleWithFixedDelay(
                    new IncrementTask(bean, count, latch), 0, INVOCATION_WAIT, TimeUnit.MILLISECONDS);
            latch.await();
            lifecycle.stop(target);
            future.cancel(false);
            try {
                future.get();
            } catch (CancellationException e) {
                // Ignore
            }
            lifecycle.start(target);
            latch = new CountDownLatch(1);
            future = executor.scheduleWithFixedDelay(
                    new LookupTask(directory, SlowToDestroyStatefulIncrementorBean.class, latch), 0, INVOCATION_WAIT, TimeUnit.MILLISECONDS);
            latch.await();
            lifecycle.stop(target);
            future.cancel(false);
            try {
                future.get();
            } catch (CancellationException e) {
                // Ignore
            }
            lifecycle.start(target);
        } finally {
            executor.shutdownNow();
        }
    }
}
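
Both WildFly tests drive background load with the same idiom: schedule a task with a fixed delay, await a latch that the task counts down after its first completed run, then cancel the future and treat the resulting CancellationException as normal termination. A self-contained sketch of that idiom with a stand-in task (all names here are illustrative):

import java.util.concurrent.CancellationException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class FixedDelayDriver {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        CountDownLatch firstRun = new CountDownLatch(1);
        AtomicInteger count = new AtomicInteger();
        try {
            // Stand-in for IncrementTask/LookupTask: do one unit of work,
            // then release anyone waiting for the first completed run.
            Future<?> future = executor.scheduleWithFixedDelay(() -> {
                count.incrementAndGet();
                firstRun.countDown();
            }, 0, 10, TimeUnit.MILLISECONDS);
            firstRun.await();          // at least one invocation has finished
            future.cancel(false);      // let an in-flight run complete
            try {
                future.get();          // surfaces task failures, if any
            } catch (CancellationException expected) {
                // normal termination path after cancel(false)
            }
            System.out.println("runs: " + count.get());
        } finally {
            executor.shutdownNow();
        }
    }
}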
Use of java.util.concurrent.ScheduledExecutorService in project wildfly by wildfly.
Class RemoteEJBTwoClusterTestCase, method testConcurrentFailoverOverWithTwoClusters:
/*
* Tests that EJBClient invocations on stateful session beans can still successfully be processed
* as long as one node in each cluster is available.
*/
public void testConcurrentFailoverOverWithTwoClusters(boolean useTransactions) throws Exception {
    // TODO Elytron: Once support for legacy EJB properties has been added back, actually set the EJB properties
    // that should be used for this test using FORWARDER_CLIENT_PROPERTIES and ensure the EJB client context is reset
    // to its original state at the end of the test
    EJBClientContextSelector.setup(FORWARDER_CLIENT_PROPERTIES);
    try {
        // get the correct forwarder deployment on cluster A
        RemoteStatefulSB bean = null;
        if (useTransactions) {
            bean = txnBeanDirectory.lookupStateful(ForwardingStatefulSBImpl.class, RemoteStatefulSB.class);
        } else {
            bean = beanDirectory.lookupStateful(NonTxForwardingStatefulSBImpl.class, RemoteStatefulSB.class);
        }
        AtomicInteger count = new AtomicInteger();
        // Allow sufficient time for client to receive full topology
        logger.trace("Waiting for clusters to form:");
        Thread.sleep(CLIENT_TOPOLOGY_UPDATE_WAIT);
        int newSerialValue = bean.getSerialAndIncrement();
        int newCountValue = count.getAndIncrement();
        logger.trace("First invocation: count = " + newCountValue + ", serial = " + newSerialValue);
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        CountDownLatch latch = new CountDownLatch(1);
        ClientInvocationTask client = new ClientInvocationTask(bean, latch, count);
        try {
            // set up the client invocations
            Future<?> future = executor.scheduleWithFixedDelay(client, 0, INVOCATION_WAIT, TimeUnit.MILLISECONDS);
            latch.await();
            // a few seconds of non-failure behaviour
            Thread.sleep(FAILURE_FREE_TIME);
            logger.trace("------ Shutdown clusterA-node0 -----");
            // stop cluster A node 0
            stop(CONTAINER_1);
            // Let the server stay down for a while
            Thread.sleep(SERVER_DOWN_TIME);
            logger.trace("------ Startup clusterA-node0 -----");
            start(CONTAINER_1);
            // a few seconds of non-failure behaviour
            Thread.sleep(FAILURE_FREE_TIME);
            logger.trace("----- Shutdown clusterA-node1 -----");
            // stop cluster A node 1
            stop(CONTAINER_2);
            // Let the server stay down for a while
            Thread.sleep(SERVER_DOWN_TIME);
            logger.trace("------ Startup clusterA-node1 -----");
            start(CONTAINER_2);
            // a few seconds of non-failure behaviour
            Thread.sleep(FAILURE_FREE_TIME);
            logger.trace("----- Shutdown clusterB-node0 -----");
            // stop cluster B node 0
            stop(CONTAINER_3);
            // Let the server stay down for a while
            Thread.sleep(SERVER_DOWN_TIME);
            logger.trace("------ Startup clusterB-node0 -----");
            start(CONTAINER_3);
            // a few seconds of non-failure behaviour
            Thread.sleep(FAILURE_FREE_TIME);
            logger.trace("----- Shutdown clusterB-node1 -----");
            // stop cluster B node 1
            stop(CONTAINER_4);
            // Let the server stay down for a while
            Thread.sleep(SERVER_DOWN_TIME);
            logger.trace("------ Startup clusterB-node1 -----");
            start(CONTAINER_4);
            // a few seconds of non-failure behaviour
            Thread.sleep(FAILURE_FREE_TIME);
            // cancel the executor and wait for it to complete
            future.cancel(false);
            try {
                future.get();
            } catch (CancellationException e) {
                logger.trace("Could not cancel future: " + e.toString());
            }
            // test is completed, report results
            double invocations = client.getInvocationCount();
            double exceptions = client.getExceptionCount();
            logger.trace("Total invocations = " + invocations + ", total exceptions = " + exceptions);
            Assert.assertTrue("Too many exceptions! percentage = " + 100 * (exceptions / invocations),
                    (exceptions / invocations) < EXCEPTION_PERCENTAGE);
        } catch (Exception e) {
            Assert.fail("Exception occurred on client: " + e.getMessage() + ", test did not complete successfully (inner)");
        } finally {
            logger.trace("Shutting down executor");
            executor.shutdownNow();
        }
    } catch (Exception e) {
        Assert.fail("Exception occurred on client: " + e.getMessage() + ", test did not complete successfully (outer)");
    }
}
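
ClientInvocationTask itself is not shown in this listing. Judging purely from its usage above (constructed with the bean, a latch, and a shared counter; polled later for invocation and exception counts), it plausibly looks like the following. This is a hypothetical reconstruction for illustration, not the WildFly source:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

// Stand-in for the test's remote bean interface, included so the sketch compiles.
interface RemoteStatefulSB {
    int getSerialAndIncrement();
}

// Hypothetical reconstruction based solely on how the task is used above.
public class ClientInvocationTask implements Runnable {
    private final RemoteStatefulSB bean;
    private final CountDownLatch latch;
    private final AtomicInteger count;
    private final AtomicInteger invocations = new AtomicInteger();
    private final AtomicInteger exceptions = new AtomicInteger();

    public ClientInvocationTask(RemoteStatefulSB bean, CountDownLatch latch, AtomicInteger count) {
        this.bean = bean;
        this.latch = latch;
        this.count = count;
    }

    @Override
    public void run() {
        invocations.incrementAndGet();
        try {
            bean.getSerialAndIncrement();  // the invocation under test
            count.incrementAndGet();
        } catch (RuntimeException e) {
            exceptions.incrementAndGet();  // tolerated up to EXCEPTION_PERCENTAGE
        } finally {
            latch.countDown();             // unblocks the test after the first run
        }
    }

    public int getInvocationCount() { return invocations.get(); }
    public int getExceptionCount() { return exceptions.get(); }
}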