Use of com.google.common.base.Supplier in project bazel by bazelbuild.
The class EagerInvalidatorTest, method interruptChild:
@Test
public void interruptChild() throws Exception {
graph = new InMemoryGraphImpl();
// More values than the invalidator has threads.
int numValues = 50;
final SkyKey[] family = new SkyKey[numValues];
final SkyKey child = GraphTester.skyKey("child");
final StringValue childValue = new StringValue("child");
tester.set(child, childValue);
family[0] = child;
for (int i = 1; i < numValues; i++) {
SkyKey member = skyKey(Integer.toString(i));
tester.getOrCreate(member).addDependency(family[i - 1]).setComputedValue(CONCATENATE);
family[i] = member;
}
SkyKey parent = GraphTester.skyKey("parent");
tester.getOrCreate(parent).addDependency(family[numValues - 1]).setComputedValue(CONCATENATE);
eval(/*keepGoing=*/ false, parent);
final Thread mainThread = Thread.currentThread();
final AtomicReference<SkyKey> badKey = new AtomicReference<>();
DirtyTrackingProgressReceiver receiver = new DirtyTrackingProgressReceiver(new EvaluationProgressReceiver() {
@Override
public void invalidated(SkyKey skyKey, InvalidationState state) {
if (skyKey.equals(child)) {
// Interrupt on the very first invalidate
mainThread.interrupt();
} else if (!skyKey.functionName().equals(NODE_TYPE)) {
// All other invalidations should have the GraphTester's key type.
// Exceptions thrown here may be silently dropped, so keep track of errors ourselves.
badKey.set(skyKey);
}
try {
assertTrue(visitor.get().getInterruptionLatchForTestingOnly().await(2, TimeUnit.HOURS));
} catch (InterruptedException e) {
// We may well have thrown here because by the time we try to await, the main
// thread is already interrupted.
}
}
@Override
public void enqueueing(SkyKey skyKey) {
throw new UnsupportedOperationException();
}
@Override
public void computed(SkyKey skyKey, long elapsedTimeNanos) {
throw new UnsupportedOperationException();
}
@Override
public void evaluated(SkyKey skyKey, Supplier<SkyValue> skyValueSupplier, EvaluationState state) {
throw new UnsupportedOperationException();
}
});
try {
invalidateWithoutError(receiver, child);
fail();
} catch (InterruptedException e) {
// Expected.
}
assertNull(badKey.get());
assertFalse(state.isEmpty());
final Set<SkyKey> invalidated = Sets.newConcurrentHashSet();
assertFalse(isInvalidated(parent));
assertNotNull(graph.get(null, Reason.OTHER, parent).getValue());
receiver = new DirtyTrackingProgressReceiver(new EvaluationProgressReceiver() {
@Override
public void invalidated(SkyKey skyKey, InvalidationState state) {
invalidated.add(skyKey);
}
@Override
public void enqueueing(SkyKey skyKey) {
throw new UnsupportedOperationException();
}
@Override
public void computed(SkyKey skyKey, long elapsedTimeNanos) {
throw new UnsupportedOperationException();
}
@Override
public void evaluated(SkyKey skyKey, Supplier<SkyValue> skyValueSupplier, EvaluationState state) {
throw new UnsupportedOperationException();
}
});
invalidateWithoutError(receiver);
assertTrue(invalidated.contains(parent));
assertThat(state.getInvalidationsForTesting()).isEmpty();
// Regression test coverage:
// "all pending values are marked changed on interrupt".
assertTrue(isInvalidated(child));
assertChanged(child);
for (int i = 1; i < numValues; i++) {
assertDirtyAndNotChanged(family[i]);
}
assertDirtyAndNotChanged(parent);
}
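The Guava Supplier appears here only in the signature of the evaluated callback: the framework passes a Supplier<SkyValue> rather than the value itself, which allows the value to be produced lazily. Below is a minimal stand-alone sketch of that callback shape; the ValueObserver interface and the key used are hypothetical stand-ins, not the Skyframe API.
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;

public class LazyCallbackSketch {
  // Hypothetical observer: the Supplier defers building the value until the
  // observer actually calls get().
  interface ValueObserver<V> {
    void evaluated(String key, Supplier<V> valueSupplier);
  }

  public static void main(String[] args) {
    ValueObserver<String> observer =
        (key, valueSupplier) -> System.out.println(key + " -> " + valueSupplier.get());
    // Suppliers.ofInstance wraps an already-computed value; a real caller could
    // pass a supplier that computes the value only on demand.
    observer.evaluated("child", Suppliers.ofInstance("child"));
  }
}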
Use of com.google.common.base.Supplier in project buck by facebook.
The class MoreSuppliersTest, method weakMemoizeShouldRunDelegateOnlyOnceOnConcurrentAccess:
@Test
public void weakMemoizeShouldRunDelegateOnlyOnceOnConcurrentAccess() throws Exception {
final int numFetchers = 10;
final Semaphore semaphore = new Semaphore(0);
class TestDelegate implements Supplier<Object> {
private int timesCalled = 0;
public int getTimesCalled() {
return timesCalled;
}
@Override
public Object get() {
try {
// Wait for all the fetch threads to be ready.
semaphore.acquire(numFetchers);
// Give other threads a chance to catch up.
Thread.sleep(50);
timesCalled++;
return new Object();
} catch (InterruptedException e) {
throw new RuntimeException(e);
} finally {
semaphore.release(numFetchers);
}
}
}
TestDelegate delegate = new TestDelegate();
final Supplier<Object> supplier = MoreSuppliers.weakMemoize(delegate);
ExecutorService threadPool = Executors.newFixedThreadPool(numFetchers);
try {
ListeningExecutorService executor = MoreExecutors.listeningDecorator(threadPool);
class Fetcher implements Callable<Object> {
@Override
public Object call() {
// Signal that this particular fetcher is ready.
semaphore.release();
return supplier.get();
}
}
ImmutableList.Builder<Callable<Object>> fetcherBuilder = ImmutableList.builder();
for (int i = 0; i < numFetchers; i++) {
fetcherBuilder.add(new Fetcher());
}
@SuppressWarnings("unchecked")
List<ListenableFuture<Object>> futures =
    (List<ListenableFuture<Object>>) (List<?>) executor.invokeAll(fetcherBuilder.build());
// Wait for all fetchers to finish.
List<Object> results = Futures.allAsList(futures).get();
Assert.assertEquals("should only have been called once", 1, delegate.getTimesCalled());
Assert.assertThat("all result items are the same", ImmutableSet.copyOf(results), Matchers.hasSize(1));
Preconditions.checkState(threadPool.shutdownNow().isEmpty(), "All jobs should have completed");
Preconditions.checkState(threadPool.awaitTermination(10, TimeUnit.SECONDS), "Thread pool should terminate in a reasonable amount of time");
} finally {
// In case exceptions were thrown, attempt to shut down the thread pool.
threadPool.shutdownNow();
threadPool.awaitTermination(10, TimeUnit.SECONDS);
}
}
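The test pins down the at-most-once contract of MoreSuppliers.weakMemoize under concurrent access. Guava's standard Suppliers.memoize gives a similar thread-safe, at-most-once guarantee (weakMemoize additionally lets the cached value be garbage collected, as its name suggests). A minimal sketch:
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import java.util.concurrent.atomic.AtomicInteger;

public class MemoizeSketch {
  public static void main(String[] args) {
    AtomicInteger calls = new AtomicInteger();
    // Suppliers.memoize caches the first result and invokes the delegate at
    // most once, even if get() is called concurrently from several threads.
    Supplier<String> memoized = Suppliers.memoize(() -> {
      calls.incrementAndGet();
      return "expensive result";
    });
    System.out.println(memoized.get());
    System.out.println(memoized.get());
    System.out.println("delegate calls: " + calls.get()); // prints 1
  }
}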
Use of com.google.common.base.Supplier in project error-prone by google.
The class ScannerSupplier, method applyOverrides:
/**
* Applies options to this {@link ScannerSupplier}.
*
* <p>Command-line options to override check severities may do any of the following:
* <ul>
* <li>Enable a check that is currently off</li>
* <li>Disable a check that is currently on</li>
* <li>Change the severity of a check that is on, promoting a warning to an error or demoting
* an error to a warning</li>
* </ul>
*
* @param errorProneOptions an {@link ErrorProneOptions} object that encapsulates the overrides
* for this compilation
* @throws InvalidCommandLineOptionException if the override map attempts to disable a check
* that may not be disabled
*/
@CheckReturnValue
public ScannerSupplier applyOverrides(ErrorProneOptions errorProneOptions) throws InvalidCommandLineOptionException {
Map<String, Severity> severityOverrides = errorProneOptions.getSeverityMap();
if (severityOverrides.isEmpty()
    && !errorProneOptions.isEnableAllChecks()
    && !errorProneOptions.isDropErrorsToWarnings()
    && !errorProneOptions.isDisableAllChecks()) {
return this;
}
// Initialize result allChecks map and enabledChecks set with current state of this Supplier.
ImmutableBiMap<String, BugCheckerInfo> checks = getAllChecks();
Map<String, SeverityLevel> severities = new LinkedHashMap<>(severities());
Set<String> disabled = new HashSet<>(disabled());
if (errorProneOptions.isEnableAllChecks()) {
disabled.forEach(c -> severities.put(c, checks.get(c).defaultSeverity()));
disabled.clear();
}
if (errorProneOptions.isDropErrorsToWarnings()) {
getAllChecks().values().stream()
    .filter(c -> c.defaultSeverity() == SeverityLevel.ERROR && c.suppressibility().disableable())
    .forEach(c -> severities.put(c.canonicalName(), SeverityLevel.WARNING));
}
if (errorProneOptions.isDisableAllChecks()) {
getAllChecks().values().stream()
    .filter(c -> c.suppressibility().disableable())
    .forEach(c -> disabled.add(c.canonicalName()));
}
// Process overrides
severityOverrides.forEach((checkName, newSeverity) -> {
BugCheckerInfo check = getAllChecks().get(checkName);
if (check == null) {
if (errorProneOptions.ignoreUnknownChecks()) {
return;
}
throw new InvalidCommandLineOptionException(checkName + " is not a valid checker name");
}
switch (newSeverity) {
case OFF:
if (!check.suppressibility().disableable()) {
throw new InvalidCommandLineOptionException(check.canonicalName() + " may not be disabled");
}
severities.remove(check.canonicalName());
disabled.add(check.canonicalName());
break;
case DEFAULT:
severities.put(check.canonicalName(), check.defaultSeverity());
disabled.remove(check.canonicalName());
break;
case WARN:
// Demoting an enabled check from an error to a warning is a form of disabling
if (!disabled().contains(check.canonicalName())
    && !check.suppressibility().disableable()
    && check.defaultSeverity() == SeverityLevel.ERROR) {
  throw new InvalidCommandLineOptionException(
      check.canonicalName() + " is not disableable and may not be demoted to a warning");
}
severities.put(check.canonicalName(), SeverityLevel.WARNING);
disabled.remove(check.canonicalName());
break;
case ERROR:
severities.put(check.canonicalName(), SeverityLevel.ERROR);
disabled.remove(check.canonicalName());
break;
default:
throw new IllegalStateException("Unexpected severity level: " + newSeverity);
}
});
return new ScannerSupplierImpl(checks, ImmutableMap.copyOf(severities), ImmutableSet.copyOf(disabled));
}
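For context, the severity map consumed above typically comes from error-prone's -Xep:CheckName[:severity] command-line flags. The stand-alone sketch below mirrors the OFF and DEFAULT branches of the switch; the Severity enum, map, and set are local stand-ins, not the error-prone types.
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

public class SeverityOverrideSketch {
  // Local stand-in for error-prone's severity values.
  enum Severity { OFF, DEFAULT, WARN, ERROR }

  public static void main(String[] args) {
    Map<String, Severity> severities = new LinkedHashMap<>();
    Set<String> disabled = new HashSet<>();
    severities.put("SomeCheck", Severity.ERROR);

    // "-Xep:SomeCheck:OFF": the check leaves the severity map and joins the
    // disabled set, mirroring the OFF branch above.
    severities.remove("SomeCheck");
    disabled.add("SomeCheck");

    // "-Xep:SomeCheck": the check is re-enabled at its default severity,
    // mirroring the DEFAULT branch.
    disabled.remove("SomeCheck");
    severities.put("SomeCheck", Severity.ERROR);

    System.out.println("severities=" + severities + ", disabled=" + disabled);
  }
}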
Use of com.google.common.base.Supplier in project jackrabbit-oak by apache.
The class DocumentNodeStoreService, method registerJMXBeans:
private void registerJMXBeans(final DocumentNodeStore store, DocumentMK.Builder mkBuilder) throws IOException {
addRegistration(registerMBean(whiteboard, CacheStatsMBean.class, store.getNodeCacheStats(), CacheStatsMBean.TYPE, store.getNodeCacheStats().getName()));
addRegistration(registerMBean(whiteboard, CacheStatsMBean.class, store.getNodeChildrenCacheStats(), CacheStatsMBean.TYPE, store.getNodeChildrenCacheStats().getName()));
for (CacheStats cs : store.getDiffCacheStats()) {
addRegistration(registerMBean(whiteboard, CacheStatsMBean.class, cs, CacheStatsMBean.TYPE, cs.getName()));
}
DocumentStore ds = store.getDocumentStore();
if (ds.getCacheStats() != null) {
for (CacheStats cacheStats : ds.getCacheStats()) {
addRegistration(registerMBean(whiteboard, CacheStatsMBean.class, cacheStats, CacheStatsMBean.TYPE, cacheStats.getName()));
}
}
addRegistration(registerMBean(whiteboard, CheckpointMBean.class, new DocumentCheckpointMBean(store), CheckpointMBean.TYPE, "Document node store checkpoint management"));
addRegistration(registerMBean(whiteboard, DocumentNodeStoreMBean.class, store.getMBean(), DocumentNodeStoreMBean.TYPE, "Document node store management"));
if (mkBuilder.getBlobStoreCacheStats() != null) {
addRegistration(registerMBean(whiteboard, CacheStatsMBean.class, mkBuilder.getBlobStoreCacheStats(), CacheStatsMBean.TYPE, mkBuilder.getBlobStoreCacheStats().getName()));
}
if (mkBuilder.getDocumentStoreStatsCollector() instanceof DocumentStoreStatsMBean) {
addRegistration(registerMBean(whiteboard, DocumentStoreStatsMBean.class, (DocumentStoreStatsMBean) mkBuilder.getDocumentStoreStatsCollector(), DocumentStoreStatsMBean.TYPE, "DocumentStore Statistics"));
}
// register persistent cache stats
Map<CacheType, PersistentCacheStats> persistenceCacheStats = mkBuilder.getPersistenceCacheStats();
for (PersistentCacheStats pcs : persistenceCacheStats.values()) {
addRegistration(registerMBean(whiteboard, PersistentCacheStatsMBean.class, pcs, PersistentCacheStatsMBean.TYPE, pcs.getName()));
}
final long versionGcMaxAgeInSecs = toLong(prop(PROP_VER_GC_MAX_AGE), DEFAULT_VER_GC_MAX_AGE);
final long blobGcMaxAgeInSecs = toLong(prop(PROP_BLOB_GC_MAX_AGE), DEFAULT_BLOB_GC_MAX_AGE);
if (store.getBlobStore() instanceof GarbageCollectableBlobStore) {
BlobGarbageCollector gc = store.createBlobGarbageCollector(blobGcMaxAgeInSecs, ClusterRepositoryInfo.getOrCreateId(nodeStore));
addRegistration(registerMBean(whiteboard, BlobGCMBean.class, new BlobGC(gc, executor), BlobGCMBean.TYPE, "Document node store blob garbage collection"));
}
Runnable startGC = new Runnable() {
@Override
public void run() {
try {
store.getVersionGarbageCollector().gc(versionGcMaxAgeInSecs, TimeUnit.SECONDS);
} catch (IOException e) {
log.warn("Error occurred while executing the Version Garbage Collector", e);
}
}
};
Runnable cancelGC = new Runnable() {
@Override
public void run() {
store.getVersionGarbageCollector().cancel();
}
};
Supplier<String> status = new Supplier<String>() {
@Override
public String get() {
return store.getVersionGarbageCollector().getStatus();
}
};
RevisionGC revisionGC = new RevisionGC(startGC, cancelGC, status, executor);
addRegistration(registerMBean(whiteboard, RevisionGCMBean.class, revisionGC, RevisionGCMBean.TYPE, "Document node store revision garbage collection"));
BlobStoreStats blobStoreStats = mkBuilder.getBlobStoreStats();
if (!customBlobStore && blobStoreStats != null) {
addRegistration(registerMBean(whiteboard, BlobStoreStatsMBean.class, blobStoreStats, BlobStoreStatsMBean.TYPE, ds.getClass().getSimpleName()));
}
if (!mkBuilder.isBundlingDisabled()) {
addRegistration(registerMBean(whiteboard, BackgroundObserverMBean.class, store.getBundlingConfigHandler().getMBean(), BackgroundObserverMBean.TYPE, "BundlingConfigObserver"));
}
}
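The status Supplier at the end is one of three small callbacks handed to RevisionGC. Because com.google.common.base.Supplier has a single abstract method, the same wiring can be written with method references instead of anonymous classes. The sketch below shows the shape of that wiring; GcEngine and StatusReporter are hypothetical stand-ins, not the Oak classes.
import com.google.common.base.Supplier;

public class GcWiringSketch {
  // Hypothetical engine standing in for the version garbage collector.
  static class GcEngine {
    private volatile String status = "idle";
    void gc() { status = "running"; }
    void cancel() { status = "cancelled"; }
    String getStatus() { return status; }
  }

  // Hypothetical consumer standing in for RevisionGC: it only sees the three
  // callbacks, never the engine itself.
  static class StatusReporter {
    private final Runnable start;
    private final Runnable cancel;
    private final Supplier<String> status;

    StatusReporter(Runnable start, Runnable cancel, Supplier<String> status) {
      this.start = start;
      this.cancel = cancel;
      this.status = status;
    }

    String startAndReport() {
      start.run();
      return status.get();
    }

    String cancelAndReport() {
      cancel.run();
      return status.get();
    }
  }

  public static void main(String[] args) {
    GcEngine engine = new GcEngine();
    StatusReporter reporter =
        new StatusReporter(engine::gc, engine::cancel, engine::getStatus);
    System.out.println(reporter.startAndReport());  // prints "running"
    System.out.println(reporter.cancelAndReport()); // prints "cancelled"
  }
}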
Use of com.google.common.base.Supplier in project jackrabbit-oak by apache.
The class CacheWeightsTest, method testSegmentCache:
@Test
public void testSegmentCache() {
final int count = 10000;
final int cacheSizeMB = 100;
final int bufferSize = 5 * 1024;
Supplier<Entry<Object, Long[]>> factory = new Supplier<Entry<Object, Long[]>>() {
@Override
public Entry<Object, Long[]> get() {
SegmentCache cache = new SegmentCache(cacheSizeMB);
for (int i = 0; i < count; ++i) {
Segment segment = randomSegment(bufferSize);
cache.putSegment(segment);
}
AbstractCacheStats stats = cache.getCacheStats();
long elements = stats.getElementCount();
long weight = stats.estimateCurrentWeight();
return new SimpleImmutableEntry<Object, Long[]>(cache, new Long[] { elements, weight });
}
};
runTest(factory, "SegmentCache[x" + cacheSizeMB + "MB|" + bufferSize + "|Cache<SegmentId, Segment>]");
}
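Here the Supplier acts as a test factory: each call builds a freshly populated cache and returns it together with the element count and estimated weight that runTest presumably compares. A simplified, stand-alone version of that factory pattern, with a plain HashMap standing in for SegmentCache:
import com.google.common.base.Supplier;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

public class FactorySketch {
  public static void main(String[] args) {
    Supplier<Entry<Object, Long[]>> factory = () -> {
      // Build the object under test and measure it in one place.
      Map<Integer, byte[]> cache = new HashMap<>();
      for (int i = 0; i < 100; i++) {
        cache.put(i, new byte[1024]);
      }
      long elements = cache.size();
      long approximateWeight = elements * 1024L;
      return new SimpleImmutableEntry<Object, Long[]>(
          cache, new Long[] { elements, approximateWeight });
    };
    Entry<Object, Long[]> result = factory.get();
    System.out.println("elements=" + result.getValue()[0]
        + ", weight=" + result.getValue()[1]);
  }
}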