Use of org.apache.flink.shaded.guava30.com.google.common.cache.Cache in project bookkeeper (by apache):
class TestFileInfoBackingCache, method testRaceGuavaEvictAndReleaseBeforeRetain.
/**
 * Stress test for the race between guava-cache eviction and a release() that runs
 * before a subsequent retain(): two workers repeatedly fetch the same two ledger
 * entries through a size-1 guava cache, so every fetch by one key evicts the other.
 * After the run, every CachedFileInfo ever handed out must be closed and dead.
 */
@Test
public void testRaceGuavaEvictAndReleaseBeforeRetain() throws Exception {
    AtomicBoolean done = new AtomicBoolean(false);
    // Backing cache whose loader maps each ledger id to a throwaway file under baseDir.
    FileInfoBackingCache cache = new FileInfoBackingCache((ledgerId, createIfNotFound) -> {
        File f = new File(baseDir, String.valueOf(ledgerId));
        f.deleteOnExit();
        return f;
    });
    // maximumSize(1) with two distinct keys forces constant eviction, maximizing the
    // chance that the removal listener (guavaEvictionListener — presumably releases the
    // evicted entry; defined elsewhere in this class) races with tryRetain() below.
    Cache<Long, CachedFileInfo> guavaCache = CacheBuilder.newBuilder()
            .maximumSize(1)
            .removalListener(this::guavaEvictionListener)
            .build();
    // Two concurrent workers, one per key (0 and 1), each recording every instance it saw.
    Iterable<Future<Set<CachedFileInfo>>> futures = LongStream.range(0L, 2L).mapToObj((i) -> {
        Callable<Set<CachedFileInfo>> c = () -> {
            Set<CachedFileInfo> allFileInfos = new HashSet<>();
            while (!done.get()) {
                CachedFileInfo fi = null;
                do {
                    // Re-fetch until we retain a live reference: the eviction listener
                    // may invalidate the instance between get() and tryRetain().
                    fi = guavaCache.get(i, () -> cache.loadFileInfo(i, masterKey));
                    allFileInfos.add(fi);
                    Thread.sleep(100);
                } while (!fi.tryRetain());
                // A successfully retained instance must not be closed under us.
                Assert.assertFalse(fi.isClosed());
                fi.release();
            }
            return allFileInfos;
        };
        return executor.submit(c);
    }).collect(Collectors.toList());
    // Let both workers hammer the cache for 10 seconds, then signal them to stop.
    Thread.sleep(TimeUnit.SECONDS.toMillis(10));
    done.set(true);
    // ensure all threads are finished operating on cache, before checking any
    for (Future<Set<CachedFileInfo>> f : futures) {
        f.get();
    }
    guavaCache.invalidateAll();
    // After full invalidation, every instance ever observed must be closed and its
    // refcount parked at the DEAD_REF sentinel — otherwise a reference leaked.
    for (Future<Set<CachedFileInfo>> f : futures) {
        for (CachedFileInfo fi : f.get()) {
            Assert.assertTrue(fi.isClosed());
            Assert.assertEquals(FileInfoBackingCache.DEAD_REF, fi.getRefCount());
        }
    }
}
Use of org.apache.flink.shaded.guava30.com.google.common.cache.Cache in project micrometer (by micrometer-metrics):
class CacheSample, method main.
/**
 * Demo: instruments a guava cache with micrometer metrics, then streams the full
 * text of Frankenstein from Project Gutenberg word by word, inserting each unseen
 * word into the cache so hit/miss metrics accumulate.
 */
public static void main(String[] args) {
    // Bind the sample cache to the demo monitoring system under the "book.guava" prefix.
    MeterRegistry registry = SampleConfig.myMonitoringSystem();
    GuavaCacheMetrics.monitor(registry, guavaCache, "book.guava");
    // read all of Frankenstein
    HttpClient.create("www.gutenberg.org")
            .get("/cache/epub/84/pg84.txt")
            .flatMapMany(response -> response.addHandler(wordDecoder()).receive().asString())
            .delayElements(Duration.ofMillis(10)) // one word per 10 ms
            .filter(token -> !token.isEmpty())
            .doOnNext(token -> {
                // Only count first occurrences; repeats become cache hits.
                if (guavaCache.getIfPresent(token) == null) {
                    guavaCache.put(token, 1);
                }
            })
            .blockLast(); // block until the whole book has been consumed
}
Use of org.apache.flink.shaded.guava30.com.google.common.cache.Cache in project atlasdb (by palantir):
class AtlasDbMetrics, method registerCache.
/**
 * Registers dropwizard metrics for the given cache under {@code metricsPrefix},
 * unless any metric with that prefix is already present, in which case the
 * registration is skipped and the conflict is logged at info level.
 */
public static void registerCache(Cache<?, ?> cache, String metricsPrefix) {
    MetricRegistry registry = getMetricRegistry();
    // Names already registered under this prefix; re-registering over them is unsafe.
    Set<String> conflictingNames = registry.getMetrics().keySet().stream()
            .filter(name -> name.startsWith(metricsPrefix))
            .collect(Collectors.toSet());
    if (!conflictingNames.isEmpty()) {
        log.info("Not registering cache with prefix '{}' as metric registry already contains metrics: {}", metricsPrefix, conflictingNames);
        return;
    }
    MetricRegistries.registerCache(registry, cache, metricsPrefix);
}
Use of org.apache.flink.shaded.guava30.com.google.common.cache.Cache in project OpenTripPlanner (by opentripplanner):
class AnalystWorker, method run.
/**
 * This is the main worker event loop which fetches tasks from a broker and schedules them for execution.
 * It maintains a small local queue on the worker so that it doesn't idle while fetching new tasks.
 * High-priority (single-point) tasks run on an executor that falls back to the caller thread when
 * saturated; batch tasks run on a bounded executor and are retried until queue space frees up.
 */
@Override
public void run() {
    // create executors with up to one thread per processor
    int nP = Runtime.getRuntime().availableProcessors();
    highPriorityExecutor = new ThreadPoolExecutor(1, nP, 60, TimeUnit.SECONDS, new ArrayBlockingQueue<>(255));
    // High-priority work must not be dropped: if the queue is full, run on the submitting thread.
    highPriorityExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
    batchExecutor = new ThreadPoolExecutor(1, nP, 60, TimeUnit.SECONDS, new ArrayBlockingQueue<>(nP * 2));
    // Batch work is back-pressured instead: rejections are handled by the retry loop below.
    batchExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.AbortPolicy());
    // Build a graph on startup, iff a graph ID was provided.
    if (graphId != null) {
        LOG.info("Prebuilding graph {}", graphId);
        Graph graph = clusterGraphBuilder.getGraph(graphId);
        // also prebuild the stop tree cache
        graph.index.getStopTreeCache();
        LOG.info("Done prebuilding graph {}", graphId);
    }
    // Start filling the work queues.
    boolean idle = false;
    while (true) {
        long now = System.currentTimeMillis();
        // Consider shutting down if enough time has passed
        if (now > nextShutdownCheckTime && autoShutdown) {
            if (idle && now > lastHighPriorityRequestProcessed + SINGLE_POINT_KEEPALIVE) {
                LOG.warn("Machine is idle, shutting down.");
                try {
                    // Halt the whole machine (cloud worker), not just this JVM.
                    Process process = new ProcessBuilder("sudo", "/sbin/shutdown", "-h", "now").start();
                    process.waitFor();
                } catch (Exception ex) {
                    LOG.error("Unable to terminate worker", ex);
                } finally {
                    System.exit(0);
                }
            }
            nextShutdownCheckTime += 60 * 60 * 1000;
        }
        LOG.info("Long-polling for work ({} second timeout).", POLL_TIMEOUT / 1000.0);
        // Long-poll (wait a few seconds for messages to become available)
        List<AnalystClusterRequest> tasks = getSomeWork(WorkType.BATCH);
        if (tasks == null) {
            LOG.info("Didn't get any work. Retrying.");
            idle = true;
            continue;
        }
        // run through high-priority tasks first to ensure they are enqueued even if the batch
        // queue blocks.
        tasks.stream().filter(t -> t.outputLocation == null).forEach(t -> highPriorityExecutor.execute(() -> {
            LOG.warn("Handling single point request via normal channel, side channel should open shortly.");
            this.handleOneRequest(t);
        }));
        logQueueStatus();
        // enqueue low-priority tasks; note that this may block anywhere in the process
        tasks.stream().filter(t -> t.outputLocation != null).forEach(t -> {
            // attempt to enqueue, waiting if the queue is full
            while (true) {
                try {
                    batchExecutor.execute(() -> this.handleOneRequest(t));
                    break;
                } catch (RejectedExecutionException e) {
                    // queue is full, wait 200ms and try again
                    try {
                        Thread.sleep(200);
                    } catch (InterruptedException e1) {
                        // Restore the interrupt flag instead of swallowing it, so an
                        // interrupt/shutdown request is not silently lost while we wait
                        // for batch queue space.
                        Thread.currentThread().interrupt();
                    }
                }
            }
        });
        logQueueStatus();
        idle = false;
    }
}
Use of org.apache.flink.shaded.guava30.com.google.common.cache.Cache in project free-framework (by a601942905git):
class CacheTest1, method main.
/**
 * Demonstrates loading a list of entities through a guava cache: the first get()
 * invokes the loader, the second (after a 2 s sleep) is served from the cache,
 * then an extra entry is added to show the size growing.
 */
public static void main(String[] args) throws ExecutionException, InterruptedException {
    CacheData cacheData = new CacheData();
    // Avoid the raw Cache type: CacheBuilder.newBuilder().build() returns Cache<Object, Object>.
    Cache<Object, Object> cache = CacheBuilder.newBuilder().build();
    // First access: the loader callable populates the entry.
    @SuppressWarnings("unchecked") // value stored under CACHE_ENTITY_KEY is always List<CacheEntity>
    List<CacheEntity> cacheEntityList = (List<CacheEntity>) cache.get(CACHE_ENTITY_KEY, () -> cacheData.getCacheList());
    cacheEntityList.forEach(System.out::println);
    System.out.println(cache.size());
    System.out.println("沉睡2s");
    Thread.sleep(2000);
    System.out.println("苏醒");
    // Second access: served from the cache, the loader is not invoked again.
    cacheEntityList = (List<CacheEntity>) cache.get(CACHE_ENTITY_KEY, () -> cacheData.getCacheList());
    cacheEntityList.forEach(System.out::println);
    System.out.println(cache.size());
    cache.put("key1", "value1");
    System.out.println(cache.size());
}
Aggregations