use of com.google.common.cache.Cache in project atlasdb by palantir.
the class AtlasDbMetrics method registerCache.
public static void registerCache(Cache<?, ?> cache, String metricsPrefix) {
    MetricRegistry metricRegistry = getMetricRegistry();
    Set<String> existingMetrics = metricRegistry.getMetrics().keySet().stream()
            .filter(name -> name.startsWith(metricsPrefix))
            .collect(Collectors.toSet());
    if (existingMetrics.isEmpty()) {
        MetricRegistries.registerCache(metricRegistry, cache, metricsPrefix);
    } else {
        log.info("Not registering cache with prefix '{}' as metric registry already contains metrics: {}",
                metricsPrefix, existingMetrics);
    }
}
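A minimal sketch of how this guard plays out for a caller, assuming a hypothetical metric prefix; the second registration is skipped with an info log rather than failing on duplicate metric names:

Cache<String, String> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
// First call registers the cache's gauges under the prefix.
AtlasDbMetrics.registerCache(cache, "atlasdb.example.cache");
// Second call is a no-op: metrics with this prefix already exist in the registry.
AtlasDbMetrics.registerCache(cache, "atlasdb.example.cache");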
use of com.google.common.cache.Cache in project OpenTripPlanner by opentripplanner.
the class AnalystWorker method run.
/**
* This is the main worker event loop which fetches tasks from a broker and schedules them for execution.
* It maintains a small local queue on the worker so that it doesn't idle while fetching new tasks.
*/
@Override
public void run() {
    // Create executors with up to one thread per processor.
    int nP = Runtime.getRuntime().availableProcessors();
    highPriorityExecutor = new ThreadPoolExecutor(1, nP, 60, TimeUnit.SECONDS, new ArrayBlockingQueue<>(255));
    highPriorityExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
    batchExecutor = new ThreadPoolExecutor(1, nP, 60, TimeUnit.SECONDS, new ArrayBlockingQueue<>(nP * 2));
    batchExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.AbortPolicy());
    // Build a graph on startup, iff a graph ID was provided.
    if (graphId != null) {
        LOG.info("Prebuilding graph {}", graphId);
        Graph graph = clusterGraphBuilder.getGraph(graphId);
        // Also prebuild the stop tree cache.
        graph.index.getStopTreeCache();
        LOG.info("Done prebuilding graph {}", graphId);
    }
    // Start filling the work queues.
    boolean idle = false;
    while (true) {
        long now = System.currentTimeMillis();
        // Consider shutting down if enough time has passed.
        if (now > nextShutdownCheckTime && autoShutdown) {
            if (idle && now > lastHighPriorityRequestProcessed + SINGLE_POINT_KEEPALIVE) {
                LOG.warn("Machine is idle, shutting down.");
                try {
                    Process process = new ProcessBuilder("sudo", "/sbin/shutdown", "-h", "now").start();
                    process.waitFor();
                } catch (Exception ex) {
                    LOG.error("Unable to terminate worker", ex);
                } finally {
                    System.exit(0);
                }
            }
            nextShutdownCheckTime += 60 * 60 * 1000;
        }
        LOG.info("Long-polling for work ({} second timeout).", POLL_TIMEOUT / 1000.0);
        // Long-poll (wait a few seconds for messages to become available).
        List<AnalystClusterRequest> tasks = getSomeWork(WorkType.BATCH);
        if (tasks == null) {
            LOG.info("Didn't get any work. Retrying.");
            idle = true;
            continue;
        }
        // Run through high-priority tasks first to ensure they are enqueued
        // even if the batch queue blocks.
        tasks.stream().filter(t -> t.outputLocation == null).forEach(t -> highPriorityExecutor.execute(() -> {
            LOG.warn("Handling single point request via normal channel, side channel should open shortly.");
            this.handleOneRequest(t);
        }));
        logQueueStatus();
        // Enqueue low-priority tasks; note that this may block anywhere in the process.
        tasks.stream().filter(t -> t.outputLocation != null).forEach(t -> {
            // Attempt to enqueue, waiting if the queue is full.
            while (true) {
                try {
                    batchExecutor.execute(() -> this.handleOneRequest(t));
                    break;
                } catch (RejectedExecutionException e) {
                    // Queue is full; wait 200 ms and try again.
                    try {
                        Thread.sleep(200);
                    } catch (InterruptedException e1) {
                        /* nothing */
                    }
                }
            }
        });
        logQueueStatus();
        idle = false;
    }
}
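The two rejection policies implement the back-pressure described in the Javadoc: CallerRunsPolicy keeps high-priority work moving on the polling thread itself, while AbortPolicy plus the sleep-and-retry loop throttles batch intake. A self-contained sketch of that retry idiom, with hypothetical pool and queue sizes chosen so rejection is easy to trigger:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class BackpressureSketch {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                1, 2, 60, TimeUnit.SECONDS,
                new ArrayBlockingQueue<>(4), // small queue so the full condition is reached quickly
                new ThreadPoolExecutor.AbortPolicy());
        for (int i = 0; i < 20; i++) {
            final int task = i;
            while (true) {
                try {
                    executor.execute(() -> simulateWork(task));
                    break; // enqueued successfully
                } catch (RejectedExecutionException e) {
                    Thread.sleep(200); // queue full: back off, then retry
                }
            }
        }
        executor.shutdown();
    }

    private static void simulateWork(int task) {
        try {
            Thread.sleep(50);
        } catch (InterruptedException ignored) {
            Thread.currentThread().interrupt();
        }
    }
}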
use of com.google.common.cache.Cache in project free-framework by a601942905git.
the class CacheTest1 method main.
public static void main(String[] args) throws ExecutionException, InterruptedException {
    CacheData cacheData = new CacheData();
    Cache<String, Object> cache = CacheBuilder.newBuilder().build();
    List<CacheEntity> cacheEntityList = (List<CacheEntity>) cache.get(CACHE_ENTITY_KEY, () -> cacheData.getCacheList());
    cacheEntityList.forEach(System.out::println);
    System.out.println(cache.size());
    System.out.println("Sleeping for 2s");
    Thread.sleep(2000);
    System.out.println("Waking up");
    cacheEntityList = (List<CacheEntity>) cache.get(CACHE_ENTITY_KEY, () -> cacheData.getCacheList());
    cacheEntityList.forEach(System.out::println);
    System.out.println(cache.size());
    cache.put("key1", "value1");
    System.out.println(cache.size());
}
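Note that this cache is built with no eviction policy, so the second get after the sleep is served from memory and size() stays at 1 until the extra put. If the sleep is meant to demonstrate expiry, the builder needs an explicit policy; a sketch assuming a hypothetical one-second expireAfterWrite:

Cache<String, Object> expiring = CacheBuilder.newBuilder()
        .expireAfterWrite(1, TimeUnit.SECONDS)
        .build();
expiring.put("key", "value");
System.out.println(expiring.getIfPresent("key")); // value
Thread.sleep(2000);
System.out.println(expiring.getIfPresent("key")); // null: the entry has expired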
use of com.google.common.cache.Cache in project presto by prestodb.
the class IcebergModule method createStripeMetadataSourceFactory.
@Singleton
@Provides
public StripeMetadataSourceFactory createStripeMetadataSourceFactory(OrcCacheConfig orcCacheConfig, MBeanExporter exporter) {
    StripeMetadataSource stripeMetadataSource = new StorageStripeMetadataSource();
    if (orcCacheConfig.isStripeMetadataCacheEnabled()) {
        Cache<StripeReader.StripeId, Slice> footerCache = CacheBuilder.newBuilder()
                .maximumWeight(orcCacheConfig.getStripeFooterCacheSize().toBytes())
                .weigher((id, footer) -> toIntExact(((Slice) footer).getRetainedSize()))
                .expireAfterAccess(orcCacheConfig.getStripeFooterCacheTtlSinceLastAccess().toMillis(), MILLISECONDS)
                .recordStats()
                .build();
        Cache<StripeReader.StripeStreamId, Slice> streamCache = CacheBuilder.newBuilder()
                .maximumWeight(orcCacheConfig.getStripeStreamCacheSize().toBytes())
                .weigher((id, stream) -> toIntExact(((Slice) stream).getRetainedSize()))
                .expireAfterAccess(orcCacheConfig.getStripeStreamCacheTtlSinceLastAccess().toMillis(), MILLISECONDS)
                .recordStats()
                .build();
        CacheStatsMBean footerCacheStatsMBean = new CacheStatsMBean(footerCache);
        CacheStatsMBean streamCacheStatsMBean = new CacheStatsMBean(streamCache);
        exporter.export(generatedNameOf(CacheStatsMBean.class, connectorId + "_StripeFooter"), footerCacheStatsMBean);
        exporter.export(generatedNameOf(CacheStatsMBean.class, connectorId + "_StripeStream"), streamCacheStatsMBean);
        Optional<Cache<StripeReader.StripeStreamId, List<RowGroupIndex>>> rowGroupIndexCache = Optional.empty();
        if (orcCacheConfig.isRowGroupIndexCacheEnabled()) {
            rowGroupIndexCache = Optional.of(CacheBuilder.newBuilder()
                    .maximumWeight(orcCacheConfig.getRowGroupIndexCacheSize().toBytes())
                    .weigher((id, rowGroupIndices) -> toIntExact(((List<RowGroupIndex>) rowGroupIndices).stream()
                            .mapToLong(RowGroupIndex::getRetainedSizeInBytes)
                            .sum()))
                    .expireAfterAccess(orcCacheConfig.getStripeStreamCacheTtlSinceLastAccess().toMillis(), MILLISECONDS)
                    .recordStats()
                    .build());
            CacheStatsMBean rowGroupIndexCacheStatsMBean = new CacheStatsMBean(rowGroupIndexCache.get());
            exporter.export(generatedNameOf(CacheStatsMBean.class, connectorId + "_StripeStreamRowGroupIndex"), rowGroupIndexCacheStatsMBean);
        }
        stripeMetadataSource = new CachingStripeMetadataSource(stripeMetadataSource, footerCache, streamCache, rowGroupIndexCache);
    }
    StripeMetadataSourceFactory factory = StripeMetadataSourceFactory.of(stripeMetadataSource);
    if (orcCacheConfig.isDwrfStripeCacheEnabled()) {
        factory = new DwrfAwareStripeMetadataSourceFactory(factory);
    }
    return factory;
}
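Both caches are bounded by retained bytes rather than entry count, which is the standard Guava pairing of maximumWeight with a weigher. A stripped-down sketch of that combination, using hypothetical byte-array values in place of Slice:

Weigher<String, byte[]> bySize = (key, value) -> value.length; // each entry weighs its payload size
Cache<String, byte[]> weighted = CacheBuilder.newBuilder()
        .maximumWeight(10 * 1024 * 1024) // cap total retained size at ~10 MB
        .weigher(bySize)
        .expireAfterAccess(10, TimeUnit.MINUTES)
        .recordStats()
        .build();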
use of com.google.common.cache.Cache in project presto by prestodb.
the class DruidClient method toRemoteTable.
Optional<RemoteTableObject> toRemoteTable(SchemaTableName schemaTableName) {
    requireNonNull(schemaTableName, "schemaTableName is null");
    verify(CharMatcher.forPredicate(Character::isUpperCase).matchesNoneOf(schemaTableName.getTableName()),
            "Expected table name from internal metadata to be lowercase: %s", schemaTableName);
    if (!caseInsensitiveNameMatching) {
        return Optional.of(RemoteTableObject.of(schemaTableName.getTableName()));
    }
    @Nullable Optional<RemoteTableObject> remoteTable = remoteTables.getIfPresent(schemaTableName);
    if (remoteTable != null) {
        return remoteTable;
    }
    // Cache miss; reload the cache.
    Map<SchemaTableName, Optional<RemoteTableObject>> mapping = new HashMap<>();
    for (String table : getTables()) {
        SchemaTableName cacheKey = new SchemaTableName(getSchema(), table);
        mapping.merge(cacheKey, Optional.of(RemoteTableObject.of(table)),
                (currentValue, collision) -> currentValue.map(
                        current -> current.registerCollision(collision.get().getOnlyRemoteTableName())));
        remoteTables.put(cacheKey, mapping.get(cacheKey));
    }
    // Explicitly cache if the requested table doesn't exist.
    if (!mapping.containsKey(schemaTableName)) {
        remoteTables.put(schemaTableName, Optional.empty());
    }
    return mapping.containsKey(schemaTableName) ? mapping.get(schemaTableName) : Optional.empty();
}
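This method is a manual cache-aside with negative caching: a getIfPresent miss triggers a full reload of remote names, and a lookup for a table that doesn't exist is stored as Optional.empty() so it isn't refetched on every call. A stripped-down sketch of that pattern, with a hypothetical fetchFromRemote standing in for the getTables() reload:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.Optional;
import java.util.concurrent.TimeUnit;

class NegativeCachingResolver {
    private final Cache<String, Optional<String>> names = CacheBuilder.newBuilder()
            .expireAfterWrite(1, TimeUnit.MINUTES)
            .build();

    Optional<String> lookup(String key) {
        Optional<String> cached = names.getIfPresent(key);
        if (cached != null) {
            return cached; // hit, including a cached "not found"
        }
        Optional<String> resolved = fetchFromRemote(key);
        // Cache hits and misses alike so absent keys are not refetched on each call.
        names.put(key, resolved);
        return resolved;
    }

    // Hypothetical remote lookup; the real code reloads all names at once.
    private Optional<String> fetchFromRemote(String key) {
        return Optional.empty();
    }
}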