Use of com.google.common.cache.LoadingCache in project buck by facebook.
The class HaskellPrebuiltLibraryDescription, method createBuildRule:
@Override
public <A extends Arg> BuildRule createBuildRule(
    TargetGraph targetGraph,
    BuildRuleParams params,
    BuildRuleResolver resolver,
    final A args) throws NoSuchBuildTargetException {
return new PrebuiltHaskellLibrary(params) {
private final LoadingCache<
    CxxPreprocessables.CxxPreprocessorInputCacheKey,
    ImmutableMap<BuildTarget, CxxPreprocessorInput>> transitiveCxxPreprocessorInputCache =
        CxxPreprocessables.getTransitiveCxxPreprocessorInputCache(this);
@Override
public HaskellCompileInput getCompileInput(CxxPlatform cxxPlatform, Linker.LinkableDepType depType) throws NoSuchBuildTargetException {
return HaskellCompileInput.builder()
    .addAllFlags(args.exportedCompilerFlags)
    .addPackages(
        HaskellPackage.builder()
            .setInfo(
                HaskellPackageInfo.of(
                    getBuildTarget().getShortName(),
                    args.version,
                    args.id.orElse(
                        String.format("%s-%s", getBuildTarget().getShortName(), args.version))))
            .setPackageDb(args.db)
            .addAllInterfaces(args.importDirs)
            .addAllLibraries(
                depType == Linker.LinkableDepType.SHARED
                    ? args.sharedLibs.values()
                    : args.staticLibs)
            .build())
    .build();
}
@Override
public Iterable<? extends NativeLinkable> getNativeLinkableDeps() {
return ImmutableList.of();
}
@Override
public Iterable<? extends NativeLinkable> getNativeLinkableExportedDeps() {
return FluentIterable.from(getDeclaredDeps()).filter(NativeLinkable.class);
}
@Override
public NativeLinkableInput getNativeLinkableInput(CxxPlatform cxxPlatform, Linker.LinkableDepType type) {
NativeLinkableInput.Builder builder = NativeLinkableInput.builder();
builder.addAllArgs(StringArg.from(args.exportedLinkerFlags));
if (type == Linker.LinkableDepType.SHARED) {
builder.addAllArgs(SourcePathArg.from(args.sharedLibs.values()));
} else {
builder.addAllArgs(SourcePathArg.from(args.staticLibs));
}
return builder.build();
}
@Override
public Linkage getPreferredLinkage(CxxPlatform cxxPlatform) {
return Linkage.ANY;
}
@Override
public ImmutableMap<String, SourcePath> getSharedLibraries(CxxPlatform cxxPlatform) {
return args.sharedLibs;
}
@Override
public Iterable<? extends CxxPreprocessorDep> getCxxPreprocessorDeps(CxxPlatform cxxPlatform) {
return FluentIterable.from(getDeps()).filter(CxxPreprocessorDep.class);
}
@Override
public Optional<HeaderSymlinkTree> getExportedHeaderSymlinkTree(CxxPlatform cxxPlatform) {
return Optional.empty();
}
@Override
public CxxPreprocessorInput getCxxPreprocessorInput(CxxPlatform cxxPlatform, HeaderVisibility headerVisibility) throws NoSuchBuildTargetException {
CxxPreprocessorInput.Builder builder = CxxPreprocessorInput.builder();
for (SourcePath headerDir : args.cxxHeaderDirs) {
builder.addIncludes(CxxHeadersDir.of(CxxPreprocessables.IncludeType.SYSTEM, headerDir));
}
return builder.build();
}
@Override
public ImmutableMap<BuildTarget, CxxPreprocessorInput> getTransitiveCxxPreprocessorInput(CxxPlatform cxxPlatform, HeaderVisibility headerVisibility) throws NoSuchBuildTargetException {
return transitiveCxxPreprocessorInputCache.getUnchecked(ImmutableCxxPreprocessorInputCacheKey.of(cxxPlatform, headerVisibility));
}
};
}
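The Guava piece worth noting here is the transitiveCxxPreprocessorInputCache field: the rule asks CxxPreprocessables for a LoadingCache keyed on (platform, header visibility) and answers getTransitiveCxxPreprocessorInput with getUnchecked, so the transitive walk runs at most once per key. Below is a minimal, self-contained sketch of that memoization pattern; the key and value types are simplified placeholders, not Buck's real ones.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import java.util.Map;

final class TransitiveInputCacheSketch {

  // The key pairs a platform name with a header visibility; Buck's real key type
  // (CxxPreprocessorInputCacheKey) is an immutable value class playing the same role.
  private final LoadingCache<Map.Entry<String, String>, ImmutableMap<String, String>> cache =
      CacheBuilder.newBuilder().build(
          new CacheLoader<Map.Entry<String, String>, ImmutableMap<String, String>>() {
            @Override
            public ImmutableMap<String, String> load(Map.Entry<String, String> key) {
              // The expensive transitive walk over dependencies happens only once per key.
              return computeTransitiveInputs(key.getKey(), key.getValue());
            }
          });

  ImmutableMap<String, String> getTransitiveInputs(String platform, String visibility) {
    // Mirrors the getUnchecked(...) call in getTransitiveCxxPreprocessorInput above.
    return cache.getUnchecked(Maps.immutableEntry(platform, visibility));
  }

  private ImmutableMap<String, String> computeTransitiveInputs(String platform, String visibility) {
    return ImmutableMap.of(); // placeholder for the real graph traversal
  }
}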
Use of com.google.common.cache.LoadingCache in project pinot by linkedin.
The class ThirdEyeCacheRegistry, method initCaches:
private static void initCaches(ThirdEyeConfiguration config) {
ThirdEyeCacheRegistry cacheRegistry = ThirdEyeCacheRegistry.getInstance();
RemovalListener<PinotQuery, ResultSetGroup> listener = new RemovalListener<PinotQuery, ResultSetGroup>() {
@Override
public void onRemoval(RemovalNotification<PinotQuery, ResultSetGroup> notification) {
LOGGER.info("Expired {}", notification.getKey().getPql());
}
};
// ResultSetGroup cache. The size of this cache is bounded by the total number of buckets across all cached
// ResultSetGroups. We estimate that one bucket (including overhead) consumes about 1 KB, and this cache is
// allowed to use up to 50% of the max heap space.
long maxBucketNumber = getApproximateMaxBucketNumber(DEFAULT_HEAP_PERCENTAGE_FOR_RESULTSETGROUP_CACHE);
LoadingCache<PinotQuery, ResultSetGroup> resultSetGroupCache = CacheBuilder.newBuilder()
    .removalListener(listener)
    .expireAfterAccess(1, TimeUnit.HOURS)
    .maximumWeight(maxBucketNumber)
    .weigher((pinotQuery, resultSetGroup) -> {
      int resultSetCount = resultSetGroup.getResultSetCount();
      int weight = 0;
      for (int idx = 0; idx < resultSetCount; ++idx) {
        com.linkedin.pinot.client.ResultSet resultSet = resultSetGroup.getResultSet(idx);
        weight += (resultSet.getColumnCount() * resultSet.getRowCount());
      }
      return weight;
    })
    .build(new ResultSetGroupCacheLoader(pinotThirdeyeClientConfig));
cacheRegistry.registerResultSetGroupCache(resultSetGroupCache);
LOGGER.info("Max bucket number for ResultSetGroup cache is set to {}", maxBucketNumber);
// CollectionMaxDataTime Cache
LoadingCache<String, Long> collectionMaxDataTimeCache = CacheBuilder.newBuilder()
    .refreshAfterWrite(5, TimeUnit.MINUTES)
    .build(new CollectionMaxDataTimeCacheLoader(resultSetGroupCache, datasetConfigDAO));
cacheRegistry.registerCollectionMaxDataTimeCache(collectionMaxDataTimeCache);
// Query Cache
QueryCache queryCache = new QueryCache(thirdEyeClient, Executors.newFixedThreadPool(10));
cacheRegistry.registerQueryCache(queryCache);
// Dimension Filter cache
LoadingCache<String, String> dimensionFiltersCache = CacheBuilder.newBuilder().build(new DimensionFiltersCacheLoader(cacheRegistry.getQueryCache()));
cacheRegistry.registerDimensionFiltersCache(dimensionFiltersCache);
// Dashboards cache
LoadingCache<String, String> dashboardsCache = CacheBuilder.newBuilder().build(new DashboardsCacheLoader(dashboardConfigDAO));
cacheRegistry.registerDashboardsCache(dashboardsCache);
// Collections cache
CollectionsCache collectionsCache = new CollectionsCache(datasetConfigDAO, config);
cacheRegistry.registerCollectionsCache(collectionsCache);
// DatasetConfig cache
LoadingCache<String, DatasetConfigDTO> datasetConfigCache = CacheBuilder.newBuilder().build(new DatasetConfigCacheLoader(datasetConfigDAO));
cacheRegistry.registerDatasetConfigCache(datasetConfigCache);
// MetricConfig cache
LoadingCache<MetricDataset, MetricConfigDTO> metricConfigCache = CacheBuilder.newBuilder().build(new MetricConfigCacheLoader(metricConfigDAO));
cacheRegistry.registerMetricConfigCache(metricConfigCache);
// DashboardConfigs cache
LoadingCache<String, List<DashboardConfigDTO>> dashboardConfigsCache = CacheBuilder.newBuilder().build(new DashboardConfigCacheLoader(dashboardConfigDAO));
cacheRegistry.registerDashboardConfigsCache(dashboardConfigsCache);
}
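The ResultSetGroup cache above combines most of CacheBuilder's sizing knobs: a removal listener, expireAfterAccess, and a weight budget where the weigher charges each entry by its total cell count. As a rough sanity check on the comment's estimate: with, say, a 16 GB max heap and the 50% allotment, maxBucketNumber would be about 8 GB / 1 KB per bucket, roughly 8.4 million buckets. Below is a compact sketch of the same weight-bounded setup, using hypothetical String keys and int[] values in place of PinotQuery and ResultSetGroup.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import java.util.concurrent.TimeUnit;

final class WeightBoundedCacheSketch {

  // Hypothetical loader: an int[] of "rows" per query string stands in for ResultSetGroup.
  private static final CacheLoader<String, int[]> LOADER = new CacheLoader<String, int[]>() {
    @Override
    public int[] load(String query) {
      return runQuery(query); // placeholder for the real query round trip
    }
  };

  static LoadingCache<String, int[]> build(long maxTotalRows) {
    RemovalListener<String, int[]> listener =
        notification -> System.out.println("Expired " + notification.getKey());
    return CacheBuilder.newBuilder()
        .removalListener(listener)
        .expireAfterAccess(1, TimeUnit.HOURS)                // idle entries age out
        .maximumWeight(maxTotalRows)                         // budget in "rows", not entry count
        .weigher((String query, int[] rows) -> rows.length)  // per-entry cost in the same unit
        .build(LOADER);
  }

  private static int[] runQuery(String query) {
    return new int[0];
  }
}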
Use of com.google.common.cache.LoadingCache in project cdap by caskdata.
The class AggregatedMetricsCollectionService, method getMetrics:
private Iterator<MetricValues> getMetrics(final long timestamp) {
// NOTE: emitters.asMap() does not reset the access time in the cache,
// so it is the preferred way to read the cache entries, since we access and emit metrics every second.
final Iterator<Map.Entry<Map<String, String>, LoadingCache<String, AggregatedMetricsEmitter>>> iterator =
    emitters.asMap().entrySet().iterator();
return new AbstractIterator<MetricValues>() {
@Override
protected MetricValues computeNext() {
while (iterator.hasNext()) {
Map.Entry<Map<String, String>, LoadingCache<String, AggregatedMetricsEmitter>> entry = iterator.next();
Map<String, AggregatedMetricsEmitter> metricEmitters = entry.getValue().asMap();
// +1 because we add extra metric about how many metric values did we emit in this context (see below)
List<MetricValue> metricValues = Lists.newArrayListWithCapacity(metricEmitters.size() + 1);
for (Map.Entry<String, AggregatedMetricsEmitter> emitterEntry : metricEmitters.entrySet()) {
MetricValue metricValue = emitterEntry.getValue().emit();
// skip increment by 0
if (metricValue.getType() == MetricType.COUNTER && metricValue.getValue() == 0) {
continue;
}
metricValues.add(metricValue);
}
if (metricValues.isEmpty()) {
// skip if there are no metric values to send
continue;
}
// total number of emitted metrics, including this count metric itself (hence the +1)
metricValues.add(new MetricValue("metrics.emitted.count", MetricType.COUNTER, metricValues.size() + 1));
LOG.trace("Emit metric {}", metricValues);
return new MetricValues(entry.getKey(), timestamp, metricValues);
}
return endOfData();
}
};
}
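The NOTE at the top of getMetrics is the key Guava detail: reads made through the asMap() view (including iterating its entrySet) do not reset an entry's access time, so a once-per-second drain does not keep otherwise-idle emitters from expiring under expireAfterAccess. Below is a minimal sketch of that drain pattern over a flat LoadingCache of counters; the nested per-context map and the MetricValues types are left out.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.LongAdder;

final class MetricsDrainSketch {

  // One counter per metric name, created on demand; idle counters expire after an hour.
  private final LoadingCache<String, LongAdder> emitters = CacheBuilder.newBuilder()
      .expireAfterAccess(1, TimeUnit.HOURS)
      .build(new CacheLoader<String, LongAdder>() {
        @Override
        public LongAdder load(String name) {
          return new LongAdder();
        }
      });

  void increment(String name, long delta) {
    emitters.getUnchecked(name).add(delta);
  }

  // Reads every counter through asMap(); per the Guava docs, iterating the entrySet view
  // does not reset access time, so draining once per second does not keep idle entries alive.
  List<String> drain() {
    List<String> lines = new ArrayList<>();
    for (Map.Entry<String, LongAdder> entry : emitters.asMap().entrySet()) {
      long value = entry.getValue().sumThenReset();
      if (value == 0) {
        continue; // skip counters that did not move, like the COUNTER == 0 check above
      }
      lines.add(entry.getKey() + "=" + value);
    }
    return lines;
  }
}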
Use of com.google.common.cache.LoadingCache in project FoamFix by asiekierka.
The class ProxyClient, method onWorldUnload:
@SubscribeEvent(priority = EventPriority.LOWEST)
public void onWorldUnload(WorldEvent.Unload event) {
if (FoamFixShared.config.clClearCachesOnUnload && event.getWorld() instanceof WorldClient && REGION_CACHE_GETTER != null) {
try {
LoadingCache cache = (LoadingCache) (REGION_CACHE_GETTER.invoke());
cache.invalidateAll();
cache.cleanUp();
} catch (Throwable t) {
t.printStackTrace();
}
}
}
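FoamFix reaches the vanilla region cache through a reflective getter (REGION_CACHE_GETTER), which is why the cache is handled as a raw LoadingCache; the two calls it then makes are plain Guava Cache methods. A small sketch of the same clear-and-clean step on an ordinary cache follows; the names are illustrative, not Minecraft's.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

final class CacheClearingSketch {

  // invalidateAll() discards every entry; cleanUp() then runs any pending maintenance
  // (such as delivering queued removal notifications) right away instead of piggybacking
  // on later cache activity.
  static void clear(Cache<?, ?> cache) {
    cache.invalidateAll();
    cache.cleanUp();
  }

  public static void main(String[] args) {
    Cache<String, byte[]> regionCache = CacheBuilder.newBuilder().maximumSize(256).build();
    regionCache.put("r.0.0.mca", new byte[1024]);
    clear(regionCache); // after this, regionCache.size() == 0
  }
}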
Use of com.google.common.cache.LoadingCache in project meghanada-server by mopemope.
The class JavaCompletion, method completionPackage:
private Collection<? extends CandidateUnit> completionPackage() {
final GlobalCache globalCache = GlobalCache.getInstance();
final LoadingCache<File, Source> sourceCache = globalCache.getSourceCache(project);
return sourceCache.asMap().values().stream()
    .map(source -> ClassIndex.createPackage(source.getPackageName()))
    .collect(Collectors.toSet());
}
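Here asMap() is used as a read-only snapshot: it exposes only the entries that are already loaded and never triggers a load, so the completion list reflects whatever sources have been parsed so far. A hedged sketch of the same idea with a simplified value type (a package-name String instead of meghanada's Source):

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.io.File;
import java.util.Set;
import java.util.stream.Collectors;

final class PackageIndexSketch {

  // Stand-in for the per-project source cache: one parsed package name per file.
  private final LoadingCache<File, String> sourceCache = CacheBuilder.newBuilder()
      .maximumSize(10_000)
      .build(new CacheLoader<File, String>() {
        @Override
        public String load(File file) {
          return parsePackageName(file); // placeholder for the real parser
        }
      });

  // asMap() exposes only already-loaded entries and does not trigger loading, so this
  // collects package names from whatever sources happen to be cached right now.
  Set<String> completionPackages() {
    return sourceCache.asMap().values().stream().collect(Collectors.toSet());
  }

  private String parsePackageName(File file) {
    return "com.example"; // placeholder
  }
}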