Use of com.google.common.cache.CacheLoader in project hadoop by apache.
The class DFSClientCache, method inputStreamLoader.
private CacheLoader<DFSInputStreamCaheKey, FSDataInputStream> inputStreamLoader() {
  return new CacheLoader<DFSInputStreamCaheKey, FSDataInputStream>() {

    @Override
    public FSDataInputStream load(DFSInputStreamCaheKey key) throws Exception {
      // Obtain a DFSClient acting as the user recorded in the cache key.
      DFSClient client = getDfsClient(key.userId);
      // Open the file identified by the inode path, then wrap the raw stream
      // in an FSDataInputStream before handing it back to the cache.
      DFSInputStream dis = client.open(key.inodePath);
      return client.createWrappedInputStream(dis);
    }
  };
}
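The loader above only supplies values on cache misses; it takes effect once it is handed to a Guava CacheBuilder. Below is a minimal, hypothetical sketch of that wiring, assuming Guava's LoadingCache and CacheBuilder; the field name, size limit, idle timeout, accessor name, and key constructor are illustrative assumptions, not values taken from DFSClientCache.

// Hypothetical wiring inside DFSClientCache; the cache policy is assumed.
private final LoadingCache<DFSInputStreamCaheKey, FSDataInputStream> inputstreamCache =
    CacheBuilder.newBuilder()
        .maximumSize(1024)                        // assumed cap on cached open streams
        .expireAfterAccess(10, TimeUnit.MINUTES)  // assumed idle eviction
        .build(inputStreamLoader());

// Hypothetical accessor: load() runs only on a cache miss, hits reuse the open stream.
public FSDataInputStream getDfsInputStream(String userName, String inodePath) {
  try {
    return inputstreamCache.get(new DFSInputStreamCaheKey(userName, inodePath));
  } catch (ExecutionException e) {
    // Failure to open the file surfaces here; a real implementation would log and rethrow.
    return null;
  }
}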
Use of com.google.common.cache.CacheLoader in project weave by continuuity.
The class ZKDiscoveryService, method createServiceLoader.
/**
 * Creates a CacheLoader that builds a live ServiceDiscovered view for a given service,
 * watching for changes in its set of instances.
 */
private CacheLoader<String, ServiceDiscovered> createServiceLoader() {
  return new CacheLoader<String, ServiceDiscovered>() {

    @Override
    public ServiceDiscovered load(String service) throws Exception {
      final DefaultServiceDiscovered serviceDiscovered = new DefaultServiceDiscovered(service);
      final String serviceBase = "/" + service;
      // Watch for child changes under /<service>.
      ZKOperations.watchChildren(zkClient, serviceBase, new ZKOperations.ChildrenCallback() {
        @Override
        public void updated(NodeChildren nodeChildren) {
          // Fetch the data of all child nodes in parallel.
          List<String> children = nodeChildren.getChildren();
          List<OperationFuture<NodeData>> dataFutures = Lists.newArrayListWithCapacity(children.size());
          for (String child : children) {
            dataFutures.add(zkClient.getData(serviceBase + "/" + child));
          }
          // Update the service map once all fetches are done.
          final ListenableFuture<List<NodeData>> fetchFuture = Futures.successfulAsList(dataFutures);
          fetchFuture.addListener(new Runnable() {
            @Override
            public void run() {
              ImmutableSet.Builder<Discoverable> builder = ImmutableSet.builder();
              for (NodeData nodeData : Futures.getUnchecked(fetchFuture)) {
                // For each successful fetch, decode the content.
                if (nodeData != null) {
                  Discoverable discoverable = DiscoverableAdapter.decode(nodeData.getData());
                  if (discoverable != null) {
                    builder.add(discoverable);
                  }
                }
              }
              serviceDiscovered.setDiscoverables(builder.build());
            }
          }, Threads.SAME_THREAD_EXECUTOR);
        }
      });
      return serviceDiscovered;
    }
  };
}
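The loader only describes how a live ServiceDiscovered view is created; the discovery service presumably keeps the results in a LoadingCache keyed by service name, so repeated lookups of the same service share a single ZooKeeper watcher. A rough sketch follows, with the field and method names assumed rather than taken from the weave source.

// Hypothetical wiring: one live view per service name, created at most once.
private final LoadingCache<String, ServiceDiscovered> services =
    CacheBuilder.newBuilder().build(createServiceLoader());

// Hypothetical lookup method: the first call for a name runs load() and registers
// the watcher; later calls return the same cached ServiceDiscovered instance.
// getUnchecked wraps any failure from load() in an unchecked exception.
public ServiceDiscovered discover(String name) {
  return services.getUnchecked(name);
}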
Use of com.google.common.cache.CacheLoader in project buck by facebook.
The class CellProvider, method createForLocalBuild.
/**
* Create a cell provider at a given root.
*/
public static CellProvider createForLocalBuild(
    ProjectFilesystem rootFilesystem,
    Watchman watchman,
    BuckConfig rootConfig,
    CellConfig rootCellConfigOverrides,
    KnownBuildRuleTypesFactory knownBuildRuleTypesFactory) throws IOException {
  DefaultCellPathResolver rootCellCellPathResolver =
      new DefaultCellPathResolver(rootFilesystem.getRootPath(), rootConfig.getConfig());
  ImmutableMap<RelativeCellName, Path> transitiveCellPathMapping =
      rootCellCellPathResolver.getTransitivePathMapping();
  ImmutableMap<Path, RawConfig> pathToConfigOverrides;
  try {
    pathToConfigOverrides = rootCellConfigOverrides.getOverridesByPath(transitiveCellPathMapping);
  } catch (CellConfig.MalformedOverridesException e) {
    throw new HumanReadableException(e.getMessage());
  }
  ImmutableSet<Path> allRoots = ImmutableSet.copyOf(transitiveCellPathMapping.values());
  return new CellProvider(cellProvider -> new CacheLoader<Path, Cell>() {

    @Override
    public Cell load(Path cellPath) throws IOException, InterruptedException {
      Path normalizedCellPath = cellPath.toRealPath().normalize();
      Preconditions.checkState(allRoots.contains(normalizedCellPath),
          "Cell %s outside of transitive closure of root cell (%s).", normalizedCellPath, allRoots);
      RawConfig configOverrides =
          Optional.ofNullable(pathToConfigOverrides.get(normalizedCellPath))
              .orElse(RawConfig.of(ImmutableMap.of()));
      Config config = Configs.createDefaultConfig(normalizedCellPath, configOverrides);
      DefaultCellPathResolver cellPathResolver = new DefaultCellPathResolver(normalizedCellPath, config);
      cellPathResolver.getCellPaths().forEach((name, path) -> {
        Path pathInRootResolver = rootCellCellPathResolver.getCellPaths().get(name);
        if (pathInRootResolver == null) {
          throw new HumanReadableException(
              "In the config of %s: %s.%s must exist in the root cell's cell mappings.",
              cellPath.toString(), DefaultCellPathResolver.REPOSITORIES_SECTION, name);
        } else if (!pathInRootResolver.equals(path)) {
          throw new HumanReadableException(
              "In the config of %s: %s.%s must point to the same directory as the root "
                  + "cell's cell mapping: (root) %s != (current) %s",
              cellPath.toString(), DefaultCellPathResolver.REPOSITORIES_SECTION, name,
              pathInRootResolver, path);
        }
      });
      ProjectFilesystem cellFilesystem = new ProjectFilesystem(normalizedCellPath, config);
      BuckConfig buckConfig = new BuckConfig(config, cellFilesystem, rootConfig.getArchitecture(),
          rootConfig.getPlatform(), rootConfig.getEnvironment(), cellPathResolver);
      return new Cell(cellPathResolver.getKnownRoots(), cellFilesystem, watchman, buckConfig,
          knownBuildRuleTypesFactory, cellProvider);
    }
  }, cellProvider -> {
    try {
      return new Cell(rootCellCellPathResolver.getKnownRoots(), rootFilesystem, watchman, rootConfig,
          knownBuildRuleTypesFactory, cellProvider);
    } catch (InterruptedException e) {
      throw new RuntimeException("Interrupted while loading root cell", e);
    } catch (IOException e) {
      throw new HumanReadableException("Failed to load root cell", e);
    }
  });
}
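CellProvider receives the loader through a factory function (cellProvider -> CacheLoader) so that the loader can resolve further Cells through the same provider while it is loading one. Presumably the provider wraps the loader in a LoadingCache keyed by cell path; the sketch below is a hypothetical simplification of that constructor, with field and method names assumed for illustration (Buck's real constructor also handles the root-cell factory passed as the second argument above).

// Hypothetical sketch of the CellProvider side of the contract.
private final LoadingCache<Path, Cell> cells;

CellProvider(Function<CellProvider, CacheLoader<Path, Cell>> cellCacheLoader) {
  // Passing "this" lets loaded Cells look up other cells through the same cache.
  this.cells = CacheBuilder.newBuilder().build(cellCacheLoader.apply(this));
}

public Cell getCellByPath(Path path) {
  // The expensive load() above runs once per distinct cell path.
  return cells.getUnchecked(path);
}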
Use of com.google.common.cache.CacheLoader in project cdap by caskdata.
The class MapReduceTaskContextProvider, method createCacheLoader.
/**
* Creates a {@link CacheLoader} for the task context cache.
*/
private CacheLoader<ContextCacheKey, BasicMapReduceTaskContext> createCacheLoader(final Injector injector) {
  final DiscoveryServiceClient discoveryServiceClient = injector.getInstance(DiscoveryServiceClient.class);
  final DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
  final SecureStore secureStore = injector.getInstance(SecureStore.class);
  final SecureStoreManager secureStoreManager = injector.getInstance(SecureStoreManager.class);
  final MessagingService messagingService = injector.getInstance(MessagingService.class);
  // Multiple instances of BasicMapReduceTaskContext can share the same program.
  final AtomicReference<Program> programRef = new AtomicReference<>();
  return new CacheLoader<ContextCacheKey, BasicMapReduceTaskContext>() {

    @Override
    public BasicMapReduceTaskContext load(ContextCacheKey key) throws Exception {
      MapReduceContextConfig contextConfig = new MapReduceContextConfig(key.getConfiguration());
      MapReduceClassLoader classLoader = MapReduceClassLoader.getFromConfiguration(key.getConfiguration());
      Program program = programRef.get();
      if (program == null) {
        // Creation of the program is relatively cheap, so just create it and do a compare-and-set.
        programRef.compareAndSet(null, createProgram(contextConfig, classLoader.getProgramClassLoader()));
        program = programRef.get();
      }
      WorkflowProgramInfo workflowInfo = contextConfig.getWorkflowProgramInfo();
      DatasetFramework programDatasetFramework = workflowInfo == null
          ? datasetFramework
          : NameMappedDatasetFramework.createFromWorkflowProgramInfo(
              datasetFramework, workflowInfo, program.getApplicationSpecification());
      // Set up the dataset framework context, if required.
      if (programDatasetFramework instanceof ProgramContextAware) {
        ProgramRunId programRunId = program.getId().run(ProgramRunners.getRunId(contextConfig.getProgramOptions()));
        ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programRunId));
      }
      MapReduceSpecification spec = program.getApplicationSpecification().getMapReduce().get(program.getName());
      MetricsCollectionService metricsCollectionService = null;
      MapReduceMetrics.TaskType taskType = null;
      String taskId = null;
      // The task attempt ID can be null, e.g. when the context is obtained
      // from a org.apache.hadoop.io.RawComparator.
      TaskAttemptID taskAttemptId = key.getTaskAttemptID();
      if (taskAttemptId != null) {
        taskId = taskAttemptId.getTaskID().toString();
        if (MapReduceMetrics.TaskType.hasType(taskAttemptId.getTaskType())) {
          taskType = MapReduceMetrics.TaskType.from(taskAttemptId.getTaskType());
          // If this is not for a mapper or a reducer, we don't need the metrics collection service.
          metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
        }
      }
      CConfiguration cConf = injector.getInstance(CConfiguration.class);
      TransactionSystemClient txClient = injector.getInstance(TransactionSystemClient.class);
      return new BasicMapReduceTaskContext(
          program, contextConfig.getProgramOptions(), cConf, taskType, taskId, spec,
          workflowInfo, discoveryServiceClient, metricsCollectionService, txClient,
          contextConfig.getTx(), programDatasetFramework, classLoader.getPluginInstantiator(),
          contextConfig.getLocalizedResources(), secureStore, secureStoreManager,
          authorizationEnforcer, authenticationContext, messagingService);
    }
  };
}
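As in the other examples, this loader is presumably handed to a CacheBuilder so that each task reuses one BasicMapReduceTaskContext per cache key instead of rebuilding it for every record. The sketch below is a hypothetical wiring; the cache policy, the injector field, and the accessor are assumptions rather than CDAP's actual code.

// Hypothetical wiring of the task-context cache (injector assumed to be a field).
private final LoadingCache<ContextCacheKey, BasicMapReduceTaskContext> taskContexts =
    CacheBuilder.newBuilder()
        .expireAfterAccess(1, TimeUnit.HOURS) // assumed: drop contexts of finished tasks
        .build(createCacheLoader(injector));

// Hypothetical accessor: the heavy setup in load() (program creation, dataset
// framework wiring, metrics lookup) runs once per key; later calls hit the cache.
BasicMapReduceTaskContext getContext(ContextCacheKey key) {
  return taskContexts.getUnchecked(key);
}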
Use of com.google.common.cache.CacheLoader in project fess by codelibs.
The class SystemHelper, method init.
@PostConstruct
public void init() {
  final FessConfig fessConfig = ComponentUtil.getFessConfig();
  filterPathEncoding = fessConfig.getPathEncoding();
  supportedLanguages = fessConfig.getSupportedLanguagesAsArray();
  langItemsCache = CacheBuilder.newBuilder()
      .maximumSize(20)
      .expireAfterAccess(1, TimeUnit.HOURS)
      .build(new CacheLoader<String, List<Map<String, String>>>() {
        @Override
        public List<Map<String, String>> load(final String key) throws Exception {
          // The cache key is a locale name; build the language selection items for it.
          final ULocale uLocale = new ULocale(key);
          final Locale displayLocale = uLocale.toLocale();
          final List<Map<String, String>> langItems = new ArrayList<>(supportedLanguages.length);
          // First entry: the "all languages" option.
          final String msg = ComponentUtil.getMessageManager().getMessage(displayLocale, "labels.allLanguages");
          final Map<String, String> defaultMap = new HashMap<>(2);
          defaultMap.put(Constants.ITEM_LABEL, msg);
          defaultMap.put(Constants.ITEM_VALUE, "all");
          langItems.add(defaultMap);
          // One entry per supported language, labeled in the display locale.
          for (final String lang : supportedLanguages) {
            final Locale locale = LocaleUtils.toLocale(lang);
            final String label = locale.getDisplayName(displayLocale);
            final Map<String, String> map = new HashMap<>(2);
            map.put(Constants.ITEM_LABEL, label);
            map.put(Constants.ITEM_VALUE, lang);
            langItems.add(map);
          }
          return langItems;
        }
      });
  ComponentUtil.doInitProcesses(p -> p.run());
}
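Since langItemsCache is a LoadingCache, consumers read it by key and the list construction in load() happens at most once per locale within the expiry window. Below is a hypothetical accessor in the style of SystemHelper; the method name and the empty-list fallback are assumptions for illustration.

// Hypothetical accessor: look up the language items for a request locale.
public List<Map<String, String>> getLanguageItems(final Locale locale) {
  try {
    // The cache key is the locale name, e.g. "en_US"; the loader above builds
    // the label/value pairs for it on first access.
    return langItemsCache.get(locale.toString());
  } catch (final ExecutionException e) {
    // If the loader fails, fall back to an empty selection rather than failing the page.
    return Collections.emptyList();
  }
}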