Use of com.google.common.base.Optional in project hadoop by apache.
The class DFSAdmin, method getReconfigurationStatus:
int getReconfigurationStatus(final String nodeType, final String address,
    final PrintStream out, final PrintStream err) throws IOException {
  String outMsg = null;
  String errMsg = null;
  ReconfigurationTaskStatus status = null;
  try {
    status = getReconfigurationStatusDispatch(nodeType, address, out, err);
    outMsg = String.format("Reconfiguring status for node [%s]: ", address);
  } catch (IOException e) {
    errMsg = String.format("Node [%s] reloading configuration: %s.", address, e.toString());
  }
  if (errMsg != null) {
    err.println(errMsg);
    return 1;
  } else {
    out.print(outMsg);
  }
  if (status != null) {
    if (!status.hasTask()) {
      out.println("no task was found.");
      return 0;
    }
    out.print("started at " + new Date(status.getStartTime()));
    if (!status.stopped()) {
      out.println(" and is still running.");
      return 0;
    }
    out.println(" and finished at " + new Date(status.getEndTime()).toString() + ".");
    if (status.getStatus() == null) {
      // Nothing to report.
      return 0;
    }
    for (Map.Entry<PropertyChange, Optional<String>> result : status.getStatus().entrySet()) {
      if (!result.getValue().isPresent()) {
        // An absent Optional means the property change was applied successfully.
        out.printf("SUCCESS: Changed property %s%n\tFrom: \"%s\"%n\tTo: \"%s\"%n",
            result.getKey().prop, result.getKey().oldVal, result.getKey().newVal);
      } else {
        // A present Optional carries the error message for the failed change.
        final String errorMsg = result.getValue().get();
        out.printf("FAILED: Change property %s%n\tFrom: \"%s\"%n\tTo: \"%s\"%n",
            result.getKey().prop, result.getKey().oldVal, result.getKey().newVal);
        out.println("\tError: " + errorMsg + ".");
      }
    }
  } else {
    return 1;
  }
  return 0;
}
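In the Hadoop snippet, an absent Optional marks a property change that succeeded, while a present value carries the error message for a failed one. A minimal standalone sketch of that convention, assuming only Guava's com.google.common.base.Optional; the validateChange helper and the property name are hypothetical:

import com.google.common.base.Optional;

public class OptionalAsErrorMarker {
  // Hypothetical validator: absent() means the change was accepted,
  // a present value carries the reason it was rejected.
  static Optional<String> validateChange(String property, String newValue) {
    if (newValue == null || newValue.isEmpty()) {
      return Optional.of("empty value is not allowed for " + property);
    }
    return Optional.absent();
  }

  public static void main(String[] args) {
    Optional<String> error = validateChange("dfs.heartbeat.interval", "3");
    if (!error.isPresent()) {
      System.out.println("SUCCESS: change accepted");
    } else {
      System.out.println("FAILED: " + error.get());
    }
  }
}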
Use of com.google.common.base.Optional in project MinecraftForge by MinecraftForge.
The class MultiLayerModel, method process:
@Override
public MultiLayerModel process(ImmutableMap<String, String> customData) {
  ImmutableMap.Builder<Optional<BlockRenderLayer>, ModelResourceLocation> builder = ImmutableMap.builder();
  for (String key : customData.keySet()) {
    if ("base".equals(key)) {
      // Optional.absent() keys the base model, which is not tied to any particular render layer.
      builder.put(Optional.<BlockRenderLayer>absent(), getLocation(customData.get(key)));
    }
    for (BlockRenderLayer layer : BlockRenderLayer.values()) {
      if (layer.toString().equals(key)) {
        builder.put(Optional.of(layer), getLocation(customData.get(key)));
      }
    }
  }
  ImmutableMap<Optional<BlockRenderLayer>, ModelResourceLocation> models = builder.build();
  if (models.isEmpty())
    return INSTANCE;
  return new MultiLayerModel(models);
}
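The Forge model above keys a map with Optional so that a single table can hold a default entry (absent key) next to per-layer entries (present keys). A minimal sketch of the same idea, assuming only Guava; the Layer enum, model names, and fallback lookup are hypothetical:

import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;

public class OptionalMapKey {
  enum Layer { SOLID, CUTOUT, TRANSLUCENT }

  public static void main(String[] args) {
    ImmutableMap<Optional<Layer>, String> models = ImmutableMap.of(
        Optional.<Layer>absent(), "base_model",       // default entry, no specific layer
        Optional.of(Layer.SOLID), "solid_model",
        Optional.of(Layer.TRANSLUCENT), "translucent_model");

    // Look up a layer-specific model, falling back to the base entry.
    Optional<Layer> wanted = Optional.of(Layer.CUTOUT);
    String model = models.containsKey(wanted)
        ? models.get(wanted)
        : models.get(Optional.<Layer>absent());
    System.out.println(model); // prints base_model
  }
}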
Use of com.google.common.base.Optional in project che by eclipse.
The class TestResultViewImpl, method gotoClass:
@Override
public void gotoClass(String packagePath, int line) {
  lastWentLine = line;
  final Project project = appContext.getRootProject();
  String testSrcPath = project.getPath() + "/" + DEFAULT_TEST_SOURCE_FOLDER;
  appContext.getWorkspaceRoot().getFile(testSrcPath + packagePath).then(new Operation<Optional<File>>() {
    @Override
    public void apply(Optional<File> file) throws OperationException {
      if (file.isPresent()) {
        eventBus.fireEvent(FileEvent.createOpenFileEvent(file.get()));
        // Give the editor a moment to open before moving the cursor to the requested line.
        Timer t = new Timer() {
          @Override
          public void run() {
            EditorPartPresenter editorPart = editorAgent.getActiveEditor();
            Document doc = ((TextEditor) editorPart).getDocument();
            doc.setCursorPosition(new TextPosition(lastWentLine - 1, 0));
          }
        };
        t.schedule(500);
      }
    }
  }).catchError(new Operation<PromiseError>() {
    @Override
    public void apply(PromiseError error) throws OperationException {
      Log.info(TestResultViewImpl.class, error);
    }
  });
}
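The isPresent()/get() pair above is the usual way to consume a lookup that may not find a file. A minimal sketch assuming only Guava and java.io.File; the findFile helper and the path are hypothetical:

import com.google.common.base.Optional;
import java.io.File;

public class OptionalLookup {
  // Hypothetical lookup that wraps a possibly-missing result instead of returning null.
  static Optional<File> findFile(String path) {
    File f = new File(path);
    return f.exists() ? Optional.of(f) : Optional.<File>absent();
  }

  public static void main(String[] args) {
    Optional<File> file = findFile("/tmp/FooTest.java");
    if (file.isPresent()) {
      System.out.println("open " + file.get().getAbsolutePath());
    } else {
      System.out.println("file not found, nothing to open");
    }
  }
}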
Use of com.google.common.base.Optional in project che by eclipse.
The class OpenDeclarationFinder, method handleDescriptor:
private void handleDescriptor(final Path projectPath, final OpenDeclarationDescriptor descriptor) {
  final EditorPartPresenter openedEditor = editorAgent.getOpenedEditor(Path.valueOf(descriptor.getPath()));
  if (openedEditor != null) {
    editorAgent.openEditor(openedEditor.getEditorInput().getFile(), new OpenEditorCallbackImpl() {
      @Override
      public void onEditorOpened(EditorPartPresenter editor) {
        setCursorAndActivateEditor(editor, descriptor.getOffset());
      }

      @Override
      public void onEditorActivated(EditorPartPresenter editor) {
        setCursorAndActivateEditor(editor, descriptor.getOffset());
      }
    });
    return;
  }
  if (descriptor.isBinary()) {
    navigationService.getEntry(projectPath, descriptor.getLibId(), descriptor.getPath()).then(new Operation<JarEntry>() {
      @Override
      public void apply(final JarEntry entry) throws OperationException {
        navigationService.getContent(projectPath, descriptor.getLibId(), Path.valueOf(entry.getPath())).then(new Operation<ClassContent>() {
          @Override
          public void apply(ClassContent content) throws OperationException {
            final VirtualFile file = javaNodeFactory.newJarFileNode(entry, descriptor.getLibId(), projectPath, null);
            editorAgent.openEditor(file, new OpenEditorCallbackImpl() {
              @Override
              public void onEditorOpened(final EditorPartPresenter editor) {
                Scheduler.get().scheduleDeferred(new Scheduler.ScheduledCommand() {
                  @Override
                  public void execute() {
                    if (editor instanceof TextEditor) {
                      ((TextEditor) editor).getDocument().setSelectedRange(LinearRange.createWithStart(descriptor.getOffset()).andLength(0), true);
                      editor.activate();
                    }
                  }
                });
              }
            });
          }
        });
      }
    });
  } else {
    appContext.getWorkspaceRoot().getFile(descriptor.getPath()).then(new Operation<Optional<File>>() {
      @Override
      public void apply(Optional<File> file) throws OperationException {
        if (file.isPresent()) {
          editorAgent.openEditor(file.get(), new OpenEditorCallbackImpl() {
            @Override
            public void onEditorOpened(final EditorPartPresenter editor) {
              Scheduler.get().scheduleDeferred(new Scheduler.ScheduledCommand() {
                @Override
                public void execute() {
                  if (editor instanceof TextEditor) {
                    ((TextEditor) editor).getDocument().setSelectedRange(LinearRange.createWithStart(descriptor.getOffset()).andLength(0), true);
                    editor.activate();
                  }
                }
              });
            }
          });
        }
      }
    });
  }
}
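When the absent case only needs a fallback value rather than its own branch, Guava's Optional also offers or() and transform(). A minimal sketch with hypothetical values, shown as an alternative to the explicit isPresent() checks used above:

import com.google.common.base.Function;
import com.google.common.base.Optional;

public class OptionalFallbacks {
  public static void main(String[] args) {
    Optional<String> path = Optional.of("src/main/java/Foo.java");
    Optional<String> missing = Optional.absent();

    // or() supplies a default when the value is absent.
    System.out.println(missing.or("<no file>")); // prints <no file>

    // transform() maps the value only if it is present, keeping absence intact.
    Optional<Integer> length = path.transform(new Function<String, Integer>() {
      @Override
      public Integer apply(String p) {
        return p.length();
      }
    });
    System.out.println(length.or(0)); // prints the path length
  }
}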
Use of com.google.common.base.Optional in project druid by druid-io.
The class CachingClusteredClient, method run:
@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext) {
  final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
  final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);
  final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();
  final List<Pair<Interval, byte[]>> cachedResults = Lists.newArrayList();
  final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();
  final boolean useCache = CacheUtil.useCacheOnBrokers(query, strategy, cacheConfig);
  final boolean populateCache = CacheUtil.populateCacheOnBrokers(query, strategy, cacheConfig);
  final boolean isBySegment = BaseQuery.getContextBySegment(query, false);
  final ImmutableMap.Builder<String, Object> contextBuilder = new ImmutableMap.Builder<>();
  final int priority = BaseQuery.getContextPriority(query, 0);
  contextBuilder.put("priority", priority);
  if (populateCache) {
    // prevent down-stream nodes from caching results as well if we are populating the cache
    contextBuilder.put(CacheConfig.POPULATE_CACHE, false);
    contextBuilder.put("bySegment", true);
  }
  TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());
  if (timeline == null) {
    return Sequences.empty();
  }
  // build set of segments to query
  Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();
  List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();
  // Note that enabling this leads to putting uncovered intervals information in the response headers
  // and might blow up in some cases https://github.com/druid-io/druid/issues/2108
  int uncoveredIntervalsLimit = BaseQuery.getContextUncoveredIntervalsLimit(query, 0);
  if (uncoveredIntervalsLimit > 0) {
    List<Interval> uncoveredIntervals = Lists.newArrayListWithCapacity(uncoveredIntervalsLimit);
    boolean uncoveredIntervalsOverflowed = false;
    for (Interval interval : query.getIntervals()) {
      Iterable<TimelineObjectHolder<String, ServerSelector>> lookup = timeline.lookup(interval);
      long startMillis = interval.getStartMillis();
      long endMillis = interval.getEndMillis();
      for (TimelineObjectHolder<String, ServerSelector> holder : lookup) {
        Interval holderInterval = holder.getInterval();
        long intervalStart = holderInterval.getStartMillis();
        if (!uncoveredIntervalsOverflowed && startMillis != intervalStart) {
          if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
            uncoveredIntervals.add(new Interval(startMillis, intervalStart));
          } else {
            uncoveredIntervalsOverflowed = true;
          }
        }
        startMillis = holderInterval.getEndMillis();
        serversLookup.add(holder);
      }
      if (!uncoveredIntervalsOverflowed && startMillis < endMillis) {
        if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
          uncoveredIntervals.add(new Interval(startMillis, endMillis));
        } else {
          uncoveredIntervalsOverflowed = true;
        }
      }
    }
    if (!uncoveredIntervals.isEmpty()) {
      // This returns intervals for which NO segment is present.
      // Which is not necessarily an indication that the data doesn't exist or is
      // incomplete. The data could exist and just not be loaded yet. In either
      // case, though, this query will not include any data from the identified intervals.
      responseContext.put("uncoveredIntervals", uncoveredIntervals);
      responseContext.put("uncoveredIntervalsOverflowed", uncoveredIntervalsOverflowed);
    }
  } else {
    for (Interval interval : query.getIntervals()) {
      Iterables.addAll(serversLookup, timeline.lookup(interval));
    }
  }
  // Let tool chest filter out unneeded segments
  final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup = toolChest.filterSegments(query, serversLookup);
  Map<String, Optional<RangeSet<String>>> dimensionRangeCache = Maps.newHashMap();
  // Filter unneeded chunks based on partition dimension
  for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
    final Set<PartitionChunk<ServerSelector>> filteredChunks = DimFilterUtils.filterShards(
        query.getFilter(), holder.getObject(), new Function<PartitionChunk<ServerSelector>, ShardSpec>() {
          @Override
          public ShardSpec apply(PartitionChunk<ServerSelector> input) {
            return input.getObject().getSegment().getShardSpec();
          }
        }, dimensionRangeCache);
    for (PartitionChunk<ServerSelector> chunk : filteredChunks) {
      ServerSelector selector = chunk.getObject();
      final SegmentDescriptor descriptor = new SegmentDescriptor(holder.getInterval(), holder.getVersion(), chunk.getChunkNumber());
      segments.add(Pair.of(selector, descriptor));
    }
  }
  final byte[] queryCacheKey;
  if ((populateCache || useCache) // implies strategy != null
      && !isBySegment) { // explicit bySegment queries are never cached
    queryCacheKey = strategy.computeCacheKey(query);
  } else {
    queryCacheKey = null;
  }
  if (query.getContext().get(QueryResource.HDR_IF_NONE_MATCH) != null) {
    String prevEtag = (String) query.getContext().get(QueryResource.HDR_IF_NONE_MATCH);
    // compute current Etag
    Hasher hasher = Hashing.sha1().newHasher();
    boolean hasOnlyHistoricalSegments = true;
    for (Pair<ServerSelector, SegmentDescriptor> p : segments) {
      if (!p.lhs.pick().getServer().isAssignable()) {
        hasOnlyHistoricalSegments = false;
        break;
      }
      hasher.putString(p.lhs.getSegment().getIdentifier(), Charsets.UTF_8);
    }
    if (hasOnlyHistoricalSegments) {
      hasher.putBytes(queryCacheKey == null ? strategy.computeCacheKey(query) : queryCacheKey);
      String currEtag = Base64.encodeBase64String(hasher.hash().asBytes());
      responseContext.put(QueryResource.HDR_ETAG, currEtag);
      if (prevEtag.equals(currEtag)) {
        return Sequences.empty();
      }
    }
  }
  if (queryCacheKey != null) {
    // cacheKeys map must preserve segment ordering, in order for shards to always be combined in the same order
    Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys = Maps.newLinkedHashMap();
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
      final Cache.NamedKey segmentCacheKey = CacheUtil.computeSegmentCacheKey(segment.lhs.getSegment().getIdentifier(), segment.rhs, queryCacheKey);
      cacheKeys.put(segment, segmentCacheKey);
    }
    // Pull cached segments from cache and remove from set of segments to query
    final Map<Cache.NamedKey, byte[]> cachedValues;
    if (useCache) {
      cachedValues = cache.getBulk(Iterables.limit(cacheKeys.values(), cacheConfig.getCacheBulkMergeLimit()));
    } else {
      cachedValues = ImmutableMap.of();
    }
    for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry : cacheKeys.entrySet()) {
      Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
      Cache.NamedKey segmentCacheKey = entry.getValue();
      final Interval segmentQueryInterval = segment.rhs.getInterval();
      final byte[] cachedValue = cachedValues.get(segmentCacheKey);
      if (cachedValue != null) {
        // remove cached segment from set of segments to query
        segments.remove(segment);
        cachedResults.add(Pair.of(segmentQueryInterval, cachedValue));
      } else if (populateCache) {
        // otherwise, if populating cache, add segment to list of segments to cache
        final String segmentIdentifier = segment.lhs.getSegment().getIdentifier();
        cachePopulatorMap.put(String.format("%s_%s", segmentIdentifier, segmentQueryInterval), new CachePopulator(cache, objectMapper, segmentCacheKey));
      }
    }
  }
  // Compile list of all segments not pulled from cache
  for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
    final QueryableDruidServer queryableDruidServer = segment.lhs.pick();
    if (queryableDruidServer == null) {
      log.makeAlert("No servers found for SegmentDescriptor[%s] for DataSource[%s]?! How can this be?!", segment.rhs, query.getDataSource()).emit();
    } else {
      final DruidServer server = queryableDruidServer.getServer();
      List<SegmentDescriptor> descriptors = serverSegments.get(server);
      if (descriptors == null) {
        descriptors = Lists.newArrayList();
        serverSegments.put(server, descriptors);
      }
      descriptors.add(segment.rhs);
    }
  }
  return new LazySequence<>(new Supplier<Sequence<T>>() {
    @Override
    public Sequence<T> get() {
      ArrayList<Sequence<T>> sequencesByInterval = Lists.newArrayList();
      addSequencesFromCache(sequencesByInterval);
      addSequencesFromServer(sequencesByInterval);
      return mergeCachedAndUncachedSequences(query, sequencesByInterval);
    }

    private void addSequencesFromCache(ArrayList<Sequence<T>> listOfSequences) {
      if (strategy == null) {
        return;
      }
      final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
      final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
      for (Pair<Interval, byte[]> cachedResultPair : cachedResults) {
        final byte[] cachedResult = cachedResultPair.rhs;
        Sequence<Object> cachedSequence = new BaseSequence<>(new BaseSequence.IteratorMaker<Object, Iterator<Object>>() {
          @Override
          public Iterator<Object> make() {
            try {
              if (cachedResult.length == 0) {
                return Iterators.emptyIterator();
              }
              return objectMapper.readValues(objectMapper.getFactory().createParser(cachedResult), cacheObjectClazz);
            } catch (IOException e) {
              throw Throwables.propagate(e);
            }
          }

          @Override
          public void cleanup(Iterator<Object> iterFromMake) {
          }
        });
        listOfSequences.add(Sequences.map(cachedSequence, pullFromCacheFunction));
      }
    }
    private void addSequencesFromServer(ArrayList<Sequence<T>> listOfSequences) {
      listOfSequences.ensureCapacity(listOfSequences.size() + serverSegments.size());
      final Query<T> rewrittenQuery = query.withOverriddenContext(contextBuilder.build());
      // The data gets handled as a Future and parsed in the long Sequence chain in the resultSeqToAdd setter.
      for (Map.Entry<DruidServer, List<SegmentDescriptor>> entry : serverSegments.entrySet()) {
        final DruidServer server = entry.getKey();
        final List<SegmentDescriptor> descriptors = entry.getValue();
        final QueryRunner clientQueryable = serverView.getQueryRunner(server);
        if (clientQueryable == null) {
          log.error("WTF!? server[%s] doesn't have a client Queryable?", server);
          continue;
        }
        final MultipleSpecificSegmentSpec segmentSpec = new MultipleSpecificSegmentSpec(descriptors);
        final Sequence<T> resultSeqToAdd;
        if (!server.isAssignable() || !populateCache || isBySegment) {
          // Direct server queryable
          if (!isBySegment) {
            resultSeqToAdd = clientQueryable.run(query.withQuerySegmentSpec(segmentSpec), responseContext);
          } else {
            // bySegment queries need to be de-serialized, see DirectDruidClient.run()
            @SuppressWarnings("unchecked")
            final Query<Result<BySegmentResultValueClass<T>>> bySegmentQuery = (Query<Result<BySegmentResultValueClass<T>>>) ((Query) query);
            @SuppressWarnings("unchecked")
            final Sequence<Result<BySegmentResultValueClass<T>>> resultSequence = clientQueryable.run(bySegmentQuery.withQuerySegmentSpec(segmentSpec), responseContext);
            resultSeqToAdd = (Sequence) Sequences.map(resultSequence, new Function<Result<BySegmentResultValueClass<T>>, Result<BySegmentResultValueClass<T>>>() {
              @Override
              public Result<BySegmentResultValueClass<T>> apply(Result<BySegmentResultValueClass<T>> input) {
                final BySegmentResultValueClass<T> bySegmentValue = input.getValue();
                return new Result<>(
                    input.getTimestamp(),
                    new BySegmentResultValueClass<T>(
                        Lists.transform(bySegmentValue.getResults(), toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing())),
                        bySegmentValue.getSegmentId(),
                        bySegmentValue.getInterval()));
              }
            });
          }
        } else {
          // Requires some manipulation on broker side
          @SuppressWarnings("unchecked")
          final Sequence<Result<BySegmentResultValueClass<T>>> runningSequence = clientQueryable.run(rewrittenQuery.withQuerySegmentSpec(segmentSpec), responseContext);
          resultSeqToAdd = new MergeSequence(query.getResultOrdering(), Sequences.<Result<BySegmentResultValueClass<T>>, Sequence<T>>map(runningSequence, new Function<Result<BySegmentResultValueClass<T>>, Sequence<T>>() {
            private final Function<T, Object> cacheFn = strategy.prepareForCache();

            // Actually do something with the results
            @Override
            public Sequence<T> apply(Result<BySegmentResultValueClass<T>> input) {
              final BySegmentResultValueClass<T> value = input.getValue();
              final CachePopulator cachePopulator = cachePopulatorMap.get(String.format("%s_%s", value.getSegmentId(), value.getInterval()));
              final Queue<ListenableFuture<Object>> cacheFutures = new ConcurrentLinkedQueue<>();
              return Sequences.<T>withEffect(
                  Sequences.<T, T>map(
                      Sequences.<T, T>map(Sequences.<T>simple(value.getResults()), new Function<T, T>() {
                        @Override
                        public T apply(final T input) {
                          if (cachePopulator != null) {
                            // only compute cache data if populating cache
                            cacheFutures.add(backgroundExecutorService.submit(new Callable<Object>() {
                              @Override
                              public Object call() {
                                return cacheFn.apply(input);
                              }
                            }));
                          }
                          return input;
                        }
                      }),
                      toolChest.makePreComputeManipulatorFn(
                          // This casting is sub-optimal, but hasn't caused any major problems yet...
                          (Query) rewrittenQuery, MetricManipulatorFns.deserializing())),
                  new Runnable() {
                    @Override
                    public void run() {
                      if (cachePopulator != null) {
                        Futures.addCallback(Futures.allAsList(cacheFutures), new FutureCallback<List<Object>>() {
                          @Override
                          public void onSuccess(List<Object> cacheData) {
                            cachePopulator.populate(cacheData);
                            // Help out GC by making sure all references are gone
                            cacheFutures.clear();
                          }

                          @Override
                          public void onFailure(Throwable throwable) {
                            log.error(throwable, "Background caching failed");
                          }
                        }, backgroundExecutorService);
                      }
                    }
                  },
                  MoreExecutors.sameThreadExecutor()); // End withEffect
            }
          }));
        }
        listOfSequences.add(resultSeqToAdd);
      }
    }
  });
}
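In the Druid client above, dimensionRangeCache maps a dimension name to Optional<RangeSet<String>>, which lets the cache distinguish a dimension whose ranges have not been computed yet (no map entry) from one where no constraint could be derived (an entry holding an absent Optional). A minimal sketch of that caching convention, assuming only Guava; the rangesFor helper and the dimension names are hypothetical:

import com.google.common.base.Optional;
import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;
import java.util.HashMap;
import java.util.Map;

public class OptionalCacheValue {
  // Hypothetical cache: a missing key means "not computed yet",
  // Optional.absent() means "computed, no constraint for this dimension".
  static final Map<String, Optional<RangeSet<String>>> dimensionRangeCache = new HashMap<>();

  static Optional<RangeSet<String>> rangesFor(String dimension) {
    Optional<RangeSet<String>> cached = dimensionRangeCache.get(dimension);
    if (cached != null) {
      return cached; // cache hit, possibly an absent Optional
    }
    Optional<RangeSet<String>> computed;
    if ("country".equals(dimension)) {
      RangeSet<String> ranges = TreeRangeSet.create();
      ranges.add(Range.singleton("US"));
      computed = Optional.<RangeSet<String>>of(ranges); // present: a concrete constraint
    } else {
      computed = Optional.absent(); // absent: nothing could be derived
    }
    dimensionRangeCache.put(dimension, computed);
    return computed;
  }

  public static void main(String[] args) {
    System.out.println(rangesFor("country").isPresent()); // true
    System.out.println(rangesFor("city").isPresent());    // false, but now cached
  }
}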