Use of com.google.common.util.concurrent.SettableFuture in project hale by halestudio.
The class CompilingSourceViewer, method init().
@Override
protected void init() {
    compileJob = new Job("Compile") {

        @Override
        public boolean shouldRun() {
            return compilationEnabled;
        }

        @Override
        public boolean shouldSchedule() {
            return compilationEnabled;
        }

        @Override
        protected IStatus run(IProgressMonitor monitor) {
            String content;
            changeLock.lock();
            try {
                if (!changed) {
                    return Status.OK_STATUS;
                }
                IDocument doc = getDocument();
                if (doc != null) {
                    content = doc.get();
                } else {
                    content = null;
                }
                changed = false;
            } finally {
                changeLock.unlock();
            }
            C result = null;
            if (content != null) {
                try {
                    // this is the potentially long running stuff
                    result = compile(content);
                } catch (Exception e) {
                    // ignore, but log
                    log.warn("Error compiling document content", e);
                }
            }
            boolean notify = false;
            C previous = null;
            changeLock.lock();
            try {
                /*
                 * Only notify listeners if the document was not changed in
                 * the meantime.
                 */
                notify = !changed;
                if (notify) {
                    // set result
                    previous = compiled;
                    compiled = result;
                    // set result for futures
                    for (SettableFuture<C> future : toUpdate) {
                        future.set(result);
                    }
                    toUpdate.clear();
                }
            } finally {
                changeLock.unlock();
            }
            if (notify) {
                // notify listeners
                PropertyChangeEvent event = new PropertyChangeEvent(CompilingSourceViewer.this,
                        PROPERTY_COMPILED, previous, result);
                notifyOnPropertyChange(event);
            }
            return Status.OK_STATUS;
        }
    };
    compileJob.setSystem(true);
    compileJob.setRule(new ExclusiveSchedulingRule(compileJob));
    super.init();
}
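The essential SettableFuture pattern here is handing a result to callers that asked for it before compilation finished: each caller receives a SettableFuture, and the background job completes all pending futures under the same lock that guards the dirty flag. Below is a minimal, self-contained sketch of that idea; the class and method names are illustrative, not part of the hale API.

```java
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical holder mirroring the compile-job idea: callers request the next
// result via a future; a worker completes every pending future once it has a value.
class PendingResults<T> {
    private final ReentrantLock lock = new ReentrantLock();
    private final List<SettableFuture<T>> waiting = new ArrayList<>();

    // Called by consumers that want the next computed value.
    ListenableFuture<T> nextResult() {
        lock.lock();
        try {
            SettableFuture<T> future = SettableFuture.create();
            waiting.add(future);
            return future;
        } finally {
            lock.unlock();
        }
    }

    // Called by the background worker when a computation finishes.
    void publish(T result) {
        lock.lock();
        try {
            for (SettableFuture<T> future : waiting) {
                future.set(result); // completes get() for every waiting caller
            }
            waiting.clear();
        } finally {
            lock.unlock();
        }
    }
}
```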
Use of com.google.common.util.concurrent.SettableFuture in project airlift by airlift.
The class BenchmarkWhenAnyCompleteCancelOthers, method benchmark().
@Benchmark
public void benchmark() throws Exception {
    Semaphore semaphore = new Semaphore(futureCount);
    ArrayList<SettableFuture<?>> futures = new ArrayList<>();
    for (int i = 0; i < futureCount; i++) {
        SettableFuture<?> future = SettableFuture.create();
        future.addListener(() -> semaphore.release(1), directExecutor());
        futures.add(future);
    }
    ListenableFuture<?> anyComplete = whenAnyCompleteCancelOthers(futures);
    futures.get(futureCount / 2).set(null);
    semaphore.acquireUninterruptibly(futureCount);
    anyComplete.get();
}
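The benchmark leans on a plain Guava fact: a listener registered with addListener(..., directExecutor()) runs as soon as the future completes, whether it was set or cancelled, so a Semaphore can count completions across the whole batch. Here is a small standalone sketch of just that mechanism; futureCount and the explicit cancellation loop are stand-ins for the benchmark setup, not the airlift code itself.

```java
import com.google.common.util.concurrent.SettableFuture;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Semaphore;

import static com.google.common.util.concurrent.MoreExecutors.directExecutor;

public class CompletionCounting {
    public static void main(String[] args) {
        int futureCount = 8; // stand-in for the benchmark parameter
        Semaphore done = new Semaphore(0);
        List<SettableFuture<Object>> futures = new ArrayList<>();
        for (int i = 0; i < futureCount; i++) {
            SettableFuture<Object> future = SettableFuture.create();
            // Fires on any completion (set or cancel), on the completing thread.
            future.addListener(() -> done.release(), directExecutor());
            futures.add(future);
        }
        // Complete one future normally, then cancel the rest, roughly what the
        // "cancel others" helper does once any future finishes.
        futures.get(futureCount / 2).set(new Object());
        for (SettableFuture<Object> future : futures) {
            future.cancel(true); // no-op on the already-completed future
        }
        // Every future is now complete, so every listener has released a permit.
        done.acquireUninterruptibly(futureCount);
        System.out.println("all " + futureCount + " futures completed");
    }
}
```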
Use of com.google.common.util.concurrent.SettableFuture in project workflow-cps-plugin by jenkinsci.
The class CpsFlowExecution, method onLoad().
@Override
@SuppressFBWarnings(value = "RC_REF_COMPARISON_BAD_PRACTICE_BOOLEAN", justification = "We want to explicitly check for boolean not-null and true")
public void onLoad(FlowExecutionOwner owner) throws IOException {
    this.owner = owner;
    try {
        try {
            // Throws exception and bombs out if we can't load FlowNodes
            initializeStorage();
        } catch (Exception ex) {
            LOGGER.log(Level.WARNING, "Error initializing storage and loading nodes, will try to create placeholders for: " + this, ex);
            createPlaceholderNodes(ex);
            return;
        }
    } catch (Exception ex) {
        done = true;
        programPromise = Futures.immediateFailedFuture(ex);
        throw new IOException("Failed to even create placeholder nodes for execution", ex);
    }
    try {
        if (isComplete()) {
            if (done == Boolean.TRUE && !super.isComplete()) {
                LOGGER.log(Level.INFO, "Completed flow without FlowEndNode: " + this + " heads:" + getHeadsAsString());
            }
            if (super.isComplete() && done != Boolean.TRUE) {
                LOGGER.log(Level.FINE, "Flow has FlowEndNode, but is not marked as done, fixing this for " + this);
                done = true;
                saveOwner();
            }
        } else {
            // See if we can/should resume build
            if (canResume()) {
                loadProgramAsync(getProgramDataFile());
            } else {
                // TODO if possible, consider trying to close out unterminated blocks to keep existing graph history
                // That way we can visualize the graph in some error cases.
                LOGGER.log(Level.WARNING, "Pipeline state not properly persisted, cannot resume " + owner.getUrl());
                throw new IOException("Cannot resume build -- was not cleanly saved when Jenkins shut down.");
            }
        }
    } catch (Exception e) {
        // Broad catch ensures that failure to load does NOT nuke the controller
        SettableFuture<CpsThreadGroup> p = SettableFuture.create();
        programPromise = p;
        loadProgramFailed(e, p);
    } finally {
        if (programPromise == null) {
            programPromise = Futures.immediateFailedFuture(new IllegalStateException("completed or broken execution"));
        }
    }
}
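Two ways of producing an already-doomed promise appear here: Futures.immediateFailedFuture for failures known up front, and a SettableFuture handed to the error path so it can be failed (or completed) later. The condensed illustration below shows that choice in isolation; the field and method names are placeholders, not the plugin's API.

```java
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

class ProgramLoader {
    // Promise for the eventually-loaded program state; consumers may grab it at any time.
    volatile ListenableFuture<String> programPromise;

    void load(boolean storageBroken) {
        if (storageBroken) {
            // Failure known immediately: publish an already-failed future.
            programPromise = Futures.immediateFailedFuture(
                    new IllegalStateException("storage could not be initialized"));
            return;
        }
        // Otherwise expose a promise now and let the load complete or fail it later.
        SettableFuture<String> promise = SettableFuture.create();
        programPromise = promise;
        try {
            promise.set(loadProgram()); // placeholder for the real asynchronous load
        } catch (Exception e) {
            promise.setException(e); // consumers blocked on get() observe the failure
        }
    }

    private String loadProgram() {
        return "program";
    }
}
```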
Use of com.google.common.util.concurrent.SettableFuture in project druid by druid-io.
The class CachingClusteredClientTest, method testOutOfOrderBackgroundCachePopulation().
@Test
public void testOutOfOrderBackgroundCachePopulation() throws Exception {
    // A DrainTask is used to trigger the actual execution when we are ready to shuffle the order.
    abstract class DrainTask implements Runnable {
    }
    final ForwardingListeningExecutorService randomizingExecutorService = new ForwardingListeningExecutorService() {

        final ConcurrentLinkedDeque<Pair<SettableFuture, Object>> taskQueue = new ConcurrentLinkedDeque<>();

        // Same-thread delegate so that queued tasks are complete before moving on to the next query run.
        final ListeningExecutorService delegate = MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());

        @Override
        protected ListeningExecutorService delegate() {
            return delegate;
        }

        private <T> ListenableFuture<T> maybeSubmitTask(Object task, boolean wait) {
            if (wait) {
                SettableFuture<T> future = SettableFuture.create();
                taskQueue.addFirst(Pair.<SettableFuture, Object>of(future, task));
                return future;
            } else {
                List<Pair<SettableFuture, Object>> tasks = Lists.newArrayList(taskQueue.iterator());
                Collections.shuffle(tasks, new Random(0));
                for (final Pair<SettableFuture, Object> pair : tasks) {
                    ListenableFuture future = pair.rhs instanceof Callable
                            ? delegate.submit((Callable) pair.rhs)
                            : delegate.submit((Runnable) pair.rhs);
                    Futures.addCallback(future, new FutureCallback() {
                        @Override
                        public void onSuccess(@Nullable Object result) {
                            pair.lhs.set(result);
                        }

                        @Override
                        public void onFailure(Throwable t) {
                            pair.lhs.setException(t);
                        }
                    });
                }
            }
            return task instanceof Callable ? delegate.submit((Callable) task) : (ListenableFuture<T>) delegate.submit((Runnable) task);
        }

        @Override
        public <T> ListenableFuture<T> submit(Callable<T> task) {
            return maybeSubmitTask(task, true);
        }

        @Override
        public ListenableFuture<?> submit(Runnable task) {
            if (task instanceof DrainTask) {
                return maybeSubmitTask(task, false);
            } else {
                return maybeSubmitTask(task, true);
            }
        }
    };
    client = makeClient(randomizingExecutorService);
    // callback to be run every time a query run is complete, to ensure all background
    // caching tasks are executed, and cache is populated before we move onto the next query
    queryCompletedCallback = new Runnable() {

        @Override
        public void run() {
            try {
                randomizingExecutorService.submit(new DrainTask() {

                    @Override
                    public void run() {
                        // no-op
                    }
                }).get();
            } catch (Exception e) {
                Throwables.propagate(e);
            }
        }
    };
    final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
            .dataSource(DATA_SOURCE)
            .intervals(SEG_SPEC)
            .filters(DIM_FILTER)
            .granularity(GRANULARITY)
            .aggregators(AGGS)
            .postAggregators(POST_AGGS)
            .context(CONTEXT);
    QueryRunner runner = new FinalizeResultsQueryRunner(
            client,
            new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator())
    );
    testQueryCaching(
            runner,
            builder.build(),
            new Interval("2011-01-05/2011-01-10"),
            makeTimeResults(
                    new DateTime("2011-01-05"), 85, 102,
                    new DateTime("2011-01-06"), 412, 521,
                    new DateTime("2011-01-07"), 122, 21894,
                    new DateTime("2011-01-08"), 5, 20,
                    new DateTime("2011-01-09"), 18, 521
            ),
            new Interval("2011-01-10/2011-01-13"),
            makeTimeResults(
                    new DateTime("2011-01-10"), 85, 102,
                    new DateTime("2011-01-11"), 412, 521,
                    new DateTime("2011-01-12"), 122, 21894
            )
    );
}
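The executor trick above boils down to: park a task, return a SettableFuture immediately, and later bridge the real execution back to that future with Futures.addCallback. Below is a stripped-down version of just that bridge. It uses the three-argument addCallback overload with directExecutor(), which newer Guava versions expect; the class and method names are illustrative, not Druid's.

```java
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.Executors;

class DeferringExecutor {
    private final ListeningExecutorService delegate =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    private final ConcurrentLinkedDeque<Runnable> deferred = new ConcurrentLinkedDeque<>();

    // Returns a future right away; the task itself only runs when drain() is called.
    <T> ListenableFuture<T> submitDeferred(Callable<T> task) {
        SettableFuture<T> result = SettableFuture.create();
        deferred.add(() -> Futures.addCallback(delegate.submit(task), new FutureCallback<T>() {
            @Override
            public void onSuccess(T value) {
                result.set(value); // forward success to the future handed out earlier
            }

            @Override
            public void onFailure(Throwable t) {
                result.setException(t); // forward failure the same way
            }
        }, MoreExecutors.directExecutor()));
        return result;
    }

    // Actually runs everything that was parked, completing the earlier futures.
    void drain() {
        Runnable next;
        while ((next = deferred.poll()) != null) {
            next.run();
        }
    }
}
```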
Use of com.google.common.util.concurrent.SettableFuture in project druid by druid-io.
The class DirectDruidClientTest, method testRun().
@Test
public void testRun() throws Exception {
    HttpClient httpClient = EasyMock.createMock(HttpClient.class);
    final URL url = new URL("http://foo/druid/v2/");
    SettableFuture<InputStream> futureResult = SettableFuture.create();
    Capture<Request> capturedRequest = EasyMock.newCapture();
    EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
            .andReturn(futureResult)
            .times(1);
    SettableFuture futureException = SettableFuture.create();
    EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
            .andReturn(futureException)
            .times(1);
    EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
            .andReturn(SettableFuture.create())
            .atLeastOnce();
    EasyMock.replay(httpClient);
    final ServerSelector serverSelector = new ServerSelector(
            new DataSegment(
                    "test",
                    new Interval("2013-01-01/2013-01-02"),
                    new DateTime("2013-01-01").toString(),
                    Maps.<String, Object>newHashMap(),
                    Lists.<String>newArrayList(),
                    Lists.<String>newArrayList(),
                    NoneShardSpec.instance(),
                    0,
                    0L
            ),
            new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy())
    );
    DirectDruidClient client1 = new DirectDruidClient(
            new ReflectionQueryToolChestWarehouse(),
            QueryRunnerTestHelper.NOOP_QUERYWATCHER,
            new DefaultObjectMapper(),
            httpClient,
            "foo",
            new NoopServiceEmitter()
    );
    DirectDruidClient client2 = new DirectDruidClient(
            new ReflectionQueryToolChestWarehouse(),
            QueryRunnerTestHelper.NOOP_QUERYWATCHER,
            new DefaultObjectMapper(),
            httpClient,
            "foo2",
            new NoopServiceEmitter()
    );
    QueryableDruidServer queryableDruidServer1 = new QueryableDruidServer(
            new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
            client1
    );
    serverSelector.addServerAndUpdateSegment(queryableDruidServer1, serverSelector.getSegment());
    QueryableDruidServer queryableDruidServer2 = new QueryableDruidServer(
            new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
            client2
    );
    serverSelector.addServerAndUpdateSegment(queryableDruidServer2, serverSelector.getSegment());
    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
    HashMap<String, List> context = Maps.newHashMap();
    Sequence s1 = client1.run(query, context);
    Assert.assertTrue(capturedRequest.hasCaptured());
    Assert.assertEquals(url, capturedRequest.getValue().getUrl());
    Assert.assertEquals(HttpMethod.POST, capturedRequest.getValue().getMethod());
    Assert.assertEquals(1, client1.getNumOpenConnections());
    // simulate read timeout
    Sequence s2 = client1.run(query, context);
    Assert.assertEquals(2, client1.getNumOpenConnections());
    futureException.setException(new ReadTimeoutException());
    Assert.assertEquals(1, client1.getNumOpenConnections());
    // subsequent connections should work
    Sequence s3 = client1.run(query, context);
    Sequence s4 = client1.run(query, context);
    Sequence s5 = client1.run(query, context);
    Assert.assertTrue(client1.getNumOpenConnections() == 4);
    // produce result for first connection
    futureResult.set(new ByteArrayInputStream("[{\"timestamp\":\"2014-01-01T01:02:03Z\", \"result\": 42.0}]".getBytes()));
    List<Result> results = Sequences.toList(s1, Lists.<Result>newArrayList());
    Assert.assertEquals(1, results.size());
    Assert.assertEquals(new DateTime("2014-01-01T01:02:03Z"), results.get(0).getTimestamp());
    Assert.assertEquals(3, client1.getNumOpenConnections());
    client2.run(query, context);
    client2.run(query, context);
    Assert.assertTrue(client2.getNumOpenConnections() == 2);
    Assert.assertTrue(serverSelector.pick() == queryableDruidServer2);
    EasyMock.verify(httpClient);
}
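What makes SettableFuture useful in this test is control over when the mocked HTTP call resolves: the test can assert on in-flight state (open connection counts), then call set() or setException() to push a pending call to success or failure. Here is a minimal version of the same technique against a hypothetical async interface, with no EasyMock; all names are made up for illustration.

```java
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;

public class ControlledResponseTest {

    // Hypothetical async client: the test controls the future it returns.
    interface AsyncClient {
        ListenableFuture<String> fetch(String path);
    }

    public static void main(String[] args) throws Exception {
        SettableFuture<String> response = SettableFuture.create();
        AsyncClient stub = path -> response; // every call returns the controllable future

        ListenableFuture<String> inFlight = stub.fetch("/druid/v2/");
        // The "request" is open but unresolved: assertions on in-flight state go here.
        if (inFlight.isDone()) {
            throw new AssertionError("call should still be in flight");
        }

        // Drive the outcome from the test: success...
        response.set("[{\"result\": 42.0}]");
        System.out.println(inFlight.get());

        // ...or, with a fresh future, a simulated read timeout.
        SettableFuture<String> failing = SettableFuture.create();
        AsyncClient failingStub = path -> failing;
        ListenableFuture<String> doomed = failingStub.fetch("/druid/v2/");
        failing.setException(new TimeoutException("simulated read timeout"));
        try {
            doomed.get();
        } catch (ExecutionException expected) {
            System.out.println("failed as expected: " + expected.getCause());
        }
    }
}
```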