Use of java.util.concurrent.CompletableFuture in the project crate (by crate):
class SimplePortal, method sync.
/**
 * Plans and kicks off execution of the portal's statement.
 * <p>
 * Planning failures are recorded via {@code logPreExecutionFailure} and rethrown;
 * successful starts are recorded via {@code logExecutionStart}. For read-only
 * statements the result receiver is wrapped so the statement can be retried.
 *
 * @param planner  used to turn the analyzed statement into an executable plan
 * @param jobsLogs sink for execution start/failure/completion bookkeeping
 * @return a future that completes when the result receiver completes
 */
@Override
public CompletableFuture<?> sync(Planner planner, JobsLogs jobsLogs) {
    UUID jobId = UUID.randomUUID();
    Plan plan;
    try {
        plan = planner.plan(analysis, jobId, defaultLimit, maxRows);
    } catch (Throwable t) {
        // Record the failure before it ever reaches the executor, then propagate.
        jobsLogs.logPreExecutionFailure(jobId, query, SQLExceptions.messageOf(t));
        throw t;
    }
    if (!analysis.analyzedStatement().isWriteOperation()) {
        // Read-only statements are safe to retry; wrap the receiver to allow it.
        resultReceiver = new ResultReceiverRetryWrapper(resultReceiver, this, portalContext.getAnalyzer(), planner, portalContext.getExecutor(), jobId, sessionContext);
    }
    jobsLogs.logExecutionStart(jobId, query);
    JobsLogsUpdateListener jobsLogsUpdateListener = new JobsLogsUpdateListener(jobId, jobsLogs);
    // Fix: use CompletableFuture<?> instead of the raw CompletableFuture type
    // (the raw type triggered an unchecked warning and loses generic safety).
    CompletableFuture<?> completableFuture = resultReceiver.completionFuture().whenComplete(jobsLogsUpdateListener);
    if (!resumeIfSuspended()) {
        // Not resuming a suspended execution: start a fresh one.
        consumer = new BatchConsumerToResultReceiver(resultReceiver, maxRows);
        portalContext.getExecutor().execute(plan, consumer, this.rowParams);
    }
    synced = true;
    return completableFuture;
}
Use of java.util.concurrent.CompletableFuture in the project crate (by crate):
class AlterTableOperation, method executeAlterTable.
/**
 * Executes an ALTER TABLE statement by fanning out the mapping/settings updates
 * it implies and combining them into a single future.
 * <p>
 * Altering an alias that is not a partitioned table is rejected immediately.
 * For partitioned tables, internal settings are filtered before being applied
 * to partitions, while the template always receives the unfiltered parameter.
 *
 * @param analysis the analyzed ALTER TABLE statement
 * @return a future completing with a row count once all updates finish,
 *         or failing if any update fails
 */
public CompletableFuture<Long> executeAlterTable(AlterTableAnalyzedStatement analysis) {
    DocTableInfo tableInfo = analysis.table();
    if (tableInfo.isAlias() && !tableInfo.isPartitioned()) {
        return CompletableFutures.failedFuture(new AlterTableAliasException(tableInfo.ident().fqn()));
    }
    List<CompletableFuture<Long>> futures = new ArrayList<>(3);
    if (!tableInfo.isPartitioned()) {
        // Plain table: apply mappings and settings directly to its index.
        String indexName = tableInfo.ident().indexName();
        futures.add(updateMapping(analysis.tableParameter().mappings(), indexName));
        futures.add(updateSettings(analysis.tableParameter(), indexName));
    } else {
        // Partitioned table: restrict settings to those supported on partitions.
        PartitionedTableParameterInfo settingsInfo = (PartitionedTableParameterInfo) tableInfo.tableParameterInfo();
        TableParameter filteredParameter = new TableParameter(
            analysis.tableParameter().settings(),
            settingsInfo.partitionTableSettingsInfo().supportedInternalSettings());
        Optional<PartitionName> partitionName = analysis.partitionName();
        if (partitionName.isPresent()) {
            // A single partition was addressed explicitly.
            String indexName = partitionName.get().asIndexName();
            futures.add(updateMapping(analysis.tableParameter().mappings(), indexName));
            futures.add(updateSettings(filteredParameter, indexName));
        } else {
            // The template always gets the changes unfiltered.
            futures.add(updateTemplate(analysis.tableParameter(), tableInfo.ident()));
            if (!analysis.excludePartitions()) {
                String[] concreteIndices = tableInfo.concreteIndices();
                futures.add(updateMapping(analysis.tableParameter().mappings(), concreteIndices));
                futures.add(updateSettings(filteredParameter, concreteIndices));
            }
        }
    }
    CompletableFuture<Long> combined = new CompletableFuture<>();
    applyMultiFutureCallback(combined, futures);
    return combined;
}
Use of java.util.concurrent.CompletableFuture in the project crate (by crate):
class RepositoryService, method execute.
/**
 * Drops the repository named in the statement and bridges the async
 * action-listener callback into a {@link CompletableFuture}.
 * <p>
 * An unacknowledged delete is only logged; the future still completes with 1
 * (the affected row count). Failures complete the future exceptionally.
 *
 * @param analyzedStatement the analyzed DROP REPOSITORY statement
 * @return a future completing with {@code 1L} once the delete request returns
 */
public CompletableFuture<Long> execute(DropRepositoryAnalyzedStatement analyzedStatement) {
    final String repositoryName = analyzedStatement.repositoryName();
    final CompletableFuture<Long> result = new CompletableFuture<>();
    DeleteRepositoryRequest request = new DeleteRepositoryRequest(repositoryName);
    deleteRepositoryAction.execute(request, new ActionListener<DeleteRepositoryResponse>() {
        @Override
        public void onResponse(DeleteRepositoryResponse response) {
            // Best-effort: an unacknowledged delete is logged but not treated as an error.
            if (!response.isAcknowledged()) {
                LOGGER.info("delete repository '{}' not acknowledged", repositoryName);
            }
            result.complete(1L);
        }

        @Override
        public void onFailure(Throwable e) {
            result.completeExceptionally(e);
        }
    });
    return result;
}
Use of java.util.concurrent.CompletableFuture in the project crate (by crate):
class DocLevelCollectTest, method collect.
/**
 * Runs the given collect phase on a data node of the test cluster and returns
 * the first (and only) result bucket.
 * <p>
 * NOTE(review): the statement order matters — the job context must be built,
 * registered via createContext and started before the result futures resolve.
 *
 * @param collectNode the collect phase to execute
 * @return the collected bucket of the single node operation
 * @throws Throwable on execution failure or if the result is not ready within 2s
 */
private Bucket collect(RoutedCollectPhase collectNode) throws Throwable {
// Resolve the per-node services needed to prepare and run the job.
ContextPreparer contextPreparer = internalCluster().getDataNodeInstance(ContextPreparer.class);
JobContextService contextService = internalCluster().getDataNodeInstance(JobContextService.class);
SharedShardContexts sharedShardContexts = new SharedShardContexts(internalCluster().getDataNodeInstance(IndicesService.class));
JobExecutionContext.Builder builder = contextService.newBuilder(collectNode.jobId());
// Downstream phase is mocked; only the collect phase itself is exercised.
NodeOperation nodeOperation = NodeOperation.withDownstream(collectNode, mock(ExecutionPhase.class), (byte) 0, "remoteNode");
// One future per prepared operation; here a single operation yields one bucket.
List<CompletableFuture<Bucket>> results = contextPreparer.prepareOnRemote(ImmutableList.of(nodeOperation), builder, sharedShardContexts);
JobExecutionContext context = contextService.createContext(builder);
context.start();
// Bounded wait so a hung collect fails the test instead of blocking forever.
return results.get(0).get(2, TimeUnit.SECONDS);
}
Use of java.util.concurrent.CompletableFuture in the project jetty.project (by eclipse):
class FlowControlStrategyTest, method testServerFlowControlOneBigWrite.
/**
 * Verifies that a server writing one DATA frame larger than the client's
 * INITIAL_WINDOW_SIZE is flow-control stalled until the client consumes data.
 * <p>
 * The client sets a small window (1536) and the server writes 5 windows' worth
 * in a single frame, so delivery is split into window-sized chunks. The test
 * deliberately withholds consumption of the first two chunks (via an Exchanger
 * handoff of the Callback) to observe the stall, then releases them.
 */
@Test
public void testServerFlowControlOneBigWrite() throws Exception {
final int windowSize = 1536;
final int length = 5 * windowSize;
final CountDownLatch settingsLatch = new CountDownLatch(1);
start(new ServerSessionListener.Adapter() {
@Override
public void onSettings(Session session, SettingsFrame frame) {
// Fires when the client's SETTINGS (small window) has been received.
settingsLatch.countDown();
}
@Override
public Stream.Listener onNewStream(Stream stream, HeadersFrame requestFrame) {
MetaData.Response metaData = new MetaData.Response(HttpVersion.HTTP_2, 200, new HttpFields());
HeadersFrame responseFrame = new HeadersFrame(stream.getId(), metaData, null, false);
CompletableFuture<Void> completable = new CompletableFuture<>();
stream.headers(responseFrame, Callback.from(completable));
// After the headers are flushed, write one big DATA frame (5x the window).
completable.thenRun(() -> {
DataFrame dataFrame = new DataFrame(stream.getId(), ByteBuffer.allocate(length), true);
stream.data(dataFrame, Callback.NOOP);
});
return null;
}
});
Session session = newClient(new Session.Listener.Adapter());
// Shrink the client's receive window so the server must split the write.
Map<Integer, Integer> settings = new HashMap<>();
settings.put(SettingsFrame.INITIAL_WINDOW_SIZE, windowSize);
session.settings(new SettingsFrame(settings, false), Callback.NOOP);
Assert.assertTrue(settingsLatch.await(5, TimeUnit.SECONDS));
final CountDownLatch dataLatch = new CountDownLatch(1);
// Hands the un-succeeded Callback from the listener thread to the test thread.
final Exchanger<Callback> exchanger = new Exchanger<>();
MetaData.Request metaData = newRequest("GET", new HttpFields());
HeadersFrame requestFrame = new HeadersFrame(metaData, null, true);
session.newStream(requestFrame, new Promise.Adapter<>(), new Stream.Listener.Adapter() {
private AtomicInteger dataFrames = new AtomicInteger();
@Override
public void onData(Stream stream, DataFrame frame, Callback callback) {
try {
int dataFrames = this.dataFrames.incrementAndGet();
if (dataFrames == 1 || dataFrames == 2) {
// Do not consume the data frame.
// We should then be flow-control stalled.
exchanger.exchange(callback);
} else if (dataFrames == 3 || dataFrames == 4 || dataFrames == 5) {
// Consume totally.
callback.succeeded();
if (frame.isEndStream())
dataLatch.countDown();
} else {
// 5 * windowSize must arrive in exactly 5 window-sized frames.
Assert.fail();
}
} catch (InterruptedException x) {
callback.failed(x);
}
}
});
// Receive the first chunk's callback without consuming it, then assert stall.
Callback callback = exchanger.exchange(null, 5, TimeUnit.SECONDS);
checkThatWeAreFlowControlStalled(exchanger);
// Consume the first chunk.
callback.succeeded();
callback = exchanger.exchange(null, 5, TimeUnit.SECONDS);
checkThatWeAreFlowControlStalled(exchanger);
// Consume the second chunk.
callback.succeeded();
// Remaining chunks are consumed eagerly; wait for the end-of-stream frame.
Assert.assertTrue(dataLatch.await(5, TimeUnit.SECONDS));
}
Aggregations