Use of io.vertx.core.Future in the project gravitee-gateway by gravitee-io.
The invoke method of the class FailoverInvoker.
@Override
public Request invoke(ExecutionContext executionContext, Request serverRequest, ReadStream<Buffer> stream, Handler<ProxyConnection> connectionHandler) {
    // Wrap the incoming request so a failover retry can replay it.
    final Request failoverServerRequest = new FailoverRequest(serverRequest);

    circuitBreaker.<ProxyConnection>execute(event ->
        invoker.invoke(executionContext, failoverServerRequest, stream, proxyConnection -> {
            // Propagate upstream errors into the circuit breaker attempt.
            proxyConnection.exceptionHandler(event::fail);
            proxyConnection.responseHandler(response ->
                event.complete(new FailoverProxyConnection(proxyConnection, response)));
        })
    ).setHandler(event -> {
        if (event.failed()) {
            // Every attempt failed: hand the caller a connection that answers 502.
            FailoverConnection connection = new FailoverConnection();
            connectionHandler.handle(connection);
            connection.sendBadGatewayResponse();
        } else {
            FailoverProxyConnection proxyConnection = (FailoverProxyConnection) event.result();
            connectionHandler.handle(proxyConnection);
            proxyConnection.sendResponse();
        }
    });

    return failoverServerRequest;
}
Use of io.vertx.core.Future in the project raml-module-builder by folio-org.
The parallel method of the class PostgresClientIT.
@Test
public void parallel(TestContext context) {
    // number of parallel queries
    final int queryCount = 20;
    // sleep time in milliseconds per query
    final double sleepMillis = 150;
    final String selectSleep = "select pg_sleep(" + sleepMillis / 1000 + ")";
    // maximum duration in milliseconds for the completion of all parallel queries
    final long maxDuration = (long) (queryCount * sleepMillis / 2);

    /* Fire queryCount sleeping queries concurrently. If vert.x really
     * processes them in parallel, the total wall time stays below half
     * of what sequential execution would need.
     */
    Async async = context.async();
    PostgresClient client = PostgresClient.getInstance(vertx);

    List<Future> futures = new ArrayList<>(queryCount);
    for (int i = 0; i < queryCount; i++) {
        Future<ResultSet> future = Future.future();
        client.select(selectSleep, future.completer());
        futures.add(future);
    }

    long start = System.currentTimeMillis();
    CompositeFuture.all(futures).setHandler(result -> {
        long duration = System.currentTimeMillis() - start;
        client.closeClient(whenDone -> {
        });
        context.assertTrue(result.succeeded());
        context.assertTrue(duration < maxDuration,
            "duration must be less than " + maxDuration + " ms, it is " + duration + " ms");
        async.complete();
    });
}
Use of io.vertx.core.Future in the project strimzi by strimzi.
The changeReplicationFactor method of the class ControllerAssignedKafkaImpl.
@Override
public void changeReplicationFactor(Topic topic, Handler<AsyncResult<Void>> handler) {
    LOGGER.info("Changing replication factor of topic {} to {}", topic.getTopicName(), topic.getNumReplicas());
    final String zookeeper = config.get(Config.ZOOKEEPER_CONNECT);

    // Step 1: generate the reassignment JSON on a worker thread and write it to a temp file.
    Future<File> generateFuture = Future.future();
    vertx.executeBlocking(fut -> {
        try {
            LOGGER.debug("Generating reassignment json for topic {}", topic.getTopicName());
            String reassignment = generateReassignment(topic, zookeeper);
            LOGGER.debug("Reassignment json for topic {}: {}", topic.getTopicName(), reassignment);
            File reassignmentJsonFile = createTmpFile("-reassignment.json");
            try (Writer w = new OutputStreamWriter(new FileOutputStream(reassignmentJsonFile), StandardCharsets.UTF_8)) {
                w.write(reassignment);
            }
            fut.complete(reassignmentJsonFile);
        } catch (Exception e) {
            fut.fail(e);
        }
    }, generateFuture.completer());

    // Step 2: once generated, start the reassignment (also blocking work).
    Future<File> executeFuture = Future.future();
    generateFuture.compose(reassignmentJsonFile -> {
        vertx.executeBlocking(fut -> {
            final Long throttle = config.get(Config.REASSIGN_THROTTLE);
            try {
                LOGGER.debug("Starting reassignment for topic {} with throttle {}", topic.getTopicName(), throttle);
                executeReassignment(reassignmentJsonFile, zookeeper, throttle);
                fut.complete(reassignmentJsonFile);
            } catch (Exception e) {
                fut.fail(e);
            }
        }, executeFuture.completer());
    }, executeFuture);

    // Step 3: poll with --verify until the reassignment completes (which also
    // removes the throttle) or the timeout elapses.
    // periodicFuture completes as soon as the periodic timer is installed;
    // reassignmentFinishedFuture completes/fails when verification ends.
    Future<Void> periodicFuture = Future.future();
    Future<Void> reassignmentFinishedFuture = Future.future();
    executeFuture.compose(reassignmentJsonFile -> {
        long timeout = 10_000;
        long first = System.currentTimeMillis();
        final Long periodMs = config.get(Config.REASSIGN_VERIFY_INTERVAL_MS);
        LOGGER.debug("Verifying reassignment every {} seconds", TimeUnit.SECONDS.convert(periodMs, TimeUnit.MILLISECONDS));
        vertx.setPeriodic(periodMs, timerId -> vertx.<Boolean>executeBlocking(fut -> {
            // Fixed: previous code passed an SLF4J "{}" placeholder through
            // String.format, printing a literal "{}" and dropping timerId.
            LOGGER.debug("Verifying reassignment for topic {} (timer id={})", topic.getTopicName(), timerId);
            final Long throttle = config.get(Config.REASSIGN_THROTTLE);
            final boolean reassignmentComplete;
            try {
                reassignmentComplete = verifyReassignment(reassignmentJsonFile, zookeeper, throttle);
            } catch (Exception e) {
                fut.fail(e);
                return;
            }
            fut.complete(reassignmentComplete);
        }, ar -> {
            if (ar.succeeded()) {
                if (ar.result()) {
                    LOGGER.info("Reassignment complete");
                    delete(reassignmentJsonFile);
                    LOGGER.debug("Cancelling timer " + timerId);
                    vertx.cancelTimer(timerId);
                    reassignmentFinishedFuture.complete();
                } else if (System.currentTimeMillis() - first > timeout) {
                    LOGGER.error("Reassignment timed out");
                    delete(reassignmentJsonFile);
                    LOGGER.debug("Cancelling timer " + timerId);
                    vertx.cancelTimer(timerId);
                    reassignmentFinishedFuture.fail("Timeout");
                }
            } else {
                // Verification errors are transient: log and retry on the next tick
                // rather than failing the overall operation.
                // reassignmentFinishedFuture.fail(ar.cause());
                LOGGER.error("Error while verifying reassignment", ar.cause());
            }
        }));
        periodicFuture.complete();
    }, periodicFuture);

    // Report success only when both the timer was set up and verification finished.
    CompositeFuture.all(periodicFuture, reassignmentFinishedFuture).map((Void) null).setHandler(handler);
    // TODO The algorithm should really be more like this:
    // 1. Use the cmdline tool to generate an assignment
    // 2. Set the throttles
    // 3. Update the reassign_partitions znode
    // 4. Watch for changes or deletion of reassign_partitions
    //   a. Update the throttles
    //   b. complete the handler
    // Doing this is much better because means we don't have to batch reassignments
    // and also means we need less state for reassignment
    // though we aren't relieved of the statefullness wrt removing throttles :-(
}
Use of io.vertx.core.Future in the project vertx-openshift-it by cescoffier.
The setupRouter method of the class LocksVerticle.
private Router setupRouter() {
    final Router router = Router.router(vertx);

    // Acquire the named lock, hold it for two seconds, release it,
    // then report the outcome of the whole sequence to the client.
    router.get("/locks/:name").handler(rc -> {
        String lockName = rc.pathParam("name");
        getLock(lockName)
            .compose(lock -> {
                Future<Void> released = Future.future();
                vertx.setTimer(2000, timerId -> {
                    lock.release();
                    released.complete();
                });
                return released;
            })
            .setHandler(ar -> sendResponse(rc, ar));
    });

    // Simple liveness probe.
    router.get("/health").handler(rc ->
        rc.response().putHeader(HttpHeaders.CONTENT_TYPE, "text/plain").end("OK"));

    return router;
}
Use of io.vertx.core.Future in the project georocket by georocket.
The start method of the class MetadataVerticle.
@Override
public void start(Future<Void> startFuture) {
    // load and copy all indexer factories now and not lazily to avoid
    // concurrent modifications to the service loader's internal cache
    indexerFactories = ImmutableList.copyOf(ServiceLoader.load(IndexerFactory.class));
    queryCompiler = createQueryCompiler();
    queryCompiler.setQueryCompilers(indexerFactories);

    // Create the Elasticsearch client, make sure index and mapping exist,
    // then start consuming messages; any failure fails the start future.
    new ElasticsearchClientFactory(vertx)
        .createElasticsearchClient(INDEX_NAME)
        .doOnNext(esClient -> client = esClient)
        .flatMap(created -> client.ensureIndex())
        .flatMap(indexed -> ensureMapping())
        .subscribe(mapped -> {
            registerMessageConsumers();
            startFuture.complete();
        }, startFuture::fail);
}
Aggregations