Use of java.util.concurrent.TimeUnit.NANOSECONDS in project samza by apache.
The class EmbeddedTaggedRateLimiter, method acquire.
@Override
public Map<String, Integer> acquire(Map<String, Integer> tagToCreditsMap, long timeout, TimeUnit unit) {
    ensureTagsAreValid(tagToCreditsMap);
    // Convert the caller's timeout to nanoseconds once, up front.
    long timeoutInNanos = NANOSECONDS.convert(timeout, unit);
    Stopwatch stopwatch = Stopwatch.createStarted();
    return tagToCreditsMap.entrySet().stream().map(e -> {
        String tag = e.getKey();
        int requiredCredits = e.getValue();
        // Each tag only waits for whatever portion of the timeout remains.
        long remainingTimeoutInNanos = Math.max(0L, timeoutInNanos - stopwatch.elapsed(NANOSECONDS));
        com.google.common.util.concurrent.RateLimiter rateLimiter = tagToRateLimiterMap.get(tag);
        int availableCredits = rateLimiter.tryAcquire(requiredCredits, remainingTimeoutInNanos, NANOSECONDS) ? requiredCredits : 0;
        return new ImmutablePair<>(tag, availableCredits);
    }).collect(Collectors.toMap(ImmutablePair::getKey, ImmutablePair::getValue));
}
Use of java.util.concurrent.TimeUnit.NANOSECONDS in project cassandra by apache.
The class StorageProxy, method describeSchemaVersions.
/**
 * Initiate a request/response session with each live node to check whether or not everybody is using the same
 * migration id. This is useful for determining if a schema change has propagated through the cluster. Disagreement
 * is assumed if any node fails to respond.
 */
public static Map<String, List<String>> describeSchemaVersions(boolean withPort) {
    final String myVersion = Schema.instance.getVersion().toString();
    final Map<InetAddressAndPort, UUID> versions = new ConcurrentHashMap<>();
    final Set<InetAddressAndPort> liveHosts = Gossiper.instance.getLiveMembers();
    final CountDownLatch latch = newCountDownLatch(liveHosts.size());
    RequestCallback<UUID> cb = message -> {
        // Record the response from the remote node.
        versions.put(message.from(), message.payload);
        latch.decrement();
    };
    // An empty message acts as a request to the SchemaVersionVerbHandler.
    Message message = out(SCHEMA_VERSION_REQ, noPayload);
    for (InetAddressAndPort endpoint : liveHosts)
        MessagingService.instance().sendWithCallback(message, endpoint, cb);
    try {
        // Wait for as long as possible: up to the RPC timeout, expressed in nanoseconds.
        latch.await(DatabaseDescriptor.getRpcTimeout(NANOSECONDS), NANOSECONDS);
    } catch (InterruptedException e) {
        throw new UncheckedInterruptedException(e);
    }
    // Map each schema version to the hosts that are on that version.
    Map<String, List<String>> results = new HashMap<String, List<String>>();
    Iterable<InetAddressAndPort> allHosts = concat(Gossiper.instance.getLiveMembers(), Gossiper.instance.getUnreachableMembers());
    for (InetAddressAndPort host : allHosts) {
        UUID version = versions.get(host);
        String stringVersion = version == null ? UNREACHABLE : version.toString();
        List<String> hosts = results.get(stringVersion);
        if (hosts == null) {
            hosts = new ArrayList<String>();
            results.put(stringVersion, hosts);
        }
        hosts.add(host.getHostAddress(withPort));
    }
    // We're done: the results map is ready to return to the client; the rest is just debug logging.
    if (results.get(UNREACHABLE) != null)
        logger.debug("Hosts not in agreement. Didn't get a response from everybody: {}", join(results.get(UNREACHABLE), ","));
    for (Map.Entry<String, List<String>> entry : results.entrySet()) {
        // Check for version disagreement and log the hosts that don't agree.
        if (entry.getKey().equals(UNREACHABLE) || entry.getKey().equals(myVersion))
            continue;
        for (String host : entry.getValue())
            logger.debug("{} disagrees ({})", host, entry.getKey());
    }
    if (results.size() == 1)
        logger.debug("Schemas are in agreement.");
    return results;
}
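Here NANOSECONDS appears in the wait for the fan-out: the method sends a schema-version request to every live node, then blocks on a latch for at most the RPC timeout and treats any node that misses the deadline as unreachable. A standalone sketch of that wait using the JDK CountDownLatch (Cassandra's own latch uses decrement() rather than countDown()); the responder count and the two-second timeout are made up:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

public class LatchTimeoutDemo {
    public static void main(String[] args) throws InterruptedException {
        int liveHosts = 3;                                   // pretend three peers are live
        CountDownLatch latch = new CountDownLatch(liveHosts);
        ExecutorService pool = Executors.newFixedThreadPool(liveHosts);

        for (int i = 0; i < liveHosts; i++) {
            // Stand-in for a schema-version response arriving from a peer.
            pool.execute(latch::countDown);
        }

        // Wait up to the "RPC timeout", expressed in nanoseconds; false means someone didn't answer in time.
        long rpcTimeoutNanos = TimeUnit.SECONDS.toNanos(2);
        boolean allResponded = latch.await(rpcTimeoutNanos, NANOSECONDS);
        System.out.println("all responded: " + allResponded);
        pool.shutdown();
    }
}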
Use of java.util.concurrent.TimeUnit.NANOSECONDS in project mongo-java-driver by mongodb.
The class DefaultConnectionPool, method getAsync.
@Override
public void getAsync(final SingleResultCallback<InternalConnection> callback) {
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace(format("Asynchronously getting a connection from the pool for server %s", serverId));
    }
    connectionPoolListener.connectionCheckOutStarted(new ConnectionCheckOutStartedEvent(serverId));
    Timeout timeout = Timeout.startNow(settings.getMaxWaitTime(NANOSECONDS));
    SingleResultCallback<InternalConnection> eventSendingCallback = (result, failure) -> {
        SingleResultCallback<InternalConnection> errHandlingCallback = errorHandlingCallback(callback, LOGGER);
        if (failure == null) {
            connectionPoolListener.connectionCheckedOut(new ConnectionCheckedOutEvent(getId(result)));
            errHandlingCallback.onResult(result, null);
        } else {
            errHandlingCallback.onResult(null, checkOutFailed(failure));
        }
    };
    try {
        stateAndGeneration.throwIfClosedOrPaused();
    } catch (RuntimeException e) {
        eventSendingCallback.onResult(null, e);
        return;
    }
    asyncWorkManager.enqueue(new Task(timeout, t -> {
        if (t != null) {
            eventSendingCallback.onResult(null, t);
        } else {
            PooledConnection connection;
            try {
                connection = getPooledConnection(timeout);
            } catch (RuntimeException e) {
                eventSendingCallback.onResult(null, e);
                return;
            }
            if (connection.opened()) {
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace(format("Pooled connection %s to server %s is already open", getId(connection), serverId));
                }
                eventSendingCallback.onResult(connection, null);
            } else {
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace(format("Pooled connection %s to server %s is not yet open", getId(connection), serverId));
                }
                openConcurrencyLimiter.openAsyncWithConcurrencyLimit(connection, timeout, eventSendingCallback);
            }
        }
    }));
}
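The checkout captures one deadline up front (Timeout.startNow(settings.getMaxWaitTime(NANOSECONDS))) and threads it through the asynchronous hand-offs, so queueing for a turn, picking a pooled connection, and opening it all share the same maxWaitTime budget. Timeout is an internal driver helper, so the sketch below only approximates the idea with a plain System.nanoTime() deadline; the class, stage names and durations are invented:

import java.util.concurrent.TimeUnit;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

public class DeadlineDemo {
    public static void main(String[] args) throws InterruptedException {
        // One budget for the whole "checkout", captured up front (here: 100 ms).
        long maxWaitNanos = TimeUnit.MILLISECONDS.toNanos(100);
        long deadline = System.nanoTime() + maxWaitNanos;

        waitInQueue(deadline);       // hypothetical stage 1: wait for a turn
        openConnection(deadline);    // hypothetical stage 2: open with whatever budget is left
    }

    static void waitInQueue(long deadline) throws InterruptedException {
        Thread.sleep(30);            // simulate time spent queued
        checkRemaining(deadline, "queue");
    }

    static void openConnection(long deadline) throws InterruptedException {
        Thread.sleep(30);            // simulate time spent opening
        checkRemaining(deadline, "open");
    }

    static void checkRemaining(long deadline, String stage) {
        long remaining = deadline - System.nanoTime();
        if (remaining <= 0) {
            throw new IllegalStateException("timed out during " + stage);
        }
        System.out.printf("%s done, %d ms left%n", stage, NANOSECONDS.toMillis(remaining));
    }
}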
Use of java.util.concurrent.TimeUnit.NANOSECONDS in project aries by apache.
The class AbstractPushStreamImpl, method window.
@Override
public <R> PushStream<R> window(Supplier<Duration> time, IntSupplier maxEvents, Executor ex, BiFunction<Long, Collection<T>, R> f) {
    AtomicLong timestamp = new AtomicLong();
    AtomicLong counter = new AtomicLong();
    Object lock = new Object();
    AtomicReference<Queue<T>> queueRef = new AtomicReference<Queue<T>>(null);
    // This code is declared as a separate block to avoid any confusion
    // about which instance's methods and variables are in scope
    Consumer<AbstractPushStreamImpl<R>> begin = p -> {
        synchronized (lock) {
            timestamp.lazySet(System.nanoTime());
            long count = counter.get();
            scheduler.schedule(getWindowTask(p, f, time, maxEvents, lock, count, queueRef, timestamp, counter, ex), time.get().toNanos(), NANOSECONDS);
        }
        queueRef.set(getQueueForInternalBuffering(maxEvents.getAsInt()));
    };
    @SuppressWarnings("resource")
    AbstractPushStreamImpl<R> eventStream = new IntermediatePushStreamImpl<R>(psp, ex, scheduler, this) {
        @Override
        protected void beginning() {
            begin.accept(this);
        }
    };
    AtomicBoolean endPending = new AtomicBoolean(false);
    updateNext((event) -> {
        try {
            if (eventStream.closed.get() == CLOSED) {
                return ABORT;
            }
            Queue<T> queue;
            if (!event.isTerminal()) {
                long elapsed;
                long newCount;
                synchronized (lock) {
                    for (;;) {
                        queue = queueRef.get();
                        if (queue == null) {
                            if (endPending.get()) {
                                return ABORT;
                            } else {
                                continue;
                            }
                        } else if (queue.offer(event.getData())) {
                            return CONTINUE;
                        } else {
                            queueRef.lazySet(null);
                            break;
                        }
                    }
                    long now = System.nanoTime();
                    elapsed = now - timestamp.get();
                    timestamp.lazySet(now);
                    newCount = counter.get() + 1;
                    counter.lazySet(newCount);
                    // This is a non-blocking call, and must happen in the
                    // synchronized block to avoid re-ordering the executor
                    // enqueue with a subsequent incoming close operation
                    aggregateAndForward(f, eventStream, event, queue, ex, elapsed);
                }
                // These must happen outside the synchronized block as we
                // call out to user code
                queueRef.set(getQueueForInternalBuffering(maxEvents.getAsInt()));
                scheduler.schedule(getWindowTask(eventStream, f, time, maxEvents, lock, newCount, queueRef, timestamp, counter, ex), time.get().toNanos(), NANOSECONDS);
                return CONTINUE;
            } else {
                long elapsed;
                synchronized (lock) {
                    queue = queueRef.get();
                    queueRef.lazySet(null);
                    endPending.set(true);
                    long now = System.nanoTime();
                    elapsed = now - timestamp.get();
                    counter.lazySet(counter.get() + 1);
                }
                Collection<T> collected = queue == null ? emptyList() : queue;
                ex.execute(() -> {
                    try {
                        eventStream.handleEvent(PushEvent.data(f.apply(Long.valueOf(NANOSECONDS.toMillis(elapsed)), collected)));
                    } catch (Exception e) {
                        close(PushEvent.error(e));
                    }
                });
            }
            ex.execute(() -> eventStream.handleEvent(event.nodata()));
            return ABORT;
        } catch (Exception e) {
            close(PushEvent.error(e));
            return ABORT;
        }
    });
    return eventStream;
}
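Two of the NANOSECONDS usages carry the windowing logic: each window task is scheduled with scheduler.schedule(..., time.get().toNanos(), NANOSECONDS), and the window's actual length is measured as a System.nanoTime() delta and handed to the aggregation function in milliseconds via NANOSECONDS.toMillis(elapsed). A small sketch of that measure-then-schedule pattern, with an invented 200 ms window and a println standing in for the aggregation:

import java.time.Duration;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.atomic.AtomicLong;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

public class WindowScheduleDemo {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        Duration window = Duration.ofMillis(200);          // invented window length
        AtomicLong windowStart = new AtomicLong(System.nanoTime());

        scheduler.schedule(() -> {
            // Measure how long the window actually lasted and report it in milliseconds,
            // mirroring f.apply(NANOSECONDS.toMillis(elapsed), collected) in the stream code.
            long elapsed = System.nanoTime() - windowStart.get();
            System.out.println("window closed after " + NANOSECONDS.toMillis(elapsed) + " ms");
        }, window.toNanos(), NANOSECONDS);

        Thread.sleep(300);
        scheduler.shutdown();
    }
}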
Use of java.util.concurrent.TimeUnit.NANOSECONDS in project vert.x by eclipse.
The class HAManager, method simulateKill.
public void simulateKill() {
    if (!stopped) {
        killed = true;
        CountDownLatch latch = new CountDownLatch(1);
        Promise<Void> promise = Promise.promise();
        clusterManager.leave(promise);
        promise.future().onFailure(t -> log.error("Failed to leave cluster", t)).onComplete(ar -> latch.countDown());
        long timerID = checkQuorumTimerID;
        if (timerID >= 0L) {
            checkQuorumTimerID = -1L;
            vertx.cancelTimer(timerID);
        }
        vertx.cancelTimer(quorumTimerID);
        // Wait up to one minute for the cluster leave to complete, without propagating interrupts.
        boolean interrupted = false;
        try {
            long remainingNanos = MINUTES.toNanos(1);
            long end = System.nanoTime() + remainingNanos;
            while (true) {
                try {
                    latch.await(remainingNanos, NANOSECONDS);
                    break;
                } catch (InterruptedException e) {
                    // Keep waiting, but only for the part of the minute that is left.
                    interrupted = true;
                    remainingNanos = end - System.nanoTime();
                }
            }
        } finally {
            if (interrupted) {
                // Restore the interrupt status swallowed in the loop above.
                Thread.currentThread().interrupt();
            }
        }
        stopped = true;
    }
}
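The wait here is the usual uninterruptible-await-with-deadline idiom: compute an absolute end time from System.nanoTime(), loop on latch.await(remainingNanos, NANOSECONDS), shrink the remaining budget after each InterruptedException, and restore the interrupt flag once the wait is over. The same idiom extracted into a standalone helper (the class and method names are ours; the one-minute budget matches the snippet):

import java.util.concurrent.CountDownLatch;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

public final class Awaits {
    /** Waits up to one minute without propagating interrupts, then restores the interrupt flag. */
    public static boolean awaitOneMinuteUninterruptibly(CountDownLatch latch) {
        boolean interrupted = false;
        try {
            long remainingNanos = MINUTES.toNanos(1);
            long end = System.nanoTime() + remainingNanos;
            while (true) {
                try {
                    return latch.await(remainingNanos, NANOSECONDS);
                } catch (InterruptedException e) {
                    interrupted = true;
                    // Only wait for whatever part of the minute is left.
                    remainingNanos = end - System.nanoTime();
                }
            }
        } finally {
            if (interrupted) {
                Thread.currentThread().interrupt();
            }
        }
    }
}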