Use of org.apache.ignite.internal.util.typedef.CI1 in project ignite by apache.
The class GridJobExecutionSingleNodeSemaphoreLoadTest, method runTest.
/**
* Runs the actual load test.
*
* @param g Grid.
* @param threadCnt Number of threads.
* @param taskCnt Number of tasks.
* @param dur Test duration.
* @param iterCntr Iteration counter.
*/
private static void runTest(final Ignite g, int threadCnt, int taskCnt, long dur, final LongAdder8 iterCntr) {
    final Semaphore sem = new Semaphore(taskCnt);

    final IgniteInClosure<IgniteFuture> lsnr = new CI1<IgniteFuture>() {
        @Override public void apply(IgniteFuture t) {
            sem.release();
        }
    };

    GridLoadTestUtils.runMultithreadedInLoop(new Callable<Object>() {
        @Nullable @Override public Object call() throws Exception {
            sem.acquire();

            g.compute().executeAsync(GridJobExecutionLoadTestTask.class, null).listen(lsnr);

            iterCntr.increment();

            return null;
        }
    }, threadCnt, dur > 0 ? dur : Long.MAX_VALUE);
}
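In this test the CI1 listener simply releases a semaphore permit when a task future completes, so at most taskCnt tasks are ever in flight. Below is a minimal, self-contained sketch of the same backpressure pattern using only java.util.concurrent; it needs no Ignite cluster, and the class name ThrottledSubmitSketch, the pool size, and the permit count are illustrative assumptions, not part of the Ignite code above.

import java.util.concurrent.*;

public class ThrottledSubmitSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        Semaphore sem = new Semaphore(16); // Caps in-flight tasks, like new Semaphore(taskCnt) above.

        for (int i = 0; i < 100; i++) {
            sem.acquire(); // Blocks until a permit frees up, like sem.acquire() in call().

            CompletableFuture
                .runAsync(() -> { /* simulated task work */ }, pool)
                .whenComplete((res, err) -> sem.release()); // Completion listener: the role CI1#apply plays.
        }

        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }
}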
Use of org.apache.ignite.internal.util.typedef.CI1 in project ignite by apache.
The class GridDhtPartitionDemander, method addAssignments.
/**
 * Initiates a new rebalance process from the given {@code assignments}.
 * If the previous rebalance is not finished, this method cancels it.
 * In the case of delayed rebalance, the method schedules a new one with the configured delay.
 *
 * @param assignments Assignments.
 * @param force {@code True} if this is a dummy reassign.
 * @param rebalanceId Rebalance id.
 * @param next Runnable responsible for cache rebalancing start.
 * @param forcedRebFut External future for forced rebalance.
 * @return Rebalancing runnable.
 */
Runnable addAssignments(final GridDhtPreloaderAssignments assignments, boolean force, long rebalanceId,
    final Runnable next, @Nullable final GridCompoundFuture<Boolean, Boolean> forcedRebFut) {
    if (log.isDebugEnabled())
        log.debug("Adding partition assignments: " + assignments);

    assert force == (forcedRebFut != null);

    long delay = grp.config().getRebalanceDelay();

    if ((delay == 0 || force) && assignments != null) {
        final RebalanceFuture oldFut = rebalanceFut;

        final RebalanceFuture fut = new RebalanceFuture(grp, assignments, log, rebalanceId);

        if (!oldFut.isInitial())
            oldFut.cancel();
        else
            fut.listen(f -> oldFut.onDone(f.result()));

        if (forcedRebFut != null)
            forcedRebFut.add(fut);

        rebalanceFut = fut;

        for (final GridCacheContext cctx : grp.caches()) {
            if (cctx.statisticsEnabled()) {
                final CacheMetricsImpl metrics = cctx.cache().metrics0();

                metrics.clearRebalanceCounters();

                metrics.startRebalance(0);

                rebalanceFut.listen(f -> metrics.clearRebalanceCounters());
            }
        }

        fut.sendRebalanceStartedEvent();

        if (assignments.cancelled()) {
            // Pending exchange.
            if (log.isDebugEnabled())
                log.debug("Rebalancing skipped due to cancelled assignments.");

            fut.onDone(false);

            fut.sendRebalanceFinishedEvent();

            return null;
        }

        if (assignments.isEmpty()) {
            // Nothing to rebalance.
            if (log.isDebugEnabled())
                log.debug("Rebalancing skipped due to empty assignments.");

            fut.onDone(true);

            ((GridFutureAdapter)grp.preloader().syncFuture()).onDone();

            fut.sendRebalanceFinishedEvent();

            return null;
        }

        return () -> {
            if (next != null)
                fut.listen(f -> {
                    try {
                        if (f.get()) // Not cancelled.
                            next.run(); // Starts next cache rebalancing (according to the order).
                    }
                    catch (IgniteCheckedException e) {
                        if (log.isDebugEnabled())
                            log.debug(e.getMessage());
                    }
                });

            requestPartitions(fut, assignments);
        };
    }
    else if (delay > 0) {
        for (GridCacheContext cctx : grp.caches()) {
            if (cctx.statisticsEnabled()) {
                final CacheMetricsImpl metrics = cctx.cache().metrics0();

                metrics.startRebalance(delay);
            }
        }

        GridTimeoutObject obj = lastTimeoutObj.get();

        if (obj != null)
            ctx.time().removeTimeoutObject(obj);

        final GridDhtPartitionsExchangeFuture exchFut = lastExchangeFut;

        assert exchFut != null : "Delaying rebalance process without topology event.";

        obj = new GridTimeoutObjectAdapter(delay) {
            @Override public void onTimeout() {
                exchFut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
                    @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> f) {
                        ctx.exchange().forceRebalance(exchFut.exchangeId());
                    }
                });
            }
        };

        lastTimeoutObj.set(obj);

        ctx.time().addTimeoutObject(obj);
    }

    return null;
}
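The delay > 0 branch shows a recurring pattern: remember the last scheduled timeout object, remove it before scheduling a replacement, and on timeout attach a CI1 listener to the last exchange future so rebalancing is forced only after that exchange completes. A rough equivalent can be sketched with ScheduledExecutorService and CompletableFuture, as below; DelayedRebalanceSketch and its member names are hypothetical stand-ins for lastTimeoutObj and ctx.time(), not Ignite APIs.

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicReference;

public class DelayedRebalanceSketch {
    private final ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
    private final AtomicReference<ScheduledFuture<?>> lastTimeout = new AtomicReference<>();

    /** Schedules a delayed force-rebalance, replacing any previously scheduled one. */
    void schedule(long delayMs, CompletableFuture<?> lastExchangeFut, Runnable forceRebalance) {
        // On timeout, wait for the last exchange to complete, then trigger rebalancing
        // (the role of GridTimeoutObjectAdapter.onTimeout() plus exchFut.listen(...) above).
        ScheduledFuture<?> task = timer.schedule(
            () -> { lastExchangeFut.thenRun(forceRebalance); }, delayMs, TimeUnit.MILLISECONDS);

        ScheduledFuture<?> prev = lastTimeout.getAndSet(task);

        if (prev != null)
            prev.cancel(false); // Mirrors ctx.time().removeTimeoutObject(obj).
    }
}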
Use of org.apache.ignite.internal.util.typedef.CI1 in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method waitAndReplyToNode.
/**
* @param nodeId Node ID.
* @param msg Client's message.
*/
public void waitAndReplyToNode(final UUID nodeId, final GridDhtPartitionsSingleMessage msg) {
    if (log.isDebugEnabled())
        log.debug("Single message will be handled on completion of exchange future: " + this);

    listen(failureHandlerWrapper(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
        @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
            if (cctx.kernalContext().isStopping())
                return;

            // If a cache change failure message was already sent, there is no need
            // to create and send GridDhtPartitionsFullMessage.
            if (cacheChangeFailureMsgSent)
                return;

            FinishState finishState0;

            synchronized (mux) {
                finishState0 = finishState;
            }

            if (finishState0 == null) {
                assert firstDiscoEvt.type() == EVT_NODE_JOINED && firstDiscoEvt.eventNode().isClient() :
                    GridDhtPartitionsExchangeFuture.this;

                ClusterNode node = cctx.node(nodeId);

                if (node == null) {
                    if (log.isDebugEnabled())
                        log.debug("No node found for nodeId: " + nodeId +
                            ", handling of single message will be stopped: " + msg);

                    return;
                }

                // Named fullMsg to avoid shadowing the single-message parameter.
                GridDhtPartitionsFullMessage fullMsg = createPartitionsMessage(true,
                    node.version().compareToIgnoreTimestamp(PARTIAL_COUNTERS_MAP_SINCE) >= 0);

                fullMsg.rebalanced(rebalanced());

                finishState0 = new FinishState(cctx.localNodeId(), initialVersion(), fullMsg);
            }

            sendAllPartitionsToNode(finishState0, msg, nodeId);
        }
    }));
}
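The method defers the reply until the exchange future completes by registering a CI1 callback rather than blocking the caller. The same defer-until-done shape can be stripped down to a CompletableFuture sketch; DeferredReplySketch, waitAndReply, and replySender are hypothetical names used only for illustration.

import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public class DeferredReplySketch {
    /** Defers the reply to a node until the exchange future completes,
     *  the role played by listen(new CI1<...>() {...}) above. */
    static <V> void waitAndReply(CompletableFuture<V> exchangeFut, UUID nodeId, String msg,
        BiConsumer<UUID, String> replySender) {
        // Runs only once the exchange is done, like CI1#apply(fut).
        exchangeFut.thenAccept(topVer ->
            replySender.accept(nodeId, "reply to '" + msg + "' at topology " + topVer));
    }
}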
Use of org.apache.ignite.internal.util.typedef.CI1 in project ignite by apache.
The class GridDhtPreloader, method request0.
/**
* @param cctx Cache context.
* @param keys Keys to request.
* @param topVer Topology version.
* @return Future for request.
*/
@SuppressWarnings({"unchecked", "RedundantCast"})
private GridDhtFuture<Object> request0(GridCacheContext cctx, Collection<KeyCacheObject> keys,
    AffinityTopologyVersion topVer) {
    if (cctx.isNear())
        cctx = cctx.near().dht().context();

    final GridDhtForceKeysFuture<?, ?> fut = new GridDhtForceKeysFuture<>(cctx, topVer, keys);

    IgniteInternalFuture<?> topReadyFut = cctx.affinity().affinityReadyFuturex(topVer);

    if (startFut.isDone() && topReadyFut == null)
        fut.init();
    else {
        if (topReadyFut == null)
            startFut.listen(new CI1<IgniteInternalFuture<?>>() {
                @Override public void apply(IgniteInternalFuture<?> syncFut) {
                    ctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() {
                        @Override public void run() {
                            fut.init();
                        }
                    });
                }
            });
        else {
            GridCompoundFuture<Object, Object> compound = new GridCompoundFuture<>();

            compound.add((IgniteInternalFuture<Object>)startFut);
            compound.add((IgniteInternalFuture<Object>)topReadyFut);

            compound.markInitialized();

            compound.listen(new CI1<IgniteInternalFuture<?>>() {
                @Override public void apply(IgniteInternalFuture<?> syncFut) {
                    fut.init();
                }
            });
        }
    }

    return (GridDhtFuture)fut;
}
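Note the three cases: initialize immediately when the start future is done and the topology is already ready, listen on the start future alone when only it is pending, or combine both pending futures in a GridCompoundFuture and initialize once the compound completes. The same ready-or-listen decision can be sketched with CompletableFuture, where allOf plays the role of the compound future; InitWhenReadySketch and initWhenReady are illustrative names.

import java.util.concurrent.CompletableFuture;

public class InitWhenReadySketch {
    /** Initializes immediately when both prerequisites are done; otherwise attaches
     *  a listener to their combination, as GridCompoundFuture does above. */
    static void initWhenReady(CompletableFuture<?> startFut, CompletableFuture<?> topReadyFut, Runnable init) {
        if (startFut.isDone() && topReadyFut.isDone())
            init.run(); // Fast path: everything is ready, initialize synchronously.
        else
            CompletableFuture.allOf(startFut, topReadyFut).thenRun(init); // Init once both complete.
    }
}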
Use of org.apache.ignite.internal.util.typedef.CI1 in project ignite by apache.
The class GridDhtPartitionDemander, method addAssignments.
/**
 * This method initiates a new rebalance process from the given {@code assignments} by creating a new
 * rebalance future based on them. It cancels the previous rebalance future and sends a rebalance started
 * event. In the case of delayed rebalance, the method schedules a new one with the configured delay,
 * based on {@code lastExchangeFut}.
 *
 * @param assignments Assignments to process.
 * @param force {@code True} if preload was requested by {@link ForceRebalanceExchangeTask}.
 * @param rebalanceId Rebalance id generated from the exchange thread.
 * @param next The next rebalance routine in the chain.
 * @param forcedRebFut External future for forced rebalance.
 * @param compatibleRebFut Future for waiting for compatible rebalances.
 *
 * @return Rebalancing future, or {@code null} to exclude an assignment from the chain.
 */
@Nullable RebalanceFuture addAssignments(final GridDhtPreloaderAssignments assignments, boolean force,
    long rebalanceId, final RebalanceFuture next,
    @Nullable final GridCompoundFuture<Boolean, Boolean> forcedRebFut,
    GridCompoundFuture<Boolean, Boolean> compatibleRebFut) {
    if (log.isDebugEnabled())
        log.debug("Adding partition assignments: " + assignments);

    assert force == (forcedRebFut != null);

    long delay = grp.config().getRebalanceDelay();

    if (delay == 0 || force) {
        assert assignments != null;

        final RebalanceFuture oldFut = rebalanceFut;

        if (assignments.cancelled()) {
            // Pending exchange.
            if (log.isDebugEnabled())
                log.debug("Rebalancing skipped due to cancelled assignments.");

            return null;
        }

        if (assignments.isEmpty()) {
            // Nothing to rebalance.
            if (log.isDebugEnabled())
                log.debug("Rebalancing skipped due to empty assignments.");

            if (oldFut.isInitial())
                oldFut.onDone(true);
            else if (!oldFut.isDone())
                oldFut.tryCancel();

            ((GridFutureAdapter)grp.preloader().syncFuture()).onDone();

            return null;
        }

        // Check if ongoing rebalancing is compatible with a new assignment.
        if (!force && (!oldFut.isDone() || oldFut.result()) && oldFut.compatibleWith(assignments)) {
            if (!oldFut.isDone())
                compatibleRebFut.add(oldFut);

            return null;
        }

        // Cancel ongoing rebalancing.
        if (!oldFut.isDone() && !oldFut.isInitial())
            oldFut.tryCancel();

        // Partition states cannot be changed from now on by previous incompatible rebalancing.
        // Retain only moving partitions. Assignment can become empty as a result.
        // Delayed partition owning happens in the exchange worker as well, so no race with delayed owning here.
        assignments.retainMoving(grp.topology());

        // Skip rebalanced group.
        if (assignments.isEmpty())
            return null;

        final RebalanceFuture fut = new RebalanceFuture(grp, lastExchangeFut, assignments, log, rebalanceId,
            next, lastCancelledTime);

        if (oldFut.isInitial())
            fut.listen(f -> oldFut.onDone(f.result()));

        if (forcedRebFut != null)
            forcedRebFut.add(fut);

        rebalanceFut = fut;

        for (final GridCacheContext cctx : grp.caches()) {
            if (cctx.statisticsEnabled()) {
                final CacheMetricsImpl metrics = cctx.cache().metrics0();

                metrics.clearRebalanceCounters();

                for (GridDhtPartitionDemandMessage msg : assignments.values()) {
                    for (Integer partId : msg.partitions().fullSet())
                        metrics.onRebalancingKeysCountEstimateReceived(grp.topology().globalPartSizes().get(partId));

                    CachePartitionPartialCountersMap histMap = msg.partitions().historicalMap();

                    for (int i = 0; i < histMap.size(); i++) {
                        long from = histMap.initialUpdateCounterAt(i);
                        long to = histMap.updateCounterAt(i);

                        metrics.onRebalancingKeysCountEstimateReceived(to - from);
                    }
                }

                metrics.startRebalance(0);
            }
        }

        fut.sendRebalanceStartedEvent();

        return fut;
    }
    else if (delay > 0) {
        for (GridCacheContext cctx : grp.caches()) {
            if (cctx.statisticsEnabled()) {
                final CacheMetricsImpl metrics = cctx.cache().metrics0();

                metrics.startRebalance(delay);
            }
        }

        GridTimeoutObject obj = lastTimeoutObj.get();

        if (obj != null)
            ctx.time().removeTimeoutObject(obj);

        final GridDhtPartitionsExchangeFuture exchFut = lastExchangeFut;

        assert exchFut != null : "Delaying rebalance process without topology event.";

        obj = new GridTimeoutObjectAdapter(delay) {
            @Override public void onTimeout() {
                exchFut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
                    @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> f) {
                        ctx.exchange().forceRebalance(exchFut.exchangeId());
                    }
                });
            }
        };

        lastTimeoutObj.set(obj);

        ctx.time().addTimeoutObject(obj);
    }

    return null;
}
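The metrics loop estimates rebalanced key counts: fully rebalanced partitions contribute their global partition size, while historical partitions contribute the update-counter delta to - from. A small sketch of that per-partition arithmetic follows; the method and parameter names are hypothetical, mirroring histMap.initialUpdateCounterAt(i) and histMap.updateCounterAt(i) above.

/** Sketch of the estimate computed in the historical-map loop: each historical
 *  partition contributes (updateCounter - initialUpdateCounter) keys. */
static long estimateHistoricalKeys(long[] initialUpdateCntrs, long[] updateCntrs) {
    long estimate = 0;

    for (int i = 0; i < updateCntrs.length; i++)
        estimate += updateCntrs[i] - initialUpdateCntrs[i]; // The 'to - from' delta per partition.

    return estimate;
}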