Use of org.apache.ignite.internal.processors.cache.CacheMetricsImpl in project ignite by apache.
The class GridDhtAtomicCache, method start.
/**
 * {@inheritDoc}
 */
@SuppressWarnings({"IfMayBeConditional", "SimplifiableIfStatement"})
@Override public void start() throws IgniteCheckedException {
    super.start();

    // Cache metrics delegate to the near cache metrics when a near cache is configured.
    CacheMetricsImpl m = new CacheMetricsImpl(ctx);

    if (ctx.dht().near() != null)
        m.delegate(ctx.dht().near().metrics0());

    metrics = m;

    // Register one handler per concrete message class for this cache's traffic.
    ctx.io().addCacheHandler(ctx.cacheId(), GridNearGetRequest.class, new CI2<UUID, GridNearGetRequest>() {
        @Override public void apply(UUID nodeId, GridNearGetRequest req) {
            processNearGetRequest(nodeId, req);
        }
    });

    ctx.io().addCacheHandler(ctx.cacheId(), GridNearSingleGetRequest.class, new CI2<UUID, GridNearSingleGetRequest>() {
        @Override public void apply(UUID nodeId, GridNearSingleGetRequest req) {
            processNearSingleGetRequest(nodeId, req);
        }
    });

    ctx.io().addCacheHandler(ctx.cacheId(), GridNearAtomicAbstractUpdateRequest.class, new CI2<UUID, GridNearAtomicAbstractUpdateRequest>() {
        @Override public void apply(UUID nodeId, GridNearAtomicAbstractUpdateRequest req) {
            processNearAtomicUpdateRequest(nodeId, req);
        }

        @Override public String toString() {
            return "GridNearAtomicAbstractUpdateRequest handler [msgIdx=" + GridNearAtomicAbstractUpdateRequest.CACHE_MSG_IDX + ']';
        }
    });

    ctx.io().addCacheHandler(ctx.cacheId(), GridNearAtomicUpdateResponse.class, new CI2<UUID, GridNearAtomicUpdateResponse>() {
        @Override public void apply(UUID nodeId, GridNearAtomicUpdateResponse res) {
            processNearAtomicUpdateResponse(nodeId, res);
        }

        @Override public String toString() {
            return "GridNearAtomicUpdateResponse handler [msgIdx=" + GridNearAtomicUpdateResponse.CACHE_MSG_IDX + ']';
        }
    });

    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtAtomicAbstractUpdateRequest.class, new CI2<UUID, GridDhtAtomicAbstractUpdateRequest>() {
        @Override public void apply(UUID nodeId, GridDhtAtomicAbstractUpdateRequest req) {
            processDhtAtomicUpdateRequest(nodeId, req);
        }

        @Override public String toString() {
            return "GridDhtAtomicUpdateRequest handler [msgIdx=" + GridDhtAtomicUpdateRequest.CACHE_MSG_IDX + ']';
        }
    });

    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtAtomicUpdateResponse.class, new CI2<UUID, GridDhtAtomicUpdateResponse>() {
        @Override public void apply(UUID nodeId, GridDhtAtomicUpdateResponse res) {
            processDhtAtomicUpdateResponse(nodeId, res);
        }

        @Override public String toString() {
            return "GridDhtAtomicUpdateResponse handler [msgIdx=" + GridDhtAtomicUpdateResponse.CACHE_MSG_IDX + ']';
        }
    });

    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtAtomicDeferredUpdateResponse.class, new CI2<UUID, GridDhtAtomicDeferredUpdateResponse>() {
        @Override public void apply(UUID nodeId, GridDhtAtomicDeferredUpdateResponse res) {
            processDhtAtomicDeferredUpdateResponse(nodeId, res);
        }

        @Override public String toString() {
            return "GridDhtAtomicDeferredUpdateResponse handler [msgIdx=" + GridDhtAtomicDeferredUpdateResponse.CACHE_MSG_IDX + ']';
        }
    });

    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtAtomicNearResponse.class, new CI2<UUID, GridDhtAtomicNearResponse>() {
        @Override public void apply(UUID uuid, GridDhtAtomicNearResponse msg) {
            processDhtAtomicNearResponse(uuid, msg);
        }

        @Override public String toString() {
            return "GridDhtAtomicNearResponse handler [msgIdx=" + GridDhtAtomicNearResponse.CACHE_MSG_IDX + ']';
        }
    });

    ctx.io().addCacheHandler(ctx.cacheId(), GridNearAtomicCheckUpdateRequest.class, new CI2<UUID, GridNearAtomicCheckUpdateRequest>() {
        @Override public void apply(UUID uuid, GridNearAtomicCheckUpdateRequest msg) {
            processCheckUpdateRequest(uuid, msg);
        }

        @Override public String toString() {
            return "GridNearAtomicCheckUpdateRequest handler [msgIdx=" + GridNearAtomicCheckUpdateRequest.CACHE_MSG_IDX + ']';
        }
    });

    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtForceKeysRequest.class, new MessageHandler<GridDhtForceKeysRequest>() {
        @Override public void onMessage(ClusterNode node, GridDhtForceKeysRequest msg) {
            processForceKeysRequest(node, msg);
        }
    });

    ctx.io().addCacheHandler(ctx.cacheId(), GridDhtForceKeysResponse.class, new MessageHandler<GridDhtForceKeysResponse>() {
        @Override public void onMessage(ClusterNode node, GridDhtForceKeysResponse msg) {
            processForceKeyResponse(node, msg);
        }
    });

    // Without a configured near cache, get responses are processed by the DHT cache itself.
    if (near == null) {
        ctx.io().addCacheHandler(ctx.cacheId(), GridNearGetResponse.class, new CI2<UUID, GridNearGetResponse>() {
            @Override public void apply(UUID nodeId, GridNearGetResponse res) {
                processNearGetResponse(nodeId, res);
            }
        });

        ctx.io().addCacheHandler(ctx.cacheId(), GridNearSingleGetResponse.class, new CI2<UUID, GridNearSingleGetResponse>() {
            @Override public void apply(UUID nodeId, GridNearSingleGetResponse res) {
                processNearSingleGetResponse(nodeId, res);
            }
        });
    }
}
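
The registrations above follow a per-message-class dispatch pattern: each concrete request or response type gets its own closure, and the I/O manager routes an incoming message to the handler registered for its class. The following is a minimal stand-alone sketch of that idea using plain JDK types; MessageDispatcher, addHandler and onMessage are hypothetical names for illustration, not the real Ignite API.

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BiConsumer;

class MessageDispatcher {
    private final Map<Class<?>, BiConsumer<UUID, Object>> handlers = new ConcurrentHashMap<>();

    // Register a handler for one concrete message class.
    <T> void addHandler(Class<T> msgCls, BiConsumer<UUID, T> handler) {
        handlers.put(msgCls, (nodeId, msg) -> handler.accept(nodeId, msgCls.cast(msg)));
    }

    // Route an incoming message to the handler registered for its exact class.
    void onMessage(UUID nodeId, Object msg) {
        BiConsumer<UUID, Object> h = handlers.get(msg.getClass());

        if (h != null)
            h.accept(nodeId, msg);
        else
            System.err.println("No handler for " + msg.getClass().getSimpleName());
    }

    public static void main(String[] args) {
        MessageDispatcher io = new MessageDispatcher();

        // String stands in for a request type such as GridNearGetRequest.
        io.addHandler(String.class, (nodeId, req) -> System.out.println("Get request from " + nodeId + ": " + req));

        io.onMessage(UUID.randomUUID(), "key-42");
    }
}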
The class GridDhtPartitionDemander, method addAssignments.
/**
 * Initiates a new rebalance process from the given {@code assignments}.
 * If the previous rebalance is not finished, this method cancels it.
 * In the case of delayed rebalance, the method schedules a new one with the configured delay.
 *
 * @param assignments Assignments.
 * @param force {@code True} if dummy reassign.
 * @param rebalanceId Rebalance id.
 * @param next Runnable responsible for cache rebalancing start.
 * @param forcedRebFut External future for forced rebalance.
 * @return Rebalancing runnable.
 */
Runnable addAssignments(final GridDhtPreloaderAssignments assignments,
    boolean force,
    long rebalanceId,
    final Runnable next,
    @Nullable final GridCompoundFuture<Boolean, Boolean> forcedRebFut) {
    if (log.isDebugEnabled())
        log.debug("Adding partition assignments: " + assignments);

    assert force == (forcedRebFut != null);

    long delay = grp.config().getRebalanceDelay();

    if ((delay == 0 || force) && assignments != null) {
        final RebalanceFuture oldFut = rebalanceFut;

        final RebalanceFuture fut = new RebalanceFuture(grp, assignments, log, rebalanceId);

        if (!oldFut.isInitial())
            oldFut.cancel();
        else
            fut.listen(f -> oldFut.onDone(f.result()));

        if (forcedRebFut != null)
            forcedRebFut.add(fut);

        rebalanceFut = fut;

        for (final GridCacheContext cctx : grp.caches()) {
            if (cctx.statisticsEnabled()) {
                final CacheMetricsImpl metrics = cctx.cache().metrics0();

                metrics.clearRebalanceCounters();

                metrics.startRebalance(0);

                rebalanceFut.listen(f -> metrics.clearRebalanceCounters());
            }
        }

        fut.sendRebalanceStartedEvent();

        if (assignments.cancelled()) { // Pending exchange.
            if (log.isDebugEnabled())
                log.debug("Rebalancing skipped due to cancelled assignments.");

            fut.onDone(false);

            fut.sendRebalanceFinishedEvent();

            return null;
        }

        if (assignments.isEmpty()) { // Nothing to rebalance.
            if (log.isDebugEnabled())
                log.debug("Rebalancing skipped due to empty assignments.");

            fut.onDone(true);

            ((GridFutureAdapter)grp.preloader().syncFuture()).onDone();

            fut.sendRebalanceFinishedEvent();

            return null;
        }

        return () -> {
            if (next != null)
                fut.listen(f -> {
                    try {
                        if (f.get()) // Not cancelled.
                            next.run(); // Starts next cache rebalancing (according to the order).
                    }
                    catch (IgniteCheckedException e) {
                        if (log.isDebugEnabled())
                            log.debug(e.getMessage());
                    }
                });

            requestPartitions(fut, assignments);
        };
    }
    else if (delay > 0) {
        for (GridCacheContext cctx : grp.caches()) {
            if (cctx.statisticsEnabled()) {
                final CacheMetricsImpl metrics = cctx.cache().metrics0();

                metrics.startRebalance(delay);
            }
        }

        GridTimeoutObject obj = lastTimeoutObj.get();

        if (obj != null)
            ctx.time().removeTimeoutObject(obj);

        final GridDhtPartitionsExchangeFuture exchFut = lastExchangeFut;

        assert exchFut != null : "Delaying rebalance process without topology event.";

        obj = new GridTimeoutObjectAdapter(delay) {
            @Override public void onTimeout() {
                exchFut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
                    @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> f) {
                        ctx.exchange().forceRebalance(exchFut.exchangeId());
                    }
                });
            }
        };

        lastTimeoutObj.set(obj);

        ctx.time().addTimeoutObject(obj);
    }

    return null;
}
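
The delayed branch keeps at most one pending timeout object: before scheduling a new delayed rebalance it removes the previously registered one. Below is a minimal sketch of that replace-the-last-timer pattern using a plain ScheduledExecutorService; DelayedRebalanceScheduler and its members are hypothetical illustration names, not Ignite's GridTimeoutProcessor API.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

class DelayedRebalanceScheduler {
    private final ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();

    // Mirrors lastTimeoutObj: at most one delayed rebalance is pending at a time.
    private final AtomicReference<ScheduledFuture<?>> lastTimeout = new AtomicReference<>();

    void scheduleForcedRebalance(long delayMs, Runnable forceRebalance) {
        ScheduledFuture<?> task = timer.schedule(forceRebalance, delayMs, TimeUnit.MILLISECONDS);

        // Cancel the previously scheduled rebalance, as removeTimeoutObject(obj) does above.
        ScheduledFuture<?> prev = lastTimeout.getAndSet(task);

        if (prev != null)
            prev.cancel(false);
    }

    public static void main(String[] args) throws InterruptedException {
        DelayedRebalanceScheduler scheduler = new DelayedRebalanceScheduler();

        scheduler.scheduleForcedRebalance(100, () -> System.out.println("rebalance A (replaced, never runs)"));
        scheduler.scheduleForcedRebalance(100, () -> System.out.println("rebalance B"));

        Thread.sleep(200);

        scheduler.timer.shutdown();
    }
}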
The class GridDhtPartitionDemander, method awaitClearingAndStartRebalance.
/**
 * Awaits clearing of full partitions and sends the initial demand request
 * after all of them are cleared and it is safe to consume data.
 *
 * @param fut Rebalance future.
 * @param demandMessage Initial demand message which contains the set of full partitions to await.
 * @param initDemandRequestTask Task which sends the initial demand request.
 */
private void awaitClearingAndStartRebalance(RebalanceFuture fut,
    GridDhtPartitionDemandMessage demandMessage,
    Runnable initDemandRequestTask) {
    Set<Integer> fullPartitions = demandMessage.partitions().fullSet();

    if (fullPartitions.isEmpty()) {
        ctx.kernalContext().closure().runLocalSafe(initDemandRequestTask, true);

        return;
    }

    for (GridCacheContext cctx : grp.caches()) {
        if (cctx.statisticsEnabled()) {
            final CacheMetricsImpl metrics = cctx.cache().metrics0();

            metrics.rebalanceClearingPartitions(fullPartitions.size());
        }
    }

    final AtomicInteger clearingPartitions = new AtomicInteger(fullPartitions.size());

    for (int partId : fullPartitions) {
        if (fut.isDone())
            return;

        GridDhtLocalPartition part = grp.topology().localPartition(partId);

        if (part != null && part.state() == MOVING) {
            part.onClearFinished(f -> {
                // Cancel rebalance if partition clearing has failed.
                if (f.error() != null) {
                    if (!fut.isDone()) {
                        for (GridCacheContext cctx : grp.caches()) {
                            if (cctx.statisticsEnabled()) {
                                final CacheMetricsImpl metrics = cctx.cache().metrics0();

                                metrics.rebalanceClearingPartitions(0);
                            }
                        }

                        log.error("Unable to await partition clearing " + part, f.error());

                        fut.cancel();
                    }
                }
                else {
                    if (!fut.isDone()) {
                        int existed = clearingPartitions.decrementAndGet();

                        for (GridCacheContext cctx : grp.caches()) {
                            if (cctx.statisticsEnabled()) {
                                final CacheMetricsImpl metrics = cctx.cache().metrics0();

                                metrics.rebalanceClearingPartitions(existed);
                            }
                        }

                        // If all partitions are cleared, send the initial demand message.
                        if (existed == 0)
                            ctx.kernalContext().closure().runLocalSafe(initDemandRequestTask, true);
                    }
                }
            });
        }
        else
            clearingPartitions.decrementAndGet();
    }
}
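
The method decrements an AtomicInteger as each asynchronous partition clearing finishes and fires the demand task when the count reaches zero. Here is a self-contained sketch of that countdown pattern over CompletableFuture; ClearingBarrier and runAfterAll are hypothetical names chosen for illustration.

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

class ClearingBarrier {
    // Run an action once all asynchronous partition clearings have finished.
    static void runAfterAll(List<CompletableFuture<Void>> clearings, Runnable onAllCleared) {
        if (clearings.isEmpty()) {
            onAllCleared.run(); // Nothing to wait for.

            return;
        }

        AtomicInteger remaining = new AtomicInteger(clearings.size());

        for (CompletableFuture<Void> f : clearings) {
            f.whenComplete((res, err) -> {
                if (err != null)
                    System.err.println("Partition clearing failed: " + err);
                else if (remaining.decrementAndGet() == 0)
                    onAllCleared.run(); // The last clearing has finished.
            });
        }
    }

    public static void main(String[] args) {
        List<CompletableFuture<Void>> parts = List.of(
            CompletableFuture.completedFuture(null),
            CompletableFuture.completedFuture(null));

        runAfterAll(parts, () -> System.out.println("Sending initial demand request."));
    }
}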
The class StopRebuildIndexTest, method stopRebuildIndexes.
/**
 * Stops the rebuild of the indexes, checking that it completes gracefully.
 *
 * @param stopRebuildIndexes Stop index rebuild function.
 * @param expThrowEx Expect an exception on index rebuild futures.
 * @throws Exception If failed.
 */
private void stopRebuildIndexes(IgniteThrowableConsumer<IgniteEx> stopRebuildIndexes, boolean expThrowEx) throws Exception {
    prepareBeforeNodeStart();

    int keys = 100_000;

    IgniteEx n = startGrid(0);

    populate(n.cache(DEFAULT_CACHE_NAME), keys);

    GridCacheContext<?, ?> cacheCtx = n.cachex(DEFAULT_CACHE_NAME).context();

    // Slow down processing of each row so the rebuild can be stopped mid-flight.
    addCacheRowConsumer(nodeName(n), cacheCtx.name(), row -> U.sleep(10));

    forceRebuildIndexes(n, cacheCtx);

    IgniteInternalFuture<?> fut0 = indexRebuildFuture(n, cacheCtx.cacheId());
    assertNotNull(fut0);

    SchemaIndexCacheFuture fut1 = internalIndexRebuildFuture(n, cacheCtx.cacheId());
    assertNotNull(fut1);

    CacheMetricsImpl metrics0 = cacheMetrics0(n, cacheCtx.name());
    assertTrue(metrics0.isIndexRebuildInProgress());
    assertFalse(fut0.isDone());
    assertFalse(fut1.isDone());
    assertNull(fut1.cancelToken().cancelException());

    // Wait until at least 1% of the keys have been processed, then stop the rebuild.
    assertTrue(waitForCondition(() -> metrics0.getIndexRebuildKeysProcessed() >= keys / 100, getTestTimeout()));
    assertTrue(metrics0.isIndexRebuildInProgress());
    assertFalse(fut0.isDone());
    assertFalse(fut1.isDone());
    assertNull(fut1.cancelToken().cancelException());

    stopRebuildIndexes.accept(n);

    assertFalse(metrics0.isIndexRebuildInProgress());
    assertTrue(metrics0.getIndexRebuildKeysProcessed() < keys);

    if (expThrowEx) {
        assertThrows(log, () -> fut0.get(getTestTimeout()), SchemaIndexOperationCancellationException.class, null);
        assertThrows(log, () -> fut1.get(getTestTimeout()), SchemaIndexOperationCancellationException.class, null);

        assertNotNull(fut1.cancelToken().cancelException());
    }
    else {
        fut0.get(getTestTimeout());
        fut1.get(getTestTimeout());

        assertNull(fut1.cancelToken().cancelException());
    }

    assertNull(internalIndexRebuildFuture(n, cacheCtx.cacheId()));
}
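
The test relies on waitForCondition to poll getIndexRebuildKeysProcessed until the rebuild makes enough progress. A minimal stand-in for such a polling helper, assuming a simple deadline loop (Conditions is a hypothetical class for illustration, not Ignite's GridTestUtils):

import java.util.function.BooleanSupplier;

final class Conditions {
    // Poll a predicate until it holds or the timeout elapses; returns the final outcome.
    static boolean waitForCondition(BooleanSupplier cond, long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;

        while (System.currentTimeMillis() < deadline) {
            if (cond.getAsBoolean())
                return true;

            Thread.sleep(50); // Poll interval.
        }

        return cond.getAsBoolean();
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();

        // Becomes true once 100 ms have passed, well before the 1 s timeout.
        System.out.println(waitForCondition(() -> System.currentTimeMillis() - start > 100, 1_000));
    }
}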
The class GridDhtPartitionDemander, method addAssignments (variant returning RebalanceFuture).
/**
 * This method initiates a new rebalance process from the given {@code assignments} by creating a new
 * rebalance future based on them. It cancels the previous rebalance future and sends a rebalance-started event.
 * In the case of delayed rebalance, the method schedules a new one with the configured delay based on {@code lastExchangeFut}.
 *
 * @param assignments Assignments to process.
 * @param force {@code True} if preload was requested by {@link ForceRebalanceExchangeTask}.
 * @param rebalanceId Rebalance id generated from the exchange thread.
 * @param next The next rebalance routine in the chain.
 * @param forcedRebFut External future for forced rebalance.
 * @param compatibleRebFut Future for waiting for compatible rebalances.
 *
 * @return Rebalancing future, or {@code null} to exclude an assignment from the chain.
 */
@Nullable RebalanceFuture addAssignments(final GridDhtPreloaderAssignments assignments,
    boolean force,
    long rebalanceId,
    final RebalanceFuture next,
    @Nullable final GridCompoundFuture<Boolean, Boolean> forcedRebFut,
    GridCompoundFuture<Boolean, Boolean> compatibleRebFut) {
    if (log.isDebugEnabled())
        log.debug("Adding partition assignments: " + assignments);

    assert force == (forcedRebFut != null);

    long delay = grp.config().getRebalanceDelay();

    if (delay == 0 || force) {
        assert assignments != null;

        final RebalanceFuture oldFut = rebalanceFut;

        if (assignments.cancelled()) { // Pending exchange.
            if (log.isDebugEnabled())
                log.debug("Rebalancing skipped due to cancelled assignments.");

            return null;
        }

        if (assignments.isEmpty()) { // Nothing to rebalance.
            if (log.isDebugEnabled())
                log.debug("Rebalancing skipped due to empty assignments.");

            if (oldFut.isInitial())
                oldFut.onDone(true);
            else if (!oldFut.isDone())
                oldFut.tryCancel();

            ((GridFutureAdapter)grp.preloader().syncFuture()).onDone();

            return null;
        }

        // Check if ongoing rebalancing is compatible with a new assignment.
        if (!force && (!oldFut.isDone() || oldFut.result()) && oldFut.compatibleWith(assignments)) {
            if (!oldFut.isDone())
                compatibleRebFut.add(oldFut);

            return null;
        }

        // Cancel ongoing rebalancing.
        if (!oldFut.isDone() && !oldFut.isInitial())
            oldFut.tryCancel();

        // Partition states cannot be changed from now on by the previous incompatible rebalancing.
        // Retain only moving partitions. The assignment can become empty as a result.
        // Delayed partition owning happens in the exchange worker as well, so there is no race with delayed owning here.
        assignments.retainMoving(grp.topology());

        // Skip a group that is already rebalanced.
        if (assignments.isEmpty())
            return null;

        final RebalanceFuture fut = new RebalanceFuture(grp, lastExchangeFut, assignments, log, rebalanceId, next, lastCancelledTime);

        if (oldFut.isInitial())
            fut.listen(f -> oldFut.onDone(f.result()));

        if (forcedRebFut != null)
            forcedRebFut.add(fut);

        rebalanceFut = fut;

        for (final GridCacheContext cctx : grp.caches()) {
            if (cctx.statisticsEnabled()) {
                final CacheMetricsImpl metrics = cctx.cache().metrics0();

                metrics.clearRebalanceCounters();

                for (GridDhtPartitionDemandMessage msg : assignments.values()) {
                    // Full partitions are estimated by their current global size.
                    for (Integer partId : msg.partitions().fullSet())
                        metrics.onRebalancingKeysCountEstimateReceived(grp.topology().globalPartSizes().get(partId));

                    // Historical partitions are estimated by the update counter delta.
                    CachePartitionPartialCountersMap histMap = msg.partitions().historicalMap();

                    for (int i = 0; i < histMap.size(); i++) {
                        long from = histMap.initialUpdateCounterAt(i);
                        long to = histMap.updateCounterAt(i);

                        metrics.onRebalancingKeysCountEstimateReceived(to - from);
                    }
                }

                metrics.startRebalance(0);
            }
        }

        fut.sendRebalanceStartedEvent();

        return fut;
    }
    else if (delay > 0) {
        for (GridCacheContext cctx : grp.caches()) {
            if (cctx.statisticsEnabled()) {
                final CacheMetricsImpl metrics = cctx.cache().metrics0();

                metrics.startRebalance(delay);
            }
        }

        GridTimeoutObject obj = lastTimeoutObj.get();

        if (obj != null)
            ctx.time().removeTimeoutObject(obj);

        final GridDhtPartitionsExchangeFuture exchFut = lastExchangeFut;

        assert exchFut != null : "Delaying rebalance process without topology event.";

        obj = new GridTimeoutObjectAdapter(delay) {
            @Override public void onTimeout() {
                exchFut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
                    @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> f) {
                        ctx.exchange().forceRebalance(exchFut.exchangeId());
                    }
                });
            }
        };

        lastTimeoutObj.set(obj);

        ctx.time().addTimeoutObject(obj);
    }

    return null;
}
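
In the metrics loop above, the estimated key count for a historical partition is the update-counter delta, to - from. A small self-contained sketch of that arithmetic (RebalanceEstimate is a hypothetical illustration, not Ignite code):

class RebalanceEstimate {
    // The expected number of historically rebalanced entries per partition is
    // the delta between the current and the initial update counter (to - from).
    static long historicalEstimate(long[] initialCounters, long[] currentCounters) {
        long estimate = 0;

        for (int i = 0; i < initialCounters.length; i++)
            estimate += currentCounters[i] - initialCounters[i];

        return estimate;
    }

    public static void main(String[] args) {
        // Two historical partitions whose counters advanced 100 -> 180 and 40 -> 65.
        System.out.println(historicalEstimate(new long[] {100, 40}, new long[] {180, 65})); // Prints 105.
    }
}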