Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class ValidateIndexesClosure, method checkSizes.
/**
 * Checks that the number of records in each cache matches the number of records in its indexes,
 * recording a mismatch (or a size-calculation error) into {@code checkSizeRes}.
 *
 * @param cacheSizesFutures Futures calculating the number of records per cache partition.
 * @param idxSizeFutures Futures calculating the number of records per cache index.
 * @param checkSizeRes Result of the size check.
 */
private void checkSizes(
    List<T3<CacheGroupContext, GridDhtLocalPartition, Future<CacheSize>>> cacheSizesFutures,
    List<T3<GridCacheContext, Index, Future<T2<Throwable, Long>>>> idxSizeFutures,
    Map<String, ValidateIndexesCheckSizeResult> checkSizeRes
) throws ExecutionException, InterruptedException {
if (!checkSizes)
return;
Map<Integer, CacheSize> cacheSizeTotal = new HashMap<>();
for (T3<CacheGroupContext, GridDhtLocalPartition, Future<CacheSize>> cacheSizeFut : cacheSizesFutures) {
CacheGroupContext cacheGrpCtx = cacheSizeFut.get1();
CacheSize cacheSize = cacheSizeFut.get3().get();
Throwable cacheSizeErr = cacheSize.err;
int grpId = cacheGrpCtx.groupId();
if (failCalcCacheSizeGrpIds.contains(grpId) && nonNull(cacheSizeErr)) {
checkSizeRes.computeIfAbsent(
    cacheGrpInfo(cacheGrpCtx),
    s -> new ValidateIndexesCheckSizeResult(0, new ArrayList<>())
).issues().add(new ValidateIndexesCheckSizeIssue(null, 0, cacheSizeErr));
} else {
cacheSizeTotal.computeIfAbsent(grpId, i -> new CacheSize(null, new HashMap<>())).merge(cacheSize.cacheSizePerTbl);
}
}
for (T3<GridCacheContext, Index, Future<T2<Throwable, Long>>> idxSizeFut : idxSizeFutures) {
GridCacheContext cacheCtx = idxSizeFut.get1();
int grpId = cacheCtx.groupId();
if (failCalcCacheSizeGrpIds.contains(grpId))
continue;
Index idx = idxSizeFut.get2();
String tblName = idx.getTable().getName();
AtomicLong cacheSizeObj = cacheSizeTotal.get(grpId).cacheSizePerTbl.getOrDefault(cacheCtx.cacheId(), emptyMap()).get(tblName);
long cacheSizeByTbl = isNull(cacheSizeObj) ? 0L : cacheSizeObj.get();
T2<Throwable, Long> idxSizeRes = idxSizeFut.get3().get();
Throwable err = idxSizeRes.get1();
long idxSize = idxSizeRes.get2();
if (isNull(err) && idxSize != cacheSizeByTbl)
err = new IgniteException("Cache size and index size are not the same.");
if (nonNull(err)) {
checkSizeRes.computeIfAbsent("[" + cacheGrpInfo(cacheCtx.group()) + ", " + cacheInfo(cacheCtx) + ", tableName=" + tblName + "]", s -> new ValidateIndexesCheckSizeResult(cacheSizeByTbl, new ArrayList<>())).issues().add(new ValidateIndexesCheckSizeIssue(idx.getName(), idxSize, err));
}
}
}
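The accumulation above depends on the closure's CacheSize holder: a per-partition result whose merge(...) folds per-table counters into a per-group total. Below is a minimal sketch of such a holder, using the same java.util and AtomicLong types as the snippet; the field names follow the usages above, but the merge body is an assumption, not the actual implementation.
/** Minimal sketch of a per-partition size holder; the merge body is an assumption. */
private static class CacheSize {
    /** Error that occurred while counting, if any. */
    final Throwable err;

    /** Per-cache, per-table row counters: cacheId -> (tableName -> count). */
    final Map<Integer, Map<String, AtomicLong>> cacheSizePerTbl;

    CacheSize(Throwable err, Map<Integer, Map<String, AtomicLong>> cacheSizePerTbl) {
        this.err = err;
        this.cacheSizePerTbl = cacheSizePerTbl;
    }

    /** Adds another partition's counters into this total. */
    void merge(Map<Integer, Map<String, AtomicLong>> other) {
        if (other == null)
            return;

        other.forEach((cacheId, perTbl) -> perTbl.forEach((tbl, cnt) ->
            cacheSizePerTbl.computeIfAbsent(cacheId, i -> new HashMap<>())
                .computeIfAbsent(tbl, t -> new AtomicLong())
                .addAndGet(cnt.get())));
    }
}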
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class ValidateIndexesClosure, method calcCacheSize.
/**
 * Calculates the number of records in a partition, broken down by cache and table.
 *
 * @param grpCtx Cache group context.
 * @param locPart Local partition.
 * @return Cache size representation object.
 */
private CacheSize calcCacheSize(CacheGroupContext grpCtx, GridDhtLocalPartition locPart) {
try {
if (validateCtx.isCancelled())
return new CacheSize(null, emptyMap());
@Nullable PartitionUpdateCounter updCntr = locPart.dataStore().partUpdateCounter();
PartitionUpdateCounter updateCntrBefore = updCntr == null ? null : updCntr.copy();
int grpId = grpCtx.groupId();
if (failCalcCacheSizeGrpIds.contains(grpId))
return new CacheSize(null, null);
boolean reserve = false;
int partId = locPart.id();
try {
if (!(reserve = locPart.reserve()))
throw new IgniteException("Can't reserve partition");
if (locPart.state() != OWNING)
throw new IgniteException("Partition not in state " + OWNING);
Map<Integer, Map<String, AtomicLong>> cacheSizeByTbl = new HashMap<>();
GridIterator<CacheDataRow> partIter = grpCtx.offheap().partitionIterator(partId);
GridQueryProcessor qryProcessor = ignite.context().query();
IgniteH2Indexing h2Indexing = (IgniteH2Indexing) qryProcessor.getIndexing();
while (partIter.hasNextX() && !failCalcCacheSizeGrpIds.contains(grpId)) {
CacheDataRow cacheDataRow = partIter.nextX();
int cacheId = cacheDataRow.cacheId();
GridCacheContext cacheCtx = cacheId == 0 ? grpCtx.singleCacheContext() : grpCtx.shared().cacheContext(cacheId);
if (cacheCtx == null)
throw new IgniteException("Unknown cacheId of CacheDataRow: " + cacheId);
if (cacheDataRow.link() == 0L)
throw new IgniteException("Contains invalid partition row, possibly deleted");
String cacheName = cacheCtx.name();
QueryTypeDescriptorImpl qryTypeDesc = qryProcessor.typeByValue(cacheName, cacheCtx.cacheObjectContext(), cacheDataRow.key(), cacheDataRow.value(), true);
if (isNull(qryTypeDesc))
// Tolerate - (k, v) is just not indexed.
continue;
String tableName = qryTypeDesc.tableName();
GridH2Table gridH2Tbl = h2Indexing.schemaManager().dataTable(cacheName, tableName);
if (isNull(gridH2Tbl))
// Tolerate - (k, v) is just not indexed.
continue;
cacheSizeByTbl.computeIfAbsent(cacheCtx.cacheId(), i -> new HashMap<>()).computeIfAbsent(tableName, s -> new AtomicLong()).incrementAndGet();
}
PartitionUpdateCounter updateCntrAfter = locPart.dataStore().partUpdateCounter();
if (updateCntrAfter != null && !updateCntrAfter.equals(updateCntrBefore)) {
throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[grpName=" + grpCtx.cacheOrGroupName() +
    ", grpId=" + grpCtx.groupId() + ", partId=" + locPart.id() + "] changed during size calculation " +
    "[updCntrBefore=" + updateCntrBefore + ", updCntrAfter=" + updateCntrAfter + "]");
}
return new CacheSize(null, cacheSizeByTbl);
} catch (Throwable t) {
IgniteException cacheSizeErr = new IgniteException("Cache size calculation error [" +
    cacheGrpInfo(grpCtx) + ", locPartId=" + partId + ", err=" + t.getMessage() + "]", t);
error(log, cacheSizeErr);
failCalcCacheSizeGrpIds.add(grpId);
return new CacheSize(cacheSizeErr, null);
} finally {
if (reserve)
locPart.release();
}
} finally {
processedCacheSizePartitions.incrementAndGet();
printProgressOfIndexValidationIfNeeded();
}
}
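Stripped of the H2 type lookups, the partition handling above follows a reserve / check-state / iterate / release pattern. The following is a minimal sketch of just that pattern; the scanPartition name is illustrative and the error handling is trimmed.
/** Sketch of the reserve / check state / iterate / release pattern used above; name and error handling are illustrative. */
private void scanPartition(CacheGroupContext grpCtx, GridDhtLocalPartition locPart) throws IgniteCheckedException {
    // A reservation pins the partition so it cannot be evicted or moved while it is being read.
    if (!locPart.reserve())
        return;

    try {
        // Only OWNING partitions hold a complete, stable copy of the data on this node.
        if (locPart.state() != OWNING)
            return;

        GridIterator<CacheDataRow> it = grpCtx.offheap().partitionIterator(locPart.id());

        while (it.hasNextX()) {
            CacheDataRow row = it.nextX();
            // Process the row here.
        }
    }
    finally {
        // Always release the reservation so rebalancing and eviction can proceed.
        locPart.release();
    }
}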
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class ValidateIndexesClosure, method call0.
/**
 * Performs the actual validation of partitions and indexes and, when enabled, checks cache and index sizes.
 *
 * @return Aggregated job result.
 */
private VisorValidateIndexesJobResult call0() {
if (validateCtx.isCancelled())
throw new IgniteException(CANCELLED_MSG);
Set<Integer> grpIds = collectGroupIds();
// Update counters per partition per group.
final Map<Integer, Map<Integer, PartitionUpdateCounter>> partsWithCntrsPerGrp = getUpdateCountersSnapshot(ignite, grpIds);
IdleVerifyUtility.IdleChecker idleChecker = new IdleVerifyUtility.IdleChecker(ignite, partsWithCntrsPerGrp);
List<T2<CacheGroupContext, GridDhtLocalPartition>> partArgs = new ArrayList<>();
List<T2<GridCacheContext, Index>> idxArgs = new ArrayList<>();
totalCacheGrps = grpIds.size();
Map<Integer, IndexIntegrityCheckIssue> integrityCheckResults = integrityCheckIndexesPartitions(grpIds, idleChecker);
GridQueryProcessor qryProcessor = ignite.context().query();
IgniteH2Indexing h2Indexing = (IgniteH2Indexing) qryProcessor.getIndexing();
for (Integer grpId : grpIds) {
CacheGroupContext grpCtx = ignite.context().cache().cacheGroup(grpId);
if (isNull(grpCtx) || integrityCheckResults.containsKey(grpId))
continue;
for (GridDhtLocalPartition part : grpCtx.topology().localPartitions())
    partArgs.add(new T2<>(grpCtx, part));
for (GridCacheContext ctx : grpCtx.caches()) {
String cacheName = ctx.name();
if (cacheNames == null || cacheNames.contains(cacheName)) {
Collection<GridQueryTypeDescriptor> types = qryProcessor.types(cacheName);
if (F.isEmpty(types))
continue;
for (GridQueryTypeDescriptor type : types) {
GridH2Table gridH2Tbl = h2Indexing.schemaManager().dataTable(cacheName, type.tableName());
if (isNull(gridH2Tbl))
continue;
for (Index idx : gridH2Tbl.getIndexes()) {
if (idx instanceof H2TreeIndexBase)
idxArgs.add(new T2<>(ctx, idx));
}
}
}
}
}
// To decrease contention on same indexes.
shuffle(partArgs);
shuffle(idxArgs);
totalPartitions = partArgs.size();
totalIndexes = idxArgs.size();
List<Future<Map<PartitionKey, ValidateIndexesPartitionResult>>> procPartFutures = new ArrayList<>(partArgs.size());
List<Future<Map<String, ValidateIndexesPartitionResult>>> procIdxFutures = new ArrayList<>(idxArgs.size());
List<T3<CacheGroupContext, GridDhtLocalPartition, Future<CacheSize>>> cacheSizeFutures = new ArrayList<>(partArgs.size());
List<T3<GridCacheContext, Index, Future<T2<Throwable, Long>>>> idxSizeFutures = new ArrayList<>(idxArgs.size());
partArgs.forEach(k -> procPartFutures.add(processPartitionAsync(k.get1(), k.get2())));
idxArgs.forEach(k -> procIdxFutures.add(processIndexAsync(k, idleChecker)));
if (checkSizes) {
for (T2<CacheGroupContext, GridDhtLocalPartition> partArg : partArgs) {
CacheGroupContext cacheGrpCtx = partArg.get1();
GridDhtLocalPartition locPart = partArg.get2();
cacheSizeFutures.add(new T3<>(cacheGrpCtx, locPart, calcCacheSizeAsync(cacheGrpCtx, locPart)));
}
for (T2<GridCacheContext, Index> idxArg : idxArgs) {
GridCacheContext cacheCtx = idxArg.get1();
Index idx = idxArg.get2();
idxSizeFutures.add(new T3<>(cacheCtx, idx, calcIndexSizeAsync(cacheCtx, idx, idleChecker)));
}
}
Map<PartitionKey, ValidateIndexesPartitionResult> partResults = new HashMap<>();
Map<String, ValidateIndexesPartitionResult> idxResults = new HashMap<>();
Map<String, ValidateIndexesCheckSizeResult> checkSizeResults = new HashMap<>();
int curPart = 0;
int curIdx = 0;
int curCacheSize = 0;
int curIdxSize = 0;
try {
for (; curPart < procPartFutures.size(); curPart++) {
Future<Map<PartitionKey, ValidateIndexesPartitionResult>> fut = procPartFutures.get(curPart);
Map<PartitionKey, ValidateIndexesPartitionResult> partRes = fut.get();
if (!partRes.isEmpty() && partRes.entrySet().stream().anyMatch(e -> !e.getValue().issues().isEmpty()))
partResults.putAll(partRes);
}
for (; curIdx < procIdxFutures.size(); curIdx++) {
Future<Map<String, ValidateIndexesPartitionResult>> fut = procIdxFutures.get(curIdx);
Map<String, ValidateIndexesPartitionResult> idxRes = fut.get();
if (!idxRes.isEmpty() && idxRes.entrySet().stream().anyMatch(e -> !e.getValue().issues().isEmpty()))
idxResults.putAll(idxRes);
}
if (checkSizes) {
for (; curCacheSize < cacheSizeFutures.size(); curCacheSize++)
    cacheSizeFutures.get(curCacheSize).get3().get();
for (; curIdxSize < idxSizeFutures.size(); curIdxSize++)
    idxSizeFutures.get(curIdxSize).get3().get();
checkSizes(cacheSizeFutures, idxSizeFutures, checkSizeResults);
Map<Integer, Map<Integer, PartitionUpdateCounter>> partsWithCntrsPerGrpAfterChecks = getUpdateCountersSnapshot(ignite, grpIds);
List<Integer> diff = compareUpdateCounters(ignite, partsWithCntrsPerGrp, partsWithCntrsPerGrpAfterChecks);
if (!F.isEmpty(diff)) {
String res = formatUpdateCountersDiff(ignite, diff);
if (!res.isEmpty())
throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[" + res + "]");
}
}
log.warning("ValidateIndexesClosure finished: processed " + totalPartitions + " partitions and " + totalIndexes + " indexes.");
} catch (InterruptedException | ExecutionException e) {
for (int j = curPart; j < procPartFutures.size(); j++)
    procPartFutures.get(j).cancel(false);
for (int j = curIdx; j < procIdxFutures.size(); j++)
    procIdxFutures.get(j).cancel(false);
for (int j = curCacheSize; j < cacheSizeFutures.size(); j++)
    cacheSizeFutures.get(j).get3().cancel(false);
for (int j = curIdxSize; j < idxSizeFutures.size(); j++)
    idxSizeFutures.get(j).get3().cancel(false);
throw unwrapFutureException(e);
}
if (validateCtx.isCancelled())
throw new IgniteException(CANCELLED_MSG);
return new VisorValidateIndexesJobResult(partResults, idxResults, integrityCheckResults.values(), checkSizeResults);
}
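The try/catch block above follows a submit-all, collect-in-order, cancel-the-rest pattern that is easier to see in isolation. A generic, distilled sketch of that pattern follows; the collectOrCancel name is illustrative and not part of the closure.
/** Generic sketch of the collect-or-cancel pattern used in call0(); the method name is illustrative. */
private static <T> List<T> collectOrCancel(List<Future<T>> futs) throws ExecutionException, InterruptedException {
    List<T> results = new ArrayList<>(futs.size());
    int cur = 0;

    try {
        for (; cur < futs.size(); cur++)
            results.add(futs.get(cur).get());
    }
    catch (InterruptedException | ExecutionException e) {
        // Everything from 'cur' onwards has not been consumed yet, so cancel the remaining work.
        for (int j = cur; j < futs.size(); j++)
            futs.get(j).cancel(false);

        throw e;
    }

    return results;
}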
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class IgniteCacheLockPartitionOnAffinityRunTest, method getOrganizationCountFromPartitionMap.
/**
 * @param ignite Ignite instance.
 * @param orgId Organization ID.
 * @return Count of Organization objects with the specified orgId found in the partition.
 * @throws Exception If failed.
 */
private static int getOrganizationCountFromPartitionMap(final IgniteEx ignite, int orgId) throws Exception {
int part = ignite.affinity(Organization.class.getSimpleName()).partition(orgId);
GridCacheAdapter<?, ?> cacheAdapterOrg = ignite.context().cache().internalCache(Organization.class.getSimpleName());
GridDhtLocalPartition pOrgs = cacheAdapterOrg.context().topology().localPartition(part, AffinityTopologyVersion.NONE, false);
int cnt = 0;
GridCursor<? extends CacheDataRow> c = pOrgs.dataStore().cursor();
CacheObjectContext ctx = cacheAdapterOrg.context().cacheObjectContext();
while (c.next()) {
CacheDataRow e = c.get();
Integer k = e.key().value(ctx, false);
Organization org = e.value().value(ctx, false);
if (org != null && org.getId() == orgId)
++cnt;
}
return cnt;
}
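For comparison, the same count can be obtained through the public API by scanning only the partition the organization ID maps to. A sketch follows, assuming the Organization cache from the test above with Integer keys; the helper name is illustrative and imports from org.apache.ignite.cache.query and javax.cache are assumed.
/** Sketch: count organizations with the given id via a partition-scoped ScanQuery (public API). */
private static int getOrganizationCountViaScanQuery(IgniteEx ignite, int orgId) {
    int part = ignite.affinity(Organization.class.getSimpleName()).partition(orgId);

    IgniteCache<Integer, Organization> cache = ignite.cache(Organization.class.getSimpleName());

    // Restrict the scan to the single partition that owns the given organization id.
    ScanQuery<Integer, Organization> qry = new ScanQuery<Integer, Organization>(
        (k, org) -> org != null && org.getId() == orgId)
        .setPartition(part);

    int cnt = 0;

    try (QueryCursor<Cache.Entry<Integer, Organization>> cur = cache.query(qry)) {
        for (Cache.Entry<Integer, Organization> ignored : cur)
            cnt++;
    }

    return cnt;
}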
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class IgniteCacheLockPartitionOnAffinityRunTest, method primaryPartition.
/**
* @param ignite Ignite instance.
* @param orgId Organization ID.
* @return {@code true} if partition for the given organization ID is primary on the given node.
*/
private static boolean primaryPartition(IgniteEx ignite, int orgId) {
int part = ignite.affinity(Organization.class.getSimpleName()).partition(orgId);
GridCacheAdapter<?, ?> cacheAdapterPers = ignite.context().cache().internalCache(Person.class.getSimpleName());
GridDhtLocalPartition pPers = cacheAdapterPers.context().topology().localPartition(part, AffinityTopologyVersion.NONE, false);
return pPers.primary(AffinityTopologyVersion.NONE);
}
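The same check can also be expressed through the public Affinity API without touching internal topology classes. A sketch follows; it assumes the test's collocation setup, where the organization ID is the affinity key of the Person cache, so the equivalence holds only under that configuration.
/** Sketch: primary-ness check via the public Affinity API; assumes the test's affinity configuration. */
private static boolean primaryPartitionPublicApi(IgniteEx ignite, int orgId) {
    Affinity<Integer> aff = ignite.affinity(Person.class.getSimpleName());

    // orgId is the affinity key, so it resolves to the same partition the internal check inspects.
    return aff.isPrimary(ignite.localNode(), orgId);
}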