Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class ValidateIndexesClosure, method calcCacheSize.
/**
 * Calculation of cache sizes, broken down by table.
 *
 * @param grpCtx Cache group context.
 * @param locPart Local partition.
 * @return Cache size representation object.
 */
private CacheSize calcCacheSize(CacheGroupContext grpCtx, GridDhtLocalPartition locPart) {
    try {
        if (validateCtx.isCancelled())
            return new CacheSize(null, emptyMap());

        @Nullable PartitionUpdateCounter updCntr = locPart.dataStore().partUpdateCounter();

        PartitionUpdateCounter updateCntrBefore = updCntr == null ? updCntr : updCntr.copy();

        int grpId = grpCtx.groupId();

        if (failCalcCacheSizeGrpIds.contains(grpId))
            return new CacheSize(null, null);

        boolean reserve = false;

        int partId = locPart.id();

        try {
            if (!(reserve = locPart.reserve()))
                throw new IgniteException("Can't reserve partition");

            if (locPart.state() != OWNING)
                throw new IgniteException("Partition not in state " + OWNING);

            Map<Integer, Map<String, AtomicLong>> cacheSizeByTbl = new HashMap<>();

            GridIterator<CacheDataRow> partIter = grpCtx.offheap().partitionIterator(partId);

            GridQueryProcessor qryProcessor = ignite.context().query();
            IgniteH2Indexing h2Indexing = (IgniteH2Indexing)qryProcessor.getIndexing();

            while (partIter.hasNextX() && !failCalcCacheSizeGrpIds.contains(grpId)) {
                CacheDataRow cacheDataRow = partIter.nextX();

                int cacheId = cacheDataRow.cacheId();

                GridCacheContext cacheCtx = cacheId == 0 ?
                    grpCtx.singleCacheContext() : grpCtx.shared().cacheContext(cacheId);

                if (cacheCtx == null)
                    throw new IgniteException("Unknown cacheId of CacheDataRow: " + cacheId);

                if (cacheDataRow.link() == 0L)
                    throw new IgniteException("Contains invalid partition row, possibly deleted");

                String cacheName = cacheCtx.name();

                QueryTypeDescriptorImpl qryTypeDesc = qryProcessor.typeByValue(
                    cacheName, cacheCtx.cacheObjectContext(), cacheDataRow.key(), cacheDataRow.value(), true);

                if (isNull(qryTypeDesc))
                    // Tolerate - (k, v) is just not indexed.
                    continue;

                String tableName = qryTypeDesc.tableName();

                GridH2Table gridH2Tbl = h2Indexing.schemaManager().dataTable(cacheName, tableName);

                if (isNull(gridH2Tbl))
                    // Tolerate - (k, v) is just not indexed.
                    continue;

                cacheSizeByTbl.computeIfAbsent(cacheCtx.cacheId(), i -> new HashMap<>())
                    .computeIfAbsent(tableName, s -> new AtomicLong())
                    .incrementAndGet();
            }

            PartitionUpdateCounter updateCntrAfter = locPart.dataStore().partUpdateCounter();

            if (updateCntrAfter != null && !updateCntrAfter.equals(updateCntrBefore)) {
                throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[grpName=" + grpCtx.cacheOrGroupName() +
                    ", grpId=" + grpCtx.groupId() + ", partId=" + locPart.id() + "] changed during size " +
                    "calculation [updCntrBefore=" + updateCntrBefore + ", updCntrAfter=" + updateCntrAfter + "]");
            }

            return new CacheSize(null, cacheSizeByTbl);
        }
        catch (Throwable t) {
            IgniteException cacheSizeErr = new IgniteException("Cache size calculation error [" +
                cacheGrpInfo(grpCtx) + ", locParId=" + partId + ", err=" + t.getMessage() + "]", t);

            error(log, cacheSizeErr);

            failCalcCacheSizeGrpIds.add(grpId);

            return new CacheSize(cacheSizeErr, null);
        }
        finally {
            if (reserve)
                locPart.release();
        }
    }
    finally {
        processedCacheSizePartitions.incrementAndGet();

        printProgressOfIndexValidationIfNeeded();
    }
}
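The method above combines three concerns: partition reservation, per-row resolution of the owning cache and SQL table, and an idle-cluster check on the partition update counter. As a reading aid, the sketch below isolates only the reserve/iterate/release pattern. It is a minimal sketch that assumes the same internal Ignite types as the listing (CacheGroupContext, GridDhtLocalPartition, GridIterator, CacheDataRow); the method name countPartitionRows is illustrative and is not part of ValidateIndexesClosure.

// Minimal sketch: reserve a partition, walk its rows, always release in finally.
// Imports and field access are assumed to match the enclosing class of the listing above.
private long countPartitionRows(CacheGroupContext grpCtx, GridDhtLocalPartition locPart) throws IgniteCheckedException {
    if (!locPart.reserve())
        throw new IgniteException("Can't reserve partition: " + locPart.id());

    try {
        long rowCnt = 0;

        GridIterator<CacheDataRow> it = grpCtx.offheap().partitionIterator(locPart.id());

        while (it.hasNextX()) {
            CacheDataRow row = it.nextX();

            // A zero link means the row was removed concurrently; this sketch skips such
            // rows instead of failing, unlike the stricter check in calcCacheSize.
            if (row.link() != 0L)
                rowCnt++;
        }

        return rowCnt;
    }
    finally {
        locPart.release();
    }
}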
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class ValidateIndexesClosure, method processPartition.
/**
 * @param grpCtx Group context.
 * @param part Local partition.
 * @return Map from partition key to the index validation result for this partition.
 */
private Map<PartitionKey, ValidateIndexesPartitionResult> processPartition(CacheGroupContext grpCtx, GridDhtLocalPartition part) {
    if (validateCtx.isCancelled() || !part.reserve())
        return emptyMap();

    ValidateIndexesPartitionResult partRes;

    try {
        if (part.state() != OWNING)
            return emptyMap();

        @Nullable PartitionUpdateCounter updCntr = part.dataStore().partUpdateCounter();

        PartitionUpdateCounter updateCntrBefore = updCntr == null ? null : updCntr.copy();

        partRes = new ValidateIndexesPartitionResult();

        boolean hasMvcc = grpCtx.caches().stream().anyMatch(GridCacheContext::mvccEnabled);

        if (hasMvcc) {
            for (GridCacheContext<?, ?> context : grpCtx.caches()) {
                try (Session session = mvccSession(context)) {
                    MvccSnapshot mvccSnapshot = null;

                    boolean mvccEnabled = context.mvccEnabled();

                    if (mvccEnabled)
                        mvccSnapshot = ((QueryContext)session.getVariable(H2Utils.QCTX_VARIABLE_NAME).getObject()).mvccSnapshot();

                    GridIterator<CacheDataRow> iterator = grpCtx.offheap().cachePartitionIterator(
                        context.cacheId(), part.id(), mvccSnapshot, null);

                    processPartIterator(grpCtx, partRes, session, iterator);
                }
            }
        }
        else
            processPartIterator(grpCtx, partRes, null, grpCtx.offheap().partitionIterator(part.id()));

        PartitionUpdateCounter updateCntrAfter = part.dataStore().partUpdateCounter();

        if (updateCntrAfter != null && !updateCntrAfter.equals(updateCntrBefore)) {
            throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[grpName=" + grpCtx.cacheOrGroupName() +
                ", grpId=" + grpCtx.groupId() + ", partId=" + part.id() + "] changed during index validation " +
                "[before=" + updateCntrBefore + ", after=" + updateCntrAfter + "]");
        }
    }
    catch (IgniteCheckedException e) {
        error(log, "Failed to process partition [grpId=" + grpCtx.groupId() + ", partId=" + part.id() + "]", e);

        return emptyMap();
    }
    finally {
        part.release();

        printProgressOfIndexValidationIfNeeded();
    }

    PartitionKey partKey = new PartitionKey(grpCtx.groupId(), part.id(), grpCtx.cacheOrGroupName());

    processedPartitions.incrementAndGet();

    return Collections.singletonMap(partKey, partRes);
}
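Both calcCacheSize and processPartition guard against a non-idle cluster in the same way: they copy the PartitionUpdateCounter before the scan and compare it with the counter afterwards. A minimal sketch of that guard follows; the helper name ensurePartitionNotChanged is assumed for illustration, while PartitionUpdateCounter and GridNotIdleException are the real types used above.

// Sketch of the idle-verification guard shared by the two methods above; the helper name is illustrative.
private void ensurePartitionNotChanged(GridDhtLocalPartition part, PartitionUpdateCounter before) {
    PartitionUpdateCounter after = part.dataStore().partUpdateCounter();

    // A null counter means the data store keeps no per-partition counter; there is nothing to compare.
    if (after != null && !after.equals(before))
        throw new GridNotIdleException("Partition " + part.id() + " changed during validation [before=" + before + ", after=" + after + "]");
}

In the listings above the comparison is inlined, and the exception message additionally carries the cache group name and id.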
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class CacheMvccBackupsAbstractTest, method allVersions.
/**
 * Retrieves all versions of all keys from cache.
 *
 * @param cache Cache.
 * @return {@link Map} of keys to their versions.
 * @throws IgniteCheckedException If failed.
 */
private Map<KeyCacheObject, List<CacheDataRow>> allVersions(IgniteCache cache) throws IgniteCheckedException {
    IgniteCacheProxy cache0 = (IgniteCacheProxy)cache;

    GridCacheContext cctx = cache0.context();

    assert cctx.mvccEnabled();

    Map<KeyCacheObject, List<CacheDataRow>> vers = new HashMap<>();

    for (Object e : cache) {
        IgniteBiTuple entry = (IgniteBiTuple)e;

        KeyCacheObject key = cctx.toCacheKeyObject(entry.getKey());

        GridCursor<CacheDataRow> cur = cctx.offheap().mvccAllVersionsCursor(cctx, key, null);

        List<CacheDataRow> rows = new ArrayList<>();

        while (cur.next()) {
            CacheDataRow row = cur.get();

            rows.add(row);
        }

        vers.put(key, rows);
    }

    return vers;
}
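The rebalance test below relies on a single-key helper, allKeyVersions, whose body is not part of this listing. Based on allVersions above, a plausible sketch of such a helper is shown here; it illustrates the mvccAllVersionsCursor call and is not the exact code from CacheMvccBackupsAbstractTest.

// Hypothetical single-key counterpart of allVersions; shown only to illustrate mvccAllVersionsCursor.
private List<CacheDataRow> allKeyVersionsSketch(IgniteCache cache, Object key) throws IgniteCheckedException {
    GridCacheContext cctx = ((IgniteCacheProxy)cache).context();

    assert cctx.mvccEnabled();

    KeyCacheObject cacheKey = cctx.toCacheKeyObject(key);

    // Cursor over every stored MVCC version of this key.
    GridCursor<CacheDataRow> cur = cctx.offheap().mvccAllVersionsCursor(cctx, cacheKey, null);

    List<CacheDataRow> rows = new ArrayList<>();

    while (cur.next())
        rows.add(cur.get());

    return rows;
}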
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class CacheMvccBackupsAbstractTest, method doTestRebalanceNodeLeave.
/**
 * @param startClient If {@code true}, operations are executed from a started client node; otherwise from grid(0).
 * @throws Exception If failed.
 */
public void doTestRebalanceNodeLeave(boolean startClient) throws Exception {
    testSpi = true;
    disableScheduledVacuum = true;

    startGridsMultiThreaded(4);

    client = true;

    final Ignite node = startClient ? startGrid(4) : grid(0);

    final IgniteCache<Object, Object> cache = node.createCache(
        cacheConfiguration(cacheMode(), FULL_SYNC, 2, 16).setIndexedTypes(Integer.class, Integer.class));

    List<Integer> keys = new ArrayList<>();

    for (int i = 0; i < 4; i++)
        keys.addAll(primaryKeys(grid(i).cache(DEFAULT_CACHE_NAME), 2));

    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        StringBuilder sb = new StringBuilder("INSERT INTO Integer (_key, _val) values ");

        for (int i = 0; i < keys.size(); i++) {
            if (i > 0)
                sb.append(", ");

            sb.append("(" + keys.get(i) + ", " + keys.get(i) + ")");
        }

        SqlFieldsQuery qry = new SqlFieldsQuery(sb.toString());

        cache.query(qry).getAll();

        tx.commit();
    }

    stopGrid(3);

    awaitPartitionMapExchange();

    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer SET _val = 10*_key");

        cache.query(qry).getAll();

        tx.commit();
    }

    awaitPartitionMapExchange();

    for (Integer key : keys) {
        List<CacheDataRow> vers = null;

        for (int i = 0; i < 3; i++) {
            ClusterNode n = grid(i).cluster().localNode();

            if (node.affinity(DEFAULT_CACHE_NAME).isPrimaryOrBackup(n, key)) {
                List<CacheDataRow> vers0 = allKeyVersions(grid(i).cache(DEFAULT_CACHE_NAME), key);

                if (vers != null)
                    assertKeyVersionsEquals(vers, vers0);

                vers = vers0;
            }
        }
    }
}
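doTestRebalanceNodeLeave is a template shared by concrete tests, with the startClient flag deciding whether the SQL statements are issued from a client node or from server grid(0). Wrappers in the concrete test classes could look like the sketch below; the test method names are assumptions, and a JUnit 4 style @Test annotation is assumed as well.

// Assumed wrapper tests; only doTestRebalanceNodeLeave(boolean) comes from the listing above.
@Test
public void testRebalanceNodeLeaveClient() throws Exception {
    doTestRebalanceNodeLeave(true);
}

@Test
public void testRebalanceNodeLeaveServer() throws Exception {
    doTestRebalanceNodeLeave(false);
}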
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class GeoSpatialIndexImpl, method rowIterator.
/**
 * @param i Spatial key iterator.
 * @param filter Table filter.
 * @return Iterator over rows.
 */
@SuppressWarnings("unchecked")
private GridCursor<IndexRow> rowIterator(Iterator<SpatialKey> i, TableFilter filter) {
    if (!i.hasNext())
        return IndexValueCursor.EMPTY;

    long time = System.currentTimeMillis();

    IndexingQueryFilter qryFilter = null;

    QueryContext qctx = H2Utils.context(filter.getSession());

    if (qctx != null)
        qryFilter = qctx.filter();

    IndexingQueryCacheFilter qryCacheFilter = qryFilter != null
        ? qryFilter.forCache(def.rowHandler().getTable().cacheName())
        : null;

    List<IndexRow> rows = new ArrayList<>();

    do {
        IndexRow row = idToRow.get(i.next().getId());

        assert row != null;

        CacheDataRow cacheRow = row.cacheDataRow();

        if (cacheRow.expireTime() != 0 && cacheRow.expireTime() <= time)
            continue;

        if (qryCacheFilter == null || qryCacheFilter.applyPartition(cacheRow.partition()))
            rows.add(row);
    } while (i.hasNext());

    return new GridCursorIteratorWrapper(rows.iterator());
}
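The per-row decision inside the do/while loop (drop rows whose expire time has passed, then apply the optional per-cache partition filter) can be expressed as a small predicate. The sketch below restates it with an assumed helper name, acceptRow; the CacheDataRow and IndexingQueryCacheFilter calls are the same ones used in rowIterator.

// Sketch of the row filter used in the loop above; the helper name is illustrative.
private static boolean acceptRow(CacheDataRow cacheRow, IndexingQueryCacheFilter qryCacheFilter, long now) {
    // Entries whose expire time already passed are treated as absent.
    if (cacheRow.expireTime() != 0 && cacheRow.expireTime() <= now)
        return false;

    // Without a cache filter every row is accepted; otherwise its partition must pass the filter.
    return qryCacheFilter == null || qryCacheFilter.applyPartition(cacheRow.partition());
}

rowIterator then wraps the accepted rows in a GridCursorIteratorWrapper so that callers consume them through the common GridCursor<IndexRow> interface.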