use of org.apache.ignite.internal.processors.query.h2.opt.GridH2Table in project ignite by apache.
the class CollocationModel method joinedWithCollocated.
/**
 * @param f Filter.
 * @return Affinity join type.
 */
@SuppressWarnings("ForLoopReplaceableByForEach")
private CollocationModelAffinity joinedWithCollocated(int f) {
    TableFilter tf = childFilters[f];
    GridH2Table tbl = (GridH2Table) tf.getTable();

    if (validate) {
        if (tbl.isCustomAffinityMapper())
            throw customAffinityError(tbl.cacheName());

        if (F.isEmpty(tf.getIndexConditions())) {
            throw new CacheException("Failed to prepare distributed join query: " +
                "join condition does not use index [joinedCache=" + tbl.cacheName() +
                ", plan=" + tf.getSelect().getPlanSQL() + ']');
        }
    }

    IndexColumn affCol = tbl.getAffinityKeyColumn();
    boolean affKeyCondFound = false;

    if (affCol != null) {
        ArrayList<IndexCondition> idxConditions = tf.getIndexConditions();
        int affColId = affCol.column.getColumnId();

        for (int i = 0; i < idxConditions.size(); i++) {
            IndexCondition c = idxConditions.get(i);
            int colId = c.getColumn().getColumnId();
            int cmpType = c.getCompareType();

            if ((cmpType == Comparison.EQUAL || cmpType == Comparison.EQUAL_NULL_SAFE) &&
                (colId == affColId || tbl.rowDescriptor().isKeyColumn(colId)) && c.isEvaluatable()) {
                affKeyCondFound = true;

                Expression exp = c.getExpression();
                exp = exp.getNonAliasExpression();

                if (exp instanceof ExpressionColumn) {
                    ExpressionColumn expCol = (ExpressionColumn) exp;

                    // This is one of our previous joins.
                    TableFilter prevJoin = expCol.getTableFilter();

                    if (prevJoin != null) {
                        CollocationModel cm = child(indexOf(prevJoin), true);
                        // If the previously joined model is a subquery (view), we cannot be sure that
                        // the matched column is the needed affinity column, since a view can expose
                        // different affinity columns from different tables.
                        if (cm != null && !cm.view) {
                            CollocationModelType t = cm.type(true);

                            if (t.isPartitioned() && t.isCollocated() && isAffinityColumn(prevJoin, expCol, validate))
                                return CollocationModelAffinity.COLLOCATED_JOIN;
                        }
                    }
                }
            }
        }
    }

    return affKeyCondFound ? CollocationModelAffinity.HAS_AFFINITY_CONDITION : CollocationModelAffinity.NONE;
}
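For context, joinedWithCollocated(...) runs during query planning when distributed-join validation is enabled. Below is a minimal, self-contained sketch (not part of the Ignite sources above; cache and schema names are reused from the test further down) of a query this check can classify as collocated: the join condition equates the _KEY columns, which are the affinity columns when no custom affinity mapper is configured.
import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class CollocatedJoinSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Integer> cache0 = ignite.createCache(
                new CacheConfiguration<Integer, Integer>("cache0")
                    .setSqlSchema("CACHE0")
                    .setIndexedTypes(Integer.class, Integer.class));

            IgniteCache<Integer, Integer> cache1 = ignite.createCache(
                new CacheConfiguration<Integer, Integer>("cache1")
                    .setSqlSchema("CACHE1")
                    .setIndexedTypes(Integer.class, Integer.class));

            for (int i = 0; i < 100; i++) {
                cache0.put(i, i);
                cache1.put(i, i);
            }

            // The join condition uses the key (and hence default affinity) column of both tables,
            // so the collocation model can treat the join as collocated instead of distributed.
            List<List<?>> rows = cache0.query(new SqlFieldsQuery(
                "select a._KEY, b._VAL from CACHE0.Integer a join CACHE1.Integer b on a._KEY = b._KEY"))
                .getAll();

            System.out.println("Joined rows: " + rows.size());
        }
    }
}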
use of org.apache.ignite.internal.processors.query.h2.opt.GridH2Table in project ignite by apache.
the class ValidateIndexesClosure method calcCacheSize.
/**
 * Calculates cache sizes for the given partition, broken down by table.
 *
 * @param grpCtx Cache group context.
 * @param locPart Local partition.
 * @return Cache size representation object.
 */
private CacheSize calcCacheSize(CacheGroupContext grpCtx, GridDhtLocalPartition locPart) {
    try {
        if (validateCtx.isCancelled())
            return new CacheSize(null, emptyMap());

        @Nullable PartitionUpdateCounter updCntr = locPart.dataStore().partUpdateCounter();
        PartitionUpdateCounter updateCntrBefore = updCntr == null ? updCntr : updCntr.copy();

        int grpId = grpCtx.groupId();

        if (failCalcCacheSizeGrpIds.contains(grpId))
            return new CacheSize(null, null);

        boolean reserve = false;
        int partId = locPart.id();

        try {
            if (!(reserve = locPart.reserve()))
                throw new IgniteException("Can't reserve partition");

            if (locPart.state() != OWNING)
                throw new IgniteException("Partition not in state " + OWNING);

            Map<Integer, Map<String, AtomicLong>> cacheSizeByTbl = new HashMap<>();

            GridIterator<CacheDataRow> partIter = grpCtx.offheap().partitionIterator(partId);

            GridQueryProcessor qryProcessor = ignite.context().query();
            IgniteH2Indexing h2Indexing = (IgniteH2Indexing) qryProcessor.getIndexing();

            while (partIter.hasNextX() && !failCalcCacheSizeGrpIds.contains(grpId)) {
                CacheDataRow cacheDataRow = partIter.nextX();

                int cacheId = cacheDataRow.cacheId();

                GridCacheContext cacheCtx = cacheId == 0 ?
                    grpCtx.singleCacheContext() : grpCtx.shared().cacheContext(cacheId);

                if (cacheCtx == null)
                    throw new IgniteException("Unknown cacheId of CacheDataRow: " + cacheId);

                if (cacheDataRow.link() == 0L)
                    throw new IgniteException("Contains invalid partition row, possibly deleted");

                String cacheName = cacheCtx.name();

                QueryTypeDescriptorImpl qryTypeDesc = qryProcessor.typeByValue(
                    cacheName, cacheCtx.cacheObjectContext(), cacheDataRow.key(), cacheDataRow.value(), true);

                if (isNull(qryTypeDesc))
                    // Tolerate - (k, v) is just not indexed.
                    continue;

                String tableName = qryTypeDesc.tableName();

                GridH2Table gridH2Tbl = h2Indexing.schemaManager().dataTable(cacheName, tableName);

                if (isNull(gridH2Tbl))
                    // Tolerate - (k, v) is just not indexed.
                    continue;

                cacheSizeByTbl.computeIfAbsent(cacheCtx.cacheId(), i -> new HashMap<>())
                    .computeIfAbsent(tableName, s -> new AtomicLong())
                    .incrementAndGet();
            }

            PartitionUpdateCounter updateCntrAfter = locPart.dataStore().partUpdateCounter();

            if (updateCntrAfter != null && !updateCntrAfter.equals(updateCntrBefore)) {
                throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[grpName=" + grpCtx.cacheOrGroupName() +
                    ", grpId=" + grpCtx.groupId() + ", partId=" + locPart.id() + "] changed during size " +
                    "calculation [updCntrBefore=" + updateCntrBefore + ", updCntrAfter=" + updateCntrAfter + "]");
            }

            return new CacheSize(null, cacheSizeByTbl);
        } catch (Throwable t) {
            IgniteException cacheSizeErr = new IgniteException("Cache size calculation error [" +
                cacheGrpInfo(grpCtx) + ", locParId=" + partId + ", err=" + t.getMessage() + "]", t);

            error(log, cacheSizeErr);

            failCalcCacheSizeGrpIds.add(grpId);

            return new CacheSize(cacheSizeErr, null);
        } finally {
            if (reserve)
                locPart.release();
        }
    } finally {
        processedCacheSizePartitions.incrementAndGet();
        printProgressOfIndexValidationIfNeeded();
    }
}
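The per-table counting above is plain JDK: a nested map keyed by cache id and table name, with AtomicLong counters created lazily via computeIfAbsent. A minimal standalone sketch of the same idiom follows (the cache ids and table names are hypothetical; nothing Ignite-specific is involved):
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class PerTableCounterSketch {
    public static void main(String[] args) {
        // Hypothetical (cacheId, tableName) pairs standing in for rows read from a partition iterator.
        int[] cacheIds = {1, 1, 2};
        String[] tblNames = {"PERSON", "PERSON", "COMPANY"};

        // Same shape as cacheSizeByTbl above: cacheId -> (tableName -> row count).
        Map<Integer, Map<String, AtomicLong>> cacheSizeByTbl = new HashMap<>();

        for (int i = 0; i < cacheIds.length; i++) {
            cacheSizeByTbl.computeIfAbsent(cacheIds[i], id -> new HashMap<>())
                .computeIfAbsent(tblNames[i], name -> new AtomicLong())
                .incrementAndGet();
        }

        // Prints {1={PERSON=2}, 2={COMPANY=1}} (map iteration order may vary).
        System.out.println(cacheSizeByTbl);
    }
}
Using a mutable AtomicLong as the map value avoids a get-then-put per row; each increment touches the map only through computeIfAbsent.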
use of org.apache.ignite.internal.processors.query.h2.opt.GridH2Table in project ignite by apache.
the class ValidateIndexesClosure method call0.
/**
 */
private VisorValidateIndexesJobResult call0() {
    if (validateCtx.isCancelled())
        throw new IgniteException(CANCELLED_MSG);

    Set<Integer> grpIds = collectGroupIds();

    /**
     * Update counters per partition per group.
     */
    final Map<Integer, Map<Integer, PartitionUpdateCounter>> partsWithCntrsPerGrp =
        getUpdateCountersSnapshot(ignite, grpIds);

    IdleVerifyUtility.IdleChecker idleChecker = new IdleVerifyUtility.IdleChecker(ignite, partsWithCntrsPerGrp);

    List<T2<CacheGroupContext, GridDhtLocalPartition>> partArgs = new ArrayList<>();
    List<T2<GridCacheContext, Index>> idxArgs = new ArrayList<>();

    totalCacheGrps = grpIds.size();

    Map<Integer, IndexIntegrityCheckIssue> integrityCheckResults = integrityCheckIndexesPartitions(grpIds, idleChecker);

    GridQueryProcessor qryProcessor = ignite.context().query();
    IgniteH2Indexing h2Indexing = (IgniteH2Indexing) qryProcessor.getIndexing();

    for (Integer grpId : grpIds) {
        CacheGroupContext grpCtx = ignite.context().cache().cacheGroup(grpId);

        if (isNull(grpCtx) || integrityCheckResults.containsKey(grpId))
            continue;

        for (GridDhtLocalPartition part : grpCtx.topology().localPartitions())
            partArgs.add(new T2<>(grpCtx, part));

        for (GridCacheContext ctx : grpCtx.caches()) {
            String cacheName = ctx.name();

            if (cacheNames == null || cacheNames.contains(cacheName)) {
                Collection<GridQueryTypeDescriptor> types = qryProcessor.types(cacheName);

                if (F.isEmpty(types))
                    continue;

                for (GridQueryTypeDescriptor type : types) {
                    GridH2Table gridH2Tbl = h2Indexing.schemaManager().dataTable(cacheName, type.tableName());

                    if (isNull(gridH2Tbl))
                        continue;

                    for (Index idx : gridH2Tbl.getIndexes()) {
                        if (idx instanceof H2TreeIndexBase)
                            idxArgs.add(new T2<>(ctx, idx));
                    }
                }
            }
        }
    }

    // To decrease contention on same indexes.
    shuffle(partArgs);
    shuffle(idxArgs);

    totalPartitions = partArgs.size();
    totalIndexes = idxArgs.size();

    List<Future<Map<PartitionKey, ValidateIndexesPartitionResult>>> procPartFutures = new ArrayList<>(partArgs.size());
    List<Future<Map<String, ValidateIndexesPartitionResult>>> procIdxFutures = new ArrayList<>(idxArgs.size());

    List<T3<CacheGroupContext, GridDhtLocalPartition, Future<CacheSize>>> cacheSizeFutures = new ArrayList<>(partArgs.size());
    List<T3<GridCacheContext, Index, Future<T2<Throwable, Long>>>> idxSizeFutures = new ArrayList<>(idxArgs.size());

    partArgs.forEach(k -> procPartFutures.add(processPartitionAsync(k.get1(), k.get2())));
    idxArgs.forEach(k -> procIdxFutures.add(processIndexAsync(k, idleChecker)));

    if (checkSizes) {
        for (T2<CacheGroupContext, GridDhtLocalPartition> partArg : partArgs) {
            CacheGroupContext cacheGrpCtx = partArg.get1();
            GridDhtLocalPartition locPart = partArg.get2();

            cacheSizeFutures.add(new T3<>(cacheGrpCtx, locPart, calcCacheSizeAsync(cacheGrpCtx, locPart)));
        }

        for (T2<GridCacheContext, Index> idxArg : idxArgs) {
            GridCacheContext cacheCtx = idxArg.get1();
            Index idx = idxArg.get2();

            idxSizeFutures.add(new T3<>(cacheCtx, idx, calcIndexSizeAsync(cacheCtx, idx, idleChecker)));
        }
    }

    Map<PartitionKey, ValidateIndexesPartitionResult> partResults = new HashMap<>();
    Map<String, ValidateIndexesPartitionResult> idxResults = new HashMap<>();
    Map<String, ValidateIndexesCheckSizeResult> checkSizeResults = new HashMap<>();

    int curPart = 0;
    int curIdx = 0;
    int curCacheSize = 0;
    int curIdxSize = 0;

    try {
        for (; curPart < procPartFutures.size(); curPart++) {
            Future<Map<PartitionKey, ValidateIndexesPartitionResult>> fut = procPartFutures.get(curPart);

            Map<PartitionKey, ValidateIndexesPartitionResult> partRes = fut.get();

            if (!partRes.isEmpty() && partRes.entrySet().stream().anyMatch(e -> !e.getValue().issues().isEmpty()))
                partResults.putAll(partRes);
        }

        for (; curIdx < procIdxFutures.size(); curIdx++) {
            Future<Map<String, ValidateIndexesPartitionResult>> fut = procIdxFutures.get(curIdx);

            Map<String, ValidateIndexesPartitionResult> idxRes = fut.get();

            if (!idxRes.isEmpty() && idxRes.entrySet().stream().anyMatch(e -> !e.getValue().issues().isEmpty()))
                idxResults.putAll(idxRes);
        }

        if (checkSizes) {
            for (; curCacheSize < cacheSizeFutures.size(); curCacheSize++)
                cacheSizeFutures.get(curCacheSize).get3().get();

            for (; curIdxSize < idxSizeFutures.size(); curIdxSize++)
                idxSizeFutures.get(curIdxSize).get3().get();

            checkSizes(cacheSizeFutures, idxSizeFutures, checkSizeResults);

            Map<Integer, Map<Integer, PartitionUpdateCounter>> partsWithCntrsPerGrpAfterChecks =
                getUpdateCountersSnapshot(ignite, grpIds);

            List<Integer> diff = compareUpdateCounters(ignite, partsWithCntrsPerGrp, partsWithCntrsPerGrpAfterChecks);

            if (!F.isEmpty(diff)) {
                String res = formatUpdateCountersDiff(ignite, diff);

                if (!res.isEmpty())
                    throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[" + res + "]");
            }
        }

        log.warning("ValidateIndexesClosure finished: processed " + totalPartitions + " partitions and " +
            totalIndexes + " indexes.");
    } catch (InterruptedException | ExecutionException e) {
        for (int j = curPart; j < procPartFutures.size(); j++)
            procPartFutures.get(j).cancel(false);

        for (int j = curIdx; j < procIdxFutures.size(); j++)
            procIdxFutures.get(j).cancel(false);

        for (int j = curCacheSize; j < cacheSizeFutures.size(); j++)
            cacheSizeFutures.get(j).get3().cancel(false);

        for (int j = curIdxSize; j < idxSizeFutures.size(); j++)
            idxSizeFutures.get(j).get3().cancel(false);

        throw unwrapFutureException(e);
    }

    if (validateCtx.isCancelled())
        throw new IgniteException(CANCELLED_MSG);

    return new VisorValidateIndexesJobResult(partResults, idxResults, integrityCheckResults.values(), checkSizeResults);
}
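The control flow of call0() follows a common fan-out/collect pattern: submit every partition and index task up front, drain the futures in submission order, and on the first failure cancel the futures that have not been consumed yet. A minimal standalone sketch of that pattern using plain java.util.concurrent (the squaring tasks are placeholders, not Ignite code):
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FanOutCollectSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);

        // Submit everything up front (fan-out), as call0() does for partitions and indexes.
        List<Future<Integer>> futs = new ArrayList<>();
        for (int i = 0; i < 16; i++) {
            int task = i;
            futs.add(pool.submit(() -> task * task));
        }

        int cur = 0;
        try {
            // Drain in submission order; 'cur' tracks how far we got.
            for (; cur < futs.size(); cur++)
                System.out.println("result: " + futs.get(cur).get());
        }
        catch (InterruptedException | ExecutionException e) {
            // On the first failure, cancel whatever has not been consumed yet,
            // mirroring the cancellation loops in the catch block above.
            for (int j = cur; j < futs.size(); j++)
                futs.get(j).cancel(false);

            throw e;
        }
        finally {
            pool.shutdown();
        }
    }
}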
use of org.apache.ignite.internal.processors.query.h2.opt.GridH2Table in project ignite by apache.
the class SqlTwoCachesInGroupWithSameEntryTest method test.
/**
 * @throws Exception On error.
 */
@SuppressWarnings("unchecked")
@Test
public void test() throws Exception {
    IgniteEx ign = startGrid(0);

    ign.cluster().active(true);

    IgniteCache cache0 = ign.createCache(new CacheConfiguration<>("cache0")
        .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
        .setGroupName("grp0")
        .setSqlSchema("CACHE0")
        .setIndexedTypes(Integer.class, Integer.class));

    IgniteCache cache1 = ign.createCache(new CacheConfiguration<>("cache1")
        .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
        .setGroupName("grp0")
        .setSqlSchema("CACHE1")
        .setIndexedTypes(Integer.class, Integer.class));

    for (int i = 0; i < KEYS; ++i) {
        cache0.put(i, i);
        cache1.put(i, i);
    }

    if (useOnlyPkHashIndex) {
        for (GridH2Table t : ((IgniteH2Indexing) grid(0).context().query().getIndexing()).schemaManager().dataTables())
            GridTestUtils.setFieldValue(t, "rebuildFromHashInProgress", 1);
    }

    assertEquals(KEYS, cache0.size());
    assertEquals(KEYS, cache1.size());
    assertEquals(KEYS, sql("select * FROM cache0.Integer").getAll().size());
    assertEquals(KEYS, sql("select * FROM cache1.Integer").getAll().size());

    cache0.clear();

    assertEquals(0, cache0.size());
    assertEquals(KEYS, cache1.size());
    assertEquals(0, sql("select * FROM cache0.Integer").getAll().size());
    assertEquals(KEYS, sql("select * FROM cache1.Integer").getAll().size());
}
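The sql(...) helper is not included in the snippet above. Presumably it wraps a SqlFieldsQuery and runs it through the node's query processor; the following is a sketch of what such a helper could look like inside the same test class, not the test's actual code (the real helper may differ, for example in schema handling or laziness settings):
// Imports assumed in the test class:
import java.util.List;
import org.apache.ignite.cache.query.FieldsQueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;

/** Sketch of the sql(...) helper used by the test above (assumed, not the original). */
private FieldsQueryCursor<List<?>> sql(String sqlText) {
    // grid(0) is the node started by the test framework; schema-qualified statements
    // such as "select * FROM cache0.Integer" resolve against the configured SQL schemas.
    return grid(0).context().query().querySqlFields(new SqlFieldsQuery(sqlText), false);
}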
use of org.apache.ignite.internal.processors.query.h2.opt.GridH2Table in project ignite by apache.
the class PartitionExtractor method extractFromIn.
/**
 * Extract partition information from IN.
 *
 * @param op Operation.
 * @param tblModel Table model.
 * @return Partition.
 */
private PartitionNode extractFromIn(GridSqlOperation op, PartitionTableModel tblModel) throws IgniteCheckedException {
    // Operation should contain at least two children: left (column) and right (const or column).
    if (op.size() < 2)
        return PartitionAllNode.INSTANCE;

    // Left operand should be column.
    GridSqlAst left = op.child();

    GridSqlColumn leftCol = unwrapColumn(left);

    if (leftCol == null)
        return PartitionAllNode.INSTANCE;

    // Can work only with Ignite tables.
    if (!(leftCol.column().getTable() instanceof GridH2Table))
        return PartitionAllNode.INSTANCE;

    Set<PartitionSingleNode> parts = new HashSet<>();

    for (int i = 1; i < op.size(); i++) {
        GridSqlAst right = op.child(i);

        GridSqlConst rightConst;
        GridSqlParameter rightParam;

        if (right instanceof GridSqlConst) {
            rightConst = (GridSqlConst) right;
            rightParam = null;
        } else if (right instanceof GridSqlParameter) {
            rightConst = null;
            rightParam = (GridSqlParameter) right;
        } else
            // One of the members of the "IN" list is neither a constant nor a parameter, so its partition
            // cannot be determined. Since this is a disjunction, a single unknown member makes the partition
            // set unknown globally. Hence, return the "all partitions" node.
            return PartitionAllNode.INSTANCE;

        // Extract.
        PartitionSingleNode part = extractSingle(leftCol, rightConst, rightParam, tblModel);

        // Same thing as above: single unknown partition in disjunction defeats optimization.
        if (part == null)
            return PartitionAllNode.INSTANCE;

        parts.add(part);
    }

    return parts.size() == 1 ? parts.iterator().next() : new PartitionGroupNode(parts);
}
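Once the extractor has reduced an IN list to a set of single-partition nodes, the query can be routed to only the nodes that own those partitions. The effect can be observed from the public API with Affinity.partition(key); the sketch below illustrates this for a hypothetical "persons" cache and the condition "_KEY IN (10, 20, 30)". It demonstrates the outcome only and is not the extractor's own code path, which works on the SQL AST:
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;

public class InListPartitionsSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.createCache("persons");

            // Each constant in the IN list maps to exactly one partition; the whole IN list
            // therefore maps to the union of those partitions, mirroring PartitionGroupNode.
            Affinity<Integer> aff = ignite.affinity("persons");

            Set<Integer> parts = new HashSet<>();
            for (int key : Arrays.asList(10, 20, 30))
                parts.add(aff.partition(key));

            System.out.println("Partitions targeted by the IN list: " + parts);
        }
    }
}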