Usage of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in the Apache Ignite project: class DistributedLookupBatch, method addSearchRows.
/**
 * Adds a search range (lower/upper bound rows) to the per-segment request batch
 * for a distributed join lookup and registers a cursor future for its results.
 *
 * {@inheritDoc}
 */
@SuppressWarnings({ "ForLoopReplaceableByForEach", "IfMayBeConditional" })
@Override
public boolean addSearchRows(SearchRow firstRow, SearchRow lastRow) {
// (Re)initialize lookup state: either this is the first call for the query,
// or a previous lookup phase ('find') has completed and must be cleaned up.
if (joinCtx == null || findCalled) {
if (joinCtx == null) {
// It is the first call after query begin (may be after reuse),
// reinitialize query context and result.
QueryContext qctx = QueryContext.threadLocal();
res = new ArrayList<>();
assert qctx != null;
assert !findCalled;
joinCtx = qctx.distributedJoinContext();
} else {
// Cleanup after the previous lookup phase.
assert batchLookupId != 0;
findCalled = false;
joinCtx.putStreams(batchLookupId, null);
res.clear();
}
// Reinitialize for the next lookup phase.
batchLookupId = joinCtx.nextBatchLookupId();
rangeStreams = new HashMap<>();
}
// An affinity key common to both bounds lets us target a single segment
// instead of broadcasting the range to all segments.
Object affKey = getAffinityKey(firstRow, lastRow);
List<SegmentKey> segmentKeys;
if (affKey != null) {
// Affinity key is provided.
if (// Affinity key is explicit null, we will not find anything.
affKey == EXPLICIT_NULL)
return false;
segmentKeys = F.asList(rangeSegment(affKey));
} else {
// Affinity key is not provided or is not the same in upper and lower bounds, we have to broadcast.
if (broadcastSegments == null)
broadcastSegments = broadcastSegments();
segmentKeys = broadcastSegments;
}
assert !F.isEmpty(segmentKeys) : segmentKeys;
// Range id doubles as the index of the cursor future appended to 'res' below.
final int rangeId = res.size();
// Create messages.
GridH2RowMessage first = idx.toSearchRowMessage(firstRow);
GridH2RowMessage last = idx.toSearchRowMessage(lastRow);
// Range containing upper and lower bounds.
GridH2RowRangeBounds rangeBounds = rangeBounds(rangeId, first, last);
// Add range to every message of every participating node.
for (int i = 0; i < segmentKeys.size(); i++) {
SegmentKey segmentKey = segmentKeys.get(i);
assert segmentKey != null;
RangeStream stream = rangeStreams.get(segmentKey);
List<GridH2RowRangeBounds> bounds;
if (stream == null) {
// First range for this segment: create a stream and the request that will carry the bounds.
stream = new RangeStream(cctx.kernalContext(), idx, joinCtx, segmentKey.node());
stream.request(createRequest(joinCtx, batchLookupId, segmentKey.segmentId()));
stream.request().bounds(bounds = new ArrayList<>());
rangeStreams.put(segmentKey, stream);
} else
bounds = stream.request().bounds();
bounds.add(rangeBounds);
// If at least one node will have a full batch then we are ok.
if (bounds.size() >= joinCtx.pageSize())
batchFull = true;
}
// Single target segment -> unicast cursor; otherwise merge results from all segments.
Cursor cur;
if (segmentKeys.size() == 1)
cur = new UnicastCursor(rangeId, rangeStreams.get(F.first(segmentKeys)));
else
cur = new BroadcastCursor(idx, rangeId, segmentKeys, rangeStreams);
res.add(new DoneFuture<>(cur));
return true;
}
Usage of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in the Apache Ignite project: class ValidateIndexesClosure, method checkSizes.
/**
 * Checking size of records in cache and indexes with a record into
 * {@code checkSizeRes} if they are not equal.
 *
 * @param cacheSizesFutures Futures calculating size of records in caches.
 * @param idxSizeFutures Futures calculating size of indexes of caches.
 * @param checkSizeRes Result of size check.
 */
private void checkSizes(List<T3<CacheGroupContext, GridDhtLocalPartition, Future<CacheSize>>> cacheSizesFutures, List<T3<GridCacheContext, Index, Future<T2<Throwable, Long>>>> idxSizeFutures, Map<String, ValidateIndexesCheckSizeResult> checkSizeRes) throws ExecutionException, InterruptedException {
    if (!checkSizes)
        return;

    // Per-group cache size totals accumulated from all partition futures.
    Map<Integer, CacheSize> totalsByGrp = new HashMap<>();

    for (T3<CacheGroupContext, GridDhtLocalPartition, Future<CacheSize>> sizeFut : cacheSizesFutures) {
        CacheGroupContext grp = sizeFut.get1();
        CacheSize partSize = sizeFut.get3().get();

        Throwable partErr = partSize.err;
        int grpId = grp.groupId();

        if (failCalcCacheSizeGrpIds.contains(grpId) && nonNull(partErr)) {
            // Size calculation failed for this group: record the failure as an issue.
            checkSizeRes
                .computeIfAbsent(cacheGrpInfo(grp), s -> new ValidateIndexesCheckSizeResult(0, new ArrayList<>()))
                .issues()
                .add(new ValidateIndexesCheckSizeIssue(null, 0, partErr));
        }
        else
            totalsByGrp.computeIfAbsent(grpId, i -> new CacheSize(null, new HashMap<>())).merge(partSize.cacheSizePerTbl);
    }

    // Compare each index size against the accumulated cache size of its table.
    for (T3<GridCacheContext, Index, Future<T2<Throwable, Long>>> idxFut : idxSizeFutures) {
        GridCacheContext cacheCtx = idxFut.get1();

        int grpId = cacheCtx.groupId();

        // Groups whose cache size could not be calculated were reported above.
        if (failCalcCacheSizeGrpIds.contains(grpId))
            continue;

        Index idx = idxFut.get2();
        String tblName = idx.getTable().getName();

        AtomicLong cntr = totalsByGrp.get(grpId).cacheSizePerTbl
            .getOrDefault(cacheCtx.cacheId(), emptyMap())
            .get(tblName);

        // Missing counter means no indexed rows were seen for the table.
        long cacheSizeByTbl = isNull(cntr) ? 0L : cntr.get();

        T2<Throwable, Long> idxRes = idxFut.get3().get();

        Throwable err = idxRes.get1();
        long idxSize = idxRes.get2();

        if (isNull(err) && idxSize != cacheSizeByTbl)
            err = new IgniteException("Cache and index size not same.");

        if (nonNull(err)) {
            String key = "[" + cacheGrpInfo(cacheCtx.group()) + ", " + cacheInfo(cacheCtx) + ", tableName=" + tblName + "]";

            checkSizeRes
                .computeIfAbsent(key, s -> new ValidateIndexesCheckSizeResult(cacheSizeByTbl, new ArrayList<>()))
                .issues()
                .add(new ValidateIndexesCheckSizeIssue(idx.getName(), idxSize, err));
        }
    }
}
Usage of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in the Apache Ignite project: class ValidateIndexesClosure, method calcCacheSize.
/**
 * Calculation of caches size with divided by tables.
 *
 * Iterates all rows of one local partition and counts them per (cacheId, tableName).
 * The partition is reserved for the duration of the scan, and the partition update
 * counter is snapshotted before and compared after to detect concurrent updates
 * (the grid must be idle for the validation to be meaningful).
 *
 * @param grpCtx Cache group context.
 * @param locPart Local partition.
 * @return Cache size representation object.
 */
private CacheSize calcCacheSize(CacheGroupContext grpCtx, GridDhtLocalPartition locPart) {
try {
// Validation was cancelled: return an empty (non-error) result.
if (validateCtx.isCancelled())
return new CacheSize(null, emptyMap());
@Nullable PartitionUpdateCounter updCntr = locPart.dataStore().partUpdateCounter();
// Snapshot the counter (copy) so concurrent changes during the scan can be detected below.
PartitionUpdateCounter updateCntrBefore = updCntr == null ? updCntr : updCntr.copy();
int grpId = grpCtx.groupId();
// A previous partition of this group already failed: skip further work for the group.
if (failCalcCacheSizeGrpIds.contains(grpId))
return new CacheSize(null, null);
boolean reserve = false;
int partId = locPart.id();
try {
// Reserve to keep the partition from being evicted/moved while scanning.
if (!(reserve = locPart.reserve()))
throw new IgniteException("Can't reserve partition");
if (locPart.state() != OWNING)
throw new IgniteException("Partition not in state " + OWNING);
// cacheId -> (tableName -> row count).
Map<Integer, Map<String, AtomicLong>> cacheSizeByTbl = new HashMap<>();
GridIterator<CacheDataRow> partIter = grpCtx.offheap().partitionIterator(partId);
GridQueryProcessor qryProcessor = ignite.context().query();
IgniteH2Indexing h2Indexing = (IgniteH2Indexing) qryProcessor.getIndexing();
// Stop early if another thread marked this group as failed meanwhile.
while (partIter.hasNextX() && !failCalcCacheSizeGrpIds.contains(grpId)) {
CacheDataRow cacheDataRow = partIter.nextX();
int cacheId = cacheDataRow.cacheId();
// cacheId == 0 means the group hosts a single cache; resolve the context accordingly.
GridCacheContext cacheCtx = cacheId == 0 ? grpCtx.singleCacheContext() : grpCtx.shared().cacheContext(cacheId);
if (cacheCtx == null)
throw new IgniteException("Unknown cacheId of CacheDataRow: " + cacheId);
if (cacheDataRow.link() == 0L)
throw new IgniteException("Contains invalid partition row, possibly deleted");
String cacheName = cacheCtx.name();
QueryTypeDescriptorImpl qryTypeDesc = qryProcessor.typeByValue(cacheName, cacheCtx.cacheObjectContext(), cacheDataRow.key(), cacheDataRow.value(), true);
if (isNull(qryTypeDesc))
// Tolerate - (k, v) is just not indexed.
continue;
String tableName = qryTypeDesc.tableName();
GridH2Table gridH2Tbl = h2Indexing.schemaManager().dataTable(cacheName, tableName);
if (isNull(gridH2Tbl))
// Tolerate - (k, v) is just not indexed.
continue;
cacheSizeByTbl.computeIfAbsent(cacheCtx.cacheId(), i -> new HashMap<>()).computeIfAbsent(tableName, s -> new AtomicLong()).incrementAndGet();
}
// Counter changed during the scan -> grid was not idle, the computed sizes are unreliable.
PartitionUpdateCounter updateCntrAfter = locPart.dataStore().partUpdateCounter();
if (updateCntrAfter != null && !updateCntrAfter.equals(updateCntrBefore)) {
throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[grpName=" + grpCtx.cacheOrGroupName() + ", grpId=" + grpCtx.groupId() + ", partId=" + locPart.id() + "] changed during size " + "calculation [updCntrBefore=" + updateCntrBefore + ", updCntrAfter=" + updateCntrAfter + "]");
}
return new CacheSize(null, cacheSizeByTbl);
} catch (Throwable t) {
// Record the error and mark the whole group as failed so other partitions skip it.
IgniteException cacheSizeErr = new IgniteException("Cache size calculation error [" + cacheGrpInfo(grpCtx) + ", locParId=" + partId + ", err=" + t.getMessage() + "]", t);
error(log, cacheSizeErr);
failCalcCacheSizeGrpIds.add(grpId);
return new CacheSize(cacheSizeErr, null);
} finally {
// Always release a successful reservation.
if (reserve)
locPart.release();
}
} finally {
// Progress accounting runs regardless of success, failure or cancellation.
processedCacheSizePartitions.incrementAndGet();
printProgressOfIndexValidationIfNeeded();
}
}
Usage of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in the Apache Ignite project: class IgniteH2Indexing, method tablesInformation.
/**
 * {@inheritDoc}
 */
@Override
public Collection<TableInformation> tablesInformation(String schemaNamePtrn, String tblNamePtrn, String... tblTypes) {
    // An empty type filter means "all table types".
    boolean allTypes = F.isEmpty(tblTypes);

    Set<String> types = allTypes ? Collections.emptySet() : new HashSet<>(Arrays.asList(tblTypes));

    Collection<TableInformation> infos = new ArrayList<>();

    if (allTypes || types.contains(TableType.TABLE.name())) {
        schemaMgr.dataTables().stream()
            .filter(tbl -> matches(tbl.getSchema().getName(), schemaNamePtrn))
            .filter(tbl -> matches(tbl.getName(), tblNamePtrn))
            .forEach(tbl -> {
                int cacheGrpId = tbl.cacheInfo().groupId();

                CacheGroupDescriptor cacheGrpDesc = ctx.cache().cacheGroupDescriptors().get(cacheGrpId);

                // Skip the table in case its cache group has been concurrently removed.
                if (cacheGrpDesc == null)
                    return;

                GridQueryTypeDescriptor type = tbl.rowDescriptor().type();

                IndexColumn affCol = tbl.getExplicitAffinityKeyColumn();

                String affinityKeyCol = affCol == null ? null : affCol.columnName;

                infos.add(new TableInformation(tbl.getSchema().getName(), tbl.getName(), TableType.TABLE.name(),
                    cacheGrpId, cacheGrpDesc.cacheOrGroupName(), tbl.cacheId(), tbl.cacheName(), affinityKeyCol,
                    type.keyFieldAlias(), type.valueFieldAlias(), type.keyTypeName(), type.valueTypeName()));
            });
    }

    // System views live exclusively in the SYS schema.
    if ((allTypes || types.contains(TableType.VIEW.name())) && matches(QueryUtils.SCHEMA_SYS, schemaNamePtrn)) {
        schemaMgr.systemViews().stream()
            .filter(view -> matches(view.getTableName(), tblNamePtrn))
            .map(view -> new TableInformation(QueryUtils.SCHEMA_SYS, view.getTableName(), TableType.VIEW.name()))
            .forEach(infos::add);
    }

    return infos;
}
Usage of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in the Apache Ignite project: class IgniteH2Indexing, method start.
/**
 * Starts the H2 indexing subsystem: connection/schema/query managers, map and
 * reduce query executors, discovery and message listeners, and SQL command
 * processing. Initialization order matters: later components depend on the
 * managers created before them.
 *
 * {@inheritDoc}
 */
@SuppressWarnings({ "deprecation", "AssignmentToStaticFieldFromInstanceMethod" })
@Override
public void start(GridKernalContext ctx, GridSpinBusyLock busyLock) throws IgniteCheckedException {
if (log.isDebugEnabled())
log.debug("Starting cache query index...");
this.busyLock = busyLock;
// H2's built-in Java object serialization conflicts with Ignite's own handling; force it off.
if (SysProperties.serializeJavaObject) {
U.warn(log, "Serialization of Java objects in H2 was enabled.");
SysProperties.serializeJavaObject = false;
}
this.ctx = ctx;
// Core managers: partition reservations, H2 connections, long-running query tracking.
partReservationMgr = new PartitionReservationManager(ctx);
connMgr = new ConnectionManager(ctx);
longRunningQryMgr = new LongRunningQueryManager(ctx);
parser = new QueryParser(this, connMgr);
schemaMgr = new SchemaManager(ctx, connMgr);
schemaMgr.start(ctx.config().getSqlConfiguration().getSqlSchemas());
statsMgr = new IgniteStatisticsManagerImpl(ctx, schemaMgr);
nodeId = ctx.localNodeId();
marshaller = ctx.config().getMarshaller();
// Distributed query execution: map (data node) and reduce (initiator) sides.
mapQryExec = new GridMapQueryExecutor();
rdcQryExec = new GridReduceQueryExecutor();
mapQryExec.start(ctx, this);
rdcQryExec.start(ctx, this);
// Propagate topology changes to both executors so in-flight queries can fail over.
discoLsnr = evt -> {
mapQryExec.onNodeLeft((DiscoveryEvent) evt);
rdcQryExec.onNodeLeft((DiscoveryEvent) evt);
};
ctx.event().addLocalEventListener(discoLsnr, EventType.EVT_NODE_FAILED, EventType.EVT_NODE_LEFT);
// Route incoming query-topic messages to this indexing instance.
qryLsnr = (nodeId, msg, plc) -> onMessage(nodeId, msg);
ctx.io().addMessageListener(GridTopic.TOPIC_QUERY, qryLsnr);
runningQryMgr = new RunningQueryManager(ctx);
partExtractor = new PartitionExtractor(new H2PartitionResolver(this), ctx);
cmdProc = new CommandProcessor(ctx, schemaMgr, this);
cmdProc.start();
// NOTE: overrides the JVM-global H2 serializer; warn if someone else installed one first.
if (JdbcUtils.serializer != null)
U.warn(log, "Custom H2 serialization is already configured, will override.");
JdbcUtils.serializer = h2Serializer();
distrCfg = new DistributedSqlConfiguration(ctx, log);
funcMgr = new FunctionsManager(distrCfg);
}
Aggregations