Use of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in project ignite by apache.
The class H2TreeIndex, method findFirstOrLast.
/**
 * {@inheritDoc}
 */
@Override
public Cursor findFirstOrLast(Session ses, boolean b) {
    try {
        QueryContext qctx = H2Utils.context(ses);

        IndexQueryContext qryCtx = idxQryContext(qctx);

        GridCursor<IndexRow> cursor = b
            ? queryIndex.findFirst(segment(qctx), qryCtx)
            : queryIndex.findLast(segment(qctx), qryCtx);

        return new H2Cursor(new IndexValueCursor<>(cursor, this::mapIndexRow));
    }
    catch (IgniteCheckedException e) {
        throw DbException.convert(e);
    }
}
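As a side note, the first/last lookup above follows a common pattern: pick one end of an ordered index and map the raw row before handing it back to the caller. The following self-contained sketch illustrates that pattern with plain JDK types only; it is an illustration, not Ignite's GridCursor, H2Cursor or IndexValueCursor.

// Illustrative sketch only: the ordered map stands in for the underlying index
// tree, and the mapper stands in for mapIndexRow in the snippet above.
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.function.Function;

final class FirstLastLookupSketch {
    /** Returns the mapped first or last entry of the index, or null if the index is empty. */
    static <K, V, R> R findFirstOrLast(NavigableMap<K, V> idx, boolean first, Function<V, R> mapRow) {
        Map.Entry<K, V> entry = first ? idx.firstEntry() : idx.lastEntry();

        return entry == null ? null : mapRow.apply(entry.getValue());
    }

    public static void main(String[] args) {
        NavigableMap<Integer, String> idx = new TreeMap<>();

        idx.put(1, "first-row");
        idx.put(2, "last-row");

        System.out.println(findFirstOrLast(idx, true, String::toUpperCase));  // FIRST-ROW
        System.out.println(findFirstOrLast(idx, false, String::toUpperCase)); // LAST-ROW
    }
}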
Use of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in project ignite by apache.
The class DistributedLookupBatch, method addSearchRows.
/**
 * {@inheritDoc}
 */
@SuppressWarnings({"ForLoopReplaceableByForEach", "IfMayBeConditional"})
@Override
public boolean addSearchRows(SearchRow firstRow, SearchRow lastRow) {
    if (joinCtx == null || findCalled) {
        if (joinCtx == null) {
            // It is the first call after query begin (may be after reuse),
            // reinitialize query context and result.
            QueryContext qctx = QueryContext.threadLocal();

            res = new ArrayList<>();

            assert qctx != null;
            assert !findCalled;

            joinCtx = qctx.distributedJoinContext();
        }
        else {
            // Cleanup after the previous lookup phase.
            assert batchLookupId != 0;

            findCalled = false;
            joinCtx.putStreams(batchLookupId, null);
            res.clear();
        }

        // Reinitialize for the next lookup phase.
        batchLookupId = joinCtx.nextBatchLookupId();
        rangeStreams = new HashMap<>();
    }

    Object affKey = getAffinityKey(firstRow, lastRow);

    List<SegmentKey> segmentKeys;

    if (affKey != null) {
        // Affinity key is provided.
        if (affKey == EXPLICIT_NULL) // Affinity key is explicit null, we will not find anything.
            return false;

        segmentKeys = F.asList(rangeSegment(affKey));
    }
    else {
        // Affinity key is not provided or is not the same in upper and lower bounds, we have to broadcast.
        if (broadcastSegments == null)
            broadcastSegments = broadcastSegments();

        segmentKeys = broadcastSegments;
    }

    assert !F.isEmpty(segmentKeys) : segmentKeys;

    final int rangeId = res.size();

    // Create messages.
    GridH2RowMessage first = idx.toSearchRowMessage(firstRow);
    GridH2RowMessage last = idx.toSearchRowMessage(lastRow);

    // Range containing upper and lower bounds.
    GridH2RowRangeBounds rangeBounds = rangeBounds(rangeId, first, last);

    // Add range to every message of every participating node.
    for (int i = 0; i < segmentKeys.size(); i++) {
        SegmentKey segmentKey = segmentKeys.get(i);

        assert segmentKey != null;

        RangeStream stream = rangeStreams.get(segmentKey);

        List<GridH2RowRangeBounds> bounds;

        if (stream == null) {
            stream = new RangeStream(cctx.kernalContext(), idx, joinCtx, segmentKey.node());

            stream.request(createRequest(joinCtx, batchLookupId, segmentKey.segmentId()));
            stream.request().bounds(bounds = new ArrayList<>());

            rangeStreams.put(segmentKey, stream);
        }
        else
            bounds = stream.request().bounds();

        bounds.add(rangeBounds);

        // If at least one node will have a full batch then we are ok.
        if (bounds.size() >= joinCtx.pageSize())
            batchFull = true;
    }

    Cursor cur;

    if (segmentKeys.size() == 1)
        cur = new UnicastCursor(rangeId, rangeStreams.get(F.first(segmentKeys)));
    else
        cur = new BroadcastCursor(idx, rangeId, segmentKeys, rangeStreams);

    res.add(new DoneFuture<>(cur));

    return true;
}
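The batching logic above groups the range bounds per target segment and marks the batch full as soon as any single target reaches the page size. A minimal, self-contained sketch of that grouping follows; it uses plain Strings and integers instead of Ignite's SegmentKey and GridH2RowRangeBounds, purely for illustration.

// Illustrative sketch of the per-target batching used by addSearchRows: each
// target accumulates its own list of range ids, and the batch is considered
// full as soon as any one target has a full page.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class RangeBatchSketch {
    private final Map<String, List<Integer>> boundsPerTarget = new HashMap<>();
    private final int pageSize;
    private boolean batchFull;

    RangeBatchSketch(int pageSize) {
        this.pageSize = pageSize;
    }

    /** Adds a range id to every target's batch and updates the batch-full flag. */
    void addRange(int rangeId, List<String> targets) {
        for (String target : targets) {
            List<Integer> bounds = boundsPerTarget.computeIfAbsent(target, t -> new ArrayList<>());

            bounds.add(rangeId);

            // If at least one target has a full batch then we are done collecting.
            if (bounds.size() >= pageSize)
                batchFull = true;
        }
    }

    boolean batchFull() {
        return batchFull;
    }

    public static void main(String[] args) {
        RangeBatchSketch batch = new RangeBatchSketch(2);

        batch.addRange(0, List.of("nodeA", "nodeB"));
        System.out.println(batch.batchFull()); // false

        batch.addRange(1, List.of("nodeA"));
        System.out.println(batch.batchFull()); // true: nodeA reached the page size.
    }
}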
Use of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in project ignite by apache.
The class ValidateIndexesClosure, method processPartition.
/**
 * @param grpCtx Group context.
 * @param part Local partition.
 */
private Map<PartitionKey, ValidateIndexesPartitionResult> processPartition(CacheGroupContext grpCtx, GridDhtLocalPartition part) {
    if (validateCtx.isCancelled() || !part.reserve())
        return emptyMap();

    ValidateIndexesPartitionResult partRes;

    try {
        if (part.state() != OWNING)
            return emptyMap();

        @Nullable PartitionUpdateCounter updCntr = part.dataStore().partUpdateCounter();

        PartitionUpdateCounter updateCntrBefore = updCntr == null ? null : updCntr.copy();

        partRes = new ValidateIndexesPartitionResult();

        boolean hasMvcc = grpCtx.caches().stream().anyMatch(GridCacheContext::mvccEnabled);

        if (hasMvcc) {
            for (GridCacheContext<?, ?> context : grpCtx.caches()) {
                try (Session session = mvccSession(context)) {
                    MvccSnapshot mvccSnapshot = null;

                    boolean mvccEnabled = context.mvccEnabled();

                    if (mvccEnabled)
                        mvccSnapshot = ((QueryContext)session.getVariable(H2Utils.QCTX_VARIABLE_NAME).getObject()).mvccSnapshot();

                    GridIterator<CacheDataRow> iterator = grpCtx.offheap().cachePartitionIterator(context.cacheId(), part.id(), mvccSnapshot, null);

                    processPartIterator(grpCtx, partRes, session, iterator);
                }
            }
        }
        else
            processPartIterator(grpCtx, partRes, null, grpCtx.offheap().partitionIterator(part.id()));

        PartitionUpdateCounter updateCntrAfter = part.dataStore().partUpdateCounter();

        if (updateCntrAfter != null && !updateCntrAfter.equals(updateCntrBefore)) {
            throw new GridNotIdleException(GRID_NOT_IDLE_MSG + "[grpName=" + grpCtx.cacheOrGroupName() +
                ", grpId=" + grpCtx.groupId() + ", partId=" + part.id() + "] changed during index validation " +
                "[before=" + updateCntrBefore + ", after=" + updateCntrAfter + "]");
        }
    }
    catch (IgniteCheckedException e) {
        error(log, "Failed to process partition [grpId=" + grpCtx.groupId() + ", partId=" + part.id() + "]", e);

        return emptyMap();
    }
    finally {
        part.release();

        printProgressOfIndexValidationIfNeeded();
    }

    PartitionKey partKey = new PartitionKey(grpCtx.groupId(), part.id(), grpCtx.cacheOrGroupName());

    processedPartitions.incrementAndGet();

    return Collections.singletonMap(partKey, partRes);
}
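Besides reading the QueryContext (and its MVCC snapshot) from the H2 session variable, the notable detail above is the idle check: the partition update counter is copied before validation and compared afterwards, and any difference means the partition changed while it was being validated. A simplified, self-contained sketch of that check is below; the counter is a plain AtomicLong rather than Ignite's PartitionUpdateCounter.

// Illustrative sketch of the before/after counter comparison used to detect
// concurrent updates while a partition is being validated.
import java.util.concurrent.atomic.AtomicLong;

final class IdleCheckSketch {
    /** Runs the validation task and fails if the partition counter changed while it ran. */
    static void validateWhileIdle(AtomicLong partUpdateCounter, Runnable validation) {
        long before = partUpdateCounter.get();

        validation.run();

        long after = partUpdateCounter.get();

        if (after != before)
            throw new IllegalStateException("Partition changed during index validation [before=" + before + ", after=" + after + "]");
    }

    public static void main(String[] args) {
        AtomicLong cntr = new AtomicLong();

        validateWhileIdle(cntr, () -> { /* no concurrent updates: passes */ });

        try {
            validateWhileIdle(cntr, cntr::incrementAndGet); // simulated concurrent update
        }
        catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
    }
}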
Use of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in project ignite by apache.
The class IgniteH2Indexing, method executeSelectLocal.
/**
 * Queries individual fields (generally used by JDBC drivers).
 *
 * @param qryId Query id.
 * @param qryDesc Query descriptor.
 * @param qryParams Query parameters.
 * @param select Select.
 * @param filter Cache name and key filter.
 * @param mvccTracker Query tracker.
 * @param cancel Query cancel.
 * @param inTx Flag whether the query is executed in transaction.
 * @param timeout Timeout.
 * @return Query result.
 * @throws IgniteCheckedException If failed.
 */
private GridQueryFieldsResult executeSelectLocal(
    long qryId,
    QueryDescriptor qryDesc,
    QueryParameters qryParams,
    QueryParserResultSelect select,
    final IndexingQueryFilter filter,
    MvccQueryTracker mvccTracker,
    GridQueryCancel cancel,
    boolean inTx,
    int timeout
) throws IgniteCheckedException {
    assert !select.mvccEnabled() || mvccTracker != null;

    String qry;

    if (select.forUpdate())
        qry = inTx ? select.forUpdateQueryTx() : select.forUpdateQueryOutTx();
    else
        qry = qryDesc.sql();

    boolean mvccEnabled = mvccTracker != null;

    try {
        assert select != null;

        if (ctx.security().enabled())
            checkSecurity(select.cacheIds());

        MvccSnapshot mvccSnapshot = null;

        if (mvccEnabled)
            mvccSnapshot = mvccTracker.snapshot();

        final QueryContext qctx = new QueryContext(0, filter, null, mvccSnapshot, null, true);

        return new GridQueryFieldsResultAdapter(select.meta(), null) {
            @Override
            public GridCloseableIterator<List<?>> iterator() throws IgniteCheckedException {
                H2PooledConnection conn = connections().connection(qryDesc.schemaName());

                try (TraceSurroundings ignored = MTC.support(ctx.tracing().create(SQL_ITER_OPEN, MTC.span()))) {
                    H2Utils.setupConnection(conn, qctx, qryDesc.distributedJoins(), qryDesc.enforceJoinOrder(), qryParams.lazy());

                    PreparedStatement stmt = conn.prepareStatement(qry, H2StatementCache.queryFlags(qryDesc));

                    // Convert parameters into BinaryObjects.
                    Marshaller m = ctx.config().getMarshaller();

                    byte[] paramsBytes = U.marshal(m, qryParams.arguments());

                    final ClassLoader ldr = U.resolveClassLoader(ctx.config());

                    Object[] params;

                    if (m instanceof BinaryMarshaller)
                        params = BinaryUtils.rawArrayFromBinary(((BinaryMarshaller)m).binaryMarshaller().unmarshal(paramsBytes, ldr));
                    else
                        params = U.unmarshal(m, paramsBytes, ldr);

                    H2Utils.bindParameters(stmt, F.asList(params));

                    H2QueryInfo qryInfo = new H2QueryInfo(H2QueryInfo.QueryType.LOCAL, stmt, qry, ctx.localNodeId(), qryId);

                    ResultSet rs = executeSqlQueryWithTimer(stmt, conn, qry, timeout, cancel, qryParams.dataPageScanEnabled(), qryInfo);

                    return new H2FieldsIterator(rs, mvccTracker, conn, qryParams.pageSize(), log, IgniteH2Indexing.this, qryInfo, ctx.tracing());
                }
                catch (IgniteCheckedException | RuntimeException | Error e) {
                    conn.close();

                    try {
                        if (mvccTracker != null)
                            mvccTracker.onDone();
                    }
                    catch (Exception e0) {
                        e.addSuppressed(e0);
                    }

                    throw e;
                }
            }
        };
    }
    catch (Exception e) {
        GridNearTxLocal tx = null;

        if (mvccEnabled && (tx != null || (tx = tx(ctx)) != null))
            tx.setRollbackOnly();

        throw e;
    }
}
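Here the QueryContext is built locally and attached to the pooled connection via H2Utils.setupConnection. Note the error handling inside iterator(): if anything fails before the H2FieldsIterator takes ownership of the connection, the connection is closed and any secondary failure during cleanup is attached to the original exception as a suppressed one. A self-contained sketch of that cleanup pattern follows; it uses a plain AutoCloseable and Callable instead of Ignite's connection and tracker types.

// Illustrative sketch of the cleanup pattern in the iterator above: on failure,
// release the already-acquired resource and keep any cleanup error as a
// suppressed exception so the original cause is not lost.
import java.util.concurrent.Callable;

final class CleanupOnFailureSketch {
    /** Tries to produce a result; on failure releases the resource and rethrows the original error. */
    static <R> R produceOrCleanup(AutoCloseable resource, Callable<R> producer) throws Exception {
        try {
            return producer.call();
        }
        catch (Exception e) {
            try {
                resource.close();
            }
            catch (Exception e0) {
                e.addSuppressed(e0); // Keep the cleanup failure without losing the original error.
            }

            throw e;
        }
    }

    public static void main(String[] args) {
        AutoCloseable failingResource = () -> { throw new IllegalStateException("close failed"); };

        try {
            produceOrCleanup(failingResource, () -> { throw new IllegalStateException("query failed"); });
        }
        catch (Exception e) {
            System.out.println(e.getMessage() + ", suppressed: " + e.getSuppressed()[0].getMessage());
        }
    }
}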
Use of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in project ignite by apache.
The class H2TreeIndex, method onIndexRangeResponse.
/**
 * @param node Responded node.
 * @param msg Response message.
 */
private void onIndexRangeResponse(ClusterNode node, GridH2IndexRangeResponse msg) {
    try (TraceSurroundings ignored = MTC.support(ctx.tracing().create(SQL_IDX_RANGE_RESP, MTC.span()))) {
        QueryContext qctx = qryCtxRegistry.getShared(msg.originNodeId(), msg.queryId(), msg.originSegmentId());

        if (qctx == null)
            return;

        DistributedJoinContext joinCtx = qctx.distributedJoinContext();

        assert joinCtx != null;

        Map<SegmentKey, RangeStream> streams = joinCtx.getStreams(msg.batchLookupId());

        if (streams == null)
            return;

        RangeStream stream = streams.get(new SegmentKey(node, msg.segment()));

        assert stream != null;

        stream.onResponse(msg);
    }
}
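The handler above looks up the shared QueryContext by (origin node id, query id, origin segment id) and silently drops the response if that state is already gone, which is what happens when the query has completed or been cancelled. A simplified, self-contained sketch of that routing pattern is below; the string key and StringBuilder state are illustrative stand-ins for the query context registry and range streams.

// Illustrative sketch of the response routing in onIndexRangeResponse: per-query
// state is looked up by a composite key, and late responses for queries whose
// state was already removed are simply ignored.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class ResponseRoutingSketch {
    /** Per-query state keyed by "originNodeId:queryId:segment", standing in for the query context registry. */
    private final Map<String, StringBuilder> perQueryState = new ConcurrentHashMap<>();

    void register(String key) {
        perQueryState.put(key, new StringBuilder());
    }

    /** Routes a response payload to the matching query state, or drops it if the query is already gone. */
    void onResponse(String key, String payload) {
        StringBuilder state = perQueryState.get(key);

        if (state == null)
            return; // Query already finished; the late response is simply ignored.

        state.append(payload);
    }

    public static void main(String[] args) {
        ResponseRoutingSketch router = new ResponseRoutingSketch();

        router.register("node-1:42:0");

        router.onResponse("node-1:42:0", "rows");
        router.onResponse("node-2:7:1", "ignored"); // unknown query: dropped

        System.out.println(router.perQueryState.get("node-1:42:0")); // rows
    }
}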