Use of org.h2.mvstore.Page in project h2database by h2database.
The class PageDataOverflow, method create.
/**
* Create a new overflow page.
*
* @param store the page store
* @param page the page id
* @param type the page type
* @param parentPageId the parent page id
* @param next the next page or 0
* @param all the data
* @param offset the offset within the data
* @param size the number of bytes
* @return the page
*/
static PageDataOverflow create(PageStore store, int page, int type,
        int parentPageId, int next, Data all, int offset, int size) {
    Data data = store.createData();
    PageDataOverflow p = new PageDataOverflow(store, page, data);
    store.logUndo(p, null);
    // Page header: type byte, two reserved bytes, parent page id.
    data.writeByte((byte) type);
    data.writeShortInt(0);
    data.writeInt(parentPageId);
    if (type == Page.TYPE_DATA_OVERFLOW) {
        // Not the last page of the chain: store the next page id.
        data.writeInt(next);
    } else {
        // Last page of the chain: store the payload size instead.
        data.writeShortInt(size);
    }
    p.start = data.length();
    data.write(all.getBytes(), offset, size);
    p.type = type;
    p.parentPageId = parentPageId;
    p.nextPage = next;
    p.size = size;
    return p;
}
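For context, the caller (PageDataLeaf in H2's page store) chains overflow pages when a row does not fit on one leaf page. The following is a minimal sketch of that chaining pattern, not the actual H2 code; pageIds, maxPayload, and the surrounding variables are illustrative assumptions:

// Sketch: write `size` bytes from `all` as a chain of overflow pages.
// Assumes pageIds[] was allocated up front and maxPayload is the
// per-page data capacity (both hypothetical here).
int pos = offset;
int remaining = size;
int parent = parentPageId;
for (int i = 0; i < pageIds.length; i++) {
    boolean last = (i == pageIds.length - 1);
    // Intermediate pages carry a `next` pointer; the last page carries the size.
    int pageType = last ? (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST) : Page.TYPE_DATA_OVERFLOW;
    int next = last ? 0 : pageIds[i + 1];
    int chunk = last ? remaining : maxPayload;
    PageDataOverflow overflow = PageDataOverflow.create(
        store, pageIds[i], pageType, parent, next, all, pos, chunk);
    store.update(overflow);
    parent = pageIds[i];
    pos += chunk;
    remaining -= chunk;
}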
Use of org.h2.mvstore.Page in project ignite by apache.
The class ValidateIndexesClosure, method processIndex.
/**
 * @param cacheCtxWithIdx Cache context and appropriate index.
 * @param idleChecker Idle check closure.
 * @return Single-entry map from the unique index name to its validation result.
 */
private Map<String, ValidateIndexesPartitionResult> processIndex(
    T2<GridCacheContext, Index> cacheCtxWithIdx,
    IgniteInClosure<Integer> idleChecker
) {
    if (validateCtx.isCancelled())
        return emptyMap();

    GridCacheContext ctx = cacheCtxWithIdx.get1();
    Index idx = cacheCtxWithIdx.get2();

    ValidateIndexesPartitionResult idxValidationRes = new ValidateIndexesPartitionResult();

    boolean enoughIssues = false;

    Cursor cursor = null;

    try (Session session = mvccSession(cacheCtxWithIdx.get1())) {
        cursor = idx.find(session, null, null);

        if (cursor == null)
            throw new IgniteCheckedException("Can't iterate through index: " + idx);
    }
    catch (Throwable t) {
        IndexValidationIssue is = new IndexValidationIssue(null, ctx.name(), idx.getName(), t);

        log.error("Find in index failed: " + is.toString());

        idxValidationRes.reportIssue(is);

        enoughIssues = true;
    }

    final boolean skipConditions = checkFirst > 0 || checkThrough > 0;
    final boolean bothSkipConditions = checkFirst > 0 && checkThrough > 0;

    long current = 0;
    long processedNumber = 0;

    KeyCacheObject previousKey = null;

    while (!enoughIssues && !validateCtx.isCancelled()) {
        KeyCacheObject h2key = null;

        try {
            try {
                if (!cursor.next())
                    break;
            }
            catch (DbException e) {
                if (X.hasCause(e, CorruptedTreeException.class))
                    throw new IgniteCheckedException("Key is present in SQL index, but is missing in corresponding " +
                        "data page. Previous successfully read key: " +
                        CacheObjectUtils.unwrapBinaryIfNeeded(ctx.cacheObjectContext(), previousKey, true, true),
                        X.cause(e, CorruptedTreeException.class));

                throw e;
            }

            H2CacheRow h2Row = (H2CacheRow)cursor.get();

            if (skipConditions) {
                if (bothSkipConditions) {
                    if (processedNumber > checkFirst)
                        break;
                    else if (current++ % checkThrough > 0)
                        continue;
                    else
                        processedNumber++;
                }
                else {
                    if (checkFirst > 0) {
                        if (current++ > checkFirst)
                            break;
                    }
                    else {
                        if (current++ % checkThrough > 0)
                            continue;
                    }
                }
            }

            h2key = h2Row.key();

            if (h2Row.link() != 0L) {
                CacheDataRow cacheDataStoreRow = ctx.group().offheap().read(ctx, h2key);

                if (cacheDataStoreRow == null)
                    throw new IgniteCheckedException("Key is present in SQL index, but can't be found in CacheDataTree.");
            }
            else
                throw new IgniteCheckedException("Invalid index row, possibly deleted " + h2Row);
        }
        catch (Throwable t) {
            Object o = CacheObjectUtils.unwrapBinaryIfNeeded(ctx.cacheObjectContext(), h2key, true, true);

            IndexValidationIssue is = new IndexValidationIssue(String.valueOf(o), ctx.name(), idx.getName(), t);

            log.error("Failed to lookup key: " + is.toString());

            enoughIssues |= idxValidationRes.reportIssue(is);
        }
        finally {
            previousKey = h2key;
        }
    }

    CacheGroupContext group = ctx.group();

    String uniqueIdxName = String.format("[cacheGroup=%s, cacheGroupId=%s, cache=%s, cacheId=%s, idx=%s]",
        group.name(), group.groupId(), ctx.name(), ctx.cacheId(), idx.getName());

    idleChecker.apply(group.groupId());

    processedIndexes.incrementAndGet();

    printProgressOfIndexValidationIfNeeded();

    return Collections.singletonMap(uniqueIdxName, idxValidationRes);
}
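A hedged sketch of how these single-entry maps might be merged by the surrounding closure when validating several indexes; the indexes collection and the result map are assumptions for illustration, not the actual ValidateIndexesClosure wiring:

Map<String, ValidateIndexesPartitionResult> idxResults = new HashMap<>();

for (T2<GridCacheContext, Index> cacheCtxWithIdx : indexes) {
    // Each call returns at most one entry keyed by the unique index name.
    idxResults.putAll(processIndex(cacheCtxWithIdx, idleChecker));
}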
Use of org.h2.mvstore.Page in project ignite by apache.
The class GridReduceQueryExecutor, method update.
/**
* @param schemaName Schema name.
* @param cacheIds Cache ids.
* @param selectQry Select query.
* @param params SQL parameters.
* @param enforceJoinOrder Enforce join order of tables.
* @param pageSize Page size.
* @param timeoutMillis Timeout.
* @param parts Partitions.
* @param isReplicatedOnly Whether query uses only replicated caches.
* @param cancel Cancel state.
* @return Update result, or {@code null} when some map node doesn't support distributed DML.
*/
@SuppressWarnings("IfMayBeConditional")
public UpdateResult update(
    String schemaName,
    List<Integer> cacheIds,
    String selectQry,
    Object[] params,
    boolean enforceJoinOrder,
    int pageSize,
    int timeoutMillis,
    final int[] parts,
    boolean isReplicatedOnly,
    GridQueryCancel cancel
) {
    AffinityTopologyVersion topVer = h2.readyTopologyVersion();

    ReducePartitionMapResult nodesParts =
        mapper.nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);

    Collection<ClusterNode> nodes = nodesParts.nodes();

    if (F.isEmpty(nodes))
        throw new CacheException("Failed to determine nodes participating in the update. " +
            "Explanation (Retry update once topology recovers).");

    if (isReplicatedOnly) {
        ClusterNode locNode = ctx.discovery().localNode();

        if (nodes.contains(locNode))
            nodes = singletonList(locNode);
        else
            nodes = singletonList(F.rand(nodes));
    }

    for (ClusterNode n : nodes) {
        if (!n.version().greaterThanEqual(2, 3, 0)) {
            log.warning("Server-side DML optimization is skipped because map node does not support it. " +
                "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");

            return null;
        }
    }

    final long reqId = qryReqIdGen.incrementAndGet();

    h2.runningQueryManager().trackRequestId(reqId);

    final DmlDistributedUpdateRun r = new DmlDistributedUpdateRun(nodes.size());

    int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;

    if (isReplicatedOnly)
        flags |= GridH2QueryRequest.FLAG_REPLICATED;

    GridH2DmlRequest req = new GridH2DmlRequest()
        .requestId(reqId)
        .topologyVersion(topVer)
        .caches(cacheIds)
        .schemaName(schemaName)
        .query(selectQry)
        .pageSize(pageSize)
        .parameters(params)
        .timeout(timeoutMillis)
        .explicitTimeout(true)
        .flags(flags);

    updRuns.put(reqId, r);

    boolean release = false;

    try {
        Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ?
            nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();

        ReducePartitionsSpecializer partsSpec = (parts == null) ? null :
            new ReducePartitionsSpecializer(partsMap);

        final Collection<ClusterNode> finalNodes = nodes;

        cancel.add(() -> {
            r.future().onCancelled();

            send(finalNodes, new GridQueryCancelRequest(reqId), null, true);
        });

        // send() logs the debug message.
        if (send(nodes, req, partsSpec, false))
            return r.future().get();

        throw new CacheException("Failed to send update request to participating nodes.");
    }
    catch (IgniteCheckedException | RuntimeException e) {
        release = true;

        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);

        throw new CacheException("Failed to run SQL update query. " + e.getMessage(), e);
    }
    finally {
        if (release)
            send(nodes, new GridQueryCancelRequest(reqId), null, false);

        if (!updRuns.remove(reqId, r))
            U.warn(log, "Update run was already removed: " + reqId);
    }
}
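Per the javadoc above, a null return means at least one map node predates version 2.3.0 and cannot run distributed DML. A minimal sketch of how calling code might honor that contract; the parameter names are taken from the signature above, and the fallback itself is deliberately left abstract:

UpdateResult res = update(schemaName, cacheIds, selectQry, params,
    enforceJoinOrder, pageSize, timeoutMillis, parts, isReplicatedOnly, cancel);

if (res == null) {
    // Distributed DML is unavailable on this topology: the caller would
    // fall back to running the SELECT and applying updates locally
    // (fallback implementation omitted here).
}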
Use of org.h2.mvstore.Page in project ignite by apache.
The class GridReduceQueryExecutor, method createReduceQueryRun.
/**
* Query run factory method.
*
* @param conn H2 connection.
* @param mapQueries Map queries.
* @param nodes Target nodes.
* @param pageSize Page size.
* @param nodeToSegmentsCnt Number of index segments per node.
* @param skipMergeTbl Skip merge table flag.
* @param explain Explain query flag.
* @param dataPageScanEnabled DataPage scan enabled flag.
* @return Reduce query run.
*/
@NotNull private ReduceQueryRun createReduceQueryRun(
    H2PooledConnection conn,
    List<GridCacheSqlQuery> mapQueries,
    Collection<ClusterNode> nodes,
    int pageSize,
    Map<ClusterNode, Integer> nodeToSegmentsCnt,
    boolean skipMergeTbl,
    boolean explain,
    Boolean dataPageScanEnabled
) {
    final ReduceQueryRun r = new ReduceQueryRun(mapQueries.size(), pageSize, dataPageScanEnabled);

    int tblIdx = 0;
    int replicatedQrysCnt = 0;

    for (GridCacheSqlQuery mapQry : mapQueries) {
        Reducer reducer;

        if (skipMergeTbl)
            reducer = UnsortedOneWayReducer.createDummy(ctx);
        else {
            ReduceTable tbl;

            try {
                tbl = createMergeTable(conn, mapQry, explain);
            }
            catch (IgniteCheckedException e) {
                throw new IgniteException(e);
            }

            reducer = tbl.getReducer();

            fakeTable(conn, tblIdx++).innerTable(tbl);
        }

        // If the query has only replicated tables, we have to run it on a single node only.
        if (!mapQry.isPartitioned()) {
            ClusterNode node = F.rand(nodes);

            mapQry.node(node.id());

            replicatedQrysCnt++;

            // Replicated tables can have only 1 segment.
            reducer.setSources(singletonMap(node, 1));
        }
        else
            reducer.setSources(nodeToSegmentsCnt);

        reducer.setPageSize(r.pageSize());

        r.reducers().add(reducer);
    }

    int cnt = nodeToSegmentsCnt.values().stream().mapToInt(i -> i).sum();

    r.init((r.reducers().size() - replicatedQrysCnt) * cnt + replicatedQrysCnt);

    return r;
}
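The final r.init(...) call sizes the run for the number of result streams it must await: each partitioned reducer receives one stream per (node, segment) pair, while each replicated reducer receives exactly one. A worked example with illustrative numbers:

// Assume 3 map queries, 1 of which touches only replicated tables
// (replicatedQrysCnt = 1), and two nodes with 2 segments each,
// so cnt = 2 + 2 = 4.
// r.init((3 - 1) * 4 + 1) -> r.init(9):
// 4 streams for each of the 2 partitioned reducers, plus 1 stream for
// the replicated reducer pinned to a single randomly chosen node.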
Use of org.h2.mvstore.Page in project ignite by apache.
The class MapQueryResult, method fetchNextPage.
/**
* @param rows Collection to fetch into.
* @param pageSize Page size.
* @param dataPageScanEnabled If data page scan is enabled.
* @return {@code true} if there are no more rows available.
*/
boolean fetchNextPage(List<Value[]> rows, int pageSize, Boolean dataPageScanEnabled) {
    assert lock.isHeldByCurrentThread();

    if (closed)
        return true;

    assert res != null;

    boolean readEvt = cctx != null && cctx.name() != null &&
        cctx.events().isRecordable(EVT_CACHE_QUERY_OBJECT_READ);

    QueryContext.threadLocal(H2Utils.context(ses));

    page++;

    h2.enableDataPageScan(dataPageScanEnabled);

    try {
        for (int i = 0; i < pageSize; i++) {
            if (!res.res.next())
                return true;

            Value[] row = res.res.currentRow();

            if (cpNeeded) {
                boolean copied = false;

                for (int j = 0; j < row.length; j++) {
                    Value val = row[j];

                    if (val instanceof GridH2ValueCacheObject) {
                        GridH2ValueCacheObject valCacheObj = (GridH2ValueCacheObject)val;

                        row[j] = new GridH2ValueCacheObject(valCacheObj.getCacheObject(), h2.objectContext()) {
                            @Override public Object getObject() {
                                return getObject(true);
                            }
                        };

                        copied = true;
                    }
                }

                if (i == 0 && !copied)
                    cpNeeded = false; // No copy on read caches, skip next checks.
            }

            assert row != null;

            if (readEvt) {
                GridKernalContext ctx = h2.kernalContext();

                ctx.event().record(new CacheQueryReadEvent<>(
                    ctx.discovery().localNode(),
                    "SQL fields query result set row read.",
                    EVT_CACHE_QUERY_OBJECT_READ,
                    CacheQueryType.SQL.name(),
                    cctx.name(),
                    null,
                    qry.query(),
                    null,
                    null,
                    params,
                    qrySrcNodeId,
                    null,
                    null,
                    null,
                    null,
                    row(row)));
            }

            rows.add(res.res.currentRow());

            res.fetchSizeInterceptor.checkOnFetchNext();
        }

        return !res.res.hasNext();
    }
    finally {
        CacheDataTree.setDataPageScanEnabled(false);
    }
}
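A hedged sketch of a caller draining the result page by page under the required lock; the lock handling and the sendPage sender are assumptions for illustration, not the actual MapQueryResult call site:

List<Value[]> rows = new ArrayList<>(pageSize);

lock.lock();
try {
    // fetchNextPage() returns true once the result set is exhausted.
    boolean last = fetchNextPage(rows, pageSize, dataPageScanEnabled);

    sendPage(rows, last); // hypothetical: ship the page to the reduce node
}
finally {
    lock.unlock();
}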