Use of org.h2.index.Cursor in project ignite by apache.
In the class GridH2TableSelfTest, method checkIndexesConsistent.
/**
* @param idxs Indexes.
* @param rowSet Rows.
* @return Rows.
*/
private Set<Row> checkIndexesConsistent(ArrayList<Index> idxs, @Nullable Set<Row> rowSet) throws IgniteCheckedException {
    for (Index idx : idxs) {
        if (!(idx instanceof GridH2TreeIndex))
            continue;

        Set<Row> set = new HashSet<>();

        GridCursor<GridH2Row> cursor = ((GridH2TreeIndex)idx).rows();

        // Each row must occur in the index exactly once.
        while (cursor.next())
            assertTrue(set.add(cursor.get()));

        // All tree indexes must contain the same set of rows.
        if (rowSet == null || rowSet.isEmpty())
            rowSet = set;
        else
            assertEquals(rowSet, set);
    }

    return rowSet;
}
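The test above walks Ignite's GridCursor; the same full-range scan can be written directly against H2's org.h2.index.Cursor, which is the pattern the later examples use. A minimal sketch, assuming an org.h2.index.Index and its owning Session are available; the rowsOf helper is hypothetical and not part of the test. Note that, unlike GridCursor, H2's Cursor does not throw checked exceptions from next() or get().

// Sketch only: collect all rows of an H2 index by scanning an org.h2.index.Cursor
// over the unbounded range.
// Assumed imports: org.h2.engine.Session, org.h2.index.Cursor, org.h2.index.Index,
// org.h2.result.Row, java.util.HashSet, java.util.Set.
private static Set<Row> rowsOf(Index idx, Session ses) {
    Set<Row> rows = new HashSet<>();

    // find(ses, null, null) positions a cursor before the first row of the whole index.
    Cursor cursor = idx.find(ses, null, null);

    while (cursor.next())
        rows.add(cursor.get());

    return rows;
}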
Use of org.h2.index.Cursor in project ignite by apache.
In the class GridMergeIndex, method getRowCount.
/** {@inheritDoc} */
@Override public long getRowCount(Session ses) {
    Cursor c = find(ses, null, null);

    long cnt = 0;

    while (c.next())
        cnt++;

    return cnt;
}
Use of org.h2.index.Cursor in project ignite by apache.
In the class DmlStatementsProcessor, method doUpdate.
/**
 * Perform UPDATE operation on top of results of SELECT.
 * @param plan Update plan.
 * @param cursor SELECT results.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Pair [cursor corresponding to results of UPDATE (contains number of items affected); keys whose values
 *     had been modified concurrently (arguments for a re-run)].
 */
@SuppressWarnings({"unchecked", "ThrowableResultOfMethodCallIgnored"})
private UpdateResult doUpdate(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize)
    throws IgniteCheckedException {
    GridH2RowDescriptor desc = plan.tbl.rowDescriptor();

    GridCacheContext cctx = desc.context();

    boolean bin = cctx.binaryMarshaller();

    String[] updatedColNames = plan.colNames;

    int valColIdx = plan.valColIdx;

    boolean hasNewVal = (valColIdx != -1);

    // Statement updates distinct properties if it does not have _val in updated columns list
    // or if its list of updated columns includes only _val, i.e. is single element.
    boolean hasProps = !hasNewVal || updatedColNames.length > 1;

    long res = 0;

    Map<Object, EntryProcessor<Object, Object, Boolean>> rows = new LinkedHashMap<>();

    // Keys that failed to UPDATE due to concurrent updates.
    List<Object> failedKeys = new ArrayList<>();

    SQLException resEx = null;

    Iterator<List<?>> it = cursor.iterator();

    while (it.hasNext()) {
        List<?> e = it.next();

        Object key = e.get(0);

        Object newVal;

        Map<String, Object> newColVals = new HashMap<>();

        for (int i = 0; i < plan.colNames.length; i++) {
            if (hasNewVal && i == valColIdx - 2)
                continue;

            GridQueryProperty prop = plan.tbl.rowDescriptor().type().property(plan.colNames[i]);

            assert prop != null;

            newColVals.put(plan.colNames[i], convert(e.get(i + 2), desc, prop.type(), plan.colTypes[i]));
        }

        newVal = plan.valSupplier.apply(e);

        if (newVal == null)
            throw new IgniteSQLException("New value for UPDATE must not be null", IgniteQueryErrorCode.NULL_VALUE);

        // Skip key and value - that's why we start off with the 3rd column.
        for (int i = 0; i < plan.tbl.getColumns().length - DEFAULT_COLUMNS_COUNT; i++) {
            Column c = plan.tbl.getColumn(i + DEFAULT_COLUMNS_COUNT);

            if (desc.isKeyValueOrVersionColumn(c.getColumnId()))
                continue;

            GridQueryProperty prop = desc.type().property(c.getName());

            if (prop.key())
                // Don't get values of key's columns - we won't use them anyway.
                continue;

            boolean hasNewColVal = newColVals.containsKey(c.getName());

            if (!hasNewColVal)
                continue;

            Object colVal = newColVals.get(c.getName());

            // UPDATE currently does not allow modifying the key or its fields, so it is safe to pass null as the key.
            desc.setColumnValue(null, newVal, colVal, i);
        }

        if (bin && hasProps) {
            assert newVal instanceof BinaryObjectBuilder;

            newVal = ((BinaryObjectBuilder)newVal).build();
        }

        Object srcVal = e.get(1);

        if (bin && !(srcVal instanceof BinaryObject))
            srcVal = cctx.grid().binary().toBinary(srcVal);

        rows.put(key, new ModifyingEntryProcessor(srcVal, new EntryValueUpdater(newVal)));

        if ((pageSize > 0 && rows.size() == pageSize) || (!it.hasNext())) {
            PageProcessingResult pageRes = processPage(cctx, rows);

            res += pageRes.cnt;

            failedKeys.addAll(F.asList(pageRes.errKeys));

            if (pageRes.ex != null) {
                if (resEx == null)
                    resEx = pageRes.ex;
                else
                    resEx.setNextException(pageRes.ex);
            }

            if (it.hasNext())
                rows.clear(); // No need to clear after the last batch.
        }
    }

    if (resEx != null) {
        if (!F.isEmpty(failedKeys)) {
            // Don't go for a re-run if processing of some keys yielded exceptions and report keys that
            // had been modified concurrently right away.
            String msg = "Failed to UPDATE some keys because they had been modified concurrently " +
                "[keys=" + failedKeys + ']';

            SQLException dupEx = createJdbcSqlException(msg, IgniteQueryErrorCode.CONCURRENT_UPDATE);

            dupEx.setNextException(resEx);

            resEx = dupEx;
        }

        throw new IgniteSQLException(resEx);
    }

    return new UpdateResult(res, failedKeys.toArray());
}
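ModifyingEntryProcessor, EntryValueUpdater and processPage are Ignite internals not shown in this snippet. As a rough illustration of the concurrency check they implement, here is a hedged sketch of a standalone JCache entry processor that writes the new value only if the current value is still the one the SELECT observed, and returns false otherwise so the key can be collected for a re-run. The class name and structure are assumptions, not Ignite's actual implementation.

// Sketch only - not Ignite's ModifyingEntryProcessor. A "replace if unchanged" processor:
// returns true if the value was still the one seen by the SELECT and got updated,
// false if the entry was removed or modified concurrently (candidate for a re-run).
// Assumed imports: java.io.Serializable, javax.cache.processor.EntryProcessor,
// javax.cache.processor.EntryProcessorException, javax.cache.processor.MutableEntry.
static class ReplaceIfUnchanged implements EntryProcessor<Object, Object, Boolean>, Serializable {
    /** Value the SELECT observed. */
    private final Object expVal;

    /** New value to write. */
    private final Object newVal;

    ReplaceIfUnchanged(Object expVal, Object newVal) {
        this.expVal = expVal;
        this.newVal = newVal;
    }

    /** {@inheritDoc} */
    @Override public Boolean process(MutableEntry<Object, Object> entry, Object... args)
        throws EntryProcessorException {
        if (!entry.exists() || !entry.getValue().equals(expVal))
            return false; // Concurrent modification detected.

        entry.setValue(newVal);

        return true;
    }
}

A page of such processors could then be submitted through the cache's invokeAll(...) call, which is roughly the role processPage plays above.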
Use of org.h2.index.Cursor in project ignite by apache.
In the class IgniteH2Indexing, method queryDistributedSqlFields.
/** {@inheritDoc} */
@Override
public FieldsQueryCursor<List<?>> queryDistributedSqlFields(String schemaName, SqlFieldsQuery qry,
    boolean keepBinary, GridQueryCancel cancel, @Nullable Integer mainCacheId) {
    final String sqlQry = qry.getSql();

    Connection c = connectionForSchema(schemaName);

    final boolean enforceJoinOrder = qry.isEnforceJoinOrder();
    final boolean distributedJoins = qry.isDistributedJoins();
    final boolean grpByCollocated = qry.isCollocated();

    final DistributedJoinMode distributedJoinMode = distributedJoinMode(qry.isLocal(), distributedJoins);

    GridCacheTwoStepQuery twoStepQry = null;

    List<GridQueryFieldMetadata> meta;

    final H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, sqlQry, grpByCollocated,
        distributedJoins, enforceJoinOrder, qry.isLocal());

    H2TwoStepCachedQuery cachedQry = twoStepCache.get(cachedQryKey);

    if (cachedQry != null) {
        twoStepQry = cachedQry.query().copy();
        meta = cachedQry.meta();
    }
    else {
        final UUID locNodeId = ctx.localNodeId();

        // Here we will just parse the statement, no need to optimize it at all.
        H2Utils.setupConnection(c, /*distributedJoins*/false, /*enforceJoinOrder*/true);

        GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, 0, PREPARE)
            .distributedJoinMode(distributedJoinMode));

        PreparedStatement stmt = null;
        Prepared prepared;

        boolean cachesCreated = false;

        try {
            try {
                while (true) {
                    try {
                        // Do not cache this statement because the whole query object will be cached later on.
                        stmt = prepareStatement(c, sqlQry, false);

                        break;
                    }
                    catch (SQLException e) {
                        if (!cachesCreated && (e.getErrorCode() == ErrorCode.SCHEMA_NOT_FOUND_1 ||
                            e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 ||
                            e.getErrorCode() == ErrorCode.INDEX_NOT_FOUND_1)) {
                            try {
                                ctx.cache().createMissingQueryCaches();
                            }
                            catch (IgniteCheckedException ignored) {
                                throw new CacheException("Failed to create missing caches.", e);
                            }

                            cachesCreated = true;
                        }
                        else
                            throw new IgniteSQLException("Failed to parse query: " + sqlQry,
                                IgniteQueryErrorCode.PARSING, e);
                    }
                }

                prepared = GridSqlQueryParser.prepared(stmt);

                if (qry instanceof JdbcSqlFieldsQuery && ((JdbcSqlFieldsQuery)qry).isQuery() != prepared.isQuery())
                    throw new IgniteSQLException("Given statement type does not match that declared by JDBC driver",
                        IgniteQueryErrorCode.STMT_TYPE_MISMATCH);

                if (prepared.isQuery()) {
                    bindParameters(stmt, F.asList(qry.getArgs()));

                    twoStepQry = GridSqlQuerySplitter.split((JdbcPreparedStatement)stmt, qry.getArgs(),
                        grpByCollocated, distributedJoins, enforceJoinOrder, this);

                    assert twoStepQry != null;
                }
            }
            finally {
                GridH2QueryContext.clearThreadLocal();
            }

            // It is a DML statement if we did not create a two-step query.
            if (twoStepQry == null) {
                if (DmlStatementsProcessor.isDmlStatement(prepared)) {
                    try {
                        return dmlProc.updateSqlFieldsDistributed(schemaName, stmt, qry, cancel);
                    }
                    catch (IgniteCheckedException e) {
                        throw new IgniteSQLException("Failed to execute DML statement [stmt=" + sqlQry +
                            ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e);
                    }
                }

                if (DdlStatementsProcessor.isDdlStatement(prepared)) {
                    try {
                        return ddlProc.runDdlStatement(sqlQry, stmt);
                    }
                    catch (IgniteCheckedException e) {
                        throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + sqlQry + ']', e);
                    }
                }
            }

            LinkedHashSet<Integer> caches0 = new LinkedHashSet<>();

            assert twoStepQry != null;

            int tblCnt = twoStepQry.tablesCount();

            if (mainCacheId != null)
                caches0.add(mainCacheId);

            if (tblCnt > 0) {
                for (QueryTable tblKey : twoStepQry.tables()) {
                    GridH2Table tbl = dataTable(tblKey);

                    int cacheId = CU.cacheId(tbl.cacheName());

                    caches0.add(cacheId);
                }
            }

            if (caches0.isEmpty())
                twoStepQry.local(true);
            else {
                // Prohibit using indexes with different numbers of segments in the same query.
                List<Integer> cacheIds = new ArrayList<>(caches0);

                checkCacheIndexSegmentation(cacheIds);

                twoStepQry.cacheIds(cacheIds);
                twoStepQry.local(qry.isLocal());
            }

            meta = H2Utils.meta(stmt.getMetaData());
        }
        catch (IgniteCheckedException e) {
            throw new CacheException("Failed to bind parameters: [qry=" + sqlQry + ", params=" +
                Arrays.deepToString(qry.getArgs()) + "]", e);
        }
        catch (SQLException e) {
            throw new IgniteSQLException(e);
        }
        finally {
            U.close(stmt, log);
        }
    }

    if (log.isDebugEnabled())
        log.debug("Parsed query: `" + sqlQry + "` into two step query: " + twoStepQry);

    twoStepQry.pageSize(qry.getPageSize());

    if (cancel == null)
        cancel = new GridQueryCancel();

    int[] partitions = qry.getPartitions();

    if (partitions == null && twoStepQry.derivedPartitions() != null) {
        try {
            partitions = calculateQueryPartitions(twoStepQry.derivedPartitions(), qry.getArgs());
        }
        catch (IgniteCheckedException e) {
            throw new CacheException("Failed to calculate derived partitions: [qry=" + sqlQry + ", params=" +
                Arrays.deepToString(qry.getArgs()) + "]", e);
        }
    }

    QueryCursorImpl<List<?>> cursor = new QueryCursorImpl<>(
        runQueryTwoStep(schemaName, twoStepQry, keepBinary, enforceJoinOrder, qry.getTimeout(), cancel,
            qry.getArgs(), partitions), cancel);

    cursor.fieldsMeta(meta);

    if (cachedQry == null && !twoStepQry.explain()) {
        cachedQry = new H2TwoStepCachedQuery(meta, twoStepQry.copy());

        twoStepCache.putIfAbsent(cachedQryKey, cachedQry);
    }

    return cursor;
}
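queryDistributedSqlFields is an internal entry point; from application code the same path is normally reached through IgniteCache.query(SqlFieldsQuery). A hedged usage sketch follows; the cache name, table and columns are placeholders, not taken from the source above.

// Usage sketch (assumed cache name and SQL): running a distributed fields query
// through the public API.
// Assumed imports: java.util.List, org.apache.ignite.Ignite, org.apache.ignite.IgniteCache,
// org.apache.ignite.cache.query.QueryCursor, org.apache.ignite.cache.query.SqlFieldsQuery.
static void runFieldsQuery(Ignite ignite) {
    IgniteCache<Object, Object> cache = ignite.cache("somecache"); // Hypothetical cache name.

    SqlFieldsQuery qry = new SqlFieldsQuery("select _key, name from Person where age > ?"); // Hypothetical schema.

    qry.setArgs(30);
    qry.setDistributedJoins(true);
    qry.setEnforceJoinOrder(false);
    qry.setPageSize(1024);

    try (QueryCursor<List<?>> cur = cache.query(qry)) {
        for (List<?> row : cur)
            System.out.println(row);
    }
}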
Use of org.h2.index.Cursor in project ignite by apache.
In the class IgniteH2Indexing, method rebuildIndexesFromHash.
/** {@inheritDoc} */
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Override
public void rebuildIndexesFromHash(GridCacheContext cctx, String schemaName, String typeName)
    throws IgniteCheckedException {
    H2TableDescriptor tbl = tableDescriptor(schemaName, typeName);

    if (tbl == null)
        return;

    assert tbl.table() != null;
    assert tbl.table().rebuildFromHashInProgress();

    H2PkHashIndex hashIdx = tbl.primaryKeyHashIndex();

    Cursor cursor = hashIdx.find((Session)null, null, null);

    while (cursor.next()) {
        CacheDataRow dataRow = (CacheDataRow)cursor.get();

        boolean done = false;

        while (!done) {
            GridCacheEntryEx entry = cctx.cache().entryEx(dataRow.key());

            try {
                synchronized (entry) {
                    // TODO : How to correctly get current value and link here?
                    GridH2Row row = tbl.table().rowDescriptor().createRow(entry.key(), entry.partition(),
                        dataRow.value(), entry.version(), entry.expireTime());

                    row.link(dataRow.link());

                    List<Index> indexes = tbl.table().getAllIndexes();

                    // Re-insert the row into the SQL tree indexes (the first two entries of the index list are skipped).
                    for (int i = 2; i < indexes.size(); i++) {
                        Index idx = indexes.get(i);

                        if (idx instanceof H2TreeIndex)
                            ((H2TreeIndex)idx).put(row);
                    }

                    done = true;
                }
            }
            catch (GridCacheEntryRemovedException e) {
                // No-op.
            }
        }
    }

    tbl.table().markRebuildFromHashInProgress(false);
}
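The same pattern, scanning one index through org.h2.index.Cursor and feeding every row into another index, can also be expressed with H2's generic Index API; a minimal sketch, assuming source and destination indexes belong to the same table and session (copyRows is a hypothetical helper, not part of IgniteH2Indexing).

// Sketch: copy all rows from one H2 index into another by walking a Cursor.
// Assumed imports: org.h2.engine.Session, org.h2.index.Cursor, org.h2.index.Index, org.h2.result.Row.
static void copyRows(Index src, Index dst, Session ses) {
    // Unbounded range scan over the source index.
    Cursor cursor = src.find(ses, null, null);

    while (cursor.next()) {
        Row row = cursor.get();

        // Index.add(Session, Row) inserts the row into the destination index.
        dst.add(ses, row);
    }
}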