Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
The class IgniteH2Indexing, method executeUpdateOnDataNodeTransactional.
/**
 * {@inheritDoc}
 */
@Override public UpdateSourceIterator<?> executeUpdateOnDataNodeTransactional(
    GridCacheContext<?, ?> cctx,
    int[] ids,
    int[] parts,
    String schema,
    String qry,
    Object[] params,
    int flags,
    int pageSize,
    int timeout,
    AffinityTopologyVersion topVer,
    MvccSnapshot mvccSnapshot,
    GridQueryCancel cancel
) throws IgniteCheckedException {
    SqlFieldsQuery fldsQry = QueryUtils.withQueryTimeout(new SqlFieldsQuery(qry), timeout, TimeUnit.MILLISECONDS);

    if (params != null)
        fldsQry.setArgs(params);

    fldsQry.setEnforceJoinOrder(U.isFlagSet(flags, GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER));
    fldsQry.setTimeout(timeout, TimeUnit.MILLISECONDS);
    fldsQry.setPageSize(pageSize);
    fldsQry.setLocal(true);
    fldsQry.setLazy(U.isFlagSet(flags, GridH2QueryRequest.FLAG_LAZY));

    boolean loc = true;

    final boolean replicated = U.isFlagSet(flags, GridH2QueryRequest.FLAG_REPLICATED);

    GridCacheContext<?, ?> cctx0;

    if (!replicated && !F.isEmpty(ids) &&
        (cctx0 = CU.firstPartitioned(cctx.shared(), ids)) != null &&
        cctx0.config().getQueryParallelism() > 1) {
        fldsQry.setDistributedJoins(true);

        loc = false;
    }

    QueryParserResult parseRes = parser.parse(schema, fldsQry, false);

    assert parseRes.remainingQuery() == null;

    QueryParserResultDml dml = parseRes.dml();

    assert dml != null;

    IndexingQueryFilter filter = backupFilter(topVer, parts);

    UpdatePlan plan = dml.plan();

    GridCacheContext planCctx = plan.cacheContext();

    // Force keepBinary for operation context to avoid binary deserialization inside entry processor.
    DmlUtils.setKeepBinaryContext(planCctx);

    SqlFieldsQuery selectFieldsQry = QueryUtils.withQueryTimeout(
            new SqlFieldsQuery(plan.selectQuery(), fldsQry.isCollocated()),
            fldsQry.getTimeout(),
            TimeUnit.MILLISECONDS
        )
        .setArgs(fldsQry.getArgs())
        .setDistributedJoins(fldsQry.isDistributedJoins())
        .setEnforceJoinOrder(fldsQry.isEnforceJoinOrder())
        .setLocal(fldsQry.isLocal())
        .setPageSize(fldsQry.getPageSize())
        .setTimeout(fldsQry.getTimeout(), TimeUnit.MILLISECONDS)
        .setLazy(fldsQry.isLazy());

    QueryCursorImpl<List<?>> cur;

    // sub-query and not some dummy stuff like "select 1, 2, 3;"
    if (!loc && !plan.isLocalSubquery()) {
        cur = executeSelectForDml(
            RunningQueryManager.UNDEFINED_QUERY_ID,
            schema,
            selectFieldsQry,
            new StaticMvccQueryTracker(planCctx, mvccSnapshot),
            cancel,
            timeout
        );
    }
    else {
        selectFieldsQry.setLocal(true);

        QueryParserResult selectParseRes = parser.parse(schema, selectFieldsQry, false);

        GridQueryFieldsResult res = executeSelectLocal(
            RunningQueryManager.UNDEFINED_QUERY_ID,
            selectParseRes.queryDescriptor(),
            selectParseRes.queryParameters(),
            selectParseRes.select(),
            filter,
            new StaticMvccQueryTracker(planCctx, mvccSnapshot),
            cancel,
            true,
            timeout
        );

        cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                try {
                    return res.iterator();
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }
        }, cancel, true, selectParseRes.queryParameters().lazy());
    }

    return plan.iteratorForTransaction(connMgr, cur);
}
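For context, the path above is normally reached by submitting a DML statement through the public SqlFieldsQuery API; internally the statement is parsed into an UpdatePlan like the one used here. A minimal sketch, assuming a running cluster and an existing "Person" table (cache, table and column names are hypothetical):

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class DmlUpdateExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Hypothetical cache backing the SQL table "Person".
            IgniteCache<Integer, Object> cache = ignite.cache("PersonCache");

            SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Person SET salary = salary * ? WHERE age > ?")
                .setArgs(1.1, 30)
                .setPageSize(512)
                .setLazy(true);

            // The single row of the result contains the update counter.
            List<List<?>> res = cache.query(qry).getAll();

            System.out.println("Updated rows: " + res.get(0).get(0));
        }
    }
}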
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
The class IgniteH2Indexing, method executeUpdateDistributed.
/**
 * @param qryId Query id.
 * @param qryDesc Query descriptor.
 * @param qryParams Query parameters.
 * @param dml DML statement.
 * @param cancel Query cancel.
 * @return Cursors over update counters: one cursor per batch element, or a single cursor for a non-batched query.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("unchecked")
private List<QueryCursorImpl<List<?>>> executeUpdateDistributed(
    long qryId,
    QueryDescriptor qryDesc,
    QueryParameters qryParams,
    QueryParserResultDml dml,
    GridQueryCancel cancel
) throws IgniteCheckedException {
    if (qryDesc.batched()) {
        Collection<UpdateResult> ress;

        List<Object[]> argss = qryParams.batchedArguments();

        UpdatePlan plan = dml.plan();

        GridCacheContext<?, ?> cctx = plan.cacheContext();

        // For MVCC case, let's enlist batch elements one by one.
        if (plan.hasRows() && plan.mode() == UpdateMode.INSERT && !cctx.mvccEnabled()) {
            CacheOperationContext opCtx = DmlUtils.setKeepBinaryContext(cctx);

            try {
                List<List<List<?>>> cur = plan.createRows(argss);

                // TODO: IGNITE-11176 - Need to support cancellation
                ress = DmlUtils.processSelectResultBatched(plan, cur, qryParams.updateBatchSize());
            }
            finally {
                DmlUtils.restoreKeepBinaryContext(cctx, opCtx);
            }
        }
        else {
            // Fallback to previous mode.
            ress = new ArrayList<>(argss.size());

            SQLException batchException = null;

            int[] cntPerRow = new int[argss.size()];

            int cntr = 0;

            for (Object[] args : argss) {
                UpdateResult res;

                try {
                    res = executeUpdate(qryId, qryDesc, qryParams.toSingleBatchedArguments(args), dml, false, null, cancel);

                    cntPerRow[cntr++] = (int)res.counter();

                    ress.add(res);
                }
                catch (Exception e) {
                    SQLException sqlEx = QueryUtils.toSqlException(e);

                    batchException = DmlUtils.chainException(batchException, sqlEx);

                    cntPerRow[cntr++] = Statement.EXECUTE_FAILED;
                }
            }

            if (batchException != null) {
                BatchUpdateException e = new BatchUpdateException(batchException.getMessage(),
                    batchException.getSQLState(), batchException.getErrorCode(), cntPerRow, batchException);

                throw new IgniteCheckedException(e);
            }
        }

        ArrayList<QueryCursorImpl<List<?>>> resCurs = new ArrayList<>(ress.size());

        for (UpdateResult res : ress) {
            res.throwIfError();

            QueryCursorImpl<List<?>> resCur = (QueryCursorImpl<List<?>>)new QueryCursorImpl(
                singletonList(singletonList(res.counter())), cancel, false, false);

            resCur.fieldsMeta(UPDATE_RESULT_META);

            resCurs.add(resCur);
        }

        return resCurs;
    }
    else {
        UpdateResult res = executeUpdate(qryId, qryDesc, qryParams, dml, false, null, cancel);

        res.throwIfError();

        QueryCursorImpl<List<?>> resCur = (QueryCursorImpl<List<?>>)new QueryCursorImpl(
            singletonList(singletonList(res.counter())), cancel, false, false);

        resCur.fieldsMeta(UPDATE_RESULT_META);

        resCur.partitionResult(res.partitionResult());

        return singletonList(resCur);
    }
}
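The batched branch above (cntPerRow, Statement.EXECUTE_FAILED, BatchUpdateException) corresponds to JDBC batch execution. A hedged sketch using the thin JDBC driver, assuming a node is running locally; the table name and data are hypothetical:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.Statement;

public class BatchDmlExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1/")) {
            try (Statement ddl = conn.createStatement()) {
                ddl.execute("CREATE TABLE IF NOT EXISTS Person (id INT PRIMARY KEY, name VARCHAR)");
            }

            try (PreparedStatement stmt = conn.prepareStatement("INSERT INTO Person (id, name) VALUES (?, ?)")) {
                for (int i = 0; i < 100; i++) {
                    stmt.setInt(1, i);
                    stmt.setString(2, "name-" + i);
                    stmt.addBatch();
                }

                // On failure a BatchUpdateException carries per-row counters, mirroring
                // the cntPerRow / Statement.EXECUTE_FAILED handling shown above.
                int[] updateCnts = stmt.executeBatch();

                System.out.println("Batch elements executed: " + updateCnts.length);
            }
        }
    }
}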
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
The class IgniteH2Indexing, method executeUpdateNonTransactional.
/**
* Execute update in non-transactional mode.
*
* @param qryId Query id.
* @param qryDesc Query descriptor.
* @param qryParams Query parameters.
* @param dml Plan.
* @param loc Local flag.
* @param filters Filters.
* @param cancel Cancel hook.
* @return Update result.
* @throws IgniteCheckedException If failed.
*/
private UpdateResult executeUpdateNonTransactional(
    long qryId,
    QueryDescriptor qryDesc,
    QueryParameters qryParams,
    QueryParserResultDml dml,
    boolean loc,
    IndexingQueryFilter filters,
    GridQueryCancel cancel
) throws IgniteCheckedException {
    UpdatePlan plan = dml.plan();

    UpdateResult fastUpdateRes = plan.processFast(qryParams.arguments());

    if (fastUpdateRes != null)
        return fastUpdateRes;

    DmlDistributedPlanInfo distributedPlan = loc ? null : plan.distributedPlan();

    if (distributedPlan != null) {
        if (cancel == null)
            cancel = new GridQueryCancel();

        UpdateResult result = rdcQryExec.update(
            qryDesc.schemaName(),
            distributedPlan.getCacheIds(),
            qryDesc.sql(),
            qryParams.arguments(),
            qryDesc.enforceJoinOrder(),
            qryParams.pageSize(),
            qryParams.timeout(),
            qryParams.partitions(),
            distributedPlan.isReplicatedOnly(),
            cancel
        );

        // Null is returned in case not all nodes support distributed DML.
        if (result != null)
            return result;
    }

    final GridQueryCancel selectCancel = (cancel != null) ? new GridQueryCancel() : null;

    if (cancel != null)
        cancel.add(selectCancel::cancel);

    SqlFieldsQuery selectFieldsQry = new SqlFieldsQuery(plan.selectQuery(), qryDesc.collocated())
        .setArgs(qryParams.arguments())
        .setDistributedJoins(qryDesc.distributedJoins())
        .setEnforceJoinOrder(qryDesc.enforceJoinOrder())
        .setLocal(qryDesc.local())
        .setPageSize(qryParams.pageSize())
        .setTimeout(qryParams.timeout(), TimeUnit.MILLISECONDS)
        .setLazy(qryParams.lazy() && plan.canSelectBeLazy());

    Iterable<List<?>> cur;

    // sub-query and not some dummy stuff like "select 1, 2, 3;"
    if (!loc && !plan.isLocalSubquery()) {
        assert !F.isEmpty(plan.selectQuery());

        cur = executeSelectForDml(qryId, qryDesc.schemaName(), selectFieldsQry, null, selectCancel, qryParams.timeout());
    }
    else if (plan.hasRows())
        cur = plan.createRows(qryParams.arguments());
    else {
        selectFieldsQry.setLocal(true);

        QueryParserResult selectParseRes = parser.parse(qryDesc.schemaName(), selectFieldsQry, false);

        final GridQueryFieldsResult res = executeSelectLocal(
            qryId,
            selectParseRes.queryDescriptor(),
            selectParseRes.queryParameters(),
            selectParseRes.select(),
            filters,
            null,
            selectCancel,
            false,
            qryParams.timeout()
        );

        cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                try {
                    return new GridQueryCacheObjectsIterator(res.iterator(), objectContext(), true);
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }
        }, cancel, true, qryParams.lazy());
    }

    int pageSize = qryParams.updateBatchSize();

    // TODO: IGNITE-11176 - Need to support cancellation
    try {
        return DmlUtils.processSelectResult(plan, cur, pageSize);
    }
    finally {
        if (cur instanceof AutoCloseable)
            U.closeQuiet((AutoCloseable)cur);
    }
}
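The plan.processFast() call at the top of this method covers single-row UPDATE/DELETE statements keyed by _key, which can bypass the SELECT stage entirely. A sketch of such statements submitted through the public API; the cache, its SQL table and the key/value mapping are hypothetical and assumed to exist:

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class FastDmlSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Hypothetical cache whose SQL table "Person" maps _key to an Integer id
            // and _val to a String name.
            IgniteCache<Integer, String> cache = ignite.cache("PersonCache");

            // Single-row UPDATE of _val keyed by _key: a candidate for the fast path,
            // i.e. a direct replace instead of SELECT + per-row update.
            cache.query(new SqlFieldsQuery("UPDATE Person SET _val = ? WHERE _key = ?")
                .setArgs("Alice", 42)).getAll();

            // Single-row DELETE keyed by _key: a candidate for a direct remove.
            cache.query(new SqlFieldsQuery("DELETE FROM Person WHERE _key = ?")
                .setArgs(42)).getAll();
        }
    }
}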
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
The class DmlStatementsProcessor, method rowToKeyValue.
/**
 * Convert a row presented as an array of Objects into a key-value pair to be inserted into the cache.
 * @param cctx Cache context.
 * @param row Row to process.
 * @param plan Update plan.
 * @return Key-value pair.
 * @throws IgniteCheckedException if failed.
 */
@SuppressWarnings({"unchecked", "ConstantConditions", "ResultOfMethodCallIgnored"})
private IgniteBiTuple<?, ?> rowToKeyValue(GridCacheContext cctx, List<?> row, UpdatePlan plan)
    throws IgniteCheckedException {
    GridH2RowDescriptor rowDesc = plan.tbl.rowDescriptor();
    GridQueryTypeDescriptor desc = rowDesc.type();

    Object key = plan.keySupplier.apply(row);

    if (QueryUtils.isSqlType(desc.keyClass())) {
        assert plan.keyColIdx != -1;

        key = convert(key, rowDesc, desc.keyClass(), plan.colTypes[plan.keyColIdx]);
    }

    Object val = plan.valSupplier.apply(row);

    if (QueryUtils.isSqlType(desc.valueClass())) {
        assert plan.valColIdx != -1;

        val = convert(val, rowDesc, desc.valueClass(), plan.colTypes[plan.valColIdx]);
    }

    if (key == null)
        throw new IgniteSQLException("Key for INSERT or MERGE must not be null", IgniteQueryErrorCode.NULL_KEY);

    if (val == null)
        throw new IgniteSQLException("Value for INSERT or MERGE must not be null", IgniteQueryErrorCode.NULL_VALUE);

    Map<String, Object> newColVals = new HashMap<>();

    for (int i = 0; i < plan.colNames.length; i++) {
        if (i == plan.keyColIdx || i == plan.valColIdx)
            continue;

        String colName = plan.colNames[i];

        GridQueryProperty prop = desc.property(colName);

        assert prop != null;

        Class<?> expCls = prop.type();

        newColVals.put(colName, convert(row.get(i), rowDesc, expCls, plan.colTypes[i]));
    }

    // We update columns in the order specified by the table for a reason - table's
    // column order preserves their precedence for correct update of nested properties.
    Column[] cols = plan.tbl.getColumns();

    // First 3 columns are _key, _val and _ver. Skip 'em.
    for (int i = DEFAULT_COLUMNS_COUNT; i < cols.length; i++) {
        if (plan.tbl.rowDescriptor().isKeyValueOrVersionColumn(i))
            continue;

        String colName = cols[i].getName();

        if (!newColVals.containsKey(colName))
            continue;

        Object colVal = newColVals.get(colName);

        desc.setValue(colName, key, val, colVal);
    }

    if (cctx.binaryMarshaller()) {
        if (key instanceof BinaryObjectBuilder)
            key = ((BinaryObjectBuilder)key).build();

        if (val instanceof BinaryObjectBuilder)
            val = ((BinaryObjectBuilder)val).build();
    }

    return new IgniteBiTuple<>(key, val);
}
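As a rough, hypothetical illustration of what rowToKeyValue() produces for an INSERT row, the sketch below builds a key and a binary value from row elements; the real code derives the key/value suppliers, column names and types from the UpdatePlan, while the type and field names here are made up:

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.binary.BinaryObject;
import org.apache.ignite.lang.IgniteBiTuple;

public class RowToKeyValueSketch {
    public static IgniteBiTuple<Object, Object> toKeyValue(Ignite ignite, Object[] row) {
        // Column 0 is the key (a plain SQL type here); the rest are value fields.
        Object key = row[0];

        // Build the value as a binary object, analogous to the BinaryObjectBuilder
        // instances the plan's value supplier can produce.
        BinaryObject val = ignite.binary().builder("Person")
            .setField("name", row[1])
            .setField("salary", row[2])
            .build();

        return new IgniteBiTuple<>(key, val);
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteBiTuple<Object, Object> t = toKeyValue(ignite, new Object[] {1, "Alice", 1000.0});

            System.out.println(t.getKey() + " -> " + t.getValue());
        }
    }
}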
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
The class DmlStatementsProcessor, method doFastUpdate.
/**
 * Perform a single cache operation based on the given args.
 * @param plan Fast update plan.
 * @param args Query parameters.
 * @return {@link UpdateResult#ONE} if an item was affected, {@link UpdateResult#ZERO} otherwise.
 * @throws IgniteCheckedException if failed.
 */
@SuppressWarnings({"unchecked", "ConstantConditions"})
private static UpdateResult doFastUpdate(UpdatePlan plan, Object[] args) throws IgniteCheckedException {
    GridCacheContext cctx = plan.tbl.rowDescriptor().context();

    FastUpdateArguments singleUpdate = plan.fastUpdateArgs;

    assert singleUpdate != null;

    boolean valBounded = (singleUpdate.val != FastUpdateArguments.NULL_ARGUMENT);

    if (singleUpdate.newVal != FastUpdateArguments.NULL_ARGUMENT) {
        // Single item UPDATE.
        Object key = singleUpdate.key.apply(args);
        Object newVal = singleUpdate.newVal.apply(args);

        if (valBounded) {
            Object val = singleUpdate.val.apply(args);

            return (cctx.cache().replace(key, val, newVal) ? UpdateResult.ONE : UpdateResult.ZERO);
        }
        else
            return (cctx.cache().replace(key, newVal) ? UpdateResult.ONE : UpdateResult.ZERO);
    }
    else {
        // Single item DELETE.
        Object key = singleUpdate.key.apply(args);
        Object val = singleUpdate.val.apply(args);

        if (singleUpdate.val == FastUpdateArguments.NULL_ARGUMENT) // No _val bound in source query.
            return cctx.cache().remove(key) ? UpdateResult.ONE : UpdateResult.ZERO;
        else
            return cctx.cache().remove(key, val) ? UpdateResult.ONE : UpdateResult.ZERO;
    }
}
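The fast path shown above reduces a single-row UPDATE or DELETE to plain cache operations. A sketch of the equivalent public-API calls; the cache name and values are hypothetical:

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;

public class FastUpdateSemantics {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("PersonCache");

            cache.put(42, "Alice");

            // UPDATE ... WHERE _key = ? AND _val = ?  ->  conditional replace.
            boolean updated = cache.replace(42, "Alice", "Bob");

            // UPDATE ... WHERE _key = ?               ->  unconditional replace.
            boolean updatedAnyVal = cache.replace(42, "Carol");

            // DELETE ... WHERE _key = ? AND _val = ?  ->  conditional remove (no match here,
            // since the value is now "Carol").
            boolean removedExact = cache.remove(42, "Alice");

            // DELETE ... WHERE _key = ?               ->  unconditional remove.
            boolean removed = cache.remove(42);

            System.out.println(updated + " " + updatedAnyVal + " " + removedExact + " " + removed);
        }
    }
}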