Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
From class IgniteH2Indexing, method executeUpdateTransactional:
/**
* Execute update in transactional mode.
*
* @param qryId Query id.
* @param qryDesc Query descriptor.
* @param qryParams Query parameters.
* @param dml DML plan.
* @param loc Local flag.
* @param cancel Cancel hook.
* @return Update result.
* @throws IgniteCheckedException If failed.
*/
private UpdateResult executeUpdateTransactional(
    long qryId,
    QueryDescriptor qryDesc,
    QueryParameters qryParams,
    QueryParserResultDml dml,
    boolean loc,
    GridQueryCancel cancel
) throws IgniteCheckedException {
    UpdatePlan plan = dml.plan();

    GridCacheContext cctx = plan.cacheContext();

    assert cctx != null;
    assert cctx.transactional();

    GridNearTxLocal tx = tx(ctx);

    // Start an implicit transaction if the statement is not running inside an explicit one.
    boolean implicit = (tx == null);
    boolean commit = implicit && qryParams.autoCommit();

    if (implicit)
        tx = txStart(cctx, qryParams.timeout());

    requestSnapshot(tx);

    try (GridNearTxLocal toCommit = commit ? tx : null) {
        DmlDistributedPlanInfo distributedPlan = loc ? null : plan.distributedPlan();

        long timeout = implicit ? tx.remainingTime() : operationTimeout(qryParams.timeout(), tx);

        // Row-based path: produce the rows locally (fast row, prepared rows or a SELECT cursor)
        // and enlist them into the transaction one by one.
        if (cctx.isReplicated() || distributedPlan == null
            || ((plan.mode() == UpdateMode.INSERT || plan.mode() == UpdateMode.MERGE) && !plan.isLocalSubquery())) {
            boolean sequential = true;

            UpdateSourceIterator<?> it;

            if (plan.fastResult()) {
                IgniteBiTuple row = plan.getFastRow(qryParams.arguments());

                assert row != null;

                EnlistOperation op = UpdatePlan.enlistOperation(plan.mode());

                it = new DmlUpdateSingleEntryIterator<>(op, op.isDeleteOrLock() ? row.getKey() : row);
            }
            else if (plan.hasRows()) {
                it = new DmlUpdateResultsIterator(UpdatePlan.enlistOperation(plan.mode()), plan,
                    plan.createRows(qryParams.arguments()));
            }
            else {
                SqlFieldsQuery selectFieldsQry = new SqlFieldsQuery(plan.selectQuery(), qryDesc.collocated())
                    .setArgs(qryParams.arguments())
                    .setDistributedJoins(qryDesc.distributedJoins())
                    .setEnforceJoinOrder(qryDesc.enforceJoinOrder())
                    .setLocal(qryDesc.local())
                    .setPageSize(qryParams.pageSize())
                    .setTimeout((int)timeout, TimeUnit.MILLISECONDS)
                    .setLazy(qryParams.lazy());

                FieldsQueryCursor<List<?>> cur = executeSelectForDml(qryId, qryDesc.schemaName(), selectFieldsQry,
                    MvccUtils.mvccTracker(cctx, tx), cancel, (int)timeout);

                it = plan.iteratorForTransaction(connMgr, cur);
            }

            // TODO: IGNITE-11176 - Need to support cancellation.
            IgniteInternalFuture<Long> fut = tx.updateAsync(cctx, it, qryParams.pageSize(), timeout, sequential);

            UpdateResult res = new UpdateResult(fut.get(), X.EMPTY_OBJECT_ARRAY,
                plan.distributedPlan() != null ? plan.distributedPlan().derivedPartitions() : null);

            if (commit)
                toCommit.commit();

            return res;
        }

        // Distributed path: ship the statement to the data nodes and update their partitions in place.
        int[] ids = U.toIntArray(distributedPlan.getCacheIds());

        int flags = 0;

        if (qryDesc.enforceJoinOrder())
            flags |= GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER;

        if (distributedPlan.isReplicatedOnly())
            flags |= GridH2QueryRequest.FLAG_REPLICATED;

        if (qryParams.lazy())
            flags |= GridH2QueryRequest.FLAG_LAZY;

        flags = GridH2QueryRequest.setDataPageScanEnabled(flags, qryParams.dataPageScanEnabled());

        int[] parts = PartitionResult.calculatePartitions(qryParams.partitions(),
            distributedPlan.derivedPartitions(), qryParams.arguments());

        // Partition pruning left nothing to update.
        if (parts != null && parts.length == 0)
            return new UpdateResult(0, X.EMPTY_OBJECT_ARRAY, distributedPlan.derivedPartitions());
        else {
            // TODO: IGNITE-11176 - Need to support cancellation.
            IgniteInternalFuture<Long> fut = tx.updateAsync(cctx, ids, parts, qryDesc.schemaName(), qryDesc.sql(),
                qryParams.arguments(), flags, qryParams.pageSize(), timeout);

            UpdateResult res = new UpdateResult(fut.get(), X.EMPTY_OBJECT_ARRAY, distributedPlan.derivedPartitions());

            if (commit)
                toCommit.commit();

            return res;
        }
    }
    catch (ClusterTopologyServerNotFoundException e) {
        throw new CacheServerNotFoundException(e.getMessage(), e);
    }
    catch (IgniteCheckedException e) {
        IgniteSQLException sqlEx = X.cause(e, IgniteSQLException.class);

        if (sqlEx != null)
            throw sqlEx;

        Exception ex = IgniteUtils.convertExceptionNoWrap(e);

        if (ex instanceof IgniteException)
            throw (IgniteException)ex;

        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", ex);

        throw new IgniteSQLException("Failed to run update. " + ex.getMessage(), ex);
    }
    finally {
        if (commit)
            cctx.tm().resetContext();
    }
}
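For context, here is a minimal caller-side sketch of the public API path that ends up in executeUpdateTransactional: a DML statement executed inside an explicit transaction. The cache name, table and data are hypothetical, transactional SQL assumes an MVCC (TRANSACTIONAL_SNAPSHOT) table, and this is an illustration under those assumptions rather than code from the project.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.transactions.Transaction;

public class TxDmlExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Any cache instance can serve as the SQL entry point; "default" is a hypothetical name.
            IgniteCache<?, ?> cache = ignite.getOrCreateCache("default");

            // Transactional DML requires an MVCC-enabled table (TRANSACTIONAL_SNAPSHOT atomicity).
            cache.query(new SqlFieldsQuery(
                "CREATE TABLE IF NOT EXISTS Person (id LONG PRIMARY KEY, salary DOUBLE) " +
                "WITH \"atomicity=transactional_snapshot\"")).getAll();

            // No explicit transaction here: this takes the implicit (auto-commit) branch above.
            cache.query(new SqlFieldsQuery("INSERT INTO Person (id, salary) VALUES (?, ?)")
                .setArgs(1L, 1000.0)).getAll();

            // Explicit transaction: executeUpdateTransactional sees implicit == false
            // and leaves the commit to the caller.
            try (Transaction tx = ignite.transactions().txStart()) {
                cache.query(new SqlFieldsQuery("UPDATE Person SET salary = salary * 1.1 WHERE id = ?")
                    .setArgs(1L)).getAll();

                tx.commit();
            }
        }
    }
}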
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
From class IgniteH2Indexing, method streamQuery0:
/**
* Perform given statement against given data streamer. Only rows-based INSERT is supported.
*
* @param qry Query.
* @param schemaName Schema name.
* @param streamer Streamer to feed data to.
* @param dml DML statement.
* @param args Statement arguments.
* @param qryInitiatorId Query's initiator identifier.
* @return Number of rows in given INSERT statement.
* @throws IgniteCheckedException if failed.
*/
@SuppressWarnings("unchecked")
private long streamQuery0(String qry, String schemaName, IgniteDataStreamer streamer, QueryParserResultDml dml,
    final Object[] args, String qryInitiatorId) throws IgniteCheckedException {
    long qryId = runningQryMgr.register(
        QueryUtils.INCLUDE_SENSITIVE ? qry : sqlWithoutConst(dml.statement()),
        GridCacheQueryType.SQL_FIELDS,
        schemaName,
        true,
        null,
        qryInitiatorId);

    Exception failReason = null;

    try {
        UpdatePlan plan = dml.plan();

        Iterator<List<?>> iter = new GridQueryCacheObjectsIterator(
            updateQueryRows(qryId, schemaName, plan, args), objectContext(), true);

        if (!iter.hasNext())
            return 0;

        IgniteBiTuple<?, ?> t = plan.processRow(iter.next());

        // Single row: feed it to the streamer directly.
        if (!iter.hasNext()) {
            streamer.addData(t.getKey(), t.getValue());

            return 1;
        }
        // Multiple rows: collect them and feed the streamer a single batch.
        else {
            Map<Object, Object> rows = new LinkedHashMap<>(plan.rowCount());

            rows.put(t.getKey(), t.getValue());

            while (iter.hasNext()) {
                List<?> row = iter.next();

                t = plan.processRow(row);

                rows.put(t.getKey(), t.getValue());
            }

            streamer.addData(rows);

            return rows.size();
        }
    }
    catch (IgniteException | IgniteCheckedException e) {
        failReason = e;

        throw e;
    }
    finally {
        runningQryMgr.unregister(qryId, failReason);
    }
}
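The streamer path above backs the JDBC thin driver's streaming mode. Below is a hedged sketch of how a client would route INSERTs through it; the connection URL and the Person table are assumptions, not taken from this code.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.Statement;

public class StreamingInsertExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1/")) {
            try (Statement stmt = conn.createStatement()) {
                // Subsequent INSERTs on this connection are fed to an IgniteDataStreamer
                // instead of being executed as regular DML.
                stmt.execute("SET STREAMING ON");
            }

            try (PreparedStatement ps = conn.prepareStatement("INSERT INTO Person (id, salary) VALUES (?, ?)")) {
                for (long i = 0; i < 10_000; i++) {
                    ps.setLong(1, i);
                    ps.setDouble(2, 1000.0 + i);
                    ps.executeUpdate();
                }
            }

            try (Statement stmt = conn.createStatement()) {
                // Turning streaming off flushes whatever the streamer still buffers.
                stmt.execute("SET STREAMING OFF");
            }
        }
    }
}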
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
From class CommandProcessor, method processBulkLoadCommand:
/**
* Process bulk load COPY command.
*
* @param cmd The command.
* @param qryId Query id.
* @return The context (which is the result of the first request/response).
* @throws IgniteCheckedException If something failed.
*/
private FieldsQueryCursor<List<?>> processBulkLoadCommand(SqlBulkLoadCommand cmd, long qryId)
    throws IgniteCheckedException {
    if (cmd.packetSize() == null)
        cmd.packetSize(BulkLoadAckClientParameters.DFLT_PACKET_SIZE);

    GridH2Table tbl = schemaMgr.dataTable(cmd.schemaName(), cmd.tableName());

    if (tbl == null) {
        throw new IgniteSQLException("Table does not exist: " + cmd.tableName(),
            IgniteQueryErrorCode.TABLE_NOT_FOUND);
    }

    H2Utils.checkAndStartNotStartedCache(ctx, tbl);

    UpdatePlan plan = UpdatePlanBuilder.planForBulkLoad(cmd, tbl);

    IgniteClosureX<List<?>, IgniteBiTuple<?, ?>> dataConverter = new DmlBulkLoadDataConverter(plan);

    IgniteDataStreamer<Object, Object> streamer = ctx.grid().dataStreamer(tbl.cacheName());

    BulkLoadCacheWriter outputWriter = new BulkLoadStreamerWriter(streamer);

    BulkLoadParser inputParser = BulkLoadParser.createParser(cmd.inputFormat());

    BulkLoadProcessor processor = new BulkLoadProcessor(inputParser, dataConverter, outputWriter,
        idx.runningQueryManager(), qryId, ctx.tracing());

    BulkLoadAckClientParameters params = new BulkLoadAckClientParameters(cmd.localFileName(), cmd.packetSize());

    return new BulkLoadContextCursor(processor, params);
}
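On the client side, the COPY statement that gets parsed into the SqlBulkLoadCommand handled above can be issued through the JDBC thin driver. A minimal sketch, with a hypothetical file path and table:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class BulkLoadExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1/");
             Statement stmt = conn.createStatement()) {
            // The file is read locally by the driver, sent to the server in packets,
            // and streamed into the table's cache by the bulk load processor.
            int rows = stmt.executeUpdate(
                "COPY FROM '/tmp/people.csv' INTO Person (id, salary) FORMAT CSV");

            System.out.println("Loaded rows: " + rows);
        }
    }
}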