Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.

The class DmlStatementsProcessor, method doUpdate:

/**
 * Perform UPDATE operation on top of results of SELECT.
 *
 * @param plan Update plan.
 * @param cursor SELECT results.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Pair [cursor corresponding to results of UPDATE (contains number of items affected); keys whose values
 *     had been modified concurrently (arguments for a re-run)].
 */
@SuppressWarnings({"unchecked", "ThrowableResultOfMethodCallIgnored"})
private UpdateResult doUpdate(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize)
    throws IgniteCheckedException {
    GridCacheContext cctx = plan.cacheContext();

    DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

    for (List<?> row : cursor) {
        T3<Object, Object, Object> row0 = plan.processRowForUpdate(row);

        Object key = row0.get1();
        Object oldVal = row0.get2();
        Object newVal = row0.get3();

        sender.add(key, new ModifyingEntryProcessor(oldVal, new EntryValueUpdater(newVal)), 0);
    }

    sender.flush();

    SQLException resEx = sender.error();

    if (resEx != null) {
        if (!F.isEmpty(sender.failedKeys())) {
            // Don't go for a re-run if processing of some keys yielded exceptions and report keys that
            // had been modified concurrently right away.
            String msg = "Failed to UPDATE some keys because they had been modified concurrently " +
                "[keys=" + sender.failedKeys() + ']';

            SQLException dupEx = createJdbcSqlException(msg, IgniteQueryErrorCode.CONCURRENT_UPDATE);

            dupEx.setNextException(resEx);

            resEx = dupEx;
        }

        throw new IgniteSQLException(resEx);
    }

    return new UpdateResult(sender.updateCount(), sender.failedKeys().toArray());
}
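The ModifyingEntryProcessor/EntryValueUpdater pair gives each key compare-and-set semantics: an entry is rewritten only if its current value still matches the one the SELECT phase observed, and mismatches are collected as failed keys for a re-run. A minimal sketch of the same idea through Ignite's public entry-processor API (the cache name and values are made up for illustration):

import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheEntryProcessor;

public class OptimisticUpdateSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("test");

            cache.put(1, "old");

            String expected = "old";
            String updated = "new";

            // Rewrite the entry only if its value still matches what was read before;
            // a 'false' result signals a concurrent modification, i.e. a re-run candidate.
            boolean applied = cache.invoke(1, new CacheEntryProcessor<Integer, String, Boolean>() {
                @Override public Boolean process(MutableEntry<Integer, String> entry, Object... arguments)
                    throws EntryProcessorException {
                    if (!expected.equals(entry.getValue()))
                        return false; // Value changed concurrently; report the key instead.

                    entry.setValue(updated);

                    return true;
                }
            });

            System.out.println("Update applied: " + applied);
        }
    }
}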
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.

The class DmlStatementsProcessor, method executeUpdateStatement:

/**
 * Actually perform SQL DML operation locally.
 *
 * @param schemaName Schema name.
 * @param cctx Cache context.
 * @param c Connection.
 * @param prepared Prepared statement for DML query.
 * @param fieldsQry Fields query.
 * @param loc Local query flag.
 * @param filters Cache name and key filter.
 * @param cancel Query cancel state holder.
 * @return Pair [number of successfully processed items; keys that have failed to be processed].
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings({"ConstantConditions", "unchecked"})
private UpdateResult executeUpdateStatement(String schemaName, final GridCacheContext cctx, Connection c,
    Prepared prepared, SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters,
    GridQueryCancel cancel) throws IgniteCheckedException {
    Integer errKeysPos = null;

    UpdatePlan plan = getPlanForStatement(schemaName, c, prepared, fieldsQry, loc, errKeysPos);

    UpdateResult fastUpdateRes = plan.processFast(fieldsQry.getArgs());

    if (fastUpdateRes != null)
        return fastUpdateRes;

    if (plan.distributedPlan() != null) {
        UpdateResult result = doDistributedUpdate(schemaName, fieldsQry, plan, cancel);

        // null is returned in case not all nodes support distributed DML.
        if (result != null)
            return result;
    }

    Iterable<List<?>> cur;

    // Do a two-step query only if the locality flag is not set AND the plan's SELECT is an actual
    // sub-query and not some dummy stuff like "select 1, 2, 3;".
    if (!loc && !plan.isLocalSubquery()) {
        assert !F.isEmpty(plan.selectQuery());

        SqlFieldsQuery newFieldsQry = new SqlFieldsQuery(plan.selectQuery(), fieldsQry.isCollocated())
            .setArgs(fieldsQry.getArgs())
            .setDistributedJoins(fieldsQry.isDistributedJoins())
            .setEnforceJoinOrder(fieldsQry.isEnforceJoinOrder())
            .setLocal(fieldsQry.isLocal())
            .setPageSize(fieldsQry.getPageSize())
            .setTimeout(fieldsQry.getTimeout(), TimeUnit.MILLISECONDS);

        cur = (QueryCursorImpl<List<?>>)idx.querySqlFields(schemaName, newFieldsQry, null, true, true,
            cancel).get(0);
    }
    else if (plan.hasRows())
        cur = plan.createRows(fieldsQry.getArgs());
    else {
        final GridQueryFieldsResult res = idx.queryLocalSqlFields(schemaName, plan.selectQuery(),
            F.asList(fieldsQry.getArgs()), filters, fieldsQry.isEnforceJoinOrder(), fieldsQry.getTimeout(),
            cancel);

        cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                try {
                    return new GridQueryCacheObjectsIterator(res.iterator(), idx.objectContext(), true);
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }
        }, cancel);
    }

    int pageSize = loc ? 0 : fieldsQry.getPageSize();

    return processDmlSelectResult(cctx, plan, cur, pageSize);
}
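From the public API, this whole path is exercised by running DML through SqlFieldsQuery: plan lookup, the optional fast or distributed path, then UPDATE on top of SELECT results. A minimal, self-contained sketch (the cache name, schema, and Person table are illustrative):

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class DmlUpdateSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Any cache instance can serve as an entry point for SQL; bind it to PUBLIC schema.
            IgniteCache<?, ?> cache = ignite.getOrCreateCache(
                new CacheConfiguration<>("dummy").setSqlSchema("PUBLIC"));

            // Create a table and a row to update.
            cache.query(new SqlFieldsQuery(
                "CREATE TABLE Person (id INT PRIMARY KEY, name VARCHAR)")).getAll();
            cache.query(new SqlFieldsQuery(
                "INSERT INTO Person (id, name) VALUES (?, ?)").setArgs(1, "Jane")).getAll();

            // The UPDATE is routed through the DML processor on the server side.
            List<List<?>> res = cache.query(new SqlFieldsQuery(
                "UPDATE Person SET name = ? WHERE id = ?").setArgs("John", 1)).getAll();

            // DML cursors return a single row holding the number of affected entries.
            System.out.println("Updated rows: " + res.get(0).get(0));
        }
    }
}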
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.

The class DmlStatementsProcessor, method updateSqlFields:

/**
 * Execute DML statement, possibly with a few re-attempts in case of concurrent data modifications.
 *
 * @param schemaName Schema.
 * @param conn Connection.
 * @param prepared Prepared statement.
 * @param fieldsQry Original query.
 * @param loc Query locality flag.
 * @param filters Cache name and key filter.
 * @param cancel Cancel.
 * @return Update result (modified items count and failed keys).
 * @throws IgniteCheckedException If failed.
 */
private UpdateResult updateSqlFields(String schemaName, Connection conn, Prepared prepared,
    SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel)
    throws IgniteCheckedException {
    Object[] errKeys = null;

    long items = 0;

    UpdatePlan plan = getPlanForStatement(schemaName, conn, prepared, fieldsQry, loc, null);

    GridCacheContext<?, ?> cctx = plan.cacheContext();

    for (int i = 0; i < DFLT_DML_RERUN_ATTEMPTS; i++) {
        CacheOperationContext opCtx = setKeepBinaryContext(cctx);

        UpdateResult r;

        try {
            r = executeUpdateStatement(schemaName, cctx, conn, prepared, fieldsQry, loc, filters, cancel);
        }
        finally {
            cctx.operationContextPerCall(opCtx);
        }

        items += r.counter();
        errKeys = r.errorKeys();

        if (F.isEmpty(errKeys))
            break;
    }

    if (F.isEmpty(errKeys)) {
        if (items == 1L)
            return UpdateResult.ONE;
        else if (items == 0L)
            return UpdateResult.ZERO;
    }

    return new UpdateResult(items, errKeys);
}
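The setKeepBinaryContext(cctx) call above switches the cache operation context to binary mode for the duration of each attempt, so the DML engine works with BinaryObject instances instead of deserialized values. A minimal sketch of the public-API counterpart, IgniteCache.withKeepBinary() (the Person class and cache name are illustrative):

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.binary.BinaryObject;

public class KeepBinarySketch {
    static class Person {
        String name = "Jane";
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Person> cache = ignite.getOrCreateCache("people");

            cache.put(1, new Person());

            // Binary-mode view of the same cache: values are returned as BinaryObject
            // instances and are never deserialized into Person on this node.
            IgniteCache<Integer, BinaryObject> binCache = cache.withKeepBinary();

            BinaryObject bo = binCache.get(1);

            System.out.println("name = " + bo.<String>field("name"));
        }
    }
}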
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.

The class DmlStatementsProcessor, method updateSqlFieldsBatched:

/**
 * Execute DML statement on a batch of argument rows, possibly with a few re-attempts per row in case of
 * concurrent data modifications.
 *
 * @param schemaName Schema.
 * @param conn Connection.
 * @param prepared Prepared statement.
 * @param fieldsQry Original query with batched arguments.
 * @param loc Query locality flag.
 * @param filters Cache name and key filter.
 * @param cancel Cancel.
 * @return Collection of update results (modified items count and failed keys), one per batch row.
 * @throws IgniteCheckedException If failed.
 */
private Collection<UpdateResult> updateSqlFieldsBatched(String schemaName, Connection conn, Prepared prepared,
    SqlFieldsQueryEx fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel)
    throws IgniteCheckedException {
    List<Object[]> argss = fieldsQry.batchedArguments();

    UpdatePlan plan = getPlanForStatement(schemaName, conn, prepared, fieldsQry, loc, null);

    if (plan.hasRows() && plan.mode() == UpdateMode.INSERT) {
        GridCacheContext<?, ?> cctx = plan.cacheContext();

        CacheOperationContext opCtx = setKeepBinaryContext(cctx);

        try {
            List<List<List<?>>> cur = plan.createRows(argss);

            return processDmlSelectResultBatched(plan, cur, fieldsQry.getPageSize());
        }
        finally {
            cctx.operationContextPerCall(opCtx);
        }
    }
    else {
        // Fallback to previous mode.
        Collection<UpdateResult> ress = new ArrayList<>(argss.size());

        SQLException batchException = null;

        int[] cntPerRow = new int[argss.size()];

        int cntr = 0;

        for (Object[] args : argss) {
            SqlFieldsQueryEx qry0 = (SqlFieldsQueryEx)fieldsQry.copy();

            qry0.clearBatchedArgs();
            qry0.setArgs(args);

            UpdateResult res;

            try {
                res = updateSqlFields(schemaName, conn, prepared, qry0, loc, filters, cancel);

                cntPerRow[cntr++] = (int)res.counter();

                ress.add(res);
            }
            catch (Exception e) {
                String sqlState;

                int code;

                if (e instanceof IgniteSQLException) {
                    sqlState = ((IgniteSQLException)e).sqlState();
                    code = ((IgniteSQLException)e).statusCode();
                }
                else {
                    sqlState = SqlStateCode.INTERNAL_ERROR;
                    code = IgniteQueryErrorCode.UNKNOWN;
                }

                batchException = chainException(batchException,
                    new SQLException(e.getMessage(), sqlState, code, e));

                cntPerRow[cntr++] = Statement.EXECUTE_FAILED;
            }
        }

        if (batchException != null) {
            BatchUpdateException e = new BatchUpdateException(batchException.getMessage(),
                batchException.getSQLState(), batchException.getErrorCode(), cntPerRow, batchException);

            throw new IgniteCheckedException(e);
        }

        return ress;
    }
}
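On the client side, batched arguments typically originate from JDBC batching. A minimal sketch against the thin JDBC driver, assuming a Person table already exists and a node listens on 127.0.0.1:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class JdbcBatchSketch {
    public static void main(String[] args) throws Exception {
        // The thin driver sends the batched arguments in one round trip; on the server side
        // they arrive as SqlFieldsQueryEx.batchedArguments() and take the direct INSERT path
        // above when the plan allows it.
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1");
             PreparedStatement ps = conn.prepareStatement(
                 "INSERT INTO Person (id, name) VALUES (?, ?)")) {
            for (int i = 0; i < 100; i++) {
                ps.setInt(1, i);
                ps.setString(2, "name-" + i);

                ps.addBatch();
            }

            // Per-row update counts; failed rows surface as Statement.EXECUTE_FAILED inside
            // a BatchUpdateException, as constructed in the fallback branch above.
            int[] cnts = ps.executeBatch();

            System.out.println("Inserted rows: " + cnts.length);
        }
    }
}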
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.

The class DmlStatementsProcessor, method processBulkLoadCommand:

/**
 * Process bulk load COPY command.
 *
 * @param cmd The command.
 * @return The context (which is the result of the first request/response).
 * @throws IgniteCheckedException If something failed.
 */
public FieldsQueryCursor<List<?>> processBulkLoadCommand(SqlBulkLoadCommand cmd) throws IgniteCheckedException {
    if (cmd.packetSize() == null)
        cmd.packetSize(BulkLoadAckClientParameters.DFLT_PACKET_SIZE);

    GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());

    if (tbl == null) {
        idx.kernalContext().cache().createMissingQueryCaches();

        tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
    }

    if (tbl == null) {
        throw new IgniteSQLException("Table does not exist: " + cmd.tableName(),
            IgniteQueryErrorCode.TABLE_NOT_FOUND);
    }

    UpdatePlan plan = UpdatePlanBuilder.planForBulkLoad(cmd, tbl);

    IgniteClosureX<List<?>, IgniteBiTuple<?, ?>> dataConverter = new BulkLoadDataConverter(plan);

    GridCacheContext cache = tbl.cache();

    IgniteDataStreamer<Object, Object> streamer = cache.grid().dataStreamer(cache.name());

    BulkLoadCacheWriter outputWriter = new BulkLoadStreamerWriter(streamer);

    BulkLoadParser inputParser = BulkLoadParser.createParser(cmd.inputFormat());

    BulkLoadProcessor processor = new BulkLoadProcessor(inputParser, dataConverter, outputWriter);

    BulkLoadAckClientParameters params = new BulkLoadAckClientParameters(cmd.localFileName(), cmd.packetSize());

    return new BulkLoadContextCursor(processor, params);
}
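From a client's perspective, this path is driven by the SQL COPY command: the driver reads the local file in packets and streams them to the server, where the processor built above parses the input, converts rows via the UpdatePlan, and feeds them into an IgniteDataStreamer. A minimal sketch over the thin JDBC driver (the file path and Person table are illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class BulkLoadCopySketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1");
             Statement stmt = conn.createStatement()) {
            // COPY reads the file on the client and bulk-loads it into the target table.
            int rows = stmt.executeUpdate(
                "COPY FROM '/path/to/people.csv' INTO Person (id, name) FORMAT CSV");

            System.out.println("Loaded rows: " + rows);
        }
    }
}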