Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
The class DmlStatementsProcessor, method doInsert.
/**
 * Execute INSERT statement plan.
 *
 * @param plan Update plan to execute.
 * @param cursor Cursor to take inserted data from.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Number of items affected.
 * @throws IgniteCheckedException if failed, particularly in case of duplicate keys.
 */
@SuppressWarnings({"unchecked", "ConstantConditions"})
private long doInsert(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize) throws IgniteCheckedException {
    GridCacheContext cctx = plan.cacheContext();

    // If we have just one item to put, just do so.
    if (plan.rowCount() == 1) {
        IgniteBiTuple t = plan.processRow(cursor.iterator().next());

        if (cctx.cache().putIfAbsent(t.getKey(), t.getValue()))
            return 1;
        else
            throw new IgniteSQLException("Duplicate key during INSERT [key=" + t.getKey() + ']', DUPLICATE_KEY);
    }
    else {
        // Keys that failed to INSERT due to duplication are tracked by the sender.
        DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

        for (List<?> row : cursor) {
            final IgniteBiTuple keyValPair = plan.processRow(row);

            sender.add(keyValPair.getKey(), new InsertEntryProcessor(keyValPair.getValue()), 0);
        }

        sender.flush();

        SQLException resEx = sender.error();

        if (!F.isEmpty(sender.failedKeys())) {
            String msg = "Failed to INSERT some keys because they are already in cache [keys=" +
                sender.failedKeys() + ']';

            SQLException dupEx = new SQLException(msg, SqlStateCode.CONSTRAINT_VIOLATION);

            if (resEx == null)
                resEx = dupEx;
            else
                resEx.setNextException(dupEx);
        }

        if (resEx != null)
            throw new IgniteSQLException(resEx);

        return sender.updateCount();
    }
}
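The single-row branch above relies on the cache's putIfAbsent semantics, while the batched branch delegates the same duplicate check to an entry processor. Below is a minimal sketch of such a processor against the javax.cache API that Ignite caches implement; the class body is illustrative and only mirrors the role InsertEntryProcessor plays above, it is not the project's actual source:

import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;

final class InsertIfAbsentProcessor implements EntryProcessor<Object, Object, Boolean> {
    /** Value to insert. */
    private final Object val;

    InsertIfAbsentProcessor(Object val) {
        this.val = val;
    }

    /** Refuses to overwrite an existing entry, returning false so the caller can record the key as failed. */
    @Override public Boolean process(MutableEntry<Object, Object> entry, Object... args)
        throws EntryProcessorException {
        if (entry.exists())
            return false; // Duplicate key: leave the entry untouched.

        entry.setValue(val);

        return null;
    }
}

Returning false for an existing key, rather than throwing, is what would let a batch sender such as DmlBatchSender collect duplicates into failedKeys() instead of aborting the whole batch.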
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
The class QueryParser, method prepareDmlStatement.
/**
 * Prepare DML statement.
 *
 * @param planKey Plan key.
 * @param prepared Prepared H2 statement.
 * @return DML parsing result.
 */
private QueryParserResultDml prepareDmlStatement(QueryDescriptor planKey, Prepared prepared) {
    if (F.eq(QueryUtils.SCHEMA_SYS, planKey.schemaName()))
        throw new IgniteSQLException("DML statements are not supported on " + planKey.schemaName() + " schema",
            IgniteQueryErrorCode.UNSUPPORTED_OPERATION);

    // Prepare AST.
    GridSqlQueryParser parser = new GridSqlQueryParser(false, log);

    GridSqlStatement stmt = parser.parse(prepared);

    List<GridH2Table> tbls = parser.tablesForDml();

    // Start any involved caches that are not yet started, so they are available on the local node.
    for (GridH2Table h2tbl : tbls)
        H2Utils.checkAndStartNotStartedCache(idx.kernalContext(), h2tbl);

    // Check MVCC mode.
    GridCacheContextInfo ctx = null;
    boolean mvccEnabled = false;

    for (GridH2Table h2tbl : tbls) {
        GridCacheContextInfo curCtx = h2tbl.cacheInfo();
        boolean curMvccEnabled = curCtx.config().getAtomicityMode() == CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;

        if (ctx == null) {
            ctx = curCtx;
            mvccEnabled = curMvccEnabled;
        }
        else if (curMvccEnabled != mvccEnabled)
            MvccUtils.throwAtomicityModesMismatchException(ctx.config(), curCtx.config());
    }

    // Get streamer info.
    GridH2Table streamTbl = null;

    if (GridSqlQueryParser.isStreamableInsertStatement(prepared)) {
        GridSqlInsert insert = (GridSqlInsert)stmt;

        streamTbl = DmlAstUtils.gridTableForElement(insert.into()).dataTable();
    }

    // Create update plan.
    UpdatePlan plan;

    try {
        plan = UpdatePlanBuilder.planForStatement(planKey, stmt, mvccEnabled, idx, log, forceFillAbsentPKsWithDefaults);
    }
    catch (Exception e) {
        if (e instanceof IgniteSQLException)
            throw (IgniteSQLException)e;
        else
            throw new IgniteSQLException("Failed to prepare update plan.", e);
    }

    return new QueryParserResultDml(stmt, mvccEnabled, streamTbl, plan);
}
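The MVCC check above reduces to a single rule: every table touched by one DML statement must agree on whether TRANSACTIONAL_SNAPSHOT atomicity is in effect. Here is a standalone sketch of that rule against the public CacheConfiguration API; the helper class and exception text are ours, not Ignite's:

import java.util.List;

import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;

final class AtomicityModeCheck {
    /** @return Whether MVCC is enabled for all configurations; throws if the modes are mixed. */
    static boolean mvccEnabledForAll(List<CacheConfiguration<?, ?>> cfgs) {
        Boolean mvcc = null;

        for (CacheConfiguration<?, ?> cfg : cfgs) {
            boolean cur = cfg.getAtomicityMode() == CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;

            if (mvcc == null)
                mvcc = cur;
            else if (mvcc != cur)
                throw new IllegalStateException("Caches with different atomicity modes cannot be used in one statement");
        }

        return mvcc != null && mvcc;
    }
}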
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
The class DmlUtils, method dmlDoInsert.
/**
 * Execute INSERT statement plan.
 *
 * @param plan Update plan to execute.
 * @param cursor Cursor to take inserted data from.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Number of items affected.
 * @throws IgniteCheckedException if failed, particularly in case of duplicate keys.
 */
@SuppressWarnings({"unchecked"})
private static long dmlDoInsert(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize) throws IgniteCheckedException {
    GridCacheContext cctx = plan.cacheContext();

    // If we have just one item to put, just do so.
    if (plan.rowCount() == 1) {
        IgniteBiTuple t = plan.processRow(cursor.iterator().next());

        try (MTC.TraceSurroundings ignored = MTC.support(cctx.kernalContext().tracing()
            .create(SQL_CACHE_UPDATE, MTC.span()).addTag(SQL_CACHE_UPDATES, () -> "1"))) {
            if (cctx.cache().putIfAbsent(t.getKey(), t.getValue()))
                return 1;
            else
                throw new TransactionDuplicateKeyException("Duplicate key during INSERT [key=" + t.getKey() + ']');
        }
    }
    else {
        // Keys that failed to INSERT due to duplication are tracked by the sender.
        DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

        for (List<?> row : cursor) {
            final IgniteBiTuple keyValPair = plan.processRow(row);

            sender.add(keyValPair.getKey(), new DmlStatementsProcessor.InsertEntryProcessor(keyValPair.getValue()), 0);
        }

        sender.flush();

        SQLException resEx = sender.error();

        if (!F.isEmpty(sender.failedKeys())) {
            String msg = "Failed to INSERT some keys because they are already in cache [keys=" +
                sender.failedKeys() + ']';

            SQLException dupEx = new SQLException(msg, SqlStateCode.CONSTRAINT_VIOLATION);

            if (resEx == null)
                resEx = dupEx;
            else
                resEx.setNextException(dupEx);
        }

        if (resEx != null)
            throw new IgniteSQLException(resEx);

        return sender.updateCount();
    }
}
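Both insert paths report failures by chaining SQLExceptions through setNextException, so a single thrown exception can describe several failed keys. A caller has to walk the chain to see them all; this is plain JDBC, nothing Ignite-specific:

import java.sql.SQLException;

final class SqlErrorChain {
    /** Print every link of a chained SQLException, not just the head. */
    static void printAll(SQLException first) {
        for (SQLException e = first; e != null; e = e.getNextException())
            System.err.println(e.getSQLState() + ": " + e.getMessage());
    }
}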
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
The class DmlUtils, method doUpdate.
/**
 * Perform UPDATE operation on top of results of SELECT.
 *
 * @param plan Update plan.
 * @param cursor SELECT results.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Pair [cursor corresponding to results of UPDATE (contains number of items affected); keys whose values
 *     had been modified concurrently (arguments for a re-run)].
 * @throws IgniteCheckedException If failed.
 */
private static UpdateResult doUpdate(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize)
    throws IgniteCheckedException {
    GridCacheContext cctx = plan.cacheContext();

    DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

    for (List<?> row : cursor) {
        T3<Object, Object, Object> row0 = plan.processRowForUpdate(row);

        Object key = row0.get1();
        Object oldVal = row0.get2();
        Object newVal = row0.get3();

        sender.add(key, new DmlStatementsProcessor.ModifyingEntryProcessor(oldVal,
            new DmlStatementsProcessor.EntryValueUpdater(newVal)), 0);
    }

    sender.flush();

    SQLException resEx = sender.error();

    if (resEx != null) {
        if (!F.isEmpty(sender.failedKeys())) {
            // Don't go for a re-run if processing of some keys yielded exceptions; instead, report the keys
            // that had been modified concurrently right away.
            String msg = "Failed to UPDATE some keys because they had been modified concurrently [keys=" +
                sender.failedKeys() + ']';

            SQLException dupEx = createJdbcSqlException(msg, IgniteQueryErrorCode.CONCURRENT_UPDATE);

            dupEx.setNextException(resEx);

            resEx = dupEx;
        }

        throw new IgniteSQLException(resEx);
    }

    return new UpdateResult(sender.updateCount(), sender.failedKeys().toArray(),
        cursor instanceof QueryCursorImpl ? ((QueryCursorImpl)cursor).partitionResult() : null);
}
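The "re-run" wording in the Javadoc refers to an optimistic retry: keys whose values changed between the SELECT and the UPDATE come back in the result and can be fed into another attempt. A minimal sketch of that loop follows; MAX_RETRIES and the Attempt callback are illustrative, not Ignite API:

import java.util.Arrays;

final class RerunSketch {
    /** Illustrative cap on attempts, not an Ignite constant. */
    private static final int MAX_RETRIES = 4;

    /** Callback representing one UPDATE attempt over the given keys. */
    interface Attempt {
        /** @return Keys that were modified concurrently and need a re-run; empty when done. */
        Object[] run(Object[] keys);
    }

    /** Re-run the UPDATE against the contended keys until none remain or attempts are exhausted. */
    static void updateWithRerun(Attempt attempt, Object[] keys) {
        for (int i = 0; i < MAX_RETRIES && keys.length > 0; i++)
            keys = attempt.run(keys);

        if (keys.length > 0)
            throw new IllegalStateException("Keys still contended after retries: " + Arrays.toString(keys));
    }
}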
Use of org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan in project ignite by apache.
The class DmlUtils, method doInsertBatched.
/**
 * Execute INSERT statement plan for a batched statement.
 *
 * @param plan Plan to execute.
 * @param cursor Cursor to take inserted data from, i.e. a list of batch arguments for each query.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Per-row update results.
 * @throws IgniteCheckedException if failed, particularly in case of duplicate keys.
 */
private static List<UpdateResult> doInsertBatched(UpdatePlan plan, List<List<List<?>>> cursor, int pageSize)
    throws IgniteCheckedException {
    GridCacheContext cctx = plan.cacheContext();

    DmlBatchSender snd = new DmlBatchSender(cctx, pageSize, cursor.size());

    int rowNum = 0;

    SQLException resEx = null;

    for (List<List<?>> qryRow : cursor) {
        for (List<?> row : qryRow) {
            try {
                final IgniteBiTuple keyValPair = plan.processRow(row);

                snd.add(keyValPair.getKey(), new DmlStatementsProcessor.InsertEntryProcessor(keyValPair.getValue()), rowNum);
            }
            catch (Exception e) {
                String sqlState;
                int code;

                if (e instanceof IgniteSQLException) {
                    sqlState = ((IgniteSQLException)e).sqlState();
                    code = ((IgniteSQLException)e).statusCode();
                }
                else {
                    sqlState = SqlStateCode.INTERNAL_ERROR;
                    code = IgniteQueryErrorCode.UNKNOWN;
                }

                resEx = chainException(resEx, new SQLException(e.getMessage(), sqlState, code, e));

                snd.setFailed(rowNum);
            }
        }

        rowNum++;
    }

    try {
        snd.flush();
    }
    catch (Exception e) {
        resEx = chainException(resEx, new SQLException(e.getMessage(), SqlStateCode.INTERNAL_ERROR,
            IgniteQueryErrorCode.UNKNOWN, e));
    }

    resEx = chainException(resEx, snd.error());

    if (!F.isEmpty(snd.failedKeys())) {
        SQLException e = new SQLException("Failed to INSERT some keys because they are already in cache [keys=" +
            snd.failedKeys() + ']', SqlStateCode.CONSTRAINT_VIOLATION, DUPLICATE_KEY);

        resEx = chainException(resEx, e);
    }

    if (resEx != null) {
        BatchUpdateException e = new BatchUpdateException(resEx.getMessage(), resEx.getSQLState(),
            resEx.getErrorCode(), snd.perRowCounterAsArray(), resEx);

        throw new IgniteCheckedException(e);
    }

    int[] cntPerRow = snd.perRowCounterAsArray();

    List<UpdateResult> res = new ArrayList<>(cntPerRow.length);

    for (int i = 0; i < cntPerRow.length; i++) {
        int cnt = cntPerRow[i];

        res.add(new UpdateResult(cnt, X.EMPTY_OBJECT_ARRAY));
    }

    return res;
}
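On the client side, the BatchUpdateException assembled above carries the per-row counters from perRowCounterAsArray() and exposes them via getUpdateCounts(), so a JDBC caller can tell which batch rows failed. A sketch of reading them, assuming failed rows are marked with Statement.EXECUTE_FAILED as JDBC convention suggests:

import java.sql.BatchUpdateException;
import java.sql.Statement;

final class BatchFailureReport {
    /** Report the outcome of each row in a failed JDBC batch. */
    static void report(BatchUpdateException e) {
        int[] counts = e.getUpdateCounts();

        for (int i = 0; i < counts.length; i++) {
            if (counts[i] == Statement.EXECUTE_FAILED)
                System.err.println("Row " + i + ": failed");
            else
                System.err.println("Row " + i + ": updated " + counts[i] + " entries");
        }
    }
}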