Use of org.apache.ignite.internal.processors.query.h2.dml.DmlBatchSender in project ignite by apache.
The class DmlStatementsProcessor, method doDelete.
/**
 * Perform DELETE operation on top of results of SELECT.
 *
 * @param cctx Cache context.
 * @param cursor SELECT results.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Results of DELETE (number of items affected and keys that failed to be updated).
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings({"unchecked", "ConstantConditions", "ThrowableResultOfMethodCallIgnored"})
private UpdateResult doDelete(GridCacheContext cctx, Iterable<List<?>> cursor, int pageSize)
    throws IgniteCheckedException {
    DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

    for (List<?> row : cursor) {
        if (row.size() != 2) {
            U.warn(log, "Invalid row size on DELETE - expected 2, got " + row.size());

            continue;
        }

        sender.add(row.get(0), new ModifyingEntryProcessor(row.get(1), RMV), 0);
    }

    sender.flush();

    SQLException resEx = sender.error();

    if (resEx != null) {
        if (!F.isEmpty(sender.failedKeys())) {
            // If processing of some keys yielded exceptions, don't go for a re-run:
            // report the keys that were modified concurrently right away.
            String msg = "Failed to DELETE some keys because they had been modified concurrently " +
                "[keys=" + sender.failedKeys() + ']';

            SQLException conEx = createJdbcSqlException(msg, IgniteQueryErrorCode.CONCURRENT_UPDATE);
            conEx.setNextException(resEx);

            resEx = conEx;
        }

        throw new IgniteSQLException(resEx);
    }

    return new UpdateResult(sender.updateCount(), sender.failedKeys().toArray());
}
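Every snippet on this page follows the same DmlBatchSender lifecycle: construct, add entries, flush, then inspect the error, the failed keys, and the update count. A minimal sketch of that pattern in isolation, using only the calls visible in the snippets (key, entryProc, and the two handle* helpers are hypothetical placeholders):

DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1); // same constructor arguments as the snippets on this page

sender.add(key, entryProc, 0); // queue one entry; every snippet here uses batch index 0

sender.flush(); // send any pending batches and wait for completion

if (sender.error() != null || !F.isEmpty(sender.failedKeys()))
    handleFailure(sender.error(), sender.failedKeys()); // hypothetical: error() is the first SQLException, failedKeys() the rejected keys
else
    handleSuccess(sender.updateCount()); // hypothetical: updateCount() is the number of entries actually modified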
The class DmlStatementsProcessor, method doUpdate.
/**
 * Perform UPDATE operation on top of results of SELECT.
 *
 * @param plan Update plan.
 * @param cursor SELECT results.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Pair [cursor corresponding to results of UPDATE (contains number of items affected); keys whose values
 *     had been modified concurrently (arguments for a re-run)].
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings({"unchecked", "ThrowableResultOfMethodCallIgnored"})
private UpdateResult doUpdate(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize)
    throws IgniteCheckedException {
    GridCacheContext cctx = plan.cacheContext();

    DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

    for (List<?> row : cursor) {
        T3<Object, Object, Object> row0 = plan.processRowForUpdate(row);

        Object key = row0.get1();
        Object oldVal = row0.get2();
        Object newVal = row0.get3();

        sender.add(key, new ModifyingEntryProcessor(oldVal, new EntryValueUpdater(newVal)), 0);
    }

    sender.flush();

    SQLException resEx = sender.error();

    if (resEx != null) {
        if (!F.isEmpty(sender.failedKeys())) {
            // If processing of some keys yielded exceptions, don't go for a re-run:
            // report the keys that were modified concurrently right away.
            String msg = "Failed to UPDATE some keys because they had been modified concurrently " +
                "[keys=" + sender.failedKeys() + ']';

            SQLException conEx = createJdbcSqlException(msg, IgniteQueryErrorCode.CONCURRENT_UPDATE);
            conEx.setNextException(resEx);

            resEx = conEx;
        }

        throw new IgniteSQLException(resEx);
    }

    return new UpdateResult(sender.updateCount(), sender.failedKeys().toArray());
}
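The Javadoc calls the returned failed keys "arguments for a re-run". A hedged sketch of how a caller could drive such a re-run, assuming an UpdateResult with accessors counter() and errorKeys() and a helper selectRowsForKeys(...) that re-executes the SELECT for the failed keys only (all three names are assumptions, not the actual Ignite driver code):

UpdateResult res = doUpdate(plan, cursor, pageSize);

long total = res.counter(); // assumed accessor for the number of items affected

for (int attempt = 0; attempt < MAX_RETRIES && res.errorKeys().length > 0; attempt++) {
    // Hypothetical helper: re-SELECT only the rows whose keys failed with CONCURRENT_UPDATE.
    Iterable<List<?>> retryRows = selectRowsForKeys(plan, res.errorKeys());

    res = doUpdate(plan, retryRows, pageSize);

    total += res.counter();
}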
The class DmlStatementsProcessor, method doInsert.
/**
 * Execute INSERT statement plan.
 *
 * @param plan Update plan.
 * @param cursor Cursor to take inserted data from.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Number of items affected.
 * @throws IgniteCheckedException If failed, particularly in case of duplicate keys.
 */
@SuppressWarnings({"unchecked", "ConstantConditions"})
private long doInsert(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize) throws IgniteCheckedException {
    GridCacheContext cctx = plan.cacheContext();

    // If we have just one item to put, just do so.
    if (plan.rowCount() == 1) {
        IgniteBiTuple t = plan.processRow(cursor.iterator().next());

        if (cctx.cache().putIfAbsent(t.getKey(), t.getValue()))
            return 1;
        else
            throw new IgniteSQLException("Duplicate key during INSERT [key=" + t.getKey() + ']', DUPLICATE_KEY);
    }
    else {
        // Keys that fail to INSERT due to duplication are collected by the sender.
        DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

        for (List<?> row : cursor) {
            final IgniteBiTuple keyValPair = plan.processRow(row);

            sender.add(keyValPair.getKey(), new InsertEntryProcessor(keyValPair.getValue()), 0);
        }

        sender.flush();

        SQLException resEx = sender.error();

        if (!F.isEmpty(sender.failedKeys())) {
            String msg = "Failed to INSERT some keys because they are already in cache " +
                "[keys=" + sender.failedKeys() + ']';

            SQLException dupEx = new SQLException(msg, SqlStateCode.CONSTRAINT_VIOLATION);

            if (resEx == null)
                resEx = dupEx;
            else
                resEx.setNextException(dupEx);
        }

        if (resEx != null)
            throw new IgniteSQLException(resEx);

        return sender.updateCount();
    }
}
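Note how the batched INSERT path chains a duplicate-key SQLException onto any sender error via setNextException(). Callers can recover every individual failure by walking that chain with the standard JDBC API; a minimal, self-contained sketch:

import java.sql.SQLException;

/** Prints every link of a chained SQLException (getNextException() returns null at the end). */
static void printChain(SQLException first) {
    for (SQLException e = first; e != null; e = e.getNextException())
        System.err.println(e.getSQLState() + ": " + e.getMessage());
}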
The class DmlUtils, method dmlDoInsert.
/**
 * Execute INSERT statement plan.
 *
 * @param plan Update plan.
 * @param cursor Cursor to take inserted data from.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Number of items affected.
 * @throws IgniteCheckedException If failed, particularly in case of duplicate keys.
 */
@SuppressWarnings({"unchecked"})
private static long dmlDoInsert(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize) throws IgniteCheckedException {
    GridCacheContext cctx = plan.cacheContext();

    // If we have just one item to put, just do so.
    if (plan.rowCount() == 1) {
        IgniteBiTuple t = plan.processRow(cursor.iterator().next());

        try (MTC.TraceSurroundings ignored = MTC.support(cctx.kernalContext().tracing()
            .create(SQL_CACHE_UPDATE, MTC.span()).addTag(SQL_CACHE_UPDATES, () -> "1"))) {
            if (cctx.cache().putIfAbsent(t.getKey(), t.getValue()))
                return 1;
            else
                throw new TransactionDuplicateKeyException("Duplicate key during INSERT [key=" + t.getKey() + ']');
        }
    }
    else {
        // Keys that fail to INSERT due to duplication are collected by the sender.
        DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

        for (List<?> row : cursor) {
            final IgniteBiTuple keyValPair = plan.processRow(row);

            sender.add(keyValPair.getKey(), new DmlStatementsProcessor.InsertEntryProcessor(keyValPair.getValue()), 0);
        }

        sender.flush();

        SQLException resEx = sender.error();

        if (!F.isEmpty(sender.failedKeys())) {
            String msg = "Failed to INSERT some keys because they are already in cache " +
                "[keys=" + sender.failedKeys() + ']';

            SQLException dupEx = new SQLException(msg, SqlStateCode.CONSTRAINT_VIOLATION);

            if (resEx == null)
                resEx = dupEx;
            else
                resEx.setNextException(dupEx);
        }

        if (resEx != null)
            throw new IgniteSQLException(resEx);

        return sender.updateCount();
    }
}
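Both INSERT variants map the single-row fast path onto putIfAbsent(), which writes only when the key is absent; a false return is exactly what surfaces as a duplicate-key error. A minimal sketch against the public IgniteCache API, assuming a started Ignite node `ignite` (the cache name and values are made up for illustration):

IgniteCache<Integer, String> cache = ignite.getOrCreateCache("people"); // hypothetical cache

boolean inserted = cache.putIfAbsent(1, "Alice"); // true: key 1 was absent, value stored
boolean again = cache.putIfAbsent(1, "Bob"); // false: key 1 exists -> reported as a duplicate key at the SQL level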
The class DmlUtils, method doUpdate.
/**
 * Perform UPDATE operation on top of results of SELECT.
 *
 * @param plan Update plan.
 * @param cursor SELECT results.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Pair [cursor corresponding to results of UPDATE (contains number of items affected); keys whose values
 *     had been modified concurrently (arguments for a re-run)].
 * @throws IgniteCheckedException If failed.
 */
private static UpdateResult doUpdate(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize)
    throws IgniteCheckedException {
    GridCacheContext cctx = plan.cacheContext();

    DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

    for (List<?> row : cursor) {
        T3<Object, Object, Object> row0 = plan.processRowForUpdate(row);

        Object key = row0.get1();
        Object oldVal = row0.get2();
        Object newVal = row0.get3();

        sender.add(key, new DmlStatementsProcessor.ModifyingEntryProcessor(oldVal,
            new DmlStatementsProcessor.EntryValueUpdater(newVal)), 0);
    }

    sender.flush();

    SQLException resEx = sender.error();

    if (resEx != null) {
        if (!F.isEmpty(sender.failedKeys())) {
            // If processing of some keys yielded exceptions, don't go for a re-run:
            // report the keys that were modified concurrently right away.
            String msg = "Failed to UPDATE some keys because they had been modified concurrently " +
                "[keys=" + sender.failedKeys() + ']';

            SQLException conEx = createJdbcSqlException(msg, IgniteQueryErrorCode.CONCURRENT_UPDATE);
            conEx.setNextException(resEx);

            resEx = conEx;
        }

        throw new IgniteSQLException(resEx);
    }

    return new UpdateResult(sender.updateCount(), sender.failedKeys().toArray(),
        cursor instanceof QueryCursorImpl ? ((QueryCursorImpl)cursor).partitionResult() : null);
}
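The concurrent-update detection in both doUpdate() variants comes from ModifyingEntryProcessor: the new value is written only if the entry still holds the value the SELECT read, otherwise the key ends up in failedKeys(). A simplified, hypothetical compare-and-set processor in plain JCache terms (not the actual Ignite class) illustrating the idea:

import javax.cache.processor.EntryProcessor;
import javax.cache.processor.MutableEntry;

/** Hypothetical simplification of ModifyingEntryProcessor: write newVal only if expOldVal still matches. */
class CasEntryProcessor implements EntryProcessor<Object, Object, Boolean> {
    private final Object expOldVal;
    private final Object newVal;

    CasEntryProcessor(Object expOldVal, Object newVal) {
        this.expOldVal = expOldVal;
        this.newVal = newVal;
    }

    @Override public Boolean process(MutableEntry<Object, Object> entry, Object... args) {
        if (!entry.exists() || !entry.getValue().equals(expOldVal))
            return false; // entry was modified or removed concurrently -> caller records the key as failed

        entry.setValue(newVal);

        return true; // value replaced atomically within the entry lock
    }
}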