Use of UpdateResult in project gridgain by gridgain.
Class DmlUtils, method doDelete.
/**
* Perform DELETE operation on top of results of SELECT.
*
* @param cctx Cache context.
* @param cursor SELECT results.
* @param pageSize Batch size for streaming, anything <= 0 for single page operations.
* @return Results of DELETE (number of items affected AND keys that failed to be updated).
*/
private static UpdateResult doDelete(GridCacheContext cctx, Iterable<List<?>> cursor, int pageSize) throws IgniteCheckedException {
    DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

    for (List<?> row : cursor) {
        if (row.size() != 2)
            continue;

        Object key = row.get(0);

        ClusterNode node = sender.primaryNodeByKey(key);

        IgniteInClosure<MutableEntry<Object, Object>> rmvC = DmlStatementsProcessor.getRemoveClosure(node, key);

        sender.add(key, new DmlStatementsProcessor.ModifyingEntryProcessor(row.get(1), rmvC), 0);
    }

    sender.flush();

    SQLException resEx = sender.error();

    if (resEx != null) {
        if (!F.isEmpty(sender.failedKeys())) {
            // Don't go for a re-run if processing of some keys yielded exceptions and report keys that
            // had been modified concurrently right away.
            String msg = "Failed to DELETE some keys because they had been modified concurrently [keys=" + sender.failedKeys() + ']';

            SQLException conEx = createJdbcSqlException(msg, IgniteQueryErrorCode.CONCURRENT_UPDATE);

            conEx.setNextException(resEx);

            resEx = conEx;
        }

        throw new IgniteSQLException(resEx);
    }

    return new UpdateResult(sender.updateCount(), sender.failedKeys().toArray(),
        cursor instanceof QueryCursorImpl ? ((QueryCursorImpl)cursor).partitionResult() : null);
}
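The interesting part of doDelete is how errors surface: a concurrent-update SQLException is built for the failed keys and chained in front of the sender's original error via setNextException, so callers see the retry-relevant message first but can still walk the whole chain. A minimal, self-contained sketch of that standard JDBC chaining pattern (plain java.sql only; the class name, SQLSTATE values and vendor codes below are illustrative, not GridGain's):

import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;

public class ChainedSqlErrorSketch {
    public static void main(String[] args) {
        // Pretend the batch sender reported this error while applying entry processors.
        SQLException senderErr = new SQLException("Entry processor failed", "50000", 1);

        // Keys that could not be deleted because their values changed concurrently (hypothetical).
        List<Integer> failedKeys = Arrays.asList(3, 7);

        // Same pattern as doDelete: put the concurrent-update error first, chain the original one after it.
        SQLException conEx = new SQLException(
            "Failed to DELETE some keys because they had been modified concurrently [keys=" + failedKeys + ']',
            "40001", 4091);
        conEx.setNextException(senderErr);

        // Callers can walk the chain with getNextException().
        for (SQLException e = conEx; e != null; e = e.getNextException())
            System.out.println(e.getSQLState() + ": " + e.getMessage());
    }
}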
Use of UpdateResult in project gridgain by gridgain.
Class DmlUtils, method doUpdate.
/**
 * Perform UPDATE operation on top of results of SELECT.
 *
 * @param plan Update plan.
 * @param cursor SELECT results.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Pair [cursor corresponding to results of UPDATE (contains number of items affected); keys whose values
 * had been modified concurrently (arguments for a re-run)].
 */
private static UpdateResult doUpdate(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize) throws IgniteCheckedException {
    GridCacheContext cctx = plan.cacheContext();

    DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

    for (List<?> row : cursor) {
        T3<Object, Object, Object> row0 = plan.processRowForUpdate(row);

        Object key = row0.get1();
        Object oldVal = row0.get2();
        Object newVal = row0.get3();

        sender.add(key, new DmlStatementsProcessor.ModifyingEntryProcessor(oldVal, new DmlStatementsProcessor.EntryValueUpdater(newVal)), 0);
    }

    sender.flush();

    SQLException resEx = sender.error();

    if (resEx != null) {
        if (!F.isEmpty(sender.failedKeys())) {
            // Don't go for a re-run if processing of some keys yielded exceptions and report keys that
            // had been modified concurrently right away.
            String msg = "Failed to UPDATE some keys because they had been modified concurrently [keys=" + sender.failedKeys() + ']';

            SQLException dupEx = createJdbcSqlException(msg, IgniteQueryErrorCode.CONCURRENT_UPDATE);

            dupEx.setNextException(resEx);

            resEx = dupEx;
        }

        throw new IgniteSQLException(resEx);
    }

    return new UpdateResult(sender.updateCount(), sender.failedKeys().toArray(),
        cursor instanceof QueryCursorImpl ? ((QueryCursorImpl)cursor).partitionResult() : null);
}
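Each T3 triple carries the key, the value the SELECT observed and the value to write; ModifyingEntryProcessor together with EntryValueUpdater only applies the write if the entry still holds the observed value, and keys that lost that race end up in failedKeys() as candidates for a re-run. A rough single-JVM model of those compare-and-set semantics, using ConcurrentHashMap.replace in place of GridGain entry processors (class and variable names are made up for illustration):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConditionalUpdateSketch {
    public static void main(String[] args) {
        ConcurrentMap<String, Integer> cache = new ConcurrentHashMap<>();
        cache.put("a", 1);
        cache.put("b", 2);

        // Each row from the SELECT yields (key, expected old value, new value) -- the T3 triple above.
        Object[][] rows = { { "a", 1, 10 }, { "b", 99, 20 } }; // "b" was changed concurrently.

        long updCnt = 0;
        List<Object> failedKeys = new ArrayList<>();

        for (Object[] row : rows) {
            String key = (String)row[0];
            Integer oldVal = (Integer)row[1];
            Integer newVal = (Integer)row[2];

            // Only update if the entry still holds the value the SELECT saw.
            if (cache.replace(key, oldVal, newVal))
                updCnt++;
            else
                failedKeys.add(key); // Candidate for a re-run, like UpdateResult's error keys.
        }

        System.out.println("updated=" + updCnt + ", failedKeys=" + failedKeys);
    }
}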
Use of UpdateResult in project gridgain by gridgain.
Class DmlDistributedUpdateRun, method handleResponse.
/**
* Handle response from remote node.
*
* @param id Node id.
* @param msg Response message.
*/
public void handleResponse(UUID id, GridH2DmlResponse msg) {
    synchronized (this) {
        if (!rspNodes.add(id))
            return; // Ignore duplicated messages.

        String err = msg.error();

        if (err != null) {
            fut.onDone(new IgniteCheckedException("Update failed. " + (F.isEmpty(err) ? "" : err) +
                " [reqId=" + msg.requestId() + ", node=" + id + "]."));

            return;
        }

        if (!F.isEmpty(msg.errorKeys())) {
            List<Object> errList = Arrays.asList(msg.errorKeys());

            if (errorKeys == null)
                errorKeys = new HashSet<>(errList);
            else
                errorKeys.addAll(errList);
        }

        updCntr += msg.updateCounter();

        if (rspNodes.size() == nodeCount)
            fut.onDone(new UpdateResult(updCntr, errorKeys == null ? null : errorKeys.toArray()));
    }
}
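handleResponse folds per-node replies into one result: duplicate messages are ignored, error keys are unioned, update counters are summed, and the future completes once every expected node has answered. A simplified standalone sketch of that accumulation pattern, with java.util.concurrent.CompletableFuture standing in for GridGain's internal future and all names below invented for illustration:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;

public class ResponseCollectorSketch {
    private final int nodeCount;
    private final Set<UUID> rspNodes = new HashSet<>();
    private final Set<Object> errorKeys = new HashSet<>();
    private long updCntr;

    // Completed once every node has replied; holds [updateCount, errorKeys].
    final CompletableFuture<Object[]> fut = new CompletableFuture<>();

    ResponseCollectorSketch(int nodeCount) {
        this.nodeCount = nodeCount;
    }

    // Same shape as handleResponse: ignore duplicates, accumulate counters and error keys,
    // complete the future when the last node has answered.
    synchronized void onResponse(UUID nodeId, long updateCounter, Object[] errKeys) {
        if (!rspNodes.add(nodeId))
            return; // Duplicate message from this node.

        if (errKeys != null)
            errorKeys.addAll(Arrays.asList(errKeys));

        updCntr += updateCounter;

        if (rspNodes.size() == nodeCount)
            fut.complete(new Object[] { updCntr, errorKeys.toArray() });
    }

    public static void main(String[] args) {
        ResponseCollectorSketch run = new ResponseCollectorSketch(2);
        UUID n1 = UUID.randomUUID(), n2 = UUID.randomUUID();

        run.onResponse(n1, 3, null);
        run.onResponse(n1, 3, null); // Ignored duplicate.
        run.onResponse(n2, 2, new Object[] { "k5" });

        run.fut.thenAccept(r -> System.out.println("updated=" + r[0] + ", errorKeys=" + Arrays.toString((Object[])r[1])));
    }
}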