Use of org.apache.ignite.internal.processors.cache.CacheOperationContext in project ignite by apache.
From class DmlStatementsProcessor, method updateSqlFields.
/**
 * Execute DML statement, possibly with a few re-attempts in case of concurrent data modifications.
 *
 * @param schemaName Schema.
 * @param stmt JDBC statement.
 * @param fieldsQry Original query.
 * @param loc Query locality flag.
 * @param filters Cache name and key filter.
 * @param cancel Query cancel.
 * @return Update result (modified items count and failed keys).
 * @throws IgniteCheckedException If failed.
 */
private UpdateResult updateSqlFields(String schemaName, PreparedStatement stmt, SqlFieldsQuery fieldsQry,
    boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel) throws IgniteCheckedException {
    Object[] errKeys = null;

    long items = 0;

    UpdatePlan plan = getPlanForStatement(schemaName, stmt, null);

    GridCacheContext<?, ?> cctx = plan.tbl.rowDescriptor().context();

    for (int i = 0; i < DFLT_DML_RERUN_ATTEMPTS; i++) {
        CacheOperationContext opCtx = cctx.operationContextPerCall();

        // Force keepBinary for operation context to avoid binary deserialization inside entry processor.
        if (cctx.binaryMarshaller()) {
            CacheOperationContext newOpCtx = null;

            if (opCtx == null)
                // Mimics behavior of GridCacheAdapter#keepBinary and GridCacheProxyImpl#keepBinary.
                newOpCtx = new CacheOperationContext(false, null, true, null, false, null, false);
            else if (!opCtx.isKeepBinary())
                newOpCtx = opCtx.keepBinary();

            if (newOpCtx != null)
                cctx.operationContextPerCall(newOpCtx);
        }

        UpdateResult r;

        try {
            r = executeUpdateStatement(schemaName, cctx, stmt, fieldsQry, loc, filters, cancel, errKeys);
        }
        finally {
            cctx.operationContextPerCall(opCtx);
        }

        items += r.cnt;
        errKeys = r.errKeys;

        if (F.isEmpty(errKeys))
            break;
    }

    if (F.isEmpty(errKeys)) {
        if (items == 1L)
            return UpdateResult.ONE;
        else if (items == 0L)
            return UpdateResult.ZERO;
    }

    return new UpdateResult(items, errKeys);
}
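The keepBinary swap above follows a general save/swap/restore discipline around operationContextPerCall: capture the caller's per-call context, install a keepBinary variant for the duration of the work, and restore the original in a finally block. A minimal standalone sketch of that discipline follows; runUpdate is a hypothetical placeholder, not Ignite API, standing in for the DML execution above.

    // Sketch of the save/swap/restore pattern used in updateSqlFields.
    CacheOperationContext saved = cctx.operationContextPerCall();

    try {
        // Swap in a keepBinary context only when one is actually needed.
        if (cctx.binaryMarshaller() && (saved == null || !saved.isKeepBinary()))
            cctx.operationContextPerCall(saved == null
                ? new CacheOperationContext(false, null, true, null, false, null, false)
                : saved.keepBinary());

        runUpdate(); // Hypothetical placeholder for the actual work.
    }
    finally {
        // Always restore the caller's per-call context, even on failure.
        cctx.operationContextPerCall(saved);
    }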
Use of org.apache.ignite.internal.processors.cache.CacheOperationContext in project ignite by apache.
From class GridDhtAtomicCache, method get0.
/**
 * @param key Key.
 * @param deserializeBinary Deserialize binary flag.
 * @param needVer Need version flag.
 * @return Value.
 * @throws IgniteCheckedException If failed.
 */
@Nullable public V get0(K key, boolean deserializeBinary, boolean needVer) throws IgniteCheckedException {
    ctx.checkSecurity(SecurityPermission.CACHE_READ);

    if (keyCheck)
        validateCacheKey(key);

    String taskName = ctx.kernalContext().job().currentTaskName();

    CacheOperationContext opCtx = ctx.operationContextPerCall();

    UUID subjId = ctx.subjectIdPerCall(null, opCtx);

    final ExpiryPolicy expiryPlc = opCtx != null ? opCtx.expiry() : null;

    final boolean skipStore = opCtx != null && opCtx.skipStore();

    try {
        return getAsync0(ctx.toCacheKeyObject(key),
            !ctx.config().isReadFromBackup(),
            subjId,
            taskName,
            deserializeBinary,
            opCtx != null && opCtx.recovery(),
            expiryPlc,
            false,
            skipStore,
            true,
            needVer).get();
    }
    catch (IgniteException e) {
        if (e.getCause(IgniteCheckedException.class) != null)
            throw e.getCause(IgniteCheckedException.class);
        else
            throw e;
    }
}
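For context, the opCtx fields consulted here (expiry, skipStore, recovery) are populated on the public API side by the IgniteCache decorators, each of which returns a facade carrying a per-call CacheOperationContext. A hedged usage sketch, assuming a running node and a cache named "mycache" (both assumptions, not from the source above):

    import javax.cache.expiry.AccessedExpiryPolicy;
    import javax.cache.expiry.Duration;
    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteCache;
    import org.apache.ignite.Ignition;

    Ignite ignite = Ignition.ignite(); // Assumes a node is already started.
    IgniteCache<Integer, String> cache = ignite.cache("mycache"); // Assumed cache name.

    // No decorator: get0 sees opCtx == null and applies defaults.
    String plain = cache.get(1);

    // withSkipStore(): get0 sees opCtx.skipStore() == true.
    String noStore = cache.withSkipStore().get(1);

    // withExpiryPolicy(...): get0 sees a non-null opCtx.expiry().
    String withTtl = cache
        .withExpiryPolicy(AccessedExpiryPolicy.factoryOf(Duration.ONE_MINUTE).create())
        .get(1);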
Use of org.apache.ignite.internal.processors.cache.CacheOperationContext in project ignite by apache.
From class GridNearTxLocal, method getAllAsync.
/**
 * @param cacheCtx Cache context.
 * @param entryTopVer Entry topology version.
 * @param keys Keys to get.
 * @param deserializeBinary Deserialize binary flag.
 * @param skipVals Skip values flag.
 * @param keepCacheObjects Keep cache objects flag.
 * @param skipStore Skip store flag.
 * @param recovery Recovery flag.
 * @param needVer Need version flag.
 * @return Future for this get.
 */
@SuppressWarnings("unchecked")
public <K, V> IgniteInternalFuture<Map<K, V>> getAllAsync(final GridCacheContext cacheCtx,
    @Nullable final AffinityTopologyVersion entryTopVer,
    Collection<KeyCacheObject> keys,
    final boolean deserializeBinary,
    final boolean skipVals,
    final boolean keepCacheObjects,
    final boolean skipStore,
    final boolean recovery,
    final boolean needVer) {
    if (F.isEmpty(keys))
        return new GridFinishedFuture<>(Collections.<K, V>emptyMap());

    init();

    int keysCnt = keys.size();

    boolean single = keysCnt == 1;

    try {
        checkValid();

        final Map<K, V> retMap = new GridLeanMap<>(keysCnt);

        final Map<KeyCacheObject, GridCacheVersion> missed = new GridLeanMap<>(pessimistic() ? keysCnt : 0);

        CacheOperationContext opCtx = cacheCtx.operationContextPerCall();

        ExpiryPolicy expiryPlc = opCtx != null ? opCtx.expiry() : null;

        final Collection<KeyCacheObject> lockKeys = enlistRead(cacheCtx, entryTopVer, keys, expiryPlc, retMap,
            missed, keysCnt, deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer);

        if (single && missed.isEmpty())
            return new GridFinishedFuture<>(retMap);

        // Handle locks.
        if (pessimistic() && !readCommitted() && !skipVals) {
            if (expiryPlc == null)
                expiryPlc = cacheCtx.expiry();

            long accessTtl = expiryPlc != null ? CU.toTtl(expiryPlc.getExpiryForAccess()) : CU.TTL_NOT_CHANGED;
            long createTtl = expiryPlc != null ? CU.toTtl(expiryPlc.getExpiryForCreation()) : CU.TTL_NOT_CHANGED;

            long timeout = remainingTime();

            if (timeout == -1)
                return new GridFinishedFuture<>(timeoutException());

            IgniteInternalFuture<Boolean> fut = cacheCtx.cache().txLockAsync(lockKeys, timeout, this, true, true,
                isolation, isInvalidate(), createTtl, accessTtl);

            final ExpiryPolicy expiryPlc0 = expiryPlc;

            PLC2<Map<K, V>> plc2 = new PLC2<Map<K, V>>() {
                @Override public IgniteInternalFuture<Map<K, V>> postLock() throws IgniteCheckedException {
                    if (log.isDebugEnabled())
                        log.debug("Acquired transaction lock for read on keys: " + lockKeys);

                    // Load keys only after the locks have been acquired.
                    for (KeyCacheObject cacheKey : lockKeys) {
                        K keyVal = (K)(keepCacheObjects ? cacheKey :
                            cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, true));

                        if (retMap.containsKey(keyVal))
                            // We already have a return value.
                            continue;

                        IgniteTxKey txKey = cacheCtx.txKey(cacheKey);

                        IgniteTxEntry txEntry = entry(txKey);

                        assert txEntry != null;

                        // Check if there is cached value.
                        while (true) {
                            GridCacheEntryEx cached = txEntry.cached();

                            CacheObject val = null;
                            GridCacheVersion readVer = null;
                            EntryGetResult getRes = null;

                            try {
                                Object transformClo = (!F.isEmpty(txEntry.entryProcessors()) &&
                                    cctx.gridEvents().isRecordable(EVT_CACHE_OBJECT_READ)) ?
                                    F.first(txEntry.entryProcessors()) : null;

                                if (needVer) {
                                    getRes = cached.innerGetVersioned(null, GridNearTxLocal.this,
                                        /*update-metrics*/true, /*event*/!skipVals,
                                        CU.subjectId(GridNearTxLocal.this, cctx), transformClo, resolveTaskName(),
                                        null, txEntry.keepBinary(), null);

                                    if (getRes != null) {
                                        val = getRes.value();
                                        readVer = getRes.version();
                                    }
                                }
                                else {
                                    val = cached.innerGet(null, GridNearTxLocal.this,
                                        /*read through*/false, /*metrics*/true, /*events*/!skipVals,
                                        CU.subjectId(GridNearTxLocal.this, cctx), transformClo, resolveTaskName(),
                                        null, txEntry.keepBinary());
                                }

                                // If value is in cache and passed the filter.
                                if (val != null) {
                                    missed.remove(cacheKey);

                                    txEntry.setAndMarkValid(val);

                                    if (!F.isEmpty(txEntry.entryProcessors()))
                                        val = txEntry.applyEntryProcessors(val);

                                    cacheCtx.addResult(retMap, cacheKey, val, skipVals, keepCacheObjects,
                                        deserializeBinary, false, getRes, readVer, 0, 0, needVer);

                                    if (readVer != null)
                                        txEntry.entryReadVersion(readVer);
                                }

                                // While.
                                break;
                            }
                            catch (GridCacheEntryRemovedException ignore) {
                                if (log.isDebugEnabled())
                                    log.debug("Got removed exception in get postLock (will retry): " + cached);

                                txEntry.cached(entryEx(cacheCtx, txKey, topologyVersion()));
                            }
                        }
                    }

                    if (!missed.isEmpty() && cacheCtx.isLocal()) {
                        AffinityTopologyVersion topVer = topologyVersionSnapshot();

                        if (topVer == null)
                            topVer = entryTopVer;

                        return checkMissed(cacheCtx, topVer != null ? topVer : topologyVersion(), retMap, missed,
                            deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer, expiryPlc0);
                    }

                    return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
                }
            };

            FinishClosure<Map<K, V>> finClos = new FinishClosure<Map<K, V>>() {
                @Override Map<K, V> finish(Map<K, V> loaded) {
                    retMap.putAll(loaded);

                    return retMap;
                }
            };

            if (fut.isDone()) {
                try {
                    IgniteInternalFuture<Map<K, V>> fut1 = plc2.apply(fut.get(), null);

                    return fut1.isDone() ?
                        new GridFinishedFuture<>(finClos.apply(fut1.get(), null)) :
                        new GridEmbeddedFuture<>(finClos, fut1);
                }
                catch (GridClosureException e) {
                    return new GridFinishedFuture<>(e.unwrap());
                }
                catch (IgniteCheckedException e) {
                    try {
                        return plc2.apply(false, e);
                    }
                    catch (Exception e1) {
                        return new GridFinishedFuture<>(e1);
                    }
                }
            }
            else
                return new GridEmbeddedFuture<>(fut, plc2, finClos);
        }
        else {
            assert optimistic() || readCommitted() || skipVals;

            if (!missed.isEmpty()) {
                if (!readCommitted())
                    for (Iterator<KeyCacheObject> it = missed.keySet().iterator(); it.hasNext(); ) {
                        KeyCacheObject cacheKey = it.next();

                        K keyVal = (K)(keepCacheObjects ? cacheKey :
                            cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(cacheKey, !deserializeBinary, false));

                        if (retMap.containsKey(keyVal))
                            it.remove();
                    }

                if (missed.isEmpty())
                    return new GridFinishedFuture<>(retMap);

                AffinityTopologyVersion topVer = topologyVersionSnapshot();

                if (topVer == null)
                    topVer = entryTopVer;

                return checkMissed(cacheCtx, topVer != null ? topVer : topologyVersion(), retMap, missed,
                    deserializeBinary, skipVals, keepCacheObjects, skipStore, recovery, needVer, expiryPlc);
            }

            return new GridFinishedFuture<>(retMap);
        }
    }
    catch (IgniteCheckedException e) {
        setRollbackOnly();

        return new GridFinishedFuture<>(e);
    }
}
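The inner while (true) loop in postLock() above is the standard Ignite retry idiom for entries removed concurrently with a read: attempt the read, and on GridCacheEntryRemovedException rebind the transaction entry to a fresh cache entry and retry. A stripped-down sketch of just that idiom; readEntry and refreshEntry are hypothetical placeholders standing in for the innerGet/innerGetVersioned and entryEx calls above.

    // Stripped-down retry idiom from postLock() above.
    while (true) {
        GridCacheEntryEx cached = txEntry.cached();

        try {
            CacheObject val = readEntry(cached); // Hypothetical: innerGet / innerGetVersioned.

            // Success: fall out of the retry loop.
            break;
        }
        catch (GridCacheEntryRemovedException ignore) {
            // Entry was concurrently removed; rebind the tx entry to a fresh
            // cache entry for the current topology version, then retry.
            txEntry.cached(refreshEntry(txKey)); // Hypothetical: entryEx(...).
        }
    }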
Use of org.apache.ignite.internal.processors.cache.CacheOperationContext in project ignite by apache.
From class DmlStatementsProcessor, method updateSqlFields.
/**
 * Execute DML statement, possibly with a few re-attempts in case of concurrent data modifications.
 *
 * @param schemaName Schema.
 * @param conn Connection.
 * @param prepared Prepared statement.
 * @param fieldsQry Original query.
 * @param loc Query locality flag.
 * @param filters Cache name and key filter.
 * @param cancel Query cancel.
 * @return Update result (modified items count and failed keys).
 * @throws IgniteCheckedException If failed.
 */
private UpdateResult updateSqlFields(String schemaName, Connection conn, Prepared prepared,
    SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel)
    throws IgniteCheckedException {
    Object[] errKeys = null;

    long items = 0;

    UpdatePlan plan = getPlanForStatement(schemaName, conn, prepared, fieldsQry, loc, null);

    GridCacheContext<?, ?> cctx = plan.cacheContext();

    for (int i = 0; i < DFLT_DML_RERUN_ATTEMPTS; i++) {
        CacheOperationContext opCtx = setKeepBinaryContext(cctx);

        UpdateResult r;

        try {
            r = executeUpdateStatement(schemaName, cctx, conn, prepared, fieldsQry, loc, filters, cancel);
        }
        finally {
            cctx.operationContextPerCall(opCtx);
        }

        items += r.counter();
        errKeys = r.errorKeys();

        if (F.isEmpty(errKeys))
            break;
    }

    if (F.isEmpty(errKeys)) {
        if (items == 1L)
            return UpdateResult.ONE;
        else if (items == 0L)
            return UpdateResult.ZERO;
    }

    return new UpdateResult(items, errKeys);
}
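Here setKeepBinaryContext is the factored-out form of the inline keepBinary swap shown in the first updateSqlFields above: it installs a keepBinary per-call context and returns the previous one so the finally block can restore it. A plausible reconstruction from that inline version, not the verified source body:

    // Plausible reconstruction of setKeepBinaryContext, based on the inline
    // version in the first updateSqlFields above; the actual body may differ.
    private CacheOperationContext setKeepBinaryContext(GridCacheContext<?, ?> cctx) {
        CacheOperationContext opCtx = cctx.operationContextPerCall();

        // Force keepBinary to avoid binary deserialization inside entry processors.
        if (cctx.binaryMarshaller()) {
            CacheOperationContext newOpCtx = null;

            if (opCtx == null)
                // Mimics GridCacheAdapter#keepBinary and GridCacheProxyImpl#keepBinary.
                newOpCtx = new CacheOperationContext(false, null, true, null, false, null, false);
            else if (!opCtx.isKeepBinary())
                newOpCtx = opCtx.keepBinary();

            if (newOpCtx != null)
                cctx.operationContextPerCall(newOpCtx);
        }

        // Return the previous context so the caller can restore it.
        return opCtx;
    }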
Use of org.apache.ignite.internal.processors.cache.CacheOperationContext in project ignite by apache.
From class DmlStatementsProcessor, method updateSqlFieldsBatched.
/**
 * Execute a batched DML statement, possibly with a few re-attempts per statement in case of concurrent
 * data modifications.
 *
 * @param schemaName Schema.
 * @param conn Connection.
 * @param prepared Prepared statement.
 * @param fieldsQry Original query.
 * @param loc Query locality flag.
 * @param filters Cache name and key filter.
 * @param cancel Query cancel.
 * @return Update results (modified items count and failed keys), one per set of batched arguments.
 * @throws IgniteCheckedException If failed.
 */
private Collection<UpdateResult> updateSqlFieldsBatched(String schemaName, Connection conn, Prepared prepared,
    SqlFieldsQueryEx fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel)
    throws IgniteCheckedException {
    List<Object[]> argss = fieldsQry.batchedArguments();

    UpdatePlan plan = getPlanForStatement(schemaName, conn, prepared, fieldsQry, loc, null);

    if (plan.hasRows() && plan.mode() == UpdateMode.INSERT) {
        GridCacheContext<?, ?> cctx = plan.cacheContext();

        CacheOperationContext opCtx = setKeepBinaryContext(cctx);

        try {
            List<List<List<?>>> cur = plan.createRows(argss);

            List<UpdateResult> res = processDmlSelectResultBatched(plan, cur, fieldsQry.getPageSize());

            return res;
        }
        finally {
            cctx.operationContextPerCall(opCtx);
        }
    }
    else {
        // Fallback to previous mode.
        Collection<UpdateResult> ress = new ArrayList<>(argss.size());

        SQLException batchException = null;

        int[] cntPerRow = new int[argss.size()];

        int cntr = 0;

        for (Object[] args : argss) {
            SqlFieldsQueryEx qry0 = (SqlFieldsQueryEx)fieldsQry.copy();

            qry0.clearBatchedArgs();
            qry0.setArgs(args);

            UpdateResult res;

            try {
                res = updateSqlFields(schemaName, conn, prepared, qry0, loc, filters, cancel);

                cntPerRow[cntr++] = (int)res.counter();

                ress.add(res);
            }
            catch (Exception e) {
                String sqlState;

                int code;

                if (e instanceof IgniteSQLException) {
                    sqlState = ((IgniteSQLException)e).sqlState();
                    code = ((IgniteSQLException)e).statusCode();
                }
                else {
                    sqlState = SqlStateCode.INTERNAL_ERROR;
                    code = IgniteQueryErrorCode.UNKNOWN;
                }

                batchException = chainException(batchException,
                    new SQLException(e.getMessage(), sqlState, code, e));

                cntPerRow[cntr++] = Statement.EXECUTE_FAILED;
            }
        }

        if (batchException != null) {
            BatchUpdateException e = new BatchUpdateException(batchException.getMessage(),
                batchException.getSQLState(), batchException.getErrorCode(), cntPerRow, batchException);

            throw new IgniteCheckedException(e);
        }

        return ress;
    }
}
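On the client side this method backs plain JDBC batching: the cntPerRow array built above becomes the counts returned by executeBatch(), with Statement.EXECUTE_FAILED marking rows whose chained SQLException ended up in the BatchUpdateException. A hedged usage sketch with the Ignite thin JDBC driver; the table name and DDL are assumptions for illustration.

    import java.sql.BatchUpdateException;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    // Assumed table: CREATE TABLE person (id INT PRIMARY KEY, name VARCHAR).
    try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1");
         PreparedStatement ps = conn.prepareStatement("INSERT INTO person (id, name) VALUES (?, ?)")) {
        for (int i = 0; i < 3; i++) {
            ps.setInt(1, i);
            ps.setString(2, "name-" + i);
            ps.addBatch(); // Accumulates into SqlFieldsQueryEx.batchedArguments().
        }

        int[] cntPerRow = ps.executeBatch(); // One update count per batched row.
    }
    catch (BatchUpdateException e) {
        // Failed rows carry Statement.EXECUTE_FAILED in getUpdateCounts().
        int[] counts = e.getUpdateCounts();
    }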