Use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
The class StandaloneWalRecordsIterator, method postProcessDataEntry.
/**
 * Converts an entry or lazy data entry into an unwrapped entry.
 *
 * @param processor Cache object processor for deserializing objects.
 * @param fakeCacheObjCtx Cache object context for deserializing binary and unwrapping objects.
 * @param dataEntry Entry to process.
 * @return Post-processed entry.
 * @throws IgniteCheckedException If failed.
 */
@NotNull
private DataEntry postProcessDataEntry(final IgniteCacheObjectProcessor processor,
    final CacheObjectContext fakeCacheObjCtx, final DataEntry dataEntry) throws IgniteCheckedException {
    final KeyCacheObject key;
    final CacheObject val;

    final File marshallerMappingFileStoreDir =
        fakeCacheObjCtx.kernalContext().marshallerContext().getMarshallerMappingFileStoreDir();

    if (dataEntry instanceof LazyDataEntry) {
        final LazyDataEntry lazyDataEntry = (LazyDataEntry)dataEntry;

        key = processor.toKeyCacheObject(fakeCacheObjCtx, lazyDataEntry.getKeyType(), lazyDataEntry.getKeyBytes());

        final byte type = lazyDataEntry.getValType();

        val = type == 0 ? null : processor.toCacheObject(fakeCacheObjCtx, type, lazyDataEntry.getValBytes());
    }
    else {
        key = dataEntry.key();
        val = dataEntry.value();
    }

    return new UnwrapDataEntry(dataEntry.cacheId(), key, val, dataEntry.op(), dataEntry.nearXidVersion(),
        dataEntry.writeVersion(), dataEntry.expireTime(), dataEntry.partitionId(), dataEntry.partitionCounter(),
        fakeCacheObjCtx, keepBinary || marshallerMappingFileStoreDir == null);
}
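For orientation, here is a minimal sketch of where these post-processed entries surface: StandaloneWalRecordsIterator is created internally when WAL files are read through IgniteWalIteratorFactory. The ./wal path is a placeholder, and the iterator(File...) overload assumes a reasonably recent Ignite version, so treat this as an illustration rather than canonical API usage.

import java.io.File;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.pagemem.wal.WALIterator;
import org.apache.ignite.internal.pagemem.wal.WALPointer;
import org.apache.ignite.internal.pagemem.wal.record.DataEntry;
import org.apache.ignite.internal.pagemem.wal.record.DataRecord;
import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
import org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory;
import org.apache.ignite.lang.IgniteBiTuple;

public class WalReadSketch {
    public static void main(String[] args) throws IgniteCheckedException {
        // Placeholder path: point this at real WAL segment files.
        File walDir = new File("./wal");

        IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory();

        try (WALIterator it = factory.iterator(walDir)) {
            while (it.hasNext()) {
                IgniteBiTuple<WALPointer, WALRecord> tup = it.next();

                if (tup.get2() instanceof DataRecord) {
                    // Each DataEntry here has already gone through postProcessDataEntry,
                    // so lazy entries are unwrapped into key/value cache objects.
                    for (DataEntry e : ((DataRecord)tup.get2()).writeEntries())
                        System.out.println("cacheId=" + e.cacheId() + ", op=" + e.op());
                }
            }
        }
    }
}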
Use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
The class GridNearCacheEntry, method loadedValue.
/**
 * @param tx Transaction.
 * @param primaryNodeId Primary node ID.
 * @param val New value.
 * @param ver Version to use.
 * @param dhtVer DHT version received from remote node.
 * @param ttl Time to live.
 * @param expireTime Expiration time.
 * @param evt Event flag.
 * @param keepBinary Keep binary flag.
 * @param topVer Topology version.
 * @param subjId Subject ID.
 * @return {@code True} if initial value was set.
 * @throws IgniteCheckedException In case of error.
 * @throws GridCacheEntryRemovedException If entry was removed.
 */
@SuppressWarnings({ "RedundantTypeArguments" })
public boolean loadedValue(@Nullable IgniteInternalTx tx, UUID primaryNodeId, CacheObject val, GridCacheVersion ver, GridCacheVersion dhtVer, long ttl, long expireTime, boolean evt, boolean keepBinary, AffinityTopologyVersion topVer, UUID subjId) throws IgniteCheckedException, GridCacheEntryRemovedException {
assert dhtVer != null;
GridCacheVersion enqueueVer = null;
lockEntry();
try {
checkObsolete();
if (cctx.statisticsEnabled())
cctx.cache().metrics0().onRead(false);
boolean ret = false;
CacheObject old = this.val;
boolean hasVal = hasValueUnlocked();
if (this.dhtVer == null || this.dhtVer.compareTo(dhtVer) < 0 || !valid(topVer)) {
primaryNode(primaryNodeId, topVer);
update(val, expireTime, ttl, ver, true);
if (cctx.deferredDelete() && !isInternal()) {
boolean deleted = val == null;
if (deleted != deletedUnlocked()) {
deletedUnlocked(deleted);
if (deleted)
enqueueVer = ver;
}
}
this.dhtVer = dhtVer;
ret = true;
}
if (evt && cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
cctx.events().addEvent(partition(), key, tx, null, EVT_CACHE_OBJECT_READ, val, val != null, old, hasVal, subjId, null, null, keepBinary);
return ret;
} finally {
unlockEntry();
if (enqueueVer != null)
cctx.onDeferredDelete(this, enqueueVer);
}
}
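The heart of loadedValue is the guard this.dhtVer == null || this.dhtVer.compareTo(dhtVer) < 0 || !valid(topVer): a loaded value is applied only if it carries a strictly newer DHT version than the one already stored, so stale read responses cannot clobber fresher state. Below is a standalone sketch of that pattern, with a plain long standing in for GridCacheVersion and a ReentrantLock for lockEntry()/unlockEntry(); none of these names are Ignite API.

import java.util.concurrent.locks.ReentrantLock;

/** Version-guarded slot: a loaded value wins only if strictly newer. */
class VersionedSlot<V> {
    private final ReentrantLock lock = new ReentrantLock();

    private V val;
    private long ver = Long.MIN_VALUE; // Stand-in for a GridCacheVersion.

    /** @return {@code true} if the loaded value was applied. */
    boolean loaded(V newVal, long newVer) {
        lock.lock();

        try {
            // A stale or duplicate response must not overwrite newer state.
            if (newVer <= ver)
                return false;

            val = newVal;
            ver = newVer;

            return true;
        }
        finally {
            lock.unlock();
        }
    }
}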
Use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
The class GridNearCacheEntry, method addNearLocal.
/**
* Add near local candidate.
*
* @param dhtNodeId DHT node ID.
* @param threadId Owning thread ID.
* @param ver Lock version.
* @param topVer Topology version.
* @param timeout Timeout to acquire lock.
* @param reenter Reentry flag.
* @param tx Transaction flag.
* @param implicitSingle Implicit flag.
* @param read Read lock flag.
* @return New candidate.
* @throws GridCacheEntryRemovedException If entry has been removed.
*/
@Nullable GridCacheMvccCandidate addNearLocal(@Nullable UUID dhtNodeId, long threadId, GridCacheVersion ver,
    AffinityTopologyVersion topVer, long timeout, boolean reenter, boolean tx, boolean implicitSingle,
    boolean read) throws GridCacheEntryRemovedException {
    CacheLockCandidates prev;
    CacheLockCandidates owner = null;

    GridCacheMvccCandidate cand;

    CacheObject val;

    UUID locId = cctx.nodeId();

    lockEntry();

    try {
        checkObsolete();

        GridCacheMvcc mvcc = mvccExtras();

        if (mvcc == null) {
            mvcc = new GridCacheMvcc(cctx);

            mvccExtras(mvcc);
        }

        GridCacheMvccCandidate c = mvcc.localCandidate(locId, threadId);

        if (c != null)
            return reenter ? c.reenter() : null;

        prev = mvcc.allOwners();

        boolean emptyBefore = mvcc.isEmpty();

        // Lock could not be acquired.
        if (timeout < 0 && !emptyBefore)
            return null;

        // Add a near-local lock candidate.
        cand = mvcc.addNearLocal(this, locId, dhtNodeId, threadId, ver, tx, implicitSingle, read);

        cand.topologyVersion(topVer);

        boolean emptyAfter = mvcc.isEmpty();

        checkCallbacks(emptyBefore, emptyAfter);

        val = this.val;

        if (emptyAfter)
            mvccExtras(null);
        else
            owner = mvcc.allOwners();
    }
    finally {
        unlockEntry();
    }

    // This call must be made outside of synchronization.
    checkOwnerChanged(prev, owner, val);

    return cand;
}
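Note the final comment above: checkOwnerChanged runs only after unlockEntry(), because owner-change callbacks can touch other entries and would risk lock-ordering deadlocks if fired under the entry lock. A minimal sketch of this compute-under-lock, notify-outside-lock pattern (all names here are illustrative, not Ignite API):

import java.util.Objects;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;

/** Mutates owner state under a lock, but notifies listeners outside it. */
class OwnerNotifier<T> {
    private final ReentrantLock entryLock = new ReentrantLock();

    private T owner;

    void changeOwner(T newOwner, Consumer<T> lsnr) {
        T prev;

        entryLock.lock();

        try {
            prev = owner;
            owner = newOwner;
        }
        finally {
            entryLock.unlock();
        }

        // Firing the listener outside the lock avoids deadlocks when it
        // acquires other locks (the role of checkOwnerChanged above).
        if (!Objects.equals(prev, newOwner))
            lsnr.accept(newOwner);
    }
}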
Use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
The class GridNearLockFuture, method proceedMapping0.
/**
 * Gets the next near lock mapping and either acquires DHT locks locally or sends a near lock request to
 * the remote primary node.
 *
 * @throws IgniteCheckedException If mapping cannot be completed.
 */
@SuppressWarnings("unchecked")
private void proceedMapping0() throws IgniteCheckedException {
GridNearLockMapping map;
synchronized (this) {
map = mappings.poll();
}
// If there are no more mappings to process, complete the future.
if (map == null)
return;
final GridNearLockRequest req = map.request();
final Collection<KeyCacheObject> mappedKeys = map.distributedKeys();
final ClusterNode node = map.node();
if (filter != null && filter.length != 0)
req.filter(filter, cctx);
if (node.isLocal()) {
req.miniId(-1);
if (log.isDebugEnabled())
log.debug("Before locally locking near request: " + req);
IgniteInternalFuture<GridNearLockResponse> fut = dht().lockAllAsync(cctx, cctx.localNode(), req, filter);
// Add new future.
add(new GridEmbeddedFuture<>(new C2<GridNearLockResponse, Exception, Boolean>() {
@Override
public Boolean apply(GridNearLockResponse res, Exception e) {
if (CU.isLockTimeoutOrCancelled(e) || (res != null && CU.isLockTimeoutOrCancelled(res.error())))
return false;
if (e != null) {
onError(e);
return false;
}
if (res == null) {
onError(new IgniteCheckedException("Lock response is null for future: " + this));
return false;
}
if (res.error() != null) {
onError(res.error());
return false;
}
if (log.isDebugEnabled())
log.debug("Acquired lock for local DHT mapping [locId=" + cctx.nodeId() + ", mappedKeys=" + mappedKeys + ", fut=" + GridNearLockFuture.this + ']');
try {
int i = 0;
for (KeyCacheObject k : mappedKeys) {
while (true) {
GridNearCacheEntry entry = cctx.near().entryExx(k, req.topologyVersion());
try {
IgniteBiTuple<GridCacheVersion, CacheObject> oldValTup = valMap.get(entry.key());
boolean hasBytes = entry.hasValue();
CacheObject oldVal = entry.rawGet();
CacheObject newVal = res.value(i);
GridCacheVersion dhtVer = res.dhtVersion(i);
GridCacheVersion mappedVer = res.mappedVersion(i);
// On local node don't record twice if DHT cache already recorded.
boolean record = retval && oldValTup != null && oldValTup.get1().equals(dhtVer);
if (newVal == null) {
if (oldValTup != null) {
if (oldValTup.get1().equals(dhtVer))
newVal = oldValTup.get2();
oldVal = oldValTup.get2();
}
}
// Lock is held at this point, so we can set the
// returned value if any.
entry.resetFromPrimary(newVal, lockVer, dhtVer, node.id(), topVer);
entry.readyNearLock(lockVer, mappedVer, res.committedVersions(), res.rolledbackVersions(), res.pending());
if (inTx() && implicitTx() && tx.onePhaseCommit()) {
boolean pass = res.filterResult(i);
tx.entry(cctx.txKey(k)).filters(pass ? CU.empty0() : CU.alwaysFalse0Arr());
}
if (record) {
if (cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
cctx.events().addEvent(entry.partition(), entry.key(), tx, null, EVT_CACHE_OBJECT_READ, newVal, newVal != null, oldVal, hasBytes, CU.subjectId(tx, cctx.shared()), null, inTx() ? tx.resolveTaskName() : null, keepBinary);
if (cctx.statisticsEnabled())
cctx.cache().metrics0().onRead(oldVal != null);
}
if (log.isDebugEnabled())
log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']');
// Inner while loop.
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Failed to add candidates because entry was " + "removed (will renew).");
synchronized (GridNearLockFuture.this) {
// Replace old entry with new one.
entries.set(i, (GridDistributedCacheEntry) cctx.cache().entryEx(entry.key()));
}
}
}
// Increment outside of while loop.
i++;
}
// Proceed and add new future (if any) before completing embedded future.
proceedMapping();
} catch (IgniteCheckedException ex) {
onError(ex);
return false;
}
return true;
}
}, fut));
} else {
final MiniFuture fut = new MiniFuture(node, mappedKeys, ++miniId);
req.miniId(fut.futureId());
// Append new future.
add(fut);
IgniteInternalFuture<?> txSync = null;
if (inTx())
txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId());
if (txSync == null || txSync.isDone()) {
try {
if (log.isDebugEnabled())
log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');
cctx.io().send(node, req, cctx.ioPolicy());
} catch (ClusterTopologyCheckedException ex) {
fut.onResult(ex);
}
} else {
txSync.listen(new CI1<IgniteInternalFuture<?>>() {
@Override
public void apply(IgniteInternalFuture<?> t) {
try {
if (log.isDebugEnabled())
log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');
cctx.io().send(node, req, cctx.ioPolicy());
} catch (ClusterTopologyCheckedException ex) {
fut.onResult(ex);
} catch (IgniteCheckedException e) {
onError(e);
}
}
});
}
}
}
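Structurally, proceedMapping0 drains the mapping queue one element at a time: it polls a mapping, dispatches it locally or remotely, and the completion path calls proceedMapping() again, so the next group of keys is locked only after the previous one finishes. A hedged sketch of that drive loop, using CompletableFuture in place of Ignite's internal futures (SequentialMapper and all its members are hypothetical):

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

/** Processes queued mappings strictly one after another. */
class SequentialMapper<M> {
    private final Queue<M> mappings = new ArrayDeque<>();

    synchronized void enqueue(M mapping) {
        mappings.add(mapping);
    }

    /** @param send Dispatches one mapping and completes when it is locked. */
    void proceed(Function<M, CompletableFuture<Void>> send) {
        M next;

        synchronized (this) {
            next = mappings.poll();
        }

        // No more mappings: the overall lock future may complete.
        if (next == null)
            return;

        // Chain the next mapping strictly after this one, preserving order.
        send.apply(next).thenRun(() -> proceed(send));
    }
}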
Use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
The class GridNearLockFuture, method map.
/**
 * Maps keys to nodes. Note that we cannot simply group keys by nodes and send one lock request per node,
 * because that would not preserve the order of lock acquisition. Instead, keys are split into contiguous
 * groups belonging to one primary node, and locks for these groups are acquired sequentially.
 *
 * @param keys Keys.
 * @param remap Remap flag.
 * @param topLocked {@code True} if thread already acquired lock preventing topology change.
 */
private void map(Iterable<KeyCacheObject> keys, boolean remap, boolean topLocked) {
    try {
        AffinityTopologyVersion topVer = this.topVer;

        assert topVer != null;
        assert topVer.topologyVersion() > 0 : topVer;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for near-only cache " +
                "(all partition nodes left the grid)."));

            return;
        }

        boolean clientNode = cctx.kernalContext().clientNode();

        assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));

        synchronized (this) {
            mappings = new ArrayDeque<>();

            // Assign keys to primary nodes.
            GridNearLockMapping map = null;

            for (KeyCacheObject key : keys) {
                GridNearLockMapping updated = map(key, map, topVer);

                // If new mapping was created, add to collection.
                if (updated != map) {
                    mappings.add(updated);

                    if (tx != null && updated.node().isLocal())
                        tx.nearLocallyMapped(true);
                }

                map = updated;
            }

            if (isDone()) {
                if (log.isDebugEnabled())
                    log.debug("Abandoning (re)map because future is done: " + this);

                return;
            }

            if (log.isDebugEnabled())
                log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

            boolean first = true;

            // Create mini futures.
            for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
                GridNearLockMapping mapping = iter.next();

                ClusterNode node = mapping.node();
                Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();

                assert !mappedKeys.isEmpty();

                GridNearLockRequest req = null;
                Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());

                boolean explicit = false;

                for (KeyCacheObject key : mappedKeys) {
                    IgniteTxKey txKey = cctx.txKey(key);

                    while (true) {
                        GridNearCacheEntry entry = null;

                        try {
                            entry = cctx.near().entryExx(key, topVer);

                            if (!cctx.isAll(entry, filter)) {
                                if (log.isDebugEnabled())
                                    log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                                onComplete(false, false, true);

                                return;
                            }

                            // Removed exception may be thrown here.
                            GridCacheMvccCandidate cand = addEntry(topVer, entry, node.id());

                            if (isDone()) {
                                if (log.isDebugEnabled())
                                    log.debug("Abandoning (re)map because future is done after addEntry attempt " +
                                        "[fut=" + this + ", entry=" + entry + ']');

                                return;
                            }

                            if (cand != null) {
                                if (tx == null && !cand.reentry())
                                    cctx.mvcc().addExplicitLock(threadId, cand, topVer);

                                IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.versionedValue();

                                if (val == null) {
                                    GridDhtCacheEntry dhtEntry = dht().peekExx(key);

                                    try {
                                        if (dhtEntry != null)
                                            val = dhtEntry.versionedValue(topVer);
                                    }
                                    catch (GridCacheEntryRemovedException ignored) {
                                        assert dhtEntry.obsolete() : dhtEntry;

                                        if (log.isDebugEnabled())
                                            log.debug("Got removed exception for DHT entry in map (will ignore): " +
                                                dhtEntry);
                                    }
                                }

                                GridCacheVersion dhtVer = null;

                                if (val != null) {
                                    dhtVer = val.get1();

                                    valMap.put(key, val);
                                }

                                if (!cand.reentry()) {
                                    if (req == null) {
                                        boolean clientFirst = false;

                                        if (first) {
                                            clientFirst = clientNode && !topLocked &&
                                                (tx == null || !tx.hasRemoteLocks());

                                            first = false;
                                        }

                                        assert !implicitTx() && !implicitSingleTx() : tx;

                                        req = new GridNearLockRequest(cctx.cacheId(), topVer, cctx.nodeId(),
                                            threadId, futId, lockVer, inTx(), read, retval, isolation(),
                                            isInvalidate(), timeout, mappedKeys.size(),
                                            inTx() ? tx.size() : mappedKeys.size(),
                                            inTx() && tx.syncMode() == FULL_SYNC,
                                            inTx() ? tx.subjectId() : null,
                                            inTx() ? tx.taskNameHash() : 0,
                                            read ? createTtl : -1L,
                                            read ? accessTtl : -1L,
                                            skipStore, keepBinary, clientFirst, true, cctx.deploymentEnabled());

                                        mapping.request(req);
                                    }

                                    distributedKeys.add(key);

                                    if (tx != null)
                                        tx.addKeyMapping(txKey, mapping.node());

                                    // Include DHT version to match remote DHT entry.
                                    req.addKeyBytes(key, retval && dhtVer == null, dhtVer, cctx);
                                }

                                if (cand.reentry())
                                    explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                            }
                            else {
                                if (timedOut)
                                    return;

                                // Ignore reentries within transactions.
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                            }

                            if (explicit)
                                tx.addKeyMapping(txKey, mapping.node());

                            break;
                        }
                        catch (GridCacheEntryRemovedException ignored) {
                            assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

                            if (log.isDebugEnabled())
                                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                        }
                    }

                    // Mark mapping explicit lock flag.
                    if (explicit) {
                        boolean marked = tx != null && tx.markExplicit(node.id());

                        assert tx == null || marked;
                    }
                }

                if (!distributedKeys.isEmpty())
                    mapping.distributedKeys(distributedKeys);
                else {
                    assert mapping.request() == null;

                    iter.remove();
                }
            }
        }

        cctx.mvcc().recheckPendingLocks();

        proceedMapping();
    }
    catch (IgniteCheckedException ex) {
        onError(ex);
    }
}
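The ordering constraint in the javadoc is easy to demonstrate in isolation: grouping all keys per node would merge non-adjacent runs and reorder acquisition, whereas splitting into contiguous runs preserves the original key order. A self-contained sketch follows (ContiguousGrouping and split are hypothetical names, not Ignite API):

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class ContiguousGrouping {
    /** Splits an ordered key list into contiguous runs mapped to one node each. */
    static <K, N> List<Map.Entry<N, List<K>>> split(List<K> keys, Function<K, N> primary) {
        List<Map.Entry<N, List<K>>> runs = new ArrayList<>();

        N curNode = null;
        List<K> curRun = null;

        for (K key : keys) {
            N node = primary.apply(key);

            // Start a new run whenever the primary node changes.
            if (curRun == null || !node.equals(curNode)) {
                curNode = node;
                curRun = new ArrayList<>();

                runs.add(new AbstractMap.SimpleEntry<>(node, curRun));
            }

            curRun.add(key);
        }

        return runs;
    }

    public static void main(String[] args) {
        // n1 owns a, b, d and n2 owns c, e: n1 appears in two separate runs,
        // which is exactly what keeps lock acquisition in key order.
        Map<String, String> aff = Map.of("a", "n1", "b", "n1", "c", "n2", "d", "n1", "e", "n2");

        split(List.of("a", "b", "c", "d", "e"), aff::get)
            .forEach(r -> System.out.println(r.getKey() + " -> " + r.getValue()));
    }
}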