Use of org.apache.ignite.internal.processors.cache.KeyCacheObject in project Ignite by Apache.
The class GridNearTransactionalCache, method unlockAll:
/** {@inheritDoc} */
@Override public void unlockAll(Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;

    try {
        GridCacheVersion ver = null;
        int keyCnt = -1;

        Map<ClusterNode, GridNearUnlockRequest> map = null;

        Collection<KeyCacheObject> locKeys = new LinkedList<>();

        for (K key : keys) {
            while (true) {
                KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);

                GridDistributedCacheEntry entry = peekExx(cacheKey);

                if (entry == null)
                    // While.
                    break;

                try {
                    GridCacheMvccCandidate cand = entry.candidate(ctx.nodeId(), Thread.currentThread().getId());

                    AffinityTopologyVersion topVer = AffinityTopologyVersion.NONE;

                    if (cand != null) {
                        assert cand.nearLocal() : "Got non-near-local candidate in near cache: " + cand;

                        ver = cand.version();

                        if (map == null) {
                            Collection<ClusterNode> affNodes = CU.affinityNodes(ctx, cand.topologyVersion());

                            if (F.isEmpty(affNodes))
                                return;

                            keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                            map = U.newHashMap(affNodes.size());
                        }

                        topVer = cand.topologyVersion();

                        // Send request to remove from remote nodes.
                        ClusterNode primary = ctx.affinity().primaryByKey(key, topVer);

                        if (primary == null) {
                            if (log.isDebugEnabled())
                                log.debug("Failed to unlock key (all partition nodes left the grid).");

                            break;
                        }

                        GridNearUnlockRequest req = map.get(primary);

                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest(ctx.cacheId(), keyCnt, ctx.deploymentEnabled()));

                            req.version(ver);
                        }

                        // Remove candidate from local node first.
                        GridCacheMvccCandidate rmv = entry.removeLock();

                        if (rmv != null) {
                            if (!rmv.reentry()) {
                                if (ver != null && !ver.equals(rmv.version()))
                                    throw new IgniteCheckedException("Failed to unlock (if keys were locked separately, " +
                                        "then they need to be unlocked separately): " + keys);

                                if (!primary.isLocal()) {
                                    assert req != null;

                                    req.addKey(entry.key(), ctx);
                                }
                                else
                                    locKeys.add(cacheKey);

                                if (log.isDebugEnabled())
                                    log.debug("Removed lock (will distribute): " + rmv);
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Current thread still owns lock (or there are no other nodes)" +
                                    " [lock=" + rmv + ", curThreadId=" + Thread.currentThread().getId() + ']');
                        }
                    }

                    assert !topVer.equals(AffinityTopologyVersion.NONE) || cand == null;

                    if (topVer.equals(AffinityTopologyVersion.NONE))
                        topVer = ctx.affinity().affinityTopologyVersion();

                    entry.touch();

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to unlock removed entry (will retry): " + entry);
                }
            }
        }

        if (ver == null)
            return;

        for (Map.Entry<ClusterNode, GridNearUnlockRequest> mapping : map.entrySet()) {
            ClusterNode n = mapping.getKey();

            GridDistributedUnlockRequest req = mapping.getValue();

            if (n.isLocal())
                dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
            else if (!F.isEmpty(req.keys()))
                // We don't wait for reply to this message.
                ctx.io().send(n, req, ctx.ioPolicy());
        }
    }
    catch (IgniteCheckedException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
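For context, this internal method is normally reached through the public explicit-lock API rather than called directly. Below is a minimal, hedged sketch of that public-API path; the cache name "tx-cache" and the standalone class are illustrative assumptions, not part of the snippet above.

import java.util.Arrays;
import java.util.concurrent.locks.Lock;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class ExplicitLockSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Explicit locks require a TRANSACTIONAL cache; "tx-cache" is an assumed name.
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("tx-cache");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            Lock lock = cache.lockAll(Arrays.asList(1, 2, 3));

            lock.lock();

            try {
                cache.put(1, "one");
            }
            finally {
                // Releasing the explicit lock is what eventually drives an unlockAll(...)
                // implementation like the one shown above for the affected cache keys.
                lock.unlock();
            }
        }
    }
}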
The class GridNearTxEnlistFuture, method continueLoop:
/**
 * Iterate data rows and form batches.
 *
 * @param nodeId Id of node acknowledged the last batch.
 * @return Collection of newly completed batches.
 * @throws IgniteCheckedException If failed.
 */
private Collection<Batch> continueLoop(@Nullable UUID nodeId) throws IgniteCheckedException {
    if (nodeId != null)
        batches.remove(nodeId);

    // Let only one thread do the looping.
    if (isDone() || SKIP_UPD.getAndIncrement(this) != 0)
        return null;

    ArrayList<Batch> res = null;
    Batch batch = null;

    boolean flush = false;

    EnlistOperation op = it.operation();

    while (true) {
        while (hasNext0()) {
            checkCompleted();

            Object cur = next0();

            KeyCacheObject key = cctx.toCacheKeyObject(op.isDeleteOrLock() ? cur : ((IgniteBiTuple)cur).getKey());

            ClusterNode node = cctx.affinity().primaryByKey(key, topVer);

            if (node == null)
                throw new ClusterTopologyServerNotFoundException("Failed to get primary node " +
                    "[topVer=" + topVer + ", key=" + key + ']');

            if (!sequential)
                batch = batches.get(node.id());
            else if (batch != null && !batch.node().equals(node))
                res = markReady(res, batch);

            if (batch == null)
                batches.put(node.id(), batch = new Batch(node));

            if (batch.ready()) {
                // Can't advance further at the moment.
                batch = null;

                peek = cur;

                it.beforeDetach();

                flush = true;

                break;
            }

            batch.add(op.isDeleteOrLock() ? key : cur, !node.isLocal() && isLocalBackup(op, key));

            if (batch.size() == batchSize)
                res = markReady(res, batch);
        }

        if (SKIP_UPD.decrementAndGet(this) == 0)
            break;

        skipCntr = 1;
    }

    if (flush)
        return res;

    // No data left - flush incomplete batches.
    for (Batch batch0 : batches.values()) {
        if (!batch0.ready()) {
            if (res == null)
                res = new ArrayList<>();

            batch0.ready(true);

            res.add(batch0);
        }
    }

    if (batches.isEmpty())
        onDone(this.res);

    return res;
}
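The loop above groups rows by their primary node before forming per-node batches. The same group-by-primary idea can be expressed with the public Affinity API; the following is a hedged sketch, with the cache name "my-cache" assumed for illustration.

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class BatchByPrimarySketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // The cache must exist before its affinity can be queried; "my-cache" is an assumed name.
            ignite.getOrCreateCache("my-cache");

            List<Integer> keys = Arrays.asList(1, 2, 3, 4, 5);

            Affinity<Integer> aff = ignite.affinity("my-cache");

            // Group keys by the primary node that currently owns them, mirroring what
            // continueLoop() does per row via cctx.affinity().primaryByKey(key, topVer).
            Map<ClusterNode, Collection<Integer>> byPrimary = aff.mapKeysToNodes(keys);

            byPrimary.forEach((node, nodeKeys) -> System.out.println(node.id() + " -> " + nodeKeys));
        }
    }
}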
The class DataStreamerImpl, method addData:
/** {@inheritDoc} */
@Override public IgniteFuture<?> addData(K key, V val) {
    A.notNull(key, "key");

    KeyCacheObject key0 = cacheObjProc.toCacheKeyObject(cacheObjCtx, null, key, true);
    CacheObject val0 = cacheObjProc.toCacheObject(cacheObjCtx, val, true);

    return addDataInternal(Collections.singleton(new DataStreamerEntry(key0, val0)));
}
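This method implements IgniteDataStreamer.addData(K, V), so the usual way to exercise it is through the public streamer interface. A minimal, hedged usage sketch (the cache name "my-cache" is assumed):

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;

public class StreamerUsageSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // The target cache must exist before streaming; "my-cache" is an assumed name.
            ignite.getOrCreateCache("my-cache");

            try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("my-cache")) {
                // Each call lands in the addData(K, V) overload shown above,
                // which wraps the key into a KeyCacheObject before buffering.
                for (int i = 0; i < 1_000; i++)
                    streamer.addData(i, Integer.toString(i));
            } // close() flushes any remaining buffered entries.
        }
    }
}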
The class CacheMvccSqlTxQueriesAbstractTest, method checkAllVersionsHints:
/** */
private void checkAllVersionsHints(IgniteCache cache) throws IgniteCheckedException {
    IgniteCacheProxy cache0 = (IgniteCacheProxy)cache;

    GridCacheContext cctx = cache0.context();

    assert cctx.mvccEnabled();

    for (Object e : cache) {
        IgniteBiTuple entry = (IgniteBiTuple)e;

        KeyCacheObject key = cctx.toCacheKeyObject(entry.getKey());

        GridCursor<CacheDataRow> cur = cctx.offheap().mvccAllVersionsCursor(cctx, key, CacheDataRowAdapter.RowData.LINK_WITH_HEADER);

        while (cur.next()) {
            CacheDataRow row = cur.get();

            assertTrue(row.mvccTxState() != 0);
        }
    }
}
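A closely related pattern collects every MVCC version per key into a map, which is what per-key version helpers like the allVersions(...) call in the test below rely on. The sketch that follows is hedged: it reuses only the internal calls already shown above, the method name versionsByKey is hypothetical, and it would live in a test class with the usual java.util imports.

// Hedged sketch: gather all MVCC versions per key using the same cursor API as above.
// The method name and exact shape are illustrative, not Ignite API.
private Map<KeyCacheObject, List<CacheDataRow>> versionsByKey(IgniteCache cache) throws IgniteCheckedException {
    GridCacheContext cctx = ((IgniteCacheProxy)cache).context();

    Map<KeyCacheObject, List<CacheDataRow>> res = new HashMap<>();

    for (Object e : cache) {
        IgniteBiTuple entry = (IgniteBiTuple)e;

        KeyCacheObject key = cctx.toCacheKeyObject(entry.getKey());

        GridCursor<CacheDataRow> cur = cctx.offheap().mvccAllVersionsCursor(cctx, key, CacheDataRowAdapter.RowData.LINK_WITH_HEADER);

        List<CacheDataRow> rows = new ArrayList<>();

        while (cur.next())
            rows.add(cur.get());

        res.put(key, rows);
    }

    return res;
}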
The class CacheMvccBackupsAbstractTest, method testBackupsCoherenceWithInFlightBatchesOverflow:
/**
 * Checks cache backups consistency with in-flight batches overflow.
 *
 * @throws Exception If failed.
 */
@Test
public void testBackupsCoherenceWithInFlightBatchesOverflow() throws Exception {
    testSpi = true;
    disableScheduledVacuum = true;

    ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 1, DFLT_PARTITION_COUNT)
        .setIndexedTypes(Integer.class, Integer.class);

    final int KEYS_CNT = 30_000;

    assert KEYS_CNT % 2 == 0;

    startGrids(2);

    Ignite node1 = grid(0);
    Ignite node2 = grid(1);

    client = true;

    Ignite client = startGrid();

    awaitPartitionMapExchange();

    IgniteCache<?, ?> clientCache = client.cache(DEFAULT_CACHE_NAME);
    IgniteCache<?, ?> cache1 = node1.cache(DEFAULT_CACHE_NAME);
    IgniteCache<?, ?> cache2 = node2.cache(DEFAULT_CACHE_NAME);

    AtomicInteger keyGen = new AtomicInteger();
    Affinity affinity = affinity(clientCache);

    ClusterNode cNode1 = ((IgniteEx)node1).localNode();
    ClusterNode cNode2 = ((IgniteEx)node2).localNode();

    StringBuilder insert = new StringBuilder("INSERT INTO Integer (_key, _val) values ");

    for (int i = 0; i < KEYS_CNT; i++) {
        if (i > 0)
            insert.append(',');

        // To make big batches in near results future.
        Integer key = i < KEYS_CNT / 2 ? keyForNode(affinity, keyGen, cNode1) : keyForNode(affinity, keyGen, cNode2);

        assert key != null;

        insert.append('(').append(key).append(',').append(key * 10).append(')');
    }

    String qryStr = insert.toString();

    try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        tx.timeout(txLongTimeout);

        SqlFieldsQuery qry = new SqlFieldsQuery(qryStr);

        clientCache.query(qry).getAll();

        tx.commit();
    }

    // Add a delay to simulate batches overflow.
    TestRecordingCommunicationSpi spi1 = TestRecordingCommunicationSpi.spi(node1);
    TestRecordingCommunicationSpi spi2 = TestRecordingCommunicationSpi.spi(node2);

    spi1.closure(new IgniteBiInClosure<ClusterNode, Message>() {
        @Override public void apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxQueryEnlistResponse)
                doSleep(100);
        }
    });

    spi2.closure(new IgniteBiInClosure<ClusterNode, Message>() {
        @Override public void apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxQueryEnlistResponse)
                doSleep(100);
        }
    });

    qryStr = "DELETE FROM Integer WHERE _key >= " + 10;

    try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        tx.timeout(txLongTimeout);

        SqlFieldsQuery qry = new SqlFieldsQuery(qryStr);

        clientCache.query(qry).getAll();

        tx.commit();
    }

    Map<KeyCacheObject, List<CacheDataRow>> cache1Vers = allVersions(cache1);

    List res1 = getAll(cache1, "Integer");

    stopGrid(0);

    awaitPartitionMapExchange();

    Map<KeyCacheObject, List<CacheDataRow>> cache2Vers = allVersions(cache2);

    assertVersionsEquals(cache1Vers, cache2Vers);

    List res2 = getAll(cache2, "Integer");

    assertEqualsCollections(res1, res2);
}
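The keyForNode(...) helper used above appears to come from Ignite's common test base class; conceptually it advances the key generator until it finds a key whose primary copy is owned by the requested node. A hedged sketch of that idea using only the public Affinity API (the method name keyForNodeSketch and the probe limit are hypothetical):

// Hedged sketch of the key-selection idea behind keyForNode(affinity, keyGen, node):
// advance the generator until a key primary on the desired node is found.
static Integer keyForNodeSketch(Affinity<Integer> aff, AtomicInteger keyGen, ClusterNode node) {
    for (int i = 0; i < 100_000; i++) {
        Integer key = keyGen.incrementAndGet();

        if (aff.isPrimary(node, key))
            return key;
    }

    return null; // No suitable key found within the probe limit.
}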