Use of org.apache.ignite.internal.processors.cache.GridCacheContext in project ignite by apache.
From class IgniteTxAdapter, method ownsLockUnsafe:
/** {@inheritDoc} */
@SuppressWarnings("SimplifiableIfStatement")
@Override public boolean ownsLockUnsafe(GridCacheEntryEx entry) {
    GridCacheContext cacheCtx = entry.context();

    IgniteTxEntry txEntry = entry(entry.txKey());

    GridCacheVersion explicit = txEntry == null ? null : txEntry.explicitVersion();

    return local() && !cacheCtx.isDht() ?
        entry.lockedByThreadUnsafe(threadId()) || (explicit != null && entry.lockedByUnsafe(explicit)) :
        // Otherwise, check if entry is owned by version.
        !entry.hasLockCandidateUnsafe(xidVersion()) || entry.lockedByUnsafe(xidVersion());
}
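The ternary above packs two distinct ownership checks into one expression. Below is a minimal if/else rendering of the same decision for readability; it is only a sketch, not code from IgniteTxAdapter. The class name, the helper name and the locNonDht parameter (standing in for local() && !cacheCtx.isDht()) are made up, while the entry methods are the ones used in the snippet.

import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;

final class OwnsLockSketch {
    /** Same decision as the ternary above, spelled out branch by branch. */
    static boolean ownsLockUnsafe(GridCacheEntryEx entry, boolean locNonDht,
        long threadId, GridCacheVersion explicit, GridCacheVersion xidVer) {
        if (locNonDht) {
            // Local, non-DHT cache: the lock is owned if the transaction's thread holds it,
            // or if an explicit (lock-before-transaction) version holds it.
            return entry.lockedByThreadUnsafe(threadId)
                || (explicit != null && entry.lockedByUnsafe(explicit));
        }

        // Otherwise the entry is owned by the transaction's xid version: either no lock
        // candidate was registered for that version at all, or that version holds the lock.
        return !entry.hasLockCandidateUnsafe(xidVer) || entry.lockedByUnsafe(xidVer);
    }
}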
Use of org.apache.ignite.internal.processors.cache.GridCacheContext in project ignite by apache.
From class GridCacheNearReadersSelfTest, method testTwoNodesTwoKeysOneBackup:
/**
 * @throws Exception If failed.
 */
public void testTwoNodesTwoKeysOneBackup() throws Exception {
    aff.backups(1);

    grids = 2;

    aff.partitions(grids);

    startGrids();

    ClusterNode n1 = F.first(aff.nodes(aff.partition(1), grid(0).cluster().nodes()));
    ClusterNode n2 = F.first(aff.nodes(aff.partition(2), grid(0).cluster().nodes()));

    assertNotNull(n1);
    assertNotNull(n2);
    assertNotSame(n1, n2);
    assertFalse("Nodes cannot be equal: " + n1, n1.equals(n2));

    Ignite g1 = grid(n1.id());
    Ignite g2 = grid(n2.id());

    awaitPartitionMapExchange();

    GridCacheContext ctx = ((IgniteKernal) g1).internalCache(DEFAULT_CACHE_NAME).context();

    List<KeyCacheObject> cacheKeys = F.asList(ctx.toCacheKeyObject(1), ctx.toCacheKeyObject(2));

    IgniteInternalFuture<Object> f1 = ((IgniteKernal) g1).internalCache(DEFAULT_CACHE_NAME).preloader()
        .request(ctx, cacheKeys, new AffinityTopologyVersion(2));

    if (f1 != null)
        f1.get();

    IgniteInternalFuture<Object> f2 = ((IgniteKernal) g2).internalCache(DEFAULT_CACHE_NAME).preloader()
        .request(((IgniteKernal) g2).internalCache(DEFAULT_CACHE_NAME).context(), cacheKeys, new AffinityTopologyVersion(2));

    if (f2 != null)
        f2.get();

    IgniteCache<Integer, String> cache1 = g1.cache(DEFAULT_CACHE_NAME);
    IgniteCache<Integer, String> cache2 = g2.cache(DEFAULT_CACHE_NAME);

    assertEquals(g1.affinity(DEFAULT_CACHE_NAME).mapKeyToNode(1), g1.cluster().localNode());
    assertFalse(g1.affinity(DEFAULT_CACHE_NAME).mapKeyToNode(2).equals(g1.cluster().localNode()));
    assertEquals(g1.affinity(DEFAULT_CACHE_NAME).mapKeyToNode(2), g2.cluster().localNode());
    assertFalse(g2.affinity(DEFAULT_CACHE_NAME).mapKeyToNode(1).equals(g2.cluster().localNode()));

    // Store first value in cache.
    assertNull(cache1.getAndPut(1, "v1"));

    assertTrue(cache1.containsKey(1));
    assertTrue(cache2.containsKey(1));

    assertEquals("v1", nearPeek(cache1, 1));
    assertEquals("v1", nearPeek(cache2, 1));
    assertEquals("v1", dhtPeek(cache1, 1));
    assertEquals("v1", dhtPeek(cache2, 1));

    assertNull(near(cache1).peekEx(1));
    assertNull(near(cache2).peekEx(1));

    GridDhtCacheEntry e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);

    // Store second value in cache.
    assertNull(cache1.getAndPut(2, "v2"));

    assertTrue(cache1.containsKey(2));
    assertTrue(cache2.containsKey(2));

    assertEquals("v2", nearPeek(cache1, 2));
    assertEquals("v2", nearPeek(cache2, 2));
    assertEquals("v2", dhtPeek(cache1, 2));
    assertEquals("v2", dhtPeek(cache2, 2));

    assertNull(near(cache1).peekEx(2));
    assertNull(near(cache2).peekEx(2));

    GridDhtCacheEntry c2e2 = (GridDhtCacheEntry) dht(cache2).entryEx(2);

    // Nodes are backups of each other, so no readers should be added.
    assertFalse(c2e2.readers().contains(n1.id()));
    assertFalse(e1.readers().contains(n2.id()));

    // Get key1 on node2 (value should come from local DHT cache, as it has a backup).
    assertEquals("v1", cache2.get(1));

    // Since DHT cache2 has the value, Near cache2 should not have it.
    assertNull(near(cache2).peekEx(1));

    e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);

    // Since v1 was retrieved locally from cache2, cache1 should not know about it.
    assertFalse(e1.readers().contains(n2.id()));

    // Evict locally from cache2.
    // It should not be successful since it's not allowed to evict entry on backup node.
    cache2.localEvict(Collections.singleton(1));

    assertNull(near(cache2).peekEx(1));
    assertEquals("v1", dhtPeek(cache2, 1));

    assertEquals("v1", cache1.getAndPut(1, "z1"));

    e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);

    // Node 1 should not have node2 in readers map.
    assertFalse(e1.readers().contains(n2.id()));

    assertNull(near(cache2).peekEx(1));
    assertEquals("z1", dhtPeek(cache2, 1));
}
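The part of this test most relevant to GridCacheContext is how it wraps plain keys into KeyCacheObject instances before asking the preloader for them. Below is a minimal sketch of that pattern, assuming an already started node; the wrapper class and the helper name toCacheKeys are made up for illustration, while the calls on IgniteKernal and GridCacheContext are the ones used in the snippet.

import java.util.ArrayList;
import java.util.List;

import org.apache.ignite.Ignite;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.KeyCacheObject;

final class CacheKeySketch {
    /** Wraps user keys into KeyCacheObject via the cache's GridCacheContext (illustrative helper). */
    static List<KeyCacheObject> toCacheKeys(Ignite ignite, String cacheName, Object... keys) {
        GridCacheContext<?, ?> cctx = ((IgniteKernal) ignite).internalCache(cacheName).context();

        List<KeyCacheObject> res = new ArrayList<>(keys.length);

        for (Object key : keys)
            res.add(cctx.toCacheKeyObject(key));

        return res;
    }
}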
Use of org.apache.ignite.internal.processors.cache.GridCacheContext in project ignite by apache.
From class WalRecoveryTxLogicalRecordsTest, method getReuseListData:
/**
 * @param ignite Node.
 * @param cacheName Cache name.
 * @return Cache reuse list data.
 */
private T2<long[], Integer> getReuseListData(Ignite ignite, String cacheName) {
    GridCacheContext ctx = ((IgniteEx) ignite).context().cache().cache(cacheName).context();

    ReuseListImpl reuseList = GridTestUtils.getFieldValue(ctx.offheap(), "reuseList");
    PagesList.Stripe[] bucket = GridTestUtils.getFieldValue(reuseList, "bucket");

    long[] ids = null;

    if (bucket != null) {
        ids = new long[bucket.length];

        for (int i = 0; i < bucket.length; i++)
            ids[i] = bucket[i].tailId;
    }

    return new T2<>(ids, 0);
}
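A plausible way such a helper gets used in a WAL recovery test is to snapshot the reuse-list tail page ids, restart the node, and check that they are restored. The sketch below would live in the same test class (so startGrid, stopGrid and assertTrue resolve, and java.util.Arrays is imported); the cache name "cache", the activation call and the exact assertion are assumptions, not the test's actual code.

/**
 * Illustrative caller of getReuseListData(): compares reuse-list state across a restart.
 * Cache name and assertion are assumptions for the sketch.
 */
private void checkReuseListAfterRestart() throws Exception {
    Ignite ignite = startGrid(0);

    ignite.cluster().active(true);

    T2<long[], Integer> before = getReuseListData(ignite, "cache");

    stopGrid(0);

    ignite = startGrid(0);

    ignite.cluster().active(true);

    T2<long[], Integer> after = getReuseListData(ignite, "cache");

    // Tail page ids of the reuse-list stripes should survive checkpoint + WAL recovery.
    assertTrue(Arrays.equals(before.get1(), after.get1()));
}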
Use of org.apache.ignite.internal.processors.cache.GridCacheContext in project ignite by apache.
From class GridQueryProcessor, method querySqlFields:
/**
 * Query SQL fields.
 *
 * @param cctx Cache context.
 * @param qry Query.
 * @param cliCtx Client context.
 * @param keepBinary Keep binary flag.
 * @param failOnMultipleStmts If {@code true}, the method must throw an exception when the query contains
 *     more than one SQL statement.
 * @return Cursor.
 */
@SuppressWarnings("unchecked")
public List<FieldsQueryCursor<List<?>>> querySqlFields(@Nullable final GridCacheContext<?, ?> cctx, final SqlFieldsQuery qry,
    final SqlClientContext cliCtx, final boolean keepBinary, final boolean failOnMultipleStmts) {
    checkxEnabled();

    validateSqlFieldsQuery(qry);

    if (!ctx.state().publicApiActiveState(true)) {
        throw new IgniteException("Can not perform the operation because the cluster is inactive. Note, that " +
            "the cluster is considered inactive by default if Ignite Persistent Store is used to let all the nodes " +
            "join the cluster. To activate the cluster call Ignite.active(true).");
    }

    if (!busyLock.enterBusy())
        throw new IllegalStateException("Failed to execute query (grid is stopping).");

    GridCacheContext oldCctx = curCache.get();

    curCache.set(cctx);

    final String schemaName = qry.getSchema() != null ? qry.getSchema() :
        (cctx != null ? idx.schema(cctx.name()) : QueryUtils.DFLT_SCHEMA);

    try {
        IgniteOutClosureX<List<FieldsQueryCursor<List<?>>>> clo = new IgniteOutClosureX<List<FieldsQueryCursor<List<?>>>>() {
            @Override public List<FieldsQueryCursor<List<?>>> applyx() throws IgniteCheckedException {
                GridQueryCancel cancel = new GridQueryCancel();

                List<FieldsQueryCursor<List<?>>> res =
                    idx.querySqlFields(schemaName, qry, cliCtx, keepBinary, failOnMultipleStmts, cancel);

                if (cctx != null)
                    sendQueryExecutedEvent(qry.getSql(), qry.getArgs(), cctx);

                return res;
            }
        };

        return executeQuery(GridCacheQueryType.SQL_FIELDS, qry.getSql(), cctx, clo, true);
    }
    catch (IgniteCheckedException e) {
        throw new CacheException(e);
    }
    finally {
        curCache.set(oldCctx);

        busyLock.leaveBusy();
    }
}
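From user code this path is normally reached through the public cache API rather than by calling GridQueryProcessor directly. Below is a minimal, self-contained sketch of that entry point; it assumes the ignite-indexing module is on the classpath and uses an arbitrary cache name ("test") and a trivial query, both chosen only for illustration.

import java.util.List;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class SqlFieldsQueryExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("test");

            // IgniteCache.query(...) hands the SqlFieldsQuery to the query processor, which
            // validates it, checks that the cluster is active and runs it through the indexing
            // SPI, as in querySqlFields(...) shown above.
            try (QueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery("select 1"))) {
                for (List<?> row : cur)
                    System.out.println(row);
            }
        }
    }
}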
Use of org.apache.ignite.internal.processors.cache.GridCacheContext in project ignite by apache.
From class GridQueryProcessor, method start:
/** {@inheritDoc} */
@Override public void start() throws IgniteCheckedException {
    super.start();

    if (idx != null) {
        ctx.resource().injectGeneric(idx);

        idx.start(ctx, busyLock);
    }

    ctx.io().addMessageListener(TOPIC_SCHEMA, ioLsnr);

    // Schedule queries detail metrics eviction.
    qryDetailMetricsEvictTask = ctx.timeout().schedule(new Runnable() {
        @Override public void run() {
            for (GridCacheContext ctxs : ctx.cache().context().cacheContexts())
                ctxs.queries().evictDetailMetrics();
        }
    }, QRY_DETAIL_METRICS_EVICTION_FREQ, QRY_DETAIL_METRICS_EVICTION_FREQ);
}
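The scheduled Runnable above simply walks every started cache context and evicts stale query detail metrics. The same loop, extracted into a small helper for clarity: the class and method names here are made up for the sketch, while the chained calls on the kernal context are exactly the ones used in the snippet. In the processor itself the loop stays inside the Runnable so it runs on the timeout worker every QRY_DETAIL_METRICS_EVICTION_FREQ milliseconds.

import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.processors.cache.GridCacheContext;

final class QueryMetricsEvictionSketch {
    /** Evicts expired query detail metrics on every cache context (same body as the scheduled task above). */
    static void evictQueryDetailMetrics(GridKernalContext ctx) {
        for (GridCacheContext<?, ?> cacheCtx : ctx.cache().context().cacheContexts())
            cacheCtx.queries().evictDetailMetrics();
    }
}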