use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
the class IgniteCacheClientNodePartitionsExchangeTest method waitForTopologyUpdate.
/**
 * @param expNodes Expected number of nodes.
 * @param topVer Expected topology version.
 * @throws Exception If failed.
 */
private void waitForTopologyUpdate(int expNodes, final AffinityTopologyVersion topVer) throws Exception {
    List<Ignite> nodes = G.allGrids();

    assertEquals(expNodes, nodes.size());

    for (Ignite ignite : nodes) {
        final IgniteKernal kernal = (IgniteKernal) ignite;

        GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                return topVer.equals(kernal.context().cache().context().exchange().readyAffinityVersion());
            }
        }, 10_000);

        assertEquals("Unexpected affinity version for " + ignite.name(),
            topVer,
            kernal.context().cache().context().exchange().readyAffinityVersion());
    }

    Iterator<Ignite> it = nodes.iterator();

    Ignite ignite0 = it.next();

    Affinity<Integer> aff0 = ignite0.affinity(DEFAULT_CACHE_NAME);

    while (it.hasNext()) {
        Ignite ignite = it.next();

        Affinity<Integer> aff = ignite.affinity(DEFAULT_CACHE_NAME);

        assertEquals(aff0.partitions(), aff.partitions());

        for (int part = 0; part < aff.partitions(); part++)
            assertEquals(aff0.mapPartitionToPrimaryAndBackups(part), aff.mapPartitionToPrimaryAndBackups(part));
    }

    for (Ignite ignite : nodes) {
        final IgniteKernal kernal = (IgniteKernal) ignite;

        for (IgniteInternalCache cache : kernal.context().cache().caches()) {
            GridDhtPartitionTopology top = cache.context().topology();

            waitForReadyTopology(top, topVer);

            assertEquals("Unexpected topology version [node=" + ignite.name() + ", cache=" + cache.name() + ']',
                topVer,
                top.readyTopologyVersion());
        }
    }

    awaitPartitionMapExchange();
}
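Since GridAbsPredicate has a single abstract method, the same wait can be written with a lambda. The fragment below is a minimal sketch, not part of the original test: it reuses the kernal and topVer variables from the loop above and additionally checks the boolean returned by GridTestUtils.waitForCondition.

    // Minimal sketch (assumption): same readiness condition as above, expressed as a lambda.
    boolean reached = GridTestUtils.waitForCondition(
        () -> topVer.equals(kernal.context().cache().context().exchange().readyAffinityVersion()),
        10_000);

    // Fail with an explicit message if the expected ready affinity version was not reached in time.
    assertTrue("Ready affinity version " + topVer + " was not reached on " + kernal.name(), reached);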
use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
the class PartitionEvictionOrderTest method testSyncCachesEvictedAtFirst.
/**
 * Tests that {@link CacheRebalanceMode#SYNC} caches are evicted at first.
 */
@Test
@WithSystemProperty(key = IgniteSystemProperties.IGNITE_EVICTION_PERMITS, value = "1")
@WithSystemProperty(key = IGNITE_PDS_WAL_REBALANCE_THRESHOLD, value = "500_000")
public void testSyncCachesEvictedAtFirst() throws Exception {
    IgniteEx node0 = startGrid(0);

    node0.cluster().state(ACTIVE);

    IgniteEx node1 = startGrid(1);

    node0.cluster().setBaselineTopology(node1.cluster().topologyVersion());

    GridCacheAdapter<Object, Object> utilCache0 = grid(0).context().cache().internalCache(CU.UTILITY_CACHE_NAME);

    IgniteCache<Object, Object> cache = node0.getOrCreateCache(DEFAULT_CACHE_NAME);

    for (int i = 0; i < 1000; i++) {
        utilCache0.put(i, i);
        cache.put(i, i);
    }

    awaitPartitionMapExchange();

    stopGrid(0);

    // Update both caches on the remaining node so that node 0 has outdated partitions to evict when it rejoins.
    GridCacheAdapter<Object, Object> utilCache1 = grid(1).context().cache().internalCache(CU.UTILITY_CACHE_NAME);
    IgniteInternalCache<Object, Object> cache2 = grid(1).context().cache().cache(DEFAULT_CACHE_NAME);

    for (int i = 0; i < 2000; i++) {
        try {
            cache2.put(i, i + 1);
            utilCache1.put(i, i + 1);
        }
        catch (IgniteCheckedException e) {
            e.printStackTrace();
        }
    }

    List<T2<Integer, Integer>> evictionOrder = Collections.synchronizedList(new ArrayList<>());

    // Intercept partition creation on the restarted node to record the order in which partitions are cleared.
    TestDependencyResolver rslvr = new TestDependencyResolver(new DependencyResolver() {
        @Override public <T> T resolve(T instance) {
            if (instance instanceof GridDhtPartitionTopologyImpl) {
                GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl) instance;

                top.partitionFactory((ctx, grp, id, recovery) -> new GridDhtLocalPartition(ctx, grp, id, recovery) {
                    @Override public long clearAll(EvictionContext evictionCtx) throws NodeStoppingException {
                        evictionOrder.add(new T2<>(grp.groupId(), id));

                        return super.clearAll(evictionCtx);
                    }
                });
            }

            return instance;
        }
    });

    startGrid(0, rslvr);

    awaitPartitionMapExchange(true, true, null);

    assertEquals(utilCache0.affinity().partitions() + grid(0).cachex(DEFAULT_CACHE_NAME).affinity().partitions(),
        evictionOrder.size());

    // Partitions of the SYNC utility cache must be cleared before any other cache group.
    for (int i = 0; i < utilCache0.affinity().partitions(); i++)
        assertEquals(CU.UTILITY_CACHE_GROUP_ID, evictionOrder.get(i).get1().intValue());
}
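The final loop checks only the first block of entries in evictionOrder. A minimal sketch of an equivalent ordering check over the whole list is shown below; it is an assumption, not part of the original test, and introduces a local flag (userGroupSeen) for illustration.

    // Minimal sketch (assumption): no utility-cache partition may be cleared after a partition of another group.
    boolean userGroupSeen = false;

    for (T2<Integer, Integer> entry : evictionOrder) {
        boolean utility = entry.get1() == CU.UTILITY_CACHE_GROUP_ID;

        if (!utility)
            userGroupSeen = true;
        else
            assertFalse("Utility partition " + entry.get2() + " evicted after a user partition", userGroupSeen);
    }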
use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
the class WaitMapExchangeFinishCallable method call.
/** {@inheritDoc} */
@Override public Void call() throws Exception {
    Collection<IgniteInternalCache<?, ?>> cachesx = ((IgniteKernal) ignite).cachesx(null);

    for (IgniteInternalCache<?, ?> cache : cachesx) {
        try {
            GridDhtPartitionTopology top = cache.context().isNear() ?
                cache.context().near().dht().topology() : cache.context().dht().topology();

            BenchmarkUtils.println("Validating cache: " + cache.name());

            for (;;) {
                boolean success = true;

                if (top.readyTopologyVersion().topologyVersion() == ignite.cluster().topologyVersion()) {
                    for (Map.Entry<UUID, GridDhtPartitionMap> e : top.partitionMap(true).entrySet()) {
                        for (Map.Entry<Integer, GridDhtPartitionState> p : e.getValue().entrySet()) {
                            if (p.getValue() != GridDhtPartitionState.OWNING) {
                                BenchmarkUtils.println("Not owning partition [part=" + p.getKey() +
                                    ", state=" + p.getValue() + ']');

                                success = false;

                                break;
                            }
                        }

                        if (!success)
                            break;
                    }
                }
                else {
                    BenchmarkUtils.println("Topology version is different [cache=" + top.readyTopologyVersion() +
                        ", cluster=" + ignite.cluster().topologyVersion() + ']');

                    success = false;
                }

                if (!success)
                    Thread.sleep(1000);
                else {
                    BenchmarkUtils.println("Cache state is fine: " + cache.name());

                    break;
                }
            }
        }
        catch (RuntimeException e1) {
            BenchmarkUtils.println("Ignored exception: " + e1);
        }
    }

    return null;
}
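The inner loops boil down to one question: is every partition reported in the full partition map OWNING? A minimal sketch of that check extracted into a hypothetical helper (the name allPartitionsOwned is not part of the benchmark; it reuses the same topology types as the method above):

// Minimal sketch (assumption): returns true if every partition in the full map of the given topology is OWNING.
private static boolean allPartitionsOwned(GridDhtPartitionTopology top) {
    for (Map.Entry<UUID, GridDhtPartitionMap> e : top.partitionMap(true).entrySet()) {
        for (Map.Entry<Integer, GridDhtPartitionState> p : e.getValue().entrySet()) {
            if (p.getValue() != GridDhtPartitionState.OWNING)
                return false;
        }
    }

    return true;
}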
use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
the class CacheDataPageScanQueryTest method testDataPageScanWithRestart.
/**
 * @throws Exception If failed.
 */
@SuppressWarnings("ConstantConditions")
@Test
@Ignore("https://issues.apache.org/jira/browse/IGNITE-11998")
public void testDataPageScanWithRestart() throws Exception {
    IgniteEx ignite = startGrid(0);

    ignite.cluster().active(true);

    IgniteInternalCache<Long, String> cache = ignite.cachex(CACHE);

    CacheGroupMetricsImpl metrics = cache.context().group().metrics();
    DataRegionMetricsImpl rmx = cache.context().dataRegion().metrics();

    long maxKey = 10_000;

    Map<Long, String> map = new ConcurrentHashMap<>();

    int threads = 16;

    AtomicInteger threadShift = new AtomicInteger();

    multithreaded((Callable<Void>) () -> {
        ThreadLocalRandom rnd = ThreadLocalRandom.current();

        int shift = threadShift.getAndIncrement();

        for (int i = shift; i < maxKey; i += threads) {
            Long k = (long) i;

            // Bigger than single page.
            String v = GridTestUtils.randomString(rnd, 6 * 1024);

            cache.put(k, v);
            map.put(k, v);
        }

        return null;
    }, threads);

    assertEquals(map.size(), cache.size());

    info("Page mem : " + rmx.getPhysicalMemorySize());
    info("Alloc size: " + metrics.getTotalAllocatedSize());
    info("Store size: " + metrics.getStorageSize());

    HashMap<Long, String> map2 = new HashMap<>(map);

    IgniteCache<Long, String> c = ignite.cache(CACHE);

    for (Cache.Entry<Long, String> e : c.query(new ScanQuery<Long, String>()).getAll())
        assertEquals(e.getValue(), map.remove(e.getKey()));

    assertTrue(map.isEmpty());
    assertTrue(CacheDataTree.isLastFindWithDataPageScan());

    stopAllGrids(true);

    ignite = startGrid(0);

    ignite.cluster().active(true);

    c = ignite.cache(CACHE);

    for (Cache.Entry<Long, String> e : c.query(new ScanQuery<Long, String>()).getAll())
        assertEquals(e.getValue(), map2.remove(e.getKey()));

    assertTrue(map2.isEmpty());
    assertTrue(CacheDataTree.isLastFindWithDataPageScan());
}
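The test logs group and region metrics only before the restart. As a follow-up, the same accessors could be re-read after the restart to compare allocated and storage sizes. A minimal sketch, not part of the original test, assuming cachex(CACHE) returns the started cache on the restarted node:

    // Minimal sketch (assumption): re-read the same metrics from the restarted node for comparison.
    IgniteInternalCache<Long, String> cacheAfterRestart = ignite.cachex(CACHE);

    CacheGroupMetricsImpl metricsAfter = cacheAfterRestart.context().group().metrics();
    DataRegionMetricsImpl rmxAfter = cacheAfterRestart.context().dataRegion().metrics();

    info("Page mem after restart : " + rmxAfter.getPhysicalMemorySize());
    info("Alloc size after restart: " + metricsAfter.getTotalAllocatedSize());
    info("Store size after restart: " + metricsAfter.getStorageSize());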
use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
the class H2DynamicTableSelfTest method testQueryLocalWithRecreate.
/**
 * @throws Exception If test failed.
 */
@Test
public void testQueryLocalWithRecreate() throws Exception {
    execute("CREATE TABLE A(id int primary key, name varchar, surname varchar) WITH \"cache_name=cache," +
        "template=replicated\"");

    // In order for local queries to work, use a non-client node.
    IgniteInternalCache cache = grid(0).cachex("cache");

    assertNotNull(cache);

    executeLocal(cache.context(), "INSERT INTO A(id, name, surname) values (1, 'X', 'Y')");

    assertEqualsCollections(Collections.singletonList(Arrays.asList(1, "X", "Y")),
        executeLocal(cache.context(), "SELECT id, name, surname FROM A"));

    execute("DROP TABLE A");

    execute("CREATE TABLE A(id int primary key, name varchar, surname varchar) WITH \"cache_name=cache\"");

    cache = grid(0).cachex("cache");

    assertNotNull(cache);

    try {
        executeLocal(cache.context(), "INSERT INTO A(id, name, surname) values (1, 'X', 'Y')");
    }
    finally {
        execute("DROP TABLE A");
    }
}
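The key pattern here is resolving the IgniteInternalCache that backs a SQL-created table by its cache_name and then running local statements through its context. A minimal sketch of that lookup in isolation, an assumption rather than part of the original test:

    // Minimal sketch (assumption): inspect the internal cache created for the SQL table above.
    IgniteInternalCache<?, ?> sqlCache = grid(0).cachex("cache");

    assertNotNull("Cache for the SQL table was not started", sqlCache);
    assertEquals("cache", sqlCache.name());

    // The cache context is what executeLocal(...) needs to run statements on this node.
    assertNotNull(sqlCache.context());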