Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
The class WalRecoveryTxLogicalRecordsTest, method testWalRecoveryRemoves.
/**
 * @throws Exception If failed.
 */
@Test
public void testWalRecoveryRemoves() throws Exception {
    Ignite ignite = startGrid();

    ignite.cluster().active(true);

    try {
        GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite)
            .context().cache().context().database();

        IgniteCache<Integer, IndexedValue> cache = ignite.cache(CACHE_NAME);

        int txCnt = 100;
        int keysPerTx = 10;

        // Populate the cache with transactional puts.
        for (int i = 0; i < txCnt; i++) {
            try (Transaction tx = ignite.transactions().txStart()) {
                for (int j = 0; j < keysPerTx; j++) {
                    int k = i * keysPerTx + j;

                    cache.put(k, new IndexedValue(k));
                }

                tx.commit();
            }
        }

        for (int i = 0; i < txCnt; i++) {
            for (int j = 0; j < keysPerTx; j++) {
                int k = i * keysPerTx + j;

                assertEquals(k, cache.get(k).value());
            }
        }

        // Checkpoint the puts, then disable checkpoints so the removes below
        // survive only in the WAL and must be replayed on restart.
        dbMgr.waitForCheckpoint("test");
        dbMgr.enableCheckpoints(false).get();

        for (int i = 0; i < txCnt / 2; i++) {
            try (Transaction tx = ignite.transactions().txStart()) {
                for (int j = 0; j < keysPerTx; j++) {
                    int k = i * keysPerTx + j;

                    cache.remove(k);
                }

                tx.commit();
            }
        }

        stopGrid();

        ignite = startGrid();

        ignite.cluster().active(true);

        cache = ignite.cache(CACHE_NAME);

        // Verify both the cache contents and the SQL index after WAL recovery.
        for (int i = 0; i < txCnt; i++) {
            for (int j = 0; j < keysPerTx; j++) {
                int k = i * keysPerTx + j;

                QueryCursor<List<?>> cur = cache.query(
                    new SqlFieldsQuery("select sVal from IndexedValue where iVal=?").setArgs(k));

                List<List<?>> vals = cur.getAll();

                if (i < txCnt / 2) {
                    assertNull(cache.get(k));
                    assertTrue(F.isEmpty(vals));
                }
                else {
                    assertEquals(k, cache.get(k).value());
                    assertEquals(1, vals.size());
                    assertEquals("string-" + k, vals.get(0).get(0));
                }
            }
        }
    }
    finally {
        stopAllGrids();
    }
}
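The checkpoint-control idiom above (wait for a checkpoint, then disable further checkpoints) is what forces the subsequent removes to live only in the WAL. A minimal sketch of that idiom, factored into a helper; the class and method names CheckpointControl, dbMgr and checkpointThenFreeze are mine, and GridCacheDatabaseSharedManager is internal Ignite API whose signatures can change between versions:

import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;

/** Mirrors the checkpoint control used in the test above (hypothetical helper). */
final class CheckpointControl {
    /** Unwraps the internal database manager from a started node. */
    static GridCacheDatabaseSharedManager dbMgr(IgniteEx ignite) {
        return (GridCacheDatabaseSharedManager)ignite.context().cache().context().database();
    }

    /**
     * Persists the current state under the given reason, then freezes checkpoints
     * so every later update exists only in the WAL until they are re-enabled.
     */
    static void checkpointThenFreeze(IgniteEx ignite, String reason) throws Exception {
        GridCacheDatabaseSharedManager db = dbMgr(ignite);

        db.waitForCheckpoint(reason);
        db.enableCheckpoints(false).get();
    }
}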
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
The class WalRecoveryTxLogicalRecordsTest, method testRecoveryRandomPutRemove.
/**
 * @throws Exception If failed.
 */
@Test
public void testRecoveryRandomPutRemove() throws Exception {
    try {
        pageSize = 1024;

        extraCcfg = new CacheConfiguration(CACHE2_NAME);
        extraCcfg.setAffinity(new RendezvousAffinityFunction(false, PARTS));

        Ignite ignite = startGrid(0);

        ignite.cluster().active(true);

        GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite)
            .context().cache().context().database();

        // Disable checkpoints so every update below is recovered from the WAL.
        dbMgr.enableCheckpoints(false).get();

        IgniteCache<Integer, IndexedValue> cache1 = ignite.cache(CACHE_NAME);
        IgniteCache<Object, Object> cache2 = ignite.cache(CACHE2_NAME);

        final int KEYS1 = 100;

        for (int i = 0; i < KEYS1; i++)
            cache1.put(i, new IndexedValue(i));

        for (int i = 0; i < KEYS1; i++) {
            if (i % 2 == 0)
                cache1.remove(i);
        }

        ThreadLocalRandom rnd = ThreadLocalRandom.current();

        // Random puts, overwrites and removes of variable-size values.
        for (int i = 0; i < KEYS1; i++) {
            cache2.put(i, new byte[rnd.nextInt(512)]);

            if (rnd.nextBoolean())
                cache2.put(i, new byte[rnd.nextInt(512)]);

            if (rnd.nextBoolean())
                cache2.remove(i);
        }

        ignite.close();

        ignite = startGrid(0);

        ignite.cluster().active(true);

        // Check the cache is operational after WAL recovery.
        ignite.cache(CACHE_NAME).put(1, new IndexedValue(0));
    }
    finally {
        stopAllGrids();
    }
}
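Both recovery tests share the same round trip: mutate with checkpoints disabled, stop the node without a final checkpoint, restart, and verify. A compressed sketch of that shape; startGrid(0) and the cluster activation calls match the snippets above, while the assumption is a test class built on Ignite's test base classes that provide those helpers:

// Round trip shared by both recovery tests above (sketch, not a full test).
Ignite ignite = startGrid(0);
ignite.cluster().active(true);

// ... disable checkpoints via GridCacheDatabaseSharedManager, then put/remove ...

ignite.close();        // Stop without a final checkpoint: updates survive only in the WAL.

ignite = startGrid(0); // Restart replays logical WAL records into page memory.
ignite.cluster().active(true);

// ... assert cache and SQL index contents ...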
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
The class FreeListCachingTest, method testFreeListCaching.
/**
 * @throws Exception If failed.
 */
@Test
public void testFreeListCaching() throws Exception {
    IgniteEx ignite = startGrid(0);

    ignite.cluster().active(true);

    int partCnt = 10;

    GridCacheProcessor cacheProc = ignite.context().cache();

    GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)cacheProc.context().database();

    dbMgr.enableCheckpoints(false).get();

    IgniteCache<Object, Object> cache = ignite.createCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME)
        .setAffinity(new RendezvousAffinityFunction().setPartitions(partCnt))
        .setAtomicityMode(CacheAtomicityMode.ATOMIC));

    GridCacheOffheapManager offheap = (GridCacheOffheapManager)cacheProc.cache(DEFAULT_CACHE_NAME)
        .context().group().offheap();

    for (int i = 0; i < 5_000; i++) {
        for (int p = 0; p < partCnt; p++) {
            Integer key = i * partCnt + p;

            cache.put(key, new byte[i + 1]);
            cache.remove(key);
        }
    }

    offheap.cacheDataStores().forEach(cacheData -> {
        PagesList list = (PagesList)cacheData.rowStore().freeList();

        AtomicLongArray bucketsSize = list.bucketsSize;

        // All buckets except the reuse bucket must be empty after puts and removes of the same key.
        for (int i = 0; i < bucketsSize.length(); i++) {
            if (list.isReuseBucket(i))
                assertTrue(bucketsSize.get(i) > 0);
            else
                assertEquals(0, bucketsSize.get(i));
        }
    });

    for (int i = 0; i < 100; i++) {
        for (int p = 0; p < partCnt; p++)
            cache.put(i * partCnt + p, new byte[(i + p) * 10]);
    }

    for (int i = 0; i < 50; i += 2) {
        for (int p = 0; p < partCnt; p++)
            cache.remove(i * partCnt + p);
    }

    Map<Integer, List<Long>> partsBucketsSize = new HashMap<>();

    offheap.cacheDataStores().forEach(cacheData -> {
        PagesList list = (PagesList)cacheData.rowStore().freeList();

        AtomicLongArray bucketsSize = list.bucketsSize;

        List<Long> bucketsSizeList = new ArrayList<>(bucketsSize.length());

        partsBucketsSize.put(cacheData.partId(), bucketsSizeList);

        long notReuseSize = 0;

        for (int i = 0; i < bucketsSize.length(); i++) {
            bucketsSizeList.add(bucketsSize.get(i));

            PagesList.Stripe[] bucket = list.getBucket(i);

            // While checkpoints are disabled, pages of non-reuse buckets are held in the on-heap
            // pages cache, so their page-memory buckets must stay empty. The reuse bucket is
            // exempt, since it can be accessed by indexes bypassing caching.
            if (!list.isReuseBucket(i)) {
                notReuseSize += bucketsSize.get(i);

                assertNull("Expected null bucket [partId=" + cacheData.partId() +
                    ", i=" + i + ", bucket=" + bucket + ']', bucket);

                PagesList.PagesCache pagesCache = list.getBucketCache(i, false);

                assertEquals("Wrong pages cache size [partId=" + cacheData.partId() + ", i=" + i + ']',
                    bucketsSize.get(i), pagesCache == null ? 0 : pagesCache.size());
            }
        }

        assertTrue(notReuseSize > 0);
    });

    dbMgr.enableCheckpoints(true).get();

    forceCheckpoint(ignite);

    offheap.cacheDataStores().forEach(cacheData -> {
        PagesList list = (PagesList)cacheData.rowStore().freeList();

        AtomicLongArray bucketsSize = list.bucketsSize;

        for (int i = 0; i < bucketsSize.length(); i++) {
            long bucketSize = bucketsSize.get(i);

            PagesList.Stripe[] bucket = list.getBucket(i);

            // After the checkpoint all buckets must flush the on-heap cache to page memory.
            if (bucketSize > 0)
                assertNotNull("Expected not null bucket [partId=" + cacheData.partId() + ", i=" + i + ']', bucket);

            PagesList.PagesCache pagesCache = list.getBucketCache(i, false);

            assertEquals("Wrong pages cache size [partId=" + cacheData.partId() + ", i=" + i + ']',
                0, pagesCache == null ? 0 : pagesCache.size());

            assertEquals("Bucket size changed after checkpoint [partId=" + cacheData.partId() + ", i=" + i + ']',
                (long)partsBucketsSize.get(cacheData.partId()).get(i), bucketSize);
        }
    });

    dbMgr.enableCheckpoints(false).get();

    for (int i = 0; i < 50; i++) {
        for (int p = 0; p < partCnt; p++)
            cache.put(i * partCnt + p, new byte[(i + p) * 10]);
    }

    offheap.cacheDataStores().forEach(cacheData -> {
        PagesList list = (PagesList)cacheData.rowStore().freeList();

        int totalCacheSize = 0;

        for (int i = 0; i < list.bucketsSize.length(); i++) {
            PagesList.PagesCache pagesCache = list.getBucketCache(i, false);

            totalCacheSize += pagesCache == null ? 0 : pagesCache.size();
        }

        assertTrue("Some buckets should be cached [partId=" + cacheData.partId() + ']', totalCacheSize > 0);
    });
}
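The assertions above repeatedly sum a free list's on-heap page caches across buckets. That check can be factored into a small helper; the name cachedPages is mine, while bucketsSize and getBucketCache(int, boolean) are the internal PagesList members the test itself touches:

import java.util.concurrent.atomic.AtomicLongArray;
import org.apache.ignite.internal.processors.cache.persistence.freelist.PagesList;

/** Sums a free list's on-heap page caches across buckets, as the assertions above do (sketch). */
static long cachedPages(PagesList list) {
    long total = 0;

    AtomicLongArray bucketsSize = list.bucketsSize;

    for (int b = 0; b < bucketsSize.length(); b++) {
        PagesList.PagesCache pagesCache = list.getBucketCache(b, false);

        total += pagesCache == null ? 0 : pagesCache.size();
    }

    return total;
}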
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
The class FreeListCachingTest, method testPageListCacheLimit.
/**
 * @throws Exception If test failed.
 */
@Test
public void testPageListCacheLimit() throws Exception {
    IgniteEx ignite = startGrid(0);

    ignite.cluster().active(true);

    ignite.getOrCreateCache("cache1");
    ignite.getOrCreateCache("cache2");

    GridCacheContext<?, ?> cctx1 = ignite.context().cache().cache("cache1").context();
    GridCacheContext<?, ?> cctx2 = ignite.context().cache().cache("cache2").context();

    GridCacheOffheapManager offheap1 = (GridCacheOffheapManager)cctx1.offheap();
    GridCacheOffheapManager offheap2 = (GridCacheOffheapManager)cctx2.offheap();

    GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)ignite.context().cache().context().database();

    // Caches in the same data region share one page list cache limit holder.
    assertEquals(db.pageListCacheLimitHolder(cctx1.dataRegion()), db.pageListCacheLimitHolder(cctx2.dataRegion()));

    long limit = db.pageListCacheLimitHolder(cctx1.dataRegion()).get();

    try (IgniteDataStreamer<Object, Object> streamer1 = ignite.dataStreamer("cache1");
         IgniteDataStreamer<Object, Object> streamer2 = ignite.dataStreamer("cache2")) {
        // Fill the caches to trigger a "too many dirty pages" checkpoint.
        for (int i = 0; i < 50_000; i++) {
            streamer1.addData(i, new byte[i % 2048]);
            streamer2.addData(i, new byte[i % 2048]);

            // Count the page list caches and validate the count periodically.
            if (i % 5_000 == 0) {
                streamer1.flush();
                streamer2.flush();

                AtomicInteger pageCachesCnt = new AtomicInteger();

                for (GridCacheOffheapManager offheap : F.asList(offheap1, offheap2)) {
                    offheap.cacheDataStores().forEach(cacheData -> {
                        if (cacheData.rowStore() == null)
                            return;

                        PagesList list = (PagesList)cacheData.rowStore().freeList();

                        for (int b = 0; b < list.bucketsSize.length(); b++) {
                            PagesList.PagesCache pagesCache = list.getBucketCache(b, false);

                            if (pagesCache != null && pagesCache.size() > 0)
                                pageCachesCnt.incrementAndGet();
                        }
                    });
                }

                // Due to a possible race, the actual page list caches count can slightly
                // exceed the limit in very rare cases.
                assertTrue("Page list caches count is more than expected [count=" + pageCachesCnt.get() +
                    ", limit=" + limit + ']',
                    pageCachesCnt.get() <= limit + ignite.configuration().getDataStreamerThreadPoolSize() - 1);
            }
        }
    }
}
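The key line here is the shared limit holder: every cache in a data region draws from one counter, which is why the test asserts the two holders are equal. A minimal sketch of reading that limit under the same internal API used above (the ignite variable is assumed to be a started IgniteEx):

// Reads the shared page-list cache limit for a cache's data region.
GridCacheDatabaseSharedManager db =
    (GridCacheDatabaseSharedManager)ignite.context().cache().context().database();

GridCacheContext<?, ?> cctx = ignite.context().cache().cache("cache1").context();

// One counter per data region bounds how many page list buckets
// may be cached on-heap, as the assertion above relies on.
long limit = db.pageListCacheLimitHolder(cctx.dataRegion()).get();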
Use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
The class DefragmentationMXBeanTest, method testDefragmentationStatus.
/**
 * Test that the JMX bean provides the correct defragmentation status.
 * Description:
 * 1. Start one node.
 * 2. Put a load of data on it.
 * 3. Schedule defragmentation.
 * 4. Pause defragmentation once 128 partitions have been processed.
 * 5. Check the defragmentation status.
 * 6. Resume defragmentation and wait for it to finish.
 * 7. Check that defragmentation finished.
 *
 * @throws Exception If failed.
 */
@Test
public void testDefragmentationStatus() throws Exception {
    IgniteEx ig = startGrid(0);

    ig.cluster().state(ClusterState.ACTIVE);

    ig.getOrCreateCache(DEFAULT_CACHE_NAME + "1");

    IgniteCache<Object, Object> cache = ig.getOrCreateCache(DEFAULT_CACHE_NAME + "2");

    ig.getOrCreateCache(DEFAULT_CACHE_NAME + "3");

    for (int i = 0; i < 1024; i++)
        cache.put(i, i);

    forceCheckpoint(ig);

    DefragmentationMXBean mxBean = defragmentationMXBean(ig.name());

    mxBean.schedule("");

    stopGrid(0);

    blockCdl = new CountDownLatch(128);
    waitCdl = new CountDownLatch(1);

    // Wrap the file I/O factory to pause defragmentation after 128 partition files are created.
    UnaryOperator<IgniteConfiguration> cfgOp = cfg -> {
        DataStorageConfiguration dsCfg = cfg.getDataStorageConfiguration();

        FileIOFactory delegate = dsCfg.getFileIOFactory();

        dsCfg.setFileIOFactory((file, modes) -> {
            if (file.getName().contains("dfrg")) {
                if (blockCdl.getCount() == 0) {
                    try {
                        waitCdl.await();
                    }
                    catch (InterruptedException ignore) {
                        // No-op.
                    }
                }
                else
                    blockCdl.countDown();
            }

            return delegate.create(file, modes);
        });

        return cfg;
    };

    IgniteInternalFuture<?> fut = GridTestUtils.runAsync(() -> {
        try {
            startGrid(0, cfgOp);
        }
        catch (Exception e) {
            throw new RuntimeException(e);
        }
    });

    blockCdl.await();

    mxBean = defragmentationMXBean(ig.name());

    final IgniteKernal gridx = IgnitionEx.gridx(ig.name());
    final IgniteDefragmentation defragmentation = gridx.context().defragmentation();
    final IgniteDefragmentation.DefragmentationStatus status1 = defragmentation.status();

    // While paused, the bean must report defragmentation in progress.
    assertEquals(status1.getStartTs(), mxBean.startTime());
    assertTrue(mxBean.inProgress());

    final int totalPartitions = status1.getTotalPartitions();

    assertEquals(totalPartitions, mxBean.totalPartitions());

    // Resume defragmentation and wait for it to finish.
    waitCdl.countDown();

    fut.get();

    ((GridCacheDatabaseSharedManager)grid(0).context().cache().context().database())
        .defragmentationManager().completionFuture().get();

    assertFalse(mxBean.inProgress());
    assertEquals(totalPartitions, mxBean.processedPartitions());
}
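Stripped of the pause/resume machinery, checking progress through the bean reduces to a short polling loop. A sketch reusing the test's own defragmentationMXBean(...) lookup helper; the awaitDefragmentation name is mine, and the calls (inProgress, processedPartitions, totalPartitions) are the ones exercised by the assertions above:

/** Polls the defragmentation MXBean until the maintenance task completes (hypothetical helper). */
static void awaitDefragmentation(String instanceName) throws Exception {
    DefragmentationMXBean mxBean = defragmentationMXBean(instanceName);

    while (mxBean.inProgress())
        Thread.sleep(100); // Poll until defragmentation completes.

    System.out.println("Processed " + mxBean.processedPartitions() +
        " of " + mxBean.totalPartitions() + " partitions.");
}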