use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
the class TxCrossCachePartitionConsistencyTest method stopGrid.
/**
* @param skipCheckpointOnStop Skip checkpoint on stop.
* @param name Grid instance name.
*/
protected void stopGrid(boolean skipCheckpointOnStop, String name) {
    IgniteEx grid = grid(name);

    if (skipCheckpointOnStop) {
        GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)grid.context().cache().context().database();

        db.enableCheckpoints(false);
    }

    stopGrid(grid.name(), skipCheckpointOnStop);
}
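A minimal usage sketch (hypothetical, not from the Ignite sources): with this helper, a test extending the same base class can stop a node without a final checkpoint and then restart it, so the latest updates have to be recovered from the WAL. The instance name and the assumption that persistence is enabled for DEFAULT_CACHE_NAME are mine.
// Hypothetical caller: simulate a crash-like stop and verify WAL recovery.
IgniteEx node = startGrid("node0");
node.cluster().state(ACTIVE);

node.cache(DEFAULT_CACHE_NAME).put(1, 1);

// Stop without a final checkpoint, so the update survives only in the WAL.
stopGrid(true, "node0");

// On restart the node replays the WAL; the update must still be visible.
IgniteEx restarted = startGrid("node0");

assertEquals(1, restarted.cache(DEFAULT_CACHE_NAME).get(1));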
use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
the class MaintenanceModeNodeSecurityTest method testNodeStartInMaintenanceMode.
/**
* Tests that node can be successfully restarted in maintenance mode with security enabled.
*/
@Test
public void testNodeStartInMaintenanceMode() throws Exception {
    IgniteEx crd = startGridAllowAll(getTestIgniteInstanceName(0));
    IgniteEx srv = startGridAllowAll(getTestIgniteInstanceName(1));

    crd.cluster().baselineAutoAdjustEnabled(false);
    crd.cluster().state(ACTIVE);

    IgniteCache<Object, Object> cache = crd.getOrCreateCache(
        new CacheConfiguration<>(DEFAULT_CACHE_NAME)
            .setAffinity(new RendezvousAffinityFunction(false, 32))
            .setBackups(1));

    String srvPdsFolder = srv.context().pdsFolderResolver().resolveFolders().folderName();

    File dbDir = U.resolveWorkDirectory(srv.configuration().getWorkDirectory(), "db", false);
    File srvLfsDir = new File(dbDir, srvPdsFolder);

    for (int k = 0; k < 1000; k++)
        cache.put(k, k);
    GridCacheDatabaseSharedManager crdDbMgr = (GridCacheDatabaseSharedManager)crd.context().cache().context().database();
    GridCacheDatabaseSharedManager srvDbMgr = (GridCacheDatabaseSharedManager)srv.context().cache().context().database();

    crdDbMgr.forceCheckpoint("cp").futureFor(CheckpointState.FINISHED).get();
    srvDbMgr.forceCheckpoint("cp").futureFor(CheckpointState.FINISHED).get();
    crd.cluster().disableWal(cache.getName());

    for (int k = 1000; k < 2000; k++)
        cache.put(k, k);

    stopGrid(1);

    File[] cpMarkers = new File(srvLfsDir, "cp").listFiles();

    for (File cpMark : cpMarkers) {
        if (cpMark.getName().contains("-END"))
            cpMark.delete();
    }

    assertThrows(log, () -> startGridAllowAll(getTestIgniteInstanceName(1)), Exception.class, null);

    assertTrue(startGridAllowAll(getTestIgniteInstanceName(1)).context().maintenanceRegistry().isMaintenanceMode());
}
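A hedged follow-up sketch (not part of the test above, and the topology assertion is my assumption): a node that comes up in maintenance mode starts isolated from the rest of the cluster, so the coordinator keeps running with a reduced topology until the maintenance task is resolved and the node is restarted once more.
// Hypothetical continuation of the test above.
IgniteEx maintenanceNode = grid(1);

// The restarted node reports maintenance mode through the maintenance registry.
assertTrue(maintenanceNode.context().maintenanceRegistry().isMaintenanceMode());

// Assumed behavior: the node does not rejoin the cluster, so only the coordinator is visible as a server node.
assertEquals(1, crd.cluster().forServers().nodes().size());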
use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
the class SqlViewExporterSpiTest method testPagesList.
/**
*/
@Test
public void testPagesList() throws Exception {
    String cacheName = "cacheFL";

    IgniteCache<Integer, byte[]> cache = ignite0.getOrCreateCache(
        new CacheConfiguration<Integer, byte[]>()
            .setName(cacheName)
            .setAffinity(new RendezvousAffinityFunction().setPartitions(1)));

    GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)ignite0.context().cache().context().database();

    int pageSize = dbMgr.pageSize();

    try {
        dbMgr.enableCheckpoints(false).get();

        int key = 0;

        // Fill up different free-list buckets.
        for (int j = 0; j < pageSize / 2; j++)
            cache.put(key++, new byte[j + 1]);

        // Put some pages to one bucket to overflow pages cache.
        for (int j = 0; j < 1000; j++)
            cache.put(key++, new byte[pageSize / 2]);

        // Test filtering by 3 columns.
        assertFalse(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE BUCKET_NUMBER = 0 " +
            "AND PARTITION_ID = 0 AND CACHE_GROUP_ID = ?", cacheId(cacheName)).isEmpty());

        // Test filtering with invalid cache group id.
        assertTrue(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE CACHE_GROUP_ID = ?", -1).isEmpty());

        // Test filtering with invalid partition id.
        assertTrue(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE PARTITION_ID = ?", -1).isEmpty());

        // Test filtering with invalid bucket number.
        assertTrue(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE BUCKET_NUMBER = -1").isEmpty());

        assertFalse(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE BUCKET_SIZE > 0 " +
            "AND CACHE_GROUP_ID = ?", cacheId(cacheName)).isEmpty());

        assertFalse(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE STRIPES_COUNT > 0 " +
            "AND CACHE_GROUP_ID = ?", cacheId(cacheName)).isEmpty());

        assertFalse(execute(ignite0, "SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE CACHED_PAGES_COUNT > 0 " +
            "AND CACHE_GROUP_ID = ?", cacheId(cacheName)).isEmpty());

        assertFalse(execute(ignite0, "SELECT * FROM SYS.DATA_REGION_PAGE_LISTS WHERE NAME LIKE 'in-memory%'").isEmpty());

        assertEquals(0L, execute(ignite0, "SELECT COUNT(*) FROM SYS.DATA_REGION_PAGE_LISTS " +
            "WHERE NAME LIKE 'in-memory%' AND BUCKET_SIZE > 0").get(0).get(0));
    }
    finally {
        dbMgr.enableCheckpoints(true).get();
    }

    ignite0.cluster().active(false);
    ignite0.cluster().active(true);

    IgniteCache<Integer, Integer> cacheInMemory = ignite0.getOrCreateCache(
        new CacheConfiguration<Integer, Integer>()
            .setName("cacheFLInMemory")
            .setDataRegionName("in-memory"));

    cacheInMemory.put(0, 0);

    // After activation/deactivation new view for data region pages lists should be created, check that new view
    // correctly reflects changes in free-lists.
    assertFalse(execute(ignite0, "SELECT * FROM SYS.DATA_REGION_PAGE_LISTS WHERE NAME LIKE 'in-memory%' AND " +
        "BUCKET_SIZE > 0").isEmpty());
}
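Since this test exercises the SYS.CACHE_GROUP_PAGE_LISTS and SYS.DATA_REGION_PAGE_LISTS system views, here is a short, hypothetical sketch of querying the same view from outside the test through the JDBC thin driver. The host, port and column selection are assumptions (only columns already used in the test above appear), and the usual java.sql imports are required.
// Hypothetical: inspect non-empty free-list buckets via the JDBC thin driver.
try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1:10800");
     Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery(
         "SELECT CACHE_GROUP_ID, PARTITION_ID, BUCKET_NUMBER, BUCKET_SIZE " +
             "FROM SYS.CACHE_GROUP_PAGE_LISTS WHERE BUCKET_SIZE > 0")) {
    while (rs.next()) {
        System.out.println("grp=" + rs.getInt("CACHE_GROUP_ID") +
            ", part=" + rs.getInt("PARTITION_ID") +
            ", bucket=" + rs.getInt("BUCKET_NUMBER") +
            ", size=" + rs.getLong("BUCKET_SIZE"));
    }
}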
use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
the class IgniteWalRecoveryTest method testApplyDeltaRecords.
/**
* @throws Exception if failed.
*/
@Test
public void testApplyDeltaRecords() throws Exception {
    IgniteEx ignite0 = (IgniteEx)startGrid("node0");

    ignite0.cluster().active(true);

    IgniteCache<Object, Object> cache0 = ignite0.cache(CACHE_NAME);

    for (int i = 0; i < 1000; i++)
        cache0.put(i, new IndexedObject(i));

    GridCacheSharedContext<Object, Object> sharedCtx = ignite0.context().cache().context();

    GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)sharedCtx.database();

    db.waitForCheckpoint("test");
    db.enableCheckpoints(false).get();

    // Log something to know where to start.
    WALPointer ptr = sharedCtx.wal().log(new MemoryRecoveryRecord(U.currentTimeMillis()));

    info("Replay marker: " + ptr);

    for (int i = 1000; i < 5000; i++)
        cache0.put(i, new IndexedObject(i));

    info("Done puts...");

    for (int i = 2_000; i < 3_000; i++)
        cache0.remove(i);

    info("Done removes...");

    for (int i = 5000; i < 6000; i++)
        cache0.put(i, new IndexedObject(i));

    info("Done puts...");

    Map<FullPageId, byte[]> rolledPages = new HashMap<>();

    int pageSize = sharedCtx.database().pageSize();

    ByteBuffer buf = ByteBuffer.allocateDirect(pageSize);
    buf.order(ByteOrder.nativeOrder());
    // Now check that deltas can be correctly applied.
    try (WALIterator it = sharedCtx.wal().replay(ptr)) {
        while (it.hasNext()) {
            IgniteBiTuple<WALPointer, WALRecord> tup = it.next();

            WALRecord rec = tup.get2();

            if (rec instanceof PageSnapshot) {
                PageSnapshot page = (PageSnapshot)rec;

                CacheGroupContext gctx = sharedCtx.cache().cacheGroup(page.groupId());

                int realPageSize = gctx == null ? pageSize
                    : gctx.dataRegion().pageMemory().realPageSize(page.groupId());

                byte[] pageData = page.pageData();

                if (pageData.length < realPageSize) {
                    buf.clear();
                    buf.put(pageData);
                    buf.flip();

                    sharedCtx.kernalContext().compress().decompressPage(buf, realPageSize);

                    pageData = new byte[realPageSize];

                    buf.position(0);
                    buf.get(pageData);
                }

                rolledPages.put(page.fullPageId(), pageData);
            }
            else if (rec instanceof PageDeltaRecord) {
                PageDeltaRecord delta = (PageDeltaRecord)rec;

                FullPageId fullId = new FullPageId(delta.pageId(), delta.groupId());

                byte[] pageData = rolledPages.get(fullId);

                if (pageData == null) {
                    pageData = new byte[pageSize];

                    rolledPages.put(fullId, pageData);
                }

                assertNotNull("Missing page snapshot [page=" + fullId + ", delta=" + delta + ']', pageData);

                buf.clear();
                buf.put(pageData);
                buf.flip();

                delta.applyDelta(sharedCtx.database().dataRegion(null).pageMemory(), GridUnsafe.bufferAddress(buf));

                buf.get(pageData);
            }
        }
    }
info("Done apply...");
PageMemoryEx pageMem = (PageMemoryEx) db.dataRegion(null).pageMemory();
ByteBuffer bufWal = ByteBuffer.allocateDirect(pageSize);
for (Map.Entry<FullPageId, byte[]> entry : rolledPages.entrySet()) {
FullPageId fullId = entry.getKey();
ignite0.context().cache().context().database().checkpointReadLock();
try {
long page = pageMem.acquirePage(fullId.groupId(), fullId.pageId(), IoStatisticsHolderNoOp.INSTANCE, true);
try {
long bufPtr = pageMem.writeLock(fullId.groupId(), fullId.pageId(), page, true);
try {
byte[] data = entry.getValue();
                    // Skip tracking pages.
                    if (fullId.pageId() == TrackingPageIO.VERSIONS.latest().trackingPageFor(fullId.pageId(), db.pageSize()))
                        continue;

                    // Compare compacted pages if WAL page compression is used.
                    if (walPageCompression != null && PageIO.getPageIO(bufPtr) instanceof CompactablePageIO) {
                        CompactablePageIO pageIO = PageIO.getPageIO(bufPtr);

                        buf.clear();
                        bufWal.clear();

                        int realPageSize = data.length;

                        pageIO.compactPage(GridUnsafe.wrapPointer(bufPtr, realPageSize), buf, realPageSize);
                        pageIO.compactPage(ByteBuffer.wrap(data).order(ByteOrder.nativeOrder()), bufWal, realPageSize);

                        bufPtr = GridUnsafe.bufferAddress(buf);

                        data = new byte[bufWal.limit()];

                        bufWal.rewind();
                        bufWal.get(data);
                    }

                    for (int i = 0; i < data.length; i++)
                        assertEquals("page=" + fullId + ", pos=" + i, PageUtils.getByte(bufPtr, i), data[i]);
                }
                finally {
                    pageMem.writeUnlock(fullId.groupId(), fullId.pageId(), page, null, false, true);
                }
            }
            finally {
                pageMem.releasePage(fullId.groupId(), fullId.pageId(), page);
            }
        }
        finally {
            ignite0.context().cache().context().database().checkpointReadUnlock();
        }
    }

    ignite0.close();
}
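A small, hypothetical variation on the replay loop above, reusing only the names already introduced there (sharedCtx, ptr, info) plus java.util.EnumMap: instead of reconstructing pages, it just counts how many WAL records of each type were logged after the replay marker, which can help when debugging why a particular snapshot or delta is missing.
// Hypothetical fragment, placed next to the replay loop above.
Map<WALRecord.RecordType, Integer> recCounts = new EnumMap<>(WALRecord.RecordType.class);

try (WALIterator walIt = sharedCtx.wal().replay(ptr)) {
    while (walIt.hasNext())
        recCounts.merge(walIt.next().get2().type(), 1, Integer::sum);
}

info("WAL record counts after marker: " + recCounts);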
use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
the class LongDestroyDurableBackgroundTaskTest method prepareAndPopulateCluster.
/**
 * Starts cluster and populates it with data.
 *
 * @param nodeCnt Nodes count.
 * @param multicolumn Whether the index is multicolumn.
 * @param createLsnr Whether to register a checkpoint listener for the durable background task.
 * @return Ignite instance.
 * @throws Exception If failed.
 */
private IgniteEx prepareAndPopulateCluster(int nodeCnt, boolean multicolumn, boolean createLsnr) throws Exception {
    IgniteEx ignite = startGrids(nodeCnt);

    if (createLsnr) {
        GridCacheSharedContext ctx = ignite.context().cache().context();

        durableBackgroundTaskTestLsnr = new DurableBackgroundTaskTestListener(ctx.database().metaStorage());

        ((GridCacheDatabaseSharedManager)ctx.cache().context().database()).addCheckpointListener(durableBackgroundTaskTestLsnr);
    }

    ignite.cluster().state(ACTIVE);
    ignite.cluster().baselineAutoAdjustEnabled(false);

    IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(DEFAULT_CACHE_NAME);

    query(cache, "create table t (id integer primary key, p integer, f integer) with \"BACKUPS=1\"");

    createIndex(cache, multicolumn);

    batchQuery(ignite, "insert into t (id, p, f) values (?, ?, ?)", batchInsertArgs(5_000, 3));

    forceCheckpoint();

    checkSelectAndPlan(cache, true);

    final IgniteCache<Integer, Integer> finalCache = cache;

    new Thread(() -> finalCache.query(new SqlFieldsQuery("drop index " + IDX_NAME)).getAll()).start();
    // Wait until the index destroy task has modified some pages.
    doSleep(500);

    // The next checkpoint happens while the index deletion is still in progress.
    forceCheckpoint();

    return ignite;
}
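A hedged sketch of how the helper above could be driven (a hypothetical caller, not the actual test body): restart the cluster while the index destroy task is still running and check that the table stays queryable without the dropped index. The boolean passed to checkSelectAndPlan is assumed to toggle whether the plan must use the index.
// Hypothetical caller of prepareAndPopulateCluster.
IgniteEx ignite = prepareAndPopulateCluster(2, false, true);

// Restart the whole cluster while the durable background task is in progress.
stopAllGrids();

ignite = startGrids(2);
ignite.cluster().state(ACTIVE);

IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(DEFAULT_CACHE_NAME);

// After recovery the dropped index must not show up in the query plan.
checkSelectAndPlan(cache, false);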