Use of org.apache.ignite.internal.processors.cache.persistence.DataRegion in project ignite by apache.
The class IgniteKernal, method dataStorageReport.
/**
 */
private static String dataStorageReport(IgniteCacheDatabaseSharedManager db, DecimalFormat dblFmt, boolean includeMemoryStatistics) {
    // Off-heap params.
    Collection<DataRegion> regions = db.dataRegions();

    SB dataRegionsInfo = new SB();

    long loadedPages = 0;
    long offHeapUsedSummary = 0;
    long offHeapMaxSummary = 0;
    long offHeapCommSummary = 0;
    long pdsUsedSummary = 0;

    boolean persistenceEnabled = false;

    if (!F.isEmpty(regions)) {
        for (DataRegion region : regions) {
            DataRegionConfiguration regCfg = region.config();

            long pagesCnt = region.pageMemory().loadedPages();

            long offHeapUsed = region.pageMemory().systemPageSize() * pagesCnt;
            long offHeapInit = regCfg.getInitialSize();
            long offHeapMax = regCfg.getMaxSize();
            long offHeapComm = region.metrics().getOffHeapSize();

            long offHeapUsedInMBytes = offHeapUsed / MEGABYTE;
            long offHeapMaxInMBytes = offHeapMax / MEGABYTE;
            long offHeapCommInMBytes = offHeapComm / MEGABYTE;
            long offHeapInitInMBytes = offHeapInit / MEGABYTE;

            double freeOffHeapPct = offHeapMax > 0 ? ((double) ((offHeapMax - offHeapUsed) * 100)) / offHeapMax : -1;

            offHeapUsedSummary += offHeapUsed;
            offHeapMaxSummary += offHeapMax;
            offHeapCommSummary += offHeapComm;
            loadedPages += pagesCnt;

            String type = "user";

            try {
                if (region == db.dataRegion(null))
                    type = "default";
                else if (INTERNAL_DATA_REGION_NAMES.contains(regCfg.getName()))
                    type = "internal";
            } catch (IgniteCheckedException ice) {
                // Should never happen
                ice.printStackTrace();
            }

            dataRegionsInfo.a(" ^-- ").a(regCfg.getName()).a(" region [type=").a(type)
                .a(", persistence=").a(regCfg.isPersistenceEnabled())
                .a(", lazyAlloc=").a(regCfg.isLazyMemoryAllocation()).a(',').nl()
                .a(" ... ")
                .a("initCfg=").a(dblFmt.format(offHeapInitInMBytes))
                .a("MB, maxCfg=").a(dblFmt.format(offHeapMaxInMBytes))
                .a("MB, usedRam=").a(dblFmt.format(offHeapUsedInMBytes))
                .a("MB, freeRam=").a(dblFmt.format(freeOffHeapPct))
                .a("%, allocRam=").a(dblFmt.format(offHeapCommInMBytes)).a("MB");

            if (regCfg.isPersistenceEnabled()) {
                long pdsUsed = region.metrics().getTotalAllocatedSize();
                long pdsUsedInMBytes = pdsUsed / MEGABYTE;

                pdsUsedSummary += pdsUsed;

                dataRegionsInfo.a(", allocTotal=").a(dblFmt.format(pdsUsedInMBytes)).a("MB");

                persistenceEnabled = true;
            }

            dataRegionsInfo.a(']').nl();
        }
    }

    SB info = new SB();

    if (includeMemoryStatistics) {
        long offHeapUsedInMBytes = offHeapUsedSummary / MEGABYTE;
        long offHeapCommInMBytes = offHeapCommSummary / MEGABYTE;

        double freeOffHeapPct = offHeapMaxSummary > 0 ? ((double) ((offHeapMaxSummary - offHeapUsedSummary) * 100)) / offHeapMaxSummary : -1;

        info.a(" ^-- Off-heap memory [used=").a(dblFmt.format(offHeapUsedInMBytes))
            .a("MB, free=").a(dblFmt.format(freeOffHeapPct))
            .a("%, allocated=").a(dblFmt.format(offHeapCommInMBytes)).a("MB]").nl()
            .a(" ^-- Page memory [pages=").a(loadedPages).a("]").nl();
    }

    info.a(dataRegionsInfo);

    if (persistenceEnabled) {
        long pdsUsedMBytes = pdsUsedSummary / MEGABYTE;

        info.a(" ^-- Ignite persistence [used=").a(dblFmt.format(pdsUsedMBytes)).a("MB]").nl();
    }

    return info.toString();
}
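A similar per-region summary can also be assembled from the public metrics API, without relying on the internal IgniteCacheDatabaseSharedManager. The sketch below is illustrative only: it assumes a started node, that DataRegionMetrics exposes getName(), getOffHeapSize(), getPhysicalMemorySize() and getTotalAllocatedSize() (available in recent Ignite versions), and that metrics collection is enabled for the regions of interest (in some versions this requires DataRegionConfiguration.setMetricsEnabled(true)).

import java.text.DecimalFormat;

import org.apache.ignite.DataRegionMetrics;
import org.apache.ignite.Ignite;

public class PublicDataRegionReport {
    private static final long MEGABYTE = 1024 * 1024;

    /** Builds a rough per-region report from the public metrics API (labels mirror the internal report above). */
    public static String report(Ignite ignite) {
        DecimalFormat fmt = new DecimalFormat("#,##0");
        StringBuilder sb = new StringBuilder();

        for (DataRegionMetrics m : ignite.dataRegionMetrics()) {
            sb.append("  ^-- ").append(m.getName())
                .append(" [allocRam=").append(fmt.format(m.getOffHeapSize() / MEGABYTE))
                .append("MB, physicalRam=").append(fmt.format(m.getPhysicalMemorySize() / MEGABYTE))
                .append("MB, allocTotal=").append(fmt.format(m.getTotalAllocatedSize() / MEGABYTE))
                .append("MB]\n");
        }

        return sb.toString();
    }
}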
Use of org.apache.ignite.internal.processors.cache.persistence.DataRegion in project ignite by apache.
The class TxLog, method init.
/**
 * @param ctx Kernal context.
 * @throws IgniteCheckedException If failed.
 */
private void init(GridKernalContext ctx) throws IgniteCheckedException {
    String txLogName = TX_LOG_CACHE_NAME + "##Tree";

    DataRegion txLogDataRegion = mgr.dataRegion(TX_LOG_CACHE_NAME);

    GridCacheSharedContext<?, ?> cacheCtx = ctx.cache().context();

    if (CU.isPersistenceEnabled(ctx.config())) {
        String txLogReuseListName = TX_LOG_CACHE_NAME + "##ReuseList";

        mgr.checkpointReadLock();

        try {
            IgniteWriteAheadLogManager wal = cacheCtx.wal();

            PageMemoryEx pageMemory = (PageMemoryEx) txLogDataRegion.pageMemory();

            long metaId = PageMemory.META_PAGE_ID;
            long metaPage = pageMemory.acquirePage(TX_LOG_CACHE_ID, metaId);

            long treeRoot, reuseListRoot;

            boolean isNew = false;

            try {
                long pageAddr = pageMemory.writeLock(TX_LOG_CACHE_ID, metaId, metaPage);

                try {
                    if (PageIO.getType(pageAddr) != PageIO.T_META) {
                        // Initialize new page.
                        PageMetaIO io = PageMetaIOV2.VERSIONS.latest();

                        PageMetrics metrics = txLogDataRegion.metrics().pageMetrics();

                        io.initNewPage(pageAddr, metaId, pageMemory.pageSize(), metrics);

                        treeRoot = pageMemory.allocatePage(TX_LOG_CACHE_ID, INDEX_PARTITION, FLAG_IDX);
                        reuseListRoot = pageMemory.allocatePage(TX_LOG_CACHE_ID, INDEX_PARTITION, FLAG_IDX);

                        assert PageIdUtils.flag(treeRoot) == FLAG_IDX;
                        assert PageIdUtils.flag(reuseListRoot) == FLAG_IDX;

                        io.setTreeRoot(pageAddr, treeRoot);
                        io.setReuseListRoot(pageAddr, reuseListRoot);
                        if (PageHandler.isWalDeltaRecordNeeded(pageMemory, TX_LOG_CACHE_ID, metaId, metaPage, wal, null)) {
                            assert io.getType() == PageIO.T_META;

                            wal.log(new MetaPageInitRecord(TX_LOG_CACHE_ID, metaId, io.getType(), io.getVersion(), treeRoot, reuseListRoot));
                        }

                        isNew = true;
                    } else {
                        PageMetaIO io = PageIO.getPageIO(pageAddr);

                        treeRoot = io.getTreeRoot(pageAddr);
                        reuseListRoot = io.getReuseListRoot(pageAddr);

                        assert PageIdUtils.flag(treeRoot) == FLAG_IDX : U.hexLong(treeRoot) + ", TX_LOG_CACHE_ID=" + TX_LOG_CACHE_ID;
                        assert PageIdUtils.flag(reuseListRoot) == FLAG_IDX : U.hexLong(reuseListRoot) + ", TX_LOG_CACHE_ID=" + TX_LOG_CACHE_ID;
                    }
                } finally {
                    pageMemory.writeUnlock(TX_LOG_CACHE_ID, metaId, metaPage, null, isNew);
                }
            } finally {
                pageMemory.releasePage(TX_LOG_CACHE_ID, metaId, metaPage);
            }

            reuseList = new ReuseListImpl(TX_LOG_CACHE_ID, txLogReuseListName, pageMemory, wal, reuseListRoot, isNew,
                cacheCtx.diagnostic().pageLockTracker(), ctx, null, FLAG_IDX);

            tree = new TxLogTree(TX_LOG_CACHE_NAME, pageMemory, wal, treeRoot, reuseList, ctx.failure(), isNew,
                cacheCtx.diagnostic().pageLockTracker());

            ((GridCacheDatabaseSharedManager) mgr).addCheckpointListener(this, txLogDataRegion);
        } finally {
            mgr.checkpointReadUnlock();
        }
    } else {
        PageMemory pageMemory = txLogDataRegion.pageMemory();

        ReuseList reuseList1 = mgr.reuseList(TX_LOG_CACHE_NAME);

        long treeRoot;

        if ((treeRoot = reuseList1.takeRecycledPage()) == 0L)
            treeRoot = pageMemory.allocatePage(TX_LOG_CACHE_ID, INDEX_PARTITION, FLAG_IDX);

        tree = new TxLogTree(txLogName, pageMemory, null, treeRoot, reuseList1, ctx.failure(), true,
            cacheCtx.diagnostic().pageLockTracker());
    }
}
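The persistence branch above follows a fixed lock discipline around the meta page: acquirePage, then writeLock, then read-or-initialize, then writeUnlock and releasePage in reverse order inside finally blocks. The helper below strips the TxLog specifics down to that skeleton. It is only a sketch built from the internal calls already visible in init(); these are unstable internal APIs, and a read lock would normally suffice for a pure read, but writeLock is kept to mirror the snippet.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.pagemem.PageMemory;
import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx;
import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIO;

/** Sketch: read the tree root stored in a cache's meta page, using the same lock pattern as TxLog#init. */
final class MetaPageReadSketch {
    /** @return Tree root page id, or -1 if the meta page has not been initialized yet. */
    static long readTreeRoot(PageMemoryEx pageMem, int cacheId) throws IgniteCheckedException {
        long metaId = PageMemory.META_PAGE_ID;
        long metaPage = pageMem.acquirePage(cacheId, metaId);

        try {
            long pageAddr = pageMem.writeLock(cacheId, metaId, metaPage);

            try {
                if (PageIO.getType(pageAddr) != PageIO.T_META)
                    return -1; // Meta page was never initialized.

                PageMetaIO io = PageIO.getPageIO(pageAddr);

                return io.getTreeRoot(pageAddr);
            } finally {
                // Nothing was modified, so the page is released as non-dirty.
                pageMem.writeUnlock(cacheId, metaId, metaPage, null, false);
            }
        } finally {
            pageMem.releasePage(cacheId, metaId, metaPage);
        }
    }
}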
Use of org.apache.ignite.internal.processors.cache.persistence.DataRegion in project ignite by apache.
The class LoadAllWarmUpStrategy, method loadDataInfo.
/**
 * Calculates which cache groups, partitions and how many pages can be loaded
 * into the data region. For each group the calculation starts with, and
 * includes, the index partition.
 *
 * @param region Data region.
 * @return Loadable groups and partitions.
 * @throws IgniteCheckedException If failed.
 */
protected Map<CacheGroupContext, List<LoadPartition>> loadDataInfo(DataRegion region) throws IgniteCheckedException {
    // Get cache groups of data region.
    List<CacheGroupContext> regionGrps = grpCtxSup.get().stream()
        .filter(grpCtx -> region.equals(grpCtx.dataRegion())).collect(toList());

    long availableLoadPageCnt = availableLoadPageCount(region);

    // Computing groups, partitions, and pages to load into data region.
    Map<CacheGroupContext, List<LoadPartition>> loadableGrps = new LinkedHashMap<>();

    for (int i = 0; i < regionGrps.size() && availableLoadPageCnt > 0; i++) {
        CacheGroupContext grp = regionGrps.get(i);

        // Index partition in priority.
        List<GridDhtLocalPartition> locParts = grp.topology().localPartitions();

        for (int j = -1; j < locParts.size() && availableLoadPageCnt > 0; j++) {
            int p = j == -1 ? INDEX_PARTITION : locParts.get(j).id();

            long partPageCnt = grp.shared().pageStore().pages(grp.groupId(), p);

            if (partPageCnt > 0) {
                long pageCnt = (availableLoadPageCnt - partPageCnt) >= 0 ? partPageCnt : availableLoadPageCnt;

                availableLoadPageCnt -= pageCnt;

                loadableGrps.computeIfAbsent(grp, grpCtx -> new ArrayList<>()).add(new LoadPartition(p, pageCnt));
            }
        }
    }

    return loadableGrps;
}
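At its core, loadDataInfo applies a simple budget cut-off: partitions are visited in order (index partition first) and each one receives either its full page count or whatever remains of the budget, after which the loops stop. The toy below reproduces just that arithmetic with plain collections; the partition ids and page counts are made-up numbers, not Ignite data.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/** Toy model of the page-budget cut-off used by loadDataInfo (all values here are hypothetical). */
public class WarmUpBudgetSketch {
    public static void main(String[] args) {
        // Partition id -> pages it occupies on disk; -1 stands for the index partition, which goes first.
        Map<Integer, Long> partPages = new LinkedHashMap<>();
        partPages.put(-1, 40L);
        partPages.put(0, 100L);
        partPages.put(1, 100L);
        partPages.put(2, 100L);

        long budget = 180; // Pages that still fit into the data region.

        List<long[]> plan = new ArrayList<>(); // Pairs of {partition, pagesToLoad}.

        for (Map.Entry<Integer, Long> e : partPages.entrySet()) {
            if (budget <= 0)
                break;

            // Full partition if it fits, otherwise whatever is left of the budget.
            long cnt = Math.min(e.getValue(), budget);

            budget -= cnt;

            plan.add(new long[] {e.getKey(), cnt});
        }

        for (long[] p : plan)
            System.out.println("partition " + p[0] + " -> load " + p[1] + " pages");
        // Prints: -1 -> 40, 0 -> 100, 1 -> 40; partition 2 is skipped entirely.
    }
}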
Use of org.apache.ignite.internal.processors.cache.persistence.DataRegion in project ignite by apache.
The class FilePageStoreManager, method initForCache.
/**
 * @param grpDesc Cache group descriptor.
 * @param ccfg Cache configuration.
 * @return Cache store holder.
 * @throws IgniteCheckedException If failed.
 */
private CacheStoreHolder initForCache(CacheGroupDescriptor grpDesc, CacheConfiguration ccfg) throws IgniteCheckedException {
    assert !grpDesc.sharedGroup() || ccfg.getGroupName() != null : ccfg.getName();

    File cacheWorkDir = cacheWorkDir(ccfg);

    String dataRegionName = grpDesc.config().getDataRegionName();
    DataRegion dataRegion = cctx.database().dataRegion(dataRegionName);
    PageMetrics pageMetrics = dataRegion.metrics().cacheGrpPageMetrics(grpDesc.groupId());

    return initDir(cacheWorkDir, grpDesc.groupId(), grpDesc.config().getAffinity().partitions(), pageMetrics,
        ccfg.isEncryptionEnabled());
}
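The dataRegionName resolved here comes straight from the cache configuration. Below is a minimal configuration sketch showing how a cache ends up bound to a named, persistence-enabled region; the region and cache names are made up for illustration, and the cluster must be activated because persistence is enabled.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterState;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;

public class NamedRegionConfigExample {
    public static void main(String[] args) {
        // A persistence-enabled region with an explicit name (hypothetical values).
        DataRegionConfiguration persistedRegion = new DataRegionConfiguration()
            .setName("persisted-region")
            .setPersistenceEnabled(true)
            .setMaxSize(512L * 1024 * 1024);

        DataStorageConfiguration storageCfg = new DataStorageConfiguration()
            .setDataRegionConfigurations(persistedRegion);

        // The cache references the region by name; this is the name initForCache() resolves at startup.
        CacheConfiguration<Integer, String> cacheCfg = new CacheConfiguration<Integer, String>("myCache")
            .setDataRegionName("persisted-region");

        IgniteConfiguration cfg = new IgniteConfiguration()
            .setDataStorageConfiguration(storageCfg)
            .setCacheConfiguration(cacheCfg);

        try (Ignite ignite = Ignition.start(cfg)) {
            // Persistence requires explicit cluster activation.
            ignite.cluster().state(ClusterState.ACTIVE);

            ignite.cache("myCache").put(1, "value");
        }
    }
}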
Use of org.apache.ignite.internal.processors.cache.persistence.DataRegion in project ignite by apache.
The class CheckpointListenerForRegionTest, method testCheckpointListenersInvokedOnlyIfRegionConfigured.
/**
 * 1. Start one node.
 * 2. Configure the default cache.
 * 3. Set the checkpoint listeners (for the default region and for all regions) to watch the checkpoint.
 * 4. Fill the data and trigger the checkpoint.
 * 5. Expected: Both listeners should be called.
 * 6. Remove the default region from the checkpoint.
 * 7. Fill the data and trigger the checkpoint.
 * 8. Expected: Only the listener for all regions should be called.
 * 9. Return the default region back to the checkpoint.
 * 10. Fill the data and trigger the checkpoint.
 * 11. Expected: Both listeners should be called.
 *
 * @throws Exception If failed.
 */
@Test
public void testCheckpointListenersInvokedOnlyIfRegionConfigured() throws Exception {
    // given: One started node with default cache.
    IgniteEx ignite0 = startGrid(0);

    ignite0.cluster().active(true);

    IgniteCache<Integer, Object> cache = ignite0.cache(DEFAULT_CACHE_NAME);

    GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) (ignite0.context().cache().context().database());

    DataRegion defaultRegion = db.checkpointedDataRegions().stream()
        .filter(region -> DFLT_DATA_REG_DEFAULT_NAME.equals(region.config().getName()))
        .findFirst().orElse(null);

    assertNotNull("Expected default data region in checkpoint list is not found.", defaultRegion);

    // and: Configure the listeners (for default region and for all regions) for watching for checkpoint.
    AtomicInteger checkpointListenerDefaultRegionCounter = checkpointListenerWatcher(db, defaultRegion);
    AtomicInteger checkpointListenerAllRegionCounter = checkpointListenerWatcher(db, null);

    // when: Checkpoint happened.
    fillDataAndCheckpoint(ignite0, cache);

    // then: Both listeners should be called.
    assertEquals(CALLS_COUNT_PER_CHECKPOINT, checkpointListenerDefaultRegionCounter.get());
    assertEquals(CALLS_COUNT_PER_CHECKPOINT, checkpointListenerAllRegionCounter.get());

    // Remove the default region from checkpoint.
    db.checkpointedDataRegions().remove(defaultRegion);

    // when: Checkpoint happened.
    fillDataAndCheckpoint(ignite0, cache);

    // then: Only listener for all regions should be called.
    assertEquals(CALLS_COUNT_PER_CHECKPOINT, checkpointListenerDefaultRegionCounter.get());
    assertEquals(2 * CALLS_COUNT_PER_CHECKPOINT, checkpointListenerAllRegionCounter.get());

    assertTrue(
        "Expected default data region in all regions list is not found.",
        db.dataRegions().stream().anyMatch(region -> DFLT_DATA_REG_DEFAULT_NAME.equals(region.config().getName()))
    );

    // Return default region back to the checkpoint.
    db.checkpointedDataRegions().add(defaultRegion);

    // when: Checkpoint happened.
    fillDataAndCheckpoint(ignite0, cache);

    // then: Both listeners should be called.
    assertEquals(2 * CALLS_COUNT_PER_CHECKPOINT, checkpointListenerDefaultRegionCounter.get());
    assertEquals(3 * CALLS_COUNT_PER_CHECKPOINT, checkpointListenerAllRegionCounter.get());
}
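The helper checkpointListenerWatcher used above is not shown in this excerpt. A plausible sketch is given below; it assumes the internal CheckpointListener interface exposes beforeCheckpointBegin/onMarkCheckpointBegin/onCheckpointBegin callbacks (the exact callback set, and hence the value of CALLS_COUNT_PER_CHECKPOINT, may differ between Ignite versions), and it relies only on the two-argument addCheckpointListener(listener, region) seen in the TxLog example, where a null region means "all regions".

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.ignite.internal.processors.cache.persistence.DataRegion;
import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener;

class CheckpointWatcherSketch {
    /**
     * Registers a counting listener. With {@code region == null} the listener fires for every checkpoint,
     * otherwise only for checkpoints that include that region.
     * NOTE: the callback methods below are an assumption about the internal CheckpointListener contract.
     */
    static AtomicInteger checkpointListenerWatcher(GridCacheDatabaseSharedManager db, DataRegion region) {
        AtomicInteger cntr = new AtomicInteger();

        db.addCheckpointListener(new CheckpointListener() {
            @Override public void beforeCheckpointBegin(Context ctx) {
                cntr.incrementAndGet();
            }

            @Override public void onMarkCheckpointBegin(Context ctx) {
                cntr.incrementAndGet();
            }

            @Override public void onCheckpointBegin(Context ctx) {
                cntr.incrementAndGet();
            }
        }, region);

        return cntr;
    }
}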