Use of org.apache.ignite.internal.pagemem.PageMemory in project ignite by apache.
The class InlineIndexHelperTest, method putAndCompare.
/**
 * @param v1 Value 1.
 * @param v2 Value 2.
 * @param maxSize Max inline size.
 * @return Compare result.
 * @throws Exception If failed.
 */
private int putAndCompare(String v1, String v2, int maxSize) throws Exception {
    DataRegionConfiguration plcCfg = new DataRegionConfiguration().setInitialSize(1024 * MB).setMaxSize(1024 * MB);

    PageMemory pageMem = new PageMemoryNoStoreImpl(
        log,
        new UnsafeMemoryProvider(log),
        null,
        PAGE_SIZE,
        plcCfg,
        new DataRegionMetricsImpl(plcCfg),
        false);

    pageMem.start();

    long pageId = 0L;
    long page = 0L;

    try {
        pageId = pageMem.allocatePage(CACHE_ID, 1, PageIdAllocator.FLAG_DATA);
        page = pageMem.acquirePage(CACHE_ID, pageId);

        long pageAddr = pageMem.readLock(CACHE_ID, pageId, page);

        int off = 0;

        InlineIndexHelper ih = new InlineIndexHelper(Value.STRING, 1, 0, CompareMode.getInstance(null, 0));

        ih.put(pageAddr, off, v1 == null ? ValueNull.INSTANCE : ValueString.get(v1), maxSize);

        Comparator<Value> comp = new Comparator<Value>() {
            @Override public int compare(Value o1, Value o2) {
                throw new AssertionError("Optimized algorithm should be used.");
            }
        };

        return ih.compare(pageAddr, off, maxSize, v2 == null ? ValueNull.INSTANCE : ValueString.get(v2), comp);
    }
    finally {
        if (page != 0L)
            pageMem.releasePage(CACHE_ID, pageId, page);

        pageMem.stop();
    }
}
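For context, a minimal usage sketch follows (a hypothetical test method, not part of the quoted class; it assumes the surrounding InlineIndexHelperTest fixture, JUnit's Assert methods, and a maxSize large enough to inline both strings fully):

/** Hypothetical usage sketch of the helper above. */
@Test
public void testStringInlineCompare() throws Exception {
    // Both values fit entirely into the inline buffer, so the inline comparison
    // decides the result and the throwing fallback comparator is never invoked.
    assertEquals(0, putAndCompare("abc", "abc", 255));
    assertTrue(putAndCompare("aaa", "aab", 255) < 0);
    assertTrue(putAndCompare("aab", "aaa", 255) > 0);
}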
Use of org.apache.ignite.internal.pagemem.PageMemory in project ignite by apache.
The class UpgradePendingTreeToPerPartitionTask, method processPendingTree.
/**
 * Move pending rows for cache group entries to the per-partition PendingTree.
 * Invalid pending rows will be ignored.
 *
 * @param grp Cache group.
 * @param oldPendingEntries Old-style PendingTree.
 * @throws IgniteCheckedException If an error occurs.
 */
private void processPendingTree(CacheGroupContext grp, PendingEntriesTree oldPendingEntries) throws IgniteCheckedException {
    final PageMemory pageMemory = grp.dataRegion().pageMemory();
    final IgniteCacheDatabaseSharedManager db = grp.shared().database();
    final Set<Integer> cacheIds = grp.cacheIds();

    PendingRow row = null;

    int processedEntriesCnt = 0;
    int skippedEntries = 0;

    // Re-acquire checkpoint lock for every next batch.
    while (!Thread.currentThread().isInterrupted()) {
        int cnt = 0;

        db.checkpointReadLock();

        try {
            GridCursor<PendingRow> cursor = oldPendingEntries.find(row, null, WITHOUT_KEY);

            while (cnt++ < BATCH_SIZE && cursor.next()) {
                row = cursor.get();

                assert row.link != 0 && row.expireTime != 0 : row;

                GridCacheEntryEx entry;

                // Lost cache or lost entry.
                if (!cacheIds.contains(row.cacheId) || (entry = getEntry(grp, row)) == null) {
                    skippedEntries++;

                    oldPendingEntries.removex(row);

                    continue;
                }

                entry.lockEntry();

                try {
                    if (processRow(pageMemory, grp, row))
                        processedEntriesCnt++;
                    else
                        skippedEntries++;
                }
                finally {
                    entry.unlockEntry();
                }

                oldPendingEntries.removex(row);
            }

            if (cnt < BATCH_SIZE)
                break;
        }
        finally {
            db.checkpointReadUnlock();
        }
    }

    log.info("PendingTree upgraded: [grpId=" + grp.groupId() +
        ", grpName=" + grp.name() +
        ", processedEntries=" + processedEntriesCnt +
        ", failedEntries=" + skippedEntries + ']');
}
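The batching shape above is worth isolating: the checkpoint read lock is re-acquired for every batch so a long scan never blocks checkpointing for its entire duration. A stripped-down sketch of that pattern, with hasMoreWork() and processNext() as illustrative placeholders rather than Ignite API:

// Generic sketch of the per-batch checkpoint-lock pattern used above.
private void processInBatches(IgniteCacheDatabaseSharedManager db) throws IgniteCheckedException {
    while (!Thread.currentThread().isInterrupted()) {
        int cnt = 0;

        db.checkpointReadLock();

        try {
            // hasMoreWork() and processNext() are hypothetical placeholders.
            while (cnt++ < BATCH_SIZE && hasMoreWork())
                processNext();
        }
        finally {
            db.checkpointReadUnlock();
        }

        // Fewer than BATCH_SIZE items were processed: the source is exhausted.
        if (cnt < BATCH_SIZE)
            break;
    }
}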
Use of org.apache.ignite.internal.pagemem.PageMemory in project ignite by apache.
The class GridCacheDatabaseSharedManager, method cleanupRestoredCaches.
/** {@inheritDoc} */
@Override
public void cleanupRestoredCaches() {
    if (dataRegionMap.isEmpty())
        return;

    boolean hasMvccCache = false;

    for (CacheGroupDescriptor grpDesc : cctx.cache().cacheGroupDescriptors().values()) {
        hasMvccCache |= grpDesc.config().getAtomicityMode() == TRANSACTIONAL_SNAPSHOT;

        String regionName = grpDesc.config().getDataRegionName();

        DataRegion region = regionName != null ? dataRegionMap.get(regionName) : dfltDataRegion;

        if (region == null)
            continue;

        if (log.isInfoEnabled())
            log.info("Page memory " + region.config().getName() + " for " + grpDesc + " has been invalidated.");

        int partitions = grpDesc.config().getAffinity().partitions();

        if (region.pageMemory() instanceof PageMemoryEx) {
            PageMemoryEx memEx = (PageMemoryEx)region.pageMemory();

            for (int partId = 0; partId < partitions; partId++)
                memEx.invalidate(grpDesc.groupId(), partId);

            memEx.invalidate(grpDesc.groupId(), PageIdAllocator.INDEX_PARTITION);
        }

        if (grpDesc.config().isEncryptionEnabled())
            cctx.kernalContext().encryption().onCacheGroupStop(grpDesc.groupId());
    }

    if (!hasMvccCache && dataRegionMap.containsKey(TxLog.TX_LOG_CACHE_NAME)) {
        PageMemory memory = dataRegionMap.get(TxLog.TX_LOG_CACHE_NAME).pageMemory();

        if (memory instanceof PageMemoryEx)
            ((PageMemoryEx)memory).invalidate(TxLog.TX_LOG_CACHE_ID, PageIdAllocator.INDEX_PARTITION);
    }

    final boolean hasMvccCache0 = hasMvccCache;

    storeMgr.cleanupPageStoreIfMatch(new Predicate<Integer>() {
        @Override public boolean test(Integer grpId) {
            return MetaStorage.METASTORAGE_CACHE_ID != grpId && (TxLog.TX_LOG_CACHE_ID != grpId || !hasMvccCache0);
        }
    }, true);
}
Use of org.apache.ignite.internal.pagemem.PageMemory in project ignite by apache.
The class MvccUtils, method invoke.
/**
 * Encapsulates the common logic for working with row MVCC info: page locking/unlocking, checks and so on.
 * Strategy pattern.
 *
 * @param cctx Cache context.
 * @param link Row link.
 * @param clo Closure to apply.
 * @param snapshot Mvcc snapshot.
 * @param <R> Return type.
 * @return Result.
 * @throws IgniteCheckedException If failed.
 */
private static <R> R invoke(GridCacheContext cctx, long link, MvccClosure<R> clo, MvccSnapshot snapshot) throws IgniteCheckedException {
    assert cctx.mvccEnabled();

    PageMemory pageMem = cctx.dataRegion().pageMemory();

    int grpId = cctx.groupId();
    int pageSize = pageMem.realPageSize(grpId);

    long pageId = pageId(link);
    int itemId = itemId(link);

    long page = pageMem.acquirePage(grpId, pageId);

    try {
        long pageAddr = pageMem.readLock(grpId, pageId, page);

        try {
            DataPageIO dataIo = DataPageIO.VERSIONS.forPage(pageAddr);

            return invoke(cctx, dataIo, pageAddr, itemId, pageSize, clo, snapshot);
        }
        finally {
            pageMem.readUnlock(grpId, pageId, page);
        }
    }
    finally {
        pageMem.releasePage(grpId, pageId, page);
    }
}
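The acquire/read-lock/unlock/release discipline shown here is generic enough to factor into a helper. A minimal sketch of the same strategy pattern with a plain callback (PageReadClosure and readPage are illustrative names, not Ignite API):

// Illustrative sketch: the helper owns the page lifecycle, the caller supplies only the read logic.
interface PageReadClosure<R> {
    R apply(long pageAddr) throws IgniteCheckedException;
}

static <R> R readPage(PageMemory pageMem, int grpId, long pageId, PageReadClosure<R> clo) throws IgniteCheckedException {
    long page = pageMem.acquirePage(grpId, pageId);

    try {
        long pageAddr = pageMem.readLock(grpId, pageId, page);

        try {
            return clo.apply(pageAddr);
        }
        finally {
            pageMem.readUnlock(grpId, pageId, page);
        }
    }
    finally {
        pageMem.releasePage(grpId, pageId, page);
    }
}

With such a helper, the body of invoke(...) would reduce to a single readPage(pageMem, grpId, pageId, addr -> ...) call around the DataPageIO lookup.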
Use of org.apache.ignite.internal.pagemem.PageMemory in project ignite by apache.
The class IgniteCacheDatabaseSharedManager, method ensureFreeSpaceForInsert.
/**
 * Checks that the given {@code region} has enough space for putting a new entry.
 *
 * This method makes sense if and only if the data region is not persistent
 * ({@link DataRegionConfiguration#isPersistenceEnabled()}) and page eviction is disabled
 * ({@link DataPageEvictionMode#DISABLED}).
 *
 * A non-persistent region should reserve a number of pages to support its free list {@link AbstractFreeList}.
 * For example, removing a row from the underlying store may require allocating a new data page
 * in order to move a tracked page from one bucket to another one that has no free space for a new stripe.
 * See {@link AbstractFreeList#removeDataRowByLink}.
 * Therefore, inserting a new entry should be prevented once a certain threshold is exceeded.
 *
 * @param region Data region to be checked.
 * @param dataRowSize Size of the data row to be inserted.
 * @throws IgniteOutOfMemoryException If the given data region does not have enough free space
 * for putting a new entry.
 */
public void ensureFreeSpaceForInsert(DataRegion region, int dataRowSize) throws IgniteOutOfMemoryException {
    if (region == null)
        return;

    DataRegionConfiguration regCfg = region.config();

    if (regCfg.getPageEvictionMode() != DataPageEvictionMode.DISABLED || regCfg.isPersistenceEnabled())
        return;

    long memorySize = regCfg.getMaxSize();

    PageMemory pageMem = region.pageMemory();

    CacheFreeList freeList = freeListMap.get(regCfg.getName());

    long nonEmptyPages = (pageMem.loadedPages() - freeList.emptyDataPages());

    // The maximum number of pages that can be allocated (memorySize / systemPageSize)
    // should be greater than or equal to the pages required for inserting a new entry, plus
    // the current number of non-empty pages, plus the number of pages that may be required to move
    // all pages to a reuse bucket, which equals nonEmptyPages * 8 / pageSize, where 8 is the size of a link.
    // Note that not the whole page can be used for storing links,
    // see PagesListNodeIO and PagesListMetaIO#getCapacity(), so we pessimistically multiply the result by 1.5;
    // either way, the number of required pages is less than 1 percent.
    boolean oomThreshold = (memorySize / pageMem.systemPageSize()) <
        ((double)dataRowSize / pageMem.pageSize() + nonEmptyPages * (8.0 * 1.5 / pageMem.pageSize() + 1) + 256);

    if (oomThreshold) {
        IgniteOutOfMemoryException oom = new IgniteOutOfMemoryException("Out of memory in data region [" +
            "name=" + regCfg.getName() +
            ", initSize=" + U.readableSize(regCfg.getInitialSize(), false) +
            ", maxSize=" + U.readableSize(regCfg.getMaxSize(), false) +
            ", persistenceEnabled=" + regCfg.isPersistenceEnabled() + "] Try the following:" + U.nl() +
            " ^-- Increase maximum off-heap memory size (DataRegionConfiguration.maxSize)" + U.nl() +
            " ^-- Enable Ignite persistence (DataRegionConfiguration.persistenceEnabled)" + U.nl() +
            " ^-- Enable eviction or expiration policies");

        if (cctx.kernalContext() != null)
            cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, oom));

        throw oom;
    }
}
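To make the threshold concrete, here is a worked, self-contained sketch with assumed values (all numbers are illustrative, not taken from a real deployment): a 512 MiB region with 4 KiB pages and roughly 130,000 non-empty pages fails the check.

// Worked example of the oomThreshold expression with assumed values.
public class FreeSpaceThresholdSketch {
    public static void main(String[] args) {
        long memorySize = 512L * 1024 * 1024; // Region max size: 512 MiB (assumed).
        int sysPageSize = 4096 + 128;         // Page size plus an assumed per-page bookkeeping overhead.
        int pageSize = 4096;                  // Configured page size (assumed).
        long nonEmptyPages = 130_000;         // loadedPages() - emptyDataPages() (assumed).
        int dataRowSize = 256;                // Size of the row being inserted (assumed).

        double maxPages = memorySize / sysPageSize;      // Integer division: ~127,100 allocatable pages.
        double required = (double)dataRowSize / pageSize // Pages needed for the new row itself.
            + nonEmptyPages * (8.0 * 1.5 / pageSize + 1) // Non-empty pages plus reuse-bucket link pages.
            + 256;                                       // Fixed safety margin.

        // Prints true: about 130,637 required pages exceed the ~127,100 that fit into the region.
        System.out.println(maxPages < required);
    }
}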