Use of org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager.CacheDataStore in project ignite by apache.
The class CachePartitionDefragmentationManager, method defragmentOnePartition.
/**
 * Defragment one given partition.
 */
private boolean defragmentOnePartition(
    CacheGroupContext oldGrpCtx,
    int grpId,
    File workDir,
    GridCacheOffheapManager offheap,
    FileVersionCheckingFactory pageStoreFactory,
    GridCompoundFuture<Object, Object> cmpFut,
    PageMemoryEx oldPageMem,
    CacheGroupContext newGrpCtx,
    CacheDataStore oldCacheDataStore
) throws IgniteCheckedException {
    TreeIterator treeIter = new TreeIterator(pageSize);

    checkCancellation();

    int partId = oldCacheDataStore.partId();

    PartitionContext partCtx = new PartitionContext(workDir, grpId, partId, partDataRegion,
        mappingDataRegion, oldGrpCtx, newGrpCtx, oldCacheDataStore, pageStoreFactory);

    if (skipAlreadyDefragmentedPartition(workDir, grpId, partId, log)) {
        partCtx.createPageStore(() -> defragmentedPartMappingFile(workDir, partId).toPath(),
            partCtx.mappingPagesAllocated, partCtx.mappingPageMemory);

        linkMapByPart.put(partId, partCtx.createLinkMapTree(false));

        return false;
    }

    partCtx.createPageStore(() -> defragmentedPartMappingFile(workDir, partId).toPath(),
        partCtx.mappingPagesAllocated, partCtx.mappingPageMemory);

    linkMapByPart.put(partId, partCtx.createLinkMapTree(true));

    checkCancellation();

    partCtx.createPageStore(() -> defragmentedPartTmpFile(workDir, partId).toPath(),
        partCtx.partPagesAllocated, partCtx.partPageMemory);

    partCtx.createNewCacheDataStore(offheap);
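    // Copy every row from the old partition into the new store, recording the
    // old-link -> new-link pairs in the link map tree created above.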
    copyPartitionData(partCtx, treeIter);

    DefragmentationPageReadWriteManager pageMgr =
        (DefragmentationPageReadWriteManager)partCtx.partPageMemory.pageManager();

    PageStore oldPageStore = filePageStoreMgr.getStore(grpId, partId);

    status.onPartitionDefragmented(
        oldGrpCtx,
        oldPageStore.size(),
        pageSize + partCtx.partPagesAllocated.get() * pageSize // New size, including the file header page.
    );

    // TODO Move inside of defragmentSinglePartition.
    IgniteInClosure<IgniteInternalFuture<?>> cpLsnr = fut -> {
        if (fut.error() == null) {
            if (log.isDebugEnabled()) {
                log.debug(S.toString("Partition defragmented",
                    "grpId", grpId, false,
                    "partId", partId, false,
                    "oldPages", oldPageStore.pages(), false,
                    "newPages", partCtx.partPagesAllocated.get() + 1, false,
                    "mappingPages", partCtx.mappingPagesAllocated.get() + 1, false,
                    "pageSize", pageSize, false,
                    "partFile", defragmentedPartFile(workDir, partId).getName(), false,
                    "workDir", workDir, false
                ));
            }

            oldPageMem.invalidate(grpId, partId);

            partCtx.partPageMemory.invalidate(grpId, partId);

            // The store becomes invalid in a moment anyway; drop it from the map now.
            pageMgr.pageStoreMap().removePageStore(grpId, partId);

            renameTempPartitionFile(workDir, partId);
        }
    };
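    // Force a checkpoint so the defragmented pages reach disk; the listener above
    // then invalidates stale page memory and renames the temporary partition file.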
    GridFutureAdapter<?> cpFut = defragmentationCheckpoint
        .forceCheckpoint("partition defragmented", null)
        .futureFor(FINISHED);

    cpFut.listen(cpLsnr);

    cmpFut.add((IgniteInternalFuture<Object>)cpFut);

    return true;
}
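A false return value means the partition had already been defragmented on a previous run and only its link map had to be restored. The method is invoked once per surviving partition, in parallel; the fan-out in executeDefragmentation (shown in full below) boils down to the following, with error handling elided:

// Condensed from the executeDefragmentation snippet below: one call per
// partition that still has a page store, executed on the defragmentation pool.
IgniteUtils.doInParallel(
    defragmentationThreadPool,
    oldCacheDataStores,
    oldCacheDataStore -> defragmentOnePartition(oldGrpCtx, grpId, workDir, offheap,
        pageStoreFactory, cmpFut, oldPageMem, newGrpCtx, oldCacheDataStore)
);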
Use of org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager.CacheDataStore in project ignite by apache.
The class GridDhtLocalPartition, method destroyCacheDataStore.
/**
 * Releases the data store created for this partition.
 */
private void destroyCacheDataStore() {
    try {
        CacheDataStore store = dataStore();

        cctx.offheap().destroyCacheDataStore(id, store);
    }
    catch (IgniteCheckedException e) {
        log.error("Unable to destroy cache data store on partition eviction [id=" + id + "]", e);
    }
}
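For reference, the same store can be reached from outside the partition class; a minimal sketch, assuming a GridCacheContext named cacheCtx and a partition id in scope (both illustrative, not from the original):

// Sketch: resolve a local partition's CacheDataStore and release it through
// the offheap manager, mirroring the method above.
GridDhtLocalPartition part = cacheCtx.dht().topology().localPartition(partId);

if (part != null) {
    try {
        cacheCtx.offheap().destroyCacheDataStore(part.id(), part.dataStore());
    }
    catch (IgniteCheckedException e) {
        log.error("Unable to destroy cache data store [id=" + part.id() + "]", e);
    }
}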
Use of org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager.CacheDataStore in project ignite by apache.
The class GridCommandHandlerIndexingUtils, method breakCacheDataTree.
/**
 * Deletes rows from the cache without updating indexes.
 *
 * @param log Logger.
 * @param internalCache Cache.
 * @param partId Partition number.
 * @param filter Entry filter.
 */
static <K, V> void breakCacheDataTree(
    IgniteLogger log,
    IgniteInternalCache<K, V> internalCache,
    int partId,
    @Nullable BiPredicate<Integer, Entry<K, V>> filter
) {
    requireNonNull(log);
    requireNonNull(internalCache);

    GridCacheContext<K, V> cacheCtx = internalCache.context();

    CacheDataStore cacheDataStore = cacheCtx.dht().topology().localPartition(partId).dataStore();

    String delegate = "delegate";

    if (hasField(cacheDataStore, delegate))
        cacheDataStore = field(cacheDataStore, delegate);

    CacheDataRowStore cacheDataRowStore = field(cacheDataStore, "rowStore");
    CacheDataTree cacheDataTree = field(cacheDataStore, "dataTree");

    String cacheName = internalCache.name();

    QueryCursor<Entry<K, V>> qryCursor = cacheCtx.kernalContext().grid().cache(cacheName)
        .withKeepBinary().query(new ScanQuery<>(partId));

    Iterator<Entry<K, V>> cacheEntryIter = qryCursor.iterator();

    IgniteCacheDatabaseSharedManager db = cacheCtx.shared().database();

    int cacheId = CU.cacheId(cacheName);
    int i = 0;

    while (cacheEntryIter.hasNext()) {
        Entry<K, V> entry = cacheEntryIter.next();

        if (nonNull(filter) && !filter.test(i++, entry))
            continue;

        db.checkpointReadLock();

        try {
            CacheDataRow oldRow = cacheDataTree.remove(
                new SearchRow(cacheId, cacheCtx.toCacheKeyObject(entry.getKey())));

            if (nonNull(oldRow))
                cacheDataRowStore.removeRow(oldRow.link(), INSTANCE);
        }
        catch (IgniteCheckedException e) {
            throw new IgniteException("Failed to remove key skipping indexes: " + entry, e);
        }
        finally {
            db.checkpointReadUnlock();
        }
    }
}
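In index-validation tests this helper is handed an internal cache and a predicate selecting which entries to corrupt; a minimal usage sketch (the node handle, cache name, and predicate below are illustrative, not from the original):

// Sketch: remove every second entry of partition 0 from the data tree only,
// leaving the indexes stale so that validation tooling can detect it.
IgniteEx ignite = startGrid(0); // Illustrative test-harness node.

IgniteInternalCache<Integer, Integer> cache = ignite.cachex("testCache"); // Hypothetical cache name.

breakCacheDataTree(log, cache, 0, (i, entry) -> i % 2 == 0);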
Use of org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager.CacheDataStore in project ignite by apache.
The class CachePartitionDefragmentationManager, method executeDefragmentation.
/**
 * Executes defragmentation for every cache group scheduled for it.
 */
public void executeDefragmentation() throws IgniteCheckedException {
    Map<Integer, List<CacheDataStore>> oldStores = new HashMap<>();

    for (CacheGroupContext oldGrpCtx : cacheGrpCtxsForDefragmentation) {
        int grpId = oldGrpCtx.groupId();

        final IgniteCacheOffheapManager offheap = oldGrpCtx.offheap();

        List<CacheDataStore> oldCacheDataStores = stream(offheap.cacheDataStores().spliterator(), false)
            .filter(store -> {
                try {
                    return filePageStoreMgr.exists(grpId, store.partId());
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            })
            .collect(Collectors.toList());

        oldStores.put(grpId, oldCacheDataStores);
    }

    int partitionCount = oldStores.values().stream().mapToInt(List::size).sum();

    status.onStart(cacheGrpCtxsForDefragmentation, partitionCount);

    try {
        // Now the actual process starts.
        IgniteInternalFuture<?> idxDfrgFut = null;
        DataPageEvictionMode prevPageEvictionMode = null;

        for (CacheGroupContext oldGrpCtx : cacheGrpCtxsForDefragmentation) {
            int grpId = oldGrpCtx.groupId();

            File workDir = filePageStoreMgr.cacheWorkDir(oldGrpCtx.sharedGroup(), oldGrpCtx.cacheOrGroupName());

            List<CacheDataStore> oldCacheDataStores = oldStores.get(grpId);

            if (skipAlreadyDefragmentedCacheGroup(workDir, grpId, log)) {
                status.onCacheGroupSkipped(oldGrpCtx, oldCacheDataStores.size());

                continue;
            }

            try {
                GridCacheOffheapManager offheap = (GridCacheOffheapManager)oldGrpCtx.offheap();

                status.onCacheGroupStart(oldGrpCtx, oldCacheDataStores.size());

                if (workDir == null || oldCacheDataStores.isEmpty()) {
                    status.onCacheGroupFinish(oldGrpCtx);

                    continue;
                }

                // Defragmentation of a new group can't start on a region with the wrong eviction mode,
                // so waiting for the previous cache group's defragmentation to finish is unavoidable.
                DataPageEvictionMode curPageEvictionMode = oldGrpCtx.dataRegion().config().getPageEvictionMode();

                if (prevPageEvictionMode == null || prevPageEvictionMode != curPageEvictionMode) {
                    prevPageEvictionMode = curPageEvictionMode;

                    partDataRegion.config().setPageEvictionMode(curPageEvictionMode);

                    if (idxDfrgFut != null)
                        idxDfrgFut.get();
                }

                IntMap<CacheDataStore> cacheDataStores = new IntHashMap<>();

                for (CacheDataStore store : offheap.cacheDataStores()) {
                    // A null tree means the partition is empty.
                    assert store.tree() == null || store.tree().groupId() == grpId;

                    if (store.tree() != null)
                        cacheDataStores.put(store.partId(), store);
                }
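                // Stop checkpointing the old data region: from here on it is only read as input.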
                dbMgr.checkpointedDataRegions().remove(oldGrpCtx.dataRegion());

                // Another cheat: the TTL cleanup manager knows too much about cache internals,
                // so it has to be unregistered before the group is defragmented.
                oldGrpCtx.caches().stream()
                    .filter(cacheCtx -> cacheCtx.groupId() == grpId)
                    .forEach(cacheCtx -> cacheCtx.ttl().unregister());

                // Technically WAL is already disabled, but "PageHandler.isWalDeltaRecordNeeded" doesn't care,
                // and WAL records would be allocated anyway, only to be ignored later, unless WAL is
                // disabled for the cache group explicitly.
                oldGrpCtx.localWalEnabled(false, false);

                boolean encrypted = oldGrpCtx.config().isEncryptionEnabled();

                FileVersionCheckingFactory pageStoreFactory = filePageStoreMgr.getPageStoreFactory(grpId, encrypted);

                AtomicLong idxAllocationTracker = new GridAtomicLong();

                createIndexPageStore(grpId, workDir, pageStoreFactory, partDataRegion, idxAllocationTracker::addAndGet);

                checkCancellation();

                GridCompoundFuture<Object, Object> cmpFut = new GridCompoundFuture<>();

                PageMemoryEx oldPageMem = (PageMemoryEx)oldGrpCtx.dataRegion().pageMemory();

                CacheGroupContext newGrpCtx = new CacheGroupContext(
                    sharedCtx,
                    grpId,
                    oldGrpCtx.receivedFrom(),
                    CacheType.USER,
                    oldGrpCtx.config(),
                    oldGrpCtx.affinityNode(),
                    partDataRegion,
                    oldGrpCtx.cacheObjectContext(),
                    null,
                    null,
                    oldGrpCtx.localStartVersion(),
                    true,
                    false,
                    true
                );

                defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadLock();

                try {
                    // This initializes the partition meta in the index partition: meta tree and reuse list.
                    newGrpCtx.start();
                }
                finally {
                    defragmentationCheckpoint.checkpointTimeoutLock().checkpointReadUnlock();
                }

                IgniteUtils.doInParallel(
                    defragmentationThreadPool,
                    oldCacheDataStores,
                    oldCacheDataStore -> defragmentOnePartition(oldGrpCtx, grpId, workDir, offheap,
                        pageStoreFactory, cmpFut, oldPageMem, newGrpCtx, oldCacheDataStore));

                // A bit too general for now, but better than saving only the last checkpoint future.
                cmpFut.markInitialized().get();

                idxDfrgFut = new GridFinishedFuture<>();

                if (filePageStoreMgr.hasIndexStore(grpId)) {
                    defragmentIndexPartition(oldGrpCtx, newGrpCtx);

                    idxDfrgFut = defragmentationCheckpoint.forceCheckpoint("index defragmented", null).futureFor(FINISHED);
                }

                PageStore oldIdxPageStore = filePageStoreMgr.getStore(grpId, INDEX_PARTITION);

                idxDfrgFut = idxDfrgFut.chain(fut -> {
                    if (log.isDebugEnabled()) {
                        log.debug(S.toString("Index partition defragmented",
                            "grpId", grpId, false,
                            "oldPages", oldIdxPageStore.pages(), false,
                            "newPages", idxAllocationTracker.get() + 1, false,
                            "pageSize", pageSize, false,
                            "partFile", defragmentedIndexFile(workDir).getName(), false,
                            "workDir", workDir, false
                        ));
                    }

                    oldPageMem.invalidate(grpId, INDEX_PARTITION);

                    PageMemoryEx partPageMem = (PageMemoryEx)partDataRegion.pageMemory();

                    partPageMem.invalidate(grpId, INDEX_PARTITION);

                    DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)partPageMem.pageManager();

                    pageMgr.pageStoreMap().removePageStore(grpId, INDEX_PARTITION);

                    PageMemoryEx mappingPageMem = (PageMemoryEx)mappingDataRegion.pageMemory();

                    pageMgr = (DefragmentationPageReadWriteManager)mappingPageMem.pageManager();

                    pageMgr.pageStoreMap().clear(grpId);

                    renameTempIndexFile(workDir);

                    writeDefragmentationCompletionMarker(filePageStoreMgr.getPageStoreFileIoFactory(), workDir, log);

                    batchRenameDefragmentedCacheGroupPartitions(workDir, log);

                    return null;
                });

                status.onIndexDefragmented(
                    oldGrpCtx,
                    oldIdxPageStore.size(),
                    pageSize + idxAllocationTracker.get() * pageSize // New size, including the file header page.
                );
            }
            catch (DefragmentationCancelledException e) {
                DefragmentationFileUtils.deleteLeftovers(workDir);

                throw e;
            }

            status.onCacheGroupFinish(oldGrpCtx);
        }

        if (idxDfrgFut != null)
            idxDfrgFut.get();

        mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME);

        status.onFinish();

        completionFut.onDone();
    }
    catch (DefragmentationCancelledException e) {
        mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME);

        log.info("Defragmentation process has been cancelled.");

        status.onFinish();

        completionFut.onDone();
    }
    catch (Throwable t) {
        log.error("Defragmentation process failed.", t);

        status.onFinish();

        completionFut.onDone(t);

        throw t;
    }
    finally {
        defragmentationCheckpoint.stop(true);
    }
}
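The index post-processing above also shows a recurring pattern in this code: force a checkpoint, take its FINISHED-stage future, and chain cleanup onto it so that file renames happen only after the pages are on disk. A condensed sketch of that pattern, reusing names from the snippet (the cleanup body is illustrative, not the full original):

IgniteInternalFuture<?> cleanupFut = defragmentationCheckpoint
    .forceCheckpoint("index defragmented", null)
    .futureFor(FINISHED)
    .chain(fut -> {
        // Illustrative cleanup: the checkpoint has flushed the defragmented
        // pages, so the temporary index file can now be renamed safely.
        renameTempIndexFile(workDir);

        return null;
    });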