Use of org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX in project ignite by apache.
Class SnapshotPartitionsVerifyHandler, method invoke.
/** {@inheritDoc} */
@Override public Map<PartitionKeyV2, PartitionHashRecordV2> invoke(SnapshotHandlerContext opCtx) throws IgniteCheckedException {
    SnapshotMetadata meta = opCtx.metadata();

    Set<Integer> grps = F.isEmpty(opCtx.groups())
        ? new HashSet<>(meta.partitions().keySet())
        : opCtx.groups().stream().map(CU::cacheId).collect(Collectors.toSet());

    Set<File> partFiles = new HashSet<>();

    IgniteSnapshotManager snpMgr = cctx.snapshotMgr();

    for (File dir : snpMgr.snapshotCacheDirectories(meta.snapshotName(), meta.folderName())) {
        int grpId = CU.cacheId(cacheGroupName(dir));

        if (!grps.remove(grpId))
            continue;

        Set<Integer> parts = meta.partitions().get(grpId) == null
            ? Collections.emptySet()
            : new HashSet<>(meta.partitions().get(grpId));

        for (File part : cachePartitionFiles(dir)) {
            int partId = partId(part.getName());

            if (!parts.remove(partId))
                continue;

            partFiles.add(part);
        }

        if (!parts.isEmpty()) {
            throw new IgniteException("Snapshot data doesn't contain required cache group partition " +
                "[grpId=" + grpId + ", snpName=" + meta.snapshotName() + ", consId=" + meta.consistentId() +
                ", missed=" + parts + ", meta=" + meta + ']');
        }
    }

    if (!grps.isEmpty()) {
        throw new IgniteException("Snapshot data doesn't contain required cache groups " +
            "[grps=" + grps + ", snpName=" + meta.snapshotName() + ", consId=" + meta.consistentId() +
            ", meta=" + meta + ']');
    }

    Map<PartitionKeyV2, PartitionHashRecordV2> res = new ConcurrentHashMap<>();

    ThreadLocal<ByteBuffer> buff = ThreadLocal.withInitial(
        () -> ByteBuffer.allocateDirect(meta.pageSize()).order(ByteOrder.nativeOrder()));

    GridKernalContext snpCtx = snpMgr.createStandaloneKernalContext(meta.snapshotName(), meta.folderName());

    for (GridComponent comp : snpCtx)
        comp.start();

    try {
        U.doInParallel(snpMgr.snapshotExecutorService(), partFiles, part -> {
            String grpName = cacheGroupName(part.getParentFile());
            int grpId = CU.cacheId(grpName);
            int partId = partId(part.getName());

            FilePageStoreManager storeMgr = (FilePageStoreManager)cctx.pageStore();

            try (FilePageStore pageStore = (FilePageStore)storeMgr.getPageStoreFactory(grpId, false)
                .createPageStore(getTypeByPartId(partId), part::toPath, val -> {})
            ) {
                if (partId == INDEX_PARTITION) {
                    checkPartitionsPageCrcSum(() -> pageStore, INDEX_PARTITION, FLAG_IDX);

                    return null;
                }

                if (grpId == MetaStorage.METASTORAGE_CACHE_ID) {
                    checkPartitionsPageCrcSum(() -> pageStore, partId, FLAG_DATA);

                    return null;
                }

                ByteBuffer pageBuff = buff.get();
                pageBuff.clear();
                pageStore.read(0, pageBuff, true);

                long pageAddr = GridUnsafe.bufferAddress(pageBuff);

                PagePartitionMetaIO io = PageIO.getPageIO(pageBuff);
                GridDhtPartitionState partState = fromOrdinal(io.getPartitionState(pageAddr));

                if (partState != OWNING)
                    throw new IgniteCheckedException("Snapshot partitions must be in the OWNING state only: " + partState);

                long updateCntr = io.getUpdateCounter(pageAddr);
                long size = io.getSize(pageAddr);

                if (log.isDebugEnabled()) {
                    log.debug("Partition [grpId=" + grpId + ", id=" + partId + ", counter=" + updateCntr +
                        ", size=" + size + "]");
                }

                // Snapshot partitions must always be in the OWNING state;
                // there are no `primary` partitions for a snapshot.
                PartitionKeyV2 key = new PartitionKeyV2(grpId, partId, grpName);

                PartitionHashRecordV2 hash = calculatePartitionHash(key, updateCntr, meta.consistentId(),
                    GridDhtPartitionState.OWNING, false, size,
                    snpMgr.partitionRowIterator(snpCtx, grpName, partId, pageStore));

                assert hash != null : "OWNING must have hash: " + key;

                res.put(key, hash);
            }
            catch (IOException e) {
                throw new IgniteCheckedException(e);
            }

            return null;
        });
    }
    catch (Throwable t) {
        log.error("Error executing handler: ", t);

        throw t;
    }
    finally {
        for (GridComponent comp : snpCtx)
            comp.stop(true);
    }

    return res;
}
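In the handler above, the page flag used for the CRC check is derived from the partition being verified: the index partition (INDEX_PARTITION) holds pages flagged FLAG_IDX, while regular partitions and the metastorage hold FLAG_DATA pages. A minimal sketch of that branching as a standalone helper (the class and method names here are illustrative, not part of Ignite):

import org.apache.ignite.internal.pagemem.PageIdAllocator;

class PartitionFlagSketch {
    // Hypothetical helper mirroring the branching in invoke(): choose the
    // page flag to validate for a given partition file.
    static byte flagForPartition(int partId) {
        // index.bin (partition 0xFFFF) contains index pages; all other
        // partition files contain data pages.
        return partId == PageIdAllocator.INDEX_PARTITION
            ? PageIdAllocator.FLAG_IDX
            : PageIdAllocator.FLAG_DATA;
    }
}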
Use of org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX in project ignite by apache.
Class PageMemoryImplTest, method runThrottlingEmptifyCpBufFirst.
/**
 * @throws Exception If failed.
 */
public void runThrottlingEmptifyCpBufFirst(PageMemoryImpl.ThrottlingPolicy plc) throws Exception {
    TestPageStoreManager pageStoreMgr = new TestPageStoreManager();

    final List<FullPageId> allocated = new ArrayList<>();

    int pagesForStartThrottling = 10;

    // Number of pages polled from the checkpoint buffer for throttling.
    AtomicInteger cpBufferPollPages = new AtomicInteger();

    // Create a 1 MB page memory.
    PageMemoryImpl memory = createPageMemory(1, plc, pageStoreMgr, pageStoreMgr, (IgniteInClosure<FullPageId>)fullPageId -> {
        assertEquals(cpBufferPollPages.incrementAndGet(), pageStoreMgr.storedPages.size());
    });

    assert pagesForStartThrottling < memory.checkpointBufferPagesSize() / 3;

    for (int i = 0; i < pagesForStartThrottling + (memory.checkpointBufferPagesSize() * 2 / 3); i++) {
        long id = memory.allocatePage(1, INDEX_PARTITION, FLAG_IDX);
        FullPageId fullId = new FullPageId(id, 1);

        allocated.add(fullId);

        writePage(memory, fullId, (byte)1);
    }

    GridMultiCollectionWrapper<FullPageId> markedPages = memory.beginCheckpoint(new GridFinishedFuture());

    for (int i = 0; i < pagesForStartThrottling + (memory.checkpointBufferPagesSize() * 2 / 3); i++)
        writePage(memory, allocated.get(i), (byte)1);

    doCheckpoint(markedPages, memory, pageStoreMgr);

    // 'pagesForStartThrottling - 1' is expected here: pagesForStartThrottling pages must be written
    // from the checkpoint buffer before throttling is disabled, but at least one page is always
    // written outside of throttling, and in this test that page is certainly also contained in the
    // checkpoint buffer (because all of the pages are in the checkpoint buffer).
    assertEquals(pagesForStartThrottling - 1, cpBufferPollPages.get());
}
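Note that allocatePage(1, INDEX_PARTITION, FLAG_IDX) returns a packed 64-bit page ID with the partition number and the flag encoded into it. A minimal sketch of composing and decomposing such an ID with the internal PageIdUtils helpers (the bit positions in the comments are an assumption about the current layout; the PageIdSketch class itself is illustrative):

import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX;
import static org.apache.ignite.internal.pagemem.PageIdAllocator.INDEX_PARTITION;

import org.apache.ignite.internal.pagemem.PageIdUtils;

public class PageIdSketch {
    public static void main(String[] args) {
        // Build the page ID for page index 42 of the index partition,
        // then unpack its fields again.
        long pageId = PageIdUtils.pageId(INDEX_PARTITION, FLAG_IDX, 42);

        assert PageIdUtils.partId(pageId) == INDEX_PARTITION : "partition id occupies bits 40..55";
        assert PageIdUtils.flag(pageId) == FLAG_IDX : "flag occupies bits 32..39";
        assert PageIdUtils.pageIndex(pageId) == 42 : "page index occupies the low 32 bits";
    }
}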
Use of org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX in project ignite by apache.
Class PageMemoryImplTest, method testCheckpointBufferCantOverflowWithThrottlingMixedLoad.
/**
 * @throws Exception If failed.
 */
private void testCheckpointBufferCantOverflowWithThrottlingMixedLoad(PageMemoryImpl.ThrottlingPolicy plc) throws Exception {
    PageMemoryImpl memory = createPageMemory(plc, null);

    List<FullPageId> pages = new ArrayList<>();

    for (int i = 0; i < (MAX_SIZE - 10) * MB / PAGE_SIZE / 2; i++) {
        long pageId = memory.allocatePage(1, INDEX_PARTITION, FLAG_IDX);
        FullPageId fullPageId = new FullPageId(pageId, 1);

        pages.add(fullPageId);

        acquireAndReleaseWriteLock(memory, fullPageId);
    }

    memory.beginCheckpoint(new GridFinishedFuture());

    CheckpointMetricsTracker mockTracker = Mockito.mock(CheckpointMetricsTracker.class);

    for (FullPageId checkpointPage : pages) {
        memory.checkpointWritePage(checkpointPage, ByteBuffer.allocate(PAGE_SIZE), (fullPageId, buffer, tag) -> {
            // No-op.
        }, mockTracker);
    }

    memory.finishCheckpoint();

    for (int i = (int)((MAX_SIZE - 10) * MB / PAGE_SIZE / 2); i < (MAX_SIZE - 20) * MB / PAGE_SIZE; i++) {
        long pageId = memory.allocatePage(1, INDEX_PARTITION, FLAG_IDX);
        FullPageId fullPageId = new FullPageId(pageId, 1);

        pages.add(fullPageId);

        acquireAndReleaseWriteLock(memory, fullPageId);
    }

    memory.beginCheckpoint(new GridFinishedFuture());

    // Mix pages in checkpoint with clean pages.
    Collections.shuffle(pages);

    AtomicBoolean stop = new AtomicBoolean(false);

    try {
        GridTestUtils.runAsync(() -> {
            for (FullPageId page : pages) {
                // Mark dirty 50% of pages.
                if (ThreadLocalRandom.current().nextDouble() < 0.5) {
                    try {
                        acquireAndReleaseWriteLock(memory, page);

                        if (stop.get())
                            break;
                    }
                    catch (IgniteCheckedException e) {
                        log.error("runAsync ended with exception", e);

                        fail();
                    }
                }
            }
        }).get(5_000);
    }
    catch (IgniteFutureTimeoutCheckedException ignore) {
        // Expected.
    }
    finally {
        stop.set(true);
    }

    memory.finishCheckpoint();

    LongAdderMetric totalThrottlingTime = U.field(memory.metrics(), "totalThrottlingTime");

    assertNotNull(totalThrottlingTime);
    assertTrue(totalThrottlingTime.value() > 0);
}
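The acquireAndReleaseWriteLock helper is defined elsewhere in PageMemoryImplTest; re-taking a write lock marks the page dirty again, which is what keeps pressure on the checkpoint buffer in this test. A hedged sketch of what such a helper does with the PageMemory locking API (the exact body is assumed, not copied from the test):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.pagemem.FullPageId;
import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl;

class WriteLockSketch {
    // Sketch (assumed body): re-dirty a page by acquiring it, taking the
    // write lock and releasing it with dirtyFlag = true.
    static void acquireAndReleaseWriteLock(PageMemoryImpl memory, FullPageId fullPageId) throws IgniteCheckedException {
        long page = memory.acquirePage(1, fullPageId.pageId());

        try {
            long pageAddr = memory.writeLock(1, fullPageId.pageId(), page);

            assert pageAddr != 0 : "Write lock must succeed for an acquired page.";

            memory.writeUnlock(1, fullPageId.pageId(), page, null, true); // dirtyFlag = true.
        }
        finally {
            memory.releasePage(1, fullPageId.pageId(), page);
        }
    }
}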
Use of org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX in project ignite by apache.
Class IgniteIndexReader, method readIdx.
/**
 * Read index file.
 */
public void readIdx() {
    long partPageStoresNum = Arrays.stream(partStores).filter(Objects::nonNull).count();

    print("Partitions files num: " + partPageStoresNum);

    Map<Class, Long> pageClasses = new HashMap<>();

    long pagesNum = isNull(idxStore) ? 0 : (idxStore.size() - idxStore.headerSize()) / pageSize;

    print("Going to check " + pagesNum + " pages.");

    Set<Long> pageIds = new HashSet<>();

    AtomicReference<Map<String, TreeTraversalInfo>> treeInfo = new AtomicReference<>();
    AtomicReference<Map<String, TreeTraversalInfo>> horizontalScans = new AtomicReference<>();
    AtomicReference<PageListsInfo> pageListsInfo = new AtomicReference<>();

    List<Throwable> errors;

    try {
        IgniteBiTuple<Long, Long> indexPartitionRoots = partitionRoots(partMetaPageId(INDEX_PARTITION, FLAG_IDX));

        long metaTreeRootId = indexPartitionRoots.get1();
        long pageListMetaPageId = indexPartitionRoots.get2();

        // Traversing trees.
        treeInfo.set(traverseAllTrees("Index trees traversal", metaTreeRootId, CountOnlyStorage::new, this::traverseTree));

        treeInfo.get().forEach((name, info) -> {
            pageIds.addAll(info.innerPageIds);
            pageIds.add(info.rootPageId);
        });

        Supplier<ItemStorage> itemStorageFactory = checkParts ? LinkStorage::new : CountOnlyStorage::new;

        horizontalScans.set(traverseAllTrees("Scan index trees horizontally", metaTreeRootId, itemStorageFactory, this::horizontalTreeScan));

        // Scanning page reuse lists.
        if (pageListMetaPageId != 0)
            pageListsInfo.set(getPageListsInfo(pageListMetaPageId));

        ProgressPrinter progressPrinter = new ProgressPrinter(System.out, "Reading pages sequentially", pagesNum);

        // Scan all pages in file.
        errors = scanFileStore(INDEX_PARTITION, FLAG_IDX, idxStore, (pageId, addr, io) -> {
            progressPrinter.printProgress();

            pageClasses.compute(io.getClass(), (k, v) -> v == null ? 1 : v + 1);

            if (!(io instanceof PageMetaIO || io instanceof PagesListMetaIO)) {
                if (idxFilter == null) {
                    if ((io instanceof BPlusMetaIO || io instanceof BPlusInnerIO)
                        && !pageIds.contains(pageId)
                        && pageListsInfo.get() != null
                        && !pageListsInfo.get().allPages.contains(pageId)) {
                        throw new IgniteException("Possibly orphan " + io.getClass().getSimpleName() + " page, pageId=" + pageId);
                    }
                }
            }

            return true;
        });
    }
    catch (IgniteCheckedException e) {
        throw new IgniteException(INDEX_FILE_NAME + " scan problem", e);
    }

    if (treeInfo.get() == null)
        printErr("No tree meta info found.");
    else {
        printTraversalResults(RECURSIVE_TRAVERSE_NAME, treeInfo.get());
        printTraversalResults(HORIZONTAL_SCAN_NAME, horizontalScans.get());
    }

    compareTraversals(treeInfo.get(), horizontalScans.get());

    if (pageListsInfo.get() == null)
        printErr("No page lists meta info found.");
    else
        printPagesListsInfo(pageListsInfo.get());

    printPageStat("", "\n---These pages types were encountered during sequential scan:", pageClasses);

    if (!errors.isEmpty()) {
        printErr("---");
        printErr("Errors:");

        errors.forEach(this::printStackTrace);
    }

    print("---");
    print("Total pages encountered during sequential scan: " + pageClasses.values().stream().mapToLong(a -> a).sum());
    print("Total errors occurred during sequential scan: " + errors.size());

    if (idxFilter != null)
        print("Orphan pages were not reported due to --indexes filter.");

    print("Note that some pages can be occupied by meta info, tracking info, etc., so total page count can differ " +
        "from count of pages found in index trees and page lists.");

    if (checkParts) {
        Map<Integer, List<Throwable>> checkPartsErrors = checkParts(horizontalScans.get());

        print("");
        printErrors("", "Partitions check:", "Partitions check detected no errors.",
            "Errors detected in partition, partId=%s", false, checkPartsErrors);

        print("\nPartition check finished, total errors: " +
            checkPartsErrors.values().stream().mapToInt(List::size).sum() +
            ", total problem partitions: " + checkPartsErrors.size());
    }
}
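Both partMetaPageId(INDEX_PARTITION, FLAG_IDX) and scanFileStore(INDEX_PARTITION, FLAG_IDX, ...) rely on the fact that every page ID inside index.bin carries the index partition number and the FLAG_IDX flag. A minimal sanity-check sketch built on the real PageIdUtils accessors (the IndexPageCheck wrapper itself is illustrative, not part of the reader):

import org.apache.ignite.internal.pagemem.PageIdAllocator;
import org.apache.ignite.internal.pagemem.PageIdUtils;

class IndexPageCheck {
    // Hypothetical check: does a page ID read during the sequential scan
    // actually belong to the index partition?
    static boolean isIndexPage(long pageId) {
        return PageIdUtils.partId(pageId) == PageIdAllocator.INDEX_PARTITION
            && PageIdUtils.flag(pageId) == PageIdAllocator.FLAG_IDX;
    }
}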