Use of org.apache.ignite.internal.pagemem.wal.record.PageSnapshot in project ignite by apache.
From class IgniteWalReplayingAfterRestartTest, method testWalRecordsAfterRestart.
/**
* @throws Exception If failed.
*/
@Test
public void testWalRecordsAfterRestart() throws Exception {
IgniteEx ignite = startGrid(0);
ignite.cluster().active(true);
IgniteCache<Integer, byte[]> cache = ignite.getOrCreateCache(CACHE_NAME);
int key = 0;
while (ignite.context().cache().context().wal().lastArchivedSegment() < SEGMENTS_CNT)
    cache.put(key++ % PART_NUM, new byte[1024]);
ignite.context().cache().context().database().waitForCheckpoint("test-checkpoint");
long lastArchived = ignite.context().cache().context().wal().lastArchivedSegment();
while (ignite.context().cache().context().wal().lastArchivedSegment() < lastArchived + 1)
    cache.put(key++ % PART_NUM, new byte[1024]);
stopGrid(0);
// No exceptions should be thrown here.
ignite = startGrid(0);
ignite.cluster().active();
// Delta records should always follow PageSnapshot records.
String workDir = U.defaultWorkDirectory();
IteratorParametersBuilder builder = new IteratorParametersBuilder()
    .filesOrDirs(workDir)
    .filter((rec, ptr) -> rec.purpose() == PHYSICAL);
Map<FullPageId, PageSnapshot> snapshots = new HashMap<>();
try (WALIterator it = new IgniteWalIteratorFactory().iterator(builder)) {
while (it.hasNext()) {
IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
WALRecord rec = tup.get2();
if (rec.type() == CHECKPOINT_RECORD)
snapshots.clear();
// Let's check partition meta pages.
if (rec instanceof PageSnapshot) {
PageSnapshot snpRec = (PageSnapshot) rec;
assertFalse(snapshots.containsKey(snpRec.fullPageId()));
snapshots.put(snpRec.fullPageId(), snpRec);
} else if (rec instanceof MetaPageUpdatePartitionDataRecord) {
MetaPageUpdatePartitionDataRecord metaRec = (MetaPageUpdatePartitionDataRecord) rec;
assertTrue(snapshots.containsKey(metaRec.fullPageId()));
}
}
}
}
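The same invariant can be checked standalone against any work directory. Below is a minimal sketch, assuming the same classes and static imports (PHYSICAL, CHECKPOINT_RECORD) the test already uses; the work directory argument is a placeholder for something like U.defaultWorkDirectory().

/**
 * Minimal sketch: verify that every partition-meta delta record is preceded,
 * within its checkpoint interval, by a full PageSnapshot of the same page.
 *
 * @param workDir Ignite work directory (placeholder).
 */
static void assertDeltaFollowsSnapshot(String workDir) throws Exception {
    IteratorParametersBuilder builder = new IteratorParametersBuilder()
        .filesOrDirs(workDir)
        .filter((rec, ptr) -> rec.purpose() == PHYSICAL);

    Set<FullPageId> snapshotted = new HashSet<>();

    try (WALIterator it = new IgniteWalIteratorFactory().iterator(builder)) {
        while (it.hasNext()) {
            WALRecord rec = it.next().get2();

            if (rec.type() == CHECKPOINT_RECORD)
                snapshotted.clear(); // A new checkpoint interval starts: snapshots must be logged again.
            else if (rec instanceof PageSnapshot)
                snapshotted.add(((PageSnapshot)rec).fullPageId());
            else if (rec instanceof MetaPageUpdatePartitionDataRecord
                && !snapshotted.contains(((MetaPageUpdatePartitionDataRecord)rec).fullPageId()))
                throw new IllegalStateException("Delta record without a preceding PageSnapshot: " + rec);
        }
    }
}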
Use of org.apache.ignite.internal.pagemem.wal.record.PageSnapshot in project ignite by apache.
From class WalCompactionTest, method testApplyingUpdatesFromCompactedWal.
/**
* @param switchOffCompressor Switch off compressor after restart.
* @throws Exception If failed.
*/
private void testApplyingUpdatesFromCompactedWal(boolean switchOffCompressor) throws Exception {
IgniteEx ig = (IgniteEx) startGrids(3);
ig.cluster().baselineAutoAdjustEnabled(false);
ig.cluster().active(true);
IgniteCache<Integer, byte[]> cache = ig.cache(CACHE_NAME);
final int pageSize = ig.cachex(CACHE_NAME).context().dataRegion().pageMemory().pageSize();
for (int i = 0; i < ENTRIES; i++) {
// At least 20MB of raw data in total.
final byte[] val = new byte[20000];
val[i] = 1;
cache.put(i, val);
}
byte[] dummyPage = dummyPage(pageSize);
// Spam WAL to move all data records to compressible WAL zone.
for (int i = 0; i < WAL_SEGMENT_SIZE / pageSize * 2; i++) {
ig.context().cache().context().wal().log(new PageSnapshot(new FullPageId(-1, -1), dummyPage, pageSize));
}
// WAL archive segment is allowed to be compressed when it's at least one checkpoint away from current WAL head.
ig.context().cache().context().database().wakeupForCheckpoint("Forced checkpoint").get();
ig.context().cache().context().database().wakeupForCheckpoint("Forced checkpoint").get();
String nodeFolderName = ig.context().pdsFolderResolver().resolveFolders().folderName();
File dbDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false);
File walDir = new File(dbDir, "wal");
File archiveDir = new File(walDir, "archive");
File nodeArchiveDir = new File(archiveDir, nodeFolderName);
File walSegment = new File(nodeArchiveDir, FileDescriptor.fileName(0) + ZIP_SUFFIX);
// Allow compressor to compress WAL segments.
assertTrue(GridTestUtils.waitForCondition(walSegment::exists, 15_000));
// Should be compressed to at most half the original segment size.
assertTrue(walSegment.length() < WAL_SEGMENT_SIZE / 2);
stopAllGrids();
File nodeLfsDir = new File(dbDir, nodeFolderName);
File cpMarkersDir = new File(nodeLfsDir, "cp");
File[] cpMarkers = cpMarkersDir.listFiles();
assertNotNull(cpMarkers);
assertTrue(cpMarkers.length > 0);
File cacheDir = new File(nodeLfsDir, "cache-" + CACHE_NAME);
File[] lfsFiles = cacheDir.listFiles();
assertNotNull(lfsFiles);
assertTrue(lfsFiles.length > 0);
// Enforce reading WAL from the very beginning at the next start.
for (File f : cpMarkers) f.delete();
for (File f : lfsFiles) f.delete();
compactionEnabled = !switchOffCompressor;
ig = (IgniteEx) startGrids(3);
awaitPartitionMapExchange();
cache = ig.cache(CACHE_NAME);
boolean fail = false;
// Check that all data is recovered from compacted WAL.
for (int i = 0; i < ENTRIES; i++) {
byte[] arr = cache.get(i);
if (arr == null) {
System.out.println(">>> Missing: " + i);
fail = true;
} else if (arr[i] != 1) {
System.out.println(">>> Corrupted: " + i);
fail = true;
}
}
assertFalse(fail);
// Check that compaction is successfully reset when the baseline topology (BLT) changes.
stopAllGrids();
Ignite ignite = startGrids(2);
ignite.cluster().active(true);
resetBaselineTopology();
ignite.resetLostPartitions(Collections.singleton(CACHE_NAME));
// This node will join a different baseline topology.
startGrid(2);
awaitPartitionMapExchange();
}
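The compactionEnabled flag toggled above presumably reaches the node through its configuration. As a hedged sketch (standard public configuration API; segment size illustrative), WAL compaction is enabled like this:

IgniteConfiguration cfg = new IgniteConfiguration();

cfg.setDataStorageConfiguration(new DataStorageConfiguration()
    // Small segments reach the archive quickly, giving the compressor work sooner.
    .setWalSegmentSize(4 * 1024 * 1024)
    // Archived segments are compacted into .zip files (the ZIP_SUFFIX checked above).
    .setWalCompactionEnabled(true)
    .setDefaultDataRegionConfiguration(new DataRegionConfiguration()
        .setPersistenceEnabled(true)));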
Use of org.apache.ignite.internal.pagemem.wal.record.PageSnapshot in project ignite by apache.
From class WalCompactionTest, method testSeekingStartInCompactedSegment.
/**
* @throws Exception If failed.
*/
@Test
public void testSeekingStartInCompactedSegment() throws Exception {
IgniteEx ig = (IgniteEx) startGrids(3);
ig.cluster().active(true);
IgniteCache<Integer, byte[]> cache = ig.cache(CACHE_NAME);
for (int i = 0; i < 100; i++) {
final byte[] val = new byte[20000];
val[i] = 1;
cache.put(i, val);
}
ig.context().cache().context().database().wakeupForCheckpoint("Forced checkpoint").get();
ig.context().cache().context().database().wakeupForCheckpoint("Forced checkpoint").get();
String nodeFolderName = ig.context().pdsFolderResolver().resolveFolders().folderName();
File dbDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false);
File nodeLfsDir = new File(dbDir, nodeFolderName);
File cpMarkersDir = new File(nodeLfsDir, "cp");
Set<String> cpMarkersToSave = Arrays.stream(cpMarkersDir.listFiles()).map(File::getName).collect(toSet());
assertTrue(cpMarkersToSave.size() >= 2);
for (int i = 100; i < ENTRIES; i++) {
// At least 20MB of raw data in total.
final byte[] val = new byte[20000];
val[i] = 1;
cache.put(i, val);
// Checkpoint periodically to allow the compressor to delete unzipped segments.
if (i % 100 == 0)
ig.context().cache().context().database().wakeupForCheckpoint("Forced checkpoint").get();
}
final int pageSize = ig.cachex(CACHE_NAME).context().dataRegion().pageMemory().pageSize();
byte[] dummyPage = dummyPage(pageSize);
// Spam WAL to move all data records to compressible WAL zone.
for (int i = 0; i < WAL_SEGMENT_SIZE / pageSize * 2; i++) {
ig.context().cache().context().wal().log(new PageSnapshot(new FullPageId(-1, -1), dummyPage, pageSize));
}
// WAL archive segment is allowed to be compressed when it's at least one checkpoint away from current WAL head.
ig.context().cache().context().database().wakeupForCheckpoint("Forced checkpoint").get();
ig.context().cache().context().database().wakeupForCheckpoint("Forced checkpoint").get();
File nodeArchiveDir = dbDir.toPath().resolve(Paths.get("wal", "archive", nodeFolderName)).toFile();
File unzippedWalSegment = new File(nodeArchiveDir, FileDescriptor.fileName(0));
File walSegment = new File(nodeArchiveDir, FileDescriptor.fileName(0) + ZIP_SUFFIX);
// Allow compressor to compress WAL segments.
assertTrue(GridTestUtils.waitForCondition(() -> !unzippedWalSegment.exists(), 15_000));
assertTrue(walSegment.exists());
// Should be compressed to at most half the original segment size.
assertTrue(walSegment.length() < WAL_SEGMENT_SIZE / 2);
stopAllGrids();
File[] cpMarkers = cpMarkersDir.listFiles((dir, name) -> !cpMarkersToSave.contains(name));
assertNotNull(cpMarkers);
assertTrue(cpMarkers.length > 0);
File cacheDir = new File(nodeLfsDir, "cache-" + CACHE_NAME);
File[] lfsFiles = cacheDir.listFiles();
assertNotNull(lfsFiles);
assertTrue(lfsFiles.length > 0);
// Enforce reading WAL from the very beginning at the next start.
for (File f : cpMarkers) f.delete();
for (File f : lfsFiles) f.delete();
ig = (IgniteEx) startGrids(3);
awaitPartitionMapExchange();
cache = ig.cache(CACHE_NAME);
int missing = 0;
for (int i = 0; i < 100; i++) {
if (!cache.containsKey(i))
missing++;
}
log.info(">>> Missing " + missing + " entries logged before WAL iteration start");
assertTrue(missing > 0);
boolean fail = false;
// Check that all data is recovered from compacted WAL.
for (int i = 100; i < ENTRIES; i++) {
byte[] arr = cache.get(i);
if (arr == null) {
log.info(">>> Missing: " + i);
fail = true;
} else if (arr[i] != 1) {
log.info(">>> Corrupted: " + i);
fail = true;
}
}
assertFalse(fail);
}
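A compacted archive can also be read back directly: IgniteWalIteratorFactory decompresses .zip segments transparently when pointed at the archive directory. A minimal sketch, with a placeholder path:

File nodeArchiveDir = new File("/path/to/db/wal/archive/node-folder"); // Placeholder path.

IteratorParametersBuilder params = new IteratorParametersBuilder().filesOrDirs(nodeArchiveDir);

try (WALIterator it = new IgniteWalIteratorFactory().iterator(params)) {
    // Logical records (e.g. data records) survive compaction; physical page
    // records are stripped, which is why the segment shrinks by more than half.
    while (it.hasNext())
        System.out.println(it.next().get2().type());
}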
Use of org.apache.ignite.internal.pagemem.wal.record.PageSnapshot in project ignite by apache.
From class WalScannerTest, method shouldFindCorrectRecordsForMoreThanOnePages.
/**
* @throws Exception If failed.
*/
@Test
public void shouldFindCorrectRecordsForMoreThanOnePages() throws Exception {
// given: An iterator over random records plus the records that should be found by the scanner for several page ids.
long expPageId1 = 984;
long expPageId2 = 9584;
long expPageId3 = 98344;
int grpId = 123;
PageSnapshot expPageSnapshot = new PageSnapshot(new FullPageId(expPageId1, grpId), dummyPage(1024, expPageId1), 1024);
CheckpointRecord expCheckpoint = new CheckpointRecord(new WALPointer(5738, 0, 0));
FixCountRecord expDeltaPage1 = new FixCountRecord(grpId, expPageId2, 4);
FixCountRecord expDeltaPage2 = new FixCountRecord(grpId, expPageId3, 4);
WALIterator mockedIter = mockWalIterator(
    new IgniteBiTuple<>(NULL_PTR, expPageSnapshot),
    new IgniteBiTuple<>(NULL_PTR, new PageSnapshot(new FullPageId(455, grpId), dummyPage(1024, 455), 1024)),
    new IgniteBiTuple<>(NULL_PTR, expCheckpoint),
    new IgniteBiTuple<>(NULL_PTR, new MetastoreDataRecord("key", new byte[0])),
    new IgniteBiTuple<>(NULL_PTR, new PartitionMetaStateRecord(grpId, 1, OWNING, 1)),
    new IgniteBiTuple<>(NULL_PTR, expDeltaPage1),
    new IgniteBiTuple<>(NULL_PTR, new FixCountRecord(grpId, 98348, 4)),
    new IgniteBiTuple<>(NULL_PTR, new PartitionMetaStateRecord(grpId, 1, OWNING, 1)),
    new IgniteBiTuple<>(NULL_PTR, expDeltaPage2));
IgniteWalIteratorFactory mockedFactory = mock(IgniteWalIteratorFactory.class);
when(mockedFactory.iterator(any(IteratorParametersBuilder.class))).thenReturn(mockedIter);
List<WALRecord> holder = new ArrayList<>();
ScannerHandler recordCaptor = (rec) -> holder.add(rec.get2());
Set<T2<Integer, Long>> groupAndPageIds = new HashSet<>();
groupAndPageIds.add(new T2<>(grpId, expPageId1));
groupAndPageIds.add(new T2<>(grpId, expPageId2));
groupAndPageIds.add(new T2<>(grpId, expPageId3));
// when: Scanning the WAL for the expected pages.
buildWalScanner(withIteratorParameters(), mockedFactory).findAllRecordsFor(groupAndPageIds).forEach(recordCaptor);
// then: Only the expected records should be found.
assertEquals(4, holder.size());
assertEquals(expPageSnapshot, holder.get(0));
assertEquals(expCheckpoint, holder.get(1));
assertEquals(expDeltaPage1, holder.get(2));
assertEquals(expDeltaPage2, holder.get(3));
}
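Against real WAL files instead of a mocked iterator, the same scanner chain looks as follows. This is a sketch assuming a single-argument buildWalScanner overload alongside the withIteratorParameters() factory imported by this test; the directory path is a placeholder:

Set<T2<Integer, Long>> pageIds = new HashSet<>();
pageIds.add(new T2<>(123, 984L)); // (groupId, pageId) pairs to search for.

ScannerHandler printRecord = rec -> System.out.println(rec.get2());

buildWalScanner(withIteratorParameters().filesOrDirs("/path/to/wal/archive"))
    .findAllRecordsFor(pageIds)
    .forEach(printRecord);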
Use of org.apache.ignite.internal.pagemem.wal.record.PageSnapshot in project ignite by apache.
From class WalOnNodeStartTest, method testNoNewMetaPagesSnapshotsOnNodeStart.
/**
 * Tests that no new meta page snapshot records are written to the WAL on node restart.
 *
 * @throws Exception If failed.
 */
@Test
public void testNoNewMetaPagesSnapshotsOnNodeStart() throws Exception {
IgniteEx ignite = startGrid(0);
ignite.cluster().state(ClusterState.ACTIVE);
// A default cache with a lot of partitions is required.
IgniteCache<Object, Object> cache = ignite.getOrCreateCache(DEFAULT_CACHE_NAME);
for (int k = 0; k < 1024; k++) cache.put(k, k);
// Graceful cache shutdown with the final checkpoint.
ignite.cluster().state(ClusterState.INACTIVE);
WALPointer lastWalPtr = ignite.context().cache().context().database().lastCheckpointMarkWalPointer();
stopGrid(0);
ignite = startGrid(0);
awaitPartitionMapExchange();
ignite.cluster().state(ClusterState.INACTIVE);
String walPath = ignite.configuration().getDataStorageConfiguration().getWalPath();
String walArchivePath = ignite.configuration().getDataStorageConfiguration().getWalArchivePath();
// Stop the grid so that no WAL records are still being written (e.g. a BLT update).
stopGrid(0);
try (WALIterator replayIter = new IgniteWalIteratorFactory(log)
    .iterator(lastWalPtr.next(), new File(walArchivePath), new File(walPath))) {
    replayIter.forEach(walPtrAndRecordPair -> {
        WALRecord walRecord = walPtrAndRecordPair.getValue();
        if (walRecord.type() == WALRecord.RecordType.PAGE_RECORD) {
            PageSnapshot pageSnapshot = (PageSnapshot) walRecord;
            ByteBuffer data = pageSnapshot.pageDataBuffer();
            // No meta pages should be present in the WAL because they were all already in the correct state.
            assertThat(PageIO.T_PART_META, not(equalTo(PageIO.getType(data))));
        }
    });
}
}
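If the assertion above ever fails, tallying what was actually written after the checkpoint marker narrows the cause down quickly. A minimal sketch reusing the iterator call from the test (the variables log, lastWalPtr, walPath and walArchivePath are as above; the EnumMap tally is an illustrative addition):

Map<WALRecord.RecordType, Integer> counts = new EnumMap<>(WALRecord.RecordType.class);

try (WALIterator it = new IgniteWalIteratorFactory(log)
    .iterator(lastWalPtr.next(), new File(walArchivePath), new File(walPath))) {
    it.forEach(tup -> counts.merge(tup.getValue().type(), 1, Integer::sum));
}

counts.forEach((type, cnt) -> log.info(type + ": " + cnt));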