Use of org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware in the project ignite by apache.
The class FileWriteAheadLogManager, method start0.
/**
 * {@inheritDoc}
 *
 * <p>Initializes the file-based WAL manager on server nodes: resolves the WAL
 * work, archive and (optionally) CDC directories, creates the record serializer,
 * wires a WAL size provider into the persistence metrics, builds the segment
 * bookkeeping ({@code SegmentAware}) and constructs the background components
 * (compressor/decompressor, archiver, cleaner) plus the segment router and
 * file handle manager. Does nothing on client nodes, which keep no WAL.
 *
 * @throws IgniteCheckedException If the WAL configuration is invalid, a
 *      directory cannot be initialized, or page compression is requested with
 *      an unsupported serializer version.
 */
@Override
public void start0() throws IgniteCheckedException {
// Client nodes have no persistence, so there is nothing to start.
if (cctx.kernalContext().clientNode())
return;
final PdsFolderSettings resolveFolders = cctx.kernalContext().pdsFolderResolver().resolveFolders();
// Validate WAL-related settings before creating any directories/components.
checkWalConfiguration();
// NOTE(review): the synchronized block appears to exist for safe publication of
// the fields assigned below (walWorkDir, serializer, segmentAware, ...) — confirm
// against concurrent readers before reordering anything in here.
synchronized (this) {
// Local finals capture the directory handles for the anonymous size provider below.
final File walWorkDir0 = walWorkDir = initDirectory(dsCfg.getWalPath(), DataStorageConfiguration.DFLT_WAL_PATH, resolveFolders.folderName(), "write ahead log work directory");
final File walArchiveDir0 = walArchiveDir = initDirectory(dsCfg.getWalArchivePath(), DataStorageConfiguration.DFLT_WAL_ARCHIVE_PATH, resolveFolders.folderName(), "write ahead log archive directory");
// Change-data-capture uses its own directory, created only when CDC is enabled.
if (dsCfg.isCdcEnabled()) {
walCdcDir = initDirectory(dsCfg.getCdcWalPath(), DataStorageConfiguration.DFLT_WAL_CDC_PATH, resolveFolders.folderName(), "change data capture directory");
}
serializer = new RecordSerializerFactoryImpl(cctx).createSerializer(serializerVer);
GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager) cctx.database();
metrics = dbMgr.persistentStoreMetricsImpl();
if (metrics != null) {
// Report total WAL size as the sum of file lengths in the work directory,
// plus the archive directory when the archiver is enabled.
metrics.setWalSizeProvider(new CO<Long>() {
/**
 * {@inheritDoc}
 */
@Override
public Long apply() {
long size = 0;
for (File f : walWorkDir0.listFiles()) size += f.length();
if (isArchiverEnabled()) {
for (File f : walArchiveDir0.listFiles()) size += f.length();
}
return size;
}
});
}
// Segment state bookkeeping shared by the archiver/compressor/cleaner components.
segmentAware = new SegmentAware(log, dsCfg.getWalSegments(), dsCfg.isWalCompactionEnabled(), minWalArchiveSize, maxWalArchiveSize);
// Otherwise, FileArchiver initialization will trigger redundant work for FileCompressor.
if (dsCfg.isWalCompactionEnabled()) {
compressor = new FileCompressor(log);
decompressor = new FileDecompressor(log);
}
if (isArchiverEnabled())
archiver = new FileArchiver(log);
// The cleaner is only needed when the archive size is bounded.
if (!walArchiveUnlimited())
cleaner = new FileCleaner(log);
prepareAndCheckWalFiles();
if (compressor != null)
compressor.initAlreadyCompressedSegments();
if (archiver != null)
archiver.init(segmentAware);
segmentRouter = new SegmentRouter(walWorkDir, walArchiveDir, segmentAware, dsCfg);
fileHandleManager = fileHandleManagerFactory.build(cctx, metrics, mmap, serializer, this::currentHandle);
lockedSegmentFileInputFactory = new LockedSegmentFileInputFactory(segmentAware, segmentRouter, ioFactory);
pageCompression = dsCfg.getWalPageCompression();
if (pageCompression != DiskPageCompression.DISABLED) {
// Page snapshot compression requires record serializer version 2 or later.
if (serializerVer < 2) {
throw new IgniteCheckedException("WAL page snapshots compression not supported for serializerVer=" + serializerVer);
}
cctx.kernalContext().compress().checkPageCompressionSupported();
pageCompressionLevel = dsCfg.getWalPageCompressionLevel() != null ? checkCompressionLevelBounds(dsCfg.getWalPageCompressionLevel(), pageCompression) : getDefaultCompressionLevel(pageCompression);
}
}
}
Use of org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware in the project ignite by apache.
The class IgnitePdsStartWIthEmptyArchive, method test.
/**
 * Checks that a node restarts correctly after its WAL archive directory has
 * been emptied: the restored last-archived index must not regress by more than
 * the number of configured work segments, the current write segment index must
 * survive the restart, and all currently available segments are eventually
 * archived again.
 *
 * @throws Exception If failed.
 */
@Test
public void test() throws Exception {
IgniteEx ig = startGrid(0);
ig.cluster().active(true);
FileWriteAheadLogManager walMgr = (FileWriteAheadLogManager) ig.context().cache().context().wal();
// Populate data to generate WAL archive segments (1000 entries of 1 MiB each).
try (IgniteDataStreamer<Integer, byte[]> st = ig.dataStreamer(DEFAULT_CACHE_NAME)) {
int entries = 1000;
for (int i = 0; i < entries; i++) st.addData(i, new byte[1024 * 1024]);
}
File archiveDir = U.field(walMgr, "walArchiveDir");
stopGrid(0, false);
// Capture pre-restart state via reflection: last archived index and current segment id.
SegmentAware beforeSaw = U.field(walMgr, "segmentAware");
long beforeLastArchivedAbsoluteIdx = beforeSaw.lastArchivedAbsoluteIndex();
FileWriteHandle fhBefore = U.field(walMgr, "currHnd");
long idxBefore = fhBefore.getSegmentId();
File[] files = archiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER);
Arrays.sort(files);
// Cleanup archive directory.
for (File f : files) {
if (f.delete())
log.info("File " + f.getAbsolutePath() + " deleted");
}
Assert.assertEquals(0, archiveDir.listFiles().length);
evts.clear();
// Restart grid again after archive was removed.
ig = startGrid(0);
walMgr = (FileWriteAheadLogManager) ig.context().cache().context().wal();
SegmentAware afterSaw = U.field(walMgr, "segmentAware");
long afterLastArchivedAbsoluteIndex = afterSaw.lastArchivedAbsoluteIndex();
int segments = ig.configuration().getDataStorageConfiguration().getWalSegments();
// The restored archived index may move back, but by no more than one ring of work segments.
Assert.assertTrue("lastArchivedBeforeIdx=" + beforeLastArchivedAbsoluteIdx + ", lastArchivedAfterIdx=" + afterLastArchivedAbsoluteIndex + ", segments=" + segments, afterLastArchivedAbsoluteIndex >= (beforeLastArchivedAbsoluteIdx - segments));
ig.cluster().active(true);
FileWriteHandle fhAfter = U.field(walMgr, "currHnd");
Assert.assertNotNull(fhAfter);
long idxAfter = fhAfter.getSegmentId();
// The current write segment must be exactly where it was before the restart.
Assert.assertEquals(idxBefore, idxAfter);
Assert.assertTrue(idxAfter >= beforeLastArchivedAbsoluteIdx);
log.info("currentIdx=" + idxAfter + ", lastArchivedBeforeIdx=" + beforeLastArchivedAbsoluteIdx + ", lastArchivedAfteridx=" + afterLastArchivedAbsoluteIndex + ", segments=" + segments);
// One is the last archived segment, the second is the current write segment.
final long awaitAchviedSegments = idxAfter - afterLastArchivedAbsoluteIndex - 2;
// Await until all currently available segments are archived.
assertTrue(GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override
public boolean apply() {
// Count archive events for segments past the post-restart archived index.
long cut = evts.keySet().stream().filter(e -> e > afterLastArchivedAbsoluteIndex).count();
return cut >= awaitAchviedSegments;
}
}, 10_000));
}
Use of org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware in the project ignite by apache.
The class ReleaseSegmentOnHistoricalRebalanceTest, method testNoReserveSegment.
/**
 * Checks that when segment reservation always fails, historical rebalance
 * still completes without errors.
 *
 * @throws Exception If failed.
 */
@Test
public void testNoReserveSegment() throws Exception {
checkHistoricalRebalance(node -> {
SegmentAware awareSpy = spy(segmentAware(node));
// Refuse every reservation attempt.
when(awareSpy.reserve(anyLong())).thenReturn(false);
segmentAware(node, awareSpy);
});
}
Use of org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware in the project ignite by apache.
The class ReleaseSegmentOnHistoricalRebalanceTest, method testImmediateReleaseSegment.
/**
 * Checks that when a segment reservation is released immediately after being
 * taken, historical rebalance still completes without errors.
 *
 * @throws Exception If failed.
 */
@Test
public void testImmediateReleaseSegment() throws Exception {
checkHistoricalRebalance(node -> {
SegmentAware awareSpy = spy(segmentAware(node));
// Perform the real reservation, then release it right away.
doAnswer(invocation -> {
Object res = invocation.callRealMethod();
awareSpy.release(invocation.getArgument(0));
return res;
}).when(awareSpy).reserve(anyLong());
segmentAware(node, awareSpy);
});
}
Use of org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware in the project ignite by apache.
The class IgniteWalIteratorSwitchSegmentTest, method checkSwitchReadingSegmentDuringIteration.
/**
 * Checks that a WAL iterator transparently switches from reading a segment in
 * the work directory to reading the same segment from the archive directory
 * when the segment is archived mid-iteration, and that no records are lost.
 *
 * @param serVer WAL serializer version.
 * @throws Exception If something failed.
 */
private void checkSwitchReadingSegmentDuringIteration(int serVer) throws Exception {
String workDir = U.defaultWorkDirectory();
T2<IgniteWriteAheadLogManager, RecordSerializer> initTup = initiate(serVer, workDir);
IgniteWriteAheadLogManager walMgr = initTup.get1();
RecordSerializer recordSerializer = initTup.get2();
MetastoreDataRecord rec = new MetastoreDataRecord("0", new byte[100]);
int recSize = recordSerializer.size(rec);
// Add more records to force rollover to the next segment.
int recordsToWrite = SEGMENT_SIZE / recSize + 100;
SegmentAware segmentAware = GridTestUtils.getFieldValue(walMgr, "segmentAware");
// Lock segment 0 to guard it from archiving before the iterator is created.
assertTrue(segmentAware.lock(0));
for (int i = 0; i < recordsToWrite; i++) walMgr.log(new MetastoreDataRecord(rec.key(), rec.value()));
walMgr.flush(null, true);
AtomicInteger actualRecords = new AtomicInteger(0);
// Paths of the file backing the iterator at the start and at the end of iteration.
AtomicReference<String> startedSegmentPath = new AtomicReference<>();
AtomicReference<String> finishedSegmentPath = new AtomicReference<>();
CountDownLatch startedIterLatch = new CountDownLatch(1);
CountDownLatch finishedArchivedLatch = new CountDownLatch(1);
IgniteInternalFuture<?> fut = GridTestUtils.runAsync(() -> {
// Check that segment switching works as expected and every record is reachable.
try (WALIterator it = walMgr.replay(null)) {
// Dig through iterator internals to find the path of the file currently being read.
Object handle = getFieldValueHierarchy(it, "currWalSegment");
FileInput in = getFieldValueHierarchy(handle, "in");
Object delegate = getFieldValueHierarchy(in.io(), "delegate");
Channel ch = getFieldValueHierarchy(delegate, "ch");
String path = getFieldValueHierarchy(ch, "path");
startedSegmentPath.set(path);
startedIterLatch.countDown();
while (it.hasNext()) {
IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
WALRecord rec0 = tup.get2();
if (rec0.type() == METASTORE_DATA_RECORD)
actualRecords.incrementAndGet();
// Block until the main thread confirms segment 0 has been archived,
// forcing the remaining reads to come from the archive copy.
finishedArchivedLatch.await();
}
// Re-read the backing file path after iteration — it should now be the archive copy.
in = getFieldValueHierarchy(handle, "in");
delegate = getFieldValueHierarchy(in.io(), "delegate");
ch = getFieldValueHierarchy(delegate, "ch");
path = getFieldValueHierarchy(ch, "path");
finishedSegmentPath.set(path);
}
return null;
});
startedIterLatch.await();
// Allow segment 0 to be archived now that the iterator is reading it.
segmentAware.unlock(0);
waitForCondition(() -> segmentAware.lastArchivedAbsoluteIndex() == 0, 5000);
finishedArchivedLatch.countDown();
fut.get();
// Iteration should start from the work directory but finish from the archive directory.
assertEquals(workDir + WORK_SUB_DIR + File.separator + "0000000000000000.wal", startedSegmentPath.get());
assertEquals(workDir + ARCHIVE_SUB_DIR + File.separator + "0000000000000000.wal", finishedSegmentPath.get());
Assert.assertEquals("Not all records read during iteration.", recordsToWrite, actualRecords.get());
}
Aggregations