Example usage of org.apache.ignite.internal.processors.cache.persistence.wal.io.FileInput in the Apache Ignite project:
method initReadHandle of class AbstractWalRecordsIterator.
/**
 * Creates a read handle over the given WAL segment. On any error the supplied {@code fileIO}
 * is closed by this method, so the caller must not close it in that case.
 *
 * @param desc Segment file descriptor.
 * @param start Optional pointer to start reading from; {@code null} means the segment beginning.
 * @param fileIO I/O channel already opened for the segment (positioned past the header).
 * @param segmentHeader Segment header previously read from {@code fileIO}.
 * @return Initialized read handle, or {@code null} if the segment contains no data (EOF).
 * @throws IgniteCheckedException If initialization failed due to another unexpected error.
 */
protected AbstractReadFileHandle initReadHandle(@NotNull final AbstractFileDescriptor desc, @Nullable final WALPointer start, @NotNull final SegmentIO fileIO, @NotNull final SegmentHeader segmentHeader) throws IgniteCheckedException {
    try {
        boolean compacted = segmentHeader.isCompacted();

        if (compacted)
            serializerFactory.skipPositionCheck(true);

        FileInput fileInput = segmentFileInputFactory.createFileInput(fileIO, buf);

        boolean startInThisSegment = start != null && desc.idx() == start.index();

        if (startInThisSegment) {
            if (compacted) {
                // Compacted segment: records before the requested pointer are skipped via a
                // deserialize-time filter rather than a raw seek.
                if (start.fileOffset() != 0)
                    serializerFactory.recordDeserializeFilter(new StartSeekingFilter(start));
            }
            else {
                // Never seek before the current position: the header with the serializer
                // version has already been consumed from fileIO.
                fileInput.seek(Math.max(start.fileOffset(), fileIO.position()));
            }
        }

        return createReadFileHandle(fileIO, serializerFactory.createSerializer(segmentHeader.getSerializerVersion()), fileInput);
    }
    catch (SegmentEofException | EOFException ignore) {
        // Segment is empty — close the file and signal absence of a handle with null.
        try {
            fileIO.close();
        }
        catch (IOException ce) {
            throw new IgniteCheckedException(ce);
        }

        return null;
    }
    catch (IgniteCheckedException e) {
        U.closeWithSuppressingException(fileIO, e);

        throw e;
    }
    catch (IOException e) {
        U.closeWithSuppressingException(fileIO, e);

        throw new IgniteCheckedException("Failed to initialize WAL segment after reading segment header: " + desc.file().getAbsolutePath(), e);
    }
}
Example usage of org.apache.ignite.internal.processors.cache.persistence.wal.io.FileInput in the Apache Ignite project:
method checkSwitchReadingSegmentDuringIteration of class IgniteWalIteratorSwitchSegmentTest.
/**
 * Verifies that an iterator started on a work-directory segment transparently switches to the
 * archived copy of the same segment and that every logged record remains reachable.
 *
 * @param serVer WAL serializer version.
 * @throws Exception If something failed.
 */
private void checkSwitchReadingSegmentDuringIteration(int serVer) throws Exception {
    String workDir = U.defaultWorkDirectory();

    T2<IgniteWriteAheadLogManager, RecordSerializer> initTup = initiate(serVer, workDir);

    IgniteWriteAheadLogManager walMgr = initTup.get1();
    RecordSerializer recordSerializer = initTup.get2();

    MetastoreDataRecord rec = new MetastoreDataRecord("0", new byte[100]);

    int recSize = recordSerializer.size(rec);

    // Write enough records to force a rollover into the next segment.
    int recordsToWrite = SEGMENT_SIZE / recSize + 100;

    SegmentAware segmentAware = GridTestUtils.getFieldValue(walMgr, "segmentAware");

    // Keep segment 0 from being archived until the iterator has been created.
    assertTrue(segmentAware.lock(0));

    for (int i = 0; i < recordsToWrite; i++)
        walMgr.log(new MetastoreDataRecord(rec.key(), rec.value()));

    walMgr.flush(null, true);

    AtomicInteger metastoreRecords = new AtomicInteger(0);

    AtomicReference<String> startedSegmentPath = new AtomicReference<>();
    AtomicReference<String> finishedSegmentPath = new AtomicReference<>();

    CountDownLatch startedIterLatch = new CountDownLatch(1);
    CountDownLatch finishedArchivedLatch = new CountDownLatch(1);

    IgniteInternalFuture<?> fut = GridTestUtils.runAsync(() -> {
        // Every record must stay reachable across the work -> archive switch.
        try (WALIterator it = walMgr.replay(null)) {
            Object handle = getFieldValueHierarchy(it, "currWalSegment");

            startedSegmentPath.set(segmentPath(handle));

            startedIterLatch.countDown();

            while (it.hasNext()) {
                WALRecord rec0 = it.next().get2();

                if (rec0.type() == METASTORE_DATA_RECORD)
                    metastoreRecords.incrementAndGet();

                finishedArchivedLatch.await();
            }

            // Re-read the path: the underlying FileInput changes after the segment switch.
            finishedSegmentPath.set(segmentPath(handle));
        }

        return null;
    });

    startedIterLatch.await();

    // Allow archiving and wait until segment 0 lands in the archive.
    segmentAware.unlock(0);

    waitForCondition(() -> segmentAware.lastArchivedAbsoluteIndex() == 0, 5000);

    finishedArchivedLatch.countDown();

    fut.get();

    // Iteration should begin on the work copy of the segment and finish on the archived copy.
    assertEquals(workDir + WORK_SUB_DIR + File.separator + "0000000000000000.wal", startedSegmentPath.get());
    assertEquals(workDir + ARCHIVE_SUB_DIR + File.separator + "0000000000000000.wal", finishedSegmentPath.get());

    Assert.assertEquals("Not all records read during iteration.", recordsToWrite, metastoreRecords.get());
}

/** Extracts, via reflection, the file path of the channel currently backing the segment handle. */
private static String segmentPath(Object handle) {
    FileInput in = getFieldValueHierarchy(handle, "in");

    Object delegate = getFieldValueHierarchy(in.io(), "delegate");

    Channel ch = getFieldValueHierarchy(delegate, "ch");

    return getFieldValueHierarchy(ch, "path");
}
Aggregations