Use of org.apache.ignite.internal.processors.cache.persistence.file.FileIO in project ignite by apache.
The class IgnitePdsRecoveryAfterFileCorruptionTest, method eraseDataFromDisk.
/**
 * @param pageStore Page store.
 * @param cacheId Cache id.
 * @param page Page.
 */
private void eraseDataFromDisk(FilePageStoreManager pageStore, int cacheId, FullPageId page)
    throws IgniteCheckedException, IOException {
    PageStore store = pageStore.getStore(cacheId, PageIdUtils.partId(page.pageId()));

    FilePageStore filePageStore = (FilePageStore)store;

    // Grab the underlying FileIO from the page store via reflection.
    FileIO fileIO = U.field(filePageStore, "fileIO");

    long size = fileIO.size();

    // Overwrite everything past the page store header with zeros to simulate on-disk corruption.
    fileIO.write(ByteBuffer.allocate((int)size - filePageStore.headerSize()), filePageStore.headerSize());

    fileIO.force();
}
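For reference, the same erase-past-the-header trick can be written without Ignite internals. The sketch below is a hypothetical equivalent that uses java.nio.channels.FileChannel in place of FileIO; the ZeroPastHeader class and its method name are illustrative only.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

/** Zeroes out everything past a fixed-size header, mimicking the test method above. */
public final class ZeroPastHeader {
    public static void erase(Path file, int headerSize) throws IOException {
        try (FileChannel ch = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            long size = ch.size();

            // A freshly allocated buffer is zero-filled, so writing it zeroes the region.
            ByteBuffer zeros = ByteBuffer.allocate((int)(size - headerSize));

            // Write at absolute offsets starting right after the header.
            while (zeros.hasRemaining())
                ch.write(zeros, headerSize + zeros.position());

            // Flush to disk, as fileIO.force() does above.
            ch.force(true);
        }
    }
}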
Use of org.apache.ignite.internal.processors.cache.persistence.file.FileIO in project ignite by apache.
The class FileWriteAheadLogManager, method restoreWriteHandle.
/**
 * @param lastReadPtr Last read WAL file pointer.
 * @return Initialized file write handle.
 * @throws IgniteCheckedException If failed to initialize WAL write handle.
 */
private FileWriteHandle restoreWriteHandle(FileWALPointer lastReadPtr) throws IgniteCheckedException {
    long absIdx = lastReadPtr == null ? 0 : lastReadPtr.index();

    @Nullable FileArchiver archiver0 = archiver;

    long segNo = archiver0 == null ? absIdx : absIdx % dsCfg.getWalSegments();

    File curFile = new File(walWorkDir, FileDescriptor.fileName(segNo));

    int off = lastReadPtr == null ? 0 : lastReadPtr.fileOffset();
    int len = lastReadPtr == null ? 0 : lastReadPtr.length();

    try {
        FileIO fileIO = ioFactory.create(curFile);

        IgniteInClosure<FileIO> lsnr = createWalFileListener;

        if (lsnr != null)
            lsnr.apply(fileIO);

        try {
            int serVer = serializerVer;

            // If we have existing segment, try to read version from it.
            if (lastReadPtr != null) {
                try {
                    serVer = readSerializerVersionAndCompactedFlag(fileIO).get1();
                }
                catch (SegmentEofException | EOFException ignore) {
                    serVer = serializerVer;
                }
            }

            RecordSerializer ser = new RecordSerializerFactoryImpl(cctx).createSerializer(serVer);

            if (log.isInfoEnabled())
                log.info("Resuming logging to WAL segment [file=" + curFile.getAbsolutePath() +
                    ", offset=" + off + ", ver=" + serVer + ']');

            SegmentedRingByteBuffer rbuf;

            if (mmap) {
                try {
                    MappedByteBuffer buf = fileIO.map((int)maxWalSegmentSize);

                    rbuf = new SegmentedRingByteBuffer(buf, metrics);
                }
                catch (IOException e) {
                    throw new IgniteCheckedException(e);
                }
            }
            else
                rbuf = new SegmentedRingByteBuffer(dsCfg.getWalBufferSize(), maxWalSegmentSize, DIRECT, metrics);

            if (lastReadPtr != null)
                rbuf.init(lastReadPtr.fileOffset() + lastReadPtr.length());

            FileWriteHandle hnd = new FileWriteHandle(fileIO, absIdx, off + len, true, ser, rbuf);

            if (archiver0 != null)
                archiver0.currentWalIndex(absIdx);
            else
                archivedMonitor.setLastArchivedAbsoluteIndex(absIdx - 1);

            return hnd;
        }
        catch (IgniteCheckedException | IOException e) {
            fileIO.close();

            throw e;
        }
    }
    catch (IOException e) {
        throw new IgniteCheckedException("Failed to restore WAL write handle: " + curFile.getAbsolutePath(), e);
    }
}
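The recovery idea above boils down to reopening the existing segment and resuming appends right after the last record that was read back successfully (off + len). A minimal sketch of that positioning step, assuming a plain FileChannel instead of FileIO (ResumeWrite and its parameters are hypothetical names):

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

/** Reopens a log segment and positions it after the last fully read record. */
final class ResumeWrite {
    static FileChannel resume(Path segment, long lastRecordOffset, long lastRecordLength) throws IOException {
        FileChannel ch = FileChannel.open(segment, StandardOpenOption.READ, StandardOpenOption.WRITE);

        // New records go right after the last record that recovery managed to read,
        // mirroring the off + len position passed to FileWriteHandle above.
        ch.position(lastRecordOffset + lastRecordLength);

        return ch;
    }
}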
Use of org.apache.ignite.internal.processors.cache.persistence.file.FileIO in project ignite by apache.
The class FileWriteAheadLogManager, method initNextWriteHandle.
/**
 * Fills the file header for a new segment. Calling this method signals we are done with the segment and it can be
 * archived. If we don't have a prepared file yet and the archiver is busy, this method blocks.
 *
 * @param cur Current file write handle released by WAL writer.
 * @return Initialized file handle.
 * @throws StorageException If IO exception occurred.
 * @throws IgniteCheckedException If failed.
 */
private FileWriteHandle initNextWriteHandle(FileWriteHandle cur) throws StorageException, IgniteCheckedException {
    try {
        File nextFile = pollNextFile(cur.idx);

        if (log.isDebugEnabled())
            log.debug("Switching to a new WAL segment: " + nextFile.getAbsolutePath());

        SegmentedRingByteBuffer rbuf = null;

        FileIO fileIO = null;

        FileWriteHandle hnd;

        boolean interrupted = this.interrupted.get();

        while (true) {
            try {
                fileIO = ioFactory.create(nextFile);

                IgniteInClosure<FileIO> lsnr = createWalFileListener;

                if (lsnr != null)
                    lsnr.apply(fileIO);

                if (mmap) {
                    MappedByteBuffer buf = fileIO.map((int)maxWalSegmentSize);

                    rbuf = new SegmentedRingByteBuffer(buf, metrics);
                }
                else
                    rbuf = cur.buf.reset();

                hnd = new FileWriteHandle(fileIO, cur.idx + 1, 0, false, serializer, rbuf);

                if (interrupted)
                    Thread.currentThread().interrupt();

                break;
            }
            catch (ClosedByInterruptException ignore) {
                interrupted = true;

                Thread.interrupted();

                if (fileIO != null) {
                    try {
                        fileIO.close();
                    }
                    catch (IOException ignored) {
                        // No-op.
                    }

                    fileIO = null;
                }

                if (rbuf != null) {
                    rbuf.free();

                    rbuf = null;
                }
            }
            finally {
                this.interrupted.set(false);
            }
        }

        return hnd;
    }
    catch (IOException e) {
        StorageException se = new StorageException("Unable to initialize WAL segment", e);

        NodeInvalidator.INSTANCE.invalidate(cctx.kernalContext(), se);

        throw se;
    }
}
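The notable part of initNextWriteHandle is the retry loop: on ClosedByInterruptException the interrupt is remembered, the status flag is cleared so the I/O can be retried, the half-created resources are released, and the interrupt is re-asserted once the handle is ready. A sketch of that pattern, assuming a FileChannel in place of FileIO (InterruptSafeOpen and the header write are illustrative):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

/** Opens a file and writes a zeroed header, retrying if the thread is interrupted mid-I/O. */
final class InterruptSafeOpen {
    static FileChannel openAndReserve(Path file, int headerSize) throws IOException {
        boolean interrupted = false;

        FileChannel ch = null;

        try {
            while (true) {
                try {
                    ch = FileChannel.open(file,
                        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE);

                    // Interruptible I/O: if the thread is interrupted here, the JVM closes the
                    // channel and throws ClosedByInterruptException.
                    ch.write(ByteBuffer.allocate(headerSize), 0);

                    return ch;
                }
                catch (ClosedByInterruptException ignore) {
                    // Remember the interrupt, clear the flag, drop the broken channel and retry.
                    interrupted = true;

                    Thread.interrupted();

                    if (ch != null) {
                        try {
                            ch.close();
                        }
                        catch (IOException ignored) {
                            // No-op.
                        }

                        ch = null;
                    }
                }
            }
        }
        finally {
            // Restore the interrupt status for the caller once the retries are over.
            if (interrupted)
                Thread.currentThread().interrupt();
        }
    }
}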
Use of org.apache.ignite.internal.processors.cache.persistence.file.FileIO in project ignite by apache.
The class FsyncModeFileWriteAheadLogManager, method formatFile.
/**
 * Clears the file, filling it with zeros.
 *
 * @param file File to format.
 */
private void formatFile(File file) throws IgniteCheckedException {
    if (log.isDebugEnabled())
        log.debug("Formatting file [exists=" + file.exists() + ", file=" + file.getAbsolutePath() + ']');

    try (FileIO fileIO = ioFactory.create(file, CREATE, READ, WRITE)) {
        int left = dsCfg.getWalSegmentSize();

        if (mode == WALMode.FSYNC) {
            while (left > 0) {
                int toWrite = Math.min(FILL_BUF.length, left);

                fileIO.write(FILL_BUF, 0, toWrite);

                left -= toWrite;
            }

            fileIO.force();
        }
        else
            fileIO.clear();
    }
    catch (IOException e) {
        throw new IgniteCheckedException("Failed to format WAL segment file: " + file.getAbsolutePath(), e);
    }
}
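The zero-fill-then-fsync preallocation can also be expressed with standard NIO. The sketch below assumes a FileChannel instead of FileIO; SegmentFormatter and the 1 MiB fill buffer are illustrative choices, not necessarily the values Ignite uses for FILL_BUF or the segment size.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

/** Zero-fills a file up to a fixed segment size in bounded chunks, then forces it to disk. */
final class SegmentFormatter {
    /** Reused chunk of zeros written repeatedly instead of allocating one huge buffer. */
    private static final byte[] FILL_BUF = new byte[1024 * 1024];

    static void format(Path file, int segmentSize) throws IOException {
        try (FileChannel ch = FileChannel.open(file,
            StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            int left = segmentSize;

            while (left > 0) {
                int toWrite = Math.min(FILL_BUF.length, left);

                ByteBuffer buf = ByteBuffer.wrap(FILL_BUF, 0, toWrite);

                while (buf.hasRemaining())
                    ch.write(buf);

                left -= toWrite;
            }

            // Make sure the zeros actually reach the disk before the segment is used.
            ch.force(false);
        }
    }
}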
Use of org.apache.ignite.internal.processors.cache.persistence.file.FileIO in project ignite by apache.
The class GridCacheDatabaseSharedManager, method resolvePageSizeFromPartitionFile.
/**
 * @param partFile Partition file.
 * @return Page size read from the partition file header.
 * @throws IOException If reading the file failed.
 * @throws IgniteCheckedException If the partition file is smaller than the page store header.
 */
private int resolvePageSizeFromPartitionFile(Path partFile) throws IOException, IgniteCheckedException {
    try (FileIO fileIO = persistenceCfg.getFileIOFactory().create(partFile.toFile())) {
        int minimalHdr = FilePageStore.HEADER_SIZE;

        if (fileIO.size() < minimalHdr)
            throw new IgniteCheckedException("Partition file is too small: " + partFile);

        ByteBuffer hdr = ByteBuffer.allocate(minimalHdr).order(ByteOrder.LITTLE_ENDIAN);

        while (hdr.remaining() > 0)
            fileIO.read(hdr);

        hdr.rewind();

        // Read signature.
        hdr.getLong();

        // Read version.
        hdr.getInt();

        // Read type.
        hdr.get();

        int pageSize = hdr.getInt();

        if (pageSize == 2048) {
            U.quietAndWarn(log, "You are currently using persistent store with 2K pages (DataStorageConfiguration#" +
                "pageSize). If you use SSD disk, consider migrating to 4K pages for better IO performance.");
        }

        return pageSize;
    }
}
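The header parsing itself is plain ByteBuffer work. The sketch below reproduces it with a FileChannel instead of FileIO; HDR_SIZE is a hypothetical stand-in for FilePageStore.HEADER_SIZE, and the field order (signature, version, type, page size) mirrors the snippet above.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

/** Reads a fixed-size, little-endian header and extracts the page size from it. */
final class HeaderReader {
    /** Hypothetical header size; the real value comes from FilePageStore.HEADER_SIZE. */
    private static final int HDR_SIZE = 64;

    static int readPageSize(Path partFile) throws IOException {
        try (FileChannel ch = FileChannel.open(partFile, StandardOpenOption.READ)) {
            ByteBuffer hdr = ByteBuffer.allocate(HDR_SIZE).order(ByteOrder.LITTLE_ENDIAN);

            // A single read() may return fewer bytes than requested, so loop until full.
            while (hdr.hasRemaining()) {
                if (ch.read(hdr) < 0)
                    throw new IOException("Unexpected end of file: " + partFile);
            }

            hdr.rewind();

            hdr.getLong(); // Signature.
            hdr.getInt();  // Version.
            hdr.get();     // Type.

            return hdr.getInt(); // Page size.
        }
    }
}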