Usage of com.orientechnologies.orient.core.storage.fs.OFileClassic in project orientdb (by orientechnologies): class OWOWCache, method flushPage.
/**
 * Writes a single cache page to its backing file, honoring the write-ahead-log
 * ordering rule: the WAL must be durable at least up to this page's LSN before
 * the page content itself reaches disk.
 *
 * @param fileId    internal (non-composed) id of the file the page belongs to
 * @param pageIndex index of the page inside the file
 * @param buffer    in-memory page content; read starting from position 0
 * @throws IOException          if the underlying file write or sync fails
 * @throws InterruptedException if interrupted while acquiring the file entry
 */
private void flushPage(final int fileId, final long pageIndex, final ByteBuffer buffer) throws IOException, InterruptedException {
  if (writeAheadLog != null) {
    // Enforce WAL-before-data: flush the log if it lags behind this page's LSN.
    final OLogSequenceNumber pageLsn = ODurablePage.getLogSequenceNumberFromPage(buffer);
    final OLogSequenceNumber walFlushedLsn = writeAheadLog.getFlushedLsn();
    final boolean walBehind = walFlushedLsn == null || walFlushedLsn.compareTo(pageLsn) < 0;
    if (walBehind) {
      writeAheadLog.flush();
    }
  }

  // Copy the page into a heap array so the magic number and checksum can be stamped in.
  final byte[] pageContent = new byte[pageSize];
  buffer.position(0);
  buffer.get(pageContent);

  final long magicNumber = checksumMode == OChecksumMode.Off ? MAGIC_NUMBER_WITHOUT_CHECKSUM : MAGIC_NUMBER_WITH_CHECKSUM;
  OLongSerializer.INSTANCE.serializeNative(magicNumber, pageContent, MAGIC_NUMBER_OFFSET);

  if (checksumMode != OChecksumMode.Off) {
    // Stamp the CRC32 so readers can detect on-disk corruption.
    OIntegerSerializer.INSTANCE.serializeNative(calculatePageCrc(pageContent), pageContent, CHECKSUM_OFFSET);
  }

  final OClosableEntry<Long, OFileClassic> fileEntry = files.acquire(composeFileId(id, fileId));
  try {
    final OFileClassic file = fileEntry.get();
    file.write(pageIndex * pageSize, pageContent);

    if (syncOnPageFlush) {
      file.synch();
    }
  } finally {
    files.release(fileEntry);
  }
}
Usage of com.orientechnologies.orient.core.storage.fs.OFileClassic in project orientdb (by orientechnologies): class OWOWCache, method addFile.
/**
 * Registers a new file in the write cache and creates it on disk.
 *
 * A positive id in {@code nameIdMap} means a live file with this name already
 * exists; a negative id marks a previously deleted file, whose id is reused
 * (negated back to positive) for the re-created file.
 *
 * @param fileName logical name of the file to create
 * @return the external (composed with the cache id) id of the new file
 * @throws IOException       if the file cannot be created or the name/id entry
 *                           cannot be persisted
 * @throws OStorageException if a live file with this name already exists, or
 *                           if the thread is interrupted while registering it
 */
public long addFile(String fileName) throws IOException {
  filesLock.acquireWriteLock();
  try {
    Integer fileId = nameIdMap.get(fileName);
    OFileClassic fileClassic;

    if (fileId != null && fileId >= 0)
      throw new OStorageException("File with name " + fileName + " already exists in storage " + storageLocal.getName());

    if (fileId == null) {
      // Name never seen before: allocate a fresh id.
      ++fileCounter;
      fileId = fileCounter;
    } else
      // Negative id marks a deleted file: reuse its id for the re-created file.
      fileId = -fileId;

    fileClassic = createFileInstance(fileName);
    createFile(fileClassic);

    final long externalId = composeFileId(id, fileId);
    files.add(externalId, fileClassic);

    // Persist the mapping so it survives restarts (flush immediately).
    nameIdMap.put(fileName, fileId);
    writeNameIdEntry(new NameFileIdEntry(fileName, fileId), true);

    return externalId;
  } catch (InterruptedException e) {
    // Restore the interrupt status so callers up the stack can still observe it.
    Thread.currentThread().interrupt();
    throw OException.wrapException(new OStorageException("Thread was interrupted"), e);
  } finally {
    filesLock.releaseWriteLock();
  }
}
Usage of com.orientechnologies.orient.core.storage.fs.OFileClassic in project orientdb (by orientechnologies): class OWOWCache, method loadFileContent.
/**
 * Reads up to {@code pageCount} consecutive pages of a file into direct
 * buffers, starting at {@code startPageIndex}.
 *
 * The page count is clamped to the number of whole pages remaining in the
 * file, so fewer pointers than requested may be returned.
 *
 * @param intId           internal (non-composed) id of the file
 * @param startPageIndex  index of the first page to read
 * @param pageCount       maximum number of pages to read
 * @param verifyChecksums whether to verify page checksums (only honored when
 *                        the checksum mode stores checksums)
 * @return cache pointers for the pages read, or {@code null} when
 *         {@code startPageIndex} lies beyond the end of the file
 * @throws IOException       if the underlying file read fails
 * @throws OStorageException if the thread is interrupted during the load
 */
private OCachePointer[] loadFileContent(final int intId, final long startPageIndex, final int pageCount, boolean verifyChecksums) throws IOException {
  final long fileId = composeFileId(id, intId);
  try {
    final OClosableEntry<Long, OFileClassic> entry = files.acquire(fileId);
    try {
      final OFileClassic fileClassic = entry.get();
      if (fileClassic == null)
        throw new IllegalArgumentException("File with id " + intId + " not found in WOW Cache");

      final long firstPageStartPosition = startPageIndex * pageSize;
      final long firstPageEndPosition = firstPageStartPosition + pageSize;

      if (fileClassic.getFileSize() >= firstPageEndPosition) {
        final OSessionStoragePerformanceStatistic sessionStoragePerformanceStatistic = performanceStatisticManager
            .getSessionPerformanceStatistic();
        if (sessionStoragePerformanceStatistic != null) {
          sessionStoragePerformanceStatistic.startPageReadFromFileTimer();
        }

        int pagesRead = 0;

        // Pages loaded before any WAL exists get a sentinel LSN of (-1, -1).
        final OLogSequenceNumber lastLsn = writeAheadLog == null ? new OLogSequenceNumber(-1, -1) : writeAheadLog.getFlushedLsn();

        try {
          if (pageCount == 1) {
            // Fast path: single-page read into one direct buffer.
            final ByteBuffer buffer = bufferPool.acquireDirect(false);
            assert buffer.position() == 0;

            fileClassic.read(firstPageStartPosition, buffer, false);

            if (verifyChecksums && (checksumMode == OChecksumMode.StoreAndVerify || checksumMode == OChecksumMode.StoreAndThrow))
              verifyChecksum(buffer, fileId, startPageIndex, null);

            buffer.position(0);

            final OCachePointer dataPointer = new OCachePointer(buffer, bufferPool, lastLsn, fileId, startPageIndex);
            pagesRead = 1;
            return new OCachePointer[] { dataPointer };
          }

          // Clamp the request to the whole pages actually present in the file.
          final long maxPageCount = (fileClassic.getFileSize() - firstPageStartPosition) / pageSize;
          final int realPageCount = Math.min((int) maxPageCount, pageCount);

          final ByteBuffer[] buffers = new ByteBuffer[realPageCount];
          for (int i = 0; i < buffers.length; i++) {
            buffers[i] = bufferPool.acquireDirect(false);
            assert buffers[i].position() == 0;
          }

          // Single scatter read covering all requested pages.
          fileClassic.read(firstPageStartPosition, buffers, false);

          if (verifyChecksums && (checksumMode == OChecksumMode.StoreAndVerify || checksumMode == OChecksumMode.StoreAndThrow))
            for (int i = 0; i < buffers.length; ++i)
              verifyChecksum(buffers[i], fileId, startPageIndex + i, buffers);

          final OCachePointer[] dataPointers = new OCachePointer[buffers.length];
          for (int n = 0; n < buffers.length; n++) {
            buffers[n].position(0);
            dataPointers[n] = new OCachePointer(buffers[n], bufferPool, lastLsn, fileId, startPageIndex + n);
          }

          pagesRead = dataPointers.length;
          return dataPointers;
        } finally {
          if (sessionStoragePerformanceStatistic != null) {
            sessionStoragePerformanceStatistic.stopPageReadFromFileTimer(pagesRead);
          }
        }
      } else
        return null;
    } finally {
      files.release(entry);
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before converting to a storage exception.
    Thread.currentThread().interrupt();
    throw OException.wrapException(new OStorageException("Data load was interrupted"), e);
  }
}
Usage of com.orientechnologies.orient.core.storage.fs.OFileClassic in project orientdb (by orientechnologies): class OWOWCache, method readNameIdMap.
/**
 * Restores the name -&gt; file id map from its on-disk log (nameIdMapHolder) and
 * re-opens every still-existing data file that has a non-negative id.
 *
 * Entries are append-logged, so a later entry for the same name supersedes an
 * earlier one. A negative id marks a deleted file. Older ODB versions logged
 * deletions incorrectly, so several deleted files can end up sharing one
 * negative id; such duplicates are detected while replaying the log and each
 * affected name is re-assigned a fresh, unique negative id at the end.
 *
 * @throws IOException          if the name/id map file cannot be read
 * @throws InterruptedException if interrupted while registering an open file
 */
private void readNameIdMap() throws IOException, InterruptedException {
  // older versions of ODB incorrectly logged file deletions
  // some deleted files have the same id
  // because we reuse ids of removed files when we re-create them
  // we need to fix this situation
  final Map<Integer, Set<String>> filesWithfNegativeIds = new HashMap<Integer, Set<String>>();

  nameIdMap = new ConcurrentHashMap<String, Integer>();

  // Highest absolute id seen in the log; used to fast-forward fileCounter below.
  long localFileCounter = -1;

  nameIdMapHolder.seek(0);

  NameFileIdEntry nameFileIdEntry;
  while ((nameFileIdEntry = readNextNameIdEntry()) != null) {
    final long absFileId = Math.abs(nameFileIdEntry.fileId);
    if (localFileCounter < absFileId)
      localFileCounter = absFileId;

    // If this name previously mapped to a negative (deleted) id, the newer
    // entry supersedes it: drop the name from that id's duplicate-tracking set.
    final Integer existingId = nameIdMap.get(nameFileIdEntry.name);
    if (existingId != null && existingId < 0) {
      final Set<String> files = filesWithfNegativeIds.get(existingId);

      if (files != null) {
        files.remove(nameFileIdEntry.name);

        if (files.isEmpty()) {
          filesWithfNegativeIds.remove(existingId);
        }
      }
    }

    // Track every name currently mapped to each negative id so duplicates
    // (several deleted files sharing one id) can be repaired after the replay.
    if (nameFileIdEntry.fileId < 0) {
      Set<String> files = filesWithfNegativeIds.get(nameFileIdEntry.fileId);

      if (files == null) {
        files = new HashSet<String>();
        files.add(nameFileIdEntry.name);
        filesWithfNegativeIds.put(nameFileIdEntry.fileId, files);
      } else {
        files.add(nameFileIdEntry.name);
      }
    }

    // Last entry wins: overwrite any earlier mapping for this name.
    nameIdMap.put(nameFileIdEntry.name, nameFileIdEntry.fileId);
  }

  // Ensure freshly allocated ids never collide with ids already in the log.
  if (localFileCounter > 0 && fileCounter < localFileCounter)
    fileCounter = (int) localFileCounter;

  // Open every live (non-negative id) file that is not registered yet; if the
  // physical file is missing, flip its id negative to mark it as deleted.
  for (Map.Entry<String, Integer> nameIdEntry : nameIdMap.entrySet()) {
    if (nameIdEntry.getValue() >= 0) {
      final long externalId = composeFileId(id, nameIdEntry.getValue());

      if (files.get(externalId) == null) {
        OFileClassic fileClassic = createFileInstance(nameIdEntry.getKey());

        if (fileClassic.exists()) {
          fileClassic.open();
          files.add(externalId, fileClassic);
        } else {
          final Integer fileId = nameIdMap.get(nameIdEntry.getKey());

          if (fileId != null && fileId > 0) {
            nameIdMap.put(nameIdEntry.getKey(), -fileId);
          }
        }
      }
    }
  }

  // Repair: any negative id shared by more than one deleted file gets each of
  // its names re-mapped to a fresh, unique negative id.
  final Set<String> fixedFiles = new HashSet<String>();

  for (Map.Entry<Integer, Set<String>> entry : filesWithfNegativeIds.entrySet()) {
    final Set<String> files = entry.getValue();

    if (files.size() > 1) {
      for (String fileName : files) {
        fileCounter++;
        final int nextId = -fileCounter;
        nameIdMap.put(fileName, nextId);

        fixedFiles.add(fileName);
      }
    }
  }

  if (!fixedFiles.isEmpty())
    OLogManager.instance().warn(this, "Removed files " + fixedFiles + " had duplicated ids. Problem is fixed automatically.");
}
Aggregations