Example use of com.orientechnologies.orient.core.storage.fs.OFileClassic in the orientdb project (by orientechnologies): class WOWCacheTest, method testMagicFailure.
/**
 * Verifies that a corrupted page magic number is detected on load when the cache
 * runs in {@code StoreAndThrow} checksum mode: the test writes a page through the
 * write cache, flushes it to disk, corrupts the first byte of the file (the magic
 * number area), and expects a subsequent load to throw {@link OStorageException}.
 */
@Test
public void testMagicFailure() throws IOException {
  // Enforce checksum verification so corruption surfaces as an exception on load.
  wowCache.setChecksumMode(OChecksumMode.StoreAndThrow);
  final long fileId = wowCache.addFile(fileName);
  final OCachePointer cachePointer = wowCache.load(fileId, 0, 1, true, new OModifiableBoolean(), true)[0];
  // Fill the user-data area of the page (past the system header) with zeros.
  cachePointer.acquireExclusiveLock();
  final ByteBuffer buffer = cachePointer.getSharedBuffer();
  buffer.position(systemOffset);
  buffer.put(new byte[buffer.remaining()]);
  cachePointer.releaseExclusiveLock();
  wowCache.store(fileId, 0, cachePointer);
  cachePointer.decrementReadersReferrer();
  // Force the page to disk so corruption below hits the persisted copy.
  wowCache.flush();
  final String path = storageLocal.getConfiguration().getDirectory() + File.separator + fileName;
  // Corrupt the on-disk magic number (byte 0 of the page header) directly,
  // bypassing the cache.
  final OFileClassic file = new OFileClassic(path, "rw");
  file.open();
  file.writeByte(0, (byte) 1);
  file.close();
  // Reloading the corrupted page must now fail the checksum/magic verification.
  Assert.assertThrows(OStorageException.class, new Assert.ThrowingRunnable() {
    @Override
    public void run() throws Throwable {
      wowCache.load(fileId, 0, 1, true, new OModifiableBoolean(), true);
    }
  });
}
Example use of com.orientechnologies.orient.core.storage.fs.OFileClassic in the orientdb project (by orientechnologies): class ReadWriteDiskCacheTest, method assertFile.
/**
 * Reads page {@code pageIndex} directly from the test file on disk and verifies its
 * full on-disk layout: user data content, magic number, CRC32 checksum of the page
 * body, and the WAL log sequence number stored in the page header.
 *
 * @param pageIndex index of the page to read from the file
 * @param value     expected user-data bytes (the page content past the system header)
 * @param lsn       expected WAL LSN recorded in the page header
 * @throws IOException if the underlying file cannot be read
 */
private void assertFile(long pageIndex, byte[] value, OLogSequenceNumber lsn) throws IOException {
  String path = storageLocal.getConfiguration().getDirectory() + "/readWriteDiskCacheTest.tst";
  OFileClassic fileClassic = new OFileClassic(path, "r");
  fileClassic.open();
  // Each page occupies (userDataSize + systemOffset) bytes on disk; read the whole page.
  byte[] content = new byte[userDataSize + systemOffset];
  fileClassic.read(pageIndex * (userDataSize + systemOffset), content, userDataSize + systemOffset);
  // NOTE(review): the (actual, expected) argument order and deep byte[] comparison
  // imply org.testng.Assert. If this is org.junit.Assert, assertEquals on byte[]
  // compares references only and assertArrayEquals would be required — confirm
  // which Assert class is imported in this file.
  Assert.assertEquals(Arrays.copyOfRange(content, systemOffset, userDataSize + systemOffset), value);
  // Magic number lives at offset 0 of the page header; a checksum must be present.
  long magicNumber = OLongSerializer.INSTANCE.deserializeNative(content, 0);
  Assert.assertEquals(magicNumber, OWOWCache.MAGIC_NUMBER_WITH_CHECKSUM);
  // Recompute CRC32 over everything past the (magic number + checksum) header fields
  // and compare against the stored checksum located right after the magic number.
  CRC32 crc32 = new CRC32();
  crc32.update(content, OIntegerSerializer.INT_SIZE + OLongSerializer.LONG_SIZE, content.length - OIntegerSerializer.INT_SIZE - OLongSerializer.LONG_SIZE);
  int crc = OIntegerSerializer.INSTANCE.deserializeNative(content, OLongSerializer.LONG_SIZE);
  Assert.assertEquals(crc, (int) crc32.getValue());
  // The page header also records the WAL position (segment, position) of the last change.
  long segment = OLongSerializer.INSTANCE.deserializeNative(content, ODurablePage.WAL_SEGMENT_OFFSET);
  long position = OLongSerializer.INSTANCE.deserializeNative(content, ODurablePage.WAL_POSITION_OFFSET);
  OLogSequenceNumber readLsn = new OLogSequenceNumber(segment, position);
  Assert.assertEquals(readLsn, lsn);
  fileClassic.close();
}
Example use of com.orientechnologies.orient.core.storage.fs.OFileClassic in the orientdb project (by orientechnologies): class ReadWriteDiskCacheTest, method updateFilePage.
/**
 * Overwrites {@code value.length} bytes of the test file at the given offset within
 * page {@code pageIndex}, bypassing the cache, and syncs the change to disk.
 *
 * @param pageIndex index of the page whose bytes are overwritten
 * @param offset    byte offset inside the page at which writing starts
 * @param value     bytes to write at that position
 * @throws IOException if the file cannot be opened or written
 */
private void updateFilePage(long pageIndex, long offset, byte[] value) throws IOException {
  String path = storageLocal.getConfiguration().getDirectory() + "/readWriteDiskCacheTest.tst";
  OFileClassic fileClassic = new OFileClassic(path, "rw");
  fileClassic.open();
  try {
    // Page stride is (userDataSize + systemOffset), matching assertFile(); the
    // previous magic constant 8 only worked because userDataSize == 8 in this test.
    fileClassic.write(pageIndex * (userDataSize + systemOffset) + offset, value, value.length, 0);
    fileClassic.synch();
  } finally {
    // Always release the file handle, even if the write or sync fails.
    fileClassic.close();
  }
}
Example use of com.orientechnologies.orient.core.storage.fs.OFileClassic in the orientdb project (by orientechnologies): class OWOWCache, method doDeleteFile.
/**
 * Drops all cached pages of the given file, detaches it from the open-files
 * registry and removes it from disk if it exists.
 *
 * @param fileId external file id (may carry a stale storage id; it is recomposed)
 * @return the name of the deleted file, or {@code null} when the file was not registered
 * @throws IOException if the underlying file cannot be deleted
 */
private String doDeleteFile(long fileId) throws IOException {
  final int intId = extractFileId(fileId);
  // Normalize the id so it is composed with this cache's own storage id.
  fileId = composeFileId(id, intId);
  removeCachedPages(intId);
  final OFileClassic removed = files.remove(fileId);
  if (removed == null)
    return null;
  final String name = removed.getName();
  if (removed.exists())
    removed.delete();
  return name;
}
Example use of com.orientechnologies.orient.core.storage.fs.OFileClassic in the orientdb project (by orientechnologies): class OWOWCache, method load.
/**
 * Loads {@code pageCount} consecutive pages of the given file starting at
 * {@code startPageIndex}, serving them from the write cache when present and from
 * the underlying file otherwise. When the requested pages lie past the end of the
 * file and {@code addNewPages} is {@code true}, the missing pages are allocated on
 * disk, registered in the write cache and the call retries itself recursively.
 *
 * @param fileId          id of the file whose pages are requested
 * @param startPageIndex  index of the first requested page
 * @param pageCount       number of pages to load; must be at least 1
 * @param addNewPages     whether pages beyond the current file size may be allocated
 * @param cacheHit        out-parameter, set to {@code true} when the page was served
 *                        from the write cache (or freshly allocated)
 * @param verifyChecksums whether page checksums are verified while reading from disk
 * @return the loaded page pointers (readers-referrer already incremented), or an
 *         empty array when the page does not exist and {@code addNewPages} is false
 * @throws IOException if the underlying file cannot be read or extended
 */
public OCachePointer[] load(long fileId, long startPageIndex, int pageCount, boolean addNewPages, OModifiableBoolean cacheHit, boolean verifyChecksums) throws IOException {
  final int intId = extractFileId(fileId);
  if (pageCount < 1)
    throw new IllegalArgumentException("Amount of pages to load should be not less than 1 but provided value is " + pageCount);
  filesLock.acquireReadLock();
  try {
    // first check that requested page is already cached so we do not need to load it from file
    final PageKey startPageKey = new PageKey(intId, startPageIndex);
    final Lock startPageLock = lockManager.acquireSharedLock(startPageKey);
    // check if page already presented in write cache
    final PageGroup startPageGroup = writeCachePages.get(startPageKey);
    // page is not cached load it from file
    if (startPageGroup == null) {
      // load it from file and preload requested pages
      // there is small optimization
      // if we need single page no need to release already locked page
      Lock[] pageLocks;
      PageKey[] pageKeys;
      if (pageCount > 1) {
        // Release the single lock and re-acquire all page locks in batch to keep a
        // consistent lock-ordering and avoid deadlocks with concurrent loaders.
        startPageLock.unlock();
        pageKeys = new PageKey[pageCount];
        for (int i = 0; i < pageCount; i++) {
          pageKeys[i] = new PageKey(intId, startPageIndex + i);
        }
        pageLocks = lockManager.acquireSharedLocksInBatch(pageKeys);
      } else {
        pageLocks = new Lock[] { startPageLock };
        pageKeys = new PageKey[] { startPageKey };
      }
      OCachePointer[] pagePointers;
      try {
        // load requested page and preload requested amount of pages
        pagePointers = loadFileContent(intId, startPageIndex, pageCount, verifyChecksums);
        if (pagePointers != null) {
          assert pagePointers.length > 0;
          for (int n = 0; n < pagePointers.length; n++) {
            pagePointers[n].incrementReadersReferrer();
            // n == 0 was checked against the write cache above; for preloaded pages
            // the write cache may have been populated concurrently in the meantime.
            if (n > 0) {
              PageGroup pageGroup = writeCachePages.get(pageKeys[n]);
              assert pageKeys[n].pageIndex == pagePointers[n].getPageIndex();
              // if page already exists in cache we should drop already loaded page and load cache page instead
              if (pageGroup != null) {
                pagePointers[n].decrementReadersReferrer();
                pagePointers[n] = pageGroup.page;
                pagePointers[n].incrementReadersReferrer();
              }
            }
          }
          return pagePointers;
        }
      } finally {
        for (Lock pageLock : pageLocks) {
          pageLock.unlock();
        }
      }
      // Page lies past the current end of file.
      // we need to allocate pages on the disk first
      if (!addNewPages)
        return new OCachePointer[0];
      final OClosableEntry<Long, OFileClassic> entry = files.acquire(fileId);
      try {
        final OFileClassic fileClassic = entry.get();
        // Pages [startAllocationIndex .. stopAllocationIndex] must come into existence
        // so that startPageIndex becomes addressable.
        long startAllocationIndex = fileClassic.getFileSize() / pageSize;
        long stopAllocationIndex = startPageIndex;
        final PageKey[] allocationPageKeys = new PageKey[(int) (stopAllocationIndex - startAllocationIndex + 1)];
        for (long pageIndex = startAllocationIndex; pageIndex <= stopAllocationIndex; pageIndex++) {
          int index = (int) (pageIndex - startAllocationIndex);
          allocationPageKeys[index] = new PageKey(intId, pageIndex);
        }
        // use exclusive locks to prevent to have duplication of pointers
        // when page is loaded from file because space is already allocated
        // but it the same moment another page for the same index is added to the write cache
        Lock[] locks = lockManager.acquireExclusiveLocksInBatch(allocationPageKeys);
        try {
          // Re-read the file size under the exclusive locks: another thread may have
          // extended the file between the first check and acquiring the locks.
          final long fileSize = fileClassic.getFileSize();
          final long spaceToAllocate = ((stopAllocationIndex + 1) * pageSize - fileSize);
          OCachePointer resultPointer = null;
          if (spaceToAllocate > 0) {
            final OLogSequenceNumber lastLsn = writeAheadLog == null ? new OLogSequenceNumber(-1, -1) : writeAheadLog.getFlushedLsn();
            fileClassic.allocateSpace(spaceToAllocate);
            startAllocationIndex = fileSize / pageSize;
            for (long index = startAllocationIndex; index <= stopAllocationIndex; index++) {
              final ByteBuffer buffer = bufferPool.acquireDirect(true);
              // New pages carry no checksum yet, so mark them accordingly.
              buffer.putLong(MAGIC_NUMBER_OFFSET, MAGIC_NUMBER_WITHOUT_CHECKSUM);
              final OCachePointer cachePointer = new OCachePointer(buffer, bufferPool, lastLsn, fileId, index);
              // item only in write cache till we will not return
              // it to read cache so we increment exclusive size by one
              // otherwise call of write listener inside pointer may set exclusive size to negative value
              exclusiveWriteCacheSize.increment();
              doPutInCache(cachePointer, new PageKey(intId, index));
              if (index == startPageIndex) {
                resultPointer = cachePointer;
              }
            }
            // we check is it enough space on disk to continue to write data on it
            // otherwise we switch storage in read-only mode
            freeSpaceCheckAfterNewPageAdd((int) (stopAllocationIndex - startAllocationIndex + 1));
          }
          if (resultPointer != null) {
            resultPointer.incrementReadersReferrer();
            cacheHit.setValue(true);
            return new OCachePointer[] { resultPointer };
          }
        } finally {
          for (Lock lock : locks) {
            lock.unlock();
          }
        }
      } finally {
        files.release(entry);
      }
      // The page was allocated by a concurrent thread (spaceToAllocate <= 0 or the
      // requested page was not among those we created);
      // in such case we read it again
      return load(fileId, startPageIndex, pageCount, true, cacheHit, verifyChecksums);
    } else {
      // Fast path: first requested page is already in the write cache.
      startPageGroup.page.incrementReadersReferrer();
      startPageLock.unlock();
      cacheHit.setValue(true);
      return new OCachePointer[] { startPageGroup.page };
    }
  } catch (InterruptedException e) {
    throw OException.wrapException(new OStorageException("Load was interrupted"), e);
  } finally {
    filesLock.releaseReadLock();
  }
}
Aggregations