Use of com.orientechnologies.orient.core.storage.cache.OCachePointer in project orientdb by orientechnologies.
From the class ClusterPageTest, method testAddOneRecord:
/**
 * Verifies that adding a single record to a cluster page produces identical page
 * content whether the page is written through a WAL changes tree or directly.
 */
public void testAddOneRecord() throws Exception {
final OByteBufferPool pool = OByteBufferPool.instance();

// Page whose modifications are routed through a WAL changes tree.
final ByteBuffer trackedBuffer = pool.acquireDirect(true);
final OCachePointer trackedPointer = new OCachePointer(trackedBuffer, pool, new OLogSequenceNumber(0, 0), 0, 0);
trackedPointer.incrementReferrer();
final OCacheEntry trackedEntry = new OCacheEntry(0, 0, trackedPointer, false);
trackedEntry.acquireExclusiveLock();

// Reference page written directly, with no change tracking.
final ByteBuffer directBuffer = pool.acquireDirect(true);
final OCachePointer directPointer = new OCachePointer(directBuffer, pool, new OLogSequenceNumber(0, 0), 0, 0);
directPointer.incrementReferrer();
final OCacheEntry directEntry = new OCacheEntry(0, 0, directPointer, false);
directEntry.acquireExclusiveLock();

try {
final OClusterPage trackedPage = new OClusterPage(trackedEntry, true, new OWALChangesTree());
final OClusterPage directPage = new OClusterPage(directEntry, true, null);

addOneRecord(trackedPage);
addOneRecord(directPage);

// The tracked page's accumulated changes must reproduce the directly written buffer.
assertChangesTracking(trackedPage, directBuffer, pool);
} finally {
trackedEntry.releaseExclusiveLock();
directEntry.releaseExclusiveLock();
trackedPointer.decrementReferrer();
directPointer.decrementReferrer();
}
}
Use of com.orientechnologies.orient.core.storage.cache.OCachePointer in project orientdb by orientechnologies.
From the class OHashTableDirectory, method releasePage:
/**
 * Releases the latch held on the given directory page and returns its cache entry
 * to the cache layer.
 *
 * @param exclusiveLock   {@code true} if the page was latched exclusively,
 *                        {@code false} if it was latched in shared mode
 * @param atomicOperation current atomic operation context, forwarded to the
 *                        cache-level release
 */
private void releasePage(ODirectoryPage page, boolean exclusiveLock, OAtomicOperation atomicOperation) {
final OCacheEntry entry = page.getEntry();
final OCachePointer pointer = entry.getCachePointer();

// Drop whichever latch mode the caller acquired on the underlying pointer.
if (exclusiveLock) {
pointer.releaseExclusiveLock();
} else {
pointer.releaseSharedLock();
}

releasePage(atomicOperation, entry);
}
Use of com.orientechnologies.orient.core.storage.cache.OCachePointer in project orientdb by orientechnologies.
From the class OWOWCache, method loadFileContent:
/**
 * Reads up to {@code pageCount} consecutive pages of the given file from disk and
 * wraps each page in a freshly allocated {@link OCachePointer}.
 *
 * @param intId           internal (int) id of the file to read from
 * @param startPageIndex  index of the first page to read
 * @param pageCount       maximum number of pages to preload (at least 1)
 * @param verifyChecksums whether page checksums should be validated after reading
 * @return an array of pointers for the pages actually read, or {@code null} if the
 *         file is too short to contain even the first requested page
 * @throws IOException if the underlying file read fails
 */
private OCachePointer[] loadFileContent(final int intId, final long startPageIndex, final int pageCount, boolean verifyChecksums) throws IOException {
// Convert the internal int id into the composite id used by the closable-file registry.
final long fileId = composeFileId(id, intId);
try {
// Acquire keeps the underlying file handle open for the duration of the read.
final OClosableEntry<Long, OFileClassic> entry = files.acquire(fileId);
try {
final OFileClassic fileClassic = entry.get();
if (fileClassic == null)
throw new IllegalArgumentException("File with id " + intId + " not found in WOW Cache");
final long firstPageStartPosition = startPageIndex * pageSize;
final long firstPageEndPosition = firstPageStartPosition + pageSize;
// Only proceed if the file fully contains at least the first requested page.
if (fileClassic.getFileSize() >= firstPageEndPosition) {
final OSessionStoragePerformanceStatistic sessionStoragePerformanceStatistic = performanceStatisticManager.getSessionPerformanceStatistic();
if (sessionStoragePerformanceStatistic != null) {
sessionStoragePerformanceStatistic.startPageReadFromFileTimer();
}
int pagesRead = 0;
// Each new pointer is stamped with the last flushed WAL LSN; (-1, -1) when no WAL is configured.
final OLogSequenceNumber lastLsn = writeAheadLog == null ? new OLogSequenceNumber(-1, -1) : writeAheadLog.getFlushedLsn();
try {
// Fast path: a single page, read into one pooled direct buffer.
if (pageCount == 1) {
final ByteBuffer buffer = bufferPool.acquireDirect(false);
assert buffer.position() == 0;
fileClassic.read(firstPageStartPosition, buffer, false);
if (verifyChecksums && (checksumMode == OChecksumMode.StoreAndVerify || checksumMode == OChecksumMode.StoreAndThrow))
verifyChecksum(buffer, fileId, startPageIndex, null);
// Rewind so the caller sees the page content from the beginning.
buffer.position(0);
// NOTE(review): if verifyChecksum throws, this buffer is not returned to the
// pool here — presumably released elsewhere; TODO confirm.
final OCachePointer dataPointer = new OCachePointer(buffer, bufferPool, lastLsn, fileId, startPageIndex);
pagesRead = 1;
return new OCachePointer[] { dataPointer };
}
// Preload path: clamp the requested count to what the file actually contains.
final long maxPageCount = (fileClassic.getFileSize() - firstPageStartPosition) / pageSize;
final int realPageCount = Math.min((int) maxPageCount, pageCount);
final ByteBuffer[] buffers = new ByteBuffer[realPageCount];
for (int i = 0; i < buffers.length; i++) {
buffers[i] = bufferPool.acquireDirect(false);
assert buffers[i].position() == 0;
}
// Single scatter-read fills all buffers from one contiguous file region.
fileClassic.read(firstPageStartPosition, buffers, false);
if (verifyChecksums && (checksumMode == OChecksumMode.StoreAndVerify || checksumMode == OChecksumMode.StoreAndThrow))
for (int i = 0; i < buffers.length; ++i) verifyChecksum(buffers[i], fileId, startPageIndex + i, buffers);
final OCachePointer[] dataPointers = new OCachePointer[buffers.length];
for (int n = 0; n < buffers.length; n++) {
buffers[n].position(0);
dataPointers[n] = new OCachePointer(buffers[n], bufferPool, lastLsn, fileId, startPageIndex + n);
}
pagesRead = dataPointers.length;
return dataPointers;
} finally {
// Record how many pages were actually read, even on the error path (0 if none).
if (sessionStoragePerformanceStatistic != null) {
sessionStoragePerformanceStatistic.stopPageReadFromFileTimer(pagesRead);
}
}
} else
// File is too short: the caller is expected to handle page allocation itself.
return null;
} finally {
files.release(entry);
}
} catch (InterruptedException e) {
// NOTE(review): the interrupt flag is not restored before wrapping — consider
// Thread.currentThread().interrupt(); kept as-is to preserve behavior.
throw OException.wrapException(new OStorageException("Data load was interrupted"), e);
}
}
Use of com.orientechnologies.orient.core.storage.cache.OCachePointer in project orientdb by orientechnologies.
From the class OWOWCache, method load:
/**
 * Loads one or more pages of a file, serving them from the write cache when
 * present and otherwise reading them from disk (optionally allocating new pages).
 *
 * @param fileId         composite id of the file to load from
 * @param startPageIndex index of the first page requested
 * @param pageCount      number of pages to load/preload (must be >= 1)
 * @param addNewPages    if {@code true}, grow the file when the requested page
 *                       does not yet exist on disk
 * @param cacheHit       out-parameter set to {@code true} when the first page was
 *                       served from cache (or freshly allocated)
 * @param verifyChecksums whether to validate page checksums on disk reads
 * @return pointers with their reader reference counts already incremented; an
 *         empty array when the page is absent and {@code addNewPages} is false
 * @throws IOException if the underlying file access fails
 */
public OCachePointer[] load(long fileId, long startPageIndex, int pageCount, boolean addNewPages, OModifiableBoolean cacheHit, boolean verifyChecksums) throws IOException {
final int intId = extractFileId(fileId);
if (pageCount < 1)
throw new IllegalArgumentException("Amount of pages to load should be not less than 1 but provided value is " + pageCount);
filesLock.acquireReadLock();
try {
// first check that requested page is already cached so we do not need to load it from file
final PageKey startPageKey = new PageKey(intId, startPageIndex);
final Lock startPageLock = lockManager.acquireSharedLock(startPageKey);
// check if page already presented in write cache
final PageGroup startPageGroup = writeCachePages.get(startPageKey);
// page is not cached load it from file
if (startPageGroup == null) {
// load it from file and preload requested pages
// there is small optimization
// if we need single page no need to release already locked page
Lock[] pageLocks;
PageKey[] pageKeys;
if (pageCount > 1) {
// Release the single lock and re-acquire shared locks for the whole range in
// one batch (batch acquisition presumably avoids deadlock via ordering — TODO confirm).
startPageLock.unlock();
pageKeys = new PageKey[pageCount];
for (int i = 0; i < pageCount; i++) {
pageKeys[i] = new PageKey(intId, startPageIndex + i);
}
pageLocks = lockManager.acquireSharedLocksInBatch(pageKeys);
} else {
pageLocks = new Lock[] { startPageLock };
pageKeys = new PageKey[] { startPageKey };
}
OCachePointer[] pagePointers;
try {
// load requested page and preload requested amount of pages
pagePointers = loadFileContent(intId, startPageIndex, pageCount, verifyChecksums);
if (pagePointers != null) {
assert pagePointers.length > 0;
for (int n = 0; n < pagePointers.length; n++) {
pagePointers[n].incrementReadersReferrer();
// Preloaded pages (n > 0) may have been written to the cache concurrently;
// prefer the cached copy over the freshly loaded one.
if (n > 0) {
PageGroup pageGroup = writeCachePages.get(pageKeys[n]);
assert pageKeys[n].pageIndex == pagePointers[n].getPageIndex();
// if page already exists in cache we should drop already loaded page and load cache page instead
if (pageGroup != null) {
pagePointers[n].decrementReadersReferrer();
pagePointers[n] = pageGroup.page;
pagePointers[n].incrementReadersReferrer();
}
}
}
return pagePointers;
}
} finally {
for (Lock pageLock : pageLocks) {
pageLock.unlock();
}
}
// we need to allocate pages on the disk first
if (!addNewPages)
return new OCachePointer[0];
final OClosableEntry<Long, OFileClassic> entry = files.acquire(fileId);
try {
final OFileClassic fileClassic = entry.get();
// Pages from the current end of file up to (and including) the requested index
// must be allocated.
long startAllocationIndex = fileClassic.getFileSize() / pageSize;
long stopAllocationIndex = startPageIndex;
final PageKey[] allocationPageKeys = new PageKey[(int) (stopAllocationIndex - startAllocationIndex + 1)];
for (long pageIndex = startAllocationIndex; pageIndex <= stopAllocationIndex; pageIndex++) {
int index = (int) (pageIndex - startAllocationIndex);
allocationPageKeys[index] = new PageKey(intId, pageIndex);
}
// use exclusive locks to prevent to have duplication of pointers
// when page is loaded from file because space is already allocated
// but it the same moment another page for the same index is added to the write cache
Lock[] locks = lockManager.acquireExclusiveLocksInBatch(allocationPageKeys);
try {
// Re-read the file size under the exclusive locks: another thread may have
// allocated some of the pages while we waited for the locks.
final long fileSize = fileClassic.getFileSize();
final long spaceToAllocate = ((stopAllocationIndex + 1) * pageSize - fileSize);
OCachePointer resultPointer = null;
if (spaceToAllocate > 0) {
final OLogSequenceNumber lastLsn = writeAheadLog == null ? new OLogSequenceNumber(-1, -1) : writeAheadLog.getFlushedLsn();
fileClassic.allocateSpace(spaceToAllocate);
startAllocationIndex = fileSize / pageSize;
for (long index = startAllocationIndex; index <= stopAllocationIndex; index++) {
final ByteBuffer buffer = bufferPool.acquireDirect(true);
// Mark the fresh page as having no checksum stored yet.
buffer.putLong(MAGIC_NUMBER_OFFSET, MAGIC_NUMBER_WITHOUT_CHECKSUM);
final OCachePointer cachePointer = new OCachePointer(buffer, bufferPool, lastLsn, fileId, index);
// item only in write cache till we will not return
// it to read cache so we increment exclusive size by one
// otherwise call of write listener inside pointer may set exclusive size to negative value
exclusiveWriteCacheSize.increment();
doPutInCache(cachePointer, new PageKey(intId, index));
if (index == startPageIndex) {
resultPointer = cachePointer;
}
}
// we check is it enough space on disk to continue to write data on it
// otherwise we switch storage in read-only mode
freeSpaceCheckAfterNewPageAdd((int) (stopAllocationIndex - startAllocationIndex + 1));
}
if (resultPointer != null) {
resultPointer.incrementReadersReferrer();
cacheHit.setValue(true);
return new OCachePointer[] { resultPointer };
}
} finally {
for (Lock lock : locks) {
lock.unlock();
}
}
} finally {
files.release(entry);
}
// Another thread allocated the requested page before we could: retry via
// recursion; the page will now be found either in cache or on disk.
// in such case we read it again
return load(fileId, startPageIndex, pageCount, true, cacheHit, verifyChecksums);
} else {
// Cache hit on the first page: pin it and return it directly.
startPageGroup.page.incrementReadersReferrer();
startPageLock.unlock();
cacheHit.setValue(true);
return new OCachePointer[] { startPageGroup.page };
}
} catch (InterruptedException e) {
// NOTE(review): interrupt flag not restored before wrapping — kept as-is to
// preserve behavior.
throw OException.wrapException(new OStorageException("Load was interrupted"), e);
} finally {
filesLock.releaseReadLock();
}
}
Use of com.orientechnologies.orient.core.storage.cache.OCachePointer in project orientdb by orientechnologies.
From the class SBTreeNonLeafBucketTest, method testSearch:
/**
 * Fills a non-leaf SB-tree bucket with random keys, then verifies that every
 * inserted key can be found at its insertion position and that the left/right
 * child links between adjacent entries are consistent in both directions.
 */
public void testSearch() throws Exception {
final long seed = System.currentTimeMillis();
System.out.println("testSearch seed : " + seed);

// Generate more distinct keys than a single page can possibly hold.
final TreeSet<Long> keySet = new TreeSet<Long>();
final Random rnd = new Random(seed);
while (keySet.size() < 2 * OSBTreeBucket.MAX_PAGE_SIZE_BYTES / OLongSerializer.LONG_SIZE) {
keySet.add(rnd.nextLong());
}

final OByteBufferPool pool = OByteBufferPool.instance();
final ByteBuffer pageBuffer = pool.acquireDirect(true);
final OCachePointer pointer = new OCachePointer(pageBuffer, pool, new OLogSequenceNumber(0, 0), 0, 0);
final OCacheEntry entry = new OCacheEntry(0, 0, pointer, false);
entry.acquireExclusiveLock();
pointer.incrementReferrer();

final OSBTreeBucket<Long, OIdentifiable> bucket = new OSBTreeBucket<Long, OIdentifiable>(entry, false, OLongSerializer.INSTANCE, null, OLinkSerializer.INSTANCE, null);

// Insert in ascending key order until the bucket rejects an entry (page full),
// remembering the position each key was stored at.
final Map<Long, Integer> positionByKey = new HashMap<Long, Integer>();
int position = 0;
for (final Long key : keySet) {
final OSBTreeBucket.SBTreeEntry<Long, OIdentifiable> treeEntry = new OSBTreeBucket.SBTreeEntry<Long, OIdentifiable>(rnd.nextInt(Integer.MAX_VALUE), rnd.nextInt(Integer.MAX_VALUE), key, null);
if (!bucket.addEntry(position, treeEntry, true)) {
break;
}
positionByKey.put(key, position);
position++;
}

Assert.assertEquals(bucket.size(), positionByKey.size());

// Every stored key must be found exactly where it was inserted.
for (final Map.Entry<Long, Integer> expected : positionByKey.entrySet()) {
Assert.assertEquals(bucket.find(expected.getKey()), (int) expected.getValue());
}

// Forward pass: each entry's left child must match its predecessor's right child.
long previousRight = -1;
for (int i = 0; i < bucket.size(); i++) {
final OSBTreeBucket.SBTreeEntry<Long, OIdentifiable> current = bucket.getEntry(i);
if (previousRight > 0)
Assert.assertEquals(current.leftChild, previousRight);
previousRight = current.rightChild;
}

// Backward pass: each entry's right child must match its successor's left child.
long previousLeft = -1;
for (int i = bucket.size() - 1; i >= 0; i--) {
final OSBTreeBucket.SBTreeEntry<Long, OIdentifiable> current = bucket.getEntry(i);
if (previousLeft > 0)
Assert.assertEquals(current.rightChild, previousLeft);
previousLeft = current.leftChild;
}

entry.releaseExclusiveLock();
pointer.decrementReferrer();
}
Aggregations