Use of com.orientechnologies.orient.core.storage.cache.OCacheEntry in project orientdb by orientechnologies.
From the class OLocalHashTable20, method size():
@Override
public long size() {
  atomicOperationsManager.acquireReadLock(this);
  try {
    acquireSharedLock();
    try {
      final OAtomicOperation atomicOperation = atomicOperationsManager.getCurrentOperation();
      // The records count is stored on the hash-state (metadata) page,
      // so sizing the table is a single page load.
      final OCacheEntry hashStateEntry = loadPage(atomicOperation, fileStateId, hashStateEntryIndex, true);
      try {
        OHashIndexFileLevelMetadataPage metadataPage = new OHashIndexFileLevelMetadataPage(hashStateEntry,
            getChanges(atomicOperation, hashStateEntry), false);
        return metadataPage.getRecordsCount();
      } finally {
        releasePage(atomicOperation, hashStateEntry);
      }
    } finally {
      releaseSharedLock();
    }
  } catch (IOException e) {
    throw OException.wrapException(new OIndexException("Error during index size request"), e);
  } finally {
    atomicOperationsManager.releaseReadLock(this);
  }
}
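Every OCacheEntry loaded here is released in a finally block, a pattern all the snippets on this page share. A minimal usage sketch of size() follows; the helper and its table parameter are hypothetical, since constructing an OLocalHashTable20 depends on the owning storage and is not shown on this page. The imports (package paths assumed from the 2.2.x source tree) also cover the later sketches:
import com.orientechnologies.orient.core.index.hashindex.local.OHashIndexBucket;
import com.orientechnologies.orient.core.index.hashindex.local.OLocalHashTable20;
// Hypothetical helper; assumes `table` was created and opened elsewhere.
static long reportSize(OLocalHashTable20<String, Integer> table) {
  long recordsCount = table.size(); // one metadata-page read under a shared lock
  System.out.println("entries in hash index: " + recordsCount);
  return recordsCount;
}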
Use of com.orientechnologies.orient.core.storage.cache.OCacheEntry in project orientdb by orientechnologies.
From the class OLocalHashTable20, method floorEntries():
@Override
public OHashIndexBucket.Entry<K, V>[] floorEntries(K key) {
  atomicOperationsManager.acquireReadLock(this);
  try {
    acquireSharedLock();
    try {
      OAtomicOperation atomicOperation = atomicOperationsManager.getCurrentOperation();
      key = keySerializer.preprocess(key, (Object[]) keyTypes);
      final long hashCode = keyHashFunction.hashCode(key);
      BucketPath bucketPath = getBucket(hashCode);
      long bucketPointer = directory.getNodePointer(bucketPath.nodeIndex, bucketPath.itemIndex + bucketPath.hashMapOffset);
      int fileLevel = getFileLevel(bucketPointer);
      long pageIndex = getPageIndex(bucketPointer);
      OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel, atomicOperation);
      try {
        OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes,
            getChanges(atomicOperation, cacheEntry));
        // Walk backwards through empty buckets, releasing each page
        // before loading the previous one.
        while (bucket.size() == 0) {
          final BucketPath prevBucketPath = prevBucketToFind(bucketPath, bucket.getDepth());
          if (prevBucketPath == null)
            return OCommonConst.EMPTY_BUCKET_ENTRY_ARRAY;
          releasePage(atomicOperation, cacheEntry);
          final long prevPointer = directory.getNodePointer(prevBucketPath.nodeIndex,
              prevBucketPath.itemIndex + prevBucketPath.hashMapOffset);
          fileLevel = getFileLevel(prevPointer);
          pageIndex = getPageIndex(prevPointer);
          cacheEntry = loadPageEntry(pageIndex, fileLevel, atomicOperation);
          bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes,
              getChanges(atomicOperation, cacheEntry));
          bucketPath = prevBucketPath;
        }
        final int startIndex = 0;
        // getIndex follows the Arrays.binarySearch convention: a negative
        // result encodes the insertion point as -(insertionPoint) - 1.
        final int index = bucket.getIndex(hashCode, key);
        final int endIndex;
        if (index >= 0)
          endIndex = index + 1;
        else
          endIndex = -index - 1;
        return convertBucketToEntries(bucket, startIndex, endIndex);
      } finally {
        releasePage(atomicOperation, cacheEntry);
      }
    } finally {
      releaseSharedLock();
    }
  } catch (IOException ioe) {
    throw OException.wrapException(new OIndexException("Exception during data read"), ioe);
  } finally {
    atomicOperationsManager.releaseReadLock(this);
  }
}
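A usage sketch for the method above: floorEntries returns the entries of the first non-empty bucket at or before the key's hash position, from the start of that bucket up to and including the key. The helper below is hypothetical and assumes the same initialized table and imports as the previous sketch; the public key and value fields are as declared on OHashIndexBucket.Entry:
// Hypothetical helper; prints every entry returned by the floor query.
static <K, V> void printFloorEntries(OLocalHashTable20<K, V> table, K key) {
  for (OHashIndexBucket.Entry<K, V> entry : table.floorEntries(key))
    System.out.println(entry.key + " -> " + entry.value);
}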
Use of com.orientechnologies.orient.core.storage.cache.OCacheEntry in project orientdb by orientechnologies.
From the class OLocalHashTable20, method splitBucket():
private BucketSplitResult splitBucket(OHashIndexBucket<K, V> bucket, int fileLevel, long pageIndex,
    OAtomicOperation atomicOperation) throws IOException {
  int bucketDepth = bucket.getDepth();
  int newBucketDepth = bucketDepth + 1;
  final int newFileLevel = newBucketDepth - MAX_LEVEL_DEPTH;
  final OCacheEntry hashStateEntry = loadPage(atomicOperation, fileStateId, hashStateEntryIndex, true);
  hashStateEntry.acquireExclusiveLock();
  try {
    OHashIndexFileLevelMetadataPage metadataPage = new OHashIndexFileLevelMetadataPage(hashStateEntry,
        getChanges(atomicOperation, hashStateEntry), false);
    if (metadataPage.isRemoved(newFileLevel))
      createFileMetadata(newFileLevel, metadataPage, atomicOperation);
    // Reuse a removed ("tombstone") bucket pair if one exists, before growing the file.
    final long tombstoneIndex = metadataPage.getTombstoneIndex(newFileLevel);
    final long updatedBucketIndex;
    if (tombstoneIndex >= 0) {
      final OCacheEntry tombstoneCacheEntry = loadPageEntry(tombstoneIndex, newFileLevel, atomicOperation);
      try {
        final OHashIndexBucket<K, V> tombstone = new OHashIndexBucket<K, V>(tombstoneCacheEntry, keySerializer,
            valueSerializer, keyTypes, getChanges(atomicOperation, tombstoneCacheEntry));
        metadataPage.setTombstoneIndex(newFileLevel, tombstone.getNextRemovedBucketPair());
        updatedBucketIndex = tombstoneIndex;
      } finally {
        releasePage(atomicOperation, tombstoneCacheEntry);
      }
    } else
      updatedBucketIndex = getFilledUpTo(atomicOperation, metadataPage.getFileId(newFileLevel));
    // The two halves of the split occupy adjacent pages of the new level's file.
    final long newBucketIndex = updatedBucketIndex + 1;
    final OCacheEntry updatedBucketCacheEntry = loadPageEntry(updatedBucketIndex, newFileLevel, atomicOperation);
    updatedBucketCacheEntry.acquireExclusiveLock();
    try {
      final OCacheEntry newBucketCacheEntry = loadPageEntry(newBucketIndex, newFileLevel, atomicOperation);
      newBucketCacheEntry.acquireExclusiveLock();
      try {
        final OHashIndexBucket<K, V> updatedBucket = new OHashIndexBucket<K, V>(newBucketDepth, updatedBucketCacheEntry,
            keySerializer, valueSerializer, keyTypes, getChanges(atomicOperation, updatedBucketCacheEntry));
        final OHashIndexBucket<K, V> newBucket = new OHashIndexBucket<K, V>(newBucketDepth, newBucketCacheEntry,
            keySerializer, valueSerializer, keyTypes, getChanges(atomicOperation, newBucketCacheEntry));
        splitBucketContent(bucket, updatedBucket, newBucket, newBucketDepth);
        assert bucket.getDepth() == bucketDepth;
        // One bucket leaves the old level; two appear on the new one.
        metadataPage.setBucketsCount(fileLevel, metadataPage.getBucketsCount(fileLevel) - 1);
        assert metadataPage.getBucketsCount(fileLevel) >= 0;
        updatedBucket.setSplitHistory(fileLevel, pageIndex);
        newBucket.setSplitHistory(fileLevel, pageIndex);
        metadataPage.setBucketsCount(newFileLevel, metadataPage.getBucketsCount(newFileLevel) + 2);
        final long updatedBucketPointer = createBucketPointer(updatedBucketIndex, newFileLevel);
        final long newBucketPointer = createBucketPointer(newBucketIndex, newFileLevel);
        return new BucketSplitResult(updatedBucketPointer, newBucketPointer, newBucketDepth);
      } finally {
        newBucketCacheEntry.releaseExclusiveLock();
        releasePage(atomicOperation, newBucketCacheEntry);
      }
    } finally {
      updatedBucketCacheEntry.releaseExclusiveLock();
      releasePage(atomicOperation, updatedBucketCacheEntry);
    }
  } finally {
    hashStateEntry.releaseExclusiveLock();
    releasePage(atomicOperation, hashStateEntry);
  }
}
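splitBucket shows the write-path variant of the OCacheEntry discipline: each writable page is exclusively locked right after loading, and the lock is released before the page itself, always in a finally block. Distilled into a hypothetical in-class helper (withExclusivePage is not a method of the real class; loadPageEntry and releasePage are the private helpers of OLocalHashTable20 used throughout these snippets):
// A sketch of the write-path page discipline, not a method of the real class.
private void withExclusivePage(long pageIndex, int fileLevel, OAtomicOperation atomicOperation) throws IOException {
  final OCacheEntry entry = loadPageEntry(pageIndex, fileLevel, atomicOperation);
  entry.acquireExclusiveLock();
  try {
    // ... mutate the page through a wrapper such as OHashIndexBucket ...
  } finally {
    entry.releaseExclusiveLock(); // unlock before handing the page back to the cache
    releasePage(atomicOperation, entry);
  }
}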
Use of com.orientechnologies.orient.core.storage.cache.OCacheEntry in project orientdb by orientechnologies.
From the class OLocalHashTable20, method ceilingEntries():
@Override
public OHashIndexBucket.Entry<K, V>[] ceilingEntries(K key) {
  atomicOperationsManager.acquireReadLock(this);
  try {
    acquireSharedLock();
    try {
      OAtomicOperation atomicOperation = atomicOperationsManager.getCurrentOperation();
      key = keySerializer.preprocess(key, (Object[]) keyTypes);
      final long hashCode = keyHashFunction.hashCode(key);
      BucketPath bucketPath = getBucket(hashCode);
      long bucketPointer = directory.getNodePointer(bucketPath.nodeIndex, bucketPath.itemIndex + bucketPath.hashMapOffset);
      int fileLevel = getFileLevel(bucketPointer);
      long pageIndex = getPageIndex(bucketPointer);
      OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel, atomicOperation);
      try {
        OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes,
            getChanges(atomicOperation, cacheEntry));
        // Walk forwards through empty buckets, releasing each page
        // before loading the next one.
        while (bucket.size() == 0) {
          bucketPath = nextBucketToFind(bucketPath, bucket.getDepth());
          if (bucketPath == null)
            return OCommonConst.EMPTY_BUCKET_ENTRY_ARRAY;
          releasePage(atomicOperation, cacheEntry);
          final long nextPointer = directory.getNodePointer(bucketPath.nodeIndex,
              bucketPath.itemIndex + bucketPath.hashMapOffset);
          fileLevel = getFileLevel(nextPointer);
          pageIndex = getPageIndex(nextPointer);
          cacheEntry = loadPageEntry(pageIndex, fileLevel, atomicOperation);
          bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes,
              getChanges(atomicOperation, cacheEntry));
        }
        final int index = bucket.getIndex(hashCode, key);
        final int startIndex;
        if (index >= 0)
          startIndex = index;
        else
          startIndex = -index - 1;
        final int endIndex = bucket.size();
        return convertBucketToEntries(bucket, startIndex, endIndex);
      } finally {
        releasePage(atomicOperation, cacheEntry);
      }
    } finally {
      releaseSharedLock();
    }
  } catch (IOException ioe) {
    throw OException.wrapException(new OIndexException("Error during data retrieval"), ioe);
  } finally {
    atomicOperationsManager.releaseReadLock(this);
  }
}
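ceilingEntries mirrors floorEntries: it scans forward through empty buckets and returns everything from the key's position to the end of the bucket. Since getIndex follows the java.util.Arrays.binarySearch convention, a worked example of the index arithmetic: if the key is absent and its insertion point would be 2, getIndex returns -3, and -(-3) - 1 == 2 recovers the first entry greater than or equal to the key. A hypothetical helper, under the same assumptions as the earlier sketches:
// Hypothetical helper; prints every entry at or after the given key.
static <K, V> void printCeilingEntries(OLocalHashTable20<K, V> table, K key) {
  for (OHashIndexBucket.Entry<K, V> entry : table.ceilingEntries(key))
    System.out.println(entry.key + " -> " + entry.value);
}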
Use of com.orientechnologies.orient.core.storage.cache.OCacheEntry in project orientdb by orientechnologies.
From the class OLocalHashTable20, method remove():
@Override
public V remove(K key) {
  final OAtomicOperation atomicOperation;
  try {
    atomicOperation = startAtomicOperation(true);
  } catch (IOException e) {
    throw OException.wrapException(new OIndexException("Error during hash table entry deletion"), e);
  }
  acquireExclusiveLock();
  try {
    checkNullSupport(key);
    int sizeDiff = 0;
    if (key != null) {
      key = keySerializer.preprocess(key, (Object[]) keyTypes);
      final long hashCode = keyHashFunction.hashCode(key);
      final BucketPath nodePath = getBucket(hashCode);
      final long bucketPointer = directory.getNodePointer(nodePath.nodeIndex, nodePath.itemIndex + nodePath.hashMapOffset);
      final long pageIndex = getPageIndex(bucketPointer);
      final int fileLevel = getFileLevel(bucketPointer);
      final V removed;
      final boolean found;
      final OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel, atomicOperation);
      cacheEntry.acquireExclusiveLock();
      try {
        final OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer,
            keyTypes, getChanges(atomicOperation, cacheEntry));
        final int positionIndex = bucket.getIndex(hashCode, key);
        found = positionIndex >= 0;
        if (found) {
          removed = bucket.deleteEntry(positionIndex).value;
          sizeDiff--;
          // Underfilled buckets may be merged back together after a deletion.
          mergeBucketsAfterDeletion(nodePath, bucket, atomicOperation);
        } else
          removed = null;
      } finally {
        cacheEntry.releaseExclusiveLock();
        releasePage(atomicOperation, cacheEntry);
      }
      if (found) {
        if (nodePath.parent != null) {
          final int hashMapSize = 1 << nodePath.nodeLocalDepth;
          // If every map of the directory node now points at the same bucket,
          // the node can be collapsed into its parent.
          final boolean allMapsContainSameBucket = checkAllMapsContainSameBucket(directory.getNode(nodePath.nodeIndex),
              hashMapSize);
          if (allMapsContainSameBucket)
            mergeNodeToParent(nodePath);
        }
        changeSize(sizeDiff, atomicOperation);
      }
      endAtomicOperation(false, null);
      return removed;
    } else {
      // Null keys live in a dedicated single-page file.
      if (getFilledUpTo(atomicOperation, nullBucketFileId) == 0) {
        endAtomicOperation(false, null);
        return null;
      }
      V removed = null;
      OCacheEntry cacheEntry = loadPage(atomicOperation, nullBucketFileId, 0, false);
      if (cacheEntry == null)
        cacheEntry = addPage(atomicOperation, nullBucketFileId);
      cacheEntry.acquireExclusiveLock();
      try {
        final ONullBucket<V> nullBucket = new ONullBucket<V>(cacheEntry, getChanges(atomicOperation, cacheEntry),
            valueSerializer, false);
        removed = nullBucket.getValue();
        if (removed != null) {
          nullBucket.removeValue();
          sizeDiff--;
        }
      } finally {
        cacheEntry.releaseExclusiveLock();
        releasePage(atomicOperation, cacheEntry);
      }
      changeSize(sizeDiff, atomicOperation);
      endAtomicOperation(false, null);
      return removed;
    }
  } catch (IOException e) {
    rollback();
    throw OException.wrapException(new OIndexException("Error during index removal"), e);
  } catch (Exception e) {
    rollback();
    throw OException.wrapException(new OStorageException("Error during index removal"), e);
  } finally {
    releaseExclusiveLock();
  }
}
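remove starts its own atomic operation, ends it on every successful path, and rolls back on failure; a null key is routed to the dedicated null-bucket file and is only legal when the index was created with null support (see checkNullSupport). A hypothetical caller, under the same assumptions as the sketches above:
// Hypothetical helper; remove(null) is only legal when the index supports null keys.
static <K, V> V removeAndReport(OLocalHashTable20<K, V> table, K key) {
  V previous = table.remove(key);
  System.out.println(previous == null ? "no entry for key " + key : "removed value " + previous);
  return previous;
}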