Example usage of com.orientechnologies.orient.core.storage.cache.OCacheEntry in the OrientDB project: the deleteWithoutLoad method of class OLocalHashTable20.
/**
 * Deletes every file that backs this hash table without fully opening/loading the table first.
 * <p>
 * Runs inside a (non-nested) atomic operation: on any failure the operation is rolled back and
 * the error is rethrown wrapped in an {@link OIndexException}. If the metadata configuration
 * file does not exist, the table is assumed absent and only the atomic operation is completed.
 *
 * @param name         name of the hash table whose files should be removed
 * @param storageLocal owning storage (unused here; kept for interface compatibility)
 */
@Override
public void deleteWithoutLoad(String name, OAbstractPaginatedStorage storageLocal) {
  final OAtomicOperation atomicOperation;
  try {
    atomicOperation = startAtomicOperation(false);
  } catch (IOException e) {
    throw OException.wrapException(new OIndexException("Error during hash table deletion"), e);
  }

  acquireExclusiveLock();
  try {
    if (isFileExists(atomicOperation, name + metadataConfigurationFileExtension)) {
      fileStateId = openFile(atomicOperation, name + metadataConfigurationFileExtension);

      final OCacheEntry hashStateEntry = loadPage(atomicOperation, fileStateId, 0, true);
      try {
        final OHashIndexFileLevelMetadataPage metadataPage =
            new OHashIndexFileLevelMetadataPage(hashStateEntry, getChanges(atomicOperation, hashStateEntry), false);
        // Delete every data file still registered in the metadata page.
        for (int i = 0; i < HASH_CODE_SIZE; i++) {
          if (!metadataPage.isRemoved(i)) {
            deleteFile(atomicOperation, metadataPage.getFileId(i));
          }
        }
      } finally {
        releasePage(atomicOperation, hashStateEntry);
      }

      if (isFileExists(atomicOperation, fileStateId))
        deleteFile(atomicOperation, fileStateId);

      directory = new OHashTableDirectory(treeStateFileExtension, name, getFullName(), durableInNonTxMode, storage);
      directory.deleteWithoutOpen();

      if (isFileExists(atomicOperation, name + nullBucketFileExtension)) {
        final long nullBucketId = openFile(atomicOperation, name + nullBucketFileExtension);
        deleteFile(atomicOperation, nullBucketId);
      }
    }

    endAtomicOperation(false, null);
  } catch (Exception e) {
    // IOException and runtime failures were handled identically (duplicate catch bodies
    // in the original); a single catch covers both: roll back, then rethrow wrapped.
    rollback();
    throw OException.wrapException(new OIndexException("Cannot delete hash table with name " + name), e);
  } finally {
    releaseExclusiveLock();
  }
}
Example usage of com.orientechnologies.orient.core.storage.cache.OCacheEntry in the OrientDB project: the clear method of class OSBTree.
/**
 * Removes every entry from the tree, leaving a structurally valid but empty SB-tree.
 * <p>
 * The whole operation is executed atomically: the data file (and the null-key file when
 * null keys are supported) is truncated, the root bucket is re-created, and the tree size
 * is reset to zero. On failure the atomic operation is rolled back and the error rethrown.
 */
public void clear() {
  startOperation();
  try {
    final OAtomicOperation atomicOperation;
    try {
      atomicOperation = startAtomicOperation(true);
    } catch (IOException e) {
      throw OException.wrapException(new OSBTreeException("Error during sbtree clear", this), e);
    }

    acquireExclusiveLock();
    try {
      // Drop all pages of the main file, and of the null-key file when present.
      truncateFile(atomicOperation, fileId);
      if (nullPointerSupport)
        truncateFile(atomicOperation, nullBucketFileId);

      // Re-create the root page if truncation removed it.
      OCacheEntry rootEntry = loadPage(atomicOperation, fileId, ROOT_INDEX, false);
      if (rootEntry == null)
        rootEntry = addPage(atomicOperation, fileId);

      rootEntry.acquireExclusiveLock();
      try {
        // Initialize a fresh (leaf) root bucket and reset the persisted size counter.
        final OSBTreeBucket<K, V> root =
            new OSBTreeBucket<K, V>(rootEntry, true, keySerializer, keyTypes, valueSerializer,
                getChanges(atomicOperation, rootEntry));
        root.setTreeSize(0);
      } finally {
        rootEntry.releaseExclusiveLock();
        releasePage(atomicOperation, rootEntry);
      }

      endAtomicOperation(false, null);
    } catch (IOException e) {
      rollback(e);
      throw OException.wrapException(new OSBTreeException("Error during clear of sbtree with name " + getName(), this), e);
    } catch (RuntimeException e) {
      rollback(e);
      throw e;
    } finally {
      releaseExclusiveLock();
    }
  } finally {
    completeOperation();
  }
}
Example usage of com.orientechnologies.orient.core.storage.cache.OCacheEntry in the OrientDB project: the setSize method of class OSBTree.
/**
 * Persists the tree size counter into the root bucket.
 *
 * @param size            new number of entries to record
 * @param atomicOperation atomic operation this write participates in
 * @throws IOException if the root page cannot be loaded or written
 */
private void setSize(long size, OAtomicOperation atomicOperation) throws IOException {
  final OCacheEntry rootEntry = loadPage(atomicOperation, fileId, ROOT_INDEX, false);
  rootEntry.acquireExclusiveLock();
  try {
    // Wrap the root page and update its stored size in one step.
    new OSBTreeBucket<K, V>(rootEntry, keySerializer, keyTypes, valueSerializer,
        getChanges(atomicOperation, rootEntry)).setTreeSize(size);
  } finally {
    rootEntry.releaseExclusiveLock();
    releasePage(atomicOperation, rootEntry);
  }
}
Example usage of com.orientechnologies.orient.core.storage.cache.OCacheEntry in the OrientDB project: the lowerEntries method of class OLocalHashTable.
/**
 * Returns all entries of the bucket that holds the greatest keys strictly lower than
 * {@code key}, or an empty array when no such bucket exists.
 *
 * Walks backwards through the bucket chain (via the directory) until it finds a non-empty
 * bucket whose first key is lower than {@code key}, then returns that bucket's entries up
 * to (but excluding) the position of {@code key}.
 */
@Override
public OHashIndexBucket.Entry<K, V>[] lowerEntries(K key) {
startOperation();
try {
atomicOperationsManager.acquireReadLock(this);
try {
acquireSharedLock();
try {
key = keySerializer.preprocess(key, (Object[]) keyTypes);
final long hashCode = keyHashFunction.hashCode(key);
// Locate the bucket page that the key's hash code maps to.
OHashTable.BucketPath bucketPath = getBucket(hashCode);
long bucketPointer = directory.getNodePointer(bucketPath.nodeIndex, bucketPath.itemIndex + bucketPath.hashMapOffset);
long pageIndex = getPageIndex(bucketPointer);
OAtomicOperation atomicOperation = atomicOperationsManager.getCurrentOperation();
OCacheEntry cacheEntry = loadPage(atomicOperation, fileId, pageIndex, false);
cacheEntry.acquireSharedLock();
try {
OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes, getChanges(atomicOperation, cacheEntry));
// Step backwards while the current bucket is empty or its smallest key is >= key.
while (bucket.size() == 0 || comparator.compare(bucket.getKey(0), key) >= 0) {
final OHashTable.BucketPath prevBucketPath = prevBucketToFind(bucketPath, bucket.getDepth());
if (prevBucketPath == null)
return OCommonConst.EMPTY_BUCKET_ENTRY_ARRAY;
// Hand off the page lock: release the current page before loading the previous one.
// NOTE(review): if loadPage/getNodePointer below throws, the finally block releases
// this already-released entry again — looks like a double-release hazard; confirm.
cacheEntry.releaseSharedLock();
releasePage(atomicOperation, cacheEntry);
final long prevPointer = directory.getNodePointer(prevBucketPath.nodeIndex, prevBucketPath.itemIndex + prevBucketPath.hashMapOffset);
pageIndex = getPageIndex(prevPointer);
cacheEntry = loadPage(atomicOperation, fileId, pageIndex, false);
cacheEntry.acquireSharedLock();
bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes, getChanges(atomicOperation, cacheEntry));
bucketPath = prevBucketPath;
}
final int startIndex = 0;
// getIndex returns the key's position, or (-(insertion point) - 1) when absent;
// either way endIndex is the first position NOT strictly lower than key.
final int index = bucket.getIndex(hashCode, key);
final int endIndex;
if (index >= 0)
endIndex = index;
else
endIndex = -index - 1;
return convertBucketToEntries(bucket, startIndex, endIndex);
} finally {
cacheEntry.releaseSharedLock();
releasePage(atomicOperation, cacheEntry);
}
} finally {
releaseSharedLock();
}
} catch (IOException ioe) {
throw OException.wrapException(new OLocalHashTableException("Exception during data read", this), ioe);
} finally {
atomicOperationsManager.releaseReadLock(this);
}
} finally {
completeOperation();
}
}
Example usage of com.orientechnologies.orient.core.storage.cache.OCacheEntry in the OrientDB project: the initHashTreeState method of class OLocalHashTable.
/**
 * Resets the on-disk hash tree to its initial state: truncates the data file, allocates
 * one empty bucket page per root-level slot, rebuilds the directory's root node, and
 * resets the record counter in the file-state metadata page.
 *
 * @param atomicOperation atomic operation all page writes participate in
 * @throws IOException if any page allocation or load fails
 */
@SuppressFBWarnings("DLS_DEAD_LOCAL_STORE")
private void initHashTreeState(OAtomicOperation atomicOperation) throws IOException {
  truncateFile(atomicOperation, fileId);

  // Allocate and initialize one empty bucket page per slot of the root level.
  for (long pageIndex = 0; pageIndex < MAX_LEVEL_SIZE; pageIndex++) {
    final OCacheEntry bucketEntry = addPage(atomicOperation, fileId);
    assert bucketEntry.getPageIndex() == pageIndex;

    bucketEntry.acquireExclusiveLock();
    try {
      // The constructor formats the page content; the reference itself is intentionally
      // unused (hence the DLS_DEAD_LOCAL_STORE suppression on this method).
      final OHashIndexBucket<K, V> ignored =
          new OHashIndexBucket<K, V>(MAX_LEVEL_DEPTH, bucketEntry, keySerializer, valueSerializer, keyTypes,
              getChanges(atomicOperation, bucketEntry));
    } finally {
      bucketEntry.releaseExclusiveLock();
      releasePage(atomicOperation, bucketEntry);
    }
  }

  // Point every root-node slot at its freshly created bucket page.
  final long[] rootTree = new long[MAX_LEVEL_SIZE];
  for (int slot = 0; slot < MAX_LEVEL_SIZE; slot++)
    rootTree[slot] = createBucketPointer(slot);

  directory.clear();
  directory.addNewNode((byte) 0, (byte) 0, (byte) MAX_LEVEL_DEPTH, rootTree);

  // Zero the record counter stored in the file-state metadata page.
  final OCacheEntry stateEntry = loadPage(atomicOperation, fileStateId, hashStateEntryIndex, true);
  stateEntry.acquireExclusiveLock();
  try {
    new OHashIndexFileLevelMetadataPage(stateEntry, getChanges(atomicOperation, stateEntry), false)
        .setRecordsCount(0);
  } finally {
    stateEntry.releaseExclusiveLock();
    releasePage(atomicOperation, stateEntry);
  }
}
Aggregations