Usage of com.orientechnologies.orient.core.index.OIndexException in the orientdb project (by orientechnologies).
The following is class OLocalHashTable20, method create.
/**
 * Creates the on-disk structures backing this hash table: the file-state
 * (metadata configuration) file, the hash-table directory, the first
 * file-level metadata page and, when requested, the null-key bucket file.
 * All file changes run inside a single atomic operation so that a failure
 * rolls everything back.
 *
 * @param keySerializer      serializer used to (de)serialize keys
 * @param valueSerializer    serializer used to (de)serialize values
 * @param keyTypes           key types; defensively copied, may be null
 * @param nullKeyIsSupported when true an extra file is created to hold the
 *                           value mapped to the null key
 */
@Override
public void create(OBinarySerializer<K> keySerializer, OBinarySerializer<V> valueSerializer, OType[] keyTypes, boolean nullKeyIsSupported) {
final OAtomicOperation atomicOperation;
try {
// The atomic operation is started before the exclusive lock is taken; if it
// cannot even be started there is nothing to roll back yet.
atomicOperation = startAtomicOperation(false);
} catch (IOException e) {
throw OException.wrapException(new OIndexException("Error during hash table creation"), e);
}
acquireExclusiveLock();
try {
try {
// Keep a private copy of the caller's array so later mutations by the
// caller cannot affect this table.
if (keyTypes != null)
this.keyTypes = Arrays.copyOf(keyTypes, keyTypes.length);
else
this.keyTypes = null;
this.nullKeyIsSupported = nullKeyIsSupported;
this.directory = new OHashTableDirectory(treeStateFileExtension, getName(), getFullName(), durableInNonTxMode, storage);
fileStateId = addFile(atomicOperation, getName() + metadataConfigurationFileExtension);
directory.create();
final OCacheEntry hashStateEntry = addPage(atomicOperation, fileStateId);
// Pin the state page: its index is remembered in hashStateEntryIndex and it
// is re-read by later operations, so it must stay resident in the cache.
pinPage(atomicOperation, hashStateEntry);
hashStateEntry.acquireExclusiveLock();
try {
OHashIndexFileLevelMetadataPage page = new OHashIndexFileLevelMetadataPage(hashStateEntry, getChanges(atomicOperation, hashStateEntry), true);
createFileMetadata(0, page, atomicOperation);
hashStateEntryIndex = hashStateEntry.getPageIndex();
} finally {
hashStateEntry.releaseExclusiveLock();
releasePage(atomicOperation, hashStateEntry);
}
setKeySerializer(keySerializer);
setValueSerializer(valueSerializer);
initHashTreeState(atomicOperation);
if (nullKeyIsSupported)
nullBucketFileId = addFile(atomicOperation, getName() + nullBucketFileExtension);
// Success: commit the atomic operation.
endAtomicOperation(false, null);
} catch (IOException e) {
// Roll back, then rethrow so the outer catch wraps it uniformly.
endAtomicOperation(true, e);
throw e;
} catch (Exception e) {
endAtomicOperation(true, e);
// NOTE(review): this path wraps as OStorageException while the sibling
// methods of this class wrap as OIndexException - confirm the asymmetry
// is intentional.
throw OException.wrapException(new OStorageException("Error during local hash table creation"), e);
}
} catch (IOException e) {
throw OException.wrapException(new OIndexException("Error during local hash table creation"), e);
} finally {
releaseExclusiveLock();
}
}
Usage of com.orientechnologies.orient.core.index.OIndexException in the orientdb project (by orientechnologies).
The following is class OLocalHashTable20, method deleteWithoutLoad.
/**
 * Deletes every file belonging to a hash table without fully loading the
 * table first: all per-level data files registered on the file-state page,
 * the file-state file itself, the directory, and the optional null-key
 * bucket file. Runs inside an atomic operation; any failure triggers a
 * rollback before rethrowing.
 *
 * @param name         name of the hash table whose files are removed
 * @param storageLocal owning storage
 *                     (NOTE(review): this parameter is unused in the body)
 */
@Override
public void deleteWithoutLoad(String name, OAbstractPaginatedStorage storageLocal) {
final OAtomicOperation atomicOperation;
try {
atomicOperation = startAtomicOperation(false);
} catch (IOException e) {
throw OException.wrapException(new OIndexException("Error during hash table deletion"), e);
}
acquireExclusiveLock();
try {
// Nothing to delete when the table's metadata configuration file is absent.
if (isFileExists(atomicOperation, name + metadataConfigurationFileExtension)) {
fileStateId = openFile(atomicOperation, name + metadataConfigurationFileExtension);
OCacheEntry hashStateEntry = loadPage(atomicOperation, fileStateId, 0, true);
try {
OHashIndexFileLevelMetadataPage metadataPage = new OHashIndexFileLevelMetadataPage(hashStateEntry, getChanges(atomicOperation, hashStateEntry), false);
// Delete every per-level data file still registered on the state page.
for (int i = 0; i < HASH_CODE_SIZE; i++) {
if (!metadataPage.isRemoved(i)) {
final long fileId = metadataPage.getFileId(i);
deleteFile(atomicOperation, fileId);
}
}
} finally {
releasePage(atomicOperation, hashStateEntry);
}
if (isFileExists(atomicOperation, fileStateId))
deleteFile(atomicOperation, fileStateId);
directory = new OHashTableDirectory(treeStateFileExtension, name, getFullName(), durableInNonTxMode, storage);
directory.deleteWithoutOpen();
if (isFileExists(atomicOperation, name + nullBucketFileExtension)) {
final long nullBucketId = openFile(atomicOperation, name + nullBucketFileExtension);
deleteFile(atomicOperation, nullBucketId);
}
}
endAtomicOperation(false, null);
} catch (IOException ioe) {
rollback();
throw OException.wrapException(new OIndexException("Cannot delete hash table with name " + name), ioe);
} catch (Exception e) {
rollback();
throw OException.wrapException(new OIndexException("Cannot delete hash table with name " + name), e);
} finally {
releaseExclusiveLock();
}
}
Usage of com.orientechnologies.orient.core.index.OIndexException in the orientdb project (by orientechnologies).
The following is class OLocalHashTable20, method size.
/**
 * Reports how many records this hash table currently holds.
 * <p>
 * The counter is read from the pinned file-level metadata page while holding
 * the component's shared lock and the atomic-operations-manager read lock.
 *
 * @return the number of records stored in the table
 */
@Override
public long size() {
  atomicOperationsManager.acquireReadLock(this);
  try {
    acquireSharedLock();
    try {
      final OAtomicOperation operation = atomicOperationsManager.getCurrentOperation();
      final OCacheEntry stateEntry = loadPage(operation, fileStateId, hashStateEntryIndex, true);
      try {
        // The record counter lives on the file-level metadata page.
        final OHashIndexFileLevelMetadataPage statePage = new OHashIndexFileLevelMetadataPage(stateEntry, getChanges(operation, stateEntry), false);
        return statePage.getRecordsCount();
      } finally {
        releasePage(operation, stateEntry);
      }
    } finally {
      releaseSharedLock();
    }
  } catch (IOException e) {
    throw OException.wrapException(new OIndexException("Error during index size request"), e);
  } finally {
    atomicOperationsManager.releaseReadLock(this);
  }
}
Usage of com.orientechnologies.orient.core.index.OIndexException in the orientdb project (by orientechnologies).
The following is class OLocalHashTable20, method floorEntries.
/**
 * Returns the entries of the first non-empty bucket at or before the bucket
 * addressed by {@code key}, truncated at the key's position: entries of that
 * bucket whose keys compare less than or equal to the search key. Returns an
 * empty array when no non-empty preceding bucket exists.
 *
 * @param key search key; preprocessed via the key serializer before hashing
 * @return matching bucket entries, or an empty array
 */
@Override
public OHashIndexBucket.Entry<K, V>[] floorEntries(K key) {
atomicOperationsManager.acquireReadLock(this);
try {
acquireSharedLock();
try {
OAtomicOperation atomicOperation = atomicOperationsManager.getCurrentOperation();
key = keySerializer.preprocess(key, (Object[]) keyTypes);
final long hashCode = keyHashFunction.hashCode(key);
BucketPath bucketPath = getBucket(hashCode);
long bucketPointer = directory.getNodePointer(bucketPath.nodeIndex, bucketPath.itemIndex + bucketPath.hashMapOffset);
// A node pointer packs both the file level and the page index.
int fileLevel = getFileLevel(bucketPointer);
long pageIndex = getPageIndex(bucketPointer);
OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel, atomicOperation);
try {
OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes, getChanges(atomicOperation, cacheEntry));
// Walk backwards through the directory until a non-empty bucket is found.
// The current page is released before the previous one is loaded;
// cacheEntry always refers to the page the finally block must release.
while (bucket.size() == 0) {
final BucketPath prevBucketPath = prevBucketToFind(bucketPath, bucket.getDepth());
if (prevBucketPath == null)
return OCommonConst.EMPTY_BUCKET_ENTRY_ARRAY;
releasePage(atomicOperation, cacheEntry);
final long prevPointer = directory.getNodePointer(prevBucketPath.nodeIndex, prevBucketPath.itemIndex + prevBucketPath.hashMapOffset);
fileLevel = getFileLevel(prevPointer);
pageIndex = getPageIndex(prevPointer);
cacheEntry = loadPageEntry(pageIndex, fileLevel, atomicOperation);
bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes, getChanges(atomicOperation, cacheEntry));
bucketPath = prevBucketPath;
}
final int startIndex = 0;
final int index = bucket.getIndex(hashCode, key);
final int endIndex;
// A negative result encodes the insertion point as -(pos) - 1
// (same convention as Arrays.binarySearch); a non-negative result
// means the key itself is present and is included in the range.
if (index >= 0)
endIndex = index + 1;
else
endIndex = -index - 1;
return convertBucketToEntries(bucket, startIndex, endIndex);
} finally {
releasePage(atomicOperation, cacheEntry);
}
} finally {
releaseSharedLock();
}
} catch (IOException ioe) {
throw OException.wrapException(new OIndexException("Exception during data read"), ioe);
} finally {
atomicOperationsManager.releaseReadLock(this);
}
}
Usage of com.orientechnologies.orient.core.index.OIndexException in the orientdb project (by orientechnologies).
The following is class OLocalHashTable20, method ceilingEntries.
/**
 * Returns the entries of the first non-empty bucket at or after the bucket
 * addressed by {@code key}, starting at the key's position: entries of that
 * bucket whose keys compare greater than or equal to the search key. Returns
 * an empty array when no non-empty following bucket exists.
 *
 * @param key search key; preprocessed via the key serializer before hashing
 * @return matching bucket entries, or an empty array
 */
@Override
public OHashIndexBucket.Entry<K, V>[] ceilingEntries(K key) {
  atomicOperationsManager.acquireReadLock(this);
  try {
    acquireSharedLock();
    try {
      final OAtomicOperation operation = atomicOperationsManager.getCurrentOperation();
      key = keySerializer.preprocess(key, (Object[]) keyTypes);

      final long keyHash = keyHashFunction.hashCode(key);
      BucketPath path = getBucket(keyHash);
      final long pointer = directory.getNodePointer(path.nodeIndex, path.itemIndex + path.hashMapOffset);

      // A node pointer packs both the file level and the page index.
      int level = getFileLevel(pointer);
      long page = getPageIndex(pointer);

      OCacheEntry entry = loadPageEntry(page, level, operation);
      try {
        OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(entry, keySerializer, valueSerializer, keyTypes, getChanges(operation, entry));

        // Walk forwards through the directory until a non-empty bucket is
        // found; always release the current page before loading the next so
        // 'entry' stays the page the finally block must release.
        while (bucket.size() == 0) {
          path = nextBucketToFind(path, bucket.getDepth());
          if (path == null)
            return OCommonConst.EMPTY_BUCKET_ENTRY_ARRAY;

          releasePage(operation, entry);

          final long next = directory.getNodePointer(path.nodeIndex, path.itemIndex + path.hashMapOffset);
          level = getFileLevel(next);
          page = getPageIndex(next);

          entry = loadPageEntry(page, level, operation);
          bucket = new OHashIndexBucket<K, V>(entry, keySerializer, valueSerializer, keyTypes, getChanges(operation, entry));
        }

        final int index = bucket.getIndex(keyHash, key);
        // A negative result encodes the insertion point as -(pos) - 1
        // (same convention as Arrays.binarySearch).
        final int start = index >= 0 ? index : -index - 1;
        return convertBucketToEntries(bucket, start, bucket.size());
      } finally {
        releasePage(operation, entry);
      }
    } finally {
      releaseSharedLock();
    }
  } catch (IOException ioe) {
    throw OException.wrapException(new OIndexException("Error during data retrieval"), ioe);
  } finally {
    atomicOperationsManager.releaseReadLock(this);
  }
}
Aggregations