Use of com.orientechnologies.orient.core.index.OIndexException in project orientdb by orientechnologies.
The class OLocalHashTable20, method get().
@Override
public V get(K key) {
  atomicOperationsManager.acquireReadLock(this);
  try {
    acquireSharedLock();
    try {
      final OAtomicOperation atomicOperation = atomicOperationsManager.getCurrentOperation();
      checkNullSupport(key);
      if (key == null) {
        if (getFilledUpTo(atomicOperation, nullBucketFileId) == 0)
          return null;
        V result = null;
        OCacheEntry cacheEntry = loadPage(atomicOperation, nullBucketFileId, 0, false);
        try {
          ONullBucket<V> nullBucket = new ONullBucket<V>(cacheEntry, getChanges(atomicOperation, cacheEntry), valueSerializer, false);
          result = nullBucket.getValue();
        } finally {
          releasePage(atomicOperation, cacheEntry);
        }
        return result;
      } else {
        key = keySerializer.preprocess(key, (Object[]) keyTypes);
        final long hashCode = keyHashFunction.hashCode(key);
        BucketPath bucketPath = getBucket(hashCode);
        final long bucketPointer = directory.getNodePointer(bucketPath.nodeIndex, bucketPath.itemIndex + bucketPath.hashMapOffset);
        if (bucketPointer == 0)
          return null;
        long pageIndex = getPageIndex(bucketPointer);
        int fileLevel = getFileLevel(bucketPointer);
        OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel, atomicOperation);
        try {
          final OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes, getChanges(atomicOperation, cacheEntry));
          OHashIndexBucket.Entry<K, V> entry = bucket.find(key, hashCode);
          if (entry == null)
            return null;
          return entry.value;
        } finally {
          releasePage(atomicOperation, cacheEntry);
        }
      }
    } finally {
      releaseSharedLock();
    }
  } catch (IOException e) {
    throw OException.wrapException(new OIndexException("Exception during index value retrieval"), e);
  } finally {
    atomicOperationsManager.releaseReadLock(this);
  }
}
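Every snippet on this page follows the same idiom for surfacing OIndexException: a low-level IOException is caught at the method boundary and re-thrown via OException.wrapException so that the original cause is preserved on the unchecked, component-level exception. The following is a minimal, self-contained sketch of that idiom only; the IndexExceptionWrappingSketch class and its loadFromDisk helper are hypothetical stand-ins for the page-cache read done by the real method, not part of OrientDB.

import java.io.IOException;

import com.orientechnologies.common.exception.OException;
import com.orientechnologies.orient.core.index.OIndexException;

public class IndexExceptionWrappingSketch {

  public Object readValue() {
    try {
      // Hypothetical placeholder for the loadPage(...) / bucket lookup performed by get().
      return loadFromDisk();
    } catch (IOException e) {
      // wrapException attaches `e` as the cause of the new OIndexException before it is thrown.
      throw OException.wrapException(new OIndexException("Exception during index value retrieval"), e);
    }
  }

  // Hypothetical helper that simulates a failing disk read.
  private Object loadFromDisk() throws IOException {
    throw new IOException("simulated read failure");
  }
}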
Use of com.orientechnologies.orient.core.index.OIndexException in project orientdb by orientechnologies.
The class OLocalHashTable20, method flush().
@Override
public void flush() {
  acquireExclusiveLock();
  try {
    OAtomicOperation atomicOperation = atomicOperationsManager.getCurrentOperation();
    final OCacheEntry hashStateEntry = loadPage(atomicOperation, fileStateId, hashStateEntryIndex, true);
    try {
      for (int i = 0; i < HASH_CODE_SIZE; i++) {
        OHashIndexFileLevelMetadataPage metadataPage = new OHashIndexFileLevelMetadataPage(hashStateEntry, getChanges(atomicOperation, hashStateEntry), false);
        if (!metadataPage.isRemoved(i))
          writeCache.flush(metadataPage.getFileId(i));
      }
    } finally {
      releasePage(atomicOperation, hashStateEntry);
    }
    writeCache.flush(fileStateId);
    directory.flush();
    if (nullKeyIsSupported)
      writeCache.flush(nullBucketFileId);
  } catch (IOException e) {
    throw OException.wrapException(new OIndexException("Error during hash table flush"), e);
  } finally {
    releaseExclusiveLock();
  }
}
Use of com.orientechnologies.orient.core.index.OIndexException in project orientdb by orientechnologies.
The class OLocalHashTable20, method lastEntry().
@Override
public OHashIndexBucket.Entry<K, V> lastEntry() {
  atomicOperationsManager.acquireReadLock(this);
  try {
    acquireSharedLock();
    try {
      OAtomicOperation atomicOperation = atomicOperationsManager.getCurrentOperation();
      BucketPath bucketPath = getBucket(HASH_CODE_MAX_VALUE);
      long bucketPointer = directory.getNodePointer(bucketPath.nodeIndex, bucketPath.itemIndex + bucketPath.hashMapOffset);
      int fileLevel = getFileLevel(bucketPointer);
      long pageIndex = getPageIndex(bucketPointer);
      OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel, atomicOperation);
      try {
        OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes, getChanges(atomicOperation, cacheEntry));
        while (bucket.size() == 0) {
          final BucketPath prevBucketPath = prevBucketToFind(bucketPath, bucket.getDepth());
          if (prevBucketPath == null)
            return null;
          releasePage(atomicOperation, cacheEntry);
          final long prevPointer = directory.getNodePointer(prevBucketPath.nodeIndex, prevBucketPath.itemIndex + prevBucketPath.hashMapOffset);
          fileLevel = getFileLevel(prevPointer);
          pageIndex = getPageIndex(prevPointer);
          cacheEntry = loadPageEntry(pageIndex, fileLevel, atomicOperation);
          bucket = new OHashIndexBucket<K, V>(cacheEntry, keySerializer, valueSerializer, keyTypes, getChanges(atomicOperation, cacheEntry));
          bucketPath = prevBucketPath;
        }
        return bucket.getEntry(bucket.size() - 1);
      } finally {
        releasePage(atomicOperation, cacheEntry);
      }
    } finally {
      releaseSharedLock();
    }
  } catch (IOException ioe) {
    throw OException.wrapException(new OIndexException("Exception during data read"), ioe);
  } finally {
    atomicOperationsManager.releaseReadLock(this);
  }
}
Use of com.orientechnologies.orient.core.index.OIndexException in project orientdb by orientechnologies.
The class OLocalHashTable20, method put().
private boolean put(K key, V value, OIndexEngine.Validator<K, V> validator) {
  final OAtomicOperation atomicOperation;
  try {
    atomicOperation = startAtomicOperation(true);
  } catch (IOException e) {
    throw OException.wrapException(new OIndexException("Error during hash table entry put"), e);
  }
  acquireExclusiveLock();
  try {
    checkNullSupport(key);
    key = keySerializer.preprocess(key, (Object[]) keyTypes);
    final boolean putResult = doPut(key, value, validator, atomicOperation);
    endAtomicOperation(false, null);
    return putResult;
  } catch (IOException e) {
    rollback();
    throw OException.wrapException(new OIndexException("Error during index update"), e);
  } catch (Exception e) {
    rollback();
    throw OException.wrapException(new OStorageException("Error during index update"), e);
  } finally {
    releaseExclusiveLock();
  }
}
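The put method above also shows the write-path discipline used by this component: the atomic operation is started before the exclusive lock is taken, committed with endAtomicOperation(false, null) on success, and rolled back on any failure before the exception is wrapped and re-thrown. Below is a minimal sketch of that control flow under simplified assumptions; AtomicPutSketch, beginTx, commitTx, rollbackTx and doPut are hypothetical placeholders for startAtomicOperation, endAtomicOperation, rollback and the real bucket update, and the exclusive lock is omitted for brevity.

import java.io.IOException;

import com.orientechnologies.common.exception.OException;
import com.orientechnologies.orient.core.index.OIndexException;

public class AtomicPutSketch {

  public boolean put(String key, String value) {
    // Hypothetical stand-in for startAtomicOperation(true): begin the WAL-backed operation first.
    beginTx();
    try {
      boolean result = doPut(key, value);
      // Hypothetical stand-in for endAtomicOperation(false, null): commit on success.
      commitTx();
      return result;
    } catch (IOException e) {
      // Undo the partial changes, then surface the failure as an unchecked index exception.
      rollbackTx();
      throw OException.wrapException(new OIndexException("Error during index update"), e);
    } catch (RuntimeException e) {
      rollbackTx();
      throw e;
    }
  }

  // Hypothetical helpers; the real class delegates these to its atomic-operations manager.
  private void beginTx() { }
  private void commitTx() { }
  private void rollbackTx() { }

  private boolean doPut(String key, String value) throws IOException {
    return true; // placeholder for the real hash-bucket update
  }
}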
Use of com.orientechnologies.orient.core.index.OIndexException in project orientdb by orientechnologies.
The class OLocalHashTable20, method load().
@Override
public void load(String name, OType[] keyTypes, boolean nullKeyIsSupported) {
  acquireExclusiveLock();
  try {
    if (keyTypes != null)
      this.keyTypes = Arrays.copyOf(keyTypes, keyTypes.length);
    else
      this.keyTypes = null;
    this.nullKeyIsSupported = nullKeyIsSupported;
    OAtomicOperation atomicOperation = atomicOperationsManager.getCurrentOperation();
    fileStateId = openFile(atomicOperation, name + metadataConfigurationFileExtension);
    final OCacheEntry hashStateEntry = loadPage(atomicOperation, fileStateId, 0, true);
    hashStateEntryIndex = hashStateEntry.getPageIndex();
    directory = new OHashTableDirectory(treeStateFileExtension, name, getFullName(), durableInNonTxMode, storage);
    directory.open();
    pinPage(atomicOperation, hashStateEntry);
    try {
      OHashIndexFileLevelMetadataPage page = new OHashIndexFileLevelMetadataPage(hashStateEntry, getChanges(atomicOperation, hashStateEntry), false);
      keySerializer = (OBinarySerializer<K>) storage.getComponentsFactory().binarySerializerFactory.getObjectSerializer(page.getKeySerializerId());
      valueSerializer = (OBinarySerializer<V>) storage.getComponentsFactory().binarySerializerFactory.getObjectSerializer(page.getValueSerializerId());
    } finally {
      releasePage(atomicOperation, hashStateEntry);
    }
    if (nullKeyIsSupported)
      nullBucketFileId = openFile(atomicOperation, name + nullBucketFileExtension);
  } catch (IOException e) {
    throw OException.wrapException(new OIndexException("Exception during hash table loading"), e);
  } finally {
    releaseExclusiveLock();
  }
}
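Because load wraps any IOException in an OIndexException, which is unchecked, a caller never has to declare or catch the failure; code that wants to degrade gracefully when the table files cannot be opened must catch it explicitly. A small hypothetical sketch of that caller side follows; LoadCallerSketch and the openTable runnable are illustrative stand-ins for whatever code invokes load(name, keyTypes, nullKeyIsSupported) on a concrete table instance, not OrientDB API.

import com.orientechnologies.orient.core.index.OIndexException;

public class LoadCallerSketch {

  public boolean tryOpen(Runnable openTable) {
    try {
      // openTable is a hypothetical wrapper around table.load(name, keyTypes, nullKeyIsSupported).
      openTable.run();
      return true;
    } catch (OIndexException e) {
      // Thrown by load() as "Exception during hash table loading" when the underlying files cannot be read.
      return false;
    }
  }
}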