Use of org.apache.hyracks.storage.common.buffercache.ICachedPage in project asterixdb by Apache.
Class BTree, method insertInterior:
private void insertInterior(ICachedPage node, int pageId, ITupleReference tuple, BTreeOpContext ctx)
        throws Exception {
    ctx.getInteriorFrame().setPage(node);
    int targetTupleIndex = ctx.getInteriorFrame().findInsertTupleIndex(tuple);
    FrameOpSpaceStatus spaceStatus = ctx.getInteriorFrame().hasSpaceInsert(tuple);
    switch (spaceStatus) {
        case INSUFFICIENT_SPACE: {
            // Split the interior node: allocate a right sibling page, move part of the
            // tuples into it, and record the split key so it propagates to the parent.
            int rightPageId = freePageManager.takePage(ctx.getMetaFrame());
            ICachedPage rightNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, rightPageId), true);
            rightNode.acquireWriteLatch();
            try {
                IBTreeFrame rightFrame = ctx.createInteriorFrame();
                rightFrame.setPage(rightNode);
                rightFrame.initBuffer(ctx.getInteriorFrame().getLevel());
                rightFrame.setMultiComparator(ctx.getCmp());
                // instead of creating a new split key, use the existing splitKey
                ctx.getInteriorFrame().split(rightFrame, ctx.getSplitKey().getTuple(), ctx.getSplitKey(), ctx,
                        bufferCache);
                // Mark both pages as part of a structure modification and bump their LSNs.
                ctx.getSmPages().add(pageId);
                ctx.getSmPages().add(rightPageId);
                ctx.getInteriorFrame().setSmFlag(true);
                rightFrame.setSmFlag(true);
                rightFrame.setPageLsn(rightFrame.getPageLsn() + 1);
                ctx.getInteriorFrame().setPageLsn(ctx.getInteriorFrame().getPageLsn() + 1);
                ctx.getSplitKey().setPages(pageId, rightPageId);
            } finally {
                rightNode.releaseWriteLatch(true);
                bufferCache.unpin(rightNode);
            }
            break;
        }
        case SUFFICIENT_CONTIGUOUS_SPACE: {
            // Enough contiguous free space: insert directly.
            ctx.getInteriorFrame().insert(tuple, targetTupleIndex);
            ctx.getSplitKey().reset();
            break;
        }
        case SUFFICIENT_SPACE: {
            // Enough total free space but fragmented: compact the frame first,
            // re-resolve the insert position if the slots moved, then insert.
            boolean slotsChanged = ctx.getInteriorFrame().compact();
            if (slotsChanged) {
                targetTupleIndex = ctx.getInteriorFrame().findInsertTupleIndex(tuple);
            }
            ctx.getInteriorFrame().insert(tuple, targetTupleIndex);
            ctx.getSplitKey().reset();
            break;
        }
        case TOO_LARGE: {
            // The tuple exceeds the maximum tuple size that fits on a page.
            int tupleSize = ctx.getInteriorFrame().getBytesRequiredToWriteTuple(tuple);
            throw HyracksDataException.create(ErrorCode.RECORD_IS_TOO_LARGE, tupleSize, maxTupleSize);
        }
        default: {
            throw new IllegalStateException("NYI: " + spaceStatus);
        }
    }
}
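The page writes above follow the same ICachedPage protocol throughout: pin the page, take a write latch, mutate it through a frame, then release the latch and unpin in a finally block so neither is leaked on error. The sketch below distills that protocol; it is not AsterixDB code, and the bufferCache, frame, fileId, and pageId parameters stand in for whatever the caller has in scope.

private void modifyPage(IBufferCache bufferCache, ITreeIndexFrame frame, int fileId, int pageId)
        throws HyracksDataException {
    ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, pageId), false);
    page.acquireWriteLatch();
    try {
        frame.setPage(page);
        // ... mutate the page contents through the frame ...
    } finally {
        // 'true' marks the page dirty so the buffer cache will write it back.
        page.releaseWriteLatch(true);
        bufferCache.unpin(page);
    }
}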
Use of org.apache.hyracks.storage.common.buffercache.ICachedPage in project asterixdb by Apache.
Class BTree, method diskOrderScan:
private void diskOrderScan(ITreeIndexCursor icursor, BTreeOpContext ctx) throws HyracksDataException {
    TreeIndexDiskOrderScanCursor cursor = (TreeIndexDiskOrderScanCursor) icursor;
    ctx.reset();
    RangePredicate diskOrderScanPred = new RangePredicate(null, null, true, true, ctx.getCmp(), ctx.getCmp());
    int maxPageId = freePageManager.getMaxPageId(ctx.getMetaFrame());
    int currentPageId = bulkloadLeafStart;
    ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, currentPageId), false);
    page.acquireReadLatch();
    try {
        cursor.setBufferCache(bufferCache);
        cursor.setFileId(fileId);
        cursor.setCurrentPageId(currentPageId);
        cursor.setMaxPageId(maxPageId);
        ctx.getCursorInitialState().setPage(page);
        ctx.getCursorInitialState().setSearchOperationCallback(ctx.getSearchCallback());
        ctx.getCursorInitialState().setOriginialKeyComparator(ctx.getCmp());
        cursor.open(ctx.getCursorInitialState(), diskOrderScanPred);
    } catch (Exception e) {
        page.releaseReadLatch();
        bufferCache.unpin(page);
        throw HyracksDataException.create(e);
    }
}
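A caller would typically drain the cursor opened above with the standard IIndexCursor iteration loop. The sketch below is an assumed usage, not AsterixDB code; in practice the scan is reached through the tree's accessor rather than this private method, but the loop is the same.

// Assumed consumer of the disk-order scan; the cursor is closed in a finally
// block so it can release its current page even if tuple processing fails.
try {
    while (cursor.hasNext()) {
        cursor.next();
        ITupleReference tuple = cursor.getTuple();
        // ... process the tuple ...
    }
} finally {
    cursor.close();
}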
Use of org.apache.hyracks.storage.common.buffercache.ICachedPage in project asterixdb by Apache.
Class AppendOnlyLinkedMetadataPageManager, method getRootPageId:
@Override
public int getRootPageId() throws HyracksDataException {
    ICachedPage metaNode;
    if (confiscatedPage == null) {
        metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, getMetadataPageId()), false);
    } else {
        metaNode = confiscatedPage;
    }
    ITreeIndexMetadataFrame metaFrame = frameFactory.createFrame();
    metaNode.acquireReadLatch();
    try {
        metaFrame.setPage(metaNode);
        return metaFrame.getRootPageId();
    } finally {
        metaNode.releaseReadLatch();
        if (confiscatedPage == null) {
            bufferCache.unpin(metaNode);
        }
    }
}
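The returned root page id is typically used to pin and read-latch the root page before descending the tree. The fragment below is an assumed illustration, not code from the page manager itself; pageManager, bufferCache, fileId, and frame are placeholders for the caller's own fields.

int rootPageId = pageManager.getRootPageId();
ICachedPage rootNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, rootPageId), false);
rootNode.acquireReadLatch();
try {
    frame.setPage(rootNode);
    // ... read the root page through the frame ...
} finally {
    rootNode.releaseReadLatch();
    bufferCache.unpin(rootNode);
}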
Use of org.apache.hyracks.storage.common.buffercache.ICachedPage in project asterixdb by Apache.
Class TreeTupleSorter, method compare:
private int compare(int[] tPointers, int tp1, int tp2i, int tp2j) throws HyracksDataException {
    int i1 = tPointers[tp1 * 2];
    int j1 = tPointers[tp1 * 2 + 1];
    int i2 = tp2i;
    int j2 = tp2j;
    ICachedPage node1 = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, i1), false);
    leafFrame1.setPage(node1);
    ICachedPage node2 = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, i2), false);
    leafFrame2.setPage(node2);
    try {
        frameTuple1.resetByTupleOffset(leafFrame1.getBuffer().array(), j1);
        frameTuple2.resetByTupleOffset(leafFrame2.getBuffer().array(), j2);
        return cmp.selectiveFieldCompare(frameTuple1, frameTuple2, comparatorFields);
    } finally {
        bufferCache.unpin(node1);
        bufferCache.unpin(node2);
    }
}
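Note that compare pins both leaf pages without latching them, presumably because TreeTupleSorter is used by a single thread on pages it manages itself. The hypothetical helper below (compareWith is not part of TreeTupleSorter) isolates the same pattern: pin, point the frame and tuple reference into the page, compare while the page is still pinned, then unpin in a finally block.

private int compareWith(ITupleReference searchTuple, int pageId, int tupleOffset)
        throws HyracksDataException {
    ICachedPage node = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, pageId), false);
    try {
        leafFrame1.setPage(node);
        frameTuple1.resetByTupleOffset(leafFrame1.getBuffer().array(), tupleOffset);
        // Compare while the page is pinned: frameTuple1 points directly into the page buffer.
        return cmp.selectiveFieldCompare(searchTuple, frameTuple1, comparatorFields);
    } finally {
        bufferCache.unpin(node);
    }
}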
Use of org.apache.hyracks.storage.common.buffercache.ICachedPage in project asterixdb by Apache.
Class RTree, method diskOrderScan:
private void diskOrderScan(ITreeIndexCursor icursor, RTreeOpContext ctx) throws HyracksDataException {
    TreeIndexDiskOrderScanCursor cursor = (TreeIndexDiskOrderScanCursor) icursor;
    ctx.reset();
    MultiComparator cmp = MultiComparator.create(cmpFactories);
    SearchPredicate searchPred = new SearchPredicate(null, cmp);
    int currentPageId = bulkloadLeafStart;
    int maxPageId = freePageManager.getMaxPageId(ctx.getMetaFrame());
    ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, currentPageId), false);
    page.acquireReadLatch();
    try {
        cursor.setBufferCache(bufferCache);
        cursor.setFileId(fileId);
        cursor.setCurrentPageId(currentPageId);
        cursor.setMaxPageId(maxPageId);
        ctx.getCursorInitialState().setOriginialKeyComparator(ctx.getCmp());
        ctx.getCursorInitialState().setPage(page);
        cursor.open(ctx.getCursorInitialState(), searchPred);
    } catch (Exception e) {
        page.releaseReadLatch();
        bufferCache.unpin(page);
        throw new HyracksDataException(e);
    }
}
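In both disk-order scans (BTree and RTree), the page stays pinned and read-latched on the success path: the catch block only cleans up when opening the cursor fails, and responsibility for releasing the latch and unpinning presumably passes to the cursor via its initial state, to be discharged when the cursor is closed.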