Use of org.apache.hyracks.storage.common.buffercache.ICachedPage in the Apache AsterixDB project.
From class FieldPrefixNSMTest, method test01.
@Test
public void test01() throws Exception {
    // Exercises BTreeFieldPrefixNSMLeafFrame: inserts then deletes records on a
    // single pinned page, interleaving random compact()/compress() calls and
    // asserting that neither operation changes the frame's logical tuple content.

    // Declare a record type with three 32-bit integer fields.
    int fieldCount = 3;
    ITypeTraits[] typeTraits = new ITypeTraits[fieldCount];
    typeTraits[0] = IntegerPointable.TYPE_TRAITS;
    typeTraits[1] = IntegerPointable.TYPE_TRAITS;
    typeTraits[2] = IntegerPointable.TYPE_TRAITS;

    // All three fields participate in the key.
    int keyFieldCount = 3;
    IBinaryComparator[] cmps = new IBinaryComparator[keyFieldCount];
    cmps[0] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY).createBinaryComparator();
    cmps[1] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY).createBinaryComparator();
    cmps[2] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY).createBinaryComparator();
    MultiComparator cmp = new MultiComparator(cmps);

    // Serdes are only used to render frame contents as strings for comparison.
    @SuppressWarnings("rawtypes")
    ISerializerDeserializer[] fieldSerdes = { IntegerSerializerDeserializer.INSTANCE,
            IntegerSerializerDeserializer.INSTANCE, IntegerSerializerDeserializer.INSTANCE };

    // Fixed seed keeps the test deterministic across runs.
    Random rnd = new Random();
    rnd.setSeed(50);

    IBufferCache bufferCache = harness.getBufferCache();
    IFileMapProvider fileMapProvider = harness.getFileMapProvider();
    bufferCache.createFile(harness.getFileReference());
    int btreeFileId = fileMapProvider.lookupFileId(harness.getFileReference());
    bufferCache.openFile(btreeFileId);
    IHyracksTaskContext ctx = harness.getHyracksTaskContext();
    ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(btreeFileId, 0), true);
    try {
        ITreeIndexTupleWriter tupleWriter = new TypeAwareTupleWriter(typeTraits);
        BTreeFieldPrefixNSMLeafFrame frame = new BTreeFieldPrefixNSMLeafFrame(tupleWriter);
        frame.setPage(page);
        frame.initBuffer((byte) 0);
        frame.setMultiComparator(cmp);
        frame.setPrefixTupleCount(0);

        String before = ""; // idiom: "" instead of new String()
        String after = "";

        int compactFreq = 5;
        int compressFreq = 5;
        int smallMax = 10;
        int numRecords = 1000;
        int[][] savedFields = new int[numRecords][3];

        // Insert records with random calls to compact and compress.
        for (int i = 0; i < numRecords; i++) {
            if (LOGGER.isLoggable(Level.INFO)) {
                if ((i + 1) % 100 == 0) {
                    LOGGER.info("INSERTING " + (i + 1) + " / " + numRecords);
                }
            }
            // a and b repeat frequently (good prefix-compression fodder); c = i is unique.
            int a = rnd.nextInt() % smallMax;
            int b = rnd.nextInt() % smallMax;
            int c = i;
            ITupleReference tuple = createTuple(ctx, a, b, c, false);
            try {
                int targetTupleIndex = frame.findInsertTupleIndex(tuple);
                frame.insert(tuple, targetTupleIndex);
            } catch (Exception e) {
                // NOTE(review): insert failures are only printed, not failed on —
                // presumably to tolerate page-full conditions; confirm intent.
                e.printStackTrace();
            }
            savedFields[i][0] = a;
            savedFields[i][1] = b;
            savedFields[i][2] = c;

            // compact() must not change the logical tuple sequence.
            if (rnd.nextInt() % compactFreq == 0) {
                before = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                frame.compact();
                after = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                Assert.assertEquals(before, after);
            }
            // compress() must not change the logical tuple sequence either.
            if (rnd.nextInt() % compressFreq == 0) {
                before = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                frame.compress();
                after = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                Assert.assertEquals(before, after);
            }
        }

        // Delete the same records, again with random compact/compress checks.
        for (int i = 0; i < numRecords; i++) {
            if (LOGGER.isLoggable(Level.INFO)) {
                if ((i + 1) % 100 == 0) {
                    LOGGER.info("DELETING " + (i + 1) + " / " + numRecords);
                }
            }
            ITupleReference tuple = createTuple(ctx, savedFields[i][0], savedFields[i][1], savedFields[i][2], false);
            try {
                int tupleIndex = frame.findDeleteTupleIndex(tuple);
                frame.delete(tuple, tupleIndex);
            } catch (Exception e) {
                // NOTE(review): delete failures are silently ignored — presumably
                // for tuples whose insert failed above; consider logging instead.
            }
            if (rnd.nextInt() % compactFreq == 0) {
                before = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                frame.compact();
                after = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                Assert.assertEquals(before, after);
            }
            if (rnd.nextInt() % compressFreq == 0) {
                before = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                frame.compress();
                after = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                Assert.assertEquals(before, after);
            }
        }
    } finally {
        // Release the page and file resources even if an assertion fails.
        bufferCache.unpin(page);
        bufferCache.closeFile(btreeFileId);
        bufferCache.close();
    }
}
Use of org.apache.hyracks.storage.common.buffercache.ICachedPage in the Apache AsterixDB project.
From class TreeIndexDiskOrderScanCursor, method positionToNextLeaf.
/**
 * Advances the cursor until it is positioned on a non-empty leaf page
 * (level 0), or until the page range is exhausted.
 *
 * @param skipCurrent whether the page the cursor currently sits on must be
 *                    skipped even if it is a valid non-empty leaf
 * @return true if the cursor now sits on a usable leaf page, false if all
 *         pages up to maxPageId have been consumed
 * @throws HyracksDataException on buffer-cache pin failure
 */
private boolean positionToNextLeaf(boolean skipCurrent) throws HyracksDataException {
    while (frame.getLevel() != 0 || skipCurrent || frame.getTupleCount() == 0) {
        if (++currentPageId > maxPageId) {
            break;
        }
        // Hand the read latch and pin over from the current page to the next.
        page.releaseReadLatch();
        bufferCache.unpin(page);
        ICachedPage nextPage = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, currentPageId), false);
        nextPage.acquireReadLatch();
        page = nextPage;
        frame.setPage(page);
        tupleIndex = 0;
        skipCurrent = false;
    }
    // Idiomatic form of: if (cond) return true; else return false;
    return currentPageId <= maxPageId;
}
Use of org.apache.hyracks.storage.common.buffercache.ICachedPage in the Apache AsterixDB project.
From class TreeIndexBufferCacheWarmup, method warmup.
/**
 * Warms the buffer cache for a tree index: first scans every page to bucket
 * page ids by tree level, then re-pins pages of the requested levels several
 * times (in random order) to simulate frequent access.
 *
 * @param frame            frame used to read each page's level
 * @param metaFrame        metadata frame used to find the max page id
 * @param warmupTreeLevels tree levels whose pages should be re-pinned
 * @param warmupRepeats    per-level repeat count, parallel to warmupTreeLevels
 * @throws HyracksDataException on buffer-cache failure
 */
public void warmup(ITreeIndexFrame frame, ITreeIndexMetadataFrame metaFrame, int[] warmupTreeLevels,
        int[] warmupRepeats) throws HyracksDataException {
    bufferCache.openFile(fileId);
    try { // ensure the file is closed even if the scan or warmup throws
        // Scan entire file to determine pages in each level.
        int maxPageId = freePageManager.getMaxPageId(metaFrame);
        for (int pageId = 0; pageId <= maxPageId; pageId++) {
            ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, pageId), false);
            page.acquireReadLatch();
            try {
                frame.setPage(page);
                byte level = frame.getLevel();
                // Grow the per-level bucket list on demand.
                while (level >= pagesByLevel.size()) {
                    pagesByLevel.add(new IntArrayList(100, 100));
                }
                // Negative levels mark non-tree pages (e.g. metadata) — skip them.
                if (level >= 0) {
                    pagesByLevel.get(level).add(pageId);
                }
            } finally {
                page.releaseReadLatch();
                bufferCache.unpin(page);
            }
        }
        // Pin certain pages again to simulate frequent access.
        for (int i = 0; i < warmupTreeLevels.length; i++) {
            if (warmupTreeLevels[i] < pagesByLevel.size()) {
                int repeats = warmupRepeats[i];
                IntArrayList pageIds = pagesByLevel.get(warmupTreeLevels[i]);
                int[] remainingPageIds = new int[pageIds.size()];
                for (int r = 0; r < repeats; r++) {
                    // Refill the working set, then draw pages without replacement
                    // (swap-with-last) so each pass touches every page once.
                    for (int j = 0; j < pageIds.size(); j++) {
                        remainingPageIds[j] = pageIds.get(j);
                    }
                    int remainingLength = pageIds.size();
                    for (int j = 0; j < pageIds.size(); j++) {
                        int index = MathUtil.stripSignBit(rnd.nextInt()) % remainingLength;
                        int pageId = remainingPageIds[index];
                        // Pin & latch then immediately unlatch & unpin.
                        ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, pageId), false);
                        page.acquireReadLatch();
                        page.releaseReadLatch();
                        bufferCache.unpin(page);
                        remainingPageIds[index] = remainingPageIds[remainingLength - 1];
                        remainingLength--;
                    }
                }
            }
        }
    } finally {
        // BUGFIX: previously closeFile was skipped if any exception escaped,
        // leaking the open file handle in the buffer cache.
        bufferCache.closeFile(fileId);
    }
}
Use of org.apache.hyracks.storage.common.buffercache.ICachedPage in the Apache AsterixDB project.
From class TreeIndexStatsGatherer, method gatherStats.
/**
 * Scans every page of the index file and accumulates statistics, classifying
 * each page as a leaf, an interior node, or a metadata page.
 *
 * @param leafFrame     frame used to probe pages as leaves
 * @param interiorFrame frame used to probe pages as interior nodes
 * @param metaFrame     frame used for metadata pages and the max-page lookup
 * @return the populated TreeIndexStats instance
 * @throws HyracksDataException on buffer-cache failure
 */
public TreeIndexStats gatherStats(ITreeIndexFrame leafFrame, ITreeIndexFrame interiorFrame,
        ITreeIndexMetadataFrame metaFrame) throws HyracksDataException {
    bufferCache.openFile(fileId);
    try { // ensure the file is closed even if the scan throws
        treeIndexStats.begin();
        int maxPageId = freePageManager.getMaxPageId(metaFrame);
        for (int pageId = 0; pageId <= maxPageId; pageId++) {
            ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, pageId), false);
            page.acquireReadLatch();
            try {
                // Point all three frames at the page; classify by probing.
                metaFrame.setPage(page);
                leafFrame.setPage(page);
                interiorFrame.setPage(page);
                if (leafFrame.isLeaf()) {
                    if (pageId == rootPage) {
                        treeIndexStats.addRoot(leafFrame);
                    } else {
                        treeIndexStats.add(leafFrame);
                    }
                } else if (interiorFrame.isInterior()) {
                    if (pageId == rootPage) {
                        treeIndexStats.addRoot(interiorFrame);
                    } else {
                        treeIndexStats.add(interiorFrame);
                    }
                } else {
                    // Neither leaf nor interior: count it as a metadata page.
                    treeIndexStats.add(metaFrame);
                }
            } finally {
                page.releaseReadLatch();
                bufferCache.unpin(page);
            }
        }
        treeIndexStats.end();
    } finally {
        // BUGFIX: previously closeFile was skipped if any exception escaped,
        // leaking the open file handle in the buffer cache.
        bufferCache.closeFile(fileId);
    }
    return treeIndexStats;
}
Use of org.apache.hyracks.storage.common.buffercache.ICachedPage in the Apache AsterixDB project.
From class LinkedMetaDataPageManager, method close.
/**
 * Marks the metadata page as valid and flushes it, then clears the ready
 * flag. A no-op when the manager is not in the ready state, making repeated
 * close() calls safe.
 *
 * @throws HyracksDataException on buffer-cache failure
 */
@Override
public void close() throws HyracksDataException {
    if (ready) {
        ICachedPage metaNode = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, getMetadataPageId()), false);
        ITreeIndexMetadataFrame metaFrame = frameFactory.createFrame();
        metaNode.acquireWriteLatch();
        try {
            metaFrame.setPage(metaNode);
            // Persist the "valid" bit so the index is recognized on reopen.
            metaFrame.setValid(true);
        } finally {
            metaNode.releaseWriteLatch(true);
            bufferCache.flushDirtyPage(metaNode);
            bufferCache.unpin(metaNode);
            // BUGFIX: removed a dead "ready = true;" here — the enclosing
            // if (ready) guarantees it was already true, and it was immediately
            // overwritten below; the stray assignment only obscured intent.
        }
        ready = false;
    }
}
Aggregations