Use of org.apache.hyracks.storage.common.buffercache.IBufferCache in project asterixdb by apache.
The class BufferCacheRegressionTest, method flushBehaviorTest.
private void flushBehaviorTest(boolean deleteFile) throws IOException {
    TestStorageManagerComponentHolder.init(PAGE_SIZE, 10, 1);
    IBufferCache bufferCache =
            TestStorageManagerComponentHolder.getBufferCache(ctx.getJobletContext().getServiceContext());
    IFileMapProvider fmp = TestStorageManagerComponentHolder.getFileMapProvider();
    IOManager ioManager = TestStorageManagerComponentHolder.getIOManager();
    FileReference firstFileRef = ioManager.resolve(fileName);
    bufferCache.createFile(firstFileRef);
    int firstFileId = fmp.lookupFileId(firstFileRef);
    bufferCache.openFile(firstFileId);
    // Fill the first page with known data and make it dirty by write-latching it.
    ICachedPage writePage = bufferCache.pin(BufferedFileHandle.getDiskPageId(firstFileId, 0), true);
    writePage.acquireWriteLatch();
    try {
        ByteBuffer buf = writePage.getBuffer();
        for (int i = 0; i < buf.capacity(); i++) {
            buf.put(Byte.MAX_VALUE);
        }
    } finally {
        writePage.releaseWriteLatch(true);
        bufferCache.unpin(writePage);
    }
    bufferCache.closeFile(firstFileId);
    if (deleteFile) {
        bufferCache.deleteFile(firstFileId, false);
    }
    // Create a file with the same name.
    FileReference secondFileRef = ioManager.resolve(fileName);
    bufferCache.createFile(secondFileRef);
    int secondFileId = fmp.lookupFileId(secondFileRef);
    // This open will replace firstFileRef's slot in the BufferCache, causing its
    // pages to be cleaned up. We want to make sure that those dirty pages are not
    // flushed to disk, because the file was declared as deleted, and somebody
    // might already be using the same file name again (having been assigned a
    // different fileId).
    bufferCache.openFile(secondFileId);
    // Manually open the file and inspect its contents. We cannot simply ask the
    // BufferCache to pin the page, because it would return the same physical
    // memory again, and for performance reasons pages are never reset with 0's.
    FileReference testFileRef = ioManager.resolve(fileName);
    IFileHandle testFileHandle =
            ioManager.open(testFileRef, FileReadWriteMode.READ_ONLY, FileSyncMode.METADATA_SYNC_DATA_SYNC);
    ByteBuffer testBuffer = ByteBuffer.allocate(PAGE_SIZE + BufferCache.RESERVED_HEADER_BYTES);
    ioManager.syncRead(testFileHandle, 0, testBuffer);
    for (int i = BufferCache.RESERVED_HEADER_BYTES; i < testBuffer.capacity(); i++) {
        if (deleteFile) {
            // We deleted the file. We expect to see a clean buffer.
            if (testBuffer.get(i) == Byte.MAX_VALUE) {
                fail("Page 0 of deleted file was lazily flushed in openFile(), "
                        + "corrupting the data of a newly created file with the same name.");
            }
        } else {
            // We closed the file without deleting it, so the dirty page should have
            // been flushed; every byte should still be Byte.MAX_VALUE.
            if (testBuffer.get(i) != Byte.MAX_VALUE) {
                fail("Page 0 of closed file was not flushed properly when reclaiming "
                        + "the file slot of fileId 0 in the BufferCache.");
            }
        }
    }
    ioManager.close(testFileHandle);
    bufferCache.closeFile(secondFileId);
    if (deleteFile) {
        bufferCache.deleteFile(secondFileId, false);
    }
    bufferCache.close();
}
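The regression test above hinges on the standard IBufferCache page lifecycle: pin, write-latch, mutate, release with the dirty flag, unpin. A minimal sketch of that pattern, assuming a bufferCache and fileId obtained as in the test (the page id and the int written are illustrative only):

// Sketch of the write path the tests on this page rely on.
long dpid = BufferedFileHandle.getDiskPageId(fileId, 0); // page key: (fileId, pageId)
ICachedPage page = bufferCache.pin(dpid, true); // boolean as in the tests: a new page vs. one read from disk
page.acquireWriteLatch();
try {
    page.getBuffer().putInt(0, 42); // mutate the page only while holding the write latch
} finally {
    page.releaseWriteLatch(true); // true marks the page dirty, which the flush-behavior test depends on
    bufferCache.unpin(page);
}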
Use of org.apache.hyracks.storage.common.buffercache.IBufferCache in project asterixdb by apache.
The class BufferCacheTest, method simpleMaxOpenFilesTest.
@Test
public void simpleMaxOpenFilesTest() throws HyracksException {
    TestStorageManagerComponentHolder.init(PAGE_SIZE, NUM_PAGES, MAX_OPEN_FILES);
    IBufferCache bufferCache =
            TestStorageManagerComponentHolder.getBufferCache(ctx.getJobletContext().getServiceContext());
    IFileMapProvider fmp = TestStorageManagerComponentHolder.getFileMapProvider();
    IIOManager ioManager = TestStorageManagerComponentHolder.getIOManager();
    List<Integer> fileIds = new ArrayList<>();
    for (int i = 0; i < MAX_OPEN_FILES; i++) {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
        fileIds.add(fileId);
    }
    boolean exceptionThrown = false;
    // since all files are open, next open should fail
    try {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
    } catch (HyracksDataException e) {
        exceptionThrown = true;
    }
    Assert.assertTrue(exceptionThrown);
    // close a random file
    int ix = Math.abs(rnd.nextInt()) % fileIds.size();
    bufferCache.closeFile(fileIds.get(ix));
    fileIds.remove(ix);
    // now open should succeed again
    exceptionThrown = false;
    try {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
        fileIds.add(fileId);
    } catch (HyracksDataException e) {
        exceptionThrown = true;
    }
    Assert.assertFalse(exceptionThrown);
    for (Integer i : fileIds) {
        bufferCache.closeFile(i.intValue());
    }
    bufferCache.close();
}
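The resolve/create/lookup/open sequence recurs throughout these tests. A small helper like the following would keep the call sites short; this is a sketch, and createAndOpen is a hypothetical name, not part of the AsterixDB test code:

private static int createAndOpen(IBufferCache bufferCache, IFileMapProvider fmp, IIOManager ioManager,
        String fileName) throws HyracksDataException {
    FileReference file = ioManager.resolve(fileName); // map the logical name to a file reference
    bufferCache.createFile(file); // register the file so it gets a fileId
    int fileId = fmp.lookupFileId(file);
    bufferCache.openFile(fileId); // throws HyracksDataException once MAX_OPEN_FILES is reached
    return fileId;
}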
Use of org.apache.hyracks.storage.common.buffercache.IBufferCache in project asterixdb by apache.
The class BufferCacheTest, method contentCheckingMaxOpenFilesTest.
@Test
public void contentCheckingMaxOpenFilesTest() throws HyracksException {
    TestStorageManagerComponentHolder.init(PAGE_SIZE, NUM_PAGES, MAX_OPEN_FILES);
    IBufferCache bufferCache =
            TestStorageManagerComponentHolder.getBufferCache(ctx.getJobletContext().getServiceContext());
    IFileMapProvider fmp = TestStorageManagerComponentHolder.getFileMapProvider();
    IIOManager ioManager = TestStorageManagerComponentHolder.getIOManager();
    List<Integer> fileIds = new ArrayList<>();
    Map<Integer, ArrayList<Integer>> pageContents = new HashMap<>();
    int num = 10;
    int testPageId = 0;
    // open max number of files and write some known values into their first page
    for (int i = 0; i < MAX_OPEN_FILES; i++) {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
        fileIds.add(fileId);
        ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, testPageId), true);
        page.acquireWriteLatch();
        try {
            ArrayList<Integer> values = new ArrayList<>();
            for (int j = 0; j < num; j++) {
                int x = Math.abs(rnd.nextInt());
                page.getBuffer().putInt(j * 4, x);
                values.add(x);
            }
            pageContents.put(fileId, values);
        } finally {
            page.releaseWriteLatch(true);
            bufferCache.unpin(page);
        }
    }
    boolean exceptionThrown = false;
    // since all files are open, next open should fail
    try {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
    } catch (HyracksDataException e) {
        exceptionThrown = true;
    }
    Assert.assertTrue(exceptionThrown);
    // close a few random files
    ArrayList<Integer> closedFileIds = new ArrayList<>();
    int filesToClose = 5;
    for (int i = 0; i < filesToClose; i++) {
        int ix = Math.abs(rnd.nextInt()) % fileIds.size();
        bufferCache.closeFile(fileIds.get(ix));
        closedFileIds.add(fileIds.get(ix));
        fileIds.remove(ix);
    }
    // now open a few new files
    for (int i = 0; i < filesToClose; i++) {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
        fileIds.add(fileId);
    }
    // since all files are open again, the next open should fail;
    // reset the flag so the following assert actually checks this open
    exceptionThrown = false;
    try {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
    } catch (HyracksDataException e) {
        exceptionThrown = true;
    }
    Assert.assertTrue(exceptionThrown);
    // close a few random files again
    for (int i = 0; i < filesToClose; i++) {
        int ix = Math.abs(rnd.nextInt()) % fileIds.size();
        bufferCache.closeFile(fileIds.get(ix));
        closedFileIds.add(fileIds.get(ix));
        fileIds.remove(ix);
    }
    // now open those closed files again and verify their contents
    for (int i = 0; i < filesToClose; i++) {
        int closedFileId = closedFileIds.get(i);
        bufferCache.openFile(closedFileId);
        fileIds.add(closedFileId);
        // pin first page and verify contents
        ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(closedFileId, testPageId), false);
        page.acquireReadLatch();
        try {
            ArrayList<Integer> values = pageContents.get(closedFileId);
            for (int j = 0; j < values.size(); j++) {
                Assert.assertEquals(values.get(j).intValue(), page.getBuffer().getInt(j * 4));
            }
        } finally {
            page.releaseReadLatch();
            bufferCache.unpin(page);
        }
    }
    for (Integer i : fileIds) {
        bufferCache.closeFile(i.intValue());
    }
    bufferCache.close();
}
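Both BufferCacheTest methods key pages by the long returned from BufferedFileHandle.getDiskPageId, which packs a (fileId, pageId) pair into a single value. A sketch of that packing, assuming int fileId and pageId as in the tests above; the exact bit layout is an assumption about the Hyracks implementation, not something this page shows:

// Hypothetical illustration of the disk-page-id encoding.
long dpid = ((long) fileId << 32) | (pageId & 0xFFFFFFFFL); // assumed: high 32 bits fileId, low 32 pageId
int recoveredFileId = (int) (dpid >>> 32);
int recoveredPageId = (int) dpid;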
Use of org.apache.hyracks.storage.common.buffercache.IBufferCache in project asterixdb by apache.
The class BTreeStatsTest, method test01.
@Test
public void test01() throws Exception {
    TestStorageManagerComponentHolder.init(PAGE_SIZE, NUM_PAGES, MAX_OPEN_FILES);
    IBufferCache bufferCache = harness.getBufferCache();
    IFileMapProvider fmp = harness.getFileMapProvider();
    // declare fields
    int fieldCount = 2;
    ITypeTraits[] typeTraits = new ITypeTraits[fieldCount];
    typeTraits[0] = IntegerPointable.TYPE_TRAITS;
    typeTraits[1] = IntegerPointable.TYPE_TRAITS;
    // declare keys
    int keyFieldCount = 1;
    IBinaryComparatorFactory[] cmpFactories = new IBinaryComparatorFactory[keyFieldCount];
    cmpFactories[0] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY);
    TypeAwareTupleWriterFactory tupleWriterFactory = new TypeAwareTupleWriterFactory(typeTraits);
    ITreeIndexFrameFactory leafFrameFactory = new BTreeNSMLeafFrameFactory(tupleWriterFactory);
    ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(tupleWriterFactory);
    ITreeIndexMetadataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
    IBTreeLeafFrame leafFrame = (IBTreeLeafFrame) leafFrameFactory.createFrame();
    IBTreeInteriorFrame interiorFrame = (IBTreeInteriorFrame) interiorFrameFactory.createFrame();
    ITreeIndexMetadataFrame metaFrame = metaFrameFactory.createFrame();
    IMetadataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
    BTree btree = new BTree(bufferCache, fmp, freePageManager, interiorFrameFactory, leafFrameFactory,
            cmpFactories, fieldCount, harness.getFileReference());
    btree.create();
    btree.activate();
    Random rnd = new Random();
    rnd.setSeed(50);
    long start = System.currentTimeMillis();
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("INSERTING INTO TREE");
    }
    IFrame frame = new VSizeFrame(ctx);
    FrameTupleAppender appender = new FrameTupleAppender();
    ArrayTupleBuilder tb = new ArrayTupleBuilder(fieldCount);
    DataOutput dos = tb.getDataOutput();
    ISerializerDeserializer[] recDescSers =
            { IntegerSerializerDeserializer.INSTANCE, IntegerSerializerDeserializer.INSTANCE };
    RecordDescriptor recDesc = new RecordDescriptor(recDescSers);
    IFrameTupleAccessor accessor = new FrameTupleAccessor(recDesc);
    accessor.reset(frame.getBuffer());
    FrameTupleReference tuple = new FrameTupleReference();
    ITreeIndexAccessor indexAccessor =
            btree.createAccessor(TestOperationCallback.INSTANCE, TestOperationCallback.INSTANCE);
    for (int i = 0; i < 100000; i++) {
        int f0 = rnd.nextInt() % 100000;
        int f1 = 5;
        tb.reset();
        IntegerSerializerDeserializer.INSTANCE.serialize(f0, dos);
        tb.addFieldEndOffset();
        IntegerSerializerDeserializer.INSTANCE.serialize(f1, dos);
        tb.addFieldEndOffset();
        appender.reset(frame, true);
        appender.append(tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize());
        tuple.reset(accessor, 0);
        if (LOGGER.isLoggable(Level.INFO)) {
            if (i % 10000 == 0) {
                long end = System.currentTimeMillis();
                LOGGER.info("INSERTING " + i + " : " + f0 + " " + f1 + " " + (end - start));
            }
        }
        try {
            indexAccessor.insert(tuple);
        } catch (HyracksDataException e) {
            if (e.getErrorCode() != ErrorCode.DUPLICATE_KEY) {
                e.printStackTrace();
                throw e;
            }
        }
    }
    int fileId = fmp.lookupFileId(harness.getFileReference());
    TreeIndexStatsGatherer statsGatherer =
            new TreeIndexStatsGatherer(bufferCache, freePageManager, fileId, btree.getRootPageId());
    TreeIndexStats stats = statsGatherer.gatherStats(leafFrame, interiorFrame, metaFrame);
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("\n" + stats.toString());
    }
    TreeIndexBufferCacheWarmup bufferCacheWarmup =
            new TreeIndexBufferCacheWarmup(bufferCache, freePageManager, fileId);
    bufferCacheWarmup.warmup(leafFrame, metaFrame, new int[] { 1, 2 }, new int[] { 2, 5 });
    btree.deactivate();
    btree.destroy();
    bufferCache.close();
}
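The insert loop above deliberately tolerates duplicate keys, since the skewed random key space will produce collisions, and rethrows everything else. A helper capturing that pattern; insertIgnoringDuplicates is a hypothetical name, and the error-code check mirrors the test:

private static void insertIgnoringDuplicates(ITreeIndexAccessor accessor, ITupleReference tuple)
        throws HyracksDataException {
    try {
        accessor.insert(tuple);
    } catch (HyracksDataException e) {
        if (e.getErrorCode() != ErrorCode.DUPLICATE_KEY) {
            throw e; // duplicates are expected; anything else is a real failure
        }
    }
}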
Use of org.apache.hyracks.storage.common.buffercache.IBufferCache in project asterixdb by apache.
The class BTreeUpdateSearchTest, method test01.
// Update scan test on fixed-length tuples.
@Test
public void test01() throws Exception {
    IBufferCache bufferCache = harness.getBufferCache();
    // declare fields
    int fieldCount = 2;
    ITypeTraits[] typeTraits = new ITypeTraits[fieldCount];
    typeTraits[0] = IntegerPointable.TYPE_TRAITS;
    typeTraits[1] = IntegerPointable.TYPE_TRAITS;
    // declare keys
    int keyFieldCount = 1;
    IBinaryComparatorFactory[] cmpFactories = new IBinaryComparatorFactory[keyFieldCount];
    cmpFactories[0] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY);
    @SuppressWarnings("rawtypes")
    ISerializerDeserializer[] recDescSers =
            { IntegerSerializerDeserializer.INSTANCE, IntegerSerializerDeserializer.INSTANCE };
    TypeAwareTupleWriterFactory tupleWriterFactory = new TypeAwareTupleWriterFactory(typeTraits);
    ITreeIndexFrameFactory leafFrameFactory = new BTreeNSMLeafFrameFactory(tupleWriterFactory);
    ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(tupleWriterFactory);
    ITreeIndexMetadataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
    IBTreeLeafFrame leafFrame = (IBTreeLeafFrame) leafFrameFactory.createFrame();
    IMetadataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
    BTree btree = new BTree(bufferCache, harness.getFileMapProvider(), freePageManager, interiorFrameFactory,
            leafFrameFactory, cmpFactories, fieldCount, harness.getFileReference());
    btree.create();
    btree.activate();
    Random rnd = new Random();
    rnd.setSeed(50);
    long start = System.currentTimeMillis();
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("INSERTING INTO TREE");
    }
    ArrayTupleBuilder tb = new ArrayTupleBuilder(fieldCount);
    ArrayTupleReference insertTuple = new ArrayTupleReference();
    ITreeIndexAccessor indexAccessor =
            btree.createAccessor(TestOperationCallback.INSTANCE, TestOperationCallback.INSTANCE);
    int numInserts = 10000;
    for (int i = 0; i < numInserts; i++) {
        int f0 = rnd.nextInt() % 10000;
        int f1 = 5;
        TupleUtils.createIntegerTuple(tb, insertTuple, f0, f1);
        if (LOGGER.isLoggable(Level.INFO)) {
            if (i % 10000 == 0) {
                long end = System.currentTimeMillis();
                LOGGER.info("INSERTING " + i + " : " + f0 + " " + f1 + " " + (end - start));
            }
        }
        try {
            indexAccessor.insert(insertTuple);
        } catch (HyracksDataException hde) {
            if (hde.getErrorCode() != ErrorCode.DUPLICATE_KEY) {
                hde.printStackTrace();
                throw hde;
            }
        }
    }
    long end = System.currentTimeMillis();
    long duration = end - start;
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("DURATION: " + duration);
    }
    // Update scan.
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("UPDATE SCAN:");
    }
    // Set the cursor to X latch nodes.
    ITreeIndexCursor updateScanCursor = new BTreeRangeSearchCursor(leafFrame, true);
    RangePredicate nullPred = new RangePredicate(null, null, true, true, null, null);
    indexAccessor.search(updateScanCursor, nullPred);
    try {
        while (updateScanCursor.hasNext()) {
            updateScanCursor.next();
            ITupleReference tuple = updateScanCursor.getTuple();
            // Change the value field.
            IntegerPointable.setInteger(tuple.getFieldData(1), tuple.getFieldStart(1), 10);
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        updateScanCursor.close();
    }
    // Ordered scan to verify the values.
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("ORDERED SCAN:");
    }
    // Set the cursor to X latch nodes.
    ITreeIndexCursor scanCursor = new BTreeRangeSearchCursor(leafFrame, true);
    indexAccessor.search(scanCursor, nullPred);
    try {
        while (scanCursor.hasNext()) {
            scanCursor.next();
            ITupleReference tuple = scanCursor.getTuple();
            String rec = TupleUtils.printTuple(tuple, recDescSers);
            if (LOGGER.isLoggable(Level.INFO)) {
                LOGGER.info(rec);
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        scanCursor.close();
    }
    btree.deactivate();
    btree.destroy();
}
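In both scans above, RangePredicate(null, null, true, true, null, null) means no low key, no high key, inclusive bounds, and no key comparators, i.e. a full-index scan. A sketch of the same scan opened without exclusive latching; passing exclusiveLatchNodes = false is an assumption about typical read-only usage, whereas the test X-latches because it updates tuples in place:

// Read-only full scan, assuming leafFrame and indexAccessor set up as in the test.
ITreeIndexCursor cursor = new BTreeRangeSearchCursor(leafFrame, false);
indexAccessor.search(cursor, new RangePredicate(null, null, true, true, null, null));
try {
    while (cursor.hasNext()) {
        cursor.next();
        ITupleReference t = cursor.getTuple();
        // consume t here; the reference is only valid until the next call to next()
    }
} finally {
    cursor.close();
}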