Use of org.apache.hyracks.storage.common.file.IFileMapProvider in project asterixdb by apache.
From class BufferCacheRegressionTest, method flushBehaviorTest:
private void flushBehaviorTest(boolean deleteFile) throws IOException {
    TestStorageManagerComponentHolder.init(PAGE_SIZE, 10, 1);
    IBufferCache bufferCache =
            TestStorageManagerComponentHolder.getBufferCache(ctx.getJobletContext().getServiceContext());
    IFileMapProvider fmp = TestStorageManagerComponentHolder.getFileMapProvider();
    IOManager ioManager = TestStorageManagerComponentHolder.getIOManager();
    FileReference firstFileRef = ioManager.resolve(fileName);
    bufferCache.createFile(firstFileRef);
    int firstFileId = fmp.lookupFileId(firstFileRef);
    bufferCache.openFile(firstFileId);
    // Fill the first page with known data and make it dirty by write-latching it.
    ICachedPage writePage = bufferCache.pin(BufferedFileHandle.getDiskPageId(firstFileId, 0), true);
    writePage.acquireWriteLatch();
    try {
        ByteBuffer buf = writePage.getBuffer();
        for (int i = 0; i < buf.capacity(); i++) {
            buf.put(Byte.MAX_VALUE);
        }
    } finally {
        writePage.releaseWriteLatch(true);
        bufferCache.unpin(writePage);
    }
    bufferCache.closeFile(firstFileId);
    if (deleteFile) {
        bufferCache.deleteFile(firstFileId, false);
    }
    // Create a file with the same name.
    FileReference secondFileRef = ioManager.resolve(fileName);
    bufferCache.createFile(secondFileRef);
    int secondFileId = fmp.lookupFileId(secondFileRef);
    // This open will replace the firstFileRef's slot in the BufferCache, causing its
    // pages to be cleaned up. We want to make sure that those dirty pages are not
    // flushed to disk, because the file was declared as deleted, and somebody might
    // already be using the same file name again (having been assigned a different fileId).
    bufferCache.openFile(secondFileId);
    // Manually open the file and inspect its contents. We cannot simply ask the
    // BufferCache to pin the page, because it would return the same physical memory
    // again, and for performance reasons pages are never reset with 0's.
    FileReference testFileRef = ioManager.resolve(fileName);
    IFileHandle testFileHandle =
            ioManager.open(testFileRef, FileReadWriteMode.READ_ONLY, FileSyncMode.METADATA_SYNC_DATA_SYNC);
    ByteBuffer testBuffer = ByteBuffer.allocate(PAGE_SIZE + BufferCache.RESERVED_HEADER_BYTES);
    ioManager.syncRead(testFileHandle, 0, testBuffer);
    for (int i = BufferCache.RESERVED_HEADER_BYTES; i < testBuffer.capacity(); i++) {
        if (deleteFile) {
            // We deleted the file, so we expect to see a clean buffer.
            if (testBuffer.get(i) == Byte.MAX_VALUE) {
                fail("Page 0 of deleted file was lazily flushed in openFile(), "
                        + "corrupting the data of a newly created file with the same name.");
            }
        } else {
            // We closed the file without deleting it, so we expect to see the data
            // we wrote, i.e. Byte.MAX_VALUE.
            if (testBuffer.get(i) != Byte.MAX_VALUE) {
                fail("Page 0 of closed file was not flushed properly when reclaiming the "
                        + "file slot of fileId 0 in the BufferCache.");
            }
        }
    }
    ioManager.close(testFileHandle);
    bufferCache.closeFile(secondFileId);
    if (deleteFile) {
        bufferCache.deleteFile(secondFileId, false);
    }
    bufferCache.close();
}
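
The method above demonstrates the canonical dirty-page discipline in the Hyracks buffer cache: pin with newPage=true, write under a write latch, release the latch with the dirty flag set, then unpin. As a minimal sketch under the same test harness and imports (writePageZero is an illustrative name, not a Hyracks API), the pattern factors out like this:

// Illustrative helper: fill page 0 of an already-open file with a known byte value.
private static void writePageZero(IBufferCache bufferCache, int fileId, byte value)
        throws HyracksDataException {
    // pin with newPage=true materializes the page in the cache if it is not resident
    ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, 0), true);
    page.acquireWriteLatch();
    try {
        ByteBuffer buf = page.getBuffer();
        for (int i = 0; i < buf.capacity(); i++) {
            buf.put(i, value);
        }
    } finally {
        // releaseWriteLatch(true) marks the page dirty so the cache knows it must be flushed
        page.releaseWriteLatch(true);
        bufferCache.unpin(page);
    }
}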
Use of org.apache.hyracks.storage.common.file.IFileMapProvider in project asterixdb by apache.
From class BufferCacheTest, method simpleMaxOpenFilesTest:
@Test
public void simpleMaxOpenFilesTest() throws HyracksException {
    TestStorageManagerComponentHolder.init(PAGE_SIZE, NUM_PAGES, MAX_OPEN_FILES);
    IBufferCache bufferCache =
            TestStorageManagerComponentHolder.getBufferCache(ctx.getJobletContext().getServiceContext());
    IFileMapProvider fmp = TestStorageManagerComponentHolder.getFileMapProvider();
    IIOManager ioManager = TestStorageManagerComponentHolder.getIOManager();
    List<Integer> fileIds = new ArrayList<>();
    for (int i = 0; i < MAX_OPEN_FILES; i++) {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
        fileIds.add(fileId);
    }
    boolean exceptionThrown = false;
    // since all files are open, the next open should fail
    try {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
    } catch (HyracksDataException e) {
        exceptionThrown = true;
    }
    Assert.assertTrue(exceptionThrown);
    // close a random file
    int ix = Math.abs(rnd.nextInt()) % fileIds.size();
    bufferCache.closeFile(fileIds.get(ix));
    fileIds.remove(ix);
    // now open should succeed again
    exceptionThrown = false;
    try {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
        fileIds.add(fileId);
    } catch (HyracksDataException e) {
        exceptionThrown = true;
    }
    Assert.assertFalse(exceptionThrown);
    for (Integer i : fileIds) {
        bufferCache.closeFile(i.intValue());
    }
    bufferCache.close();
}
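
The test above probes the open-file budget one file at a time. A hypothetical helper could distill that probing loop; countOpensUntilLimit and nameSupplier are illustrative names (and java.util.function.Supplier an extra import), while every Hyracks call is the same one used in the test:

// Illustrative sketch: create and open files until the buffer cache refuses an
// open, and report how many opens succeeded. Against a fresh cache this should
// return MAX_OPEN_FILES.
static int countOpensUntilLimit(IBufferCache bufferCache, IFileMapProvider fmp,
        IIOManager ioManager, Supplier<String> nameSupplier) throws HyracksDataException {
    int opened = 0;
    while (true) {
        FileReference file = ioManager.resolve(nameSupplier.get());
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        try {
            bufferCache.openFile(fileId);
        } catch (HyracksDataException e) {
            return opened; // the MAX_OPEN_FILES budget is exhausted
        }
        opened++;
    }
}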
Use of org.apache.hyracks.storage.common.file.IFileMapProvider in project asterixdb by apache.
From class BufferCacheTest, method contentCheckingMaxOpenFilesTest:
@Test
public void contentCheckingMaxOpenFilesTest() throws HyracksException {
    TestStorageManagerComponentHolder.init(PAGE_SIZE, NUM_PAGES, MAX_OPEN_FILES);
    IBufferCache bufferCache =
            TestStorageManagerComponentHolder.getBufferCache(ctx.getJobletContext().getServiceContext());
    IFileMapProvider fmp = TestStorageManagerComponentHolder.getFileMapProvider();
    IIOManager ioManager = TestStorageManagerComponentHolder.getIOManager();
    List<Integer> fileIds = new ArrayList<>();
    Map<Integer, ArrayList<Integer>> pageContents = new HashMap<>();
    int num = 10;
    int testPageId = 0;
    // open the maximum number of files and write some values into their first pages
    for (int i = 0; i < MAX_OPEN_FILES; i++) {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
        fileIds.add(fileId);
        ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, testPageId), true);
        page.acquireWriteLatch();
        try {
            ArrayList<Integer> values = new ArrayList<>();
            for (int j = 0; j < num; j++) {
                int x = Math.abs(rnd.nextInt());
                page.getBuffer().putInt(j * 4, x);
                values.add(x);
            }
            pageContents.put(fileId, values);
        } finally {
            page.releaseWriteLatch(true);
            bufferCache.unpin(page);
        }
    }
    boolean exceptionThrown = false;
    // since all files are open, the next open should fail
    try {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
    } catch (HyracksDataException e) {
        exceptionThrown = true;
    }
    Assert.assertTrue(exceptionThrown);
    // close a few random files
    ArrayList<Integer> closedFileIds = new ArrayList<>();
    int filesToClose = 5;
    for (int i = 0; i < filesToClose; i++) {
        int ix = Math.abs(rnd.nextInt()) % fileIds.size();
        bufferCache.closeFile(fileIds.get(ix));
        closedFileIds.add(fileIds.get(ix));
        fileIds.remove(ix);
    }
    // now open a few new files
    for (int i = 0; i < filesToClose; i++) {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
        fileIds.add(fileId);
    }
    // since all files are open again, the next open should fail
    exceptionThrown = false; // reset so the assertion below checks this open, not the earlier one
    try {
        String fileName = getFileName();
        FileReference file = ioManager.resolve(fileName);
        bufferCache.createFile(file);
        int fileId = fmp.lookupFileId(file);
        bufferCache.openFile(fileId);
    } catch (HyracksDataException e) {
        exceptionThrown = true;
    }
    Assert.assertTrue(exceptionThrown);
    // close a few random files again
    for (int i = 0; i < filesToClose; i++) {
        int ix = Math.abs(rnd.nextInt()) % fileIds.size();
        bufferCache.closeFile(fileIds.get(ix));
        closedFileIds.add(fileIds.get(ix));
        fileIds.remove(ix);
    }
    // now open those closed files again and verify their contents
    for (int i = 0; i < filesToClose; i++) {
        int closedFileId = closedFileIds.get(i);
        bufferCache.openFile(closedFileId);
        fileIds.add(closedFileId);
        // pin the first page and verify its contents
        ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(closedFileId, testPageId), false);
        page.acquireReadLatch();
        try {
            ArrayList<Integer> values = pageContents.get(closedFileId);
            for (int j = 0; j < values.size(); j++) {
                Assert.assertEquals(values.get(j).intValue(), page.getBuffer().getInt(j * 4));
            }
        } finally {
            page.releaseReadLatch();
            bufferCache.unpin(page);
        }
    }
    for (Integer i : fileIds) {
        bufferCache.closeFile(i.intValue());
    }
    bufferCache.close();
}
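
The write and verify halves of the loops above can be read as a pair of helpers. A sketch under the same harness (writeInts and verifyInts are illustrative names; each call inside them appears verbatim in the test):

// Illustrative sketch: write `count` random ints into page 0 of an open file,
// returning them so they can be checked after the file is closed and reopened.
static List<Integer> writeInts(IBufferCache bufferCache, int fileId, int count, Random rnd)
        throws HyracksDataException {
    List<Integer> values = new ArrayList<>();
    ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, 0), true);
    page.acquireWriteLatch();
    try {
        for (int j = 0; j < count; j++) {
            int x = Math.abs(rnd.nextInt());
            page.getBuffer().putInt(j * 4, x); // each int occupies 4 bytes
            values.add(x);
        }
    } finally {
        page.releaseWriteLatch(true); // mark dirty so closeFile() can flush the page
        bufferCache.unpin(page);
    }
    return values;
}

// Illustrative sketch: pin page 0 of a reopened file and assert its contents survived.
static void verifyInts(IBufferCache bufferCache, int fileId, List<Integer> expected)
        throws HyracksDataException {
    ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, 0), false);
    page.acquireReadLatch();
    try {
        for (int j = 0; j < expected.size(); j++) {
            Assert.assertEquals(expected.get(j).intValue(), page.getBuffer().getInt(j * 4));
        }
    } finally {
        page.releaseReadLatch();
        bufferCache.unpin(page);
    }
}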
Use of org.apache.hyracks.storage.common.file.IFileMapProvider in project asterixdb by apache.
From class BTreeStatsTest, method test01:
@Test
public void test01() throws Exception {
    TestStorageManagerComponentHolder.init(PAGE_SIZE, NUM_PAGES, MAX_OPEN_FILES);
    IBufferCache bufferCache = harness.getBufferCache();
    IFileMapProvider fmp = harness.getFileMapProvider();
    // declare fields
    int fieldCount = 2;
    ITypeTraits[] typeTraits = new ITypeTraits[fieldCount];
    typeTraits[0] = IntegerPointable.TYPE_TRAITS;
    typeTraits[1] = IntegerPointable.TYPE_TRAITS;
    // declare keys
    int keyFieldCount = 1;
    IBinaryComparatorFactory[] cmpFactories = new IBinaryComparatorFactory[keyFieldCount];
    cmpFactories[0] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY);
    TypeAwareTupleWriterFactory tupleWriterFactory = new TypeAwareTupleWriterFactory(typeTraits);
    ITreeIndexFrameFactory leafFrameFactory = new BTreeNSMLeafFrameFactory(tupleWriterFactory);
    ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(tupleWriterFactory);
    ITreeIndexMetadataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
    IBTreeLeafFrame leafFrame = (IBTreeLeafFrame) leafFrameFactory.createFrame();
    IBTreeInteriorFrame interiorFrame = (IBTreeInteriorFrame) interiorFrameFactory.createFrame();
    ITreeIndexMetadataFrame metaFrame = metaFrameFactory.createFrame();
    IMetadataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
    BTree btree = new BTree(bufferCache, fmp, freePageManager, interiorFrameFactory, leafFrameFactory,
            cmpFactories, fieldCount, harness.getFileReference());
    btree.create();
    btree.activate();
    Random rnd = new Random();
    rnd.setSeed(50);
    long start = System.currentTimeMillis();
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("INSERTING INTO TREE");
    }
    IFrame frame = new VSizeFrame(ctx);
    FrameTupleAppender appender = new FrameTupleAppender();
    ArrayTupleBuilder tb = new ArrayTupleBuilder(fieldCount);
    DataOutput dos = tb.getDataOutput();
    ISerializerDeserializer[] recDescSers =
            { IntegerSerializerDeserializer.INSTANCE, IntegerSerializerDeserializer.INSTANCE };
    RecordDescriptor recDesc = new RecordDescriptor(recDescSers);
    IFrameTupleAccessor accessor = new FrameTupleAccessor(recDesc);
    accessor.reset(frame.getBuffer());
    FrameTupleReference tuple = new FrameTupleReference();
    ITreeIndexAccessor indexAccessor =
            btree.createAccessor(TestOperationCallback.INSTANCE, TestOperationCallback.INSTANCE);
    for (int i = 0; i < 100000; i++) {
        int f0 = rnd.nextInt() % 100000;
        int f1 = 5;
        tb.reset();
        IntegerSerializerDeserializer.INSTANCE.serialize(f0, dos);
        tb.addFieldEndOffset();
        IntegerSerializerDeserializer.INSTANCE.serialize(f1, dos);
        tb.addFieldEndOffset();
        appender.reset(frame, true);
        appender.append(tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize());
        tuple.reset(accessor, 0);
        if (LOGGER.isLoggable(Level.INFO)) {
            if (i % 10000 == 0) {
                long end = System.currentTimeMillis();
                LOGGER.info("INSERTING " + i + " : " + f0 + " " + f1 + " " + (end - start));
            }
        }
        try {
            indexAccessor.insert(tuple);
        } catch (HyracksDataException e) {
            if (e.getErrorCode() != ErrorCode.DUPLICATE_KEY) {
                e.printStackTrace();
                throw e;
            }
        }
    }
    int fileId = fmp.lookupFileId(harness.getFileReference());
    TreeIndexStatsGatherer statsGatherer =
            new TreeIndexStatsGatherer(bufferCache, freePageManager, fileId, btree.getRootPageId());
    TreeIndexStats stats = statsGatherer.gatherStats(leafFrame, interiorFrame, metaFrame);
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("\n" + stats.toString());
    }
    TreeIndexBufferCacheWarmup bufferCacheWarmup =
            new TreeIndexBufferCacheWarmup(bufferCache, freePageManager, fileId);
    bufferCacheWarmup.warmup(leafFrame, metaFrame, new int[] { 1, 2 }, new int[] { 2, 5 });
    btree.deactivate();
    btree.destroy();
    bufferCache.close();
}
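
The insert loop above repeats a fixed serialize-append-insert sequence for every record. A condensed sketch of one iteration (insertIntPair is an illustrative name; every call it makes appears verbatim in the test, and duplicate keys are tolerated just as the test tolerates them):

// Illustrative sketch: serialize two ints into a frame tuple and insert it into
// the BTree, ignoring duplicate-key errors.
static void insertIntPair(ITreeIndexAccessor indexAccessor, ArrayTupleBuilder tb,
        FrameTupleAppender appender, IFrame frame, IFrameTupleAccessor accessor,
        FrameTupleReference tuple, int f0, int f1) throws HyracksDataException {
    DataOutput dos = tb.getDataOutput();
    tb.reset();
    IntegerSerializerDeserializer.INSTANCE.serialize(f0, dos);
    tb.addFieldEndOffset();
    IntegerSerializerDeserializer.INSTANCE.serialize(f1, dos);
    tb.addFieldEndOffset();
    appender.reset(frame, true);
    appender.append(tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize());
    tuple.reset(accessor, 0);
    try {
        indexAccessor.insert(tuple);
    } catch (HyracksDataException e) {
        if (e.getErrorCode() != ErrorCode.DUPLICATE_KEY) {
            throw e; // duplicates are expected with random keys; rethrow anything else
        }
    }
}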
Use of org.apache.hyracks.storage.common.file.IFileMapProvider in project asterixdb by apache.
From class FieldPrefixNSMTest, method test01:
@Test
public void test01() throws Exception {
    // declare fields
    int fieldCount = 3;
    ITypeTraits[] typeTraits = new ITypeTraits[fieldCount];
    typeTraits[0] = IntegerPointable.TYPE_TRAITS;
    typeTraits[1] = IntegerPointable.TYPE_TRAITS;
    typeTraits[2] = IntegerPointable.TYPE_TRAITS;
    // declare keys
    int keyFieldCount = 3;
    IBinaryComparator[] cmps = new IBinaryComparator[keyFieldCount];
    cmps[0] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY).createBinaryComparator();
    cmps[1] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY).createBinaryComparator();
    cmps[2] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY).createBinaryComparator();
    MultiComparator cmp = new MultiComparator(cmps);
    // just for printing
    @SuppressWarnings("rawtypes")
    ISerializerDeserializer[] fieldSerdes = { IntegerSerializerDeserializer.INSTANCE,
            IntegerSerializerDeserializer.INSTANCE, IntegerSerializerDeserializer.INSTANCE };
    Random rnd = new Random();
    rnd.setSeed(50);
    IBufferCache bufferCache = harness.getBufferCache();
    IFileMapProvider fileMapProvider = harness.getFileMapProvider();
    bufferCache.createFile(harness.getFileReference());
    int btreeFileId = fileMapProvider.lookupFileId(harness.getFileReference());
    bufferCache.openFile(btreeFileId);
    IHyracksTaskContext ctx = harness.getHyracksTaskContext();
    ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(btreeFileId, 0), true);
    try {
        ITreeIndexTupleWriter tupleWriter = new TypeAwareTupleWriter(typeTraits);
        BTreeFieldPrefixNSMLeafFrame frame = new BTreeFieldPrefixNSMLeafFrame(tupleWriter);
        frame.setPage(page);
        frame.initBuffer((byte) 0);
        frame.setMultiComparator(cmp);
        frame.setPrefixTupleCount(0);
        String before = "";
        String after = "";
        int compactFreq = 5;
        int compressFreq = 5;
        int smallMax = 10;
        int numRecords = 1000;
        int[][] savedFields = new int[numRecords][3];
        // insert records with random calls to compact and compress
        for (int i = 0; i < numRecords; i++) {
            if (LOGGER.isLoggable(Level.INFO)) {
                if ((i + 1) % 100 == 0) {
                    LOGGER.info("INSERTING " + (i + 1) + " / " + numRecords);
                }
            }
            int a = rnd.nextInt() % smallMax;
            int b = rnd.nextInt() % smallMax;
            int c = i;
            ITupleReference tuple = createTuple(ctx, a, b, c, false);
            try {
                int targetTupleIndex = frame.findInsertTupleIndex(tuple);
                frame.insert(tuple, targetTupleIndex);
            } catch (Exception e) {
                // log failed inserts and continue; the invariant checks below do not depend on them
                e.printStackTrace();
            }
            savedFields[i][0] = a;
            savedFields[i][1] = b;
            savedFields[i][2] = c;
            if (rnd.nextInt() % compactFreq == 0) {
                before = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                frame.compact();
                after = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                Assert.assertEquals(before, after);
            }
            if (rnd.nextInt() % compressFreq == 0) {
                before = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                frame.compress();
                after = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                Assert.assertEquals(before, after);
            }
        }
        // delete records with random calls to compact and compress
        for (int i = 0; i < numRecords; i++) {
            if (LOGGER.isLoggable(Level.INFO)) {
                if ((i + 1) % 100 == 0) {
                    LOGGER.info("DELETING " + (i + 1) + " / " + numRecords);
                }
            }
            ITupleReference tuple =
                    createTuple(ctx, savedFields[i][0], savedFields[i][1], savedFields[i][2], false);
            try {
                int tupleIndex = frame.findDeleteTupleIndex(tuple);
                frame.delete(tuple, tupleIndex);
            } catch (Exception e) {
                // ignore tuples that cannot be deleted (e.g. because their insert failed above)
            }
            if (rnd.nextInt() % compactFreq == 0) {
                before = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                frame.compact();
                after = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                Assert.assertEquals(before, after);
            }
            if (rnd.nextInt() % compressFreq == 0) {
                before = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                frame.compress();
                after = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
                Assert.assertEquals(before, after);
            }
        }
    } finally {
        bufferCache.unpin(page);
        bufferCache.closeFile(btreeFileId);
        bufferCache.close();
    }
}
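
The repeated before/after checks above all assert one invariant: neither slot compaction nor prefix compression may change the logical tuple contents of the frame. A hedged sketch of that check as a single helper (assertContentsStable is an illustrative name; the test checks compact() and compress() separately, and combining them here is an assumption):

// Illustrative sketch: snapshot the frame's printed tuples, run both space
// reclamation operations, and assert the logical contents are unchanged.
static void assertContentsStable(BTreeFieldPrefixNSMLeafFrame frame,
        ISerializerDeserializer[] fieldSerdes) throws Exception {
    String before = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
    frame.compact(); // reclaim space left by deleted tuples
    frame.compress(); // rebuild the field-prefix compression
    String after = TreeIndexUtils.printFrameTuples(frame, fieldSerdes);
    Assert.assertEquals(before, after);
}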