Use of org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory in project asterixdb by apache.
From the class InvertedIndexUtils, method createLSMInvertedIndex:
public static LSMInvertedIndex createLSMInvertedIndex(IIOManager ioManager,
        List<IVirtualBufferCache> virtualBufferCaches, IFileMapProvider diskFileMapProvider,
        ITypeTraits[] invListTypeTraits, IBinaryComparatorFactory[] invListCmpFactories,
        ITypeTraits[] tokenTypeTraits, IBinaryComparatorFactory[] tokenCmpFactories,
        IBinaryTokenizerFactory tokenizerFactory, IBufferCache diskBufferCache, String absoluteOnDiskDir,
        double bloomFilterFalsePositiveRate, ILSMMergePolicy mergePolicy, ILSMOperationTracker opTracker,
        ILSMIOOperationScheduler ioScheduler, ILSMIOOperationCallback ioOpCallback, int[] invertedIndexFields,
        ITypeTraits[] filterTypeTraits, IBinaryComparatorFactory[] filterCmpFactories, int[] filterFields,
        int[] filterFieldsForNonBulkLoadOps, int[] invertedIndexFieldsForNonBulkLoadOps, boolean durable,
        IMetadataPageManagerFactory pageManagerFactory) throws HyracksDataException {
    BTreeFactory deletedKeysBTreeFactory = createDeletedKeysBTreeFactory(ioManager, diskFileMapProvider,
            invListTypeTraits, invListCmpFactories, diskBufferCache, pageManagerFactory);
    // Every inverted-list field doubles as a bloom filter key for the deleted-keys BTree.
    int[] bloomFilterKeyFields = new int[invListCmpFactories.length];
    for (int i = 0; i < invListCmpFactories.length; i++) {
        bloomFilterKeyFields[i] = i;
    }
    BloomFilterFactory bloomFilterFactory =
            new BloomFilterFactory(diskBufferCache, diskFileMapProvider, bloomFilterKeyFields);
    FileReference onDiskDirFileRef = ioManager.resolveAbsolutePath(absoluteOnDiskDir);
    LSMInvertedIndexFileManager fileManager = new LSMInvertedIndexFileManager(ioManager, diskFileMapProvider,
            onDiskDirFileRef, deletedKeysBTreeFactory);
    IInvertedListBuilderFactory invListBuilderFactory =
            new FixedSizeElementInvertedListBuilderFactory(invListTypeTraits);
    OnDiskInvertedIndexFactory invIndexFactory = new OnDiskInvertedIndexFactory(ioManager, diskBufferCache,
            diskFileMapProvider, invListBuilderFactory, invListTypeTraits, invListCmpFactories, tokenTypeTraits,
            tokenCmpFactories, fileManager, pageManagerFactory);
    // Component filters are optional; the helpers are built only when comparator factories are supplied.
    ComponentFilterHelper filterHelper = null;
    LSMComponentFilterFrameFactory filterFrameFactory = null;
    LSMComponentFilterManager filterManager = null;
    if (filterCmpFactories != null) {
        TypeAwareTupleWriterFactory filterTupleWriterFactory = new TypeAwareTupleWriterFactory(filterTypeTraits);
        filterHelper = new ComponentFilterHelper(filterTupleWriterFactory, filterCmpFactories);
        filterFrameFactory = new LSMComponentFilterFrameFactory(filterTupleWriterFactory);
        filterManager = new LSMComponentFilterManager(filterFrameFactory);
    }
    return new LSMInvertedIndex(ioManager, virtualBufferCaches, invIndexFactory, deletedKeysBTreeFactory,
            bloomFilterFactory, filterHelper, filterFrameFactory, filterManager, bloomFilterFalsePositiveRate,
            fileManager, diskFileMapProvider, invListTypeTraits, invListCmpFactories, tokenTypeTraits,
            tokenCmpFactories, tokenizerFactory, mergePolicy, opTracker, ioScheduler, ioOpCallback,
            invertedIndexFields, filterFields, filterFieldsForNonBulkLoadOps, invertedIndexFieldsForNonBulkLoadOps,
            durable);
}
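In this method the TypeAwareTupleWriterFactory is only ever handed to other factories, so it never appears in isolation. For context, a minimal hedged sketch of what the factory produces; someTuple is a hypothetical ITupleReference, and the bytesRequired/writeTuple calls reflect our reading of the ITreeIndexTupleWriter interface:

// Sketch only: a type-aware writer lays out tuples according to the supplied type traits,
// omitting per-field length information for fixed-size types such as integers.
ITypeTraits[] traits = { IntegerPointable.TYPE_TRAITS, IntegerPointable.TYPE_TRAITS };
TypeAwareTupleWriterFactory writerFactory = new TypeAwareTupleWriterFactory(traits);
ITreeIndexTupleWriter writer = writerFactory.createTupleWriter();
int size = writer.bytesRequired(someTuple); // someTuple: any ITupleReference built elsewhere
byte[] buf = new byte[size];
writer.writeTuple(someTuple, buf, 0);       // serialize the tuple at offset 0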
Use of org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory in project asterixdb by apache.
From the class InvertedIndexUtils, method createDeletedKeysBTreeFactory:
public static BTreeFactory createDeletedKeysBTreeFactory(IIOManager ioManager,
        IFileMapProvider diskFileMapProvider, ITypeTraits[] invListTypeTraits,
        IBinaryComparatorFactory[] invListCmpFactories, IBufferCache diskBufferCache,
        IPageManagerFactory freePageManagerFactory) throws HyracksDataException {
    // A single type-aware tuple writer factory drives both the leaf and the interior frame factories.
    TypeAwareTupleWriterFactory tupleWriterFactory = new TypeAwareTupleWriterFactory(invListTypeTraits);
    ITreeIndexFrameFactory leafFrameFactory =
            BTreeUtils.getLeafFrameFactory(tupleWriterFactory, BTreeLeafFrameType.REGULAR_NSM);
    ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(tupleWriterFactory);
    BTreeFactory deletedKeysBTreeFactory = new BTreeFactory(ioManager, diskBufferCache, diskFileMapProvider,
            freePageManagerFactory, interiorFrameFactory, leafFrameFactory, invListCmpFactories,
            invListCmpFactories.length);
    return deletedKeysBTreeFactory;
}
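The wiring pattern here (one tuple writer factory shared by the leaf and interior frame factories) recurs in the test snippets below. A short sketch of the same pattern with a variable-length key, which is the case the type-aware writer exists for; the schema and variable names are ours:

// Hypothetical (String, int) schema: UTF8 strings are variable-length, so the type-aware
// writer records their length while writing the fixed-size int field raw.
ITypeTraits[] traits = { UTF8StringPointable.TYPE_TRAITS, IntegerPointable.TYPE_TRAITS };
TypeAwareTupleWriterFactory tupleWriterFactory = new TypeAwareTupleWriterFactory(traits);
ITreeIndexFrameFactory leafFrames = new BTreeNSMLeafFrameFactory(tupleWriterFactory);
ITreeIndexFrameFactory interiorFrames = new BTreeNSMInteriorFrameFactory(tupleWriterFactory);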
Use of org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory in project asterixdb by apache.
From the class InvertedIndexUtils, method createPartitionedLSMInvertedIndex:
public static PartitionedLSMInvertedIndex createPartitionedLSMInvertedIndex(IIOManager ioManager,
        List<IVirtualBufferCache> virtualBufferCaches, IFileMapProvider diskFileMapProvider,
        ITypeTraits[] invListTypeTraits, IBinaryComparatorFactory[] invListCmpFactories,
        ITypeTraits[] tokenTypeTraits, IBinaryComparatorFactory[] tokenCmpFactories,
        IBinaryTokenizerFactory tokenizerFactory, IBufferCache diskBufferCache, String absoluteOnDiskDir,
        double bloomFilterFalsePositiveRate, ILSMMergePolicy mergePolicy, ILSMOperationTracker opTracker,
        ILSMIOOperationScheduler ioScheduler, ILSMIOOperationCallback ioOpCallback, int[] invertedIndexFields,
        ITypeTraits[] filterTypeTraits, IBinaryComparatorFactory[] filterCmpFactories, int[] filterFields,
        int[] filterFieldsForNonBulkLoadOps, int[] invertedIndexFieldsForNonBulkLoadOps, boolean durable,
        IPageManagerFactory pageManagerFactory) throws HyracksDataException {
    BTreeFactory deletedKeysBTreeFactory = createDeletedKeysBTreeFactory(ioManager, diskFileMapProvider,
            invListTypeTraits, invListCmpFactories, diskBufferCache, pageManagerFactory);
    // As in the non-partitioned variant, every inverted-list field is a bloom filter key.
    int[] bloomFilterKeyFields = new int[invListCmpFactories.length];
    for (int i = 0; i < invListCmpFactories.length; i++) {
        bloomFilterKeyFields[i] = i;
    }
    BloomFilterFactory bloomFilterFactory =
            new BloomFilterFactory(diskBufferCache, diskFileMapProvider, bloomFilterKeyFields);
    FileReference onDiskDirFileRef = ioManager.resolveAbsolutePath(absoluteOnDiskDir);
    LSMInvertedIndexFileManager fileManager = new LSMInvertedIndexFileManager(ioManager, diskFileMapProvider,
            onDiskDirFileRef, deletedKeysBTreeFactory);
    IInvertedListBuilderFactory invListBuilderFactory =
            new FixedSizeElementInvertedListBuilderFactory(invListTypeTraits);
    // The structural difference from createLSMInvertedIndex: a partitioned on-disk index factory.
    PartitionedOnDiskInvertedIndexFactory invIndexFactory = new PartitionedOnDiskInvertedIndexFactory(ioManager,
            diskBufferCache, diskFileMapProvider, invListBuilderFactory, invListTypeTraits, invListCmpFactories,
            tokenTypeTraits, tokenCmpFactories, fileManager, pageManagerFactory);
    ComponentFilterHelper filterHelper = null;
    LSMComponentFilterFrameFactory filterFrameFactory = null;
    LSMComponentFilterManager filterManager = null;
    if (filterCmpFactories != null) {
        TypeAwareTupleWriterFactory filterTupleWriterFactory = new TypeAwareTupleWriterFactory(filterTypeTraits);
        filterHelper = new ComponentFilterHelper(filterTupleWriterFactory, filterCmpFactories);
        filterFrameFactory = new LSMComponentFilterFrameFactory(filterTupleWriterFactory);
        filterManager = new LSMComponentFilterManager(filterFrameFactory);
    }
    return new PartitionedLSMInvertedIndex(ioManager, virtualBufferCaches, invIndexFactory, deletedKeysBTreeFactory,
            bloomFilterFactory, filterHelper, filterFrameFactory, filterManager, bloomFilterFalsePositiveRate,
            fileManager, diskFileMapProvider, invListTypeTraits, invListCmpFactories, tokenTypeTraits,
            tokenCmpFactories, tokenizerFactory, mergePolicy, opTracker, ioScheduler, ioOpCallback,
            invertedIndexFields, filterFields, filterFieldsForNonBulkLoadOps, invertedIndexFieldsForNonBulkLoadOps,
            durable);
}
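A hypothetical call site, assuming the surrounding runtime already supplies the I/O manager, buffer caches, merge policy, tracker, scheduler, and callbacks; passing null for the three filter arguments takes the no-component-filter branch above, since the method only checks filterCmpFactories:

// All identifiers other than the literals are placeholders provided by the host runtime.
PartitionedLSMInvertedIndex invIndex = InvertedIndexUtils.createPartitionedLSMInvertedIndex(ioManager,
        virtualBufferCaches, diskFileMapProvider, invListTypeTraits, invListCmpFactories, tokenTypeTraits,
        tokenCmpFactories, tokenizerFactory, diskBufferCache, "/tmp/invindex", 0.01, mergePolicy, opTracker,
        ioScheduler, ioOpCallback, invertedIndexFields, null, null, null, filterFieldsForNonBulkLoadOps,
        invertedIndexFieldsForNonBulkLoadOps, true, pageManagerFactory);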
Use of org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory in project asterixdb by apache.
From the class BTreeStatsTest, method test01:
@Test
public void test01() throws Exception {
    TestStorageManagerComponentHolder.init(PAGE_SIZE, NUM_PAGES, MAX_OPEN_FILES);
    IBufferCache bufferCache = harness.getBufferCache();
    IFileMapProvider fmp = harness.getFileMapProvider();
    // Declare fields.
    int fieldCount = 2;
    ITypeTraits[] typeTraits = new ITypeTraits[fieldCount];
    typeTraits[0] = IntegerPointable.TYPE_TRAITS;
    typeTraits[1] = IntegerPointable.TYPE_TRAITS;
    // Declare keys.
    int keyFieldCount = 1;
    IBinaryComparatorFactory[] cmpFactories = new IBinaryComparatorFactory[keyFieldCount];
    cmpFactories[0] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY);
    TypeAwareTupleWriterFactory tupleWriterFactory = new TypeAwareTupleWriterFactory(typeTraits);
    ITreeIndexFrameFactory leafFrameFactory = new BTreeNSMLeafFrameFactory(tupleWriterFactory);
    ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(tupleWriterFactory);
    ITreeIndexMetadataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
    IBTreeLeafFrame leafFrame = (IBTreeLeafFrame) leafFrameFactory.createFrame();
    IBTreeInteriorFrame interiorFrame = (IBTreeInteriorFrame) interiorFrameFactory.createFrame();
    ITreeIndexMetadataFrame metaFrame = metaFrameFactory.createFrame();
    IMetadataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
    BTree btree = new BTree(bufferCache, fmp, freePageManager, interiorFrameFactory, leafFrameFactory,
            cmpFactories, fieldCount, harness.getFileReference());
    btree.create();
    btree.activate();
    Random rnd = new Random();
    rnd.setSeed(50);
    long start = System.currentTimeMillis();
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("INSERTING INTO TREE");
    }
    IFrame frame = new VSizeFrame(ctx);
    FrameTupleAppender appender = new FrameTupleAppender();
    ArrayTupleBuilder tb = new ArrayTupleBuilder(fieldCount);
    DataOutput dos = tb.getDataOutput();
    ISerializerDeserializer[] recDescSers =
            { IntegerSerializerDeserializer.INSTANCE, IntegerSerializerDeserializer.INSTANCE };
    RecordDescriptor recDesc = new RecordDescriptor(recDescSers);
    IFrameTupleAccessor accessor = new FrameTupleAccessor(recDesc);
    accessor.reset(frame.getBuffer());
    FrameTupleReference tuple = new FrameTupleReference();
    ITreeIndexAccessor indexAccessor =
            btree.createAccessor(TestOperationCallback.INSTANCE, TestOperationCallback.INSTANCE);
    // Insert 100,000 random keys; duplicates are expected and ignored below.
    for (int i = 0; i < 100000; i++) {
        int f0 = rnd.nextInt() % 100000;
        int f1 = 5;
        tb.reset();
        IntegerSerializerDeserializer.INSTANCE.serialize(f0, dos);
        tb.addFieldEndOffset();
        IntegerSerializerDeserializer.INSTANCE.serialize(f1, dos);
        tb.addFieldEndOffset();
        appender.reset(frame, true);
        appender.append(tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize());
        tuple.reset(accessor, 0);
        if (LOGGER.isLoggable(Level.INFO)) {
            if (i % 10000 == 0) {
                long end = System.currentTimeMillis();
                LOGGER.info("INSERTING " + i + " : " + f0 + " " + f1 + " " + (end - start));
            }
        }
        try {
            indexAccessor.insert(tuple);
        } catch (HyracksDataException e) {
            // Random keys collide; anything other than a duplicate key is a real failure.
            if (e.getErrorCode() != ErrorCode.DUPLICATE_KEY) {
                e.printStackTrace();
                throw e;
            }
        }
    }
    int fileId = fmp.lookupFileId(harness.getFileReference());
    TreeIndexStatsGatherer statsGatherer =
            new TreeIndexStatsGatherer(bufferCache, freePageManager, fileId, btree.getRootPageId());
    TreeIndexStats stats = statsGatherer.gatherStats(leafFrame, interiorFrame, metaFrame);
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("\n" + stats.toString());
    }
    TreeIndexBufferCacheWarmup bufferCacheWarmup =
            new TreeIndexBufferCacheWarmup(bufferCache, freePageManager, fileId);
    bufferCacheWarmup.warmup(leafFrame, metaFrame, new int[] { 1, 2 }, new int[] { 2, 5 });
    btree.deactivate();
    btree.destroy();
    bufferCache.close();
}
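The frame/appender/accessor plumbing above exists only to manufacture a FrameTupleReference for the insert call; when a frame is not otherwise needed, the TupleUtils helper used by the next test does the same job in two lines. A sketch using only calls that appear elsewhere on this page:

// Equivalent tuple construction without a frame; the reference points into the builder's array.
ArrayTupleBuilder tb = new ArrayTupleBuilder(2);
ArrayTupleReference tupleRef = new ArrayTupleReference();
TupleUtils.createIntegerTuple(tb, tupleRef, f0, f1); // f0, f1 as in the insert loop above
indexAccessor.insert(tupleRef);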
Use of org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory in project asterixdb by apache.
From the class BTreeUpdateSearchTest, method test01:
// Update scan test on fixed-length tuples.
@Test
public void test01() throws Exception {
    IBufferCache bufferCache = harness.getBufferCache();
    // Declare fields.
    int fieldCount = 2;
    ITypeTraits[] typeTraits = new ITypeTraits[fieldCount];
    typeTraits[0] = IntegerPointable.TYPE_TRAITS;
    typeTraits[1] = IntegerPointable.TYPE_TRAITS;
    // Declare keys.
    int keyFieldCount = 1;
    IBinaryComparatorFactory[] cmpFactories = new IBinaryComparatorFactory[keyFieldCount];
    cmpFactories[0] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY);
    @SuppressWarnings("rawtypes")
    ISerializerDeserializer[] recDescSers =
            { IntegerSerializerDeserializer.INSTANCE, IntegerSerializerDeserializer.INSTANCE };
    TypeAwareTupleWriterFactory tupleWriterFactory = new TypeAwareTupleWriterFactory(typeTraits);
    ITreeIndexFrameFactory leafFrameFactory = new BTreeNSMLeafFrameFactory(tupleWriterFactory);
    ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(tupleWriterFactory);
    ITreeIndexMetadataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
    IBTreeLeafFrame leafFrame = (IBTreeLeafFrame) leafFrameFactory.createFrame();
    IMetadataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
    BTree btree = new BTree(bufferCache, harness.getFileMapProvider(), freePageManager, interiorFrameFactory,
            leafFrameFactory, cmpFactories, fieldCount, harness.getFileReference());
    btree.create();
    btree.activate();
    Random rnd = new Random();
    rnd.setSeed(50);
    long start = System.currentTimeMillis();
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("INSERTING INTO TREE");
    }
    ArrayTupleBuilder tb = new ArrayTupleBuilder(fieldCount);
    ArrayTupleReference insertTuple = new ArrayTupleReference();
    ITreeIndexAccessor indexAccessor =
            btree.createAccessor(TestOperationCallback.INSTANCE, TestOperationCallback.INSTANCE);
    int numInserts = 10000;
    for (int i = 0; i < numInserts; i++) {
        int f0 = rnd.nextInt() % 10000;
        int f1 = 5;
        TupleUtils.createIntegerTuple(tb, insertTuple, f0, f1);
        if (LOGGER.isLoggable(Level.INFO)) {
            if (i % 10000 == 0) {
                long end = System.currentTimeMillis();
                LOGGER.info("INSERTING " + i + " : " + f0 + " " + f1 + " " + (end - start));
            }
        }
        try {
            indexAccessor.insert(insertTuple);
        } catch (HyracksDataException hde) {
            // Duplicate keys are expected from the random generator; anything else is a real failure.
            if (hde.getErrorCode() != ErrorCode.DUPLICATE_KEY) {
                hde.printStackTrace();
                throw hde;
            }
        }
    }
    long end = System.currentTimeMillis();
    long duration = end - start;
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("DURATION: " + duration);
    }
    // Update scan: rewrite the value field of every tuple in place.
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("UPDATE SCAN:");
    }
    // Set the cursor to X-latch nodes, since tuples are modified in place.
    ITreeIndexCursor updateScanCursor = new BTreeRangeSearchCursor(leafFrame, true);
    RangePredicate nullPred = new RangePredicate(null, null, true, true, null, null);
    indexAccessor.search(updateScanCursor, nullPred);
    try {
        while (updateScanCursor.hasNext()) {
            updateScanCursor.next();
            ITupleReference tuple = updateScanCursor.getTuple();
            // Change the value field.
            IntegerPointable.setInteger(tuple.getFieldData(1), tuple.getFieldStart(1), 10);
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        updateScanCursor.close();
    }
    // Ordered scan to verify the values.
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("ORDERED SCAN:");
    }
    // Again set the cursor to X-latch nodes.
    ITreeIndexCursor scanCursor = new BTreeRangeSearchCursor(leafFrame, true);
    indexAccessor.search(scanCursor, nullPred);
    try {
        while (scanCursor.hasNext()) {
            scanCursor.next();
            ITupleReference tuple = scanCursor.getTuple();
            String rec = TupleUtils.printTuple(tuple, recDescSers);
            if (LOGGER.isLoggable(Level.INFO)) {
                LOGGER.info(rec);
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        scanCursor.close();
    }
    btree.deactivate();
    btree.destroy();
}
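The all-null RangePredicate above selects the whole tree; bounding both keys turns the same search path into a point lookup. A hedged sketch reusing this test's setup; using MultiComparator.create over the key comparator factories is our assumption about the appropriate helper:

// Hypothetical point lookup for key 42 in the tree built above.
ArrayTupleBuilder keyTb = new ArrayTupleBuilder(1);
ArrayTupleReference key = new ArrayTupleReference();
TupleUtils.createIntegerTuple(keyTb, key, 42);
MultiComparator keyCmp = MultiComparator.create(cmpFactories);
RangePredicate pointPred = new RangePredicate(key, key, true, true, keyCmp, keyCmp);
ITreeIndexCursor cursor = new BTreeRangeSearchCursor(leafFrame, false); // read-only: no X latches
indexAccessor.search(cursor, pointPred);
try {
    while (cursor.hasNext()) {
        cursor.next(); // at most one match, since keys are unique
    }
} finally {
    cursor.close();
}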