Use of org.apache.hyracks.dataflow.common.data.accessors.ITupleReference in project asterixdb by apache.
The class BTreeUpdateSearchOperatorNodePushable, method writeSearchResults:
@Override
protected void writeSearchResults(int tupleIndex) throws Exception {
    while (cursor.hasNext()) {
        tb.reset();
        cursor.next();
        if (retainInput) {
            // Prepend the fields of the input tuple when the operator retains its input.
            frameTuple.reset(accessor, tupleIndex);
            for (int i = 0; i < frameTuple.getFieldCount(); i++) {
                dos.write(frameTuple.getFieldData(i), frameTuple.getFieldStart(i), frameTuple.getFieldLength(i));
                tb.addFieldEndOffset();
            }
        }
        // Let the updater modify the matched tuple in place, then copy its fields out.
        ITupleReference tuple = cursor.getTuple();
        tupleUpdater.updateTuple(tuple);
        for (int i = 0; i < tuple.getFieldCount(); i++) {
            dos.write(tuple.getFieldData(i), tuple.getFieldStart(i), tuple.getFieldLength(i));
            tb.addFieldEndOffset();
        }
        FrameUtils.appendToWriter(writer, appender, tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize());
    }
}
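The per-field loop above is the standard way to re-serialize an ITupleReference, since the interface exposes each field only as a byte range (getFieldData, getFieldStart, getFieldLength). As a minimal standalone sketch of that pattern (the class and helper names are hypothetical, not part of the project):

import java.io.DataOutput;
import java.io.IOException;
import org.apache.hyracks.dataflow.common.comm.io.ArrayTupleBuilder;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;

public final class TupleCopyUtil {
    // Append every field of a tuple to a builder, recording field boundaries.
    public static void copyTupleFields(ITupleReference tuple, ArrayTupleBuilder tb) throws IOException {
        DataOutput dos = tb.getDataOutput();
        for (int i = 0; i < tuple.getFieldCount(); i++) {
            dos.write(tuple.getFieldData(i), tuple.getFieldStart(i), tuple.getFieldLength(i));
            tb.addFieldEndOffset();
        }
    }
}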
Use of org.apache.hyracks.dataflow.common.data.accessors.ITupleReference in project asterixdb by apache.
The class LSMInvertedIndexTestWorker, method insert:
private void insert(LSMInvertedIndexAccessor accessor, ITupleReference tuple) throws HyracksDataException {
    // Ignore ongoing merges. Do an insert instead.
    accessor.insert(tuple);
    // Add tuple to document corpus so we can delete it.
    ITupleReference copyTuple = TupleUtils.copyTuple(tuple);
    documentCorpus.add(copyTuple);
}
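The copy matters because the tuple passed to insert typically points into a frame buffer that the caller reuses; storing the reference directly would let later operations silently overwrite the corpus entry. A hedged sketch of what such a deep copy amounts to, built only from the ITupleReference accessors used elsewhere in this section (not the actual TupleUtils implementation):

import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.dataflow.common.comm.io.ArrayTupleBuilder;
import org.apache.hyracks.dataflow.common.comm.io.ArrayTupleReference;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;

public final class TupleCopySketch {
    // Materialize each field into a fresh byte array owned by the copy.
    public static ITupleReference copyTupleSketch(ITupleReference tuple) throws HyracksDataException {
        ArrayTupleBuilder tb = new ArrayTupleBuilder(tuple.getFieldCount());
        for (int i = 0; i < tuple.getFieldCount(); i++) {
            tb.addField(tuple.getFieldData(i), tuple.getFieldStart(i), tuple.getFieldLength(i));
        }
        ArrayTupleReference copy = new ArrayTupleReference();
        copy.reset(tb.getFieldEndOffsets(), tb.getByteArray());
        return copy;
    }
}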
Use of org.apache.hyracks.dataflow.common.data.accessors.ITupleReference in project asterixdb by apache.
The class LSMInvertedIndexTestUtils, method bulkLoadInvIndex:
public static void bulkLoadInvIndex(LSMInvertedIndexTestContext testCtx, TupleGenerator tupleGen, int numDocs, boolean appendOnly) throws HyracksDataException, IOException {
    SortedSet<CheckTuple> tmpMemIndex = new TreeSet<>();
    // First generate the expected index by inserting the documents one by one.
    for (int i = 0; i < numDocs; i++) {
        ITupleReference tuple = tupleGen.next();
        testCtx.insertCheckTuples(tuple, tmpMemIndex);
    }
    ISerializerDeserializer[] fieldSerdes = testCtx.getFieldSerdes();
    // Use the expected index to bulk-load the actual index.
    IIndexBulkLoader bulkLoader = testCtx.getIndex().createBulkLoader(1.0f, false, numDocs, true);
    ArrayTupleBuilder tupleBuilder = new ArrayTupleBuilder(fieldSerdes.length);
    ArrayTupleReference tuple = new ArrayTupleReference();
    Iterator<CheckTuple> checkTupleIter = tmpMemIndex.iterator();
    while (checkTupleIter.hasNext()) {
        CheckTuple checkTuple = checkTupleIter.next();
        OrderedIndexTestUtils.createTupleFromCheckTuple(checkTuple, tupleBuilder, tuple, fieldSerdes);
        bulkLoader.add(tuple);
    }
    bulkLoader.end();
    // Add all check tuples from the temp index to the test context.
    testCtx.getCheckTuples().addAll(tmpMemIndex);
}
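The method above follows the generic IIndexBulkLoader contract: create the loader with a fill factor and a size hint, add tuples in index sort order (guaranteed here by the TreeSet), then call end(). A hedged skeleton of that contract; the class name is hypothetical and the import paths may differ between Hyracks versions:

import java.util.List;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
import org.apache.hyracks.storage.am.common.api.IIndex;
import org.apache.hyracks.storage.am.common.api.IIndexBulkLoader;

public final class BulkLoadSketch {
    // Tuples must already be sorted in the index's key order.
    public static void bulkLoad(IIndex index, List<ITupleReference> sortedTuples) throws HyracksDataException {
        IIndexBulkLoader loader = index.createBulkLoader(1.0f, false, sortedTuples.size(), true);
        for (ITupleReference t : sortedTuples) {
            loader.add(t);
        }
        loader.end(); // finalize pages; the index only becomes readable after end()
    }
}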
Use of org.apache.hyracks.dataflow.common.data.accessors.ITupleReference in project asterixdb by apache.
The class TreeIndexDiskOrderScanOperatorNodePushable, method initialize:
@Override
public void initialize() throws HyracksDataException {
    treeIndexHelper.open();
    ITreeIndex treeIndex = (ITreeIndex) treeIndexHelper.getIndexInstance();
    try {
        ITreeIndexFrame cursorFrame = treeIndex.getLeafFrameFactory().createFrame();
        ITreeIndexCursor cursor = new TreeIndexDiskOrderScanCursor(cursorFrame);
        LocalResource resource = treeIndexHelper.getResource();
        ISearchOperationCallback searchCallback = searchCallbackFactory.createSearchOperationCallback(resource.getId(), ctx, null);
        ITreeIndexAccessor indexAccessor = (ITreeIndexAccessor) treeIndex.createAccessor(NoOpOperationCallback.INSTANCE, searchCallback);
        try {
            writer.open();
            indexAccessor.diskOrderScan(cursor);
            int fieldCount = treeIndex.getFieldCount();
            FrameTupleAppender appender = new FrameTupleAppender(new VSizeFrame(ctx));
            ArrayTupleBuilder tb = new ArrayTupleBuilder(fieldCount);
            DataOutput dos = tb.getDataOutput();
            // Drain the cursor, copying each tuple's fields into the output frame.
            while (cursor.hasNext()) {
                tb.reset();
                cursor.next();
                ITupleReference frameTuple = cursor.getTuple();
                for (int i = 0; i < frameTuple.getFieldCount(); i++) {
                    dos.write(frameTuple.getFieldData(i), frameTuple.getFieldStart(i), frameTuple.getFieldLength(i));
                    tb.addFieldEndOffset();
                }
                FrameUtils.appendToWriter(writer, appender, tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize());
            }
            // Flush any partially filled frame to the writer.
            appender.write(writer, true);
        } catch (Throwable th) {
            writer.fail();
            throw new HyracksDataException(th);
        } finally {
            try {
                cursor.close();
            } catch (Exception cursorCloseException) {
                throw new IllegalStateException(cursorCloseException);
            } finally {
                writer.close();
            }
        }
    } catch (Throwable th) {
        treeIndexHelper.close();
        throw new HyracksDataException(th);
    }
}
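Every cursor in these snippets obeys the same two-step protocol: hasNext() to test, next() to advance, then getTuple() to obtain a reference that stays valid only until the cursor advances again. A minimal hedged sketch of draining a positioned cursor safely; the consumer interface is hypothetical:

import org.apache.hyracks.api.exceptions.HyracksDataException;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;
import org.apache.hyracks.storage.am.common.api.IIndexCursor;

public final class CursorDrainSketch {
    public interface TupleConsumer {
        void consume(ITupleReference tuple) throws HyracksDataException;
    }

    // Drain a cursor that a search or scan call has already positioned.
    public static void drain(IIndexCursor cursor, TupleConsumer consumer) throws HyracksDataException {
        try {
            while (cursor.hasNext()) {
                cursor.next();
                consumer.consume(cursor.getTuple()); // valid only until the next next()
            }
        } finally {
            cursor.close(); // release the cursor even if the consumer fails
        }
    }
}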
Use of org.apache.hyracks.dataflow.common.data.accessors.ITupleReference in project asterixdb by apache.
The class LSMBTree, method flush:
@Override
public ILSMDiskComponent flush(ILSMIOOperation operation) throws HyracksDataException {
    LSMBTreeFlushOperation flushOp = (LSMBTreeFlushOperation) operation;
    LSMBTreeMemoryComponent flushingComponent = (LSMBTreeMemoryComponent) flushOp.getFlushingComponent();
    IIndexAccessor accessor = flushingComponent.getBTree().createAccessor(NoOpOperationCallback.INSTANCE, NoOpOperationCallback.INSTANCE);
    RangePredicate nullPred = new RangePredicate(null, null, true, true, null, null);
    long numElements = 0L;
    if (hasBloomFilter) {
        // Count the elements in the BTree to size the bloom filter.
        IIndexCursor countingCursor = ((BTreeAccessor) accessor).createCountingSearchCursor();
        accessor.search(countingCursor, nullPred);
        try {
            while (countingCursor.hasNext()) {
                countingCursor.next();
                ITupleReference countTuple = countingCursor.getTuple();
                numElements = IntegerPointable.getInteger(countTuple.getFieldData(0), countTuple.getFieldStart(0));
            }
        } finally {
            countingCursor.close();
        }
    }
    LSMBTreeDiskComponent component = createDiskComponent(componentFactory, flushOp.getTarget(), flushOp.getBloomFilterTarget(), true);
    ILSMDiskComponentBulkLoader componentBulkLoader = createComponentBulkLoader(component, 1.0f, false, numElements, false, false);
    // Scan the in-memory component and bulk-load every tuple into the new disk component.
    IIndexCursor scanCursor = accessor.createSearchCursor(false);
    accessor.search(scanCursor, nullPred);
    try {
        while (scanCursor.hasNext()) {
            scanCursor.next();
            componentBulkLoader.add(scanCursor.getTuple());
        }
    } finally {
        scanCursor.close();
    }
    if (component.getLSMComponentFilter() != null) {
        // Carry the min/max filter tuples from the memory component over to the disk component.
        List<ITupleReference> filterTuples = new ArrayList<>();
        filterTuples.add(flushingComponent.getLSMComponentFilter().getMinTuple());
        filterTuples.add(flushingComponent.getLSMComponentFilter().getMaxTuple());
        getFilterManager().updateFilter(component.getLSMComponentFilter(), filterTuples);
        getFilterManager().writeFilter(component.getLSMComponentFilter(), component.getBTree());
    }
    // Write metadata from the memory component to disk.
    // Q. What about the merge operation? How do we resolve conflicts?
    // A. By providing an appropriate ILSMIOOperationCallback.
    // Must not reset the metadata before the flush is completed;
    // use the copy of the metadata in the opContext.
    // TODO: this code should be in the callback, not in the index.
    flushingComponent.getMetadata().copy(component.getMetadata());
    componentBulkLoader.end();
    return component;
}
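IntegerPointable.getInteger in the counting loop decodes the count directly from the tuple's backing array, which is the usual way to read fixed-width primitives out of an ITupleReference. A small sketch; readIntField is a hypothetical helper and assumes the field holds a 4-byte integer in Hyracks' standard encoding:

import org.apache.hyracks.data.std.primitive.IntegerPointable;
import org.apache.hyracks.dataflow.common.data.accessors.ITupleReference;

public final class FieldDecodeSketch {
    // Decode field i of a tuple as a 4-byte integer (hypothetical helper).
    public static int readIntField(ITupleReference tuple, int i) {
        return IntegerPointable.getInteger(tuple.getFieldData(i), tuple.getFieldStart(i));
    }
}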