Use of org.apache.hyracks.storage.common.IIndex in the Apache AsterixDB project.
The class AbstractInvertedIndexDeleteTest, method runTest.
/**
 * Repeatedly populates the inverted index (via bulk load or inserts), then deletes the
 * whole corpus in several rounds, validating the index and running a small search
 * workload after each delete round.
 *
 * @param testCtx  test context holding the index under test and its document corpus
 * @param tupleGen generator for the documents to insert/bulk-load
 * @throws IOException on index or test-utility failure
 */
protected void runTest(LSMInvertedIndexTestContext testCtx, TupleGenerator tupleGen) throws IOException {
    IIndex invIndex = testCtx.getIndex();
    boolean isLsmVariant =
            (invIndexType == InvertedIndexType.LSM) || (invIndexType == InvertedIndexType.PARTITIONED_LSM);
    // Bulk-loaded LSM variants skip the explicit create/activate here; every other
    // combination performs it up front. (Equivalent to the original
    // (!LSM && !PARTITIONED_LSM) || !bulkLoad condition, spelled out explicitly.)
    if (!isLsmVariant || !bulkLoad) {
        invIndex.create();
        invIndex.activate();
    }
    for (int round = 0; round < numInsertRounds; round++) {
        // Restart document-id generation from 0 for every insert round.
        tupleGen.reset();
        if (bulkLoad) {
            // The boolean argument mirrors whether this is an LSM variant,
            // matching the create/activate decision above.
            LSMInvertedIndexTestUtils.bulkLoadInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT, isLsmVariant);
        } else {
            LSMInvertedIndexTestUtils.insertIntoInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT);
        }
        // Delete the full corpus across numDeleteRounds rounds, checking after each one.
        int tuplesPerDeleteRound =
                (int) Math.ceil((float) testCtx.getDocumentCorpus().size() / (float) numDeleteRounds);
        for (int deleteRound = 0; deleteRound < numDeleteRounds; deleteRound++) {
            LSMInvertedIndexTestUtils.deleteFromInvIndex(testCtx, harness.getRandom(), tuplesPerDeleteRound);
            validateAndCheckIndex(testCtx);
            runTinySearchWorkload(testCtx, tupleGen);
        }
    }
    invIndex.deactivate();
    invIndex.destroy();
}
Use of org.apache.hyracks.storage.common.IIndex in the Apache AsterixDB project.
The class AbstractInvertedIndexLoadTest, method runTest.
/**
 * Creates and activates the index under test, populates it (bulk load or regular
 * inserts, per the test configuration), validates it, runs a small search workload,
 * and tears the index down.
 *
 * @param testCtx  test context holding the index under test
 * @param tupleGen generator for the documents to load
 * @throws IOException on index or test-utility failure
 */
protected void runTest(LSMInvertedIndexTestContext testCtx, TupleGenerator tupleGen) throws IOException {
    final IIndex invIndex = testCtx.getIndex();
    invIndex.create();
    invIndex.activate();
    // Populate the index using the configured loading strategy.
    if (!bulkLoad) {
        LSMInvertedIndexTestUtils.insertIntoInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT);
    } else {
        LSMInvertedIndexTestUtils.bulkLoadInvIndex(testCtx, tupleGen, NUM_DOCS_TO_INSERT, true);
    }
    // Verify the loaded index and exercise it with a small search workload.
    validateAndCheckIndex(testCtx);
    runTinySearchWorkload(testCtx, tupleGen);
    // Tear down.
    invIndex.deactivate();
    invIndex.destroy();
}
Use of org.apache.hyracks.storage.common.IIndex in the Apache AsterixDB project.
The class RecoveryManager, method getLocalMinFirstLSN.
@Override
public long getLocalMinFirstLSN() throws HyracksDataException {
    // Determines the smallest "first LSN" among all open LSM indexes on this node.
    IDatasetLifecycleManager datasetLifecycleManager =
            txnSubsystem.getAsterixAppRuntimeContextProvider().getDatasetLifecycleManager();
    // The minimum first LSN can only be the current append LSN or smaller.
    long minFirstLSN = logMgr.getAppendLSN();
    // An index contributes its first LSN only when it has in-memory data
    // or a flush still pending.
    for (IIndex index : datasetLifecycleManager.getOpenResources()) {
        AbstractLSMIOOperationCallback ioCallback =
                (AbstractLSMIOOperationCallback) ((ILSMIndex) index).getIOOperationCallback();
        boolean hasInMemoryData = !((AbstractLSMIndex) index).isCurrentMutableComponentEmpty();
        if (hasInMemoryData || ioCallback.hasPendingFlush()) {
            minFirstLSN = Math.min(minFirstLSN, ioCallback.getFirstLSN());
        }
    }
    return minFirstLSN;
}
Use of org.apache.hyracks.storage.common.IIndex in the Apache AsterixDB project.
The class IndexBuilder, method build.
@Override
public void build() throws HyracksDataException {
    // Creates the physical index and registers it (plus its LocalResource metadata)
    // under the lifecycle manager. The whole sequence is serialized on lcManager so
    // no concurrent open/create can observe a half-built resource.
    IResourceLifecycleManager<IIndex> lcManager = storageManager.getLifecycleManager(ctx);
    synchronized (lcManager) {
        // The previous resource Id needs to be removed since calling IIndex.create() may possibly destroy any
        // physical artifact that the LocalResourceRepository is managing (e.g. a file containing the resource Id).
        // Once the index has been created, a new resource Id can be generated.
        ILocalResourceRepository localResourceRepository = storageManager.getLocalResourceRepository(ctx);
        LocalResource lr = localResourceRepository.get(resourceRef.getRelativePath());
        long resourceId = lr == null ? -1 : lr.getId();
        if (resourceId != -1) {
            localResourceRepository.delete(resourceRef.getRelativePath());
        }
        resourceId = resourceIdFactory.createId();
        IResource resource = localResourceFactory.createResource(resourceRef);
        lr = new LocalResource(resourceId, ITreeIndexFrame.Constants.VERSION, durable, resource);
        IIndex index = lcManager.get(resourceRef.getRelativePath());
        if (index != null) {
            // NOTE(review): reusing an already-registered index instance here is questionable
            // (flagged "needs to be fixed" by the original author), but it deliberately
            // tolerates stale registrations left behind by cases such as:
            // 1. Crash while delete index is running (we don't do global cleanup on restart)
            // 2. Node leaves and then joins with old data
            lcManager.unregister(resourceRef.getRelativePath());
        } else {
            index = resource.createInstance(ctx);
        }
        // create() wipes/initializes the physical artifact; only afterwards is it safe to
        // persist the new LocalResource record and re-register the index.
        index.create();
        try {
            localResourceRepository.insert(lr);
        } catch (IOException e) {
            throw HyracksDataException.create(e);
        }
        lcManager.register(resourceRef.getRelativePath(), index);
    }
}
Use of org.apache.hyracks.storage.common.IIndex in the Apache AsterixDB project.
The class MetadataNode, method initializeDatasetIdFactory.
@Override
public void initializeDatasetIdFactory(JobId jobId) throws MetadataException, RemoteException {
    // Seeds the DatasetIdFactory with the largest dataset id currently stored in the
    // Dataset metadata index, starting from the first id available to user datasets.
    int maxDatasetId = MetadataIndexImmutableProperties.FIRST_AVAILABLE_USER_DATASET_ID;
    try {
        String resourceName = MetadataPrimaryIndexes.DATASET_DATASET.getFile().getRelativePath();
        IIndex indexInstance = datasetLifecycleManager.get(resourceName);
        datasetLifecycleManager.open(resourceName);
        try {
            IIndexAccessor accessor =
                    indexInstance.createAccessor(NoOpOperationCallback.INSTANCE, NoOpOperationCallback.INSTANCE);
            IIndexCursor cursor = accessor.createSearchCursor(false);
            DatasetTupleTranslator translator = tupleTranslatorProvider.getDatasetTupleTranslator(false);
            IValueExtractor<Dataset> valueExtractor = new MetadataEntityValueExtractor<>(translator);
            // Unbounded range predicate: visit every dataset tuple in the index.
            accessor.search(cursor, new RangePredicate(null, null, true, true, null, null));
            try {
                while (cursor.hasNext()) {
                    cursor.next();
                    Dataset dataset = valueExtractor.getValue(jobId, cursor.getTuple());
                    maxDatasetId = Math.max(maxDatasetId, dataset.getDatasetId());
                }
            } finally {
                // Always release the cursor, even if value extraction fails.
                cursor.close();
            }
        } finally {
            // Balance the open() above regardless of scan outcome.
            datasetLifecycleManager.close(resourceName);
        }
    } catch (HyracksDataException e) {
        throw new MetadataException(e);
    }
    DatasetIdFactory.initialize(maxDatasetId);
}
Aggregations