Use of org.apache.hyracks.storage.common.LocalResource in project asterixdb by apache.
The class MetadataBootstrap, method enlistMetadataDataset.
/**
 * Enlists a metadata index so that it is available for the metadata
 * operations that must be performed upon bootstrapping.
 *
 * @param ncServiceCtx
 *            the NC service context
 * @param index
 *            the metadata index to enlist
 * @throws HyracksDataException
 */
public static void enlistMetadataDataset(INCServiceContext ncServiceCtx, IMetadataIndex index)
        throws HyracksDataException {
    ClusterPartition metadataPartition = appContext.getMetadataProperties().getMetadataPartition();
    int metadataDeviceId = metadataPartition.getIODeviceNum();
    String metadataPartitionPath = StoragePathUtil.prepareStoragePartitionPath(
            ClusterProperties.INSTANCE.getStorageDirectoryName(), metadataPartition.getPartitionId());
    String resourceName = metadataPartitionPath + File.separator + index.getFileNameRelativePath();
    FileReference file = ioManager.getFileReference(metadataDeviceId, resourceName);
    index.setFile(file);
    ITypeTraits[] typeTraits = index.getTypeTraits();
    IBinaryComparatorFactory[] cmpFactories = index.getKeyBinaryComparatorFactory();
    int[] bloomFilterKeyFields = index.getBloomFilterKeyFields();
    // opTrackerProvider and ioOpCallbackFactory should both be acquired through IStorageManager.
    // We cannot do that here because IStorageManager needs a dataset to determine the
    // appropriate objects.
    ILSMOperationTrackerFactory opTrackerFactory = index.isPrimaryIndex()
            ? new PrimaryIndexOperationTrackerFactory(index.getDatasetId().getId())
            : new SecondaryIndexOperationTrackerFactory(index.getDatasetId().getId());
    ILSMIOOperationCallbackFactory ioOpCallbackFactory = LSMBTreeIOOperationCallbackFactory.INSTANCE;
    IStorageComponentProvider storageComponentProvider = appContext.getStorageComponentProvider();
    if (isNewUniverse()) {
        LSMBTreeLocalResourceFactory lsmBtreeFactory = new LSMBTreeLocalResourceFactory(
                storageComponentProvider.getStorageManager(), typeTraits, cmpFactories, null, null, null,
                opTrackerFactory, ioOpCallbackFactory, storageComponentProvider.getMetadataPageManagerFactory(),
                new AsterixVirtualBufferCacheProvider(index.getDatasetId().getId()),
                storageComponentProvider.getIoOperationSchedulerProvider(),
                appContext.getMetadataMergePolicyFactory(), GlobalConfig.DEFAULT_COMPACTION_POLICY_PROPERTIES, true,
                bloomFilterKeyFields, appContext.getBloomFilterFalsePositiveRate(), true, null);
        DatasetLocalResourceFactory dsLocalResourceFactory =
                new DatasetLocalResourceFactory(index.getDatasetId().getId(), lsmBtreeFactory);
        // TODO(amoudi): Creating the index should go through the same code path as other indexes.
        // This is to be done by having a metadata dataset associated with each index.
        IIndexBuilder indexBuilder = new IndexBuilder(ncServiceCtx, storageComponentProvider.getStorageManager(),
                index::getResourceId, file, dsLocalResourceFactory, true);
        indexBuilder.build();
    } else {
        final LocalResource resource = localResourceRepository.get(file.getRelativePath());
        if (resource == null) {
            throw new HyracksDataException("Could not find the required metadata indexes. Please delete "
                    + appContext.getMetadataProperties().getTransactionLogDirs()
                            .get(appContext.getTransactionSubsystem().getId())
                    + " to initialize as a new instance. (WARNING: all data will be lost.)");
        }
        // Why do we care about the metadata dataset's resource ids? Why not assign them ids
        // like other resources?
        if (index.getResourceId() != resource.getId()) {
            throw new HyracksDataException("Resource id doesn't match the expected metadata index resource id");
        }
        IndexDataflowHelper indexHelper =
                new IndexDataflowHelper(ncServiceCtx, storageComponentProvider.getStorageManager(), file);
        // Opening the index through the helper ensures that it gets instantiated.
        indexHelper.open();
        indexHelper.close();
    }
}
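For reference, here is a minimal sketch of the existing-universe branch above in isolation: looking up a LocalResource by relative path and validating its id before the index is opened. This is not taken from the AsterixDB sources; the helper name, arguments, and error messages are hypothetical.

static void validateEnlistedIndex(ILocalResourceRepository repository, FileReference file,
        long expectedResourceId) throws HyracksDataException {
    // Metadata indexes live under a fixed relative path per storage partition.
    LocalResource resource = repository.get(file.getRelativePath());
    if (resource == null) {
        // The index was never created on this node, so bootstrapping cannot proceed.
        throw new HyracksDataException("Missing metadata index: " + file.getRelativePath());
    }
    if (resource.getId() != expectedResourceId) {
        // Metadata indexes use fixed, well-known resource ids; a mismatch indicates corruption.
        throw new HyracksDataException("Unexpected resource id: " + resource.getId());
    }
}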
Use of org.apache.hyracks.storage.common.LocalResource in project asterixdb by apache.
The class RecoveryManager, method startRecoveryRedoPhase.
private synchronized void startRecoveryRedoPhase(Set<Integer> partitions, ILogReader logReader,
        long lowWaterMarkLSN, Set<Integer> winnerJobSet) throws IOException, ACIDException {
    int redoCount = 0;
    int jobId = -1;
    long resourceId;
    long maxDiskLastLsn;
    long lsn = -1;
    ILSMIndex index = null;
    LocalResource localResource = null;
    DatasetLocalResource localResourceMetadata = null;
    boolean foundWinner = false;
    JobEntityCommits jobEntityWinners = null;
    IAppRuntimeContextProvider appRuntimeContext = txnSubsystem.getAsterixAppRuntimeContextProvider();
    IDatasetLifecycleManager datasetLifecycleManager = appRuntimeContext.getDatasetLifecycleManager();
    Map<Long, LocalResource> resourcesMap = localResourceRepository.loadAndGetAllResources();
    Map<Long, Long> resourceId2MaxLSNMap = new HashMap<>();
    TxnId tempKeyTxnId = new TxnId(-1, -1, -1, null, -1, false);
    ILogRecord logRecord = null;
    try {
        logReader.initializeScan(lowWaterMarkLSN);
        logRecord = logReader.next();
        while (logRecord != null) {
            if (IS_DEBUG_MODE) {
                LOGGER.info(logRecord.getLogRecordForDisplay());
            }
            lsn = logRecord.getLSN();
            jobId = logRecord.getJobId();
            foundWinner = false;
            switch (logRecord.getLogType()) {
                case LogType.UPDATE:
                    if (partitions.contains(logRecord.getResourcePartition())) {
                        if (winnerJobSet.contains(jobId)) {
                            foundWinner = true;
                        } else if (jobId2WinnerEntitiesMap.containsKey(jobId)) {
                            jobEntityWinners = jobId2WinnerEntitiesMap.get(jobId);
                            tempKeyTxnId.setTxnId(jobId, logRecord.getDatasetId(), logRecord.getPKHashValue(),
                                    logRecord.getPKValue(), logRecord.getPKValueSize());
                            if (jobEntityWinners.containsEntityCommitForTxnId(lsn, tempKeyTxnId)) {
                                foundWinner = true;
                            }
                        }
                        if (foundWinner) {
                            resourceId = logRecord.getResourceId();
                            localResource = resourcesMap.get(resourceId);
                            /*
                             * An index drop DDL may have removed the index that this log record
                             * refers to, in which case the operation cannot (and need not) be
                             * redone. We detect this when the localResource of the corresponding
                             * index comes back as null; in that case we simply skip to the next
                             * log record.
                             */
                            if (localResource == null) {
                                LOGGER.log(Level.WARNING, "resource was not found for resource id " + resourceId);
                                logRecord = logReader.next();
                                continue;
                            }
                            // Get the index instance from the dataset lifecycle manager.
                            // If the index is not registered yet, create it using the
                            // DatasetLocalResource stored in the local resource repository.
                            localResourceMetadata = (DatasetLocalResource) localResource.getResource();
                            index = (ILSMIndex) datasetLifecycleManager.get(localResource.getPath());
                            if (index == null) {
                                // Create the index instance and register it with the lifecycle manager.
                                index = (ILSMIndex) localResourceMetadata.createInstance(serviceCtx);
                                datasetLifecycleManager.register(localResource.getPath(), index);
                                datasetLifecycleManager.open(localResource.getPath());
                                // Get maxDiskLastLSN from the index's disk components.
                                ILSMIndex lsmIndex = index;
                                try {
                                    maxDiskLastLsn = ((AbstractLSMIOOperationCallback) lsmIndex
                                            .getIOOperationCallback())
                                                    .getComponentLSN(lsmIndex.getImmutableComponents());
                                } catch (HyracksDataException e) {
                                    datasetLifecycleManager.close(localResource.getPath());
                                    throw e;
                                }
                                // Cache the resource id and its maxDiskLastLSN.
                                resourceId2MaxLSNMap.put(resourceId, maxDiskLastLsn);
                            } else {
                                maxDiskLastLsn = resourceId2MaxLSNMap.get(resourceId);
                            }
                            if (lsn > maxDiskLastLsn) {
                                redo(logRecord, datasetLifecycleManager);
                                redoCount++;
                            }
                        }
                    }
                    break;
                case LogType.JOB_COMMIT:
                case LogType.ENTITY_COMMIT:
                case LogType.ABORT:
                case LogType.FLUSH:
                case LogType.WAIT:
                case LogType.MARKER:
                    // Do nothing.
                    break;
                default:
                    throw new ACIDException("Unsupported LogType: " + logRecord.getLogType());
            }
            logRecord = logReader.next();
        }
        LOGGER.info("Logs REDO phase completed. Redo logs count: " + redoCount);
    } finally {
        // Close all indexes that were opened during redo.
        Set<Long> resourceIdList = resourceId2MaxLSNMap.keySet();
        for (long r : resourceIdList) {
            datasetLifecycleManager.close(resourcesMap.get(r).getPath());
        }
    }
}
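The core redo decision in the loop above can be distilled into one predicate. The following sketch (a hypothetical helper, not part of RecoveryManager) captures it: an UPDATE record is replayed only when its LSN is newer than the maximum LSN already persisted in the index's disk components.

static boolean mustRedo(long logRecordLsn, long maxDiskLastLsn) {
    // Changes with an LSN at or below maxDiskLastLsn already live in a flushed
    // LSM disk component; replaying them would apply the update twice.
    return logRecordLsn > maxDiskLastLsn;
}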
Use of org.apache.hyracks.storage.common.LocalResource in project asterixdb by apache.
The class TreeIndexDiskOrderScanOperatorNodePushable, method initialize.
@Override
public void initialize() throws HyracksDataException {
    treeIndexHelper.open();
    ITreeIndex treeIndex = (ITreeIndex) treeIndexHelper.getIndexInstance();
    try {
        ITreeIndexFrame cursorFrame = treeIndex.getLeafFrameFactory().createFrame();
        ITreeIndexCursor cursor = new TreeIndexDiskOrderScanCursor(cursorFrame);
        LocalResource resource = treeIndexHelper.getResource();
        ISearchOperationCallback searchCallback =
                searchCallbackFactory.createSearchOperationCallback(resource.getId(), ctx, null);
        ITreeIndexAccessor indexAccessor =
                (ITreeIndexAccessor) treeIndex.createAccessor(NoOpOperationCallback.INSTANCE, searchCallback);
        try {
            writer.open();
            indexAccessor.diskOrderScan(cursor);
            int fieldCount = treeIndex.getFieldCount();
            FrameTupleAppender appender = new FrameTupleAppender(new VSizeFrame(ctx));
            ArrayTupleBuilder tb = new ArrayTupleBuilder(fieldCount);
            DataOutput dos = tb.getDataOutput();
            while (cursor.hasNext()) {
                tb.reset();
                cursor.next();
                ITupleReference frameTuple = cursor.getTuple();
                for (int i = 0; i < frameTuple.getFieldCount(); i++) {
                    dos.write(frameTuple.getFieldData(i), frameTuple.getFieldStart(i),
                            frameTuple.getFieldLength(i));
                    tb.addFieldEndOffset();
                }
                FrameUtils.appendToWriter(writer, appender, tb.getFieldEndOffsets(), tb.getByteArray(), 0,
                        tb.getSize());
            }
            appender.write(writer, true);
        } catch (Throwable th) {
            writer.fail();
            throw new HyracksDataException(th);
        } finally {
            try {
                cursor.close();
            } catch (Exception cursorCloseException) {
                throw new IllegalStateException(cursorCloseException);
            } finally {
                writer.close();
            }
        }
    } catch (Throwable th) {
        treeIndexHelper.close();
        throw new HyracksDataException(th);
    }
}
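The scan above follows the standard Hyracks cursor protocol: hasNext() probes for a next tuple, next() advances, and getTuple() exposes a reference that is only valid until the next advance. A minimal sketch of that protocol on an already-open cursor (the helper itself is hypothetical):

static int countTuples(ITreeIndexCursor cursor) throws Exception {
    int count = 0;
    try {
        while (cursor.hasNext()) {
            cursor.next();
            // getTuple() returns a reference that is invalidated by the next call
            // to next(), so it must be consumed before advancing.
            ITupleReference tuple = cursor.getTuple();
            count++;
        }
    } finally {
        cursor.close();
    }
    return count;
}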
Use of org.apache.hyracks.storage.common.LocalResource in project asterixdb by apache.
The class TreeIndexStatsOperatorNodePushable, method initialize.
@Override
public void initialize() throws HyracksDataException {
    treeIndexHelper.open();
    ITreeIndex treeIndex = (ITreeIndex) treeIndexHelper.getIndexInstance();
    try {
        writer.open();
        IBufferCache bufferCache = storageManager.getBufferCache(ctx.getJobletContext().getServiceContext());
        IFileMapProvider fileMapProvider =
                storageManager.getFileMapProvider(ctx.getJobletContext().getServiceContext());
        LocalResource resource = treeIndexHelper.getResource();
        IIOManager ioManager = ctx.getIoManager();
        FileReference fileRef = ioManager.resolve(resource.getPath());
        int indexFileId = fileMapProvider.lookupFileId(fileRef);
        TreeIndexStatsGatherer statsGatherer = new TreeIndexStatsGatherer(bufferCache, treeIndex.getPageManager(),
                indexFileId, treeIndex.getRootPageId());
        TreeIndexStats stats = statsGatherer.gatherStats(treeIndex.getLeafFrameFactory().createFrame(),
                treeIndex.getInteriorFrameFactory().createFrame(),
                treeIndex.getPageManager().createMetadataFrame());
        // Write the stats output as a single string field.
        FrameTupleAppender appender = new FrameTupleAppender(new VSizeFrame(ctx));
        ArrayTupleBuilder tb = new ArrayTupleBuilder(1);
        DataOutput dos = tb.getDataOutput();
        tb.reset();
        utf8SerDer.serialize(stats.toString(), dos);
        tb.addFieldEndOffset();
        if (!appender.append(tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize())) {
            throw new HyracksDataException("Record size (" + tb.getSize() + ") larger than frame size ("
                    + appender.getBuffer().capacity() + ")");
        }
        appender.write(writer, false);
    } catch (Exception e) {
        writer.fail();
        throw new HyracksDataException(e);
    } finally {
        try {
            writer.close();
        } finally {
            treeIndexHelper.close();
        }
    }
}
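Note that the LocalResource only stores a relative path; it is the IIOManager that maps it onto a concrete device file, whose id the file map provider can then look up. A sketch of that resolution step in isolation (the helper is hypothetical):

static FileReference resolveIndexFile(IIOManager ioManager, LocalResource resource)
        throws HyracksDataException {
    // resolve() selects the IO device and returns an absolute FileReference,
    // which can then be translated to a buffer-cache file id.
    return ioManager.resolve(resource.getPath());
}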
Use of org.apache.hyracks.storage.common.LocalResource in project asterixdb by apache.
The class IndexInsertUpdateDeleteOperatorNodePushable, method open.
@Override
public void open() throws HyracksDataException {
    accessor = new FrameTupleAccessor(inputRecDesc);
    writeBuffer = new VSizeFrame(ctx);
    indexHelper.open();
    index = indexHelper.getIndexInstance();
    try {
        writer.open();
        LocalResource resource = indexHelper.getResource();
        modCallback = modOpCallbackFactory.createModificationOperationCallback(resource, ctx, this);
        indexAccessor = index.createAccessor(modCallback, NoOpOperationCallback.INSTANCE);
        if (tupleFilterFactory != null) {
            tupleFilter = tupleFilterFactory.createTupleFilter(ctx);
            frameTuple = new FrameTupleReference();
        }
    } catch (Exception e) {
        throw new HyracksDataException(e);
    }
}
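All three pushables above rely on the same helper contract: open the helper, obtain the index instance and its LocalResource, then close the helper when done. A minimal sketch of that lifecycle, assuming an index dataflow helper already obtained from its factory:

indexHelper.open(); // pins the index via the lifecycle manager, instantiating it if needed
try {
    IIndex index = indexHelper.getIndexInstance();
    LocalResource resource = indexHelper.getResource();
    // ... create accessors/callbacks from the resource and operate on the index ...
} finally {
    indexHelper.close(); // releases the pin so the index can later be deactivated
}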