Use of org.apache.asterix.common.context.IStorageComponentProvider in project asterixdb by apache.
The class ExecuteStatementRequestMessage, method handle:
@Override
public void handle(ICcApplicationContext ccAppCtx) throws HyracksDataException, InterruptedException {
    ICCServiceContext ccSrvContext = ccAppCtx.getServiceContext();
    ClusterControllerService ccSrv = (ClusterControllerService) ccSrvContext.getControllerService();
    CCApplication ccApp = (CCApplication) ccSrv.getApplication();
    CCMessageBroker messageBroker = (CCMessageBroker) ccSrvContext.getMessageBroker();
    CCExtensionManager ccExtMgr = (CCExtensionManager) ccAppCtx.getExtensionManager();
    ILangCompilationProvider compilationProvider = ccExtMgr.getCompilationProvider(lang);
    IStorageComponentProvider storageComponentProvider = ccAppCtx.getStorageComponentProvider();
    IStatementExecutorFactory statementExecutorFactory = ccApp.getStatementExecutorFactory();
    IStatementExecutorContext statementExecutorContext = ccApp.getStatementExecutorContext();
    ccSrv.getExecutor().submit(() -> {
        ExecuteStatementResponseMessage responseMsg = new ExecuteStatementResponseMessage(requestMessageId);
        try {
            final IClusterManagementWork.ClusterState clusterState = ClusterStateManager.INSTANCE.getState();
            if (clusterState != IClusterManagementWork.ClusterState.ACTIVE) {
                throw new IllegalStateException("Cannot execute request, cluster is " + clusterState);
            }
            IParser parser = compilationProvider.getParserFactory().createParser(statementsText);
            List<Statement> statements = parser.parse();
            StringWriter outWriter = new StringWriter(256);
            PrintWriter outPrinter = new PrintWriter(outWriter);
            SessionOutput.ResultDecorator resultPrefix = ResultUtil.createPreResultDecorator();
            SessionOutput.ResultDecorator resultPostfix = ResultUtil.createPostResultDecorator();
            SessionOutput.ResultAppender appendHandle = ResultUtil.createResultHandleAppender(handleUrl);
            SessionOutput.ResultAppender appendStatus = ResultUtil.createResultStatusAppender();
            SessionOutput sessionOutput = new SessionOutput(sessionConfig, outPrinter, resultPrefix,
                    resultPostfix, appendHandle, appendStatus);
            IStatementExecutor.ResultMetadata outMetadata = new IStatementExecutor.ResultMetadata();
            MetadataManager.INSTANCE.init();
            IStatementExecutor translator = statementExecutorFactory.create(ccAppCtx, statements,
                    sessionOutput, compilationProvider, storageComponentProvider);
            translator.compileAndExecute(ccAppCtx.getHcc(), null, delivery, outMetadata,
                    new IStatementExecutor.Stats(), clientContextID, statementExecutorContext);
            outPrinter.close();
            responseMsg.setResult(outWriter.toString());
            responseMsg.setMetadata(outMetadata);
        } catch (AlgebricksException | HyracksException | TokenMgrError
                | org.apache.asterix.aqlplus.parser.TokenMgrError pe) {
            // we trust that "our" exceptions are serializable and have a comprehensible error message
            GlobalConfig.ASTERIX_LOGGER.log(Level.WARNING, pe.getMessage(), pe);
            responseMsg.setError(pe);
        } catch (Exception e) {
            GlobalConfig.ASTERIX_LOGGER.log(Level.SEVERE, "Unexpected exception", e);
            responseMsg.setError(new Exception(e.toString()));
        }
        try {
            messageBroker.sendApplicationMessageToNC(responseMsg, requestNodeId);
        } catch (Exception e) {
            LOGGER.log(Level.WARNING, e.toString(), e);
        }
    });
}
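Most of the method above is boilerplate that resolves CC-side services from the single ICcApplicationContext handed to the handler. Below is a minimal sketch of that resolution chain pulled into a helper; the CcServices class and its resolve method are hypothetical (not asterixdb code), the lang parameter is assumed to be ILangExtension.Language as in the extension manager's API, and the import package locations follow the 0.9.x source tree and may differ in other versions.

import org.apache.asterix.algebra.base.ILangExtension;
import org.apache.asterix.app.cc.CCExtensionManager;
import org.apache.asterix.common.context.IStorageComponentProvider;
import org.apache.asterix.common.dataflow.ICcApplicationContext;
import org.apache.asterix.compiler.provider.ILangCompilationProvider;
import org.apache.asterix.hyracks.bootstrap.CCApplication;
import org.apache.asterix.translator.IStatementExecutorFactory;
import org.apache.hyracks.control.cc.ClusterControllerService;

// Hypothetical helper: bundles the services the handler above resolves one by one.
final class CcServices {
    final ILangCompilationProvider compilationProvider;
    final IStorageComponentProvider storageComponentProvider;
    final IStatementExecutorFactory statementExecutorFactory;

    private CcServices(ILangCompilationProvider cp, IStorageComponentProvider scp,
            IStatementExecutorFactory sef) {
        this.compilationProvider = cp;
        this.storageComponentProvider = scp;
        this.statementExecutorFactory = sef;
    }

    // Mirrors the casts in handle(): the CC service context owns the controller
    // service, whose application object exposes the statement executor factory.
    static CcServices resolve(ICcApplicationContext ccAppCtx, ILangExtension.Language lang) {
        ClusterControllerService ccSrv =
                (ClusterControllerService) ccAppCtx.getServiceContext().getControllerService();
        CCApplication ccApp = (CCApplication) ccSrv.getApplication();
        CCExtensionManager ccExtMgr = (CCExtensionManager) ccAppCtx.getExtensionManager();
        return new CcServices(ccExtMgr.getCompilationProvider(lang),
                ccAppCtx.getStorageComponentProvider(), ccApp.getStatementExecutorFactory());
    }
}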
Use of org.apache.asterix.common.context.IStorageComponentProvider in project asterixdb by apache.
The class QueryTranslator, method handleStartFeedStatement:
private void handleStartFeedStatement(MetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws Exception {
    StartFeedStatement sfs = (StartFeedStatement) stmt;
    String dataverseName = getActiveDataverse(sfs.getDataverseName());
    String feedName = sfs.getFeedName().getValue();
    // Transaction handler
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    // Runtime handler
    EntityId entityId = new EntityId(Feed.EXTENSION_NAME, dataverseName, feedName);
    // Feed & Feed Connections
    Feed feed = FeedMetadataUtil.validateIfFeedExists(dataverseName, feedName,
            metadataProvider.getMetadataTxnContext());
    List<FeedConnection> feedConnections = MetadataManager.INSTANCE.getFeedConections(
            metadataProvider.getMetadataTxnContext(), dataverseName, feedName);
    ILangCompilationProvider compilationProvider = new AqlCompilationProvider();
    IStorageComponentProvider storageComponentProvider = new StorageComponentProvider();
    DefaultStatementExecutorFactory qtFactory = new DefaultStatementExecutorFactory();
    ActiveLifecycleListener activeListener = (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
    ActiveJobNotificationHandler activeEventHandler = activeListener.getNotificationHandler();
    FeedEventsListener listener = (FeedEventsListener) activeEventHandler.getActiveEntityListener(entityId);
    if (listener != null) {
        throw new AlgebricksException("Feed " + feedName + " is started already.");
    }
    // Start
    MetadataLockManager.INSTANCE.startFeedBegin(metadataProvider.getLocks(), dataverseName,
            dataverseName + "." + feedName, feedConnections);
    try {
        // Prepare policy
        List<IDataset> datasets = new ArrayList<>();
        for (FeedConnection connection : feedConnections) {
            Dataset ds = metadataProvider.findDataset(connection.getDataverseName(),
                    connection.getDatasetName());
            datasets.add(ds);
        }
        org.apache.commons.lang3.tuple.Pair<JobSpecification, AlgebricksAbsolutePartitionConstraint> jobInfo =
                FeedOperations.buildStartFeedJob(sessionOutput, metadataProvider, feed, feedConnections,
                        compilationProvider, storageComponentProvider, qtFactory, hcc);
        JobSpecification feedJob = jobInfo.getLeft();
        listener = new FeedEventsListener(appCtx, entityId, datasets, jobInfo.getRight().getLocations());
        activeEventHandler.registerListener(listener);
        IActiveEventSubscriber eventSubscriber = listener.subscribe(ActivityState.STARTED);
        feedJob.setProperty(ActiveJobNotificationHandler.ACTIVE_ENTITY_PROPERTY_NAME, entityId);
        JobUtils.runJob(hcc, feedJob,
                Boolean.valueOf(metadataProvider.getConfig().get(StartFeedStatement.WAIT_FOR_COMPLETION)));
        eventSubscriber.sync();
        LOGGER.log(Level.INFO, "Submitted");
    } catch (Exception e) {
        abort(e, e, mdTxnCtx);
        if (listener != null) {
            activeEventHandler.unregisterListener(listener);
        }
        throw e;
    } finally {
        metadataProvider.getLocks().unlock();
    }
}
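The method follows the usual QueryTranslator discipline: begin a metadata transaction, take the entity locks, do the work, abort the transaction on failure, and release the locks on every path. Below is a condensed sketch of that skeleton, written as if it were a private helper inside QueryTranslator so it can reuse the class's imports and its abort method; the withStartFeedLocks name and the FeedAction interface are hypothetical, not asterixdb code.

// Hypothetical functional interface for the work done under the locks.
interface FeedAction {
    void run() throws Exception;
}

// Hypothetical helper capturing the begin/lock/abort/unlock pattern above.
private void withStartFeedLocks(MetadataProvider metadataProvider, String dataverseName,
        String feedName, List<FeedConnection> feedConnections, FeedAction action) throws Exception {
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    MetadataLockManager.INSTANCE.startFeedBegin(metadataProvider.getLocks(), dataverseName,
            dataverseName + "." + feedName, feedConnections);
    try {
        action.run();
    } catch (Exception e) {
        abort(e, e, mdTxnCtx); // roll back the metadata transaction before rethrowing
        throw e;
    } finally {
        metadataProvider.getLocks().unlock(); // locks are released on every path
    }
}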
Use of org.apache.asterix.common.context.IStorageComponentProvider in project asterixdb by apache.
The class DatasetUtil, method createPrimaryIndexUpsertOp:
/**
 * Creates a primary index upsert operator for a given dataset.
 *
 * @param spec
 *            the job specification.
 * @param metadataProvider
 *            the metadata provider.
 * @param dataset
 *            the dataset to upsert.
 * @param inputRecordDesc
 *            the record descriptor for an input tuple.
 * @param fieldPermutation
 *            the field permutation according to the input.
 * @param missingWriterFactory
 *            the factory for customizing missing value serialization.
 * @return a primary index upsert operator and its location constraints.
 * @throws AlgebricksException
 */
public static Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> createPrimaryIndexUpsertOp(
        JobSpecification spec, MetadataProvider metadataProvider, Dataset dataset,
        RecordDescriptor inputRecordDesc, int[] fieldPermutation,
        IMissingWriterFactory missingWriterFactory) throws AlgebricksException {
    int numKeys = dataset.getPrimaryKeys().size();
    int numFilterFields = DatasetUtil.getFilterField(dataset) == null ? 0 : 1;
    ARecordType itemType = (ARecordType) metadataProvider.findType(dataset);
    ARecordType metaItemType = (ARecordType) metadataProvider.findMetaType(dataset);
    try {
        Index primaryIndex = metadataProvider.getIndex(dataset.getDataverseName(),
                dataset.getDatasetName(), dataset.getDatasetName());
        Pair<IFileSplitProvider, AlgebricksPartitionConstraint> splitsAndConstraint =
                metadataProvider.getSplitProviderAndConstraints(dataset);
        // prepare callback
        JobId jobId = ((JobEventListenerFactory) spec.getJobletEventListenerFactory()).getJobId();
        int[] primaryKeyFields = new int[numKeys];
        for (int i = 0; i < numKeys; i++) {
            primaryKeyFields[i] = i;
        }
        boolean hasSecondaries = metadataProvider.getDatasetIndexes(dataset.getDataverseName(),
                dataset.getDatasetName()).size() > 1;
        IStorageComponentProvider storageComponentProvider = metadataProvider.getStorageComponentProvider();
        IModificationOperationCallbackFactory modificationCallbackFactory =
                dataset.getModificationCallbackFactory(storageComponentProvider, primaryIndex, jobId,
                        IndexOperation.UPSERT, primaryKeyFields);
        ISearchOperationCallbackFactory searchCallbackFactory = dataset.getSearchCallbackFactory(
                storageComponentProvider, primaryIndex, jobId, IndexOperation.UPSERT, primaryKeyFields);
        IIndexDataflowHelperFactory idfh = new IndexDataflowHelperFactory(
                storageComponentProvider.getStorageManager(), splitsAndConstraint.first);
        LSMPrimaryUpsertOperatorDescriptor op;
        ITypeTraits[] outputTypeTraits = new ITypeTraits[inputRecordDesc.getFieldCount()
                + (dataset.hasMetaPart() ? 2 : 1) + numFilterFields];
        ISerializerDeserializer<?>[] outputSerDes = new ISerializerDeserializer[inputRecordDesc.getFieldCount()
                + (dataset.hasMetaPart() ? 2 : 1) + numFilterFields];
        // add the previous record first
        int f = 0;
        outputSerDes[f] = FormatUtils.getDefaultFormat().getSerdeProvider().getSerializerDeserializer(itemType);
        outputTypeTraits[f] = FormatUtils.getDefaultFormat().getTypeTraitProvider().getTypeTrait(itemType);
        f++;
        // add the previous meta second
        if (dataset.hasMetaPart()) {
            outputSerDes[f] = FormatUtils.getDefaultFormat().getSerdeProvider()
                    .getSerializerDeserializer(metaItemType);
            outputTypeTraits[f] = FormatUtils.getDefaultFormat().getTypeTraitProvider()
                    .getTypeTrait(metaItemType);
            f++;
        }
        // add the previous filter third
        int fieldIdx = -1;
        if (numFilterFields > 0) {
            String filterField = DatasetUtil.getFilterField(dataset).get(0);
            String[] fieldNames = itemType.getFieldNames();
            int i = 0;
            for (; i < fieldNames.length; i++) {
                if (fieldNames[i].equals(filterField)) {
                    break;
                }
            }
            fieldIdx = i;
            outputTypeTraits[f] = FormatUtils.getDefaultFormat().getTypeTraitProvider()
                    .getTypeTrait(itemType.getFieldTypes()[fieldIdx]);
            outputSerDes[f] = FormatUtils.getDefaultFormat().getSerdeProvider()
                    .getSerializerDeserializer(itemType.getFieldTypes()[fieldIdx]);
            f++;
        }
        for (int j = 0; j < inputRecordDesc.getFieldCount(); j++) {
            outputTypeTraits[j + f] = inputRecordDesc.getTypeTraits()[j];
            outputSerDes[j + f] = inputRecordDesc.getFields()[j];
        }
        RecordDescriptor outputRecordDesc = new RecordDescriptor(outputSerDes, outputTypeTraits);
        op = new LSMPrimaryUpsertOperatorDescriptor(spec, outputRecordDesc, fieldPermutation, idfh,
                missingWriterFactory, modificationCallbackFactory, searchCallbackFactory,
                dataset.getFrameOpCallbackFactory(), numKeys, itemType, fieldIdx, hasSecondaries);
        return new Pair<>(op, splitsAndConstraint.second);
    } catch (MetadataException me) {
        throw new AlgebricksException(me);
    }
}
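The output record layout is: previous record, then (optionally) the previous meta record, then (optionally) the previous filter value, then all input fields. A small sketch of the slot arithmetic mirrored by the array sizing above; upsertOutputFieldCount is a hypothetical helper, not asterixdb code.

// Hypothetical helper mirroring the array-size arithmetic above:
// input fields + previous record (+ previous meta if the dataset has a
// meta part) (+ previous filter value if the dataset is filtered).
static int upsertOutputFieldCount(int inputFieldCount, boolean hasMetaPart, int numFilterFields) {
    return inputFieldCount + (hasMetaPart ? 2 : 1) + numFilterFields;
}

For example, a 5-field input on a filtered dataset with a meta part yields 5 + 2 + 1 = 8 output fields.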
Use of org.apache.asterix.common.context.IStorageComponentProvider in project asterixdb by apache.
The class RTreeResourceFactoryProvider, method getResourceFactory:
@Override
public IResourceFactory getResourceFactory(MetadataProvider mdProvider, Dataset dataset, Index index,
        ARecordType recordType, ARecordType metaType, ILSMMergePolicyFactory mergePolicyFactory,
        Map<String, String> mergePolicyProperties, ITypeTraits[] filterTypeTraits,
        IBinaryComparatorFactory[] filterCmpFactories) throws AlgebricksException {
    if (index.getKeyFieldNames().size() != 1) {
        throw new CompilationException(ErrorCode.COMPILATION_ILLEGAL_INDEX_NUM_OF_FIELD,
                index.getKeyFieldNames().size(), index.getIndexType(), 1);
    }
    IAType spatialType = Index.getNonNullableOpenFieldType(index.getKeyFieldTypes().get(0),
            index.getKeyFieldNames().get(0), recordType).first;
    if (spatialType == null) {
        throw new CompilationException(ErrorCode.COMPILATION_FIELD_NOT_FOUND,
                StringUtils.join(index.getKeyFieldNames().get(0), '.'));
    }
    List<List<String>> primaryKeyFields = dataset.getPrimaryKeys();
    int numPrimaryKeys = primaryKeyFields.size();
    ITypeTraits[] primaryTypeTraits = null;
    IBinaryComparatorFactory[] primaryComparatorFactories = null;
    IStorageComponentProvider storageComponentProvider = mdProvider.getStorageComponentProvider();
    if (dataset.getDatasetType() == DatasetType.INTERNAL) {
        primaryTypeTraits = new ITypeTraits[numPrimaryKeys + 1 + (dataset.hasMetaPart() ? 1 : 0)];
        primaryComparatorFactories = new IBinaryComparatorFactory[numPrimaryKeys];
        List<Integer> indicators = null;
        if (dataset.hasMetaPart()) {
            indicators = ((InternalDatasetDetails) dataset.getDatasetDetails()).getKeySourceIndicator();
        }
        for (int i = 0; i < numPrimaryKeys; i++) {
            IAType keyType = (indicators == null || indicators.get(i) == 0)
                    ? recordType.getSubFieldType(primaryKeyFields.get(i))
                    : metaType.getSubFieldType(primaryKeyFields.get(i));
            primaryComparatorFactories[i] = storageComponentProvider.getComparatorFactoryProvider()
                    .getBinaryComparatorFactory(keyType, true);
            primaryTypeTraits[i] = storageComponentProvider.getTypeTraitProvider().getTypeTrait(keyType);
        }
        primaryTypeTraits[numPrimaryKeys] =
                storageComponentProvider.getTypeTraitProvider().getTypeTrait(recordType);
        if (dataset.hasMetaPart()) {
            primaryTypeTraits[numPrimaryKeys + 1] =
                    storageComponentProvider.getTypeTraitProvider().getTypeTrait(metaType);
        }
    }
    boolean isPointMBR = spatialType.getTypeTag() == ATypeTag.POINT
            || spatialType.getTypeTag() == ATypeTag.POINT3D;
    int numDimensions = NonTaggedFormatUtil.getNumDimensions(spatialType.getTypeTag());
    int numNestedSecondaryKeyFields = numDimensions * 2;
    IBinaryComparatorFactory[] secondaryComparatorFactories =
            new IBinaryComparatorFactory[numNestedSecondaryKeyFields];
    IPrimitiveValueProviderFactory[] valueProviderFactories =
            new IPrimitiveValueProviderFactory[numNestedSecondaryKeyFields];
    ITypeTraits[] secondaryTypeTraits = new ITypeTraits[numNestedSecondaryKeyFields + numPrimaryKeys];
    IAType nestedKeyType = NonTaggedFormatUtil.getNestedSpatialType(spatialType.getTypeTag());
    ATypeTag keyType = nestedKeyType.getTypeTag();
    for (int i = 0; i < numNestedSecondaryKeyFields; i++) {
        secondaryComparatorFactories[i] = storageComponentProvider.getComparatorFactoryProvider()
                .getBinaryComparatorFactory(nestedKeyType, true);
        secondaryTypeTraits[i] = storageComponentProvider.getTypeTraitProvider().getTypeTrait(nestedKeyType);
        valueProviderFactories[i] = storageComponentProvider.getPrimitiveValueProviderFactory();
    }
    for (int i = 0; i < numPrimaryKeys; i++) {
        secondaryTypeTraits[numNestedSecondaryKeyFields + i] =
                (dataset.getDatasetType() == DatasetType.INTERNAL) ? primaryTypeTraits[i]
                        : IndexingConstants.getTypeTraits(i);
    }
    int[] rtreeFields = null;
    if (filterTypeTraits != null && filterTypeTraits.length > 0) {
        rtreeFields = new int[numNestedSecondaryKeyFields + numPrimaryKeys];
        for (int i = 0; i < rtreeFields.length; i++) {
            rtreeFields[i] = i;
        }
    }
    IStorageManager storageManager = storageComponentProvider.getStorageManager();
    ILSMOperationTrackerFactory opTrackerFactory = dataset.getIndexOperationTrackerFactory(index);
    ILSMIOOperationCallbackFactory ioOpCallbackFactory = dataset.getIoOperationCallbackFactory(index);
    IMetadataPageManagerFactory metadataPageManagerFactory =
            storageComponentProvider.getMetadataPageManagerFactory();
    ILSMIOOperationSchedulerProvider ioSchedulerProvider =
            storageComponentProvider.getIoOperationSchedulerProvider();
    boolean durable = !dataset.isTemp();
    ILinearizeComparatorFactory linearizeCmpFactory =
            MetadataProvider.proposeLinearizer(keyType, secondaryComparatorFactories.length);
    ITypeTraits[] typeTraits = getTypeTraits(mdProvider, dataset, index, recordType, metaType);
    IBinaryComparatorFactory[] rtreeCmpFactories = getCmpFactories(mdProvider, index, recordType, metaType);
    int[] secondaryFilterFields = (filterTypeTraits != null && filterTypeTraits.length > 0)
            ? new int[] { numNestedSecondaryKeyFields + numPrimaryKeys } : null;
    IBinaryComparatorFactory[] btreeCompFactories = dataset.getDatasetType() == DatasetType.EXTERNAL
            ? IndexingConstants.getBuddyBtreeComparatorFactories()
            : getComparatorFactoriesForDeletedKeyBTree(secondaryTypeTraits, primaryComparatorFactories,
                    secondaryComparatorFactories);
    if (dataset.getDatasetType() == DatasetType.INTERNAL) {
        AsterixVirtualBufferCacheProvider vbcProvider =
                new AsterixVirtualBufferCacheProvider(dataset.getDatasetId());
        return new LSMRTreeWithAntiMatterLocalResourceFactory(storageManager, typeTraits, rtreeCmpFactories,
                filterTypeTraits, filterCmpFactories, secondaryFilterFields, opTrackerFactory,
                ioOpCallbackFactory, metadataPageManagerFactory, vbcProvider, ioSchedulerProvider,
                mergePolicyFactory, mergePolicyProperties, durable, valueProviderFactories, rTreePolicyType,
                linearizeCmpFactory, rtreeFields, isPointMBR, btreeCompFactories);
    } else {
        return new ExternalRTreeLocalResourceFactory(storageManager, typeTraits, rtreeCmpFactories,
                filterTypeTraits, filterCmpFactories, secondaryFilterFields, opTrackerFactory,
                ioOpCallbackFactory, metadataPageManagerFactory, ioSchedulerProvider, mergePolicyFactory,
                mergePolicyProperties, durable, btreeCompFactories, valueProviderFactories, rTreePolicyType,
                linearizeCmpFactory, rtreeFields, new int[] { numNestedSecondaryKeyFields }, isPointMBR,
                mdProvider.getStorageProperties().getBloomFilterFalsePositiveRate());
    }
}
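Each R-tree key is stored as an MBR, one (minimum, maximum) pair per dimension, which is why the number of nested secondary key fields above is numDimensions * 2. A tiny sketch of that arithmetic; mbrFieldCount is a hypothetical helper, and the dimension counts follow the POINT/POINT3D tags handled above.

// Hypothetical helper: an MBR needs a minimum and a maximum per dimension,
// so a 2-D key (e.g. POINT) expands to 4 fields and a 3-D key (POINT3D) to 6.
static int mbrFieldCount(int numDimensions) {
    return numDimensions * 2;
}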
Use of org.apache.asterix.common.context.IStorageComponentProvider in project asterixdb by apache.
The class ExternalIndexingOperations, method buildFilesIndexUpdateJobSpec:
public static JobSpecification buildFilesIndexUpdateJobSpec(Dataset dataset,
        List<ExternalFile> externalFilesSnapshot, MetadataProvider metadataProvider)
        throws AlgebricksException {
    IStorageComponentProvider storageComponentProvider = metadataProvider.getStorageComponentProvider();
    JobSpecification spec = RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
    Pair<IFileSplitProvider, AlgebricksPartitionConstraint> secondarySplitsAndConstraint =
            metadataProvider.getSplitProviderAndConstraints(dataset,
                    IndexingConstants.getFilesIndexName(dataset.getDatasetName()));
    IFileSplitProvider secondaryFileSplitProvider = secondarySplitsAndConstraint.first;
    IIndexDataflowHelperFactory dataflowHelperFactory = new IndexDataflowHelperFactory(
            storageComponentProvider.getStorageManager(), secondaryFileSplitProvider);
    ExternalFilesIndexModificationOperatorDescriptor externalFilesOp =
            new ExternalFilesIndexModificationOperatorDescriptor(spec, dataflowHelperFactory,
                    externalFilesSnapshot);
    AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, externalFilesOp,
            secondarySplitsAndConstraint.second);
    spec.addRoot(externalFilesOp);
    spec.setConnectorPolicyAssignmentPolicy(new ConnectorPolicyAssignmentPolicy());
    return spec;
}
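The method only builds the job; the caller is expected to submit it. A minimal sketch of doing so, assuming an already-connected IHyracksClientConnection and the same JobUtils.runJob helper used in handleStartFeedStatement above; the surrounding variables are assumed to be in scope.

// Hypothetical caller; dataset, externalFilesSnapshot, metadataProvider and
// hcc (an IHyracksClientConnection) are assumed to be in scope.
JobSpecification updateSpec = ExternalIndexingOperations.buildFilesIndexUpdateJobSpec(
        dataset, externalFilesSnapshot, metadataProvider);
JobUtils.runJob(hcc, updateSpec, true); // true: block until the job completes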