Example 6 with MetadataException

Use of org.apache.asterix.metadata.MetadataException in project asterixdb by apache.

In class MetadataProvider, the method buildBtreeRuntime:

public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> buildBtreeRuntime(JobSpecification jobSpec, IOperatorSchema opSchema, IVariableTypeEnvironment typeEnv, JobGenContext context, boolean retainInput, boolean retainMissing, Dataset dataset, String indexName, int[] lowKeyFields, int[] highKeyFields, boolean lowKeyInclusive, boolean highKeyInclusive, int[] minFilterFieldIndexes, int[] maxFilterFieldIndexes) throws AlgebricksException {
    boolean isSecondary = true;
    try {
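        // In AsterixDB the primary index shares its dataset's name, so the
        // dataset name doubles as the index name in this lookup.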
        Index primaryIndex = MetadataManager.INSTANCE.getIndex(mdTxnCtx, dataset.getDataverseName(), dataset.getDatasetName(), dataset.getDatasetName());
        if (primaryIndex != null && (dataset.getDatasetType() != DatasetType.EXTERNAL)) {
            isSecondary = !indexName.equals(primaryIndex.getIndexName());
        }
        Index theIndex = isSecondary ? MetadataManager.INSTANCE.getIndex(mdTxnCtx, dataset.getDataverseName(), dataset.getDatasetName(), indexName) : primaryIndex;
        int numPrimaryKeys = dataset.getPrimaryKeys().size();
        RecordDescriptor outputRecDesc = JobGenHelper.mkRecordDescriptor(typeEnv, opSchema, context);
        Pair<IFileSplitProvider, AlgebricksPartitionConstraint> spPc = getSplitProviderAndConstraints(dataset, theIndex.getIndexName());
        int[] primaryKeyFields = new int[numPrimaryKeys];
        for (int i = 0; i < numPrimaryKeys; i++) {
            primaryKeyFields[i] = i;
        }
        ISearchOperationCallbackFactory searchCallbackFactory = dataset.getSearchCallbackFactory(storaegComponentProvider, theIndex, jobId, IndexOperation.SEARCH, primaryKeyFields);
        IStorageManager storageManager = getStorageComponentProvider().getStorageManager();
        IIndexDataflowHelperFactory indexHelperFactory = new IndexDataflowHelperFactory(storageManager, spPc.first);
        BTreeSearchOperatorDescriptor btreeSearchOp;
        if (dataset.getDatasetType() == DatasetType.INTERNAL) {
            btreeSearchOp = new BTreeSearchOperatorDescriptor(jobSpec, outputRecDesc, lowKeyFields, highKeyFields, lowKeyInclusive, highKeyInclusive, indexHelperFactory, retainInput, retainMissing, context.getMissingWriterFactory(), searchCallbackFactory, minFilterFieldIndexes, maxFilterFieldIndexes, false);
        } else {
            btreeSearchOp = new ExternalBTreeSearchOperatorDescriptor(jobSpec, outputRecDesc, lowKeyFields, highKeyFields, lowKeyInclusive, highKeyInclusive, indexHelperFactory, retainInput, retainMissing, context.getMissingWriterFactory(), searchCallbackFactory, minFilterFieldIndexes, maxFilterFieldIndexes, ExternalDatasetsRegistry.INSTANCE.getAndLockDatasetVersion(dataset, this));
        }
        return new Pair<>(btreeSearchOp, spPc.second);
    } catch (MetadataException me) {
        throw new AlgebricksException(me);
    }
}
Also used: RecordDescriptor(org.apache.hyracks.api.dataflow.value.RecordDescriptor) IFileSplitProvider(org.apache.hyracks.dataflow.std.file.IFileSplitProvider) ExternalBTreeSearchOperatorDescriptor(org.apache.asterix.external.operators.ExternalBTreeSearchOperatorDescriptor) BTreeSearchOperatorDescriptor(org.apache.hyracks.storage.am.btree.dataflow.BTreeSearchOperatorDescriptor) AlgebricksException(org.apache.hyracks.algebricks.common.exceptions.AlgebricksException) Index(org.apache.asterix.metadata.entities.Index) IDataSourceIndex(org.apache.hyracks.algebricks.core.algebra.metadata.IDataSourceIndex) AlgebricksPartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksPartitionConstraint) DatasetCardinalityHint(org.apache.asterix.metadata.dataset.hints.DatasetHints.DatasetCardinalityHint) AlgebricksAbsolutePartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint) MetadataException(org.apache.asterix.metadata.MetadataException) ISearchOperationCallbackFactory(org.apache.hyracks.storage.am.common.api.ISearchOperationCallbackFactory) IStorageManager(org.apache.hyracks.storage.common.IStorageManager) IIndexDataflowHelperFactory(org.apache.hyracks.storage.am.common.dataflow.IIndexDataflowHelperFactory) IndexDataflowHelperFactory(org.apache.hyracks.storage.am.common.dataflow.IndexDataflowHelperFactory) Pair(org.apache.hyracks.algebricks.common.utils.Pair)
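
The pattern above recurs throughout MetadataProvider: metadata catalog lookups throw the checked MetadataException, and job-generation code wraps it in AlgebricksException so the compiler sees a single failure type while keeping the original cause. A minimal sketch of that translation; lookupIndex is a hypothetical stand-in for the MetadataManager call, not an API from the project:

import org.apache.asterix.metadata.MetadataException;
import org.apache.asterix.metadata.entities.Index;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;

public Index findIndexOrFail(String dataverse, String dataset, String index)
        throws AlgebricksException {
    try {
        // lookupIndex stands in for MetadataManager.INSTANCE.getIndex(mdTxnCtx, ...),
        // which throws the checked MetadataException.
        return lookupIndex(dataverse, dataset, index);
    } catch (MetadataException me) {
        // Wrap rather than swallow: the metadata failure remains visible
        // as the cause of the compiler-level exception.
        throw new AlgebricksException(me);
    }
}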

Example 7 with MetadataException

Use of org.apache.asterix.metadata.MetadataException in project asterixdb by apache.

In class MetadataProvider, the method getTokenizerRuntime:

@Override
public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> getTokenizerRuntime(IDataSourceIndex<String, DataSourceId> dataSourceIndex, IOperatorSchema propagatedSchema, IOperatorSchema[] inputSchemas, IVariableTypeEnvironment typeEnv, List<LogicalVariable> primaryKeys, List<LogicalVariable> secondaryKeys, ILogicalExpression filterExpr, RecordDescriptor recordDesc, JobGenContext context, JobSpecification spec, boolean bulkload) throws AlgebricksException {
    String indexName = dataSourceIndex.getId();
    String dataverseName = dataSourceIndex.getDataSource().getId().getDataverseName();
    String datasetName = dataSourceIndex.getDataSource().getId().getDatasourceName();
    IOperatorSchema inputSchema;
    if (inputSchemas.length > 0) {
        inputSchema = inputSchemas[0];
    } else {
        throw new AlgebricksException("TokenizeOperator can not operate without any input variable.");
    }
    Dataset dataset = MetadataManagerUtil.findExistingDataset(mdTxnCtx, dataverseName, datasetName);
    Index secondaryIndex;
    try {
        secondaryIndex = MetadataManager.INSTANCE.getIndex(mdTxnCtx, dataset.getDataverseName(), dataset.getDatasetName(), indexName);
    } catch (MetadataException e) {
        throw new AlgebricksException(e);
    }
    // TokenizeOperator only supports a keyword or n-gram index.
    switch(secondaryIndex.getIndexType()) {
        case SINGLE_PARTITION_WORD_INVIX:
        case SINGLE_PARTITION_NGRAM_INVIX:
        case LENGTH_PARTITIONED_WORD_INVIX:
        case LENGTH_PARTITIONED_NGRAM_INVIX:
            return getBinaryTokenizerRuntime(dataverseName, datasetName, indexName, inputSchema, propagatedSchema, primaryKeys, secondaryKeys, recordDesc, spec, secondaryIndex.getIndexType());
        default:
            throw new AlgebricksException("Currently, we do not support TokenizeOperator for the index type: " + secondaryIndex.getIndexType());
    }
}
Also used: IOperatorSchema(org.apache.hyracks.algebricks.core.algebra.operators.logical.IOperatorSchema) Dataset(org.apache.asterix.metadata.entities.Dataset) AlgebricksException(org.apache.hyracks.algebricks.common.exceptions.AlgebricksException) Index(org.apache.asterix.metadata.entities.Index) IDataSourceIndex(org.apache.hyracks.algebricks.core.algebra.metadata.IDataSourceIndex) MetadataException(org.apache.asterix.metadata.MetadataException)
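
The switch above is a guard: TokenizeOperator supports only the keyword and n-gram inverted-index types. A hedged sketch of the same check as a standalone predicate, assuming the IndexType enum nested in org.apache.asterix.common.config.DatasetConfig:

import org.apache.asterix.common.config.DatasetConfig.IndexType;

// Returns true only for the inverted-index types that TokenizeOperator
// supports; every other index type is rejected by the caller.
private static boolean isTokenizable(IndexType type) {
    switch (type) {
        case SINGLE_PARTITION_WORD_INVIX:
        case SINGLE_PARTITION_NGRAM_INVIX:
        case LENGTH_PARTITIONED_WORD_INVIX:
        case LENGTH_PARTITIONED_NGRAM_INVIX:
            return true;
        default:
            return false;
    }
}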

Example 8 with MetadataException

Use of org.apache.asterix.metadata.MetadataException in project asterixdb by apache.

In class MetadataProvider, the method getWriteResultRuntime:

@Override
public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> getWriteResultRuntime(IDataSource<DataSourceId> dataSource, IOperatorSchema propagatedSchema, List<LogicalVariable> keys, LogicalVariable payload, List<LogicalVariable> additionalNonKeyFields, JobGenContext context, JobSpecification spec) throws AlgebricksException {
    String dataverseName = dataSource.getId().getDataverseName();
    String datasetName = dataSource.getId().getDatasourceName();
    Dataset dataset = MetadataManagerUtil.findExistingDataset(mdTxnCtx, dataverseName, datasetName);
    int numKeys = keys.size();
    int numFilterFields = DatasetUtil.getFilterField(dataset) == null ? 0 : 1;
    // move key fields to front
    int[] fieldPermutation = new int[numKeys + 1 + numFilterFields];
    int i = 0;
    for (LogicalVariable varKey : keys) {
        int idx = propagatedSchema.findVariable(varKey);
        fieldPermutation[i] = idx;
        i++;
    }
    fieldPermutation[numKeys] = propagatedSchema.findVariable(payload);
    if (numFilterFields > 0) {
        int idx = propagatedSchema.findVariable(additionalNonKeyFields.get(0));
        fieldPermutation[numKeys + 1] = idx;
    }
    try {
        boolean temp = dataset.getDatasetDetails().isTemp();
        isTemporaryDatasetWriteJob = isTemporaryDatasetWriteJob && temp;
        Pair<IFileSplitProvider, AlgebricksPartitionConstraint> splitsAndConstraint = getSplitProviderAndConstraints(dataset);
        long numElementsHint = getCardinalityPerPartitionHint(dataset);
        // TODO: figure out the right bulkload behavior and then supply the
        // right callback (e.g., what is the expected behavior when an error
        // occurs during bulkload?)
        IIndexDataflowHelperFactory indexHelperFactory = new IndexDataflowHelperFactory(storaegComponentProvider.getStorageManager(), splitsAndConstraint.first);
        TreeIndexBulkLoadOperatorDescriptor btreeBulkLoad = new TreeIndexBulkLoadOperatorDescriptor(spec, null, fieldPermutation, GlobalConfig.DEFAULT_TREE_FILL_FACTOR, false, numElementsHint, true, indexHelperFactory);
        return new Pair<>(btreeBulkLoad, splitsAndConstraint.second);
    } catch (MetadataException me) {
        throw new AlgebricksException(me);
    }
}
Also used: LogicalVariable(org.apache.hyracks.algebricks.core.algebra.base.LogicalVariable) Dataset(org.apache.asterix.metadata.entities.Dataset) IFileSplitProvider(org.apache.hyracks.dataflow.std.file.IFileSplitProvider) AlgebricksException(org.apache.hyracks.algebricks.common.exceptions.AlgebricksException) AlgebricksPartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksPartitionConstraint) DatasetCardinalityHint(org.apache.asterix.metadata.dataset.hints.DatasetHints.DatasetCardinalityHint) AlgebricksAbsolutePartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint) MetadataException(org.apache.asterix.metadata.MetadataException) IIndexDataflowHelperFactory(org.apache.hyracks.storage.am.common.dataflow.IIndexDataflowHelperFactory) TreeIndexBulkLoadOperatorDescriptor(org.apache.hyracks.storage.am.common.dataflow.TreeIndexBulkLoadOperatorDescriptor) IndexDataflowHelperFactory(org.apache.hyracks.storage.am.common.dataflow.IndexDataflowHelperFactory) Pair(org.apache.hyracks.algebricks.common.utils.Pair)
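
The field-permutation logic above follows a fixed layout: primary keys move to the front, the payload follows, and an optional filter field comes last. A self-contained sketch of that layout (names are illustrative, not from the source):

// Illustrative sketch of the permutation built in getWriteResultRuntime:
// key fields first, then the payload, then the optional filter field.
static int[] buildFieldPermutation(int[] keyColumns, int payloadColumn, Integer filterColumn) {
    int numFilterFields = (filterColumn == null) ? 0 : 1;
    int[] perm = new int[keyColumns.length + 1 + numFilterFields];
    int i = 0;
    for (int keyCol : keyColumns) {
        perm[i++] = keyCol;        // key fields moved to the front
    }
    perm[i++] = payloadColumn;     // payload sits immediately after the keys
    if (filterColumn != null) {
        perm[i] = filterColumn;    // filter field last, when the dataset declares one
    }
    return perm;
}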

Example 9 with MetadataException

Use of org.apache.asterix.metadata.MetadataException in project asterixdb by apache.

In class MetadataProvider, the method getInsertOrDeleteRuntime:

private Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> getInsertOrDeleteRuntime(IndexOperation indexOp, IDataSource<DataSourceId> dataSource, IOperatorSchema propagatedSchema, List<LogicalVariable> keys, LogicalVariable payload, List<LogicalVariable> additionalNonKeyFields, RecordDescriptor inputRecordDesc, JobGenContext context, JobSpecification spec, boolean bulkload, List<LogicalVariable> additionalNonFilteringFields) throws AlgebricksException {
    String datasetName = dataSource.getId().getDatasourceName();
    Dataset dataset = MetadataManagerUtil.findExistingDataset(mdTxnCtx, dataSource.getId().getDataverseName(), datasetName);
    boolean temp = dataset.getDatasetDetails().isTemp();
    isTemporaryDatasetWriteJob = isTemporaryDatasetWriteJob && temp;
    int numKeys = keys.size();
    int numFilterFields = DatasetUtil.getFilterField(dataset) == null ? 0 : 1;
    // Move key fields to front.
    int[] fieldPermutation = new int[numKeys + 1 + numFilterFields + (additionalNonFilteringFields == null ? 0 : additionalNonFilteringFields.size())];
    int[] bloomFilterKeyFields = new int[numKeys];
    int i = 0;
    for (LogicalVariable varKey : keys) {
        int idx = propagatedSchema.findVariable(varKey);
        fieldPermutation[i] = idx;
        bloomFilterKeyFields[i] = i;
        i++;
    }
    fieldPermutation[i++] = propagatedSchema.findVariable(payload);
    if (numFilterFields > 0) {
        int idx = propagatedSchema.findVariable(additionalNonKeyFields.get(0));
        fieldPermutation[i++] = idx;
    }
    if (additionalNonFilteringFields != null) {
        for (LogicalVariable variable : additionalNonFilteringFields) {
            int idx = propagatedSchema.findVariable(variable);
            fieldPermutation[i++] = idx;
        }
    }
    try {
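        // The dataset name is passed as the index name: the primary index
        // is named after its dataset.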
        Index primaryIndex = MetadataManager.INSTANCE.getIndex(mdTxnCtx, dataset.getDataverseName(), dataset.getDatasetName(), dataset.getDatasetName());
        Pair<IFileSplitProvider, AlgebricksPartitionConstraint> splitsAndConstraint = getSplitProviderAndConstraints(dataset);
        // prepare callback
        int[] primaryKeyFields = new int[numKeys];
        for (i = 0; i < numKeys; i++) {
            primaryKeyFields[i] = i;
        }
        IModificationOperationCallbackFactory modificationCallbackFactory = dataset.getModificationCallbackFactory(storaegComponentProvider, primaryIndex, jobId, indexOp, primaryKeyFields);
        IIndexDataflowHelperFactory idfh = new IndexDataflowHelperFactory(storaegComponentProvider.getStorageManager(), splitsAndConstraint.first);
        IOperatorDescriptor op;
        if (bulkload) {
            long numElementsHint = getCardinalityPerPartitionHint(dataset);
            op = new TreeIndexBulkLoadOperatorDescriptor(spec, inputRecordDesc, fieldPermutation, GlobalConfig.DEFAULT_TREE_FILL_FACTOR, true, numElementsHint, true, idfh);
        } else {
            op = new LSMTreeInsertDeleteOperatorDescriptor(spec, inputRecordDesc, fieldPermutation, indexOp, idfh, null, true, modificationCallbackFactory);
        }
        return new Pair<>(op, splitsAndConstraint.second);
    } catch (MetadataException me) {
        throw new AlgebricksException(me);
    }
}
Also used: LogicalVariable(org.apache.hyracks.algebricks.core.algebra.base.LogicalVariable) Dataset(org.apache.asterix.metadata.entities.Dataset) IFileSplitProvider(org.apache.hyracks.dataflow.std.file.IFileSplitProvider) AlgebricksException(org.apache.hyracks.algebricks.common.exceptions.AlgebricksException) Index(org.apache.asterix.metadata.entities.Index) IDataSourceIndex(org.apache.hyracks.algebricks.core.algebra.metadata.IDataSourceIndex) AlgebricksPartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksPartitionConstraint) DatasetCardinalityHint(org.apache.asterix.metadata.dataset.hints.DatasetHints.DatasetCardinalityHint) AlgebricksAbsolutePartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint) MetadataException(org.apache.asterix.metadata.MetadataException) IIndexDataflowHelperFactory(org.apache.hyracks.storage.am.common.dataflow.IIndexDataflowHelperFactory) IOperatorDescriptor(org.apache.hyracks.api.dataflow.IOperatorDescriptor) IModificationOperationCallbackFactory(org.apache.hyracks.storage.am.common.api.IModificationOperationCallbackFactory) LSMTreeInsertDeleteOperatorDescriptor(org.apache.asterix.common.dataflow.LSMTreeInsertDeleteOperatorDescriptor) TreeIndexBulkLoadOperatorDescriptor(org.apache.hyracks.storage.am.common.dataflow.TreeIndexBulkLoadOperatorDescriptor) IndexDataflowHelperFactory(org.apache.hyracks.storage.am.common.dataflow.IndexDataflowHelperFactory) Pair(org.apache.hyracks.algebricks.common.utils.Pair)
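
Because the permutation places the keys at input positions 0..numKeys-1, both bloomFilterKeyFields and the callback's primaryKeyFields reduce to identity mappings. A small sketch isolating that detail (the helper name is ours, not the project's):

// After the keys are permuted to the front, any per-key field array is
// just the ordinals 0..numKeys-1.
static int[] ordinalFields(int numKeys) {
    int[] fields = new int[numKeys];
    for (int i = 0; i < numKeys; i++) {
        fields[i] = i;
    }
    return fields;
}

In the example, both bloomFilterKeyFields and primaryKeyFields could be produced as ordinalFields(numKeys).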

Example 10 with MetadataException

Use of org.apache.asterix.metadata.MetadataException in project asterixdb by apache.

In class MetadataBootstrap, the method startUniverse:

/**
 * Bootstraps the metadata node: enlists the metadata B-trees and, for a new
 * universe, inserts the initial metadata datasets, datatypes, nodes, node
 * groups, adapters, feed policies, and compaction policies.
 *
 * @param ncServiceContext the NC service context
 * @param isNewUniverse whether this instance starts with no existing metadata
 * @throws ACIDException
 * @throws RemoteException
 * @throws MetadataException
 */
public static void startUniverse(INCServiceContext ncServiceContext, boolean isNewUniverse) throws RemoteException, ACIDException, MetadataException {
    MetadataBootstrap.setNewUniverse(isNewUniverse);
    appContext = (INcApplicationContext) ncServiceContext.getApplicationContext();
    MetadataProperties metadataProperties = appContext.getMetadataProperties();
    metadataNodeName = metadataProperties.getMetadataNodeName();
    nodeNames = metadataProperties.getNodeNames();
    localResourceRepository = appContext.getLocalResourceRepository();
    ioManager = ncServiceContext.getIoManager();
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    try {
        // Begin a transaction against the metadata.
        // Lock the metadata in X mode.
        MetadataManager.INSTANCE.lock(mdTxnCtx, LockMode.X);
        for (int i = 0; i < PRIMARY_INDEXES.length; i++) {
            enlistMetadataDataset(ncServiceContext, PRIMARY_INDEXES[i]);
        }
        if (LOGGER.isLoggable(Level.INFO)) {
            LOGGER.info("Finished enlistment of metadata B-trees in " + (isNewUniverse ? "new" : "old") + " universe");
        }
        if (isNewUniverse) {
            insertInitialDataverses(mdTxnCtx);
            insertMetadataDatasets(mdTxnCtx, PRIMARY_INDEXES);
            insertMetadataDatatypes(mdTxnCtx);
            insertNodes(mdTxnCtx);
            insertInitialGroups(mdTxnCtx);
            insertInitialAdapters(mdTxnCtx);
            insertInitialFeedPolicies(mdTxnCtx);
            insertInitialCompactionPolicies(mdTxnCtx);
            if (LOGGER.isLoggable(Level.INFO)) {
                LOGGER.info("Finished creating metadata B-trees.");
            }
        }
        // Initialize the datasetIdFactory.
        MetadataManager.INSTANCE.initializeDatasetIdFactory(mdTxnCtx);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        try {
            if (IS_DEBUG_MODE) {
                LOGGER.log(Level.SEVERE, "Failure during metadata bootstrap", e);
            }
            MetadataManager.INSTANCE.abortTransaction(mdTxnCtx);
        } catch (Exception e2) {
            e.addSuppressed(e2);
            // TODO change the exception type to AbortFailureException
            throw new MetadataException(e);
        }
        throw new MetadataException(e);
    }
}
Also used: MetadataTransactionContext(org.apache.asterix.metadata.MetadataTransactionContext) MetadataProperties(org.apache.asterix.common.config.MetadataProperties) ACIDException(org.apache.asterix.common.exceptions.ACIDException) MetadataException(org.apache.asterix.metadata.MetadataException) RemoteException(java.rmi.RemoteException) HyracksDataException(org.apache.hyracks.api.exceptions.HyracksDataException)
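
startUniverse follows the standard metadata transaction discipline: begin, do the work, commit; on any failure, abort, and if the abort itself fails, attach that failure as a suppressed exception before rethrowing. A generic sketch of that skeleton under the same throws clause as the original; doWork is a hypothetical placeholder for the transactional body:

import java.rmi.RemoteException;

import org.apache.asterix.common.exceptions.ACIDException;
import org.apache.asterix.metadata.MetadataException;
import org.apache.asterix.metadata.MetadataManager;
import org.apache.asterix.metadata.MetadataTransactionContext;

static void runInMetadataTxn() throws RemoteException, ACIDException, MetadataException {
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    try {
        doWork(mdTxnCtx); // hypothetical placeholder for the transactional body
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        try {
            MetadataManager.INSTANCE.abortTransaction(mdTxnCtx);
        } catch (Exception e2) {
            // Attach the abort failure to the root cause instead of masking it.
            e.addSuppressed(e2);
        }
        throw new MetadataException(e);
    }
}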

Aggregations

MetadataException (org.apache.asterix.metadata.MetadataException): 19
AlgebricksException (org.apache.hyracks.algebricks.common.exceptions.AlgebricksException): 12
Index (org.apache.asterix.metadata.entities.Index): 9
AlgebricksPartitionConstraint (org.apache.hyracks.algebricks.common.constraints.AlgebricksPartitionConstraint): 8
Dataset (org.apache.asterix.metadata.entities.Dataset): 7
Pair (org.apache.hyracks.algebricks.common.utils.Pair): 7
IFileSplitProvider (org.apache.hyracks.dataflow.std.file.IFileSplitProvider): 7
IIndexDataflowHelperFactory (org.apache.hyracks.storage.am.common.dataflow.IIndexDataflowHelperFactory): 7
IndexDataflowHelperFactory (org.apache.hyracks.storage.am.common.dataflow.IndexDataflowHelperFactory): 7
RemoteException (java.rmi.RemoteException): 6
IDataSourceIndex (org.apache.hyracks.algebricks.core.algebra.metadata.IDataSourceIndex): 6
DatasetCardinalityHint (org.apache.asterix.metadata.dataset.hints.DatasetHints.DatasetCardinalityHint): 5
ACIDException (org.apache.asterix.common.exceptions.ACIDException): 4
MetadataTransactionContext (org.apache.asterix.metadata.MetadataTransactionContext): 4
ARecordType (org.apache.asterix.om.types.ARecordType): 4
AlgebricksAbsolutePartitionConstraint (org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint): 4
LogicalVariable (org.apache.hyracks.algebricks.core.algebra.base.LogicalVariable): 4
RecordDescriptor (org.apache.hyracks.api.dataflow.value.RecordDescriptor): 4
IAType (org.apache.asterix.om.types.IAType): 3
JobEventListenerFactory (org.apache.asterix.runtime.job.listener.JobEventListenerFactory): 3