
Example 21 with MetadataProvider

Use of org.apache.asterix.metadata.declared.MetadataProvider in project asterixdb by Apache.

From the class IntroduceLSMComponentFilterRule, method rewritePost.

@Override
public boolean rewritePost(Mutable<ILogicalOperator> opRef, IOptimizationContext context) throws AlgebricksException {
    if (!checkIfRuleIsApplicable(opRef, context)) {
        return false;
    }
    AbstractLogicalOperator op = (AbstractLogicalOperator) opRef.getValue();
    typeEnvironment = context.getOutputTypeEnvironment(op);
    ILogicalExpression condExpr = ((SelectOperator) op).getCondition().getValue();
    AccessMethodAnalysisContext analysisCtx = analyzeCondition(condExpr, context, typeEnvironment);
    if (analysisCtx.getMatchedFuncExprs().isEmpty()) {
        return false;
    }
    Dataset dataset = getDataset(op, context);
    List<String> filterFieldName = null;
    ARecordType recType = null;
    if (dataset != null && dataset.getDatasetType() == DatasetType.INTERNAL) {
        filterFieldName = DatasetUtil.getFilterField(dataset);
        IAType itemType = ((MetadataProvider) context.getMetadataProvider()).findType(dataset.getItemTypeDataverseName(), dataset.getItemTypeName());
        if (itemType.getTypeTag() == ATypeTag.OBJECT) {
            recType = (ARecordType) itemType;
        }
    }
    if (filterFieldName == null || recType == null) {
        return false;
    }
    List<Index> datasetIndexes = ((MetadataProvider) context.getMetadataProvider()).getDatasetIndexes(dataset.getDataverseName(), dataset.getDatasetName());
    List<IOptimizableFuncExpr> optFuncExprs = new ArrayList<>();
    for (int i = 0; i < analysisCtx.getMatchedFuncExprs().size(); i++) {
        IOptimizableFuncExpr optFuncExpr = analysisCtx.getMatchedFuncExpr(i);
        boolean found = findMacthedExprFieldName(optFuncExpr, op, dataset, recType, datasetIndexes, context);
        if (found && optFuncExpr.getFieldName(0).equals(filterFieldName)) {
            optFuncExprs.add(optFuncExpr);
        }
    }
    if (optFuncExprs.isEmpty()) {
        return false;
    }
    changePlan(optFuncExprs, op, dataset, context);
    OperatorPropertiesUtil.typeOpRec(opRef, context);
    context.addToDontApplySet(this, op);
    return true;
}
Also used : AbstractLogicalOperator(org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractLogicalOperator) Dataset(org.apache.asterix.metadata.entities.Dataset) ArrayList(java.util.ArrayList) Index(org.apache.asterix.metadata.entities.Index) ILogicalExpression(org.apache.hyracks.algebricks.core.algebra.base.ILogicalExpression) MetadataProvider(org.apache.asterix.metadata.declared.MetadataProvider) ARecordType(org.apache.asterix.om.types.ARecordType) IAType(org.apache.asterix.om.types.IAType)
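
The MetadataProvider interaction worth noting above is the pair of lookups that resolve the dataset's filter field and its record type. The following is a minimal sketch of just that lookup, not code from the AsterixDB sources; the DatasetUtil import path, the helper class name, and the thrown exception type are assumptions inferred from the example.

import java.util.List;
import org.apache.asterix.metadata.declared.MetadataProvider;
import org.apache.asterix.metadata.entities.Dataset;
import org.apache.asterix.metadata.utils.DatasetUtil;
import org.apache.asterix.om.types.ARecordType;
import org.apache.asterix.om.types.ATypeTag;
import org.apache.asterix.om.types.IAType;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
import org.apache.hyracks.algebricks.core.algebra.base.IOptimizationContext;

public final class FilterTypeLookup {

    // Returns the dataset's record type when the dataset declares a filter field, otherwise null.
    public static ARecordType resolveFilteredRecordType(IOptimizationContext context, Dataset dataset)
            throws AlgebricksException {
        // The optimizer hands rules a generic metadata provider; the AsterixDB rule downcasts it.
        MetadataProvider mp = (MetadataProvider) context.getMetadataProvider();
        // Only datasets created with a filter field qualify for this rewrite.
        List<String> filterField = DatasetUtil.getFilterField(dataset);
        if (filterField == null) {
            return null;
        }
        IAType itemType = mp.findType(dataset.getItemTypeDataverseName(), dataset.getItemTypeName());
        // LSM component filters are only defined over record (OBJECT) item types.
        return itemType.getTypeTag() == ATypeTag.OBJECT ? (ARecordType) itemType : null;
    }
}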

Example 22 with MetadataProvider

Use of org.apache.asterix.metadata.declared.MetadataProvider in project asterixdb by Apache.

From the class FilePartition, method get.

@Override
protected void get(IServletRequest request, IServletResponse response) {
    response.setStatus(HttpResponseStatus.OK);
    try {
        HttpUtil.setContentType(response, HttpUtil.ContentType.APPLICATION_JSON, HttpUtil.Encoding.UTF8);
    } catch (IOException e) {
        LOGGER.log(Level.WARNING, "Failure setting content type", e);
        response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
        response.writer().write(e.toString());
        return;
    }
    PrintWriter out = response.writer();
    try {
        ObjectMapper om = new ObjectMapper();
        ObjectNode jsonResponse = om.createObjectNode();
        String dataverseName = request.getParameter("dataverseName");
        String datasetName = request.getParameter("datasetName");
        if (dataverseName == null || datasetName == null) {
            jsonResponse.put("error", "Parameter dataverseName or datasetName is null,");
            out.write(jsonResponse.toString());
            return;
        }
        IHyracksClientConnection hcc = (IHyracksClientConnection) ctx.get(HYRACKS_CONNECTION_ATTR);
        // Metadata transaction begins.
        MetadataManager.INSTANCE.init();
        MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        // Retrieves file splits of the dataset.
        MetadataProvider metadataProvider = new MetadataProvider(appCtx, null, new StorageComponentProvider());
        try {
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            Dataset dataset = metadataProvider.findDataset(dataverseName, datasetName);
            if (dataset == null) {
                jsonResponse.put("error", "Dataset " + datasetName + " does not exist in " + "dataverse " + dataverseName);
                out.write(jsonResponse.toString());
                out.flush();
                return;
            }
            boolean temp = dataset.getDatasetDetails().isTemp();
            FileSplit[] fileSplits = metadataProvider.splitsForIndex(mdTxnCtx, dataset, datasetName);
            ARecordType recordType = (ARecordType) metadataProvider.findType(dataset.getItemTypeDataverseName(), dataset.getItemTypeName());
            List<List<String>> primaryKeys = dataset.getPrimaryKeys();
            StringBuilder pkStrBuf = new StringBuilder();
            for (List<String> keys : primaryKeys) {
                for (String key : keys) {
                    pkStrBuf.append(key).append(",");
                }
            }
            pkStrBuf.delete(pkStrBuf.length() - 1, pkStrBuf.length());
            // Constructs the returned json object.
            formResponseObject(jsonResponse, fileSplits, recordType, pkStrBuf.toString(), temp, hcc.getNodeControllerInfos());
            // Flush the cached contents of the dataset to file system.
            FlushDatasetUtil.flushDataset(hcc, metadataProvider, dataverseName, datasetName, datasetName);
            // Metadata transaction commits.
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            // Writes file splits.
            out.write(jsonResponse.toString());
        } finally {
            metadataProvider.getLocks().unlock();
        }
    } catch (Exception e) {
        LOGGER.log(Level.WARNING, "Failure handling a request", e);
        response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
        out.write(e.toString());
    } finally {
        out.flush();
    }
}
Also used : IHyracksClientConnection(org.apache.hyracks.api.client.IHyracksClientConnection) ObjectNode(com.fasterxml.jackson.databind.node.ObjectNode) Dataset(org.apache.asterix.metadata.entities.Dataset) MetadataTransactionContext(org.apache.asterix.metadata.MetadataTransactionContext) StorageComponentProvider(org.apache.asterix.file.StorageComponentProvider) IOException(java.io.IOException) FileSplit(org.apache.hyracks.api.io.FileSplit) IOException(java.io.IOException) MetadataProvider(org.apache.asterix.metadata.declared.MetadataProvider) List(java.util.List) ARecordType(org.apache.asterix.om.types.ARecordType) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) PrintWriter(java.io.PrintWriter)
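
Stripped of the HTTP plumbing, the metadata-access pattern in get() is: begin a metadata transaction, bind it to a fresh MetadataProvider, perform the lookups, commit, and release the provider's locks in a finally block. Below is a condensed sketch of just that pattern; the ICcApplicationContext parameter type and the helper class are assumptions, and a production caller would also abort the transaction on failure rather than rely on the commit alone.

import org.apache.asterix.common.dataflow.ICcApplicationContext;
import org.apache.asterix.file.StorageComponentProvider;
import org.apache.asterix.metadata.MetadataManager;
import org.apache.asterix.metadata.MetadataTransactionContext;
import org.apache.asterix.metadata.declared.MetadataProvider;
import org.apache.asterix.metadata.entities.Dataset;

public final class DatasetLookup {

    // Finds a dataset inside its own metadata transaction; returns null when it doesn't exist.
    public static Dataset find(ICcApplicationContext appCtx, String dataverseName, String datasetName)
            throws Exception {
        MetadataManager.INSTANCE.init();
        MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        MetadataProvider metadataProvider = new MetadataProvider(appCtx, null, new StorageComponentProvider());
        try {
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            Dataset dataset = metadataProvider.findDataset(dataverseName, datasetName);
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            return dataset;
        } finally {
            // Lookups acquire metadata locks through the provider; release them no matter what.
            metadataProvider.getLocks().unlock();
        }
    }
}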

Example 23 with MetadataProvider

Use of org.apache.asterix.metadata.declared.MetadataProvider in project asterixdb by Apache.

From the class RebalanceApiServlet, method rebalanceDataset.

// Rebalances a given dataset.
private void rebalanceDataset(String dataverseName, String datasetName, String[] targetNodes) throws Exception {
    IHyracksClientConnection hcc = (IHyracksClientConnection) ctx.get(HYRACKS_CONNECTION_ATTR);
    MetadataProvider metadataProvider = new MetadataProvider(appCtx, null, new StorageComponentProvider());
    RebalanceUtil.rebalance(dataverseName, datasetName, new LinkedHashSet<>(Arrays.asList(targetNodes)), metadataProvider, hcc);
}
Also used : IHyracksClientConnection(org.apache.hyracks.api.client.IHyracksClientConnection) MetadataProvider(org.apache.asterix.metadata.declared.MetadataProvider) StorageComponentProvider(org.apache.asterix.file.StorageComponentProvider)
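
A hypothetical invocation, for example from the servlet's request handler; the dataverse, dataset, and node-controller names below are purely illustrative.

// Move the dataset "Orders" in dataverse "Default" onto two target node controllers.
rebalanceDataset("Default", "Orders", new String[] { "asterix_nc1", "asterix_nc2" });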

Example 24 with MetadataProvider

Use of org.apache.asterix.metadata.declared.MetadataProvider in project asterixdb by Apache.

From the class QueryTranslator, method handleDataverseDropStatement.

protected void handleDataverseDropStatement(MetadataProvider metadataProvider, Statement stmt, IHyracksClientConnection hcc) throws Exception {
    DataverseDropStatement stmtDelete = (DataverseDropStatement) stmt;
    String dataverseName = stmtDelete.getDataverseName().getValue();
    if (dataverseName.equals(MetadataBuiltinEntities.DEFAULT_DATAVERSE_NAME)) {
        throw new HyracksDataException(MetadataBuiltinEntities.DEFAULT_DATAVERSE_NAME + " dataverse can't be dropped");
    }
    ProgressState progress = ProgressState.NO_PROGRESS;
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    List<JobSpecification> jobsToExecute = new ArrayList<>();
    MetadataLockManager.INSTANCE.acquireDataverseWriteLock(metadataProvider.getLocks(), dataverseName);
    try {
        Dataverse dv = MetadataManager.INSTANCE.getDataverse(mdTxnCtx, dataverseName);
        if (dv == null) {
            if (stmtDelete.getIfExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("There is no dataverse with this name " + dataverseName + ".");
            }
        }
        // #. disconnect all feeds from any datasets in the dataverse.
        ActiveLifecycleListener activeListener = (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
        ActiveJobNotificationHandler activeEventHandler = activeListener.getNotificationHandler();
        IActiveEntityEventsListener[] activeListeners = activeEventHandler.getEventListeners();
        Identifier dvId = new Identifier(dataverseName);
        MetadataProvider tempMdProvider = new MetadataProvider(appCtx, metadataProvider.getDefaultDataverse(), metadataProvider.getStorageComponentProvider());
        tempMdProvider.setConfig(metadataProvider.getConfig());
        for (IActiveEntityEventsListener listener : activeListeners) {
            EntityId activeEntityId = listener.getEntityId();
            if (activeEntityId.getExtensionName().equals(Feed.EXTENSION_NAME) && activeEntityId.getDataverse().equals(dataverseName)) {
                tempMdProvider.getLocks().reset();
                stopFeedBeforeDelete(new Pair<>(dvId, new Identifier(activeEntityId.getEntityName())), tempMdProvider);
                // prepare job to remove feed log storage
                jobsToExecute.add(FeedOperations.buildRemoveFeedStorageJob(metadataProvider, MetadataManager.INSTANCE.getFeed(mdTxnCtx, dataverseName, activeEntityId.getEntityName())));
            }
        }
        // #. prepare jobs which will drop corresponding datasets with indexes.
        List<Dataset> datasets = MetadataManager.INSTANCE.getDataverseDatasets(mdTxnCtx, dataverseName);
        for (Dataset dataset : datasets) {
            String datasetName = dataset.getDatasetName();
            DatasetType dsType = dataset.getDatasetType();
            if (dsType == DatasetType.INTERNAL) {
                List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName, datasetName);
                for (Index index : indexes) {
                    jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(index, metadataProvider, dataset));
                }
            } else {
                // External dataset
                List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName, datasetName);
                for (int k = 0; k < indexes.size(); k++) {
                    if (ExternalIndexingOperations.isFileIndex(indexes.get(k))) {
                        jobsToExecute.add(ExternalIndexingOperations.buildDropFilesIndexJobSpec(metadataProvider, dataset));
                    } else {
                        jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(indexes.get(k), metadataProvider, dataset));
                    }
                }
                ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(dataset);
            }
        }
        jobsToExecute.add(DataverseUtil.dropDataverseJobSpec(dv, metadataProvider));
        // #. mark PendingDropOp on the dataverse record by
        // first, deleting the dataverse record from the DATAVERSE_DATASET
        // second, inserting the dataverse record with the PendingDropOp value into the
        // DATAVERSE_DATASET
        MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
        MetadataManager.INSTANCE.addDataverse(mdTxnCtx, new Dataverse(dataverseName, dv.getDataFormat(), MetadataUtil.PENDING_DROP_OP));
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
        bActiveTxn = false;
        progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;
        for (JobSpecification jobSpec : jobsToExecute) {
            JobUtils.runJob(hcc, jobSpec, true);
        }
        mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        bActiveTxn = true;
        metadataProvider.setMetadataTxnContext(mdTxnCtx);
        // #. finally, delete the dataverse.
        MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
        // Drops all node groups that are no longer needed.
        for (Dataset dataset : datasets) {
            String nodeGroup = dataset.getNodeGroupName();
            MetadataLockManager.INSTANCE.acquireNodeGroupWriteLock(metadataProvider.getLocks(), nodeGroup);
            if (MetadataManager.INSTANCE.getNodegroup(mdTxnCtx, nodeGroup) != null) {
                MetadataManager.INSTANCE.dropNodegroup(mdTxnCtx, nodeGroup, true);
            }
        }
        if (activeDataverse != null && activeDataverse.getDataverseName().equals(dataverseName)) {
            activeDataverse = null;
        }
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }
        if (progress == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            if (activeDataverse != null && activeDataverse.getDataverseName().equals(dataverseName)) {
                activeDataverse = null;
            }
            // remove all the indexes on the NCs
            try {
                for (JobSpecification jobSpec : jobsToExecute) {
                    JobUtils.runJob(hcc, jobSpec, true);
                }
            } catch (Exception e2) {
                // do not throw the exception since the metadata still needs to be compensated.
                e.addSuppressed(e2);
            }
            // remove the record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            try {
                MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException("System is inconsistent state: pending dataverse(" + dataverseName + ") couldn't be removed from the metadata", e);
            }
        }
        throw e;
    } finally {
        metadataProvider.getLocks().unlock();
        ExternalDatasetsRegistry.INSTANCE.releaseAcquiredLocks(metadataProvider);
    }
}
Also used : ProgressState(org.apache.asterix.common.utils.JobUtils.ProgressState) ArrayList(java.util.ArrayList) MetadataTransactionContext(org.apache.asterix.metadata.MetadataTransactionContext) DatasetType(org.apache.asterix.common.config.DatasetConfig.DatasetType) Index(org.apache.asterix.metadata.entities.Index) DataverseDropStatement(org.apache.asterix.lang.common.statement.DataverseDropStatement) ActiveLifecycleListener(org.apache.asterix.active.ActiveLifecycleListener) Identifier(org.apache.asterix.lang.common.struct.Identifier) JobSpecification(org.apache.hyracks.api.job.JobSpecification) ActiveJobNotificationHandler(org.apache.asterix.active.ActiveJobNotificationHandler) IHyracksDataset(org.apache.hyracks.api.dataset.IHyracksDataset) IDataset(org.apache.asterix.common.metadata.IDataset) Dataset(org.apache.asterix.metadata.entities.Dataset) AlgebricksException(org.apache.hyracks.algebricks.common.exceptions.AlgebricksException) Dataverse(org.apache.asterix.metadata.entities.Dataverse) HyracksDataException(org.apache.hyracks.api.exceptions.HyracksDataException) DatasetNodegroupCardinalityHint(org.apache.asterix.metadata.dataset.hints.DatasetHints.DatasetNodegroupCardinalityHint) AlgebricksAbsolutePartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint) ACIDException(org.apache.asterix.common.exceptions.ACIDException) MetadataException(org.apache.asterix.metadata.MetadataException) AlgebricksException(org.apache.hyracks.algebricks.common.exceptions.AlgebricksException) HyracksDataException(org.apache.hyracks.api.exceptions.HyracksDataException) CompilationException(org.apache.asterix.common.exceptions.CompilationException) IOException(java.io.IOException) RemoteException(java.rmi.RemoteException) AsterixException(org.apache.asterix.common.exceptions.AsterixException) IActiveEntityEventsListener(org.apache.asterix.active.IActiveEntityEventsListener) EntityId(org.apache.asterix.active.EntityId) MetadataProvider(org.apache.asterix.metadata.declared.MetadataProvider)
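
The pending-operation dance in the middle of the method is the part worth internalizing: the dataverse record is replaced with a copy marked PENDING_DROP_OP and that transaction commits before any physical drop job runs, so a crash mid-drop leaves a durable marker that recovery can act on instead of a half-deleted dataverse; a second transaction removes the record only after all jobs finish. Distilled into a sketch below, reusing the variables from the method above (this is a condensed illustration, not the verbatim AsterixDB code).

// Txn 1: replace the dataverse record with a copy marked PENDING_DROP_OP, then commit.
MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
MetadataManager.INSTANCE.addDataverse(mdTxnCtx,
        new Dataverse(dataverseName, dv.getDataFormat(), MetadataUtil.PENDING_DROP_OP));
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
// Physical deletion runs outside any metadata transaction.
for (JobSpecification jobSpec : jobsToExecute) {
    JobUtils.runJob(hcc, jobSpec, true);
}
// Txn 2: only now is the record deleted for good.
mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
MetadataManager.INSTANCE.dropDataverse(mdTxnCtx, dataverseName);
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);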

Example 25 with MetadataProvider

Use of org.apache.asterix.metadata.declared.MetadataProvider in project asterixdb by Apache.

From the class QueryTranslator, method compileAndExecute.

@Override
public void compileAndExecute(IHyracksClientConnection hcc, IHyracksDataset hdc, ResultDelivery resultDelivery, ResultMetadata outMetadata, Stats stats, String clientContextId, IStatementExecutorContext ctx) throws Exception {
    int resultSetIdCounter = 0;
    FileSplit outputFile = null;
    IAWriterFactory writerFactory = PrinterBasedWriterFactory.INSTANCE;
    IResultSerializerFactoryProvider resultSerializerFactoryProvider = ResultSerializerFactoryProvider.INSTANCE;
    Map<String, String> config = new HashMap<>();
    /*
     * Since the system runs a large number of threads, when HTTP requests don't return, it becomes
     * difficult to find the thread running the request to determine where it has stopped.
     * Setting the thread name helps make that easier.
     */
    String threadName = Thread.currentThread().getName();
    Thread.currentThread().setName(QueryTranslator.class.getSimpleName());
    try {
        for (Statement stmt : statements) {
            if (sessionConfig.is(SessionConfig.FORMAT_HTML)) {
                sessionOutput.out().println(ApiServlet.HTML_STATEMENT_SEPARATOR);
            }
            validateOperation(appCtx, activeDataverse, stmt);
            // Rewrite the statement's AST.
            rewriteStatement(stmt);
            MetadataProvider metadataProvider = new MetadataProvider(appCtx, activeDataverse, componentProvider);
            metadataProvider.setWriterFactory(writerFactory);
            metadataProvider.setResultSerializerFactoryProvider(resultSerializerFactoryProvider);
            metadataProvider.setOutputFile(outputFile);
            metadataProvider.setConfig(config);
            switch(stmt.getKind()) {
                case Statement.Kind.SET:
                    handleSetStatement(stmt, config);
                    break;
                case Statement.Kind.DATAVERSE_DECL:
                    activeDataverse = handleUseDataverseStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CREATE_DATAVERSE:
                    handleCreateDataverseStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DATASET_DECL:
                    handleCreateDatasetStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.CREATE_INDEX:
                    handleCreateIndexStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.TYPE_DECL:
                    handleCreateTypeStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.NODEGROUP_DECL:
                    handleCreateNodeGroupStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DATAVERSE_DROP:
                    handleDataverseDropStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.DATASET_DROP:
                    handleDatasetDropStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.INDEX_DROP:
                    handleIndexDropStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.TYPE_DROP:
                    handleTypeDropStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.NODEGROUP_DROP:
                    handleNodegroupDropStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CREATE_FUNCTION:
                    handleCreateFunctionStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.FUNCTION_DROP:
                    handleFunctionDropStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.LOAD:
                    handleLoadStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.INSERT:
                case Statement.Kind.UPSERT:
                    if (((InsertStatement) stmt).getReturnExpression() != null) {
                        metadataProvider.setResultSetId(new ResultSetId(resultSetIdCounter++));
                        metadataProvider.setResultAsyncMode(resultDelivery == ResultDelivery.ASYNC || resultDelivery == ResultDelivery.DEFERRED);
                    }
                    handleInsertUpsertStatement(metadataProvider, stmt, hcc, hdc, resultDelivery, outMetadata, stats, false, clientContextId, ctx);
                    break;
                case Statement.Kind.DELETE:
                    handleDeleteStatement(metadataProvider, stmt, hcc, false);
                    break;
                case Statement.Kind.CREATE_FEED:
                    handleCreateFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DROP_FEED:
                    handleDropFeedStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.DROP_FEED_POLICY:
                    handleDropFeedPolicyStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CONNECT_FEED:
                    handleConnectFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.DISCONNECT_FEED:
                    handleDisconnectFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.START_FEED:
                    handleStartFeedStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.STOP_FEED:
                    handleStopFeedStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.CREATE_FEED_POLICY:
                    handleCreateFeedPolicyStatement(metadataProvider, stmt);
                    break;
                case Statement.Kind.QUERY:
                    metadataProvider.setResultSetId(new ResultSetId(resultSetIdCounter++));
                    metadataProvider.setResultAsyncMode(resultDelivery == ResultDelivery.ASYNC || resultDelivery == ResultDelivery.DEFERRED);
                    handleQuery(metadataProvider, (Query) stmt, hcc, hdc, resultDelivery, outMetadata, stats, clientContextId, ctx);
                    break;
                case Statement.Kind.COMPACT:
                    handleCompactStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.EXTERNAL_DATASET_REFRESH:
                    handleExternalDatasetRefreshStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.WRITE:
                    Pair<IAWriterFactory, FileSplit> result = handleWriteStatement(stmt);
                    writerFactory = (result.first != null) ? result.first : writerFactory;
                    outputFile = result.second;
                    break;
                case Statement.Kind.RUN:
                    handleRunStatement(metadataProvider, stmt, hcc);
                    break;
                case Statement.Kind.FUNCTION_DECL:
                    // No op
                    break;
                case Statement.Kind.EXTENSION:
                    ((IExtensionStatement) stmt).handle(this, metadataProvider, hcc, hdc, resultDelivery, stats, resultSetIdCounter);
                    break;
                default:
                    throw new CompilationException("Unknown statement kind " + stmt.getKind());
            }
        }
    } finally {
        Thread.currentThread().setName(threadName);
    }
}
Also used : IExtensionStatement(org.apache.asterix.algebra.extension.IExtensionStatement) CompilationException(org.apache.asterix.common.exceptions.CompilationException) HashMap(java.util.HashMap) IResultSerializerFactoryProvider(org.apache.hyracks.algebricks.data.IResultSerializerFactoryProvider) StopFeedStatement(org.apache.asterix.lang.common.statement.StopFeedStatement) FunctionDropStatement(org.apache.asterix.lang.common.statement.FunctionDropStatement) LoadStatement(org.apache.asterix.lang.common.statement.LoadStatement) CompiledInsertStatement(org.apache.asterix.translator.CompiledStatements.CompiledInsertStatement) CreateDataverseStatement(org.apache.asterix.lang.common.statement.CreateDataverseStatement) InsertStatement(org.apache.asterix.lang.common.statement.InsertStatement) CompiledLoadFromFileStatement(org.apache.asterix.translator.CompiledStatements.CompiledLoadFromFileStatement) CreateFeedPolicyStatement(org.apache.asterix.lang.common.statement.CreateFeedPolicyStatement) CreateIndexStatement(org.apache.asterix.lang.common.statement.CreateIndexStatement) RunStatement(org.apache.asterix.lang.common.statement.RunStatement) FeedPolicyDropStatement(org.apache.asterix.lang.common.statement.FeedPolicyDropStatement) Statement(org.apache.asterix.lang.common.base.Statement) DisconnectFeedStatement(org.apache.asterix.lang.common.statement.DisconnectFeedStatement) CompiledDeleteStatement(org.apache.asterix.translator.CompiledStatements.CompiledDeleteStatement) CreateFeedStatement(org.apache.asterix.lang.common.statement.CreateFeedStatement) DeleteStatement(org.apache.asterix.lang.common.statement.DeleteStatement) DataverseDropStatement(org.apache.asterix.lang.common.statement.DataverseDropStatement) TypeDropStatement(org.apache.asterix.lang.common.statement.TypeDropStatement) CompactStatement(org.apache.asterix.lang.common.statement.CompactStatement) StartFeedStatement(org.apache.asterix.lang.common.statement.StartFeedStatement) NodeGroupDropStatement(org.apache.asterix.lang.common.statement.NodeGroupDropStatement) RefreshExternalDatasetStatement(org.apache.asterix.lang.common.statement.RefreshExternalDatasetStatement) SetStatement(org.apache.asterix.lang.common.statement.SetStatement) CompiledUpsertStatement(org.apache.asterix.translator.CompiledStatements.CompiledUpsertStatement) ConnectFeedStatement(org.apache.asterix.lang.common.statement.ConnectFeedStatement) ICompiledDmlStatement(org.apache.asterix.translator.CompiledStatements.ICompiledDmlStatement) IndexDropStatement(org.apache.asterix.lang.common.statement.IndexDropStatement) CreateFunctionStatement(org.apache.asterix.lang.common.statement.CreateFunctionStatement) WriteStatement(org.apache.asterix.lang.common.statement.WriteStatement) IReturningStatement(org.apache.asterix.lang.common.base.IReturningStatement) DropDatasetStatement(org.apache.asterix.lang.common.statement.DropDatasetStatement) FeedDropStatement(org.apache.asterix.lang.common.statement.FeedDropStatement) FileSplit(org.apache.hyracks.api.io.FileSplit) UnmanagedFileSplit(org.apache.hyracks.api.io.UnmanagedFileSplit) DatasetNodegroupCardinalityHint(org.apache.asterix.metadata.dataset.hints.DatasetHints.DatasetNodegroupCardinalityHint) AlgebricksAbsolutePartitionConstraint(org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint) MetadataProvider(org.apache.asterix.metadata.declared.MetadataProvider) ResultSetId(org.apache.hyracks.api.dataset.ResultSetId) IAWriterFactory(org.apache.hyracks.algebricks.data.IAWriterFactory)
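
The comment at the top of compileAndExecute explains the thread-renaming trick: a recognizable thread name makes a stuck request easy to spot in a thread dump. The pattern generalizes to any long-running handler. Below is a minimal, self-contained sketch using only the JDK; the class and method names are illustrative.

public final class RequestThreadNaming {

    // Runs the given work with a descriptive thread name, then restores the original.
    public static void runNamed(String name, Runnable work) {
        String threadName = Thread.currentThread().getName();
        Thread.currentThread().setName(name);
        try {
            work.run();
        } finally {
            // Restore the original name so pooled threads stay identifiable.
            Thread.currentThread().setName(threadName);
        }
    }
}

// Example usage: RequestThreadNaming.runNamed("QueryTranslator", () -> handle(request));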

Aggregations

MetadataProvider (org.apache.asterix.metadata.declared.MetadataProvider): 30 usages
Dataset (org.apache.asterix.metadata.entities.Dataset): 16 usages
ILogicalExpression (org.apache.hyracks.algebricks.core.algebra.base.ILogicalExpression): 16 usages
AbstractFunctionCallExpression (org.apache.hyracks.algebricks.core.algebra.expressions.AbstractFunctionCallExpression): 13 usages
ArrayList (java.util.ArrayList): 10 usages
AlgebricksException (org.apache.hyracks.algebricks.common.exceptions.AlgebricksException): 10 usages
ILogicalOperator (org.apache.hyracks.algebricks.core.algebra.base.ILogicalOperator): 10 usages
LogicalVariable (org.apache.hyracks.algebricks.core.algebra.base.LogicalVariable): 10 usages
AbstractLogicalOperator (org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractLogicalOperator): 10 usages
List (java.util.List): 9 usages
IAType (org.apache.asterix.om.types.IAType): 9 usages
ARecordType (org.apache.asterix.om.types.ARecordType): 8 usages
Index (org.apache.asterix.metadata.entities.Index): 7 usages
Mutable (org.apache.commons.lang3.mutable.Mutable): 7 usages
VariableReferenceExpression (org.apache.hyracks.algebricks.core.algebra.expressions.VariableReferenceExpression): 7 usages
FunctionIdentifier (org.apache.hyracks.algebricks.core.algebra.functions.FunctionIdentifier): 7 usages
AString (org.apache.asterix.om.base.AString): 5 usages
AsterixConstantValue (org.apache.asterix.om.constants.AsterixConstantValue): 5 usages
AlgebricksPartitionConstraint (org.apache.hyracks.algebricks.common.constraints.AlgebricksPartitionConstraint): 4 usages
IVariableTypeEnvironment (org.apache.hyracks.algebricks.core.algebra.expressions.IVariableTypeEnvironment): 4 usages