
Example 76 with WriteEntity

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.

In class DDLSemanticAnalyzer, method addTablePartsOutputs.

/**
 * Add the table partitions to be modified in the output, so that it is available for the
 * pre-execution hook. If the partition does not exist, throw an error if
 * throwIfNonExistent is true, otherwise ignore it.
 */
private void addTablePartsOutputs(Table table, List<Map<String, String>> partSpecs, boolean throwIfNonExistent, boolean allowMany, ASTNode ast, WriteEntity.WriteType writeType) throws SemanticException {
    Iterator<Map<String, String>> i;
    int index;
    for (i = partSpecs.iterator(), index = 1; i.hasNext(); ++index) {
        Map<String, String> partSpec = i.next();
        List<Partition> parts = null;
        if (allowMany) {
            try {
                parts = db.getPartitions(table, partSpec);
            } catch (HiveException e) {
                LOG.error("Got HiveException during obtaining list of partitions" + StringUtils.stringifyException(e));
                throw new SemanticException(e.getMessage(), e);
            }
        } else {
            parts = new ArrayList<Partition>();
            try {
                Partition p = db.getPartition(table, partSpec, false);
                if (p != null) {
                    parts.add(p);
                }
            } catch (HiveException e) {
                LOG.debug("Wrong specification" + StringUtils.stringifyException(e));
                throw new SemanticException(e.getMessage(), e);
            }
        }
        if (parts.isEmpty()) {
            if (throwIfNonExistent) {
                throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(ast.getChild(index)));
            }
        }
        for (Partition p : parts) {
            // Don't request any locks here, as the table has already been locked.
            outputs.add(new WriteEntity(p, writeType));
        }
    }
}
Also used: Partition (org.apache.hadoop.hive.ql.metadata.Partition), AlterTableExchangePartition (org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), Map (java.util.Map), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint), NotNullConstraint (org.apache.hadoop.hive.ql.metadata.NotNullConstraint), DefaultConstraint (org.apache.hadoop.hive.ql.metadata.DefaultConstraint), SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint), SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint), SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint)
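
The outputs populated above are what pre/post execution hooks receive. As a rough illustration of the consumer side, here is a minimal sketch of a pre-execution hook that inspects partition-level WriteEntity outputs; the class name PartitionAuditHook is hypothetical, and the sketch assumes the standard ExecuteWithHookContext contract with the hook registered via hive.exec.pre.hooks.

import org.apache.hadoop.hive.ql.hooks.Entity;
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

public class PartitionAuditHook implements ExecuteWithHookContext {

    @Override
    public void run(HookContext hookContext) throws Exception {
        // The partition-level entities seen here are what addTablePartsOutputs registered.
        for (WriteEntity output : hookContext.getOutputs()) {
            if (output.getType() == Entity.Type.PARTITION) {
                System.out.println("About to modify partition " + output.getName()
                        + " with write type " + output.getWriteType());
            }
        }
    }
}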

Example 77 with WriteEntity

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.

In class DDLSemanticAnalyzer, method addTableDropPartsOutputs.

/**
 * Add the table partitions to be modified in the output, so that it is available for the
 * pre-execution hook. If the partition does not exist, throw an error if
 * throwIfNonExistent is true, otherwise ignore it.
 */
private void addTableDropPartsOutputs(Table tab, Collection<List<ExprNodeGenericFuncDesc>> partSpecs, boolean throwIfNonExistent) throws SemanticException {
    for (List<ExprNodeGenericFuncDesc> specs : partSpecs) {
        for (ExprNodeGenericFuncDesc partSpec : specs) {
            List<Partition> parts = new ArrayList<Partition>();
            boolean hasUnknown = false;
            try {
                hasUnknown = db.getPartitionsByExpr(tab, partSpec, conf, parts);
            } catch (Exception e) {
                throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partSpec.getExprString()), e);
            }
            if (hasUnknown) {
                throw new SemanticException("Unexpected unknown partitions for " + partSpec.getExprString());
            }
            // Partitions are fetched here mainly to populate the outputs; if the outputs
            // were no longer needed, this existence check could go away as well.
            if (parts.isEmpty()) {
                if (throwIfNonExistent) {
                    throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partSpec.getExprString()));
                }
            }
            for (Partition p : parts) {
                outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL_EXCLUSIVE));
            }
        }
    }
}
Also used: Partition (org.apache.hadoop.hive.ql.metadata.Partition), AlterTableExchangePartition (org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition), ArrayList (java.util.ArrayList), ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), LockException (org.apache.hadoop.hive.ql.lockmgr.LockException), InvocationTargetException (java.lang.reflect.InvocationTargetException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), URISyntaxException (java.net.URISyntaxException), FileNotFoundException (java.io.FileNotFoundException), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), InvalidTableException (org.apache.hadoop.hive.ql.metadata.InvalidTableException)
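
The DDL write types appearing across these examples (DDL_EXCLUSIVE for DROP PARTITION above, DDL_SHARED for ADD PARTITION, DDL_NO_LOCK for TOUCH) mainly differ in the lock intent they signal. The helper below is an illustrative sketch only; the authoritative lock semantics are decided by the configured HiveTxnManager, and the class name WriteTypeSketch is hypothetical.

import org.apache.hadoop.hive.ql.hooks.WriteEntity;

public class WriteTypeSketch {

    // Illustrative descriptions only; actual locking is decided by the configured HiveTxnManager.
    static String describe(WriteEntity.WriteType type) {
        switch (type) {
            case DDL_EXCLUSIVE:
                return "exclusive DDL: blocks concurrent readers and writers (e.g. DROP PARTITION)";
            case DDL_SHARED:
                return "shared DDL: concurrent readers remain allowed (e.g. ADD PARTITION)";
            case DDL_NO_LOCK:
                return "metadata-only change, no lock requested (e.g. TOUCH)";
            default:
                return "non-DDL write type: " + type;
        }
    }

    public static void main(String[] args) {
        System.out.println(describe(WriteEntity.WriteType.DDL_EXCLUSIVE));
    }
}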

Example 78 with WriteEntity

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.

In class DDLSemanticAnalyzer, method analyzeAlterTableTouch.

/**
 * Rewrite the metadata for one or more partitions in a table. Useful when
 * an external process modifies files on HDFS and you want the pre/post
 * hooks to be fired for the specified partition.
 *
 * @param ast
 *          The parsed command tree.
 * @throws SemanticException
 *           Parsing failed
 */
private void analyzeAlterTableTouch(String[] qualified, CommonTree ast) throws SemanticException {
    Table tab = getTable(qualified);
    validateAlterTableType(tab, AlterTableTypes.TOUCH);
    inputs.add(new ReadEntity(tab));
    // partition name to value
    List<Map<String, String>> partSpecs = getPartitionSpecs(tab, ast);
    if (partSpecs.size() == 0) {
        AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc(getDotName(qualified), null, AlterTableDesc.AlterTableTypes.TOUCH);
        outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK));
        rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc)));
    } else {
        addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK);
        for (Map<String, String> partSpec : partSpecs) {
            AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc(getDotName(qualified), partSpec, AlterTableDesc.AlterTableTypes.TOUCH);
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc)));
        }
    }
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), Table (org.apache.hadoop.hive.ql.metadata.Table), AlterTableSimpleDesc (org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc), DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork), Map (java.util.Map), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
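
For context, these are the user-facing statements that reach analyzeAlterTableTouch, issued here through the standard HiveServer2 JDBC driver. This is a hedged usage sketch: the JDBC URL, table name, and partition spec are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class TouchPartitionExample {

    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement stmt = conn.createStatement()) {
            // No partition spec: a single DDL task plus a table-level WriteEntity (DDL_NO_LOCK).
            stmt.execute("ALTER TABLE page_view TOUCH");
            // With a partition spec: one DDL task per spec plus partition-level WriteEntities.
            stmt.execute("ALTER TABLE page_view TOUCH PARTITION (ds='2008-08-08', country='us')");
        }
    }
}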

Example 79 with WriteEntity

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.

In class DDLSemanticAnalyzer, method analyzeAlterTableAddParts.

/**
 * Add one or more partitions to a table. Useful when the data has been copied
 * to the right location by some other process.
 *
 * @param ast
 *          The parsed command tree.
 *
 * @param expectView
 *          True for ALTER VIEW, false for ALTER TABLE.
 *
 * @throws SemanticException
 *           Parsing failed
 */
private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boolean expectView) throws SemanticException {
    // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+)
    boolean ifNotExists = ast.getChild(0).getType() == HiveParser.TOK_IFNOTEXISTS;
    Table tab = getTable(qualified);
    boolean isView = tab.isView();
    validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView);
    outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED));
    int numCh = ast.getChildCount();
    int start = ifNotExists ? 1 : 0;
    String currentLocation = null;
    Map<String, String> currentPart = null;
    // Parser has done some verification, so the order of tokens doesn't need to be verified here.
    AddPartitionDesc addPartitionDesc = new AddPartitionDesc(tab.getDbName(), tab.getTableName(), ifNotExists);
    for (int num = start; num < numCh; num++) {
        ASTNode child = (ASTNode) ast.getChild(num);
        switch(child.getToken().getType()) {
            case HiveParser.TOK_PARTSPEC:
                if (currentPart != null) {
                    addPartitionDesc.addPartition(currentPart, currentLocation);
                    currentLocation = null;
                }
                currentPart = getValidatedPartSpec(tab, child, conf, true);
                // validate reserved values
                validatePartitionValues(currentPart);
                break;
            case HiveParser.TOK_PARTITIONLOCATION:
                // if location specified, set in partition
                if (isView) {
                    throw new SemanticException("LOCATION clause illegal for view partition");
                }
                currentLocation = unescapeSQLString(child.getChild(0).getText());
                inputs.add(toReadEntity(currentLocation));
                break;
            default:
                throw new SemanticException("Unknown child: " + child);
        }
    }
    // add the last one
    if (currentPart != null) {
        addPartitionDesc.addPartition(currentPart, currentLocation);
    }
    if (this.conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
        for (int index = 0; index < addPartitionDesc.getPartitionCount(); index++) {
            OnePartitionDesc desc = addPartitionDesc.getPartition(index);
            if (desc.getLocation() == null) {
                if (desc.getPartParams() == null) {
                    desc.setPartParams(new HashMap<String, String>());
                }
                StatsSetupConst.setStatsStateForCreateTable(desc.getPartParams(), MetaStoreUtils.getColumnNames(tab.getCols()), StatsSetupConst.TRUE);
            }
        }
    }
    if (addPartitionDesc.getPartitionCount() == 0) {
        // nothing to do
        return;
    }
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc)));
    if (isView) {
        // Compile internal query to capture underlying table partition dependencies
        StringBuilder cmd = new StringBuilder();
        cmd.append("SELECT * FROM ");
        cmd.append(HiveUtils.unparseIdentifier(getDotName(qualified)));
        cmd.append(" WHERE ");
        boolean firstOr = true;
        for (int i = 0; i < addPartitionDesc.getPartitionCount(); ++i) {
            AddPartitionDesc.OnePartitionDesc partitionDesc = addPartitionDesc.getPartition(i);
            if (firstOr) {
                firstOr = false;
            } else {
                cmd.append(" OR ");
            }
            boolean firstAnd = true;
            cmd.append("(");
            for (Map.Entry<String, String> entry : partitionDesc.getPartSpec().entrySet()) {
                if (firstAnd) {
                    firstAnd = false;
                } else {
                    cmd.append(" AND ");
                }
                cmd.append(HiveUtils.unparseIdentifier(entry.getKey()));
                cmd.append(" = '");
                cmd.append(HiveUtils.escapeString(entry.getValue()));
                cmd.append("'");
            }
            cmd.append(")");
        }
        SessionState ss = SessionState.get();
        String uName = (ss == null ? null : ss.getUserName());
        Driver driver = new Driver(conf, uName, queryState.getLineageState());
        int rc = driver.compile(cmd.toString(), false);
        if (rc != 0) {
            throw new SemanticException(ErrorMsg.NO_VALID_PARTN.getMsg());
        }
        inputs.addAll(driver.getPlan().getInputs());
    }
}
Also used: SessionState (org.apache.hadoop.hive.ql.session.SessionState), Table (org.apache.hadoop.hive.ql.metadata.Table), Driver (org.apache.hadoop.hive.ql.Driver), SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint), NotNullConstraint (org.apache.hadoop.hive.ql.metadata.NotNullConstraint), DefaultConstraint (org.apache.hadoop.hive.ql.metadata.DefaultConstraint), SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint), SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint), SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint), DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork), AddPartitionDesc (org.apache.hadoop.hive.ql.plan.AddPartitionDesc), OnePartitionDesc (org.apache.hadoop.hive.ql.plan.AddPartitionDesc.OnePartitionDesc), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), Map (java.util.Map), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap)
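
The view branch at the end of analyzeAlterTableAddParts compiles an internal SELECT to capture the underlying table partitions a partitioned view depends on. The sketch below rebuilds the same query shape with plain Java collections so it can run standalone; unlike the analyzer, it does not escape identifiers or values with HiveUtils, and the class and view names are hypothetical.

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ViewPartitionQuerySketch {

    // Builds one OR'd disjunct per partition spec, each an AND over its key/value pairs,
    // mirroring the loop in analyzeAlterTableAddParts.
    static String dependencyQuery(String viewName, List<Map<String, String>> partSpecs) {
        StringBuilder cmd = new StringBuilder("SELECT * FROM ").append(viewName).append(" WHERE ");
        for (int i = 0; i < partSpecs.size(); i++) {
            if (i > 0) {
                cmd.append(" OR ");
            }
            cmd.append("(");
            boolean firstAnd = true;
            for (Map.Entry<String, String> e : partSpecs.get(i).entrySet()) {
                if (!firstAnd) {
                    cmd.append(" AND ");
                }
                firstAnd = false;
                cmd.append(e.getKey()).append(" = '").append(e.getValue()).append("'");
            }
            cmd.append(")");
        }
        return cmd.toString();
    }

    public static void main(String[] args) {
        Map<String, String> spec = new LinkedHashMap<>();
        spec.put("ds", "2008-08-08");
        spec.put("country", "us");
        // Prints: SELECT * FROM pv_view WHERE (ds = '2008-08-08' AND country = 'us')
        System.out.println(dependencyQuery("pv_view", Collections.singletonList(spec)));
    }
}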

Example 80 with WriteEntity

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.

In class FunctionSemanticAnalyzer, method addEntities.

/**
 * Add write entities to the semantic analyzer to restrict function creation to privileged users.
 */
private void addEntities(String functionName, String className, boolean isTemporaryFunction, List<ResourceUri> resources) throws SemanticException {
    // If the function is being added under a database 'namespace', then add an entity representing
    // the database (only applicable to permanent/metastore functions).
    // We also add a second entity representing the function name.
    // The authorization api implementation can decide which entities it wants to use to
    // authorize the create/drop function call.
    // Add the relevant database 'namespace' as a WriteEntity
    Database database = null;
    // it matters only for permanent functions
    if (!isTemporaryFunction) {
        try {
            String[] qualifiedNameParts = FunctionUtils.getQualifiedFunctionNameParts(functionName);
            String dbName = qualifiedNameParts[0];
            functionName = qualifiedNameParts[1];
            database = getDatabase(dbName);
        } catch (HiveException e) {
            LOG.error("Failed to get database ", e);
            throw new SemanticException(e);
        }
    }
    if (database != null) {
        outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
    }
    // Add the function name as a WriteEntity
    outputs.add(new WriteEntity(database, functionName, className, Type.FUNCTION, WriteEntity.WriteType.DDL_NO_LOCK));
    if (resources != null) {
        for (ResourceUri resource : resources) {
            String uriPath = resource.getUri();
            outputs.add(toWriteEntity(uriPath));
        }
    }
}
Also used: ResourceUri (org.apache.hadoop.hive.metastore.api.ResourceUri), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), Database (org.apache.hadoop.hive.metastore.api.Database), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
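
For reference, these are the statements that drive addEntities, issued through the HiveServer2 JDBC driver. A permanent function carries a database qualifier and optional resource URIs (each of which becomes an additional WriteEntity via toWriteEntity), while a temporary function skips the database entity. The JDBC URL, database, class, and jar path below are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateFunctionExample {

    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement stmt = conn.createStatement()) {
            // Permanent function: WriteEntities for the database, the function name, and the jar URI.
            stmt.execute("CREATE FUNCTION analytics.my_upper AS 'com.example.udf.MyUpper' "
                    + "USING JAR 'hdfs:///udfs/my-udfs.jar'");
            // Temporary function: only the function-name WriteEntity is added.
            stmt.execute("CREATE TEMPORARY FUNCTION my_upper AS 'com.example.udf.MyUpper'");
        }
    }
}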

Aggregations

WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 88 uses
Table (org.apache.hadoop.hive.ql.metadata.Table): 39 uses
ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity): 35 uses
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 24 uses
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 24 uses
ArrayList (java.util.ArrayList): 18 uses
DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork): 14 uses
Path (org.apache.hadoop.fs.Path): 13 uses
AlterTableExchangePartition (org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition): 13 uses
Referenceable (org.apache.atlas.typesystem.Referenceable): 11 uses
Database (org.apache.hadoop.hive.metastore.api.Database): 11 uses
Test (org.junit.Test): 11 uses
QueryPlan (org.apache.hadoop.hive.ql.QueryPlan): 10 uses
HashMap (java.util.HashMap): 9 uses
LinkedHashMap (java.util.LinkedHashMap): 9 uses
Test (org.testng.annotations.Test): 9 uses
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint): 8 uses
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint): 8 uses
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint): 8 uses
SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint): 8 uses