Example 11 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class AlterViewAsAnalyzer method analyzeInternal.

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    TableName viewName = getQualifiedTableName((ASTNode) root.getChild(0));
    String fqViewName = viewName.getNotEmptyDbTable();
    LOG.info("Altering the query of view " + fqViewName + " position=" + root.getCharPositionInLine());
    ASTNode select = (ASTNode) root.getChild(1).getChild(0);
    String originalText = ctx.getTokenRewriteStream().toString(select.getTokenStartIndex(), select.getTokenStopIndex());
    SemanticAnalyzer analyzer = analyzeQuery(select, fqViewName);
    schema = new ArrayList<FieldSchema>(analyzer.getResultSchema());
    ParseUtils.validateColumnNameUniqueness(analyzer.getOriginalResultSchema() == null ? schema : analyzer.getOriginalResultSchema());
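    // analyzeQuery() applied the unparse translator's rewrites to the token stream, so reading
    // the same token range again now yields the expanded (fully qualified) query text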
    String expandedText = ctx.getTokenRewriteStream().toString(select.getTokenStartIndex(), select.getTokenStopIndex());
    AlterViewAsDesc desc = new AlterViewAsDesc(fqViewName, schema, originalText, expandedText);
    validateCreateView(desc, analyzer);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
    DDLUtils.addDbAndTableToOutputs(getDatabase(viewName.getDb()), viewName, TableType.VIRTUAL_VIEW, false, null, outputs);
}
Also used : TableName(org.apache.hadoop.hive.common.TableName) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode) SemanticAnalyzer(org.apache.hadoop.hive.ql.parse.SemanticAnalyzer)

Example 12 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class CreateViewAnalyzer method analyzeInternal.

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    TableName viewName = getQualifiedTableName((ASTNode) root.getChild(0));
    String fqViewName = viewName.getNotEmptyDbTable();
    LOG.info("Creating view " + fqViewName + " position=" + root.getCharPositionInLine());
    Map<Integer, ASTNode> children = new HashMap<>();
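    // index the remaining children by token type so each optional clause can be consumed in any order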
    for (int i = 1; i < root.getChildCount(); i++) {
        ASTNode child = (ASTNode) root.getChild(i);
        children.put(child.getToken().getType(), child);
    }
    List<FieldSchema> imposedSchema = children.containsKey(HiveParser.TOK_TABCOLNAME) ? getColumns((ASTNode) children.remove(HiveParser.TOK_TABCOLNAME)) : null;
    boolean ifNotExists = children.remove(HiveParser.TOK_IFNOTEXISTS) != null;
    boolean orReplace = children.remove(HiveParser.TOK_ORREPLACE) != null;
    String comment = children.containsKey(HiveParser.TOK_TABLECOMMENT) ? unescapeSQLString(children.remove(HiveParser.TOK_TABLECOMMENT).getChild(0).getText()) : null;
    ASTNode select = children.remove(HiveParser.TOK_QUERY);
    Map<String, String> properties = children.containsKey(HiveParser.TOK_TABLEPROPERTIES) ? getProps((ASTNode) children.remove(HiveParser.TOK_TABLEPROPERTIES).getChild(0)) : null;
    List<String> partitionColumnNames = children.containsKey(HiveParser.TOK_VIEWPARTCOLS) ? getColumnNames((ASTNode) children.remove(HiveParser.TOK_VIEWPARTCOLS).getChild(0)) : null;
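    // every recognized clause has been removed from the map; anything left over is unexpected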
    assert children.isEmpty();
    if (ifNotExists && orReplace) {
        throw new SemanticException("Can't combine IF NOT EXISTS and OR REPLACE.");
    }
    String originalText = ctx.getTokenRewriteStream().toString(select.getTokenStartIndex(), select.getTokenStopIndex());
    SemanticAnalyzer analyzer = analyzeQuery(select, fqViewName);
    schema = new ArrayList<FieldSchema>(analyzer.getResultSchema());
    ParseUtils.validateColumnNameUniqueness(analyzer.getOriginalResultSchema() == null ? schema : analyzer.getOriginalResultSchema());
    String expandedText = getExpandedText(imposedSchema, select, viewName);
    List<FieldSchema> partitionColumns = getPartitionColumns(imposedSchema, select, viewName, partitionColumnNames);
    CreateViewDesc desc = new CreateViewDesc(fqViewName, schema, comment, properties, partitionColumnNames, ifNotExists, orReplace, originalText, expandedText, partitionColumns);
    validateCreateView(desc, analyzer);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
    DDLUtils.addDbAndTableToOutputs(getDatabase(viewName.getDb()), viewName, TableType.VIRTUAL_VIEW, false, properties, outputs);
}
Also used : HashMap(java.util.HashMap) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) SemanticAnalyzer(org.apache.hadoop.hive.ql.parse.SemanticAnalyzer) TableName(org.apache.hadoop.hive.common.TableName) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)

Example 13 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class ReplUtils method getTableCheckpointTask.

public static Task<?> getTableCheckpointTask(ImportTableDesc tableDesc, HashMap<String, String> partSpec, String dumpRoot, ReplicationMetricCollector metricCollector, HiveConf conf) throws SemanticException {
    HashMap<String, String> mapProp = new HashMap<>();
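    // tag the table (or partition) with the dump root so a retried REPL LOAD can detect the
    // checkpoint and skip work that already completed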
    mapProp.put(REPL_CHECKPOINT_KEY, dumpRoot);
    final TableName tName = TableName.fromString(tableDesc.getTableName(), null, tableDesc.getDatabaseName());
    AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(tName, partSpec, null, false, mapProp, false, false, null);
    return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterTblDesc, true, (new Path(dumpRoot)).getParent().toString(), metricCollector), conf);
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hive.common.TableName) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) HashMap(java.util.HashMap) AlterTableSetPropertiesDesc(org.apache.hadoop.hive.ql.ddl.table.misc.properties.AlterTableSetPropertiesDesc) HashSet(java.util.HashSet)
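
As a side note on the API itself: the call in Example 13 above (and again in Example 15 below) passes (name, defaultCatalog, defaultDatabase) to TableName.fromString. Below is a minimal standalone sketch of that resolution, assuming fromString splits a dotted name and otherwise falls back to the supplied defaults; the demo class and the printed values are illustrative, not taken from the Hive sources above.

import org.apache.hadoop.hive.common.TableName;

public class TableNameFromStringDemo {
    public static void main(String[] args) {
        // unqualified name: the supplied default database is applied
        TableName plain = TableName.fromString("sales", null, "reporting");
        System.out.println(plain.getNotEmptyDbTable());   // reporting.sales

        // qualified name: the database embedded in the string wins over the default
        TableName dotted = TableName.fromString("finance.ledger", null, "reporting");
        System.out.println(dotted.getDb() + "." + dotted.getTable());   // finance.ledger
    }
}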

Example 14 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class AcidExportSemanticAnalyzer method analyzeAcidExport.

/**
 * See {@link #isAcidExport(ASTNode)}
 * 1. create the temp table T
 * 2. compile 'insert into T select * from acidTable'
 * 3. compile 'export acidTable'  (acidTable will be replaced with T during execution)
 * 4. create task to drop T
 *
 * Using a true temp (session-level) table means it should not affect replication, and, for
 * security, the table is not visible outside the session that created it.
 */
private void analyzeAcidExport(ASTNode ast) throws SemanticException {
    assert ast != null && ast.getToken() != null && ast.getToken().getType() == HiveParser.TOK_EXPORT;
    ASTNode tableTree = (ASTNode) ast.getChild(0);
    assert tableTree != null && tableTree.getType() == HiveParser.TOK_TAB;
    ASTNode tokRefOrNameExportTable = (ASTNode) tableTree.getChild(0);
    Table exportTable = getTargetTable(tokRefOrNameExportTable);
    if (exportTable != null && (exportTable.isView() || exportTable.isMaterializedView())) {
        throw new SemanticException("Views and Materialized Views can not be exported.");
    }
    assert AcidUtils.isFullAcidTable(exportTable);
    // need to create the table "manually" rather than creating a task since it has to exist to
    // compile the insert into T...
    // this is db.table
    final String newTableName = getTmptTableNameForExport(exportTable);
    final TableName newTableNameRef = HiveTableName.of(newTableName);
    Map<String, String> tblProps = new HashMap<>();
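    // create the temp copy as non-transactional so the insert writes plain files the export can
    // copy; the transactional property is restored on the archive metadata further below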
    tblProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, Boolean.FALSE.toString());
    String location;
    // it has the same life cycle as the tmp table
    try {
        // Generate a unique ID for temp table path.
        // This path will be fixed for the life of the temp table.
        Path path = new Path(SessionState.getTempTableSpace(conf), UUID.randomUUID().toString());
        path = Warehouse.getDnsPath(path, conf);
        location = path.toString();
    } catch (MetaException err) {
        throw new SemanticException("Error while generating temp table path:", err);
    }
    CreateTableLikeDesc ctlt = new CreateTableLikeDesc(newTableName, false, true, null, null,
        location, null, null, tblProps,
        // important so we get an exception on name collision
        true,
        Warehouse.getQualifiedName(exportTable.getTTable()), false);
    Table newTable;
    try {
        ReadEntity dbForTmpTable = new ReadEntity(db.getDatabase(exportTable.getDbName()));
        // so the plan knows we are 'reading' this db - locks, security...
        inputs.add(dbForTmpTable);
        DDLTask createTableTask = (DDLTask) TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), ctlt), conf);
        // above get() doesn't set it
        createTableTask.setConf(conf);
        Context context = new Context(conf);
        createTableTask.initialize(null, null, new TaskQueue(context), context);
        createTableTask.execute();
        newTable = db.getTable(newTableName);
    } catch (HiveException ex) {
        throw new SemanticException(ex);
    }
    // now generate insert statement
    // insert into newTableName select * from ts <where partition spec>
    StringBuilder rewrittenQueryStr = generateExportQuery(newTable.getPartCols(), tokRefOrNameExportTable, tableTree, newTableName);
    ReparseResult rr = parseRewrittenQuery(rewrittenQueryStr, ctx.getCmd());
    Context rewrittenCtx = rr.rewrittenCtx;
    // it's set in parseRewrittenQuery()
    rewrittenCtx.setIsUpdateDeleteMerge(false);
    ASTNode rewrittenTree = rr.rewrittenTree;
    try {
        useSuper = true;
        // newTable has to exist at this point to compile
        super.analyze(rewrittenTree, rewrittenCtx);
    } finally {
        useSuper = false;
    }
    // now we have the rootTasks set up for Insert ... Select
    removeStatsTasks(rootTasks);
    // now make an ExportTask from temp table
    /* analyzeExport() creates a TableSpec, which in turn tries to build
     "public List<Partition> partitions" by looking in the metastore for Partitions matching
     the partition spec in the Export command. These of course don't exist yet, since we
     haven't run the insert statement at this point. */
    Task<ExportWork> exportTask = ExportSemanticAnalyzer.analyzeExport(ast, newTableName, db, conf, inputs, outputs);
    // Add an alter table task to set transactional props
    // do it after populating temp table so that it's written as non-transactional table but
    // update props before export so that export archive metadata has these props.  This way when
    // IMPORT is done for this archive and target table doesn't exist, it will be created as Acid.
    Map<String, String> mapProps = new HashMap<>();
    mapProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, Boolean.TRUE.toString());
    AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(newTableNameRef, null, null, false, mapProps, false, false, null);
    addExportTask(rootTasks, exportTask, TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc)));
    // Now make a task to drop temp table
    // see {@link DropTableAnalyzer#analyzeInternal(ASTNode)}
    ReplicationSpec replicationSpec = new ReplicationSpec();
    DropTableDesc dropTblDesc = new DropTableDesc(newTableName, false, true, replicationSpec);
    Task<DDLWork> dropTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), conf);
    exportTask.addDependentTask(dropTask);
    markReadEntityForUpdate();
    if (ctx.isExplainPlan()) {
        try {
            // so that "explain" doesn't "leak" tmp tables
            // TODO: catalog
            db.dropTable(newTable.getDbName(), newTable.getTableName(), true, true, true);
        } catch (HiveException ex) {
            LOG.warn("Unable to drop " + newTableName + " due to: " + ex.getMessage(), ex);
        }
    }
}
Also used : HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) HashMap(java.util.HashMap) ExportWork(org.apache.hadoop.hive.ql.plan.ExportWork) CreateTableLikeDesc(org.apache.hadoop.hive.ql.ddl.table.create.like.CreateTableLikeDesc) TaskQueue(org.apache.hadoop.hive.ql.TaskQueue) AlterTableSetPropertiesDesc(org.apache.hadoop.hive.ql.ddl.table.misc.properties.AlterTableSetPropertiesDesc) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HashSet(java.util.HashSet) Path(org.apache.hadoop.fs.Path) Context(org.apache.hadoop.hive.ql.Context) Table(org.apache.hadoop.hive.ql.metadata.Table) DropTableDesc(org.apache.hadoop.hive.ql.ddl.table.drop.DropTableDesc) ReadEntity(org.apache.hadoop.hive.ql.hooks.ReadEntity) TableName(org.apache.hadoop.hive.common.TableName) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) DDLTask(org.apache.hadoop.hive.ql.ddl.DDLTask)

Example 15 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class AddNotNullConstraintHandler method handle.

@Override
public List<Task<?>> handle(Context context) throws SemanticException {
    AddNotNullConstraintMessage msg = deserializer.getAddNotNullConstraintMessage(context.dmd.getPayload());
    List<SQLNotNullConstraint> nns;
    try {
        nns = msg.getNotNullConstraints();
    } catch (Exception e) {
        if (!(e instanceof SemanticException)) {
            throw new SemanticException("Error reading message members", e);
        } else {
            throw (SemanticException) e;
        }
    }
    List<Task<?>> tasks = new ArrayList<Task<?>>();
    if (nns.isEmpty()) {
        return tasks;
    }
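    // prefer the database name supplied by the replication context; fall back to the one recorded in the event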
    final String actualDbName = context.isDbNameEmpty() ? nns.get(0).getTable_db() : context.dbName;
    final String actualTblName = nns.get(0).getTable_name();
    final TableName tName = TableName.fromString(actualTblName, null, actualDbName);
    for (SQLNotNullConstraint nn : nns) {
        nn.setTable_db(actualDbName);
        nn.setTable_name(actualTblName);
    }
    Constraints constraints = new Constraints(null, null, nns, null, null, null);
    AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints);
    Task<DDLWork> addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
    tasks.add(addConstraintsTask);
    context.log.debug("Added add constraints task : {}:{}", addConstraintsTask.getId(), actualTblName);
    updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
    return Collections.singletonList(addConstraintsTask);
}
Also used : Task(org.apache.hadoop.hive.ql.exec.Task) ArrayList(java.util.ArrayList) AddNotNullConstraintMessage(org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) TableName(org.apache.hadoop.hive.common.TableName) Constraints(org.apache.hadoop.hive.ql.ddl.table.constraint.Constraints) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) AlterTableAddConstraintDesc(org.apache.hadoop.hive.ql.ddl.table.constraint.add.AlterTableAddConstraintDesc)

Aggregations

TableName (org.apache.hadoop.hive.common.TableName) 47
DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork) 22
ArrayList (java.util.ArrayList) 16
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException) 15
Table (org.apache.hadoop.hive.ql.metadata.Table) 14
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema) 11
MetaException (org.apache.hadoop.hive.metastore.api.MetaException) 11
HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName) 10
HashMap (java.util.HashMap) 9
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 9
HashSet (java.util.HashSet) 8
IOException (java.io.IOException) 7
List (java.util.List) 7
Path (org.apache.hadoop.fs.Path) 7
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) 7
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) 7
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) 7
SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) 7
ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode) 7
SourceTable (org.apache.hadoop.hive.metastore.api.SourceTable) 6