
Example 41 with ReadEntity

Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.

From class DropDataConnectorAnalyzer, method analyzeInternal:

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    String connectorName = unescapeIdentifier(root.getChild(0).getText());
    boolean ifExists = root.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null;
    DataConnector connector = getDataConnector(connectorName, !ifExists);
    if (connector == null) {
        return;
    }
    inputs.add(new ReadEntity(connector));
    outputs.add(new WriteEntity(connector, WriteEntity.WriteType.DDL_EXCLUSIVE));
    DropDataConnectorDesc desc = new DropDataConnectorDesc(connectorName, ifExists);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), DataConnector (org.apache.hadoop.hive.metastore.api.DataConnector), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
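
The ReadEntity/WriteEntity objects registered above are what Hive's hook framework sees at execution time. Below is a minimal sketch of a pre-execution hook that prints them, assuming the standard ExecuteWithHookContext API; the class name and the hive.exec.pre.hooks wiring mentioned in the comment are illustrative and not part of the snippet above.

import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

// Hypothetical hook class; enable with: set hive.exec.pre.hooks=com.example.EntityLoggingHook;
public class EntityLoggingHook implements ExecuteWithHookContext {
    @Override
    public void run(HookContext hookContext) throws Exception {
        // inputs are the ReadEntity objects the analyzer registered (tables, partitions, connectors, ...)
        for (ReadEntity input : hookContext.getInputs()) {
            System.out.println("read:  " + input.getType() + " " + input.getName());
        }
        // outputs carry the WriteType the analyzer chose (DDL_EXCLUSIVE, DDL_NO_LOCK, ...)
        for (WriteEntity output : hookContext.getOutputs()) {
            System.out.println("write: " + output.getWriteType() + " " + output.getName());
        }
    }
}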

Example 42 with ReadEntity

Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.

From class DropTableAnalyzer, method analyzeInternal:

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    String tableName = getUnescapedName((ASTNode) root.getChild(0));
    boolean ifExists = (root.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null);
    boolean throwException = !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROP_IGNORES_NON_EXISTENT);
    Table table = getTable(tableName, throwException);
    if (table != null) {
        inputs.add(new ReadEntity(table));
        boolean tableWithSuffix = (HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_CREATE_TABLE_USE_SUFFIX)
                || HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED))
            && AcidUtils.isTransactionalTable(table)
            && Boolean.parseBoolean(table.getProperty(SOFT_DELETE_TABLE));
        outputs.add(new WriteEntity(table, tableWithSuffix ? WriteType.DDL_EXCL_WRITE : WriteType.DDL_EXCLUSIVE));
    }
    boolean purge = (root.getFirstChildWithType(HiveParser.KW_PURGE) != null);
    ReplicationSpec replicationSpec = new ReplicationSpec(root);
    DropTableDesc desc = new DropTableDesc(tableName, ifExists, purge, replicationSpec);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), ReplicationSpec (org.apache.hadoop.hive.ql.parse.ReplicationSpec), Table (org.apache.hadoop.hive.ql.metadata.Table), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
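
DropTableAnalyzer picks DDL_EXCL_WRITE or DDL_EXCLUSIVE depending on the soft-delete configuration, and downstream hooks can key off that WriteType. A sketch of a semantic-analyzer hook that rejects exclusive DDL on one specific table, assuming HiveSemanticAnalyzerHookContext.getOutputs(); signatures follow recent Hive (older releases declare the task list as List<Task<? extends Serializable>>), and the class and table names are made up.

import java.util.List;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;

// Hypothetical hook class; "audit_log" is a made-up protected table name.
public class ProtectTableHook extends AbstractSemanticAnalyzerHook {
    @Override
    public void postAnalyze(HiveSemanticAnalyzerHookContext context, List<Task<?>> rootTasks) throws SemanticException {
        for (WriteEntity output : context.getOutputs()) {
            boolean exclusiveDdl = output.getWriteType() == WriteEntity.WriteType.DDL_EXCLUSIVE;
            if (exclusiveDdl && output.getTable() != null
                    && "audit_log".equalsIgnoreCase(output.getTable().getTableName())) {
                throw new SemanticException("Exclusive DDL on " + output.getName() + " is not allowed");
            }
        }
    }
}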

Example 43 with ReadEntity

Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.

From class DescTableAnalyzer, method analyzeInternal:

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    ctx.setResFile(ctx.getLocalTmpPath());
    ASTNode tableTypeExpr = (ASTNode) root.getChild(0);
    TableName tableName = getQualifiedTableName((ASTNode) tableTypeExpr.getChild(0));
    // if the database is not the one currently in use, validate that it exists
    if (tableName.getDb() != null) {
        db.validateDatabaseExists(tableName.getDb());
    }
    Table table = getTable(tableName);
    // process the second child node, if it exists, to get the partition spec(s)
    Map<String, String> partitionSpec = getPartitionSpec(db, tableTypeExpr, tableName);
    if (partitionSpec != null) {
        // validate that partition exists
        PartitionUtils.getPartition(db, table, partitionSpec, true);
    }
    // process the third child node, if it exists, to get the column path
    String columnPath = getColumnPath(db, tableTypeExpr, tableName, partitionSpec);
    boolean showColStats = false;
    boolean isFormatted = false;
    boolean isExt = false;
    if (root.getChildCount() == 2) {
        int descOptions = root.getChild(1).getType();
        isFormatted = descOptions == HiveParser.KW_FORMATTED;
        isExt = descOptions == HiveParser.KW_EXTENDED;
        // This is how we differentiate between describing a table and describing a column.
        if (columnPath != null && isFormatted) {
            showColStats = true;
        }
    }
    inputs.add(new ReadEntity(table));
    DescTableDesc desc = new DescTableDesc(ctx.getResFile(), tableName, partitionSpec, columnPath, isExt, isFormatted);
    Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
    rootTasks.add(task);
    task.setFetchSource(true);
    String schema = showColStats ? DescTableDesc.COLUMN_STATISTICS_SCHEMA : DescTableDesc.SCHEMA;
    setFetchTask(createFetchTask(schema));
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), TableName (org.apache.hadoop.hive.common.TableName), Table (org.apache.hadoop.hive.ql.metadata.Table), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode)
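
DescTableAnalyzer registers only a ReadEntity, so consumers interested in read lineage typically filter the inputs set by entity type. A small illustrative helper (the class name is made up) that keeps only direct table-level reads, assuming Entity.Type and ReadEntity.isDirect() behave as in current Hive.

import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hive.ql.hooks.Entity;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;

// Hypothetical helper class for filtering the inputs set collected during analysis.
public final class ReadEntityFilters {
    private ReadEntityFilters() {
    }

    public static List<ReadEntity> directTableReads(Set<ReadEntity> inputs) {
        List<ReadEntity> result = new ArrayList<>();
        for (ReadEntity input : inputs) {
            // isDirect() is false for entities pulled in transitively, e.g. tables behind a view
            if (input.getType() == Entity.Type.TABLE && input.isDirect()) {
                result.add(input);
            }
        }
        return result;
    }
}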

Example 44 with ReadEntity

Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.

From class AlterTableTouchAnalyzer, method analyzeCommand:

@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpecFromFramework, ASTNode command) throws SemanticException {
    Table table = getTable(tableName);
    validateAlterTableType(table, AlterTableType.TOUCH, false);
    inputs.add(new ReadEntity(table));
    List<Map<String, String>> partitionSpecs = getPartitionSpecs(table, command);
    if (partitionSpecs.isEmpty()) {
        AlterTableTouchDesc desc = new AlterTableTouchDesc(tableName.getNotEmptyDbTable(), null);
        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
        rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
    } else {
        PartitionUtils.addTablePartsOutputs(db, outputs, table, partitionSpecs, false, WriteEntity.WriteType.DDL_NO_LOCK);
        for (Map<String, String> partitionSpec : partitionSpecs) {
            AlterTableTouchDesc desc = new AlterTableTouchDesc(tableName.getNotEmptyDbTable(), partitionSpec);
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
        }
    }
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), Table (org.apache.hadoop.hive.ql.metadata.Table), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), Map (java.util.Map), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
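
In the partitioned branch above, PartitionUtils.addTablePartsOutputs resolves each partition spec and registers a partition-level WriteEntity. The following is a simplified stand-in for that call, assuming Hive.getPartition(Table, Map, boolean); the real utility also handles partial specs and missing-partition policies, and the class name here is made up.

import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

// Hypothetical helper; a simplified stand-in, not the real PartitionUtils implementation.
public final class TouchOutputs {
    public static void addPartitionOutputs(Hive db, Set<WriteEntity> outputs, Table table,
            List<Map<String, String>> partitionSpecs) throws HiveException {
        for (Map<String, String> spec : partitionSpecs) {
            // resolve the partition without creating it (forceCreate = false)
            Partition partition = db.getPartition(table, spec, false);
            if (partition != null) {
                // TOUCH only needs a metadata-level write, hence DDL_NO_LOCK
                outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK));
            }
        }
    }
}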

Example 45 with ReadEntity

Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.

From class AlterTableTouchOperation, method execute:

@Override
public int execute() throws HiveException {
    // TODO: catalog
    Table table = context.getDb().getTable(desc.getTableName());
    EnvironmentContext environmentContext = new EnvironmentContext();
    environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
    if (desc.getPartitionSpec() == null) {
        context.getDb().alterTable(table, false, environmentContext, true);
        context.getWork().getInputs().add(new ReadEntity(table));
        DDLUtils.addIfAbsentByName(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK), context);
    } else {
        Partition part = context.getDb().getPartition(table, desc.getPartitionSpec(), false);
        if (part == null) {
            throw new HiveException("Specified partition does not exist");
        }
        try {
            context.getDb().alterPartition(table.getCatalogName(), table.getDbName(), table.getTableName(), part, environmentContext, true);
        } catch (InvalidOperationException e) {
            throw new HiveException(e);
        }
        context.getWork().getInputs().add(new ReadEntity(part));
        DDLUtils.addIfAbsentByName(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK), context);
    }
    return 0;
}
Also used: EnvironmentContext (org.apache.hadoop.hive.metastore.api.EnvironmentContext), ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), Partition (org.apache.hadoop.hive.ql.metadata.Partition), Table (org.apache.hadoop.hive.ql.metadata.Table), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
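
The operation above calls DDLUtils.addIfAbsentByName so that the same entity is not registered twice at execution time. A simplified stand-in showing the by-name check, assuming Entity.getName(); the real utility operates on the DDLOperationContext rather than a raw set, and the class name here is made up.

import java.util.Set;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

// Hypothetical helper; a simplified stand-in for DDLUtils.addIfAbsentByName.
public final class EntitySets {
    public static boolean addIfAbsentByName(WriteEntity newEntity, Set<WriteEntity> outputs) {
        for (WriteEntity existing : outputs) {
            // entity names encode the object identity, typically of the form "dbname@tablename"
            if (existing.getName().equalsIgnoreCase(newEntity.getName())) {
                return false;
            }
        }
        outputs.add(newEntity);
        return true;
    }
}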

Aggregations

ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity) 139
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity) 70
Table (org.apache.hadoop.hive.ql.metadata.Table) 69
DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork) 31
Partition (org.apache.hadoop.hive.ql.metadata.Partition) 29
ArrayList (java.util.ArrayList) 27
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 27
DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork) 24
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException) 22
HashMap (java.util.HashMap) 16
Test (org.testng.annotations.Test) 16
Map (java.util.Map) 13
LinkedHashMap (java.util.LinkedHashMap) 12
Path (org.apache.hadoop.fs.Path) 12
List (java.util.List) 11
Database (org.apache.hadoop.hive.metastore.api.Database) 11
AtlasEntity (org.apache.atlas.model.instance.AtlasEntity) 10
Referenceable (org.apache.atlas.typesystem.Referenceable) 10
HashSet (java.util.HashSet) 9
FileNotFoundException (java.io.FileNotFoundException) 7