
Example 41 with SemanticException

Use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.

From the class AddNotNullConstraintHandler, method handle.

@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
    AddNotNullConstraintMessage msg = deserializer.getAddNotNullConstraintMessage(context.dmd.getPayload());
    List<SQLNotNullConstraint> nns = null;
    try {
        nns = msg.getNotNullConstraints();
    } catch (Exception e) {
        if (!(e instanceof SemanticException)) {
            throw new SemanticException("Error reading message members", e);
        } else {
            throw (SemanticException) e;
        }
    }
    List<Task<? extends Serializable>> tasks = new ArrayList<Task<? extends Serializable>>();
    if (nns.isEmpty()) {
        return tasks;
    }
    String actualDbName = context.isDbNameEmpty() ? nns.get(0).getTable_db() : context.dbName;
    String actualTblName = context.isTableNameEmpty() ? nns.get(0).getTable_name() : context.tableName;
    for (SQLNotNullConstraint nn : nns) {
        nn.setTable_db(actualDbName);
        nn.setTable_name(actualTblName);
    }
    AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, new ArrayList<SQLPrimaryKey>(), new ArrayList<SQLForeignKey>(), new ArrayList<SQLUniqueConstraint>(), nns, new ArrayList<SQLDefaultConstraint>(), new ArrayList<SQLCheckConstraint>(), context.eventOnlyReplicationSpec());
    Task<DDLWork> addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc));
    tasks.add(addConstraintsTask);
    context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName);
    updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
    return Collections.singletonList(addConstraintsTask);
}
Also used: SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint), AlterTableDesc(org.apache.hadoop.hive.ql.plan.AlterTableDesc), Task(org.apache.hadoop.hive.ql.exec.Task), Serializable(java.io.Serializable), SQLPrimaryKey(org.apache.hadoop.hive.metastore.api.SQLPrimaryKey), SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint), SQLForeignKey(org.apache.hadoop.hive.metastore.api.SQLForeignKey), ArrayList(java.util.ArrayList), AddNotNullConstraintMessage(org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage), SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException), SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint), SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint), DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork)
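The try/catch in this handler follows an idiom that recurs in every example below: if the caught exception is already a SemanticException it is rethrown as-is, otherwise it is wrapped so callers only ever see SemanticException. The following is a minimal, framework-free sketch of that wrap-or-rethrow pattern; HandlerException and readMessageMembers are illustrative stand-ins, not Hive APIs.

import java.util.List;

public class WrapOrRethrowSketch {

    // Stand-in for org.apache.hadoop.hive.ql.parse.SemanticException.
    static class HandlerException extends Exception {
        HandlerException(String msg) { super(msg); }
        HandlerException(String msg, Throwable cause) { super(msg, cause); }
    }

    // Hypothetical message accessor; in the handler above this is msg.getNotNullConstraints().
    static List<String> readMessageMembers() throws Exception {
        throw new IllegalStateException("payload could not be parsed");
    }

    static List<String> readOrWrap() throws HandlerException {
        try {
            return readMessageMembers();
        } catch (Exception e) {
            // Rethrow if the exception already has the declared type,
            // otherwise wrap it so callers deal with one exception type only.
            throw (e instanceof HandlerException)
                    ? (HandlerException) e
                    : new HandlerException("Error reading message members", e);
        }
    }

    public static void main(String[] args) {
        try {
            readOrWrap();
        } catch (HandlerException e) {
            System.out.println(e.getMessage() + " (cause: " + e.getCause() + ")");
        }
    }
}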

Example 42 with SemanticException

Use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.

From the class AlterDatabaseHandler, method handle.

@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
    if (!context.isTableNameEmpty()) {
        throw new SemanticException("Alter Database are not supported for table-level replication");
    }
    AlterDatabaseMessage msg = deserializer.getAlterDatabaseMessage(context.dmd.getPayload());
    String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
    try {
        Database oldDb = msg.getDbObjBefore();
        Database newDb = msg.getDbObjAfter();
        AlterDatabaseDesc alterDbDesc;
        if ((oldDb.getOwnerType() == newDb.getOwnerType()) && oldDb.getOwnerName().equalsIgnoreCase(newDb.getOwnerName())) {
            // If owner information is unchanged, then DB properties would've changed
            Map<String, String> newDbProps = new HashMap<>();
            Map<String, String> dbProps = newDb.getParameters();
            for (Map.Entry<String, String> entry : dbProps.entrySet()) {
                String key = entry.getKey();
                // Ignore the keys which are local to source warehouse
                if (key.startsWith(Utils.BOOTSTRAP_DUMP_STATE_KEY_PREFIX) || key.equals(ReplicationSpec.KEY.CURR_STATE_ID.toString())) {
                    continue;
                }
                newDbProps.put(key, entry.getValue());
            }
            alterDbDesc = new AlterDatabaseDesc(actualDbName, newDbProps, context.eventOnlyReplicationSpec());
        } else {
            alterDbDesc = new AlterDatabaseDesc(actualDbName, new PrincipalDesc(newDb.getOwnerName(), newDb.getOwnerType()), context.eventOnlyReplicationSpec());
        }
        Task<DDLWork> alterDbTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, alterDbDesc));
        context.log.debug("Added alter database task : {}:{}", alterDbTask.getId(), actualDbName);
        // Only database object is updated
        updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, null, null);
        return Collections.singletonList(alterDbTask);
    } catch (Exception e) {
        throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
    }
}
Also used: HashMap(java.util.HashMap), SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException), PrincipalDesc(org.apache.hadoop.hive.ql.plan.PrincipalDesc), AlterDatabaseMessage(org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage), DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork), Database(org.apache.hadoop.hive.metastore.api.Database), AlterDatabaseDesc(org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc), Map(java.util.Map)
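The property-copy loop in this handler drops keys that are local to the source warehouse before building the alter-database descriptor. Below is a standalone sketch of just that filtering step using plain JDK maps; the prefix and state-id key values are made-up stand-ins for Utils.BOOTSTRAP_DUMP_STATE_KEY_PREFIX and ReplicationSpec.KEY.CURR_STATE_ID.

import java.util.HashMap;
import java.util.Map;

public class DbPropsFilterSketch {

    // Illustrative stand-ins for the source-local keys skipped in AlterDatabaseHandler.
    private static final String BOOTSTRAP_DUMP_STATE_KEY_PREFIX = "bootstrap.dump.state.";
    private static final String CURR_STATE_ID_KEY = "repl.last.id";

    static Map<String, String> filterSourceLocalProps(Map<String, String> dbProps) {
        Map<String, String> newDbProps = new HashMap<>();
        for (Map.Entry<String, String> entry : dbProps.entrySet()) {
            String key = entry.getKey();
            // Ignore the keys which are local to the source warehouse.
            if (key.startsWith(BOOTSTRAP_DUMP_STATE_KEY_PREFIX) || key.equals(CURR_STATE_ID_KEY)) {
                continue;
            }
            newDbProps.put(key, entry.getValue());
        }
        return newDbProps;
    }

    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("owner.team", "analytics");
        props.put(BOOTSTRAP_DUMP_STATE_KEY_PREFIX + "db1", "ACTIVE");
        props.put(CURR_STATE_ID_KEY, "42");
        // Prints only {owner.team=analytics}; the source-local keys are dropped.
        System.out.println(filterSourceLocalProps(props));
    }
}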

Example 43 with SemanticException

Use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.

From the class CreateFunctionHandler, method handle.

@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
    try {
        FunctionDescBuilder builder = new FunctionDescBuilder(context);
        CreateFunctionDesc descToLoad = builder.build();
        this.functionName = builder.metadata.function.getFunctionName();
        context.log.debug("Loading function desc : {}", descToLoad.toString());
        Task<FunctionWork> createTask = TaskFactory.get(new FunctionWork(descToLoad));
        context.log.debug("Added create function task : {}:{},{}", createTask.getId(), descToLoad.getFunctionName(), descToLoad.getClassName());
        // context.dmd can be null here; update the replication metadata only when an event is available.
        if (context.dmd != null) {
            updatedMetadata.set(context.dmd.getEventTo().toString(), builder.destinationDbName, null, null);
        }
        readEntitySet.add(toReadEntity(new Path(context.location), context.hiveConf));
        if (builder.replCopyTasks.isEmpty()) {
            // repl copy tasks only happen for jars on HDFS, not otherwise.
            return Collections.singletonList(createTask);
        } else {
            /**
             * Notes on how task dependencies work.
             * All root tasks are executed in parallel. For bootstrap replication there should be only one root task (creating the db); incremental replication can have multiple (to be verified).
             * A task has children, which are queued for execution once the parent has finished.
             * A one-to-one dependency is satisfied by adding the child to the given task, recursively wherever the relation holds.
             * For many-to-one, create a barrier task that is a child of every task in the 'many' set, make the 'one' task a child of the barrier task,
             * and add the 'many' tasks as parent/root tasks. The execution environment ensures the barrier task does not run until all of its parents are complete,
             * at which point the barrier's child is picked up.
             */
            Task<? extends Serializable> barrierTask = TaskFactory.get(new DependencyCollectionWork());
            builder.replCopyTasks.forEach(t -> t.addDependentTask(barrierTask));
            barrierTask.addDependentTask(createTask);
            return builder.replCopyTasks;
        }
    } catch (Exception e) {
        throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
    }
}
Also used: Path(org.apache.hadoop.fs.Path), DependencyCollectionWork(org.apache.hadoop.hive.ql.plan.DependencyCollectionWork), CreateFunctionDesc(org.apache.hadoop.hive.ql.plan.CreateFunctionDesc), SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException), IOException(java.io.IOException), FunctionWork(org.apache.hadoop.hive.ql.plan.FunctionWork)
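The comment block in the example above describes the many-to-one dependency trick: every copy task gets a shared barrier child, and the real work hangs off the barrier so it runs only after all copies finish. Here is a toy sketch of that wiring with a hand-rolled Task class and a simple scheduler, not Hive's TaskFactory or DependencyCollectionWork.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

public class BarrierTaskSketch {

    // Toy task: a name, a list of children, and a count of unfinished parents.
    static class Task {
        final String name;
        final List<Task> children = new ArrayList<>();
        int pendingParents = 0;

        Task(String name) { this.name = name; }

        void addDependentTask(Task child) {
            children.add(child);
            child.pendingParents++;
        }
    }

    // Run root tasks, releasing a child only when all of its parents have finished.
    static void execute(List<Task> roots) {
        Deque<Task> ready = new ArrayDeque<>(roots);
        while (!ready.isEmpty()) {
            Task t = ready.poll();
            System.out.println("running " + t.name);
            for (Task child : t.children) {
                if (--child.pendingParents == 0) {
                    ready.add(child);
                }
            }
        }
    }

    public static void main(String[] args) {
        Task copy1 = new Task("replCopy-1");
        Task copy2 = new Task("replCopy-2");
        Task barrier = new Task("barrier");        // stands in for the barrier task
        Task createFn = new Task("createFunction");

        // Many-to-one: every copy task is a parent of the barrier,
        // and the create-function task is the barrier's only child.
        copy1.addDependentTask(barrier);
        copy2.addDependentTask(barrier);
        barrier.addDependentTask(createFn);

        // Only the copy tasks are roots; createFunction runs last.
        execute(List.of(copy1, copy2));
    }
}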

Example 44 with SemanticException

Use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.

From the class DropPartitionHandler, method handle.

@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
    try {
        DropPartitionMessage msg = deserializer.getDropPartitionMessage(context.dmd.getPayload());
        String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
        String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName;
        Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs = genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions());
        if (partSpecs.size() > 0) {
            DropTableDesc dropPtnDesc = new DropTableDesc(actualDbName + "." + actualTblName, partSpecs, null, true, context.eventOnlyReplicationSpec());
            Task<DDLWork> dropPtnTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc));
            context.log.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(), dropPtnDesc.getTableName(), msg.getPartitions());
            updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
            return Collections.singletonList(dropPtnTask);
        } else {
            throw new SemanticException("DROP PARTITION EVENT does not return any part descs for event message :" + context.dmd.getPayload());
        }
    } catch (Exception e) {
        throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
    }
}
Also used: Table(org.apache.hadoop.hive.ql.metadata.Table), DropPartitionMessage(org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage), DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork), DropTableDesc(org.apache.hadoop.hive.ql.plan.DropTableDesc), ArrayList(java.util.ArrayList), List(java.util.List), SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)

Example 45 with SemanticException

Use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.

From the class RenamePartitionHandler, method handle.

@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
    AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload());
    String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
    String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName;
    Map<String, String> newPartSpec = new LinkedHashMap<>();
    Map<String, String> oldPartSpec = new LinkedHashMap<>();
    String tableName = actualDbName + "." + actualTblName;
    try {
        Table tblObj = msg.getTableObj();
        Iterator<String> beforeIterator = msg.getPtnObjBefore().getValuesIterator();
        Iterator<String> afterIterator = msg.getPtnObjAfter().getValuesIterator();
        for (FieldSchema fs : tblObj.getPartitionKeys()) {
            oldPartSpec.put(fs.getName(), beforeIterator.next());
            newPartSpec.put(fs.getName(), afterIterator.next());
        }
    } catch (Exception e) {
        throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
    }
    RenamePartitionDesc renamePtnDesc = new RenamePartitionDesc(tableName, oldPartSpec, newPartSpec, context.eventOnlyReplicationSpec());
    Task<DDLWork> renamePtnTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, renamePtnDesc));
    context.log.debug("Added rename ptn task : {}:{}->{}", renamePtnTask.getId(), oldPartSpec, newPartSpec);
    updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, newPartSpec);
    return Collections.singletonList(renamePtnTask);
}
Also used: Table(org.apache.hadoop.hive.metastore.api.Table), DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork), FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema), RenamePartitionDesc(org.apache.hadoop.hive.ql.plan.RenamePartitionDesc), AlterPartitionMessage(org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage), SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException), LinkedHashMap(java.util.LinkedHashMap)
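The loop in this handler walks the table's partition keys once while consuming the before/after value iterators in lock-step, so each key is paired with its old and new value. A self-contained sketch of that pairing with plain JDK collections follows; the key and value lists are made-up stand-ins for the metastore table and partition objects.

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RenamePartSpecSketch {

    // Pair each partition key with its value before and after the rename.
    static void buildPartSpecs(List<String> partitionKeys,
                               Iterator<String> beforeValues,
                               Iterator<String> afterValues,
                               Map<String, String> oldPartSpec,
                               Map<String, String> newPartSpec) {
        for (String key : partitionKeys) {
            oldPartSpec.put(key, beforeValues.next());
            newPartSpec.put(key, afterValues.next());
        }
    }

    public static void main(String[] args) {
        // Made-up partition keys and values standing in for the metastore objects.
        List<String> keys = List.of("ds", "hr");
        List<String> before = List.of("2024-01-01", "01");
        List<String> after = List.of("2024-01-02", "01");

        Map<String, String> oldSpec = new LinkedHashMap<>();
        Map<String, String> newSpec = new LinkedHashMap<>();
        buildPartSpecs(keys, before.iterator(), after.iterator(), oldSpec, newSpec);

        // LinkedHashMap keeps the partition-key order, matching the handler above.
        System.out.println(oldSpec + " -> " + newSpec);
    }
}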

Aggregations

SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 131 usages
ArrayList (java.util.ArrayList): 64 usages
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 36 usages
HashMap (java.util.HashMap): 30 usages
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 27 usages
Path (org.apache.hadoop.fs.Path): 22 usages
IOException (java.io.IOException): 20 usages
LinkedHashMap (java.util.LinkedHashMap): 19 usages
List (java.util.List): 18 usages
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 18 usages
Node (org.apache.hadoop.hive.ql.lib.Node): 17 usages
ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc): 17 usages
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 16 usages
DefaultRuleDispatcher (org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher): 16 usages
Dispatcher (org.apache.hadoop.hive.ql.lib.Dispatcher): 16 usages
GraphWalker (org.apache.hadoop.hive.ql.lib.GraphWalker): 16 usages
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc): 16 usages
Operator (org.apache.hadoop.hive.ql.exec.Operator): 15 usages
DefaultGraphWalker (org.apache.hadoop.hive.ql.lib.DefaultGraphWalker): 15 usages
Table (org.apache.hadoop.hive.ql.metadata.Table): 14 usages