Use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.
The class AddNotNullConstraintHandler, method handle().
@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
  AddNotNullConstraintMessage msg = deserializer.getAddNotNullConstraintMessage(context.dmd.getPayload());
  List<SQLNotNullConstraint> nns = null;
  try {
    nns = msg.getNotNullConstraints();
  } catch (Exception e) {
    if (!(e instanceof SemanticException)) {
      throw new SemanticException("Error reading message members", e);
    } else {
      throw (SemanticException) e;
    }
  }
  List<Task<? extends Serializable>> tasks = new ArrayList<Task<? extends Serializable>>();
  if (nns.isEmpty()) {
    return tasks;
  }
  // Point the constraints at the database/table actually being loaded rather than the source names.
  String actualDbName = context.isDbNameEmpty() ? nns.get(0).getTable_db() : context.dbName;
  String actualTblName = context.isTableNameEmpty() ? nns.get(0).getTable_name() : context.tableName;
  for (SQLNotNullConstraint nn : nns) {
    nn.setTable_db(actualDbName);
    nn.setTable_name(actualTblName);
  }
  AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName,
      new ArrayList<SQLPrimaryKey>(), new ArrayList<SQLForeignKey>(), new ArrayList<SQLUniqueConstraint>(),
      nns, new ArrayList<SQLDefaultConstraint>(), new ArrayList<SQLCheckConstraint>(),
      context.eventOnlyReplicationSpec());
  Task<DDLWork> addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc));
  tasks.add(addConstraintsTask);
  context.log.debug("Added add constraints task : {}:{}", addConstraintsTask.getId(), actualTblName);
  updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
  return Collections.singletonList(addConstraintsTask);
}
Use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.
The class AlterDatabaseHandler, method handle().
@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
  if (!context.isTableNameEmpty()) {
    throw new SemanticException("Alter Database is not supported for table-level replication");
  }
  AlterDatabaseMessage msg = deserializer.getAlterDatabaseMessage(context.dmd.getPayload());
  String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
  try {
    Database oldDb = msg.getDbObjBefore();
    Database newDb = msg.getDbObjAfter();
    AlterDatabaseDesc alterDbDesc;
    if ((oldDb.getOwnerType() == newDb.getOwnerType()) && oldDb.getOwnerName().equalsIgnoreCase(newDb.getOwnerName())) {
      // If the owner information is unchanged, then the DB properties must have changed.
      Map<String, String> newDbProps = new HashMap<>();
      Map<String, String> dbProps = newDb.getParameters();
      for (Map.Entry<String, String> entry : dbProps.entrySet()) {
        String key = entry.getKey();
        // Ignore the keys which are local to the source warehouse.
        if (key.startsWith(Utils.BOOTSTRAP_DUMP_STATE_KEY_PREFIX)
            || key.equals(ReplicationSpec.KEY.CURR_STATE_ID.toString())) {
          continue;
        }
        newDbProps.put(key, entry.getValue());
      }
      alterDbDesc = new AlterDatabaseDesc(actualDbName, newDbProps, context.eventOnlyReplicationSpec());
    } else {
      alterDbDesc = new AlterDatabaseDesc(actualDbName, new PrincipalDesc(newDb.getOwnerName(), newDb.getOwnerType()),
          context.eventOnlyReplicationSpec());
    }
    Task<DDLWork> alterDbTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, alterDbDesc));
    context.log.debug("Added alter database task : {}:{}", alterDbTask.getId(), actualDbName);
    // Only the database object is updated.
    updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, null, null);
    return Collections.singletonList(alterDbTask);
  } catch (Exception e) {
    throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
  }
}
Use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.
The class CreateFunctionHandler, method handle().
@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
  try {
    FunctionDescBuilder builder = new FunctionDescBuilder(context);
    CreateFunctionDesc descToLoad = builder.build();
    this.functionName = builder.metadata.function.getFunctionName();
    context.log.debug("Loading function desc : {}", descToLoad.toString());
    Task<FunctionWork> createTask = TaskFactory.get(new FunctionWork(descToLoad));
    context.log.debug("Added create function task : {}:{},{}", createTask.getId(), descToLoad.getFunctionName(),
        descToLoad.getClassName());
    // context.dmd may be null when this handler runs outside of an incremental event, so only record the
    // event metadata when it is present. There may be a better way to do this across different handlers;
    // unless this becomes a common pattern, leaving this here.
    if (context.dmd != null) {
      updatedMetadata.set(context.dmd.getEventTo().toString(), builder.destinationDbName, null, null);
    }
    readEntitySet.add(toReadEntity(new Path(context.location), context.hiveConf));
    if (builder.replCopyTasks.isEmpty()) {
      // Repl copy tasks only happen for jars on HDFS, not otherwise.
      return Collections.singletonList(createTask);
    } else {
      /**
       * This is to understand how task dependencies work.
       * All root tasks are executed in parallel. For bootstrap replication there should be only one root
       * task of creating the db. Incremental can have multiple root tasks (have to verify).
       * A task has children, which are put in the queue for execution after the parent has finished.
       * A one-to-one dependency can be satisfied by adding the child to the given task; do this recursively
       * wherever the relation holds.
       * For many-to-one, create a barrier task that is the child of every item in the 'many' dependencies,
       * and make the 'one' dependency a child of the barrier task. Add the 'many' tasks to the parent/root
       * tasks. The execution environment makes sure the barrier task does not run until all of its parents
       * are complete, which only happens when the last of them has finished, at which point the child of
       * the barrier task is picked up.
       */
      Task<? extends Serializable> barrierTask = TaskFactory.get(new DependencyCollectionWork());
      builder.replCopyTasks.forEach(t -> t.addDependentTask(barrierTask));
      barrierTask.addDependentTask(createTask);
      return builder.replCopyTasks;
    }
  } catch (Exception e) {
    throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
  }
}
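The long comment in the snippet above describes the many-to-one wiring in general terms. Below is a minimal, self-contained sketch of just that wiring, assuming the same hive-exec classes are on the classpath; the stand-in parent tasks (built from DependencyCollectionWork purely as placeholders for the repl copy tasks) and the class name BarrierWiringSketch are hypothetical and not part of the Hive code base.

import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork;

public class BarrierWiringSketch {
  public static void main(String[] args) {
    // The "many" side: stand-in root tasks (the repl copy tasks in the handler).
    Task<? extends Serializable> copy1 = TaskFactory.get(new DependencyCollectionWork());
    Task<? extends Serializable> copy2 = TaskFactory.get(new DependencyCollectionWork());
    List<Task<? extends Serializable>> rootTasks = Arrays.asList(copy1, copy2);

    // The barrier is made a child of every "many" task, so it is queued only after all of them finish.
    Task<? extends Serializable> barrier = TaskFactory.get(new DependencyCollectionWork());
    rootTasks.forEach(t -> t.addDependentTask(barrier));

    // The "one" side (the create-function task in the handler) hangs off the barrier.
    Task<? extends Serializable> createTask = TaskFactory.get(new DependencyCollectionWork());
    barrier.addDependentTask(createTask);

    // Only the root tasks are handed back to the caller; the execution engine walks the graph from there.
  }
}

The same shape applies whenever several copy or load tasks must all complete before a single follow-up task is allowed to run.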
Use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.
The class DropPartitionHandler, method handle().
@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
  try {
    DropPartitionMessage msg = deserializer.getDropPartitionMessage(context.dmd.getPayload());
    String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
    String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName;
    Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs =
        genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions());
    if (partSpecs.size() > 0) {
      DropTableDesc dropPtnDesc = new DropTableDesc(actualDbName + "." + actualTblName, partSpecs, null, true,
          context.eventOnlyReplicationSpec());
      Task<DDLWork> dropPtnTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc));
      context.log.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(), dropPtnDesc.getTableName(),
          msg.getPartitions());
      updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
      return Collections.singletonList(dropPtnTask);
    } else {
      throw new SemanticException("DROP PARTITION EVENT does not return any part descs for event message :"
          + context.dmd.getPayload());
    }
  } catch (Exception e) {
    throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
  }
}
Use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.
The class RenamePartitionHandler, method handle().
@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
  AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload());
  String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
  String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName;
  Map<String, String> newPartSpec = new LinkedHashMap<>();
  Map<String, String> oldPartSpec = new LinkedHashMap<>();
  String tableName = actualDbName + "." + actualTblName;
  try {
    Table tblObj = msg.getTableObj();
    Iterator<String> beforeIterator = msg.getPtnObjBefore().getValuesIterator();
    Iterator<String> afterIterator = msg.getPtnObjAfter().getValuesIterator();
    for (FieldSchema fs : tblObj.getPartitionKeys()) {
      oldPartSpec.put(fs.getName(), beforeIterator.next());
      newPartSpec.put(fs.getName(), afterIterator.next());
    }
  } catch (Exception e) {
    throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
  }
  RenamePartitionDesc renamePtnDesc = new RenamePartitionDesc(tableName, oldPartSpec, newPartSpec,
      context.eventOnlyReplicationSpec());
  Task<DDLWork> renamePtnTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, renamePtnDesc));
  context.log.debug("Added rename ptn task : {}:{}->{}", renamePtnTask.getId(), oldPartSpec, newPartSpec);
  updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, newPartSpec);
  return Collections.singletonList(renamePtnTask);
}
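Every handler above converts whatever it catches into the checked SemanticException declared by handle(): an existing SemanticException is rethrown unchanged, anything else is wrapped. A minimal sketch of that recurring idiom, pulled out into a helper; the class and method names here are hypothetical and do not exist in the Hive code base.

import org.apache.hadoop.hive.ql.parse.SemanticException;

public final class SemanticExceptions {
  private SemanticExceptions() {
  }

  // Pass a SemanticException through untouched; wrap any other exception so that callers
  // only ever see the single checked type declared by handle().
  public static SemanticException toSemanticException(Exception e) {
    return (e instanceof SemanticException)
        ? (SemanticException) e
        : new SemanticException("Error reading message members", e);
  }
}

A catch block in any of the handlers could then read throw SemanticExceptions.toSemanticException(e); instead of repeating the conditional.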