Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The handle method of the DropDatabaseHandler class:
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  DropDatabaseMessage msg = deserializer.getDropDatabaseMessage(context.dmd.getPayload());
  String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
  DropDatabaseDesc desc = new DropDatabaseDesc(actualDbName, true, context.eventOnlyReplicationSpec());
  Task<?> dropDBTask = TaskFactory.get(
      new DDLWork(new HashSet<>(), new HashSet<>(), desc, true, context.getDumpDirectory(), context.getMetricCollector()),
      context.hiveConf);
  context.log.info("Added drop database task : {}:{}", dropDBTask.getId(), desc.getDatabaseName());
  updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, null, null);
  return Collections.singletonList(dropDBTask);
}
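Every handler in this listing repeats the same wrapping step: build a DDL descriptor, put it into a DDLWork together with the read/write entity sets, the replication dump directory, and the metric collector, and turn the work into a task with TaskFactory. The following is a minimal sketch of that shared shape, using only the calls visible in the snippets; the helper name wrapInDdlTask, its standalone form, and the assumption that the descriptors share the DDLDesc interface are illustrative, not part of Hive.

// Illustrative helper only; not part of the Hive codebase. It mirrors the DDLWork/TaskFactory
// wrapping that each handler in this listing performs inline.
private Task<DDLWork> wrapInDdlTask(DDLDesc desc, Context context) {
  DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), desc,
      true, // same boolean the handlers pass for replication-sourced DDL work
      context.getDumpDirectory(), context.getMetricCollector());
  return TaskFactory.get(work, context.hiveConf);
}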
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The handle method of the DropPartitionHandler class:
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  try {
    DropPartitionMessage msg = deserializer.getDropPartitionMessage(context.dmd.getPayload());
    String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
    String actualTblName = msg.getTable();
    Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs =
        ReplUtils.genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions());
    if (partSpecs.size() > 0) {
      AlterTableDropPartitionDesc dropPtnDesc = new AlterTableDropPartitionDesc(
          HiveTableName.ofNullable(actualTblName, actualDbName), partSpecs, true, context.eventOnlyReplicationSpec());
      Task<DDLWork> dropPtnTask = TaskFactory.get(
          new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc, true, context.getDumpDirectory(), context.getMetricCollector()),
          context.hiveConf);
      context.log.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(), dropPtnDesc.getTableName(), msg.getPartitions());
      updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
      return Collections.singletonList(dropPtnTask);
    } else {
      throw new SemanticException("DROP PARTITION EVENT does not return any part descs for event message :" + context.dmd.getPayload());
    }
  } catch (Exception e) {
    throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
  }
}
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The handle method of the RenamePartitionHandler class:
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload());
  String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
  String actualTblName = msg.getTable();
  Map<String, String> newPartSpec = new LinkedHashMap<>();
  Map<String, String> oldPartSpec = new LinkedHashMap<>();
  TableName tableName = TableName.fromString(actualTblName, null, actualDbName);
  Table tableObj;
  ReplicationSpec replicationSpec = context.eventOnlyReplicationSpec();
  try {
    Iterator<String> beforeIterator = msg.getPtnObjBefore().getValuesIterator();
    Iterator<String> afterIterator = msg.getPtnObjAfter().getValuesIterator();
    tableObj = msg.getTableObj();
    for (FieldSchema fs : tableObj.getPartitionKeys()) {
      oldPartSpec.put(fs.getName(), beforeIterator.next());
      newPartSpec.put(fs.getName(), afterIterator.next());
    }
    AlterTableRenamePartitionDesc renamePtnDesc =
        new AlterTableRenamePartitionDesc(tableName, oldPartSpec, newPartSpec, replicationSpec, null);
    renamePtnDesc.setWriteId(msg.getWriteId());
    Task<DDLWork> renamePtnTask = TaskFactory.get(
        new DDLWork(readEntitySet, writeEntitySet, renamePtnDesc, true, context.getDumpDirectory(), context.getMetricCollector()),
        context.hiveConf);
    context.log.debug("Added rename ptn task : {}:{}->{}", renamePtnTask.getId(), oldPartSpec, newPartSpec);
    updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, newPartSpec);
    return ReplUtils.addChildTask(renamePtnTask);
  } catch (Exception e) {
    throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
  }
}
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The handle method of the AddDefaultConstraintHandler class:
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  AddDefaultConstraintMessage msg = deserializer.getAddDefaultConstraintMessage(context.dmd.getPayload());
  List<SQLDefaultConstraint> dcs;
  try {
    dcs = msg.getDefaultConstraints();
  } catch (Exception e) {
    if (!(e instanceof SemanticException)) {
      throw new SemanticException("Error reading message members", e);
    } else {
      throw (SemanticException) e;
    }
  }
  List<Task<?>> tasks = new ArrayList<Task<?>>();
  if (dcs.isEmpty()) {
    return tasks;
  }
  final String actualDbName = context.isDbNameEmpty() ? dcs.get(0).getTable_db() : context.dbName;
  final String actualTblName = dcs.get(0).getTable_name();
  final TableName tName = TableName.fromString(actualTblName, null, actualDbName);
  for (SQLDefaultConstraint dc : dcs) {
    dc.setTable_db(actualDbName);
    dc.setTable_name(actualTblName);
  }
  Constraints constraints = new Constraints(null, null, null, null, dcs, null);
  AlterTableAddConstraintDesc addConstraintsDesc =
      new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints);
  Task<DDLWork> addConstraintsTask = TaskFactory.get(
      new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, true, context.getDumpDirectory(), context.getMetricCollector()),
      context.hiveConf);
  tasks.add(addConstraintsTask);
  context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName);
  updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
  return Collections.singletonList(addConstraintsTask);
}
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The handle method of the CreateFunctionHandler class:
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  try {
    FunctionDescBuilder builder = new FunctionDescBuilder(context);
    CreateFunctionDesc descToLoad = builder.build();
    this.functionName = builder.metadata.function.getFunctionName();
    context.log.debug("Loading function desc : {}", descToLoad.toString());
    Task<DDLWork> createTask = TaskFactory.get(
        new DDLWork(readEntitySet, writeEntitySet, descToLoad, true, context.getDumpDirectory(), context.getMetricCollector()),
        context.hiveConf);
    context.log.debug("Added create function task : {}:{},{}", createTask.getId(), descToLoad.getName(), descToLoad.getClassName());
    // different handlers, unless this is a common pattern that is seen, leaving this here.
    if (context.dmd != null) {
      updatedMetadata.set(context.dmd.getEventTo().toString(), builder.destinationDbName, null, null);
    }
    readEntitySet.add(toReadEntity(new Path(context.location), context.hiveConf));
    if (builder.replCopyTasks.isEmpty()) {
      // repl copy only happens for jars on hdfs, not otherwise.
      return Collections.singletonList(createTask);
    } else {
      /**
       * This is to understand how task dependencies work.
       * All root tasks are executed in parallel. For bootstrap replication there should be only one
       * root task of creating the db. Incremental can be multiple (have to verify).
       * A task has children, which are put in the queue for execution after the parent has finished.
       * A one-to-one dependency can be satisfied by adding children to a given task; do this
       * recursively where the relation holds.
       * For many-to-one, create a barrier task that is the child of every item in the 'many'
       * dependencies, make the 'one' dependency a child of the barrier task, and add the 'many' to
       * the parent/root tasks. The execution environment ensures that the barrier task does not run
       * until all of its parents are complete, which only happens when the last of them finishes, at
       * which point the child of the barrier task is picked up.
       */
      Task<?> barrierTask = TaskFactory.get(new DependencyCollectionWork(), context.hiveConf);
      builder.replCopyTasks.forEach(t -> t.addDependentTask(barrierTask));
      barrierTask.addDependentTask(createTask);
      return builder.replCopyTasks;
    }
  } catch (Exception e) {
    throw (e instanceof SemanticException) ? (SemanticException) e : new SemanticException("Error reading message members", e);
  }
}
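The block comment in the else branch above explains the many-to-one dependency trick: every task in the 'many' set gets the same barrier child, and the single downstream task hangs off that barrier. The following is a minimal sketch of just that wiring, built only from the calls already shown above; the method name addBarrierDependency and its standalone parameter list are illustrative, not Hive API.

// Illustrative sketch only: many-to-one task dependency through a DependencyCollectionWork barrier,
// the same shape CreateFunctionHandler.handle uses for builder.replCopyTasks and createTask.
private static List<Task<?>> addBarrierDependency(List<Task<?>> parents, Task<?> child, HiveConf conf) {
  Task<?> barrier = TaskFactory.get(new DependencyCollectionWork(), conf);
  // The barrier is a child of every parent, so it is queued only after all parents finish.
  parents.forEach(parent -> parent.addDependentTask(barrier));
  // The real child depends on the barrier alone, giving the many-to-one relationship.
  barrier.addDependentTask(child);
  // The parents remain the root tasks handed back to the execution environment.
  return parents;
}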