Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class CreateDataConnectorAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  boolean ifNotExists = false;
  String comment = null;
  String url = null;
  String type = null;
  Map<String, String> props = null;
  String connectorName = unescapeIdentifier(root.getChild(0).getText());
  // The remaining children are the optional clauses of CREATE CONNECTOR.
  for (int i = 1; i < root.getChildCount(); i++) {
    ASTNode childNode = (ASTNode) root.getChild(i);
    switch (childNode.getToken().getType()) {
    case HiveParser.TOK_IFNOTEXISTS:
      ifNotExists = true;
      break;
    case HiveParser.TOK_DATACONNECTORCOMMENT:
      comment = unescapeSQLString(childNode.getChild(0).getText());
      break;
    case HiveParser.TOK_DATACONNECTORPROPERTIES:
      props = getProps((ASTNode) childNode.getChild(0));
      break;
    case HiveParser.TOK_DATACONNECTORURL:
      url = unescapeSQLString(childNode.getChild(0).getText());
      // outputs.add(toWriteEntity(url));
      break;
    case HiveParser.TOK_DATACONNECTORTYPE:
      type = unescapeSQLString(childNode.getChild(0).getText());
      break;
    default:
      throw new SemanticException("Unrecognized token in CREATE CONNECTOR statement");
    }
  }
  DataConnector connector = new DataConnector(connectorName, type, url);
  if (comment != null) {
    connector.setDescription(comment);
  }
  if (props != null) {
    connector.setParameters(props);
  }
  // Wrap the descriptor in a DDLWork task and register the new connector as a write entity.
  CreateDataConnectorDesc desc = new CreateDataConnectorDesc(connectorName, type, url, ifNotExists, comment, props);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
  outputs.add(new WriteEntity(connector, WriteEntity.WriteType.DDL_NO_LOCK));
}
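Every snippet on this page ends the same way: the analyzer builds a descriptor, wraps it in a DDLWork together with its read and write entity sets, and registers the resulting task. Below is a minimal sketch of that shared pattern, reusing only calls that appear in the snippet above; the helper name addDdlTask is ours and purely illustrative.

// Minimal sketch of the shared DDLWork pattern, assuming the surrounding analyzer
// class and its members (rootTasks, getInputs(), getOutputs()) from the snippet above.
// The helper name addDdlTask does not exist in Hive; it only isolates the pattern.
private void addDdlTask(CreateDataConnectorDesc desc) {
  // Bundle the descriptor with the analyzer's read/write entity sets.
  DDLWork work = new DDLWork(getInputs(), getOutputs(), desc);
  // Turn the work into an executable task and make it a root task of the query plan.
  rootTasks.add(TaskFactory.get(work));
}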
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class MsckAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  if (root.getChildCount() == 0) {
    throw new SemanticException("MSCK command must have arguments");
  }
  ctx.setResFile(ctx.getLocalTmpPath());
  boolean repair = root.getChild(0).getType() == HiveParser.KW_REPAIR;
  int offset = repair ? 1 : 0;
  String tableName = getUnescapedName((ASTNode) root.getChild(0 + offset));
  boolean addPartitions = true;
  boolean dropPartitions = false;
  if (root.getChildCount() > 1 + offset) {
    addPartitions = isMsckAddPartition(root.getChild(1 + offset).getType());
    dropPartitions = isMsckDropPartition(root.getChild(1 + offset).getType());
  }
  Table table = getTable(tableName);
  Map<Integer, List<ExprNodeGenericFuncDesc>> partitionSpecs = ParseUtils.getFullPartitionSpecs(root, table, conf, false);
  byte[] filterExp = null;
  if (partitionSpecs != null && !partitionSpecs.isEmpty()) {
    // The expression proxy class needs to be PartitionExpressionForMetastore, since we intend to use
    // its filterPartitionsByExpr for partition pruning down the line.
    // Bail out early if expressionProxyClass is not configured properly.
    String expressionProxyClass = conf.get(MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS.getVarname());
    if (!PartitionExpressionForMetastore.class.getCanonicalName().equals(expressionProxyClass)) {
      throw new SemanticException("Invalid expression proxy class. The config metastore.expression.proxy needs "
          + "to be set to org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore");
    }
    // Fetch the first value of the partitionSpecs map, since it always has exactly one key/value pair.
    filterExp = SerializationUtilities.serializeExpressionToKryo(
        (ExprNodeGenericFuncDesc) ((List) partitionSpecs.values().toArray()[0]).get(0));
  }
  // Repairing a transactional table takes an exclusive DDL lock; otherwise a shared lock suffices.
  if (repair && AcidUtils.isTransactionalTable(table)) {
    outputs.add(new WriteEntity(table, WriteType.DDL_EXCLUSIVE));
  } else {
    outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_SHARED));
  }
  MsckDesc desc = new MsckDesc(tableName, filterExp, ctx.getResFile(), repair, addPartitions, dropPartitions);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
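The proxy-class check above only validates the configuration; a session that plans to run MSCK with a partition filter has to have it in place beforehand. A minimal sketch, assuming conf is the session's HiveConf and reusing the key and class name from the check above:

// Illustrative only: point the metastore expression proxy at the class the analyzer expects
// before compiling an MSCK statement that carries a partition filter.
conf.set(MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS.getVarname(),
    PartitionExpressionForMetastore.class.getCanonicalName());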
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class AbstractAlterDataConnectorAnalyzer, method addAlterDataConnectorDesc.
protected void addAlterDataConnectorDesc(AbstractAlterDataConnectorDesc alterDesc) throws SemanticException {
  DataConnector connector = getDataConnector(alterDesc.getConnectorName());
  outputs.add(new WriteEntity(connector, WriteEntity.WriteType.DDL_NO_LOCK));
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterDesc)));
}
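A concrete ALTER CONNECTOR analyzer only needs to build its descriptor and delegate to this helper. The sketch below illustrates that usage; it is not the Hive implementation, and the AlterDataConnectorSetUrlDesc constructor shown is an assumption:

// Illustrative sketch of a concrete subclass's analyzeInternal; the descriptor class
// and its (connectorName, url) constructor are assumed, not taken from the Hive source.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  String connectorName = unescapeIdentifier(root.getChild(0).getText());
  String newUrl = unescapeSQLString(root.getChild(1).getText());
  // Entity registration and DDLWork/task creation happen in the shared helper above.
  addAlterDataConnectorDesc(new AlterDataConnectorSetUrlDesc(connectorName, newUrl));
}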
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class DropDataConnectorAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  String connectorName = unescapeIdentifier(root.getChild(0).getText());
  boolean ifExists = root.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null;
  DataConnector connector = getDataConnector(connectorName, !ifExists);
  if (connector == null) {
    // Only reached when IF EXISTS was specified and the connector does not exist: nothing to do.
    return;
  }
  inputs.add(new ReadEntity(connector));
  outputs.add(new WriteEntity(connector, WriteEntity.WriteType.DDL_EXCLUSIVE));
  DropDataConnectorDesc desc = new DropDataConnectorDesc(connectorName, ifExists);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class AlterTableDropConstraintAnalyzer, method analyzeCommand.
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command) throws SemanticException {
  String constraintName = unescapeIdentifier(command.getChild(0).getText());
  AlterTableDropConstraintDesc desc = new AlterTableDropConstraintDesc(tableName, null, constraintName);
  Table table = getTable(tableName);
  WriteEntity.WriteType writeType;
  if (AcidUtils.isTransactionalTable(table)) {
    // Dropping a constraint on a transactional table is ACID DDL and takes an exclusive lock.
    setAcidDdlDesc(desc);
    writeType = WriteType.DDL_EXCLUSIVE;
  } else {
    writeType = WriteEntity.determineAlterTableWriteType(AlterTableType.DROP_CONSTRAINT);
  }
  inputs.add(new ReadEntity(table));
  WriteEntity alterTableOutput = new WriteEntity(table, writeType);
  outputs.add(alterTableOutput);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}