Use of org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint in project Hive by Apache.
The class DDLTask, method createTable.
/**
* Create a new table.
*
* @param db
* The database in question.
* @param crtTbl
* This is the table we're creating.
* @return Returns 0 when execution succeeds and above 0 if it fails.
* @throws HiveException
* Throws this exception if an unexpected error occurs.
*/
private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
// create the table
Table tbl = crtTbl.toTable(conf);
List<SQLPrimaryKey> primaryKeys = crtTbl.getPrimaryKeys();
List<SQLForeignKey> foreignKeys = crtTbl.getForeignKeys();
List<SQLUniqueConstraint> uniqueConstraints = crtTbl.getUniqueConstraints();
List<SQLNotNullConstraint> notNullConstraints = crtTbl.getNotNullConstraints();
List<SQLDefaultConstraint> defaultConstraints = crtTbl.getDefaultConstraints();
List<SQLCheckConstraint> checkConstraints = crtTbl.getCheckConstraints();
LOG.debug("creating table {} on {}", tbl.getFullyQualifiedName(), tbl.getDataLocation());
if (crtTbl.getReplicationSpec().isInReplicationScope() && (!crtTbl.getReplaceMode())) {
// If this is a replication spec, then replace-mode semantics might apply.
// If we're already asking for a table replacement, we can skip this check.
// Otherwise, if we are in replication scope and have not been explicitly asked
// to replace, check whether the object we're looking at exists, and if so,
// trigger replace-mode semantics.
Table existingTable = db.getTable(tbl.getDbName(), tbl.getTableName(), false);
if (existingTable != null) {
if (crtTbl.getReplicationSpec().allowEventReplacementInto(existingTable.getParameters())) {
// we replace existing table.
crtTbl.setReplaceMode(true);
} else {
LOG.debug("DDLTask: Create Table is skipped as table {} is newer than update", crtTbl.getTableName());
// no replacement, the existing table state is newer than our update.
return 0;
}
}
}
// create the table
if (crtTbl.getReplaceMode()) {
// replace-mode creates are really alters using CreateTableDesc.
db.alterTable(tbl, null);
} else {
if ((foreignKeys != null && foreignKeys.size() > 0) || (primaryKeys != null && primaryKeys.size() > 0) || (uniqueConstraints != null && uniqueConstraints.size() > 0) || (notNullConstraints != null && notNullConstraints.size() > 0) || (checkConstraints != null && checkConstraints.size() > 0) || (defaultConstraints != null && defaultConstraints.size() > 0)) {
db.createTable(tbl, crtTbl.getIfNotExists(), primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
} else {
db.createTable(tbl, crtTbl.getIfNotExists());
}
Long mmWriteId = crtTbl.getInitialMmWriteId();
if (crtTbl.isCTAS() || mmWriteId != null) {
Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName());
if (crtTbl.isCTAS()) {
DataContainer dc = new DataContainer(createdTable.getTTable());
queryState.getLineageState().setLineage(createdTable.getPath(), dc, createdTable.getCols());
}
}
}
addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
return 0;
}
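For orientation, here is a minimal sketch (not taken from DDLTask) of how the constraint lists consumed by the non-replace branch above might be assembled by hand before calling the constraint-aware createTable overload. The catalog, column, and constraint names are hypothetical, and the thrift setters simply mirror the getters used elsewhere in these snippets.
// Hedged sketch: declare a single NOT NULL constraint and create the table through
// the constraint-aware overload exercised in the branch above. All names are made up.
private void createTableWithNotNull(Hive db, Table tbl) throws HiveException {
  SQLNotNullConstraint nn = new SQLNotNullConstraint();
  nn.setCatName("hive");                               // assumption: default catalog name
  nn.setTable_db(tbl.getDbName());
  nn.setTable_name(tbl.getTableName());
  nn.setColumn_name("id");                             // hypothetical column
  nn.setNn_name("nn_" + tbl.getTableName() + "_id");   // normally auto-generated by the metastore
  nn.setEnable_cstr(false);                            // DISABLE
  nn.setValidate_cstr(false);                          // NOVALIDATE
  nn.setRely_cstr(true);                               // RELY
  List<SQLNotNullConstraint> notNulls = Collections.singletonList(nn);
  // Constraint kinds that were not declared are passed as null, mirroring the call above.
  db.createTable(tbl, true, null, null, null, notNulls, null, null);
}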
Use of org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint in project Hive by Apache.
The class TestReplicationScenarios, method testConstraints.
@Test
public void testConstraints() throws IOException {
String testName = "constraints";
String dbName = createDB(testName, driver);
String replDbName = dbName + "_dupe";
run("CREATE TABLE " + dbName + ".tbl1(a string, b string, primary key (a, b) disable novalidate rely)", driver);
run("CREATE TABLE " + dbName + ".tbl2(a string, b string, foreign key (a, b) references " + dbName + ".tbl1(a, b) disable novalidate)", driver);
run("CREATE TABLE " + dbName + ".tbl3(a string, b string not null disable, unique (a) disable)", driver);
run("CREATE TABLE " + dbName + ".tbl7(a string CHECK (a like 'a%'), price double CHECK (price > 0 AND price <= 1000))", driver);
run("CREATE TABLE " + dbName + ".tbl8(a string, b int DEFAULT 0)", driver);
Tuple bootstrapDump = bootstrapLoadAndVerify(dbName, replDbName);
String replDumpId = bootstrapDump.lastReplId;
try {
List<SQLPrimaryKey> pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(replDbName, "tbl1"));
assertEquals(pks.size(), 2);
List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl3"));
assertEquals(uks.size(), 1);
List<SQLForeignKey> fks = metaStoreClientMirror.getForeignKeys(new ForeignKeysRequest(null, null, replDbName, "tbl2"));
assertEquals(fks.size(), 2);
List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl3"));
assertEquals(nns.size(), 1);
List<SQLCheckConstraint> cks = metaStoreClientMirror.getCheckConstraints(new CheckConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl7"));
assertEquals(cks.size(), 2);
List<SQLDefaultConstraint> dks = metaStoreClientMirror.getDefaultConstraints(new DefaultConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl8"));
assertEquals(dks.size(), 1);
} catch (TException te) {
assertNull(te);
}
run("CREATE TABLE " + dbName + ".tbl4(a string, b string, primary key (a, b) disable novalidate rely)", driver);
run("CREATE TABLE " + dbName + ".tbl5(a string, b string, foreign key (a, b) references " + dbName + ".tbl4(a, b) disable novalidate)", driver);
run("CREATE TABLE " + dbName + ".tbl6(a string, b string not null disable, unique (a) disable)", driver);
run("CREATE TABLE " + dbName + ".tbl9(a string CHECK (a like 'a%'), price double CHECK (price > 0 AND price <= 1000))", driver);
run("CREATE TABLE " + dbName + ".tbl10(a string, b int DEFAULT 0)", driver);
Tuple incrementalDump = incrementalLoadAndVerify(dbName, replDbName);
replDumpId = incrementalDump.lastReplId;
String pkName = null;
String ukName = null;
String fkName = null;
String nnName = null;
String dkName1 = null;
String ckName1 = null;
String ckName2 = null;
try {
List<SQLPrimaryKey> pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(replDbName, "tbl4"));
assertEquals(pks.size(), 2);
pkName = pks.get(0).getPk_name();
List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl6"));
assertEquals(uks.size(), 1);
ukName = uks.get(0).getUk_name();
List<SQLForeignKey> fks = metaStoreClientMirror.getForeignKeys(new ForeignKeysRequest(null, null, replDbName, "tbl5"));
assertEquals(fks.size(), 2);
fkName = fks.get(0).getFk_name();
List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl6"));
assertEquals(nns.size(), 1);
nnName = nns.get(0).getNn_name();
List<SQLCheckConstraint> cks = metaStoreClientMirror.getCheckConstraints(new CheckConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl9"));
assertEquals(cks.size(), 2);
ckName1 = cks.get(0).getDc_name();
ckName2 = cks.get(1).getDc_name();
List<SQLDefaultConstraint> dks = metaStoreClientMirror.getDefaultConstraints(new DefaultConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl10"));
assertEquals(dks.size(), 1);
dkName1 = dks.get(0).getDc_name();
} catch (TException te) {
assertNull(te);
}
String dkName2 = "custom_dk_name";
String ckName3 = "customer_ck_name";
run("ALTER TABLE " + dbName + ".tbl10 CHANGE COLUMN a a string CONSTRAINT " + ckName3 + " CHECK (a like 'a%')", driver);
run("ALTER TABLE " + dbName + ".tbl10 CHANGE COLUMN b b int CONSTRAINT " + dkName2 + " DEFAULT 1 ENABLE", driver);
incrementalLoadAndVerify(dbName, replDbName);
try {
List<SQLDefaultConstraint> dks = metaStoreClientMirror.getDefaultConstraints(new DefaultConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl10"));
assertEquals(dks.size(), 2);
assertEquals(dks.get(1).getDefault_value(), "1");
List<SQLCheckConstraint> cks = metaStoreClientMirror.getCheckConstraints(new CheckConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl10"));
assertEquals(cks.size(), 1);
assertEquals(cks.get(0).getDc_name(), ckName3);
} catch (TException te) {
assertNull(te);
}
run("ALTER TABLE " + dbName + ".tbl4 DROP CONSTRAINT `" + pkName + "`", driver);
run("ALTER TABLE " + dbName + ".tbl4 DROP CONSTRAINT `" + ukName + "`", driver);
run("ALTER TABLE " + dbName + ".tbl5 DROP CONSTRAINT `" + fkName + "`", driver);
run("ALTER TABLE " + dbName + ".tbl6 DROP CONSTRAINT `" + nnName + "`", driver);
run("ALTER TABLE " + dbName + ".tbl9 DROP CONSTRAINT `" + ckName1 + "`", driver);
run("ALTER TABLE " + dbName + ".tbl9 DROP CONSTRAINT `" + ckName2 + "`", driver);
run("ALTER TABLE " + dbName + ".tbl10 DROP CONSTRAINT `" + ckName3 + "`", driver);
run("ALTER TABLE " + dbName + ".tbl10 DROP CONSTRAINT `" + dkName1 + "`", driver);
run("ALTER TABLE " + dbName + ".tbl10 DROP CONSTRAINT `" + dkName2 + "`", driver);
incrementalLoadAndVerify(dbName, replDbName);
try {
List<SQLPrimaryKey> pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(replDbName, "tbl4"));
assertTrue(pks.isEmpty());
List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl4"));
assertTrue(uks.isEmpty());
List<SQLForeignKey> fks = metaStoreClientMirror.getForeignKeys(new ForeignKeysRequest(null, null, replDbName, "tbl5"));
assertTrue(fks.isEmpty());
List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl6"));
assertTrue(nns.isEmpty());
List<SQLDefaultConstraint> dks = metaStoreClientMirror.getDefaultConstraints(new DefaultConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl10"));
assertTrue(dks.isEmpty());
List<SQLCheckConstraint> cks = metaStoreClientMirror.getCheckConstraints(new CheckConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl9"));
assertTrue(cks.isEmpty());
cks = metaStoreClientMirror.getCheckConstraints(new CheckConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl10"));
assertTrue(cks.isEmpty());
dks = metaStoreClientMirror.getDefaultConstraints(new DefaultConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl12"));
assertTrue(dks.isEmpty());
cks = metaStoreClientMirror.getCheckConstraints(new CheckConstraintsRequest(DEFAULT_CATALOG_NAME, replDbName, "tbl12"));
assertTrue(cks.isEmpty());
} catch (TException te) {
assertNull(te);
}
}
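The incremental-dump checks above repeat one pattern: fetch a constraint list from the mirror metastore client and keep the generated name of its first entry so the constraint can later be dropped by name. A small hypothetical helper (not part of the test class) capturing the NOT NULL case, assuming the same NotNullConstraintsRequest and IMetaStoreClient call used above:
// Hypothetical helper: return the auto-generated name of the first NOT NULL
// constraint on the given replicated table, or null when the table has none.
private static String firstNotNullConstraintName(IMetaStoreClient client, String dbName, String tblName) throws TException {
  List<SQLNotNullConstraint> nns =
      client.getNotNullConstraints(new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, dbName, tblName));
  return nns.isEmpty() ? null : nns.get(0).getNn_name();
}
// Possible use, matching the drop statements issued later in the test:
// String nnName = firstNotNullConstraintName(metaStoreClientMirror, replDbName, "tbl6");
// run("ALTER TABLE " + dbName + ".tbl6 DROP CONSTRAINT `" + nnName + "`", driver);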
Use of org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint in project Hive by Apache.
The class LoadConstraint, method isNotNullConstraintsAlreadyLoaded.
private boolean isNotNullConstraintsAlreadyLoaded(String nnsMsgString) throws Exception {
AddNotNullConstraintMessage msg = deserializer.getAddNotNullConstraintMessage(nnsMsgString);
List<SQLNotNullConstraint> nnsInMsg = msg.getNotNullConstraints();
if (nnsInMsg.isEmpty()) {
return true;
}
String dbName = StringUtils.isBlank(dbNameToLoadIn) ? nnsInMsg.get(0).getTable_db() : dbNameToLoadIn;
List<SQLNotNullConstraint> nns;
try {
nns = context.hiveDb.getNotNullConstraintList(dbName, nnsInMsg.get(0).getTable_name());
} catch (NoSuchObjectException e) {
return false;
}
return CollectionUtils.isNotEmpty(nns);
}
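The helper above acts as an idempotency guard during REPL LOAD: if the target table already carries any NOT NULL constraints, the add-constraint event is considered already applied and can be skipped. A hedged sketch of how the analogous guard for unique constraints might look, assuming the message type, deserializer accessor, and Hive#getUniqueConstraintList follow the same naming pattern as their NOT NULL counterparts (none of these are shown in this snippet):
// Hypothetical parallel of the guard above for unique constraints; the APIs named
// here are assumptions based on the NOT NULL variant, not verified from this page.
private boolean isUniqueConstraintsAlreadyLoaded(String uksMsgString) throws Exception {
  AddUniqueConstraintMessage msg = deserializer.getAddUniqueConstraintMessage(uksMsgString);
  List<SQLUniqueConstraint> uksInMsg = msg.getUniqueConstraints();
  if (uksInMsg.isEmpty()) {
    return true;
  }
  String dbName = StringUtils.isBlank(dbNameToLoadIn) ? uksInMsg.get(0).getTable_db() : dbNameToLoadIn;
  List<SQLUniqueConstraint> uks;
  try {
    uks = context.hiveDb.getUniqueConstraintList(dbName, uksInMsg.get(0).getTable_name());
  } catch (NoSuchObjectException e) {
    return false;
  }
  return CollectionUtils.isNotEmpty(uks);
}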
Use of org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint in project Hive by Apache.
The class SemanticAnalyzer, method analyzeCreateTable.
/**
* Analyze the create table command. If it is a regular create-table or
* create-table-like statement, we create a DDLWork task and return null. If it is
* a create-table-as-select, we gather the necessary info such as the SerDe and
* Storage Format, put it in the QB, and return the select statement's AST node,
* indicating that the rest of the semantic analyzer needs to deal with the select
* statement with respect to that SerDe and Storage Format.
*/
ASTNode analyzeCreateTable(ASTNode ast, QB qb, PlannerContext plannerCtx) throws SemanticException {
TableName qualifiedTabName = getQualifiedTableName((ASTNode) ast.getChild(0));
final String dbDotTab = qualifiedTabName.getNotEmptyDbTable();
String likeTableName = null;
List<FieldSchema> cols = new ArrayList<FieldSchema>();
List<FieldSchema> partCols = new ArrayList<FieldSchema>();
List<String> partColNames = new ArrayList<>();
List<String> bucketCols = new ArrayList<String>();
List<SQLPrimaryKey> primaryKeys = new ArrayList<SQLPrimaryKey>();
List<SQLForeignKey> foreignKeys = new ArrayList<SQLForeignKey>();
List<SQLUniqueConstraint> uniqueConstraints = new ArrayList<>();
List<SQLNotNullConstraint> notNullConstraints = new ArrayList<>();
List<SQLDefaultConstraint> defaultConstraints = new ArrayList<>();
List<SQLCheckConstraint> checkConstraints = new ArrayList<>();
List<Order> sortCols = new ArrayList<Order>();
int numBuckets = -1;
String comment = null;
String location = null;
Map<String, String> tblProps = null;
boolean ifNotExists = false;
boolean isExt = false;
boolean isTemporary = false;
boolean isManaged = false;
boolean isMaterialization = false;
boolean isTransactional = false;
ASTNode selectStmt = null;
// regular CREATE TABLE
final int CREATE_TABLE = 0;
// CREATE TABLE LIKE ... (CTLT)
final int CTLT = 1;
// CREATE TABLE AS SELECT ... (CTAS)
final int CTAS = 2;
// CREATE TRANSACTIONAL TABLE
final int ctt = 3;
int command_type = CREATE_TABLE;
List<String> skewedColNames = new ArrayList<String>();
List<List<String>> skewedValues = new ArrayList<List<String>>();
Map<List<String>, String> listBucketColValuesMapping = new HashMap<List<String>, String>();
boolean storedAsDirs = false;
boolean isUserStorageFormat = false;
boolean partitionTransformSpecExists = false;
RowFormatParams rowFormatParams = new RowFormatParams();
StorageFormat storageFormat = new StorageFormat(conf);
LOG.info("Creating table " + dbDotTab + " position=" + ast.getCharPositionInLine());
int numCh = ast.getChildCount();
// set storage handler if default handler is provided in config
String defaultStorageHandler = HiveConf.getVar(conf, HIVE_DEFAULT_STORAGE_HANDLER);
if (defaultStorageHandler != null && !defaultStorageHandler.isEmpty()) {
LOG.info("Default storage handler class detected in config. Using storage handler class if exists: '{}'", defaultStorageHandler);
storageFormat.setStorageHandler(defaultStorageHandler);
isUserStorageFormat = true;
}
/*
* Check the 1st-level children and do simple semantic checks: 1) CTLT and
* CTAS should not coexist. 2) CTLT or CTAS should not coexist with column
* list (target table schema). 3) CTAS does not support partitioning (for
* now).
*/
for (int num = 1; num < numCh; num++) {
ASTNode child = (ASTNode) ast.getChild(num);
if (storageFormat.fillStorageFormat(child)) {
isUserStorageFormat = true;
continue;
}
switch(child.getToken().getType()) {
case HiveParser.TOK_IFNOTEXISTS:
ifNotExists = true;
break;
case HiveParser.KW_EXTERNAL:
isExt = true;
break;
case HiveParser.KW_MANAGED:
isManaged = true;
isTransactional = true;
break;
case HiveParser.KW_TEMPORARY:
isTemporary = true;
isMaterialization = MATERIALIZATION_MARKER.equals(child.getText());
break;
case HiveParser.KW_TRANSACTIONAL:
isTransactional = true;
command_type = ctt;
break;
case HiveParser.TOK_LIKETABLE:
if (child.getChildCount() > 0) {
likeTableName = getUnescapedName((ASTNode) child.getChild(0));
if (likeTableName != null) {
if (command_type == CTAS) {
throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
}
if (cols.size() != 0) {
throw new SemanticException(ErrorMsg.CTLT_COLLST_COEXISTENCE.getMsg());
}
}
command_type = CTLT;
}
break;
case HiveParser.TOK_QUERY: // CTAS
if (command_type == CTLT) {
throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
}
if (cols.size() != 0) {
throw new SemanticException(ErrorMsg.CTAS_COLLST_COEXISTENCE.getMsg());
}
if (partCols.size() != 0 || bucketCols.size() != 0) {
boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING);
if (dynPart == false) {
throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
} else {
// TODO: support dynamic partition for CTAS
throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
}
}
if (!conf.getBoolVar(ConfVars.HIVE_CTAS_EXTERNAL_TABLES) && isExt) {
throw new SemanticException(ErrorMsg.CTAS_EXTTBL_COEXISTENCE.getMsg());
}
command_type = CTAS;
if (plannerCtx != null) {
plannerCtx.setCTASToken(child);
}
selectStmt = child;
break;
case HiveParser.TOK_TABCOLLIST:
cols = getColumns(child, true, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, conf);
break;
case HiveParser.TOK_TABLECOMMENT:
comment = unescapeSQLString(child.getChild(0).getText());
break;
case HiveParser.TOK_TABLEPARTCOLS:
partCols = getColumns(child, false, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, conf);
if (hasConstraints(partCols, defaultConstraints, notNullConstraints, checkConstraints)) {
// TODO: these constraints should be supported for partition columns
throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("NOT NULL, DEFAULT and CHECK constraints are not allowed with " + "partition columns. "));
}
break;
case HiveParser.TOK_TABLEPARTCOLSBYSPEC:
List<PartitionTransformSpec> partitionTransformSpec = PartitionTransform.getPartitionTransformSpec(child);
if (!SessionStateUtil.addResource(conf, hive_metastoreConstants.PARTITION_TRANSFORM_SPEC, partitionTransformSpec)) {
throw new SemanticException("Query state attached to Session state must be not null. " + "Partition transform metadata cannot be saved.");
}
partitionTransformSpecExists = true;
break;
case HiveParser.TOK_TABLEPARTCOLNAMES:
partColNames = getColumnNames(child);
break;
case HiveParser.TOK_ALTERTABLE_BUCKETS:
bucketCols = getColumnNames((ASTNode) child.getChild(0));
if (child.getChildCount() == 2) {
numBuckets = Integer.parseInt(child.getChild(1).getText());
} else {
sortCols = getColumnNamesOrder((ASTNode) child.getChild(1));
numBuckets = Integer.parseInt(child.getChild(2).getText());
}
break;
case HiveParser.TOK_TABLEROWFORMAT:
rowFormatParams.analyzeRowFormat(child);
break;
case HiveParser.TOK_TABLELOCATION:
location = unescapeSQLString(child.getChild(0).getText());
location = EximUtil.relativeToAbsolutePath(conf, location);
inputs.add(toReadEntity(location));
break;
case HiveParser.TOK_TABLEPROPERTIES:
tblProps = getProps((ASTNode) child.getChild(0));
addPropertyReadEntry(tblProps, inputs);
break;
case HiveParser.TOK_TABLESERIALIZER:
child = (ASTNode) child.getChild(0);
storageFormat.setSerde(unescapeSQLString(child.getChild(0).getText()));
if (child.getChildCount() == 2) {
readProps((ASTNode) (child.getChild(1).getChild(0)), storageFormat.getSerdeProps());
}
break;
case HiveParser.TOK_TABLESKEWED:
/**
* Throw an error if the user tries to use the DDL with
* hive.internal.ddl.list.bucketing.enable set to false.
*/
HiveConf hiveConf = SessionState.get().getConf();
// skewed column names
skewedColNames = SkewedTableUtils.analyzeSkewedTableDDLColNames(child);
// skewed value
skewedValues = SkewedTableUtils.analyzeDDLSkewedValues(child);
// stored as directories
storedAsDirs = analyzeStoredAdDirs(child);
break;
default:
throw new AssertionError("Unknown token: " + child.getToken());
}
}
HiveStorageHandler handler;
try {
handler = HiveUtils.getStorageHandler(conf, storageFormat.getStorageHandler());
} catch (HiveException e) {
throw new SemanticException("Failed to load storage handler: " + e.getMessage());
}
if (handler != null) {
if (partitionTransformSpecExists && !handler.supportsPartitionTransform()) {
throw new SemanticException("Partition transform is not supported for " + handler.getClass().getName());
}
String fileFormatPropertyKey = handler.getFileFormatPropertyKey();
if (fileFormatPropertyKey != null) {
if (tblProps != null && tblProps.containsKey(fileFormatPropertyKey) && storageFormat.getSerdeProps() != null && storageFormat.getSerdeProps().containsKey(fileFormatPropertyKey)) {
String fileFormat = tblProps.get(fileFormatPropertyKey);
throw new SemanticException("Provide only one of the following: STORED BY " + fileFormat + " or WITH SERDEPROPERTIES('" + fileFormatPropertyKey + "'='" + fileFormat + "') or" + " TBLPROPERTIES('" + fileFormatPropertyKey + "'='" + fileFormat + "')");
}
}
}
if (command_type == CREATE_TABLE || command_type == CTLT || command_type == ctt) {
queryState.setCommandType(HiveOperation.CREATETABLE);
} else if (command_type == CTAS) {
queryState.setCommandType(HiveOperation.CREATETABLE_AS_SELECT);
} else {
throw new SemanticException("Unrecognized command.");
}
if (isExt && ConstraintsUtils.hasEnabledOrValidatedConstraints(notNullConstraints, defaultConstraints, checkConstraints)) {
throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Constraints are disallowed with External tables. " + "Only RELY is allowed."));
}
if (checkConstraints != null && !checkConstraints.isEmpty()) {
ConstraintsUtils.validateCheckConstraint(cols, checkConstraints, ctx.getConf());
}
storageFormat.fillDefaultStorageFormat(isExt, false);
// check for existence of table
if (ifNotExists) {
try {
Table table = getTable(qualifiedTabName, false);
if (table != null) {
// table exists
return null;
}
} catch (HiveException e) {
// should not occur since the second parameter to getTable is false
throw new IllegalStateException("Unexpected Exception thrown: " + e.getMessage(), e);
}
}
if (isTemporary) {
if (location == null) {
// No location was given: generate one; it has the same life cycle as the tmp table
try {
// Generate a unique ID for temp table path.
// This path will be fixed for the life of the temp table.
location = SessionState.generateTempTableLocation(conf);
} catch (MetaException err) {
throw new SemanticException("Error while generating temp table path:", err);
}
}
}
switch(command_type) {
case CREATE_TABLE: // REGULAR CREATE TABLE DDL
if (!CollectionUtils.isEmpty(partColNames)) {
throw new SemanticException("Partition columns can only declared using their name and types in regular CREATE TABLE statements");
}
tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, isTemporary, tblProps, storageFormat);
CreateTableDesc crtTblDesc = new CreateTableDesc(qualifiedTabName, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
crtTblDesc.setStoredAsSubDirectories(storedAsDirs);
crtTblDesc.setNullFormat(rowFormatParams.nullFormat);
crtTblDesc.validate(conf);
// outputs is empty, which means this create table happens in the current
// database.
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblDesc)));
break;
case ctt: // CREATE TRANSACTIONAL TABLE
if (isExt) {
throw new SemanticException(qualifiedTabName.getTable() + " cannot be declared transactional because it's an external table");
}
tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, false, tblProps, storageFormat);
CreateTableDesc crtTranTblDesc = new CreateTableDesc(qualifiedTabName, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
crtTranTblDesc.setStoredAsSubDirectories(storedAsDirs);
crtTranTblDesc.setNullFormat(rowFormatParams.nullFormat);
crtTranTblDesc.validate(conf);
// outputs is empty, which means this create table happens in the current
// database.
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTranTblDesc)));
break;
case CTLT: // create table like <tbl_name>
tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, isTemporary, tblProps, storageFormat);
Table likeTable = getTable(likeTableName, false);
if (likeTable != null) {
if (isTemporary || isExt) {
updateDefaultTblProps(likeTable.getParameters(), tblProps, new ArrayList<>(Arrays.asList(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES)));
} else {
updateDefaultTblProps(likeTable.getParameters(), tblProps, null);
}
}
CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(dbDotTab, isExt, isTemporary, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getSerdeProps(), tblProps, ifNotExists, likeTableName, isUserStorageFormat);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblLikeDesc)));
break;
case CTAS: // create table as select
if (isTemporary) {
if (!ctx.isExplainSkipExecution() && !isMaterialization) {
SessionState ss = SessionState.get();
if (ss == null) {
throw new SemanticException("No current SessionState, cannot create temporary table " + qualifiedTabName.getNotEmptyDbTable());
}
Map<String, Table> tables = SessionHiveMetaStoreClient.getTempTablesForDatabase(qualifiedTabName.getDb(), qualifiedTabName.getTable());
if (tables != null && tables.containsKey(qualifiedTabName.getTable())) {
throw new SemanticException("Temporary table " + qualifiedTabName.getNotEmptyDbTable() + " already exists");
}
}
} else {
// dumpTable is only used to check the conflict for non-temporary tables
try {
Table dumpTable = db.newTable(dbDotTab);
if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false) && !ctx.isExplainSkipExecution()) {
throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTab));
}
} catch (HiveException e) {
throw new SemanticException(e);
}
}
if (location != null && location.length() != 0) {
Path locPath = new Path(location);
FileSystem curFs = null;
FileStatus locStats = null;
try {
curFs = locPath.getFileSystem(conf);
if (curFs != null) {
locStats = curFs.getFileStatus(locPath);
}
if (locStats != null && locStats.isDir()) {
FileStatus[] lStats = curFs.listStatus(locPath);
if (lStats != null && lStats.length != 0) {
// Don't throw an exception if the target location only contains the staging-dirs
for (FileStatus lStat : lStats) {
if (!lStat.getPath().getName().startsWith(HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR))) {
throw new SemanticException(ErrorMsg.CTAS_LOCATION_NONEMPTY.getMsg(location));
}
}
}
}
} catch (FileNotFoundException nfe) {
// we will create the folder if it does not exist.
} catch (IOException ioE) {
LOG.debug("Exception when validate folder", ioE);
}
}
if (!CollectionUtils.isEmpty(partCols)) {
throw new SemanticException("Partition columns can only declared using their names in CTAS statements");
}
tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional, isManaged);
tblProps.put(TABLE_IS_CTAS, "true");
addDbAndTabToOutputs(new String[] { qualifiedTabName.getDb(), qualifiedTabName.getTable() }, TableType.MANAGED_TABLE, isTemporary, tblProps, storageFormat);
tableDesc = new CreateTableDesc(qualifiedTabName, isExt, isTemporary, cols, partColNames, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, true, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
tableDesc.setMaterialization(isMaterialization);
tableDesc.setStoredAsSubDirectories(storedAsDirs);
tableDesc.setNullFormat(rowFormatParams.nullFormat);
qb.setTableDesc(tableDesc);
return selectStmt;
default:
throw new SemanticException("Unrecognized command.");
}
return null;
}
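One detail worth calling out from this snippet is the external-table guard: constraints on an EXTERNAL table are rejected unless they are pure RELY metadata, i.e. neither ENABLE nor VALIDATE is set. A minimal sketch of the flag combinations follows; the ConstraintsUtils.hasEnabledOrValidatedConstraints signature matches the call in the snippet, while its semantics (true when any passed constraint is enabled or validated) are inferred from its name rather than verified here.
// Hedged sketch of the "isExt" constraint guard above. Flag names mirror the thrift setters.
SQLNotNullConstraint relyOnly = new SQLNotNullConstraint();
relyOnly.setEnable_cstr(false);   // DISABLE
relyOnly.setValidate_cstr(false); // NOVALIDATE
relyOnly.setRely_cstr(true);      // RELY

SQLNotNullConstraint enabled = new SQLNotNullConstraint();
enabled.setEnable_cstr(true);     // ENABLE

// Expected to pass the guard for an external table (metadata-only constraint):
boolean allowed = !ConstraintsUtils.hasEnabledOrValidatedConstraints(
    Collections.singletonList(relyOnly), Collections.emptyList(), Collections.emptyList());
// Expected to trip the guard and raise INVALID_CSTR_SYNTAX for an external table:
boolean rejected = ConstraintsUtils.hasEnabledOrValidatedConstraints(
    Collections.singletonList(enabled), Collections.emptyList(), Collections.emptyList());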
Use of org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint in project Hive by Apache.
The class AddNotNullConstraintHandler, method handle.
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
AddNotNullConstraintMessage msg = deserializer.getAddNotNullConstraintMessage(context.dmd.getPayload());
List<SQLNotNullConstraint> nns;
try {
nns = msg.getNotNullConstraints();
} catch (Exception e) {
if (!(e instanceof SemanticException)) {
throw new SemanticException("Error reading message members", e);
} else {
throw (SemanticException) e;
}
}
List<Task<?>> tasks = new ArrayList<Task<?>>();
if (nns.isEmpty()) {
return tasks;
}
final String actualDbName = context.isDbNameEmpty() ? nns.get(0).getTable_db() : context.dbName;
final String actualTblName = nns.get(0).getTable_name();
final TableName tName = TableName.fromString(actualTblName, null, actualDbName);
for (SQLNotNullConstraint nn : nns) {
nn.setTable_db(actualDbName);
nn.setTable_name(actualTblName);
}
Constraints constraints = new Constraints(null, null, nns, null, null, null);
AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints);
Task<DDLWork> addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
tasks.add(addConstraintsTask);
context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName);
updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
return Collections.singletonList(addConstraintsTask);
}
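Before the Constraints bundle is built, the handler retargets every incoming constraint at the database and table being loaded into. A small hypothetical helper (not part of the handler) that isolates that remapping step, using only the setters already exercised in the loop above:
// Hypothetical helper: point a batch of replicated NOT NULL constraints at the
// database/table they are being loaded into, as the for-loop in handle() does inline.
private static void retargetNotNullConstraints(List<SQLNotNullConstraint> nns, String dbName, String tblName) {
  for (SQLNotNullConstraint nn : nns) {
    nn.setTable_db(dbName);
    nn.setTable_name(tblName);
  }
}
// Usage equivalent to the loop in handle():
// retargetNotNullConstraints(nns, actualDbName, actualTblName);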