use of org.apache.hadoop.hive.metastore.api.AlreadyExistsException in project hive by apache.
the class Hive method createTable.
/**
 * Creates the table with the given objects. It takes additional arguments for
 * primary keys and foreign keys associated with the table.
 *
 * @param tbl
 *          a table object
 * @param ifNotExists
 *          if true, ignore AlreadyExistsException
 * @param primaryKeys
 *          primary key columns associated with the table
 * @param foreignKeys
 *          foreign key columns associated with the table
 * @throws HiveException
 */
public void createTable(Table tbl, boolean ifNotExists, List<SQLPrimaryKey> primaryKeys,
    List<SQLForeignKey> foreignKeys) throws HiveException {
  try {
    if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) {
      tbl.setDbName(SessionState.get().getCurrentDatabase());
    }
    if (tbl.getCols().size() == 0 || tbl.getSd().getColsSize() == 0) {
      tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(),
          tbl.getDeserializer()));
    }
    tbl.checkValidity(conf);
    if (tbl.getParameters() != null) {
      tbl.getParameters().remove(hive_metastoreConstants.DDL_TIME);
    }
    org.apache.hadoop.hive.metastore.api.Table tTbl = tbl.getTTable();
    PrincipalPrivilegeSet principalPrivs = new PrincipalPrivilegeSet();
    SessionState ss = SessionState.get();
    if (ss != null) {
      CreateTableAutomaticGrant grants = ss.getCreateTableGrants();
      if (grants != null) {
        principalPrivs.setUserPrivileges(grants.getUserGrants());
        principalPrivs.setGroupPrivileges(grants.getGroupGrants());
        principalPrivs.setRolePrivileges(grants.getRoleGrants());
        tTbl.setPrivileges(principalPrivs);
      }
    }
    if (primaryKeys == null && foreignKeys == null) {
      getMSC().createTable(tTbl);
    } else {
      getMSC().createTableWithConstraints(tTbl, primaryKeys, foreignKeys);
    }
  } catch (AlreadyExistsException e) {
    if (!ifNotExists) {
      throw new HiveException(e);
    }
  } catch (Exception e) {
    throw new HiveException(e);
  }
}
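
For orientation, a minimal, hypothetical caller of this overload. The database, table, column, and constraint names are invented for illustration, and the SQLPrimaryKey arguments follow its Thrift-generated all-args constructor; a sketch, not the canonical usage.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;

public class CreateTableWithPkExample {
  public static void main(String[] args) throws Exception {
    // Assumes hive-site.xml on the classpath pointing at a live metastore.
    HiveConf conf = new HiveConf();
    Hive db = Hive.get(conf);
    Table tbl = new Table("default", "orders");
    tbl.setFields(Arrays.asList(new FieldSchema("id", "bigint", "order id")));
    // Thrift all-args constructor: (dbName, tableName, columnName, keySeq,
    //                               pkName, enableCstr, validateCstr, relyCstr)
    List<SQLPrimaryKey> pk = Collections.singletonList(
        new SQLPrimaryKey("default", "orders", "id", 1, "pk_orders", false, false, false));
    // ifNotExists = true: an AlreadyExistsException from the metastore is swallowed.
    db.createTable(tbl, true, pk, null);
  }
}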
use of org.apache.hadoop.hive.metastore.api.AlreadyExistsException in project hive by apache.
the class HCatClientHMSImpl method addPartition.
@Override
public void addPartition(HCatAddPartitionDesc partInfo) throws HCatException {
  Table tbl = null;
  try {
    tbl = hmsClient.getTable(partInfo.getDatabaseName(), partInfo.getTableName());
    // TODO: Should be moved out.
    if (tbl.getPartitionKeysSize() == 0) {
      throw new HCatException("The table " + partInfo.getTableName() + " is not partitioned.");
    }
    HCatTable hcatTable = new HCatTable(tbl);
    HCatPartition hcatPartition = partInfo.getHCatPartition();
    // This is only required to support the deprecated methods in HCatAddPartitionDesc.Builder.
    if (hcatPartition == null) {
      hcatPartition = partInfo.getHCatPartition(hcatTable);
    }
    hmsClient.add_partition(hcatPartition.toHivePartition());
  } catch (InvalidObjectException e) {
    throw new HCatException("InvalidObjectException while adding partition.", e);
  } catch (AlreadyExistsException e) {
    throw new HCatException("AlreadyExistsException while adding partition.", e);
  } catch (MetaException e) {
    throw new HCatException("MetaException while adding partition.", e);
  } catch (NoSuchObjectException e) {
    throw new ObjectNotFoundException("The table " + partInfo.getTableName() + " could not be found.", e);
  } catch (TException e) {
    throw new ConnectionFailureException("TException while adding partition.", e);
  }
}
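
A sketch of how this method is typically reached through the public HCatClient API. The database, table, and location values are made up, and this deliberately uses the deprecated builder path that the null-check above exists to support.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hive.hcatalog.api.HCatAddPartitionDesc;
import org.apache.hive.hcatalog.api.HCatClient;

public class AddPartitionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();  // assumes hive-site.xml on the classpath
    HCatClient client = HCatClient.create(conf);
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("dt", "2016-01-01");
    // Deprecated builder: descriptors built this way carry no HCatPartition,
    // which is why addPartition falls back to getHCatPartition(hcatTable).
    HCatAddPartitionDesc desc = HCatAddPartitionDesc
        .create("default", "clicks", "/data/clicks/dt=2016-01-01", partSpec)
        .build();
    client.addPartition(desc);
    client.close();
  }
}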
use of org.apache.hadoop.hive.metastore.api.AlreadyExistsException in project hive by apache.
the class HCatClientHMSImpl method addPartitions.
/**
 * @param partInfoList The list of partitions to add.
 * @return The number of partitions added.
 * @throws HCatException,ConnectionFailureException
 * @see org.apache.hive.hcatalog.api.HCatClient#addPartitions(java.util.List)
 */
@Override
public int addPartitions(List<HCatAddPartitionDesc> partInfoList) throws HCatException {
  int numPartitions = -1;
  if ((partInfoList == null) || (partInfoList.size() == 0)) {
    throw new HCatException("The partition list is null or empty.");
  }
  Table tbl = null;
  try {
    tbl = hmsClient.getTable(partInfoList.get(0).getDatabaseName(), partInfoList.get(0).getTableName());
    HCatTable hcatTable = new HCatTable(tbl);
    ArrayList<Partition> ptnList = new ArrayList<Partition>();
    for (HCatAddPartitionDesc desc : partInfoList) {
      HCatPartition hCatPartition = desc.getHCatPartition();
      // This is required only to support the deprecated HCatAddPartitionDesc.Builder interfaces.
      if (hCatPartition == null) {
        hCatPartition = desc.getHCatPartition(hcatTable);
      }
      ptnList.add(hCatPartition.toHivePartition());
    }
    numPartitions = hmsClient.add_partitions(ptnList);
  } catch (InvalidObjectException e) {
    throw new HCatException("InvalidObjectException while adding partitions.", e);
  } catch (AlreadyExistsException e) {
    throw new HCatException("AlreadyExistsException while adding partitions.", e);
  } catch (MetaException e) {
    throw new HCatException("MetaException while adding partitions.", e);
  } catch (NoSuchObjectException e) {
    throw new ObjectNotFoundException("The table " + partInfoList.get(0).getTableName() + " could not be found.", e);
  } catch (TException e) {
    throw new ConnectionFailureException("TException while adding partitions.", e);
  }
  return numPartitions;
}
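
Continuing the previous sketch (reusing `client` and assuming java.util.ArrayList, Arrays, List are also imported), the batch variant takes a list of descriptors for the same table and returns the count reported by the metastore:

List<HCatAddPartitionDesc> descs = new ArrayList<HCatAddPartitionDesc>();
for (String dt : Arrays.asList("2016-01-01", "2016-01-02")) {
  Map<String, String> spec = new HashMap<String, String>();
  spec.put("dt", dt);
  descs.add(HCatAddPartitionDesc
      .create("default", "clicks", "/data/clicks/dt=" + dt, spec)
      .build());
}
// add_partitions reports how many partitions were actually created; a
// duplicate raises AlreadyExistsException, surfaced as an HCatException above.
int added = client.addPartitions(descs);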
use of org.apache.hadoop.hive.metastore.api.AlreadyExistsException in project metacat by Netflix.
the class HiveConnectorPartitionService method savePartitions.
/**
 * {@inheritDoc}.
 */
@Override
public PartitionsSaveResponse savePartitions(@Nonnull @NonNull final ConnectorContext requestContext,
    @Nonnull @NonNull final QualifiedName tableName,
    @Nonnull @NonNull final PartitionsSaveRequest partitionsSaveRequest) {
  final String databasename = tableName.getDatabaseName();
  final String tablename = tableName.getTableName();
  // New partitions
  final List<Partition> hivePartitions = Lists.newArrayList();
  try {
    final Table table = metacatHiveClient.getTableByName(databasename, tablename);
    final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
    // New partition ids
    final List<String> addedPartitionIds = Lists.newArrayList();
    // Updated partition ids
    final List<String> existingPartitionIds = Lists.newArrayList();
    // Existing partitions
    final List<Partition> existingHivePartitions = Lists.newArrayList();
    // Existing partition map
    Map<String, Partition> existingPartitionMap = Collections.emptyMap();
    if (partitionsSaveRequest.getCheckIfExists()) {
      final List<String> partitionNames = partitionInfos.stream().map(partition -> {
        final String partitionName = partition.getName().getPartitionName();
        PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
        return partitionName;
      }).collect(Collectors.toList());
      existingPartitionMap = getPartitionsByNames(table, partitionNames);
    }
    final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableName, table);
    for (PartitionInfo partitionInfo : partitionInfos) {
      final String partitionName = partitionInfo.getName().getPartitionName();
      final Partition hivePartition = existingPartitionMap.get(partitionName);
      if (hivePartition == null) {
        addedPartitionIds.add(partitionName);
        hivePartitions.add(hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo));
      } else {
        // Update the existing partition only when alterIfExists is set.
        if (partitionsSaveRequest.getAlterIfExists()) {
          final Partition existingPartition = hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo);
          existingPartitionIds.add(partitionName);
          existingPartition.setParameters(hivePartition.getParameters());
          existingPartition.setCreateTime(hivePartition.getCreateTime());
          existingPartition.setLastAccessTime(hivePartition.getLastAccessTime());
          existingHivePartitions.add(existingPartition);
        }
      }
    }
    final Set<String> deletePartitionIds = Sets.newHashSet();
    if (!partitionsSaveRequest.getAlterIfExists()) {
      deletePartitionIds.addAll(existingPartitionIds);
    }
    if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
      deletePartitionIds.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
    }
    if (partitionsSaveRequest.getAlterIfExists() && !existingHivePartitions.isEmpty()) {
      copyTableSdToPartitionSd(existingHivePartitions, table);
      metacatHiveClient.alterPartitions(databasename, tablename, existingHivePartitions);
    }
    copyTableSdToPartitionSd(hivePartitions, table);
    metacatHiveClient.addDropPartitions(databasename, tablename, hivePartitions,
        Lists.newArrayList(deletePartitionIds));
    final PartitionsSaveResponse result = new PartitionsSaveResponse();
    result.setAdded(addedPartitionIds);
    result.setUpdated(existingPartitionIds);
    return result;
  } catch (NoSuchObjectException exception) {
    if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
      throw new PartitionNotFoundException(tableName, "", exception);
    } else {
      throw new TableNotFoundException(tableName, exception);
    }
  } catch (MetaException | InvalidObjectException exception) {
    throw new InvalidMetaException("One or more partitions are invalid.", exception);
  } catch (AlreadyExistsException e) {
    final List<String> ids = getFakePartitionName(hivePartitions);
    throw new PartitionAlreadyExistsException(tableName, ids, e);
  } catch (TException exception) {
    throw new ConnectorException(String.format("Failed savePartitions hive table %s", tableName), exception);
  }
}
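
A hedged sketch of a caller driving this connector method. The setter names on PartitionsSaveRequest are inferred from the getters used above (getPartitions, getCheckIfExists, getAlterIfExists, getPartitionIdsForDeletes), and QualifiedName.ofTable is an assumed factory method; verify both against your Metacat version.

// Assumed setters mirroring the getters used in savePartitions above.
final PartitionsSaveRequest request = new PartitionsSaveRequest();
request.setPartitions(partitionInfos);   // PartitionInfo list to create or update
request.setCheckIfExists(true);          // pre-fetch existing partitions by name
request.setAlterIfExists(true);          // alter matches instead of drop-and-re-add
request.setPartitionIdsForDeletes(Lists.newArrayList("dt=2016-01-01"));
final PartitionsSaveResponse response = partitionService.savePartitions(
    requestContext,
    QualifiedName.ofTable("hive", "default", "clicks"),  // assumed factory method
    request);
// response.getAdded() and response.getUpdated() echo the partition names processed.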
use of org.apache.hadoop.hive.metastore.api.AlreadyExistsException in project hive by apache.
the class Hive method createTable.
/**
 * Creates the table with the given objects. It takes additional arguments for
 * primary keys and foreign keys associated with the table.
 *
 * @param tbl
 *          a table object
 * @param ifNotExists
 *          if true, ignore AlreadyExistsException
 * @param primaryKeys
 *          primary key columns associated with the table
 * @param foreignKeys
 *          foreign key columns associated with the table
 * @param uniqueConstraints
 *          UNIQUE constraints associated with the table
 * @param notNullConstraints
 *          NOT NULL constraints associated with the table
 * @param defaultConstraints
 *          DEFAULT constraints associated with the table
 * @param checkConstraints
 *          CHECK constraints associated with the table
 * @throws HiveException
 */
public void createTable(Table tbl, boolean ifNotExists, List<SQLPrimaryKey> primaryKeys,
    List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
    List<SQLNotNullConstraint> notNullConstraints, List<SQLDefaultConstraint> defaultConstraints,
    List<SQLCheckConstraint> checkConstraints) throws HiveException {
  try {
    if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) {
      tbl.setDbName(SessionState.get().getCurrentDatabase());
    }
    if (tbl.getCols().size() == 0 || tbl.getSd().getColsSize() == 0) {
      tbl.setFields(HiveMetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(),
          tbl.getDeserializer()));
    }
    tbl.checkValidity(conf);
    if (tbl.getParameters() != null) {
      tbl.getParameters().remove(hive_metastoreConstants.DDL_TIME);
    }
    org.apache.hadoop.hive.metastore.api.Table tTbl = tbl.getTTable();
    PrincipalPrivilegeSet principalPrivs = new PrincipalPrivilegeSet();
    SessionState ss = SessionState.get();
    if (ss != null) {
      CreateTableAutomaticGrant grants = ss.getCreateTableGrants();
      if (grants != null) {
        principalPrivs.setUserPrivileges(grants.getUserGrants());
        principalPrivs.setGroupPrivileges(grants.getGroupGrants());
        principalPrivs.setRolePrivileges(grants.getRoleGrants());
        tTbl.setPrivileges(principalPrivs);
      }
    }
    if (primaryKeys == null && foreignKeys == null && uniqueConstraints == null
        && notNullConstraints == null && defaultConstraints == null && checkConstraints == null) {
      getMSC().createTable(tTbl);
    } else {
      getMSC().createTableWithConstraints(tTbl, primaryKeys, foreignKeys, uniqueConstraints,
          notNullConstraints, defaultConstraints, checkConstraints);
    }
  } catch (AlreadyExistsException e) {
    if (!ifNotExists) {
      throw new HiveException(e);
    }
  } catch (Exception e) {
    throw new HiveException(e);
  }
}
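
Reusing `db` and `tbl` from the first sketch, a call supplying only a NOT NULL constraint might look like the following. The SQLNotNullConstraint argument order follows the Thrift definition in recent Hive releases (catalog name first) and should be checked against your version.

import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;

// Thrift constructor: (catName, dbName, tableName, columnName, nnName,
//                      enableCstr, validateCstr, relyCstr) -- assumed order.
List<SQLNotNullConstraint> notNulls = Collections.singletonList(
    new SQLNotNullConstraint("hive", "default", "orders", "id", "nn_orders_id",
        true, false, false));
// Unused constraint types are passed as null; only when all six are null does
// the method route to the plain createTable call.
db.createTable(tbl, true, null, null, null, notNulls, null, null);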