Use of org.apache.hadoop.hive.metastore.model.MColumnDescriptor in project hive by apache.
From the class ObjectStore, method alterPartitionNoTxn.
/**
 * Alters an existing partition. Initiates a copy of the SD. Returns the old CD.
 * @param dbname Database name of the partitioned table
 * @param name Table name
 * @param part_vals Partition values (of the original partition instance)
 * @param newPart Partition object containing new information
 * @return The column descriptor of the old partition instance (null if table is a view)
 * @throws InvalidObjectException if the old or the new partition cannot be resolved
 * @throws MetaException on other metastore errors
 */
private MColumnDescriptor alterPartitionNoTxn(String dbname, String name,
    List<String> part_vals, Partition newPart)
    throws InvalidObjectException, MetaException {
  name = normalizeIdentifier(name);
  dbname = normalizeIdentifier(dbname);
  MPartition oldp = getMPartition(dbname, name, part_vals);
  MPartition newp = convertToMPart(newPart, false);
  // Check for existence before touching the old partition's SD; otherwise a
  // missing partition would surface as a NullPointerException instead.
  if (oldp == null || newp == null) {
    throw new InvalidObjectException("partition does not exist.");
  }
  MColumnDescriptor oldCD = null;
  MStorageDescriptor oldSD = oldp.getSd();
  if (oldSD != null) {
    oldCD = oldSD.getCD();
  }
  oldp.setValues(newp.getValues());
  oldp.setPartitionName(newp.getPartitionName());
  oldp.setParameters(newPart.getParameters());
  if (!TableType.VIRTUAL_VIEW.name().equals(oldp.getTable().getTableType())) {
    copyMSD(newp.getSd(), oldp.getSd());
  }
  if (newp.getCreateTime() != oldp.getCreateTime()) {
    oldp.setCreateTime(newp.getCreateTime());
  }
  if (newp.getLastAccessTime() != oldp.getLastAccessTime()) {
    oldp.setLastAccessTime(newp.getLastAccessTime());
  }
  return oldCD;
}
Use of org.apache.hadoop.hive.metastore.model.MColumnDescriptor in project hive by apache.
From the class ObjectStore, method alterPartitions.
@Override
public void alterPartitions(String dbname, String name, List<List<String>> part_vals,
    List<Partition> newParts) throws InvalidObjectException, MetaException {
  boolean success = false;
  Exception e = null;
  try {
    openTransaction();
    Iterator<List<String>> part_val_itr = part_vals.iterator();
    Set<MColumnDescriptor> oldCds = new HashSet<>();
    for (Partition tmpPart : newParts) {
      List<String> tmpPartVals = part_val_itr.next();
      MColumnDescriptor oldCd = alterPartitionNoTxn(dbname, name, tmpPartVals, tmpPart);
      if (oldCd != null) {
        oldCds.add(oldCd);
      }
    }
    // Column descriptors may be shared, so drop the old ones only after all
    // partitions have been rewritten, and only if nothing references them.
    for (MColumnDescriptor oldCd : oldCds) {
      removeUnusedColumnDescriptor(oldCd);
    }
    // commit the changes
    success = commitTransaction();
  } catch (Exception exception) {
    e = exception;
  } finally {
    if (!success) {
      rollbackTransaction();
      MetaException metaException =
          new MetaException("The transaction for alter partition did not commit successfully.");
      if (e != null) {
        metaException.initCause(e);
      }
      throw metaException;
    }
  }
}
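removeUnusedColumnDescriptor itself is not shown in these snippets. Conceptually, it deletes an MColumnDescriptor only when no storage descriptor references it any more, which is why the old CDs are collected into a set and cleaned up after every partition has been rewritten. The sketch below is hypothetical, not the actual ObjectStore implementation: the helper name removeColumnDescriptorIfUnused is invented, and it assumes MStorageDescriptor exposes its column descriptor through a persistent field named cd.

// Hypothetical sketch only -- not the real removeUnusedColumnDescriptor.
// Assumptions: MStorageDescriptor has a persistent field "cd" referring to its
// MColumnDescriptor, and "pm" is the javax.jdo.PersistenceManager used above.
private void removeColumnDescriptorIfUnused(javax.jdo.PersistenceManager pm, MColumnDescriptor cd) {
  if (cd == null) {
    return;
  }
  // Count how many storage descriptors still point at this column descriptor.
  javax.jdo.Query query = pm.newQuery(MStorageDescriptor.class, "this.cd == inCD");
  query.declareParameters("org.apache.hadoop.hive.metastore.model.MColumnDescriptor inCD");
  query.setResult("count(this)");
  Long references = (Long) query.execute(cd);
  query.closeAll();
  if (references != null && references == 0L) {
    // Nothing references the CD any more, so it (and its column list) can be deleted.
    pm.deletePersistent(cd);
  }
}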
Use of org.apache.hadoop.hive.metastore.model.MColumnDescriptor in project hive by apache.
From the class ObjectStore, method addPrimaryKeys.
private List<String> addPrimaryKeys(List<SQLPrimaryKey> pks, boolean retrieveCD)
    throws InvalidObjectException, MetaException {
  List<String> pkNames = new ArrayList<>();
  List<MConstraint> mpks = new ArrayList<>();
  String constraintName = null;
  for (int i = 0; i < pks.size(); i++) {
    final String tableDB = normalizeIdentifier(pks.get(i).getTable_db());
    final String tableName = normalizeIdentifier(pks.get(i).getTable_name());
    final String columnName = normalizeIdentifier(pks.get(i).getColumn_name());
    // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor.
    // For instance, this is the case when we are creating the table.
    AttachedMTableInfo nParentTable = getMTable(tableDB, tableName, retrieveCD);
    MTable parentTable = nParentTable.mtbl;
    if (parentTable == null) {
      throw new InvalidObjectException("Parent table not found: " + tableName);
    }
    MColumnDescriptor parentCD = retrieveCD ? nParentTable.mcd : parentTable.getSd().getCD();
    int parentIntegerIndex =
        getColumnIndexFromTableColumns(parentCD == null ? null : parentCD.getCols(), columnName);
    if (parentIntegerIndex == -1) {
      // The key column may be a partition column rather than a data column.
      if (parentTable.getPartitionKeys() != null) {
        parentCD = null;
        parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), columnName);
      }
      if (parentIntegerIndex == -1) {
        throw new InvalidObjectException("Parent column not found: " + columnName);
      }
    }
    if (getPrimaryKeyConstraintName(parentTable.getDatabase().getName(), parentTable.getTableName()) != null) {
      throw new MetaException("Primary key already exists for: "
          + parentTable.getDatabase().getName() + "." + pks.get(i).getTable_name());
    }
    if (pks.get(i).getPk_name() == null) {
      // Auto-generate a constraint name once, on the first key column of the constraint.
      if (pks.get(i).getKey_seq() == 1) {
        constraintName = generateConstraintName(tableDB, tableName, columnName, "pk");
      }
    } else {
      constraintName = normalizeIdentifier(pks.get(i).getPk_name());
      if (constraintNameAlreadyExists(constraintName)) {
        throw new InvalidObjectException("Constraint name already exists: " + constraintName);
      }
    }
    pkNames.add(constraintName);
    // Pack the ENABLE/VALIDATE/RELY traits into one int: ENABLE=4, VALIDATE=2, RELY=1.
    int enableValidateRely = (pks.get(i).isEnable_cstr() ? 4 : 0)
        + (pks.get(i).isValidate_cstr() ? 2 : 0) + (pks.get(i).isRely_cstr() ? 1 : 0);
    MConstraint mpk = new MConstraint(constraintName, MConstraint.PRIMARY_KEY_CONSTRAINT,
        pks.get(i).getKey_seq(), null, null, enableValidateRely, parentTable, null, parentCD,
        null, null, parentIntegerIndex);
    mpks.add(mpk);
  }
  pm.makePersistentAll(mpks);
  return pkNames;
}
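The enableValidateRely value above packs the three constraint traits into a single int using fixed bit positions (ENABLE = 4, VALIDATE = 2, RELY = 1). The following self-contained sketch makes that layout explicit; the encode expression mirrors the one in addPrimaryKeys, while the decode helpers are illustrative only and not part of ObjectStore.

// Self-contained sketch of the ENABLE/VALIDATE/RELY bit packing used above.
// The encode logic matches addPrimaryKeys; the decode helpers are hypothetical.
public final class ConstraintTraits {
  private static final int ENABLE = 4;
  private static final int VALIDATE = 2;
  private static final int RELY = 1;

  static int encode(boolean enable, boolean validate, boolean rely) {
    return (enable ? ENABLE : 0) + (validate ? VALIDATE : 0) + (rely ? RELY : 0);
  }

  static boolean isEnable(int packed)   { return (packed & ENABLE) != 0; }
  static boolean isValidate(int packed) { return (packed & VALIDATE) != 0; }
  static boolean isRely(int packed)     { return (packed & RELY) != 0; }

  public static void main(String[] args) {
    int packed = encode(true, false, true);
    System.out.println(packed);              // 5 (ENABLE + RELY)
    System.out.println(isValidate(packed));  // false
  }
}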
Use of org.apache.hadoop.hive.metastore.model.MColumnDescriptor in project hive by apache.
From the class ObjectStore, method alterTable.
@Override
public void alterTable(String dbname, String name, Table newTable)
    throws InvalidObjectException, MetaException {
  boolean success = false;
  boolean registerCreationSignature = false;
  try {
    openTransaction();
    name = normalizeIdentifier(name);
    dbname = normalizeIdentifier(dbname);
    MTable newt = convertToMTable(newTable);
    if (newt == null) {
      throw new InvalidObjectException("new table is invalid");
    }
    MTable oldt = getMTable(dbname, name);
    if (oldt == null) {
      throw new MetaException("table " + dbname + "." + name + " doesn't exist");
    }
    // For now only alter name, owner, parameters, cols, bucketcols are allowed
    oldt.setDatabase(newt.getDatabase());
    oldt.setTableName(normalizeIdentifier(newt.getTableName()));
    oldt.setParameters(newt.getParameters());
    oldt.setOwner(newt.getOwner());
    // Fully copy over the contents of the new SD into the old SD,
    // so we don't create an extra SD in the metastore db that has no references.
    MColumnDescriptor oldCD = null;
    MStorageDescriptor oldSD = oldt.getSd();
    if (oldSD != null) {
      oldCD = oldSD.getCD();
    }
    copyMSD(newt.getSd(), oldt.getSd());
    removeUnusedColumnDescriptor(oldCD);
    oldt.setRetention(newt.getRetention());
    oldt.setPartitionKeys(newt.getPartitionKeys());
    oldt.setTableType(newt.getTableType());
    oldt.setLastAccessTime(newt.getLastAccessTime());
    oldt.setViewOriginalText(newt.getViewOriginalText());
    oldt.setViewExpandedText(newt.getViewExpandedText());
    oldt.setRewriteEnabled(newt.isRewriteEnabled());
    // commit the changes
    success = commitTransaction();
  } finally {
    if (!success) {
      rollbackTransaction();
    }
  }
}
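For context, a caller normally reaches alterTable through the RawStore interface that ObjectStore implements. The sketch below is a hypothetical usage example, assuming an initialized RawStore named store; it fetches the current Table definition, changes a single parameter, and hands the modified copy back to alterTable, which copies the fields onto the existing MTable inside one transaction.

// Hypothetical usage sketch, assuming "store" is an initialized RawStore
// (for example an ObjectStore wired to the metastore database).
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Table;

public class AlterTableExample {
  static void markTableExternal(RawStore store, String dbName, String tableName) throws Exception {
    Table table = store.getTable(dbName, tableName);   // fetch the current definition
    table.putToParameters("EXTERNAL", "TRUE");         // modify only what is needed
    store.alterTable(dbName, tableName, table);        // persisted in a single transaction
  }
}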