Use of org.apache.hadoop.hive.metastore.model.MColumnDescriptor in the Apache Hive project: class ObjectStore, method copyMSD.
/**
 * Copies the contents of {@code newSd} onto {@code oldSd} in place. The old
 * column descriptor is kept (so it stays shared between SDs) when the column
 * lists are identical; otherwise it is replaced by the new one, and the old
 * CD is deleted from the backend db if nothing references it any more.
 *
 * @param newSd the storage descriptor to copy from
 * @param oldSd the storage descriptor to copy onto (mutated in place)
 */
private void copyMSD(MStorageDescriptor newSd, MStorageDescriptor oldSd) {
oldSd.setLocation(newSd.getLocation());
MColumnDescriptor oldCD = oldSd.getCD();
// Only swap in the new column descriptor when the column lists actually
// differ; otherwise keep the old CD so it remains shared between SDs.
// NOTE: the former oldSd/newSd null checks were dead code — both are
// dereferenced unconditionally before and after this point — so they
// have been removed.
boolean sameColumns = oldCD != null && oldCD.getCols() != null
    && newSd.getCD() != null && newSd.getCD().getCols() != null
    && convertToFieldSchemas(newSd.getCD().getCols())
        .equals(convertToFieldSchemas(oldCD.getCols()));
if (!sameColumns) {
oldSd.setCD(newSd.getCD());
}
// If oldCD does not have any more references, delete it from the backend db
// (removeUnusedColumnDescriptor is a no-op while it is still referenced).
removeUnusedColumnDescriptor(oldCD);
oldSd.setBucketCols(newSd.getBucketCols());
oldSd.setCompressed(newSd.isCompressed());
oldSd.setInputFormat(newSd.getInputFormat());
oldSd.setOutputFormat(newSd.getOutputFormat());
oldSd.setNumBuckets(newSd.getNumBuckets());
oldSd.getSerDeInfo().setName(newSd.getSerDeInfo().getName());
oldSd.getSerDeInfo().setSerializationLib(newSd.getSerDeInfo().getSerializationLib());
oldSd.getSerDeInfo().setParameters(newSd.getSerDeInfo().getParameters());
oldSd.setSkewedColNames(newSd.getSkewedColNames());
oldSd.setSkewedColValues(newSd.getSkewedColValues());
oldSd.setSkewedColValueLocationMaps(newSd.getSkewedColValueLocationMaps());
oldSd.setSortCols(newSd.getSortCols());
oldSd.setParameters(newSd.getParameters());
oldSd.setStoredAsSubDirectories(newSd.isStoredAsSubDirectories());
}
Use of org.apache.hadoop.hive.metastore.model.MColumnDescriptor in the Apache Hive project: class ObjectStore, method addForeignKeys.
/**
 * Builds and persists an MConstraint row for each foreign-key column pair.
 * Consecutive entries with a null constraint name and key_seq > 1 reuse the
 * name generated for the first column of the same constraint.
 *
 * @param fks foreign-key column definitions to persist
 * @param retrieveCD whether the column descriptors should come from the
 *        AttachedMTableInfo (true) or from the table's SD (false)
 * @throws InvalidObjectException if a referenced table or column is missing
 * @throws MetaException on metastore-level failures
 */
private void addForeignKeys(List<SQLForeignKey> fks, boolean retrieveCD) throws InvalidObjectException, MetaException {
List<MConstraint> mpkfks = new ArrayList<>();
String currentConstraintName = null;
for (SQLForeignKey fk : fks) {
AttachedMTableInfo nParentTable = getMTable(fk.getPktable_db(), fk.getPktable_name(), retrieveCD);
MTable parentTable = nParentTable.mtbl;
if (parentTable == null) {
throw new InvalidObjectException("Parent table not found: " + fk.getPktable_name());
}
AttachedMTableInfo nChildTable = getMTable(fk.getFktable_db(), fk.getFktable_name(), retrieveCD);
MTable childTable = nChildTable.mtbl;
if (childTable == null) {
throw new InvalidObjectException("Child table not found: " + fk.getFktable_name());
}
MColumnDescriptor parentCD = retrieveCD ? nParentTable.mcd : parentTable.getSd().getCD();
List<MFieldSchema> parentCols = parentCD == null ? null : parentCD.getCols();
int parentIntegerIndex = getColumnIndexFromTableColumns(parentCols, fk.getPkcolumn_name());
if (parentIntegerIndex == -1) {
throw new InvalidObjectException("Parent column not found: " + fk.getPkcolumn_name());
}
MColumnDescriptor childCD = retrieveCD ? nChildTable.mcd : childTable.getSd().getCD();
// Fix: guard against a null child CD, mirroring the parent-side handling
// above; the original called childCD.getCols() unconditionally and would
// NPE instead of reporting "Child column not found".
List<MFieldSchema> childCols = childCD == null ? null : childCD.getCols();
int childIntegerIndex = getColumnIndexFromTableColumns(childCols, fk.getFkcolumn_name());
if (childIntegerIndex == -1) {
throw new InvalidObjectException("Child column not found: " + fk.getFkcolumn_name());
}
if (fk.getFk_name() == null) {
// Generate a name only for the first column of a multi-column constraint
// (key_seq == 1); later columns reuse currentConstraintName to preserve
// the uniqueness of the generated constraint name.
if (fk.getKey_seq() == 1) {
currentConstraintName = generateConstraintName(fk.getFktable_db(), fk.getFktable_name(), fk.getPktable_db(), fk.getPktable_name(), fk.getPkcolumn_name(), fk.getFkcolumn_name(), "fk");
}
} else {
currentConstraintName = fk.getFk_name();
}
Integer updateRule = fk.getUpdate_rule();
Integer deleteRule = fk.getDelete_rule();
// Pack ENABLE/VALIDATE/RELY flags into a single bitmask (4/2/1).
int enableValidateRely = (fk.isEnable_cstr() ? 4 : 0) + (fk.isValidate_cstr() ? 2 : 0) + (fk.isRely_cstr() ? 1 : 0);
MConstraint mpkfk = new MConstraint(currentConstraintName, MConstraint.FOREIGN_KEY_CONSTRAINT, fk.getKey_seq(), deleteRule, updateRule, enableValidateRely, parentTable, childTable, parentCD, childCD, childIntegerIndex, parentIntegerIndex);
mpkfks.add(mpkfk);
}
pm.makePersistentAll(mpkfks);
}
Use of org.apache.hadoop.hive.metastore.model.MColumnDescriptor in the Apache Hive project: class ObjectStore, method dropPartitions.
/**
 * Drops the named partitions of a table inside a single transaction,
 * removing grants, column statistics, and any column descriptors that
 * become unreferenced along the way.
 *
 * @param dbName database name
 * @param tblName table name
 * @param partNames names of the partitions to drop; a null/empty list is a no-op
 * @throws MetaException if the transaction fails to commit
 * @throws NoSuchObjectException if a referenced object does not exist
 */
@Override
public void dropPartitions(String dbName, String tblName, List<String> partNames) throws MetaException, NoSuchObjectException {
if (CollectionUtils.isEmpty(partNames)) {
return;
}
boolean committed = false;
openTransaction();
try {
// Remove everything hanging off the partitions first.
dropPartitionGrantsNoTxn(dbName, tblName, partNames);
dropPartitionAllColumnGrantsNoTxn(dbName, tblName, partNames);
dropPartitionColumnStatisticsNoTxn(dbName, tblName, partNames);
// CDs are shared between SDs: detach them from the partition SDs, then
// delete whichever CDs are no longer referenced by any other SD.
for (MColumnDescriptor candidate : detachCdsFromSdsNoTxn(dbName, tblName, partNames)) {
removeUnusedColumnDescriptor(candidate);
}
dropPartitionsNoTxn(dbName, tblName, partNames);
committed = commitTransaction();
if (!committed) {
// Should not happen?
throw new MetaException("Failed to drop partitions");
}
} finally {
if (!committed) {
rollbackTransaction();
}
}
}
Use of org.apache.hadoop.hive.metastore.model.MColumnDescriptor in the Apache Hive project: class ObjectStore, method preDropStorageDescriptor.
/**
 * Called right before an action that would drop a storage descriptor.
 * Detaches the SD's column descriptor (nulling the reference so foreign-key
 * constraints are satisfied when the SD is deleted) and then deletes the CD
 * if no other SD in the table still references it.
 *
 * @param msd the storage descriptor to drop; may be null
 */
private void preDropStorageDescriptor(MStorageDescriptor msd) {
if (msd == null) {
return;
}
MColumnDescriptor mcd = msd.getCD();
if (mcd == null) {
return;
}
// There is a 1-N relationship between CDs and SDs, so the SD must stop
// referencing the CD before the CD itself can be dropped.
msd.setCD(null);
removeUnusedColumnDescriptor(mcd);
}
Use of org.apache.hadoop.hive.metastore.model.MColumnDescriptor in the Apache Hive project: class ObjectStore, method detachCdsFromSdsNoTxn.
/**
 * Detaches column descriptors from the storage descriptors of the given
 * partitions; returns the set of unique CDs thus detached. This is done
 * before dropping partitions because CDs are reused between SDs; we remove
 * the links so the SDs can be deleted, and the caller then checks whether
 * the returned CDs are still referenced by other SDs.
 *
 * @param dbName database name
 * @param tblName table name
 * @param partNames partitions whose SDs should lose their CD reference
 * @return the distinct CDs that were detached
 */
private HashSet<MColumnDescriptor> detachCdsFromSdsNoTxn(String dbName, String tblName, List<String> partNames) {
ObjectPair<Query, Map<String, String>> queryWithParams = getPartQueryWithParams(dbName, tblName, partNames);
Query query = queryWithParams.getFirst();
HashSet<MColumnDescriptor> candidateCds = new HashSet<>();
try {
query.setClass(MPartition.class);
query.setResult("sd");
@SuppressWarnings("unchecked") List<MStorageDescriptor> sds = (List<MStorageDescriptor>) query.executeWithMap(queryWithParams.getSecond());
for (MStorageDescriptor sd : sds) {
if (sd != null && sd.getCD() != null) {
candidateCds.add(sd.getCD());
sd.setCD(null);
}
}
} finally {
// Fix: always release the JDO query. The original only called closeAll()
// on the success path (and its null check was dead — query was already
// dereferenced), leaking query resources if execution threw.
query.closeAll();
}
return candidateCds;
}
Aggregations