Use of org.apache.hadoop.hive.metastore.model.MStorageDescriptor in project hive by apache.
From the class ObjectStore, method convertToMIndex.
private MIndex convertToMIndex(Index index) throws InvalidObjectException, MetaException {
  StorageDescriptor sd = index.getSd();
  if (sd == null) {
    throw new InvalidObjectException("Storage descriptor is not defined for index.");
  }
  MStorageDescriptor msd = this.convertToMStorageDescriptor(sd);
  MTable origTable = getMTable(index.getDbName(), index.getOrigTableName());
  if (origTable == null) {
    throw new InvalidObjectException("Original table does not exist for the given index.");
  }
  String[] qualified = MetaStoreUtils.getQualifiedName(index.getDbName(), index.getIndexTableName());
  MTable indexTable = getMTable(qualified[0], qualified[1]);
  if (indexTable == null) {
    throw new InvalidObjectException("Underlying index table does not exist for the given index.");
  }
  return new MIndex(HiveStringUtils.normalizeIdentifier(index.getIndexName()), origTable,
      index.getCreateTime(), index.getLastAccessTime(), index.getParameters(), indexTable,
      msd, index.getIndexHandlerClass(), index.isDeferredRebuild());
}
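For context, here is a minimal sketch of how a caller such as addIndex(...) might drive convertToMIndex. This is not the actual Hive source; it assumes the surrounding ObjectStore helpers (openTransaction, commitTransaction, rollbackTransaction) and the JDO PersistenceManager pm, and addIndexSketch is a hypothetical name.

// Hypothetical sketch: convert the Thrift-level Index to its JDO model
// counterpart and persist it inside a metastore transaction.
private void addIndexSketch(Index index) throws InvalidObjectException, MetaException {
  boolean committed = false;
  try {
    openTransaction();
    MIndex mIndex = convertToMIndex(index); // throws if the SD or tables are missing
    pm.makePersistent(mIndex);
    committed = commitTransaction();
  } finally {
    if (!committed) {
      rollbackTransaction();
    }
  }
}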
Use of org.apache.hadoop.hive.metastore.model.MStorageDescriptor in project hive by apache.
From the class ObjectStore, method convertToMPart.
/**
 * Convert a Partition object into an MPartition, which is an object backed by the db.
 * If the Partition's set of columns is the same as the parent table's AND useTableCD
 * is true, then this partition's storage descriptor's column descriptor will point
 * to the same one as the table's storage descriptor.
 * @param part the partition to convert
 * @param useTableCD whether to try to use the parent table's column descriptor
 * @return the model partition object
 * @throws InvalidObjectException
 * @throws MetaException
 */
private MPartition convertToMPart(Partition part, boolean useTableCD)
    throws InvalidObjectException, MetaException {
  if (part == null) {
    return null;
  }
  MTable mt = getMTable(part.getDbName(), part.getTableName());
  if (mt == null) {
    throw new InvalidObjectException("Partition doesn't have a valid table or database name");
  }
  // If this partition's set of columns is the same as the parent table's,
  // use the parent table's, so we do not create a duplicate column descriptor,
  // thereby saving space
  MStorageDescriptor msd;
  if (useTableCD && mt.getSd() != null && mt.getSd().getCD() != null
      && mt.getSd().getCD().getCols() != null && part.getSd() != null
      && convertToFieldSchemas(mt.getSd().getCD().getCols()).equals(part.getSd().getCols())) {
    msd = convertToMStorageDescriptor(part.getSd(), mt.getSd().getCD());
  } else {
    msd = convertToMStorageDescriptor(part.getSd());
  }
  return new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt.getPartitionKeys()),
      part.getValues()), mt, part.getValues(), part.getCreateTime(), part.getLastAccessTime(),
      msd, part.getParameters());
}
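The column-descriptor sharing above is what keeps the metastore schema compact when thousands of partitions share one table schema. A minimal sketch of that effect from the calling side, assuming the surrounding ObjectStore context with its JDO PersistenceManager pm; addPartitionsSketch is a hypothetical name.

// Hypothetical sketch: persist many partitions while reusing the table's
// column descriptor. With useTableCD = true, every MPartition whose columns
// match the table's points at the table's MColumnDescriptor instead of
// creating a duplicate descriptor per partition.
private void addPartitionsSketch(List<Partition> parts)
    throws InvalidObjectException, MetaException {
  List<MPartition> mparts = new ArrayList<>();
  for (Partition p : parts) {
    mparts.add(convertToMPart(p, true)); // true => try to share the table's CD
  }
  pm.makePersistentAll(mparts);
}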
Use of org.apache.hadoop.hive.metastore.model.MStorageDescriptor in project hive by apache.
From the class ObjectStore, method alterPartitionNoTxn.
private Partition alterPartitionNoTxn(String catName, String dbname, String name,
    MPartition oldp, Partition newPart, String validWriteIds,
    Ref<MColumnDescriptor> oldCd, MTable table)
    throws InvalidObjectException, MetaException {
  catName = normalizeIdentifier(catName);
  name = normalizeIdentifier(name);
  dbname = normalizeIdentifier(dbname);
  MPartition newp = convertToMPart(newPart, table, false);
  MColumnDescriptor oldCD = null;
  MStorageDescriptor oldSD = oldp.getSd();
  if (oldSD != null) {
    oldCD = oldSD.getCD();
  }
  if (newp == null) {
    throw new InvalidObjectException("partition does not exist.");
  }
  oldp.setValues(newp.getValues());
  oldp.setPartitionName(newp.getPartitionName());
  boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters());
  if (isTxn && areTxnStatsSupported) {
    // A transactional table is being altered without a txn. Make sure there are
    // no illegal changes to the stats-accuracy flag.
    String errorMsg = verifyStatsChangeCtx(TableName.getDbTable(dbname, name),
        oldp.getParameters(), newPart.getParameters(), newPart.getWriteId(),
        validWriteIds, false);
    if (errorMsg != null) {
      throw new MetaException(errorMsg);
    }
  }
  oldp.setParameters(newPart.getParameters());
  if (!TableType.VIRTUAL_VIEW.name().equals(oldp.getTable().getTableType())) {
    copyMSD(newp.getSd(), oldp.getSd());
  }
  if (newp.getCreateTime() != oldp.getCreateTime()) {
    oldp.setCreateTime(newp.getCreateTime());
  }
  if (newp.getLastAccessTime() != oldp.getLastAccessTime()) {
    oldp.setLastAccessTime(newp.getLastAccessTime());
  }
  // If the table is transactional, update the write ID and, if needed,
  // invalidate the basic-stats flag for the current updater query.
  if (isTxn) {
    if (!areTxnStatsSupported) {
      StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE);
    } else if (validWriteIds != null && newPart.getWriteId() > 0) {
      // Check the concurrent-INSERT case and set the flag to false.
      if (!isCurrentStatsValidForTheQuery(oldp, validWriteIds, true)) {
        StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE);
        LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition "
            + dbname + "." + name + "." + oldp.getPartitionName());
      }
      oldp.setWriteId(newPart.getWriteId());
    }
  }
  oldCd.t = oldCD;
  return convertToPart(oldp, false);
}
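The method reports the partition's previous column descriptor through the oldCd out-parameter rather than deleting it itself. A minimal sketch of the calling pattern, assuming Ref exposes a public field t (as used above) and a single-argument constructor, and that unused-CD cleanup goes through ObjectStore's removeUnusedColumnDescriptor helper; alterPartitionSketch is a hypothetical name.

// Hypothetical calling pattern: perform the alter, then drop the old column
// descriptor only if no other storage descriptor still references it.
private Partition alterPartitionSketch(String catName, String dbname, String name,
    MPartition oldp, Partition newPart, String validWriteIds, MTable table)
    throws InvalidObjectException, MetaException {
  Ref<MColumnDescriptor> oldCd = new Ref<>(null); // assumed constructor
  Partition result = alterPartitionNoTxn(catName, dbname, name, oldp, newPart,
      validWriteIds, oldCd, table);
  if (oldCd.t != null) {
    removeUnusedColumnDescriptor(oldCd.t); // no-op if the CD is still in use
  }
  return result;
}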
Use of org.apache.hadoop.hive.metastore.model.MStorageDescriptor in project hive by apache.
From the class ObjectStore, method updateMStorageDescriptorTblURI.
/**
 * The following API
 *
 * - updateMStorageDescriptorTblURI
 *
 * is used by HiveMetaTool. This API **shouldn't** be exposed via Thrift.
 */
public UpdateMStorageDescriptorTblURIRetVal updateMStorageDescriptorTblURI(URI oldLoc,
    URI newLoc, boolean isDryRun) {
  boolean committed = false;
  Query query = null;
  Map<String, String> updateLocations = new HashMap<>();
  List<String> badRecords = new ArrayList<>();
  int numNullRecords = 0;
  UpdateMStorageDescriptorTblURIRetVal retVal = null;
  try {
    openTransaction();
    query = pm.newQuery(MStorageDescriptor.class);
    List<MStorageDescriptor> mSDSs = (List<MStorageDescriptor>) query.execute();
    pm.retrieveAll(mSDSs);
    for (MStorageDescriptor mSDS : mSDSs) {
      URI locationURI;
      String location = mSDS.getLocation();
      if (location == null) {
        // This can happen for a view or an index.
        numNullRecords++;
        continue;
      }
      try {
        locationURI = new Path(location).toUri();
      } catch (IllegalArgumentException e) {
        // Record the malformed location once and skip this SD.
        badRecords.add(location);
        continue;
      }
      if (shouldUpdateURI(locationURI, oldLoc)) {
        // Note: replaceAll interprets oldLoc as a regular expression; locations
        // containing regex metacharacters would need Pattern.quote().
        String tblLoc = mSDS.getLocation().replaceAll(oldLoc.toString(), newLoc.toString());
        updateLocations.put(locationURI.toString(), tblLoc);
        if (!isDryRun) {
          mSDS.setLocation(tblLoc);
        }
      }
    }
    committed = commitTransaction();
    if (committed) {
      retVal = new UpdateMStorageDescriptorTblURIRetVal(badRecords, updateLocations, numNullRecords);
    }
    return retVal;
  } finally {
    rollbackAndCleanup(committed, query);
  }
}
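For reference, HiveMetaTool's location-update flow can drive this API in two passes: a dry run to preview the rewrites, then a real pass to apply them. A minimal sketch under stated assumptions: objectStore is an initialized ObjectStore, and the return value exposes a getBadRecords() accessor (an assumption; the exact accessor name is not shown above).

// Hypothetical two-pass usage: preview with a dry run, then apply.
URI oldLoc = URI.create("hdfs://old-nn:8020/warehouse");
URI newLoc = URI.create("hdfs://new-nn:8020/warehouse");
UpdateMStorageDescriptorTblURIRetVal preview =
    objectStore.updateMStorageDescriptorTblURI(oldLoc, newLoc, true);  // dry run
if (preview != null && preview.getBadRecords().isEmpty()) {           // assumed accessor
  objectStore.updateMStorageDescriptorTblURI(oldLoc, newLoc, false);  // apply for real
}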
Use of org.apache.hadoop.hive.metastore.model.MStorageDescriptor in project hive by apache.
From the class ObjectStore, method detachCdsFromSdsNoTxn.
/**
 * Detaches column descriptors from storage descriptors and returns the set of unique CDs
 * thus detached. This is done before dropping partitions because CDs are reused between
 * SDs; we remove the links from the SDs that are about to be deleted, and the caller then
 * checks the returned CDs to see whether they are still referenced by other SDs.
 */
private Set<MColumnDescriptor> detachCdsFromSdsNoTxn(String catName, String dbName,
    String tblName, List<String> partNames) {
  Pair<Query, Map<String, String>> queryWithParams =
      getPartQueryWithParams(catName, dbName, tblName, partNames);
  try (QueryWrapper query = new QueryWrapper(queryWithParams.getLeft())) {
    query.setClass(MPartition.class);
    query.setResult("sd");
    List<MStorageDescriptor> sds =
        (List<MStorageDescriptor>) query.executeWithMap(queryWithParams.getRight());
    HashSet<MColumnDescriptor> candidateCds = new HashSet<>();
    for (MStorageDescriptor sd : sds) {
      if (sd != null && sd.getCD() != null) {
        candidateCds.add(sd.getCD());
        sd.setCD(null);
      }
    }
    return candidateCds;
  }
}
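To see why the detach step matters, here is a minimal sketch (not actual Hive source) of a drop-partitions flow that consumes the returned set. dropPartitionsViaJdo is a hypothetical placeholder for the actual drop step, and removeUnusedColumnDescriptor refers to ObjectStore's existing check-and-delete helper.

// Hypothetical flow: detach CDs first, drop the partitions (and their SDs),
// then delete each candidate CD that no remaining SD references.
Set<MColumnDescriptor> candidateCds =
    detachCdsFromSdsNoTxn(catName, dbName, tblName, partNames);
dropPartitionsViaJdo(catName, dbName, tblName, partNames); // hypothetical drop step
for (MColumnDescriptor cd : candidateCds) {
  removeUnusedColumnDescriptor(cd); // deletes the CD only if nothing references it
}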