Example usage of org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest in the Apache Hive project: the Hive class's getPartitionsByNames(String, String, List&lt;String&gt;, Table) method.
/**
 * Fetches partitions of the given table matching the given partition names,
 * returning the raw thrift-level Partition objects.
 *
 * @param dbName database name of the table
 * @param tableName table name
 * @param partitionNames partition names to look up
 * @param t client-side Table object, forwarded to the request-based overload
 * @return thrift Partition objects for the matching partitions
 * @throws HiveException if the metastore call fails
 */
public List<org.apache.hadoop.hive.metastore.api.Partition> getPartitionsByNames(String dbName, String tableName, List<String> partitionNames, Table t) throws HiveException {
  try {
    GetPartitionsByNamesRequest req = new GetPartitionsByNamesRequest();
    req.setDb_name(dbName);
    req.setTbl_name(tableName);
    req.setNames(partitionNames);
    return getPartitionsByNames(req, t);
  } catch (HiveException e) {
    // Rethrow as-is instead of wrapping a HiveException in another HiveException,
    // which would obscure the original error for callers that inspect it.
    LOG.error("Failed getPartitionsByNames", e);
    throw e;
  } catch (Exception e) {
    LOG.error("Failed getPartitionsByNames", e);
    throw new HiveException(e);
  }
}
Example usage of org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest in the Apache Hive project: the Hive class's batched getPartitionsByNames(Table, List&lt;String&gt;, boolean) method.
/**
* Get all partitions of the table that matches the list of given partition names.
*
* @param tbl
* object for which partition is needed. Must be partitioned.
* @param partNames
* list of partition names
* @param getColStats
* if true, Partition object includes column statistics for that partition.
* @return list of partition objects
* @throws HiveException
*/
public List<Partition> getPartitionsByNames(Table tbl, List<String> partNames, boolean getColStats) throws HiveException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  List<Partition> partitions = new ArrayList<>(partNames.size());
  int batchSize = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
  // Guard against a misconfigured (zero/negative) batch size: without this,
  // nParts / batchSize below divides by zero. Treat it as "no batching".
  if (batchSize <= 0) {
    batchSize = Integer.MAX_VALUE;
  }
  // TODO: might want to increase the default batch size. 1024 is viable; MS gets OOM if too high.
  int nParts = partNames.size();
  int nBatches = nParts / batchSize;
  try {
    // Full batches go through the request-based API.
    for (int i = 0; i < nBatches; ++i) {
      GetPartitionsByNamesRequest req = new GetPartitionsByNamesRequest();
      req.setDb_name(tbl.getDbName());
      req.setTbl_name(tbl.getTableName());
      req.setNames(partNames.subList(i * batchSize, (i + 1) * batchSize));
      // BUG FIX: honor the caller's getColStats flag. This was hard-coded to
      // false, so column statistics were silently dropped for every full batch
      // even when the caller requested them (only the remainder path below
      // passed getColStats through).
      req.setGet_col_stats(getColStats);
      addConvertedPartitions(getPartitionsByNames(req, tbl), tbl, partitions);
    }
    // Remaining partial batch (fewer than batchSize names).
    if (nParts > nBatches * batchSize) {
      String validWriteIdList = null;
      Long tableId = null;
      if (AcidUtils.isTransactionalTable(tbl)) {
        ValidWriteIdList vWriteIdList = getValidWriteIdList(tbl.getDbName(), tbl.getTableName());
        validWriteIdList = vWriteIdList != null ? vWriteIdList.toString() : null;
        tableId = tbl.getTTable().getId();
      }
      // NOTE(review): validWriteIdList and tableId are computed above but not
      // passed to this call — they were likely meant to be forwarded (e.g. via
      // an overload or the request object) for transactional-table snapshot
      // consistency. Verify against the getMSC() API before removing them.
      List<org.apache.hadoop.hive.metastore.api.Partition> tParts = getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), partNames.subList(nBatches * batchSize, nParts), getColStats, Constants.HIVE_ENGINE);
      addConvertedPartitions(tParts, tbl, partitions);
    }
  } catch (HiveException e) {
    // Preserve the original HiveException rather than wrapping it again.
    throw e;
  } catch (Exception e) {
    throw new HiveException(e);
  }
  return partitions;
}

/**
 * Converts thrift-level Partition objects to client-side Partition wrappers
 * and appends them to {@code partitions}. A null input list is a no-op.
 */
private static void addConvertedPartitions(List<org.apache.hadoop.hive.metastore.api.Partition> tParts, Table tbl, List<Partition> partitions) throws HiveException {
  if (tParts != null) {
    for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) {
      partitions.add(new Partition(tbl, tpart));
    }
  }
}
Aggregations