Usage of org.apache.hadoop.hive.metastore.api.PartitionValuesRow in the Apache Hive project:
In class TestListPartitions, method assertCorrectPartitionValuesResponse.
/**
 * Asserts that {@code resp} contains exactly the expected partition value rows,
 * in the same order and with the same per-row values as {@code testValues}.
 *
 * @param testValues expected rows, outer list ordered to match the response
 * @param resp       the PartitionValuesResponse under test
 * @throws Exception propagated from assertion failures
 */
private static void assertCorrectPartitionValuesResponse(List<List<String>> testValues, PartitionValuesResponse resp) throws Exception {
    assertEquals(testValues.size(), resp.getPartitionValuesSize());
    List<PartitionValuesRow> rowList = resp.getPartitionValues();
    for (int i = 0; i < rowList.size(); ++i) {
        PartitionValuesRow pvr = rowList.get(i);
        List<String> values = pvr.getRow();
        // Verify row width explicitly: the value loop below is bounded by the
        // ACTUAL row's size, so without this check a response row with missing
        // trailing values would pass undetected.
        assertEquals(testValues.get(i).size(), values.size());
        for (int j = 0; j < values.size(); ++j) {
            assertEquals(testValues.get(i).get(j), values.get(j));
        }
    }
}
Usage of org.apache.hadoop.hive.metastore.api.PartitionValuesRow in the Apache Hive project:
In class ObjectStore, method getDistinctValuesForPartitionsNoTxn.
/**
 * Retrieves values for the given partition key columns of a table via a direct
 * JDO query over MPartition, optionally de-duplicating with DISTINCT.
 *
 * @param dbName        database of the table
 * @param tableName     table whose partition values are listed
 * @param cols          partition key columns to project into each result row
 * @param applyDistinct if true, prepend DISTINCT to the result projection
 * @param ascending     currently IGNORED — ordering was disabled (see note below)
 * @param maxParts      maximum number of rows to return; values <= 0 mean no limit
 * @return a response whose rows each hold one String value per requested column
 * @throws MetaException on metastore query failure
 */
private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbName, String tableName, List<FieldSchema> cols, boolean applyDistinct, boolean ascending, long maxParts) throws MetaException {
    boolean success = false;
    Query q = null;
    try {
        openTransaction();
        q = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + "where table.database.name == t1 && table.tableName == t2 ");
        q.declareParameters("java.lang.String t1, java.lang.String t2");
        // NOTE: 'ascending' is intentionally unused at present. Ordering by
        // partitionName was disabled; to restore it:
        //   q.setOrdering("partitionName " + (ascending ? "ascending" : "descending"));
        if (maxParts > 0) {
            q.setRange(0, maxParts);
        }
        StringBuilder partValuesSelect = new StringBuilder(256);
        if (applyDistinct) {
            partValuesSelect.append("DISTINCT ");
        }
        List<FieldSchema> partitionKeys = getTable(dbName, tableName).getPartitionKeys();
        for (FieldSchema key : cols) {
            partValuesSelect.append(extractPartitionKey(key, partitionKeys)).append(", ");
        }
        // Drop the trailing ", " appended by the loop above.
        partValuesSelect.setLength(partValuesSelect.length() - 2);
        LOG.info("Columns to be selected from Partitions: {}", partValuesSelect);
        q.setResult(partValuesSelect.toString());
        PartitionValuesResponse response = new PartitionValuesResponse();
        response.setPartitionValues(new ArrayList<PartitionValuesRow>());
        if (cols.size() > 1) {
            // Multi-column projection: JDO returns each row as Object[].
            List<Object[]> results = (List<Object[]>) q.execute(dbName, tableName);
            for (Object[] row : results) {
                PartitionValuesRow rowResponse = new PartitionValuesRow();
                for (Object columnValue : row) {
                    rowResponse.addToRow((String) columnValue);
                }
                response.addToPartitionValues(rowResponse);
            }
        } else {
            // Single-column projection: JDO returns each row as a bare Object.
            List<Object> results = (List<Object>) q.execute(dbName, tableName);
            for (Object row : results) {
                PartitionValuesRow rowResponse = new PartitionValuesRow();
                rowResponse.addToRow((String) row);
                response.addToPartitionValues(rowResponse);
            }
        }
        success = commitTransaction();
        return response;
    } finally {
        // Original code committed unconditionally in finally, committing even
        // after a failure; follow the standard ObjectStore pattern instead:
        // roll back unless the commit above succeeded.
        if (!success) {
            rollbackTransaction();
        }
        // Close the query in finally so result resources are released even if
        // execution or row conversion threw (the original leaked on exception).
        if (q != null) {
            q.closeAll();
        }
    }
}
Usage of org.apache.hadoop.hive.metastore.api.PartitionValuesRow in the Apache Hive project:
In class ObjectStore, method extractPartitionNamesByFilter.
/**
 * Builds a PartitionValuesResponse for the partitions of a table that match the
 * given filter. First attempts the cheap partition-name lookup; if that fails
 * with a MetaException, falls back to fetching full Partition objects and
 * reconstructing names from their values.
 *
 * @param dbName        database of the table
 * @param tableName     table whose partitions are filtered
 * @param filter        metastore filter expression
 * @param cols          partition key columns (unused here beyond logging)
 * @param ascending     if false, results are sorted in descending name order
 * @param applyDistinct currently unused in this path
 * @param maxParts      maximum number of partitions to return
 * @return one PartitionValuesRow per matching partition name
 * @throws MetaException         if neither lookup strategy yields results
 * @throws NoSuchObjectException if the table does not exist
 */
private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, String tableName, String filter, List<FieldSchema> cols, boolean ascending, boolean applyDistinct, long maxParts) throws MetaException, NoSuchObjectException {
    LOG.info("Database: {} Table: {} filter: \"{}\" cols: {}", dbName, tableName, filter, cols);
    List<String> partitionNames = null;
    List<Partition> partitions = null;
    Table tbl = getTable(dbName, tableName);
    // Fail fast with the declared exception instead of NPE-ing later on
    // tbl.getPartitionKeys().
    if (tbl == null) {
        throw new NoSuchObjectException("Table " + dbName + "." + tableName + " does not exist");
    }
    try {
        // Get partitions by name - ascending or descending
        partitionNames = getPartitionNamesByFilter(dbName, tableName, filter, ascending, maxParts);
    } catch (MetaException e) {
        // Pass the throwable as the last SLF4J argument so the stack trace is
        // preserved (the original swallowed it).
        LOG.warn("Querying by partition names failed, trying out with partition objects, filter: {}", filter, e);
    }
    if (partitionNames == null) {
        // Clamp before narrowing: a plain (short) cast of a large long wraps
        // and can produce a negative/garbage limit.
        partitions = getPartitionsByFilter(dbName, tableName, filter, (short) Math.min(maxParts, Short.MAX_VALUE));
    }
    if (partitions != null) {
        partitionNames = new ArrayList<String>(partitions.size());
        for (Partition partition : partitions) {
            // Check for NULL's just to be safe
            if (tbl.getPartitionKeys() != null && partition.getValues() != null) {
                partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), partition.getValues()));
            }
        }
    }
    if (partitionNames == null && partitions == null) {
        throw new MetaException("Cannot obtain list of partitions by filter:\"" + filter + "\" for " + dbName + ":" + tableName);
    }
    if (!ascending) {
        Collections.sort(partitionNames, Collections.reverseOrder());
    }
    // Return proper response
    PartitionValuesResponse response = new PartitionValuesResponse();
    response.setPartitionValues(new ArrayList<PartitionValuesRow>(partitionNames.size()));
    LOG.info("Converting responses to Partition values for items: {}", partitionNames.size());
    for (String partName : partitionNames) {
        // Pre-fill with nulls so makeValsFromName can write by position.
        ArrayList<String> vals = new ArrayList<String>(Collections.nCopies(tbl.getPartitionKeys().size(), null));
        PartitionValuesRow row = new PartitionValuesRow();
        Warehouse.makeValsFromName(partName, vals);
        for (String value : vals) {
            row.addToRow(value);
        }
        response.addToPartitionValues(row);
    }
    return response;
}
Aggregations