Usage of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in the Apache Hive project,
from the class TestListPartitions, method testListPartitionSpecsByFilterNullFilter.
/**
 * Verifies that passing a {@code null} filter together with an unlimited
 * max-parts value (-1) returns every partition of the four-partition-column
 * test table.
 */
@Test
public void testListPartitionSpecsByFilterNullFilter() throws Exception {
    List<List<String>> expectedValues = createTable4PartColsParts(client).testValues;
    PartitionSpecProxy result = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, null, -1);
    assertPartitionsSpecProxy(result, expectedValues);
}
Usage of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in the Apache Hive project,
from the class TestListPartitions, method testListPartitionsSpecsByFilter.
/**
 * Testing listPartitionSpecsByFilter(String,String,String,int) ->
 * get_part_specs_by_filter(String,String,String,int).
 *
 * Exercises the filter string and the max-parts limit across matching,
 * truncated, empty, and case-insensitive scenarios.
 */
@Test
public void testListPartitionsSpecsByFilter() throws Exception {
    List<List<String>> testValues = createTable4PartColsParts(client).testValues;
    // Adjacent literals fold to the same constant the original concatenation produced.
    final String orFilter = "yyyy=\"2017\" OR mm=\"02\"";

    // Unlimited max parts: partitions 1..3 satisfy the OR filter.
    PartitionSpecProxy proxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, orFilter, -1);
    assertPartitionsSpecProxy(proxy, testValues.subList(1, 4));

    // A max-parts limit of 2 truncates the matching set.
    proxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, orFilter, 2);
    assertPartitionsSpecProxy(proxy, testValues.subList(1, 3));

    // A max-parts limit of 0 yields an empty result.
    proxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, orFilter, 0);
    assertPartitionsSpecProxy(proxy, Lists.newArrayList());

    // A filter value matching no partition yields an empty result.
    proxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, "yyyy=\"20177\"", -1);
    assertPartitionsSpecProxy(proxy, Lists.newArrayList());

    // HIVE-18977: mixed-case partition column name in the filter; only checked
    // when the direct-SQL path is enabled.
    if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) {
        proxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, "yYyY=\"2017\"", -1);
        assertPartitionsSpecProxy(proxy, testValues.subList(2, 4));
    }

    // An AND filter with a month that exists nowhere yields an empty result.
    proxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, "yyyy=\"2017\" AND mm=\"99\"", -1);
    assertPartitionsSpecProxy(proxy, Lists.newArrayList());
}
Usage of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in the Netflix Metacat project,
from the class CatalogThriftHiveMetastore, method add_partitions_pspec.
/**
 * {@inheritDoc}
 */
@Override
public int add_partitions_pspec(final List<PartitionSpec> newParts) throws TException {
    if (newParts == null || newParts.isEmpty()) {
        return 0;
    }
    // Database and table come from the first spec; the specs are assumed to
    // all target that same table — NOTE(review): confirm this invariant holds
    // for every caller.
    final PartitionSpec firstSpec = newParts.get(0);
    final String dbName = firstSpec.getDbName();
    final String tableName = firstSpec.getTableName();
    return requestWrapper("add_partition", new Object[] { dbName, tableName }, () -> {
        final PartitionSpecProxy proxy = PartitionSpecProxy.Factory.get(newParts);
        final List<Partition> added =
            addPartitionsCore(dbName, tableName, Lists.newArrayList(proxy.getPartitionIterator()), false);
        return added.size();
    });
}
Usage of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in the Apache Flink project,
from the class HiveCatalog, method listPartitionsByFilter.
/**
 * Lists the partitions of {@code tablePath} that satisfy the given filter
 * expressions by translating them into an HMS filter string and querying
 * the metastore for matching partition specs.
 *
 * @throws UnsupportedOperationException if the expressions cannot be
 *         translated into an HMS filter, or if the metastore call fails
 */
@Override
public List<CatalogPartitionSpec> listPartitionsByFilter(ObjectPath tablePath, List<Expression> expressions) throws TableNotExistException, TableNotPartitionedException, CatalogException {
    Table hiveTable = getHiveTable(tablePath);
    ensurePartitionedTable(tablePath, hiveTable);
    List<String> partitionColumns = getFieldNames(hiveTable.getPartitionKeys());
    Optional<String> filter = HiveTableUtil.makePartitionFilter(
        HiveTableUtil.getNonPartitionFields(hiveConf, hiveTable, hiveShim).size(),
        partitionColumns, expressions, hiveShim);
    if (!filter.isPresent()) {
        // Not every Flink expression maps onto an HMS filter string.
        throw new UnsupportedOperationException(
            "HiveCatalog is unable to handle the partition filter expressions: " + expressions);
    }
    try {
        // -1 max parts: fetch every matching partition.
        PartitionSpecProxy proxy = client.listPartitionSpecsByFilter(
            tablePath.getDatabaseName(), tablePath.getObjectName(), filter.get(), (short) -1);
        List<CatalogPartitionSpec> result = new ArrayList<>(proxy.size());
        for (PartitionSpecProxy.PartitionIterator it = proxy.getPartitionIterator(); it.hasNext(); ) {
            Partition partition = it.next();
            // Partition values are positional; pair each with its column name.
            List<String> values = partition.getValues();
            Map<String, String> spec = new HashMap<>();
            for (int i = 0; i < partitionColumns.size(); i++) {
                spec.put(partitionColumns.get(i), values.get(i));
            }
            result.add(new CatalogPartitionSpec(spec));
        }
        return result;
    } catch (TException e) {
        throw new UnsupportedOperationException(
            "Failed to list partition by filter from HMS, filter expressions: " + expressions, e);
    }
}
Aggregations