Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
From the class TestHiveMetaStore, method testTableFilter:
@Test
public void testTableFilter() throws Exception {
try {
String dbName = "testTableFilter";
String owner1 = "testOwner1";
String owner2 = "testOwner2";
int lastAccessTime1 = 90;
int lastAccessTime2 = 30;
String tableName1 = "table1";
String tableName2 = "table2";
String tableName3 = "table3";
client.dropTable(dbName, tableName1);
client.dropTable(dbName, tableName2);
client.dropTable(dbName, tableName3);
silentDropDatabase(dbName);
Database db = new Database();
db.setName(dbName);
db.setDescription("Alter Partition Test database");
client.createDatabase(db);
Table table1 = createTableForTestFilter(dbName, tableName1, owner1, lastAccessTime1, true);
Table table2 = createTableForTestFilter(dbName, tableName2, owner2, lastAccessTime2, true);
Table table3 = createTableForTestFilter(dbName, tableName3, owner1, lastAccessTime2, false);
List<String> tableNames;
String filter;
// test owner
// owner like ".*Owner.*" and owner like "test.*"
filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER + " like \".*Owner.*\" and " + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER + " like \"test.*\"";
tableNames = client.listTableNamesByFilter(dbName, filter, (short) -1);
assertEquals(3, tableNames.size());
assertTrue(tableNames.contains(table1.getTableName()));
assertTrue(tableNames.contains(table2.getTableName()));
assertTrue(tableNames.contains(table3.getTableName()));
// owner = "testOwner1"
filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER + " = \"testOwner1\"";
tableNames = client.listTableNamesByFilter(dbName, filter, (short) -1);
assertEquals(2, tableNames.size());
assertTrue(tableNames.contains(table1.getTableName()));
assertTrue(tableNames.contains(table3.getTableName()));
// lastAccessTime < 90
filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_LAST_ACCESS + " < 90";
tableNames = client.listTableNamesByFilter(dbName, filter, (short) -1);
assertEquals(2, tableNames.size());
assertTrue(tableNames.contains(table2.getTableName()));
assertTrue(tableNames.contains(table3.getTableName()));
// lastAccessTime > 90
filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_LAST_ACCESS + " > 90";
tableNames = client.listTableNamesByFilter(dbName, filter, (short) -1);
assertEquals(0, tableNames.size());
// test params
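// Note: HIVE_FILTER_FIELD_PARAMS is a key prefix; the parameter name is appended directly to it with no separator.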
// test_param_2 = "50"
filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "test_param_2 LIKE \"50\"";
tableNames = client.listTableNamesByFilter(dbName, filter, (short) -1);
assertEquals(2, tableNames.size());
assertTrue(tableNames.contains(table1.getTableName()));
assertTrue(tableNames.contains(table2.getTableName()));
// test_param_2 = "75"
filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "test_param_2 LIKE \"75\"";
tableNames = client.listTableNamesByFilter(dbName, filter, (short) -1);
assertEquals(0, tableNames.size());
// key_dne = "50"
filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "key_dne LIKE \"50\"";
tableNames = client.listTableNamesByFilter(dbName, filter, (short) -1);
assertEquals(0, tableNames.size());
// test_param_1 <> "yellow"
filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "test_param_1 <> \"yellow\"";
// Commented out as part of HIVE-12274: != and <> are not supported for CLOBs
// tableNames = client.listTableNamesByFilter(dbName, filter, (short) 2);
// assertEquals(2, tableNames.size());
// test_param_1 != "yellow"
filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "test_param_1 != \"yellow\"";
// tableNames = client.listTableNamesByFilter(dbName, filter, (short) 2);
// assertEquals(2, tableNames.size());
// owner = "testOwner1" and (lastAccessTime = 30 or test_param_1 = "hi")
filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER + " = \"testOwner1\" and (" + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 30 or " + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "test_param_1 LIKE \"hi\")";
tableNames = client.listTableNamesByFilter(dbName, filter, (short) -1);
assertEquals(2, tableNames.size());
assertTrue(tableNames.contains(table1.getTableName()));
assertTrue(tableNames.contains(table3.getTableName()));
// Negative tests
Exception me = null;
try {
filter = "badKey = \"testOwner1\"";
tableNames = client.listTableNamesByFilter(dbName, filter, (short) -1);
} catch (MetaException e) {
me = e;
}
assertNotNull(me);
assertTrue("Bad filter key test", me.getMessage().contains("Invalid key name in filter"));
client.dropTable(dbName, tableName1);
client.dropTable(dbName, tableName2);
client.dropTable(dbName, tableName3);
client.dropDatabase(dbName);
} catch (Exception e) {
System.err.println(StringUtils.stringifyException(e));
System.err.println("testTableFilter() failed.");
throw e;
}
}
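For quick reference, here is a minimal sketch of driving the same filter grammar from client code. It assumes an already-connected IMetaStoreClient named client (setup omitted) and is illustrative, not part of the test above.

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;

public class TableFilterSketch {
  // Returns the names of tables in dbName owned by the given owner.
  static List<String> tablesOwnedBy(IMetaStoreClient client, String dbName, String owner) throws Exception {
    // Filter expressions reference a fixed set of fields; string literals are double-quoted.
    String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER + " = \"" + owner + "\"";
    // maxTables = -1 means no limit on the number of names returned.
    return client.listTableNamesByFilter(dbName, filter, (short) -1);
  }
}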
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
From the class TestHiveMetaStore, method testDatabaseLocationWithPermissionProblems:
@Test
public void testDatabaseLocationWithPermissionProblems() throws Exception {
if (System.getProperty("user.name").equals("root")) {
System.err.println("Skipping test because you are running as root!");
return;
}
silentDropDatabase(TEST_DB1_NAME);
Database db = new Database();
db.setName(TEST_DB1_NAME);
String dbLocation = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test/_testDB_create_";
FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf);
fs.mkdirs(new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"), new FsPermission((short) 0));
db.setLocationUri(dbLocation);
boolean createFailed = false;
try {
client.createDatabase(db);
} catch (MetaException cantCreateDB) {
createFailed = true;
} finally {
// Cleanup
if (!createFailed) {
try {
client.dropDatabase(TEST_DB1_NAME);
} catch (Exception e) {
System.err.println("Failed to remove database in cleanup: " + e.getMessage());
}
}
fs.setPermission(new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"), new FsPermission((short) 0755));
fs.delete(new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"), true);
}
assertTrue("Database creation succeeded even with permission problem", createFailed);
}
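A note on the FsPermission literals above: the short-valued constructor consumes raw mode bits, so the value must be written in octal. A standalone sketch, independent of this test:

import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionSketch {
  public static void main(String[] args) {
    // 0755 (octal) is rwxr-xr-x; decimal 755 encodes a different, surprising bit pattern.
    System.out.println(new FsPermission((short) 0755)); // rwxr-xr-x
    System.out.println(new FsPermission((short) 755));  // not rwxr-xr-x
  }
}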
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
From the class TestHiveMetaStore, method testGetTableObjects:
/**
* Tests that table objects can be retrieved in batches.
*/
@Test
public void testGetTableObjects() throws Exception {
String dbName = "db";
List<String> tableNames = Arrays.asList("table1", "table2", "table3", "table4", "table5");
// Setup
silentDropDatabase(dbName);
Database db = new Database();
db.setName(dbName);
client.createDatabase(db);
for (String tableName : tableNames) {
createTable(dbName, tableName);
}
// Test
List<Table> tableObjs = client.getTableObjectsByName(dbName, tableNames);
// Verify
assertEquals(tableNames.size(), tableObjs.size());
for (Table table : tableObjs) {
assertTrue(tableNames.contains(table.getTableName().toLowerCase()));
}
// Cleanup
client.dropDatabase(dbName, true, true, true);
}
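The batch call above avoids one metastore round trip per table. A minimal sketch of the same pattern as a helper, assuming a connected IMetaStoreClient (names here are illustrative):

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;

public class BatchFetchSketch {
  // Fetches full Table objects for many names in a single call instead of
  // one getTable() round trip per name.
  static List<Table> fetchAll(IMetaStoreClient client, String dbName, List<String> tableNames) throws Exception {
    return client.getTableObjectsByName(dbName, tableNames);
  }
}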
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
From the class TestHiveMetaStore, method partitionTester:
private static void partitionTester(HiveMetaStoreClient client, Configuration conf) throws Exception {
try {
String dbName = "compdb";
String tblName = "comptbl";
String typeName = "Person";
List<String> vals = makeVals("2008-07-01 14:13:12", "14");
List<String> vals2 = makeVals("2008-07-01 14:13:12", "15");
List<String> vals3 = makeVals("2008-07-02 14:13:12", "15");
List<String> vals4 = makeVals("2008-07-03 14:13:12", "151");
client.dropTable(dbName, tblName);
silentDropDatabase(dbName);
Database db = new Database();
db.setName(dbName);
client.createDatabase(db);
db = client.getDatabase(dbName);
Path dbPath = new Path(db.getLocationUri());
FileSystem fs = FileSystem.get(dbPath.toUri(), conf);
client.dropType(typeName);
Type typ1 = new Type();
typ1.setName(typeName);
typ1.setFields(new ArrayList<>(2));
typ1.getFields().add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
typ1.getFields().add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
client.createType(typ1);
List<String> skewedColValue = Collections.singletonList("1");
Table tbl = new TableBuilder()
    .setDbName(dbName)
    .setTableName(tblName)
    .setCols(typ1.getFields())
    .setNumBuckets(1)
    .addBucketCol("name")
    .addTableParam("test_param_1", "Use this for comments etc")
    .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1")
    .addSkewedColName("name")
    .setSkewedColValues(Collections.singletonList(skewedColValue))
    .setSkewedColValueLocationMaps(Collections.singletonMap(skewedColValue, "location1"))
    .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
    .addPartCol("hr", ColumnType.STRING_TYPE_NAME)
    .build();
client.createTable(tbl);
if (isThriftClient) {
// the createTable() above does not update the location in the 'tbl'
// object when the client is a thrift client and the code below relies
// on the location being present in the 'tbl' object - so get the table
// from the metastore
tbl = client.getTable(dbName, tblName);
}
Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
Partition part4 = makePartitionObject(dbName, tblName, vals4, tbl, "/part4");
// check if the partition exists (it shouldn't)
boolean exceptionThrown = false;
try {
Partition p = client.getPartition(dbName, tblName, vals);
} catch (Exception e) {
assertEquals("partition should not have existed", NoSuchObjectException.class, e.getClass());
exceptionThrown = true;
}
assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
Partition retp = client.add_partition(part);
assertNotNull("Unable to create partition " + part, retp);
Partition retp2 = client.add_partition(part2);
assertNotNull("Unable to create partition " + part2, retp2);
Partition retp3 = client.add_partition(part3);
assertNotNull("Unable to create partition " + part3, retp3);
Partition retp4 = client.add_partition(part4);
assertNotNull("Unable to create partition " + part4, retp4);
Partition part_get = client.getPartition(dbName, tblName, part.getValues());
if (isThriftClient) {
// since we are using thrift, 'part' will not have the create time and
// last DDL time set since it does not get updated in the add_partition()
// call - likewise part2 and part3 - set it correctly so that equals check
// doesn't fail
adjust(client, part, dbName, tblName);
adjust(client, part2, dbName, tblName);
adjust(client, part3, dbName, tblName);
}
assertTrue("Partitions are not same", part.equals(part_get));
// check null cols schemas for a partition
List<String> vals6 = makeVals("2016-02-22 00:00:00", "16");
Partition part6 = makePartitionObject(dbName, tblName, vals6, tbl, "/part5");
part6.getSd().setCols(null);
LOG.info("Creating partition will null field schema");
client.add_partition(part6);
LOG.info("Listing all partitions for table " + dbName + "." + tblName);
final List<Partition> partitions = client.listPartitions(dbName, tblName, (short) -1);
boolean foundPart = false;
for (Partition p : partitions) {
if (p.getValues().equals(vals6)) {
assertNull(p.getSd().getCols());
LOG.info("Found partition " + p + " having null field schema");
foundPart = true;
}
}
assertTrue(foundPart);
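// Partition names below are key=value components joined by '/', with values path-escaped.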
String partName = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=14";
String part2Name = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=15";
String part3Name = "ds=" + FileUtils.escapePathName("2008-07-02 14:13:12") + "/hr=15";
String part4Name = "ds=" + FileUtils.escapePathName("2008-07-03 14:13:12") + "/hr=151";
part_get = client.getPartition(dbName, tblName, partName);
assertTrue("Partitions are not the same", part.equals(part_get));
// Test partition listing with a partial spec - ds is specified but hr is not
List<String> partialVals = new ArrayList<>();
partialVals.add(vals.get(0));
Set<Partition> parts = new HashSet<>();
parts.add(part);
parts.add(part2);
List<Partition> partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
assertTrue("Should have returned 2 partitions", partial.size() == 2);
assertTrue("Not all parts returned", partial.containsAll(parts));
Set<String> partNames = new HashSet<>();
partNames.add(partName);
partNames.add(part2Name);
List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
assertTrue("Not all part names returned", partialNames.containsAll(partNames));
partNames.add(part3Name);
partNames.add(part4Name);
partialVals.clear();
partialVals.add("");
partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
assertTrue("Should have returned 5 partition names", partialNames.size() == 5);
assertTrue("Not all part names returned", partialNames.containsAll(partNames));
// Test partition listing with a partial spec - hr is specified but ds is not
parts.clear();
parts.add(part2);
parts.add(part3);
partialVals.clear();
partialVals.add("");
partialVals.add(vals2.get(1));
partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
assertEquals("Should have returned 2 partitions", 2, partial.size());
assertTrue("Not all parts returned", partial.containsAll(parts));
partNames.clear();
partNames.add(part2Name);
partNames.add(part3Name);
partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
assertEquals("Should have returned 2 partition names", 2, partialNames.size());
assertTrue("Not all part names returned", partialNames.containsAll(partNames));
// Verify escaped partition names don't return partitions
exceptionThrown = false;
try {
String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
client.getPartition(dbName, tblName, badPartName);
} catch (NoSuchObjectException e) {
exceptionThrown = true;
}
assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
Path partPath = new Path(part.getSd().getLocation());
assertTrue(fs.exists(partPath));
client.dropPartition(dbName, tblName, part.getValues(), true);
assertFalse(fs.exists(partPath));
// Test append_partition_by_name
client.appendPartition(dbName, tblName, partName);
Partition part5 = client.getPartition(dbName, tblName, part.getValues());
assertTrue("Append partition by name failed", part5.getValues().equals(vals));
Path part5Path = new Path(part5.getSd().getLocation());
assertTrue(fs.exists(part5Path));
// Test drop_partition_by_name
assertTrue("Drop partition by name failed", client.dropPartition(dbName, tblName, partName, true));
assertFalse(fs.exists(part5Path));
// add the partition again so that drop table with a partition can be
// tested
retp = client.add_partition(part);
assertNotNull("Unable to create partition " + part, retp);
// test add_partitions
List<String> mvals1 = makeVals("2008-07-04 14:13:12", "14641");
List<String> mvals2 = makeVals("2008-07-04 14:13:12", "14642");
List<String> mvals3 = makeVals("2008-07-04 14:13:12", "14643");
// mvals4 is identical to mvals3; the duplicate is used below to make add_partitions fail
List<String> mvals4 = makeVals("2008-07-04 14:13:12", "14643");
List<String> mvals5 = makeVals("2008-07-04 14:13:12", "14645");
Exception savedException;
// add_partitions(empty list) : ok, normal operation
client.add_partitions(new ArrayList<>());
// add_partitions(1,2,3) : ok, normal operation
Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1");
Partition mpart2 = makePartitionObject(dbName, tblName, mvals2, tbl, "/mpart2");
Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3");
client.add_partitions(Arrays.asList(mpart1, mpart2, mpart3));
if (isThriftClient) {
// do DDL time munging if thrift mode
adjust(client, mpart1, dbName, tblName);
adjust(client, mpart2, dbName, tblName);
adjust(client, mpart3, dbName, tblName);
}
verifyPartitionsPublished(client, dbName, tblName, Arrays.asList(mvals1.get(0)), Arrays.asList(mpart1, mpart2, mpart3));
Partition mpart4 = makePartitionObject(dbName, tblName, mvals4, tbl, "/mpart4");
Partition mpart5 = makePartitionObject(dbName, tblName, mvals5, tbl, "/mpart5");
// create dir for /mpart5
Path mp5Path = new Path(mpart5.getSd().getLocation());
warehouse.mkdirs(mp5Path);
assertTrue(fs.exists(mp5Path));
// add_partitions(5,4) : err = duplicate keyvals on mpart4
savedException = null;
try {
client.add_partitions(Arrays.asList(mpart5, mpart4));
} catch (Exception e) {
savedException = e;
} finally {
assertNotNull(savedException);
}
// check that /mpart4 does not exist, but /mpart5 still does.
assertTrue(fs.exists(mp5Path));
assertFalse(fs.exists(new Path(mpart4.getSd().getLocation())));
// add_partitions(5) : ok
client.add_partitions(Arrays.asList(mpart5));
if (isThriftClient) {
// do DDL time munging if thrift mode
adjust(client, mpart5, dbName, tblName);
}
verifyPartitionsPublished(client, dbName, tblName, Arrays.asList(mvals1.get(0)), Arrays.asList(mpart1, mpart2, mpart3, mpart5));
// end add_partitions tests
client.dropTable(dbName, tblName);
client.dropType(typeName);
// recreate table as external, drop partition and it should
// still exist
tbl.setParameters(new HashMap<>());
tbl.getParameters().put("EXTERNAL", "TRUE");
client.createTable(tbl);
retp = client.add_partition(part);
assertTrue(fs.exists(partPath));
client.dropPartition(dbName, tblName, part.getValues(), true);
assertTrue(fs.exists(partPath));
for (String tableName : client.getTables(dbName, "*")) {
client.dropTable(dbName, tableName);
}
client.dropDatabase(dbName);
} catch (Exception e) {
System.err.println(StringUtils.stringifyException(e));
System.err.println("testPartition() failed.");
throw e;
}
}
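The partition-name strings in the test follow the key=value convention with path-escaped values. A small sketch of assembling one; it assumes the same FileUtils.escapePathName helper the test imports (the exact package varies across Hive versions):

// Exact package depends on the Hive version; hive-common provides one variant.
import org.apache.hadoop.hive.common.FileUtils;

public class PartNameSketch {
  static String partName(String ds, String hr) {
    // Each component is key=escapedValue; components are joined with '/'.
    return "ds=" + FileUtils.escapePathName(ds) + "/hr=" + FileUtils.escapePathName(hr);
  }

  public static void main(String[] args) {
    // ':' is escaped to %3A, giving ds=2008-07-01 14%3A13%3A12/hr=14
    System.out.println(partName("2008-07-01 14:13:12", "14"));
  }
}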
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
From the class TestHiveMetaStore, method testColumnStatistics:
@Test
public void testColumnStatistics() throws Throwable {
String dbName = "columnstatstestdb";
String tblName = "tbl";
String typeName = "Person";
String tblOwner = "testowner";
int lastAccessed = 6796;
try {
cleanUp(dbName, tblName, typeName);
Database db = new Database();
db.setName(dbName);
client.createDatabase(db);
createTableForTestFilter(dbName, tblName, tblOwner, lastAccessed, true);
// Create a ColumnStatistics Obj
String[] colName = new String[] { "income", "name" };
double lowValue = 50000.21;
double highValue = 1200000.4525;
long numNulls = 3;
long numDVs = 22;
double avgColLen = 50.30;
long maxColLen = 102;
String[] colType = new String[] { "double", "string" };
boolean isTblLevel = true;
String partName = null;
List<ColumnStatisticsObj> statsObjs = new ArrayList<>();
ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc();
statsDesc.setDbName(dbName);
statsDesc.setTableName(tblName);
statsDesc.setIsTblLevel(isTblLevel);
statsDesc.setPartName(partName);
ColumnStatisticsObj statsObj = new ColumnStatisticsObj();
statsObj.setColName(colName[0]);
statsObj.setColType(colType[0]);
ColumnStatisticsData statsData = new ColumnStatisticsData();
DoubleColumnStatsData numericStats = new DoubleColumnStatsData();
statsData.setDoubleStats(numericStats);
statsData.getDoubleStats().setHighValue(highValue);
statsData.getDoubleStats().setLowValue(lowValue);
statsData.getDoubleStats().setNumDVs(numDVs);
statsData.getDoubleStats().setNumNulls(numNulls);
statsObj.setStatsData(statsData);
statsObjs.add(statsObj);
statsObj = new ColumnStatisticsObj();
statsObj.setColName(colName[1]);
statsObj.setColType(colType[1]);
statsData = new ColumnStatisticsData();
StringColumnStatsData stringStats = new StringColumnStatsData();
statsData.setStringStats(stringStats);
statsData.getStringStats().setAvgColLen(avgColLen);
statsData.getStringStats().setMaxColLen(maxColLen);
statsData.getStringStats().setNumDVs(numDVs);
statsData.getStringStats().setNumNulls(numNulls);
statsObj.setStatsData(statsData);
statsObjs.add(statsObj);
ColumnStatistics colStats = new ColumnStatistics();
colStats.setStatsDesc(statsDesc);
colStats.setStatsObj(statsObjs);
// write stats objs persistently
client.updateTableColumnStatistics(colStats);
// retrieve the stats obj that was just written
ColumnStatisticsObj colStats2 = client.getTableColumnStatistics(dbName, tblName, Lists.newArrayList(colName[0])).get(0);
// compare stats obj to ensure what we get is what we wrote
assertNotNull(colStats2);
assertEquals(colStats2.getColName(), colName[0]);
assertEquals(colStats2.getStatsData().getDoubleStats().getLowValue(), lowValue, 0.01);
assertEquals(colStats2.getStatsData().getDoubleStats().getHighValue(), highValue, 0.01);
assertEquals(colStats2.getStatsData().getDoubleStats().getNumNulls(), numNulls);
assertEquals(colStats2.getStatsData().getDoubleStats().getNumDVs(), numDVs);
// test delete column stats; if no col name is passed, all column stats associated with the
// table are deleted
boolean status = client.deleteTableColumnStatistics(dbName, tblName, null);
assertTrue(status);
// try to query stats for a column for which no stats exist
assertTrue(client.getTableColumnStatistics(dbName, tblName, Lists.newArrayList(colName[1])).isEmpty());
colStats.setStatsDesc(statsDesc);
colStats.setStatsObj(statsObjs);
// update table level column stats
client.updateTableColumnStatistics(colStats);
// query column stats for column whose stats were updated in the previous call
colStats2 = client.getTableColumnStatistics(dbName, tblName, Lists.newArrayList(colName[0])).get(0);
// partition level column statistics test
// create a table with multiple partitions
cleanUp(dbName, tblName, typeName);
List<List<String>> values = new ArrayList<>();
values.add(makeVals("2008-07-01 14:13:12", "14"));
values.add(makeVals("2008-07-01 14:13:12", "15"));
values.add(makeVals("2008-07-02 14:13:12", "15"));
values.add(makeVals("2008-07-03 14:13:12", "151"));
createMultiPartitionTableSchema(dbName, tblName, typeName, values);
List<String> partitions = client.listPartitionNames(dbName, tblName, (short) -1);
partName = partitions.get(0);
isTblLevel = false;
// create a new columnstatistics desc to represent partition level column stats
statsDesc = new ColumnStatisticsDesc();
statsDesc.setDbName(dbName);
statsDesc.setTableName(tblName);
statsDesc.setPartName(partName);
statsDesc.setIsTblLevel(isTblLevel);
colStats = new ColumnStatistics();
colStats.setStatsDesc(statsDesc);
colStats.setStatsObj(statsObjs);
client.updatePartitionColumnStatistics(colStats);
colStats2 = client.getPartitionColumnStatistics(dbName, tblName, Lists.newArrayList(partName), Lists.newArrayList(colName[1])).get(partName).get(0);
// compare stats obj to ensure what we get is what we wrote
assertNotNull(colStats2);
assertEquals(colStats.getStatsDesc().getPartName(), partName);
assertEquals(colStats2.getColName(), colName[1]);
assertEquals(colStats2.getStatsData().getStringStats().getMaxColLen(), maxColLen);
assertEquals(colStats2.getStatsData().getStringStats().getAvgColLen(), avgColLen, 0.01);
assertEquals(colStats2.getStatsData().getStringStats().getNumNulls(), numNulls);
assertEquals(colStats2.getStatsData().getStringStats().getNumDVs(), numDVs);
// test stats deletion at partition level
client.deletePartitionColumnStatistics(dbName, tblName, partName, colName[1]);
colStats2 = client.getPartitionColumnStatistics(dbName, tblName, Lists.newArrayList(partName), Lists.newArrayList(colName[0])).get(partName).get(0);
// test get stats on a column for which no stats exist
assertTrue(client.getPartitionColumnStatistics(dbName, tblName, Lists.newArrayList(partName), Lists.newArrayList(colName[1])).isEmpty());
} catch (Exception e) {
System.err.println(StringUtils.stringifyException(e));
System.err.println("testColumnStatistics() failed.");
throw e;
} finally {
cleanUp(dbName, tblName, typeName);
}
}
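For reference, here is the stats wiring used above reduced to a single double column; a sketch built from the same thrift classes, with illustrative values:

import java.util.Collections;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;

public class ColumnStatsSketch {
  // Builds table-level statistics for one double column; values are illustrative.
  static ColumnStatistics doubleColStats(String dbName, String tblName, String colName) {
    DoubleColumnStatsData doubleStats = new DoubleColumnStatsData();
    doubleStats.setLowValue(0.0);
    doubleStats.setHighValue(100.0);
    doubleStats.setNumNulls(0);
    doubleStats.setNumDVs(10);
    // ColumnStatisticsData is a thrift union; set exactly one branch.
    ColumnStatisticsData data = new ColumnStatisticsData();
    data.setDoubleStats(doubleStats);
    ColumnStatisticsObj obj = new ColumnStatisticsObj(colName, "double", data);
    ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true /* isTblLevel */, dbName, tblName);
    return new ColumnStatistics(desc, Collections.singletonList(obj));
  }
}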