Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
The class SharedCache, method populateTableInCache:
public void populateTableInCache(Table table, ColumnStatistics tableColStats, List<Partition> partitions,
    List<ColumnStatistics> partitionColStats, AggrStats aggrStatsAllPartitions,
    AggrStats aggrStatsAllButDefaultPartition) {
  String dbName = StringUtils.normalizeIdentifier(table.getDbName());
  String tableName = StringUtils.normalizeIdentifier(table.getTableName());
  // 1. Don't add tables that were deleted while we were preparing the list for prewarm
  if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableCacheKey(dbName, tableName))) {
    return;
  }
  TableWrapper tblWrapper = createTableWrapper(dbName, tableName, table);
  if (!table.isSetPartitionKeys() && (tableColStats != null)) {
    // Non-partitioned table: cache the table-level column stats directly
    tblWrapper.updateTableColStats(tableColStats.getStatsObj());
  } else {
    if (partitions != null) {
      tblWrapper.cachePartitions(partitions, this);
    }
    if (partitionColStats != null) {
      for (ColumnStatistics cs : partitionColStats) {
        List<String> partVal;
        try {
          partVal = Warehouse.makeValsFromName(cs.getStatsDesc().getPartName(), null);
          List<ColumnStatisticsObj> colStats = cs.getStatsObj();
          tblWrapper.updatePartitionColStats(partVal, colStats);
        } catch (MetaException e) {
          LOG.debug("Unable to cache partition column stats for table: " + tableName, e);
        }
      }
    }
    tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions, aggrStatsAllButDefaultPartition);
  }
  try {
    cacheLock.writeLock().lock();
    // 2. Skip overwriting existing table object
    // (which is present because it was added after prewarm started)
    tableCache.putIfAbsent(CacheUtils.buildTableCacheKey(dbName, tableName), tblWrapper);
  } finally {
    cacheLock.writeLock().unlock();
  }
}
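The per-partition loop above depends on Warehouse.makeValsFromName to turn a stored partition name back into its list of values. A minimal sketch of that step, with a made-up partition name (passing null lets the method allocate the result list, as the loop above also does):

// Hypothetical partition name; makeValsFromName parses the "key=value"
// pairs separated by '/' and returns the values in partition-key order.
List<String> vals = Warehouse.makeValsFromName("part1=1/part2=a", null);
System.out.println(vals); // [1, a]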
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
The class VerifyingObjectStore, method getTableColumnStatistics:
@Override
public ColumnStatistics getTableColumnStatistics(String dbName, String tableName, List<String> colNames)
    throws MetaException, NoSuchObjectException {
  // Fetch the stats twice: once via direct SQL, once via JDO
  ColumnStatistics sqlResult = getTableColumnStatisticsInternal(dbName, tableName, colNames, true, false);
  ColumnStatistics jdoResult = getTableColumnStatisticsInternal(dbName, tableName, colNames, false, true);
  // Fail if the two retrieval paths disagree
  verifyObjects(sqlResult, jdoResult, ColumnStatistics.class);
  return sqlResult;
}
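For context, a hedged usage sketch: assuming store is an initialized RawStore (for example a VerifyingObjectStore) and that "mydb", "mytbl", and the long-typed column "col1" are made-up names with stats recorded, reading them back looks roughly like this:

ColumnStatistics cs = store.getTableColumnStatistics("mydb", "mytbl", Arrays.asList("col1"));
for (ColumnStatisticsObj obj : cs.getStatsObj()) {
  // getStatsData() is a Thrift union; getLongStats() is valid here only
  // because we assume col1 carries long statistics.
  LongColumnStatsData ls = obj.getStatsData().getLongStats();
  System.out.println(obj.getColName() + ": ndv=" + ls.getNumDVs() + ", nulls=" + ls.getNumNulls());
}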
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
The class TestCachedStore, method testPartitionAggrStatsBitVector:
@Test
public void testPartitionAggrStatsBitVector() throws Exception {
  String dbName = "testTableColStatsOps2";
  String tblName = "tbl2";
  String colName = "f1";
  Database db = new Database(dbName, null, "some_location", null);
  cachedStore.createDatabase(db);
  List<FieldSchema> cols = new ArrayList<>();
  cols.add(new FieldSchema(colName, "int", null));
  List<FieldSchema> partCols = new ArrayList<>();
  partCols.add(new FieldSchema("col", "int", null));
  StorageDescriptor sd = new StorageDescriptor(cols, null, "input", "output", false, 0,
      new SerDeInfo("serde", "seriallib", new HashMap<>()), null, null, null);
  Table tbl = new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null,
      TableType.MANAGED_TABLE.toString());
  cachedStore.createTable(tbl);
  List<String> partVals1 = new ArrayList<>();
  partVals1.add("1");
  List<String> partVals2 = new ArrayList<>();
  partVals2.add("2");
  Partition ptn1 = new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>());
  cachedStore.addPartition(ptn1);
  Partition ptn2 = new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>());
  cachedStore.addPartition(ptn2);
  ColumnStatistics stats = new ColumnStatistics();
  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
  statsDesc.setPartName("col");
  List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
  ColumnStatisticsData data = new ColumnStatisticsData();
  ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data);
  LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
  longStats.setLowValue(0);
  longStats.setHighValue(100);
  longStats.setNumNulls(50);
  longStats.setNumDVs(30);
  // Attach an HLL sketch of {1, 2, 3} so NDVs can be merged by union across partitions
  HyperLogLog hll = HyperLogLog.builder().build();
  hll.addLong(1);
  hll.addLong(2);
  hll.addLong(3);
  longStats.setBitVectors(hll.serialize());
  data.setLongStats(longStats);
  colStatObjs.add(colStats);
  stats.setStatsDesc(statsDesc);
  stats.setStatsObj(colStatObjs);
  cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
  // Second partition: different NDV and a sketch of {2, 3, 4, 5}
  longStats.setNumDVs(40);
  hll = HyperLogLog.builder().build();
  hll.addLong(2);
  hll.addLong(3);
  hll.addLong(4);
  hll.addLong(5);
  longStats.setBitVectors(hll.serialize());
  cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
  List<String> colNames = new ArrayList<>();
  colNames.add(colName);
  List<String> aggrPartVals = new ArrayList<>();
  aggrPartVals.add("1");
  aggrPartVals.add("2");
  // {1,2,3} union {2,3,4,5} has 5 distinct values, so the merged NDV is 5
  AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames);
  Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
  Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5);
  // A repeated call must return the same aggregate values
  aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames);
  Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
  Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5);
}
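The assertion of 5 works because each partition's stats carry a serialized HyperLogLog sketch: the aggregator can merge the sketches and estimate the cardinality of the union {1,2,3} and {2,3,4,5} instead of guessing from the per-partition NDV counts (30 and 40 are deliberately wrong here). A sketch of that merge, assuming Hive's HyperLogLog exposes merge and count methods that behave this way:

// Build the same two sketches as the test and merge them.
HyperLogLog hll1 = HyperLogLog.builder().build();
hll1.addLong(1);
hll1.addLong(2);
hll1.addLong(3);
HyperLogLog hll2 = HyperLogLog.builder().build();
hll2.addLong(2);
hll2.addLong(3);
hll2.addLong(4);
hll2.addLong(5);
hll1.merge(hll2);                  // union of the two sketches
System.out.println(hll1.count()); // ~5: distinct values {1,2,3,4,5}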
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
The class TestCachedStore, method testTableColStatsOps:
// @Test
public void testTableColStatsOps() throws Exception {
  // Add a db via ObjectStore
  String dbName = "testTableColStatsOps";
  String dbOwner = "user1";
  Database db = createTestDb(dbName, dbOwner);
  objectStore.createDatabase(db);
  db = objectStore.getDatabase(dbName);
  // Add a table via ObjectStore
  final String tblName = "tbl";
  final String tblOwner = "user1";
  final FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
  // Stats values for col1
  long col1LowVal = 5;
  long col1HighVal = 500;
  long col1Nulls = 10;
  long col1DV = 20;
  final FieldSchema col2 = new FieldSchema("col2", "string", "string column");
  // Stats values for col2
  long col2MaxColLen = 100;
  double col2AvgColLen = 45.5;
  long col2Nulls = 5;
  long col2DV = 40;
  final FieldSchema col3 = new FieldSchema("col3", "boolean", "boolean column");
  // Stats values for col3
  long col3NumTrues = 100;
  long col3NumFalses = 30;
  long col3Nulls = 10;
  final List<FieldSchema> cols = new ArrayList<>();
  cols.add(col1);
  cols.add(col2);
  cols.add(col3);
  FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column");
  List<FieldSchema> ptnCols = new ArrayList<>();
  ptnCols.add(ptnCol1);
  Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
  objectStore.createTable(tbl);
  tbl = objectStore.getTable(dbName, tblName);
  // Add ColumnStatistics for tbl to the metastore DB via ObjectStore
  ColumnStatistics stats = new ColumnStatistics();
  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
  List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
  // Col1 (long stats)
  ColumnStatisticsData data1 = new ColumnStatisticsData();
  ColumnStatisticsObj col1Stats = new ColumnStatisticsObj(col1.getName(), col1.getType(), data1);
  LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
  longStats.setLowValue(col1LowVal);
  longStats.setHighValue(col1HighVal);
  longStats.setNumNulls(col1Nulls);
  longStats.setNumDVs(col1DV);
  data1.setLongStats(longStats);
  colStatObjs.add(col1Stats);
  // Col2 (string stats)
  ColumnStatisticsData data2 = new ColumnStatisticsData();
  ColumnStatisticsObj col2Stats = new ColumnStatisticsObj(col2.getName(), col2.getType(), data2);
  StringColumnStatsDataInspector stringStats = new StringColumnStatsDataInspector();
  stringStats.setMaxColLen(col2MaxColLen);
  stringStats.setAvgColLen(col2AvgColLen);
  stringStats.setNumNulls(col2Nulls);
  stringStats.setNumDVs(col2DV);
  data2.setStringStats(stringStats);
  colStatObjs.add(col2Stats);
  // Col3 (boolean stats)
  ColumnStatisticsData data3 = new ColumnStatisticsData();
  ColumnStatisticsObj col3Stats = new ColumnStatisticsObj(col3.getName(), col3.getType(), data3);
  BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
  boolStats.setNumTrues(col3NumTrues);
  boolStats.setNumFalses(col3NumFalses);
  boolStats.setNumNulls(col3Nulls);
  data3.setBooleanStats(boolStats);
  colStatObjs.add(col3Stats);
  stats.setStatsDesc(statsDesc);
  stats.setStatsObj(colStatObjs);
  // Save to the DB
  objectStore.updateTableColumnStatistics(stats);
  // Prewarm CachedStore
  CachedStore.setCachePrewarmedState(false);
  CachedStore.prewarm(objectStore);
  // Read table stats via CachedStore
  ColumnStatistics newStats = cachedStore.getTableColumnStatistics(dbName, tblName,
      Arrays.asList(col1.getName(), col2.getName(), col3.getName()));
  Assert.assertEquals(stats, newStats);
  // Clean up
  objectStore.dropTable(dbName, tblName);
  objectStore.dropDatabase(dbName);
  sharedCache.getDatabaseCache().clear();
  sharedCache.getTableCache().clear();
  sharedCache.getSdCache().clear();
}
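Since ColumnStatisticsData is a Thrift union, each ColumnStatisticsObj returned above carries exactly one typed payload (long, string, boolean, ...). A hedged sketch of dispatching on the union's set field when consuming newStats, assuming the standard Thrift-generated getSetField accessor:

// Walk the stats returned above; getSetField() reveals which union member
// each column actually carries.
for (ColumnStatisticsObj obj : newStats.getStatsObj()) {
  ColumnStatisticsData d = obj.getStatsData();
  switch (d.getSetField()) {
    case LONG_STATS:
      System.out.println(obj.getColName() + " ndv=" + d.getLongStats().getNumDVs());
      break;
    case STRING_STATS:
      System.out.println(obj.getColName() + " avgLen=" + d.getStringStats().getAvgColLen());
      break;
    case BOOLEAN_STATS:
      System.out.println(obj.getColName() + " trues=" + d.getBooleanStats().getNumTrues());
      break;
    default:
      break;
  }
}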
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
The class TestCachedStore, method testPartitionAggrStats:
@Test
public void testPartitionAggrStats() throws Exception {
  String dbName = "testTableColStatsOps1";
  String tblName = "tbl1";
  String colName = "f1";
  Database db = new Database(dbName, null, "some_location", null);
  cachedStore.createDatabase(db);
  List<FieldSchema> cols = new ArrayList<>();
  cols.add(new FieldSchema(colName, "int", null));
  List<FieldSchema> partCols = new ArrayList<>();
  partCols.add(new FieldSchema("col", "int", null));
  StorageDescriptor sd = new StorageDescriptor(cols, null, "input", "output", false, 0,
      new SerDeInfo("serde", "seriallib", new HashMap<>()), null, null, null);
  Table tbl = new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null,
      TableType.MANAGED_TABLE.toString());
  cachedStore.createTable(tbl);
  List<String> partVals1 = new ArrayList<>();
  partVals1.add("1");
  List<String> partVals2 = new ArrayList<>();
  partVals2.add("2");
  Partition ptn1 = new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>());
  cachedStore.addPartition(ptn1);
  Partition ptn2 = new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>());
  cachedStore.addPartition(ptn2);
  ColumnStatistics stats = new ColumnStatistics();
  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
  statsDesc.setPartName("col");
  List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
  ColumnStatisticsData data = new ColumnStatisticsData();
  ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data);
  LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
  longStats.setLowValue(0);
  longStats.setHighValue(100);
  longStats.setNumNulls(50);
  longStats.setNumDVs(30);
  data.setLongStats(longStats);
  colStatObjs.add(colStats);
  stats.setStatsDesc(statsDesc);
  stats.setStatsObj(colStatObjs);
  cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
  longStats.setNumDVs(40);
  cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
  List<String> colNames = new ArrayList<>();
  colNames.add(colName);
  List<String> aggrPartVals = new ArrayList<>();
  aggrPartVals.add("1");
  aggrPartVals.add("2");
  AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames);
  Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
  Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40);
  aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames);
  Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
  Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40);
}
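Unlike testPartitionAggrStatsBitVector, this test attaches no bit vectors, so the per-partition NDVs (30 and 40) cannot be merged by union; the aggregate NDV comes out as the larger of the two, while null counts are summed. A minimal arithmetic illustration of that fallback (not the metastore's exact estimator, which may also extrapolate NDVs from a density heuristic):

long[] partitionNdvs = { 30, 40 };
long[] partitionNulls = { 50, 50 };
long aggrNdv = Math.max(partitionNdvs[0], partitionNdvs[1]); // 40, matching the assertion
long aggrNulls = partitionNulls[0] + partitionNulls[1];      // 100, null counts are additive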