Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
From the class HBaseReadWrite, the method getPartitionStatistics:
/**
 * Get statistics for a set of partitions.
 *
 * @param dbName name of the database the table is in
 * @param tblName table the partitions are in
 * @param partNames names of the partitions, used only to set values inside the returned stats
 *                  objects
 * @param partVals partition values for each partition, needed because this class doesn't know
 *                 how to translate from partName to partVals
 * @param colNames column names to fetch stats for; these columns will be fetched for all
 *                 requested partitions
 * @return list of ColumnStatistics, one for each partition for which we found at least one
 *         column's stats
 * @throws IOException if the underlying HBase read fails
 */
List<ColumnStatistics> getPartitionStatistics(String dbName, String tblName,
    List<String> partNames, List<List<String>> partVals, List<String> colNames)
    throws IOException {
  List<ColumnStatistics> statsList = new ArrayList<>(partNames.size());
  Map<List<String>, String> valToPartMap = new HashMap<>(partNames.size());
  List<Get> gets = new ArrayList<>(partNames.size() * colNames.size());
  assert partNames.size() == partVals.size();
  // Encode each column name once; each becomes an HBase column qualifier.
  byte[][] colNameBytes = new byte[colNames.size()][];
  for (int i = 0; i < colNames.size(); i++) {
    colNameBytes[i] = HBaseUtils.buildKey(colNames.get(i));
  }
  // Build one Get per partition, requesting every stats column for that row.
  for (int i = 0; i < partNames.size(); i++) {
    valToPartMap.put(partVals.get(i), partNames.get(i));
    byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName,
        HBaseUtils.getPartitionKeyTypes(getTable(dbName, tblName).getPartitionKeys()),
        partVals.get(i));
    Get get = new Get(partKey);
    for (byte[] colName : colNameBytes) {
      get.addColumn(STATS_CF, colName);
    }
    gets.add(get);
  }
  HTableInterface htab = conn.getHBaseTable(PART_TABLE);
  // Fetch all partitions in a single batched call.
  Result[] results = htab.get(gets);
  for (int i = 0; i < results.length; i++) {
    ColumnStatistics colStats = null;
    for (int j = 0; j < colNameBytes.length; j++) {
      byte[] serializedColStats = results[i].getValue(STATS_CF, colNameBytes[j]);
      if (serializedColStats != null) {
        if (colStats == null) {
          // We initialize this late so that we don't create extras in the case of
          // partitions with no stats
          colStats = buildColStats(results[i].getRow(), false);
          statsList.add(colStats);
        }
        ColumnStatisticsObj cso =
            HBaseUtils.deserializeStatsForOneColumn(colStats, serializedColStats);
        cso.setColName(colNames.get(j));
        colStats.addToStatsObj(cso);
      }
    }
  }
  return statsList;
}
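For orientation, a minimal usage sketch (not from the Hive source): it assumes an already-initialized HBaseReadWrite instance named hrw and a table default.sales partitioned on ds; those names are illustrative only.

// Hypothetical caller; hrw, "default", "sales", and the partition names are assumptions.
List<String> partNames = Arrays.asList("ds=2017-01-01", "ds=2017-01-02");
List<List<String>> partVals = Arrays.asList(
    Arrays.asList("2017-01-01"), Arrays.asList("2017-01-02"));
List<ColumnStatistics> stats = hrw.getPartitionStatistics(
    "default", "sales", partNames, partVals, Arrays.asList("col1", "col2"));
// Partitions with no stored stats are simply absent, so the result may be
// shorter than partNames.
for (ColumnStatistics cs : stats) {
  System.out.println(cs.getStatsDesc().getPartName() + ": "
      + cs.getStatsObjSize() + " column stat objects");
}

Note the batched htab.get(gets) call above: one round trip covers every requested partition, which is why the method takes lists rather than being invoked once per partition.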
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
From the class HBaseReadWrite, the method getTableStatistics:
/**
 * Get statistics for a table.
 *
 * @param dbName name of the database the table is in
 * @param tblName name of the table
 * @param colNames list of column names to get statistics for
 * @return column statistics for the indicated table
 * @throws IOException if the underlying HBase read fails
 */
ColumnStatistics getTableStatistics(String dbName, String tblName, List<String> colNames)
    throws IOException {
  byte[] tabKey = HBaseUtils.buildKey(dbName, tblName);
  ColumnStatistics tableStats = new ColumnStatistics();
  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc();
  statsDesc.setIsTblLevel(true);
  statsDesc.setDbName(dbName);
  statsDesc.setTableName(tblName);
  tableStats.setStatsDesc(statsDesc);
  byte[][] colKeys = new byte[colNames.size()][];
  for (int i = 0; i < colKeys.length; i++) {
    colKeys[i] = HBaseUtils.buildKey(colNames.get(i));
  }
  // Single-row read: all requested stats columns live in one row of the table table.
  Result result = read(TABLE_TABLE, tabKey, STATS_CF, colKeys);
  for (int i = 0; i < colKeys.length; i++) {
    byte[] serializedColStats = result.getValue(STATS_CF, colKeys[i]);
    if (serializedColStats == null) {
      // There were no stats for this column, so skip it
      continue;
    }
    ColumnStatisticsObj obj =
        HBaseUtils.deserializeStatsForOneColumn(tableStats, serializedColStats);
    obj.setColName(colNames.get(i));
    tableStats.addToStatsObj(obj);
  }
  return tableStats;
}
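A corresponding usage sketch for the table-level call, under the same assumptions (a hypothetical hrw instance; the table and column names are illustrative only):

// Hypothetical caller of getTableStatistics.
ColumnStatistics tblStats = hrw.getTableStatistics("default", "sales",
    Arrays.asList("col1", "col2"));
// If no requested column had stats, addToStatsObj was never called and the
// Thrift-generated getStatsObj() returns null, so guard before iterating.
List<ColumnStatisticsObj> objs = tblStats.getStatsObj();
if (objs != null) {
  for (ColumnStatisticsObj obj : objs) {
    System.out.println(obj.getColName() + " -> " + obj.getStatsData());
  }
}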
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
From the class HBaseReadWrite, the method printOnePartition:
private String printOnePartition(Result result) throws IOException, TException {
  byte[] key = result.getRow();
  HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializePartition(key,
      result.getValue(CATALOG_CF, CATALOG_COL), this);
  StringBuilder builder = new StringBuilder();
  builder.append(dumpThriftObject(sdParts.containingPartition))
      .append(" sdHash: ")
      .append(Base64.encodeBase64URLSafeString(sdParts.sdHash))
      .append(" stats:");
  // Walk every column in the stats family and deserialize each one individually.
  NavigableMap<byte[], byte[]> statsCols = result.getFamilyMap(STATS_CF);
  for (Map.Entry<byte[], byte[]> statsCol : statsCols.entrySet()) {
    builder.append(" column ").append(new String(statsCol.getKey(), HBaseUtils.ENCODING))
        .append(": ");
    ColumnStatistics pcs = buildColStats(key, false);
    ColumnStatisticsObj cso =
        HBaseUtils.deserializeStatsForOneColumn(pcs, statsCol.getValue());
    builder.append(dumpThriftObject(cso));
  }
  return builder.toString();
}
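printOnePartition is private and takes a raw HBase Result, so it only makes sense inside a scan loop. A plausible driver, shown here as an assumption rather than code from the source:

// Assumed debugging loop: scan every row of the partition table and dump it.
HTableInterface htab = conn.getHBaseTable(PART_TABLE);
ResultScanner scanner = htab.getScanner(new Scan());
try {
  for (Result r : scanner) {
    System.out.println(printOnePartition(r));
  }
} finally {
  scanner.close();
}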
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
From the class TestHBaseAggrStatsCacheIntegration, the method hit:
@Test
public void hit() throws Exception {
  String dbName = "default";
  String tableName = "hit";
  List<String> partVals1 = Arrays.asList("today");
  List<String> partVals2 = Arrays.asList("yesterday");
  long now = System.currentTimeMillis();
  List<FieldSchema> cols = new ArrayList<>();
  cols.add(new FieldSchema("col1", "boolean", "nocomment"));
  cols.add(new FieldSchema("col2", "varchar", "nocomment"));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
      serde, null, null, Collections.<String, String>emptyMap());
  List<FieldSchema> partCols = new ArrayList<>();
  partCols.add(new FieldSchema("ds", "string", ""));
  Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols,
      Collections.<String, String>emptyMap(), null, null, null);
  store.createTable(table);
  // Write identical boolean and varchar stats for both partitions, so the
  // aggregated counts below are exact doublings of the per-partition values.
  for (List<String> partVals : Arrays.asList(partVals1, partVals2)) {
    StorageDescriptor psd = new StorageDescriptor(sd);
    psd.setLocation("file:/tmp/default/hit/ds=" + partVals.get(0));
    Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd,
        Collections.<String, String>emptyMap());
    store.addPartition(part);
    ColumnStatistics cs = new ColumnStatistics();
    ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName);
    desc.setLastAnalyzed(now);
    desc.setPartName("ds=" + partVals.get(0));
    cs.setStatsDesc(desc);
    ColumnStatisticsObj obj = new ColumnStatisticsObj();
    obj.setColName("col1");
    obj.setColType("boolean");
    ColumnStatisticsData data = new ColumnStatisticsData();
    BooleanColumnStatsData bcsd = new BooleanColumnStatsData();
    bcsd.setNumFalses(10);
    bcsd.setNumTrues(20);
    bcsd.setNumNulls(30);
    data.setBooleanStats(bcsd);
    obj.setStatsData(data);
    cs.addToStatsObj(obj);
    obj = new ColumnStatisticsObj();
    obj.setColName("col2");
    obj.setColType("varchar");
    data = new ColumnStatisticsData();
    StringColumnStatsData scsd = new StringColumnStatsData();
    scsd.setAvgColLen(10.3);
    scsd.setMaxColLen(2000);
    scsd.setNumNulls(3);
    scsd.setNumDVs(12342);
    data.setStringStats(scsd);
    obj.setStatsData(data);
    cs.addToStatsObj(obj);
    store.updatePartitionColumnStatistics(cs, partVals);
  }
  Checker statChecker = new Checker() {
    @Override
    public void checkStats(AggrStats aggrStats) throws Exception {
      Assert.assertEquals(2, aggrStats.getPartsFound());
      Assert.assertEquals(2, aggrStats.getColStatsSize());
      ColumnStatisticsObj cso = aggrStats.getColStats().get(0);
      Assert.assertEquals("col1", cso.getColName());
      Assert.assertEquals("boolean", cso.getColType());
      // Counts are summed across the two partitions (10+10, 20+20, 30+30).
      BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats();
      Assert.assertEquals(20, bcsd.getNumFalses());
      Assert.assertEquals(40, bcsd.getNumTrues());
      Assert.assertEquals(60, bcsd.getNumNulls());
      cso = aggrStats.getColStats().get(1);
      Assert.assertEquals("col2", cso.getColName());
      Assert.assertEquals("varchar", cso.getColType());
      StringColumnStatsData scsd = cso.getStatsData().getStringStats();
      Assert.assertEquals(10.3, scsd.getAvgColLen(), 0.1);
      Assert.assertEquals(2000, scsd.getMaxColLen());
      Assert.assertEquals(6, scsd.getNumNulls());
      Assert.assertEquals(12342, scsd.getNumDVs());
    }
  };
  AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName,
      Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1", "col2"));
  statChecker.checkStats(aggrStats);
  // Check that we had to build the aggregate from the raw stats (both gets missed the cache).
  Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt());
  Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt());
  Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt());
  // Call again, this time it should come from memory.  Also, reverse the name order this
  // time to ensure that we still hit.
  aggrStats = store.get_aggr_stats_for(dbName, tableName,
      Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1", "col2"));
  statChecker.checkStats(aggrStats);
  Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt());
  Assert.assertEquals(4, store.backdoor().getStatsCache().totalGets.getCnt());
  Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt());
  store.backdoor().getStatsCache().flushMemory();
  // Call again, this time it should come from hbase
  aggrStats = store.get_aggr_stats_for(dbName, tableName,
      Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1", "col2"));
  statChecker.checkStats(aggrStats);
  Assert.assertEquals(2, store.backdoor().getStatsCache().hbaseHits.getCnt());
  Assert.assertEquals(6, store.backdoor().getStatsCache().totalGets.getCnt());
  Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt());
}
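The Checker callback used in these tests is a small test-local interface whose definition is not part of this excerpt; reconstructed from its usage here, it amounts to:

// Reconstructed from usage; the actual definition lives elsewhere in
// TestHBaseAggrStatsCacheIntegration.
private interface Checker {
  void checkStats(AggrStats aggrStats) throws Exception;
}

Wrapping the assertions in a callback lets the same checks run after each of the three fetch paths exercised here: fresh build, memory cache, and HBase cache.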
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
From the class TestHBaseAggrStatsCacheIntegration, the method someWithStats:
@Test
public void someWithStats() throws Exception {
  String dbName = "default";
  String tableName = "psws";
  List<String> partVals1 = Arrays.asList("today");
  List<String> partVals2 = Arrays.asList("yesterday");
  long now = System.currentTimeMillis();
  List<FieldSchema> cols = new ArrayList<>();
  cols.add(new FieldSchema("col1", "long", "nocomment"));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
      serde, null, null, Collections.<String, String>emptyMap());
  List<FieldSchema> partCols = new ArrayList<>();
  partCols.add(new FieldSchema("ds", "string", ""));
  Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols,
      Collections.<String, String>emptyMap(), null, null, null);
  store.createTable(table);
  boolean first = true;
  // Only the first partition gets column stats; the second is left bare so the
  // aggregate covers a strict subset of the requested partitions.
  for (List<String> partVals : Arrays.asList(partVals1, partVals2)) {
    StorageDescriptor psd = new StorageDescriptor(sd);
    psd.setLocation("file:/tmp/default/psws/ds=" + partVals.get(0));
    Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd,
        Collections.<String, String>emptyMap());
    store.addPartition(part);
    if (first) {
      ColumnStatistics cs = new ColumnStatistics();
      ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName);
      desc.setLastAnalyzed(now);
      desc.setPartName("ds=" + partVals.get(0));
      cs.setStatsDesc(desc);
      ColumnStatisticsObj obj = new ColumnStatisticsObj();
      obj.setColName("col1");
      obj.setColType("long");
      ColumnStatisticsData data = new ColumnStatisticsData();
      LongColumnStatsData lcsd = new LongColumnStatsData();
      lcsd.setHighValue(192L);
      lcsd.setLowValue(-20L);
      lcsd.setNumNulls(30);
      lcsd.setNumDVs(32);
      data.setLongStats(lcsd);
      obj.setStatsData(data);
      cs.addToStatsObj(obj);
      store.updatePartitionColumnStatistics(cs, partVals);
      first = false;
    }
  }
  Checker statChecker = new Checker() {
    @Override
    public void checkStats(AggrStats aggrStats) throws Exception {
      // Only one of the two partitions had stats, hence partsFound == 1.
      Assert.assertEquals(1, aggrStats.getPartsFound());
      Assert.assertEquals(1, aggrStats.getColStatsSize());
      ColumnStatisticsObj cso = aggrStats.getColStats().get(0);
      Assert.assertEquals("col1", cso.getColName());
      Assert.assertEquals("long", cso.getColType());
      LongColumnStatsData lcsd = cso.getStatsData().getLongStats();
      Assert.assertEquals(192L, lcsd.getHighValue());
      Assert.assertEquals(-20L, lcsd.getLowValue());
      Assert.assertEquals(30, lcsd.getNumNulls());
      Assert.assertEquals(32, lcsd.getNumDVs());
    }
  };
  AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName,
      Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1"));
  statChecker.checkStats(aggrStats);
  // Check that we had to build the aggregate from the raw stats (a cache miss).
  Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt());
  Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt());
  Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt());
  // Call again, this time it should come from memory.  Also, reverse the name order this
  // time to ensure that we still hit.
  aggrStats = store.get_aggr_stats_for(dbName, tableName,
      Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1"));
  statChecker.checkStats(aggrStats);
  Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt());
  Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt());
  Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt());
  store.backdoor().getStatsCache().flushMemory();
  // Call again, this time it should come from hbase
  aggrStats = store.get_aggr_stats_for(dbName, tableName,
      Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1"));
  statChecker.checkStats(aggrStats);
  Assert.assertEquals(1, store.backdoor().getStatsCache().hbaseHits.getCnt());
  Assert.assertEquals(3, store.backdoor().getStatsCache().totalGets.getCnt());
  Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt());
}
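Because only the first partition received stats, partsFound (1) is smaller than the number of partitions requested (2). A hedged sketch of how a caller could exploit that gap, reusing the store instance from this test:

// Detect partial stats coverage before trusting the aggregate values.
List<String> requested = Arrays.asList("ds=today", "ds=yesterday");
AggrStats as = store.get_aggr_stats_for("default", "psws", requested,
    Arrays.asList("col1"));
if (as.getPartsFound() < requested.size()) {
  // Aggregates cover only a subset of the requested partitions; treat counts
  // as lower bounds or fall back to per-partition statistics.
}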