use of org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj in project hive by apache.
the class BooleanColumnStatsAggregator method aggregate.
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo, List<String> partNames, boolean areAllPartsFound) throws MetaException {
  ColumnStatisticsObj statsObj = null;
  String colType = null;
  String colName = null;
  BooleanColumnStatsData aggregateData = null;
  for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
    ColumnStatisticsObj cso = csp.getColStatsObj();
    if (statsObj == null) {
      colName = cso.getColName();
      colType = cso.getColType();
      statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso.getStatsData().getSetField());
    }
    BooleanColumnStatsData newData = cso.getStatsData().getBooleanStats();
    if (aggregateData == null) {
      aggregateData = newData.deepCopy();
    } else {
      aggregateData.setNumTrues(aggregateData.getNumTrues() + newData.getNumTrues());
      aggregateData.setNumFalses(aggregateData.getNumFalses() + newData.getNumFalses());
      aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
    }
  }
  ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData();
  columnStatisticsData.setBooleanStats(aggregateData);
  statsObj.setStatsData(columnStatisticsData);
  return statsObj;
}
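The boolean aggregator is purely additive: each partition's true/false/null counters are summed, and no NDV estimation is involved. Below is a standalone sketch of that same merge on two hand-built partition stats; it is ours, not part of Hive, and assumes only the Thrift-generated BooleanColumnStatsData accessors used above (the three-argument constructor takes the required numTrues, numFalses, numNulls fields).

import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;

public class BooleanStatsMergeSketch {
  public static void main(String[] args) {
    // Hand-built per-partition stats (numTrues, numFalses, numNulls); values are illustrative.
    BooleanColumnStatsData part1 = new BooleanColumnStatsData(100L, 40L, 5L);
    BooleanColumnStatsData part2 = new BooleanColumnStatsData(60L, 90L, 2L);
    // Same merge the aggregator performs: start from a deep copy, then sum the counters.
    BooleanColumnStatsData merged = part1.deepCopy();
    merged.setNumTrues(merged.getNumTrues() + part2.getNumTrues());
    merged.setNumFalses(merged.getNumFalses() + part2.getNumFalses());
    merged.setNumNulls(merged.getNumNulls() + part2.getNumNulls());
    System.out.println(merged); // expect numTrues:160, numFalses:130, numNulls:7
  }
}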
use of org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj in project hive by apache.
the class DateColumnStatsAggregator method aggregate.
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo, List<String> partNames, boolean areAllPartsFound) throws MetaException {
ColumnStatisticsObj statsObj = null;
String colType = null;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats);
}
DateColumnStatsDataInspector dateColumnStats = dateInspectorFromStats(cso);
if (dateColumnStats.getNdvEstimator() == null) {
ndvEstimator = null;
break;
} else {
// check if all of the bit vectors can merge
NumDistinctValueEstimator estimator = dateColumnStats.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (ndvEstimator.canMerge(estimator)) {
continue;
} else {
ndvEstimator = null;
break;
}
}
}
}
if (ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null));
ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
DateColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
    for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
      ColumnStatisticsObj cso = csp.getColStatsObj();
      DateColumnStatsDataInspector newData = dateInspectorFromStats(cso);
      // track the max per-partition NDV as the lower bound and the sum as the upper bound
      lowerBound = Math.max(lowerBound, newData.getNumDVs());
      higherBound += newData.getNumDVs();
      if (newData.isSetLowValue() && newData.isSetHighValue()) {
        densityAvgSum += (diff(newData.getHighValue(), newData.getLowValue())) / newData.getNumDVs();
      }
      if (ndvEstimator != null) {
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (aggregateData == null) {
        aggregateData = newData.deepCopy();
      } else {
        DateColumnStatsMerger merger = new DateColumnStatsMerger();
        merger.setLowValue(aggregateData, newData);
        merger.setHighValue(aggregateData, newData);
        aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
        aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
      }
    }
    if (ndvEstimator != null) {
      // if all the ColumnStatisticsObjs contain bitvectors, we do not need to
      // use uniform distribution assumption because we can merge bitvectors
      // to get a good estimation.
      aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
    } else {
      long estimation;
      if (useDensityFunctionForNDVEstimation) {
        // We have estimation, lowerbound and higherbound. We use estimation
        // if it is between lowerbound and higherbound.
        double densityAvg = densityAvgSum / partNames.size();
        estimation = (long) (diff(aggregateData.getHighValue(), aggregateData.getLowValue()) / densityAvg);
        if (estimation < lowerBound) {
          estimation = lowerBound;
        } else if (estimation > higherBound) {
          estimation = higherBound;
        }
      } else {
        estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
      }
      aggregateData.setNumDVs(estimation);
    }
    columnStatisticsData.setDateStats(aggregateData);
  } else {
    // we need extrapolation
    LOG.debug("start extrapolation for " + colName);
    Map<String, Integer> indexMap = new HashMap<>();
    for (int index = 0; index < partNames.size(); index++) {
      indexMap.put(partNames.get(index), index);
    }
    Map<String, Double> adjustedIndexMap = new HashMap<>();
    Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
    // while we scan the css, we also get the densityAvg, lowerbound and
    // higherbound when useDensityFunctionForNDVEstimation is true.
    double densityAvgSum = 0.0;
    if (ndvEstimator == null) {
      // the traditional extrapolation methods.
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        DateColumnStatsData newData = cso.getStatsData().getDateStats();
        if (useDensityFunctionForNDVEstimation) {
          densityAvgSum += diff(newData.getHighValue(), newData.getLowValue()) / newData.getNumDVs();
        }
        adjustedIndexMap.put(partName, (double) indexMap.get(partName));
        adjustedStatsMap.put(partName, cso.getStatsData());
      }
    } else {
      // we first merge all the adjacent bitvectors that we could merge and
      // derive new partition names and index.
      StringBuilder pseudoPartName = new StringBuilder();
      double pseudoIndexSum = 0;
      int length = 0;
      int curIndex = -1;
      DateColumnStatsDataInspector aggregateData = null;
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        DateColumnStatsDataInspector newData = dateInspectorFromStats(cso);
        // already checked it before.
        if (indexMap.get(partName) != curIndex) {
          // There is a bitvector, but it is not adjacent to the previous ones.
          if (length > 0) {
            // we have to set ndv
            adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
            aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
            ColumnStatisticsData csd = new ColumnStatisticsData();
            csd.setDateStats(aggregateData);
            adjustedStatsMap.put(pseudoPartName.toString(), csd);
            if (useDensityFunctionForNDVEstimation) {
              densityAvgSum += diff(aggregateData.getHighValue(), aggregateData.getLowValue()) / aggregateData.getNumDVs();
            }
            // reset everything
            pseudoPartName = new StringBuilder();
            pseudoIndexSum = 0;
            length = 0;
            ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
          }
          aggregateData = null;
        }
        curIndex = indexMap.get(partName);
        pseudoPartName.append(partName);
        pseudoIndexSum += curIndex;
        length++;
        curIndex++;
        if (aggregateData == null) {
          aggregateData = newData.deepCopy();
        } else {
          aggregateData.setLowValue(min(aggregateData.getLowValue(), newData.getLowValue()));
          aggregateData.setHighValue(max(aggregateData.getHighValue(), newData.getHighValue()));
          aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
        }
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (length > 0) {
        // we have to set ndv
        adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
        aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
        ColumnStatisticsData csd = new ColumnStatisticsData();
        csd.setDateStats(aggregateData);
        adjustedStatsMap.put(pseudoPartName.toString(), csd);
        if (useDensityFunctionForNDVEstimation) {
          densityAvgSum += diff(aggregateData.getHighValue(), aggregateData.getLowValue()) / aggregateData.getNumDVs();
        }
      }
    }
    extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(), adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
  }
LOG.debug("Ndv estimatation for {} is {} # of partitions requested: {} # of partitions found: {}", colName, columnStatisticsData.getDateStats().getNumDVs(), partNames.size(), colStatsWithSourceInfo.size());
statsObj.setStatsData(columnStatisticsData);
return statsObj;
}
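When not every partition carries a bitvector, the aggregator falls back to the bounded estimation above: lowerBound is the largest per-partition NDV, higherBound the sum of them, and the density-based guess is clamped into that interval (or, without the density function, ndvTuner interpolates between the bounds). Here is a hypothetical, self-contained rendering of just that arithmetic; the class, method names, and sample values are ours, not Hive API.

public class NdvFallbackSketch {
  static long estimateNdv(long lowerBound, long higherBound, long valueRange,
      double densityAvg, boolean useDensityFunctionForNDVEstimation, double ndvTuner) {
    if (useDensityFunctionForNDVEstimation) {
      // Density-based guess: range of values divided by average density,
      // then clamped into [lowerBound, higherBound] as the aggregator does.
      long estimation = (long) (valueRange / densityAvg);
      return Math.min(Math.max(estimation, lowerBound), higherBound);
    }
    // Otherwise interpolate between the two bounds with the ndvTuner knob.
    return (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
  }

  public static void main(String[] args) {
    // lowerBound = max per-partition NDV (40), higherBound = sum of per-partition NDVs (120).
    System.out.println(estimateNdv(40, 120, 365, 5.0, true, 0.0));  // 365 / 5.0 = 73
    System.out.println(estimateNdv(40, 120, 365, 5.0, false, 0.5)); // 40 + (120 - 40) * 0.5 = 80
  }
}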
use of org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj in project hive by apache.
the class ColumnStatsMergerFactory method newColumnStaticsObj.
public static ColumnStatisticsObj newColumnStaticsObj(final String colName, final String colType, final _Fields type) {
  final ColumnStatisticsObj cso = new ColumnStatisticsObj();
  final ColumnStatisticsData csd = new ColumnStatisticsData();
  Objects.requireNonNull(colName, "Column name cannot be null");
  Objects.requireNonNull(colType, "Column type cannot be null");
  Objects.requireNonNull(type, "Field type cannot be null");
  switch (type) {
    case BOOLEAN_STATS:
      csd.setBooleanStats(new BooleanColumnStatsData());
      break;
    case LONG_STATS:
      csd.setLongStats(new LongColumnStatsDataInspector());
      break;
    case DOUBLE_STATS:
      csd.setDoubleStats(new DoubleColumnStatsDataInspector());
      break;
    case STRING_STATS:
      csd.setStringStats(new StringColumnStatsDataInspector());
      break;
    case BINARY_STATS:
      csd.setBinaryStats(new BinaryColumnStatsData());
      break;
    case DECIMAL_STATS:
      csd.setDecimalStats(new DecimalColumnStatsDataInspector());
      break;
    case DATE_STATS:
      csd.setDateStats(new DateColumnStatsDataInspector());
      break;
    case TIMESTAMP_STATS:
      csd.setTimestampStats(new TimestampColumnStatsDataInspector());
      break;
    default:
      throw new IllegalArgumentException("Unknown stats type: " + type);
  }
  cso.setColName(colName);
  cso.setColType(colType);
  cso.setStatsData(csd);
  return cso;
}
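A possible call site for this factory, sketched under the assumption that the import paths below match the metastore packages these examples come from; the column name and type are made up. The _Fields value selects which branch of the ColumnStatisticsData union the factory initializes.

import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory;

public class FactoryUsageSketch {
  public static void main(String[] args) {
    // Ask the factory for an empty stats object keyed by the union field we want set.
    ColumnStatisticsObj cso = ColumnStatsMergerFactory.newColumnStaticsObj(
        "id", "bigint", ColumnStatisticsData._Fields.LONG_STATS);
    // The union now carries an empty long-stats payload that callers can populate.
    cso.getStatsData().getLongStats().setNumDVs(42);
    cso.getStatsData().getLongStats().setNumNulls(0);
    System.out.println(cso.getColName() + " -> " + cso.getStatsData().getSetField());
  }
}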
use of org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj in project hive by apache.
the class TestHiveMetaStore method testColumnStatistics.
@Test
public void testColumnStatistics() throws Throwable {
  String dbName = "columnstatstestdb";
  String tblName = "tbl";
  String typeName = "Person";
  String tblOwner = "testowner";
  int lastAccessed = 6796;
  try {
    cleanUp(dbName, tblName, typeName);
    new DatabaseBuilder().setName(dbName).create(client, conf);
    createTableForTestFilter(dbName, tblName, tblOwner, lastAccessed, true);
    // Create a ColumnStatistics Obj
    String[] colName = new String[] { "income", "name" };
    double lowValue = 50000.21;
    double highValue = 1200000.4525;
    long numNulls = 3;
    long numDVs = 22;
    double avgColLen = 50.30;
    long maxColLen = 102;
    String[] colType = new String[] { "double", "string" };
    boolean isTblLevel = true;
    String partName = null;
    List<ColumnStatisticsObj> statsObjs = new ArrayList<>();
    ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc();
    statsDesc.setDbName(dbName);
    statsDesc.setTableName(tblName);
    statsDesc.setIsTblLevel(isTblLevel);
    statsDesc.setPartName(partName);
    ColumnStatisticsObj statsObj = new ColumnStatisticsObj();
    statsObj.setColName(colName[0]);
    statsObj.setColType(colType[0]);
    ColumnStatisticsData statsData = new ColumnStatisticsData();
    DoubleColumnStatsData numericStats = new DoubleColumnStatsData();
    statsData.setDoubleStats(numericStats);
    statsData.getDoubleStats().setHighValue(highValue);
    statsData.getDoubleStats().setLowValue(lowValue);
    statsData.getDoubleStats().setNumDVs(numDVs);
    statsData.getDoubleStats().setNumNulls(numNulls);
    statsObj.setStatsData(statsData);
    statsObjs.add(statsObj);
    statsObj = new ColumnStatisticsObj();
    statsObj.setColName(colName[1]);
    statsObj.setColType(colType[1]);
    statsData = new ColumnStatisticsData();
    StringColumnStatsData stringStats = new StringColumnStatsData();
    statsData.setStringStats(stringStats);
    statsData.getStringStats().setAvgColLen(avgColLen);
    statsData.getStringStats().setMaxColLen(maxColLen);
    statsData.getStringStats().setNumDVs(numDVs);
    statsData.getStringStats().setNumNulls(numNulls);
    statsObj.setStatsData(statsData);
    statsObjs.add(statsObj);
    ColumnStatistics colStats = new ColumnStatistics();
    colStats.setStatsDesc(statsDesc);
    colStats.setStatsObj(statsObjs);
    colStats.setEngine(ENGINE);
    // write stats objs persistently
    client.updateTableColumnStatistics(colStats);
    // retrieve the stats obj that was just written
    ColumnStatisticsObj colStats2 = client.getTableColumnStatistics(dbName, tblName, Lists.newArrayList(colName[0]), ENGINE).get(0);
    // compare stats obj to ensure what we get is what we wrote
    assertNotNull(colStats2);
    assertEquals(colStats2.getColName(), colName[0]);
    assertEquals(colStats2.getStatsData().getDoubleStats().getLowValue(), lowValue, 0.01);
    assertEquals(colStats2.getStatsData().getDoubleStats().getHighValue(), highValue, 0.01);
    assertEquals(colStats2.getStatsData().getDoubleStats().getNumNulls(), numNulls);
    assertEquals(colStats2.getStatsData().getDoubleStats().getNumDVs(), numDVs);
    // test delete column stats; if no col name is passed, all column stats associated with the
    // table are deleted
    boolean status = client.deleteTableColumnStatistics(dbName, tblName, null, ENGINE);
    assertTrue(status);
    // try to query stats for a column for which no stats exist
    assertTrue(client.getTableColumnStatistics(dbName, tblName, Lists.newArrayList(colName[1]), ENGINE).isEmpty());
    colStats.setStatsDesc(statsDesc);
    colStats.setStatsObj(statsObjs);
    // update table level column stats
    client.updateTableColumnStatistics(colStats);
    // query column stats for the column whose stats were updated in the previous call
    colStats2 = client.getTableColumnStatistics(dbName, tblName, Lists.newArrayList(colName[0]), ENGINE).get(0);
    // partition level column statistics test
    // create a table with multiple partitions
    cleanUp(dbName, tblName, typeName);
    List<List<String>> values = new ArrayList<>();
    values.add(makeVals("2008-07-01 14:13:12", "14"));
    values.add(makeVals("2008-07-01 14:13:12", "15"));
    values.add(makeVals("2008-07-02 14:13:12", "15"));
    values.add(makeVals("2008-07-03 14:13:12", "151"));
    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
    List<String> partitions = client.listPartitionNames(dbName, tblName, (short) -1);
    partName = partitions.get(0);
    isTblLevel = false;
    // create a new ColumnStatisticsDesc to represent partition level column stats
    statsDesc = new ColumnStatisticsDesc();
    statsDesc.setDbName(dbName);
    statsDesc.setTableName(tblName);
    statsDesc.setPartName(partName);
    statsDesc.setIsTblLevel(isTblLevel);
    colStats = new ColumnStatistics();
    colStats.setStatsDesc(statsDesc);
    colStats.setStatsObj(statsObjs);
    colStats.setEngine(ENGINE);
    client.updatePartitionColumnStatistics(colStats);
    colStats2 = client.getPartitionColumnStatistics(dbName, tblName, Lists.newArrayList(partName), Lists.newArrayList(colName[1]), ENGINE).get(partName).get(0);
    // compare stats obj to ensure what we get is what we wrote
    assertNotNull(colStats2);
    assertEquals(colStats.getStatsDesc().getPartName(), partName);
    assertEquals(colStats2.getColName(), colName[1]);
    assertEquals(colStats2.getStatsData().getStringStats().getMaxColLen(), maxColLen);
    assertEquals(colStats2.getStatsData().getStringStats().getAvgColLen(), avgColLen, 0.01);
    assertEquals(colStats2.getStatsData().getStringStats().getNumNulls(), numNulls);
    assertEquals(colStats2.getStatsData().getStringStats().getNumDVs(), numDVs);
    // test stats deletion at partition level
    client.deletePartitionColumnStatistics(dbName, tblName, partName, colName[1], ENGINE);
    colStats2 = client.getPartitionColumnStatistics(dbName, tblName, Lists.newArrayList(partName), Lists.newArrayList(colName[0]), ENGINE).get(partName).get(0);
    // test getting stats for a column for which no stats exist
    assertTrue(client.getPartitionColumnStatistics(dbName, tblName, Lists.newArrayList(partName), Lists.newArrayList(colName[1]), ENGINE).isEmpty());
  } catch (Exception e) {
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testColumnStatistics() failed.");
    throw e;
  } finally {
    cleanUp(dbName, tblName, typeName);
  }
}
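The test builds its ColumnStatisticsObj instances field by field; a small helper like the hypothetical one below (ours, not part of the test class) can compress that setup. It relies on the Thrift-generated constructors, where numNulls and numDVs are the required fields of DoubleColumnStatsData and colName/colType/statsData are the required fields of ColumnStatisticsObj.

import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;

public class DoubleStatsBuilderSketch {
  // Builds a double-column stats object in one call.
  static ColumnStatisticsObj doubleStatsObj(String colName, double low, double high,
      long numNulls, long numDVs) {
    DoubleColumnStatsData doubleStats = new DoubleColumnStatsData(numNulls, numDVs);
    doubleStats.setLowValue(low);
    doubleStats.setHighValue(high);
    ColumnStatisticsData data = new ColumnStatisticsData();
    data.setDoubleStats(doubleStats);
    return new ColumnStatisticsObj(colName, "double", data);
  }

  public static void main(String[] args) {
    // Same values the test uses for the "income" column.
    ColumnStatisticsObj statsObj = doubleStatsObj("income", 50000.21, 1200000.4525, 3, 22);
    System.out.println(statsObj.getColName() + " -> " + statsObj.getStatsData().getDoubleStats());
  }
}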
use of org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj in project hive by apache.
the class TestAggregateStatsCache method testAddGetWithVariance.
@Test
public void testAddGetWithVariance() throws Exception {
  // Partnames: [tab1part1...tab1part9]
  List<String> partNames = preparePartNames(tables.get(0), 1, 9);
  // Prepare the bloom filter
  BloomFilter bloomFilter = prepareBloomFilter(partNames);
  // Add a dummy aggregate stats object for the above parts (part1...part9) of tab1 for col1
  String tblName = tables.get(0);
  String colName = tabCols.get(0);
  int highVal = 100, lowVal = 10, numDVs = 50, numNulls = 5;
  // We'll treat this as the aggregate col stats for part1...part9 of tab1, col1
  ColumnStatisticsObj aggrColStats = getDummyLongColStat(colName, highVal, lowVal, numDVs, numNulls);
  // Now add to cache
  cache.add(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter);
  // Now prepare partnames with only 5 partitions: [tab1part1...tab1part5]
  partNames = preparePartNames(tables.get(0), 1, 5);
  // This get should fail because its variance ((10-5)/5) is way past MAX_VARIANCE (0.5)
  AggrColStats aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames);
  Assert.assertNull(aggrStatsCached);
  // Now prepare partnames with 10 partitions: [tab1part11...tab1part20], but with no overlap
  partNames = preparePartNames(tables.get(0), 11, 20);
  // This get should fail because its variance ((10-0)/10) is way past MAX_VARIANCE (0.5)
  aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames);
  Assert.assertNull(aggrStatsCached);
  // Now prepare partnames with 8 partitions: [tab1part1...tab1part8], which are contained in the
  // object that we added to the cache
  partNames = preparePartNames(tables.get(0), 1, 8);
  // This get should succeed because its variance ((10-8)/8) is within MAX_VARIANCE (0.5)
  aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames);
  Assert.assertNotNull(aggrStatsCached);
  ColumnStatisticsObj aggrColStatsCached = aggrStatsCached.getColStats();
  Assert.assertEquals(aggrColStats, aggrColStatsCached);
}
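The variance the comments refer to is, in effect, the relative gap between the number of partitions an aggregate was cached for and the number being requested. Below is a hypothetical simplification of that gate, with the formula and MAX_VARIANCE value taken from the comments above; the real cache also consults the bloom filter to check partition overlap, which this sketch omits.

public class VarianceGateSketch {
  static final double MAX_VARIANCE = 0.5;

  // Accept a cached aggregate only if the partition counts are close enough.
  static boolean accept(int partsCached, int partsRequested) {
    double variance = (double) Math.abs(partsCached - partsRequested) / partsRequested;
    return variance <= MAX_VARIANCE;
  }

  public static void main(String[] args) {
    System.out.println(accept(10, 5)); // false: (10-5)/5 = 1.0 > 0.5
    System.out.println(accept(10, 8)); // true:  (10-8)/8 = 0.25 <= 0.5
  }
}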