Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
The class BooleanColumnStatsAggregator, method aggregate.
@Override
public ColumnStatisticsObj aggregate(String colName, List<String> partNames,
    List<ColumnStatistics> css) throws MetaException {
  ColumnStatisticsObj statsObj = null;
  BooleanColumnStatsData aggregateData = null;
  String colType = null;
  for (ColumnStatistics cs : css) {
    if (cs.getStatsObjSize() != 1) {
      throw new MetaException(
          "The number of columns should be exactly one in aggrStats, but found "
              + cs.getStatsObjSize());
    }
    ColumnStatisticsObj cso = cs.getStatsObjIterator().next();
    if (statsObj == null) {
      colType = cso.getColType();
      statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
          cso.getStatsData().getSetField());
    }
    BooleanColumnStatsData newData = cso.getStatsData().getBooleanStats();
    if (aggregateData == null) {
      aggregateData = newData.deepCopy();
    } else {
      // Boolean stats aggregate by simple addition of the per-partition counters.
      aggregateData.setNumTrues(aggregateData.getNumTrues() + newData.getNumTrues());
      aggregateData.setNumFalses(aggregateData.getNumFalses() + newData.getNumFalses());
      aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
    }
  }
  ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData();
  columnStatisticsData.setBooleanStats(aggregateData);
  statsObj.setStatsData(columnStatisticsData);
  return statsObj;
}
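As a side note, the merge above is plain addition of counters. A minimal sketch, not from the Hive source: the two partitions and their counts are made up, and the three-argument Thrift constructor is assumed to take (numTrues, numFalses, numNulls):

  BooleanColumnStatsData p0 = new BooleanColumnStatsData(10, 5, 1);  // hypothetical partition 0
  BooleanColumnStatsData p1 = new BooleanColumnStatsData(7, 3, 0);   // hypothetical partition 1
  BooleanColumnStatsData merged = p0.deepCopy();
  merged.setNumTrues(merged.getNumTrues() + p1.getNumTrues());    // 17
  merged.setNumFalses(merged.getNumFalses() + p1.getNumFalses()); // 8
  merged.setNumNulls(merged.getNumNulls() + p1.getNumNulls());    // 1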
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
The class LongColumnStatsAggregator, method aggregate.
@Override
public ColumnStatisticsObj aggregate(String colName, List<String> partNames,
    List<ColumnStatistics> css) throws MetaException {
  ColumnStatisticsObj statsObj = null;
  // Check whether every partition contributed stats and whether every NDV
  // estimate carries a bitvector.
  boolean doAllPartitionContainStats = partNames.size() == css.size();
  boolean isNDVBitVectorSet = true;
  String colType = null;
  for (ColumnStatistics cs : css) {
    if (cs.getStatsObjSize() != 1) {
      throw new MetaException(
          "The number of columns should be exactly one in aggrStats, but found "
              + cs.getStatsObjSize());
    }
    ColumnStatisticsObj cso = cs.getStatsObjIterator().next();
    if (statsObj == null) {
      colType = cso.getColType();
      statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
          cso.getStatsData().getSetField());
    }
    if (numBitVectors <= 0 || !cso.getStatsData().getLongStats().isSetBitVectors()
        || cso.getStatsData().getLongStats().getBitVectors().length() == 0) {
      isNDVBitVectorSet = false;
      break;
    }
  }
  ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData();
  if (doAllPartitionContainStats || css.size() < 2) {
    LongColumnStatsData aggregateData = null;
    long lowerBound = 0;
    long higherBound = 0;
    double densityAvgSum = 0.0;
    NumDistinctValueEstimator ndvEstimator = null;
    if (isNDVBitVectorSet) {
      ndvEstimator = new NumDistinctValueEstimator(numBitVectors);
    }
    for (ColumnStatistics cs : css) {
      ColumnStatisticsObj cso = cs.getStatsObjIterator().next();
      LongColumnStatsData newData = cso.getStatsData().getLongStats();
      if (useDensityFunctionForNDVEstimation) {
        lowerBound = Math.max(lowerBound, newData.getNumDVs());
        higherBound += newData.getNumDVs();
        // Cast to double so the per-partition density is not truncated by
        // integer division.
        densityAvgSum += (double) (newData.getHighValue() - newData.getLowValue())
            / newData.getNumDVs();
      }
      if (isNDVBitVectorSet) {
        ndvEstimator.mergeEstimators(new NumDistinctValueEstimator(newData.getBitVectors(),
            ndvEstimator.getnumBitVectors()));
      }
      if (aggregateData == null) {
        aggregateData = newData.deepCopy();
      } else {
        aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
        aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue()));
        aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
        aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
      }
    }
    if (isNDVBitVectorSet) {
      // If all the ColumnStatisticsObjs contain bitvectors, we do not need the
      // uniform-distribution assumption: merging the bitvectors yields a good
      // estimate directly.
      aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
    } else if (useDensityFunctionForNDVEstimation) {
      // We have an estimate plus a lower and an upper bound; use the estimate
      // only if it falls between the bounds.
      double densityAvg = densityAvgSum / partNames.size();
      long estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg);
      if (estimation < lowerBound) {
        aggregateData.setNumDVs(lowerBound);
      } else if (estimation > higherBound) {
        aggregateData.setNumDVs(higherBound);
      } else {
        aggregateData.setNumDVs(estimation);
      }
    }
    // Otherwise keep the default: the max NDV across partitions, set above.
    columnStatisticsData.setLongStats(aggregateData);
  } else {
    // We need extrapolation.
    Map<String, Integer> indexMap = new HashMap<String, Integer>();
    for (int index = 0; index < partNames.size(); index++) {
      indexMap.put(partNames.get(index), index);
    }
    Map<String, Double> adjustedIndexMap = new HashMap<String, Double>();
    Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<String, ColumnStatisticsData>();
    // While scanning the css we also accumulate densityAvg, lowerbound and
    // higherbound when useDensityFunctionForNDVEstimation is true.
    double densityAvgSum = 0.0;
    if (!isNDVBitVectorSet) {
      // The traditional extrapolation method: keep each partition's stats as-is.
      for (ColumnStatistics cs : css) {
        String partName = cs.getStatsDesc().getPartName();
        ColumnStatisticsObj cso = cs.getStatsObjIterator().next();
        LongColumnStatsData newData = cso.getStatsData().getLongStats();
        if (useDensityFunctionForNDVEstimation) {
          densityAvgSum += (double) (newData.getHighValue() - newData.getLowValue())
              / newData.getNumDVs();
        }
        adjustedIndexMap.put(partName, (double) indexMap.get(partName));
        adjustedStatsMap.put(partName, cso.getStatsData());
      }
    } else {
      // First merge all adjacent bitvectors that can be merged and derive
      // pseudo partition names and indexes for the merged runs.
      NumDistinctValueEstimator ndvEstimator = new NumDistinctValueEstimator(numBitVectors);
      StringBuilder pseudoPartName = new StringBuilder();
      double pseudoIndexSum = 0;
      int length = 0;
      int curIndex = -1;
      LongColumnStatsData aggregateData = null;
      for (ColumnStatistics cs : css) {
        String partName = cs.getStatsDesc().getPartName();
        ColumnStatisticsObj cso = cs.getStatsObjIterator().next();
        LongColumnStatsData newData = cso.getStatsData().getLongStats();
        // The bitvector is guaranteed to be set here; that was checked in the
        // first pass above.
        if (indexMap.get(partName) != curIndex) {
          // There is a bitvector, but it is not adjacent to the previous ones:
          // close out the current run, if any.
          if (length > 0) {
            // We have to set the NDV for the finished run.
            adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
            aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
            ColumnStatisticsData csd = new ColumnStatisticsData();
            csd.setLongStats(aggregateData);
            adjustedStatsMap.put(pseudoPartName.toString(), csd);
            if (useDensityFunctionForNDVEstimation) {
              densityAvgSum += (double) (aggregateData.getHighValue() - aggregateData.getLowValue())
                  / aggregateData.getNumDVs();
            }
            // Reset everything for the next run.
            pseudoPartName = new StringBuilder();
            pseudoIndexSum = 0;
            length = 0;
          }
          aggregateData = null;
        }
        curIndex = indexMap.get(partName);
        pseudoPartName.append(partName);
        pseudoIndexSum += curIndex;
        length++;
        curIndex++;
        if (aggregateData == null) {
          aggregateData = newData.deepCopy();
        } else {
          aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
          aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue()));
          aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
        }
        ndvEstimator.mergeEstimators(new NumDistinctValueEstimator(newData.getBitVectors(),
            ndvEstimator.getnumBitVectors()));
      }
      if (length > 0) {
        // Close out the trailing run the same way.
        adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
        aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
        ColumnStatisticsData csd = new ColumnStatisticsData();
        csd.setLongStats(aggregateData);
        adjustedStatsMap.put(pseudoPartName.toString(), csd);
        if (useDensityFunctionForNDVEstimation) {
          densityAvgSum += (double) (aggregateData.getHighValue() - aggregateData.getLowValue())
              / aggregateData.getNumDVs();
        }
      }
    }
    extrapolate(columnStatisticsData, partNames.size(), css.size(), adjustedIndexMap,
        adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
  }
  statsObj.setStatsData(columnStatisticsData);
  return statsObj;
}
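When bitvectors are unavailable, the density-based branch above clamps (highValue - lowValue) / densityAvg into the range [max per-partition NDV, sum of per-partition NDVs]. A self-contained sketch of that arithmetic, with made-up partition stats (not from the Hive source):

  // Hypothetical per-partition stats: (low, high, ndv) triples.
  long[] low = { 0, 100 };
  long[] high = { 99, 199 };
  long[] ndv = { 50, 80 };
  double densityAvgSum = 0.0;
  long lowerBound = 0;
  long higherBound = 0;
  for (int i = 0; i < ndv.length; i++) {
    densityAvgSum += (double) (high[i] - low[i]) / ndv[i]; // avg gap between distinct values
    lowerBound = Math.max(lowerBound, ndv[i]);             // at least the largest partition NDV
    higherBound += ndv[i];                                 // at most the sum over partitions
  }
  double densityAvg = densityAvgSum / ndv.length;          // (1.98 + 1.2375) / 2 = 1.60875
  long estimation = (long) ((199 - 0) / densityAvg);       // 123
  // Clamp into [lowerBound, higherBound] = [80, 130]; here the estimate 123 stands.
  long estimatedNDVs = Math.min(Math.max(estimation, lowerBound), higherBound);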
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
The class HBaseReadWrite, method buildColStats.
private ColumnStatistics buildColStats(byte[] key, boolean fromTable) throws IOException {
  // We initialize this late so that we don't create extras in the case of
  // partitions with no stats.
  ColumnStatistics colStats = new ColumnStatistics();
  ColumnStatisticsDesc csd = new ColumnStatisticsDesc();
  // If this is a table key, parse it as one.
  List<String> reconstructedKey;
  if (fromTable) {
    reconstructedKey = Arrays.asList(HBaseUtils.deserializeKey(key));
    csd.setIsTblLevel(true);
  } else {
    reconstructedKey = HBaseUtils.deserializePartitionKey(key, this);
    csd.setIsTblLevel(false);
  }
  csd.setDbName(reconstructedKey.get(0));
  csd.setTableName(reconstructedKey.get(1));
  if (!fromTable) {
    // Build the part name, for which we need the table.
    Table table = getTable(reconstructedKey.get(0), reconstructedKey.get(1));
    if (table == null) {
      throw new RuntimeException("Unable to find table " + reconstructedKey.get(0) + "."
          + reconstructedKey.get(1) + " even though I have a partition for it!");
    }
    csd.setPartName(HBaseStore.buildExternalPartName(table,
        reconstructedKey.subList(2, reconstructedKey.size())));
  }
  colStats.setStatsDesc(csd);
  return colStats;
}
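The descriptor built here is what separates table-level from partition-level statistics. A minimal sketch with hypothetical names (the database, table, and partition name are made up) showing the partition-level shape this method produces:

  ColumnStatisticsDesc csd = new ColumnStatisticsDesc();
  csd.setIsTblLevel(false);          // partition-level stats
  csd.setDbName("default");          // hypothetical database
  csd.setTableName("sales");         // hypothetical table
  csd.setPartName("ds=2017-01-01");  // external partition name
  ColumnStatistics colStats = new ColumnStatistics();
  colStats.setStatsDesc(csd);
  // The caller still has to attach the per-column ColumnStatisticsObj list.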
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
The class HBaseStore, method getPartitionColumnStatistics.
@Override
public List<ColumnStatistics> getPartitionColumnStatistics(String dbName, String tblName,
    List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException {
  List<List<String>> partVals = new ArrayList<List<String>>(partNames.size());
  for (String partName : partNames) {
    partVals.add(partNameToVals(partName));
  }
  boolean commit = false;
  openTransaction();
  try {
    List<ColumnStatistics> cs =
        getHBase().getPartitionStatistics(dbName, tblName, partNames, partVals, colNames);
    commit = true;
    return cs;
  } catch (IOException e) {
    LOG.error("Unable to fetch column statistics", e);
    throw new MetaException("Failed fetching column statistics, " + e.getMessage());
  } finally {
    commitOrRoleBack(commit);
  }
}
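A hedged usage sketch, assuming a configured HBaseStore instance named store; the database, table, partition, and column names are made up:

  List<ColumnStatistics> stats = store.getPartitionColumnStatistics(
      "default", "sales",                               // hypothetical db and table
      Arrays.asList("ds=2017-01-01", "ds=2017-01-02"),  // partitions to cover
      Arrays.asList("price"));                          // columns of interest
  // Expect one ColumnStatistics per partition that actually has stats stored.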
Use of org.apache.hadoop.hive.metastore.api.ColumnStatistics in project hive by apache.
The class ColumnStatsTask, method persistColumnStats.
private int persistColumnStats(Hive db) throws HiveException, MetaException, IOException {
  // Construct the column statistics objects from the query result.
  List<ColumnStatistics> colStats = constructColumnStatsFromPackedRows(db);
  // Persist the column statistics objects to the metastore.
  // Note: this function is shared by both table and partition column stats.
  SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);
  if (work.getColStats() != null && work.getColStats().getNumBitVector() > 0) {
    request.setNeedMerge(true);
  }
  db.setPartitionColumnStatistics(request);
  return 0;
}
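A minimal sketch of the request this method builds, assuming cs is a fully populated ColumnStatistics (descriptor plus one ColumnStatisticsObj per column); as in the method above, needMerge is only set when bitvectors were used to compute NDV:

  SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(Arrays.asList(cs));
  request.setNeedMerge(true);  // ask the metastore to merge with any existing stats
  db.setPartitionColumnStatistics(request);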