use of org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields in project hive by apache.
the class DateColumnStatsAggregator method aggregate.
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
    List<String> partNames, boolean areAllPartsFound) throws MetaException {
  ColumnStatisticsObj statsObj = null;
  String colType = null;
  String colName = null;
  // check if all the ColumnStatisticsObjs contain stats and all the ndvs are bit vectors
  boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
  NumDistinctValueEstimator ndvEstimator = null;
  for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
    ColumnStatisticsObj cso = csp.getColStatsObj();
    if (statsObj == null) {
      colName = cso.getColName();
      colType = cso.getColType();
      statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
          cso.getStatsData().getSetField());
      LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
          doAllPartitionContainStats);
    }
    DateColumnStatsDataInspector dateColumnStats =
        (DateColumnStatsDataInspector) cso.getStatsData().getDateStats();
    if (dateColumnStats.getNdvEstimator() == null) {
      ndvEstimator = null;
      break;
    } else {
      // check if all of the bit vectors can merge
      NumDistinctValueEstimator estimator = dateColumnStats.getNdvEstimator();
      if (ndvEstimator == null) {
        ndvEstimator = estimator;
      } else {
        if (ndvEstimator.canMerge(estimator)) {
          continue;
        } else {
          ndvEstimator = null;
          break;
        }
      }
    }
  }
  if (ndvEstimator != null) {
    ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
  }
  LOG.debug("Whether all bit vectors can merge for {}: {}", colName, ndvEstimator != null);
  ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData();
  if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
    DateColumnStatsDataInspector aggregateData = null;
    long lowerBound = 0;
    long higherBound = 0;
    double densityAvgSum = 0.0;
    for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
      ColumnStatisticsObj cso = csp.getColStatsObj();
      DateColumnStatsDataInspector newData =
          (DateColumnStatsDataInspector) cso.getStatsData().getDateStats();
      // the aggregate ndv can never be below the largest per-partition ndv,
      // nor above the sum of all per-partition ndvs
      lowerBound = Math.max(lowerBound, newData.getNumDVs());
      higherBound += newData.getNumDVs();
      densityAvgSum += (diff(newData.getHighValue(), newData.getLowValue())) / newData.getNumDVs();
      if (ndvEstimator != null) {
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (aggregateData == null) {
        aggregateData = newData.deepCopy();
      } else {
        aggregateData.setLowValue(min(aggregateData.getLowValue(), newData.getLowValue()));
        aggregateData.setHighValue(max(aggregateData.getHighValue(), newData.getHighValue()));
        aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
        aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
      }
    }
    if (ndvEstimator != null) {
      // if all the ColumnStatisticsObjs contain bit vectors, we do not need the
      // uniform-distribution assumption because we can merge the bit vectors
      // to get a good estimation.
      aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
    } else {
      long estimation;
      if (useDensityFunctionForNDVEstimation) {
        // We use the density-based estimate only if it falls between
        // lowerBound and higherBound; otherwise we clamp to the nearer bound.
        double densityAvg = densityAvgSum / partNames.size();
        estimation = (long) (diff(aggregateData.getHighValue(), aggregateData.getLowValue()) / densityAvg);
        if (estimation < lowerBound) {
          estimation = lowerBound;
        } else if (estimation > higherBound) {
          estimation = higherBound;
        }
      } else {
        estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
      }
      aggregateData.setNumDVs(estimation);
    }
    columnStatisticsData.setDateStats(aggregateData);
  } else {
    // we need extrapolation
    LOG.debug("Start extrapolation for {}", colName);
    Map<String, Integer> indexMap = new HashMap<>();
    for (int index = 0; index < partNames.size(); index++) {
      indexMap.put(partNames.get(index), index);
    }
    Map<String, Double> adjustedIndexMap = new HashMap<>();
    Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
    // while we scan the stats, we also collect densityAvg, lowerBound and
    // higherBound when useDensityFunctionForNDVEstimation is true.
    double densityAvgSum = 0.0;
    if (ndvEstimator == null) {
      // the traditional extrapolation methods.
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        DateColumnStatsData newData = cso.getStatsData().getDateStats();
        if (useDensityFunctionForNDVEstimation) {
          densityAvgSum += diff(newData.getHighValue(), newData.getLowValue()) / newData.getNumDVs();
        }
        adjustedIndexMap.put(partName, (double) indexMap.get(partName));
        adjustedStatsMap.put(partName, cso.getStatsData());
      }
    } else {
      // we first merge all the adjacent bit vectors that we can merge and
      // derive new pseudo partition names and indices.
      StringBuilder pseudoPartName = new StringBuilder();
      double pseudoIndexSum = 0;
      int length = 0;
      int curIndex = -1;
      DateColumnStatsDataInspector aggregateData = null;
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        DateColumnStatsDataInspector newData =
            (DateColumnStatsDataInspector) cso.getStatsData().getDateStats();
        // the ndv estimator is guaranteed to be set; we already checked it above.
        if (indexMap.get(partName) != curIndex) {
          // There is a bit vector, but it is not adjacent to the previous ones.
          if (length > 0) {
            // we have to set ndv
            adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
            aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
            ColumnStatisticsData csd = new ColumnStatisticsData();
            csd.setDateStats(aggregateData);
            adjustedStatsMap.put(pseudoPartName.toString(), csd);
            if (useDensityFunctionForNDVEstimation) {
              densityAvgSum += diff(aggregateData.getHighValue(), aggregateData.getLowValue())
                  / aggregateData.getNumDVs();
            }
            // reset everything
            pseudoPartName = new StringBuilder();
            pseudoIndexSum = 0;
            length = 0;
            ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
          }
          aggregateData = null;
        }
        curIndex = indexMap.get(partName);
        pseudoPartName.append(partName);
        pseudoIndexSum += curIndex;
        length++;
        curIndex++;
        if (aggregateData == null) {
          aggregateData = newData.deepCopy();
        } else {
          aggregateData.setLowValue(min(aggregateData.getLowValue(), newData.getLowValue()));
          aggregateData.setHighValue(max(aggregateData.getHighValue(), newData.getHighValue()));
          aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
        }
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (length > 0) {
        // we have to set ndv
        adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
        aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
        ColumnStatisticsData csd = new ColumnStatisticsData();
        csd.setDateStats(aggregateData);
        adjustedStatsMap.put(pseudoPartName.toString(), csd);
        if (useDensityFunctionForNDVEstimation) {
          densityAvgSum += diff(aggregateData.getHighValue(), aggregateData.getLowValue())
              / aggregateData.getNumDVs();
        }
      }
    }
    extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
        adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
  }
  LOG.debug("NDV estimation for {} is {}. # of partitions requested: {}, # of partitions found: {}",
      colName, columnStatisticsData.getDateStats().getNumDVs(), partNames.size(),
      colStatsWithSourceInfo.size());
  statsObj.setStatsData(columnStatisticsData);
  return statsObj;
}
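The density-based fallback in the branch above is easier to follow with concrete numbers. The sketch below is a standalone illustration with made-up values (the class name and data are hypothetical, not Hive API): each partition contributes (high - low) / ndv to the density sum, and the aggregate NDV estimate is the overall value range divided by the average density, clamped between the largest per-partition NDV and the sum of all per-partition NDVs.

public class NdvDensitySketch {
  public static void main(String[] args) {
    // {low, high, ndv} per partition -- made-up values for illustration
    long[][] parts = { { 0, 100, 50 }, { 50, 200, 60 }, { 150, 300, 40 } };
    double densityAvgSum = 0.0;
    long lowerBound = 0, higherBound = 0;
    long aggLow = Long.MAX_VALUE, aggHigh = Long.MIN_VALUE;
    for (long[] p : parts) {
      densityAvgSum += (double) (p[1] - p[0]) / p[2]; // per-partition density
      lowerBound = Math.max(lowerBound, p[2]);        // can't be below the largest ndv
      higherBound += p[2];                            // can't exceed the sum of ndvs
      aggLow = Math.min(aggLow, p[0]);
      aggHigh = Math.max(aggHigh, p[1]);
    }
    double densityAvg = densityAvgSum / parts.length; // average gap between distinct values
    long estimation = (long) ((aggHigh - aggLow) / densityAvg);
    estimation = Math.max(lowerBound, Math.min(higherBound, estimation));
    System.out.println("estimated NDV = " + estimation); // 109, within [60, 150]
  }
}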
use of org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields in project hive by apache.
the class DecimalColumnStatsAggregator method extrapolate.
@Override
public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, int numPartsWithStats,
    Map<String, Double> adjustedIndexMap, Map<String, ColumnStatisticsData> adjustedStatsMap,
    double densityAvg) {
  int rightBorderInd = numParts;
  DecimalColumnStatsDataInspector extrapolateDecimalData = new DecimalColumnStatsDataInspector();
  Map<String, DecimalColumnStatsData> extractedAdjustedStatsMap = new HashMap<>();
  for (Map.Entry<String, ColumnStatisticsData> entry : adjustedStatsMap.entrySet()) {
    extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getDecimalStats());
  }
  List<Map.Entry<String, DecimalColumnStatsData>> list =
      new LinkedList<>(extractedAdjustedStatsMap.entrySet());
  // get the lowValue
  Collections.sort(list, new Comparator<Map.Entry<String, DecimalColumnStatsData>>() {
    @Override
    public int compare(Map.Entry<String, DecimalColumnStatsData> o1,
        Map.Entry<String, DecimalColumnStatsData> o2) {
      return o1.getValue().getLowValue().compareTo(o2.getValue().getLowValue());
    }
  });
  double minInd = adjustedIndexMap.get(list.get(0).getKey());
  double maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey());
  double lowValue = 0;
  double min = MetaStoreUtils.decimalToDouble(list.get(0).getValue().getLowValue());
  double max = MetaStoreUtils.decimalToDouble(list.get(list.size() - 1).getValue().getLowValue());
  if (minInd == maxInd) {
    lowValue = min;
  } else if (minInd < maxInd) {
    // left border is the min
    lowValue = (max - (max - min) * maxInd / (maxInd - minInd));
  } else {
    // right border is the min
    lowValue = (max - (max - min) * (rightBorderInd - maxInd) / (minInd - maxInd));
  }
  // get the highValue
  Collections.sort(list, new Comparator<Map.Entry<String, DecimalColumnStatsData>>() {
    @Override
    public int compare(Map.Entry<String, DecimalColumnStatsData> o1,
        Map.Entry<String, DecimalColumnStatsData> o2) {
      return o1.getValue().getHighValue().compareTo(o2.getValue().getHighValue());
    }
  });
  minInd = adjustedIndexMap.get(list.get(0).getKey());
  maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey());
  double highValue = 0;
  min = MetaStoreUtils.decimalToDouble(list.get(0).getValue().getHighValue());
  max = MetaStoreUtils.decimalToDouble(list.get(list.size() - 1).getValue().getHighValue());
  if (minInd == maxInd) {
    highValue = min;
  } else if (minInd < maxInd) {
    // right border is the max
    highValue = (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd));
  } else {
    // left border is the max
    highValue = (min + (max - min) * minInd / (minInd - maxInd));
  }
  // get the #nulls
  long numNulls = 0;
  for (Map.Entry<String, DecimalColumnStatsData> entry : extractedAdjustedStatsMap.entrySet()) {
    numNulls += entry.getValue().getNumNulls();
  }
  // we scale up numNulls based on the number of partitions
  numNulls = numNulls * numParts / numPartsWithStats;
  // get the ndv
  long ndv = 0;
  long ndvMin = 0;
  long ndvMax = 0;
  Collections.sort(list, new Comparator<Map.Entry<String, DecimalColumnStatsData>>() {
    @Override
    public int compare(Map.Entry<String, DecimalColumnStatsData> o1,
        Map.Entry<String, DecimalColumnStatsData> o2) {
      return Long.compare(o1.getValue().getNumDVs(), o2.getValue().getNumDVs());
    }
  });
  // the list is now sorted in ascending ndv order, so the last entry holds the
  // largest per-partition ndv, which bounds the aggregate ndv from below
  long lowerBound = list.get(list.size() - 1).getValue().getNumDVs();
  long higherBound = 0;
  for (Map.Entry<String, DecimalColumnStatsData> entry : list) {
    higherBound += entry.getValue().getNumDVs();
  }
  if (useDensityFunctionForNDVEstimation && densityAvg != 0.0) {
    ndv = (long) ((highValue - lowValue) / densityAvg);
    if (ndv < lowerBound) {
      ndv = lowerBound;
    } else if (ndv > higherBound) {
      ndv = higherBound;
    }
  } else {
    minInd = adjustedIndexMap.get(list.get(0).getKey());
    maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey());
    ndvMin = list.get(0).getValue().getNumDVs();
    ndvMax = list.get(list.size() - 1).getValue().getNumDVs();
    if (minInd == maxInd) {
      ndv = ndvMin;
    } else if (minInd < maxInd) {
      // right border is the max
      ndv = (long) (ndvMin + (ndvMax - ndvMin) * (rightBorderInd - minInd) / (maxInd - minInd));
    } else {
      // left border is the max
      ndv = (long) (ndvMin + (ndvMax - ndvMin) * minInd / (minInd - maxInd));
    }
  }
  extrapolateDecimalData.setLowValue(StatObjectConverter.createThriftDecimal(String.valueOf(lowValue)));
  extrapolateDecimalData.setHighValue(StatObjectConverter.createThriftDecimal(String.valueOf(highValue)));
  extrapolateDecimalData.setNumNulls(numNulls);
  extrapolateDecimalData.setNumDVs(ndv);
  extrapolateData.setDecimalStats(extrapolateDecimalData);
}
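The low/high extrapolation above fits a straight line through the partitions holding the smallest and largest values and projects it to the border of the partition range nearest the extremum. A minimal standalone sketch of the low-value case (the class and method names are made up for illustration; only the arithmetic mirrors the code above):

public final class LinearExtrapolation {
  // Fits a line through (minInd, minVal) and (maxInd, maxVal) and projects it
  // to the border of the [0, rightBorderInd] partition range nearest the minimum.
  static double extrapolateLowValue(double minVal, double maxVal,
      double minInd, double maxInd, int rightBorderInd) {
    if (minInd == maxInd) {
      return minVal;  // degenerate case: both extremes sit at the same index
    } else if (minInd < maxInd) {
      // the minimum lies toward the left border: project the line to index 0
      return maxVal - (maxVal - minVal) * maxInd / (maxInd - minInd);
    } else {
      // the minimum lies toward the right border: project to rightBorderInd
      return maxVal - (maxVal - minVal) * (rightBorderInd - maxInd) / (minInd - maxInd);
    }
  }

  public static void main(String[] args) {
    // low values 10 and 40 observed at partition indices 1 and 3 of 4 partitions:
    // the fitted line (slope 15 per index) reaches index 0 at 10 - 15 = -5
    System.out.println(extrapolateLowValue(10, 40, 1, 3, 4)); // -5.0
  }
}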
use of org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields in project hive by apache.
the class StringColumnStatsAggregator method extrapolate.
@Override
public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, int numPartsWithStats,
    Map<String, Double> adjustedIndexMap, Map<String, ColumnStatisticsData> adjustedStatsMap,
    double densityAvg) {
  int rightBorderInd = numParts;
  StringColumnStatsDataInspector extrapolateStringData = new StringColumnStatsDataInspector();
  Map<String, StringColumnStatsData> extractedAdjustedStatsMap = new HashMap<>();
  for (Map.Entry<String, ColumnStatisticsData> entry : adjustedStatsMap.entrySet()) {
    extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getStringStats());
  }
  List<Map.Entry<String, StringColumnStatsData>> list =
      new LinkedList<>(extractedAdjustedStatsMap.entrySet());
  // get the avgLen
  Collections.sort(list, new Comparator<Map.Entry<String, StringColumnStatsData>>() {
    @Override
    public int compare(Map.Entry<String, StringColumnStatsData> o1,
        Map.Entry<String, StringColumnStatsData> o2) {
      return Double.compare(o1.getValue().getAvgColLen(), o2.getValue().getAvgColLen());
    }
  });
  double minInd = adjustedIndexMap.get(list.get(0).getKey());
  double maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey());
  double avgColLen = 0;
  double min = list.get(0).getValue().getAvgColLen();
  double max = list.get(list.size() - 1).getValue().getAvgColLen();
  if (minInd == maxInd) {
    avgColLen = min;
  } else if (minInd < maxInd) {
    // right border is the max
    avgColLen = (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd));
  } else {
    // left border is the max
    avgColLen = (min + (max - min) * minInd / (minInd - maxInd));
  }
  // get the maxLen
  Collections.sort(list, new Comparator<Map.Entry<String, StringColumnStatsData>>() {
    @Override
    public int compare(Map.Entry<String, StringColumnStatsData> o1,
        Map.Entry<String, StringColumnStatsData> o2) {
      return Long.compare(o1.getValue().getMaxColLen(), o2.getValue().getMaxColLen());
    }
  });
  minInd = adjustedIndexMap.get(list.get(0).getKey());
  maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey());
  double maxColLen = 0;
  // the list is sorted by maxColLen, so read the max column lengths here
  min = list.get(0).getValue().getMaxColLen();
  max = list.get(list.size() - 1).getValue().getMaxColLen();
  if (minInd == maxInd) {
    maxColLen = min;
  } else if (minInd < maxInd) {
    // right border is the max
    maxColLen = (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd));
  } else {
    // left border is the max
    maxColLen = (min + (max - min) * minInd / (minInd - maxInd));
  }
  // get the #nulls
  long numNulls = 0;
  for (Map.Entry<String, StringColumnStatsData> entry : extractedAdjustedStatsMap.entrySet()) {
    numNulls += entry.getValue().getNumNulls();
  }
  // we scale up numNulls based on the number of partitions
  numNulls = numNulls * numParts / numPartsWithStats;
  // get the ndv
  long ndv = 0;
  Collections.sort(list, new Comparator<Map.Entry<String, StringColumnStatsData>>() {
    @Override
    public int compare(Map.Entry<String, StringColumnStatsData> o1,
        Map.Entry<String, StringColumnStatsData> o2) {
      return Long.compare(o1.getValue().getNumDVs(), o2.getValue().getNumDVs());
    }
  });
  minInd = adjustedIndexMap.get(list.get(0).getKey());
  maxInd = adjustedIndexMap.get(list.get(list.size() - 1).getKey());
  min = list.get(0).getValue().getNumDVs();
  max = list.get(list.size() - 1).getValue().getNumDVs();
  if (minInd == maxInd) {
    ndv = (long) min;
  } else if (minInd < maxInd) {
    // right border is the max
    ndv = (long) (min + (max - min) * (rightBorderInd - minInd) / (maxInd - minInd));
  } else {
    // left border is the max
    ndv = (long) (min + (max - min) * minInd / (minInd - maxInd));
  }
  extrapolateStringData.setAvgColLen(avgColLen);
  extrapolateStringData.setMaxColLen((long) maxColLen);
  extrapolateStringData.setNumNulls(numNulls);
  extrapolateStringData.setNumDVs(ndv);
  extrapolateData.setStringStats(extrapolateStringData);
}
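The null-count handling shared by both extrapolate methods is a plain proportional scale-up from the partitions that have stats to all partitions. A tiny worked example with made-up numbers:

public class NullScaling {
  public static void main(String[] args) {
    long numNulls = 300;        // nulls summed over partitions that have stats
    int numParts = 10;          // partitions requested
    int numPartsWithStats = 6;  // partitions that actually had stats
    // proportional scale-up, as in the extrapolate methods above
    System.out.println(numNulls * numParts / numPartsWithStats); // 500
  }
}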
use of org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields in project hive by apache.
the class ColumnStatsMergerFactory method getColumnStatsMerger.
public static ColumnStatsMerger getColumnStatsMerger(ColumnStatisticsObj statsObjNew,
    ColumnStatisticsObj statsObjOld) {
  ColumnStatsMerger agg;
  _Fields typeNew = statsObjNew.getStatsData().getSetField();
  _Fields typeOld = statsObjOld.getStatsData().getSetField();
  // make sure that they have the same type; switching on a null _Fields value
  // would otherwise fail with an uninformative NullPointerException
  if (typeNew != typeOld) {
    throw new IllegalArgumentException("Stats types do not match: " + typeNew + " vs. " + typeOld);
  }
  switch (typeNew) {
    case BOOLEAN_STATS:
      agg = new BooleanColumnStatsMerger();
      break;
    case LONG_STATS:
      agg = new LongColumnStatsMerger();
      break;
    case DOUBLE_STATS:
      agg = new DoubleColumnStatsMerger();
      break;
    case STRING_STATS:
      agg = new StringColumnStatsMerger();
      break;
    case BINARY_STATS:
      agg = new BinaryColumnStatsMerger();
      break;
    case DECIMAL_STATS:
      agg = new DecimalColumnStatsMerger();
      break;
    case DATE_STATS:
      agg = new DateColumnStatsMerger();
      break;
    default:
      throw new IllegalArgumentException("Unknown stats type " + typeNew);
  }
  return agg;
}
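A minimal usage sketch for the factory. It assumes the fluent Thrift setters and a merge(aggregate, new) method on ColumnStatsMerger, as in Hive's metastore, but the package paths, values, and demo class name here are illustrative:

import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMerger;
import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory;

public class MergerFactoryDemo {
  public static void main(String[] args) {
    // build two LONG_STATS objects via the factory method shown below, then
    // merge the new statistics into the old ones (values are made up)
    ColumnStatisticsObj oldStats =
        ColumnStatsMergerFactory.newColumnStaticsObj("id", "bigint", _Fields.LONG_STATS);
    ColumnStatisticsObj newStats =
        ColumnStatsMergerFactory.newColumnStaticsObj("id", "bigint", _Fields.LONG_STATS);
    oldStats.getStatsData().getLongStats().setLowValue(0).setHighValue(5000).setNumDVs(100);
    newStats.getStatsData().getLongStats().setLowValue(-10).setHighValue(9000).setNumDVs(150);
    ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(newStats, oldStats);
    merger.merge(oldStats, newStats); // oldStats now carries the merged statistics
  }
}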
use of org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields in project hive by apache.
the class ColumnStatsMergerFactory method newColumnStaticsObj.
public static ColumnStatisticsObj newColumnStaticsObj(String colName, String colType, _Fields type) {
  ColumnStatisticsObj cso = new ColumnStatisticsObj();
  ColumnStatisticsData csd = new ColumnStatisticsData();
  cso.setColName(colName);
  cso.setColType(colType);
  switch (type) {
    case BOOLEAN_STATS:
      csd.setBooleanStats(new BooleanColumnStatsData());
      break;
    case LONG_STATS:
      csd.setLongStats(new LongColumnStatsDataInspector());
      break;
    case DOUBLE_STATS:
      csd.setDoubleStats(new DoubleColumnStatsDataInspector());
      break;
    case STRING_STATS:
      csd.setStringStats(new StringColumnStatsDataInspector());
      break;
    case BINARY_STATS:
      csd.setBinaryStats(new BinaryColumnStatsData());
      break;
    case DECIMAL_STATS:
      csd.setDecimalStats(new DecimalColumnStatsDataInspector());
      break;
    case DATE_STATS:
      csd.setDateStats(new DateColumnStatsDataInspector());
      break;
    default:
      throw new IllegalArgumentException("Unknown stats type " + type);
  }
  cso.setStatsData(csd);
  return cso;
}
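ColumnStatisticsData is a Thrift union, so exactly one branch is set at a time and getSetField() reports which one; both switches above key off that round-trip. A quick illustration (hypothetical demo class; package paths as in the standalone metastore, treat as illustrative):

import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory;

public class UnionFieldDemo {
  public static void main(String[] args) {
    ColumnStatisticsObj obj = ColumnStatsMergerFactory.newColumnStaticsObj(
        "ds", "date", ColumnStatisticsData._Fields.DATE_STATS);
    // the union reports the branch that was just populated, so the object can be
    // routed back through getColumnStatsMerger or an aggregator without extra state
    System.out.println(obj.getStatsData().getSetField());    // DATE_STATS
    System.out.println(obj.getStatsData().isSetDateStats()); // true
  }
}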