Usage of org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorUDAFCount.Aggregation in the Apache Hive project: class VectorUDAFBloomFilter, method iterateNoNullsRepeatingWithAggregationSelection.
/**
 * Iterates a repeating, all-non-null input column. Because the column is
 * repeating, every logical row shares the single value stored at slot 0,
 * so each per-row aggregation buffer is updated from index 0.
 *
 * @param aggregationBufferSets per-row aggregation buffer rows
 * @param aggregateIndex index of this aggregate within each buffer row
 * @param inputColumn repeating input column vector
 * @param batchSize number of logical rows in the batch
 */
private void iterateNoNullsRepeatingWithAggregationSelection(VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, ColumnVector inputColumn, int batchSize) {
  int rowIndex = 0;
  while (rowIndex < batchSize) {
    Aggregation aggBuffer = getCurrentAggregationBuffer(aggregationBufferSets, aggregateIndex, rowIndex);
    // Repeating vector: the shared value always lives at position 0.
    valueProcessor.processValue(aggBuffer, inputColumn, 0);
    rowIndex++;
  }
}
Usage of org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorUDAFCount.Aggregation in the Apache Hive project: class VectorUDAFBloomFilter, method iterateHasNullsWithAggregationSelection.
/**
 * Iterates an input column that may contain nulls, with no selection vector
 * in use. Null rows are skipped; each non-null row's value is folded into
 * that row's aggregation buffer.
 *
 * @param aggregationBufferSets per-row aggregation buffer rows
 * @param aggregateIndex index of this aggregate within each buffer row
 * @param inputColumn input column vector (may contain nulls)
 * @param batchSize number of rows in the batch
 */
private void iterateHasNullsWithAggregationSelection(VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, ColumnVector inputColumn, int batchSize) {
  for (int batchIndex = 0; batchIndex < batchSize; batchIndex++) {
    // Guard clause: nothing to aggregate for a null row.
    if (inputColumn.isNull[batchIndex]) {
      continue;
    }
    Aggregation aggBuffer = getCurrentAggregationBuffer(aggregationBufferSets, aggregateIndex, batchIndex);
    valueProcessor.processValue(aggBuffer, inputColumn, batchIndex);
  }
}
Usage of org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorUDAFCount.Aggregation in the Apache Hive project: class VectorUDAFBloomFilter, method assignRowColumn.
/**
 * Serializes the aggregation's bloom filter and writes the resulting bytes
 * into the output BytesColumnVector at the given batch position.
 *
 * Fix: the original cast {@code agg} twice — the first local ({@code myagg})
 * was dead and never read; it has been removed, leaving a single cast.
 *
 * @param batch the output row batch
 * @param batchIndex row position in the batch to write
 * @param columnNum output column number (must be a BytesColumnVector)
 * @param agg aggregation buffer holding the bloom filter to serialize
 * @throws HiveException if bloom filter serialization fails
 */
@Override
public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum, AggregationBuffer agg) throws HiveException {
  BytesColumnVector outputColVector = (BytesColumnVector) batch.cols[columnNum];
  // A bloom filter aggregate always produces output; the row is never null.
  outputColVector.isNull[batchIndex] = false;
  Aggregation bfAgg = (Aggregation) agg;
  try {
    // byteStream is reused across rows; clear any previous contents first.
    byteStream.reset();
    BloomKFilter.serialize(byteStream, bfAgg.bf);
    byte[] bytes = byteStream.toByteArray();
    outputColVector.setVal(batchIndex, bytes);
  } catch (IOException err) {
    // Preserve the cause so serialization failures remain diagnosable.
    throw new HiveException("Error encountered while serializing bloomfilter", err);
  } finally {
    IOUtils.closeStream(byteStream);
  }
}
Usage of org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorUDAFCount.Aggregation in the Apache Hive project: class VectorUDAFBloomFilter, method iterateHasNullsSelectionWithAggregationSelection.
/**
 * Iterates an input column that may contain nulls while a selection vector
 * is in use. The selection vector maps logical batch position {@code i} to
 * the physical row {@code selection[i]} inside the column vector.
 *
 * Fix: the original checked {@code isNull[row]} but then processed the value
 * at logical index {@code i}; with a selection vector active, the value must
 * be read from the selected physical row. The aggregation buffer is still
 * looked up by logical position {@code i}, matching the sibling iterate
 * methods.
 *
 * @param aggregationBufferSets per-row aggregation buffer rows
 * @param aggregateIndex index of this aggregate within each buffer row
 * @param inputColumn input column vector (may contain nulls)
 * @param batchSize number of selected rows
 * @param selection selection vector mapping logical to physical row indices
 */
private void iterateHasNullsSelectionWithAggregationSelection(VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, ColumnVector inputColumn, int batchSize, int[] selection) {
  for (int i = 0; i < batchSize; ++i) {
    int row = selection[i];
    if (!inputColumn.isNull[row]) {
      Aggregation myagg = getCurrentAggregationBuffer(aggregationBufferSets, aggregateIndex, i);
      // Read the value at the selected physical row — the null check above
      // already uses 'row', so the value access must agree with it.
      valueProcessor.processValue(myagg, inputColumn, row);
    }
  }
}
Usage of org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorUDAFCount.Aggregation in the Apache Hive project: class VectorUDAFBloomFilter, method aggregateInput.
/**
 * Aggregates one batch of input into a single aggregation buffer.
 *
 * Evaluates the input expression, then dispatches to the iterate variant
 * matching the batch's shape: repeating column, selection vector in use or
 * not, and nulls present or not. An empty batch is a no-op.
 *
 * @param agg the aggregation buffer to update
 * @param batch the input row batch
 * @throws HiveException propagated from expression evaluation
 */
@Override
public void aggregateInput(AggregationBuffer agg, VectorizedRowBatch batch) throws HiveException {
  inputExpression.evaluate(batch);
  ColumnVector inputColumn = batch.cols[this.inputExpression.getOutputColumnNum()];
  int batchSize = batch.size;
  if (batchSize == 0) {
    return;
  }
  Aggregation myagg = (Aggregation) agg;
  if (inputColumn.isRepeating) {
    // Repeating column: one representative value lives in slot 0. Process it
    // once only when it is non-null (adding to a bloom filter is idempotent).
    boolean hasValue = inputColumn.noNulls || !inputColumn.isNull[0];
    if (hasValue) {
      valueProcessor.processValue(myagg, inputColumn, 0);
    }
    return;
  }
  // Dispatch on (selection vector in use) x (nulls present).
  if (batch.selectedInUse) {
    if (inputColumn.noNulls) {
      iterateSelectionNoNulls(myagg, inputColumn, batchSize, batch.selected);
    } else {
      iterateSelectionHasNulls(myagg, inputColumn, batchSize, batch.selected);
    }
  } else {
    if (inputColumn.noNulls) {
      iterateNoSelectionNoNulls(myagg, inputColumn, batchSize);
    } else {
      iterateNoSelectionHasNulls(myagg, inputColumn, batchSize);
    }
  }
}
Aggregations