Usage example of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in the Apache Hive project:
the evaluate method of the DoubleToStringUnaryUDF class.
/**
 * Evaluates a vectorized unary double-to-string conversion over the batch.
 *
 * <p>The per-row conversion is delegated to {@code func(outputColVector, vector, i)};
 * this method handles the standard vectorization cases: repeating input, no-null
 * input with/without a selection vector, and input containing NULLs.
 *
 * @param batch the row batch; column {@code inputColumn} is read and column
 *              {@code outputColumnNum} is written in place
 */
@Override
public void evaluate(VectorizedRowBatch batch) {

  // Populate the input column first if this expression has children.
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }

  DoubleColumnVector inputColVector = (DoubleColumnVector) batch.cols[inputColumn];
  int[] sel = batch.selected;
  int n = batch.size;
  double[] vector = inputColVector.vector;
  BytesColumnVector outputColVector = (BytesColumnVector) batch.cols[outputColumnNum];
  boolean[] outputIsNull = outputColVector.isNull;
  boolean[] inputIsNull = inputColVector.isNull;

  if (n == 0) {
    // Nothing to do for an empty batch.
    return;
  }

  // Fix: allocate/reset the output byte buffer only after the empty-batch early
  // return above, so empty batches do not pay for buffer initialization.
  outputColVector.initBuffer();

  // We do not need to do a column reset since we are carefully changing the output.
  outputColVector.isRepeating = false;

  if (inputColVector.isRepeating) {
    if (inputColVector.noNulls || !inputIsNull[0]) {
      // Set isNull before the call in case func changes it itself.
      outputIsNull[0] = false;
      func(outputColVector, vector, 0);
    } else {
      // Repeating NULL input yields a repeating NULL output.
      outputIsNull[0] = true;
      outputColVector.noNulls = false;
    }
    outputColVector.isRepeating = true;
    return;
  }

  if (inputColVector.noNulls) {
    if (batch.selectedInUse) {
      if (!outputColVector.noNulls) {
        // Output may still carry stale null flags; clear each touched row.
        for (int j = 0; j != n; j++) {
          final int i = sel[j];
          // Set isNull before the call in case func changes it itself.
          outputIsNull[i] = false;
          func(outputColVector, vector, i);
        }
      } else {
        for (int j = 0; j != n; j++) {
          final int i = sel[j];
          func(outputColVector, vector, i);
        }
      }
    } else {
      if (!outputColVector.noNulls) {
        // Assume it is almost always a performance win to fill all of isNull so we can
        // safely reset noNulls.
        Arrays.fill(outputIsNull, false);
        outputColVector.noNulls = true;
      }
      for (int i = 0; i != n; i++) {
        func(outputColVector, vector, i);
      }
    }
  } else /* there are NULLs in the inputColVector */ {
    // Don't call func when the value is null: the data may be undefined there.
    outputColVector.noNulls = false;
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        int i = sel[j];
        outputColVector.isNull[i] = inputColVector.isNull[i];
        if (!inputColVector.isNull[i]) {
          func(outputColVector, vector, i);
        }
      }
    } else {
      System.arraycopy(inputColVector.isNull, 0, outputColVector.isNull, 0, n);
      for (int i = 0; i != n; i++) {
        if (!inputColVector.isNull[i]) {
          func(outputColVector, vector, i);
        }
      }
    }
  }
}
Usage example of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in the Apache Hive project:
the evaluate method of the FilterDoubleColumnInList class.
/**
 * Filter expression: shrinks the batch to the rows whose double value in
 * column {@code inputCol} is a member of {@code inSet}.  NULL rows never pass.
 *
 * @param batch the row batch to filter in place (size / selected are updated)
 */
@Override
public void evaluate(VectorizedRowBatch batch) {

  // Populate the input column first if this expression has children.
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }

  final DoubleColumnVector inputVector = (DoubleColumnVector) batch.cols[inputCol];
  final int[] selected = batch.selected;
  final boolean[] nullFlags = inputVector.isNull;
  final int size = batch.size;
  final double[] values = inputVector.vector;

  // An empty batch needs no filtering.
  if (size == 0) {
    return;
  }

  if (inputVector.isRepeating) {
    // A repeating column passes or fails as a whole; the repeating property
    // does not change.  Short-circuit keeps lookup() off undefined NULL data.
    final boolean keep =
        (inputVector.noNulls || !nullFlags[0]) && inSet.lookup(values[0]);
    if (!keep) {
      // Entire batch is filtered out.
      batch.size = 0;
    }
    return;
  }

  if (batch.selectedInUse) {
    // Compact the existing selection vector in place.
    int survivors = 0;
    for (int j = 0; j != size; j++) {
      final int i = selected[j];
      if ((inputVector.noNulls || !nullFlags[i]) && inSet.lookup(values[i])) {
        selected[survivors++] = i;
      }
    }
    batch.size = survivors;
  } else {
    // Build a selection vector from scratch.
    int survivors = 0;
    for (int i = 0; i != size; i++) {
      if ((inputVector.noNulls || !nullFlags[i]) && inSet.lookup(values[i])) {
        selected[survivors++] = i;
      }
    }
    // Only switch to selection mode if at least one row was filtered out.
    if (survivors < size) {
      batch.size = survivors;
      batch.selectedInUse = true;
    }
  }
}
Usage example of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in the Apache Hive project:
the evaluate method of the FuncDecimalToDouble class.
// Evaluates a vectorized cast from decimal to double.  The per-row conversion
// is delegated to func(outputColVector, inputColVector, i); this method only
// handles batch iteration and the NULL/isRepeating flag bookkeeping.
// NOTE(review): comments below assume func() may set the output null flag
// itself ("changes its mind") -- confirm against concrete subclasses.
@Override
public void evaluate(VectorizedRowBatch batch) {
// Populate the input column first if this expression has children.
if (childExpressions != null) {
super.evaluateChildren(batch);
}
DecimalColumnVector inputColVector = (DecimalColumnVector) batch.cols[inputColumn];
int[] sel = batch.selected;
int n = batch.size;
DoubleColumnVector outputColVector = (DoubleColumnVector) batch.cols[outputColumnNum];
boolean[] inputIsNull = inputColVector.isNull;
boolean[] outputIsNull = outputColVector.isNull;
if (n == 0) {
// Nothing to do
return;
}
// We do not need to do a column reset since we are carefully changing the output.
outputColVector.isRepeating = false;
// Repeating input: convert row 0 once and mark the output repeating too.
if (inputColVector.isRepeating) {
if (inputColVector.noNulls || !inputIsNull[0]) {
// Set isNull before call in case it changes it mind.
outputIsNull[0] = false;
func(outputColVector, inputColVector, 0);
} else {
// A repeating NULL input yields a repeating NULL output.
outputIsNull[0] = true;
outputColVector.noNulls = false;
}
outputColVector.isRepeating = true;
return;
}
if (inputColVector.noNulls) {
if (batch.selectedInUse) {
// Only clear per-row null flags when the output may still carry stale ones.
if (!outputColVector.noNulls) {
for (int j = 0; j != n; j++) {
final int i = sel[j];
// Set isNull before call in case it changes it mind.
outputIsNull[i] = false;
func(outputColVector, inputColVector, i);
}
} else {
for (int j = 0; j != n; j++) {
final int i = sel[j];
func(outputColVector, inputColVector, i);
}
}
} else {
if (!outputColVector.noNulls) {
// Assume it is almost always a performance win to fill all of isNull so we can
// safely reset noNulls.
Arrays.fill(outputIsNull, false);
outputColVector.noNulls = true;
}
for (int i = 0; i != n; i++) {
func(outputColVector, inputColVector, i);
}
}
} else /* there are nulls in the inputColVector */
{
// Carefully handle NULLs...
// Propagate the input null mask and convert only non-null rows; the data
// is undefined where isNull is set.
outputColVector.noNulls = false;
if (batch.selectedInUse) {
for (int j = 0; j != n; j++) {
int i = sel[j];
outputColVector.isNull[i] = inputColVector.isNull[i];
if (!inputColVector.isNull[i]) {
func(outputColVector, inputColVector, i);
}
}
} else {
// Bulk-copy the null mask, then convert the non-null rows.
System.arraycopy(inputColVector.isNull, 0, outputColVector.isNull, 0, n);
for (int i = 0; i != n; i++) {
if (!inputColVector.isNull[i]) {
func(outputColVector, inputColVector, i);
}
}
}
}
}
Usage example of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in the Apache Hive project:
the evaluate method of the FuncDoubleToDecimal class.
/**
 * Evaluates a vectorized cast from double to decimal.  The per-row conversion
 * is delegated to {@code func(out, in, i)}; this method handles batch
 * iteration and the NULL/isRepeating flag bookkeeping.
 *
 * @param batch the row batch; column {@code inputColumn} is read and column
 *              {@code outputColumnNum} is written in place
 */
@Override
public void evaluate(VectorizedRowBatch batch) {

  // Populate the input column first if this expression has children.
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }

  DoubleColumnVector in = (DoubleColumnVector) batch.cols[inputColumn];
  DecimalColumnVector out = (DecimalColumnVector) batch.cols[outputColumnNum];
  boolean[] inNull = in.isNull;
  boolean[] outNull = out.isNull;
  int[] selection = batch.selected;
  int rows = batch.size;

  if (rows == 0) {
    // Empty batch: nothing to convert.
    return;
  }

  // No column reset is needed; every output slot we touch is written explicitly.
  out.isRepeating = false;

  if (in.isRepeating) {
    // Repeating input: convert row 0 once and mark the output repeating too.
    if (in.noNulls || !inNull[0]) {
      // Clear isNull before the call, in case func sets it itself.
      outNull[0] = false;
      func(out, in, 0);
    } else {
      // A repeating NULL input yields a repeating NULL output.
      outNull[0] = true;
      out.noNulls = false;
    }
    out.isRepeating = true;
    return;
  }

  if (in.noNulls) {
    if (batch.selectedInUse) {
      if (!out.noNulls) {
        // Output may still carry stale null flags; clear each touched row.
        for (int j = 0; j < rows; j++) {
          int i = selection[j];
          // Clear isNull before the call, in case func sets it itself.
          outNull[i] = false;
          func(out, in, i);
        }
      } else {
        for (int j = 0; j < rows; j++) {
          func(out, in, selection[j]);
        }
      }
    } else {
      if (!out.noNulls) {
        // Cheaper to clear the whole isNull array once than to track
        // individual entries; afterwards noNulls can be trusted again.
        Arrays.fill(outNull, false);
        out.noNulls = true;
      }
      for (int i = 0; i < rows; i++) {
        func(out, in, i);
      }
    }
  } else {
    // Input has NULLs: propagate the null mask and convert only non-null
    // rows, since the data is undefined where isNull is set.
    out.noNulls = false;
    if (batch.selectedInUse) {
      for (int j = 0; j < rows; j++) {
        int i = selection[j];
        outNull[i] = inNull[i];
        if (!inNull[i]) {
          func(out, in, i);
        }
      }
    } else {
      System.arraycopy(inNull, 0, outNull, 0, rows);
      for (int i = 0; i < rows; i++) {
        if (!inNull[i]) {
          func(out, in, i);
        }
      }
    }
  }
}
Usage example of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in the Apache Hive project:
the evaluate method of the FuncRand class.
/**
 * Fills the output double column with one pseudo-random value per row,
 * drawn from this expression's {@code random} generator.
 *
 * @param batch the row batch; column {@code outputColumnNum} is written in place
 */
@Override
public void evaluate(VectorizedRowBatch batch) {

  // Evaluate any child expressions first.
  if (childExpressions != null) {
    this.evaluateChildren(batch);
  }

  DoubleColumnVector output = (DoubleColumnVector) batch.cols[outputColumnNum];
  double[] results = output.vector;
  boolean[] resultIsNull = output.isNull;
  int[] selection = batch.selected;
  int rows = batch.size;

  // Each row gets its own value, so the output never repeats.
  output.isRepeating = false;

  if (rows == 0) {
    // Empty batch: nothing to generate.
    return;
  }

  if (batch.selectedInUse) {
    if (output.noNulls) {
      for (int j = 0; j < rows; j++) {
        results[selection[j]] = random.nextDouble();
      }
    } else {
      // Output may still carry stale null flags; clear each touched row.
      for (int j = 0; j < rows; j++) {
        int i = selection[j];
        // Clear the null flag before writing the value.
        resultIsNull[i] = false;
        results[i] = random.nextDouble();
      }
    }
  } else {
    if (!output.noNulls) {
      // Clearing the whole isNull array once lets us safely restore noNulls.
      Arrays.fill(resultIsNull, false);
      output.noNulls = true;
    }
    for (int i = 0; i < rows; i++) {
      results[i] = random.nextDouble();
    }
  }
}
Aggregations