Use of org.apache.hadoop.hive.ql.exec.vector.LongColumnVector in project hive by apache.
The evaluate method of the class IfExprIntervalDayTimeColumnScalar.
@Override
public void evaluate(VectorizedRowBatch batch) {

  // Evaluate any child expressions first so the input columns are populated.
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }

  final LongColumnVector condVector = (LongColumnVector) batch.cols[arg1Column];
  final IntervalDayTimeColumnVector thenVector =
      (IntervalDayTimeColumnVector) batch.cols[arg2Column];
  final IntervalDayTimeColumnVector outVector =
      (IntervalDayTimeColumnVector) batch.cols[outputColumnNum];
  final int[] sel = batch.selected;
  final boolean[] outIsNull = outVector.isNull;

  // No full column reset needed: every relevant output slot is written below.
  outVector.isRepeating = false;

  final int n = batch.size;
  final long[] cond = condVector.vector;

  // Nothing to compute for an empty batch.
  if (n == 0) {
    return;
  }

  // A repeating condition selects one side for the whole batch.
  // A NULL or false condition picks the ELSE scalar.
  if (condVector.isRepeating) {
    if ((condVector.noNulls || !condVector.isNull[0]) && cond[0] == 1) {
      thenVector.copySelected(batch.selectedInUse, sel, n, outVector);
    } else {
      outVector.fill(arg3Scalar);
    }
    return;
  }

  // Flatten the THEN input so its repeating/noNulls state does not multiply
  // the code paths below.
  thenVector.flatten(batch.selectedInUse, sel, n);

  if (condVector.noNulls) {
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        final int i = sel[j];
        outIsNull[i] = false;
        if (cond[i] == 1) {
          outVector.set(i, thenVector.asScratchIntervalDayTime(i));
        } else {
          outVector.set(i, arg3Scalar);
        }
      }
    } else {
      Arrays.fill(outIsNull, 0, n, false);
      for (int i = 0; i != n; i++) {
        if (cond[i] == 1) {
          outVector.set(i, thenVector.asScratchIntervalDayTime(i));
        } else {
          outVector.set(i, arg3Scalar);
        }
      }
    }
  } else {
    // The condition contains NULLs. A NULL condition selects the ELSE scalar,
    // so the output is null only when the THEN value is selected and null.
    outVector.noNulls = false;
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        final int i = sel[j];
        final boolean takeThen = !condVector.isNull[i] && cond[i] == 1;
        outVector.set(i, takeThen ? thenVector.asScratchIntervalDayTime(i) : arg3Scalar);
        outIsNull[i] = takeThen && thenVector.isNull[i];
      }
    } else {
      for (int i = 0; i != n; i++) {
        final boolean takeThen = !condVector.isNull[i] && cond[i] == 1;
        outVector.set(i, takeThen ? thenVector.asScratchIntervalDayTime(i) : arg3Scalar);
        outIsNull[i] = takeThen && thenVector.isNull[i];
      }
    }
  }

  // Restore the repeating / noNulls indicators of the flattened input.
  thenVector.unFlatten();
}
Use of org.apache.hadoop.hive.ql.exec.vector.LongColumnVector in project hive by apache.
The evaluate method of the class IfExprIntervalDayTimeScalarScalar.
@Override
public void evaluate(VectorizedRowBatch batch) {

  // Make sure the boolean condition column has been evaluated.
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }

  final LongColumnVector condVector = (LongColumnVector) batch.cols[arg1Column];
  final IntervalDayTimeColumnVector outVector =
      (IntervalDayTimeColumnVector) batch.cols[outputColumnNum];
  final int[] sel = batch.selected;
  final boolean[] outIsNull = outVector.isNull;

  // No column reset needed; every relevant output slot is overwritten below.
  outVector.isRepeating = false;

  final int n = batch.size;
  final long[] cond = condVector.vector;

  // Nothing to compute for an empty batch.
  if (n == 0) {
    return;
  }

  // Repeating condition: the whole batch takes one of the two scalars.
  // A NULL or false condition selects the ELSE scalar.
  if (condVector.isRepeating) {
    final boolean takeThen = (condVector.noNulls || !condVector.isNull[0]) && cond[0] == 1;
    outVector.fill(takeThen ? arg2Scalar : arg3Scalar);
    return;
  }

  if (condVector.noNulls) {
    if (batch.selectedInUse) {
      if (!outVector.noNulls) {
        for (int j = 0; j != n; j++) {
          final int i = sel[j];
          // Clear isNull before set() in case set() consults it.
          outIsNull[i] = false;
          outVector.set(i, cond[i] == 1 ? arg2Scalar : arg3Scalar);
        }
      } else {
        for (int j = 0; j != n; j++) {
          final int i = sel[j];
          outVector.set(i, cond[i] == 1 ? arg2Scalar : arg3Scalar);
        }
      }
    } else {
      if (!outVector.noNulls) {
        // Filling all of isNull is usually a performance win and lets us
        // safely flip noNulls back on.
        Arrays.fill(outIsNull, false);
        outVector.noNulls = true;
      }
      for (int i = 0; i != n; i++) {
        outVector.set(i, cond[i] == 1 ? arg2Scalar : arg3Scalar);
      }
    }
  } else {
    // The condition contains NULLs. Either way a non-null scalar is produced,
    // so every written isNull entry is cleared.
    outVector.noNulls = false;
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        final int i = sel[j];
        outVector.set(i, !condVector.isNull[i] && cond[i] == 1 ? arg2Scalar : arg3Scalar);
        outIsNull[i] = false;
      }
    } else {
      for (int i = 0; i != n; i++) {
        outVector.set(i, !condVector.isNull[i] && cond[i] == 1 ? arg2Scalar : arg3Scalar);
      }
      Arrays.fill(outIsNull, 0, n, false);
    }
  }
}
Use of org.apache.hadoop.hive.ql.exec.vector.LongColumnVector in project hive by apache.
The evaluate method of the class IfExprLongColumnLongColumn.
@Override
public void evaluate(VectorizedRowBatch batch) {

  // Evaluate children first so the condition and both branch columns are populated.
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }

  LongColumnVector arg1ColVector = (LongColumnVector) batch.cols[arg1Column];
  LongColumnVector arg2ColVector = (LongColumnVector) batch.cols[arg2Column];
  LongColumnVector arg3ColVector = (LongColumnVector) batch.cols[arg3Column];
  LongColumnVector outputColVector = (LongColumnVector) batch.cols[outputColumnNum];
  int[] sel = batch.selected;
  boolean[] outputIsNull = outputColVector.isNull;

  // We do not need to do a column reset since we are carefully changing the output.
  outputColVector.isRepeating = false;

  int n = batch.size;
  long[] vector1 = arg1ColVector.vector;
  long[] vector2 = arg2ColVector.vector;
  long[] vector3 = arg3ColVector.vector;
  long[] outputVector = outputColVector.vector;

  // return immediately if batch is empty
  if (n == 0) {
    return;
  }

  /* All the code paths below propagate nulls even if neither arg2 nor arg3
   * have nulls. This is to reduce the number of code paths and shorten the
   * code, at the expense of maybe doing unnecessary work if neither input
   * has nulls. This could be improved in the future by expanding the number
   * of code paths.
   */
  if (arg1ColVector.isRepeating) {
    // FIX: guard against a repeating NULL condition. The previous code tested
    // only vector1[0] == 1, so a repeating NULL condition could select the
    // THEN column based on a stale/undefined value. A NULL condition must
    // take the ELSE branch, matching the sibling IfExpr* classes.
    if ((arg1ColVector.noNulls || !arg1ColVector.isNull[0]) && vector1[0] == 1) {
      arg2ColVector.copySelected(batch.selectedInUse, sel, n, outputColVector);
    } else {
      arg3ColVector.copySelected(batch.selectedInUse, sel, n, outputColVector);
    }
    return;
  }

  // extend any repeating values and noNulls indicator in the inputs
  arg2ColVector.flatten(batch.selectedInUse, sel, n);
  arg3ColVector.flatten(batch.selectedInUse, sel, n);

  // Carefully handle NULLs...
  outputColVector.noNulls = false;

  if (arg1ColVector.noNulls) {
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        int i = sel[j];
        // Branch-free select: assumes the boolean condition is 0 or 1, so
        // (vector1[i] - 1) is all-ones for 0 and all-zeros for 1.
        outputVector[i] = (~(vector1[i] - 1) & vector2[i]) | ((vector1[i] - 1) & vector3[i]);
        outputIsNull[i] = (vector1[i] == 1 ? arg2ColVector.isNull[i] : arg3ColVector.isNull[i]);
      }
    } else {
      for (int i = 0; i != n; i++) {
        outputVector[i] = (~(vector1[i] - 1) & vector2[i]) | ((vector1[i] - 1) & vector3[i]);
        outputIsNull[i] = (vector1[i] == 1 ? arg2ColVector.isNull[i] : arg3ColVector.isNull[i]);
      }
    }
  } else /* there are nulls in the condition */
  {
    // A NULL condition selects the ELSE value; null-ness follows the chosen input.
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        int i = sel[j];
        outputVector[i] = (!arg1ColVector.isNull[i] && vector1[i] == 1 ? vector2[i] : vector3[i]);
        outputIsNull[i] = (!arg1ColVector.isNull[i] && vector1[i] == 1 ?
            arg2ColVector.isNull[i] : arg3ColVector.isNull[i]);
      }
    } else {
      for (int i = 0; i != n; i++) {
        outputVector[i] = (!arg1ColVector.isNull[i] && vector1[i] == 1 ? vector2[i] : vector3[i]);
        outputIsNull[i] = (!arg1ColVector.isNull[i] && vector1[i] == 1 ?
            arg2ColVector.isNull[i] : arg3ColVector.isNull[i]);
      }
    }
  }

  // restore repeating and no nulls indicators
  arg2ColVector.unFlatten();
  arg3ColVector.unFlatten();
}
Use of org.apache.hadoop.hive.ql.exec.vector.LongColumnVector in project hive by apache.
The evaluate method of the class IfExprStringGroupColumnStringGroupColumn.
@Override
public void evaluate(VectorizedRowBatch batch) {

  // Evaluate child expressions first so all three input columns are populated.
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }

  final LongColumnVector condVector = (LongColumnVector) batch.cols[arg1Column];
  final BytesColumnVector thenVector = (BytesColumnVector) batch.cols[arg2Column];
  final BytesColumnVector elseVector = (BytesColumnVector) batch.cols[arg3Column];
  final BytesColumnVector outVector = (BytesColumnVector) batch.cols[outputColumnNum];
  final int[] sel = batch.selected;
  final boolean[] outIsNull = outVector.isNull;

  // No full column reset needed: every relevant output slot is written below.
  outVector.isRepeating = false;

  final int n = batch.size;
  final long[] cond = condVector.vector;

  // Nothing to compute for an empty batch.
  if (n == 0) {
    return;
  }

  outVector.initBuffer();

  /* All the code paths below propagate nulls even if neither branch column
   * has nulls, trading some unnecessary work for fewer code paths.
   */

  // A repeating condition selects one column for the whole batch; a NULL or
  // false condition picks the ELSE column.
  if (condVector.isRepeating) {
    if ((condVector.noNulls || !condVector.isNull[0]) && cond[0] == 1) {
      thenVector.copySelected(batch.selectedInUse, sel, n, outVector);
    } else {
      elseVector.copySelected(batch.selectedInUse, sel, n, outVector);
    }
    return;
  }

  // Flatten both branch inputs to collapse repeating/noNulls handling.
  thenVector.flatten(batch.selectedInUse, sel, n);
  elseVector.flatten(batch.selectedInUse, sel, n);

  /*
   * Do careful maintenance of NULLs: the output is null exactly when the
   * selected input value is null.
   */
  outVector.noNulls = false;

  if (condVector.noNulls) {
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        final int i = sel[j];
        final BytesColumnVector src = cond[i] == 1 ? thenVector : elseVector;
        if (!src.isNull[i]) {
          outVector.setVal(i, src.vector[i], src.start[i], src.length[i]);
        }
        outIsNull[i] = src.isNull[i];
      }
    } else {
      for (int i = 0; i != n; i++) {
        final BytesColumnVector src = cond[i] == 1 ? thenVector : elseVector;
        if (!src.isNull[i]) {
          outVector.setVal(i, src.vector[i], src.start[i], src.length[i]);
        }
        outIsNull[i] = src.isNull[i];
      }
    }
  } else {
    // The condition contains NULLs; a NULL condition selects the ELSE column.
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        final int i = sel[j];
        final boolean takeThen = !condVector.isNull[i] && cond[i] == 1;
        final BytesColumnVector src = takeThen ? thenVector : elseVector;
        if (!src.isNull[i]) {
          outVector.setVal(i, src.vector[i], src.start[i], src.length[i]);
        }
        outIsNull[i] = src.isNull[i];
      }
    } else {
      for (int i = 0; i != n; i++) {
        final boolean takeThen = !condVector.isNull[i] && cond[i] == 1;
        final BytesColumnVector src = takeThen ? thenVector : elseVector;
        if (!src.isNull[i]) {
          outVector.setVal(i, src.vector[i], src.start[i], src.length[i]);
        }
        outIsNull[i] = src.isNull[i];
      }
    }
  }

  // Restore the repeating / noNulls indicators of the flattened inputs.
  thenVector.unFlatten();
  elseVector.unFlatten();
}
Use of org.apache.hadoop.hive.ql.exec.vector.LongColumnVector in project hive by apache.
The evaluate method of the class IfExprStringGroupColumnStringScalar.
@Override
public void evaluate(VectorizedRowBatch batch) {

  // Evaluate child expressions first so the condition and THEN columns are populated.
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }

  final LongColumnVector condVector = (LongColumnVector) batch.cols[arg1Column];
  final BytesColumnVector thenVector = (BytesColumnVector) batch.cols[arg2Column];
  final BytesColumnVector outVector = (BytesColumnVector) batch.cols[outputColumnNum];
  final int[] sel = batch.selected;
  final boolean[] outIsNull = outVector.isNull;

  // No full column reset needed: every relevant output slot is written below.
  outVector.isRepeating = false;

  final int n = batch.size;
  final long[] cond = condVector.vector;

  // Nothing to compute for an empty batch.
  if (n == 0) {
    return;
  }

  outVector.initBuffer();

  /* All the code paths below propagate nulls even if the THEN column has no
   * nulls, trading some unnecessary work for fewer code paths.
   */

  // A repeating condition selects one side for the whole batch; a NULL or
  // false condition picks the ELSE scalar.
  if (condVector.isRepeating) {
    if ((condVector.noNulls || !condVector.isNull[0]) && cond[0] == 1) {
      thenVector.copySelected(batch.selectedInUse, sel, n, outVector);
    } else {
      outVector.fill(arg3Scalar);
    }
    return;
  }

  // Flatten the THEN input to collapse repeating/noNulls handling.
  thenVector.flatten(batch.selectedInUse, sel, n);

  /*
   * Do careful maintenance of NULLs: the output is null only when the THEN
   * value is selected and null; the ELSE scalar is never null.
   */
  outVector.noNulls = false;

  if (condVector.noNulls) {
    // FUTURE: We could check thenVector.noNulls and optimize these loops.
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        final int i = sel[j];
        if (cond[i] == 1) {
          if (!thenVector.isNull[i]) {
            outVector.setVal(i, thenVector.vector[i], thenVector.start[i], thenVector.length[i]);
          }
          outIsNull[i] = thenVector.isNull[i];
        } else {
          outVector.setRef(i, arg3Scalar, 0, arg3Scalar.length);
          outIsNull[i] = false;
        }
      }
    } else {
      for (int i = 0; i != n; i++) {
        if (cond[i] == 1) {
          if (!thenVector.isNull[i]) {
            outVector.setVal(i, thenVector.vector[i], thenVector.start[i], thenVector.length[i]);
          }
          outIsNull[i] = thenVector.isNull[i];
        } else {
          outVector.setRef(i, arg3Scalar, 0, arg3Scalar.length);
          outIsNull[i] = false;
        }
      }
    }
  } else {
    // The condition contains NULLs; a NULL condition selects the ELSE scalar.
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        final int i = sel[j];
        final boolean takeThen = !condVector.isNull[i] && cond[i] == 1;
        if (takeThen) {
          if (!thenVector.isNull[i]) {
            outVector.setVal(i, thenVector.vector[i], thenVector.start[i], thenVector.length[i]);
          }
          outIsNull[i] = thenVector.isNull[i];
        } else {
          outVector.setRef(i, arg3Scalar, 0, arg3Scalar.length);
          outIsNull[i] = false;
        }
      }
    } else {
      for (int i = 0; i != n; i++) {
        final boolean takeThen = !condVector.isNull[i] && cond[i] == 1;
        if (takeThen) {
          if (!thenVector.isNull[i]) {
            outVector.setVal(i, thenVector.vector[i], thenVector.start[i], thenVector.length[i]);
          }
          outIsNull[i] = thenVector.isNull[i];
        } else {
          outVector.setRef(i, arg3Scalar, 0, arg3Scalar.length);
          outIsNull[i] = false;
        }
      }
    }
  }

  // restore state of repeating and non nulls indicators
  thenVector.unFlatten();
}
Aggregations