Use of org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector in project hive by apache.
Class IfExprIntervalDayTimeColumnColumn, method evaluate:
@Override
public void evaluate(VectorizedRowBatch batch) {
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }
  LongColumnVector arg1ColVector = (LongColumnVector) batch.cols[arg1Column];
  IntervalDayTimeColumnVector arg2ColVector = (IntervalDayTimeColumnVector) batch.cols[arg2Column];
  IntervalDayTimeColumnVector arg3ColVector = (IntervalDayTimeColumnVector) batch.cols[arg3Column];
  IntervalDayTimeColumnVector outputColVector = (IntervalDayTimeColumnVector) batch.cols[outputColumnNum];
  int[] sel = batch.selected;
  boolean[] outputIsNull = outputColVector.isNull;
  int n = batch.size;
  long[] vector1 = arg1ColVector.vector;

  // Return immediately if the batch is empty.
  if (n == 0) {
    return;
  }

  // We do not need to do a column reset since we are carefully changing the output.
  outputColVector.isRepeating = false;

  /* All the code paths below propagate nulls even if neither arg2 nor arg3
   * have nulls. This is to reduce the number of code paths and shorten the
   * code, at the expense of maybe doing unnecessary work if neither input
   * has nulls. This could be improved in the future by expanding the number
   * of code paths.
   */
  if (arg1ColVector.isRepeating) {
    if ((arg1ColVector.noNulls || !arg1ColVector.isNull[0]) && vector1[0] == 1) {
      arg2ColVector.copySelected(batch.selectedInUse, sel, n, outputColVector);
    } else {
      arg3ColVector.copySelected(batch.selectedInUse, sel, n, outputColVector);
    }
    return;
  }

  // Extend any repeating values and the noNulls indicator in the inputs.
  arg2ColVector.flatten(batch.selectedInUse, sel, n);
  arg3ColVector.flatten(batch.selectedInUse, sel, n);

  if (arg1ColVector.noNulls) {
    // Carefully handle NULLs...
    /*
     * For better performance on LONG/DOUBLE we don't want the conditional
     * statements inside the for loop.
     */
    outputColVector.noNulls = false;

    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        int i = sel[j];
        outputIsNull[i] = (vector1[i] == 1 ? arg2ColVector.isNull[i] : arg3ColVector.isNull[i]);
        outputColVector.set(i, vector1[i] == 1 ?
            arg2ColVector.asScratchIntervalDayTime(i) : arg3ColVector.asScratchIntervalDayTime(i));
      }
    } else {
      for (int i = 0; i != n; i++) {
        outputIsNull[i] = (vector1[i] == 1 ? arg2ColVector.isNull[i] : arg3ColVector.isNull[i]);
        outputColVector.set(i, vector1[i] == 1 ?
            arg2ColVector.asScratchIntervalDayTime(i) : arg3ColVector.asScratchIntervalDayTime(i));
      }
    }
  } else /* there are NULLs in the inputColVector */ {
    // Carefully handle NULLs...
    /*
     * For better performance on LONG/DOUBLE we don't want the conditional
     * statements inside the for loop.
     */
    outputColVector.noNulls = false;

    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        int i = sel[j];
        outputColVector.set(i, !arg1ColVector.isNull[i] && vector1[i] == 1 ?
            arg2ColVector.asScratchIntervalDayTime(i) : arg3ColVector.asScratchIntervalDayTime(i));
        outputIsNull[i] = (!arg1ColVector.isNull[i] && vector1[i] == 1 ?
            arg2ColVector.isNull[i] : arg3ColVector.isNull[i]);
      }
    } else {
      for (int i = 0; i != n; i++) {
        outputColVector.set(i, !arg1ColVector.isNull[i] && vector1[i] == 1 ?
            arg2ColVector.asScratchIntervalDayTime(i) : arg3ColVector.asScratchIntervalDayTime(i));
        outputIsNull[i] = (!arg1ColVector.isNull[i] && vector1[i] == 1 ?
            arg2ColVector.isNull[i] : arg3ColVector.isNull[i]);
      }
    }
  }

  // Restore the repeating and noNulls indicators.
  arg2ColVector.unFlatten();
  arg3ColVector.unFlatten();
}
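The calling convention is easiest to see from the driver's side: the expression only reads column indices out of the batch, so a caller wires condition, THEN, ELSE, and output columns into one VectorizedRowBatch. Below is a minimal sketch of that setup. The four-int constructor (arg1Column, arg2Column, arg3Column, outputColumnNum) and the expression's package are assumptions inferred from the fields this class reads, not confirmed signatures; the column-vector and HiveIntervalDayTime APIs shown are the ones used in the snippet above.

import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
import org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
// Package assumed; the IfExpr interval classes may live elsewhere depending on Hive version.
import org.apache.hadoop.hive.ql.exec.vector.expressions.IfExprIntervalDayTimeColumnColumn;

public class IfExprIntervalSketch {
  public static void main(String[] args) throws Exception {
    // Columns: 0 = boolean condition, 1 = THEN interval, 2 = ELSE interval, 3 = output.
    VectorizedRowBatch batch = new VectorizedRowBatch(4);
    LongColumnVector cond = new LongColumnVector();
    IntervalDayTimeColumnVector thenCol = new IntervalDayTimeColumnVector();
    IntervalDayTimeColumnVector elseCol = new IntervalDayTimeColumnVector();
    IntervalDayTimeColumnVector out = new IntervalDayTimeColumnVector();
    batch.cols[0] = cond;
    batch.cols[1] = thenCol;
    batch.cols[2] = elseCol;
    batch.cols[3] = out;
    batch.size = 3;

    for (int i = 0; i < batch.size; i++) {
      cond.vector[i] = (i % 2 == 0) ? 1 : 0;                   // IF(i is even, ...)
      thenCol.set(i, new HiveIntervalDayTime(1, 0, 0, 0, 0));  // 1 day  (days, h, m, s, nanos)
      elseCol.set(i, new HiveIntervalDayTime(0, 2, 0, 0, 0));  // 2 hours
    }

    // Assumed constructor: (arg1Column, arg2Column, arg3Column, outputColumnNum).
    IfExprIntervalDayTimeColumnColumn expr = new IfExprIntervalDayTimeColumnColumn(0, 1, 2, 3);
    expr.evaluate(batch);

    for (int i = 0; i < batch.size; i++) {
      // asScratchIntervalDayTime returns a reused scratch object, so print it immediately.
      System.out.println(i + " -> " + out.asScratchIntervalDayTime(i));
    }
  }
}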
Use of org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector in project hive by apache.
Class IfExprIntervalDayTimeColumnScalar, method evaluate:
@Override
public void evaluate(VectorizedRowBatch batch) {
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }
  LongColumnVector arg1ColVector = (LongColumnVector) batch.cols[arg1Column];
  IntervalDayTimeColumnVector arg2ColVector = (IntervalDayTimeColumnVector) batch.cols[arg2Column];
  IntervalDayTimeColumnVector outputColVector = (IntervalDayTimeColumnVector) batch.cols[outputColumnNum];
  int[] sel = batch.selected;
  boolean[] outputIsNull = outputColVector.isNull;

  // We do not need to do a column reset since we are carefully changing the output.
  outputColVector.isRepeating = false;

  int n = batch.size;
  long[] vector1 = arg1ColVector.vector;

  // Return immediately if the batch is empty.
  if (n == 0) {
    return;
  }

  if (arg1ColVector.isRepeating) {
    if ((arg1ColVector.noNulls || !arg1ColVector.isNull[0]) && vector1[0] == 1) {
      arg2ColVector.copySelected(batch.selectedInUse, sel, n, outputColVector);
    } else {
      outputColVector.fill(arg3Scalar);
    }
    return;
  }

  // Extend any repeating values and the noNulls indicator in the input to
  // reduce the number of code paths needed below.
  arg2ColVector.flatten(batch.selectedInUse, sel, n);

  if (arg1ColVector.noNulls) {
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        int i = sel[j];
        outputIsNull[i] = false;
        outputColVector.set(i, vector1[i] == 1 ? arg2ColVector.asScratchIntervalDayTime(i) : arg3Scalar);
      }
    } else {
      Arrays.fill(outputIsNull, 0, n, false);
      for (int i = 0; i != n; i++) {
        outputColVector.set(i, vector1[i] == 1 ? arg2ColVector.asScratchIntervalDayTime(i) : arg3Scalar);
      }
    }
  } else /* there are nulls */ {
    // Carefully handle NULLs...
    /*
     * For better performance on LONG/DOUBLE we don't want the conditional
     * statements inside the for loop.
     */
    outputColVector.noNulls = false;

    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        int i = sel[j];
        outputColVector.set(i, !arg1ColVector.isNull[i] && vector1[i] == 1 ?
            arg2ColVector.asScratchIntervalDayTime(i) : arg3Scalar);
        outputIsNull[i] = (!arg1ColVector.isNull[i] && vector1[i] == 1 ? arg2ColVector.isNull[i] : false);
      }
    } else {
      for (int i = 0; i != n; i++) {
        outputColVector.set(i, !arg1ColVector.isNull[i] && vector1[i] == 1 ?
            arg2ColVector.asScratchIntervalDayTime(i) : arg3Scalar);
        outputIsNull[i] = (!arg1ColVector.isNull[i] && vector1[i] == 1 ? arg2ColVector.isNull[i] : false);
      }
    }
  }

  // Restore the repeating and noNulls indicators.
  arg2ColVector.unFlatten();
}
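Note the repeating fast path: when the condition column is repeating and resolves to the ELSE branch, the whole output collapses into one fill(arg3Scalar) call rather than a per-row loop. A minimal sketch of that column-vector contract, assuming (as in the standard Hive column vectors) that fill() marks the vector repeating and non-null and stores the value at position 0:

import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
import org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector;

public class FillSketch {
  public static void main(String[] args) {
    IntervalDayTimeColumnVector out = new IntervalDayTimeColumnVector();
    // fill() writes one value and flips the repeating/noNulls flags, so every
    // logical row of the batch reads the same interval from slot 0.
    out.fill(new HiveIntervalDayTime(0, 3, 30, 0, 0)); // 3h30m; (days, h, m, s, nanos) ctor
    System.out.println(out.isRepeating + " " + out.noNulls + " "
        + out.asScratchIntervalDayTime(0));
  }
}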
Use of org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector in project hive by apache.
Class IfExprIntervalDayTimeScalarScalar, method evaluate:
@Override
public void evaluate(VectorizedRowBatch batch) {
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }
  LongColumnVector arg1ColVector = (LongColumnVector) batch.cols[arg1Column];
  IntervalDayTimeColumnVector outputColVector = (IntervalDayTimeColumnVector) batch.cols[outputColumnNum];
  int[] sel = batch.selected;
  boolean[] outputIsNull = outputColVector.isNull;

  // We do not need to do a column reset since we are carefully changing the output.
  outputColVector.isRepeating = false;

  int n = batch.size;
  long[] vector1 = arg1ColVector.vector;

  // Return immediately if the batch is empty.
  if (n == 0) {
    return;
  }

  if (arg1ColVector.isRepeating) {
    if ((arg1ColVector.noNulls || !arg1ColVector.isNull[0]) && vector1[0] == 1) {
      outputColVector.fill(arg2Scalar);
    } else {
      outputColVector.fill(arg3Scalar);
    }
    return;
  }

  if (arg1ColVector.noNulls) {
    if (batch.selectedInUse) {
      if (!outputColVector.noNulls) {
        for (int j = 0; j != n; j++) {
          final int i = sel[j];
          // Set isNull before the call in case it changes its mind.
          outputIsNull[i] = false;
          outputColVector.set(i, vector1[i] == 1 ? arg2Scalar : arg3Scalar);
        }
      } else {
        for (int j = 0; j != n; j++) {
          final int i = sel[j];
          outputColVector.set(i, vector1[i] == 1 ? arg2Scalar : arg3Scalar);
        }
      }
    } else {
      if (!outputColVector.noNulls) {
        // Assume it is almost always a performance win to fill all of isNull so we can
        // safely reset noNulls.
        Arrays.fill(outputIsNull, false);
        outputColVector.noNulls = true;
      }
      for (int i = 0; i != n; i++) {
        outputColVector.set(i, vector1[i] == 1 ? arg2Scalar : arg3Scalar);
      }
    }
  } else /* there are nulls */ {
    // Carefully handle NULLs...
    /*
     * For better performance on LONG/DOUBLE we don't want the conditional
     * statements inside the for loop.
     */
    outputColVector.noNulls = false;

    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        int i = sel[j];
        outputColVector.set(i, !arg1ColVector.isNull[i] && vector1[i] == 1 ? arg2Scalar : arg3Scalar);
        outputIsNull[i] = false;
      }
    } else {
      for (int i = 0; i != n; i++) {
        outputColVector.set(i, !arg1ColVector.isNull[i] && vector1[i] == 1 ? arg2Scalar : arg3Scalar);
      }
      Arrays.fill(outputIsNull, 0, n, false);
    }
  }
}
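Across all three variants, a NULL condition is folded into the ELSE branch: the test !arg1ColVector.isNull[i] && vector1[i] == 1 is true only for a non-null TRUE, so IF(NULL, a, b) yields b, matching SQL CASE semantics without a third code path. A self-contained toy truth table makes the folding explicit:

public class IfNullSemantics {
  // Mirrors the branch-selection expression used in the loops above.
  static String branch(boolean condIsNull, long condValue) {
    return (!condIsNull && condValue == 1) ? "THEN" : "ELSE";
  }

  public static void main(String[] args) {
    System.out.println(branch(false, 1)); // TRUE  -> THEN
    System.out.println(branch(false, 0)); // FALSE -> ELSE
    System.out.println(branch(true, 1));  // NULL  -> ELSE (SQL: IF(NULL, a, b) = b)
  }
}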
Use of org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector in project hive by apache.
Class VectorHashKeyWrapperBatch, method evaluateBatchGroupingSets:
public void evaluateBatchGroupingSets(VectorizedRowBatch batch,
    boolean[] groupingSetsOverrideIsNulls) throws HiveException {
  for (int i = 0; i < batch.size; ++i) {
    vectorHashKeyWrappers[i].clearIsNull();
  }
  int keyIndex;
  int columnIndex;
  for (int i = 0; i < longIndices.length; ++i) {
    keyIndex = longIndices[i];
    if (groupingSetsOverrideIsNulls[keyIndex]) {
      final int batchSize = batch.size;
      for (int r = 0; r < batchSize; ++r) {
        vectorHashKeyWrappers[r].assignNullLong(keyIndex, i);
      }
      continue;
    }
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    LongColumnVector columnVector = (LongColumnVector) batch.cols[columnIndex];
    evaluateLongColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < doubleIndices.length; ++i) {
    keyIndex = doubleIndices[i];
    if (groupingSetsOverrideIsNulls[keyIndex]) {
      final int batchSize = batch.size;
      for (int r = 0; r < batchSize; ++r) {
        vectorHashKeyWrappers[r].assignNullDouble(keyIndex, i);
      }
      continue;
    }
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    DoubleColumnVector columnVector = (DoubleColumnVector) batch.cols[columnIndex];
    evaluateDoubleColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < stringIndices.length; ++i) {
    keyIndex = stringIndices[i];
    if (groupingSetsOverrideIsNulls[keyIndex]) {
      final int batchSize = batch.size;
      for (int r = 0; r < batchSize; ++r) {
        vectorHashKeyWrappers[r].assignNullString(keyIndex, i);
      }
      continue;
    }
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    BytesColumnVector columnVector = (BytesColumnVector) batch.cols[columnIndex];
    evaluateStringColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < decimalIndices.length; ++i) {
    keyIndex = decimalIndices[i];
    if (groupingSetsOverrideIsNulls[keyIndex]) {
      final int batchSize = batch.size;
      for (int r = 0; r < batchSize; ++r) {
        vectorHashKeyWrappers[r].assignNullDecimal(keyIndex, i);
      }
      continue;
    }
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    DecimalColumnVector columnVector = (DecimalColumnVector) batch.cols[columnIndex];
    evaluateDecimalColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < timestampIndices.length; ++i) {
    keyIndex = timestampIndices[i];
    if (groupingSetsOverrideIsNulls[keyIndex]) {
      final int batchSize = batch.size;
      for (int r = 0; r < batchSize; ++r) {
        vectorHashKeyWrappers[r].assignNullTimestamp(keyIndex, i);
      }
      continue;
    }
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    TimestampColumnVector columnVector = (TimestampColumnVector) batch.cols[columnIndex];
    evaluateTimestampColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < intervalDayTimeIndices.length; ++i) {
    keyIndex = intervalDayTimeIndices[i];
    if (groupingSetsOverrideIsNulls[keyIndex]) {
      final int batchSize = batch.size;
      for (int r = 0; r < batchSize; ++r) {
        vectorHashKeyWrappers[r].assignNullIntervalDayTime(keyIndex, i);
      }
      continue;
    }
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    IntervalDayTimeColumnVector columnVector = (IntervalDayTimeColumnVector) batch.cols[columnIndex];
    evaluateIntervalDayTimeColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < batch.size; ++i) {
    vectorHashKeyWrappers[i].setHashKey();
  }
}
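The only difference from plain key evaluation is the override mask: groupingSetsOverrideIsNulls is indexed by key position, and a true entry forces that key to NULL in every row's wrapper instead of reading the column. A hypothetical illustration of the masks a driver might pass for GROUP BY a, b GROUPING SETS ((a, b), (a), ()) (the commented call is assumed usage, not code from this class):

public class GroupingSetMasks {
  public static void main(String[] args) {
    // Keys are (a, b); true means "override this key to NULL for this pass".
    boolean[][] masks = {
        { false, false },  // grouping set (a, b): both keys kept
        { false, true  },  // grouping set (a):    b forced to NULL
        { true,  true  },  // grouping set ():     both forced to NULL
    };
    for (boolean[] mask : masks) {
      // One pass over the batch per grouping set, e.g. (hypothetical driver code):
      // keyWrappersBatch.evaluateBatchGroupingSets(batch, mask);
      System.out.println(java.util.Arrays.toString(mask));
    }
  }
}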
Use of org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector in project hive by apache.
Class VectorHashKeyWrapperBatch, method evaluateBatch:
/**
 * Processes a batch:
 * <ul>
 * <li>Evaluates each key vector expression.</li>
 * <li>Copies out each key's primitive values into the key wrappers.</li>
 * <li>Computes the hash code of the key wrappers.</li>
 * </ul>
 * @param batch
 * @throws HiveException
 */
public void evaluateBatch(VectorizedRowBatch batch) throws HiveException {
  if (keyCount == 0) {
    // All key wrappers must be EmptyVectorHashKeyWrapper.
    return;
  }
  for (int i = 0; i < batch.size; ++i) {
    vectorHashKeyWrappers[i].clearIsNull();
  }
  int keyIndex;
  int columnIndex;
  for (int i = 0; i < longIndices.length; ++i) {
    keyIndex = longIndices[i];
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    LongColumnVector columnVector = (LongColumnVector) batch.cols[columnIndex];
    evaluateLongColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < doubleIndices.length; ++i) {
    keyIndex = doubleIndices[i];
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    DoubleColumnVector columnVector = (DoubleColumnVector) batch.cols[columnIndex];
    evaluateDoubleColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < stringIndices.length; ++i) {
    keyIndex = stringIndices[i];
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    BytesColumnVector columnVector = (BytesColumnVector) batch.cols[columnIndex];
    evaluateStringColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < decimalIndices.length; ++i) {
    keyIndex = decimalIndices[i];
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    DecimalColumnVector columnVector = (DecimalColumnVector) batch.cols[columnIndex];
    evaluateDecimalColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < timestampIndices.length; ++i) {
    keyIndex = timestampIndices[i];
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    TimestampColumnVector columnVector = (TimestampColumnVector) batch.cols[columnIndex];
    evaluateTimestampColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < intervalDayTimeIndices.length; ++i) {
    keyIndex = intervalDayTimeIndices[i];
    columnIndex = keyExpressions[keyIndex].getOutputColumnNum();
    IntervalDayTimeColumnVector columnVector = (IntervalDayTimeColumnVector) batch.cols[columnIndex];
    evaluateIntervalDayTimeColumnVector(batch, columnVector, keyIndex, i);
  }
  for (int i = 0; i < batch.size; ++i) {
    vectorHashKeyWrappers[i].setHashKey();
  }
}
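A typical consumer (such as the vectorized group-by operator) runs evaluateBatch and then probes per-group state once per row using the precomputed wrappers. The sketch below is hypothetical driver code, not the actual operator: getVectorHashKeyWrappers() is the accessor the operator is believed to use, the concrete wrapper class name varies across Hive versions (so the array is held as Object[]), and copyKey() is the KeyWrapper method for snapshotting a reused wrapper before storing it as a map key.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.ql.exec.KeyWrapper;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
// Package of VectorHashKeyWrapperBatch varies by Hive version; adjust the import as needed.

public class GroupBySketch {
  // Hypothetical driver: aggBuffers maps a copied key wrapper to aggregation state.
  static void processBatch(VectorHashKeyWrapperBatch keyWrappersBatch,
                           VectorizedRowBatch batch,
                           Map<Object, long[]> aggBuffers) throws Exception {
    keyWrappersBatch.evaluateBatch(batch);  // fills one wrapper per row
    Object[] wrappers = keyWrappersBatch.getVectorHashKeyWrappers();
    for (int r = 0; r < batch.size; r++) {
      long[] state = aggBuffers.get(wrappers[r]);
      if (state == null) {
        // The batch reuses wrapper objects across batches, so any key that is
        // stored in the map must be a copy, not the live wrapper.
        state = new long[1];
        aggBuffers.put(((KeyWrapper) wrappers[r]).copyKey(), state);
      }
      state[0]++;  // e.g., COUNT(*) per group
    }
  }
}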