Use of org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.writableShortObjectInspector in project hive by apache.
From the class TestGenericUDFPower, method testBytePowerShort:
@Test
public void testBytePowerShort() throws HiveException {
  GenericUDFPower udf = new GenericUDFPower();
  ByteWritable left = new ByteWritable((byte) 2);
  ShortWritable right = new ShortWritable((short) 4);
  ObjectInspector[] inputOIs = { PrimitiveObjectInspectorFactory.writableByteObjectInspector,
      PrimitiveObjectInspectorFactory.writableShortObjectInspector };
  DeferredObject[] args = { new DeferredJavaObject(left), new DeferredJavaObject(right) };
  PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
  Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo());
  DoubleWritable res = (DoubleWritable) udf.evaluate(args);
  Assert.assertEquals(new Double(16), new Double(res.get()));
}
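The same initialize/evaluate pattern applies when both operands are shorts. A minimal sketch, not taken from the Hive test suite, assuming POWER promotes integer operands to a double result exactly as the test above shows:
GenericUDFPower udf = new GenericUDFPower();
ShortWritable base = new ShortWritable((short) 3);
ShortWritable exponent = new ShortWritable((short) 2);
ObjectInspector[] inputOIs = { PrimitiveObjectInspectorFactory.writableShortObjectInspector,
    PrimitiveObjectInspectorFactory.writableShortObjectInspector };
DeferredObject[] args = { new DeferredJavaObject(base), new DeferredJavaObject(exponent) };
udf.initialize(inputOIs);
// the integer operands are promoted, so evaluate returns a DoubleWritable
DoubleWritable res = (DoubleWritable) udf.evaluate(args); // expected 9.0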
Use of org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.writableShortObjectInspector in project hive by apache.
From the class TestGenericUDFOPMod, method testModByZero2:
@Test
public void testModByZero2() throws HiveException {
  GenericUDFOPMod udf = new GenericUDFOPMod();
  // Short
  ShortWritable s1 = new ShortWritable((short) 4);
  ShortWritable s2 = new ShortWritable((short) 0);
  ObjectInspector[] inputOIs = { PrimitiveObjectInspectorFactory.writableShortObjectInspector,
      PrimitiveObjectInspectorFactory.writableShortObjectInspector };
  DeferredObject[] args = { new DeferredJavaObject(s1), new DeferredJavaObject(s2) };
  udf.initialize(inputOIs);
  ShortWritable s3 = (ShortWritable) udf.evaluate(args);
  Assert.assertNull(s3);
}
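For contrast, a brief sketch with hypothetical values, reusing the initialized udf and s1 from the test above: a non-zero divisor yields a concrete ShortWritable rather than null.
ShortWritable nonZero = new ShortWritable((short) 3);
DeferredObject[] okArgs = { new DeferredJavaObject(s1), new DeferredJavaObject(nonZero) };
ShortWritable r = (ShortWritable) udf.evaluate(okArgs); // expected 4 % 3 == 1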
Use of org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.writableShortObjectInspector in project hive by apache.
From the class TestGenericUDFOPMultiply, method testByteTimesShort:
@Test
public void testByteTimesShort() throws HiveException {
  GenericUDFOPMultiply udf = new GenericUDFOPMultiply();
  ByteWritable left = new ByteWritable((byte) 4);
  ShortWritable right = new ShortWritable((short) 6);
  ObjectInspector[] inputOIs = { PrimitiveObjectInspectorFactory.writableByteObjectInspector,
      PrimitiveObjectInspectorFactory.writableShortObjectInspector };
  DeferredObject[] args = { new DeferredJavaObject(left), new DeferredJavaObject(right) };
  PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
  Assert.assertEquals(TypeInfoFactory.shortTypeInfo, oi.getTypeInfo());
  ShortWritable res = (ShortWritable) udf.evaluate(args);
  Assert.assertEquals(24, res.get());
}
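The pattern extends to two short operands. A sketch, not from the Hive tests; that two shorts keep the common type short is an assumption by analogy with byte * short above:
GenericUDFOPMultiply udf = new GenericUDFOPMultiply();
ShortWritable a = new ShortWritable((short) 5);
ShortWritable b = new ShortWritable((short) 7);
ObjectInspector[] inputOIs = { PrimitiveObjectInspectorFactory.writableShortObjectInspector,
    PrimitiveObjectInspectorFactory.writableShortObjectInspector };
DeferredObject[] args = { new DeferredJavaObject(a), new DeferredJavaObject(b) };
PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
// assumed: the common type of short * short is short, as with byte * short above
ShortWritable res = (ShortWritable) udf.evaluate(args); // expected 35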
Use of org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.writableShortObjectInspector in project hive by apache.
From the class CommonJoinOperator, method initializeOp:
@Override
@SuppressWarnings("unchecked")
protected void initializeOp(Configuration hconf) throws HiveException {
  super.initializeOp(hconf);
  closeOpCalled = false;
  this.handleSkewJoin = conf.getHandleSkewJoin();
  this.hconf = hconf;
  heartbeatInterval = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVESENDHEARTBEAT);
  countAfterReport = 0;
  totalSz = 0;
  int tagLen = conf.getTagLength();
  // Map that contains the rows for each alias
  storage = new AbstractRowContainer[tagLen];
  numAliases = conf.getExprs().size();
  joinValues = new List[tagLen];
  joinFilters = new List[tagLen];
  order = conf.getTagOrder();
  condn = conf.getConds();
  nullsafes = conf.getNullSafes();
  noOuterJoin = conf.isNoOuterJoin();
  totalSz = JoinUtil.populateJoinKeyValue(joinValues, conf.getExprs(), order, NOTSKIPBIGTABLE, hconf);
  // process join filters
  joinFilters = new List[tagLen];
  JoinUtil.populateJoinKeyValue(joinFilters, conf.getFilters(), order, NOTSKIPBIGTABLE, hconf);
  joinValuesObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators(joinValues, inputObjInspectors,
      NOTSKIPBIGTABLE, tagLen);
  joinFilterObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators(joinFilters, inputObjInspectors,
      NOTSKIPBIGTABLE, tagLen);
  joinValuesStandardObjectInspectors = JoinUtil.getStandardObjectInspectors(joinValuesObjectInspectors,
      NOTSKIPBIGTABLE, tagLen);
  filterMaps = conf.getFilterMap();
  if (noOuterJoin) {
    rowContainerStandardObjectInspectors = joinValuesStandardObjectInspectors;
  } else {
    List<ObjectInspector>[] rowContainerObjectInspectors = new List[tagLen];
    for (Byte alias : order) {
      ArrayList<ObjectInspector> rcOIs = new ArrayList<ObjectInspector>();
      rcOIs.addAll(joinValuesObjectInspectors[alias]);
      // for each alias, add object inspector for short as the last element
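      // (this trailing short is the per-row filter tag; each stored row for an
      // outer join gets a matching ShortWritable appended, as below)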
      rcOIs.add(PrimitiveObjectInspectorFactory.writableShortObjectInspector);
      rowContainerObjectInspectors[alias] = rcOIs;
    }
    rowContainerStandardObjectInspectors = JoinUtil.getStandardObjectInspectors(rowContainerObjectInspectors,
        NOTSKIPBIGTABLE, tagLen);
  }
  dummyObj = new ArrayList[numAliases];
  dummyObjVectors = new RowContainer[numAliases];
  joinEmitInterval = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEJOINEMITINTERVAL);
  joinCacheSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEJOINCACHESIZE);
  // construct dummy null row (indicating empty table) and
  // construct spill table serde which is used if input is too
  // large to fit into main memory.
  byte pos = 0;
  for (Byte alias : order) {
    int sz = conf.getExprs().get(alias).size();
    ArrayList<Object> nr = new ArrayList<Object>(sz);
    for (int j = 0; j < sz; j++) {
      nr.add(null);
    }
    if (!noOuterJoin) {
      // add whether the row is filtered or not
      // this value does not matter for the dummyObj
      // because the join values are already null
      nr.add(new ShortWritable());
    }
    dummyObj[pos] = nr;
    // there should be only 1 dummy object in the RowContainer
    RowContainer<List<Object>> values = JoinUtil.getRowContainer(hconf,
        rowContainerStandardObjectInspectors[pos], alias, 1, spillTableDesc, conf, !hasFilter(pos), reporter);
    values.addRow(dummyObj[pos]);
    dummyObjVectors[pos] = values;
    // if serde is null, the input doesn't need to be spilled out
    // e.g., the output columns do not contain the input table
    RowContainer<List<Object>> rc = JoinUtil.getRowContainer(hconf,
        rowContainerStandardObjectInspectors[pos], alias, joinCacheSize, spillTableDesc, conf, !hasFilter(pos), reporter);
    storage[pos] = rc;
    pos++;
  }
  forwardCache = new Object[totalSz];
  aliasFilterTags = new short[numAliases];
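  // (byte) 0xff widens to short -1, i.e. 0xffff, leaving every filter-tag bit set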
  Arrays.fill(aliasFilterTags, (byte) 0xff);
  filterTags = new short[numAliases];
  skipVectors = new boolean[numAliases][];
  for (int i = 0; i < skipVectors.length; i++) {
    skipVectors[i] = new boolean[i + 1];
  }
  intermediate = new List[numAliases];
  offsets = new int[numAliases + 1];
  int sum = 0;
  for (int i = 0; i < numAliases; i++) {
    offsets[i] = sum;
    sum += joinValues[order[i]].size();
  }
  offsets[numAliases] = sum;
  outputObjInspector = getJoinOutputObjectInspector(order, joinValuesStandardObjectInspectors, conf);
  for (int i = 0; i < condn.length; i++) {
    if (condn[i].getType() == JoinDesc.LEFT_SEMI_JOIN) {
      hasLeftSemiJoin = true;
    }
  }
  // Create post-filtering evaluators if needed
  if (conf.getResidualFilterExprs() != null) {
    residualJoinFilters = new ArrayList<>(conf.getResidualFilterExprs().size());
    residualJoinFiltersOIs = new ArrayList<>(conf.getResidualFilterExprs().size());
    for (int i = 0; i < conf.getResidualFilterExprs().size(); i++) {
      ExprNodeDesc expr = conf.getResidualFilterExprs().get(i);
      residualJoinFilters.add(ExprNodeEvaluatorFactory.get(expr));
      residualJoinFiltersOIs.add(residualJoinFilters.get(i).initialize(outputObjInspector));
    }
    needsPostEvaluation = true;
    if (!noOuterJoin) {
      // We need to disable join emit interval, since for outer joins with post conditions
      // we need to have the full view on the right matching rows to know whether we need
      // to produce a row with NULL values or not
      joinEmitInterval = -1;
    }
  }
  if (LOG.isInfoEnabled()) {
    LOG.info("JOIN " + outputObjInspector.getTypeName() + " totalsz = " + totalSz);
  }
}
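To round out the CommonJoinOperator example, a minimal sketch of reading the trailing filter tag back through the same inspector; storedRow is a hypothetical row taken from one of the containers in storage, and ShortObjectInspector.get is the standard primitive accessor from the serde2 API:
ShortObjectInspector soi = PrimitiveObjectInspectorFactory.writableShortObjectInspector;
Object lastField = storedRow.get(storedRow.size() - 1); // storedRow: hypothetical List<Object> from storage
short filterTag = soi.get(lastField); // unwraps the trailing ShortWritable appended above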