Use of org.apache.hadoop.hive.ql.plan.SparkHashTableSinkDesc in project hive by apache.
The class Vectorizer, method validateSparkHashTableSinkOperator.
private boolean validateSparkHashTableSinkOperator(SparkHashTableSinkOperator op) {
SparkHashTableSinkDesc desc = op.getConf();
byte tag = desc.getTag();
// it's essentially a MapJoinDesc
List<ExprNodeDesc> filterExprs = desc.getFilters().get(tag);
List<ExprNodeDesc> keyExprs = desc.getKeys().get(tag);
List<ExprNodeDesc> valueExprs = desc.getExprs().get(tag);
return validateExprNodeDesc(filterExprs, "Filter", VectorExpressionDescriptor.Mode.FILTER, /* allowComplex */ true)
&& validateExprNodeDesc(keyExprs, "Key")
&& validateExprNodeDesc(valueExprs, "Value");
}
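Since the desc is essentially a MapJoinDesc, here is a short illustrative sketch of how such a descriptor is built and queried by tag. The helper method and its names are hypothetical (not from the Hive tree), and it assumes the same imports as the snippet above; the constructor, setTag/getTag, and the tag-keyed getters all appear in the snippets on this page.
// Hypothetical sketch: derive a sink descriptor from a MapJoinDesc and read its
// tag-keyed expression maps, as the validation method above does.
private static void inspectSinkDesc(MapJoinOperator mapJoinOp, byte smallTablePos) {
  SparkHashTableSinkDesc sinkDesc = new SparkHashTableSinkDesc(mapJoinOp.getConf());
  sinkDesc.setTag(smallTablePos);
  byte tag = sinkDesc.getTag();
  // All expression maps are keyed by the small-table tag, exactly as in MapJoinDesc.
  List<ExprNodeDesc> keyExprs = sinkDesc.getKeys().get(tag);
  List<ExprNodeDesc> valueExprs = sinkDesc.getExprs().get(tag);
  List<ExprNodeDesc> filterExprs = sinkDesc.getFilters().get(tag);
}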
Use of org.apache.hadoop.hive.ql.plan.SparkHashTableSinkDesc in project hive by apache.
The class SparkReduceSinkMapJoinProc, method process.
/* (non-Javadoc)
* This processor addresses the RS-MJ case that occurs in Spark on the small/hash
* table side of things. The work that the RS will be a part of must be connected
* to the MJ work via a broadcast edge.
* We should not walk down the tree when we encounter this pattern, because the
* type of work (map work or reduce work) needs to be determined on the basis of
* the big-table side, which may be map work (no need for shuffle) or reduce work.
*/
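/*
* Shape of the pattern being matched (small/hash table side):
*   TS (small table) -> RS -> MJ <- big-table side
* After this rule, the RS is replaced by a SparkHashTableSinkOperator and the MJ
* gets a HashTableDummyOperator as its in-memory-side parent.
*/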
@SuppressWarnings("unchecked")
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext, Object... nodeOutputs) throws SemanticException {
GenSparkProcContext context = (GenSparkProcContext) procContext;
if (!nd.getClass().equals(MapJoinOperator.class)) {
return null;
}
MapJoinOperator mapJoinOp = (MapJoinOperator) nd;
if (stack.size() < 2 || !(stack.get(stack.size() - 2) instanceof ReduceSinkOperator)) {
context.currentMapJoinOperators.add(mapJoinOp);
return null;
}
context.preceedingWork = null;
context.currentRootOperator = null;
ReduceSinkOperator parentRS = (ReduceSinkOperator) stack.get(stack.size() - 2);
// remove the tag for the in-memory side of the mapjoin
parentRS.getConf().setSkipTag(true);
parentRS.setSkipTag(true);
// remember the original parent list before we start modifying it.
if (!context.mapJoinParentMap.containsKey(mapJoinOp)) {
List<Operator<?>> parents = new ArrayList<Operator<?>>(mapJoinOp.getParentOperators());
context.mapJoinParentMap.put(mapJoinOp, parents);
}
List<BaseWork> mapJoinWork;
/*
* If work was already generated for the big-table side of this mapjoin,
* we need to hook the work generated for the RS (associated with the RS-MJ
* pattern) into that pre-existing work.
*
* Otherwise, we record that the mapjoin op needs to be linked to the RS work
* (associated with the RS-MJ pattern) once that work exists.
*/
mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
int workMapSize = context.childToWorkMap.get(parentRS).size();
Preconditions.checkArgument(workMapSize == 1, "AssertionError: expected context.childToWorkMap.get(parentRS).size() to be 1, but was " + workMapSize);
BaseWork parentWork = context.childToWorkMap.get(parentRS).get(0);
// set the link between mapjoin and parent vertex
int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
if (pos == -1) {
throw new SemanticException("Cannot find position of parent in mapjoin");
}
LOG.debug("Mapjoin " + mapJoinOp + ", pos: " + pos + " --> " + parentWork.getName());
mapJoinOp.getConf().getParentToInput().put(pos, parentWork.getName());
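// SHUFFLE_NONE: the small-table work reaches the map-join work over a broadcast
// edge, so no shuffle is needed on this connection.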
SparkEdgeProperty edgeProp = new SparkEdgeProperty(SparkEdgeProperty.SHUFFLE_NONE);
if (mapJoinWork != null) {
for (BaseWork myWork : mapJoinWork) {
// link the work with the work associated with the reduce sink that triggered this rule
SparkWork sparkWork = context.currentTask.getWork();
LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
sparkWork.connect(parentWork, myWork, edgeProp);
}
}
// remember in case we need to connect additional work later
Map<BaseWork, SparkEdgeProperty> linkWorkMap = null;
if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
} else {
linkWorkMap = new HashMap<BaseWork, SparkEdgeProperty>();
}
linkWorkMap.put(parentWork, edgeProp);
context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
if (reduceSinks == null) {
reduceSinks = new ArrayList<ReduceSinkOperator>();
}
reduceSinks.add(parentRS);
context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
// create the dummy operators
List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
// create a new operator, HashTableDummyOperator, which shares the table desc
HashTableDummyDesc desc = new HashTableDummyDesc();
HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(mapJoinOp.getCompilationOpContext(), desc);
TableDesc tbl;
// need to create the correct table descriptor for key/value
RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
dummyOp.getConf().setTbl(tbl);
Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
StringBuilder keyOrder = new StringBuilder();
StringBuilder keyNullOrder = new StringBuilder();
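// one sort-order / null-order character per key column: '+' means ascending,
// and the null position comes from the configured default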
for (int i = 0; i < keyCols.size(); i++) {
keyOrder.append("+");
keyNullOrder.append(NullOrdering.defaultNullOrder(context.conf));
}
TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), keyNullOrder.toString());
mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
// let the dummy op be the parent of mapjoin op
mapJoinOp.replaceParent(parentRS, dummyOp);
List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
dummyChildren.add(mapJoinOp);
dummyOp.setChildOperators(dummyChildren);
dummyOperators.add(dummyOp);
// cut the operator tree so as to not retain connections from the parent RS downstream
List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
int childIndex = childOperators.indexOf(mapJoinOp);
childOperators.remove(childIndex);
// the work needs to know about the dummy operators so they can be initialized separately at task startup
if (mapJoinWork != null) {
for (BaseWork myWork : mapJoinWork) {
myWork.addDummyOp(dummyOp);
}
}
if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
dummyOperators.add(op);
}
}
context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
// replace the ReduceSinkOperators that are parents of the MapJoinOperator with SparkHashTableSinkOperators
MapJoinDesc mjDesc = mapJoinOp.getConf();
HiveConf conf = context.conf;
// Unlike in MR, we may call this method multiple times, for each
// small table HTS. But, since it's idempotent, it should be OK.
mjDesc.resetOrder();
float hashtableMemoryUsage;
if (hasGroupBy(mapJoinOp, context)) {
hashtableMemoryUsage = conf.getFloatVar(HiveConf.ConfVars.HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE);
} else {
hashtableMemoryUsage = conf.getFloatVar(HiveConf.ConfVars.HIVEHASHTABLEMAXMEMORYUSAGE);
}
mjDesc.setHashTableMemoryUsage(hashtableMemoryUsage);
SparkHashTableSinkDesc hashTableSinkDesc = new SparkHashTableSinkDesc(mjDesc);
SparkHashTableSinkOperator hashTableSinkOp = (SparkHashTableSinkOperator) OperatorFactory.get(mapJoinOp.getCompilationOpContext(), hashTableSinkDesc);
byte tag = (byte) pos;
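// valueIndex (if present) maps each value column to a key column; a non-negative
// entry means the value can be read back from the join key, so only value
// expressions without a key mapping (negative index) need to be stored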
int[] valueIndex = mjDesc.getValueIndex(tag);
if (valueIndex != null) {
List<ExprNodeDesc> newValues = new ArrayList<ExprNodeDesc>();
List<ExprNodeDesc> values = hashTableSinkDesc.getExprs().get(tag);
for (int index = 0; index < values.size(); index++) {
if (valueIndex[index] < 0) {
newValues.add(values.get(index));
}
}
hashTableSinkDesc.getExprs().put(tag, newValues);
}
// get all parents of reduce sink
List<Operator<? extends OperatorDesc>> rsParentOps = parentRS.getParentOperators();
for (Operator<? extends OperatorDesc> parent : rsParentOps) {
parent.replaceChild(parentRS, hashTableSinkOp);
}
hashTableSinkOp.setParentOperators(rsParentOps);
hashTableSinkOp.getConf().setTag(tag);
return true;
}
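In plan terms, the rewrite performed by this method can be summarized as the following before/after sketch, derived from the code above:
Before:  TS (small table) -> RS -> MJ <- big-table side
After:   small-table work:  TS (small table) -> SparkHashTableSinkOperator
         map-join work:     HashTableDummyOperator -> MJ <- big-table side
         SparkWork:         parentWork --SHUFFLE_NONE (broadcast)--> mapJoinWork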
Use of org.apache.hadoop.hive.ql.plan.SparkHashTableSinkDesc in project hive by apache.
The class Vectorizer, method validateAndVectorizeOperator.
public Operator<? extends OperatorDesc> validateAndVectorizeOperator(Operator<? extends OperatorDesc> op, VectorizationContext vContext, boolean isReduce, boolean isTezOrSpark, VectorTaskColumnInfo vectorTaskColumnInfo) throws HiveException, VectorizerCannotVectorizeException {
Operator<? extends OperatorDesc> vectorOp = null;
// This "global" allows various validation methods to set the "not vectorized" reason.
currentOperator = op;
boolean isNative;
try {
switch(op.getType()) {
case MAPJOIN:
{
if (op instanceof MapJoinOperator) {
if (!validateMapJoinOperator((MapJoinOperator) op)) {
throw new VectorizerCannotVectorizeException();
}
} else if (op instanceof SMBMapJoinOperator) {
if (!validateSMBMapJoinOperator((SMBMapJoinOperator) op)) {
throw new VectorizerCannotVectorizeException();
}
} else {
setOperatorNotSupported(op);
throw new VectorizerCannotVectorizeException();
}
if (op instanceof MapJoinOperator) {
MapJoinDesc desc = (MapJoinDesc) op.getConf();
int joinType = desc.getConds()[0].getType();
VectorMapJoinDesc vectorMapJoinDesc = new VectorMapJoinDesc();
boolean specialize = canSpecializeMapJoin(op, desc, isTezOrSpark, vContext, vectorMapJoinDesc);
if (!specialize) {
Class<? extends Operator<?>> opClass = null;
// *NON-NATIVE* vector map differences for LEFT OUTER JOIN and Filtered...
List<ExprNodeDesc> bigTableFilters = desc.getFilters().get((byte) desc.getPosBigTable());
boolean isOuterAndFiltered = (!desc.isNoOuterJoin() && bigTableFilters.size() > 0);
if (!isOuterAndFiltered) {
opClass = VectorMapJoinOperator.class;
} else {
if (joinType == JoinDesc.FULL_OUTER_JOIN) {
setOperatorIssue("Vectorized & filtered full-outer joins not supported");
throw new VectorizerCannotVectorizeException();
}
opClass = VectorMapJoinOuterFilteredOperator.class;
}
vectorOp = OperatorFactory.getVectorOperator(opClass, op.getCompilationOpContext(), desc, vContext, vectorMapJoinDesc);
isNative = false;
} else {
// TEMPORARY Until Native Vector Map Join with Hybrid passes tests...
// HiveConf.setBoolVar(physicalContext.getConf(),
// HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN, false);
vectorOp = specializeMapJoinOperator(op, vContext, desc, vectorMapJoinDesc);
isNative = true;
if (vectorTaskColumnInfo != null) {
if (usesVectorUDFAdaptor(vectorMapJoinDesc.getAllBigTableKeyExpressions())) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
if (usesVectorUDFAdaptor(vectorMapJoinDesc.getAllBigTableValueExpressions())) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
}
}
} else {
Preconditions.checkState(op instanceof SMBMapJoinOperator);
SMBJoinDesc smbJoinSinkDesc = (SMBJoinDesc) op.getConf();
// Check additional constraint.
if (smbJoinSinkDesc.getFilterMap() != null) {
setOperatorIssue("FilterMaps not supported for Vector Pass-Thru SMB MapJoin");
throw new VectorizerCannotVectorizeException();
}
VectorSMBJoinDesc vectorSMBJoinDesc = new VectorSMBJoinDesc();
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), smbJoinSinkDesc, vContext, vectorSMBJoinDesc);
isNative = false;
}
}
break;
case REDUCESINK:
{
if (!validateReduceSinkOperator((ReduceSinkOperator) op)) {
throw new VectorizerCannotVectorizeException();
}
ReduceSinkDesc reduceDesc = (ReduceSinkDesc) op.getConf();
VectorReduceSinkDesc vectorReduceSinkDesc = new VectorReduceSinkDesc();
boolean specialize = canSpecializeReduceSink(reduceDesc, isTezOrSpark, vContext, vectorReduceSinkDesc);
if (!specialize) {
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), reduceDesc, vContext, vectorReduceSinkDesc);
isNative = false;
} else {
vectorOp = specializeReduceSinkOperator(op, vContext, reduceDesc, vectorReduceSinkDesc);
isNative = true;
if (vectorTaskColumnInfo != null) {
VectorReduceSinkInfo vectorReduceSinkInfo = vectorReduceSinkDesc.getVectorReduceSinkInfo();
if (usesVectorUDFAdaptor(vectorReduceSinkInfo.getReduceSinkKeyExpressions())) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
if (usesVectorUDFAdaptor(vectorReduceSinkInfo.getReduceSinkValueExpressions())) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
}
}
}
break;
case FILTER:
{
if (!validateFilterOperator((FilterOperator) op)) {
throw new VectorizerCannotVectorizeException();
}
VectorFilterDesc vectorFilterDesc = new VectorFilterDesc();
vectorOp = vectorizeFilterOperator(op, vContext, vectorFilterDesc);
isNative = true;
if (vectorTaskColumnInfo != null) {
VectorExpression vectorPredicateExpr = vectorFilterDesc.getPredicateExpression();
if (usesVectorUDFAdaptor(vectorPredicateExpr)) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
}
}
break;
case TOPNKEY:
{
if (!validateTopNKeyOperator((TopNKeyOperator) op)) {
throw new VectorizerCannotVectorizeException();
}
VectorTopNKeyDesc vectorTopNKeyDesc = new VectorTopNKeyDesc();
vectorOp = vectorizeTopNKeyOperator(op, vContext, vectorTopNKeyDesc);
isNative = true;
if (vectorTaskColumnInfo != null) {
VectorExpression[] keyExpressions = vectorTopNKeyDesc.getKeyExpressions();
if (usesVectorUDFAdaptor(keyExpressions)) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
}
}
break;
case SELECT:
{
if (!validateSelectOperator((SelectOperator) op)) {
throw new VectorizerCannotVectorizeException();
}
VectorSelectDesc vectorSelectDesc = new VectorSelectDesc();
vectorOp = vectorizeSelectOperator(op, vContext, vectorSelectDesc);
isNative = true;
if (vectorTaskColumnInfo != null) {
VectorExpression[] vectorSelectExprs = vectorSelectDesc.getSelectExpressions();
if (usesVectorUDFAdaptor(vectorSelectExprs)) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
}
}
break;
case GROUPBY:
{
// The validateGroupByOperator method will update vectorGroupByDesc.
VectorGroupByDesc vectorGroupByDesc = new VectorGroupByDesc();
if (!validateGroupByOperator((GroupByOperator) op, isReduce, isTezOrSpark, vectorGroupByDesc)) {
throw new VectorizerCannotVectorizeException();
}
ImmutablePair<Operator<? extends OperatorDesc>, String> pair = doVectorizeGroupByOperator(op, vContext, vectorGroupByDesc);
if (pair.left == null) {
setOperatorIssue(pair.right);
throw new VectorizerCannotVectorizeException();
}
vectorOp = pair.left;
isNative = false;
if (vectorTaskColumnInfo != null) {
VectorExpression[] vecKeyExpressions = vectorGroupByDesc.getKeyExpressions();
if (usesVectorUDFAdaptor(vecKeyExpressions)) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
VectorAggregationDesc[] vecAggrDescs = vectorGroupByDesc.getVecAggrDescs();
for (VectorAggregationDesc vecAggrDesc : vecAggrDescs) {
if (usesVectorUDFAdaptor(vecAggrDesc.getInputExpression())) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
}
}
}
break;
case FILESINK:
{
if (!validateFileSinkOperator((FileSinkOperator) op)) {
throw new VectorizerCannotVectorizeException();
}
FileSinkDesc fileSinkDesc = (FileSinkDesc) op.getConf();
VectorFileSinkDesc vectorFileSinkDesc = new VectorFileSinkDesc();
boolean isArrowSpecialization = checkForArrowFileSink(fileSinkDesc, isTezOrSpark, vContext, vectorFileSinkDesc);
if (isArrowSpecialization) {
vectorOp = specializeArrowFileSinkOperator(op, vContext, fileSinkDesc, vectorFileSinkDesc);
isNative = true;
} else {
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), fileSinkDesc, vContext, vectorFileSinkDesc);
isNative = false;
}
}
break;
case LIMIT:
{
// No validation.
LimitDesc limitDesc = (LimitDesc) op.getConf();
VectorLimitDesc vectorLimitDesc = new VectorLimitDesc();
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), limitDesc, vContext, vectorLimitDesc);
isNative = true;
}
break;
case EVENT:
{
// No validation.
AppMasterEventDesc eventDesc = (AppMasterEventDesc) op.getConf();
VectorAppMasterEventDesc vectorEventDesc = new VectorAppMasterEventDesc();
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), eventDesc, vContext, vectorEventDesc);
isNative = true;
}
break;
case PTF:
{
// The validatePTFOperator method will update vectorPTFDesc.
VectorPTFDesc vectorPTFDesc = new VectorPTFDesc();
if (!validatePTFOperator((PTFOperator) op, vContext, vectorPTFDesc)) {
throw new VectorizerCannotVectorizeException();
}
vectorOp = vectorizePTFOperator(op, vContext, vectorPTFDesc);
isNative = true;
}
break;
case HASHTABLESINK:
{
// No validation.
SparkHashTableSinkDesc sparkHashTableSinkDesc = (SparkHashTableSinkDesc) op.getConf();
VectorSparkHashTableSinkDesc vectorSparkHashTableSinkDesc = new VectorSparkHashTableSinkDesc();
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), sparkHashTableSinkDesc, vContext, vectorSparkHashTableSinkDesc);
isNative = true;
}
break;
case SPARKPRUNINGSINK:
{
// No validation.
SparkPartitionPruningSinkDesc sparkPartitionPruningSinkDesc = (SparkPartitionPruningSinkDesc) op.getConf();
VectorSparkPartitionPruningSinkDesc vectorSparkPartitionPruningSinkDesc = new VectorSparkPartitionPruningSinkDesc();
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), sparkPartitionPruningSinkDesc, vContext, vectorSparkPartitionPruningSinkDesc);
// need to maintain the unique ID so that target map works can
// read the output
((SparkPartitionPruningSinkOperator) vectorOp).setUniqueId(((SparkPartitionPruningSinkOperator) op).getUniqueId());
isNative = true;
}
break;
default:
setOperatorNotSupported(op);
throw new VectorizerCannotVectorizeException();
}
} catch (HiveException e) {
setOperatorIssue(e.getMessage());
throw new VectorizerCannotVectorizeException();
}
Preconditions.checkState(vectorOp != null);
if (vectorTaskColumnInfo != null && !isNative) {
vectorTaskColumnInfo.setAllNative(false);
}
LOG.debug("vectorizeOperator " + vectorOp.getClass().getName());
LOG.debug("vectorizeOperator " + vectorOp.getConf().getClass().getName());
// These operators need to be linked to enable runtime statistics to be gathered/used correctly
planMapper.link(op, vectorOp);
return vectorOp;
}
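A hypothetical caller sketch follows; the wrapper method and the per-operator fallback policy are illustrative rather than Hive's actual driver code, which handles the fallback at task level. It only shows how the exception-based contract of the method above can be consumed.
// Hypothetical sketch: validateAndVectorizeOperator signals "cannot vectorize" via
// VectorizerCannotVectorizeException; a caller can catch it and keep the row-mode op.
Operator<? extends OperatorDesc> tryVectorize(Vectorizer vectorizer,
    Operator<? extends OperatorDesc> op, VectorizationContext vContext,
    VectorTaskColumnInfo info) throws HiveException {
  try {
    return vectorizer.validateAndVectorizeOperator(op, vContext,
        /* isReduce */ false, /* isTezOrSpark */ true, info);
  } catch (VectorizerCannotVectorizeException e) {
    return op; // fall back to the original (non-vectorized) operator
  }
}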
Use of org.apache.hadoop.hive.ql.plan.SparkHashTableSinkDesc in project hive by apache.
The class Vectorizer, method vectorizeOperator.
public Operator<? extends OperatorDesc> vectorizeOperator(Operator<? extends OperatorDesc> op, VectorizationContext vContext, boolean isTezOrSpark, VectorTaskColumnInfo vectorTaskColumnInfo) throws HiveException {
Operator<? extends OperatorDesc> vectorOp = null;
boolean isNative;
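// Note: unlike validateAndVectorizeOperator, this path performs no per-operator
// validation; where a Vector*Desc is used, it is attached to the operator's own
// desc via setVectorDesc(...) rather than passed to the OperatorFactory.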
switch(op.getType()) {
case TABLESCAN:
vectorOp = vectorizeTableScanOperator(op, vContext);
isNative = true;
break;
case MAPJOIN:
{
if (op instanceof MapJoinOperator) {
VectorMapJoinInfo vectorMapJoinInfo = new VectorMapJoinInfo();
MapJoinDesc desc = (MapJoinDesc) op.getConf();
boolean specialize = canSpecializeMapJoin(op, desc, isTezOrSpark, vContext, vectorMapJoinInfo);
if (!specialize) {
Class<? extends Operator<?>> opClass = null;
// *NON-NATIVE* vector map differences for LEFT OUTER JOIN and Filtered...
List<ExprNodeDesc> bigTableFilters = desc.getFilters().get((byte) desc.getPosBigTable());
boolean isOuterAndFiltered = (!desc.isNoOuterJoin() && bigTableFilters.size() > 0);
if (!isOuterAndFiltered) {
opClass = VectorMapJoinOperator.class;
} else {
opClass = VectorMapJoinOuterFilteredOperator.class;
}
vectorOp = OperatorFactory.getVectorOperator(opClass, op.getCompilationOpContext(), op.getConf(), vContext);
isNative = false;
} else {
// TEMPORARY Until Native Vector Map Join with Hybrid passes tests...
// HiveConf.setBoolVar(physicalContext.getConf(),
// HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN, false);
vectorOp = specializeMapJoinOperator(op, vContext, desc, vectorMapJoinInfo);
isNative = true;
if (vectorTaskColumnInfo != null) {
if (usesVectorUDFAdaptor(vectorMapJoinInfo.getBigTableKeyExpressions())) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
if (usesVectorUDFAdaptor(vectorMapJoinInfo.getBigTableValueExpressions())) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
}
}
} else {
Preconditions.checkState(op instanceof SMBMapJoinOperator);
SMBJoinDesc smbJoinSinkDesc = (SMBJoinDesc) op.getConf();
VectorSMBJoinDesc vectorSMBJoinDesc = new VectorSMBJoinDesc();
smbJoinSinkDesc.setVectorDesc(vectorSMBJoinDesc);
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), smbJoinSinkDesc, vContext);
isNative = false;
}
}
break;
case REDUCESINK:
{
VectorReduceSinkInfo vectorReduceSinkInfo = new VectorReduceSinkInfo();
ReduceSinkDesc desc = (ReduceSinkDesc) op.getConf();
boolean specialize = canSpecializeReduceSink(desc, isTezOrSpark, vContext, vectorReduceSinkInfo);
if (!specialize) {
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), op.getConf(), vContext);
isNative = false;
} else {
vectorOp = specializeReduceSinkOperator(op, vContext, desc, vectorReduceSinkInfo);
isNative = true;
if (vectorTaskColumnInfo != null) {
if (usesVectorUDFAdaptor(vectorReduceSinkInfo.getReduceSinkKeyExpressions())) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
if (usesVectorUDFAdaptor(vectorReduceSinkInfo.getReduceSinkValueExpressions())) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
}
}
}
break;
case FILTER:
{
vectorOp = vectorizeFilterOperator(op, vContext);
isNative = true;
if (vectorTaskColumnInfo != null) {
VectorFilterDesc vectorFilterDesc = (VectorFilterDesc) ((AbstractOperatorDesc) vectorOp.getConf()).getVectorDesc();
VectorExpression vectorPredicateExpr = vectorFilterDesc.getPredicateExpression();
if (usesVectorUDFAdaptor(vectorPredicateExpr)) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
}
}
break;
case SELECT:
{
vectorOp = vectorizeSelectOperator(op, vContext);
isNative = true;
if (vectorTaskColumnInfo != null) {
VectorSelectDesc vectorSelectDesc = (VectorSelectDesc) ((AbstractOperatorDesc) vectorOp.getConf()).getVectorDesc();
VectorExpression[] vectorSelectExprs = vectorSelectDesc.getSelectExpressions();
if (usesVectorUDFAdaptor(vectorSelectExprs)) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
}
}
break;
case GROUPBY:
{
vectorOp = vectorizeGroupByOperator(op, vContext);
isNative = false;
if (vectorTaskColumnInfo != null) {
VectorGroupByDesc vectorGroupByDesc = (VectorGroupByDesc) ((AbstractOperatorDesc) vectorOp.getConf()).getVectorDesc();
if (!vectorGroupByDesc.isVectorOutput()) {
vectorTaskColumnInfo.setGroupByVectorOutput(false);
}
VectorExpression[] vecKeyExpressions = vectorGroupByDesc.getKeyExpressions();
if (usesVectorUDFAdaptor(vecKeyExpressions)) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
VectorAggregateExpression[] vecAggregators = vectorGroupByDesc.getAggregators();
for (VectorAggregateExpression vecAggr : vecAggregators) {
if (usesVectorUDFAdaptor(vecAggr.inputExpression())) {
vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
}
}
}
}
break;
case FILESINK:
{
FileSinkDesc fileSinkDesc = (FileSinkDesc) op.getConf();
VectorFileSinkDesc vectorFileSinkDesc = new VectorFileSinkDesc();
fileSinkDesc.setVectorDesc(vectorFileSinkDesc);
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), fileSinkDesc, vContext);
isNative = false;
}
break;
case LIMIT:
{
LimitDesc limitDesc = (LimitDesc) op.getConf();
VectorLimitDesc vectorLimitDesc = new VectorLimitDesc();
limitDesc.setVectorDesc(vectorLimitDesc);
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), limitDesc, vContext);
isNative = true;
}
break;
case EVENT:
{
AppMasterEventDesc eventDesc = (AppMasterEventDesc) op.getConf();
VectorAppMasterEventDesc vectorEventDesc = new VectorAppMasterEventDesc();
eventDesc.setVectorDesc(vectorEventDesc);
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), eventDesc, vContext);
isNative = true;
}
break;
case HASHTABLESINK:
{
SparkHashTableSinkDesc sparkHashTableSinkDesc = (SparkHashTableSinkDesc) op.getConf();
VectorSparkHashTableSinkDesc vectorSparkHashTableSinkDesc = new VectorSparkHashTableSinkDesc();
sparkHashTableSinkDesc.setVectorDesc(vectorSparkHashTableSinkDesc);
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), sparkHashTableSinkDesc, vContext);
isNative = true;
}
break;
case SPARKPRUNINGSINK:
{
SparkPartitionPruningSinkDesc sparkPartitionPruningSinkDesc = (SparkPartitionPruningSinkDesc) op.getConf();
VectorSparkPartitionPruningSinkDesc vectorSparkPartitionPruningSinkDesc = new VectorSparkPartitionPruningSinkDesc();
sparkPartitionPruningSinkDesc.setVectorDesc(vectorSparkPartitionPruningSinkDesc);
vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), sparkPartitionPruningSinkDesc, vContext);
isNative = true;
}
break;
default:
// These are children of GROUP BY operators with non-vector outputs.
isNative = false;
vectorOp = op;
break;
}
Preconditions.checkState(vectorOp != null);
if (vectorTaskColumnInfo != null && !isNative) {
vectorTaskColumnInfo.setAllNative(false);
}
LOG.debug("vectorizeOperator " + vectorOp.getClass().getName());
LOG.debug("vectorizeOperator " + vectorOp.getConf().getClass().getName());
if (vectorOp != op) {
fixupParentChildOperators(op, vectorOp);
((AbstractOperatorDesc) vectorOp.getConf()).setVectorMode(true);
}
return vectorOp;
}
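An illustrative sketch of how vectorizeOperator composes over an operator tree; the recursive walker below is hypothetical (Hive's Vectorizer drives this through its own node walkers), but the splice-or-pass-through behavior it relies on is exactly what the method above implements.
// Hypothetical sketch: apply vectorizeOperator depth-first. When a vector
// counterpart exists, vectorizeOperator splices it into the plan
// (fixupParentChildOperators) and marks it vector-mode; otherwise it returns
// the original operator unchanged.
void vectorizeSubtree(Operator<? extends OperatorDesc> op, VectorizationContext vContext,
    boolean isTezOrSpark, VectorTaskColumnInfo info) throws HiveException {
  Operator<? extends OperatorDesc> newOp = vectorizeOperator(op, vContext, isTezOrSpark, info);
  if (newOp.getChildOperators() != null) {
    for (Operator<? extends OperatorDesc> child
        : new ArrayList<Operator<? extends OperatorDesc>>(newOp.getChildOperators())) {
      vectorizeSubtree(child, vContext, isTezOrSpark, info);
    }
  }
}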
Use of org.apache.hadoop.hive.ql.plan.SparkHashTableSinkDesc in project hive by apache.
The class GenSparkSkewJoinProcessor, method insertSHTS.
/**
* Insert a SparkHashTableSink and a HashTableDummy between the small-table directory's TS and the MJ.
*/
@SuppressWarnings("unchecked")
private static void insertSHTS(byte tag, TableScanOperator tableScan, MapWork bigMapWork) {
Preconditions.checkArgument(tableScan.getChildOperators().size() == 1 && tableScan.getChildOperators().get(0) instanceof MapJoinOperator);
HashTableDummyDesc desc = new HashTableDummyDesc();
HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(tableScan.getCompilationOpContext(), desc);
dummyOp.getConf().setTbl(tableScan.getTableDescSkewJoin());
MapJoinOperator mapJoinOp = (MapJoinOperator) tableScan.getChildOperators().get(0);
mapJoinOp.replaceParent(tableScan, dummyOp);
List<Operator<? extends OperatorDesc>> mapJoinChildren = new ArrayList<Operator<? extends OperatorDesc>>();
mapJoinChildren.add(mapJoinOp);
dummyOp.setChildOperators(mapJoinChildren);
bigMapWork.addDummyOp(dummyOp);
MapJoinDesc mjDesc = mapJoinOp.getConf();
// mapjoin should not be affected by join reordering
mjDesc.resetOrder();
SparkHashTableSinkDesc hashTableSinkDesc = new SparkHashTableSinkDesc(mjDesc);
SparkHashTableSinkOperator hashTableSinkOp = (SparkHashTableSinkOperator) OperatorFactory.get(tableScan.getCompilationOpContext(), hashTableSinkDesc);
int[] valueIndex = mjDesc.getValueIndex(tag);
if (valueIndex != null) {
List<ExprNodeDesc> newValues = new ArrayList<ExprNodeDesc>();
List<ExprNodeDesc> values = hashTableSinkDesc.getExprs().get(tag);
for (int index = 0; index < values.size(); index++) {
if (valueIndex[index] < 0) {
newValues.add(values.get(index));
}
}
hashTableSinkDesc.getExprs().put(tag, newValues);
}
tableScan.replaceChild(mapJoinOp, hashTableSinkOp);
List<Operator<? extends OperatorDesc>> tableScanParents = new ArrayList<Operator<? extends OperatorDesc>>();
tableScanParents.add(tableScan);
hashTableSinkOp.setParentOperators(tableScanParents);
hashTableSinkOp.getConf().setTag(tag);
}
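The value-index pruning loop above also appears, nearly line for line, in SparkReduceSinkMapJoinProc.process. As a refactoring sketch, it could be extracted into a small helper; this helper does not exist in the Hive tree and assumes the same imports as the snippets above.
// Illustrative helper (not in the Hive tree): drop value expressions that are
// already covered by the join key. In MapJoinDesc, getValueIndex(tag) maps each
// value column to a key column; a non-negative entry means the value can be
// read back from the key, so only unmapped (negative) entries are kept.
private static void pruneKeyCoveredValues(SparkHashTableSinkDesc sinkDesc,
    MapJoinDesc mjDesc, byte tag) {
  int[] valueIndex = mjDesc.getValueIndex(tag);
  if (valueIndex == null) {
    return;
  }
  List<ExprNodeDesc> values = sinkDesc.getExprs().get(tag);
  List<ExprNodeDesc> newValues = new ArrayList<ExprNodeDesc>();
  for (int i = 0; i < values.size(); i++) {
    if (valueIndex[i] < 0) {
      newValues.add(values.get(i));
    }
  }
  sinkDesc.getExprs().put(tag, newValues);
}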