Use of org.apache.hadoop.hive.ql.plan.TableDesc in project hive by apache.
In the class MapJoinProcessor, the method getMapJoinDesc:
public static MapJoinDesc getMapJoinDesc(HiveConf hconf, JoinOperator op, boolean leftInputJoin, String[] baseSrc, List<String> mapAliases, int mapJoinPos, boolean noCheckOuterJoin, boolean adjustParentsChildren) throws SemanticException {
JoinDesc desc = op.getConf();
JoinCondDesc[] condns = desc.getConds();
Byte[] tagOrder = desc.getTagOrder();
// outer join cannot be performed on a table which is being cached
if (!noCheckOuterJoin) {
if (checkMapJoin(mapJoinPos, condns) < 0) {
throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg());
}
}
Map<String, ExprNodeDesc> colExprMap = op.getColumnExprMap();
List<ColumnInfo> schema = new ArrayList<ColumnInfo>(op.getSchema().getSignature());
Map<Byte, List<ExprNodeDesc>> valueExprs = op.getConf().getExprs();
Map<Byte, List<ExprNodeDesc>> newValueExprs = new HashMap<Byte, List<ExprNodeDesc>>();
ObjectPair<List<ReduceSinkOperator>, Map<Byte, List<ExprNodeDesc>>> pair = getKeys(leftInputJoin, baseSrc, op);
List<ReduceSinkOperator> oldReduceSinkParentOps = pair.getFirst();
for (Map.Entry<Byte, List<ExprNodeDesc>> entry : valueExprs.entrySet()) {
byte tag = entry.getKey();
Operator<?> terminal = oldReduceSinkParentOps.get(tag);
List<ExprNodeDesc> values = entry.getValue();
List<ExprNodeDesc> newValues = ExprNodeDescUtils.backtrack(values, op, terminal);
newValueExprs.put(tag, newValues);
for (int i = 0; i < schema.size(); i++) {
ColumnInfo column = schema.get(i);
if (column == null) {
continue;
}
ExprNodeDesc expr = colExprMap.get(column.getInternalName());
int index = ExprNodeDescUtils.indexOf(expr, values);
if (index >= 0) {
schema.set(i, null);
if (adjustParentsChildren) {
// Since we remove reduce sink parents, replace original expressions
colExprMap.put(column.getInternalName(), newValues.get(index));
}
}
}
}
// rewrite value index for mapjoin
Map<Byte, int[]> valueIndices = new HashMap<Byte, int[]>();
// get the join keys from old parent ReduceSink operators
Map<Byte, List<ExprNodeDesc>> keyExprMap = pair.getSecond();
if (!adjustParentsChildren) {
// Since we did not remove reduce sink parents, keep the original value expressions
newValueExprs = valueExprs;
// Join key exprs are represented in terms of the original table columns;
// we need to convert these to the generated column names visible in the Join operator
Map<Byte, List<ExprNodeDesc>> newKeyExprMap = new HashMap<Byte, List<ExprNodeDesc>>();
for (Map.Entry<Byte, List<ExprNodeDesc>> mapEntry : keyExprMap.entrySet()) {
Byte pos = mapEntry.getKey();
ReduceSinkOperator rsParent = oldReduceSinkParentOps.get(pos.byteValue());
List<ExprNodeDesc> keyExprList = ExprNodeDescUtils.resolveJoinKeysAsRSColumns(mapEntry.getValue(), rsParent);
if (keyExprList == null) {
throw new SemanticException("Error resolving join keys");
}
newKeyExprMap.put(pos, keyExprList);
}
keyExprMap = newKeyExprMap;
}
// construct valueTableDescs and valueFilteredTableDescs
List<TableDesc> valueTableDescs = new ArrayList<TableDesc>();
List<TableDesc> valueFilteredTableDescs = new ArrayList<TableDesc>();
int[][] filterMap = desc.getFilterMap();
for (byte pos = 0; pos < op.getParentOperators().size(); pos++) {
List<ExprNodeDesc> valueCols = newValueExprs.get(pos);
if (pos != mapJoinPos) {
// remove values in key exprs for value table schema
// value expression for hashsink will be modified in
// LocalMapJoinProcessor
int[] valueIndex = new int[valueCols.size()];
List<ExprNodeDesc> valueColsInValueExpr = new ArrayList<ExprNodeDesc>();
for (int i = 0; i < valueIndex.length; i++) {
ExprNodeDesc expr = valueCols.get(i);
int kindex = ExprNodeDescUtils.indexOf(expr, keyExprMap.get(pos));
if (kindex >= 0) {
valueIndex[i] = kindex;
} else {
valueIndex[i] = -valueColsInValueExpr.size() - 1;
valueColsInValueExpr.add(expr);
}
}
if (needValueIndex(valueIndex)) {
valueIndices.put(pos, valueIndex);
}
valueCols = valueColsInValueExpr;
}
// deep copy expr node desc
List<ExprNodeDesc> valueFilteredCols = ExprNodeDescUtils.clone(valueCols);
if (filterMap != null && filterMap[pos] != null && pos != mapJoinPos) {
ExprNodeColumnDesc isFilterDesc = new ExprNodeColumnDesc(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.SMALLINT_TYPE_NAME), "filter", "filter", false);
valueFilteredCols.add(isFilterDesc);
}
TableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils.getFieldSchemasFromColumnList(valueCols, "mapjoinvalue"));
TableDesc valueFilteredTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils.getFieldSchemasFromColumnList(valueFilteredCols, "mapjoinvalue"));
valueTableDescs.add(valueTableDesc);
valueFilteredTableDescs.add(valueFilteredTableDesc);
}
Map<Byte, List<ExprNodeDesc>> filters = desc.getFilters();
Map<Byte, List<ExprNodeDesc>> newFilters = new HashMap<Byte, List<ExprNodeDesc>>();
for (Map.Entry<Byte, List<ExprNodeDesc>> entry : filters.entrySet()) {
byte srcTag = entry.getKey();
List<ExprNodeDesc> filter = entry.getValue();
Operator<?> terminal = oldReduceSinkParentOps.get(srcTag);
newFilters.put(srcTag, ExprNodeDescUtils.backtrack(filter, op, terminal));
}
desc.setFilters(filters = newFilters);
// create dumpfile prefix needed to create descriptor
String dumpFilePrefix = "";
if (mapAliases != null) {
for (String mapAlias : mapAliases) {
dumpFilePrefix = dumpFilePrefix + mapAlias;
}
dumpFilePrefix = dumpFilePrefix + "-" + PlanUtils.getCountForMapJoinDumpFilePrefix();
} else {
dumpFilePrefix = "mapfile" + PlanUtils.getCountForMapJoinDumpFilePrefix();
}
List<ExprNodeDesc> keyCols = keyExprMap.get((byte) mapJoinPos);
List<String> outputColumnNames = op.getConf().getOutputColumnNames();
TableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(hconf, PlanUtils.getFieldSchemasFromColumnList(keyCols, MAPJOINKEY_FIELDPREFIX));
JoinCondDesc[] joinCondns = op.getConf().getConds();
MapJoinDesc mapJoinDescriptor = new MapJoinDesc(keyExprMap, keyTableDesc, newValueExprs, valueTableDescs, valueFilteredTableDescs, outputColumnNames, mapJoinPos, joinCondns, filters, op.getConf().getNoOuterJoin(), dumpFilePrefix, op.getConf().getMemoryMonitorInfo(), op.getConf().getInMemoryDataSize());
mapJoinDescriptor.setStatistics(op.getConf().getStatistics());
mapJoinDescriptor.setTagOrder(tagOrder);
mapJoinDescriptor.setNullSafes(desc.getNullSafes());
mapJoinDescriptor.setFilterMap(desc.getFilterMap());
mapJoinDescriptor.setResidualFilterExprs(desc.getResidualFilterExprs());
mapJoinDescriptor.setColumnExprMap(colExprMap);
if (!valueIndices.isEmpty()) {
mapJoinDescriptor.setValueIndices(valueIndices);
}
return mapJoinDescriptor;
}
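The loop that builds valueIndex above encodes, for each small-table value column, whether it can be read back from the join key (a non-negative entry holding the key position) or from the trimmed value row (a negative entry -(j + 1) pointing at position j). Below is a minimal plain-Java sketch of that convention with made-up column names; it is an illustration only, not Hive API:
import java.util.Arrays;
import java.util.List;
public class ValueIndexDemo {
    // decode one value column according to the convention sketched in getMapJoinDesc
    static String resolve(int idx, List<String> keyCols, List<String> valueCols) {
        return idx >= 0 ? keyCols.get(idx) : valueCols.get(-idx - 1);
    }
    public static void main(String[] args) {
        // original value row (key1, v1, v2); key1 was dropped from the value table schema
        int[] valueIndex = { 0, -1, -2 };
        for (int idx : valueIndex) {
            System.out.println(resolve(idx, Arrays.asList("key1"), Arrays.asList("v1", "v2")));
        }
        // prints key1, v1, v2 in order
    }
}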
Use of org.apache.hadoop.hive.ql.plan.TableDesc in project hive by apache.
In the class ReduceSinkMapJoinProc, the method processReduceSinkToHashJoin:
public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, MapJoinOperator mapJoinOp, GenTezProcContext context) throws SemanticException {
// remove the tag for in-memory side of mapjoin
parentRS.getConf().setSkipTag(true);
parentRS.setSkipTag(true);
// Mark this small table as being processed
if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
context.mapJoinToUnprocessedSmallTableReduceSinks.get(mapJoinOp).remove(parentRS);
}
List<BaseWork> mapJoinWork = null;
/*
* if there was a pre-existing work generated for the big-table mapjoin side,
* we need to hook the work generated for the RS (associated with the RS-MJ pattern)
* with the pre-existing work.
*
* Otherwise, we need to record that the mapjoin op
* should be linked to the RS work (associated with the RS-MJ pattern).
*
*/
mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
BaseWork parentWork = getMapJoinParentWork(context, parentRS);
// set the link between mapjoin and parent vertex
int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
if (pos == -1) {
throw new SemanticException("Cannot find position of parent in mapjoin");
}
MapJoinDesc joinConf = mapJoinOp.getConf();
long keyCount = Long.MAX_VALUE, rowCount = Long.MAX_VALUE, bucketCount = 1;
long tableSize = Long.MAX_VALUE;
Statistics stats = parentRS.getStatistics();
if (stats != null) {
keyCount = rowCount = stats.getNumRows();
if (keyCount <= 0) {
keyCount = rowCount = Long.MAX_VALUE;
}
tableSize = stats.getDataSize();
ArrayList<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
if (keyCols != null && !keyCols.isEmpty()) {
// See if we can arrive at a smaller number using distinct stats from key columns.
long maxKeyCount = 1;
String prefix = Utilities.ReduceField.KEY.toString();
for (String keyCol : keyCols) {
ExprNodeDesc realCol = parentRS.getColumnExprMap().get(prefix + "." + keyCol);
ColStatistics cs = StatsUtils.getColStatisticsFromExpression(context.conf, stats, realCol);
if (cs == null || cs.getCountDistint() <= 0) {
maxKeyCount = Long.MAX_VALUE;
break;
}
maxKeyCount *= cs.getCountDistint();
if (maxKeyCount >= keyCount) {
break;
}
}
keyCount = Math.min(maxKeyCount, keyCount);
}
if (joinConf.isBucketMapJoin()) {
OpTraits opTraits = mapJoinOp.getOpTraits();
bucketCount = (opTraits == null) ? -1 : opTraits.getNumBuckets();
if (bucketCount > 0) {
// We cannot obtain a better estimate without CustomPartitionVertex providing it
// to us somehow; in which case using statistics would be completely unnecessary.
keyCount /= bucketCount;
tableSize /= bucketCount;
}
} else if (joinConf.isDynamicPartitionHashJoin()) {
// For dynamic partitioned hash join, assuming table is split evenly among the reduce tasks.
bucketCount = parentRS.getConf().getNumReducers();
keyCount /= bucketCount;
tableSize /= bucketCount;
}
}
if (keyCount == 0) {
keyCount = 1;
}
if (tableSize == 0) {
tableSize = 1;
}
LOG.info("Mapjoin " + mapJoinOp + "(bucket map join = " + joinConf.isBucketMapJoin() + "), pos: " + pos + " --> " + parentWork.getName() + " (" + keyCount + " keys estimated from " + rowCount + " rows, " + bucketCount + " buckets)");
joinConf.getParentToInput().put(pos, parentWork.getName());
if (keyCount != Long.MAX_VALUE) {
joinConf.getParentKeyCounts().put(pos, keyCount);
}
joinConf.getParentDataSizes().put(pos, tableSize);
int numBuckets = -1;
EdgeType edgeType = EdgeType.BROADCAST_EDGE;
if (joinConf.isBucketMapJoin()) {
numBuckets = (Integer) joinConf.getBigTableBucketNumMapping().values().toArray()[0];
/*
* Here, we can be in one of 4 states.
*
* 1. If the map join work is null, we have not yet traversed the big table side. We
* just need to see if we can find a reduce sink operator in the big table side. This would
* imply a reduce side operation.
*
* 2. If we don't find a reduce sink in 1, it has to be a map side operation.
*
* 3. If we have already created a work item for the big table side, we need to see if we can
* find a table scan operator in the big table side. This would imply a map side operation.
*
* 4. If we don't find a table scan operator, it has to be a reduce side operation.
*/
if (mapJoinWork == null) {
Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), ReduceSinkOperator.class);
if (rootOp == null) {
// likely we found a table scan operator
edgeType = EdgeType.CUSTOM_EDGE;
} else {
// we have found a reduce sink
edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
}
} else {
Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), TableScanOperator.class);
if (rootOp != null) {
// likely we found a table scan operator
edgeType = EdgeType.CUSTOM_EDGE;
} else {
// we have found a reduce sink
edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
}
}
} else if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
if (parentRS.getConf().isForwarding()) {
edgeType = EdgeType.ONE_TO_ONE_EDGE;
} else {
edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
}
}
if (edgeType == EdgeType.CUSTOM_EDGE) {
// disable auto parallelism for bucket map joins
parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
}
TezEdgeProperty edgeProp = new TezEdgeProperty(null, edgeType, numBuckets);
if (mapJoinWork != null) {
for (BaseWork myWork : mapJoinWork) {
// link the work with the work associated with the reduce sink that triggered this rule
TezWork tezWork = context.currentTask.getWork();
LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
tezWork.connect(parentWork, myWork, edgeProp);
if (edgeType == EdgeType.CUSTOM_EDGE) {
tezWork.setVertexType(myWork, VertexType.INITIALIZED_EDGES);
}
ReduceSinkOperator r = null;
if (context.connectedReduceSinks.contains(parentRS)) {
LOG.debug("Cloning reduce sink " + parentRS + " for multi-child broadcast edge");
// we've already set this one up. Need to clone for the next work.
r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(parentRS.getCompilationOpContext(), (ReduceSinkDesc) parentRS.getConf().clone(), new RowSchema(parentRS.getSchema()), parentRS.getParentOperators());
context.clonedReduceSinks.add(r);
} else {
r = parentRS;
}
// remember the output name of the reduce sink
r.getConf().setOutputName(myWork.getName());
context.connectedReduceSinks.add(r);
}
}
// remember in case we need to connect additional work later
Map<BaseWork, TezEdgeProperty> linkWorkMap = null;
if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
} else {
linkWorkMap = new HashMap<BaseWork, TezEdgeProperty>();
}
linkWorkMap.put(parentWork, edgeProp);
context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
if (reduceSinks == null) {
reduceSinks = new ArrayList<ReduceSinkOperator>();
}
reduceSinks.add(parentRS);
context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
// create the dummy operators
List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
// create a new operator: HashTableDummyOperator, which shares the table desc
HashTableDummyDesc desc = new HashTableDummyDesc();
@SuppressWarnings("unchecked") HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(parentRS.getCompilationOpContext(), desc);
TableDesc tbl;
// need to create the correct table descriptor for key/value
RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
dummyOp.getConf().setTbl(tbl);
Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
StringBuilder keyOrder = new StringBuilder();
StringBuilder keyNullOrder = new StringBuilder();
for (ExprNodeDesc k : keyCols) {
keyOrder.append("+");
keyNullOrder.append("a");
}
TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), keyNullOrder.toString());
mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
// let the dummy op be the parent of mapjoin op
mapJoinOp.replaceParent(parentRS, dummyOp);
List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
dummyChildren.add(mapJoinOp);
dummyOp.setChildOperators(dummyChildren);
dummyOperators.add(dummyOp);
// cut the operator tree so as to not retain connections from the parent RS downstream
List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
int childIndex = childOperators.indexOf(mapJoinOp);
childOperators.remove(childIndex);
// the work needs to know about the dummy operators; they have to be separately initialized at task startup
if (mapJoinWork != null) {
for (BaseWork myWork : mapJoinWork) {
LOG.debug("adding dummy op to work " + myWork.getName() + " from MJ work: " + dummyOp);
myWork.addDummyOp(dummyOp);
}
}
if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
dummyOperators.add(op);
}
}
context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
return true;
}
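The branching above boils down to a small decision table for the Tez edge between the small-table work and the mapjoin work. Here is a condensed restatement as a hedged sketch; the method chooseEdgeType and its boolean parameters are summaries of the inline checks, not Hive API:
import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType;
public class EdgeTypeSketch {
    static EdgeType chooseEdgeType(boolean bucketMapJoin, boolean bigTableSideIsMapWork, boolean dynamicPartitionHashJoin, boolean parentRSForwards) {
        if (bucketMapJoin) {
            // a map-side big table uses CUSTOM_EDGE; a reduce-side big table uses CUSTOM_SIMPLE_EDGE
            return bigTableSideIsMapWork ? EdgeType.CUSTOM_EDGE : EdgeType.CUSTOM_SIMPLE_EDGE;
        }
        if (dynamicPartitionHashJoin) {
            // a forwarding parent RS keeps a one-to-one shuffle; otherwise a custom simple edge
            return parentRSForwards ? EdgeType.ONE_TO_ONE_EDGE : EdgeType.CUSTOM_SIMPLE_EDGE;
        }
        // default: broadcast the small table to the mapjoin vertex
        return EdgeType.BROADCAST_EDGE;
    }
}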
Use of org.apache.hadoop.hive.ql.plan.TableDesc in project hive by apache.
In the class TestHCatMultiOutputFormat, the method getTableData:
/**
* Method to fetch table data
*
* @param table table name
* @param database database name
* @return list of rows, with columns comma-separated
* @throws Exception if any error occurs
*/
private List<String> getTableData(String table, String database) throws Exception {
QueryState queryState = new QueryState.Builder().build();
HiveConf conf = queryState.getConf();
conf.addResource("hive-site.xml");
ArrayList<String> results = new ArrayList<String>();
ArrayList<String> temp = new ArrayList<String>();
Hive hive = Hive.get(conf);
org.apache.hadoop.hive.ql.metadata.Table tbl = hive.getTable(database, table);
FetchWork work;
if (!tbl.getPartCols().isEmpty()) {
List<Partition> partitions = hive.getPartitions(tbl);
List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();
List<Path> partLocs = new ArrayList<Path>();
TableDesc tableDesc = Utilities.getTableDesc(tbl);
for (Partition part : partitions) {
partLocs.add(part.getDataLocation());
partDesc.add(Utilities.getPartitionDescFromTableDesc(tableDesc, part, true));
}
work = new FetchWork(partLocs, partDesc, tableDesc);
work.setLimit(100);
} else {
work = new FetchWork(tbl.getDataLocation(), Utilities.getTableDesc(tbl));
}
FetchTask task = new FetchTask();
task.setWork(work);
task.initialize(queryState, null, null, new CompilationOpContext());
task.fetch(temp);
for (String str : temp) {
results.add(str.replace("\t", ","));
}
return results;
}
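A hypothetical caller inside the same test class, just to show how the helper is used; the table name, database name, and expected column count are made up:
List<String> rows = getTableData("employee", "default");
for (String row : rows) {
    // columns were tab-separated by FetchTask and rewritten to commas above
    Assert.assertEquals(3, row.split(",").length);
}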
Use of org.apache.hadoop.hive.ql.plan.TableDesc in project hive by apache.
In the class TestAccumuloStorageHandler, the method testTablePropertiesPassedToOutputJobProperties:
@Test
public void testTablePropertiesPassedToOutputJobProperties() {
TableDesc tableDesc = Mockito.mock(TableDesc.class);
Properties props = new Properties();
Map<String, String> jobProperties = new HashMap<String, String>();
props.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq1,cf:cq2,cf:cq3");
props.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string:int:string");
props.setProperty(serdeConstants.LIST_COLUMNS, "name,age,email");
props.setProperty(AccumuloSerDeParameters.TABLE_NAME, "table");
props.setProperty(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY, "foo");
Mockito.when(tableDesc.getProperties()).thenReturn(props);
storageHandler.configureOutputJobProperties(tableDesc, jobProperties);
Assert.assertEquals(3, jobProperties.size());
Assert.assertTrue("Job properties did not contain column mappings", jobProperties.containsKey(AccumuloSerDeParameters.COLUMN_MAPPINGS));
Assert.assertEquals(props.getProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS), jobProperties.get(AccumuloSerDeParameters.COLUMN_MAPPINGS));
Assert.assertTrue("Job properties did not contain accumulo table name", jobProperties.containsKey(AccumuloSerDeParameters.TABLE_NAME));
Assert.assertEquals(props.getProperty(AccumuloSerDeParameters.TABLE_NAME), jobProperties.get(AccumuloSerDeParameters.TABLE_NAME));
Assert.assertTrue("Job properties did not contain visibility label", jobProperties.containsKey(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY));
Assert.assertEquals(props.getProperty(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY), jobProperties.get(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY));
}
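The same mocking pattern generalizes: stub getProperties() on the TableDesc mock, invoke the handler hook, and assert on the copied keys. Below is a hedged variant reusing the imports and the storageHandler field from the test above; the column names, mapping, and table name are made up:
TableDesc desc = Mockito.mock(TableDesc.class);
Properties tblProps = new Properties();
tblProps.setProperty(serdeConstants.LIST_COLUMNS, "rowid,name");
tblProps.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string:string");
tblProps.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowid,cf:name");
tblProps.setProperty(AccumuloSerDeParameters.TABLE_NAME, "users");
Mockito.when(desc.getProperties()).thenReturn(tblProps);
Map<String, String> jobProps = new HashMap<String, String>();
storageHandler.configureOutputJobProperties(desc, jobProps);
// only the Accumulo-specific keys set on the table are expected to be copied through
Assert.assertEquals("users", jobProps.get(AccumuloSerDeParameters.TABLE_NAME));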
Use of org.apache.hadoop.hive.ql.plan.TableDesc in project hive by apache.
In the class ReduceRecordProcessor, the method initializeSourceForTag:
private void initializeSourceForTag(ReduceWork redWork, int tag, ObjectInspector[] ois, ReduceRecordSource[] sources, TableDesc valueTableDesc, String inputName) throws Exception {
reducer = redWork.getReducer();
reducer.getParentOperators().clear();
// clear out any parents as reducer is the root
reducer.setParentOperators(null);
TableDesc keyTableDesc = redWork.getKeyDesc();
Reader reader = inputs.get(inputName).getReader();
sources[tag] = new ReduceRecordSource();
// Only the big table input source should be vectorized (if applicable)
// Note this behavior may have to change if we ever implement a vectorized merge join
boolean vectorizedRecordSource = (tag == bigTablePosition) && redWork.getVectorMode();
sources[tag].init(jconf, redWork.getReducer(), vectorizedRecordSource, keyTableDesc, valueTableDesc, reader, tag == bigTablePosition, (byte) tag, redWork.getVectorizedRowBatchCtx(), redWork.getVectorizedVertexNum(), redWork.getVectorizedTestingReducerBatchSize());
ois[tag] = sources[tag].getObjectInspector();
}
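For context, a hypothetical caller loop showing how the per-tag arrays line up with the value table descriptors; the accessor getTagToValueDesc and the inputNames list are assumptions for illustration, not necessarily the exact fields ReduceRecordProcessor uses:
List<TableDesc> tagToValueDesc = redWork.getTagToValueDesc();
ObjectInspector[] ois = new ObjectInspector[tagToValueDesc.size()];
ReduceRecordSource[] sources = new ReduceRecordSource[tagToValueDesc.size()];
for (int tag = 0; tag < tagToValueDesc.size(); tag++) {
    initializeSourceForTag(redWork, tag, ois, sources, tagToValueDesc.get(tag), inputNames.get(tag));
}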