Use of org.apache.hadoop.hive.ql.plan.BaseWork in project hive by apache: class DagUtils, method createVertex.
/**
* Create a vertex from a given work object.
*
* @param conf JobConf to be used for this execution unit
* @param work The instance of BaseWork representing the actual work to be performed
* by this vertex.
* @param scratchDir HDFS scratch dir for this execution unit.
* @param fileSystem FS corresponding to scratchDir and LocalResources
* @param ctx This query's context
* @return Vertex
*/
@SuppressWarnings("deprecation")
public Vertex createVertex(JobConf conf, BaseWork work, Path scratchDir, FileSystem fileSystem, Context ctx, boolean hasChildren, TezWork tezWork, VertexType vertexType, Map<String, LocalResource> localResources) throws Exception {
Vertex v = null;
// Dispatch the call to the right method for the actual (sub-)type of BaseWork.
if (work instanceof MapWork) {
v = createVertex(conf, (MapWork) work, fileSystem, scratchDir, ctx, vertexType, localResources);
} else if (work instanceof ReduceWork) {
v = createVertex(conf, (ReduceWork) work, fileSystem, scratchDir, ctx, localResources);
} else if (work instanceof MergeJoinWork) {
v = createVertex(conf, (MergeJoinWork) work, fileSystem, scratchDir, ctx, vertexType, localResources);
// set the VertexManagerPlugin if this is a cross product destination vertex
List<String> crossProductSources = new ArrayList<>();
for (BaseWork parentWork : tezWork.getParents(work)) {
if (tezWork.getEdgeType(parentWork, work) == EdgeType.XPROD_EDGE) {
crossProductSources.add(parentWork.getName());
}
}
if (!crossProductSources.isEmpty()) {
CartesianProductConfig cpConfig = new CartesianProductConfig(crossProductSources);
v.setVertexManagerPlugin(VertexManagerPluginDescriptor.create(CartesianProductVertexManager.class.getName()).setUserPayload(cpConfig.toUserPayload(new TezConfiguration(conf))));
// parallelism shouldn't be set for cartesian product vertex
}
} else {
// something is seriously wrong if this is happening
throw new HiveException(ErrorMsg.GENERIC_ERROR.getErrorCodedMsg());
}
// initialize stats publisher if necessary
if (work.isGatheringStats()) {
StatsPublisher statsPublisher;
StatsFactory factory = StatsFactory.newFactory(conf);
if (factory != null) {
StatsCollectionContext sCntxt = new StatsCollectionContext(conf);
sCntxt.setStatsTmpDirs(Utilities.getStatsTmpDirs(work, conf));
statsPublisher = factory.getStatsPublisher();
if (!statsPublisher.init(sCntxt)) {
// statsPublisher.init creates the stats table if it does not exist
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
}
}
}
}
// final vertices need to have at least one output
if (!hasChildren) {
v.addDataSink("out_" + work.getName(), new DataSinkDescriptor(OutputDescriptor.create(MROutput.class.getName()).setUserPayload(TezUtils.createUserPayloadFromConf(conf)), null, null));
}
return v;
}
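For context, the sketch below shows how a DAG builder might drive createVertex, walking a TezWork graph and adding each resulting Vertex to the DAG. It is a minimal sketch, not a verbatim excerpt from TezTask: the names dagUtils, tezWork, scratchDir, fileSystem, ctx and localResources, as well as the initializeVertexConf call, are assumptions, and edge creation between vertices is omitted.
// Hedged sketch: create one Vertex per BaseWork node in a TezWork graph.
// dagUtils, tezWork, scratchDir, fileSystem, ctx and localResources are assumed to be in scope.
DAG dag = DAG.create("hive_query_dag");
for (BaseWork w : tezWork.getAllWork()) {
    JobConf wxConf = dagUtils.initializeVertexConf(conf, ctx, w);   // assumed helper
    boolean hasChildren = !tezWork.getChildren(w).isEmpty();
    Vertex wx = dagUtils.createVertex(wxConf, w, scratchDir, fileSystem, ctx,
        hasChildren, tezWork, tezWork.getVertexType(w), localResources);
    dag.addVertex(wx);
    // Edges between vertices would be added here as well.
}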
Use of org.apache.hadoop.hive.ql.plan.BaseWork in project hive by apache: class SerializationUtilities, method cloneBaseWork.
/**
* Clones a BaseWork by serializing and deserializing the plan. This is expensive; do not use unless necessary.
* @param plan The plan.
* @return The clone.
*/
public static BaseWork cloneBaseWork(BaseWork plan) {
PerfLogger perfLogger = SessionState.getPerfLogger();
perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.CLONE_PLAN);
Operator<?> op = plan.getAnyRootOperator();
CompilationOpContext ctx = (op == null) ? null : op.getCompilationOpContext();
ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
serializePlan(plan, baos, true);
BaseWork newPlan = deserializePlan(new ByteArrayInputStream(baos.toByteArray()), plan.getClass(), true);
// Restore the context.
for (Operator<?> newOp : newPlan.getAllOperators()) {
newOp.setCompilationOpContext(ctx);
}
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.CLONE_PLAN);
return newPlan;
}
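A short usage sketch, assuming someMapWork is an existing MapWork instance; the rename is purely illustrative. The point is that the clone is a deep copy, so per-task mutations do not leak into the shared plan.
// Minimal usage sketch; someMapWork is an assumed variable.
MapWork copy = (MapWork) SerializationUtilities.cloneBaseWork(someMapWork);
// The clone is a deep copy, so changes to it do not affect the original plan.
copy.setName(someMapWork.getName() + "_clone");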
Use of org.apache.hadoop.hive.ql.plan.BaseWork in project hive by apache: class Utilities, method getBaseWork.
/**
* Returns the Map or Reduce plan
* Side effect: the BaseWork returned is also placed in the gWorkMap
* @param conf the configuration for the current task
* @param name the plan name, e.g. the map or reduce plan name
* @return the BaseWork for the supplied name, or null if the name is null or no plan can be found
* @throws RuntimeException if the configuration is invalid or the plan cannot be loaded
*/
private static BaseWork getBaseWork(Configuration conf, String name) {
Path path = null;
InputStream in = null;
Kryo kryo = SerializationUtilities.borrowKryo();
try {
String engine = HiveConf.getVar(conf, ConfVars.HIVE_EXECUTION_ENGINE);
if (engine.equals("spark")) {
// TODO Add the jars to the current thread's context classloader, as this may be invoked by the Spark driver inside
// other threads; this should be unnecessary once SPARK-5377 is resolved.
String addedJars = conf.get(HIVE_ADDED_JARS);
if (StringUtils.isNotEmpty(addedJars)) {
ClassLoader loader = Thread.currentThread().getContextClassLoader();
ClassLoader newLoader = addToClassPath(loader, addedJars.split(";"));
Thread.currentThread().setContextClassLoader(newLoader);
kryo.setClassLoader(newLoader);
}
}
path = getPlanPath(conf, name);
LOG.info("PLAN PATH = {}", path);
if (path == null) {
// Map/reduce plan may not be generated
return null;
}
BaseWork gWork = gWorkMap.get(conf).get(path);
if (gWork == null) {
Path localPath = path;
LOG.debug("local path = {}", localPath);
final long serializedSize;
final String planMode;
if (HiveConf.getBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN)) {
String planStringPath = path.toUri().getPath();
LOG.debug("Loading plan from string: {}", planStringPath);
String planString = conf.getRaw(planStringPath);
if (planString == null) {
LOG.info("Could not find plan string in conf");
return null;
}
serializedSize = planString.length();
planMode = "RPC";
byte[] planBytes = Base64.decodeBase64(planString);
in = new ByteArrayInputStream(planBytes);
in = new InflaterInputStream(in);
} else {
LOG.debug("Open file to read in plan: {}", localPath);
FileSystem fs = localPath.getFileSystem(conf);
in = fs.open(localPath);
serializedSize = fs.getFileStatus(localPath).getLen();
planMode = "FILE";
}
if (MAP_PLAN_NAME.equals(name)) {
if (ExecMapper.class.getName().equals(conf.get(MAPRED_MAPPER_CLASS))) {
gWork = SerializationUtilities.deserializePlan(kryo, in, MapWork.class);
} else if (MergeFileMapper.class.getName().equals(conf.get(MAPRED_MAPPER_CLASS))) {
gWork = SerializationUtilities.deserializePlan(kryo, in, MergeFileWork.class);
} else if (ColumnTruncateMapper.class.getName().equals(conf.get(MAPRED_MAPPER_CLASS))) {
gWork = SerializationUtilities.deserializePlan(kryo, in, ColumnTruncateWork.class);
} else {
throw new RuntimeException("unable to determine work from configuration ." + MAPRED_MAPPER_CLASS + " was " + conf.get(MAPRED_MAPPER_CLASS));
}
} else if (REDUCE_PLAN_NAME.equals(name)) {
if (ExecReducer.class.getName().equals(conf.get(MAPRED_REDUCER_CLASS))) {
gWork = SerializationUtilities.deserializePlan(kryo, in, ReduceWork.class);
} else {
throw new RuntimeException("unable to determine work from configuration ." + MAPRED_REDUCER_CLASS + " was " + conf.get(MAPRED_REDUCER_CLASS));
}
} else if (name.contains(MERGE_PLAN_NAME)) {
if (name.startsWith(MAPNAME)) {
gWork = SerializationUtilities.deserializePlan(kryo, in, MapWork.class);
} else if (name.startsWith(REDUCENAME)) {
gWork = SerializationUtilities.deserializePlan(kryo, in, ReduceWork.class);
} else {
throw new RuntimeException("Unknown work type: " + name);
}
}
LOG.info("Deserialized plan (via {}) - name: {} size: {}", planMode, gWork.getName(), humanReadableByteCount(serializedSize));
gWorkMap.get(conf).put(path, gWork);
} else {
LOG.debug("Found plan in cache for name: {}", name);
}
return gWork;
} catch (FileNotFoundException fnf) {
// this happens, e.g. when there is no reduce work.
LOG.debug("No plan file found: {}", path, fnf);
return null;
} catch (Exception e) {
String msg = "Failed to load plan: " + path;
LOG.error(msg, e);
throw new RuntimeException(msg, e);
} finally {
SerializationUtilities.releaseKryo(kryo);
IOUtils.closeStream(in);
}
}
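This private method is normally reached through public accessors that supply one of the well-known plan names. The wrappers below are a hedged sketch inferred from the code above rather than a verbatim quote.
// Hedged sketch of the public entry points that delegate to getBaseWork.
// MAP_PLAN_NAME and REDUCE_PLAN_NAME are the same constants referenced above.
public static MapWork getMapWork(Configuration conf) {
    return (MapWork) getBaseWork(conf, MAP_PLAN_NAME);
}
public static ReduceWork getReduceWork(Configuration conf) {
    return (ReduceWork) getBaseWork(conf, REDUCE_PLAN_NAME);
}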
Use of org.apache.hadoop.hive.ql.plan.BaseWork in project hive by apache: class MapRecordProcessor, method init.
@Override
void init(MRTaskReporter mrReporter, Map<String, LogicalInput> inputs, Map<String, LogicalOutput> outputs) throws Exception {
perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
super.init(mrReporter, inputs, outputs);
checkAbortCondition();
String key = processorContext.getTaskVertexName() + MAP_PLAN_KEY;
cacheKeys.add(key);
// create map and fetch operators
mapWork = (MapWork) cache.retrieve(key, new Callable<Object>() {
@Override
public Object call() {
return Utilities.getMapWork(jconf);
}
});
// TODO HIVE-14042. Cleanup may be required if exiting early.
Utilities.setMapWork(jconf, mapWork);
String prefixes = jconf.get(DagUtils.TEZ_MERGE_WORK_FILE_PREFIXES);
if (prefixes != null) {
mergeWorkList = new ArrayList<MapWork>();
for (final String prefix : prefixes.split(",")) {
if (prefix == null || prefix.isEmpty()) {
continue;
}
key = processorContext.getTaskVertexName() + prefix;
cacheKeys.add(key);
checkAbortCondition();
mergeWorkList.add((MapWork) cache.retrieve(key, new Callable<Object>() {
@Override
public Object call() {
return Utilities.getMergeWork(jconf, prefix);
}
}));
}
}
MapredContext.init(true, new JobConf(jconf));
((TezContext) MapredContext.get()).setInputs(inputs);
((TezContext) MapredContext.get()).setTezProcessorContext(processorContext);
// Update the JobConf using MRInput; information such as file names comes via this
checkAbortCondition();
legacyMRInput = getMRInput(inputs);
if (legacyMRInput != null) {
Configuration updatedConf = legacyMRInput.getConfigUpdates();
if (updatedConf != null) {
for (Entry<String, String> entry : updatedConf) {
jconf.set(entry.getKey(), entry.getValue());
}
}
}
checkAbortCondition();
createOutputMap();
// Start all the Outputs.
for (Entry<String, LogicalOutput> outputEntry : outputs.entrySet()) {
l4j.debug("Starting Output: " + outputEntry.getKey());
outputEntry.getValue().start();
((TezKVOutputCollector) outMap.get(outputEntry.getKey())).initialize();
}
checkAbortCondition();
try {
CompilationOpContext runtimeCtx = new CompilationOpContext();
if (mapWork.getVectorMode()) {
mapOp = new VectorMapOperator(runtimeCtx);
} else {
mapOp = new MapOperator(runtimeCtx);
}
// Not synchronizing creation of mapOp with an invocation. Check immediately
// after creation in case abort has been set.
// Relying on the regular flow to clean up the actual operator, i.e. if an exception is
// thrown, an attempt will be made to clean up the op.
// If we are here, exit via an exception. If we're in the middle of the operator.initialize
// call further down, we rely upon op.abort().
checkAbortCondition();
mapOp.clearConnectedOperators();
mapOp.setExecContext(execContext);
boolean fromCache = false;
if (mergeWorkList != null) {
AbstractMapOperator mergeMapOp = null;
for (BaseWork mergeWork : mergeWorkList) {
// TODO HIVE-14042. What is mergeWork, and why is it not part of the regular operator chain.
// The mergeMapOp.initialize call further down can block, and will not receive information
// about an abort request.
MapWork mergeMapWork = (MapWork) mergeWork;
if (mergeMapWork.getVectorMode()) {
mergeMapOp = new VectorMapOperator(runtimeCtx);
} else {
mergeMapOp = new MapOperator(runtimeCtx);
}
mergeMapOpList.add(mergeMapOp);
// initialize the merge operators first.
if (mergeMapOp != null) {
mergeMapOp.setConf(mergeMapWork);
l4j.info("Input name is " + mergeMapWork.getName());
jconf.set(Utilities.INPUT_NAME, mergeMapWork.getName());
mergeMapOp.initialize(jconf, null);
// if there are no files/partitions to read, we need to skip trying to read
MultiMRInput multiMRInput = multiMRInputMap.get(mergeMapWork.getName());
boolean skipRead = false;
if (multiMRInput == null) {
l4j.info("Multi MR Input for work " + mergeMapWork.getName() + " is null. Skipping read.");
skipRead = true;
} else {
Collection<KeyValueReader> keyValueReaders = multiMRInput.getKeyValueReaders();
if ((keyValueReaders == null) || (keyValueReaders.isEmpty())) {
l4j.info("Key value readers are null or empty and hence skipping read. " + "KeyValueReaders = " + keyValueReaders);
skipRead = true;
}
}
if (skipRead) {
List<Operator<?>> children = new ArrayList<Operator<?>>();
children.addAll(mergeMapOp.getConf().getAliasToWork().values());
// do the same thing as setChildren when there is nothing to read.
// the setChildren method initializes the object inspector needed by the operators
// based on path and partition information which we don't have in this case.
mergeMapOp.initEmptyInputChildren(children, jconf);
} else {
// the setChildren method initializes the object inspector needed by the operators
// based on path and partition information.
mergeMapOp.setChildren(jconf);
}
Operator<? extends OperatorDesc> finalOp = getFinalOp(mergeMapOp);
if (finalOp instanceof TezDummyStoreOperator) {
// we ensure that we don't try to read any data in case of skip read.
((TezDummyStoreOperator) finalOp).setFetchDone(skipRead);
mapOp.setConnectedOperators(mergeMapWork.getTag(), (DummyStoreOperator) finalOp);
} else {
// the plan is already connected, which means it was retrieved from the cache.
fromCache = true;
}
mergeMapOp.passExecContext(new ExecMapperContext(jconf));
mergeMapOp.initializeLocalWork(jconf);
}
}
}
if (!fromCache) {
// if not from cache, we still need to hook up the plans.
((TezContext) (MapredContext.get())).setDummyOpsMap(mapOp.getConnectedOperators());
}
// initialize map operator
mapOp.setConf(mapWork);
l4j.info("Main input name is " + mapWork.getName());
jconf.set(Utilities.INPUT_NAME, mapWork.getName());
mapOp.initialize(jconf, null);
checkAbortCondition();
mapOp.setChildren(jconf);
mapOp.passExecContext(execContext);
l4j.info(mapOp.dump(0));
// set memory available for operators
long memoryAvailableToTask = processorContext.getTotalMemoryAvailableToTask();
if (mapOp.getConf() != null) {
mapOp.getConf().setMaxMemoryAvailable(memoryAvailableToTask);
l4j.info("Memory available for operators set to {}", LlapUtil.humanReadableByteCount(memoryAvailableToTask));
}
OperatorUtils.setMemoryAvailable(mapOp.getChildOperators(), memoryAvailableToTask);
mapOp.initializeLocalWork(jconf);
// Setup values registry
checkAbortCondition();
String valueRegistryKey = DynamicValue.DYNAMIC_VALUE_REGISTRY_CACHE_KEY;
// On LLAP the dynamic value registry might already be cached.
final DynamicValueRegistryTez registryTez = dynamicValueCache.retrieve(valueRegistryKey, new Callable<DynamicValueRegistryTez>() {
@Override
public DynamicValueRegistryTez call() {
return new DynamicValueRegistryTez();
}
});
dynamicValueCacheKeys.add(valueRegistryKey);
RegistryConfTez registryConf = new RegistryConfTez(jconf, mapWork, processorContext, inputs);
registryTez.init(registryConf);
checkAbortCondition();
initializeMapRecordSources();
mapOp.initializeMapOperator(jconf);
if ((mergeMapOpList != null) && !mergeMapOpList.isEmpty()) {
for (AbstractMapOperator mergeMapOp : mergeMapOpList) {
jconf.set(Utilities.INPUT_NAME, mergeMapOp.getConf().getName());
// TODO HIVE-14042. abort handling: Handling of mergeMapOp
mergeMapOp.initializeMapOperator(jconf);
}
}
// Initialization isn't finished until all parents of all operators
// are initialized. For broadcast joins that means initializing the
// dummy parent operators as well.
List<HashTableDummyOperator> dummyOps = mapWork.getDummyOps();
jconf.set(Utilities.INPUT_NAME, mapWork.getName());
if (dummyOps != null) {
for (Operator<? extends OperatorDesc> dummyOp : dummyOps) {
dummyOp.setExecContext(execContext);
// TODO HIVE-14042. Handling of dummyOps, and propagating abort information to them
dummyOp.initialize(jconf, null);
}
}
OperatorUtils.setChildrenCollector(mapOp.getChildOperators(), outMap);
mapOp.setReporter(reporter);
MapredContext.get().setReporter(reporter);
} catch (Throwable e) {
setAborted(true);
if (e instanceof OutOfMemoryError) {
// Don't create a new object if we are already out of memory
throw (OutOfMemoryError) e;
} else if (e instanceof InterruptedException) {
l4j.info("Hit an interrupt while initializing MapRecordProcessor. Message={}", e.getMessage());
throw (InterruptedException) e;
} else {
throw new RuntimeException("Map operator initialization failed", e);
}
}
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
}
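One detail worth noting is where the comma-separated list read from DagUtils.TEZ_MERGE_WORK_FILE_PREFIXES comes from: it is written at DAG build time, one prefix per merged work. The sketch below shows the producing side under stated assumptions; mergeJoinWork and its getBaseWorkList accessor are assumed names.
// Hedged sketch: building the prefix list that init() above splits on ",".
// mergeJoinWork and getBaseWorkList() are assumptions for illustration.
StringBuilder prefixes = new StringBuilder();
for (BaseWork mergedWork : mergeJoinWork.getBaseWorkList()) {
    if (prefixes.length() > 0) {
        prefixes.append(",");
    }
    prefixes.append(mergedWork.getName());
}
conf.set(DagUtils.TEZ_MERGE_WORK_FILE_PREFIXES, prefixes.toString());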
Use of org.apache.hadoop.hive.ql.plan.BaseWork in project hive by apache: class SparkPlanGenerator, method isCachingWork.
private boolean isCachingWork(BaseWork work, SparkWork sparkWork) {
boolean caching = true;
List<BaseWork> children = sparkWork.getChildren(work);
if (children.size() < 2) {
caching = false;
} else {
// do not cache this if its child RDD is intended to be cached.
for (BaseWork child : children) {
if (cloneToWork.containsKey(child)) {
caching = false;
}
}
}
return caching;
}
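The flag computed here is typically used to decide whether the RDD generated for this work should be persisted, so that each child work can consume it without recomputing the parent. Below is a minimal sketch of how a caller might act on the decision; the rdd variable and the chosen storage level are assumptions.
// Hedged sketch: persist the generated RDD when the work feeds multiple children.
// rdd is assumed to be the JavaPairRDD produced for this BaseWork.
if (isCachingWork(work, sparkWork)) {
    // MEMORY_AND_DISK avoids recomputing the shared parent for every child work.
    rdd = rdd.persist(StorageLevel.MEMORY_AND_DISK());
}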