use of org.apache.tez.client.CallerContext in project tez by apache.
the class TezExampleBase method runDag.
/**
 * @param dag           the DAG to execute
 * @param printCounters whether to print counters or not
 * @param logger        the logger to use while printing diagnostics
 * @return zero indicates success, non-zero indicates failure
 * @throws TezException
 * @throws InterruptedException
 * @throws IOException
 */
public int runDag(DAG dag, boolean printCounters, Logger logger)
    throws TezException, InterruptedException, IOException {
  tezClientInternal.waitTillReady();

  CallerContext callerContext = CallerContext.create("TezExamples",
      "Tez Example DAG: " + dag.getName());
  ApplicationId appId = tezClientInternal.getAppMasterApplicationId();
  if (hadoopShim == null) {
    Configuration conf = (getConf() == null ? new Configuration(false) : getConf());
    hadoopShim = new HadoopShimsLoader(conf).getHadoopShim();
  }
  if (appId != null) {
    TezUtilsInternal.setHadoopCallerContext(hadoopShim, appId);
    callerContext.setCallerIdAndType(appId.toString(), "TezExampleApplication");
  }
  dag.setCallerContext(callerContext);

  DAGClient dagClient = tezClientInternal.submitDAG(dag);
  Set<StatusGetOpts> getOpts = Sets.newHashSet();
  if (printCounters) {
    getOpts.add(StatusGetOpts.GET_COUNTERS);
  }

  DAGStatus dagStatus = dagClient.waitForCompletionWithStatusUpdates(getOpts);
  if (dagStatus.getState() != DAGStatus.State.SUCCEEDED) {
    logger.info("DAG diagnostics: " + dagStatus.getDiagnostics());
    return -1;
  }
  return 0;
}
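Stripped of the shim and configuration handling, the snippet above reduces to a small pattern: create a CallerContext, tag it with the YARN application id once that id is known, and attach the context to the DAG before submitting it. A minimal sketch of that pattern, with illustrative names ("MyApp", "MyAppType") in place of the example's real ones:

CallerContext callerContext = CallerContext.create("MyApp", "My example DAG");
ApplicationId appId = tezClient.getAppMasterApplicationId(); // may be null until the AM exists
if (appId != null) {
  callerContext.setCallerIdAndType(appId.toString(), "MyAppType");
}
dag.setCallerContext(callerContext);
DAGClient dagClient = tezClient.submitDAG(dag);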
use of org.apache.tez.client.CallerContext in project hive by apache.
the class TezTask method execute.
@Override
public int execute(DriverContext driverContext) {
  int rc = 1;
  boolean cleanContext = false;
  Context ctx = null;
  Ref<TezSessionState> sessionRef = Ref.from(null);
  try {
    // Get or create the Context object. If we create it, we have to clean it up later as well.
    ctx = driverContext.getCtx();
    if (ctx == null) {
      ctx = new Context(conf);
      cleanContext = true;
      // Some DDL tasks that directly execute a TezTask do not set up a Context and hence no
      // TriggerContext. Their queryId handling is also inconsistent: some carry an executionId
      // instead of a proper queryId.
      String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID);
      WmContext wmContext = new WmContext(System.currentTimeMillis(), queryId);
      ctx.setWmContext(wmContext);
    }
    // Need to remove this static hack. But this is currently the way to get a session.
    SessionState ss = SessionState.get();
    // Note: given that we return pool sessions to the pool in the finally block below, and that
    // we need to set the global to null to do that, this "reuse" may be pointless.
    TezSessionState session = sessionRef.value = ss.getTezSession();
    if (session != null && !session.isOpen()) {
      LOG.warn("The session: " + session + " has not been opened");
    }
    // We only need a username for UGI to use for groups; getGroups will fetch the groups
    // based on Hadoop configuration, as documented at
    // https://hadoop.apache.org/docs/r2.8.0/hadoop-project-dist/hadoop-common/GroupsMapping.html
    String userName = ss.getUserName();
    List<String> groups = null;
    if (userName == null) {
      userName = "anonymous";
    } else {
      groups = UserGroupInformation.createRemoteUser(ss.getUserName()).getGroups();
    }
    MappingInput mi = new MappingInput(userName, groups,
        ss.getHiveVariables().get("wmpool"), ss.getHiveVariables().get("wmapp"));

    WmContext wmContext = ctx.getWmContext();
    // jobConf will hold all the configuration for Hadoop, Tez, and Hive.
    JobConf jobConf = utils.createConfiguration(conf);
    // Get all user jars from work (e.g. input format stuff).
    String[] allNonConfFiles = work.configureJobConfAndExtractJars(jobConf);
    // DAG scratch dir. We get a session from the pool, so it may differ from the Tez one.
    // TODO: we could perhaps reuse the same directory for HiveResources?
    Path scratchDir = utils.createTezDir(ctx.getMRScratchDir(), conf);
    CallerContext callerContext = CallerContext.create(
        "HIVE", queryPlan.getQueryId(), "HIVE_QUERY_ID", queryPlan.getQueryStr());

    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_GET_SESSION);
    session = sessionRef.value = WorkloadManagerFederation.getSession(
        sessionRef.value, conf, mi, getWork().getLlapMode(), wmContext);
    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_GET_SESSION);

    try {
      ss.setTezSession(session);
      LOG.info("Subscribed to counters: {} for queryId: {}",
          wmContext.getSubscribedCounters(), wmContext.getQueryId());

      // Ensure the session is open and has the necessary local resources.
      // This refreshes any conf resources as well as local resources.
      ensureSessionHasResources(session, allNonConfFiles);

      // This is a combination of the jars from the conf and those not from the conf.
      List<LocalResource> allNonAppResources = session.getLocalizedResources();
      logResources(allNonAppResources);

      Map<String, LocalResource> allResources =
          DagUtils.createTezLrMap(session.getAppJarLr(), allNonAppResources);

      // Next, we translate the TezWork to a Tez DAG.
      DAG dag = build(jobConf, work, scratchDir, ctx, allResources);
      dag.setCallerContext(callerContext);

      // Check isShutdown opportunistically; it's never unset.
      if (this.isShutdown) {
        throw new HiveException("Operation cancelled");
      }
      DAGClient dagClient = submit(jobConf, dag, sessionRef);
      session = sessionRef.value;
      boolean wasShutdown = false;
      synchronized (dagClientLock) {
        assert this.dagClient == null;
        wasShutdown = this.isShutdown;
        if (!wasShutdown) {
          this.dagClient = dagClient;
        }
      }
      if (wasShutdown) {
        closeDagClientOnCancellation(dagClient);
        throw new HiveException("Operation cancelled");
      }

      // Finally, the monitor prints progress until the job is done.
      TezJobMonitor monitor = new TezJobMonitor(work.getAllWork(), dagClient, conf, dag, ctx);
      rc = monitor.monitorExecution();
      if (rc != 0) {
        this.setException(new HiveException(monitor.getDiagnostics()));
      }

      // Fetch the counters.
      try {
        Set<StatusGetOpts> statusGetOpts = EnumSet.of(StatusGetOpts.GET_COUNTERS);
        counters = dagClient.getDAGStatus(statusGetOpts).getDAGCounters();
      } catch (Exception err) {
        // Don't fail execution due to counters - just don't print summary info.
        LOG.warn("Failed to get counters. Ignoring, summary info will be incomplete. " + err, err);
        counters = null;
      }
    } finally {
      // Note: due to TEZ-3846, the session may actually be invalid in case of some errors.
      // Currently, reopen on an attempted reuse will take care of that; we cannot tell
      // if the session is usable until we try.
      // We return this to the pool even if it's unusable; reopen is supposed to handle this.
      wmContext = ctx.getWmContext();
      try {
        if (sessionRef.value != null) {
          sessionRef.value.returnToSessionManager();
        }
      } catch (Exception e) {
        LOG.error("Failed to return session: {} to pool", session, e);
        throw e;
      }
      if (!conf.getVar(HiveConf.ConfVars.TEZ_SESSION_EVENTS_SUMMARY).equalsIgnoreCase("none")
          && wmContext != null) {
        if (conf.getVar(HiveConf.ConfVars.TEZ_SESSION_EVENTS_SUMMARY).equalsIgnoreCase("json")) {
          wmContext.printJson(console);
        } else if (conf.getVar(HiveConf.ConfVars.TEZ_SESSION_EVENTS_SUMMARY).equalsIgnoreCase("text")) {
          wmContext.print(console);
        }
      }
    }

    if (LOG.isInfoEnabled() && counters != null
        && (HiveConf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY)
            || Utilities.isPerfOrAboveLogging(conf))) {
      for (CounterGroup group : counters) {
        LOG.info(group.getDisplayName() + ":");
        for (TezCounter counter : group) {
          LOG.info("   " + counter.getDisplayName() + ": " + counter.getValue());
        }
      }
    }
  } catch (Exception e) {
    LOG.error("Failed to execute tez graph.", e);
    // rc will be 1 at this point, indicating failure.
  } finally {
    Utilities.clearWork(conf);
    // Clear gWorkMap.
    for (BaseWork w : work.getAllWork()) {
      JobConf workCfg = workToConf.get(w);
      if (workCfg != null) {
        Utilities.clearWorkMapForConf(workCfg);
      }
    }
    if (cleanContext) {
      try {
        ctx.clear();
      } catch (Exception e) {
        /* best effort */
        LOG.warn("Failed to clean up after tez job", e);
      }
    }
    // Need to either move tmp files or remove them.
    DAGClient dagClient = null;
    synchronized (dagClientLock) {
      dagClient = this.dagClient;
      this.dagClient = null;
    }
    // DagClient as such should have no bearing on jobClose.
    if (dagClient != null) {
      // rc will only be overwritten if close errors out.
      rc = close(work, rc, dagClient);
    }
  }
  return rc;
}
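Buried in the method above is the four-argument factory Hive uses to describe its DAGs: CallerContext.create(context, callerId, callerType, description). A sketch of that call shape with hypothetical values (the real ones come from queryPlan):

CallerContext cc = CallerContext.create(
    "HIVE",                     // context: the system submitting the DAG
    "hive_20240101_000001",     // callerId: hypothetical Hive query id
    "HIVE_QUERY_ID",            // callerType: what the id denotes
    "SELECT COUNT(*) FROM t");  // description: e.g. the query text
dag.setCallerContext(cc);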
use of org.apache.tez.client.CallerContext in project tez by apache.
the class DAGAppMaster method startDAG.
private void startDAG(DAGPlan dagPlan, Map<String, LocalResource> additionalAMResources)
    throws TezException {
  long submitTime = this.clock.getTime();
  this.appName = dagPlan.getName();

  // /////////////////// Create the job itself.
  final DAG newDAG = createDAG(dagPlan);
  _updateLoggers(newDAG, "");
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running a DAG with " + dagPlan.getVertexCount() + " vertices ");
    for (VertexPlan v : dagPlan.getVertexList()) {
      LOG.debug("DAG has vertex " + v.getName());
    }
  }
  Map<String, LocalResource> lrDiff = getAdditionalLocalResourceDiff(newDAG, additionalAMResources);
  if (lrDiff != null) {
    amResources.putAll(lrDiff);
    cumulativeAdditionalResources.putAll(lrDiff);
  }

  String callerContextStr = "";
  if (dagPlan.hasCallerContext()) {
    CallerContext callerContext =
        DagTypeConverters.convertCallerContextFromProto(dagPlan.getCallerContext());
    callerContextStr = ", callerContext=" + callerContext.contextAsSimpleString();
  }
  LOG.info("Running DAG: " + dagPlan.getName() + callerContextStr);

  String timeStamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
      .format(Calendar.getInstance().getTime());
  System.err.println(timeStamp + " Running Dag: " + newDAG.getID());
  System.out.println(timeStamp + " Running Dag: " + newDAG.getID());

  // Job name is the same as the app name until we support multiple DAGs
  // for an app later.
  final DAGSubmittedEvent submittedEvent = new DAGSubmittedEvent(newDAG.getID(), submitTime,
      dagPlan, this.appAttemptID, cumulativeAdditionalResources, newDAG.getUserName(),
      newDAG.getConf(), containerLogs, getContext().getQueueName());
  boolean dagLoggingEnabled = newDAG.getConf().getBoolean(
      TezConfiguration.TEZ_DAG_HISTORY_LOGGING_ENABLED,
      TezConfiguration.TEZ_DAG_HISTORY_LOGGING_ENABLED_DEFAULT);
  submittedEvent.setHistoryLoggingEnabled(dagLoggingEnabled);
  try {
    appMasterUgi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        historyEventHandler.handleCriticalEvent(
            new DAGHistoryEvent(newDAG.getID(), submittedEvent));
        return null;
      }
    });
  } catch (IOException | InterruptedException e) {
    throw new TezUncheckedException(e);
  }

  startDAGExecution(newDAG, lrDiff);
  // Set state after curDag is set.
  this.state = DAGAppMasterState.RUNNING;
}
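Note the use of contextAsSimpleString() for the log line: as the test below asserts, it omits the free-form description that toString() includes, so a long query string does not end up in the "Running DAG" log message. A small sketch with hypothetical values:

CallerContext cc = CallerContext.create("HIVE", "query-1", "HIVE_QUERY_ID", "select ...");
LOG.info("full=" + cc.toString());                 // includes "select ..."
LOG.info("simple=" + cc.contextAsSimpleString());  // omits the description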
use of org.apache.tez.client.CallerContext in project tez by apache.
the class TestDAG method testCallerContext.
@Test
public void testCallerContext() {
  try {
    CallerContext.create("ctxt", "", "", "desc");
    Assert.fail("Expected failure for invalid args");
  } catch (Exception e) {
    // Expected.
  }
  try {
    CallerContext.create("", "desc");
    Assert.fail("Expected failure for invalid args");
  } catch (Exception e) {
    // Expected.
  }
  CallerContext.create("ctxt", "a", "a", "desc");
  CallerContext.create("ctxt", null);

  CallerContext callerContext = CallerContext.create("ctxt", "desc");
  Assert.assertTrue(callerContext.toString().contains("desc"));
  Assert.assertFalse(callerContext.contextAsSimpleString().contains("desc"));
}
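Taken together, the assertions imply the validation contract: the context string is mandatory and must be non-empty, a caller id and caller type must be supplied together (an empty pair is rejected), and the description is optional (null is accepted). The last two assertions also confirm the difference noted earlier: toString() carries the description while contextAsSimpleString() does not.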
use of org.apache.tez.client.CallerContext in project tez by apache.
the class TestHistoryParser method runWordCount.
private String runWordCount(String tokenizerProcessor, String summationProcessor,
    String dagName, boolean withTimeline) throws Exception {
  // HDFS path
  Path outputLoc = new Path("/tmp/outPath_" + System.currentTimeMillis());
  DataSourceDescriptor dataSource = MRInput.createConfigBuilder(conf,
      TextInputFormat.class, inputLoc.toString()).build();
  DataSinkDescriptor dataSink = MROutput.createConfigBuilder(conf,
      TextOutputFormat.class, outputLoc.toString()).build();
  Vertex tokenizerVertex = Vertex.create(TOKENIZER,
      ProcessorDescriptor.create(tokenizerProcessor)).addDataSource(INPUT, dataSource);
  OrderedPartitionedKVEdgeConfig edgeConf = OrderedPartitionedKVEdgeConfig.newBuilder(
      Text.class.getName(), IntWritable.class.getName(),
      HashPartitioner.class.getName()).build();
  Vertex summationVertex = Vertex.create(SUMMATION,
      ProcessorDescriptor.create(summationProcessor), 1).addDataSink(OUTPUT, dataSink);

  // Create the DAG and add the vertices. Connect the producer and consumer vertices via the edge.
  DAG dag = DAG.create(dagName);
  dag.addVertex(tokenizerVertex).addVertex(summationVertex)
      .addEdge(Edge.create(tokenizerVertex, summationVertex, edgeConf.createDefaultEdgeProperty()));

  TezClient tezClient = getTezClient(withTimeline);

  // Update the caller context.
  CallerContext callerContext = CallerContext.create("TezExamples", "Tez WordCount Example Job");
  ApplicationId appId = tezClient.getAppMasterApplicationId();
  if (appId == null) {
    appId = ApplicationId.newInstance(1001L, 1);
  }
  callerContext.setCallerIdAndType(appId.toString(), "TezApplication");
  dag.setCallerContext(callerContext);

  DAGClient client = tezClient.submitDAG(dag);
  client.waitForCompletionWithStatusUpdates(Sets.newHashSet(StatusGetOpts.GET_COUNTERS));
  TezDAGID tezDAGID = TezDAGID.getInstance(tezClient.getAppMasterApplicationId(), 1);

  if (tezClient != null) {
    tezClient.stop();
  }
  return tezDAGID.toString();
}