Use of org.apache.tez.serviceplugins.api.TaskSchedulerDescriptor in project tez by apache.
From the class TestDagTypeConverters, the method testServiceDescriptorTranslation:
@Test(timeout = 5000)
public void testServiceDescriptorTranslation() {
  TaskSchedulerDescriptor[] taskSchedulers;
  ContainerLauncherDescriptor[] containerLaunchers;
  TaskCommunicatorDescriptor[] taskComms;
  ServicePluginsDescriptor servicePluginsDescriptor;
  AMPluginDescriptorProto proto;

  // Uber-execution
  servicePluginsDescriptor = ServicePluginsDescriptor.create(true);
  proto = DagTypeConverters.convertServicePluginDescriptorToProto(servicePluginsDescriptor);
  assertTrue(proto.hasUberEnabled());
  assertTrue(proto.hasContainersEnabled());
  assertTrue(proto.getUberEnabled());
  assertTrue(proto.getContainersEnabled());
  assertEquals(0, proto.getTaskSchedulersCount());
  assertEquals(0, proto.getContainerLaunchersCount());
  assertEquals(0, proto.getTaskCommunicatorsCount());

  // Single plugin set specified. One with a payload.
  taskSchedulers = createTaskScheduelrs(1, false);
  containerLaunchers = createContainerLaunchers(1, false);
  taskComms = createTaskCommunicators(1, true);
  servicePluginsDescriptor = ServicePluginsDescriptor.create(taskSchedulers, containerLaunchers, taskComms);
  proto = DagTypeConverters.convertServicePluginDescriptorToProto(servicePluginsDescriptor);
  assertTrue(proto.hasUberEnabled());
  assertTrue(proto.hasContainersEnabled());
  assertFalse(proto.getUberEnabled());
  assertTrue(proto.getContainersEnabled());
  verifyPlugins(proto.getTaskSchedulersList(), 1, testScheduler, false);
  verifyPlugins(proto.getContainerLaunchersList(), 1, testLauncher, false);
  verifyPlugins(proto.getTaskCommunicatorsList(), 1, testComm, true);

  // Multiple plugin set specified. All with a payload
  taskSchedulers = createTaskScheduelrs(3, true);
  containerLaunchers = createContainerLaunchers(3, true);
  taskComms = createTaskCommunicators(3, true);
  servicePluginsDescriptor = ServicePluginsDescriptor.create(taskSchedulers, containerLaunchers, taskComms);
  proto = DagTypeConverters.convertServicePluginDescriptorToProto(servicePluginsDescriptor);
  assertTrue(proto.hasUberEnabled());
  assertTrue(proto.hasContainersEnabled());
  assertFalse(proto.getUberEnabled());
  assertTrue(proto.getContainersEnabled());
  verifyPlugins(proto.getTaskSchedulersList(), 3, testScheduler, true);
  verifyPlugins(proto.getContainerLaunchersList(), 3, testLauncher, true);
  verifyPlugins(proto.getTaskCommunicatorsList(), 3, testComm, true);

  // Single plugin set specified. One with a payload. No container execution. Uber enabled.
  taskSchedulers = createTaskScheduelrs(1, false);
  containerLaunchers = createContainerLaunchers(1, false);
  taskComms = createTaskCommunicators(1, true);
  servicePluginsDescriptor = ServicePluginsDescriptor.create(false, true, taskSchedulers, containerLaunchers, taskComms);
  proto = DagTypeConverters.convertServicePluginDescriptorToProto(servicePluginsDescriptor);
  assertTrue(proto.hasUberEnabled());
  assertTrue(proto.hasContainersEnabled());
  assertTrue(proto.getUberEnabled());
  assertFalse(proto.getContainersEnabled());
  verifyPlugins(proto.getTaskSchedulersList(), 1, testScheduler, false);
  verifyPlugins(proto.getContainerLaunchersList(), 1, testLauncher, false);
  verifyPlugins(proto.getTaskCommunicatorsList(), 1, testComm, true);
}
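The helpers referenced in the test (createTaskScheduelrs, createContainerLaunchers, createTaskCommunicators, verifyPlugins) are not reproduced on this page. The sketch below is a hedged guess at what such a helper could look like, assuming it builds count descriptors via the public TaskSchedulerDescriptor.create(name, className) factory and optionally attaches a UserPayload; the base names testScheduler and testSchedulerClassName and the payload bytes are illustrative placeholders, not the actual test code.

// Hypothetical helper: builds "count" scheduler descriptors, optionally with a payload.
// Assumes java.nio.ByteBuffer, java.nio.charset.StandardCharsets and
// org.apache.tez.dag.api.UserPayload are imported; testScheduler / testSchedulerClassName
// are placeholder base names, not the real test fields.
private TaskSchedulerDescriptor[] createTaskScheduelrs(int count, boolean withUserPayload) {
  TaskSchedulerDescriptor[] descriptors = new TaskSchedulerDescriptor[count];
  for (int i = 0; i < count; i++) {
    descriptors[i] = TaskSchedulerDescriptor.create(testScheduler + i, testSchedulerClassName + i);
    if (withUserPayload) {
      // Illustrative payload; verifyPlugins presumably checks it after the conversion to proto.
      descriptors[i].setUserPayload(
          UserPayload.create(ByteBuffer.wrap((testScheduler + i).getBytes(StandardCharsets.UTF_8))));
    }
  }
  return descriptors;
}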
Use of org.apache.tez.serviceplugins.api.TaskSchedulerDescriptor in project hive by apache.
From the class TezSessionState, the method openInternal:
protected void openInternal(String[] additionalFilesNotFromConf, boolean isAsync, LogHelper console,
    HiveResources resources) throws IOException, LoginException, URISyntaxException, TezException {
  // TODO Why is the queue name set again. It has already been setup via setQueueName. Do only one of the two.
  String confQueueName = conf.get(TezConfiguration.TEZ_QUEUE_NAME);
  if (queueName != null && !queueName.equals(confQueueName)) {
    LOG.warn("Resetting a queue name that was already set: was " + queueName + ", now " + confQueueName);
  }
  this.queueName = confQueueName;
  this.doAsEnabled = conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS);
  final boolean llapMode = "llap".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_MODE));

  // TODO This - at least for the session pool - will always be the hive user. How does doAs above this affect things ?
  UserGroupInformation ugi = Utils.getUGI();
  user = ugi.getShortUserName();
  LOG.info("User of session id " + sessionId + " is " + user);

  // Create the tez tmp dir and a directory for Hive resources.
  tezScratchDir = createTezDir(sessionId, null);
  if (resources != null) {
    // If we are getting the resources externally, don't relocalize anything.
    this.resources = resources;
    LOG.info("Setting resources to " + resources);
  } else {
    this.resources = new HiveResources(createTezDir(sessionId, "resources"));
    ensureLocalResources(conf, additionalFilesNotFromConf);
    LOG.info("Created new resources: " + resources);
  }

  // unless already installed on all the cluster nodes, we'll have to
  // localize hive-exec.jar as well.
  appJarLr = createJarLocalResource(utils.getExecJarPathLocal(conf));

  // configuration for the application master
  final Map<String, LocalResource> commonLocalResources = new HashMap<String, LocalResource>();
  commonLocalResources.put(DagUtils.getBaseName(appJarLr), appJarLr);
  for (LocalResource lr : this.resources.localizedResources) {
    commonLocalResources.put(DagUtils.getBaseName(lr), lr);
  }
  if (llapMode) {
    // localize llap client jars
    addJarLRByClass(LlapTaskSchedulerService.class, commonLocalResources);
    addJarLRByClass(LlapProtocolClientImpl.class, commonLocalResources);
    addJarLRByClass(LlapProtocolClientProxy.class, commonLocalResources);
    addJarLRByClass(RegistryOperations.class, commonLocalResources);
  }

  // Create environment for AM.
  Map<String, String> amEnv = new HashMap<String, String>();
  MRHelpers.updateEnvBasedOnMRAMEnv(conf, amEnv);

  // and finally we're ready to create and start the session
  // generate basic tez config
  final TezConfiguration tezConfig = new TezConfiguration(true);
  tezConfig.addResource(conf);
  setupTezParamsBasedOnMR(tezConfig);

  // set up the staging directory to use
  tezConfig.set(TezConfiguration.TEZ_AM_STAGING_DIR, tezScratchDir.toUri().toString());
  conf.stripHiddenConfigurations(tezConfig);

  ServicePluginsDescriptor servicePluginsDescriptor;
  Credentials llapCredentials = null;
  if (llapMode) {
    if (isKerberosEnabled(tezConfig)) {
      llapCredentials = new Credentials();
      llapCredentials.addToken(LlapTokenIdentifier.KIND_NAME, getLlapToken(user, tezConfig));
    }
    // TODO Change this to not serialize the entire Configuration - minor.
    UserPayload servicePluginPayload = TezUtils.createUserPayloadFromConf(tezConfig);
    // we need plugins to handle llap and uber mode
    servicePluginsDescriptor = ServicePluginsDescriptor.create(true,
        new TaskSchedulerDescriptor[] {
            TaskSchedulerDescriptor.create(LLAP_SERVICE, LLAP_SCHEDULER).setUserPayload(servicePluginPayload) },
        new ContainerLauncherDescriptor[] {
            ContainerLauncherDescriptor.create(LLAP_SERVICE, LLAP_LAUNCHER) },
        new TaskCommunicatorDescriptor[] {
            TaskCommunicatorDescriptor.create(LLAP_SERVICE, LLAP_TASK_COMMUNICATOR).setUserPayload(servicePluginPayload) });
  } else {
    servicePluginsDescriptor = ServicePluginsDescriptor.create(true);
  }

  // container prewarming. tell the am how many containers we need
  if (HiveConf.getBoolVar(conf, ConfVars.HIVE_PREWARM_ENABLED)) {
    int n = HiveConf.getIntVar(conf, ConfVars.HIVE_PREWARM_NUM_CONTAINERS);
    n = Math.max(tezConfig.getInt(TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS,
        TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS_DEFAULT), n);
    tezConfig.setInt(TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS, n);
  }
  setupSessionAcls(tezConfig, conf);

  /*
   * Update HADOOP_CREDSTORE_PASSWORD for the TezAM.
   * If there is a job specific credential store, it will be set.
   * HiveConfUtil.updateJobCredentialProviders should not be used here,
   * as it changes the credential store path too, which causes the dag submission to fail,
   * as this config has an effect in HS2 (on the TezClient codepath), and the original hadoop
   * credential store should be used.
   */
  HiveConfUtil.updateCredentialProviderPasswordForJobs(tezConfig);

  String tezJobNameFormat = HiveConf.getVar(conf, ConfVars.HIVETEZJOBNAME);
  final TezClient session = TezClient.newBuilder(String.format(tezJobNameFormat, sessionId), tezConfig)
      .setIsSession(true)
      .setLocalResources(commonLocalResources)
      .setCredentials(llapCredentials)
      .setServicePluginDescriptor(servicePluginsDescriptor)
      .build();
  LOG.info("Opening new Tez Session (id: " + sessionId + ", scratch dir: " + tezScratchDir + ")");

  TezJobMonitor.initShutdownHook();
  if (!isAsync) {
    startSessionAndContainers(session, conf, commonLocalResources, tezConfig, false);
    this.session = session;
  } else {
    FutureTask<TezClient> sessionFuture = new FutureTask<>(new Callable<TezClient>() {
      @Override
      public TezClient call() throws Exception {
        TezClient result = null;
        try {
          result = startSessionAndContainers(session, conf, commonLocalResources, tezConfig, true);
        } catch (Throwable t) {
          // The caller has already stopped the session.
          LOG.error("Failed to start Tez session", t);
          throw (t instanceof Exception) ? (Exception) t : new Exception(t);
        }
        // This is not bulletproof but should allow us to close the session in most cases.
        if (Thread.interrupted()) {
          LOG.info("Interrupted while starting Tez session");
          closeAndIgnoreExceptions(result);
          return null;
        }
        return result;
      }
    });
    new Thread(sessionFuture, "Tez session start thread").start();
    // We assume here nobody will try to get the session before open() returns.
    this.console = console;
    this.sessionFuture = sessionFuture;
  }
}
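Taken together, the two snippets show the general pattern for wiring a custom task scheduler into a Tez session: build descriptors with the create(name, className) factories, wrap them in a ServicePluginsDescriptor, and hand that to the TezClient builder. Below is a minimal sketch of that pattern; MyTaskScheduler, MyContainerLauncher, MyTaskCommunicator and the "myService" name are placeholders, and exception handling is omitted.

// Illustrative only: the plugin classes and the "myService" name are placeholders.
TezConfiguration tezConf = new TezConfiguration();
UserPayload payload = TezUtils.createUserPayloadFromConf(tezConf);

TaskSchedulerDescriptor[] schedulers = new TaskSchedulerDescriptor[] {
    TaskSchedulerDescriptor.create("myService", MyTaskScheduler.class.getName()).setUserPayload(payload) };
ContainerLauncherDescriptor[] launchers = new ContainerLauncherDescriptor[] {
    ContainerLauncherDescriptor.create("myService", MyContainerLauncher.class.getName()) };
TaskCommunicatorDescriptor[] communicators = new TaskCommunicatorDescriptor[] {
    TaskCommunicatorDescriptor.create("myService", MyTaskCommunicator.class.getName()).setUserPayload(payload) };

// The array-only overload (as exercised in the test above) leaves container execution enabled;
// the boolean overloads shown above additionally control uber and container execution.
ServicePluginsDescriptor plugins =
    ServicePluginsDescriptor.create(schedulers, launchers, communicators);

TezClient client = TezClient.newBuilder("my-session", tezConf)
    .setIsSession(true)
    .setServicePluginDescriptor(plugins)
    .build();
client.start();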