Use of org.apache.hadoop.yarn.api.records.LocalResource in project incubator-systemml by apache.
The class DMLYarnClient, method constructLocalResourceMap.
private Map<String, LocalResource> constructLocalResourceMap(YarnConfiguration yconf) throws IOException {
  Map<String, LocalResource> rMap = new HashMap<>();
  Path path = new Path(_hdfsJarFile);
  LocalResource resource = Records.newRecord(LocalResource.class);
  FileStatus jarStat = IOUtilFunctions.getFileSystem(path, yconf).getFileStatus(path);
  resource.setResource(ConverterUtils.getYarnUrlFromPath(path));
  resource.setSize(jarStat.getLen());
  resource.setTimestamp(jarStat.getModificationTime());
  resource.setType(LocalResourceType.FILE);
  resource.setVisibility(LocalResourceVisibility.PUBLIC);
  rMap.put(DML_JAR_NAME, resource);
  return rMap;
}
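The map key (DML_JAR_NAME) becomes the link name the jar gets in the container's working directory. A minimal sketch of how such a map is typically consumed when submitting a YARN application master; amContainer and appContext are illustrative names, not fields of DMLYarnClient:

  Map<String, LocalResource> resources = constructLocalResourceMap(yconf);
  // The NodeManager downloads every entry before launching the container.
  ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
  amContainer.setLocalResources(resources);
  appContext.setAMContainerSpec(amContainer);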
Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.
The class TezSessionState, method openInternal.
protected void openInternal(final HiveConf conf, Collection<String> additionalFiles,
    boolean isAsync, LogHelper console, Path scratchDir)
    throws IOException, LoginException, IllegalArgumentException, URISyntaxException, TezException {
  this.conf = conf;
  // TODO Why is the queue name set again. It has already been setup via setQueueName. Do only one of the two.
  String confQueueName = conf.get(TezConfiguration.TEZ_QUEUE_NAME);
  if (queueName != null && !queueName.equals(confQueueName)) {
    LOG.warn("Resetting a queue name that was already set: was " + queueName + ", now " + confQueueName);
  }
  this.queueName = confQueueName;
  this.doAsEnabled = conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS);
  final boolean llapMode = "llap".equalsIgnoreCase(
      HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_MODE));
  // TODO This - at least for the session pool - will always be the hive user. How does doAs above this affect things?
  UserGroupInformation ugi = Utils.getUGI();
  user = ugi.getShortUserName();
  LOG.info("User of session id " + sessionId + " is " + user);
  // create the tez tmp dir
  tezScratchDir = scratchDir == null ? createTezDir(sessionId) : scratchDir;
  additionalFilesNotFromConf.clear();
  if (additionalFiles != null) {
    additionalFilesNotFromConf.addAll(additionalFiles);
  }
  refreshLocalResourcesFromConf(conf);
  // unless already installed on all the cluster nodes, we'll have to
  // localize hive-exec.jar as well.
  appJarLr = createJarLocalResource(utils.getExecJarPathLocal());
  // configuration for the application master
  final Map<String, LocalResource> commonLocalResources = new HashMap<String, LocalResource>();
  commonLocalResources.put(utils.getBaseName(appJarLr), appJarLr);
  for (LocalResource lr : localizedResources) {
    commonLocalResources.put(utils.getBaseName(lr), lr);
  }
  if (llapMode) {
    // localize llap client jars
    addJarLRByClass(LlapTaskSchedulerService.class, commonLocalResources);
    addJarLRByClass(LlapProtocolClientImpl.class, commonLocalResources);
    addJarLRByClass(LlapProtocolClientProxy.class, commonLocalResources);
    addJarLRByClassName("org.apache.hadoop.registry.client.api.RegistryOperations", commonLocalResources);
  }
  // Create environment for AM.
  Map<String, String> amEnv = new HashMap<String, String>();
  MRHelpers.updateEnvBasedOnMRAMEnv(conf, amEnv);
  // and finally we're ready to create and start the session
  // generate basic tez config
  final TezConfiguration tezConfig = new TezConfiguration(conf);
  // set up the staging directory to use
  tezConfig.set(TezConfiguration.TEZ_AM_STAGING_DIR, tezScratchDir.toUri().toString());
  conf.stripHiddenConfigurations(tezConfig);
  ServicePluginsDescriptor servicePluginsDescriptor;
  Credentials llapCredentials = null;
  if (llapMode) {
    if (UserGroupInformation.isSecurityEnabled()) {
      llapCredentials = new Credentials();
      llapCredentials.addToken(LlapTokenIdentifier.KIND_NAME, getLlapToken(user, tezConfig));
    }
    // TODO Change this to not serialize the entire Configuration - minor.
    UserPayload servicePluginPayload = TezUtils.createUserPayloadFromConf(tezConfig);
    // we need plugins to handle llap and uber mode
    servicePluginsDescriptor = ServicePluginsDescriptor.create(true,
        new TaskSchedulerDescriptor[] {
            TaskSchedulerDescriptor.create(LLAP_SERVICE, LLAP_SCHEDULER).setUserPayload(servicePluginPayload) },
        new ContainerLauncherDescriptor[] {
            ContainerLauncherDescriptor.create(LLAP_SERVICE, LLAP_LAUNCHER) },
        new TaskCommunicatorDescriptor[] {
            TaskCommunicatorDescriptor.create(LLAP_SERVICE, LLAP_TASK_COMMUNICATOR).setUserPayload(servicePluginPayload) });
  } else {
    servicePluginsDescriptor = ServicePluginsDescriptor.create(true);
  }
  // container prewarming. tell the am how many containers we need
  if (HiveConf.getBoolVar(conf, ConfVars.HIVE_PREWARM_ENABLED)) {
    int n = HiveConf.getIntVar(conf, ConfVars.HIVE_PREWARM_NUM_CONTAINERS);
    n = Math.max(tezConfig.getInt(TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS,
        TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS_DEFAULT), n);
    tezConfig.setInt(TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS, n);
  }
  setupSessionAcls(tezConfig, conf);
  final TezClient session = TezClient.newBuilder("HIVE-" + sessionId, tezConfig)
      .setIsSession(true)
      .setLocalResources(commonLocalResources)
      .setCredentials(llapCredentials)
      .setServicePluginDescriptor(servicePluginsDescriptor)
      .build();
  LOG.info("Opening new Tez Session (id: " + sessionId + ", scratch dir: " + tezScratchDir + ")");
  TezJobMonitor.initShutdownHook();
  if (!isAsync) {
    startSessionAndContainers(session, conf, commonLocalResources, tezConfig, false);
    this.session = session;
  } else {
    FutureTask<TezClient> sessionFuture = new FutureTask<>(new Callable<TezClient>() {
      @Override
      public TezClient call() throws Exception {
        try {
          return startSessionAndContainers(session, conf, commonLocalResources, tezConfig, true);
        } catch (Throwable t) {
          LOG.error("Failed to start Tez session", t);
          throw (t instanceof Exception) ? (Exception) t : new Exception(t);
        }
      }
    });
    new Thread(sessionFuture, "Tez session start thread").start();
    // We assume here nobody will try to get session before open() returns.
    this.console = console;
    this.sessionFuture = sessionFuture;
  }
}
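When isAsync is true, the session starts on a background thread and the FutureTask is kept so later callers can block on it. A minimal sketch of the consuming side, assuming java.util.concurrent imports; waitForSessionStart is a hypothetical helper, not part of TezSessionState's real API:

  // Hypothetical helper: block until the background start finishes.
  private TezClient waitForSessionStart(FutureTask<TezClient> sessionFuture) throws HiveException {
    try {
      return sessionFuture.get();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new HiveException("Interrupted while starting Tez session", e);
    } catch (ExecutionException e) {
      throw new HiveException("Tez session failed to start", e.getCause());
    }
  }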
Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.
The class DagUtils, method createLocalResource.
/*
 * Helper method to create a YARN local resource.
 */
private LocalResource createLocalResource(FileSystem remoteFs, Path file,
    LocalResourceType type, LocalResourceVisibility visibility) {
  FileStatus fstat;
  try {
    fstat = remoteFs.getFileStatus(file);
  } catch (IOException e) {
    // Fail fast here instead of swallowing the exception and
    // dereferencing a null FileStatus below.
    throw new RuntimeException("Unable to stat " + file, e);
  }
  URL resourceURL = ConverterUtils.getYarnUrlFromPath(file);
  long resourceSize = fstat.getLen();
  long resourceModificationTime = fstat.getModificationTime();
  LOG.info("Resource modification time: " + resourceModificationTime + " for " + file);
  LocalResource lr = Records.newRecord(LocalResource.class);
  lr.setResource(resourceURL);
  lr.setType(type);
  lr.setSize(resourceSize);
  lr.setVisibility(visibility);
  lr.setTimestamp(resourceModificationTime);
  return lr;
}
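LocalResourceVisibility controls how the NodeManager caches the file: PUBLIC resources are shared by all users on the node, PRIVATE ones are shared across applications of the same user, and APPLICATION ones live only for the lifetime of the application. A minimal sketch of a call site; the path and variable names are illustrative:

  FileSystem fs = FileSystem.get(conf);
  Path jarPath = new Path("/apps/hive/aux/my-udfs.jar");  // illustrative path
  LocalResource jarLr = createLocalResource(fs, jarPath,
      LocalResourceType.FILE, LocalResourceVisibility.APPLICATION);
  localResources.put(jarPath.getName(), jarLr);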
Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.
The class DagUtils, method createVertex.
/*
 * Helper function to create a Vertex from MapWork.
 */
private Vertex createVertex(JobConf conf, MapWork mapWork, LocalResource appJarLr,
    List<LocalResource> additionalLr, FileSystem fs, Path mrScratchDir, Context ctx,
    VertexType vertexType) throws Exception {
  Path tezDir = getTezDir(mrScratchDir);
  // set up the operator plan
  Utilities.cacheMapWork(conf, mapWork, mrScratchDir);
  // create the directories FileSinkOperators need
  Utilities.createTmpDirs(conf, mapWork);
  // finally create the vertex
  Vertex map = null;
  // use tez to combine splits
  boolean groupSplitsInInputInitializer;
  DataSourceDescriptor dataSource;
  int numTasks = -1;
  @SuppressWarnings("rawtypes")
  Class inputFormatClass = conf.getClass("mapred.input.format.class", InputFormat.class);
  boolean vertexHasCustomInput = VertexType.isCustomInputType(vertexType);
  LOG.info("Vertex has custom input? " + vertexHasCustomInput);
  if (vertexHasCustomInput) {
    groupSplitsInInputInitializer = false;
    // grouping happens in execution phase. The input payload should not enable grouping here,
    // it will be enabled in the CustomVertex.
    inputFormatClass = HiveInputFormat.class;
    conf.setClass("mapred.input.format.class", HiveInputFormat.class, InputFormat.class);
    // mapreduce.tez.input.initializer.serialize.event.payload should be set to false when using
    // this plug-in to avoid getting a serialized event at run-time.
    conf.setBoolean("mapreduce.tez.input.initializer.serialize.event.payload", false);
  } else {
    // group splits in the input initializer only for HiveInputFormat
    groupSplitsInInputInitializer = (inputFormatClass == HiveInputFormat.class);
  }
  if (mapWork instanceof MergeFileWork) {
    Path outputPath = ((MergeFileWork) mapWork).getOutputDir();
    // prepare the tmp output directory. The output tmp directory should
    // exist before jobClose (before renaming after job completion)
    Path tempOutPath = Utilities.toTempPath(outputPath);
    try {
      FileSystem tmpOutFS = tempOutPath.getFileSystem(conf);
      if (!tmpOutFS.exists(tempOutPath)) {
        tmpOutFS.mkdirs(tempOutPath);
      }
    } catch (IOException e) {
      throw new RuntimeException("Can't make path " + outputPath + " : " + e.getMessage(), e);
    }
  }
  // remember mapping of plan to input
  conf.set(Utilities.INPUT_NAME, mapWork.getName());
  if (HiveConf.getBoolVar(conf, ConfVars.HIVE_AM_SPLIT_GENERATION)) {
    // set up the operator plan. (before setting up splits on the AM)
    Utilities.setMapWork(conf, mapWork, mrScratchDir, false);
    // grouping splits on the AM requires the correct input initializer plugin.
    if (groupSplitsInInputInitializer) {
      // Not setting a payload, since the MRInput payload is the same and can be accessed.
      InputInitializerDescriptor descriptor =
          InputInitializerDescriptor.create(HiveSplitGenerator.class.getName());
      dataSource = MRInputLegacy.createConfigBuilder(conf, inputFormatClass)
          .groupSplits(true).setCustomInitializerDescriptor(descriptor).build();
    } else {
      // Not HiveInputFormat, or a custom VertexManager will take care of grouping splits
      if (vertexHasCustomInput) {
        dataSource = MultiMRInput.createConfigBuilder(conf, inputFormatClass)
            .groupSplits(false).build();
      } else {
        dataSource = MRInputLegacy.createConfigBuilder(conf, inputFormatClass)
            .groupSplits(false).build();
      }
    }
  } else {
    // Set up client side split generation.
    // We need to set this, because with HS2 and client side split
    // generation we end up not finding the map work. This is
    // because of thread local madness (tez split generation is
    // multi-threaded - HS2 plan cache uses thread locals). Setting
    // VECTOR_MODE/USE_VECTORIZED_INPUT_FILE_FORMAT causes the split gen code
    // to use the conf instead of the map work.
    conf.setBoolean(Utilities.VECTOR_MODE, mapWork.getVectorMode());
    conf.setBoolean(Utilities.USE_VECTORIZED_INPUT_FILE_FORMAT, mapWork.getUseVectorizedInputFileFormat());
    dataSource = MRInputHelpers.configureMRInputWithLegacySplitGeneration(conf,
        new Path(tezDir, "split_" + mapWork.getName().replaceAll(" ", "_")), true);
    numTasks = dataSource.getNumberOfShards();
    // set up the operator plan. (after generating splits - that changes configs)
    Utilities.setMapWork(conf, mapWork, mrScratchDir, false);
  }
  UserPayload serializedConf = TezUtils.createUserPayloadFromConf(conf);
  String procClassName = MapTezProcessor.class.getName();
  if (mapWork instanceof MergeFileWork) {
    procClassName = MergeFileTezProcessor.class.getName();
  }
  VertexExecutionContext executionContext = createVertexExecutionContext(mapWork);
  map = Vertex.create(mapWork.getName(),
      ProcessorDescriptor.create(procClassName).setUserPayload(serializedConf),
      numTasks, getContainerResource(conf));
  map.setTaskEnvironment(getContainerEnvironment(conf, true));
  map.setExecutionContext(executionContext);
  map.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
  assert mapWork.getAliasToWork().keySet().size() == 1;
  // Add the actual source input
  String alias = mapWork.getAliasToWork().keySet().iterator().next();
  map.addDataSource(alias, dataSource);
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
  localResources.put(getBaseName(appJarLr), appJarLr);
  for (LocalResource lr : additionalLr) {
    localResources.put(getBaseName(lr), lr);
  }
  map.addTaskLocalFiles(localResources);
  return map;
}
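Once created, the vertex still has to be wired into a DAG and submitted through the Tez session. A minimal sketch using the standard Tez client API; dagName, reduceVertex, edgeProperty, and session are illustrative placeholders, not locals of createVertex:

  DAG dag = DAG.create(dagName);
  dag.addVertex(map);
  dag.addVertex(reduceVertex);
  dag.addEdge(Edge.create(map, reduceVertex, edgeProperty));
  DAGClient dagClient = session.submitDAG(dag);
  dagClient.waitForCompletion();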
Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.
The class TestTezTask, method testGetExtraLocalResources.
@Test
public void testGetExtraLocalResources() throws Exception {
  final String[] inputOutputJars = new String[] { "file:///tmp/foo.jar" };
  LocalResource res = mock(LocalResource.class);
  final List<LocalResource> resources = Collections.singletonList(res);
  final Map<String, LocalResource> resMap = new HashMap<String, LocalResource>();
  resMap.put("foo.jar", res);
  when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars)).thenReturn(resources);
  when(utils.getBaseName(res)).thenReturn("foo.jar");
  assertEquals(resMap, task.getExtraLocalResources(conf, path, inputOutputJars));
}
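The test stubs DagUtils so no file system access happens: localizeTempFiles and getBaseName are mocked, and the assertion checks that getExtraLocalResources keys each localized resource by its base name. A minimal sketch of the fixture such a test assumes; the fields and setup shown here are illustrative, not copied from TestTezTask:

  // Illustrative scaffolding (hypothetical, for context only).
  private DagUtils utils;
  private JobConf conf;
  private Path path;
  private TezTask task;

  @Before
  public void setUp() {
    utils = mock(DagUtils.class);
    conf = new JobConf();
    path = new Path("/tmp/hive-scratch");  // illustrative path
    task = new TezTask(utils);
  }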