Search in sources:

Example 61 with LocalResource

Use of org.apache.hadoop.yarn.api.records.LocalResource in project incubator-systemml by apache.

The class DMLYarnClient, method constructLocalResourceMap:

private Map<String, LocalResource> constructLocalResourceMap(YarnConfiguration yconf) throws IOException {
    Map<String, LocalResource> rMap = new HashMap<>();
    Path path = new Path(_hdfsJarFile);
    LocalResource resource = Records.newRecord(LocalResource.class);
    // Stat the jar on HDFS: the size and timestamp recorded here must match the
    // file exactly at localization time, or the NodeManager rejects the resource.
    FileStatus jarStat = IOUtilFunctions.getFileSystem(path, yconf).getFileStatus(path);
    resource.setResource(ConverterUtils.getYarnUrlFromPath(path));
    resource.setSize(jarStat.getLen());
    resource.setTimestamp(jarStat.getModificationTime());
    resource.setType(LocalResourceType.FILE);
    // PUBLIC visibility lets the NodeManager share one localized copy across users.
    resource.setVisibility(LocalResourceVisibility.PUBLIC);
    rMap.put(DML_JAR_NAME, resource);
    return rMap;
}
Also used: Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) HashMap(java.util.HashMap) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource)
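
The method above only builds the resource map; it takes effect once attached to a container launch context at application submission time. A minimal sketch of that consumption side, assuming a plain YarnClient submission flow with org.apache.hadoop.yarn.client.api.YarnClient and YarnClientApplication (the surrounding flow is illustrative, not part of DMLYarnClient):

YarnConfiguration yconf = new YarnConfiguration();
YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(yconf);
yarnClient.start();
YarnClientApplication app = yarnClient.createApplication();

// Attach the localized jar so YARN downloads it onto the AM's node.
ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
amContainer.setLocalResources(constructLocalResourceMap(yconf));

ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
appContext.setAMContainerSpec(amContainer);
yarnClient.submitApplication(appContext);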

Example 62 with LocalResource

Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.

The class TezSessionState, method openInternal:

protected void openInternal(final HiveConf conf, Collection<String> additionalFiles, boolean isAsync, LogHelper console, Path scratchDir) throws IOException, LoginException, IllegalArgumentException, URISyntaxException, TezException {
    this.conf = conf;
    // TODO Why is the queue name set again. It has already been setup via setQueueName. Do only one of the two.
    String confQueueName = conf.get(TezConfiguration.TEZ_QUEUE_NAME);
    if (queueName != null && !queueName.equals(confQueueName)) {
        LOG.warn("Resetting a queue name that was already set: was " + queueName + ", now " + confQueueName);
    }
    this.queueName = confQueueName;
    this.doAsEnabled = conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS);
    final boolean llapMode = "llap".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_MODE));
    // TODO This - at least for the session pool - will always be the hive user. How does doAs above this affect things?
    UserGroupInformation ugi = Utils.getUGI();
    user = ugi.getShortUserName();
    LOG.info("User of session id " + sessionId + " is " + user);
    // create the tez tmp dir
    tezScratchDir = scratchDir == null ? createTezDir(sessionId) : scratchDir;
    additionalFilesNotFromConf.clear();
    if (additionalFiles != null) {
        additionalFilesNotFromConf.addAll(additionalFiles);
    }
    refreshLocalResourcesFromConf(conf);
    // unless already installed on all the cluster nodes, we'll have to
    // localize hive-exec.jar as well.
    appJarLr = createJarLocalResource(utils.getExecJarPathLocal());
    // configuration for the application master
    final Map<String, LocalResource> commonLocalResources = new HashMap<String, LocalResource>();
    commonLocalResources.put(utils.getBaseName(appJarLr), appJarLr);
    for (LocalResource lr : localizedResources) {
        commonLocalResources.put(utils.getBaseName(lr), lr);
    }
    if (llapMode) {
        // localize llap client jars
        addJarLRByClass(LlapTaskSchedulerService.class, commonLocalResources);
        addJarLRByClass(LlapProtocolClientImpl.class, commonLocalResources);
        addJarLRByClass(LlapProtocolClientProxy.class, commonLocalResources);
        addJarLRByClassName("org.apache.hadoop.registry.client.api.RegistryOperations", commonLocalResources);
    }
    // Create environment for AM.
    Map<String, String> amEnv = new HashMap<String, String>();
    MRHelpers.updateEnvBasedOnMRAMEnv(conf, amEnv);
    // and finally we're ready to create and start the session
    // generate basic tez config
    final TezConfiguration tezConfig = new TezConfiguration(conf);
    // set up the staging directory to use
    tezConfig.set(TezConfiguration.TEZ_AM_STAGING_DIR, tezScratchDir.toUri().toString());
    conf.stripHiddenConfigurations(tezConfig);
    ServicePluginsDescriptor servicePluginsDescriptor;
    Credentials llapCredentials = null;
    if (llapMode) {
        if (UserGroupInformation.isSecurityEnabled()) {
            llapCredentials = new Credentials();
            llapCredentials.addToken(LlapTokenIdentifier.KIND_NAME, getLlapToken(user, tezConfig));
        }
        // TODO Change this to not serialize the entire Configuration - minor.
        UserPayload servicePluginPayload = TezUtils.createUserPayloadFromConf(tezConfig);
        // we need plugins to handle llap and uber mode
        servicePluginsDescriptor = ServicePluginsDescriptor.create(true, new TaskSchedulerDescriptor[] { TaskSchedulerDescriptor.create(LLAP_SERVICE, LLAP_SCHEDULER).setUserPayload(servicePluginPayload) }, new ContainerLauncherDescriptor[] { ContainerLauncherDescriptor.create(LLAP_SERVICE, LLAP_LAUNCHER) }, new TaskCommunicatorDescriptor[] { TaskCommunicatorDescriptor.create(LLAP_SERVICE, LLAP_TASK_COMMUNICATOR).setUserPayload(servicePluginPayload) });
    } else {
        servicePluginsDescriptor = ServicePluginsDescriptor.create(true);
    }
    // container prewarming. tell the am how many containers we need
    if (HiveConf.getBoolVar(conf, ConfVars.HIVE_PREWARM_ENABLED)) {
        int n = HiveConf.getIntVar(conf, ConfVars.HIVE_PREWARM_NUM_CONTAINERS);
        n = Math.max(tezConfig.getInt(TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS, TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS_DEFAULT), n);
        tezConfig.setInt(TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS, n);
    }
    setupSessionAcls(tezConfig, conf);
    final TezClient session = TezClient.newBuilder("HIVE-" + sessionId, tezConfig).setIsSession(true).setLocalResources(commonLocalResources).setCredentials(llapCredentials).setServicePluginDescriptor(servicePluginsDescriptor).build();
    LOG.info("Opening new Tez Session (id: " + sessionId + ", scratch dir: " + tezScratchDir + ")");
    TezJobMonitor.initShutdownHook();
    if (!isAsync) {
        startSessionAndContainers(session, conf, commonLocalResources, tezConfig, false);
        this.session = session;
    } else {
        FutureTask<TezClient> sessionFuture = new FutureTask<>(new Callable<TezClient>() {

            @Override
            public TezClient call() throws Exception {
                try {
                    return startSessionAndContainers(session, conf, commonLocalResources, tezConfig, true);
                } catch (Throwable t) {
                    LOG.error("Failed to start Tez session", t);
                    throw (t instanceof Exception) ? (Exception) t : new Exception(t);
                }
            }
        });
        new Thread(sessionFuture, "Tez session start thread").start();
        // We assume here that nobody will try to get the session before open() returns.
        this.console = console;
        this.sessionFuture = sessionFuture;
    }
}
Also used: UserPayload(org.apache.tez.dag.api.UserPayload) TaskSchedulerDescriptor(org.apache.tez.serviceplugins.api.TaskSchedulerDescriptor) HashMap(java.util.HashMap) ServicePluginsDescriptor(org.apache.tez.serviceplugins.api.ServicePluginsDescriptor) LoginException(javax.security.auth.login.LoginException) URISyntaxException(java.net.URISyntaxException) TimeoutException(java.util.concurrent.TimeoutException) CancellationException(java.util.concurrent.CancellationException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) TezException(org.apache.tez.dag.api.TezException) ExecutionException(java.util.concurrent.ExecutionException) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) TaskCommunicatorDescriptor(org.apache.tez.serviceplugins.api.TaskCommunicatorDescriptor) TezClient(org.apache.tez.client.TezClient) ContainerLauncherDescriptor(org.apache.tez.serviceplugins.api.ContainerLauncherDescriptor) FutureTask(java.util.concurrent.FutureTask) Credentials(org.apache.hadoop.security.Credentials) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) TezConfiguration(org.apache.tez.dag.api.TezConfiguration)
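
The TezClient built above is only started inside startSessionAndContainers, which is not shown on this page. Under the assumption that no prewarm containers are requested, the core of that step with the public TezClient API reduces to roughly this sketch:

// Sketch only; Hive's real startSessionAndContainers also handles prewarm
// containers, startup timeouts, and cleanup on failure.
session.start();          // asynchronously submits the Tez AM to YARN
session.waitTillReady();  // blocks until the AM is ready to accept DAGs
return session;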

Example 63 with LocalResource

Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.

The class DagUtils, method createLocalResource:

/*
   * Helper method to create a yarn local resource.
   */
private LocalResource createLocalResource(FileSystem remoteFs, Path file, LocalResourceType type, LocalResourceVisibility visibility) {
    FileStatus fstat;
    try {
        fstat = remoteFs.getFileStatus(file);
    } catch (IOException e) {
        // Fail fast rather than swallowing the error; otherwise fstat is
        // dereferenced as null a few lines below.
        throw new RuntimeException("Unable to get file status for " + file, e);
    }
    URL resourceURL = ConverterUtils.getYarnUrlFromPath(file);
    long resourceSize = fstat.getLen();
    long resourceModificationTime = fstat.getModificationTime();
    LOG.info("Resource modification time: " + resourceModificationTime + " for " + file);
    LocalResource lr = Records.newRecord(LocalResource.class);
    lr.setResource(resourceURL);
    lr.setType(type);
    lr.setSize(resourceSize);
    lr.setVisibility(visibility);
    lr.setTimestamp(resourceModificationTime);
    return lr;
}
Also used: FileStatus(org.apache.hadoop.fs.FileStatus) IOException(java.io.IOException) URL(org.apache.hadoop.yarn.api.records.URL) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource)
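
A hypothetical call site for this helper, registering an auxiliary jar for localization; the path, map, and resource name below are illustrative, not taken from DagUtils:

// Illustrative usage of createLocalResource.
Path auxJar = new Path("hdfs:///apps/hive/aux.jar");  // hypothetical path
FileSystem remoteFs = auxJar.getFileSystem(conf);
LocalResource auxLr = createLocalResource(remoteFs, auxJar,
    LocalResourceType.FILE, LocalResourceVisibility.APPLICATION);
localResources.put("aux.jar", auxLr);  // assumed Map<String, LocalResource>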

Example 64 with LocalResource

Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.

The class DagUtils, method createVertex:

/*
   * Helper function to create Vertex from MapWork.
   */
private Vertex createVertex(JobConf conf, MapWork mapWork, LocalResource appJarLr, List<LocalResource> additionalLr, FileSystem fs, Path mrScratchDir, Context ctx, VertexType vertexType) throws Exception {
    Path tezDir = getTezDir(mrScratchDir);
    // set up the operator plan
    Utilities.cacheMapWork(conf, mapWork, mrScratchDir);
    // create the directories FileSinkOperators need
    Utilities.createTmpDirs(conf, mapWork);
    // finally create the vertex
    Vertex map = null;
    // use tez to combine splits
    boolean groupSplitsInInputInitializer;
    DataSourceDescriptor dataSource;
    int numTasks = -1;
    @SuppressWarnings("rawtypes") Class inputFormatClass = conf.getClass("mapred.input.format.class", InputFormat.class);
    boolean vertexHasCustomInput = VertexType.isCustomInputType(vertexType);
    LOG.info("Vertex has custom input? " + vertexHasCustomInput);
    if (vertexHasCustomInput) {
        groupSplitsInInputInitializer = false;
        // grouping happens in execution phase. The input payload should not enable grouping here,
        // it will be enabled in the CustomVertex.
        inputFormatClass = HiveInputFormat.class;
        conf.setClass("mapred.input.format.class", HiveInputFormat.class, InputFormat.class);
        // mapreduce.tez.input.initializer.serialize.event.payload should be set to false when using
        // this plug-in to avoid getting a serialized event at run-time.
        conf.setBoolean("mapreduce.tez.input.initializer.serialize.event.payload", false);
    } else {
        // Group splits in the input initializer only for plain HiveInputFormat.
        groupSplitsInInputInitializer = (inputFormatClass == HiveInputFormat.class);
    }
    if (mapWork instanceof MergeFileWork) {
        Path outputPath = ((MergeFileWork) mapWork).getOutputDir();
        // prepare the tmp output directory. The output tmp directory should
        // exist before jobClose (before renaming after job completion)
        Path tempOutPath = Utilities.toTempPath(outputPath);
        try {
            FileSystem tmpOutFS = tempOutPath.getFileSystem(conf);
            if (!tmpOutFS.exists(tempOutPath)) {
                tmpOutFS.mkdirs(tempOutPath);
            }
        } catch (IOException e) {
            throw new RuntimeException("Can't make path " + outputPath + " : " + e.getMessage(), e);
        }
    }
    // remember mapping of plan to input
    conf.set(Utilities.INPUT_NAME, mapWork.getName());
    if (HiveConf.getBoolVar(conf, ConfVars.HIVE_AM_SPLIT_GENERATION)) {
        // set up the operator plan. (before setting up splits on the AM)
        Utilities.setMapWork(conf, mapWork, mrScratchDir, false);
        // If splits are generated in the AM, we only need to set up the correct plugin.
        if (groupSplitsInInputInitializer) {
            // Not setting a payload, since the MRInput payload is the same and can be accessed.
            InputInitializerDescriptor descriptor = InputInitializerDescriptor.create(HiveSplitGenerator.class.getName());
            dataSource = MRInputLegacy.createConfigBuilder(conf, inputFormatClass).groupSplits(true).setCustomInitializerDescriptor(descriptor).build();
        } else {
            // Not HiveInputFormat, or a custom VertexManager will take care of grouping splits
            if (vertexHasCustomInput) {
                dataSource = MultiMRInput.createConfigBuilder(conf, inputFormatClass).groupSplits(false).build();
            } else {
                dataSource = MRInputLegacy.createConfigBuilder(conf, inputFormatClass).groupSplits(false).build();
            }
        }
    } else {
        // Setup client side split generation.
        // we need to set this, because with HS2 and client side split
        // generation we end up not finding the map work. This is
        // because of thread local madness (tez split generation is
        // multi-threaded - HS2 plan cache uses thread locals). Setting
        // VECTOR_MODE/USE_VECTORIZED_INPUT_FILE_FORMAT causes the split gen code to use the conf instead
        // of the map work.
        conf.setBoolean(Utilities.VECTOR_MODE, mapWork.getVectorMode());
        conf.setBoolean(Utilities.USE_VECTORIZED_INPUT_FILE_FORMAT, mapWork.getUseVectorizedInputFileFormat());
        dataSource = MRInputHelpers.configureMRInputWithLegacySplitGeneration(conf, new Path(tezDir, "split_" + mapWork.getName().replaceAll(" ", "_")), true);
        numTasks = dataSource.getNumberOfShards();
        // set up the operator plan. (after generating splits - that changes configs)
        Utilities.setMapWork(conf, mapWork, mrScratchDir, false);
    }
    UserPayload serializedConf = TezUtils.createUserPayloadFromConf(conf);
    String procClassName = MapTezProcessor.class.getName();
    if (mapWork instanceof MergeFileWork) {
        procClassName = MergeFileTezProcessor.class.getName();
    }
    VertexExecutionContext executionContext = createVertexExecutionContext(mapWork);
    map = Vertex.create(mapWork.getName(), ProcessorDescriptor.create(procClassName).setUserPayload(serializedConf), numTasks, getContainerResource(conf));
    map.setTaskEnvironment(getContainerEnvironment(conf, true));
    map.setExecutionContext(executionContext);
    map.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
    assert mapWork.getAliasToWork().keySet().size() == 1;
    // Add the actual source input
    String alias = mapWork.getAliasToWork().keySet().iterator().next();
    map.addDataSource(alias, dataSource);
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(getBaseName(appJarLr), appJarLr);
    for (LocalResource lr : additionalLr) {
        localResources.put(getBaseName(lr), lr);
    }
    map.addTaskLocalFiles(localResources);
    return map;
}
Also used: Path(org.apache.hadoop.fs.Path) Vertex(org.apache.tez.dag.api.Vertex) PreWarmVertex(org.apache.tez.dag.api.PreWarmVertex) MergeFileWork(org.apache.hadoop.hive.ql.io.merge.MergeFileWork) UserPayload(org.apache.tez.dag.api.UserPayload) VertexExecutionContext(org.apache.tez.dag.api.Vertex.VertexExecutionContext) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) IOException(java.io.IOException) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) FileSystem(org.apache.hadoop.fs.FileSystem) InputInitializerDescriptor(org.apache.tez.dag.api.InputInitializerDescriptor) DataSourceDescriptor(org.apache.tez.dag.api.DataSourceDescriptor)
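
For context, a vertex produced this way ends up in a Tez DAG submitted through an open TezClient session. A minimal sketch using org.apache.tez.dag.api.DAG and org.apache.tez.dag.api.client.DAGClient (the DAG name and session variable are illustrative):

// Sketch: assemble the vertex into a DAG and run it.
DAG dag = DAG.create("hive-query");            // illustrative DAG name
dag.addVertex(map);
DAGClient dagClient = session.submitDAG(dag);  // session: an open TezClient
dagClient.waitForCompletion();                 // returns the final DAGStatus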

Example 65 with LocalResource

Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.

The class TestTezTask, method testGetExtraLocalResources:

@Test
public void testGetExtraLocalResources() throws Exception {
    final String[] inputOutputJars = new String[] { "file:///tmp/foo.jar" };
    LocalResource res = mock(LocalResource.class);
    final List<LocalResource> resources = Collections.singletonList(res);
    final Map<String, LocalResource> resMap = new HashMap<String, LocalResource>();
    resMap.put("foo.jar", res);
    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars)).thenReturn(resources);
    when(utils.getBaseName(res)).thenReturn("foo.jar");
    assertEquals(resMap, task.getExtraLocalResources(conf, path, inputOutputJars));
}
Also used: HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) Test(org.junit.Test)
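
The test depends on conf, path, utils, and task fixtures initialized elsewhere in TestTezTask. A plausible @Before setup looks like the following sketch; the field names mirror the test, but the wiring is an assumption:

@Before
public void setUp() {
    // Hypothetical fixture setup for the mocks used above.
    conf = new JobConf();
    path = new Path("/tmp/" + UUID.randomUUID());
    utils = mock(DagUtils.class);
    task = new TezTask(utils);  // assumes a test-visible constructor taking DagUtils
}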

Aggregations

LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 128
Path (org.apache.hadoop.fs.Path): 84
HashMap (java.util.HashMap): 67
Test (org.junit.Test): 48
ArrayList (java.util.ArrayList): 42
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 40
IOException (java.io.IOException): 37
File (java.io.File): 30
FileSystem (org.apache.hadoop.fs.FileSystem): 29
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 29
Configuration (org.apache.hadoop.conf.Configuration): 28
URL (org.apache.hadoop.yarn.api.records.URL): 26
FileStatus (org.apache.hadoop.fs.FileStatus): 25
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 24
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 22
ByteBuffer (java.nio.ByteBuffer): 18
LocalResourceVisibility (org.apache.hadoop.yarn.api.records.LocalResourceVisibility): 18
Credentials (org.apache.hadoop.security.Credentials): 17
StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest): 17
DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer): 16