Search in sources:

Example 1 with JobManagerProcessSpec

Use of org.apache.flink.runtime.jobmanager.JobManagerProcessSpec in project flink by apache.

The class BashJavaUtils, method getJmResourceParams.

/**
 * Generate and print JVM parameters of Flink Master resources as one line.
 */
@VisibleForTesting
static List<String> getJmResourceParams(Configuration configuration) {
    JobManagerProcessSpec jobManagerProcessSpec =
            JobManagerProcessUtils.processSpecFromConfigWithNewOptionToInterpretLegacyHeap(
                    configuration, JobManagerOptions.JVM_HEAP_MEMORY);
    logMasterConfiguration(jobManagerProcessSpec);
    return Arrays.asList(
            JobManagerProcessUtils.generateJvmParametersStr(jobManagerProcessSpec, configuration),
            JobManagerProcessUtils.generateDynamicConfigsStr(jobManagerProcessSpec));
}
Also used : JobManagerProcessSpec(org.apache.flink.runtime.jobmanager.JobManagerProcessSpec) VisibleForTesting(org.apache.flink.annotation.VisibleForTesting)
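
For orientation, here is a minimal, self-contained sketch (not Flink code) of how a caller could stitch the two returned lines into a JVM launch command. The entrypoint class and all literal parameter values are illustrative placeholders, not captured Flink output.

// Sketch only: element 0 carries the JVM memory parameters, element 1 the
// dynamic -D configuration overrides, mirroring the list built above.
import java.util.Arrays;
import java.util.List;

public class JmResourceParamsSketch {
    public static void main(String[] args) {
        // Stand-in for the result of getJmResourceParams(configuration); values are made up.
        List<String> params = Arrays.asList(
                "-Xmx469762048 -Xms469762048 -XX:MaxMetaspaceSize=268435456",
                "-D jobmanager.memory.off-heap.size=134217728b");
        String entrypoint = "org.apache.flink.runtime.entrypoint.StandaloneSessionClusterEntrypoint";
        // Assemble: java <jvm params> <entrypoint> <dynamic configs>
        String command = String.join(" ", "java", params.get(0), entrypoint, params.get(1));
        System.out.println(command);
    }
}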

Example 2 with JobManagerProcessSpec

Use of org.apache.flink.runtime.jobmanager.JobManagerProcessSpec in project flink by apache.

The class YarnClusterDescriptorTest, method testSetupApplicationMasterContainer.

@Test
public void testSetupApplicationMasterContainer() {
    Configuration cfg = new Configuration();
    YarnClusterDescriptor clusterDescriptor = createYarnClusterDescriptor(cfg);
    final JobManagerProcessSpec jobManagerProcessSpec = createDefaultJobManagerProcessSpec(1024);
    final String java = "$JAVA_HOME/bin/java";
    final String jvmmem = JobManagerProcessUtils.generateJvmParametersStr(jobManagerProcessSpec, cfg);
    final String dynamicParameters = JobManagerProcessUtils.generateDynamicConfigsStr(jobManagerProcessSpec);
    // if set
    final String jvmOpts = "-Djvm";
    // if set
    final String jmJvmOpts = "-DjmJvm";
    final String krb5 = "-Djava.security.krb5.conf=krb5.conf";
    final String logfile = "-Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + // if set
    "/jobmanager.log\"";
    final String logback = "-Dlogback.configurationFile=file:" + // if set
    YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME;
    final String log4j = "-Dlog4j.configuration=file:" + YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME + " -Dlog4j.configurationFile=file:" + // if set
    YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME;
    final String mainClass = clusterDescriptor.getYarnSessionClusterEntrypoint();
    final String redirects = "1> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out " + "2> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err";
    try {
        // no logging, with/out krb5
        assertEquals(java + " " + jvmmem + "" + // jvmOpts
        "" + // logging
        " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        assertEquals(java + " " + jvmmem + " " + krb5 + // jvmOpts
        "" + // logging
        " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // logback only, with/out krb5
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + "" + // jvmOpts
        " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + " " + krb5 + // jvmOpts
        " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // log4j, with/out krb5
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME);
        assertEquals(java + " " + jvmmem + "" + // jvmOpts
        " " + logfile + " " + log4j + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME);
        assertEquals(java + " " + jvmmem + " " + krb5 + // jvmOpts
        " " + logfile + " " + log4j + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // logback, with/out krb5
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + "" + // jvmOpts
        " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + " " + krb5 + // jvmOpts
        " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // logback, with/out krb5, different JVM opts
        // IMPORTANT: Be aware that we are using side effects here to modify the created
        // YarnClusterDescriptor,
        // because we have a reference to the ClusterDescriptor's configuration which we modify
        // continuously
        cfg.setString(CoreOptions.FLINK_JVM_OPTIONS, jvmOpts);
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + " " + jvmOpts + " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + " " + jvmOpts + " " + krb5 + // jvmOpts
        " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // log4j, with/out krb5, different JVM opts
        // IMPORTANT: Be aware that we are using side effects here to modify the created
        // YarnClusterDescriptor
        cfg.setString(CoreOptions.FLINK_JM_JVM_OPTIONS, jmJvmOpts);
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME);
        assertEquals(java + " " + jvmmem + " " + jvmOpts + " " + jmJvmOpts + " " + logfile + " " + log4j + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME);
        assertEquals(java + " " + jvmmem + " " + jvmOpts + " " + jmJvmOpts + " " + krb5 + // jvmOpts
        " " + logfile + " " + log4j + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // now try some configurations with different yarn.container-start-command-template
        // IMPORTANT: Be aware that we are using side effects here to modify the created
        // YarnClusterDescriptor
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        cfg.setString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE, "%java% 1 %jvmmem% 2 %jvmopts% 3 %logging% 4 %class% 5 %args% 6 %redirects%");
        assertEquals(java + " 1 " + jvmmem + " 2 " + jvmOpts + " " + jmJvmOpts + " " + krb5 + // jvmOpts
        " 3 " + logfile + " " + logback + " 4 " + mainClass + " 5 " + dynamicParameters + " 6 " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        cfg.setString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE, "%java% %logging% %jvmopts% %jvmmem% %class% %args% %redirects%");
        // IMPORTANT: Be aware that we are using side effects here to modify the created
        // YarnClusterDescriptor
        assertEquals(java + " " + logfile + " " + logback + " " + jvmOpts + " " + jmJvmOpts + " " + krb5 + // jvmOpts
        " " + jvmmem + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
    } finally {
        clusterDescriptor.close();
    }
}
Also used : ApplicationConfiguration(org.apache.flink.client.deployment.application.ApplicationConfiguration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.flink.configuration.Configuration) JobManagerProcessUtils.createDefaultJobManagerProcessSpec(org.apache.flink.runtime.jobmanager.JobManagerProcessUtils.createDefaultJobManagerProcessSpec) JobManagerProcessSpec(org.apache.flink.runtime.jobmanager.JobManagerProcessSpec) Matchers.containsString(org.hamcrest.Matchers.containsString) Test(org.junit.Test)
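
The last two assertions above exercise yarn.container-start-command-template, which builds the start command by substituting %-delimited placeholders such as %java% and %jvmmem%. The following standalone sketch illustrates that substitution idea; it is not Flink's implementation, and the helper name and sample values are invented for illustration.

import java.util.LinkedHashMap;
import java.util.Map;

public class StartCommandTemplateSketch {
    // Replace each %key% in the template with its value, then tidy the
    // whitespace left behind by empty placeholders.
    static String expand(String template, Map<String, String> values) {
        String command = template;
        for (Map.Entry<String, String> e : values.entrySet()) {
            command = command.replace("%" + e.getKey() + "%", e.getValue());
        }
        return command.replaceAll("\\s+", " ").trim();
    }

    public static void main(String[] args) {
        Map<String, String> values = new LinkedHashMap<>();
        values.put("java", "$JAVA_HOME/bin/java");
        values.put("jvmmem", "-Xmx424m");
        values.put("jvmopts", "-Djvm -DjmJvm");
        values.put("logging", "-Dlog.file=jobmanager.log");
        values.put("class", "org.apache.flink.yarn.entrypoint.YarnSessionClusterEntrypoint");
        values.put("args", "-D jobmanager.memory.heap.size=424m");
        values.put("redirects", "1> jobmanager.out 2> jobmanager.err");
        System.out.println(expand("%java% %logging% %jvmopts% %jvmmem% %class% %args% %redirects%", values));
    }
}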

Example 3 with JobManagerProcessSpec

Use of org.apache.flink.runtime.jobmanager.JobManagerProcessSpec in project flink-mirror by flink-ci.

The class BashJavaUtils, method getJmResourceParams.

/**
 * Generate and print JVM parameters of Flink Master resources as one line.
 */
@VisibleForTesting
static List<String> getJmResourceParams(Configuration configuration) {
    JobManagerProcessSpec jobManagerProcessSpec =
            JobManagerProcessUtils.processSpecFromConfigWithNewOptionToInterpretLegacyHeap(
                    configuration, JobManagerOptions.JVM_HEAP_MEMORY);
    logMasterConfiguration(jobManagerProcessSpec);
    return Arrays.asList(
            JobManagerProcessUtils.generateJvmParametersStr(jobManagerProcessSpec, configuration),
            JobManagerProcessUtils.generateDynamicConfigsStr(jobManagerProcessSpec));
}
Also used : JobManagerProcessSpec(org.apache.flink.runtime.jobmanager.JobManagerProcessSpec) VisibleForTesting(org.apache.flink.annotation.VisibleForTesting)

Example 4 with JobManagerProcessSpec

Use of org.apache.flink.runtime.jobmanager.JobManagerProcessSpec in project flink-mirror by flink-ci.

The class YarnClusterDescriptor, method startAppMaster.

private ApplicationReport startAppMaster(Configuration configuration, String applicationName, String yarnClusterEntrypoint, JobGraph jobGraph, YarnClient yarnClient, YarnClientApplication yarnApplication, ClusterSpecification clusterSpecification) throws Exception {
    // ------------------ Initialize the file systems -------------------------
    org.apache.flink.core.fs.FileSystem.initialize(configuration, PluginUtils.createPluginManagerFromRootFolder(configuration));
    final FileSystem fs = FileSystem.get(yarnConfiguration);
    // Hard-coded check for the GoogleHadoopFileSystem client, because it does not override the getScheme() method.
    if (!fs.getClass().getSimpleName().equals("GoogleHadoopFileSystem") && fs.getScheme().startsWith("file")) {
        LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
                + "specified Hadoop configuration path is wrong and the system is using the default Hadoop configuration values. "
                + "The Flink YARN client needs to store its files in a distributed file system.");
    }
    ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();
    final List<Path> providedLibDirs = Utils.getQualifiedRemoteSharedPaths(configuration, yarnConfiguration);
    Path stagingDirPath = getStagingDir(fs);
    FileSystem stagingDirFs = stagingDirPath.getFileSystem(yarnConfiguration);
    final YarnApplicationFileUploader fileUploader = YarnApplicationFileUploader.from(stagingDirFs, stagingDirPath, providedLibDirs, appContext.getApplicationId(), getFileReplication());
    // The files need to be shipped and added to classpath.
    Set<File> systemShipFiles = new HashSet<>(shipFiles.size());
    for (File file : shipFiles) {
        systemShipFiles.add(file.getAbsoluteFile());
    }
    final String logConfigFilePath = configuration.getString(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE);
    if (logConfigFilePath != null) {
        systemShipFiles.add(new File(logConfigFilePath));
    }
    // Set-up ApplicationSubmissionContext for the application
    final ApplicationId appId = appContext.getApplicationId();
    // ------------------ Add Zookeeper namespace to local flinkConfiguration ------
    setHAClusterIdIfNotSet(configuration, appId);
    if (HighAvailabilityMode.isHighAvailabilityModeActivated(configuration)) {
        // activate re-execution of failed applications
        appContext.setMaxAppAttempts(configuration.getInteger(YarnConfigOptions.APPLICATION_ATTEMPTS.key(), YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS));
        activateHighAvailabilitySupport(appContext);
    } else {
        // set number of application retries to 1 in the default case
        appContext.setMaxAppAttempts(configuration.getInteger(YarnConfigOptions.APPLICATION_ATTEMPTS.key(), 1));
    }
    final Set<Path> userJarFiles = new HashSet<>();
    if (jobGraph != null) {
        userJarFiles.addAll(jobGraph.getUserJars().stream().map(f -> f.toUri()).map(Path::new).collect(Collectors.toSet()));
    }
    final List<URI> jarUrls = ConfigUtils.decodeListFromConfig(configuration, PipelineOptions.JARS, URI::create);
    if (jarUrls != null && YarnApplicationClusterEntryPoint.class.getName().equals(yarnClusterEntrypoint)) {
        userJarFiles.addAll(jarUrls.stream().map(Path::new).collect(Collectors.toSet()));
    }
    // only for per job mode
    if (jobGraph != null) {
        for (Map.Entry<String, DistributedCache.DistributedCacheEntry> entry : jobGraph.getUserArtifacts().entrySet()) {
            // only upload local files
            if (!Utils.isRemotePath(entry.getValue().filePath)) {
                Path localPath = new Path(entry.getValue().filePath);
                Tuple2<Path, Long> remoteFileInfo = fileUploader.uploadLocalFileToRemote(localPath, entry.getKey());
                jobGraph.setUserArtifactRemotePath(entry.getKey(), remoteFileInfo.f0.toString());
            }
        }
        jobGraph.writeUserArtifactEntriesToConfiguration();
    }
    if (providedLibDirs == null || providedLibDirs.isEmpty()) {
        addLibFoldersToShipFiles(systemShipFiles);
    }
    // Register all files in provided lib dirs as local resources with public visibility
    // and upload the remaining dependencies as local resources with APPLICATION visibility.
    final List<String> systemClassPaths = fileUploader.registerProvidedLocalResources();
    final List<String> uploadedDependencies = fileUploader.registerMultipleLocalResources(systemShipFiles.stream().map(e -> new Path(e.toURI())).collect(Collectors.toSet()), Path.CUR_DIR, LocalResourceType.FILE);
    systemClassPaths.addAll(uploadedDependencies);
    // Plugin files only need to be shipped and should not be added to classpath.
    if (providedLibDirs == null || providedLibDirs.isEmpty()) {
        Set<File> shipOnlyFiles = new HashSet<>();
        addPluginsFoldersToShipFiles(shipOnlyFiles);
        fileUploader.registerMultipleLocalResources(shipOnlyFiles.stream().map(e -> new Path(e.toURI())).collect(Collectors.toSet()), Path.CUR_DIR, LocalResourceType.FILE);
    }
    if (!shipArchives.isEmpty()) {
        fileUploader.registerMultipleLocalResources(shipArchives.stream().map(e -> new Path(e.toURI())).collect(Collectors.toSet()), Path.CUR_DIR, LocalResourceType.ARCHIVE);
    }
    // Upload and register user jars
    final List<String> userClassPaths = fileUploader.registerMultipleLocalResources(userJarFiles, userJarInclusion == YarnConfigOptions.UserJarInclusion.DISABLED ? ConfigConstants.DEFAULT_FLINK_USR_LIB_DIR : Path.CUR_DIR, LocalResourceType.FILE);
    // usrlib will be automatically shipped if it exists.
    if (ClusterEntrypointUtils.tryFindUserLibDirectory().isPresent()) {
        final Set<File> usrLibShipFiles = new HashSet<>();
        addUsrLibFolderToShipFiles(usrLibShipFiles);
        final List<String> usrLibClassPaths = fileUploader.registerMultipleLocalResources(usrLibShipFiles.stream().map(e -> new Path(e.toURI())).collect(Collectors.toSet()), Path.CUR_DIR, LocalResourceType.FILE);
        userClassPaths.addAll(usrLibClassPaths);
    }
    if (userJarInclusion == YarnConfigOptions.UserJarInclusion.ORDER) {
        systemClassPaths.addAll(userClassPaths);
    }
    // normalize classpath by sorting
    Collections.sort(systemClassPaths);
    Collections.sort(userClassPaths);
    // classpath assembler
    StringBuilder classPathBuilder = new StringBuilder();
    if (userJarInclusion == YarnConfigOptions.UserJarInclusion.FIRST) {
        for (String userClassPath : userClassPaths) {
            classPathBuilder.append(userClassPath).append(File.pathSeparator);
        }
    }
    for (String classPath : systemClassPaths) {
        classPathBuilder.append(classPath).append(File.pathSeparator);
    }
    // Setup jar for ApplicationMaster
    final YarnLocalResourceDescriptor localResourceDescFlinkJar = fileUploader.uploadFlinkDist(flinkJarPath);
    classPathBuilder.append(localResourceDescFlinkJar.getResourceKey()).append(File.pathSeparator);
    // TODO: server use user main method to generate job graph
    if (jobGraph != null) {
        File tmpJobGraphFile = null;
        try {
            tmpJobGraphFile = File.createTempFile(appId.toString(), null);
            try (FileOutputStream output = new FileOutputStream(tmpJobGraphFile);
                ObjectOutputStream obOutput = new ObjectOutputStream(output)) {
                obOutput.writeObject(jobGraph);
            }
            final String jobGraphFilename = "job.graph";
            configuration.setString(JOB_GRAPH_FILE_PATH, jobGraphFilename);
            fileUploader.registerSingleLocalResource(jobGraphFilename, new Path(tmpJobGraphFile.toURI()), "", LocalResourceType.FILE, true, false);
            classPathBuilder.append(jobGraphFilename).append(File.pathSeparator);
        } catch (Exception e) {
            LOG.warn("Add job graph to local resource fail.");
            throw e;
        } finally {
            if (tmpJobGraphFile != null && !tmpJobGraphFile.delete()) {
                LOG.warn("Fail to delete temporary file {}.", tmpJobGraphFile.toPath());
            }
        }
    }
    // Upload the flink configuration
    // write out configuration file
    File tmpConfigurationFile = null;
    try {
        tmpConfigurationFile = File.createTempFile(appId + "-flink-conf.yaml", null);
        BootstrapTools.writeConfiguration(configuration, tmpConfigurationFile);
        String flinkConfigKey = "flink-conf.yaml";
        fileUploader.registerSingleLocalResource(flinkConfigKey, new Path(tmpConfigurationFile.getAbsolutePath()), "", LocalResourceType.FILE, true, true);
        classPathBuilder.append("flink-conf.yaml").append(File.pathSeparator);
    } finally {
        if (tmpConfigurationFile != null && !tmpConfigurationFile.delete()) {
            LOG.warn("Fail to delete temporary file {}.", tmpConfigurationFile.toPath());
        }
    }
    if (userJarInclusion == YarnConfigOptions.UserJarInclusion.LAST) {
        for (String userClassPath : userClassPaths) {
            classPathBuilder.append(userClassPath).append(File.pathSeparator);
        }
    }
    // To support Yarn Secure Integration Test Scenario
    // In Integration test setup, the Yarn containers created by YarnMiniCluster does not have
    // the Yarn site XML
    // and KRB5 configuration files. We are adding these files as container local resources for
    // the container
    // applications (JM/TMs) to have proper secure cluster setup
    Path remoteYarnSiteXmlPath = null;
    if (System.getenv("IN_TESTS") != null) {
        File f = new File(System.getenv("YARN_CONF_DIR"), Utils.YARN_SITE_FILE_NAME);
        LOG.info("Adding Yarn configuration {} to the AM container local resource bucket", f.getAbsolutePath());
        Path yarnSitePath = new Path(f.getAbsolutePath());
        remoteYarnSiteXmlPath = fileUploader.registerSingleLocalResource(Utils.YARN_SITE_FILE_NAME, yarnSitePath, "", LocalResourceType.FILE, false, false).getPath();
        if (System.getProperty("java.security.krb5.conf") != null) {
            configuration.set(SecurityOptions.KERBEROS_KRB5_PATH, System.getProperty("java.security.krb5.conf"));
        }
    }
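    // Ship krb5.conf to the containers when a Kerberos config path is configured.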
    Path remoteKrb5Path = null;
    boolean hasKrb5 = false;
    String krb5Config = configuration.get(SecurityOptions.KERBEROS_KRB5_PATH);
    if (!StringUtils.isNullOrWhitespaceOnly(krb5Config)) {
        final File krb5 = new File(krb5Config);
        LOG.info("Adding KRB5 configuration {} to the AM container local resource bucket", krb5.getAbsolutePath());
        final Path krb5ConfPath = new Path(krb5.getAbsolutePath());
        remoteKrb5Path = fileUploader.registerSingleLocalResource(Utils.KRB5_FILE_NAME, krb5ConfPath, "", LocalResourceType.FILE, false, false).getPath();
        hasKrb5 = true;
    }
    Path remotePathKeytab = null;
    String localizedKeytabPath = null;
    String keytab = configuration.getString(SecurityOptions.KERBEROS_LOGIN_KEYTAB);
    if (keytab != null) {
        boolean localizeKeytab = flinkConfiguration.getBoolean(YarnConfigOptions.SHIP_LOCAL_KEYTAB);
        localizedKeytabPath = flinkConfiguration.getString(YarnConfigOptions.LOCALIZED_KEYTAB_PATH);
        if (localizeKeytab) {
            // Localize the keytab to YARN containers via local resource.
            LOG.info("Adding keytab {} to the AM container local resource bucket", keytab);
            remotePathKeytab = fileUploader.registerSingleLocalResource(localizedKeytabPath, new Path(keytab), "", LocalResourceType.FILE, false, false).getPath();
        } else {
            // Assume the keytab is pre-installed in the container.
            localizedKeytabPath = flinkConfiguration.getString(YarnConfigOptions.LOCALIZED_KEYTAB_PATH);
        }
    }
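    // Build the JobManager process/memory spec from the configuration; it determines
    // the JVM arguments and dynamic -D options of the AM launch command below.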
    final JobManagerProcessSpec processSpec = JobManagerProcessUtils.processSpecFromConfigWithNewOptionToInterpretLegacyHeap(flinkConfiguration, JobManagerOptions.TOTAL_PROCESS_MEMORY);
    final ContainerLaunchContext amContainer = setupApplicationMasterContainer(yarnClusterEntrypoint, hasKrb5, processSpec);
    // setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        // set HDFS delegation tokens when security is enabled
        LOG.info("Adding delegation token to the AM container.");
        final List<Path> pathsToObtainToken = new ArrayList<>();
        boolean fetchToken = configuration.getBoolean(SecurityOptions.KERBEROS_FETCH_DELEGATION_TOKEN);
        if (fetchToken) {
            List<Path> yarnAccessList = ConfigUtils.decodeListFromConfig(configuration, YarnConfigOptions.YARN_ACCESS, Path::new);
            pathsToObtainToken.addAll(yarnAccessList);
            pathsToObtainToken.addAll(fileUploader.getRemotePaths());
        }
        Utils.setTokensFor(amContainer, pathsToObtainToken, yarnConfiguration, fetchToken);
    }
    amContainer.setLocalResources(fileUploader.getRegisteredLocalResources());
    fileUploader.close();
    // Setup CLASSPATH and environment variables for ApplicationMaster
    final Map<String, String> appMasterEnv = new HashMap<>();
    // set user specified app master environment variables
    appMasterEnv.putAll(ConfigurationUtils.getPrefixedKeyValuePairs(ResourceManagerOptions.CONTAINERIZED_MASTER_ENV_PREFIX, configuration));
    // set Flink app class path
    appMasterEnv.put(YarnConfigKeys.ENV_FLINK_CLASSPATH, classPathBuilder.toString());
    // set Flink on YARN internal configuration values
    appMasterEnv.put(YarnConfigKeys.FLINK_DIST_JAR, localResourceDescFlinkJar.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_APP_ID, appId.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, fileUploader.getHomeDir().toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, encodeYarnLocalResourceDescriptorListToString(fileUploader.getEnvShipResourceList()));
    appMasterEnv.put(YarnConfigKeys.FLINK_YARN_FILES, fileUploader.getApplicationDir().toUri().toString());
    // https://github.com/apache/hadoop/blob/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md#identity-on-an-insecure-cluster-hadoop_user_name
    appMasterEnv.put(YarnConfigKeys.ENV_HADOOP_USER_NAME, UserGroupInformation.getCurrentUser().getUserName());
    if (localizedKeytabPath != null) {
        appMasterEnv.put(YarnConfigKeys.LOCAL_KEYTAB_PATH, localizedKeytabPath);
        String principal = configuration.getString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL);
        appMasterEnv.put(YarnConfigKeys.KEYTAB_PRINCIPAL, principal);
        if (remotePathKeytab != null) {
            appMasterEnv.put(YarnConfigKeys.REMOTE_KEYTAB_PATH, remotePathKeytab.toString());
        }
    }
    // To support Yarn Secure Integration Test Scenario
    if (remoteYarnSiteXmlPath != null) {
        appMasterEnv.put(YarnConfigKeys.ENV_YARN_SITE_XML_PATH, remoteYarnSiteXmlPath.toString());
    }
    if (remoteKrb5Path != null) {
        appMasterEnv.put(YarnConfigKeys.ENV_KRB5_PATH, remoteKrb5Path.toString());
    }
    // set classpath from YARN configuration
    Utils.setupYarnClassPath(yarnConfiguration, appMasterEnv);
    amContainer.setEnvironment(appMasterEnv);
    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(clusterSpecification.getMasterMemoryMB());
    capability.setVirtualCores(flinkConfiguration.getInteger(YarnConfigOptions.APP_MASTER_VCORES));
    final String customApplicationName = customName != null ? customName : applicationName;
    appContext.setApplicationName(customApplicationName);
    appContext.setApplicationType(applicationType != null ? applicationType : "Apache Flink");
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    // Set priority for application
    int priorityNum = flinkConfiguration.getInteger(YarnConfigOptions.APPLICATION_PRIORITY);
    if (priorityNum >= 0) {
        Priority priority = Priority.newInstance(priorityNum);
        appContext.setPriority(priority);
    }
    if (yarnQueue != null) {
        appContext.setQueue(yarnQueue);
    }
    setApplicationNodeLabel(appContext);
    setApplicationTags(appContext);
    // add a hook to clean up in case deployment fails
    Thread deploymentFailureHook = new DeploymentFailureHook(yarnApplication, fileUploader.getApplicationDir());
    Runtime.getRuntime().addShutdownHook(deploymentFailureHook);
    LOG.info("Submitting application master " + appId);
    yarnClient.submitApplication(appContext);
    LOG.info("Waiting for the cluster to be allocated");
    final long startTime = System.currentTimeMillis();
    ApplicationReport report;
    YarnApplicationState lastAppState = YarnApplicationState.NEW;
    loop: while (true) {
        try {
            report = yarnClient.getApplicationReport(appId);
        } catch (IOException e) {
            throw new YarnDeploymentException("Failed to deploy the cluster.", e);
        }
        YarnApplicationState appState = report.getYarnApplicationState();
        LOG.debug("Application State: {}", appState);
        switch(appState) {
            case FAILED:
            case KILLED:
                throw new YarnDeploymentException("The YARN application unexpectedly switched to state " + appState + " during deployment. \n" + "Diagnostics from YARN: " + report.getDiagnostics() + "\n" + "If log aggregation is enabled on your cluster, use this command to further investigate the issue:\n" + "yarn logs -applicationId " + appId);
            // break ..
            case RUNNING:
                LOG.info("YARN application has been deployed successfully.");
                break loop;
            case FINISHED:
                LOG.info("YARN application has been finished successfully.");
                break loop;
            default:
                if (appState != lastAppState) {
                    LOG.info("Deploying cluster, current state " + appState);
                }
                if (System.currentTimeMillis() - startTime > 60000) {
                    LOG.info("Deployment took more than 60 seconds. Please check if the requested resources are available in the YARN cluster");
                }
        }
        lastAppState = appState;
        Thread.sleep(250);
    }
    // since deployment was successful, remove the hook
    ShutdownHookUtil.removeShutdownHook(deploymentFailureHook, getClass().getSimpleName(), LOG);
    return report;
}
Also used : SecurityOptions(org.apache.flink.configuration.SecurityOptions) Tuple2(org.apache.flink.api.java.tuple.Tuple2) FileSystem(org.apache.hadoop.fs.FileSystem) ResourceManagerOptions(org.apache.flink.configuration.ResourceManagerOptions) YarnClientApplication(org.apache.hadoop.yarn.client.api.YarnClientApplication) YarnJobClusterEntrypoint(org.apache.flink.yarn.entrypoint.YarnJobClusterEntrypoint) ConfigUtils(org.apache.flink.configuration.ConfigUtils) YarnClusterMetrics(org.apache.hadoop.yarn.api.records.YarnClusterMetrics) ConverterUtils(org.apache.hadoop.yarn.util.ConverterUtils) Map(java.util.Map) ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) Resource(org.apache.hadoop.yarn.api.records.Resource) YarnLogConfigUtil(org.apache.flink.yarn.configuration.YarnLogConfigUtil) YarnConfigOptionsInternal(org.apache.flink.yarn.configuration.YarnConfigOptionsInternal) Set(java.util.Set) JobManagerOptions(org.apache.flink.configuration.JobManagerOptions) YarnConfigOptions(org.apache.flink.yarn.configuration.YarnConfigOptions) InvocationTargetException(java.lang.reflect.InvocationTargetException) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) ClusterClientProvider(org.apache.flink.client.program.ClusterClientProvider) HadoopUtils(org.apache.flink.runtime.util.HadoopUtils) ClusterEntrypointUtils(org.apache.flink.runtime.entrypoint.ClusterEntrypointUtils) FlinkException(org.apache.flink.util.FlinkException) ByteArrayOutputStream(java.io.ByteArrayOutputStream) DFSConfigKeys(org.apache.hadoop.hdfs.DFSConfigKeys) ShutdownHookUtil(org.apache.flink.util.ShutdownHookUtil) ArrayList(java.util.ArrayList) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) JobManagerProcessUtils(org.apache.flink.runtime.jobmanager.JobManagerProcessUtils) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) RestOptions(org.apache.flink.configuration.RestOptions) ObjectOutputStream(java.io.ObjectOutputStream) ConfigOption(org.apache.flink.configuration.ConfigOption) Nullable(javax.annotation.Nullable) FileOutputStream(java.io.FileOutputStream) IOException(java.io.IOException) GetNewApplicationResponse(org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse) BootstrapTools(org.apache.flink.runtime.clusterframework.BootstrapTools) File(java.io.File) VisibleForTesting(org.apache.flink.annotation.VisibleForTesting) ConfigurationUtils(org.apache.flink.configuration.ConfigurationUtils) ClusterDescriptor(org.apache.flink.client.deployment.ClusterDescriptor) CoreOptions(org.apache.flink.configuration.CoreOptions) PluginUtils(org.apache.flink.core.plugin.PluginUtils) Records(org.apache.hadoop.yarn.util.Records) LOCAL_RESOURCE_DESCRIPTOR_SEPARATOR(org.apache.flink.yarn.YarnConfigKeys.LOCAL_RESOURCE_DESCRIPTOR_SEPARATOR) PluginConfig(org.apache.flink.core.plugin.PluginConfig) ApplicationConstants(org.apache.hadoop.yarn.api.ApplicationConstants) URLDecoder(java.net.URLDecoder) PipelineOptions(org.apache.flink.configuration.PipelineOptions) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) LoggerFactory(org.slf4j.LoggerFactory) ENV_FLINK_LIB_DIR(org.apache.flink.configuration.ConfigConstants.ENV_FLINK_LIB_DIR) ApplicationConfiguration(org.apache.flink.client.deployment.application.ApplicationConfiguration) YarnDeploymentTarget(org.apache.flink.yarn.configuration.YarnDeploymentTarget) Path(org.apache.hadoop.fs.Path) 
NodeReport(org.apache.hadoop.yarn.api.records.NodeReport) URI(java.net.URI) Preconditions.checkNotNull(org.apache.flink.util.Preconditions.checkNotNull) Method(java.lang.reflect.Method) Priority(org.apache.hadoop.yarn.api.records.Priority) Collection(java.util.Collection) YarnSessionClusterEntrypoint(org.apache.flink.yarn.entrypoint.YarnSessionClusterEntrypoint) DistributedCache(org.apache.flink.api.common.cache.DistributedCache) Preconditions(org.apache.flink.util.Preconditions) StringUtils(org.apache.flink.util.StringUtils) Collectors(java.util.stream.Collectors) ClusterDeploymentException(org.apache.flink.client.deployment.ClusterDeploymentException) List(java.util.List) Preconditions.checkArgument(org.apache.flink.util.Preconditions.checkArgument) FinalApplicationStatus(org.apache.hadoop.yarn.api.records.FinalApplicationStatus) YarnApplicationState(org.apache.hadoop.yarn.api.records.YarnApplicationState) Optional(java.util.Optional) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IllegalConfigurationException(org.apache.flink.configuration.IllegalConfigurationException) LocalResourceType(org.apache.hadoop.yarn.api.records.LocalResourceType) YarnClient(org.apache.hadoop.yarn.client.api.YarnClient) NodeState(org.apache.hadoop.yarn.api.records.NodeState) ClusterSpecification(org.apache.flink.client.deployment.ClusterSpecification) HashMap(java.util.HashMap) RestClusterClient(org.apache.flink.client.program.rest.RestClusterClient) HashSet(java.util.HashSet) Charset(java.nio.charset.Charset) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) ConfigConstants(org.apache.flink.configuration.ConfigConstants) QueueInfo(org.apache.hadoop.yarn.api.records.QueueInfo) ClusterRetrieveException(org.apache.flink.client.deployment.ClusterRetrieveException) HighAvailabilityMode(org.apache.flink.runtime.jobmanager.HighAvailabilityMode) LinkedList(java.util.LinkedList) PrintStream(java.io.PrintStream) JOB_GRAPH_FILE_PATH(org.apache.flink.runtime.entrypoint.component.FileJobGraphRetriever.JOB_GRAPH_FILE_PATH) Logger(org.slf4j.Logger) JobManagerProcessSpec(org.apache.flink.runtime.jobmanager.JobManagerProcessSpec) YarnApplicationClusterEntryPoint(org.apache.flink.yarn.entrypoint.YarnApplicationClusterEntryPoint) Configuration(org.apache.flink.configuration.Configuration) CollectionUtil(org.apache.flink.util.CollectionUtil) DEFAULT_FLINK_USR_LIB_DIR(org.apache.flink.configuration.ConfigConstants.DEFAULT_FLINK_USR_LIB_DIR) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) ClusterEntrypoint(org.apache.flink.runtime.entrypoint.ClusterEntrypoint) Collections(java.util.Collections) HighAvailabilityOptions(org.apache.flink.configuration.HighAvailabilityOptions) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) YarnApplicationState(org.apache.hadoop.yarn.api.records.YarnApplicationState) ObjectOutputStream(java.io.ObjectOutputStream) URI(java.net.URI) FileSystem(org.apache.hadoop.fs.FileSystem) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) HashSet(java.util.HashSet) Path(org.apache.hadoop.fs.Path) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) IOException(java.io.IOException) InvocationTargetException(java.lang.reflect.InvocationTargetException) FlinkException(org.apache.flink.util.FlinkException) IOException(java.io.IOException) 
ClusterDeploymentException(org.apache.flink.client.deployment.ClusterDeploymentException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IllegalConfigurationException(org.apache.flink.configuration.IllegalConfigurationException) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) ClusterRetrieveException(org.apache.flink.client.deployment.ClusterRetrieveException) YarnJobClusterEntrypoint(org.apache.flink.yarn.entrypoint.YarnJobClusterEntrypoint) YarnSessionClusterEntrypoint(org.apache.flink.yarn.entrypoint.YarnSessionClusterEntrypoint) YarnApplicationClusterEntryPoint(org.apache.flink.yarn.entrypoint.YarnApplicationClusterEntryPoint) ClusterEntrypoint(org.apache.flink.runtime.entrypoint.ClusterEntrypoint) ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) JobManagerProcessSpec(org.apache.flink.runtime.jobmanager.JobManagerProcessSpec) FileOutputStream(java.io.FileOutputStream) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File) Map(java.util.Map) HashMap(java.util.HashMap)
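
The tail of startAppMaster polls YARN for the application report until the application is RUNNING (or finished) and fails fast on FAILED/KILLED. Below is a minimal, self-contained sketch of that polling pattern; the Reporter interface is a stand-in for YarnClient, introduced purely for illustration.

import java.io.IOException;

public class DeploymentPollSketch {
    enum State { NEW, ACCEPTED, RUNNING, FINISHED, FAILED, KILLED }

    // Stand-in for YarnClient.getApplicationReport(appId).getYarnApplicationState().
    interface Reporter {
        State currentState() throws IOException;
    }

    static State waitForDeployment(Reporter reporter) throws Exception {
        State last = State.NEW;
        final long start = System.currentTimeMillis();
        while (true) {
            State state = reporter.currentState();
            switch (state) {
                case FAILED:
                case KILLED:
                    throw new RuntimeException(
                            "The application unexpectedly switched to " + state + " during deployment.");
                case RUNNING:
                case FINISHED:
                    return state; // deployed (or already done)
                default:
                    if (state != last) {
                        System.out.println("Deploying cluster, current state " + state);
                    }
                    if (System.currentTimeMillis() - start > 60_000) {
                        System.out.println("Deployment took more than 60 seconds; check YARN resources.");
                    }
            }
            last = state;
            Thread.sleep(250); // same poll interval as the original loop
        }
    }
}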

Example 5 with JobManagerProcessSpec

Use of org.apache.flink.runtime.jobmanager.JobManagerProcessSpec in project flink-mirror by flink-ci.

The class YarnClusterDescriptorTest, method testSetupApplicationMasterContainer.

@Test
public void testSetupApplicationMasterContainer() {
    Configuration cfg = new Configuration();
    YarnClusterDescriptor clusterDescriptor = createYarnClusterDescriptor(cfg);
    final JobManagerProcessSpec jobManagerProcessSpec = createDefaultJobManagerProcessSpec(1024);
    final String java = "$JAVA_HOME/bin/java";
    final String jvmmem = JobManagerProcessUtils.generateJvmParametersStr(jobManagerProcessSpec, cfg);
    final String dynamicParameters = JobManagerProcessUtils.generateDynamicConfigsStr(jobManagerProcessSpec);
    // if set
    final String jvmOpts = "-Djvm";
    // if set
    final String jmJvmOpts = "-DjmJvm";
    final String krb5 = "-Djava.security.krb5.conf=krb5.conf";
    final String logfile = "-Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + // if set
    "/jobmanager.log\"";
    final String logback = "-Dlogback.configurationFile=file:" + // if set
    YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME;
    final String log4j = "-Dlog4j.configuration=file:" + YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME + " -Dlog4j.configurationFile=file:" + // if set
    YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME;
    final String mainClass = clusterDescriptor.getYarnSessionClusterEntrypoint();
    final String redirects = "1> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out " + "2> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err";
    try {
        // no logging, with/out krb5
        assertEquals(java + " " + jvmmem + "" + // jvmOpts
        "" + // logging
        " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        assertEquals(java + " " + jvmmem + " " + krb5 + // jvmOpts
        "" + // logging
        " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // logback only, with/out krb5
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + "" + // jvmOpts
        " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + " " + krb5 + // jvmOpts
        " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // log4j, with/out krb5
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME);
        assertEquals(java + " " + jvmmem + "" + // jvmOpts
        " " + logfile + " " + log4j + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME);
        assertEquals(java + " " + jvmmem + " " + krb5 + // jvmOpts
        " " + logfile + " " + log4j + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // logback, with/out krb5
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + "" + // jvmOpts
        " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + " " + krb5 + // jvmOpts
        " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // logback, with/out krb5, different JVM opts
        // IMPORTANT: Be aware that we are using side effects here to modify the created
        // YarnClusterDescriptor,
        // because we have a reference to the ClusterDescriptor's configuration which we modify
        // continuously
        cfg.setString(CoreOptions.FLINK_JVM_OPTIONS, jvmOpts);
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + " " + jvmOpts + " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        assertEquals(java + " " + jvmmem + " " + jvmOpts + " " + krb5 + // jvmOpts
        " " + logfile + " " + logback + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // log4j, with/out krb5, different JVM opts
        // IMPORTANT: Be aware that we are using side effects here to modify the created
        // YarnClusterDescriptor
        cfg.setString(CoreOptions.FLINK_JM_JVM_OPTIONS, jmJvmOpts);
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME);
        assertEquals(java + " " + jvmmem + " " + jvmOpts + " " + jmJvmOpts + " " + logfile + " " + log4j + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, false, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME);
        assertEquals(java + " " + jvmmem + " " + jvmOpts + " " + jmJvmOpts + " " + krb5 + // jvmOpts
        " " + logfile + " " + log4j + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        // now try some configurations with different yarn.container-start-command-template
        // IMPORTANT: Be aware that we are using side effects here to modify the created
        // YarnClusterDescriptor
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        cfg.setString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE, "%java% 1 %jvmmem% 2 %jvmopts% 3 %logging% 4 %class% 5 %args% 6 %redirects%");
        assertEquals(java + " 1 " + jvmmem + " 2 " + jvmOpts + " " + jmJvmOpts + " " + krb5 + // jvmOpts
        " 3 " + logfile + " " + logback + " 4 " + mainClass + " 5 " + dynamicParameters + " 6 " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
        cfg.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        cfg.setString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE, "%java% %logging% %jvmopts% %jvmmem% %class% %args% %redirects%");
        // IMPORTANT: Be aware that we are using side effects here to modify the created
        // YarnClusterDescriptor
        assertEquals(java + " " + logfile + " " + logback + " " + jvmOpts + " " + jmJvmOpts + " " + krb5 + // jvmOpts
        " " + jvmmem + " " + mainClass + " " + dynamicParameters + " " + redirects, clusterDescriptor.setupApplicationMasterContainer(mainClass, true, jobManagerProcessSpec).getCommands().get(0));
    } finally {
        clusterDescriptor.close();
    }
}
Also used : ApplicationConfiguration(org.apache.flink.client.deployment.application.ApplicationConfiguration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.flink.configuration.Configuration) JobManagerProcessUtils.createDefaultJobManagerProcessSpec(org.apache.flink.runtime.jobmanager.JobManagerProcessUtils.createDefaultJobManagerProcessSpec) JobManagerProcessSpec(org.apache.flink.runtime.jobmanager.JobManagerProcessSpec) Matchers.containsString(org.hamcrest.Matchers.containsString) Test(org.junit.Test)

Aggregations

JobManagerProcessSpec (org.apache.flink.runtime.jobmanager.JobManagerProcessSpec) 16
VisibleForTesting (org.apache.flink.annotation.VisibleForTesting) 7
ApplicationConfiguration (org.apache.flink.client.deployment.application.ApplicationConfiguration) 7
Configuration (org.apache.flink.configuration.Configuration) 7
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 7
Test (org.junit.Test) 6
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 4
File (java.io.File) 4
FileOutputStream (java.io.FileOutputStream) 4
IOException (java.io.IOException) 4
ObjectOutputStream (java.io.ObjectOutputStream) 4
PrintStream (java.io.PrintStream) 4
UnsupportedEncodingException (java.io.UnsupportedEncodingException) 4
InvocationTargetException (java.lang.reflect.InvocationTargetException) 4
Method (java.lang.reflect.Method) 4
URI (java.net.URI) 4
URLDecoder (java.net.URLDecoder) 4
Charset (java.nio.charset.Charset) 4
ArrayList (java.util.ArrayList) 4
Collection (java.util.Collection) 4