Search in sources:

Example 1 with ContaineredTaskManagerParameters

Use of org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters in the Apache Flink project.

From the class YarnResourceManager, method createTaskExecutorLaunchContext.

/**
 * Creates the launch context used to start a TaskExecutor in a YARN container.
 *
 * @param resource the YARN resource assigned to the container; its memory size is used
 *                 to derive the TaskManager memory parameters
 * @param containerId the id of the container being launched, also exported to the
 *                    container environment for identification
 * @param host the node the container will run on, exported to the container environment
 * @return a fully populated {@link ContainerLaunchContext} for the TaskExecutor
 * @throws Exception if the launch context cannot be created
 */
private ContainerLaunchContext createTaskExecutorLaunchContext(Resource resource, String containerId, String host) throws Exception {
    // init the ContainerLaunchContext
    final String currDir = ENV.get(ApplicationConstants.Environment.PWD.key());
    final ContaineredTaskManagerParameters taskManagerParameters = ContaineredTaskManagerParameters.create(flinkConfig, resource.getMemory(), 1);
    // FIX: added the missing space before the first placeholder ("TaskExecutor{}" ->
    // "TaskExecutor {}"), consistent with the equivalent log line in YarnResourceManagerDriver.
    LOG.info("TaskExecutor {} will be started with container size {} MB, JVM heap size {} MB, " + "JVM direct memory limit {} MB", containerId, taskManagerParameters.taskManagerTotalMemoryMB(), taskManagerParameters.taskManagerHeapSizeMB(), taskManagerParameters.taskManagerDirectMemoryLimitMB());
    // registration timeout for the TaskExecutor, read from the Flink configuration (seconds)
    int timeout = flinkConfig.getInteger(ConfigConstants.TASK_MANAGER_MAX_REGISTRATION_DURATION, DEFAULT_TASK_MANAGER_REGISTRATION_DURATION);
    FiniteDuration teRegistrationTimeout = new FiniteDuration(timeout, TimeUnit.SECONDS);
    final Configuration taskManagerConfig = BootstrapTools.generateTaskManagerConfiguration(flinkConfig, "", 0, 1, teRegistrationTimeout);
    LOG.debug("TaskManager configuration: {}", taskManagerConfig);
    ContainerLaunchContext taskExecutorLaunchContext = Utils.createTaskExecutorContext(flinkConfig, yarnConfig, ENV, taskManagerParameters, taskManagerConfig, currDir, YarnTaskExecutorRunner.class, LOG);
    // set a special environment variable to uniquely identify this container
    taskExecutorLaunchContext.getEnvironment().put(ENV_FLINK_CONTAINER_ID, containerId);
    taskExecutorLaunchContext.getEnvironment().put(ENV_FLINK_NODE_ID, host);
    return taskExecutorLaunchContext;
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ResourceManagerConfiguration(org.apache.flink.runtime.resourcemanager.ResourceManagerConfiguration) Configuration(org.apache.flink.configuration.Configuration) ContaineredTaskManagerParameters(org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters) FiniteDuration(scala.concurrent.duration.FiniteDuration) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext)

Example 2 with ContaineredTaskManagerParameters

use of org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters in project flink by apache.

From the class YarnApplicationMasterRunnerTest, method testCreateTaskExecutorContext.

/**
 * Verifies that {@code Utils.createTaskExecutorContext} registers the Flink jar
 * as a local resource with the {@code file} URI scheme when the jar is supplied
 * from a plain directory on the local filesystem.
 */
@Test
public void testCreateTaskExecutorContext() throws Exception {
    final File root = folder.getRoot();
    final File home = new File(root, "home");
    assertTrue(home.mkdir());
    // Stub answer that simply echoes back the default value passed as the second argument.
    final Answer<Object> echoSecondArgument = new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            return invocation.getArguments()[1];
        }
    };
    final YarnConfiguration yarnConf = mock(YarnConfiguration.class);
    doAnswer(echoSecondArgument).when(yarnConf).get(anyString(), anyString());
    doAnswer(echoSecondArgument).when(yarnConf).getInt(anyString(), anyInt());
    // getStrings(key, defaults...) returns the first default wrapped in an array.
    doAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            return new String[] { (String) invocation.getArguments()[1] };
        }
    }).when(yarnConf).getStrings(anyString(), Mockito.<String>anyVararg());
    // Minimal environment expected by Utils.createTaskExecutorContext.
    final Map<String, String> env = ImmutableMap.<String, String>builder().put(ENV_APP_ID, "foo").put(ENV_CLIENT_HOME_DIR, home.getAbsolutePath()).put(ENV_CLIENT_SHIP_FILES, "").put(ENV_FLINK_CLASSPATH, "").put(ENV_HADOOP_USER_NAME, "foo").put(FLINK_JAR_PATH, root.toURI().toString()).build();
    final Configuration flinkConf = new Configuration();
    final ContaineredTaskManagerParameters tmParams = mock(ContaineredTaskManagerParameters.class);
    final Configuration taskManagerConf = new Configuration();
    final String workingDirectory = root.getAbsolutePath();
    final Class<?> taskManagerMainClass = YarnApplicationMasterRunnerTest.class;
    final ContainerLaunchContext ctx = Utils.createTaskExecutorContext(flinkConf, yarnConf, env, tmParams, taskManagerConf, workingDirectory, taskManagerMainClass, LOG);
    // The jar lives on the local filesystem, so the resolved resource must use "file".
    assertEquals("file", ctx.getLocalResources().get("flink.jar").getResource().getScheme());
}
Also used : Configuration(org.apache.flink.configuration.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ContaineredTaskManagerParameters(org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters) Matchers.anyString(org.mockito.Matchers.anyString) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) Answer(org.mockito.stubbing.Answer) Mockito.doAnswer(org.mockito.Mockito.doAnswer) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) InvocationOnMock(org.mockito.invocation.InvocationOnMock) File(java.io.File) Test(org.junit.Test)

Example 3 with ContaineredTaskManagerParameters

use of org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters in project flink by apache.

From the class YarnResourceManagerDriver, method createTaskExecutorLaunchContext.

/**
 * Builds the YARN launch context for a TaskExecutor container.
 *
 * @param containerId resource id of the container; its string form and metadata are
 *                    written into the TaskManager configuration
 * @param host the node the container will run on, exported to the container environment
 * @param taskExecutorProcessSpec the memory/CPU process spec for the TaskExecutor
 * @return the populated {@link ContainerLaunchContext}
 * @throws Exception if the launch context cannot be created
 */
private ContainerLaunchContext createTaskExecutorLaunchContext(ResourceID containerId, String host, TaskExecutorProcessSpec taskExecutorProcessSpec) throws Exception {
    // Working directory of the application master process.
    final String workingDirectory = configuration.getCurrentDir();
    final ContaineredTaskManagerParameters containeredParameters = ContaineredTaskManagerParameters.create(flinkConfig, taskExecutorProcessSpec);
    log.info("TaskExecutor {} will be started on {} with {}.", containerId.getStringWithMetadata(), host, taskExecutorProcessSpec);
    // Give the TaskManager its own copy of the configuration, tagged with its resource id.
    final Configuration taskManagerConfiguration = BootstrapTools.cloneConfiguration(flinkConfig);
    taskManagerConfiguration.set(TaskManagerOptions.TASK_MANAGER_RESOURCE_ID, containerId.getResourceIdString());
    taskManagerConfiguration.set(TaskManagerOptionsInternal.TASK_MANAGER_RESOURCE_ID_METADATA, containerId.getMetadata());
    // Only the properties that differ from the client configuration are passed on the command line.
    final String dynamicProperties = BootstrapTools.getDynamicPropertiesAsString(flinkClientConfig, taskManagerConfiguration);
    log.debug("TaskManager configuration: {}", taskManagerConfiguration);
    final ContainerLaunchContext launchContext = Utils.createTaskExecutorContext(flinkConfig, yarnConfig, configuration, containeredParameters, dynamicProperties, workingDirectory, YarnTaskExecutorRunner.class, log);
    launchContext.getEnvironment().put(ENV_FLINK_NODE_ID, host);
    return launchContext;
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) YarnResourceManagerDriverConfiguration(org.apache.flink.yarn.configuration.YarnResourceManagerDriverConfiguration) Configuration(org.apache.flink.configuration.Configuration) GlobalConfiguration(org.apache.flink.configuration.GlobalConfiguration) ContaineredTaskManagerParameters(org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext)

Example 4 with ContaineredTaskManagerParameters

use of org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters in project flink by apache.

From the class UtilsTest, method testCreateTaskExecutorCredentials.

/**
 * Verifies that the tokens found via HADOOP_TOKEN_FILE_LOCATION are forwarded into the
 * TaskExecutor launch context, with the exception of the AMRM token, which must be
 * filtered out (asserted at the end of this test).
 */
@Test
public void testCreateTaskExecutorCredentials() throws Exception {
    // Fake a client home directory under a temporary root.
    File root = temporaryFolder.getRoot();
    File home = new File(root, "home");
    boolean created = home.mkdir();
    assertTrue(created);
    Configuration flinkConf = new Configuration();
    YarnConfiguration yarnConf = new YarnConfiguration();
    // Minimal environment that Utils.createTaskExecutorContext expects to find.
    Map<String, String> env = new HashMap<>();
    env.put(YarnConfigKeys.ENV_APP_ID, "foo");
    env.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, home.getAbsolutePath());
    env.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, "");
    env.put(YarnConfigKeys.ENV_FLINK_CLASSPATH, "");
    env.put(YarnConfigKeys.ENV_HADOOP_USER_NAME, "foo");
    env.put(YarnConfigKeys.FLINK_DIST_JAR, new YarnLocalResourceDescriptor("flink.jar", new Path(root.toURI()), 0, System.currentTimeMillis(), LocalResourceVisibility.APPLICATION, LocalResourceType.FILE).toString());
    env.put(YarnConfigKeys.FLINK_YARN_FILES, "");
    env.put(ApplicationConstants.Environment.PWD.key(), home.getAbsolutePath());
    env = Collections.unmodifiableMap(env);
    final YarnResourceManagerDriverConfiguration yarnResourceManagerDriverConfiguration = new YarnResourceManagerDriverConfiguration(env, "localhost", null);
    // Write a credentials file containing one AMRM token and one HDFS delegation token.
    File credentialFile = temporaryFolder.newFile("container_tokens");
    final Text amRmTokenKind = AMRMTokenIdentifier.KIND_NAME;
    final Text hdfsDelegationTokenKind = new Text("HDFS_DELEGATION_TOKEN");
    final Text amRmTokenService = new Text("rm-ip:8030");
    final Text hdfsDelegationTokenService = new Text("ha-hdfs:hadoop-namespace");
    Credentials amCredentials = new Credentials();
    amCredentials.addToken(amRmTokenService, new Token<>(new byte[4], new byte[4], amRmTokenKind, amRmTokenService));
    amCredentials.addToken(hdfsDelegationTokenService, new Token<>(new byte[4], new byte[4], hdfsDelegationTokenKind, hdfsDelegationTokenService));
    amCredentials.writeTokenStorageFile(new org.apache.hadoop.fs.Path(credentialFile.getAbsolutePath()), yarnConf);
    TaskExecutorProcessSpec spec = TaskExecutorProcessUtils.newProcessSpecBuilder(flinkConf).withTotalProcessMemory(MemorySize.parse("1g")).build();
    ContaineredTaskManagerParameters tmParams = new ContaineredTaskManagerParameters(spec, new HashMap<>(1));
    Configuration taskManagerConf = new Configuration();
    String workingDirectory = root.getAbsolutePath();
    Class<?> taskManagerMainClass = YarnTaskExecutorRunner.class;
    ContainerLaunchContext ctx;
    // Point HADOOP_TOKEN_FILE_LOCATION at the credentials file while the launch context
    // is created; the finally block restores the original process environment even if
    // context creation fails.
    final Map<String, String> originalEnv = System.getenv();
    try {
        Map<String, String> systemEnv = new HashMap<>(originalEnv);
        systemEnv.put("HADOOP_TOKEN_FILE_LOCATION", credentialFile.getAbsolutePath());
        CommonTestUtils.setEnv(systemEnv);
        ctx = Utils.createTaskExecutorContext(flinkConf, yarnConf, yarnResourceManagerDriverConfiguration, tmParams, "", workingDirectory, taskManagerMainClass, LOG);
    } finally {
        CommonTestUtils.setEnv(originalEnv);
    }
    // Deserialize the tokens that were embedded into the launch context.
    Credentials credentials = new Credentials();
    try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(ctx.getTokens().array()))) {
        credentials.readTokenStorageStream(dis);
    }
    Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
    boolean hasHdfsDelegationToken = false;
    boolean hasAmRmToken = false;
    for (Token<? extends TokenIdentifier> token : tokens) {
        if (token.getKind().equals(amRmTokenKind)) {
            hasAmRmToken = true;
        } else if (token.getKind().equals(hdfsDelegationTokenKind)) {
            hasHdfsDelegationToken = true;
        }
    }
    // The HDFS delegation token must be forwarded; the AMRM token must not be.
    assertTrue(hasHdfsDelegationToken);
    assertFalse(hasAmRmToken);
}
Also used : AMRMTokenIdentifier(org.apache.hadoop.yarn.security.AMRMTokenIdentifier) TokenIdentifier(org.apache.hadoop.security.token.TokenIdentifier) YarnResourceManagerDriverConfiguration(org.apache.flink.yarn.configuration.YarnResourceManagerDriverConfiguration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) YarnResourceManagerDriverConfiguration(org.apache.flink.yarn.configuration.YarnResourceManagerDriverConfiguration) Configuration(org.apache.flink.configuration.Configuration) HashMap(java.util.HashMap) ContaineredTaskManagerParameters(org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters) Token(org.apache.hadoop.security.token.Token) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Path(org.apache.hadoop.fs.Path) Path(org.apache.hadoop.fs.Path) TaskExecutorProcessSpec(org.apache.flink.runtime.clusterframework.TaskExecutorProcessSpec) Text(org.apache.hadoop.io.Text) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) DataInputStream(java.io.DataInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) File(java.io.File) Credentials(org.apache.hadoop.security.Credentials) Test(org.junit.Test)

Example 5 with ContaineredTaskManagerParameters

use of org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters in project flink by apache.

From the class LaunchableMesosWorker, method launch.

/**
 * Construct the TaskInfo needed to launch the worker.
 *
 * @param slaveId the assigned slave.
 * @param assignment the assignment details (resources and ports granted by Mesos).
 * @return a fully-baked TaskInfo.
 * @throws IllegalArgumentException if fewer ports were assigned than {@code TM_PORT_KEYS} requires
 * @throws IllegalStateException if the configured container type is not supported
 */
@Override
public Protos.TaskInfo launch(Protos.SlaveID slaveId, TaskAssignmentResult assignment) {
    ContaineredTaskManagerParameters tmParams = params.containeredParameters();
    final Configuration dynamicProperties = new Configuration();
    // incorporate the dynamic properties set by the template
    dynamicProperties.addAll(containerSpec.getDynamicConfiguration());
    // build a TaskInfo with assigned resources, environment variables, etc
    final Protos.TaskInfo.Builder taskInfo = Protos.TaskInfo.newBuilder().setSlaveId(slaveId).setTaskId(taskID).setName(taskID.getValue()).addResources(scalar("cpus", assignment.getRequest().getCPUs())).addResources(scalar("mem", assignment.getRequest().getMemory()));
    final Protos.CommandInfo.Builder cmd = taskInfo.getCommandBuilder();
    final Protos.Environment.Builder env = cmd.getEnvironmentBuilder();
    final StringBuilder jvmArgs = new StringBuilder();
    // use the assigned ports for the TM
    if (assignment.getAssignedPorts().size() < TM_PORT_KEYS.length) {
        // FIX: corrected spelling in the error message ("unsufficient" -> "insufficient")
        throw new IllegalArgumentException("insufficient # of ports assigned");
    }
    for (int i = 0; i < TM_PORT_KEYS.length; i++) {
        int port = assignment.getAssignedPorts().get(i);
        String key = TM_PORT_KEYS[i];
        taskInfo.addResources(ranges("ports", range(port, port)));
        // expose the chosen port to the TM as a dynamic configuration property
        dynamicProperties.setInteger(key, port);
    }
    // ship additional files
    for (ContainerSpecification.Artifact artifact : containerSpec.getArtifacts()) {
        cmd.addUris(Utils.uri(resolver, artifact));
    }
    // propagate environment variables (use the tmParams captured above rather than
    // calling params.containeredParameters() a second time)
    for (Map.Entry<String, String> entry : tmParams.taskManagerEnv().entrySet()) {
        env.addVariables(variable(entry.getKey(), entry.getValue()));
    }
    for (Map.Entry<String, String> entry : containerSpec.getEnvironmentVariables().entrySet()) {
        env.addVariables(variable(entry.getKey(), entry.getValue()));
    }
    // propagate the Mesos task ID to the TM
    env.addVariables(variable(MesosConfigKeys.ENV_FLINK_CONTAINER_ID, taskInfo.getTaskId().getValue()));
    // finalize the memory parameters
    jvmArgs.append(" -Xms").append(tmParams.taskManagerHeapSizeMB()).append("m");
    jvmArgs.append(" -Xmx").append(tmParams.taskManagerHeapSizeMB()).append("m");
    jvmArgs.append(" -XX:MaxDirectMemorySize=").append(tmParams.taskManagerDirectMemoryLimitMB()).append("m");
    // pass dynamic system properties
    jvmArgs.append(' ').append(ContainerSpecification.formatSystemProperties(containerSpec.getSystemProperties()));
    // finalize JVM args
    env.addVariables(variable(MesosConfigKeys.ENV_JVM_ARGS, jvmArgs.toString()));
    // build the launch command w/ dynamic application properties
    StringBuilder launchCommand = new StringBuilder("$FLINK_HOME/bin/mesos-taskmanager.sh ");
    launchCommand.append(ContainerSpecification.formatSystemProperties(dynamicProperties));
    cmd.setValue(launchCommand.toString());
    // build the container info
    Protos.ContainerInfo.Builder containerInfo = null;
    switch(params.containerType()) {
        case MESOS:
            // Mesos containerizer; only set an image if one was configured.
            if (params.containerImageName().isDefined()) {
                containerInfo = Protos.ContainerInfo.newBuilder().setType(Protos.ContainerInfo.Type.MESOS).setMesos(Protos.ContainerInfo.MesosInfo.newBuilder().setImage(Protos.Image.newBuilder().setType(Protos.Image.Type.DOCKER).setDocker(Protos.Image.Docker.newBuilder().setName(params.containerImageName().get()))));
            }
            break;
        case DOCKER:
            // Docker containerizer requires an image name; uses host networking.
            assert (params.containerImageName().isDefined());
            containerInfo = Protos.ContainerInfo.newBuilder().setType(Protos.ContainerInfo.Type.DOCKER).setDocker(Protos.ContainerInfo.DockerInfo.newBuilder().setNetwork(Protos.ContainerInfo.DockerInfo.Network.HOST).setImage(params.containerImageName().get()));
            break;
        default:
            throw new IllegalStateException("unsupported container type");
    }
    if (containerInfo != null) {
        taskInfo.setContainer(containerInfo);
    }
    return taskInfo.build();
}
Also used : Configuration(org.apache.flink.configuration.Configuration) ContaineredTaskManagerParameters(org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters) ContainerSpecification(org.apache.flink.runtime.clusterframework.ContainerSpecification) Map(java.util.Map)

Aggregations

ContaineredTaskManagerParameters (org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters)10 Configuration (org.apache.flink.configuration.Configuration)8 ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext)6 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)6 File (java.io.File)3 GlobalConfiguration (org.apache.flink.configuration.GlobalConfiguration)3 Test (org.junit.Test)3 ActorRef (akka.actor.ActorRef)2 TaskExecutorProcessSpec (org.apache.flink.runtime.clusterframework.TaskExecutorProcessSpec)2 YarnResourceManagerDriverConfiguration (org.apache.flink.yarn.configuration.YarnResourceManagerDriverConfiguration)2 Mockito.doAnswer (org.mockito.Mockito.doAnswer)2 InvocationOnMock (org.mockito.invocation.InvocationOnMock)2 Answer (org.mockito.stubbing.Answer)2 FiniteDuration (scala.concurrent.duration.FiniteDuration)2 ActorSystem (akka.actor.ActorSystem)1 Props (akka.actor.Props)1 JavaTestKit (akka.testkit.JavaTestKit)1 ByteArrayInputStream (java.io.ByteArrayInputStream)1 DataInputStream (java.io.DataInputStream)1 ArrayList (java.util.ArrayList)1