Use of org.apache.flink.yarn.configuration.YarnResourceManagerDriverConfiguration in project flink by apache.
The class UtilsTest, method testCreateTaskExecutorCredentials:
@Test
public void testCreateTaskExecutorCredentials() throws Exception {
    File root = temporaryFolder.getRoot();
    File home = new File(root, "home");
    boolean created = home.mkdir();
    assertTrue(created);
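    // Populate the container environment variables that the YARN driver
    // configuration expects (application id, client home dir, ship files, etc.).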
    Configuration flinkConf = new Configuration();
    YarnConfiguration yarnConf = new YarnConfiguration();
    Map<String, String> env = new HashMap<>();
    env.put(YarnConfigKeys.ENV_APP_ID, "foo");
    env.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, home.getAbsolutePath());
    env.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, "");
    env.put(YarnConfigKeys.ENV_FLINK_CLASSPATH, "");
    env.put(YarnConfigKeys.ENV_HADOOP_USER_NAME, "foo");
    env.put(
            YarnConfigKeys.FLINK_DIST_JAR,
            new YarnLocalResourceDescriptor(
                            "flink.jar",
                            new Path(root.toURI()),
                            0,
                            System.currentTimeMillis(),
                            LocalResourceVisibility.APPLICATION,
                            LocalResourceType.FILE)
                    .toString());
    env.put(YarnConfigKeys.FLINK_YARN_FILES, "");
    env.put(ApplicationConstants.Environment.PWD.key(), home.getAbsolutePath());
    env = Collections.unmodifiableMap(env);
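    // The driver configuration is built entirely from the environment map;
    // the third constructor argument (the web interface URL) may be null here.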
    final YarnResourceManagerDriverConfiguration yarnResourceManagerDriverConfiguration =
            new YarnResourceManagerDriverConfiguration(env, "localhost", null);
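    // Write an AM/RM token and an HDFS delegation token into a token storage
    // file, mimicking the credentials YARN hands to the application master.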
    File credentialFile = temporaryFolder.newFile("container_tokens");
    final Text amRmTokenKind = AMRMTokenIdentifier.KIND_NAME;
    final Text hdfsDelegationTokenKind = new Text("HDFS_DELEGATION_TOKEN");
    final Text amRmTokenService = new Text("rm-ip:8030");
    final Text hdfsDelegationTokenService = new Text("ha-hdfs:hadoop-namespace");
    Credentials amCredentials = new Credentials();
    amCredentials.addToken(
            amRmTokenService,
            new Token<>(new byte[4], new byte[4], amRmTokenKind, amRmTokenService));
    amCredentials.addToken(
            hdfsDelegationTokenService,
            new Token<>(new byte[4], new byte[4], hdfsDelegationTokenKind, hdfsDelegationTokenService));
    amCredentials.writeTokenStorageFile(
            new org.apache.hadoop.fs.Path(credentialFile.getAbsolutePath()), yarnConf);
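    // Build the TaskManager process spec and container parameters for a
    // total process memory budget of 1g.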
    TaskExecutorProcessSpec spec =
            TaskExecutorProcessUtils.newProcessSpecBuilder(flinkConf)
                    .withTotalProcessMemory(MemorySize.parse("1g"))
                    .build();
    ContaineredTaskManagerParameters tmParams =
            new ContaineredTaskManagerParameters(spec, new HashMap<>(1));
    Configuration taskManagerConf = new Configuration();
    String workingDirectory = root.getAbsolutePath();
    Class<?> taskManagerMainClass = YarnTaskExecutorRunner.class;
    ContainerLaunchContext ctx;
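    // Point HADOOP_TOKEN_FILE_LOCATION at the prepared token file while the
    // launch context is created, then restore the original environment.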
    final Map<String, String> originalEnv = System.getenv();
    try {
        Map<String, String> systemEnv = new HashMap<>(originalEnv);
        systemEnv.put("HADOOP_TOKEN_FILE_LOCATION", credentialFile.getAbsolutePath());
        CommonTestUtils.setEnv(systemEnv);
        ctx =
                Utils.createTaskExecutorContext(
                        flinkConf,
                        yarnConf,
                        yarnResourceManagerDriverConfiguration,
                        tmParams,
                        "",
                        workingDirectory,
                        taskManagerMainClass,
                        LOG);
    } finally {
        CommonTestUtils.setEnv(originalEnv);
    }
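    // Deserialize the tokens that were packed into the container launch context.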
    Credentials credentials = new Credentials();
    try (DataInputStream dis =
            new DataInputStream(new ByteArrayInputStream(ctx.getTokens().array()))) {
        credentials.readTokenStorageStream(dis);
    }
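    // Scan the deserialized credentials for the two token kinds written above.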
    Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
    boolean hasHdfsDelegationToken = false;
    boolean hasAmRmToken = false;
    for (Token<? extends TokenIdentifier> token : tokens) {
        if (token.getKind().equals(amRmTokenKind)) {
            hasAmRmToken = true;
        } else if (token.getKind().equals(hdfsDelegationTokenKind)) {
            hasHdfsDelegationToken = true;
        }
    }
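    // The HDFS delegation token must be forwarded to the TaskExecutor, while
    // the AM/RM token must be filtered out: it belongs to the application master only.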
    assertTrue(hasHdfsDelegationToken);
    assertFalse(hasAmRmToken);
}