use of java.nio.file.FileVisitResult in project flink by apache.
the class AbstractYarnClusterDescriptor method startAppMaster.
public ApplicationReport startAppMaster(JobGraph jobGraph, YarnClient yarnClient, YarnClientApplication yarnApplication) throws Exception {
try {
org.apache.flink.core.fs.FileSystem.setDefaultScheme(flinkConfiguration);
} catch (IOException e) {
throw new IOException("Error while setting the default " + "filesystem scheme from configuration.", e);
}
// initialize file system
// Copy the application master jar to the filesystem
// Create a local resource to point to the destination jar path
final FileSystem fs = FileSystem.get(conf);
// hard coded check for the GoogleHDFS client because it's not overriding the getScheme() method.
if (!fs.getClass().getSimpleName().equals("GoogleHadoopFileSystem") && fs.getScheme().startsWith("file")) {
LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the " + "specified Hadoop configuration path is wrong and the system is using the default Hadoop configuration values." + "The Flink YARN client needs to store its files in a distributed file system");
}
ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();
Set<File> effectiveShipFiles = new HashSet<>(shipFiles.size());
for (File file : shipFiles) {
effectiveShipFiles.add(file.getAbsoluteFile());
}
//check if there is a logback or log4j file
File logbackFile = new File(configurationDirectory + File.separator + CONFIG_FILE_LOGBACK_NAME);
final boolean hasLogback = logbackFile.exists();
if (hasLogback) {
effectiveShipFiles.add(logbackFile);
}
File log4jFile = new File(configurationDirectory + File.separator + CONFIG_FILE_LOG4J_NAME);
final boolean hasLog4j = log4jFile.exists();
if (hasLog4j) {
effectiveShipFiles.add(log4jFile);
if (hasLogback) {
// both Log4j and Logback configuration files are present --> warn the user
LOG.warn("The configuration directory ('" + configurationDirectory + "') contains both LOG4J and " + "Logback configuration files. Please delete or rename one of them.");
}
}
addLibFolderToShipFiles(effectiveShipFiles);
// add the user jar to the classpath of the to-be-created cluster
if (userJarFiles != null) {
effectiveShipFiles.addAll(userJarFiles);
}
// Set-up ApplicationSubmissionContext for the application
final ApplicationId appId = appContext.getApplicationId();
// ------------------ Add Zookeeper namespace to local flinkConfiguration ------
String zkNamespace = getZookeeperNamespace();
// no user specified cli argument for namespace?
if (zkNamespace == null || zkNamespace.isEmpty()) {
// namespace defined in config? else use applicationId as default.
zkNamespace = flinkConfiguration.getString(HighAvailabilityOptions.HA_CLUSTER_ID, String.valueOf(appId));
setZookeeperNamespace(zkNamespace);
}
flinkConfiguration.setString(HighAvailabilityOptions.HA_CLUSTER_ID, zkNamespace);
if (HighAvailabilityMode.isHighAvailabilityModeActivated(flinkConfiguration)) {
// activate re-execution of failed applications
appContext.setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS));
activateHighAvailabilitySupport(appContext);
} else {
// set number of application retries to 1 in the default case
appContext.setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS, 1));
}
// local resource map for Yarn
final Map<String, LocalResource> localResources = new HashMap<>(2 + effectiveShipFiles.size());
// list of remote paths (after upload)
final List<Path> paths = new ArrayList<>(2 + effectiveShipFiles.size());
// classpath assembler
final StringBuilder classPathBuilder = new StringBuilder();
// ship list that enables reuse of resources for task manager containers
StringBuilder envShipFileList = new StringBuilder();
// upload and register ship files
for (File shipFile : effectiveShipFiles) {
LocalResource shipResources = Records.newRecord(LocalResource.class);
Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath());
Path remotePath = Utils.setupLocalResource(fs, appId.toString(), shipLocalPath, shipResources, fs.getHomeDirectory());
paths.add(remotePath);
localResources.put(shipFile.getName(), shipResources);
if (shipFile.isDirectory()) {
// add directories to the classpath
java.nio.file.Path shipPath = shipFile.toPath();
final java.nio.file.Path parentPath = shipPath.getParent();
Files.walkFileTree(shipPath, new SimpleFileVisitor<java.nio.file.Path>() {
@Override
public FileVisitResult preVisitDirectory(java.nio.file.Path dir, BasicFileAttributes attrs) throws IOException {
super.preVisitDirectory(dir, attrs);
java.nio.file.Path relativePath = parentPath.relativize(dir);
classPathBuilder.append(relativePath).append(File.separator).append("*").append(File.pathSeparator);
return FileVisitResult.CONTINUE;
}
});
} else {
// add files to the classpath
classPathBuilder.append(shipFile.getName()).append(File.pathSeparator);
}
envShipFileList.append(remotePath).append(",");
}
// Setup jar for ApplicationMaster
LocalResource appMasterJar = Records.newRecord(LocalResource.class);
LocalResource flinkConf = Records.newRecord(LocalResource.class);
Path remotePathJar = Utils.setupLocalResource(fs, appId.toString(), flinkJarPath, appMasterJar, fs.getHomeDirectory());
Path remotePathConf = Utils.setupLocalResource(fs, appId.toString(), flinkConfigurationPath, flinkConf, fs.getHomeDirectory());
localResources.put("flink.jar", appMasterJar);
localResources.put("flink-conf.yaml", flinkConf);
paths.add(remotePathJar);
classPathBuilder.append("flink.jar").append(File.pathSeparator);
paths.add(remotePathConf);
classPathBuilder.append("flink-conf.yaml").append(File.pathSeparator);
// TODO: the server should use the user's main method to generate the job graph
if (jobGraph != null) {
try {
File fp = File.createTempFile(appId.toString(), null);
fp.deleteOnExit();
try (FileOutputStream output = new FileOutputStream(fp);
ObjectOutputStream obOutput = new ObjectOutputStream(output)) {
obOutput.writeObject(jobGraph);
}
LocalResource jobgraph = Records.newRecord(LocalResource.class);
Path remoteJobGraph = Utils.setupLocalResource(fs, appId.toString(), new Path(fp.toURI()), jobgraph, fs.getHomeDirectory());
localResources.put("job.graph", jobgraph);
paths.add(remoteJobGraph);
classPathBuilder.append("job.graph").append(File.pathSeparator);
} catch (Exception e) {
LOG.warn("Add job graph to local resource fail");
throw e;
}
}
sessionFilesDir = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString() + "/");
FsPermission permission = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
// set permission for path.
fs.setPermission(sessionFilesDir, permission);
//To support the Yarn secure integration test scenario:
//in the integration test setup, the Yarn containers created by YarnMiniCluster do not have the yarn-site.xml
//and KRB5 configuration files. We add these files as container local resources so that the container
//applications (JM/TMs) have a proper secure cluster setup.
Path remoteKrb5Path = null;
Path remoteYarnSiteXmlPath = null;
boolean hasKrb5 = false;
if (System.getenv("IN_TESTS") != null) {
String krb5Config = System.getProperty("java.security.krb5.conf");
if (krb5Config != null && krb5Config.length() != 0) {
File krb5 = new File(krb5Config);
LOG.info("Adding KRB5 configuration {} to the AM container local resource bucket", krb5.getAbsolutePath());
LocalResource krb5ConfResource = Records.newRecord(LocalResource.class);
Path krb5ConfPath = new Path(krb5.getAbsolutePath());
remoteKrb5Path = Utils.setupLocalResource(fs, appId.toString(), krb5ConfPath, krb5ConfResource, fs.getHomeDirectory());
localResources.put(Utils.KRB5_FILE_NAME, krb5ConfResource);
File f = new File(System.getenv("YARN_CONF_DIR"), Utils.YARN_SITE_FILE_NAME);
LOG.info("Adding Yarn configuration {} to the AM container local resource bucket", f.getAbsolutePath());
LocalResource yarnConfResource = Records.newRecord(LocalResource.class);
Path yarnSitePath = new Path(f.getAbsolutePath());
remoteYarnSiteXmlPath = Utils.setupLocalResource(fs, appId.toString(), yarnSitePath, yarnConfResource, fs.getHomeDirectory());
localResources.put(Utils.YARN_SITE_FILE_NAME, yarnConfResource);
hasKrb5 = true;
}
}
// setup security tokens
LocalResource keytabResource = null;
Path remotePathKeytab = null;
String keytab = flinkConfiguration.getString(SecurityOptions.KERBEROS_LOGIN_KEYTAB);
if (keytab != null) {
LOG.info("Adding keytab {} to the AM container local resource bucket", keytab);
keytabResource = Records.newRecord(LocalResource.class);
Path keytabPath = new Path(keytab);
remotePathKeytab = Utils.setupLocalResource(fs, appId.toString(), keytabPath, keytabResource, fs.getHomeDirectory());
localResources.put(Utils.KEYTAB_FILE_NAME, keytabResource);
}
final ContainerLaunchContext amContainer = setupApplicationMasterContainer(hasLogback, hasLog4j, hasKrb5);
if (UserGroupInformation.isSecurityEnabled() && keytab == null) {
//set tokens only when keytab is not provided
LOG.info("Adding delegation token to the AM container..");
Utils.setTokensFor(amContainer, paths, conf);
}
amContainer.setLocalResources(localResources);
fs.close();
// Setup CLASSPATH and environment variables for ApplicationMaster
final Map<String, String> appMasterEnv = new HashMap<>();
// set user specified app master environment variables
appMasterEnv.putAll(Utils.getEnvironmentVariables(ConfigConstants.YARN_APPLICATION_MASTER_ENV_PREFIX, flinkConfiguration));
// set Flink app class path
appMasterEnv.put(YarnConfigKeys.ENV_FLINK_CLASSPATH, classPathBuilder.toString());
// set Flink on YARN internal configuration values
appMasterEnv.put(YarnConfigKeys.ENV_TM_COUNT, String.valueOf(taskManagerCount));
appMasterEnv.put(YarnConfigKeys.ENV_TM_MEMORY, String.valueOf(taskManagerMemoryMb));
appMasterEnv.put(YarnConfigKeys.FLINK_JAR_PATH, remotePathJar.toString());
appMasterEnv.put(YarnConfigKeys.ENV_APP_ID, appId.toString());
appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString());
appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, envShipFileList.toString());
appMasterEnv.put(YarnConfigKeys.ENV_SLOTS, String.valueOf(slots));
appMasterEnv.put(YarnConfigKeys.ENV_DETACHED, String.valueOf(detached));
appMasterEnv.put(YarnConfigKeys.ENV_ZOOKEEPER_NAMESPACE, getZookeeperNamespace());
// https://github.com/apache/hadoop/blob/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md#identity-on-an-insecure-cluster-hadoop_user_name
appMasterEnv.put(YarnConfigKeys.ENV_HADOOP_USER_NAME, UserGroupInformation.getCurrentUser().getUserName());
if (keytabResource != null) {
appMasterEnv.put(YarnConfigKeys.KEYTAB_PATH, remotePathKeytab.toString());
String principal = flinkConfiguration.getString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL);
appMasterEnv.put(YarnConfigKeys.KEYTAB_PRINCIPAL, principal);
}
//To support the Yarn secure integration test scenario
if (remoteYarnSiteXmlPath != null && remoteKrb5Path != null) {
appMasterEnv.put(YarnConfigKeys.ENV_YARN_SITE_XML_PATH, remoteYarnSiteXmlPath.toString());
appMasterEnv.put(YarnConfigKeys.ENV_KRB5_PATH, remoteKrb5Path.toString());
}
if (dynamicPropertiesEncoded != null) {
appMasterEnv.put(YarnConfigKeys.ENV_DYNAMIC_PROPERTIES, dynamicPropertiesEncoded);
}
// set classpath from YARN configuration
Utils.setupYarnClassPath(conf, appMasterEnv);
amContainer.setEnvironment(appMasterEnv);
// Set up resource type requirements for ApplicationMaster
Resource capability = Records.newRecord(Resource.class);
capability.setMemory(jobManagerMemoryMb);
capability.setVirtualCores(1);
String name;
if (customName == null) {
name = "Flink session with " + taskManagerCount + " TaskManagers";
if (detached) {
name += " (detached)";
}
} else {
name = customName;
}
appContext.setApplicationName(name);
appContext.setApplicationType("Apache Flink");
appContext.setAMContainerSpec(amContainer);
appContext.setResource(capability);
if (yarnQueue != null) {
appContext.setQueue(yarnQueue);
}
setApplicationTags(appContext);
// add a hook to clean up in case deployment fails
Thread deploymentFailureHook = new DeploymentFailureHook(yarnClient, yarnApplication);
Runtime.getRuntime().addShutdownHook(deploymentFailureHook);
LOG.info("Submitting application master " + appId);
yarnClient.submitApplication(appContext);
LOG.info("Waiting for the cluster to be allocated");
final long startTime = System.currentTimeMillis();
ApplicationReport report;
YarnApplicationState lastAppState = YarnApplicationState.NEW;
loop: while (true) {
try {
report = yarnClient.getApplicationReport(appId);
} catch (IOException e) {
throw new YarnDeploymentException("Failed to deploy the cluster.", e);
}
YarnApplicationState appState = report.getYarnApplicationState();
LOG.debug("Application State: {}", appState);
switch(appState) {
case FAILED:
//TODO: the finished state may be valid in flip-6
case FINISHED:
case KILLED:
throw new YarnDeploymentException("The YARN application unexpectedly switched to state " + appState + " during deployment. \n" + "Diagnostics from YARN: " + report.getDiagnostics() + "\n" + "If log aggregation is enabled on your cluster, use this command to further investigate the issue:\n" + "yarn logs -applicationId " + appId);
// unreachable: the exception above terminates this case
case RUNNING:
LOG.info("YARN application has been deployed successfully.");
break loop;
default:
if (appState != lastAppState) {
LOG.info("Deploying cluster, current state " + appState);
}
if (System.currentTimeMillis() - startTime > 60000) {
LOG.info("Deployment took more than 60 seconds. Please check if the requested resources are available in the YARN cluster");
}
}
lastAppState = appState;
Thread.sleep(250);
}
// print the application id so the user can cancel the application themselves
if (isDetachedMode()) {
LOG.info("The Flink YARN client has been started in detached mode. In order to stop " + "Flink on YARN, use the following command or a YARN web interface to stop " + "it:\nyarn application -kill " + appId + "\nPlease also note that the " + "temporary files of the YARN session in the home directoy will not be removed.");
}
// since deployment was successful, remove the hook
try {
Runtime.getRuntime().removeShutdownHook(deploymentFailureHook);
} catch (IllegalStateException e) {
// ignore: we are already running inside the shutdown hook, so it can no longer be removed
}
return report;
}
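Taken on its own, the ship-file walk above reduces to a small pattern: preVisitDirectory appends a "dir/*" entry to the classpath for every directory under a root. Below is a minimal, self-contained sketch of just that step; the class name and argument handling are ours, and it assumes the root is not the filesystem root (so getParent() is non-null).

import java.io.File;
import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

public class ClasspathWalkSketch {
    public static void main(String[] args) throws IOException {
        final Path root = Paths.get(args.length > 0 ? args[0] : ".").toAbsolutePath().normalize();
        // assumes root has a parent directory to relativize against, as the Flink code does
        final Path parent = root.getParent();
        final StringBuilder classPath = new StringBuilder();
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
                // append "<dir relative to parent>/*" for every visited directory
                classPath.append(parent.relativize(dir)).append(File.separator).append('*').append(File.pathSeparator);
                return FileVisitResult.CONTINUE;
            }
        });
        System.out.println(classPath);
    }
}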
use of java.nio.file.FileVisitResult in project OpenGrok by OpenGrok.
the class RuntimeEnvironment method startWatchDogService.
/**
 * Starts a watchdog service for a directory. It automatically reloads the
 * AuthorizationFramework if there was a change.
 *
 * Whether this service starts can be controlled by a context parameter in web.xml:
 * param-name: enableAuthorizationWatchDog
 *
 * @param directory root directory for plugins
 */
public void startWatchDogService(File directory) {
if (directory == null || !directory.isDirectory() || !directory.canRead()) {
LOGGER.log(Level.INFO, "Watch dog cannot be started - invalid directory: {0}", directory);
return;
}
watchDogThread = new Thread(new Runnable() {
@Override
public void run() {
try {
watchDogWatcher = FileSystems.getDefault().newWatchService();
Path dir = Paths.get(directory.getAbsolutePath());
Files.walkFileTree(dir, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult postVisitDirectory(Path d, IOException exc) throws IOException {
// attach monitor
d.register(watchDogWatcher, ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY);
return CONTINUE;
}
});
LOGGER.log(Level.INFO, "Watch dog started {0}", directory);
while (!Thread.currentThread().isInterrupted()) {
final WatchKey key;
try {
key = watchDogWatcher.take();
} catch (ClosedWatchServiceException x) {
break;
}
boolean reload = false;
for (WatchEvent<?> event : key.pollEvents()) {
final WatchEvent.Kind<?> kind = event.kind();
if (kind == ENTRY_CREATE || kind == ENTRY_DELETE || kind == ENTRY_MODIFY) {
reload = true;
}
}
if (reload) {
// experimental: wait briefly in case the file is still being written
Thread.sleep(THREAD_SLEEP_TIME);
AuthorizationFramework.getInstance().reload();
}
if (!key.reset()) {
break;
}
}
} catch (InterruptedException | IOException ex) {
Thread.currentThread().interrupt();
}
LOGGER.log(Level.INFO, "Watchdog finishing (exiting)");
}
}, "watchDogService");
watchDogThread.start();
}
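The core of the watchdog above is the recursive WatchService registration: postVisitDirectory attaches the watcher to every directory in the tree, since a WatchService does not watch subdirectories on its own. A minimal standalone sketch of just that step (the class name is ours, not OpenGrok's):

import static java.nio.file.StandardWatchEventKinds.ENTRY_CREATE;
import static java.nio.file.StandardWatchEventKinds.ENTRY_DELETE;
import static java.nio.file.StandardWatchEventKinds.ENTRY_MODIFY;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.WatchService;

public class RecursiveWatchSketch {
    public static void main(String[] args) throws IOException {
        final WatchService watcher = FileSystems.getDefault().newWatchService();
        Path root = Paths.get(args.length > 0 ? args[0] : ".");
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                // register each directory after its children have been visited
                dir.register(watcher, ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY);
                return FileVisitResult.CONTINUE;
            }
        });
        System.out.println("Watching " + root + " recursively");
    }
}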
use of java.nio.file.FileVisitResult in project randomizedtesting by randomizedtesting.
the class JUnit4 method getWorkingDirectory.
private Path getWorkingDirectory(ForkedJvmInfo jvmInfo) throws IOException {
Path baseDir = (dir == null ? getProject().getBaseDir().toPath() : dir);
final Path forkedDir;
if (isolateWorkingDirectories) {
forkedDir = baseDir.resolve("J" + jvmInfo.id);
if (Files.isDirectory(forkedDir)) {
// If there are any files inside the forkedDir, issue a warning.
List<String> existingFiles = listFiles(forkedDir);
if (!existingFiles.isEmpty()) {
switch(nonEmptyWorkDirAction) {
case IGNORE:
log("Cwd of a forked JVM already exists and is not empty: " + existingFiles + " (ignoring).", Project.MSG_DEBUG);
break;
case WIPE:
log("Cwd of a forked JVM already exists and is not empty, trying to wipe: " + existingFiles, Project.MSG_DEBUG);
try {
Files.walkFileTree(forkedDir, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException iterationError) throws IOException {
if (iterationError != null) {
throw iterationError;
}
if (forkedDir != dir) {
Files.delete(dir);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
Files.delete(file);
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFileFailed(Path file, IOException e) throws IOException {
throw e;
}
});
} catch (IOException e) {
throw new BuildException("An exception occurred while trying to wipe the working directory: " + forkedDir, e);
}
existingFiles = listFiles(forkedDir);
if (existingFiles.isEmpty()) {
break;
} else {
// Intentional fall-through.
}
case FAIL:
throw new BuildException("Cwd of a forked JVM already exists and is not empty " + "and setOnNonEmptyWorkDirectory=" + nonEmptyWorkDirAction + ": " + existingFiles);
default:
throw new RuntimeException("Unreachable.");
}
}
} else {
Files.createDirectories(forkedDir);
temporaryFiles.add(forkedDir);
}
} else {
forkedDir = baseDir;
}
return forkedDir;
}
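The WIPE branch above is the classic recursive-delete visitor, with one twist: it preserves forkedDir itself. For reference, a minimal sketch of the unconditional variant that deletes the root as well (class and method names are ours):

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

public final class DeleteTreeSketch {
    // deletes everything under root, then root itself
    static void deleteRecursively(Path root) throws IOException {
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                Files.delete(file);
                return FileVisitResult.CONTINUE;
            }
            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                if (exc != null) {
                    // an error during iteration aborts the whole wipe
                    throw exc;
                }
                Files.delete(dir);
                return FileVisitResult.CONTINUE;
            }
        });
    }
}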
use of java.nio.file.FileVisitResult in project neo4j by neo4j.
the class FileVisitorsDecoratorsTest method shouldPropagateReturnValueFromPreVisitDirectory.
@Test
public void shouldPropagateReturnValueFromPreVisitDirectory() throws IOException {
for (FileVisitResult result : FileVisitResult.values()) {
when(wrapped.preVisitDirectory(any(), any())).thenReturn(result);
assertThat(decorator.preVisitDirectory(null, null), is(result));
}
}
use of java.nio.file.FileVisitResult in project neo4j by neo4j.
the class FileVisitorsDecoratorsTest method shouldPropagateReturnValueFromVisitFile.
@Test
public void shouldPropagateReturnValueFromVisitFile() throws IOException {
for (FileVisitResult result : FileVisitResult.values()) {
when(wrapped.visitFile(any(), any())).thenReturn(result);
assertThat(decorator.visitFile(null, null), is(result));
}
}
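The two tests above verify a decorator contract: each callback must delegate to the wrapped visitor and propagate its FileVisitResult unchanged. A minimal sketch of such a decorator; ForwardingFileVisitor is a hypothetical name for illustration, not Neo4j's actual class:

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Path;
import java.nio.file.attribute.BasicFileAttributes;

class ForwardingFileVisitor implements FileVisitor<Path> {
    private final FileVisitor<Path> wrapped;

    ForwardingFileVisitor(FileVisitor<Path> wrapped) {
        this.wrapped = wrapped;
    }

    @Override
    public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
        return wrapped.preVisitDirectory(dir, attrs);
    }

    @Override
    public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
        return wrapped.visitFile(file, attrs);
    }

    @Override
    public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
        return wrapped.visitFileFailed(file, exc);
    }

    @Override
    public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
        return wrapped.postVisitDirectory(dir, exc);
    }
}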