Use of org.apache.hadoop.fs.FileSystem in project flink by apache.
Class YarnClusterClient, method shutdownCluster.
/**
 * Shuts down the YARN application.
 */
public void shutdownCluster() {
    if (hasBeenShutDown.getAndSet(true)) {
        return;
    }
    if (!isConnected) {
        throw new IllegalStateException("The cluster has not been connected to the ApplicationMaster.");
    }
    try {
        Runtime.getRuntime().removeShutdownHook(clientShutdownHook);
    } catch (IllegalStateException e) {
        // we are already in the shutdown hook
    }
    LOG.info("Sending shutdown request to the Application Master");
    try {
        Future<Object> response = Patterns.ask(applicationClient.get(),
            new YarnMessages.LocalStopYarnSession(getApplicationStatus(), "Flink YARN Client requested shutdown"),
            new Timeout(akkaDuration));
        Await.ready(response, akkaDuration);
    } catch (Exception e) {
        LOG.warn("Error while stopping YARN cluster.", e);
    }
    try {
        File propertiesFile = FlinkYarnSessionCli.getYarnPropertiesLocation(flinkConfig);
        if (propertiesFile.isFile()) {
            if (propertiesFile.delete()) {
                LOG.info("Deleted Yarn properties file at {}", propertiesFile.getAbsoluteFile().toString());
            } else {
                LOG.warn("Couldn't delete Yarn properties file at {}", propertiesFile.getAbsoluteFile().toString());
            }
        }
    } catch (Exception e) {
        LOG.warn("Exception while deleting the JobManager address file", e);
    }
    if (sessionFilesDir != null) {
        LOG.info("Deleting files in " + sessionFilesDir);
        try {
            FileSystem shutFS = FileSystem.get(hadoopConfig);
            // delete conf and jar file.
            shutFS.delete(sessionFilesDir, true);
            shutFS.close();
        } catch (IOException e) {
            LOG.error("Could not delete the Flink jar and configuration files in HDFS.", e);
        }
    } else {
        LOG.warn("Session file directory not set. Not deleting session files");
    }
    try {
        pollingRunner.stopRunner();
        pollingRunner.join(1000);
    } catch (InterruptedException e) {
        LOG.warn("Shutdown of the polling runner was interrupted", e);
        Thread.currentThread().interrupt();
    }
    try {
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);
        LOG.info("Application " + appId + " finished with state " + appReport.getYarnApplicationState()
            + " and final state " + appReport.getFinalApplicationStatus() + " at " + appReport.getFinishTime());
        if (appReport.getYarnApplicationState() == YarnApplicationState.FAILED
                || appReport.getYarnApplicationState() == YarnApplicationState.KILLED) {
            LOG.warn("Application failed. Diagnostics " + appReport.getDiagnostics());
            LOG.warn("If log aggregation is activated in the Hadoop cluster, we recommend to retrieve "
                + "the full application log using this command:" + System.lineSeparator()
                + "\tyarn logs -applicationId " + appReport.getApplicationId() + System.lineSeparator()
                + "(It sometimes takes a few seconds until the logs are aggregated)");
        }
    } catch (Exception e) {
        LOG.warn("Couldn't get final report", e);
    }
    LOG.info("YARN Client is shutting down");
    // actorRunner is using the yarnClient.
    yarnClient.stop();
    // set null to clearly see if somebody wants to access it afterwards.
    yarnClient = null;
}
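The session-file cleanup above boils down to the core FileSystem lifecycle: obtain an instance for a configuration, delete a path recursively, and close the instance. A minimal standalone sketch of that pattern follows; the class name, method name, and arguments are illustrative and not part of Flink.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class SessionFileCleanup {
        // Recursively deletes a remote session directory, mirroring the cleanup step in shutdownCluster().
        // 'sessionFilesDir' is a hypothetical argument used only for illustration.
        static void deleteSessionFiles(Configuration hadoopConfig, Path sessionFilesDir) {
            try {
                FileSystem fs = FileSystem.get(hadoopConfig);
                // 'true' requests recursive deletion of the directory and everything under it
                fs.delete(sessionFilesDir, true);
                fs.close();
            } catch (IOException e) {
                // in shutdownCluster() a failure here is only logged; the cleanup is best-effort
            }
        }
    }

Note that FileSystem.get typically returns a cached, shared instance, so closing it (as the original code does) also closes it for other users of the same cache entry unless FileSystem caching is disabled for that scheme.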
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
Class Configuration, method getLocalPath.
/**
 * Get a local file under a directory named by <i>dirsProp</i> with
 * the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
 * then one is chosen based on <i>path</i>'s hash code. If the selected
 * directory does not exist, an attempt is made to create it.
 *
 * @param dirsProp a configuration property whose value is a comma-separated list of local directories.
 * @param path the file path to locate under one of those directories.
 * @return local file under the chosen directory with the given path.
 * @throws IOException if none of the configured directories exists or can be created.
 */
public Path getLocalPath(String dirsProp, String path) throws IOException {
    String[] dirs = getTrimmedStrings(dirsProp);
    int hashCode = path.hashCode();
    FileSystem fs = FileSystem.getLocal(this);
    for (int i = 0; i < dirs.length; i++) {
        // try each local dir
        int index = (hashCode + i & Integer.MAX_VALUE) % dirs.length;
        Path file = new Path(dirs[index], path);
        Path dir = file.getParent();
        if (fs.mkdirs(dir) || fs.exists(dir)) {
            return file;
        }
    }
    LOG.warn("Could not make " + path + " in local directories from " + dirsProp);
    for (int i = 0; i < dirs.length; i++) {
        int index = (hashCode + i & Integer.MAX_VALUE) % dirs.length;
        LOG.warn(dirsProp + "[" + index + "]=" + dirs[index]);
    }
    throw new IOException("No valid local directories in property: " + dirsProp);
}
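A short usage sketch of getLocalPath: the property name and file path below are hypothetical, chosen only for illustration. The point is that dirsProp names a comma-separated list of local directories and one of them is picked (and created if needed) based on the path's hash code.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public final class GetLocalPathExample {
        public static void main(String[] args) throws IOException {
            Configuration conf = new Configuration();
            // hypothetical property holding two candidate local directories
            conf.set("example.local.dirs", "/tmp/data1,/tmp/data2");
            // one directory is chosen by the hash of "spill/part-0"; it is created if missing
            Path p = conf.getLocalPath("example.local.dirs", "spill/part-0");
            System.out.println("Chosen local path: " + p);
        }
    }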
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
Class TestKeyProviderFactory, method testJksProvider.
@Test
public void testJksProvider() throws Exception {
    Configuration conf = new Configuration();
    final Path jksPath = new Path(testRootDir.toString(), "test.jks");
    final String ourUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
    File file = new File(testRootDir, "test.jks");
    file.delete();
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
    checkSpecificProvider(conf, ourUrl);

    // START: test flush errors by failure injection
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
        ourUrl.replace(JavaKeyStoreProvider.SCHEME_NAME, FailureInjectingJavaKeyStoreProvider.SCHEME_NAME));
    // get a new instance of the provider to ensure it was saved correctly
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
    // inject failure during keystore write
    FailureInjectingJavaKeyStoreProvider fProvider = (FailureInjectingJavaKeyStoreProvider) provider;
    fProvider.setWriteFail(true);
    provider.createKey("key5", new byte[] { 1 }, KeyProvider.options(conf).setBitLength(8));
    assertNotNull(provider.getCurrentKey("key5"));
    try {
        provider.flush();
        Assert.fail("Should not succeed");
    } catch (Exception e) {
        // Ignore
    }
    // should be reset to the pre-flush state
    Assert.assertNull(provider.getCurrentKey("key5"));
    // un-inject the last failure and inject a failure during keystore backup
    fProvider.setWriteFail(false);
    fProvider.setBackupFail(true);
    provider.createKey("key6", new byte[] { 1 }, KeyProvider.options(conf).setBitLength(8));
    assertNotNull(provider.getCurrentKey("key6"));
    try {
        provider.flush();
        Assert.fail("Should not succeed");
    } catch (Exception e) {
        // Ignore
    }
    // should be reset to the pre-flush state
    Assert.assertNull(provider.getCurrentKey("key6"));
    // END: test flush errors by failure injection

    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
        ourUrl.replace(FailureInjectingJavaKeyStoreProvider.SCHEME_NAME, JavaKeyStoreProvider.SCHEME_NAME));
    Path path = ProviderUtils.unnestUri(new URI(ourUrl));
    FileSystem fs = path.getFileSystem(conf);
    FileStatus s = fs.getFileStatus(path);
    assertTrue(s.getPermission().toString().equals("rw-------"));
    assertTrue(file + " should exist", file.isFile());

    // corrupt the file and check that JKS can reload from the _OLD file
    File oldFile = new File(file.getPath() + "_OLD");
    file.renameTo(oldFile);
    file.delete();
    file.createNewFile();
    assertTrue(oldFile.exists());
    provider = KeyProviderFactory.getProviders(conf).get(0);
    assertTrue(file.exists());
    assertTrue(oldFile + " should be deleted", !oldFile.exists());
    verifyAfterReload(file, provider);
    assertTrue(!oldFile.exists());

    // _NEW and current file should not exist together
    File newFile = new File(file.getPath() + "_NEW");
    newFile.createNewFile();
    try {
        provider = KeyProviderFactory.getProviders(conf).get(0);
        Assert.fail("_NEW and current file should not exist together !!");
    } catch (Exception e) {
        // Ignore
    } finally {
        if (newFile.exists()) {
            newFile.delete();
        }
    }

    // load from the _NEW file
    file.renameTo(newFile);
    file.delete();
    try {
        provider = KeyProviderFactory.getProviders(conf).get(0);
        Assert.assertFalse(newFile.exists());
        Assert.assertFalse(oldFile.exists());
    } catch (Exception e) {
        Assert.fail("JKS should load from _NEW file !!");
        // Ignore
    }
    verifyAfterReload(file, provider);

    // _NEW exists but is corrupt; must load from _OLD
    newFile.createNewFile();
    file.renameTo(oldFile);
    file.delete();
    try {
        provider = KeyProviderFactory.getProviders(conf).get(0);
        Assert.assertFalse(newFile.exists());
        Assert.assertFalse(oldFile.exists());
    } catch (Exception e) {
        Assert.fail("JKS should load from _OLD file !!");
        // Ignore
    } finally {
        if (newFile.exists()) {
            newFile.delete();
        }
    }
    verifyAfterReload(file, provider);

    // check permission retention after an explicit change
    fs.setPermission(path, new FsPermission("777"));
    checkPermissionRetention(conf, ourUrl, path);

    // check that an uppercase key name results in an error
    provider = KeyProviderFactory.getProviders(conf).get(0);
    try {
        provider.createKey("UPPERCASE", KeyProvider.options(conf));
        Assert.fail("Expected failure on creating key name with uppercase " + "characters");
    } catch (IllegalArgumentException e) {
        GenericTestUtils.assertExceptionContains("Uppercase key names", e);
    }
}
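The test above repeatedly reconfigures hadoop.security.key.provider.path and reloads the provider through KeyProviderFactory. A minimal sketch of that configure/create/flush cycle outside the test harness, assuming a hypothetical keystore location under /tmp:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public final class JksProviderExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // hypothetical keystore location; the "jceks://file" prefix mirrors the URL built in the test
            conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "jceks://file/tmp/example.jks");
            KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
            // one byte of key material with a matching 8-bit length, as in the test
            provider.createKey("key1", new byte[] { 1 }, KeyProvider.options(conf).setBitLength(8));
            // flush() persists the keystore; until then the new key exists only in memory
            provider.flush();
            System.out.println(provider.getCurrentKey("key1").getMaterial().length + " byte(s) of key material");
        }
    }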
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
Class TestKeyProviderFactory, method checkPermissionRetention.
public void checkPermissionRetention(Configuration conf, String ourUrl, Path path) throws Exception {
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
    // let's add a new key and flush and check that permissions are still set to 777
    byte[] key = new byte[16];
    for (int i = 0; i < key.length; ++i) {
        key[i] = (byte) i;
    }
    // create a new key
    try {
        provider.createKey("key5", key, KeyProvider.options(conf));
    } catch (Exception e) {
        e.printStackTrace();
        throw e;
    }
    provider.flush();
    // get a new instance of the provider to ensure it was saved correctly
    provider = KeyProviderFactory.getProviders(conf).get(0);
    assertArrayEquals(key, provider.getCurrentKey("key5").getMaterial());
    FileSystem fs = path.getFileSystem(conf);
    FileStatus s = fs.getFileStatus(path);
    assertTrue("Permissions should have been retained from the preexisting keystore.",
        s.getPermission().toString().equals("rwxrwxrwx"));
}
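Both tests verify keystore permissions through the FileSystem status API. A small helper sketch of that check; the class name, method name, and expected-permission argument are chosen here for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public final class PermissionCheckExample {
        // Returns true if the file at 'path' carries exactly the expected POSIX-style permission string,
        // e.g. "rw-------" for a freshly written keystore or "rwxrwxrwx" after an explicit chmod 777.
        static boolean hasPermission(Configuration conf, Path path, String expected) throws Exception {
            FileSystem fs = path.getFileSystem(conf);
            FileStatus status = fs.getFileStatus(path);
            FsPermission perm = status.getPermission();
            return expected.equals(perm.toString());
        }
    }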
Use of org.apache.hadoop.fs.FileSystem in project flink by apache.
Class HadoopIOFormatsITCase, method preSubmit.
@Override
protected void preSubmit() throws Exception {
    resultPath = new String[] { getTempDirPath("result0"), getTempDirPath("result1") };
    File sequenceFile = createAndRegisterTempFile("seqFile");
    sequenceFileInPath = sequenceFile.toURI().toString();
    // create a sequence file
    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    FileSystem fs = FileSystem.get(URI.create(sequenceFile.getAbsolutePath()), conf);
    Path path = new Path(sequenceFile.getAbsolutePath());

    // ------------------ LongWritable / Text key-value pairs ------------------
    int kvCount = 4;
    LongWritable key = new LongWritable();
    Text value = new Text();
    SequenceFile.Writer writer = null;
    try {
        writer = SequenceFile.createWriter(fs, conf, path, key.getClass(), value.getClass());
        for (int i = 0; i < kvCount; i++) {
            if (i == 1) {
                // write key = 1 a bit more often
                for (int a = 0; a < 15; a++) {
                    key.set(i);
                    value.set(i + " - somestring");
                    writer.append(key, value);
                }
            }
            key.set(i);
            value.set(i + " - somestring");
            writer.append(key, value);
        }
    } finally {
        IOUtils.closeStream(writer);
    }

    // ------------------ NullWritable key / LongWritable value pairs ------------------
    File sequenceFileNull = createAndRegisterTempFile("seqFileNullKey");
    sequenceFileInPathNull = sequenceFileNull.toURI().toString();
    path = new Path(sequenceFileInPathNull);
    LongWritable value1 = new LongWritable();
    SequenceFile.Writer writer1 = null;
    try {
        writer1 = SequenceFile.createWriter(fs, conf, path, NullWritable.class, value1.getClass());
        for (int i = 0; i < kvCount; i++) {
            value1.set(i);
            writer1.append(NullWritable.get(), value1);
        }
    } finally {
        IOUtils.closeStream(writer1);
    }
}
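For completeness, a reader-side sketch that would consume a sequence file like the first one written above. It uses the classic SequenceFile.Reader constructor to match the createWriter call style in the test; the class and method names are illustrative, and the path argument is assumed to point at a file produced by the writer:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public final class ReadSequenceFileExample {
        // Prints every (LongWritable, Text) pair in the sequence file at 'path'.
        static void dump(Configuration conf, Path path) throws Exception {
            FileSystem fs = path.getFileSystem(conf);
            SequenceFile.Reader reader = null;
            try {
                // deprecated constructor, but it mirrors the (fs, conf, path, ...) createWriter call above
                reader = new SequenceFile.Reader(fs, path, conf);
                LongWritable key = new LongWritable();
                Text value = new Text();
                while (reader.next(key, value)) {
                    System.out.println(key.get() + " -> " + value);
                }
            } finally {
                IOUtils.closeStream(reader);
            }
        }
    }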