Use of org.apache.hadoop.fs.FileStatus in project flink by apache.
The class Utils, method registerLocalResource.
public static void registerLocalResource(FileSystem fs, Path remoteRsrcPath, LocalResource localResource) throws IOException {
  FileStatus jarStat = fs.getFileStatus(remoteRsrcPath);
  localResource.setResource(ConverterUtils.getYarnUrlFromURI(remoteRsrcPath.toUri()));
  localResource.setSize(jarStat.getLen());
  localResource.setTimestamp(jarStat.getModificationTime());
  localResource.setType(LocalResourceType.FILE);
  localResource.setVisibility(LocalResourceVisibility.APPLICATION);
}
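A minimal sketch of how this helper might be invoked when preparing a YARN container launch context. The remote jar path, the Records.newRecord call, and the resource key are illustrative assumptions, not taken from the Flink source:

FileSystem fs = FileSystem.get(new Configuration());
Path remoteJar = new Path("hdfs:///tmp/app/flink-job.jar"); // assumed, already-uploaded location
LocalResource jarResource = Records.newRecord(LocalResource.class);
Utils.registerLocalResource(fs, remoteJar, jarResource);
// The populated record would then go into the container's local-resource map,
// e.g. localResources.put("flink-job.jar", jarResource);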
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class TestKeyProviderFactory, method testJksProvider.
@Test
public void testJksProvider() throws Exception {
  Configuration conf = new Configuration();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  final String ourUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
  File file = new File(testRootDir, "test.jks");
  file.delete();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  // START : Test flush error by failure injection
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl.replace(JavaKeyStoreProvider.SCHEME_NAME, FailureInjectingJavaKeyStoreProvider.SCHEME_NAME));
  // get a new instance of the provider to ensure it was saved correctly
  KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
  // inject failure during keystore write
  FailureInjectingJavaKeyStoreProvider fProvider = (FailureInjectingJavaKeyStoreProvider) provider;
  fProvider.setWriteFail(true);
  provider.createKey("key5", new byte[] { 1 }, KeyProvider.options(conf).setBitLength(8));
  assertNotNull(provider.getCurrentKey("key5"));
  try {
    provider.flush();
    Assert.fail("Should not succeed");
  } catch (Exception e) {
    // Ignore
  }
  // Should be reset to pre-flush state
  Assert.assertNull(provider.getCurrentKey("key5"));
  // Un-inject last failure and
  // inject failure during keystore backup
  fProvider.setWriteFail(false);
  fProvider.setBackupFail(true);
  provider.createKey("key6", new byte[] { 1 }, KeyProvider.options(conf).setBitLength(8));
  assertNotNull(provider.getCurrentKey("key6"));
  try {
    provider.flush();
    Assert.fail("Should not succeed");
  } catch (Exception e) {
    // Ignore
  }
  // Should be reset to pre-flush state
  Assert.assertNull(provider.getCurrentKey("key6"));
  // END : Test flush error by failure injection
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl.replace(FailureInjectingJavaKeyStoreProvider.SCHEME_NAME, JavaKeyStoreProvider.SCHEME_NAME));
  Path path = ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue(s.getPermission().toString().equals("rw-------"));
  assertTrue(file + " should exist", file.isFile());
  // Corrupt the file and check whether JKS can reload from the _OLD file
  File oldFile = new File(file.getPath() + "_OLD");
  file.renameTo(oldFile);
  file.delete();
  file.createNewFile();
  assertTrue(oldFile.exists());
  provider = KeyProviderFactory.getProviders(conf).get(0);
  assertTrue(file.exists());
  assertTrue(oldFile + " should be deleted", !oldFile.exists());
  verifyAfterReload(file, provider);
  assertTrue(!oldFile.exists());
  // _NEW and current file should not exist together
  File newFile = new File(file.getPath() + "_NEW");
  newFile.createNewFile();
  try {
    provider = KeyProviderFactory.getProviders(conf).get(0);
    Assert.fail("_NEW and current file should not exist together !!");
  } catch (Exception e) {
    // Ignore
  } finally {
    if (newFile.exists()) {
      newFile.delete();
    }
  }
  // Load from _NEW file
  file.renameTo(newFile);
  file.delete();
  try {
    provider = KeyProviderFactory.getProviders(conf).get(0);
    Assert.assertFalse(newFile.exists());
    Assert.assertFalse(oldFile.exists());
  } catch (Exception e) {
    Assert.fail("JKS should load from _NEW file !!");
  }
  verifyAfterReload(file, provider);
  // _NEW exists but is corrupt; must load from _OLD
  newFile.createNewFile();
  file.renameTo(oldFile);
  file.delete();
  try {
    provider = KeyProviderFactory.getProviders(conf).get(0);
    Assert.assertFalse(newFile.exists());
    Assert.assertFalse(oldFile.exists());
  } catch (Exception e) {
    Assert.fail("JKS should load from _OLD file !!");
  } finally {
    if (newFile.exists()) {
      newFile.delete();
    }
  }
  verifyAfterReload(file, provider);
  // check permission retention after explicit change
  fs.setPermission(path, new FsPermission("777"));
  checkPermissionRetention(conf, ourUrl, path);
  // Check that an uppercase keyname results in an error
  provider = KeyProviderFactory.getProviders(conf).get(0);
  try {
    provider.createKey("UPPERCASE", KeyProvider.options(conf));
    Assert.fail("Expected failure on creating key name with uppercase " + "characters");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Uppercase key names", e);
  }
}
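For readers unfamiliar with the provider URL format exercised above, here is a brief sketch (the directory is assumed, standing in for testRootDir) of how the jceks URL is composed and how ProviderUtils.unnestUri resolves it back to the keystore file whose FileStatus the test inspects:

Configuration conf = new Configuration();
Path jksPath = new Path("/tmp/kp", "test.jks"); // assumed directory for illustration
String providerUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri(); // jceks://file/tmp/kp/test.jks
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, providerUrl);
// unnestUri strips the outer jceks scheme, leaving the file path of the backing keystore
Path keystorePath = ProviderUtils.unnestUri(new URI(providerUrl));
FileSystem fs = keystorePath.getFileSystem(conf);
// Once a key has been created and flushed, the keystore file is written with owner-only
// permissions, which is what the rw------- assertion on FileStatus checks above.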
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class TestKeyProviderFactory, method checkPermissionRetention.
public void checkPermissionRetention(Configuration conf, String ourUrl, Path path) throws Exception {
  KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
  // let's add a new key and flush and check that permissions are still set to 777
  byte[] key = new byte[16];
  for (int i = 0; i < key.length; ++i) {
    key[i] = (byte) i;
  }
  // create a new key
  try {
    provider.createKey("key5", key, KeyProvider.options(conf));
  } catch (Exception e) {
    e.printStackTrace();
    throw e;
  }
  provider.flush();
  // get a new instance of the provider to ensure it was saved correctly
  provider = KeyProviderFactory.getProviders(conf).get(0);
  assertArrayEquals(key, provider.getCurrentKey("key5").getMaterial());
  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue("Permissions should have been retained from the preexisting keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
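The assertion above compares the permission's string form; an equivalent check that compares FsPermission values directly is sketched below (a variation for illustration, not the Hadoop test code):

FileStatus status = fs.getFileStatus(path);
// FsPermission implements equals(), so the octal form can be compared without string formatting
assertEquals(new FsPermission((short) 0777), status.getPermission());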
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class FTPFileSystem, method listStatus.
/**
 * Convenience method, so that we don't open a new connection when using this
 * method from within another method. Otherwise every API invocation incurs
 * the overhead of opening/closing a TCP connection.
 */
private FileStatus[] listStatus(FTPClient client, Path file) throws IOException {
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  FileStatus fileStat = getFileStatus(client, absolute);
  if (fileStat.isFile()) {
    return new FileStatus[] { fileStat };
  }
  FTPFile[] ftpFiles = client.listFiles(absolute.toUri().getPath());
  FileStatus[] fileStats = new FileStatus[ftpFiles.length];
  for (int i = 0; i < ftpFiles.length; i++) {
    fileStats[i] = getFileStatus(ftpFiles[i], absolute);
  }
  return fileStats;
}
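This private helper is typically wrapped by a public listStatus(Path) that opens and closes the FTP connection around it. A minimal sketch of such a wrapper, assuming the connect() and disconnect() helpers that FTPFileSystem uses elsewhere:

@Override
public FileStatus[] listStatus(Path file) throws IOException {
  FTPClient client = connect();       // open a fresh connection for this API call
  try {
    return listStatus(client, file);  // delegate to the connection-reusing helper above
  } finally {
    disconnect(client);               // always release the TCP connection
  }
}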
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class SFTPFileSystem, method open.
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  ChannelSftp channel = connect();
  Path workDir;
  try {
    workDir = new Path(channel.pwd());
  } catch (SftpException e) {
    throw new IOException(e);
  }
  Path absolute = makeAbsolute(workDir, f);
  FileStatus fileStat = getFileStatus(channel, absolute);
  if (fileStat.isDirectory()) {
    disconnect(channel);
    throw new IOException(String.format(E_PATH_DIR, f));
  }
  InputStream is;
  try {
    // the path could be a symbolic link, so get the real path
    absolute = new Path("/", channel.realpath(absolute.toUri().getPath()));
    is = channel.get(absolute.toUri().getPath());
  } catch (SftpException e) {
    throw new IOException(e);
  }
  FSDataInputStream fis = new FSDataInputStream(new SFTPInputStream(is, channel, statistics));
  return fis;
}
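A brief usage sketch for this method. The host, credentials, and path are invented for illustration, and depending on the Hadoop version the sftp scheme may need to be mapped to SFTPFileSystem explicitly:

Configuration conf = new Configuration();
// May be unnecessary on versions where the sftp scheme is registered by default (assumption).
conf.set("fs.sftp.impl", org.apache.hadoop.fs.sftp.SFTPFileSystem.class.getName());
URI uri = URI.create("sftp://user:secret@sftp.example.com/data/input.txt"); // hypothetical endpoint
FileSystem fs = FileSystem.get(uri, conf);
try (FSDataInputStream in = fs.open(new Path(uri), 4096)) {
  byte[] buf = new byte[1024];
  int n = in.read(buf);
  System.out.println("read " + n + " bytes");
}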