Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestHttpFSServer, method testGlobFilter.
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGlobFilter() throws Exception {
  createHttpFSServer(false);
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  fs.create(new Path("/tmp/foo.txt")).close();
  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  // LISTSTATUS with filter=f* should only return entries whose names match the glob
  URL url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  BufferedReader reader =
      new BufferedReader(new InputStreamReader(conn.getInputStream()));
  reader.readLine();
  reader.close();
}
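For context, here is a standalone sketch of the same LISTSTATUS call issued outside the test harness. The host, port (14000 is the usual HttpFS default), and user name are placeholder assumptions; the query parameters mirror the test, and only JDK classes are used.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ListStatusClientSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint and proxy user; substitute your own HttpFS host and user.
    String base = "http://localhost:14000";
    String user = "hdfs";
    URL url = new URL(base + "/webhdfs/v1/tmp?user.name=" + user
        + "&op=liststatus&filter=f*");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
      throw new IllegalStateException("Unexpected HTTP status: " + conn.getResponseCode());
    }
    // Read the whole FileStatuses JSON document, which lists only names matching the glob.
    StringBuilder body = new StringBuilder();
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = reader.readLine()) != null) {
        body.append(line);
      }
    }
    System.out.println(body);
  }
}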
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestHttpFSServer, method testXAttrs.
/**
* Validate XAttr get/set/remove calls.
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testXAttrs() throws Exception {
  final String name1 = "user.a1";
  final byte[] value1 = new byte[] { 0x31, 0x32, 0x33 };
  final String name2 = "user.a2";
  final byte[] value2 = new byte[] { 0x41, 0x42, 0x43 };
  final String dir = "/xattrTest";
  final String path = dir + "/file";
  createHttpFSServer(false);
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));
  createWithHttp(path, null);
  // A freshly created file has no xattrs
  String statusJson = getStatus(path, "GETXATTRS");
  Map<String, byte[]> xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(0, xAttrs.size());
  // Set two xattrs
  putCmd(path, "SETXATTR", setXAttrParam(name1, value1));
  putCmd(path, "SETXATTR", setXAttrParam(name2, value2));
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(2, xAttrs.size());
  Assert.assertArrayEquals(value1, xAttrs.get(name1));
  Assert.assertArrayEquals(value2, xAttrs.get(name2));
  // Remove one xattr
  putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(1, xAttrs.size());
  Assert.assertArrayEquals(value2, xAttrs.get(name2));
  // Remove the other xattr; none should remain
  putCmd(path, "REMOVEXATTR", "xattr.name=" + name2);
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(0, xAttrs.size());
}
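The setXAttrParam helper belongs to the test class and is not shown on this page. As a rough, hypothetical sketch of the kind of query-string fragment such a helper might build for a SETXATTR PUT (hex encoding of the value is assumed here; the real helper may encode differently):

import java.util.Locale;

public class XAttrParamSketch {
  // Hypothetical stand-in for the test's setXAttrParam helper.
  // Hex encoding is an assumption for illustration only.
  static String setXAttrParam(String name, byte[] value) {
    StringBuilder hex = new StringBuilder("0x");
    for (byte b : value) {
      hex.append(String.format(Locale.ROOT, "%02x", b));
    }
    return "xattr.name=" + name + "&xattr.value=" + hex + "&flag=CREATE";
  }

  public static void main(String[] args) {
    // Prints: xattr.name=user.a1&xattr.value=0x313233&flag=CREATE
    System.out.println(setXAttrParam("user.a1", new byte[] { 0x31, 0x32, 0x33 }));
  }
}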
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestHttpFSServerNoACLs, method testWithNoAcls.
/**
* Ensure that
* <ol>
* <li>GETFILESTATUS and LISTSTATUS work happily</li>
* <li>ACLSTATUS throws an exception</li>
* <li>The ACL SET, REMOVE, etc calls all fail</li>
* </ol>
*
* @throws Exception
*/
@Test
@TestDir
@TestJetty
public void testWithNoAcls() throws Exception {
  final String aclUser1 = "user:foo:rw-";
  final String rmAclUser1 = "user:foo:";
  final String aclUser2 = "user:bar:r--";
  final String aclGroup1 = "group::r--";
  final String aclSpec = "aclspec=user::rwx," + aclUser1 + "," + aclGroup1 + ",other::---";
  final String modAclSpec = "aclspec=" + aclUser2;
  final String remAclSpec = "aclspec=" + rmAclUser1;
  final String defUser1 = "default:user:glarch:r-x";
  final String defSpec1 = "aclspec=" + defUser1;
  final String dir = "/noACLs";
  final String path = dir + "/foo";
  startMiniDFS();
  createHttpFSServer();
  FileSystem fs = FileSystem.get(nnConf);
  fs.mkdirs(new Path(dir));
  OutputStream os = fs.create(new Path(path));
  os.write(1);
  os.close();
  /* The normal status calls work as expected; GETACLSTATUS fails */
  getStatus(path, "GETFILESTATUS", true);
  getStatus(dir, "LISTSTATUS", true);
  getStatus(path, "GETACLSTATUS", false);
  /* All the ACL-based PUT commands fail with ACL exceptions */
  putCmd(path, "SETACL", aclSpec, false);
  putCmd(path, "MODIFYACLENTRIES", modAclSpec, false);
  putCmd(path, "REMOVEACLENTRIES", remAclSpec, false);
  putCmd(path, "REMOVEACL", null, false);
  putCmd(dir, "SETACL", defSpec1, false);
  putCmd(dir, "REMOVEDEFAULTACL", null, false);
  miniDfs.shutdown();
}
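The failures above hinge on the NameNode having ACL support switched off, presumably in this test's MiniDFS setup. A minimal sketch of the real HDFS switch involved, dfs.namenode.acls.enabled (its default varies by Hadoop version):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class AclSwitchSketch {
  public static void main(String[] args) {
    // When dfs.namenode.acls.enabled is false, SETACL, GETACLSTATUS, and the
    // other ACL operations are rejected by the NameNode with an ACL exception.
    Configuration conf = new Configuration(false);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
    System.out.println("ACLs enabled: "
        + conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false));
  }
}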
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestHttpFSWithKerberos, method testDelegationTokenWithFS.
@SuppressWarnings("deprecation")
private void testDelegationTokenWithFS(Class fileSystemClass) throws Exception {
  createHttpFSServer();
  Configuration conf = new Configuration();
  conf.set("fs.webhdfs.impl", fileSystemClass.getName());
  conf.set("fs.hdfs.impl.disable.cache", "true");
  URI uri = new URI("webhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority());
  // First client: authenticate with Kerberos and obtain a delegation token
  FileSystem fs = FileSystem.get(uri, conf);
  Token<?>[] tokens = fs.addDelegationTokens("foo", null);
  fs.close();
  Assert.assertEquals(1, tokens.length);
  // Second client: authenticate with the delegation token instead of Kerberos
  fs = FileSystem.get(uri, conf);
  ((DelegationTokenRenewer.Renewable) fs).setDelegationToken(tokens[0]);
  fs.listStatus(new Path("/"));
  fs.close();
}
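Once obtained, a delegation token is typically shipped to another process as text. A small sketch, not part of the test, using Token's URL-safe string encoding, which is standard Hadoop API:

import java.io.IOException;
import org.apache.hadoop.security.token.Token;

public class TokenTransportSketch {
  // Round-trips a token through its URL-safe string form, the usual way a
  // token from addDelegationTokens() is handed to a non-Kerberos process.
  public static Token<?> roundTrip(Token<?> token) throws IOException {
    String encoded = token.encodeToUrlString(); // safe to ship as plain text
    Token<?> restored = new Token<>();
    restored.decodeFromUrlString(encoded);      // rebuild on the other side
    return restored;
  }
}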
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestFileSystemAccessService, method fileSystemCache.
@Test
@TestDir
@TestHdfs
public void fileSystemCache() throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  String services = StringUtils.join(",",
      Arrays.asList(InstrumentationService.class.getName(),
          SchedulerService.class.getName(),
          FileSystemAccessService.class.getName()));
  Configuration hadoopConf = new Configuration(false);
  hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
  createHadoopConf(hadoopConf);
  Configuration conf = new Configuration(false);
  conf.set("server.services", services);
  conf.set("server.hadoop.filesystem.cache.purge.frequency", "1");
  conf.set("server.hadoop.filesystem.cache.purge.timeout", "1");
  Server server = new Server("server", dir, dir, dir, dir, conf);
  try {
    server.init();
    FileSystemAccess hadoop = server.get(FileSystemAccess.class);
    FileSystem fs1 = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
    Assert.assertNotNull(fs1);
    fs1.mkdirs(new Path("/tmp/foo1"));
    hadoop.releaseFileSystem(fs1);
    // Still usable after release because the cache keeps it open
    fs1.mkdirs(new Path("/tmp/foo2"));
    FileSystem fs2 = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
    // Should be the same cached instance
    Assert.assertEquals(fs1, fs2);
    Thread.sleep(4 * 1000);
    // Still around because the lease count is 1 (fs2 is out)
    fs1.mkdirs(new Path("/tmp/foo2"));
    Thread.sleep(4 * 1000);
    // Still around because the lease count is 1 (fs2 is out)
    fs2.mkdirs(new Path("/tmp/foo"));
    hadoop.releaseFileSystem(fs2);
    Thread.sleep(4 * 1000);
    // Should have been purged, since the lease count dropped to 0
    try {
      fs2.mkdirs(new Path("/tmp/foo"));
      Assert.fail();
    } catch (IOException ex) {
      // Expected: the cached filesystem was closed by the purger
    } catch (Exception ex) {
      Assert.fail();
    }
  } finally {
    server.destroy();
  }
}
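The lease-count behavior the comments describe can be summarized by a simplified model. This is an illustration of the idea only, not the FileSystemAccessService implementation:

import java.util.concurrent.atomic.AtomicInteger;

// Simplified model of the reference-counted caching the test exercises:
// createFileSystem() bumps the lease count, releaseFileSystem() drops it,
// and the periodic purger closes an entry only once its count reaches zero.
class CachedFsEntry<T> {
  private final T fs;
  private final AtomicInteger leases = new AtomicInteger();

  CachedFsEntry(T fs) {
    this.fs = fs;
  }

  T acquire() {
    leases.incrementAndGet();
    return fs;
  }

  void release() {
    leases.decrementAndGet();
  }

  boolean purgeable() {
    // The real service would also check the purge timeout here
    return leases.get() == 0;
  }
}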