Usage example of org.apache.hadoop.test.TestDir in the Apache Hadoop project.
From the class TestHttpFSServer, method testGetTrashRoot.
/**
 * Validates the GETTRASHROOT operation for three cases: the filesystem
 * root, a regular file, and a file inside an encryption zone (whose trash
 * root lives inside the zone rather than under the user's home).
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGetTrashRoot() throws Exception {
  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  createHttpFSServer(false);

  // The trash root of "/" is the requesting user's own trash directory.
  String trashJson = getStatus("/", "GETTRASHROOT");
  String trashPath = getPath(trashJson);
  Path expectedPath = new Path(FileSystem.USER_HOME_PREFIX,
      new Path(user, FileSystem.TRASH_PREFIX));
  Assert.assertEquals(expectedPath.toUri().getPath(), trashPath);

  // A regular (non-encrypted) file shares the same user trash root.
  byte[] array = new byte[] { 0, 1, 2, 3 };
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  // try-with-resources guarantees the stream is closed even if write() fails.
  try (OutputStream os = fs.create(new Path("/tmp/foo"))) {
    os.write(array);
  }
  trashJson = getStatus("/tmp/foo", "GETTRASHROOT");
  trashPath = getPath(trashJson);
  Assert.assertEquals(expectedPath.toUri().getPath(), trashPath);

  // TestHdfsHelper has already set up the encryption-zone (EZ) environment;
  // a file inside an EZ has its trash root inside the zone itself.
  final Path ezFile = TestHdfsHelper.ENCRYPTED_FILE;
  final Path ezPath = TestHdfsHelper.ENCRYPTION_ZONE;
  trashJson = getStatus(ezFile.toUri().getPath(), "GETTRASHROOT");
  trashPath = getPath(trashJson);
  expectedPath = new Path(ezPath, new Path(FileSystem.TRASH_PREFIX, user));
  Assert.assertEquals(expectedPath.toUri().getPath(), trashPath);
}
Usage example of org.apache.hadoop.test.TestDir in the Apache Hadoop project.
From the class TestHttpFSServer, method testGlobFilter.
/**
 * Validates that LISTSTATUS accepts a glob {@code filter} parameter and
 * responds with HTTP 200 for a matching pattern.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGlobFilter() throws Exception {
  createHttpFSServer(false);
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  fs.create(new Path("/tmp/foo.txt")).close();
  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format(
          "/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  // JUnit convention: expected value first, actual second. The original had
  // them reversed, which produces misleading failure messages.
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  // try-with-resources closes the reader even if readLine() throws.
  try (BufferedReader reader =
      new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
    reader.readLine();
  }
}
Usage example of org.apache.hadoop.test.TestDir in the Apache Hadoop project.
From the class TestHttpFSServer, method testXAttrs.
/**
 * Validate XAttr get/set/remove calls.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testXAttrs() throws Exception {
  final String attrName1 = "user.a1";
  final byte[] attrValue1 = new byte[] { 0x31, 0x32, 0x33 };
  final String attrName2 = "user.a2";
  final byte[] attrValue2 = new byte[] { 0x41, 0x42, 0x43 };
  final String dir = "/xattrTest";
  final String path = dir + "/file";

  createHttpFSServer(false);
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));
  createWithHttp(path, null);

  // A freshly created file starts with no xattrs.
  String statusJson = getStatus(path, "GETXATTRS");
  Map<String, byte[]> xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(0, xAttrs.size());

  // Set two xattrs and verify both round-trip intact.
  putCmd(path, "SETXATTR", setXAttrParam(attrName1, attrValue1));
  putCmd(path, "SETXATTR", setXAttrParam(attrName2, attrValue2));
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(2, xAttrs.size());
  Assert.assertArrayEquals(attrValue1, xAttrs.get(attrName1));
  Assert.assertArrayEquals(attrValue2, xAttrs.get(attrName2));

  // Remove the first xattr; only the second should remain.
  putCmd(path, "REMOVEXATTR", "xattr.name=" + attrName1);
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(1, xAttrs.size());
  Assert.assertArrayEquals(attrValue2, xAttrs.get(attrName2));

  // Remove the second xattr; the file should end with none.
  putCmd(path, "REMOVEXATTR", "xattr.name=" + attrName2);
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(0, xAttrs.size());
}
Usage example of org.apache.hadoop.test.TestDir in the Apache Hadoop project.
From the class TestHttpFSServerNoACLs, method testWithNoAcls.
/**
 * Ensure that
 * <ol>
 * <li>GETFILESTATUS and LISTSTATUS work happily</li>
 * <li>ACLSTATUS throws an exception</li>
 * <li>The ACL SET, REMOVE, etc calls all fail</li>
 * </ol>
 *
 * @throws Exception
 */
@Test
@TestDir
@TestJetty
public void testWithNoAcls() throws Exception {
  final String aclUser1 = "user:foo:rw-";
  final String rmAclUser1 = "user:foo:";
  final String aclUser2 = "user:bar:r--";
  final String aclGroup1 = "group::r--";
  final String aclSpec = "aclspec=user::rwx," + aclUser1 + ","
      + aclGroup1 + ",other::---";
  final String modAclSpec = "aclspec=" + aclUser2;
  final String remAclSpec = "aclspec=" + rmAclUser1;
  final String defUser1 = "default:user:glarch:r-x";
  final String defSpec1 = "aclspec=" + defUser1;
  final String dir = "/noACLs";
  final String path = dir + "/foo";
  startMiniDFS();
  // try/finally guarantees the mini-DFS cluster is shut down even when an
  // assertion fails; the original leaked the cluster on test failure.
  try {
    createHttpFSServer();
    FileSystem fs = FileSystem.get(nnConf);
    fs.mkdirs(new Path(dir));
    // try-with-resources closes the stream even if write() fails.
    try (OutputStream os = fs.create(new Path(path))) {
      os.write(1);
    }
    /* The normal status calls work as expected; GETACLSTATUS fails */
    getStatus(path, "GETFILESTATUS", true);
    getStatus(dir, "LISTSTATUS", true);
    getStatus(path, "GETACLSTATUS", false);
    /* All the ACL-based PUT commands fail with ACL exceptions */
    putCmd(path, "SETACL", aclSpec, false);
    putCmd(path, "MODIFYACLENTRIES", modAclSpec, false);
    putCmd(path, "REMOVEACLENTRIES", remAclSpec, false);
    putCmd(path, "REMOVEACL", null, false);
    putCmd(dir, "SETACL", defSpec1, false);
    putCmd(dir, "REMOVEDEFAULTACL", null, false);
  } finally {
    miniDfs.shutdown();
  }
}
Usage example of org.apache.hadoop.test.TestDir in the Apache Hadoop project.
From the class TestHttpFSWithKerberos, method testInvalidadHttpFSAccess.
/**
 * Verifies that an unauthenticated request to a Kerberos-secured HttpFS
 * server is rejected with HTTP 401 Unauthorized.
 * (Method name typo "Invalidad" is kept — renaming would change the
 * test's public identifier.)
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testInvalidadHttpFSAccess() throws Exception {
  createHttpFSServer();
  URL url = new URL(TestJettyHelper.getJettyURL(),
      "/webhdfs/v1/?op=GETHOMEDIRECTORY");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  // JUnit convention: expected value first, actual second. The original had
  // them reversed, which produces misleading failure messages.
  Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
      conn.getResponseCode());
}
Aggregations