Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestChRootedFileSystem, method testUnsetStoragePolicy.
@Test(timeout = 30000)
public void testUnsetStoragePolicy() throws Exception {
  Path storagePolicyPath = new Path("/storagePolicy");
  Path chRootedStoragePolicyPath = new Path("/a/b/storagePolicy");
  Configuration conf = new Configuration();
  conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
  URI chrootUri = URI.create("mockfs://foo/a/b");
  ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
  FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem()).getRawFileSystem();
  chrootFs.unsetStoragePolicy(storagePolicyPath);
  verify(mockFs).unsetStoragePolicy(chRootedStoragePolicyPath);
}
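The interesting assertion is that the mocked underlying file system receives the chroot-prefixed path, not the path the caller passed in. As a rough illustration (not Hadoop's implementation; the class and method names below are made up), the translation the test verifies amounts to prefixing the chroot onto every absolute path:

// Hypothetical sketch of chroot path translation, for illustration only.
public final class ChrootPathSketch {

  /** Resolve a caller-supplied absolute path against a chroot prefix. */
  static String fullPath(String chroot, String path) {
    // The root of the chrooted view maps to the chroot itself;
    // any other absolute path is appended to the chroot.
    return "/".equals(path) ? chroot : chroot + path;
  }

  public static void main(String[] args) {
    // Mirrors the test: "/storagePolicy" under the chroot "/a/b"
    // reaches the underlying file system as "/a/b/storagePolicy".
    System.out.println(fullPath("/a/b", "/storagePolicy"));
  }
}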
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class RollingFileSystemSinkTestBase, method readLogFile.
/**
 * Read the log files at the target path and return the contents as a single
 * string. This method will assert that the correct number of files is found.
 *
 * @param path the target path
 * @param then when the test method began. Used to find the log directory in
 * the case that the test run crosses the top of the hour.
 * @param count the number of log files to expect
 * @return the contents of the log files as a single string
 * @throws IOException if the file system cannot be accessed
 * @throws URISyntaxException if the target path is not a valid URI
 */
protected String readLogFile(String path, String then, int count)
    throws IOException, URISyntaxException {
  final String now = DATE_FORMAT.format(new Date()) + "00";
  final String logFile = getLogFilename();
  FileSystem fs = FileSystem.get(new URI(path), new Configuration());
  StringBuilder metrics = new StringBuilder();
  boolean found = false;

  for (FileStatus status : fs.listStatus(new Path(path))) {
    Path logDir = status.getPath();

    // Only look at the log directories named for the time at which
    // the test started and the current time. Anything else can be ignored.
    if (now.equals(logDir.getName()) || then.equals(logDir.getName())) {
      readLogData(fs, findMostRecentLogFile(fs, new Path(logDir, logFile)), metrics);
      assertFileCount(fs, logDir, count);
      found = true;
    }
  }

  assertTrue("No valid log directories found", found);

  return metrics.toString();
}
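The pattern above — obtain a FileSystem from a URI, call listStatus, and filter the children by name — works against any FileSystem implementation. A minimal, self-contained sketch (using the local file system and an illustrative /tmp path, neither of which is part of the original test):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStatusSketch {
  public static void main(String[] args) throws Exception {
    // file:///tmp is only an example; any FileSystem URI (hdfs://..., etc.) works.
    FileSystem fs = FileSystem.get(new URI("file:///tmp"), new Configuration());
    for (FileStatus status : fs.listStatus(new Path("/tmp"))) {
      // getPath().getName() yields the last path component, as in readLogFile().
      System.out.println(status.getPath().getName());
    }
  }
}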
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestHttpFSServer, method testPerms.
/**
* Validate that files are created with 755 permissions when no
* 'permissions' attribute is specified, and when 'permissions'
* is specified, that value is honored.
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testPerms() throws Exception {
  createHttpFSServer(false);
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/perm"));

  createWithHttp("/perm/none", null);
  String statusJson = getStatus("/perm/none", "GETFILESTATUS");
  Assert.assertEquals("755", getPerms(statusJson));

  createWithHttp("/perm/p-777", "777");
  statusJson = getStatus("/perm/p-777", "GETFILESTATUS");
  Assert.assertEquals("777", getPerms(statusJson));

  createWithHttp("/perm/p-654", "654");
  statusJson = getStatus("/perm/p-654", "GETFILESTATUS");
  Assert.assertEquals("654", getPerms(statusJson));

  createWithHttp("/perm/p-321", "321");
  statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
  Assert.assertEquals("321", getPerms(statusJson));
}
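The same create-then-verify round trip can be expressed directly against the FileSystem API rather than over HTTP. A hedged sketch, using the local file system and a made-up path (note that the local file system applies permissions through a real chmod subject to the process umask, so observed bits can differ from HDFS):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class PermSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path dir = new Path("/tmp/perm-sketch");  // illustrative path
    fs.mkdirs(dir);
    fs.setPermission(dir, new FsPermission((short) 0654));
    // Prints 654, the octal form the HTTP test extracts from the JSON reply.
    System.out.println(
        Integer.toOctalString(fs.getFileStatus(dir).getPermission().toShort()));
  }
}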
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestHttpFSServer, method testDirAcls.
/**
* Test ACL operations on a directory, including default ACLs.
* General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
* <ol>
* <li>Initial status with no ACLs</li>
* <li>The addition of a default ACL</li>
* <li>The removal of default ACLs</li>
* </ol>
*
* @throws Exception
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDirAcls() throws Exception {
  final String defUser1 = "default:user:glarch:r-x";
  final String defSpec1 = "aclspec=" + defUser1;
  final String dir = "/aclDirTest";
  String statusJson;
  List<String> aclEntries;

  createHttpFSServer(false);
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));

  /* getfilestatus and liststatus don't have 'aclBit' in their reply */
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertEquals(-1, statusJson.indexOf("aclBit"));

  /* No ACLs, either */
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  Assert.assertEquals(0, aclEntries.size());

  /* Give it a default ACL and verify */
  putCmd(dir, "SETACL", defSpec1);
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  Assert.assertEquals(5, aclEntries.size());
  /* 4 of the 5 entries are default:(user|group|mask|other):perm;
     the fifth is the named entry added above */
  Assert.assertTrue(aclEntries.contains(defUser1));

  /* Remove the default ACL and re-verify */
  putCmd(dir, "REMOVEDEFAULTACL", null);
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  Assert.assertEquals(0, aclEntries.size());
}
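On the server side these HTTP operations delegate to the FileSystem ACL methods: SETACL to setAcl(), GETACLSTATUS to getAclStatus(), and REMOVEDEFAULTACL to removeDefaultAcl(). A hedged sketch of the same default-ACL round trip against that API; it needs an ACL-capable file system such as HDFS, and the fs and dir arguments are assumed to be supplied by the caller:

import java.util.Collections;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class DirAclSketch {
  /** Same round trip as the test, expressed with the FileSystem ACL API. */
  static void defaultAclRoundTrip(FileSystem fs, Path dir) throws Exception {
    // Equivalent of the aclspec string "default:user:glarch:r-x".
    AclEntry defUser = new AclEntry.Builder()
        .setScope(AclEntryScope.DEFAULT)
        .setType(AclEntryType.USER)
        .setName("glarch")
        .setPermission(FsAction.READ_EXECUTE)
        .build();
    fs.setAcl(dir, Collections.singletonList(defUser));     // SETACL
    System.out.println(fs.getAclStatus(dir).getEntries());  // GETACLSTATUS
    fs.removeDefaultAcl(dir);                               // REMOVEDEFAULTACL
  }
}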
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestHttpFSServer, method testGetTrashRoot.
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGetTrashRoot() throws Exception {
  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  createHttpFSServer(false);

  String trashJson = getStatus("/", "GETTRASHROOT");
  String trashPath = getPath(trashJson);
  Path expectedPath = new Path(FileSystem.USER_HOME_PREFIX,
      new Path(user, FileSystem.TRASH_PREFIX));
  Assert.assertEquals(expectedPath.toUri().getPath(), trashPath);

  byte[] array = new byte[] { 0, 1, 2, 3 };
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  OutputStream os = fs.create(new Path("/tmp/foo"));
  os.write(array);
  os.close();

  trashJson = getStatus("/tmp/foo", "GETTRASHROOT");
  trashPath = getPath(trashJson);
  Assert.assertEquals(expectedPath.toUri().getPath(), trashPath);

  // TestHdfsHelper has already set up the encryption zone (EZ) environment
  final Path ezFile = TestHdfsHelper.ENCRYPTED_FILE;
  final Path ezPath = TestHdfsHelper.ENCRYPTION_ZONE;
  trashJson = getStatus(ezFile.toUri().getPath(), "GETTRASHROOT");
  trashPath = getPath(trashJson);
  expectedPath = new Path(ezPath, new Path(FileSystem.TRASH_PREFIX, user));
  Assert.assertEquals(expectedPath.toUri().getPath(), trashPath);
}
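GETTRASHROOT corresponds to FileSystem#getTrashRoot. The default implementation resolves the user's home directory plus TRASH_PREFIX (which is how expectedPath is built above), while HDFS overrides it so that paths inside an encryption zone get a per-zone trash directory, matching the second half of the test. A minimal sketch (local file system, illustrative path):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TrashRootSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // Default implementation: the current user's home directory plus
    // TRASH_PREFIX (on HDFS this is /user/<user>/.Trash), regardless
    // of the path argument.
    System.out.println(fs.getTrashRoot(new Path("/tmp/foo")));
  }
}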