Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class ViewFsBaseTest, method testOwnerForInternalDir.
@Test
public void testOwnerForInternalDir() throws IOException, InterruptedException, URISyntaxException {
  final UserGroupInformation userUgi = UserGroupInformation.createUserForTesting(
      "user@HADOOP.COM", new String[] { "hadoop" });
  userUgi.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws IOException, URISyntaxException {
      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
      String doAsUserName = ugi.getUserName();
      assertEquals("user@HADOOP.COM", doAsUserName);
      FileContext viewFS = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
      FileStatus stat = viewFS.getFileStatus(new Path("/internalDir"));
      // Internal ViewFs directories report the calling user's short name as owner.
      assertEquals(userUgi.getShortUserName(), stat.getOwner());
      return null;
    }
  });
}
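The same owner lookup works against any FileSystem, not just ViewFs. A minimal standalone sketch (assuming only hadoop-common on the classpath; the class name OwnerCheck and the temp-dir path are illustrative, not from the test above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OwnerCheck {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path dir = new Path(System.getProperty("java.io.tmpdir"));
    FileStatus stat = fs.getFileStatus(dir);
    // ViewFs internal dirs report the caller's short name as owner;
    // a real file system reports whatever the underlying store records.
    System.out.println(dir + " owned by " + stat.getOwner());
  }
}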
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class TestDiskChecker, method _mkdirs.
private void _mkdirs(boolean exists, FsPermission before, FsPermission after) throws Throwable {
  File localDir = make(stub(File.class).returning(exists).from.exists());
  when(localDir.mkdir()).thenReturn(true);
  // use default stubs
  Path dir = mock(Path.class);
  LocalFileSystem fs = make(stub(LocalFileSystem.class).returning(localDir).from.pathToFile(dir));
  FileStatus stat = make(stub(FileStatus.class).returning(after).from.getPermission());
  when(fs.getFileStatus(dir)).thenReturn(stat);
  try {
    DiskChecker.mkdirsWithExistsAndPermissionCheck(fs, dir, before);
    if (!exists) {
      verify(fs).setPermission(dir, before);
    } else {
      verify(fs).getFileStatus(dir);
      verify(stat).getPermission();
    }
  } catch (DiskErrorException e) {
    if (before != after) {
      assertTrue(e.getMessage().startsWith("Incorrect permission"));
    }
  }
}
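The make(stub(...)) calls come from a Hadoop test-side shorthand for Mockito stubbing. With plain Mockito the same setup looks like the sketch below (the helper name stubbedFs is hypothetical; it only mirrors the stubs in the test above):

import static org.mockito.Mockito.*;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

static LocalFileSystem stubbedFs(boolean exists, FsPermission after, Path dir) throws IOException {
  // Stub the local directory: may or may not exist, mkdir() always succeeds.
  File localDir = mock(File.class);
  when(localDir.exists()).thenReturn(exists);
  when(localDir.mkdir()).thenReturn(true);
  // Stub the file system to map `dir` to that local directory.
  LocalFileSystem fs = mock(LocalFileSystem.class);
  when(fs.pathToFile(dir)).thenReturn(localDir);
  // Stub the FileStatus so the permission check sees `after`.
  FileStatus stat = mock(FileStatus.class);
  when(stat.getPermission()).thenReturn(after);
  when(fs.getFileStatus(dir)).thenReturn(stat);
  return fs;
}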
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class TestViewFsFileStatusHdfs, method testFileStatusSerialziation.
@Test
public void testFileStatusSerialziation() throws IOException, URISyntaxException {
  long len = fileSystemTestHelper.createFile(fHdfs, testfilename);
  FileStatus stat = vfs.getFileStatus(new Path(testfilename));
  assertEquals(len, stat.getLen());
  // check serialization/deserialization
  DataOutputBuffer dob = new DataOutputBuffer();
  stat.write(dob);
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(dob.getData(), 0, dob.getLength());
  FileStatus deSer = new FileStatus();
  deSer.readFields(dib);
  assertEquals(len, deSer.getLen());
}
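The write/readFields pair through DataOutputBuffer and DataInputBuffer is the standard Writable round trip, and it works for any Writable, not just FileStatus. A small generic helper in that pattern (a sketch; the class and method names are illustrative):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

public final class WritableRoundTrip {
  private WritableRoundTrip() {}

  // Serialize src into an in-memory buffer, then read it back into dst.
  public static <T extends Writable> T roundTrip(T src, T dst) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    src.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), 0, out.getLength());
    dst.readFields(in);
    return dst;
  }
}

With it, the body of the test above reduces to: FileStatus deSer = WritableRoundTrip.roundTrip(stat, new FileStatus());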
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class AppendTestUtil, method check.
public static void check(FileSystem fs, Path p, long length) throws IOException {
  int i = -1;
  try {
    final FileStatus status = fs.getFileStatus(p);
    FSDataInputStream in = fs.open(p);
    if (in.getWrappedStream() instanceof DFSInputStream) {
      // DFSInputStream can report the length even while the file is being written.
      long len = ((DFSInputStream) in.getWrappedStream()).getFileLength();
      assertEquals(length, len);
    } else {
      assertEquals(length, status.getLen());
    }
    // Verify the byte pattern: byte k of the file must equal (byte) k.
    for (i++; i < length; i++) {
      assertEquals((byte) i, (byte) in.read());
    }
    // A negative i in the error message below marks a failure at the EOF check.
    i = -(int) length;
    //EOF
    assertEquals(-1, in.read());
    in.close();
  } catch (IOException ioe) {
    throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
  }
}
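check() expects the file to contain the byte pattern 0, 1, 2, ... A companion writer in that pattern might look like this (writePattern is a hypothetical helper, not part of AppendTestUtil):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void writePattern(FileSystem fs, Path p, int length) throws IOException {
  try (FSDataOutputStream out = fs.create(p)) {
    for (int i = 0; i < length; i++) {
      out.write((byte) i); // the same byte pattern check() reads back
    }
  }
}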
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
From the class AppendTestUtil, method checkFullFile.
public static void checkFullFile(FileSystem fs, Path name, int len, final byte[] compareContent, String message, boolean checkFileStatus) throws IOException {
  if (checkFileStatus) {
    final FileStatus status = fs.getFileStatus(name);
    assertEquals("len=" + len + " but status.getLen()=" + status.getLen(), len, status.getLen());
  }
  FSDataInputStream stm = fs.open(name);
  byte[] actual = new byte[len];
  stm.readFully(0, actual);
  checkData(actual, 0, compareContent, message);
  stm.close();
}
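The checkData helper is referenced but not shown above. A minimal stand-in consistent with its call site (an assumption about its behavior, not the project's actual code) could be:

import static org.junit.Assert.assertEquals;

// Compare each read byte against the expected content, starting at `from`.
static void checkData(byte[] actual, int from, byte[] expected, String message) {
  for (int i = 0; i < actual.length; i++) {
    assertEquals(message + " byte " + (from + i) + " differs",
        expected[from + i], actual[i]);
  }
}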