use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
the class TestLocalFileSystem method testBufferedFSInputStream.
/**
* Regression test for HADOOP-9307: BufferedFSInputStream returning
* wrong results after certain sequences of seeks and reads.
*/
@Test
public void testBufferedFSInputStream() throws IOException {
  Configuration conf = new Configuration();
  conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
  conf.setInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, 4096);
  FileSystem fs = FileSystem.newInstance(conf);

  byte[] buf = new byte[10 * 1024];
  new Random().nextBytes(buf);

  // Write random bytes to file
  FSDataOutputStream stream = fs.create(TEST_PATH);
  try {
    stream.write(buf);
  } finally {
    stream.close();
  }

  Random r = new Random();
  FSDataInputStream stm = fs.open(TEST_PATH);
  // Record the sequence of seeks and reads which trigger a failure.
  int[] seeks = new int[10];
  int[] reads = new int[10];
  try {
    for (int i = 0; i < 1000; i++) {
      int seekOff = r.nextInt(buf.length);
      int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
      seeks[i % seeks.length] = seekOff;
      reads[i % reads.length] = toRead;
      verifyRead(stm, buf, seekOff, toRead);
    }
  } catch (AssertionError afe) {
    StringBuilder sb = new StringBuilder();
    sb.append("Sequence of actions:\n");
    for (int j = 0; j < seeks.length; j++) {
      sb.append("seek @ ").append(seeks[j]).append(" ")
        .append("read ").append(reads[j]).append("\n");
    }
    System.err.println(sb.toString());
    throw afe;
  } finally {
    stm.close();
  }
}
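The verifyRead helper is referenced but not shown in this snippet. A minimal sketch of what such a check could look like, assuming the full file contents are passed in as fileContents (the names and assertion message here are illustrative, not necessarily what TestLocalFileSystem uses):

// Hypothetical verifyRead-style helper: seek to the offset, read exactly
// toRead bytes, and compare them against the expected slice of the buffer.
private static void verifyRead(FSDataInputStream stm, byte[] fileContents,
    int seekOff, int toRead) throws IOException {
  byte[] out = new byte[toRead];
  stm.seek(seekOff);   // position the stream at the random offset
  stm.readFully(out);  // read exactly toRead bytes
  byte[] expected = java.util.Arrays.copyOfRange(
      fileContents, seekOff, seekOff + toRead);
  assertArrayEquals("Read at offset " + seekOff + " returned wrong data",
      expected, out);
}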
use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
the class TestLocalFileSystem method testStripFragmentFromPath.
@Test
public void testStripFragmentFromPath() throws Exception {
  FileSystem fs = FileSystem.getLocal(new Configuration());
  Path pathQualified = TEST_PATH.makeQualified(fs.getUri(),
      fs.getWorkingDirectory());
  Path pathWithFragment = new Path(
      new URI(pathQualified.toString() + "#glacier"));
  // Create test file with fragment
  FileSystemTestHelper.createFile(fs, pathWithFragment);
  Path resolved = fs.resolvePath(pathWithFragment);
  assertEquals("resolvePath did not strip fragment from Path", pathQualified,
      resolved);
}
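For context, the fragment is the part of a URI after "#", and java.net.URI keeps it separate from the path component. A small, self-contained illustration using only the JDK (the file path here is made up):

import java.net.URI;

public class FragmentDemo {
  public static void main(String[] args) throws Exception {
    // The fragment ("#glacier") travels alongside the path, not inside it.
    URI withFragment = new URI("file:///tmp/test-file#glacier");
    System.out.println(withFragment.getPath());     // prints /tmp/test-file
    System.out.println(withFragment.getFragment()); // prints glacier
  }
}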
use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
the class TestLocalFileSystem method testFileStatusPipeFile.
@Test
public void testFileStatusPipeFile() throws Exception {
  RawLocalFileSystem origFs = new RawLocalFileSystem();
  RawLocalFileSystem fs = spy(origFs);
  Configuration conf = mock(Configuration.class);
  fs.setConf(conf);
  Whitebox.setInternalState(fs, "useDeprecatedFileStatus", false);
  Path path = new Path("/foo");
  File pipe = mock(File.class);
  when(pipe.isFile()).thenReturn(false);
  when(pipe.isDirectory()).thenReturn(false);
  when(pipe.exists()).thenReturn(true);
  FileStatus stat = mock(FileStatus.class);
  doReturn(pipe).when(fs).pathToFile(path);
  doReturn(stat).when(fs).getFileStatus(path);
  FileStatus[] stats = fs.listStatus(path);
  assertTrue(stats != null && stats.length == 1 && stats[0] == stat);
}
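A note on the stubbing style: for a spy, doReturn(...).when(spy).method(...) is used instead of when(spy.method(...)).thenReturn(...), because the latter would invoke the real method while the stub is being set up. A minimal illustration of the difference (the List example is purely hypothetical and unrelated to the test above):

import static org.mockito.Mockito.*;
import java.util.ArrayList;
import java.util.List;

public class SpyStubbingDemo {
  public static void main(String[] args) {
    List<String> spyList = spy(new ArrayList<String>());
    // Safe on a spy: the real get(0) is never called during stubbing.
    doReturn("stubbed").when(spyList).get(0);
    // when(spyList.get(0)).thenReturn("stubbed") would call the real get(0)
    // first and throw IndexOutOfBoundsException on the empty list.
    System.out.println(spyList.get(0)); // prints "stubbed"
  }
}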
use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
the class TestLocalFileSystemPermission method testSetUmaskInRealTime.
/**
 * Steps:
 * 1. Create a directory with default permissions (777) and umask 022.
 * 2. Check that the directory has the expected permissions: 755.
 * 3. Set the umask to 062.
 * 4. Create a new directory with default permissions.
 * 5. For this directory we expect permission 715, not 755.
 * @throws Exception if any step of the test fails.
 */
@Test
public void testSetUmaskInRealTime() throws Exception {
  assumeNotWindows();
  LocalFileSystem localfs = FileSystem.getLocal(new Configuration());
  Configuration conf = localfs.getConf();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "022");
  LOGGER.info("Current umask is {}",
      conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY));
  Path dir = new Path(TEST_PATH_PREFIX + "dir");
  Path dir2 = new Path(TEST_PATH_PREFIX + "dir2");
  try {
    assertTrue(localfs.mkdirs(dir));
    FsPermission initialPermission = getPermission(localfs, dir);
    assertEquals("With umask 022 permission should be 755 since the default "
        + "permission is 777", new FsPermission("755"), initialPermission);
    // Modify umask and create a new directory
    // and check if new umask is applied
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "062");
    assertTrue(localfs.mkdirs(dir2));
    FsPermission finalPermission = localfs.getFileStatus(dir2).getPermission();
    assertThat("With umask 062 permission should not be 755 since the "
        + "default permission is 777", new FsPermission("755"),
        is(not(finalPermission)));
    assertEquals("With umask 062 we expect 715 since the default permission "
        + "is 777", new FsPermission("715"), finalPermission);
  } finally {
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "022");
    cleanup(localfs, dir);
    cleanup(localfs, dir2);
  }
}
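The expected values follow from how a umask is applied: every permission bit set in the umask is cleared from the requested mode. A quick check of the two cases above using plain octal arithmetic, independent of Hadoop:

public class UmaskDemo {
  public static void main(String[] args) {
    int requested = 0777;                          // default directory mode
    // umask 022 clears group write and other write -> prints 755
    System.out.printf("%o%n", requested & ~0022);
    // umask 062 clears group read/write and other write -> prints 715
    System.out.printf("%o%n", requested & ~0062);
  }
}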
use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
the class TestLocalFileSystemPermission method testLocalFSsetOwner.
/** Test LocalFileSystem.setOwner. */
@Test
public void testLocalFSsetOwner() throws IOException {
  assumeNotWindows();
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "044");
  LocalFileSystem localfs = FileSystem.getLocal(conf);
  String filename = "bar";
  Path f = writeFile(localfs, filename);
  List<String> groups;
  try {
    groups = getGroups();
    LOGGER.info("{}: {}", filename, getPermission(localfs, f));
    // create files and manipulate them.
    String g0 = groups.get(0);
    localfs.setOwner(f, null, g0);
    assertEquals(g0, getGroup(localfs, f));
    if (groups.size() > 1) {
      String g1 = groups.get(1);
      localfs.setOwner(f, null, g1);
      assertEquals(g1, getGroup(localfs, f));
    } else {
      LOGGER.info("Not testing changing the group since user "
          + "belongs to only one group.");
    }
  } finally {
    cleanup(localfs, f);
  }
}
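getGroups is another helper that is not shown in this snippet. One possible implementation asks Hadoop's UserGroupInformation for the current user's groups; this is a sketch and an assumption, since the actual test class may obtain the group list differently (for example via a shell command):

// Hypothetical getGroups sketch: returns the current user's group names
// via UserGroupInformation; not necessarily how the real helper works.
private static List<String> getGroups() throws IOException {
  return java.util.Arrays.asList(
      UserGroupInformation.getCurrentUser().getGroupNames());
}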