Use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
From the class TestHarFileSystemBasics, method testPositiveNewHarFsOnTheSameUnderlyingFs.
@Test
public void testPositiveNewHarFsOnTheSameUnderlyingFs() throws Exception {
    // Init 2nd har file system on the same underlying FS, so the
    // metadata gets reused:
    final HarFileSystem hfs = new HarFileSystem(localFileSystem);
    final URI uri = new URI("har://" + harPath.toString());
    hfs.initialize(uri, new Configuration());
    // the metadata should be reused from the cache:
    assertTrue(hfs.getMetadata() == harFileSystem.getMetadata());
}
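The reference-equality check above can also be expressed with JUnit's assertSame, which yields a clearer failure message. A minimal equivalent, assuming org.junit.Assert.assertSame is statically imported alongside the other assertions:

    // Equivalent assertion with a more descriptive failure message:
    assertSame("metadata should be served from the HarFileSystem cache",
        harFileSystem.getMetadata(), hfs.getMetadata());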
Use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
From the class TestHarFileSystemBasics, method testPositiveInitWithoutUnderlyingFS.
@Test
public void testPositiveInitWithoutUnderlyingFS() throws Exception {
    // Init HarFS with no constructor arg, so that the underlying FS object
    // is created on demand or fetched from the cache in the #initialize() method.
    final HarFileSystem hfs = new HarFileSystem();
    final URI uri = new URI("har://" + harPath.toString());
    hfs.initialize(uri, new Configuration());
}
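The same lazy resolution of the underlying FS happens when a har filesystem is obtained through the generic factory. A minimal sketch, assuming harPath points at an existing .har archive as in the tests above:

    Configuration conf = new Configuration();
    // FileSystem.get() resolves the "har" scheme, instantiates the
    // filesystem, and calls initialize() with this URI and conf:
    FileSystem fs = FileSystem.get(new URI("har://" + harPath.toString()), conf);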
Use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
From the class TestHarFileSystemBasics, method testListLocatedStatus.
@Test
public void testListLocatedStatus() throws Exception {
    String testHarPath = this.getClass().getResource("/test.har").getPath();
    URI uri = new URI("har://" + testHarPath);
    HarFileSystem hfs = new HarFileSystem(localFileSystem);
    hfs.initialize(uri, new Configuration());
    // test.har has the following contents:
    //   dir1/1.txt
    //   dir1/2.txt
    Set<String> expectedFileNames = new HashSet<String>();
    expectedFileNames.add("1.txt");
    expectedFileNames.add("2.txt");
    // List contents of dir, and ensure we find all expected files
    Path path = new Path("dir1");
    RemoteIterator<LocatedFileStatus> fileList = hfs.listLocatedStatus(path);
    while (fileList.hasNext()) {
        String fileName = fileList.next().getPath().getName();
        assertTrue(fileName + " not in expected files list",
            expectedFileNames.contains(fileName));
        expectedFileNames.remove(fileName);
    }
    assertEquals("Didn't find all of the expected file names: " + expectedFileNames,
        0, expectedFileNames.size());
}
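For comparison, the non-located variant returns a plain FileStatus array rather than a RemoteIterator, materializing the whole listing eagerly. A minimal sketch against the same archive:

    // listStatus() returns the full directory listing at once:
    FileStatus[] statuses = hfs.listStatus(new Path("dir1"));
    for (FileStatus status : statuses) {
        System.out.println(status.getPath().getName());
    }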
Use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
From the class TestFsShellReturnCode, method testInterrupt.
@Test(timeout = 30000)
public void testInterrupt() throws Exception {
    MyFsShell shell = new MyFsShell();
    shell.setConf(new Configuration());
    final Path d = new Path(TEST_ROOT_DIR, "testInterrupt");
    final Path f1 = new Path(d, "f1");
    final Path f2 = new Path(d, "f2");
    assertTrue(fileSys.mkdirs(d));
    writeFile(fileSys, f1);
    assertTrue(fileSys.isFile(f1));
    writeFile(fileSys, f2);
    assertTrue(fileSys.isFile(f2));
    int exitCode = shell.run(new String[] { "-testInterrupt", f1.toString(), f2.toString() });
    // processing a file throws an interrupt; the run should abort on the first file
    assertEquals(1, InterruptCommand.processed);
    assertEquals(130, exitCode);
    exitCode = shell.run(new String[] { "-testInterrupt", d.toString() });
    // processing a file throws an interrupt; the run should abort on the file
    // reached after descending into the dir
    assertEquals(2, InterruptCommand.processed);
    assertEquals(130, exitCode);
}
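The expected exit code 130 follows the shell convention of 128 + signal number, SIGINT being signal 2. Outside of tests, an FsShell is normally driven through ToolRunner, which handles the generic Hadoop options before dispatching to the command. A minimal sketch, assuming a default Configuration:

    Configuration conf = new Configuration();
    // ToolRunner parses -D, -conf, -fs, etc. before invoking FsShell.run():
    int rc = ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" });
    System.exit(rc);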
Use of org.apache.hadoop.conf.Configuration in project hadoop by apache.
From the class TestFsShellTouch, method setup.
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = new Configuration();
    shell = new FsShell(conf);
    lfs = FileSystem.getLocal(conf);
    testRootDir = lfs.makeQualified(new Path(GenericTestUtils.getTempPath("testFsShell")));
    lfs.mkdirs(testRootDir);
    lfs.setWorkingDirectory(testRootDir);
}
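A setup like this is typically paired with a teardown that removes the temp directory. A minimal sketch of a matching @AfterClass method (hypothetical here, not part of the original snippet):

    @AfterClass
    public static void cleanup() throws Exception {
        // Recursively delete the qualified test root created in setup():
        lfs.delete(testRootDir, true);
    }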