Use of org.apache.hadoop.fs.LocatedFileStatus in the Apache Hadoop project: class TestHadoopArchives, method makeArchiveWithRepl.
/*
 * Runs the HadoopArchives tool to create an archive on the given
 * file system with a replication degree of 2, then verifies that
 * every archived file (other than the _SUCCESS marker) actually
 * carries that replication. Returns the full har:// URI of the
 * created archive.
 */
private String makeArchiveWithRepl() throws Exception {
    final String srcPathStr = inputPath.toUri().getPath();
    System.out.println("inputPathStr = " + srcPathStr);
    final URI fsUri = fs.getUri();
    final String harName = "foo.har";
    // har://hdfs-<host>:<port><archive-dir>/foo.har
    final String fullHarPathStr = "har://hdfs-" + fsUri.getHost() + ":"
        + fsUri.getPort() + archivePath.toUri().getPath() + Path.SEPARATOR
        + harName;
    final String[] toolArgs = {
        "-archiveName", harName, "-p", srcPathStr, "-r", "2", "*",
        archivePath.toString() };
    System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
        HADOOP_ARCHIVES_JAR);
    final HadoopArchives archiver = new HadoopArchives(conf);
    assertEquals(0, ToolRunner.run(archiver, toolArgs));
    // Walk the archive contents and check the replication requested
    // via "-r 2" was applied to each data file.
    final Path harDir = new Path(archivePath.toString() + "/" + harName);
    RemoteIterator<LocatedFileStatus> entries = fs.listFiles(harDir, false);
    while (entries.hasNext()) {
        final LocatedFileStatus entry = entries.next();
        if (!entry.getPath().toString().endsWith("_SUCCESS")) {
            assertEquals(entry.getPath().toString(), 2, entry.getReplication());
        }
    }
    return fullHarPathStr;
}
Use of org.apache.hadoop.fs.LocatedFileStatus in the Apache Hadoop project: class TestV2LsOperations, method assertListFilesFinds.
/**
 * Asserts that {@code fs.listFiles(dir, recursive)} yields an entry whose
 * path equals {@code subdir}. On failure, the assertion message includes
 * the entry count and the full listing for diagnosis.
 *
 * @param fs filesystem
 * @param dir directory to list
 * @param subdir path expected to appear in the listing
 * @param recursive recurse?
 * @throws IOException IO problems
 */
public static void assertListFilesFinds(FileSystem fs, Path dir, Path subdir, boolean recursive) throws IOException {
    RemoteIterator<LocatedFileStatus> listing = fs.listFiles(dir, recursive);
    int count = 0;
    boolean matched = false;
    // Accumulate every entry seen so a failure message can show the
    // complete directory contents.
    StringBuilder seen = new StringBuilder();
    while (listing.hasNext()) {
        LocatedFileStatus entry = listing.next();
        count++;
        seen.append(entry.toString()).append('\n');
        matched |= entry.getPath().equals(subdir);
    }
    assertTrue("Path " + subdir + " not found in directory " + dir + " : " + " entries=" + count + " content" + seen.toString(), matched);
}
Use of org.apache.hadoop.fs.LocatedFileStatus in the Apache Hadoop project: class JobHistoryFileReplayHelper, method selectJobFiles.
/**
 * Recursively scans {@code processingRoot} and collects, grouped by job id,
 * the history files this mapper is responsible for. Work is sharded across
 * mappers: a file is selected only when its parsed numeric suffix modulo
 * {@code size} equals this mapper's index {@code i}.
 *
 * @param fs filesystem holding the job history files
 * @param processingRoot root directory to scan recursively
 * @param i index of this mapper (0-based shard id)
 * @param size total number of mappers sharing the work
 * @return the {@link JobFiles} groups selected for this mapper
 * @throws IOException if listing the filesystem fails
 */
private Collection<JobFiles> selectJobFiles(FileSystem fs, Path processingRoot, int i, int size) throws IOException {
    Map<String, JobFiles> jobs = new HashMap<>();
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(processingRoot, true);
    while (it.hasNext()) {
        LocatedFileStatus status = it.next();
        Path path = status.getPath();
        String fileName = path.getName();
        // Skip anything that does not look like a job history file.
        Matcher m = JOB_ID_PARSER.matcher(fileName);
        if (!m.matches()) {
            continue;
        }
        String jobId = m.group(1);
        int lastId = Integer.parseInt(m.group(2));
        // Shard selection: only process files assigned to this mapper.
        int mod = lastId % size;
        if (mod != i) {
            continue;
        }
        LOG.info("this mapper will process file " + fileName);
        // it's mine — group by job id, creating the group on first sight
        // (computeIfAbsent replaces the manual get/null-check/put idiom).
        JobFiles jobFiles = jobs.computeIfAbsent(jobId, JobFiles::new);
        setFilePath(fileName, path, jobFiles);
    }
    return jobs.values();
}
Use of org.apache.hadoop.fs.LocatedFileStatus in the Apache Hadoop project: class TestMRJobClient, method testJobHistory.
/**
 * Verifies the CLI "-history" subcommand: locates the job's .jhist file,
 * then prints job history from both the file URI and the job id, in
 * human-readable and JSON formats, to stdout and to an output file, and
 * finally checks that malformed argument combinations are rejected.
 *
 * @param jobId id of a completed job whose history file exists on the filesystem
 * @param conf configuration used to run the CLI tool
 * @throws Exception if the tool fails or any assertion does not hold
 */
private void testJobHistory(String jobId, Configuration conf) throws Exception {
CLI jc = createJobClient();
ByteArrayOutputStream out = new ByteArrayOutputStream();
// Find jhist file: recursive scan from the filesystem root, taking the
// first entry whose name ends in ".jhist".
String historyFileUri = null;
RemoteIterator<LocatedFileStatus> it = getFileSystem().listFiles(new Path("/"), true);
while (it.hasNext() && historyFileUri == null) {
LocatedFileStatus file = it.next();
if (file.getPath().getName().endsWith(".jhist")) {
historyFileUri = file.getPath().toUri().toString();
}
}
assertNotNull("Could not find jhist file", historyFileUri);
// The "-history" command must accept both the history file URI and the
// plain job id; exercise every combination with each.
for (String historyFileOrJobId : new String[] { historyFileUri, jobId }) {
// Try a bunch of different valid combinations of the command
// "all" + default (human) format to stdout.
int exitCode = runTool(conf, jc, new String[] { "-history", "all", historyFileOrJobId }, out);
assertEquals("Exit code", 0, exitCode);
checkHistoryHumanOutput(jobId, out);
// "all" + "-outfile": human output written to a temp file.
File outFile = File.createTempFile("myout", ".txt");
exitCode = runTool(conf, jc, new String[] { "-history", "all", historyFileOrJobId, "-outfile", outFile.getAbsolutePath() }, out);
assertEquals("Exit code", 0, exitCode);
checkHistoryHumanFileOutput(jobId, out, outFile);
// "all" + "-outfile" + explicit "-format human".
outFile = File.createTempFile("myout", ".txt");
exitCode = runTool(conf, jc, new String[] { "-history", "all", historyFileOrJobId, "-outfile", outFile.getAbsolutePath(), "-format", "human" }, out);
assertEquals("Exit code", 0, exitCode);
checkHistoryHumanFileOutput(jobId, out, outFile);
// Without "all": explicit "-format human" to stdout.
exitCode = runTool(conf, jc, new String[] { "-history", historyFileOrJobId, "-format", "human" }, out);
assertEquals("Exit code", 0, exitCode);
checkHistoryHumanOutput(jobId, out);
// "all" + JSON format to stdout.
exitCode = runTool(conf, jc, new String[] { "-history", "all", historyFileOrJobId, "-format", "json" }, out);
assertEquals("Exit code", 0, exitCode);
checkHistoryJSONOutput(jobId, out);
// "all" + JSON format written to a temp file.
outFile = File.createTempFile("myout", ".txt");
exitCode = runTool(conf, jc, new String[] { "-history", "all", historyFileOrJobId, "-outfile", outFile.getAbsolutePath(), "-format", "json" }, out);
assertEquals("Exit code", 0, exitCode);
checkHistoryJSONFileOutput(jobId, out, outFile);
// Without "all": JSON format to stdout.
exitCode = runTool(conf, jc, new String[] { "-history", historyFileOrJobId, "-format", "json" }, out);
assertEquals("Exit code", 0, exitCode);
checkHistoryJSONOutput(jobId, out);
// Check some bad arguments
// Trailing garbage argument -> exit code -1.
exitCode = runTool(conf, jc, new String[] { "-history", historyFileOrJobId, "foo" }, out);
assertEquals("Exit code", -1, exitCode);
// "-format" with no value -> exit code -1.
exitCode = runTool(conf, jc, new String[] { "-history", historyFileOrJobId, "-format" }, out);
assertEquals("Exit code", -1, exitCode);
// "-outfile" with no value -> exit code -1.
exitCode = runTool(conf, jc, new String[] { "-history", historyFileOrJobId, "-outfile" }, out);
assertEquals("Exit code", -1, exitCode);
// Unknown format name must throw rather than return an exit code.
try {
runTool(conf, jc, new String[] { "-history", historyFileOrJobId, "-format", "foo" }, out);
fail();
} catch (IllegalArgumentException e) {
// Expected
}
}
// A string that is neither a history file nor a job id must be rejected.
try {
runTool(conf, jc, new String[] { "-history", "not_a_valid_history_file_or_job_id" }, out);
fail();
} catch (IllegalArgumentException e) {
// Expected
}
}
Use of org.apache.hadoop.fs.LocatedFileStatus in the Apache Hadoop project: class TestDistributedFileSystem, method testLocatedFileStatusStorageIdsTypes.
@Test(timeout = 120000)
// Verifies that LocatedFileStatus block locations returned by
// listLocatedStatus expose valid storage IDs (matching the cluster's
// datanode volumes) and the default storage type for each replica.
public void testLocatedFileStatusStorageIdsTypes() throws Exception {
final Configuration conf = getTestConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
final DistributedFileSystem fs = cluster.getFileSystem();
final Path testFile = new Path("/testListLocatedStatus");
final int blockSize = 4096;
final int numBlocks = 10;
// Create a test file
// 10 blocks, replication 2, and wait until replication is complete so
// every block reports a full set of locations.
final int repl = 2;
DFSTestUtil.createFile(fs, testFile, blockSize, numBlocks * blockSize, blockSize, (short) repl, 0xADDED);
DFSTestUtil.waitForReplication(fs, testFile, (short) repl, 30000);
// Get the listing
RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(testFile);
assertTrue("Expected file to be present", it.hasNext());
LocatedFileStatus stat = it.next();
BlockLocation[] locs = stat.getBlockLocations();
assertEquals("Unexpected number of locations", numBlocks, locs.length);
// Collect every storage ID known to the cluster's datanodes; block
// locations must only reference IDs from this set.
Set<String> dnStorageIds = new HashSet<>();
for (DataNode d : cluster.getDataNodes()) {
try (FsDatasetSpi.FsVolumeReferences volumes = d.getFSDataset().getFsVolumeReferences()) {
for (FsVolumeSpi vol : volumes) {
dnStorageIds.add(vol.getStorageID());
}
}
}
for (BlockLocation loc : locs) {
String[] ids = loc.getStorageIds();
// Run it through a set to deduplicate, since there should be no dupes
Set<String> storageIds = new HashSet<>();
Collections.addAll(storageIds, ids);
assertEquals("Unexpected num storage ids", repl, storageIds.size());
// Make sure these are all valid storage IDs
assertTrue("Unknown storage IDs found!", dnStorageIds.containsAll(storageIds));
// Check storage types are the default, since we didn't set any
StorageType[] types = loc.getStorageTypes();
assertEquals("Unexpected num storage types", repl, types.length);
for (StorageType t : types) {
assertEquals("Unexpected storage type", StorageType.DEFAULT, t);
}
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Aggregations