Use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.
Class Hdfs, method listStatus:
@Override
public FileStatus[] listStatus(Path f) throws IOException, UnresolvedLinkException {
  String src = getUriPath(f);
  // fetch the first batch of entries in the directory
  DirectoryListing thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME);
  if (thisListing == null) {
    // the directory does not exist
    throw new FileNotFoundException("File " + f + " does not exist.");
  }
  HdfsFileStatus[] partialListing = thisListing.getPartialListing();
  if (!thisListing.hasMore()) {
    // got all entries of the directory
    FileStatus[] stats = new FileStatus[partialListing.length];
    for (int i = 0; i < partialListing.length; i++) {
      stats[i] = partialListing[i].makeQualified(getUri(), f);
    }
    return stats;
  }
  // the directory is too big to return in a single batch;
  // estimate the total number of entries in the directory
  int totalNumEntries = partialListing.length + thisListing.getRemainingEntries();
  ArrayList<FileStatus> listing = new ArrayList<FileStatus>(totalNumEntries);
  // add the first batch of entries to the array list
  for (HdfsFileStatus fileStatus : partialListing) {
    listing.add(fileStatus.makeQualified(getUri(), f));
  }
  // now fetch more entries
  do {
    thisListing = dfs.listPaths(src, thisListing.getLastName());
    if (thisListing == null) {
      // the directory was deleted while we were listing it
      throw new FileNotFoundException("File " + f + " does not exist.");
    }
    partialListing = thisListing.getPartialListing();
    for (HdfsFileStatus fileStatus : partialListing) {
      listing.add(fileStatus.makeQualified(getUri(), f));
    }
  } while (thisListing.hasMore());
  return listing.toArray(new FileStatus[listing.size()]);
}
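The method above shows the standard DirectoryListing paging pattern: start at HdfsFileStatus.EMPTY_NAME, call listPaths again with getLastName() as the cursor, and stop when hasMore() returns false. A minimal, hypothetical helper (not part of Hadoop; the class and method names are invented for illustration) that follows the same pattern might look like this:

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

/** Hypothetical helper: drains every batch of a paged directory listing. */
public class ListingPager {
  public static List<HdfsFileStatus> listAll(DFSClient dfs, String src) throws IOException {
    List<HdfsFileStatus> result = new ArrayList<>();
    byte[] startAfter = HdfsFileStatus.EMPTY_NAME;   // start from the first entry
    DirectoryListing batch;
    do {
      batch = dfs.listPaths(src, startAfter);        // fetch one batch
      if (batch == null) {                           // directory vanished mid-listing
        throw new FileNotFoundException("File " + src + " does not exist.");
      }
      Collections.addAll(result, batch.getPartialListing());
      startAfter = batch.getLastName();              // resume after the last returned name
    } while (batch.hasMore());
    return result;
  }
}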
Use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.
Class FSDirStatAndListingOp, method getSnapshotsListing:
/**
 * Get a listing of all the snapshots of a snapshottable directory.
 */
private static DirectoryListing getSnapshotsListing(FSDirectory fsd, INodesInPath iip, byte[] startAfter) throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(iip.isDotSnapshotDir(), "%s does not end with %s", iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
  // drop off the null .snapshot component
  iip = iip.getParentINodesInPath();
  final String dirPath = iip.getPath();
  final INode node = iip.getLastINode();
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException("Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, iip, sRoot, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
  }
  return new DirectoryListing(listing, snapshots.size() - skipSize - numOfListing);
}
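The skipSize arithmetic converts a binary-search result into the number of snapshots to skip: a non-negative index means startAfter was found (skip it and everything before it), while a negative value encodes the insertion point. A small self-contained illustration of the same transformation using java.util.Arrays on plain strings (not HDFS types; values are made up for the example):

import java.util.Arrays;

/** Standalone illustration of the skipSize arithmetic used above. */
public class SkipSizeDemo {
  public static void main(String[] args) {
    String[] snapshots = { "s1", "s3", "s5" };         // sorted snapshot names
    for (String startAfter : new String[] { "", "s1", "s2" }) {
      int idx = Arrays.binarySearch(snapshots, startAfter);
      // same rule as above: found -> skip it too, not found -> skip the insertion point
      int skipSize = idx < 0 ? -idx - 1 : idx + 1;
      System.out.println("startAfter='" + startAfter + "' skips " + skipSize + " snapshot(s)");
    }
  }
}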
Use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.
Class FSNamesystem, method getListing:
/**
 * Get a partial listing of the indicated directory.
 *
 * @param src the directory name
 * @param startAfter the name to start after
 * @param needLocation if blockLocations need to be returned
 * @return a partial listing starting after startAfter
 *
 * @throws AccessControlException if access is denied
 * @throws UnresolvedLinkException if symbolic link is encountered
 * @throws IOException if other I/O error occurred
 */
DirectoryListing getListing(String src, byte[] startAfter, boolean needLocation) throws IOException {
  checkOperation(OperationCategory.READ);
  final String operationName = "listStatus";
  DirectoryListing dl = null;
  readLock();
  try {
    checkOperation(NameNode.OperationCategory.READ);
    dl = getListingInt(dir, src, startAfter, needLocation);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    readUnlock(operationName);
  }
  logAuditEvent(true, operationName, src);
  return dl;
}
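On the client side, this NameNode method is normally reached through the FileSystem API rather than called directly. A hedged sketch (the path and default configuration are illustrative assumptions) of listing a large directory with listStatusIterator, which on HDFS pages through getListing batches behind the scenes:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

/** Hypothetical client-side sketch: iterate a big directory one batch at a time. */
public class ListBigDir {
  public static void main(String[] args) throws IOException {
    // Assumes fs.defaultFS points at an HDFS cluster; "/big/dir" is a placeholder path.
    FileSystem fs = FileSystem.get(new Configuration());
    RemoteIterator<FileStatus> it = fs.listStatusIterator(new Path("/big/dir"));
    while (it.hasNext()) {
      System.out.println(it.next().getPath());  // entries arrive in paged batches on HDFS
    }
  }
}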
Use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.
Class NamenodeFsck, method checkDir:
private void checkDir(String path, Result replRes, Result ecRes) throws IOException {
  if (snapshottableDirs != null && snapshottableDirs.contains(path)) {
    String snapshotPath = (path.endsWith(Path.SEPARATOR) ? path : path + Path.SEPARATOR) + HdfsConstants.DOT_SNAPSHOT_DIR;
    HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo(snapshotPath);
    check(snapshotPath, snapshotFileInfo, replRes, ecRes);
  }
  byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
  DirectoryListing thisListing;
  if (showFiles) {
    out.println(path + " <dir>");
  }
  totalDirs++;
  do {
    assert lastReturnedName != null;
    thisListing = namenode.getRpcServer().getListing(path, lastReturnedName, false);
    if (thisListing == null) {
      return;
    }
    HdfsFileStatus[] files = thisListing.getPartialListing();
    for (int i = 0; i < files.length; i++) {
      check(path, files[i], replRes, ecRes);
    }
    lastReturnedName = thisListing.getLastName();
  } while (thisListing.hasMore());
}
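checkDir walks a single directory level with the paged getListing RPC and relies on check() to recurse into subdirectories. A rough, hypothetical equivalent written against the public FileSystem API (no block-level checks; the class, fields, and method are invented for the sketch):

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Hypothetical sketch of a similar walk using FileSystem instead of the NameNode RPC server. */
public class DirWalker {
  private long totalDirs;
  private long totalFiles;

  public void walk(FileSystem fs, Path dir) throws IOException {
    totalDirs++;
    for (FileStatus stat : fs.listStatus(dir)) {  // fsck pages through getListing RPCs instead
      if (stat.isDirectory()) {
        walk(fs, stat.getPath());                 // recurse into subdirectories
      } else {
        totalFiles++;                             // fsck would check the file's blocks here
      }
    }
  }
}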
Use of org.apache.hadoop.hdfs.protocol.DirectoryListing in project hadoop by apache.
Class TestErasureCodingPolicies, method testReplicatedFileUnderECDir:
/**
 * For pre-existing files (with replicated blocks) in an EC dir, getListing
 * should report them as non-EC.
 */
@Test
public void testReplicatedFileUnderECDir() throws IOException {
  final Path dir = new Path("/ec");
  final Path replicatedFile = new Path(dir, "replicatedFile");
  // create a file with replicated blocks
  DFSTestUtil.createFile(fs, replicatedFile, 0, (short) 3, 0L);
  // set ec policy on dir
  fs.setErasureCodingPolicy(dir, StripedFileTestUtil.getDefaultECPolicy().getName());
  // create a file which should be using ec
  final Path ecSubDir = new Path(dir, "ecSubDir");
  final Path ecFile = new Path(ecSubDir, "ecFile");
  DFSTestUtil.createFile(fs, ecFile, 0, (short) 1, 0L);
  assertNull(fs.getClient().getFileInfo(replicatedFile.toString()).getErasureCodingPolicy());
  assertNotNull(fs.getClient().getFileInfo(ecFile.toString()).getErasureCodingPolicy());
  // list "/ec"
  DirectoryListing listing = fs.getClient().listPaths(dir.toString(), new byte[0], false);
  HdfsFileStatus[] files = listing.getPartialListing();
  assertEquals(2, files.length);
  // the listing is always sorted according to the local name
  assertEquals(ecSubDir.getName(), files[0].getLocalName());
  assertNotNull(files[0].getErasureCodingPolicy()); // ecSubDir
  assertEquals(replicatedFile.getName(), files[1].getLocalName());
  assertNull(files[1].getErasureCodingPolicy()); // replicatedFile
  // list "/ec/ecSubDir"
  files = fs.getClient().listPaths(ecSubDir.toString(), new byte[0], false).getPartialListing();
  assertEquals(1, files.length);
  assertEquals(ecFile.getName(), files[0].getLocalName());
  assertNotNull(files[0].getErasureCodingPolicy()); // ecFile
  // list "/"
  files = fs.getClient().listPaths("/", new byte[0], false).getPartialListing();
  assertEquals(1, files.length);
  assertEquals(dir.getName(), files[0].getLocalName()); // ec
  assertNotNull(files[0].getErasureCodingPolicy());
  // rename "/ec/ecSubDir/ecFile" to "/ecFile"
  assertTrue(fs.rename(ecFile, new Path("/ecFile")));
  files = fs.getClient().listPaths("/", new byte[0], false).getPartialListing();
  assertEquals(2, files.length);
  assertEquals(dir.getName(), files[0].getLocalName()); // ec
  assertNotNull(files[0].getErasureCodingPolicy());
  assertEquals(ecFile.getName(), files[1].getLocalName());
  assertNotNull(files[1].getErasureCodingPolicy());
}
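The test drives DFSClient.listPaths directly and inspects getErasureCodingPolicy() on each returned HdfsFileStatus. A hypothetical snippet in the same spirit (class and method names invented; reads only the first batch, so a real tool would loop on hasMore() as in the earlier examples) that prints which children of a directory are erasure coded:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

/** Hypothetical snippet: report the EC policy of each child in a directory. */
public class EcListing {
  public static void printEcInfo(DistributedFileSystem fs, Path dir) throws IOException {
    // first batch only; loop with getLastName()/hasMore() for very large directories
    DirectoryListing listing = fs.getClient().listPaths(dir.toString(), new byte[0], false);
    for (HdfsFileStatus status : listing.getPartialListing()) {
      System.out.println(status.getLocalName() + " -> "
          + (status.getErasureCodingPolicy() == null
              ? "replicated"
              : status.getErasureCodingPolicy().getName()));
    }
  }
}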