Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class DistributedFileSystem, method listStatusInternal.
private FileStatus[] listStatusInternal(Path p) throws IOException {
  String src = getPathName(p);
  // fetch the first batch of entries in the directory
  DirectoryListing thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME);
  if (thisListing == null) {
    // the directory does not exist
    throw new FileNotFoundException("File " + p + " does not exist.");
  }
  HdfsFileStatus[] partialListing = thisListing.getPartialListing();
  if (!thisListing.hasMore()) {
    // got all entries of the directory
    FileStatus[] stats = new FileStatus[partialListing.length];
    for (int i = 0; i < partialListing.length; i++) {
      stats[i] = partialListing[i].makeQualified(getUri(), p);
    }
    statistics.incrementReadOps(1);
    storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
    return stats;
  }
  // The directory has more entries than fit in one batch, so fetch the rest.
  // Estimate the total number of entries in the directory.
  int totalNumEntries =
      partialListing.length + thisListing.getRemainingEntries();
  ArrayList<FileStatus> listing = new ArrayList<>(totalNumEntries);
  // add the first batch of entries to the array list
  for (HdfsFileStatus fileStatus : partialListing) {
    listing.add(fileStatus.makeQualified(getUri(), p));
  }
  statistics.incrementLargeReadOps(1);
  storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
  // now fetch more entries
  do {
    thisListing = dfs.listPaths(src, thisListing.getLastName());
    if (thisListing == null) {
      // the directory has been deleted
      throw new FileNotFoundException("File " + p + " does not exist.");
    }
    partialListing = thisListing.getPartialListing();
    for (HdfsFileStatus fileStatus : partialListing) {
      listing.add(fileStatus.makeQualified(getUri(), p));
    }
    statistics.incrementLargeReadOps(1);
    storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
  } while (thisListing.hasMore());
  return listing.toArray(new FileStatus[listing.size()]);
}
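A minimal caller-side sketch of how this paged listing is reached through the public FileSystem API. The NameNode URI and directory path are hypothetical; the paging above is invisible to the caller, which simply receives the full array.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.net.URI;

public class ListDirExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address.
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
    // listStatus pages through DirectoryListing batches internally,
    // so callers always see the complete directory contents.
    for (FileStatus status : fs.listStatus(new Path("/user/example"))) {
      System.out.println(status.getPath() + " " + status.getLen());
    }
    fs.close();
  }
}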
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class DistributedFileSystem, method getFileLinkStatus.
@Override
public FileStatus getFileLinkStatus(final Path f) throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_FILE_LINK_STATUS);
  final Path absF = fixRelativePart(f);
  FileStatus status = new FileSystemLinkResolver<FileStatus>() {
    @Override
    public FileStatus doCall(final Path p) throws IOException {
      HdfsFileStatus fi = dfs.getFileLinkInfo(getPathName(p));
      if (fi != null) {
        return fi.makeQualified(getUri(), p);
      } else {
        throw new FileNotFoundException("File does not exist: " + p);
      }
    }

    @Override
    public FileStatus next(final FileSystem fs, final Path p)
        throws IOException {
      return fs.getFileLinkStatus(p);
    }
  }.resolve(this, absF);
  // Fully-qualify the symlink
  if (status.isSymlink()) {
    Path targetQual = FSLinkResolver.qualifySymlinkTarget(this.getUri(),
        status.getPath(), status.getSymlink());
    status.setSymlink(targetQual);
  }
  return status;
}
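A minimal sketch of reading a symlink's own status rather than its target. The link path is hypothetical, and FileContext is used here because it is the usual client entry point for symlink operations; the default filesystem is taken from the Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

public class LinkStatusExample {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path link = new Path("/user/example/data-link");  // hypothetical symlink
    // getFileLinkStatus returns the status of the link itself;
    // getFileStatus would resolve through it to the target.
    FileStatus linkStatus = fc.getFileLinkStatus(link);
    if (linkStatus.isSymlink()) {
      System.out.println(link + " -> " + linkStatus.getSymlink());
    }
  }
}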
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class WebHdfsFileSystem, method listStatusBatch.
@Override
public DirectoryEntries listStatusBatch(Path f, byte[] token)
    throws FileNotFoundException, IOException {
  byte[] prevKey = EMPTY_ARRAY;
  if (token != null) {
    prevKey = token;
  }
  DirectoryListing listing = new FsPathResponseRunner<DirectoryListing>(
      GetOpParam.Op.LISTSTATUS_BATCH, f,
      new StartAfterParam(new String(prevKey, Charsets.UTF_8))) {
    @Override
    DirectoryListing decodeResponse(Map<?, ?> json) throws IOException {
      return JsonUtilClient.toDirectoryListing(json);
    }
  }.run();
  // Qualify the returned FileStatus array
  final HdfsFileStatus[] statuses = listing.getPartialListing();
  FileStatus[] qualified = new FileStatus[statuses.length];
  for (int i = 0; i < statuses.length; i++) {
    qualified[i] = makeQualified(statuses[i], f);
  }
  return new DirectoryEntries(qualified, listing.getLastName(),
      listing.hasMore());
}
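A minimal sketch of how listStatusBatch is normally consumed: FileSystem#listStatusIterator drives it, requesting successive batches with the startAfter token carried in DirectoryEntries. The WebHDFS endpoint and path below are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import java.net.URI;

public class BatchListingExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical WebHDFS endpoint.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:9870"),
        new Configuration());
    // The iterator fetches one batch at a time instead of the whole listing.
    RemoteIterator<FileStatus> it =
        fs.listStatusIterator(new Path("/user/example"));
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
    fs.close();
  }
}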
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class DFSOutputStream, method newStreamForCreate.
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
    FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
    short replication, long blockSize, Progressable progress,
    DataChecksum checksum, String[] favoredNodes) throws IOException {
  try (TraceScope ignored =
      dfsClient.newPathTraceScope("newStreamForCreate", src)) {
    HdfsFileStatus stat = null;
    // Retry the create if we get a RetryStartFileException up to a maximum
    // number of times
    boolean shouldRetry = true;
    int retryCount = CREATE_RETRY_COUNT;
    while (shouldRetry) {
      shouldRetry = false;
      try {
        stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
            new EnumSetWritable<>(flag), createParent, replication,
            blockSize, SUPPORTED_CRYPTO_VERSIONS);
        break;
      } catch (RemoteException re) {
        IOException e = re.unwrapRemoteException(
            AccessControlException.class,
            DSQuotaExceededException.class,
            QuotaByStorageTypeExceededException.class,
            FileAlreadyExistsException.class,
            FileNotFoundException.class,
            ParentNotDirectoryException.class,
            NSQuotaExceededException.class,
            RetryStartFileException.class,
            SafeModeException.class,
            UnresolvedPathException.class,
            SnapshotAccessControlException.class,
            UnknownCryptoProtocolVersionException.class);
        if (e instanceof RetryStartFileException) {
          if (retryCount > 0) {
            shouldRetry = true;
            retryCount--;
          } else {
            throw new IOException("Too many retries because of encryption"
                + " zone operations", e);
          }
        } else {
          throw e;
        }
      }
    }
    Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
    final DFSOutputStream out;
    if (stat.getErasureCodingPolicy() != null) {
      out = new DFSStripedOutputStream(dfsClient, src, stat, flag, progress,
          checksum, favoredNodes);
    } else {
      out = new DFSOutputStream(dfsClient, src, stat, flag, progress,
          checksum, favoredNodes, true);
    }
    out.start();
    return out;
  }
}
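A minimal client-side sketch of the create path that ends up in newStreamForCreate via FileSystem#create and DFSClient#create. The NameNode URI, file path, and overwrite flag are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.net.URI;
import java.nio.charset.StandardCharsets;

public class CreateFileExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"),
        new Configuration());
    // FileSystem#create leads to DFSOutputStream.newStreamForCreate, which
    // retries on RetryStartFileException raised by encryption zone races.
    try (FSDataOutputStream out =
        fs.create(new Path("/user/example/newfile"), true /* overwrite */)) {
      out.write("hello hdfs".getBytes(StandardCharsets.UTF_8));
    }
    fs.close();
  }
}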
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class Hdfs, method listStatus.
@Override
public FileStatus[] listStatus(Path f)
    throws IOException, UnresolvedLinkException {
  String src = getUriPath(f);
  // fetch the first batch of entries in the directory
  DirectoryListing thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME);
  if (thisListing == null) {
    // the directory does not exist
    throw new FileNotFoundException("File " + f + " does not exist.");
  }
  HdfsFileStatus[] partialListing = thisListing.getPartialListing();
  if (!thisListing.hasMore()) {
    // got all entries of the directory
    FileStatus[] stats = new FileStatus[partialListing.length];
    for (int i = 0; i < partialListing.length; i++) {
      stats[i] = partialListing[i].makeQualified(getUri(), f);
    }
    return stats;
  }
  // The directory has more entries than fit in one batch, so fetch the rest.
  // Estimate the total number of entries in the directory.
  int totalNumEntries =
      partialListing.length + thisListing.getRemainingEntries();
  ArrayList<FileStatus> listing = new ArrayList<FileStatus>(totalNumEntries);
  // add the first batch of entries to the array list
  for (HdfsFileStatus fileStatus : partialListing) {
    listing.add(fileStatus.makeQualified(getUri(), f));
  }
  // now fetch more entries
  do {
    thisListing = dfs.listPaths(src, thisListing.getLastName());
    if (thisListing == null) {
      // the directory has been deleted
      throw new FileNotFoundException("File " + f + " does not exist.");
    }
    partialListing = thisListing.getPartialListing();
    for (HdfsFileStatus fileStatus : partialListing) {
      listing.add(fileStatus.makeQualified(getUri(), f));
    }
  } while (thisListing.hasMore());
  return listing.toArray(new FileStatus[listing.size()]);
}
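A minimal sketch of reaching this Hdfs#listStatus implementation: Hdfs is the AbstractFileSystem that backs FileContext for hdfs:// URIs, so listing a directory through FileContext exercises the paging above. The path is hypothetical and the default filesystem is assumed to come from the Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

public class FileContextListExample {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    // Util#listStatus returns the full FileStatus array, paging internally.
    for (FileStatus status : fc.util().listStatus(new Path("/user/example"))) {
      System.out.println(status.getPath());
    }
  }
}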