
Example 66 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in the Apache Hadoop project.

From the class DistributedFileSystem, method listStatusInternal.

private FileStatus[] listStatusInternal(Path p) throws IOException {
    String src = getPathName(p);
    // fetch the first batch of entries in the directory
    DirectoryListing thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME);
    if (thisListing == null) {
        // the directory does not exist
        throw new FileNotFoundException("File " + p + " does not exist.");
    }
    HdfsFileStatus[] partialListing = thisListing.getPartialListing();
    if (!thisListing.hasMore()) {
        // got all entries of the directory
        FileStatus[] stats = new FileStatus[partialListing.length];
        for (int i = 0; i < partialListing.length; i++) {
            stats[i] = partialListing[i].makeQualified(getUri(), p);
        }
        statistics.incrementReadOps(1);
        storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
        return stats;
    }
    // The directory is too large to return in a single batch, so more
    // entries must be fetched; estimate the total number of entries first.
    int totalNumEntries = partialListing.length + thisListing.getRemainingEntries();
    ArrayList<FileStatus> listing = new ArrayList<>(totalNumEntries);
    // add the first batch of entries to the array list
    for (HdfsFileStatus fileStatus : partialListing) {
        listing.add(fileStatus.makeQualified(getUri(), p));
    }
    statistics.incrementLargeReadOps(1);
    storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
    // now fetch more entries
    do {
        thisListing = dfs.listPaths(src, thisListing.getLastName());
        if (thisListing == null) {
            // the directory was deleted while we were iterating over it
            throw new FileNotFoundException("File " + p + " does not exist.");
        }
        partialListing = thisListing.getPartialListing();
        for (HdfsFileStatus fileStatus : partialListing) {
            listing.add(fileStatus.makeQualified(getUri(), p));
        }
        statistics.incrementLargeReadOps(1);
        storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
    } while (thisListing.hasMore());
    return listing.toArray(new FileStatus[listing.size()]);
}
Also used: DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing), FileStatus (org.apache.hadoop.fs.FileStatus), LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), HdfsLocatedFileStatus (org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus), FileNotFoundException (java.io.FileNotFoundException), ArrayList (java.util.ArrayList)
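
For context, a minimal caller sketch: the private method above sits behind the public FileSystem.listStatus contract, so applications simply call listStatus and receive the fully assembled, qualified array. The NameNode URI and directory below are placeholders, not taken from the source.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListDirSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder NameNode URI and directory; substitute your own.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf)) {
            for (FileStatus stat : fs.listStatus(new Path("/user/alice"))) {
                System.out.println(stat.getPath() + "\t" + stat.getLen());
            }
        }
    }
}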

Example 67 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in the Apache Hadoop project.

From the class DistributedFileSystem, method getFileLinkStatus.

@Override
public FileStatus getFileLinkStatus(final Path f) throws IOException {
    statistics.incrementReadOps(1);
    storageStatistics.incrementOpCounter(OpType.GET_FILE_LINK_STATUS);
    final Path absF = fixRelativePart(f);
    FileStatus status = new FileSystemLinkResolver<FileStatus>() {

        @Override
        public FileStatus doCall(final Path p) throws IOException {
            HdfsFileStatus fi = dfs.getFileLinkInfo(getPathName(p));
            if (fi != null) {
                return fi.makeQualified(getUri(), p);
            } else {
                throw new FileNotFoundException("File does not exist: " + p);
            }
        }

        @Override
        public FileStatus next(final FileSystem fs, final Path p) throws IOException {
            return fs.getFileLinkStatus(p);
        }
    }.resolve(this, absF);
    // Fully-qualify the symlink
    if (status.isSymlink()) {
        Path targetQual = FSLinkResolver.qualifySymlinkTarget(this.getUri(), status.getPath(), status.getSymlink());
        status.setSymlink(targetQual);
    }
    return status;
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), HdfsLocatedFileStatus (org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus), FileSystem (org.apache.hadoop.fs.FileSystem), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException)
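
A hedged usage sketch (the path is hypothetical): getFileLinkStatus returns the status of the link itself rather than its target, so callers typically branch on isSymlink() before reading the qualified target.

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LinkStatusSketch {
    // Assumes an already-initialized FileSystem bound to HDFS; the path is a placeholder.
    static void printLinkTarget(FileSystem fs) throws IOException {
        FileStatus status = fs.getFileLinkStatus(new Path("/user/alice/link"));
        if (status.isSymlink()) {
            // getSymlink() returns the fully qualified target set by the code above.
            System.out.println(status.getPath() + " -> " + status.getSymlink());
        }
    }
}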

Example 68 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in the Apache Hadoop project.

From the class WebHdfsFileSystem, method listStatusBatch.

@Override
public DirectoryEntries listStatusBatch(Path f, byte[] token) throws FileNotFoundException, IOException {
    byte[] prevKey = EMPTY_ARRAY;
    if (token != null) {
        prevKey = token;
    }
    DirectoryListing listing = new FsPathResponseRunner<DirectoryListing>(
            GetOpParam.Op.LISTSTATUS_BATCH, f,
            new StartAfterParam(new String(prevKey, Charsets.UTF_8))) {

        @Override
        DirectoryListing decodeResponse(Map<?, ?> json) throws IOException {
            return JsonUtilClient.toDirectoryListing(json);
        }
    }.run();
    // Qualify the returned FileStatus array
    final HdfsFileStatus[] statuses = listing.getPartialListing();
    FileStatus[] qualified = new FileStatus[statuses.length];
    for (int i = 0; i < statuses.length; i++) {
        qualified[i] = makeQualified(statuses[i], f);
    }
    return new DirectoryEntries(qualified, listing.getLastName(), listing.hasMore());
}
Also used: DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing), FileStatus (org.apache.hadoop.fs.FileStatus), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), IOException (java.io.IOException)
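
A hedged consumer sketch (placeholder directory): callers rarely invoke listStatusBatch directly. FileSystem.listStatusIterator drives it, feeding each returned token back in until hasMore() reports false.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class BatchListingSketch {
    // Assumes fs is a WebHdfsFileSystem instance; the directory is a placeholder.
    static void listLargeDirectory(FileSystem fs) throws java.io.IOException {
        RemoteIterator<FileStatus> it = fs.listStatusIterator(new Path("/big/dir"));
        while (it.hasNext()) {
            // Each hasNext()/next() pair may trigger another LISTSTATUS_BATCH request.
            System.out.println(it.next().getPath());
        }
    }
}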

Example 69 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in the Apache Hadoop project.

From the class DFSOutputStream, method newStreamForCreate.

static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
        FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
        short replication, long blockSize, Progressable progress,
        DataChecksum checksum, String[] favoredNodes) throws IOException {
    try (TraceScope ignored = dfsClient.newPathTraceScope("newStreamForCreate", src)) {
        HdfsFileStatus stat = null;
        // Retry the create if we get a RetryStartFileException up to a maximum
        // number of times
        boolean shouldRetry = true;
        int retryCount = CREATE_RETRY_COUNT;
        while (shouldRetry) {
            shouldRetry = false;
            try {
                stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
                        new EnumSetWritable<>(flag), createParent, replication,
                        blockSize, SUPPORTED_CRYPTO_VERSIONS);
                break;
            } catch (RemoteException re) {
                IOException e = re.unwrapRemoteException(
                        AccessControlException.class,
                        DSQuotaExceededException.class,
                        QuotaByStorageTypeExceededException.class,
                        FileAlreadyExistsException.class,
                        FileNotFoundException.class,
                        ParentNotDirectoryException.class,
                        NSQuotaExceededException.class,
                        RetryStartFileException.class,
                        SafeModeException.class,
                        UnresolvedPathException.class,
                        SnapshotAccessControlException.class,
                        UnknownCryptoProtocolVersionException.class);
                if (e instanceof RetryStartFileException) {
                    if (retryCount > 0) {
                        shouldRetry = true;
                        retryCount--;
                    } else {
                        throw new IOException("Too many retries because of encryption" + " zone operations", e);
                    }
                } else {
                    throw e;
                }
            }
        }
        Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
        final DFSOutputStream out;
        if (stat.getErasureCodingPolicy() != null) {
            out = new DFSStripedOutputStream(dfsClient, src, stat, flag, progress, checksum, favoredNodes);
        } else {
            out = new DFSOutputStream(dfsClient, src, stat, flag, progress, checksum, favoredNodes, true);
        }
        out.start();
        return out;
    }
}
Also used: EnumSetWritable (org.apache.hadoop.io.EnumSetWritable), QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException), FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException), TraceScope (org.apache.htrace.core.TraceScope), FileNotFoundException (java.io.FileNotFoundException), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), RetryStartFileException (org.apache.hadoop.hdfs.server.namenode.RetryStartFileException), ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException), NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException), SafeModeException (org.apache.hadoop.hdfs.server.namenode.SafeModeException), RemoteException (org.apache.hadoop.ipc.RemoteException), UnresolvedPathException (org.apache.hadoop.hdfs.protocol.UnresolvedPathException)
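
A hedged caller sketch (placeholder path and payload): the retry loop above sits behind DistributedFileSystem.create, so client code just opens a stream and writes, and a RetryStartFileException caused by a concurrent encryption-zone operation is retried transparently.

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateFileSketch {
    // Assumes fs is backed by HDFS; the path and contents are placeholders.
    static void writeFile(FileSystem fs) throws java.io.IOException {
        // create() eventually reaches newStreamForCreate via the DFSClient.
        try (FSDataOutputStream out = fs.create(new Path("/user/alice/data.txt"), true)) {
            out.writeBytes("hello, hdfs\n");
        }
    }
}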

Example 70 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in the Apache Hadoop project.

From the class Hdfs, method listStatus.

@Override
public FileStatus[] listStatus(Path f) throws IOException, UnresolvedLinkException {
    String src = getUriPath(f);
    // fetch the first batch of entries in the directory
    DirectoryListing thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME);
    if (thisListing == null) {
        // the directory does not exist
        throw new FileNotFoundException("File " + f + " does not exist.");
    }
    HdfsFileStatus[] partialListing = thisListing.getPartialListing();
    if (!thisListing.hasMore()) {
        // got all entries of the directory
        FileStatus[] stats = new FileStatus[partialListing.length];
        for (int i = 0; i < partialListing.length; i++) {
            stats[i] = partialListing[i].makeQualified(getUri(), f);
        }
        return stats;
    }
    // The directory is too large to return in a single batch, so more
    // entries must be fetched; estimate the total number of entries first.
    int totalNumEntries = partialListing.length + thisListing.getRemainingEntries();
    ArrayList<FileStatus> listing = new ArrayList<FileStatus>(totalNumEntries);
    // add the first batch of entries to the array list
    for (HdfsFileStatus fileStatus : partialListing) {
        listing.add(fileStatus.makeQualified(getUri(), f));
    }
    // now fetch more entries
    do {
        thisListing = dfs.listPaths(src, thisListing.getLastName());
        if (thisListing == null) {
            // the directory was deleted while we were iterating over it
            throw new FileNotFoundException("File " + f + " does not exist.");
        }
        partialListing = thisListing.getPartialListing();
        for (HdfsFileStatus fileStatus : partialListing) {
            listing.add(fileStatus.makeQualified(getUri(), f));
        }
    } while (thisListing.hasMore());
    return listing.toArray(new FileStatus[listing.size()]);
}
Also used: DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), HdfsLocatedFileStatus (org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus), FileNotFoundException (java.io.FileNotFoundException), ArrayList (java.util.ArrayList)
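
A hedged sketch of how this path is normally reached (placeholder directory): Hdfs is the AbstractFileSystem implementation behind FileContext, so applications arrive here through FileContext rather than by instantiating Hdfs directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class FileContextListingSketch {
    public static void main(String[] args) throws Exception {
        // getFileContext resolves the default AbstractFileSystem (Hdfs for hdfs:// URIs).
        FileContext fc = FileContext.getFileContext(new Configuration());
        // Placeholder directory; substitute a real one.
        RemoteIterator<FileStatus> it = fc.listStatus(new Path("/user/alice"));
        while (it.hasNext()) {
            System.out.println(it.next().getPath());
        }
    }
}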

Aggregations

HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)124 Test (org.junit.Test)51 FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle)34 IOException (java.io.IOException)28 InetSocketAddress (java.net.InetSocketAddress)28 XDR (org.apache.hadoop.oncrpc.XDR)28 AccessControlException (org.apache.hadoop.security.AccessControlException)26 Path (org.apache.hadoop.fs.Path)23 SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)23 FileNotFoundException (java.io.FileNotFoundException)16 DFSClient (org.apache.hadoop.hdfs.DFSClient)11 DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing)11 FsPermission (org.apache.hadoop.fs.permission.FsPermission)10 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)9 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)9 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)8 SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3)8 FileStatus (org.apache.hadoop.fs.FileStatus)7 Matchers.anyString (org.mockito.Matchers.anyString)7 Configuration (org.apache.hadoop.conf.Configuration)6