
Example 16 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache, in class SFTPFileSystem, method getFileStatus.

/**
   * Convenience method, so that we don't open a new connection when using this
   * method from within another method. Otherwise every API invocation incurs
   * the overhead of opening/closing a TCP connection.
   */
@SuppressWarnings("unchecked")
private FileStatus getFileStatus(ChannelSftp client, Path file) throws IOException {
    FileStatus fileStat = null;
    Path workDir;
    try {
        workDir = new Path(client.pwd());
    } catch (SftpException e) {
        throw new IOException(e);
    }
    Path absolute = makeAbsolute(workDir, file);
    Path parentPath = absolute.getParent();
    if (parentPath == null) {
        // root directory
        // Length of root directory on server not known
        long length = -1;
        boolean isDir = true;
        int blockReplication = 1;
        // Block Size not known.
        long blockSize = DEFAULT_BLOCK_SIZE;
        // Modification time of root directory not known.
        long modTime = -1;
        Path root = new Path("/");
        return new FileStatus(length, isDir, blockReplication, blockSize, modTime, root.makeQualified(this.getUri(), this.getWorkingDirectory()));
    }
    String pathName = parentPath.toUri().getPath();
    Vector<LsEntry> sftpFiles;
    try {
        sftpFiles = (Vector<LsEntry>) client.ls(pathName);
    } catch (SftpException e) {
        throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
    }
    if (sftpFiles != null) {
        for (LsEntry sftpFile : sftpFiles) {
            if (sftpFile.getFilename().equals(file.getName())) {
                // file found in directory
                fileStat = getFileStatus(client, sftpFile, parentPath);
                break;
            }
        }
        if (fileStat == null) {
            throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
        }
    } else {
        throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
    }
    return fileStat;
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), SftpException (com.jcraft.jsch.SftpException), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException), LsEntry (com.jcraft.jsch.ChannelSftp.LsEntry)
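
A minimal sketch of how such a private overload is typically exposed, assuming connect() and disconnect(ChannelSftp) helper methods (those names are assumptions, not quoted from the Hadoop source): the public entry point obtains a channel once and hands it down, so nested calls reuse the same TCP connection.

// Sketch only; connect()/disconnect() are assumed helpers that open and
// release the SFTP channel around a single public API invocation.
@Override
public FileStatus getFileStatus(Path f) throws IOException {
    ChannelSftp channel = connect();       // one connection per public call
    try {
        return getFileStatus(channel, f);  // nested calls reuse the channel
    } finally {
        disconnect(channel);               // always release the channel
    }
}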

Example 17 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache, in class FTPFileSystem, method delete.

/**
   * Convenience method, so that we don't open a new connection when using this
   * method from within another method. Otherwise every API invocation incurs
   * the overhead of opening/closing a TCP connection.
   */
private boolean delete(FTPClient client, Path file, boolean recursive) throws IOException {
    Path workDir = new Path(client.printWorkingDirectory());
    Path absolute = makeAbsolute(workDir, file);
    String pathName = absolute.toUri().getPath();
    try {
        FileStatus fileStat = getFileStatus(client, absolute);
        if (fileStat.isFile()) {
            return client.deleteFile(pathName);
        }
    } catch (FileNotFoundException e) {
        // the file is not there
        return false;
    }
    FileStatus[] dirEntries = listStatus(client, absolute);
    if (dirEntries != null && dirEntries.length > 0 && !recursive) {
        throw new IOException("Directory: " + file + " is not empty.");
    }
    for (FileStatus dirEntry : dirEntries) {
        delete(client, new Path(absolute, dirEntry.getPath()), recursive);
    }
    return client.removeDirectory(pathName);
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException)
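
For context, a hedged caller-side sketch of the contract this helper backs through the public FileSystem.delete(Path, boolean); the ftp:// URI and paths below are hypothetical:

// Hypothetical URI and path; recursive delete routed through the public API.
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(URI.create("ftp://user:secret@ftp.example.com/"), conf);
boolean deleted = fs.delete(new Path("/uploads/stale"), true);
// With recursive == false, deleting a non-empty directory throws
// IOException, mirroring the dirEntries check above.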

Example 18 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache, in class PathData, method expandAsGlob.

/**
   * Expand the given path as a glob pattern.  Non-existent paths do not
   * throw an exception because creation commands like touch and mkdir need
   * to create them.  The "stat" field will be null if the path does not
   * exist.
   * @param pattern the pattern to expand as a glob
   * @param conf the hadoop configuration
   * @return list of {@link PathData} objects. If the pattern is not a glob
   * and does not exist, the list will contain a single PathData with a null
   * stat.
   * @throws IOException if anything else goes wrong
   */
public static PathData[] expandAsGlob(String pattern, Configuration conf) throws IOException {
    Path globPath = new Path(pattern);
    FileSystem fs = globPath.getFileSystem(conf);
    FileStatus[] stats = fs.globStatus(globPath);
    PathData[] items = null;
    if (stats == null) {
        // remove any quoting in the glob pattern
        pattern = pattern.replaceAll("\\\\(.)", "$1");
        // not a glob & file not found, so add the path with a null stat
        items = new PathData[] { new PathData(fs, pattern, null) };
    } else {
        // figure out what type of glob path was given, will convert globbed
        // paths to match the type to preserve relativity
        PathType globType;
        URI globUri = globPath.toUri();
        if (globUri.getScheme() != null) {
            globType = PathType.HAS_SCHEME;
        } else if (!globUri.getPath().isEmpty() && new Path(globUri.getPath()).isAbsolute()) {
            globType = PathType.SCHEMELESS_ABSOLUTE;
        } else {
            globType = PathType.RELATIVE;
        }
        // convert stats to PathData
        items = new PathData[stats.length];
        int i = 0;
        for (FileStatus stat : stats) {
            URI matchUri = stat.getPath().toUri();
            String globMatch = null;
            switch (globType) {
                case HAS_SCHEME:
                    // use as-is, but remove authority if necessary
                    if (globUri.getAuthority() == null) {
                        matchUri = removeAuthority(matchUri);
                    }
                    globMatch = uriToString(matchUri, false);
                    break;
                case SCHEMELESS_ABSOLUTE:
                    // take just the uri's path
                    globMatch = matchUri.getPath();
                    break;
                case RELATIVE:
                    // make it relative to the current working dir
                    URI cwdUri = fs.getWorkingDirectory().toUri();
                    globMatch = relativize(cwdUri, matchUri, stat.isDirectory());
                    break;
            }
            items[i++] = new PathData(fs, globMatch, stat);
        }
    }
    Arrays.sort(items);
    return items;
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), FileSystem (org.apache.hadoop.fs.FileSystem), LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem), URI (java.net.URI)
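
A short usage sketch (the glob pattern is hypothetical): a non-glob, non-existent path still yields one PathData whose stat is null, which callers must check before dereferencing.

// Expand a glob; tolerate paths that do not exist yet.
PathData[] items = PathData.expandAsGlob("/user/*/logs/*.log", conf);
for (PathData item : items) {
    if (item.stat == null) {
        System.out.println(item + ": does not exist yet");
    } else {
        System.out.println(item + ": " + item.stat.getLen() + " bytes");
    }
}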

Example 19 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache, in class Stat, method processPath.

@Override
protected void processPath(PathData item) throws IOException {
    FileStatus stat = item.stat;
    StringBuilder buf = new StringBuilder();
    char[] fmt = format.toCharArray();
    for (int i = 0; i < fmt.length; ++i) {
        if (fmt[i] != '%') {
            buf.append(fmt[i]);
        } else {
            // a trailing '%' is silently dropped
            if (i + 1 == fmt.length)
                break;
            switch(fmt[++i]) {
                case 'a':
                    buf.append(stat.getPermission().toOctal());
                    break;
                case 'A':
                    buf.append(stat.getPermission());
                    break;
                case 'b':
                    buf.append(stat.getLen());
                    break;
                case 'F':
                    buf.append(stat.isDirectory() ? "directory" : (stat.isFile() ? "regular file" : "symlink"));
                    break;
                case 'g':
                    buf.append(stat.getGroup());
                    break;
                case 'n':
                    buf.append(item.path.getName());
                    break;
                case 'o':
                    buf.append(stat.getBlockSize());
                    break;
                case 'r':
                    buf.append(stat.getReplication());
                    break;
                case 'u':
                    buf.append(stat.getOwner());
                    break;
                case 'y':
                    buf.append(timeFmt.format(new Date(stat.getModificationTime())));
                    break;
                case 'Y':
                    buf.append(stat.getModificationTime());
                    break;
                default:
                    // this leaves %<unknown> alone, which causes the potential for
                    // future format options to break strings; should use %% to
                    // escape percents
                    buf.append(fmt[i]);
                    break;
            }
        }
    }
    out.println(buf.toString());
}
Also used: FileStatus (org.apache.hadoop.fs.FileStatus), Date (java.util.Date)
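
The specifiers handled above can be exercised end to end through FsShell (uses org.apache.hadoop.fs.FsShell and org.apache.hadoop.util.ToolRunner); a sketch with a hypothetical path, equivalent to running hadoop fs -stat "%F %u:%g %b %y %n" /tmp/file on the command line:

// Unknown specifiers pass through unchanged; use %% for a literal percent.
int exit = ToolRunner.run(new Configuration(), new FsShell(),
    new String[] { "-stat", "%F %u:%g %b %y %n", "/tmp/file" });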

Example 20 with FileStatus

Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache, in class BaseExpression, method getFileStatus.

/**
   * Returns the {@link FileStatus} from the {@link PathData} item. If the
   * current options require links to be followed then the returned file status
   * is that of the linked file.
   *
   * @param item
   *          PathData
   * @param depth
   *          current depth in the directory tree being processed
   * @return FileStatus of the item, or of the link target when links are followed
   * @throws IOException if the status of a linked file cannot be retrieved
protected FileStatus getFileStatus(PathData item, int depth) throws IOException {
    FileStatus fileStatus = item.stat;
    if (fileStatus.isSymlink()) {
        if (options.isFollowLink() || (options.isFollowArgLink() && (depth == 0))) {
            Path linkedFile = item.fs.resolvePath(fileStatus.getSymlink());
            fileStatus = getFileSystem(item).getFileStatus(linkedFile);
        }
    }
    return fileStatus;
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus)
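
A sketch of how a find-expression subclass might build on this (the class and its predicate are hypothetical, not from the Hadoop source): because the status is resolved through getFileStatus(item, depth), the symlink-follow options transparently apply to linked items.

// Hypothetical expression: matches directories, honoring symlink-follow options.
class IsDirectoryExpression extends BaseExpression {
    @Override
    public Result apply(PathData item, int depth) throws IOException {
        FileStatus status = getFileStatus(item, depth);
        return status.isDirectory() ? Result.PASS : Result.FAIL;
    }
}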

Aggregations

FileStatus (org.apache.hadoop.fs.FileStatus): 1156
Path (org.apache.hadoop.fs.Path): 910
FileSystem (org.apache.hadoop.fs.FileSystem): 417
Test (org.junit.Test): 372
IOException (java.io.IOException): 296
Configuration (org.apache.hadoop.conf.Configuration): 187
ArrayList (java.util.ArrayList): 175
FileNotFoundException (java.io.FileNotFoundException): 136
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 105
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 86
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 67
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 65
HashMap (java.util.HashMap): 54
File (java.io.File): 41
URI (java.net.URI): 41
PathFilter (org.apache.hadoop.fs.PathFilter): 38
BufferedReader (java.io.BufferedReader): 30
InputStreamReader (java.io.InputStreamReader): 30
BlockLocation (org.apache.hadoop.fs.BlockLocation): 30
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 30