Use of org.apache.hadoop.fs.FileStatus in project Hadoop by Apache.
The class SFTPFileSystem, method getFileStatus.
/**
* Convenience method, so that we don't open a new connection when using this
* method from within another method. Otherwise every API invocation incurs
* the overhead of opening/closing a TCP connection.
*/
@SuppressWarnings("unchecked")
private FileStatus getFileStatus(ChannelSftp client, Path file)
    throws IOException {
  FileStatus fileStat = null;
  Path workDir;
  try {
    workDir = new Path(client.pwd());
  } catch (SftpException e) {
    throw new IOException(e);
  }
  Path absolute = makeAbsolute(workDir, file);
  Path parentPath = absolute.getParent();
  if (parentPath == null) {
    // root directory
    // Length of root directory on server not known
    long length = -1;
    boolean isDir = true;
    int blockReplication = 1;
    // Block size not known.
    long blockSize = DEFAULT_BLOCK_SIZE;
    // Modification time of root directory not known.
    long modTime = -1;
    Path root = new Path("/");
    return new FileStatus(length, isDir, blockReplication, blockSize,
        modTime,
        root.makeQualified(this.getUri(), this.getWorkingDirectory()));
  }
  String pathName = parentPath.toUri().getPath();
  Vector<LsEntry> sftpFiles;
  try {
    sftpFiles = (Vector<LsEntry>) client.ls(pathName);
  } catch (SftpException e) {
    throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
  }
  if (sftpFiles != null) {
    for (LsEntry sftpFile : sftpFiles) {
      if (sftpFile.getFilename().equals(file.getName())) {
        // file found in directory
        fileStat = getFileStatus(client, sftpFile, parentPath);
        break;
      }
    }
    if (fileStat == null) {
      throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
    }
  } else {
    throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file));
  }
  return fileStat;
}
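For context, the public FileStatus getFileStatus(Path) entry point opens a connection, delegates to this private overload, and disconnects afterwards. A minimal sketch of exercising that path through the generic FileSystem API (the sftp:// URI, credentials, and file path below are hypothetical):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SftpStatusExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical host and credentials carried in the URI. Each public
    // call opens and closes one SFTP connection, which is exactly the
    // overhead the private overload above lets internal callers avoid.
    FileSystem fs =
        FileSystem.get(URI.create("sftp://user:secret@example.com"), conf);
    FileStatus status = fs.getFileStatus(new Path("/data/input.txt"));
    System.out.println(status.getPath() + " len=" + status.getLen());
    fs.close();
  }
}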
Use of org.apache.hadoop.fs.FileStatus in project Hadoop by Apache.
The class FTPFileSystem, method delete.
/**
* Convenience method, so that we don't open a new connection when using this
* method from within another method. Otherwise every API invocation incurs
* the overhead of opening/closing a TCP connection.
*/
private boolean delete(FTPClient client, Path file, boolean recursive)
    throws IOException {
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  String pathName = absolute.toUri().getPath();
  try {
    FileStatus fileStat = getFileStatus(client, absolute);
    if (fileStat.isFile()) {
      return client.deleteFile(pathName);
    }
  } catch (FileNotFoundException e) {
    // the file is not there
    return false;
  }
  FileStatus[] dirEntries = listStatus(client, absolute);
  if (dirEntries != null && dirEntries.length > 0 && !recursive) {
    throw new IOException("Directory: " + file + " is not empty.");
  }
  for (FileStatus dirEntry : dirEntries) {
    delete(client, new Path(absolute, dirEntry.getPath()), recursive);
  }
  return client.removeDirectory(pathName);
}
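A similar sketch for the delete path: FileSystem#delete(Path, boolean) is the public entry point that obtains the FTPClient and delegates to this method. The ftp:// URI, credentials, and directory below are hypothetical:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FtpDeleteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical host and credentials carried in the URI.
    FileSystem fs =
        FileSystem.get(URI.create("ftp://user:secret@example.com"), conf);
    // With recursive = false, a non-empty directory would raise the
    // "Directory: ... is not empty." IOException seen in the method above.
    boolean deleted = fs.delete(new Path("/tmp/old-results"), true);
    System.out.println("deleted: " + deleted);
    fs.close();
  }
}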
Use of org.apache.hadoop.fs.FileStatus in project Hadoop by Apache.
The class PathData, method expandAsGlob.
/**
* Expand the given path as a glob pattern. Non-existent paths do not
* throw an exception because creation commands like touch and mkdir need
* to create them. The "stat" field will be null if the path does not
* exist.
* @param pattern the pattern to expand as a glob
* @param conf the hadoop configuration
* @return list of {@link PathData} objects. If the pattern is not a glob
* and does not exist, the list will contain a single PathData with a null
* stat.
* @throws IOException if anything else goes wrong
*/
public static PathData[] expandAsGlob(String pattern, Configuration conf)
    throws IOException {
  Path globPath = new Path(pattern);
  FileSystem fs = globPath.getFileSystem(conf);
  FileStatus[] stats = fs.globStatus(globPath);
  PathData[] items = null;
  if (stats == null) {
    // remove any quoting in the glob pattern
    pattern = pattern.replaceAll("\\\\(.)", "$1");
    // not a glob & file not found, so add the path with a null stat
    items = new PathData[] { new PathData(fs, pattern, null) };
  } else {
    // figure out what type of glob path was given, will convert globbed
    // paths to match the type to preserve relativity
    PathType globType;
    URI globUri = globPath.toUri();
    if (globUri.getScheme() != null) {
      globType = PathType.HAS_SCHEME;
    } else if (!globUri.getPath().isEmpty()
        && new Path(globUri.getPath()).isAbsolute()) {
      globType = PathType.SCHEMELESS_ABSOLUTE;
    } else {
      globType = PathType.RELATIVE;
    }
    // convert stats to PathData
    items = new PathData[stats.length];
    int i = 0;
    for (FileStatus stat : stats) {
      URI matchUri = stat.getPath().toUri();
      String globMatch = null;
      switch (globType) {
        case HAS_SCHEME:
          // use as-is, but remove authority if necessary
          if (globUri.getAuthority() == null) {
            matchUri = removeAuthority(matchUri);
          }
          globMatch = uriToString(matchUri, false);
          break;
        case SCHEMELESS_ABSOLUTE:
          // take just the uri's path
          globMatch = matchUri.getPath();
          break;
        case RELATIVE:
          // make it relative to the current working dir
          URI cwdUri = fs.getWorkingDirectory().toUri();
          globMatch = relativize(cwdUri, matchUri, stat.isDirectory());
          break;
      }
      items[i++] = new PathData(fs, globMatch, stat);
    }
  }
  Arrays.sort(items);
  return items;
}
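Since expandAsGlob is public and static, it can be called directly. A short sketch (the glob pattern is a hypothetical example) showing how a non-matching, non-glob argument comes back as a single PathData with a null stat:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.shell.PathData;

public class GlobExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Expands against the pattern's FileSystem; relative patterns stay
    // relative and absolute ones stay absolute, per the PathType logic above.
    PathData[] items = PathData.expandAsGlob("logs/2024-*", conf);
    for (PathData item : items) {
      System.out.println(
          item + (item.stat == null ? " (no stat: does not exist)" : ""));
    }
  }
}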
Use of org.apache.hadoop.fs.FileStatus in project Hadoop by Apache.
The class Stat, method processPath.
@Override
protected void processPath(PathData item) throws IOException {
  FileStatus stat = item.stat;
  StringBuilder buf = new StringBuilder();
  char[] fmt = format.toCharArray();
  for (int i = 0; i < fmt.length; ++i) {
    if (fmt[i] != '%') {
      buf.append(fmt[i]);
    } else {
      // this silently drops a trailing %?
      if (i + 1 == fmt.length) {
        break;
      }
      switch (fmt[++i]) {
        case 'a':
          buf.append(stat.getPermission().toOctal());
          break;
        case 'A':
          buf.append(stat.getPermission());
          break;
        case 'b':
          buf.append(stat.getLen());
          break;
        case 'F':
          buf.append(stat.isDirectory() ? "directory"
              : (stat.isFile() ? "regular file" : "symlink"));
          break;
        case 'g':
          buf.append(stat.getGroup());
          break;
        case 'n':
          buf.append(item.path.getName());
          break;
        case 'o':
          buf.append(stat.getBlockSize());
          break;
        case 'r':
          buf.append(stat.getReplication());
          break;
        case 'u':
          buf.append(stat.getOwner());
          break;
        case 'y':
          buf.append(timeFmt.format(new Date(stat.getModificationTime())));
          break;
        case 'Y':
          buf.append(stat.getModificationTime());
          break;
        default:
          // this leaves %<unknown> alone, which causes the potential for
          // future format options to break strings; should use %% to
          // escape percents
          buf.append(fmt[i]);
          break;
      }
    }
  }
  out.println(buf.toString());
}
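These specifiers back the hadoop fs -stat shell command. A hedged sketch of driving it programmatically through FsShell (the format string and path are arbitrary examples):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class StatExample {
  public static void main(String[] args) throws Exception {
    // %F, %u, %g, and %y map to the switch cases above
    // (file type, owner, group, formatted modification time).
    int rc = ToolRunner.run(new FsShell(new Configuration()),
        new String[] {"-stat", "%F %u %g %y", "/tmp"});
    System.exit(rc);
  }
}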
Use of org.apache.hadoop.fs.FileStatus in project Hadoop by Apache.
The class BaseExpression, method getFileStatus.
/**
* Returns the {@link FileStatus} from the {@link PathData} item. If the
* current options require links to be followed then the returned file status
* is that of the linked file.
*
* @param item
*          the PathData item to inspect
* @param depth
*          current depth in the directory traversal
* @return the FileStatus, following the link when required
* @throws IOException if the status of a linked file cannot be retrieved
*/
protected FileStatus getFileStatus(PathData item, int depth)
    throws IOException {
  FileStatus fileStatus = item.stat;
  if (fileStatus.isSymlink()) {
    if (options.isFollowLink() || (options.isFollowArgLink() && (depth == 0))) {
      Path linkedFile = item.fs.resolvePath(fileStatus.getSymlink());
      fileStatus = getFileSystem(item).getFileStatus(linkedFile);
    }
  }
  return fileStatus;
}
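A condensed sketch of the same follow-link decision against a plain FileSystem (the path below is hypothetical): getFileLinkStatus reports the link itself, and resolving plus re-statting yields the target's status, as the method above does when link-following is enabled:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FollowLinkExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus status = fs.getFileLinkStatus(new Path("/tmp/link"));
    if (status.isSymlink()) {
      // Resolve the link target and stat it instead, mirroring the
      // branch in getFileStatus(item, depth) above.
      Path linked = fs.resolvePath(status.getSymlink());
      status = fs.getFileStatus(linked);
    }
    System.out.println(status.getPath() + " isDir=" + status.isDirectory());
  }
}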