Use of org.apache.hadoop.fs.UnresolvedLinkException in project incubator-crail by apache.
The class CrailHDFS, method getFileBlockLocations:
@Override
public BlockLocation[] getFileBlockLocations(Path path, long start, long len)
    throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
  try {
    // Look the file up in Crail and fetch its native block locations.
    CrailBlockLocation[] _locations =
        dfs.lookup(path.toUri().getRawPath()).get().asFile().getBlockLocations(start, len);
    // Translate each CrailBlockLocation into a Hadoop BlockLocation.
    BlockLocation[] locations = new BlockLocation[_locations.length];
    for (int i = 0; i < locations.length; i++) {
      locations[i] = new BlockLocation();
      locations[i].setOffset(_locations[i].getOffset());
      locations[i].setLength(_locations[i].getLength());
      locations[i].setNames(_locations[i].getNames());
      locations[i].setHosts(_locations[i].getHosts());
      locations[i].setTopologyPaths(_locations[i].getTopology());
    }
    return locations;
  } catch (Exception e) {
    // Crail's asynchronous lookup can fail for many reasons; surface them as IOException.
    throw new IOException(e);
  }
}
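Since this signature (with UnresolvedLinkException in the throws clause) is the AbstractFileSystem variant, callers would normally reach it through Hadoop's FileContext API rather than instantiating CrailHDFS directly. A minimal sketch, assuming the fs.AbstractFileSystem.crail.impl binding is configured; the crail:// URI, port, and path are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class CrailBlockLocationDemo {
  public static void main(String[] args) throws Exception {
    // Assumption: the loaded configuration binds the crail:// scheme to CrailHDFS.
    Configuration conf = new Configuration();
    FileContext fc = FileContext.getFileContext(conf);
    Path file = new Path("crail://namenode:9060/data/sample.bin");

    // Query locations for the first 128 MB; this delegates to the method above.
    BlockLocation[] locs = fc.getFileBlockLocations(file, 0L, 128L * 1024 * 1024);
    for (BlockLocation loc : locs) {
      System.out.printf("offset=%d length=%d hosts=%s%n",
          loc.getOffset(), loc.getLength(), String.join(",", loc.getHosts()));
    }
  }
}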
Use of org.apache.hadoop.fs.UnresolvedLinkException in project SSM by Intel-bigdata.
The class SmartFileSystem, method removeDefaultAcl:
@Override
public void removeDefaultAcl(Path path) throws IOException {
  final Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      // First attempt: go through the SSM client directly.
      smartDFSClient.removeDefaultAcl(getPathName(p));
      return null;
    }

    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException, UnresolvedLinkException {
      // Retry on the file system the symlink resolved to.
      fs.removeDefaultAcl(p);
      return null;
    }
  }.resolve(this, absF);
}
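The FileSystemLinkResolver idiom recurs throughout these snippets: resolve() first invokes doCall() against the local client, and only if that throws UnresolvedLinkException does it follow the symlink and retry via next() on whichever FileSystem the link points into. A hedged usage sketch for the method above, assuming the cluster's configuration routes hdfs:// to SmartFileSystem; the URI and path are placeholders:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RemoveDefaultAclDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);

    // Drops only the default (inherited-by-children) ACL entries on the
    // directory; the access ACL entries remain in place.
    fs.removeDefaultAcl(new Path("/warehouse/table1"));
  }
}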
Use of org.apache.hadoop.fs.UnresolvedLinkException in project SSM by Intel-bigdata.
The class SmartFileSystem, method setOwner:
@Override
public void setOwner(Path p, final String username, final String groupname)
    throws IOException {
  // Refuse no-op calls: at least one of username or groupname must be set.
  if (username == null && groupname == null) {
    throw new IOException("username == null && groupname == null");
  }
  statistics.incrementWriteOps(1);
  Path absF = fixRelativePart(p);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException, UnresolvedLinkException {
      smartDFSClient.setOwner(getPathName(p), username, groupname);
      return null;
    }

    @Override
    public Void next(final FileSystem fs, final Path p) throws IOException {
      fs.setOwner(p, username, groupname);
      return null;
    }
  }.resolve(this, absF);
}
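As the guard clause shows, either username or groupname may be null to leave that attribute unchanged, but not both. A small sketch (path and group name are placeholders; changing ownership typically requires superuser privileges):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetOwnerDemo {
  public static void main(String[] args) throws Exception {
    // Assumes the default FS in the configuration is the SSM-wrapped HDFS.
    FileSystem fs = FileSystem.get(new Configuration());

    // Change only the group: a null username leaves the owner untouched.
    // Passing null for both arguments would trigger the IOException above.
    fs.setOwner(new Path("/warehouse/table1"), null, "analytics");
  }
}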
Use of org.apache.hadoop.fs.UnresolvedLinkException in project hadoop by apache.
The class DistributedFileSystem, method rename:
/**
 * This rename operation is guaranteed to be atomic.
 */
@SuppressWarnings("deprecation")
@Override
public void rename(Path src, Path dst, final Options.Rename... options)
    throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.RENAME);
  final Path absSrc = fixRelativePart(src);
  final Path absDst = fixRelativePart(dst);
  // Try the rename without resolving first
  try {
    dfs.rename(getPathName(absSrc), getPathName(absDst), options);
  } catch (UnresolvedLinkException e) {
    // Fully resolve the source
    final Path source = getFileLinkStatus(absSrc).getPath();
    // Keep trying to resolve the destination
    new FileSystemLinkResolver<Void>() {
      @Override
      public Void doCall(final Path p) throws IOException {
        dfs.rename(getPathName(source), getPathName(p), options);
        return null;
      }

      @Override
      public Void next(final FileSystem fs, final Path p) throws IOException {
        // Should just throw an error in FileSystem#checkPath
        return doCall(p);
      }
    }.resolve(this, absDst);
  }
}
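DistributedFileSystem widens this otherwise protected FileSystem overload to public, but application code usually reaches it through FileContext.rename, which accepts the same Options.Rename flags. A hedged sketch; the paths are placeholders:

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class AtomicRenameDemo {
  public static void main(String[] args) throws Exception {
    // Uses whatever default configuration is on the classpath.
    FileContext fc = FileContext.getFileContext();

    // OVERWRITE makes the rename replace an existing destination atomically
    // instead of failing with FileAlreadyExistsException.
    fc.rename(new Path("/tmp/out/_temporary/part-00000"),
        new Path("/data/part-00000"),
        Options.Rename.OVERWRITE);
  }
}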
Use of org.apache.hadoop.fs.UnresolvedLinkException in project hadoop by apache.
The class DistributedFileSystem, method concat:
/**
 * Move blocks from srcs to trg and delete srcs afterwards.
 * The file block sizes must be the same.
 *
 * @param trg existing file to append to
 * @param psrcs list of files (same block size, same replication)
 * @throws IOException
 */
@Override
public void concat(Path trg, Path[] psrcs) throws IOException {
  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.CONCAT);
  // Make target absolute
  Path absF = fixRelativePart(trg);
  // Make all srcs absolute
  Path[] srcs = new Path[psrcs.length];
  for (int i = 0; i < psrcs.length; i++) {
    srcs[i] = fixRelativePart(psrcs[i]);
  }
  // Try the concat without resolving any links
  String[] srcsStr = new String[psrcs.length];
  try {
    for (int i = 0; i < psrcs.length; i++) {
      srcsStr[i] = getPathName(srcs[i]);
    }
    dfs.concat(getPathName(absF), srcsStr);
  } catch (UnresolvedLinkException e) {
    // Exception could be from trg or any src.
    // Fully resolve trg and srcs. Fail if any of them are a symlink.
    FileStatus stat = getFileLinkStatus(absF);
    if (stat.isSymlink()) {
      throw new IOException("Cannot concat with a symlink target: "
          + trg + " -> " + stat.getPath());
    }
    absF = fixRelativePart(stat.getPath());
    for (int i = 0; i < psrcs.length; i++) {
      stat = getFileLinkStatus(srcs[i]);
      if (stat.isSymlink()) {
        throw new IOException("Cannot concat with a symlink src: "
            + psrcs[i] + " -> " + stat.getPath());
      }
      srcs[i] = fixRelativePart(stat.getPath());
    }
    // Try concat again. Can still race with another symlink.
    for (int i = 0; i < psrcs.length; i++) {
      srcsStr[i] = getPathName(srcs[i]);
    }
    dfs.concat(getPathName(absF), srcsStr);
  }
}
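A usage sketch for concat, with placeholder paths: as the javadoc notes, the sources must match the target's block size and replication, and they are deleted once merged.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ConcatDemo {
  public static void main(String[] args) throws Exception {
    // Assumes the default FS in the configuration is an HDFS cluster.
    FileSystem fs = FileSystem.get(new Configuration());

    Path target = new Path("/logs/2017-01-01/day.log");
    Path[] parts = {
        new Path("/logs/2017-01-01/hour-00.log"),
        new Path("/logs/2017-01-01/hour-01.log")
    };

    // Moves the sources' blocks onto the end of target and deletes the
    // sources; no data is copied, so the merge is cheap and near-atomic.
    fs.concat(target, parts);
  }
}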