Usage of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.
From class HdfsAdmin, method provisionEZTrash:
/**
 * Provisions the {@code .Trash} directory at the root of an encryption zone.
 *
 * The given path must be the root of an existing encryption zone; the trash
 * directory is created with {@code TRASH_PERMISSION} and must not already
 * exist.
 *
 * @param path root of the encryption zone to provision trash for
 * @throws IllegalArgumentException if {@code path} is not an encryption zone,
 *         or is inside one but not its root
 * @throws FileAlreadyExistsException if the trash path already exists; the
 *         message carries warnings when it is not a directory or has the
 *         wrong permissions
 * @throws IOException on other filesystem errors
 */
private void provisionEZTrash(Path path) throws IOException {
  // Resolve the enclosing encryption zone; null means the path is outside any EZ.
  EncryptionZone ez = dfs.getEZForPath(path);
  if (ez == null) {
    throw new IllegalArgumentException(path + " is not an encryption zone.");
  }
  // Trash may only be provisioned at the zone root itself.
  if (!path.toString().equals(ez.getPath())) {
    throw new IllegalArgumentException(path + " is not the root of an encryption zone. Do you mean " + ez.getPath() + "?");
  }
  Path trashPath = new Path(ez.getPath(), FileSystem.TRASH_PREFIX);
  // Probe for an existing trash directory; absence is the normal case.
  FileStatus existing = null;
  try {
    existing = dfs.getFileStatus(trashPath);
  } catch (FileNotFoundException ignored) {
    // No trash path yet — fall through and create it below.
  }
  if (existing != null) {
    StringBuilder message = new StringBuilder(
        "Will not provision new trash directory for encryption zone "
            + ez.getPath() + ". Path already exists.");
    if (!existing.isDirectory()) {
      message.append("\r\nWarning: ").append(trashPath.toString())
          .append(" is not a directory");
    }
    if (!existing.getPermission().equals(TRASH_PERMISSION)) {
      message.append("\r\nWarning: the permission of ")
          .append(trashPath.toString()).append(" is not ")
          .append(TRASH_PERMISSION);
    }
    throw new FileAlreadyExistsException(message.toString());
  }
  // Create the directory, then force the permission bits past the umask.
  dfs.mkdir(trashPath, TRASH_PERMISSION);
  dfs.setPermission(trashPath, TRASH_PERMISSION);
}
Usage of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.
From class FTPFileSystem, method create:
/**
 * Creates {@code file} on the FTP server and returns a stream to write it.
 *
 * A stream obtained via this call must be closed before using other APIs of
 * this class or else the invocation will block.
 *
 * Note: the {@code permission}, {@code replication}, {@code blockSize} and
 * {@code progress} arguments are never read in this method, so they have no
 * effect on the created file.
 *
 * @param file path of the file to create
 * @param permission requested permission (not applied here)
 * @param overwrite whether an existing regular file may be replaced
 * @param bufferSize passed to {@code FTPClient.allocate}
 * @param replication ignored by this implementation
 * @param blockSize ignored by this implementation
 * @param progress ignored by this implementation
 * @throws FileAlreadyExistsException if the path exists and either
 *         {@code overwrite} is false or the existing path is a directory
 * @throws IOException if the parent directories cannot be created, or the
 *         server does not accept the transfer
 */
@Override
public FSDataOutputStream create(Path file, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
// Dedicated connection for this stream; it stays open until close().
final FTPClient client = connect();
Path workDir = new Path(client.printWorkingDirectory());
Path absolute = makeAbsolute(workDir, file);
// Probe for an existing entry; getFileStatus signals absence via FNFE.
FileStatus status;
try {
status = getFileStatus(client, file);
} catch (FileNotFoundException fnfe) {
status = null;
}
if (status != null) {
// Only a regular file may be overwritten; a directory (or overwrite=false)
// is always an error.
if (overwrite && !status.isDirectory()) {
delete(client, file, false);
} else {
disconnect(client);
throw new FileAlreadyExistsException("File already exists: " + file);
}
}
// Ensure the parent directory chain exists before opening the transfer.
Path parent = absolute.getParent();
if (parent == null || !mkdirs(client, parent, FsPermission.getDirDefault())) {
parent = (parent == null) ? new Path("/") : parent;
disconnect(client);
throw new IOException("create(): Mkdirs failed to create: " + parent);
}
client.allocate(bufferSize);
// Change to parent directory on the server. Only then can we write to the
// file on the server by opening up an OutputStream. As a side effect the
// working directory on the server is changed to the parent directory of the
// file. The FTP client connection is closed when close() is called on the
// FSDataOutputStream.
client.changeWorkingDirectory(parent.toUri().getPath());
FSDataOutputStream fos = new FSDataOutputStream(client.storeFileStream(file.getName()), statistics) {
// close() must finish the pending STOR command and release the connection;
// until then the client cannot be used for anything else.
@Override
public void close() throws IOException {
super.close();
if (!client.isConnected()) {
throw new FTPException("Client not connected");
}
boolean cmdCompleted = client.completePendingCommand();
disconnect(client);
if (!cmdCompleted) {
throw new FTPException("Could not complete transfer, Reply Code - " + client.getReplyCode());
}
}
};
if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
// The ftpClient is an inconsistent state. Must close the stream
// which in turn will logout and disconnect from FTP server
fos.close();
throw new IOException("Unable to create file: " + file + ", Aborting");
}
return fos;
}
Usage of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.
From class FTPFileSystem, method rename:
/**
 * Renames {@code src} to {@code dst} over an already-open FTP connection.
 *
 * Convenience method so that callers within this class do not pay the cost
 * of opening and closing a TCP connection per API invocation.
 *
 * Restrictions: the source must exist, the (resolved) destination must not,
 * and both must share the same parent directory — the FTP RNFR/RNTO pair is
 * only issued within one directory. When the destination is an existing
 * directory, the source is moved underneath it keeping its own name.
 *
 * @param client connected FTP client to reuse
 * @param src source path
 * @param dst destination path
 * @return true if the server accepted the rename
 * @throws FileNotFoundException if the source does not exist
 * @throws FileAlreadyExistsException if the resolved destination exists
 * @throws IOException if the rename would nest the source under itself or
 *         crosses directories
 */
@SuppressWarnings("deprecation")
private boolean rename(FTPClient client, Path src, Path dst) throws IOException {
  Path cwd = new Path(client.printWorkingDirectory());
  Path srcAbs = makeAbsolute(cwd, src);
  Path dstAbs = makeAbsolute(cwd, dst);
  if (!exists(client, srcAbs)) {
    throw new FileNotFoundException("Source path " + src + " does not exist");
  }
  // A directory destination means "move the source under it with its own name".
  if (isDirectory(dstAbs)) {
    dstAbs = new Path(dstAbs, srcAbs.getName());
  }
  if (exists(client, dstAbs)) {
    throw new FileAlreadyExistsException("Destination path " + dst + " already exists");
  }
  String srcParent = srcAbs.getParent().toUri().toString();
  String dstParent = dstAbs.getParent().toUri().toString();
  if (isParentOf(srcAbs, dstAbs)) {
    throw new IOException("Cannot rename " + srcAbs + " under itself : " + dstAbs);
  }
  if (!srcParent.equals(dstParent)) {
    throw new IOException("Cannot rename source: " + srcAbs + " to " + dstAbs + " -" + E_SAME_DIRECTORY_ONLY);
  }
  // RNFR/RNTO operate on simple names relative to the shared parent.
  client.changeWorkingDirectory(srcParent);
  return client.rename(srcAbs.getName(), dstAbs.getName());
}
Usage of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.
From class AbstractContractCreateTest, method testOverwriteNonEmptyDirectory:
/**
 * Verifies that creating a file with overwrite=true over a non-empty
 * directory fails with FileAlreadyExistsException, and that the directory
 * and its child survive the attempt.
 *
 * Filesystems declaring {@code CREATE_OVERWRITES_DIRECTORY} are downgraded
 * to an assumption failure / skip rather than a hard failure.
 */
@Test
public void testOverwriteNonEmptyDirectory() throws Throwable {
describe("verify trying to create a file over a non-empty dir fails");
Path path = path("testOverwriteNonEmptyDirectory");
mkdirs(path);
try {
assertIsDirectory(path);
} catch (AssertionError failure) {
if (isSupported(CREATE_OVERWRITES_DIRECTORY)) {
// file/directory hack surfaces here: report as a violated assumption
throw new AssumptionViolatedException(failure.toString(), failure);
}
// else: rethrow
throw failure;
}
// Populate the directory so it is non-empty before the overwrite attempt.
Path child = new Path(path, "child");
writeTextFile(getFileSystem(), child, "child file", true);
byte[] data = dataset(256, 'a', 'z');
try {
writeDataset(getFileSystem(), path, data, data.length, 1024, true);
FileStatus status = getFileSystem().getFileStatus(path);
boolean isDir = status.isDirectory();
if (!isDir && isSupported(CREATE_OVERWRITES_DIRECTORY)) {
// For some file systems, downgrade to a skip so that the failure is
// visible in test results.
skip("This Filesystem allows a file to overwrite a directory");
}
fail("write of file over dir succeeded");
} catch (FileAlreadyExistsException expected) {
//expected
handleExpectedException(expected);
} catch (IOException e) {
// FileNotFoundException is a subclass of IOException and was previously
// caught separately with an identical handler; one catch suffices.
handleRelaxedException("overwriting a dir with a file ", "FileAlreadyExistsException", e);
}
// The directory and its content must be untouched by the failed create.
assertIsDirectory(path);
assertIsFile(child);
}
Usage of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.
From class AbstractContractCreateTest, method testOverwriteEmptyDirectory:
/**
 * Verifies that creating a file with overwrite=true over an empty directory
 * fails with FileAlreadyExistsException, and that the directory survives
 * the attempt.
 */
@Test
public void testOverwriteEmptyDirectory() throws Throwable {
describe("verify trying to create a file over an empty dir fails");
Path path = path("testOverwriteEmptyDirectory");
mkdirs(path);
assertIsDirectory(path);
byte[] data = dataset(256, 'a', 'z');
try {
writeDataset(getFileSystem(), path, data, data.length, 1024, true);
assertIsDirectory(path);
fail("write of file over empty dir succeeded");
} catch (FileAlreadyExistsException expected) {
//expected
handleExpectedException(expected);
} catch (IOException e) {
// FileNotFoundException is a subclass of IOException and was previously
// caught separately with an identical handler; one catch suffices.
handleRelaxedException("overwriting a dir with a file ", "FileAlreadyExistsException", e);
}
// The directory must be untouched by the failed create.
assertIsDirectory(path);
}
Aggregations