Use of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.
Class CuratorService, method operationFailure.
/**
 * Create an IOE when an operation fails, mapping the ZooKeeper
 * {@code KeeperException} subclass to the closest Hadoop filesystem
 * exception so callers can handle failures with familiar types.
 *
 * @param path path of operation
 * @param operation operation attempted
 * @param exception the exception caught
 * @param acls the ACLs in force for the operation; rendered into the
 *             diagnostics of permission-related failures
 * @return an IOE to throw that contains the path and operation details.
 */
protected IOException operationFailure(String path, String operation, Exception exception, List<ACL> acls) {
  IOException ioe;
  // Render the ACL list once; several branches include it in diagnostics.
  String aclList = "[" + RegistrySecurity.aclsToString(acls) + "]";
  if (exception instanceof KeeperException.NoNodeException) {
    ioe = new PathNotFoundException(path);
  } else if (exception instanceof KeeperException.NodeExistsException) {
    ioe = new FileAlreadyExistsException(path);
  } else if (exception instanceof KeeperException.NoAuthException) {
    ioe = new NoPathPermissionsException(path, "Not authorized to access path; ACLs: " + aclList);
  } else if (exception instanceof KeeperException.NotEmptyException) {
    ioe = new PathIsNotEmptyDirectoryException(path);
  } else if (exception instanceof KeeperException.AuthFailedException) {
    ioe = new AuthenticationFailedException(path, "Authentication Failed: " + exception + "; " + securityConnectionDiagnostics, exception);
  } else if (exception instanceof KeeperException.NoChildrenForEphemeralsException) {
    ioe = new NoChildrenForEphemeralsException(path, "Cannot create a path under an ephemeral node: " + exception, exception);
  } else if (exception instanceof KeeperException.InvalidACLException) {
    // this is a security exception of a kind
    // include the ACLs to help the diagnostics
    StringBuilder builder = new StringBuilder();
    builder.append("Path access failure ").append(aclList);
    builder.append(" ");
    builder.append(securityConnectionDiagnostics);
    ioe = new NoPathPermissionsException(path, builder.toString());
  } else {
    // Fallback: wrap anything unrecognized in a generic registry failure.
    ioe = new RegistryIOException(path, "Failure of " + operation + " on " + path + ": " + exception.toString(), exception);
  }
  // Preserve the original exception as the cause when the chosen
  // constructor did not already record one.
  if (ioe.getCause() == null) {
    ioe.initCause(exception);
  }
  return ioe;
}
Use of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.
Class HistoryFileManager, method makeDoneSubdir.
/**
 * Ensure that the given "done" subdirectory exists with the configured
 * history permissions, creating it if necessary, and record it in the
 * cache of known subdirectories.
 */
private void makeDoneSubdir(Path path) throws IOException {
  try {
    // Probe first: if the directory already exists, just cache it.
    doneDirFc.getFileStatus(path);
    existingDoneSubdirs.add(path);
  } catch (FileNotFoundException absent) {
    try {
      FsPermission targetPerms = new FsPermission(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION);
      doneDirFc.mkdir(path, targetPerms, true);
      FileStatus created = doneDirFc.getFileStatus(path);
      LOG.info("Perms after creating " + created.getPermission().toShort() + ", Expected: " + targetPerms.toShort());
      // mkdir may have been subject to the process umask; force the
      // intended permissions when they did not take effect.
      if (created.getPermission().toShort() != targetPerms.toShort()) {
        LOG.info("Explicitly setting permissions to : " + targetPerms.toShort() + ", " + targetPerms);
        doneDirFc.setPermission(path, targetPerms);
      }
      existingDoneSubdirs.add(path);
    } catch (FileAlreadyExistsException raced) {
      // Someone else created the directory between our probe and the
      // mkdir; the directory now exists, so there is nothing left to do.
    }
  }
}
Use of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.
Class HistoryServerFileSystemStateStoreService, method storeTokenMasterKey.
/**
 * Persist a delegation token master key to the filesystem state store,
 * one file per key ID.
 *
 * @param key the master key to store
 * @throws FileAlreadyExistsException if a file for this key ID already exists
 * @throws IOException on serialization or filesystem failure
 */
@Override
public void storeTokenMasterKey(DelegationKey key) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing master key " + key.getKeyId());
  }
  Path keyPath = new Path(tokenKeysStatePath, TOKEN_MASTER_KEY_FILE_PREFIX + key.getKeyId());
  // Fail fast rather than silently overwriting an existing key.
  // NOTE(review): this exists() check is racy (TOCTOU) if multiple writers
  // share the store; createNewFile is presumably the real guard — confirm.
  if (fs.exists(keyPath)) {
    throw new FileAlreadyExistsException(keyPath + " already exists");
  }
  // Serialize to memory first so a serialization failure cannot leave a
  // partially written file behind; try-with-resources replaces the old
  // close()/null/IOUtils.cleanup() boilerplate with identical behavior.
  ByteArrayOutputStream memStream = new ByteArrayOutputStream();
  try (DataOutputStream dataStream = new DataOutputStream(memStream)) {
    key.write(dataStream);
  }
  createNewFile(keyPath, memStream.toByteArray());
}
Use of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.
Class S3AFileSystem, method innerMkdirs.
/**
 * Make the given path and all non-existent parents into directories.
 * See {@link #mkdirs(Path, FsPermission)}.
 *
 * @param f path to create
 * @param permission to apply to f
 * @return true if a directory was created
 * @throws FileAlreadyExistsException there is a file at the path specified
 * @throws IOException other IO problems
 * @throws AmazonClientException on failures inside the AWS SDK
 */
// TODO: If we have created an empty file at /foo/bar and we then call
// mkdirs for /foo/bar/baz/roo what happens to the empty file /foo/bar/?
private boolean innerMkdirs(Path f, FsPermission permission) throws IOException, FileAlreadyExistsException, AmazonClientException {
  LOG.debug("Making directory: {}", f);
  incrementStatistic(INVOCATION_MKDIRS);
  try {
    // Path already exists: success if it is a directory, error if a file.
    FileStatus status = getFileStatus(f);
    if (!status.isDirectory()) {
      throw new FileAlreadyExistsException("Path is a file: " + f);
    }
    return true;
  } catch (FileNotFoundException e) {
    // Walk up the ancestor chain: the first ancestor found must be a
    // directory; a file anywhere on the chain makes the mkdirs invalid.
    Path ancestor = f.getParent();
    do {
      try {
        FileStatus ancestorStatus = getFileStatus(ancestor);
        if (ancestorStatus.isDirectory()) {
          break;
        }
        if (ancestorStatus.isFile()) {
          throw new FileAlreadyExistsException(String.format("Can't make directory for path '%s' since it is a file.", ancestor));
        }
      } catch (FileNotFoundException fnfe) {
        // A missing ancestor is fine; keep walking upwards.
        instrumentation.errorIgnored();
      }
      ancestor = ancestor.getParent();
    } while (ancestor != null);
    createFakeDirectory(pathToKey(f));
    return true;
  }
}
Use of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.
Class NativeS3FileSystem, method create.
/**
 * Create a new file in S3, returning a stream that writes the data on
 * close.
 *
 * @param f path of the file to create
 * @param permission requested permission — NOTE(review): not referenced
 *        in this method body; presumably S3N does not persist permissions
 * @param overwrite whether an existing file may be replaced
 * @param bufferSize output buffer size
 * @param replication ignored by this store (no replication concept in S3)
 * @param blockSize ignored by this store
 * @param progress progress callback passed to the output stream
 * @return an output stream writing to the S3 object for this path
 * @throws FileAlreadyExistsException if the file exists and overwrite is false
 * @throws IOException on other failures
 */
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
  // Check !overwrite first: short-circuiting skips the (remote) existence
  // probe entirely when the caller is willing to overwrite. The original
  // order always paid for the exists() call.
  if (!overwrite && exists(f)) {
    throw new FileAlreadyExistsException("File already exists: " + f);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Creating new file '" + f + "' in S3");
  }
  Path absolutePath = makeAbsolute(f);
  String key = pathToKey(absolutePath);
  return new FSDataOutputStream(new NativeS3FsOutputStream(getConf(), store, key, progress, bufferSize), statistics);
}
Aggregations