Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hadoop by apache.
The class NativeAzureFileSystem, method create.
/**
 * Create an Azure blob and return an output stream to use
 * to write data to it.
 *
 * @param f the path of the file to create
 * @param permission permission to apply to the new file
 * @param overwrite whether an existing file at this path may be overwritten
 * @param createParent whether missing parent directories should be created
 * @param bufferSize size of the write buffer
 * @param replication requested replication factor
 * @param blockSize requested block size
 * @param progress progress reporting callback (may be null)
 * @param parentFolderLease Lease on parent folder (or null if
 *          no lease).
 * @return an output stream used to write data to the new blob
 * @throws IOException if the file cannot be created
 */
private FSDataOutputStream create(Path f, FsPermission permission,
    boolean overwrite, boolean createParent, int bufferSize,
    short replication, long blockSize, Progressable progress,
    SelfRenewingLease parentFolderLease)
    throws FileAlreadyExistsException, IOException {
  LOG.debug("Creating file: {}", f.toString());
  if (containsColon(f)) {
    throw new IOException("Cannot create file " + f
        + " through WASB that has colons in the name");
  }
  Path absolutePath = makeAbsolute(f);
  performAuthCheck(absolutePath.toString(),
      WasbAuthorizationOperations.WRITE.toString(), "create");
  String key = pathToKey(absolutePath);
  FileMetadata existingMetadata = store.retrieveMetadata(key);
  if (existingMetadata != null) {
    if (existingMetadata.isDir()) {
      throw new FileAlreadyExistsException("Cannot create file " + f
          + "; already exists as a directory.");
    }
    if (!overwrite) {
      throw new FileAlreadyExistsException("File already exists:" + f);
    }
  }
  Path parentFolder = absolutePath.getParent();
  if (parentFolder != null && parentFolder.getParent() != null) {
    // skip root
    // Update the parent folder last modified time if the parent folder
    // already exists.
    String parentKey = pathToKey(parentFolder);
    FileMetadata parentMetadata = store.retrieveMetadata(parentKey);
    if (parentMetadata != null && parentMetadata.isDir()
        && parentMetadata.getBlobMaterialization() == BlobMaterialization.Explicit) {
      if (parentFolderLease != null) {
        store.updateFolderLastModifiedTime(parentKey, parentFolderLease);
      } else {
        updateParentFolderLastModifiedTime(key);
      }
    } else {
      // Make sure that the parent folder exists.
      // Create it using inherited permissions from the first existing
      // directory going up the path.
      Path firstExisting = parentFolder.getParent();
      FileMetadata metadata = store.retrieveMetadata(pathToKey(firstExisting));
      while (metadata == null) {
        // Guaranteed to terminate properly because we will eventually hit
        // root, which will return non-null metadata.
        firstExisting = firstExisting.getParent();
        metadata = store.retrieveMetadata(pathToKey(firstExisting));
      }
      mkdirs(parentFolder, metadata.getPermissionStatus().getPermission(), true);
    }
  }
  // Mask the permission first (with the default permission mask as well).
  FsPermission masked = applyUMask(permission, UMaskApplyMode.NewFile);
  PermissionStatus permissionStatus = createPermissionStatus(masked);
  OutputStream bufOutStream;
  if (store.isPageBlobKey(key)) {
    // Store page blobs directly in-place without renames.
    bufOutStream = store.storefile(key, permissionStatus);
  } else {
    // This is a block blob, so open the output blob stream based on the
    // encoded key.
    String keyEncoded = encodeKey(key);
    // First create a blob at the real key, pointing back to the temporary
    // file. This accomplishes a few things:
    // 1. Makes sure we can create a file there.
    // 2. Makes it visible to other concurrent threads/processes/nodes what
    //    we're doing.
    // 3. Makes it easier to restore/cleanup data in the event of us crashing.
    store.storeEmptyLinkFile(key, keyEncoded, permissionStatus);
    // The key is encoded to point to a common container at the storage server.
    // This reduces the number of splits on the server side when load balancing.
    // Ingress to Azure storage can take advantage of earlier splits. We remove
    // the root path to the key and prefix a random GUID to the tail (or leaf
    // filename) of the key. Keys are thus broadly and randomly distributed over
    // a single container to ease load balancing on the storage server. When the
    // blob is committed it is renamed to its earlier key. Uncommitted blocks
    // are not cleaned up and we leave it to Azure storage to garbage collect
    // these blocks.
    bufOutStream = new NativeAzureFsOutputStream(
        store.storefile(keyEncoded, permissionStatus), key, keyEncoded);
  }
  // Construct the data output stream from the buffered output stream.
  FSDataOutputStream fsOut = new FSDataOutputStream(bufOutStream, statistics);
  // Increment the counter.
  instrumentation.fileCreated();
  // Return data output stream to caller.
  return fsOut;
}
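
A client normally reaches this private create path through the public FileSystem API rather than calling it directly. The following is a minimal, hypothetical sketch (the wasb:// account, container, and target path are placeholders) of how FileAlreadyExistsException propagates to a caller when the blob already exists and overwrite is false:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WasbCreateExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder account/container; substitute a real wasb:// URI.
    Path file = new Path(
        "wasb://mycontainer@myaccount.blob.core.windows.net/data/part-0000");
    FileSystem fs = FileSystem.get(file.toUri(), conf);
    try (FSDataOutputStream out = fs.create(file, /* overwrite */ false)) {
      out.writeBytes("hello");
    } catch (FileAlreadyExistsException e) {
      // NativeAzureFileSystem.create() throws this when the blob already
      // exists (or the path is an existing directory) and overwrite is false.
      System.err.println("Target already exists: " + e.getMessage());
    }
  }
}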
Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hadoop by apache.
The class StreamJob, method submitAndMonitorJob.
// Based on JobClient
public int submitAndMonitorJob() throws IOException {
  if (jar_ != null && isLocalHadoop()) {
    // getAbs became required when shell and subvm have different working dirs...
    File wd = new File(".").getAbsoluteFile();
    RunJar.unJar(new File(jar_), wd);
  }
  // if jobConf_ changes must recreate a JobClient
  jc_ = new JobClient(jobConf_);
  running_ = null;
  try {
    running_ = jc_.submitJob(jobConf_);
    jobId_ = running_.getID();
    if (background_) {
      LOG.info("Job is running in background.");
    } else if (!jc_.monitorAndPrintJob(jobConf_, running_)) {
      LOG.error("Job not successful!");
      return 1;
    }
    LOG.info("Output directory: " + output_);
  } catch (FileNotFoundException fe) {
    LOG.error("Error launching job , bad input path : " + fe.getMessage());
    return 2;
  } catch (InvalidJobConfException je) {
    LOG.error("Error launching job , Invalid job conf : " + je.getMessage());
    return 3;
  } catch (FileAlreadyExistsException fae) {
    LOG.error("Error launching job , Output path already exists : " + fae.getMessage());
    return 4;
  } catch (IOException ioe) {
    LOG.error("Error Launching job : " + ioe.getMessage());
    return 5;
  } catch (InterruptedException ie) {
    LOG.error("Error monitoring job : " + ie.getMessage());
    return 6;
  } finally {
    jc_.close();
  }
  return 0;
}
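
The distinct numeric return codes let wrapper scripts tell failure causes apart; code 4 specifically means the output path already existed. Below is a minimal sketch of a driver that surfaces that case, assuming the standard ToolRunner entry point (the class name and the printed hint are illustrative, not part of Hadoop Streaming):

import org.apache.hadoop.streaming.StreamJob;
import org.apache.hadoop.util.ToolRunner;

public class StreamJobRunnerExample {
  public static void main(String[] args) throws Exception {
    StreamJob job = new StreamJob();
    // StreamJob.run() ends by returning submitAndMonitorJob()'s status code.
    int rc = ToolRunner.run(job, args);
    if (rc == 4) {
      // 4 is returned when job submission failed with
      // FileAlreadyExistsException, i.e. the -output directory already exists.
      System.err.println("Remove or change the -output path and resubmit.");
    }
    System.exit(rc);
  }
}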
Use of org.apache.hadoop.fs.FileAlreadyExistsException in project ignite by apache.
The class HadoopRawLocalFileSystem, method mkdirs.
/** {@inheritDoc} */
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
  if (f == null)
    throw new IllegalArgumentException("mkdirs path arg is null");
  Path parent = f.getParent();
  File p2f = convert(f);
  if (parent != null) {
    File parent2f = convert(parent);
    if (parent2f != null && parent2f.exists() && !parent2f.isDirectory())
      throw new FileAlreadyExistsException("Parent path is not a directory: " + parent);
  }
  return (parent == null || mkdirs(parent)) && (p2f.mkdir() || p2f.isDirectory());
}
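
The parent2f check is what turns an existing regular file on the parent path into a FileAlreadyExistsException rather than a silently failed mkdir. A short sketch of the triggering situation, assuming fs is an instance of this file system (the paths are placeholders):

// Sketch only; "fs" is assumed to be an initialized HadoopRawLocalFileSystem
// (or any FileSystem honoring the same contract), and the paths are placeholders.
Path parentAsFile = new Path("/tmp/example-parent");
fs.create(parentAsFile, true).close();  // parent now exists as a regular file
try {
  fs.mkdirs(new Path(parentAsFile, "child"), FsPermission.getDefault());
} catch (FileAlreadyExistsException e) {
  // mkdirs() refuses to treat an existing regular file as a parent directory.
}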
Use of org.apache.hadoop.fs.FileAlreadyExistsException in project ignite by apache.
The class HadoopIgfs20FileSystemAbstractSelfTest, method testRenameDirectoryIfDstPathExists.
/**
* @throws Exception If failed.
*/
public void testRenameDirectoryIfDstPathExists() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path srcDir = new Path(fsHome, "/tmp/");
  Path dstDir = new Path(fsHome, "/tmpNew/");
  FSDataOutputStream os = fs.create(new Path(srcDir, "file1"),
      EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()));
  os.close();
  os = fs.create(new Path(dstDir, "file2"),
      EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()));
  os.close();
  try {
    fs.rename(srcDir, dstDir);
    fail("FileAlreadyExistsException expected.");
  } catch (FileAlreadyExistsException ignore) {
    // No-op.
  }
  // Check all the files stay unchanged:
  assertPathExists(fs, dstDir);
  assertPathExists(fs, new Path(dstDir, "file2"));
  assertPathExists(fs, srcDir);
  assertPathExists(fs, new Path(srcDir, "file1"));
}
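
The test drives the AbstractFileSystem-level rename, which must fail with FileAlreadyExistsException when no Options.Rename.OVERWRITE flag is passed and the destination exists. As a hedged sketch, the same contract is visible through the FileContext wrapper (the URI and paths below are placeholders and assume the IGFS file system is configured):

// Fragment only; requires the igfs AbstractFileSystem to be registered in the
// Configuration. URI and paths are placeholders.
FileContext fc = FileContext.getFileContext(URI.create("igfs://igfs@localhost/"));
try {
  // Without Options.Rename.OVERWRITE this throws FileAlreadyExistsException
  // whenever the destination path already exists.
  fc.rename(new Path("/tmp"), new Path("/tmpNew"));
} catch (FileAlreadyExistsException e) {
  // Same behavior the test above asserts for a non-empty destination directory.
}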