Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hadoop by apache.
The class StreamJob, method submitAndMonitorJob.
// Based on JobClient
public int submitAndMonitorJob() throws IOException {
  if (jar_ != null && isLocalHadoop()) {
    // getAbs became required when shell and subvm have different working dirs...
    File wd = new File(".").getAbsoluteFile();
    RunJar.unJar(new File(jar_), wd);
  }
  // if jobConf_ changes must recreate a JobClient
  jc_ = new JobClient(jobConf_);
  running_ = null;
  try {
    running_ = jc_.submitJob(jobConf_);
    jobId_ = running_.getID();
    if (background_) {
      LOG.info("Job is running in background.");
    } else if (!jc_.monitorAndPrintJob(jobConf_, running_)) {
      LOG.error("Job not successful!");
      return 1;
    }
    LOG.info("Output directory: " + output_);
  } catch (FileNotFoundException fe) {
    LOG.error("Error launching job , bad input path : " + fe.getMessage());
    return 2;
  } catch (InvalidJobConfException je) {
    LOG.error("Error launching job , Invalid job conf : " + je.getMessage());
    return 3;
  } catch (FileAlreadyExistsException fae) {
    LOG.error("Error launching job , Output path already exists : " + fae.getMessage());
    return 4;
  } catch (IOException ioe) {
    LOG.error("Error Launching job : " + ioe.getMessage());
    return 5;
  } catch (InterruptedException ie) {
    LOG.error("Error monitoring job : " + ie.getMessage());
    return 6;
  } finally {
    jc_.close();
  }
  return 0;
}
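For context, a minimal caller-side sketch (not from the Hadoop source) of where the FileAlreadyExistsException caught above typically originates: the output-specs check performed before submission, which StreamJob maps to exit code 4. The class name OutputPathCheck and the helper are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper mirroring the pre-submission output check whose
// FileAlreadyExistsException submitAndMonitorJob turns into exit code 4.
public class OutputPathCheck {
  public static void ensureOutputAbsent(Configuration conf, Path output)
      throws java.io.IOException {
    FileSystem fs = output.getFileSystem(conf);
    if (fs.exists(output)) {
      throw new FileAlreadyExistsException(
          "Output directory " + output + " already exists");
    }
  }
}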
Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hadoop by apache.
The class FSDirRenameOp, method unprotectedRenameTo.
/**
 * Rename src to dst.
 * See {@link DistributedFileSystem#rename(Path, Path, Options.Rename...)}
 * for details related to rename semantics and exceptions.
 *
 * @param fsd FSDirectory
 * @param srcIIP source path
 * @param dstIIP destination path
 * @param timestamp modification time
 * @param collectedBlocks blocks to be removed
 * @param options Rename options
 * @return whether a file/directory gets overwritten in the dst path
 */
static RenameResult unprotectedRenameTo(FSDirectory fsd,
    final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp,
    BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
    throws IOException {
  assert fsd.hasWriteLock();
  boolean overwrite = options != null
      && Arrays.asList(options).contains(Options.Rename.OVERWRITE);
  final String src = srcIIP.getPath();
  final String dst = dstIIP.getPath();
  final String error;
  final INode srcInode = srcIIP.getLastINode();
  validateRenameSource(fsd, srcIIP);
  // validate the destination
  if (dst.equals(src)) {
    throw new FileAlreadyExistsException("The source " + src
        + " and destination " + dst + " are the same");
  }
  validateDestination(src, dst, srcInode);
  if (dstIIP.length() == 1) {
    error = "rename destination cannot be the root";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
    throw new IOException(error);
  }
  BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
  fsd.ezManager.checkMoveValidity(srcIIP, dstIIP);
  final INode dstInode = dstIIP.getLastINode();
  List<INodeDirectory> snapshottableDirs = new ArrayList<>();
  if (dstInode != null) {
    // Destination exists
    validateOverwrite(src, dst, overwrite, srcInode, dstInode);
    FSDirSnapshotOp.checkSnapshot(fsd, dstIIP, snapshottableDirs);
  }
  INode dstParent = dstIIP.getINode(-2);
  if (dstParent == null) {
    error = "rename destination parent " + dst + " not found.";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
    throw new FileNotFoundException(error);
  }
  if (!dstParent.isDirectory()) {
    error = "rename destination parent " + dst + " is a file.";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
    throw new ParentNotDirectoryException(error);
  }
  // Ensure dst has quota to accommodate rename
  verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
  verifyQuotaForRename(fsd, srcIIP, dstIIP);
  RenameOperation tx = new RenameOperation(fsd, srcIIP, dstIIP);
  boolean undoRemoveSrc = true;
  tx.removeSrc();
  boolean undoRemoveDst = false;
  long removedNum = 0;
  try {
    if (dstInode != null) {
      // dst exists, remove it
      removedNum = tx.removeDst();
      if (removedNum != -1) {
        undoRemoveDst = true;
      }
    }
    // add src as dst to complete rename
    INodesInPath renamedIIP = tx.addSourceToDestination();
    if (renamedIIP != null) {
      undoRemoveSrc = false;
      if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
            + src + " is renamed to " + dst);
      }
      tx.updateMtimeAndLease(timestamp);
      // Collect the blocks and remove the lease for previous dst
      boolean filesDeleted = false;
      if (undoRemoveDst) {
        undoRemoveDst = false;
        if (removedNum > 0) {
          filesDeleted = tx.cleanDst(bsps, collectedBlocks);
        }
      }
      if (snapshottableDirs.size() > 0) {
        // There are snapshottable directories (without snapshots) to be
        // deleted. Need to update the SnapshotManager.
        fsd.getFSNamesystem().removeSnapshottableDirs(snapshottableDirs);
      }
      tx.updateQuotasInSourceTree(bsps);
      return createRenameResult(fsd, renamedIIP, filesDeleted, collectedBlocks);
    }
  } finally {
    if (undoRemoveSrc) {
      tx.restoreSource();
    }
    if (undoRemoveDst) {
      // Rename failed - restore dst
      tx.restoreDst(bsps);
    }
  }
  NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
      + "failed to rename " + src + " to " + dst);
  throw new IOException("rename from " + src + " to " + dst + " failed.");
}
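A hedged caller-side sketch of how these rename semantics surface through the public API: FileContext#rename throws FileAlreadyExistsException when the destination exists and Options.Rename.OVERWRITE was not passed. The class and helper names are illustrative.

import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class RenameWithOverwrite {
  // Hypothetical sketch: retry a failed rename with OVERWRITE so an
  // existing destination is replaced instead of aborting.
  public static void renameReplacing(FileContext fc, Path src, Path dst)
      throws java.io.IOException {
    try {
      // Without OVERWRITE this throws FileAlreadyExistsException if dst exists.
      fc.rename(src, dst);
    } catch (FileAlreadyExistsException e) {
      // Explicitly opt in to replacing dst.
      fc.rename(src, dst, Options.Rename.OVERWRITE);
    }
  }
}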
Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hadoop by apache.
The class S3AFileSystem, method innerCopyFromLocalFile.
/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException IO problem
 * @throws FileAlreadyExistsException the destination file exists and
 * overwrite==false
 * @throws AmazonClientException failure in the AWS SDK
 */
private void innerCopyFromLocalFile(boolean delSrc, boolean overwrite,
    Path src, Path dst) throws IOException, FileAlreadyExistsException,
    AmazonClientException {
  incrementStatistic(INVOCATION_COPY_FROM_LOCAL_FILE);
  final String key = pathToKey(dst);
  if (!overwrite && exists(dst)) {
    throw new FileAlreadyExistsException(dst + " already exists");
  }
  LOG.debug("Copying local file from {} to {}", src, dst);
  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);
  final ObjectMetadata om = newObjectMetadata(srcfile.length());
  PutObjectRequest putObjectRequest = newPutObjectRequest(key, om, srcfile);
  Upload up = putObject(putObjectRequest);
  ProgressableProgressListener listener = new ProgressableProgressListener(
      this, key, up, null);
  up.addProgressListener(listener);
  try {
    up.waitForUploadResult();
  } catch (InterruptedException e) {
    throw new InterruptedIOException("Interrupted copying " + src
        + " to " + dst + ", cancelling");
  }
  listener.uploadCompleted();
  // This will delete unnecessary fake parent directories
  finishedWrite(key);
  if (delSrc) {
    local.delete(src, false);
  }
}
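A hedged usage sketch of the public entry point, FileSystem#copyFromLocalFile(boolean, boolean, Path, Path): with overwrite == false an existing destination surfaces as the FileAlreadyExistsException thrown above. The bucket URI and paths are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class S3AUploadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "s3a://example-bucket/" is a placeholder URI.
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    try {
      fs.copyFromLocalFile(false /* delSrc */, false /* overwrite */,
          new Path("file:///tmp/local.txt"), new Path("/data/remote.txt"));
    } catch (FileAlreadyExistsException e) {
      System.err.println("Destination already exists: " + e.getMessage());
    }
  }
}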
Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hadoop by apache.
The class AliyunOSSFileSystem, method create.
@Override
public FSDataOutputStream create(Path path, FsPermission permission,
    boolean overwrite, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {
  String key = pathToKey(path);
  FileStatus status = null;
  try {
    // get the status or throw a FNFE
    status = getFileStatus(path);
    // if the thread reaches here, there is something at the path
    if (status.isDirectory()) {
      // path references a directory
      throw new FileAlreadyExistsException(path + " is a directory");
    }
    if (!overwrite) {
      // path references a file and overwrite is disabled
      throw new FileAlreadyExistsException(path + " already exists");
    }
    LOG.debug("Overwriting file {}", path);
  } catch (FileNotFoundException e) {
    // this means the file is not found
  }
  return new FSDataOutputStream(new AliyunOSSOutputStream(getConf(),
      store, key, progress, statistics), (Statistics) (null));
}
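A hedged sketch of triggering this path from a caller: FileSystem#create(Path, boolean) with overwrite == false raises FileAlreadyExistsException when something already occupies the path. The class and method names below are illustrative.

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateNoOverwrite {
  // Hypothetical helper: create a new empty file only if nothing exists
  // at 'path'; report a collision instead of propagating the exception.
  public static boolean tryCreateEmpty(FileSystem fs, Path path)
      throws java.io.IOException {
    try (FSDataOutputStream out = fs.create(path, false /* overwrite */)) {
      return true; // created a new, empty file
    } catch (FileAlreadyExistsException e) {
      return false; // a file or directory already exists at 'path'
    }
  }
}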
Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hadoop by apache.
The class AliyunOSSFileSystem, method rename.
@Override
public boolean rename(Path srcPath, Path dstPath) throws IOException {
  if (srcPath.isRoot()) {
    // Cannot rename root of file system
    if (LOG.isDebugEnabled()) {
      LOG.debug("Cannot rename the root of a filesystem");
    }
    return false;
  }
  // Walk up dst's ancestors; if src is one of them, dst would end up
  // inside src, so the rename is invalid.
  Path parent = dstPath.getParent();
  while (parent != null && !srcPath.equals(parent)) {
    parent = parent.getParent();
  }
  if (parent != null) {
    return false;
  }
  FileStatus srcStatus = getFileStatus(srcPath);
  FileStatus dstStatus;
  try {
    dstStatus = getFileStatus(dstPath);
  } catch (FileNotFoundException fnde) {
    dstStatus = null;
  }
  if (dstStatus == null) {
    // If dst doesn't exist, check whether dst dir exists or not
    dstStatus = getFileStatus(dstPath.getParent());
    if (!dstStatus.isDirectory()) {
      throw new IOException(String.format(
          "Failed to rename %s to %s, %s is a file", srcPath, dstPath,
          dstPath.getParent()));
    }
  } else {
    if (srcStatus.getPath().equals(dstStatus.getPath())) {
      return !srcStatus.isDirectory();
    } else if (dstStatus.isDirectory()) {
      // If dst is a directory
      dstPath = new Path(dstPath, srcPath.getName());
      FileStatus[] statuses;
      try {
        statuses = listStatus(dstPath);
      } catch (FileNotFoundException fnde) {
        statuses = null;
      }
      if (statuses != null && statuses.length > 0) {
        // If dst exists and not a directory / not empty
        throw new FileAlreadyExistsException(String.format(
            "Failed to rename %s to %s, file already exists or not empty!",
            srcPath, dstPath));
      }
    } else {
      // If dst is not a directory
      throw new FileAlreadyExistsException(String.format(
          "Failed to rename %s to %s, file already exists!",
          srcPath, dstPath));
    }
  }
  if (srcStatus.isDirectory()) {
    copyDirectory(srcPath, dstPath);
  } else {
    copyFile(srcPath, dstPath);
  }
  return srcPath.equals(dstPath) || delete(srcPath, true);
}
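A hedged caller-side sketch of the directory-destination behavior implemented above: renaming onto an existing directory moves the source underneath it, while a conflicting computed destination fails with FileAlreadyExistsException. The paths and class name are illustrative.

import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameIntoDirectory {
  // Hypothetical sketch: move /logs/app.log under the existing
  // directory /archive, i.e. to /archive/app.log.
  public static boolean archive(FileSystem fs) throws java.io.IOException {
    try {
      return fs.rename(new Path("/logs/app.log"), new Path("/archive"));
    } catch (FileAlreadyExistsException e) {
      // /archive/app.log already exists (or is a non-empty directory).
      return false;
    }
  }
}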