Search in sources :

Example 36 with FileAlreadyExistsException

Use of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.

From the class StreamJob, method submitAndMonitorJob.

// Based on JobClient
/**
 * Submits the streaming job and, unless running in the background,
 * monitors it until completion.
 *
 * @return 0 on success; otherwise a distinct non-zero exit code per
 *         failure mode: 1 job unsuccessful, 2 bad input path,
 *         3 invalid job conf, 4 output path already exists,
 *         5 other IO error, 6 interrupted while monitoring
 * @throws IOException if the local jar cannot be unpacked
 */
public int submitAndMonitorJob() throws IOException {
    if (jar_ != null && isLocalHadoop()) {
        // getAbs became required when shell and subvm have different working dirs...
        File wd = new File(".").getAbsoluteFile();
        RunJar.unJar(new File(jar_), wd);
    }
    // if jobConf_ changes must recreate a JobClient
    jc_ = new JobClient(jobConf_);
    running_ = null;
    try {
        running_ = jc_.submitJob(jobConf_);
        jobId_ = running_.getID();
        if (background_) {
            LOG.info("Job is running in background.");
        } else if (!jc_.monitorAndPrintJob(jobConf_, running_)) {
            LOG.error("Job not successful!");
            return 1;
        }
        LOG.info("Output directory: " + output_);
    } catch (FileNotFoundException fe) {
        LOG.error("Error launching job , bad input path : " + fe.getMessage());
        return 2;
    } catch (InvalidJobConfException je) {
        LOG.error("Error launching job , Invalid job conf : " + je.getMessage());
        return 3;
    } catch (FileAlreadyExistsException fae) {
        LOG.error("Error launching job , Output path already exists : " + fae.getMessage());
        return 4;
    } catch (IOException ioe) {
        LOG.error("Error Launching job : " + ioe.getMessage());
        return 5;
    } catch (InterruptedException ie) {
        // Restore the thread's interrupt status before swallowing the
        // exception, so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        LOG.error("Error monitoring job : " + ie.getMessage());
        return 6;
    } finally {
        jc_.close();
    }
    return 0;
}
Also used : FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) FileNotFoundException(java.io.FileNotFoundException) InvalidJobConfException(org.apache.hadoop.mapred.InvalidJobConfException) IOException(java.io.IOException) File(java.io.File) JobClient(org.apache.hadoop.mapred.JobClient)

Example 37 with FileAlreadyExistsException

Use of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.

From the class FSDirRenameOp, method unprotectedRenameTo.

/**
   * Rename src to dst.
   * See {@link DistributedFileSystem#rename(Path, Path, Options.Rename...)}
   * for details related to rename semantics and exceptions.
   *
   * NOTE(review): caller must hold the FSDirectory write lock (asserted below).
   *
   * @param fsd             FSDirectory
   * @param srcIIP          resolved source path
   * @param dstIIP          resolved destination path
   * @param timestamp       modification time to apply on success
   * @param collectedBlocks out-param: blocks of an overwritten destination,
   *                        collected for later removal
   * @param options         Rename options (e.g. {@code OVERWRITE})
   * @return whether a file/directory gets overwritten in the dst path
   */
static RenameResult unprotectedRenameTo(FSDirectory fsd, final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp, BlocksMapUpdateInfo collectedBlocks, Options.Rename... options) throws IOException {
    assert fsd.hasWriteLock();
    // OVERWRITE permits replacing an existing destination; off by default.
    boolean overwrite = options != null && Arrays.asList(options).contains(Options.Rename.OVERWRITE);
    final String src = srcIIP.getPath();
    final String dst = dstIIP.getPath();
    final String error;
    final INode srcInode = srcIIP.getLastINode();
    validateRenameSource(fsd, srcIIP);
    // validate the destination
    if (dst.equals(src)) {
        throw new FileAlreadyExistsException("The source " + src + " and destination " + dst + " are the same");
    }
    validateDestination(src, dst, srcInode);
    // A path of length 1 is the filesystem root.
    if (dstIIP.length() == 1) {
        error = "rename destination cannot be the root";
        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
        throw new IOException(error);
    }
    BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
    // Encryption zones: a rename may not cross EZ boundaries.
    fsd.ezManager.checkMoveValidity(srcIIP, dstIIP);
    final INode dstInode = dstIIP.getLastINode();
    List<INodeDirectory> snapshottableDirs = new ArrayList<>();
    if (dstInode != null) {
        // Destination exists
        validateOverwrite(src, dst, overwrite, srcInode, dstInode);
        FSDirSnapshotOp.checkSnapshot(fsd, dstIIP, snapshottableDirs);
    }
    // getINode(-2) is the destination's parent directory.
    INode dstParent = dstIIP.getINode(-2);
    if (dstParent == null) {
        error = "rename destination parent " + dst + " not found.";
        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
        throw new FileNotFoundException(error);
    }
    if (!dstParent.isDirectory()) {
        error = "rename destination parent " + dst + " is a file.";
        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
        throw new ParentNotDirectoryException(error);
    }
    // Ensure dst has quota to accommodate rename
    verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
    verifyQuotaForRename(fsd, srcIIP, dstIIP);
    // From here on the namespace is mutated; the undoRemove* flags track
    // which mutations must be rolled back in the finally block on failure.
    RenameOperation tx = new RenameOperation(fsd, srcIIP, dstIIP);
    boolean undoRemoveSrc = true;
    tx.removeSrc();
    boolean undoRemoveDst = false;
    long removedNum = 0;
    try {
        if (dstInode != null) {
            // dst exists, remove it
            removedNum = tx.removeDst();
            // removeDst returns -1 when nothing was removed; only then is
            // there nothing to restore.
            if (removedNum != -1) {
                undoRemoveDst = true;
            }
        }
        // add src as dst to complete rename
        INodesInPath renamedIIP = tx.addSourceToDestination();
        if (renamedIIP != null) {
            // Source is now linked at dst; no rollback of the removal needed.
            undoRemoveSrc = false;
            if (NameNode.stateChangeLog.isDebugEnabled()) {
                NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: " + src + " is renamed to " + dst);
            }
            tx.updateMtimeAndLease(timestamp);
            // Collect the blocks and remove the lease for previous dst
            boolean filesDeleted = false;
            if (undoRemoveDst) {
                // The overwrite is committed; clear the flag so the finally
                // block does not resurrect the old destination.
                undoRemoveDst = false;
                if (removedNum > 0) {
                    filesDeleted = tx.cleanDst(bsps, collectedBlocks);
                }
            }
            if (snapshottableDirs.size() > 0) {
                // There are snapshottable directories (without snapshots) to be
                // deleted. Need to update the SnapshotManager.
                fsd.getFSNamesystem().removeSnapshottableDirs(snapshottableDirs);
            }
            tx.updateQuotasInSourceTree(bsps);
            return createRenameResult(fsd, renamedIIP, filesDeleted, collectedBlocks);
        }
    } finally {
        // Roll back whichever removals were not committed above.
        if (undoRemoveSrc) {
            tx.restoreSource();
        }
        if (undoRemoveDst) {
            // Rename failed - restore dst
            tx.restoreDst(bsps);
        }
    }
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + "failed to rename " + src + " to " + dst);
    throw new IOException("rename from " + src + " to " + dst + " failed.");
}
Also used : BlockStoragePolicySuite(org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) ChunkedArrayList(org.apache.hadoop.util.ChunkedArrayList) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) ParentNotDirectoryException(org.apache.hadoop.fs.ParentNotDirectoryException)

Example 38 with FileAlreadyExistsException

Use of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.

From the class S3AFileSystem, method innerCopyFromLocalFile.

/**
   * The src file is on the local disk.  Add it to FS at
   * the given dst name.
   *
   * This version doesn't need to create a temporary file to calculate the md5.
   * Sadly this doesn't seem to be used by the shell cp :(
   *
   * delSrc indicates if the source should be removed
   * @param delSrc whether to delete the src
   * @param overwrite whether to overwrite an existing file
   * @param src path
   * @param dst path
   * @throws IOException IO problem
   * @throws FileAlreadyExistsException the destination file exists and
   * overwrite==false
   * @throws AmazonClientException failure in the AWS SDK
   */
private void innerCopyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException, FileAlreadyExistsException, AmazonClientException {
    incrementStatistic(INVOCATION_COPY_FROM_LOCAL_FILE);
    final String key = pathToKey(dst);
    if (!overwrite && exists(dst)) {
        throw new FileAlreadyExistsException(dst + " already exists");
    }
    LOG.debug("Copying local file from {} to {}", src, dst);
    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);
    final ObjectMetadata om = newObjectMetadata(srcfile.length());
    PutObjectRequest putObjectRequest = newPutObjectRequest(key, om, srcfile);
    Upload up = putObject(putObjectRequest);
    ProgressableProgressListener listener = new ProgressableProgressListener(this, key, up, null);
    up.addProgressListener(listener);
    try {
        up.waitForUploadResult();
    } catch (InterruptedException e) {
        // Re-set the interrupt status so callers can observe the interruption,
        // and preserve the original exception as the cause. The cast is needed
        // because InterruptedIOException has no cause-taking constructor and
        // initCause returns Throwable.
        Thread.currentThread().interrupt();
        throw (InterruptedIOException) new InterruptedIOException("Interrupted copying " + src + " to " + dst + ", cancelling").initCause(e);
    }
    listener.uploadCompleted();
    // This will delete unnecessary fake parent directories
    finishedWrite(key);
    if (delSrc) {
        local.delete(src, false);
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) Upload(com.amazonaws.services.s3.transfer.Upload) File(java.io.File) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest)

Example 39 with FileAlreadyExistsException

Use of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.

From the class AliyunOSSFileSystem, method create.

@Override
public FSDataOutputStream create(Path path, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
    final String key = pathToKey(path);
    try {
        // Probe the path; a FileNotFoundException below means nothing
        // exists there yet and creation can proceed unconditionally.
        final FileStatus existing = getFileStatus(path);
        if (existing.isDirectory()) {
            // A directory occupies the path; a file cannot replace it.
            throw new FileAlreadyExistsException(path + " is a directory");
        }
        if (!overwrite) {
            // An existing file may only be replaced when overwrite is set.
            throw new FileAlreadyExistsException(path + " already exists");
        }
        LOG.debug("Overwriting file {}", path);
    } catch (FileNotFoundException ignored) {
        // Path is vacant: fall through and create a brand-new file.
    }
    return new FSDataOutputStream(new AliyunOSSOutputStream(getConf(), store, key, progress, statistics), (Statistics) (null));
}
Also used : FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) FileStatus(org.apache.hadoop.fs.FileStatus) FileNotFoundException(java.io.FileNotFoundException) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)

Example 40 with FileAlreadyExistsException

Use of org.apache.hadoop.fs.FileAlreadyExistsException in the Apache Hadoop project.

From the class AliyunOSSFileSystem, method rename.

@Override
public boolean rename(Path srcPath, Path dstPath) throws IOException {
    // The root of the filesystem can never be renamed.
    if (srcPath.isRoot()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Cannot rename the root of a filesystem");
        }
        return false;
    }
    // Reject a rename whose destination lies underneath the source
    // (i.e. srcPath is an ancestor of dstPath).
    for (Path ancestor = dstPath.getParent(); ancestor != null; ancestor = ancestor.getParent()) {
        if (srcPath.equals(ancestor)) {
            return false;
        }
    }
    // The source must exist; getFileStatus throws FileNotFoundException
    // otherwise, which propagates to the caller.
    FileStatus srcStatus = getFileStatus(srcPath);
    FileStatus dstStatus;
    try {
        dstStatus = getFileStatus(dstPath);
    } catch (FileNotFoundException fnde) {
        dstStatus = null;
    }
    if (dstStatus == null) {
        // Destination absent: its parent must exist and be a directory.
        FileStatus dstParentStatus = getFileStatus(dstPath.getParent());
        if (!dstParentStatus.isDirectory()) {
            throw new IOException(String.format("Failed to rename %s to %s, %s is a file", srcPath, dstPath, dstPath.getParent()));
        }
    } else if (srcStatus.getPath().equals(dstStatus.getPath())) {
        // Renaming a file onto itself succeeds; a directory onto itself fails.
        return !srcStatus.isDirectory();
    } else if (dstStatus.isDirectory()) {
        // Destination is a directory: move the source underneath it,
        // provided that slot is empty.
        dstPath = new Path(dstPath, srcPath.getName());
        FileStatus[] children;
        try {
            children = listStatus(dstPath);
        } catch (FileNotFoundException fnde) {
            children = null;
        }
        if (children != null && children.length > 0) {
            throw new FileAlreadyExistsException(String.format("Failed to rename %s to %s, file already exists or not empty!", srcPath, dstPath));
        }
    } else {
        // Destination exists and is a file: it may not be overwritten.
        throw new FileAlreadyExistsException(String.format("Failed to rename %s to %s, file already exists!", srcPath, dstPath));
    }
    // OSS has no server-side rename: copy then delete the source.
    if (srcStatus.isDirectory()) {
        copyDirectory(srcPath, dstPath);
    } else {
        copyFile(srcPath, dstPath);
    }
    return srcPath.equals(dstPath) || delete(srcPath, true);
}
Also used : Path(org.apache.hadoop.fs.Path) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) FileStatus(org.apache.hadoop.fs.FileStatus) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) PathIOException(org.apache.hadoop.fs.PathIOException)

Aggregations

FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException)48 Path (org.apache.hadoop.fs.Path)32 IOException (java.io.IOException)22 FileNotFoundException (java.io.FileNotFoundException)17 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)14 FileStatus (org.apache.hadoop.fs.FileStatus)14 Test (org.junit.Test)13 FileSystem (org.apache.hadoop.fs.FileSystem)7 ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException)4 RemoteException (org.apache.hadoop.ipc.RemoteException)4 File (java.io.File)3 ArrayList (java.util.ArrayList)3 Cleanup (lombok.Cleanup)3 lombok.val (lombok.val)3 FsPermission (org.apache.hadoop.fs.permission.FsPermission)3 AlreadyBeingCreatedException (org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException)3 DataOutputStream (java.io.DataOutputStream)2 InterruptedIOException (java.io.InterruptedIOException)2 HashMap (java.util.HashMap)2 Configuration (org.apache.hadoop.conf.Configuration)2