Example 21 with FileAlreadyExistsException

Use of org.apache.hadoop.fs.FileAlreadyExistsException in project cdap by caskdata.

From the class MasterServiceMain, method createDirectory.

private void createDirectory(FileContext fileContext, String path) {
    try {
        org.apache.hadoop.fs.Path fPath = new org.apache.hadoop.fs.Path(path);
        boolean dirExists = checkDirectoryExists(fileContext, fPath);
        if (!dirExists) {
            FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
            // FileContext applies (permission AND NOT umask) as the effective permission; the default umask is 022,
            // so to get 777 we have to set the umask to 000
            fileContext.setUMask(new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.NONE));
            fileContext.mkdir(fPath, permission, true);
        }
    } catch (FileAlreadyExistsException e) {
    // should not happen, since we only call mkdir when the directory does not exist
    } catch (AccessControlException | ParentNotDirectoryException | FileNotFoundException e) {
        // just log the exception
        LOG.error("Exception while trying to create directory at {}", path, e);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
Also used: Path (java.nio.file.Path), FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException), FileNotFoundException (java.io.FileNotFoundException), AccessControlException (org.apache.hadoop.security.AccessControlException), IOException (java.io.IOException), ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException), FsPermission (org.apache.hadoop.fs.permission.FsPermission)
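
A note on the pattern above: the existence check and the mkdir are not atomic, so another process can create the directory between checkDirectoryExists and mkdir. A minimal race-tolerant sketch, assuming only the standard Hadoop FileContext API (the class and helper names DirUtils/ensureDirectory are hypothetical, not CDAP code):

import java.io.IOException;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public final class DirUtils {

    // Hypothetical helper: create a world-writable directory, treating a
    // concurrent creation by another process as success instead of relying
    // on a separate existence check.
    static void ensureDirectory(FileContext fileContext, Path path) throws IOException {
        FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
        // mkdir applies (permission AND NOT umask); clear the umask so the
        // requested 777 bits survive
        fileContext.setUMask(new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.NONE));
        try {
            fileContext.mkdir(path, permission, true);
        } catch (FileAlreadyExistsException e) {
            // another process won the race; the directory exists, which is
            // all this helper promises
        }
    }
}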

Example 22 with FileAlreadyExistsException

Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hbase by apache.

From the class WALSplitter, method writeRegionSequenceIdFile.

/**
   * Create a file whose name records the region's open sequence id.
   * @param fs filesystem to write to
   * @param regiondir the region directory containing the recovered-edits directory
   * @param newSeqId the minimum sequence id to record
   * @param saftyBumper safety margin added to the sequence id before writing
   * @return the new sequence id value that was written
   * @throws IOException if the sequence id file cannot be created
   */
public static long writeRegionSequenceIdFile(final FileSystem fs, final Path regiondir, long newSeqId, long saftyBumper) throws IOException {
    Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
    long maxSeqId = 0;
    FileStatus[] files = null;
    if (fs.exists(editsdir)) {
        files = FSUtils.listStatus(fs, editsdir, new PathFilter() {

            @Override
            public boolean accept(Path p) {
                return isSequenceIdFile(p);
            }
        });
        if (files != null) {
            for (FileStatus status : files) {
                String fileName = status.getPath().getName();
                try {
                    Long tmpSeqId = Long.parseLong(fileName.substring(0, fileName.length() - SEQUENCE_ID_FILE_SUFFIX_LENGTH));
                    maxSeqId = Math.max(tmpSeqId, maxSeqId);
                } catch (NumberFormatException ex) {
                    LOG.warn("Invalid SeqId File Name=" + fileName);
                }
            }
        }
    }
    if (maxSeqId > newSeqId) {
        newSeqId = maxSeqId;
    }
    // bump up SeqId
    newSeqId += saftyBumper;
    // write a new seqId file
    Path newSeqIdFile = new Path(editsdir, newSeqId + SEQUENCE_ID_FILE_SUFFIX);
    if (newSeqId != maxSeqId) {
        try {
            if (!fs.createNewFile(newSeqIdFile) && !fs.exists(newSeqIdFile)) {
                throw new IOException("Failed to create SeqId file:" + newSeqIdFile);
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Wrote region seqId=" + newSeqIdFile + " to file, newSeqId=" + newSeqId + ", maxSeqId=" + maxSeqId);
            }
        } catch (FileAlreadyExistsException ignored) {
        // recent HDFS versions throw this exception; it is fine if newSeqIdFile already exists
        }
    }
    // remove old ones
    if (files != null) {
        for (FileStatus status : files) {
            if (newSeqIdFile.equals(status.getPath())) {
                continue;
            }
            fs.delete(status.getPath(), false);
        }
    }
    return newSeqId;
}
Also used: Path (org.apache.hadoop.fs.Path), PathFilter (org.apache.hadoop.fs.PathFilter), FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException), FileStatus (org.apache.hadoop.fs.FileStatus), AtomicLong (java.util.concurrent.atomic.AtomicLong), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException)
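
The tolerant-create idiom in the middle of this method is worth isolating: createNewFile returning false, a follow-up exists check, and a FileAlreadyExistsException from newer HDFS clients are all folded into "the file is in place". A minimal sketch of just that idiom, using only public FileSystem APIs (the class and method names MarkerFiles/createMarkerFile are hypothetical):

import java.io.IOException;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class MarkerFiles {

    // Hypothetical helper: create an empty marker file, treating "already
    // exists" (reported either as a false return or as a
    // FileAlreadyExistsException, depending on the HDFS version) as success.
    static void createMarkerFile(FileSystem fs, Path marker) throws IOException {
        try {
            if (!fs.createNewFile(marker) && !fs.exists(marker)) {
                throw new IOException("Failed to create marker file: " + marker);
            }
        } catch (FileAlreadyExistsException e) {
            // newer HDFS throws instead of returning false; either way the
            // marker exists, which is the desired end state
        }
    }
}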

Example 23 with FileAlreadyExistsException

Use of org.apache.hadoop.fs.FileAlreadyExistsException in project storm by apache.

From the class TestHdfsSemantics, method testDoubleCreateSemantics.

@Test
public void testDoubleCreateSemantics() throws Exception {
    //1 create an already existing open file w/o override flag
    Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1");
    FSDataOutputStream os1 = fs.create(file1, false);
    try {
        // should fail
        fs.create(file1, false);
        Assert.assertTrue("Create did not throw an exception", false);
    } catch (RemoteException e) {
        Assert.assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass());
    }
    //2 close file and retry creation
    os1.close();
    try {
        // should still fail, this time with FileAlreadyExistsException since the file is closed
        fs.create(file1, false);
        Assert.fail("Create over a closed existing file did not throw an exception");
    } catch (FileAlreadyExistsException e) {
    // expecting this exception
    }
    //3 delete file and retry creation
    fs.delete(file1, false);
    // should pass
    FSDataOutputStream os2 = fs.create(file1, false);
    Assert.assertNotNull(os2);
    os2.close();
}
Also used: Path (org.apache.hadoop.fs.Path), FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException), AlreadyBeingCreatedException (org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
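
The test pins down a useful distinction: creating over a file that is still open for write fails with a RemoteException wrapping AlreadyBeingCreatedException, while creating over a closed file throws FileAlreadyExistsException directly. A minimal sketch that collapses both cases into a single "already exists" answer (the class and method names ExclusiveCreate/createExclusive are hypothetical):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.ipc.RemoteException;

public final class ExclusiveCreate {

    // Hypothetical helper: create a file exclusively, returning null if it
    // already exists, whether closed (FileAlreadyExistsException) or still
    // open by another writer (RemoteException wrapping
    // AlreadyBeingCreatedException).
    static FSDataOutputStream createExclusive(FileSystem fs, Path file) throws IOException {
        try {
            return fs.create(file, false);
        } catch (FileAlreadyExistsException e) {
            return null;
        } catch (RemoteException e) {
            if (e.unwrapRemoteException() instanceof AlreadyBeingCreatedException) {
                return null;
            }
            throw e;
        }
    }
}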

Example 24 with FileAlreadyExistsException

Use of org.apache.hadoop.fs.FileAlreadyExistsException in project hive by apache.

From the class ClearDanglingScratchDir, method run.

@Override
public void run() {
    try {
        Path rootHDFSDirPath = new Path(rootHDFSDir);
        FileSystem fs = FileSystem.get(rootHDFSDirPath.toUri(), conf);
        FileStatus[] userHDFSDirList = fs.listStatus(rootHDFSDirPath);
        List<Path> scratchDirToRemove = new ArrayList<Path>();
        for (FileStatus userHDFSDir : userHDFSDirList) {
            FileStatus[] scratchDirList = fs.listStatus(userHDFSDir.getPath());
            for (FileStatus scratchDir : scratchDirList) {
                Path lockFilePath = new Path(scratchDir.getPath(), SessionState.LOCK_FILE_NAME);
                if (!fs.exists(lockFilePath)) {
                    String message = "Skipping " + scratchDir.getPath() + " since it does not contain " + SessionState.LOCK_FILE_NAME;
                    if (verbose) {
                        consoleMessage(message);
                    }
                    continue;
                }
                boolean removable = false;
                boolean inuse = false;
                try {
                    IOUtils.closeStream(fs.append(lockFilePath));
                    removable = true;
                } catch (RemoteException eAppend) {
                    // if the file is currently held by a writer
                    if (AlreadyBeingCreatedException.class.getName().equals(eAppend.getClassName())) {
                        inuse = true;
                    } else if (UnsupportedOperationException.class.getName().equals(eAppend.getClassName())) {
                        // Append is not supported in the cluster, try to use create
                        try {
                            IOUtils.closeStream(fs.create(lockFilePath, false));
                        } catch (RemoteException eCreate) {
                            if (AlreadyBeingCreatedException.class.getName().equals(eCreate.getClassName())) {
                                // If the file is held by a writer, will throw AlreadyBeingCreatedException
                                inuse = true;
                            } else {
                                consoleMessage("Unexpected error:" + eCreate.getMessage());
                            }
                        } catch (FileAlreadyExistsException eCreateNormal) {
                            // A FileAlreadyExistsException here means no writer holds the
                            // lock file, so its owner is dead and the scratch dir is removable
                            removable = true;
                        }
                    } else {
                        consoleMessage("Unexpected error:" + eAppend.getMessage());
                    }
                }
                if (inuse) {
                    // Cannot open the lock file for writing, must be held by a live process
                    String message = scratchDir.getPath() + " is being used by live process";
                    if (verbose) {
                        consoleMessage(message);
                    }
                }
                if (removable) {
                    scratchDirToRemove.add(scratchDir.getPath());
                }
            }
        }
        if (scratchDirToRemove.size() == 0) {
            consoleMessage("Cannot find any scratch directory to clear");
            return;
        }
        consoleMessage("Removing " + scratchDirToRemove.size() + " scratch directories");
        for (Path scratchDir : scratchDirToRemove) {
            if (dryRun) {
                System.out.println(scratchDir);
            } else {
                boolean succ = fs.delete(scratchDir, true);
                if (!succ) {
                    consoleMessage("Cannot remove " + scratchDir);
                } else {
                    String message = scratchDir + " removed";
                    if (verbose) {
                        consoleMessage(message);
                    }
                }
            }
        }
    } catch (IOException e) {
        consoleMessage("Unexpected exception " + e.getMessage());
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException), FileStatus (org.apache.hadoop.fs.FileStatus), FileSystem (org.apache.hadoop.fs.FileSystem), ArrayList (java.util.ArrayList), IOException (java.io.IOException), RemoteException (org.apache.hadoop.ipc.RemoteException)
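
The core trick in this method is the lease probe: opening the lock file for append acquires an HDFS write lease, so an AlreadyBeingCreatedException means a live process still holds the file open. A minimal sketch of the probe on its own, assuming a cluster that supports append (the class and method names LockProbe/isLockHeld are hypothetical):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;

public final class LockProbe {

    // Hypothetical helper, assuming append support: returns true if another
    // process still holds the HDFS write lease on the lock file.
    static boolean isLockHeld(FileSystem fs, Path lockFile) throws IOException {
        try {
            // appending takes the write lease; close immediately, we only
            // wanted to know whether the lease was free
            IOUtils.closeStream(fs.append(lockFile));
            return false;
        } catch (RemoteException e) {
            if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
                return true;
            }
            throw e;
        }
    }
}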

Example 25 with FileAlreadyExistsException

Use of org.apache.hadoop.fs.FileAlreadyExistsException in project glusterfs-hadoop by gluster.

From the class HcfsMainOperationsBaseTest, method testMkdirsFailsForSubdirectoryOfExistingFile.

@Test
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
    Path testDir = getTestRootPath(fSys, "test/hadoop");
    Assert.assertFalse(exists(fSys, testDir));
    fSys.mkdirs(testDir);
    Assert.assertTrue(exists(fSys, testDir));
    createFile(getTestRootPath(fSys, "test/hadoop/file"));
    Path testSubDir = getTestRootPath(fSys, "test/hadoop/file/subdir");
    try {
        Assert.assertFalse(fSys.mkdirs(testSubDir));
    } catch (FileAlreadyExistsException ex) {
    // expected: some implementations throw instead of returning false
    }
    Assert.assertFalse(exists(fSys, testSubDir));
    Path testDeepSubDir = getTestRootPath(fSys, "test/hadoop/file/deep/sub/dir");
    Assert.assertFalse(exists(fSys, testDeepSubDir));
    try {
        Assert.assertFalse(fSys.mkdirs(testDeepSubDir));
    } catch (FileAlreadyExistsException ex) {
    // expected: some implementations throw instead of returning false
    }
    Assert.assertFalse(exists(fSys, testDeepSubDir));
}
Also used: Path (org.apache.hadoop.fs.Path), FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException), Test (org.junit.Test)
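
The belt-and-braces shape of this test (asserting a false return and catching FileAlreadyExistsException) reflects real variance across FileSystem implementations: some report the conflict through the return value, others throw FileAlreadyExistsException, and HDFS can raise ParentNotDirectoryException instead. A minimal sketch that normalizes the three outcomes (the class and method names MkdirsCompat/tryMkdirs are hypothetical):

import java.io.IOException;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;

public final class MkdirsCompat {

    // Hypothetical helper: returns true only if the directory was created
    // (or already existed as a directory); a false return,
    // FileAlreadyExistsException, or ParentNotDirectoryException all mean
    // a path component is a file.
    static boolean tryMkdirs(FileSystem fs, Path dir) throws IOException {
        try {
            return fs.mkdirs(dir);
        } catch (FileAlreadyExistsException | ParentNotDirectoryException e) {
            return false;
        }
    }
}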

Aggregations

FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 44 uses
Path (org.apache.hadoop.fs.Path): 31 uses
IOException (java.io.IOException): 22 uses
FileNotFoundException (java.io.FileNotFoundException): 16 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 13 uses
Test (org.junit.Test): 10 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 7 uses
ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException): 4 uses
RemoteException (org.apache.hadoop.ipc.RemoteException): 4 uses
File (java.io.File): 3 uses
ArrayList (java.util.ArrayList): 3 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 3 uses
AlreadyBeingCreatedException (org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException): 3 uses
DataOutputStream (java.io.DataOutputStream): 2 uses
InterruptedIOException (java.io.InterruptedIOException): 2 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus): 2 uses
SwiftOperationFailedException (org.apache.hadoop.fs.swift.exceptions.SwiftOperationFailedException): 2 uses
SwiftObjectPath (org.apache.hadoop.fs.swift.util.SwiftObjectPath): 2 uses