Example 6 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

The class JavaKeyStoreProvider, method tryLoadIncompleteFlush.

/**
   * The KeyStore might have gone down during a flush, in which case either the
   * _NEW or _OLD file might exist. This method tries to load the KeyStore
   * from one of these intermediate files.
   * @param oldPath the _OLD file created during flush
   * @param newPath the _NEW file created during flush
   * @return The permissions of the loaded file
   * @throws IOException
   * @throws NoSuchAlgorithmException
   * @throws CertificateException
   */
private FsPermission tryLoadIncompleteFlush(Path oldPath, Path newPath) throws IOException, NoSuchAlgorithmException, CertificateException {
    FsPermission perm = null;
    // Check if _NEW exists (in case flush had finished writing but not
    // completed the re-naming)
    if (fs.exists(newPath)) {
        perm = loadAndReturnPerm(newPath, oldPath);
    }
    // Check if _OLD exists (in case flush had renamed the original file to
    // _OLD but had not finished writing the new file completely)
    if ((perm == null) && fs.exists(oldPath)) {
        perm = loadAndReturnPerm(oldPath, newPath);
    }
    // Neither file exists, so start fresh; loading a null InputStream is
    // required to create an empty keystore. *sigh*
    if (perm == null) {
        keyStore.load(null, password);
        LOG.debug("KeyStore initialized anew successfully !!");
        perm = new FsPermission("600");
    }
    return perm;
}
Also used : FsPermission(org.apache.hadoop.fs.permission.FsPermission)
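
Example 6 falls back to new FsPermission("600"). For reference, here is a minimal sketch (class name ours, assuming only hadoop-common on the classpath) of how the string, short, and symbolic constructions of FsPermission relate:

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionForms {
    public static void main(String[] args) {
        // The String constructor parses an octal mode: "600" -> rw-------.
        FsPermission fromString = new FsPermission("600");
        // The short constructor takes raw permission bits; the leading 0
        // makes 0600 an octal literal in Java.
        FsPermission fromShort = new FsPermission((short) 0600);
        // valueOf() parses the 10-character unix symbolic form instead.
        FsPermission fromSymbolic = FsPermission.valueOf("-rw-------");
        System.out.println(fromString);                       // rw-------
        System.out.println(fromString.equals(fromShort));     // true
        System.out.println(fromString.equals(fromSymbolic));  // true
    }
}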

Example 7 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

The class JavaKeyStoreProvider, method locateKeystore.

/**
   * Open up and initialize the keyStore.
   * @throws IOException If there is a problem reading the password file
   * or a problem reading the keystore.
   */
private void locateKeystore() throws IOException {
    try {
        password = ProviderUtils.locatePassword(KEYSTORE_PASSWORD_ENV_VAR, getConf().get(KEYSTORE_PASSWORD_FILE_KEY));
        if (password == null) {
            password = KEYSTORE_PASSWORD_DEFAULT;
        }
        Path oldPath = constructOldPath(path);
        Path newPath = constructNewPath(path);
        keyStore = KeyStore.getInstance(SCHEME_NAME);
        FsPermission perm = null;
        if (fs.exists(path)) {
            // _NEW should not exist
            if (fs.exists(newPath)) {
                throw new IOException(String.format("Keystore not loaded due to some inconsistency " + "('%s' and '%s' should not exist together)!!", path, newPath));
            }
            perm = tryLoadFromPath(path, oldPath);
        } else {
            perm = tryLoadIncompleteFlush(oldPath, newPath);
        }
        // Need to save off permissions in case we need to
        // rewrite the keystore in flush()
        permissions = perm;
    } catch (KeyStoreException e) {
        throw new IOException("Can't create keystore: " + e, e);
    } catch (GeneralSecurityException e) {
        throw new IOException("Can't load keystore " + path + " : " + e, e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) GeneralSecurityException(java.security.GeneralSecurityException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) KeyStoreException(java.security.KeyStoreException)
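
locateKeystore() is easier to follow next to the write protocol in flush() that it recovers from. The sketch below is an illustrative reconstruction of that backup/rename ordering, not the provider's actual code; writeKeystore() is a hypothetical stand-in for serializing the KeyStore:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: the real flush() also rewrites permissions and
// cleans up on failure, and its helpers have different names.
class FlushSketch {
    private final FileSystem fs;

    FlushSketch(FileSystem fs) {
        this.fs = fs;
    }

    void replaceWithBackup(Path path, Path oldPath, Path newPath)
            throws IOException {
        if (fs.exists(path)) {
            // 1. Back up the current keystore as _OLD.
            fs.rename(path, oldPath);
        }
        // 2. Write the new contents to _NEW (hypothetical helper).
        writeKeystore(newPath);
        // 3. Move _NEW into place; a crash before this leaves _OLD and _NEW.
        fs.rename(newPath, path);
        // 4. Drop the backup; a crash before this leaves only _OLD.
        if (fs.exists(oldPath)) {
            fs.delete(oldPath, true);
        }
    }

    private void writeKeystore(Path target) throws IOException {
        // Stand-in for keyStore.store(...) over fs.create(target).
    }
}

A crash between steps 2 and 3 leaves both _OLD and _NEW behind; between steps 3 and 4 it leaves only _OLD. Those are exactly the intermediate states that tryLoadIncompleteFlush() in Example 6 handles.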

Example 8 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

The class TestKeyProviderFactory, method testJksProvider.

@Test
public void testJksProvider() throws Exception {
    Configuration conf = new Configuration();
    final Path jksPath = new Path(testRootDir.toString(), "test.jks");
    final String ourUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
    File file = new File(testRootDir, "test.jks");
    file.delete();
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
    checkSpecificProvider(conf, ourUrl);
    // START : Test flush error by failure injection
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl.replace(JavaKeyStoreProvider.SCHEME_NAME, FailureInjectingJavaKeyStoreProvider.SCHEME_NAME));
    // get a new instance of the provider to ensure it was saved correctly
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
    // inject failure during keystore write
    FailureInjectingJavaKeyStoreProvider fProvider = (FailureInjectingJavaKeyStoreProvider) provider;
    fProvider.setWriteFail(true);
    provider.createKey("key5", new byte[] { 1 }, KeyProvider.options(conf).setBitLength(8));
    assertNotNull(provider.getCurrentKey("key5"));
    try {
        provider.flush();
        Assert.fail("Should not succeed");
    } catch (Exception e) {
    // Ignore
    }
    // Should be reset to pre-flush state
    Assert.assertNull(provider.getCurrentKey("key5"));
    // Un-inject last failure and
    // inject failure during keystore backup
    fProvider.setWriteFail(false);
    fProvider.setBackupFail(true);
    provider.createKey("key6", new byte[] { 1 }, KeyProvider.options(conf).setBitLength(8));
    assertNotNull(provider.getCurrentKey("key6"));
    try {
        provider.flush();
        Assert.fail("Should not succeed");
    } catch (Exception e) {
    // Ignore
    }
    // Should be reset to pre-flush state
    Assert.assertNull(provider.getCurrentKey("key6"));
    // END : Test flush error by failure injection
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl.replace(FailureInjectingJavaKeyStoreProvider.SCHEME_NAME, JavaKeyStoreProvider.SCHEME_NAME));
    Path path = ProviderUtils.unnestUri(new URI(ourUrl));
    FileSystem fs = path.getFileSystem(conf);
    FileStatus s = fs.getFileStatus(path);
    assertTrue(s.getPermission().toString().equals("rw-------"));
    assertTrue(file + " should exist", file.isFile());
    // Corrupt the file and check if JKS can reload from the _OLD file
    File oldFile = new File(file.getPath() + "_OLD");
    file.renameTo(oldFile);
    file.delete();
    file.createNewFile();
    assertTrue(oldFile.exists());
    provider = KeyProviderFactory.getProviders(conf).get(0);
    assertTrue(file.exists());
    assertTrue(oldFile + " should be deleted", !oldFile.exists());
    verifyAfterReload(file, provider);
    assertTrue(!oldFile.exists());
    // _NEW and current file should not exist together
    File newFile = new File(file.getPath() + "_NEW");
    newFile.createNewFile();
    try {
        provider = KeyProviderFactory.getProviders(conf).get(0);
        Assert.fail("_NEW and current file should not exist together !!");
    } catch (Exception e) {
    // Ignore
    } finally {
        if (newFile.exists()) {
            newFile.delete();
        }
    }
    // Load from _NEW file
    file.renameTo(newFile);
    file.delete();
    try {
        provider = KeyProviderFactory.getProviders(conf).get(0);
        Assert.assertFalse(newFile.exists());
        Assert.assertFalse(oldFile.exists());
    } catch (Exception e) {
        Assert.fail("JKS should load from _NEW file !!");
    // Ignore
    }
    verifyAfterReload(file, provider);
    // _NEW exists but corrupt.. must load from _OLD
    newFile.createNewFile();
    file.renameTo(oldFile);
    file.delete();
    try {
        provider = KeyProviderFactory.getProviders(conf).get(0);
        Assert.assertFalse(newFile.exists());
        Assert.assertFalse(oldFile.exists());
    } catch (Exception e) {
        Assert.fail("JKS should load from _OLD file !!");
    // Ignore
    } finally {
        if (newFile.exists()) {
            newFile.delete();
        }
    }
    verifyAfterReload(file, provider);
    // check permission retention after explicit change
    fs.setPermission(path, new FsPermission("777"));
    checkPermissionRetention(conf, ourUrl, path);
    // Check that an uppercase keyname results in an error
    provider = KeyProviderFactory.getProviders(conf).get(0);
    try {
        provider.createKey("UPPERCASE", KeyProvider.options(conf));
        Assert.fail("Expected failure on creating key name with uppercase " + "characters");
    } catch (IllegalArgumentException e) {
        GenericTestUtils.assertExceptionContains("Uppercase key names", e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FsPermission(org.apache.hadoop.fs.permission.FsPermission) File(java.io.File) URI(java.net.URI) IOException(java.io.IOException) Test(org.junit.Test)
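
The test asserts that the keystore lands on disk as rw------- (the 600 default from Example 6). The same check can be made standalone; a minimal sketch (class name ours, path passed as an argument):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CheckKeystorePerm {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path(args[0]);  // e.g. file:///tmp/test.jks
        FileSystem fs = path.getFileSystem(conf);
        FileStatus status = fs.getFileStatus(path);
        FsPermission perm = status.getPermission();
        // JavaKeyStoreProvider writes the keystore as 600 (rw-------).
        System.out.println(perm + " " + perm.equals(new FsPermission("600")));
    }
}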

Example 9 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

The class FSMainOperationsBaseTest, method testGlobStatusThrowsExceptionForUnreadableDir.

@Test
public void testGlobStatusThrowsExceptionForUnreadableDir() throws Exception {
    Path testRootDir = getTestRootPath(fSys, "test/hadoop/dir");
    Path obscuredDir = new Path(testRootDir, "foo");
    // so that foo is non-empty
    Path subDir = new Path(obscuredDir, "bar");
    fSys.mkdirs(subDir);
    // no access
    fSys.setPermission(obscuredDir, new FsPermission((short) 0));
    try {
        fSys.globStatus(getTestRootPath(fSys, "test/hadoop/dir/foo/*"));
        Assert.fail("Should throw IOException");
    } catch (IOException ioe) {
    // expected
    } finally {
        // restore default permissions so the test directory can be deleted
        fSys.setPermission(obscuredDir, new FsPermission((short) 0755));
    }
}
Also used : FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) Test(org.junit.Test)
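
Note that both FsPermission calls above lean on Java octal literals: (short) 0755 has a leading zero, so it is octal. A small sketch of the pitfall (class name ours):

import org.apache.hadoop.fs.permission.FsPermission;

public class OctalPitfall {
    public static void main(String[] args) {
        // Correct: 0755 is an octal literal -> rwxr-xr-x.
        System.out.println(new FsPermission((short) 0755));
        // Wrong: decimal 755 sets entirely different bits.
        System.out.println(new FsPermission((short) 755));
    }
}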

Example 10 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

The class FileContextPermissionBase, method testSetPermission.

@Test
public void testSetPermission() throws IOException {
    assumeNotWindows();
    String filename = "foo";
    Path f = fileContextTestHelper.getTestRootPath(fc, filename);
    createFile(fc, f);
    try {
        // create files and manipulate them.
        FsPermission all = new FsPermission((short) 0777);
        FsPermission none = new FsPermission((short) 0);
        fc.setPermission(f, none);
        doFilePermissionCheck(none, fc.getFileStatus(f).getPermission());
        fc.setPermission(f, all);
        doFilePermissionCheck(all, fc.getFileStatus(f).getPermission());
    } finally {
        cleanupFile(fc, f);
    }
}
Also used : FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
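
Outside the test harness, the same round trip looks like the sketch below (class name ours; assumes a local FileContext and an existing file at the given path). The test wraps its comparison in doFilePermissionCheck, presumably to absorb platform differences that a direct equals() would trip over:

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetPermissionSketch {
    public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getLocalFSFileContext();
        Path f = new Path(args[0]);
        // Strip all permission bits, then read the status back.
        fc.setPermission(f, new FsPermission((short) 0));
        System.out.println(fc.getFileStatus(f).getPermission()); // ---------
    }
}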

Aggregations

FsPermission (org.apache.hadoop.fs.permission.FsPermission): 427
Path (org.apache.hadoop.fs.Path): 267
Test (org.junit.Test): 180
IOException (java.io.IOException): 120
FileSystem (org.apache.hadoop.fs.FileSystem): 93
Configuration (org.apache.hadoop.conf.Configuration): 89
FileStatus (org.apache.hadoop.fs.FileStatus): 87
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 52
AccessControlException (org.apache.hadoop.security.AccessControlException): 43
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 36
FileNotFoundException (java.io.FileNotFoundException): 33
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 29
File (java.io.File): 26
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 26
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 26
AclEntry (org.apache.hadoop.fs.permission.AclEntry): 25
ArrayList (java.util.ArrayList): 22
HashMap (java.util.HashMap): 19
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 16
URI (java.net.URI): 15