
Example 1 with MiniDFSShim

Use of org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim in project hive by apache.

From class TestHS2ImpersonationWithRemoteMS, method testImpersonation:

@Test
public void testImpersonation() throws Exception {
    assertTrue("Test setup failed. MiniHS2 is not initialized", miniHS2 != null && miniHS2.isStarted());
    Class.forName(MiniHS2.getJdbcDriverName());
    // Create two tables one as user "foo" and other as user "bar"
    Connection hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(), "foo", null);
    Statement stmt = hs2Conn.createStatement();
    String tableName = "foo_table";
    stmt.execute("drop table if exists " + tableName);
    stmt.execute("create table " + tableName + " (value string)");
    stmt.close();
    hs2Conn.close();
    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(), "bar", null);
    stmt = hs2Conn.createStatement();
    tableName = "bar_table";
    stmt.execute("drop table if exists " + tableName);
    stmt.execute("create table " + tableName + " (value string)");
    stmt.close();
    hs2Conn.close();
    MiniDFSShim dfs = miniHS2.getDfs();
    FileSystem fs = dfs.getFileSystem();
    FileStatus[] files = fs.listStatus(miniHS2.getWareHouseDir());
    boolean fooTableValidated = false;
    boolean barTableValidated = false;
    for (FileStatus file : files) {
        final String name = file.getPath().getName();
        final String owner = file.getOwner();
        if (name.equals("foo_table")) {
            fooTableValidated = owner.equals("foo");
            assertTrue(String.format("User 'foo' table has wrong ownership '%s'", owner), fooTableValidated);
        } else if (name.equals("bar_table")) {
            barTableValidated = owner.equals("bar");
            assertTrue(String.format("User 'bar' table has wrong ownership '%s'", owner), barTableValidated);
        } else {
            fail(String.format("Unexpected table directory '%s' in warehouse", name));
        }
        System.out.println(String.format("File: %s, Owner: %s", name, owner));
    }
    assertTrue("User 'foo' table not found in warehouse", fooTableValidated);
    assertTrue("User 'bar' table not found in warehouse", barTableValidated);
}
Also used: MiniDFSShim (org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim), FileStatus (org.apache.hadoop.fs.FileStatus), Statement (java.sql.Statement), FileSystem (org.apache.hadoop.fs.FileSystem), Connection (java.sql.Connection), Test (org.junit.Test)
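
The ownership check above walks the whole warehouse directory and validates each entry inline. As a minimal sketch of the same idea factored into a reusable helper (the method name assertOwnedBy is hypothetical and not part of the Hive test; only standard Hadoop FileSystem and JUnit calls are used):

import static org.junit.Assert.assertEquals;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: assert that a single warehouse subdirectory is owned by the expected user.
static void assertOwnedBy(FileSystem fs, Path dir, String expectedOwner) throws Exception {
    FileStatus status = fs.getFileStatus(dir);
    assertEquals(String.format("Wrong owner for '%s'", dir), expectedOwner, status.getOwner());
}

With such a helper, and assuming getWareHouseDir() returns a Path as its use with listStatus above suggests, the loop could be reduced to two direct calls, e.g. assertOwnedBy(fs, new Path(miniHS2.getWareHouseDir(), "foo_table"), "foo").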

Example 2 with MiniDFSShim

Use of org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim in project hive by apache.

From class FolderPermissionBase, method baseSetup:

public static void baseSetup() throws Exception {
    MiniDFSShim dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null);
    fs = dfs.getFileSystem();
    baseDfsDir = new Path(new Path(fs.getUri()), "/base");
    fs.mkdirs(baseDfsDir);
    warehouseDir = new Path(baseDfsDir, "warehouse");
    fs.mkdirs(warehouseDir);
    conf.setVar(ConfVars.METASTOREWAREHOUSE, warehouseDir.toString());
    // Assuming the tests are run either in C or D drive in Windows OS!
    dataFileDir = conf.get("test.data.files").replace('\\', '/').replace("c:", "").replace("C:", "").replace("D:", "").replace("d:", "");
    dataFilePath = new Path(dataFileDir, "kv1.txt");
    // Set up scratch directory
    Path scratchDir = new Path(baseDfsDir, "scratchdir");
    conf.setVar(HiveConf.ConfVars.SCRATCHDIR, scratchDir.toString());
    //set hive conf vars
    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
    conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true);
    conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
    int port = MetaStoreUtils.findFreePort();
    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
    SessionState.start(new CliSessionState(conf));
    driver = new Driver(conf);
    setupDataTable();
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSShim (org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim), Driver (org.apache.hadoop.hive.ql.Driver), CliSessionState (org.apache.hadoop.hive.cli.CliSessionState)
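
baseSetup starts a four-node mini DFS cluster, but the excerpt does not show the matching cleanup. A minimal tear-down sketch follows; it assumes the MiniDFSShim is kept in a static field (here called dfs) rather than the local variable shown above, and the method name baseTearDown is hypothetical.

// Hypothetical counterpart to baseSetup: remove the test directory and stop the mini DFS cluster.
public static void baseTearDown() throws Exception {
    if (fs != null) {
        // recursively delete the base test directory created in baseSetup
        fs.delete(baseDfsDir, true);
    }
    if (dfs != null) {
        // stop the embedded DFS cluster via the shim's shutdown()
        dfs.shutdown();
    }
}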

Example 3 with MiniDFSShim

Use of org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim in project hive by apache.

From class TestDFSErrorHandling, method testAccessDenied:

@Test
public void testAccessDenied() throws Exception {
    assertTrue("Test setup failed. MiniHS2 is not initialized", miniHS2 != null && miniHS2.isStarted());
    Class.forName(MiniHS2.getJdbcDriverName());
    Path scratchDir = new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCHDIR));
    MiniDFSShim dfs = miniHS2.getDfs();
    FileSystem fs = dfs.getFileSystem();
    Path stickyBitDir = new Path(scratchDir, "stickyBitDir");
    fs.mkdirs(stickyBitDir);
    String dataFileDir = hiveConf.get("test.data.files").replace('\\', '/').replace("c:", "").replace("C:", "").replace("D:", "").replace("d:", "");
    Path dataFilePath = new Path(dataFileDir, "kv1.txt");
    fs.copyFromLocalFile(dataFilePath, stickyBitDir);
    FsPermission fsPermission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
    // Sets the sticky bit on stickyBitDir - now removing file kv1.txt from stickyBitDir by
    // unprivileged user will result in a DFS error.
    fs.setPermission(stickyBitDir, fsPermission);
    FileStatus[] files = fs.listStatus(stickyBitDir);
    // Connecting to HS2 as foo.
    Connection hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(), "foo", "bar");
    Statement stmt = hs2Conn.createStatement();
    String tableName = "stickyBitTable";
    stmt.execute("drop table if exists " + tableName);
    stmt.execute("create table " + tableName + " (foo int, bar string)");
    try {
        // This statement will attempt to move kv1.txt out of stickyBitDir as user foo.  HS2 is
        // expected to return 20009.
        stmt.execute("LOAD DATA INPATH '" + stickyBitDir.toUri().getPath() + "/kv1.txt' " + "OVERWRITE INTO TABLE " + tableName);
    } catch (Exception e) {
        if (e instanceof SQLException) {
            SQLException se = (SQLException) e;
            Assert.assertEquals("Unexpected error code", 20009, se.getErrorCode());
            System.out.println(String.format("Error Message: %s", se.getMessage()));
        } else
            throw e;
    }
    stmt.execute("drop table if exists " + tableName);
    stmt.close();
    hs2Conn.close();
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSShim (org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim), FileStatus (org.apache.hadoop.fs.FileStatus), HiveSQLException (org.apache.hive.service.cli.HiveSQLException), SQLException (java.sql.SQLException), Statement (java.sql.Statement), FileSystem (org.apache.hadoop.fs.FileSystem), Connection (java.sql.Connection), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Test (org.junit.Test)
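
The expected failure above depends entirely on the sticky bit set on stickyBitDir. A short standalone sketch (not part of the Hive test; the helper name is hypothetical) that reads the permission back and confirms the sticky bit took effect before the LOAD DATA statement runs:

import static org.junit.Assert.assertTrue;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

// Hypothetical sanity check: confirm the sticky bit is really set on the directory,
// so that a 20009 error from LOAD DATA can be attributed to it.
static void assertStickyBitSet(FileSystem fs, Path dir) throws Exception {
    FsPermission perm = fs.getFileStatus(dir).getPermission();
    assertTrue("Sticky bit not set on " + dir, perm.getStickyBit());
}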

Aggregations

MiniDFSShim (org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim): 3 usages
Connection (java.sql.Connection): 2 usages
Statement (java.sql.Statement): 2 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 2 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
Test (org.junit.Test): 2 usages
SQLException (java.sql.SQLException): 1 usage
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 1 usage
CliSessionState (org.apache.hadoop.hive.cli.CliSessionState): 1 usage
Driver (org.apache.hadoop.hive.ql.Driver): 1 usage
HiveSQLException (org.apache.hive.service.cli.HiveSQLException): 1 usage