Search in sources:

Example 46 with HiveMetaStoreClient

use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.

the class TestRemoteHiveMetaStoreDualAuthCustom method createClient.

@Override
protected HiveMetaStoreClient createClient() throws Exception {
    // Point the client at the local test metastore and force PLAIN (username/password)
    // authentication without SASL or UGI propagation.
    MetastoreConf.setVar(clientConf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
    MetastoreConf.setBoolVar(clientConf, ConfVars.USE_THRIFT_SASL, false);
    MetastoreConf.setVar(clientConf, ConfVars.METASTORE_CLIENT_AUTH_MODE, "PLAIN");
    MetastoreConf.setBoolVar(clientConf, ConfVars.EXECUTE_SET_UGI, false);
    // Trying to log in using correct username but wrong password should fail.
    String credsCUWP = createCredFile(correctUser, wrongPassword);
    assertLoginFails(credsCUWP, correctUser, "Error validating the login");
    // Trying to log in with a user whose credentials do not exist in the given file should fail.
    assertLoginFails(credsCUWP, wrongUser, "No password found for user");
    // Trying to log in with a user who does not exist but whose password is found in the
    // credential file should fail. It doesn't matter what the password is since the user
    // doesn't exist.
    String credsWUWP = createCredFile(wrongUser, wrongPassword);
    assertLoginFails(credsWUWP, wrongUser, "Error validating the login");
    // Correct user and correct password: this connection must succeed and is returned to
    // the caller.
    String credsCUCP = createCredFile(correctUser, correctPassword);
    clientConf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, credsCUCP);
    MetastoreConf.setVar(clientConf, ConfVars.METASTORE_CLIENT_PLAIN_USERNAME, correctUser);
    return new HiveMetaStoreClient(clientConf);
}

/**
 * Attempts to open a metastore connection using the given credential file and username,
 * and asserts that it fails with a {@link MetaException} whose message contains
 * {@code expectedError}.
 *
 * @param credFile      path of the credential provider file to use for the attempt
 * @param userName      username to present to the metastore
 * @param expectedError substring that must appear in the failure message
 */
private void assertLoginFails(String credFile, String userName, String expectedError)
        throws Exception {
    clientConf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, credFile);
    MetastoreConf.setVar(clientConf, ConfVars.METASTORE_CLIENT_PLAIN_USERNAME, userName);
    String exceptionMessage = null;
    HiveMetaStoreClient tmpClient = null;
    try {
        tmpClient = new HiveMetaStoreClient(clientConf);
    } catch (MetaException e) {
        exceptionMessage = e.getMessage();
    } finally {
        if (tmpClient != null) {
            // Login was expected to fail; close the leaked connection before the
            // assertions below report the problem.
            tmpClient.close();
        }
    }
    Assert.assertNotNull(exceptionMessage);
    Assert.assertTrue(exceptionMessage.contains(expectedError));
}
Also used : HiveMetaStoreClient(org.apache.hadoop.hive.metastore.HiveMetaStoreClient) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)

Example 47 with HiveMetaStoreClient

use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.

the class TestTxnCommands method testAddAndDropPartitionAdvancingWriteIds.

@Test
public void testAddAndDropPartitionAdvancingWriteIds() throws Exception {
    runStatementOnDriver("create database IF NOT EXISTS db1");
    String tableName = "db1.add_drop_partition";
    IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
    try {
        runStatementOnDriver(String.format("CREATE TABLE %s (f1 string) PARTITIONED BY (ds STRING) " + "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')", tableName));
        // Table creation does not allocate a write id: high watermark starts at 0.
        String validWriteIds = msClient.getValidWriteIds(tableName).toString();
        LOG.info("ValidWriteIds before add partition::" + validWriteIds);
        Assert.assertEquals("db1.add_drop_partition:0:9223372036854775807::", validWriteIds);
        // ADD PARTITION must advance the write id to 1.
        runStatementOnDriver("ALTER TABLE db1.add_drop_partition ADD PARTITION (ds='2013-04-05')");
        validWriteIds = msClient.getValidWriteIds(tableName).toString();
        LOG.info("ValidWriteIds after add partition::" + validWriteIds);
        Assert.assertEquals("db1.add_drop_partition:1:9223372036854775807::", validWriteIds);
        // DROP PARTITION must advance the write id to 2.
        runStatementOnDriver("ALTER TABLE db1.add_drop_partition DROP PARTITION (ds='2013-04-05')");
        validWriteIds = msClient.getValidWriteIds(tableName).toString();
        LOG.info("ValidWriteIds after drop partition::" + validWriteIds);
        Assert.assertEquals("db1.add_drop_partition:2:9223372036854775807::", validWriteIds);
    } finally {
        // Release the metastore connection even when an assertion fails.
        msClient.close();
    }
}
Also used : HiveMetaStoreClient(org.apache.hadoop.hive.metastore.HiveMetaStoreClient) IMetaStoreClient(org.apache.hadoop.hive.metastore.IMetaStoreClient) Test(org.junit.Test)

Example 48 with HiveMetaStoreClient

use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.

the class TestTxnCommands method testTxnStatsOnOff.

@Test
public void testTxnStatsOnOff() throws Exception {
    String tableName = "mm_table";
    hiveConf.setBoolean("hive.stats.autogather", true);
    hiveConf.setBoolean("hive.stats.column.autogather", true);
    // Need to close the thread local Hive object so that configuration change is reflected to HMS.
    Hive.closeCurrent();
    runStatementOnDriver("drop table if exists " + tableName);
    runStatementOnDriver(String.format("create table %s (a int) stored as orc " + "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')", tableName));
    runStatementOnDriver(String.format("insert into %s (a) values (1)", tableName));
    // With txn stats enabled (default), stats are gathered and visible after each insert.
    IMetaStoreClient client = new HiveMetaStoreClient(hiveConf);
    List<ColumnStatisticsObj> tableStats = getTxnTableStats(client, tableName);
    Assert.assertEquals(1, tableStats.size());
    runStatementOnDriver(String.format("insert into %s (a) values (1)", tableName));
    tableStats = getTxnTableStats(client, tableName);
    Assert.assertEquals(1, tableStats.size());
    client.close();
    // Disable txn stats: the stored stats remain valid in the metastore, but the
    // client must no longer see them.
    setTxnStatsEnabled(false);
    client = new HiveMetaStoreClient(hiveConf);
    tableStats = getTxnTableStats(client, tableName);
    Assert.assertEquals(0, tableStats.size());
    client.close();
    // Re-enable txn stats: the stored stats become visible again.
    setTxnStatsEnabled(true);
    client = new HiveMetaStoreClient(hiveConf);
    tableStats = getTxnTableStats(client, tableName);
    Assert.assertEquals(1, tableStats.size());
    client.close();
    setTxnStatsEnabled(false);
    // Need to close the thread local Hive object so that configuration change is reflected to HMS.
    Hive.closeCurrent();
    // Running the query with stats disabled will cause stats in metastore itself to become invalid.
    runStatementOnDriver(String.format("insert into %s (a) values (1)", tableName));
    setTxnStatsEnabled(true);
    client = new HiveMetaStoreClient(hiveConf);
    tableStats = getTxnTableStats(client, tableName);
    Assert.assertEquals(0, tableStats.size());
    client.close();
}

/** Toggles the transactional-statistics feature flag on the shared test configuration. */
private void setTxnStatsEnabled(boolean enabled) {
    hiveConf.setBoolean(MetastoreConf.ConfVars.HIVE_TXN_STATS_ENABLED.getVarname(), enabled);
}
Also used : ColumnStatisticsObj(org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj) HiveMetaStoreClient(org.apache.hadoop.hive.metastore.HiveMetaStoreClient) IMetaStoreClient(org.apache.hadoop.hive.metastore.IMetaStoreClient) Test(org.junit.Test)

Example 49 with HiveMetaStoreClient

use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.

the class TestTxnCommands method testDDLsAdvancingWriteIds.

@Test
public void testDDLsAdvancingWriteIds() throws Exception {
    String tableName = "alter_table";
    String fullTableName = "default." + tableName;
    runStatementOnDriver("drop table if exists " + tableName);
    runStatementOnDriver(String.format("create table %s (a int, b string, c BIGINT, d INT) " + "PARTITIONED BY (ds STRING)" + "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')", tableName));
    runStatementOnDriver(String.format("insert into %s (a) values (0)", tableName));
    IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
    try {
        // The initial insert allocated write id 1; each DDL below must advance it by one.
        assertHighWatermark(msClient, fullTableName, 1);
        runStatementOnDriver(String.format("alter table %s SET OWNER USER user_name", tableName));
        assertHighWatermark(msClient, fullTableName, 2);
        runStatementOnDriver(String.format("alter table %s CLUSTERED BY(c) SORTED BY(d) INTO 32 BUCKETS", tableName));
        assertHighWatermark(msClient, fullTableName, 3);
        runStatementOnDriver(String.format("ALTER TABLE %s ADD PARTITION (ds='2013-04-05')", tableName));
        assertHighWatermark(msClient, fullTableName, 4);
        runStatementOnDriver(String.format("ALTER TABLE %s SET SERDEPROPERTIES ('field.delim'='\\u0001')", tableName));
        assertHighWatermark(msClient, fullTableName, 5);
        runStatementOnDriver(String.format("ALTER TABLE %s PARTITION (ds='2013-04-05') SET FILEFORMAT PARQUET", tableName));
        assertHighWatermark(msClient, fullTableName, 6);
        // We should not advance the Write ID during compaction, since it affects the performance of
        // materialized views. So, below assertion ensures that we do not advance the write during compaction.
        runStatementOnDriver(String.format("ALTER TABLE %s PARTITION (ds='2013-04-05') COMPACT 'minor'", tableName));
        assertHighWatermark(msClient, fullTableName, 6);
        runStatementOnDriver(String.format("ALTER TABLE %s PARTITION (ds='2013-04-05') CONCATENATE", tableName));
        assertHighWatermark(msClient, fullTableName, 7);
        runStatementOnDriver(String.format("ALTER TABLE %s SKEWED BY (a) ON (1,2)", tableName));
        assertHighWatermark(msClient, fullTableName, 8);
        runStatementOnDriver(String.format("ALTER TABLE %s SET SKEWED LOCATION (1='hdfs://127.0.0.1:8020/abcd/1')", tableName));
        assertHighWatermark(msClient, fullTableName, 9);
        runStatementOnDriver(String.format("ALTER TABLE %s NOT SKEWED", tableName));
        assertHighWatermark(msClient, fullTableName, 10);
        runStatementOnDriver(String.format("ALTER TABLE %s UNSET SERDEPROPERTIES ('field.delim')", tableName));
        assertHighWatermark(msClient, fullTableName, 11);
    } finally {
        // Release the metastore connection even when an assertion fails.
        msClient.close();
    }
}

/**
 * Asserts that the table's valid-write-id list has the expected high watermark and no
 * open or aborted transactions (format: {@code <table>:<hwm>:<minOpenWriteId>::}).
 *
 * @param msClient      metastore client used to fetch the valid write ids
 * @param fullTableName fully qualified table name, e.g. {@code default.alter_table}
 * @param highWatermark expected high watermark write id
 */
private void assertHighWatermark(IMetaStoreClient msClient, String fullTableName,
        int highWatermark) throws Exception {
    String validWriteIds = msClient.getValidWriteIds(fullTableName).toString();
    Assert.assertEquals(fullTableName + ":" + highWatermark + ":9223372036854775807::",
        validWriteIds);
}
Also used : HiveMetaStoreClient(org.apache.hadoop.hive.metastore.HiveMetaStoreClient) IMetaStoreClient(org.apache.hadoop.hive.metastore.IMetaStoreClient) Test(org.junit.Test)

Example 50 with HiveMetaStoreClient

use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.

the class TestMsckCreatePartitionsInBatches method setupClass.

@BeforeClass
public static void setupClass() throws HiveException, MetaException {
    // One-time test setup: build a HiveConf with a small MSCK repair batch size (5) so the
    // batching logic under test is exercised with few partitions.
    hiveConf = new HiveConf(TestMsckCreatePartitionsInBatches.class);
    hiveConf.setIntVar(ConfVars.HIVE_MSCK_REPAIR_BATCH_SIZE, 5);
    hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
    // Session must be started before the metastore client is created below.
    SessionState.start(hiveConf);
    try {
        db = new HiveMetaStoreClient(hiveConf);
    } catch (MetaException e) {
        // Wrap as HiveException, preserving the original cause for diagnostics.
        throw new HiveException(e);
    }
    // Msck(false, false): presumably disables interactive/acid-specific modes — confirm
    // against the Msck constructor before relying on this.
    msck = new Msck(false, false);
    msck.init(Msck.getMsckConf(hiveConf));
}
Also used : HiveMetaStoreClient(org.apache.hadoop.hive.metastore.HiveMetaStoreClient) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) HiveConf(org.apache.hadoop.hive.conf.HiveConf) Msck(org.apache.hadoop.hive.metastore.Msck) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) BeforeClass(org.junit.BeforeClass)

Aggregations

HiveMetaStoreClient (org.apache.hadoop.hive.metastore.HiveMetaStoreClient)141 IMetaStoreClient (org.apache.hadoop.hive.metastore.IMetaStoreClient)81 Test (org.junit.Test)78 Table (org.apache.hadoop.hive.metastore.api.Table)60 FileSystem (org.apache.hadoop.fs.FileSystem)57 Path (org.apache.hadoop.fs.Path)45 HiveConf (org.apache.hadoop.hive.conf.HiveConf)31 Before (org.junit.Before)23 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)18 FileStatus (org.apache.hadoop.fs.FileStatus)17 CliSessionState (org.apache.hadoop.hive.cli.CliSessionState)16 File (java.io.File)12 IOException (java.io.IOException)12 HiveStreamingConnection (org.apache.hive.streaming.HiveStreamingConnection)12 ArrayList (java.util.ArrayList)11 TxnStore (org.apache.hadoop.hive.metastore.txn.TxnStore)10 StreamingConnection (org.apache.hive.streaming.StreamingConnection)10 List (java.util.List)9 HashMap (java.util.HashMap)8 CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest)8