Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
From the class TestRemoteHiveMetaStoreDualAuthCustom, method createClient:
@Override
protected HiveMetaStoreClient createClient() throws Exception {
  MetastoreConf.setVar(clientConf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
  MetastoreConf.setBoolVar(clientConf, ConfVars.USE_THRIFT_SASL, false);
  MetastoreConf.setVar(clientConf, ConfVars.METASTORE_CLIENT_AUTH_MODE, "PLAIN");
  MetastoreConf.setBoolVar(clientConf, ConfVars.EXECUTE_SET_UGI, false);

  // Trying to log in with the correct username but a wrong password should fail.
  String credsCUWP = createCredFile(correctUser, wrongPassword);
  clientConf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, credsCUWP);
  String exceptionMessage = null;
  try {
    MetastoreConf.setVar(clientConf, ConfVars.METASTORE_CLIENT_PLAIN_USERNAME, correctUser);
    HiveMetaStoreClient tmpClient = new HiveMetaStoreClient(clientConf);
  } catch (MetaException e) {
    exceptionMessage = e.getMessage();
  }
  Assert.assertNotNull(exceptionMessage);
  Assert.assertTrue(exceptionMessage.contains("Error validating the login"));

  // Trying to log in as a user whose credentials are not in the given file should fail.
  exceptionMessage = null;
  try {
    MetastoreConf.setVar(clientConf, ConfVars.METASTORE_CLIENT_PLAIN_USERNAME, wrongUser);
    HiveMetaStoreClient tmpClient = new HiveMetaStoreClient(clientConf);
  } catch (MetaException e) {
    exceptionMessage = e.getMessage();
  }
  Assert.assertNotNull(exceptionMessage);
  Assert.assertTrue(exceptionMessage.contains("No password found for user"));

  // Trying to log in as a user who does not exist, but whose password is present in the
  // credential file, should fail. The password does not matter since the user doesn't exist.
  exceptionMessage = null;
  String credsWUWP = createCredFile(wrongUser, wrongPassword);
  clientConf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, credsWUWP);
  try {
    MetastoreConf.setVar(clientConf, ConfVars.METASTORE_CLIENT_PLAIN_USERNAME, wrongUser);
    HiveMetaStoreClient tmpClient = new HiveMetaStoreClient(clientConf);
  } catch (MetaException e) {
    exceptionMessage = e.getMessage();
  }
  Assert.assertNotNull(exceptionMessage);
  Assert.assertTrue(exceptionMessage.contains("Error validating the login"));

  // Logging in with the correct username and the correct password should succeed.
  String credsCUCP = createCredFile(correctUser, correctPassword);
  clientConf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, credsCUCP);
  MetastoreConf.setVar(clientConf, ConfVars.METASTORE_CLIENT_PLAIN_USERNAME, correctUser);
  return new HiveMetaStoreClient(clientConf);
}
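The helper createCredFile is not shown in this snippet. Below is a minimal sketch of what it might look like, assuming it stores the password in a local JCEKS keystore under the username as the alias; the file location and alias convention are illustrative assumptions, not taken from the Hive source. The Hadoop CredentialProvider API calls themselves are standard.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;

// Hypothetical sketch of createCredFile: stores `password` under the alias `user`
// in a JCEKS keystore and returns the provider path to set on the client conf.
private String createCredFile(String user, String password) throws Exception {
  // Illustrative location; the real test presumably uses a per-test temp directory.
  String providerPath = "jceks://file/tmp/" + user + ".jceks";
  Configuration conf = new Configuration();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, providerPath);
  CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
  provider.createCredentialEntry(user, password.toCharArray());
  // flush() persists the keystore entry to disk.
  provider.flush();
  return providerPath;
}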
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
From the class TestTxnCommands, method testAddAndDropPartitionAdvancingWriteIds:
@Test
public void testAddAndDropPartitionAdvancingWriteIds() throws Exception {
  runStatementOnDriver("create database IF NOT EXISTS db1");
  String tableName = "db1.add_drop_partition";
  IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
  runStatementOnDriver(String.format("CREATE TABLE %s (f1 string) PARTITIONED BY (ds STRING) "
      + "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')", tableName));
  String validWriteIds = msClient.getValidWriteIds(tableName).toString();
  LOG.info("ValidWriteIds before add partition::" + validWriteIds);
  Assert.assertEquals("db1.add_drop_partition:0:9223372036854775807::", validWriteIds);
  runStatementOnDriver("ALTER TABLE db1.add_drop_partition ADD PARTITION (ds='2013-04-05')");
  validWriteIds = msClient.getValidWriteIds(tableName).toString();
  LOG.info("ValidWriteIds after add partition::" + validWriteIds);
  Assert.assertEquals("db1.add_drop_partition:1:9223372036854775807::", validWriteIds);
  runStatementOnDriver("ALTER TABLE db1.add_drop_partition DROP PARTITION (ds='2013-04-05')");
  validWriteIds = msClient.getValidWriteIds(tableName).toString();
  LOG.info("ValidWriteIds after drop partition::" + validWriteIds);
  Assert.assertEquals("db1.add_drop_partition:2:9223372036854775807::", validWriteIds);
}
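The asserted strings are the serialized form of a ValidWriteIdList. A short sketch of how to read them, assuming the layout <table>:<highWatermark>:<minOpenWriteId>:<openWriteIds>:<abortedWriteIds> implied by the assertions above (9223372036854775807 is Long.MAX_VALUE, meaning no write IDs are open):

// Decode one of the asserted strings; split with limit -1 to keep trailing empty fields.
String serialized = "db1.add_drop_partition:1:9223372036854775807::";
String[] parts = serialized.split(":", -1);
String table = parts[0];                        // "db1.add_drop_partition"
long highWatermark = Long.parseLong(parts[1]);  // 1 -> one write allocated so far
long minOpenWriteId = Long.parseLong(parts[2]); // Long.MAX_VALUE -> nothing open
boolean noOpenOrAborted = parts[3].isEmpty() && parts[4].isEmpty();

Each ADD/DROP PARTITION in the test advances the high watermark by one, which is exactly what the three assertions verify.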
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
From the class TestTxnCommands, method testTxnStatsOnOff:
@Test
public void testTxnStatsOnOff() throws Exception {
  String tableName = "mm_table";
  hiveConf.setBoolean("hive.stats.autogather", true);
  hiveConf.setBoolean("hive.stats.column.autogather", true);
  // Close the thread-local Hive object so that the configuration change is reflected to HMS.
  Hive.closeCurrent();
  runStatementOnDriver("drop table if exists " + tableName);
  runStatementOnDriver(String.format("create table %s (a int) stored as orc "
      + "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')", tableName));
  runStatementOnDriver(String.format("insert into %s (a) values (1)", tableName));
  IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
  List<ColumnStatisticsObj> stats = getTxnTableStats(msClient, tableName);
  Assert.assertEquals(1, stats.size());
  runStatementOnDriver(String.format("insert into %s (a) values (1)", tableName));
  stats = getTxnTableStats(msClient, tableName);
  Assert.assertEquals(1, stats.size());
  msClient.close();
  hiveConf.setBoolean(MetastoreConf.ConfVars.HIVE_TXN_STATS_ENABLED.getVarname(), false);
  msClient = new HiveMetaStoreClient(hiveConf);
  // Even though the stats are valid in the metastore, txn stats are disabled, so none are returned.
  stats = getTxnTableStats(msClient, tableName);
  Assert.assertEquals(0, stats.size());
  msClient.close();
  hiveConf.setBoolean(MetastoreConf.ConfVars.HIVE_TXN_STATS_ENABLED.getVarname(), true);
  msClient = new HiveMetaStoreClient(hiveConf);
  stats = getTxnTableStats(msClient, tableName);
  // Now the stats are visible again.
  Assert.assertEquals(1, stats.size());
  msClient.close();
  hiveConf.setBoolean(MetastoreConf.ConfVars.HIVE_TXN_STATS_ENABLED.getVarname(), false);
  // Close the thread-local Hive object so that the configuration change is reflected to HMS.
  Hive.closeCurrent();
  // Running the insert with txn stats disabled invalidates the stats stored in the metastore.
  runStatementOnDriver(String.format("insert into %s (a) values (1)", tableName));
  hiveConf.setBoolean(MetastoreConf.ConfVars.HIVE_TXN_STATS_ENABLED.getVarname(), true);
  msClient = new HiveMetaStoreClient(hiveConf);
  stats = getTxnTableStats(msClient, tableName);
  Assert.assertEquals(0, stats.size());
  msClient.close();
}
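getTxnTableStats is a helper in the test class that is not shown here. A plausible sketch of it, assuming it fetches column statistics for column a through the standard IMetaStoreClient call; note that newer Hive versions add an engine argument to getTableColumnStatistics, while the three-argument form is used in this sketch:

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

// Hypothetical reconstruction of the helper: fetch stats for column "a" of the
// given table in the "default" database and return whatever the metastore has.
public static List<ColumnStatisticsObj> getTxnTableStats(IMetaStoreClient msClient,
    String tableName) throws Exception {
  return msClient.getTableColumnStatistics("default", tableName,
      Collections.singletonList("a"));
}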
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
From the class TestTxnCommands, method testDDLsAdvancingWriteIds:
@Test
public void testDDLsAdvancingWriteIds() throws Exception {
  String tableName = "alter_table";
  runStatementOnDriver("drop table if exists " + tableName);
  runStatementOnDriver(String.format("create table %s (a int, b string, c BIGINT, d INT) "
      + "PARTITIONED BY (ds STRING) "
      + "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')", tableName));
  runStatementOnDriver(String.format("insert into %s (a) values (0)", tableName));
  IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
  String validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:1:9223372036854775807::", validWriteIds);
  runStatementOnDriver(String.format("alter table %s SET OWNER USER user_name", tableName));
  validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:2:9223372036854775807::", validWriteIds);
  runStatementOnDriver(String.format("alter table %s CLUSTERED BY(c) SORTED BY(d) INTO 32 BUCKETS", tableName));
  validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:3:9223372036854775807::", validWriteIds);
  runStatementOnDriver(String.format("ALTER TABLE %s ADD PARTITION (ds='2013-04-05')", tableName));
  validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:4:9223372036854775807::", validWriteIds);
  runStatementOnDriver(String.format("ALTER TABLE %s SET SERDEPROPERTIES ('field.delim'='\\u0001')", tableName));
  validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:5:9223372036854775807::", validWriteIds);
  runStatementOnDriver(String.format("ALTER TABLE %s PARTITION (ds='2013-04-05') SET FILEFORMAT PARQUET", tableName));
  validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:6:9223372036854775807::", validWriteIds);
  // We should not advance the write ID during compaction, since doing so affects the
  // performance of materialized views. The assertion below ensures that the write ID
  // does not advance during compaction.
  runStatementOnDriver(String.format("ALTER TABLE %s PARTITION (ds='2013-04-05') COMPACT 'minor'", tableName));
  validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:6:9223372036854775807::", validWriteIds);
  runStatementOnDriver(String.format("ALTER TABLE %s PARTITION (ds='2013-04-05') CONCATENATE", tableName));
  validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:7:9223372036854775807::", validWriteIds);
  runStatementOnDriver(String.format("ALTER TABLE %s SKEWED BY (a) ON (1,2)", tableName));
  validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:8:9223372036854775807::", validWriteIds);
  runStatementOnDriver(String.format("ALTER TABLE %s SET SKEWED LOCATION (1='hdfs://127.0.0.1:8020/abcd/1')", tableName));
  validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:9:9223372036854775807::", validWriteIds);
  runStatementOnDriver(String.format("ALTER TABLE %s NOT SKEWED", tableName));
  validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:10:9223372036854775807::", validWriteIds);
  runStatementOnDriver(String.format("ALTER TABLE %s UNSET SERDEPROPERTIES ('field.delim')", tableName));
  validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
  Assert.assertEquals("default.alter_table:11:9223372036854775807::", validWriteIds);
}
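Asserting on the full serialized string couples the test to the exact ValidWriteIdList encoding. A hedged alternative, a sketch rather than anything from the Hive source, is to compare only the high-watermark field, assuming the <table>:<highWatermark>:... layout noted earlier:

// Illustrative helper: extract the high watermark (second field) from the
// serialized ValidWriteIdList and assert on it alone.
private static long highWatermark(String serializedWriteIds) {
  return Long.parseLong(serializedWriteIds.split(":", -1)[1]);
}

// Usage: Assert.assertEquals(6, highWatermark(validWriteIds));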
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
From the class TestMsckCreatePartitionsInBatches, method setupClass:
@BeforeClass
public static void setupClass() throws HiveException, MetaException {
  hiveConf = new HiveConf(TestMsckCreatePartitionsInBatches.class);
  hiveConf.setIntVar(ConfVars.HIVE_MSCK_REPAIR_BATCH_SIZE, 5);
  hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  SessionState.start(hiveConf);
  try {
    db = new HiveMetaStoreClient(hiveConf);
  } catch (MetaException e) {
    throw new HiveException(e);
  }
  msck = new Msck(false, false);
  msck.init(Msck.getMsckConf(hiveConf));
}
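With HIVE_MSCK_REPAIR_BATCH_SIZE set to 5, partitions discovered by MSCK are created through the metastore client in groups of five. A minimal sketch of how a test might stage unregistered partition directories on the filesystem for MSCK to find; the table location and partition-column name are assumptions for illustration:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Create 12 partition directories directly on the filesystem; none are registered
// in the metastore yet, so a subsequent MSCK REPAIR would add them in 3 batches
// (5 + 5 + 2) given the batch size of 5 configured above.
FileSystem fs = FileSystem.get(hiveConf);
Path tableLocation = new Path("/warehouse/test_table"); // assumed table location
for (int i = 0; i < 12; i++) {
  fs.mkdirs(new Path(tableLocation, "ds=2013-04-" + String.format("%02d", i + 1)));
}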