Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in the apache/incubator-gobblin project.
Class HivePurgerPublisher, method initHiveMetastoreClient:
/**
 * Creates the {@code HiveMetaStoreClient} used by this publisher.
 *
 * <p>If a super-user keytab location is configured, a Kerberos login is performed first and the
 * client is constructed inside that login context; otherwise the client is created directly.
 *
 * @throws Exception if the Kerberos login or the metastore connection fails
 */
public void initHiveMetastoreClient() throws Exception {
  if (this.state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION)) {
    // Secured cluster: authenticate as the compliance super user via its keytab.
    final String superUser = this.state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
    final String realm = this.state.getProp(ConfigurationKeys.KERBEROS_REALM);
    final String keytabLocation = this.state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
    log.info("Establishing MetastoreClient connection using " + keytabLocation);
    UserGroupInformation.loginUserFromKeytab(
        HostUtils.getPrincipalUsingHostname(superUser, realm), keytabLocation);
    // Build the client within the freshly-established login context so it picks up the
    // Kerberos credentials.
    UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws TException {
        HivePurgerPublisher.this.client = new HiveMetaStoreClient(new HiveConf());
        return null;
      }
    });
  } else {
    // Unsecured cluster: connect without any Kerberos handshake.
    HivePurgerPublisher.this.client = new HiveMetaStoreClient(new HiveConf());
  }
}
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in the apache/incubator-gobblin project.
Class ProxyUtils, method cancelTokens:
/**
 * Cancels any Hive delegation tokens held by the real user behind the current UGI.
 *
 * <p>Logs in as the configured compliance super user from its keytab, then walks the real
 * user's credentials and cancels every token of kind
 * {@code DelegationTokenIdentifier.HIVE_DELEGATION_KIND}.
 *
 * @param state job state carrying the super user, realm and keytab-location properties
 * @throws IOException if the keytab login or credential access fails
 * @throws InterruptedException if the login is interrupted
 * @throws TException if the metastore rejects the cancellation
 */
public static void cancelTokens(State state) throws IOException, InterruptedException, TException {
  // Fail fast if any of the three required Kerberos properties is absent.
  Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION), "Missing required property " + ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
  Preconditions.checkArgument(state.contains(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER), "Missing required property " + ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
  Preconditions.checkArgument(state.contains(ConfigurationKeys.KERBEROS_REALM), "Missing required property " + ConfigurationKeys.KERBEROS_REALM);
  String superUser = state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
  String keytabLocation = state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
  String realm = state.getProp(ConfigurationKeys.KERBEROS_REALM);
  UserGroupInformation.loginUserFromKeytab(HostUtils.getPrincipalUsingHostname(superUser, realm), keytabLocation);
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  // NOTE(review): getRealUser() returns null when the current user is not a proxy user —
  // confirm callers always invoke this from a proxied context, otherwise this NPEs.
  UserGroupInformation realUser = currentUser.getRealUser();
  Credentials credentials = realUser.getCredentials();
  for (Token<?> token : credentials.getAllTokens()) {
    if (token.getKind().equals(DelegationTokenIdentifier.HIVE_DELEGATION_KIND)) {
      log.info("Cancelling hive token");
      HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(new HiveConf());
      try {
        hiveClient.cancelDelegationToken(token.encodeToUrlString());
      } finally {
        // Fix: the client was previously never closed, leaking a metastore connection
        // per cancelled token.
        hiveClient.close();
      }
    }
  }
}
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in the apache/hive project.
Class TestStatsUpdaterThread, method testTxnTable:
// Verifies that StatsUpdaterThread respects transactional write-ID visibility on an
// insert-only ACID table: stats tainted by an open txn must stay invalid, and the updater
// may only repair them once the offending txn is aborted or committed.
@Test(timeout = 160000)
public void testTxnTable() throws Exception {
StatsUpdaterThread su = createUpdater();
IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
executeQuery("create table simple_stats (s string) TBLPROPERTIES " + "(\"transactional\"=\"true\", \"transactional_properties\"=\"insert_only\")");
executeQuery("insert into simple_stats (s) values ('test')");
List<String> cols = Lists.newArrayList("s");
String dbName = ss.getCurrentDatabase(), tblName = "simple_stats", fqName = dbName + "." + tblName;
// Baseline: after an autogathered insert the stats are already valid, so one updater
// iteration should find nothing to do.
ValidWriteIdList initialWriteIds = msClient.getValidWriteIds(fqName);
verifyStatsUpToDate(tblName, cols, msClient, initialWriteIds.toString(), true);
assertFalse(su.runOneIteration());
drainWorkQueue(su, 0);
// An insert-overwrite advances the write IDs; stats should still be valid and the
// updater should again have no work.
executeQuery("insert overwrite table simple_stats values ('test2')");
ValidWriteIdList nextWriteIds = msClient.getValidWriteIds(fqName);
verifyStatsUpToDate(tblName, cols, msClient, nextWriteIds.toString(), true);
assertFalse(su.runOneIteration());
drainWorkQueue(su, 0);
String currentWriteIds = msClient.getValidWrideIds(fqName).toString();
// Overwrite the txn state to refer to an open txn.
long badTxnId = msClient.openTxn("moo");
long badWriteId = msClient.allocateTableWriteId(badTxnId, dbName, tblName);
Table tbl = msClient.getTable(dbName, tblName);
tbl.setWriteId(badWriteId);
msClient.alter_table(null, dbName, tblName, tbl, new EnvironmentContext(), initialWriteIds.toString());
// Stats should not be valid.
verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, false);
// Analyze should not be able to set valid stats for a running txn.
assertTrue(su.runOneIteration());
drainWorkQueue(su);
currentWriteIds = msClient.getValidWriteIds(fqName).toString();
verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, false);
msClient.abortTxns(Lists.newArrayList(badTxnId));
// Analyze should be able to override stats of an aborted txn.
assertTrue(su.runOneIteration());
drainWorkQueue(su);
// Stats will now be valid.
currentWriteIds = msClient.getValidWriteIds(fqName).toString();
verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, true);
// Verify that incorrect stats from a valid write ID are also handled.
badTxnId = msClient.openTxn("moo");
badWriteId = msClient.allocateTableWriteId(badTxnId, dbName, tblName);
tbl = msClient.getTable(dbName, tblName);
tbl.setWriteId(badWriteId);
// Explicitly mark the basic stats as stale for this (still-open) txn's write ID.
StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
msClient.alter_table(null, dbName, tblName, tbl, new EnvironmentContext(), initialWriteIds.toString());
// Stats should not be valid.
verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, false);
// Analyze should not be able to set valid stats for a running txn.
assertTrue(su.runOneIteration());
drainWorkQueue(su);
currentWriteIds = msClient.getValidWriteIds(fqName).toString();
verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, false);
msClient.commitTxn(badTxnId);
// Analyze should be able to override stats of an committed txn.
assertTrue(su.runOneIteration());
drainWorkQueue(su);
// Stats will now be valid.
currentWriteIds = msClient.getValidWriteIds(fqName).toString();
verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, true);
msClient.close();
}
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in the apache/hive project.
Class TestStatsUpdaterThread, method testExistingOnly:
// In "existing" mode the updater must refresh only columns that were analyzed before,
// and must leave never-analyzed columns without stats.
@Test(timeout = 80000)
public void testExistingOnly() throws Exception {
  hiveConf.set(MetastoreConf.ConfVars.STATS_AUTO_UPDATE.getVarname(), "existing");
  StatsUpdaterThread updater = createUpdater();
  IMetaStoreClient client = new HiveMetaStoreClient(hiveConf);
  executeQuery("create table simple_stats (i int, s string)");
  // Disable automatic stats gathering so only the explicit ANALYZE below produces stats.
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false);
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false);
  executeQuery("insert into simple_stats (i, s) values (1, 'test')");
  executeQuery("analyze table simple_stats compute statistics for columns i");
  // Column "s" was never analyzed; "i" was — clear i's stats so the updater has work.
  verifyStatsUpToDate("simple_stats", Lists.newArrayList("s"), client, false);
  verifyAndUnsetColStats("simple_stats", Lists.newArrayList("i"), client);
  assertTrue(updater.runOneIteration());
  drainWorkQueue(updater);
  // Only the previously-analyzed column "i" should have been refreshed.
  verifyStatsUpToDate("simple_stats", Lists.newArrayList("i"), client, true);
  verifyStatsUpToDate("simple_stats", Lists.newArrayList("s"), client, false);
  client.close();
}
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in the apache/hive project.
Class TestStatsUpdaterThread, method testTxnDynamicPartitions:
@Test
public void testTxnDynamicPartitions() throws Exception {
StatsUpdaterThread su = createUpdater();
IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false);
executeQuery("create table simple_stats (s string) partitioned by (i int)" + " stored as orc " + " TBLPROPERTIES (\"transactional\"=\"true\")");
executeQuery("insert into simple_stats (i, s) values (1, 'test')");
executeQuery("insert into simple_stats (i, s) values (2, 'test2')");
executeQuery("insert into simple_stats (i, s) values (3, 'test3')");
assertTrue(su.runOneIteration());
drainWorkQueue(su);
verifyPartStatsUpToDate(3, 1, msClient, "simple_stats", true);
executeQuery("insert into simple_stats (i, s) values (1, 'test12')");
executeQuery("insert into simple_stats (i, s) values (2, 'test22')");
executeQuery("insert into simple_stats (i, s) values (3, 'test32')");
assertTrue(su.runOneIteration());
drainWorkQueue(su);
verifyPartStatsUpToDate(3, 1, msClient, "simple_stats", true);
msClient.close();
}
Aggregations