Use of org.apache.hadoop.hive.cli.CliSessionState in project hive by apache.
From the class TestNotificationListener, method setUp:
@Before
public void setUp() throws Exception {
  System.setProperty("java.naming.factory.initial", "org.apache.activemq.jndi.ActiveMQInitialContextFactory");
  System.setProperty("java.naming.provider.url", "vm://localhost?broker.persistent=false");
  ConnectionFactory connFac = new ActiveMQConnectionFactory("vm://localhost?broker.persistent=false");
  Connection conn = connFac.createConnection();
  conn.start();
  // We want the message to be sent when the session commits, so we run in
  // transacted mode.
  Session session = conn.createSession(true, Session.SESSION_TRANSACTED);
  Destination hcatTopic = session.createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX);
  MessageConsumer consumer1 = session.createConsumer(hcatTopic);
  consumer1.setMessageListener(this);
  Destination tblTopic = session.createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".mydb.mytbl");
  MessageConsumer consumer2 = session.createConsumer(tblTopic);
  consumer2.setMessageListener(this);
  Destination dbTopic = session.createTopic(HCatConstants.HCAT_DEFAULT_TOPIC_PREFIX + ".mydb");
  MessageConsumer consumer3 = session.createConsumer(dbTopic);
  consumer3.setMessageListener(this);
  setUpHiveConf();
  hiveConf.set(ConfVars.METASTORE_EVENT_LISTENERS.varname, NotificationListener.class.getName());
  hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  SessionState.start(new CliSessionState(hiveConf));
  driver = DriverFactory.newDriver(hiveConf);
  client = new HiveMetaStoreClient(hiveConf);
}
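Every snippet on this page ends with the same idiom: start a CliSessionState, build a driver, open a metastore client. A minimal, self-contained sketch of that idiom, assuming an embedded or otherwise reachable metastore behind a default HiveConf; the method name and the `show databases` statement are illustrative, while all classes and calls are the ones used in the snippets here:

import org.apache.hadoop.hive.cli.CliSessionState;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.ql.DriverFactory;
import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.session.SessionState;

void startCliSession() throws Exception {
  HiveConf hiveConf = new HiveConf();
  // Bind a CLI-flavoured session to the current thread; the driver and the
  // metastore client both pick up this configuration.
  SessionState.start(new CliSessionState(hiveConf));
  IDriver driver = DriverFactory.newDriver(hiveConf);
  HiveMetaStoreClient client = new HiveMetaStoreClient(hiveConf);
  driver.run("show databases"); // illustrative statement
}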
Use of org.apache.hadoop.hive.cli.CliSessionState in project hive by apache.
From the class TestReplicationScenariosAcidTables, method testAcidTablesBootstrapWithConcurrentDropTable:
@Test
public void testAcidTablesBootstrapWithConcurrentDropTable() throws Throwable {
  HiveConf primaryConf = primary.getConf();
  primary.run("use " + primaryDbName)
      .run("create table t1 (id int) clustered by(id) into 3 buckets stored as orc "
          + "tblproperties (\"transactional\"=\"true\")")
      .run("insert into t1 values(1)");
  // Perform a concurrent write + drop on the ACID table t1 while the bootstrap dump is in
  // progress. Bootstrap won't dump the table, but a new table created later under the same
  // name should be picked up by the subsequent incremental replication.
  BehaviourInjection<CallerArguments, Boolean> callerInjectedBehavior =
      new BehaviourInjection<CallerArguments, Boolean>() {
        @Nullable
        @Override
        public Boolean apply(@Nullable CallerArguments args) {
          if (injectionPathCalled) {
            nonInjectedPathCalled = true;
          } else {
            // Insert another row into t1 and drop the table from another txn while the
            // bootstrap dump is in progress.
            injectionPathCalled = true;
            Thread t = new Thread(new Runnable() {
              @Override
              public void run() {
                LOG.info("Entered new thread");
                IDriver driver = DriverFactory.newDriver(primaryConf);
                SessionState.start(new CliSessionState(primaryConf));
                try {
                  driver.run("insert into " + primaryDbName + ".t1 values(2)");
                  driver.run("drop table " + primaryDbName + ".t1");
                } catch (CommandProcessorException e) {
                  throw new RuntimeException(e);
                }
                LOG.info("Exit new thread success");
              }
            });
            t.start();
            LOG.info("Created new thread {}", t.getName());
            try {
              t.join();
            } catch (InterruptedException e) {
              throw new RuntimeException(e);
            }
          }
          return true;
        }
      };
  InjectableBehaviourObjectStore.setCallerVerifier(callerInjectedBehavior);
  WarehouseInstance.Tuple bootstrapDump = null;
  try {
    bootstrapDump = primary.dump(primaryDbName);
    callerInjectedBehavior.assertInjectionsPerformed(true, true);
  } finally {
    // Reset the behaviour so it does not leak into later tests.
    InjectableBehaviourObjectStore.resetCallerVerifier();
  }
  // The bootstrap dump takes the latest list of tables, so it won't see t1, which was dropped.
  replica.load(replicatedDbName, primaryDbName)
      .run("use " + replicatedDbName)
      .run("repl status " + replicatedDbName)
      .verifyResult(bootstrapDump.lastReplicationId)
      .run("show tables")
      .verifyResult(null);
  // Create another ACID table with the same name and insert a row. It should be properly replicated.
  WarehouseInstance.Tuple incrementalDump = primary.run("use " + primaryDbName)
      .run("create table t1 (id int) clustered by(id) into 3 buckets stored as orc "
          + "tblproperties (\"transactional\"=\"true\")")
      .run("insert into t1 values(100)")
      .dump(primaryDbName);
  replica.load(replicatedDbName, primaryDbName)
      .run("use " + replicatedDbName)
      .run("repl status " + replicatedDbName)
      .verifyResult(incrementalDump.lastReplicationId)
      .run("select id from t1 order by id")
      .verifyResult("100");
}
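The inner Runnable above highlights a detail that is easy to miss: SessionState is bound to the current thread, which is why the helper thread starts its own CliSessionState before running any statement. A condensed sketch of that per-thread idiom, using a lambda in place of the anonymous class; the method name dropFromSeparateThread is illustrative, and CommandProcessorException is thrown by driver.run as in the snippet above:

static void dropFromSeparateThread(HiveConf primaryConf, String primaryDbName)
    throws InterruptedException {
  Thread t = new Thread(() -> {
    // SessionState is per-thread state, so this thread needs its own session
    // before it can run HiveQL.
    SessionState.start(new CliSessionState(primaryConf));
    IDriver driver = DriverFactory.newDriver(primaryConf);
    try {
      driver.run("drop table " + primaryDbName + ".t1");
    } catch (CommandProcessorException e) {
      throw new RuntimeException(e);
    }
  });
  t.start();
  t.join();
}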
Use of org.apache.hadoop.hive.cli.CliSessionState in project hive by apache.
From the class TestReplicationScenarios, method internalBeforeClassSetup:
static void internalBeforeClassSetup(Map<String, String> additionalProperties) throws Exception {
  hconf = new HiveConf(TestReplicationScenarios.class);
  String metastoreUri = System.getProperty("test." + MetastoreConf.ConfVars.THRIFT_URIS.getHiveName());
  if (metastoreUri != null) {
    hconf.set(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), metastoreUri);
    return;
  }
  // Turn on the DB notification listener on the metastore.
  hconf.set(MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS.getHiveName(), DBNOTIF_LISTENER_CLASSNAME);
  hconf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true);
  hconf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true);
  hconf.setVar(HiveConf.ConfVars.REPLCMDIR, TEST_PATH + "/cmroot/");
  proxySettingName = "hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts";
  hconf.set(proxySettingName, "*");
  hconf.setVar(HiveConf.ConfVars.REPLDIR, TEST_PATH + "/hrepl/");
  hconf.set(MetastoreConf.ConfVars.THRIFT_CONNECTION_RETRIES.getHiveName(), "3");
  hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  hconf.set(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
  hconf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true);
  hconf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  hconf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
  hconf.set(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname, "org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore");
  hconf.set(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname, "/tmp/warehouse/external");
  hconf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true);
  hconf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, true);
  hconf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE, true);
  hconf.setBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET, false);
  System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
  System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  additionalProperties.forEach(hconf::set);
  MetaStoreTestUtils.startMetaStoreWithRetry(hconf, true);
  // Re-set the WAREHOUSE properties to the test dirs, as the previous command added a random port to them.
  hconf.set(MetastoreConf.ConfVars.WAREHOUSE.getVarname(), System.getProperty("test.warehouse.dir", "/tmp/warehouse/managed"));
  hconf.set(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname(), System.getProperty("test.warehouse.external.dir", "/tmp/warehouse/external"));
  Path testPath = new Path(TEST_PATH);
  FileSystem fs = FileSystem.get(testPath.toUri(), hconf);
  fs.mkdirs(testPath);
  driver = DriverFactory.newDriver(hconf);
  SessionState.start(new CliSessionState(hconf));
  metaStoreClient = new HiveMetaStoreClient(hconf);
  // Set up a second, independent metastore (backed by its own Derby database) to act as the replication mirror.
  FileUtils.deleteDirectory(new File("metastore_db2"));
  HiveConf hconfMirrorServer = new HiveConf();
  hconfMirrorServer.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:;databaseName=metastore_db2;create=true");
  MetaStoreTestUtils.startMetaStoreWithRetry(hconfMirrorServer, true);
  hconfMirror = new HiveConf(hconf);
  String thriftUri = MetastoreConf.getVar(hconfMirrorServer, MetastoreConf.ConfVars.THRIFT_URIS);
  MetastoreConf.setVar(hconfMirror, MetastoreConf.ConfVars.THRIFT_URIS, thriftUri);
  driverMirror = DriverFactory.newDriver(hconfMirror);
  metaStoreClientMirror = new HiveMetaStoreClient(hconfMirror);
  PersistenceManagerProvider.setTwoMetastoreTesting(true);
  MetastoreConf.setTimeVar(hconf, EVENT_DB_LISTENER_CLEAN_STARTUP_WAIT_INTERVAL, 0, TimeUnit.SECONDS);
  MetastoreConf.setTimeVar(hconfMirror, EVENT_DB_LISTENER_CLEAN_STARTUP_WAIT_INTERVAL, 0, TimeUnit.SECONDS);
}
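The key move above is cloning the primary HiveConf and re-pointing only the Thrift URI at the second metastore, so driver and driverMirror execute against independent warehouses. A hedged smoke-check sketch of what that buys the tests; the statement is illustrative, and assume it runs inside a test method declared throws Exception with the session started as above:

// Each driver is bound to a different metastore URI, so the same statement
// runs against two independent warehouses.
driver.run("show databases");       // primary metastore (hconf)
driverMirror.run("show databases"); // mirror metastore (hconfMirror)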
Use of org.apache.hadoop.hive.cli.CliSessionState in project hive by apache.
From the class TestMetastoreAuthorizationProvider, method setUp:
@Before
public void setUp() throws Exception {
  // Turn on metastore-side authorization.
  System.setProperty(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname, AuthorizationPreEventListener.class.getName());
  System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER.varname, getAuthorizationProvider());
  System.setProperty(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER.varname, getAuthorizationProvider());
  setupMetaStoreReadAuthorization();
  System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname, InjectableDummyAuthenticator.class.getName());
  System.setProperty(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS.varname, "");
  int port = MetaStoreTestUtils.startMetaStoreWithRetry();
  clientHiveConf = createHiveConf();
  // Turn off client-side authorization.
  clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, false);
  clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
  clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  ugi = Utils.getUGI();
  SessionState.start(new CliSessionState(clientHiveConf));
  msc = new HiveMetaStoreClient(clientHiveConf);
  driver = DriverFactory.newDriver(clientHiveConf);
}
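With client-side authorization disabled, any denial the test observes must come from the metastore-side AuthorizationPreEventListener configured above. A hedged sketch of the kind of probe the test methods build on; the table name is illustrative, and driver.run throws CommandProcessorException on failure, as seen in the replication snippet earlier on this page:

try {
  driver.run("create table authz_probe (a int)"); // illustrative table name
  // Reaching here means the metastore-side provider allowed the operation.
} catch (CommandProcessorException e) {
  // The provider rejected the operation during the metastore pre-event.
}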
Use of org.apache.hadoop.hive.cli.CliSessionState in project hive by apache.
From the class TestMultiAuthorizationPreEventListener, method setUp:
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname, AuthorizationPreEventListener.class.getName());
  // Set two dummy classes as authorization managers. Two instances should get created.
  System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER.varname,
      DummyHiveMetastoreAuthorizationProvider.class.getName() + "," + DummyHiveMetastoreAuthorizationProvider.class.getName());
  System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname, HadoopDefaultMetastoreAuthenticator.class.getName());
  int port = MetaStoreTestUtils.startMetaStoreWithRetry();
  clientHiveConf = new HiveConf();
  clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
  clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  SessionState.start(new CliSessionState(clientHiveConf));
  msc = new HiveMetaStoreClient(clientHiveConf);
  driver = DriverFactory.newDriver(clientHiveConf);
}
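The comma-separated property value is the whole trick: per the comment above, one authorization provider instance is created per list entry, so listing the dummy class twice yields two instances that each see every metastore pre-event. A hedged illustration; the database name is illustrative, and assume it runs inside a test method declared throws Exception:

// Both DummyHiveMetastoreAuthorizationProvider instances are consulted
// before the metastore applies this DDL.
driver.run("create database multi_authz_demo"); // illustrative name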