use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
the class TestReplicationScenarios method setUpBeforeClass.
// If verifySetup is set to true, all of the test setup performs additional
// verifications as well, which is useful for confirming that the setup itself
// worked while developing and debugging tests. These verifications, however,
// do not exercise any replication functionality and are not relevant for
// testing replication itself, so for steady state we want this to be false.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  hconf = new HiveConf(TestReplicationScenarios.class);
  String metastoreUri = System.getProperty("test." + HiveConf.ConfVars.METASTOREURIS.varname);
  if (metastoreUri != null) {
    hconf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUri);
    return;
  }
  // turn on the db notification listener on the metastore
  hconf.setVar(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS,
      DBNOTIF_LISTENER_CLASSNAME);
  hconf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true);
  hconf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true);
  hconf.setVar(HiveConf.ConfVars.REPLCMDIR, TEST_PATH + "/cmroot/");
  proxySettingName = "hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts";
  hconf.set(proxySettingName, "*");
  MetaStoreTestUtils.startMetaStoreWithRetry(hconf);
  hconf.setVar(HiveConf.ConfVars.REPLDIR, TEST_PATH + "/hrepl/");
  hconf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  hconf.set(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
  hconf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  hconf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
  hconf.set(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname,
      "org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore");
  hconf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true);
  System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
  System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  Path testPath = new Path(TEST_PATH);
  FileSystem fs = FileSystem.get(testPath.toUri(), hconf);
  fs.mkdirs(testPath);
  driver = DriverFactory.newDriver(hconf);
  SessionState.start(new CliSessionState(hconf));
  metaStoreClient = new HiveMetaStoreClient(hconf);
  FileUtils.deleteDirectory(new File("metastore_db2"));
  HiveConf hconfMirrorServer = new HiveConf();
  hconfMirrorServer.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
      "jdbc:derby:;databaseName=metastore_db2;create=true");
  MetaStoreTestUtils.startMetaStoreWithRetry(hconfMirrorServer);
  hconfMirror = new HiveConf(hconf);
  String thriftUri = MetastoreConf.getVar(hconfMirrorServer, MetastoreConf.ConfVars.THRIFT_URIS);
  MetastoreConf.setVar(hconfMirror, MetastoreConf.ConfVars.THRIFT_URIS, thriftUri);
  driverMirror = DriverFactory.newDriver(hconfMirror);
  metaStoreClientMirror = new HiveMetaStoreClient(hconfMirror);
  ObjectStore.setTwoMetastoreTesting(true);
}
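For context, the two clients created above, metaStoreClient for the source warehouse and metaStoreClientMirror for the replica, are what the replication scenarios later use to compare metadata on both sides. The fragment below is a minimal sketch of that pattern, not code from the test itself; the database name "repl_db", the table name "t1", and the helper name verifyTableReplicated are hypothetical.

// A minimal verification sketch (not from the test itself); "repl_db" and "t1"
// are hypothetical names used only for illustration.
private static void verifyTableReplicated() throws Exception {
  org.apache.hadoop.hive.metastore.api.Table src =
      metaStoreClient.getTable("repl_db", "t1");
  org.apache.hadoop.hive.metastore.api.Table dst =
      metaStoreClientMirror.getTable("repl_db", "t1");
  // After a REPL LOAD the replica should expose the same schema as the source.
  org.junit.Assert.assertEquals(src.getSd().getCols(), dst.getSd().getCols());
}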
use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
the class TestMetastoreAuthorizationProvider method setUp.
@Override
protected void setUp() throws Exception {
  super.setUp();
  // Turn on metastore-side authorization
  System.setProperty(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname,
      AuthorizationPreEventListener.class.getName());
  System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER.varname,
      getAuthorizationProvider());
  System.setProperty(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER.varname,
      getAuthorizationProvider());
  setupMetaStoreReadAuthorization();
  System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname,
      InjectableDummyAuthenticator.class.getName());
  System.setProperty(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS.varname, "");
  int port = MetaStoreTestUtils.startMetaStoreWithRetry();
  clientHiveConf = createHiveConf();
  // Turn off client-side authorization
  clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, false);
  clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
  clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  ugi = Utils.getUGI();
  SessionState.start(new CliSessionState(clientHiveConf));
  msc = new HiveMetaStoreClient(clientHiveConf);
  driver = DriverFactory.newDriver(clientHiveConf);
}
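With metastore-side authorization wired up through system properties and client-side authorization disabled, the tests drive DDL through driver and then inspect the result through msc. A minimal sketch of that flow is shown below; the database name "authz_test_db" is hypothetical, and the concrete assertions in the real tests depend on the provider under test.

// Illustrative fragment only; "authz_test_db" is a hypothetical name.
String dbName = "authz_test_db";
driver.run("create database " + dbName);
// Reading the database back through the metastore client confirms that the
// DDL passed the metastore-side authorization checks.
org.apache.hadoop.hive.metastore.api.Database db = msc.getDatabase(dbName);
assertEquals(dbName, db.getName());
driver.run("drop database " + dbName);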
use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
the class TestMultiAuthorizationPreEventListener method setUp.
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname,
      AuthorizationPreEventListener.class.getName());
  // Set two dummy classes as authorization managers. Two instances should get created.
  System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER.varname,
      DummyHiveMetastoreAuthorizationProvider.class.getName() + ","
          + DummyHiveMetastoreAuthorizationProvider.class.getName());
  System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname,
      HadoopDefaultMetastoreAuthenticator.class.getName());
  int port = MetaStoreTestUtils.startMetaStoreWithRetry();
  clientHiveConf = new HiveConf();
  clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
  clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  SessionState.start(new CliSessionState(clientHiveConf));
  msc = new HiveMetaStoreClient(clientHiveConf);
  driver = DriverFactory.newDriver(clientHiveConf);
}
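Because HIVE_METASTORE_AUTHORIZATION_MANAGER is set to a comma-separated list, AuthorizationPreEventListener instantiates one provider per entry and invokes each of them for every metastore event. The fragment below is only an illustrative sketch of how a test might trigger such an event; "multi_authz_db" is a hypothetical name, and the real test verifies the calls recorded by the dummy providers rather than the database itself.

// Illustrative only; a single DDL statement should be seen by both
// configured DummyHiveMetastoreAuthorizationProvider instances.
String dbName = "multi_authz_db";
driver.run("create database " + dbName);
// Reading the database back confirms the DDL went through the listener chain.
msc.getDatabase(dbName);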
use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
the class TestCleanerWithReplication method setup.
@Before
public void setup() throws Exception {
  conf = new HiveConf();
  TxnDbUtil.setConfValues(conf);
  TxnDbUtil.cleanDb(conf);
  conf.set("fs.defaultFS", fs.getUri().toString());
  conf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true);
  ms = new HiveMetaStoreClient(conf);
  txnHandler = TxnUtils.getTxnStore(conf);
  cmRootDirectory = new Path(conf.get(HiveConf.ConfVars.REPLCMDIR.varname));
  if (!fs.exists(cmRootDirectory)) {
    fs.mkdirs(cmRootDirectory);
  }
  tmpdir = new File(Files.createTempDirectory("compactor_test_table_").toString());
}
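A setup like this is usually paired with a teardown that removes the change-management root and the temporary table directory so individual tests stay independent. The following is a minimal sketch of such a counterpart, assuming the same fields as above and the commons-io FileUtils helper; it is not taken from the test itself.

@After
public void tearDown() throws Exception {
  // Remove the change-management root and the scratch table directory,
  // then reset the transactional metadata between tests.
  fs.delete(cmRootDirectory, true);
  org.apache.commons.io.FileUtils.deleteDirectory(tmpdir);
  TxnDbUtil.cleanDb(conf);
  ms.close();
}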
use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
the class TestHCatPartitionPublish method setup.
@BeforeClass
public static void setup() throws Exception {
  File workDir = handleWorkDir();
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
  conf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");
  fs = FileSystem.get(conf);
  System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
  // LocalJobRunner does not work with the mapreduce OutputCommitter, so we need
  // to use MiniMRCluster instead (MAPREDUCE-2350).
  mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
  mrConf = mrCluster.createJobConf();
  if (isServerRunning) {
    return;
  }
  msPort = MetaStoreTestUtils.startMetaStoreWithRetry();
  Thread.sleep(10000);
  isServerRunning = true;
  securityManager = System.getSecurityManager();
  System.setSecurityManager(new NoExitSecurityManager());
  Policy.setPolicy(new DerbyPolicy());
  hcatConf = new HiveConf(TestHCatPartitionPublish.class);
  hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
  hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
  hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 120, TimeUnit.SECONDS);
  hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
  hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  msc = new HiveMetaStoreClient(hcatConf);
  System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
  System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
}
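A class-level setup like this typically has a matching teardown that reverses the JVM-wide state it touched, shutting down the MiniMRCluster and restoring the original SecurityManager. The sketch below illustrates that under the stated assumption; it reuses the fields above and is not taken from the test itself.

@AfterClass
public static void tearDown() throws Exception {
  if (mrCluster != null) {
    mrCluster.shutdown();
  }
  // Restore the JVM-wide hooks the setup replaced.
  System.setSecurityManager(securityManager);
  if (msc != null) {
    msc.close();
  }
}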