Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache:
class TestHCatMultiOutputFormat, method initializeSetup.
private static void initializeSetup() throws Exception {
  hiveConf = new HiveConf(mrConf, TestHCatMultiOutputFormat.class);
  hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
  hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
  hiveConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
  hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
  System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehousedir.toString());
  try {
    hmsc = new HiveMetaStoreClient(hiveConf);
    initalizeTables();
  } catch (Throwable e) {
    LOG.error("Exception encountered while setting up testcase", e);
    throw new Exception(e);
  } finally {
    // Guard the close: if client construction threw, hmsc is null and an
    // unconditional close() would mask the original failure with an NPE.
    if (hmsc != null) {
      hmsc.close();
    }
  }
}
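For context, here is a minimal sketch of how a client configured this way is typically exercised once setup completes. The database and table names are hypothetical; getAllDatabases and getTable are standard IMetaStoreClient calls.

HiveMetaStoreClient client = new HiveMetaStoreClient(hiveConf);
try {
  // Enumerate every database registered in the metastore.
  for (String dbName : client.getAllDatabases()) {
    System.out.println("database: " + dbName);
  }
  // Look up table metadata by database and table name
  // (throws NoSuchObjectException if the table does not exist).
  Table table = client.getTable("default", "some_table");
  System.out.println("location: " + table.getSd().getLocation());
} finally {
  client.close();
}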
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache:
class TestReplicationScenarios, method setUpBeforeClass.
// If verifySetup is set to true, all the test setup we do will perform additional
// verification as well, which is useful for confirming that the setup occurred
// correctly when developing and debugging tests. These verifications, however, do
// not exercise any replication functionality and are therefore not relevant to
// testing replication itself. For steady state we want this to be false.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  hconf = new HiveConf(TestReplicationScenarios.class);
  String metastoreUri = System.getProperty("test." + HiveConf.ConfVars.METASTOREURIS.varname);
  if (metastoreUri != null) {
    hconf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUri);
    useExternalMS = true;
    return;
  }
  // Turn on the DB notification listener on the metastore.
  hconf.setVar(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS, DBNOTIF_LISTENER_CLASSNAME);
  hconf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true);
  hconf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true);
  hconf.setVar(HiveConf.ConfVars.REPLCMDIR, TEST_PATH + "/cmroot/");
  msPort = MetaStoreUtils.startMetaStore(hconf);
  hconf.setVar(HiveConf.ConfVars.REPLDIR, TEST_PATH + "/hrepl/");
  hconf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
  hconf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  hconf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
  System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  Path testPath = new Path(TEST_PATH);
  FileSystem fs = FileSystem.get(testPath.toUri(), hconf);
  fs.mkdirs(testPath);
  driver = new Driver(hconf);
  SessionState.start(new CliSessionState(hconf));
  metaStoreClient = new HiveMetaStoreClient(hconf);
}
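A class set up this way would normally tear the same resources down when the tests finish. The following @AfterClass counterpart is only a sketch, not the project's actual teardown; the method name is hypothetical.

@AfterClass
public static void tearDownAfterClass() throws Exception {
  // Release the resources opened in setUpBeforeClass, client first.
  if (metaStoreClient != null) {
    metaStoreClient.close();
  }
  if (driver != null) {
    driver.close();
  }
}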
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project storm by apache:
class HiveSetupUtil, method createDbAndTable.
public static void createDbAndTable(HiveConf conf, String databaseName, String tableName,
    List<String> partVals, String[] colNames, String[] colTypes, String[] partNames,
    String dbLocation) throws Exception {
  IMetaStoreClient client = new HiveMetaStoreClient(conf);
  try {
    Database db = new Database();
    db.setName(databaseName);
    db.setLocationUri(dbLocation);
    client.createDatabase(db);
    Table tbl = new Table();
    tbl.setDbName(databaseName);
    tbl.setTableName(tableName);
    tbl.setTableType(TableType.MANAGED_TABLE.toString());
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(getTableColumns(colNames, colTypes));
    sd.setNumBuckets(1);
    sd.setLocation(dbLocation + Path.SEPARATOR + tableName);
    if (partNames != null && partNames.length != 0) {
      tbl.setPartitionKeys(getPartitionKeys(partNames));
    }
    tbl.setSd(sd);
    sd.setBucketCols(new ArrayList<String>(2));
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
    sd.getSerdeInfo().setSerializationLib(OrcSerde.class.getName());
    sd.setInputFormat(OrcInputFormat.class.getName());
    sd.setOutputFormat(OrcOutputFormat.class.getName());
    Map<String, String> tableParams = new HashMap<String, String>();
    tbl.setParameters(tableParams);
    client.createTable(tbl);
    try {
      if (partVals != null && partVals.size() > 0) {
        addPartition(client, tbl, partVals);
      }
    } catch (AlreadyExistsException e) {
      // The partition already exists; safe to ignore during test setup.
    }
  } finally {
    client.close();
  }
}
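An illustrative call site for this helper might look as follows. Every name and the warehouse path are hypothetical, and java.util.Arrays is assumed to be imported.

HiveConf conf = new HiveConf();
HiveSetupUtil.createDbAndTable(conf,
    "test_db", "test_table",
    Arrays.asList("2016", "01"),          // partition values, one per partition column
    new String[] { "id", "msg" },         // column names
    new String[] { "int", "string" },     // column types
    new String[] { "year", "month" },     // partition column names
    "file:///tmp/warehouse/test_db");     // database location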
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache:
class HiveMetastoreTest, method startMetastore.
@BeforeClass
public static void startMetastore() throws Exception {
  HiveMetastoreTest.metastore = new TestHiveMetastore();
  metastore.start();
  HiveMetastoreTest.hiveConf = metastore.hiveConf();
  HiveMetastoreTest.metastoreClient = new HiveMetaStoreClient(hiveConf);
  String dbPath = metastore.getDatabasePath(DB_NAME);
  Database db = new Database(DB_NAME, "description", dbPath, new HashMap<>());
  metastoreClient.createDatabase(db);
  HiveMetastoreTest.catalog = (HiveCatalog) CatalogUtil.loadCatalog(
      HiveCatalog.class.getName(),
      CatalogUtil.ICEBERG_CATALOG_TYPE_HIVE,
      ImmutableMap.of(CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS,
          String.valueOf(EVICTION_INTERVAL)),
      hiveConf);
}
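The matching teardown would close the client and stop the embedded metastore. The sketch below assumes TestHiveMetastore pairs start() with a stop() method; the method name and exact cleanup order are illustrative.

@AfterClass
public static void stopMetastore() throws Exception {
  // Drop the catalog reference, close the client, then stop the server.
  HiveMetastoreTest.catalog = null;
  if (metastoreClient != null) {
    metastoreClient.close();
    HiveMetastoreTest.metastoreClient = null;
  }
  if (metastore != null) {
    metastore.stop(); // assumption: TestHiveMetastore exposes stop()
    HiveMetastoreTest.metastore = null;
  }
}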
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache:
class TestHiveCommitLocks, method initializeSpies.
@BeforeClass
public static void initializeSpies() throws Exception {
  overriddenHiveConf.setLong("iceberg.hive.lock-timeout-ms", 6 * 1000);
  overriddenHiveConf.setLong("iceberg.hive.lock-check-min-wait-ms", 50);
  overriddenHiveConf.setLong("iceberg.hive.lock-check-max-wait-ms", 5 * 1000);
  // Set up the spy clients as static variables instead of before every test.
  // The spy clients are reused between methods and closed at the end of all tests in this class.
  spyClientPool = spy(new HiveClientPool(1, overriddenHiveConf));
  when(spyClientPool.newClient()).thenAnswer(invocation -> {
    // Cannot spy on RetryingHiveMetastoreClient because it is a proxy.
    IMetaStoreClient client = spy(new HiveMetaStoreClient(hiveConf));
    spyClientRef.set(client);
    return spyClientRef.get();
  });
  // Run a trivial call against the pool to force creation of the spy client.
  spyClientPool.run(IMetaStoreClient::isLocalMetaStore);
  spyCachedClientPool = spy(new CachedClientPool(hiveConf, Collections.emptyMap()));
  when(spyCachedClientPool.clientPool()).thenAnswer(invocation -> spyClientPool);
  Assert.assertNotNull(spyClientRef.get());
  spyClient = spyClientRef.get();
}
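The comment above notes that the spy clients are closed at the end of all tests in this class. A hedged sketch of that cleanup follows; the method name is hypothetical, and it assumes HiveClientPool inherits a close() method from its pool base class.

@AfterClass
public static void cleanupSpies() {
  // Closing the pool releases the underlying spy metastore client as well.
  try {
    spyClientPool.close();
  } catch (Throwable t) {
    // Best effort: ignore close failures during teardown.
  }
}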