Use of org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder in project hive by apache: the setUp method of the class TestFilterHooks.
@BeforeClass
public static void setUp() throws Exception {
  // Let results pass through the dummy hook while the fixture is being built.
  DummyMetaStoreFilterHookImpl.blockResults = false;

  // Configure a standalone metastore that routes all reads through the dummy filter hook.
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
  MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  MetastoreConf.setClass(conf, ConfVars.FILTER_HOOK, DummyMetaStoreFilterHookImpl.class,
      MetaStoreFilterHook.class);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
  msc = new HiveMetaStoreClient(conf);

  // Drop leftovers from earlier runs, then build two databases, two tables in the
  // first database, and two partitions on the partitioned table.
  msc.dropDatabase(DBNAME1, true, true, true);
  msc.dropDatabase(DBNAME2, true, true, true);

  Database firstDb = new DatabaseBuilder()
      .setName(DBNAME1)
      .build();
  msc.createDatabase(firstDb);

  Database secondDb = new DatabaseBuilder()
      .setName(DBNAME2)
      .build();
  msc.createDatabase(secondDb);

  Table plainTable = new TableBuilder()
      .setDbName(DBNAME1)
      .setTableName(TAB1)
      .addCol("id", "int")
      .addCol("name", "string")
      .build();
  msc.createTable(plainTable);

  Table partitionedTable = new TableBuilder()
      .setDbName(DBNAME1)
      .setTableName(TAB2)
      .addCol("id", "int")
      .addPartCol("name", "string")
      .build();
  msc.createTable(partitionedTable);

  Partition firstPart = new PartitionBuilder()
      .fromTable(partitionedTable)
      .addValue("value1")
      .build();
  msc.add_partition(firstPart);

  Partition secondPart = new PartitionBuilder()
      .fromTable(partitionedTable)
      .addValue("value2")
      .build();
  msc.add_partition(secondPart);
}
Use of org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder in project hive by apache: the testDatabase method of the class TestHiveMetaStore.
@Test
public void testDatabase() throws Throwable {
  try {
    // Clear up any existing databases from previous runs.
    silentDropDatabase(TEST_DB1_NAME);
    silentDropDatabase(TEST_DB2_NAME);

    // A database created through the builder must round-trip with the owner we set.
    Database db = new DatabaseBuilder()
        .setName(TEST_DB1_NAME)
        .setOwnerName(SecurityUtils.getUser())
        .build();
    Assert.assertEquals(SecurityUtils.getUser(), db.getOwnerName());
    client.createDatabase(db);

    db = client.getDatabase(TEST_DB1_NAME);
    assertEquals("name of returned db is different from that of inserted db",
        TEST_DB1_NAME, db.getName());
    assertEquals("location of the returned db is different from that of inserted db",
        warehouse.getDatabasePath(db).toString(), db.getLocationUri());
    // Fixed: JUnit's assertEquals takes (expected, actual); the original had them
    // reversed, which produces a misleading "expected X but was Y" on failure.
    assertEquals(SecurityUtils.getUser(), db.getOwnerName());
    assertEquals(PrincipalType.USER, db.getOwnerType());

    // A Database built by hand (no builder, no explicit location) must also round-trip
    // and get the default warehouse location assigned by the server.
    Database db2 = new Database();
    db2.setName(TEST_DB2_NAME);
    client.createDatabase(db2);

    db2 = client.getDatabase(TEST_DB2_NAME);
    assertEquals("name of returned db is different from that of inserted db",
        TEST_DB2_NAME, db2.getName());
    assertEquals("location of the returned db is different from that of inserted db",
        warehouse.getDatabasePath(db2).toString(), db2.getLocationUri());

    // Both databases must be visible through the listing API.
    List<String> dbs = client.getDatabases(".*");
    assertTrue("first database is not " + TEST_DB1_NAME, dbs.contains(TEST_DB1_NAME));
    assertTrue("second database is not " + TEST_DB2_NAME, dbs.contains(TEST_DB2_NAME));

    client.dropDatabase(TEST_DB1_NAME);
    client.dropDatabase(TEST_DB2_NAME);
    // Redundant with the drops above (silentDropDatabase ignores a missing db);
    // kept as belt-and-braces cleanup so a partial failure still leaves a clean state.
    silentDropDatabase(TEST_DB1_NAME);
    silentDropDatabase(TEST_DB2_NAME);
  } catch (Throwable e) {
    // Log the full stack before re-throwing so the harness records both.
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testDatabase() failed.");
    throw e;
  }
}
Use of org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder in project hive by apache: the setVersionStateOtherDb method of the class TestHiveMetaStoreSchemaMethods.
@Test
public void setVersionStateOtherDb() throws TException {
  // Exercise schema-version state transitions for a schema living in a
  // non-default database, checking the listener counters after each change.
  final String dbName = "other_db_set_state";
  Database otherDb = new DatabaseBuilder()
      .setName(dbName)
      .build();
  client.createDatabase(otherDb);

  final String schemaName = uniqueSchemaName();
  ISchema schema = new ISchemaBuilder()
      .setSchemaType(SchemaType.AVRO)
      .setName(schemaName)
      .setDbName(dbName)
      .build();
  client.createISchema(schema);

  SchemaVersion version = new SchemaVersionBuilder()
      .versionOf(schema)
      .setVersion(1)
      .addCol("a", ColumnType.BINARY_TYPE_NAME)
      .build();
  client.addSchemaVersion(version);

  // A freshly added version carries no state.
  version = client.getSchemaVersion(dbName, schemaName, 1);
  Assert.assertNull(version.getState());

  // First transition: INITIATED. Each listener family must fire exactly once.
  client.setSchemaVersionState(dbName, schemaName, 1, SchemaVersionState.INITIATED);
  Assert.assertEquals(1, (int) preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION));
  Assert.assertEquals(1, (int) events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION));
  Assert.assertEquals(1, (int) transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION));
  version = client.getSchemaVersion(dbName, schemaName, 1);
  Assert.assertEquals(SchemaVersionState.INITIATED, version.getState());

  // Second transition: REVIEWED. Counters advance to two.
  client.setSchemaVersionState(dbName, schemaName, 1, SchemaVersionState.REVIEWED);
  Assert.assertEquals(2, (int) preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION));
  Assert.assertEquals(2, (int) events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION));
  Assert.assertEquals(2, (int) transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION));
  version = client.getSchemaVersion(dbName, schemaName, 1);
  Assert.assertEquals(SchemaVersionState.REVIEWED, version.getState());
}
Use of org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder in project hive by apache: the testMarkingPartitionSet method of the class TestMarkPartition.
@Test
public void testMarkingPartitionSet() throws TException, InterruptedException {
  // Fixed: the client was never closed, leaking the Thrift connection whenever an
  // assertion threw. The whole body now runs under try/finally with msc.close().
  HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
  try {
    final String dbName = "hive2215";
    msc.dropDatabase(dbName, true, true, true);
    Database db = new DatabaseBuilder()
        .setName(dbName)
        .build();
    msc.createDatabase(db);

    final String tableName = "tmptbl";
    msc.dropTable(dbName, tableName, true, true);
    Table table = new TableBuilder()
        .setDbName(dbName)
        .setTableName(tableName)
        .addCol("a", "string")
        .addPartCol("b", "string")
        .build();
    msc.createTable(table);

    Partition part = new PartitionBuilder()
        .fromTable(table)
        .addValue("2011")
        .build();
    msc.add_partition(part);

    // Mark the partition for LOAD_DONE; the mark must be visible immediately.
    Map<String, String> kvs = new HashMap<>();
    kvs.put("b", "'2011'");
    msc.markPartitionForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE);
    Assert.assertTrue(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));

    // NOTE(review): this sleep presumably waits out the configured event-expiry
    // interval so the mark ages away — timing-sensitive, confirm against the test's
    // EVENT_DB_LISTENER/expiry configuration before tightening.
    Thread.sleep(3000);
    Assert.assertFalse(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));

    // A different, never-marked partition value reports unmarked.
    kvs.put("b", "'2012'");
    Assert.assertFalse(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));

    // Marking against a nonexistent table must fail with UnknownTableException.
    try {
      msc.markPartitionForEvent(dbName, "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
      Assert.fail("Expected UnknownTableException");
    } catch (UnknownTableException e) {
      // All good
    } catch (Exception e) {
      // Fixed: report which exception actually arrived, matching the sibling
      // catch blocks below, so a failure is diagnosable from the message alone.
      Assert.fail("Expected UnknownTableException, received " + e.getClass().getName());
    }

    // Querying against a nonexistent table must fail the same way.
    try {
      msc.isPartitionMarkedForEvent(dbName, "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
      Assert.fail("Expected UnknownTableException");
    } catch (UnknownTableException e) {
      // All good
    } catch (Exception e) {
      Assert.fail("Expected UnknownTableException, received " + e.getClass().getName());
    }

    // A spec naming a non-partition column ("a") must raise InvalidPartitionException.
    kvs.put("a", "'2012'");
    try {
      msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE);
      Assert.fail("Expected InvalidPartitionException");
    } catch (InvalidPartitionException e) {
      // All good
    } catch (Exception e) {
      Assert.fail("Expected InvalidPartitionException, received " + e.getClass().getName());
    }
  } finally {
    msc.close();
  }
}
Use of org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder in project hive by apache: the testEndFunctionListener method of the class TestMetaStoreEndFunctionListener.
@Test
public void testEndFunctionListener() throws Exception {
  /* Objective here is to ensure that when exceptions are thrown in HiveMetaStore in API methods
   * they bubble up and are stored in the MetaStoreEndFunctionContext objects
   */
  String dbName = "hive3524";
  String tblName = "tmptbl";
  int listSize;

  Database db = new DatabaseBuilder().setName(dbName).build();
  msc.createDatabase(db);

  // Case 1: get_database on an unknown name. The failure must be recorded in the
  // end-function context with the NoSuchObjectException that caused it.
  try {
    msc.getDatabase("UnknownDB");
  } catch (Exception e) {
    // Expected; the listener capture below is what we verify.
  }
  listSize = DummyEndFunctionListener.funcNameList.size();
  String func_name = DummyEndFunctionListener.funcNameList.get(listSize - 1);
  MetaStoreEndFunctionContext context = DummyEndFunctionListener.contextList.get(listSize - 1);
  // Fixed throughout: assertEquals takes (expected, actual); the original had the
  // arguments reversed, which yields misleading failure messages.
  assertEquals("get_database", func_name);
  assertFalse(context.isSuccess());
  Exception e = context.getException();
  assertTrue((e != null));
  assertTrue((e instanceof NoSuchObjectException));
  assertEquals(null, context.getInputTableName());

  // Case 2: get_table on an unknown table. Context must carry the table name.
  String unknownTable = "UnknownTable";
  // Fixed: pass the db name string, consistent with every other TableBuilder use
  // in this file (the original passed the Database object to setDbName).
  Table table = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tblName)
      .addCol("a", "string")
      .addPartCol("b", "string")
      .build();
  msc.createTable(table);
  try {
    msc.getTable(dbName, unknownTable);
  } catch (Exception e1) {
    // Expected.
  }
  listSize = DummyEndFunctionListener.funcNameList.size();
  func_name = DummyEndFunctionListener.funcNameList.get(listSize - 1);
  context = DummyEndFunctionListener.contextList.get(listSize - 1);
  assertEquals("get_table", func_name);
  assertFalse(context.isSuccess());
  e = context.getException();
  assertTrue((e != null));
  assertTrue((e instanceof NoSuchObjectException));
  assertEquals(unknownTable, context.getInputTableName());

  // Case 3: get_partition for a partition that does not exist.
  try {
    msc.getPartition("hive3524", tblName, "b=2012");
  } catch (Exception e2) {
    // Expected.
  }
  listSize = DummyEndFunctionListener.funcNameList.size();
  func_name = DummyEndFunctionListener.funcNameList.get(listSize - 1);
  context = DummyEndFunctionListener.contextList.get(listSize - 1);
  assertEquals("get_partition_by_name", func_name);
  assertFalse(context.isSuccess());
  e = context.getException();
  assertTrue((e != null));
  assertTrue((e instanceof NoSuchObjectException));
  assertEquals(tblName, context.getInputTableName());

  // Case 4: dropTable on an unknown table — server first resolves via get_table,
  // which is what the last recorded end-function event reflects.
  try {
    msc.dropTable(dbName, unknownTable);
  } catch (Exception e4) {
    // Expected.
  }
  listSize = DummyEndFunctionListener.funcNameList.size();
  func_name = DummyEndFunctionListener.funcNameList.get(listSize - 1);
  context = DummyEndFunctionListener.contextList.get(listSize - 1);
  assertEquals("get_table", func_name);
  assertFalse(context.isSuccess());
  e = context.getException();
  assertTrue((e != null));
  assertTrue((e instanceof NoSuchObjectException));
  // Fixed: reuse the unknownTable variable instead of repeating the literal.
  assertEquals(unknownTable, context.getInputTableName());
}
Aggregations