Use of org.apache.hadoop.hive.metastore.api.UnknownDBException in project hive by apache.
In the class TestHiveMetaStore, the method testSimpleTable:
public void testSimpleTable() throws Exception {
  try {
    String dbName = "simpdb";
    String tblName = "simptbl";
    String tblName2 = "simptbl2";
    String typeName = "Person";

    client.dropTable(dbName, tblName);
    silentDropDatabase(dbName);
    Database db = new Database();
    db.setName(dbName);
    client.createDatabase(db);

    client.dropType(typeName);
    Type typ1 = new Type();
    typ1.setName(typeName);
    typ1.setFields(new ArrayList<FieldSchema>(2));
    typ1.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
    typ1.getFields().add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
    client.createType(typ1);

    Table tbl = new Table();
    tbl.setDbName(dbName);
    tbl.setTableName(tblName);
    StorageDescriptor sd = new StorageDescriptor();
    tbl.setSd(sd);
    sd.setCols(typ1.getFields());
    sd.setCompressed(false);
    sd.setNumBuckets(1);
    sd.setParameters(new HashMap<String, String>());
    sd.getParameters().put("test_param_1", "Use this for comments etc");
    sd.setBucketCols(new ArrayList<String>(2));
    sd.getBucketCols().add("name");
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
    sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
    sd.setInputFormat(HiveInputFormat.class.getName());
    sd.setOutputFormat(HiveOutputFormat.class.getName());
    tbl.setPartitionKeys(new ArrayList<FieldSchema>());

    client.createTable(tbl);
    if (isThriftClient) {
      // the createTable() above does not update the location in the 'tbl'
      // object when the client is a thrift client and the code below relies
      // on the location being present in the 'tbl' object - so get the table
      // from the metastore
      tbl = client.getTable(dbName, tblName);
    }
    Table tbl2 = client.getTable(dbName, tblName);
    assertNotNull(tbl2);
    assertEquals(tbl2.getDbName(), dbName);
    assertEquals(tbl2.getTableName(), tblName);
    assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
    assertEquals(tbl2.getSd().isCompressed(), false);
    assertEquals(tbl2.getSd().getNumBuckets(), 1);
    assertEquals(tbl2.getSd().getLocation(), tbl.getSd().getLocation());
    assertNotNull(tbl2.getSd().getSerdeInfo());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");

    tbl2.setTableName(tblName2);
    tbl2.setParameters(new HashMap<String, String>());
    tbl2.getParameters().put("EXTERNAL", "TRUE");
    tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2");

    List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
    assertNotNull(fieldSchemas);
    assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
    for (FieldSchema fs : tbl.getSd().getCols()) {
      assertTrue(fieldSchemas.contains(fs));
    }

    List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
    assertNotNull(fieldSchemasFull);
    assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size() + tbl.getPartitionKeys().size());
    for (FieldSchema fs : tbl.getSd().getCols()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }
    for (FieldSchema fs : tbl.getPartitionKeys()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }
    client.createTable(tbl2);
    if (isThriftClient) {
      tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName());
    }

    Table tbl3 = client.getTable(dbName, tblName2);
    assertNotNull(tbl3);
    assertEquals(tbl3.getDbName(), dbName);
    assertEquals(tbl3.getTableName(), tblName2);
    assertEquals(tbl3.getSd().getCols().size(), typ1.getFields().size());
    assertEquals(tbl3.getSd().isCompressed(), false);
    assertEquals(tbl3.getSd().getNumBuckets(), 1);
    assertEquals(tbl3.getSd().getLocation(), tbl2.getSd().getLocation());
    assertEquals(tbl3.getParameters(), tbl2.getParameters());

    fieldSchemas = client.getFields(dbName, tblName2);
    assertNotNull(fieldSchemas);
    assertEquals(fieldSchemas.size(), tbl2.getSd().getCols().size());
    for (FieldSchema fs : tbl2.getSd().getCols()) {
      assertTrue(fieldSchemas.contains(fs));
    }

    fieldSchemasFull = client.getSchema(dbName, tblName2);
    assertNotNull(fieldSchemasFull);
    assertEquals(fieldSchemasFull.size(), tbl2.getSd().getCols().size() + tbl2.getPartitionKeys().size());
    for (FieldSchema fs : tbl2.getSd().getCols()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }
    for (FieldSchema fs : tbl2.getPartitionKeys()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }

    assertEquals("Use this for comments etc", tbl2.getSd().getParameters().get("test_param_1"));
    assertEquals("name", tbl2.getSd().getBucketCols().get(0));
    assertTrue("Partition key list is not empty",
        (tbl2.getPartitionKeys() == null) || (tbl2.getPartitionKeys().size() == 0));
    // test get_table_objects_by_name functionality
    ArrayList<String> tableNames = new ArrayList<String>();
    tableNames.add(tblName2);
    tableNames.add(tblName);
    tableNames.add(tblName2);
    List<Table> foundTables = client.getTableObjectsByName(dbName, tableNames);

    assertEquals(2, foundTables.size());
    for (Table t : foundTables) {
      if (t.getTableName().equals(tblName2)) {
        assertEquals(t.getSd().getLocation(), tbl2.getSd().getLocation());
      } else {
        assertEquals(t.getTableName(), tblName);
        assertEquals(t.getSd().getLocation(), tbl.getSd().getLocation());
      }
      assertEquals(t.getSd().getCols().size(), typ1.getFields().size());
      assertEquals(t.getSd().isCompressed(), false);
      assertEquals(foundTables.get(0).getSd().getNumBuckets(), 1);
      assertNotNull(t.getSd().getSerdeInfo());
      assertEquals(t.getDbName(), dbName);
    }

    tableNames.add(1, "table_that_doesnt_exist");
    foundTables = client.getTableObjectsByName(dbName, tableNames);
    assertEquals(foundTables.size(), 2);
    InvalidOperationException ioe = null;
    try {
      foundTables = client.getTableObjectsByName(dbName, null);
    } catch (InvalidOperationException e) {
      ioe = e;
    }
    assertNotNull(ioe);
    assertTrue("Table not found", ioe.getMessage().contains("null tables"));

    UnknownDBException udbe = null;
    try {
      foundTables = client.getTableObjectsByName("db_that_doesnt_exist", tableNames);
    } catch (UnknownDBException e) {
      udbe = e;
    }
    assertNotNull(udbe);
    assertTrue("DB not found", udbe.getMessage().contains("not find database db_that_doesnt_exist"));

    udbe = null;
    try {
      foundTables = client.getTableObjectsByName("", tableNames);
    } catch (UnknownDBException e) {
      udbe = e;
    }
    assertNotNull(udbe);
    assertTrue("DB not found", udbe.getMessage().contains("is null or empty"));
    FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf);
    client.dropTable(dbName, tblName);
    assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));
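    // tbl2 was created with EXTERNAL=TRUE above, so dropping it must leave its data in place.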
    client.dropTable(dbName, tblName2);
    assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));

    client.dropType(typeName);
    client.dropDatabase(dbName);
  } catch (Exception e) {
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testSimpleTable() failed.");
    throw e;
  }
}
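The test above relies on getTableObjectsByName reporting a missing or empty database name by throwing UnknownDBException rather than returning an empty list. The following is a minimal caller-side sketch of that contract; the class and helper name are hypothetical, and mapping the exception to an empty list is this sketch's own policy, not Hive's:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;

public class FetchTablesSketch {
  // Hypothetical helper: return an empty list instead of failing on a bad database name.
  public static List<Table> fetchTablesOrEmpty(HiveMetaStoreClient client, String dbName,
      List<String> tableNames) throws Exception {
    try {
      return client.getTableObjectsByName(dbName, tableNames);
    } catch (UnknownDBException e) {
      // Thrown when dbName is null/empty or the database does not exist
      // (see the ObjectStore implementation below).
      return new ArrayList<Table>();
    }
  }
}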
Use of org.apache.hadoop.hive.metastore.api.UnknownDBException in project hive by apache.
In the class ObjectStore, the method getTableObjectsByName:
@Override
public List<Table> getTableObjectsByName(String db, List<String> tbl_names) throws MetaException, UnknownDBException {
  List<Table> tables = new ArrayList<Table>();
  boolean committed = false;
  Query dbExistsQuery = null;
  Query query = null;
  try {
    openTransaction();
    db = HiveStringUtils.normalizeIdentifier(db);
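    // Check that the database exists before querying tables, so a bad database
    // name surfaces as UnknownDBException instead of an empty result.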
    dbExistsQuery = pm.newQuery(MDatabase.class, "name == db");
    dbExistsQuery.declareParameters("java.lang.String db");
    dbExistsQuery.setUnique(true);
    dbExistsQuery.setResult("name");
    String dbNameIfExists = (String) dbExistsQuery.execute(db);
    if (dbNameIfExists == null || dbNameIfExists.isEmpty()) {
      throw new UnknownDBException("Could not find database " + db);
    }
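    // Table names are stored normalized, so normalize the requested names to match.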
    List<String> lowered_tbl_names = new ArrayList<String>();
    for (String t : tbl_names) {
      lowered_tbl_names.add(HiveStringUtils.normalizeIdentifier(t));
    }
    query = pm.newQuery(MTable.class);
    query.setFilter("database.name == db && tbl_names.contains(tableName)");
    query.declareParameters("java.lang.String db, java.util.Collection tbl_names");
    Collection mtables = (Collection) query.execute(db, lowered_tbl_names);
    for (Iterator iter = mtables.iterator(); iter.hasNext(); ) {
      tables.add(convertToTable((MTable) iter.next()));
    }
    committed = commitTransaction();
  } finally {
    if (!committed) {
      rollbackTransaction();
    }
    if (dbExistsQuery != null) {
      dbExistsQuery.closeAll();
    }
    if (query != null) {
      query.closeAll();
    }
  }
  return tables;
}
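Two details of this implementation are easy to miss. First, the database-existence check runs before the table query, and the finally block rolls back the still-open transaction and closes both JDO queries before the UnknownDBException propagates. Second, the contains filter matches each stored MTable row at most once, which is why the test above gets exactly two tables back even though its request list names tblName2 twice and includes a table that does not exist.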
Use of org.apache.hadoop.hive.metastore.api.UnknownDBException in project hive by apache.
In the class TestMarkPartition, the method testMarkingPartitionSet:
public void testMarkingPartitionSet() throws CommandNeedRetryException, MetaException, TException, NoSuchObjectException, UnknownDBException, UnknownTableException, InvalidPartitionException, UnknownPartitionException, InterruptedException {
  HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
  driver = new Driver(hiveConf);
  driver.run("drop database if exists hive2215 cascade");
  driver.run("create database hive2215");
  driver.run("use hive2215");
  driver.run("drop table if exists tmptbl");
  driver.run("create table tmptbl (a string) partitioned by (b string)");
  driver.run("alter table tmptbl add partition (b='2011')");

  Map<String, String> kvs = new HashMap<String, String>();
  kvs.put("b", "'2011'");
  msc.markPartitionForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
  assert msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
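  // The surrounding test setup (not shown here) is expected to configure a short
  // event-expiry interval, so the mark should be gone after the sleep.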
  Thread.sleep(10000);
  assert !msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);

  kvs.put("b", "'2012'");
  assert !msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
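  // Marking or checking an event on a table that does not exist must fail
  // with UnknownTableException.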
  try {
    msc.markPartitionForEvent("hive2215", "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
    assert false;
  } catch (Exception e) {
    assert e instanceof UnknownTableException;
  }
  try {
    msc.isPartitionMarkedForEvent("hive2215", "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
    assert false;
  } catch (Exception e) {
    assert e instanceof UnknownTableException;
  }
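  // Adding a key that is not a partition column makes the partition spec invalid.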
kvs.put("a", "'2012'");
try {
msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
assert false;
} catch (Exception e) {
assert e instanceof InvalidPartitionException;
}
}
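Like getTableObjectsByName, markPartitionForEvent signals a missing database with UnknownDBException. A minimal sketch of tolerating that case follows; the class and helper name are hypothetical, and skipping instead of failing is this sketch's own policy:

import java.util.Map;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;

public class MarkPartitionSketch {
  // Hypothetical helper: return false instead of failing when the database is missing.
  public static boolean markLoadDoneIfDbExists(HiveMetaStoreClient msc, String dbName,
      String tableName, Map<String, String> partKvs) throws Exception {
    try {
      msc.markPartitionForEvent(dbName, tableName, partKvs, PartitionEventType.LOAD_DONE);
      return true;
    } catch (UnknownDBException e) {
      // The metastore could not find dbName; treat it as nothing to mark.
      return false;
    }
  }
}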