Usage example of org.apache.hadoop.hive.metastore.api.Database in the Apache Hive project:
class TestHiveMetaStore, method checkDbOwnerType.
/**
 * Asserts that the named database has the given owner name and owner type
 * as reported by the metastore client.
 */
private void checkDbOwnerType(String dbName, String ownerName, PrincipalType ownerType) throws TException {
  // Fetch a fresh copy from the metastore so we verify persisted state, not a cached object.
  final Database database = client.getDatabase(dbName);
  assertEquals("Owner name", ownerName, database.getOwnerName());
  assertEquals("Owner type", ownerType, database.getOwnerType());
}
use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
the class TestHiveMetaStore method testSimpleTable.
/**
 * End-to-end exercise of basic table lifecycle against the metastore:
 * create a database, a type, and a managed table; verify round-tripped
 * metadata; clone it as an EXTERNAL table; exercise getFields/getSchema,
 * get_table_objects_by_name (including error paths); and verify that
 * dropping a managed table removes its data while dropping an EXTERNAL
 * table leaves the data in place.
 *
 * Fix note: JUnit's assertEquals takes (expected, actual) — the original
 * calls had the arguments reversed, which produced misleading failure
 * messages. Boolean comparisons now use assertFalse for clarity.
 */
@Test
public void testSimpleTable() throws Exception {
  try {
    String dbName = "simpdb";
    String tblName = "simptbl";
    String tblName2 = "simptbl2";
    String typeName = "Person";

    // Start from a clean slate in case a previous run left state behind.
    client.dropTable(dbName, tblName);
    silentDropDatabase(dbName);

    Database db = new Database();
    db.setName(dbName);
    client.createDatabase(db);

    client.dropType(typeName);
    Type typ1 = new Type();
    typ1.setName(typeName);
    typ1.setFields(new ArrayList<>(2));
    typ1.getFields().add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
    typ1.getFields().add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
    client.createType(typ1);

    Table tbl = new TableBuilder()
        .setDbName(dbName)
        .setTableName(tblName)
        .setCols(typ1.getFields())
        .setNumBuckets(1)
        .addBucketCol("name")
        .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
        .build();
    client.createTable(tbl);

    if (isThriftClient) {
      // the createTable() above does not update the location in the 'tbl'
      // object when the client is a thrift client and the code below relies
      // on the location being present in the 'tbl' object - so get the table
      // from the metastore
      tbl = client.getTable(dbName, tblName);
    }

    // Verify the persisted table matches what was created.
    Table tbl2 = client.getTable(dbName, tblName);
    assertNotNull(tbl2);
    assertEquals(dbName, tbl2.getDbName());
    assertEquals(tblName, tbl2.getTableName());
    assertEquals(typ1.getFields().size(), tbl2.getSd().getCols().size());
    assertFalse(tbl2.getSd().isCompressed());
    assertEquals(1, tbl2.getSd().getNumBuckets());
    assertEquals(tbl.getSd().getLocation(), tbl2.getSd().getLocation());
    assertNotNull(tbl2.getSd().getSerdeInfo());
    tbl.getSd().getSerdeInfo().setParameters(new HashMap<>());
    tbl.getSd().getSerdeInfo().getParameters().put(ColumnType.SERIALIZATION_FORMAT, "1");

    // Reuse tbl2 as the template for a second, EXTERNAL table at a sibling location.
    tbl2.setTableName(tblName2);
    tbl2.setParameters(new HashMap<>());
    tbl2.getParameters().put("EXTERNAL", "TRUE");
    tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2");

    List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
    assertNotNull(fieldSchemas);
    assertEquals(tbl.getSd().getCols().size(), fieldSchemas.size());
    for (FieldSchema fs : tbl.getSd().getCols()) {
      assertTrue(fieldSchemas.contains(fs));
    }

    // getSchema() returns columns plus partition keys.
    List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
    assertNotNull(fieldSchemasFull);
    assertEquals(tbl.getSd().getCols().size() + tbl.getPartitionKeys().size(), fieldSchemasFull.size());
    for (FieldSchema fs : tbl.getSd().getCols()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }
    for (FieldSchema fs : tbl.getPartitionKeys()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }

    client.createTable(tbl2);
    if (isThriftClient) {
      tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName());
    }

    Table tbl3 = client.getTable(dbName, tblName2);
    assertNotNull(tbl3);
    assertEquals(dbName, tbl3.getDbName());
    assertEquals(tblName2, tbl3.getTableName());
    assertEquals(typ1.getFields().size(), tbl3.getSd().getCols().size());
    assertFalse(tbl3.getSd().isCompressed());
    assertEquals(1, tbl3.getSd().getNumBuckets());
    assertEquals(tbl2.getSd().getLocation(), tbl3.getSd().getLocation());
    assertEquals(tbl2.getParameters(), tbl3.getParameters());

    fieldSchemas = client.getFields(dbName, tblName2);
    assertNotNull(fieldSchemas);
    assertEquals(tbl2.getSd().getCols().size(), fieldSchemas.size());
    for (FieldSchema fs : tbl2.getSd().getCols()) {
      assertTrue(fieldSchemas.contains(fs));
    }

    fieldSchemasFull = client.getSchema(dbName, tblName2);
    assertNotNull(fieldSchemasFull);
    assertEquals(tbl2.getSd().getCols().size() + tbl2.getPartitionKeys().size(), fieldSchemasFull.size());
    for (FieldSchema fs : tbl2.getSd().getCols()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }
    for (FieldSchema fs : tbl2.getPartitionKeys()) {
      assertTrue(fieldSchemasFull.contains(fs));
    }

    assertEquals("Use this for comments etc", tbl2.getSd().getParameters().get("test_param_1"));
    assertEquals("name", tbl2.getSd().getBucketCols().get(0));
    assertTrue("Partition key list is not empty",
        (tbl2.getPartitionKeys() == null) || (tbl2.getPartitionKeys().size() == 0));

    // test get_table_objects_by_name functionality:
    // duplicate names in the request must be de-duplicated in the result.
    ArrayList<String> tableNames = new ArrayList<>();
    tableNames.add(tblName2);
    tableNames.add(tblName);
    tableNames.add(tblName2);
    List<Table> foundTables = client.getTableObjectsByName(dbName, tableNames);
    assertEquals(2, foundTables.size());
    for (Table t : foundTables) {
      if (t.getTableName().equals(tblName2)) {
        assertEquals(tbl2.getSd().getLocation(), t.getSd().getLocation());
      } else {
        assertEquals(tblName, t.getTableName());
        assertEquals(tbl.getSd().getLocation(), t.getSd().getLocation());
      }
      assertEquals(typ1.getFields().size(), t.getSd().getCols().size());
      assertFalse(t.getSd().isCompressed());
      assertEquals(1, foundTables.get(0).getSd().getNumBuckets());
      assertNotNull(t.getSd().getSerdeInfo());
      assertEquals(dbName, t.getDbName());
    }

    // Unknown table names are silently skipped, not errors.
    tableNames.add(1, "table_that_doesnt_exist");
    foundTables = client.getTableObjectsByName(dbName, tableNames);
    assertEquals(2, foundTables.size());

    // A null table list must raise InvalidOperationException.
    InvalidOperationException ioe = null;
    try {
      foundTables = client.getTableObjectsByName(dbName, null);
    } catch (InvalidOperationException e) {
      ioe = e;
    }
    assertNotNull(ioe);
    assertTrue("Table not found", ioe.getMessage().contains("null tables"));

    // A nonexistent database must raise UnknownDBException.
    UnknownDBException udbe = null;
    try {
      foundTables = client.getTableObjectsByName("db_that_doesnt_exist", tableNames);
    } catch (UnknownDBException e) {
      udbe = e;
    }
    assertNotNull(udbe);
    assertTrue("DB not found", udbe.getMessage().contains("not find database db_that_doesnt_exist"));

    // An empty database name must also raise UnknownDBException.
    udbe = null;
    try {
      foundTables = client.getTableObjectsByName("", tableNames);
    } catch (UnknownDBException e) {
      udbe = e;
    }
    assertNotNull(udbe);
    assertTrue("DB not found", udbe.getMessage().contains("is null or empty"));

    FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), conf);
    // Dropping a managed table deletes its data directory...
    client.dropTable(dbName, tblName);
    assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));
    // ...but dropping an EXTERNAL table must leave the data in place.
    client.dropTable(dbName, tblName2);
    assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));

    client.dropType(typeName);
    client.dropDatabase(dbName);
  } catch (Exception e) {
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testSimpleTable() failed.");
    throw e;
  }
}
use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
the class TestHiveMetaStore method testAlterViewParititon.
/**
 * Verifies that a partition attached to a VIRTUAL_VIEW can be altered:
 * creates a base table, a partitioned view over it, adds a partition,
 * mutates a partition parameter via alter_partition, and checks that the
 * change is persisted.
 *
 * Fix note: the final assertEquals had (expected, actual) reversed;
 * JUnit expects the expected value first.
 */
@Test
public void testAlterViewParititon() throws Throwable {
  String dbName = "compdb";
  String tblName = "comptbl";
  String viewName = "compView";

  // Start from a clean slate in case a previous run left state behind.
  client.dropTable(dbName, tblName);
  silentDropDatabase(dbName);

  Database db = new Database();
  db.setName(dbName);
  db.setDescription("Alter Partition Test database");
  client.createDatabase(db);

  Table tbl = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tblName)
      .addCol("name", ColumnType.STRING_TYPE_NAME)
      .addCol("income", ColumnType.INT_TYPE_NAME)
      .build();
  client.createTable(tbl);

  if (isThriftClient) {
    // the createTable() above does not update the location in the 'tbl'
    // object when the client is a thrift client and the code below relies
    // on the location being present in the 'tbl' object - so get the table
    // from the metastore
    tbl = client.getTable(dbName, tblName);
  }

  // Build a view partitioned by 'name' projecting 'income' from the base table.
  ArrayList<FieldSchema> viewCols = new ArrayList<>(1);
  viewCols.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
  ArrayList<FieldSchema> viewPartitionCols = new ArrayList<>(1);
  viewPartitionCols.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));

  Table view = new Table();
  view.setDbName(dbName);
  view.setTableName(viewName);
  view.setTableType(TableType.VIRTUAL_VIEW.name());
  view.setPartitionKeys(viewPartitionCols);
  view.setViewOriginalText("SELECT income, name FROM " + tblName);
  view.setViewExpandedText("SELECT `" + tblName + "`.`income`, `" + tblName
      + "`.`name` FROM `" + dbName + "`.`" + tblName + "`");
  view.setRewriteEnabled(false);
  StorageDescriptor viewSd = new StorageDescriptor();
  view.setSd(viewSd);
  viewSd.setCols(viewCols);
  viewSd.setCompressed(false);
  viewSd.setParameters(new HashMap<>());
  viewSd.setSerdeInfo(new SerDeInfo());
  viewSd.getSerdeInfo().setParameters(new HashMap<>());
  client.createTable(view);

  if (isThriftClient) {
    // the createTable() above does not update the location in the 'tbl'
    // object when the client is a thrift client and the code below relies
    // on the location being present in the 'tbl' object - so get the table
    // from the metastore
    view = client.getTable(dbName, viewName);
  }

  // Add a partition to the view, then alter its parameters and verify persistence.
  List<String> vals = new ArrayList<>(1);
  vals.add("abc");
  Partition part = new Partition();
  part.setDbName(dbName);
  part.setTableName(viewName);
  part.setValues(vals);
  part.setParameters(new HashMap<>());
  client.add_partition(part);

  Partition part2 = client.getPartition(dbName, viewName, part.getValues());
  part2.getParameters().put("a", "b");
  client.alter_partition(dbName, viewName, part2, null);

  Partition part3 = client.getPartition(dbName, viewName, part.getValues());
  assertEquals("couldn't view alter partition", "b", part3.getParameters().get("a"));

  client.dropTable(dbName, viewName);
  client.dropTable(dbName, tblName);
  client.dropDatabase(dbName);
}
use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
the class TestHiveMetaStore method testValidateTableCols.
/**
 * Verifies ObjectStore.validateTableCols: validation succeeds when the
 * requested columns exist on the table, and throws a MetaException naming
 * the missing column when one does not.
 *
 * Fix note: the assertEquals on the exception message had (expected, actual)
 * reversed; JUnit expects the expected value first.
 */
@Test
public void testValidateTableCols() throws Throwable {
  try {
    String dbName = "compdb";
    String tblName = "comptbl";

    // Start from a clean slate in case a previous run left state behind.
    client.dropTable(dbName, tblName);
    silentDropDatabase(dbName);

    Database db = new Database();
    db.setName(dbName);
    db.setDescription("Validate Table Columns test");
    client.createDatabase(db);

    Table tbl = new TableBuilder()
        .setDbName(dbName)
        .setTableName(tblName)
        .addCol("name", ColumnType.STRING_TYPE_NAME)
        .addCol("income", ColumnType.INT_TYPE_NAME)
        .build();
    client.createTable(tbl);
    if (isThriftClient) {
      // Re-fetch so 'tbl' reflects the server-populated metadata.
      tbl = client.getTable(dbName, tblName);
    }

    // Existing column: validation must not throw.
    List<String> expectedCols = Lists.newArrayList();
    expectedCols.add("name");
    ObjectStore objStore = new ObjectStore();
    try {
      objStore.validateTableCols(tbl, expectedCols);
    } catch (MetaException ex) {
      throw new RuntimeException(ex);
    }

    // Missing column: validation must throw with a message naming it.
    expectedCols.add("doesntExist");
    boolean exceptionFound = false;
    try {
      objStore.validateTableCols(tbl, expectedCols);
    } catch (MetaException ex) {
      assertEquals("Column doesntExist doesn't exist in table comptbl in database compdb",
          ex.getMessage());
      exceptionFound = true;
    }
    assertTrue(exceptionFound);
  } catch (Exception e) {
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testValidateTableCols() failed.");
    throw e;
  }
}
use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
the class TestHiveMetaStore method testDBOwner.
/**
 * Verifies that the default database is owned by the PUBLIC role.
 *
 * Fix note: both assertEquals calls had (expected, actual) reversed;
 * JUnit expects the expected value first.
 */
@Test
public void testDBOwner() throws TException {
  Database db = client.getDatabase(Warehouse.DEFAULT_DATABASE_NAME);
  assertEquals(HiveMetaStore.PUBLIC, db.getOwnerName());
  assertEquals(PrincipalType.ROLE, db.getOwnerType());
}
Aggregations