Use of org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder in the Apache Hive project.
From the class TestCatalogs, method catalogOperations.
@Test
public void catalogOperations() throws TException {
  String[] catNames = { "cat1", "cat2", "ADifferentName" };
  String[] description = { "a description", "super descriptive", null };
  String[] location = { MetaStoreTestUtils.getTestWarehouseDir("cat1"),
      MetaStoreTestUtils.getTestWarehouseDir("cat2"),
      MetaStoreTestUtils.getTestWarehouseDir("different") };
  for (int i = 0; i < catNames.length; i++) {
    Catalog cat = new CatalogBuilder()
        .setName(catNames[i])
        .setLocation(location[i])
        .setDescription(description[i])
        .build();
    client.createCatalog(cat);
    File dir = new File(cat.getLocationUri());
    Assert.assertTrue(dir.exists() && dir.isDirectory());
  }
  for (int i = 0; i < catNames.length; i++) {
    Catalog cat = client.getCatalog(catNames[i]);
    Assert.assertTrue(catNames[i].equalsIgnoreCase(cat.getName()));
    Assert.assertEquals(description[i], cat.getDescription());
    Assert.assertEquals(location[i], cat.getLocationUri());
    Assert.assertTrue("Create time of catalog should be set", cat.isSetCreateTime());
    Assert.assertTrue("Create time of catalog should be non-zero", cat.getCreateTime() > 0);
    File dir = new File(cat.getLocationUri());
    Assert.assertTrue(dir.exists() && dir.isDirectory());
    // Make sure there's a default database associated with each catalog
    Database db = client.getDatabase(catNames[i], DEFAULT_DATABASE_NAME);
    Assert.assertEquals("file:" + cat.getLocationUri(), db.getLocationUri());
  }
  List<String> catalogs = client.getCatalogs();
  Assert.assertEquals(4, catalogs.size());
  catalogs.sort(Comparator.naturalOrder());
  List<String> expected = new ArrayList<>(catNames.length + 1);
  expected.add(Warehouse.DEFAULT_CATALOG_NAME);
  expected.addAll(Arrays.asList(catNames));
  expected.sort(Comparator.naturalOrder());
  for (int i = 0; i < catalogs.size(); i++) {
    Assert.assertTrue("Expected " + expected.get(i) + " actual " + catalogs.get(i),
        catalogs.get(i).equalsIgnoreCase(expected.get(i)));
  }
  // Update catalogs
  // Update location
  Catalog newCat = new Catalog(client.getCatalog(catNames[0]));
  String newLocation = MetaStoreTestUtils.getTestWarehouseDir("a_different_location");
  newCat.setLocationUri(newLocation);
  client.alterCatalog(catNames[0], newCat);
  Catalog fetchedNewCat = client.getCatalog(catNames[0]);
  Assert.assertEquals(newLocation, fetchedNewCat.getLocationUri());
  Assert.assertEquals(description[0], fetchedNewCat.getDescription());
  // Update description
  newCat = new Catalog(client.getCatalog(catNames[1]));
  String newDescription = "an even more descriptive description";
  newCat.setDescription(newDescription);
  client.alterCatalog(catNames[1], newCat);
  fetchedNewCat = client.getCatalog(catNames[1]);
  Assert.assertEquals(location[1], fetchedNewCat.getLocationUri());
  Assert.assertEquals(newDescription, fetchedNewCat.getDescription());
  for (int i = 0; i < catNames.length; i++) {
    client.dropCatalog(catNames[i]);
    File dir = new File(location[i]);
    Assert.assertFalse(dir.exists());
  }
  catalogs = client.getCatalogs();
  Assert.assertEquals(1, catalogs.size());
  Assert.assertTrue(catalogs.get(0).equalsIgnoreCase(Warehouse.DEFAULT_CATALOG_NAME));
}
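Distilled from the test above, a minimal sketch of the catalog lifecycle (build, create, alter, drop) against an IMetaStoreClient. This is not code from the Hive project; the class name, catalog name, and warehouse path are placeholders, and only the CatalogBuilder and client calls exercised in the test are used.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
import org.apache.thrift.TException;

public class CatalogLifecycleSketch {
  static void run(IMetaStoreClient client) throws TException {
    // Build the Catalog object locally, then ask the metastore to create it.
    Catalog cat = new CatalogBuilder()
        .setName("example_cat")                     // placeholder name
        .setLocation("/tmp/warehouse/example_cat")  // placeholder location
        .setDescription("example catalog")
        .build();
    client.createCatalog(cat);

    // Copy the stored catalog, change a mutable field, and push the update back.
    Catalog updated = new Catalog(client.getCatalog("example_cat"));
    updated.setDescription("updated description");
    client.alterCatalog("example_cat", updated);

    // Drop the catalog; in the test above this also removes its warehouse directory.
    client.dropCatalog("example_cat");
  }
}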
Use of org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder in the Apache Hive project.
From the class TestCatalogs, method alterChangeName.
@Test(expected = InvalidOperationException.class)
public void alterChangeName() throws TException {
  String catName = "alter_change_name";
  String location = MetaStoreTestUtils.getTestWarehouseDir(catName);
  String description = "I have a bad feeling about this";
  new CatalogBuilder()
      .setName(catName)
      .setLocation(location)
      .setDescription(description)
      .create(client);
  Catalog newCat = client.getCatalog(catName);
  newCat.setName("you_may_call_me_tim");
  client.alterCatalog(catName, newCat);
}
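Note the one-step form used here: CatalogBuilder.create(client) builds the Catalog and registers it with the metastore in a single call, instead of build() followed by client.createCatalog(). A minimal sketch of that form, with placeholder names and assuming a connected client as in the test:

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
import org.apache.thrift.TException;

public class CreateCatalogSketch {
  static void create(IMetaStoreClient client) throws TException {
    // build() and client.createCatalog(...) rolled into one call.
    new CatalogBuilder()
        .setName("example_cat")                     // placeholder name
        .setLocation("/tmp/warehouse/example_cat")  // placeholder location
        .setDescription("example catalog")
        .create(client);
  }
}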
Use of org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder in the Apache Hive project.
From the class TestGetPartitions, method otherCatalog.
@Test
@ConditionalIgnoreOnSessionHiveMetastoreClient
public void otherCatalog() throws TException {
  String catName = "get_partition_catalog";
  Catalog cat = new CatalogBuilder()
      .setName(catName)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
      .build();
  client.createCatalog(cat);
  String dbName = "get_partition_database_in_other_catalog";
  Database db = new DatabaseBuilder()
      .setName(dbName)
      .setCatalogName(catName)
      .create(client, metaStore.getConf());
  String tableName = "table_in_other_catalog";
  Table table = new TableBuilder()
      .inDb(db)
      .setTableName(tableName)
      .addCol("id", "int")
      .addCol("name", "string")
      .addPartCol("partcol", "string")
      .addTableParam("PARTITION_LEVEL_PRIVILEGE", "true")
      .create(client, metaStore.getConf());
  Partition[] parts = new Partition[5];
  for (int i = 0; i < parts.length; i++) {
    parts[i] = new PartitionBuilder()
        .inTable(table)
        .addValue("a" + i)
        .build(metaStore.getConf());
  }
  client.add_partitions(Arrays.asList(parts));
  Partition fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a0"));
  Assert.assertEquals(catName, fetched.getCatName());
  Assert.assertEquals("a0", fetched.getValues().get(0));
  fetched = client.getPartition(catName, dbName, tableName, "partcol=a0");
  Assert.assertEquals(catName, fetched.getCatName());
  Assert.assertEquals("a0", fetched.getValues().get(0));
  List<Partition> fetchedParts = client.getPartitionsByNames(catName, dbName, tableName,
      Arrays.asList("partcol=a0", "partcol=a1"));
  Assert.assertEquals(2, fetchedParts.size());
  Set<String> vals = new HashSet<>(fetchedParts.size());
  for (Partition part : fetchedParts) {
    vals.add(part.getValues().get(0));
  }
  Assert.assertTrue(vals.contains("a0"));
  Assert.assertTrue(vals.contains("a1"));
}
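The same pattern outside a test harness, as a minimal sketch: create a catalog, put a database and a partitioned table in it, add a partition, and read it back through the catalog-qualified getPartition call. Only builder methods and client calls that appear in the test above are used; the names, the warehouse path, and the Configuration argument are placeholder assumptions.

import java.util.Arrays;
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
import org.apache.thrift.TException;

public class OtherCatalogSketch {
  static void run(IMetaStoreClient client, Configuration conf) throws TException {
    // A catalog with its own warehouse location.
    Catalog cat = new CatalogBuilder()
        .setName("example_cat")
        .setLocation("/tmp/warehouse/example_cat")  // placeholder location
        .build();
    client.createCatalog(cat);

    // DatabaseBuilder.create() both builds the object and registers it with the metastore.
    Database db = new DatabaseBuilder()
        .setName("example_db")
        .setCatalogName("example_cat")
        .create(client, conf);

    // inDb() ties the table to the database, and therefore to the catalog.
    Table table = new TableBuilder()
        .inDb(db)
        .setTableName("example_table")
        .addCol("id", "int")
        .addPartCol("partcol", "string")
        .create(client, conf);

    // Partitions are built locally and then added through the client.
    Partition part = new PartitionBuilder()
        .inTable(table)
        .addValue("a0")
        .build(conf);
    client.add_partitions(Arrays.asList(part));

    // Reads are catalog-qualified; the fetched partition carries the catalog name.
    Partition fetched = client.getPartition("example_cat", "example_db", "example_table",
        Collections.singletonList("a0"));
  }
}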
Use of org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder in the Apache Hive project.
From the class TestNotNullConstraint, method setUp.
@Before
public void setUp() throws Exception {
  // Get new client
  client = metaStore.getClient();
  // Clean up the database
  client.dropDatabase(OTHER_DATABASE, true, true, true);
  // Drop every table in the default database
  for (String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) {
    client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true);
  }
  client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true);
  try {
    client.dropCatalog(OTHER_CATALOG);
  } catch (NoSuchObjectException e) {
    // NOP
  }
  // Clean up trash
  metaStore.cleanWarehouseDirs();
  new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf());
  Catalog cat = new CatalogBuilder()
      .setName(OTHER_CATALOG)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG))
      .build();
  client.createCatalog(cat);
  // For this one don't specify a location to make sure it gets put in the catalog directory
  inOtherCatalog = new DatabaseBuilder()
      .setName(DATABASE_IN_OTHER_CATALOG)
      .setCatalogName(OTHER_CATALOG)
      .create(client, metaStore.getConf());
  testTables[0] = new TableBuilder()
      .setTableName("test_table_1")
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .create(client, metaStore.getConf());
  testTables[1] = new TableBuilder()
      .setDbName(OTHER_DATABASE)
      .setTableName("test_table_2")
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .create(client, metaStore.getConf());
  testTables[2] = new TableBuilder()
      .inDb(inOtherCatalog)
      .setTableName("test_table_3")
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .create(client, metaStore.getConf());
  // Reload tables from the MetaStore
  for (int i = 0; i < testTables.length; i++) {
    testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(),
        testTables[i].getTableName());
  }
}
Use of org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder in the Apache Hive project.
From the class TestUniqueConstraint, method setUp.
@Before
public void setUp() throws Exception {
  // Get new client
  client = metaStore.getClient();
  // Clean up the database
  client.dropDatabase(OTHER_DATABASE, true, true, true);
  // Drop every table in the default database
  for (String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) {
    client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true);
  }
  client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true);
  try {
    client.dropCatalog(OTHER_CATALOG);
  } catch (NoSuchObjectException e) {
    // NOP
  }
  // Clean up trash
  metaStore.cleanWarehouseDirs();
  new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf());
  Catalog cat = new CatalogBuilder()
      .setName(OTHER_CATALOG)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG))
      .build();
  client.createCatalog(cat);
  // For this one don't specify a location to make sure it gets put in the catalog directory
  inOtherCatalog = new DatabaseBuilder()
      .setName(DATABASE_IN_OTHER_CATALOG)
      .setCatalogName(OTHER_CATALOG)
      .create(client, metaStore.getConf());
  testTables[0] = new TableBuilder()
      .setTableName("test_table_1")
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .create(client, metaStore.getConf());
  testTables[1] = new TableBuilder()
      .setDbName(OTHER_DATABASE)
      .setTableName("test_table_2")
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .create(client, metaStore.getConf());
  testTables[2] = new TableBuilder()
      .inDb(inOtherCatalog)
      .setTableName("test_table_3")
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .create(client, metaStore.getConf());
  // Reload tables from the MetaStore
  for (int i = 0; i < testTables.length; i++) {
    testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(),
        testTables[i].getTableName());
  }
}
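Both setUp methods above follow the same reset pattern: drop the database inside the extra catalog, drop the catalog while tolerating its absence, then recreate both. A minimal standalone sketch of that pattern; the names, warehouse path, and helper signature are placeholders, while the client and builder calls mirror the setUp code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
import org.apache.thrift.TException;

public class CatalogResetSketch {
  static void reset(IMetaStoreClient client, Configuration conf) throws TException {
    // Drop the database inside the extra catalog, then the catalog itself.
    // The boolean flags are passed through exactly as in the setUp methods above.
    client.dropDatabase("other_cat", "db_in_other_cat", true, true, true);
    try {
      client.dropCatalog("other_cat");
    } catch (NoSuchObjectException e) {
      // First run: the catalog does not exist yet, which is fine.
    }

    // Recreate the catalog so every test starts from a known, empty state.
    Catalog cat = new CatalogBuilder()
        .setName("other_cat")
        .setLocation("/tmp/warehouse/other_cat")  // placeholder location
        .build();
    client.createCatalog(cat);

    // No location is given for the database, so it lands in the catalog's directory.
    new DatabaseBuilder()
        .setName("db_in_other_cat")
        .setCatalogName("other_cat")
        .create(client, conf);
  }
}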