Example usage of org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder in the Apache Hive project, from the getClient method of the TestCatalogNonDefaultClient class.
@Override
protected IMetaStoreClient getClient() throws Exception {
  // Launch a standalone metastore server against its own copy of the configuration.
  Configuration serverConf = new Configuration(conf);
  int msPort = MetaStoreTestUtils.startMetaStoreWithRetry(
      HadoopThriftAuthBridge.getBridge(), serverConf, false, false, false, false, false);
  // Point the client at the freshly started server; the default catalog is set
  // on the client side only.
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:" + msPort);
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT, catName);
  IMetaStoreClient msClient = new HiveMetaStoreClient(conf);
  assert !msClient.isLocalMetaStore();
  // Every non-catalog call is now routed to a catalog that does not exist yet,
  // so restrict ourselves to catalog operations until it has been created.
  catLocation = MetaStoreTestUtils.getTestWarehouseDir(catName);
  Catalog cat = new CatalogBuilder()
      .setName(catName)
      .setLocation(catLocation)
      .build();
  msClient.createCatalog(cat);
  return msClient;
}
Example usage of org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder in the Apache Hive project, from the otherCatalog method of the TestDropPartitions class.
@Test
@ConditionalIgnoreOnSessionHiveMetastoreClient
public void otherCatalog() throws TException {
  // Build a dedicated catalog/database/table so drops cannot touch the default catalog.
  String catName = "drop_partition_catalog";
  Catalog cat = new CatalogBuilder()
      .setName(catName)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
      .build();
  client.createCatalog(cat);
  String dbName = "drop_partition_database_in_other_catalog";
  Database db = new DatabaseBuilder()
      .setName(dbName)
      .setCatalogName(catName)
      .create(client, metaStore.getConf());
  String tableName = "table_in_other_catalog";
  Table table = new TableBuilder()
      .inDb(db)
      .setTableName(tableName)
      .addCol("id", "int")
      .addCol("name", "string")
      .addPartCol("partcol", "string")
      .create(client, metaStore.getConf());
  // Two partitions: partcol=a0 and partcol=a1.
  Partition[] partitions = new Partition[2];
  for (int idx = 0; idx < partitions.length; ++idx) {
    partitions[idx] = new PartitionBuilder()
        .inTable(table)
        .addValue("a" + idx)
        .build(metaStore.getConf());
  }
  client.add_partitions(Arrays.asList(partitions));
  List<Partition> listed = client.listPartitions(catName, dbName, tableName, (short) -1);
  Assert.assertEquals(partitions.length, listed.size());
  // Drop by value list, then confirm the partition is really gone.
  Assert.assertTrue(client.dropPartition(catName, dbName, tableName,
      Collections.singletonList("a0"), PartitionDropOptions.instance().ifExists(false)));
  try {
    client.getPartition(catName, dbName, tableName, Collections.singletonList("a0"));
    Assert.fail();
  } catch (NoSuchObjectException e) {
    // expected: the partition was dropped
  }
  // Drop by partition name string, then confirm again.
  Assert.assertTrue(client.dropPartition(catName, dbName, tableName, "partcol=a1", true));
  try {
    client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
    Assert.fail();
  } catch (NoSuchObjectException e) {
    // expected: the partition was dropped
  }
}
Example usage of org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder in the Apache Hive project, from the setUp method of the TestForeignKey class.
@Before
public void setUp() throws Exception {
  // Each test starts with a fresh client.
  client = metaStore.getClient();
  // Remove leftovers from previous runs: the extra database, every table in the
  // default database, and the whole secondary catalog.
  client.dropDatabase(OTHER_DATABASE, true, true, true);
  for (String table : client.getAllTables(DEFAULT_DATABASE_NAME)) {
    client.dropTable(DEFAULT_DATABASE_NAME, table, true, true, true);
  }
  client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true);
  try {
    client.dropCatalog(OTHER_CATALOG);
  } catch (NoSuchObjectException e) {
    // An absent catalog on a clean run is fine.
  }
  // Wipe the trash dirs as well.
  metaStore.cleanWarehouseDirs();
  new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf());
  Catalog otherCat = new CatalogBuilder()
      .setName(OTHER_CATALOG)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG))
      .build();
  client.createCatalog(otherCat);
  // No explicit location here, so the database must end up inside the catalog directory.
  inOtherCatalog = new DatabaseBuilder()
      .setName(DATABASE_IN_OTHER_CATALOG)
      .setCatalogName(OTHER_CATALOG)
      .create(client, metaStore.getConf());
  testTables[0] = new TableBuilder()
      .setTableName("test_table_1")
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .create(client, metaStore.getConf());
  testTables[1] = new TableBuilder()
      .setDbName(OTHER_DATABASE)
      .setTableName("test_table_2")
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .create(client, metaStore.getConf());
  testTables[2] = new TableBuilder()
      .inDb(inOtherCatalog)
      .setTableName("test_table_3")
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .create(client, metaStore.getConf());
  testTables[3] = new TableBuilder()
      .inDb(inOtherCatalog)
      .setTableName("test_table_4")
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .create(client, metaStore.getConf());
  // Re-read every table through the client so the array holds the server-side versions.
  for (int i = 0; i < testTables.length; i++) {
    Table created = testTables[i];
    testTables[i] = client.getTable(created.getCatName(), created.getDbName(), created.getTableName());
  }
}
Example usage of org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder in the Apache Hive project, from the otherCatalog method of the TestListPartitions class.
@Test
@ConditionalIgnoreOnSessionHiveMetastoreClient
public void otherCatalog() throws TException {
  // Exercise every partition-listing API against a non-default catalog.
  String catName = "list_partition_catalog";
  Catalog cat = new CatalogBuilder()
      .setName(catName)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
      .build();
  client.createCatalog(cat);
  String dbName = "list_partition_database_in_other_catalog";
  Database db = new DatabaseBuilder()
      .setName(dbName)
      .setCatalogName(catName)
      .create(client, metaStore.getConf());
  String tableName = "table_in_other_catalog";
  Table table = new TableBuilder()
      .inDb(db)
      .setTableName(tableName)
      .addCol("id", "int")
      .addCol("name", "string")
      .addPartCol("partcol", "string")
      .create(client, metaStore.getConf());
  // Five partitions: partcol=a0 .. partcol=a4.
  Partition[] partitions = new Partition[5];
  for (int idx = 0; idx < partitions.length; ++idx) {
    partitions[idx] = new PartitionBuilder()
        .inTable(table)
        .addValue("a" + idx)
        .build(metaStore.getConf());
  }
  client.add_partitions(Arrays.asList(partitions));
  // Unfiltered listing returns everything and tags results with the catalog.
  List<Partition> result = client.listPartitions(catName, dbName, tableName, -1);
  Assert.assertEquals(partitions.length, result.size());
  Assert.assertEquals(catName, result.get(0).getCatName());
  // Listing by partial value list.
  result = client.listPartitions(catName, dbName, tableName, Collections.singletonList("a0"), -1);
  Assert.assertEquals(1, result.size());
  Assert.assertEquals(catName, result.get(0).getCatName());
  // PartitionSpec variant.
  PartitionSpecProxy specProxy = client.listPartitionSpecs(catName, dbName, tableName, -1);
  Assert.assertEquals(partitions.length, specProxy.size());
  Assert.assertEquals(catName, specProxy.getCatName());
  // Filter expressions.
  result = client.listPartitionsByFilter(catName, dbName, tableName, "partcol=\"a0\"", -1);
  Assert.assertEquals(1, result.size());
  Assert.assertEquals(catName, result.get(0).getCatName());
  specProxy = client.listPartitionSpecsByFilter(catName, dbName, tableName, "partcol=\"a0\"", -1);
  Assert.assertEquals(1, specProxy.size());
  Assert.assertEquals(catName, specProxy.getCatName());
  Assert.assertEquals(1, client.getNumPartitionsByFilter(catName, dbName, tableName, "partcol=\"a0\""));
  // Name listings: a max larger than the partition count returns all names.
  List<String> names = client.listPartitionNames(catName, dbName, tableName, 57);
  Assert.assertEquals(partitions.length, names.size());
  names = client.listPartitionNames(catName, dbName, tableName,
      Collections.singletonList("a0"), Short.MAX_VALUE + 1);
  Assert.assertEquals(1, names.size());
  // Partition-values request with an explicit catalog.
  PartitionValuesRequest valuesRequest = new PartitionValuesRequest(dbName, tableName,
      Lists.newArrayList(new FieldSchema("partcol", "string", "")));
  valuesRequest.setCatName(catName);
  PartitionValuesResponse valuesResponse = client.listPartitionValues(valuesRequest);
  Assert.assertEquals(5, valuesResponse.getPartitionValuesSize());
}
Example usage of org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder in the Apache Hive project, from the otherCatalog method of the TestTablesGetExists class.
// Tests for getTable in other catalogs are covered in TestTablesCreateDropAlterTruncate.
@Test
public void otherCatalog() throws TException {
  // Use a dedicated catalog so the name-pattern queries cannot match other tests' tables.
  String catName = "get_exists_tables_in_other_catalogs";
  Catalog cat = new CatalogBuilder()
      .setName(catName)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
      .build();
  client.createCatalog(cat);
  String dbName = "db_in_other_catalog";
  // For this one don't specify a location to make sure it gets put in the catalog directory
  Database db = new DatabaseBuilder()
      .setName(dbName)
      .setCatalogName(catName)
      .create(client, metaStore.getConf());
  // Create four tables, table_in_other_catalog_0 .. _3.
  String[] tableNames = new String[4];
  for (int i = 0; i < tableNames.length; i++) {
    tableNames[i] = "table_in_other_catalog_" + i;
    new TableBuilder()
        .inDb(db)
        .setTableName(tableNames[i])
        .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME)
        .addCol("col2_" + i, ColumnType.INT_TYPE_NAME)
        .create(client, metaStore.getConf());
  }
  // A wildcard matching all four tables.
  Set<String> found = new HashSet<>(client.getTables(catName, dbName, "*e_in_other_*"));
  Assert.assertEquals(4, found.size());
  for (String expected : tableNames) {
    Assert.assertTrue(found.contains(expected));
  }
  // A wildcard matching exactly one table.
  List<String> matches = client.getTables(catName, dbName, "*_3");
  Assert.assertEquals(1, matches.size());
  Assert.assertEquals(tableNames[3], matches.get(0));
  // tableExists must honor the catalog argument.
  Assert.assertTrue("Table exists", client.tableExists(catName, dbName, tableNames[0]));
  Assert.assertFalse("Table not exists", client.tableExists(catName, dbName, "non_existing_table"));
}
End of aggregated CatalogBuilder usage examples from the Apache Hive project.