Usage of org.apache.hadoop.hive.metastore.api.Catalog in the Apache Hive project — class TestPartitionManagement, method createMetadata:
/**
 * Creates the catalog (if non-default), database (if non-default), table and partitions used
 * by the partition-management tests, then returns the "key=value" names of the partitions.
 *
 * @param catName      catalog to create the metadata in; created unless it is the default catalog
 * @param dbName       database to create the table in; created unless it is the default database
 * @param tableName    name of the table to create
 * @param partKeys     partition key names, or null for an unpartitioned table
 * @param partKeyTypes types for {@code partKeys}; must be non-null and the same size when
 *                     {@code partKeys} is non-null
 * @param partVals     partition value lists; one inner list per partition to create
 * @param colMap       regular (non-partition) columns to add to the table
 * @param isOrc        when true the table uses the ORC input/output formats
 * @return the partition names created, empty for an unpartitioned table
 * @throws TException on any metastore failure
 */
private List<String> createMetadata(String catName, String dbName, String tableName, List<String> partKeys, List<String> partKeyTypes, List<List<String>> partVals, Map<String, Column> colMap, boolean isOrc) throws TException {
  if (!DEFAULT_CATALOG_NAME.equals(catName)) {
    Catalog cat = new CatalogBuilder()
        .setName(catName)
        .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
        .build();
    client.createCatalog(cat);
  }
  Database db;
  if (!DEFAULT_DATABASE_NAME.equals(dbName)) {
    DatabaseBuilder dbBuilder = new DatabaseBuilder().setName(dbName);
    dbBuilder.setCatalogName(catName);
    db = dbBuilder.create(client, conf);
  } else {
    db = client.getDatabase(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME);
  }
  TableBuilder tb = new TableBuilder().inDb(db).setTableName(tableName);
  if (isOrc) {
    tb.setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
  }
  for (Column col : colMap.values()) {
    tb.addCol(col.colName, col.colType);
  }
  if (partKeys != null) {
    if (partKeyTypes == null) {
      throw new IllegalArgumentException("partKeyTypes cannot be null when partKeys is non-null");
    }
    if (partKeys.size() != partKeyTypes.size()) {
      throw new IllegalArgumentException("partKeys and partKeyTypes size should be same");
    }
    // Null-check before isEmpty() so a missing partVals fails with a clear message, not an NPE.
    // (Also fixes the "patitioned" typo in the original message.)
    if (partVals == null || partVals.isEmpty()) {
      throw new IllegalArgumentException("partVals cannot be empty for partitioned table");
    }
    for (int i = 0; i < partKeys.size(); i++) {
      tb.addPartCol(partKeys.get(i), partKeyTypes.get(i));
    }
  }
  Table table = tb.create(client, conf);
  if (partKeys != null) {
    // One partition per inner list of partVals; each inner list supplies one value per key.
    for (List<String> partVal : partVals) {
      new PartitionBuilder().inTable(table).setValues(partVal).addToTable(client, conf);
    }
  }
  List<String> partNames = new ArrayList<>();
  if (partKeys != null) {
    // NOTE(review): this loop pairs partKeys.get(i) with partVals.get(i), i.e. the i-th
    // PARTITION's value list — not the i-th key's values across partitions. That only yields
    // consistent names under the call patterns these tests use; confirm against the callers
    // before relying on partNames for multi-key tables.
    for (int i = 0; i < partKeys.size(); i++) {
      String partKey = partKeys.get(i);
      for (String partVal : partVals.get(i)) {
        String partName = partKey + "=" + partVal;
        partNames.add(partName);
      }
    }
  }
  client.flushCache();
  return partNames;
}
Usage of org.apache.hadoop.hive.metastore.api.Catalog in the Apache Hive project — class TestStats, method createMetadata:
/**
 * Builds the catalog/database/table/partition metadata used by the stats tests, records column
 * statistics for the table (or each partition), and returns the created partition names.
 *
 * @param catName   catalog to use; created unless it is the default catalog or the NO_CAT marker
 * @param dbName    database to use; created unless it is the default database
 * @param tableName table to create
 * @param partKey   single string-typed partition key, or null for an unpartitioned table
 * @param partVals  one value per partition to create; required when partKey is non-null
 * @param colMap    regular columns to add to the table
 * @return the "key=value" partition names created, empty for an unpartitioned table
 * @throws TException on any metastore failure
 */
private List<String> createMetadata(String catName, String dbName, String tableName, String partKey, List<String> partVals, Map<String, Column> colMap) throws TException {
  boolean customCatalog = !DEFAULT_CATALOG_NAME.equals(catName) && !NO_CAT.equals(catName);
  if (customCatalog) {
    client.createCatalog(new CatalogBuilder()
        .setName(catName)
        .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
        .build());
  }
  Database database;
  if (DEFAULT_DATABASE_NAME.equals(dbName)) {
    database = client.getDatabase(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME);
  } else {
    DatabaseBuilder dbBuilder = new DatabaseBuilder().setName(dbName);
    // NO_CAT means "do not set a catalog at all" on the database builder.
    if (!NO_CAT.equals(catName)) {
      dbBuilder.setCatalogName(catName);
    }
    database = dbBuilder.create(client, conf);
  }
  TableBuilder tableBuilder = new TableBuilder().inDb(database).setTableName(tableName);
  for (Column column : colMap.values()) {
    tableBuilder.addCol(column.colName, column.colType);
  }
  boolean partitioned = partKey != null;
  if (partitioned) {
    assert partVals != null && !partVals.isEmpty() : "Must provide partition values for partitioned table";
    tableBuilder.addPartCol(partKey, ColumnType.STRING_TYPE_NAME);
  }
  Table table = tableBuilder.create(client, conf);
  if (partitioned) {
    for (String value : partVals) {
      new PartitionBuilder().inTable(table).addValue(value).addToTable(client, conf);
    }
  }
  // Accumulate stats for the whole table, or once per partition, in a single request.
  SetPartitionsStatsRequest statsRequest = new SetPartitionsStatsRequest();
  List<String> createdPartNames = new ArrayList<>();
  if (!partitioned) {
    statsRequest.addToColStats(buildStatsForOneTableOrPartition(catName, dbName, tableName, null, colMap.values()));
  } else {
    for (String value : partVals) {
      String name = partKey + "=" + value;
      statsRequest.addToColStats(buildStatsForOneTableOrPartition(catName, dbName, tableName, name, colMap.values()));
      createdPartNames.add(name);
    }
  }
  statsRequest.setEngine(ENGINE);
  client.setPartitionColumnStatistics(statsRequest);
  return createdPartNames;
}
Usage of org.apache.hadoop.hive.metastore.api.Catalog in the Apache Hive project — class TestCatalogs, method dropNonEmptyCatalog:
/**
 * Dropping a catalog that still contains a user-created database must fail with
 * {@link InvalidOperationException}.
 */
@Test(expected = InvalidOperationException.class)
public void dropNonEmptyCatalog() throws TException {
  final String catalogName = "toBeDropped";
  client.createCatalog(new CatalogBuilder()
      .setName(catalogName)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catalogName))
      .build());
  // Put a database inside so the catalog is non-empty when we attempt the drop.
  final String databaseName = "dontDropMe";
  new DatabaseBuilder()
      .setName(databaseName)
      .setCatalogName(catalogName)
      .create(client, metaStore.getConf());
  client.dropCatalog(catalogName);
}
Usage of org.apache.hadoop.hive.metastore.api.Catalog in the Apache Hive project — class TestCatalogs, method alterNonExistentCatalog:
/**
 * Altering a catalog that does not exist must fail with {@link NoSuchObjectException}.
 */
@Test(expected = NoSuchObjectException.class)
public void alterNonExistentCatalog() throws TException {
  final String missingCatalog = "alter_no_such_catalog";
  Catalog replacement = new CatalogBuilder()
      .setName(missingCatalog)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(missingCatalog))
      .build();
  client.alterCatalog(missingCatalog, replacement);
}
Usage of org.apache.hadoop.hive.metastore.api.Catalog in the Apache Hive project — class TestDatabases, method databasesInCatalogs:
/**
 * Verifies that databases are scoped to their catalog: creation (with default and explicit
 * locations), fetching, listing, pattern matching, and dropping all operate per-catalog.
 *
 * NOTE(review): the exact database counts asserted below (3, 5, 1, 0) assume a fixed set of
 * pre-existing databases in the test metastore — confirm against the test fixture if they drift.
 */
@Test
public void databasesInCatalogs() throws TException, URISyntaxException {
String catName = "mycatalog";
Catalog cat = new CatalogBuilder().setName(catName).setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)).build();
client.createCatalog(cat);
String[] dbNames = { "db1", "db9" };
Database[] dbs = new Database[2];
// For this one don't specify a location to make sure it gets put in the catalog directory
dbs[0] = new DatabaseBuilder().setName(dbNames[0]).setCatalogName(catName).create(client, metaStore.getConf());
// For the second one, explicitly set a location to make sure it ends up in the specified place.
String db1Location = MetaStoreTestUtils.getTestWarehouseDir(dbNames[1]);
dbs[1] = new DatabaseBuilder().setName(dbNames[1]).setCatalogName(catName).setLocation(db1Location).create(client, metaStore.getConf());
Database fetched = client.getDatabase(catName, dbNames[0]);
// Default location is <catalog location>/<dbName>.db; File.toURI() on the directory yields a
// trailing slash, so "/" is appended to the fetched URI to make the two comparable.
String expectedLocation = new File(cat.getLocationUri(), dbNames[0] + ".db").toURI().toString();
Assert.assertEquals(expectedLocation, fetched.getLocationUri() + "/");
String db0Location = new URI(fetched.getLocationUri()).getPath();
File dir = new File(db0Location);
// The database directory must actually have been created on disk.
Assert.assertTrue(dir.exists() && dir.isDirectory());
fetched = client.getDatabase(catName, dbNames[1]);
// The explicitly-located database must report (and have created) exactly that location.
Assert.assertEquals(new File(db1Location).toURI().toString(), fetched.getLocationUri() + "/");
dir = new File(new URI(fetched.getLocationUri()).getPath());
Assert.assertTrue(dir.exists() && dir.isDirectory());
// New catalog: db1, db9, plus the catalog's own default database.
Set<String> fetchedDbs = new HashSet<>(client.getAllDatabases(catName));
Assert.assertEquals(3, fetchedDbs.size());
for (String dbName : dbNames) Assert.assertTrue(fetchedDbs.contains(dbName));
// Catalog-less listing operates on the default catalog.
fetchedDbs = new HashSet<>(client.getAllDatabases());
Assert.assertEquals(5, fetchedDbs.size());
Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME));
// Intentionally using the deprecated method to make sure it returns correct results.
// NOTE(review): this call looks identical to the one above in this excerpt — presumably the
// deprecated overload shares the signature; verify against the client interface.
fetchedDbs = new HashSet<>(client.getAllDatabases());
Assert.assertEquals(5, fetchedDbs.size());
Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME));
// Pattern "d*" in the new catalog matches db1, db9 and the default database.
fetchedDbs = new HashSet<>(client.getDatabases(catName, "d*"));
Assert.assertEquals(3, fetchedDbs.size());
for (String dbName : dbNames) Assert.assertTrue(fetchedDbs.contains(dbName));
// Same pattern against the default catalog matches only its default database.
fetchedDbs = new HashSet<>(client.getDatabases("d*"));
Assert.assertEquals(1, fetchedDbs.size());
Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME));
// Intentionally using the deprecated method to make sure it returns correct results.
fetchedDbs = new HashSet<>(client.getDatabases("d*"));
Assert.assertEquals(1, fetchedDbs.size());
Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME));
// "*1" in the new catalog matches only db1.
fetchedDbs = new HashSet<>(client.getDatabases(catName, "*1"));
Assert.assertEquals(1, fetchedDbs.size());
Assert.assertTrue(fetchedDbs.contains(dbNames[0]));
// "*9" against the default catalog matches nothing — db9 lives in the new catalog.
fetchedDbs = new HashSet<>(client.getDatabases("*9"));
Assert.assertEquals(0, fetchedDbs.size());
// Intentionally using the deprecated method to make sure it returns correct results.
fetchedDbs = new HashSet<>(client.getDatabases("*9"));
Assert.assertEquals(0, fetchedDbs.size());
// A pattern matching no database at all returns an empty result rather than failing.
fetchedDbs = new HashSet<>(client.getDatabases(catName, "*x"));
Assert.assertEquals(0, fetchedDbs.size());
// Check that dropping database from wrong catalog fails
try {
client.dropDatabase(dbNames[0], true, false, false);
Assert.fail();
} catch (NoSuchObjectException e) {
// NOP — expected: db1 does not exist in the default catalog.
}
// Check that dropping database from wrong catalog fails
try {
// Intentionally using deprecated method
client.dropDatabase(dbNames[0], true, false, false);
Assert.fail();
} catch (NoSuchObjectException e) {
// NOP — expected, same as above via the deprecated overload.
}
// Drop them from the proper catalog
client.dropDatabase(catName, dbNames[0], true, false, false);
dir = new File(db0Location);
// Dropping with deleteData=true must remove the directory from disk.
Assert.assertFalse(dir.exists());
client.dropDatabase(catName, dbNames[1], true, false, false);
dir = new File(db1Location);
Assert.assertFalse(dir.exists());
// Only the catalog's own default database remains.
fetchedDbs = new HashSet<>(client.getAllDatabases(catName));
Assert.assertEquals(1, fetchedDbs.size());
}
Aggregations