use of org.apache.hadoop.hive.metastore.api.Catalog in project hive by apache.
the class TestReplicationScenariosExternalTables method differentCatalogIncrementalReplication.
@Test
public void differentCatalogIncrementalReplication() throws Throwable {
    // Create the catalog
    Catalog catalog = new Catalog();
    catalog.setName("spark");
    Warehouse wh = new Warehouse(conf);
    catalog.setLocationUri(wh.getWhRootExternal().toString() + File.separator + catalog.getName());
    catalog.setDescription("Non-hive catalog");
    Hive.get(primary.hiveConf).getMSC().createCatalog(catalog);

    // Create a database and table in the spark catalog
    String sparkDbName = "src_spark";
    Database sparkdb = new Database();
    sparkdb.setCatalogName("spark");
    sparkdb.setName(sparkDbName);
    Hive.get(primary.hiveConf).getMSC().createDatabase(sparkdb);

    SerDeInfo serdeInfo = new SerDeInfo("LBCSerDe", LazyBinaryColumnarSerDe.class.getCanonicalName(), new HashMap<String, String>());
    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(1);
    cols.add(new FieldSchema("place", serdeConstants.STRING_TYPE_NAME, ""));
    StorageDescriptor sd = new StorageDescriptor(cols, null,
        "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
        "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat",
        false, 0, serdeInfo, null, null, null);
    Map<String, String> tableParameters = new HashMap<String, String>();
    Table sparkTable = new Table("mgt1", sparkDbName, "", 0, 0, 0, sd, null, tableParameters, "", "", "");
    sparkTable.setCatName("spark");
    Hive.get(primary.hiveConf).getMSC().createTable(sparkTable);

    // Create the same database in the hive catalog, marked as a replication source
    Map<String, String> params = new HashMap<>();
    params.put(SOURCE_OF_REPLICATION, "1");
    Database hiveDb = new Database();
    hiveDb.setCatalogName("hive");
    hiveDb.setName(sparkDbName);
    hiveDb.setParameters(params);
    Hive.get(primary.hiveConf).getMSC().createDatabase(hiveDb);
    primary.dump(sparkDbName);

    // Tables in the spark catalog are not replicated during bootstrap
    replica.load(replicatedDbName, sparkDbName)
        .run("use " + replicatedDbName)
        .run("show tables like 'mgt1'")
        .verifyResult(null);

    Path externalTableLocation = new Path("/" + testName.getMethodName() + "/t1/");
    DistributedFileSystem fs = primary.miniDFSCluster.getFileSystem();
    fs.mkdirs(externalTableLocation, new FsPermission("777"));

    // Create another table in the spark catalog
    sparkTable = new Table("mgt2", sparkDbName, "", 0, 0, 0, sd, null, tableParameters, "", "", "");
    sparkTable.setCatName("spark");
    Hive.get(primary.hiveConf).getMSC().createTable(sparkTable);

    // Incremental load shouldn't copy any events from the spark catalog
    primary.dump(sparkDbName);
    replica.load(replicatedDbName, sparkDbName)
        .run("use " + replicatedDbName)
        .run("show tables like 'mgt1'")
        .verifyResult(null)
        .run("show tables like 'mgt2'")
        .verifyResult(null);

    primary.run("drop database if exists " + sparkDbName + " cascade");
}
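Note that the final drop database statement runs as HiveQL in the session's default (hive) catalog, so the spark-catalog database and the catalog itself appear to be left behind. A minimal cleanup sketch using the same metastore client, assuming the catalog-aware dropDatabase overload and dropCatalog are available on the client (both are hypothetical here, not part of the test above):

// Hypothetical cleanup: a catalog must be emptied before it can be dropped,
// so remove the spark-catalog database first, then the catalog itself.
IMetaStoreClient msc = Hive.get(primary.hiveConf).getMSC();
msc.dropDatabase("spark", "src_spark", true /* deleteData */, true /* ignoreUnknownDb */, true /* cascade */);
msc.dropCatalog("spark");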
use of org.apache.hadoop.hive.metastore.api.Catalog in project hive by apache.
the class TestHiveMetastoreTransformer method createTableWithCapabilities.
private Table createTableWithCapabilities(Map<String, Object> props) throws Exception {
    String catalog = (String) props.getOrDefault("CATALOG", MetaStoreUtils.getDefaultCatalog(conf));
    String dbName = (String) props.getOrDefault("DBNAME", "simpdb");
    String tblName = (String) props.getOrDefault("TBLNAME", "test_table");
    TableType type = (TableType) props.getOrDefault("TBLTYPE", TableType.MANAGED_TABLE);
    int buckets = ((Integer) props.getOrDefault("BUCKETS", -1)).intValue();
    String properties = (String) props.getOrDefault("PROPERTIES", "");
    String location = (String) (props.get("LOCATION"));
    boolean dropDb = ((Boolean) props.getOrDefault("DROPDB", Boolean.TRUE)).booleanValue();
    int partitionCount = ((Integer) props.getOrDefault("PARTITIONS", 0)).intValue();
    final String typeName = "Person";

    if (type == TableType.EXTERNAL_TABLE) {
        if (!properties.contains("EXTERNAL=TRUE")) {
            // String.concat returns a new string; the result must be reassigned
            properties = properties.concat(";EXTERNAL=TRUE;");
        }
    }

    Map<String, String> table_params = new HashMap<>();
    if (properties.length() > 0) {
        String[] propArray = properties.split(";");
        for (String prop : propArray) {
            String[] keyValue = prop.split("=");
            table_params.put(keyValue[0], keyValue[1]);
        }
    }

    // Fetch the catalog if it exists; otherwise create it
    Catalog cat = null;
    try {
        cat = client.getCatalog(catalog);
    } catch (NoSuchObjectException e) {
        LOG.info("Catalog does not exist, creating a new one");
        try {
            if (cat == null) {
                cat = new Catalog();
                cat.setName(catalog.toLowerCase());
                Warehouse wh = new Warehouse(conf);
                cat.setLocationUri(wh.getWhRootExternal().toString() + File.separator + catalog);
                cat.setDescription("Non-hive catalog");
                client.createCatalog(cat);
                LOG.info("Catalog " + catalog + " created");
            }
        } catch (Exception ce) {
            LOG.warn("Catalog " + catalog + " could not be created");
        }
    } catch (Exception e) {
        LOG.error("Creation of a new catalog failed, aborting test");
        throw e;
    }

    // Clean up any leftovers from previous runs
    try {
        client.dropTable(dbName, tblName);
    } catch (Exception e) {
        LOG.info("Drop table failed for " + dbName + "." + tblName);
    }
    try {
        if (dropDb)
            silentDropDatabase(dbName);
    } catch (Exception e) {
        LOG.info("Drop database failed for " + dbName);
    }
    if (dropDb)
        new DatabaseBuilder().setName(dbName).setCatalogName(catalog).create(client, conf);

    try {
        client.dropType(typeName);
    } catch (Exception e) {
        LOG.info("Drop type failed for " + typeName);
    }
    Type typ1 = new Type();
    typ1.setName(typeName);
    typ1.setFields(new ArrayList<>(2));
    typ1.getFields().add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
    typ1.getFields().add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
    client.createType(typ1);

    TableBuilder builder = new TableBuilder()
        .setCatName(catalog)
        .setDbName(dbName)
        .setTableName(tblName)
        .setCols(typ1.getFields())
        .setType(type.name())
        .setLocation(location)
        .setNumBuckets(buckets)
        .setTableParams(table_params)
        .addBucketCol("name")
        .addStorageDescriptorParam("test_param_1", "Use this for comments etc");
    if (location != null)
        builder.setLocation(location);
    if (buckets > 0)
        builder.setNumBuckets(buckets).addBucketCol("name");
    if (partitionCount > 0) {
        builder.addPartCol("partcol", "string");
    }
    if (type == TableType.MANAGED_TABLE) {
        if (properties.contains("transactional=true") && !properties.contains("transactional_properties=insert_only")) {
            builder.setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat");
            builder.setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
            builder.setSerdeLib("org.apache.hadoop.hive.ql.io.orc.OrcSerde");
            builder.addStorageDescriptorParam("inputFormat", "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat");
            builder.addStorageDescriptorParam("outputFormat", "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
        }
    }
    Table tbl = builder.create(client, conf);
    LOG.info("Table " + tbl.getTableName() + " created: type=" + tbl.getTableType());

    if (partitionCount > 0) {
        List<Partition> partitions = new ArrayList<>();
        List<List<String>> partValues = new ArrayList<>();
        for (int i = 1; i <= partitionCount; i++) {
            partValues.add(Lists.newArrayList("" + i));
        }
        for (List<String> vals : partValues) {
            addPartition(client, tbl, vals);
        }
    }

    if (isThriftClient) {
        // createTable() above does not update the location in the 'tbl' object
        // when the client is a thrift client, and the code below relies on the
        // location being present in 'tbl' - so fetch the table from the metastore
        tbl = client.getTable(catalog, dbName, tblName);
        LOG.info("Fetched table " + tbl.getTableName() + " created: type=" + tbl.getTableType());
    }
    return tbl;
}
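The manual Catalog construction above can also be written with the metastore's CatalogBuilder helper, which the last example on this page uses; a minimal sketch, assuming CatalogBuilder exposes a setDescription method alongside the setName and setLocation calls shown later:

// Equivalent construction via CatalogBuilder; the fields mirror the
// setName/setLocationUri/setDescription calls in the snippet above.
Catalog cat = new CatalogBuilder()
    .setName(catalog.toLowerCase())
    .setLocation(wh.getWhRootExternal().toString() + File.separator + catalog)
    .setDescription("Non-hive catalog")
    .build();
client.createCatalog(cat);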
use of org.apache.hadoop.hive.metastore.api.Catalog in project hive by apache.
the class TestSchemaToolCatalogOps method alterCatalog.
@Test
public void alterCatalog() throws HiveMetaException, TException {
    String catName = "an_alterable_catalog";
    String location = "file:///tmp/an_alterable_catalog";
    String description = "description";
    String argsCreate = String.format("-createCatalog %s -catalogLocation \"%s\" -catalogDescription \"%s\"",
        catName, location, description);
    execute(new SchemaToolTaskCreateCatalog(), argsCreate);

    // Alter only the location; the description must be preserved
    location = "file:///tmp/somewhere_else";
    String argsAlter1 = String.format("-alterCatalog %s -catalogLocation \"%s\"", catName, location);
    execute(new SchemaToolTaskAlterCatalog(), argsAlter1);
    Catalog cat = client.getCatalog(catName);
    Assert.assertEquals(location, cat.getLocationUri());
    Assert.assertEquals(description, cat.getDescription());

    // Alter only the description; the location must be preserved
    description = "a better description";
    String argsAlter2 = String.format("-alterCatalog %s -catalogDescription \"%s\"", catName, description);
    execute(new SchemaToolTaskAlterCatalog(), argsAlter2);
    cat = client.getCatalog(catName);
    Assert.assertEquals(location, cat.getLocationUri());
    Assert.assertEquals(description, cat.getDescription());

    // Alter both location and description at once
    location = "file:///tmp/a_third_location";
    description = "best description yet";
    String argsAlter3 = String.format("-alterCatalog %s -catalogLocation \"%s\" -catalogDescription \"%s\"",
        catName, location, description);
    execute(new SchemaToolTaskAlterCatalog(), argsAlter3);
    cat = client.getCatalog(catName);
    Assert.assertEquals(location, cat.getLocationUri());
    Assert.assertEquals(description, cat.getDescription());
}
use of org.apache.hadoop.hive.metastore.api.Catalog in project hive by apache.
the class TestSchemaToolCatalogOps method createCatalog.
@Test
public void createCatalog() throws HiveMetaException, TException {
    String catName = "my_test_catalog";
    String location = "file:///tmp/my_test_catalog";
    String description = "very descriptive";
    String argsCreate = String.format("-createCatalog %s -catalogLocation \"%s\" -catalogDescription \"%s\"",
        catName, location, description);
    execute(new SchemaToolTaskCreateCatalog(), argsCreate);

    Catalog cat = client.getCatalog(catName);
    Assert.assertEquals(location, cat.getLocationUri());
    Assert.assertEquals(description, cat.getDescription());
}
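The SchemaTool task exercised here ends up producing the same catalog entry that a direct metastore-client call would; a rough programmatic equivalent, reusing only the thrift Catalog API already shown in the earlier examples on this page:

// Roughly equivalent to "-createCatalog my_test_catalog -catalogLocation ... -catalogDescription ...":
// build the thrift Catalog object and create it through the metastore client.
Catalog cat = new Catalog();
cat.setName("my_test_catalog");
cat.setLocationUri("file:///tmp/my_test_catalog");
cat.setDescription("very descriptive");
client.createCatalog(cat);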
use of org.apache.hadoop.hive.metastore.api.Catalog in project hive by apache.
the class TestTransactionalValidationListener method createCatalogs.
private void createCatalogs() throws Exception {
    String[] catNames = { "spark", "myapp" };
    String[] location = { MetaStoreTestUtils.getTestWarehouseDir("spark"), MetaStoreTestUtils.getTestWarehouseDir("myapp") };
    for (int i = 0; i < catNames.length; i++) {
        Catalog cat = new CatalogBuilder()
            .setName(catNames[i])
            .setLocation(location[i])
            .build();
        client.createCatalog(cat);
        // Creating a catalog should also create its root directory on disk
        File dir = new File(cat.getLocationUri());
        Assert.assertTrue(dir.exists() && dir.isDirectory());
    }
}