Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
Example: class TestHCatOutputFormat, method initTable.
/**
 * Creates a fresh test database and a partitioned RCFile-backed table for the
 * HCatOutputFormat tests, then creates one partition directory on disk.
 *
 * <p>The table has a single data column ("data_column") and a single partition
 * key ("colname"), plus marker parameters ("test_param_1", "hcat.testarg") that
 * other tests read back.
 *
 * @throws Exception if database/table creation or filesystem setup fails
 */
private void initTable() throws Exception {
  try {
    client.dropTable(dbName, tblName);
  } catch (Exception ignored) {
    // Best-effort cleanup: the table may not exist on a fresh run.
  }
  try {
    client.dropDatabase(dbName);
  } catch (Exception ignored) {
    // Best-effort cleanup: the database may not exist on a fresh run.
  }
  client.createDatabase(new Database(dbName, "", null, null));
  // The metastore must have assigned a default warehouse location.
  assertNotNull((client.getDatabase(dbName).getLocationUri()));
  List<FieldSchema> fields = new ArrayList<FieldSchema>();
  fields.add(new FieldSchema("colname", serdeConstants.STRING_TYPE_NAME, ""));
  Table tbl = new Table();
  tbl.setDbName(dbName);
  tbl.setTableName(tblName);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(Lists.newArrayList(new FieldSchema("data_column", serdeConstants.STRING_TYPE_NAME, "")));
  tbl.setSd(sd);
  sd.setInputFormat(RCFileInputFormat.class.getName());
  sd.setOutputFormat(RCFileOutputFormat.class.getName());
  sd.setParameters(new HashMap<String, String>());
  sd.getParameters().put("test_param_1", "Use this for comments etc");
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
  // "colname" is the partition key, distinct from the data column above.
  tbl.setPartitionKeys(fields);
  Map<String, String> tableParams = new HashMap<String, String>();
  tableParams.put("hcat.testarg", "testArgValue");
  tbl.setParameters(tableParams);
  client.createTable(tbl);
  // Pre-create the partition directory "colname=p1" under the table location.
  Path tblPath = new Path(client.getTable(dbName, tblName).getSd().getLocation());
  assertTrue(tblPath.getFileSystem(hiveConf).mkdirs(new Path(tblPath, "colname=p1")));
}
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
Example: class ObjectStore, method getJDODatabase.
/**
 * Loads the database named {@code name} from the JDO store and converts it to
 * a thrift {@link Database} object.
 *
 * <p>Runs the lookup inside a transaction; if the commit does not succeed the
 * transaction is rolled back before the method returns or propagates.
 *
 * @param name database name to look up
 * @return the thrift representation of the stored database
 * @throws NoSuchObjectException if no database with that name exists
 */
public Database getJDODatabase(String name) throws NoSuchObjectException {
  MDatabase stored = null;
  boolean success = false;
  try {
    openTransaction();
    stored = getMDatabase(name);
    success = commitTransaction();
  } finally {
    if (!success) {
      rollbackTransaction();
    }
  }
  Database result = new Database();
  result.setName(stored.getName());
  result.setDescription(stored.getDescription());
  result.setLocationUri(stored.getLocationUri());
  result.setParameters(convertMap(stored.getParameters()));
  result.setOwnerName(stored.getOwnerName());
  // Blank owner-type strings are treated the same as absent ones.
  String ownerTypeName =
      org.apache.commons.lang.StringUtils.defaultIfBlank(stored.getOwnerType(), null);
  result.setOwnerType(ownerTypeName == null ? null : PrincipalType.valueOf(ownerTypeName));
  return result;
}
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
Example: class SharedCache, method refreshDatabasesInCache.
/**
 * Replaces the entire database cache with the given list under the write lock.
 *
 * <p>If the cache was marked dirty by a concurrent writer (the dirty flag is
 * consumed via compare-and-set), the stale {@code databases} list is discarded
 * and the cache is left untouched.
 *
 * @param databases the fresh list of databases to populate the cache with
 */
public void refreshDatabasesInCache(List<Database> databases) {
  // Acquire the lock BEFORE the try block: if lock() itself failed inside the
  // try, the finally clause would call unlock() on a lock we never held.
  cacheLock.writeLock().lock();
  try {
    if (isDatabaseCacheDirty.compareAndSet(true, false)) {
      LOG.debug("Skipping database cache update; the database list we have is dirty.");
      return;
    }
    databaseCache.clear();
    for (Database db : databases) {
      addDatabaseToCache(db);
    }
  } finally {
    cacheLock.writeLock().unlock();
  }
}
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
Example: class TestHiveMetaStore, method testTableDatabase.
/**
 * Verifies that a table created in a database with an explicit location URI is
 * placed directly under that database's location (table path's parent equals
 * the database location).
 */
@Test
public void testTableDatabase() throws Exception {
  String dbName = "testDb";
  String tblName_1 = "testTbl_1";
  try {
    silentDropDatabase(dbName);
    Database db = new Database();
    db.setName(dbName);
    // Point the database at a non-default location so the containment check is meaningful.
    String dbLocation = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "_testDB_table_create_";
    db.setLocationUri(dbLocation);
    client.createDatabase(db);
    // Re-fetch so we compare against the location the metastore actually stored.
    db = client.getDatabase(dbName);
    Table tbl = new TableBuilder().setDbName(dbName).setTableName(tblName_1).addCol("name", ColumnType.STRING_TYPE_NAME).addCol("income", ColumnType.INT_TYPE_NAME).build();
    client.createTable(tbl);
    tbl = client.getTable(dbName, tblName_1);
    Path path = new Path(tbl.getSd().getLocation());
    System.err.println("Table's location " + path + ", Database's location " + db.getLocationUri());
    assertEquals("Table location is not a subset of the database location", path.getParent().toString(), db.getLocationUri());
  } catch (Exception e) {
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testTableDatabase() failed.");
    throw e;
  } finally {
    silentDropDatabase(dbName);
  }
}
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
Example: class TestHiveMetaStore, method testRenamePartition.
/**
 * Verifies that renaming a partition moves it to the new value set (old values
 * no longer resolve, new values do), preserves its parameters, serde
 * parameters and bucket count, and relocates its storage descriptor — in both
 * directions (rename away and rename back).
 */
@Test
public void testRenamePartition() throws Throwable {
  try {
    String dbName = "compdb1";
    String tblName = "comptbl1";
    List<String> vals = new ArrayList<>(2);
    vals.add("2011-07-11");
    vals.add("8");
    String part_path = "/ds=2011-07-11/hr=8";
    List<String> tmp_vals = new ArrayList<>(2);
    tmp_vals.add("tmp_2011-07-11");
    tmp_vals.add("-8");
    String part2_path = "/ds=tmp_2011-07-11/hr=-8";
    client.dropTable(dbName, tblName);
    silentDropDatabase(dbName);
    Database db = new Database();
    db.setName(dbName);
    db.setDescription("Rename Partition Test database");
    client.createDatabase(db);
    Table tbl = new TableBuilder().setDbName(dbName).setTableName(tblName).addCol("name", ColumnType.STRING_TYPE_NAME).addCol("income", ColumnType.INT_TYPE_NAME).addPartCol("ds", ColumnType.STRING_TYPE_NAME).addPartCol("hr", ColumnType.INT_TYPE_NAME).build();
    client.createTable(tbl);
    if (isThriftClient) {
      // the createTable() above does not update the location in the 'tbl'
      // object when the client is a thrift client and the code below relies
      // on the location being present in the 'tbl' object - so get the table
      // from the metastore
      tbl = client.getTable(dbName, tblName);
    }
    // Build the initial partition with distinctive parameters, serde
    // parameters and bucket count that the rename must preserve.
    Partition part = new Partition();
    part.setDbName(dbName);
    part.setTableName(tblName);
    part.setValues(vals);
    part.setParameters(new HashMap<>());
    part.setSd(tbl.getSd().deepCopy());
    part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
    part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
    part.getParameters().put("retention", "10");
    part.getSd().setNumBuckets(12);
    part.getSd().getSerdeInfo().getParameters().put("abc", "1");
    client.add_partition(part);

    // Rename vals -> tmp_vals: old values must vanish, new values must carry
    // the metadata and the relocated storage descriptor.
    part.setValues(tmp_vals);
    client.renamePartition(dbName, tblName, vals, part);
    assertPartitionAbsent(dbName, tblName, vals);
    assertRenamedPartition(dbName, tblName, tmp_vals, tbl.getSd().getLocation() + part2_path);

    // Rename back tmp_vals -> vals and check the same invariants hold.
    part.setValues(vals);
    client.renamePartition(dbName, tblName, tmp_vals, part);
    assertPartitionAbsent(dbName, tblName, tmp_vals);
    assertRenamedPartition(dbName, tblName, vals, tbl.getSd().getLocation() + part_path);

    client.dropTable(dbName, tblName);
    client.dropDatabase(dbName);
  } catch (Exception e) {
    System.err.println(StringUtils.stringifyException(e));
    System.err.println("testRenamePartition() failed.");
    throw e;
  }
}

/** Asserts that looking up a partition by {@code partVals} fails with NoSuchObjectException. */
private void assertPartitionAbsent(String dbName, String tblName, List<String> partVals) throws Exception {
  boolean exceptionThrown = false;
  try {
    client.getPartition(dbName, tblName, partVals);
  } catch (Exception e) {
    assertEquals("partition should not have existed", NoSuchObjectException.class, e.getClass());
    exceptionThrown = true;
  }
  assertTrue("Expected NoSuchObjectException", exceptionThrown);
}

/**
 * Fetches the partition at {@code partVals} and asserts the rename preserved
 * its parameters, serde parameters and bucket count, and moved its location
 * to {@code expectedLocation}. (JUnit order: message, expected, actual.)
 */
private void assertRenamedPartition(String dbName, String tblName, List<String> partVals, String expectedLocation) throws Exception {
  Partition renamed = client.getPartition(dbName, tblName, partVals);
  assertEquals("couldn't rename partition", "10", renamed.getParameters().get("retention"));
  assertEquals("couldn't rename partition", "1", renamed.getSd().getSerdeInfo().getParameters().get("abc"));
  assertEquals("couldn't rename partition", 12, renamed.getSd().getNumBuckets());
  assertEquals("new partition sd matches", expectedLocation, renamed.getSd().getLocation());
}
Aggregations