Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
The class DbNotificationListener, method onCreateDatabase.
/**
 * Records a CREATE_DATABASE notification for the newly created database.
 *
 * @param dbEvent database event describing the created database
 * @throws MetaException if the notification cannot be processed
 */
@Override
public void onCreateDatabase(CreateDatabaseEvent dbEvent) throws MetaException {
    Database database = dbEvent.getDatabase();
    // Build the serialized message first, then wrap it in a notification event.
    String message = msgFactory.buildCreateDatabaseMessage(database).toString();
    NotificationEvent notification =
        new NotificationEvent(0, now(), EventType.CREATE_DATABASE.toString(), message);
    notification.setDbName(database.getName());
    process(notification, dbEvent);
}
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
The class DbNotificationListener, method onDropDatabase.
/**
 * Records a DROP_DATABASE notification for the database being dropped.
 *
 * @param dbEvent database event describing the dropped database
 * @throws MetaException if the notification cannot be processed
 */
@Override
public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
    Database database = dbEvent.getDatabase();
    // Build the serialized message first, then wrap it in a notification event.
    String message = msgFactory.buildDropDatabaseMessage(database).toString();
    NotificationEvent notification =
        new NotificationEvent(0, now(), EventType.DROP_DATABASE.toString(), message);
    notification.setDbName(database.getName());
    process(notification, dbEvent);
}
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
The class TestAcidTableSerializer, method testSerializeDeserialize.
/**
 * Round-trips an AcidTable through {@code AcidTableSerializer.encode}/{@code decode} and
 * verifies every field survives the cycle, including the write id, bucket count, table type,
 * and the attached metastore Table.
 */
@Test
public void testSerializeDeserialize() throws Exception {
    Database database = StreamingTestUtils.databaseBuilder(new File("/tmp")).name("db_1").build();
    Table table = StreamingTestUtils.tableBuilder(database)
        .name("table_1")
        .addColumn("one", "string")
        .addColumn("two", "integer")
        .partitionKeys("partition")
        .addPartition("p1")
        .buckets(10)
        .build();
    AcidTable acidTable = new AcidTable("db_1", "table_1", true, TableType.SINK);
    acidTable.setTable(table);
    acidTable.setWriteId(42L);

    // Encode then decode; the decoded instance must match the original field-for-field.
    // (Removed a stray System.out.println of the encoded form — debug noise in test output.)
    String encoded = AcidTableSerializer.encode(acidTable);
    AcidTable decoded = AcidTableSerializer.decode(encoded);

    assertThat(decoded.getDatabaseName(), is("db_1"));
    assertThat(decoded.getTableName(), is("table_1"));
    assertThat(decoded.createPartitions(), is(true));
    assertThat(decoded.getOutputFormatName(), is("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"));
    assertThat(decoded.getTotalBuckets(), is(10));
    // Qualified name is upper-cased by AcidTable — assumes that is intentional; TODO confirm.
    assertThat(decoded.getQualifiedName(), is("DB_1.TABLE_1"));
    assertThat(decoded.getWriteId(), is(42L));
    assertThat(decoded.getTableType(), is(TableType.SINK));
    assertThat(decoded.getTable(), is(table));
}
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
The class TestObjectStore, method testPartitionOps.
/**
 * Tests partition operations: add, list, count (with and without a filter), and drop
 * partitions on a table partitioned by (Country, State).
 */
@Test
public void testPartitionOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
    // Set up a database and a two-level partitioned managed table.
    Database database = new Database(DB1, "description", "locationurl", null);
    objectStore.createDatabase(database);
    StorageDescriptor storageDescriptor = createFakeSd("location");
    HashMap<String, String> tableParams = new HashMap<>();
    tableParams.put("EXTERNAL", "false");
    FieldSchema countryKey = new FieldSchema("Country", ColumnType.STRING_TYPE_NAME, "");
    FieldSchema stateKey = new FieldSchema("State", ColumnType.STRING_TYPE_NAME, "");
    Table table = new Table(TABLE1, DB1, "owner", 1, 2, 3, storageDescriptor, Arrays.asList(countryKey, stateKey), tableParams, null, null, "MANAGED_TABLE");
    objectStore.createTable(table);

    // Add two partitions with distinct create times so they can be told apart below.
    HashMap<String, String> partitionParams = new HashMap<>();
    partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true");
    List<String> caValues = Arrays.asList("US", "CA");
    Partition caPartition = new Partition(caValues, DB1, TABLE1, 111, 111, storageDescriptor, partitionParams);
    objectStore.addPartition(caPartition);
    List<String> maValues = Arrays.asList("US", "MA");
    Partition maPartition = new Partition(maValues, DB1, TABLE1, 222, 222, storageDescriptor, partitionParams);
    objectStore.addPartition(maPartition);

    // Both partitions should be listed, in insertion order.
    Deadline.startTimer("getPartition");
    List<Partition> retrieved = objectStore.getPartitions(DB1, TABLE1, 10);
    Assert.assertEquals(2, retrieved.size());
    Assert.assertEquals(111, retrieved.get(0).getCreateTime());
    Assert.assertEquals(222, retrieved.get(1).getCreateTime());

    // An empty filter matches everything; the country filter matches both partitions.
    int numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "");
    Assert.assertEquals(retrieved.size(), numPartitions);
    numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "country = \"US\"");
    Assert.assertEquals(2, numPartitions);

    // Dropping one partition leaves only the other behind.
    objectStore.dropPartition(DB1, TABLE1, caValues);
    retrieved = objectStore.getPartitions(DB1, TABLE1, 10);
    Assert.assertEquals(1, retrieved.size());
    Assert.assertEquals(222, retrieved.get(0).getCreateTime());

    // Clean up: drop remaining partition, table, and database.
    objectStore.dropPartition(DB1, TABLE1, maValues);
    objectStore.dropTable(DB1, TABLE1);
    objectStore.dropDatabase(DB1);
}
Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
The class TestObjectStore, method testDirectSqlErrorMetrics.
/**
 * Verifies that the direct-SQL error counter is incremented only when the direct-SQL path
 * of a GetDbHelper throws, and not when it succeeds.
 */
@Test
public void testDirectSqlErrorMetrics() throws Exception {
Configuration conf = MetastoreConf.newMetastoreConf();
MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true);
Metrics.initialize(conf);
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES, "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " + "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter");
// recall setup so that we get an object store with the metrics initalized
setUp();
Counter directSqlErrors = Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS);
// Case 1: both the SQL and JDO paths succeed (return null without throwing),
// so no error should be counted.
objectStore.new GetDbHelper("foo", true, true) {
@Override
protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
return null;
}
@Override
protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException, NoSuchObjectException {
return null;
}
}.run(false);
Assert.assertEquals(0, directSqlErrors.getCount());
// Case 2: the direct-SQL path throws; the helper presumably falls back to JDO
// and the error counter should now read 1.
objectStore.new GetDbHelper("foo", true, true) {
@Override
protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
throw new RuntimeException();
}
@Override
protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException, NoSuchObjectException {
return null;
}
}.run(false);
Assert.assertEquals(1, directSqlErrors.getCount());
}
Aggregations