use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
the class TestHBaseImport method parallelOdd.
// Same as the test above except we create 9 of everything instead of 10. This matters
// because with a batch size of 2 the previous test guarantees 10 / 2 = 5, meaning we'll
// have 5 writes on the partition queue with exactly 2 entries each. In this test we handle
// the case where the last entry in the queue has fewer partitions (see the batching sketch
// after this test).
@Test
public void parallelOdd() throws Exception {
  int parallelFactor = 9;
  RawStore rdbms;
  rdbms = new ObjectStore();
  rdbms.setConf(conf);
  String[] dbNames = new String[] { "oddparalleldb1" };
  int now = (int) (System.currentTimeMillis() / 1000);
  for (int i = 0; i < dbNames.length; i++) {
    rdbms.createDatabase(
        new Database(dbNames[i], "no description", "file:/tmp", emptyParameters));
    List<FieldSchema> cols = new ArrayList<>();
    cols.add(new FieldSchema("col1", "int", "nocomment"));
    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
        serde, null, null, emptyParameters);
    List<FieldSchema> partCols = new ArrayList<>();
    partCols.add(new FieldSchema("region", "string", ""));
    for (int j = 0; j < parallelFactor; j++) {
      rdbms.createTable(new Table("t" + j, dbNames[i], "me", now, now, 0, sd, partCols,
          emptyParameters, null, null, null));
      for (int k = 0; k < parallelFactor; k++) {
        StorageDescriptor psd = new StorageDescriptor(sd);
        psd.setLocation("file:/tmp/region=" + k);
        Partition part = new Partition(Arrays.asList("p" + k), dbNames[i], "t" + j, now, now,
            psd, emptyParameters);
        rdbms.addPartition(part);
      }
    }
  }
  HBaseImport importer = new HBaseImport("-p", "2", "-b", "2", "-d", dbNames[0]);
  importer.setConnections(rdbms, store);
  importer.run();
  for (int i = 0; i < dbNames.length; i++) {
    Database db = store.getDatabase(dbNames[i]);
    Assert.assertNotNull(db);
    for (int j = 0; j < parallelFactor; j++) {
      Table table = store.getTable(db.getName(), "t" + j);
      Assert.assertNotNull(table);
      Assert.assertEquals(now, table.getLastAccessTime());
      Assert.assertEquals("input", table.getSd().getInputFormat());
      for (int k = 0; k < parallelFactor; k++) {
        Partition part = store.getPartition(dbNames[i], "t" + j, Arrays.asList("p" + k));
        Assert.assertNotNull(part);
        Assert.assertEquals("file:/tmp/region=" + k, part.getSd().getLocation());
      }
      Assert.assertEquals(parallelFactor, store.getPartitions(dbNames[i], "t" + j, -1).size());
    }
    Assert.assertEquals(parallelFactor, store.getAllTables(dbNames[i]).size());
  }
}
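A minimal sketch of the batching arithmetic the comment above refers to: splitting 9 partitions into batches of at most 2 yields four full batches and one final batch with a single entry. The helper below is illustrative only and is not part of HBaseImport.

import java.util.ArrayList;
import java.util.List;

public class BatchSketch {

  // Split a list of items into consecutive batches of at most batchSize elements.
  static <T> List<List<T>> toBatches(List<T> items, int batchSize) {
    List<List<T>> batches = new ArrayList<>();
    for (int start = 0; start < items.size(); start += batchSize) {
      batches.add(items.subList(start, Math.min(start + batchSize, items.size())));
    }
    return batches;
  }

  public static void main(String[] args) {
    List<String> partitions = new ArrayList<>();
    for (int k = 0; k < 9; k++) {
      partitions.add("p" + k);
    }
    List<List<String>> batches = toBatches(partitions, 2);
    // With 9 partitions and a batch size of 2 we get 5 batches: 2, 2, 2, 2, 1.
    System.out.println(batches.size() + " batches, last batch has "
        + batches.get(batches.size() - 1).size() + " entry");
  }
}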
use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
the class TestHBaseStore method alterDb.
@Test
public void alterDb() throws Exception {
  String dbname = "mydb";
  Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters);
  store.createDatabase(db);
  db.setDescription("a description");
  store.alterDatabase(dbname, db);
  Database d = store.getDatabase(dbname);
  Assert.assertEquals(dbname, d.getName());
  Assert.assertEquals("a description", d.getDescription());
  Assert.assertEquals("file:///tmp", d.getLocationUri());
}
use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
the class TestHBaseStore method createDb.
@Test
public void createDb() throws Exception {
  String dbname = "mydb";
  Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters);
  store.createDatabase(db);
  Database d = store.getDatabase(dbname);
  Assert.assertEquals(dbname, d.getName());
  Assert.assertEquals("no description", d.getDescription());
  Assert.assertEquals("file:///tmp", d.getLocationUri());
}
use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
the class TestObjectStore method testPartitionOps.
/**
 * Tests partition operations.
 */
@Test
public void testPartitionOps() throws MetaException, InvalidObjectException,
    NoSuchObjectException, InvalidInputException {
  Database db1 = new Database(DB1, "description", "locationurl", null);
  objectStore.createDatabase(db1);
  StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0,
      new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
  HashMap<String, String> tableParams = new HashMap<String, String>();
  tableParams.put("EXTERNAL", "false");
  FieldSchema partitionKey1 = new FieldSchema("Country", serdeConstants.STRING_TYPE_NAME, "");
  FieldSchema partitionKey2 = new FieldSchema("State", serdeConstants.STRING_TYPE_NAME, "");
  Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd,
      Arrays.asList(partitionKey1, partitionKey2), tableParams, null, null, "MANAGED_TABLE");
  objectStore.createTable(tbl1);
  HashMap<String, String> partitionParams = new HashMap<String, String>();
  partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true");
  List<String> value1 = Arrays.asList("US", "CA");
  Partition part1 = new Partition(value1, DB1, TABLE1, 111, 111, sd, partitionParams);
  objectStore.addPartition(part1);
  List<String> value2 = Arrays.asList("US", "MA");
  Partition part2 = new Partition(value2, DB1, TABLE1, 222, 222, sd, partitionParams);
  objectStore.addPartition(part2);
  Deadline.startTimer("getPartition");
  List<Partition> partitions = objectStore.getPartitions(DB1, TABLE1, 10);
  Assert.assertEquals(2, partitions.size());
  Assert.assertEquals(111, partitions.get(0).getCreateTime());
  Assert.assertEquals(222, partitions.get(1).getCreateTime());
  int numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "");
  Assert.assertEquals(partitions.size(), numPartitions);
  numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "country = \"US\"");
  Assert.assertEquals(2, numPartitions);
  objectStore.dropPartition(DB1, TABLE1, value1);
  partitions = objectStore.getPartitions(DB1, TABLE1, 10);
  Assert.assertEquals(1, partitions.size());
  Assert.assertEquals(222, partitions.get(0).getCreateTime());
  objectStore.dropPartition(DB1, TABLE1, value2);
  objectStore.dropTable(DB1, TABLE1);
  objectStore.dropDatabase(DB1);
}
use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.
the class TestObjectStore method testDirectSqlErrorMetrics.
@Test
public void testDirectSqlErrorMetrics() throws Exception {
  HiveConf conf = new HiveConf();
  conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED, true);
  conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER,
      MetricsReporting.JSON_FILE.name() + "," + MetricsReporting.JMX.name());
  MetricsFactory.init(conf);
  CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance();
  objectStore.new GetDbHelper("foo", null, true, true) {

    @Override
    protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
      return null;
    }

    @Override
    protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx)
        throws MetaException, NoSuchObjectException {
      return null;
    }
  }.run(false);
  String json = metrics.dumpJson();
  MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER,
      MetricsConstant.DIRECTSQL_ERRORS, "");
  objectStore.new GetDbHelper("foo", null, true, true) {

    @Override
    protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
      throw new RuntimeException();
    }

    @Override
    protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx)
        throws MetaException, NoSuchObjectException {
      return null;
    }
  }.run(false);
  json = metrics.dumpJson();
  MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER,
      MetricsConstant.DIRECTSQL_ERRORS, 1);
}
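A minimal sketch of the fallback behavior this test exercises, assuming the GetHelper-style wrapper tries the direct-SQL path first and falls back to JDO while counting failures. The class, method, and counter names below are illustrative stand-ins, not the actual ObjectStore implementation or the MetricsConstant.DIRECTSQL_ERRORS counter.

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

// Illustrative stand-in for the ObjectStore.GetHelper pattern:
// try direct SQL first, fall back to JDO on error and count the failure.
public class FallbackSketch {

  // Hypothetical counter standing in for the directsql_errors metric.
  static final AtomicLong DIRECTSQL_ERRORS = new AtomicLong();

  static <T> T getWithFallback(Supplier<T> sqlResult, Supplier<T> jdoResult) {
    try {
      return sqlResult.get();
    } catch (RuntimeException e) {
      DIRECTSQL_ERRORS.incrementAndGet();
      return jdoResult.get();
    }
  }

  public static void main(String[] args) {
    // Direct SQL succeeds: the counter stays at 0.
    getWithFallback(() -> "db-from-sql", () -> "db-from-jdo");
    // Direct SQL throws: the counter goes to 1 and the JDO result is returned.
    String db = getWithFallback(() -> { throw new RuntimeException(); }, () -> "db-from-jdo");
    System.out.println(db + ", directsql errors = " + DIRECTSQL_ERRORS.get());
  }
}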