
Example 71 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

the class SmokeTest method runTest.

private void runTest(IMetaStoreClient client) throws TException {
    LOG.info("Starting smoke test");
    File dbDir = new File(System.getProperty("java.io.tmpdir"), "internal_smoke_test");
    if (!dbDir.mkdir()) {
        throw new RuntimeException("Unable to create direcotory " + dbDir.getAbsolutePath());
    }
    dbDir.deleteOnExit();
    LOG.info("Going to create database " + dbName);
    Database db = new DatabaseBuilder().setName(dbName).setLocation(dbDir.getAbsolutePath()).create(client, conf);
    LOG.info("Going to create table " + tableName);
    Table table = new TableBuilder()
            .inDb(db)
            .setTableName(tableName)
            .addCol("col1", ColumnType.INT_TYPE_NAME)
            .addCol("col2", ColumnType.TIMESTAMP_TYPE_NAME)
            .addPartCol("pcol1", ColumnType.STRING_TYPE_NAME)
            .create(client, conf);
    LOG.info("Going to create partition with value " + partValue);
    Partition part = new PartitionBuilder().inTable(table).addValue("val1").addToTable(client, conf);
    LOG.info("Going to list the partitions");
    List<Partition> parts = client.listPartitions(dbName, tableName, (short) -1);
    LOG.info("Fetched: { " + parts.toString() + "}");
    LOG.info("Going to drop database");
    client.dropDatabase(dbName, true, false, true);
    LOG.info("Completed smoke test");
}
Also used : DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) File(java.io.File)
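
Two creation styles for PartitionBuilder appear across these examples: addToTable(client, conf), which builds the partition and registers it with the metastore in one call (as in the smoke test above), and build(conf) followed by an explicit add_partition (as in Example 73 below). A minimal side-by-side sketch, assuming a client, conf, and partitioned table tbl already exist; the class and method names here are only for illustration:

// Sketch only: contrasts the two PartitionBuilder usages seen in these examples.
// "tbl", "client" and "conf" are assumed to exist, as in the snippets above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;

class PartitionBuilderStyles {
    static void createPartitions(IMetaStoreClient client, Configuration conf, Table tbl) throws Exception {
        // Style 1: build the partition and register it in one call (SmokeTest above).
        Partition p1 = new PartitionBuilder()
                .inTable(tbl)
                .addValue("val1")
                .addToTable(client, conf);

        // Style 2: build only, then add explicitly (TestMarkPartition, Example 73 below).
        Partition p2 = new PartitionBuilder()
                .inTable(tbl)
                .addValue("val2")
                .build(conf);
        client.add_partition(p2);
    }
}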

Example 72 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

the class TestMetaStoreEventListener method testListener.

@Test
public void testListener() throws Exception {
    int listSize = 0;
    List<ListenerEvent> notifyList = DummyListener.notifyList;
    List<PreEventContext> preNotifyList = DummyPreListener.notifyList;
    assertEquals(notifyList.size(), listSize);
    assertEquals(preNotifyList.size(), listSize);
    new DatabaseBuilder().setName(dbName).create(msc, conf);
    listSize++;
    PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent) (preNotifyList.get(preNotifyList.size() - 1));
    Database db = msc.getDatabase(dbName);
    assertEquals(listSize, notifyList.size());
    assertEquals(listSize + 1, preNotifyList.size());
    validateCreateDb(db, preDbEvent.getDatabase());
    CreateDatabaseEvent dbEvent = (CreateDatabaseEvent) (notifyList.get(listSize - 1));
    Assert.assertTrue(dbEvent.getStatus());
    validateCreateDb(db, dbEvent.getDatabase());
    Table table = new TableBuilder()
            .inDb(db)
            .setTableName(tblName)
            .addCol("a", "string")
            .addPartCol("b", "string")
            .create(msc, conf);
    PreCreateTableEvent preTblEvent = (PreCreateTableEvent) (preNotifyList.get(preNotifyList.size() - 1));
    listSize++;
    Table tbl = msc.getTable(dbName, tblName);
    validateCreateTable(tbl, preTblEvent.getTable());
    assertEquals(notifyList.size(), listSize);
    CreateTableEvent tblEvent = (CreateTableEvent) (notifyList.get(listSize - 1));
    Assert.assertTrue(tblEvent.getStatus());
    validateCreateTable(tbl, tblEvent.getTable());
    new PartitionBuilder().inTable(table).addValue("2011").addToTable(msc, conf);
    listSize++;
    assertEquals(notifyList.size(), listSize);
    PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent) (preNotifyList.get(preNotifyList.size() - 1));
    AddPartitionEvent partEvent = (AddPartitionEvent) (notifyList.get(listSize - 1));
    Assert.assertTrue(partEvent.getStatus());
    Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011");
    Partition partAdded = partEvent.getPartitionIterator().next();
    partAdded.setWriteId(part.getWriteId());
    validateAddPartition(part, partAdded);
    validateTableInAddPartition(tbl, partEvent.getTable());
    validateAddPartition(part, prePartEvent.getPartitions().get(0));
    // Test adding multiple partitions in a single partition-set, atomically.
    int currentTime = (int) System.currentTimeMillis();
    HiveMetaStoreClient hmsClient = new HiveMetaStoreClient(conf);
    table = hmsClient.getTable(dbName, "tmptbl");
    Partition partition1 = new Partition(Arrays.asList("20110101"), dbName, "tmptbl", currentTime, currentTime, table.getSd(), table.getParameters());
    Partition partition2 = new Partition(Arrays.asList("20110102"), dbName, "tmptbl", currentTime, currentTime, table.getSd(), table.getParameters());
    Partition partition3 = new Partition(Arrays.asList("20110103"), dbName, "tmptbl", currentTime, currentTime, table.getSd(), table.getParameters());
    hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3));
    ++listSize;
    AddPartitionEvent multiplePartitionEvent = (AddPartitionEvent) (notifyList.get(listSize - 1));
    validateTableInAddPartition(table, multiplePartitionEvent.getTable());
    List<Partition> multiParts = Lists.newArrayList(multiplePartitionEvent.getPartitionIterator());
    assertEquals("Unexpected number of partitions in event!", 3, multiParts.size());
    assertEquals("Unexpected partition value.", partition1.getValues(), multiParts.get(0).getValues());
    assertEquals("Unexpected partition value.", partition2.getValues(), multiParts.get(1).getValues());
    assertEquals("Unexpected partition value.", partition3.getValues(), multiParts.get(2).getValues());
    part.setLastAccessTime((int) (System.currentTimeMillis() / 1000));
    msc.alter_partition(dbName, tblName, part);
    listSize++;
    assertEquals(notifyList.size(), listSize);
    PreAlterPartitionEvent preAlterPartEvent = (PreAlterPartitionEvent) preNotifyList.get(preNotifyList.size() - 1);
    // the partition did not change,
    // so the new partition should be similar to the original partition
    Partition origP = msc.getPartition(dbName, tblName, "b=2011");
    AlterPartitionEvent alterPartEvent = (AlterPartitionEvent) notifyList.get(listSize - 1);
    Assert.assertTrue(alterPartEvent.getStatus());
    validateAlterPartition(origP, origP, alterPartEvent.getOldPartition().getDbName(), alterPartEvent.getOldPartition().getTableName(), alterPartEvent.getOldPartition().getValues(), alterPartEvent.getNewPartition());
    validateAlterPartition(origP, origP, preAlterPartEvent.getDbName(), preAlterPartEvent.getTableName(), preAlterPartEvent.getNewPartition().getValues(), preAlterPartEvent.getNewPartition());
    List<String> part_vals = new ArrayList<>();
    part_vals.add("c=2012");
    int preEventListSize;
    preEventListSize = preNotifyList.size() + 1;
    Partition newPart = msc.appendPartition(dbName, tblName, part_vals);
    listSize++;
    assertEquals(notifyList.size(), listSize);
    assertEquals(preNotifyList.size(), preEventListSize);
    AddPartitionEvent appendPartEvent = (AddPartitionEvent) (notifyList.get(listSize - 1));
    Partition partAppended = appendPartEvent.getPartitionIterator().next();
    validateAddPartition(newPart, partAppended);
    PreAddPartitionEvent preAppendPartEvent = (PreAddPartitionEvent) (preNotifyList.get(preNotifyList.size() - 1));
    validateAddPartition(newPart, preAppendPartEvent.getPartitions().get(0));
    Table renamedTable = new Table(table);
    renamedTable.setTableName(renamed);
    msc.alter_table(dbName, tblName, renamedTable);
    listSize++;
    assertEquals(notifyList.size(), listSize);
    PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
    renamedTable = msc.getTable(dbName, renamed);
    AlterTableEvent alterTableE = (AlterTableEvent) notifyList.get(listSize - 1);
    Assert.assertTrue(alterTableE.getStatus());
    validateAlterTable(tbl, renamedTable, alterTableE.getOldTable(), alterTableE.getNewTable());
    validateAlterTable(tbl, renamedTable, preAlterTableE.getOldTable(), preAlterTableE.getNewTable());
    // change the table name back
    table = new Table(renamedTable);
    table.setTableName(tblName);
    msc.alter_table(dbName, renamed, table);
    listSize++;
    assertEquals(notifyList.size(), listSize);
    table = msc.getTable(dbName, tblName);
    table.getSd().addToCols(new FieldSchema("c", "int", ""));
    msc.alter_table(dbName, tblName, table);
    listSize++;
    assertEquals(notifyList.size(), listSize);
    preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
    Table altTable = msc.getTable(dbName, tblName);
    alterTableE = (AlterTableEvent) notifyList.get(listSize - 1);
    Assert.assertTrue(alterTableE.getStatus());
    validateAlterTableColumns(tbl, altTable, alterTableE.getOldTable(), alterTableE.getNewTable());
    validateAlterTableColumns(tbl, altTable, preAlterTableE.getOldTable(), preAlterTableE.getNewTable());
    Map<String, String> kvs = new HashMap<>(1);
    kvs.put("b", "2011");
    msc.markPartitionForEvent("hive2038", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
    listSize++;
    assertEquals(notifyList.size(), listSize);
    LoadPartitionDoneEvent partMarkEvent = (LoadPartitionDoneEvent) notifyList.get(listSize - 1);
    Assert.assertTrue(partMarkEvent.getStatus());
    validateLoadPartitionDone("tmptbl", kvs, partMarkEvent.getTable().getTableName(), partMarkEvent.getPartitionName());
    PreLoadPartitionDoneEvent prePartMarkEvent = (PreLoadPartitionDoneEvent) preNotifyList.get(preNotifyList.size() - 1);
    validateLoadPartitionDone("tmptbl", kvs, prePartMarkEvent.getTableName(), prePartMarkEvent.getPartitionName());
    msc.dropPartition(dbName, tblName, Collections.singletonList("2011"));
    listSize++;
    assertEquals(notifyList.size(), listSize);
    PreDropPartitionEvent preDropPart = (PreDropPartitionEvent) preNotifyList.get(preNotifyList.size() - 1);
    DropPartitionEvent dropPart = (DropPartitionEvent) notifyList.get(listSize - 1);
    Assert.assertTrue(dropPart.getStatus());
    validateDropPartition(Collections.singletonList(part).iterator(), dropPart.getPartitionIterator());
    validateTableInDropPartition(tbl, dropPart.getTable());
    validateDropPartition(Collections.singletonList(part).iterator(), preDropPart.getPartitionIterator());
    validateTableInDropPartition(tbl, preDropPart.getTable());
    msc.dropTable(dbName, tblName);
    listSize++;
    assertEquals(notifyList.size(), listSize);
    PreDropTableEvent preDropTbl = (PreDropTableEvent) preNotifyList.get(preNotifyList.size() - 1);
    DropTableEvent dropTbl = (DropTableEvent) notifyList.get(listSize - 1);
    Assert.assertTrue(dropTbl.getStatus());
    validateDropTable(tbl, dropTbl.getTable());
    validateDropTable(tbl, preDropTbl.getTable());
    msc.dropDatabase(dbName);
    listSize++;
    assertEquals(notifyList.size(), listSize);
    PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent) preNotifyList.get(preNotifyList.size() - 1);
    DropDatabaseEvent dropDB = (DropDatabaseEvent) notifyList.get(listSize - 1);
    Assert.assertTrue(dropDB.getStatus());
    validateDropDb(db, dropDB.getDatabase());
    validateDropDb(db, preDropDB.getDatabase());
    msc.setMetaConf("metastore.try.direct.sql", "false");
    ConfigChangeEvent event = (ConfigChangeEvent) notifyList.get(notifyList.size() - 1);
    assertEquals("metastore.try.direct.sql", event.getKey());
    assertEquals("true", event.getOldValue());
    assertEquals("false", event.getNewValue());
}
Also used : PreAddPartitionEvent(org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent) HashMap(java.util.HashMap) CreateDatabaseEvent(org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent) PreCreateDatabaseEvent(org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent) PreAlterPartitionEvent(org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) PreAlterTableEvent(org.apache.hadoop.hive.metastore.events.PreAlterTableEvent) PreLoadPartitionDoneEvent(org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) PreDropDatabaseEvent(org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent) ListenerEvent(org.apache.hadoop.hive.metastore.events.ListenerEvent) AlterTableEvent(org.apache.hadoop.hive.metastore.events.AlterTableEvent) DropDatabaseEvent(org.apache.hadoop.hive.metastore.events.DropDatabaseEvent) PreCreateTableEvent(org.apache.hadoop.hive.metastore.events.PreCreateTableEvent) CreateTableEvent(org.apache.hadoop.hive.metastore.events.CreateTableEvent) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) LoadPartitionDoneEvent(org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent) PreDropTableEvent(org.apache.hadoop.hive.metastore.events.PreDropTableEvent) DropTableEvent(org.apache.hadoop.hive.metastore.events.DropTableEvent) Database(org.apache.hadoop.hive.metastore.api.Database) Partition(org.apache.hadoop.hive.metastore.api.Partition) PreDropPartitionEvent(org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent) DropPartitionEvent(org.apache.hadoop.hive.metastore.events.DropPartitionEvent) Table(org.apache.hadoop.hive.metastore.api.Table) AlterPartitionEvent(org.apache.hadoop.hive.metastore.events.AlterPartitionEvent) DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) PreEventContext(org.apache.hadoop.hive.metastore.events.PreEventContext) ConfigChangeEvent(org.apache.hadoop.hive.metastore.events.ConfigChangeEvent) AddPartitionEvent(org.apache.hadoop.hive.metastore.events.AddPartitionEvent) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest) Test(org.junit.Test)
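
The test reads its events from DummyListener.notifyList and DummyPreListener.notifyList; those helper classes are not shown in this snippet. As a rough illustration of what such a listener looks like, here is a minimal sketch that extends MetaStoreEventListener and records a couple of the event types exercised above. The class name and the static list are assumptions for illustration, not the actual DummyListener code:

// Minimal sketch of an event-recording listener in the spirit of DummyListener.
// Only two callbacks are overridden here; the static list is an assumption.
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
import org.apache.hadoop.hive.metastore.events.ListenerEvent;

public class RecordingListener extends MetaStoreEventListener {

    // Events observed so far, newest last (mirrors how the test reads DummyListener.notifyList).
    public static final List<ListenerEvent> notifyList = new ArrayList<>();

    public RecordingListener(Configuration config) {
        super(config);
    }

    @Override
    public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
        notifyList.add(tableEvent);
    }

    @Override
    public void onAddPartition(AddPartitionEvent partitionEvent) throws MetaException {
        notifyList.add(partitionEvent);
    }
}

A listener like this is activated by listing its class name in the metastore's event-listener configuration, as the setUp in Example 75 below does for DummyListener via the hive.metastore.event.listeners property.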

Example 73 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

the class TestMarkPartition method testMarkingPartitionSet.

@Test
public void testMarkingPartitionSet() throws TException, InterruptedException {
    HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
    final String dbName = "hive2215";
    msc.dropDatabase(dbName, true, true, true);
    Database db = new DatabaseBuilder().setName(dbName).create(msc, conf);
    final String tableName = "tmptbl";
    msc.dropTable(dbName, tableName, true, true);
    Table table = new TableBuilder()
            .setDbName(dbName)
            .setTableName(tableName)
            .addCol("a", "string")
            .addPartCol("b", "string")
            .create(msc, conf);
    Partition part = new PartitionBuilder().inTable(table).addValue("2011").build(conf);
    msc.add_partition(part);
    Map<String, String> kvs = new HashMap<>();
    kvs.put("b", "'2011'");
    msc.markPartitionForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE);
    Assert.assertTrue(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));
    Thread.sleep(10000);
    Assert.assertFalse(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));
    kvs.put("b", "'2012'");
    Assert.assertFalse(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));
    try {
        msc.markPartitionForEvent(dbName, "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
        Assert.fail("Expected UnknownTableException");
    } catch (UnknownTableException e) {
    // All good
    } catch (Exception e) {
        Assert.fail("Expected UnknownTableException");
    }
    try {
        msc.isPartitionMarkedForEvent(dbName, "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
        Assert.fail("Expected UnknownTableException");
    } catch (UnknownTableException e) {
    // All good
    } catch (Exception e) {
        Assert.fail("Expected UnknownTableException, received " + e.getClass().getName());
    }
    kvs.put("a", "'2012'");
    try {
        msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE);
        Assert.fail("Expected InvalidPartitionException");
    } catch (InvalidPartitionException e) {
    // All good
    } catch (Exception e) {
        Assert.fail("Expected InvalidPartitionException, received " + e.getClass().getName());
    }
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) HashMap(java.util.HashMap) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) UnknownTableException(org.apache.hadoop.hive.metastore.api.UnknownTableException) TException(org.apache.thrift.TException) InvalidPartitionException(org.apache.hadoop.hive.metastore.api.InvalidPartitionException) DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)
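
The examples above all use a single partition column. For a table with more than one partition column, addValue can be chained once per column; below is a small sketch under the assumption that the values map positionally onto the columns declared with addPartCol. The database and table names here are hypothetical:

// Sketch only: a partition with two partition columns, built with chained addValue calls.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;

class TwoLevelPartitionSketch {
    static void addTwoLevelPartition(IMetaStoreClient msc, Configuration conf) throws Exception {
        Table tbl = new TableBuilder()
                .setDbName("hive2215")
                .setTableName("tmptbl_multi")   // hypothetical table name
                .addCol("a", "string")
                .addPartCol("b", "string")
                .addPartCol("c", "string")
                .create(msc, conf);
        Partition part = new PartitionBuilder()
                .inTable(tbl)
                .addValue("2011")               // value for partition column b
                .addValue("us")                 // value for partition column c
                .build(conf);
        msc.add_partition(part);
    }
}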

Example 74 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

the class TestMetaStoreEventListenerOnlyOnCommit method testEventStatus.

@Test
public void testEventStatus() throws Exception {
    int listSize = 0;
    List<ListenerEvent> notifyList = DummyListener.notifyList;
    assertEquals(notifyList.size(), listSize);
    String dbName = "tmpDb";
    Database db = new DatabaseBuilder().setName(dbName).setCatalogName(Warehouse.DEFAULT_CATALOG_NAME).create(msc, conf);
    listSize += 1;
    notifyList = DummyListener.notifyList;
    assertEquals(notifyList.size(), listSize);
    assertTrue(DummyListener.getLastEvent().getStatus());
    String tableName = "unittest_TestMetaStoreEventListenerOnlyOnCommit";
    Table table = new TableBuilder()
            .inDb(db)
            .setTableName(tableName)
            .addCol("id", "int")
            .addPartCol("ds", "string")
            .create(msc, conf);
    listSize += 1;
    notifyList = DummyListener.notifyList;
    assertEquals(notifyList.size(), listSize);
    assertTrue(DummyListener.getLastEvent().getStatus());
    new PartitionBuilder().inTable(table).addValue("foo1").addToTable(msc, conf);
    listSize += 1;
    notifyList = DummyListener.notifyList;
    assertEquals(notifyList.size(), listSize);
    assertTrue(DummyListener.getLastEvent().getStatus());
    DummyRawStoreControlledCommit.setCommitSucceed(false);
    new PartitionBuilder().inTable(table).addValue("foo2").addToTable(msc, conf);
    listSize += 1;
    notifyList = DummyListener.notifyList;
    assertEquals(notifyList.size(), listSize);
    assertFalse(DummyListener.getLastEvent().getStatus());
}
Also used : DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) ListenerEvent(org.apache.hadoop.hive.metastore.events.ListenerEvent) Test(org.junit.Test) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest)
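
The point of this test is that ListenerEvent.getStatus() reflects whether the underlying metastore transaction actually committed: the listener still fires on a failed commit, but with status false. A consumer that only cares about committed changes could filter on that flag; a minimal sketch (successfulEvents is a made-up helper, not part of the Hive API):

// Sketch: keep only events whose backing transaction committed, using the
// ListenerEvent.getStatus() flag the test above asserts on.
import java.util.List;
import java.util.stream.Collectors;

import org.apache.hadoop.hive.metastore.events.ListenerEvent;

final class ListenerEventFilters {
    static List<ListenerEvent> successfulEvents(List<ListenerEvent> events) {
        return events.stream()
                .filter(ListenerEvent::getStatus)   // true only when the metastore commit succeeded
                .collect(Collectors.toList());
    }
}

For example, a test or downstream consumer could call ListenerEventFilters.successfulEvents(DummyListener.notifyList) and expect the "foo2" partition event above to be excluded.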

Example 75 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

the class TestHiveMetaStoreWithEnvironmentContext method setUp.

@Before
public void setUp() throws Exception {
    System.setProperty("hive.metastore.event.listeners", DummyListener.class.getName());
    conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
    MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
    MetaStoreTestUtils.setConfForStandloneMode(conf);
    MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
    msc = new HiveMetaStoreClient(conf);
    msc.dropDatabase(dbName, true, true);
    Map<String, String> envProperties = new HashMap<>();
    envProperties.put("hadoop.job.ugi", "test_user");
    envContext = new EnvironmentContext(envProperties);
    db.setName(dbName);
    db.setCatalogName(DEFAULT_CATALOG_NAME);
    table = new TableBuilder()
            .setDbName(dbName)
            .setTableName(tblName)
            .addTableParam("a", "string")
            .addPartCol("b", "string")
            .addCol("a", "string")
            .addCol("b", "string")
            .build(conf);
    partition = new PartitionBuilder().inTable(table).addValue("2011").build(conf);
    DummyListener.notifyList.clear();
}
Also used : EnvironmentContext(org.apache.hadoop.hive.metastore.api.EnvironmentContext) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) HashMap(java.util.HashMap) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) Before(org.junit.Before)

Aggregations

PartitionBuilder (org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) 75
Partition (org.apache.hadoop.hive.metastore.api.Partition) 63
Test (org.junit.Test) 47
Table (org.apache.hadoop.hive.metastore.api.Table) 44
TableBuilder (org.apache.hadoop.hive.metastore.client.builder.TableBuilder) 33
MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest) 28
DatabaseBuilder (org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) 27
Database (org.apache.hadoop.hive.metastore.api.Database) 22
ArrayList (java.util.ArrayList) 14
MetastoreUnitTest (org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest) 10
CatalogBuilder (org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder) 10
EnvironmentContext (org.apache.hadoop.hive.metastore.api.EnvironmentContext) 9
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) 7
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) 7
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) 7
SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) 7
Catalog (org.apache.hadoop.hive.metastore.api.Catalog) 6
HashMap (java.util.HashMap) 5
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema) 5
HashSet (java.util.HashSet) 4