Usage of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project.
Source: class TestCheckConstraint, method setUp.
@Before
public void setUp() throws Exception {
// Get a fresh metastore client for this test run
client = metaStore.getClient();
// Remove any leftover secondary database from a previous run.
// NOTE(review): the three booleans presumably mean deleteData, ignoreUnknownDb
// and cascade — confirm against the IMetaStoreClient.dropDatabase javadoc.
client.dropDatabase(OTHER_DATABASE, true, true, true);
// Drop every table in the default database
for (String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) {
client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true);
}
// Drop the database living in the secondary catalog before dropping the catalog itself
client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true);
try {
client.dropCatalog(OTHER_CATALOG);
} catch (NoSuchObjectException e) {
// NOP — the catalog does not exist on a clean first run; nothing to remove
}
// Clean up trash left in the warehouse directories by the drops above
metaStore.cleanWarehouseDirs();
// Recreate the fixtures: a second database in the default catalog...
new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf());
// ...and a second catalog with its own warehouse location
Catalog cat = new CatalogBuilder().setName(OTHER_CATALOG).setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)).build();
client.createCatalog(cat);
// For this one don't specify a location to make sure it gets put in the catalog directory
inOtherCatalog = new DatabaseBuilder().setName(DATABASE_IN_OTHER_CATALOG).setCatalogName(OTHER_CATALOG).create(client, metaStore.getConf());
// Three identical two-column tables: default db, OTHER_DATABASE, and the db in the other catalog
testTables[0] = new TableBuilder().setTableName("test_table_1").addCol("col1", "int").addCol("col2", "varchar(32)").create(client, metaStore.getConf());
testTables[1] = new TableBuilder().setDbName(OTHER_DATABASE).setTableName("test_table_2").addCol("col1", "int").addCol("col2", "varchar(32)").create(client, metaStore.getConf());
testTables[2] = new TableBuilder().inDb(inOtherCatalog).setTableName("test_table_3").addCol("col1", "int").addCol("col2", "varchar(32)").create(client, metaStore.getConf());
// Reload tables from the MetaStore so the array holds the server-populated
// objects (catalog name, storage descriptor, etc.) rather than client-built skeletons
for (int i = 0; i < testTables.length; i++) {
testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), testTables[i].getTableName());
}
}
Usage of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project.
Source: class TestCheckConstraint, method createTableWithConstraintsPk.
/**
 * Creates a table together with a named check constraint in one metastore call,
 * verifies the constraint can be fetched back, then drops it by name and
 * verifies the table no longer has any check constraints.
 */
@Test
public void createTableWithConstraintsPk() throws TException {
  // Name the constraint explicitly so it can be dropped by name later.
  final String ccName = "ctwccc";
  Table tbl = new TableBuilder()
      .setTableName("table_with_constraints")
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .build(metaStore.getConf());
  List<SQLCheckConstraint> constraints = new SQLCheckConstraintBuilder()
      .onTable(tbl)
      .addColumn("col1")
      .setConstraintName(ccName)
      .setCheckExpression("> 0")
      .build(metaStore.getConf());
  // Table and check constraint are created atomically; the other constraint
  // kinds (pk, fk, unique, not-null, default) are not exercised here.
  client.createTableWithConstraints(tbl, null, null, null, null, null, constraints);
  CheckConstraintsRequest request =
      new CheckConstraintsRequest(tbl.getCatName(), tbl.getDbName(), tbl.getTableName());
  List<SQLCheckConstraint> actual = client.getCheckConstraints(request);
  Assert.assertEquals(constraints, actual);
  // Dropping by the explicit name must leave the table constraint-free.
  client.dropConstraint(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), ccName);
  request = new CheckConstraintsRequest(tbl.getCatName(), tbl.getDbName(), tbl.getTableName());
  actual = client.getCheckConstraints(request);
  Assert.assertTrue(actual.isEmpty());
}
Usage of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project.
Source: class TestCheckConstraint, method createTableWithConstraintsPkInOtherCatalog.
/**
 * Same round trip as {@code createTableWithConstraintsPk}, but the table lives in
 * a non-default catalog and the constraint is left unnamed, so the metastore
 * generates a name which is read back from the fetched constraint before comparing.
 */
@Test
public void createTableWithConstraintsPkInOtherCatalog() throws TException {
  Table tbl = new TableBuilder()
      .setTableName("table_in_other_catalog_with_constraints")
      .inDb(inOtherCatalog)
      .addCol("col1", "int")
      .addCol("col2", "varchar(32)")
      .build(metaStore.getConf());
  // No constraint name set: the server will assign one.
  List<SQLCheckConstraint> constraints = new SQLCheckConstraintBuilder()
      .onTable(tbl)
      .addColumn("col1")
      .setCheckExpression("> 0")
      .build(metaStore.getConf());
  client.createTableWithConstraints(tbl, null, null, null, null, null, constraints);
  CheckConstraintsRequest request =
      new CheckConstraintsRequest(tbl.getCatName(), tbl.getDbName(), tbl.getTableName());
  List<SQLCheckConstraint> actual = client.getCheckConstraints(request);
  // Copy the generated name into the expected constraint so equals() can match.
  constraints.get(0).setDc_name(actual.get(0).getDc_name());
  Assert.assertEquals(constraints, actual);
  // Drop using the server-generated name and verify nothing is left behind.
  client.dropConstraint(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(),
      constraints.get(0).getDc_name());
  request = new CheckConstraintsRequest(tbl.getCatName(), tbl.getDbName(), tbl.getTableName());
  actual = client.getCheckConstraints(request);
  Assert.assertTrue(actual.isEmpty());
}
Usage of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project.
Source: class TestAddPartitions, method testAddPartitionNoPartColOnTable.
/**
 * Adding a partition to a table that declares no partition columns must fail:
 * the metastore is expected to reject the call with a {@link MetaException}.
 */
@Test(expected = MetaException.class)
public void testAddPartitionNoPartColOnTable() throws Exception {
  // Deliberately build the table without any partition columns.
  new TableBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addCol("test_id", "int", "test col id")
      .addCol("test_value", "string", "test col value")
      .create(client, metaStore.getConf());
  Partition part = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  // Should throw MetaException — the table is unpartitioned.
  client.add_partition(part);
}
Usage of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project.
Source: class TestMetaStoreServerUtils, method testUpdateTableStatsSlow_statsUpdated.
/**
 * Verify that {@code MetaStoreServerUtils.updateTableStatsSlow} really updates
 * table statistics. Using a mocked {@link Warehouse} to supply the file listing,
 * the test checks four scenarios on an unpartitioned table:
 * <ol>
 * <li>fresh stats are computed when the table has none,</li>
 * <li>stale stats are recomputed when {@code forceRecompute} is set,</li>
 * <li>{@code COLUMN_STATS_ACCURATE} is removed from the parameters on recompute,</li>
 * <li>with {@code STATS_GENERATED} set to {@code TASK} in the environment context,
 *     {@code COLUMN_STATS_ACCURATE} is rewritten to the basic-stats marker instead.</li>
 * </ol>
 */
@Test
public void testUpdateTableStatsSlow_statsUpdated() throws TException {
  final long fileLen = 5;
  // Table under test: unpartitioned, a single int column, no pre-existing stats.
  Table plainTable = new TableBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME)
      .addCol("id", "int").build(null);
  // Mocked listing: one entry with the "directory" flag set (not counted in the
  // stats — NUM_FILES below is 2), one erasure-coded file and one regular file.
  FileStatus dirEntry = getFileStatus(1, true, 2, 3, 4, "/tmp/0", false);
  FileStatus ecFile = getFileStatus(fileLen, false, 3, 4, 5, "/tmp/1", true);
  FileStatus regularFile = getFileStatus(fileLen, false, 3, 4, 5, "/tmp/1", false);
  List<FileStatus> listing = Arrays.asList(dirEntry, ecFile, regularFile);
  Warehouse warehouse = mock(Warehouse.class);
  when(warehouse.getFileStatusesForUnpartitionedTable(db, plainTable)).thenReturn(listing);
  // Two data files, combined size 2 * fileLen, exactly one of them erasure coded.
  Map<String, String> expectedParams = ImmutableMap.of(NUM_FILES, "2",
      TOTAL_SIZE, String.valueOf(2 * fileLen), NUM_ERASURE_CODED_FILES, "1");
  MetaStoreServerUtils.updateTableStatsSlow(db, plainTable, warehouse, false, false, null);
  assertThat(plainTable.getParameters(), is(expectedParams));
  // Scenario 2: stats already present — forceRecompute must overwrite them.
  Table staleStatsTable = new TableBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME)
      .addCol("id", "int").addTableParam(NUM_FILES, "0").addTableParam(TOTAL_SIZE, "0")
      .build(null);
  when(warehouse.getFileStatusesForUnpartitionedTable(db, staleStatsTable)).thenReturn(listing);
  MetaStoreServerUtils.updateTableStatsSlow(db, staleStatsTable, warehouse, false, true, null);
  assertThat(staleStatsTable.getParameters(), is(expectedParams));
  // Scenario 3: COLUMN_STATS_ACCURATE is dropped from the params on recompute.
  Table accurateStatsTable = new TableBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME)
      .addCol("id", "int").addTableParam(COLUMN_STATS_ACCURATE, "true").build(null);
  when(warehouse.getFileStatusesForUnpartitionedTable(db, accurateStatsTable))
      .thenReturn(listing);
  MetaStoreServerUtils.updateTableStatsSlow(db, accurateStatsTable, warehouse, false, true, null);
  assertThat(accurateStatsTable.getParameters(), is(expectedParams));
  // Scenario 4: STATS_GENERATED=TASK in the environment context makes the stats
  // code set COLUMN_STATS_ACCURATE to the basic-stats marker.
  EnvironmentContext context =
      new EnvironmentContext(ImmutableMap.of(STATS_GENERATED, StatsSetupConst.TASK));
  Table taskStatsTable = new TableBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME)
      .addCol("id", "int")
      // The pre-existing value is irrelevant; only the key's presence matters.
      .addTableParam(COLUMN_STATS_ACCURATE, "foo").build(null);
  when(warehouse.getFileStatusesForUnpartitionedTable(db, taskStatsTable)).thenReturn(listing);
  MetaStoreServerUtils.updateTableStatsSlow(db, taskStatsTable, warehouse, false, true, context);
  Map<String, String> expectedWithAccurate = ImmutableMap.of(NUM_FILES, "2",
      TOTAL_SIZE, String.valueOf(2 * fileLen), NUM_ERASURE_CODED_FILES, "1",
      COLUMN_STATS_ACCURATE, "{\"BASIC_STATS\":\"true\"}");
  assertThat(taskStatsTable.getParameters(), is(expectedWithAccurate));
}
Aggregations — end of collected TableBuilder usage examples.