Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache:
class TestAddPartitions, method addPartitionOtherCatalog.
@Test
@ConditionalIgnoreOnSessionHiveMetastoreClient
public void addPartitionOtherCatalog() throws TException {
  // Set up a dedicated catalog so the partitions live outside the default one.
  String catName = "add_partition_catalog";
  Catalog cat = new CatalogBuilder()
      .setName(catName)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
      .build();
  client.createCatalog(cat);

  // Database and partitioned table inside the new catalog.
  String dbName = "add_partition_database_in_other_catalog";
  Database db = new DatabaseBuilder()
      .setName(dbName)
      .setCatalogName(catName)
      .create(client, metaStore.getConf());
  String tableName = "table_in_other_catalog";
  Table table = new TableBuilder()
      .inDb(db)
      .setTableName(tableName)
      .addCol("id", "int")
      .addCol("name", "string")
      .addPartCol("partcol", "string")
      .create(client, metaStore.getConf());

  // Five partitions with values a0..a4, all bound to the non-default catalog.
  Partition[] parts = new Partition[5];
  for (int idx = 0; idx < parts.length; idx++) {
    parts[idx] = new PartitionBuilder()
        .inTable(table)
        .addValue("a" + idx)
        .build(metaStore.getConf());
  }

  // Exercise all three add paths: single partition, list, and ifNotExists batch.
  client.add_partition(parts[0]);
  Assert.assertEquals(2, client.add_partitions(Arrays.asList(parts[1], parts[2])));
  client.add_partitions(Arrays.asList(parts), true, false);

  // Each fetched partition must carry the catalog/db/table it was created under.
  for (int idx = 0; idx < parts.length; idx++) {
    Partition fetched =
        client.getPartition(catName, dbName, tableName, Collections.singletonList("a" + idx));
    Assert.assertEquals(catName, fetched.getCatName());
    Assert.assertEquals(dbName, fetched.getDbName());
    Assert.assertEquals(tableName, fetched.getTableName());
  }

  // Clean up: cascade-drop the database, then the catalog itself.
  client.dropDatabase(catName, dbName, true, true, true);
  client.dropCatalog(catName);
}
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache:
class TestMetaStoreServerUtils, method testGetPartitionspecsGroupedBySDonePartitionExternal.
/**
 * Test getPartitionspecsGroupedByStorageDescriptor() for partitions with a single
 * partition which is located outside table location.
 *
 * A partition whose SD location is not under the table root must be returned as a
 * plain PartitionSpec (rootPath == null) carrying the full partition object.
 */
@Test
public void testGetPartitionspecsGroupedBySDonePartitionExternal() throws MetaException {
  // Create database and table. Fix: the partition previously used the string
  // literal "DB_NAME" instead of the DB_NAME constant used by the table, so the
  // partition's db name did not match its table's.
  Table tbl = new TableBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addCol("id", "int")
      .setLocation("/foo")
      .build(null);
  // "/a/b" is deliberately outside the table location "/foo".
  Partition p1 = new PartitionBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .setLocation("/a/b")
      .addCol("a", "int")
      .addValue("val1")
      .setInputFormat("foo")
      .build(null);
  List<PartitionSpec> result =
      MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Collections.singleton(p1));
  // Exactly one spec, not rooted at the table location.
  assertThat(result.size(), is(1));
  PartitionSpec ps = result.get(0);
  assertThat(ps.getRootPath(), is((String) null));
  // The external partition is carried whole, with its own SD location and values.
  List<Partition> partitions = ps.getPartitionList().getPartitions();
  assertThat(partitions.size(), is(1));
  Partition partition = partitions.get(0);
  assertThat(partition.getSd().getLocation(), is("/a/b"));
  assertThat(partition.getValues(), is(Collections.singletonList("val1")));
}
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache:
class TestMetaStoreServerUtils, method testUpdateTableStatsSlow_doesNotUpdateStats.
/**
 * Verify that updateTableStatsSlow() does not calculate table statistics when
 * <ol>
 * <li>newDir is true</li>
 * <li>Table is partitioned</li>
 * <li>Stats are already present and forceRecompute isn't set</li>
 * </ol>
 */
@Test
public void testUpdateTableStatsSlow_doesNotUpdateStats() throws TException {
  // A single partition column definition, reused by the partitioned-table case.
  FieldSchema dateCol = new FieldSchema("date", "string", "date column");
  List<FieldSchema> partitionCols = Collections.singletonList(dateCol);
  Warehouse wh = mock(Warehouse.class);

  // Case 1: newDir == true => the warehouse is never consulted for file stats.
  Table plainTable = new TableBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addCol("id", "int")
      .build(null);
  MetaStoreServerUtils.updateTableStatsSlow(db, plainTable, wh, true, false, null);
  verify(wh, never()).getFileStatusesForUnpartitionedTable(db, plainTable);

  // Case 2: partitioned table => stats not updated.
  Table partitionedTable = new TableBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addCol("id", "int")
      .setPartCols(partitionCols)
      .build(null);
  MetaStoreServerUtils.updateTableStatsSlow(db, partitionedTable, wh, false, false, null);
  verify(wh, never()).getFileStatusesForUnpartitionedTable(db, partitionedTable);

  // Case 3: stats already present and forceRecompute not set => stats not updated.
  Table tableWithStats = new TableBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addCol("id", "int")
      .setTableParams(paramsWithStats)
      .build(null);
  MetaStoreServerUtils.updateTableStatsSlow(db, tableWithStats, wh, false, false, null);
  verify(wh, never()).getFileStatusesForUnpartitionedTable(db, tableWithStats);
}
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache:
class TestMetaStoreServerUtils, method testGetPartitionspecsGroupedBySDonePartitionCombined.
/**
 * Test getPartitionspecsGroupedByStorageDescriptor() multiple partitions:
 * <ul>
 * <li>Partition with null SD</li>
 * <li>Two partitions under the table location</li>
 * <li>One partition outside of table location</li>
 * </ul>
 */
@Test
public void testGetPartitionspecsGroupedBySDonePartitionCombined() throws MetaException {
  // Create database and table. Fixes in this method:
  // 1) Partitions now use the DB_NAME / TABLE_NAME constants instead of the string
  //    literals "DB_NAME" / "TABLE_NAME", matching the table they belong to.
  // 2) The order-normalization swap below compared against "baz" while the
  //    relative paths produced (and asserted) carry a leading slash ("/bar",
  //    "/baz"), so the swap could never trigger; it now compares against "/baz".
  String sharedInputFormat = "foo1";
  Table tbl = new TableBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addCol("id", "int")
      .setLocation("/foo")
      .build(null);
  // p1 and p4 share the input format and live under the table root => grouped together.
  Partition p1 = new PartitionBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .setLocation("/foo/bar")
      .addCol("a1", "int")
      .addValue("val1")
      .setInputFormat(sharedInputFormat)
      .build(null);
  // p2 is outside the table root => returned as a full Partition.
  Partition p2 = new PartitionBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .setLocation("/a/b")
      .addCol("a2", "int")
      .addValue("val2")
      .setInputFormat("foo2")
      .build(null);
  // p3 has its SD removed below => grouped into a rootless shared-SD spec.
  Partition p3 = new PartitionBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addCol("a3", "int")
      .addValue("val3")
      .setInputFormat("foo3")
      .build(null);
  Partition p4 = new PartitionBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .setLocation("/foo/baz")
      .addCol("a1", "int")
      .addValue("val4")
      .setInputFormat(sharedInputFormat)
      .build(null);
  p3.unsetSd();
  List<PartitionSpec> result =
      MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Arrays.asList(p1, p2, p3, p4));
  assertThat(result.size(), is(3));

  // Spec 1: the SD-less partition p3 — shared-SD spec with no root path.
  PartitionSpec ps1 = result.get(0);
  assertThat(ps1.getRootPath(), is((String) null));
  assertThat(ps1.getPartitionList(), is((List<Partition>) null));
  PartitionSpecWithSharedSD partSpec = ps1.getSharedSDPartitionSpec();
  List<PartitionWithoutSD> partitions1 = partSpec.getPartitions();
  assertThat(partitions1.size(), is(1));
  PartitionWithoutSD partition1 = partitions1.get(0);
  assertThat(partition1.getRelativePath(), is((String) null));
  assertThat(partition1.getValues(), is(Collections.singletonList("val3")));

  // Spec 2: p1 and p4, rooted at the table location, sharing one SD.
  PartitionSpec ps2 = result.get(1);
  assertThat(ps2.getRootPath(), is(tbl.getSd().getLocation()));
  assertThat(ps2.getPartitionList(), is((List<Partition>) null));
  List<PartitionWithoutSD> partitions2 = ps2.getSharedSDPartitionSpec().getPartitions();
  assertThat(partitions2.size(), is(2));
  PartitionWithoutSD partition2_1 = partitions2.get(0);
  PartitionWithoutSD partition2_2 = partitions2.get(1);
  // Normalize ordering: relative paths include the leading slash, so compare
  // against "/baz" (comparing against "baz" would never match and the swap
  // would silently never happen).
  if (partition2_1.getRelativePath().equals("/baz")) {
    // Swap p2_1 and p2_2
    PartitionWithoutSD tmp = partition2_1;
    partition2_1 = partition2_2;
    partition2_2 = tmp;
  }
  assertThat(partition2_1.getRelativePath(), is("/bar"));
  assertThat(partition2_1.getValues(), is(Collections.singletonList("val1")));
  assertThat(partition2_2.getRelativePath(), is("/baz"));
  assertThat(partition2_2.getValues(), is(Collections.singletonList("val4")));

  // Spec 3: the external partition p2, carried whole with its own SD.
  PartitionSpec ps4 = result.get(2);
  assertThat(ps4.getRootPath(), is((String) null));
  assertThat(ps4.getSharedSDPartitionSpec(), is((PartitionSpecWithSharedSD) null));
  List<Partition> partitions = ps4.getPartitionList().getPartitions();
  assertThat(partitions.size(), is(1));
  Partition partition = partitions.get(0);
  assertThat(partition.getSd().getLocation(), is("/a/b"));
  assertThat(partition.getValues(), is(Collections.singletonList("val2")));
}
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache:
class TestFilterHooks, method setUp.
@BeforeClass
public static void setUp() throws Exception {
  // Run with the filter hook installed but not blocking, so fixtures can be created.
  DummyMetaStoreFilterHookImpl.blockResults = false;

  // Metastore configuration: retries, no concurrency support, and our dummy hook.
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
  MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  MetastoreConf.setClass(conf, ConfVars.FILTER_HOOK,
      DummyMetaStoreFilterHookImpl.class, MetaStoreFilterHook.class);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
  msc = new HiveMetaStoreClient(conf);

  // Drop leftovers from previous runs (ignoreUnknown + cascade), then recreate.
  msc.dropDatabase(DBNAME1, true, true, true);
  msc.dropDatabase(DBNAME2, true, true, true);
  msc.createDatabase(new DatabaseBuilder().setName(DBNAME1).build());
  msc.createDatabase(new DatabaseBuilder().setName(DBNAME2).build());

  // TAB1: unpartitioned; TAB2: partitioned on "name" with two partitions.
  Table tab1 = new TableBuilder()
      .setDbName(DBNAME1)
      .setTableName(TAB1)
      .addCol("id", "int")
      .addCol("name", "string")
      .build();
  msc.createTable(tab1);
  Table tab2 = new TableBuilder()
      .setDbName(DBNAME1)
      .setTableName(TAB2)
      .addCol("id", "int")
      .addPartCol("name", "string")
      .build();
  msc.createTable(tab2);
  msc.add_partition(new PartitionBuilder().fromTable(tab2).addValue("value1").build());
  msc.add_partition(new PartitionBuilder().fromTable(tab2).addValue("value2").build());
}
Aggregations