Example usage of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project, taken from the class TestMetaStoreServerUtils, method testUpdateTableStatsSlow_removesDoNotUpdateStats.
/**
 * Verify that the call to updateTableStatsSlow() removes DO_NOT_UPDATE_STATS from table params,
 * and that no filesystem access happens while the flag is present (regardless of its value).
 */
@Test
public void testUpdateTableStatsSlow_removesDoNotUpdateStats() throws TException {
// Two tables that differ only in the value of DO_NOT_UPDATE_STATS ("true" vs "false");
// the flag should be stripped in both cases.
Table tbl = new TableBuilder()
    .setDbName(DB_NAME)
    .setTableName(TABLE_NAME)
    .addCol("id", "int")
    .addTableParam(StatsSetupConst.DO_NOT_UPDATE_STATS, "true")
    .build(null);
Table tbl1 = new TableBuilder()
    .setDbName(DB_NAME)
    .setTableName(TABLE_NAME)
    .addCol("id", "int")
    .addTableParam(StatsSetupConst.DO_NOT_UPDATE_STATS, "false")
    .build(null);
Warehouse wh = mock(Warehouse.class);
MetaStoreServerUtils.updateTableStatsSlow(db, tbl, wh, false, true, null);
assertThat(tbl.getParameters(), is(Collections.emptyMap()));
// The warehouse must not be consulted when DO_NOT_UPDATE_STATS is set.
verify(wh, never()).getFileStatusesForUnpartitionedTable(db, tbl);
MetaStoreServerUtils.updateTableStatsSlow(db, tbl1, wh, true, false, null);
// BUG FIX: the original asserted on tbl here, so tbl1's parameter map was never checked.
assertThat(tbl1.getParameters(), is(Collections.emptyMap()));
verify(wh, never()).getFileStatusesForUnpartitionedTable(db, tbl1);
}
Example usage of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project, taken from the class TestMetaStoreServerUtils, method testGetPartitionspecsGroupedBySDOnePartitionInTable.
/**
 * Test getPartitionspecsGroupedByStorageDescriptor() for partitions with a single
 * partition which is located under table location.
 */
@Test
public void testGetPartitionspecsGroupedBySDOnePartitionInTable() throws MetaException {
// Table rooted at /foo; its single partition lives underneath it at /foo/bar.
Table tbl = new TableBuilder()
    .setDbName(DB_NAME)
    .setTableName(TABLE_NAME)
    .addCol("id", "int")
    .setLocation("/foo")
    .build(null);
// NOTE(review): setDbName("DB_NAME") passes a string literal rather than the DB_NAME
// constant used for the table — confirm this is intentional.
Partition p1 = new PartitionBuilder()
    .setDbName("DB_NAME")
    .setTableName(TABLE_NAME)
    .setLocation("/foo/bar")
    .addCol("a", "int")
    .addValue("val1")
    .setInputFormat("foo")
    .build(null);
List<PartitionSpec> result =
    MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Collections.singleton(p1));
// Exactly one spec, rooted at the table location, holding one shared-SD partition
// whose path is stored relative to that root.
assertThat(result.size(), is(1));
PartitionSpec spec = result.get(0);
assertThat(spec.getRootPath(), is(tbl.getSd().getLocation()));
List<PartitionWithoutSD> sharedParts = spec.getSharedSDPartitionSpec().getPartitions();
assertThat(sharedParts.size(), is(1));
PartitionWithoutSD onlyPart = sharedParts.get(0);
assertThat(onlyPart.getRelativePath(), is("/bar"));
assertThat(onlyPart.getValues(), is(Collections.singletonList("val1")));
}
Example usage of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project, taken from the class TestMetaStoreServerUtils, method testGetPartitionspecsGroupedBySDNullSD.
/**
 * Test getPartitionspecsGroupedByStorageDescriptor() for partitions with null SDs.
 */
@Test
public void testGetPartitionspecsGroupedBySDNullSD() throws MetaException {
// Table rooted at /foo; the partition's storage descriptor is removed before grouping.
Table tbl = new TableBuilder()
    .setDbName(DB_NAME)
    .setTableName(TABLE_NAME)
    .addCol("id", "int")
    .setLocation("/foo")
    .build(null);
// NOTE(review): setDbName("DB_NAME") passes a string literal rather than the DB_NAME
// constant used for the table — confirm this is intentional.
Partition p1 = new PartitionBuilder()
    .setDbName("DB_NAME")
    .setTableName(TABLE_NAME)
    .addCol("a", "int")
    .addValue("val1")
    .setInputFormat("foo")
    .build(null);
// Set SD to null
p1.unsetSd();
assertThat(p1.getSd(), is((StorageDescriptor) null));
List<PartitionSpec> result =
    MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Collections.singleton(p1));
// A partition without an SD still yields one spec, but with no root path and no
// relative path; the partition values survive untouched.
assertThat(result.size(), is(1));
PartitionSpec spec = result.get(0);
assertThat(spec.getRootPath(), is((String) null));
List<PartitionWithoutSD> sharedParts = spec.getSharedSDPartitionSpec().getPartitions();
assertThat(sharedParts.size(), is(1));
PartitionWithoutSD onlyPart = sharedParts.get(0);
assertThat(onlyPart.getRelativePath(), is((String) null));
assertThat(onlyPart.getValues(), is(Collections.singletonList("val1")));
}
Example usage of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project, taken from the class TestGetPartitionsUsingProjectionAndFilterSpecs, method testNonStandardPartitions.
/**
 * Verify getPartitionsWithSpecs() for a table whose partitions do not all live under the
 * table directory: partitions still under the table location are returned grouped into a
 * shared-SD spec, while relocated partitions come back as a composing (full-partition) list.
 */
@Test
public void testNonStandardPartitions() throws TException {
String testTblName = "test_non_standard";
// Table with two partition columns (part, city), bucket/sort columns, and a table param.
new TableBuilder().setTableName(testTblName).setDbName(dbName).addCol("ns_c1", "string", "comment 1").addCol("ns_c2", "int", "comment 2").addPartCol("part", "string").addPartCol("city", "string").addBucketCol("ns_c1").addSortCol("ns_c2", 1).addTableParam("tblparamKey", "Partitions of this table are not located within table directory").create(client, conf);
Table table = client.getTable(dbName, testTblName);
Assert.assertNotNull("Unable to create a test table ", table);
// Four partitions: two under part=p1 and two under part=p2.
List<Partition> partitions = new ArrayList<>();
partitions.add(createPartition(Arrays.asList("p1", "SanFrancisco"), table));
partitions.add(createPartition(Arrays.asList("p1", "PaloAlto"), table));
partitions.add(createPartition(Arrays.asList("p2", "Seattle"), table));
partitions.add(createPartition(Arrays.asList("p2", "Phoenix"), table));
client.add_partitions(partitions);
// change locations of two of the partitions outside table directory
List<Partition> testPartitions = client.listPartitions(dbName, testTblName, (short) -1);
Assert.assertEquals(4, testPartitions.size());
// NOTE(review): assumes listPartitions returns partitions in an order where indices 2 and 3
// are the part=p2 partitions — TODO confirm the ordering guarantee.
Partition p1 = testPartitions.get(2);
p1.getSd().setLocation("/tmp/some_other_location/part=p2/city=Seattle");
Partition p2 = testPartitions.get(3);
p2.getSd().setLocation("/tmp/some_other_location/part=p2/city=Phoenix");
client.alter_partitions(dbName, testTblName, Arrays.asList(p1, p2));
// Request only "values" and "sd" projections.
GetPartitionsRequest request = getGetPartitionsRequest();
request.getProjectionSpec().setFieldList(Arrays.asList("values", "sd"));
request.setDbName(dbName);
request.setTblName(testTblName);
GetPartitionsResponse response = client.getPartitionsWithSpecs(request);
// Expect two specs: spec 0 shares the table's SD (in-directory partitions),
// spec 1 is a composing list for the relocated partitions.
Assert.assertNotNull("Response should have returned partition specs", response.getPartitionSpec());
Assert.assertEquals("We should have two partition specs", 2, response.getPartitionSpec().size());
Assert.assertNotNull("One SharedSD spec is expected", response.getPartitionSpec().get(0).getSharedSDPartitionSpec());
Assert.assertNotNull("One composing spec is expected", response.getPartitionSpec().get(1).getPartitionList());
PartitionSpecWithSharedSD partitionSpecWithSharedSD = response.getPartitionSpec().get(0).getSharedSDPartitionSpec();
Assert.assertNotNull("sd was requested but not returned", partitionSpecWithSharedSD.getSd());
Assert.assertEquals("shared SD should have table location", table.getSd().getLocation(), partitionSpecWithSharedSD.getSd().getLocation());
// The in-directory (p1) partitions come back sorted by value within the shared-SD spec.
List<List<String>> expectedVals = new ArrayList<>(2);
expectedVals.add(Arrays.asList("p1", "PaloAlto"));
expectedVals.add(Arrays.asList("p1", "SanFrancisco"));
for (int i = 0; i < partitionSpecWithSharedSD.getPartitions().size(); i++) {
PartitionWithoutSD retPartition = partitionSpecWithSharedSD.getPartitions().get(i);
Assert.assertEquals(2, retPartition.getValuesSize());
validateList(expectedVals.get(i), retPartition.getValues());
Assert.assertNull("parameters were not requested so should have been null", retPartition.getParameters());
}
// The relocated (p2) partitions come back in the composing spec, also sorted by value.
PartitionListComposingSpec composingSpec = response.getPartitionSpec().get(1).getPartitionList();
Assert.assertNotNull("composing spec should have returned 2 partitions", composingSpec.getPartitions());
Assert.assertEquals("composing spec should have returned 2 partitions", 2, composingSpec.getPartitionsSize());
expectedVals.clear();
expectedVals.add(Arrays.asList("p2", "Phoenix"));
expectedVals.add(Arrays.asList("p2", "Seattle"));
for (int i = 0; i < composingSpec.getPartitions().size(); i++) {
Partition partition = composingSpec.getPartitions().get(i);
Assert.assertEquals(2, partition.getValuesSize());
validateList(expectedVals.get(i), partition.getValues());
Assert.assertNull("parameters were not requested so should have been null", partition.getParameters());
}
}
Example usage of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project, taken from the class TestHiveMetaStore, method testAlterTableRenameBucketedColumnPositive.
/**
 * Verify that renaming a bucketed column via alter_table also updates the bucket-column
 * list in the stored table, and that exactly one bucket column remains afterwards.
 */
// FIX: @Test was missing, unlike every other test method in this file; without it
// the method is silently skipped under JUnit 4.
@Test
public void testAlterTableRenameBucketedColumnPositive() throws Exception {
String dbName = "alterTblDb";
String tblName = "altertbl";
// Start from a clean slate in case a previous run left state behind.
client.dropTable(dbName, tblName);
silentDropDatabase(dbName);
new DatabaseBuilder().setName(dbName).create(client, conf);
ArrayList<FieldSchema> origCols = new ArrayList<>(2);
origCols.add(new FieldSchema("originalColName", ColumnType.STRING_TYPE_NAME, ""));
origCols.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
Table origTbl = new TableBuilder()
    .setDbName(dbName)
    .setTableName(tblName)
    .setCols(origCols)
    .setBucketCols(Lists.newArrayList("originalColName"))
    .build(conf);
client.createTable(origTbl);
// Rename bucketed column positive case
ArrayList<FieldSchema> colsUpdated = new ArrayList<>(origCols);
colsUpdated.set(0, new FieldSchema("updatedColName1", ColumnType.STRING_TYPE_NAME, ""));
Table tblUpdated = client.getTable(dbName, tblName);
tblUpdated.getSd().setCols(colsUpdated);
// Keep the bucket-column list in sync with the renamed column.
tblUpdated.getSd().getBucketCols().set(0, colsUpdated.get(0).getName());
client.alter_table(dbName, tblName, tblUpdated);
Table resultTbl = client.getTable(dbName, tblUpdated.getTableName());
assertEquals("Num bucketed columns is not 1 ", 1, resultTbl.getSd().getBucketCols().size());
assertEquals("Bucketed column names incorrect", colsUpdated.get(0).getName(), resultTbl.getSd().getBucketCols().get(0));
silentDropDatabase(dbName);
}
Aggregations