Search in sources :

Example 91 with TableBuilder

Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project.

From the class TestMetaStoreServerUtils, method testUpdateTableStatsSlow_removesDoNotUpdateStats.

/**
 * Verify that the call to updateTableStatsSlow() removes DO_NOT_UPDATE_STATS from table params,
 * regardless of whether the marker was set to "true" or "false", and that no warehouse file
 * listing is triggered in either case.
 */
@Test
public void testUpdateTableStatsSlow_removesDoNotUpdateStats() throws TException {
    // Two tables carrying the DO_NOT_UPDATE_STATS marker, once as "true" and once as "false".
    Table tbl = new TableBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).addCol("id", "int").addTableParam(StatsSetupConst.DO_NOT_UPDATE_STATS, "true").build(null);
    Table tbl1 = new TableBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).addCol("id", "int").addTableParam(StatsSetupConst.DO_NOT_UPDATE_STATS, "false").build(null);
    Warehouse wh = mock(Warehouse.class);
    // Marker set to "true": the parameter must be stripped and no stats gathering attempted.
    MetaStoreServerUtils.updateTableStatsSlow(db, tbl, wh, false, true, null);
    assertThat(tbl.getParameters(), is(Collections.emptyMap()));
    verify(wh, never()).getFileStatusesForUnpartitionedTable(db, tbl);
    // Marker set to "false": the parameter is likewise removed without touching the warehouse.
    MetaStoreServerUtils.updateTableStatsSlow(db, tbl1, wh, true, false, null);
    // BUG FIX: the original asserted on tbl here (already verified above); the second
    // updateTableStatsSlow() call operates on tbl1, so tbl1's parameters must be checked.
    assertThat(tbl1.getParameters(), is(Collections.emptyMap()));
    verify(wh, never()).getFileStatusesForUnpartitionedTable(db, tbl1);
}
Also used : Warehouse(org.apache.hadoop.hive.metastore.Warehouse) Table(org.apache.hadoop.hive.metastore.api.Table) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest) Test(org.junit.Test)

Example 92 with TableBuilder

Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project.

From the class TestMetaStoreServerUtils, method testGetPartitionspecsGroupedBySDOnePartitionInTable.

/**
 * Test getPartitionspecsGroupedByStorageDescriptor() for partitions with a single
 * partition which is located under table location. The single partition should be
 * returned in one shared-SD spec rooted at the table location, with a path
 * relative to that root.
 */
@Test
public void testGetPartitionspecsGroupedBySDOnePartitionInTable() throws MetaException {
    // Table located at /foo; the partition sits directly underneath it at /foo/bar.
    Table tbl = new TableBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).addCol("id", "int").setLocation("/foo").build(null);
    // FIX: use the DB_NAME constant instead of the string literal "DB_NAME" so the
    // partition refers to the same database as its table (consistent with the table above).
    Partition p1 = new PartitionBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).setLocation("/foo/bar").addCol("a", "int").addValue("val1").setInputFormat("foo").build(null);
    List<PartitionSpec> result = MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Collections.singleton(p1));
    // A single partition under the table root yields exactly one shared-SD spec.
    assertThat(result.size(), is(1));
    PartitionSpec ps = result.get(0);
    assertThat(ps.getRootPath(), is(tbl.getSd().getLocation()));
    List<PartitionWithoutSD> partitions = ps.getSharedSDPartitionSpec().getPartitions();
    assertThat(partitions.size(), is(1));
    PartitionWithoutSD partition = partitions.get(0);
    // The stored path is relative to the shared root ("/foo").
    assertThat(partition.getRelativePath(), is("/bar"));
    assertThat(partition.getValues(), is(Collections.singletonList("val1")));
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) PartitionWithoutSD(org.apache.hadoop.hive.metastore.api.PartitionWithoutSD) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) PartitionSpec(org.apache.hadoop.hive.metastore.api.PartitionSpec) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest) Test(org.junit.Test)

Example 93 with TableBuilder

Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project.

From the class TestMetaStoreServerUtils, method testGetPartitionspecsGroupedBySDNullSD.

/**
 * Test getPartitionspecsGroupedByStorageDescriptor() for partitions with null SDs.
 * A partition whose storage descriptor has been unset should still be grouped into
 * a single spec, with a null root path and a null relative path.
 */
@Test
public void testGetPartitionspecsGroupedBySDNullSD() throws MetaException {
    // Table located at /foo; the partition deliberately has no storage descriptor.
    Table tbl = new TableBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).addCol("id", "int").setLocation("/foo").build(null);
    // FIX: use the DB_NAME constant instead of the string literal "DB_NAME" so the
    // partition refers to the same database as its table (consistent with the table above).
    Partition p1 = new PartitionBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).addCol("a", "int").addValue("val1").setInputFormat("foo").build(null);
    // Set SD to null and confirm the precondition before exercising the grouping.
    p1.unsetSd();
    assertThat(p1.getSd(), is((StorageDescriptor) null));
    List<PartitionSpec> result = MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Collections.singleton(p1));
    assertThat(result.size(), is(1));
    PartitionSpec ps = result.get(0);
    // With no SD there is no location to derive a root path from.
    assertThat(ps.getRootPath(), is((String) null));
    List<PartitionWithoutSD> partitions = ps.getSharedSDPartitionSpec().getPartitions();
    assertThat(partitions.size(), is(1));
    PartitionWithoutSD partition = partitions.get(0);
    assertThat(partition.getRelativePath(), is((String) null));
    assertThat(partition.getValues(), is(Collections.singletonList("val1")));
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) PartitionWithoutSD(org.apache.hadoop.hive.metastore.api.PartitionWithoutSD) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) PartitionSpec(org.apache.hadoop.hive.metastore.api.PartitionSpec) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest) Test(org.junit.Test)

Example 94 with TableBuilder

Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project.

From the class TestGetPartitionsUsingProjectionAndFilterSpecs, method testNonStandardPartitions.

/**
 * Verify getPartitionsWithSpecs() when some partitions live outside the table
 * directory: partitions under the table location come back grouped in a shared-SD
 * spec, while the relocated ones come back as a composing (full-partition) list.
 */
@Test
public void testNonStandardPartitions() throws TException {
    String testTblName = "test_non_standard";
    // Partitioned (part, city), bucketed, sorted table; the table param is descriptive metadata only.
    new TableBuilder().setTableName(testTblName).setDbName(dbName).addCol("ns_c1", "string", "comment 1").addCol("ns_c2", "int", "comment 2").addPartCol("part", "string").addPartCol("city", "string").addBucketCol("ns_c1").addSortCol("ns_c2", 1).addTableParam("tblparamKey", "Partitions of this table are not located within table directory").create(client, conf);
    Table table = client.getTable(dbName, testTblName);
    Assert.assertNotNull("Unable to create a test table ", table);
    // Four partitions, initially all created under the table directory.
    List<Partition> partitions = new ArrayList<>();
    partitions.add(createPartition(Arrays.asList("p1", "SanFrancisco"), table));
    partitions.add(createPartition(Arrays.asList("p1", "PaloAlto"), table));
    partitions.add(createPartition(Arrays.asList("p2", "Seattle"), table));
    partitions.add(createPartition(Arrays.asList("p2", "Phoenix"), table));
    client.add_partitions(partitions);
    // change locations of two of the partitions outside table directory
    List<Partition> testPartitions = client.listPartitions(dbName, testTblName, (short) -1);
    Assert.assertEquals(4, testPartitions.size());
    // NOTE(review): indices 2 and 3 are assumed to be the p2 partitions (Seattle, Phoenix);
    // this relies on listPartitions returning a deterministic order — confirm against the
    // metastore's ordering guarantees.
    Partition p1 = testPartitions.get(2);
    p1.getSd().setLocation("/tmp/some_other_location/part=p2/city=Seattle");
    Partition p2 = testPartitions.get(3);
    p2.getSd().setLocation("/tmp/some_other_location/part=p2/city=Phoenix");
    client.alter_partitions(dbName, testTblName, Arrays.asList(p1, p2));
    // Project only the partition values and storage descriptors.
    GetPartitionsRequest request = getGetPartitionsRequest();
    request.getProjectionSpec().setFieldList(Arrays.asList("values", "sd"));
    request.setDbName(dbName);
    request.setTblName(testTblName);
    GetPartitionsResponse response = client.getPartitionsWithSpecs(request);
    // Expect two specs: index 0 = shared-SD group (partitions under the table dir),
    // index 1 = composing list (partitions with non-standard locations).
    Assert.assertNotNull("Response should have returned partition specs", response.getPartitionSpec());
    Assert.assertEquals("We should have two partition specs", 2, response.getPartitionSpec().size());
    Assert.assertNotNull("One SharedSD spec is expected", response.getPartitionSpec().get(0).getSharedSDPartitionSpec());
    Assert.assertNotNull("One composing spec is expected", response.getPartitionSpec().get(1).getPartitionList());
    PartitionSpecWithSharedSD partitionSpecWithSharedSD = response.getPartitionSpec().get(0).getSharedSDPartitionSpec();
    Assert.assertNotNull("sd was requested but not returned", partitionSpecWithSharedSD.getSd());
    Assert.assertEquals("shared SD should have table location", table.getSd().getLocation(), partitionSpecWithSharedSD.getSd().getLocation());
    // The p1 partitions stayed under the table directory — they should be in the shared-SD spec.
    List<List<String>> expectedVals = new ArrayList<>(2);
    expectedVals.add(Arrays.asList("p1", "PaloAlto"));
    expectedVals.add(Arrays.asList("p1", "SanFrancisco"));
    for (int i = 0; i < partitionSpecWithSharedSD.getPartitions().size(); i++) {
        PartitionWithoutSD retPartition = partitionSpecWithSharedSD.getPartitions().get(i);
        Assert.assertEquals(2, retPartition.getValuesSize());
        validateList(expectedVals.get(i), retPartition.getValues());
        Assert.assertNull("parameters were not requested so should have been null", retPartition.getParameters());
    }
    // The relocated p2 partitions should come back as a composing list of full Partition objects.
    PartitionListComposingSpec composingSpec = response.getPartitionSpec().get(1).getPartitionList();
    Assert.assertNotNull("composing spec should have returned 2 partitions", composingSpec.getPartitions());
    Assert.assertEquals("composing spec should have returned 2 partitions", 2, composingSpec.getPartitionsSize());
    expectedVals.clear();
    expectedVals.add(Arrays.asList("p2", "Phoenix"));
    expectedVals.add(Arrays.asList("p2", "Seattle"));
    for (int i = 0; i < composingSpec.getPartitions().size(); i++) {
        Partition partition = composingSpec.getPartitions().get(i);
        Assert.assertEquals(2, partition.getValuesSize());
        validateList(expectedVals.get(i), partition.getValues());
        Assert.assertNull("parameters were not requested so should have been null", partition.getParameters());
    }
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) ArrayList(java.util.ArrayList) PartitionListComposingSpec(org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec) GetPartitionsResponse(org.apache.hadoop.hive.metastore.api.GetPartitionsResponse) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) GetPartitionsRequest(org.apache.hadoop.hive.metastore.api.GetPartitionsRequest) PartitionSpecWithSharedSD(org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD) PartitionWithoutSD(org.apache.hadoop.hive.metastore.api.PartitionWithoutSD) ArrayList(java.util.ArrayList) List(java.util.List) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)

Example 95 with TableBuilder

Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in the Apache Hive project.

From the class TestHiveMetaStore, method testAlterTableRenameBucketedColumnPositive.

/**
 * Positive case: renaming a column that is also a bucketing column should
 * propagate the new name into the table's bucket column list after alter_table.
 */
public void testAlterTableRenameBucketedColumnPositive() throws Exception {
    String databaseName = "alterTblDb";
    String tableName = "altertbl";
    // Start from a clean slate in case a previous run left state behind.
    client.dropTable(databaseName, tableName);
    silentDropDatabase(databaseName);
    new DatabaseBuilder().setName(databaseName).create(client, conf);
    // Original schema: a string column (which is also the bucket column) and an int column.
    ArrayList<FieldSchema> originalColumns = new ArrayList<>(2);
    originalColumns.add(new FieldSchema("originalColName", ColumnType.STRING_TYPE_NAME, ""));
    originalColumns.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
    Table originalTable = new TableBuilder().setDbName(databaseName).setTableName(tableName).setCols(originalColumns).setBucketCols(Lists.newArrayList("originalColName")).build(conf);
    client.createTable(originalTable);
    // Rename the bucketed column and push the change through alter_table.
    ArrayList<FieldSchema> renamedColumns = new ArrayList<>(originalColumns);
    renamedColumns.set(0, new FieldSchema("updatedColName1", ColumnType.STRING_TYPE_NAME, ""));
    Table alteredTable = client.getTable(databaseName, tableName);
    alteredTable.getSd().setCols(renamedColumns);
    alteredTable.getSd().getBucketCols().set(0, renamedColumns.get(0).getName());
    client.alter_table(databaseName, tableName, alteredTable);
    // The re-fetched table must report exactly one bucket column bearing the new name.
    Table fetchedTable = client.getTable(databaseName, alteredTable.getTableName());
    assertEquals("Num bucketed columns is not 1 ", 1, fetchedTable.getSd().getBucketCols().size());
    assertEquals("Bucketed column names incorrect", renamedColumns.get(0).getName(), fetchedTable.getSd().getBucketCols().get(0));
    // Clean up the database created by this test.
    silentDropDatabase(databaseName);
}
Also used : DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.metastore.api.Table) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder)

Aggregations

TableBuilder (org.apache.hadoop.hive.metastore.client.builder.TableBuilder)136 Table (org.apache.hadoop.hive.metastore.api.Table)111 Test (org.junit.Test)92 DatabaseBuilder (org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder)81 Database (org.apache.hadoop.hive.metastore.api.Database)40 Partition (org.apache.hadoop.hive.metastore.api.Partition)36 NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException)35 PartitionBuilder (org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder)33 MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)31 FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema)30 ArrayList (java.util.ArrayList)28 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)27 SourceTable (org.apache.hadoop.hive.metastore.api.SourceTable)25 CatalogBuilder (org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder)23 Path (org.apache.hadoop.fs.Path)19 Catalog (org.apache.hadoop.hive.metastore.api.Catalog)19 Type (org.apache.hadoop.hive.metastore.api.Type)19 InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException)17 TException (org.apache.thrift.TException)16 IOException (java.io.IOException)15