Search in sources:

Example 36 with PartitionBuilder

Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in the Apache Hive project.

the class TestAddPartitions method testAddPartitionDifferentNamesAndTypesInColAndTableCol.

@Test
public void testAddPartitionDifferentNamesAndTypesInColAndTableCol() throws Exception {
    createTable();
    // Build a partition whose SD column ("time"/int) does not match the table's
    // partition key (the key name is "year", per the lookup below); adding it
    // should still succeed — the metastore does not reject mismatched SD columns.
    Partition newPartition = new PartitionBuilder()
            .setDbName(DB_NAME)
            .setTableName(TABLE_NAME)
            .addValue("1000")
            .addCol("time", "int")
            .build(metaStore.getConf());
    client.add_partition(newPartition);

    // The partition must be retrievable by its key and backed by an existing path.
    Partition fetched = client.getPartition(DB_NAME, TABLE_NAME, "year=1000");
    Assert.assertNotNull(fetched);
    Assert.assertEquals(TABLE_NAME, fetched.getTableName());
    Assert.assertEquals("1000", fetched.getValues().get(0));
    Assert.assertTrue(metaStore.isPathExists(new Path(fetched.getSd().getLocation())));
}
Also used : Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.metastore.api.Partition) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)

Example 37 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

the class TestAddPartitions method testAddPartitionWithDefaultAttributes.

@Test
public void testAddPartitionWithDefaultAttributes() throws Exception {
    Table table = createTable();
    // Fill only values and columns; everything else (location, serde, params, ...)
    // is left for the metastore to default.
    Partition built = new PartitionBuilder()
            .setDbName(DB_NAME)
            .setTableName(TABLE_NAME)
            .addValue("2017")
            .setCols(getYearPartCol())
            .addCol("test_id", "int", "test col id")
            .addCol("test_value", "string", "test col value")
            .build(metaStore.getConf());
    client.add_partition(built);

    // Check if the default values are set for all unfilled attributes
    Partition stored = client.getPartition(DB_NAME, TABLE_NAME, "year=2017");
    Assert.assertNotNull(stored);
    Assert.assertEquals(TABLE_NAME, stored.getTableName());
    Assert.assertEquals(DB_NAME, stored.getDbName());
    Assert.assertEquals(Lists.newArrayList("2017"), stored.getValues());

    // Stored SD columns must be exactly: year partition col + the two explicit cols,
    // in that order.
    List<FieldSchema> expectedCols = new ArrayList<>(getYearPartCol());
    expectedCols.add(new FieldSchema("test_id", "int", "test col id"));
    expectedCols.add(new FieldSchema("test_value", "string", "test col value"));
    Assert.assertEquals(expectedCols, stored.getSd().getCols());
    verifyPartitionAttributesDefaultValues(stored, table.getSd().getLocation());
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)

Example 38 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

the class TestMetaStoreServerUtils method testGetPartitionspecsGroupedBySDonePartitionExternal.

/**
 * Test getPartitionspecsGroupedByStorageDescriptor() for partitions with a single
 * partition which is located outside the table location.
 */
@Test
public void testGetPartitionspecsGroupedBySDonePartitionExternal() throws MetaException {
    // Create database and table
    Table tbl = new TableBuilder()
            .setDbName(DB_NAME)
            .setTableName(TABLE_NAME)
            .addCol("id", "int")
            .setLocation("/foo")
            .build(null);
    // FIX: use the DB_NAME constant — the original passed the string literal
    // "DB_NAME", inconsistent with the table built above (quotes around the
    // constant name look like a typo).
    Partition p1 = new PartitionBuilder()
            .setDbName(DB_NAME)
            .setTableName(TABLE_NAME)
            .setLocation("/a/b")
            .addCol("a", "int")
            .addValue("val1")
            .setInputFormat("foo")
            .build(null);
    List<PartitionSpec> result =
            MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Collections.singleton(p1));
    // A partition located outside the table location ("/a/b" vs "/foo") cannot be
    // expressed relative to the table root, so the expectation is a single spec
    // with a null root path carrying a plain partition list.
    assertThat(result.size(), is(1));
    PartitionSpec ps = result.get(0);
    assertThat(ps.getRootPath(), is((String) null));
    List<Partition> partitions = ps.getPartitionList().getPartitions();
    assertThat(partitions.size(), is(1));
    Partition partition = partitions.get(0);
    assertThat(partition.getSd().getLocation(), is("/a/b"));
    assertThat(partition.getValues(), is(Collections.singletonList("val1")));
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) PartitionSpec(org.apache.hadoop.hive.metastore.api.PartitionSpec) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest) Test(org.junit.Test)

Example 39 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

the class TestMetaStoreServerUtils method testGetPartitionspecsGroupedBySDonePartitionCombined.

/**
 * Test getPartitionspecsGroupedByStorageDescriptor() multiple partitions:
 * <ul>
 *   <li>Partition with null SD</li>
 *   <li>Two partitions under the table location</li>
 *   <li>One partition outside of table location</li>
 * </ul>
 */
@Test
public void testGetPartitionspecsGroupedBySDonePartitionCombined() throws MetaException {
    // Create database and table
    String sharedInputFormat = "foo1";
    Table tbl = new TableBuilder().setDbName(DB_NAME).setTableName(TABLE_NAME).addCol("id", "int").setLocation("/foo").build(null);
    // NOTE(review): p1-p3 pass the literal "DB_NAME" (and p4 the literal
    // "TABLE_NAME") rather than the constants used for the table — presumably a
    // typo that the grouping under test does not care about; confirm before fixing.
    // p1: under table location /foo, shares input format with p4.
    Partition p1 = new PartitionBuilder().setDbName("DB_NAME").setTableName(TABLE_NAME).setLocation("/foo/bar").addCol("a1", "int").addValue("val1").setInputFormat(sharedInputFormat).build(null);
    // p2: located outside the table location.
    Partition p2 = new PartitionBuilder().setDbName("DB_NAME").setTableName(TABLE_NAME).setLocation("/a/b").addCol("a2", "int").addValue("val2").setInputFormat("foo2").build(null);
    // p3: its SD is removed below, exercising the null-SD grouping path.
    Partition p3 = new PartitionBuilder().setDbName("DB_NAME").setTableName(TABLE_NAME).addCol("a3", "int").addValue("val3").setInputFormat("foo3").build(null);
    // p4: under table location, same input format as p1 -> expected to share p1's SD group.
    Partition p4 = new PartitionBuilder().setDbName("DB_NAME").setTableName("TABLE_NAME").setLocation("/foo/baz").addCol("a1", "int").addValue("val4").setInputFormat(sharedInputFormat).build(null);
    p3.unsetSd();
    List<PartitionSpec> result = MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Arrays.asList(p1, p2, p3, p4));
    // Expect three groups: {p3 (no SD)}, {p1, p4 (shared SD under /foo)}, {p2 (external)}.
    // The assertions below depend on this result ordering.
    assertThat(result.size(), is(3));
    // Group 1: the SD-less partition — no root path, exposed via the shared-SD spec.
    PartitionSpec ps1 = result.get(0);
    assertThat(ps1.getRootPath(), is((String) null));
    assertThat(ps1.getPartitionList(), is((List<Partition>) null));
    PartitionSpecWithSharedSD partSpec = ps1.getSharedSDPartitionSpec();
    List<PartitionWithoutSD> partitions1 = partSpec.getPartitions();
    assertThat(partitions1.size(), is(1));
    PartitionWithoutSD partition1 = partitions1.get(0);
    assertThat(partition1.getRelativePath(), is((String) null));
    assertThat(partition1.getValues(), is(Collections.singletonList("val3")));
    // Group 2: p1 and p4 rooted at the table location, stored as relative paths.
    PartitionSpec ps2 = result.get(1);
    assertThat(ps2.getRootPath(), is(tbl.getSd().getLocation()));
    assertThat(ps2.getPartitionList(), is((List<Partition>) null));
    List<PartitionWithoutSD> partitions2 = ps2.getSharedSDPartitionSpec().getPartitions();
    assertThat(partitions2.size(), is(2));
    PartitionWithoutSD partition2_1 = partitions2.get(0);
    PartitionWithoutSD partition2_2 = partitions2.get(1);
    // The order of p1/p4 within the group is not guaranteed; normalize so that
    // partition2_1 is the "/bar" one. NOTE(review): the comparison is against
    // "baz" (no leading slash) while the asserts expect "/baz" — looks like this
    // branch can never fire; verify whether the check should be "/baz".
    if (partition2_1.getRelativePath().equals("baz")) {
        // Swap p2_1 and p2_2
        PartitionWithoutSD tmp = partition2_1;
        partition2_1 = partition2_2;
        partition2_2 = tmp;
    }
    assertThat(partition2_1.getRelativePath(), is("/bar"));
    assertThat(partition2_1.getValues(), is(Collections.singletonList("val1")));
    assertThat(partition2_2.getRelativePath(), is("/baz"));
    assertThat(partition2_2.getValues(), is(Collections.singletonList("val4")));
    // Group 3: the external partition (/a/b) — no root, plain partition list.
    PartitionSpec ps4 = result.get(2);
    assertThat(ps4.getRootPath(), is((String) null));
    assertThat(ps4.getSharedSDPartitionSpec(), is((PartitionSpecWithSharedSD) null));
    List<Partition> partitions = ps4.getPartitionList().getPartitions();
    assertThat(partitions.size(), is(1));
    Partition partition = partitions.get(0);
    assertThat(partition.getSd().getLocation(), is("/a/b"));
    assertThat(partition.getValues(), is(Collections.singletonList("val2")));
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) PartitionWithoutSD(org.apache.hadoop.hive.metastore.api.PartitionWithoutSD) List(java.util.List) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) PartitionSpec(org.apache.hadoop.hive.metastore.api.PartitionSpec) PartitionSpecWithSharedSD(org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest) Test(org.junit.Test)

Example 40 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

the class TestMetaStoreServerUtils method testCompareWithSdSamePrefixDifferentOutputFormat.

/**
 * Two StorageDescriptorKey objects with the same base location
 * should be equal iff their output formats are equal.
 */
@Test
public void testCompareWithSdSamePrefixDifferentOutputFormat() throws MetaException {
    // Three partitions that differ only in location and output format; the keys
    // under test are all built over the same base path "a".
    Partition withFoo = new PartitionBuilder().setDbName("DB_NAME").setTableName(TABLE_NAME)
            .setLocation("l1").addCol("a", "int").addValue("val1").setOutputFormat("foo").build(null);
    Partition withBar = new PartitionBuilder().setDbName("DB_NAME").setTableName(TABLE_NAME)
            .setLocation("l2").addCol("a", "int").setOutputFormat("bar").addValue("val1").build(null);
    Partition withFooAgain = new PartitionBuilder().setDbName("DB_NAME").setTableName(TABLE_NAME)
            .setLocation("l2").addCol("a", "int").setOutputFormat("foo").addValue("val1").build(null);
    // Different output formats ("foo" vs "bar") -> keys must differ.
    assertThat(new MetaStoreServerUtils.StorageDescriptorKey("a", withFoo.getSd()),
            IsNot.not(new MetaStoreServerUtils.StorageDescriptorKey("a", withBar.getSd())));
    // Same output format ("foo") -> keys must be equal despite differing locations.
    assertThat(new MetaStoreServerUtils.StorageDescriptorKey("a", withFoo.getSd()),
            is(new MetaStoreServerUtils.StorageDescriptorKey("a", withFooAgain.getSd())));
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest) Test(org.junit.Test)

Aggregations

PartitionBuilder (org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder)75 Partition (org.apache.hadoop.hive.metastore.api.Partition)63 Test (org.junit.Test)47 Table (org.apache.hadoop.hive.metastore.api.Table)44 TableBuilder (org.apache.hadoop.hive.metastore.client.builder.TableBuilder)33 MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)28 DatabaseBuilder (org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder)27 Database (org.apache.hadoop.hive.metastore.api.Database)22 ArrayList (java.util.ArrayList)14 MetastoreUnitTest (org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest)10 CatalogBuilder (org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder)10 EnvironmentContext (org.apache.hadoop.hive.metastore.api.EnvironmentContext)9 SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint)7 SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint)7 SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint)7 SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint)7 Catalog (org.apache.hadoop.hive.metastore.api.Catalog)6 HashMap (java.util.HashMap)5 FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema)5 HashSet (java.util.HashSet)4