Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
From the class TestAddPartitions, method testAddPartitionDifferentNamesAndTypesInColAndTableCol:
@Test
public void testAddPartitionDifferentNamesAndTypesInColAndTableCol() throws Exception {
  createTable();
  Partition partition = new PartitionBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addValue("1000")
      .addCol("time", "int")
      .build(metaStore.getConf());
  client.add_partition(partition);
  Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=1000");
  Assert.assertNotNull(part);
  Assert.assertEquals(TABLE_NAME, part.getTableName());
  Assert.assertEquals("1000", part.getValues().get(0));
  Assert.assertTrue(metaStore.isPathExists(new Path(part.getSd().getLocation())));
}
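Both TestAddPartitions snippets call a createTable() helper that is not shown here. The following is only a rough sketch of what such a helper is assumed to look like, inferred from the "year=..." partition lookups and the column names asserted below; the actual helper in TestAddPartitions may differ.

private Table createTable() throws Exception {
  // Sketch only: a table with two data columns and a single "year" partition key,
  // created through the metastore client used elsewhere in these tests.
  Table table = new TableBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addCol("test_id", "int", "test col id")
      .addCol("test_value", "string", "test col value")
      .addPartCol("year", "string")   // the partition key the "year=1000" lookups rely on
      .build(metaStore.getConf());
  client.createTable(table);
  return client.getTable(DB_NAME, TABLE_NAME);
}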
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
From the class TestAddPartitions, method testAddPartitionWithDefaultAttributes:
@Test
public void testAddPartitionWithDefaultAttributes() throws Exception {
  Table table = createTable();
  Partition partition = new PartitionBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addValue("2017")
      .setCols(getYearPartCol())
      .addCol("test_id", "int", "test col id")
      .addCol("test_value", "string", "test col value")
      .build(metaStore.getConf());
  client.add_partition(partition);
  // Check if the default values are set for all unfilled attributes
  Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=2017");
  Assert.assertNotNull(part);
  Assert.assertEquals(TABLE_NAME, part.getTableName());
  Assert.assertEquals(DB_NAME, part.getDbName());
  Assert.assertEquals(Lists.newArrayList("2017"), part.getValues());
  List<FieldSchema> cols = new ArrayList<>();
  cols.addAll(getYearPartCol());
  cols.add(new FieldSchema("test_id", "int", "test col id"));
  cols.add(new FieldSchema("test_value", "string", "test col value"));
  Assert.assertEquals(cols, part.getSd().getCols());
  verifyPartitionAttributesDefaultValues(part, table.getSd().getLocation());
}
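This test also references a getYearPartCol() helper. A plausible, purely hypothetical shape for it, consistent with the setCols(...) and cols.addAll(...) usage above, is a one-element FieldSchema list for the "year" partition column:

// Hypothetical helper, not the actual TestAddPartitions code: describes the "year"
// partition column referenced by setCols()/addAll() in the test above.
private static List<FieldSchema> getYearPartCol() {
  return Lists.newArrayList(new FieldSchema("year", "string", "year part col"));
}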
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
From the class TestMetaStoreServerUtils, method testGetPartitionspecsGroupedBySDonePartitionExternal:
/**
 * Test getPartitionspecsGroupedByStorageDescriptor() with a single partition
 * that is located outside the table location.
 */
@Test
public void testGetPartitionspecsGroupedBySDonePartitionExternal() throws MetaException {
  // Create the table and a partition located outside the table location
  Table tbl = new TableBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addCol("id", "int")
      .setLocation("/foo")
      .build(null);
  Partition p1 = new PartitionBuilder()
      .setDbName("DB_NAME")
      .setTableName(TABLE_NAME)
      .setLocation("/a/b")
      .addCol("a", "int")
      .addValue("val1")
      .setInputFormat("foo")
      .build(null);
  List<PartitionSpec> result =
      MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Collections.singleton(p1));
  assertThat(result.size(), is(1));
  PartitionSpec ps = result.get(0);
  assertThat(ps.getRootPath(), is((String) null));
  List<Partition> partitions = ps.getPartitionList().getPartitions();
  assertThat(partitions.size(), is(1));
  Partition partition = partitions.get(0);
  assertThat(partition.getSd().getLocation(), is("/a/b"));
  assertThat(partition.getValues(), is(Collections.singletonList("val1")));
}
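As the assertions above suggest, a PartitionSpec returned by getPartitionspecsGroupedByStorageDescriptor() carries its partitions in one of two shapes: a shared-SD group with relative paths, or a plain partition list with full storage descriptors. The following is an illustrative sketch (not part of the test) of how a caller might walk the result:

// Illustrative only: distinguish shared-SD groups (partitions under the table location,
// identified by relative paths) from plain partition lists (partitions that keep their
// own storage descriptor, e.g. the external location "/a/b").
List<PartitionSpec> specs =
    MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Collections.singleton(p1));
for (PartitionSpec spec : specs) {
  if (spec.isSetSharedSDPartitionSpec()) {
    for (PartitionWithoutSD p : spec.getSharedSDPartitionSpec().getPartitions()) {
      System.out.println(spec.getRootPath() + p.getRelativePath());
    }
  } else {
    for (Partition p : spec.getPartitionList().getPartitions()) {
      System.out.println(p.getSd().getLocation());
    }
  }
}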
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
From the class TestMetaStoreServerUtils, method testGetPartitionspecsGroupedBySDonePartitionCombined:
/**
 * Test getPartitionspecsGroupedByStorageDescriptor() with multiple partitions:
 * <ul>
 * <li>a partition with a null SD,</li>
 * <li>two partitions under the table location,</li>
 * <li>one partition outside of the table location.</li>
 * </ul>
 */
@Test
public void testGetPartitionspecsGroupedBySDonePartitionCombined() throws MetaException {
  // Create the table and the partition objects
  String sharedInputFormat = "foo1";
  Table tbl = new TableBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .addCol("id", "int")
      .setLocation("/foo")
      .build(null);
  Partition p1 = new PartitionBuilder()
      .setDbName("DB_NAME")
      .setTableName(TABLE_NAME)
      .setLocation("/foo/bar")
      .addCol("a1", "int")
      .addValue("val1")
      .setInputFormat(sharedInputFormat)
      .build(null);
  Partition p2 = new PartitionBuilder()
      .setDbName("DB_NAME")
      .setTableName(TABLE_NAME)
      .setLocation("/a/b")
      .addCol("a2", "int")
      .addValue("val2")
      .setInputFormat("foo2")
      .build(null);
  Partition p3 = new PartitionBuilder()
      .setDbName("DB_NAME")
      .setTableName(TABLE_NAME)
      .addCol("a3", "int")
      .addValue("val3")
      .setInputFormat("foo3")
      .build(null);
  Partition p4 = new PartitionBuilder()
      .setDbName("DB_NAME")
      .setTableName("TABLE_NAME")
      .setLocation("/foo/baz")
      .addCol("a1", "int")
      .addValue("val4")
      .setInputFormat(sharedInputFormat)
      .build(null);
  p3.unsetSd();

  List<PartitionSpec> result =
      MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Arrays.asList(p1, p2, p3, p4));
  assertThat(result.size(), is(3));

  PartitionSpec ps1 = result.get(0);
  assertThat(ps1.getRootPath(), is((String) null));
  assertThat(ps1.getPartitionList(), is((List<Partition>) null));
  PartitionSpecWithSharedSD partSpec = ps1.getSharedSDPartitionSpec();
  List<PartitionWithoutSD> partitions1 = partSpec.getPartitions();
  assertThat(partitions1.size(), is(1));
  PartitionWithoutSD partition1 = partitions1.get(0);
  assertThat(partition1.getRelativePath(), is((String) null));
  assertThat(partition1.getValues(), is(Collections.singletonList("val3")));

  PartitionSpec ps2 = result.get(1);
  assertThat(ps2.getRootPath(), is(tbl.getSd().getLocation()));
  assertThat(ps2.getPartitionList(), is((List<Partition>) null));
  List<PartitionWithoutSD> partitions2 = ps2.getSharedSDPartitionSpec().getPartitions();
  assertThat(partitions2.size(), is(2));
  PartitionWithoutSD partition2_1 = partitions2.get(0);
  PartitionWithoutSD partition2_2 = partitions2.get(1);
  if (partition2_1.getRelativePath().equals("/baz")) {
    // Swap so that partition2_1 refers to "/bar" and partition2_2 to "/baz"
    PartitionWithoutSD tmp = partition2_1;
    partition2_1 = partition2_2;
    partition2_2 = tmp;
  }
  assertThat(partition2_1.getRelativePath(), is("/bar"));
  assertThat(partition2_1.getValues(), is(Collections.singletonList("val1")));
  assertThat(partition2_2.getRelativePath(), is("/baz"));
  assertThat(partition2_2.getValues(), is(Collections.singletonList("val4")));

  PartitionSpec ps4 = result.get(2);
  assertThat(ps4.getRootPath(), is((String) null));
  assertThat(ps4.getSharedSDPartitionSpec(), is((PartitionSpecWithSharedSD) null));
  List<Partition> partitions = ps4.getPartitionList().getPartitions();
  assertThat(partitions.size(), is(1));
  Partition partition = partitions.get(0);
  assertThat(partition.getSd().getLocation(), is("/a/b"));
  assertThat(partition.getValues(), is(Collections.singletonList("val2")));
}
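Taken together, the assertions show how the grouping behaves: only the partitions whose locations sit under the table root ("/foo/bar" and "/foo/baz") are collapsed into a PartitionSpecWithSharedSD keyed by the table location and addressed by relative paths. The partition with an unset SD is returned in its own shared-SD spec with a null root path, and the partition stored outside the table location ("/a/b") is returned as a plain partition list that keeps its full storage descriptor.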
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
From the class TestMetaStoreServerUtils, method testCompareWithSdSamePrefixDifferentOutputFormat:
/**
 * Two StorageDescriptorKey objects with the same base location
 * should be equal if and only if their output formats are equal.
 */
@Test
public void testCompareWithSdSamePrefixDifferentOutputFormat() throws MetaException {
  Partition p1 = new PartitionBuilder()
      .setDbName("DB_NAME")
      .setTableName(TABLE_NAME)
      .setLocation("l1")
      .addCol("a", "int")
      .addValue("val1")
      .setOutputFormat("foo")
      .build(null);
  Partition p2 = new PartitionBuilder()
      .setDbName("DB_NAME")
      .setTableName(TABLE_NAME)
      .setLocation("l2")
      .addCol("a", "int")
      .setOutputFormat("bar")
      .addValue("val1")
      .build(null);
  Partition p3 = new PartitionBuilder()
      .setDbName("DB_NAME")
      .setTableName(TABLE_NAME)
      .setLocation("l2")
      .addCol("a", "int")
      .setOutputFormat("foo")
      .addValue("val1")
      .build(null);
  assertThat(new MetaStoreServerUtils.StorageDescriptorKey("a", p1.getSd()),
      IsNot.not(new MetaStoreServerUtils.StorageDescriptorKey("a", p2.getSd())));
  assertThat(new MetaStoreServerUtils.StorageDescriptorKey("a", p1.getSd()),
      is(new MetaStoreServerUtils.StorageDescriptorKey("a", p3.getSd())));
}
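Because StorageDescriptorKey defines equality on the base location plus the storage descriptor's format, it can serve as a map key when bucketing partitions by storage layout, which is what the grouping utility tested above relies on. A minimal sketch, assuming StorageDescriptorKey's hashCode() is consistent with the equals() behaviour exercised by this test:

// Sketch only: bucket partitions that share a base location and an output format.
Map<MetaStoreServerUtils.StorageDescriptorKey, List<Partition>> grouped = new HashMap<>();
for (Partition p : Arrays.asList(p1, p2, p3)) {
  grouped
      .computeIfAbsent(new MetaStoreServerUtils.StorageDescriptorKey("a", p.getSd()),
          k -> new ArrayList<>())
      .add(p);
}
// With the partitions above, p1 and p3 (output format "foo") share one bucket,
// while p2 (output format "bar") gets its own, so grouped.size() == 2.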