Usage of org.apache.iceberg.catalog.TableIdentifier in the Apache Hive project: class TestHiveIcebergStorageHandlerNoScan, method testAlterTableChangeColumnNameAndComment.
@Test
public void testAlterTableChangeColumnNameAndComment() throws TException, InterruptedException {
  // Create a two-column table, then rename the commented column via ALTER TABLE ... CHANGE COLUMN
  // and verify the rename + new comment show up on both the Iceberg and the HMS side.
  TableIdentifier identifier = TableIdentifier.of("default", "customers");
  Schema schema = new Schema(
      optional(1, "customer_id", Types.IntegerType.get()),
      optional(2, "last_name", Types.StringType.get(), "This is last name"));
  testTables.createTable(shell, identifier.name(), schema, SPEC, FileFormat.PARQUET, ImmutableList.of());

  shell.executeStatement("ALTER TABLE default.customers CHANGE COLUMN " +
      "last_name family_name string COMMENT 'This is family name'");

  // Load the post-ALTER schemas from both metadata stores.
  org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
  org.apache.hadoop.hive.metastore.api.Table hmsTable = shell.metastore().getTable("default", "customers");
  List<FieldSchema> icebergCols = HiveSchemaUtil.convert(icebergTable.schema());
  List<FieldSchema> hmsCols = hmsTable.getSd().getCols();

  List<FieldSchema> expected = Lists.newArrayList(
      new FieldSchema("customer_id", "int", null),
      new FieldSchema("family_name", "string", "This is family name"));
  Assert.assertEquals(expected, icebergCols);
  // Outside the Hive catalog, HMS reports "from deserializer" for columns that have no comment.
  if (testTableType != TestTables.TestTableType.HIVE_CATALOG) {
    expected.stream().filter(fs -> fs.getComment() == null).forEach(fs -> fs.setComment("from deserializer"));
  }
  Assert.assertEquals(expected, hmsCols);
}
Usage of org.apache.iceberg.catalog.TableIdentifier in the Apache Hive project: class TestHiveIcebergStorageHandlerNoScan, method testAlterTableAddColumnsConcurrently.
@Test
public void testAlterTableAddColumnsConcurrently() throws Exception {
  // Simulate two concurrent schema changes: an in-flight Iceberg-side addColumn and a
  // Hive-side ALTER TABLE ... ADD COLUMNS that commits first. The Iceberg commit is then
  // based on stale metadata and must fail with CommitFailedException.
  TableIdentifier identifier = TableIdentifier.of("default", "customers");
  testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, SPEC,
      FileFormat.PARQUET, ImmutableList.of());
  org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
  // Stage (but do not yet commit) an Iceberg-side column addition.
  UpdateSchema updateSchema = icebergTable.updateSchema().addColumn("newfloatcol", Types.FloatType.get());
  // Commit a competing schema change through Hive first.
  shell.executeStatement("ALTER TABLE default.customers ADD COLUMNS " +
      "(newintcol int, newstringcol string COMMENT 'Column with description')");
  try {
    updateSchema.commit();
    // A descriptive message so a test failure explains what went wrong, instead of a bare fail().
    Assert.fail("Expected CommitFailedException: the Hive-side ALTER TABLE committed first");
  } catch (CommitFailedException expectedException) {
    // Should fail to commit the addition of newfloatcol as another commit went in from Hive side adding 2 other cols
  }
  // Same verification should be applied, as we expect newfloatcol NOT to be added to the schema
  verifyAlterTableAddColumnsTests();
}
Usage of org.apache.iceberg.catalog.TableIdentifier in the Apache Hive project: class TestHiveIcebergStorageHandlerNoScan, method testCreateTableWithoutSpec.
@Test
public void testCreateTableWithoutSpec() {
  TableIdentifier identifier = TableIdentifier.of("default", "customers");
  // Build the CREATE statement from named pieces so the long SQL string stays readable.
  String schemaJson = SchemaParser.toJson(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA);
  String createSql = "CREATE EXTERNAL TABLE customers " +
      "STORED BY ICEBERG " +
      testTables.locationForCreateTableSQL(identifier) +
      "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" + schemaJson +
      "','" + InputFormatConfig.CATALOG_NAME + "'='" + testTables.catalogName() + "')";
  shell.executeStatement(createSql);

  // Check the Iceberg table partition data: no PARTITIONED BY clause means unpartitioned.
  org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
  Assert.assertEquals(PartitionSpec.unpartitioned(), icebergTable.spec());
}
Usage of org.apache.iceberg.catalog.TableIdentifier in the Apache Hive project: class TestHiveIcebergStorageHandlerNoScan, method testCommandsWithPartitionClauseThrow.
@Test
public void testCommandsWithPartitionClauseThrow() {
  // Create an identity-partitioned Iceberg table, then check that every statement carrying an
  // explicit PARTITION clause is rejected.
  TableIdentifier target = TableIdentifier.of("default", "target");
  PartitionSpec spec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA)
      .identity("last_name")
      .build();
  testTables.createTable(shell, target.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, spec,
      FileFormat.PARQUET, ImmutableList.of());

  String[] commands = new String[] {
      "INSERT INTO target PARTITION (last_name='Johnson') VALUES (1, 'Rob')",
      "INSERT OVERWRITE TABLE target PARTITION (last_name='Johnson') SELECT * FROM target WHERE FALSE",
      "DESCRIBE target PARTITION (last_name='Johnson')",
      "TRUNCATE target PARTITION (last_name='Johnson')"
  };
  for (String command : commands) {
    AssertHelpers.assertThrows("Should throw unsupported operation exception for queries with partition spec",
        IllegalArgumentException.class, "Using partition spec in query is unsupported",
        () -> shell.executeStatement(command));
  }
}
Usage of org.apache.iceberg.catalog.TableIdentifier in the Apache Hive project: class TestHiveIcebergStorageHandlerNoScan, method verifyAlterTableAddColumnsTests.
/**
 * Asserts that after an ALTER TABLE ... ADD COLUMNS the schema contains the newintcol and
 * newstringcol columns on both the HMS and the Iceberg side.
 * @throws Exception - any test error
 */
private void verifyAlterTableAddColumnsTests() throws Exception {
  TableIdentifier identifier = TableIdentifier.of("default", "customers");
  org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
  org.apache.hadoop.hive.metastore.api.Table hmsTable = shell.metastore().getTable("default", "customers");

  List<FieldSchema> icebergCols = HiveSchemaUtil.convert(icebergTable.schema());
  List<FieldSchema> hmsCols = hmsTable.getSd().getCols();
  List<FieldSchema> expected = Lists.newArrayList(
      new FieldSchema("customer_id", "bigint", null),
      new FieldSchema("first_name", "string", "This is first name"),
      new FieldSchema("last_name", "string", "This is last name"),
      new FieldSchema("newintcol", "int", null),
      new FieldSchema("newstringcol", "string", "Column with description"));
  Assert.assertEquals(expected, icebergCols);
  // NOTE(review): only the first column is patched with "from deserializer" here, whereas
  // testAlterTableChangeColumnNameAndComment patches every null-comment column — confirm this
  // asymmetry is intentional (i.e. newintcol really keeps a null comment in HMS).
  if (testTableType != TestTables.TestTableType.HIVE_CATALOG) {
    expected.get(0).setComment("from deserializer");
  }
  Assert.assertEquals(expected, hmsCols);
}
Aggregations