Example usage of org.apache.iceberg.catalog.TableIdentifier in the Apache Hive project:
class TestHiveIcebergStorageHandlerNoScan, method testCreateTableStoredByIceberg.
@Test
public void testCreateTableStoredByIceberg() {
  // The STORED BY keyword is case-insensitive: mixed-case "iceBerg" must still
  // resolve to the Iceberg storage handler.
  TableIdentifier tableId = TableIdentifier.of("default", "customers");
  String createSql = String.format(
      "CREATE EXTERNAL TABLE customers (customer_id BIGINT, first_name STRING, last_name STRING) " +
          "STORED BY iceBerg %s TBLPROPERTIES ('%s'='%s')",
      testTables.locationForCreateTableSQL(tableId),
      InputFormatConfig.CATALOG_NAME,
      testTables.catalogName());
  shell.executeStatement(createSql);
  // Loading the table proves the backing Iceberg table was actually created.
  Assert.assertNotNull(testTables.loadTable(tableId));
}
Example usage of org.apache.iceberg.catalog.TableIdentifier in the Apache Hive project:
class TestHiveIcebergStorageHandlerNoScan, method testCreateTableStoredByIcebergWithSerdeProperties.
@Test
public void testCreateTableStoredByIcebergWithSerdeProperties() {
  // SERDEPROPERTIES supplied in the DDL should be propagated into the Iceberg
  // table properties (here: default file format = orc).
  TableIdentifier tableId = TableIdentifier.of("default", "customers");
  String createSql = String.format(
      "CREATE EXTERNAL TABLE customers (customer_id BIGINT, first_name STRING, last_name STRING) " +
          "STORED BY iceberg WITH SERDEPROPERTIES('%s'='%s') %s TBLPROPERTIES ('%s'='%s')",
      TableProperties.DEFAULT_FILE_FORMAT,
      "orc",
      testTables.locationForCreateTableSQL(tableId),
      InputFormatConfig.CATALOG_NAME,
      testTables.catalogName());
  shell.executeStatement(createSql);
  Table created = testTables.loadTable(tableId);
  Assert.assertNotNull(created);
  Assert.assertEquals("orc", created.properties().get(TableProperties.DEFAULT_FILE_FORMAT));
}
Example usage of org.apache.iceberg.catalog.TableIdentifier in the Apache Hive project:
class TestHiveIcebergStorageHandlerNoScan, method testCreateTableWithColumnComments.
@Test
public void testCreateTableWithColumnComments() {
  // Column comments from the DDL must round-trip through the Iceberg schema and
  // show up in DESCRIBE output; columns without a comment get "from deserializer".
  TableIdentifier tableId = TableIdentifier.of("default", "comment_table");
  shell.executeStatement("CREATE EXTERNAL TABLE comment_table (" +
      "t_int INT COMMENT 'int column', " +
      "t_string STRING COMMENT 'string column', " +
      "t_string_2 STRING) " +
      "STORED BY ICEBERG " +
      testTables.locationForCreateTableSQL(tableId) +
      testTables.propertiesForCreateTableSQL(ImmutableMap.of()));
  org.apache.iceberg.Table icebergTable = testTables.loadTable(tableId);
  List<Object[]> descRows = shell.executeStatement("DESCRIBE default.comment_table");
  List<Types.NestedField> columns = icebergTable.schema().columns();
  Assert.assertEquals(columns.size(), descRows.size());
  int rowIdx = 0;
  for (Types.NestedField field : columns) {
    String expectedComment = field.doc() != null ? field.doc() : "from deserializer";
    Assert.assertArrayEquals(
        new Object[] { field.name(), HiveSchemaUtil.convert(field.type()).getTypeName(), expectedComment },
        descRows.get(rowIdx++));
  }
}
Example usage of org.apache.iceberg.catalog.TableIdentifier in the Apache Hive project:
class TestHiveIcebergStorageHandlerNoScan, method testPartitionEvolution.
@Test
public void testPartitionEvolution() {
  // Evolve the partition spec twice via ALTER TABLE ... SET PARTITION SPEC and
  // verify the resulting specs (ids and transforms) on the Iceberg table.
  Schema schema = new Schema(
      optional(1, "id", Types.LongType.get()),
      optional(2, "ts", Types.TimestampType.withZone()));
  TableIdentifier tableId = TableIdentifier.of("default", "part_test");
  String createSql = "CREATE EXTERNAL TABLE " + tableId +
      " STORED BY ICEBERG " + testTables.locationForCreateTableSQL(tableId) +
      " TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" + SchemaParser.toJson(schema) + "', " +
      "'" + InputFormatConfig.CATALOG_NAME + "'='" + testTables.catalogName() + "')";
  shell.executeStatement(createSql);

  // First evolution: month(ts) becomes spec id 1.
  shell.executeStatement("ALTER TABLE " + tableId + " SET PARTITION SPEC (month(ts))");
  PartitionSpec expectedSpec = PartitionSpec.builderFor(schema).withSpecId(1).month("ts").build();
  Table table = testTables.loadTable(tableId);
  Assert.assertEquals(expectedSpec, table.spec());

  // Second evolution: day(ts) becomes spec id 2; the old month field is retained
  // as a void (always-null) transform named ts_month.
  shell.executeStatement("ALTER TABLE " + tableId + " SET PARTITION SPEC (day(ts))");
  expectedSpec = PartitionSpec.builderFor(schema).withSpecId(2).alwaysNull("ts", "ts_month").day("ts").build();
  table.refresh();
  Assert.assertEquals(expectedSpec, table.spec());
}
Example usage of org.apache.iceberg.catalog.TableIdentifier in the Apache Hive project:
class TestHiveIcebergStorageHandlerNoScan, method testDeleteBackingTable.
@Test
public void testDeleteBackingTable() throws TException, IOException, InterruptedException {
// Verifies DROP TABLE behavior when external.table.purge=FALSE: the data/metadata
// files must be kept. The expected visibility of the dropped table depends on the
// catalog type, hence the two branches below.
TableIdentifier identifier = TableIdentifier.of("default", "customers");
shell.executeStatement("CREATE EXTERNAL TABLE customers " + "STORED BY ICEBERG " + testTables.locationForCreateTableSQL(identifier) + "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" + SchemaParser.toJson(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) + "', " + "'" + InputFormatConfig.EXTERNAL_TABLE_PURGE + "'='FALSE', " + "'" + InputFormatConfig.CATALOG_NAME + "'='" + testTables.catalogName() + "')");
org.apache.hadoop.hive.metastore.api.Table hmsTable = shell.metastore().getTable("default", "customers");
// Copy the HMS table parameters (minus internal/ignored ones) so the catalog
// type can be determined from them.
Properties tableProperties = new Properties();
hmsTable.getParameters().entrySet().stream().filter(e -> !IGNORED_PARAMS.contains(e.getKey())).forEach(e -> tableProperties.put(e.getKey(), e.getValue()));
if (!Catalogs.hiveCatalog(shell.getHiveConf(), tableProperties)) {
shell.executeStatement("DROP TABLE customers");
// Non-Hive catalog: dropping the HMS table must leave the backing Iceberg
// table intact — loading it should still succeed.
testTables.loadTable(identifier);
} else {
// Hive catalog: remember the table location before dropping.
Path hmsTableLocation = new Path(hmsTable.getSd().getLocation());
// Drop the table
shell.executeStatement("DROP TABLE customers");
// Check that we get an exception when trying to load the dropped table
AssertHelpers.assertThrows("should throw exception", NoSuchTableException.class, "Table does not exist", () -> {
testTables.loadTable(identifier);
});
// Check that the files are kept (purge=FALSE): the table directory still has
// exactly one entry, and the metadata directory still has one file.
FileSystem fs = Util.getFs(hmsTableLocation, shell.getHiveConf());
Assert.assertEquals(1, fs.listStatus(hmsTableLocation).length);
Assert.assertEquals(1, fs.listStatus(new Path(hmsTableLocation, "metadata")).length);
}
}
Aggregations