Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From the class TestHiveCatalog, method testTableName:
@Test
public void testTableName() {
  Schema schema = new Schema(
      required(1, "id", Types.IntegerType.get(), "unique ID"),
      required(2, "data", Types.StringType.get()));
  PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
  TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
  try {
    catalog.buildTable(tableIdent, schema).withPartitionSpec(spec).create();

    Table table = catalog.loadTable(tableIdent);
    Assert.assertEquals("Name must match", "hive.hivedb.tbl", table.name());

    TableIdentifier snapshotsTableIdent = TableIdentifier.of(DB_NAME, "tbl", "snapshots");
    Table snapshotsTable = catalog.loadTable(snapshotsTableIdent);
    Assert.assertEquals("Name must match", "hive.hivedb.tbl.snapshots", snapshotsTable.name());
  } finally {
    catalog.dropTable(tableIdent);
  }
}
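The snapshots lookup above relies on Iceberg resolving a three-part identifier to one of a table's metadata tables. Below is a minimal standalone sketch of the same pattern, assuming a Hive metastore reachable through the default Hadoop Configuration; the catalog name "hive" and the database/table names are illustrative placeholders.

import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.hive.HiveCatalog;

public class MetadataTableNames {
  public static void main(String[] args) {
    HiveCatalog catalog = new HiveCatalog();
    catalog.setConf(new Configuration());
    catalog.initialize("hive", Collections.emptyMap());

    // A two-part identifier names the table itself ...
    Table table = catalog.loadTable(TableIdentifier.of("hivedb", "tbl"));
    System.out.println(table.name()); // e.g. hive.hivedb.tbl

    // ... while a third part selects one of its metadata tables.
    Table snapshots = catalog.loadTable(TableIdentifier.of("hivedb", "tbl", "snapshots"));
    System.out.println(snapshots.name()); // e.g. hive.hivedb.tbl.snapshots
  }
}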
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From the class TestHiveIcebergStorageHandlerNoScan, method testSetPartitionTransformCaseSensitive:
@Test
public void testSetPartitionTransformCaseSensitive() {
  Schema schema = new Schema(
      optional(1, "id", Types.LongType.get()),
      optional(2, "truncate_field", Types.StringType.get()),
      optional(3, "bucket_field", Types.StringType.get()));
  TableIdentifier identifier = TableIdentifier.of("default", "part_test");
  shell.executeStatement("CREATE EXTERNAL TABLE " + identifier +
      " PARTITIONED BY SPEC (truncate(2, truncate_field), bucket(2, bucket_field))" +
      " STORED BY ICEBERG " + testTables.locationForCreateTableSQL(identifier) +
      "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" + SchemaParser.toJson(schema) + "', " +
      "'" + InputFormatConfig.CATALOG_NAME + "'='" + testTables.catalogName() + "')");
  PartitionSpec spec = PartitionSpec.builderFor(schema)
      .truncate("truncate_field", 2)
      .bucket("bucket_field", 2)
      .build();
  Table table = testTables.loadTable(identifier);
  Assert.assertEquals(spec, table.spec());

  // The transform and column names are intentionally mixed-case to verify case-insensitive handling
  shell.executeStatement("ALTER TABLE default.part_test " +
      "SET PARTITION SPEC (truncaTe(3, truncate_Field), buCket(3, bUckeT_field))");
  spec = PartitionSpec.builderFor(schema)
      .withSpecId(1)
      .alwaysNull("truncate_field", "truncate_field_trunc")
      .alwaysNull("bucket_field", "bucket_field_bucket")
      .truncate("truncate_field", 3, "truncate_field_trunc_3")
      .bucket("bucket_field", 3, "bucket_field_bucket_3")
      .build();
  table.refresh();
  Assert.assertEquals(spec, table.spec());
}
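The same partition-spec evolution can be performed through the Iceberg API instead of Hive SQL. Below is a hedged sketch, assuming a Table loaded as in the test above; the removed field names are the defaults Iceberg generates for the original width-2 transforms, and the wrapper class is purely illustrative.

import org.apache.iceberg.Table;
import org.apache.iceberg.expressions.Expressions;

class SpecEvolution {
  // Replaces the width-2 transforms with width-3 ones, mirroring the ALTER TABLE above.
  static void evolvePartitionSpec(Table table) {
    table.updateSpec()
        .removeField("truncate_field_trunc")   // default name of truncate(2, truncate_field)
        .removeField("bucket_field_bucket")    // default name of bucket(2, bucket_field)
        .addField(Expressions.truncate("truncate_field", 3))
        .addField(Expressions.bucket("bucket_field", 3))
        .commit();
  }
}

Unlike the SQL path, commit() refreshes the local Table handle, so no separate refresh() call is needed when the change is made in the same process.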
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From the class TestHiveIcebergStorageHandlerNoScan, method testAuthzURI:
@Test
public void testAuthzURI() throws TException, InterruptedException, URISyntaxException {
  TableIdentifier target = TableIdentifier.of("default", "target");
  testTables.createTable(shell, target.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
      PartitionSpec.unpartitioned(), FileFormat.PARQUET, ImmutableList.of());
  org.apache.hadoop.hive.metastore.api.Table hmsTable = shell.metastore().getTable(target);

  HiveIcebergStorageHandler storageHandler = new HiveIcebergStorageHandler();
  storageHandler.setConf(shell.getHiveConf());
  URI uriForAuth = storageHandler.getURIForAuth(hmsTable);

  Assert.assertEquals("iceberg://" + hmsTable.getDbName() + "/" + hmsTable.getTableName(),
      uriForAuth.toString());
}
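The assertion shows the URI shape the storage handler hands to Hive authorization: iceberg://<db>/<table>. A small illustration of building that shape from a TableIdentifier follows; the class and method names are hypothetical, not part of the Hive or Iceberg API.

import java.net.URI;
import java.net.URISyntaxException;
import org.apache.iceberg.catalog.TableIdentifier;

class AuthzUris {
  // Hypothetical helper mirroring the URI shape asserted in the test above.
  static URI authzUriFor(TableIdentifier identifier) throws URISyntaxException {
    return new URI("iceberg://" + identifier.namespace() + "/" + identifier.name());
  }
}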
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From the class TestHiveIcebergStorageHandlerNoScan, method testCreateTableWithAllSupportedTypes:
@Test
public void testCreateTableWithAllSupportedTypes() {
  TableIdentifier identifier = TableIdentifier.of("default", "all_types");
  Schema allSupportedSchema = new Schema(
      optional(1, "t_float", Types.FloatType.get()),
      optional(2, "t_double", Types.DoubleType.get()),
      optional(3, "t_boolean", Types.BooleanType.get()),
      optional(4, "t_int", Types.IntegerType.get()),
      optional(5, "t_bigint", Types.LongType.get()),
      optional(6, "t_binary", Types.BinaryType.get()),
      optional(7, "t_string", Types.StringType.get()),
      optional(8, "t_timestamp", Types.TimestampType.withoutZone()),
      optional(9, "t_date", Types.DateType.get()),
      optional(10, "t_decimal", Types.DecimalType.of(3, 2)));

  // The type and column names are intentionally mixed-case to test that they are handled correctly
  shell.executeStatement("CREATE EXTERNAL TABLE all_types (" +
      "t_Float FLOaT, t_dOuble DOUBLE, t_boolean BOOLEAN, t_int INT, t_bigint BIGINT, t_binary BINARY, " +
      "t_string STRING, t_timestamp TIMESTAMP, t_date DATE, t_decimal DECIMAL(3,2)) " +
      "STORED BY ICEBERG " + testTables.locationForCreateTableSQL(identifier) +
      testTables.propertiesForCreateTableSQL(ImmutableMap.of()));

  // Check the Iceberg table data
  org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
  Assert.assertEquals(allSupportedSchema.asStruct(), icebergTable.schema().asStruct());
}
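Several of these tests pass schemas to Hive as JSON through SchemaParser, so a useful sanity check is that a schema survives the JSON round trip. A minimal sketch follows, with the "schema" parameter standing in for the allSupportedSchema defined above; the wrapper class is illustrative.

import org.apache.iceberg.Schema;
import org.apache.iceberg.SchemaParser;

class SchemaRoundTrip {
  static void verifyRoundTrip(Schema schema) {
    String json = SchemaParser.toJson(schema);    // what the tests embed in TBLPROPERTIES
    Schema parsed = SchemaParser.fromJson(json);  // what the storage handler reads back
    if (!parsed.asStruct().equals(schema.asStruct())) {
      throw new IllegalStateException("Schema did not survive the JSON round trip");
    }
  }
}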
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From the class TestHiveIcebergStorageHandlerNoScan, method testCreateDropTable:
@Test
public void testCreateDropTable() throws TException, IOException, InterruptedException {
  TableIdentifier identifier = TableIdentifier.of("default", "customers");
  shell.executeStatement("CREATE EXTERNAL TABLE customers " +
      "STORED BY ICEBERG " + testTables.locationForCreateTableSQL(identifier) +
      "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" +
      SchemaParser.toJson(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) + "', " +
      "'" + InputFormatConfig.PARTITION_SPEC + "'='" +
      PartitionSpecParser.toJson(PartitionSpec.unpartitioned()) + "', " +
      "'dummy'='test', " +
      "'" + InputFormatConfig.CATALOG_NAME + "'='" + testTables.catalogName() + "')");

  // Check the Iceberg table data
  org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
  Assert.assertEquals(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA.asStruct(),
      icebergTable.schema().asStruct());
  Assert.assertEquals(PartitionSpec.unpartitioned(), icebergTable.spec());

  org.apache.hadoop.hive.metastore.api.Table hmsTable = shell.metastore().getTable("default", "customers");
  Properties tableProperties = new Properties();
  hmsTable.getParameters().entrySet().stream()
      .filter(e -> !IGNORED_PARAMS.contains(e.getKey()))
      .forEach(e -> tableProperties.put(e.getKey(), e.getValue()));
  if (!Catalogs.hiveCatalog(shell.getHiveConf(), tableProperties)) {
    shell.executeStatement("DROP TABLE customers");

    // Check that the table was really dropped even from the Catalog
    AssertHelpers.assertThrows("should throw exception", NoSuchTableException.class,
        "Table does not exist", () -> testTables.loadTable(identifier));
  } else {
    Path hmsTableLocation = new Path(hmsTable.getSd().getLocation());

    // Drop the table
    shell.executeStatement("DROP TABLE customers");

    // Check that we get an exception when trying to load the table
    AssertHelpers.assertThrows("should throw exception", NoSuchTableException.class,
        "Table does not exist", () -> testTables.loadTable(identifier));

    // Check if the files are removed
    FileSystem fs = Util.getFs(hmsTableLocation, shell.getHiveConf());
    if (fs.exists(hmsTableLocation)) {
      // If the table directory has been deleted, we're good. This is the expected behavior in Hive4.
      // If the table directory exists, its contents should have been cleaned up,
      // save for an empty metadata dir (Hive3).
      Assert.assertEquals(1, fs.listStatus(hmsTableLocation).length);
      Assert.assertEquals(0, fs.listStatus(new Path(hmsTableLocation, "metadata")).length);
    }
  }
}
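The DROP TABLE issued through Hive ultimately maps onto the catalog's table drop. The direct Iceberg API equivalent is Catalog.dropTable with the purge flag, which also removes data and metadata files. A hedged sketch follows, assuming an already-configured Catalog instance; the wrapper class and method names are illustrative.

import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;

class DropTableExample {
  // Drops the table and asks the catalog to purge its data and metadata files too.
  static void dropWithPurge(Catalog catalog, TableIdentifier identifier) {
    boolean dropped = catalog.dropTable(identifier, true /* purge */);
    if (!dropped) {
      throw new IllegalStateException("Table did not exist: " + identifier);
    }
  }
}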