Use of org.apache.iceberg.Table in the Apache Hive project.
From class TestHiveIcebergStorageHandlerNoScan, method testAlterTableProperties.
/**
 * Verifies that Hive ALTER TABLE ... SET/UNSET TBLPROPERTIES statements are
 * propagated to the underlying Iceberg table's properties: adding a new
 * property, updating an existing one, and finally removing it.
 */
@Test
public void testAlterTableProperties() {
  TableIdentifier identifier = TableIdentifier.of("default", "customers");
  shell.executeStatement("CREATE EXTERNAL TABLE customers (" + "t_int INT, " + "t_string STRING) " +
      "STORED BY ICEBERG " + testTables.locationForCreateTableSQL(identifier) +
      testTables.propertiesForCreateTableSQL(ImmutableMap.of()));
  String propKey = "dummy";
  String propValue = "dummy_val";

  // Add a new property through Hive DDL.
  shell.executeStatement(String.format("ALTER TABLE customers SET TBLPROPERTIES('%s'='%s')", propKey, propValue));
  // Check the Iceberg table parameters.
  Table icebergTable = testTables.loadTable(identifier);
  Assert.assertTrue(icebergTable.properties().containsKey(propKey));
  // JUnit convention is assertEquals(expected, actual); the original had the
  // arguments swapped, which produces misleading failure messages.
  Assert.assertEquals(propValue, icebergTable.properties().get(propKey));

  // Update the existing property.
  propValue = "new_dummy_val";
  shell.executeStatement(String.format("ALTER TABLE customers SET TBLPROPERTIES('%s'='%s')", propKey, propValue));
  // Refresh to pick up the new metadata written by the ALTER statement.
  icebergTable.refresh();
  Assert.assertTrue(icebergTable.properties().containsKey(propKey));
  Assert.assertEquals(propValue, icebergTable.properties().get(propKey));

  // Remove the property. NOTE(review): UNSET TBLPROPERTIES normally takes only
  // keys; the 'key'='value' form here appears to be accepted by Hive's parser —
  // confirm against the grammar before relying on it elsewhere.
  shell.executeStatement(String.format("ALTER TABLE customers UNSET TBLPROPERTIES('%s'='%s')", propKey, propValue));
  icebergTable.refresh();
  Assert.assertFalse(icebergTable.properties().containsKey(propKey));
}
Use of org.apache.iceberg.Table in the Apache Hive project.
From class TestHiveIcebergStorageHandlerNoScan, method testPartitionTransform.
/**
 * Checks that every partition transform supported in Hive's
 * {@code PARTITIONED BY SPEC} clause (year, month, day, hour, truncate,
 * bucket, identity) ends up as the equivalent Iceberg {@link PartitionSpec}.
 */
@Test
public void testPartitionTransform() {
  // One source column per transform under test.
  Schema schema = new Schema(
      optional(1, "id", Types.LongType.get()),
      optional(2, "year_field", Types.DateType.get()),
      optional(3, "month_field", Types.TimestampType.withZone()),
      optional(4, "day_field", Types.TimestampType.withoutZone()),
      optional(5, "hour_field", Types.TimestampType.withoutZone()),
      optional(6, "truncate_field", Types.StringType.get()),
      optional(7, "bucket_field", Types.StringType.get()),
      optional(8, "identity_field", Types.StringType.get()));
  // The spec the DDL below is expected to produce.
  PartitionSpec expectedSpec = PartitionSpec.builderFor(schema)
      .year("year_field")
      .month("month_field")
      .day("day_field")
      .hour("hour_field")
      .truncate("truncate_field", 2)
      .bucket("bucket_field", 2)
      .identity("identity_field")
      .build();
  TableIdentifier identifier = TableIdentifier.of("default", "part_test");
  String createSql = String.format(
      "CREATE EXTERNAL TABLE %s PARTITIONED BY SPEC (year(year_field), month(month_field), day(day_field), " +
          "hour(hour_field), truncate(2, truncate_field), bucket(2, bucket_field), identity_field)" +
          " STORED BY ICEBERG %s TBLPROPERTIES ('%s'='%s', '%s'='%s')",
      identifier, testTables.locationForCreateTableSQL(identifier),
      InputFormatConfig.TABLE_SCHEMA, SchemaParser.toJson(schema),
      InputFormatConfig.CATALOG_NAME, testTables.catalogName());
  shell.executeStatement(createSql);
  Table table = testTables.loadTable(identifier);
  Assert.assertEquals(expectedSpec, table.spec());
}
Use of org.apache.iceberg.Table in the Apache Hive project.
From class TestHiveIcebergStorageHandlerNoScan, method testDropTableWithCorruptedMetadata.
/**
 * Verifies that an HMS table backed by a corrupted (unloadable) Iceberg table
 * can still be dropped: the current metadata file is deleted out from under
 * the table, then DROP TABLE is issued and the table must be gone afterwards.
 */
@Test
public void testDropTableWithCorruptedMetadata() throws TException, IOException, InterruptedException {
// Only the HiveCatalog code path loads the Iceberg table before dropping it,
// so metadata corruption is only interesting for that catalog type.
Assume.assumeTrue("Only HiveCatalog attempts to load the Iceberg table prior to dropping it.", testTableType == TestTables.TestTableType.HIVE_CATALOG);
// Create the test table.
TableIdentifier identifier = TableIdentifier.of("default", "customers");
testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, FileFormat.PARQUET, ImmutableList.of());
// Enable data purging (this should set external.table.purge=true on the HMS table).
Table table = testTables.loadTable(identifier);
table.updateProperties().set(GC_ENABLED, "true").commit();
// Delete the current metadata file (i.e. corrupt the metadata so the Iceberg
// table can no longer be loaded). The location is read from the HMS table
// parameters because the Iceberg table itself is about to become unloadable.
String metadataLocation = shell.metastore().getTable(identifier).getParameters().get(BaseMetastoreTableOperations.METADATA_LOCATION_PROP);
table.io().deleteFile(metadataLocation);
// The HMS table must nonetheless still be droppable.
shell.executeStatement(String.format("DROP TABLE %s", identifier));
// After the drop, loading the table must fail with NoSuchTableException.
AssertHelpers.assertThrows("should throw exception", NoSuchTableException.class, "Table does not exist", () -> {
testTables.loadTable(identifier);
});
}
Use of org.apache.iceberg.Table in the Apache Hive project.
From class TestHiveIcebergStorageHandlerWithMultipleCatalogs, method createAndAddRecords.
/**
 * Creates an Iceberg-backed Hive table for the given identifier in the given
 * catalog and appends the supplied records to it via the Iceberg API.
 *
 * @param testTables catalog-specific test-table helper to create/load through
 * @param fileFormat file format to set as the table's default write format
 * @param identifier fully qualified name of the table to create
 * @param records rows to append after creation
 * @throws IOException if writing the data files fails
 */
private void createAndAddRecords(TestTables testTables, FileFormat fileFormat, TableIdentifier identifier, List<Record> records) throws IOException {
  String location = testTables.locationForCreateTableSQL(identifier);
  String ddl = String.format(
      "CREATE EXTERNAL TABLE %s (customer_id BIGINT, first_name STRING, last_name STRING)" +
          " STORED BY ICEBERG %s " +
          " TBLPROPERTIES ('%s'='%s', '%s'='%s')",
      identifier, location,
      InputFormatConfig.CATALOG_NAME, testTables.catalogName(),
      TableProperties.DEFAULT_FILE_FORMAT, fileFormat);
  shell.executeStatement(ddl);
  // Append the data directly through Iceberg rather than via Hive INSERT.
  Table table = testTables.loadTable(identifier);
  testTables.appendIcebergTable(shell.getHiveConf(), table, fileFormat, null, records);
}
Use of org.apache.iceberg.Table in the Apache Hive project.
From class TestHiveIcebergTruncateTable, method testTruncateTable.
/**
 * Shared driver for TRUNCATE TABLE scenarios: validates the initial data and
 * statistics, runs TRUNCATE (optionally with FORCE), then asserts the table
 * is empty and its snapshot statistics are reset to zero.
 *
 * @param databaseName database of the table under test
 * @param tableName name of the table under test
 * @param icebergTable Iceberg handle used for the pre-truncate stats check
 * @param records rows expected to be present before truncation
 * @param schema schema used to decode the SELECT results
 * @param externalTablePurge value to set for 'external.table.purge'
 * @param force whether to append FORCE to the TRUNCATE statement
 */
private void testTruncateTable(String databaseName, String tableName, Table icebergTable, List<Record> records, Schema schema, boolean externalTablePurge, boolean force) throws TException, InterruptedException {
  TableIdentifier identifier = TableIdentifier.of(databaseName, tableName);

  // Configure purge behavior on the table before truncating.
  shell.executeStatement("ALTER TABLE " + identifier +
      " SET TBLPROPERTIES('external.table.purge'='" + externalTablePurge + "')");

  // Sanity-check the starting data, then compute table statistics.
  List<Object[]> initialRows = shell.executeStatement("SELECT * FROM " + identifier);
  HiveIcebergTestUtils.validateData(records, HiveIcebergTestUtils.valueForRow(schema, initialRows), 0);
  shell.executeStatement("ANALYZE TABLE " + identifier + " COMPUTE STATISTICS");
  validateBasicStats(icebergTable, databaseName, tableName);

  // Run 'TRUNCATE <table>' or 'TRUNCATE <table> FORCE'.
  String truncateSql = force ? "TRUNCATE " + identifier + " FORCE" : "TRUNCATE " + identifier;
  shell.executeStatement(truncateSql);

  // The data must be gone and the snapshot summary stats reset to zero.
  Table truncated = testTables.loadTable(identifier);
  Map<String, String> summary = truncated.currentSnapshot().summary();
  for (String statKey : STATS_MAPPING.values()) {
    Assert.assertEquals("0", summary.get(statKey));
  }
  List<Object[]> remainingRows = shell.executeStatement("SELECT * FROM " + identifier);
  Assert.assertEquals(0, remainingRows.size());
  validateBasicStats(truncated, databaseName, tableName);
}
Aggregations