Example use of org.apache.iceberg.Table in the project hive by apache:
class HiveCreateReplaceTableTest, method testCreateOrReplaceTableTxnTableCreatedConcurrently.
@Test
public void testCreateOrReplaceTableTxnTableCreatedConcurrently() {
  Assert.assertFalse("Table should not exist", catalog.tableExists(TABLE_IDENTIFIER));

  // Start a create-or-replace transaction while the table is still absent.
  Transaction replaceTxn = catalog.newReplaceTableTransaction(
      TABLE_IDENTIFIER, SCHEMA, PartitionSpec.unpartitioned(), tableLocation, Maps.newHashMap(), true);
  replaceTxn.updateProperties().set("prop", "value").commit();

  // Simulate a concurrent writer creating the same table before the transaction commits.
  catalog.createTable(TABLE_IDENTIFIER, SCHEMA, SPEC);
  Assert.assertTrue("Table should be created", catalog.tableExists(TABLE_IDENTIFIER));

  // The create-or-replace transaction is expected to commit successfully anyway.
  replaceTxn.commitTransaction();

  // The committed transaction wins: unpartitioned spec and the property set above.
  Table table = catalog.loadTable(TABLE_IDENTIFIER);
  Assert.assertEquals("Partition spec should match", PartitionSpec.unpartitioned(), table.spec());
  Assert.assertEquals("Table props should match", "value", table.properties().get("prop"));
}
Example use of org.apache.iceberg.Table in the project hive by apache:
class TestHiveCommits, method testThriftExceptionUnknownFailedCommit.
/**
 * Simulates a persist call that throws with an unknown outcome while the fallback
 * catalog check is also unavailable, and the commit has in fact failed. Since the
 * client cannot prove whether the commit landed, it must surface a
 * CommitStateUnknownException and keep the newly written metadata file around.
 */
@Test
public void testThriftExceptionUnknownFailedCommit() throws TException, InterruptedException {
  Table table = catalog.loadTable(TABLE_IDENTIFIER);
  HiveTableOperations ops = (HiveTableOperations) ((HasTableOperations) table).operations();

  // Capture two metadata versions to commit between: before and after a schema change.
  TableMetadata metadataV1 = ops.current();
  table.updateSchema().addColumn("n", Types.IntegerType.get()).commit();
  ops.refresh();
  TableMetadata metadataV2 = ops.current();
  Assert.assertEquals(2, ops.current().schema().columns().size());

  // Break both the persist step and the catalog-side check that would normally
  // disambiguate the outcome of the failed commit.
  HiveTableOperations spyOps = spy(ops);
  failCommitAndThrowException(spyOps);
  breakFallbackCatalogCommitCheck(spyOps);

  AssertHelpers.assertThrows("Should throw CommitStateUnknownException since the catalog check was blocked",
      CommitStateUnknownException.class, "Datacenter on fire",
      () -> spyOps.commit(metadataV2, metadataV1));

  // The table metadata must be untouched, and the orphaned new metadata file retained.
  ops.refresh();
  Assert.assertEquals("Current metadata should not have changed", metadataV2, ops.current());
  Assert.assertTrue("Current metadata file should still exist", metadataFileExists(ops.current()));
  Assert.assertEquals("Client could not determine outcome so new metadata file should also exist",
      3, metadataFileCount(ops.current()));
}
Example use of org.apache.iceberg.Table in the project hive by apache:
class TestHiveIcebergStatistics, method testStatsRemoved.
@Test
public void testStatsRemoved() throws IOException {
  Assume.assumeTrue("Only HiveCatalog can remove stats which become obsolete",
      testTableType == TestTables.TestTableType.HIVE_CATALOG);

  TableIdentifier identifier = TableIdentifier.of("default", "customers");
  shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true);
  testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
      PartitionSpec.unpartitioned(), fileFormat, ImmutableList.of());

  // Insert through Hive so that column statistics get auto-gathered for the table.
  String insert = testTables.getInsertQuery(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, identifier, true);
  shell.executeStatement(insert);
  checkColStat(identifier.name(), "customer_id", true);
  checkColStatMinMaxValue(identifier.name(), "customer_id", 0, 2);

  // Build a second catalog configured with KEEP_HIVE_STATS disabled.
  shell.metastore().hiveConf().set(HiveTableOperations.KEEP_HIVE_STATS, StatsSetupConst.FALSE);
  TestTables nonHiveTestTables = HiveIcebergStorageHandlerTestUtils.testTables(shell, testTableType, temp);
  Table nonHiveTable = nonHiveTestTables.loadTable(identifier);

  // Appending via the Iceberg Java API (a non-Hive engine) should drop the now-stale Hive stats.
  nonHiveTestTables.appendIcebergTable(shell.getHiveConf(), nonHiveTable, fileFormat, null,
      HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
  checkColStat(identifier.name(), "customer_id", false);
}
Example use of org.apache.iceberg.Table in the project hive by apache:
class TestHiveIcebergStatistics, method testAnalyzeTableComputeStatistics.
@Test
public void testAnalyzeTableComputeStatistics() throws IOException, TException, InterruptedException {
  String dbName = "default";
  String tableName = "customers";

  // Create a pre-populated table, run an explicit ANALYZE, then verify basic stats exist.
  Table table = testTables.createTable(shell, tableName,
      HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, fileFormat,
      HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
  shell.executeStatement("ANALYZE TABLE " + dbName + "." + tableName + " COMPUTE STATISTICS");
  validateBasicStats(table, dbName, tableName);
}
Example use of org.apache.iceberg.Table in the project hive by apache:
class TestHiveIcebergStorageHandlerNoScan, method testCreateTableStoredByIcebergWithSerdeProperties.
@Test
public void testCreateTableStoredByIcebergWithSerdeProperties() {
  TableIdentifier identifier = TableIdentifier.of("default", "customers");

  // CREATE TABLE using STORED BY iceberg, passing the default file format via SERDEPROPERTIES.
  String createSql = String.format(
      "CREATE EXTERNAL TABLE customers (customer_id BIGINT, first_name STRING, last_name STRING) " +
          "STORED BY iceberg WITH SERDEPROPERTIES('%s'='%s') %s TBLPROPERTIES ('%s'='%s')",
      TableProperties.DEFAULT_FILE_FORMAT, "orc",
      testTables.locationForCreateTableSQL(identifier),
      InputFormatConfig.CATALOG_NAME, testTables.catalogName());
  shell.executeStatement(createSql);

  // The SERDEPROPERTIES entry must end up as a regular Iceberg table property.
  Table table = testTables.loadTable(identifier);
  Assert.assertNotNull(table);
  Assert.assertEquals("orc", table.properties().get(TableProperties.DEFAULT_FILE_FORMAT));
}
Aggregations