Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
The class TestHiveCatalog, method testCreateTableTxnBuilder.
@Test
public void testCreateTableTxnBuilder() throws Exception {
  Schema schema = new Schema(
      required(1, "id", Types.IntegerType.get(), "unique ID"),
      required(2, "data", Types.StringType.get()));
  TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
  String location = temp.newFolder("tbl").toString();
  try {
    Transaction txn = catalog.buildTable(tableIdent, schema)
        .withLocation(location)
        .createTransaction();
    txn.commitTransaction();
    Table table = catalog.loadTable(tableIdent);
    Assert.assertEquals(location, table.location());
    Assert.assertEquals(2, table.schema().columns().size());
    Assert.assertTrue(table.spec().isUnpartitioned());
  } finally {
    catalog.dropTable(tableIdent);
  }
}
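For context, the same TableBuilder can create the table directly instead of returning a Transaction. A minimal sketch, assuming the catalog, schema, tableIdent and location variables from the test above; the identity partition on the "data" column is purely illustrative:

// Illustrative only: partition by "data" and commit the table in one call
PartitionSpec spec = PartitionSpec.builderFor(schema)
    .identity("data")
    .build();
Table created = catalog.buildTable(tableIdent, schema)
    .withPartitionSpec(spec)
    .withLocation(location)
    .create();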
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
The class HiveCatalog, method renameTable.
@Override
public void renameTable(TableIdentifier from, TableIdentifier originalTo) {
  if (!isValidIdentifier(from)) {
    throw new NoSuchTableException("Invalid identifier: %s", from);
  }
  TableIdentifier to = removeCatalogName(originalTo);
  Preconditions.checkArgument(isValidIdentifier(to), "Invalid identifier: %s", to);
  String toDatabase = to.namespace().level(0);
  String fromDatabase = from.namespace().level(0);
  String fromName = from.name();
  try {
    Table table = clients.run(client -> client.getTable(fromDatabase, fromName));
    HiveTableOperations.validateTableIsIceberg(table, fullTableName(name, from));
    table.setDbName(toDatabase);
    table.setTableName(to.name());
    clients.run(client -> {
      client.alter_table(fromDatabase, fromName, table);
      return null;
    });
    LOG.info("Renamed table from {}, to {}", from, to);
  } catch (NoSuchObjectException e) {
    throw new NoSuchTableException("Table does not exist: %s", from);
  } catch (AlreadyExistsException e) {
    throw new org.apache.iceberg.exceptions.AlreadyExistsException("Table already exists: %s", to);
  } catch (TException e) {
    throw new RuntimeException("Failed to rename " + from + " to " + to, e);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new RuntimeException("Interrupted in call to rename", e);
  }
}
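From the caller's side, a hedged usage sketch (the catalog variable and table names are illustrative): renameTable takes two TableIdentifiers and surfaces metastore errors as Iceberg exceptions, so callers catch those rather than the Thrift ones.

TableIdentifier from = TableIdentifier.of("default", "customers");
TableIdentifier to = TableIdentifier.of("default", "customers_renamed");
try {
  catalog.renameTable(from, to);
} catch (NoSuchTableException e) {
  // the source table is missing or is not an Iceberg table
} catch (org.apache.iceberg.exceptions.AlreadyExistsException e) {
  // the target name is already taken
}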
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
The class TestHiveIcebergStatistics, method testStatsWithPartitionedInsert.
@Test
public void testStatsWithPartitionedInsert() {
  TableIdentifier identifier = TableIdentifier.of("default", "customers");
  PartitionSpec spec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA)
      .identity("last_name")
      .build();
  shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true);
  testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, spec,
      fileFormat, ImmutableList.of());
  if (testTableType != TestTables.TestTableType.HIVE_CATALOG) {
    // If the location is set and we have to gather stats, then we have to update the table stats now
    shell.executeStatement("ANALYZE TABLE " + identifier + " COMPUTE STATISTICS FOR COLUMNS");
  }
  String insert = testTables.getInsertQuery(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, identifier, false);
  shell.executeStatement(insert);
  checkColStat(identifier.name(), "customer_id", true);
  checkColStat(identifier.name(), "first_name", true);
  checkColStatMinMaxValue(identifier.name(), "customer_id", 0, 2);
}
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
The class TestHiveIcebergStatistics, method testStatsRemoved.
@Test
public void testStatsRemoved() throws IOException {
  Assume.assumeTrue("Only HiveCatalog can remove stats which become obsolete",
      testTableType == TestTables.TestTableType.HIVE_CATALOG);
  TableIdentifier identifier = TableIdentifier.of("default", "customers");
  shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true);
  testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
      PartitionSpec.unpartitioned(), fileFormat, ImmutableList.of());
  String insert = testTables.getInsertQuery(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, identifier, true);
  shell.executeStatement(insert);
  checkColStat(identifier.name(), "customer_id", true);
  checkColStatMinMaxValue(identifier.name(), "customer_id", 0, 2);
  // Create a catalog where KEEP_HIVE_STATS is false
  shell.metastore().hiveConf().set(HiveTableOperations.KEEP_HIVE_STATS, StatsSetupConst.FALSE);
  TestTables nonHiveTestTables = HiveIcebergStorageHandlerTestUtils.testTables(shell, testTableType, temp);
  Table nonHiveTable = nonHiveTestTables.loadTable(identifier);
  // Append data to the table through this non-Hive engine (here, the Java API)
  nonHiveTestTables.appendIcebergTable(shell.getHiveConf(), nonHiveTable, fileFormat, null,
      HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
  checkColStat(identifier.name(), "customer_id", false);
}
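For reference, a rough sketch of what "appending through the Java API" looks like at the core Iceberg level, independent of the test helper; dataFile here is a hypothetical, already-written org.apache.iceberg.DataFile (the helper builds such files from CUSTOMER_RECORDS):

// dataFile is hypothetical: an already-written DataFile for the table's spec.
// newAppend() commits a new snapshot without going through Hive, which is what
// lets the test verify that the Hive column stats are invalidated afterwards.
nonHiveTable.newAppend()
    .appendFile(dataFile)
    .commit();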
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
The class TestHiveIcebergStorageHandlerNoScan, method testMetaHookWithUndefinedAlterOperationType.
/**
 * Checks that HiveIcebergMetaHook doesn't run into failures with an undefined alter operation type (e.g. stat updates).
 * @throws Exception - any test failure
 */
@Test
public void testMetaHookWithUndefinedAlterOperationType() throws Exception {
  Assume.assumeTrue("Enough to check for one type only", testTableType.equals(TestTables.TestTableType.HIVE_CATALOG));
  TableIdentifier identifier = TableIdentifier.of("default", "customers");
  testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, SPEC,
      FileFormat.PARQUET, ImmutableList.of());
  org.apache.hadoop.hive.metastore.api.Table hmsTable = shell.metastore().getTable("default", "customers");
  HiveIcebergMetaHook metaHook = new HiveIcebergMetaHook(shell.getHiveConf());
  EnvironmentContext environmentContext = new EnvironmentContext(new HashMap<>());
  metaHook.preAlterTable(hmsTable, environmentContext);
  metaHook.commitAlterTable(hmsTable, environmentContext, null);
}
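Since every snippet above keys on TableIdentifier, a short sketch of the common ways to build and inspect one, using the public Iceberg API (the names are illustrative):

TableIdentifier byLevels = TableIdentifier.of("default", "customers");  // explicit namespace levels
TableIdentifier parsed = TableIdentifier.parse("default.customers");    // from a dot-separated string
Namespace ns = byLevels.namespace();  // the "default" namespace
String name = byLevels.name();        // "customers"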