Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From class HiveTableTest, method testRename:
@Test
public void testRename() {
  final String newName = "rename_table_name";
  final TableIdentifier renamedIdent = TableIdentifier.of(TABLE_IDENTIFIER.namespace(), newName);

  // Capture the table state before the rename so it can be compared afterwards.
  Table before = catalog.loadTable(TABLE_IDENTIFIER);

  catalog.renameTable(TABLE_IDENTIFIER, renamedIdent);

  // The old identifier must no longer resolve, while the new one must.
  Assert.assertFalse(catalog.tableExists(TABLE_IDENTIFIER));
  Assert.assertTrue(catalog.tableExists(renamedIdent));

  // Renaming must preserve schema, partition spec, location, and snapshot state.
  Table after = catalog.loadTable(renamedIdent);
  Assert.assertEquals(before.schema().asStruct(), after.schema().asStruct());
  Assert.assertEquals(before.spec(), after.spec());
  Assert.assertEquals(before.location(), after.location());
  Assert.assertEquals(before.currentSnapshot(), after.currentSnapshot());

  // Clean up: dropping the renamed table should succeed.
  Assert.assertTrue(catalog.dropTable(renamedIdent));
}
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From class TestHiveCatalog, method testCreateTableBuilder:
@Test
public void testCreateTableBuilder() throws Exception {
  // Two-column schema: a documented required int id plus a required string payload.
  Schema schema = new Schema(
      required(1, "id", Types.IntegerType.get(), "unique ID"),
      required(2, "data", Types.StringType.get()));
  PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
  TableIdentifier ident = TableIdentifier.of(DB_NAME, "tbl");
  String tableLocation = temp.newFolder("tbl").toString();

  try {
    // Create through the fluent builder, supplying spec, location, and two properties.
    Table created = catalog.buildTable(ident, schema)
        .withPartitionSpec(spec)
        .withLocation(tableLocation)
        .withProperty("key1", "value1")
        .withProperty("key2", "value2")
        .create();

    // Everything passed to the builder must be reflected on the created table.
    Assert.assertEquals(tableLocation, created.location());
    Assert.assertEquals(2, created.schema().columns().size());
    Assert.assertEquals(1, created.spec().fields().size());
    Assert.assertEquals("value1", created.properties().get("key1"));
    Assert.assertEquals("value2", created.properties().get("key2"));
  } finally {
    catalog.dropTable(ident);
  }
}
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From class TestHiveCatalog, method testReplaceTxnBuilder:
@Test
public void testReplaceTxnBuilder() throws Exception {
  Schema schema = new Schema(
      required(1, "id", Types.IntegerType.get(), "unique ID"),
      required(2, "data", Types.StringType.get()));
  PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
  TableIdentifier ident = TableIdentifier.of(DB_NAME, "tbl");
  String firstLocation = temp.newFolder("tbl").toString();

  try {
    // First transaction: createOrReplace acts as a create since the table is absent.
    Transaction createTxn = catalog.buildTable(ident, schema)
        .withPartitionSpec(spec)
        .withLocation(firstLocation)
        .withProperty("key1", "value1")
        .createOrReplaceTransaction();
    createTxn.commitTransaction();

    Table table = catalog.loadTable(ident);
    Assert.assertEquals(1, table.spec().fields().size());

    // Second transaction: replace the table at a new location with a new property.
    String secondLocation = temp.newFolder("tbl-2").toString();
    Transaction replaceTxn = catalog.buildTable(ident, schema)
        .withProperty("key2", "value2")
        .withLocation(secondLocation)
        .replaceTransaction();
    replaceTxn.commitTransaction();

    table = catalog.loadTable(ident);
    Assert.assertEquals(secondLocation, table.location());
    Assert.assertNull(table.currentSnapshot());

    // The replace drops the old partitioning; the old field survives as a void
    // transform under the next spec id.
    PartitionSpec v1Expected = PartitionSpec.builderFor(table.schema())
        .alwaysNull("data", "data_bucket")
        .withSpecId(1)
        .build();
    Assert.assertEquals("Table should have a spec with one void field", v1Expected, table.spec());

    // Properties from both transactions are retained.
    Assert.assertEquals("value1", table.properties().get("key1"));
    Assert.assertEquals("value2", table.properties().get("key2"));
  } finally {
    catalog.dropTable(ident);
  }
}
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From class TestHiveCatalog, method testCreateTableWithCaching:
@Test
public void testCreateTableWithCaching() throws Exception {
  Schema schema = new Schema(
      required(1, "id", Types.IntegerType.get(), "unique ID"),
      required(2, "data", Types.StringType.get()));
  PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
  TableIdentifier ident = TableIdentifier.of(DB_NAME, "tbl");
  String tableLocation = temp.newFolder("tbl").toString();
  ImmutableMap<String, String> props = ImmutableMap.of("key1", "value1", "key2", "value2");

  // Exercise table creation through a CachingCatalog wrapper around the Hive catalog.
  Catalog cachingCatalog = CachingCatalog.wrap(catalog);
  try {
    Table created = cachingCatalog.createTable(ident, schema, spec, tableLocation, props);

    // The cached catalog must create the table exactly as requested.
    Assert.assertEquals(tableLocation, created.location());
    Assert.assertEquals(2, created.schema().columns().size());
    Assert.assertEquals(1, created.spec().fields().size());
    Assert.assertEquals("value1", created.properties().get("key1"));
    Assert.assertEquals("value2", created.properties().get("key2"));
  } finally {
    cachingCatalog.dropTable(ident);
  }
}
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From class HiveCatalog, method listTables:
/**
 * Lists the Iceberg tables in the given namespace (a single Hive database level).
 *
 * <p>Fetches all table objects from the metastore and keeps only those whose
 * {@code table_type} parameter marks them as Iceberg tables; non-Iceberg Hive
 * tables in the same database are filtered out.
 *
 * @param namespace a namespace with exactly one level, the Hive database name
 * @return identifiers of the Iceberg tables in the namespace
 * @throws NoSuchNamespaceException if the database does not exist
 * @throws RuntimeException on metastore communication failure or interruption
 */
@Override
public List<TableIdentifier> listTables(Namespace namespace) {
  Preconditions.checkArgument(isValidateNamespace(namespace),
      "Missing database in namespace: %s", namespace);
  String database = namespace.level(0);

  try {
    List<String> tableNames = clients.run(client -> client.getAllTables(database));
    List<Table> tableObjects =
        clients.run(client -> client.getTableObjectsByName(database, tableNames));
    List<TableIdentifier> tableIdentifiers = tableObjects.stream()
        // Keep only Hive tables tagged as Iceberg; tables with no parameters
        // cannot carry the tag, so they are excluded up front.
        .filter(table -> table.getParameters() != null &&
            BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(
                table.getParameters().get(BaseMetastoreTableOperations.TABLE_TYPE_PROP)))
        .map(table -> TableIdentifier.of(namespace, table.getTableName()))
        .collect(Collectors.toList());

    LOG.debug("Listing of namespace: {} resulted in the following tables: {}",
        namespace, tableIdentifiers);
    return tableIdentifiers;

  } catch (UnknownDBException e) {
    throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
  } catch (TException e) {
    throw new RuntimeException("Failed to list all tables under namespace " + namespace, e);
  } catch (InterruptedException e) {
    // Restore the interrupt flag before surfacing the failure.
    Thread.currentThread().interrupt();
    throw new RuntimeException("Interrupted in call to listTables", e);
  }
}
Aggregations