
Example 46 with TableIdentifier

Use of org.apache.iceberg.catalog.TableIdentifier in the apache/hive project.

From the class HiveTableTest, method testRename.

@Test
public void testRename() {
    String renamedTableName = "rename_table_name";
    TableIdentifier renameTableIdentifier = TableIdentifier.of(TABLE_IDENTIFIER.namespace(), renamedTableName);
    Table original = catalog.loadTable(TABLE_IDENTIFIER);
    catalog.renameTable(TABLE_IDENTIFIER, renameTableIdentifier);
    Assert.assertFalse(catalog.tableExists(TABLE_IDENTIFIER));
    Assert.assertTrue(catalog.tableExists(renameTableIdentifier));
    Table renamed = catalog.loadTable(renameTableIdentifier);
    Assert.assertEquals(original.schema().asStruct(), renamed.schema().asStruct());
    Assert.assertEquals(original.spec(), renamed.spec());
    Assert.assertEquals(original.location(), renamed.location());
    Assert.assertEquals(original.currentSnapshot(), renamed.currentSnapshot());
    Assert.assertTrue(catalog.dropTable(renameTableIdentifier));
}
Also used : TableIdentifier(org.apache.iceberg.catalog.TableIdentifier) Table(org.apache.iceberg.Table) PosixFilePermissions.fromString(java.nio.file.attribute.PosixFilePermissions.fromString) Test(org.junit.Test)
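
For reference, the rename flow exercised by this test reduces to a single Catalog call. A minimal sketch, assuming an already configured HiveCatalog instance named catalog; the database and table names are illustrative, not taken from the test:

import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.TableIdentifier;

// Source and target identifiers in the same namespace (illustrative names).
TableIdentifier source = TableIdentifier.of("default", "logs");
TableIdentifier target = TableIdentifier.of("default", "logs_renamed");

// renameTable rewrites the metastore entry only; schema, partition spec,
// location and current snapshot carry over, which is what the assertions
// in the test above verify.
catalog.renameTable(source, target);
Table renamed = catalog.loadTable(target);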

Example 47 with TableIdentifier

Use of org.apache.iceberg.catalog.TableIdentifier in the apache/hive project.

From the class TestHiveCatalog, method testCreateTableBuilder.

@Test
public void testCreateTableBuilder() throws Exception {
    Schema schema = new Schema(required(1, "id", Types.IntegerType.get(), "unique ID"), required(2, "data", Types.StringType.get()));
    PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
    String location = temp.newFolder("tbl").toString();
    try {
        Table table = catalog.buildTable(tableIdent, schema)
                .withPartitionSpec(spec)
                .withLocation(location)
                .withProperty("key1", "value1")
                .withProperty("key2", "value2")
                .create();
        Assert.assertEquals(location, table.location());
        Assert.assertEquals(2, table.schema().columns().size());
        Assert.assertEquals(1, table.spec().fields().size());
        Assert.assertEquals("value1", table.properties().get("key1"));
        Assert.assertEquals("value2", table.properties().get("key2"));
    } finally {
        catalog.dropTable(tableIdent);
    }
}
Also used : TableIdentifier(org.apache.iceberg.catalog.TableIdentifier) Table(org.apache.iceberg.Table) Schema(org.apache.iceberg.Schema) PartitionSpec(org.apache.iceberg.PartitionSpec) Test(org.junit.Test)
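
None of these examples shows where the catalog instance comes from. A minimal construction sketch, assuming a reachable Hive Metastore; the URI and warehouse path are placeholders, not values from the tests:

import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.hive.HiveCatalog;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;

HiveCatalog catalog = new HiveCatalog();
catalog.setConf(new Configuration());
// "hive" is just the catalog name; metastore URI and warehouse location are placeholders.
catalog.initialize("hive", ImmutableMap.of(
        CatalogProperties.URI, "thrift://localhost:9083",
        CatalogProperties.WAREHOUSE_LOCATION, "/tmp/warehouse"));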

Example 48 with TableIdentifier

Use of org.apache.iceberg.catalog.TableIdentifier in the apache/hive project.

From the class TestHiveCatalog, method testReplaceTxnBuilder.

@Test
public void testReplaceTxnBuilder() throws Exception {
    Schema schema = new Schema(required(1, "id", Types.IntegerType.get(), "unique ID"), required(2, "data", Types.StringType.get()));
    PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
    String location = temp.newFolder("tbl").toString();
    try {
        Transaction createTxn = catalog.buildTable(tableIdent, schema)
                .withPartitionSpec(spec)
                .withLocation(location)
                .withProperty("key1", "value1")
                .createOrReplaceTransaction();
        createTxn.commitTransaction();
        Table table = catalog.loadTable(tableIdent);
        Assert.assertEquals(1, table.spec().fields().size());
        String newLocation = temp.newFolder("tbl-2").toString();
        Transaction replaceTxn = catalog.buildTable(tableIdent, schema)
                .withProperty("key2", "value2")
                .withLocation(newLocation)
                .replaceTransaction();
        replaceTxn.commitTransaction();
        table = catalog.loadTable(tableIdent);
        Assert.assertEquals(newLocation, table.location());
        Assert.assertNull(table.currentSnapshot());
        PartitionSpec v1Expected = PartitionSpec.builderFor(table.schema())
                .alwaysNull("data", "data_bucket")
                .withSpecId(1)
                .build();
        Assert.assertEquals("Table should have a spec with one void field", v1Expected, table.spec());
        Assert.assertEquals("value1", table.properties().get("key1"));
        Assert.assertEquals("value2", table.properties().get("key2"));
    } finally {
        catalog.dropTable(tableIdent);
    }
}
Also used : TableIdentifier(org.apache.iceberg.catalog.TableIdentifier) Table(org.apache.iceberg.Table) Transaction(org.apache.iceberg.Transaction) Schema(org.apache.iceberg.Schema) PartitionSpec(org.apache.iceberg.PartitionSpec) Test(org.junit.Test)
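
The Transaction returned by createOrReplaceTransaction() can batch further metadata changes before anything becomes visible. A minimal sketch, assuming the same catalog, tableIdent and schema as in the test above; the property name is illustrative:

import org.apache.iceberg.Transaction;

Transaction txn = catalog.buildTable(tableIdent, schema).createOrReplaceTransaction();
// Stage a property change on the transaction; it is not visible to readers yet.
txn.updateProperties()
        .set("owner", "example")
        .commit();
// Only this call publishes the staged table and its changes atomically.
txn.commitTransaction();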

Example 49 with TableIdentifier

Use of org.apache.iceberg.catalog.TableIdentifier in the apache/hive project.

From the class TestHiveCatalog, method testCreateTableWithCaching.

@Test
public void testCreateTableWithCaching() throws Exception {
    Schema schema = new Schema(required(1, "id", Types.IntegerType.get(), "unique ID"), required(2, "data", Types.StringType.get()));
    PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
    String location = temp.newFolder("tbl").toString();
    ImmutableMap<String, String> properties = ImmutableMap.of("key1", "value1", "key2", "value2");
    Catalog cachingCatalog = CachingCatalog.wrap(catalog);
    try {
        Table table = cachingCatalog.createTable(tableIdent, schema, spec, location, properties);
        Assert.assertEquals(location, table.location());
        Assert.assertEquals(2, table.schema().columns().size());
        Assert.assertEquals(1, table.spec().fields().size());
        Assert.assertEquals("value1", table.properties().get("key1"));
        Assert.assertEquals("value2", table.properties().get("key2"));
    } finally {
        cachingCatalog.dropTable(tableIdent);
    }
}
Also used : TableIdentifier(org.apache.iceberg.catalog.TableIdentifier) Table(org.apache.iceberg.Table) Schema(org.apache.iceberg.Schema) PartitionSpec(org.apache.iceberg.PartitionSpec) Catalog(org.apache.iceberg.catalog.Catalog) CachingCatalog(org.apache.iceberg.CachingCatalog) Test(org.junit.Test)
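
CachingCatalog is a decorator: the wrapped catalog still talks to the Hive Metastore, while repeated loads of the same identifier can be served from an in-memory cache. A minimal sketch, assuming the same catalog and tableIdent as above:

import org.apache.iceberg.CachingCatalog;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;

Catalog cachingCatalog = CachingCatalog.wrap(catalog);
Table first = cachingCatalog.loadTable(tableIdent);
// A second load of the same identifier is expected to come from the cache
// rather than from another metastore round trip (not asserted in the test above).
Table second = cachingCatalog.loadTable(tableIdent);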

Example 50 with TableIdentifier

Use of org.apache.iceberg.catalog.TableIdentifier in the apache/hive project.

From the class HiveCatalog, method listTables.

@Override
public List<TableIdentifier> listTables(Namespace namespace) {
    Preconditions.checkArgument(isValidateNamespace(namespace), "Missing database in namespace: %s", namespace);
    String database = namespace.level(0);
    try {
        List<String> tableNames = clients.run(client -> client.getAllTables(database));
        List<Table> tableObjects = clients.run(client -> client.getTableObjectsByName(database, tableNames));
        // Keep only Hive tables whose table_type parameter marks them as Iceberg tables.
        List<TableIdentifier> tableIdentifiers = tableObjects.stream()
                .filter(table -> table.getParameters() != null &&
                        BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(
                                table.getParameters().get(BaseMetastoreTableOperations.TABLE_TYPE_PROP)))
                .map(table -> TableIdentifier.of(namespace, table.getTableName()))
                .collect(Collectors.toList());
        LOG.debug("Listing of namespace: {} resulted in the following tables: {}", namespace, tableIdentifiers);
        return tableIdentifiers;
    } catch (UnknownDBException e) {
        throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
    } catch (TException e) {
        throw new RuntimeException("Failed to list all tables under namespace " + namespace, e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted in call to listTables", e);
    }
}
Also used : TableIdentifier(org.apache.iceberg.catalog.TableIdentifier) CatalogUtil(org.apache.iceberg.CatalogUtil) ImmutableMap(org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap) LoggerFactory(org.slf4j.LoggerFactory) HadoopFileIO(org.apache.iceberg.hadoop.HadoopFileIO) TableMetadata(org.apache.iceberg.TableMetadata) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) CatalogProperties(org.apache.iceberg.CatalogProperties) TableOperations(org.apache.iceberg.TableOperations) BaseMetastoreTableOperations(org.apache.iceberg.BaseMetastoreTableOperations) NoSuchNamespaceException(org.apache.iceberg.exceptions.NoSuchNamespaceException) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) BaseMetastoreCatalog(org.apache.iceberg.BaseMetastoreCatalog) NoSuchTableException(org.apache.iceberg.exceptions.NoSuchTableException) Path(org.apache.hadoop.fs.Path) NamespaceNotEmptyException(org.apache.iceberg.exceptions.NamespaceNotEmptyException) Namespace(org.apache.iceberg.catalog.Namespace) Configurable(org.apache.hadoop.conf.Configurable) SupportsNamespaces(org.apache.iceberg.catalog.SupportsNamespaces) Logger(org.slf4j.Logger) UnknownDBException(org.apache.hadoop.hive.metastore.api.UnknownDBException) HiveConf(org.apache.hadoop.hive.conf.HiveConf) Maps(org.apache.iceberg.relocated.com.google.common.collect.Maps) Set(java.util.Set) TException(org.apache.thrift.TException) MoreObjects(org.apache.iceberg.relocated.com.google.common.base.MoreObjects) ImmutableList(org.apache.iceberg.relocated.com.google.common.collect.ImmutableList) Collectors(java.util.stream.Collectors) Table(org.apache.hadoop.hive.metastore.api.Table) List(java.util.List) IMetaStoreClient(org.apache.hadoop.hive.metastore.IMetaStoreClient) ClientPool(org.apache.iceberg.ClientPool) Preconditions(org.apache.iceberg.relocated.com.google.common.base.Preconditions) FileIO(org.apache.iceberg.io.FileIO) Database(org.apache.hadoop.hive.metastore.api.Database) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException)
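
From the caller's side, listTables takes a single-level namespace (the Hive database) and returns only identifiers whose table_type parameter marks them as Iceberg tables, as the filter above shows. A minimal usage sketch; the database name is illustrative:

import java.util.List;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableIdentifier;

Namespace namespace = Namespace.of("default");
List<TableIdentifier> icebergTables = catalog.listTables(namespace);
for (TableIdentifier identifier : icebergTables) {
    System.out.println(identifier);  // e.g. default.tbl
}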

Aggregations

TableIdentifier (org.apache.iceberg.catalog.TableIdentifier): 87
Test (org.junit.Test): 69
Table (org.apache.iceberg.Table): 56
PartitionSpec (org.apache.iceberg.PartitionSpec): 27
Schema (org.apache.iceberg.Schema): 25
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 16
BaseTable (org.apache.iceberg.BaseTable): 15
UpdateSchema (org.apache.iceberg.UpdateSchema): 15
List (java.util.List): 13
NoSuchTableException (org.apache.iceberg.exceptions.NoSuchTableException): 13
ArrayList (java.util.ArrayList): 11
ImmutableList (org.apache.iceberg.relocated.com.google.common.collect.ImmutableList): 11
IOException (java.io.IOException): 10
Map (java.util.Map): 10
Types (org.apache.iceberg.types.Types): 10
HashMap (java.util.HashMap): 9
Path (org.apache.hadoop.fs.Path): 9
TableProperties (org.apache.iceberg.TableProperties): 9
Collections (java.util.Collections): 8
Properties (java.util.Properties): 8