Search in sources:

Example 21 with Table

use of org.apache.iceberg.Table in project hive by apache.

From class HiveTableTest, method testNonDefaultDatabaseLocation.

@Test
public void testNonDefaultDatabaseLocation() throws IOException, TException {
    Namespace namespace = Namespace.of(NON_DEFAULT_DATABASE);
    // Create a new location and a non-default database / namespace for it.
    // World-writable permissions so the metastore process can also use the directory.
    File nonDefaultLocation = createTempDirectory(NON_DEFAULT_DATABASE, asFileAttribute(fromString("rwxrwxrwx"))).toFile();
    catalog.createNamespace(namespace, Collections.singletonMap("location", nonDefaultLocation.getPath()));
    Map<String, String> namespaceMeta = catalog.loadNamespaceMetadata(namespace);
    // Make sure that we are testing a namespace with a non-default location :)
    // Fixed: assertEquals takes (expected, actual) — the original had them reversed,
    // which produces a misleading failure message.
    Assert.assertEquals("file:" + nonDefaultLocation.getPath(), namespaceMeta.get("location"));
    TableIdentifier tableIdentifier = TableIdentifier.of(namespace, TABLE_NAME);
    catalog.createTable(tableIdentifier, schema);
    // Let's check the location loaded through the catalog: a table created without an
    // explicit location should land under its namespace's custom location.
    Table table = catalog.loadTable(tableIdentifier);
    Assert.assertEquals(namespaceMeta.get("location") + "/" + TABLE_NAME, table.location());
    // Drop the database and purge the files (deleteData, ignoreUnknownDb, cascade)
    metastoreClient.dropDatabase(NON_DEFAULT_DATABASE, true, true, true);
}
Also used : TableIdentifier(org.apache.iceberg.catalog.TableIdentifier) Table(org.apache.iceberg.Table) PosixFilePermissions.fromString(java.nio.file.attribute.PosixFilePermissions.fromString) DataFile(org.apache.iceberg.DataFile) ManifestFile(org.apache.iceberg.ManifestFile) File(java.io.File) Namespace(org.apache.iceberg.catalog.Namespace) Test(org.junit.Test)

Example 22 with Table

use of org.apache.iceberg.Table in project hive by apache.

From class TestHiveCatalog, method testCreateTableDefaultSortOrder.

@Test
public void testCreateTableDefaultSortOrder() {
    // A table created without an explicit sort order must get the unsorted default
    // (order id 0, no sort fields).
    Schema schema = new Schema(required(1, "id", Types.IntegerType.get(), "unique ID"), required(2, "data", Types.StringType.get()));
    PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
    try {
        Table table = catalog.createTable(tableIdent, schema, spec);
        Assert.assertEquals("Order ID must match", 0, table.sortOrder().orderId());
        // Fixed message grammar: "must unsorted" -> "must be unsorted"
        Assert.assertTrue("Order must be unsorted", table.sortOrder().isUnsorted());
    } finally {
        // Always clean up so other tests see a fresh catalog.
        catalog.dropTable(tableIdent);
    }
}
Also used : TableIdentifier(org.apache.iceberg.catalog.TableIdentifier) Table(org.apache.iceberg.Table) Schema(org.apache.iceberg.Schema) PartitionSpec(org.apache.iceberg.PartitionSpec) Test(org.junit.Test)

Example 23 with Table

use of org.apache.iceberg.Table in project hive by apache.

From class TestHiveCatalog, method testCreateTableCustomSortOrder.

@Test
public void testCreateTableCustomSortOrder() {
    // Build a table with an explicit ascending, nulls-first sort on "id" and verify
    // every component of the persisted sort order round-trips through the catalog.
    Schema tableSchema = new Schema(required(1, "id", Types.IntegerType.get(), "unique ID"), required(2, "data", Types.StringType.get()));
    PartitionSpec partitionSpec = PartitionSpec.builderFor(tableSchema).bucket("data", 16).build();
    SortOrder requestedOrder = SortOrder.builderFor(tableSchema).asc("id", NULLS_FIRST).build();
    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
    try {
        Table created = catalog.buildTable(tableIdent, tableSchema)
            .withPartitionSpec(partitionSpec)
            .withSortOrder(requestedOrder)
            .create();
        SortOrder actualOrder = created.sortOrder();
        // A non-default order gets id 1 and carries exactly the one field we asked for.
        Assert.assertEquals("Order ID must match", 1, actualOrder.orderId());
        Assert.assertEquals("Order must have 1 field", 1, actualOrder.fields().size());
        Assert.assertEquals("Direction must match ", ASC, actualOrder.fields().get(0).direction());
        Assert.assertEquals("Null order must match ", NULLS_FIRST, actualOrder.fields().get(0).nullOrder());
        // An untransformed sort column is stored with the identity transform.
        Transform<?, ?> expectedTransform = Transforms.identity(Types.IntegerType.get());
        Assert.assertEquals("Transform must match", expectedTransform, actualOrder.fields().get(0).transform());
    } finally {
        // Always clean up so other tests see a fresh catalog.
        catalog.dropTable(tableIdent);
    }
}
Also used : TableIdentifier(org.apache.iceberg.catalog.TableIdentifier) Table(org.apache.iceberg.Table) Schema(org.apache.iceberg.Schema) SortOrder(org.apache.iceberg.SortOrder) PartitionSpec(org.apache.iceberg.PartitionSpec) Test(org.junit.Test)

Example 24 with Table

use of org.apache.iceberg.Table in project hive by apache.

From class TestHiveCatalog, method testCreateTableTxnBuilder.

@Test
public void testCreateTableTxnBuilder() throws Exception {
    // Create a table through the transaction builder API and confirm that, once the
    // transaction commits, the table is visible via the catalog with the requested
    // location, the full schema, and no partitioning.
    Schema tableSchema = new Schema(required(1, "id", Types.IntegerType.get(), "unique ID"), required(2, "data", Types.StringType.get()));
    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
    String customLocation = temp.newFolder("tbl").toString();
    try {
        Transaction createTxn = catalog.buildTable(tableIdent, tableSchema)
            .withLocation(customLocation)
            .createTransaction();
        createTxn.commitTransaction();
        // Reload through the catalog rather than using the transaction's view.
        Table loaded = catalog.loadTable(tableIdent);
        Assert.assertEquals(customLocation, loaded.location());
        Assert.assertEquals(2, loaded.schema().columns().size());
        Assert.assertTrue(loaded.spec().isUnpartitioned());
    } finally {
        // Always clean up so other tests see a fresh catalog.
        catalog.dropTable(tableIdent);
    }
}
Also used : TableIdentifier(org.apache.iceberg.catalog.TableIdentifier) Table(org.apache.iceberg.Table) Transaction(org.apache.iceberg.Transaction) Schema(org.apache.iceberg.Schema) Test(org.junit.Test)

Example 25 with Table

use of org.apache.iceberg.Table in project hive by apache.

From class TestHiveCommitLocks, method before.

@Before
public void before() throws Exception {
    // Fixture setup: capture two consecutive table metadata versions (before and after a
    // schema change) and build a spied HiveTableOperations for the lock tests.
    // NOTE(review): statement order matters here — metadataV1 must be read before the
    // schema commit and metadataV2 only after refresh().
    Table table = catalog.loadTable(TABLE_IDENTIFIER);
    // Reach into the table's underlying operations via HasTableOperations.
    ops = (HiveTableOperations) ((HasTableOperations) table).operations();
    String dbName = TABLE_IDENTIFIER.namespace().level(0);
    String tableName = TABLE_IDENTIFIER.name();
    // Snapshot the metadata as it exists before any change.
    metadataV1 = ops.current();
    // Commit a schema change so a second, distinct metadata version exists.
    table.updateSchema().addColumn("n", Types.IntegerType.get()).commit();
    // refresh() so ops.current() reflects the just-committed version.
    ops.refresh();
    metadataV2 = ops.current();
    // Sanity check: the committed column bumped the schema to 2 columns.
    Assert.assertEquals(2, ops.current().schema().columns().size());
    // Spy wraps a fresh operations instance (overridden conf + cached client pool,
    // presumably to intercept lock calls in the tests — fields defined on the class).
    spyOps = spy(new HiveTableOperations(overriddenHiveConf, spyCachedClientPool, ops.io(), catalog.name(), dbName, tableName));
}
Also used : Table(org.apache.iceberg.Table) HasTableOperations(org.apache.iceberg.HasTableOperations) Before(org.junit.Before)

Aggregations

Table (org.apache.iceberg.Table)188 Test (org.junit.Test)132 Schema (org.apache.iceberg.Schema)66 TableIdentifier (org.apache.iceberg.catalog.TableIdentifier)56 Record (org.apache.iceberg.data.Record)56 PartitionSpec (org.apache.iceberg.PartitionSpec)51 IOException (java.io.IOException)27 FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema)27 List (java.util.List)22 Map (java.util.Map)20 DataFile (org.apache.iceberg.DataFile)19 NoSuchTableException (org.apache.iceberg.exceptions.NoSuchTableException)19 Collectors (java.util.stream.Collectors)18 BaseTable (org.apache.iceberg.BaseTable)18 Types (org.apache.iceberg.types.Types)18 Properties (java.util.Properties)17 Configuration (org.apache.hadoop.conf.Configuration)17 Path (org.apache.hadoop.fs.Path)17 FileFormat (org.apache.iceberg.FileFormat)16 ArrayList (java.util.ArrayList)15