Search in sources :

Example 86 with Table

use of org.apache.iceberg.Table in project hive by apache.

In the class HiveTableTest, the method testColumnTypeChangeInMetastore:

@Test
public void testColumnTypeChangeInMetastore() throws TException {
    Table icebergTable = catalog.loadTable(TABLE_IDENTIFIER);
    Schema expectedSchema = new Schema(Types.StructType.of(
        required(1, "id", Types.LongType.get()),
        optional(2, "data", Types.LongType.get()),
        optional(3, "string", Types.StringType.get()),
        optional(4, "int", Types.IntegerType.get())).fields());
    // Add columns of several types and verify that a column can later be dropped through the
    // Hive metastore. This works because METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES is
    // configured to false here; if it were true, an InvalidOperationException would be thrown
    // by MetaStoreUtils#throwExceptionIfIncompatibleColTypeChange().
    icebergTable.updateSchema()
        .addColumn("data", Types.LongType.get())
        .addColumn("string", Types.StringType.get())
        .addColumn("int", Types.IntegerType.get())
        .commit();
    Assert.assertEquals("Schema should match expected", expectedSchema.asStruct(), icebergTable.schema().asStruct());
    // After the delete, the expected schema no longer contains the "string" column.
    expectedSchema = new Schema(Types.StructType.of(
        required(1, "id", Types.LongType.get()),
        optional(2, "data", Types.LongType.get()),
        optional(4, "int", Types.IntegerType.get())).fields());
    icebergTable.updateSchema().deleteColumn("string").commit();
    Assert.assertEquals("Schema should match expected", expectedSchema.asStruct(), icebergTable.schema().asStruct());
}
Also used : Table(org.apache.iceberg.Table) Schema(org.apache.iceberg.Schema) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Test(org.junit.Test)

Example 87 with Table

use of org.apache.iceberg.Table in project hive by apache.

In the class HiveTableTest, the method testCreate:

@Test
public void testCreate() throws TException {
    // The table should already have been created in the Hive metastore by the test fixture;
    // fetch the HMS representation and verify its metadata.
    String tableName = TABLE_IDENTIFIER.name();
    org.apache.hadoop.hive.metastore.api.Table hmsTable =
        metastoreClient.getTable(TABLE_IDENTIFIER.namespace().level(0), tableName);
    // The HMS parameters must mark this as an Iceberg table.
    Map<String, String> hmsParams = hmsTable.getParameters();
    Assert.assertNotNull(hmsParams);
    Assert.assertTrue(ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(hmsParams.get(TABLE_TYPE_PROP)));
    Assert.assertTrue("EXTERNAL_TABLE".equalsIgnoreCase(hmsTable.getTableType()));
    // The storage descriptor should point at the expected (empty) table location.
    Assert.assertEquals(getTableLocation(tableName), hmsTable.getSd().getLocation());
    // The table is stored unpartitioned on the Hive side.
    Assert.assertEquals(0, hmsTable.getPartitionKeysSize());
    // Exactly one metadata (snapshot) file should exist and no manifests yet.
    Assert.assertEquals(1, metadataVersionFiles(tableName).size());
    Assert.assertEquals(0, manifestFiles(tableName).size());
    // Loading through the catalog must yield the same Iceberg schema the test created.
    final Table icebergTable = catalog.loadTable(TABLE_IDENTIFIER);
    Assert.assertEquals(schema.asStruct(), icebergTable.schema().asStruct());
}
Also used : Table(org.apache.iceberg.Table) PosixFilePermissions.fromString(java.nio.file.attribute.PosixFilePermissions.fromString) Test(org.junit.Test)

Example 88 with Table

use of org.apache.iceberg.Table in project hive by apache.

In the class HiveTableTest, the method testFailure:

@Test(expected = CommitFailedException.class)
public void testFailure() throws TException {
    Table icebergTable = catalog.loadTable(TABLE_IDENTIFIER);
    // Corrupt the metadata pointer behind the catalog's back: point the HMS table at a
    // location that does not match what the loaded Iceberg table believes is current.
    org.apache.hadoop.hive.metastore.api.Table hmsTable = metastoreClient.getTable(DB_NAME, TABLE_NAME);
    String bogusLocation = "dummylocation";
    hmsTable.getParameters().put(METADATA_LOCATION_PROP, bogusLocation);
    metastoreClient.alter_table(DB_NAME, TABLE_NAME, hmsTable);
    // The stale icebergTable handle now races a concurrent (fake) commit, so this schema
    // change must fail with CommitFailedException (asserted via the @Test annotation).
    icebergTable.updateSchema().addColumn("data", Types.LongType.get()).commit();
}
Also used : Table(org.apache.iceberg.Table) PosixFilePermissions.fromString(java.nio.file.attribute.PosixFilePermissions.fromString) Test(org.junit.Test)

Example 89 with Table

use of org.apache.iceberg.Table in project hive by apache.

In the class HiveTableTest, the method testRename:

@Test
public void testRename() {
    String newName = "rename_table_name";
    TableIdentifier newIdentifier = TableIdentifier.of(TABLE_IDENTIFIER.namespace(), newName);
    // Capture the table before renaming so its metadata can be compared afterwards.
    Table before = catalog.loadTable(TABLE_IDENTIFIER);
    catalog.renameTable(TABLE_IDENTIFIER, newIdentifier);
    // Old identifier must be gone; new identifier must resolve.
    Assert.assertFalse(catalog.tableExists(TABLE_IDENTIFIER));
    Assert.assertTrue(catalog.tableExists(newIdentifier));
    // A rename is metadata-only: schema, spec, location, and current snapshot are unchanged.
    Table after = catalog.loadTable(newIdentifier);
    Assert.assertEquals(before.schema().asStruct(), after.schema().asStruct());
    Assert.assertEquals(before.spec(), after.spec());
    Assert.assertEquals(before.location(), after.location());
    Assert.assertEquals(before.currentSnapshot(), after.currentSnapshot());
    // Clean up so later tests see a fresh namespace.
    Assert.assertTrue(catalog.dropTable(newIdentifier));
}
Also used : TableIdentifier(org.apache.iceberg.catalog.TableIdentifier) Table(org.apache.iceberg.Table) PosixFilePermissions.fromString(java.nio.file.attribute.PosixFilePermissions.fromString) Test(org.junit.Test)

Example 90 with Table

use of org.apache.iceberg.Table in project hive by apache.

In the class HiveTableTest, the method testExistingTableUpdate:

@Test
public void testExistingTableUpdate() throws TException {
    Table icebergTable = catalog.loadTable(TABLE_IDENTIFIER);
    // Commit a schema change, then reload so we observe the post-commit state.
    icebergTable.updateSchema().addColumn("data", Types.LongType.get()).commit();
    icebergTable = catalog.loadTable(TABLE_IDENTIFIER);
    // The schema update writes a second metadata file; still no manifests exist.
    Assert.assertEquals(2, metadataVersionFiles(TABLE_NAME).size());
    Assert.assertEquals(0, manifestFiles(TABLE_NAME).size());
    Assert.assertEquals(altered.asStruct(), icebergTable.schema().asStruct());
    // The Hive metastore's column list must mirror the Iceberg schema after the update.
    final org.apache.hadoop.hive.metastore.api.Table hmsTable = metastoreClient.getTable(DB_NAME, TABLE_NAME);
    final List<String> hiveColumns = hmsTable.getSd().getCols().stream()
        .map(FieldSchema::getName)
        .collect(Collectors.toList());
    final List<String> icebergColumns = altered.columns().stream()
        .map(Types.NestedField::name)
        .collect(Collectors.toList());
    Assert.assertEquals(icebergColumns, hiveColumns);
}
Also used : Types(org.apache.iceberg.types.Types) Table(org.apache.iceberg.Table) PosixFilePermissions.fromString(java.nio.file.attribute.PosixFilePermissions.fromString) Test(org.junit.Test)

Aggregations

Table (org.apache.iceberg.Table)188 Test (org.junit.Test)132 Schema (org.apache.iceberg.Schema)66 TableIdentifier (org.apache.iceberg.catalog.TableIdentifier)56 Record (org.apache.iceberg.data.Record)56 PartitionSpec (org.apache.iceberg.PartitionSpec)51 IOException (java.io.IOException)27 FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema)27 List (java.util.List)22 Map (java.util.Map)20 DataFile (org.apache.iceberg.DataFile)19 NoSuchTableException (org.apache.iceberg.exceptions.NoSuchTableException)19 Collectors (java.util.stream.Collectors)18 BaseTable (org.apache.iceberg.BaseTable)18 Types (org.apache.iceberg.types.Types)18 Properties (java.util.Properties)17 Configuration (org.apache.hadoop.conf.Configuration)17 Path (org.apache.hadoop.fs.Path)17 FileFormat (org.apache.iceberg.FileFormat)16 ArrayList (java.util.ArrayList)15