Use of org.apache.iceberg.Schema in the Apache Hive project.
Class TestHiveIcebergStorageHandlerNoScan, method testAlterTableReplaceColumns.
@Test
public void testAlterTableReplaceColumns() throws TException, InterruptedException {
  // Create a four-column table; the "address" column is a nested struct.
  TableIdentifier tableId = TableIdentifier.of("default", "customers");
  Schema origSchema = new Schema(
      optional(1, "customer_id", Types.IntegerType.get()),
      optional(2, "first_name", Types.StringType.get(), "This is first name"),
      optional(3, "last_name", Types.StringType.get(), "This is last name"),
      optional(4, "address", Types.StructType.of(
          optional(5, "city", Types.StringType.get()),
          optional(6, "street", Types.StringType.get())), null));
  testTables.createTable(shell, tableId.name(), origSchema, SPEC, FileFormat.PARQUET, ImmutableList.of());

  // REPLACE COLUMNS keeps three of the columns and drops first_name.
  shell.executeStatement("ALTER TABLE default.customers REPLACE COLUMNS " +
      "(customer_id int, last_name string COMMENT 'This is last name', " +
      "address struct<city:string,street:string>)");

  // Read back the schema from both Iceberg and the Hive metastore.
  org.apache.iceberg.Table icebergTable = testTables.loadTable(tableId);
  org.apache.hadoop.hive.metastore.api.Table hmsTable = shell.metastore().getTable("default", "customers");
  List<FieldSchema> icebergCols = HiveSchemaUtil.convert(icebergTable.schema());
  List<FieldSchema> hmsCols = hmsTable.getSd().getCols();

  // first_name column is dropped
  List<FieldSchema> expectedCols = Lists.newArrayList(
      new FieldSchema("customer_id", "int", null),
      new FieldSchema("last_name", "string", "This is last name"),
      new FieldSchema("address", "struct<city:string,street:string>", null));

  // Both views of the table must agree with the replacement result.
  Assert.assertEquals(expectedCols, icebergCols);
  Assert.assertEquals(expectedCols, hmsCols);
}
Use of org.apache.iceberg.Schema in the Apache Hive project.
Class TestHiveIcebergV2, method testReadAndWriteFormatV2Partitioned_EqDelete_OnlyEqColumnsSupplied.
@Test
public void testReadAndWriteFormatV2Partitioned_EqDelete_OnlyEqColumnsSupplied() throws IOException {
  // Delete-file reads are not available for vectorized execution or ORC.
  Assume.assumeFalse("Reading V2 tables with delete files are only supported currently in " +
      "non-vectorized mode and only Parquet/Avro", isVectorized || fileFormat == FileFormat.ORC);

  // Partitioned customer table with the standard seed records.
  PartitionSpec partitionSpec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA)
      .identity("customer_id")
      .build();
  Table table = testTables.createTable(shell, "customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
      partitionSpec, fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2);

  // add one more row to the same partition
  shell.executeStatement("insert into customers values (1, 'Bob', 'Hoover')");

  // delete all rows with id=1 and first_name=Bob
  Schema deleteRowSchema = new Schema(
      optional(1, "id", Types.LongType.get()),
      optional(2, "name", Types.StringType.get()));
  List<Record> rowsToDelete = TestHelper.RecordsBuilder.newInstance(deleteRowSchema).add(1L, "Bob").build();
  DeleteFile eqDeleteFile = HiveIcebergTestUtils.createEqualityDeleteFile(table, "dummyPath",
      ImmutableList.of("customer_id", "first_name"), fileFormat, rowsToDelete);
  table.newRowDelta().addDeletes(eqDeleteFile).commit();

  // Only the two customers untouched by the equality delete should remain.
  List<Object[]> rows = shell.executeStatement("SELECT * FROM customers ORDER BY customer_id");
  Assert.assertEquals(2, rows.size());
  Assert.assertArrayEquals(new Object[] { 0L, "Alice", "Brown" }, rows.get(0));
  Assert.assertArrayEquals(new Object[] { 2L, "Trudy", "Pink" }, rows.get(1));
}
Use of org.apache.iceberg.Schema in the Apache Hive project.
Class TestHiveIcebergComplexTypeWrites, method testWriteArrayOfArraysInTable.
@Test
public void testWriteArrayOfArraysInTable() throws IOException {
  // Schema with a doubly nested list column: list<list<string>>.
  Schema nestedListSchema = new Schema(
      required(1, "id", Types.LongType.get()),
      required(2, "arrayofarrays", Types.ListType.ofRequired(3, Types.ListType.ofRequired(4, Types.StringType.get()))));
  // Generate three deterministic random records (fixed seed) and round-trip them.
  List<Record> generated = TestHelper.generateRandomRecords(nestedListSchema, 3, 1L);
  testComplexTypeWrite(nestedListSchema, generated);
}
Use of org.apache.iceberg.Schema in the Apache Hive project.
Class TestHiveIcebergComplexTypeWrites, method testWriteMapOfArraysInTable.
@Test
public void testWriteMapOfArraysInTable() throws IOException {
  // Schema with a map whose values are lists: map<string, list<string>>.
  Schema mapOfListsSchema = new Schema(
      required(1, "id", Types.LongType.get()),
      required(2, "mapofarrays",
          Types.MapType.ofRequired(3, 4, Types.StringType.get(), Types.ListType.ofRequired(5, Types.StringType.get()))));
  // Generate five deterministic random records (fixed seed) and round-trip them.
  List<Record> generated = TestHelper.generateRandomRecords(mapOfListsSchema, 5, 0L);
  testComplexTypeWrite(mapOfListsSchema, generated);
}
Use of org.apache.iceberg.Schema in the Apache Hive project.
Class TestHiveIcebergComplexTypeWrites, method testWriteMapOfStructsInTable.
@Test
public void testWriteMapOfStructsInTable() throws IOException {
  // Schema with a map whose values are three-field structs: map<string, struct<...>>.
  Schema mapOfStructsSchema = new Schema(
      required(1, "id", Types.LongType.get()),
      required(2, "mapofstructs",
          Types.MapType.ofRequired(3, 4, Types.StringType.get(),
              Types.StructType.of(
                  required(5, "something", Types.StringType.get()),
                  required(6, "someone", Types.StringType.get()),
                  required(7, "somewhere", Types.StringType.get())))));
  // Generate five deterministic random records (fixed seed) and round-trip them.
  List<Record> generated = TestHelper.generateRandomRecords(mapOfStructsSchema, 5, 0L);
  testComplexTypeWrite(mapOfStructsSchema, generated);
}
Aggregations