Example usage of org.apache.iceberg.data.Record in the Apache Hive project:
class TestHiveIcebergComplexTypeWrites, method testWriteArrayOfPrimitivesInTable.
@Test
public void testWriteArrayOfPrimitivesInTable() throws IOException {
  // Schema: a required long id plus a required list-of-strings column.
  Schema schema = new Schema(
      required(1, "id", Types.LongType.get()),
      required(2, "arrayofprimitives", Types.ListType.ofRequired(3, Types.StringType.get())));
  // Generate 5 deterministic random records (seed 0) and run the shared write/verify flow.
  List<Record> randomRecords = TestHelper.generateRandomRecords(schema, 5, 0L);
  testComplexTypeWrite(schema, randomRecords);
}
Example usage of org.apache.iceberg.data.Record in the Apache Hive project:
class TestHiveIcebergComplexTypeWrites, method testWriteStructOfPrimitivesInTable.
@Test
public void testWriteStructOfPrimitivesInTable() throws IOException {
  // Schema: a required long id plus a required struct column with two string fields.
  Schema schema = new Schema(
      required(1, "id", Types.LongType.get()),
      required(2, "structofprimitives", Types.StructType.of(
          required(3, "key", Types.StringType.get()),
          required(4, "value", Types.StringType.get()))));
  // Generate 5 deterministic random records (seed 0) and run the shared write/verify flow.
  List<Record> randomRecords = TestHelper.generateRandomRecords(schema, 5, 0L);
  testComplexTypeWrite(schema, randomRecords);
}
Example usage of org.apache.iceberg.data.Record in the Apache Hive project:
class TestHiveIcebergInserts, method testInsertOverwritePartitionedTable.
@Test
public void testInsertOverwritePartitionedTable() throws IOException {
  TableIdentifier target = TableIdentifier.of("default", "target");
  PartitionSpec spec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA)
      .identity("last_name")
      .build();
  Table table = testTables.createTable(shell, target.name(),
      HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, spec, fileFormat, ImmutableList.of());

  // IOW into an empty target table -> the whole source result set is inserted.
  List<Record> expected = new ArrayList<>(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
  // Add one more record to 'Green' so we have a partition with multiple records.
  expected.add(TestHelper.RecordsBuilder.newInstance(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA)
      .add(8L, "Sue", "Green")
      .build()
      .get(0));
  shell.executeStatement(testTables.getInsertQuery(expected, target, true));
  HiveIcebergTestUtils.validateData(table, expected, 0);

  // IOW into a non-empty target table -> only the affected partitions are overwritten.
  List<Record> newRecords = TestHelper.RecordsBuilder.newInstance(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA)
      .add(0L, "Mike", "Brown")    // overwritten
      .add(1L, "Christy", "Green") // overwritten (partition has this single record now)
      .add(3L, "Bill", "Purple")   // appended (new partition)
      .build();
  shell.executeStatement(testTables.getInsertQuery(newRecords, target, true));
  expected = new ArrayList<>(newRecords);
  // The existing, untouched partition ('Pink') must survive the overwrite.
  expected.add(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS.get(2));
  HiveIcebergTestUtils.validateData(table, expected, 0);

  // IOW with an empty source result set -> has no effect on a partitioned table.
  shell.executeStatement("INSERT OVERWRITE TABLE target SELECT * FROM target WHERE FALSE");
  HiveIcebergTestUtils.validateData(table, expected, 0);
}
Example usage of org.apache.iceberg.data.Record in the Apache Hive project:
class TestHiveIcebergInserts, method testInsertFromSelect.
/**
 * Tests a map-only insert: INSERT INTO ... SELECT * from the same table.
 * @throws IOException If there is an underlying IOException
 */
@Test
public void testInsertFromSelect() throws IOException {
  Table table = testTables.createTable(shell, "customers",
      HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, fileFormat,
      HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
  shell.executeStatement("INSERT INTO customers SELECT * FROM customers");
  // Every original record should now be present exactly twice.
  List<Record> duplicated = new ArrayList<>(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
  duplicated.addAll(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
  HiveIcebergTestUtils.validateData(table, duplicated, 0);
}
Example usage of org.apache.iceberg.data.Record in the Apache Hive project:
class TestHiveIcebergInserts, method testInsertSupportedTypes.
/**
 * Creates one single-column table per supported Iceberg type, writes 5 random
 * records into it at creation time, and validates the data can be read back.
 * @throws IOException If there is an underlying IOException
 */
@Test
public void testInsertSupportedTypes() throws IOException {
  for (int i = 0; i < SUPPORTED_TYPES.size(); i++) {
    Type type = SUPPORTED_TYPES.get(i);
    // TODO: remove this filter when issue #1881 is resolved
    // Fixed: use equals() instead of == for the UUID type check, consistent with
    // the binary/fixed checks below — reference equality on Type objects is fragile.
    if (type.equals(Types.UUIDType.get()) && fileFormat == FileFormat.PARQUET) {
      continue;
    }
    // TODO: remove this filter when we figure out how we could test binary types
    if (type.equals(Types.BinaryType.get()) || type.equals(Types.FixedType.ofLength(5))) {
      continue;
    }
    // Table/column names derive from the type id, e.g. "string_column" / "string_table_0";
    // the loop index keeps table names unique across types with the same type id.
    String columnName = type.typeId().toString().toLowerCase() + "_column";
    Schema schema = new Schema(required(1, "id", Types.LongType.get()), required(2, columnName, type));
    List<Record> expected = TestHelper.generateRandomRecords(schema, 5, 0L);
    Table table = testTables.createTable(shell, type.typeId().toString().toLowerCase() + "_table_" + i,
        schema, PartitionSpec.unpartitioned(), fileFormat, expected);
    HiveIcebergTestUtils.validateData(table, expected, 0);
  }
}
Aggregations