Use of org.apache.iceberg.data.Record in project hive by apache.
From the class TestHiveIcebergStorageHandlerTimezone, method testTimestampQuery.
@Test
public void testTimestampQuery() throws IOException {
  Schema timestampSchema = new Schema(optional(1, "d_ts", Types.TimestampType.withoutZone()));
  List<Record> records = TestHelper.RecordsBuilder.newInstance(timestampSchema)
      .add(LocalDateTime.of(2019, 1, 22, 9, 44, 54, 100000000))
      .add(LocalDateTime.of(2019, 2, 22, 9, 44, 54, 200000000))
      .build();
  testTables.createTable(shell, "ts_test", timestampSchema, FileFormat.PARQUET, records);

  List<Object[]> result = shell.executeStatement("SELECT d_ts FROM ts_test WHERE d_ts='2019-02-22 09:44:54.2'");
  Assert.assertEquals(1, result.size());
  Assert.assertEquals("2019-02-22 09:44:54.2", result.get(0)[0]);

  result = shell.executeStatement("SELECT * FROM ts_test WHERE d_ts in ('2017-01-01 22:30:57.1', '2019-02-22 09:44:54.2')");
  Assert.assertEquals(1, result.size());
  Assert.assertEquals("2019-02-22 09:44:54.2", result.get(0)[0]);

  result = shell.executeStatement("SELECT d_ts FROM ts_test WHERE d_ts < '2019-02-22 09:44:54.2'");
  Assert.assertEquals(1, result.size());
  Assert.assertEquals("2019-01-22 09:44:54.1", result.get(0)[0]);

  result = shell.executeStatement("SELECT * FROM ts_test WHERE d_ts='2017-01-01 22:30:57.3'");
  Assert.assertEquals(0, result.size());
}
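For context, TestHelper.RecordsBuilder is a Hive test utility; the same row can be built with Iceberg's plain generic data API. A minimal sketch, assuming only standard Iceberg classes (org.apache.iceberg.data.GenericRecord); this is not taken from the test:

Schema timestampSchema = new Schema(optional(1, "d_ts", Types.TimestampType.withoutZone()));
// In the generic data model, timestamp-without-zone columns carry java.time.LocalDateTime values.
GenericRecord record = GenericRecord.create(timestampSchema);
record.setField("d_ts", LocalDateTime.of(2019, 1, 22, 9, 44, 54, 100000000));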
Use of org.apache.iceberg.data.Record in project hive by apache.
From the class TestHiveIcebergTypes, method testStructOfPrimitivesInTable.
@Test
public void testStructOfPrimitivesInTable() throws IOException {
  Schema schema = new Schema(required(1, "structofprimitives",
      Types.StructType.of(
          required(2, "key", Types.StringType.get()),
          required(3, "value", Types.IntegerType.get()))));
  List<Record> records = testTables.createTableWithGeneratedRecords(shell, "structtable", schema, fileFormat, 1);
  // access a single value in a struct
  for (int i = 0; i < records.size(); i++) {
    GenericRecord expectedStruct = (GenericRecord) records.get(i).getField("structofprimitives");
    List<Object[]> queryResult = shell.executeStatement(String.format(
        "SELECT structofprimitives.key, structofprimitives.value FROM default.structtable LIMIT 1 OFFSET %d", i));
    Assert.assertEquals(expectedStruct.getField("key"), queryResult.get(0)[0]);
    Assert.assertEquals(expectedStruct.getField("value"), queryResult.get(0)[1]);
  }
}
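As the cast above shows, a struct field on a generic Record is itself a Record. A minimal sketch of building the nested value by hand with GenericRecord.create on the struct type (illustrative values, not from the test):

Types.StructType structType = Types.StructType.of(
    required(2, "key", Types.StringType.get()),
    required(3, "value", Types.IntegerType.get()));
GenericRecord struct = GenericRecord.create(structType);
struct.setField("key", "k1");
struct.setField("value", 42);
GenericRecord row = GenericRecord.create(schema);  // schema as defined in the test above
row.setField("structofprimitives", struct);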
Use of org.apache.iceberg.data.Record in project hive by apache.
From the class TestHiveIcebergTypes, method testDecimalTableWithPredicateLiterals.
@Test
public void testDecimalTableWithPredicateLiterals() throws IOException {
  Schema schema = new Schema(required(1, "decimal_field", Types.DecimalType.of(7, 2)));
  List<Record> records = TestHelper.RecordsBuilder.newInstance(schema)
      .add(new BigDecimal("85.00"))
      .add(new BigDecimal("100.56"))
      .add(new BigDecimal("100.57"))
      .build();
  testTables.createTable(shell, "dec_test", schema, fileFormat, records);

  // Use integer literal in predicate
  List<Object[]> rows = shell.executeStatement("SELECT * FROM default.dec_test where decimal_field >= 85");
  Assert.assertEquals(3, rows.size());
  Assert.assertArrayEquals(new Object[] { "85.00" }, rows.get(0));
  Assert.assertArrayEquals(new Object[] { "100.56" }, rows.get(1));
  Assert.assertArrayEquals(new Object[] { "100.57" }, rows.get(2));

  // Use decimal literal in predicate with smaller scale than schema type definition
  rows = shell.executeStatement("SELECT * FROM default.dec_test where decimal_field > 99.1");
  Assert.assertEquals(2, rows.size());
  Assert.assertArrayEquals(new Object[] { "100.56" }, rows.get(0));
  Assert.assertArrayEquals(new Object[] { "100.57" }, rows.get(1));

  // Use decimal literal in predicate with higher scale than schema type definition
  rows = shell.executeStatement("SELECT * FROM default.dec_test where decimal_field > 100.565");
  Assert.assertEquals(1, rows.size());
  Assert.assertArrayEquals(new Object[] { "100.57" }, rows.get(0));

  // Use decimal literal in predicate with the same scale as schema type definition
  rows = shell.executeStatement("SELECT * FROM default.dec_test where decimal_field > 640.34");
  Assert.assertEquals(0, rows.size());
}
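Note that the expected strings preserve the declared scale ("85.00" rather than "85"). When building records by hand, the BigDecimal's scale should match DecimalType(7, 2); the sketch below rescales first (the strict-scale behavior of Iceberg's file writers is an assumption here, not something this test asserts):

BigDecimal exact = new BigDecimal("85.00");    // scale 2, matches DecimalType(7, 2)
BigDecimal offScale = new BigDecimal("85.0");  // scale 1; assumed to be rejected on write
record.setField("decimal_field", offScale.setScale(2));  // rescale to 85.00 before writing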
Use of org.apache.iceberg.data.Record in project hive by apache.
From the class TestHiveIcebergComplexTypeWrites, method testWriteMapOfPrimitivesInTable.
@Test
public void testWriteMapOfPrimitivesInTable() throws IOException {
  Schema schema = new Schema(
      required(1, "id", Types.LongType.get()),
      required(2, "mapofprimitives", Types.MapType.ofRequired(3, 4, Types.StringType.get(), Types.StringType.get())));
  List<Record> records = TestHelper.generateRandomRecords(schema, 5, 0L);
  testComplexTypeWrite(schema, records);
}
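generateRandomRecords and testComplexTypeWrite are test helpers; in Iceberg's generic data model a map column is simply a java.util.Map. A minimal hand-built record for this schema (illustrative values, not from the test):

GenericRecord row = GenericRecord.create(schema);
row.setField("id", 1L);
// Map columns take java.util.Map values in the generic API.
row.setField("mapofprimitives", ImmutableMap.of("key1", "value1", "key2", "value2"));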
Use of org.apache.iceberg.data.Record in project hive by apache.
From the class TestHiveIcebergInserts, method testInsertUsingSourceTableWithSharedColumnsNames.
@Test
public void testInsertUsingSourceTableWithSharedColumnsNames() throws IOException {
  List<Record> records = HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS;
  PartitionSpec spec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA)
      .identity("last_name")
      .build();
  testTables.createTable(shell, "source_customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, spec,
      fileFormat, records);
  Table table = testTables.createTable(shell, "target_customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
      spec, fileFormat, ImmutableList.of());
  // The select below from the source table should produce: "hive.io.file.readcolumn.names=customer_id,last_name".
  // Inserting into the target table should not fail even though first_name is not selected from the source table.
  shell.executeStatement("INSERT INTO target_customers SELECT customer_id, 'Sam', last_name FROM source_customers");
  List<Record> expected = Lists.newArrayListWithExpectedSize(records.size());
  records.forEach(r -> {
    Record copy = r.copy();
    copy.setField("first_name", "Sam");
    expected.add(copy);
  });
  HiveIcebergTestUtils.validateData(table, expected, 0);
}
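The comment refers to Hive's column-projection property, which lists only the columns actually read from the source files. A hypothetical check of that property against a job Configuration (obtaining conf here is an assumption; the key itself is Hive's standard projection property):

// Hypothetical: inspect Hive's projection pushdown result on a job Configuration.
String projected = conf.get("hive.io.file.readcolumn.names");
Assert.assertEquals("customer_id,last_name", projected);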