Example use of io.trino.testing.MaterializedRow from the Trino project (trinodb): the testTimestampMicros method of the TestTimestampMicros class.
@Test(dataProvider = "testTimestampMicrosDataProvider")
public void testTimestampMicros(HiveTimestampPrecision timestampPrecision, LocalDateTime expected) throws Exception {
    // Reads the "created" column of the issue-5483.parquet fixture at the given
    // Hive timestamp precision and checks it materializes as the expected value.
    HiveConfig config = new HiveConfig().setTimestampPrecision(timestampPrecision);
    ConnectorSession session = getHiveSession(config);
    File parquetFile = new File(Resources.getResource("issue-5483.parquet").toURI());
    Type columnType = createTimestampType(timestampPrecision.getPrecision());
    try (ConnectorPageSource pageSource = createPageSource(session, parquetFile, "created", HIVE_TIMESTAMP, columnType)) {
        MaterializedResult result = materializeSourceDataStream(session, pageSource, List.of(columnType)).toTestTypes();
        MaterializedRow expectedRow = new MaterializedRow(List.of(expected));
        assertThat(result.getMaterializedRows()).containsOnly(expectedRow);
    }
}
Example use of io.trino.testing.MaterializedRow from the Trino project (trinodb): the testTimestampMicrosAsTimestampWithTimeZone method of the TestTimestampMicros class.
@Test(dataProvider = "testTimestampMicrosDataProvider")
public void testTimestampMicrosAsTimestampWithTimeZone(HiveTimestampPrecision timestampPrecision, LocalDateTime expected) throws Exception {
    // Same fixture as testTimestampMicros, but the column is read as
    // timestamp-with-time-zone; the expected local value is anchored to UTC.
    HiveConfig config = new HiveConfig().setTimestampPrecision(timestampPrecision);
    ConnectorSession session = getHiveSession(config);
    File parquetFile = new File(Resources.getResource("issue-5483.parquet").toURI());
    Type columnType = createTimestampWithTimeZoneType(timestampPrecision.getPrecision());
    try (ConnectorPageSource pageSource = createPageSource(session, parquetFile, "created", HIVE_TIMESTAMP, columnType)) {
        MaterializedResult result = materializeSourceDataStream(session, pageSource, List.of(columnType)).toTestTypes();
        MaterializedRow expectedRow = new MaterializedRow(List.of(expected.atZone(ZoneId.of("UTC"))));
        assertThat(result.getMaterializedRows()).containsOnly(expectedRow);
    }
}
Example use of io.trino.testing.MaterializedRow from the Trino project (trinodb): the testPartitionedTableStatistics method of the BaseIcebergConnectorTest class.
@Test
public void testPartitionedTableStatistics() {
    // Verifies SHOW STATS on a partitioned Iceberg table tracks nulls fraction,
    // low/high values, and total row count across successive inserts.
    //
    // SHOW STATS row layout used below (positional):
    //   field 0 = column name, field 3 = nulls fraction,
    //   field 4 = row count (summary row only), field 5 = low value, field 6 = high value.
    assertUpdate("CREATE TABLE iceberg.tpch.test_partitioned_table_statistics (col1 REAL, col2 BIGINT) WITH (partitioning = ARRAY['col2'])");
    String insertStart = "INSERT INTO test_partitioned_table_statistics";

    // Two rows, no nulls in either column.
    assertUpdate(insertStart + " VALUES (-10, -1)", 1);
    assertUpdate(insertStart + " VALUES (100, 10)", 1);
    MaterializedResult result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
    assertEquals(result.getRowCount(), 3);
    assertColumnStats(result.getMaterializedRows().get(0), "col1", 0.0, "-10.0", "100.0");
    assertColumnStats(result.getMaterializedRows().get(1), "col2", 0.0, "-1", "10");
    assertEquals(result.getMaterializedRows().get(2).getField(4), 2.0);

    // Add 5 non-null col1 values (101..105) and 5 NULL col1 values: 12 rows total,
    // so col1 nulls fraction is 5/12 and its high value moves to 105.0.
    assertUpdate(insertStart + " VALUES " + IntStream.rangeClosed(1, 5).mapToObj(i -> format("(%d, 10)", i + 100)).collect(joining(", ")), 5);
    assertUpdate(insertStart + " VALUES " + IntStream.rangeClosed(6, 10).mapToObj(i -> "(NULL, 10)").collect(joining(", ")), 5);
    result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
    assertEquals(result.getRowCount(), 3);
    assertColumnStats(result.getMaterializedRows().get(0), "col1", 5.0 / 12.0, "-10.0", "105.0");
    assertColumnStats(result.getMaterializedRows().get(1), "col2", 0.0, "-1", "10");
    assertEquals(result.getMaterializedRows().get(2).getField(4), 12.0);

    // Add 5 rows with NULL col2 (the partition column): 17 rows total,
    // 5 nulls in each column -> both nulls fractions become 5/17.
    assertUpdate(insertStart + " VALUES " + IntStream.rangeClosed(6, 10).mapToObj(i -> "(100, NULL)").collect(joining(", ")), 5);
    result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
    assertColumnStats(result.getMaterializedRows().get(0), "col1", 5.0 / 17.0, "-10.0", "105.0");
    assertColumnStats(result.getMaterializedRows().get(1), "col2", 5.0 / 17.0, "-1", "10");
    assertEquals(result.getMaterializedRows().get(2).getField(4), 17.0);

    dropTable("iceberg.tpch.test_partitioned_table_statistics");
}

/**
 * Asserts the column name (field 0), nulls fraction (field 3), low value (field 5)
 * and high value (field 6) of a single SHOW STATS result row.
 */
private static void assertColumnStats(MaterializedRow row, String columnName, double nullsFraction, String lowValue, String highValue) {
    assertEquals(row.getField(0), columnName);
    assertEquals(row.getField(3), nullsFraction);
    assertEquals(row.getField(5), lowValue);
    assertEquals(row.getField(6), highValue);
}
Example use of io.trino.testing.MaterializedRow from the Trino project (trinodb): the testLocalDynamicFilteringWithSelectiveBuildSizeJoin method of the BaseIcebergConnectorTest class.
@Test
public void testLocalDynamicFilteringWithSelectiveBuildSizeJoin() {
    // Baseline: how many positions an unfiltered scan of lineitem produces.
    long unfilteredPositions = (Long) computeActual("SELECT count(*) FROM lineitem").getOnlyValue();

    // Pick a value for totalprice where file level stats will not be able to filter out any data
    // This assumes the totalprice ranges in every file have some overlap, otherwise this test will fail.
    MaterializedRow priceBounds = getOnlyElement(computeActual("SELECT max(lower_bounds[4]), min(upper_bounds[4]) FROM \"orders$files\"").getMaterializedRows());
    String pickPriceQuery = format("SELECT totalprice FROM orders WHERE totalprice > %s AND totalprice < %s LIMIT 1", priceBounds.getField(0), priceBounds.getField(1));
    double totalPrice = (Double) computeActual(pickPriceQuery).getOnlyValue();

    Session broadcastSession = Session.builder(getSession())
            .setSystemProperty(JOIN_DISTRIBUTION_TYPE, BROADCAST.name())
            .build();
    ResultWithQueryId<MaterializedResult> result = getDistributedQueryRunner().executeWithQueryId(broadcastSession, "SELECT * FROM lineitem JOIN orders ON lineitem.orderkey = orders.orderkey AND orders.totalprice = " + totalPrice);
    OperatorStats probeStats = searchScanFilterAndProjectOperatorStats(result.getQueryId(), new QualifiedObjectName(ICEBERG_CATALOG, "tpch", "lineitem"));

    // Assert some lineitem rows were filtered out on file level
    assertThat(probeStats.getInputPositions()).isLessThan(unfilteredPositions);
}
Example use of io.trino.testing.MaterializedRow from the Trino project (trinodb): the testInsertWithEveryType method of the BaseMongoConnectorTest class.
@Test
public void testInsertWithEveryType() {
    // Creates a table covering every MongoDB connector column type, inserts one
    // row of each, and verifies the values round-trip through SELECT.
    String createSql = "CREATE TABLE test_insert_types_table ( vc varchar, vb varbinary, bi bigint, d double, b boolean, dt date, ts timestamp, objid objectid, _json json)";
    getQueryRunner().execute(getSession(), createSql);

    String insertSql = "INSERT INTO test_insert_types_table SELECT 'foo' _varchar, cast('bar' as varbinary) _varbinary, cast(1 as bigint) _bigint, 3.14E0 _double, true _boolean, DATE '1980-05-07' _date, TIMESTAMP '1980-05-07 11:22:33.456' _timestamp, ObjectId('ffffffffffffffffffffffff') _objectid, JSON '{\"name\":\"alice\"}' _json";
    getQueryRunner().execute(getSession(), insertSql);

    MaterializedResult selected = getQueryRunner().execute(getSession(), "SELECT * FROM test_insert_types_table").toTestTypes();
    assertEquals(selected.getRowCount(), 1);
    MaterializedRow onlyRow = selected.getMaterializedRows().get(0);
    assertEquals(onlyRow.getField(0), "foo");
    assertEquals(onlyRow.getField(1), "bar".getBytes(UTF_8));
    assertEquals(onlyRow.getField(2), 1L);
    assertEquals(onlyRow.getField(3), 3.14);
    assertEquals(onlyRow.getField(4), true);
    assertEquals(onlyRow.getField(5), LocalDate.of(1980, 5, 7));
    assertEquals(onlyRow.getField(6), LocalDateTime.of(1980, 5, 7, 11, 22, 33, 456_000_000));
    // NOTE(review): field 7 (objid) is not asserted — presumably ObjectId values
    // lack a stable comparable form in test types; confirm before relying on it.
    assertEquals(onlyRow.getField(8), "{\"name\":\"alice\"}");

    assertUpdate("DROP TABLE test_insert_types_table");
    assertFalse(getQueryRunner().tableExists(getSession(), "test_insert_types_table"));
}
Aggregations