Usage example of org.apache.beam.sdk.extensions.sql.impl.rel.BeamRelNode in the Apache Beam project.
From the class ZetaSqlDialectSpecTest, method testUNNESTLiteral.
@Test
public void testUNNESTLiteral() {
  // UNNEST over an inline ARRAY<STRING> literal should yield one row per element.
  final String query = "SELECT * FROM UNNEST(ARRAY<STRING>['foo', 'bar']);";

  ZetaSQLQueryPlanner planner = new ZetaSQLQueryPlanner(config);
  BeamRelNode relNode = planner.convertToBeamRel(query);
  PCollection<Row> rows = BeamSqlRelUtils.toPCollection(pipeline, relNode);

  // Output schema is a single string column named "str_field".
  Schema singleStringSchema = Schema.builder().addStringField("str_field").build();
  Row fooRow = Row.withSchema(singleStringSchema).addValues("foo").build();
  Row barRow = Row.withSchema(singleStringSchema).addValues("bar").build();
  PAssert.that(rows).containsInAnyOrder(fooRow, barRow);

  pipeline.run().waitUntilFinish(Duration.standardMinutes(PIPELINE_EXECUTION_WAITTIME_MINUTES));
}
Usage example of org.apache.beam.sdk.extensions.sql.impl.rel.BeamRelNode in the Apache Beam project.
From the class ZetaSqlDialectSpecTest, method testUNNESTParameters.
@Test
public void testUNNESTParameters() {
  // UNNEST over a named query parameter bound to an ARRAY<STRING> value.
  final String query = "SELECT * FROM UNNEST(@p0);";
  Value arrayParam =
      Value.createArrayValue(
          TypeFactory.createArrayType(TypeFactory.createSimpleType(TypeKind.TYPE_STRING)),
          ImmutableList.of(Value.createStringValue("foo"), Value.createStringValue("bar")));
  ImmutableMap<String, Value> queryParams = ImmutableMap.of("p0", arrayParam);

  ZetaSQLQueryPlanner planner = new ZetaSQLQueryPlanner(config);
  BeamRelNode relNode = planner.convertToBeamRel(query, queryParams);
  PCollection<Row> rows = BeamSqlRelUtils.toPCollection(pipeline, relNode);

  // Each array element becomes one output row with a single "str_field" column.
  Schema singleStringSchema = Schema.builder().addStringField("str_field").build();
  PAssert.that(rows)
      .containsInAnyOrder(
          Row.withSchema(singleStringSchema).addValues("foo").build(),
          Row.withSchema(singleStringSchema).addValues("bar").build());

  pipeline.run().waitUntilFinish(Duration.standardMinutes(PIPELINE_EXECUTION_WAITTIME_MINUTES));
}
Usage example of org.apache.beam.sdk.extensions.sql.impl.rel.BeamRelNode in the Apache Beam project.
From the class MongoDbReadWriteIT, method testWriteAndRead.
@Test
public void testWriteAndRead() {
  // Round trip: create a MongoDB-backed external table, write one row via SQL
  // INSERT, then read it back and verify both the schema and the row contents.
  Row expectedRow =
      row(
          SOURCE_SCHEMA,
          9223372036854775807L,
          (byte) 127,
          (short) 32767,
          2147483647,
          (float) 1.0,
          1.0,
          true,
          "varchar",
          Arrays.asList("123", "456"));

  // DDL registering the external table against the MongoDB instance under test.
  String createTableStatement = "CREATE EXTERNAL TABLE TEST( \n" + " c_bigint BIGINT, \n " + " c_tinyint TINYINT, \n" + " c_smallint SMALLINT, \n" + " c_integer INTEGER, \n" + " c_float FLOAT, \n" + " c_double DOUBLE, \n" + " c_boolean BOOLEAN, \n" + " c_varchar VARCHAR, \n " + " c_arr ARRAY<VARCHAR> \n" + ") \n" + "TYPE 'mongodb' \n" + "LOCATION '" + mongoSqlUrl + "'";
  sqlEnv.executeDdl(createTableStatement);

  // Write side: run the INSERT on its own pipeline and wait for completion
  // so the data is visible before the read pipeline starts.
  String insertStatement = "INSERT INTO TEST VALUES (" + "9223372036854775807, " + "127, " + "32767, " + "2147483647, " + "1.0, " + "1.0, " + "TRUE, " + "'varchar', " + "ARRAY['123', '456']" + ")";
  BeamSqlRelUtils.toPCollection(writePipeline, sqlEnv.parseQuery(insertStatement));
  writePipeline.run().waitUntilFinish();

  // Read side: select everything back and compare against the expected row.
  PCollection<Row> output = BeamSqlRelUtils.toPCollection(readPipeline, sqlEnv.parseQuery("select * from TEST"));
  assertThat(output.getSchema(), equalTo(SOURCE_SCHEMA));
  PAssert.that(output).containsInAnyOrder(expectedRow);
  readPipeline.run().waitUntilFinish();
}
Usage example of org.apache.beam.sdk.extensions.sql.impl.rel.BeamRelNode in the Apache Beam project.
From the class TestTableProviderWithFilterPushDown, method testIOSourceRel_selectAll_withSupportedFilter_shouldDropCalc.
@Test
public void testIOSourceRel_selectAll_withSupportedFilter_shouldDropCalc() {
  BeamRelNode plan = sqlEnv.parseQuery("SELECT * FROM TEST where name='two'");
  PCollection<Row> result = BeamSqlRelUtils.toPCollection(pipeline, plan);

  // The Calc node is eliminated: all fields are projected in their original
  // order and the supported filter is pushed down into the IO source.
  assertThat(plan, instanceOf(BeamIOSourceRel.class));
  List<String> projectedFields = plan.getRowType().getFieldNames();
  assertThat(projectedFields, containsInAnyOrder("unused1", "id", "name", "unused2", "b"));

  assertEquals(BASIC_SCHEMA, result.getSchema());
  PAssert.that(result)
      .containsInAnyOrder(row(result.getSchema(), 200, 2, "two", (short) 200, false));

  pipeline.run().waitUntilFinish(Duration.standardMinutes(2));
}
Usage example of org.apache.beam.sdk.extensions.sql.impl.rel.BeamRelNode in the Apache Beam project.
From the class TestTableProviderWithFilterPushDown, method testIOSourceRel_selectAllFieldsInRandomOrder_shouldPushDownSupportedFilter.
@Test
public void testIOSourceRel_selectAllFieldsInRandomOrder_shouldPushDownSupportedFilter() {
  BeamRelNode plan =
      sqlEnv.parseQuery("SELECT unused2, name, id, b, unused1 FROM TEST where name='two'");
  PCollection<Row> result = BeamSqlRelUtils.toPCollection(pipeline, plan);

  // A Calc must remain to reorder the output columns, even though the filter
  // is fully supported and every field is projected. Its condition should be
  // gone because the predicate was pushed down into the IO source below it.
  assertThat(plan, instanceOf(BeamCalcRel.class));
  assertNull(((BeamCalcRel) plan).getProgram().getCondition());
  assertThat(plan.getInput(0), instanceOf(BeamIOSourceRel.class));

  // With standalone filter push-down the IO source still projects all fields.
  List<String> sourceFields = plan.getInput(0).getRowType().getFieldNames();
  assertThat(sourceFields, containsInAnyOrder("unused1", "id", "name", "unused2", "b"));

  Schema expectedSchema =
      Schema.builder()
          .addInt16Field("unused2")
          .addStringField("name")
          .addInt32Field("id")
          .addBooleanField("b")
          .addInt32Field("unused1")
          .build();
  assertEquals(expectedSchema, result.getSchema());
  PAssert.that(result)
      .containsInAnyOrder(row(result.getSchema(), (short) 200, "two", 2, false, 200));

  pipeline.run().waitUntilFinish(Duration.standardMinutes(2));
}
Aggregations