Use of org.apache.beam.sdk.extensions.sql.meta.BeamSqlTable in project beam by apache.
The class BeamIOPushDownRule, method onMatch.
// ~ Methods ----------------------------------------------------------------
@Override
public void onMatch(RelOptRuleCall call) {
  final BeamIOSourceRel ioSourceRel = call.rel(1);
  final BeamSqlTable beamSqlTable = ioSourceRel.getBeamSqlTable();
  if (ioSourceRel instanceof BeamPushDownIOSourceRel) {
    return;
  }
  // Nested rows are not supported at the moment.
  for (RelDataTypeField field : ioSourceRel.getRowType().getFieldList()) {
    if (field.getType() instanceof RelRecordType) {
      return;
    }
  }
  final Calc calc = call.rel(0);
  final RexProgram program = calc.getProgram();
  final Pair<ImmutableList<RexNode>, ImmutableList<RexNode>> projectFilter = program.split();
  final RelDataType calcInputRowType = program.getInputRowType();
  // When predicate push-down is not supported, all filters are unsupported.
  final BeamSqlTableFilter tableFilter = beamSqlTable.constructFilter(projectFilter.right);
  if (!beamSqlTable.supportsProjects().isSupported()
      && tableFilter instanceof DefaultTableFilter) {
    // Either project or filter push-down must be supported by the IO.
    return;
  }
  Set<String> usedFields = new LinkedHashSet<>();
  if (!(tableFilter instanceof DefaultTableFilter)
      && !beamSqlTable.supportsProjects().isSupported()) {
    // When applying standalone filter push-down, all fields must be projected by the IO.
    // With a single exception: the Calc projects all fields (in the same order) and does
    // nothing else.
    usedFields.addAll(calcInputRowType.getFieldNames());
  } else {
    // Find all input refs used by projects.
    for (RexNode project : projectFilter.left) {
      findUtilizedInputRefs(calcInputRowType, project, usedFields);
    }
    // Find all input refs used by filters.
    for (RexNode filter : tableFilter.getNotSupported()) {
      findUtilizedInputRefs(calcInputRowType, filter, usedFields);
    }
  }
  if (usedFields.isEmpty()) {
    // No need to do push-down for queries like this: "select UPPER('hello')".
    return;
  }
  // Already optimal: none of the filters are supported and the Calc uses every field the IO
  // produces.
  if (tableFilter.getNotSupported().containsAll(projectFilter.right)
      && usedFields.containsAll(ioSourceRel.getRowType().getFieldNames())) {
    return;
  }
  FieldAccessDescriptor resolved = FieldAccessDescriptor.withFieldNames(usedFields);
  resolved = resolved.resolve(beamSqlTable.getSchema());
  if (canDropCalc(program, beamSqlTable.supportsProjects(), tableFilter)) {
    call.transformTo(
        ioSourceRel.createPushDownRel(
            calc.getRowType(),
            resolved.getFieldsAccessed().stream()
                .map(FieldDescriptor::getFieldName)
                .collect(Collectors.toList()),
            tableFilter));
    return;
  }
  // Still nothing to gain: the unsupported filters are exactly the Calc's filters and every
  // field produced by the IO is used.
  if (tableFilter.getNotSupported().equals(projectFilter.right)
      && usedFields.containsAll(ioSourceRel.getRowType().getFieldNames())) {
    return;
  }
  RelNode result =
      constructNodesWithPushDown(
          resolved,
          call.builder(),
          ioSourceRel,
          tableFilter,
          calc.getRowType(),
          projectFilter.left);
  if (tableFilter.getNotSupported().size() <= projectFilter.right.size()
      || usedFields.size() < calcInputRowType.getFieldCount()) {
    // Smaller Calc programs are indisputably better, as are IOs with fewer projected fields.
    // We can consider something with the same number of filters.
    call.transformTo(result);
  }
}
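The helper findUtilizedInputRefs called above is not included in this snippet. As a rough, hypothetical sketch (not the actual Beam implementation; the package names below assume plain Calcite rather than Beam's vendored copy), collecting the field names referenced by a RexNode could look like this: recurse into the operands of every RexCall and record the field name behind every RexInputRef.

import java.util.Set;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rex.RexCall;
import org.apache.calcite.rex.RexInputRef;
import org.apache.calcite.rex.RexNode;

final class InputRefCollector {
  // Hypothetical sketch only; the real findUtilizedInputRefs lives in BeamIOPushDownRule and
  // handles more node kinds. This version records the input fields referenced by a RexNode.
  static void collectInputRefs(RelDataType inputRowType, RexNode node, Set<String> usedFields) {
    if (node instanceof RexInputRef) {
      // Direct reference to an input column: record its field name.
      int index = ((RexInputRef) node).getIndex();
      usedFields.add(inputRowType.getFieldList().get(index).getName());
    } else if (node instanceof RexCall) {
      // Operator or function call: recurse into its operands.
      for (RexNode operand : ((RexCall) node).getOperands()) {
        collectInputRefs(inputRowType, operand, usedFields);
      }
    }
  }

  private InputRefCollector() {}
}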
Use of org.apache.beam.sdk.extensions.sql.meta.BeamSqlTable in project beam by apache.
The class BigQueryTestTableProvider, method buildBeamSqlTable.
@Override
public BeamSqlTable buildBeamSqlTable(Table table) {
  BeamSqlTable t = beamSqlTableMap.get(table.getLocation());
  if (t != null) {
    return t;
  }
  t =
      new BigQueryTestTable(
          table,
          BigQueryUtils.ConversionOptions.builder()
              .setTruncateTimestamps(
                  firstNonNull(table.getProperties().getBoolean("truncateTimestamps"), false)
                      ? BigQueryUtils.ConversionOptions.TruncateTimestamps.TRUNCATE
                      : BigQueryUtils.ConversionOptions.TruncateTimestamps.REJECT)
              .build());
  beamSqlTableMap.put(table.getLocation(), t);
  return t;
}
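The provider above caches built tables by location, so asking it twice for the same table definition yields the same instance. A minimal usage sketch, assuming a provider instance and a hypothetical mockBigQueryTable(...) helper that are not part of the original snippet:

// Hypothetical illustration of the location-keyed cache in buildBeamSqlTable.
Table table = mockBigQueryTable("my_table"); // assumed helper, not shown here
BeamSqlTable first = provider.buildBeamSqlTable(table);
BeamSqlTable second = provider.buildBeamSqlTable(table);
// Both calls share the same location, so the second one returns the cached instance.
assertSame(first, second);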
Use of org.apache.beam.sdk.extensions.sql.meta.BeamSqlTable in project beam by apache.
The class KafkaTableProviderTest, method testBuildBeamSqlNestedThriftTable.
@Test
public void testBuildBeamSqlNestedThriftTable() {
  Table table =
      mockNestedThriftTable("hello", SimpleThriftMessage.class, TCompactProtocol.Factory.class);
  BeamSqlTable sqlTable = provider.buildBeamSqlTable(table);
  assertNotNull(sqlTable);
  assertTrue(sqlTable instanceof NestedPayloadKafkaTable);
  BeamKafkaTable kafkaTable = (BeamKafkaTable) sqlTable;
  assertEquals(LOCATION_BROKER, kafkaTable.getBootstrapServers());
  assertEquals(ImmutableList.of(LOCATION_TOPIC), kafkaTable.getTopics());
}
Use of org.apache.beam.sdk.extensions.sql.meta.BeamSqlTable in project beam by apache.
The class KafkaTableProviderTest, method testBuildWithExtraServers.
@Test
public void testBuildWithExtraServers() {
  Table table =
      mockTableWithExtraServers("hello", ImmutableList.of("localhost:1111", "localhost:2222"));
  BeamSqlTable sqlTable = provider.buildBeamSqlTable(table);
  assertNotNull(sqlTable);
  assertTrue(sqlTable instanceof BeamKafkaCSVTable);
  BeamKafkaCSVTable kafkaTable = (BeamKafkaCSVTable) sqlTable;
  assertEquals(
      LOCATION_BROKER + ",localhost:1111,localhost:2222", kafkaTable.getBootstrapServers());
  assertEquals(ImmutableList.of(LOCATION_TOPIC), kafkaTable.getTopics());
}
Use of org.apache.beam.sdk.extensions.sql.meta.BeamSqlTable in project beam by apache.
The class KafkaTableProviderTest, method testBuildBeamSqlThriftTable.
@Test
public void testBuildBeamSqlThriftTable() {
  Table table =
      mockThriftTable("hello", SimpleThriftMessage.class, TCompactProtocol.Factory.class);
  BeamSqlTable sqlTable = provider.buildBeamSqlTable(table);
  assertNotNull(sqlTable);
  assertTrue(sqlTable instanceof BeamKafkaTable);
  BeamKafkaTable kafkaTable = (BeamKafkaTable) sqlTable;
  assertEquals(LOCATION_BROKER, kafkaTable.getBootstrapServers());
  assertEquals(ImmutableList.of(LOCATION_TOPIC), kafkaTable.getTopics());
}
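These tests only inspect the table metadata. A table built by the provider can also be expanded into a pipeline source; the sketch below is hypothetical (the provider and a mockTable("hello") helper are assumed, in the style of the mocks used above) and relies on BeamSqlTable.buildIOReader:

// Hypothetical sketch: reading from a provider-built table in a pipeline.
Pipeline pipeline = Pipeline.create();
Table table = mockTable("hello"); // assumed helper similar to the mocks used above
BeamSqlTable sqlTable = provider.buildBeamSqlTable(table);
// buildIOReader expands the table into a PCollection<Row> rooted at the pipeline.
PCollection<Row> rows = sqlTable.buildIOReader(pipeline.begin());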