
Example 16 with PTransform

Use of org.apache.beam.sdk.transforms.PTransform in project beam by apache.

From the class ApexPipelineTranslator, the method visitPrimitiveTransform:

@Override
public void visitPrimitiveTransform(TransformHierarchy.Node node) {
    LOG.debug("visiting transform {}", node.getTransform());
    PTransform transform = node.getTransform();
    // Look up the runner-specific translator for this transform type.
    TransformTranslator translator = getTransformTranslator(transform.getClass());
    if (null == translator) {
        // Fail fast: the runner cannot execute a transform it has no translator for.
        throw new UnsupportedOperationException("no translator registered for " + transform);
    }
    // Record which transform is being translated, then delegate to its translator.
    translationContext.setCurrentTransform(node.toAppliedPTransform(getPipeline()));
    translator.translate(transform, translationContext);
}
Also used : PTransform(org.apache.beam.sdk.transforms.PTransform)
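
The visitor above resolves a runner-specific translator per transform class. As an illustration only, a lookup like getTransformTranslator could be backed by a class-keyed map; the sketch below is hypothetical and is not the Apex runner's actual implementation.

// Hypothetical sketch of a translator registry; the real ApexPipelineTranslator
// keeps its own mapping, which is not shown in the snippet above.
private final Map<Class<? extends PTransform>, TransformTranslator> translators = new HashMap<>();

private TransformTranslator getTransformTranslator(Class<? extends PTransform> clazz) {
    // Walk up the class hierarchy so a subclass of a registered transform
    // resolves to the same translator; return null when nothing matches.
    for (Class<?> c = clazz; c != null; c = c.getSuperclass()) {
        TransformTranslator translator = translators.get(c);
        if (translator != null) {
            return translator;
        }
    }
    return null;
}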

Example 17 with PTransform

Use of org.apache.beam.sdk.transforms.PTransform in project components by Talend.

From the class PubSubOutputRuntimeTestIT, the method createTopicSub:

private void createTopicSub(Pipeline pipeline) throws IOException {
    String testID = "createTopicSubTest" + new Random().nextInt();
    final String newTopicName = "tcomp-pubsub-createTopicSub" + uuid;
    final String newSubName = "tcomp-pubsub-createTopicSub-sub" + uuid;
    final String fieldDelimited = ";";
    // Generate random test records plus the CSV payloads expected back from Pub/Sub.
    List<Person> expectedPersons = Person.genRandomList(testID, maxRecords);
    List<String> expectedMessages = new ArrayList<>();
    List<String[]> sendMessages = new ArrayList<>();
    for (Person person : expectedPersons) {
        expectedMessages.add(person.toCSV(fieldDelimited));
        sendMessages.add(person.toCSV(fieldDelimited).split(fieldDelimited));
    }
    // Configure the output so the topic is created on demand if it does not exist.
    PubSubOutputRuntime outputRuntime = new PubSubOutputRuntime();
    PubSubOutputProperties outputProperties = createOutput(addSubscriptionForDataset(createDatasetFromCSV(createDatastore(), newTopicName, fieldDelimited), newSubName));
    outputProperties.topicOperation.setValue(PubSubOutputProperties.TopicOperation.CREATE_IF_NOT_EXISTS);
    outputRuntime.initialize(runtimeContainer, outputProperties);
    // Convert the raw CSV fields to IndexedRecords and publish them.
    PCollection<IndexedRecord> records = (PCollection<IndexedRecord>) pipeline.apply(Create.of(sendMessages)).apply((PTransform) ConvertToIndexedRecord.of());
    records.setCoder(LazyAvroCoder.of()).apply(outputRuntime);
    pipeline.run().waitUntilFinish();
    // Pull and ack until every published message has been received.
    List<String> actual = new ArrayList<>();
    while (true) {
        List<ReceivedMessage> messages = client.pull(newSubName, maxRecords);
        List<String> ackIds = new ArrayList<>();
        for (ReceivedMessage message : messages) {
            actual.add(new String(message.getMessage().decodeData()));
            ackIds.add(message.getAckId());
        }
        client.ack(newSubName, ackIds);
        if (actual.size() >= maxRecords) {
            break;
        }
    }
    // Clean up test resources before asserting on the received payloads.
    client.deleteSubscription(newSubName);
    client.deleteTopic(newTopicName);
    assertThat(actual, containsInAnyOrder(expectedMessages.toArray()));
}
Also used : ConvertToIndexedRecord(org.talend.components.adapter.beam.transform.ConvertToIndexedRecord), IndexedRecord(org.apache.avro.generic.IndexedRecord), ArrayList(java.util.ArrayList), ReceivedMessage(com.google.api.services.pubsub.model.ReceivedMessage), PCollection(org.apache.beam.sdk.values.PCollection), PubSubOutputProperties(org.talend.components.pubsub.output.PubSubOutputProperties), Random(java.util.Random), PTransform(org.apache.beam.sdk.transforms.PTransform)
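
One caveat in the test above: the pull loop is unbounded and will spin forever if messages are lost. A hedged variant, using only the same client calls the test already makes, adds a wall-clock deadline:

// Sketch: the same pull/ack loop bounded by a deadline. Uses the test's
// client API as shown above; the 60-second timeout is an arbitrary choice.
long deadline = System.currentTimeMillis() + 60_000L;
List<String> actual = new ArrayList<>();
while (actual.size() < maxRecords) {
    if (System.currentTimeMillis() > deadline) {
        throw new AssertionError("timed out waiting for " + maxRecords + " messages");
    }
    List<ReceivedMessage> messages = client.pull(newSubName, maxRecords);
    List<String> ackIds = new ArrayList<>();
    for (ReceivedMessage message : messages) {
        actual.add(new String(message.getMessage().decodeData()));
        ackIds.add(message.getAckId());
    }
    // Only ack when something was actually pulled.
    if (!ackIds.isEmpty()) {
        client.ack(newSubName, ackIds);
    }
}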

Example 18 with PTransform

Use of org.apache.beam.sdk.transforms.PTransform in project beam by apache.

From the class BeamWindowRel, the method buildPTransform:

@Override
public PTransform<PCollectionList<Row>, PCollection<Row>> buildPTransform() {
    Schema outputSchema = CalciteUtils.toSchema(getRowType());
    final List<FieldAggregation> analyticFields = Lists.newArrayList();
    this.groups.stream().forEach(anAnalyticGroup -> {
        // Collect the partition keys plus the order-by keys with their sort
        // and null-ordering directions for this window group.
        List<Integer> partitionKeysDef = anAnalyticGroup.keys.toList();
        List<Integer> orderByKeys = Lists.newArrayList();
        List<Boolean> orderByDirections = Lists.newArrayList();
        List<Boolean> orderByNullDirections = Lists.newArrayList();
        anAnalyticGroup.orderKeys.getFieldCollations().stream().forEach(fc -> {
            orderByKeys.add(fc.getFieldIndex());
            orderByDirections.add(fc.direction == RelFieldCollation.Direction.ASCENDING);
            orderByNullDirections.add(fc.nullDirection == RelFieldCollation.NullDirection.FIRST);
        });
        // Translate the frame bounds; null on either side means unbounded (the default).
        BigDecimal lowerB = null;
        BigDecimal upperB = null;
        if (anAnalyticGroup.lowerBound.isCurrentRow()) {
            lowerB = BigDecimal.ZERO;
        } else if (anAnalyticGroup.lowerBound.isPreceding()) {
            if (!anAnalyticGroup.lowerBound.isUnbounded()) {
                lowerB = getLiteralValueConstants(anAnalyticGroup.lowerBound.getOffset());
            }
        } else if (anAnalyticGroup.lowerBound.isFollowing()) {
            if (!anAnalyticGroup.lowerBound.isUnbounded()) {
                lowerB = getLiteralValueConstants(anAnalyticGroup.lowerBound.getOffset()).negate();
            }
        }
        if (anAnalyticGroup.upperBound.isCurrentRow()) {
            upperB = BigDecimal.ZERO;
        } else if (anAnalyticGroup.upperBound.isPreceding()) {
            if (!anAnalyticGroup.upperBound.isUnbounded()) {
                upperB = getLiteralValueConstants(anAnalyticGroup.upperBound.getOffset()).negate();
            }
        } else if (anAnalyticGroup.upperBound.isFollowing()) {
            if (!anAnalyticGroup.upperBound.isUnbounded()) {
                upperB = getLiteralValueConstants(anAnalyticGroup.upperBound.getOffset());
            }
        }
        final BigDecimal lowerBFinal = lowerB;
        final BigDecimal upperBFinal = upperB;
        // Build one FieldAggregation per aggregate call in this window group.
        List<AggregateCall> aggregateCalls = anAnalyticGroup.getAggregateCalls(this);
        aggregateCalls.stream().forEach(anAggCall -> {
            List<Integer> argList = anAggCall.getArgList();
            Schema.Field field = CalciteUtils.toField(anAggCall.getName(), anAggCall.getType());
            Combine.CombineFn combineFn = AggregationCombineFnAdapter.createCombineFnAnalyticsFunctions(anAggCall, field, anAggCall.getAggregation().getName());
            FieldAggregation fieldAggregation = new FieldAggregation(partitionKeysDef, orderByKeys, orderByDirections, orderByNullDirections, lowerBFinal, upperBFinal, anAnalyticGroup.isRows, argList, combineFn, field);
            analyticFields.add(fieldAggregation);
        });
    });
    return new Transform(outputSchema, analyticFields);
}
Also used : Combine(org.apache.beam.sdk.transforms.Combine), Schema(org.apache.beam.sdk.schemas.Schema), BigDecimal(java.math.BigDecimal), AggregateCall(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.AggregateCall), PTransform(org.apache.beam.sdk.transforms.PTransform)
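
The sign convention above is easy to misread: for the lower bound, PRECEDING offsets stay positive and FOLLOWING offsets are negated, while for the upper bound it is the reverse, and null on either side means unbounded. A few worked mappings, derived directly from the branches above:

// Illustration of the frame-bound encoding produced by buildPTransform:
// ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW -> lowerB = null, upperB = 0
// ROWS BETWEEN 2 PRECEDING AND CURRENT ROW         -> lowerB = 2,    upperB = 0
// ROWS BETWEEN CURRENT ROW AND 3 FOLLOWING         -> lowerB = 0,    upperB = 3
// ROWS BETWEEN 1 FOLLOWING AND 4 FOLLOWING         -> lowerB = -1,   upperB = 4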

Example 19 with PTransform

Use of org.apache.beam.sdk.transforms.PTransform in project beam by apache.

From the class PubsubLiteIO, the method write:

/**
 * Write messages to Pub/Sub Lite.
 *
 * <pre>{@code
 * TopicPath topicPath =
 *         TopicPath.newBuilder()
 *             .setProjectNumber(projectNum)
 *             .setLocation(zone)
 *             .setName(topicName)
 *             .build();
 *
 * PCollection<Message> messages = ...;
 * messages.apply(PubsubLiteIO.write(
 *     PublisherOptions.newBuilder().setTopicPath(topicPath).build()));
 *
 * }</pre>
 */
public static PTransform<PCollection<PubSubMessage>, PDone> write(PublisherOptions options) {
    return new PTransform<PCollection<PubSubMessage>, PDone>() {

        @Override
        public PDone expand(PCollection<PubSubMessage> input) {
            // Each element is published through the PubsubLiteSink DoFn;
            // PDone marks the end of this pipeline branch.
            PubsubLiteSink sink = new PubsubLiteSink(options);
            input.apply("Write", ParDo.of(sink));
            return PDone.in(input.getPipeline());
        }
    };
}
Also used : PCollection(org.apache.beam.sdk.values.PCollection), PubsubLiteSink(org.apache.beam.sdk.io.gcp.pubsublite.internal.PubsubLiteSink), PubSubMessage(com.google.cloud.pubsublite.proto.PubSubMessage), PTransform(org.apache.beam.sdk.transforms.PTransform)
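
A minimal end-to-end sketch of the factory above, grounded in the javadoc snippet; the topic path components are caller-supplied placeholders, and proto coder registration is assumed to be available:

// Hedged sketch: wiring PubsubLiteIO.write() into a trivial pipeline.
// projectNum, zone, and topicName are placeholders, not values from the source;
// coder inference for PubSubMessage is assumed to be set up.
TopicPath topicPath = TopicPath.newBuilder()
    .setProjectNumber(projectNum)
    .setLocation(zone)
    .setName(topicName)
    .build();
Pipeline p = Pipeline.create();
p.apply(Create.of(
        PubSubMessage.newBuilder().setData(ByteString.copyFromUtf8("hello")).build()))
    .apply("WriteToPubsubLite",
        PubsubLiteIO.write(PublisherOptions.newBuilder().setTopicPath(topicPath).build()));
p.run().waitUntilFinish();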

Example 20 with PTransform

Use of org.apache.beam.sdk.transforms.PTransform in project beam by apache.

From the class HCatToRow, the method fromSpec:

/**
 * Wraps {@link HCatalogIO#read()} to convert {@link HCatRecord HCatRecords} to {@link Row
 * Rows}.
 *
 * <p>Eventually this should become part of the IO, e.g. {@code HCatalogIO.readRows()}.
 */
public static PTransform<PBegin, PCollection<Row>> fromSpec(HCatalogIO.Read readSpec) {
    return new PTransform<PBegin, PCollection<Row>>() {

        @Override
        public PCollection<Row> expand(PBegin input) {
            HCatalogBeamSchema hcatSchema = HCatalogBeamSchema.create(readSpec.getConfigProperties());
            // Resolve the table's Beam schema up front; get() throws if the table is absent.
            Schema schema = hcatSchema.getTableSchema(readSpec.getDatabase(), readSpec.getTable()).get();
            return input.apply("ReadHCatRecords", readSpec).apply("ConvertToRows", forSchema(schema)).setRowSchema(schema);
        }
    };
}
Also used : Schema(org.apache.beam.sdk.schemas.Schema), Row(org.apache.beam.sdk.values.Row), PBegin(org.apache.beam.sdk.values.PBegin), PTransform(org.apache.beam.sdk.transforms.PTransform)
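
A short usage sketch of the wrapper above; the pipeline variable, the database and table names, and the configuration map are all placeholders:

// Hedged sketch: reading an HCatalog table as Beam Rows via HCatToRow.
Map<String, String> configProps = new HashMap<>();
configProps.put("hive.metastore.uris", "thrift://localhost:9083");  // placeholder URI
PCollection<Row> rows = pipeline.apply(
    HCatToRow.fromSpec(
        HCatalogIO.read()
            .withConfigProperties(configProps)
            .withDatabase("default")
            .withTable("my_table")));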

Aggregations

PTransform (org.apache.beam.sdk.transforms.PTransform): 41 uses
PCollection (org.apache.beam.sdk.values.PCollection): 29 uses
Test (org.junit.Test): 18 uses
AppliedPTransform (org.apache.beam.sdk.runners.AppliedPTransform): 11 uses
PBegin (org.apache.beam.sdk.values.PBegin): 11 uses
IOException (java.io.IOException): 10 uses
ArrayList (java.util.ArrayList): 10 uses
List (java.util.List): 10 uses
Map (java.util.Map): 10 uses
TupleTag (org.apache.beam.sdk.values.TupleTag): 10 uses
DoFn (org.apache.beam.sdk.transforms.DoFn): 9 uses
Coder (org.apache.beam.sdk.coders.Coder): 8 uses
Create (org.apache.beam.sdk.transforms.Create): 8 uses
ParDo (org.apache.beam.sdk.transforms.ParDo): 7 uses
PDone (org.apache.beam.sdk.values.PDone): 7 uses
PCollectionTuple (org.apache.beam.sdk.values.PCollectionTuple): 6 uses
Collection (java.util.Collection): 5 uses
HashMap (java.util.HashMap): 5 uses
Collectors.toList (java.util.stream.Collectors.toList): 5 uses
Schema (org.apache.beam.sdk.schemas.Schema): 5 uses