Use of org.apache.beam.sdk.Pipeline in project beam by apache.
From the class TrackStreamingSourcesTest, the method testTrackSingle:
@Test
public void testTrackSingle() {
  options.setRunner(SparkRunner.class);
  JavaSparkContext jsc = SparkContextFactory.getSparkContext(options);
  JavaStreamingContext jssc =
      new JavaStreamingContext(
          jsc, new org.apache.spark.streaming.Duration(options.getBatchIntervalMillis()));
  Pipeline p = Pipeline.create(options);
  // Build a trivial streaming pipeline around a single CreateStream source.
  CreateStream<Integer> emptyStream =
      CreateStream.of(VarIntCoder.of(), Duration.millis(options.getBatchIntervalMillis()))
          .emptyBatch();
  p.apply(emptyStream).apply(ParDo.of(new PassthroughFn<>()));
  // Walk the pipeline and verify that exactly one streaming source was tracked.
  p.traverseTopologically(new StreamingSourceTracker(jssc, p, ParDo.MultiOutput.class, 0));
  assertThat(StreamingSourceTracker.numAssertions, equalTo(1));
}
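The PassthroughFn applied above is a test helper not shown in this excerpt. A minimal sketch, assuming it simply forwards every element unchanged (the real helper may differ), could look like this:

private static class PassthroughFn<T> extends DoFn<T, T> {
  // Assumed shape: emit each input element as-is so the pipeline has a
  // downstream transform without altering the data.
  @ProcessElement
  public void process(ProcessContext c) {
    c.output(c.element());
  }
}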
Use of org.apache.beam.sdk.Pipeline in project beam by apache.
From the class ForceStreamingTest, the method test:
@Test
public void test() throws IOException {
  TestSparkPipelineOptions options =
      PipelineOptionsFactory.create().as(TestSparkPipelineOptions.class);
  options.setRunner(TestSparkRunner.class);
  options.setForceStreaming(true);
  // Pipeline with a bounded read.
  Pipeline pipeline = Pipeline.create(options);
  // Apply the BoundedReadFromUnboundedSource.
  BoundedReadFromUnboundedSource<?> boundedRead =
      Read.from(CountingSource.unbounded()).withMaxNumRecords(-1);
  pipeline.apply(boundedRead);
  // Adapt bounded reads for forced-streaming execution.
  TestSparkRunner runner = TestSparkRunner.fromOptions(options);
  runner.adaptBoundedReads(pipeline);
  UnboundedReadDetector unboundedReadDetector = new UnboundedReadDetector();
  pipeline.traverseTopologically(unboundedReadDetector);
  // Assert that the applied BoundedReadFromUnboundedSource
  // is being treated as an unbounded read.
  assertThat("Expected to have an unbounded read.", unboundedReadDetector.isUnbounded);
}
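UnboundedReadDetector is a pipeline visitor defined elsewhere in the test class. A minimal sketch, assuming it flags any primitive Read.Unbounded transform it encounters while walking the graph, might be:

private static class UnboundedReadDetector extends Pipeline.PipelineVisitor.Defaults {
  // Assumed shape: set to true if any unbounded read primitive is found.
  boolean isUnbounded = false;

  @Override
  public void visitPrimitiveTransform(TransformHierarchy.Node node) {
    if (node.getTransform() instanceof Read.Unbounded) {
      isUnbounded = true;
    }
  }
}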
Use of org.apache.beam.sdk.Pipeline in project beam by apache.
From the class SparkRuntimeContextTest, the method testSerializingPipelineOptionsWithCustomUserType:
@Test
public void testSerializingPipelineOptionsWithCustomUserType() throws Exception {
  PipelineOptions options =
      PipelineOptionsFactory.fromArgs("--jacksonIncompatible=\"testValue\"")
          .as(JacksonIncompatibleOptions.class);
  options.setRunner(CrashingRunner.class);
  Pipeline p = Pipeline.create(options);
  SparkRuntimeContext context = new SparkRuntimeContext(p, options);
  // Round-trip the context through Java serialization.
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  try (ObjectOutputStream outputStream = new ObjectOutputStream(baos)) {
    outputStream.writeObject(context);
  }
  try (ObjectInputStream inputStream =
      new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
    SparkRuntimeContext copy = (SparkRuntimeContext) inputStream.readObject();
    // The custom user type must survive the round trip intact.
    assertEquals(
        "testValue",
        copy.getPipelineOptions().as(JacksonIncompatibleOptions.class)
            .getJacksonIncompatible().value);
  }
}
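The JacksonIncompatibleOptions interface is the interesting part of this test: it carries a user type that Jackson cannot serialize without custom (de)serializers. A plausible sketch, assuming the usual Beam pattern for such options (the serializer and deserializer class names here are illustrative), is:

public interface JacksonIncompatibleOptions extends PipelineOptions {
  // Assumed annotations: the custom type needs explicit Jackson
  // serializer/deserializer hooks to round-trip through options.
  @JsonSerialize(using = JacksonIncompatibleSerializer.class)     // hypothetical class
  @JsonDeserialize(using = JacksonIncompatibleDeserializer.class) // hypothetical class
  JacksonIncompatible getJacksonIncompatible();

  void setJacksonIncompatible(JacksonIncompatible value);
}

// Assumed user type: the test reads getJacksonIncompatible().value,
// so a public value field is implied.
public static class JacksonIncompatible {
  public final String value;

  public JacksonIncompatible(String value) {
    this.value = value;
  }
}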
Use of org.apache.beam.sdk.Pipeline in project beam by apache.
From the class SparkPipelineStateTest, the method getPipeline:
private Pipeline getPipeline(final SparkPipelineOptions options) {
  final Pipeline pipeline = Pipeline.create(options);
  final String name = testName.getMethodName() + "(isStreaming=" + options.isStreaming() + ")";
  pipeline
      .apply(getValues(options))
      .setCoder(StringUtf8Coder.of())
      .apply(printParDo(name));
  return pipeline;
}
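Both getValues(options) and printParDo(name) are helpers defined elsewhere in SparkPipelineStateTest. A minimal sketch of printParDo, assuming it logs each element under the test name and passes it through unchanged, could be:

private static ParDo.SingleOutput<String, String> printParDo(final String name) {
  // Assumed shape: print each element tagged with the test name, then
  // re-emit it so downstream transforms still receive the data.
  return ParDo.of(
      new DoFn<String, String>() {
        @ProcessElement
        public void process(ProcessContext c) {
          System.out.println(name + ": " + c.element());
          c.output(c.element());
        }
      });
}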
Use of org.apache.beam.sdk.Pipeline in project beam by apache.
From the class SparkPipelineStateTest, the method testRunningPipeline:
private void testRunningPipeline(final SparkPipelineOptions options) throws Exception {
  final Pipeline pipeline = getPipeline(options);
  final SparkPipelineResult result = (SparkPipelineResult) pipeline.run();
  // The job runs asynchronously, so it should report RUNNING until cancelled.
  assertThat(result.getState(), is(PipelineResult.State.RUNNING));
  result.cancel();
}
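A test method would then drive this helper with concrete options. A hedged example, assuming a getStreamingOptions() helper exists on the test class (the name is illustrative, not from the excerpt), might be:

@Test
public void testStreamingPipelineRunningState() throws Exception {
  // Illustrative caller: run the pipeline in streaming mode and
  // verify it reports the RUNNING state before being cancelled.
  testRunningPipeline(getStreamingOptions());
}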