Use of org.apache.beam.runners.dataflow.util.CloudObject in project beam by apache.
The class UserParDoFnFactoryTest, method testFactoryReuseInStep.
@Test
public void testFactoryReuseInStep() throws Exception {
PipelineOptions options = PipelineOptionsFactory.create();
CounterSet counters = new CounterSet();
TestDoFn initialFn = new TestDoFn(Collections.<TupleTag<String>>emptyList());
CloudObject cloudObject = getCloudObject(initialFn);
TestOperationContext operationContext = TestOperationContext.create(counters);
ParDoFn parDoFn =
    factory.create(
        options,
        cloudObject,
        null,
        MAIN_OUTPUT,
        ImmutableMap.<TupleTag<?>, Integer>of(MAIN_OUTPUT, 0),
        BatchModeExecutionContext.forTesting(options, "testStage"),
        operationContext);
Receiver rcvr = new OutputReceiver();
parDoFn.startBundle(rcvr);
parDoFn.processElement(WindowedValue.valueInGlobalWindow("foo"));
TestDoFn fn = (TestDoFn) ((SimpleParDoFn) parDoFn).getDoFnInfo().getDoFn();
assertThat(fn, not(theInstance(initialFn)));
parDoFn.finishBundle();
assertThat(fn.state, equalTo(TestDoFn.State.FINISHED));
// The fn should be reused for the second call to create
ParDoFn secondParDoFn =
    factory.create(
        options,
        cloudObject,
        null,
        MAIN_OUTPUT,
        ImmutableMap.<TupleTag<?>, Integer>of(MAIN_OUTPUT, 0),
        BatchModeExecutionContext.forTesting(options, "testStage"),
        operationContext);
// The fn should still be finished from the last call; it should not be set up again
assertThat(fn.state, equalTo(TestDoFn.State.FINISHED));
secondParDoFn.startBundle(rcvr);
secondParDoFn.processElement(WindowedValue.valueInGlobalWindow("spam"));
TestDoFn reobtainedFn = (TestDoFn) ((SimpleParDoFn) secondParDoFn).getDoFnInfo().getDoFn();
secondParDoFn.finishBundle();
assertThat(reobtainedFn.state, equalTo(TestDoFn.State.FINISHED));
assertThat(fn, theInstance(reobtainedFn));
}
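The reuse asserted above comes from UserParDoFnFactory caching the deserialized user fn, so a second create() in the same step hands back the same instance. A minimal sketch of that caching pattern, using a hypothetical CachedFnFactory rather than the factory's actual internals:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

// Hypothetical sketch of per-step fn reuse: the first create() for a key runs the
// deserializer; later create() calls for the same key return the cached instance.
class CachedFnFactory<T> {
  private final Map<String, T> cache = new ConcurrentHashMap<>();

  T create(String stepKey, Supplier<T> deserializer) {
    // computeIfAbsent invokes the supplier only on the first call per key.
    return cache.computeIfAbsent(stepKey, k -> deserializer.get());
  }
}

With this shape, both create() calls in the test observe the same fn object, which is what the theInstance assertion checks.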
Use of org.apache.beam.runners.dataflow.util.CloudObject in project beam by apache.
The class StreamingDataflowWorkerTest, method testExceptionInvalidatesCache.
@Test
public void testExceptionInvalidatesCache() throws Exception {
// We'll need to force the system to limit bundles to one message at a time.
// Sequence is as follows:
// 01. GetWork[0] (token 0)
// 02. Create counter reader
// 03. Counter yields 0
// 04. GetData[0] (state as null)
// 05. Read state as null
// 06. Set state as 42
// 07. THROW on taking counter reader checkpoint
// 08. Create counter reader
// 09. Counter yields 0
// 10. GetData[1] (state as null)
// 11. Read state as null (*** not 42 ***)
// 12. Take counter reader checkpoint as 0
// 13. CommitWork[0] (message 0:0, state 42, checkpoint 0)
// 14. GetWork[1] (token 1, checkpoint as 0)
// 15. Counter yields 1
// 16. Read (cached) state as 42
// 17. Take counter reader checkpoint 1
// 18. CommitWork[1] (message 0:1, checkpoint 1)
// 19. GetWork[2] (token 2, checkpoint as 1)
// 20. Counter yields 2
// 21. THROW on processElement
// 22. Recreate reader from checkpoint 1
// 23. Counter yields 2 (*** not eof ***)
// 24. GetData[2] (state as 42)
// 25. Read state as 42
// 26. Take counter reader checkpoint 2
// 27. CommitWork[2] (message 0:2, checkpoint 2)
FakeWindmillServer server = new FakeWindmillServer(errorCollector);
server.setExpectedExceptionCount(2);
DataflowPipelineOptions options = createTestingPipelineOptions(server);
options.setNumWorkers(1);
DataflowPipelineDebugOptions debugOptions = options.as(DataflowPipelineDebugOptions.class);
debugOptions.setUnboundedReaderMaxElements(1);
CloudObject codec =
    CloudObjects.asCloudObject(
        WindowedValue.getFullCoder(
            ValueWithRecordId.ValueWithRecordIdCoder.of(
                KvCoder.of(VarIntCoder.of(), VarIntCoder.of())),
            GlobalWindow.Coder.INSTANCE),
        /* sdkComponents= */ null);
TestCountingSource counter = new TestCountingSource(3).withThrowOnFirstSnapshot(true);
List<ParallelInstruction> instructions =
    Arrays.asList(
        new ParallelInstruction()
            .setOriginalName("OriginalReadName")
            .setSystemName("Read")
            .setName(DEFAULT_PARDO_USER_NAME)
            .setRead(
                new ReadInstruction()
                    .setSource(
                        CustomSources.serializeToCloudSource(counter, options).setCodec(codec)))
            .setOutputs(
                Arrays.asList(
                    new InstructionOutput()
                        .setName("read_output")
                        .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME)
                        .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME)
                        .setCodec(codec))),
        makeDoFnInstruction(
            new TestExceptionInvalidatesCacheFn(),
            0,
            StringUtf8Coder.of(),
            WindowingStrategy.globalDefault()),
        makeSinkInstruction(StringUtf8Coder.of(), 1, GlobalWindow.Coder.INSTANCE));
StreamingDataflowWorker worker = makeWorker(instructions, options.as(StreamingDataflowWorkerOptions.class), true);
worker.setRetryLocallyDelayMs(100);
worker.start();
// Three GetData requests
for (int i = 0; i < 3; i++) {
ByteString state;
if (i == 0 || i == 1) {
state = ByteString.EMPTY;
} else {
state = ByteString.copyFrom(new byte[] { 42 });
}
Windmill.GetDataResponse.Builder dataResponse = Windmill.GetDataResponse.newBuilder();
dataResponse
    .addDataBuilder()
    .setComputationId(DEFAULT_COMPUTATION_ID)
    .addDataBuilder()
    .setKey(ByteString.copyFromUtf8("0000000000000001"))
    .setShardingKey(1)
    .addValuesBuilder()
    .setTag(ByteString.copyFromUtf8("//+uint"))
    .setStateFamily(DEFAULT_PARDO_STATE_FAMILY)
    .getValueBuilder()
    .setTimestamp(0)
    .setData(state);
server.addDataToOffer(dataResponse.build());
}
// Three GetWork requests and commits
for (int i = 0; i < 3; i++) {
StringBuilder sb = new StringBuilder();
sb.append("work {\n");
sb.append(" computation_id: \"computation\"\n");
sb.append(" input_data_watermark: 0\n");
sb.append(" work {\n");
sb.append(" key: \"0000000000000001\"\n");
sb.append(" sharding_key: 1\n");
sb.append(" work_token: ");
sb.append(i);
sb.append(" cache_token: 1");
sb.append("\n");
if (i > 0) {
int previousCheckpoint = i - 1;
sb.append(" source_state {\n");
sb.append(" state: \"");
sb.append((char) previousCheckpoint);
sb.append("\"\n");
// We'll elide the finalize ids since it's not necessary to trigger the finalizer
// for this test.
sb.append(" }\n");
}
sb.append(" }\n");
sb.append("}\n");
server.addWorkToOffer(buildInput(sb.toString(), null));
Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
Windmill.WorkItemCommitRequest commit = result.get((long) i);
UnsignedLong finalizeId =
    UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0));
sb = new StringBuilder();
sb.append("key: \"0000000000000001\"\n");
sb.append("sharding_key: 1\n");
sb.append("work_token: ");
sb.append(i);
sb.append("\n");
sb.append("cache_token: 1\n");
sb.append("output_messages {\n");
sb.append(" destination_stream_id: \"out\"\n");
sb.append(" bundles {\n");
sb.append(" key: \"0000000000000001\"\n");
int messageNum = i;
sb.append(" messages {\n");
sb.append(" timestamp: ");
sb.append(messageNum * 1000);
sb.append("\n");
sb.append(" data: \"0:");
sb.append(messageNum);
sb.append("\"\n");
sb.append(" }\n");
sb.append(" messages_ids: \"\"\n");
sb.append(" }\n");
sb.append("}\n");
if (i == 0) {
sb.append("value_updates {\n");
sb.append(" tag: \"//+uint\"\n");
sb.append(" value {\n");
sb.append(" timestamp: 0\n");
sb.append(" data: \"");
sb.append((char) 42);
sb.append("\"\n");
sb.append(" }\n");
sb.append(" state_family: \"parDoStateFamily\"\n");
sb.append("}\n");
}
int sourceState = i;
sb.append("source_state_updates {\n");
sb.append(" state: \"");
sb.append((char) sourceState);
sb.append("\"\n");
sb.append(" finalize_ids: ");
sb.append(finalizeId);
sb.append("}\n");
sb.append("source_watermark: ");
sb.append((sourceState + 1) * 1000);
sb.append("\n");
sb.append("source_backlog_bytes: 7\n");
assertThat(
    // Output timers are cleared because they are irrelevant for the current test.
    setValuesTimestamps(commit.toBuilder().clearOutputTimers()).build(),
    equalTo(
        setMessagesMetadata(
                PaneInfo.NO_FIRING,
                CoderUtils.encodeToByteArray(
                    CollectionCoder.of(GlobalWindow.Coder.INSTANCE),
                    ImmutableList.of(GlobalWindow.INSTANCE)),
                parseCommitRequest(sb.toString()))
            .build()));
}
}
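The codec CloudObject built at the top of this test is just the Dataflow wire form of the element coder. Assuming CloudObjects.coderFromCloudObject is the matching deserialization entry point, a standalone round-trip sketch looks like this:

import org.apache.beam.runners.dataflow.util.CloudObject;
import org.apache.beam.runners.dataflow.util.CloudObjects;
import org.apache.beam.sdk.coders.Coder;
import org.apache.beam.sdk.coders.KvCoder;
import org.apache.beam.sdk.coders.VarIntCoder;

public class CodecRoundTrip {
  public static void main(String[] args) {
    // Serialize a coder into its CloudObject wire form, as the test does for codec.
    CloudObject spec =
        CloudObjects.asCloudObject(
            KvCoder.of(VarIntCoder.of(), VarIntCoder.of()), /* sdkComponents= */ null);
    // Recover an equivalent coder from the CloudObject.
    Coder<?> roundTripped = CloudObjects.coderFromCloudObject(spec);
    System.out.println(roundTripped);
  }
}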
Use of org.apache.beam.runners.dataflow.util.CloudObject in project beam by apache.
The class PubsubReaderTest, method testReadWith.
private void testReadWith(String parseFn) throws Exception {
when(mockContext.getWork())
    .thenReturn(
        Windmill.WorkItem.newBuilder()
            .setKey(ByteString.copyFromUtf8("key"))
            .setWorkToken(0)
            .addMessageBundles(
                Windmill.InputMessageBundle.newBuilder()
                    .setSourceComputationId("pubsub")
                    .addMessages(
                        Windmill.Message.newBuilder()
                            .setTimestamp(0)
                            .setData(ByteString.copyFromUtf8("e0")))
                    .addMessages(
                        Windmill.Message.newBuilder()
                            .setTimestamp(1000)
                            .setData(ByteString.copyFromUtf8("e1")))
                    .addMessages(
                        Windmill.Message.newBuilder()
                            .setTimestamp(2000)
                            .setData(ByteString.copyFromUtf8("e2"))))
            .build());
Map<String, Object> spec = new HashMap<>();
spec.put(PropertyNames.OBJECT_TYPE_NAME, "");
if (parseFn != null) {
spec.put(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, parseFn);
}
CloudObject cloudSourceSpec = CloudObject.fromSpec(spec);
PubsubReader.Factory factory = new PubsubReader.Factory();
PubsubReader<String> reader =
    (PubsubReader<String>)
        factory.create(
            cloudSourceSpec,
            WindowedValue.getFullCoder(StringUtf8Coder.of(), IntervalWindow.getCoder()),
            null,
            mockContext,
            null);
NativeReader.NativeReaderIterator<WindowedValue<String>> iter = reader.iterator();
assertTrue(iter.start());
assertEquals(iter.getCurrent(), WindowedValue.timestampedValueInGlobalWindow("e0", new Instant(0)));
assertTrue(iter.advance());
assertEquals(iter.getCurrent(), WindowedValue.timestampedValueInGlobalWindow("e1", new Instant(1)));
assertTrue(iter.advance());
assertEquals(iter.getCurrent(), WindowedValue.timestampedValueInGlobalWindow("e2", new Instant(2)));
assertFalse(iter.advance());
}
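Note that CloudObject.fromSpec only requires the map to carry PropertyNames.OBJECT_TYPE_NAME, which this test leaves empty. A self-contained sketch of that construction, with a hypothetical type name in place of the empty string:

import java.util.HashMap;
import java.util.Map;
import org.apache.beam.runners.dataflow.util.CloudObject;
import org.apache.beam.runners.dataflow.util.PropertyNames;

public class SpecFromMap {
  public static void main(String[] args) {
    Map<String, Object> spec = new HashMap<>();
    // fromSpec requires this key; "PubsubReader" is a hypothetical type name.
    spec.put(PropertyNames.OBJECT_TYPE_NAME, "PubsubReader");
    CloudObject cloudSpec = CloudObject.fromSpec(spec);
    System.out.println(cloudSpec.getClassName()); // prints: PubsubReader
  }
}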
Use of org.apache.beam.runners.dataflow.util.CloudObject in project beam by apache.
The class SinkRegistryTest, method testCreateUnknownSink.
@Test
public void testCreateUnknownSink() throws Exception {
CloudObject spec = CloudObject.forClassName("UnknownSink");
com.google.api.services.dataflow.model.Sink cloudSink = new com.google.api.services.dataflow.model.Sink();
cloudSink.setSpec(spec);
cloudSink.setCodec(
    CloudObjects.asCloudObject(StringUtf8Coder.of(), /* sdkComponents= */ null));
try {
SinkRegistry.defaultRegistry()
    .create(
        spec,
        StringUtf8Coder.of(),
        options,
        BatchModeExecutionContext.forTesting(options, "testStage"),
        TestOperationContext.create());
Assert.fail("should have thrown an exception");
} catch (Exception exn) {
assertThat(exn.toString(), CoreMatchers.containsString("Unable to create a Sink"));
}
}
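The try/fail/catch idiom above can be tightened with Assert.assertThrows, available from JUnit 4.13 on; a sketch of the same assertion in that style, reusing the test's spec and options:

Exception exn =
    org.junit.Assert.assertThrows(
        Exception.class,
        () ->
            SinkRegistry.defaultRegistry()
                .create(
                    spec,
                    StringUtf8Coder.of(),
                    options,
                    BatchModeExecutionContext.forTesting(options, "testStage"),
                    TestOperationContext.create()));
assertThat(exn.toString(), CoreMatchers.containsString("Unable to create a Sink"));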
Use of org.apache.beam.runners.dataflow.util.CloudObject in project beam by apache.
The class SinkRegistryTest, method testCreatePredefinedSink.
@Test
public void testCreatePredefinedSink() throws Exception {
CloudObject spec = CloudObject.forClassName("AvroSink");
addString(spec, "filename", "/path/to/file.txt");
SizeReportingSinkWrapper<?> sink =
    SinkRegistry.defaultRegistry()
        .create(
            spec,
            StringUtf8Coder.of(),
            options,
            BatchModeExecutionContext.forTesting(options, "testStage"),
            TestOperationContext.create());
assertThat(sink.getUnderlyingSink(), new IsInstanceOf(AvroByteSink.class));
}
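For reference, the forClassName/addString pair used here can be exercised standalone. A minimal sketch that builds the same spec and reads the property back via the Structs accessors (which declare checked exceptions, hence the throws clause):

import static org.apache.beam.runners.dataflow.util.Structs.addString;
import static org.apache.beam.runners.dataflow.util.Structs.getString;

import org.apache.beam.runners.dataflow.util.CloudObject;

public class SinkSpecSketch {
  public static void main(String[] args) throws Exception {
    CloudObject spec = CloudObject.forClassName("AvroSink");
    addString(spec, "filename", "/path/to/file.txt");
    // The registry dispatches on the class name; properties travel with the spec.
    System.out.println(spec.getClassName()); // AvroSink
    System.out.println(getString(spec, "filename")); // /path/to/file.txt
  }
}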