
Example 26 with Server

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in project beam by apache.

From the class StreamingDataflowWorkerTest, method testBasicHarness.

@Test
public void testBasicHarness() throws Exception {
    List<ParallelInstruction> instructions = Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0));
    FakeWindmillServer server = new FakeWindmillServer(errorCollector);
    StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
    StreamingDataflowWorker worker = makeWorker(instructions, options, true);
    worker.start();
    final int numIters = 2000;
    for (int i = 0; i < numIters; ++i) {
        server.addWorkToOffer(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i)));
    }
    Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(numIters);
    worker.stop();
    for (int i = 0; i < numIters; ++i) {
        assertTrue(result.containsKey((long) i));
        assertEquals(makeExpectedOutput(i, TimeUnit.MILLISECONDS.toMicros(i)).build(), result.get((long) i));
    }
    verify(hotKeyLogger, atLeastOnce()).logHotKeyDetection(nullable(String.class), any());
}
Also used: ParallelInstruction (com.google.api.services.dataflow.model.ParallelInstruction), WorkItemCommitRequest (org.apache.beam.runners.dataflow.worker.windmill.Windmill.WorkItemCommitRequest), AtomicLong (java.util.concurrent.atomic.AtomicLong), DataflowCounterUpdateExtractor.splitIntToLong (org.apache.beam.runners.dataflow.worker.counters.DataflowCounterUpdateExtractor.splitIntToLong), UnsignedLong (org.apache.beam.vendor.guava.v26_0_jre.com.google.common.primitives.UnsignedLong), ByteString (org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString), Structs.addString (org.apache.beam.runners.dataflow.util.Structs.addString), StreamingDataflowWorkerOptions (org.apache.beam.runners.dataflow.worker.options.StreamingDataflowWorkerOptions), Test (org.junit.Test)
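When reusing this start/offer/commit/stop pattern, it can be safer to stop the worker in a finally block so a failed assertion does not leak the worker's threads. A minimal sketch under that assumption, built only from the helper names shown in the example above:

@Test
public void testSingleWorkItem() throws Exception {
    List<ParallelInstruction> instructions = Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0));
    FakeWindmillServer server = new FakeWindmillServer(errorCollector);
    StreamingDataflowWorker worker = makeWorker(instructions, createTestingPipelineOptions(server), true);
    worker.start();
    try {
        server.addWorkToOffer(makeInput(0, 0));
        Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
        // One commit should come back for the single offered work item.
        assertTrue(result.containsKey(0L));
    } finally {
        // Always stop the worker, even if an assertion above fails.
        worker.stop();
    }
}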

Example 27 with Server

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in project beam by apache.

From the class StreamingDataflowWorkerTest, method createTestingPipelineOptions.

private StreamingDataflowWorkerOptions createTestingPipelineOptions(FakeWindmillServer server, String... args) {
    List<String> argsList = Lists.newArrayList(args);
    if (streamingEngine) {
        argsList.add("--experiments=enable_streaming_engine");
    }
    StreamingDataflowWorkerOptions options = PipelineOptionsFactory.fromArgs(argsList.toArray(new String[0])).as(StreamingDataflowWorkerOptions.class);
    options.setAppName("StreamingWorkerHarnessTest");
    options.setJobId("test_job_id");
    options.setStreaming(true);
    options.setWindmillServerStub(server);
    options.setActiveWorkRefreshPeriodMillis(0);
    return options;
}
Also used: ByteString (org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString), Structs.addString (org.apache.beam.runners.dataflow.util.Structs.addString), StreamingDataflowWorkerOptions (org.apache.beam.runners.dataflow.worker.options.StreamingDataflowWorkerOptions)
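The streamingEngine flag consulted here is a field of the test class rather than a local, which suggests the suite runs once per engine mode under JUnit's Parameterized runner. A hedged sketch of that wiring, which is not shown on this page:

@RunWith(Parameterized.class)
public class StreamingDataflowWorkerTest {

    // Run every test once with Streaming Engine off and once with it on.
    @Parameterized.Parameters(name = "streamingEngine={0}")
    public static Iterable<Object[]> data() {
        return Arrays.asList(new Object[][] {{false}, {true}});
    }

    @Parameterized.Parameter
    public boolean streamingEngine;

    // tests as shown in the surrounding examples
}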

Example 28 with Server

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in project beam by apache.

From the class StreamingDataflowWorkerTest, method testExceptions.

@Test(timeout = 30000)
public void testExceptions() throws Exception {
    if (streamingEngine) {
        // TODO: This test needs to be adapted to work with streamingEngine=true.
        return;
    }
    List<ParallelInstruction> instructions = Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()), makeDoFnInstruction(new TestExceptionFn(), 0, StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 1));
    FakeWindmillServer server = new FakeWindmillServer(errorCollector);
    server.setExpectedExceptionCount(1);
    String keyString = keyStringForIndex(0);
    server.addWorkToOffer(
        buildInput(
            "work {"
                + "  computation_id: \"" + DEFAULT_COMPUTATION_ID + "\""
                + "  input_data_watermark: 0"
                + "  work {"
                + "    key: \"" + keyString + "\""
                + "    sharding_key: 1"
                + "    work_token: 0"
                + "    cache_token: 1"
                + "    message_bundles {"
                + "      source_computation_id: \"" + DEFAULT_SOURCE_COMPUTATION_ID + "\""
                + "      messages {"
                + "        timestamp: 0"
                + "        data: \"0\""
                + "      }"
                + "    }"
                + "  }"
                + "}",
            CoderUtils.encodeToByteArray(
                CollectionCoder.of(IntervalWindow.getCoder()), Arrays.asList(DEFAULT_WINDOW))));
    StreamingDataflowWorker worker = makeWorker(instructions, createTestingPipelineOptions(server), true);
    worker.start();
    server.waitForEmptyWorkQueue();
    // Wait until the worker has given up.
    int maxTries = 10;
    while (maxTries-- > 0 && !worker.workExecutorIsEmpty()) {
        Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
    }
    assertTrue(worker.workExecutorIsEmpty());
    // Spam worker updates a few times.
    maxTries = 10;
    while (maxTries-- > 0) {
        worker.reportPeriodicWorkerUpdates();
        Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
    }
    // We should see the update containing the expected exceptions exactly once.
    ArgumentCaptor<WorkItemStatus> workItemStatusCaptor = ArgumentCaptor.forClass(WorkItemStatus.class);
    verify(mockWorkUnitClient, atLeast(1)).reportWorkItemStatus(workItemStatusCaptor.capture());
    List<WorkItemStatus> capturedStatuses = workItemStatusCaptor.getAllValues();
    boolean foundErrors = false;
    int lastUpdateWithoutErrors = 0;
    int lastUpdateWithErrors = 0;
    for (WorkItemStatus status : capturedStatuses) {
        if (status.getErrors().isEmpty()) {
            lastUpdateWithoutErrors++;
            continue;
        }
        lastUpdateWithErrors++;
        assertFalse(foundErrors);
        foundErrors = true;
        String stacktrace = status.getErrors().get(0).getMessage();
        assertThat(stacktrace, Matchers.containsString("Exception!"));
        assertThat(stacktrace, Matchers.containsString("Another exception!"));
        assertThat(stacktrace, Matchers.containsString("processElement"));
    }
    assertTrue(foundErrors);
    // The last update we see should not have any errors. This indicates we've retried the work item.
    assertTrue(lastUpdateWithoutErrors > lastUpdateWithErrors);
    // Confirm we've received the expected stats. There is no guarantee stats will only be reported
    // once.
    assertThat(server.getStatsReceived().size(), Matchers.greaterThanOrEqualTo(1));
    Windmill.ReportStatsRequest stats = server.getStatsReceived().get(0);
    assertEquals(DEFAULT_COMPUTATION_ID, stats.getComputationId());
    assertEquals(keyString, stats.getKey().toStringUtf8());
    assertEquals(0, stats.getWorkToken());
    assertEquals(1, stats.getShardingKey());
}
Also used: ByteString (org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString), Structs.addString (org.apache.beam.runners.dataflow.util.Structs.addString), ParallelInstruction (com.google.api.services.dataflow.model.ParallelInstruction), WorkItemStatus (com.google.api.services.dataflow.model.WorkItemStatus), Windmill (org.apache.beam.runners.dataflow.worker.windmill.Windmill), Test (org.junit.Test)
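The buildInput helper is not shown on this page. Judging from the text-format payload it receives, a plausible sketch, hedged because the exact message type and the metadata wiring are assumptions, is that it parses the string into a Windmill work response via the vendored protobuf TextFormat:

import org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.TextFormat;

// Hedged sketch: turn a text-format proto into a Windmill work response for tests.
private Windmill.GetWorkResponse buildInput(String input, byte[] metadata) throws Exception {
    Windmill.GetWorkResponse.Builder builder = Windmill.GetWorkResponse.newBuilder();
    TextFormat.merge(input, builder); // parse the human-readable proto text
    // The metadata bytes (the window encoded at the call site) would then be
    // attached to the parsed messages; that wiring is elided here.
    return builder.build();
}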

Example 29 with Server

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in project beam by apache.

From the class StreamingDataflowWorkerTest, method testBasic.

@Test
public void testBasic() throws Exception {
    List<ParallelInstruction> instructions = Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0));
    FakeWindmillServer server = new FakeWindmillServer(errorCollector);
    server.setIsReady(false);
    StreamingConfigTask streamingConfig = new StreamingConfigTask();
    streamingConfig.setStreamingComputationConfigs(ImmutableList.of(makeDefaultStreamingComputationConfig(instructions)));
    streamingConfig.setWindmillServiceEndpoint("foo");
    WorkItem workItem = new WorkItem();
    workItem.setStreamingConfigTask(streamingConfig);
    when(mockWorkUnitClient.getGlobalStreamingConfigWorkItem()).thenReturn(Optional.of(workItem));
    StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server);
    StreamingDataflowWorker worker = makeWorker(instructions, options, true);
    worker.start();
    final int numIters = 2000;
    for (int i = 0; i < numIters; ++i) {
        server.addWorkToOffer(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i)));
    }
    Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(numIters);
    worker.stop();
    for (int i = 0; i < numIters; ++i) {
        assertTrue(result.containsKey((long) i));
        assertEquals(makeExpectedOutput(i, TimeUnit.MILLISECONDS.toMicros(i)).build(), result.get((long) i));
    }
    verify(hotKeyLogger, atLeastOnce()).logHotKeyDetection(nullable(String.class), any());
}
Also used: ParallelInstruction (com.google.api.services.dataflow.model.ParallelInstruction), WorkItemCommitRequest (org.apache.beam.runners.dataflow.worker.windmill.Windmill.WorkItemCommitRequest), AtomicLong (java.util.concurrent.atomic.AtomicLong), DataflowCounterUpdateExtractor.splitIntToLong (org.apache.beam.runners.dataflow.worker.counters.DataflowCounterUpdateExtractor.splitIntToLong), UnsignedLong (org.apache.beam.vendor.guava.v26_0_jre.com.google.common.primitives.UnsignedLong), StreamingConfigTask (com.google.api.services.dataflow.model.StreamingConfigTask), ByteString (org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString), Structs.addString (org.apache.beam.runners.dataflow.util.Structs.addString), WorkItem (com.google.api.services.dataflow.model.WorkItem), StreamingDataflowWorkerOptions (org.apache.beam.runners.dataflow.worker.options.StreamingDataflowWorkerOptions), Test (org.junit.Test)

Example 30 with Server

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in project beam by apache.

From the class StateFetcherTest, method testEmptyFetchGlobalData.

@Test
public void testEmptyFetchGlobalData() throws Exception {
    StateFetcher fetcher = new StateFetcher(server);
    ByteString encodedIterable = ByteString.EMPTY;
    PCollectionView<Long> view = TestPipeline.create().apply(Create.empty(VarLongCoder.of())).apply(Sum.longsGlobally().asSingletonView());
    String tag = view.getTagInternal().getId();
    // The mocked server returns side-input data that is already ready; a single
    // fetch should resolve the view and produce exactly one server round trip.
    when(server.getSideInputData(any(Windmill.GlobalDataRequest.class))).thenReturn(buildGlobalDataResponse(tag, ByteString.EMPTY, true, encodedIterable));
    assertEquals(0L, (long) fetcher.fetchSideInput(view, GlobalWindow.INSTANCE, STATE_FAMILY, SideInputState.UNKNOWN, readStateSupplier).orNull());
    verify(server).getSideInputData(buildGlobalDataRequest(tag, ByteString.EMPTY));
    verifyNoMoreInteractions(server);
}
Also used: ByteString (org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString), Test (org.junit.Test)
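None of the snippets above constructs the vendored io.grpc.Server directly; they exercise it through fakes and mocks. For orientation, the vendored package mirrors stock grpc-java, so standing up a server looks like this minimal sketch (MyServiceImpl is a hypothetical BindableService, not a Beam class):

import org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server;
import org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ServerBuilder;

// Port 0 asks the OS for a free ephemeral port.
Server grpcServer = ServerBuilder.forPort(0)
        .addService(new MyServiceImpl()) // hypothetical service implementation
        .build()
        .start();
try {
    // Drive requests against grpcServer.getPort() here.
} finally {
    grpcServer.shutdown();
    grpcServer.awaitTermination();
}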

Aggregations

Test (org.junit.Test): 76
Server (io.grpc.Server): 68
IOException (java.io.IOException): 27
ByteString (org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString): 24
ArrayList (java.util.ArrayList): 21
StreamObserver (org.apache.beam.vendor.grpc.v1p43p2.io.grpc.stub.StreamObserver): 21
ExecutionException (java.util.concurrent.ExecutionException): 20
TimeoutException (java.util.concurrent.TimeoutException): 20
CountDownLatch (java.util.concurrent.CountDownLatch): 19
ManagedChannel (org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel): 19
Server (org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server): 18
StatusException (io.grpc.StatusException): 17
Metadata (io.grpc.Metadata): 12
List (java.util.List): 12
BeamFnApi (org.apache.beam.model.fnexecution.v1.BeamFnApi): 12
FilterChain (io.grpc.xds.EnvoyServerProtoData.FilterChain): 11
ExecutorService (java.util.concurrent.ExecutorService): 11
ParallelInstruction (com.google.api.services.dataflow.model.ParallelInstruction): 10
ServerCall (io.grpc.ServerCall): 10
StreamObserver (io.grpc.stub.StreamObserver): 10