Use of org.apache.beam.runners.dataflow.worker.windmill.WindmillServerStub.GetDataStream in project beam by apache.
From the class MetricTrackingWindmillServerStub, method getSideInputData. The method fetches side-input (global) data from Windmill: when streaming requests are enabled it borrows a GetDataStream from the stream pool and calls requestGlobalData; otherwise it issues a single batched GetDataRequest through the server stub.
public Windmill.GlobalData getSideInputData(Windmill.GlobalDataRequest request) {
  gcThrashingMonitor.waitForResources("GetSideInputData");
  activeSideInputs.getAndIncrement();
  try {
    if (useStreamingRequests) {
      GetDataStream stream = streamPool.getStream();
      try {
        return stream.requestGlobalData(request);
      } finally {
        streamPool.releaseStream(stream);
      }
    } else {
      return server
          .getData(
              Windmill.GetDataRequest.newBuilder().addGlobalDataFetchRequests(request).build())
          .getGlobalData(0);
    }
  } catch (Exception e) {
    throw new RuntimeException("Failed to get side input: ", e);
  } finally {
    activeSideInputs.getAndDecrement();
  }
}
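The streaming branch above follows a borrow-in-try, release-in-finally discipline so a failed request never leaks a pooled stream. Below is a minimal, hypothetical sketch of that pooling pattern; SimpleStreamPool is illustrative only and is not Beam's actual stream pool class.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

// Hypothetical, simplified pool illustrating the getStream()/releaseStream() shape
// used by getSideInputData; not the Beam implementation.
final class SimpleStreamPool<T> {
  private final BlockingQueue<T> streams;

  SimpleStreamPool(Iterable<T> initial, int capacity) {
    this.streams = new ArrayBlockingQueue<>(capacity);
    for (T stream : initial) {
      streams.add(stream);
    }
  }

  // Blocks until a stream is available, mirroring streamPool.getStream().
  T getStream() throws InterruptedException {
    return streams.take();
  }

  // Returns the stream for reuse by other callers, mirroring streamPool.releaseStream().
  void releaseStream(T stream) {
    streams.add(stream);
  }
}

Callers would use it exactly as getSideInputData does: acquire in a try, issue the request, and release in the finally block so exceptions cannot drain the pool.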
Use of org.apache.beam.runners.dataflow.worker.windmill.WindmillServerStub.GetDataStream in project beam by apache.
From the class MetricTrackingWindmillServerStub, method getStateData. The method reads keyed state from Windmill: with streaming requests it borrows a GetDataStream from the pool and calls requestKeyedData; otherwise the request is queued into a read batch and the caller blocks on a SettableFuture until the batch completes.
public Windmill.KeyedGetDataResponse getStateData(
    String computation, Windmill.KeyedGetDataRequest request) {
  gcThrashingMonitor.waitForResources("GetStateData");
  activeStateReads.getAndIncrement();
  try {
    if (useStreamingRequests) {
      GetDataStream stream = streamPool.getStream();
      try {
        return stream.requestKeyedData(computation, request);
      } finally {
        streamPool.releaseStream(stream);
      }
    } else {
      SettableFuture<Windmill.KeyedGetDataResponse> response = SettableFuture.create();
      ReadBatch batch = addToReadBatch(new QueueEntry(computation, request, response));
      if (batch != null) {
        issueReadBatch(batch);
      }
      return response.get();
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  } finally {
    activeStateReads.getAndDecrement();
  }
}
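In the non-streaming branch, the request is queued into a read batch and the caller blocks on a Guava SettableFuture until the batch is issued and answered. The helpers QueueEntry, addToReadBatch, and issueReadBatch are not shown in the snippet; the sketch below is a simplified, hypothetical illustration of that future-based batching idea, not the real MetricTrackingWindmillServerStub logic.

import com.google.common.util.concurrent.SettableFuture;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

// Hypothetical sketch: callers enqueue requests and block on futures; a flush sends
// the pending requests and completes the futures. The real code additionally tracks
// a single in-flight batch, batch size limits, and error propagation.
final class ReadBatcher<ReqT, RespT> {

  static final class Entry<ReqT, RespT> {
    final ReqT request;
    final SettableFuture<RespT> response = SettableFuture.create();

    Entry(ReqT request) {
      this.request = request;
    }
  }

  private final List<Entry<ReqT, RespT>> pending = new ArrayList<>();

  // Queues a request; the caller later blocks on the returned future with get().
  synchronized SettableFuture<RespT> add(ReqT request) {
    Entry<ReqT, RespT> entry = new Entry<>(request);
    pending.add(entry);
    return entry.response;
  }

  // Issues all pending requests and completes (or fails) their futures.
  synchronized void flush(Function<ReqT, RespT> send) {
    for (Entry<ReqT, RespT> entry : pending) {
      try {
        entry.response.set(send.apply(entry.request));
      } catch (RuntimeException e) {
        entry.response.setException(e);
      }
    }
    pending.clear();
  }
}

The shape matches the else branch above: add the entry, issue the batch if one was formed, then block on response.get().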
Use of org.apache.beam.runners.dataflow.worker.windmill.WindmillServerStub.GetDataStream in project beam by apache.
From the test class GrpcWindmillServerTest, method testStreamingGetDataHeartbeats. The test registers a fake Windmill service that records the heartbeat requests it receives, sends heartbeats for an active-work map via refreshActiveWork on a GetDataStream, and then polls until the recorded heartbeats match the map.
@Test
public void testStreamingGetDataHeartbeats() throws Exception {
  // This server records the heartbeats observed but doesn't respond.
  final Map<String, List<KeyedGetDataRequest>> heartbeats = new HashMap<>();
  serviceRegistry.addService(
      new CloudWindmillServiceV1Alpha1ImplBase() {
        @Override
        public StreamObserver<StreamingGetDataRequest> getDataStream(
            StreamObserver<StreamingGetDataResponse> responseObserver) {
          return new StreamObserver<StreamingGetDataRequest>() {
            boolean sawHeader = false;

            @Override
            public void onNext(StreamingGetDataRequest chunk) {
              try {
                if (!sawHeader) {
                  LOG.info("Received header");
                  errorCollector.checkThat(
                      chunk.getHeader(),
                      Matchers.equalTo(
                          JobHeader.newBuilder()
                              .setJobId("job")
                              .setProjectId("project")
                              .setWorkerId("worker")
                              .build()));
                  sawHeader = true;
                } else {
                  LOG.info("Received {} heartbeats", chunk.getStateRequestCount());
                  errorCollector.checkThat(
                      chunk.getSerializedSize(), Matchers.lessThanOrEqualTo(STREAM_CHUNK_SIZE));
                  errorCollector.checkThat(chunk.getRequestIdCount(), Matchers.is(0));
                  synchronized (heartbeats) {
                    for (ComputationGetDataRequest request : chunk.getStateRequestList()) {
                      errorCollector.checkThat(request.getRequestsCount(), Matchers.is(1));
                      heartbeats.putIfAbsent(request.getComputationId(), new ArrayList<>());
                      heartbeats
                          .get(request.getComputationId())
                          .add(request.getRequestsList().get(0));
                    }
                  }
                }
              } catch (Exception e) {
                errorCollector.addError(e);
              }
            }

            @Override
            public void onError(Throwable throwable) {}

            @Override
            public void onCompleted() {
              responseObserver.onCompleted();
            }
          };
        }
      });

  Map<String, List<KeyedGetDataRequest>> activeMap = new HashMap<>();
  List<String> computation1Keys = new ArrayList<>();
  List<String> computation2Keys = new ArrayList<>();
  for (int i = 0; i < 100; ++i) {
    computation1Keys.add("Computation1Key" + i);
    computation2Keys.add("Computation2Key" + largeString(i * 20));
  }
  activeMap.put("Computation1", makeHeartbeatRequest(computation1Keys));
  activeMap.put("Computation2", makeHeartbeatRequest(computation2Keys));

  GetDataStream stream = client.getDataStream();
  stream.refreshActiveWork(activeMap);
  stream.close();
  assertTrue(stream.awaitTermination(60, TimeUnit.SECONDS));

  while (true) {
    Thread.sleep(100);
    synchronized (heartbeats) {
      if (heartbeats.size() != activeMap.size()) {
        continue;
      }
      assertEquals(heartbeats, activeMap);
      break;
    }
  }
}
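The serviceRegistry and client used by this test are created in fixture code that the snippet does not show. Below is a plausible sketch of the in-process gRPC wiring such a test typically relies on, assuming grpc-java's in-process transport; InProcessGrpcFixture and its fields are hypothetical names, and the actual GrpcWindmillServerTest setup may differ.

import io.grpc.ManagedChannel;
import io.grpc.Server;
import io.grpc.inprocess.InProcessChannelBuilder;
import io.grpc.inprocess.InProcessServerBuilder;
import io.grpc.util.MutableHandlerRegistry;

// Hypothetical fixture: services added to the registry (as with
// serviceRegistry.addService(...) above) become visible to clients of the
// in-process channel without any network I/O.
public class InProcessGrpcFixture {
  final MutableHandlerRegistry serviceRegistry = new MutableHandlerRegistry();
  private Server server;
  private ManagedChannel channel;

  void start(String name) throws Exception {
    server =
        InProcessServerBuilder.forName(name)
            .fallbackHandlerRegistry(serviceRegistry)
            .directExecutor()
            .build()
            .start();
    channel = InProcessChannelBuilder.forName(name).directExecutor().build();
  }

  ManagedChannel channel() {
    return channel;
  }

  void stop() {
    channel.shutdownNow();
    server.shutdownNow();
  }
}

A Windmill client built on channel() would then reach the fake CloudWindmillServiceV1Alpha1ImplBase registered in the test body.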
Use of org.apache.beam.runners.dataflow.worker.windmill.WindmillServerStub.GetDataStream in project beam by apache.
From the test class GrpcWindmillServerTest, method testStreamingGetData. The test registers a fake Windmill service that mirrors each request back as its response, sometimes chunked and with injected stream errors, then issues 200 concurrent keyed-data and global-data requests over a single GetDataStream and verifies every response.
@Test
@SuppressWarnings("FutureReturnValueIgnored")
public void testStreamingGetData() throws Exception {
  // This server responds to GetDataRequests with responses that mirror the requests.
  serviceRegistry.addService(
      new CloudWindmillServiceV1Alpha1ImplBase() {
        @Override
        public StreamObserver<StreamingGetDataRequest> getDataStream(
            StreamObserver<StreamingGetDataResponse> responseObserver) {
          return new StreamObserver<StreamingGetDataRequest>() {
            boolean sawHeader = false;
            HashSet<Long> seenIds = new HashSet<>();
            ResponseErrorInjector injector = new ResponseErrorInjector(responseObserver);
            StreamingGetDataResponse.Builder responseBuilder =
                StreamingGetDataResponse.newBuilder();

            @Override
            public void onNext(StreamingGetDataRequest chunk) {
              maybeInjectError(responseObserver);
              try {
                if (!sawHeader) {
                  LOG.info("Received header");
                  errorCollector.checkThat(
                      chunk.getHeader(),
                      Matchers.equalTo(
                          JobHeader.newBuilder()
                              .setJobId("job")
                              .setProjectId("project")
                              .setWorkerId("worker")
                              .build()));
                  sawHeader = true;
                } else {
                  LOG.info(
                      "Received get data of {} global data, {} data requests",
                      chunk.getGlobalDataRequestCount(),
                      chunk.getStateRequestCount());
                  errorCollector.checkThat(
                      chunk.getSerializedSize(), Matchers.lessThanOrEqualTo(STREAM_CHUNK_SIZE));
                  int i = 0;
                  for (GlobalDataRequest request : chunk.getGlobalDataRequestList()) {
                    long requestId = chunk.getRequestId(i++);
                    errorCollector.checkThat(seenIds.add(requestId), Matchers.is(true));
                    sendResponse(requestId, processGlobalDataRequest(request));
                  }
                  for (ComputationGetDataRequest request : chunk.getStateRequestList()) {
                    long requestId = chunk.getRequestId(i++);
                    errorCollector.checkThat(seenIds.add(requestId), Matchers.is(true));
                    sendResponse(requestId, processStateRequest(request));
                  }
                  flushResponse();
                }
              } catch (Exception e) {
                errorCollector.addError(e);
              }
            }

            @Override
            public void onError(Throwable throwable) {}

            @Override
            public void onCompleted() {
              injector.cancel();
              responseObserver.onCompleted();
            }

            private ByteString processGlobalDataRequest(GlobalDataRequest request) {
              errorCollector.checkThat(request.getStateFamily(), Matchers.is("family"));
              return GlobalData.newBuilder()
                  .setDataId(request.getDataId())
                  .setStateFamily("family")
                  .setData(ByteString.copyFromUtf8(request.getDataId().getTag()))
                  .build()
                  .toByteString();
            }

            private ByteString processStateRequest(ComputationGetDataRequest compRequest) {
              errorCollector.checkThat(compRequest.getRequestsCount(), Matchers.is(1));
              errorCollector.checkThat(compRequest.getComputationId(), Matchers.is("computation"));
              KeyedGetDataRequest request = compRequest.getRequests(0);
              KeyedGetDataResponse response =
                  makeGetDataResponse(request.getValuesToFetch(0).getTag().toStringUtf8());
              return response.toByteString();
            }

            private void sendResponse(long id, ByteString serializedResponse) {
              if (ThreadLocalRandom.current().nextInt(4) == 0) {
                sendChunkedResponse(id, serializedResponse);
              } else {
                responseBuilder.addRequestId(id).addSerializedResponse(serializedResponse);
                if (responseBuilder.getRequestIdCount() > 10) {
                  flushResponse();
                }
              }
            }

            private void sendChunkedResponse(long id, ByteString serializedResponse) {
              LOG.info("Sending response with {} chunks", (serializedResponse.size() / 10) + 1);
              for (int i = 0; i < serializedResponse.size(); i += 10) {
                int end = Math.min(serializedResponse.size(), i + 10);
                try {
                  responseObserver.onNext(
                      StreamingGetDataResponse.newBuilder()
                          .addRequestId(id)
                          .addSerializedResponse(serializedResponse.substring(i, end))
                          .setRemainingBytesForResponse(serializedResponse.size() - end)
                          .build());
                } catch (IllegalStateException e) {
                  // Stream is already closed.
                }
              }
            }

            private void flushResponse() {
              if (responseBuilder.getRequestIdCount() > 0) {
                LOG.info("Sending batched response of {} ids", responseBuilder.getRequestIdCount());
                try {
                  responseObserver.onNext(responseBuilder.build());
                } catch (IllegalStateException e) {
                  // Stream is already closed.
                }
                responseBuilder.clear();
              }
            }
          };
        }
      });

  GetDataStream stream = client.getDataStream();

  // Make requests of varying sizes to test chunking, and verify the responses.
  ExecutorService executor = Executors.newFixedThreadPool(50);
  final CountDownLatch done = new CountDownLatch(200);
  for (int i = 0; i < 100; ++i) {
    final String key = "key" + i;
    final String s = i % 5 == 0 ? largeString(i) : "tag";
    executor.submit(
        () -> {
          errorCollector.checkThat(
              stream.requestKeyedData("computation", makeGetDataRequest(key, s)),
              Matchers.equalTo(makeGetDataResponse(s)));
          done.countDown();
        });
    executor.execute(
        () -> {
          errorCollector.checkThat(
              stream.requestGlobalData(makeGlobalDataRequest(key)),
              Matchers.equalTo(makeGlobalDataResponse(key)));
          done.countDown();
        });
  }
  done.await();
  stream.close();
  assertTrue(stream.awaitTermination(60, TimeUnit.SECONDS));
  executor.shutdown();
}
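When sendChunkedResponse splits a serialized response, each chunk carries the request id and, via setRemainingBytesForResponse, the number of bytes still to come. The sketch below is an illustrative client-side reassembly routine for that framing, assuming one request id per chunked message as the test server sends; it is not the reassembly code inside Beam's GrpcWindmillServer.

import com.google.protobuf.ByteString;
import java.util.HashMap;
import java.util.Map;

// Illustrative only: concatenates chunks per request id until the server reports
// zero remaining bytes, then hands back the complete serialized response.
final class ChunkReassembler {
  private final Map<Long, ByteString> partial = new HashMap<>();

  // Returns the fully assembled response when the last chunk arrives, else null.
  ByteString onChunk(long requestId, ByteString chunk, long remainingBytes) {
    ByteString assembled = partial.getOrDefault(requestId, ByteString.EMPTY).concat(chunk);
    if (remainingBytes > 0) {
      partial.put(requestId, assembled);
      return null;
    }
    partial.remove(requestId);
    return assembled;
  }
}

Once remainingBytesForResponse reaches zero, the assembled bytes can be parsed as a KeyedGetDataResponse or GlobalData, depending on which request the id belonged to.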