Use of org.checkerframework.checker.nullness.qual.Nullable in project beam by apache.
The class POJOUtils, method createGetter.
/**
* Generate the following {@link FieldValueGetter} class for the {@link Field}.
*
* <pre><code>
* class Getter implements {@literal FieldValueGetter<POJO, FieldType>} {
*   {@literal @}Override public String name() { return field.getName(); }
*   {@literal @}Override public Class type() { return field.getType(); }
*   {@literal @}Override public FieldType get(POJO pojo) {
*     return convert(pojo.field);
*   }
* }
* </code></pre>
*/
@SuppressWarnings("unchecked")
@Nullable
static <ObjectT, ValueT> FieldValueGetter<ObjectT, ValueT> createGetter(
    FieldValueTypeInformation typeInformation, TypeConversionsFactory typeConversionsFactory) {
  Field field = typeInformation.getField();
  DynamicType.Builder<FieldValueGetter> builder =
      ByteBuddyUtils.subclassGetterInterface(
          BYTE_BUDDY,
          field.getDeclaringClass(),
          typeConversionsFactory
              .createTypeConversion(false)
              .convert(TypeDescriptor.of(field.getType())));
  builder =
      implementGetterMethods(builder, field, typeInformation.getName(), typeConversionsFactory);
  try {
    return builder
        .visit(new AsmVisitorWrapper.ForDeclaredMethods().writerFlags(ClassWriter.COMPUTE_FRAMES))
        .make()
        .load(
            ReflectHelpers.findClassLoader(field.getDeclaringClass().getClassLoader()),
            ClassLoadingStrategy.Default.INJECTION)
        .getLoaded()
        .getDeclaredConstructor()
        .newInstance();
  } catch (InstantiationException
      | IllegalAccessException
      | NoSuchMethodException
      | InvocationTargetException e) {
    throw new RuntimeException("Unable to generate a getter for field '" + field + "'.", e);
  }
}
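For intuition, here is a self-contained sketch of the class the javadoc above describes, written for a hypothetical Person POJO with a public String field. The local FieldValueGetter interface mirrors a subset of Beam's org.apache.beam.sdk.schemas.FieldValueGetter; the real implementation is emitted as bytecode at runtime by ByteBuddy and injected into the POJO's class loader, so no such source file ever exists.

// Illustrative stand-in for (a subset of) Beam's FieldValueGetter interface.
interface FieldValueGetter<ObjectT, ValueT> {
  String name();
  ValueT get(ObjectT object);
}

// Hypothetical POJO used only for illustration.
class Person {
  public String name; // the field the getter is generated for
}

// Roughly the shape of the runtime-generated getter once loaded.
class PersonNameGetter implements FieldValueGetter<Person, String> {
  @Override
  public String name() {
    return "name"; // field.getName()
  }

  @Override
  public String get(Person person) {
    return person.name; // convert(pojo.field) handles field types needing conversion
  }
}

Generating this bytecode once and instantiating it lets Beam read schema fields generically without paying reflection costs on every access.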
Use of org.checkerframework.checker.nullness.qual.Nullable in project guava by google.
The class LocalCache, method loadAll.
/**
* Returns the result of calling {@link CacheLoader#loadAll}, or null if {@code loader} doesn't
* implement {@code loadAll}.
*/
@Nullable
Map<K, V> loadAll(Set<? extends K> keys, CacheLoader<? super K, V> loader)
    throws ExecutionException {
  checkNotNull(loader);
  checkNotNull(keys);
  Stopwatch stopwatch = Stopwatch.createStarted();
  Map<K, V> result;
  boolean success = false;
  try {
    // safe since all keys extend K
    @SuppressWarnings("unchecked")
    Map<K, V> map = (Map<K, V>) loader.loadAll(keys);
    result = map;
    success = true;
  } catch (UnsupportedLoadingOperationException e) {
    success = true;
    throw e;
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new ExecutionException(e);
  } catch (RuntimeException e) {
    throw new UncheckedExecutionException(e);
  } catch (Exception e) {
    throw new ExecutionException(e);
  } catch (Error e) {
    throw new ExecutionError(e);
  } finally {
    if (!success) {
      globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS));
    }
  }
  if (result == null) {
    globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS));
    throw new InvalidCacheLoadException(loader + " returned null map from loadAll");
  }
  stopwatch.stop();
  // TODO(fry): batch by segment
  boolean nullsPresent = false;
  for (Entry<K, V> entry : result.entrySet()) {
    K key = entry.getKey();
    V value = entry.getValue();
    if (key == null || value == null) {
      // delay failure until non-null entries are stored
      nullsPresent = true;
    } else {
      put(key, value);
    }
  }
  if (nullsPresent) {
    globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS));
    throw new InvalidCacheLoadException(loader + " returned null keys or values from loadAll");
  }
  // TODO(fry): record count of loaded entries
  globalStatsCounter.recordLoadSuccess(stopwatch.elapsed(NANOSECONDS));
  return result;
}
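For context, a minimal caller-side sketch, with illustrative keys and values: a CacheLoader that overrides loadAll, so that LoadingCache.getAll batch-loads cache misses through the path above instead of invoking load once per key.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableList;
import java.util.HashMap;
import java.util.Map;

public class LoadAllSketch {
  public static void main(String[] args) throws Exception {
    LoadingCache<String, String> cache =
        CacheBuilder.newBuilder()
            .build(
                new CacheLoader<String, String>() {
                  @Override
                  public String load(String key) {
                    // Single-key fallback, used when loadAll is not overridden.
                    return "value-for-" + key;
                  }

                  @Override
                  public Map<String, String> loadAll(Iterable<? extends String> keys) {
                    // Batch load (e.g., one bulk RPC instead of N). Must return a
                    // non-null map with non-null keys and values, or LocalCache.loadAll
                    // throws InvalidCacheLoadException as seen above.
                    Map<String, String> batch = new HashMap<>();
                    for (String key : keys) {
                      batch.put(key, "value-for-" + key);
                    }
                    return batch;
                  }
                });

    // getAll routes through CacheLoader.loadAll because it is overridden.
    Map<String, String> values = cache.getAll(ImmutableList.of("a", "b", "c"));
    System.out.println(values); // {a=value-for-a, b=value-for-b, c=value-for-c}
  }
}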
Use of org.checkerframework.checker.nullness.qual.Nullable in project beam by apache.
The class DataflowRunnerHarness, method main.
/**
* Fetches and processes work units from the Dataflow service.
*/
public static void main(String[] unusedArgs) throws Exception {
  RunnerApi.@Nullable Pipeline pipeline = DataflowWorkerHarnessHelper.getPipelineFromEnv();
  // This descriptor is used for all services except logging. They are isolated to keep
  // critical traffic protected from best-effort traffic.
  ApiServiceDescriptor controlApiService = DataflowWorkerHarnessHelper.getControlDescriptor();
  ApiServiceDescriptor loggingApiService = DataflowWorkerHarnessHelper.getLoggingDescriptor();
  ApiServiceDescriptor statusApiService = DataflowWorkerHarnessHelper.getStatusDescriptor();
  LOG.info(
      "{} started, using port {} for control, {} for logging.",
      DataflowRunnerHarness.class,
      controlApiService,
      loggingApiService);
  DataflowWorkerHarnessHelper.initializeLogging(DataflowRunnerHarness.class);
  DataflowWorkerHarnessOptions pipelineOptions =
      DataflowWorkerHarnessHelper.initializeGlobalStateAndPipelineOptions(
          DataflowRunnerHarness.class);
  DataflowWorkerHarnessHelper.configureLogging(pipelineOptions);
  // Initialize registered file systems.
  FileSystems.setDefaultPipelineOptions(pipelineOptions);
  DataflowPipelineDebugOptions dataflowOptions =
      pipelineOptions.as(DataflowPipelineDebugOptions.class);
  ServerFactory serverFactory;
  if (DataflowRunner.hasExperiment(dataflowOptions, "beam_fn_api_epoll_domain_socket")) {
    serverFactory = ServerFactory.createEpollDomainSocket();
  } else if (DataflowRunner.hasExperiment(dataflowOptions, "beam_fn_api_epoll")) {
    serverFactory = ServerFactory.createEpollSocket();
  } else {
    serverFactory = ServerFactory.createDefault();
  }
  ServerStreamObserverFactory streamObserverFactory =
      ServerStreamObserverFactory.fromOptions(pipelineOptions);
  Server servicesServer = null;
  Server loggingServer = null;
  Server statusServer = null;
  try (BeamFnLoggingService beamFnLoggingService =
          new BeamFnLoggingService(
              loggingApiService,
              DataflowWorkerLoggingInitializer.getSdkLoggingHandler()::publish,
              streamObserverFactory::from,
              GrpcContextHeaderAccessorProvider.getHeaderAccessor());
      BeamFnControlService beamFnControlService =
          new BeamFnControlService(
              controlApiService,
              streamObserverFactory::from,
              GrpcContextHeaderAccessorProvider.getHeaderAccessor());
      BeamFnDataGrpcService beamFnDataService =
          new BeamFnDataGrpcService(
              pipelineOptions,
              controlApiService,
              streamObserverFactory::from,
              GrpcContextHeaderAccessorProvider.getHeaderAccessor());
      BeamWorkerStatusGrpcService beamWorkerStatusGrpcService =
          statusApiService == null
              ? null
              : BeamWorkerStatusGrpcService.create(
                  statusApiService, GrpcContextHeaderAccessorProvider.getHeaderAccessor());
      GrpcStateService beamFnStateService = GrpcStateService.create()) {
    servicesServer =
        serverFactory.create(
            ImmutableList.of(beamFnControlService, beamFnDataService, beamFnStateService),
            controlApiService);
    loggingServer =
        serverFactory.create(ImmutableList.of(beamFnLoggingService), loggingApiService);
    // gRPC server for obtaining SDK harness runtime status information.
    if (beamWorkerStatusGrpcService != null) {
      statusServer =
          serverFactory.create(ImmutableList.of(beamWorkerStatusGrpcService), statusApiService);
    }
    start(
        pipeline,
        pipelineOptions,
        beamFnControlService,
        beamFnDataService,
        controlApiService,
        beamFnStateService,
        beamWorkerStatusGrpcService);
    if (statusServer != null) {
      statusServer.shutdown();
    }
    servicesServer.shutdown();
    loggingServer.shutdown();
    // Wait 30 seconds for outstanding requests to finish.
    if (statusServer != null) {
      statusServer.awaitTermination(30, TimeUnit.SECONDS);
    }
    servicesServer.awaitTermination(30, TimeUnit.SECONDS);
    loggingServer.awaitTermination(30, TimeUnit.SECONDS);
  } finally {
    if (statusServer != null && !statusServer.isTerminated()) {
      statusServer.shutdownNow();
    }
    if (servicesServer != null && !servicesServer.isTerminated()) {
      servicesServer.shutdownNow();
    }
    if (loggingServer != null && !loggingServer.isTerminated()) {
      loggingServer.shutdownNow();
    }
  }
}
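The try/finally above follows the standard gRPC graceful-shutdown discipline. A minimal, self-contained sketch of just that pattern, assuming plain io.grpc rather than Beam's ServerFactory and vendored gRPC (the port and grace period are illustrative):

import io.grpc.Server;
import io.grpc.ServerBuilder;
import java.util.concurrent.TimeUnit;

public class GracefulShutdownSketch {
  public static void main(String[] args) throws Exception {
    Server server = ServerBuilder.forPort(0).build().start(); // no services; illustration only
    try {
      // ... serve traffic ...
    } finally {
      server.shutdown(); // stop accepting new calls, let in-flight calls finish
      if (!server.awaitTermination(30, TimeUnit.SECONDS)) {
        server.shutdownNow(); // grace period expired: force-cancel remaining calls
      }
    }
  }
}

The harness applies the same three steps to each of its status, services, and logging servers, shutting the logging server down last, presumably so the earlier shutdowns can still emit logs.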
Use of org.checkerframework.checker.nullness.qual.Nullable in project beam by apache.
The class GrpcWindmillServerTest, method testStreamingGetWork.
@Test
public void testStreamingGetWork() throws Exception {
  // This fake server returns an infinite stream of identical WorkItems, obeying the request size
  // limits set by the client.
  serviceRegistry.addService(
      new CloudWindmillServiceV1Alpha1ImplBase() {
        @Override
        public StreamObserver<StreamingGetWorkRequest> getWorkStream(
            StreamObserver<StreamingGetWorkResponseChunk> responseObserver) {
          return new StreamObserver<StreamingGetWorkRequest>() {
            boolean sawHeader = false;
            ResponseErrorInjector injector = new ResponseErrorInjector(responseObserver);

            @Override
            public void onNext(StreamingGetWorkRequest request) {
              maybeInjectError(responseObserver);
              try {
                long maxItems;
                if (!sawHeader) {
                  errorCollector.checkThat(
                      request.getRequest(),
                      Matchers.equalTo(
                          GetWorkRequest.newBuilder()
                              .setClientId(10)
                              .setJobId("job")
                              .setProjectId("project")
                              .setWorkerId("worker")
                              .setMaxItems(3)
                              .setMaxBytes(10000)
                              .build()));
                  sawHeader = true;
                  maxItems = request.getRequest().getMaxItems();
                } else {
                  maxItems = request.getRequestExtension().getMaxItems();
                }
                for (int item = 0; item < maxItems; item++) {
                  long id = ThreadLocalRandom.current().nextLong();
                  ByteString serializedResponse =
                      WorkItem.newBuilder()
                          .setKey(ByteString.copyFromUtf8("somewhat_long_key"))
                          .setWorkToken(id)
                          .setShardingKey(id)
                          .build()
                          .toByteString();
                  // Break the WorkItem into smaller chunks to test chunking code.
                  for (int i = 0; i < serializedResponse.size(); i += 10) {
                    int end = Math.min(serializedResponse.size(), i + 10);
                    StreamingGetWorkResponseChunk.Builder builder =
                        StreamingGetWorkResponseChunk.newBuilder()
                            .setStreamId(id)
                            .setSerializedWorkItem(serializedResponse.substring(i, end))
                            .setRemainingBytesForWorkItem(serializedResponse.size() - end);
                    if (i == 0) {
                      builder.setComputationMetadata(
                          ComputationWorkItemMetadata.newBuilder()
                              .setComputationId("comp")
                              .setDependentRealtimeInputWatermark(17000)
                              .setInputDataWatermark(18000));
                    }
                    try {
                      responseObserver.onNext(builder.build());
                    } catch (IllegalStateException e) {
                      // Client closed the stream; we're done.
                      return;
                    }
                  }
                }
              } catch (Exception e) {
                errorCollector.addError(e);
              }
            }

            @Override
            public void onError(Throwable throwable) {}

            @Override
            public void onCompleted() {
              injector.cancel();
              responseObserver.onCompleted();
            }
          };
        }
      });

  // Read the stream of WorkItems until 100 of them are received.
  CountDownLatch latch = new CountDownLatch(100);
  GetWorkStream stream =
      client.getWorkStream(
          GetWorkRequest.newBuilder().setClientId(10).setMaxItems(3).setMaxBytes(10000).build(),
          (String computation,
              @Nullable Instant inputDataWatermark,
              Instant synchronizedProcessingTime,
              Windmill.WorkItem workItem) -> {
            latch.countDown();
            assertEquals(inputDataWatermark, new Instant(18));
            assertEquals(synchronizedProcessingTime, new Instant(17));
            assertEquals(workItem.getKey(), ByteString.copyFromUtf8("somewhat_long_key"));
          });
  assertTrue(latch.await(30, TimeUnit.SECONDS));
  stream.close();
  assertTrue(stream.awaitTermination(30, TimeUnit.SECONDS));
}
Use of org.checkerframework.checker.nullness.qual.Nullable in project beam by apache.
The class DataflowPipelineJob, method waitUntilFinish.
/**
* Waits until the pipeline finishes and returns the final status.
*
* @param duration The time to wait for the job to finish. Provide a value less than 1 ms for an
* infinite wait.
* @param messageHandler If non-null, this handler will be invoked for each batch of messages
* received.
* @param sleeper A sleeper to use to sleep between attempts.
* @param nanoClock A nanoClock used to time the total time taken.
* @return The final state of the job or null on timeout.
* @throws IOException If there is a persistent problem getting job information.
* @throws InterruptedException if the thread is interrupted.
*/
@VisibleForTesting
@Nullable
State waitUntilFinish(
    Duration duration,
    MonitoringUtil.@Nullable JobMessagesHandler messageHandler,
    Sleeper sleeper,
    NanoClock nanoClock,
    MonitoringUtil monitor)
    throws IOException, InterruptedException {
  BackOff backoff = getMessagesBackoff(duration);
  // This function tracks the cumulative time from the *first request* to enforce the wall-clock
  // limit. Any backoff instance could, at best, track the time since the first attempt at a
  // given request. Thus, we need to track the cumulative time ourselves.
  long startNanos = nanoClock.nanoTime();
  State state = State.UNKNOWN;
  Exception exception;
  do {
    exception = null;
    try {
      // Get the state of the job before listing messages. This ensures we always fetch job
      // messages after the job finishes, so that we have all of them.
      state =
          getStateWithRetries(
              BackOffAdapter.toGcpBackOff(STATUS_BACKOFF_FACTORY.withMaxRetries(0).backoff()),
              sleeper);
    } catch (IOException e) {
      exception = e;
      LOG.warn("Failed to get job state: {}", e.getMessage());
      LOG.debug("Failed to get job state.", e);
      continue;
    }
    exception = processJobMessages(messageHandler, monitor);
    if (exception != null) {
      continue;
    }
    // We can stop if the job is done.
    if (state.isTerminal()) {
      logTerminalState(state);
      return state;
    }
    // Reset the attempt count and update the cumulative wait time.
    backoff = resetBackoff(duration, nanoClock, startNanos);
  } while (BackOffUtils.next(sleeper, backoff));
  if (exception == null) {
    LOG.warn("No terminal state was returned within the allotted timeout. State value: {}", state);
  } else {
    LOG.error("Failed to fetch job metadata.", exception);
  }
  return null;
}
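The cumulative-time comment above is the crux: a backoff object only knows about the current attempt, so the method re-derives the remaining budget from startNanos on every iteration. A minimal sketch of that deadline discipline, with pollOnce() and isTerminal() as hypothetical stand-ins for the job-state RPC and terminal-state check (the fixed poll interval replaces the real exponential backoff for brevity):

import java.time.Duration;
import java.time.Instant;

public class CumulativeDeadlinePoll {
  interface Job {
    String pollOnce(); // hypothetical stand-in for one job-state request

    static boolean isTerminal(String state) {
      return state.equals("DONE") || state.equals("FAILED") || state.equals("CANCELLED");
    }
  }

  static String waitUntilFinish(Job job, Duration budget, Duration pollInterval)
      throws InterruptedException {
    Instant start = Instant.now(); // measured from the *first* request, not per attempt
    while (Duration.between(start, Instant.now()).compareTo(budget) < 0) {
      String state = job.pollOnce();
      if (Job.isTerminal(state)) {
        return state;
      }
      Thread.sleep(pollInterval.toMillis());
    }
    return null; // timed out, mirroring the @Nullable return above
  }
}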