Use of org.apache.beam.sdk.util.Sleeper in project DataflowJavaSDK-examples by GoogleCloudPlatform.
The class ExampleUtils, method setup.
/**
* Sets up external resources that are required by the example,
* such as Pub/Sub topics and BigQuery tables.
*
* @throws IOException if there is a problem setting up the resources
*/
public void setup() throws IOException {
  Sleeper sleeper = Sleeper.DEFAULT;
  BackOff backOff =
      FluentBackoff.DEFAULT
          .withMaxRetries(3)
          .withInitialBackoff(Duration.millis(200))
          .backoff();
  Throwable lastException = null;
  try {
    do {
      try {
        setupPubsub();
        setupBigQueryTable();
        return;
      } catch (GoogleJsonResponseException e) {
        lastException = e;
      }
    } while (BackOffUtils.next(sleeper, backOff));
  } catch (InterruptedException e) {
    // Restore the interrupt flag and fall through to report the last failure.
    Thread.currentThread().interrupt();
  }
  throw new RuntimeException(lastException);
}
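This is the standard Beam retry idiom: build a BackOff from FluentBackoff, attempt the work, and let BackOffUtils.next sleep and decide whether another attempt is allowed. A minimal self-contained sketch of the same loop, assuming a hypothetical createResources() step that can fail transiently (the class name, helper name, and retry parameters are illustrative, not from the example above):

import java.io.IOException;
import org.apache.beam.sdk.util.BackOff;
import org.apache.beam.sdk.util.BackOffUtils;
import org.apache.beam.sdk.util.FluentBackoff;
import org.apache.beam.sdk.util.Sleeper;
import org.joda.time.Duration;

public class ResourceSetup {

  // Hypothetical setup step that can fail transiently.
  void createResources() throws IOException {
    // ... calls that may throw IOException ...
  }

  public void setupWithRetries() throws IOException {
    Sleeper sleeper = Sleeper.DEFAULT;
    // Up to 3 retries (4 attempts total); FluentBackoff widens the interval between attempts.
    BackOff backOff =
        FluentBackoff.DEFAULT
            .withMaxRetries(3)
            .withInitialBackoff(Duration.millis(200))
            .backoff();
    IOException lastException = null;
    try {
      do {
        try {
          createResources();
          return; // success
        } catch (IOException e) {
          lastException = e; // transient failure; remember it and maybe retry
        }
        // next() sleeps for the next interval and returns false once retries are exhausted.
      } while (BackOffUtils.next(sleeper, backOff));
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the flag, then report the last failure
    }
    throw lastException;
  }
}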
Use of org.apache.beam.sdk.util.Sleeper in project beam by apache.
The class SimplifiedKinesisClient, method describeStreamSummary.
private StreamDescriptionSummary describeStreamSummary(final String streamName)
    throws IOException, InterruptedException {
  // DescribeStreamSummary has limits that can be hit fairly easily if we are attempting
  // to configure multiple KinesisIO inputs in the same account. Retry up to
  // DESCRIBE_STREAM_SUMMARY_MAX_ATTEMPTS times if we end up hitting that limit.
  //
  // Only pass the wrapped exception up once that limit is reached. Use FluentBackoff
  // to implement the retry policy.
  FluentBackoff retryBackoff =
      FluentBackoff.DEFAULT
          .withMaxRetries(DESCRIBE_STREAM_SUMMARY_MAX_ATTEMPTS)
          .withInitialBackoff(DESCRIBE_STREAM_SUMMARY_INITIAL_BACKOFF);
  BackOff backoff = retryBackoff.backoff();
  Sleeper sleeper = Sleeper.DEFAULT;
  DescribeStreamSummaryRequest request = new DescribeStreamSummaryRequest();
  request.setStreamName(streamName);
  while (true) {
    try {
      return kinesis.describeStreamSummary(request).getStreamDescriptionSummary();
    } catch (LimitExceededException exc) {
      if (!BackOffUtils.next(sleeper, backoff)) {
        throw exc;
      }
    }
  }
}
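Unlike the setup() example, this loop rethrows the original LimitExceededException once the retry budget is spent instead of wrapping it in a RuntimeException. The shape generalizes to any call with a single retryable exception type; a sketch under that assumption (the Call interface, the stand-in exception class, and withRetries are illustrative, not Beam or Kinesis APIs):

import java.io.IOException;
import org.apache.beam.sdk.util.BackOff;
import org.apache.beam.sdk.util.BackOffUtils;
import org.apache.beam.sdk.util.FluentBackoff;
import org.apache.beam.sdk.util.Sleeper;
import org.joda.time.Duration;

public class LimitedRetries {

  // Stand-in for the service's retryable exception (illustrative).
  static class RateLimitException extends RuntimeException {}

  interface Call<T> {
    T call();
  }

  // Retry `call` while RateLimitException is thrown; rethrow it once the backoff says stop.
  static <T> T withRetries(Call<T> call, int maxRetries, Duration initialBackoff)
      throws IOException, InterruptedException {
    BackOff backoff =
        FluentBackoff.DEFAULT
            .withMaxRetries(maxRetries)
            .withInitialBackoff(initialBackoff)
            .backoff();
    Sleeper sleeper = Sleeper.DEFAULT;
    while (true) {
      try {
        return call.call();
      } catch (RateLimitException e) {
        // next() sleeps and returns false when no attempts remain; propagate the original error.
        if (!BackOffUtils.next(sleeper, backoff)) {
          throw e;
        }
      }
    }
  }
}

Under these assumptions a call site would read something like withRetries(() -> client.describe(request), 5, Duration.standardSeconds(1)).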
Use of org.apache.beam.sdk.util.Sleeper in project beam by apache.
The class RateLimitPolicyFactoryTest, method defaultRateLimiterShouldUseBackoffs.
@Test
public void defaultRateLimiterShouldUseBackoffs() throws Exception {
  assertThat(withDefaultRateLimiter().getRateLimitPolicy())
      .isInstanceOf(DefaultRateLimiter.class);
  assertThat(withDefaultRateLimiter(millis(1), millis(1), millis(1)).getRateLimitPolicy())
      .isInstanceOf(DefaultRateLimiter.class);
  Sleeper sleeper = mock(Sleeper.class);
  BackOff emptySuccess = mock(BackOff.class);
  BackOff throttled = mock(BackOff.class);
  RateLimitPolicy policy = new DefaultRateLimiter(emptySuccess, throttled, sleeper);
  // emptySuccess is reset after receiving at least one record; throttled is reset on any success.
  policy.onSuccess(ImmutableList.of(mock(KinesisRecord.class)));
  verify(emptySuccess).reset();
  verify(throttled).reset();
  verifyNoInteractions(sleeper);
  clearInvocations(emptySuccess, throttled);
  when(emptySuccess.nextBackOffMillis()).thenReturn(88L, 99L);
  // Throttle if no records were received; throttled is reset again.
  policy.onSuccess(ImmutableList.of());
  policy.onSuccess(ImmutableList.of());
  verify(emptySuccess, times(2)).nextBackOffMillis();
  verify(throttled, times(2)).reset();
  verify(sleeper).sleep(88L);
  verify(sleeper).sleep(99L);
  verifyNoMoreInteractions(sleeper, throttled, emptySuccess);
  clearInvocations(emptySuccess, throttled, sleeper);
  when(throttled.nextBackOffMillis()).thenReturn(111L, 222L);
  // Each onThrottle call sleeps for the next throttled backoff interval.
  policy.onThrottle(mock(KinesisClientThrottledException.class));
  policy.onThrottle(mock(KinesisClientThrottledException.class));
  verify(throttled, times(2)).nextBackOffMillis();
  verify(sleeper).sleep(111L);
  verify(sleeper).sleep(222L);
  verifyNoMoreInteractions(sleeper, throttled, emptySuccess);
}
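The test never actually waits: Sleeper is a single-method interface, so a Mockito mock records the requested delays and lets the test assert on exact backoff values. A minimal sketch of the same technique against a hypothetical component (Throttler and its methods are illustrative):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.io.IOException;
import org.apache.beam.sdk.util.BackOff;
import org.apache.beam.sdk.util.Sleeper;
import org.junit.Test;

public class ThrottlerTest {

  // Hypothetical component under test: sleeps for whatever its BackOff dictates.
  static class Throttler {
    private final BackOff backOff;
    private final Sleeper sleeper;

    Throttler(BackOff backOff, Sleeper sleeper) {
      this.backOff = backOff;
      this.sleeper = sleeper;
    }

    void throttle() throws IOException, InterruptedException {
      sleeper.sleep(backOff.nextBackOffMillis());
    }
  }

  @Test
  public void throttleSleepsForBackoffIntervals() throws Exception {
    BackOff backOff = mock(BackOff.class);
    Sleeper sleeper = mock(Sleeper.class);
    when(backOff.nextBackOffMillis()).thenReturn(88L, 99L);

    Throttler throttler = new Throttler(backOff, sleeper);
    throttler.throttle();
    throttler.throttle();

    // The mock records the requested delays instead of sleeping, so assertions are exact and fast.
    verify(sleeper).sleep(88L);
    verify(sleeper).sleep(99L);
  }
}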
Use of org.apache.beam.sdk.util.Sleeper in project beam by apache.
The class BaseFirestoreV1WriteFnTest, method endToEnd_deadlineExceededOnAnIndividualWriteResultsInThrottling.
@Test
public final void endToEnd_deadlineExceededOnAnIndividualWriteResultsInThrottling()
    throws Exception {
  final long totalDocCount = 1_000_000;
  final int numWorkers = 100;
  final long docCount = totalDocCount / numWorkers;
  LOG.info("docCount = {}", docCount);
  RpcQosOptions options =
      rpcQosOptions.toBuilder()
          .withHintMaxNumWorkers(numWorkers)
          .withSamplePeriod(Duration.standardMinutes(10))
          .withReportDiagnosticMetrics()
          .build();
  LOG.debug("options = {}", options);
  FirestoreStatefulComponentFactory ff = mock(FirestoreStatefulComponentFactory.class);
  when(ff.getFirestoreStub(any())).thenReturn(stub);
  Random random = new Random(12345);
  TestClock clock = new TestClock(Instant.EPOCH, Duration.standardSeconds(1));
  // Instead of blocking, "sleeping" advances the fake clock by the requested duration.
  Sleeper sleeper = millis -> clock.setNext(advanceClockBy(Duration.millis(millis)));
  RpcQosImpl qos =
      new RpcQosImpl(
          options,
          random,
          sleeper,
          metricsFixture.counterFactory,
          metricsFixture.distributionFactory);
  // Delegating spy: forwards every RpcQos call to the real implementation above.
  RpcQos qosSpy =
      mock(
          RpcQos.class,
          invocation -> {
            Method method = invocation.getMethod();
            LOG.debug("method = {}", method);
            Method actualMethod =
                qos.getClass().getMethod(method.getName(), method.getParameterTypes());
            return actualMethod.invoke(qos, invocation.getArguments());
          });
  when(ff.getRpcQos(options)).thenReturn(qosSpy);
  int defaultDocumentWriteLatency = 30;
  final AtomicLong writeCounter = new AtomicLong();
  when(processContext.element())
      .thenAnswer(invocation -> newWrite(writeCounter.getAndIncrement()));
  // For the first 20 minutes of simulated time every write succeeds quickly; after that,
  // random latencies above 300 ms are reported as DEADLINE_EXCEEDED.
  when(callable.call(any()))
      .thenAnswer(
          new Answer<BatchWriteResponse>() {

            private final Random rand = new Random(84572908);

            private final Instant threshold =
                Instant.ofEpochMilli(Duration.standardMinutes(20).getMillis());

            @Override
            public BatchWriteResponse answer(InvocationOnMock invocation) throws Throwable {
              BatchWriteRequest request = invocation.getArgument(0, BatchWriteRequest.class);
              LOG.debug("request = {}", request);
              long requestDurationMs = 0;
              BatchWriteResponse.Builder builder = BatchWriteResponse.newBuilder();
              for (Write ignored : request.getWritesList()) {
                builder.addWriteResults(WriteResult.newBuilder().build());
                if (clock.prev.isBefore(threshold)) {
                  requestDurationMs += defaultDocumentWriteLatency;
                  builder.addStatus(STATUS_OK);
                } else {
                  int latency = rand.nextInt(1500);
                  LOG.debug("latency = {}", latency);
                  if (latency > 300) {
                    builder.addStatus(STATUS_DEADLINE_EXCEEDED);
                  } else {
                    builder.addStatus(STATUS_OK);
                  }
                  requestDurationMs += latency;
                }
              }
              clock.setNext(advanceClockBy(Duration.millis(requestDurationMs)));
              return builder.build();
            }
          });
  LOG.info(
      "### parameters: {defaultDocumentWriteLatency: {}, rpcQosOptions: {}}",
      defaultDocumentWriteLatency,
      options);
  FnT fn =
      getFn(clock, ff, options, metricsFixture.counterFactory, metricsFixture.distributionFactory);
  fn.setup();
  fn.startBundle(startBundleContext);
  while (writeCounter.get() < docCount) {
    fn.processElement(processContext, window);
  }
  fn.finishBundle(finishBundleContext);
  LOG.info("writeCounter = {}", writeCounter.get());
  LOG.info("clock.prev = {}", clock.prev);
  MyDistribution qosAdaptiveThrottlerThrottlingMs =
      metricsFixture.distributions.get("qos_adaptiveThrottler_throttlingMs");
  assertNotNull(qosAdaptiveThrottlerThrottlingMs);
  List<Long> updateInvocations = qosAdaptiveThrottlerThrottlingMs.updateInvocations;
  assertFalse(updateInvocations.isEmpty());
}
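The load shape here is driven entirely by simulated time: the lambda Sleeper advances the TestClock instead of blocking, so minutes of simulated throttling elapse instantly. A stripped-down sketch of that idea, assuming a hypothetical FakeClock rather than the test's TestClock:

import java.util.concurrent.atomic.AtomicLong;
import org.apache.beam.sdk.util.Sleeper;

public class FakeClockExample {

  // Hypothetical manually-advanced clock.
  static class FakeClock {
    final AtomicLong nowMillis = new AtomicLong(0);

    long now() {
      return nowMillis.get();
    }

    void advance(long millis) {
      nowMillis.addAndGet(millis);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    FakeClock clock = new FakeClock();
    // Sleeper is a single-method interface, so a lambda can redirect "sleeping"
    // into clock movement; no wall-clock time passes.
    Sleeper sleeper = millis -> clock.advance(millis);

    sleeper.sleep(30_000); // "sleeps" 30 seconds instantly
    System.out.println("simulated time: " + clock.now() + " ms"); // prints 30000 ms
  }
}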
Use of org.apache.beam.sdk.util.Sleeper in project beam by apache.
The class SpannerIOWriteTest, method retryOnAbortedAndDeadlineExceeded.
@Test
public void retryOnAbortedAndDeadlineExceeded() throws InterruptedException {
  List<Mutation> mutationList = Arrays.asList(m((long) 1));
  String errString =
      "Transaction aborted. "
          + "Database schema probably changed during transaction, retry may succeed.";
  // Mock the sleeper so that the test does not actually sleep.
  WriteToSpannerFn.sleeper = Mockito.mock(Sleeper.class);
  // Respond with (1) an aborted transaction twice, (2) deadline exceeded,
  // (3) an aborted transaction three times, (4) deadline exceeded, and finally success.
  when(serviceFactory
          .mockDatabaseClient()
          .writeAtLeastOnceWithOptions(any(), any(ReadQueryUpdateTransactionOption.class)))
      .thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, errString))
      .thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, errString))
      .thenThrow(
          SpannerExceptionFactory.newSpannerException(
              ErrorCode.DEADLINE_EXCEEDED, "simulated Timeout 1"))
      .thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, errString))
      .thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, errString))
      .thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, errString))
      .thenThrow(
          SpannerExceptionFactory.newSpannerException(
              ErrorCode.DEADLINE_EXCEEDED, "simulated Timeout 2"))
      .thenReturn(new CommitResponse(Timestamp.now()));
  SpannerWriteResult result =
      pipeline
          .apply(Create.of(mutationList))
          .apply(
              SpannerIO.write()
                  .withProjectId("test-project")
                  .withInstanceId("test-instance")
                  .withDatabaseId("test-database")
                  .withServiceFactory(serviceFactory)
                  .withBatchSizeBytes(0)
                  .withFailureMode(FailureMode.FAIL_FAST));
  // Expect zero failed mutations.
  PAssert.that(result.getFailedMutations())
      .satisfies(
          m -> {
            assertEquals(0, Iterables.size(m));
            return null;
          });
  pipeline.run().waitUntilFinish();
  // Two sleeps, one for each DEADLINE_EXCEEDED retry.
  verify(WriteToSpannerFn.sleeper, times(2)).sleep(anyLong());
  // Eight write attempts in total for the single mutation group.
  verify(serviceFactory.mockDatabaseClient(), times(8))
      .writeAtLeastOnceWithOptions(any(), any(ReadQueryUpdateTransactionOption.class));
}
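Swapping a static Sleeper field for a mock both removes real delays and turns sleeps into countable interactions, which is what lets the test assert exactly two backoff sleeps across eight write attempts. A self-contained sketch of the pattern (FlakyWriter is illustrative, not SpannerIO's WriteToSpannerFn):

import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import org.apache.beam.sdk.util.Sleeper;
import org.junit.Test;

public class FlakyWriterTest {

  // Hypothetical writer that sleeps between retries via a swappable static Sleeper.
  static class FlakyWriter {
    static Sleeper sleeper = Sleeper.DEFAULT;
    private int failuresLeft;

    FlakyWriter(int failures) {
      this.failuresLeft = failures;
    }

    void write() throws InterruptedException {
      while (true) {
        if (failuresLeft-- <= 0) {
          return; // simulated success
        }
        sleeper.sleep(100); // back off before the next attempt
      }
    }
  }

  @Test
  public void writeRetriesWithoutRealSleeps() throws Exception {
    FlakyWriter.sleeper = mock(Sleeper.class); // no real delay; invocations are recorded
    new FlakyWriter(2).write();
    // One sleep per simulated failure.
    verify(FlakyWriter.sleeper, times(2)).sleep(anyLong());
  }
}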