use of org.apache.beam.sdk.util.Sleeper in project beam by apache.
the class ExampleUtils method setup.
/**
* Sets up external resources that are required by the example, such as Pub/Sub topics and
* BigQuery tables.
*
* @throws IOException if there is a problem setting up the resources
*/
public void setup() throws IOException {
  Sleeper sleeper = Sleeper.DEFAULT;
  BackOff backOff =
      FluentBackoff.DEFAULT
          .withMaxRetries(3)
          .withInitialBackoff(Duration.millis(200))
          .backoff();
  Throwable lastException = null;
  try {
    do {
      try {
        setupPubsub();
        setupBigQueryTable();
        return;
      } catch (GoogleJsonResponseException e) {
        lastException = e;
      }
    } while (BackOffUtils.next(sleeper, backOff));
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    // Ignore InterruptedException
  }
  throw new RuntimeException(lastException);
}
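The method above is an instance of the common Sleeper/FluentBackoff retry loop: attempt the work, and on a transient failure let BackOffUtils.next sleep and decide whether another attempt is allowed. The sketch below shows the same pattern in isolation; callExternalService() is a hypothetical placeholder and not part of the Beam example.
import java.io.IOException;
import org.apache.beam.sdk.util.BackOff;
import org.apache.beam.sdk.util.BackOffUtils;
import org.apache.beam.sdk.util.FluentBackoff;
import org.apache.beam.sdk.util.Sleeper;
import org.joda.time.Duration;

public class RetryLoopSketch {

  // Hypothetical placeholder for any idempotent call that may fail transiently.
  static void callExternalService() throws IOException {
    // ...
  }

  public static void callWithRetries() throws IOException, InterruptedException {
    Sleeper sleeper = Sleeper.DEFAULT;
    BackOff backOff =
        FluentBackoff.DEFAULT
            .withMaxRetries(3)
            .withInitialBackoff(Duration.millis(200))
            .backoff();
    while (true) {
      try {
        callExternalService();
        return;
      } catch (IOException e) {
        // BackOffUtils.next sleeps for the next backoff interval and returns false
        // once the retry budget is exhausted.
        if (!BackOffUtils.next(sleeper, backOff)) {
          throw e;
        }
      }
    }
  }
}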
use of org.apache.beam.sdk.util.Sleeper in project beam by apache.
the class SpannerIOWriteTest method retryOnSchemaChangeException.
@Test
public void retryOnSchemaChangeException() throws InterruptedException {
  List<Mutation> mutationList = Arrays.asList(m((long) 1));
  String errString =
      "Transaction aborted. "
          + "Database schema probably changed during transaction, retry may succeed.";
  // Mock the sleeper so that it does not actually sleep.
  WriteToSpannerFn.sleeper = Mockito.mock(Sleeper.class);
  // Respond with two aborted transactions (schema change) and then a success.
  when(serviceFactory
          .mockDatabaseClient()
          .writeAtLeastOnceWithOptions(any(), any(ReadQueryUpdateTransactionOption.class)))
      .thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, errString))
      .thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, errString))
      .thenReturn(new CommitResponse(Timestamp.now()));
  SpannerWriteResult result =
      pipeline
          .apply(Create.of(mutationList))
          .apply(
              SpannerIO.write()
                  .withProjectId("test-project")
                  .withInstanceId("test-instance")
                  .withDatabaseId("test-database")
                  .withServiceFactory(serviceFactory)
                  .withBatchSizeBytes(0)
                  .withFailureMode(FailureMode.FAIL_FAST));
  // All writes succeed, so verify that there are no failed mutations.
  PAssert.that(result.getFailedMutations())
      .satisfies(
          m -> {
            assertEquals(0, Iterables.size(m));
            return null;
          });
  pipeline.run().waitUntilFinish();
  // 0 calls to sleeper.
  verify(WriteToSpannerFn.sleeper, times(0)).sleep(anyLong());
  // 3 write attempts for the single mutationGroup.
  verify(serviceFactory.mockDatabaseClient(), times(3))
      .writeAtLeastOnceWithOptions(any(), any(ReadQueryUpdateTransactionOption.class));
}
use of org.apache.beam.sdk.util.Sleeper in project beam by apache.
the class SpannerIOWriteTest method deadlineExceededRetries.
@Test
public void deadlineExceededRetries() throws InterruptedException {
  List<Mutation> mutationList = Arrays.asList(m((long) 1));
  // Mock the sleeper so that it does not actually sleep.
  WriteToSpannerFn.sleeper = Mockito.mock(Sleeper.class);
  // Respond with 2 timeouts and a success.
  when(serviceFactory
          .mockDatabaseClient()
          .writeAtLeastOnceWithOptions(any(), any(ReadQueryUpdateTransactionOption.class)))
      .thenThrow(
          SpannerExceptionFactory.newSpannerException(
              ErrorCode.DEADLINE_EXCEEDED, "simulated Timeout 1"))
      .thenThrow(
          SpannerExceptionFactory.newSpannerException(
              ErrorCode.DEADLINE_EXCEEDED, "simulated Timeout 2"))
      .thenReturn(new CommitResponse(Timestamp.now()));
  SpannerWriteResult result =
      pipeline
          .apply(Create.of(mutationList))
          .apply(
              SpannerIO.write()
                  .withProjectId("test-project")
                  .withInstanceId("test-instance")
                  .withDatabaseId("test-database")
                  .withServiceFactory(serviceFactory)
                  .withBatchSizeBytes(0)
                  .withFailureMode(SpannerIO.FailureMode.REPORT_FAILURES));
  // All writes succeed, so verify that there are no failed mutations.
  PAssert.that(result.getFailedMutations())
      .satisfies(
          m -> {
            assertEquals(0, Iterables.size(m));
            return null;
          });
  pipeline.run().waitUntilFinish();
  // 2 calls to sleeper.
  verify(WriteToSpannerFn.sleeper, times(2)).sleep(anyLong());
  // 3 write attempts for the single mutationGroup.
  verify(serviceFactory.mockDatabaseClient(), times(3))
      .writeAtLeastOnceWithOptions(any(), any(ReadQueryUpdateTransactionOption.class));
}
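Both tests above rely on WriteToSpannerFn exposing its Sleeper as a static field so a test can overwrite it with a Mockito mock and avoid real pauses. A minimal sketch of that injectable-sleeper pattern, using a hypothetical RetryingWriter class rather than the actual WriteToSpannerFn internals:
import java.io.IOException;
import org.apache.beam.sdk.util.BackOff;
import org.apache.beam.sdk.util.Sleeper;

class RetryingWriter {

  // Visible for testing: tests can assign a Mockito mock so retries do not really pause.
  static Sleeper sleeper = Sleeper.DEFAULT;

  void writeWithRetries(Runnable attempt, BackOff backOff)
      throws IOException, InterruptedException {
    while (true) {
      try {
        attempt.run();
        return;
      } catch (RuntimeException e) {
        long backOffMillis = backOff.nextBackOffMillis();
        // BackOff.STOP signals that the retry budget is exhausted.
        if (backOffMillis == BackOff.STOP) {
          throw e;
        }
        sleeper.sleep(backOffMillis);
      }
    }
  }
}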
use of org.apache.beam.sdk.util.Sleeper in project beam by apache.
the class RateLimitPolicyFactoryTest method defaultRateLimiterShouldUseBackoffs.
@Test
public void defaultRateLimiterShouldUseBackoffs() throws Exception {
  assertThat(withDefaultRateLimiter().getRateLimitPolicy())
      .isInstanceOf(DefaultRateLimiter.class);
  assertThat(withDefaultRateLimiter(millis(1), millis(1), millis(1)).getRateLimitPolicy())
      .isInstanceOf(DefaultRateLimiter.class);
  Sleeper sleeper = mock(Sleeper.class);
  BackOff emptySuccess = mock(BackOff.class);
  BackOff throttled = mock(BackOff.class);
  RateLimitPolicy policy = new DefaultRateLimiter(emptySuccess, throttled, sleeper);
  // Reset emptySuccess after receiving at least 1 record; throttled is reset on any success.
  policy.onSuccess(ImmutableList.of(mock(KinesisRecord.class)));
  verify(emptySuccess).reset();
  verify(throttled).reset();
  verifyNoInteractions(sleeper);
  clearInvocations(emptySuccess, throttled);
  when(emptySuccess.nextBackOffMillis()).thenReturn(88L, 99L);
  // Throttle if no records were received; throttled is reset again.
  policy.onSuccess(ImmutableList.of());
  policy.onSuccess(ImmutableList.of());
  verify(emptySuccess, times(2)).nextBackOffMillis();
  verify(throttled, times(2)).reset();
  verify(sleeper).sleep(88L);
  verify(sleeper).sleep(99L);
  verifyNoMoreInteractions(sleeper, throttled, emptySuccess);
  clearInvocations(emptySuccess, throttled, sleeper);
  when(throttled.nextBackOffMillis()).thenReturn(111L, 222L);
  // Throttle on onThrottle.
  policy.onThrottle(mock(KinesisClientThrottledException.class));
  policy.onThrottle(mock(KinesisClientThrottledException.class));
  verify(throttled, times(2)).nextBackOffMillis();
  verify(sleeper).sleep(111L);
  verify(sleeper).sleep(222L);
  verifyNoMoreInteractions(sleeper, throttled, emptySuccess);
}
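By mocking both the Sleeper and the two BackOff instances, the test verifies the rate-limiting behaviour without depending on real time. A simplified sketch of that behaviour (an illustrative class, not the actual DefaultRateLimiter implementation): back off while polls return no records, back off separately while the service throttles, and reset the corresponding backoff on success.
import java.io.IOException;
import java.util.List;
import org.apache.beam.sdk.util.BackOff;
import org.apache.beam.sdk.util.Sleeper;

class SimpleRateLimiterSketch {

  private final BackOff emptySuccess;
  private final BackOff throttled;
  private final Sleeper sleeper;

  SimpleRateLimiterSketch(BackOff emptySuccess, BackOff throttled, Sleeper sleeper) {
    this.emptySuccess = emptySuccess;
    this.throttled = throttled;
    this.sleeper = sleeper;
  }

  // A successful poll always resets the throttle backoff; an empty poll additionally
  // sleeps for the next "empty success" interval, while a non-empty poll resets it.
  void onSuccess(List<?> records) throws IOException, InterruptedException {
    throttled.reset();
    if (records.isEmpty()) {
      sleeper.sleep(emptySuccess.nextBackOffMillis());
    } else {
      emptySuccess.reset();
    }
  }

  // A throttling error from the service sleeps for the next throttle interval.
  void onThrottle(Exception cause) throws IOException, InterruptedException {
    sleeper.sleep(throttled.nextBackOffMillis());
  }
}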
use of org.apache.beam.sdk.util.Sleeper in project beam by apache.
the class SimplifiedKinesisClient method describeStreamSummary.
private StreamDescriptionSummary describeStreamSummary(final String streamName)
    throws IOException, InterruptedException {
  // DescribeStreamSummary has limits that can be hit fairly easily if we are attempting
  // to configure multiple KinesisIO inputs in the same account. Retry up to
  // DESCRIBE_STREAM_SUMMARY_MAX_ATTEMPTS times if we end up hitting that limit.
  //
  // Only pass the wrapped exception up once that limit is reached. Use FluentBackoff
  // to implement the retry policy.
  FluentBackoff retryBackoff =
      FluentBackoff.DEFAULT
          .withMaxRetries(DESCRIBE_STREAM_SUMMARY_MAX_ATTEMPTS)
          .withInitialBackoff(DESCRIBE_STREAM_SUMMARY_INITIAL_BACKOFF);
  BackOff backoff = retryBackoff.backoff();
  Sleeper sleeper = Sleeper.DEFAULT;
  DescribeStreamSummaryRequest request =
      DescribeStreamSummaryRequest.builder().streamName(streamName).build();
  while (true) {
    try {
      return kinesis.describeStreamSummary(request).streamDescriptionSummary();
    } catch (LimitExceededException exc) {
      if (!BackOffUtils.next(sleeper, backoff)) {
        throw exc;
      }
    }
  }
}
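For completeness, here is a hedged sketch of how this retry behaviour could be exercised in a unit test with a mocked KinesisClient: fail twice with LimitExceededException, then succeed, and expect exactly three calls. The stream name, error messages, and assertions are illustrative and not taken from the Beam test suite.
// Illustrative stubbing only; assumes the Mockito and AWS SDK v2 model types used by KinesisIO.
KinesisClient kinesis = Mockito.mock(KinesisClient.class);
when(kinesis.describeStreamSummary(any(DescribeStreamSummaryRequest.class)))
    .thenThrow(LimitExceededException.builder().message("simulated limit 1").build())
    .thenThrow(LimitExceededException.builder().message("simulated limit 2").build())
    .thenReturn(
        DescribeStreamSummaryResponse.builder()
            .streamDescriptionSummary(
                StreamDescriptionSummary.builder().streamName("test-stream").build())
            .build());
// ... invoke the code under test with the mocked client, then confirm it retried twice:
verify(kinesis, times(3)).describeStreamSummary(any(DescribeStreamSummaryRequest.class));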