Use of org.apache.beam.sdk.io.gcp.firestore.RpcQos.RpcWriteAttempt.Element in project beam by apache.
From the class BaseFirestoreV1WriteFnTest, method endToEnd_awaitSafeToProceed_falseIsTerminalForAttempt:
@Test
public final void endToEnd_awaitSafeToProceed_falseIsTerminalForAttempt() throws Exception {
  RpcQosOptions options = rpcQosOptions.toBuilder().withBatchMaxCount(2).build();
  Instant rpc1Start = Instant.ofEpochMilli(3);
  Instant rpc1End = Instant.ofEpochMilli(4);
  ArgumentCaptor<BatchWriteRequest> requestCaptor = ArgumentCaptor.forClass(BatchWriteRequest.class);
  Write write = newWrite();
  BatchWriteRequest expectedRequest =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write)
          .build();
  BatchWriteResponse response = BatchWriteResponse.newBuilder().addStatus(STATUS_OK).build();
  when(processContext.element()).thenReturn(write);
  // process element attempt 1
  when(attempt.awaitSafeToProceed(any()))
      .thenReturn(false)
      .thenThrow(new IllegalStateException("too many attempt1#awaitSafeToProceed"));
  // process element attempt 2
  when(attempt2.awaitSafeToProceed(any()))
      .thenReturn(true)
      .thenThrow(new IllegalStateException("too many attempt2#awaitSafeToProceed"));
  when(attempt2.<Write, Element<Write>>newFlushBuffer(any()))
      .thenAnswer(invocation -> newFlushBuffer(options));
  // finish bundle attempt
  RpcQos.RpcWriteAttempt finishBundleAttempt = mock(RpcWriteAttempt.class);
  when(finishBundleAttempt.awaitSafeToProceed(any()))
      .thenReturn(true, true)
      .thenThrow(new IllegalStateException("too many finishBundleAttempt#awaitSafeToProceed"));
  when(finishBundleAttempt.<Write, Element<Write>>newFlushBuffer(any()))
      .thenAnswer(invocation -> newFlushBuffer(options));
  when(rpcQos.newWriteAttempt(any())).thenReturn(attempt, attempt2, finishBundleAttempt);
  when(callable.call(requestCaptor.capture())).thenReturn(response);
  FnT fn = getFn(clock, ff, options, CounterFactory.DEFAULT, DistributionFactory.DEFAULT);
  runFunction(fn);
  assertEquals(expectedRequest, requestCaptor.getValue());
  verify(attempt, times(1)).awaitSafeToProceed(any());
  verifyNoMoreInteractions(attempt);
  verify(attempt2, times(1)).awaitSafeToProceed(any());
  verify(attempt2, times(1)).newFlushBuffer(any());
  verifyNoMoreInteractions(attempt2);
  verify(finishBundleAttempt, times(1)).recordRequestStart(rpc1Start, 1);
  verify(finishBundleAttempt, times(1)).recordWriteCounts(rpc1End, 1, 0);
  verify(finishBundleAttempt, times(1)).completeSuccess();
  verify(finishBundleAttempt, never()).checkCanRetry(any(), any());
}
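For reference, a minimal self-contained sketch of the stubbing pattern this test relies on: chaining thenReturn with a trailing thenThrow so that any call beyond the expected count fails the test loudly. The Gate interface and the test class name are hypothetical, not part of Beam.

import static org.junit.Assert.assertFalse;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import org.junit.Test;

public class StrictStubbingSketch {

  // Hypothetical collaborator used only for this illustration.
  interface Gate {
    boolean awaitSafeToProceed();
  }

  @Test
  public void extraCallsFailLoudly() {
    Gate gate = mock(Gate.class);
    // First call reports "not safe"; any further call trips the guard exception,
    // so the test fails immediately if the code under test polls the same attempt again.
    when(gate.awaitSafeToProceed())
        .thenReturn(false)
        .thenThrow(new IllegalStateException("too many awaitSafeToProceed calls"));

    assertFalse(gate.awaitSafeToProceed());
    verify(gate).awaitSafeToProceed();
  }
}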
Use of org.apache.beam.sdk.io.gcp.firestore.RpcQos.RpcWriteAttempt.Element in project beam by apache.
From the class BaseFirestoreV1WriteFnTest, method writesRemainInQueueWhenFlushIsNotReadyAndThenFlushesInFinishBundle:
@Test
public final void writesRemainInQueueWhenFlushIsNotReadyAndThenFlushesInFinishBundle() throws Exception {
  RpcQosOptions options = rpcQosOptions.toBuilder().withMaxAttempts(1).build();
  Write write = newWrite();
  BatchWriteRequest expectedRequest =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write)
          .build();
  BatchWriteResponse response = BatchWriteResponse.newBuilder().addStatus(STATUS_OK).build();
  when(processContext.element())
      .thenReturn(write)
      .thenThrow(new IllegalStateException("too many element calls"));
  when(rpcQos.newWriteAttempt(any()))
      .thenReturn(attempt, attempt2)
      .thenThrow(new IllegalStateException("too many attempt calls"));
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt2.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(any()))
      .thenAnswer(invocation -> newFlushBuffer(options));
  when(attempt2.<Write, Element<Write>>newFlushBuffer(any()))
      .thenAnswer(invocation -> newFlushBuffer(options));
  FnT fn = getFn(clock, ff, options, CounterFactory.DEFAULT, DistributionFactory.DEFAULT);
  fn.populateDisplayData(displayDataBuilder);
  fn.setup();
  fn.startBundle(startBundleContext);
  fn.processElement(processContext, window);
  assertEquals(1, fn.writes.size());
  verify(attempt, never()).recordWriteCounts(any(), anyInt(), anyInt());
  verify(attempt, never()).checkCanRetry(any(), any());
  verify(attempt, never()).completeSuccess();
  Instant attempt2RpcStart = Instant.ofEpochMilli(2);
  Instant attempt2RpcEnd = Instant.ofEpochMilli(3);
  ArgumentCaptor<BatchWriteRequest> requestCaptor = ArgumentCaptor.forClass(BatchWriteRequest.class);
  when(callable.call(requestCaptor.capture())).thenReturn(response);
  fn.finishBundle(finishBundleContext);
  assertEquals(0, fn.writes.size());
  assertEquals(expectedRequest, requestCaptor.getValue());
  verify(attempt2, times(1)).recordRequestStart(attempt2RpcStart, 1);
  verify(attempt2, times(1)).recordWriteCounts(attempt2RpcEnd, 1, 0);
  verify(attempt2, never()).recordWriteCounts(any(), anyInt(), gt(0));
  verify(attempt2, never()).checkCanRetry(any(), any());
  verify(attempt2, times(1)).completeSuccess();
}
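A self-contained sketch of the buffering behavior being asserted: elements are held until a flush policy fires, and anything still queued is force-flushed when the bundle finishes. DeferredFlushSketch and its method names are illustrative only, not the Beam implementation.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.function.Predicate;

public class DeferredFlushSketch<T> {

  private final Queue<T> pending = new ArrayDeque<>();
  private final Predicate<Queue<T>> readyToFlush;

  public DeferredFlushSketch(Predicate<Queue<T>> readyToFlush) {
    this.readyToFlush = readyToFlush;
  }

  // processElement-style path: enqueue, and only flush when the policy says it is worthwhile.
  public List<T> add(T element) {
    pending.add(element);
    if (readyToFlush.test(pending)) {
      return drain();
    }
    return new ArrayList<>(); // nothing flushed yet; the element stays queued
  }

  // finishBundle-style path: whatever is still queued must be flushed before the bundle ends.
  public List<T> finish() {
    return drain();
  }

  private List<T> drain() {
    List<T> batch = new ArrayList<>(pending);
    pending.clear();
    return batch;
  }
}

With a policy that never fires during add(), every element stays queued until finish(), which mirrors the assertions above: fn.writes holds one element after processElement and is empty after finishBundle.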
Use of org.apache.beam.sdk.io.gcp.firestore.RpcQos.RpcWriteAttempt.Element in project beam by apache.
From the class BaseFirestoreV1WriteFnTest, method endToEnd_maxBatchSizeRespected:
@Test
public final void endToEnd_maxBatchSizeRespected() throws Exception {
  Instant enqueue0 = Instant.ofEpochMilli(0);
  Instant enqueue1 = Instant.ofEpochMilli(1);
  Instant enqueue2 = Instant.ofEpochMilli(2);
  Instant enqueue3 = Instant.ofEpochMilli(3);
  Instant enqueue4 = Instant.ofEpochMilli(4);
  Instant group1Rpc1Start = Instant.ofEpochMilli(5);
  Instant group1Rpc1End = Instant.ofEpochMilli(6);
  Instant enqueue5 = Instant.ofEpochMilli(7);
  Instant finalFlush = Instant.ofEpochMilli(8);
  Instant group2Rpc1Start = Instant.ofEpochMilli(9);
  Instant group2Rpc1End = Instant.ofEpochMilli(10);
  Write write0 = newWrite(0);
  Write write1 = newWrite(1);
  Write write2 = newWrite(2);
  Write write3 = newWrite(3);
  Write write4 = newWrite(4);
  Write write5 = newWrite(5);
  int maxValuesPerGroup = 5;
  BatchWriteRequest.Builder builder =
      BatchWriteRequest.newBuilder().setDatabase("projects/testing-project/databases/(default)");
  BatchWriteRequest expectedGroup1Request =
      builder.build().toBuilder()
          .addWrites(write0)
          .addWrites(write1)
          .addWrites(write2)
          .addWrites(write3)
          .addWrites(write4)
          .build();
  BatchWriteRequest expectedGroup2Request = builder.build().toBuilder().addWrites(write5).build();
  BatchWriteResponse group1Response =
      BatchWriteResponse.newBuilder()
          .addStatus(STATUS_OK)
          .addStatus(STATUS_OK)
          .addStatus(STATUS_OK)
          .addStatus(STATUS_OK)
          .addStatus(STATUS_OK)
          .build();
  BatchWriteResponse group2Response = BatchWriteResponse.newBuilder().addStatus(STATUS_OK).build();
  RpcQosOptions options = rpcQosOptions.toBuilder().withBatchMaxCount(maxValuesPerGroup).build();
  FlushBuffer<Element<Write>> flushBuffer = spy(newFlushBuffer(options));
  FlushBuffer<Element<Write>> flushBuffer2 = spy(newFlushBuffer(options));
  when(processContext.element()).thenReturn(write0, write1, write2, write3, write4, write5);
  when(rpcQos.newWriteAttempt(any()))
      .thenReturn(attempt, attempt, attempt, attempt, attempt, attempt2, attempt2, attempt2)
      .thenThrow(new IllegalStateException("too many attempts"));
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt2.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(enqueue0)).thenReturn(newFlushBuffer(options));
  when(attempt.<Write, Element<Write>>newFlushBuffer(enqueue1)).thenReturn(newFlushBuffer(options));
  when(attempt.<Write, Element<Write>>newFlushBuffer(enqueue2)).thenReturn(newFlushBuffer(options));
  when(attempt.<Write, Element<Write>>newFlushBuffer(enqueue3)).thenReturn(newFlushBuffer(options));
  when(attempt.<Write, Element<Write>>newFlushBuffer(enqueue4)).thenReturn(flushBuffer);
  when(callable.call(expectedGroup1Request)).thenReturn(group1Response);
  when(attempt2.<Write, Element<Write>>newFlushBuffer(enqueue5)).thenReturn(newFlushBuffer(options));
  when(attempt2.<Write, Element<Write>>newFlushBuffer(finalFlush)).thenReturn(flushBuffer2);
  when(callable.call(expectedGroup2Request)).thenReturn(group2Response);
  runFunction(
      getFn(clock, ff, options, CounterFactory.DEFAULT, DistributionFactory.DEFAULT),
      maxValuesPerGroup + 1);
  verify(attempt, times(1)).recordRequestStart(group1Rpc1Start, 5);
  verify(attempt, times(1)).recordWriteCounts(group1Rpc1End, 5, 0);
  verify(attempt, times(1)).completeSuccess();
  verify(attempt2, times(1)).recordRequestStart(group2Rpc1Start, 1);
  verify(attempt2, times(1)).recordWriteCounts(group2Rpc1End, 1, 0);
  verify(attempt2, times(1)).completeSuccess();
  verify(callable, times(1)).call(expectedGroup1Request);
  verify(callable, times(1)).call(expectedGroup2Request);
  verifyNoMoreInteractions(callable);
  verify(flushBuffer, times(maxValuesPerGroup)).offer(any());
  verify(flushBuffer2, times(1)).offer(any());
}
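Assuming the batching behavior reduces to simple partitioning, the following sketch shows how a batchMaxCount of 5 turns six queued writes into requests of 5 and 1, which is what the verifications above pin down. BatchingSketch.partition is a hypothetical helper, not Beam's code.

import java.util.ArrayList;
import java.util.List;

public final class BatchingSketch {

  private BatchingSketch() {}

  // Splits queued elements into request-sized groups of at most batchMaxCount entries.
  public static <T> List<List<T>> partition(List<T> elements, int batchMaxCount) {
    if (batchMaxCount <= 0) {
      throw new IllegalArgumentException("batchMaxCount must be positive");
    }
    List<List<T>> batches = new ArrayList<>();
    for (int i = 0; i < elements.size(); i += batchMaxCount) {
      int end = Math.min(i + batchMaxCount, elements.size());
      batches.add(new ArrayList<>(elements.subList(i, end)));
    }
    return batches;
  }
}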
Use of org.apache.beam.sdk.io.gcp.firestore.RpcQos.RpcWriteAttempt.Element in project beam by apache.
From the class BaseFirestoreV1WriteFnTest, method attemptsExhaustedForRetryableError:
@Override
@Test
public final void attemptsExhaustedForRetryableError() throws Exception {
  Instant attemptStart = Instant.ofEpochMilli(0);
  Instant rpc1Start = Instant.ofEpochMilli(1);
  Instant rpc1End = Instant.ofEpochMilli(2);
  Instant rpc2Start = Instant.ofEpochMilli(3);
  Instant rpc2End = Instant.ofEpochMilli(4);
  Instant rpc3Start = Instant.ofEpochMilli(5);
  Instant rpc3End = Instant.ofEpochMilli(6);
  Write write = newWrite();
  Element<Write> element1 = new WriteElement(0, write, window);
  when(ff.getFirestoreStub(any())).thenReturn(stub);
  when(ff.getRpcQos(any())).thenReturn(rpcQos);
  when(rpcQos.newWriteAttempt(FirestoreV1RpcAttemptContexts.V1FnRpcAttemptContext.BatchWrite))
      .thenReturn(attempt);
  when(stub.batchWriteCallable()).thenReturn(callable);
  FlushBuffer<Element<Write>> flushBuffer = spy(newFlushBuffer(rpcQosOptions));
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(attemptStart)).thenReturn(flushBuffer);
  when(flushBuffer.offer(element1)).thenReturn(true);
  when(flushBuffer.iterator()).thenReturn(newArrayList(element1).iterator());
  when(flushBuffer.getBufferedElementsCount()).thenReturn(1);
  when(flushBuffer.isFull()).thenReturn(true);
  when(callable.call(any())).thenThrow(RETRYABLE_ERROR, RETRYABLE_ERROR, RETRYABLE_ERROR);
  doNothing().when(attempt).recordWriteCounts(any(), anyInt(), anyInt());
  doNothing()
      .doNothing()
      .doThrow(RETRYABLE_ERROR)
      .when(attempt)
      .checkCanRetry(any(), eq(RETRYABLE_ERROR));
  when(processContext.element()).thenReturn(write);
  try {
    runFunction(getFn(clock, ff, rpcQosOptions, CounterFactory.DEFAULT, DistributionFactory.DEFAULT));
    fail("Expected ApiException to be thrown after exhausted attempts");
  } catch (ApiException e) {
    assertSame(RETRYABLE_ERROR, e);
  }
  verify(attempt, times(1)).awaitSafeToProceed(attemptStart);
  verify(attempt, times(1)).recordRequestStart(rpc1Start, 1);
  verify(attempt, times(1)).recordWriteCounts(rpc1End, 0, 1);
  verify(attempt, times(1)).recordRequestStart(rpc2Start, 1);
  verify(attempt, times(1)).recordWriteCounts(rpc2End, 0, 1);
  verify(attempt, times(1)).recordRequestStart(rpc3Start, 1);
  verify(attempt, times(1)).recordWriteCounts(rpc3End, 0, 1);
  verify(attempt, times(0)).recordWriteCounts(any(), gt(0), anyInt());
  verify(attempt, never()).completeSuccess();
}
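A rough sketch of the retry shape exercised here: each failed RPC is checked against an attempt budget and, once the budget is spent, the original exception is rethrown unchanged, which is what the assertSame check above verifies. RetryLoopSketch and callWithRetries are illustrative names; Beam's actual logic lives in RpcQos/RpcWriteAttempt and applies backoff and QoS accounting between attempts.

import java.util.concurrent.Callable;

public final class RetryLoopSketch {

  private RetryLoopSketch() {}

  public static <T> T callWithRetries(Callable<T> rpc, int maxAttempts) throws Exception {
    if (maxAttempts < 1) {
      throw new IllegalArgumentException("maxAttempts must be at least 1");
    }
    for (int attempt = 1; ; attempt++) {
      try {
        return rpc.call();
      } catch (Exception e) {
        if (attempt >= maxAttempts) {
          // Budget exhausted: surface the same exception instance to the caller.
          throw e;
        }
        // Otherwise fall through and try again; a real implementation would back off here.
      }
    }
  }
}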
Use of org.apache.beam.sdk.io.gcp.firestore.RpcQos.RpcWriteAttempt.Element in project beam by apache.
From the class FirestoreV1FnBatchWriteWithSummaryTest, method nonRetryableWriteResultStopsAttempts:
@Test
public void nonRetryableWriteResultStopsAttempts() throws Exception {
  Write write0 = FirestoreProtoHelpers.newWrite(0);
  Write write1 =
      FirestoreProtoHelpers.newWrite(1).toBuilder()
          .setCurrentDocument(Precondition.newBuilder().setExists(false).build())
          .build();
  BatchWriteRequest expectedRequest1 =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write0)
          .addWrites(write1)
          .build();
  BatchWriteResponse response1 =
      BatchWriteResponse.newBuilder()
          .addStatus(STATUS_OK)
          .addWriteResults(
              WriteResult.newBuilder().setUpdateTime(Timestamp.newBuilder().setSeconds(1).build()).build())
          .addStatus(statusForCode(Code.ALREADY_EXISTS))
          .addWriteResults(WriteResult.newBuilder().build())
          .build();
  RpcQosOptions options = rpcQosOptions.toBuilder().withMaxAttempts(1).withBatchMaxCount(2).build();
  when(processContext.element())
      .thenReturn(write0, write1)
      .thenThrow(new IllegalStateException("too many calls"));
  when(rpcQos.newWriteAttempt(any())).thenReturn(attempt);
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(any()))
      .thenReturn(newFlushBuffer(options))
      .thenReturn(newFlushBuffer(options))
      .thenThrow(new IllegalStateException("too many attempt#newFlushBuffer calls"));
  when(attempt.isCodeRetryable(Code.ALREADY_EXISTS)).thenReturn(false);
  ArgumentCaptor<BatchWriteRequest> requestCaptor1 = ArgumentCaptor.forClass(BatchWriteRequest.class);
  when(callable.call(requestCaptor1.capture())).thenReturn(response1);
  BaseBatchWriteFn<WriteSuccessSummary> fn =
      new BatchWriteFnWithSummary(clock, ff, options, CounterFactory.DEFAULT);
  fn.setup();
  fn.startBundle(startBundleContext);
  // write0
  fn.processElement(processContext, window);
  try {
    // write1
    fn.processElement(processContext, window);
    fail("expected an exception when trying to apply a write with a failed precondition");
  } catch (FailedWritesException e) {
    List<WriteFailure> writeFailures = e.getWriteFailures();
    assertEquals(1, writeFailures.size());
    WriteFailure failure = writeFailures.get(0);
    assertEquals(Code.ALREADY_EXISTS.getNumber(), failure.getStatus().getCode());
    assertEquals(write1, failure.getWrite());
    assertEquals(WriteResult.getDefaultInstance(), failure.getWriteResult());
  }
  assertEquals(expectedRequest1, requestCaptor1.getValue());
  List<WriteElement> actualWrites = new ArrayList<>(fn.writes);
  Instant flush1Attempt1Begin = Instant.ofEpochMilli(1);
  Instant flush1Attempt1RpcStart = Instant.ofEpochMilli(2);
  Instant flush1Attempt1RpcEnd = Instant.ofEpochMilli(3);
  assertTrue(actualWrites.isEmpty());
  fn.finishBundle(finishBundleContext);
  verify(attempt, times(1)).newFlushBuffer(flush1Attempt1Begin);
  verify(attempt, times(1)).recordRequestStart(flush1Attempt1RpcStart, 2);
  verify(attempt, times(1)).recordWriteCounts(flush1Attempt1RpcEnd, 1, 1);
  verify(attempt, never()).completeSuccess();
  verify(callable, times(1)).call(any());
  verifyNoMoreInteractions(callable);
}
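A simplified sketch of the triage this test verifies: per-write statuses in a batch response are inspected, retryable failures could be re-queued, and a non-retryable status (such as ALREADY_EXISTS from a failed precondition) is surfaced to the caller instead of being retried. Every type below is a hypothetical stand-in rather than Beam's FailedWritesException/WriteFailure.

import java.util.ArrayList;
import java.util.List;
import java.util.function.IntPredicate;

public final class WriteResultTriageSketch {

  public static final int OK = 0;

  public static class FailedWrite {
    public final int index;
    public final int statusCode;

    FailedWrite(int index, int statusCode) {
      this.index = index;
      this.statusCode = statusCode;
    }
  }

  public static class FailedWritesError extends RuntimeException {
    public final List<FailedWrite> failures;

    FailedWritesError(List<FailedWrite> failures) {
      super("some writes failed with a non-retryable status");
      this.failures = failures;
    }
  }

  private WriteResultTriageSketch() {}

  // statusCodes.get(i) is the per-write status for write i; isCodeRetryable stands in,
  // in spirit only, for the RpcWriteAttempt#isCodeRetryable check stubbed in the test.
  public static List<Integer> triage(List<Integer> statusCodes, IntPredicate isCodeRetryable) {
    List<Integer> retryableIndexes = new ArrayList<>();
    List<FailedWrite> failed = new ArrayList<>();
    for (int i = 0; i < statusCodes.size(); i++) {
      int code = statusCodes.get(i);
      if (code == OK) {
        continue; // success, nothing to do
      }
      if (isCodeRetryable.test(code)) {
        retryableIndexes.add(i); // re-queue this write for a later attempt
      } else {
        failed.add(new FailedWrite(i, code)); // e.g. ALREADY_EXISTS on a create precondition
      }
    }
    if (!failed.isEmpty()) {
      throw new FailedWritesError(failed); // stop further attempts and report the failures
    }
    return retryableIndexes;
  }
}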