Use of org.apache.beam.sdk.io.gcp.firestore.FirestoreV1WriteFn.WriteElement in project beam by apache.
From the class BaseFirestoreV1WriteFnTest, the method endToEnd_partialSuccessReturnsWritesToQueue:
@Test
public final void endToEnd_partialSuccessReturnsWritesToQueue() throws Exception {
  Write write0 = newWrite(0);
  Write write1 = newWrite(1);
  Write write2 = newWrite(2);
  Write write3 = newWrite(3);
  Write write4 = newWrite(4);
  BatchWriteRequest expectedRequest1 =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write0)
          .addWrites(write1)
          .addWrites(write2)
          .addWrites(write3)
          .addWrites(write4)
          .build();
  BatchWriteResponse response1 =
      BatchWriteResponse.newBuilder()
          .addStatus(STATUS_OK)
          .addStatus(statusForCode(Code.INVALID_ARGUMENT))
          .addStatus(statusForCode(Code.FAILED_PRECONDITION))
          .addStatus(statusForCode(Code.UNAUTHENTICATED))
          .addStatus(STATUS_OK)
          .build();
  BatchWriteRequest expectedRequest2 =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write1)
          .addWrites(write2)
          .addWrites(write3)
          .build();
  BatchWriteResponse response2 =
      BatchWriteResponse.newBuilder()
          .addStatus(STATUS_OK)
          .addStatus(STATUS_OK)
          .addStatus(STATUS_OK)
          .build();
  RpcQosOptions options =
      rpcQosOptions.toBuilder().withMaxAttempts(1).withBatchMaxCount(5).build();
  when(processContext.element())
      .thenReturn(write0, write1, write2, write3, write4)
      .thenThrow(new IllegalStateException("too many calls"));
  when(rpcQos.newWriteAttempt(any())).thenReturn(attempt);
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(any()))
      .thenAnswer(invocation -> newFlushBuffer(options));
  when(attempt.isCodeRetryable(Code.INVALID_ARGUMENT)).thenReturn(true);
  when(attempt.isCodeRetryable(Code.FAILED_PRECONDITION)).thenReturn(true);
  when(attempt.isCodeRetryable(Code.UNAUTHENTICATED)).thenReturn(true);
  ArgumentCaptor<BatchWriteRequest> requestCaptor1 =
      ArgumentCaptor.forClass(BatchWriteRequest.class);
  when(callable.call(requestCaptor1.capture())).thenReturn(response1);
  FnT fn = getFn(clock, ff, options, CounterFactory.DEFAULT, DistributionFactory.DEFAULT);
  fn.setup();
  fn.startBundle(startBundleContext);
  // write0
  fn.processElement(processContext, window);
  // write1
  fn.processElement(processContext, window);
  // write2
  fn.processElement(processContext, window);
  // write3
  fn.processElement(processContext, window);
  // write4
  fn.processElement(processContext, window);
  assertEquals(expectedRequest1, requestCaptor1.getValue());
  List<WriteElement> expectedRemainingWrites =
      newArrayList(
          new WriteElement(1, write1, window),
          new WriteElement(2, write2, window),
          new WriteElement(3, write3, window));
  List<WriteElement> actualWrites = new ArrayList<>(fn.writes);
  assertEquals(expectedRemainingWrites, actualWrites);
  assertEquals(5, fn.queueNextEntryPriority);
  ArgumentCaptor<BatchWriteRequest> requestCaptor2 =
      ArgumentCaptor.forClass(BatchWriteRequest.class);
  when(callable.call(requestCaptor2.capture())).thenReturn(response2);
  fn.finishBundle(finishBundleContext);
  assertEquals(expectedRequest2, requestCaptor2.getValue());
  assertEquals(0, fn.queueNextEntryPriority);
  verify(attempt, times(1)).recordRequestStart(any(), eq(5));
  verify(attempt, times(1)).recordWriteCounts(any(), eq(2), eq(3));
  verify(attempt, times(1)).recordRequestStart(any(), eq(3));
  verify(attempt, times(1)).recordWriteCounts(any(), eq(3), eq(0));
  verify(attempt, times(1)).completeSuccess();
  verify(callable, times(2)).call(any());
  verifyNoMoreInteractions(callable);
}
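For context, the batching and retry behavior exercised by this test is driven by RpcQosOptions. Below is a minimal sketch of how similar options would be set on the public write transform, assuming the FirestoreIO.v1() builder API; the PCollection name "writes" and the option values are illustrative, not taken from the test.

// Minimal sketch, assuming the public FirestoreIO.v1() builder API.
RpcQosOptions qosOptions =
    RpcQosOptions.newBuilder()
        .withMaxAttempts(5) // give up on a batch after 5 attempts
        .withBatchMaxCount(500) // at most 500 writes per BatchWrite RPC
        .build();

// "writes" is an illustrative PCollection<com.google.firestore.v1.Write>.
writes.apply(
    "batchWrite",
    FirestoreIO.v1().write().batchWrite().withRpcQosOptions(qosOptions).build());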
Use of org.apache.beam.sdk.io.gcp.firestore.FirestoreV1WriteFn.WriteElement in project beam by apache.
From the class FirestoreV1FnBatchWriteWithSummaryTest, the method nonRetryableWriteResultStopsAttempts:
@Test
public void nonRetryableWriteResultStopsAttempts() throws Exception {
  Write write0 = FirestoreProtoHelpers.newWrite(0);
  Write write1 =
      FirestoreProtoHelpers.newWrite(1).toBuilder()
          .setCurrentDocument(Precondition.newBuilder().setExists(false).build())
          .build();
  BatchWriteRequest expectedRequest1 =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write0)
          .addWrites(write1)
          .build();
  BatchWriteResponse response1 =
      BatchWriteResponse.newBuilder()
          .addStatus(STATUS_OK)
          .addWriteResults(
              WriteResult.newBuilder()
                  .setUpdateTime(Timestamp.newBuilder().setSeconds(1).build())
                  .build())
          .addStatus(statusForCode(Code.ALREADY_EXISTS))
          .addWriteResults(WriteResult.newBuilder().build())
          .build();
  RpcQosOptions options =
      rpcQosOptions.toBuilder().withMaxAttempts(1).withBatchMaxCount(2).build();
  when(processContext.element())
      .thenReturn(write0, write1)
      .thenThrow(new IllegalStateException("too many calls"));
  when(rpcQos.newWriteAttempt(any())).thenReturn(attempt);
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(any()))
      .thenReturn(newFlushBuffer(options))
      .thenReturn(newFlushBuffer(options))
      .thenThrow(new IllegalStateException("too many attempt#newFlushBuffer calls"));
  when(attempt.isCodeRetryable(Code.ALREADY_EXISTS)).thenReturn(false);
  ArgumentCaptor<BatchWriteRequest> requestCaptor1 =
      ArgumentCaptor.forClass(BatchWriteRequest.class);
  when(callable.call(requestCaptor1.capture())).thenReturn(response1);
  BaseBatchWriteFn<WriteSuccessSummary> fn =
      new BatchWriteFnWithSummary(clock, ff, options, CounterFactory.DEFAULT);
  fn.setup();
  fn.startBundle(startBundleContext);
  // write0
  fn.processElement(processContext, window);
  try {
    // write1
    fn.processElement(processContext, window);
    fail("expected an exception when trying to apply a write with a failed precondition");
  } catch (FailedWritesException e) {
    List<WriteFailure> writeFailures = e.getWriteFailures();
    assertEquals(1, writeFailures.size());
    WriteFailure failure = writeFailures.get(0);
    assertEquals(Code.ALREADY_EXISTS.getNumber(), failure.getStatus().getCode());
    assertEquals(write1, failure.getWrite());
    assertEquals(WriteResult.getDefaultInstance(), failure.getWriteResult());
  }
  assertEquals(expectedRequest1, requestCaptor1.getValue());
  List<WriteElement> actualWrites = new ArrayList<>(fn.writes);
  Instant flush1Attempt1Begin = Instant.ofEpochMilli(1);
  Instant flush1Attempt1RpcStart = Instant.ofEpochMilli(2);
  Instant flush1Attempt1RpcEnd = Instant.ofEpochMilli(3);
  assertTrue(actualWrites.isEmpty());
  fn.finishBundle(finishBundleContext);
  verify(attempt, times(1)).newFlushBuffer(flush1Attempt1Begin);
  verify(attempt, times(1)).recordRequestStart(flush1Attempt1RpcStart, 2);
  verify(attempt, times(1)).recordWriteCounts(flush1Attempt1RpcEnd, 1, 1);
  verify(attempt, never()).completeSuccess();
  verify(callable, times(1)).call(any());
  verifyNoMoreInteractions(callable);
}
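This test runs against BatchWriteFnWithSummary, where a non-retryable status (here ALREADY_EXISTS on a write whose precondition requires the document not to exist) surfaces as a thrown FailedWritesException. When failed writes should be captured rather than fail the bundle, the connector's dead-letter-queue variant can be used instead; a minimal sketch, assuming the withDeadLetterQueue() builder method and an illustrative input PCollection named "writes":

// Minimal sketch; "writes" is an illustrative PCollection<Write>.
PCollection<WriteFailure> failures =
    writes.apply(
        FirestoreIO.v1().write().batchWrite().withDeadLetterQueue().build());
// Non-retryable failures such as ALREADY_EXISTS are emitted as WriteFailure elements
// here instead of throwing FailedWritesException inside the write step.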
Use of org.apache.beam.sdk.io.gcp.firestore.FirestoreV1WriteFn.WriteElement in project beam by apache.
From the class RpcQosTest, the method doTest_shouldFlush_numWritesHigherThanBatchCount:
private void doTest_shouldFlush_numWritesHigherThanBatchCount(
    boolean expectFlush, int batchCount, Consumer<RpcQos> preAttempt) {
  RpcQosOptions rpcQosOptions =
      options.toBuilder().withBatchInitialCount(10).withBatchMaxCount(10).unsafeBuild();
  RpcQos qos =
      new RpcQosImpl(rpcQosOptions, random, sleeper, counterFactory, distributionFactory);
  preAttempt.accept(qos);
  RpcWriteAttempt attempt = qos.newWriteAttempt(RPC_ATTEMPT_CONTEXT);
  FlushBuffer<Element<Write>> accumulator = attempt.newFlushBuffer(monotonicClock.instant());
  for (int i = 0; i < batchCount; i++) {
    accumulator.offer(new WriteElement(i, newWrite(), window));
  }
  if (expectFlush) {
    assertTrue(accumulator.isFull());
    assertEquals(10, accumulator.getBufferedElementsCount());
  } else {
    assertFalse(accumulator.isFull());
    assertEquals(batchCount, accumulator.getBufferedElementsCount());
  }
}
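A test method invoking this helper could look roughly like the following; the test name below is hypothetical, and the no-op Consumer stands in for any pre-attempt setup a caller might perform:

@Test
public void shouldFlush_whenOfferedWritesExceedBatchMaxCount() { // hypothetical name
  // 11 offered writes against a max batch count of 10: the buffer reports full
  // and caps its contents at 10 elements.
  doTest_shouldFlush_numWritesHigherThanBatchCount(true, 11, qos -> {});
}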