Use of com.google.firestore.v1.BatchWriteResponse in project beam by apache.
Class FirestoreV1FnBatchWriteWithDeadLetterQueueTest, method nonRetryableWriteIsOutput: verifies that a write rejected with a non-retryable status (ALREADY_EXISTS) is routed to the dead-letter output as a WriteFailure instead of being retried.
@Test
public void nonRetryableWriteIsOutput() throws Exception {
  Write write0 = FirestoreProtoHelpers.newWrite(0);
  Write write1 =
      FirestoreProtoHelpers.newWrite(1)
          .toBuilder()
          .setCurrentDocument(Precondition.newBuilder().setExists(false).build())
          .build();
  BatchWriteRequest expectedRequest1 =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write0)
          .addWrites(write1)
          .build();
  // One status and write result per write: write0 succeeds, write1 fails with ALREADY_EXISTS.
  BatchWriteResponse response1 =
      BatchWriteResponse.newBuilder()
          .addStatus(STATUS_OK)
          .addWriteResults(
              WriteResult.newBuilder()
                  .setUpdateTime(Timestamp.newBuilder().setSeconds(1).build())
                  .build())
          .addStatus(statusForCode(Code.ALREADY_EXISTS))
          .addWriteResults(WriteResult.newBuilder().build())
          .build();

  RpcQosOptions options = rpcQosOptions.toBuilder().withMaxAttempts(1).withBatchMaxCount(2).build();

  when(processContext.element())
      .thenReturn(write0, write1)
      .thenThrow(new IllegalStateException("too many calls"));
  when(rpcQos.newWriteAttempt(any())).thenReturn(attempt);
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(any()))
      .thenReturn(newFlushBuffer(options))
      .thenReturn(newFlushBuffer(options))
      .thenThrow(new IllegalStateException("too many attempt#newFlushBuffer calls"));
  when(attempt.isCodeRetryable(Code.ALREADY_EXISTS)).thenReturn(false);

  ArgumentCaptor<BatchWriteRequest> requestCaptor1 = ArgumentCaptor.forClass(BatchWriteRequest.class);
  when(callable.call(requestCaptor1.capture())).thenReturn(response1);

  BatchWriteFnWithDeadLetterQueue fn =
      getFn(clock, ff, options, CounterFactory.DEFAULT, DistributionFactory.DEFAULT);
  fn.setup();
  fn.startBundle(startBundleContext);
  // write0
  fn.processElement(processContext, window);
  ArgumentCaptor<WriteFailure> writeFailureCapture = ArgumentCaptor.forClass(WriteFailure.class);
  doNothing().when(processContext).outputWithTimestamp(writeFailureCapture.capture(), any());
  // write1
  fn.processElement(processContext, window);

  // The non-retryable write is emitted as a WriteFailure rather than being re-enqueued.
  WriteFailure failure = writeFailureCapture.getValue();
  assertEquals(Code.ALREADY_EXISTS.getNumber(), failure.getStatus().getCode());
  assertEquals(write1, failure.getWrite());
  assertEquals(WriteResult.getDefaultInstance(), failure.getWriteResult());
  assertEquals(expectedRequest1, requestCaptor1.getValue());

  List<WriteElement> actualWrites = new ArrayList<>(fn.writes);
  assertTrue(actualWrites.isEmpty());

  fn.finishBundle(finishBundleContext);

  verify(attempt, times(1)).recordRequestStart(any(), eq(2));
  verify(attempt, times(1)).recordWriteCounts(any(), eq(1), eq(1));
  verify(attempt, never()).completeSuccess();
  verify(callable, times(1)).call(any());
  verifyNoMoreInteractions(callable);
}
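The tests in this listing refer to STATUS_OK and statusForCode(...) without showing them; they are defined elsewhere in the test suite. A minimal sketch of what such helpers could look like, assuming they simply wrap com.google.rpc.Status values keyed by com.google.rpc.Code (the names and placement here are illustrative, not the actual Beam definitions):

import com.google.rpc.Code;
import com.google.rpc.Status;

final class StatusHelpers {
  // Assumed helper: build a com.google.rpc.Status carrying only the numeric code,
  // matching the per-write statuses asserted on in the test above.
  static Status statusForCode(Code code) {
    return Status.newBuilder().setCode(code.getNumber()).build();
  }

  static final Status STATUS_OK = statusForCode(Code.OK);
}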
Use of com.google.firestore.v1.BatchWriteResponse in project beam by apache.
Class BaseFirestoreV1WriteFnTest, method endToEnd_deadlineExceededOnAnIndividualWriteResultsInThrottling: verifies that DEADLINE_EXCEEDED statuses on individual writes cause the adaptive throttler to record throttling time.
@Test
public final void endToEnd_deadlineExceededOnAnIndividualWriteResultsInThrottling() throws Exception {
  final long totalDocCount = 1_000_000;
  final int numWorkers = 100;
  final long docCount = totalDocCount / numWorkers;
  LOG.info("docCount = {}", docCount);
  RpcQosOptions options =
      rpcQosOptions
          .toBuilder()
          .withHintMaxNumWorkers(numWorkers)
          .withSamplePeriod(Duration.standardMinutes(10))
          .withReportDiagnosticMetrics()
          .build();
  LOG.debug("options = {}", options);

  FirestoreStatefulComponentFactory ff = mock(FirestoreStatefulComponentFactory.class);
  when(ff.getFirestoreStub(any())).thenReturn(stub);
  Random random = new Random(12345);
  TestClock clock = new TestClock(Instant.EPOCH, Duration.standardSeconds(1));
  Sleeper sleeper = millis -> clock.setNext(advanceClockBy(Duration.millis(millis)));
  RpcQosImpl qos =
      new RpcQosImpl(
          options, random, sleeper, metricsFixture.counterFactory, metricsFixture.distributionFactory);
  // Spy on the real RpcQosImpl by delegating every call on the mock to it via reflection.
  RpcQos qosSpy =
      mock(
          RpcQos.class,
          invocation -> {
            Method method = invocation.getMethod();
            LOG.debug("method = {}", method);
            Method actualMethod = qos.getClass().getMethod(method.getName(), method.getParameterTypes());
            return actualMethod.invoke(qos, invocation.getArguments());
          });
  when(ff.getRpcQos(options)).thenReturn(qosSpy);

  int defaultDocumentWriteLatency = 30;
  final AtomicLong writeCounter = new AtomicLong();
  when(processContext.element()).thenAnswer(invocation -> newWrite(writeCounter.getAndIncrement()));
  // For the first 20 minutes every write succeeds quickly; after that, writes whose simulated
  // latency exceeds 300 ms come back DEADLINE_EXCEEDED, which should trigger throttling.
  when(callable.call(any()))
      .thenAnswer(
          new Answer<BatchWriteResponse>() {
            private final Random rand = new Random(84572908);
            private final Instant threshold =
                Instant.ofEpochMilli(Duration.standardMinutes(20).getMillis());

            @Override
            public BatchWriteResponse answer(InvocationOnMock invocation) throws Throwable {
              BatchWriteRequest request = invocation.getArgument(0, BatchWriteRequest.class);
              LOG.debug("request = {}", request);
              long requestDurationMs = 0;
              BatchWriteResponse.Builder builder = BatchWriteResponse.newBuilder();
              for (Write ignored : request.getWritesList()) {
                builder.addWriteResults(WriteResult.newBuilder().build());
                if (clock.prev.isBefore(threshold)) {
                  requestDurationMs += defaultDocumentWriteLatency;
                  builder.addStatus(STATUS_OK);
                } else {
                  int latency = rand.nextInt(1500);
                  LOG.debug("latency = {}", latency);
                  if (latency > 300) {
                    builder.addStatus(STATUS_DEADLINE_EXCEEDED);
                  } else {
                    builder.addStatus(STATUS_OK);
                  }
                  requestDurationMs += latency;
                }
              }
              clock.setNext(advanceClockBy(Duration.millis(requestDurationMs)));
              return builder.build();
            }
          });
  LOG.info(
      "### parameters: {defaultDocumentWriteLatency: {}, rpcQosOptions: {}}",
      defaultDocumentWriteLatency,
      options);

  FnT fn = getFn(clock, ff, options, metricsFixture.counterFactory, metricsFixture.distributionFactory);
  fn.setup();
  fn.startBundle(startBundleContext);
  while (writeCounter.get() < docCount) {
    fn.processElement(processContext, window);
  }
  fn.finishBundle(finishBundleContext);

  LOG.info("writeCounter = {}", writeCounter.get());
  LOG.info("clock.prev = {}", clock.prev);

  // The adaptive throttler must have recorded at least one throttling sample.
  MyDistribution qosAdaptiveThrottlerThrottlingMs =
      metricsFixture.distributions.get("qos_adaptiveThrottler_throttlingMs");
  assertNotNull(qosAdaptiveThrottlerThrottlingMs);
  List<Long> updateInvocations = qosAdaptiveThrottlerThrottlingMs.updateInvocations;
  assertFalse(updateInvocations.isEmpty());
}
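The test above drives time with a TestClock and an advanceClockBy(...) helper defined in the surrounding fixture and not shown here. A rough sketch of the shape they appear to have, based only on how they are used above (deterministic ticks, with setNext overriding the next advance); this is a guess at the fixture, not Beam's actual helper:

import java.util.function.Function;
import org.joda.time.Duration;
import org.joda.time.Instant;

final class TestClock {
  Instant prev;                               // last instant handed out; read directly by the test
  private final Duration defaultTick;
  private Function<Instant, Instant> next;

  TestClock(Instant start, Duration defaultTick) {
    this.prev = start;
    this.defaultTick = defaultTick;
    this.next = i -> i.plus(defaultTick);
  }

  // Override how the clock advances on its next read, e.g. to simulate request latency or a sleep.
  void setNext(Function<Instant, Instant> next) {
    this.next = next;
  }

  Instant instant() {
    prev = next.apply(prev);
    next = i -> i.plus(defaultTick);          // fall back to the default tick afterwards
    return prev;
  }

  // Placed here for the sketch; in the test it is referenced as an unqualified static helper.
  static Function<Instant, Instant> advanceClockBy(Duration d) {
    return i -> i.plus(d);
  }
}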
Use of com.google.firestore.v1.BatchWriteResponse in project beam by apache.
Class BaseFirestoreV1WriteFnTest, method endToEnd_success: a single write is flushed in one BatchWriteRequest and its OK response is recorded as a successful write.
@Test
public final void endToEnd_success() throws Exception {
  Write write = newWrite();
  BatchWriteRequest expectedRequest =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write)
          .build();
  BatchWriteResponse response = BatchWriteResponse.newBuilder().addStatus(STATUS_OK).build();
  Element<Write> element1 = new WriteElement(0, write, window);
  Instant attemptStart = Instant.ofEpochMilli(0);
  Instant rpcStart = Instant.ofEpochMilli(1);
  Instant rpcEnd = Instant.ofEpochMilli(2);
  RpcQosOptions options = rpcQosOptions.toBuilder().withBatchMaxCount(1).build();
  FlushBuffer<Element<Write>> flushBuffer = spy(newFlushBuffer(options));

  when(processContext.element()).thenReturn(write);
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(attemptStart)).thenReturn(flushBuffer);
  ArgumentCaptor<BatchWriteRequest> requestCaptor = ArgumentCaptor.forClass(BatchWriteRequest.class);
  when(callable.call(requestCaptor.capture())).thenReturn(response);

  runFunction(getFn(clock, ff, options, CounterFactory.DEFAULT, DistributionFactory.DEFAULT));

  assertEquals(expectedRequest, requestCaptor.getValue());
  verify(flushBuffer, times(1)).offer(element1);
  verify(flushBuffer, times(1)).isFull();
  verify(attempt, times(1)).recordRequestStart(rpcStart, 1);
  verify(attempt, times(1)).recordWriteCounts(rpcEnd, 1, 0);
  verify(attempt, never()).recordWriteCounts(any(), anyInt(), gt(0));
  verify(attempt, never()).checkCanRetry(any(), any());
}
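Both this test and the dead-letter test above lean on the BatchWrite contract that the response carries one Status (and WriteResult) per Write, in request order. A short illustrative helper, not Beam's implementation, that walks that pairing to collect the writes whose status is not OK:

import com.google.firestore.v1.BatchWriteRequest;
import com.google.firestore.v1.BatchWriteResponse;
import com.google.firestore.v1.Write;
import com.google.rpc.Code;
import java.util.ArrayList;
import java.util.List;

final class BatchWriteResponses {
  // Pair each Write with the Status at the same index and keep the non-OK ones.
  static List<Write> failedWrites(BatchWriteRequest request, BatchWriteResponse response) {
    List<Write> failed = new ArrayList<>();
    for (int i = 0; i < response.getStatusCount(); i++) {
      if (response.getStatus(i).getCode() != Code.OK.getNumber()) {
        failed.add(request.getWrites(i));
      }
    }
    return failed;
  }
}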
Use of com.google.firestore.v1.BatchWriteResponse in project beam by apache.
Class BaseFirestoreV1WriteFnTest, method endToEnd_awaitSafeToProceed_falseIsTerminalForAttempt: verifies that an attempt whose awaitSafeToProceed returns false does no further work, leaving the write to be flushed by a later attempt.
@Test
public final void endToEnd_awaitSafeToProceed_falseIsTerminalForAttempt() throws Exception {
  RpcQosOptions options = rpcQosOptions.toBuilder().withBatchMaxCount(2).build();
  Instant rpc1Start = Instant.ofEpochMilli(3);
  Instant rpc1End = Instant.ofEpochMilli(4);
  ArgumentCaptor<BatchWriteRequest> requestCaptor = ArgumentCaptor.forClass(BatchWriteRequest.class);
  Write write = newWrite();
  BatchWriteRequest expectedRequest =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write)
          .build();
  BatchWriteResponse response = BatchWriteResponse.newBuilder().addStatus(STATUS_OK).build();
  when(processContext.element()).thenReturn(write);

  // process element attempt 1
  when(attempt.awaitSafeToProceed(any()))
      .thenReturn(false)
      .thenThrow(new IllegalStateException("too many attempt1#awaitSafeToProceed"));
  // process element attempt 2
  when(attempt2.awaitSafeToProceed(any()))
      .thenReturn(true)
      .thenThrow(new IllegalStateException("too many attempt2#awaitSafeToProceed"));
  when(attempt2.<Write, Element<Write>>newFlushBuffer(any()))
      .thenAnswer(invocation -> newFlushBuffer(options));
  // finish bundle attempt
  RpcQos.RpcWriteAttempt finishBundleAttempt = mock(RpcWriteAttempt.class);
  when(finishBundleAttempt.awaitSafeToProceed(any()))
      .thenReturn(true, true)
      .thenThrow(new IllegalStateException("too many finishBundleAttempt#awaitSafeToProceed"));
  when(finishBundleAttempt.<Write, Element<Write>>newFlushBuffer(any()))
      .thenAnswer(invocation -> newFlushBuffer(options));
  when(rpcQos.newWriteAttempt(any())).thenReturn(attempt, attempt2, finishBundleAttempt);
  when(callable.call(requestCaptor.capture())).thenReturn(response);

  FnT fn = getFn(clock, ff, options, CounterFactory.DEFAULT, DistributionFactory.DEFAULT);
  runFunction(fn);

  assertEquals(expectedRequest, requestCaptor.getValue());
  verify(attempt, times(1)).awaitSafeToProceed(any());
  verifyNoMoreInteractions(attempt);
  verify(attempt2, times(1)).awaitSafeToProceed(any());
  verify(attempt2, times(1)).newFlushBuffer(any());
  verifyNoMoreInteractions(attempt2);
  verify(finishBundleAttempt, times(1)).recordRequestStart(rpc1Start, 1);
  verify(finishBundleAttempt, times(1)).recordWriteCounts(rpc1End, 1, 0);
  verify(finishBundleAttempt, times(1)).completeSuccess();
  verify(finishBundleAttempt, never()).checkCanRetry(any(), any());
}
Use of com.google.firestore.v1.BatchWriteResponse in project beam by apache.
Class BaseFirestoreV1WriteFnTest, method endToEnd_maxBatchSizeRespected: verifies that six writes are split into a batch of five (the configured maximum) followed by a batch of one.
@Test
public final void endToEnd_maxBatchSizeRespected() throws Exception {
  Instant enqueue0 = Instant.ofEpochMilli(0);
  Instant enqueue1 = Instant.ofEpochMilli(1);
  Instant enqueue2 = Instant.ofEpochMilli(2);
  Instant enqueue3 = Instant.ofEpochMilli(3);
  Instant enqueue4 = Instant.ofEpochMilli(4);
  Instant group1Rpc1Start = Instant.ofEpochMilli(5);
  Instant group1Rpc1End = Instant.ofEpochMilli(6);
  Instant enqueue5 = Instant.ofEpochMilli(7);
  Instant finalFlush = Instant.ofEpochMilli(8);
  Instant group2Rpc1Start = Instant.ofEpochMilli(9);
  Instant group2Rpc1End = Instant.ofEpochMilli(10);

  Write write0 = newWrite(0);
  Write write1 = newWrite(1);
  Write write2 = newWrite(2);
  Write write3 = newWrite(3);
  Write write4 = newWrite(4);
  Write write5 = newWrite(5);
  int maxValuesPerGroup = 5;

  BatchWriteRequest.Builder builder =
      BatchWriteRequest.newBuilder().setDatabase("projects/testing-project/databases/(default)");
  // Group 1 holds the first five writes (the batch maximum); write5 spills into group 2.
  BatchWriteRequest expectedGroup1Request =
      builder
          .build()
          .toBuilder()
          .addWrites(write0)
          .addWrites(write1)
          .addWrites(write2)
          .addWrites(write3)
          .addWrites(write4)
          .build();
  BatchWriteRequest expectedGroup2Request = builder.build().toBuilder().addWrites(write5).build();
  BatchWriteResponse group1Response =
      BatchWriteResponse.newBuilder()
          .addStatus(STATUS_OK)
          .addStatus(STATUS_OK)
          .addStatus(STATUS_OK)
          .addStatus(STATUS_OK)
          .addStatus(STATUS_OK)
          .build();
  BatchWriteResponse group2Response = BatchWriteResponse.newBuilder().addStatus(STATUS_OK).build();

  RpcQosOptions options = rpcQosOptions.toBuilder().withBatchMaxCount(maxValuesPerGroup).build();
  FlushBuffer<Element<Write>> flushBuffer = spy(newFlushBuffer(options));
  FlushBuffer<Element<Write>> flushBuffer2 = spy(newFlushBuffer(options));

  when(processContext.element()).thenReturn(write0, write1, write2, write3, write4, write5);
  when(rpcQos.newWriteAttempt(any()))
      .thenReturn(attempt, attempt, attempt, attempt, attempt, attempt2, attempt2, attempt2)
      .thenThrow(new IllegalStateException("too many attempts"));
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt2.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(enqueue0)).thenReturn(newFlushBuffer(options));
  when(attempt.<Write, Element<Write>>newFlushBuffer(enqueue1)).thenReturn(newFlushBuffer(options));
  when(attempt.<Write, Element<Write>>newFlushBuffer(enqueue2)).thenReturn(newFlushBuffer(options));
  when(attempt.<Write, Element<Write>>newFlushBuffer(enqueue3)).thenReturn(newFlushBuffer(options));
  when(attempt.<Write, Element<Write>>newFlushBuffer(enqueue4)).thenReturn(flushBuffer);
  when(callable.call(expectedGroup1Request)).thenReturn(group1Response);
  when(attempt2.<Write, Element<Write>>newFlushBuffer(enqueue5)).thenReturn(newFlushBuffer(options));
  when(attempt2.<Write, Element<Write>>newFlushBuffer(finalFlush)).thenReturn(flushBuffer2);
  when(callable.call(expectedGroup2Request)).thenReturn(group2Response);

  runFunction(
      getFn(clock, ff, options, CounterFactory.DEFAULT, DistributionFactory.DEFAULT),
      maxValuesPerGroup + 1);

  verify(attempt, times(1)).recordRequestStart(group1Rpc1Start, 5);
  verify(attempt, times(1)).recordWriteCounts(group1Rpc1End, 5, 0);
  verify(attempt, times(1)).completeSuccess();
  verify(attempt2, times(1)).recordRequestStart(group2Rpc1Start, 1);
  verify(attempt2, times(1)).recordWriteCounts(group2Rpc1End, 1, 0);
  verify(attempt2, times(1)).completeSuccess();
  verify(callable, times(1)).call(expectedGroup1Request);
  verify(callable, times(1)).call(expectedGroup2Request);
  verifyNoMoreInteractions(callable);
  verify(flushBuffer, times(maxValuesPerGroup)).offer(any());
  verify(flushBuffer2, times(1)).offer(any());
}
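The knobs exercised above (withBatchMaxCount, withMaxAttempts, withHintMaxNumWorkers, withSamplePeriod) are RpcQosOptions builder methods. As a usage sketch, assuming RpcQosOptions.defaultOptions() as the starting point, a pipeline author would build the options roughly like this before handing them to the Firestore write transform:

import org.apache.beam.sdk.io.gcp.firestore.RpcQosOptions;
import org.joda.time.Duration;

final class QosConfigSketch {
  static RpcQosOptions batchingOptions() {
    return RpcQosOptions.defaultOptions()
        .toBuilder()
        .withBatchMaxCount(5)                           // cap each BatchWriteRequest at 5 writes
        .withMaxAttempts(1)                             // surface failures instead of retrying
        .withHintMaxNumWorkers(100)
        .withSamplePeriod(Duration.standardMinutes(10))
        .build();
  }
}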