Use of com.google.firestore.v1.Write in project beam by apache. (In this first example, Write is actually DatastoreIO's write transform rather than the Firestore proto.)
The class DatastoreV1Test, method testWriteValidationSucceedsWithProject.
@Test
public void testWriteValidationSucceedsWithProject() throws Exception {
  Write write = DatastoreIO.v1().write().withProjectId(PROJECT_ID);
  write.validate(null);
}
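For context, a minimal, hypothetical usage sketch of the transform validated above; the entity, kind, and project id below are illustrative and not taken from the test:

import com.google.datastore.v1.Entity;
import com.google.datastore.v1.Key;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.extensions.protobuf.ProtoCoder;
import org.apache.beam.sdk.io.gcp.datastore.DatastoreIO;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.values.PCollection;

public class DatastoreWriteSketch {
  public static void main(String[] args) {
    Pipeline pipeline = Pipeline.create(PipelineOptionsFactory.fromArgs(args).create());
    // A single illustrative entity with a complete key; real pipelines build entities upstream.
    Entity entity =
        Entity.newBuilder()
            .setKey(
                Key.newBuilder()
                    .addPath(Key.PathElement.newBuilder().setKind("ExampleKind").setName("example-id")))
            .build();
    PCollection<Entity> entities =
        pipeline.apply(Create.of(entity).withCoder(ProtoCoder.of(Entity.class)));
    // withProjectId supplies the project id whose presence the test above validates.
    entities.apply("WriteToDatastore", DatastoreIO.v1().write().withProjectId("example-project"));
    pipeline.run().waitUntilFinish();
  }
}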
Use of com.google.firestore.v1.Write in project beam by apache.
The class FirestoreV1FnBatchWriteWithDeadLetterQueueTest, method nonRetryableWriteIsOutput.
@Test
public void nonRetryableWriteIsOutput() throws Exception {
  Write write0 = FirestoreProtoHelpers.newWrite(0);
  Write write1 =
      FirestoreProtoHelpers.newWrite(1)
          .toBuilder()
          .setCurrentDocument(Precondition.newBuilder().setExists(false).build())
          .build();
  BatchWriteRequest expectedRequest1 =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write0)
          .addWrites(write1)
          .build();
  // Stubbed response: write0 succeeds, write1 fails with the non-retryable ALREADY_EXISTS code.
  BatchWriteResponse response1 =
      BatchWriteResponse.newBuilder()
          .addStatus(STATUS_OK)
          .addWriteResults(
              WriteResult.newBuilder()
                  .setUpdateTime(Timestamp.newBuilder().setSeconds(1).build())
                  .build())
          .addStatus(statusForCode(Code.ALREADY_EXISTS))
          .addWriteResults(WriteResult.newBuilder().build())
          .build();
  RpcQosOptions options =
      rpcQosOptions.toBuilder().withMaxAttempts(1).withBatchMaxCount(2).build();
  when(processContext.element())
      .thenReturn(write0, write1)
      .thenThrow(new IllegalStateException("too many calls"));
  when(rpcQos.newWriteAttempt(any())).thenReturn(attempt);
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(any()))
      .thenReturn(newFlushBuffer(options))
      .thenReturn(newFlushBuffer(options))
      .thenThrow(new IllegalStateException("too many attempt#newFlushBuffer calls"));
  when(attempt.isCodeRetryable(Code.ALREADY_EXISTS)).thenReturn(false);
  ArgumentCaptor<BatchWriteRequest> requestCaptor1 =
      ArgumentCaptor.forClass(BatchWriteRequest.class);
  when(callable.call(requestCaptor1.capture())).thenReturn(response1);
  BatchWriteFnWithDeadLetterQueue fn =
      getFn(clock, ff, options, CounterFactory.DEFAULT, DistributionFactory.DEFAULT);
  fn.setup();
  fn.startBundle(startBundleContext);
  // write0
  fn.processElement(processContext, window);
  ArgumentCaptor<WriteFailure> writeFailureCapture = ArgumentCaptor.forClass(WriteFailure.class);
  doNothing().when(processContext).outputWithTimestamp(writeFailureCapture.capture(), any());
  // write1
  fn.processElement(processContext, window);
  // The non-retryable write is emitted as a WriteFailure instead of being retried or thrown.
  WriteFailure failure = writeFailureCapture.getValue();
  assertEquals(Code.ALREADY_EXISTS.getNumber(), failure.getStatus().getCode());
  assertEquals(write1, failure.getWrite());
  assertEquals(WriteResult.getDefaultInstance(), failure.getWriteResult());
  assertEquals(expectedRequest1, requestCaptor1.getValue());
  List<WriteElement> actualWrites = new ArrayList<>(fn.writes);
  assertTrue(actualWrites.isEmpty());
  fn.finishBundle(finishBundleContext);
  verify(attempt, times(1)).recordRequestStart(any(), eq(2));
  verify(attempt, times(1)).recordWriteCounts(any(), eq(1), eq(1));
  verify(attempt, never()).completeSuccess();
  verify(callable, times(1)).call(any());
  verifyNoMoreInteractions(callable);
}
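The body of FirestoreProtoHelpers.newWrite(int) is not shown in this excerpt. The sketch below is one plausible shape for it, assuming an update of a document named doc-<i>; the document path and field contents are assumptions, not the helper's actual values:

import com.google.firestore.v1.Document;
import com.google.firestore.v1.Precondition;
import com.google.firestore.v1.Value;
import com.google.firestore.v1.Write;

final class WriteProtoSketch {
  // Roughly what a helper like FirestoreProtoHelpers.newWrite(i) might produce: an update of doc-<i>.
  static Write newWrite(int i) {
    return Write.newBuilder()
        .setUpdate(
            Document.newBuilder()
                .setName("projects/testing-project/databases/(default)/documents/col/doc-" + i)
                .putFields("field", Value.newBuilder().setStringValue("value-" + i).build()))
        .build();
  }

  // Mirrors write1 in the test above: the same write plus an "exists == false" precondition,
  // which the server rejects with ALREADY_EXISTS when the document is already present.
  static Write newCreateOnlyWrite(int i) {
    return newWrite(i)
        .toBuilder()
        .setCurrentDocument(Precondition.newBuilder().setExists(false).build())
        .build();
  }
}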
Use of com.google.firestore.v1.Write in project beam by apache.
The class BaseFirestoreV1WriteFnTest, method endToEnd_deadlineExceededOnAnIndividualWriteResultsInThrottling.
@Test
public final void endToEnd_deadlineExceededOnAnIndividualWriteResultsInThrottling() throws Exception {
  final long totalDocCount = 1_000_000;
  final int numWorkers = 100;
  final long docCount = totalDocCount / numWorkers;
  LOG.info("docCount = {}", docCount);
  RpcQosOptions options =
      rpcQosOptions
          .toBuilder()
          .withHintMaxNumWorkers(numWorkers)
          .withSamplePeriod(Duration.standardMinutes(10))
          .withReportDiagnosticMetrics()
          .build();
  LOG.debug("options = {}", options);
  FirestoreStatefulComponentFactory ff = mock(FirestoreStatefulComponentFactory.class);
  when(ff.getFirestoreStub(any())).thenReturn(stub);
  Random random = new Random(12345);
  TestClock clock = new TestClock(Instant.EPOCH, Duration.standardSeconds(1));
  Sleeper sleeper = millis -> clock.setNext(advanceClockBy(Duration.millis(millis)));
  RpcQosImpl qos =
      new RpcQosImpl(
          options,
          random,
          sleeper,
          metricsFixture.counterFactory,
          metricsFixture.distributionFactory);
  // Delegate every RpcQos call to the real RpcQosImpl so genuine throttling behavior is exercised.
  RpcQos qosSpy =
      mock(
          RpcQos.class,
          invocation -> {
            Method method = invocation.getMethod();
            LOG.debug("method = {}", method);
            Method actualMethod =
                qos.getClass().getMethod(method.getName(), method.getParameterTypes());
            return actualMethod.invoke(qos, invocation.getArguments());
          });
  when(ff.getRpcQos(options)).thenReturn(qosSpy);
  int defaultDocumentWriteLatency = 30;
  final AtomicLong writeCounter = new AtomicLong();
  when(processContext.element())
      .thenAnswer(invocation -> newWrite(writeCounter.getAndIncrement()));
  // Simulated server: succeed quickly before the 20-minute mark, then start returning
  // DEADLINE_EXCEEDED for slow writes so the adaptive throttler has something to react to.
  when(callable.call(any()))
      .thenAnswer(
          new Answer<BatchWriteResponse>() {
            private final Random rand = new Random(84572908);
            private final Instant threshold =
                Instant.ofEpochMilli(Duration.standardMinutes(20).getMillis());

            @Override
            public BatchWriteResponse answer(InvocationOnMock invocation) throws Throwable {
              BatchWriteRequest request = invocation.getArgument(0, BatchWriteRequest.class);
              LOG.debug("request = {}", request);
              long requestDurationMs = 0;
              BatchWriteResponse.Builder builder = BatchWriteResponse.newBuilder();
              for (Write ignored : request.getWritesList()) {
                builder.addWriteResults(WriteResult.newBuilder().build());
                if (clock.prev.isBefore(threshold)) {
                  requestDurationMs += defaultDocumentWriteLatency;
                  builder.addStatus(STATUS_OK);
                } else {
                  int latency = rand.nextInt(1500);
                  LOG.debug("latency = {}", latency);
                  if (latency > 300) {
                    builder.addStatus(STATUS_DEADLINE_EXCEEDED);
                  } else {
                    builder.addStatus(STATUS_OK);
                  }
                  requestDurationMs += latency;
                }
              }
              clock.setNext(advanceClockBy(Duration.millis(requestDurationMs)));
              return builder.build();
            }
          });
  LOG.info(
      "### parameters: {defaultDocumentWriteLatency: {}, rpcQosOptions: {}}",
      defaultDocumentWriteLatency,
      options);
  FnT fn = getFn(clock, ff, options, metricsFixture.counterFactory, metricsFixture.distributionFactory);
  fn.setup();
  fn.startBundle(startBundleContext);
  while (writeCounter.get() < docCount) {
    fn.processElement(processContext, window);
  }
  fn.finishBundle(finishBundleContext);
  LOG.info("writeCounter = {}", writeCounter.get());
  LOG.info("clock.prev = {}", clock.prev);
  // The adaptive throttler's diagnostic distribution must have recorded at least one throttling sample.
  MyDistribution qosAdaptiveThrottlerThrottlingMs =
      metricsFixture.distributions.get("qos_adaptiveThrottler_throttlingMs");
  assertNotNull(qosAdaptiveThrottlerThrottlingMs);
  List<Long> updateInvocations = qosAdaptiveThrottlerThrottlingMs.updateInvocations;
  assertFalse(updateInvocations.isEmpty());
}
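The STATUS_OK and STATUS_DEADLINE_EXCEEDED constants and the statusForCode helper used by these tests are defined elsewhere in the fixture. A plausible sketch, assuming they wrap com.google.rpc.Status, the type BatchWriteResponse carries per-write statuses in:

import com.google.rpc.Code;
import com.google.rpc.Status;

final class StatusSketch {
  // Presumed shape of the fixture's statusForCode helper: a Status carrying only the numeric code.
  static Status statusForCode(Code code) {
    return Status.newBuilder().setCode(code.getNumber()).build();
  }

  static final Status STATUS_OK = statusForCode(Code.OK);
  static final Status STATUS_DEADLINE_EXCEEDED = statusForCode(Code.DEADLINE_EXCEEDED);
}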
Use of com.google.firestore.v1.Write in project beam by apache.
The class BaseFirestoreV1WriteFnTest, method endToEnd_exhaustingAttemptsResultsInException.
@Test
public final void endToEnd_exhaustingAttemptsResultsInException() throws Exception {
  ApiException err1 =
      ApiExceptionFactory.createException(
          new IOException("err1"), GrpcStatusCode.of(io.grpc.Status.Code.ABORTED), false);
  ApiException err2 =
      ApiExceptionFactory.createException(
          new IOException("err2"), GrpcStatusCode.of(io.grpc.Status.Code.ABORTED), false);
  ApiException err3 =
      ApiExceptionFactory.createException(
          new IOException("err3"), GrpcStatusCode.of(io.grpc.Status.Code.ABORTED), false);
  Instant attemptStart = Instant.ofEpochMilli(0);
  Instant rpc1Start = Instant.ofEpochMilli(1);
  Instant rpc1End = Instant.ofEpochMilli(2);
  Instant rpc2Start = Instant.ofEpochMilli(3);
  Instant rpc2End = Instant.ofEpochMilli(4);
  Instant rpc3Start = Instant.ofEpochMilli(5);
  Instant rpc3End = Instant.ofEpochMilli(6);
  Write write = newWrite();
  Element<Write> element1 = new WriteElement(0, write, window);
  FlushBuffer<Element<Write>> flushBuffer = spy(newFlushBuffer(rpcQosOptions));
  when(processContext.element()).thenReturn(write);
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(attemptStart)).thenReturn(flushBuffer);
  when(flushBuffer.isFull()).thenReturn(true);
  when(flushBuffer.offer(element1)).thenReturn(true);
  when(flushBuffer.iterator()).thenReturn(newArrayList(element1).iterator());
  when(flushBuffer.getBufferedElementsCount()).thenReturn(1);
  when(callable.call(any())).thenThrow(err1, err2, err3);
  doNothing().when(attempt).checkCanRetry(any(), eq(err1));
  doNothing().when(attempt).checkCanRetry(any(), eq(err2));
  doThrow(err3).when(attempt).checkCanRetry(any(), eq(err3));
  try {
    FnT fn = getFn(clock, ff, rpcQosOptions, CounterFactory.DEFAULT, DistributionFactory.DEFAULT);
    runFunction(fn);
    fail("Expected exception");
  } catch (ApiException e) {
    assertNotNull(e.getMessage());
    assertTrue(e.getMessage().contains("err3"));
  }
  verify(flushBuffer, times(1)).offer(element1);
  verify(flushBuffer, atLeastOnce()).isFull();
  verify(attempt, times(1)).recordRequestStart(rpc1Start, 1);
  verify(attempt, times(1)).recordWriteCounts(rpc1End, 0, 1);
  verify(attempt, times(1)).recordRequestStart(rpc2Start, 1);
  verify(attempt, times(1)).recordWriteCounts(rpc2End, 0, 1);
  verify(attempt, times(1)).recordRequestStart(rpc3Start, 1);
  verify(attempt, times(1)).recordWriteCounts(rpc3End, 0, 1);
  verify(attempt, never()).recordWriteCounts(any(), gt(0), anyInt());
  verify(attempt, never()).completeSuccess();
}
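The base rpcQosOptions value that these tests adjust via toBuilder() is also defined outside this excerpt. A hedged sketch of constructing such a value, assuming RpcQosOptions.defaultOptions() as the starting point; the concrete numbers are arbitrary for illustration:

import org.apache.beam.sdk.io.gcp.firestore.RpcQosOptions;

// Illustrative only; the fixture's real values are not shown in this excerpt.
RpcQosOptions rpcQosOptions =
    RpcQosOptions.defaultOptions()
        .toBuilder()
        .withMaxAttempts(3)
        .withBatchMaxCount(20)
        .build();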
Use of com.google.firestore.v1.Write in project beam by apache.
The class BaseFirestoreV1WriteFnTest, method endToEnd_success.
@Test
public final void endToEnd_success() throws Exception {
  Write write = newWrite();
  BatchWriteRequest expectedRequest =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write)
          .build();
  BatchWriteResponse response = BatchWriteResponse.newBuilder().addStatus(STATUS_OK).build();
  Element<Write> element1 = new WriteElement(0, write, window);
  Instant attemptStart = Instant.ofEpochMilli(0);
  Instant rpcStart = Instant.ofEpochMilli(1);
  Instant rpcEnd = Instant.ofEpochMilli(2);
  RpcQosOptions options = rpcQosOptions.toBuilder().withBatchMaxCount(1).build();
  FlushBuffer<Element<Write>> flushBuffer = spy(newFlushBuffer(options));
  when(processContext.element()).thenReturn(write);
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(attemptStart)).thenReturn(flushBuffer);
  ArgumentCaptor<BatchWriteRequest> requestCaptor = ArgumentCaptor.forClass(BatchWriteRequest.class);
  when(callable.call(requestCaptor.capture())).thenReturn(response);
  runFunction(getFn(clock, ff, options, CounterFactory.DEFAULT, DistributionFactory.DEFAULT));
  assertEquals(expectedRequest, requestCaptor.getValue());
  verify(flushBuffer, times(1)).offer(element1);
  verify(flushBuffer, times(1)).isFull();
  verify(attempt, times(1)).recordRequestStart(rpcStart, 1);
  verify(attempt, times(1)).recordWriteCounts(rpcEnd, 1, 0);
  verify(attempt, never()).recordWriteCounts(any(), anyInt(), gt(0));
  verify(attempt, never()).checkCanRetry(any(), any());
}
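Outside these unit tests, the batch-write DoFn under test is reached through the public FirestoreIO transform. A minimal, hypothetical pipeline sketch, assuming FirestoreIO.v1().write().batchWrite() as the entry point and using a placeholder Write proto:

import com.google.firestore.v1.Write;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.extensions.protobuf.ProtoCoder;
import org.apache.beam.sdk.io.gcp.firestore.FirestoreIO;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.values.PCollection;

public class FirestoreBatchWriteSketch {
  public static void main(String[] args) {
    Pipeline pipeline = Pipeline.create(PipelineOptionsFactory.fromArgs(args).create());
    // A placeholder Write proto so the sketch compiles; real pipelines build update/delete writes upstream.
    PCollection<Write> writes =
        pipeline.apply(Create.of(Write.getDefaultInstance()).withCoder(ProtoCoder.of(Write.class)));
    // batchWrite() groups writes into BatchWrite RPCs shaped like the expectedRequest asserted above.
    writes.apply("WriteToFirestore", FirestoreIO.v1().write().batchWrite().build());
    pipeline.run().waitUntilFinish();
  }
}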