Usage example of com.google.firestore.v1.Precondition from the Apache Beam project.
Source: class FirestoreV1IT, method batchWrite_partialFailureOutputsToDeadLetterQueue.
/**
 * Verifies that when a batch contains one valid write and one write whose precondition
 * fails, the failing write is emitted to the dead-letter queue while the valid write
 * is still committed to Firestore.
 */
@Test
public void batchWrite_partialFailureOutputsToDeadLetterQueue()
    throws InterruptedException, ExecutionException, TimeoutException {
  String collectionId = "a";

  // A plain write with no precondition; this one is expected to succeed.
  String successDocId = helper.docId();
  Write validWrite =
      Write.newBuilder()
          .setUpdate(
              Document.newBuilder()
                  .setName(docPath(helper.getBaseDocumentPath(), collectionId, successDocId))
                  .putFields("foo", Value.newBuilder().setStringValue(successDocId).build()))
          .build();

  // Capture a timestamp BEFORE the second document is created, then use it as an
  // updateTime precondition below — the conditional update must therefore fail.
  long nowMillis = System.currentTimeMillis();
  Timestamp beforeCreate =
      Timestamp.newBuilder()
          .setSeconds(nowMillis / 1000)
          .setNanos((int) ((nowMillis % 1000) * 1000000))
          .build();
  String failureDocId = helper.docId();
  helper
      .getBaseDocument()
      .collection(collectionId)
      .document(failureDocId)
      .create(ImmutableMap.of("foo", "baz"))
      .get(10, TimeUnit.SECONDS);
  Write conditionalUpdate =
      Write.newBuilder()
          .setUpdate(
              Document.newBuilder()
                  .setName(docPath(helper.getBaseDocumentPath(), collectionId, failureDocId))
                  .putFields("foo", Value.newBuilder().setStringValue(successDocId).build()))
          .setCurrentDocument(Precondition.newBuilder().setUpdateTime(beforeCreate))
          .build();
  List<Write> writes = newArrayList(validWrite, conditionalUpdate);

  // batchMaxCount=2 forces both writes into a single batch so the DLQ sees the
  // partial failure within one RPC. (Named rpcQos to avoid shadowing this.options.)
  RpcQosOptions rpcQos = BaseFirestoreIT.rpcQosOptions.toBuilder().withBatchMaxCount(2).build();
  PCollection<WriteFailure> failures =
      testPipeline
          .apply(Create.of(writes))
          .apply(
              FirestoreIO.v1()
                  .write()
                  .batchWrite()
                  .withDeadLetterQueue()
                  .withRpcQosOptions(rpcQos)
                  .build());
  PAssert.that(failures)
      .satisfies(
          writeFailures -> {
            // Exactly one failure: the conditional write, rejected with
            // FAILED_PRECONDITION and an empty (no updateTime) WriteResult.
            Iterator<WriteFailure> it = writeFailures.iterator();
            assertTrue(it.hasNext());
            WriteFailure failure = it.next();
            assertEquals(Code.FAILED_PRECONDITION, Code.forNumber(failure.getStatus().getCode()));
            assertNotNull(failure.getWriteResult());
            assertFalse(failure.getWriteResult().hasUpdateTime());
            assertEquals(conditionalUpdate, failure.getWrite());
            assertFalse(it.hasNext());
            return null;
          });
  testPipeline.run(this.options);

  // Verify datastore contents: the valid write landed; the conditional one left
  // the pre-created document ("baz") untouched.
  ApiFuture<QuerySnapshot> query =
      helper.getBaseDocument().collection(collectionId).orderBy("__name__").get();
  QuerySnapshot snapshot = query.get(10, TimeUnit.SECONDS);
  List<KV<String, String>> actualDocs =
      snapshot.getDocuments().stream()
          .map(doc -> KV.of(doc.getId(), doc.getString("foo")))
          .collect(Collectors.toList());
  List<KV<String, String>> expectedDocs =
      newArrayList(KV.of(successDocId, successDocId), KV.of(failureDocId, "baz"));
  assertEquals(expectedDocs, actualDocs);
}
Usage example of com.google.firestore.v1.Precondition from the Apache Beam project.
Source: class FirestoreV1FnBatchWriteWithSummaryTest, method nonRetryableWriteResultStopsAttempts.
/**
 * Verifies that a write rejected with a non-retryable status (ALREADY_EXISTS) surfaces as a
 * FailedWritesException instead of being re-queued, and that exactly one RPC is issued.
 */
@Test
public void nonRetryableWriteResultStopsAttempts() throws Exception {
// write0 has no precondition; write1 requires the document to NOT exist, so the
// stubbed response below can plausibly fail it with ALREADY_EXISTS.
Write write0 = FirestoreProtoHelpers.newWrite(0);
Write write1 = FirestoreProtoHelpers.newWrite(1).toBuilder().setCurrentDocument(Precondition.newBuilder().setExists(false).build()).build();
// The single expected request: both writes batched into one BatchWriteRequest.
BatchWriteRequest expectedRequest1 = BatchWriteRequest.newBuilder().setDatabase("projects/testing-project/databases/(default)").addWrites(write0).addWrites(write1).build();
// Stubbed response: status[0]=OK with an updateTime for write0; status[1]=ALREADY_EXISTS
// with an empty WriteResult for write1.
BatchWriteResponse response1 = BatchWriteResponse.newBuilder().addStatus(STATUS_OK).addWriteResults(WriteResult.newBuilder().setUpdateTime(Timestamp.newBuilder().setSeconds(1).build()).build()).addStatus(statusForCode(Code.ALREADY_EXISTS)).addWriteResults(WriteResult.newBuilder().build()).build();
// maxAttempts=1 → no retries; batchMaxCount=2 → the buffer flushes once both writes arrive.
RpcQosOptions options = rpcQosOptions.toBuilder().withMaxAttempts(1).withBatchMaxCount(2).build();
// Elements arrive in order write0, write1; any further poll is a test bug.
when(processContext.element()).thenReturn(write0, write1).thenThrow(new IllegalStateException("too many calls"));
when(rpcQos.newWriteAttempt(any())).thenReturn(attempt);
when(attempt.awaitSafeToProceed(any())).thenReturn(true);
// Two flush-buffer requests are expected (one per processElement); guard against extras.
when(attempt.<Write, Element<Write>>newFlushBuffer(any())).thenReturn(newFlushBuffer(options)).thenReturn(newFlushBuffer(options)).thenThrow(new IllegalStateException("too many attempt#newFlushBuffer calls"));
// ALREADY_EXISTS is declared non-retryable — the write must not be re-enqueued.
when(attempt.isCodeRetryable(Code.ALREADY_EXISTS)).thenReturn(false);
// Capture the request sent on the wire so it can be compared to expectedRequest1.
ArgumentCaptor<BatchWriteRequest> requestCaptor1 = ArgumentCaptor.forClass(BatchWriteRequest.class);
when(callable.call(requestCaptor1.capture())).thenReturn(response1);
BaseBatchWriteFn<WriteSuccessSummary> fn = new BatchWriteFnWithSummary(clock, ff, options, CounterFactory.DEFAULT);
fn.setup();
fn.startBundle(startBundleContext);
// write0
fn.processElement(processContext, window);
try {
// write1 — fills the buffer to batchMaxCount, triggering the flush and the RPC.
fn.processElement(processContext, window);
fail("expected an exception when trying to apply a write with a failed precondition");
} catch (FailedWritesException e) {
// Exactly one failure is reported: write1 with ALREADY_EXISTS and the empty WriteResult.
List<WriteFailure> writeFailures = e.getWriteFailures();
assertEquals(1, writeFailures.size());
WriteFailure failure = writeFailures.get(0);
assertEquals(Code.ALREADY_EXISTS.getNumber(), failure.getStatus().getCode());
assertEquals(write1, failure.getWrite());
assertEquals(WriteResult.getDefaultInstance(), failure.getWriteResult());
}
assertEquals(expectedRequest1, requestCaptor1.getValue());
List<WriteElement> actualWrites = new ArrayList<>(fn.writes);
// Expected clock readings for the single flush; assumes the mocked clock ticks
// 1 ms per call (buffer creation, RPC start, RPC end) — TODO confirm against `clock` setup.
Instant flush1Attempt1Begin = Instant.ofEpochMilli(1);
Instant flush1Attempt1RpcStart = Instant.ofEpochMilli(2);
Instant flush1Attempt1RpcEnd = Instant.ofEpochMilli(3);
// Nothing may remain queued: the failed write was surfaced, not re-buffered.
assertTrue(actualWrites.isEmpty());
fn.finishBundle(finishBundleContext);
// One flush attempt: request of 2 writes started at t=2; at t=3, 1 success + 1 failure recorded.
verify(attempt, times(1)).newFlushBuffer(flush1Attempt1Begin);
verify(attempt, times(1)).recordRequestStart(flush1Attempt1RpcStart, 2);
verify(attempt, times(1)).recordWriteCounts(flush1Attempt1RpcEnd, 1, 1);
// The attempt is never marked fully successful because one write failed.
verify(attempt, never()).completeSuccess();
verify(callable, times(1)).call(any());
verifyNoMoreInteractions(callable);
}
Aggregations