Use of org.apache.beam.sdk.io.gcp.firestore.FirestoreV1.WriteFailure in project beam by apache.
From the class FirestoreV1FnBatchWriteWithDeadLetterQueueTest, method nonRetryableWriteIsOutput:
@Test
public void nonRetryableWriteIsOutput() throws Exception {
  Write write0 = FirestoreProtoHelpers.newWrite(0);
  Write write1 =
      FirestoreProtoHelpers.newWrite(1)
          .toBuilder()
          .setCurrentDocument(Precondition.newBuilder().setExists(false).build())
          .build();
  BatchWriteRequest expectedRequest1 =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write0)
          .addWrites(write1)
          .build();
  BatchWriteResponse response1 =
      BatchWriteResponse.newBuilder()
          .addStatus(STATUS_OK)
          .addWriteResults(
              WriteResult.newBuilder()
                  .setUpdateTime(Timestamp.newBuilder().setSeconds(1).build())
                  .build())
          .addStatus(statusForCode(Code.ALREADY_EXISTS))
          .addWriteResults(WriteResult.newBuilder().build())
          .build();
  RpcQosOptions options = rpcQosOptions.toBuilder().withMaxAttempts(1).withBatchMaxCount(2).build();
  when(processContext.element())
      .thenReturn(write0, write1)
      .thenThrow(new IllegalStateException("too many calls"));
  when(rpcQos.newWriteAttempt(any())).thenReturn(attempt);
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(any()))
      .thenReturn(newFlushBuffer(options))
      .thenReturn(newFlushBuffer(options))
      .thenThrow(new IllegalStateException("too many attempt#newFlushBuffer calls"));
  when(attempt.isCodeRetryable(Code.ALREADY_EXISTS)).thenReturn(false);
  ArgumentCaptor<BatchWriteRequest> requestCaptor1 = ArgumentCaptor.forClass(BatchWriteRequest.class);
  when(callable.call(requestCaptor1.capture())).thenReturn(response1);
  BatchWriteFnWithDeadLetterQueue fn =
      getFn(clock, ff, options, CounterFactory.DEFAULT, DistributionFactory.DEFAULT);
  fn.setup();
  fn.startBundle(startBundleContext);
  // write0
  fn.processElement(processContext, window);
  ArgumentCaptor<WriteFailure> writeFailureCapture = ArgumentCaptor.forClass(WriteFailure.class);
  doNothing().when(processContext).outputWithTimestamp(writeFailureCapture.capture(), any());
  // write1
  fn.processElement(processContext, window);
  WriteFailure failure = writeFailureCapture.getValue();
  assertEquals(Code.ALREADY_EXISTS.getNumber(), failure.getStatus().getCode());
  assertEquals(write1, failure.getWrite());
  assertEquals(WriteResult.getDefaultInstance(), failure.getWriteResult());
  assertEquals(expectedRequest1, requestCaptor1.getValue());
  List<WriteElement> actualWrites = new ArrayList<>(fn.writes);
  assertTrue(actualWrites.isEmpty());
  fn.finishBundle(finishBundleContext);
  verify(attempt, times(1)).recordRequestStart(any(), eq(2));
  verify(attempt, times(1)).recordWriteCounts(any(), eq(1), eq(1));
  verify(attempt, never()).completeSuccess();
  verify(callable, times(1)).call(any());
  verifyNoMoreInteractions(callable);
}
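For orientation, a sketch of how a downstream consumer might classify the WriteFailure elements this DoFn emits to its dead-letter output. The shouldRequeue helper is hypothetical and not part of Beam; it relies only on the WriteFailure accessors exercised by the assertions above and on com.google.rpc.Code.forNumber.

import com.google.rpc.Code;
import org.apache.beam.sdk.io.gcp.firestore.FirestoreV1.WriteFailure;

// Hypothetical classifier: decide whether a failed write could succeed if
// resubmitted. ALREADY_EXISTS from a create precondition (the case tested
// above) fails identically on every retry, so requeueing it is pointless.
static boolean shouldRequeue(WriteFailure failure) {
  Code code = Code.forNumber(failure.getStatus().getCode());
  return code != Code.ALREADY_EXISTS && code != Code.FAILED_PRECONDITION;
}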
Use of org.apache.beam.sdk.io.gcp.firestore.FirestoreV1.WriteFailure in project beam by apache.
From the class FirestoreV1IT, method batchWrite_partialFailureOutputsToDeadLetterQueue:
@Test
public void batchWrite_partialFailureOutputsToDeadLetterQueue()
    throws InterruptedException, ExecutionException, TimeoutException {
  String collectionId = "a";
  String docId = helper.docId();
  Write validWrite =
      Write.newBuilder()
          .setUpdate(
              Document.newBuilder()
                  .setName(docPath(helper.getBaseDocumentPath(), collectionId, docId))
                  .putFields("foo", Value.newBuilder().setStringValue(docId).build()))
          .build();
  long millis = System.currentTimeMillis();
  Timestamp timestamp =
      Timestamp.newBuilder()
          .setSeconds(millis / 1000)
          .setNanos((int) ((millis % 1000) * 1000000))
          .build();
  String docId2 = helper.docId();
  helper
      .getBaseDocument()
      .collection(collectionId)
      .document(docId2)
      .create(ImmutableMap.of("foo", "baz"))
      .get(10, TimeUnit.SECONDS);
  // this will fail because the updateTime precondition is set to before the document was created
  Write conditionalUpdate =
      Write.newBuilder()
          .setUpdate(
              Document.newBuilder()
                  .setName(docPath(helper.getBaseDocumentPath(), collectionId, docId2))
                  .putFields("foo", Value.newBuilder().setStringValue(docId).build()))
          .setCurrentDocument(Precondition.newBuilder().setUpdateTime(timestamp))
          .build();
  List<Write> writes = newArrayList(validWrite, conditionalUpdate);
  RpcQosOptions options = BaseFirestoreIT.rpcQosOptions.toBuilder().withBatchMaxCount(2).build();
  PCollection<WriteFailure> writeFailurePCollection =
      testPipeline
          .apply(Create.of(writes))
          .apply(
              FirestoreIO.v1().write().batchWrite().withDeadLetterQueue().withRpcQosOptions(options).build());
  PAssert.that(writeFailurePCollection)
      .satisfies(
          (writeFailures) -> {
            Iterator<WriteFailure> iterator = writeFailures.iterator();
            assertTrue(iterator.hasNext());
            WriteFailure failure = iterator.next();
            assertEquals(Code.FAILED_PRECONDITION, Code.forNumber(failure.getStatus().getCode()));
            assertNotNull(failure.getWriteResult());
            assertFalse(failure.getWriteResult().hasUpdateTime());
            assertEquals(conditionalUpdate, failure.getWrite());
            assertFalse(iterator.hasNext());
            return null;
          });
  testPipeline.run(this.options);
  ApiFuture<QuerySnapshot> actualDocsQuery =
      helper.getBaseDocument().collection(collectionId).orderBy("__name__").get();
  QuerySnapshot querySnapshot = actualDocsQuery.get(10, TimeUnit.SECONDS);
  List<QueryDocumentSnapshot> documents = querySnapshot.getDocuments();
  List<KV<String, String>> actualDocumentIds =
      documents.stream().map(doc -> KV.of(doc.getId(), doc.getString("foo"))).collect(Collectors.toList());
  List<KV<String, String>> expected = newArrayList(KV.of(docId, docId), KV.of(docId2, "baz"));
  assertEquals(expected, actualDocumentIds);
}
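Outside a test, a pipeline would typically route the same PCollection<WriteFailure> to a sink instead of asserting on it. A minimal sketch reusing the names from the test above (testPipeline, writes, options) and standard Beam transforms; the output path is a placeholder:

PCollection<WriteFailure> failures =
    testPipeline
        .apply(Create.of(writes))
        .apply(FirestoreIO.v1().write().batchWrite().withDeadLetterQueue().withRpcQosOptions(options).build());
failures
    .apply(
        MapElements.into(TypeDescriptors.strings())
            .via(f -> String.format("code=%d write=%s", f.getStatus().getCode(), f.getWrite())))
    .apply(TextIO.write().to("/tmp/firestore-write-failures")); // placeholder path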
Use of org.apache.beam.sdk.io.gcp.firestore.FirestoreV1.WriteFailure in project beam by apache.
From the class FirestoreV1FnBatchWriteWithDeadLetterQueueTest, method enqueueingWritesValidateBytesSize:
@Override
@Test
public void enqueueingWritesValidateBytesSize() throws Exception {
  int maxBytes = 50;
  RpcQosOptions options = rpcQosOptions.toBuilder().withBatchMaxBytes(maxBytes).build();
  when(ff.getFirestoreStub(any())).thenReturn(stub);
  when(ff.getRpcQos(any())).thenReturn(FirestoreStatefulComponentFactory.INSTANCE.getRpcQos(options));
  byte[] bytes = new byte[maxBytes + 1];
  SecureRandom.getInstanceStrong().nextBytes(bytes);
  byte[] base64Bytes = Base64.getEncoder().encode(bytes);
  String base64String = Base64.getEncoder().encodeToString(bytes);
  Value largeValue = Value.newBuilder().setStringValueBytes(ByteString.copyFrom(base64Bytes)).build();
  // apply a doc transform that is too large
  Write write1 =
      Write.newBuilder()
          .setTransform(
              DocumentTransform.newBuilder()
                  .setDocument(String.format("doc-%03d", 2))
                  .addFieldTransforms(
                      FieldTransform.newBuilder()
                          .setAppendMissingElements(ArrayValue.newBuilder().addValues(largeValue))))
          .build();
  // delete a doc that is too large
  Write write2 = Write.newBuilder().setDelete(String.format("doc-%03d_%s", 3, base64String)).build();
  // update a doc that is too large
  Write write3 =
      Write.newBuilder()
          .setUpdate(
              Document.newBuilder()
                  .setName(String.format("doc-%03d", 4))
                  .putAllFields(ImmutableMap.of("foo", largeValue)))
          .build();
  BatchWriteFnWithDeadLetterQueue fn =
      getFn(clock, ff, options, metricsFixture.counterFactory, metricsFixture.distributionFactory);
  fn.populateDisplayData(displayDataBuilder);
  fn.setup();
  fn.startBundle(startBundleContext);
  ArgumentCaptor<WriteFailure> write1FailureCapture = ArgumentCaptor.forClass(WriteFailure.class);
  doNothing().when(processContext).outputWithTimestamp(write1FailureCapture.capture(), any());
  when(processContext.element()).thenReturn(write1);
  fn.processElement(processContext, window);
  WriteFailure failure = write1FailureCapture.getValue();
  assertNotNull(failure);
  String message = failure.getStatus().getMessage();
  assertTrue(message.contains("TRANSFORM"));
  assertTrue(message.contains("larger than configured max allowed bytes per batch"));
  ArgumentCaptor<WriteFailure> write2FailureCapture = ArgumentCaptor.forClass(WriteFailure.class);
  doNothing().when(processContext).outputWithTimestamp(write2FailureCapture.capture(), any());
  when(processContext.element()).thenReturn(write2);
  fn.processElement(processContext, window);
  WriteFailure failure2 = write2FailureCapture.getValue();
  assertNotNull(failure2);
  String message2 = failure2.getStatus().getMessage();
  assertTrue(message2.contains("DELETE"));
  assertTrue(message2.contains("larger than configured max allowed bytes per batch"));
  ArgumentCaptor<WriteFailure> write3FailureCapture = ArgumentCaptor.forClass(WriteFailure.class);
  doNothing().when(processContext).outputWithTimestamp(write3FailureCapture.capture(), any());
  when(processContext.element()).thenReturn(write3);
  fn.processElement(processContext, window);
  WriteFailure failure3 = write3FailureCapture.getValue();
  assertNotNull(failure3);
  String message3 = failure3.getStatus().getMessage();
  assertTrue(message3.contains("UPDATE"));
  assertTrue(message3.contains("larger than configured max allowed bytes per batch"));
  assertEquals(0, fn.writes.size());
}
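The validation exercised here rejects any single write whose serialized form exceeds batchMaxBytes before it is enqueued. A pipeline author could pre-filter such writes with Beam's Filter transform. This sketch uses protobuf's standard getSerializedSize() as an approximation of the connector's own byte accounting; pipeline is a placeholder, and the 50-byte limit mirrors the test rather than a realistic production value:

final int batchMaxBytes = 50; // mirrors the test's RpcQosOptions
PCollection<Write> sized =
    pipeline
        .apply(Create.of(write1, write2, write3)) // the oversize writes built above
        .apply("DropOversizeWrites", Filter.by((Write w) -> w.getSerializedSize() <= batchMaxBytes));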
Use of org.apache.beam.sdk.io.gcp.firestore.FirestoreV1.WriteFailure in project beam by apache.
From the class FirestoreV1FnBatchWriteWithSummaryTest, method nonRetryableWriteResultStopsAttempts:
@Test
public void nonRetryableWriteResultStopsAttempts() throws Exception {
  Write write0 = FirestoreProtoHelpers.newWrite(0);
  Write write1 =
      FirestoreProtoHelpers.newWrite(1)
          .toBuilder()
          .setCurrentDocument(Precondition.newBuilder().setExists(false).build())
          .build();
  BatchWriteRequest expectedRequest1 =
      BatchWriteRequest.newBuilder()
          .setDatabase("projects/testing-project/databases/(default)")
          .addWrites(write0)
          .addWrites(write1)
          .build();
  BatchWriteResponse response1 =
      BatchWriteResponse.newBuilder()
          .addStatus(STATUS_OK)
          .addWriteResults(
              WriteResult.newBuilder()
                  .setUpdateTime(Timestamp.newBuilder().setSeconds(1).build())
                  .build())
          .addStatus(statusForCode(Code.ALREADY_EXISTS))
          .addWriteResults(WriteResult.newBuilder().build())
          .build();
  RpcQosOptions options = rpcQosOptions.toBuilder().withMaxAttempts(1).withBatchMaxCount(2).build();
  when(processContext.element())
      .thenReturn(write0, write1)
      .thenThrow(new IllegalStateException("too many calls"));
  when(rpcQos.newWriteAttempt(any())).thenReturn(attempt);
  when(attempt.awaitSafeToProceed(any())).thenReturn(true);
  when(attempt.<Write, Element<Write>>newFlushBuffer(any()))
      .thenReturn(newFlushBuffer(options))
      .thenReturn(newFlushBuffer(options))
      .thenThrow(new IllegalStateException("too many attempt#newFlushBuffer calls"));
  when(attempt.isCodeRetryable(Code.ALREADY_EXISTS)).thenReturn(false);
  ArgumentCaptor<BatchWriteRequest> requestCaptor1 = ArgumentCaptor.forClass(BatchWriteRequest.class);
  when(callable.call(requestCaptor1.capture())).thenReturn(response1);
  BaseBatchWriteFn<WriteSuccessSummary> fn =
      new BatchWriteFnWithSummary(clock, ff, options, CounterFactory.DEFAULT);
  fn.setup();
  fn.startBundle(startBundleContext);
  // write0
  fn.processElement(processContext, window);
  try {
    // write1
    fn.processElement(processContext, window);
    fail("expected an exception when trying to apply a write with a failed precondition");
  } catch (FailedWritesException e) {
    List<WriteFailure> writeFailures = e.getWriteFailures();
    assertEquals(1, writeFailures.size());
    WriteFailure failure = writeFailures.get(0);
    assertEquals(Code.ALREADY_EXISTS.getNumber(), failure.getStatus().getCode());
    assertEquals(write1, failure.getWrite());
    assertEquals(WriteResult.getDefaultInstance(), failure.getWriteResult());
  }
  assertEquals(expectedRequest1, requestCaptor1.getValue());
  List<WriteElement> actualWrites = new ArrayList<>(fn.writes);
  Instant flush1Attempt1Begin = Instant.ofEpochMilli(1);
  Instant flush1Attempt1RpcStart = Instant.ofEpochMilli(2);
  Instant flush1Attempt1RpcEnd = Instant.ofEpochMilli(3);
  assertTrue(actualWrites.isEmpty());
  fn.finishBundle(finishBundleContext);
  verify(attempt, times(1)).newFlushBuffer(flush1Attempt1Begin);
  verify(attempt, times(1)).recordRequestStart(flush1Attempt1RpcStart, 2);
  verify(attempt, times(1)).recordWriteCounts(flush1Attempt1RpcEnd, 1, 1);
  verify(attempt, never()).completeSuccess();
  verify(callable, times(1)).call(any());
  verifyNoMoreInteractions(callable);
}
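The key contrast with the dead-letter variant tested earlier: in summary mode, a non-retryable failure is thrown as a FailedWritesException and fails the bundle, whereas the dead-letter variant emits the same failure as an output element. A sketch of the two ways of building the transform, using only builder calls that appear in these tests (writes is a PCollection<Write> as in the integration test above):

// Summary mode (default): a non-retryable failure throws FailedWritesException,
// failing the bundle.
writes.apply(FirestoreIO.v1().write().batchWrite().build());

// Dead-letter mode: the same failure becomes a WriteFailure element that
// downstream transforms can inspect or persist.
PCollection<WriteFailure> failures =
    writes.apply(FirestoreIO.v1().write().batchWrite().withDeadLetterQueue().build());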
Use of org.apache.beam.sdk.io.gcp.firestore.FirestoreV1.WriteFailure in project beam by apache.
From the class FirestoreV1FnBatchWriteWithSummaryTest, method enqueueingWritesValidateBytesSize:
@Override
@Test
public void enqueueingWritesValidateBytesSize() throws Exception {
  int maxBytes = 50;
  RpcQosOptions options = rpcQosOptions.toBuilder().withBatchMaxBytes(maxBytes).build();
  when(ff.getFirestoreStub(any())).thenReturn(stub);
  when(ff.getRpcQos(any())).thenReturn(FirestoreStatefulComponentFactory.INSTANCE.getRpcQos(options));
  byte[] bytes = new byte[maxBytes + 1];
  SecureRandom.getInstanceStrong().nextBytes(bytes);
  byte[] base64Bytes = Base64.getEncoder().encode(bytes);
  String base64String = Base64.getEncoder().encodeToString(bytes);
  Value largeValue = Value.newBuilder().setStringValueBytes(ByteString.copyFrom(base64Bytes)).build();
  // apply a doc transform that is too large
  Write write1 =
      Write.newBuilder()
          .setTransform(
              DocumentTransform.newBuilder()
                  .setDocument(String.format("doc-%03d", 2))
                  .addFieldTransforms(
                      FieldTransform.newBuilder()
                          .setAppendMissingElements(ArrayValue.newBuilder().addValues(largeValue))))
          .build();
  // delete a doc that is too large
  Write write2 = Write.newBuilder().setDelete(String.format("doc-%03d_%s", 3, base64String)).build();
  // update a doc that is too large
  Write write3 =
      Write.newBuilder()
          .setUpdate(
              Document.newBuilder()
                  .setName(String.format("doc-%03d", 4))
                  .putAllFields(ImmutableMap.of("foo", largeValue)))
          .build();
  BatchWriteFnWithSummary fn =
      getFn(clock, ff, options, metricsFixture.counterFactory, metricsFixture.distributionFactory);
  fn.populateDisplayData(displayDataBuilder);
  fn.setup();
  fn.startBundle(startBundleContext);
  try {
    when(processContext.element()).thenReturn(write1);
    fn.processElement(processContext, window);
    fail("expected validation error");
  } catch (FailedWritesException e) {
    WriteFailure failure = e.getWriteFailures().get(0);
    assertNotNull(failure);
    String message = failure.getStatus().getMessage();
    assertTrue(message.contains("TRANSFORM"));
    assertTrue(message.contains("larger than configured max allowed bytes per batch"));
  }
  try {
    when(processContext.element()).thenReturn(write2);
    fn.processElement(processContext, window);
    fail("expected validation error");
  } catch (FailedWritesException e) {
    WriteFailure failure = e.getWriteFailures().get(0);
    assertNotNull(failure);
    String message = failure.getStatus().getMessage();
    assertTrue(message.contains("DELETE"));
    assertTrue(message.contains("larger than configured max allowed bytes per batch"));
  }
  try {
    when(processContext.element()).thenReturn(write3);
    fn.processElement(processContext, window);
    fail("expected validation error");
  } catch (FailedWritesException e) {
    WriteFailure failure = e.getWriteFailures().get(0);
    assertNotNull(failure);
    String message = failure.getStatus().getMessage();
    assertTrue(message.contains("UPDATE"));
    assertTrue(message.contains("larger than configured max allowed bytes per batch"));
  }
  assertEquals(0, fn.writes.size());
}
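A quick arithmetic check of why all three writes trip the limit: Base64 expands the 51 random bytes to (51 / 3) * 4 = 68 characters, so each write's payload already exceeds the 50-byte batchMaxBytes before any protobuf framing is counted:

int inputLen = maxBytes + 1;              // 51 random bytes
int base64Len = ((inputLen + 2) / 3) * 4; // 68 characters after Base64 encoding
assert base64Len > maxBytes;              // 68 > 50, so validation must reject each write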