Use of org.apache.beam.sdk.io.gcp.firestore.RpcQosOptions in project beam by apache.
From the class BaseFirestoreIT, the method listCollections.
@Test
@TestDataLayoutHint(DataLayout.Deep)
public final void listCollections() throws Exception {
  // Verification and cleanup of nested collections is much slower because each document
  // requires an RPC to find its collections; instead of the usual size, use 20 to keep the test quick.
  List<String> collectionIds =
      IntStream.rangeClosed(1, 20).mapToObj(i -> helper.colId()).collect(Collectors.toList());
  // Seed each collection with one document, committing the writes in batches.
  ApiFutures.transform(
          ApiFutures.allAsList(
              chunkUpDocIds(collectionIds)
                  .map(chunk -> {
                    WriteBatch batch = helper.getFs().batch();
                    chunk.stream()
                        .map(col -> helper.getBaseDocument().collection(col).document())
                        .forEach(ref -> batch.set(ref, ImmutableMap.of("foo", "bar")));
                    return batch.commit();
                  })
                  .collect(Collectors.toList())),
          FirestoreTestingHelper.flattenListList(),
          MoreExecutors.directExecutor())
      .get(10, TimeUnit.SECONDS);
  // List the collection ids through FirestoreIO and assert they all come back.
  PCollection<String> actualCollectionIds =
      testPipeline
          .apply(Create.of(""))
          .apply(getListCollectionIdsPTransform(testName.getMethodName()))
          .apply(FirestoreIO.v1().read().listCollectionIds()
              .withRpcQosOptions(rpcQosOptions).build());
  PAssert.that(actualCollectionIds).containsInAnyOrder(collectionIds);
  testPipeline.run(options);
}
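Both snippets assume an rpcQosOptions field defined on BaseFirestoreIT. As a minimal sketch of how such an instance could be assembled (the option values below are illustrative assumptions, not the settings the Beam test suite actually uses), RpcQosOptions exposes a builder:

import org.apache.beam.sdk.io.gcp.firestore.RpcQosOptions;

// Illustrative values only; BaseFirestoreIT's real settings may differ.
RpcQosOptions rpcQosOptions =
    RpcQosOptions.newBuilder()
        .withMaxAttempts(3)       // retry each failed RPC at most 3 times
        .withBatchMaxCount(50)    // cap the number of writes per BatchWrite RPC
        .withHintMaxNumWorkers(1) // hint used to size client-side QoS throttling
        .build();

RpcQosOptions.defaultOptions() is available when no tuning is needed, and toBuilder() (used in the second test below) derives a modified copy from an existing instance.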
Use of org.apache.beam.sdk.io.gcp.firestore.RpcQosOptions in project beam by apache.
From the class FirestoreV1IT, the method batchWrite_partialFailureOutputsToDeadLetterQueue.
@Test
public void batchWrite_partialFailureOutputsToDeadLetterQueue()
    throws InterruptedException, ExecutionException, TimeoutException {
  String collectionId = "a";
  String docId = helper.docId();
  // An unconditional write that should succeed.
  Write validWrite =
      Write.newBuilder()
          .setUpdate(Document.newBuilder()
              .setName(docPath(helper.getBaseDocumentPath(), collectionId, docId))
              .putFields("foo", Value.newBuilder().setStringValue(docId).build()))
          .build();
  long millis = System.currentTimeMillis();
  Timestamp timestamp =
      Timestamp.newBuilder()
          .setSeconds(millis / 1000)
          .setNanos((int) ((millis % 1000) * 1000000))
          .build();
  String docId2 = helper.docId();
  helper.getBaseDocument()
      .collection(collectionId)
      .document(docId2)
      .create(ImmutableMap.of("foo", "baz"))
      .get(10, TimeUnit.SECONDS);
  // This write will fail because the updateTime precondition is set to a time
  // before the document was created.
  Write conditionalUpdate =
      Write.newBuilder()
          .setUpdate(Document.newBuilder()
              .setName(docPath(helper.getBaseDocumentPath(), collectionId, docId2))
              .putFields("foo", Value.newBuilder().setStringValue(docId).build()))
          .setCurrentDocument(Precondition.newBuilder().setUpdateTime(timestamp))
          .build();
  List<Write> writes = newArrayList(validWrite, conditionalUpdate);
  // Force both writes into a single BatchWrite RPC so the partial failure surfaces.
  RpcQosOptions options = BaseFirestoreIT.rpcQosOptions.toBuilder().withBatchMaxCount(2).build();
  PCollection<WriteFailure> writeFailurePCollection =
      testPipeline
          .apply(Create.of(writes))
          .apply(FirestoreIO.v1().write().batchWrite()
              .withDeadLetterQueue().withRpcQosOptions(options).build());
  PAssert.that(writeFailurePCollection)
      .satisfies(writeFailures -> {
        // Exactly one failure is expected: the conditional update.
        Iterator<WriteFailure> iterator = writeFailures.iterator();
        assertTrue(iterator.hasNext());
        WriteFailure failure = iterator.next();
        assertEquals(Code.FAILED_PRECONDITION, Code.forNumber(failure.getStatus().getCode()));
        assertNotNull(failure.getWriteResult());
        assertFalse(failure.getWriteResult().hasUpdateTime());
        assertEquals(conditionalUpdate, failure.getWrite());
        assertFalse(iterator.hasNext());
        return null;
      });
  testPipeline.run(this.options);
  // Verify the final state: the valid write landed, while the document targeted
  // by the failed conditional update kept its original value.
  ApiFuture<QuerySnapshot> actualDocsQuery =
      helper.getBaseDocument().collection(collectionId).orderBy("__name__").get();
  QuerySnapshot querySnapshot = actualDocsQuery.get(10, TimeUnit.SECONDS);
  List<QueryDocumentSnapshot> documents = querySnapshot.getDocuments();
  List<KV<String, String>> actualDocumentIds =
      documents.stream()
          .map(doc -> KV.of(doc.getId(), doc.getString("foo")))
          .collect(Collectors.toList());
  List<KV<String, String>> expected = newArrayList(KV.of(docId, docId), KV.of(docId2, "baz"));
  assertEquals(expected, actualDocumentIds);
}
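In a non-test pipeline, the dead-letter output would typically be routed somewhere durable rather than asserted on. A minimal sketch of such downstream handling, assuming a PCollection<Write> named writes and simple string formatting as the sink (both assumptions, not part of the Beam test; MapElements and TypeDescriptors come from the core Beam SDK):

// Hypothetical downstream handling of the dead-letter queue output;
// `writes` is an assumed PCollection<Write>.
PCollection<WriteFailure> failures =
    writes.apply(
        FirestoreIO.v1().write().batchWrite()
            .withDeadLetterQueue()
            .withRpcQosOptions(RpcQosOptions.defaultOptions())
            .build());
failures.apply("FormatFailures",
    MapElements.into(TypeDescriptors.strings())
        .via((WriteFailure failure) -> String.format(
            "write failed: code=%s doc=%s",
            Code.forNumber(failure.getStatus().getCode()),
            failure.getWrite().getUpdate().getName())));

Without withDeadLetterQueue(), a failed write instead fails the bundle and is retried per the RpcQosOptions attempt budget, so the dead-letter variant trades hard failure for per-element failure records like the WriteFailure inspected above.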