Usage of bio.terra.service.filedata.exception.FileSystemExecutionException in the jade-data-repo project by DataBiosphere.
From the class FireStoreDirectoryDao, method storeDirectoryEntry:
// Non-transactional store of a directory entry. Performs a read-then-write:
// if a document already exists at the entry's path, the write is skipped.
private void storeDirectoryEntry(Firestore firestore, String collectionId, FireStoreDirectoryEntry entry) {
    try {
        DocumentReference docRef = getDocRef(firestore, collectionId, entry);
        DocumentSnapshot existingSnap = docRef.get().get();
        if (existingSnap.exists()) {
            // Entry is already present; nothing to store
            return;
        }
        ApiFuture<WriteResult> writeFuture = docRef.set(entry);
        writeFuture.get();
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new FileSystemExecutionException("storeDirectoryEntry - execution interrupted", ex);
    } catch (AbortedException | ExecutionException ex) {
        throw handleExecutionException("storeDirectoryEntry", ex);
    }
}
Usage of bio.terra.service.filedata.exception.FileSystemExecutionException in the jade-data-repo project by DataBiosphere.
From the class FireStoreDirectoryDao, method lookupByPathNoXn:
// Non-transactional lookup of an entry by its encoded path.
// Returns the snapshot even when the document does not exist; callers
// must check DocumentSnapshot.exists().
private DocumentSnapshot lookupByPathNoXn(Firestore firestore, String collectionId, String lookupPath) {
    try {
        String documentName = encodePathAsFirestoreDocumentName(lookupPath);
        DocumentReference docRef = firestore.collection(collectionId).document(documentName);
        return docRef.get().get();
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new FileSystemExecutionException("lookupByPathNoXn - execution interrupted", ex);
    } catch (AbortedException | ExecutionException ex) {
        throw handleExecutionException("lookupByPathNoXn", ex);
    }
}
Usage of bio.terra.service.filedata.exception.FileSystemExecutionException in the jade-data-repo project by DataBiosphere.
From the class FireStoreDirectoryDao, method lookupByFileId:
// Transactional lookup of a directory entry by its fileId.
// Returns the single matching document, or null if not found.
// Throws FileSystemAbortTransactionException (retryable) if more than one
// document matches, which we have observed under concurrent updates.
private QueryDocumentSnapshot lookupByFileId(Firestore firestore, String collectionId, String fileId, Transaction xn) {
    try {
        CollectionReference datasetCollection = firestore.collection(collectionId);
        Query query = datasetCollection.whereEqualTo("fileId", fileId);
        ApiFuture<QuerySnapshot> querySnapshot = xn.get(query);
        List<QueryDocumentSnapshot> documents = querySnapshot.get().getDocuments();
        if (documents.isEmpty()) {
            return null;
        }
        if (documents.size() != 1) {
            // TODO: We have seen duplicate documents as a result of concurrency issues.
            // The query.get() does not appear to be reliably transactional. That may
            // be a FireStore bug. Regardless, we treat this as a retryable situation.
            // It *might* be corruption bug on our side. If so, the retry will consistently
            // fail and eventually give up. When debugging that case, one will have to understand
            // the purpose of this logic.
            logger.warn("Found too many entries: {}; for file: {}/{}", documents.size(), collectionId, fileId);
            throw new FileSystemAbortTransactionException("lookupByFileId found too many entries");
        }
        return documents.get(0);
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new FileSystemExecutionException("lookupByFileId - execution interrupted", ex);
    } catch (AbortedException | ExecutionException ex) {
        throw handleExecutionException("lookupByFileId", ex);
    }
}
Usage of bio.terra.service.filedata.exception.FileSystemExecutionException in the jade-data-repo project by DataBiosphere.
From the class FireStoreUtils, method handleExecutionException:
// Map an ExecutionException caught from a FireStore future onto our exception taxonomy.
// The interesting information lives in the cause chain, so we unwrap first, then classify:
//  - AbortedException / FirestoreException -> FileSystemAbortTransactionException (retryable)
//  - other RuntimeExceptions               -> returned unchanged for the caller to rethrow
//  - anything else (checked / oddball)     -> wrapped in FileSystemExecutionException
RuntimeException handleExecutionException(ExecutionException ex, String op) {
    // Futures can nest ExecutionExceptions; walk down to the real root cause.
    Throwable cause = ex.getCause();
    while (cause instanceof ExecutionException) {
        cause = cause.getCause();
    }
    if (cause instanceof AbortedException) {
        // TODO: in general, log + rethrow is bad form. For now, I want to make sure we see these in
        // the log as they happen. Once we are comfortable that retry is working properly, we can
        // rely on the Stairway debug logging as needed.
        String msg = "Retrying aborted exception: " + cause;
        logger.info(msg);
        return new FileSystemAbortTransactionException(msg, (AbortedException) cause);
    }
    if (cause instanceof FirestoreException) {
        String msg = "Retrying firestore exception: " + cause;
        logger.info(msg);
        return new FileSystemAbortTransactionException(msg, (FirestoreException) cause);
    }
    if (cause instanceof RuntimeException) {
        return (RuntimeException) cause;
    }
    return new FileSystemExecutionException(op + " - execution exception wrapping: " + cause, cause);
}
Usage of bio.terra.service.filedata.exception.FileSystemExecutionException in the jade-data-repo project by DataBiosphere.
From the class FireStoreUtils, method scanCollectionObjects:
/**
 * Visit every document in a collection in batches of {@code batchSize}, applying
 * {@code func} to each.
 * <p>
 * This code is a bit ugly, but here is why...
 * (from https://cloud.google.com/firestore/docs/solutions/delete-collections)
 * <ul>
 * <li>There is no operation that atomically deletes a collection.</li>
 * <li>Deleting a document does not delete the documents in its subcollections.</li>
 * <li>If your documents have dynamic subcollections, (we don't do this!)
 * it can be hard to know what data to delete for a given path.</li>
 * <li>Deleting a collection of more than 500 documents requires multiple batched
 * write operations or hundreds of single deletes.</li>
 * </ul>
 * <p>
 * Our objects are small, so I think we can use the maximum batch size without
 * concern for using too much memory.
 * <p>
 * NOTE: each iteration re-queries the first {@code batchSize} documents of the
 * collection (there is no cursor). The loop therefore terminates only when a full
 * batch is no longer returned — i.e. {@code func} is expected to remove the
 * documents it visits, as in the collection-delete use case above.
 *
 * @param firestore    the FireStore client
 * @param collectionId name of the collection to scan
 * @param batchSize    maximum documents fetched per query
 * @param func         action applied to each document; expected to delete it
 */
void scanCollectionObjects(Firestore firestore, String collectionId, int batchSize, Consumer<QueryDocumentSnapshot> func) {
    CollectionReference datasetCollection = firestore.collection(collectionId);
    try {
        int batchCount = 0;
        int visited;
        do {
            visited = 0;
            ApiFuture<QuerySnapshot> future = datasetCollection.limit(batchSize).get();
            List<QueryDocumentSnapshot> documents = future.get().getDocuments();
            batchCount++;
            logger.info("Visiting batch {} of ~{} documents", batchCount, batchSize);
            for (QueryDocumentSnapshot document : documents) {
                func.accept(document);
                visited++;
            }
        } while (visited >= batchSize);
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new FileSystemExecutionException("scanning collection - execution interrupted", ex);
    } catch (ExecutionException ex) {
        // Route through the shared handler so retryable FireStore failures are
        // classified the same way as in the other operations in this class.
        throw handleExecutionException(ex, "scanCollectionObjects");
    }
}
Aggregations