Use of com.google.api.gax.rpc.AbortedException in project jade-data-repo by DataBiosphere.
In the class FireStoreDirectoryDao, the method lookupByFileId:
// Returns null if not found
private QueryDocumentSnapshot lookupByFileId(
    Firestore firestore, String collectionId, String fileId, Transaction xn) {
  try {
    CollectionReference datasetCollection = firestore.collection(collectionId);
    Query query = datasetCollection.whereEqualTo("fileId", fileId);
    ApiFuture<QuerySnapshot> querySnapshot = xn.get(query);
    List<QueryDocumentSnapshot> documents = querySnapshot.get().getDocuments();
    if (documents.size() == 0) {
      return null;
    }
    if (documents.size() != 1) {
      // TODO: We have seen duplicate documents as a result of concurrency issues.
      // The query.get() does not appear to be reliably transactional. That may
      // be a FireStore bug. Regardless, we treat this as a retryable situation.
      // It *might* be a corruption bug on our side. If so, the retry will consistently
      // fail and eventually give up. When debugging that case, one will have to understand
      // the purpose of this logic.
      logger.warn("Found too many entries: {}; for file: {}/{}", documents.size(), collectionId, fileId);
      throw new FileSystemAbortTransactionException("lookupByFileId found too many entries");
    }
    return documents.get(0);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
    throw new FileSystemExecutionException("lookupByFileId - execution interrupted", ex);
  } catch (AbortedException | ExecutionException ex) {
    // Argument order matches handleExecutionException(Throwable ex, String op) below.
    throw handleExecutionException(ex, "lookupByFileId");
  }
}
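The FileSystemAbortTransactionException thrown above is intended to be consumed by a caller-side retry (in the project this is the surrounding Stairway step machinery). A minimal sketch of such a loop, assuming lookupByFileId is accessible, FileSystemAbortTransactionException is a RuntimeException, and FileSystemExecutionException has a single-argument constructor; retryLookup, MAX_ATTEMPTS, and the backoff are all illustrative, not part of jade-data-repo:

// Hypothetical retry wrapper; in the real project the retry is driven by Stairway.
private QueryDocumentSnapshot retryLookup(Firestore firestore, String collectionId, String fileId)
    throws InterruptedException {
  final int MAX_ATTEMPTS = 5; // illustrative bound
  for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
    try {
      // Each attempt runs in a fresh Firestore transaction.
      return firestore
          .runTransaction(xn -> lookupByFileId(firestore, collectionId, fileId, xn))
          .get();
    } catch (ExecutionException ex) {
      // Unwrap nested ExecutionExceptions, mirroring handleExecutionException below.
      Throwable cause = ex;
      while (cause instanceof ExecutionException) {
        cause = cause.getCause();
      }
      if (cause instanceof FileSystemAbortTransactionException && attempt < MAX_ATTEMPTS) {
        Thread.sleep(100L * attempt); // simple linear backoff
        continue;
      }
      throw new FileSystemExecutionException("retryLookup - lookup failed", ex);
    }
  }
  throw new FileSystemExecutionException("retryLookup - retries exhausted");
}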
Use of com.google.api.gax.rpc.AbortedException in project jade-data-repo by DataBiosphere.
In the class FireStoreUtils, the method handleExecutionException:
RuntimeException handleExecutionException(Throwable ex, String op) {
  // An ExecutionException wraps the underlying exception caught in the FireStore Future, so we
  // unwrap any (possibly nested) ExecutionException and examine the root cause to decide what to do.
  // Possible outcomes:
  // - FileSystemAbortTransactionException for retryable firestore exceptions to ask the step to retry
  // - FileSystemExecutionException for other firestore exceptions
  // - RuntimeExceptions to expose other unexpected exceptions
  // - FileSystemExecutionException to wrap non-Runtime (oddball) exceptions
  Throwable throwable = ex;
  while (throwable instanceof ExecutionException) {
    throwable = throwable.getCause();
  }
  if (throwable instanceof AbortedException) {
    AbortedException aex = (AbortedException) throwable;
    // TODO: in general, log + rethrow is bad form. For now, I want to make sure we see these in
    // the log as they happen. Once we are comfortable that retry is working properly, we can
    // rely on the Stairway debug logging as needed.
    String msg = "Retrying aborted exception: " + aex;
    logger.info(msg);
    return new FileSystemAbortTransactionException(msg, aex);
  }
  if (throwable instanceof FirestoreException) {
    FirestoreException fex = (FirestoreException) throwable;
    String msg = "Retrying firestore exception: " + fex;
    logger.info(msg);
    return new FileSystemAbortTransactionException(msg, fex);
  }
  if (throwable instanceof RuntimeException) {
    return (RuntimeException) throwable;
  }
  return new FileSystemExecutionException(op + " - execution exception wrapping: " + throwable, throwable);
}
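To make the unwrapping loop concrete, here is an illustrative call; the fireStoreUtils instance and the "exampleOp" operation name are placeholders, not code from the project. A doubly nested ExecutionException around an AbortedException is classified as retryable:

// Illustrative only; fireStoreUtils and "exampleOp" are placeholders.
ExecutionException nested =
    new ExecutionException(
        new ExecutionException(
            new AbortedException(
                "aborted", null, GrpcStatusCode.of(Code.ABORTED), /* retryable= */ true)));
RuntimeException mapped = fireStoreUtils.handleExecutionException(nested, "exampleOp");
// The loop unwraps both ExecutionException layers, finds the AbortedException,
// and returns a FileSystemAbortTransactionException so the calling step retries.
assert mapped instanceof FileSystemAbortTransactionException;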
Use of com.google.api.gax.rpc.AbortedException in project java-spanner by googleapis.
In the class PartitionedDmlTransaction, the method executeStreamingPartitionedUpdate:
/**
 * Executes the {@link Statement} using a partitioned DML transaction with automatic retry if the
 * transaction was aborted. The update method uses the ExecuteStreamingSql RPC to execute the
 * statement, and will retry the stream if an {@link UnavailableException} is thrown, using the
 * last seen resume token if the server returns any.
 */
long executeStreamingPartitionedUpdate(
    final Statement statement, final Duration timeout, final UpdateOption... updateOptions) {
  checkState(isValid, "Partitioned DML has been invalidated by a new operation on the session");
  LOGGER.log(Level.FINER, "Starting PartitionedUpdate statement");
  ByteString resumeToken = ByteString.EMPTY;
  boolean foundStats = false;
  long updateCount = 0L;
  Stopwatch stopwatch = Stopwatch.createStarted(ticker);
  Options options = Options.fromUpdateOptions(updateOptions);
  try {
    ExecuteSqlRequest request = newTransactionRequestFrom(statement, options);
    while (true) {
      final Duration remainingTimeout = tryUpdateTimeout(timeout, stopwatch);
      try {
        ServerStream<PartialResultSet> stream =
            rpc.executeStreamingPartitionedDml(request, session.getOptions(), remainingTimeout);
        for (PartialResultSet rs : stream) {
          if (rs.getResumeToken() != null && !rs.getResumeToken().isEmpty()) {
            resumeToken = rs.getResumeToken();
          }
          if (rs.hasStats()) {
            foundStats = true;
            updateCount += rs.getStats().getRowCountLowerBound();
          }
        }
        break;
      } catch (UnavailableException e) {
        LOGGER.log(Level.FINER, "Retrying PartitionedDml transaction after UnavailableException", e);
        // Resume from the last seen resume token, or restart the stream if none was received.
        request = resumeOrRestartRequest(resumeToken, statement, request, options);
      } catch (InternalException e) {
        if (!isRetryableInternalErrorPredicate.apply(e)) {
          throw e;
        }
        LOGGER.log(Level.FINER, "Retrying PartitionedDml transaction after InternalException - EOS", e);
        request = resumeOrRestartRequest(resumeToken, statement, request, options);
      } catch (AbortedException e) {
        LOGGER.log(Level.FINER, "Retrying PartitionedDml transaction after AbortedException", e);
        // An abort invalidates the whole transaction: discard any partial progress
        // and start over with a fresh transaction.
        resumeToken = ByteString.EMPTY;
        foundStats = false;
        updateCount = 0L;
        request = newTransactionRequestFrom(statement, options);
      }
    }
    if (!foundStats) {
      throw SpannerExceptionFactory.newSpannerException(
          ErrorCode.INVALID_ARGUMENT,
          "Partitioned DML response missing stats possibly due to non-DML statement as input");
    }
    LOGGER.log(Level.FINER, "Finished PartitionedUpdate statement");
    return updateCount;
  } catch (Exception e) {
    throw SpannerExceptionFactory.newSpannerException(e);
  }
}
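resumeOrRestartRequest and tryUpdateTimeout are referenced but not shown in this excerpt. A plausible sketch of the resume-or-restart decision, inferred only from the call sites above (the actual java-spanner implementation may differ):

// Hypothetical reconstruction, inferred from the call sites above; not the
// verbatim java-spanner implementation.
private ExecuteSqlRequest resumeOrRestartRequest(
    ByteString resumeToken, Statement statement, ExecuteSqlRequest original, Options options) {
  if (resumeToken.isEmpty()) {
    // No data was received on the current stream: restart with a fresh
    // partitioned DML transaction, exactly as the AbortedException branch does.
    return newTransactionRequestFrom(statement, options);
  }
  // Some data was received: re-attach to the stream at the last checkpoint by
  // sending the same request (same transaction) with the last seen resume token.
  return original.toBuilder().setResumeToken(resumeToken).build();
}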
Use of com.google.api.gax.rpc.AbortedException in project java-spanner by googleapis.
In the class PartitionedDmlTransactionTest, the method testExecuteStreamingPartitionedUpdateAborted:
@Test
public void testExecuteStreamingPartitionedUpdateAborted() {
  ResultSetStats stats = ResultSetStats.newBuilder().setRowCountLowerBound(1000L).build();
  PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build();
  PartialResultSet p2 = PartialResultSet.newBuilder().setStats(stats).build();
  ServerStream<PartialResultSet> stream1 = mock(ServerStream.class);
  Iterator<PartialResultSet> iterator = mock(Iterator.class);
  when(iterator.hasNext()).thenReturn(true, true, false);
  when(iterator.next())
      .thenReturn(p1)
      .thenThrow(new AbortedException("transaction aborted", null, GrpcStatusCode.of(Code.ABORTED), true));
  when(stream1.iterator()).thenReturn(iterator);
  ServerStream<PartialResultSet> stream2 = mock(ServerStream.class);
  when(stream2.iterator()).thenReturn(ImmutableList.of(p1, p2).iterator());
  when(rpc.executeStreamingPartitionedDml(any(ExecuteSqlRequest.class), anyMap(), any(Duration.class)))
      .thenReturn(stream1, stream2);
  long count = tx.executeStreamingPartitionedUpdate(Statement.of(sql), Duration.ofMinutes(10));
  assertThat(count).isEqualTo(1000L);
  verify(rpc, times(2)).beginTransaction(any(BeginTransactionRequest.class), anyMap());
  verify(rpc, times(2))
      .executeStreamingPartitionedDml(Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(Duration.class));
}
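The two beginTransaction calls and the two identical execute requests are a direct consequence of the AbortedException branch above: it clears the resume token back to ByteString.EMPTY and rebuilds the request with newTransactionRequestFrom, so the retry starts a brand-new transaction whose request, like the first one, carries no resume token and therefore matches executeRequestWithoutResumeToken.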
Use of com.google.api.gax.rpc.AbortedException in project java-spanner by googleapis.
In the class PartitionedDmlTransactionTest, the method testExecuteStreamingPartitionedUpdateMultipleAbortsUntilDeadlineExceeded:
@Test
public void testExecuteStreamingPartitionedUpdateMultipleAbortsUntilDeadlineExceeded() {
  PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build();
  ServerStream<PartialResultSet> stream1 = mock(ServerStream.class);
  Iterator<PartialResultSet> iterator = mock(Iterator.class);
  when(iterator.hasNext()).thenReturn(true);
  when(iterator.next())
      .thenReturn(p1)
      .thenThrow(new AbortedException("transaction aborted", null, GrpcStatusCode.of(Code.ABORTED), true));
  when(stream1.iterator()).thenReturn(iterator);
  when(rpc.executeStreamingPartitionedDml(Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(Duration.class)))
      .thenReturn(stream1);
  when(ticker.read())
      .thenAnswer(new Answer<Long>() {
        long ticks = 0L;

        @Override
        public Long answer(InvocationOnMock invocation) {
          return TimeUnit.NANOSECONDS.convert(++ticks, TimeUnit.MINUTES);
        }
      });
  SpannerException e =
      assertThrows(
          SpannerException.class,
          () -> tx.executeStreamingPartitionedUpdate(Statement.of(sql), Duration.ofMinutes(10)));
  assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode());
  // It should start a transaction exactly 10 times (10 ticks == 10 minutes).
  verify(rpc, times(10)).beginTransaction(any(BeginTransactionRequest.class), anyMap());
  // The last transaction should time out before it starts the actual statement execution, which
  // means that the execute method is only executed 9 times.
  verify(rpc, times(9))
      .executeStreamingPartitionedDml(Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(Duration.class));
}
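The deadline in this test is driven entirely by the mocked Ticker: Stopwatch.createStarted(ticker) reads it when started and on each elapsed-time check, and every read advances the fake clock by one minute. For clarity, an equivalent hand-rolled Guava Ticker would look like this (fakeTicker is illustrative, not part of the test):

// Equivalent to the Mockito Answer above: each read() advances the fake clock
// by one minute, so ten stopwatch reads consume the ten-minute timeout.
Ticker fakeTicker =
    new Ticker() {
      private long ticks = 0L;

      @Override
      public long read() {
        return TimeUnit.NANOSECONDS.convert(++ticks, TimeUnit.MINUTES);
      }
    };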