Use of com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse in the Apache Beam project.
From the class FakeDatasetService, method commitWriteStreams:
@Override
public ApiFuture<BatchCommitWriteStreamsResponse> commitWriteStreams(
    String tableUrn, Iterable<String> writeStreamNames) {
  synchronized (tables) {
    for (String streamName : writeStreamNames) {
      Stream stream = writeStreams.get(streamName);
      if (stream == null) {
        throw new RuntimeException("No such stream: " + streamName);
      }
      // Mark the in-memory fake stream as committed.
      stream.commit();
    }
  }
  // Return an already-completed future whose response carries an (empty) commit timestamp.
  return ApiFutures.immediateFuture(
      BatchCommitWriteStreamsResponse.newBuilder()
          .setCommitTime(Timestamp.newBuilder().build())
          .build());
}
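For context, a test might call this fake and inspect the returned future roughly as below. This is only a sketch: the class name CommitWriteStreamsExample, the method commitAndReport, and the table URN are illustrative, and it assumes the FakeDatasetService shown above is available in the same package.

import com.google.api.core.ApiFuture;
import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse;
import com.google.cloud.bigquery.storage.v1.StorageError;

public class CommitWriteStreamsExample {
  // Commits the given write streams through the fake service and prints the outcome.
  static void commitAndReport(
      FakeDatasetService datasetService, String tableUrn, Iterable<String> streamNames)
      throws Exception {
    ApiFuture<BatchCommitWriteStreamsResponse> future =
        datasetService.commitWriteStreams(tableUrn, streamNames);
    // The fake returns an already-completed future, so get() does not block.
    BatchCommitWriteStreamsResponse response = future.get();
    if (response.hasCommitTime()) {
      System.out.println("Committed at " + response.getCommitTime());
    } else {
      // A missing commit time means the commit did not fully succeed; inspect per-stream errors.
      for (StorageError error : response.getStreamErrorsList()) {
        System.out.println(error.getCode() + ": " + error.getErrorMessage());
      }
    }
  }
}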
From the class StorageApiFinalizeWritesDoFn, method finishBundle:
@FinishBundle
@SuppressWarnings({"nullness"})
public void finishBundle(PipelineOptions pipelineOptions) throws Exception {
  DatasetService datasetService = getDatasetService(pipelineOptions);
  for (Map.Entry<String, Collection<String>> entry : commitStreams.entrySet()) {
    final String tableId = entry.getKey();
    final Collection<String> streamNames = entry.getValue();
    final Set<String> alreadyCommittedStreams = Sets.newHashSet();
    RetryManager<BatchCommitWriteStreamsResponse, Context<BatchCommitWriteStreamsResponse>>
        retryManager =
            new RetryManager<>(Duration.standardSeconds(1), Duration.standardMinutes(1), 3);
    retryManager.addOperation(
        // Operation: commit every stream that has not already been committed.
        c -> {
          Iterable<String> streamsToCommit =
              Iterables.filter(streamNames, s -> !alreadyCommittedStreams.contains(s));
          batchCommitOperationsSent.inc();
          return datasetService.commitWriteStreams(tableId, streamsToCommit);
        },
        // Failure callback: log the first error and retry all operations.
        contexts -> {
          LOG.error(
              "BatchCommit failed. tableId "
                  + tableId
                  + " streamNames "
                  + streamNames
                  + " error: "
                  + Iterables.getFirst(contexts, null).getError());
          batchCommitOperationsFailed.inc();
          return RetryType.RETRY_ALL_OPERATIONS;
        },
        // Success callback.
        c -> {
          LOG.info("BatchCommit succeeded for tableId " + tableId + " response " + c.getResult());
          batchCommitOperationsSucceeded.inc();
        },
        // Response inspection: decide whether the response counts as success.
        response -> {
          if (!response.hasCommitTime()) {
            for (StorageError storageError : response.getStreamErrorsList()) {
              if (storageError.getCode() == StorageErrorCode.STREAM_ALREADY_COMMITTED) {
                // Make sure that we don't retry any streams that are already committed.
                alreadyCommittedStreams.add(storageError.getEntity());
              }
            }
            Iterable<String> streamsToCommit =
                Iterables.filter(streamNames, s -> !alreadyCommittedStreams.contains(s));
            // If every stream was already committed, treat the commit as successful;
            // otherwise the remaining streams will be retried.
            return Iterables.isEmpty(streamsToCommit);
          }
          return true;
        },
        new Context<>());
    retryManager.run(true);
  }
}
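The final lambda above is what decides whether a commit is retried. A minimal, standalone sketch of that decision logic, using only the protobuf classes from google-cloud-bigquerystorage, might look like the following; the class CommitResponseCheckExample and the method commitSucceeded are hypothetical names, not part of Beam.

import com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse;
import com.google.cloud.bigquery.storage.v1.StorageError;
import com.google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class CommitResponseCheckExample {
  // Mirrors the response callback above: returns true when the commit can be treated as
  // successful, false when the streams not yet committed should be retried.
  static boolean commitSucceeded(
      BatchCommitWriteStreamsResponse response,
      List<String> streamNames,
      Set<String> alreadyCommittedStreams) {
    if (response.hasCommitTime()) {
      return true; // The whole batch committed.
    }
    for (StorageError error : response.getStreamErrorsList()) {
      if (error.getCode() == StorageErrorCode.STREAM_ALREADY_COMMITTED) {
        // Record streams that are already committed so they are not retried.
        alreadyCommittedStreams.add(error.getEntity());
      }
    }
    // Success only if every stream in the batch turned out to be already committed.
    return alreadyCommittedStreams.containsAll(streamNames);
  }

  public static void main(String[] args) {
    BatchCommitWriteStreamsResponse response =
        BatchCommitWriteStreamsResponse.newBuilder()
            .addStreamErrors(
                StorageError.newBuilder()
                    .setCode(StorageErrorCode.STREAM_ALREADY_COMMITTED)
                    .setEntity("stream-1"))
            .build();
    Set<String> committed = new HashSet<>();
    // Every stream already committed: treated as success.
    System.out.println(commitSucceeded(response, Arrays.asList("stream-1"), committed));
    // An uncommitted stream remains: would be retried.
    System.out.println(commitSucceeded(response, Arrays.asList("stream-1", "stream-2"), committed));
  }
}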