Use of com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse in the Apache Beam project.
From the class FakeDatasetService, method finalizeWriteStream.
@Override
public ApiFuture<FinalizeWriteStreamResponse> finalizeWriteStream(String streamName) {
  // All stream bookkeeping in this fake is guarded by the shared `tables` lock.
  synchronized (tables) {
    Stream toFinalize = writeStreams.get(streamName);
    if (toFinalize == null) {
      throw new RuntimeException("No such stream: " + streamName);
    }
    // Finalizing the stream yields the number of rows it accepted; echo that
    // back in the response, mirroring the real service's contract.
    long finalizedRowCount = toFinalize.finalizeStream();
    FinalizeWriteStreamResponse response =
        FinalizeWriteStreamResponse.newBuilder().setRowCount(finalizedRowCount).build();
    return ApiFutures.immediateFuture(response);
  }
}
Use of com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse in the Apache Beam project.
From the class StorageApiFinalizeWritesDoFn, method process.
@ProcessElement
@SuppressWarnings({"nullness"})
public void process(PipelineOptions pipelineOptions, @Element KV<String, String> element) throws Exception {
  // Element is a (tableId, streamId) pair: finalize the stream, then remember it
  // so it can be batch-committed for its table.
  final String tableId = element.getKey();
  final String streamId = element.getValue();
  final DatasetService datasetService = getDatasetService(pipelineOptions);

  // Retry policy: 1s initial backoff, 1m cap, 3 attempts.
  RetryManager<FinalizeWriteStreamResponse, Context<FinalizeWriteStreamResponse>> retryManager =
      new RetryManager<>(Duration.standardSeconds(1), Duration.standardMinutes(1), 3);
  retryManager.addOperation(
      // runOperation: issue the finalize RPC.
      ctx -> {
        finalizeOperationsSent.inc();
        return datasetService.finalizeWriteStream(streamId);
      },
      // onError: log the first failure and retry the whole batch.
      failedContexts -> {
        LOG.error("Finalize of stream " + streamId + " failed with " + Iterables.getFirst(failedContexts, null).getError());
        finalizeOperationsFailed.inc();
        return RetryType.RETRY_ALL_OPERATIONS;
      },
      // onSuccess: record the finalized stream under its table for later commit.
      successContext -> {
        LOG.info("Finalize of stream " + streamId + " finished with " + successContext.getResult());
        finalizeOperationsSucceeded.inc();
        commitStreams.computeIfAbsent(tableId, unused -> Lists.newArrayList()).add(streamId);
      },
      new Context<>());
  retryManager.run(true);
}
Use of com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse in the Apache Beam project.
From the class StorageApiFlushAndFinalizeDoFn, method process.
@SuppressWarnings({"nullness"})
@ProcessElement
public void process(PipelineOptions pipelineOptions, @Element KV<String, Operation> element) throws Exception {
  // Element is a (streamId, Operation) pair describing what to do to the stream:
  // flush it up to an offset, finalize it, or both.
  final String streamId = element.getKey();
  final Operation operation = element.getValue();
  final DatasetService datasetService = getDatasetService(pipelineOptions);

  // Flush the stream. If the flush offset < 0, that means we only need to finalize.
  long offset = operation.flushOffset;
  if (offset >= 0) {
    flushStream(datasetService, streamId, offset);
  }

  // NOTE(review): the original comment here was truncated ("... or we would end up with
  // duplicates."); finalizing appears to be required to prevent duplicate rows — confirm
  // against the upstream Beam source before relying on this rationale.
  if (operation.finalizeStream) {
    finalizeStream(datasetService, streamId);
  }
}

/**
 * Flushes {@code streamId} up to {@code offset} with retries, updating the flush metric
 * counters and recording end-to-end flush latency in {@code flushLatencyDistribution}.
 *
 * @throws Exception if the retry loop ultimately gives up.
 */
private void flushStream(DatasetService datasetService, String streamId, long offset)
    throws Exception {
  Instant now = Instant.now();
  // Retry policy: 1s initial backoff, 1m cap, 3 attempts.
  RetryManager<FlushRowsResponse, Context<FlushRowsResponse>> retryManager =
      new RetryManager<>(Duration.standardSeconds(1), Duration.standardMinutes(1), 3);
  retryManager.addOperation(
      // runOperation
      c -> {
        try {
          flushOperationsSent.inc();
          return datasetService.flush(streamId, offset);
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      },
      // onError: some status codes mean the flush is effectively done and must not be retried.
      contexts -> {
        Throwable error = Iterables.getFirst(contexts, null).getError();
        LOG.warn("Flush of stream " + streamId + " to offset " + offset + " failed with " + error);
        flushOperationsFailed.inc();
        if (error instanceof ApiException) {
          Code statusCode = ((ApiException) error).getStatusCode().getCode();
          if (statusCode.equals(Code.ALREADY_EXISTS)) {
            flushOperationsAlreadyExists.inc();
            // Implies that we have already flushed up to this point, so don't retry.
            return RetryType.DONT_RETRY;
          }
          if (statusCode.equals(Code.INVALID_ARGUMENT)) {
            flushOperationsInvalidArgument.inc();
            // TODO: Storage API should provide a more-specific way of identifying this failure.
            return RetryType.DONT_RETRY;
          }
        }
        return RetryType.RETRY_ALL_OPERATIONS;
      },
      // onSuccess
      c -> flushOperationsSucceeded.inc(),
      new Context<>());
  retryManager.run(true);
  java.time.Duration timeElapsed = java.time.Duration.between(now, Instant.now());
  flushLatencyDistribution.update(timeElapsed.toMillis());
}

/**
 * Finalizes {@code streamId} with retries, updating the finalize metric counters.
 *
 * @throws Exception if the retry loop ultimately gives up.
 */
private void finalizeStream(DatasetService datasetService, String streamId) throws Exception {
  // Retry policy: 1s initial backoff, 1m cap, 3 attempts.
  RetryManager<FinalizeWriteStreamResponse, Context<FinalizeWriteStreamResponse>> retryManager =
      new RetryManager<>(Duration.standardSeconds(1), Duration.standardMinutes(1), 3);
  retryManager.addOperation(
      // runOperation
      c -> {
        finalizeOperationsSent.inc();
        return datasetService.finalizeWriteStream(streamId);
      },
      // onError: finalize is idempotent enough to retry the whole batch.
      contexts -> {
        LOG.warn("Finalize of stream " + streamId + " failed with " + Iterables.getFirst(contexts, null).getError());
        finalizeOperationsFailed.inc();
        return RetryType.RETRY_ALL_OPERATIONS;
      },
      // onSuccess
      r -> finalizeOperationsSucceeded.inc(),
      new Context<>());
  retryManager.run(true);
}
Aggregations