use of com.google.cloud.vision.v1p4beta1.InputConfig in project java-translate by googleapis.
the class BatchTranslateTextWithGlossary method batchTranslateTextWithGlossary.
// Batch Translate Text with a Glossary.
public static void batchTranslateTextWithGlossary(String projectId, String sourceLanguage, String targetLanguage, String inputUri, String outputUri, String glossaryId) throws IOException, ExecutionException, InterruptedException, TimeoutException {
// the "close" method on the client to safely clean up any remaining background resources.
try (TranslationServiceClient client = TranslationServiceClient.create()) {
// Supported Locations: `global`, [glossary location], or [model location]
// Glossaries must be hosted in `us-central1`
// Custom Models must use the same location as your model. (us-central1)
String location = "us-central1";
LocationName parent = LocationName.of(projectId, location);
// Configure the source of the file from a GCS bucket
GcsSource gcsSource = GcsSource.newBuilder().setInputUri(inputUri).build();
// Supported Mime Types: https://cloud.google.com/translate/docs/supported-formats
InputConfig inputConfig = InputConfig.newBuilder().setGcsSource(gcsSource).setMimeType("text/plain").build();
// Configure where to store the output in a GCS bucket
GcsDestination gcsDestination = GcsDestination.newBuilder().setOutputUriPrefix(outputUri).build();
OutputConfig outputConfig = OutputConfig.newBuilder().setGcsDestination(gcsDestination).build();
// Configure the glossary used in the request
GlossaryName glossaryName = GlossaryName.of(projectId, location, glossaryId);
TranslateTextGlossaryConfig glossaryConfig = TranslateTextGlossaryConfig.newBuilder().setGlossary(glossaryName.toString()).build();
// Build the request that will be sent to the API
BatchTranslateTextRequest request = BatchTranslateTextRequest.newBuilder()
    .setParent(parent.toString())
    .setSourceLanguageCode(sourceLanguage)
    .addTargetLanguageCodes(targetLanguage)
    .addInputConfigs(inputConfig)
    .setOutputConfig(outputConfig)
    .putGlossaries(targetLanguage, glossaryConfig)
    .build();
// Start an asynchronous request
OperationFuture<BatchTranslateResponse, BatchTranslateMetadata> future = client.batchTranslateTextAsync(request);
System.out.println("Waiting for operation to complete...");
// Wait a randomized timeout between 450 and 600 seconds for the operation to complete
long randomNumber = ThreadLocalRandom.current().nextInt(450, 600);
BatchTranslateResponse response = future.get(randomNumber, TimeUnit.SECONDS);
// Display the translation for each input text provided
System.out.printf("Total Characters: %s\n", response.getTotalCharacters());
System.out.printf("Translated Characters: %s\n", response.getTranslatedCharacters());
}
}
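A minimal invocation sketch is shown below; the project ID, bucket URIs, and glossary ID are placeholders, not values taken from the sample.
// Hypothetical values for illustration only.
String projectId = "my-project-id";
String inputUri = "gs://my-bucket/input/text.txt"; // plain-text source file
String outputUri = "gs://my-bucket/output/"; // prefix under which results are written
String glossaryId = "my-glossary-id"; // glossary previously created in us-central1
batchTranslateTextWithGlossary(projectId, "en", "ja", inputUri, outputUri, glossaryId);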
use of com.google.cloud.vision.v1p4beta1.InputConfig in project java-automl by googleapis.
the class ImportDataset method importDataset.
// Import a dataset
static void importDataset(String projectId, String datasetId, String path) throws IOException, ExecutionException, InterruptedException, TimeoutException {
// Give the import operation a longer total timeout so the client does not stop waiting too early.
Duration totalTimeout = Duration.ofMinutes(45);
RetrySettings retrySettings = RetrySettings.newBuilder().setTotalTimeout(totalTimeout).build();
// Apply the retry settings to the importData call on the client settings builder.
AutoMlSettings.Builder builder = AutoMlSettings.newBuilder();
builder.importDataSettings().setRetrySettings(retrySettings);
AutoMlSettings settings = builder.build();
// the "close" method on the client to safely clean up any remaining background resources.
try (AutoMlClient client = AutoMlClient.create(settings)) {
// Get the complete path of the dataset.
DatasetName datasetFullId = DatasetName.of(projectId, "us-central1", datasetId);
// Get multiple Google Cloud Storage URIs to import data from
GcsSource gcsSource = GcsSource.newBuilder().addAllInputUris(Arrays.asList(path.split(","))).build();
// Import data from the input URI
InputConfig inputConfig = InputConfig.newBuilder().setGcsSource(gcsSource).build();
System.out.println("Processing import...");
// Start the import job
OperationFuture<Empty, OperationMetadata> operation = client.importDataAsync(datasetFullId, inputConfig);
System.out.format("Operation name: %s%n", operation.getName());
// If you want to wait for the operation to finish, adjust the timeout appropriately. The
// operation will still run if you choose not to wait for it to complete. You can check the
// status of your operation using the operation's name.
Empty response = operation.get(45, TimeUnit.MINUTES);
System.out.format("Dataset imported. %s%n", response);
} catch (TimeoutException e) {
System.out.println("The operation's polling period was not long enough.");
System.out.println("You can use the Operation's name to get the current status.");
System.out.println("The import job is still running and will complete as expected.");
throw e;
}
}
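If the wait above times out, the import keeps running on the server. A rough sketch of checking it later by the operation name printed above (stored here in a hypothetical operationName variable) could look like this:
// Sketch only: poll the long-running operation by name at a later time.
try (AutoMlClient client = AutoMlClient.create()) {
  com.google.longrunning.Operation op = client.getOperationsClient().getOperation(operationName);
  System.out.println("Import done: " + op.getDone());
}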
use of com.google.cloud.vision.v1p4beta1.InputConfig in project java-automl by googleapis.
the class ImportDataset method importDataset.
// Import a dataset
static void importDataset(String projectId, String datasetId, String path) throws IOException, ExecutionException, InterruptedException, TimeoutException {
// the "close" method on the client to safely clean up any remaining background resources.
try (AutoMlClient client = AutoMlClient.create()) {
// Get the complete path of the dataset.
DatasetName datasetFullId = DatasetName.of(projectId, "us-central1", datasetId);
// Get multiple Google Cloud Storage URIs to import data from
GcsSource gcsSource = GcsSource.newBuilder().addAllInputUris(Arrays.asList(path.split(","))).build();
// Import data from the input URI
InputConfig inputConfig = InputConfig.newBuilder().setGcsSource(gcsSource).build();
System.out.println("Processing import...");
// Start the import job
OperationFuture<Empty, OperationMetadata> operation = client.importDataAsync(datasetFullId, inputConfig);
System.out.format("Operation name: %s%n", operation.getName());
// If you want to wait for the operation to finish, adjust the timeout appropriately. The
// operation will still run if you choose not to wait for it to complete. You can check the
// status of your operation using the operation's name.
Empty response = operation.get(45, TimeUnit.MINUTES);
System.out.format("Dataset imported. %s%n", response);
} catch (TimeoutException e) {
System.out.println("The operation's polling period was not long enough.");
System.out.println("You can use the Operation's name to get the current status.");
System.out.println("The import job is still running and will complete as expected.");
throw e;
}
}
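Because the sample splits the path argument on commas, a single string can reference several Cloud Storage CSV files at once; for example (bucket and file names are hypothetical):
String path = "gs://my-bucket/csv/train.csv,gs://my-bucket/csv/test.csv";
importDataset("my-project-id", "my-dataset-id", path);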
use of com.google.cloud.vision.v1p4beta1.InputConfig in project spring-cloud-gcp by GoogleCloudPlatform.
the class DocumentOcrTemplate method runOcrForDocument.
/**
* Runs OCR processing for a specified {@code document} and generates OCR output files under the
* path specified by {@code outputFilePathPrefix}.
*
* <p>For example, if you specify an {@code outputFilePathPrefix} of
* "gs://bucket_name/ocr_results/myDoc_", all the output files of OCR processing will be saved
* under that prefix, such as:
*
* <ul>
* <li>gs://bucket_name/ocr_results/myDoc_output-1-to-5.json
* <li>gs://bucket_name/ocr_results/myDoc_output-6-to-10.json
* <li>gs://bucket_name/ocr_results/myDoc_output-11-to-15.json
* </ul>
*
* <p>Note: OCR processing operations may take several minutes to complete, so it may not be
* advisable to block on the completion of the operation. One may use the returned {@link
* ListenableFuture} to register callbacks or track the status of the operation.
*
* @param document The {@link GoogleStorageLocation} of the document to run OCR processing on
* @param outputFilePathPrefix The {@link GoogleStorageLocation} of a file, folder, or bucket
* describing the path under which all output files will be saved
* @return A {@link ListenableFuture} allowing you to register callbacks or wait for the
* completion of the operation.
*/
public ListenableFuture<DocumentOcrResultSet> runOcrForDocument(GoogleStorageLocation document, GoogleStorageLocation outputFilePathPrefix) {
Assert.isTrue(document.isFile(), "Provided document location is not a valid file location: " + document);
GcsSource gcsSource = GcsSource.newBuilder().setUri(document.uriString()).build();
String contentType = extractContentType(document);
InputConfig inputConfig = InputConfig.newBuilder().setMimeType(contentType).setGcsSource(gcsSource).build();
GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(outputFilePathPrefix.uriString()).build();
OutputConfig outputConfig = OutputConfig.newBuilder().setGcsDestination(gcsDestination).setBatchSize(this.jsonOutputBatchSize).build();
AsyncAnnotateFileRequest request = AsyncAnnotateFileRequest.newBuilder().addFeatures(DOCUMENT_OCR_FEATURE).setInputConfig(inputConfig).setOutputConfig(outputConfig).build();
OperationFuture<AsyncBatchAnnotateFilesResponse, OperationMetadata> result = imageAnnotatorClient.asyncBatchAnnotateFilesAsync(Collections.singletonList(request));
return extractOcrResultFuture(result);
}
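A hedged usage sketch, assuming a configured DocumentOcrTemplate bean named documentOcrTemplate and placeholder bucket and object names; GoogleStorageLocation.forFile is used on the assumption that this factory method is available:
// Sketch only; all names below are placeholders.
GoogleStorageLocation document = GoogleStorageLocation.forFile("my-bucket", "docs/myDoc.pdf");
GoogleStorageLocation outputPrefix = GoogleStorageLocation.forFile("my-bucket", "ocr_results/myDoc_");
ListenableFuture<DocumentOcrResultSet> future = documentOcrTemplate.runOcrForDocument(document, outputPrefix);
// Register callbacks instead of blocking, since OCR can take several minutes.
future.addCallback(
    resultSet -> System.out.println("OCR completed."),
    error -> System.err.println("OCR failed: " + error.getMessage()));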
use of com.google.cloud.vision.v1p4beta1.InputConfig in project java-vision by googleapis.
the class BatchAnnotateFilesGcs method batchAnnotateFilesGcs.
public static void batchAnnotateFilesGcs(String gcsUri) throws IOException {
// the "close" method on the client to safely clean up any remaining background resources.
try (ImageAnnotatorClient imageAnnotatorClient = ImageAnnotatorClient.create()) {
// You can send multiple files to be annotated; this sample demonstrates how to do it with
// a single file. If you want to annotate multiple files, you have to create an
// `AnnotateFileRequest` object for each file that you want annotated.
// First specify where the vision api can find the image
GcsSource gcsSource = GcsSource.newBuilder().setUri(gcsUri).build();
// Specify the input config with the file's uri and its type.
// Supported mime_type: application/pdf, image/tiff, image/gif
// https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#inputconfig
InputConfig inputConfig = InputConfig.newBuilder().setMimeType("application/pdf").setGcsSource(gcsSource).build();
// Set the type of annotation you want to perform on the file
// https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#google.cloud.vision.v1.Feature.Type
Feature feature = Feature.newBuilder().setType(Feature.Type.DOCUMENT_TEXT_DETECTION).build();
// Build the request object for that one file. Note: for additional files you have to create
// additional `AnnotateFileRequest` objects and store them in a list to be used below.
// Since we are sending a file of type `application/pdf`, we can use the `pages` field to
// specify which pages to process. The service can process up to 5 pages per document file.
// https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#google.cloud.vision.v1.AnnotateFileRequest
AnnotateFileRequest fileRequest = AnnotateFileRequest.newBuilder()
    .setInputConfig(inputConfig)
    .addFeatures(feature)
    .addPages(1)  // Process the first page
    .addPages(2)  // Process the second page
    .addPages(-1) // Process the last page
    .build();
// Add each `AnnotateFileRequest` object to the batch request.
BatchAnnotateFilesRequest request = BatchAnnotateFilesRequest.newBuilder().addRequests(fileRequest).build();
// Make the synchronous batch request.
BatchAnnotateFilesResponse response = imageAnnotatorClient.batchAnnotateFiles(request);
// Only one file was sent in this sample, so read the annotations from the first (and only) AnnotateFileResponse.
for (AnnotateImageResponse imageResponse : response.getResponsesList().get(0).getResponsesList()) {
System.out.format("Full text: %s%n", imageResponse.getFullTextAnnotation().getText());
for (Page page : imageResponse.getFullTextAnnotation().getPagesList()) {
for (Block block : page.getBlocksList()) {
System.out.format("%nBlock confidence: %s%n", block.getConfidence());
for (Paragraph par : block.getParagraphsList()) {
System.out.format("\tParagraph confidence: %s%n", par.getConfidence());
for (Word word : par.getWordsList()) {
System.out.format("\t\tWord confidence: %s%n", word.getConfidence());
for (Symbol symbol : word.getSymbolsList()) {
System.out.format("\t\t\tSymbol: %s, (confidence: %s)%n", symbol.getText(), symbol.getConfidence());
}
}
}
}
}
}
}
}
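A minimal call sketch; the bucket and file name are placeholders:
// Hypothetical PDF stored in Cloud Storage.
batchAnnotateFilesGcs("gs://my-bucket/docs/sample.pdf");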