Use of com.google.cloud.vision.v1.ProductSearchResults.Result in project spring-cloud-gcp by spring-cloud.
The class DocumentOcrResultSet, method getAllPages.
/**
* Returns an {@link Iterator} over all the OCR pages of the document.
*
* @return iterator of {@link TextAnnotation} describing OCR content of each page in the
* document.
*/
public Iterator<TextAnnotation> getAllPages() {
  return new Iterator<TextAnnotation>() {
    private final Iterator<OcrPageRange> pageRangeIterator = ocrPageRanges.values().iterator();
    private int offset = 0;
    private List<TextAnnotation> currentPageRange = Collections.emptyList();

    @Override
    public boolean hasNext() {
      return pageRangeIterator.hasNext() || offset < currentPageRange.size();
    }

    @Override
    public TextAnnotation next() {
      if (!hasNext()) {
        throw new NoSuchElementException("No more pages left in DocumentOcrResultSet.");
      }
      // Advance to the next page range once the current one is exhausted.
      if (offset >= currentPageRange.size()) {
        OcrPageRange pageRange = pageRangeIterator.next();
        offset = 0;
        try {
          currentPageRange = pageRange.getPages();
        } catch (InvalidProtocolBufferException e) {
          throw new RuntimeException(
              "Failed to parse OCR output from JSON output file " + pageRange.getBlob().getName(), e);
        }
      }
      TextAnnotation result = currentPageRange.get(offset);
      offset++;
      return result;
    }
  };
}
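A minimal usage sketch of getAllPages, assuming the result set comes from a DocumentOcrTemplate via readOcrOutputFileSet; the bucket name and folder path are placeholders, not taken from the snippet above.
// Hypothetical usage: bucket name and folder are illustrative placeholders.
DocumentOcrResultSet resultSet = documentOcrTemplate.readOcrOutputFileSet(
    GoogleStorageLocation.forFolder("bucket_name", "ocr_results/"));
Iterator<TextAnnotation> pages = resultSet.getAllPages();
while (pages.hasNext()) {
  TextAnnotation page = pages.next();
  System.out.println(page.getText());
}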
Use of com.google.cloud.vision.v1.ProductSearchResults.Result in project spring-cloud-gcp by spring-cloud.
The class CloudVisionTemplate, method extractTextFromImage.
/**
* Extract the text out of an image and return the result as a String.
* @param imageResource the image one wishes to analyze
* @param imageContext the image context to customize the text extraction request
* @return the text extracted from the image aggregated to a String
* @throws CloudVisionException if the image could not be read or if text extraction failed
*/
public String extractTextFromImage(Resource imageResource, ImageContext imageContext) {
  AnnotateImageResponse response = analyzeImage(imageResource, imageContext, Type.TEXT_DETECTION);
  String result = response.getFullTextAnnotation().getText();
  // An empty result combined with a non-OK status code means the Vision API reported an error.
  if (result.isEmpty() && response.getError().getCode() != Code.OK.getNumber()) {
    throw new CloudVisionException(response.getError().getMessage());
  }
  return result;
}
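A minimal usage sketch, assuming a CloudVisionTemplate bean is available; the classpath image and the language hint are illustrative assumptions.
// Hypothetical usage: image path and language hint are placeholders.
Resource image = new ClassPathResource("images/receipt.png");
ImageContext context = ImageContext.newBuilder().addLanguageHints("en").build();
String text = cloudVisionTemplate.extractTextFromImage(image, context);
System.out.println(text);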
Use of com.google.cloud.vision.v1.ProductSearchResults.Result in project spring-cloud-gcp by spring-cloud.
The class DocumentOcrTemplate, method runOcrForDocument.
/**
* Runs OCR processing for a specified {@code document} and generates OCR output files
* under the path specified by {@code outputFilePathPrefix}.
*
* <p>
* For example, if you specify an {@code outputFilePathPrefix} of
* "gs://bucket_name/ocr_results/myDoc_", all the output files of OCR processing will be
* saved under that prefix, such as:
*
* <ul>
* <li>gs://bucket_name/ocr_results/myDoc_output-1-to-5.json
* <li>gs://bucket_name/ocr_results/myDoc_output-6-to-10.json
* <li>gs://bucket_name/ocr_results/myDoc_output-11-to-15.json
* </ul>
*
* <p>
* Note: OCR processing operations may take several minutes to complete, so it may not be
* advisable to block on the completion of the operation. One may use the returned
* {@link ListenableFuture} to register callbacks or track the status of the operation.
*
* @param document The {@link GoogleStorageLocation} of the document to run OCR processing on
* @param outputFilePathPrefix The {@link GoogleStorageLocation} of a file, folder, or a
* bucket describing the path under which all output files will be saved
*
* @return A {@link ListenableFuture} allowing you to register callbacks or wait for the
* completion of the operation.
*/
public ListenableFuture<DocumentOcrResultSet> runOcrForDocument(
    GoogleStorageLocation document, GoogleStorageLocation outputFilePathPrefix) {
  Assert.isTrue(document.isFile(),
      "Provided document location is not a valid file location: " + document);
  GcsSource gcsSource = GcsSource.newBuilder().setUri(document.uriString()).build();
  String contentType = extractContentType(document);
  InputConfig inputConfig =
      InputConfig.newBuilder().setMimeType(contentType).setGcsSource(gcsSource).build();
  GcsDestination gcsDestination =
      GcsDestination.newBuilder().setUri(outputFilePathPrefix.uriString()).build();
  OutputConfig outputConfig = OutputConfig.newBuilder()
      .setGcsDestination(gcsDestination).setBatchSize(this.jsonOutputBatchSize).build();
  AsyncAnnotateFileRequest request = AsyncAnnotateFileRequest.newBuilder()
      .addFeatures(DOCUMENT_OCR_FEATURE).setInputConfig(inputConfig)
      .setOutputConfig(outputConfig).build();
  OperationFuture<AsyncBatchAnnotateFilesResponse, OperationMetadata> result =
      imageAnnotatorClient.asyncBatchAnnotateFilesAsync(Collections.singletonList(request));
  return extractOcrResultFuture(result);
}
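A minimal usage sketch that blocks on the returned future, assuming a DocumentOcrTemplate bean; the bucket and object names are illustrative assumptions.
// Hypothetical usage: bucket and object names are placeholders.
GoogleStorageLocation document = GoogleStorageLocation.forFile("bucket_name", "documents/myDoc.pdf");
GoogleStorageLocation outputPrefix = GoogleStorageLocation.forFile("bucket_name", "ocr_results/myDoc_");
ListenableFuture<DocumentOcrResultSet> future =
    documentOcrTemplate.runOcrForDocument(document, outputPrefix);
// get() blocks until the OCR operation finishes; prefer a callback for long-running documents.
DocumentOcrResultSet resultSet = future.get();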
Use of com.google.cloud.vision.v1.ProductSearchResults.Result in project spring-cloud-gcp by spring-cloud.
The class VisionApiSampleApplicationTests, method testClassifyImageLabels.
@Test
public void testClassifyImageLabels() throws Exception {
  this.mockMvc.perform(get(LABEL_IMAGE_URL)).andDo((response) -> {
    ModelAndView result = response.getModelAndView();
    List<EntityAnnotation> annotations =
        (List<EntityAnnotation>) result.getModelMap().get("annotations");
    List<String> annotationNames = annotations.stream()
        .map(annotation -> annotation.getDescription().toLowerCase().trim())
        .collect(Collectors.toList());
    assertThat(annotationNames).contains("dog");
  });
}
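The test above asserts on label annotations placed in the model; a minimal sketch of how such annotations might be obtained with CloudVisionTemplate is shown below, with the image path as an illustrative assumption.
// Hypothetical sketch of the label detection exercised by the test above.
AnnotateImageResponse response = cloudVisionTemplate.analyzeImage(
    new ClassPathResource("static/boston-terrier.jpg"), Type.LABEL_DETECTION);
List<EntityAnnotation> labels = response.getLabelAnnotationsList();
labels.forEach(label -> System.out.println(label.getDescription() + " : " + label.getScore()));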
Use of com.google.cloud.vision.v1.ProductSearchResults.Result in project spring-cloud-gcp by GoogleCloudPlatform.
The class DocumentOcrTemplate, method runOcrForDocument.
/**
* Runs OCR processing for a specified {@code document} and generates OCR output files under the
* path specified by {@code outputFilePathPrefix}.
*
* <p>For example, if you specify an {@code outputFilePathPrefix} of
* "gs://bucket_name/ocr_results/myDoc_", all the output files of OCR processing will be saved
* under that prefix, such as:
*
* <ul>
* <li>gs://bucket_name/ocr_results/myDoc_output-1-to-5.json
* <li>gs://bucket_name/ocr_results/myDoc_output-6-to-10.json
* <li>gs://bucket_name/ocr_results/myDoc_output-11-to-15.json
* </ul>
*
* <p>Note: OCR processing operations may take several minutes to complete, so it may not be
* advisable to block on the completion of the operation. One may use the returned {@link
* ListenableFuture} to register callbacks or track the status of the operation.
*
* @param document The {@link GoogleStorageLocation} of the document to run OCR processing on
* @param outputFilePathPrefix The {@link GoogleStorageLocation} of a file, folder, or a bucket
* describing the path under which all output files will be saved
* @return A {@link ListenableFuture} allowing you to register callbacks or wait for the
* completion of the operation.
*/
public ListenableFuture<DocumentOcrResultSet> runOcrForDocument(
    GoogleStorageLocation document, GoogleStorageLocation outputFilePathPrefix) {
  Assert.isTrue(document.isFile(),
      "Provided document location is not a valid file location: " + document);
  GcsSource gcsSource = GcsSource.newBuilder().setUri(document.uriString()).build();
  String contentType = extractContentType(document);
  InputConfig inputConfig =
      InputConfig.newBuilder().setMimeType(contentType).setGcsSource(gcsSource).build();
  GcsDestination gcsDestination =
      GcsDestination.newBuilder().setUri(outputFilePathPrefix.uriString()).build();
  OutputConfig outputConfig = OutputConfig.newBuilder()
      .setGcsDestination(gcsDestination).setBatchSize(this.jsonOutputBatchSize).build();
  AsyncAnnotateFileRequest request = AsyncAnnotateFileRequest.newBuilder()
      .addFeatures(DOCUMENT_OCR_FEATURE).setInputConfig(inputConfig)
      .setOutputConfig(outputConfig).build();
  OperationFuture<AsyncBatchAnnotateFilesResponse, OperationMetadata> result =
      imageAnnotatorClient.asyncBatchAnnotateFilesAsync(Collections.singletonList(request));
  return extractOcrResultFuture(result);
}
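As the Javadoc notes, callers may prefer registering callbacks over blocking on the operation; a minimal sketch, assuming a DocumentOcrTemplate bean and placeholder storage locations.
// Hypothetical usage: register callbacks instead of blocking; locations are placeholders.
ListenableFuture<DocumentOcrResultSet> future = documentOcrTemplate.runOcrForDocument(
    GoogleStorageLocation.forFile("bucket_name", "documents/myDoc.pdf"),
    GoogleStorageLocation.forFile("bucket_name", "ocr_results/myDoc_"));
future.addCallback(
    resultSet -> resultSet.getAllPages()
        .forEachRemaining(page -> System.out.println(page.getText())),
    error -> System.err.println("OCR failed: " + error.getMessage()));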