Use of com.google.cloud.vision.v1p3beta1.ImageContext in project spring-cloud-gcp by spring-cloud.
In class CloudVisionTemplateTests, method testAddImageContext_analyzeImage:
@Test
public void testAddImageContext_analyzeImage() throws IOException {
  when(this.imageAnnotatorClient.batchAnnotateImages(any(BatchAnnotateImagesRequest.class)))
      .thenReturn(DEFAULT_API_RESPONSE);
  ImageContext imageContext = Mockito.mock(ImageContext.class);
  this.cloudVisionTemplate.analyzeImage(FAKE_IMAGE, imageContext, Type.FACE_DETECTION);
  BatchAnnotateImagesRequest expectedRequest = BatchAnnotateImagesRequest.newBuilder()
      .addRequests(AnnotateImageRequest.newBuilder()
          .addFeatures(Feature.newBuilder().setType(Type.FACE_DETECTION))
          .setImageContext(imageContext)
          .setImage(Image.newBuilder()
              .setContent(ByteString.readFrom(FAKE_IMAGE.getInputStream()))
              .build()))
      .build();
  verify(this.imageAnnotatorClient, times(1)).batchAnnotateImages(expectedRequest);
}
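The test only checks that the supplied ImageContext is copied verbatim into the outgoing request, so it uses a Mockito mock. As a minimal sketch of how the same call might look in application code, a real context could be passed instead; the client setup, file path, and language hint below are illustrative assumptions, and imports are omitted as in the snippets above:
// Sketch only: names and the file path are hypothetical, not taken from the test above.
ImageAnnotatorClient visionClient = ImageAnnotatorClient.create();
CloudVisionTemplate template = new CloudVisionTemplate(visionClient);
// A concrete ImageContext in place of the mock; the language hint is illustrative.
ImageContext context = ImageContext.newBuilder().addLanguageHints("en").build();
AnnotateImageResponse response =
    template.analyzeImage(new FileSystemResource("image.jpg"), context, Type.FACE_DETECTION);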
Use of com.google.cloud.vision.v1p3beta1.ImageContext in project java-docs-samples by GoogleCloudPlatform.
In class Detect, method detectWebEntitiesIncludeGeoResults:
// [START vision_web_entities_include_geo_results]
/**
* Find web entities given a local image.
* @param filePath The path of the image to detect.
* @param out A {@link PrintStream} to write the results to.
* @throws Exception on errors while closing the client.
* @throws IOException on Input/Output errors.
*/
public static void detectWebEntitiesIncludeGeoResults(String filePath, PrintStream out) throws Exception, IOException {
  // Instantiates a client
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    // Read in the local image
    ByteString contents = ByteString.readFrom(new FileInputStream(filePath));
    // Build the image
    Image image = Image.newBuilder().setContent(contents).build();
    // Enable `IncludeGeoResults`
    WebDetectionParams webDetectionParams =
        WebDetectionParams.newBuilder().setIncludeGeoResults(true).build();
    // Set the parameters for the image
    ImageContext imageContext =
        ImageContext.newBuilder().setWebDetectionParams(webDetectionParams).build();
    // Create the request with the image, imageContext, and the specified feature: web detection
    AnnotateImageRequest request = AnnotateImageRequest.newBuilder()
        .addFeatures(Feature.newBuilder().setType(Type.WEB_DETECTION))
        .setImage(image)
        .setImageContext(imageContext)
        .build();
    // Perform the request
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(Arrays.asList(request));
    // Display the results
    response.getResponsesList().stream()
        .forEach(r -> r.getWebDetection().getWebEntitiesList().stream()
            .forEach(entity -> {
              out.format("Description: %s\n", entity.getDescription());
              out.format("Score: %f\n", entity.getScore());
            }));
  }
}
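A possible invocation of this sample, with a placeholder path that is not part of the original snippet:
detectWebEntitiesIncludeGeoResults("path/to/local/image.jpg", System.out);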
Use of com.google.cloud.vision.v1p3beta1.ImageContext in project spring-cloud-gcp by GoogleCloudPlatform.
In class CloudVisionTemplateTests, method testAddImageContext_extractText:
@Test
public void testAddImageContext_extractText() throws IOException {
  when(this.imageAnnotatorClient.batchAnnotateImages(any(BatchAnnotateImagesRequest.class)))
      .thenReturn(DEFAULT_API_RESPONSE);
  ImageContext imageContext = Mockito.mock(ImageContext.class);
  this.cloudVisionTemplate.extractTextFromImage(FAKE_IMAGE, imageContext);
  BatchAnnotateImagesRequest expectedRequest = BatchAnnotateImagesRequest.newBuilder()
      .addRequests(AnnotateImageRequest.newBuilder()
          .addFeatures(Feature.newBuilder().setType(Type.TEXT_DETECTION))
          .setImageContext(imageContext)
          .setImage(Image.newBuilder()
              .setContent(ByteString.readFrom(FAKE_IMAGE.getInputStream()))
              .build()))
      .build();
  verify(this.imageAnnotatorClient, times(1)).batchAnnotateImages(expectedRequest);
}
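In real usage, the context passed to extractTextFromImage would typically carry OCR language hints rather than a mock. A minimal sketch, reusing the hypothetical template from the earlier sketch and an illustrative file name:
// Sketch only: "template" and the file name are assumptions, not part of the test above.
ImageContext ocrContext = ImageContext.newBuilder().addLanguageHints("en").build();
String extractedText = template.extractTextFromImage(new FileSystemResource("scan.png"), ocrContext);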
Use of com.google.cloud.vision.v1p3beta1.ImageContext in project java-vision by googleapis.
In class DetectWebEntitiesIncludeGeoResultsGcs, method detectWebEntitiesIncludeGeoResultsGcs:
// Find web entities given the remote image on Google Cloud Storage.
public static void detectWebEntitiesIncludeGeoResultsGcs(String gcsPath) throws IOException {
  // Initialize a client. Once all requests are done, call the "close" method on the client
  // to safely clean up any remaining background resources (handled here by try-with-resources).
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    // Set the image source to the given gs uri
    ImageSource imageSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
    // Build the image
    Image image = Image.newBuilder().setSource(imageSource).build();
    // Enable `IncludeGeoResults`
    WebDetectionParams webDetectionParams =
        WebDetectionParams.newBuilder().setIncludeGeoResults(true).build();
    // Set the parameters for the image
    ImageContext imageContext =
        ImageContext.newBuilder().setWebDetectionParams(webDetectionParams).build();
    // Create the request with the image, imageContext, and the specified feature: web detection
    AnnotateImageRequest request = AnnotateImageRequest.newBuilder()
        .addFeatures(Feature.newBuilder().setType(Feature.Type.WEB_DETECTION))
        .setImage(image)
        .setImageContext(imageContext)
        .build();
    // Perform the request
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(Arrays.asList(request));
    // Display the results
    response.getResponsesList().stream()
        .forEach(r -> r.getWebDetection().getWebEntitiesList().stream()
            .forEach(entity -> {
              System.out.format("Description: %s%n", entity.getDescription());
              System.out.format("Score: %f%n", entity.getScore());
            }));
  }
}
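A possible invocation, with a placeholder Cloud Storage URI:
detectWebEntitiesIncludeGeoResultsGcs("gs://your-bucket/your-image.jpg");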
Use of com.google.cloud.vision.v1p3beta1.ImageContext in project java-vision by googleapis.
In class DetectBeta, method detectHandwrittenOcr:
// [START vision_handwritten_ocr_beta]
/**
* Performs handwritten text detection on a local image file.
*
* @param filePath The path to the local file to detect handwritten text on.
* @param out A {@link PrintStream} to write the results to.
* @throws Exception on errors while closing the client.
* @throws IOException on Input/Output errors.
*/
public static void detectHandwrittenOcr(String filePath, PrintStream out) throws Exception {
  List<AnnotateImageRequest> requests = new ArrayList<>();
  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));
  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
  // Set the Language Hint codes for handwritten OCR
  ImageContext imageContext =
      ImageContext.newBuilder().addLanguageHints("en-t-i0-handwrit").build();
  AnnotateImageRequest request = AnnotateImageRequest.newBuilder()
      .addFeatures(feat)
      .setImage(img)
      .setImageContext(imageContext)
      .build();
  requests.add(request);
  // The try-with-resources block closes the client when the requests are done.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();
    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }
      // For full list of available annotations, see http://g.co/cloud/vision/docs
      TextAnnotation annotation = res.getFullTextAnnotation();
      for (Page page : annotation.getPagesList()) {
        String pageText = "";
        for (Block block : page.getBlocksList()) {
          String blockText = "";
          for (Paragraph para : block.getParagraphsList()) {
            String paraText = "";
            for (Word word : para.getWordsList()) {
              String wordText = "";
              for (Symbol symbol : word.getSymbolsList()) {
                wordText = wordText + symbol.getText();
                out.format("Symbol text: %s (confidence: %f)\n",
                    symbol.getText(), symbol.getConfidence());
              }
              out.format("Word text: %s (confidence: %f)\n\n", wordText, word.getConfidence());
              paraText = String.format("%s %s", paraText, wordText);
            }
            // Output Example using Paragraph:
            out.println("\nParagraph: \n" + paraText);
            out.format("Paragraph Confidence: %f\n", para.getConfidence());
            blockText = blockText + paraText;
          }
          pageText = pageText + blockText;
        }
      }
      out.println("\nComplete annotation:");
      out.println(annotation.getText());
    }
  }
}
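A possible invocation, writing the results to standard output; the file path is a placeholder:
detectHandwrittenOcr("path/to/handwriting.png", System.out);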