Use of com.google.cloud.vision.v1p3beta1.AnnotateImageRequest in project java-vision by googleapis.
From the class AsyncBatchAnnotateImagesGcs, the method asyncBatchAnnotateImagesGcs.
// Performs asynchronous batch annotation of images on Google Cloud Storage
public static void asyncBatchAnnotateImagesGcs(String gcsSourcePath, String gcsDestinationPath) throws Exception {
// String gcsDestinationPath = "gs://YOUR_BUCKET_ID/path_to_store_annotation";
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
List<AnnotateImageRequest> requests = new ArrayList<>();
ImageSource imgSource = ImageSource.newBuilder().setImageUri(gcsSourcePath).build();
Image image = Image.newBuilder().setSource(imgSource).build();
// Set the GCS destination path for where to save the results.
GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(gcsDestinationPath).build();
// Create the configuration for the output with the batch size.
// The batch size sets how many pages should be grouped into each json output file.
OutputConfig outputConfig = OutputConfig.newBuilder().setGcsDestination(gcsDestination).setBatchSize(2).build();
// Select the Features required by the Vision API. Each feature needs its own Feature object;
// chaining setType on a single builder would keep only the last type that was set.
Feature labelFeature = Feature.newBuilder().setType(Type.LABEL_DETECTION).build();
Feature textFeature = Feature.newBuilder().setType(Type.TEXT_DETECTION).build();
Feature imagePropertiesFeature = Feature.newBuilder().setType(Type.IMAGE_PROPERTIES).build();
// Build the request with the image and all requested features
AnnotateImageRequest annotateImageRequest = AnnotateImageRequest.newBuilder().setImage(image).addFeatures(labelFeature).addFeatures(textFeature).addFeatures(imagePropertiesFeature).build();
requests.add(annotateImageRequest);
AsyncBatchAnnotateImagesRequest request = AsyncBatchAnnotateImagesRequest.newBuilder().addAllRequests(requests).setOutputConfig(outputConfig).build();
OperationFuture<AsyncBatchAnnotateImagesResponse, OperationMetadata> response = client.asyncBatchAnnotateImagesAsync(request);
System.out.println("Waiting for the operation to finish.");
// We're not processing the response here, since the output will be read from GCS.
response.get(180, TimeUnit.SECONDS);
// Once the request has completed and the output has been
// written to GCS, we can list all the output files.
Storage storage = StorageOptions.getDefaultInstance().getService();
// Get the destination location from the gcsDestinationPath
Pattern pattern = Pattern.compile("gs://([^/]+)/(.+)");
Matcher matcher = pattern.matcher(gcsDestinationPath);
if (matcher.find()) {
String bucketName = matcher.group(1);
String prefix = matcher.group(2);
// Get the list of objects with the given prefix from the GCS bucket
Bucket bucket = storage.get(bucketName);
Page<Blob> pageList = bucket.list(BlobListOption.prefix(prefix));
Blob firstOutputFile = null;
// List objects with the given prefix.
System.out.println("Output files:");
for (Blob blob : pageList.iterateAll()) {
System.out.println(blob.getName());
// With a batch size of 2, the first output file contains the responses for the first two image requests.
if (firstOutputFile == null) {
firstOutputFile = blob;
}
}
// Get the contents of the file and convert the JSON contents to a
// BatchAnnotateImagesResponse object. If the Blob is small, read all of its
// content in one request (note: the file is a .json file).
// Storage guide: https://cloud.google.com/storage/docs/downloading-objects
String jsonContents = new String(firstOutputFile.getContent());
Builder builder = BatchAnnotateImagesResponse.newBuilder();
JsonFormat.parser().merge(jsonContents, builder);
// Build the BatchAnnotateImagesResponse object
BatchAnnotateImagesResponse batchAnnotateImagesResponse = builder.build();
// Here we print the response for the first image
// The response contains more information:
// annotation/pages/blocks/paragraphs/words/symbols/colors
// including confidence score and bounding boxes
System.out.format("\nResponse: %s\n", batchAnnotateImagesResponse.getResponses(0));
} else {
System.out.println("No match found for GCS destination path: " + gcsDestinationPath);
}
} catch (Exception e) {
System.out.println("Error during asyncBatchAnnotateImagesGcs: \n" + e.toString());
}
}
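For reference, a minimal sketch of how this method might be invoked. The bucket paths below are hypothetical placeholders, not values from the sample, and the wrapper class name is an assumption.
public class AsyncBatchAnnotateImagesGcsExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical placeholder paths; replace with your own bucket and object prefix.
    String gcsSourcePath = "gs://YOUR_BUCKET_ID/path_to_source_image";
    String gcsDestinationPath = "gs://YOUR_BUCKET_ID/path_to_store_annotation/";
    AsyncBatchAnnotateImagesGcs.asyncBatchAnnotateImagesGcs(gcsSourcePath, gcsDestinationPath);
  }
}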
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageRequest in project java-vision by googleapis.
From the class Detect, the method detectDocumentText.
/**
* Performs document text detection on a local image file.
*
* @param filePath The path to the local file to detect document text on.
 * @throws IOException on Input/Output errors, including errors while closing the client.
*/
// [START vision_fulltext_detection]
public static void detectDocumentText(String filePath) throws IOException {
List<AnnotateImageRequest> requests = new ArrayList<>();
ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));
Image img = Image.newBuilder().setContent(imgBytes).build();
Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
AnnotateImageRequest request = AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests. After completing all of your requests, call
// the "close" method on the client to safely clean up any remaining background resources.
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
// The client is closed automatically by the try-with-resources block, so no explicit close() is needed.
for (AnnotateImageResponse res : responses) {
if (res.hasError()) {
System.out.format("Error: %s%n", res.getError().getMessage());
return;
}
// For full list of available annotations, see http://g.co/cloud/vision/docs
TextAnnotation annotation = res.getFullTextAnnotation();
for (Page page : annotation.getPagesList()) {
String pageText = "";
for (Block block : page.getBlocksList()) {
String blockText = "";
for (Paragraph para : block.getParagraphsList()) {
String paraText = "";
for (Word word : para.getWordsList()) {
String wordText = "";
for (Symbol symbol : word.getSymbolsList()) {
wordText = wordText + symbol.getText();
System.out.format("Symbol text: %s (confidence: %f)%n", symbol.getText(), symbol.getConfidence());
}
System.out.format("Word text: %s (confidence: %f)%n%n", wordText, word.getConfidence());
paraText = String.format("%s %s", paraText, wordText);
}
// Output Example using Paragraph:
System.out.format("%nParagraph: %n%s%n", paraText);
System.out.format("Paragraph Confidence: %f%n", para.getConfidence());
blockText = blockText + paraText;
}
pageText = pageText + blockText;
}
}
System.out.format("%nComplete annotation:%n");
System.out.println(annotation.getText());
}
}
}
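A minimal sketch of a caller for the snippet above, assuming the method lives in the Detect class as shown; the file path is a hypothetical placeholder.
public class DetectDocumentTextExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical local path to an image containing dense text.
    String filePath = "./resources/document.jpg";
    Detect.detectDocumentText(filePath);
  }
}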
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageRequest in project java-vision by googleapis.
From the class DetectSafeSearchGcs, the method detectSafeSearchGcs.
// Detects whether the specified image on Google Cloud Storage has features you would want to
// moderate.
public static void detectSafeSearchGcs(String gcsPath) throws IOException {
List<AnnotateImageRequest> requests = new ArrayList<>();
ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
Image img = Image.newBuilder().setSource(imgSource).build();
Feature feat = Feature.newBuilder().setType(Type.SAFE_SEARCH_DETECTION).build();
AnnotateImageRequest request = AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests. After completing all of your requests, call
// the "close" method on the client to safely clean up any remaining background resources.
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
for (AnnotateImageResponse res : responses) {
if (res.hasError()) {
System.out.format("Error: %s%n", res.getError().getMessage());
return;
}
// For full list of available annotations, see http://g.co/cloud/vision/docs
SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
System.out.format("adult: %s%nmedical: %s%nspoofed: %s%nviolence: %s%nracy: %s%n", annotation.getAdult(), annotation.getMedical(), annotation.getSpoof(), annotation.getViolence(), annotation.getRacy());
}
}
}
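The fields printed above (adult, medical, spoof, violence, racy) are Likelihood enum values rather than booleans. Below is a minimal sketch of a moderation check built on top of a SafeSearchAnnotation; the choice of LIKELY as the threshold and the helper names are assumptions, not part of the sample.
import com.google.cloud.vision.v1p3beta1.Likelihood;
import com.google.cloud.vision.v1p3beta1.SafeSearchAnnotation;

public class SafeSearchModerationSketch {
  // Hypothetical helper: flag the image when adult, violence, or racy content is LIKELY or stronger.
  static boolean shouldModerate(SafeSearchAnnotation annotation) {
    return isAtLeastLikely(annotation.getAdult())
        || isAtLeastLikely(annotation.getViolence())
        || isAtLeastLikely(annotation.getRacy());
  }

  private static boolean isAtLeastLikely(Likelihood likelihood) {
    return likelihood == Likelihood.LIKELY || likelihood == Likelihood.VERY_LIKELY;
  }
}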
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageRequest in project java-vision by googleapis.
From the class DetectText, the method detectText.
// Detects text in the specified image.
public static void detectText(String filePath) throws IOException {
List<AnnotateImageRequest> requests = new ArrayList<>();
ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));
Image img = Image.newBuilder().setContent(imgBytes).build();
Feature feat = Feature.newBuilder().setType(Feature.Type.TEXT_DETECTION).build();
AnnotateImageRequest request = AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests. After completing all of your requests, call
// the "close" method on the client to safely clean up any remaining background resources.
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
for (AnnotateImageResponse res : responses) {
if (res.hasError()) {
System.out.format("Error: %s%n", res.getError().getMessage());
return;
}
// For full list of available annotations, see http://g.co/cloud/vision/docs
for (EntityAnnotation annotation : res.getTextAnnotationsList()) {
System.out.format("Text: %s%n", annotation.getDescription());
System.out.format("Position: %s%n", annotation.getBoundingPoly());
}
}
}
}
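A hypothetical invocation of detectText; the image path is a placeholder.
public class DetectTextExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical local path to an image containing text.
    DetectText.detectText("./resources/text.jpg");
  }
}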
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageRequest in project java-vision by googleapis.
From the class DetectWebDetections, the method detectWebDetections.
// Finds references to the specified image on the web.
public static void detectWebDetections(String filePath) throws IOException {
List<AnnotateImageRequest> requests = new ArrayList<>();
ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));
Image img = Image.newBuilder().setContent(imgBytes).build();
Feature feat = Feature.newBuilder().setType(Type.WEB_DETECTION).build();
AnnotateImageRequest request = AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
// Initialize client that will be used to send requests. This client only needs to be created
// once, and can be reused for multiple requests. After completing all of your requests, call
// the "close" method on the client to safely clean up any remaining background resources.
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
for (AnnotateImageResponse res : responses) {
if (res.hasError()) {
System.out.format("Error: %s%n", res.getError().getMessage());
return;
}
// Search the web for usages of the image. You could use these signals later
// for user input moderation or linking external references.
// For a full list of available annotations, see http://g.co/cloud/vision/docs
WebDetection annotation = res.getWebDetection();
System.out.println("Entity:Id:Score");
System.out.println("===============");
for (WebEntity entity : annotation.getWebEntitiesList()) {
System.out.println(entity.getDescription() + " : " + entity.getEntityId() + " : " + entity.getScore());
}
for (WebLabel label : annotation.getBestGuessLabelsList()) {
System.out.format("%nBest guess label: %s", label.getLabel());
}
System.out.format("%nPages with matching images: Score%n==%n");
for (WebPage page : annotation.getPagesWithMatchingImagesList()) {
System.out.println(page.getUrl() + " : " + page.getScore());
}
System.out.format("%nPages with partially matching images: Score%n==%n");
for (WebImage image : annotation.getPartialMatchingImagesList()) {
System.out.println(image.getUrl() + " : " + image.getScore());
}
System.out.format("%nPages with fully matching images: Score%n==%n");
for (WebImage image : annotation.getFullMatchingImagesList()) {
System.out.println(image.getUrl() + " : " + image.getScore());
}
System.out.format("%nPages with visually similar images: Score%n==%n");
for (WebImage image : annotation.getVisuallySimilarImagesList()) {
System.out.println(image.getUrl() + " : " + image.getScore());
}
}
}
}
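As with the other snippets, a hypothetical caller; the image path is a placeholder.
public class DetectWebDetectionsExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical local path to an image to look up on the web.
    DetectWebDetections.detectWebDetections("./resources/landmark.jpg");
  }
}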