Use of com.google.cloud.vision.v1p3beta1.AnnotateImageRequest in project java-vision by googleapis.
The class DetectBeta, method detectLocalizedObjectsGcs.
// [START vision_localize_objects_gcs_beta]
/**
* Detects localized objects in a remote image on Google Cloud Storage.
*
* @param gcsPath The path to the remote file on Google Cloud Storage to detect localized objects
* on.
* @param out A {@link PrintStream} to write detected objects to.
* @throws Exception on errors while closing the client.
* @throws IOException on Input/Output errors.
*/
public static void detectLocalizedObjectsGcs(String gcsPath, PrintStream out) throws Exception {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();

  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder()
          .addFeatures(Feature.newBuilder().setType(Type.OBJECT_LOCALIZATION))
          .setImage(img)
          .build();
  requests.add(request);

  // Perform the request; the try-with-resources block closes the client automatically.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    // Display the results
    for (AnnotateImageResponse res : responses) {
      for (LocalizedObjectAnnotation entity : res.getLocalizedObjectAnnotationsList()) {
        out.format("Object name: %s\n", entity.getName());
        out.format("Confidence: %s\n", entity.getScore());
        out.format("Normalized Vertices:\n");
        entity
            .getBoundingPoly()
            .getNormalizedVerticesList()
            .forEach(vertex -> out.format("- (%s, %s)\n", vertex.getX(), vertex.getY()));
      }
    }
  }
}
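For orientation, a minimal sketch of how this helper could be invoked; the bucket and object name below are placeholders rather than files shipped with the sample:

// Hypothetical caller: replace the GCS URI with an image in your own bucket.
public static void main(String[] args) throws Exception {
  detectLocalizedObjectsGcs("gs://my-bucket/my-image.jpg", System.out);
}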
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageRequest in project java-vision by googleapis.
The class DetectBeta, method detectHandwrittenOcr.
// [START vision_handwritten_ocr_beta]
/**
* Performs handwritten text detection on a local image file.
*
* @param filePath The path to the local file to detect handwritten text on.
* @param out A {@link PrintStream} to write the results to.
* @throws Exception on errors while closing the client.
* @throws IOException on Input/Output errors.
*/
public static void detectHandwrittenOcr(String filePath, PrintStream out) throws Exception {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));
  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
  // Set the language hint code for handwritten OCR
  ImageContext imageContext =
      ImageContext.newBuilder().addLanguageHints("en-t-i0-handwrit").build();

  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder()
          .addFeatures(feat)
          .setImage(img)
          .setImageContext(imageContext)
          .build();
  requests.add(request);

  // The try-with-resources block closes the client automatically.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For the full list of available annotations, see http://g.co/cloud/vision/docs
      TextAnnotation annotation = res.getFullTextAnnotation();
      for (Page page : annotation.getPagesList()) {
        String pageText = "";
        for (Block block : page.getBlocksList()) {
          String blockText = "";
          for (Paragraph para : block.getParagraphsList()) {
            String paraText = "";
            for (Word word : para.getWordsList()) {
              String wordText = "";
              for (Symbol symbol : word.getSymbolsList()) {
                wordText = wordText + symbol.getText();
                out.format(
                    "Symbol text: %s (confidence: %f)\n", symbol.getText(), symbol.getConfidence());
              }
              out.format("Word text: %s (confidence: %f)\n\n", wordText, word.getConfidence());
              paraText = String.format("%s %s", paraText, wordText);
            }
            // Output example using the accumulated paragraph text:
            out.println("\nParagraph: \n" + paraText);
            out.format("Paragraph Confidence: %f\n", para.getConfidence());
            blockText = blockText + paraText;
          }
          pageText = pageText + blockText;
        }
      }

      out.println("\nComplete annotation:");
      out.println(annotation.getText());
    }
  }
}
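As with the GCS variant above, a short hypothetical caller; the file path is a placeholder for a local image that contains handwriting:

// Hypothetical caller: point the path at a local image with handwritten text.
public static void main(String[] args) throws Exception {
  detectHandwrittenOcr("./resources/handwritten.jpg", System.out);
}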
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageRequest in project java-vision by googleapis.
The class ProductSearch, method getSimilarProductsGcs.
// [START vision_product_search_get_similar_products_gcs]
/**
 * Search for products similar to an image in Google Cloud Storage.
 *
 * @param projectId - Id of the project.
 * @param computeRegion - Region name.
 * @param productSetId - Id of the product set.
 * @param productCategory - Category of the product.
 * @param gcsUri - GCS file path of the image to be searched.
 * @param filter - Condition to be applied on the labels. Example filter: "(color = red OR
 *     color = blue) AND style = kids". It matches all products labeled either color:red AND
 *     style:kids, or color:blue AND style:kids.
 * @throws Exception - on errors.
 */
public static void getSimilarProductsGcs(String projectId, String computeRegion,
    String productSetId, String productCategory, String gcsUri, String filter) throws Exception {
  try (ImageAnnotatorClient queryImageClient = ImageAnnotatorClient.create()) {
    // Get the full path of the product set.
    String productSetPath = ProductSetName.of(projectId, computeRegion, productSetId).toString();

    // Get the image from Google Cloud Storage.
    ImageSource source = ImageSource.newBuilder().setGcsImageUri(gcsUri).build();

    // Create the annotate image request along with the product search feature.
    Feature featuresElement = Feature.newBuilder().setType(Type.PRODUCT_SEARCH).build();
    Image image = Image.newBuilder().setSource(source).build();
    ImageContext imageContext =
        ImageContext.newBuilder()
            .setProductSearchParams(
                ProductSearchParams.newBuilder()
                    .setProductSet(productSetPath)
                    .addProductCategories(productCategory)
                    .setFilter(filter))
            .build();
    AnnotateImageRequest annotateImageRequest =
        AnnotateImageRequest.newBuilder()
            .addFeatures(featuresElement)
            .setImage(image)
            .setImageContext(imageContext)
            .build();
    List<AnnotateImageRequest> requests = Arrays.asList(annotateImageRequest);

    // Search for products similar to the image.
    BatchAnnotateImagesResponse response = queryImageClient.batchAnnotateImages(requests);
    List<Result> similarProducts =
        response.getResponses(0).getProductSearchResults().getResultsList();

    System.out.println("Similar Products: ");
    for (Result product : similarProducts) {
      System.out.println(String.format("\nProduct name: %s", product.getProduct().getName()));
      System.out.println(
          String.format("Product display name: %s", product.getProduct().getDisplayName()));
      System.out.println(
          String.format("Product description: %s", product.getProduct().getDescription()));
      System.out.println(String.format("Score(Confidence): %s", product.getScore()));
      System.out.println(String.format("Image name: %s", product.getImage()));
    }
  }
}
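A hedged sketch of a call site using the filter syntax described in the Javadoc above; every argument value is a placeholder for resources you would already have created (project, product set, indexed products, and a reference image in GCS):

// Hypothetical invocation: all IDs, the bucket path, and the filter are placeholders.
public static void main(String[] args) throws Exception {
  getSimilarProductsGcs(
      "my-project-id",                                     // projectId
      "us-west1",                                          // computeRegion
      "my-product-set-id",                                 // productSetId
      "apparel",                                           // productCategory
      "gs://my-bucket/shoe.jpg",                           // gcsUri
      "(color = red OR color = blue) AND style = kids");   // filter
}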
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageRequest in project java-vision by googleapis.
The class QuickstartSample, method main.
public static void main(String... args) throws Exception {
  // Initialize the client. The try-with-resources block calls the "close" method on the client
  // to safely clean up any remaining background resources.
  try (ImageAnnotatorClient vision = ImageAnnotatorClient.create()) {
    // The path to the image file to annotate
    String fileName = "./resources/wakeupcat.jpg";

    // Reads the image file into memory
    Path path = Paths.get(fileName);
    byte[] data = Files.readAllBytes(path);
    ByteString imgBytes = ByteString.copyFrom(data);

    // Builds the image annotation request
    List<AnnotateImageRequest> requests = new ArrayList<>();
    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Type.LABEL_DETECTION).build();
    AnnotateImageRequest request =
        AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
    requests.add(request);

    // Performs label detection on the image file
    BatchAnnotateImagesResponse response = vision.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }
      for (EntityAnnotation annotation : res.getLabelAnnotationsList()) {
        annotation
            .getAllFields()
            .forEach((k, v) -> System.out.format("%s : %s%n", k, v.toString()));
      }
    }
  }
}
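If the full field dump above is more verbose than needed, the same responses list can be walked to print just the label text and score. This is only an alternative sketch, not part of the quickstart itself; getDescription() and getScore() are standard EntityAnnotation accessors:

// Alternative output: print only each label's description and confidence score.
for (AnnotateImageResponse res : responses) {
  for (EntityAnnotation annotation : res.getLabelAnnotationsList()) {
    System.out.format("%s : %.3f%n", annotation.getDescription(), annotation.getScore());
  }
}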
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageRequest in project java-vision by googleapis.
The class DetectCropHints, method detectCropHints.
// Suggests a region to crop to for a local file.
public static void detectCropHints(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));
  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.CROP_HINTS).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // Initialize the client. The try-with-resources block calls the "close" method on the client
  // to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }
      // For the full list of available annotations, see http://g.co/cloud/vision/docs
      CropHintsAnnotation annotation = res.getCropHintsAnnotation();
      for (CropHint hint : annotation.getCropHintsList()) {
        System.out.println(hint.getBoundingPoly());
      }
    }
  }
}
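As a variation on the loop above, a caller might print each hint's confidence and corner coordinates rather than the raw BoundingPoly message. This is a sketch rather than part of the original sample; getConfidence() on CropHint and getVerticesList() on BoundingPoly are the standard accessors:

// Alternative output: confidence plus the corner vertices of each crop hint.
for (CropHint hint : annotation.getCropHintsList()) {
  System.out.format("Confidence: %.2f%n", hint.getConfidence());
  hint.getBoundingPoly()
      .getVerticesList()
      .forEach(v -> System.out.format("- (%d, %d)%n", v.getX(), v.getY()));
}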