Use of com.google.cloud.automl.v1beta1.Image in the project java-vision by googleapis:
the class DetectBeta, method detectLocalizedObjects.
// [START vision_localize_objects_beta]
/**
 * Detects localized objects in the specified local image.
 *
 * @param filePath The path to the file to perform localized object detection on.
 * @param out A {@link PrintStream} to write detected objects to.
 * @throws IOException on Input/Output errors.
 */
public static void detectLocalizedObjects(String filePath, PrintStream out) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  // Read the image bytes. try-with-resources closes the stream; the original
  // version leaked the FileInputStream passed to ByteString.readFrom.
  ByteString imgBytes;
  try (FileInputStream inputStream = new FileInputStream(filePath)) {
    imgBytes = ByteString.readFrom(inputStream);
  }

  Image img = Image.newBuilder().setContent(imgBytes).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder()
          .addFeatures(Feature.newBuilder().setType(Type.OBJECT_LOCALIZATION))
          .setImage(img)
          .build();
  requests.add(request);

  // Perform the request. The client is AutoCloseable; try-with-resources
  // cleans up its background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();
    // Display the results.
    for (AnnotateImageResponse res : responses) {
      for (LocalizedObjectAnnotation entity : res.getLocalizedObjectAnnotationsList()) {
        out.format("Object name: %s\n", entity.getName());
        out.format("Confidence: %s\n", entity.getScore());
        out.format("Normalized Vertices:\n");
        entity
            .getBoundingPoly()
            .getNormalizedVerticesList()
            .forEach(vertex -> out.format("- (%s, %s)\n", vertex.getX(), vertex.getY()));
      }
    }
  }
}
Use of com.google.cloud.automl.v1beta1.Image in the project java-vision by googleapis:
the class ProductSearch, method getSimilarProductsFile.
// [START vision_product_search_get_similar_products]
/**
 * Searches a product set for products similar to an image stored in a local file.
 *
 * @param projectId - Id of the project.
 * @param computeRegion - Region name.
 * @param productSetId - Id of the product set.
 * @param productCategory - Category of the product.
 * @param filePath - Local file path of the image to be searched
 * @param filter - Condition to be applied on the labels. Example for filter: (color = red OR
 *     color = blue) AND style = kids It will search on all products with the following labels:
 *     color:red AND style:kids color:blue AND style:kids
 * @throws IOException - on I/O errors.
 */
public static void getSimilarProductsFile(String projectId, String computeRegion, String productSetId, String productCategory, String filePath, String filter) throws IOException {
  try (ImageAnnotatorClient queryImageClient = ImageAnnotatorClient.create()) {
    // Resolve the fully qualified resource name of the product set.
    String productSetPath =
        ProductSearchClient.formatProductSetName(projectId, computeRegion, productSetId);

    // Load the local image into memory.
    byte[] imageBytes = Files.readAllBytes(new File(filePath).toPath());
    // The input image can be a HTTPS link or Raw image bytes.
    // Example:
    // To use HTTP link replace with below code
    // ImageSource source = ImageSource.newBuilder().setImageUri(imageUri).build();
    // Image image = Image.newBuilder().setSource(source).build();
    Image searchImage = Image.newBuilder().setContent(ByteString.copyFrom(imageBytes)).build();

    // Product search is selected via a PRODUCT_SEARCH feature plus an ImageContext
    // carrying the product set, category, and label filter.
    ProductSearchParams searchParams =
        ProductSearchParams.newBuilder()
            .setProductSet(productSetPath)
            .addProductCategories(productCategory)
            .setFilter(filter)
            .build();
    ImageContext searchContext =
        ImageContext.newBuilder().setProductSearchParams(searchParams).build();
    Feature searchFeature = Feature.newBuilder().setType(Type.PRODUCT_SEARCH).build();
    AnnotateImageRequest searchRequest =
        AnnotateImageRequest.newBuilder()
            .addFeatures(searchFeature)
            .setImage(searchImage)
            .setImageContext(searchContext)
            .build();

    // Search products similar to the image and print each match.
    BatchAnnotateImagesResponse batchResponse =
        queryImageClient.batchAnnotateImages(Arrays.asList(searchRequest));
    List<Result> matches =
        batchResponse.getResponses(0).getProductSearchResults().getResultsList();

    System.out.println("Similar Products: ");
    for (Result match : matches) {
      System.out.println(String.format("\nProduct name: %s", match.getProduct().getName()));
      System.out.println(
          String.format("Product display name: %s", match.getProduct().getDisplayName()));
      System.out.println(
          String.format("Product description: %s", match.getProduct().getDescription()));
      System.out.println(String.format("Score(Confidence): %s", match.getScore()));
      System.out.println(String.format("Image name: %s", match.getImage()));
    }
  }
}
Use of com.google.cloud.automl.v1beta1.Image in the project java-vision by googleapis:
the class AsyncBatchAnnotateImages, method asyncBatchAnnotateImages.
/**
 * Submits an asynchronous label-detection batch for one GCS image and waits for completion;
 * results are written to GCS under the given output URI prefix.
 */
public static void asyncBatchAnnotateImages(String inputImageUri, String outputUri) throws IOException, ExecutionException, InterruptedException {
  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient imageAnnotatorClient = ImageAnnotatorClient.create()) {
    // You can send multiple images to be annotated; this sample uses one image.
    // For multiple images, build one `AnnotateImageRequest` per image.
    // First tell the Vision API where to find the image.
    ImageSource gcsSource = ImageSource.newBuilder().setImageUri(inputImageUri).build();
    Image inputImage = Image.newBuilder().setSource(gcsSource).build();

    // Choose the annotation type to perform on the image:
    // https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#google.cloud.vision.v1.Feature.Type
    Feature labelFeature = Feature.newBuilder().setType(Feature.Type.LABEL_DETECTION).build();

    // One request for the one image; additional images each need their own request.
    AnnotateImageRequest annotateRequest =
        AnnotateImageRequest.newBuilder().setImage(inputImage).addFeatures(labelFeature).build();

    // Results go to GCS, with at most 2 responses per output JSON file.
    GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(outputUri).build();
    OutputConfig outputConfig =
        OutputConfig.newBuilder().setGcsDestination(gcsDestination).setBatchSize(2).build();

    // Assemble the batch request and block until the long-running operation finishes.
    AsyncBatchAnnotateImagesRequest batchRequest =
        AsyncBatchAnnotateImagesRequest.newBuilder()
            .addRequests(annotateRequest)
            .setOutputConfig(outputConfig)
            .build();
    AsyncBatchAnnotateImagesResponse batchResponse =
        imageAnnotatorClient.asyncBatchAnnotateImagesAsync(batchRequest).get();

    // The output is written to GCS with the provided output_uri as prefix.
    String gcsOutputUri = batchResponse.getOutputConfig().getGcsDestination().getUri();
    System.out.format("Output written to GCS with prefix: %s%n", gcsOutputUri);
  }
}
Use of com.google.cloud.automl.v1beta1.Image in the project java-vision by googleapis:
the class DetectLogosGcs, method detectLogosGcs.
// Detects logos in the specified remote image on Google Cloud Storage.
public static void detectLogosGcs(String gcsPath) throws IOException {
  // Build a single logo-detection request against the GCS-hosted image.
  ImageSource gcsSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image remoteImage = Image.newBuilder().setSource(gcsSource).build();
  Feature logoFeature = Feature.newBuilder().setType(Feature.Type.LOGO_DETECTION).build();
  List<AnnotateImageRequest> requests = new ArrayList<>();
  requests.add(
      AnnotateImageRequest.newBuilder().addFeatures(logoFeature).setImage(remoteImage).build());

  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse batchResponse = client.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : batchResponse.getResponsesList()) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }
      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getLogoAnnotationsList()) {
        System.out.println(annotation.getDescription());
      }
    }
  }
}
Use of com.google.cloud.automl.v1beta1.Image in the project java-vision by googleapis:
the class DetectText, method detectText.
// Detects text in the specified image.
public static void detectText(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  // Read the image bytes. try-with-resources closes the stream; the original
  // version leaked the FileInputStream passed to ByteString.readFrom.
  ByteString imgBytes;
  try (FileInputStream inputStream = new FileInputStream(filePath)) {
    imgBytes = ByteString.readFrom(inputStream);
  }

  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.TEXT_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // The client is AutoCloseable; try-with-resources cleans up its background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : response.getResponsesList()) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }
      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getTextAnnotationsList()) {
        System.out.format("Text: %s%n", annotation.getDescription());
        System.out.format("Position : %s%n", annotation.getBoundingPoly());
      }
    }
  }
}
Aggregations