use of com.google.cloud.vision.v1p4beta1.BatchAnnotateImagesResponse in project java-vision by googleapis.
the class DetectSafeSearch method detectSafeSearch.
// Detects whether the specified image has features you would want to moderate.
public static void detectSafeSearch(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.SAFE_SEARCH_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }

      // For a full list of available annotations, see http://g.co/cloud/vision/docs
      SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
      System.out.format(
          "adult: %s%nmedical: %s%nspoofed: %s%nviolence: %s%nracy: %s%n",
          annotation.getAdult(),
          annotation.getMedical(),
          annotation.getSpoof(),
          annotation.getViolence(),
          annotation.getRacy());
    }
  }
}
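The same SafeSearch request can be built for a remote image in Google Cloud Storage by swapping the raw bytes for an ImageSource, exactly as the crop-hints sample below does. A minimal sketch; the method name detectSafeSearchGcs is illustrative and not part of the snippet above.

// Sketch: SafeSearch detection for a GCS-hosted image (method name is illustrative).
public static void detectSafeSearchGcs(String gcsPath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.SAFE_SEARCH_DETECTION).build();
  requests.add(AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build());

  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : response.getResponsesList()) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }
      SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
      System.out.format("adult: %s%nracy: %s%n", annotation.getAdult(), annotation.getRacy());
    }
  }
}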
use of com.google.cloud.vision.v1p4beta1.BatchAnnotateImagesResponse in project java-vision by googleapis.
the class DetectCropHintsGcs method detectCropHintsGcs.
// Suggests a region to crop to for a remote file on Google Cloud Storage.
public static void detectCropHintsGcs(String gcsPath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.CROP_HINTS).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }

      // For a full list of available annotations, see http://g.co/cloud/vision/docs
      CropHintsAnnotation annotation = res.getCropHintsAnnotation();
      for (CropHint hint : annotation.getCropHintsList()) {
        System.out.println(hint.getBoundingPoly());
      }
    }
  }
}
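Printing the whole BoundingPoly is often enough, but if you need the individual corner coordinates or each hint's confidence, the proto exposes them directly. A small sketch, assuming the standard CropHint.getConfidence(), BoundingPoly.getVerticesList(), and Vertex.getX()/getY() accessors are available (Vertex would need its own import); the helper name is hypothetical.

// Sketch: print each crop hint's confidence and corner coordinates for one response
// (assumes the standard CropHint/BoundingPoly/Vertex accessors).
static void printCropHintVertices(AnnotateImageResponse res) {
  for (CropHint hint : res.getCropHintsAnnotation().getCropHintsList()) {
    System.out.format("confidence: %f%n", hint.getConfidence());
    for (Vertex vertex : hint.getBoundingPoly().getVerticesList()) {
      System.out.format("  (%d, %d)%n", vertex.getX(), vertex.getY());
    }
  }
}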
use of com.google.cloud.vision.v1p4beta1.BatchAnnotateImagesResponse in project java-vision by googleapis.
the class DetectFaces method detectFaces.
// Detects faces in the specified local image.
public static void detectFaces(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.FACE_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }

      // For a full list of available annotations, see http://g.co/cloud/vision/docs
      for (FaceAnnotation annotation : res.getFaceAnnotationsList()) {
        System.out.format(
            "anger: %s%njoy: %s%nsurprise: %s%nposition: %s",
            annotation.getAngerLikelihood(),
            annotation.getJoyLikelihood(),
            annotation.getSurpriseLikelihood(),
            annotation.getBoundingPoly());
      }
    }
  }
}
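Besides the likelihood fields printed above, each FaceAnnotation also carries an overall detection confidence and per-feature landmarks (eyes, nose tip, and so on). A sketch assuming the standard FaceAnnotation.getDetectionConfidence() and getLandmarksList() accessors; the helper name is hypothetical.

// Sketch: report detection confidence and facial landmarks for every face in one response
// (assumes FaceAnnotation.getDetectionConfidence() and getLandmarksList()).
static void printFaceDetails(AnnotateImageResponse res) {
  for (FaceAnnotation annotation : res.getFaceAnnotationsList()) {
    System.out.format("detection confidence: %f%n", annotation.getDetectionConfidence());
    for (FaceAnnotation.Landmark landmark : annotation.getLandmarksList()) {
      System.out.format("%s at %s%n", landmark.getType(), landmark.getPosition());
    }
  }
}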
use of com.google.cloud.vision.v1p4beta1.BatchAnnotateImagesResponse in project java-vision by googleapis.
the class DetectLabels method detectLabels.
// Detects labels in the specified local image.
public static void detectLabels(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.LABEL_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }

      // For a full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getLabelAnnotationsList()) {
        annotation
            .getAllFields()
            .forEach((k, v) -> System.out.format("%s : %s%n", k, v.toString()));
      }
    }
  }
}
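The getAllFields() dump prints every proto field of each label. If you only want the human-readable description and its confidence score, EntityAnnotation exposes them directly; a minimal sketch assuming the standard getDescription() and getScore() accessors, with a hypothetical helper name.

// Sketch: print only the label text and its confidence score for one response
// (assumes EntityAnnotation.getDescription() and getScore()).
static void printLabelScores(AnnotateImageResponse res) {
  for (EntityAnnotation annotation : res.getLabelAnnotationsList()) {
    System.out.format("%s : %.3f%n", annotation.getDescription(), annotation.getScore());
  }
}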
use of com.google.cloud.vision.v1p4beta1.BatchAnnotateImagesResponse in project java-vision by googleapis.
the class DetectLandmarks method detectLandmarks.
// Detects landmarks in the specified local image.
public static void detectLandmarks(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.LANDMARK_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }

      // For a full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getLandmarkAnnotationsList()) {
        LocationInfo info = annotation.getLocationsList().listIterator().next();
        System.out.format("Landmark: %s%n %s%n", annotation.getDescription(), info.getLatLng());
      }
    }
  }
}
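Note that listIterator().next() throws NoSuchElementException if a landmark comes back without any location entries. A defensive variant of the same loop, with a hypothetical helper name, might look like this:

// Sketch: same landmark loop, but skip entries that carry no location information
// instead of letting listIterator().next() throw.
static void printLandmarksSafely(AnnotateImageResponse res) {
  for (EntityAnnotation annotation : res.getLandmarkAnnotationsList()) {
    if (annotation.getLocationsCount() == 0) {
      System.out.format("Landmark: %s (no location returned)%n", annotation.getDescription());
      continue;
    }
    LocationInfo info = annotation.getLocationsList().get(0);
    System.out.format("Landmark: %s%n %s%n", annotation.getDescription(), info.getLatLng());
  }
}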