Use of com.google.cloud.vision.v1p3beta1.Feature in project spring-cloud-gcp by spring-cloud.
The class CloudVisionTemplate, method analyzeImage.
/**
* Analyze an image and extract the features of the image specified by
* {@code featureTypes}.
* <p>A feature describes the kind of Cloud Vision analysis one wishes to perform on an
* image, such as text detection, image labelling, facial detection, etc. A full list of
* feature types can be found in {@link Feature.Type}.
* @param imageResource the image one wishes to analyze. The Cloud Vision APIs support
* image formats described here: https://cloud.google.com/vision/docs/supported-files
* @param imageContext the image context used to customize the Vision API request
* @param featureTypes the types of image analysis to perform on the image
* @return the result of the image analysis
* @throws CloudVisionException if the image could not be read or if a malformed response
* is received from the Cloud Vision APIs
*/
public AnnotateImageResponse analyzeImage(
    Resource imageResource, ImageContext imageContext, Feature.Type... featureTypes) {
  ByteString imgBytes;
  try {
    imgBytes = ByteString.readFrom(imageResource.getInputStream());
  } catch (IOException ex) {
    throw new CloudVisionException("Failed to read image bytes from provided resource.", ex);
  }

  Image image = Image.newBuilder().setContent(imgBytes).build();
  List<Feature> featureList = Arrays.stream(featureTypes)
      .map((featureType) -> Feature.newBuilder().setType(featureType).build())
      .collect(Collectors.toList());

  BatchAnnotateImagesRequest request = BatchAnnotateImagesRequest.newBuilder()
      .addRequests(AnnotateImageRequest.newBuilder()
          .addAllFeatures(featureList)
          .setImageContext(imageContext)
          .setImage(image))
      .build();

  BatchAnnotateImagesResponse batchResponse =
      this.imageAnnotatorClient.batchAnnotateImages(request);
  List<AnnotateImageResponse> annotateImageResponses = batchResponse.getResponsesList();

  if (!annotateImageResponses.isEmpty()) {
    return annotateImageResponses.get(0);
  } else {
    throw new CloudVisionException(
        "Failed to receive valid response from Vision APIs; empty response received.");
  }
}
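For context, a minimal usage sketch of the template above. The injected bean, the image path, and the choice of LABEL_DETECTION are illustrative assumptions, not part of the original source:

@Autowired
private CloudVisionTemplate cloudVisionTemplate;

void labelImage() {
  // Hypothetical classpath image; any format from the supported-files doc works.
  Resource image = new ClassPathResource("sample.png");
  AnnotateImageResponse response = this.cloudVisionTemplate.analyzeImage(
      image, ImageContext.getDefaultInstance(), Feature.Type.LABEL_DETECTION);
  response.getLabelAnnotationsList()
      .forEach((label) -> System.out.println(label.getDescription()));
}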
Use of Feature in project java-mapollage by trixon (note: the Feature here is the KML Feature from the Java API for KML, not com.google.cloud.vision.v1p3beta1.Feature; the types share only a simple name).
The class Operation, method addPolygons.
private void addPolygons(Folder polygonParent, List<Feature> features) {
  for (Feature feature : features) {
    if (feature instanceof Folder) {
      Folder folder = (Folder) feature;
      if (folder != mPathFolder && folder != mPathGapFolder && folder != mPolygonFolder) {
        Folder polygonFolder = polygonParent.createAndAddFolder()
            .withName(folder.getName())
            .withOpen(true);
        mFolderPolygonInputs.put(polygonFolder, new ArrayList<>());
        addPolygons(polygonFolder, folder.getFeature());
        if (mFolderPolygonInputs.get(polygonFolder) != null) {
          addPolygon(folder.getName(), mFolderPolygonInputs.get(polygonFolder), polygonParent);
        }
      }
    }

    if (feature instanceof Placemark) {
      Placemark placemark = (Placemark) feature;
      Point point = (Point) placemark.getGeometry();
      ArrayList<Coordinate> coordinates =
          mFolderPolygonInputs.computeIfAbsent(polygonParent, k -> new ArrayList<>());
      coordinates.addAll(point.getCoordinates());
    }
  }

  ArrayList<Coordinate> rootCoordinates = mFolderPolygonInputs.get(mPolygonFolder);
  if (polygonParent == mPolygonFolder && rootCoordinates != null) {
    addPolygon(mPolygonFolder.getName(), rootCoordinates, polygonParent);
  }
}
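As a point of reference, addPolygons walks the Java API for KML (JAK) object model. A sketch of how such a feature list is typically obtained before a traversal like this one (the file name is hypothetical):

Kml kml = Kml.unmarshal(new File("photos.kml")); // hypothetical input file
Feature root = kml.getFeature();
if (root instanceof Document) {
  // The document's child features are what a recursive walk like addPolygons consumes.
  List<Feature> features = ((Document) root).getFeature();
  // e.g. pass `features` to a traversal along with a root output Folder
}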
Use of com.google.cloud.vision.v1p3beta1.Feature in project java-docs-samples by GoogleCloudPlatform.
The class ImageMagick, method blurOffensiveImages.
// [END run_imageproc_handler_setup]
// [END cloudrun_imageproc_handler_setup]
// [START cloudrun_imageproc_handler_analyze]
// [START run_imageproc_handler_analyze]
// Blurs uploaded images that are flagged as Adult or Violence.
public static void blurOffensiveImages(JsonObject data) {
  String fileName = data.get("name").getAsString();
  String bucketName = data.get("bucket").getAsString();
  BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, fileName).build();
  // Construct URI to GCS bucket and file.
  String gcsPath = String.format("gs://%s/%s", bucketName, fileName);
  System.out.println(String.format("Analyzing %s", fileName));

  // Construct request.
  List<AnnotateImageRequest> requests = new ArrayList<>();
  ImageSource imgSource = ImageSource.newBuilder().setImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feature = Feature.newBuilder().setType(Type.SAFE_SEARCH_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feature).setImage(img).build();
  requests.add(request);

  // Send request to the Vision API.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();
    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.println(String.format("Error: %s\n", res.getError().getMessage()));
        return;
      }
      // Get Safe Search annotations.
      SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
      if (annotation.getAdultValue() == 5 || annotation.getViolenceValue() == 5) {
        System.out.println(String.format("Detected %s as inappropriate.", fileName));
        blur(blobInfo);
      } else {
        System.out.println(String.format("Detected %s as OK.", fileName));
      }
    }
  } catch (Exception e) {
    System.out.println(String.format("Error with Vision API: %s", e.getMessage()));
  }
}
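A readability note: the literal 5 above is the numeric wire value of Likelihood.VERY_LIKELY. A sketch of an equivalent check written against the enum (not the sample's original wording):

SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
if (annotation.getAdult() == Likelihood.VERY_LIKELY
    || annotation.getViolence() == Likelihood.VERY_LIKELY) {
  blur(blobInfo); // same behavior, but the threshold is self-describing
}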
Use of com.google.cloud.vision.v1p3beta1.Feature in project java-docs-samples by GoogleCloudPlatform.
The class OcrProcessImage, method detectText.
// [END functions_ocr_process]
// [START functions_ocr_detect]
private void detectText(String bucket, String filename) {
  logger.info("Looking for text in image " + filename);

  List<AnnotateImageRequest> visionRequests = new ArrayList<>();
  String gcsPath = String.format("gs://%s/%s", bucket, filename);
  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature textFeature = Feature.newBuilder().setType(Feature.Type.TEXT_DETECTION).build();
  AnnotateImageRequest visionRequest =
      AnnotateImageRequest.newBuilder().addFeatures(textFeature).setImage(img).build();
  visionRequests.add(visionRequest);

  // Detect text in the image using the Cloud Vision API.
  AnnotateImageResponse visionResponse;
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    visionResponse = client.batchAnnotateImages(visionRequests).getResponses(0);
    if (visionResponse == null || !visionResponse.hasFullTextAnnotation()) {
      logger.info(String.format("Image %s contains no text", filename));
      return;
    }
    if (visionResponse.hasError()) {
      // Log error
      logger.log(Level.SEVERE,
          "Error in vision API call: " + visionResponse.getError().getMessage());
      return;
    }
  } catch (IOException e) {
    // Log error (since IOException cannot be thrown by a Cloud Function)
    logger.log(Level.SEVERE, "Error detecting text: " + e.getMessage(), e);
    return;
  }

  String text = visionResponse.getFullTextAnnotation().getText();
  logger.info("Extracted text from image: " + text);

  // Detect the language of the text using the Cloud Translation API.
  DetectLanguageRequest languageRequest = DetectLanguageRequest.newBuilder()
      .setParent(LOCATION_NAME)
      .setMimeType("text/plain")
      .setContent(text)
      .build();
  DetectLanguageResponse languageResponse;
  try (TranslationServiceClient client = TranslationServiceClient.create()) {
    languageResponse = client.detectLanguage(languageRequest);
  } catch (IOException e) {
    // Log error (since IOException cannot be thrown by a function)
    logger.log(Level.SEVERE, "Error detecting language: " + e.getMessage(), e);
    return;
  }

  if (languageResponse.getLanguagesCount() == 0) {
    logger.info("No languages were detected for text: " + text);
    return;
  }

  String languageCode = languageResponse.getLanguages(0).getLanguageCode();
  logger.info(String.format("Detected language %s for file %s", languageCode, filename));

  // Send a Pub/Sub translation request for every language we're going to translate to.
  for (String targetLanguage : TO_LANGS) {
    logger.info("Sending translation request for language " + targetLanguage);
    OcrTranslateApiMessage message = new OcrTranslateApiMessage(text, filename, targetLanguage);
    ByteString byteStr = ByteString.copyFrom(message.toPubsubData());
    PubsubMessage pubsubApiMessage = PubsubMessage.newBuilder().setData(byteStr).build();
    try {
      publisher.publish(pubsubApiMessage).get();
    } catch (InterruptedException | ExecutionException e) {
      // Log error
      logger.log(Level.SEVERE, "Error publishing translation request: " + e.getMessage(), e);
      return;
    }
  }
}
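detectText relies on surrounding state that this excerpt omits (logger, publisher, LOCATION_NAME, TO_LANGS). A sketch of what that setup could look like, with the project ID, topic name, and target languages as purely hypothetical values:

private static final Logger logger = Logger.getLogger(OcrProcessImage.class.getName());
// Hypothetical project and target languages.
private static final String LOCATION_NAME = LocationName.of("my-project", "global").toString();
private static final String[] TO_LANGS = {"es", "fr"};
private Publisher publisher;

public OcrProcessImage() throws IOException {
  // Hypothetical Pub/Sub topic carrying the translation requests.
  publisher = Publisher.newBuilder(TopicName.of("my-project", "ocr-translate")).build();
}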
Use of Feature in project osate-plugin by sireum (note: the Feature here is the AADL Feature from the OSATE metamodel, not the Cloud Vision type; the types share only a simple name).
The class BAVisitor, method caseSubprogramCallAction.
@Override
public Boolean caseSubprogramCallAction(SubprogramCallAction object) {
  Name name = null;
  List<Feature> features = null;
  CalledSubprogramHolder csh = object.getSubprogram();
  assert csh.getArrayIndexes().isEmpty() : "has array indexes: " + csh.getArrayIndexes().size();

  if (csh instanceof SubprogramSubcomponentHolder) {
    SubprogramSubcomponentHolder ssh = (SubprogramSubcomponentHolder) csh;
    assert (ssh.getArrayIndexes().isEmpty());
    name = toName(ssh.getSubcomponent().getName());
    features = ssh.getSubcomponent().getAllFeatures();
  } else {
    throw new RuntimeException("Currently only supporting subcomponent subprograms");
  }

  assert features.size() == object.getParameterLabels().size()
      : "feature size not equal to param labels size: "
          + features.size() + " vs " + object.getParameterLabels().size();

  List<BTSFormalExpPair> params = new ArrayList<>();
  for (int index = 0; index < object.getParameterLabels().size(); index++) {
    // Pair each formal parameter (an AADL Feature) with the actual expression passed for it.
    Feature f = features.get(index);
    visit(object.getParameterLabels().get(index));
    BTSExp ne = pop();
    // TODO:
    Option<Name> paramName = toSome(toSimpleName(f.getName()));
    params.add(BTSFormalExpPair$.MODULE$.apply(paramName, toSome(ne), toNone()));
  }

  push(BTSSubprogramCallAction$.MODULE$.apply(name, VisitorUtil.toISZ(params)));
  return false;
}