Usage example of com.google.cloud.vision.v1p3beta1.ImageSource from the project java-docs-samples by GoogleCloudPlatform:
class Detect, method detectPropertiesGcs.
/**
 * Detects image properties, such as dominant colors and their pixel fractions, from the
 * specified remote image on Google Cloud Storage.
 *
 * @param gcsPath The path to the remote file on Google Cloud Storage to detect properties on.
 * @param out A {@link PrintStream} to write the detected properties to.
 * @throws Exception on errors while communicating with or closing the client.
 */
public static void detectPropertiesGcs(String gcsPath, PrintStream out) throws Exception {
  // NOTE: the previous `throws Exception, IOException` clause was redundant —
  // IOException is a subclass of Exception, so declaring Exception alone suffices.
  List<AnnotateImageRequest> requests = new ArrayList<>();

  // Reference the remote GCS object instead of inlining the image bytes.
  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feat = Feature.newBuilder().setType(Type.IMAGE_PROPERTIES).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // try-with-resources guarantees the gRPC client is closed even if the call fails.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      DominantColorsAnnotation colors = res.getImagePropertiesAnnotation().getDominantColors();
      for (ColorInfo color : colors.getColorsList()) {
        out.printf(
            "fraction: %f\nr: %f, g: %f, b: %f\n",
            color.getPixelFraction(),
            color.getColor().getRed(),
            color.getColor().getGreen(),
            color.getColor().getBlue());
      }
    }
  }
}
Usage example of com.google.cloud.vision.v1p3beta1.ImageSource from the project java-docs-samples by GoogleCloudPlatform:
class Detect, method detectLogosGcs.
/**
 * Detects logos in the specified remote image on Google Cloud Storage.
 *
 * @param gcsPath The path to the remote file on Google Cloud Storage to perform logo detection
 *     on.
 * @param out A {@link PrintStream} to write detected logos to.
 * @throws Exception on errors while communicating with or closing the client.
 */
public static void detectLogosGcs(String gcsPath, PrintStream out) throws Exception {
  // NOTE: the previous `throws Exception, IOException` clause was redundant —
  // IOException is a subclass of Exception, so declaring Exception alone suffices.
  List<AnnotateImageRequest> requests = new ArrayList<>();

  // Reference the remote GCS object instead of inlining the image bytes.
  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feat = Feature.newBuilder().setType(Type.LOGO_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // try-with-resources guarantees the gRPC client is closed even if the call fails.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getLogoAnnotationsList()) {
        out.println(annotation.getDescription());
      }
    }
  }
}
Usage example of com.google.cloud.vision.v1p3beta1.ImageSource from the project java-docs-samples by GoogleCloudPlatform:
class OcrProcessImage, method detectText.
// [END functions_ocr_process]
// [START functions_ocr_detect]
/**
 * Extracts text from an image in Cloud Storage, detects its language, and publishes one
 * Pub/Sub translation request per configured target language.
 *
 * <p>All failures are logged rather than thrown, since exceptions cannot propagate out of a
 * Cloud Function entry point.
 *
 * @param bucket the Cloud Storage bucket containing the image
 * @param filename the name of the image object within the bucket
 */
private void detectText(String bucket, String filename) {
  logger.info("Looking for text in image " + filename);

  List<AnnotateImageRequest> visionRequests = new ArrayList<>();
  String gcsPath = String.format("gs://%s/%s", bucket, filename);
  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature textFeature = Feature.newBuilder().setType(Feature.Type.TEXT_DETECTION).build();
  AnnotateImageRequest visionRequest =
      AnnotateImageRequest.newBuilder().addFeatures(textFeature).setImage(img).build();
  visionRequests.add(visionRequest);

  // Detect text in an image using the Cloud Vision API
  AnnotateImageResponse visionResponse;
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    visionResponse = client.batchAnnotateImages(visionRequests).getResponses(0);

    // BUG FIX: check for an API error BEFORE checking for a missing annotation. An error
    // response typically carries no full-text annotation, so the previous order logged
    // "contains no text" and made this error branch unreachable.
    if (visionResponse == null || visionResponse.hasError()) {
      // Log error
      String errorMessage =
          visionResponse == null ? "empty response" : visionResponse.getError().getMessage();
      logger.log(Level.SEVERE, "Error in vision API call: " + errorMessage);
      return;
    }
    if (!visionResponse.hasFullTextAnnotation()) {
      logger.info(String.format("Image %s contains no text", filename));
      return;
    }
  } catch (IOException e) {
    // Log error (since IOException cannot be thrown by a Cloud Function)
    logger.log(Level.SEVERE, "Error detecting text: " + e.getMessage(), e);
    return;
  }

  String text = visionResponse.getFullTextAnnotation().getText();
  logger.info("Extracted text from image: " + text);

  // Detect language using the Cloud Translation API
  DetectLanguageRequest languageRequest =
      DetectLanguageRequest.newBuilder()
          .setParent(LOCATION_NAME)
          .setMimeType("text/plain")
          .setContent(text)
          .build();
  DetectLanguageResponse languageResponse;
  try (TranslationServiceClient client = TranslationServiceClient.create()) {
    languageResponse = client.detectLanguage(languageRequest);
  } catch (IOException e) {
    // Log error (since IOException cannot be thrown by a function)
    logger.log(Level.SEVERE, "Error detecting language: " + e.getMessage(), e);
    return;
  }

  if (languageResponse.getLanguagesCount() == 0) {
    logger.info("No languages were detected for text: " + text);
    return;
  }

  String languageCode = languageResponse.getLanguages(0).getLanguageCode();
  logger.info(String.format("Detected language %s for file %s", languageCode, filename));

  // Send a Pub/Sub translation request for every language we're going to translate to
  for (String targetLanguage : TO_LANGS) {
    logger.info("Sending translation request for language " + targetLanguage);
    OcrTranslateApiMessage message = new OcrTranslateApiMessage(text, filename, targetLanguage);
    ByteString byteStr = ByteString.copyFrom(message.toPubsubData());
    PubsubMessage pubsubApiMessage = PubsubMessage.newBuilder().setData(byteStr).build();
    try {
      // Block on the publish future so failures surface here rather than being dropped.
      publisher.publish(pubsubApiMessage).get();
    } catch (InterruptedException | ExecutionException e) {
      // Log error
      logger.log(Level.SEVERE, "Error publishing translation request: " + e.getMessage(), e);
      return;
    }
  }
}
Usage example of com.google.cloud.vision.v1p3beta1.ImageSource from the project java-docs-samples by GoogleCloudPlatform:
class ImageMagick, method blurOffensiveImages.
// [END run_imageproc_handler_setup]
// [END cloudrun_imageproc_handler_setup]
// [START cloudrun_imageproc_handler_analyze]
// [START run_imageproc_handler_analyze]
// Blurs uploaded images that are flagged as Adult or Violence.
/**
 * Runs Safe Search detection on the uploaded image described by {@code data} and blurs it when
 * the Vision API flags it as adult or violent content.
 *
 * @param data Cloud Storage event payload; must contain "name" and "bucket" entries
 */
public static void blurOffensiveImages(JsonObject data) {
  String objectName = data.get("name").getAsString();
  String bucket = data.get("bucket").getAsString();
  BlobInfo blobInfo = BlobInfo.newBuilder(bucket, objectName).build();

  // Construct URI to GCS bucket and file.
  String gcsUri = String.format("gs://%s/%s", bucket, objectName);
  System.out.println(String.format("Analyzing %s", objectName));

  // Assemble a single SAFE_SEARCH_DETECTION request for the uploaded object.
  ImageSource source = ImageSource.newBuilder().setImageUri(gcsUri).build();
  Image image = Image.newBuilder().setSource(source).build();
  Feature safeSearch = Feature.newBuilder().setType(Type.SAFE_SEARCH_DETECTION).build();
  List<AnnotateImageRequest> requests = new ArrayList<>();
  requests.add(
      AnnotateImageRequest.newBuilder().addFeatures(safeSearch).setImage(image).build());

  // Send request to the Vision API; the client is closed automatically.
  try (ImageAnnotatorClient visionClient = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse batchResponse = visionClient.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : batchResponse.getResponsesList()) {
      if (res.hasError()) {
        System.out.println(String.format("Error: %s\n", res.getError().getMessage()));
        return;
      }

      // Inspect the Safe Search verdict. A raw value of 5 appears to correspond to the
      // highest likelihood bucket (VERY_LIKELY) — NOTE(review): confirm against the
      // Likelihood enum in the installed Vision API version.
      SafeSearchAnnotation verdict = res.getSafeSearchAnnotation();
      boolean flagged = verdict.getAdultValue() == 5 || verdict.getViolenceValue() == 5;
      if (flagged) {
        System.out.println(String.format("Detected %s as inappropriate.", objectName));
        blur(blobInfo);
      } else {
        System.out.println(String.format("Detected %s as OK.", objectName));
      }
    }
  } catch (Exception e) {
    System.out.println(String.format("Error with Vision API: %s", e.getMessage()));
  }
}
Usage example of com.google.cloud.vision.v1p3beta1.ImageSource from the project java-vision by googleapis:
class ITSystemTest, method detectLandmarksUrlTest.
/**
 * Integration test: runs landmark detection against a public sample image (Palace of Fine
 * Arts) and verifies the landmark is recognized. Retries up to three times with a 30-second
 * backoff because the shared test project is subject to request throttling.
 *
 * @throws Exception if the retry sleep is interrupted or the API call fails unexpectedly
 */
@Test
public void detectLandmarksUrlTest() throws Exception {
  ImageSource imgSource =
      ImageSource.newBuilder().setImageUri(SAMPLE_URI + "landmark/pofa.jpg").build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feat = Feature.newBuilder().setType(Type.LANDMARK_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();

  List<String> actual = new ArrayList<>();
  int maxTries = 3;
  for (int tryCount = 0; tryCount < maxTries; tryCount++) {
    try {
      actual = addResponsesToList(request);
      break;
    } catch (StatusRuntimeException ex) {
      // FIX: only back off when another attempt remains. The previous loop slept 30 seconds
      // even after the final failed attempt, delaying the (inevitable) assertion for nothing.
      if (tryCount + 1 < maxTries) {
        System.out.println("retrying due to request throttling or DOS prevention...");
        TimeUnit.SECONDS.sleep(30);
      }
    }
  }
  assertThat(actual).contains("Palace of Fine Arts");
}
Aggregations