Example use of com.google.cloud.vision.v1p3beta1.AnnotateImageResponse in the project spring-cloud-gcp by GoogleCloudPlatform, from the class CloudVisionTemplate, method analyzeImage.
/**
 * Analyzes an image, running the Cloud Vision analyses selected by {@code featureTypes}.
 *
 * <p>Each feature type names one kind of Cloud Vision analysis (text detection, image
 * labelling, facial detection, etc.); see {@link Feature.Type} for the complete list.
 *
 * @param imageResource the image to analyze; the Cloud Vision APIs support the image
 *     formats described here: https://cloud.google.com/vision/docs/supported-files
 * @param imageContext the image context used to customize the Vision API request
 * @param featureTypes the types of image analysis to perform on the image
 * @return the results of the image analyses
 * @throws CloudVisionException if the image could not be read or if a malformed response is
 *     received from the Cloud Vision APIs
 */
public AnnotateImageResponse analyzeImage(Resource imageResource, ImageContext imageContext, Feature.Type... featureTypes) {
  final ByteString imageBytes;
  try {
    imageBytes = ByteString.readFrom(imageResource.getInputStream());
  } catch (IOException ex) {
    throw new CloudVisionException(READ_BYTES_ERROR_MESSAGE, ex);
  }

  // Assemble a single AnnotateImageRequest carrying the image, the caller-supplied
  // context, and one Feature per requested analysis type.
  AnnotateImageRequest.Builder annotateRequest = AnnotateImageRequest.newBuilder()
      .setImage(Image.newBuilder().setContent(imageBytes).build())
      .setImageContext(imageContext);
  for (Feature.Type featureType : featureTypes) {
    annotateRequest.addFeatures(Feature.newBuilder().setType(featureType).build());
  }

  BatchAnnotateImagesResponse batchResponse = this.imageAnnotatorClient.batchAnnotateImages(
      BatchAnnotateImagesRequest.newBuilder().addRequests(annotateRequest).build());

  List<AnnotateImageResponse> responses = batchResponse.getResponsesList();
  if (responses.isEmpty()) {
    // A well-formed reply contains exactly one response for our single request.
    throw new CloudVisionException(EMPTY_RESPONSE_ERROR_MESSAGE);
  }
  return responses.get(0);
}
Example use of com.google.cloud.vision.v1p3beta1.AnnotateImageResponse in the project spring-cloud-gcp by GoogleCloudPlatform, from the class CloudVisionTemplate, method extractTextFromFile.
/**
 * Extracts the text out of a file and returns the result as one String per page.
 *
 * @param fileResource the file to analyze
 * @param mimeType the mime type of the fileResource. Currently, only "application/pdf",
 *     "image/tiff" and "image/gif" are supported.
 * @return the text extracted from the file, one string per page
 * @throws CloudVisionException if the file could not be read or if text extraction failed
 */
public List<String> extractTextFromFile(Resource fileResource, String mimeType) {
  AnnotateFileResponse fileResponse = analyzeFile(fileResource, mimeType, Type.DOCUMENT_TEXT_DETECTION);

  List<AnnotateImageResponse> pageResponses = fileResponse.getResponsesList();
  if (pageResponses.isEmpty()) {
    throw new CloudVisionException(EMPTY_RESPONSE_ERROR_MESSAGE);
  }

  // One full-text annotation per page of the analyzed file.
  List<String> pageTexts = pageResponses.stream()
      .map(pageResponse -> pageResponse.getFullTextAnnotation().getText())
      .collect(Collectors.toList());

  // No extracted text together with a non-OK status means extraction itself failed.
  if (pageTexts.isEmpty() && fileResponse.getError().getCode() != Code.OK.getNumber()) {
    throw new CloudVisionException(fileResponse.getError().getMessage());
  }
  return pageTexts;
}
Example use of com.google.cloud.vision.v1p3beta1.AnnotateImageResponse in the project google-cloud-java by googleapis, from the class AnnotateImage, method main.
/**
 * Reads a local image file, sends it to the Cloud Vision API for label detection, and
 * prints every field of each label annotation (or the error message, if any).
 *
 * @param args unused
 * @throws Exception if the image cannot be read or the Vision API call fails
 */
public static void main(String... args) throws Exception {
  // The path to the image file to annotate, for example "./resources/wakeupcat.jpg".
  String fileName = "your/image/path.jpg";

  // Read the image file into memory before creating the client, so a read failure
  // cannot leak an open client.
  byte[] data = Files.readAllBytes(Paths.get(fileName));
  ByteString imgBytes = ByteString.copyFrom(data);

  // Build a single label-detection request for the image.
  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Type.LABEL_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  List<AnnotateImageRequest> requests = new ArrayList<>();
  requests.add(request);

  // try-with-resources releases the client's background channels and threads on exit;
  // the original never called close() and leaked the client.
  try (ImageAnnotatorClient vision = ImageAnnotatorClient.create()) {
    // Performs label detection on the image file.
    BatchAnnotateImagesResponse response = vision.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : response.getResponsesList()) {
      if (res.hasError()) {
        System.out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }
      for (EntityAnnotation annotation : res.getLabelAnnotationsList()) {
        for (Map.Entry<FieldDescriptor, Object> entry : annotation.getAllFields().entrySet()) {
          System.out.printf("%s : %s\n", entry.getKey(), entry.getValue());
        }
      }
    }
  }
}
Example use of com.google.cloud.vision.v1p3beta1.AnnotateImageResponse in the project TweetwallFX by TweetWallFX, from the class GoogleVisionCache, method load.
/**
 * Runs Cloud Vision analysis for the given image URIs and returns the evaluations keyed by
 * image URI, in request order. Each evaluation is also stored in {@code cache}.
 *
 * @param imageUris the image URIs to analyze; null entries and duplicates are skipped
 * @return an unmodifiable map of image URI to its analysis, or an empty map when no client
 *     is available or no requests were produced
 * @throws IOException on Input/Output errors while preparing or executing the requests
 * @throws IllegalStateException if the number of responses does not match the number of
 *     requests
 */
private Map<String, ImageContentAnalysis> load(final Stream<String> imageUris) throws IOException {
  if (null == getClient()) {
    return Collections.emptyMap();
  }
  final List<AnnotateImageRequest> requests = imageUris
      .filter(Objects::nonNull)
      .distinct()
      .map(this::createImageRequest)
      .peek(air -> LOG.info("Prepared {}", air))
      .collect(Collectors.toList());
  if (requests.isEmpty()) {
    return Collections.emptyMap();
  }

  LOG.info("Executing analysis for {} AnnotateImageRequests", requests.size());
  final BatchAnnotateImagesResponse batchResponse = getClient().batchAnnotateImages(requests);

  // Responses come back in request order; walk both iterators in lockstep to pair each
  // response with the URI of the request that produced it.
  final Iterator<AnnotateImageResponse> itResponse = batchResponse.getResponsesList().iterator();
  final Iterator<AnnotateImageRequest> itRequest = requests.iterator();
  final Map<String, ImageContentAnalysis> result = new LinkedHashMap<>(requests.size());
  while (itRequest.hasNext() && itResponse.hasNext()) {
    final AnnotateImageRequest request = itRequest.next();
    final AnnotateImageResponse response = itResponse.next();
    final String uri = request.getImage().getSource().getImageUri();
    final ImageContentAnalysis ica = new ImageContentAnalysis(response);
    LOG.info("Image('{}') was evaluated as {}", uri, ica);
    result.put(uri, ica);
    cache.put(uri, ica);
  }
  // BUG FIX: the original tested itRequest.hasNext() in both branches, leaving the second
  // branch unreachable and mislabeling leftover responses as requests. Check each iterator
  // against its own message.
  if (itResponse.hasNext()) {
    throw new IllegalStateException("There are still annotate Responses available!");
  } else if (itRequest.hasNext()) {
    throw new IllegalStateException("There are still annotate Requests available!");
  } else {
    return Collections.unmodifiableMap(result);
  }
}
Example use of com.google.cloud.vision.v1p3beta1.AnnotateImageResponse in the project java-vision by googleapis, from the class DetectBeta, method detectLocalizedObjects.
// [START vision_localize_objects_beta]
/**
 * Detects localized objects in the specified local image and writes each detected object's
 * name, confidence score, and normalized bounding-polygon vertices to {@code out}.
 *
 * @param filePath The path to the file to perform localized object detection on.
 * @param out A {@link PrintStream} to write detected objects to.
 * @throws Exception on errors while closing the client.
 * @throws IOException on Input/Output errors.
 */
public static void detectLocalizedObjects(String filePath, PrintStream out) throws Exception, IOException {
  // Load the image and wrap it in a single object-localization request.
  ByteString imageBytes = ByteString.readFrom(new FileInputStream(filePath));
  AnnotateImageRequest annotateRequest = AnnotateImageRequest.newBuilder()
      .addFeatures(Feature.newBuilder().setType(Type.OBJECT_LOCALIZATION))
      .setImage(Image.newBuilder().setContent(imageBytes).build())
      .build();
  List<AnnotateImageRequest> batch = new ArrayList<>();
  batch.add(annotateRequest);

  // The client is closed automatically when the try block exits.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse batchResponse = client.batchAnnotateImages(batch);
    // Display every localized object found in every response.
    for (AnnotateImageResponse annotateResponse : batchResponse.getResponsesList()) {
      for (LocalizedObjectAnnotation detected : annotateResponse.getLocalizedObjectAnnotationsList()) {
        out.format("Object name: %s\n", detected.getName());
        out.format("Confidence: %s\n", detected.getScore());
        out.format("Normalized Vertices:\n");
        detected.getBoundingPoly().getNormalizedVerticesList()
            .forEach(v -> out.format("- (%s, %s)\n", v.getX(), v.getY()));
      }
    }
  }
}
Aggregations