Use of com.google.cloud.vision.v1.Image in project java-vision by googleapis.
The class ITSystemTest, method detectWebEntitiesIncludeGeoResultsTest.
@Test
public void detectWebEntitiesIncludeGeoResultsTest() throws IOException {
ByteString imgBytes = ByteString.readFrom(new FileInputStream(RESOURCES + "city.jpg"));
Image img = Image.newBuilder().setContent(imgBytes).build();
Feature feat = Feature.newBuilder().setType(Type.WEB_DETECTION).setMaxResults(MAX_RESULTS).build();
WebDetectionParams webDetectionParams = WebDetectionParams.newBuilder().setIncludeGeoResults(true).build();
ImageContext imageContext = ImageContext.newBuilder().setWebDetectionParams(webDetectionParams).build();
AnnotateImageRequest request = AnnotateImageRequest.newBuilder().addFeatures(feat).setImageContext(imageContext).setImage(img).build();
BatchAnnotateImagesResponse response = imageAnnotatorClient.batchAnnotateImages(ImmutableList.of(request));
List<AnnotateImageResponse> responses = response.getResponsesList();
List<String> actual = new ArrayList<>();
for (AnnotateImageResponse imgResponse : responses) {
for (WebDetection.WebEntity entity : imgResponse.getWebDetection().getWebEntitiesList()) {
actual.add(entity.getDescription());
}
}
List<String> expectedResults = new ArrayList<>();
expectedResults.add("Skyscraper");
expectedResults.add("Suburb");
assertThat(actual).containsAnyIn(expectedResults);
}
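The test relies on fields declared elsewhere in ITSystemTest (RESOURCES, SAMPLE_BUCKET, MAX_RESULTS, and the shared imageAnnotatorClient). A minimal fixture sketch, assuming typical values; the constants below are placeholders, not the values from the actual test class.
// Hypothetical fixture for ITSystemTest; the constant values are assumptions.
private static final String RESOURCES = "src/test/resources/"; // assumed image directory
private static final String SAMPLE_BUCKET = "gs://cloud-samples-data/vision/"; // assumed bucket
private static final int MAX_RESULTS = 6; // assumed per-feature result cap
private static ImageAnnotatorClient imageAnnotatorClient;

@BeforeClass
public static void setUp() throws IOException {
  // A single client is shared by every test in the class.
  imageAnnotatorClient = ImageAnnotatorClient.create();
}

@AfterClass
public static void tearDown() {
  // Release gRPC channels and other background resources.
  imageAnnotatorClient.close();
}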
Use of com.google.cloud.vision.v1.Image in project java-vision by googleapis.
The class ITSystemTest, method detectWebEntitiesIncludeGeoResultsGcsTest.
@Test
public void detectWebEntitiesIncludeGeoResultsGcsTest() {
ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(SAMPLE_BUCKET + "landmark/pofa.jpg").build();
Image img = Image.newBuilder().setSource(imgSource).build();
Feature feat = Feature.newBuilder().setType(Type.WEB_DETECTION).setMaxResults(MAX_RESULTS).build();
WebDetectionParams webDetectionParams = WebDetectionParams.newBuilder().setIncludeGeoResults(true).build();
ImageContext imageContext = ImageContext.newBuilder().setWebDetectionParams(webDetectionParams).build();
AnnotateImageRequest request = AnnotateImageRequest.newBuilder().addFeatures(feat).setImageContext(imageContext).setImage(img).build();
BatchAnnotateImagesResponse response = imageAnnotatorClient.batchAnnotateImages(ImmutableList.of(request));
List<AnnotateImageResponse> responses = response.getResponsesList();
List<String> actual = new ArrayList<>();
System.out.println("WebEntitiesGeo SIZE");
System.out.println(actual.size());
for (AnnotateImageResponse imgResponse : responses) {
for (WebDetection.WebEntity entity : imgResponse.getWebDetection().getWebEntitiesList()) {
actual.add(entity.getDescription());
}
}
assertThat(actual).contains("The Palace Of Fine Arts");
}
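The assertions above only read entity descriptions. The WebDetection message also exposes best-guess labels, relevancy scores, and matching images; a short sketch of reading those fields from the same responses list, using accessors from the published vision.v1 protos:
// Sketch: inspect additional WebDetection fields from the first response.
WebDetection web = responses.get(0).getWebDetection();
for (WebDetection.WebLabel label : web.getBestGuessLabelsList()) {
  System.out.println("Best guess label: " + label.getLabel());
}
for (WebDetection.WebEntity entity : web.getWebEntitiesList()) {
  // Each entity carries an opaque ID, a relevancy score, and a description.
  System.out.printf("Entity %s (%s): %f%n", entity.getDescription(), entity.getEntityId(), entity.getScore());
}
for (WebDetection.WebImage similar : web.getVisuallySimilarImagesList()) {
  System.out.println("Visually similar image: " + similar.getUrl());
}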
Use of com.google.cloud.vision.v1.Image in project java-vision by googleapis.
The class Detect, method detectDocumentText.
/**
* Performs document text detection on a local image file.
*
* @param filePath The path to the local file to detect document text on.
* @throws IOException on Input/Output errors, including failures while creating the client.
*/
// [START vision_fulltext_detection]
public static void detectDocumentText(String filePath) throws IOException {
List<AnnotateImageRequest> requests = new ArrayList<>();
ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));
Image img = Image.newBuilder().setContent(imgBytes).build();
Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
AnnotateImageRequest request = AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
// the "close" method on the client to safely clean up any remaining background resources.
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
for (AnnotateImageResponse res : responses) {
if (res.hasError()) {
System.out.format("Error: %s%n", res.getError().getMessage());
return;
}
// For full list of available annotations, see http://g.co/cloud/vision/docs
TextAnnotation annotation = res.getFullTextAnnotation();
for (Page page : annotation.getPagesList()) {
String pageText = "";
for (Block block : page.getBlocksList()) {
String blockText = "";
for (Paragraph para : block.getParagraphsList()) {
String paraText = "";
for (Word word : para.getWordsList()) {
String wordText = "";
for (Symbol symbol : word.getSymbolsList()) {
wordText = wordText + symbol.getText();
System.out.format("Symbol text: %s (confidence: %f)%n", symbol.getText(), symbol.getConfidence());
}
System.out.format("Word text: %s (confidence: %f)%n%n", wordText, word.getConfidence());
paraText = String.format("%s %s", paraText, wordText);
}
// Output Example using Paragraph:
System.out.println("%nParagraph: %n" + paraText);
System.out.format("Paragraph Confidence: %f%n", para.getConfidence());
blockText = blockText + paraText;
}
pageText = pageText + blockText;
}
}
System.out.println("%nComplete annotation:");
System.out.println(annotation.getText());
}
}
}
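The TextAnnotation hierarchy also carries geometry at every level. A hedged sketch of pulling block-level bounding boxes from the same annotation; the helper name is hypothetical, and the types (Block, BoundingPoly, Vertex) come from com.google.cloud.vision.v1:
// Sketch: print the bounding polygon of every block on every page.
static void printBlockBounds(TextAnnotation annotation) {
  for (Page page : annotation.getPagesList()) {
    for (Block block : page.getBlocksList()) {
      BoundingPoly box = block.getBoundingBox();
      StringBuilder corners = new StringBuilder();
      for (Vertex v : box.getVerticesList()) {
        corners.append(String.format("(%d,%d) ", v.getX(), v.getY()));
      }
      System.out.printf("Block type %s, confidence %f, corners: %s%n", block.getBlockType(), block.getConfidence(), corners);
    }
  }
}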
Use of com.google.cloud.vision.v1.Image in project java-vision by googleapis.
The class AsyncBatchAnnotateImagesGcs, method asyncBatchAnnotateImagesGcs.
// Performs asynchronous batch annotation of images on Google Cloud Storage
public static void asyncBatchAnnotateImagesGcs(String gcsSourcePath, String gcsDestinationPath) throws Exception {
// String gcsDestinationPath = "gs://YOUR_BUCKET_ID/path_to_store_annotation";
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
List<AnnotateImageRequest> requests = new ArrayList<>();
ImageSource imgSource = ImageSource.newBuilder().setImageUri(gcsSourcePath).build();
Image image = Image.newBuilder().setSource(imgSource).build();
// Set the GCS destination path for where to save the results.
GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(gcsDestinationPath).build();
// Create the configuration for the output with the batch size.
// The batch size sets how many pages should be grouped into each json output file.
OutputConfig outputConfig = OutputConfig.newBuilder().setGcsDestination(gcsDestination).setBatchSize(2).build();
// Select the Features required by the Vision API. Each feature type needs its own Feature
// message; chaining setType on a single builder keeps only the last type.
Feature labelFeature = Feature.newBuilder().setType(Type.LABEL_DETECTION).build();
Feature textFeature = Feature.newBuilder().setType(Type.TEXT_DETECTION).build();
Feature imagePropertiesFeature = Feature.newBuilder().setType(Type.IMAGE_PROPERTIES).build();
// Build the request with all three features
AnnotateImageRequest annotateImageRequest = AnnotateImageRequest.newBuilder().setImage(image).addFeatures(labelFeature).addFeatures(textFeature).addFeatures(imagePropertiesFeature).build();
requests.add(annotateImageRequest);
AsyncBatchAnnotateImagesRequest request = AsyncBatchAnnotateImagesRequest.newBuilder().addAllRequests(requests).setOutputConfig(outputConfig).build();
OperationFuture<AsyncBatchAnnotateImagesResponse, OperationMetadata> response = client.asyncBatchAnnotateImagesAsync(request);
System.out.println("Waiting for the operation to finish.");
// we're not processing the response, since we'll be reading the output from GCS.
response.get(180, TimeUnit.SECONDS);
// Once the request has completed and the output has been
// written to GCS, we can list all the output files.
Storage storage = StorageOptions.getDefaultInstance().getService();
// Get the destination location from the gcsDestinationPath
Pattern pattern = Pattern.compile("gs://([^/]+)/(.+)");
Matcher matcher = pattern.matcher(gcsDestinationPath);
if (matcher.find()) {
String bucketName = matcher.group(1);
String prefix = matcher.group(2);
// Get the list of objects with the given prefix from the GCS bucket
Bucket bucket = storage.get(bucketName);
Page<Blob> pageList = bucket.list(BlobListOption.prefix(prefix));
Blob firstOutputFile = null;
// List objects with the given prefix.
System.out.println("Output files:");
for (Blob blob : pageList.iterateAll()) {
System.out.println(blob.getName());
// With a batch size of 2, the first output file holds the responses for the first two image requests.
if (firstOutputFile == null) {
firstOutputFile = blob;
}
}
// Get the contents of the file and convert the JSON contents to a
// BatchAnnotateImagesResponse object. If the Blob is small, read all of its content in one
// request (note: the file is a .json file).
// Storage guide: https://cloud.google.com/storage/docs/downloading-objects
String jsonContents = new String(firstOutputFile.getContent());
Builder builder = BatchAnnotateImagesResponse.newBuilder();
JsonFormat.parser().merge(jsonContents, builder);
// Build the BatchAnnotateImagesResponse object
BatchAnnotateImagesResponse batchAnnotateImagesResponse = builder.build();
// Here we print the response for the first image
// The response contains more information:
// annotation/pages/blocks/paragraphs/words/symbols/colors
// including confidence score and bounding boxes
System.out.format("\nResponse: %s\n", batchAnnotateImagesResponse.getResponses(0));
} else {
System.out.println("No MATCH");
}
} catch (Exception e) {
System.out.println("Error during asyncBatchAnnotateImagesGcs: \n" + e.toString());
}
}
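The method above parses only the first output shard. If the batch produces several JSON shards, a sketch like the following could replace the single-file read inside the same try block; it reuses the pageList variable and the same JsonFormat parsing, and its checked exceptions are absorbed by the surrounding catch:
// Sketch: parse every JSON output shard under the destination prefix.
for (Blob blob : pageList.iterateAll()) {
  if (!blob.getName().endsWith(".json")) {
    continue; // skip anything that is not an annotation shard
  }
  BatchAnnotateImagesResponse.Builder shard = BatchAnnotateImagesResponse.newBuilder();
  JsonFormat.parser().merge(new String(blob.getContent()), shard);
  for (AnnotateImageResponse res : shard.build().getResponsesList()) {
    System.out.println(res.getLabelAnnotationsList());
  }
}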
Use of com.google.cloud.vision.v1.Image in project java-vision by googleapis.
The class DetectBeta, method detectHandwrittenOcrGcs.
// [END vision_handwritten_ocr_beta]
// [START vision_handwritten_ocr_gcs_beta]
/**
* Performs handwritten text detection on a remote image on Google Cloud Storage.
*
* @param gcsPath The path to the remote file on Google Cloud Storage to detect handwritten text
* on.
* @param out A {@link PrintStream} to write the results to.
* @throws Exception on errors while closing the client.
* @throws IOException on Input/Output errors.
*/
public static void detectHandwrittenOcrGcs(String gcsPath, PrintStream out) throws Exception {
List<AnnotateImageRequest> requests = new ArrayList<>();
ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
Image img = Image.newBuilder().setSource(imgSource).build();
Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
// Set the parameters for the image
ImageContext imageContext = ImageContext.newBuilder().addLanguageHints("en-t-i0-handwrit").build();
AnnotateImageRequest request = AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).setImageContext(imageContext).build();
requests.add(request);
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
for (AnnotateImageResponse res : responses) {
if (res.hasError()) {
out.printf("Error: %s\n", res.getError().getMessage());
return;
}
// For full list of available annotations, see http://g.co/cloud/vision/docs
TextAnnotation annotation = res.getFullTextAnnotation();
for (Page page : annotation.getPagesList()) {
String pageText = "";
for (Block block : page.getBlocksList()) {
String blockText = "";
for (Paragraph para : block.getParagraphsList()) {
String paraText = "";
for (Word word : para.getWordsList()) {
String wordText = "";
for (Symbol symbol : word.getSymbolsList()) {
wordText = wordText + symbol.getText();
out.format("Symbol text: %s (confidence: %f)\n", symbol.getText(), symbol.getConfidence());
}
out.format("Word text: %s (confidence: %f)\n\n", wordText, word.getConfidence());
paraText = String.format("%s %s", paraText, wordText);
}
// Output Example using Paragraph:
out.println("\nParagraph: \n" + paraText);
out.format("Paragraph Confidence: %f\n", para.getConfidence());
blockText = blockText + paraText;
}
pageText = pageText + blockText;
}
}
out.println("\nComplete annotation:");
out.println(annotation.getText());
}
}
}
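A small usage sketch for the method above; the GCS URI is a placeholder, and System.out serves as the PrintStream:
// Sketch: run handwritten OCR on a placeholder GCS image and print to stdout.
public static void main(String[] args) throws Exception {
  detectHandwrittenOcrGcs("gs://your-bucket/handwriting.jpg", System.out); // placeholder URI
}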