Use of com.google.cloud.vision.v1p3beta1.AnnotateImageResponse in project java-vision by googleapis.
The class DetectBeta, method detectHandwrittenOcrGcs.
// [END vision_handwritten_ocr_beta]
// [START vision_handwritten_ocr_gcs_beta]
/**
* Performs handwritten text detection on a remote image on Google Cloud Storage.
*
* @param gcsPath The path to the remote file on Google Cloud Storage to detect handwritten text
* on.
* @param out A {@link PrintStream} to write the results to.
* @throws Exception on errors while closing the client.
* @throws IOException on Input/Output errors.
*/
public static void detectHandwrittenOcrGcs(String gcsPath, PrintStream out) throws Exception {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();

  // Set the image context: the "en-t-i0-handwrit" language hint requests the handwriting OCR
  // model for the DOCUMENT_TEXT_DETECTION feature.
  ImageContext imageContext =
      ImageContext.newBuilder().addLanguageHints("en-t-i0-handwrit").build();

  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder()
          .addFeatures(feat)
          .setImage(img)
          .setImageContext(imageContext)
          .build();
  requests.add(request);

  // The try-with-resources block closes the client and cleans up any remaining background
  // resources once the requests are done.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For the full list of available annotations, see http://g.co/cloud/vision/docs
      TextAnnotation annotation = res.getFullTextAnnotation();
      for (Page page : annotation.getPagesList()) {
        String pageText = "";
        for (Block block : page.getBlocksList()) {
          String blockText = "";
          for (Paragraph para : block.getParagraphsList()) {
            String paraText = "";
            for (Word word : para.getWordsList()) {
              String wordText = "";
              for (Symbol symbol : word.getSymbolsList()) {
                wordText = wordText + symbol.getText();
                out.format(
                    "Symbol text: %s (confidence: %f)\n",
                    symbol.getText(), symbol.getConfidence());
              }
              out.format("Word text: %s (confidence: %f)\n\n", wordText, word.getConfidence());
              paraText = String.format("%s %s", paraText, wordText);
            }
            // Output example using Paragraph:
            out.println("\nParagraph: \n" + paraText);
            out.format("Paragraph Confidence: %f\n", para.getConfidence());
            blockText = blockText + paraText;
          }
          pageText = pageText + blockText;
        }
      }
      out.println("\nComplete annotation:");
      out.println(annotation.getText());
    }
  }
}
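A minimal call site for this method might look like the sketch below; the GCS path is a placeholder rather than an object that ships with the sample, and the results are written straight to System.out.

// Hypothetical driver; point gcsPath at a handwriting image your credentials can read.
public static void main(String[] args) throws Exception {
  String gcsPath = "gs://my-bucket/handwritten-note.jpg"; // placeholder object
  detectHandwrittenOcrGcs(gcsPath, System.out);
}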
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageResponse in project java-vision by googleapis.
The class Detect, method detectDocumentsGcs.
// [END vision_fulltext_detection_gcs]
// [START vision_text_detection_pdf_gcs]
/**
* Performs document text OCR with PDF/TIFF as source files on Google Cloud Storage.
*
* @param gcsSourcePath The path to the remote file on Google Cloud Storage to detect document
* text on.
* @param gcsDestinationPath The path to the remote file on Google Cloud Storage to store the
* results on.
* @throws Exception on errors while closing the client.
*/
public static void detectDocumentsGcs(String gcsSourcePath, String gcsDestinationPath)
    throws Exception {
  // Initialize a client that will be used to send requests. This client only needs to be
  // created once, and can be reused for multiple requests. After completing all of your
  // requests, call the "close" method on the client to safely clean up any remaining
  // background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    List<AsyncAnnotateFileRequest> requests = new ArrayList<>();

    // Set the GCS source path for the remote file.
    GcsSource gcsSource = GcsSource.newBuilder().setUri(gcsSourcePath).build();

    // Create the input configuration with the specified MIME (Multipurpose Internet Mail
    // Extensions) type. Supported MIME types: "application/pdf", "image/tiff".
    InputConfig inputConfig =
        InputConfig.newBuilder().setMimeType("application/pdf").setGcsSource(gcsSource).build();

    // Set the GCS destination path for where to save the results.
    GcsDestination gcsDestination =
        GcsDestination.newBuilder().setUri(gcsDestinationPath).build();

    // Create the configuration for the output with the batch size.
    // The batch size sets how many pages should be grouped into each JSON output file.
    OutputConfig outputConfig =
        OutputConfig.newBuilder().setBatchSize(2).setGcsDestination(gcsDestination).build();

    // Select the Feature required by the Vision API.
    Feature feature = Feature.newBuilder().setType(Feature.Type.DOCUMENT_TEXT_DETECTION).build();

    // Build the OCR request.
    AsyncAnnotateFileRequest request =
        AsyncAnnotateFileRequest.newBuilder()
            .addFeatures(feature)
            .setInputConfig(inputConfig)
            .setOutputConfig(outputConfig)
            .build();
    requests.add(request);

    // Perform the OCR request.
    OperationFuture<AsyncBatchAnnotateFilesResponse, OperationMetadata> response =
        client.asyncBatchAnnotateFilesAsync(requests);
    System.out.println("Waiting for the operation to finish.");

    // Wait for the request to finish. (The result is not used, since the API saves the result
    // to the specified location on GCS.)
    List<AsyncAnnotateFileResponse> result =
        response.get(180, TimeUnit.SECONDS).getResponsesList();

    // Once the request has completed and the output has been written to GCS, list all the
    // output files.
    Storage storage = StorageOptions.getDefaultInstance().getService();

    // Get the destination location from the gcsDestinationPath.
    Pattern pattern = Pattern.compile("gs://([^/]+)/(.+)");
    Matcher matcher = pattern.matcher(gcsDestinationPath);

    if (matcher.find()) {
      String bucketName = matcher.group(1);
      String prefix = matcher.group(2);

      // Get the list of objects with the given prefix from the GCS bucket.
      Bucket bucket = storage.get(bucketName);
      com.google.api.gax.paging.Page<Blob> pageList = bucket.list(BlobListOption.prefix(prefix));

      Blob firstOutputFile = null;

      // List objects with the given prefix.
      System.out.println("Output files:");
      for (Blob blob : pageList.iterateAll()) {
        System.out.println(blob.getName());

        // Keep a handle to the first output file. Since the batch size is 2, the first
        // response contains the results for the first two pages of the input file.
        if (firstOutputFile == null) {
          firstOutputFile = blob;
        }
      }

      // Get the contents of the file and convert the JSON contents to an AnnotateFileResponse
      // object. If the Blob is small, read all its content in one request
      // (note: the file is a .json file).
      // Storage guide: https://cloud.google.com/storage/docs/downloading-objects
      String jsonContents = new String(firstOutputFile.getContent());
      Builder builder = AnnotateFileResponse.newBuilder();
      JsonFormat.parser().merge(jsonContents, builder);

      // Build the AnnotateFileResponse object.
      AnnotateFileResponse annotateFileResponse = builder.build();

      // Parse through the object to get the actual response for the first page of the input
      // file.
      AnnotateImageResponse annotateImageResponse = annotateFileResponse.getResponses(0);

      // Here we print the full text from the first page.
      // The response contains more information:
      // annotation/pages/blocks/paragraphs/words/symbols
      // including confidence scores and bounding boxes.
      System.out.format("%nText: %s%n", annotateImageResponse.getFullTextAnnotation().getText());
    } else {
      System.out.println("No MATCH");
    }
  }
}
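As a usage sketch, the method takes a gs:// source pointing at the PDF and a gs:// prefix under which the JSON output files are written; both bucket names below are placeholders and must already exist and be accessible to the credentials in use.

// Hypothetical invocation of detectDocumentsGcs; replace both paths with real resources.
String source = "gs://my-input-bucket/multipage-scan.pdf";
String destination = "gs://my-output-bucket/ocr-results/";
detectDocumentsGcs(source, destination);

With the batch size of 2 set above, a five-page PDF would yield three JSON output files under the destination prefix, and only the first of them is parsed back into an AnnotateFileResponse.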
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageResponse in project java-vision by googleapis.
The class Detect, method detectDocumentText.
/**
* Performs document text detection on a local image file.
*
* @param filePath The path to the local file to detect document text on.
* @throws Exception on errors while closing the client.
* @throws IOException on Input/Output errors.
*/
// [START vision_fulltext_detection]
public static void detectDocumentText(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // Initialize a client that will be used to send requests. This client only needs to be
  // created once, and can be reused for multiple requests. After completing all of your
  // requests, call the "close" method on the client to safely clean up any remaining
  // background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }

      // For the full list of available annotations, see http://g.co/cloud/vision/docs
      TextAnnotation annotation = res.getFullTextAnnotation();
      for (Page page : annotation.getPagesList()) {
        String pageText = "";
        for (Block block : page.getBlocksList()) {
          String blockText = "";
          for (Paragraph para : block.getParagraphsList()) {
            String paraText = "";
            for (Word word : para.getWordsList()) {
              String wordText = "";
              for (Symbol symbol : word.getSymbolsList()) {
                wordText = wordText + symbol.getText();
                System.out.format(
                    "Symbol text: %s (confidence: %f)%n",
                    symbol.getText(), symbol.getConfidence());
              }
              System.out.format(
                  "Word text: %s (confidence: %f)%n%n", wordText, word.getConfidence());
              paraText = String.format("%s %s", paraText, wordText);
            }
            // Output example using Paragraph:
            System.out.format("%nParagraph:%n%s%n", paraText);
            System.out.format("Paragraph Confidence: %f%n", para.getConfidence());
            blockText = blockText + paraText;
          }
          pageText = pageText + blockText;
        }
      }
      System.out.format("%nComplete annotation:%n");
      System.out.println(annotation.getText());
    }
  }
}
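A call site sketch, assuming a local image at a placeholder path:

// Hypothetical local file; replace with an image that exists on disk.
detectDocumentText("resources/receipt.jpg");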
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageResponse in project java-vision by googleapis.
The class DetectSafeSearchGcs, method detectSafeSearchGcs.
// Detects whether the specified image on Google Cloud Storage has features you would want to
// moderate.
public static void detectSafeSearchGcs(String gcsPath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feat = Feature.newBuilder().setType(Type.SAFE_SEARCH_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // Initialize a client that will be used to send requests. This client only needs to be
  // created once, and can be reused for multiple requests. After completing all of your
  // requests, call the "close" method on the client to safely clean up any remaining
  // background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }

      // For the full list of available annotations, see http://g.co/cloud/vision/docs
      SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
      System.out.format(
          "adult: %s%nmedical: %s%nspoofed: %s%nviolence: %s%nracy: %s%n",
          annotation.getAdult(),
          annotation.getMedical(),
          annotation.getSpoof(),
          annotation.getViolence(),
          annotation.getRacy());
    }
  }
}
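The getters on SafeSearchAnnotation return Likelihood values rather than booleans, so callers usually threshold them. The helper below is only a sketch of one possible policy; the isExplicit name and the LIKELY cutoff are choices made here for illustration, not part of the sample.

// Sketch of a moderation check over the annotation printed above; the threshold is an
// assumption and should be tuned to your own policy.
static boolean isExplicit(SafeSearchAnnotation annotation) {
  return annotation.getAdult().getNumber() >= Likelihood.LIKELY.getNumber()
      || annotation.getViolence().getNumber() >= Likelihood.LIKELY.getNumber()
      || annotation.getRacy().getNumber() >= Likelihood.LIKELY.getNumber();
}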
Use of com.google.cloud.vision.v1p3beta1.AnnotateImageResponse in project java-vision by googleapis.
The class DetectText, method detectText.
// Detects text in the specified image.
public static void detectText(String filePath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Feature.Type.TEXT_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // Initialize a client that will be used to send requests. This client only needs to be
  // created once, and can be reused for multiple requests. After completing all of your
  // requests, call the "close" method on the client to safely clean up any remaining
  // background resources.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }

      // For the full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getTextAnnotationsList()) {
        System.out.format("Text: %s%n", annotation.getDescription());
        System.out.format("Position: %s%n", annotation.getBoundingPoly());
      }
    }
  }
}
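A call site sketch (the path is a placeholder): the first EntityAnnotation in the response carries the full detected text, and the following entries are the individual words with their bounding polygons.

// Hypothetical local image to run plain TEXT_DETECTION on.
detectText("resources/street-sign.png");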