use of com.google.cloud.vision.v1p4beta1.OperationMetadata in project java-document-ai by googleapis.
the class BatchParseFormBeta method batchParseFormGcs.
public static void batchParseFormGcs(String projectId, String location, String outputGcsBucketName, String outputGcsPrefix, String inputGcsUri) throws IOException, InterruptedException, ExecutionException, TimeoutException {
// Initialize the client inside a try-with-resources block, which calls the "close" method on the client to safely clean up any remaining background resources.
try (DocumentUnderstandingServiceClient client = DocumentUnderstandingServiceClient.create()) {
// Configure the request for processing the PDF
String parent = String.format("projects/%s/locations/%s", projectId, location);
// Improve form parsing results by providing key-value pair hints.
// For each key hint, key is text that is likely to appear in the
// document as a form field name (e.g. "DOB").
// Value types are optional, but can be one or more of:
// ADDRESS, LOCATION, ORGANIZATION, PERSON, PHONE_NUMBER, ID,
// NUMBER, EMAIL, PRICE, TERMS, DATE, NAME
KeyValuePairHint keyValuePairHint = KeyValuePairHint.newBuilder().setKey("Phone").addValueTypes("PHONE_NUMBER").build();
KeyValuePairHint keyValuePairHint2 = KeyValuePairHint.newBuilder().setKey("Contact").addValueTypes("EMAIL").addValueTypes("NAME").build();
// Setting enabled=true enables form extraction.
FormExtractionParams params = FormExtractionParams.newBuilder().setEnabled(true).addKeyValuePairHints(keyValuePairHint).addKeyValuePairHints(keyValuePairHint2).build();
GcsSource inputUri = GcsSource.newBuilder().setUri(inputGcsUri).build();
// mime_type can be application/pdf, image/tiff, image/gif, or application/json.
InputConfig config = InputConfig.newBuilder().setGcsSource(inputUri).setMimeType("application/pdf").build();
GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(String.format("gs://%s/%s", outputGcsBucketName, outputGcsPrefix)).build();
OutputConfig outputConfig = OutputConfig.newBuilder().setGcsDestination(gcsDestination).setPagesPerShard(1).build();
ProcessDocumentRequest request = ProcessDocumentRequest.newBuilder().setFormExtractionParams(params).setInputConfig(config).setOutputConfig(outputConfig).build();
BatchProcessDocumentsRequest requests = BatchProcessDocumentsRequest.newBuilder().addRequests(request).setParent(parent).build();
// Batch process document using a long-running operation.
OperationFuture<BatchProcessDocumentsResponse, OperationMetadata> future = client.batchProcessDocumentsAsync(requests);
// Wait for operation to complete.
System.out.println("Waiting for operation to complete...");
future.get(360, TimeUnit.SECONDS);
System.out.println("Document processing complete.");
Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
Bucket bucket = storage.get(outputGcsBucketName);
// List all of the files in the Storage bucket.
Page<Blob> blobs = bucket.list(Storage.BlobListOption.currentDirectory(), Storage.BlobListOption.prefix(outputGcsPrefix));
int idx = 0;
for (Blob blob : blobs.iterateAll()) {
if (!blob.isDirectory()) {
System.out.printf("Fetched file #%d\n", ++idx);
// Read the results
// Download and store json data in a temp file.
File tempFile = File.createTempFile("file", ".json");
Blob fileInfo = storage.get(BlobId.of(outputGcsBucketName, blob.getName()));
fileInfo.downloadTo(tempFile.toPath());
// Parse json file into Document.
FileReader reader = new FileReader(tempFile);
Document.Builder builder = Document.newBuilder();
JsonFormat.parser().merge(reader, builder);
Document document = builder.build();
// Get all of the document text as one big string.
String text = document.getText();
// Process the output.
if (document.getPagesCount() > 0) {
Document.Page page1 = document.getPages(0);
for (Document.Page.FormField field : page1.getFormFieldsList()) {
String fieldName = getText(field.getFieldName(), text);
String fieldValue = getText(field.getFieldValue(), text);
System.out.println("Extracted form fields pair:");
System.out.printf("\t(%s, %s)%n", fieldName, fieldValue);
}
}
// Schedule the temp file for deletion when the JVM exits.
tempFile.deleteOnExit();
}
}
}
}
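The snippet above relies on a getText helper that is not included in this excerpt. Below is a minimal sketch, assuming the helper takes a Document.Page.Layout from the beta Document AI proto and slices the referenced span out of the full document text; the helper actually defined in BatchParseFormBeta may differ.
// Hypothetical helper (not shown in the excerpt above): resolves a layout's
// TextAnchor back into a substring of the full document text.
private static String getText(Document.Page.Layout layout, String text) {
    Document.TextAnchor textAnchor = layout.getTextAnchor();
    if (textAnchor.getTextSegmentsCount() > 0) {
        // Offsets are int64 indices into document.getText(); the first segment of a
        // document may omit startIndex, which then defaults to 0.
        int startIdx = (int) textAnchor.getTextSegments(0).getStartIndex();
        int endIdx = (int) textAnchor.getTextSegments(0).getEndIndex();
        return text.substring(startIdx, endIdx);
    }
    return "";
}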
use of com.google.cloud.vision.v1p4beta1.OperationMetadata in project java-document-ai by googleapis.
the class BatchParseTableBeta method batchParseTableGcs.
public static void batchParseTableGcs(String projectId, String location, String outputGcsBucketName, String outputGcsPrefix, String inputGcsUri) throws IOException, InterruptedException, ExecutionException, TimeoutException {
// Initialize the client inside a try-with-resources block, which calls the "close" method on the client to safely clean up any remaining background resources.
try (DocumentUnderstandingServiceClient client = DocumentUnderstandingServiceClient.create()) {
// Configure the request for processing the PDF
String parent = String.format("projects/%s/locations/%s", projectId, location);
// Each vertex coordinate must be a number between 0 and 1.
BoundingPoly tableBoundingBox = BoundingPoly.newBuilder().addNormalizedVertices(NormalizedVertex.newBuilder().setX(0).setY(0).build()).addNormalizedVertices(NormalizedVertex.newBuilder().setX(1).setY(0).build()).addNormalizedVertices(NormalizedVertex.newBuilder().setX(1).setY(1).build()).addNormalizedVertices(NormalizedVertex.newBuilder().setX(0).setY(1).build()).build();
TableBoundHint tableBoundHints = TableBoundHint.newBuilder().setBoundingBox(tableBoundingBox).setPageNumber(1).build();
TableExtractionParams params = TableExtractionParams.newBuilder().setEnabled(true).addTableBoundHints(tableBoundHints).build();
GcsSource inputUri = GcsSource.newBuilder().setUri(inputGcsUri).build();
// mime_type can be application/pdf, image/tiff, image/gif, or application/json.
InputConfig config = InputConfig.newBuilder().setGcsSource(inputUri).setMimeType("application/pdf").build();
GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(String.format("gs://%s/%s", outputGcsBucketName, outputGcsPrefix)).build();
OutputConfig outputConfig = OutputConfig.newBuilder().setGcsDestination(gcsDestination).setPagesPerShard(1).build();
ProcessDocumentRequest request = ProcessDocumentRequest.newBuilder().setTableExtractionParams(params).setInputConfig(config).setOutputConfig(outputConfig).build();
BatchProcessDocumentsRequest requests = BatchProcessDocumentsRequest.newBuilder().addRequests(request).setParent(parent).build();
// Batch process document using a long-running operation.
OperationFuture<BatchProcessDocumentsResponse, OperationMetadata> future = client.batchProcessDocumentsAsync(requests);
// Wait for operation to complete.
System.out.println("Waiting for operation to complete...");
future.get(360, TimeUnit.SECONDS);
System.out.println("Document processing complete.");
Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
Bucket bucket = storage.get(outputGcsBucketName);
// List all of the files in the Storage bucket.
Page<Blob> blobs = bucket.list(Storage.BlobListOption.currentDirectory(), Storage.BlobListOption.prefix(outputGcsPrefix));
int idx = 0;
for (Blob blob : blobs.iterateAll()) {
if (!blob.isDirectory()) {
System.out.printf("Fetched file #%d\n", ++idx);
// Read the results
// Download and store json data in a temp file.
File tempFile = File.createTempFile("file", ".json");
Blob fileInfo = storage.get(BlobId.of(outputGcsBucketName, blob.getName()));
fileInfo.downloadTo(tempFile.toPath());
// Parse json file into Document.
FileReader reader = new FileReader(tempFile);
Document.Builder builder = Document.newBuilder();
JsonFormat.parser().merge(reader, builder);
Document document = builder.build();
// Get all of the document text as one big string.
String text = document.getText();
// Process the output.
if (document.getPagesCount() > 0) {
Document.Page page1 = document.getPages(0);
if (page1.getTablesCount() > 0) {
Document.Page.Table table = page1.getTables(0);
System.out.println("Results from first table processed:");
System.out.println("Header row:");
if (table.getHeaderRowsCount() > 0) {
Document.Page.Table.TableRow headerRow = table.getHeaderRows(0);
for (Document.Page.Table.TableCell tableCell : headerRow.getCellsList()) {
if (!tableCell.getLayout().getTextAnchor().getTextSegmentsList().isEmpty()) {
// Extract shards from the text field
// First shard in document doesn't have startIndex property
List<Document.TextAnchor.TextSegment> textSegments = tableCell.getLayout().getTextAnchor().getTextSegmentsList();
int startIdx = textSegments.size() > 0 ? (int) textSegments.get(0).getStartIndex() : 0;
int endIdx = (int) textSegments.get(0).getEndIndex();
System.out.printf("\t%s", text.substring(startIdx, endIdx));
}
}
}
}
}
// Schedule the temp file for deletion when the JVM exits.
tempFile.deleteOnExit();
}
}
}
}
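The table snippet above prints only the cells of the first header row. Continuing from the same local variables (table and text), a hedged sketch of how the body rows could be printed with the same text-anchor logic:
// Sketch only: iterate the data rows of the first table and print each cell's text.
for (Document.Page.Table.TableRow bodyRow : table.getBodyRowsList()) {
    for (Document.Page.Table.TableCell cell : bodyRow.getCellsList()) {
        List<Document.TextAnchor.TextSegment> segments = cell.getLayout().getTextAnchor().getTextSegmentsList();
        if (!segments.isEmpty()) {
            int startIdx = (int) segments.get(0).getStartIndex();
            int endIdx = (int) segments.get(0).getEndIndex();
            System.out.printf("\t%s", text.substring(startIdx, endIdx));
        }
    }
    // Newline after each row.
    System.out.println();
}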
use of com.google.cloud.vision.v1p4beta1.OperationMetadata in project java-vision by googleapis.
the class Detect method detectDocumentsGcs.
// [END vision_fulltext_detection_gcs]
// [START vision_text_detection_pdf_gcs]
/**
* Performs document text OCR with PDF/TIFF as source files on Google Cloud Storage.
*
* @param gcsSourcePath The path to the remote file on Google Cloud Storage to detect document
* text on.
* @param gcsDestinationPath The path to the remote file on Google Cloud Storage to store the
* results on.
* @throws Exception on errors while closing the client.
*/
public static void detectDocumentsGcs(String gcsSourcePath, String gcsDestinationPath) throws Exception {
// Initialize the client inside a try-with-resources block, which calls the "close" method on the client to safely clean up any remaining background resources.
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
List<AsyncAnnotateFileRequest> requests = new ArrayList<>();
// Set the GCS source path for the remote file.
GcsSource gcsSource = GcsSource.newBuilder().setUri(gcsSourcePath).build();
// Create the configuration with the specified MIME (Multipurpose Internet Mail Extensions) type.
// Supported MIME types: "application/pdf", "image/tiff"
InputConfig inputConfig = InputConfig.newBuilder().setMimeType("application/pdf").setGcsSource(gcsSource).build();
// Set the GCS destination path for where to save the results.
GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(gcsDestinationPath).build();
// Create the configuration for the output with the batch size.
// The batch size sets how many pages should be grouped into each json output file.
OutputConfig outputConfig = OutputConfig.newBuilder().setBatchSize(2).setGcsDestination(gcsDestination).build();
// Select the Feature required by the vision API
Feature feature = Feature.newBuilder().setType(Feature.Type.DOCUMENT_TEXT_DETECTION).build();
// Build the OCR request
AsyncAnnotateFileRequest request = AsyncAnnotateFileRequest.newBuilder().addFeatures(feature).setInputConfig(inputConfig).setOutputConfig(outputConfig).build();
requests.add(request);
// Perform the OCR request
OperationFuture<AsyncBatchAnnotateFilesResponse, OperationMetadata> response = client.asyncBatchAnnotateFilesAsync(requests);
System.out.println("Waiting for the operation to finish.");
// Wait for the request to finish. (The result is not used, since the API saves the result to
// the specified location on GCS.)
List<AsyncAnnotateFileResponse> result = response.get(180, TimeUnit.SECONDS).getResponsesList();
// Once the request has completed and the output has been
// written to GCS, we can list all the output files.
Storage storage = StorageOptions.getDefaultInstance().getService();
// Get the destination location from the gcsDestinationPath
Pattern pattern = Pattern.compile("gs://([^/]+)/(.+)");
Matcher matcher = pattern.matcher(gcsDestinationPath);
if (matcher.find()) {
String bucketName = matcher.group(1);
String prefix = matcher.group(2);
// Get the list of objects with the given prefix from the GCS bucket
Bucket bucket = storage.get(bucketName);
com.google.api.gax.paging.Page<Blob> pageList = bucket.list(BlobListOption.prefix(prefix));
Blob firstOutputFile = null;
// List objects with the given prefix.
System.out.println("Output files:");
for (Blob blob : pageList.iterateAll()) {
System.out.println(blob.getName());
// Keep the first output file; with a batch size of 2 it contains the annotations for the first two pages of the input file.
if (firstOutputFile == null) {
firstOutputFile = blob;
}
}
// Get the contents of the file and convert the JSON contents to an AnnotateFileResponse
// object. If the Blob is small, read all of its content in one request
// (note: the file is a .json file).
// Storage guide: https://cloud.google.com/storage/docs/downloading-objects
String jsonContents = new String(firstOutputFile.getContent());
Builder builder = AnnotateFileResponse.newBuilder();
JsonFormat.parser().merge(jsonContents, builder);
// Build the AnnotateFileResponse object
AnnotateFileResponse annotateFileResponse = builder.build();
// Parse through the object to get the actual response for the first page of the input file.
AnnotateImageResponse annotateImageResponse = annotateFileResponse.getResponses(0);
// Here we print the full text from the first page.
// The response contains more information:
// annotation/pages/blocks/paragraphs/words/symbols
// including confidence score and bounding boxes
System.out.format("%nText: %s%n", annotateImageResponse.getFullTextAnnotation().getText());
} else {
System.out.println("No MATCH");
}
}
}
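For reference, an invocation of this method could look like the following; the bucket and object paths are placeholders, not values from the sample:
// Hypothetical call; replace the bucket and paths with your own.
Detect.detectDocumentsGcs("gs://my-bucket/docs/sample.pdf", "gs://my-bucket/vision-output/");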
use of com.google.cloud.vision.v1p4beta1.OperationMetadata in project java-vision by googleapis.
the class AsyncBatchAnnotateImagesGcs method asyncBatchAnnotateImagesGcs.
// Performs asynchronous batch annotation of images on Google Cloud Storage
public static void asyncBatchAnnotateImagesGcs(String gcsSourcePath, String gcsDestinationPath) throws Exception {
// String gcsDestinationPath = "gs://YOUR_BUCKET_ID/path_to_store_annotation";
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
List<AnnotateImageRequest> requests = new ArrayList<>();
ImageSource imgSource = ImageSource.newBuilder().setImageUri(gcsSourcePath).build();
Image image = Image.newBuilder().setSource(imgSource).build();
// Set the GCS destination path for where to save the results.
GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(gcsDestinationPath).build();
// Create the configuration for the output with the batch size.
// The batch size sets how many pages should be grouped into each json output file.
OutputConfig outputConfig = OutputConfig.newBuilder().setGcsDestination(gcsDestination).setBatchSize(2).build();
// Select the Features required by the vision API. Each feature type needs its own
// Feature message; chaining setType on a single builder would keep only the last type.
Feature labelFeature = Feature.newBuilder().setType(Type.LABEL_DETECTION).build();
Feature textFeature = Feature.newBuilder().setType(Type.TEXT_DETECTION).build();
Feature imagePropertiesFeature = Feature.newBuilder().setType(Type.IMAGE_PROPERTIES).build();
// Build the request
AnnotateImageRequest annotateImageRequest = AnnotateImageRequest.newBuilder().setImage(image).addFeatures(labelFeature).addFeatures(textFeature).addFeatures(imagePropertiesFeature).build();
requests.add(annotateImageRequest);
AsyncBatchAnnotateImagesRequest request = AsyncBatchAnnotateImagesRequest.newBuilder().addAllRequests(requests).setOutputConfig(outputConfig).build();
OperationFuture<AsyncBatchAnnotateImagesResponse, OperationMetadata> response = client.asyncBatchAnnotateImagesAsync(request);
System.out.println("Waiting for the operation to finish.");
// we're not processing the response, since we'll be reading the output from GCS.
response.get(180, TimeUnit.SECONDS);
// Once the request has completed and the output has been
// written to GCS, we can list all the output files.
Storage storage = StorageOptions.getDefaultInstance().getService();
// Get the destination location from the gcsDestinationPath
Pattern pattern = Pattern.compile("gs://([^/]+)/(.+)");
Matcher matcher = pattern.matcher(gcsDestinationPath);
if (matcher.find()) {
String bucketName = matcher.group(1);
String prefix = matcher.group(2);
// Get the list of objects with the given prefix from the GCS bucket
Bucket bucket = storage.get(bucketName);
Page<Blob> pageList = bucket.list(BlobListOption.prefix(prefix));
Blob firstOutputFile = null;
// List objects with the given prefix.
System.out.println("Output files:");
for (Blob blob : pageList.iterateAll()) {
System.out.println(blob.getName());
// Keep the first output file; with a batch size of 2 it holds the responses for up to the first two image requests.
if (firstOutputFile == null) {
firstOutputFile = blob;
}
}
// Get the contents of the file and convert the JSON contents to a
// BatchAnnotateImagesResponse object. If the Blob is small, read all of its
// content in one request (note: the file is a .json file).
// Storage guide: https://cloud.google.com/storage/docs/downloading-objects
String jsonContents = new String(firstOutputFile.getContent());
Builder builder = BatchAnnotateImagesResponse.newBuilder();
JsonFormat.parser().merge(jsonContents, builder);
// Build the BatchAnnotateImagesResponse object
BatchAnnotateImagesResponse batchAnnotateImagesResponse = builder.build();
// Here we print the response for the first image
// The response contains more information:
// annotation/pages/blocks/paragraphs/words/symbols/colors
// including confidence score and bounding boxes
System.out.format("\nResponse: %s\n", batchAnnotateImagesResponse.getResponses(0));
} else {
System.out.println("No MATCH");
}
} catch (Exception e) {
System.out.println("Error during asyncBatchAnnotateImagesGcs: \n" + e.toString());
}
}
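As with the previous sample, a call to this method might look like the following; both paths are placeholders:
// Hypothetical call; the source image and destination prefix are placeholders.
AsyncBatchAnnotateImagesGcs.asyncBatchAnnotateImagesGcs("gs://my-bucket/images/photo.jpg", "gs://my-bucket/annotation-output/");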
use of com.google.cloud.vision.v1p4beta1.OperationMetadata in project java-automl by googleapis.
the class LanguageTextClassificationCreateDataset method createDataset.
// Create a dataset
static void createDataset(String projectId, String displayName) throws IOException, ExecutionException, InterruptedException {
// Initialize the client inside a try-with-resources block, which calls the "close" method on the client to safely clean up any remaining background resources.
try (AutoMlClient client = AutoMlClient.create()) {
// A resource that represents a Google Cloud Platform location.
LocationName projectLocation = LocationName.of(projectId, "us-central1");
// Specify the classification type
// Types:
// MultiLabel: Multiple labels are allowed for one example.
// MultiClass: At most one label is allowed per example.
ClassificationType classificationType = ClassificationType.MULTILABEL;
// Specify the text classification type for the dataset.
TextClassificationDatasetMetadata metadata = TextClassificationDatasetMetadata.newBuilder().setClassificationType(classificationType).build();
Dataset dataset = Dataset.newBuilder().setDisplayName(displayName).setTextClassificationDatasetMetadata(metadata).build();
OperationFuture<Dataset, OperationMetadata> future = client.createDatasetAsync(projectLocation, dataset);
Dataset createdDataset = future.get();
// Display the dataset information.
System.out.format("Dataset name: %s\n", createdDataset.getName());
// To get the dataset id, you have to parse it out of the `name` field, since dataset ids
// are required for other methods.
// Name Form: `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`
String[] names = createdDataset.getName().split("/");
String datasetId = names[names.length - 1];
System.out.format("Dataset id: %s\n", datasetId);
}
}
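As the comment in the snippet notes, the parsed dataset id is what other AutoML methods expect. A hedged illustration, assuming the v1 AutoMlClient from the same sample and continuing inside the try block, of turning the id back into a resource name for a follow-up call:
// Hypothetical follow-up inside the same try block: rebuild the dataset's full
// resource name from the parsed id and fetch the dataset again.
DatasetName datasetName = DatasetName.of(projectId, "us-central1", datasetId);
Dataset fetchedDataset = client.getDataset(datasetName);
System.out.format("Fetched dataset display name: %s\n", fetchedDataset.getDisplayName());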