Use of com.google.cloud.datalabeling.v1beta1.GcsDestination in project java-aiplatform by googleapis.
The class CreateBatchPredictionJobVideoObjectTrackingSample, method batchPredictionJobVideoObjectTracking.
static void batchPredictionJobVideoObjectTracking(String batchPredictionDisplayName, String modelId, String gcsSourceUri, String gcsDestinationOutputUriPrefix, String project) throws IOException {
JobServiceSettings jobServiceSettings = JobServiceSettings.newBuilder().setEndpoint("us-central1-aiplatform.googleapis.com:443").build();
// Initialize the client that will be used to send requests. After completing all of your requests, call
// the "close" method on the client to safely clean up any remaining background resources.
try (JobServiceClient jobServiceClient = JobServiceClient.create(jobServiceSettings)) {
String location = "us-central1";
LocationName locationName = LocationName.of(project, location);
ModelName modelName = ModelName.of(project, location, modelId);
VideoObjectTrackingPredictionParams modelParamsObj = VideoObjectTrackingPredictionParams.newBuilder().setConfidenceThreshold(((float) 0.5)).build();
Value modelParameters = ValueConverter.toValue(modelParamsObj);
GcsSource.Builder gcsSource = GcsSource.newBuilder();
gcsSource.addUris(gcsSourceUri);
InputConfig inputConfig = InputConfig.newBuilder().setInstancesFormat("jsonl").setGcsSource(gcsSource).build();
GcsDestination gcsDestination = GcsDestination.newBuilder().setOutputUriPrefix(gcsDestinationOutputUriPrefix).build();
OutputConfig outputConfig = OutputConfig.newBuilder().setPredictionsFormat("jsonl").setGcsDestination(gcsDestination).build();
BatchPredictionJob batchPredictionJob = BatchPredictionJob.newBuilder().setDisplayName(batchPredictionDisplayName).setModel(modelName.toString()).setModelParameters(modelParameters).setInputConfig(inputConfig).setOutputConfig(outputConfig).build();
BatchPredictionJob batchPredictionJobResponse = jobServiceClient.createBatchPredictionJob(locationName, batchPredictionJob);
System.out.println("Create Batch Prediction Job Video Object Tracking Response");
System.out.format("\tName: %s\n", batchPredictionJobResponse.getName());
System.out.format("\tDisplay Name: %s\n", batchPredictionJobResponse.getDisplayName());
System.out.format("\tModel: %s\n", batchPredictionJobResponse.getModel());
System.out.format("\tModel Parameters: %s\n", batchPredictionJobResponse.getModelParameters());
System.out.format("\tState: %s\n", batchPredictionJobResponse.getState());
System.out.format("\tCreate Time: %s\n", batchPredictionJobResponse.getCreateTime());
System.out.format("\tStart Time: %s\n", batchPredictionJobResponse.getStartTime());
System.out.format("\tEnd Time: %s\n", batchPredictionJobResponse.getEndTime());
System.out.format("\tUpdate Time: %s\n", batchPredictionJobResponse.getUpdateTime());
System.out.format("\tLabels: %s\n", batchPredictionJobResponse.getLabelsMap());
InputConfig inputConfigResponse = batchPredictionJobResponse.getInputConfig();
System.out.println("\tInput Config");
System.out.format("\t\tInstances Format: %s\n", inputConfigResponse.getInstancesFormat());
GcsSource gcsSourceResponse = inputConfigResponse.getGcsSource();
System.out.println("\t\tGcs Source");
System.out.format("\t\t\tUris: %s\n", gcsSourceResponse.getUrisList());
BigQuerySource bigQuerySource = inputConfigResponse.getBigquerySource();
System.out.println("\t\tBigquery Source");
System.out.format("\t\t\tInput Uri: %s\n", bigQuerySource.getInputUri());
OutputConfig outputConfigResponse = batchPredictionJobResponse.getOutputConfig();
System.out.println("\tOutput Config");
System.out.format("\t\tPredictions Format: %s\n", outputConfigResponse.getPredictionsFormat());
GcsDestination gcsDestinationResponse = outputConfigResponse.getGcsDestination();
System.out.println("\t\tGcs Destination");
System.out.format("\t\t\tOutput Uri Prefix: %s\n", gcsDestinationResponse.getOutputUriPrefix());
BigQueryDestination bigQueryDestination = outputConfigResponse.getBigqueryDestination();
System.out.println("\t\tBig Query Destination");
System.out.format("\t\t\tOutput Uri: %s\n", bigQueryDestination.getOutputUri());
BatchDedicatedResources batchDedicatedResources = batchPredictionJobResponse.getDedicatedResources();
System.out.println("\tBatch Dedicated Resources");
System.out.format("\t\tStarting Replica Count: %s\n", batchDedicatedResources.getStartingReplicaCount());
System.out.format("\t\tMax Replica Count: %s\n", batchDedicatedResources.getMaxReplicaCount());
MachineSpec machineSpec = batchDedicatedResources.getMachineSpec();
System.out.println("\t\tMachine Spec");
System.out.format("\t\t\tMachine Type: %s\n", machineSpec.getMachineType());
System.out.format("\t\t\tAccelerator Type: %s\n", machineSpec.getAcceleratorType());
System.out.format("\t\t\tAccelerator Count: %s\n", machineSpec.getAcceleratorCount());
ManualBatchTuningParameters manualBatchTuningParameters = batchPredictionJobResponse.getManualBatchTuningParameters();
System.out.println("\tManual Batch Tuning Parameters");
System.out.format("\t\tBatch Size: %s\n", manualBatchTuningParameters.getBatchSize());
OutputInfo outputInfo = batchPredictionJobResponse.getOutputInfo();
System.out.println("\tOutput Info");
System.out.format("\t\tGcs Output Directory: %s\n", outputInfo.getGcsOutputDirectory());
System.out.format("\t\tBigquery Output Dataset: %s\n", outputInfo.getBigqueryOutputDataset());
Status status = batchPredictionJobResponse.getError();
System.out.println("\tError");
System.out.format("\t\tCode: %s\n", status.getCode());
System.out.format("\t\tMessage: %s\n", status.getMessage());
List<Any> details = status.getDetailsList();
for (Status partialFailure : batchPredictionJobResponse.getPartialFailuresList()) {
System.out.println("\tPartial Failure");
System.out.format("\t\tCode: %s\n", partialFailure.getCode());
System.out.format("\t\tMessage: %s\n", partialFailure.getMessage());
List<Any> partialFailureDetailsList = partialFailure.getDetailsList();
}
ResourcesConsumed resourcesConsumed = batchPredictionJobResponse.getResourcesConsumed();
System.out.println("\tResources Consumed");
System.out.format("\t\tReplica Hours: %s\n", resourcesConsumed.getReplicaHours());
CompletionStats completionStats = batchPredictionJobResponse.getCompletionStats();
System.out.println("\tCompletion Stats");
System.out.format("\t\tSuccessful Count: %s\n", completionStats.getSuccessfulCount());
System.out.format("\t\tFailed Count: %s\n", completionStats.getFailedCount());
System.out.format("\t\tIncomplete Count: %s\n", completionStats.getIncompleteCount());
}
}
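A minimal invocation sketch for this sample (not part of the original code): the project ID, model ID, and Cloud Storage URIs below are hypothetical placeholders to substitute with your own resources.
public static void main(String[] args) throws IOException {
  // All values below are hypothetical placeholders.
  String project = "your-project-id";
  String batchPredictionDisplayName = "video-object-tracking-batch-job";
  String modelId = "1234567890"; // numeric ID of a trained video object tracking model
  String gcsSourceUri = "gs://your-bucket/input.jsonl";
  String gcsDestinationOutputUriPrefix = "gs://your-bucket/output/";
  batchPredictionJobVideoObjectTracking(
      batchPredictionDisplayName, modelId, gcsSourceUri, gcsDestinationOutputUriPrefix, project);
}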
Use of com.google.cloud.datalabeling.v1beta1.GcsDestination in project java-document-ai by googleapis.
The class BatchParseTableBeta, method batchParseTableGcs.
public static void batchParseTableGcs(String projectId, String location, String outputGcsBucketName, String outputGcsPrefix, String inputGcsUri) throws IOException, InterruptedException, ExecutionException, TimeoutException {
// Initialize the client that will be used to send requests. After completing all of your requests, call
// the "close" method on the client to safely clean up any remaining background resources.
try (DocumentUnderstandingServiceClient client = DocumentUnderstandingServiceClient.create()) {
// Configure the request for processing the PDF
String parent = String.format("projects/%s/locations/%s", projectId, location);
// Define a bounding polygon around the table to detect; each vertex coordinate must be a number between 0 and 1.
TableBoundHint tableBoundHints = TableBoundHint.newBuilder().setBoundingBox(BoundingPoly.newBuilder().addNormalizedVertices(NormalizedVertex.newBuilder().setX(0).setY(0).build()).addNormalizedVertices(NormalizedVertex.newBuilder().setX(1).setY(0).build()).addNormalizedVertices(NormalizedVertex.newBuilder().setX(1).setY(1).build()).addNormalizedVertices(NormalizedVertex.newBuilder().setX(0).setY(1).build()).build()).setPageNumber(1).build();
TableExtractionParams params = TableExtractionParams.newBuilder().setEnabled(true).addTableBoundHints(tableBoundHints).build();
GcsSource inputUri = GcsSource.newBuilder().setUri(inputGcsUri).build();
// mime_type can be application/pdf, image/tiff, image/gif, or application/json
InputConfig config = InputConfig.newBuilder().setGcsSource(inputUri).setMimeType("application/pdf").build();
GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(String.format("gs://%s/%s", outputGcsBucketName, outputGcsPrefix)).build();
OutputConfig outputConfig = OutputConfig.newBuilder().setGcsDestination(gcsDestination).setPagesPerShard(1).build();
ProcessDocumentRequest request = ProcessDocumentRequest.newBuilder().setTableExtractionParams(params).setInputConfig(config).setOutputConfig(outputConfig).build();
BatchProcessDocumentsRequest requests = BatchProcessDocumentsRequest.newBuilder().addRequests(request).setParent(parent).build();
// Batch process document using a long-running operation.
OperationFuture<BatchProcessDocumentsResponse, OperationMetadata> future = client.batchProcessDocumentsAsync(requests);
// Wait for operation to complete.
System.out.println("Waiting for operation to complete...");
future.get(360, TimeUnit.SECONDS);
System.out.println("Document processing complete.");
Storage storage = StorageOptions.newBuilder().setProjectId(projectId).build().getService();
Bucket bucket = storage.get(outputGcsBucketName);
// List all of the files in the Storage bucket.
Page<Blob> blobs = bucket.list(Storage.BlobListOption.currentDirectory(), Storage.BlobListOption.prefix(outputGcsPrefix));
int idx = 0;
for (Blob blob : blobs.iterateAll()) {
if (!blob.isDirectory()) {
System.out.printf("Fetched file #%d\n", ++idx);
// Read the results
// Download and store json data in a temp file.
File tempFile = File.createTempFile("file", ".json");
Blob fileInfo = storage.get(BlobId.of(outputGcsBucketName, blob.getName()));
fileInfo.downloadTo(tempFile.toPath());
// Parse json file into Document.
FileReader reader = new FileReader(tempFile);
Document.Builder builder = Document.newBuilder();
JsonFormat.parser().merge(reader, builder);
Document document = builder.build();
// Get all of the document text as one big string.
String text = document.getText();
// Process the output.
if (document.getPagesCount() > 0) {
Document.Page page1 = document.getPages(0);
if (page1.getTablesCount() > 0) {
Document.Page.Table table = page1.getTables(0);
System.out.println("Results from first table processed:");
System.out.println("Header row:");
if (table.getHeaderRowsCount() > 0) {
Document.Page.Table.TableRow headerRow = table.getHeaderRows(0);
for (Document.Page.Table.TableCell tableCell : headerRow.getCellsList()) {
if (!tableCell.getLayout().getTextAnchor().getTextSegmentsList().isEmpty()) {
// Extract shards from the text field.
// The first shard in the document doesn't have a startIndex property, so default to 0.
List<Document.TextAnchor.TextSegment> textSegments = tableCell.getLayout().getTextAnchor().getTextSegmentsList();
int startIdx = textSegments.size() > 0 ? (int) textSegments.get(0).getStartIndex() : 0;
int endIdx = (int) textSegments.get(0).getEndIndex();
System.out.printf("\t%s", text.substring(startIdx, endIdx));
}
}
}
}
}
// Clean up temp file.
tempFile.deleteOnExit();
}
}
}
}
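A minimal invocation sketch, with hypothetical placeholder values for the project, location, buckets, and input PDF (substitute your own resources).
public static void main(String[] args) throws Exception {
  // Hypothetical placeholder values; substitute your own project, buckets, and source document.
  batchParseTableGcs(
      "your-project-id",
      "us", // location of the Document AI endpoint
      "your-output-bucket",
      "output-prefix/",
      "gs://your-input-bucket/document-with-table.pdf");
}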
Use of com.google.cloud.datalabeling.v1beta1.GcsDestination in project java-vision by googleapis.
The class Detect, method detectDocumentsGcs.
// [START vision_text_detection_pdf_gcs]
/**
* Performs document text OCR with PDF/TIFF as source files on Google Cloud Storage.
*
* @param gcsSourcePath The path to the remote file on Google Cloud Storage to detect document
* text on.
* @param gcsDestinationPath The path to the remote file on Google Cloud Storage to store the
* results on.
* @throws Exception on errors while closing the client.
*/
public static void detectDocumentsGcs(String gcsSourcePath, String gcsDestinationPath) throws Exception {
// Initialize the client that will be used to send requests. After completing all of your requests, call
// the "close" method on the client to safely clean up any remaining background resources.
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
List<AsyncAnnotateFileRequest> requests = new ArrayList<>();
// Set the GCS source path for the remote file.
GcsSource gcsSource = GcsSource.newBuilder().setUri(gcsSourcePath).build();
// Create the configuration with the specified MIME (Multipurpose Internet Mail Extensions) type.
// Supported MIME types: "application/pdf", "image/tiff"
InputConfig inputConfig = InputConfig.newBuilder().setMimeType("application/pdf").setGcsSource(gcsSource).build();
// Set the GCS destination path for where to save the results.
GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(gcsDestinationPath).build();
// Create the configuration for the output with the batch size.
// The batch size sets how many pages should be grouped into each json output file.
OutputConfig outputConfig = OutputConfig.newBuilder().setBatchSize(2).setGcsDestination(gcsDestination).build();
// Select the Feature required by the vision API
Feature feature = Feature.newBuilder().setType(Feature.Type.DOCUMENT_TEXT_DETECTION).build();
// Build the OCR request
AsyncAnnotateFileRequest request = AsyncAnnotateFileRequest.newBuilder().addFeatures(feature).setInputConfig(inputConfig).setOutputConfig(outputConfig).build();
requests.add(request);
// Perform the OCR request
OperationFuture<AsyncBatchAnnotateFilesResponse, OperationMetadata> response = client.asyncBatchAnnotateFilesAsync(requests);
System.out.println("Waiting for the operation to finish.");
// Wait for the request to finish. (The result is not used, since the API saves the result to
// the specified location on GCS.)
List<AsyncAnnotateFileResponse> result = response.get(180, TimeUnit.SECONDS).getResponsesList();
// Once the request has completed and the output has been
// written to GCS, we can list all the output files.
Storage storage = StorageOptions.getDefaultInstance().getService();
// Get the destination location from the gcsDestinationPath
Pattern pattern = Pattern.compile("gs://([^/]+)/(.+)");
Matcher matcher = pattern.matcher(gcsDestinationPath);
if (matcher.find()) {
String bucketName = matcher.group(1);
String prefix = matcher.group(2);
// Get the list of objects with the given prefix from the GCS bucket
Bucket bucket = storage.get(bucketName);
com.google.api.gax.paging.Page<Blob> pageList = bucket.list(BlobListOption.prefix(prefix));
Blob firstOutputFile = null;
// List objects with the given prefix.
System.out.println("Output files:");
for (Blob blob : pageList.iterateAll()) {
System.out.println(blob.getName());
// Keep the first output file; with a batch size of 2 it contains the results for
// the first two pages of the input file.
if (firstOutputFile == null) {
firstOutputFile = blob;
}
}
// Get the contents of the file and convert the JSON contents to an AnnotateFileResponse
// object. If the Blob is small, read all its content in one request
// (Note: the file is a .json file)
// Storage guide: https://cloud.google.com/storage/docs/downloading-objects
String jsonContents = new String(firstOutputFile.getContent());
Builder builder = AnnotateFileResponse.newBuilder();
JsonFormat.parser().merge(jsonContents, builder);
// Build the AnnotateFileResponse object
AnnotateFileResponse annotateFileResponse = builder.build();
// Parse through the object to get the actual response for the first page of the input file.
AnnotateImageResponse annotateImageResponse = annotateFileResponse.getResponses(0);
// Here we print the full text from the first page.
// The response contains more information:
// annotation/pages/blocks/paragraphs/words/symbols
// including confidence score and bounding boxes
System.out.format("%nText: %s%n", annotateImageResponse.getFullTextAnnotation().getText());
} else {
System.out.println("No MATCH");
}
}
}
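A minimal invocation sketch, assuming a hypothetical source PDF and an output prefix in a bucket you can write to.
public static void main(String[] args) throws Exception {
  // Hypothetical placeholder URIs.
  detectDocumentsGcs(
      "gs://your-bucket/your-document.pdf",
      "gs://your-bucket/ocr-output/");
}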
Use of com.google.cloud.datalabeling.v1beta1.GcsDestination in project java-vision by googleapis.
The class AsyncBatchAnnotateImagesGcs, method asyncBatchAnnotateImagesGcs.
// Performs asynchronous batch annotation of images on Google Cloud Storage
public static void asyncBatchAnnotateImagesGcs(String gcsSourcePath, String gcsDestinationPath) throws Exception {
// String gcsDestinationPath = "gs://YOUR_BUCKET_ID/path_to_store_annotation";
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
List<AnnotateImageRequest> requests = new ArrayList<>();
ImageSource imgSource = ImageSource.newBuilder().setImageUri(gcsSourcePath).build();
Image image = Image.newBuilder().setSource(imgSource).build();
// Set the GCS destination path for where to save the results.
GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(gcsDestinationPath).build();
// Create the configuration for the output with the batch size.
// The batch size sets how many pages should be grouped into each json output file.
OutputConfig outputConfig = OutputConfig.newBuilder().setGcsDestination(gcsDestination).setBatchSize(2).build();
// Select the Features required by the Vision API. Each feature needs its own Feature object;
// calling setType repeatedly on a single builder would keep only the last type.
Feature labelFeature = Feature.newBuilder().setType(Type.LABEL_DETECTION).build();
Feature textFeature = Feature.newBuilder().setType(Type.TEXT_DETECTION).build();
Feature imagePropertiesFeature = Feature.newBuilder().setType(Type.IMAGE_PROPERTIES).build();
// Build the request
AnnotateImageRequest annotateImageRequest = AnnotateImageRequest.newBuilder().setImage(image).addFeatures(labelFeature).addFeatures(textFeature).addFeatures(imagePropertiesFeature).build();
requests.add(annotateImageRequest);
AsyncBatchAnnotateImagesRequest request = AsyncBatchAnnotateImagesRequest.newBuilder().addAllRequests(requests).setOutputConfig(outputConfig).build();
OperationFuture<AsyncBatchAnnotateImagesResponse, OperationMetadata> response = client.asyncBatchAnnotateImagesAsync(request);
System.out.println("Waiting for the operation to finish.");
// we're not processing the response, since we'll be reading the output from GCS.
response.get(180, TimeUnit.SECONDS);
// Once the request has completed and the output has been
// written to GCS, we can list all the output files.
Storage storage = StorageOptions.getDefaultInstance().getService();
// Get the destination location from the gcsDestinationPath
Pattern pattern = Pattern.compile("gs://([^/]+)/(.+)");
Matcher matcher = pattern.matcher(gcsDestinationPath);
if (matcher.find()) {
String bucketName = matcher.group(1);
String prefix = matcher.group(2);
// Get the list of objects with the given prefix from the GCS bucket
Bucket bucket = storage.get(bucketName);
Page<Blob> pageList = bucket.list(BlobListOption.prefix(prefix));
Blob firstOutputFile = null;
// List objects with the given prefix.
System.out.println("Output files:");
for (Blob blob : pageList.iterateAll()) {
System.out.println(blob.getName());
// Keep the first output file; with a batch size of 2 it contains the responses for
// the first two image requests.
if (firstOutputFile == null) {
firstOutputFile = blob;
}
}
// Get the contents of the file and convert the JSON contents to an
// BatchAnnotateImagesResponse
// object. If the Blob is small, read all its content in one request
// (Note: the file is a .json file)
// Storage guide: https://cloud.google.com/storage/docs/downloading-objects
String jsonContents = new String(firstOutputFile.getContent());
Builder builder = BatchAnnotateImagesResponse.newBuilder();
JsonFormat.parser().merge(jsonContents, builder);
// Build the BatchAnnotateImagesResponse object
BatchAnnotateImagesResponse batchAnnotateImagesResponse = builder.build();
// Here we print the response for the first image
// The response contains more information:
// annotation/pages/blocks/paragraphs/words/symbols/colors
// including confidence score and bounding boxes
System.out.format("\nResponse: %s\n", batchAnnotateImagesResponse.getResponses(0));
} else {
System.out.println("No MATCH");
}
} catch (Exception e) {
System.out.println("Error during asyncBatchAnnotateImagesGcs: \n" + e.toString());
}
}
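A minimal invocation sketch with hypothetical placeholder URIs for the source image and output prefix.
public static void main(String[] args) throws Exception {
  // Hypothetical placeholder URIs; the destination must be a GCS prefix you can write to.
  asyncBatchAnnotateImagesGcs(
      "gs://your-bucket/your-image.jpg",
      "gs://your-bucket/annotation-output/");
}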
Use of com.google.cloud.datalabeling.v1beta1.GcsDestination in project java-vision by googleapis.
The class AsyncBatchAnnotateImages, method asyncBatchAnnotateImages.
public static void asyncBatchAnnotateImages(String inputImageUri, String outputUri) throws IOException, ExecutionException, InterruptedException {
// Initialize the client that will be used to send requests. After completing all of your requests, call
// the "close" method on the client to safely clean up any remaining background resources.
try (ImageAnnotatorClient imageAnnotatorClient = ImageAnnotatorClient.create()) {
// You can send multiple images to be annotated; this sample demonstrates how to do this with
// one image. If you want to use multiple images, you have to create an `AnnotateImageRequest`
// object for each image that you want annotated.
// First specify where the vision api can find the image
ImageSource source = ImageSource.newBuilder().setImageUri(inputImageUri).build();
Image image = Image.newBuilder().setSource(source).build();
// Set the type of annotation you want to perform on the image
// https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#google.cloud.vision.v1.Feature.Type
Feature feature = Feature.newBuilder().setType(Feature.Type.LABEL_DETECTION).build();
// Build the request object for that one image. Note: for additional images you have to create
// additional `AnnotateImageRequest` objects and store them in a list to be used below.
AnnotateImageRequest imageRequest = AnnotateImageRequest.newBuilder().setImage(image).addFeatures(feature).build();
// Set where to store the results for the images that will be annotated.
GcsDestination gcsDestination = GcsDestination.newBuilder().setUri(outputUri).build();
// The max number of responses to output in each JSON file.
OutputConfig outputConfig = OutputConfig.newBuilder().setGcsDestination(gcsDestination).setBatchSize(2).build();
// Add each `AnnotateImageRequest` object to the batch request and add the output config.
AsyncBatchAnnotateImagesRequest request = AsyncBatchAnnotateImagesRequest.newBuilder().addRequests(imageRequest).setOutputConfig(outputConfig).build();
// Make the asynchronous batch request.
AsyncBatchAnnotateImagesResponse response = imageAnnotatorClient.asyncBatchAnnotateImagesAsync(request).get();
// The output is written to GCS with the provided output_uri as prefix
String gcsOutputUri = response.getOutputConfig().getGcsDestination().getUri();
System.out.format("Output written to GCS with prefix: %s%n", gcsOutputUri);
}
}
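A minimal invocation sketch with hypothetical placeholder URIs.
public static void main(String[] args) throws IOException, ExecutionException, InterruptedException {
  // Hypothetical placeholder URIs.
  asyncBatchAnnotateImages(
      "gs://your-bucket/your-image.jpg",
      "gs://your-bucket/label-output/");
}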