Use of `com.google.cloud.dialogflow.cx.v3beta1.Page` in project java-vision by googleapis:
the class `DetectBeta`, method `detectHandwrittenOcr`.
// [END vision_localize_objects_gcs_beta]
// [START vision_handwritten_ocr_beta]
/**
* Performs handwritten text detection on a local image file.
*
* @param filePath The path to the local file to detect handwritten text on.
* @param out A {@link PrintStream} to write the results to.
* @throws Exception on errors while closing the client.
* @throws IOException on Input/Output errors.
*/
public static void detectHandwrittenOcr(String filePath, PrintStream out) throws Exception {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  // Read the image bytes. try-with-resources guarantees the stream is closed
  // (the previous version leaked the FileInputStream).
  ByteString imgBytes;
  try (FileInputStream inputStream = new FileInputStream(filePath)) {
    imgBytes = ByteString.readFrom(inputStream);
  }

  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
  // Set the Language Hint code so the service applies the handwritten OCR model.
  ImageContext imageContext =
      ImageContext.newBuilder().addLanguageHints("en-t-i0-handwrit").build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder()
          .addFeatures(feat)
          .setImage(img)
          .setImageContext(imageContext)
          .build();
  requests.add(request);

  // try-with-resources closes the client on exit; the previous version also
  // called client.close() explicitly, causing a redundant double close.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : response.getResponsesList()) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }
      // For full list of available annotations, see http://g.co/cloud/vision/docs
      TextAnnotation annotation = res.getFullTextAnnotation();
      for (Page page : annotation.getPagesList()) {
        String pageText = "";
        for (Block block : page.getBlocksList()) {
          String blockText = "";
          for (Paragraph para : block.getParagraphsList()) {
            String paraText = "";
            for (Word word : para.getWordsList()) {
              String wordText = "";
              for (Symbol symbol : word.getSymbolsList()) {
                wordText = wordText + symbol.getText();
                out.format(
                    "Symbol text: %s (confidence: %f)\n",
                    symbol.getText(), symbol.getConfidence());
              }
              out.format("Word text: %s (confidence: %f)\n\n", wordText, word.getConfidence());
              paraText = String.format("%s %s", paraText, wordText);
            }
            // Output Example using Paragraph:
            out.println("\nParagraph: \n" + paraText);
            out.format("Paragraph Confidence: %f\n", para.getConfidence());
            blockText = blockText + paraText;
          }
          pageText = pageText + blockText;
        }
      }
      out.println("\nComplete annotation:");
      out.println(annotation.getText());
    }
  }
}
Use of `com.google.cloud.dialogflow.cx.v3beta1.Page` in project java-vision by googleapis:
the class `BatchAnnotateFilesGcs`, method `batchAnnotateFilesGcs`.
public static void batchAnnotateFilesGcs(String gcsUri) throws IOException {
  // The client is AutoCloseable; try-with-resources safely cleans up any
  // remaining background resources when the block exits.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    // Multiple files can be annotated in a single call; this sample sends one
    // file. For additional files, build one AnnotateFileRequest per file and
    // add each to the batch request below.

    // First tell the Vision API where to find the document.
    GcsSource source = GcsSource.newBuilder().setUri(gcsUri).build();

    // Describe the input: its URI and MIME type.
    // Supported mime_type: application/pdf, image/tiff, image/gif
    // https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#inputconfig
    InputConfig config =
        InputConfig.newBuilder().setMimeType("application/pdf").setGcsSource(source).build();

    // Choose the annotation to perform on the file.
    // https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#google.cloud.vision.v1.Feature.Type
    Feature docTextDetection =
        Feature.newBuilder().setType(Feature.Type.DOCUMENT_TEXT_DETECTION).build();

    // Build the request for this one file. Since the input is application/pdf,
    // the `pages` field selects which pages to process (the service handles up
    // to 5 pages per document file); -1 denotes the last page.
    // https://cloud.google.com/vision/docs/reference/rpc/google.cloud.vision.v1#google.cloud.vision.v1.AnnotateFileRequest
    AnnotateFileRequest fileRequest =
        AnnotateFileRequest.newBuilder()
            .setInputConfig(config)
            .addFeatures(docTextDetection)
            .addPages(1) // process the first page
            .addPages(2) // process the second page
            .addPages(-1) // process the last page
            .build();

    // Collect the per-file requests into a single batch request.
    BatchAnnotateFilesRequest batchRequest =
        BatchAnnotateFilesRequest.newBuilder().addRequests(fileRequest).build();

    // Issue the synchronous batch call.
    BatchAnnotateFilesResponse batchResponse = client.batchAnnotateFiles(batchRequest);

    // Only one file was sent, so only the first file response is inspected.
    List<AnnotateImageResponse> pageResponses =
        batchResponse.getResponsesList().get(0).getResponsesList();
    for (AnnotateImageResponse pageResponse : pageResponses) {
      System.out.format("Full text: %s%n", pageResponse.getFullTextAnnotation().getText());
      for (Page page : pageResponse.getFullTextAnnotation().getPagesList()) {
        for (Block block : page.getBlocksList()) {
          System.out.format("%nBlock confidence: %s%n", block.getConfidence());
          for (Paragraph paragraph : block.getParagraphsList()) {
            System.out.format("\tParagraph confidence: %s%n", paragraph.getConfidence());
            for (Word word : paragraph.getWordsList()) {
              System.out.format("\t\tWord confidence: %s%n", word.getConfidence());
              for (Symbol symbol : word.getSymbolsList()) {
                System.out.format(
                    "\t\t\tSymbol: %s, (confidence: %s)%n",
                    symbol.getText(), symbol.getConfidence());
              }
            }
          }
        }
      }
    }
  }
}
Use of `com.google.cloud.dialogflow.cx.v3beta1.Page` in project java-vision by googleapis:
the class `Detect`, method `detectDocumentTextGcs`.
// [END vision_fulltext_detection]
/**
* Performs document text detection on a remote image on Google Cloud Storage.
*
* @param gcsPath The path to the remote file on Google Cloud Storage to detect document text on.
 * @throws IOException on Input/Output errors, including errors while closing the client.
*/
// [START vision_fulltext_detection_gcs]
public static void detectDocumentTextGcs(String gcsPath) throws IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();
  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // try-with-resources closes the client on exit; the previous version also
  // called client.close() explicitly, causing a redundant double close.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : response.getResponsesList()) {
      if (res.hasError()) {
        System.out.format("Error: %s%n", res.getError().getMessage());
        return;
      }
      // For full list of available annotations, see http://g.co/cloud/vision/docs
      TextAnnotation annotation = res.getFullTextAnnotation();
      for (Page page : annotation.getPagesList()) {
        String pageText = "";
        for (Block block : page.getBlocksList()) {
          String blockText = "";
          for (Paragraph para : block.getParagraphsList()) {
            String paraText = "";
            for (Word word : para.getWordsList()) {
              String wordText = "";
              for (Symbol symbol : word.getSymbolsList()) {
                wordText = wordText + symbol.getText();
                System.out.format(
                    "Symbol text: %s (confidence: %f)%n",
                    symbol.getText(), symbol.getConfidence());
              }
              System.out.format(
                  "Word text: %s (confidence: %f)%n%n", wordText, word.getConfidence());
              paraText = String.format("%s %s", paraText, wordText);
            }
            // Output Example using Paragraph:
            // BUG FIX: println does not interpret %n format specifiers, so the
            // previous code printed literal "%n" characters; use format instead.
            System.out.format("%nParagraph: %n%s%n", paraText);
            System.out.format("Paragraph Confidence: %f%n", para.getConfidence());
            blockText = blockText + paraText;
          }
          pageText = pageText + blockText;
        }
      }
      // BUG FIX: same %n-in-println issue as above.
      System.out.format("%nComplete annotation:%n");
      System.out.println(annotation.getText());
    }
  }
}
Use of `com.google.cloud.dialogflow.cx.v3beta1.Page` in project aem-core-wcm-components by Adobe-Marketing-Cloud:
the class `PageTests`, method `setupBeforeEach`.
/**
 * Per-test setup: creates the test page, creates supporting resources, and
 * initializes the {@code Page} page object used by the tests.
 *
 * @param adminClient client used to create the test content
 * @param rootPage path under which the test page is created
 * @param pageRT resource type applied to the created page
 * @param segmentPath path forwarded to setupResources — presumably a segment
 *     definition used by the tests; TODO confirm against setupResources
 * @throws ClientException on errors while creating the test content
 */
public void setupBeforeEach(CQClient adminClient, String rootPage, String pageRT, String segmentPath) throws ClientException {
// create the test page under rootPage (the trailing 200 is presumably the
// expected HTTP status — verify against Commons.createPage)
testPage = Commons.createPage(adminClient, Commons.template, rootPage, "testPage", pageTitle, pageRT, "Test Page", 200);
// create the additional resources the tests rely on
setupResources(segmentPath, adminClient, rootPage);
// fresh page-object instance for the tests to interact with
page = new Page();
}
Use of `com.google.cloud.dialogflow.cx.v3beta1.Page` in project alfresco-repository by Alfresco:
the class `TransformationOptionsConverter`, method `getTransformationOptions`.
/**
* @deprecated as we do not plan to use TransformationOptions moving forwards as local transformations will also
* use the same options as the Transform Service.
*/
@Deprecated
/**
 * Maps a rendition's flat option map onto the legacy {@code TransformationOptions}
 * hierarchy used by local transformers.
 *
 * @param renditionName the rendition being converted; "pdf" is special-cased
 *     because it was incorrectly set up as an SWFTransformationOptions in 6.0
 * @param options the rendition's name/value options
 * @return the populated TransformationOptions subclass
 * @throws IllegalArgumentException if the options cannot be mapped
 */
TransformationOptions getTransformationOptions(String renditionName, Map<String, String> options) {
  TransformationOptions transformationOptions = null;
  Set<String> optionNames = options.keySet();

  // The "pdf" rendition is special as it was incorrectly set up as an
  // SWFTransformationOptions in 6.0. It should have been simply a TransformationOptions.
  boolean isPdfRendition = "pdf".equals(renditionName);

  // Options other than the generic limit/include-contents ones decide which
  // TransformationOptions subclass is required.
  Set<String> subclassOptionNames = new HashSet<>(optionNames);
  subclassOptionNames.removeAll(LIMIT_OPTIONS);
  subclassOptionNames.remove(INCLUDE_CONTENTS);
  boolean hasOptions = !subclassOptionNames.isEmpty();

  if (isPdfRendition || hasOptions) {
    // The "pdf" rendition used the wrong TransformationOptions subclass.
    if (isPdfRendition || FLASH_OPTIONS.containsAll(subclassOptionNames)) {
      SWFTransformationOptions opts = new SWFTransformationOptions();
      transformationOptions = opts;
      opts.setFlashVersion(isPdfRendition ? "9" : options.get(FLASH_VERSION));
    } else if (IMAGE_OPTIONS.containsAll(subclassOptionNames)
        || PDF_OPTIONS.containsAll(subclassOptionNames)) {
      // Renditions that use ImageTransformOptions to specify width, height etc.
      ImageTransformationOptions opts = new ImageTransformationOptions();
      transformationOptions = opts;
      if (containsAny(subclassOptionNames, RESIZE_OPTIONS)) {
        ImageResizeOptions imageResizeOptions = new ImageResizeOptions();
        opts.setResizeOptions(imageResizeOptions);
        // PDF
        ifSet(options, WIDTH, (v) -> imageResizeOptions.setWidth(Integer.parseInt(v)));
        ifSet(options, HEIGHT, (v) -> imageResizeOptions.setHeight(Integer.parseInt(v)));
        // ImageMagick
        ifSet(options, RESIZE_WIDTH, (v) -> imageResizeOptions.setWidth(Integer.parseInt(v)));
        ifSet(options, RESIZE_HEIGHT, (v) -> imageResizeOptions.setHeight(Integer.parseInt(v)));
        ifSet(options, THUMBNAIL, (v) -> imageResizeOptions.setResizeToThumbnail(Boolean.parseBoolean(v)));
        ifSet(options, RESIZE_PERCENTAGE, (v) -> imageResizeOptions.setPercentResize(Boolean.parseBoolean(v)));
        // These two default to true when the option is present without a value.
        set(options, ALLOW_ENLARGEMENT, (v) -> imageResizeOptions.setAllowEnlargement(Boolean.parseBoolean(v == null ? "true" : v)));
        set(options, MAINTAIN_ASPECT_RATIO, (v) -> imageResizeOptions.setMaintainAspectRatio(Boolean.parseBoolean(v == null ? "true" : v)));
      }
      // ALPHA_REMOVE can be ignored as it is automatically added in the legacy code
      // if the sourceMimetype is jpeg
      set(options, AUTO_ORIENT, (v) -> opts.setAutoOrient(Boolean.parseBoolean(v == null ? "true" : v)));

      boolean containsPaged = containsAny(subclassOptionNames, PAGED_OPTIONS);
      boolean containsCrop = containsAny(subclassOptionNames, CROP_OPTIONS);
      boolean containsTemporal = containsAny(subclassOptionNames, TEMPORAL_OPTIONS);
      if (containsPaged || containsCrop || containsTemporal) {
        List<TransformationSourceOptions> sourceOptionsList = new ArrayList<>();
        if (containsPaged) {
          // The legacy transformer options start at page 1, whereas ImageMagick
          // and the local transforms start at 0 — hence the +1 adjustments.
          PagedSourceOptions pagedSourceOptions = new PagedSourceOptions();
          sourceOptionsList.add(pagedSourceOptions);
          ifSet(options, START_PAGE, (v) -> pagedSourceOptions.setStartPageNumber(Integer.parseInt(v) + 1));
          ifSet(options, END_PAGE, (v) -> pagedSourceOptions.setEndPageNumber(Integer.parseInt(v) + 1));
          // A single PAGE option selects exactly one page (start == end).
          ifSet(options, PAGE, (v) -> {
            int i = Integer.parseInt(v) + 1;
            pagedSourceOptions.setStartPageNumber(i);
            pagedSourceOptions.setEndPageNumber(i);
          });
        }
        if (containsCrop) {
          CropSourceOptions cropSourceOptions = new CropSourceOptions();
          sourceOptionsList.add(cropSourceOptions);
          ifSet(options, CROP_GRAVITY, (v) -> cropSourceOptions.setGravity(v));
          ifSet(options, CROP_PERCENTAGE, (v) -> cropSourceOptions.setPercentageCrop(Boolean.parseBoolean(v)));
          ifSet(options, CROP_WIDTH, (v) -> cropSourceOptions.setWidth(Integer.parseInt(v)));
          ifSet(options, CROP_HEIGHT, (v) -> cropSourceOptions.setHeight(Integer.parseInt(v)));
          ifSet(options, CROP_X_OFFSET, (v) -> cropSourceOptions.setXOffset(Integer.parseInt(v)));
          ifSet(options, CROP_Y_OFFSET, (v) -> cropSourceOptions.setYOffset(Integer.parseInt(v)));
        }
        if (containsTemporal) {
          TemporalSourceOptions temporalSourceOptions = new TemporalSourceOptions();
          sourceOptionsList.add(temporalSourceOptions);
          ifSet(options, DURATION, (v) -> temporalSourceOptions.setDuration(v));
          ifSet(options, OFFSET, (v) -> temporalSourceOptions.setOffset(v));
        }
        opts.setSourceOptionsList(sourceOptionsList);
      }
    }
  } else {
    // This is what the "pdf" rendition should have used in 6.0 and it is not
    // unreasonable for a custom transformer and rendition to do the same.
    transformationOptions = new TransformationOptions();
  }

  if (transformationOptions == null) {
    // Reachable when subclass options exist but match neither the Flash nor
    // the image/PDF option sets.
    StringJoiner sj = new StringJoiner("\n    ");
    sj.add("The RenditionDefinition2 " + renditionName
        + " contains options that cannot be mapped to TransformationOptions used by local transformers. "
        + " The TransformOptionConverter may need to be sub classed to support this conversion.");
    HashSet<String> otherNames = new HashSet<>(optionNames);
    otherNames.removeAll(FLASH_OPTIONS);
    otherNames.removeAll(IMAGE_OPTIONS);
    otherNames.removeAll(PDF_OPTIONS);
    otherNames.removeAll(LIMIT_OPTIONS);
    otherNames.forEach(sj::add);
    sj.add("---");
    optionNames.forEach(sj::add);
    throw new IllegalArgumentException(sj.toString());
  }

  final TransformationOptions opts = transformationOptions;
  ifSet(options, INCLUDE_CONTENTS, (v) -> opts.setIncludeEmbedded(Boolean.parseBoolean(v)));

  if (containsAny(optionNames, LIMIT_OPTIONS)) {
    TransformationOptionLimits limits = new TransformationOptionLimits();
    transformationOptions.setLimits(limits);
    ifSet(options, TIMEOUT, (v) -> limits.setTimeoutMs(Long.parseLong(v)));
    limits.setMaxSourceSizeKBytes(maxSourceSizeKBytes);
    // BUG FIX: the previous code swapped these two values, passing
    // readLimitTimeMs to setReadLimitKBytes and readLimitKBytes to
    // setReadLimitTimeMs.
    limits.setReadLimitKBytes(readLimitKBytes);
    limits.setReadLimitTimeMs(readLimitTimeMs);
    limits.setMaxPages(maxPages);
    limits.setPageLimit(pageLimit);
  }

  transformationOptions.setUse(renditionName);
  return transformationOptions;
}
Aggregations