use of org.eclipse.bpmn2.TextAnnotation in project kie-wb-common by kiegroup.
the class ArtifactsConverterTest, method convertTextAnnotation.
@Test
public void convertTextAnnotation() {
    TextAnnotation element = Bpmn2Factory.eINSTANCE.createTextAnnotation();
    when(typedFactoryManager.newNode(any(), eq(org.kie.workbench.common.stunner.bpmn.definition.TextAnnotation.class))).thenReturn(nodeTextAnnotation);
    when(nodeTextAnnotation.getContent()).thenReturn(contentTextAnnotation);
    when(contentTextAnnotation.getDefinition()).thenReturn(defTextAnnotation);
    when(propertyReaderFactory.of(element)).thenReturn(readerTextAnnotation);

    final Result<BpmnNode> node = tested.convert(element);
    final Node<? extends View<? extends BPMNViewDefinition>, ?> value = node.value().value();

    assertEquals(contentTextAnnotation, value.getContent());
    assertEquals(defTextAnnotation, value.getContent().getDefinition());
}
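For context, the element handed to the converter is the Eclipse BPMN2 model object, not a Stunner definition. A minimal, hypothetical sketch of building one with some content (setId and setText are the standard EMF-generated accessors; the literal values are placeholders):

// Illustrative only; the test above stubs everything around the converter.
org.eclipse.bpmn2.TextAnnotation element = Bpmn2Factory.eINSTANCE.createTextAnnotation();
element.setId("textAnnotation_1");
element.setText("Review the attached invoice");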
use of com.google.cloud.vision.v1.TextAnnotation in project spring-cloud-gcp by spring-cloud.
the class DocumentOcrResultSet, method getAllPages.
/**
 * Returns an {@link Iterator} over all the OCR pages of the document.
 *
 * @return iterator of {@link TextAnnotation} describing OCR content of each page in the
 *     document.
 */
public Iterator<TextAnnotation> getAllPages() {
    return new Iterator<TextAnnotation>() {

        private final Iterator<OcrPageRange> pageRangeIterator = ocrPageRanges.values().iterator();

        private int offset = 0;

        private List<TextAnnotation> currentPageRange = Collections.emptyList();

        @Override
        public boolean hasNext() {
            return pageRangeIterator.hasNext() || offset < currentPageRange.size();
        }

        @Override
        public TextAnnotation next() {
            if (!hasNext()) {
                throw new NoSuchElementException("No more pages left in DocumentOcrResultSet.");
            }
            // Once the current page range is exhausted, lazily load the next one.
            if (offset >= currentPageRange.size()) {
                OcrPageRange pageRange = pageRangeIterator.next();
                offset = 0;
                try {
                    currentPageRange = pageRange.getPages();
                } catch (InvalidProtocolBufferException e) {
                    throw new RuntimeException(
                        "Failed to parse OCR output from JSON output file " + pageRange.getBlob().getName(), e);
                }
            }
            TextAnnotation result = currentPageRange.get(offset);
            offset++;
            return result;
        }
    };
}
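A minimal usage sketch, assuming a DocumentOcrResultSet named resultSet has already been obtained elsewhere (for example via the OCR support in the same Spring Cloud GCP module); each TextAnnotation returned by the iterator exposes the page's full text via getText():

// Iterate every OCR'd page and print its text.
Iterator<TextAnnotation> pages = resultSet.getAllPages();
while (pages.hasNext()) {
    TextAnnotation page = pages.next();
    System.out.println(page.getText());
}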
use of com.google.cloud.vision.v1.TextAnnotation in project spring-cloud-gcp by spring-cloud.
the class WebController, method renderViewDocumentPage.
@GetMapping("/viewDocument")
public ModelAndView renderViewDocumentPage(@RequestParam("gcsDocumentUrl") String gcsDocumentUrl, @RequestParam("pageNumber") int pageNumber, ModelMap map) throws ExecutionException, InterruptedException, InvalidProtocolBufferException {
TextAnnotation textAnnotation = ocrStatusReporter.getDocumentOcrStatuses().get(gcsDocumentUrl).getResultSet().getPage(pageNumber);
String[] firstWordsTokens = textAnnotation.getText().split(" ", 50);
map.put("pageNumber", pageNumber);
map.put("gcsDocumentUrl", gcsDocumentUrl);
map.put("text", String.join(" ", firstWordsTokens));
return new ModelAndView("viewDocument", map);
}
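Note the split limit: String.split(" ", 50) yields at most 50 tokens, and the final token carries the remainder of the string, so joining all tokens back together reproduces the full page text rather than truncating it. A small illustration with a placeholder string and a limit of 3:

String text = "one two three four five";
String[] tokens = text.split(" ", 3);
// tokens = ["one", "two", "three four five"]
System.out.println(String.join(" ", tokens)); // prints: one two three four five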
use of com.google.cloud.vision.v1.TextAnnotation in project java-docs-samples by GoogleCloudPlatform.
the class Detect, method detectDocumentText.
// [START vision_detect_document]
/**
 * Performs document text detection on a local image file.
 *
 * @param filePath The path to the local file to detect document text on.
 * @param out A {@link PrintStream} to write the results to.
 * @throws Exception on errors while closing the client.
 * @throws IOException on Input/Output errors.
 */
public static void detectDocumentText(String filePath, PrintStream out) throws Exception, IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));
  Image img = Image.newBuilder().setContent(imgBytes).build();
  Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // The try-with-resources closes the client when the block exits.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      TextAnnotation annotation = res.getFullTextAnnotation();
      for (Page page : annotation.getPagesList()) {
        String pageText = "";
        for (Block block : page.getBlocksList()) {
          String blockText = "";
          for (Paragraph para : block.getParagraphsList()) {
            String paraText = "";
            for (Word word : para.getWordsList()) {
              String wordText = "";
              for (Symbol symbol : word.getSymbolsList()) {
                wordText = wordText + symbol.getText();
                out.format("Symbol text: %s (confidence: %f)\n", symbol.getText(), symbol.getConfidence());
              }
              out.format("Word text: %s (confidence: %f)\n\n", wordText, word.getConfidence());
              paraText = String.format("%s %s", paraText, wordText);
            }
            // Output Example using Paragraph:
            out.println("\nParagraph: \n" + paraText);
            out.format("Paragraph Confidence: %f\n", para.getConfidence());
            blockText = blockText + paraText;
          }
          pageText = pageText + blockText;
        }
      }
      out.println("\nComplete annotation:");
      out.println(annotation.getText());
    }
  }
}
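A minimal invocation sketch, assuming Application Default Credentials are configured and that demo.png is a placeholder path to a local image:

// Hypothetical call; requires GOOGLE_APPLICATION_CREDENTIALS or other ADC setup.
Detect.detectDocumentText("demo.png", System.out);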
use of com.google.cloud.vision.v1.TextAnnotation in project java-docs-samples by GoogleCloudPlatform.
the class Detect, method detectDocumentTextGcs.
// [END vision_detect_document]
// [START vision_detect_document_uri]
/**
 * Performs document text detection on a remote image on Google Cloud Storage.
 *
 * @param gcsPath The path to the remote file on Google Cloud Storage to detect document text on.
 * @param out A {@link PrintStream} to write the results to.
 * @throws Exception on errors while closing the client.
 * @throws IOException on Input/Output errors.
 */
public static void detectDocumentTextGcs(String gcsPath, PrintStream out) throws Exception, IOException {
  List<AnnotateImageRequest> requests = new ArrayList<>();

  ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
  Image img = Image.newBuilder().setSource(imgSource).build();
  Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
  AnnotateImageRequest request =
      AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
  requests.add(request);

  // The try-with-resources closes the client when the block exits.
  try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
    BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      TextAnnotation annotation = res.getFullTextAnnotation();
      for (Page page : annotation.getPagesList()) {
        String pageText = "";
        for (Block block : page.getBlocksList()) {
          String blockText = "";
          for (Paragraph para : block.getParagraphsList()) {
            String paraText = "";
            for (Word word : para.getWordsList()) {
              String wordText = "";
              for (Symbol symbol : word.getSymbolsList()) {
                wordText = wordText + symbol.getText();
                out.format("Symbol text: %s (confidence: %f)\n", symbol.getText(), symbol.getConfidence());
              }
              out.format("Word text: %s (confidence: %f)\n\n", wordText, word.getConfidence());
              paraText = String.format("%s %s", paraText, wordText);
            }
            // Output Example using Paragraph:
            out.println("\nParagraph: \n" + paraText);
            out.format("Paragraph Confidence: %f\n", para.getConfidence());
            blockText = blockText + paraText;
          }
          pageText = pageText + blockText;
        }
      }
      out.println("\nComplete annotation:");
      out.println(annotation.getText());
    }
  }
}
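A minimal invocation sketch for the GCS variant, with a placeholder bucket and object name:

// Hypothetical call; the gs:// path is a placeholder.
Detect.detectDocumentTextGcs("gs://my-bucket/demo.png", System.out);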