Search in sources :

Example 41 with TextAnnotation

use of com.google.cloud.videointelligence.v1.TextAnnotation in project java-video-intelligence by googleapis.

the class DetectTextTest method testTextDetection.

@Test
public void testTextDetection() throws Exception {
    // Runs text detection on a local sample video and verifies that at least
    // one detected annotation matches one of the expected candidate strings.
    VideoAnnotationResults result = TextDetection.detectText("resources/googlework_short.mp4");
    boolean textExists = false;
    // Labeled break: the original inner `break` only exited the inner loop,
    // so the outer loop kept scanning remaining annotations after a match.
    search:
    for (TextAnnotation textAnnotation : result.getTextAnnotationsList()) {
        for (String possibleText : POSSIBLE_TEXTS) {
            // Case-insensitive containment check against each candidate.
            if (textAnnotation.getText().toUpperCase().contains(possibleText.toUpperCase())) {
                textExists = true;
                break search;
            }
        }
    }
    assertThat(textExists).isTrue();
}
Also used : VideoAnnotationResults(com.google.cloud.videointelligence.v1.VideoAnnotationResults) TextAnnotation(com.google.cloud.videointelligence.v1.TextAnnotation) Test(org.junit.Test)

Example 42 with TextAnnotation

use of com.google.cloud.videointelligence.v1.TextAnnotation in project java-video-intelligence by googleapis.

the class DetectTextTest method testTextDetectionGcs.

@Test
public void testTextDetectionGcs() throws Exception {
    // Same check as testTextDetection, but against a video stored in GCS.
    VideoAnnotationResults result = TextDetection.detectTextGcs(SPEECH_GCS_LOCATION);
    boolean textExists = false;
    // Labeled break: the original inner `break` only exited the inner loop,
    // so the outer loop kept scanning remaining annotations after a match.
    search:
    for (TextAnnotation textAnnotation : result.getTextAnnotationsList()) {
        for (String possibleText : POSSIBLE_TEXTS) {
            // Case-insensitive containment check against each candidate.
            if (textAnnotation.getText().toUpperCase().contains(possibleText.toUpperCase())) {
                textExists = true;
                break search;
            }
        }
    }
    assertThat(textExists).isTrue();
}
Also used : VideoAnnotationResults(com.google.cloud.videointelligence.v1.VideoAnnotationResults) TextAnnotation(com.google.cloud.videointelligence.v1.TextAnnotation) Test(org.junit.Test)

Example 43 with TextAnnotation

use of ru.runa.wfe.lang.bpmn2.TextAnnotation in project runawfe-free-server by processtech.

the class BpmnXmlReader method readNode.

/**
 * Populates a parsed BPMN {@code node} from its XML {@code element} and the
 * pre-extracted {@code properties} map, then registers it on the process
 * definition. Each {@code instanceof} branch below applies configuration that
 * only exists for that node subtype; a node may match several branches
 * (e.g. a SubprocessNode is also a VariableContainerNode).
 */
private void readNode(ProcessDefinition processDefinition, Element element, Map<String, String> properties, Node node) {
    // Common attributes shared by every node type.
    node.setNodeId(element.attributeValue(ID));
    node.setName(element.attributeValue(NAME));
    node.setDescription(element.elementTextTrim(DOCUMENTATION));
    if (properties.containsKey(NODE_ASYNC_EXECUTION)) {
        // Only the literal value "new" enables async execution; anything else disables it.
        node.setAsyncExecution("new".equals(properties.get(NODE_ASYNC_EXECUTION)));
    }
    // Register the node before subtype-specific configuration is applied.
    processDefinition.addNode(node);
    if (node instanceof StartNode) {
        StartNode startNode = (StartNode) node;
        readTask(processDefinition, element, properties, startNode);
    }
    if (node instanceof BaseTaskNode) {
        BaseTaskNode taskNode = (BaseTaskNode) node;
        readTask(processDefinition, element, properties, taskNode);
        if (properties.containsKey(ASYNC)) {
            taskNode.setAsync(Boolean.valueOf(properties.get(ASYNC)));
        }
        if (properties.containsKey(ASYNC_COMPLETION_MODE)) {
            taskNode.setCompletionMode(AsyncCompletionMode.valueOf(properties.get(ASYNC_COMPLETION_MODE)));
        }
        readActionHandlers(processDefinition, taskNode, element);
    }
    if (node instanceof VariableContainerNode) {
        VariableContainerNode variableContainerNode = (VariableContainerNode) node;
        variableContainerNode.setVariableMappings(readVariableMappings(element));
    }
    if (node instanceof SubprocessNode) {
        SubprocessNode subprocessNode = (SubprocessNode) node;
        // Subprocess name lives in a RUNA-namespaced attribute, not a plain one.
        subprocessNode.setSubProcessName(element.attributeValue(QName.get(PROCESS, RUNA_NAMESPACE)));
        if (properties.containsKey(TRANSACTIONAL)) {
            subprocessNode.setTransactional(Boolean.parseBoolean(properties.get(TRANSACTIONAL)));
        }
        if (properties.containsKey(EMBEDDED)) {
            subprocessNode.setEmbedded(Boolean.parseBoolean(properties.get(EMBEDDED)));
        }
        if (properties.containsKey(ASYNC)) {
            subprocessNode.setAsync(Boolean.valueOf(properties.get(ASYNC)));
        }
        if (properties.containsKey(ASYNC_COMPLETION_MODE)) {
            subprocessNode.setCompletionMode(AsyncCompletionMode.valueOf(properties.get(ASYNC_COMPLETION_MODE)));
        }
        if (properties.containsKey(VALIDATE_AT_START)) {
            subprocessNode.setValidateAtStart(Boolean.parseBoolean(properties.get(VALIDATE_AT_START)));
        }
        if (properties.containsKey(DISABLE_CASCADING_SUSPENSION)) {
            subprocessNode.setDisableCascadingSuspension(Boolean.parseBoolean(properties.get(DISABLE_CASCADING_SUSPENSION)));
        }
        // Multi-instance subprocess may additionally carry a discriminator condition.
        if (node instanceof MultiSubprocessNode && properties.containsKey(DISCRIMINATOR_CONDITION)) {
            ((MultiSubprocessNode) node).setDiscriminatorCondition(properties.get(DISCRIMINATOR_CONDITION));
        }
    }
    if (node instanceof ExclusiveGateway) {
        ExclusiveGateway gateway = (ExclusiveGateway) node;
        // Delegation is optional for gateways (third argument false).
        gateway.setDelegation(readDelegation(element, properties, false));
    }
    if (node instanceof BusinessRule) {
        BusinessRule businessRule = (BusinessRule) node;
        businessRule.setDelegation(readDelegation(element, properties, false));
    }
    if (node instanceof TimerNode) {
        TimerNode timerNode = (TimerNode) node;
        readTimer(timerNode, element);
    }
    if (node instanceof ScriptNode) {
        ScriptNode serviceTask = (ScriptNode) node;
        // Delegation is required for script/service tasks (third argument true)
        // — presumably; confirm against readDelegation's contract.
        serviceTask.setDelegation(readDelegation(element, properties, true));
    }
    if (node instanceof BaseMessageNode) {
        BaseMessageNode baseMessageNode = (BaseMessageNode) node;
        // Defaults to MessageEventType.message when the RUNA "type" attribute is absent.
        baseMessageNode.setEventType(MessageEventType.valueOf(element.attributeValue(QName.get(TYPE, RUNA_NAMESPACE), MessageEventType.message.name())));
    }
    if (node instanceof SendMessageNode) {
        SendMessageNode sendMessageNode = (SendMessageNode) node;
        // Message time-to-live; default is "1 days" when no duration attribute is given.
        sendMessageNode.setTtlDuration(element.attributeValue(QName.get(TIME_DURATION, RUNA_NAMESPACE), "1 days"));
    }
    if (node instanceof TextAnnotation) {
        // Text annotations get a synthetic name and their description from the TEXT element.
        node.setName("TextAnnotation_" + node.getNodeId());
        node.setDescription(element.elementTextTrim(TEXT));
    }
}
Also used : SendMessageNode(ru.runa.wfe.lang.SendMessageNode) StartNode(ru.runa.wfe.lang.StartNode) EmbeddedSubprocessStartNode(ru.runa.wfe.lang.EmbeddedSubprocessStartNode) ExclusiveGateway(ru.runa.wfe.lang.bpmn2.ExclusiveGateway) BaseMessageNode(ru.runa.wfe.lang.BaseMessageNode) MultiSubprocessNode(ru.runa.wfe.lang.MultiSubprocessNode) SubprocessNode(ru.runa.wfe.lang.SubprocessNode) TimerNode(ru.runa.wfe.lang.bpmn2.TimerNode) ScriptNode(ru.runa.wfe.lang.ScriptNode) TextAnnotation(ru.runa.wfe.lang.bpmn2.TextAnnotation) MultiSubprocessNode(ru.runa.wfe.lang.MultiSubprocessNode) BusinessRule(ru.runa.wfe.lang.bpmn2.BusinessRule) BaseTaskNode(ru.runa.wfe.lang.BaseTaskNode) VariableContainerNode(ru.runa.wfe.lang.VariableContainerNode)

Example 44 with TextAnnotation

use of com.google.cloud.vision.v1p3beta1.TextAnnotation in project java-vision by googleapis.

the class DetectBeta method detectHandwrittenOcrGcs.

// [END vision_handwritten_ocr_beta]
// [START vision_handwritten_ocr_gcs_beta]
/**
 * Performs handwritten text detection on a remote image on Google Cloud Storage.
 *
 * @param gcsPath The path to the remote file on Google Cloud Storage to detect handwritten text
 *     on.
 * @param out A {@link PrintStream} to write the results to.
 * @throws Exception on errors while closing the client.
 * @throws IOException on Input/Output errors.
 */
public static void detectHandwrittenOcrGcs(String gcsPath, PrintStream out) throws Exception {
    // Build a single DOCUMENT_TEXT_DETECTION request for the GCS-hosted image.
    List<AnnotateImageRequest> requests = new ArrayList<>();
    ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
    Image img = Image.newBuilder().setSource(imgSource).build();
    Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
    // Language hint selecting the handwriting recognition model.
    ImageContext imageContext = ImageContext.newBuilder().addLanguageHints("en-t-i0-handwrit").build();
    AnnotateImageRequest request = AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).setImageContext(imageContext).build();
    requests.add(request);
    // try-with-resources closes the client; the explicit client.close() the
    // original also made inside the block (a redundant double close) is removed.
    try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
        BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
        List<AnnotateImageResponse> responses = response.getResponsesList();
        for (AnnotateImageResponse res : responses) {
            if (res.hasError()) {
                out.printf("Error: %s\n", res.getError().getMessage());
                return;
            }
            // For full list of available annotations, see http://g.co/cloud/vision/docs
            TextAnnotation annotation = res.getFullTextAnnotation();
            // Walk the page -> block -> paragraph -> word -> symbol hierarchy,
            // printing each level with its confidence.
            for (Page page : annotation.getPagesList()) {
                for (Block block : page.getBlocksList()) {
                    for (Paragraph para : block.getParagraphsList()) {
                        String paraText = "";
                        for (Word word : para.getWordsList()) {
                            String wordText = "";
                            for (Symbol symbol : word.getSymbolsList()) {
                                wordText = wordText + symbol.getText();
                                out.format("Symbol text: %s (confidence: %f)\n", symbol.getText(), symbol.getConfidence());
                            }
                            out.format("Word text: %s (confidence: %f)\n\n", wordText, word.getConfidence());
                            paraText = String.format("%s %s", paraText, wordText);
                        }
                        // Output Example using Paragraph:
                        out.println("\nParagraph: \n" + paraText);
                        out.format("Paragraph Confidence: %f\n", para.getConfidence());
                        // NOTE: the original also concatenated paraText into unused
                        // blockText/pageText accumulators; they were never printed
                        // and have been removed.
                    }
                }
            }
            out.println("\nComplete annotation:");
            out.println(annotation.getText());
        }
    }
}
Also used : Word(com.google.cloud.vision.v1p3beta1.Word) Symbol(com.google.cloud.vision.v1p3beta1.Symbol) ImageAnnotatorClient(com.google.cloud.vision.v1p3beta1.ImageAnnotatorClient) ArrayList(java.util.ArrayList) Page(com.google.cloud.vision.v1p3beta1.Page) ByteString(com.google.protobuf.ByteString) Image(com.google.cloud.vision.v1p3beta1.Image) Feature(com.google.cloud.vision.v1p3beta1.Feature) Paragraph(com.google.cloud.vision.v1p3beta1.Paragraph) AnnotateImageRequest(com.google.cloud.vision.v1p3beta1.AnnotateImageRequest) AnnotateImageResponse(com.google.cloud.vision.v1p3beta1.AnnotateImageResponse) Block(com.google.cloud.vision.v1p3beta1.Block) ImageSource(com.google.cloud.vision.v1p3beta1.ImageSource) TextAnnotation(com.google.cloud.vision.v1p3beta1.TextAnnotation) ImageContext(com.google.cloud.vision.v1p3beta1.ImageContext) BatchAnnotateImagesResponse(com.google.cloud.vision.v1p3beta1.BatchAnnotateImagesResponse)

Example 45 with TextAnnotation

use of com.google.cloud.vision.v1.TextAnnotation in project java-vision by googleapis.

the class Detect method detectDocumentText.

/**
 * Performs document text detection on a local image file.
 *
 * @param filePath The path to the local file to detect document text on.
 * @throws Exception on errors while closing the client.
 * @throws IOException on Input/Output errors.
 */
// [START vision_fulltext_detection]
public static void detectDocumentText(String filePath) throws IOException {
    // Build a single DOCUMENT_TEXT_DETECTION request from the local file's bytes.
    List<AnnotateImageRequest> requests = new ArrayList<>();
    ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));
    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
    AnnotateImageRequest request = AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
    requests.add(request);
    // try-with-resources calls "close" on the client to safely clean up any
    // remaining background resources; the original's extra explicit
    // client.close() inside the block (a redundant double close) is removed.
    try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
        BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
        List<AnnotateImageResponse> responses = response.getResponsesList();
        for (AnnotateImageResponse res : responses) {
            if (res.hasError()) {
                System.out.format("Error: %s%n", res.getError().getMessage());
                return;
            }
            // For full list of available annotations, see http://g.co/cloud/vision/docs
            TextAnnotation annotation = res.getFullTextAnnotation();
            // Walk the page -> block -> paragraph -> word -> symbol hierarchy,
            // printing each level with its confidence.
            for (Page page : annotation.getPagesList()) {
                for (Block block : page.getBlocksList()) {
                    for (Paragraph para : block.getParagraphsList()) {
                        String paraText = "";
                        for (Word word : para.getWordsList()) {
                            String wordText = "";
                            for (Symbol symbol : word.getSymbolsList()) {
                                wordText = wordText + symbol.getText();
                                System.out.format("Symbol text: %s (confidence: %f)%n", symbol.getText(), symbol.getConfidence());
                            }
                            System.out.format("Word text: %s (confidence: %f)%n%n", wordText, word.getConfidence());
                            paraText = String.format("%s %s", paraText, wordText);
                        }
                        // Output Example using Paragraph:
                        // BUG FIX: the original passed "%n" specifiers to println,
                        // which printed them literally; use format so they expand.
                        System.out.format("%nParagraph: %n%s%n", paraText);
                        System.out.format("Paragraph Confidence: %f%n", para.getConfidence());
                        // NOTE: the original also concatenated paraText into unused
                        // blockText/pageText accumulators; they were never printed
                        // and have been removed.
                    }
                }
            }
            // BUG FIX: same literal-"%n" issue as above.
            System.out.format("%nComplete annotation:%n");
            System.out.println(annotation.getText());
        }
    }
}
Also used : Word(com.google.cloud.vision.v1.Word) ByteString(com.google.protobuf.ByteString) Symbol(com.google.cloud.vision.v1.Symbol) ImageAnnotatorClient(com.google.cloud.vision.v1.ImageAnnotatorClient) ArrayList(java.util.ArrayList) Page(com.google.cloud.vision.v1.Page) ByteString(com.google.protobuf.ByteString) Image(com.google.cloud.vision.v1.Image) Feature(com.google.cloud.vision.v1.Feature) FileInputStream(java.io.FileInputStream) Paragraph(com.google.cloud.vision.v1.Paragraph) AnnotateImageRequest(com.google.cloud.vision.v1.AnnotateImageRequest) AnnotateImageResponse(com.google.cloud.vision.v1.AnnotateImageResponse) Block(com.google.cloud.vision.v1.Block) TextAnnotation(com.google.cloud.vision.v1.TextAnnotation) BatchAnnotateImagesResponse(com.google.cloud.vision.v1.BatchAnnotateImagesResponse)

Aggregations

ArrayList (java.util.ArrayList)15 TextAnnotation (com.google.cloud.vision.v1.TextAnnotation)12 Test (org.junit.Test)11 ByteString (com.google.protobuf.ByteString)9 TextAnnotation (com.google.cloud.videointelligence.v1.TextAnnotation)8 VideoAnnotationResults (com.google.cloud.videointelligence.v1.VideoAnnotationResults)8 List (java.util.List)7 TextAnnotation (org.kie.workbench.common.dmn.api.definition.v1_1.TextAnnotation)7 FreeTextAnnotation (org.opencastproject.metadata.mpeg7.FreeTextAnnotation)7 TextAnnotation (org.opencastproject.metadata.mpeg7.TextAnnotation)7 AnnotateImageResponse (com.google.cloud.vision.v1.AnnotateImageResponse)6 Duration (com.google.protobuf.Duration)6 AnnotateVideoProgress (com.google.cloud.videointelligence.v1.AnnotateVideoProgress)4 AnnotateVideoRequest (com.google.cloud.videointelligence.v1.AnnotateVideoRequest)4 AnnotateVideoResponse (com.google.cloud.videointelligence.v1.AnnotateVideoResponse)4 NormalizedVertex (com.google.cloud.videointelligence.v1.NormalizedVertex)4 TextFrame (com.google.cloud.videointelligence.v1.TextFrame)4 TextSegment (com.google.cloud.videointelligence.v1.TextSegment)4 VideoIntelligenceServiceClient (com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient)4 VideoSegment (com.google.cloud.videointelligence.v1.VideoSegment)4