Use of com.google.cloud.videointelligence.v1.TextAnnotation in project java-video-intelligence by googleapis:
class DetectTextTest, method testTextDetection.
@Test
public void testTextDetection() throws Exception {
    // Verify that at least one of the expected phrases is detected in the sample video.
    VideoAnnotationResults result = TextDetection.detectText("resources/googlework_short.mp4");
    boolean textExists = false;
    // Labeled loop so the match can stop BOTH loops: the original unlabeled
    // break only exited the inner loop and kept scanning remaining annotations.
    search:
    for (TextAnnotation textAnnotation : result.getTextAnnotationsList()) {
        for (String possibleText : POSSIBLE_TEXTS) {
            // Case-insensitive containment check against each expected phrase.
            if (textAnnotation.getText().toUpperCase().contains(possibleText.toUpperCase())) {
                textExists = true;
                break search; // first match is sufficient; stop scanning entirely
            }
        }
    }
    assertThat(textExists).isTrue();
}
Use of com.google.cloud.videointelligence.v1.TextAnnotation in project java-video-intelligence by googleapis:
class DetectTextTest, method testTextDetectionGcs.
@Test
public void testTextDetectionGcs() throws Exception {
    // Same assertion as testTextDetection, but against a GCS-hosted video.
    VideoAnnotationResults result = TextDetection.detectTextGcs(SPEECH_GCS_LOCATION);
    boolean textExists = false;
    // Labeled loop so a match terminates the whole scan: the original
    // unlabeled break only left the inner loop over POSSIBLE_TEXTS.
    search:
    for (TextAnnotation textAnnotation : result.getTextAnnotationsList()) {
        for (String possibleText : POSSIBLE_TEXTS) {
            if (textAnnotation.getText().toUpperCase().contains(possibleText.toUpperCase())) {
                textExists = true;
                break search; // one match proves the behavior under test
            }
        }
    }
    assertThat(textExists).isTrue();
}
Use of a TextAnnotation type in project runawfe-free-server by processtech (note: this is runawfe's own TextAnnotation node class, not com.google.cloud.videointelligence.v1.TextAnnotation):
class BpmnXmlReader, method readNode.
/**
 * Populates a process-graph Node from its BPMN XML element and the pre-parsed
 * property map, then registers it on the ProcessDefinition. After the common
 * attributes, a chain of instanceof checks applies the configuration that is
 * specific to each node subtype; a node may match several branches (e.g. a
 * SubprocessNode is also a VariableContainerNode), so the order below matters.
 *
 * @param processDefinition definition the node is added to
 * @param element           the node's BPMN XML element
 * @param properties        runa-extension properties already extracted for this element
 * @param node              freshly created node instance to fill in
 */
private void readNode(ProcessDefinition processDefinition, Element element, Map<String, String> properties, Node node) {
// Common attributes shared by every node type.
node.setNodeId(element.attributeValue(ID));
node.setName(element.attributeValue(NAME));
node.setDescription(element.elementTextTrim(DOCUMENTATION));
if (properties.containsKey(NODE_ASYNC_EXECUTION)) {
// "new" selects async execution; any other value means synchronous.
node.setAsyncExecution("new".equals(properties.get(NODE_ASYNC_EXECUTION)));
}
// Register before subtype configuration so the definition owns the node.
processDefinition.addNode(node);
if (node instanceof StartNode) {
StartNode startNode = (StartNode) node;
readTask(processDefinition, element, properties, startNode);
}
if (node instanceof BaseTaskNode) {
BaseTaskNode taskNode = (BaseTaskNode) node;
readTask(processDefinition, element, properties, taskNode);
if (properties.containsKey(ASYNC)) {
taskNode.setAsync(Boolean.valueOf(properties.get(ASYNC)));
}
if (properties.containsKey(ASYNC_COMPLETION_MODE)) {
taskNode.setCompletionMode(AsyncCompletionMode.valueOf(properties.get(ASYNC_COMPLETION_MODE)));
}
readActionHandlers(processDefinition, taskNode, element);
}
if (node instanceof VariableContainerNode) {
VariableContainerNode variableContainerNode = (VariableContainerNode) node;
variableContainerNode.setVariableMappings(readVariableMappings(element));
}
if (node instanceof SubprocessNode) {
SubprocessNode subprocessNode = (SubprocessNode) node;
// Subprocess name lives in a runa-namespaced attribute, not a plain one.
subprocessNode.setSubProcessName(element.attributeValue(QName.get(PROCESS, RUNA_NAMESPACE)));
if (properties.containsKey(TRANSACTIONAL)) {
subprocessNode.setTransactional(Boolean.parseBoolean(properties.get(TRANSACTIONAL)));
}
if (properties.containsKey(EMBEDDED)) {
subprocessNode.setEmbedded(Boolean.parseBoolean(properties.get(EMBEDDED)));
}
if (properties.containsKey(ASYNC)) {
subprocessNode.setAsync(Boolean.valueOf(properties.get(ASYNC)));
}
if (properties.containsKey(ASYNC_COMPLETION_MODE)) {
subprocessNode.setCompletionMode(AsyncCompletionMode.valueOf(properties.get(ASYNC_COMPLETION_MODE)));
}
if (properties.containsKey(VALIDATE_AT_START)) {
subprocessNode.setValidateAtStart(Boolean.parseBoolean(properties.get(VALIDATE_AT_START)));
}
if (properties.containsKey(DISABLE_CASCADING_SUSPENSION)) {
subprocessNode.setDisableCascadingSuspension(Boolean.parseBoolean(properties.get(DISABLE_CASCADING_SUSPENSION)));
}
// Multi-instance subprocess may additionally carry a discriminator condition.
if (node instanceof MultiSubprocessNode && properties.containsKey(DISCRIMINATOR_CONDITION)) {
((MultiSubprocessNode) node).setDiscriminatorCondition(properties.get(DISCRIMINATOR_CONDITION));
}
}
// Gateways and business rules get a non-script delegation (third arg false).
if (node instanceof ExclusiveGateway) {
ExclusiveGateway gateway = (ExclusiveGateway) node;
gateway.setDelegation(readDelegation(element, properties, false));
}
if (node instanceof BusinessRule) {
BusinessRule businessRule = (BusinessRule) node;
businessRule.setDelegation(readDelegation(element, properties, false));
}
if (node instanceof TimerNode) {
TimerNode timerNode = (TimerNode) node;
readTimer(timerNode, element);
}
// Script (service task) nodes use a script delegation (third arg true).
if (node instanceof ScriptNode) {
ScriptNode serviceTask = (ScriptNode) node;
serviceTask.setDelegation(readDelegation(element, properties, true));
}
if (node instanceof BaseMessageNode) {
BaseMessageNode baseMessageNode = (BaseMessageNode) node;
// Defaults to the plain "message" event type when the attribute is absent.
baseMessageNode.setEventType(MessageEventType.valueOf(element.attributeValue(QName.get(TYPE, RUNA_NAMESPACE), MessageEventType.message.name())));
}
if (node instanceof SendMessageNode) {
SendMessageNode sendMessageNode = (SendMessageNode) node;
// Message time-to-live; "1 days" is the documented fallback duration.
sendMessageNode.setTtlDuration(element.attributeValue(QName.get(TIME_DURATION, RUNA_NAMESPACE), "1 days"));
}
// Text annotations get a synthetic name and their text as the description.
if (node instanceof TextAnnotation) {
node.setName("TextAnnotation_" + node.getNodeId());
node.setDescription(element.elementTextTrim(TEXT));
}
}
Use of the Vision API TextAnnotation (com.google.cloud.vision.v1.TextAnnotation, returned by getFullTextAnnotation) in project java-vision by googleapis:
class DetectBeta, method detectHandwrittenOcrGcs.
// [END vision_handwritten_ocr_beta]
// [START vision_handwritten_ocr_gcs_beta]
/**
* Performs handwritten text detection on a remote image on Google Cloud Storage.
*
* @param gcsPath The path to the remote file on Google Cloud Storage to detect handwritten text
* on.
* @param out A {@link PrintStream} to write the results to.
* @throws Exception on errors while closing the client.
* @throws IOException on Input/Output errors.
*/
public static void detectHandwrittenOcrGcs(String gcsPath, PrintStream out) throws Exception {
    // Build a single DOCUMENT_TEXT_DETECTION request for the GCS image,
    // hinting the handwriting model via the special language hint.
    List<AnnotateImageRequest> requests = new ArrayList<>();
    ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
    Image img = Image.newBuilder().setSource(imgSource).build();
    Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
    // "en-t-i0-handwrit" selects the handwritten-text OCR model.
    ImageContext imageContext =
        ImageContext.newBuilder().addLanguageHints("en-t-i0-handwrit").build();
    AnnotateImageRequest request =
        AnnotateImageRequest.newBuilder()
            .addFeatures(feat)
            .setImage(img)
            .setImageContext(imageContext)
            .build();
    requests.add(request);
    // try-with-resources closes the client; the original also called
    // client.close() explicitly, which closed it twice.
    try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
        BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
        List<AnnotateImageResponse> responses = response.getResponsesList();
        for (AnnotateImageResponse res : responses) {
            if (res.hasError()) {
                out.printf("Error: %s\n", res.getError().getMessage());
                return;
            }
            // For full list of available annotations, see http://g.co/cloud/vision/docs
            TextAnnotation annotation = res.getFullTextAnnotation();
            // Walk the page -> block -> paragraph -> word -> symbol hierarchy,
            // accumulating text with StringBuilder instead of String '+' in loops.
            for (Page page : annotation.getPagesList()) {
                for (Block block : page.getBlocksList()) {
                    for (Paragraph para : block.getParagraphsList()) {
                        StringBuilder paraText = new StringBuilder();
                        for (Word word : para.getWordsList()) {
                            StringBuilder wordText = new StringBuilder();
                            for (Symbol symbol : word.getSymbolsList()) {
                                wordText.append(symbol.getText());
                                out.format(
                                    "Symbol text: %s (confidence: %f)\n",
                                    symbol.getText(), symbol.getConfidence());
                            }
                            out.format(
                                "Word text: %s (confidence: %f)\n\n",
                                wordText, word.getConfidence());
                            // Matches the original String.format("%s %s", ...) spacing.
                            paraText.append(' ').append(wordText);
                        }
                        // Output Example using Paragraph:
                        out.println("\nParagraph: \n" + paraText);
                        out.format("Paragraph Confidence: %f\n", para.getConfidence());
                    }
                }
            }
            out.println("\nComplete annotation:");
            out.println(annotation.getText());
        }
    }
}
Use of the Vision API TextAnnotation (com.google.cloud.vision.v1.TextAnnotation, returned by getFullTextAnnotation) in project java-vision by googleapis:
class Detect, method detectDocumentText.
/**
* Performs document text detection on a local image file.
*
* @param filePath The path to the local file to detect document text on.
* @throws Exception on errors while closing the client.
* @throws IOException on Input/Output errors.
*/
// [START vision_fulltext_detection]
public static void detectDocumentText(String filePath) throws IOException {
    // Read the local image; the original leaked the FileInputStream —
    // try-with-resources guarantees it is closed.
    ByteString imgBytes;
    try (FileInputStream imageStream = new FileInputStream(filePath)) {
        imgBytes = ByteString.readFrom(imageStream);
    }
    // Build a single DOCUMENT_TEXT_DETECTION request for the image.
    List<AnnotateImageRequest> requests = new ArrayList<>();
    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
    AnnotateImageRequest request =
        AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
    requests.add(request);
    // try-with-resources safely cleans up the client's background resources;
    // the original also called client.close() explicitly (double close).
    try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
        BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
        List<AnnotateImageResponse> responses = response.getResponsesList();
        for (AnnotateImageResponse res : responses) {
            if (res.hasError()) {
                System.out.format("Error: %s%n", res.getError().getMessage());
                return;
            }
            // For full list of available annotations, see http://g.co/cloud/vision/docs
            TextAnnotation annotation = res.getFullTextAnnotation();
            // Walk the page -> block -> paragraph -> word -> symbol hierarchy.
            for (Page page : annotation.getPagesList()) {
                for (Block block : page.getBlocksList()) {
                    for (Paragraph para : block.getParagraphsList()) {
                        StringBuilder paraText = new StringBuilder();
                        for (Word word : para.getWordsList()) {
                            StringBuilder wordText = new StringBuilder();
                            for (Symbol symbol : word.getSymbolsList()) {
                                wordText.append(symbol.getText());
                                System.out.format(
                                    "Symbol text: %s (confidence: %f)%n",
                                    symbol.getText(), symbol.getConfidence());
                            }
                            System.out.format(
                                "Word text: %s (confidence: %f)%n%n",
                                wordText, word.getConfidence());
                            // Matches the original String.format("%s %s", ...) spacing.
                            paraText.append(' ').append(wordText);
                        }
                        // Output Example using Paragraph.
                        // BUG FIX: println does not interpret %n — the original
                        // printed the literal text "%nParagraph: %n". Use format.
                        System.out.format("%nParagraph: %n%s%n", paraText);
                        System.out.format("Paragraph Confidence: %f%n", para.getConfidence());
                    }
                }
            }
            // Same %n-in-println bug fixed here.
            System.out.format("%nComplete annotation:%n");
            System.out.println(annotation.getText());
        }
    }
}
Aggregations