use of com.google.cloud.videointelligence.v1beta2.VideoAnnotationResults in project java-video-intelligence by googleapis.
the class DetectPersonGcs method detectPersonGcs.
// Detects people in a video stored in Google Cloud Storage using
// the Cloud Video Intelligence API.
public static void detectPersonGcs(String gcsUri) throws Exception {
  try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
      VideoIntelligenceServiceClient.create()) {
    // Configure person detection to include bounding boxes, pose landmarks, and attributes.
    PersonDetectionConfig personDetectionConfig =
        PersonDetectionConfig.newBuilder()
            .setIncludeBoundingBoxes(true)
            .setIncludePoseLandmarks(true)
            .setIncludeAttributes(true)
            .build();
    VideoContext videoContext =
        VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputUri(gcsUri)
            .addFeatures(Feature.PERSON_DETECTION)
            .setVideoContext(videoContext)
            .build();
    // Asynchronously submit the request; person detection is a long-running operation.
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
        videoIntelligenceServiceClient.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    AnnotateVideoResponse response = future.get();
    // Get the first result, since only one video was sent.
    VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
    // Annotations for each person detected, tracked, and recognized in the video.
    for (PersonDetectionAnnotation personDetectionAnnotation :
        annotationResult.getPersonDetectionAnnotationsList()) {
      System.out.print("Person detected:\n");
      for (Track track : personDetectionAnnotation.getTracksList()) {
        VideoSegment segment = track.getSegment();
        double startTime =
            segment.getStartTimeOffset().getSeconds()
                + segment.getStartTimeOffset().getNanos() / 1e9;
        double endTime =
            segment.getEndTimeOffset().getSeconds()
                + segment.getEndTimeOffset().getNanos() / 1e9;
        System.out.printf("\tStart: %.2fs\n", startTime);
        System.out.printf("\tEnd: %.2fs\n", endTime);
        // Each segment includes timestamped objects that carry characteristics of the
        // person detected, e.g. clothing and posture. Grab the first timestamped object.
        TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
        // Attributes describe the person detected, e.g. items of clothing or pose.
        for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
          System.out.printf(
              "\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
        }
        // Landmarks in person detection include body parts.
        for (DetectedLandmark landmark : firstTimestampedObject.getLandmarksList()) {
          System.out.printf(
              "\tLandmark: %s; Vertex: %f, %f\n",
              landmark.getName(), landmark.getPoint().getX(), landmark.getPoint().getY());
        }
      }
    }
  }
}
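For completeness, a minimal caller for detectPersonGcs might look like the sketch below. The class name and URI are placeholders (any readable gs:// video works), and credentials are assumed to come from Application Default Credentials:

public class DetectPersonGcsDemo {
  public static void main(String[] args) throws Exception {
    // Placeholder input; replace with a gs:// video you can read.
    String gcsUri = "gs://cloud-samples-data/video/googlework_short.mp4";
    DetectPersonGcs.detectPersonGcs(gcsUri);
  }
}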
use of com.google.cloud.videointelligence.v1beta2.VideoAnnotationResults in project java-video-intelligence by googleapis.
the class Detect method speechTranscription.
// [START video_speech_transcription_gcs_beta]
/**
* Transcribe speech from a video stored on GCS.
*
* @param gcsUri the path to the video file to analyze.
*/
public static void speechTranscription(String gcsUri) throws Exception {
  // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Set the language code and enable automatic punctuation.
    SpeechTranscriptionConfig config =
        SpeechTranscriptionConfig.newBuilder()
            .setLanguageCode("en-US")
            .setEnableAutomaticPunctuation(true)
            .build();
    // Set the video context with the above configuration
    VideoContext context =
        VideoContext.newBuilder().setSpeechTranscriptionConfig(config).build();
    // Create the request
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputUri(gcsUri)
            .addFeatures(Feature.SPEECH_TRANSCRIPTION)
            .setVideoContext(context)
            .build();
    // Asynchronously perform speech transcription on the video.
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    // Display the results, waiting at most 300 seconds for the operation to finish.
    for (VideoAnnotationResults results :
        response.get(300, TimeUnit.SECONDS).getAnnotationResultsList()) {
      for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
        try {
          // Print the transcription of the most likely alternative.
          if (speechTranscription.getAlternativesCount() > 0) {
            SpeechRecognitionAlternative alternative = speechTranscription.getAlternatives(0);
            System.out.printf("Transcript: %s\n", alternative.getTranscript());
            System.out.printf("Confidence: %.2f\n", alternative.getConfidence());
            System.out.println("Word level information:");
            for (WordInfo wordInfo : alternative.getWordsList()) {
              double startTime =
                  wordInfo.getStartTime().getSeconds()
                      + wordInfo.getStartTime().getNanos() / 1e9;
              double endTime =
                  wordInfo.getEndTime().getSeconds() + wordInfo.getEndTime().getNanos() / 1e9;
              System.out.printf(
                  "\t%4.2fs - %4.2fs: %s\n", startTime, endTime, wordInfo.getWord());
            }
          } else {
            System.out.println("No transcription found");
          }
        } catch (IndexOutOfBoundsException ioe) {
          System.out.println("Could not retrieve transcription: " + ioe.getMessage());
        }
      }
    }
  }
}
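Both samples above convert a protobuf Duration into fractional seconds by adding getSeconds() to getNanos() / 1e9. A small helper can centralize that arithmetic; this is a sketch, and the TimeOffsets/toSeconds names are not from the repository:

import com.google.protobuf.Duration;

final class TimeOffsets {
  private TimeOffsets() {}

  // Combines whole seconds and nanoseconds into fractional seconds,
  // e.g. 1s + 500_000_000ns -> 1.5.
  static double toSeconds(Duration d) {
    return d.getSeconds() + d.getNanos() / 1e9;
  }
}

With it, the word-timing loop reduces to toSeconds(wordInfo.getStartTime()) and toSeconds(wordInfo.getEndTime()).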
use of com.google.cloud.videointelligence.v1beta2.VideoAnnotationResults in project java-video-intelligence by googleapis.
the class DetectIT method testTextDetectionGcs.
@Test
public void testTextDetectionGcs() throws Exception {
  VideoAnnotationResults result = TextDetection.detectTextGcs(SPEECH_GCS_LOCATION);
  boolean textExists = false;
  for (TextAnnotation textAnnotation : result.getTextAnnotationsList()) {
    for (String possibleText : POSSIBLE_TEXTS) {
      if (textAnnotation.getText().toUpperCase().contains(possibleText.toUpperCase())) {
        textExists = true;
        break;
      }
    }
  }
  assertThat(textExists).isTrue();
}
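SPEECH_GCS_LOCATION and POSSIBLE_TEXTS are fixtures defined elsewhere in DetectIT. The fragment below sketches what such fixtures typically look like; the URI and strings are illustrative assumptions, not the repository's actual values:

// Hypothetical fixtures -- the real test supplies its own video URI and expected strings.
private static final String SPEECH_GCS_LOCATION = "gs://your-test-bucket/video/sample.mp4";
private static final String[] POSSIBLE_TEXTS = {"GOOGLE", "LONDON", "VIDEO"};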
use of com.google.cloud.videointelligence.v1beta2.VideoAnnotationResults in project java-video-intelligence by googleapis.
the class ITSystemTest method annotateVideoTest.
@Test
public void annotateVideoTest() throws ExecutionException, InterruptedException {
  AnnotateVideoResponse response = client.annotateVideoAsync(BUCKET_URI, FEATURE_LIST).get();
  List<VideoAnnotationResults> videoAnnotationResults = response.getAnnotationResultsList();
  for (VideoAnnotationResults result : videoAnnotationResults) {
    // The API reports the input URI without the "gs:/" scheme prefix.
    assertEquals(BUCKET_URI.substring(4), result.getInputUri());
    assertTrue(result.getSegmentLabelAnnotationsList().size() > 0);
  }
}
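Here, client, BUCKET_URI, and FEATURE_LIST come from the test class's setup. One plausible arrangement is sketched below; the class name, bucket path, and chosen feature are assumptions, not the repository's code:

import com.google.cloud.videointelligence.v1beta2.Feature;
import com.google.cloud.videointelligence.v1beta2.VideoIntelligenceServiceClient;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class ITSystemTestFixture {
  // Assumed values; the real test defines its own bucket and feature list.
  static final String BUCKET_URI = "gs://your-test-bucket/video/sample.mp4";
  static final List<Feature> FEATURE_LIST = Arrays.asList(Feature.LABEL_DETECTION);
  static VideoIntelligenceServiceClient client;

  @BeforeClass
  public static void setUp() throws IOException {
    client = VideoIntelligenceServiceClient.create();
  }

  @AfterClass
  public static void tearDown() {
    client.close();
  }
}

LABEL_DETECTION matches the assertion on getSegmentLabelAnnotationsList(), and BUCKET_URI.substring(4) strips the leading "gs:/" so the expected value matches the input URI the API reports back.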