Use of com.google.cloud.videointelligence.v1.VideoContext in project beam by apache: class VideoIntelligenceIT, method annotateVideoFromURIWithContext.
@Test
public void annotateVideoFromURIWithContext() {
  VideoContext context =
      VideoContext.newBuilder()
          .setLabelDetectionConfig(
              LabelDetectionConfig.newBuilder().setModel("builtin/latest"))
          .build();
  PCollection<List<VideoAnnotationResults>> annotationResults =
      testPipeline
          .apply(Create.of(KV.of(VIDEO_URI, context)))
          .apply(
              "Annotate video",
              VideoIntelligence.annotateFromUriWithContext(featureList));
  PAssert.that(annotationResults).satisfies(new VerifyVideoAnnotationResult());
  testPipeline.run().waitUntilFinish();
}
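The test refers to a VIDEO_URI constant and a featureList field that are declared elsewhere in VideoIntelligenceIT. A minimal sketch of what such declarations could look like; the URI and the feature choice here are placeholders, not necessarily the project's actual values:

  // Hypothetical declarations; the real test class defines its own values.
  private static final String VIDEO_URI = "gs://my-bucket/my-video.mp4"; // placeholder path
  private static final List<Feature> featureList =
      Collections.singletonList(Feature.LABEL_DETECTION); // feature chosen for illustration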
Use of com.google.cloud.videointelligence.v1.VideoContext in project google-cloud-java by GoogleCloudPlatform: class VideoIntelligenceServiceClientTest, method annotateVideoTest.
@Test
@SuppressWarnings("all")
public void annotateVideoTest() throws Exception {
  // Stub the long-running operation that the mock service will return.
  AnnotateVideoResponse expectedResponse = AnnotateVideoResponse.newBuilder().build();
  Operation resultOperation =
      Operation.newBuilder()
          .setName("annotateVideoTest")
          .setDone(true)
          .setResponse(Any.pack(expectedResponse))
          .build();
  mockVideoIntelligenceService.addResponse(resultOperation);

  String inputUri = "inputUri1707300727";
  List<Feature> features = new ArrayList<>();
  VideoContext videoContext = VideoContext.newBuilder().build();
  String outputUri = "outputUri-1273518802";
  String locationId = "locationId552319461";

  AnnotateVideoResponse actualResponse =
      client.annotateVideoAsync(inputUri, features, videoContext, outputUri, locationId).get();
  Assert.assertEquals(expectedResponse, actualResponse);

  // Verify that exactly one request reached the mock service and that it carried the arguments.
  List<GeneratedMessageV3> actualRequests = mockVideoIntelligenceService.getRequests();
  Assert.assertEquals(1, actualRequests.size());
  AnnotateVideoRequest actualRequest = (AnnotateVideoRequest) actualRequests.get(0);
  Assert.assertEquals(inputUri, actualRequest.getInputUri());
  Assert.assertEquals(features, actualRequest.getFeaturesList());
  Assert.assertEquals(videoContext, actualRequest.getVideoContext());
  Assert.assertEquals(outputUri, actualRequest.getOutputUri());
  Assert.assertEquals(locationId, actualRequest.getLocationId());
}
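Outside the mocked test, the same annotateVideoAsync overload (as exercised by this client version) could be called against the real service roughly as follows; the bucket paths and location are placeholders:

  // Sketch only; URIs and locationId are placeholders, not values from the original test.
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    List<Feature> features = Collections.singletonList(Feature.LABEL_DETECTION);
    VideoContext videoContext = VideoContext.newBuilder().build();
    AnnotateVideoResponse response =
        client
            .annotateVideoAsync(
                "gs://my-bucket/my-video.mp4", // placeholder input URI
                features,
                videoContext,
                "gs://my-bucket/output/",      // placeholder output URI
                "us-east1")                    // placeholder location
            .get();
    System.out.println(response);
  }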
Use of com.google.cloud.videointelligence.v1.VideoContext in project java-docs-samples by GoogleCloudPlatform: class Detect, method analyzeFaceEmotions.
// [END video_face_bounding_boxes]
// [START video_face_emotions]
/**
 * Analyzes faces' emotions over the frames of the video at the provided Cloud Storage path.
 *
 * @param gcsUri the path to the video file to analyze.
 */
public static void analyzeFaceEmotions(String gcsUri) throws Exception {
  // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Set the configuration to include emotions
    FaceConfig config = FaceConfig.newBuilder().setIncludeEmotions(true).build();
    // Set the video context with the above configuration
    VideoContext context = VideoContext.newBuilder().setFaceDetectionConfig(config).build();
    // Create the request
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputUri(gcsUri)
            .addFeatures(Feature.FACE_DETECTION)
            .setVideoContext(context)
            .build();
    // Asynchronously perform facial analysis on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    boolean faceFound = false;
    // Display the results
    for (VideoAnnotationResults results :
        response.get(600, TimeUnit.SECONDS).getAnnotationResultsList()) {
      int faceCount = 0;
      // Display the results for each face
      for (FaceDetectionAnnotation faceAnnotation : results.getFaceDetectionAnnotationsList()) {
        faceFound = true;
        System.out.println("\nFace: " + ++faceCount);
        // Each FaceDetectionAnnotation has only one segment.
        for (FaceSegment segment : faceAnnotation.getSegmentsList()) {
          double startTime =
              segment.getSegment().getStartTimeOffset().getSeconds()
                  + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
          double endTime =
              segment.getSegment().getEndTimeOffset().getSeconds()
                  + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Segment location: %.3fs to %.3fs\n", startTime, endTime);
        }
        try {
          // Print each frame's highest-scoring emotion
          for (FaceDetectionFrame frame : faceAnnotation.getFramesList()) {
            double timeOffset =
                frame.getTimeOffset().getSeconds() + frame.getTimeOffset().getNanos() / 1e9;
            float highestScore = 0.0f;
            String emotion = "";
            // Get the highest-scoring emotion for the current frame
            for (EmotionAttribute emotionAttribute : frame.getAttributes(0).getEmotionsList()) {
              if (emotionAttribute.getScore() > highestScore) {
                highestScore = emotionAttribute.getScore();
                emotion = emotionAttribute.getEmotion().name();
              }
            }
            System.out.printf("\t%4.2fs: %14s %4.3f\n", timeOffset, emotion, highestScore);
          }
        } catch (IndexOutOfBoundsException ioe) {
          System.out.println("Could not retrieve frame: " + ioe.getMessage());
        }
      }
    }
    if (!faceFound) {
      System.out.println("No faces detected in " + gcsUri);
    }
  }
}
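Detect's real entry point parses command-line arguments; as a minimal illustration, the method could also be invoked directly with a placeholder path:

  // Hypothetical caller; the GCS path is a placeholder for a video in your own bucket.
  public static void main(String[] args) throws Exception {
    analyzeFaceEmotions("gs://my-bucket/my-video.mp4");
  }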
Use of com.google.cloud.videointelligence.v1.VideoContext in project java-docs-samples by GoogleCloudPlatform: class Detect, method speechTranscription.
// [END video_face_emotions]
// [START video_speech_transcription]
/**
 * Transcribes speech from a video stored on GCS.
 *
 * @param gcsUri the path to the video file to analyze.
 */
public static void speechTranscription(String gcsUri) throws Exception {
  // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Set the language code
    SpeechTranscriptionConfig config =
        SpeechTranscriptionConfig.newBuilder().setLanguageCode("en-US").build();
    // Set the video context with the above configuration
    VideoContext context = VideoContext.newBuilder().setSpeechTranscriptionConfig(config).build();
    // Create the request
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputUri(gcsUri)
            .addFeatures(Feature.SPEECH_TRANSCRIPTION)
            .setVideoContext(context)
            .build();
    // Asynchronously perform speech transcription on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    // Display the results
    for (VideoAnnotationResults results :
        response.get(180, TimeUnit.SECONDS).getAnnotationResultsList()) {
      for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
        try {
          // Print the transcription
          if (speechTranscription.getAlternativesCount() > 0) {
            SpeechRecognitionAlternative alternative = speechTranscription.getAlternatives(0);
            System.out.printf("Transcript: %s\n", alternative.getTranscript());
            System.out.printf("Confidence: %.2f\n", alternative.getConfidence());
            System.out.println("Word level information:");
            for (WordInfo wordInfo : alternative.getWordsList()) {
              double startTime =
                  wordInfo.getStartTime().getSeconds() + wordInfo.getStartTime().getNanos() / 1e9;
              double endTime =
                  wordInfo.getEndTime().getSeconds() + wordInfo.getEndTime().getNanos() / 1e9;
              System.out.printf(
                  "\t%4.2fs - %4.2fs: %s\n", startTime, endTime, wordInfo.getWord());
            }
          } else {
            System.out.println("No transcription found");
          }
        } catch (IndexOutOfBoundsException ioe) {
          System.out.println("Could not retrieve transcription: " + ioe.getMessage());
        }
      }
    }
  }
}
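The SpeechTranscriptionConfig above only sets the language. A hedged variant, assuming the config's max-alternatives field available in this API version, could also request several candidate transcripts per utterance; the count here is arbitrary:

  // Sketch only; setMaxAlternatives value chosen for illustration.
  SpeechTranscriptionConfig config =
      SpeechTranscriptionConfig.newBuilder()
          .setLanguageCode("en-US")
          .setMaxAlternatives(3) // up to three candidate transcripts per utterance
          .build();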
Use of com.google.cloud.videointelligence.v1.VideoContext in project beam by apache: class AnnotateVideoFn, method getVideoAnnotationResults.
/**
 * Calls the Video Intelligence Cloud AI service and returns the annotation results.
 *
 * @param elementURI this or elementContents is required; GCS address of the video to be
 *     annotated
 * @param elementContents this or elementURI is required; hex-encoded contents of the video to
 *     be annotated
 * @param videoContext optional context for video annotation
 * @return the list of VideoAnnotationResults produced by the completed operation
 */
List<VideoAnnotationResults> getVideoAnnotationResults(
    String elementURI, ByteString elementContents, VideoContext videoContext)
    throws InterruptedException, ExecutionException {
  AnnotateVideoRequest.Builder requestBuilder =
      AnnotateVideoRequest.newBuilder().addAllFeatures(featureList);
  if (elementURI != null) {
    requestBuilder.setInputUri(elementURI);
  } else if (elementContents != null) {
    requestBuilder.setInputContent(elementContents);
  } else {
    throw new IllegalArgumentException("Either elementURI or elementContents should be non-null");
  }
  if (videoContext != null) {
    requestBuilder.setVideoContext(videoContext);
  }
  AnnotateVideoRequest annotateVideoRequest = requestBuilder.build();
  OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> annotateVideoAsync =
      videoIntelligenceServiceClient.annotateVideoAsync(annotateVideoRequest);
  return annotateVideoAsync.get().getAnnotationResultsList();
}
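A hedged illustration of how a caller inside AnnotateVideoFn (or a subclass) might exercise both input paths of this helper; the variable names and the label-detection context are invented for the example:

  // Annotate by GCS URI; no raw contents and no context.
  List<VideoAnnotationResults> byUri =
      getVideoAnnotationResults("gs://my-bucket/my-video.mp4", null, null); // placeholder URI

  // Annotate by raw contents with an explicit context.
  ByteString contents = ByteString.copyFrom(videoBytes); // videoBytes obtained elsewhere
  VideoContext context =
      VideoContext.newBuilder()
          .setLabelDetectionConfig(
              LabelDetectionConfig.newBuilder().setModel("builtin/latest"))
          .build();
  List<VideoAnnotationResults> byContents = getVideoAnnotationResults(null, contents, context);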