Use of com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest in project google-cloud-java by GoogleCloudPlatform.
In the class VideoIntelligenceServiceClientTest, the method annotateVideoTest:
@Test
@SuppressWarnings("all")
public void annotateVideoTest() throws Exception {
AnnotateVideoResponse expectedResponse = AnnotateVideoResponse.newBuilder().build();
Operation resultOperation =
    Operation.newBuilder()
        .setName("annotateVideoTest")
        .setDone(true)
        .setResponse(Any.pack(expectedResponse))
        .build();
mockVideoIntelligenceService.addResponse(resultOperation);
String inputUri = "inputUri1707300727";
List<Feature> features = new ArrayList<>();
VideoContext videoContext = VideoContext.newBuilder().build();
String outputUri = "outputUri-1273518802";
String locationId = "locationId552319461";
AnnotateVideoResponse actualResponse = client.annotateVideoAsync(inputUri, features, videoContext, outputUri, locationId).get();
Assert.assertEquals(expectedResponse, actualResponse);
List<GeneratedMessageV3> actualRequests = mockVideoIntelligenceService.getRequests();
Assert.assertEquals(1, actualRequests.size());
AnnotateVideoRequest actualRequest = (AnnotateVideoRequest) actualRequests.get(0);
Assert.assertEquals(inputUri, actualRequest.getInputUri());
Assert.assertEquals(features, actualRequest.getFeaturesList());
Assert.assertEquals(videoContext, actualRequest.getVideoContext());
Assert.assertEquals(outputUri, actualRequest.getOutputUri());
Assert.assertEquals(locationId, actualRequest.getLocationId());
}
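The test above passes an empty feature list against a mocked service. Below is a minimal sketch of how the same annotateVideoAsync overload might be populated against a real client; the URIs, feature choice, and region are hypothetical placeholders, not values from the test.
// Sketch only: hypothetical arguments for the same annotateVideoAsync overload.
String inputUri = "gs://my-bucket/my-video.mp4";        // hypothetical input location
List<Feature> features = new ArrayList<>();
features.add(Feature.LABEL_DETECTION);                  // any supported Feature value
VideoContext videoContext = VideoContext.getDefaultInstance();
String outputUri = "gs://my-bucket/annotations.json";   // hypothetical output location
String locationId = "us-east1";                         // hypothetical processing region
AnnotateVideoResponse response =
    client.annotateVideoAsync(inputUri, features, videoContext, outputUri, locationId).get();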
Use of com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest in project java-docs-samples by GoogleCloudPlatform.
In the class Detect, the method analyzeFaceEmotions:
// [END video_face_bounding_boxes]
// [START video_face_emotions]
/**
* Analyzes face emotions, frame by frame, for the video at the provided Cloud Storage path.
*
* @param gcsUri the path to the video file to analyze.
*/
public static void analyzeFaceEmotions(String gcsUri) throws Exception {
// Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Set the configuration to include emotions
FaceConfig config = FaceConfig.newBuilder().setIncludeEmotions(true).build();
// Set the video context with the above configuration
VideoContext context = VideoContext.newBuilder().setFaceDetectionConfig(config).build();
// Create the request
AnnotateVideoRequest request =
    AnnotateVideoRequest.newBuilder()
        .setInputUri(gcsUri)
        .addFeatures(Feature.FACE_DETECTION)
        .setVideoContext(context)
        .build();
// Asynchronously perform facial analysis on the video
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response = client.annotateVideoAsync(request);
System.out.println("Waiting for operation to complete...");
boolean faceFound = false;
// Display the results
for (VideoAnnotationResults results : response.get(600, TimeUnit.SECONDS).getAnnotationResultsList()) {
int faceCount = 0;
// Display the results for each face
for (FaceDetectionAnnotation faceAnnotation : results.getFaceDetectionAnnotationsList()) {
faceFound = true;
System.out.println("\nFace: " + ++faceCount);
// Each FaceDetectionAnnotation has only one segment.
for (FaceSegment segment : faceAnnotation.getSegmentsList()) {
double startTime = segment.getSegment().getStartTimeOffset().getSeconds() + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
double endTime = segment.getSegment().getEndTimeOffset().getSeconds() + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
System.out.printf("Segment location: %.3fs to %.3f\n", startTime, endTime);
}
try {
// Print each frame's highest emotion
for (FaceDetectionFrame frame : faceAnnotation.getFramesList()) {
double timeOffset = frame.getTimeOffset().getSeconds() + frame.getTimeOffset().getNanos() / 1e9;
float highestScore = 0.0f;
String emotion = "";
// Get the highest scoring emotion for the current frame
for (EmotionAttribute emotionAttribute : frame.getAttributes(0).getEmotionsList()) {
if (emotionAttribute.getScore() > highestScore) {
highestScore = emotionAttribute.getScore();
emotion = emotionAttribute.getEmotion().name();
}
}
System.out.printf("\t%4.2fs: %14s %4.3f\n", timeOffset, emotion, highestScore);
}
} catch (IndexOutOfBoundsException ioe) {
System.out.println("Could not retrieve frame: " + ioe.getMessage());
}
}
}
if (!faceFound) {
System.out.println("No faces detected in " + gcsUri);
}
}
}
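A minimal calling sketch for the sample above, assuming the video has already been uploaded to Cloud Storage; the gs:// URI below is a placeholder, not part of the sample.
// Hypothetical driver for analyzeFaceEmotions; the gs:// URI is a placeholder.
public static void main(String[] args) throws Exception {
  String gcsUri = args.length > 0 ? args[0] : "gs://my-bucket/face-video.mp4";
  analyzeFaceEmotions(gcsUri);
}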
Use of com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest in project java-docs-samples by GoogleCloudPlatform.
In the class Detect, the method speechTranscription:
// [END video_face_emotions]
// [START video_speech_transcription]
/**
* Transcribe speech from a video stored on GCS.
*
* @param gcsUri the path to the video file to analyze.
*/
public static void speechTranscription(String gcsUri) throws Exception {
// Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Set the language code
SpeechTranscriptionConfig config = SpeechTranscriptionConfig.newBuilder().setLanguageCode("en-US").build();
// Set the video context with the above configuration
VideoContext context = VideoContext.newBuilder().setSpeechTranscriptionConfig(config).build();
// Create the request
AnnotateVideoRequest request =
    AnnotateVideoRequest.newBuilder()
        .setInputUri(gcsUri)
        .addFeatures(Feature.SPEECH_TRANSCRIPTION)
        .setVideoContext(context)
        .build();
// Asynchronously perform speech transcription on the video
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response = client.annotateVideoAsync(request);
System.out.println("Waiting for operation to complete...");
// Display the results
for (VideoAnnotationResults results : response.get(180, TimeUnit.SECONDS).getAnnotationResultsList()) {
for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
try {
// Print the transcription
if (speechTranscription.getAlternativesCount() > 0) {
SpeechRecognitionAlternative alternative = speechTranscription.getAlternatives(0);
System.out.printf("Transcript: %s\n", alternative.getTranscript());
System.out.printf("Confidence: %.2f\n", alternative.getConfidence());
System.out.println("Word level information:");
for (WordInfo wordInfo : alternative.getWordsList()) {
double startTime = wordInfo.getStartTime().getSeconds() + wordInfo.getStartTime().getNanos() / 1e9;
double endTime = wordInfo.getEndTime().getSeconds() + wordInfo.getEndTime().getNanos() / 1e9;
System.out.printf("\t%4.2fs - %4.2fs: %s\n", startTime, endTime, wordInfo.getWord());
}
} else {
System.out.println("No transcription found");
}
} catch (IndexOutOfBoundsException ioe) {
System.out.println("Could not retrieve frame: " + ioe.getMessage());
}
}
}
}
}
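The sample prints only the top-ranked alternative; if every hypothesis is wanted, the repeated field can be iterated directly. A minimal sketch that reuses the same results loop variable and generated accessors:
// Sketch: walk every alternative instead of only index 0.
for (SpeechTranscription transcription : results.getSpeechTranscriptionsList()) {
  for (SpeechRecognitionAlternative alternative : transcription.getAlternativesList()) {
    System.out.printf("Transcript: %s (confidence %.2f)\n",
        alternative.getTranscript(), alternative.getConfidence());
  }
}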
Use of com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest in project java-docs-samples by GoogleCloudPlatform.
In the class Detect, the method analyzeLabelsFile:
/**
* Performs label analysis on the video at the provided file path.
*
* @param filePath the path to the video file to analyze.
*/
public static void analyzeLabelsFile(String filePath) throws Exception {
// Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Read file and encode into Base64
Path path = Paths.get(filePath);
byte[] data = Files.readAllBytes(path);
byte[] encodedBytes = Base64.encodeBase64(data);
AnnotateVideoRequest request =
    AnnotateVideoRequest.newBuilder()
        .setInputContent(ByteString.copyFrom(encodedBytes))
        .addFeatures(Feature.LABEL_DETECTION)
        .build();
// Create an operation that will contain the response when the operation completes.
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response = client.annotateVideoAsync(request);
System.out.println("Waiting for operation to complete...");
for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
// process video / segment level label annotations
System.out.println("Locations: ");
for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
System.out.println("Video label: " + labelAnnotation.getEntity().getDescription());
// categories
for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
System.out.println("Video label category: " + categoryEntity.getDescription());
}
// segments
for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
double startTime = segment.getSegment().getStartTimeOffset().getSeconds() + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
double endTime = segment.getSegment().getEndTimeOffset().getSeconds() + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
System.out.println("Confidence: " + segment.getConfidence());
}
}
// process shot label annotations
for (LabelAnnotation labelAnnotation : results.getShotLabelAnnotationsList()) {
System.out.println("Shot label: " + labelAnnotation.getEntity().getDescription());
// categories
for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
System.out.println("Shot label category: " + categoryEntity.getDescription());
}
// segments
for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
double startTime = segment.getSegment().getStartTimeOffset().getSeconds() + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
double endTime = segment.getSegment().getEndTimeOffset().getSeconds() + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
System.out.println("Confidence: " + segment.getConfidence());
}
}
// process frame label annotations
for (LabelAnnotation labelAnnotation : results.getFrameLabelAnnotationsList()) {
System.out.println("Frame label: " + labelAnnotation.getEntity().getDescription());
// categories
for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
System.out.println("Frame label category: " + categoryEntity.getDescription());
}
// segments
for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
double startTime = segment.getSegment().getStartTimeOffset().getSeconds() + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
double endTime = segment.getSegment().getEndTimeOffset().getSeconds() + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
System.out.println("Confidence: " + segment.getConfidence());
}
}
}
}
// [END detect_labels_file]
}
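A minimal calling sketch for the local-file variant; the path below is a placeholder. Note that Base64.encodeBase64 in the sample is the Apache Commons Codec helper (org.apache.commons.codec.binary.Base64), not java.util.Base64, and inline request payloads are size-limited, so larger videos are usually passed by gs:// URI instead.
// Hypothetical local invocation of the sample above; the path is a placeholder.
analyzeLabelsFile("resources/cat.mp4");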
Use of com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest in project java-docs-samples by GoogleCloudPlatform.
In the class Detect, the method analyzeShots:
/**
* Performs shot analysis on the video at the provided Cloud Storage path.
*
* @param gcsUri the path to the video file to analyze.
*/
public static void analyzeShots(String gcsUri) throws Exception {
// Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Provide path to file hosted on GCS as "gs://bucket-name/..."
AnnotateVideoRequest request =
    AnnotateVideoRequest.newBuilder()
        .setInputUri(gcsUri)
        .addFeatures(Feature.SHOT_CHANGE_DETECTION)
        .build();
// Create an operation that will contain the response when the operation completes.
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response = client.annotateVideoAsync(request);
System.out.println("Waiting for operation to complete...");
// Print detected shot changes and their location ranges in the analyzed video.
for (VideoAnnotationResults result : response.get().getAnnotationResultsList()) {
if (result.getShotAnnotationsCount() > 0) {
System.out.println("Shots: ");
for (VideoSegment segment : result.getShotAnnotationsList()) {
double startTime = segment.getStartTimeOffset().getSeconds() + segment.getStartTimeOffset().getNanos() / 1e9;
double endTime = segment.getEndTimeOffset().getSeconds() + segment.getEndTimeOffset().getNanos() / 1e9;
System.out.printf("Location: %.3f:%.3f\n", startTime, endTime);
}
} else {
System.out.println("No shot changes detected in " + gcsUri);
}
}
}
// [END detect_shots]
}
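For a quick summary instead of per-shot output, the same shot annotations can be aggregated inside the result loop; a minimal sketch using the accessors already shown above:
// Sketch: count the shots and total the time they cover.
int shotCount = result.getShotAnnotationsCount();
double totalSeconds = 0;
for (VideoSegment segment : result.getShotAnnotationsList()) {
  double start = segment.getStartTimeOffset().getSeconds() + segment.getStartTimeOffset().getNanos() / 1e9;
  double end = segment.getEndTimeOffset().getSeconds() + segment.getEndTimeOffset().getNanos() / 1e9;
  totalSeconds += end - start;
}
System.out.printf("%d shots covering %.3f seconds\n", shotCount, totalSeconds);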