Use of com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient in project java-docs-samples by GoogleCloudPlatform.
The class Detect, method analyzeFacesBoundingBoxes.
// [START video_face_bounding_boxes]
/**
 * Detects faces' bounding boxes on the video at the provided Cloud Storage path.
 *
 * @param gcsUri the path to the video file to analyze.
 */
public static void analyzeFacesBoundingBoxes(String gcsUri) throws Exception {
  // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Set the configuration to include bounding boxes
    FaceConfig config = FaceConfig.newBuilder()
        .setIncludeBoundingBoxes(true)
        .build();
    // Set the video context with the above configuration
    VideoContext context = VideoContext.newBuilder()
        .setFaceDetectionConfig(config)
        .build();
    // Create the request
    AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
        .setInputUri(gcsUri)
        .addFeatures(Feature.FACE_DETECTION)
        .setVideoContext(context)
        .build();
    // Asynchronously perform facial analysis on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    boolean faceFound = false;
    // Display the results
    for (VideoAnnotationResults results :
        response.get(900, TimeUnit.SECONDS).getAnnotationResultsList()) {
      int faceCount = 0;
      // Display the results for each face
      for (FaceDetectionAnnotation faceAnnotation : results.getFaceDetectionAnnotationsList()) {
        faceFound = true;
        System.out.println("\nFace: " + ++faceCount);
        // Each FaceDetectionAnnotation has only one segment.
        for (FaceSegment segment : faceAnnotation.getSegmentsList()) {
          double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
              + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
          double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
              + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Segment location: %.3fs to %.3fs\n", startTime, endTime);
        }
        // There are typically many frames for each face; here we process only the first.
        try {
          if (faceAnnotation.getFramesCount() > 0) {
            // Get the first frame
            FaceDetectionFrame frame = faceAnnotation.getFrames(0);
            double timeOffset = frame.getTimeOffset().getSeconds()
                + frame.getTimeOffset().getNanos() / 1e9;
            System.out.printf("First frame time offset: %.3fs\n", timeOffset);
            // Print info on the first normalized bounding box
            NormalizedBoundingBox box = frame.getAttributes(0).getNormalizedBoundingBox();
            System.out.printf("\tLeft: %.3f\n", box.getLeft());
            System.out.printf("\tTop: %.3f\n", box.getTop());
            System.out.printf("\tBottom: %.3f\n", box.getBottom());
            System.out.printf("\tRight: %.3f\n", box.getRight());
          } else {
            System.out.println("No frames found in annotation");
          }
        } catch (IndexOutOfBoundsException ioe) {
          System.out.println("Could not retrieve frame: " + ioe.getMessage());
        }
      }
    }
    if (!faceFound) {
      System.out.println("No faces detected in " + gcsUri);
    }
  }
}
// [END video_face_bounding_boxes]
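Every snippet on this page converts a protobuf com.google.protobuf.Duration into fractional seconds with the expression getSeconds() + getNanos() / 1e9. A small helper such as the following (a sketch, not part of the original sample; the name toSeconds is hypothetical) keeps that conversion in one place:

// Hypothetical helper, not in the original sample: converts a
// com.google.protobuf.Duration into fractional seconds.
private static double toSeconds(com.google.protobuf.Duration duration) {
  return duration.getSeconds() + duration.getNanos() / 1e9;
}

With it, the segment start time above becomes toSeconds(segment.getSegment().getStartTimeOffset()).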
Use of com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient in project java-docs-samples by GoogleCloudPlatform.
The class Detect, method analyzeLabels.
// [START detect_labels_gcs]
/**
 * Performs label analysis on the video at the provided Cloud Storage path.
 *
 * @param gcsUri the path to the video file to analyze.
 */
public static void analyzeLabels(String gcsUri) throws Exception {
  // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Provide path to file hosted on GCS as "gs://bucket-name/..."
    AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
        .setInputUri(gcsUri)
        .addFeatures(Feature.LABEL_DETECTION)
        .build();
    // Create an operation that will contain the response when the operation completes.
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
      // Process video / segment level label annotations
      System.out.println("Locations:");
      for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
        System.out.println("Video label: " + labelAnnotation.getEntity().getDescription());
        // Categories
        for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
          System.out.println("Video label category: " + categoryEntity.getDescription());
        }
        // Segments
        for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
          double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
              + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
          double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
              + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
          System.out.println("Confidence: " + segment.getConfidence());
        }
      }
      // Process shot label annotations
      for (LabelAnnotation labelAnnotation : results.getShotLabelAnnotationsList()) {
        System.out.println("Shot label: " + labelAnnotation.getEntity().getDescription());
        // Categories
        for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
          System.out.println("Shot label category: " + categoryEntity.getDescription());
        }
        // Segments
        for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
          double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
              + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
          double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
              + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
          System.out.println("Confidence: " + segment.getConfidence());
        }
      }
      // Process frame label annotations
      for (LabelAnnotation labelAnnotation : results.getFrameLabelAnnotationsList()) {
        System.out.println("Frame label: " + labelAnnotation.getEntity().getDescription());
        // Categories
        for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
          System.out.println("Frame label category: " + categoryEntity.getDescription());
        }
        // Segments
        for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
          double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
              + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
          double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
              + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
          System.out.println("Confidence: " + segment.getConfidence());
        }
      }
    }
  }
  // [END detect_labels_gcs]
}
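Unlike the face snippets, this method calls response.get() with no timeout, so it blocks until the operation finishes. A bounded variant (the 600-second value is illustrative, not taken from the sample) fails fast instead of hanging on a stuck operation:

// Illustrative: wait at most ten minutes, then throw TimeoutException.
for (VideoAnnotationResults results :
    response.get(600, TimeUnit.SECONDS).getAnnotationResultsList()) {
  // ... process annotations as above ...
}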
Use of com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient in project java-docs-samples by GoogleCloudPlatform.
The class QuickstartSample, method main.
/**
 * Demonstrates using the Video Intelligence client to detect labels in a video file.
 */
public static void main(String[] args) throws Exception {
  // Instantiate a video intelligence client
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // The Google Cloud Storage path to the video to annotate.
    String gcsUri = "gs://demomaker/cat.mp4";
    // Create the request for the video at the Cloud Storage path
    AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
        .setInputUri(gcsUri)
        .addFeatures(Feature.LABEL_DETECTION)
        .build();
    // Create an operation that will contain the response when the operation completes.
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    List<VideoAnnotationResults> results = response.get().getAnnotationResultsList();
    if (results.isEmpty()) {
      System.out.println("No labels detected in " + gcsUri);
      return;
    }
    for (VideoAnnotationResults result : results) {
      System.out.println("Labels:");
      // Get video segment label annotations
      for (LabelAnnotation annotation : result.getSegmentLabelAnnotationsList()) {
        System.out.println("Video label description: " + annotation.getEntity().getDescription());
        // Categories
        for (Entity categoryEntity : annotation.getCategoryEntitiesList()) {
          System.out.println("Label category description: " + categoryEntity.getDescription());
        }
        // Segments
        for (LabelSegment segment : annotation.getSegmentsList()) {
          double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
              + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
          double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
              + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
          System.out.println("Confidence: " + segment.getConfidence());
        }
      }
    }
  }
}
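The quickstart hard-codes gs://demomaker/cat.mp4. To point it at your own video, one option (a sketch; the fallback path is the sample's, the argument handling is not) is to read the URI from the command line:

// Hypothetical variant: take the Cloud Storage URI from the first argument,
// falling back to the sample video.
String gcsUri = args.length > 0 ? args[0] : "gs://demomaker/cat.mp4";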
Use of com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient in project java-docs-samples by GoogleCloudPlatform.
The class Detect, method analyzeFaceEmotions.
// [START video_face_emotions]
/**
 * Analyzes faces' emotions over frames on the video at the provided Cloud Storage path.
 *
 * @param gcsUri the path to the video file to analyze.
 */
public static void analyzeFaceEmotions(String gcsUri) throws Exception {
  // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Set the configuration to include emotions
    FaceConfig config = FaceConfig.newBuilder()
        .setIncludeEmotions(true)
        .build();
    // Set the video context with the above configuration
    VideoContext context = VideoContext.newBuilder()
        .setFaceDetectionConfig(config)
        .build();
    // Create the request
    AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
        .setInputUri(gcsUri)
        .addFeatures(Feature.FACE_DETECTION)
        .setVideoContext(context)
        .build();
    // Asynchronously perform facial analysis on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    boolean faceFound = false;
    // Display the results
    for (VideoAnnotationResults results :
        response.get(600, TimeUnit.SECONDS).getAnnotationResultsList()) {
      int faceCount = 0;
      // Display the results for each face
      for (FaceDetectionAnnotation faceAnnotation : results.getFaceDetectionAnnotationsList()) {
        faceFound = true;
        System.out.println("\nFace: " + ++faceCount);
        // Each FaceDetectionAnnotation has only one segment.
        for (FaceSegment segment : faceAnnotation.getSegmentsList()) {
          double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
              + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
          double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
              + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Segment location: %.3fs to %.3fs\n", startTime, endTime);
        }
        try {
          // Print each frame's highest-scoring emotion
          for (FaceDetectionFrame frame : faceAnnotation.getFramesList()) {
            double timeOffset = frame.getTimeOffset().getSeconds()
                + frame.getTimeOffset().getNanos() / 1e9;
            float highestScore = 0.0f;
            String emotion = "";
            // Get the highest scoring emotion for the current frame
            for (EmotionAttribute emotionAttribute : frame.getAttributes(0).getEmotionsList()) {
              if (emotionAttribute.getScore() > highestScore) {
                highestScore = emotionAttribute.getScore();
                emotion = emotionAttribute.getEmotion().name();
              }
            }
            System.out.printf("\t%4.2fs: %14s %4.3f\n", timeOffset, emotion, highestScore);
          }
        } catch (IndexOutOfBoundsException ioe) {
          System.out.println("Could not retrieve frame: " + ioe.getMessage());
        }
      }
    }
    if (!faceFound) {
      System.out.println("No faces detected in " + gcsUri);
    }
  }
}
// [END video_face_emotions]
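The frame.getAttributes(0) call above is what the IndexOutOfBoundsException handler guards against. An explicit count check (an alternative sketch, behaviorally equivalent for this case) makes the guard visible at the call site:

// Alternative to the try/catch: skip frames that carry no attributes.
for (FaceDetectionFrame frame : faceAnnotation.getFramesList()) {
  if (frame.getAttributesCount() == 0) {
    continue;
  }
  // ... inspect frame.getAttributes(0).getEmotionsList() as above ...
}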
Use of com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient in project java-docs-samples by GoogleCloudPlatform.
The class Detect, method speechTranscription.
// [START video_speech_transcription]
/**
 * Transcribes speech from a video stored on GCS.
 *
 * @param gcsUri the path to the video file to analyze.
 */
public static void speechTranscription(String gcsUri) throws Exception {
  // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Set the language code
    SpeechTranscriptionConfig config = SpeechTranscriptionConfig.newBuilder()
        .setLanguageCode("en-US")
        .build();
    // Set the video context with the above configuration
    VideoContext context = VideoContext.newBuilder()
        .setSpeechTranscriptionConfig(config)
        .build();
    // Create the request
    AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
        .setInputUri(gcsUri)
        .addFeatures(Feature.SPEECH_TRANSCRIPTION)
        .setVideoContext(context)
        .build();
    // Asynchronously perform speech transcription on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    // Display the results
    for (VideoAnnotationResults results :
        response.get(180, TimeUnit.SECONDS).getAnnotationResultsList()) {
      for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
        try {
          // Print the transcription
          if (speechTranscription.getAlternativesCount() > 0) {
            SpeechRecognitionAlternative alternative = speechTranscription.getAlternatives(0);
            System.out.printf("Transcript: %s\n", alternative.getTranscript());
            System.out.printf("Confidence: %.2f\n", alternative.getConfidence());
            System.out.println("Word level information:");
            for (WordInfo wordInfo : alternative.getWordsList()) {
              double startTime = wordInfo.getStartTime().getSeconds()
                  + wordInfo.getStartTime().getNanos() / 1e9;
              double endTime = wordInfo.getEndTime().getSeconds()
                  + wordInfo.getEndTime().getNanos() / 1e9;
              System.out.printf("\t%4.2fs - %4.2fs: %s\n", startTime, endTime, wordInfo.getWord());
            }
          } else {
            System.out.println("No transcription found");
          }
        } catch (IndexOutOfBoundsException ioe) {
          System.out.println("Could not retrieve transcription: " + ioe.getMessage());
        }
      }
    }
  }
}
// [END video_speech_transcription]
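SpeechTranscriptionConfig accepts more than the language code; for example, a max-alternatives setting asks the service to return several candidate transcripts instead of the single one printed above. A hedged sketch (setMaxAlternatives is assumed to exist in this library version; verify against the v1p1beta1 reference before relying on it):

// Illustrative configuration; verify setMaxAlternatives is available
// in your version of the v1p1beta1 client library.
SpeechTranscriptionConfig config = SpeechTranscriptionConfig.newBuilder()
    .setLanguageCode("en-US")
    .setMaxAlternatives(2)
    .build();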