Use of com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults in project java-video-intelligence by googleapis.
The class TrackObjects, method trackObjects.
// [START video_object_tracking]
/**
 * Track objects in a video.
 *
 * @param filePath the path to the video file to analyze.
 */
public static VideoAnnotationResults trackObjects(String filePath) throws Exception {
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Read the file into memory
    Path path = Paths.get(filePath);
    byte[] data = Files.readAllBytes(path);

    // Create the request
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputContent(ByteString.copyFrom(data))
            .addFeatures(Feature.OBJECT_TRACKING)
            .setLocationId("us-east1")
            .build();

    // Asynchronously perform object tracking on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
        client.annotateVideoAsync(request);

    System.out.println("Waiting for operation to complete...");
    // The first result is retrieved because a single video was processed.
    AnnotateVideoResponse response = future.get(450, TimeUnit.SECONDS);
    VideoAnnotationResults results = response.getAnnotationResults(0);

    // Get only the first annotation for demo purposes.
    ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
    System.out.println("Confidence: " + annotation.getConfidence());

    if (annotation.hasEntity()) {
      Entity entity = annotation.getEntity();
      System.out.println("Entity description: " + entity.getDescription());
      System.out.println("Entity id: " + entity.getEntityId());
    }

    if (annotation.hasSegment()) {
      VideoSegment videoSegment = annotation.getSegment();
      Duration startTimeOffset = videoSegment.getStartTimeOffset();
      Duration endTimeOffset = videoSegment.getEndTimeOffset();
      // Display the segment time in seconds; 1e9 converts nanos to seconds
      System.out.println(
          String.format(
              "Segment: %.2fs to %.2fs",
              startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
              endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
    }

    // Here we print only the bounding box of the first frame in this segment.
    ObjectTrackingFrame frame = annotation.getFrames(0);
    // Display the offset time in seconds; 1e9 converts nanos to seconds
    Duration timeOffset = frame.getTimeOffset();
    System.out.println(
        String.format(
            "Time offset of the first frame: %.2fs",
            timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));

    // Display the bounding box of the detected object
    NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
    System.out.println("Bounding box position:");
    System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
    System.out.println("\ttop: " + normalizedBoundingBox.getTop());
    System.out.println("\tright: " + normalizedBoundingBox.getRight());
    System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
    return results;
  }
}
// [END video_object_tracking]
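For orientation, a minimal caller sketch (not part of the original sample): the file path below is a hypothetical placeholder, and the call assumes Application Default Credentials with the Video Intelligence API enabled.

public static void main(String[] args) throws Exception {
  // Hypothetical local path; replace with a real video file.
  VideoAnnotationResults results = trackObjects("resources/cat.mp4");
  // The sample above prints only the first annotation; the full list is also returned.
  for (ObjectTrackingAnnotation annotation : results.getObjectAnnotationsList()) {
    System.out.println("Tracked entity: " + annotation.getEntity().getDescription());
  }
}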
Use of com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults in project java-video-intelligence by googleapis.
The class Detect, method analyzeLabelsFile.
/**
 * Performs label analysis on the video at the provided file path.
 *
 * @param filePath the path to the video file to analyze.
 */
public static void analyzeLabelsFile(String filePath) throws Exception {
  // [START video_analyze_labels]
  // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Read the file into memory; the client library handles the wire encoding.
    Path path = Paths.get(filePath);
    byte[] data = Files.readAllBytes(path);

    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputContent(ByteString.copyFrom(data))
            .addFeatures(Feature.LABEL_DETECTION)
            .build();

    // Create an operation that will contain the response when the operation completes.
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);

    System.out.println("Waiting for operation to complete...");
    for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
      // Process video / segment-level label annotations
      System.out.println("Locations: ");
      for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
        System.out.println("Video label: " + labelAnnotation.getEntity().getDescription());
        // Categories
        for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
          System.out.println("Video label category: " + categoryEntity.getDescription());
        }
        // Segments
        for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
          double startTime =
              segment.getSegment().getStartTimeOffset().getSeconds()
                  + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
          double endTime =
              segment.getSegment().getEndTimeOffset().getSeconds()
                  + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
          System.out.println("Confidence: " + segment.getConfidence());
        }
      }
      // Process shot label annotations
      for (LabelAnnotation labelAnnotation : results.getShotLabelAnnotationsList()) {
        System.out.println("Shot label: " + labelAnnotation.getEntity().getDescription());
        // Categories
        for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
          System.out.println("Shot label category: " + categoryEntity.getDescription());
        }
        // Segments
        for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
          double startTime =
              segment.getSegment().getStartTimeOffset().getSeconds()
                  + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
          double endTime =
              segment.getSegment().getEndTimeOffset().getSeconds()
                  + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
          System.out.println("Confidence: " + segment.getConfidence());
        }
      }
      // Process frame label annotations
      for (LabelAnnotation labelAnnotation : results.getFrameLabelAnnotationsList()) {
        System.out.println("Frame label: " + labelAnnotation.getEntity().getDescription());
        // Categories
        for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
          System.out.println("Frame label category: " + categoryEntity.getDescription());
        }
        // Segments
        for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
          double startTime =
              segment.getSegment().getStartTimeOffset().getSeconds()
                  + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
          double endTime =
              segment.getSegment().getEndTimeOffset().getSeconds()
                  + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
          System.out.println("Confidence: " + segment.getConfidence());
        }
      }
    }
  }
  // [END video_analyze_labels]
}
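A hedged invocation sketch (the path is a placeholder, not from the original): since the request sends the bytes inline via setInputContent, a short local clip is assumed.

public static void main(String[] args) throws Exception {
  // Hypothetical local path; any short video works for label detection.
  analyzeLabelsFile("resources/googlework_short.mp4");
}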
Use of com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults in project java-video-intelligence by googleapis.
The class Detect, method analyzeShots.
/**
 * Performs shot analysis on the video at the provided Cloud Storage path.
 *
 * @param gcsUri the path to the video file to analyze.
 */
public static void analyzeShots(String gcsUri) throws Exception {
  // [START video_analyze_shots]
  // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Provide path to file hosted on GCS as "gs://bucket-name/..."
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputUri(gcsUri)
            .addFeatures(Feature.SHOT_CHANGE_DETECTION)
            .build();

    // Create an operation that will contain the response when the operation completes.
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);

    System.out.println("Waiting for operation to complete...");
    // Print detected shot changes and their location ranges in the analyzed video.
    for (VideoAnnotationResults result : response.get().getAnnotationResultsList()) {
      if (result.getShotAnnotationsCount() > 0) {
        System.out.println("Shots: ");
        for (VideoSegment segment : result.getShotAnnotationsList()) {
          double startTime =
              segment.getStartTimeOffset().getSeconds()
                  + segment.getStartTimeOffset().getNanos() / 1e9;
          double endTime =
              segment.getEndTimeOffset().getSeconds()
                  + segment.getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Location: %.3f:%.3f\n", startTime, endTime);
        }
      } else {
        System.out.println("No shot changes detected in " + gcsUri);
      }
    }
  }
  // [END video_analyze_shots]
}
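As a usage sketch (placeholder URI, not from the original): shot change detection reads the video directly from Cloud Storage via setInputUri, so no bytes are uploaded inline.

public static void main(String[] args) throws Exception {
  // Placeholder Cloud Storage URI; replace with a bucket and object you can read.
  analyzeShots("gs://my-bucket/my-video.mp4");
}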
Use of com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults in project java-video-intelligence by googleapis.
The class Detect, method speechTranscription.
/**
 * Transcribe speech from a video stored on GCS.
 *
 * @param gcsUri the path to the video file to analyze.
 */
public static void speechTranscription(String gcsUri) throws Exception {
  // [START video_speech_transcription_gcs]
  // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Set the language code
    SpeechTranscriptionConfig config =
        SpeechTranscriptionConfig.newBuilder()
            .setLanguageCode("en-US")
            .setEnableAutomaticPunctuation(true)
            .build();

    // Set the video context with the above configuration
    VideoContext context = VideoContext.newBuilder().setSpeechTranscriptionConfig(config).build();

    // Create the request
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputUri(gcsUri)
            .addFeatures(Feature.SPEECH_TRANSCRIPTION)
            .setVideoContext(context)
            .build();

    // Asynchronously perform speech transcription on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);

    System.out.println("Waiting for operation to complete...");
    // Display the results
    for (VideoAnnotationResults results :
        response.get(600, TimeUnit.SECONDS).getAnnotationResultsList()) {
      for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
        try {
          // Print the transcription
          if (speechTranscription.getAlternativesCount() > 0) {
            SpeechRecognitionAlternative alternative = speechTranscription.getAlternatives(0);
            System.out.printf("Transcript: %s\n", alternative.getTranscript());
            System.out.printf("Confidence: %.2f\n", alternative.getConfidence());
            System.out.println("Word level information:");
            for (WordInfo wordInfo : alternative.getWordsList()) {
              double startTime =
                  wordInfo.getStartTime().getSeconds() + wordInfo.getStartTime().getNanos() / 1e9;
              double endTime =
                  wordInfo.getEndTime().getSeconds() + wordInfo.getEndTime().getNanos() / 1e9;
              System.out.printf("\t%4.2fs - %4.2fs: %s\n", startTime, endTime, wordInfo.getWord());
            }
          } else {
            System.out.println("No transcription found");
          }
        } catch (IndexOutOfBoundsException ioe) {
          System.out.println("Could not retrieve frame: " + ioe.getMessage());
        }
      }
    }
  }
  // [END video_speech_transcription_gcs]
}
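A minimal caller sketch under the same assumptions (placeholder URI): the method already applies a 600-second timeout internally, so the caller only supplies a GCS path to a video containing speech.

public static void main(String[] args) throws Exception {
  // Placeholder GCS URI; the configured language code above is en-US.
  speechTranscription("gs://my-bucket/my-video.mp4");
}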
Use of com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults in project java-video-intelligence by googleapis.
The class DetectFaces, method detectFaces.
// Detects faces in a video stored in a local file using the Cloud Video Intelligence API.
public static void detectFaces(String localFilePath) throws Exception {
  try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
      VideoIntelligenceServiceClient.create()) {
    // Reads the local video file into a ByteString for inline upload.
    Path path = Paths.get(localFilePath);
    byte[] data = Files.readAllBytes(path);
    ByteString inputContent = ByteString.copyFrom(data);

    FaceDetectionConfig faceDetectionConfig =
        FaceDetectionConfig.newBuilder()
            .setIncludeBoundingBoxes(true)
            .setIncludeAttributes(true)
            .build();
    VideoContext videoContext =
        VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();

    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputContent(inputContent)
            .addFeatures(Feature.FACE_DETECTION)
            .setVideoContext(videoContext)
            .build();

    // Detects faces in the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
        videoIntelligenceServiceClient.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    AnnotateVideoResponse response = future.get();

    // Gets annotations for the video; a single video was processed, so use the first result.
    VideoAnnotationResults annotationResult = response.getAnnotationResults(0);

    // Annotations for the list of faces detected, tracked and recognized in the video.
    for (FaceDetectionAnnotation faceDetectionAnnotation :
        annotationResult.getFaceDetectionAnnotationsList()) {
      System.out.print("Face detected:\n");
      for (Track track : faceDetectionAnnotation.getTracksList()) {
        VideoSegment segment = track.getSegment();
        // Display the segment boundaries in seconds; 1e9 converts nanos to seconds.
        System.out.printf(
            "\tStart: %.3fs\n",
            segment.getStartTimeOffset().getSeconds()
                + segment.getStartTimeOffset().getNanos() / 1e9);
        System.out.printf(
            "\tEnd: %.3fs\n",
            segment.getEndTimeOffset().getSeconds()
                + segment.getEndTimeOffset().getNanos() / 1e9);

        // Each segment includes timestamped objects that
        // include characteristics of the face detected.
        TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
        for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
          // Attributes include glasses, headwear, smiling, direction of gaze
          System.out.printf(
              "\tAttribute %s: %s %s\n",
              attribute.getName(), attribute.getValue(), attribute.getConfidence());
        }
      }
    }
  }
}
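Finally, a hypothetical invocation sketch (the path is a placeholder): because this request also sends inline content, a short clip keeps the payload small.

public static void main(String[] args) throws Exception {
  // Hypothetical local path; keep the clip short since the bytes are sent inline.
  detectFaces("resources/googlework_short.mp4");
}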