
Example 1 with DetectedLandmark

Use of com.google.cloud.videointelligence.v1.DetectedLandmark in the java-video-intelligence project by googleapis.

From class DetectPerson, method detectPerson:

// Detects people in a video stored in a local file using the Cloud Video Intelligence API.
public static void detectPerson(String localFilePath) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient = VideoIntelligenceServiceClient.create()) {
        // Reads the local video file into a ByteString to send as the request's input content.
        Path path = Paths.get(localFilePath);
        byte[] data = Files.readAllBytes(path);
        ByteString inputContent = ByteString.copyFrom(data);
        PersonDetectionConfig personDetectionConfig =
            PersonDetectionConfig.newBuilder()
                .setIncludeBoundingBoxes(true)
                .setIncludePoseLandmarks(true)
                .setIncludeAttributes(true)
                .build();
        VideoContext videoContext =
            VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();
        AnnotateVideoRequest request =
            AnnotateVideoRequest.newBuilder()
                .setInputContent(inputContent)
                .addFeatures(Feature.PERSON_DETECTION)
                .setVideoContext(videoContext)
                .build();
        // Detects people in the video.
        OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
            videoIntelligenceServiceClient.annotateVideoAsync(request);
        System.out.println("Waiting for operation to complete...");
        AnnotateVideoResponse response = future.get();
        // Gets the annotation results for the first video; only one video was processed.
        VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
        // Annotations for each person detected, tracked, and recognized in the video.
        for (PersonDetectionAnnotation personDetectionAnnotation : annotationResult.getPersonDetectionAnnotationsList()) {
            System.out.print("Person detected:\n");
            for (Track track : personDetectionAnnotation.getTracksList()) {
                VideoSegment segment = track.getSegment();
                System.out.printf("\tStart: %d.%.0fs\n", segment.getStartTimeOffset().getSeconds(), segment.getStartTimeOffset().getNanos() / 1e6);
                System.out.printf("\tEnd: %d.%.0fs\n", segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);
                // Each segment includes timestamped objects that include characteristic--e.g. clothes,
                // posture of the person detected.
                TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
                // of the person detected.
                for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
                    System.out.printf("\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
                }
                // Landmarks in person detection include body parts.
                for (DetectedLandmark landmark : firstTimestampedObject.getLandmarksList()) {
                    System.out.printf(
                        "\tLandmark: %s; Vertex: %f, %f\n",
                        landmark.getName(), landmark.getPoint().getX(), landmark.getPoint().getY());
                }
            }
        }
    }
}
Also used:
Path (java.nio.file.Path)
AnnotateVideoRequest (com.google.cloud.videointelligence.v1.AnnotateVideoRequest)
ByteString (com.google.protobuf.ByteString)
VideoContext (com.google.cloud.videointelligence.v1.VideoContext)
PersonDetectionAnnotation (com.google.cloud.videointelligence.v1.PersonDetectionAnnotation)
PersonDetectionConfig (com.google.cloud.videointelligence.v1.PersonDetectionConfig)
VideoIntelligenceServiceClient (com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient)
AnnotateVideoProgress (com.google.cloud.videointelligence.v1.AnnotateVideoProgress)
VideoSegment (com.google.cloud.videointelligence.v1.VideoSegment)
DetectedLandmark (com.google.cloud.videointelligence.v1.DetectedLandmark)
TimestampedObject (com.google.cloud.videointelligence.v1.TimestampedObject)
VideoAnnotationResults (com.google.cloud.videointelligence.v1.VideoAnnotationResults)
DetectedAttribute (com.google.cloud.videointelligence.v1.DetectedAttribute)
Track (com.google.cloud.videointelligence.v1.Track)
AnnotateVideoResponse (com.google.cloud.videointelligence.v1.AnnotateVideoResponse)
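
The sample above enables setIncludeBoundingBoxes(true) but never prints the boxes themselves. As a minimal sketch (not part of the original sample, using a hypothetical helper class BoundingBoxPrinter), the per-frame box could be read from each TimestampedObject like this; NormalizedBoundingBox coordinates are fractions of the frame size.

// Hypothetical helper, shown only to illustrate reading the bounding box
// that the PersonDetectionConfig above requests; not part of the sample.
import com.google.cloud.videointelligence.v1.NormalizedBoundingBox;
import com.google.cloud.videointelligence.v1.TimestampedObject;

public class BoundingBoxPrinter {

    static void printBoundingBox(TimestampedObject timestampedObject) {
        // Coordinates are normalized to [0, 1] relative to the video frame.
        NormalizedBoundingBox box = timestampedObject.getNormalizedBoundingBox();
        System.out.printf(
            "\tBounding box: left=%.3f, top=%.3f, right=%.3f, bottom=%.3f\n",
            box.getLeft(), box.getTop(), box.getRight(), box.getBottom());
    }
}

Calling printBoundingBox(firstTimestampedObject) inside the track loop would print one box per track alongside the attributes and landmarks.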

Example 2 with DetectedLandmark

Use of com.google.cloud.videointelligence.v1.DetectedLandmark in the java-video-intelligence project by googleapis.

From class DetectPersonGcs, method detectPersonGcs:

// Detects people in a video stored in Google Cloud Storage using
// the Cloud Video Intelligence API.
public static void detectPersonGcs(String gcsUri) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient = VideoIntelligenceServiceClient.create()) {
        // Configures person detection to include bounding boxes, pose landmarks, and attributes.
        PersonDetectionConfig personDetectionConfig =
            PersonDetectionConfig.newBuilder()
                .setIncludeBoundingBoxes(true)
                .setIncludePoseLandmarks(true)
                .setIncludeAttributes(true)
                .build();
        VideoContext videoContext =
            VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();
        AnnotateVideoRequest request =
            AnnotateVideoRequest.newBuilder()
                .setInputUri(gcsUri)
                .addFeatures(Feature.PERSON_DETECTION)
                .setVideoContext(videoContext)
                .build();
        // Detects people in the video.
        OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
            videoIntelligenceServiceClient.annotateVideoAsync(request);
        System.out.println("Waiting for operation to complete...");
        AnnotateVideoResponse response = future.get();
        // Get the first response, since we sent only one video.
        VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
        // Annotations for each person detected, tracked, and recognized in the video.
        for (PersonDetectionAnnotation personDetectionAnnotation : annotationResult.getPersonDetectionAnnotationsList()) {
            System.out.print("Person detected:\n");
            for (Track track : personDetectionAnnotation.getTracksList()) {
                VideoSegment segment = track.getSegment();
                System.out.printf("\tStart: %d.%.0fs\n", segment.getStartTimeOffset().getSeconds(), segment.getStartTimeOffset().getNanos() / 1e6);
                System.out.printf("\tEnd: %d.%.0fs\n", segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);
                // Each segment includes timestamped objects that include characteristic--e.g. clothes,
                // posture of the person detected.
                TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
                // of the person detected.
                for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
                    System.out.printf("\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
                }
                // Landmarks in person detection include body parts.
                for (DetectedLandmark landmark : firstTimestampedObject.getLandmarksList()) {
                    System.out.printf(
                        "\tLandmark: %s; Vertex: %f, %f\n",
                        landmark.getName(), landmark.getPoint().getX(), landmark.getPoint().getY());
                }
            }
        }
    }
}
Also used:
AnnotateVideoRequest (com.google.cloud.videointelligence.v1.AnnotateVideoRequest)
VideoContext (com.google.cloud.videointelligence.v1.VideoContext)
PersonDetectionAnnotation (com.google.cloud.videointelligence.v1.PersonDetectionAnnotation)
PersonDetectionConfig (com.google.cloud.videointelligence.v1.PersonDetectionConfig)
VideoIntelligenceServiceClient (com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient)
AnnotateVideoProgress (com.google.cloud.videointelligence.v1.AnnotateVideoProgress)
VideoSegment (com.google.cloud.videointelligence.v1.VideoSegment)
DetectedLandmark (com.google.cloud.videointelligence.v1.DetectedLandmark)
TimestampedObject (com.google.cloud.videointelligence.v1.TimestampedObject)
VideoAnnotationResults (com.google.cloud.videointelligence.v1.VideoAnnotationResults)
DetectedAttribute (com.google.cloud.videointelligence.v1.DetectedAttribute)
Track (com.google.cloud.videointelligence.v1.Track)
AnnotateVideoResponse (com.google.cloud.videointelligence.v1.AnnotateVideoResponse)
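
Both methods are static, so a small driver class is enough to run them end to end. The sketch below is not part of the repository's samples: the local path and gs:// URI are placeholders, the driver is assumed to sit in the same package as DetectPerson and DetectPersonGcs, and the client needs Application Default Credentials (for example via the GOOGLE_APPLICATION_CREDENTIALS environment variable) for a project with the Video Intelligence API enabled.

// Hypothetical driver, not part of the repository's samples; it assumes it lives in the
// same package as DetectPerson and DetectPersonGcs. The path and gs:// URI are placeholders.
public class DetectPersonDemo {

    public static void main(String[] args) throws Exception {
        // Example 1: person detection on a local file (placeholder path).
        DetectPerson.detectPerson("resources/my-video.mp4");
        // Example 2: person detection on a file already stored in Cloud Storage (placeholder URI).
        DetectPersonGcs.detectPersonGcs("gs://my-bucket/my-video.mp4");
    }
}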

Aggregations

AnnotateVideoProgress (com.google.cloud.videointelligence.v1.AnnotateVideoProgress): 2 uses
AnnotateVideoRequest (com.google.cloud.videointelligence.v1.AnnotateVideoRequest): 2 uses
AnnotateVideoResponse (com.google.cloud.videointelligence.v1.AnnotateVideoResponse): 2 uses
DetectedAttribute (com.google.cloud.videointelligence.v1.DetectedAttribute): 2 uses
DetectedLandmark (com.google.cloud.videointelligence.v1.DetectedLandmark): 2 uses
PersonDetectionAnnotation (com.google.cloud.videointelligence.v1.PersonDetectionAnnotation): 2 uses
PersonDetectionConfig (com.google.cloud.videointelligence.v1.PersonDetectionConfig): 2 uses
TimestampedObject (com.google.cloud.videointelligence.v1.TimestampedObject): 2 uses
Track (com.google.cloud.videointelligence.v1.Track): 2 uses
VideoAnnotationResults (com.google.cloud.videointelligence.v1.VideoAnnotationResults): 2 uses
VideoContext (com.google.cloud.videointelligence.v1.VideoContext): 2 uses
VideoIntelligenceServiceClient (com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient): 2 uses
VideoSegment (com.google.cloud.videointelligence.v1.VideoSegment): 2 uses
ByteString (com.google.protobuf.ByteString): 1 use
Path (java.nio.file.Path): 1 use