Use of com.google.cloud.videointelligence.v1.PersonDetectionConfig in project java-video-intelligence by googleapis.
The class DetectPerson, method detectPerson:
// Detects people in a video stored in a local file using the Cloud Video Intelligence API.
public static void detectPerson(String localFilePath) throws Exception {
  try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
      VideoIntelligenceServiceClient.create()) {
    // Reads the local video file into a ByteString for the request payload.
    Path path = Paths.get(localFilePath);
    byte[] data = Files.readAllBytes(path);
    ByteString inputContent = ByteString.copyFrom(data);
    // Configures person detection to return bounding boxes, pose landmarks, and attributes.
    PersonDetectionConfig personDetectionConfig =
        PersonDetectionConfig.newBuilder()
            .setIncludeBoundingBoxes(true)
            .setIncludePoseLandmarks(true)
            .setIncludeAttributes(true)
            .build();
    VideoContext videoContext =
        VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputContent(inputContent)
            .addFeatures(Feature.PERSON_DETECTION)
            .setVideoContext(videoContext)
            .build();
    // Detects people in the video; annotateVideoAsync returns a long-running operation.
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
        videoIntelligenceServiceClient.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    AnnotateVideoResponse response = future.get();
    // Gets annotations for the video. We take the first result because only one video was sent.
    VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
    // Annotations for the people detected, tracked, and recognized in the video.
    for (PersonDetectionAnnotation personDetectionAnnotation :
        annotationResult.getPersonDetectionAnnotationsList()) {
      System.out.print("Person detected:\n");
      for (Track track : personDetectionAnnotation.getTracksList()) {
        VideoSegment segment = track.getSegment();
        System.out.printf("\tStart: %d.%.0fs\n",
            segment.getStartTimeOffset().getSeconds(),
            segment.getStartTimeOffset().getNanos() / 1e6);
        System.out.printf("\tEnd: %d.%.0fs\n",
            segment.getEndTimeOffset().getSeconds(),
            segment.getEndTimeOffset().getNanos() / 1e6);
        // Each segment includes timestamped objects that describe characteristics (e.g. clothes,
        // posture) of the person detected. Grab the first timestamped object in the track.
        TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
        // Attributes describe clothing, posture, and other traits of the person detected.
        for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
          System.out.printf("\tAttribute: %s; Value: %s\n",
              attribute.getName(), attribute.getValue());
        }
        // Landmarks in person detection are body parts, each with an x/y vertex.
        for (DetectedLandmark landmark : firstTimestampedObject.getLandmarksList()) {
          System.out.printf("\tLandmark: %s; Vertex: %f, %f\n",
              landmark.getName(), landmark.getPoint().getX(), landmark.getPoint().getY());
        }
      }
    }
  }
}
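For context, a minimal sketch of a caller for the sample above follows. The wrapper class DetectPersonDemo and the default video path are assumptions added for illustration; DetectPerson.detectPerson comes from the sample itself, and authentication is the standard Application Default Credentials flow.
// Hypothetical entry point for the local-file sample; the default path is a placeholder.
public class DetectPersonDemo {
  public static void main(String[] args) throws Exception {
    // Authentication uses Application Default Credentials, e.g. GOOGLE_APPLICATION_CREDENTIALS.
    String localFilePath = args.length > 0 ? args[0] : "path/to/your-video.mp4";
    DetectPerson.detectPerson(localFilePath);
  }
}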
Use of com.google.cloud.videointelligence.v1.PersonDetectionConfig in project java-video-intelligence by googleapis.
The class DetectPersonGcs, method detectPersonGcs:
// Detects people in a video stored in Google Cloud Storage using
// the Cloud Video Intelligence API.
public static void detectPersonGcs(String gcsUri) throws Exception {
  try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
      VideoIntelligenceServiceClient.create()) {
    // Configures person detection to return bounding boxes, pose landmarks, and attributes.
    PersonDetectionConfig personDetectionConfig =
        PersonDetectionConfig.newBuilder()
            .setIncludeBoundingBoxes(true)
            .setIncludePoseLandmarks(true)
            .setIncludeAttributes(true)
            .build();
    VideoContext videoContext =
        VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputUri(gcsUri)
            .addFeatures(Feature.PERSON_DETECTION)
            .setVideoContext(videoContext)
            .build();
    // Detects people in the video; annotateVideoAsync returns a long-running operation.
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
        videoIntelligenceServiceClient.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    AnnotateVideoResponse response = future.get();
    // Gets the first response, since only one video was sent.
    VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
    // Annotations for the people detected, tracked, and recognized in the video.
    for (PersonDetectionAnnotation personDetectionAnnotation :
        annotationResult.getPersonDetectionAnnotationsList()) {
      System.out.print("Person detected:\n");
      for (Track track : personDetectionAnnotation.getTracksList()) {
        VideoSegment segment = track.getSegment();
        System.out.printf("\tStart: %d.%.0fs\n",
            segment.getStartTimeOffset().getSeconds(),
            segment.getStartTimeOffset().getNanos() / 1e6);
        System.out.printf("\tEnd: %d.%.0fs\n",
            segment.getEndTimeOffset().getSeconds(),
            segment.getEndTimeOffset().getNanos() / 1e6);
        // Each segment includes timestamped objects that describe characteristics (e.g. clothes,
        // posture) of the person detected. Grab the first timestamped object in the track.
        TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
        // Attributes describe clothing, posture, and other traits of the person detected.
        for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
          System.out.printf("\tAttribute: %s; Value: %s\n",
              attribute.getName(), attribute.getValue());
        }
        // Landmarks in person detection are body parts, each with an x/y vertex.
        for (DetectedLandmark landmark : firstTimestampedObject.getLandmarksList()) {
          System.out.printf("\tLandmark: %s; Vertex: %f, %f\n",
              landmark.getName(), landmark.getPoint().getX(), landmark.getPoint().getY());
        }
      }
    }
  }
}
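Similarly, a minimal sketch of a caller for the GCS sample is shown below. The wrapper class DetectPersonGcsDemo and the gs:// URI are placeholders; only DetectPersonGcs.detectPersonGcs comes from the sample above.
// Hypothetical entry point for the GCS sample; the bucket and object name are placeholders.
public class DetectPersonGcsDemo {
  public static void main(String[] args) throws Exception {
    String gcsUri = args.length > 0 ? args[0] : "gs://your-bucket/your-video.mp4";
    DetectPersonGcs.detectPersonGcs(gcsUri);
  }
}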