Use of com.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation in project java-video-intelligence by googleapis.
The class TrackObjects, method trackObjects.
// [START video_object_tracking_beta]
/**
 * Track objects in a video.
 *
 * @param filePath the path to the video file to analyze.
 */
public static VideoAnnotationResults trackObjects(String filePath) throws Exception {
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Read the file
    Path path = Paths.get(filePath);
    byte[] data = Files.readAllBytes(path);

    // Create the request
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputContent(ByteString.copyFrom(data))
            .addFeatures(Feature.OBJECT_TRACKING)
            .setLocationId("us-east1")
            .build();

    // Asynchronously perform object tracking on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
        client.annotateVideoAsync(request);

    System.out.println("Waiting for operation to complete...");
    AnnotateVideoResponse response = future.get(600, TimeUnit.SECONDS);

    // The first result is retrieved because a single video was processed.
    VideoAnnotationResults results = response.getAnnotationResults(0);

    // Get only the first annotation for demo purposes.
    ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
    System.out.println("Confidence: " + annotation.getConfidence());

    if (annotation.hasEntity()) {
      Entity entity = annotation.getEntity();
      System.out.println("Entity description: " + entity.getDescription());
      System.out.println("Entity id: " + entity.getEntityId());
    }

    if (annotation.hasSegment()) {
      VideoSegment videoSegment = annotation.getSegment();
      Duration startTimeOffset = videoSegment.getStartTimeOffset();
      Duration endTimeOffset = videoSegment.getEndTimeOffset();
      // Display the segment time in seconds; 1e9 converts nanos to seconds
      System.out.println(
          String.format(
              "Segment: %.2fs to %.2fs",
              startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
              endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
    }

    // Here we print only the bounding box of the first frame in this segment.
    ObjectTrackingFrame frame = annotation.getFrames(0);
    // Display the frame's time offset in seconds; 1e9 converts nanos to seconds
    Duration timeOffset = frame.getTimeOffset();
    System.out.println(
        String.format(
            "Time offset of the first frame: %.2fs",
            timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));

    // Display the bounding box of the detected object
    NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
    System.out.println("Bounding box position:");
    System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
    System.out.println("\ttop: " + normalizedBoundingBox.getTop());
    System.out.println("\tright: " + normalizedBoundingBox.getRight());
    System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
    return results;
  }
}
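The excerpts on this page omit their import statements. A minimal sketch of the imports they appear to rely on, assuming the v1p2beta1 client library and the protobuf runtime:

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p2beta1.Entity;
import com.google.cloud.videointelligence.v1p2beta1.Feature;
import com.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingBox;
import com.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation;
import com.google.cloud.videointelligence.v1p2beta1.ObjectTrackingFrame;
import com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1p2beta1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1p2beta1.VideoSegment;
import com.google.protobuf.ByteString;
import com.google.protobuf.Duration;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.TimeUnit;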
Use of com.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation in project java-video-intelligence by googleapis.
The class TrackObjects, method trackObjects.
// [START video_object_tracking]
/**
 * Track objects in a video.
 *
 * @param filePath the path to the video file to analyze.
 */
public static VideoAnnotationResults trackObjects(String filePath) throws Exception {
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Read the file
    Path path = Paths.get(filePath);
    byte[] data = Files.readAllBytes(path);

    // Create the request
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputContent(ByteString.copyFrom(data))
            .addFeatures(Feature.OBJECT_TRACKING)
            .setLocationId("us-east1")
            .build();

    // Asynchronously perform object tracking on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
        client.annotateVideoAsync(request);

    System.out.println("Waiting for operation to complete...");
    AnnotateVideoResponse response = future.get(450, TimeUnit.SECONDS);

    // The first result is retrieved because a single video was processed.
    VideoAnnotationResults results = response.getAnnotationResults(0);

    // Get only the first annotation for demo purposes.
    ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
    System.out.println("Confidence: " + annotation.getConfidence());

    if (annotation.hasEntity()) {
      Entity entity = annotation.getEntity();
      System.out.println("Entity description: " + entity.getDescription());
      System.out.println("Entity id: " + entity.getEntityId());
    }

    if (annotation.hasSegment()) {
      VideoSegment videoSegment = annotation.getSegment();
      Duration startTimeOffset = videoSegment.getStartTimeOffset();
      Duration endTimeOffset = videoSegment.getEndTimeOffset();
      // Display the segment time in seconds; 1e9 converts nanos to seconds
      System.out.println(
          String.format(
              "Segment: %.2fs to %.2fs",
              startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
              endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
    }

    // Here we print only the bounding box of the first frame in this segment.
    ObjectTrackingFrame frame = annotation.getFrames(0);
    // Display the frame's time offset in seconds; 1e9 converts nanos to seconds
    Duration timeOffset = frame.getTimeOffset();
    System.out.println(
        String.format(
            "Time offset of the first frame: %.2fs",
            timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));

    // Display the bounding box of the detected object
    NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
    System.out.println("Bounding box position:");
    System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
    System.out.println("\ttop: " + normalizedBoundingBox.getTop());
    System.out.println("\tright: " + normalizedBoundingBox.getRight());
    System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
    return results;
  }
}
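A hypothetical driver showing how trackObjects might be invoked; the file path is illustrative only and not taken from the source:

public static void main(String[] args) throws Exception {
  // Hypothetical sample path; replace with a real local video file.
  VideoAnnotationResults results = trackObjects("resources/cat.mp4");
  System.out.println("Tracked objects: " + results.getObjectAnnotationsCount());
}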
Use of com.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation in project java-video-intelligence by googleapis.
The class TrackObjects, method trackObjectsGcs.
// [END video_object_tracking]
// [START video_object_tracking_gcs]
/**
 * Track objects in a video stored in Google Cloud Storage.
 *
 * @param gcsUri the Google Cloud Storage URI of the video file to analyze.
 */
public static VideoAnnotationResults trackObjectsGcs(String gcsUri) throws Exception {
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Create the request
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputUri(gcsUri)
            .addFeatures(Feature.OBJECT_TRACKING)
            .setLocationId("us-east1")
            .build();

    // Asynchronously perform object tracking on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
        client.annotateVideoAsync(request);

    System.out.println("Waiting for operation to complete...");
    AnnotateVideoResponse response = future.get(450, TimeUnit.SECONDS);

    // The first result is retrieved because a single video was processed.
    VideoAnnotationResults results = response.getAnnotationResults(0);

    // Get only the first annotation for demo purposes.
    ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
    System.out.println("Confidence: " + annotation.getConfidence());

    if (annotation.hasEntity()) {
      Entity entity = annotation.getEntity();
      System.out.println("Entity description: " + entity.getDescription());
      System.out.println("Entity id: " + entity.getEntityId());
    }

    if (annotation.hasSegment()) {
      VideoSegment videoSegment = annotation.getSegment();
      Duration startTimeOffset = videoSegment.getStartTimeOffset();
      Duration endTimeOffset = videoSegment.getEndTimeOffset();
      // Display the segment time in seconds; 1e9 converts nanos to seconds
      System.out.println(
          String.format(
              "Segment: %.2fs to %.2fs",
              startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
              endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
    }

    // Here we print only the bounding box of the first frame in this segment.
    ObjectTrackingFrame frame = annotation.getFrames(0);
    // Display the frame's time offset in seconds; 1e9 converts nanos to seconds
    Duration timeOffset = frame.getTimeOffset();
    System.out.println(
        String.format(
            "Time offset of the first frame: %.2fs",
            timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));

    // Display the bounding box of the detected object
    NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
    System.out.println("Bounding box position:");
    System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
    System.out.println("\ttop: " + normalizedBoundingBox.getTop());
    System.out.println("\tright: " + normalizedBoundingBox.getRight());
    System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
    return results;
  }
}
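A hypothetical driver for the Cloud Storage variant; the gs:// URI is illustrative only and not taken from the source:

public static void main(String[] args) throws Exception {
  // Hypothetical bucket and object name; replace with a real GCS URI.
  VideoAnnotationResults results = trackObjectsGcs("gs://my-bucket/my-video.mp4");
  System.out.println("Tracked objects: " + results.getObjectAnnotationsCount());
}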
Use of com.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation in project java-video-intelligence by googleapis.
The class TrackObjects, method trackObjectsGcs.
// [END video_object_tracking]
// [START video_object_tracking_gcs]
/**
 * Track objects in a video stored in Google Cloud Storage.
 *
 * @param gcsUri the Google Cloud Storage URI of the video file to analyze.
 */
public static VideoAnnotationResults trackObjectsGcs(String gcsUri) throws Exception {
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Create the request
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputUri(gcsUri)
            .addFeatures(Feature.OBJECT_TRACKING)
            .setLocationId("us-east1")
            .build();

    // Asynchronously perform object tracking on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
        client.annotateVideoAsync(request);

    System.out.println("Waiting for operation to complete...");
    AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);

    // The first result is retrieved because a single video was processed.
    VideoAnnotationResults results = response.getAnnotationResults(0);

    // Get only the first annotation for demo purposes.
    ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
    System.out.println("Confidence: " + annotation.getConfidence());

    if (annotation.hasEntity()) {
      Entity entity = annotation.getEntity();
      System.out.println("Entity description: " + entity.getDescription());
      System.out.println("Entity id: " + entity.getEntityId());
    }

    if (annotation.hasSegment()) {
      VideoSegment videoSegment = annotation.getSegment();
      Duration startTimeOffset = videoSegment.getStartTimeOffset();
      Duration endTimeOffset = videoSegment.getEndTimeOffset();
      // Display the segment time in seconds; 1e9 converts nanos to seconds
      System.out.println(
          String.format(
              "Segment: %.2fs to %.2fs",
              startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
              endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
    }

    // Here we print only the bounding box of the first frame in this segment.
    ObjectTrackingFrame frame = annotation.getFrames(0);
    // Display the frame's time offset in seconds; 1e9 converts nanos to seconds
    Duration timeOffset = frame.getTimeOffset();
    System.out.println(
        String.format(
            "Time offset of the first frame: %.2fs",
            timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));

    // Display the bounding box of the detected object
    NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
    System.out.println("Bounding box position:");
    System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
    System.out.println("\ttop: " + normalizedBoundingBox.getTop());
    System.out.println("\tright: " + normalizedBoundingBox.getRight());
    System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
    return results;
  }
}
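The snippets above read only the first annotation and first frame for demo purposes. A sketch of how all tracked objects and their frames could be walked instead, using the repeated-field list accessors that the protobuf-generated classes provide:

// Sketch: iterate every tracked object and every frame, not just the first.
for (ObjectTrackingAnnotation annotation : results.getObjectAnnotationsList()) {
  System.out.println("Confidence: " + annotation.getConfidence());
  if (annotation.hasEntity()) {
    System.out.println("Entity: " + annotation.getEntity().getDescription());
  }
  for (ObjectTrackingFrame frame : annotation.getFramesList()) {
    Duration offset = frame.getTimeOffset();
    // Convert the protobuf Duration to fractional seconds
    System.out.println(
        String.format("  frame at %.2fs", offset.getSeconds() + offset.getNanos() / 1e9));
  }
}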