use of com.google.cloud.videointelligence.v1p2beta1.VideoSegment in project java-video-intelligence by googleapis.
the class StreamingShotChangeDetection method streamingShotChangeDetection.
// Perform streaming video detection for shot changes
static void streamingShotChangeDetection(String filePath) throws IOException, TimeoutException, StatusRuntimeException {
try (StreamingVideoIntelligenceServiceClient client = StreamingVideoIntelligenceServiceClient.create()) {
Path path = Paths.get(filePath);
byte[] data = Files.readAllBytes(path);
// Set the chunk size to 5MB (recommended less than 10MB).
int chunkSize = 5 * 1024 * 1024;
int numChunks = (int) Math.ceil((double) data.length / chunkSize);
// Use the shot-change config matching the requested feature (the label-detection config in the original does not apply to shot change detection).
StreamingShotChangeDetectionConfig shotChangeConfig = StreamingShotChangeDetectionConfig.newBuilder().build();
StreamingVideoConfig streamingVideoConfig = StreamingVideoConfig.newBuilder().setFeature(StreamingFeature.STREAMING_SHOT_CHANGE_DETECTION).setShotChangeDetectionConfig(shotChangeConfig).build();
BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call = client.streamingAnnotateVideoCallable().call();
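// BidiStream is a bidirectional gRPC stream: requests are pushed with send() and
// streamed responses are consumed by iterating over the stream object.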
// The first request must **only** contain the video configuration:
call.send(StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
// Send the requests in chunks
for (int i = 0; i < numChunks; i++) {
// Clamp the final chunk so copyOfRange does not zero-pad past the end of the data.
call.send(StreamingAnnotateVideoRequest.newBuilder().setInputContent(ByteString.copyFrom(Arrays.copyOfRange(data, i * chunkSize, Math.min(data.length, (i + 1) * chunkSize)))).build());
}
// Tell the service you are done sending data
call.closeSend();
for (StreamingAnnotateVideoResponse response : call) {
StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();
if (response.hasError()) {
System.out.println(response.getError().getMessage());
System.out.format("Error was occured with the following status: %s\n", response.getError());
}
for (VideoSegment segment : annotationResults.getShotAnnotationsList()) {
double startTimeOffset = segment.getStartTimeOffset().getSeconds() + segment.getStartTimeOffset().getNanos() / 1e9;
double endTimeOffset = segment.getEndTimeOffset().getSeconds() + segment.getEndTimeOffset().getNanos() / 1e9;
System.out.format("Shot: %fs to %fs\n", startTimeOffset, endTimeOffset);
}
}
}
}
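A minimal caller for this sample might look as follows; the main method and the local file path are illustrative, not part of the original class.
public static void main(String[] args) throws Exception {
  // Any readable local video file works here; the path is a placeholder.
  streamingShotChangeDetection("path/to/your/video.mp4");
}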
use of com.google.cloud.videointelligence.v1p2beta1.VideoSegment in project java-video-intelligence by googleapis.
the class TextDetection method detectText.
// [START video_detect_text_beta]
/**
* Detect text in a video.
*
* @param filePath the path to the video file to analyze.
*/
public static VideoAnnotationResults detectText(String filePath) throws IOException, StatusRuntimeException, TimeoutException, ExecutionException, InterruptedException {
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Read file
Path path = Paths.get(filePath);
byte[] data = Files.readAllBytes(path);
// Create the request
AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder().setInputContent(ByteString.copyFrom(data)).addFeatures(Feature.TEXT_DETECTION).build();
// Asynchronously perform text detection on the video.
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future = client.annotateVideoAsync(request);
System.out.println("Waiting for operation to complete...");
// The first result is retrieved because a single video was processed.
AnnotateVideoResponse response = future.get(600, TimeUnit.SECONDS);
VideoAnnotationResults results = response.getAnnotationResults(0);
// Get only the first annotation for demo purposes.
TextAnnotation annotation = results.getTextAnnotations(0);
System.out.println("Text: " + annotation.getText());
// Get the first text segment.
TextSegment textSegment = annotation.getSegments(0);
System.out.println("Confidence: " + textSegment.getConfidence());
// For the text segment, display its time offset.
VideoSegment videoSegment = textSegment.getSegment();
Duration startTimeOffset = videoSegment.getStartTimeOffset();
Duration endTimeOffset = videoSegment.getEndTimeOffset();
// Display the offset times in seconds; dividing nanos by 1e9 converts them to seconds.
System.out.println(String.format("Start time: %.2f", startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
System.out.println(String.format("End time: %.2f", endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
// Show the first result for the first frame in the segment.
TextFrame textFrame = textSegment.getFrames(0);
Duration timeOffset = textFrame.getTimeOffset();
System.out.println(String.format("Time offset for the first frame: %.2f", timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
// Display the rotated bounding box for where the text is on the frame.
System.out.println("Rotated Bounding Box Vertices:");
List<NormalizedVertex> vertices = textFrame.getRotatedBoundingBox().getVerticesList();
for (NormalizedVertex normalizedVertex : vertices) {
System.out.println(String.format("\tVertex.x: %.2f, Vertex.y: %.2f", normalizedVertex.getX(), normalizedVertex.getY()));
}
return results;
}
}
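Each sample repeats the same seconds-plus-nanos arithmetic when printing a com.google.protobuf.Duration. A small helper, hypothetical and not part of the repository, captures that conversion once:
// Hypothetical utility: convert a protobuf Duration to fractional seconds.
private static double toSeconds(com.google.protobuf.Duration duration) {
  // One second equals 1e9 nanoseconds.
  return duration.getSeconds() + duration.getNanos() / 1e9;
}
With it, the printouts above reduce to calls like toSeconds(startTimeOffset).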
use of com.google.cloud.videointelligence.v1p2beta1.VideoSegment in project java-video-intelligence by googleapis.
the class TrackObjects method trackObjects.
// [START video_object_tracking_beta]
/**
* Track objects in a video.
*
* @param filePath the path to the video file to analyze.
*/
public static VideoAnnotationResults trackObjects(String filePath) throws Exception {
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Read file
Path path = Paths.get(filePath);
byte[] data = Files.readAllBytes(path);
// Create the request
AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder().setInputContent(ByteString.copyFrom(data)).addFeatures(Feature.OBJECT_TRACKING).setLocationId("us-east1").build();
// asynchronously perform object tracking on videos
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future = client.annotateVideoAsync(request);
System.out.println("Waiting for operation to complete...");
// The first result is retrieved because a single video was processed.
AnnotateVideoResponse response = future.get(600, TimeUnit.SECONDS);
VideoAnnotationResults results = response.getAnnotationResults(0);
// Get only the first annotation for demo purposes.
ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
System.out.println("Confidence: " + annotation.getConfidence());
if (annotation.hasEntity()) {
Entity entity = annotation.getEntity();
System.out.println("Entity description: " + entity.getDescription());
System.out.println("Entity id:: " + entity.getEntityId());
}
if (annotation.hasSegment()) {
VideoSegment videoSegment = annotation.getSegment();
Duration startTimeOffset = videoSegment.getStartTimeOffset();
Duration endTimeOffset = videoSegment.getEndTimeOffset();
// Display the segment time in seconds, 1e9 converts nanos to seconds
System.out.println(String.format("Segment: %.2fs to %.2fs", startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9, endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
}
// Here we print only the bounding box of the first frame in this segment.
ObjectTrackingFrame frame = annotation.getFrames(0);
// Display the offset time in seconds, 1e9 converts nanos to seconds
Duration timeOffset = frame.getTimeOffset();
System.out.println(String.format("Time offset of the first frame: %.2fs", timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
// Display the bounding box of the detected object
NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
System.out.println("Bounding box position:");
System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
System.out.println("\ttop: " + normalizedBoundingBox.getTop());
System.out.println("\tright: " + normalizedBoundingBox.getRight());
System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
return results;
}
}
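The sample prints only the first tracked frame for brevity; a sketch under the same imports that walks every frame of the annotation:
// Iterate over every tracked frame instead of only the first.
for (ObjectTrackingFrame trackedFrame : annotation.getFramesList()) {
  Duration offset = trackedFrame.getTimeOffset();
  System.out.println(String.format("Frame at: %.2fs", offset.getSeconds() + offset.getNanos() / 1e9));
}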
use of com.google.cloud.videointelligence.v1p2beta1.VideoSegment in project java-video-intelligence by googleapis.
the class Detect method analyzeShots.
/**
* Performs shot analysis on the video at the provided Cloud Storage path.
*
* @param gcsUri the path to the video file to analyze.
*/
public static void analyzeShots(String gcsUri) throws Exception {
// Instantiate a VideoIntelligenceServiceClient
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Provide path to file hosted on GCS as "gs://bucket-name/..."
AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder().setInputUri(gcsUri).addFeatures(Feature.SHOT_CHANGE_DETECTION).build();
// Create an operation that will contain the response when the operation completes.
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response = client.annotateVideoAsync(request);
System.out.println("Waiting for operation to complete...");
// Print detected shot changes and their location ranges in the analyzed video.
for (VideoAnnotationResults result : response.get().getAnnotationResultsList()) {
if (result.getShotAnnotationsCount() > 0) {
System.out.println("Shots: ");
for (VideoSegment segment : result.getShotAnnotationsList()) {
double startTime = segment.getStartTimeOffset().getSeconds() + segment.getStartTimeOffset().getNanos() / 1e9;
double endTime = segment.getEndTimeOffset().getSeconds() + segment.getEndTimeOffset().getNanos() / 1e9;
System.out.printf("Location: %.3f:%.3f\n", startTime, endTime);
}
} else {
System.out.println("No shot changes detected in " + gcsUri);
}
}
}
// [END video_analyze_shots]
}
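Calling the method only requires a Cloud Storage URI; the bucket and object below are placeholders.
public static void main(String[] args) throws Exception {
  // Substitute any gs:// URI the caller can read.
  analyzeShots("gs://YOUR_BUCKET/your-video.mp4");
}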
use of com.google.cloud.videointelligence.v1p2beta1.VideoSegment in project java-video-intelligence by googleapis.
the class LogoDetectionGcs method detectLogoGcs.
public static void detectLogoGcs(String inputUri) throws IOException, ExecutionException, InterruptedException, TimeoutException {
// the "close" method on the client to safely clean up any remaining background resources.
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Create the request
AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder().setInputUri(inputUri).addFeatures(Feature.LOGO_RECOGNITION).build();
// Asynchronously perform logo recognition on the video.
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future = client.annotateVideoAsync(request);
System.out.println("Waiting for operation to complete...");
// The first result is retrieved because a single video was processed.
AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
VideoAnnotationResults annotationResult = response.getAnnotationResults(0);
// Annotations for the list of logos detected, tracked, and recognized in the video.
for (LogoRecognitionAnnotation logoRecognitionAnnotation : annotationResult.getLogoRecognitionAnnotationsList()) {
Entity entity = logoRecognitionAnnotation.getEntity();
// Opaque entity ID. Some IDs may be available in
// [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
System.out.printf("Entity Id : %s\n", entity.getEntityId());
System.out.printf("Description : %s\n", entity.getDescription());
// All logo tracks where the recognized logo appears. Each track corresponds to one logo
// instance appearing in consecutive frames.
for (Track track : logoRecognitionAnnotation.getTracksList()) {
// Video segment of a track.
Duration startTimeOffset = track.getSegment().getStartTimeOffset();
System.out.printf("\n\tStart Time Offset: %s.%s\n", startTimeOffset.getSeconds(), startTimeOffset.getNanos());
Duration endTimeOffset = track.getSegment().getEndTimeOffset();
System.out.printf("\tEnd Time Offset: %s.%s\n", endTimeOffset.getSeconds(), endTimeOffset.getNanos());
System.out.printf("\tConfidence: %s\n", track.getConfidence());
// The object with timestamp and attributes per frame in the track.
for (TimestampedObject timestampedObject : track.getTimestampedObjectsList()) {
// Normalized Bounding box in a frame, where the object is located.
NormalizedBoundingBox normalizedBoundingBox = timestampedObject.getNormalizedBoundingBox();
System.out.printf("\n\t\tLeft: %s\n", normalizedBoundingBox.getLeft());
System.out.printf("\t\tTop: %s\n", normalizedBoundingBox.getTop());
System.out.printf("\t\tRight: %s\n", normalizedBoundingBox.getRight());
System.out.printf("\t\tBottom: %s\n", normalizedBoundingBox.getBottom());
// Optional. The attributes of the object in the bounding box.
for (DetectedAttribute attribute : timestampedObject.getAttributesList()) {
System.out.printf("\n\t\t\tName: %s\n", attribute.getName());
System.out.printf("\t\t\tConfidence: %s\n", attribute.getConfidence());
System.out.printf("\t\t\tValue: %s\n", attribute.getValue());
}
}
// Optional. Attributes in the track level.
for (DetectedAttribute trackAttribute : track.getAttributesList()) {
System.out.printf("\n\t\tName : %s\n", trackAttribute.getName());
System.out.printf("\t\tConfidence : %s\n", trackAttribute.getConfidence());
System.out.printf("\t\tValue : %s\n", trackAttribute.getValue());
}
}
// All video segments where the recognized logo appears. There might be multiple instances
// of the same logo class appearing in one VideoSegment.
for (VideoSegment segment : logoRecognitionAnnotation.getSegmentsList()) {
System.out.printf("\n\tStart Time Offset : %s.%s\n", segment.getStartTimeOffset().getSeconds(), segment.getStartTimeOffset().getNanos());
System.out.printf("\tEnd Time Offset : %s.%s\n", segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos());
}
}
}
}
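Like the other asynchronous samples, detectLogoGcs can throw TimeoutException from future.get(300, TimeUnit.SECONDS); a caller sketch (the URI is a placeholder) that surfaces those failures:
public static void main(String[] args) {
  try {
    detectLogoGcs("gs://YOUR_BUCKET/your-video.mp4");
  } catch (IOException | ExecutionException | InterruptedException | TimeoutException e) {
    // Report any I/O, execution, interruption, or timeout failure.
    System.err.println("Logo detection failed: " + e);
  }
}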