Use of com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults in project java-video-intelligence by googleapis.
From the class StreamingObjectTracking, the method streamingObjectTracking.
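Both snippets below are shown without their import block. For reference, a plausible import set, inferred from the class names used (verify against the actual source files in the repository):

import com.google.api.gax.rpc.BidiStream;
import com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation;
import com.google.cloud.videointelligence.v1p3beta1.LabelFrame;
import com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation;
import com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig;
import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
import com.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
import com.google.protobuf.ByteString;
import io.grpc.StatusRuntimeException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.concurrent.TimeoutException;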
// Perform streaming video object tracking.
static void streamingObjectTracking(String filePath)
    throws IOException, TimeoutException, StatusRuntimeException {
  try (StreamingVideoIntelligenceServiceClient client =
      StreamingVideoIntelligenceServiceClient.create()) {
    Path path = Paths.get(filePath);
    byte[] data = Files.readAllBytes(path);
    // Set the chunk size to 5MB (recommended to stay under 10MB).
    int chunkSize = 5 * 1024 * 1024;
    int numChunks = (int) Math.ceil((double) data.length / chunkSize);
    StreamingLabelDetectionConfig labelConfig =
        StreamingLabelDetectionConfig.newBuilder().setStationaryCamera(false).build();
    StreamingVideoConfig streamingVideoConfig =
        StreamingVideoConfig.newBuilder()
            .setFeature(StreamingFeature.STREAMING_OBJECT_TRACKING)
            .setLabelDetectionConfig(labelConfig)
            .build();
    BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call =
        client.streamingAnnotateVideoCallable().call();
    // The first request must **only** contain the video configuration:
    call.send(
        StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
    // Send the video content in chunks, clamping the final chunk to the end of the
    // array so it is not zero-padded by Arrays.copyOfRange.
    for (int i = 0; i < numChunks; i++) {
      int end = Math.min(data.length, (i + 1) * chunkSize);
      call.send(
          StreamingAnnotateVideoRequest.newBuilder()
              .setInputContent(ByteString.copyFrom(Arrays.copyOfRange(data, i * chunkSize, end)))
              .build());
    }
    // Tell the service you are done sending data.
    call.closeSend();
    for (StreamingAnnotateVideoResponse response : call) {
      StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();
      for (ObjectTrackingAnnotation objectAnnotations :
          annotationResults.getObjectAnnotationsList()) {
        String entity = objectAnnotations.getEntity().getDescription();
        float confidence = objectAnnotations.getConfidence();
        long trackId = objectAnnotations.getTrackId();
        System.out.format("%s: %f (ID: %d)\n", entity, confidence, trackId);
        // In streaming, each annotation carries exactly one frame.
        ObjectTrackingFrame frame = objectAnnotations.getFrames(0);
        double offset =
            frame.getTimeOffset().getSeconds() + frame.getTimeOffset().getNanos() / 1e9;
        System.out.format("Offset: %f\n", offset);
        System.out.println("Bounding Box:");
        System.out.format("\tLeft: %f\n", frame.getNormalizedBoundingBox().getLeft());
        System.out.format("\tTop: %f\n", frame.getNormalizedBoundingBox().getTop());
        System.out.format("\tRight: %f\n", frame.getNormalizedBoundingBox().getRight());
        System.out.format("\tBottom: %f\n", frame.getNormalizedBoundingBox().getBottom());
      }
    }
  }
}
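A minimal, hypothetical caller for trying this sample locally (the file path below is a placeholder, not part of the original snippet; application-default credentials are assumed to be configured):

public static void main(String[] args) throws Exception {
  // Placeholder path for illustration only; point it at a real local video file.
  streamingObjectTracking("path/to/your-video.mp4");
}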
Use of com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults in project java-video-intelligence by googleapis.
From the class StreamingAutoMlActionRecognition, the method streamingAutoMlActionRecognition.
// Perform streaming video action recognition with an AutoML model.
static void streamingAutoMlActionRecognition(String filePath, String projectId, String modelId)
    throws IOException, TimeoutException, StatusRuntimeException {
  try (StreamingVideoIntelligenceServiceClient client =
      StreamingVideoIntelligenceServiceClient.create()) {
    Path path = Paths.get(filePath);
    byte[] data = Files.readAllBytes(path);
    // Set the chunk size to 5MB (recommended to stay under 10MB).
    int chunkSize = 5 * 1024 * 1024;
    int numChunks = (int) Math.ceil((double) data.length / chunkSize);
    String modelPath =
        String.format("projects/%s/locations/us-central1/models/%s", projectId, modelId);
    System.out.println(modelPath);
    StreamingAutomlActionRecognitionConfig streamingAutomlActionRecognitionConfig =
        StreamingAutomlActionRecognitionConfig.newBuilder().setModelName(modelPath).build();
    StreamingVideoConfig streamingVideoConfig =
        StreamingVideoConfig.newBuilder()
            .setFeature(StreamingFeature.STREAMING_AUTOML_ACTION_RECOGNITION)
            .setAutomlActionRecognitionConfig(streamingAutomlActionRecognitionConfig)
            .build();
    BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call =
        client.streamingAnnotateVideoCallable().call();
    // The first request must **only** contain the video configuration:
    call.send(
        StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
    // Send the video content in chunks, clamping the final chunk to the end of the
    // array so it is not zero-padded by Arrays.copyOfRange.
    for (int i = 0; i < numChunks; i++) {
      int end = Math.min(data.length, (i + 1) * chunkSize);
      call.send(
          StreamingAnnotateVideoRequest.newBuilder()
              .setInputContent(ByteString.copyFrom(Arrays.copyOfRange(data, i * chunkSize, end)))
              .build());
    }
    // Tell the service you are done sending data.
    call.closeSend();
    for (StreamingAnnotateVideoResponse response : call) {
      if (response.hasError()) {
        System.out.println(response.getError().getMessage());
        break;
      }
      StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();
      for (LabelAnnotation annotation : annotationResults.getLabelAnnotationsList()) {
        String entity = annotation.getEntity().getDescription();
        // There is only one frame per annotation in streaming mode.
        LabelFrame labelFrame = annotation.getFrames(0);
        double offset =
            labelFrame.getTimeOffset().getSeconds()
                + labelFrame.getTimeOffset().getNanos() / 1e9;
        float confidence = labelFrame.getConfidence();
        System.out.format("At %fs segment: %s (%f)\n", offset, entity, confidence);
      }
    }
    System.out.println("Video streamed successfully.");
  }
}
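Likewise, a hypothetical caller for the AutoML sample (all three argument values below are placeholders; the model is assumed to be an AutoML action recognition model deployed in us-central1, matching the modelPath format above):

public static void main(String[] args) throws Exception {
  // Placeholder values for illustration only.
  streamingAutoMlActionRecognition(
      "path/to/your-video.mp4", "your-project-id", "your-model-id");
}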