Use of com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig in project java-video-intelligence by googleapis.
From the class StreamingAutoMlActionRecognition, the method streamingAutoMlActionRecognition:
import com.google.api.gax.rpc.BidiStream;
import com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation;
import com.google.cloud.videointelligence.v1p3beta1.LabelFrame;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig;
import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
import com.google.protobuf.ByteString;
import io.grpc.StatusRuntimeException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.concurrent.TimeoutException;

// Perform streaming video action recognition with a custom AutoML model.
static void streamingAutoMlActionRecognition(String filePath, String projectId, String modelId)
    throws IOException, TimeoutException, StatusRuntimeException {
  try (StreamingVideoIntelligenceServiceClient client =
      StreamingVideoIntelligenceServiceClient.create()) {
    Path path = Paths.get(filePath);
    byte[] data = Files.readAllBytes(path);
    // Set the chunk size to 5 MB (the service recommends chunks smaller than 10 MB).
    int chunkSize = 5 * 1024 * 1024;
    int numChunks = (int) Math.ceil((double) data.length / chunkSize);
    String modelPath =
        String.format("projects/%s/locations/us-central1/models/%s", projectId, modelId);
    System.out.println(modelPath);
    StreamingAutomlActionRecognitionConfig streamingAutomlActionRecognitionConfig =
        StreamingAutomlActionRecognitionConfig.newBuilder().setModelName(modelPath).build();
    StreamingVideoConfig streamingVideoConfig =
        StreamingVideoConfig.newBuilder()
            .setFeature(StreamingFeature.STREAMING_AUTOML_ACTION_RECOGNITION)
            .setAutomlActionRecognitionConfig(streamingAutomlActionRecognitionConfig)
            .build();
    BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call =
        client.streamingAnnotateVideoCallable().call();
    // The first request must **only** contain the video configuration:
    call.send(
        StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
    // Send the video content in chunks. Clamp the end of the last chunk to the array
    // length so Arrays.copyOfRange does not zero-pad past the end of the data.
    for (int i = 0; i < numChunks; i++) {
      int end = Math.min(data.length, (i + 1) * chunkSize);
      call.send(
          StreamingAnnotateVideoRequest.newBuilder()
              .setInputContent(ByteString.copyFrom(Arrays.copyOfRange(data, i * chunkSize, end)))
              .build());
    }
    // Tell the service you are done sending data.
    call.closeSend();
    for (StreamingAnnotateVideoResponse response : call) {
      if (response.hasError()) {
        System.out.println(response.getError().getMessage());
        break;
      }
      StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();
      for (LabelAnnotation annotation : annotationResults.getLabelAnnotationsList()) {
        String entity = annotation.getEntity().getDescription();
        // There is only one frame per annotation.
        LabelFrame labelFrame = annotation.getFrames(0);
        double offset =
            labelFrame.getTimeOffset().getSeconds() + labelFrame.getTimeOffset().getNanos() / 1e9;
        float confidence = labelFrame.getConfidence();
        System.out.format("At %fs segment: %s (%f)\n", offset, entity, confidence);
      }
    }
    System.out.println("Video streamed successfully.");
  }
}
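
For reference, a minimal sketch of how this method might be invoked. The file path, project ID, and model ID below are hypothetical placeholders, and the sketch assumes the AutoML Video Intelligence model is already deployed in us-central1 and that application default credentials are configured.

// Hypothetical caller; substitute your own video file, Google Cloud project ID,
// and deployed AutoML action recognition model ID.
public static void main(String[] args) throws Exception {
  String filePath = "path/to/your/video.mp4"; // placeholder local video file
  String projectId = "your-project-id";       // placeholder project ID
  String modelId = "your-model-id";           // placeholder AutoML model ID
  streamingAutoMlActionRecognition(filePath, projectId, modelId);
}

Note the protocol of the bidirectional stream: the first request carries only the StreamingVideoConfig, all subsequent requests carry only input content, and closeSend() signals that no more data will follow before the responses are drained.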