Use of com.google.cloud.videointelligence.v1beta1.VideoContext in project google-cloud-java by GoogleCloudPlatform.
From the class VideoIntelligenceServiceClientTest, method annotateVideoExceptionTest:
@Test
@SuppressWarnings("all")
public void annotateVideoExceptionTest() throws Exception {
  StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
  mockVideoIntelligenceService.addException(exception);
  try {
    String inputUri = "inputUri1707300727";
    List<Feature> features = new ArrayList<>();
    VideoContext videoContext = VideoContext.newBuilder().build();
    String outputUri = "outputUri-1273518802";
    String locationId = "locationId552319461";
    client.annotateVideoAsync(inputUri, features, videoContext, outputUri, locationId).get();
    Assert.fail("No exception raised");
  } catch (ExecutionException e) {
    Assert.assertEquals(ApiException.class, e.getCause().getClass());
    ApiException apiException = (ApiException) e.getCause();
    Assert.assertEquals(Status.INVALID_ARGUMENT.getCode(), apiException.getStatusCode());
  }
}
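For contrast, here is a minimal sketch of the matching success-path test, assuming the mock service also exposes an addResponse method and that the async call resolves a pre-completed long-running Operation (the operation name and URIs are illustrative):

@Test
@SuppressWarnings("all")
public void annotateVideoTest() throws Exception {
  // Assumption: the mock replays this pre-completed Operation to the client.
  AnnotateVideoResponse expectedResponse = AnnotateVideoResponse.newBuilder().build();
  Operation resultOperation =
      Operation.newBuilder()
          .setName("annotateVideoTest") // illustrative operation name
          .setDone(true)
          .setResponse(Any.pack(expectedResponse))
          .build();
  mockVideoIntelligenceService.addResponse(resultOperation);

  String inputUri = "gs://bucket/video.mp4"; // illustrative URI
  List<Feature> features = new ArrayList<>();
  VideoContext videoContext = VideoContext.newBuilder().build();
  AnnotateVideoResponse actualResponse =
      client.annotateVideoAsync(inputUri, features, videoContext, "", "").get();
  Assert.assertEquals(expectedResponse, actualResponse);
}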
Use of com.google.cloud.videointelligence.v1beta1.VideoContext in project java-docs-samples by GoogleCloudPlatform.
From the class Detect, method analyzeFacesBoundingBoxes:
// [START video_face_bounding_boxes]
/**
 * Detects faces' bounding boxes on the video at the provided Cloud Storage path.
 *
 * @param gcsUri the path to the video file to analyze.
 */
public static void analyzeFacesBoundingBoxes(String gcsUri) throws Exception {
  // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
  try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
    // Set the configuration to include bounding boxes
    FaceConfig config = FaceConfig.newBuilder().setIncludeBoundingBoxes(true).build();
    // Set the video context with the above configuration
    VideoContext context = VideoContext.newBuilder().setFaceDetectionConfig(config).build();
    // Create the request
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputUri(gcsUri)
            .addFeatures(Feature.FACE_DETECTION)
            .setVideoContext(context)
            .build();
    // Asynchronously perform facial analysis on the video
    OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
        client.annotateVideoAsync(request);
    System.out.println("Waiting for operation to complete...");
    boolean faceFound = false;
    // Display the results
    for (VideoAnnotationResults results :
        response.get(900, TimeUnit.SECONDS).getAnnotationResultsList()) {
      int faceCount = 0;
      // Display the results for each face
      for (FaceDetectionAnnotation faceAnnotation :
          results.getFaceDetectionAnnotationsList()) {
        faceFound = true;
        System.out.println("\nFace: " + ++faceCount);
        // Each FaceDetectionAnnotation has only one segment.
        for (FaceSegment segment : faceAnnotation.getSegmentsList()) {
          double startTime =
              segment.getSegment().getStartTimeOffset().getSeconds()
                  + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
          double endTime =
              segment.getSegment().getEndTimeOffset().getSeconds()
                  + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
          System.out.printf("Segment location: %.3fs to %.3fs\n", startTime, endTime);
        }
        // There are typically many frames for each face; here we process only the first.
        try {
          if (faceAnnotation.getFramesCount() > 0) {
            // Get the first frame
            FaceDetectionFrame frame = faceAnnotation.getFrames(0);
            double timeOffset =
                frame.getTimeOffset().getSeconds() + frame.getTimeOffset().getNanos() / 1e9;
            System.out.printf("First frame time offset: %.3fs\n", timeOffset);
            // Print info on the first normalized bounding box
            NormalizedBoundingBox box = frame.getAttributes(0).getNormalizedBoundingBox();
            System.out.printf("\tLeft: %.3f\n", box.getLeft());
            System.out.printf("\tTop: %.3f\n", box.getTop());
            System.out.printf("\tBottom: %.3f\n", box.getBottom());
            System.out.printf("\tRight: %.3f\n", box.getRight());
          } else {
            System.out.println("No frames found in annotation");
          }
        } catch (IndexOutOfBoundsException ioe) {
          System.out.println("Could not retrieve frame: " + ioe.getMessage());
        }
      }
    }
    if (!faceFound) {
      System.out.println("No faces detected in " + gcsUri);
    }
  }
}
// [END video_face_bounding_boxes]
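A minimal sketch of invoking the sample; the bucket path is a hypothetical placeholder:

public static void main(String[] args) throws Exception {
  // Hypothetical GCS URI; point this at a video in a bucket you can read.
  analyzeFacesBoundingBoxes("gs://my-bucket/my-video.mp4");
}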
Use of com.google.cloud.videointelligence.v1beta1.VideoContext in project beam by apache.
From the class VideoIntelligenceIT, method annotateVideoFromURIWithContext:
@Test
public void annotateVideoFromURIWithContext() {
  VideoContext context =
      VideoContext.newBuilder()
          .setLabelDetectionConfig(LabelDetectionConfig.newBuilder().setModel("builtin/latest"))
          .build();
  PCollection<List<VideoAnnotationResults>> annotationResults =
      testPipeline
          .apply(Create.of(KV.of(VIDEO_URI, context)))
          .apply("Annotate video", VideoIntelligence.annotateFromUriWithContext(featureList));
  PAssert.that(annotationResults).satisfies(new VerifyVideoAnnotationResult());
  testPipeline.run().waitUntilFinish();
}
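The test references VIDEO_URI and featureList fields defined elsewhere in the class; a plausible sketch of those fields, assuming label detection is the feature under test (the bucket path is a hypothetical placeholder):

// Assumed definitions: LABEL_DETECTION matches the LabelDetectionConfig
// set on the VideoContext above; the URI is a placeholder.
private static final String VIDEO_URI = "gs://my-bucket/my-video.mp4";
private static final List<Feature> featureList =
    Collections.singletonList(Feature.LABEL_DETECTION);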
Use of com.google.cloud.videointelligence.v1beta1.VideoContext in project beam by apache.
From the class AnnotateVideoBytesWithContextFn, method processElement:
/**
 * ProcessElement implementation.
 */
@Override
public void processElement(ProcessContext context)
    throws ExecutionException, InterruptedException {
  ByteString element = context.element().getKey();
  VideoContext videoContext = context.element().getValue();
  // The first argument (the element URI) is null because the input is raw bytes.
  List<VideoAnnotationResults> videoAnnotationResults =
      getVideoAnnotationResults(null, element, videoContext);
  context.output(videoAnnotationResults);
}
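A sketch of how this DoFn is typically reached from a pipeline, assuming VideoIntelligence.annotateFromBytesWithContext wraps it in a ParDo (videoBytes, videoContext, and featureList are assumed to be in scope):

PCollection<List<VideoAnnotationResults>> results =
    pipeline
        .apply(Create.of(KV.of(videoBytes, videoContext)))
        .apply("Annotate bytes", VideoIntelligence.annotateFromBytesWithContext(featureList));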
Use of com.google.cloud.videointelligence.v1beta1.VideoContext in project beam by apache.
From the class AnnotateVideoFromBytesFn, method processElement:
/**
 * Implementation of ProcessElement.
 */
@Override
public void processElement(ProcessContext context)
    throws ExecutionException, InterruptedException {
  ByteString element = context.element();
  // The VideoContext is optional here; it is looked up from a side-input map
  // keyed by the video bytes, if such a side input was provided.
  VideoContext videoContext = null;
  if (contextSideInput != null) {
    videoContext = context.sideInput(contextSideInput).get(element);
  }
  List<VideoAnnotationResults> videoAnnotationResults =
      getVideoAnnotationResults(null, element, videoContext);
  context.output(videoAnnotationResults);
}
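A sketch of supplying that optional side input, assuming an annotateFromBytes factory that accepts a PCollectionView<Map<ByteString, VideoContext>> (variable names are illustrative; appropriate coders are assumed to be registered):

PCollectionView<Map<ByteString, VideoContext>> contextSideInput =
    pipeline
        .apply("Contexts", Create.of(KV.of(videoBytes, videoContext)))
        .apply(View.asMap());

PCollection<List<VideoAnnotationResults>> results =
    pipeline
        .apply(Create.of(videoBytes))
        .apply(
            "Annotate bytes",
            VideoIntelligence.annotateFromBytes(contextSideInput, featureList));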