Use of com.google.api.services.vision.v1.model.FaceAnnotation in project java-docs-samples by GoogleCloudPlatform: the FaceDetectApp class, main method.
// [START main]
/**
 * Annotates an image using the Vision API.
 *
 * <p>Expects exactly two arguments: the path of the image to annotate and the path of the
 * annotated output image (which must end in {@code .jpg}). Exits with status 1 on bad usage.
 */
public static void main(String[] args) throws IOException, GeneralSecurityException {
if (args.length != 2) {
System.err.println("Usage:");
System.err.printf("\tjava %s inputImagePath outputImagePath\n", FaceDetectApp.class.getCanonicalName());
System.exit(1);
}
Path imagePath = Paths.get(args[0]);
Path annotatedPath = Paths.get(args[1]);
// The output path must carry a .jpg extension (case-insensitive check).
boolean endsInJpg = annotatedPath.toString().toLowerCase().endsWith(".jpg");
if (!endsInJpg) {
System.err.println("outputImagePath must have the file extension 'jpg'.");
System.exit(1);
}
FaceDetectApp app = new FaceDetectApp(getVisionService());
List<FaceAnnotation> detected = app.detectFaces(imagePath, MAX_RESULTS);
System.out.printf("Found %d face%s\n", detected.size(), detected.size() == 1 ? "" : "s");
System.out.printf("Writing to file %s\n", annotatedPath);
app.writeWithFaces(imagePath, annotatedPath, detected);
}
Use of com.google.api.services.vision.v1.model.FaceAnnotation in project java-docs-samples by GoogleCloudPlatform: the FaceDetectApp class, annotateWithFace method.
/**
 * Annotates an image {@code img} with a polygon defined by {@code face}.
 *
 * <p>Draws the face's {@code fdBoundingPoly} vertices as a closed green polygon with a
 * 5-pixel-wide stroke. The image is modified in place.
 *
 * @param img the image to draw on
 * @param face the face annotation whose bounding polygon is drawn
 */
private static void annotateWithFace(BufferedImage img, FaceAnnotation face) {
Graphics2D gfx = img.createGraphics();
try {
Polygon poly = new Polygon();
for (Vertex vertex : face.getFdBoundingPoly().getVertices()) {
poly.addPoint(vertex.getX(), vertex.getY());
}
gfx.setStroke(new BasicStroke(5));
gfx.setColor(new Color(0x00ff00));
gfx.draw(poly);
} finally {
// createGraphics() allocates a native graphics context; it must be released explicitly.
gfx.dispose();
}
}
Use of com.google.api.services.vision.v1.model.FaceAnnotation in project java-docs-samples by GoogleCloudPlatform: the FaceDetectApp class, detectFaces method.
// [START detect_face]
/**
 * Gets up to {@code maxResults} faces for an image stored at {@code path}.
 *
 * @param path location of the image file to analyze
 * @param maxResults upper bound on the number of face annotations requested
 * @return the face annotations returned by the Vision API (never {@code null})
 * @throws IOException if the file cannot be read, or if the API response carries no face
 *     annotations (the API error message is used when one is available)
 */
public List<FaceAnnotation> detectFaces(Path path, int maxResults) throws IOException {
byte[] imageBytes = Files.readAllBytes(path);
Feature faceDetection = new Feature().setType("FACE_DETECTION").setMaxResults(maxResults);
AnnotateImageRequest request =
new AnnotateImageRequest()
.setImage(new Image().encodeContent(imageBytes))
.setFeatures(ImmutableList.of(faceDetection));
BatchAnnotateImagesRequest batchRequest =
new BatchAnnotateImagesRequest().setRequests(ImmutableList.of(request));
Vision.Images.Annotate annotate = vision.images().annotate(batchRequest);
// Due to a bug: requests to Vision API containing large images fail when GZipped.
annotate.setDisableGZipContent(true);
BatchAnnotateImagesResponse batchResponse = annotate.execute();
// One request was sent, so exactly one response is expected.
assert batchResponse.getResponses().size() == 1;
AnnotateImageResponse response = batchResponse.getResponses().get(0);
List<FaceAnnotation> faces = response.getFaceAnnotations();
if (faces == null) {
throw new IOException(response.getError() != null ? response.getError().getMessage() : "Unknown error getting image annotations");
}
return faces;
}
Use of com.google.api.services.vision.v1.model.FaceAnnotation in project java-docs-samples by GoogleCloudPlatform: the FaceDetectAppTest class, annotateWithFaces_manyFaces_outlinesFaces test method.
@Test
public void annotateWithFaces_manyFaces_outlinesFaces() throws Exception {
// Arrange: two faces — a quadrilateral and a triangle — on a blank 100x100 image.
ImmutableList<FaceAnnotation> faces =
ImmutableList.of(
new FaceAnnotation()
.setFdBoundingPoly(
new BoundingPoly()
.setVertices(
ImmutableList.of(
new Vertex().setX(10).setY(5),
new Vertex().setX(20).setY(5),
new Vertex().setX(20).setY(25),
new Vertex().setX(10).setY(25)))),
new FaceAnnotation()
.setFdBoundingPoly(
new BoundingPoly()
.setVertices(
ImmutableList.of(
new Vertex().setX(60).setY(50),
new Vertex().setX(70).setY(60),
new Vertex().setX(50).setY(60)))));
BufferedImage img = new BufferedImage(100, 100, BufferedImage.TYPE_INT_RGB);
// Act
FaceDetectApp.annotateWithFaces(img, faces);
// Assert: every polygon vertex pixel carries a saturated green channel.
int[][][] expectedVertices = {
{{10, 5}, {20, 5}, {20, 25}, {10, 25}},
{{60, 50}, {70, 60}, {50, 60}}
};
for (int faceIdx = 0; faceIdx < expectedVertices.length; faceIdx++) {
for (int[] v : expectedVertices[faceIdx]) {
assertThat(img.getRGB(v[0], v[1]) & 0x00ff00)
.named(String.format("img face #%d vertex (%d, %d) green channel", faceIdx + 1, v[0], v[1]))
.isEqualTo(0x00ff00);
}
}
}
Aggregations