Search in sources:

Example 1 with FaceDetail

use of com.amazonaws.services.rekognition.model.FaceDetail in project myrobotlab by MyRobotLab.

In class Rekognition, method getFaces:

// FIXME make BufferedImage translations...
// FIXME make BufferedImage translations...
public List<FaceDetail> getFaces(ByteBuffer imageBytes, Integer width, Integer height) {
    // Ask Rekognition for the full attribute set so the age range is populated.
    DetectFacesRequest detectRequest = new DetectFacesRequest()
            .withImage(new Image().withBytes((imageBytes)))
            .withAttributes(Attribute.ALL);
    DetectFacesResult detectResult = getClient().detectFaces(detectRequest);
    System.out.println("Orientation: " + detectResult.getOrientationCorrection() + "\n");
    List<FaceDetail> detectedFaces = detectResult.getFaceDetails();
    for (FaceDetail detectedFace : detectedFaces) {
        System.out.println("Face:");
        // Print the face's bounding box, adjusted for the reported orientation.
        ShowBoundingBoxPositions(height, width, detectedFace.getBoundingBox(), detectResult.getOrientationCorrection());
        AgeRange range = detectedFace.getAgeRange();
        System.out.println("The detected face is estimated to be between " + range.getLow().toString() + " and " + range.getHigh().toString() + " years old.");
        System.out.println();
    }
    return detectedFaces;
}
Also used : FaceDetail(com.amazonaws.services.rekognition.model.FaceDetail) AgeRange(com.amazonaws.services.rekognition.model.AgeRange) BufferedImage(java.awt.image.BufferedImage) Image(com.amazonaws.services.rekognition.model.Image) DetectFacesResult(com.amazonaws.services.rekognition.model.DetectFacesResult) DetectFacesRequest(com.amazonaws.services.rekognition.model.DetectFacesRequest)

Example 2 with FaceDetail

use of com.amazonaws.services.rekognition.model.FaceDetail in project aws-doc-sdk-examples by awsdocs.

In class DetectFaces, method main:

public static void main(String[] args) throws Exception {
    // Change bucket to your S3 bucket that contains the image file.
    // Change photo to your image file.
    String photo = "input.jpg";
    String bucket = "bucket";
    AmazonRekognition rekognitionClient = AmazonRekognitionClientBuilder.defaultClient();
    // Detect faces in the S3-hosted image, requesting every facial attribute.
    DetectFacesRequest detectRequest = new DetectFacesRequest()
            .withImage(new Image().withS3Object(new S3Object().withName(photo).withBucket(bucket)))
            .withAttributes(Attribute.ALL);
    try {
        DetectFacesResult detectResult = rekognitionClient.detectFaces(detectRequest);
        // One mapper is enough for every face; construction is loop-invariant.
        ObjectMapper prettyPrinter = new ObjectMapper();
        for (FaceDetail detectedFace : detectResult.getFaceDetails()) {
            if (detectRequest.getAttributes().contains("ALL")) {
                AgeRange range = detectedFace.getAgeRange();
                System.out.println("The detected face is estimated to be between " + range.getLow().toString() + " and " + range.getHigh().toString() + " years old.");
                System.out.println("Here's the complete set of attributes:");
            } else {
                // non-default attributes have null values.
                System.out.println("Here's the default set of attributes:");
            }
            // Dump the full face detail as pretty-printed JSON.
            System.out.println(prettyPrinter.writerWithDefaultPrettyPrinter().writeValueAsString(detectedFace));
        }
    } catch (AmazonRekognitionException e) {
        e.printStackTrace();
    }
}
Also used : FaceDetail(com.amazonaws.services.rekognition.model.FaceDetail) AgeRange(com.amazonaws.services.rekognition.model.AgeRange) AmazonRekognitionException(com.amazonaws.services.rekognition.model.AmazonRekognitionException) AmazonRekognition(com.amazonaws.services.rekognition.AmazonRekognition) S3Object(com.amazonaws.services.rekognition.model.S3Object) Image(com.amazonaws.services.rekognition.model.Image) DetectFacesResult(com.amazonaws.services.rekognition.model.DetectFacesResult) DetectFacesRequest(com.amazonaws.services.rekognition.model.DetectFacesRequest) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper)

Example 3 with FaceDetail

use of com.amazonaws.services.rekognition.model.FaceDetail in project aws-doc-sdk-examples by awsdocs.

In class DisplayFaces, method main:

public static void main(String[] arg) throws Exception {
    // Change the value of bucket to the S3 bucket that contains your image file.
    // Change the value of photo to your image file name.
    String photo = "input.png";
    String bucket = "bucket";
    int height = 0;
    int width = 0;
    // Get the image from an S3 Bucket.
    AmazonS3 s3client = AmazonS3ClientBuilder.defaultClient();
    BufferedImage image;
    // FIX: close the S3 object and its content stream when done. The original
    // left both open, which leaks the underlying pooled HTTP connection.
    try (com.amazonaws.services.s3.model.S3Object s3object = s3client.getObject(bucket, photo);
            S3ObjectInputStream inputStream = s3object.getObjectContent()) {
        image = ImageIO.read(inputStream);
    }
    if (image == null) {
        // ImageIO.read returns null (it does not throw) for undecodable data;
        // without this guard the getWidth() call below would NPE.
        throw new Exception("Unable to decode image " + photo + " from bucket " + bucket);
    }
    DetectFacesRequest request = new DetectFacesRequest().withImage(new Image().withS3Object(new S3Object().withName(photo).withBucket(bucket)));
    width = image.getWidth();
    height = image.getHeight();
    // Call DetectFaces.
    AmazonRekognition amazonRekognition = AmazonRekognitionClientBuilder.defaultClient();
    DetectFacesResult result = amazonRekognition.detectFaces(request);
    // Show the bounding box info for each face. Rekognition reports box
    // coordinates as ratios of the image size, so multiply by width/height
    // to get pixel positions.
    List<FaceDetail> faceDetails = result.getFaceDetails();
    for (FaceDetail face : faceDetails) {
        BoundingBox box = face.getBoundingBox();
        float left = width * box.getLeft();
        float top = height * box.getTop();
        System.out.println("Face:");
        System.out.println("Left: " + String.valueOf((int) left));
        System.out.println("Top: " + String.valueOf((int) top));
        System.out.println("Face Width: " + String.valueOf((int) (width * box.getWidth())));
        System.out.println("Face Height: " + String.valueOf((int) (height * box.getHeight())));
        System.out.println();
    }
    // Create frame and panel, sized down by the class-level display scale.
    JFrame frame = new JFrame("RotateImage");
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    DisplayFaces panel = new DisplayFaces(result, image);
    panel.setPreferredSize(new Dimension(image.getWidth() / scale, image.getHeight() / scale));
    frame.setContentPane(panel);
    frame.pack();
    frame.setVisible(true);
}
Also used : AmazonS3(com.amazonaws.services.s3.AmazonS3) S3ObjectInputStream(com.amazonaws.services.s3.model.S3ObjectInputStream) BufferedImage(java.awt.image.BufferedImage) Image(com.amazonaws.services.rekognition.model.Image) BufferedImage(java.awt.image.BufferedImage) FaceDetail(com.amazonaws.services.rekognition.model.FaceDetail) BoundingBox(com.amazonaws.services.rekognition.model.BoundingBox) AmazonRekognition(com.amazonaws.services.rekognition.AmazonRekognition) S3Object(com.amazonaws.services.rekognition.model.S3Object) DetectFacesResult(com.amazonaws.services.rekognition.model.DetectFacesResult) DetectFacesRequest(com.amazonaws.services.rekognition.model.DetectFacesRequest)

Example 4 with FaceDetail

use of com.amazonaws.services.rekognition.model.FaceDetail in project amplify-android by aws-amplify.

In class AWSRekognitionService, method detectEntities:

private List<EntityDetails> detectEntities(ByteBuffer imageData) throws PredictionsException {
    // Request the full attribute set so every facial detail is populated.
    DetectFacesRequest request = new DetectFacesRequest()
            .withImage(new Image().withBytes(imageData))
            .withAttributes(Attribute.ALL.toString());
    // Detect entities in the given image via Amazon Rekognition
    final DetectFacesResult detectionResult;
    try {
        detectionResult = rekognition.detectFaces(request);
    } catch (AmazonClientException serviceException) {
        throw new PredictionsException("Amazon Rekognition encountered an error while detecting faces.", serviceException, "See attached service exception for more details.");
    }
    List<EntityDetails> detectedEntities = new ArrayList<>();
    for (FaceDetail faceDetail : detectionResult.getFaceDetails()) {
        // Translate each raw face detection into the Amplify model types.
        RectF boundingBox = RekognitionResultTransformers.fromBoundingBox(faceDetail.getBoundingBox());
        AgeRange age = RekognitionResultTransformers.fromRekognitionAgeRange(faceDetail.getAgeRange());
        Pose headPose = RekognitionResultTransformers.fromRekognitionPose(faceDetail.getPose());
        List<Landmark> faceLandmarks = RekognitionResultTransformers.fromLandmarks(faceDetail.getLandmarks());
        List<BinaryFeature> binaryFeatures = RekognitionResultTransformers.fromFaceDetail(faceDetail);
        // Gender detection
        com.amazonaws.services.rekognition.model.Gender detectedGender = faceDetail.getGender();
        Gender gender = Gender.builder()
                .value(GenderBinaryTypeAdapter.fromRekognition(detectedGender.getValue()))
                .confidence(detectedGender.getConfidence())
                .build();
        // Emotion detection
        List<Emotion> detectedEmotions = new ArrayList<>();
        for (com.amazonaws.services.rekognition.model.Emotion rawEmotion : faceDetail.getEmotions()) {
            EmotionType emotionType = EmotionTypeAdapter.fromRekognition(rawEmotion.getType());
            detectedEmotions.add(Emotion.builder().value(emotionType).confidence(rawEmotion.getConfidence()).build());
        }
        // Sort the emotions in reverse natural order (same ordering as
        // Collections.sort with Collections.reverseOrder()).
        detectedEmotions.sort(Collections.reverseOrder());
        detectedEntities.add(EntityDetails.builder()
                .box(boundingBox)
                .ageRange(age)
                .pose(headPose)
                .gender(gender)
                .landmarks(faceLandmarks)
                .emotions(detectedEmotions)
                .features(binaryFeatures)
                .build());
    }
    return detectedEntities;
}
Also used : AgeRange(com.amplifyframework.predictions.models.AgeRange) AmazonClientException(com.amazonaws.AmazonClientException) ArrayList(java.util.ArrayList) Gender(com.amplifyframework.predictions.models.Gender) Image(com.amazonaws.services.rekognition.model.Image) FaceDetail(com.amazonaws.services.rekognition.model.FaceDetail) EntityDetails(com.amplifyframework.predictions.models.EntityDetails) Pose(com.amplifyframework.predictions.models.Pose) PredictionsException(com.amplifyframework.predictions.PredictionsException) Emotion(com.amplifyframework.predictions.models.Emotion) Landmark(com.amplifyframework.predictions.models.Landmark) BinaryFeature(com.amplifyframework.predictions.models.BinaryFeature) RectF(android.graphics.RectF) EmotionType(com.amplifyframework.predictions.models.EmotionType) DetectFacesResult(com.amazonaws.services.rekognition.model.DetectFacesResult) DetectFacesRequest(com.amazonaws.services.rekognition.model.DetectFacesRequest)

Example 5 with FaceDetail

use of com.amazonaws.services.rekognition.model.FaceDetail in project aws-doc-sdk-examples by awsdocs.

In class DisplayFaces, method paintComponent:

// Draws the bounding box around the detected faces.
@Override
public void paintComponent(Graphics g) {
    // Let Swing paint the component background first, per the JComponent
    // paintComponent contract for opaque components.
    super.paintComponent(g);
    float left = 0;
    float top = 0;
    int height = image.getHeight(this);
    int width = image.getWidth(this);
    // Create a Java2D version of g.
    Graphics2D g2d = (Graphics2D) g;
    // Draw the image.
    g2d.drawImage(image, 0, 0, width / scale, height / scale, this);
    g2d.setColor(new Color(0, 212, 0));
    // Iterate through faces and display bounding boxes. Box coordinates are
    // ratios of the image dimensions, so scale to pixels before drawing.
    List<FaceDetail> faceDetails = result.getFaceDetails();
    for (FaceDetail face : faceDetails) {
        BoundingBox box = face.getBoundingBox();
        left = width * box.getLeft();
        top = height * box.getTop();
        // FIX: divide by scale BEFORE rounding for the box height. The
        // original computed Math.round(height * box.getHeight()) / scale,
        // i.e. rounded first and then integer-divided, which truncates
        // differently from the other three dimensions.
        g2d.drawRect(Math.round(left / scale), Math.round(top / scale), Math.round((width * box.getWidth()) / scale), Math.round((height * box.getHeight()) / scale));
    }
}
Also used : FaceDetail(com.amazonaws.services.rekognition.model.FaceDetail) BoundingBox(com.amazonaws.services.rekognition.model.BoundingBox)

Aggregations

FaceDetail (com.amazonaws.services.rekognition.model.FaceDetail)6 DetectFacesRequest (com.amazonaws.services.rekognition.model.DetectFacesRequest)4 DetectFacesResult (com.amazonaws.services.rekognition.model.DetectFacesResult)4 Image (com.amazonaws.services.rekognition.model.Image)4 AmazonRekognition (com.amazonaws.services.rekognition.AmazonRekognition)2 AgeRange (com.amazonaws.services.rekognition.model.AgeRange)2 BoundingBox (com.amazonaws.services.rekognition.model.BoundingBox)2 S3Object (com.amazonaws.services.rekognition.model.S3Object)2 BinaryFeature (com.amplifyframework.predictions.models.BinaryFeature)2 BufferedImage (java.awt.image.BufferedImage)2 RectF (android.graphics.RectF)1 AmazonClientException (com.amazonaws.AmazonClientException)1 AmazonRekognitionException (com.amazonaws.services.rekognition.model.AmazonRekognitionException)1 Beard (com.amazonaws.services.rekognition.model.Beard)1 EyeOpen (com.amazonaws.services.rekognition.model.EyeOpen)1 Eyeglasses (com.amazonaws.services.rekognition.model.Eyeglasses)1 MouthOpen (com.amazonaws.services.rekognition.model.MouthOpen)1 Mustache (com.amazonaws.services.rekognition.model.Mustache)1 Smile (com.amazonaws.services.rekognition.model.Smile)1 Sunglasses (com.amazonaws.services.rekognition.model.Sunglasses)1