Use of com.amazonaws.services.rekognition.model.FaceDetail in project myrobotlab by MyRobotLab.
The getFaces method of the Rekognition class.
// FIXME make BufferedImage translations...
public List<FaceDetail> getFaces(ByteBuffer imageBytes, Integer width, Integer height) {
  DetectFacesRequest request = new DetectFacesRequest().withImage(new Image().withBytes(imageBytes)).withAttributes(Attribute.ALL);
  DetectFacesResult result = getClient().detectFaces(request);
  System.out.println("Orientation: " + result.getOrientationCorrection() + "\n");
  List<FaceDetail> faceDetails = result.getFaceDetails();
  for (FaceDetail face : faceDetails) {
    System.out.println("Face:");
    ShowBoundingBoxPositions(height, width, face.getBoundingBox(), result.getOrientationCorrection());
    AgeRange ageRange = face.getAgeRange();
    System.out.println("The detected face is estimated to be between " + ageRange.getLow() + " and " + ageRange.getHigh() + " years old.");
    System.out.println();
  }
  return faceDetails;
}
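The getFaces method calls a ShowBoundingBoxPositions helper that is not part of this snippet. A minimal sketch of what such a helper could look like, assuming bounding-box values are ratios of the image dimensions and that only the unrotated case is printed (the project's actual helper may differ):

  // Hypothetical helper, kept with the project's naming. It assumes
  // Rekognition reports box values as ratios of the image size and
  // only handles the unrotated case; other orientation corrections
  // (ROTATE_90, ROTATE_180, ROTATE_270) would need their own transforms.
  private void ShowBoundingBoxPositions(Integer imageHeight, Integer imageWidth, BoundingBox box, String rotation) {
    if (rotation != null && !"ROTATE_0".equals(rotation)) {
      System.out.println("No position information available for rotation: " + rotation);
      return;
    }
    float left = imageWidth * box.getLeft();
    float top = imageHeight * box.getTop();
    System.out.println("Left: " + (int) left);
    System.out.println("Top: " + (int) top);
    System.out.println("Face Width: " + (int) (imageWidth * box.getWidth()));
    System.out.println("Face Height: " + (int) (imageHeight * box.getHeight()));
  }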
Use of com.amazonaws.services.rekognition.model.FaceDetail in project aws-doc-sdk-examples by awsdocs.
The main method of the DetectFaces class.
public static void main(String[] args) throws Exception {
  // Change bucket to your S3 bucket that contains the image file.
  // Change photo to your image file.
  String photo = "input.jpg";
  String bucket = "bucket";
  AmazonRekognition rekognitionClient = AmazonRekognitionClientBuilder.defaultClient();
  DetectFacesRequest request = new DetectFacesRequest().withImage(new Image().withS3Object(new S3Object().withName(photo).withBucket(bucket))).withAttributes(Attribute.ALL);
  try {
    DetectFacesResult result = rekognitionClient.detectFaces(request);
    List<FaceDetail> faceDetails = result.getFaceDetails();
    for (FaceDetail face : faceDetails) {
      if (request.getAttributes().contains("ALL")) {
        AgeRange ageRange = face.getAgeRange();
        System.out.println("The detected face is estimated to be between " + ageRange.getLow() + " and " + ageRange.getHigh() + " years old.");
        System.out.println("Here's the complete set of attributes:");
      } else {
        // Non-default attributes have null values.
        System.out.println("Here's the default set of attributes:");
      }
      ObjectMapper objectMapper = new ObjectMapper();
      System.out.println(objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(face));
    }
  } catch (AmazonRekognitionException e) {
    e.printStackTrace();
  }
}
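This example reads the image from S3. The same request can also be built from raw bytes for a local file; a minimal sketch, assuming the file path is a placeholder and using java.nio for the read:

  // Hypothetical variant: build the request from local image bytes
  // instead of an S3 object. Requires java.nio.ByteBuffer,
  // java.nio.file.Files, and java.nio.file.Paths.
  byte[] bytes = Files.readAllBytes(Paths.get("input.jpg"));
  DetectFacesRequest localRequest = new DetectFacesRequest()
      .withImage(new Image().withBytes(ByteBuffer.wrap(bytes)))
      .withAttributes(Attribute.ALL);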
Use of com.amazonaws.services.rekognition.model.FaceDetail in project aws-doc-sdk-examples by awsdocs.
The main method of the DisplayFaces class.
public static void main(String[] arg) throws Exception {
  // Change the value of bucket to the S3 bucket that contains your image file.
  // Change the value of photo to your image file name.
  String photo = "input.png";
  String bucket = "bucket";
  int height = 0;
  int width = 0;
  // Get the image from an S3 bucket.
  AmazonS3 s3client = AmazonS3ClientBuilder.defaultClient();
  com.amazonaws.services.s3.model.S3Object s3object = s3client.getObject(bucket, photo);
  S3ObjectInputStream inputStream = s3object.getObjectContent();
  BufferedImage image = ImageIO.read(inputStream);
  DetectFacesRequest request = new DetectFacesRequest().withImage(new Image().withS3Object(new S3Object().withName(photo).withBucket(bucket)));
  width = image.getWidth();
  height = image.getHeight();
  // Call DetectFaces.
  AmazonRekognition amazonRekognition = AmazonRekognitionClientBuilder.defaultClient();
  DetectFacesResult result = amazonRekognition.detectFaces(request);
  // Show the bounding box info for each face.
  List<FaceDetail> faceDetails = result.getFaceDetails();
  for (FaceDetail face : faceDetails) {
    BoundingBox box = face.getBoundingBox();
    float left = width * box.getLeft();
    float top = height * box.getTop();
    System.out.println("Face:");
    System.out.println("Left: " + (int) left);
    System.out.println("Top: " + (int) top);
    System.out.println("Face Width: " + (int) (width * box.getWidth()));
    System.out.println("Face Height: " + (int) (height * box.getHeight()));
    System.out.println();
  }
  // Create the frame and panel.
  JFrame frame = new JFrame("RotateImage");
  frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
  DisplayFaces panel = new DisplayFaces(result, image);
  panel.setPreferredSize(new Dimension(image.getWidth() / scale, image.getHeight() / scale));
  frame.setContentPane(panel);
  frame.pack();
  frame.setVisible(true);
}
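The panel relies on result, image, and scale members defined elsewhere in the DisplayFaces class. A plausible minimal shape for them, assuming DisplayFaces extends JPanel and scale is an integer downscaling factor (the actual class in aws-doc-sdk-examples may differ):

  // Hypothetical fields and constructor for the DisplayFaces panel.
  static int scale = 1; // integer factor the image is divided by when drawn
  DetectFacesResult result;
  BufferedImage image;

  public DisplayFaces(DetectFacesResult facesResult, BufferedImage bufImage) {
    result = facesResult;
    image = bufImage;
  }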
Use of com.amazonaws.services.rekognition.model.FaceDetail in project amplify-android by aws-amplify.
The detectEntities method of the AWSRekognitionService class.
private List<EntityDetails> detectEntities(ByteBuffer imageData) throws PredictionsException {
  DetectFacesRequest request = new DetectFacesRequest().withImage(new Image().withBytes(imageData)).withAttributes(Attribute.ALL.toString());
  // Detect entities in the given image via Amazon Rekognition.
  final DetectFacesResult result;
  try {
    result = rekognition.detectFaces(request);
  } catch (AmazonClientException serviceException) {
    throw new PredictionsException("Amazon Rekognition encountered an error while detecting faces.", serviceException, "See attached service exception for more details.");
  }
  List<EntityDetails> entities = new ArrayList<>();
  for (FaceDetail face : result.getFaceDetails()) {
    // Extract details from the face detection.
    RectF box = RekognitionResultTransformers.fromBoundingBox(face.getBoundingBox());
    AgeRange ageRange = RekognitionResultTransformers.fromRekognitionAgeRange(face.getAgeRange());
    Pose pose = RekognitionResultTransformers.fromRekognitionPose(face.getPose());
    List<Landmark> landmarks = RekognitionResultTransformers.fromLandmarks(face.getLandmarks());
    List<BinaryFeature> features = RekognitionResultTransformers.fromFaceDetail(face);
    // Gender detection.
    com.amazonaws.services.rekognition.model.Gender rekognitionGender = face.getGender();
    Gender amplifyGender = Gender.builder().value(GenderBinaryTypeAdapter.fromRekognition(rekognitionGender.getValue())).confidence(rekognitionGender.getConfidence()).build();
    // Emotion detection.
    List<Emotion> emotions = new ArrayList<>();
    for (com.amazonaws.services.rekognition.model.Emotion rekognitionEmotion : face.getEmotions()) {
      EmotionType emotion = EmotionTypeAdapter.fromRekognition(rekognitionEmotion.getType());
      Emotion amplifyEmotion = Emotion.builder().value(emotion).confidence(rekognitionEmotion.getConfidence()).build();
      emotions.add(amplifyEmotion);
    }
    Collections.sort(emotions, Collections.reverseOrder());
    EntityDetails entity = EntityDetails.builder().box(box).ageRange(ageRange).pose(pose).gender(amplifyGender).landmarks(landmarks).emotions(emotions).features(features).build();
    entities.add(entity);
  }
  return entities;
}
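The Collections.sort(emotions, Collections.reverseOrder()) call relies on Amplify's Emotion type being Comparable, so that the highest-confidence emotion ends up first in the list. A more explicit equivalent, assuming getConfidence() exposes the detection confidence as a Float, would be:

  // Hypothetical explicit sort: order emotions by confidence, descending.
  Collections.sort(emotions, (a, b) -> Float.compare(b.getConfidence(), a.getConfidence()));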
Use of com.amazonaws.services.rekognition.model.FaceDetail in project aws-doc-sdk-examples by awsdocs.
The paintComponent method of the DisplayFaces class.
// Draws the bounding box around the detected faces.
public void paintComponent(Graphics g) {
  float left = 0;
  float top = 0;
  int height = image.getHeight(this);
  int width = image.getWidth(this);
  // Create a Java2D version of g.
  Graphics2D g2d = (Graphics2D) g;
  // Draw the image.
  g2d.drawImage(image, 0, 0, width / scale, height / scale, this);
  g2d.setColor(new Color(0, 212, 0));
  // Iterate through faces and display bounding boxes.
  List<FaceDetail> faceDetails = result.getFaceDetails();
  for (FaceDetail face : faceDetails) {
    BoundingBox box = face.getBoundingBox();
    left = width * box.getLeft();
    top = height * box.getTop();
    // Convert ratio-based box values to pixels, apply the display
    // scale, and round each of the four values consistently.
    g2d.drawRect(Math.round(left / scale), Math.round(top / scale), Math.round((width * box.getWidth()) / scale), Math.round((height * box.getHeight()) / scale));
  }
}
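Since BoundingBox values are ratios of the full image, the conversion above is plain multiplication; a worked example with illustrative numbers:

  // Illustrative values only, not from a real API response.
  int width = 800, height = 600;          // image dimensions in pixels
  float boxLeft = 0.25f, boxTop = 0.10f;  // ratios from BoundingBox
  float left = width * boxLeft;           // 200.0 px from the left edge
  float top = height * boxTop;            // 60.0 px from the top edge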