Example use of com.amazonaws.services.rekognition.model.BoundingBox from the aws-doc-sdk-examples project by awsdocs:
the main method of the RecognizeCelebrities class.
/**
 * Demonstrates Amazon Rekognition celebrity detection: loads a local image
 * file into memory, submits it to the RecognizeCelebrities operation, and
 * prints each match's name, ID, bounding-box position, and reference URLs,
 * followed by the count of unrecognized faces.
 */
public static void main(String[] args) {
    // Change photo to the path and filename of your image.
    String photo = "moviestars.jpg";
    AmazonRekognition rekognitionClient = AmazonRekognitionClientBuilder.defaultClient();

    // Read the image file into a byte buffer; the request carries raw bytes.
    ByteBuffer imageBytes = null;
    try (InputStream inputStream = new FileInputStream(new File(photo))) {
        imageBytes = ByteBuffer.wrap(IOUtils.toByteArray(inputStream));
    } catch (Exception e) {
        System.out.println("Failed to load file " + photo);
        System.exit(1);
    }

    System.out.println("Looking for celebrities in image " + photo + "\n");
    RecognizeCelebritiesRequest request = new RecognizeCelebritiesRequest()
            .withImage(new Image().withBytes(imageBytes));
    RecognizeCelebritiesResult result = rekognitionClient.recognizeCelebrities(request);

    // Display recognized celebrity information.
    List<Celebrity> celebs = result.getCelebrityFaces();
    System.out.println(celebs.size() + " celebrity(s) were recognized.\n");
    for (Celebrity match : celebs) {
        System.out.println("Celebrity recognized: " + match.getName());
        System.out.println("Celebrity ID: " + match.getId());
        BoundingBox box = match.getFace().getBoundingBox();
        System.out.println("position: " + box.getLeft().toString() + " " + box.getTop().toString());
        System.out.println("Further information (if available):");
        for (String link : match.getUrls()) {
            System.out.println(link);
        }
        System.out.println();
    }
    System.out.println(result.getUnrecognizedFaces().size() + " face(s) were unrecognized.");
}
Example use of com.amazonaws.services.rekognition.model.BoundingBox from the aws-doc-sdk-examples project by awsdocs:
the main method of the DisplayFaces class.
/**
 * Loads an image from an S3 bucket, calls Amazon Rekognition DetectFaces on
 * the same S3 object, prints pixel-space bounding boxes for each detected
 * face, and displays the image with overlaid boxes in a Swing frame.
 *
 * @param arg unused command-line arguments
 * @throws Exception if the image cannot be fetched or decoded
 */
public static void main(String[] arg) throws Exception {
    // Change the value of bucket to the S3 bucket that contains your image file.
    // Change the value of photo to your image file name.
    String photo = "input.png";
    String bucket = "bucket";

    // Get the image from an S3 Bucket. try-with-resources closes both the
    // S3 object and its content stream; the original leaked both.
    AmazonS3 s3client = AmazonS3ClientBuilder.defaultClient();
    BufferedImage image;
    try (com.amazonaws.services.s3.model.S3Object s3object = s3client.getObject(bucket, photo);
            S3ObjectInputStream inputStream = s3object.getObjectContent()) {
        image = ImageIO.read(inputStream);
    }
    int width = image.getWidth();
    int height = image.getHeight();

    // Call DetectFaces against the same S3 object.
    DetectFacesRequest request = new DetectFacesRequest()
            .withImage(new Image().withS3Object(new S3Object().withName(photo).withBucket(bucket)));
    AmazonRekognition amazonRekognition = AmazonRekognitionClientBuilder.defaultClient();
    DetectFacesResult result = amazonRekognition.detectFaces(request);

    // Show the bounding box info for each face. Rekognition returns box
    // coordinates as ratios of the image dimensions, so scale to pixels.
    List<FaceDetail> faceDetails = result.getFaceDetails();
    for (FaceDetail face : faceDetails) {
        BoundingBox box = face.getBoundingBox();
        float left = width * box.getLeft();
        float top = height * box.getTop();
        System.out.println("Face:");
        System.out.println("Left: " + String.valueOf((int) left));
        System.out.println("Top: " + String.valueOf((int) top));
        System.out.println("Face Width: " + String.valueOf((int) (width * box.getWidth())));
        System.out.println("Face Height: " + String.valueOf((int) (height * box.getHeight())));
        System.out.println();
    }

    // Create frame and panel. `scale` is a class-level field of DisplayFaces.
    JFrame frame = new JFrame("RotateImage");
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    DisplayFaces panel = new DisplayFaces(result, image);
    panel.setPreferredSize(new Dimension(image.getWidth() / scale, image.getHeight() / scale));
    frame.setContentPane(panel);
    frame.pack();
    frame.setVisible(true);
}
Example use of com.amazonaws.services.rekognition.model.BoundingBox from the aws-doc-sdk-examples project by awsdocs:
the paintComponent method of the DisplayFaces class.
/**
 * Draws the image scaled down by {@code scale} and overlays a green
 * rectangle on each face reported by DetectFaces.
 *
 * @param g the graphics context supplied by Swing
 */
@Override
public void paintComponent(Graphics g) {
    // Let Swing paint the component background first (the original skipped
    // this, which can leave stale pixels outside the image area).
    super.paintComponent(g);
    int height = image.getHeight(this);
    int width = image.getWidth(this);

    // Create a Java2D version of g and draw the scaled-down image.
    Graphics2D g2d = (Graphics2D) g;
    g2d.drawImage(image, 0, 0, width / scale, height / scale, this);
    g2d.setColor(new Color(0, 212, 0));

    // Iterate through faces and display bounding boxes. Rekognition reports
    // box coordinates as ratios of the full image, so convert to scaled pixels.
    List<FaceDetail> faceDetails = result.getFaceDetails();
    for (FaceDetail face : faceDetails) {
        BoundingBox box = face.getBoundingBox();
        float left = width * box.getLeft();
        float top = height * box.getTop();
        // Round after dividing by scale for every dimension; the original
        // rounded the height before the integer division, which truncated
        // differently from the other three edges.
        g2d.drawRect(Math.round(left / scale), Math.round(top / scale),
                Math.round((width * box.getWidth()) / scale),
                Math.round((height * box.getHeight()) / scale));
    }
}
Example use of com.amazonaws.services.rekognition.model.BoundingBox from the aws-doc-sdk-examples project by awsdocs:
the main method of the CompareFaces class.
/**
 * Compares the faces in two local images with Amazon Rekognition
 * CompareFaces and prints each match's bounding-box position and
 * confidence, the number of unmatched faces, and the orientation
 * correction reported for each image.
 *
 * @param args unused command-line arguments
 * @throws Exception if the service call fails
 */
public static void main(String[] args) throws Exception {
    Float similarityThreshold = 70F;
    // Replace sourceFile and targetFile with the image files you want to compare.
    String sourceImage = "source.jpg";
    String targetImage = "target.jpg";
    AmazonRekognition rekognitionClient = AmazonRekognitionClientBuilder.defaultClient();

    // Load source and target images and create input parameters.
    ByteBuffer sourceImageBytes = null;
    try (InputStream stream = new FileInputStream(new File(sourceImage))) {
        sourceImageBytes = ByteBuffer.wrap(IOUtils.toByteArray(stream));
    } catch (Exception e) {
        System.out.println("Failed to load source image " + sourceImage);
        System.exit(1);
    }
    ByteBuffer targetImageBytes = null;
    try (InputStream stream = new FileInputStream(new File(targetImage))) {
        targetImageBytes = ByteBuffer.wrap(IOUtils.toByteArray(stream));
    } catch (Exception e) {
        System.out.println("Failed to load target images: " + targetImage);
        System.exit(1);
    }

    CompareFacesRequest request = new CompareFacesRequest()
            .withSourceImage(new Image().withBytes(sourceImageBytes))
            .withTargetImage(new Image().withBytes(targetImageBytes))
            .withSimilarityThreshold(similarityThreshold);

    // Call operation.
    CompareFacesResult compareFacesResult = rekognitionClient.compareFaces(request);

    // Display results: one line per matched face, then the unmatched count
    // and the orientation correction for each input image.
    for (CompareFacesMatch match : compareFacesResult.getFaceMatches()) {
        ComparedFace face = match.getFace();
        BoundingBox position = face.getBoundingBox();
        System.out.println("Face at " + position.getLeft().toString() + " " + position.getTop()
                + " matches with " + face.getConfidence().toString() + "% confidence.");
    }
    List<ComparedFace> uncompared = compareFacesResult.getUnmatchedFaces();
    System.out.println("There was " + uncompared.size() + " face(s) that did not match");
    System.out.println("Source image rotation: " + compareFacesResult.getSourceImageOrientationCorrection());
    System.out.println("target image rotation: " + compareFacesResult.getTargetImageOrientationCorrection());
}
Example use of com.amazonaws.services.rekognition.model.BoundingBox from the amplify-android project by aws-amplify:
the testBoundingBoxConversion method of the RekognitionResultTransformersTest class.
/**
 * Verifies that a Rekognition {@link BoundingBox} is converted into an
 * Android {@code RectF} with the same position and size, within DELTA.
 */
@Test
public void testBoundingBoxConversion() {
    BoundingBox source = randomBox();
    RectF converted = RekognitionResultTransformers.fromBoundingBox(source);
    // Position and dimensions must survive the conversion unchanged.
    assertEquals(source.getLeft(), converted.left, DELTA);
    assertEquals(source.getTop(), converted.top, DELTA);
    assertEquals(source.getWidth(), converted.width(), DELTA);
    assertEquals(source.getHeight(), converted.height(), DELTA);
}
End of aggregated BoundingBox usage examples.