use of android.hardware.camera2.params.Face in project android_frameworks_base by ResurrectionRemix.
the class LegacyFaceDetectMapper method mapResultFaces.
/**
* Update the {@code result} camera metadata map with the new values for
* {@code statistics.faces} and {@code statistics.faceDetectMode}.
*
* <p>Face detect callbacks are processed in the background, and each call to
* {@link #mapResultFaces} will have the latest faces as reflected by the camera1 callbacks.</p>
*
* <p>If the scene mode was set to {@code FACE_PRIORITY} but face detection is disabled,
* the camera will still run face detection in the background, but no faces will be reported
* in the capture result.</p>
*
* @param result a non-{@code null} result
* @param legacyRequest a non-{@code null} request (read-only)
*/
public void mapResultFaces(CameraMetadataNative result, LegacyRequest legacyRequest) {
checkNotNull(result, "result must not be null");
checkNotNull(legacyRequest, "legacyRequest must not be null");
Camera.Face[] faces, previousFaces;
int fdMode;
boolean fdScenePriority;
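// Snapshot the face-detect state under the lock so the values written into this
// result stay mutually consistent with the background camera1 callbacks.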
synchronized (mLock) {
fdMode = mFaceDetectReporting ? STATISTICS_FACE_DETECT_MODE_SIMPLE : STATISTICS_FACE_DETECT_MODE_OFF;
if (mFaceDetectReporting) {
faces = mFaces;
} else {
faces = null;
}
fdScenePriority = mFaceDetectScenePriority;
previousFaces = mFacesPrev;
mFacesPrev = faces;
}
CameraCharacteristics characteristics = legacyRequest.characteristics;
CaptureRequest request = legacyRequest.captureRequest;
Size previewSize = legacyRequest.previewSize;
Camera.Parameters params = legacyRequest.parameters;
Rect activeArray = characteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
ZoomData zoomData = ParameterUtils.convertScalerCropRegion(activeArray, request.get(CaptureRequest.SCALER_CROP_REGION), previewSize, params);
List<Face> convertedFaces = new ArrayList<>();
if (faces != null) {
for (Camera.Face face : faces) {
if (face != null) {
convertedFaces.add(ParameterUtils.convertFaceFromLegacy(face, activeArray, zoomData));
} else {
Log.w(TAG, "mapResultFaces - read NULL face from camera1 device");
}
}
}
if (DEBUG && previousFaces != faces) {
// Log only when DEBUG is enabled and the face array actually changed
Log.v(TAG, "mapResultFaces - changed to " + ListUtils.listToString(convertedFaces));
}
result.set(CaptureResult.STATISTICS_FACES, convertedFaces.toArray(new Face[0]));
result.set(CaptureResult.STATISTICS_FACE_DETECT_MODE, fdMode);
// Override scene mode with FACE_PRIORITY if the request was using FACE_PRIORITY
if (fdScenePriority) {
result.set(CaptureResult.CONTROL_SCENE_MODE, CONTROL_SCENE_MODE_FACE_PRIORITY);
}
}
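For context, the faces written here are read back by applications through CaptureResult.STATISTICS_FACES. Below is a minimal consumer sketch; the session and request setup are assumed to exist elsewhere, and only the callback wiring (standard camera2 API) is shown.

import android.hardware.camera2.CameraCaptureSession;
import android.hardware.camera2.CaptureRequest;
import android.hardware.camera2.CaptureResult;
import android.hardware.camera2.TotalCaptureResult;
import android.hardware.camera2.params.Face;
import android.util.Log;

// Sketch: log each face the mapper reported in the completed capture result.
CameraCaptureSession.CaptureCallback faceCallback = new CameraCaptureSession.CaptureCallback() {
    @Override
    public void onCaptureCompleted(CameraCaptureSession session, CaptureRequest request,
            TotalCaptureResult result) {
        Face[] faces = result.get(CaptureResult.STATISTICS_FACES);
        if (faces == null) {
            return; // face detection not enabled for this request
        }
        for (Face face : faces) {
            // Bounds are in active-array coordinates, as produced by convertFaceFromLegacy
            Log.d("FaceDemo", "id=" + face.getId() + " score=" + face.getScore()
                    + " bounds=" + face.getBounds());
        }
    }
};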
use of android.hardware.camera2.params.Face in project android_frameworks_base by ResurrectionRemix.
the class CameraMetadataNative method setFaces.
private boolean setFaces(Face[] faces) {
if (faces == null) {
return false;
}
int numFaces = faces.length;
// If any face lacks a supported id, only SIMPLE-mode output is possible; also count the non-null faces
boolean fullMode = true;
for (Face face : faces) {
if (face == null) {
numFaces--;
Log.w(TAG, "setFaces - null face detected, skipping");
continue;
}
if (face.getId() == Face.ID_UNSUPPORTED) {
fullMode = false;
}
}
Rect[] faceRectangles = new Rect[numFaces];
byte[] faceScores = new byte[numFaces];
int[] faceIds = null;
int[] faceLandmarks = null;
if (fullMode) {
faceIds = new int[numFaces];
faceLandmarks = new int[numFaces * FACE_LANDMARK_SIZE];
}
int i = 0;
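// Second pass: flatten each non-null face into the parallel metadata arrays
// (one rectangle and score per face; six landmark ints per face in full mode).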
for (Face face : faces) {
if (face == null) {
continue;
}
faceRectangles[i] = face.getBounds();
faceScores[i] = (byte) face.getScore();
if (fullMode) {
faceIds[i] = face.getId();
int j = 0;
faceLandmarks[i * FACE_LANDMARK_SIZE + j++] = face.getLeftEyePosition().x;
faceLandmarks[i * FACE_LANDMARK_SIZE + j++] = face.getLeftEyePosition().y;
faceLandmarks[i * FACE_LANDMARK_SIZE + j++] = face.getRightEyePosition().x;
faceLandmarks[i * FACE_LANDMARK_SIZE + j++] = face.getRightEyePosition().y;
faceLandmarks[i * FACE_LANDMARK_SIZE + j++] = face.getMouthPosition().x;
faceLandmarks[i * FACE_LANDMARK_SIZE + j++] = face.getMouthPosition().y;
}
i++;
}
set(CaptureResult.STATISTICS_FACE_RECTANGLES, faceRectangles);
set(CaptureResult.STATISTICS_FACE_IDS, faceIds);
set(CaptureResult.STATISTICS_FACE_LANDMARKS, faceLandmarks);
set(CaptureResult.STATISTICS_FACE_SCORES, faceScores);
return true;
}
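The landmark array written above uses a fixed layout of FACE_LANDMARK_SIZE (six) ints per face, in the order left eye (x, y), right eye (x, y), mouth (x, y). The following self-contained sketch mirrors the same packing to make the index math explicit; the class and method names are illustrative, not AOSP's.

import android.graphics.Point;

// Hypothetical standalone mirror of the packing above (names are illustrative).
final class FaceLandmarkPacking {
    static final int FACE_LANDMARK_SIZE = 6; // 3 points x 2 coordinates per face

    static int[] pack(Point[] leftEyes, Point[] rightEyes, Point[] mouths) {
        int[] landmarks = new int[leftEyes.length * FACE_LANDMARK_SIZE];
        for (int i = 0; i < leftEyes.length; i++) {
            int base = i * FACE_LANDMARK_SIZE;
            landmarks[base] = leftEyes[i].x;     // left eye x
            landmarks[base + 1] = leftEyes[i].y; // left eye y
            landmarks[base + 2] = rightEyes[i].x;
            landmarks[base + 3] = rightEyes[i].y;
            landmarks[base + 4] = mouths[i].x;
            landmarks[base + 5] = mouths[i].y;
        }
        return landmarks;
    }
}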
use of android.hardware.camera2.params.Face in project android_frameworks_base by crdroidandroid.
the class CameraMetadataNative method getFaces.
private Face[] getFaces() {
Integer faceDetectMode = get(CaptureResult.STATISTICS_FACE_DETECT_MODE);
byte[] faceScores = get(CaptureResult.STATISTICS_FACE_SCORES);
Rect[] faceRectangles = get(CaptureResult.STATISTICS_FACE_RECTANGLES);
int[] faceIds = get(CaptureResult.STATISTICS_FACE_IDS);
int[] faceLandmarks = get(CaptureResult.STATISTICS_FACE_LANDMARKS);
if (areValuesAllNull(faceDetectMode, faceScores, faceRectangles, faceIds, faceLandmarks)) {
return null;
}
if (faceDetectMode == null) {
Log.w(TAG, "Face detect mode metadata is null, assuming the mode is SIMPLE");
faceDetectMode = CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE;
} else {
if (faceDetectMode == CaptureResult.STATISTICS_FACE_DETECT_MODE_OFF) {
return new Face[0];
}
if (faceDetectMode != CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE && faceDetectMode != CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL) {
Log.w(TAG, "Unknown face detect mode: " + faceDetectMode);
return new Face[0];
}
}
// Face scores and rectangles are required by SIMPLE and FULL mode.
if (faceScores == null || faceRectangles == null) {
Log.w(TAG, "Expect face scores and rectangles to be non-null");
return new Face[0];
} else if (faceScores.length != faceRectangles.length) {
Log.w(TAG, String.format("Face score size(%d) doesn match face rectangle size(%d)!", faceScores.length, faceRectangles.length));
}
// To be safe, use the minimum of all face metadata lengths as the number of faces.
int numFaces = Math.min(faceScores.length, faceRectangles.length);
// Face id and landmarks are only required by FULL mode.
if (faceDetectMode == CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL) {
if (faceIds == null || faceLandmarks == null) {
Log.w(TAG, "Expect face ids and landmarks to be non-null for FULL mode," + "fallback to SIMPLE mode");
faceDetectMode = CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE;
} else {
if (faceIds.length != numFaces || faceLandmarks.length != numFaces * FACE_LANDMARK_SIZE) {
Log.w(TAG, String.format("Face id size(%d), or face landmark size(%d) don't" + "match face number(%d)!", faceIds.length, faceLandmarks.length * FACE_LANDMARK_SIZE, numFaces));
}
// To be safe, use the minimum of all face metadata lengths as the number of faces.
numFaces = Math.min(numFaces, faceIds.length);
numFaces = Math.min(numFaces, faceLandmarks.length / FACE_LANDMARK_SIZE);
}
}
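// Build the Face objects, dropping any entry whose score falls outside
// [Face.SCORE_MIN, Face.SCORE_MAX] (and, in FULL mode, any negative id).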
ArrayList<Face> faceList = new ArrayList<Face>();
if (faceDetectMode == CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE) {
for (int i = 0; i < numFaces; i++) {
if (faceScores[i] <= Face.SCORE_MAX && faceScores[i] >= Face.SCORE_MIN) {
faceList.add(new Face(faceRectangles[i], faceScores[i]));
}
}
} else {
// CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL
for (int i = 0; i < numFaces; i++) {
if (faceScores[i] <= Face.SCORE_MAX && faceScores[i] >= Face.SCORE_MIN && faceIds[i] >= 0) {
Point leftEye = new Point(faceLandmarks[i * FACE_LANDMARK_SIZE], faceLandmarks[i * FACE_LANDMARK_SIZE + 1]);
Point rightEye = new Point(faceLandmarks[i * FACE_LANDMARK_SIZE + 2], faceLandmarks[i * FACE_LANDMARK_SIZE + 3]);
Point mouth = new Point(faceLandmarks[i * FACE_LANDMARK_SIZE + 4], faceLandmarks[i * FACE_LANDMARK_SIZE + 5]);
Face face = new Face(faceRectangles[i], faceScores[i], faceIds[i], leftEye, rightEye, mouth);
faceList.add(face);
}
}
}
Face[] faces = new Face[faceList.size()];
faceList.toArray(faces);
return faces;
}
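None of this face metadata is produced unless face detection was enabled on the request. A short sketch of the request side follows; the keys are standard camera2 API, while characteristics and previewRequestBuilder are assumed to come from an already-opened camera.

import android.hardware.camera2.CameraCharacteristics;
import android.hardware.camera2.CaptureRequest;

// Sketch: request the most detailed face detect mode the device advertises.
// 'characteristics' and 'previewRequestBuilder' are assumed to exist.
int[] availableModes = characteristics.get(
        CameraCharacteristics.STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES);
int mode = CaptureRequest.STATISTICS_FACE_DETECT_MODE_OFF;
for (int m : availableModes) {
    if (m == CaptureRequest.STATISTICS_FACE_DETECT_MODE_FULL) {
        mode = m; // FULL: ids and eye/mouth landmarks
        break;
    } else if (m == CaptureRequest.STATISTICS_FACE_DETECT_MODE_SIMPLE) {
        mode = m; // SIMPLE: rectangles and scores only
    }
}
previewRequestBuilder.set(CaptureRequest.STATISTICS_FACE_DETECT_MODE, mode);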
use of android.hardware.camera2.params.Face in project android_frameworks_base by crdroidandroid.
the class CameraMetadataTest method testReadWriteOverride.
@SmallTest
public void testReadWriteOverride() {
//
// android.scaler.availableFormats (int x n array)
//
int[] availableFormats = new int[] {
0x20, // RAW_SENSOR
0x32315659, // YV12
0x11, // YCrCb_420_SP
0x100, // ImageFormat.JPEG
0x22, // IMPLEMENTATION_DEFINED
0x23 }; // YCbCr_420_888
int[] expectedIntValues = new int[] {
0x20, // RAW_SENSOR
0x32315659, // YV12
0x11, // YCrCb_420_SP
0x21, // BLOB
0x22, // IMPLEMENTATION_DEFINED
0x23 }; // YCbCr_420_888
int availableFormatTag = CameraMetadataNative.getTag("android.scaler.availableFormats");
Key<int[]> formatKey = CameraCharacteristics.SCALER_AVAILABLE_FORMATS.getNativeKey();
validateArrayMetadataReadWriteOverride(formatKey, availableFormats, expectedIntValues, availableFormatTag);
//
// android.statistics.faces (Face x n array)
//
int[] expectedFaceIds = new int[] { 1, 2, 3, 4, 5 };
byte[] expectedFaceScores = new byte[] { 10, 20, 30, 40, 50 };
int numFaces = expectedFaceIds.length;
Rect[] expectedRects = new Rect[numFaces];
for (int i = 0; i < numFaces; i++) {
expectedRects[i] = new Rect(i * 4 + 1, i * 4 + 2, i * 4 + 3, i * 4 + 4);
}
int[] expectedFaceLM = new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 };
Point[] expectedFaceLMPoints = new Point[numFaces * 3];
for (int i = 0; i < numFaces; i++) {
expectedFaceLMPoints[i * 3] = new Point(expectedFaceLM[i * 6], expectedFaceLM[i * 6 + 1]);
expectedFaceLMPoints[i * 3 + 1] = new Point(expectedFaceLM[i * 6 + 2], expectedFaceLM[i * 6 + 3]);
expectedFaceLMPoints[i * 3 + 2] = new Point(expectedFaceLM[i * 6 + 4], expectedFaceLM[i * 6 + 5]);
}
/**
* Read - FACE_DETECT_MODE == FULL
*/
mMetadata.set(CaptureResult.STATISTICS_FACE_DETECT_MODE, CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL);
mMetadata.set(CaptureResult.STATISTICS_FACE_IDS, expectedFaceIds);
mMetadata.set(CaptureResult.STATISTICS_FACE_SCORES, expectedFaceScores);
mMetadata.set(CaptureResult.STATISTICS_FACE_RECTANGLES, expectedRects);
mMetadata.set(CaptureResult.STATISTICS_FACE_LANDMARKS, expectedFaceLM);
Face[] resultFaces = mMetadata.get(CaptureResult.STATISTICS_FACES);
assertEquals(numFaces, resultFaces.length);
for (int i = 0; i < numFaces; i++) {
assertEquals(expectedFaceIds[i], resultFaces[i].getId());
assertEquals(expectedFaceScores[i], resultFaces[i].getScore());
assertEquals(expectedRects[i], resultFaces[i].getBounds());
assertEquals(expectedFaceLMPoints[i * 3], resultFaces[i].getLeftEyePosition());
assertEquals(expectedFaceLMPoints[i * 3 + 1], resultFaces[i].getRightEyePosition());
assertEquals(expectedFaceLMPoints[i * 3 + 2], resultFaces[i].getMouthPosition());
}
/**
* Read - FACE_DETECT_MODE == SIMPLE
*/
mMetadata.set(CaptureResult.STATISTICS_FACE_DETECT_MODE, CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE);
mMetadata.set(CaptureResult.STATISTICS_FACE_SCORES, expectedFaceScores);
mMetadata.set(CaptureResult.STATISTICS_FACE_RECTANGLES, expectedRects);
Face[] resultSimpleFaces = mMetadata.get(CaptureResult.STATISTICS_FACES);
assertEquals(numFaces, resultSimpleFaces.length);
for (int i = 0; i < numFaces; i++) {
assertEquals(Face.ID_UNSUPPORTED, resultSimpleFaces[i].getId());
assertEquals(expectedFaceScores[i], resultSimpleFaces[i].getScore());
assertEquals(expectedRects[i], resultSimpleFaces[i].getBounds());
assertNull(resultSimpleFaces[i].getLeftEyePosition());
assertNull(resultSimpleFaces[i].getRightEyePosition());
assertNull(resultSimpleFaces[i].getMouthPosition());
}
/**
* Read/Write TonemapCurve
*/
float[] red = new float[] { 0.0f, 0.0f, 1.0f, 1.0f };
float[] green = new float[] { 0.0f, 1.0f, 1.0f, 0.0f };
float[] blue = new float[] { 0.0000f, 0.0000f, 0.0667f, 0.2920f, 0.1333f, 0.4002f, 0.2000f, 0.4812f, 0.2667f, 0.5484f, 0.3333f, 0.6069f, 0.4000f, 0.6594f, 0.4667f, 0.7072f, 0.5333f, 0.7515f, 0.6000f, 0.7928f, 0.6667f, 0.8317f, 0.7333f, 0.8685f, 0.8000f, 0.9035f, 0.8667f, 0.9370f, 0.9333f, 0.9691f, 1.0000f, 1.0000f };
TonemapCurve tcIn = new TonemapCurve(red, green, blue);
mMetadata.set(CaptureResult.TONEMAP_CURVE, tcIn);
float[] redOut = mMetadata.get(CaptureResult.TONEMAP_CURVE_RED);
float[] greenOut = mMetadata.get(CaptureResult.TONEMAP_CURVE_GREEN);
float[] blueOut = mMetadata.get(CaptureResult.TONEMAP_CURVE_BLUE);
assertArrayEquals(red, redOut);
assertArrayEquals(green, greenOut);
assertArrayEquals(blue, blueOut);
TonemapCurve tcOut = mMetadata.get(CaptureResult.TONEMAP_CURVE);
assertEquals(tcIn, tcOut);
mMetadata.set(CaptureResult.TONEMAP_CURVE_GREEN, null);
// If any channel's curve is null, getting TONEMAP_CURVE returns null
assertNull(mMetadata.get(CaptureResult.TONEMAP_CURVE));
}
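For reference, TonemapCurve stores each channel as interleaved (Pin, Pout) pairs in [0, 1], which is why the red and green arrays above hold two control points and the blue array sixteen. A minimal sketch of applying such a curve on the request side, assuming a previewRequestBuilder exists:

import android.hardware.camera2.CaptureRequest;
import android.hardware.camera2.params.TonemapCurve;

// Sketch: a linear tone curve on all three channels via CONTRAST_CURVE mode.
// 'previewRequestBuilder' is assumed to exist.
float[] linear = { 0.0f, 0.0f, 1.0f, 1.0f }; // two (Pin, Pout) control points
TonemapCurve curve = new TonemapCurve(linear, linear, linear);
previewRequestBuilder.set(CaptureRequest.TONEMAP_MODE,
        CaptureRequest.TONEMAP_MODE_CONTRAST_CURVE);
previewRequestBuilder.set(CaptureRequest.TONEMAP_CURVE, curve);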
use of android.hardware.camera2.params.Face in project android_frameworks_base by crdroidandroid.
the class ParameterUtils method convertFaceFromLegacy.
/**
* Convert an api1 face into an active-array based api2 face.
*
* <p>Out-of-range scores and ids will be clipped to be within range (with a warning).</p>
*
* @param face a non-{@code null} api1 face
* @param activeArray active array size of the sensor (e.g. max jpeg size)
* @param zoomData the calculated zoom data corresponding to this request
*
* @return a non-{@code null} api2 face
*
* @throws NullPointerException if the {@code face} was {@code null}
*/
public static Face convertFaceFromLegacy(Camera.Face face, Rect activeArray, ZoomData zoomData) {
checkNotNull(face, "face must not be null");
Face api2Face;
Camera.Area fakeArea = new Camera.Area(face.rect, /*weight*/ 1);
WeightedRectangle faceRect = convertCameraAreaToActiveArrayRectangle(activeArray, zoomData, fakeArea);
Point leftEye = face.leftEye, rightEye = face.rightEye, mouth = face.mouth;
// A coordinate of -2000 appears to be used by the legacy layer as a sentinel for an
// unreported landmark; only convert and report landmarks when all three are valid.
if (leftEye != null && rightEye != null && mouth != null && leftEye.x != -2000 && leftEye.y != -2000 && rightEye.x != -2000 && rightEye.y != -2000 && mouth.x != -2000 && mouth.y != -2000) {
leftEye = convertCameraPointToActiveArrayPoint(activeArray, zoomData, leftEye, /*usePreviewCrop*/ true);
rightEye = convertCameraPointToActiveArrayPoint(activeArray, zoomData, rightEye, /*usePreviewCrop*/ true);
mouth = convertCameraPointToActiveArrayPoint(activeArray, zoomData, mouth, /*usePreviewCrop*/ true);
api2Face = faceRect.toFace(face.id, leftEye, rightEye, mouth);
} else {
api2Face = faceRect.toFace();
}
return api2Face;
}
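The legacy camera reports face geometry in a viewfinder-normalized space where both axes run from -1000 to 1000. The helper below is an illustrative sketch of that mapping, assuming the target is a crop rectangle expressed in active-array coordinates; the class name, method name, and exact rounding behavior are ours, not the AOSP implementation.

import android.graphics.Point;
import android.graphics.Rect;

// Illustrative only: map a camera1 point in [-1000, 1000] x [-1000, 1000] onto a
// crop rectangle given in active-array coordinates (hypothetical helper).
final class LegacyPointMapping {
    static Point mapLegacyPointToActiveArray(Point legacy, Rect previewCrop) {
        // Normalize each axis from [-1000, 1000] to [0, 1]
        double nx = (legacy.x + 1000) / 2000.0;
        double ny = (legacy.y + 1000) / 2000.0;
        // Scale into the crop rectangle, then offset into active-array coordinates
        int x = previewCrop.left + (int) Math.round(nx * previewCrop.width());
        int y = previewCrop.top + (int) Math.round(ny * previewCrop.height());
        return new Point(x, y);
    }
}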