use of android.hardware.camera2.params.Face in project android_frameworks_base by DirtyUnicorns.
the class ParameterUtils method convertFaceFromLegacy.
/**
* Convert an api1 face into an active-array based api2 face.
*
* <p>Out-of-range scores and ids will be clipped to be within range (with a warning).</p>
*
* @param face a non-{@code null} api1 face
* @param activeArray active array size of the sensor (e.g. max jpeg size)
* @param zoomData the calculated zoom data corresponding to this request
*
* @return a non-{@code null} api2 face
*
* @throws NullPointerException if the {@code face} was {@code null}
*/
public static Face convertFaceFromLegacy(Camera.Face face, Rect activeArray, ZoomData zoomData) {
checkNotNull(face, "face must not be null");
Face api2Face;
Camera.Area fakeArea = new Camera.Area(face.rect, /*weight*/1);
WeightedRectangle faceRect = convertCameraAreaToActiveArrayRectangle(activeArray, zoomData, fakeArea);
Point leftEye = face.leftEye, rightEye = face.rightEye, mouth = face.mouth;
// (-2000, -2000) is used by some legacy HALs as an "unsupported landmark" sentinel
// (it lies outside the valid [-1000, 1000] camera1 coordinate range).
if (leftEye != null && rightEye != null && mouth != null && leftEye.x != -2000 && leftEye.y != -2000 && rightEye.x != -2000 && rightEye.y != -2000 && mouth.x != -2000 && mouth.y != -2000) {
leftEye = convertCameraPointToActiveArrayPoint(activeArray, zoomData, leftEye, /*usePreviewCrop*/true);
rightEye = convertCameraPointToActiveArrayPoint(activeArray, zoomData, rightEye, /*usePreviewCrop*/true);
mouth = convertCameraPointToActiveArrayPoint(activeArray, zoomData, mouth, /*usePreviewCrop*/true);
api2Face = faceRect.toFace(face.id, leftEye, rightEye, mouth);
} else {
api2Face = faceRect.toFace();
}
return api2Face;
}
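For context: camera1 reports face coordinates in a device-independent (-1000, -1000) to (1000, 1000) space, while camera2 faces use active-array pixel coordinates. The helpers above derive the exact mapping from ZoomData (zoom and preview crop); ignoring that, the core transform looks roughly like the sketch below, where mapNormalizedToActiveArray is a hypothetical illustration, not a ParameterUtils method.

// Hypothetical sketch: map a camera1 point from the normalized
// [-1000, 1000] x [-1000, 1000] space onto active-array pixel coordinates.
// Ignores zoom/crop, which the real helpers compute from ZoomData.
static Point mapNormalizedToActiveArray(Point p, Rect activeArray) {
    // Shift [-1000, 1000] to [0, 2000], then scale to the array dimensions.
    int x = activeArray.left + (p.x + 1000) * activeArray.width() / 2000;
    int y = activeArray.top + (p.y + 1000) * activeArray.height() / 2000;
    return new Point(x, y);
}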
use of android.hardware.camera2.params.Face in project android_frameworks_base by DirtyUnicorns.
the class LegacyFaceDetectMapper method mapResultFaces.
/**
* Update the {@code result} camera metadata map with the new value for the
* {@code statistics.faces} and {@code statistics.faceDetectMode}.
*
* <p>Face detect callbacks are processed in the background, and each call to
* {@link #mapResultFaces} will have the latest faces as reflected by the camera1 callbacks.</p>
*
* <p>If the scene mode was set to {@code FACE_PRIORITY} but face detection is disabled,
* the camera will still run face detection in the background, but no faces will be reported
* in the capture result.</p>
*
* @param result a non-{@code null} result
* @param legacyRequest a non-{@code null} request (read-only)
*/
public void mapResultFaces(CameraMetadataNative result, LegacyRequest legacyRequest) {
checkNotNull(result, "result must not be null");
checkNotNull(legacyRequest, "legacyRequest must not be null");
Camera.Face[] faces, previousFaces;
int fdMode;
boolean fdScenePriority;
synchronized (mLock) {
fdMode = mFaceDetectReporting ? STATISTICS_FACE_DETECT_MODE_SIMPLE : STATISTICS_FACE_DETECT_MODE_OFF;
if (mFaceDetectReporting) {
faces = mFaces;
} else {
faces = null;
}
fdScenePriority = mFaceDetectScenePriority;
previousFaces = mFacesPrev;
mFacesPrev = faces;
}
CameraCharacteristics characteristics = legacyRequest.characteristics;
CaptureRequest request = legacyRequest.captureRequest;
Size previewSize = legacyRequest.previewSize;
Camera.Parameters params = legacyRequest.parameters;
Rect activeArray = characteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
ZoomData zoomData = ParameterUtils.convertScalerCropRegion(activeArray, request.get(CaptureRequest.SCALER_CROP_REGION), previewSize, params);
List<Face> convertedFaces = new ArrayList<>();
if (faces != null) {
for (Camera.Face face : faces) {
if (face != null) {
convertedFaces.add(ParameterUtils.convertFaceFromLegacy(face, activeArray, zoomData));
} else {
Log.w(TAG, "mapResultFaces - read NULL face from camera1 device");
}
}
}
if (DEBUG && previousFaces != faces) {
// Log only in verbose mode, and only if the faces changed
Log.v(TAG, "mapResultFaces - changed to " + ListUtils.listToString(convertedFaces));
}
result.set(CaptureResult.STATISTICS_FACES, convertedFaces.toArray(new Face[0]));
result.set(CaptureResult.STATISTICS_FACE_DETECT_MODE, fdMode);
// Override scene mode with FACE_PRIORITY if the request was using FACE_PRIORITY
if (fdScenePriority) {
result.set(CaptureResult.CONTROL_SCENE_MODE, CONTROL_SCENE_MODE_FACE_PRIORITY);
}
}
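On the application side, the faces published by mapResultFaces surface through the public camera2 result keys. A minimal consumer sketch, assuming an already-configured capture session and a TAG constant:

CameraCaptureSession.CaptureCallback callback = new CameraCaptureSession.CaptureCallback() {
    @Override
    public void onCaptureCompleted(CameraCaptureSession session,
            CaptureRequest request, TotalCaptureResult result) {
        // The legacy mapper filled these keys in mapResultFaces above.
        Face[] faces = result.get(CaptureResult.STATISTICS_FACES);
        Integer mode = result.get(CaptureResult.STATISTICS_FACE_DETECT_MODE);
        if (faces == null || mode == null
                || mode == CaptureResult.STATISTICS_FACE_DETECT_MODE_OFF) {
            return;
        }
        for (Face face : faces) {
            // Bounds are in active-array coordinates, per convertFaceFromLegacy.
            Log.d(TAG, "Face " + face.getBounds() + " score=" + face.getScore());
        }
    }
};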
use of android.hardware.camera2.params.Face in project android_frameworks_base by DirtyUnicorns.
the class CameraMetadataNative method getFaceRectangles.
// Face rectangles are defined as (left, top, right, bottom) instead of
// (left, top, width, height) at the native level, so the normal Rect
// conversion that does (l, t, w, h) -> (l, t, r, b) is unnecessary. Undo
// that conversion here for just the faces.
private Rect[] getFaceRectangles() {
Rect[] faceRectangles = getBase(CaptureResult.STATISTICS_FACE_RECTANGLES);
if (faceRectangles == null)
return null;
Rect[] fixedFaceRectangles = new Rect[faceRectangles.length];
for (int i = 0; i < faceRectangles.length; i++) {
fixedFaceRectangles[i] = new Rect(faceRectangles[i].left, faceRectangles[i].top, faceRectangles[i].right - faceRectangles[i].left, faceRectangles[i].bottom - faceRectangles[i].top);
}
return fixedFaceRectangles;
}
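A worked example of the undo step: suppose the native side stored a face rectangle as (left, top, right, bottom) = (100, 200, 300, 400), and the generic unmarshaling then treated those last two values as width and height.

// The generic (l, t, w, h) -> (l, t, r, b) conversion mangles the face rect:
Rect mangled = new Rect(100, 200, 100 + 300, 200 + 400); // (100, 200, 400, 600)
// getFaceRectangles' subtraction recovers the native values:
Rect fixed = new Rect(mangled.left, mangled.top,
        mangled.right - mangled.left,   // 300, the original native right
        mangled.bottom - mangled.top);  // 400, the original native bottom
// fixed is (100, 200, 300, 400): the intended (l, t, r, b) rectangle.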
use of android.hardware.camera2.params.Face in project android_frameworks_base by DirtyUnicorns.
the class CameraMetadataNative method getFaces.
private Face[] getFaces() {
Integer faceDetectMode = get(CaptureResult.STATISTICS_FACE_DETECT_MODE);
byte[] faceScores = get(CaptureResult.STATISTICS_FACE_SCORES);
Rect[] faceRectangles = get(CaptureResult.STATISTICS_FACE_RECTANGLES);
int[] faceIds = get(CaptureResult.STATISTICS_FACE_IDS);
int[] faceLandmarks = get(CaptureResult.STATISTICS_FACE_LANDMARKS);
if (areValuesAllNull(faceDetectMode, faceScores, faceRectangles, faceIds, faceLandmarks)) {
return null;
}
if (faceDetectMode == null) {
Log.w(TAG, "Face detect mode metadata is null, assuming the mode is SIMPLE");
faceDetectMode = CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE;
} else {
if (faceDetectMode == CaptureResult.STATISTICS_FACE_DETECT_MODE_OFF) {
return new Face[0];
}
if (faceDetectMode != CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE && faceDetectMode != CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL) {
Log.w(TAG, "Unknown face detect mode: " + faceDetectMode);
return new Face[0];
}
}
// Face scores and rectangles are required by SIMPLE and FULL mode.
if (faceScores == null || faceRectangles == null) {
Log.w(TAG, "Expect face scores and rectangles to be non-null");
return new Face[0];
} else if (faceScores.length != faceRectangles.length) {
Log.w(TAG, String.format("Face score size(%d) doesn match face rectangle size(%d)!", faceScores.length, faceRectangles.length));
}
// To be safe, use the minimum of all face metadata lengths as the number of faces.
int numFaces = Math.min(faceScores.length, faceRectangles.length);
// Face id and landmarks are only required by FULL mode.
if (faceDetectMode == CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL) {
if (faceIds == null || faceLandmarks == null) {
Log.w(TAG, "Expect face ids and landmarks to be non-null for FULL mode," + "fallback to SIMPLE mode");
faceDetectMode = CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE;
} else {
if (faceIds.length != numFaces || faceLandmarks.length != numFaces * FACE_LANDMARK_SIZE) {
Log.w(TAG, String.format("Face id size(%d), or face landmark size(%d) don't" + "match face number(%d)!", faceIds.length, faceLandmarks.length * FACE_LANDMARK_SIZE, numFaces));
}
// To be safe, use the minimum of all face metadata lengths as the number of faces.
numFaces = Math.min(numFaces, faceIds.length);
numFaces = Math.min(numFaces, faceLandmarks.length / FACE_LANDMARK_SIZE);
}
}
ArrayList<Face> faceList = new ArrayList<Face>();
if (faceDetectMode == CaptureResult.STATISTICS_FACE_DETECT_MODE_SIMPLE) {
for (int i = 0; i < numFaces; i++) {
if (faceScores[i] <= Face.SCORE_MAX && faceScores[i] >= Face.SCORE_MIN) {
faceList.add(new Face(faceRectangles[i], faceScores[i]));
}
}
} else {
// CaptureResult.STATISTICS_FACE_DETECT_MODE_FULL
for (int i = 0; i < numFaces; i++) {
if (faceScores[i] <= Face.SCORE_MAX && faceScores[i] >= Face.SCORE_MIN && faceIds[i] >= 0) {
Point leftEye = new Point(faceLandmarks[i * FACE_LANDMARK_SIZE], faceLandmarks[i * FACE_LANDMARK_SIZE + 1]);
Point rightEye = new Point(faceLandmarks[i * FACE_LANDMARK_SIZE + 2], faceLandmarks[i * FACE_LANDMARK_SIZE + 3]);
Point mouth = new Point(faceLandmarks[i * FACE_LANDMARK_SIZE + 4], faceLandmarks[i * FACE_LANDMARK_SIZE + 5]);
Face face = new Face(faceRectangles[i], faceScores[i], faceIds[i], leftEye, rightEye, mouth);
faceList.add(face);
}
}
}
Face[] faces = new Face[faceList.size()];
faceList.toArray(faces);
return faces;
}
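For getFaces to have anything to parse, the request must enable face detection. A request-side sketch using only public camera2 API, assuming an opened cameraDevice, its characteristics, and a previewSurface (exception handling omitted):

int[] modes = characteristics.get(
        CameraCharacteristics.STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES);
boolean fullSupported = false;
for (int mode : modes) {
    if (mode == CameraMetadata.STATISTICS_FACE_DETECT_MODE_FULL) {
        fullSupported = true;
    }
}
CaptureRequest.Builder builder =
        cameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
builder.addTarget(previewSurface);
// Prefer FULL (ids + landmarks) when available, else SIMPLE (scores + rects).
builder.set(CaptureRequest.STATISTICS_FACE_DETECT_MODE,
        fullSupported ? CameraMetadata.STATISTICS_FACE_DETECT_MODE_FULL
                : CameraMetadata.STATISTICS_FACE_DETECT_MODE_SIMPLE);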
use of android.hardware.camera2.params.Face in project android_frameworks_base by DirtyUnicorns.
the class CameraMetadataNative method setFaceRectangles.
/**
* Convert Face Rectangles from managed side to native side as they have different definitions.
* <p>
* Managed side face rectangles are defined as: left, top, width, height.
* Native side face rectangles are defined as: left, top, right, bottom.
* The input face rectangles need to be converted to the native-side definition when set is called.
* </p>
*
* @param faceRects Input face rectangles.
* @return true if face rectangles can be set successfully; otherwise, let the caller
* (setBase) handle it appropriately.
*/
private boolean setFaceRectangles(Rect[] faceRects) {
if (faceRects == null) {
return false;
}
Rect[] newFaceRects = new Rect[faceRects.length];
for (int i = 0; i < newFaceRects.length; i++) {
newFaceRects[i] = new Rect(faceRects[i].left, faceRects[i].top, faceRects[i].right + faceRects[i].left, faceRects[i].bottom + faceRects[i].top);
}
setBase(CaptureResult.STATISTICS_FACE_RECTANGLES, newFaceRects);
return true;
}
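This is the exact inverse of getFaceRectangles above: the addition here mirrors that method's subtraction, so a set followed by a get round-trips a rectangle unchanged. A sketch, where metadata is an assumed CameraMetadataNative instance:

// Round trip through the face-rectangle override helpers:
metadata.set(CaptureResult.STATISTICS_FACE_RECTANGLES,
        new Rect[] { new Rect(100, 200, 300, 400) }); // stored natively as (100, 200, 400, 600)
Rect[] back = metadata.get(CaptureResult.STATISTICS_FACE_RECTANGLES);
// back[0] is (100, 200, 300, 400) again: the addition in setFaceRectangles
// mirrors the subtraction in getFaceRectangles.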