Use of com.google.android.gms.vision.face.FaceDetector in project android-vision by googlesamples.
The class GooglyEyesActivity, method createFaceDetector.
//==============================================================================================
// Detector
//==============================================================================================
/**
* Creates the face detector and associated processing pipeline to support either front facing
* mode or rear facing mode. Checks if the detector is ready to use, and displays a low storage
* warning if it was not possible to download the face library.
*/
@NonNull
private FaceDetector createFaceDetector(Context context) {
// For both front facing and rear facing modes, the detector is initialized to do landmark
// detection (to find the eyes), classification (to determine if the eyes are open), and
// tracking.
//
// Use of "fast mode" enables faster detection for frontward faces, at the expense of not
// attempting to detect faces at more varied angles (e.g., faces in profile). Therefore,
// faces that are turned too far won't be detected under fast mode.
//
// For front facing mode only, the detector will use the "prominent face only" setting,
// which is optimized for tracking a single relatively large face. This setting allows the
// detector to take some shortcuts to make tracking faster, at the expense of not being able
// to track multiple faces.
//
// Setting the minimum face size not only controls how large faces must be in order to be
// detected, it also affects performance. Since it takes longer to scan for smaller faces,
// the rear facing mode raises the minimum face size slightly above the default in order to
// make tracking faster (at the expense of missing smaller faces). This optimization matters
// less for the front facing case, because when "prominent face only" is enabled, the
// detector stops scanning for faces after it has found the first (large) face.
FaceDetector detector = new FaceDetector.Builder(context)
        .setLandmarkType(FaceDetector.ALL_LANDMARKS)
        .setClassificationType(FaceDetector.ALL_CLASSIFICATIONS)
        .setTrackingEnabled(true)
        .setMode(FaceDetector.FAST_MODE)
        .setProminentFaceOnly(mIsFrontFacing)
        .setMinFaceSize(mIsFrontFacing ? 0.35f : 0.15f)
        .build();
Detector.Processor<Face> processor;
if (mIsFrontFacing) {
// For front facing mode, a single tracker instance is used with an associated focusing
// processor. This configuration allows the face detector to take some shortcuts to
// speed up detection, in that it can quit after finding a single face and can assume
// that the next face position is usually relatively close to the last seen
// face position.
Tracker<Face> tracker = new GooglyFaceTracker(mGraphicOverlay);
processor = new LargestFaceFocusingProcessor.Builder(detector, tracker).build();
} else {
// For rear facing mode, a factory is used to create per-face tracker instances. A
// tracker is created for each face and is maintained as long as the same face is
// visible, enabling per-face state to be maintained over time. This is used to store
// the iris position and velocity for each face independently, simulating the motion of
// the eyes of any number of faces over time.
//
// Both the front facing mode and the rear facing mode use the same tracker
// implementation, avoiding the need for any additional code. The only difference
// between these cases is the choice of Processor: one that is specialized for tracking
// a single face or one that can handle multiple faces. Here, we use MultiProcessor,
// which is a standard component of the mobile vision API for managing multiple items.
MultiProcessor.Factory<Face> factory = new MultiProcessor.Factory<Face>() {
@Override
public Tracker<Face> create(Face face) {
return new GooglyFaceTracker(mGraphicOverlay);
}
};
processor = new MultiProcessor.Builder<>(factory).build();
}
detector.setProcessor(processor);
if (!detector.isOperational()) {
// Note: The first time that an app using the face API is installed on a device, GMS will
// download a native library to the device in order to do detection. Usually this
// completes before the app is run for the first time. But if that download has not yet
// completed, then the above call will not detect any faces.
//
// isOperational() can be used to check if the required native library is currently
// available. The detector will automatically become operational once the library
// download completes on device.
Log.w(TAG, "Face detector dependencies are not yet available.");
// Check for low storage. If there is low storage, the native library will not be
// downloaded, so detection will not become operational.
IntentFilter lowStorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
boolean hasLowStorage = registerReceiver(null, lowStorageFilter) != null;
if (hasLowStorage) {
Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
Log.w(TAG, getString(R.string.low_storage_error));
}
}
return detector;
}
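GooglyFaceTracker itself is not shown in this excerpt. Both branches above feed the same Tracker<Face> implementation; for orientation, a per-face tracker generally overrides the four lifecycle callbacks sketched below. This is a minimal sketch, assuming the samples' GraphicOverlay class; the bodies are placeholders describing where the sample would maintain its per-face iris state.
// Minimal sketch of a per-face Tracker<Face>; GooglyFaceTracker's actual fields
// and drawing code are not shown in this excerpt, so the bodies are placeholders.
class SketchFaceTracker extends Tracker<Face> {
    private final GraphicOverlay mOverlay;

    SketchFaceTracker(GraphicOverlay overlay) {
        mOverlay = overlay;
    }

    @Override
    public void onNewItem(int id, Face face) {
        // Invoked once when a face is first detected: create per-face state here
        // (e.g., initial iris position and velocity).
    }

    @Override
    public void onUpdate(Detector.Detections<Face> detections, Face face) {
        // Invoked for each frame in which the face is still detected: read the eye
        // landmarks and open/closed probabilities from the Face, then redraw.
    }

    @Override
    public void onMissing(Detector.Detections<Face> detections) {
        // Invoked when the face is temporarily undetected: hide its graphic.
    }

    @Override
    public void onDone() {
        // Invoked when the face is assumed gone for good: remove graphic and state.
    }
}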
Use of com.google.android.gms.vision.face.FaceDetector in project HomeMirror by HannahMitt.
The class MoodModule, method createCameraSource.
/**
 * Creates and starts the camera source with a front-facing camera, using a face detector
 * configured for classification so that a smile probability is available for each face.
 */
private void createCameraSource() {
Context context = mContextWeakReference.get();
FaceDetector detector = new FaceDetector.Builder(context)
        .setClassificationType(FaceDetector.ALL_CLASSIFICATIONS)
        .build();
detector.setProcessor(new Detector.Processor<Face>() {
@Override
public void release() {
}
@Override
public void receiveDetections(final Detector.Detections<Face> detections) {
final SparseArray<Face> detectedItems = detections.getDetectedItems();
if (detectedItems.size() != 0) {
final int key = detectedItems.keyAt(0);
final Face face = detectedItems.get(key);
final float isSmilingProbability = face.getIsSmilingProbability();
String feedback = getFeedbackForSmileProbability(isSmilingProbability);
mCallBacks.onShouldGivePositiveAffirmation(feedback);
}
}
});
if (!detector.isOperational()) {
// Note: The first time that an app using the face API is installed on a device, GMS will
// download a native library to the device in order to do detection. Usually this
// completes before the app is run for the first time. But if that download has not yet
// completed, then the above call will not detect any faces.
//
// isOperational() can be used to check if the required native library is currently
// available. The detector will automatically become operational once the library
// download completes on device.
Log.w(TAG, "Face detector dependencies are not yet available.");
}
try {
mCameraSource = new CameraSource.Builder(context, detector)
        .setRequestedPreviewSize(640, 480)
        .setFacing(CameraSource.CAMERA_FACING_FRONT)
        .setRequestedFps(30.0f)
        .build();
mCameraSource.start();
} catch (IOException | RuntimeException e) {
Log.e(TAG, "Something went horribly wrong, with your face.", e);
}
}
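getFeedbackForSmileProbability is not shown in this excerpt. A plausible sketch of such a mapping follows; the thresholds and strings are illustrative assumptions, not HomeMirror's actual values.
// Illustrative sketch only; the real thresholds and messages may differ.
// getIsSmilingProbability() returns Face.UNCOMPUTED_PROBABILITY (-1) when the
// classification could not be computed for the frame.
private String getFeedbackForSmileProbability(float smileProbability) {
    if (smileProbability > 0.8f) {
        return "Great smile!";
    } else if (smileProbability > 0.4f) {
        return "Looking good.";
    } else if (smileProbability >= 0) {
        return "Smile, you're on camera!";
    }
    // Face.UNCOMPUTED_PROBABILITY: classification unavailable for this frame.
    return "";
}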
Use of com.google.android.gms.vision.face.FaceDetector in project android-vision by googlesamples.
The class FaceTrackerActivity, method createCameraSource.
/**
 * Creates the camera source with a rear-facing camera and a face detector configured for
 * classification, wired to a multi-processor that creates a tracker per detected face.
 */
private void createCameraSource() {
Context context = getApplicationContext();
FaceDetector detector = new FaceDetector.Builder(context)
        .setClassificationType(FaceDetector.ALL_CLASSIFICATIONS)
        .build();
detector.setProcessor(new MultiProcessor.Builder<>(new GraphicFaceTrackerFactory()).build());
if (!detector.isOperational()) {
// Note: The first time that an app using the face API is installed on a device, GMS will
// download a native library to the device in order to do detection. Usually this
// completes before the app is run for the first time. But if that download has not yet
// completed, then the above call will not detect any faces.
//
// isOperational() can be used to check if the required native library is currently
// available. The detector will automatically become operational once the library
// download completes on device.
Log.w(TAG, "Face detector dependencies are not yet available.");
}
mCameraSource = new CameraSource.Builder(context, detector)
        .setRequestedPreviewSize(640, 480)
        .setFacing(CameraSource.CAMERA_FACING_BACK)
        .setRequestedFps(30.0f)
        .build();
}
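GraphicFaceTrackerFactory is referenced above but not shown. In the sample it is a MultiProcessor.Factory<Face> along the following lines; the tracker's constructor argument is an assumption based on the overlay used elsewhere in these samples.
// Sketch of the factory wired into the MultiProcessor above; the tracker type
// and its constructor argument are assumptions, not verbatim sample code.
private class GraphicFaceTrackerFactory implements MultiProcessor.Factory<Face> {
    @Override
    public Tracker<Face> create(Face face) {
        return new GraphicFaceTracker(mGraphicOverlay);
    }
}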
Use of com.google.android.gms.vision.face.FaceDetector in project android-vision by googlesamples.
The class MultiTrackerActivity, method createCameraSource.
/**
 * Creates the camera source. Note that this uses a higher resolution in comparison to
 * other detection examples to enable the barcode detector to detect small barcodes at
 * long distances.
 */
private void createCameraSource() {
Context context = getApplicationContext();
// A face detector is created to track faces. An associated multi-processor instance
// is set to receive the face detection results, track the faces, and maintain graphics for
// each face on screen. The factory is used by the multi-processor to create a separate
// tracker instance for each face.
FaceDetector faceDetector = new FaceDetector.Builder(context).build();
FaceTrackerFactory faceFactory = new FaceTrackerFactory(mGraphicOverlay);
faceDetector.setProcessor(new MultiProcessor.Builder<>(faceFactory).build());
// A barcode detector is created to track barcodes. An associated multi-processor instance
// is set to receive the barcode detection results, track the barcodes, and maintain
// graphics for each barcode on screen. The factory is used by the multi-processor to
// create a separate tracker instance for each barcode.
BarcodeDetector barcodeDetector = new BarcodeDetector.Builder(context).build();
BarcodeTrackerFactory barcodeFactory = new BarcodeTrackerFactory(mGraphicOverlay);
barcodeDetector.setProcessor(new MultiProcessor.Builder<>(barcodeFactory).build());
// A multi-detector groups the two detectors together as one detector. All images received
// by this detector from the camera will be sent to each of the underlying detectors, which
// will each do face and barcode detection, respectively. The detection results from each
// are then sent to associated tracker instances which maintain per-item graphics on the
// screen.
MultiDetector multiDetector = new MultiDetector.Builder()
        .add(faceDetector)
        .add(barcodeDetector)
        .build();
if (!multiDetector.isOperational()) {
// Note: The first time that an app using the barcode or face API is installed on a
// device, GMS will download native libraries to the device in order to do detection.
// Usually this completes before the app is run for the first time. But if that
// download has not yet completed, then the above call will not detect any barcodes
// and/or faces.
//
// isOperational() can be used to check if the required native libraries are currently
// available. The detectors will automatically become operational once the library
// downloads complete on device.
Log.w(TAG, "Detector dependencies are not yet available.");
// Check for low storage. If there is low storage, the native library will not be
// downloaded, so detection will not become operational.
IntentFilter lowStorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
boolean hasLowStorage = registerReceiver(null, lowStorageFilter) != null;
if (hasLowStorage) {
Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
Log.w(TAG, getString(R.string.low_storage_error));
}
}
// Build the camera source. A higher resolution is requested here than in the other
// samples so that the barcode detector can pick up small barcodes at long distances.
mCameraSource = new CameraSource.Builder(getApplicationContext(), multiDetector)
        .setFacing(CameraSource.CAMERA_FACING_BACK)
        .setRequestedPreviewSize(1600, 1024)
        .setRequestedFps(15.0f)
        .build();
}
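Note that this method only builds mCameraSource; starting it happens later, against a preview surface. A minimal start/stop sketch, assuming camera permission has already been granted and a SurfaceHolder is available:
// Minimal sketch; CameraSource.start(SurfaceHolder) throws IOException if the
// camera cannot be opened, and requires the CAMERA permission at runtime.
private void startCameraSource(SurfaceHolder holder) {
    try {
        mCameraSource.start(holder);
    } catch (IOException e) {
        Log.e(TAG, "Unable to start camera source.", e);
        mCameraSource.release();
        mCameraSource = null;
    }
}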
Use of com.google.android.gms.vision.face.FaceDetector in project android-vision by googlesamples.
The class GooglyEyesActivity, method createCameraSource.
//==============================================================================================
// Camera Source
//==============================================================================================
/**
* Creates the face detector and the camera.
*/
private void createCameraSource() {
Context context = getApplicationContext();
FaceDetector detector = createFaceDetector(context);
int facing = CameraSource.CAMERA_FACING_FRONT;
if (!mIsFrontFacing) {
facing = CameraSource.CAMERA_FACING_BACK;
}
// The camera source is initialized to use either the front or rear facing camera. We use a
// relatively low resolution for the camera preview, since this is sufficient for this app
// and the face detector will run faster at lower camera resolutions.
//
// However, note that there is a speed/accuracy trade-off with respect to choosing the
// camera resolution. The face detector will run faster with lower camera resolutions,
// but may miss smaller faces, landmarks, or may not correctly detect eyes open/closed in
// comparison to using higher camera resolutions. If you have any of these issues, you may
// want to increase the resolution.
mCameraSource = new CameraSource.Builder(context, detector)
        .setFacing(facing)
        .setRequestedPreviewSize(320, 240)
        .setRequestedFps(60.0f)
        .setAutoFocusEnabled(true)
        .build();
}
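A CameraSource holds both the camera and the detector's native resources, so it should be stopped and released with the activity lifecycle. A typical pattern follows; the exact placement in the sample's activity is an assumption.
// Typical lifecycle handling for a CameraSource; placement is an assumption.
@Override
protected void onPause() {
    super.onPause();
    if (mCameraSource != null) {
        mCameraSource.stop(); // stops the preview; start() may be called again later
    }
}

@Override
protected void onDestroy() {
    super.onDestroy();
    if (mCameraSource != null) {
        mCameraSource.release(); // also releases the underlying detector
    }
}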