Use of com.google.android.gms.vision.Frame in project PairingExample by AinaWireless.
The class MainActivity, method onActivityResult.
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
    if (requestCode == PHOTO_REQUEST && resultCode == RESULT_OK) {
        mButton_scan.setEnabled(false);
        launchMediaScanIntent();
        try {
            Bitmap bitmap = decodeBitmapUri(this, imageUri);
            if (detector.isOperational() && bitmap != null) {
                Frame frame = new Frame.Builder().setBitmap(bitmap).build();
                SparseArray<Barcode> QRcodes = detector.detect(frame);
                for (int index = 0; index < QRcodes.size(); index++) {
                    int tmp;
                    mac_cnt = 0;
                    scanHeader.setTypeface(null, Typeface.BOLD);
                    scanHeader.setText("QR-Code content:");
                    Barcode code = QRcodes.valueAt(index);
                    scanResults.setText(code.displayValue);
                    // The QR payload is two 12-digit hex MAC addresses separated by '/'.
                    tmp = code.displayValue.indexOf("/");
                    String MAC = code.displayValue.substring(0, tmp);
                    // Insert ':' separators to turn the raw hex into a readable MAC address.
                    MACs[mac_cnt] = MAC.substring(0, 2) + ":";
                    MACs[mac_cnt] += MAC.substring(2, 4) + ":";
                    MACs[mac_cnt] += MAC.substring(4, 6) + ":";
                    MACs[mac_cnt] += MAC.substring(6, 8) + ":";
                    MACs[mac_cnt] += MAC.substring(8, 10) + ":";
                    MACs[mac_cnt] += MAC.substring(10, 12);
                    mac_cnt++;
                    // Second half of the payload: the address after the '/'.
                    tmp = code.displayValue.indexOf("/") + 1;
                    MAC = code.displayValue.substring(tmp, code.displayValue.length());
                    MACs[mac_cnt] = MAC.substring(0, 2) + ":";
                    MACs[mac_cnt] += MAC.substring(2, 4) + ":";
                    MACs[mac_cnt] += MAC.substring(4, 6) + ":";
                    MACs[mac_cnt] += MAC.substring(6, 8) + ":";
                    MACs[mac_cnt] += MAC.substring(8, 10) + ":";
                    MACs[mac_cnt] += MAC.substring(10, 12);
                    if (!MACs[1].equals(""))
                        mText_classicMac.setText("BTC MAC address = " + MACs[1]);
                    if (!MACs[0].equals(""))
                        mText_bleMac.setText("BLE MAC address = " + MACs[0]);
                    // Start the classic Bluetooth connection to the second address.
                    tryBTC = true;
                    TextUpdateHandler.post(updateRunnable);
                    Runnable r = new ConnectThread(MACs[1]);
                    new Thread(r).start();
                }
                if (QRcodes.size() == 0) {
                    mButton_scan.setEnabled(true);
                    scanHeader.setText("Scan Failed: Did not find a valid QR-Code");
                }
            } else {
                mButton_scan.setEnabled(true);
                scanHeader.setText("Could not set up the QR-scanner!");
            }
        } catch (Exception e) {
            Toast.makeText(this, "Failed to load QR-code file", Toast.LENGTH_SHORT).show();
        }
        File photo = new File(Environment.getExternalStorageDirectory(), "picture.jpg");
        if (!photo.delete()) {
            Toast.makeText(this, "Failed to delete qr-code picture file", Toast.LENGTH_SHORT).show();
        }
    }
}
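The snippet above calls a decodeBitmapUri helper that is not shown on this page. A minimal sketch of what such a helper could look like, assuming the captured photo is downsampled to a target size before detection; the 1024-pixel bound and the helper body are illustrative, not taken from the PairingExample project.

// Hypothetical helper matching the call above; the target size is an illustrative assumption.
private Bitmap decodeBitmapUri(Context ctx, Uri uri) throws FileNotFoundException {
    int targetW = 1024;
    int targetH = 1024;
    // First pass: read only the image bounds.
    BitmapFactory.Options options = new BitmapFactory.Options();
    options.inJustDecodeBounds = true;
    BitmapFactory.decodeStream(ctx.getContentResolver().openInputStream(uri), null, options);
    int photoW = options.outWidth;
    int photoH = options.outHeight;
    // Pick a downsample factor so the decoded bitmap roughly fits the target bounds.
    int scaleFactor = Math.max(1, Math.min(photoW / targetW, photoH / targetH));
    options.inJustDecodeBounds = false;
    options.inSampleSize = scaleFactor;
    return BitmapFactory.decodeStream(ctx.getContentResolver().openInputStream(uri), null, options);
}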
Use of com.google.android.gms.vision.Frame in project MiscellaneousStudy by mikoto2000.
The class MainActivity, method onCreate.
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    mCameraView = new SurfaceView(getApplicationContext());
    // Create a Detector that does nothing.
    Detector<Void> detector = new Detector<Void>() {

        @Override
        public SparseArray<Void> detect(Frame frame) {
            return null;
        }
    };
    // Create a do-nothing Processor and set it on the detector.
    detector.setProcessor(new Detector.Processor<Void>() {

        @Override
        public void release() {
        }

        @Override
        public void receiveDetections(Detector.Detections<Void> detections) {
        }
    });
    // Build the camera source.
    mCameraSource = new CameraSource.Builder(this, detector)
            .setRequestedPreviewSize(640, 480)
            .setRequestedFps(20.0f)
            .setAutoFocusEnabled(true)
            .setFacing(CameraSource.CAMERA_FACING_BACK)
            .build();
    // Register a SurfaceHolder.Callback on the SurfaceView.
    mCameraView.getHolder().addCallback(new SurfaceHolder.Callback() {

        @Override
        public void surfaceCreated(SurfaceHolder holder) {
            try {
                if (ActivityCompat.checkSelfPermission(getApplicationContext(), Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED) {
                    return;
                }
                mCameraSource.start(mCameraView.getHolder());
            } catch (IOException ie) {
                Log.e("CAMERA SOURCE", ie.getMessage());
            }
        }

        @Override
        public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
        }

        @Override
        public void surfaceDestroyed(SurfaceHolder holder) {
            mCameraSource.stop();
        }
    });
    setContentView(mCameraView);
}
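The Detector<Void> above is a deliberate no-op used only to exercise the camera pipeline. A real detector from the same Vision API can be dropped in its place; a minimal sketch, assuming the play-services-vision barcode dependency is available (restricting the formats to QR codes is an illustrative choice, not something this sample does).

// Replace the do-nothing detector in onCreate with a real barcode detector from the Vision API.
BarcodeDetector barcodeDetector = new BarcodeDetector.Builder(getApplicationContext())
        .setBarcodeFormats(Barcode.QR_CODE)
        .build();
barcodeDetector.setProcessor(new Detector.Processor<Barcode>() {

    @Override
    public void release() {
    }

    @Override
    public void receiveDetections(Detector.Detections<Barcode> detections) {
        SparseArray<Barcode> barcodes = detections.getDetectedItems();
        if (barcodes.size() > 0) {
            // Log the first decoded value; a real app would forward it to the UI thread.
            Log.d("BARCODE", barcodes.valueAt(0).displayValue);
        }
    }
});
mCameraSource = new CameraSource.Builder(this, barcodeDetector)
        .setRequestedPreviewSize(640, 480)
        .build();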
Use of com.google.android.gms.vision.Frame in project android-vision by googlesamples.
The class SafeFaceDetector, method padFrameRight.
/**
 * Creates a new frame based on the original frame, with additional width on the right to
 * increase the size to avoid the bug in the underlying face detector.
 */
private Frame padFrameRight(Frame originalFrame, int newWidth) {
    Frame.Metadata metadata = originalFrame.getMetadata();
    int width = metadata.getWidth();
    int height = metadata.getHeight();
    Log.i(TAG, "Padded image from: " + width + "x" + height + " to " + newWidth + "x" + height);
    ByteBuffer origBuffer = originalFrame.getGrayscaleImageData();
    int origOffset = origBuffer.arrayOffset();
    byte[] origBytes = origBuffer.array();
    // This can be changed to just .allocate in the future, when Frame supports non-direct
    // byte buffers.
    ByteBuffer paddedBuffer = ByteBuffer.allocateDirect(newWidth * height);
    int paddedOffset = paddedBuffer.arrayOffset();
    byte[] paddedBytes = paddedBuffer.array();
    Arrays.fill(paddedBytes, (byte) 0);
    for (int y = 0; y < height; ++y) {
        int origStride = origOffset + y * width;
        int paddedStride = paddedOffset + y * newWidth;
        System.arraycopy(origBytes, origStride, paddedBytes, paddedStride, width);
    }
    return new Frame.Builder()
            .setImageData(paddedBuffer, newWidth, height, ImageFormat.NV21)
            .setId(metadata.getId())
            .setRotation(metadata.getRotation())
            .setTimestampMillis(metadata.getTimestampMillis())
            .build();
}
Use of com.google.android.gms.vision.Frame in project android-vision by googlesamples.
The class SafeFaceDetector, method padFrameBottom.
/**
 * Creates a new frame based on the original frame, with additional height on the bottom to
 * increase the size to avoid the bug in the underlying face detector.
 */
private Frame padFrameBottom(Frame originalFrame, int newHeight) {
    Frame.Metadata metadata = originalFrame.getMetadata();
    int width = metadata.getWidth();
    int height = metadata.getHeight();
    Log.i(TAG, "Padded image from: " + width + "x" + height + " to " + width + "x" + newHeight);
    ByteBuffer origBuffer = originalFrame.getGrayscaleImageData();
    int origOffset = origBuffer.arrayOffset();
    byte[] origBytes = origBuffer.array();
    // This can be changed to just .allocate in the future, when Frame supports non-direct
    // byte buffers.
    ByteBuffer paddedBuffer = ByteBuffer.allocateDirect(width * newHeight);
    int paddedOffset = paddedBuffer.arrayOffset();
    byte[] paddedBytes = paddedBuffer.array();
    Arrays.fill(paddedBytes, (byte) 0);
    // Copy each original row into the top of the padded buffer; the rows below it stay
    // zero-filled and form the padded part.
    for (int y = 0; y < height; ++y) {
        int origStride = origOffset + y * width;
        int paddedStride = paddedOffset + y * width;
        System.arraycopy(origBytes, origStride, paddedBytes, paddedStride, width);
    }
    return new Frame.Builder()
            .setImageData(paddedBuffer, width, newHeight, ImageFormat.NV21)
            .setId(metadata.getId())
            .setRotation(metadata.getRotation())
            .setTimestampMillis(metadata.getTimestampMillis())
            .build();
}
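padFrameRight and padFrameBottom are only useful when a wrapping detect() decides a frame is too small for the underlying face detector. A minimal sketch of such a wrapper, assuming a hypothetical MIN_DIMENSION threshold and an mDelegate field holding the wrapped detector; the names and the 147-pixel value are illustrative, not taken verbatim from the sample.

// Hypothetical wrapper: pad undersized frames before delegating to the real face detector.
@Override
public SparseArray<Face> detect(Frame frame) {
    final int MIN_DIMENSION = 147; // assumed minimum size the underlying detector handles safely
    if (frame.getMetadata().getWidth() < MIN_DIMENSION) {
        frame = padFrameRight(frame, MIN_DIMENSION);
    }
    if (frame.getMetadata().getHeight() < MIN_DIMENSION) {
        frame = padFrameBottom(frame, MIN_DIMENSION);
    }
    // mDelegate is the real com.google.android.gms.vision.face.FaceDetector being wrapped.
    return mDelegate.detect(frame);
}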
Use of com.google.android.gms.vision.Frame in project CameraKit-Android by flurgle.
The class FrameProcessingRunnable, method run.
/**
 * As long as the processing thread is active, this executes detection on frames
 * continuously. The next pending frame is either immediately available or hasn't been
 * received yet. Once it is available, we transfer the frame info to local variables and
 * run detection on that frame. It immediately loops back for the next frame without
 * pausing.
 * <p/>
 * If detection takes longer than the time in between new frames from the camera, this will
 * mean that this loop will run without ever waiting on a frame, avoiding any context
 * switching or frame acquisition time latency.
 * <p/>
 * If you find that this is using more CPU than you'd like, you should probably decrease the
 * FPS setting above to allow for some idle time in between frames.
 */
@Override
public void run() {
    Frame outputFrame;
    java.nio.ByteBuffer data;
    while (true) {
        synchronized (mLock) {
            while (mActive && (mPendingFrameData == null)) {
                try {
                    // Wait for the next frame to be received from the camera, since we
                    // don't have it yet.
                    mLock.wait();
                } catch (InterruptedException e) {
                    return;
                }
            }
            if (!mActive) {
                // The processing has been deactivated; exit the loop.
                return;
            }
            if (mPreviewSize == null) {
                // Wait for the preview size to be set before building frames.
                Log.d("WHAT", "waitin for preview size to not be null");
                continue;
            }
            outputFrame = new Frame.Builder()
                    .setImageData(mPendingFrameData, mPreviewSize.getWidth(), mPreviewSize.getHeight(), android.graphics.ImageFormat.NV21)
                    .setId(mPendingFrameId)
                    .setTimestampMillis(mPendingTimeMillis)
                    .setRotation(0)
                    .build();
            // Hold onto the frame data locally, so that we can use this for detection
            // below. We need to clear mPendingFrameData to ensure that this buffer isn't
            // recycled back to the camera before we are done using that data.
            data = mPendingFrameData;
            mPendingFrameData = null;
        }
        try {
            mDetector.receiveFrame(outputFrame);
        } catch (Throwable t) {
            Log.e(TAG, "Exception thrown from receiver.", t);
        } finally {
            mCamera.addCallbackBuffer(data.array());
        }
    }
}
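run() only consumes frames; something must hand it mPendingFrameData and wake it up. A minimal sketch of that producer side, assuming it is invoked from Camera.PreviewCallback.onPreviewFrame and that the field names mirror those used above; setNextFrame, mStartTimeMillis, and the buffer handling here are simplified and hypothetical rather than the project's exact code.

// Hypothetical producer: called with each preview buffer from android.hardware.Camera.
void setNextFrame(byte[] data, Camera camera) {
    synchronized (mLock) {
        if (mPendingFrameData != null) {
            // Return the previous, unprocessed buffer to the camera so it can be refilled.
            camera.addCallbackBuffer(mPendingFrameData.array());
            mPendingFrameData = null;
        }
        mPendingTimeMillis = SystemClock.elapsedRealtime() - mStartTimeMillis;
        mPendingFrameId++;
        mPendingFrameData = ByteBuffer.wrap(data);
        // Wake up run(), which is blocked in mLock.wait() until a frame arrives.
        mLock.notifyAll();
    }
}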