Camera
Android Camera 允许你抓取一张图片或者一段视频,所以一般使用 Camera 类去获取视频源。
在说 Camera 如何获取视频源之前,先简单过一下 Camera 在 Android 中是如何使用的。
Camera的要求
Camera
权限
<uses-permission android:name="android.permission.CAMERA" />
features
<uses-feature android:name="android.hardware.camera" />
storage
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE"/>
audio
<uses-permission android:name="android.permission.RECORD_AUDIO"/>
location
<uses-permission android:name="android.permission.ACCESS_FINE_LOCATION"/>
如何使用Camera
跳转系统的拍照
/**
 * Launch the system camera app to take a picture via an implicit intent.
 *
 * Extras:
 *   MediaStore.EXTRA_OUTPUT : local file path where the full-size photo is saved
 */
private void goToTakePicture() {
    Intent intent = new Intent(MediaStore.ACTION_IMAGE_CAPTURE);
    // NOTE(review): Uri.fromFile() triggers FileUriExposedException on API 24+;
    // a FileProvider content:// Uri is required there — confirm minSdkVersion.
    Uri localPhotoUri = Uri.fromFile(new File(this.getExternalCacheDir(), LOCAL_TAKE_PHOTO_FILE_NAME));
    intent.putExtra(MediaStore.EXTRA_OUTPUT, localPhotoUri);
    // Guard against devices with no activity able to handle the capture intent;
    // otherwise startActivityForResult() throws ActivityNotFoundException.
    if (intent.resolveActivity(getPackageManager()) != null) {
        startActivityForResult(intent, TAKE_PHOTO_REQUEST_CODE);
    }
}
处理回调结果
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
    // Only a successful photo capture is consumed here; everything else
    // simply falls through to the superclass handler.
    if (resultCode == RESULT_OK && requestCode == TAKE_PHOTO_REQUEST_CODE) {
        handlePhotoResult(data);
    }
    super.onActivityResult(requestCode, resultCode, data);
}
跳转系统的录制视频
/**
 * Launch the system camera app to record a video via an implicit intent.
 *
 * Extras:
 *   MediaStore.EXTRA_VIDEO_QUALITY  : 0 -> low quality, 1 -> high quality
 *   MediaStore.EXTRA_OUTPUT         : local file path for the recorded video
 *   MediaStore.EXTRA_DURATION_LIMIT : maximum recording duration, in SECONDS
 *   MediaStore.EXTRA_SIZE_LIMIT     : maximum file size, in bytes
 */
private void goToRecordVideo() {
    Intent intent = new Intent(MediaStore.ACTION_VIDEO_CAPTURE);
    Uri localVideoUri = Uri.fromFile(new File(this.getExternalCacheDir(), LOCAL_RECORD_VIDEO_FILE_NAME));
    intent.putExtra(MediaStore.EXTRA_OUTPUT, localVideoUri);
    intent.putExtra(MediaStore.EXTRA_VIDEO_QUALITY, 1);
    // BUG FIX: EXTRA_DURATION_LIMIT is expressed in seconds, not milliseconds.
    // The original value 8000 allowed ~2.2 hours instead of the intended 8 seconds.
    intent.putExtra(MediaStore.EXTRA_DURATION_LIMIT, 8);
    // The camera app reads the size limit as a long (20 MB here).
    intent.putExtra(MediaStore.EXTRA_SIZE_LIMIT, 20L * 1024 * 1024);
    startActivityForResult(intent, RECORD_VIDEO_REQUEST_CODE);
}
回调方法
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
    // Only a successful video capture is consumed here; everything else
    // simply falls through to the superclass handler.
    if (resultCode == RESULT_OK && requestCode == RECORD_VIDEO_REQUEST_CODE) {
        handleVideoResult(data);
    }
    super.onActivityResult(requestCode, resultCode, data);
}
创建Camera App
步骤
1. 检测Camera
2. 获取Camera的对象
3. 创建预览的视图
4. 设置抓取的监听
5. 进行抓取
6. 保存文件
7. 释放Camera
step 1 : 检测Camera
/**
 * Check whether this device declares camera hardware.
 *
 * @param context context used to reach the PackageManager
 * @return true if camera hardware is present, false otherwise
 */
public static boolean hasCameraHardware(Context context) {
    // NOTE(review): FEATURE_CAMERA only reports a back-facing camera; a device
    // with only a front camera needs FEATURE_CAMERA_ANY (API 17+) — confirm targets.
    PackageManager packageManager = context.getPackageManager();
    return packageManager.hasSystemFeature(PackageManager.FEATURE_CAMERA);
}
step 2 : 获取Camera实例
/**
 * Obtain a Camera instance.
 *
 * @return a new Camera object for the first back-facing camera, or null when
 *         the camera is unavailable (no back-facing camera, disabled by policy,
 *         or already in use by another process).
 */
public static Camera getCameraInstance() {
    int numberOfCameras = Camera.getNumberOfCameras();
    Log.e(Constants.TAG, "numberOfCameras:" + numberOfCameras);
    Camera camera = null;
    try {
        camera = Camera.open();
    } catch (RuntimeException e) {
        // BUG FIX: Camera.open() throws RuntimeException when the camera cannot
        // be connected (e.g. held by another app). Honor the documented
        // "returns null" contract instead of crashing the caller.
        e.printStackTrace();
    }
    return camera;
}
step 3 : 创建Camera预览视图
/**
 * Created by Rrtoyewx on 16/9/1.
 * CameraPreviewLayout
 *
 * SurfaceView that owns the camera preview lifecycle: the camera is acquired
 * when the surface is created, the preview is restarted when the surface
 * changes, and the camera is released when the surface is destroyed.
 */
public class CameraPreviewLayout extends SurfaceView {
    private SurfaceHolder mSurfaceHolder;
    private Camera mCamera;

    private Callback mSurfaceCallBack = new Callback() {
        @Override
        public void surfaceCreated(SurfaceHolder holder) {
            mCamera = CameraUtil.getCameraInstance();
            // BUG FIX: getCameraInstance() can fail (camera busy or absent);
            // bail out instead of dereferencing null below.
            if (mCamera == null) {
                Log.e(Constants.TAG, "surfaceCreated: camera unavailable");
                return;
            }
            try {
                // Rotate the preview to portrait orientation.
                mCamera.setDisplayOrientation(90);
                mCamera.setPreviewDisplay(holder);
                // Start rendering preview frames onto this surface.
                mCamera.startPreview();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }

        @Override
        public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
            // BUG FIX: also guard mCamera — it is null when acquisition failed.
            if (holder.getSurface() == null || mCamera == null) {
                return;
            }
            // The preview must be stopped before it can be reconfigured.
            mCamera.stopPreview();
            try {
                mCamera.setPreviewDisplay(holder);
                mCamera.startPreview();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }

        @Override
        public void surfaceDestroyed(SurfaceHolder holder) {
            // Release the camera so other applications can use the hardware.
            if (mCamera != null) {
                mCamera.stopPreview();
                mCamera.release();
                mCamera = null;
            }
            Log.e(Constants.TAG, "surfaceDestroyed");
        }
    };

    public CameraPreviewLayout(Context context) {
        super(context);
        init();
    }

    private void init() {
        mSurfaceHolder = getHolder();
        mSurfaceHolder.addCallback(mSurfaceCallBack);
        // Deprecated setting, required only on pre-3.0 devices; ignored afterwards.
        mSurfaceHolder.setType(SURFACE_TYPE_PUSH_BUFFERS);
    }

    public Camera getCamera() {
        return mCamera;
    }
}
MainActivity
// Preview layout is created in code and attached to a FrameLayout placeholder
// declared in activity_main.xml.
private CameraPreviewLayout mCameraPreviewLayout;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
setSupportActionBar(toolbar);
// NOTE(review): mPreviewContainer is a field declared elsewhere in this Activity.
mPreviewContainer = (FrameLayout) findViewById(R.id.camera_preview);
// Build the camera preview programmatically and add it to the container.
mCameraPreviewLayout = new CameraPreviewLayout(this);
mPreviewContainer.addView(mCameraPreviewLayout);
}
效果图
step 4 : 设置抓取图片的监听
void takePicture (Camera.ShutterCallback shutter,
Camera.PictureCallback raw,
Camera.PictureCallback postview,
Camera.PictureCallback jpeg)
ShutterCallBack:当抓取到图片的时候回调,可以设置声音来让用户知道图片已经被抓取到了。
raw:当获取到raw图片回调
postview:当抓取到postview图片的回调,有的硬件不一定支持
jpeg:当获取到jpeg图片的回调。
step 5 : 抓取文件
1. 抓取图片
以获取 jpeg 的图片为准,默认为 jpeg 格式。我们可以从 mCamera.getParameters().getSupportedPictureFormats() 获取硬件支持的 picture 格式,并通过 setPictureFormat() 设置 picture format。
/**
 * Trigger an asynchronous image capture.
 * The shutter/raw/postview callbacks are null; only the jpeg callback is used,
 * so mPictureCallback receives the compressed image data.
 */
public void takePicture() {
mCamera.takePicture(null, null, null, mPictureCallback);
}
同时在 mPictureCallback 的回调方法中去保存图片
// JPEG callback: invoked with the compressed image bytes after capture;
// delegates persistence to savePhoto().
private Camera.PictureCallback mPictureCallback = new Camera.PictureCallback() {
@Override
public void onPictureTaken(byte[] data, Camera camera) {
savePhoto(data);
}
};
/**
 * Persist the captured JPEG bytes to the app's external cache directory.
 *
 * @param data raw JPEG bytes delivered by Camera.PictureCallback
 */
private void savePhoto(byte[] data) {
    // Timestamp + counter keeps names unique across rapid successive shots.
    // NOTE(review): long + int here is numeric addition, not string concatenation —
    // confirm whether "<millis>_<count>.jpg" was intended instead.
    File localPhoto = new File(mContext.getExternalCacheDir(), System.currentTimeMillis() + (count++) + ".jpg");
    // try-with-resources closes the stream on every path, replacing the manual
    // finally block; FileNotFoundException is a subclass of IOException, so the
    // original separate catch was redundant.
    try (BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(localPhoto))) {
        bos.write(data);
        bos.flush();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
2.抓取视频
抓取视频这里以MediaRecorder来说明。关于MediaRecorder的说明在视频的编解码还会有说明。
抓取视频和抓取图片存在部分差异,这里为了将抓取视频说的明白点,这里将预览视频的layout的分隔出来以便于分析。
抓取的视频的步骤如下
1. unlock camera:以便于MediaRecorder能够使用Camera;
2. setCamera:设置Camera
3. setXxxSource:设置音视频的来源
4. setProfile:设置配置
5. setOutPutFile:设置输出文件
6. prepare:准备
7. start:开启录制
8. stop:停止录制
9. reset:重置录制
10. release:释放
/**
 * SurfaceView hosting the camera preview used for video recording with MediaRecorder.
 *
 * Recording sequence (per the platform MediaRecorder state machine):
 * unlock camera -> setCamera -> set sources -> setProfile -> setOutputFile
 * -> setPreviewDisplay -> prepare -> start -> stop -> reset -> release -> camera.lock.
 */
public class RecordVideoLayout extends SurfaceView {

    private SurfaceHolder surfaceHolder;
    private MediaRecorder mediaRecorder;
    private Context context;
    private Camera camera;

    private SurfaceHolder.Callback callback = new SurfaceHolder.Callback() {
        @Override
        public void surfaceCreated(SurfaceHolder holder) {
            camera = CameraUtil.getCameraInstance();
            // BUG FIX: the camera may be busy or absent; guard before configuring it.
            if (camera != null) {
                // Rotate the preview to portrait orientation.
                camera.setDisplayOrientation(90);
            }
        }

        @Override
        public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
        }

        @Override
        public void surfaceDestroyed(SurfaceHolder holder) {
            // The recorder must be torn down before releasing the camera it borrowed.
            releaseMediaRecorder();
            releaseCamera();
        }
    };

    public RecordVideoLayout(Context context) {
        super(context);
        this.context = context;
        init();
    }

    private void init() {
        surfaceHolder = getHolder();
        surfaceHolder.addCallback(callback);
    }

    /**
     * Configure the MediaRecorder before recording starts.
     */
    public void prepareVideoRecord() {
        mediaRecorder = new MediaRecorder();
        // unlock() hands camera ownership over to MediaRecorder.
        camera.unlock();
        mediaRecorder.setCamera(camera);
        // Sources must be set before the profile.
        mediaRecorder.setAudioSource(MediaRecorder.AudioSource.CAMCORDER);
        mediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
        // QUALITY_HIGH resolves to the best quality the device supports.
        mediaRecorder.setProfile(CamcorderProfile.get(CamcorderProfile.QUALITY_HIGH));
        mediaRecorder.setOutputFile(context.getExternalCacheDir() + "/record_video");
        mediaRecorder.setPreviewDisplay(surfaceHolder.getSurface());
        try {
            mediaRecorder.prepare();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Stop recording and tear the recorder down.
     */
    public void stopVideoRecord() {
        mediaRecorder.stop();
        releaseMediaRecorder();
    }

    /**
     * Prepare and start a video recording.
     */
    public void startVideoRecord() {
        prepareVideoRecord();
        mediaRecorder.start();
    }

    /**
     * Release the Camera so other applications can use the hardware.
     */
    public void releaseCamera() {
        if (camera != null) {
            camera.release();
            camera = null;
        }
    }

    /**
     * Release the MediaRecorder and re-lock the camera for this process.
     */
    public void releaseMediaRecorder() {
        if (mediaRecorder != null) {
            mediaRecorder.reset();
            mediaRecorder.release();
            mediaRecorder = null;
            // BUG FIX: camera can be null here (acquisition failed, or release
            // already happened); guard before re-locking to avoid an NPE.
            if (camera != null) {
                camera.lock();
            }
        }
    }
}
release和存储
Camera释放:
/**
 * Release the Camera so other applications can use the hardware.
 */
public void releaseCamera() {
if (camera != null) {
camera.release();
camera = null;
}
}
MediaRecorder
/**
 * Release the MediaRecorder and hand the camera back to this process.
 */
public void releaseMediaRecorder() {
if (mediaRecorder != null) {
mediaRecorder.reset();
mediaRecorder.release();
// Re-lock the camera so this process can use it again after recording.
// NOTE(review): camera may be null here if releaseCamera() ran first or
// acquisition failed — a null check would avoid an NPE; verify call order.
camera.lock();
mediaRecorder = null;
}
}
我们应该在 onStop() 中去释放这些资源,以便于其他应用使用 Camera 等硬件。
存储:
官方推荐:
//1. 该目录下的文件不会随着应用卸载而删除,其他应用可以直接访问这里的资源
Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_MOVIES)
//2. 该目录下的文件会随着应用卸载而删除,其他应用不能直接访问这里的资源
Context.getExternalFilesDir(Environment.DIRECTORY_PICTURES)
Camera的一些进阶使用
聚光和聚焦
大部分手机点击屏幕都会在此点重新聚焦一次,或者当屏幕侦测人脸的时候,都会在人脸的区域重新聚焦一次。大部分都是通过以下方式实现的。
下面以点击屏幕后重新聚焦的情景来说
1. 检查是否支持聚光和聚焦的功能
2. 增加聚光或者聚焦的区域
3. 设置setParameters()
4. autoFocus()监听聚光和聚焦的结束的回调
/**
 * Refocus (and re-meter) the camera on the tapped point.
 * Returns true for ACTION_DOWN to consume the gesture; all other events
 * are delegated to the superclass.
 */
@Override
public boolean onTouchEvent(MotionEvent event) {
    switch (event.getAction()) {
        case MotionEvent.ACTION_DOWN:
            // 100x100 px rectangle centered on the touch point.
            Rect rect = calculateFocusArea((int) event.getX(), (int) event.getY());
            Camera.Parameters parameters = mCamera.getParameters();
            // Check metering-area support before adding one.
            if (parameters.getMaxNumMeteringAreas() > 0) {
                List<Camera.Area> meteringAreas = new ArrayList<Camera.Area>();
                // Weight 600 within the 1..1000 range.
                meteringAreas.add(new Camera.Area(rect, 600));
                // BUG FIX: this call was swallowed into a comment in the original,
                // so the metering area was never set on the parameters.
                parameters.setMeteringAreas(meteringAreas);
            }
            // Check focus-area support before adding one.
            if (parameters.getMaxNumFocusAreas() > 0) {
                List<Camera.Area> focusAreas = new ArrayList<Camera.Area>();
                focusAreas.add(new Camera.Area(rect, 600));
                parameters.setFocusAreas(focusAreas);
            }
            // BUG FIX: the modified parameters were never handed back to the
            // camera, so neither area had any effect (step 3 of the tutorial).
            mCamera.setParameters(parameters);
            mCamera.autoFocus(new Camera.AutoFocusCallback() {
                @Override
                public void onAutoFocus(boolean success, Camera camera) {
                    Log.e(Constants.TAG, "onTouchEvent success:" + success);
                }
            });
            return true;
    }
    return super.onTouchEvent(event);
}
/** Build a 100x100 px rectangle centered on the touch point. */
private Rect calculateFocusArea(int x, int y) {
    // NOTE(review): Camera.Area expects coordinates in the -1000..1000 camera
    // driver space; these raw view pixels likely need mapping — verify.
    final int half = 50;
    return new Rect(x - half, y - half, x + half, y + half);
}
人脸识别
当预览界面出现人的时候,检测人脸是一个重要的功能。
1. 检查是否支持人脸识别的功能
2. 创建一个人脸识别的监听
3. 开始人脸识别
// Listener invoked with the set of faces detected in each preview frame;
// logs the id, confidence score, bounding rect, and landmark positions.
private Camera.FaceDetectionListener faceDetectionListener = new Camera.FaceDetectionListener() {
    @Override
    public void onFaceDetection(Camera.Face[] faces, Camera camera) {
        for (Camera.Face face : faces) {
            Log.e(Constants.TAG, "id" + face.id + "score" + face.score + "rect" + face.rect
                    + "leftEye" + face.leftEye + "mouth" + face.mouth + "rightEye" + face.rightEye);
        }
    }
};
// Surface lifecycle callback that also wires up face detection on the preview.
private Callback mSurfaceCallBack = new Callback() {
@Override
public void surfaceCreated(SurfaceHolder holder) {
try {
mCamera = CameraUtil.getCameraInstance();
// Dump capability lists for debugging.
// NOTE(review): getSupportedFlashModes() may return null on devices without a
// flash, which would NPE on toString() — verify target hardware.
Log.e(Constants.TAG, "getSupportedPictureFormats" + mCamera.getParameters().getSupportedPictureFormats().toString());
Log.e(Constants.TAG, "getSupportedFocusModes:" + mCamera.getParameters().getSupportedFocusModes().toString());
Log.e(Constants.TAG, "getSupportedFlashModes:" + mCamera.getParameters().getSupportedFlashModes().toString());
Camera.Parameters parameters = mCamera.getParameters();
parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_AUTO);
// NOTE(review): FLASH_MODE_ON is set without checking support — confirm it is
// in getSupportedFlashModes() on the target devices.
parameters.setFlashMode(Camera.Parameters.FLASH_MODE_ON);
mCamera.setParameters(parameters);
// Register the face detection listener.
mCamera.setFaceDetectionListener(faceDetectionListener);
// mCamera.setDisplayOrientation(90);
mCamera.setPreviewDisplay(holder);
mCamera.startPreview();
// Start face detection after startPreview().
// NOTE(review): throws IllegalArgumentException when the device reports no
// face-detection support — consider checking getMaxNumDetectedFaces() > 0 first.
mCamera.startFaceDetection();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
if (holder.getSurface() == null) {
return;
}
// Stop, reconfigure, and restart the preview for the new surface state.
mCamera.stopPreview();
try {
mCamera.setPreviewDisplay(holder);
mCamera.startPreview();
// Face detection stops with the preview and must be restarted with it.
mCamera.startFaceDetection();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void surfaceDestroyed(SurfaceHolder holder) {
// Release the camera so other applications can use the hardware.
if (mCamera != null) {
mCamera.stopPreview();
mCamera.release();
mCamera = null;
}
Log.e(Constants.TAG, "surfaceDestroyed");
}
};