Camera(相机)
图像采集流程
< uses-permission android: name= " android.permission.CAMERA" />
2、构建预览画布 -TextureView/SurfaceView
// Build the camera preview surface: attach a full-screen TextureView to the
// container layout, then open the default back-facing camera.
LinearLayout linearLayout = findViewById(R.id.ll_texture_view);
TextureView textureView = new TextureView(this);
LinearLayout.LayoutParams layoutParams = new LinearLayout.LayoutParams(
        LinearLayout.LayoutParams.MATCH_PARENT,
        LinearLayout.LayoutParams.MATCH_PARENT);
textureView.setLayoutParams(layoutParams);
linearLayout.addView(textureView);
// Bug fix: the original called Camera.open() and discarded the returned
// Camera instance, so the camera could never be configured, previewed, or
// released. Keep the reference in mCamera (used later by setPreviewTexture,
// startPreview and release).
mCamera = Camera.open();
4、设置摄像机参数 -Parameters 通过 Camera.getParameters() 获取 Parameters
参数 作用 setPreviewFormat / setPictureFormat 设置预览数据格式(默认是NV21) setPreviewSize / setPictureSize 设置摄像头宽、高 setFocusMode 设置对焦模式 getSupportedPreviewSizes / getSupportedPictureSizes 获取摄像头支持的预览大小
5、设置预览画布并启动 -startPreview/setPreviewTexture
// Bind the camera's output to the TextureView's SurfaceTexture, then start
// streaming preview frames.
// NOTE(review): setPreviewTexture declares IOException — the surrounding
// code (not shown here) must catch or propagate it.
mCamera. setPreviewTexture ( surfaceTexture) ;
mCamera. startPreview ( ) ;
7、设置预览数据回调 -PreviewCallback
// Called once per preview frame; `bytes` holds one frame of raw preview
// data (NV21 by default unless changed via setPreviewFormat).
@Override
public void onPreviewFrame ( byte [ ] bytes, Camera camera) {
    // TODO: process or encode the frame here; keep this callback fast —
    // it runs on the camera's event thread.
}
8、释放相机 -stopPreview/release/removeView
// Tear the camera down in reverse order of setup: detach the frame
// callback, stop the preview stream, release the hardware, then remove the
// preview view from its parent layout.
mCamera.setPreviewCallback(null);
mCamera.stopPreview();
mCamera.release();
mCamera = null;
// Bug fix: ViewGroup has no remove(View) method — the correct call is
// removeView(View) — and the original line was missing its semicolon.
linearLayout.removeView(textureView);
AudioRecord(录音)
录音流程:
< uses-permission android: name= " android.permission.RECORD_AUDIO" />
< uses-permission android: name= " android.permission.WRITE_EXTERNAL_STORAGE" />
< uses-permission android: name= " android.permission.READ_EXTERNAL_STORAGE" />
/**
 * Prepares the audio-capture pipeline: queries the minimum PCM buffer size,
 * creates the AudioRecord (44.1 kHz, stereo, 16-bit PCM from the mic), and
 * starts a background HandlerThread whose Looper backs the record Handler.
 */
private void initRecord ( ) {
    minBufferSize = AudioRecord.getMinBufferSize(
            44100,
            AudioFormat.CHANNEL_IN_STEREO,
            AudioFormat.ENCODING_PCM_16BIT);
    audioRecord = new AudioRecord(
            MediaRecorder.AudioSource.MIC,
            44100,
            AudioFormat.CHANNEL_IN_STEREO,
            AudioFormat.ENCODING_PCM_16BIT,
            minBufferSize);
    // Bug fix: the original declared a LOCAL HandlerThread here, so the
    // `handlerThread` field that closeRecord() calls quit() on was never
    // assigned. Assign the field so the thread can actually be stopped.
    handlerThread = new HandlerThread("record");
    handlerThread.start();
    startRecordHandler = new Handler(handlerThread.getLooper());
}
参数 作用 audioSource 录音源(MediaRecorder.AudioSource.MIC(麦克风)) sampleRateInHz 采样率(44100Hz/16000Hz等) channelConfig 音频通道配置(AudioFormat.CHANNEL_IN_STEREO(立体音)) audioFormat 返回的数据格式(AudioFormat.ENCODING_PCM_16BIT(pcm格式)) bufferSizeInBytes 音频数据缓存大小(minBufferSize)
3、开始录制 (保存下来的格式是pcm格式(保存记得在子线程进行))
/**
 * Starts capturing PCM audio and streams it to {@code path} on the
 * background record thread (file I/O must stay off the main thread).
 *
 * @param path destination file for the raw PCM data
 */
private void startRecord ( String path) {
    audioRecord.startRecording();
    isRecording = true;
    startRecordHandler.post(new Runnable() {
        @Override
        public void run() {
            byte[] buffer = new byte[minBufferSize];
            // try-with-resources closes the stream on every exit path; the
            // original closed it twice (inside try AND finally) and leaked
            // it if the FileOutputStream constructor threw.
            try (FileOutputStream fo = new FileOutputStream(path)) {
                while (isRecording) {
                    int read = audioRecord.read(buffer, 0, buffer.length);
                    if (read > 0) {
                        // Bug fix: write only the bytes actually read. The
                        // original wrote the whole buffer, padding the file
                        // with stale data whenever read < buffer.length.
                        fo.write(buffer, 0, read);
                    }
                }
            } catch (Exception e) {
                // Bug fix: fillInStackTrace() silently discarded the error;
                // log it so failures are visible.
                Log.e(TAG, "PCM capture/write failed", e);
            }
        }
    });
}
// Stops capture: clears the loop flag so the writer thread exits its read
// loop, releases the recorder, then wraps the raw PCM into a WAV container.
// NOTE(review): closeRecord() stops/releases audioRecord immediately, but
// the background writer thread may still be blocked in audioRecord.read(),
// and pcmToWav runs before the writer has necessarily finished flushing —
// confirm this ordering is safe or synchronize with the writer thread first.
private void stopRecord ( ) {
    isRecording = false ;
    closeRecord ( ) ;
    pcmToWav ( path, pathWav, 44100 ) ;
}
/**
 * Releases the AudioRecord (if it was successfully created and initialized)
 * and quits the background record thread's Looper.
 */
private void closeRecord ( ) {
    if (audioRecord == null) {
        return;
    }
    // A recorder stuck in the uninitialized state was never usable; there
    // is nothing to stop, so just report and bail out.
    boolean notInitialized =
            audioRecord.getState() == AudioRecord.STATE_UNINITIALIZED;
    if (notInitialized) {
        Log.e(TAG, "AudioRecord initialize fail !");
        return;
    }
    audioRecord.stop();
    audioRecord.release();
    handlerThread.quit();
}
/**
 * Wraps a raw PCM file into a WAV file by prefixing the 44-byte RIFF/WAVE
 * header and copying the sample data through.
 *
 * @param pcmFileName    source file containing raw 16-bit stereo PCM
 * @param wavFileName    destination WAV file (overwritten)
 * @param sampleRateInHz sample rate the PCM was recorded at
 */
private void pcmToWav ( final String pcmFileName, final String wavFileName, long sampleRateInHz) {
    int channels = 2; // recorder is configured for CHANNEL_IN_STEREO
    // bytes per second = bitsPerSample * sampleRate * channels / 8
    long byteRate = 16 * sampleRateInHz * channels / 8;
    byte[] buffer = new byte[minBufferSize];
    // try-with-resources: the original leaked both streams when any call
    // between open and close threw.
    try (FileInputStream fis = new FileInputStream(pcmFileName);
         FileOutputStream fos = new FileOutputStream(wavFileName)) {
        long dataSize = fis.getChannel().size();
        writeWavFileHeader(fos, dataSize, dataSize + 36, sampleRateInHz, channels, byteRate);
        int read;
        while ((read = fis.read(buffer)) != -1) {
            // Bug fix: copy only the bytes actually read; the original
            // always wrote the full buffer, appending garbage after the
            // final (usually partial) chunk.
            fos.write(buffer, 0, read);
        }
    } catch (Exception e) {
        // Bug fix: fillInStackTrace() silently discarded the error.
        Log.e(TAG, "pcm -> wav conversion failed", e);
    }
}
/**
 * Writes a canonical 44-byte little-endian RIFF/WAVE header for 16-bit PCM.
 *
 * @param fos        output stream positioned at the start of the WAV file
 * @param fiSize     size of the raw PCM payload in bytes ("data" chunk size)
 * @param fiSize36   fiSize + 36, the total RIFF chunk size
 * @param sampleRate samples per second
 * @param channels   channel count
 * @param byteRate   sampleRate * channels * bitsPerSample / 8
 * @throws Exception if writing the header fails
 */
private void writeWavFileHeader ( FileOutputStream fos,
                                  long fiSize, long fiSize36, long sampleRate,
                                  long channels, long byteRate) throws Exception {
    byte [ ] header = new byte [ 44 ] ;
    // "RIFF" chunk descriptor.
    header[0] = 'R';
    header[1] = 'I';
    header[2] = 'F';
    header[3] = 'F';
    // Chunk size (file size minus the 8 bytes of "RIFF" + this field), LE.
    header[4] = (byte) (fiSize36 & 0xff);
    header[5] = (byte) ((fiSize36 >> 8) & 0xff);
    header[6] = (byte) ((fiSize36 >> 16) & 0xff);
    header[7] = (byte) ((fiSize36 >> 24) & 0xff);
    header[8] = 'W';
    header[9] = 'A';
    header[10] = 'V';
    header[11] = 'E';
    // "fmt " sub-chunk (note the trailing space in the tag).
    header[12] = 'f';
    header[13] = 'm';
    header[14] = 't';
    header[15] = ' ';
    // Sub-chunk size: 16 for PCM.
    header[16] = 16;
    header[17] = 0;
    header[18] = 0;
    header[19] = 0;
    // Audio format: 1 = uncompressed PCM.
    header[20] = 1;
    header[21] = 0;
    // Channel count.
    header[22] = (byte) channels;
    header[23] = 0;
    // Sample rate, little-endian.
    header[24] = (byte) (sampleRate & 0xff);
    header[25] = (byte) ((sampleRate >> 8) & 0xff);
    header[26] = (byte) ((sampleRate >> 16) & 0xff);
    header[27] = (byte) ((sampleRate >> 24) & 0xff);
    // Byte rate, little-endian.
    header[28] = (byte) (byteRate & 0xff);
    header[29] = (byte) ((byteRate >> 8) & 0xff);
    header[30] = (byte) ((byteRate >> 16) & 0xff);
    header[31] = (byte) ((byteRate >> 24) & 0xff);
    // Block align = channels * bitsPerSample / 8.
    // Bug fix: the original hardcoded 2 * 16 / 8, silently producing a
    // wrong header for any channel count other than 2; use the parameter.
    header[32] = (byte) (channels * 16 / 8);
    header[33] = 0;
    // Bits per sample: 16.
    header[34] = 16;
    header[35] = 0;
    // "data" sub-chunk header followed by the payload size, little-endian.
    header[36] = 'd';
    header[37] = 'a';
    header[38] = 't';
    header[39] = 'a';
    header[40] = (byte) (fiSize & 0xff);
    header[41] = (byte) ((fiSize >> 8) & 0xff);
    header[42] = (byte) ((fiSize >> 16) & 0xff);
    header[43] = (byte) ((fiSize >> 24) & 0xff);
    fos.write(header, 0, 44);
}
MediaFormat(参数配置(色彩配置(初始化是黑白色)))
使用
// Configure an H.264 (AVC) video format for the encoder.
MediaFormat format = MediaFormat . createVideoFormat ( MediaFormat . MIMETYPE_VIDEO_AVC , width, height) ;
// Input color format: planar YUV 4:2:0 (i420); camera NV21 frames must be
// converted to this layout before being queued to the codec.
format. setInteger ( MediaFormat . KEY_COLOR_FORMAT , MediaCodecInfo. CodecCapabilities. COLOR_FormatYUV420Planar ) ;
// Target bitrate: 500 kbps.
format. setInteger ( MediaFormat . KEY_BIT_RATE , 500_000 ) ;
// 20 frames per second.
format. setInteger ( MediaFormat . KEY_FRAME_RATE , 20 ) ;
// One key (I) frame every 2 seconds.
format. setInteger ( MediaFormat . KEY_I_FRAME_INTERVAL , 2 ) ;
MediaCodec(编码器(NV21->i420->H.264))
使用
// Create an AVC ENCODER (createEncoderByType — this is an encoder, not a
// decoder), configure it with the format above in encode mode, and start it.
mCodec = MediaCodec . createEncoderByType ( MediaFormat . MIMETYPE_VIDEO_AVC ) ;
mCodec. configure ( format, null , null , MediaCodec . CONFIGURE_FLAG_ENCODE ) ;
mCodec. start ( ) ;
MediaMuxer (混合器(将视频+音频合并起来的容器))
使用
// MediaMuxer packages the encoded audio/video tracks into an MP4 container
// written to `path`.
mMuxer = new MediaMuxer ( path, MediaMuxer. OutputFormat . MUXER_OUTPUT_MPEG_4 ) ;
总结:这一篇到这里就结束了,谢谢观看,想必您对音视频开发有一定的了解了吧,这一章主要是让您了解一下音频开发在Android原生中需要用到SDK中的哪一些API,接下来就是对每一个API的源码分析以及延申技术的讲解了,期待一下吧!