Before adding the filter, it helps to understand WebRTC's video capture pipeline.
WebRTC defines the VideoCapturer interface, which declares the camera operations: initialization, starting preview, stopping preview, and disposal.
Its implementation is CameraCapturer, wrapped by Camera1Capturer and Camera2Capturer; the real work is done in Camera1Session and Camera2Session, which receive the camera frame callbacks.
Here we go with Camera1Session. When creating the capturer on the app side, set captureToTexture to false so that frames arrive as byte buffers rather than textures, and apply the beauty filter to the buffer handed back by the camera in listenForBytebufferFrames().
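On the app side, captureToTexture is just the boolean you pass to the camera enumerator; a minimal sketch (device selection and the events handler are simplified here):

// Sketch: ask for a Camera1-based capturer that delivers NV21 byte buffers
// rather than OES textures (captureToTexture = false).
CameraEnumerator enumerator = new Camera1Enumerator(false /* captureToTexture */);
String deviceName = enumerator.getDeviceNames()[0]; // pick the device you actually want in production
VideoCapturer capturer = enumerator.createCapturer(deviceName, null /* eventsHandler */);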
Process the data before the camera's byte[] data is converted into WebRTC's VideoFrame.Buffer:
applyFilter(data, captureFormat.width, captureFormat.height);
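For context, this is roughly where that call sits inside Camera1Session.listenForBytebufferFrames(); the sketch below is abridged from the upstream WebRTC source, with error handling and state checks omitted, and details vary between WebRTC versions:

// Abridged sketch of Camera1Session.listenForBytebufferFrames(); member names
// (camera, captureFormat, cameraThreadHandler, events) follow the upstream source.
camera.setPreviewCallbackWithBuffer(new android.hardware.Camera.PreviewCallback() {
    @Override
    public void onPreviewFrame(final byte[] data, android.hardware.Camera callbackCamera) {
        final long captureTimeNs = TimeUnit.MILLISECONDS.toNanos(SystemClock.elapsedRealtime());
        // Beauty filter applied here, before the buffer is wrapped for WebRTC.
        applyFilter(data, captureFormat.width, captureFormat.height);
        VideoFrame.Buffer frameBuffer = new NV21Buffer(data, captureFormat.width,
                captureFormat.height,
                () -> cameraThreadHandler.post(() -> camera.addCallbackBuffer(data)));
        VideoFrame frame = new VideoFrame(frameBuffer, getFrameOrientation(), captureTimeNs);
        events.onFrameCaptured(Camera1Session.this, frame);
        frame.release();
    }
});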
private void applyFilter(byte[] data, int width, int height) {
    // Feed the NV21 preview frame to the GPUImage renderer, which draws it
    // through MagicBeautyFilter into the off-screen PixelBuffer.
    renderer.onPreviewFrame(data, width, height);
    // Read the filtered RGB result back as a Bitmap, convert it to NV21 and
    // copy it over the original buffer so WebRTC keeps using the same array.
    Bitmap newBitmapRgb = buffer.getBitmap();
    byte[] dataYuv = getNV21(width, height, newBitmapRgb);
    System.arraycopy(dataYuv, 0, data, 0, dataYuv.length);
}
byte[] getNV21(int inputWidth, int inputHeight, Bitmap scaled) {
    int[] argb = new int[inputWidth * inputHeight];
    scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
    // NV21 needs width * height luma bytes plus half that again for the interleaved VU plane.
    byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
    encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
    scaled.recycle();
    return yuv;
}
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;
    int R, G, B, Y, U, V;
    int index = 0;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            R = (argb[index] & 0xff0000) >> 16;
            G = (argb[index] & 0xff00) >> 8;
            B = (argb[index] & 0xff);
            // Well-known RGB to YUV conversion.
            Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
            U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
            V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
            // NV21 has a full-resolution Y plane followed by an interleaved VU plane
            // subsampled by 2 in both directions: for every 4 Y pixels there is 1 V and 1 U,
            // taken every other pixel on every other scanline.
            yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
            if (j % 2 == 0 && index % 2 == 0) {
                yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
                yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
            }
            index++;
        }
    }
}
The renderer used above needs to be initialized in the constructor:
initFilter(captureFormat.width, captureFormat.height);
private MagicBeautyFilter filter;
private GPUImageRenderer renderer;
private PixelBuffer buffer;

private void initFilter(int width, int height) {
    // Render off-screen: GPUImageRenderer draws through MagicBeautyFilter
    // into a PixelBuffer sized to the capture format.
    filter = new MagicBeautyFilter(applicationContext);
    renderer = new GPUImageRenderer(filter);
    renderer.setRotation(Rotation.NORMAL, false, false);
    renderer.setScaleType(GPUImage.ScaleType.CENTER_INSIDE);
    buffer = new PixelBuffer(width, height);
    buffer.setRenderer(renderer);
}
Don't forget to release the filter in stopInternal:
private void destroyFilter() {
    filter.destroy();
    buffer.destroy();
}
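Something like this, where stopInternal() is Camera1Session's existing shutdown path (abridged sketch):

private void stopInternal() {
    checkIsOnCameraThread();
    // Release the GPUImage resources together with the camera session.
    destroyFilter();
    // ... existing preview stop and camera release code ...
}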
The beauty effect itself is implemented with GPUImage, so add the GPUImage dependency:
implementation 'jp.co.cyberagent.android:gpuimage:2.1.0'
The concrete filter class, MagicBeautyFilter, you have to provide yourself. I copied mine from an open-source beauty-camera project; unfortunately I no longer remember which one, so I can't link to it.
import android.content.Context;
import android.opengl.GLES20;

import jp.co.cyberagent.android.gpuimage.filter.GPUImageFilter;

// Skin-smoothing filter; the fragment shader is loaded from R.raw.beauty.
public class MagicBeautyFilter extends GPUImageFilter {
    private int mSingleStepOffsetLocation;
    private int mParamsLocation;

    public MagicBeautyFilter(Context context) {
        super(NO_FILTER_VERTEX_SHADER,
                OpenGlUtil.readShaderFromRawResource(context, R.raw.beauty));
    }

    @Override
    public void onInit() {
        super.onInit();
        mSingleStepOffsetLocation = GLES20.glGetUniformLocation(getProgram(), "singleStepOffset");
        mParamsLocation = GLES20.glGetUniformLocation(getProgram(), "params");
        setBeautyLevel(5);
    }

    private void setTexelSize(final float w, final float h) {
        setFloatVec2(mSingleStepOffsetLocation, new float[] {1.0f / w, 1.0f / h});
    }

    @Override
    public void onOutputSizeChanged(final int width, final int height) {
        super.onOutputSizeChanged(width, height);
        setTexelSize(width, height);
    }

    public void setBeautyLevel(int level) {
        switch (level) {
            case 1:
                setFloat(mParamsLocation, 1.0f);
                break;
            case 2:
                setFloat(mParamsLocation, 0.8f);
                break;
            case 3:
                setFloat(mParamsLocation, 0.6f);
                break;
            case 4:
                setFloat(mParamsLocation, 0.4f);
                break;
            case 5:
                setFloat(mParamsLocation, 0.33f);
                break;
            default:
                break;
        }
    }
}
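The strength can be changed at runtime by calling setBeautyLevel again; setFloat in GPUImageFilter queues the uniform update for the GL thread (via runOnDraw), so it takes effect on the next rendered frame. For example:

// Example: switch to a different smoothing strength while capturing.
filter.setBeautyLevel(3);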
import android.content.Context;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

public class OpenGlUtil {
    // Reads a GLSL source file from res/raw and returns it as a single string.
    public static String readShaderFromRawResource(Context context, final int resourceId) {
        final InputStream inputStream = context.getResources().openRawResource(resourceId);
        final StringBuilder body = new StringBuilder();
        try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream))) {
            String nextLine;
            while ((nextLine = bufferedReader.readLine()) != null) {
                body.append(nextLine);
                body.append('\n');
            }
        } catch (IOException e) {
            return null;
        }
        return body.toString();
    }
}
Below is the beauty algorithm itself, also copied from elsewhere; many open-source beauty-camera projects use this same shader, so a quick search will turn it up. In short, it blurs the green channel around each pixel, derives a high-pass (detail) layer from that blur, amplifies the detail with repeated hardLight passes, and blends the smoothed color back in with a weight that depends on luminance and the params uniform. The following is R.raw.beauty:
precision mediump float;
varying mediump vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
uniform vec2 singleStepOffset;
uniform mediump float params;
const highp vec3 W = vec3(0.299,0.587,0.114);
vec2 blurCoordinates[20];
float hardLight(float color)
{
if(color <= 0.5)
color = color * color * 2.0;
else
color = 1.0 - ((1.0 - color)*(1.0 - color) * 2.0);
return color;
}
void main(){
vec3 centralColor = texture2D(inputImageTexture, textureCoordinate).rgb;
blurCoordinates[0] = textureCoordinate.xy + singleStepOffset * vec2(0.0, -10.0);
blurCoordinates[1] = textureCoordinate.xy + singleStepOffset * vec2(0.0, 10.0);
blurCoordinates[2] = textureCoordinate.xy + singleStepOffset * vec2(-10.0, 0.0);
blurCoordinates[3] = textureCoordinate.xy + singleStepOffset * vec2(10.0, 0.0);
blurCoordinates[4] = textureCoordinate.xy + singleStepOffset * vec2(5.0, -8.0);
blurCoordinates[5] = textureCoordinate.xy + singleStepOffset * vec2(5.0, 8.0);
blurCoordinates[6] = textureCoordinate.xy + singleStepOffset * vec2(-5.0, 8.0);
blurCoordinates[7] = textureCoordinate.xy + singleStepOffset * vec2(-5.0, -8.0);
blurCoordinates[8] = textureCoordinate.xy + singleStepOffset * vec2(8.0, -5.0);
blurCoordinates[9] = textureCoordinate.xy + singleStepOffset * vec2(8.0, 5.0);
blurCoordinates[10] = textureCoordinate.xy + singleStepOffset * vec2(-8.0, 5.0);
blurCoordinates[11] = textureCoordinate.xy + singleStepOffset * vec2(-8.0, -5.0);
blurCoordinates[12] = textureCoordinate.xy + singleStepOffset * vec2(0.0, -6.0);
blurCoordinates[13] = textureCoordinate.xy + singleStepOffset * vec2(0.0, 6.0);
blurCoordinates[14] = textureCoordinate.xy + singleStepOffset * vec2(6.0, 0.0);
blurCoordinates[15] = textureCoordinate.xy + singleStepOffset * vec2(-6.0, 0.0);
blurCoordinates[16] = textureCoordinate.xy + singleStepOffset * vec2(-4.0, -4.0);
blurCoordinates[17] = textureCoordinate.xy + singleStepOffset * vec2(-4.0, 4.0);
blurCoordinates[18] = textureCoordinate.xy + singleStepOffset * vec2(4.0, -4.0);
blurCoordinates[19] = textureCoordinate.xy + singleStepOffset * vec2(4.0, 4.0);
float sampleColor = centralColor.g * 20.0;
sampleColor += texture2D(inputImageTexture, blurCoordinates[0]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[1]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[2]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[3]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[4]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[5]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[6]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[7]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[8]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[9]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[10]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[11]).g;
sampleColor += texture2D(inputImageTexture, blurCoordinates[12]).g * 2.0;
sampleColor += texture2D(inputImageTexture, blurCoordinates[13]).g * 2.0;
sampleColor += texture2D(inputImageTexture, blurCoordinates[14]).g * 2.0;
sampleColor += texture2D(inputImageTexture, blurCoordinates[15]).g * 2.0;
sampleColor += texture2D(inputImageTexture, blurCoordinates[16]).g * 2.0;
sampleColor += texture2D(inputImageTexture, blurCoordinates[17]).g * 2.0;
sampleColor += texture2D(inputImageTexture, blurCoordinates[18]).g * 2.0;
sampleColor += texture2D(inputImageTexture, blurCoordinates[19]).g * 2.0;
sampleColor = sampleColor / 48.0;
float highPass = centralColor.g - sampleColor + 0.5;
for(int i = 0; i < 5;i++)
{
highPass = hardLight(highPass);
}
float luminance = dot(centralColor, W);
float alpha = pow(luminance, params);
vec3 smoothColor = centralColor + (centralColor-vec3(highPass))*alpha*0.1;
gl_FragColor = vec4(mix(smoothColor.rgb, max(smoothColor, centralColor), alpha), 1.0);
}