Accelerating RGB/YUV conversion with the NEON instruction set

This article describes how to convert RGB888 to NV12 on the Android platform using the NEON instruction set: how to enable NEON support, and how to implement the conversion function with NEON intrinsics and with assembly, in order to make video encoding and decoding more efficient.

I have recently been working on H.264 encoding and decoding, using Android's MediaCodec for hardware codec support to reduce CPU load.

It turns out that the most widely supported input formats are YUV420P and YUV420SP; the vast majority of Android devices accept these two formats for H.264 encoding.

(For how to query the formats an Android device supports, see my previous article 《Android平台MediaCodec避坑指北》.)

However, starting with Android API level 23 these formats are deprecated; COLOR_FormatYUV420Flexible is recommended instead.

So if the video frames we capture are in RGB888 or UYVY422, a format conversion is needed.

If you happen to be using NV21, NV12, YV12, or I420, conversion may or may not be necessary; see pitfall 3 in 《Android平台MediaCodec避坑指北》.

First, the development environment: Windows 10 + Android Studio 3.0 + CMake.

0. NEON instructions

The NEON instruction set is documented in the NEON Programmer's Guide Version: 1.0, which can be downloaded from Arm's website.

It covers NEON's characteristic widening instructions (eight 8-bit lanes become eight 16-bit lanes), narrowing instructions (eight 16-bit lanes back to eight 8-bit lanes), saturating instructions, and so on. You need to spend real time with the document before these feel natural.
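For a quick taste, here is a minimal sketch (assuming <arm_neon.h> on an ARM target; the helper name is mine, not from the guide) of the three instruction families used throughout this article:

#include <arm_neon.h>

// Demo of the three families: widening, narrowing, and saturating narrowing.
static inline uint8x8_t neon_families_demo(uint8x8_t r)
{
    // Widening multiply: eight u8 lanes * u8 -> eight u16 lanes (VMULL.U8).
    uint16x8_t wide = vmull_u8(r, vdup_n_u8(66));
    // Narrowing shift: >> 8, back down to eight u8 lanes (VSHRN.U16).
    uint8x8_t narrowed = vshrn_n_u16(wide, 8);
    // Saturating narrow: clamp each u16 lane into [0, 255] (VQMOVN.U16).
    uint8x8_t clamped = vqmovn_u16(wide);
    return vadd_u8(narrowed, clamped);
}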

1. Intrinsics or assembly?

Intrinsics are easy to use and work across both armv7 and armv8, and each API maps to a single instruction. Because they enumerate every combination of signed/unsigned, int/short/char, 8x8 and 16x8 lane layouts, add/subtract/multiply, widening, narrowing, saturating, shifts and so on, arm_neon.h contains several hundred APIs. They all follow the same patterns, though: understand one underlying assembly instruction and you can handle 10 to 20 of the corresponding intrinsics.

A helpful reference: https://blog.csdn.net/fengbingchun/article/details/38085781

Assembly, on the other hand, is not portable between armv7 and armv8, but only by truly understanding what each instruction does can you optimize to the limit.

Understanding the assembly in turn helps a great deal when writing intrinsics, as the small comparison below illustrates.
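As a rough illustration of that mapping (my own example; the "roughly compiles to" note is an assumption about the generated code, not verified compiler output), one multiply-accumulate step of the Y computation used later looks like this:

#include <arm_neon.h>

// Intrinsic form of one step of the Y computation: y += g * 129,
// widening eight u8 lanes into eight u16 lanes.
static inline uint16x8_t mla_g_129(uint16x8_t y, uint8x8_t g)
{
    // Roughly corresponds to a single VMLAL.U8 instruction on armv7.
    return vmlal_u8(y, g, vdup_n_u8(129));
}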

2. Getting started with NEON

Add the following two lines to your CMakeLists.txt; the same idea applies if you build with .mk files. (You can omit them if you don't need NEON.)

add_definitions("-D__ARM_NEON__=1")

set_property(SOURCE ${SRC_FILE} APPEND_STRING PROPERTY COMPILE_FLAGS " -mfpu=neon")

You can also configure this in build.gradle; Android Studio 3.0 makes that convenient.

externalNativeBuild {
    cmake {
        cFlags "-mfpu=neon"
        cppFlags "-std=c++11 -frtti -fexceptions"
        arguments "-DANDROID_ARM_NEON=TRUE"
    }
}

3. Enabling ASM in CMake

enable_language(ASM)

Without this line, you cannot build .S assembly files.

4. Using the intrinsics library

It's very simple: just #include <arm_neon.h>.

5. Show me the code

For confidentiality reasons I can't put the project on GitHub; I can only share the relevant conversion functions.

The example converts RGB to YUV420SP (NV12); the reverse direction works the same way.

First, the plain C++ version:

void FrameFormatConvert::RGBtoNV12(byte* nv12, byte* rgb, int width, int height)
{
    if (CheckIsSupportNeon())
    {
#ifndef WIN32
        LOGD("start neon nv12");
        //rgb888_2_nv12_neon(nv12, rgb, width, height, width >> 4);
        rgb888_2_nv12_intrinsic(nv12, rgb, width, height);
        return;
#endif
    }

    int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;
    int R, G, B, Y, U, V;
    int index = 0;
    for (int j = 0; j < height; j++)
    {
        for (int i = 0; i < width; i++)
        {
            //a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
            //R = (argb[index] & 0xff0000) >> 16;
            //G = (argb[index] & 0xff00) >> 8;
            //B = (argb[index] & 0xff) >> 0;
            R = rgb[index];
            index++;
            G = rgb[index];
            index++;
            B = rgb[index];
            index++;

            // well known RGB to YUV algorithm
            Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
            U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
            V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;

            // NV12 has a plane of Y followed by interleaved UV, each sampled by a factor of 2,
            // meaning for every 4 Y pixels there is 1 U and 1 V. Note the sampling is every other
            // pixel AND every other scanline.
            // NV12  YYYYYYYY UVUV
            // NV21  YYYYYYYY VUVU
            nv12[yIndex++] = (byte)((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
            if (j % 2 == 0 && i % 2 == 0)   // chroma from even rows and even columns, matching the NEON paths
            {
                nv12[uvIndex++] = (byte)((U < 0) ? 0 : ((U > 255) ? 255 : U));
                nv12[uvIndex++] = (byte)((V < 0) ? 0 : ((V > 255) ? 255 : V));
            }
        }
    }
}
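CheckIsSupportNeon() is not shown above. A minimal sketch, assuming the NDK's cpufeatures helper library is linked into the project (the implementation and name below are my guess, not the original project's code):

#include <cpu-features.h>

// Hypothetical runtime check: report NEON support on 32-bit ARM,
// and treat 64-bit ARM as always providing Advanced SIMD.
static bool CheckIsSupportNeonSketch()
{
    AndroidCpuFamily family = android_getCpuFamily();
    if (family == ANDROID_CPU_FAMILY_ARM)
        return (android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON) != 0;
    return family == ANDROID_CPU_FAMILY_ARM64;
}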

Next, the intrinsics version:

void FrameFormatConvert::rgb888_2_nv12_intrinsic(byte* nv12, byte* rgb, int width, int height)
{
#ifndef WIN32
    const uint8x8_t u8_zero = vdup_n_u8(0);
    const uint8x8_t u8_16 = vdup_n_u8(16);
    const uint16x8_t u16_rounding = vdupq_n_u16(128);
    const int16x8_t s16_zero = vdupq_n_s16(0);
    const int8x8_t s8_rounding = vdup_n_s8(-128);
    const int16x8_t s16_rounding = vdupq_n_s16(128);

    byte* UVPtr = nv12 + width * height;
    int pitch = width >> 4;

    for (int j = 0; j < height; ++j)
    {
        for (int i = 0; i < pitch; ++i)
        {
            // Load rgb 16 pixel
            uint8x16x3_t pixel_rgb = vld3q_u8(rgb);

            uint8x8_t high_r = vget_high_u8(pixel_rgb.val[0]);
            uint8x8_t low_r = vget_low_u8(pixel_rgb.val[0]);
            uint8x8_t high_g = vget_high_u8(pixel_rgb.val[1]);
            uint8x8_t low_g = vget_low_u8(pixel_rgb.val[1]);
            uint8x8_t high_b = vget_high_u8(pixel_rgb.val[2]);
            uint8x8_t low_b = vget_low_u8(pixel_rgb.val[2]);

            // NOTE:
            // declaration may not appear after executable statement in block
            uint16x8_t high_y;
            uint16x8_t low_y;

            // 1. Multiply transform matrix (Y′: unsigned, U/V: signed)
            // 2. Scale down (">>8") to 8-bit values with rounding ("+128") (Y′: unsigned, U/V: signed)
            // 3. Add an offset to the values to eliminate any negative values (all results are 8-bit unsigned)
            uint8x8_t scalar = vdup_n_u8(66);
            high_y = vmull_u8(high_r, scalar);
            low_y = vmull_u8(low_r, scalar);
            scalar = vdup_n_u8(129);
            high_y = vmlal_u8(high_y, high_g, scalar);
            low_y = vmlal_u8(low_y, low_g, scalar);
            scalar = vdup_n_u8(25);
            high_y = vmlal_u8(high_y, high_b, scalar);
            low_y = vmlal_u8(low_y, low_b, scalar);

            high_y = vaddq_u16(high_y, u16_rounding);
            low_y = vaddq_u16(low_y, u16_rounding);

            uint8x8_t u8_low_y = vshrn_n_u16(low_y, 8);
            uint8x8_t u8_high_y = vshrn_n_u16(high_y, 8);

            low_y = vaddl_u8(u8_low_y, u8_16);
            high_y = vaddl_u8(u8_high_y, u8_16);

            uint8x16_t pixel_y = vcombine_u8(vqmovn_u16(low_y), vqmovn_u16(high_y));

            // Store
            vst1q_u8(nv12, pixel_y);

            if (j % 2 == 0)
            {
                uint8x8x2_t mix_r = vuzp_u8(low_r, high_r);
                uint8x8x2_t mix_g = vuzp_u8(low_g, high_g);
                uint8x8x2_t mix_b = vuzp_u8(low_b, high_b);

                int16x8_t signed_r = vreinterpretq_s16_u16(vaddl_u8(mix_r.val[0], u8_zero));
                int16x8_t signed_g = vreinterpretq_s16_u16(vaddl_u8(mix_g.val[0], u8_zero));
                int16x8_t signed_b = vreinterpretq_s16_u16(vaddl_u8(mix_b.val[0], u8_zero));

                int16x8_t signed_u;
                int16x8_t signed_v;

                int16x8_t signed_scalar = vdupq_n_s16(-38);
                signed_u = vmulq_s16(signed_r, signed_scalar);
                signed_scalar = vdupq_n_s16(112);
                signed_v = vmulq_s16(signed_r, signed_scalar);
                signed_scalar = vdupq_n_s16(-74);
                signed_u = vmlaq_s16(signed_u, signed_g, signed_scalar);
                signed_scalar = vdupq_n_s16(-94);
                signed_v = vmlaq_s16(signed_v, signed_g, signed_scalar);
                signed_scalar = vdupq_n_s16(112);
                signed_u = vmlaq_s16(signed_u, signed_b, signed_scalar);
                signed_scalar = vdupq_n_s16(-18);
                signed_v = vmlaq_s16(signed_v, signed_b, signed_scalar);

                signed_u = vaddq_s16(signed_u, s16_rounding);
                signed_v = vaddq_s16(signed_v, s16_rounding);

                int8x8_t s8_u = vshrn_n_s16(signed_u, 8);
                int8x8_t s8_v = vshrn_n_s16(signed_v, 8);

                signed_u = vsubl_s8(s8_u, s8_rounding);
                signed_v = vsubl_s8(s8_v, s8_rounding);

                signed_u = vmaxq_s16(signed_u, s16_zero);
                signed_v = vmaxq_s16(signed_v, s16_zero);

                uint16x8_t unsigned_u = vreinterpretq_u16_s16(signed_u);
                uint16x8_t unsigned_v = vreinterpretq_u16_s16(signed_v);

                uint8x8x2_t result;
                result.val[0] = vqmovn_u16(unsigned_u);
                result.val[1] = vqmovn_u16(unsigned_v);

                vst2_u8(UVPtr, result);

                UVPtr += 16;
            }

            rgb += 3 * 16;
            nv12 += 16;
        }
    }
#endif
}
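Both NEON paths consume 16 pixels per iteration (pitch = width >> 4), so they assume the width is a multiple of 16 and that the RGB rows are tightly packed. A minimal calling sketch (the buffer handling and function name are my own illustration, not from the original project; byte and FrameFormatConvert come from the code above):

#include <vector>

// NV12 needs width*height bytes of Y plus width*height/2 bytes of interleaved UV.
void ConvertOneFrame(FrameFormatConvert& converter, byte* rgbFrame, int width, int height)
{
    std::vector<byte> nv12(width * height * 3 / 2);
    converter.RGBtoNV12(nv12.data(), rgbFrame, width, height);
    // nv12.data() can now be queued into a MediaCodec input buffer.
}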

Finally, the assembly version:

/*
    RGB_2_NV21.Neon.S
*/
.text
.align
.global rgb888_2_nv12_neon
.type rgb888_2_nv12_neon, %function

/* the parameters of the function */
#define Dst_NV12_Ptr r0
#define Src_RGB_Ptr  r1
#define width        r2
#define height       r3
#define pitch        r4   /* width>>4, 5th argument, passed on the stack */

/* registers used inside the function */
#define XCount       r5
#define YCount       r6
#define UV_Ptr       r7

#define CData0   q10
#define CData16  d22
#define CData66  d23
#define CData129 d24
#define CData25  d25
#define CData128 d26
#define CData38  d27
#define CData74  d28
#define CData112 d29
#define CData94  d30
#define CData18  d31

rgb888_2_nv12_neon:
    PUSH  {r4-r12, lr}              /* 10 words */
    VPUSH {q0-q7}                   /* 8 Q regs -> 32 words */
    VPUSH {q8-q15}                  /* 8 Q regs -> 32 words */

    /* the 5th argument (pitch) is passed on the stack per the AAPCS;
       74 words (296 bytes) were just pushed above it */
    LDR   pitch, [sp, #296]

    /* load the constants into vector registers */
    VMOV.s16 CData0,   #0
    VMOV.u8  CData16,  #16
    VMOV.u8  CData66,  #66
    VMOV.u8  CData129, #129
    VMOV.u8  CData25,  #25
    VMOV.u8  CData128, #128
    VMOV.u8  CData38,  #38
    VMOV.u8  CData74,  #74
    VMOV.u8  CData112, #112
    VMOV.u8  CData94,  #94
    VMOV.u8  CData18,  #18

    /* char* UVPtr = Dst_NV12_Ptr + width*height; */
    MUL   UV_Ptr, width, height
    ADD   UV_Ptr, Dst_NV12_Ptr

    MOV   YCount, height            /* int YCount = height; */
    CMP   YCount, #0                /* if (YCount == 0) return; */
    BEQ   endColNormal

beginColNormal:                     /* do { */
    MOVS  XCount, pitch             /* int XCount = pitch; (MOVS so the flags reflect XCount) */
    BEQ   endRowNormal              /* if (XCount == 0) continue; */
beginRowNormal:                     /* do { */
    /* d0 d1 d2 = R G B of pixels 0-7, d3 d4 d5 = R G B of pixels 8-15 */
    VLD3.8 {d0-d2}, [Src_RGB_Ptr]!
    VLD3.8 {d3-d5}, [Src_RGB_Ptr]!

#------------------------------------------
    /* compute Y:  Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16; */
    VMULL.u8  q3, d0, CData66
    VMULL.u8  q4, d3, CData66
    VMLAL.u8  q3, d1, CData129
    VMLAL.u8  q4, d4, CData129
    VMLAL.u8  q3, d2, CData25
    VMLAL.u8  q4, d5, CData25
    VADDW.u8  q3, q3, CData128
    VADDW.u8  q4, q4, CData128
    VSHRN.u16 d10, q3, #8
    VSHRN.u16 d11, q4, #8
    VADDL.u8  q3, d10, CData16
    VADDL.u8  q4, d11, CData16
    /* vqmovn: 16-bit to 8-bit, saturating to [0, 255] */
    VQMOVN.u16 d10, q3
    VQMOVN.u16 d11, q4
    VST1.u8   {d10}, [Dst_NV12_Ptr]!
    VST1.u8   {d11}, [Dst_NV12_Ptr]!

    TST   YCount, #1                /* write chroma only on every other row */
    BNE   skipUV

#------------------------------------------
    /* chroma pre-step: drop the odd-indexed pixels,
       leaving even-indexed R in d0, G in d1, B in d2 */
    VUZP.u8 d0, d3
    VUZP.u8 d1, d4
    VUZP.u8 d2, d5

    /* compute U and V:
       U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
       V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
       U: q3 q5 q7,  V: q4 q6 q8 */
    VMULL.u8  q4, d0, CData112      /* 112*R        (positive part of V) */
    VMULL.u8  q3, d0, CData38       /*  38*R        (negative part of U) */
    VMULL.u8  q6, d1, CData94       /*  94*G        (negative part of V) */
    VMULL.u8  q5, d2, CData112      /* 112*B        (positive part of U) */
    VMLAL.u8  q3, d1, CData74       /* + 74*G       (negative part of U) */
    VMLAL.u8  q6, d2, CData18       /* + 18*B       (negative part of V) */
    VADDW.u8  q7, q5, CData128      /* 112*B + 128 */
    VADDW.u8  q8, q4, CData128      /* 112*R + 128 */
    VSUB.s16  q2, q7, q3            /* U numerator */
    VSUB.s16  q3, q8, q6            /* V numerator */
    VSHRN.s16 d0, q2, #8
    VSHRN.s16 d1, q3, #8
    VSUBL.s8  q2, d0, CData128      /* +128 offset: CData128 (0x80) is -128 as s8 */
    VSUBL.s8  q3, d1, CData128
    VMAX.s16  q4, CData0, q2
    VMAX.s16  q5, CData0, q3
    /* vqmovn: 16-bit to 8-bit, saturating to [0, 255] */
    VQMOVN.u16 d0, q4
    VQMOVN.u16 d1, q5
#------------------------------------------
    VST2.u8 {d0-d1}, [UV_Ptr]!

skipUV:
    SUBS  XCount, #1
    BNE   beginRowNormal            /* } while (--XCount); */
endRowNormal:
    SUBS  YCount, #1                /* } while (--YCount); */
    BNE   beginColNormal
endColNormal:
    VPOP  {q8-q15}
    VPOP  {q0-q7}
    POP   {r4-r12, pc}
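To call the .S routine from C++, declare it with C linkage so the name is not mangled. A minimal sketch (the header placement is my own; the signature follows the commented-out call in RGBtoNV12 above):

// C-linkage prototype matching rgb888_2_nv12_neon in the .S file.
// The 5th argument is the number of 16-pixel blocks per row (width >> 4),
// passed on the stack under the AAPCS.
extern "C" void rgb888_2_nv12_neon(byte* nv12, byte* rgb, int width, int height, int pitch);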

Appendix: commonly used ARM registers

r0-r15 and R0-R15
a1-a4 (argument, result, or scratch registers; synonyms for r0 to r3)
v1-v8 (variable registers, r4 to r11)
sb and SB (static base register, r9)
ip and IP (intra-procedure-call scratch register, r12)
sp and SP (stack pointer, r13)
lr and LR (link register, r14)
pc and PC (program counter, r15)

NEON registers. Note that these overlap: Q0 = D0 + D1 = S0 + S1 + S2 + S3 (see the small sketch after the list below).

See the CSDN blog linked above for the details; I won't repeat them here.

q0-q15 and Q0-Q15 (NEON quadword registers)
d0-d31 and D0-D31 (NEON doubleword registers / VFP double-precision registers)
s0-s31 and S0-S31 (VFP single-precision registers)
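At the intrinsics level the same overlap shows up as the split/combine helpers used in the conversion above; a tiny illustration (assuming <arm_neon.h>; the function name is mine):

#include <arm_neon.h>

// A Q register is two D registers side by side: Q = D_low : D_high.
static inline uint8x16_t combine_and_split_demo(uint8x8_t low, uint8x8_t high)
{
    uint8x16_t q = vcombine_u8(low, high);                // like viewing D0,D1 as Q0
    return vcombine_u8(vget_low_u8(q), vget_high_u8(q));  // round-trips to the same value
}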

That's it.
