一、用 Unity 实现在左半边屏幕显示外部摄像头的画面,右半边屏幕延迟显示外部摄像头的画面。
二、实现原理
1.获取外部摄像头
2.把当前摄像头的拍摄到的画面转换成流并保存
3.把保存的流转成图片,并删除流
三、脚本如下:
1.字段(成员变量)的定义
private List<byte[]> cameraTextureBty = new List<byte[]>();// FIFO queue of JPG-encoded frames awaiting delayed playback
public RawImage targetTexture;// displays the live external-camera feed (left half of the screen)
public RawImage rightTexture;// displays the delayed feed (right half of the screen)
private string cameraName;// name of the webcam device in use
private WebCamTexture cameraTexture;// live texture fed by the external camera
Texture2D t3d;// reused texture that delayed JPG bytes are decoded into
Texture2D t2d;// reused texture that screen pixels are captured into
2.Start方法
// Sets up the capture/playback textures and schedules the three pipelines:
// webcam acquisition (Test1), frame recording (RecodeCamera1 after 1 s),
// and delayed playback (PlayCamera every 0.1 s starting at 3 s).
private void Start()
{
    t3d = new Texture2D(720, 810, TextureFormat.RGB24, false);
    t2d = new Texture2D(720, 810, TextureFormat.RGB24, false);
    StartCoroutine(Test1());
    // nameof() instead of magic strings: survives method renames at compile time.
    Invoke(nameof(RecodeCamera1), 1f);
    InvokeRepeating(nameof(PlayCamera), 3f, 0.1f);
}
// Thin shim so the RecodeCamera coroutine can be scheduled via Invoke(),
// which can only target a plain method, not an IEnumerator directly.
void RecodeCamera1() => StartCoroutine(RecodeCamera());
// Dequeues the oldest recorded frame and shows it on the right-hand image.
// Called by InvokeRepeating every 0.1 s.
// Bug fix: the original read textByte.Length while textByte could be null
// (empty queue), throwing NullReferenceException every tick before the
// first frame was recorded.
void PlayCamera()
{
    if (cameraTextureBty.Count == 0)
        return;

    byte[] textByte = cameraTextureBty[0];
    // Always dequeue, so a bad frame cannot jam the queue forever.
    cameraTextureBty.RemoveAt(0);
    if (textByte != null && textByte.Length > 0 && t3d.LoadImage(textByte))
        rightTexture.texture = t3d;
}
// Requests webcam permission and, once granted, starts the first available
// device and routes its live feed to the left-hand RawImage.
// Bug fix: the original overwrote the authorization result with
// `isUser = false;` and then tested `if (!isUser)`, which both discarded
// the permission check and inverted its meaning — the camera would start
// even when permission was denied.
IEnumerator Test1()
{
    yield return Application.RequestUserAuthorization(UserAuthorization.WebCam);
    if (!Application.HasUserAuthorization(UserAuthorization.WebCam))
        yield break; // permission denied — nothing to show

    WebCamDevice[] devices = WebCamTexture.devices;
    if (devices.Length == 0)
        yield break; // no camera attached — avoid IndexOutOfRangeException

    cameraName = devices[0].name;
    cameraTexture = new WebCamTexture(cameraName, 720, 810, 30);
    cameraTexture.Play();
    targetTexture.texture = cameraTexture;
}
// Repeating capture coroutine: roughly every 0.1 s it grabs the rendered
// frame, encodes it to JPG, and appends the bytes to the delayed-playback
// queue consumed by PlayCamera.
// Improvement: the original re-launched itself with StartCoroutine at the
// end of every cycle; a while(true) loop is the idiomatic Unity pattern
// and avoids allocating a fresh coroutine object per frame.
IEnumerator RecodeCamera()
{
    while (true)
    {
        yield return new WaitForSeconds(0.1f);
        // ReadPixels must run after rendering completes, hence
        // WaitForEndOfFrame; calling it earlier raises an error.
        yield return new WaitForEndOfFrame();
        t2d.ReadPixels(new Rect(0, 0, 720, 810), 0, 0);
        t2d.Apply();
        cameraTextureBty.Add(t2d.EncodeToJPG());
    }
}