帧动画示例：把所有帧放在一张大图（精灵图集）上，每次绘制时动态修改四边形的 UV 坐标，
依次采样大图上的不同子区域，从而形成帧动画效果。
#include <windows.h>
#include <tchar.h>
#include <math.h>
#include "FreeImage.h"
#include "CELLMath.hpp"
#include "OpenGLWindow.h"
#include <gl/GLU.h>
#include <vector>
using namespace CELL;
// Interleaved vertex: position (x,y,z) followed by texture coordinates (u,v).
// Passed directly to glVertexPointer/glTexCoordPointer with stride sizeof(Vertex).
struct Vertex
{
    float x, y, z;   // position
    float u, v;      // texture coordinates
};
// Old Windows GL headers may lack GL_BGRA; define the EXT constant if needed.
#ifndef GL_BGRA_EXT
#define GL_BGRA_EXT 0x80E1
#endif

// Frame-animation demo: draws successive sub-rectangles (frames) of one
// large atlas texture by animating the quad's UV coordinates each frame.
class SamplerTexture :public OpenGLWindow
{
    // Atlas layout. NOTE(review): tuned for "xx.png" — 108x108 frames,
    // 9 frames per row, 64 frames total; confirm against the asset.
    enum
    {
        FRAME_W     = 108,  // width of one animation frame (pixels)
        FRAME_H     = 108,  // height of one animation frame (pixels)
        FRAME_COLS  = 9,    // frames per atlas row
        FRAME_COUNT = 64,   // total frames before the animation wraps
    };

    GLuint _texture1;     // atlas texture object (0 = not created)
    int _texWidth;        // full atlas width  (pixels)
    int _texHeight;       // full atlas height (pixels)
    int _imageWidth;      // width of one frame (pixels)
    int _imageHeight;     // height of one frame (pixels)
public:
    SamplerTexture()
    {
        // Initialize members so onShutdownGL()/renderOrth() are safe even
        // when onInitGL() fails before the texture exists (was uninitialized).
        _texture1    = 0;
        _texWidth    = 0;
        _texHeight   = 0;
        _imageWidth  = 0;
        _imageHeight = 0;
    }
    virtual void onShutdownGL()
    {
        glDeleteTextures(1, &_texture1);
    }
    // Loads an image with FreeImage, normalizes it to 32 bpp and uploads it
    // as a GL texture. Returns the texture id, or 0 on failure.
    unsigned createTextureFromImage(const char* fileName)
    {
        FREE_IMAGE_FORMAT fifmt = FreeImage_GetFileType(fileName, 0);
        if (fifmt == FIF_UNKNOWN)
        {
            return 0;
        }
        FIBITMAP* dib = FreeImage_Load(fifmt, fileName, 0);
        if (dib == 0)
        {
            return 0;   // file missing or unreadable (was unchecked)
        }
        // Normalize any input format to 32 bits per pixel.
        FIBITMAP* temp = dib;
        dib = FreeImage_ConvertTo32Bits(dib);
        FreeImage_Unload(temp);
        if (dib == 0)
        {
            return 0;   // conversion failed (was unchecked)
        }
        BYTE* pixels = (BYTE*)FreeImage_GetBits(dib);
        int width  = FreeImage_GetWidth(dib);
        int height = FreeImage_GetHeight(dib);
        _texWidth    = width;
        _texHeight   = height;
        _imageWidth  = FRAME_W;
        _imageHeight = FRAME_H;
        // FreeImage stores 32-bit pixels as BGRA on little-endian Windows;
        // upload with GL_BGRA_EXT so red/blue are not swapped.
        unsigned res = createTexture(width, height, pixels, GL_BGRA_EXT);
        FreeImage_Unload(dib);
        return res;
    }
    // Creates a 2D texture from raw pixels. 'format' describes the layout of
    // 'data'; the default GL_RGBA keeps the old call signature working.
    unsigned createTexture(int w, int h, const void* data, unsigned format = GL_RGBA)
    {
        unsigned texId;
        glGenTextures(1, &texId);
        glBindTexture(GL_TEXTURE_2D, texId);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, format, GL_UNSIGNED_BYTE, data);
        return texId;
    }
    virtual void onInitGL()
    {
        glClearColor(0, 0, 0, 1);
        glEnable(GL_TEXTURE_2D);
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        gluPerspective(45, double(_width) / double(_height), 0.1, 100);
        _texture1 = createTextureFromImage("xx.png");
    }
    // Draws the current animation frame as a screen-space quad at the
    // window's top-left. Only the integer part of fFrame selects the cell.
    void renderOrth(float fFrame)
    {
        // 2D pixel-space projection, origin at the top-left.
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        glOrtho(0, _width, _height, 0, -100, 100);
        // Locate the frame's cell in the atlas grid (clamped into range).
        int frame = (int)fFrame % FRAME_COUNT;
        int col = frame % FRAME_COLS;
        int row = frame / FRAME_COLS;
        // Normalized UV rectangle of that cell.
        float u  = float(_imageWidth  *  col)      / float(_texWidth);
        float v  = float(_imageHeight *  row)      / float(_texHeight);
        float u1 = float(_imageWidth  * (col + 1)) / float(_texWidth);
        float v1 = float(_imageHeight * (row + 1)) / float(_texHeight);
        int w = FRAME_W;
        int h = FRAME_H;
        // V is flipped (v1 at y=0) because FreeImage rows are bottom-up.
        Vertex vert[] =
        {
            { 0.0f,     0.0f,     0, u,  v1 },
            { 0.0f,     float(h), 0, u,  v  },
            { float(w), float(h), 0, u1, v  },
            { float(w), 0.0f,     0, u1, v1 },
        };
        glBindTexture(GL_TEXTURE_2D, _texture1);
        glEnableClientState(GL_VERTEX_ARRAY);
        glVertexPointer(3, GL_FLOAT, sizeof(Vertex), &vert[0].x);
        glEnableClientState(GL_TEXTURE_COORD_ARRAY);
        glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), &vert[0].u);
        glDrawArrays(GL_QUADS, 0, 4);
    }
    virtual void render()
    {
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        // Persistent frame counter advanced ~0.05 frames per render call.
        static float tm = 0;
        tm += 0.05f;
        if (tm >= FRAME_COUNT)
        {
            tm = 0;   // wrap the animation
        }
        renderOrth(tm);
    }
};
// Win32 entry point: construct the demo window and run it at 800x600.
int __stdcall WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nShowCmd)
{
    SamplerTexture app;
    app.main(800, 600);
    return 0;
}
视频纹理示例：在四边形上播放视频。
使用 FFmpeg 将视频逐帧解码并转换为 RGB 数据，上传为 OpenGL 纹理后绘制到四边形上。
#include <windows.h>
#include <tchar.h>
#include <math.h>
#include "FreeImage.h"
#include "CELLMath.hpp"
#include "OpenGLWindow.h"
#include <gl/GLU.h>
#include <vector>
#include "FFMPEGVideoReader.h"
using namespace CELL;
// Interleaved vertex: position (x,y,z) followed by texture coordinates (u,v).
// Passed directly to glVertexPointer/glTexCoordPointer with stride sizeof(Vertex).
struct Vertex
{
    float x, y, z;   // position
    float u, v;      // texture coordinates
};
class SamplerTexture :public OpenGLWindow
{
GLuint _texture;
float _textureV;
FFMPEGVideoReader _video;
public:
SamplerTexture()
{
_textureV = 0;
}
virtual void onShutdownGL()
{
glDeleteTextures(1,&_texture);
}
unsigned createTexture(int w,int h, void* data)
{
unsigned texId;
glGenTextures(1,&texId);
glBindTexture(GL_TEXTURE_2D,texId);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGB,w,h,0,GL_RGB,GL_UNSIGNED_BYTE,data);
return texId;
}
virtual void onInitGL()
{
_video.load("D:\\美术资源\\特效资源\\预览视频\\按钮.mp4");
_texture = createTexture(_video.screen_w,_video.screen_h,0);
}
virtual void render()
{
#define M_PI (3.14159265358979323846)
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60,double(_width)/double(_height),0.1,1000);
Vertex cubeVertices[] =
{
{ -1.0f,-1, 1.0f ,0, 1 },
{ 1.0f,-1, 1.0f ,1, 1 },
{ 1.0f, 1, 1.0f ,1, 0 },
{ -1.0f, 1, 1.0f ,0, 0 },
};
glMatrixMode(GL_MODELVIEW);
glBindTexture(GL_TEXTURE_2D,_texture);
void* data = _video.readFrame();
glTexSubImage2D(GL_TEXTURE_2D,0,0,0,_video.screen_w,_video.screen_h,GL_RGB,GL_UNSIGNED_BYTE,data);
glEnable(GL_DEPTH_TEST);
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(3,GL_FLOAT, sizeof(Vertex), &cubeVertices[0].x);
glTexCoordPointer(2,GL_FLOAT, sizeof(Vertex), &cubeVertices[0].u);
glLoadIdentity();
glTranslatef(0,0,-2);
glDrawArrays( GL_QUADS, 0, 4 );
}
};
// Win32 entry point for the video-texture demo: run the window at 800x600.
int __stdcall WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nShowCmd)
{
    SamplerTexture demo;
    demo.main(800, 600);
    return 0;
}
#pragma once
extern "C"
{
#include <libavutil/imgutils.h>
#include <libavutil/parseutils.h>
#include <libswscale/swscale.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/file.h>
}
class FFMPEGVideoReader
{
public:
AVFormatContext*pFormatCtx;
int i;
int videoindex;
AVCodecContext* pCodecCtx;
AVCodec* pCodec;
AVFrame* pFrame;
AVFrame* pFrameRGB;
SwsContext* img_convert_ctx;
int screen_w;
int screen_h;
public:
FFMPEGVideoReader()
{
av_register_all();
avformat_network_init();
pFormatCtx = avformat_alloc_context();
}
~FFMPEGVideoReader()
{
sws_freeContext(img_convert_ctx);
av_free(pFrameRGB);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
}
int load(const char* filepath = "c:/FlickAnimation.avi")
{
FILE *fp_yuv;
int ret;
int got_picture;
if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0)
{
return -1;
}
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
{
printf("Couldn't find stream information.\n");
return -1;
}
videoindex = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
{
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoindex = i;
break;
}
}
if (videoindex == -1)
{
printf("Didn't find a video stream.\n");
return -1;
}
pCodecCtx = pFormatCtx->streams[videoindex]->codec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL)
{
printf("Codec not found.\n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
printf("Could not open codec.\n");
return -1;
}
pFrame = av_frame_alloc();
pFrameRGB = av_frame_alloc();
screen_w = pCodecCtx->width;
screen_h = pCodecCtx->height;
img_convert_ctx = sws_getContext(
pCodecCtx->width
, pCodecCtx->height
, pCodecCtx->pix_fmt
, pCodecCtx->width
, pCodecCtx->height
, AV_PIX_FMT_RGB24
, SWS_BICUBIC
, NULL
, NULL
, NULL
);
if ((ret = av_image_alloc(dst_data, dst_linesize,
pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_RGB24, 1)) < 0)
{
fprintf(stderr, "Could not allocate destination image\n");
return -1;
}
return 0;
}
uint8_t *src_data[4], *dst_data[4];
int src_linesize[4], dst_linesize[4];
void* readFrame()
{
AVPacket packet;
av_init_packet(&packet);
for (;;)
{
if (av_read_frame(pFormatCtx, &packet))
{
av_free_packet(&packet);
return 0;
}
if (packet.stream_index != videoindex)
{
continue;
}
int frame_finished = 0;
int res = avcodec_decode_video2(pCodecCtx, pFrame, &frame_finished, &packet);
if (frame_finished)
{
int res = sws_scale(
img_convert_ctx
, (const uint8_t* const*)pFrame->data
, pFrame->linesize
, 0
, pCodecCtx->height
, dst_data
, dst_linesize
);
av_free_packet(&packet);
return dst_data[0];
}
}
return 0;
}
};