Lab 5: JPEG Decoder

This post walks through the working principle and implementation of a JPEG decoder, from analysing the JPEG bitstream to decoding it into a YUV file, and shows how to output the DC image and gather its probability distribution with Huffman statistics. The key steps are parsing the JPEG header, building the quantization tables, decoding the macroblocks, writing the quantization information to a TXT trace file, and dumping the DC and AC coefficients as viewable images.

Requirements:

Save the decoder output as a YUV file that can be viewed with YUVViewer.
Output the decoding information (e.g. the quantization tables) to a TXT file.
Output the DC image and gather its probability distribution with Huffman statistics (a sketch of this statistics step follows the list).
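For the statistics requirement, the probability distribution can be gathered with a small standalone program. This is only a minimal sketch, independent of the decoder; the file name "dc.yuv" is just an example of the DC image that the modified tinyjpeg_decode writes later.

#include <stdio.h>

int main(void)
{
    unsigned long hist[256] = {0}, total = 0;
    int c, i;
    FILE *f = fopen("dc.yuv", "rb");
    if (f == NULL)
        return 1;
    while ((c = fgetc(f)) != EOF) { hist[c]++; total++; }   /* histogram of byte values */
    fclose(f);
    for (i = 0; i < 256; i++)
        if (hist[i])
            printf("%3d  p = %.6f\n", i, (double)hist[i] / total);   /* probability of each value */
    return 0;
}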

Principle:

JPEG encoding and decoding
(figure: block diagram of the JPEG encoder/decoder)
JPEG bitstream analysis
(figure: layout of the JPEG bitstream; the main markers are summarized right below)
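Since the bitstream figure is not reproduced, the markers the decoder has to recognize in a baseline JFIF file are listed here; the small table below just prints them.

#include <stdio.h>

struct jpeg_marker { unsigned char code; const char *name; };

static const struct jpeg_marker markers[] = {
    { 0xD8, "SOI  - start of image" },
    { 0xE0, "APP0 - JFIF application segment" },
    { 0xDB, "DQT  - define quantization table(s)" },
    { 0xC0, "SOF0 - start of frame, baseline DCT" },
    { 0xC4, "DHT  - define Huffman table(s)" },
    { 0xDA, "SOS  - start of scan, entropy-coded data follows" },
    { 0xD9, "EOI  - end of image" },
};

int main(void)
{
    int i;
    /* every marker is the byte 0xFF followed by the code; all segments except
       SOI and EOI carry a 2-byte big-endian length right after the marker */
    for (i = 0; i < (int)(sizeof(markers)/sizeof(markers[0])); i++)
        printf("FF %02X  %s\n", markers[i].code, markers[i].name);
    return 0;
}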

Procedure:

1. JPEG decoder: decode the JPG file and convert it to a YUV file
static void write_yuv(const char *filename, int width, int height,
                      unsigned char **components)
{
  FILE *F;
  char temp[1024];

  snprintf(temp, 1024, "%s.Y", filename);
  F = fopen(temp, "wb");
  fwrite(components[0], width, height, F);
  fclose(F);
  snprintf(temp, 1024, "%s.U", filename);
  F = fopen(temp, "wb");
  fwrite(components[1], width*height/4, 1, F);
  fclose(F);
  snprintf(temp, 1024, "%s.V", filename);
  F = fopen(temp, "wb");
  fwrite(components[2], width*height/4, 1, F);
  fclose(F);

  /* write the three planes back to back as a single 4:2:0 file for YUVViewer */
  snprintf(temp, 1024, "%s.yuv", filename);
  F = fopen(temp, "wb");
  fwrite(components[0], width*height, 1, F);
  fwrite(components[1], width*height/4, 1, F);
  fwrite(components[2], width*height/4, 1, F);
  fclose(F);
}
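A quick way to check the result before opening it in YUVViewer (a sketch; yuv_file_size is just a helper written here): a 4:2:0 planar file must be exactly width*height*3/2 bytes, otherwise the viewer shows a skewed picture.

#include <stdio.h>

static long yuv_file_size(const char *path)
{
    FILE *f = fopen(path, "rb");
    long size;
    if (f == NULL)
        return -1;
    fseek(f, 0, SEEK_END);       /* jump to the end of the file  */
    size = ftell(f);             /* the offset there is the size */
    fclose(f);
    return size;
}
/* expected: yuv_file_size("out.yuv") == (long)width * height * 3 / 2 */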
2. The huffman_table structure

#define HUFFMAN_BITS_SIZE  256
#define HUFFMAN_HASH_NBITS 9
#define HUFFMAN_HASH_SIZE  (1UL<<HUFFMAN_HASH_NBITS)
#define HUFFMAN_HASH_MASK  (HUFFMAN_HASH_SIZE-1)

struct huffman_table
{
  /* Fast look up table: using HUFFMAN_HASH_NBITS bits we can get the symbol
   * directly; if the entry is <0, then we need to look into the tree table */
  short int lookup[HUFFMAN_HASH_SIZE];          // symbols whose code length does not exceed 9 bits
  /* code size: gives the number of bits with which a symbol is encoded */
  unsigned char code_size[HUFFMAN_HASH_SIZE];
  /* some place to store values that are not encoded in the lookup table
   * FIXME: check whether 256 values are enough to store everything */
  uint16_t slowtable[16-HUFFMAN_HASH_NBITS][256];
};
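The idea behind lookup/slowtable can be shown with a self-contained toy (a sketch, not tinyjpeg code): every 9-bit window of the stream that starts with a short code maps directly to its symbol and real code length, and a negative entry means the code is longer than 9 bits and the slow path has to be searched.

#include <stdio.h>

#define HASH_NBITS 9
#define HASH_SIZE  (1u << HASH_NBITS)

static short         lookup[HASH_SIZE];
static unsigned char code_size[HASH_SIZE];

int main(void)
{
    unsigned int i, window;

    /* mark everything as "not in the fast table" */
    for (i = 0; i < HASH_SIZE; i++) { lookup[i] = -1; code_size[i] = 0; }

    /* register the 2-bit code "10" for symbol 7: every 9-bit window whose two
       leading bits are "10" returns symbol 7 and a code length of 2 */
    for (i = 0; i < (1u << (HASH_NBITS - 2)); i++) {
        lookup[(0x2u << (HASH_NBITS - 2)) | i]    = 7;
        code_size[(0x2u << (HASH_NBITS - 2)) | i] = 2;
    }

    window = 0x155;   /* next 9 bits of the stream: 1 0 1 0 1 0 1 0 1 */
    printf("symbol = %d, consume %d bits\n", lookup[window], code_size[window]);
    return 0;
}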

3. The component structure

struct component
{
  unsigned int Hfactor;            // horizontal sampling factor
  unsigned int Vfactor;            // vertical sampling factor
  float *Q_table;                  /* Pointer to the quantisation table to use */  // points to one of the tables stored in jdec_private below
  struct huffman_table *AC_table;  // pointer to the AC Huffman table
  struct huffman_table *DC_table;  // pointer to the DC Huffman table
  short int previous_DC;           /* Previous DC coefficient */
  short int DCT[64];               /* DCT coef */  // temporary storage for the coefficients of the current block
#if SANITY_CHECK
  unsigned int cid;
#endif
};
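previous_DC exists because the DC coefficient is coded differentially: the Huffman decoder only yields the difference from the previous block of the same component. A minimal sketch of the update (diff stands for the decoded difference; the real code sits inside tinyjpeg's Huffman decoding routine):

static short reconstruct_dc(struct component *c, short diff)
{
    c->DCT[0] = c->previous_DC + diff;   /* absolute DC of the current block       */
    c->previous_DC = c->DCT[0];          /* becomes the predictor for the next one */
    return c->DCT[0];
}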

4. The jdec_private structure

struct jdec_private
{
  /* Public variables */
  uint8_t *components[COMPONENTS];               // the YUV output buffers
  unsigned int width, height;                    /* Size of the image */
  unsigned int flags;

  /* Private variables */
  const unsigned char *stream_begin, *stream_end;   // point to the bitstream
  unsigned int stream_length;

  const unsigned char *stream;                   /* Pointer to the current stream */
  unsigned int reservoir, nbits_in_reservoir;

  struct component component_infos[COMPONENTS];  // three entries, for Y, U and V
  float Q_tables[COMPONENTS][64];                /* quantization tables */
  struct huffman_table HTDC[HUFFMAN_TABLES];     /* DC huffman tables */  // 2
  struct huffman_table HTAC[HUFFMAN_TABLES];     /* AC huffman tables */  // 2
  int default_huffman_table_initialized;
  int restart_interval;
  int restarts_to_go;                            /* MCUs left in this restart interval */
  int last_rst_marker_seen;                      /* Rst marker is incremented each time */

  /* Temp space used after the IDCT to store each component */
  uint8_t Y[64*4], Cr[64], Cb[64];

  jmp_buf jump_state;
  /* Internal pointers used for colorspace conversion, do not modify them !!! */
  uint8_t *plane[COMPONENTS];                    // three pointers to the start of the Y, U and V buffers

};

5. The main function

int main(int argc, char *argv[])
{
  int output_format = TINYJPEG_FMT_YUV420P;
  char *output_filename, *input_filename, *dc_filename, *ac_filename;
  clock_t start_time, finish_time;
  unsigned int duration;
  int current_argument;
  int benchmark_mode = 0;
#if TRACE
  p_trace = fopen(TRACEFILE, "w");
  if (p_trace == NULL)
  {
    printf("trace file open error!");
  }
#endif
  if (argc < 3)
    usage();

  current_argument = 1;
  while (1)
  {
    if (strcmp(argv[current_argument], "--benchmark") == 0)
      benchmark_mode = 1;
    else
      break;
    current_argument++;
  }

  if (argc < current_argument+2)
    usage();

  input_filename = argv[current_argument];
  if (strcmp(argv[current_argument+1], "yuv420p") == 0)
    output_format = TINYJPEG_FMT_YUV420P;
  else if (strcmp(argv[current_argument+1], "rgb24") == 0)
    output_format = TINYJPEG_FMT_RGB24;
  else if (strcmp(argv[current_argument+1], "bgr24") == 0)
    output_format = TINYJPEG_FMT_BGR24;
  else if (strcmp(argv[current_argument+1], "grey") == 0)
    output_format = TINYJPEG_FMT_GREY;
  else
    exitmessage("Bad format: need to be one of yuv420p, rgb24, bgr24, grey\n");
  output_filename = argv[current_argument+2];
  dc_filename = argv[current_argument+3];
  ac_filename = argv[current_argument+4];

  start_time = clock();

  if (benchmark_mode)
    load_multiple_times(input_filename, output_filename, output_format);
  else
    convert_one_image(input_filename, output_filename, output_format, dc_filename, ac_filename);   // conversion function

  finish_time = clock();
  duration = finish_time - start_time;
  snprintf(error_string, sizeof(error_string), "Decoding finished in %u ticks\n", duration);
#if TRACE
  fclose(p_trace);
#endif
  return 0;
}
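With the two extra arguments for the DC and AC files, a typical invocation looks like this (the executable name loadjpeg comes from the original tinyjpeg project and may differ):

loadjpeg test.jpg yuv420p out dc.yuv ac.yuv

Here write_yuv appends .Y/.U/.V/.yuv to "out", while dc.yuv and ac.yuv are opened exactly as given and receive the DC and AC images.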
The core function called from main: convert_one_image

int convert_one_image(const char *infilename, const char *outfilename, int output_format, const char *dcfilename, const char *acfilename)
{
  FILE *fp, *fp_dc, *fp_ac;
  unsigned int length_of_file;
  unsigned int width, height;
  unsigned char *buf;
  struct jdec_private *jdec;
  unsigned char *components[3];

  /* Load the Jpeg into memory */
  fp = fopen(infilename, "rb");
  fp_dc = fopen(dcfilename, "wb");
  fp_ac = fopen(acfilename, "wb");
  if (fp == NULL)
    exitmessage("Cannot open filename\n");
  length_of_file = filesize(fp);                      // length of the whole bitstream
  buf = (unsigned char *)malloc(length_of_file + 4);
  if (buf == NULL)
    exitmessage("Not enough memory for loading file\n");
  fread(buf, length_of_file, 1, fp);
  fclose(fp);

  /* Decompress it */
  jdec = tinyjpeg_init();                             // initialization only
  if (jdec == NULL)
    exitmessage("Not enough memory to alloc the structure need for decompressing\n");

  if (tinyjpeg_parse_header(jdec, buf, length_of_file) < 0)   // parse the file header; the results are stored in the jdec structure
    exitmessage(tinyjpeg_get_errorstring(jdec));

  /* Get the size of the image */
  tinyjpeg_get_size(jdec, &width, &height);           // read width and height back from the structure

  snprintf(error_string, sizeof(error_string), "Decoding JPEG image...\n");
  if (tinyjpeg_decode(jdec, output_format, fp_dc, fp_ac) < 0)
    exitmessage(tinyjpeg_get_errorstring(jdec));

  /*
   * Get the address of each plane (at most 3 planes are supported); depending
   * on the output mode only some components will be filled
   * RGB: 1 plane, YUV420P: 3 planes, GREY: 1 plane
   */
  tinyjpeg_get_components(jdec, components);          // fetch the component buffers from the structure

  /* Save it */
  switch (output_format)
  {
    case TINYJPEG_FMT_RGB24:
    case TINYJPEG_FMT_BGR24:
      write_tga(outfilename, output_format, width, height, components);
      break;
    case TINYJPEG_FMT_YUV420P:
      write_yuv(outfilename, width, height, components);   // the modified function: also writes a complete .yuv file
      break;
    case TINYJPEG_FMT_GREY:
      write_pgm(outfilename, width, height, components);
      break;
  }

  /* Only call this if the buffers were allocated by tinyjpeg_decode() */
  tinyjpeg_free(jdec);
  /* else just call free(jdec); */

  free(buf);
  return 0;
}
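filesize() is not shown in the listing; a common implementation (an assumption, the project's version may differ) seeks to the end of the already-opened stream and restores the position:

#include <stdio.h>

static int filesize(FILE *fp)
{
    long pos, size;
    pos = ftell(fp);              /* remember the current position    */
    fseek(fp, 0, SEEK_END);
    size = ftell(fp);             /* offset of the end == file length */
    fseek(fp, pos, SEEK_SET);     /* restore the position for fread() */
    return (int)size;
}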
The core function called from convert_one_image: tinyjpeg_decode

int tinyjpeg_decode(struct jdec_private *priv, int pixfmt, FILE *fp_dc, FILE *fp_ac)   // decodes the entropy-coded stream
{
  unsigned int x, y, xstride_by_mcu, ystride_by_mcu;
  unsigned int bytes_per_blocklines[3], bytes_per_mcu[3];
  unsigned char *dcbuffer, *acbuffer, *dcbuffer_write, *acbuffer_write, *dcbuffer_del, *acbuffer_del;
  int i;
  decode_MCU_fct decode_MCU;
  const decode_MCU_fct *decode_mcu_table;
  const convert_colorspace_fct *colorspace_array_conv;
  convert_colorspace_fct convert_to_pixfmt;
  /* first attempt, kept commented out: sized the buffers by 8x8 blocks instead of by MCU
  dcbuffer = (unsigned char *)malloc(sizeof(unsigned char) * ((priv->height * priv->width/64) * 3/2));
  acbuffer = (unsigned char *)malloc(sizeof(unsigned char) * ((priv->height * priv->width/64) * 3/2));
  dcbuffer_write = dcbuffer;
  acbuffer_write = acbuffer;
  dcbuffer_del = dcbuffer;
  acbuffer_del = acbuffer;
  */

  if (setjmp(priv->jump_state))
    return -1;

  /* To keep gcc happy initialize some array */
  bytes_per_mcu[1] = 0;
  bytes_per_mcu[2] = 0;
  bytes_per_blocklines[1] = 0;
  bytes_per_blocklines[2] = 0;

  decode_mcu_table = decode_mcu_3comp_table;
  switch (pixfmt) {
     case TINYJPEG_FMT_YUV420P:
       colorspace_array_conv = convert_colorspace_yuv420p;
       if (priv->components[0] == NULL)
         priv->components[0] = (uint8_t *)malloc(priv->width * priv->height);
       if (priv->components[1] == NULL)
         priv->components[1] = (uint8_t *)malloc(priv->width * priv->height/4);
       if (priv->components[2] == NULL)
         priv->components[2] = (uint8_t *)malloc(priv->width * priv->height/4);
       bytes_per_blocklines[0] = priv->width;
       bytes_per_blocklines[1] = priv->width/4;
       bytes_per_blocklines[2] = priv->width/4;
       bytes_per_mcu[0] = 8;
       bytes_per_mcu[1] = 4;
       bytes_per_mcu[2] = 4;
       break;

     case TINYJPEG_FMT_RGB24:
       colorspace_array_conv = convert_colorspace_rgb24;
       if (priv->components[0] == NULL)
         priv->components[0] = (uint8_t *)malloc(priv->width * priv->height * 3);
       bytes_per_blocklines[0] = priv->width * 3;
       bytes_per_mcu[0] = 3*8;
       break;

     case TINYJPEG_FMT_BGR24:
       colorspace_array_conv = convert_colorspace_bgr24;
       if (priv->components[0] == NULL)
         priv->components[0] = (uint8_t *)malloc(priv->width * priv->height * 3);
       bytes_per_blocklines[0] = priv->width * 3;
       bytes_per_mcu[0] = 3*8;
       break;

     case TINYJPEG_FMT_GREY:
       decode_mcu_table = decode_mcu_1comp_table;
       colorspace_array_conv = convert_colorspace_grey;
       if (priv->components[0] == NULL)
         priv->components[0] = (uint8_t *)malloc(priv->width * priv->height);
       bytes_per_blocklines[0] = priv->width;
       bytes_per_mcu[0] = 8;
       break;

     default:
#if TRACE
       fprintf(p_trace, "Bad pixel format\n");
       fflush(p_trace);
#endif
       return -1;
  }

  xstride_by_mcu = ystride_by_mcu = 8;
  if ((priv->component_infos[cY].Hfactor | priv->component_infos[cY].Vfactor) == 1) {
     decode_MCU = decode_mcu_table[0];
     convert_to_pixfmt = colorspace_array_conv[0];
#if TRACE
     fprintf(p_trace, "Use decode 1x1 sampling\n");
     fflush(p_trace);
#endif
  } else if (priv->component_infos[cY].Hfactor == 1) {
     decode_MCU = decode_mcu_table[1];
     convert_to_pixfmt = colorspace_array_conv[1];
     ystride_by_mcu = 16;
#if TRACE
     fprintf(p_trace, "Use decode 1x2 sampling (not supported)\n");
     fflush(p_trace);
#endif
  } else if (priv->component_infos[cY].Vfactor == 2) {
     decode_MCU = decode_mcu_table[3];
     convert_to_pixfmt = colorspace_array_conv[3];
     xstride_by_mcu = 16;
     ystride_by_mcu = 16;
#if TRACE
     fprintf(p_trace, "Use decode 2x2 sampling\n");
     fflush(p_trace);
#endif
  } else {
     decode_MCU = decode_mcu_table[2];
     convert_to_pixfmt = colorspace_array_conv[2];
     xstride_by_mcu = 16;
#if TRACE
     fprintf(p_trace, "Use decode 2x1 sampling\n");
     fflush(p_trace);
#endif
  }

  /* first attempt, kept commented out: wrote one value per 8x8 block at this point
  for (i=0; i < (priv->height * priv->width/64); i++)
  {
     *dcbuffer = (unsigned char)((priv->component_infos[cY].DCT[0]+512)/4);
     *acbuffer = (unsigned char)(priv->component_infos[cY].DCT[1]+128);
     dcbuffer++;
     acbuffer++;
  }
  for (i=0; i < ((priv->height * priv->width/64)/2); i++)
  {
     *dcbuffer = (unsigned char)0;
     *acbuffer = (unsigned char)0;
     dcbuffer++;
     acbuffer++;
  }
  fwrite(dcbuffer_write, sizeof(unsigned char), (priv->height * priv->width/64) * 3/2, fp_dc);
  fwrite(acbuffer_write, sizeof(unsigned char), (priv->height * priv->width/64) * 3/2, fp_ac);
  fclose(fp_dc);
  fclose(fp_ac);
  free(dcbuffer_del);
  free(acbuffer_del);
  */

  resync(priv);

  /* Don't forget that a block can be either 8 or 16 lines */
  bytes_per_blocklines[0] *= ystride_by_mcu;
  bytes_per_blocklines[1] *= ystride_by_mcu;
  bytes_per_blocklines[2] *= ystride_by_mcu;

  bytes_per_mcu[0] *= xstride_by_mcu/8;
  bytes_per_mcu[1] *= xstride_by_mcu/8;
  bytes_per_mcu[2] *= xstride_by_mcu/8;

  /* Just decode the image by macroblock (size is 8x8, 8x16, or 16x16) */
  /* one DC and one AC sample per MCU, plus half as much again for the chroma planes */
  dcbuffer = (unsigned char *)malloc(sizeof(unsigned char) * ((priv->height/ystride_by_mcu) * (priv->width/xstride_by_mcu) * 3/2));
  acbuffer = (unsigned char *)malloc(sizeof(unsigned char) * ((priv->height/ystride_by_mcu) * (priv->width/xstride_by_mcu) * 3/2));
  dcbuffer_write = dcbuffer;
  acbuffer_write = acbuffer;
  dcbuffer_del = dcbuffer;
  acbuffer_del = acbuffer;
  for (y=0; y < priv->height/ystride_by_mcu; y++)
   {
     //trace("Decoding row %d\n", y);
     priv->plane[0] = priv->components[0] + (y * bytes_per_blocklines[0]);
     priv->plane[1] = priv->components[1] + (y * bytes_per_blocklines[1]);
     priv->plane[2] = priv->components[2] + (y * bytes_per_blocklines[2]);
     for (x=0; x < priv->width; x+=xstride_by_mcu)
      {
        decode_MCU(priv);
        convert_to_pixfmt(priv);
        priv->plane[0] += bytes_per_mcu[0];
        priv->plane[1] += bytes_per_mcu[1];
        priv->plane[2] += bytes_per_mcu[2];
        if (priv->restarts_to_go > 0)
         {
           priv->restarts_to_go--;
           if (priv->restarts_to_go == 0)
            {
              priv->stream -= (priv->nbits_in_reservoir/8);
              resync(priv);
              if (find_next_rst_marker(priv) < 0)
                return -1;
            }
         }
        *dcbuffer = (unsigned char)((priv->component_infos[cY].DCT[0]+512)/4);   // shift and scale so the value stays within 0..255
        *acbuffer = (unsigned char)(priv->component_infos[cY].DCT[1]+128);
        dcbuffer++;
        acbuffer++;
      }
   }
  /* pad the chroma planes of the DC and AC images with neutral grey (128) */
  for (i=0; i < ((priv->height/ystride_by_mcu) * (priv->width/xstride_by_mcu) / 2); i++)
   {
     *dcbuffer = (unsigned char)128;
     *acbuffer = (unsigned char)128;
     dcbuffer++;
     acbuffer++;
   }
  fwrite(dcbuffer_write, sizeof(unsigned char), (priv->height/ystride_by_mcu) * (priv->width/xstride_by_mcu) * 3/2, fp_dc);
  fwrite(acbuffer_write, sizeof(unsigned char), (priv->height/ystride_by_mcu) * (priv->width/xstride_by_mcu) * 3/2, fp_ac);
  fclose(fp_dc);
  fclose(fp_ac);
  free(dcbuffer_del);
  free(acbuffer_del);
#if TRACE
  fprintf(p_trace, "Input file size: %d\n", priv->stream_length+2);
  fprintf(p_trace, "Input bytes actually read: %d\n", priv->stream - priv->stream_begin + 2);
  fflush(p_trace);
#endif

  return 0;
}
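For reference, the mapping used above keeps both outputs inside the 8-bit range:

dc_pixel = (DCT[0] + 512) / 4, which maps DCT[0] = -512 to 0, DCT[0] = 0 to 128 and DCT[0] = 511 to 255;
ac_pixel = DCT[1] + 128, which maps DCT[1] in [-128, 127] to [0, 255].

Because one value is stored per MCU, a 2x2-sampled (4:2:0) source of width x height produces DC and AC images of (width/16) x (height/16) luminance samples; the padding loop fills the two chroma planes with 128 so the files open directly as 4:2:0 sequences in YUVViewer.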

6. Outputting to a TXT file

static void build_quantization_table(float *qtable, const unsigned char *ref_table)
{
  /*
   * Taken from libjpeg. Copyright Independent JPEG Group's LLM idct.
   * For float AA&N IDCT method, divisors are equal to quantization
   * coefficients scaled by scalefactor[row]*scalefactor[col], where
   *   scalefactor[0] = 1
   *   scalefactor[k] = cos(k*PI/16) * sqrt(2)    for k=1..7
   * We apply a further scale factor of 8.
   * What's actually stored is 1/divisor so that the inner loop can
   * use a multiplication rather than a division.
   */
  int i, j;
  // added for this lab
  int m, n;
  static const double aanscalefactor[8] = {
     1.0, 1.387039845, 1.306562965, 1.175875602,
     1.0, 0.785694958, 0.541196100, 0.275899379
  };
  const unsigned char *zz = zigzag, *zz1 = zigzag;

  for (i=0; i<8; i++) {
     for (j=0; j<8; j++) {
       *qtable++ = ref_table[*zz++] * aanscalefactor[i] * aanscalefactor[j];
     }
  }
  // modification: print the quantization table (the raw table from the DQT segment, before the AA&N scale factors are applied)
#if TRACE
  for (m=0; m<8; m++)
  {
     for (n=0; n<8; n++)
     {
       fprintf(p_trace, "%d ", ref_table[*zz1++]);
     }
     fprintf(p_trace, "\n");
  }
#endif
}
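The eight constants in aanscalefactor[] are not magic; they follow directly from the formula quoted in the comment and can be regenerated with a few lines (a sketch):

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double PI = 3.14159265358979323846;
    int k;
    for (k = 0; k < 8; k++) {
        double s = (k == 0) ? 1.0 : cos(k * PI / 16.0) * sqrt(2.0);
        printf("aanscalefactor[%d] = %.9f\n", k, s);   /* matches the table above */
    }
    return 0;
}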

static int parse_DQT(struct jdec_private *priv, const unsigned char *stream)
{
  int qi;
  float *table;
  const unsigned char *dqt_block_end;
#if TRACE
  fprintf(p_trace, "> DQT marker\n");
  fflush(p_trace);
#endif
  dqt_block_end = stream + be16_to_cpu(stream);   // the DQT segment length is stored in two bytes, so be16_to_cpu(stream) yields the segment length
  stream += 2;   /* Skip length */

  while (stream < dqt_block_end)
   {
     qi = *stream++;
#if SANITY_CHECK
     if (qi>>4)   // the high nibble is the precision: non-zero means a 16-bit table, which is not supported
       snprintf(error_string, sizeof(error_string), "16 bits quantization table is not supported\n");
     if (qi>4)
       snprintf(error_string, sizeof(error_string), "No more 4 quantization table is supported (got %d)\n", qi);
#endif
     table = priv->Q_tables[qi];
     build_quantization_table(table, stream);     // store the DQT table into the priv structure
     stream += 64;
   }
#if TRACE
  fprintf(p_trace, "< DQT marker\n");
  fflush(p_trace);
#endif
  return 0;
}
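The byte read into qi is the Pq/Tq byte of the DQT segment: the high nibble is the sample precision (0 means 8-bit entries, 1 means 16-bit) and the low nibble is the table id. A small self-contained illustration (not tinyjpeg code):

#include <stdio.h>

static void split_pq_tq(unsigned char pq_tq)
{
    int precision = pq_tq >> 4;     /* 0 = 8-bit entries, 1 = 16-bit entries */
    int table_id  = pq_tq & 0x0F;   /* which of the up-to-4 tables to fill   */
    printf("precision=%d table_id=%d\n", precision, table_id);
}

int main(void)
{
    split_pq_tq(0x00);   /* typically the luminance table   */
    split_pq_tq(0x01);   /* typically the chrominance table */
    return 0;
}

tinyjpeg indexes Q_tables[qi] with the whole byte, which works for baseline files because they carry Pq = 0; the sanity check above fires otherwise.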

7. Outputting the DC and AC coefficients
The DC and AC output is implemented inside tinyjpeg_decode, listed in full under part 5 above: dcbuffer and acbuffer are allocated with one byte per MCU plus half as much again for the chroma planes, each MCU stores (DCT[0]+512)/4 and DCT[1]+128 of the luminance component, the chroma planes are padded with 128, and the two buffers are written to fp_dc and fp_ac before being freed.

Results

1. The decoded YUV image
(figure: decoded image shown in YUVViewer)
2. Quantization table output as TXT
(figure: quantization table in the trace file)

Excerpt of the Huffman table
3. DC and AC output
(figure: DC and AC images)

Conclusions:

The DCT concentrates the signal energy into a few low-frequency coefficients, which lets the high-frequency components be coarsely quantized or discarded, and it largely decorrelates neighbouring samples, turning a source with memory into an approximately memoryless one.
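A quick numeric check of the energy-compaction claim (a sketch, independent of the decoder): an 8-point DCT-II applied to a smooth ramp puts nearly all of the energy into the first two coefficients.

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double PI = 3.14159265358979323846;
    double x[8] = {16, 32, 48, 64, 80, 96, 112, 128};   /* slowly varying samples */
    int k, n;
    for (k = 0; k < 8; k++) {
        double sum = 0.0, ck = (k == 0) ? sqrt(1.0/8.0) : sqrt(2.0/8.0);
        for (n = 0; n < 8; n++)
            sum += x[n] * cos((2*n + 1) * k * PI / 16.0);   /* orthonormal DCT-II */
        printf("X[%d] = %8.2f\n", k, ck * sum);
    }
    return 0;
}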
