Rust: generating video thumbnails with ffmpeg_next

#![allow(non_snake_case)]
#![allow(non_camel_case_types)] 

use std::{
    error::Error,
    fs::{self, File},
    io::prelude::*,
    path::Path,
};

#[cfg(target_os = "linux")]
use std::os::unix::fs::MetadataExt; // needed for ctime()/ctime_nsec() on Linux

use ffmpeg_next::{
    format::Pixel,
    software::scaling::{context::Context, flag::Flags},
    util::frame::video::Video,
    Rescale, // brings the rescale() trait method into scope
};

use data_encoding::BASE64;
use webp;
 
use super::{ tool_mod::{self, MyErr},file_mod };



pub struct ffmpeg;

impl ffmpeg {
    

    // Equivalent CLI: ffmpeg -y -v error -i "${input_image}"  -frames:v 1 -f webp  ${quality} "${output_path}"
    pub fn 图片格式装换(input_path:&str)->Result<(),Box<dyn Error>>{
        ffmpeg_next::init()?;
        // TODO: the conversion itself is not implemented yet; this only opens the input container.
        let _ictx = ffmpeg_next::format::input(&input_path)?;     // input_with_dictionary is the variant that accepts demuxer options
        Ok(())
    }
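    // A hedged sketch of one way the stub above could be completed, reusing the same
    // crates and the same decode-then-scale approach as 图片缩略图 below: decode the first
    // frame, convert it to RGBA at its original size, encode it losslessly with the webp
    // crate and write it next to the input file. The function name and the "<input>.webp"
    // output convention are illustrative assumptions, not part of the original code.
    pub fn 图片转webp_草稿(input_path: &str) -> Result<(), Box<dyn Error>> {
        ffmpeg_next::init()?;
        let mut ictx = ffmpeg_next::format::input(&input_path)?;
        let 视频流input = ictx
            .streams()
            .best(ffmpeg_next::media::Type::Video)
            .ok_or(ffmpeg_next::Error::StreamNotFound)?;
        let 记录视频流id = 视频流input.index();
        let context_decoder = ffmpeg_next::codec::context::Context::from_parameters(视频流input.parameters())?;
        let mut decoder = context_decoder.decoder().video()?;
        // keep the original dimensions; only convert the pixel format to RGBA
        let mut 帧缩放器 = Context::get(
            decoder.format(), decoder.width(), decoder.height(),
            Pixel::RGBA, decoder.width(), decoder.height(),
            Flags::FAST_BILINEAR,
        )?;
        let mut 解码帧 = Video::empty();
        let mut rgba格式帧 = Video::empty();
        for (stream, packet) in ictx.packets() {
            if stream.index() == 记录视频流id {
                decoder.send_packet(&packet)?;
                if decoder.receive_frame(&mut 解码帧).is_ok() {
                    帧缩放器.run(&解码帧, &mut rgba格式帧)?;
                    break;
                }
            }
        }
        decoder.send_eof()?;
        if rgba格式帧.planes() == 0 { return Err(MyErr::new("unable to convert this image".into())); }
        let webp数据 = webp::Encoder::from_rgba(rgba格式帧.data(0), rgba格式帧.width(), rgba格式帧.height()).encode_lossless();
        fs::write(format!("{}.webp", input_path), &*webp数据)?;
        Ok(())
    }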
    // Equivalent CLI: ffmpeg -y -v error -i "${input_image}"  -vf "scale=-1:${size}" -frames:v 1 -f webp  pipe: | base64 --wrap=0
    pub fn 图片缩略图(input_path:&str, size:u32)->Result<String,Box<dyn Error>>{
        let 路径对象 = Path::new(input_path);
        let 扩展名 = 路径对象.extension().unwrap_or_default().to_str().unwrap_or_default().to_lowercase();
        if 扩展名 == "svg" {    // SVG is not rasterized; return the file itself as a base64 data URL
            let 图片字符串 = fs::read(input_path)?;
            let base64_encoded = BASE64.encode(&图片字符串);
            return Ok(format!("data:image/svg+xml;base64,{}", base64_encoded));
        }

        let 记录时间ms = tool_mod::时间::now();
        let mut rgba格式帧 = Video::empty();
        {
            ffmpeg_next::init()?;
            let mut ictx = ffmpeg_next::format::input(&input_path)?;     // input_with_dictionary is the variant that accepts demuxer options

            let 视频流input = ictx
                .streams()
                .best(ffmpeg_next::media::Type::Video)
                .ok_or(ffmpeg_next::Error::StreamNotFound)?;

            let 记录视频流id = 视频流input.index();
             
            let context_decoder = ffmpeg_next::codec::context::Context::from_parameters(视频流input.parameters())?;
            let mut decoder = context_decoder.decoder().video()?;
            let 原始宽 = decoder.width();
            let 原始高 = decoder.height();
            let 计算宽 = if size % 2 == 0 {size} else {size+1};     // the width must be even
            let 计算高 = 计算宽 * 原始高 / 原始宽;
            let mut 帧缩放器  = Context::get(
                decoder.format(),  原始宽,  原始高, 
                Pixel::RGBA,
                计算宽, 计算高,
                Flags::FAST_BILINEAR ,
            )?;
            
            // Alternative approach, left commented out: encode WebP with ffmpeg's own WEBP
            // encoder instead of the webp crate used below.
            // let webp_codec = ffmpeg_next::codec::encoder::find(ffmpeg_next::codec::Id::WEBP);
            // let mut webp_encoder =
            // ffmpeg_next::codec::context::Context::new_with_codec(webp_codec.ok_or(ffmpeg_next::Error::InvalidData)?)
            //     .encoder()
            //     .video()?;
            // webp_encoder.set_height(计算高度);
            // webp_encoder.set_width(计算宽度);
            // webp_encoder.set_aspect_ratio(decoder.aspect_ratio());
            // webp_encoder.set_format(decoder.format());
            // webp_encoder.set_frame_rate(decoder.frame_rate());
            // webp_encoder.set_time_base(ist.time_base());

            for (stream, packet) in ictx.packets() {
                // println!("{:?} stream.index: {}   time spent looking for a frame: {}", &decoder.format(), stream.index(), tool_mod::时间::now() - 记录时间ms);
                if stream.index() == 记录视频流id {
                    let mut 解码帧 = Video::empty();
                    decoder.send_packet(&packet)?;  // feed the packet to the decoder
                    match decoder.receive_frame(&mut 解码帧){
                        // the decoder may need more packets before it can emit a frame
                        Err(e) =>  println!("decoder error: {:?}", e.to_string()),
                        Ok(_) => {
                            // scale the first decoded frame to RGBA and stop
                            帧缩放器.run(&解码帧, &mut rgba格式帧)?;
                            break;
                        },
                    };
                }
            }

            decoder.send_eof()?;   // signal end of stream to the decoder
        }
        
        let 数据边界 = rgba格式帧.planes();  // 0 planes means no frame was decoded; data(0) would panic
        if 数据边界 == 0 { return Err(MyErr::new("unable to generate an image thumbnail for this file".into())); }

        let rgba_data = rgba格式帧.data(0);
        // println!("rgba格式帧 --->   {} X {} ", rgba格式帧.width(), rgba格式帧.height() );
        // encode to WebP
        let encoder = webp::Encoder::from_rgba(rgba_data, rgba格式帧.width(), rgba格式帧.height());
        let 缩略图 = encoder.encode_lossless(); 
        let bytes数组 = &*缩略图;   // WebPMemory derefs to &[u8]
        let base64_encoded = BASE64.encode(bytes数组);
        let 图片dataurl = format!("data:image/webp;base64,{}", base64_encoded);
        // println!("elapsed {} \n {} ", tool_mod::时间::now() - 记录时间ms, 图片dataurl);
        Ok(图片dataurl)
    }
    // Equivalent CLI: ffmpeg -y -v error ${seek_time} -i  "${video_path}"  -frames:v 1 -f webp -vf "scale=-1:${size}" pipe: | base64
    pub fn 视频缩略图(input_path:&str, size:u32)->Result<String,Box<dyn Error>>{
            let 记录时间ms = tool_mod::时间::now();
            {   // only proceed if the file has not been modified within the last 5 seconds
                let 文件信息 = std::path::Path::new(input_path).metadata()?;
                #[cfg(not(target_os = "linux"))]
                let 上次修改时间ms = 文件信息.modified()?.duration_since(std::time::UNIX_EPOCH)?.as_millis();
                // on Linux, use ctime (inode status change time) via MetadataExt
                #[cfg(target_os = "linux")]
                let 上次修改时间ms = std::time::Duration::new(文件信息.ctime() as u64, 文件信息.ctime_nsec() as u32).as_millis();

                // println!("视频缩略图: last modified {} ms ago, \n{}\n{}", 记录时间ms - 上次修改时间ms, tool_mod::时间::时间转可读(记录时间ms), tool_mod::时间::时间转可读(上次修改时间ms));
                if 记录时间ms - 上次修改时间ms < 5000 {   return Err(MyErr::new("file was modified recently; thumbnail generation postponed".into())); }
            }
 
            let mut rgba格式帧 = Video::empty();
            {   
                ffmpeg_next::init()?;
                let mut ictx = ffmpeg_next::format::input(&input_path)?;     // input_with_dictionary is the variant that accepts demuxer options
                let 视频时长 = ictx.duration() as f64 / f64::from(ffmpeg_next::ffi::AV_TIME_BASE);
                let 中间秒 = (视频时长 * 0.5) as i64; 
                {   // seek to the middle of the video
                    // requires the ffmpeg_next::Rescale trait to be in scope
                    let position = 中间秒.rescale((1, 1), ffmpeg_next::rescale::TIME_BASE);
                    ictx.seek(position, ..position)?;
                    // println!("timestamp {}   {}", 中间秒, position);
                }

                let 视频流input = ictx
                    .streams()
                    .best(ffmpeg_next::media::Type::Video)
                    .ok_or(ffmpeg_next::Error::StreamNotFound)?;
                
                let 记录视频流id = 视频流input.index();
                
                let context_decoder = ffmpeg_next::codec::context::Context::from_parameters(视频流input.parameters())?;
                let mut decoder = context_decoder.decoder().video()?;
                let 原始宽 = decoder.width();
                let 原始高 = decoder.height();
                let 计算宽 = if size % 2 == 0 {size} else {size+1};      // the width must be even
                let 计算高 = 计算宽 * 原始高 / 原始宽;
                let mut 帧缩放器  = Context::get(
                    decoder.format(),  原始宽,  原始高, 
                    Pixel::RGBA,
                    计算宽, 计算高,
                    Flags::FAST_BILINEAR ,
                )?;
                
                for (stream, packet) in ictx.packets() {
                    // println!("stream.index: {}   time spent looking for a frame: {}", stream.index(), tool_mod::时间::now() - 记录时间ms);
                    if stream.index() == 记录视频流id {
                        let mut 解码帧 = Video::empty();
                        decoder.send_packet(&packet)?;                // feed the packet to the decoder
                        match decoder.receive_frame(&mut 解码帧){    // only one frame is needed
                            Err(_e) => {}, // the decoder may need more packets before it can emit a frame
                            Ok(_) => {
                                帧缩放器.run(&解码帧, &mut rgba格式帧)?;
                                break;
                            },
                        };
                    }
                }

                decoder.send_eof()?;   // signal end of stream to the decoder
            }
            
            let 数据边界 = rgba格式帧.planes();  // 0 planes means no frame was decoded; data(0) would panic
            if 数据边界 == 0 { 
                println!("ffmpeg failed to decode a frame; cannot generate a thumbnail for now");
                return Err(MyErr::new("unable to generate a video thumbnail for this file".into())); 
            }

            let rgba_data = rgba格式帧.data(0);
            // encode to WebP
            let encoder = webp::Encoder::from_rgba(rgba_data, rgba格式帧.width(), rgba格式帧.height());
            let 缩略图 = encoder.encode_lossless(); 
            let bytes数组 = &*缩略图;   // WebPMemory derefs to &[u8]
            let base64_encoded = BASE64.encode(bytes数组);
            let 图片dataurl = format!("data:image/webp;base64,{}", base64_encoded);
            // println!("elapsed {} \n {} ", tool_mod::时间::now() - 记录时间ms, 图片dataurl);
            Ok(图片dataurl)
    }
    pub fn 视频信息(input_path:&str)->Result<(),Box<dyn Error>>{
        ffmpeg_next::init()?;
        match ffmpeg_next::format::input(&input_path) {
            Ok(context) => {
                for (k, v) in context.metadata().iter() {
                    println!("{}: {}", k, v);
                }

                if let Some(stream) = context.streams().best(ffmpeg_next::media::Type::Video) {
                    println!("Best video stream index: {}", stream.index());
                }

                if let Some(stream) = context.streams().best(ffmpeg_next::media::Type::Audio) {
                    println!("Best audio stream index: {}", stream.index());
                }

                if let Some(stream) = context.streams().best(ffmpeg_next::media::Type::Subtitle) {
                    println!("Best subtitle stream index: {}", stream.index());
                }

                let 视频时长 = context.duration() as f64 / f64::from(ffmpeg_next::ffi::AV_TIME_BASE);
                println!("duration (seconds): {:.2}", 视频时长);


                for stream in context.streams() {
                    println!("stream index {}:", stream.index());
                    println!("\ttime_base: {}", stream.time_base());
                    println!("\tstart_time: {}", stream.start_time());
                    println!("\tduration (stream timebase): {}", stream.duration());
                    println!(
                        "\tduration (seconds): {:.2}",
                        stream.duration() as f64 * f64::from(stream.time_base())
                    );
                    println!("\tframes: {}", stream.frames());
                    println!("\tdisposition: {:?}", stream.disposition());
                    println!("\tdiscard: {:?}", stream.discard());
                    println!("\trate: {}", stream.rate());

                    let codec = ffmpeg_next::codec::context::Context::from_parameters(stream.parameters())?;
                    println!("\tmedium: {:?}", codec.medium());
                    println!("\tid: {:?}", codec.id());

                    if codec.medium() == ffmpeg_next::media::Type::Video {
                        if let Ok(video) = codec.decoder().video() {
                            println!("\tbit_rate: {}", video.bit_rate());
                            println!("\tmax_rate: {}", video.max_bit_rate());
                            println!("\tdelay: {}", video.delay());
                            println!("\tvideo.width: {}", video.width());
                            println!("\tvideo.height: {}", video.height());
                            println!("\tvideo.format: {:?}", video.format());
                            println!("\tvideo.has_b_frames: {}", video.has_b_frames());
                            println!("\tvideo.aspect_ratio: {}", video.aspect_ratio());
                            println!("\tvideo.color_space: {:?}", video.color_space());
                            println!("\tvideo.color_range: {:?}", video.color_range());
                            println!("\tvideo.color_primaries: {:?}", video.color_primaries());
                            println!(
                                "\tvideo.color_transfer_characteristic: {:?}",
                                video.color_transfer_characteristic()
                            );
                            println!("\tvideo.chroma_location: {:?}", video.chroma_location());
                            println!("\tvideo.references: {}", video.references());
                            println!("\tvideo.intra_dc_precision: {}", video.intra_dc_precision());
                        }
                    } else if codec.medium() == ffmpeg_next::media::Type::Audio {
                        if let Ok(audio) = codec.decoder().audio() {
                            println!("\tbit_rate: {}", audio.bit_rate());
                            println!("\tmax_rate: {}", audio.max_bit_rate());
                            println!("\tdelay: {}", audio.delay());
                            println!("\taudio.rate: {}", audio.rate());
                            println!("\taudio.channels: {}", audio.channels());
                            println!("\taudio.format: {:?}", audio.format());
                            println!("\taudio.frames: {}", audio.frames());
                            println!("\taudio.align: {}", audio.align());
                            println!("\taudio.channel_layout: {:?}", audio.channel_layout());
                        }
                    }
                }
            }

            Err(error) => println!("error: {}", error),
        };
        Ok(())
    }
}





// Debug helper (not called above): dumps a decoded frame to a binary PPM file.
// Note that PPM (P6) stores packed RGB, so the frame passed in should be scaled to
// Pixel::RGB24 first, not the RGBA frames produced by the thumbnail functions.
#[allow(dead_code)]
fn save_file(frame: &Video, index: usize) -> std::result::Result<(), std::io::Error> {
    let mut file = File::create(format!("frame{}.ppm", index))?;
    file.write_all(format!("P6\n{} {}\n255\n", frame.width(), frame.height()).as_bytes())?;
    file.write_all(frame.data(0))?;
    Ok(())
}
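
// A hedged usage sketch for save_file, not part of the original code: given an open
// video decoder and a decoded frame (as produced in the functions above), scale the
// frame to Pixel::RGB24 instead of RGBA and dump it as frame0.ppm.
#[allow(dead_code)]
fn 保存ppm示意(decoder: &ffmpeg_next::decoder::Video, 解码帧: &Video) -> Result<(), Box<dyn Error>> {
    let mut 帧缩放器 = Context::get(
        decoder.format(), decoder.width(), decoder.height(),
        Pixel::RGB24, decoder.width(), decoder.height(),
        Flags::FAST_BILINEAR,
    )?;
    let mut rgb帧 = Video::empty();
    帧缩放器.run(解码帧, &mut rgb帧)?;
    save_file(&rgb帧, 0)?;   // writes frame0.ppm
    Ok(())
}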



#[cfg(test)]
mod 测试生成_ffmpeg_mod { 
    use super::*;
    #[test]
    fn 测试_视频缩略图() {
 
        let 路径 = "/home/lwl/Desktop/7.mp4";
        ffmpeg::视频缩略图(路径, 64);
    }

    #[test]
    fn 测试_信息() {
        let 路径 = "/media/lwl/720/OS/🌷壁纸/.ai图片/🧡/48_3.jpg";
        ffmpeg::视频信息(路径);

        let 路径 = "/media/lwl/720/OS/🌷壁纸/.ai图片/🧡/49_25.jpg";
        ffmpeg::视频信息(路径);
    }

    #[test]
    fn 测试_图片缩略图() {
        let 路径 = "/home/lwl/Desktop/1.jpg";
        ffmpeg::图片缩略图(路径, 63);

        let 路径 = "/home/lwl/Desktop/2.jpg";
        ffmpeg::图片缩略图(路径, 64);
    }


}
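
The code above pulls in three external crates plus the author's own helper modules (tool_mod, file_mod and MyErr), which are not shown here. Below is a rough Cargo.toml sketch; the version numbers are only illustrative and should match whatever you actually build against, and ffmpeg-next additionally needs the FFmpeg development libraries (libavcodec, libavformat, libswscale and friends, discovered via pkg-config) installed on the system.

[dependencies]
ffmpeg-next   = "6"    # used as ffmpeg_next in the code
webp          = "0.2"  # WebP encoding (webp::Encoder)
data-encoding = "2"    # provides BASE64 for the data URLs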