Decoding video with FFmpeg: writing the YUV images to a file
Author: Internet
This example decodes a video file with FFmpeg and writes the decoded YUV images to a file.
The flow is: register the codecs, open the input and probe its streams, find the video stream, open a matching decoder, then read packets in a loop, decode each into an AVFrame, repack it with libswscale, and write the planes to disk; finally flush the decoder and free everything.
The code is as follows:
```cpp
/**
 * output_yuv.cpp
 * date: 2015/11/02
 * Video decoding with FFmpeg;
 * writes the decoded YUV images to a file.
 */
#include <stdio.h>
#define __STDC_CONSTANT_MACROS //needed so the C99 constant macros work from C++
#ifdef _WIN32
//Windows
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
};
#endif
#endif
#ifdef _MSC_VER
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"swscale.lib")
#endif
#define FRAMES_NEED 30 //stop after this many frames; comment this out to decode the whole file
int main(int argc, char* argv[])
{
    AVFormatContext *pFormatCtx;        //format context
    int i, videoindex;
    AVCodecContext *pCodecCtx;          //codec context
    AVCodec *pCodec;                    //decoder
    AVFrame *pFrame;                    //frame as the decoder outputs it
    AVFrame *pFrameYUV;                 //frame repacked to tightly packed YUV420P
    uint8_t *out_buffer;
    AVPacket *packet;                   //compressed packet
    int y_size, y_size_align;
    int ret, got_picture;
    unsigned int frame_num = 0;
    struct SwsContext *img_convert_ctx; //image conversion context
    char filepath[]="MP4_test.es";      //input
    FILE *fp_frame = fopen("output.yuv","wb+");    //output
    FILE *fp_yuv = fopen("output_sws.yuv","wb+");  //output
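    /*
     * Two output files are written on purpose:
     *   output.yuv     - the decoder's own buffers, rows padded to the stride
     *   output_sws.yuv - repacked by sws_scale into tightly packed YUV420P
     * Comparing them shows the effect of the decoder's linesize alignment.
     */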
    av_register_all();        //ffmpeg step 0: register all codecs; required before avcodec_find_decoder()
    //avformat_network_init(); //required for opening network streams; not needed here
    pFormatCtx = avformat_alloc_context(); //allocate the format context
    if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){ //open the media file
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if(avformat_find_stream_info(pFormatCtx,NULL)<0){ //probe the streams; arg 1: context, arg 2: options
        printf("Couldn't find stream information.\n");
        return -1;
    }
    videoindex=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++) //walk the streams and pick the first video stream
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
            videoindex=i;
            break;
        }
    if(videoindex==-1){
        printf("Didn't find a video stream.\n");
        return -1;
    }
    /* The loop above can be replaced with av_find_best_stream():
    enum AVMediaType type = AVMEDIA_TYPE_VIDEO;
    videoindex = av_find_best_stream(pFormatCtx, type, -1, -1, NULL, 0);
    if (videoindex < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), filepath);
        avformat_close_input(&pFormatCtx);
        return -1;
    }
    */
    pCodecCtx=pFormatCtx->streams[videoindex]->codec; //point the codec context at the video stream's codec
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id); //look up a decoder; av_register_all() must have run first
    if(pCodec==NULL){
        printf("Codec not found.\n");
        return -1;
    }
    if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){ //initialize the AVCodecContext to use the decoder
        printf("Could not open codec.\n");
        return -1;
    }
    pFrame=av_frame_alloc();    //raw decoded frame
    pFrameYUV=av_frame_alloc(); //tightly packed YUV frame
    out_buffer=(uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height)); //AV_PIX_FMT_YUV420P replaces the old PIX_FMT_YUV420P macro
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height); //wire pFrameYUV's data/linesize pointers into out_buffer
    packet=(AVPacket *)av_malloc(sizeof(AVPacket)); //allocate the packet
    //Output Info-----------------------------
    printf("--------------- File Information ----------------\n");
    av_dump_format(pFormatCtx,0,filepath,0); //debug helper: print the file's audio/video stream info
    printf("-------------------------------------------------\n");
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
        pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); //init libswscale; no pixel-format conversion is needed for YUV output, but the stride must be repacked
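    /*
     * Decoders usually pad each row, so pFrame->linesize[0] (bytes per row)
     * can be larger than the picture width. sws_scale() below copies only
     * the first `width` bytes of every row into pFrameYUV, yielding the
     * tightly packed buffer that raw-YUV players expect.
     */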
    while(av_read_frame(pFormatCtx, packet)>=0){ //read the next packet from the stream
        if(packet->stream_index==videoindex){ //only decode packets belonging to the video stream
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet); //decode one frame: compressed AVPacket in, raw AVFrame out
            if(ret < 0){
                printf("Decode Error.\n");
                return -1;
            }
            if(got_picture){
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                    pFrameYUV->data, pFrameYUV->linesize); //repack into the tight buffer; no pixel-format change, only the stride is adjusted
                y_size=pCodecCtx->width*pCodecCtx->height;
                //y_size_align=((pCodecCtx->width+63)/64*64)*pCodecCtx->height; //original guess: rows padded to 64 bytes
                y_size_align=pFrame->linesize[0]*pCodecCtx->height; //safer: use the decoder's actual stride
                fwrite(pFrame->data[0],1,y_size_align,fp_frame);   //Y, stride-padded
                fwrite(pFrame->data[1],1,y_size_align/4,fp_frame); //U (chroma stride is typically half the luma stride)
                fwrite(pFrame->data[2],1,y_size_align/4,fp_frame); //V
                fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);   //Y, tightly packed
                fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv); //U
                fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv); //V
                frame_num++;
#ifdef FRAMES_NEED
                if(frame_num == FRAMES_NEED){
                    printf("%u frames done!\n",frame_num);
                    break;
                }
#endif
                if(frame_num%100 == 0)
                    printf("%u frames done!\n",frame_num);
            }
        }
        av_free_packet(packet);
    }
    //flush the decoder: frames may still be buffered inside the codec
#ifndef FRAMES_NEED
    while (1) {
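        /*
         * The last av_free_packet() above left packet->data == NULL and
         * packet->size == 0, so passing it to the decoder now switches the
         * decoder into draining mode instead of feeding it new data.
         */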
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
        if (ret < 0 || !got_picture){ //nothing left in the decoder
            printf("%u frames in all\n",frame_num);
            break;
        }
        sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
            pFrameYUV->data, pFrameYUV->linesize);
        y_size=pCodecCtx->width*pCodecCtx->height;
        y_size_align=pFrame->linesize[0]*pCodecCtx->height; //actual stride, as in the main loop
        fwrite(pFrame->data[0],1,y_size_align,fp_frame);   //Y
        fwrite(pFrame->data[1],1,y_size_align/4,fp_frame); //U
        fwrite(pFrame->data[2],1,y_size_align/4,fp_frame); //V
        fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);   //Y
        fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv); //U
        fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv); //V
        frame_num++;
        if(frame_num%100 == 0)
            printf("%u frames done!\n",frame_num);
    }
#endif
    sws_freeContext(img_convert_ctx);
    fclose(fp_yuv);
    fclose(fp_frame);
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
    printf("Press any key to continue!\n");
    getchar();
    return 0;
}
```
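The listing above uses the pre-3.x API: av_register_all(), avcodec_decode_video2(), and the avpicture_* helpers are all deprecated and were removed in later FFmpeg releases. Below is a minimal sketch of the same decode-to-YUV flow on the avcodec_send_packet()/avcodec_receive_frame() API introduced in FFmpeg 3.1. It assumes the decoder outputs AV_PIX_FMT_YUV420P, as the samples above do; the file names and the write_yuv420p() helper are illustrative, not part of the original post:
```cpp
#include <stdio.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

//Write one YUV420P frame plane by plane, skipping the stride padding.
static void write_yuv420p(const AVFrame *f, FILE *out)
{
    for (int p = 0; p < 3; p++) {
        int w = p ? f->width / 2 : f->width;   //chroma planes are half size
        int h = p ? f->height / 2 : f->height;
        for (int y = 0; y < h; y++)
            fwrite(f->data[p] + y * f->linesize[p], 1, w, out);
    }
}

int main(int argc, char *argv[])
{
    if (argc < 3) { fprintf(stderr, "usage: %s input output.yuv\n", argv[0]); return 1; }

    AVFormatContext *fmt = NULL;
    if (avformat_open_input(&fmt, argv[1], NULL, NULL) < 0) return 1;
    if (avformat_find_stream_info(fmt, NULL) < 0) return 1;

    int vidx = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (vidx < 0) { fprintf(stderr, "no video stream\n"); return 1; }

    //codec parameters now live in codecpar; streams no longer carry an open AVCodecContext
    const AVCodec *dec = avcodec_find_decoder(fmt->streams[vidx]->codecpar->codec_id);
    AVCodecContext *ctx = avcodec_alloc_context3(dec);
    avcodec_parameters_to_context(ctx, fmt->streams[vidx]->codecpar);
    if (avcodec_open2(ctx, dec, NULL) < 0) return 1;

    FILE *out = fopen(argv[2], "wb");
    AVPacket *pkt = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();

    while (av_read_frame(fmt, pkt) >= 0) {
        if (pkt->stream_index == vidx && avcodec_send_packet(ctx, pkt) == 0)
            while (avcodec_receive_frame(ctx, frame) == 0) //drain every frame this packet produced
                write_yuv420p(frame, out);
        av_packet_unref(pkt);
    }
    avcodec_send_packet(ctx, NULL);                //NULL packet enters draining mode
    while (avcodec_receive_frame(ctx, frame) == 0) //flush the frames still buffered
        write_yuv420p(frame, out);

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    avformat_close_input(&fmt);
    fclose(out);
    return 0;
}
```
Either program's tightly packed output can be checked with ffplay, e.g. `ffplay -f rawvideo -pixel_format yuv420p -video_size 1280x720 output_sws.yuv`, substituting the real width and height, which av_dump_format() prints.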
Source: https://blog.csdn.net/jiushimanya/article/details/110922486