
Writing a Simple Video Player with FFmpeg 5.0 and SDL 2.0


While writing the video-player daemon with ffmpeg 5.0 and SDL 1.2 (that was the version already installed on the OS, so I wanted to try debugging against it first), the video got stuck on the first frame and an error was reported:

The test routine is as follows:

int B_Play_Openfile(const char* szFilePath)
{
	int iRet = 0;
	int iVideo_index = 0;
	char szError[128] = {0};
	unsigned char *out_buffer = NULL;
	AVFormatContext *FormatContext = avformat_alloc_context();  	/* media file (format) handle */
	const AVCodec *Video_Codec = NULL;								/* decoder */
	AVCodecContext *Video_CodecContext = NULL;   					/* decoder context */
	AVFrame *Video_Frame = NULL;									/* decoded frame */
	AVFrame *YUV_Frame = NULL;										/* YUV frame for display */
	AVPacket *Video_Packet = NULL;

	if (szFilePath == NULL)
	{
		B_LOG("illegal file path\r\n");
		return -1;
	}
	else
	{
		B_LOG("B_Play_Openfile, Path[%s]\r\n", szFilePath);
	}

	/* Open an input stream and read the file header */
	iRet = avformat_open_input(&FormatContext, szFilePath, NULL, NULL);
	if (iRet != 0)
	{
		av_strerror(iRet, szError, sizeof(szError));
		B_LOG("avformat_open_input fail, Ret[%d], Err[%s]\r\n", iRet, szError);
		return iRet;
	}

	/* Read packets from the media file to get stream information */
	iRet = avformat_find_stream_info(FormatContext, NULL);
	if (iRet < 0)
	{
		av_strerror(iRet, szError, sizeof(szError));
		B_LOG("avformat_find_stream_info fail, Ret[%d], Err[%s]\r\n", iRet, szError);
		avformat_close_input(&FormatContext);
		return iRet;
	}

	/* Find the index of the video stream */
	iVideo_index = av_find_best_stream(FormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	if (AVERROR_STREAM_NOT_FOUND == iVideo_index)
	{
		/* No video stream found */
		B_LOG("av_find_best_stream not found video stream\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}
	else if (AVERROR_DECODER_NOT_FOUND == iVideo_index)
	{
		/* Video stream found, but no decoder for it */
		B_LOG("av_find_best_stream streams were found but no decoder\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}

	/* Find the decoder */
	Video_Codec = avcodec_find_decoder(FormatContext->streams[iVideo_index]->codecpar->codec_id);
	if (NULL == Video_Codec)
	{
		B_LOG("avcodec_find_decoder fail\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}

	/* Create the decoder context (allocate memory for Video_CodecContext) */
	Video_CodecContext = avcodec_alloc_context3(Video_Codec);
	if (NULL == Video_CodecContext)
	{
		B_LOG("avcodec_alloc_context3 fail\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}

	/* Pre-initialize the decoder context (copy the decoder parameters from FormatContext->streams[iVideo_index]->codecpar into Video_CodecContext) */
	iRet = avcodec_parameters_to_context(Video_CodecContext, FormatContext->streams[iVideo_index]->codecpar);
	if (iRet < 0)
	{
		av_strerror(iRet, szError, sizeof(szError));
		B_LOG("avcodec_parameters_to_context fail, Ret[%d], Err[%s]\r\n", iRet, szError);
		avformat_close_input(&FormatContext);
		return iRet;
	}

	/* Open the decoder context */
	iRet = avcodec_open2(Video_CodecContext, Video_Codec, NULL);
	if (iRet != 0)
	{
		av_strerror(iRet, szError, sizeof(szError));
		B_LOG("avcodec_open2 fail, Ret[%d], Err[%s]\r\n", iRet, szError);
		avformat_close_input(&FormatContext);
		return iRet;
	}

	/* Allocate frame buffers */
	Video_Frame = av_frame_alloc();
	if (NULL == Video_Frame)
	{
		B_LOG("av_frame_alloc fail\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}

	YUV_Frame = av_frame_alloc();
	if (NULL == YUV_Frame)
	{
		B_LOG("av_frame_alloc fail\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}
	
	struct SwsContext *img_convert_ctx;
	int Video_H = 0;												/* video height */
	int Video_W = 0;												/* video width */
	SDL_Surface *screen; 
	SDL_VideoInfo *vi;
	SDL_Overlay *bmp; 
	SDL_Rect rect;
	int got_picture;
	iRet = SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER);
	if (iRet != 0)
	{
		B_LOG("SDL_Init fail, Ret[%d]\r\n", iRet);
		avformat_close_input(&FormatContext);
		return iRet;
	}

	Video_H = Video_CodecContext->width;
	Video_W = Video_CodecContext->height;
	screen = SDL_SetVideoMode(Video_W, Video_H, 0, 0);
	if (screen == NULL)
	{
		B_LOG("SDL_SetVideoMode fail: %s\r\n", SDL_GetError());
		avformat_close_input(&FormatContext);
		return -1;
	}
	bmp = SDL_CreateYUVOverlay(Video_CodecContext->width, Video_CodecContext->height,SDL_YV12_OVERLAY, screen); 
 
	rect.x = 0;    
	rect.y = 0;    
	rect.w = Video_W;    
	rect.h = Video_H; 

	Video_Packet = (AVPacket *)av_malloc(sizeof(AVPacket));
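	/* Note: av_malloc() returns uninitialized memory; in FFmpeg 5.0 the usual way to obtain a properly initialized AVPacket is av_packet_alloc(). */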
	av_dump_format(FormatContext, 0, szFilePath, 0);


	SDL_WM_SetCaption("B_Play",NULL);

	img_convert_ctx = sws_getContext(Video_CodecContext->width, Video_CodecContext->height, Video_CodecContext->pix_fmt, 
			Video_CodecContext->width, Video_CodecContext->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 
	B_LOG("1111111");
	while(av_read_frame(FormatContext, Video_Packet)>=0)
	{
		B_LOG("2222222");
		if(Video_Packet->stream_index==iVideo_index)
		{
			//Decode
			avcodec_send_packet(Video_CodecContext, Video_Packet);
			got_picture = avcodec_receive_frame(Video_CodecContext, Video_Frame);
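			/* NOTE: avcodec_receive_frame() returns 0 on success and a negative AVERROR code otherwise, so the check below is effectively inverted -- see the discussion after this listing. */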

			if(got_picture)
			{
				SDL_LockYUVOverlay(bmp);
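				/* SDL's YV12 overlay stores its planes as Y, V, U, while AV_PIX_FMT_YUV420P is Y, U, V -- hence the swapped [1]/[2] indices below. */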
				YUV_Frame->data[0]=bmp->pixels[0];
				YUV_Frame->data[1]=bmp->pixels[2];
				YUV_Frame->data[2]=bmp->pixels[1];     
				YUV_Frame->linesize[0]=bmp->pitches[0];
				YUV_Frame->linesize[1]=bmp->pitches[2];   
				YUV_Frame->linesize[2]=bmp->pitches[1];
				sws_scale(img_convert_ctx, (const uint8_t* const*)Video_Frame->data, Video_Frame->linesize, 0, 
					Video_CodecContext->height, YUV_Frame->data, YUV_Frame->linesize);
				SDL_UnlockYUVOverlay(bmp); 

				SDL_DisplayYUVOverlay(bmp, &rect); 
				//Delay 40ms
				SDL_Delay(40);
			}
		}

		av_packet_unref(Video_Packet);
	}
	
	B_LOG("3333333333\n");
	while (1) 
	{
		B_LOG("4444444444\n");
		avcodec_send_packet(Video_CodecContext, Video_Packet);

		got_picture = avcodec_receive_frame(Video_CodecContext, Video_Frame);
		if (got_picture)
			break;
		sws_scale(img_convert_ctx, (const uint8_t* const*)Video_Frame->data, Video_Frame->linesize, 0, Video_CodecContext->height, YUV_Frame->data, YUV_Frame->linesize);
		SDL_LockYUVOverlay(bmp);
		YUV_Frame->data[0]=bmp->pixels[0];
		YUV_Frame->data[1]=bmp->pixels[2];
		YUV_Frame->data[2]=bmp->pixels[1];     
		YUV_Frame->linesize[0]=bmp->pitches[0];
		YUV_Frame->linesize[1]=bmp->pitches[2];   
		YUV_Frame->linesize[2]=bmp->pitches[1];
 
		SDL_UnlockYUVOverlay(bmp); 
		SDL_DisplayYUVOverlay(bmp, &rect); 
		//Delay 40ms
		SDL_Delay(40);
	}
 
	sws_freeContext(img_convert_ctx);
	SDL_Quit();
	//av_free(out_buffer);
	av_frame_free(&YUV_Frame);
	av_frame_free(&Video_Frame);
	avcodec_close(Video_CodecContext);
	avformat_close_input(&FormatContext);

	return 0;
}

At the time I did not understand this problem very well, but after searching online I saw that other people were pairing a newer FFmpeg with SDL 2.0, so I decided to try SDL 2.0 first. Only later did I find that the error was actually caused by a mistake in my own code (the check on avcodec_receive_frame's return value was written backwards, so the video-frame handling branch was never entered...).

(screenshot)
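For reference, a minimal sketch of how the return values of this send/receive pair are usually checked in FFmpeg 5.0; it reuses the variable and logging names from the routine above purely for illustration, not as a drop-in fix:

	int ret = avcodec_send_packet(Video_CodecContext, Video_Packet);
	if (ret < 0)
	{
		B_LOG("avcodec_send_packet fail, Ret[%d]\r\n", ret);
	}
	while (ret >= 0)
	{
		ret = avcodec_receive_frame(Video_CodecContext, Video_Frame);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
		{
			break;	/* decoder needs more input / has been fully drained */
		}
		else if (ret < 0)
		{
			B_LOG("avcodec_receive_frame fail, Ret[%d]\r\n", ret);
			break;
		}
		/* ret == 0: Video_Frame now holds a decoded frame, ready for sws_scale() and display */
	}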

Install SDL 2.0:

sudo apt-get install libsdl2-2.0
sudo apt-get install libsdl2-dev
(screenshot)
After the installation I rebuilt the project and found quite a few compile errors:

(screenshot)
The reason is that the SDL 1.2 and SDL 2.0 APIs differ quite a bit (for this player that mainly means replacing SDL_SetVideoMode / SDL_CreateYUVOverlay / SDL_DisplayYUVOverlay with SDL_CreateWindow / SDL_CreateRenderer / SDL_CreateTexture and the render copy/present calls); for details see: https://blog.csdn.net/chaoswiming/article/details/46788707
After rewriting the code against the SDL 2.0 API, there were still compile errors:
(screenshot)

This kind of problem is usually related to the library version or the build configuration, so I downloaded a fresh copy of the SDL source, built it, and linked the static library into my project's makefile (typically that means adding the SDL2 include path and either -lSDL2 or the full path to libSDL2.a to the link line):
(screenshot)

At that point this kind of error showed up:
(screenshot)
It looked like a build problem, so I adjusted the makefile:
(screenshot)
That finally resolved the SDL 2.0 build problems.

The final debug routine is as follows:

int B_Play_Openfile(const char* szFilePath)
{
	int iRet = 0;
	int iVideo_index = 0;
	char szError[128] = {0};
	unsigned char *out_buffer = NULL;
	AVFormatContext *FormatContext = avformat_alloc_context();  	/* media file (format) handle */
	const AVCodec *Video_Codec = NULL;								/* decoder */
	AVCodecContext *Video_CodecContext = NULL;   					/* decoder context */
	AVFrame *Video_Frame = NULL;									/* decoded frame */
	AVFrame *YUV_Frame = NULL;										/* YUV frame for display */
	AVPacket *Video_Packet = NULL;
	int ret, got_picture;

	if (szFilePath == NULL)
	{
		B_LOG("illegal file path\r\n");
		return -1;
	}
	else
	{
		B_LOG(">>>>>>>>>>>>>>>>>>>>>>>>>>>B_Play_Openfile, Path[%s]\r\n", szFilePath);
	}

	/* Open an input stream and read the file header */
	iRet = avformat_open_input(&FormatContext, szFilePath, NULL, NULL);
	if (iRet != 0)
	{
		av_strerror(iRet, szError, sizeof(szError));
		B_LOG("avformat_open_input fail, Ret[%d], Err[%s]\r\n", iRet, szError);
		return iRet;
	}

	/* Read packets from the media file to get stream information */
	iRet = avformat_find_stream_info(FormatContext, NULL);
	if (iRet < 0)
	{
		av_strerror(iRet, szError, sizeof(szError));
		B_LOG("avformat_find_stream_info fail, Ret[%d], Err[%s]\r\n", iRet, szError);
		avformat_close_input(&FormatContext);
		return iRet;
	}

	/* Find the index of the video stream */
	iVideo_index = av_find_best_stream(FormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	if (AVERROR_STREAM_NOT_FOUND == iVideo_index)
	{
		/* 鏈壘鍒拌棰戞祦 */
		B_LOG("av_find_best_stream not found video stream\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}
	else if (AVERROR_DECODER_NOT_FOUND == iVideo_index)
	{
		/* Video stream found, but no decoder for it */
		B_LOG("av_find_best_stream streams were found but no decoder\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}

	/* Find the decoder */
	Video_Codec = avcodec_find_decoder(FormatContext->streams[iVideo_index]->codecpar->codec_id);
	if (NULL == Video_Codec)
	{
		B_LOG("avcodec_find_decoder fail\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}

	/* Create the decoder context (allocate memory for Video_CodecContext) */
	Video_CodecContext = avcodec_alloc_context3(Video_Codec);
	if (NULL == Video_CodecContext)
	{
		B_LOG("avcodec_alloc_context3 fail\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}

	/* Pre-initialize the decoder context (copy the decoder parameters from FormatContext->streams[iVideo_index]->codecpar into Video_CodecContext) */
	iRet = avcodec_parameters_to_context(Video_CodecContext, FormatContext->streams[iVideo_index]->codecpar);
	if (iRet < 0)
	{
		av_strerror(iRet, szError, sizeof(szError));
		B_LOG("avcodec_parameters_to_context fail, Ret[%d], Err[%s]\r\n", iRet, szError);
		avformat_close_input(&FormatContext);
		return iRet;
	}

	/* Open the decoder context */
	iRet = avcodec_open2(Video_CodecContext, Video_Codec, NULL);
	if (iRet != 0)
	{
		av_strerror(iRet, szError, sizeof(szError));
		B_LOG("avcodec_open2 fail, Ret[%d], Err[%s]\r\n", iRet, szError);
		avformat_close_input(&FormatContext);
		return iRet;
	}

	/* 鐢宠甯х紦瀛?*/
	Video_Frame = av_frame_alloc();
	if (NULL == Video_Frame)
	{
		B_LOG("av_frame_alloc fail\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}

	YUV_Frame = av_frame_alloc();
	if (NULL == YUV_Frame)
	{
		B_LOG("av_frame_alloc fail\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}
#ifdef SDL2_0
	int Video_H = 0;												/* video height */
	int Video_W = 0;

	out_buffer=(unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P,  Video_CodecContext->width, Video_CodecContext->height,1));
	if (NULL == out_buffer)
	{
		B_LOG("av_malloc fail\r\n");
		avformat_close_input(&FormatContext);
		return -1;
	}
	struct SwsContext *img_convert_ctx;

	av_image_fill_arrays(YUV_Frame->data, YUV_Frame->linesize,out_buffer, AV_PIX_FMT_YUV420P,Video_CodecContext->width, Video_CodecContext->height,1);
	Video_Packet = (AVPacket *)av_malloc(sizeof(AVPacket));
	av_dump_format(FormatContext, 0, szFilePath, 0);
	img_convert_ctx = sws_getContext(Video_CodecContext->width, Video_CodecContext->height, Video_CodecContext->pix_fmt, 
		Video_CodecContext->width, Video_CodecContext->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	iRet = SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER);
	if (iRet != 0)
	{
		B_LOG("SDL_Init fail, Ret[%d]\r\n", iRet);
		avformat_close_input(&FormatContext);
		return iRet;
	}
	
	Video_H = Video_CodecContext->width;
	Video_W = Video_CodecContext->height;
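	/* Note: Video_H is assigned ->width and Video_W ->height, so the window below is created with transposed dimensions; together with the hard-coded 500x800 rect this may be related to the display issue mentioned at the end. */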
	SDL_Window *screen = SDL_CreateWindow("B_Play", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, Video_W, Video_H, SDL_WINDOW_OPENGL);
	if(!screen) 
	{  
		B_LOG("SDL: could not create window - exiting:%s\n",SDL_GetError());  
		return -1;
	}

	SDL_Renderer *sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	SDL_Texture *sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,Video_CodecContext->width,Video_CodecContext->height);  
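	/* SDL_PIXELFORMAT_IYUV uses the same Y, U, V plane order as AV_PIX_FMT_YUV420P, so the planes produced by sws_scale() can be handed to SDL_UpdateYUVTexture() directly. */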
	
	SDL_Rect sdlRect;
	sdlRect.x=0;
	sdlRect.y=0;
	//sdlRect.w=screen_w;
	//sdlRect.h=screen_h;
 	sdlRect.w=500;
	sdlRect.h=800;

	while(av_read_frame(FormatContext, Video_Packet)>=0){
		if(Video_Packet->stream_index==iVideo_index){
			//iRet = avcodec_decode_video2(Video_CodecContext, Video_Frame, &got_picture, Video_Packet);
			avcodec_send_packet(Video_CodecContext, Video_Packet);
			got_picture = avcodec_receive_frame(Video_CodecContext, Video_Frame);
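			/* As in the SDL 1.2 version: avcodec_receive_frame() returns 0 on success, so with this check the decoded frames never reach the render branch -- see the note below the listing. */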
			if(got_picture){
				sws_scale(img_convert_ctx, (const unsigned char* const*)Video_Frame->data, Video_Frame->linesize, 0, Video_CodecContext->height, YUV_Frame->data, YUV_Frame->linesize);
				SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
				YUV_Frame->data[0], YUV_Frame->linesize[0],
				YUV_Frame->data[1], YUV_Frame->linesize[1],
				YUV_Frame->data[2], YUV_Frame->linesize[2]);
				
				SDL_RenderClear( sdlRenderer );  
				SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
				SDL_RenderPresent( sdlRenderer );  
				
				SDL_Delay(40);
			}
		}
		av_packet_unref(Video_Packet);
	}

	while (1) {
		//ret = avcodec_decode_video2(Video_CodecContext, Video_Frame, &got_picture, Video_Packet);
		avcodec_send_packet(Video_CodecContext, Video_Packet);
		got_picture = avcodec_receive_frame(Video_CodecContext, Video_Frame);
		if (got_picture)
			break;
		sws_scale(img_convert_ctx, (const unsigned char* const*)Video_Frame->data, Video_Frame->linesize, 0, Video_CodecContext->height, 
			YUV_Frame->data, YUV_Frame->linesize);

		SDL_UpdateTexture( sdlTexture, &sdlRect, YUV_Frame->data[0], YUV_Frame->linesize[0] );  
		SDL_RenderClear( sdlRenderer );  
		SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
		SDL_RenderPresent( sdlRenderer );  

		SDL_Delay(40);
	}
 
	sws_freeContext(img_convert_ctx);
	SDL_Quit();
	av_frame_free(&YUV_Frame);
	av_frame_free(&Video_Frame);
	avcodec_close(Video_CodecContext);
	avformat_close_input(&FormatContext);
	
#else
	struct SwsContext *img_convert_ctx;
	int Video_H = 0;												/* video height */
	int Video_W = 0;												/* video width */
	SDL_Surface *screen; 
	SDL_VideoInfo *vi;
	SDL_Overlay *bmp; 
	SDL_Rect rect;
	int got_picture;
	iRet = SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER);
	if (iRet != 0)
	{
		B_LOG("SDL_Init fail, Ret[%d]\r\n", iRet);
		avformat_close_input(&FormatContext);
		return iRet;
	}

	Video_H = Video_CodecContext->width;
	Video_W = Video_CodecContext->height;
	screen = SDL_SetVideoMode(Video_W, Video_H, 0, 0);
	if (screen == NULL)
	{
		B_LOG("SDL_SetVideoMode fail: %s\r\n", SDL_GetError());
		avformat_close_input(&FormatContext);
		return -1;
	}
	bmp = SDL_CreateYUVOverlay(Video_CodecContext->width, Video_CodecContext->height,SDL_YV12_OVERLAY, screen); 
 
	rect.x = 0;    
	rect.y = 0;    
	rect.w = Video_W;    
	rect.h = Video_H; 

	Video_Packet = (AVPacket *)av_malloc(sizeof(AVPacket));
	av_dump_format(FormatContext, 0, szFilePath, 0);


	SDL_WM_SetCaption("B_Play",NULL);

	img_convert_ctx = sws_getContext(Video_CodecContext->width, Video_CodecContext->height, Video_CodecContext->pix_fmt, 
			Video_CodecContext->width, Video_CodecContext->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 
	B_LOG("1111111");
	while(av_read_frame(FormatContext, Video_Packet)>=0)
	{
		B_LOG("2222222");
		if(Video_Packet->stream_index==iVideo_index)
		{
			//Decode
			avcodec_send_packet(Video_CodecContext, Video_Packet);
			got_picture = avcodec_receive_frame(Video_CodecContext, Video_Frame);

			if(got_picture)
			{
				SDL_LockYUVOverlay(bmp);
				YUV_Frame->data[0]=bmp->pixels[0];
				YUV_Frame->data[1]=bmp->pixels[2];
				YUV_Frame->data[2]=bmp->pixels[1];     
				YUV_Frame->linesize[0]=bmp->pitches[0];
				YUV_Frame->linesize[1]=bmp->pitches[2];   
				YUV_Frame->linesize[2]=bmp->pitches[1];
				sws_scale(img_convert_ctx, (const uint8_t* const*)Video_Frame->data, Video_Frame->linesize, 0, 
					Video_CodecContext->height, YUV_Frame->data, YUV_Frame->linesize);
				SDL_UnlockYUVOverlay(bmp); 

				SDL_DisplayYUVOverlay(bmp, &rect); 
				//Delay 40ms
				SDL_Delay(40);
			}
		}

		av_packet_unref(Video_Packet);
	}

	B_LOG("3333333333\n");
	while (1) 
	{
		B_LOG("4444444444\n");
		avcodec_send_packet(Video_CodecContext, Video_Packet);

		got_picture = avcodec_receive_frame(Video_CodecContext, Video_Frame);
		if (got_picture)
			break;
		sws_scale(img_convert_ctx, (const uint8_t* const*)Video_Frame->data, Video_Frame->linesize, 0, Video_CodecContext->height, YUV_Frame->data, YUV_Frame->linesize);
		SDL_LockYUVOverlay(bmp);
		YUV_Frame->data[0]=bmp->pixels[0];
		YUV_Frame->data[1]=bmp->pixels[2];
		YUV_Frame->data[2]=bmp->pixels[1];     
		YUV_Frame->linesize[0]=bmp->pitches[0];
		YUV_Frame->linesize[1]=bmp->pitches[2];   
		YUV_Frame->linesize[2]=bmp->pitches[1];
 
		SDL_UnlockYUVOverlay(bmp); 
		SDL_DisplayYUVOverlay(bmp, &rect); 
		//Delay 40ms
		SDL_Delay(40);
	}
 
	sws_freeContext(img_convert_ctx);
	SDL_Quit();
	//av_free(out_buffer);
	av_frame_free(&YUV_Frame);
	av_frame_free(&Video_Frame);
	avcodec_close(Video_CodecContext);
	avformat_close_input(&FormatContext);
#endif
	return 0;
}

When I ran the process, the window that appeared was solid green:
(screenshot)
After going over the code again, the problem turned out to be the avcodec_receive_frame call: its header comments say the return value is set to 0 once a frame has been received successfully, and the test routine had this check written backwards (an all-zero YUV texture typically renders as green, which matches the decoded frames never reaching the render branch).
(screenshot)
After changing if (got_picture) to if (!got_picture), the video stream is pushed to the screen successfully, though the display still has a small issue that I will look into tomorrow:
(screenshot)
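A side note on the trailing while (1) loop: it keeps re-sending the last packet rather than flushing the decoder. In FFmpeg 5.0 the buffered frames are normally drained by sending a NULL packet once and then calling avcodec_receive_frame() until it reports end of stream. A rough sketch using the same names as the SDL 2.0 branch above (illustrative only):

	/* Enter draining mode, then pull out whatever frames are still buffered in the decoder. */
	avcodec_send_packet(Video_CodecContext, NULL);
	while (avcodec_receive_frame(Video_CodecContext, Video_Frame) == 0)
	{
		sws_scale(img_convert_ctx, (const unsigned char* const*)Video_Frame->data, Video_Frame->linesize, 0,
			Video_CodecContext->height, YUV_Frame->data, YUV_Frame->linesize);
		SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
			YUV_Frame->data[0], YUV_Frame->linesize[0],
			YUV_Frame->data[1], YUV_Frame->linesize[1],
			YUV_Frame->data[2], YUV_Frame->linesize[2]);
		SDL_RenderClear(sdlRenderer);
		SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &sdlRect);
		SDL_RenderPresent(sdlRenderer);
		SDL_Delay(40);
	}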

Source: https://blog.csdn.net/qq_45987579/article/details/120603728