其他分享
首页 > 其他分享 > FFmpeg学习:复用器的使用(录制摄像头和麦克风数据,输出mkv文件)

FFmpeg学习:复用器的使用(录制摄像头和麦克风数据,输出mkv文件)

作者:互联网

介绍

本文在上一篇复用器的使用基础上,将输入文件改为摄像头和麦克风

代码如下:

点击查看代码

#include <iostream>
#define __STDC_CONSTANT_MACROS //它允许C++程序使用C99标准中指定的 stdint.h 宏,而这些宏不在C++标准中。 
//诸如 UINT8_MAX , INT64_MIN 和 INT32_C () 之类的宏可能已在C++应用程序中以其他方式定义

extern "C" {

#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/dict.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "libavutil/time.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"
#include "libswresample/swresample.h"

#define SDL_MAIN_HANDLED
#include "SDL.h"
#include "SDL_main.h"
#include "SDL_video.h"
#include "SDL_thread.h"
#if CONFIG_AVFILTER
# include "libavfilter/avfilter.h"
# include "libavfilter/buffersink.h"
# include "libavfilter/buffersrc.h"
#endif
}


#include <string>

//Output YUV420P 
#define OUTPUT_YUV420P 0
//'1' Use Dshow 
//'0' Use VFW
#define USE_DSHOW 1
//Refresh Event
#define SFM_REFRESH_EVENT  (SDL_USEREVENT + 1)

//Show Dshow Device
void show_dshow_device() {
	AVFormatContext* pFormatCtx = avformat_alloc_context();
	AVDictionary* options = NULL;
	av_dict_set(&options, "list_devices", "true", 0); //0表示不区分大小写
	AVInputFormat* iformat = av_find_input_format("dshow");
	printf("========Device Info=============\n");
	avformat_open_input(&pFormatCtx, "video=dummy", iformat, &options);
	printf("================================\n");
	avformat_free_context(pFormatCtx);
}
//Show Dshow Device Option
void show_dshow_device_option() {
	AVFormatContext* pFormatCtx = avformat_alloc_context();
	AVDictionary* options = NULL;
	av_dict_set(&options, "list_options", "true", 0);
	AVInputFormat* iformat = av_find_input_format("dshow");
	printf("========Device Option Info======\n");
	avformat_open_input(&pFormatCtx, "video=Integrated Camera", iformat, &options);
	printf("================================\n");
	avformat_free_context(pFormatCtx);
}

//Show VFW Device
// List Video-for-Windows capture devices by opening the vfwcap demuxer
// with the special device name "list" (prints devices, then fails open).
void show_vfw_device() {
	AVFormatContext* pFormatCtx = avformat_alloc_context();
	AVInputFormat* iformat = av_find_input_format("vfwcap");
	printf("========VFW Device Info======\n");
	avformat_open_input(&pFormatCtx, "list", iformat, NULL);
	printf("=============================\n");
	// avformat_close_input() frees the context and sets the pointer to NULL,
	// so the avformat_free_context() call that followed it was a dead no-op
	// and has been removed.
	avformat_close_input(&pFormatCtx);
}

#include <string>
int main()
{
	AVOutputFormat* OFormat = NULL;
	AVFormatContext* pvFormatCtx = NULL;//保存视频数据
	AVFormatContext* paFormatCtx = NULL;//保存音频数据
	AVFormatContext* poFormatCtx = NULL;//输出数据上下文
	int videoindex = -1, audioindex = -1;
	int videoindex_out = -1, audioindex_out = -1;
	int frame_index = 0;
	int64_t cur_pts_v = 0, cur_pts_a = 0;
	const char* out_filename = "shuchu.mkv";//输出文件
	const char* audio_filename = "audio.mp3";//输出文件
	const char* video_filename = "video.h264";//输出文件
	AVPacket* pkt;
	int ret, i;

	//pvFormatCtx = avformat_alloc_context();
	//paFormatCtx = avformat_alloc_context();
	//poFormatCtx = avformat_alloc_context();
	//设备初始化
	avdevice_register_all();
	//打开摄像头——视频流	
	AVInputFormat* ifmt = av_find_input_format("dshow");
	AVDictionary* opt_v = NULL;
	av_dict_set(&opt_v, "rtbufsize", "10*1280*720", 0);//设置循环缓冲区大小
	av_dict_set(&opt_v, "video_size", "1280x720", 0);
	av_dict_set(&opt_v, "framerate", "30", 0);
	av_dict_set(&opt_v, "vcodec", "libx264", 0);//指定视频编码格式
	if (avformat_open_input(&pvFormatCtx, "video=Integrated Camera", ifmt, &opt_v) != 0) {
//	if (avformat_open_input(&pvFormatCtx, video_filename, NULL, NULL) != 0) {
		printf("Couldn't open video input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(pvFormatCtx, NULL) < 0)
	{
		printf("Couldn't find stream information.\n");
		return -1;
	}
	//打开麦克风——音频流
	AVDictionary* opt_a = NULL;
	av_dict_set(&opt_a, "acodec", "libfdk_aac",0);//指定音频编码格式
	const char* audio_name = u8"audio=麦克风 (2- Realtek(R) Audio)";//"audio=麦克风阵列 (英特尔® 智音技术)";//
	if (avformat_open_input(&paFormatCtx, audio_name, ifmt, &opt_a) != 0) {
//	if (avformat_open_input(&paFormatCtx, audio_filename, NULL, NULL) != 0) {
		printf("Couldn't open audio input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(paFormatCtx, NULL) < 0)
	{
		printf("Couldn't find stream information.\n");
		return -1;
	}
	
	//开始输出
	avformat_alloc_output_context2(&poFormatCtx, NULL, NULL, out_filename);//初始化输出文件
	if (!poFormatCtx) {
		printf("Could not create output context\n");
		return -1;
	}
	OFormat = poFormatCtx->oformat;

	//给输出添加视频流
	for (i = 0; i < pvFormatCtx->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		if (pvFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
			/*
			* 步骤:
			* 1、首先根据视频流创建新的输出流(要添加初始化编码器)
			* 2、将原视频流的编码器上下文 复制 给输出流
			*/
			AVStream* in_stream = pvFormatCtx->streams[i];
			AVCodec* in_codec = avcodec_find_decoder(in_stream->codecpar->codec_id);
			AVStream* out_stream = avformat_new_stream(poFormatCtx, in_codec);
			videoindex = i;
			if (!out_stream) {
				printf("Failed allocating output stream\n");
				return -1;
			}
			videoindex_out = out_stream->index;
			//Copy the settings of AVCodecContext
			AVCodecContext* codec_ctx = avcodec_alloc_context3(in_codec);
			ret = avcodec_parameters_to_context(codec_ctx,in_stream->codecpar);
			if (ret < 0) {
				printf("Failed to copy in_stream codecpar to codec context\n");
				return -1;
			}
			codec_ctx->codec_tag = 0;
			if (poFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
				codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

			ret = avcodec_parameters_from_context(out_stream->codecpar, codec_ctx);
			if (ret < 0) {
				printf("Failed to copy codec context to out_stream codecpar context\n");
				return -1;
			}
		}
	}

	//给输出添加音频流
	for (i = 0; i < paFormatCtx->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		if (paFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
			AVStream* in_stream = paFormatCtx->streams[i];
			AVCodec* in_codec = avcodec_find_decoder(in_stream->codecpar->codec_id);
			AVStream* out_stream = avformat_new_stream(poFormatCtx, in_codec);
			audioindex = i;
			if (!out_stream) {
				printf("Failed allocating output stream\n");
				return -1;
			}

			audioindex_out = out_stream->index;
			//Copy the settings of AVCodecContext
			AVCodecContext* codec_ctx = avcodec_alloc_context3(in_codec);
			ret = avcodec_parameters_to_context(codec_ctx, in_stream->codecpar);
			if (ret < 0) {
				printf("Failed to copy in_stream codecpar to codec context\n");
				return -1;
			}
			codec_ctx->codec_tag = 0;
			if (poFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
				codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

			ret = avcodec_parameters_from_context(out_stream->codecpar, codec_ctx);
			if (ret < 0) {
				printf("Failed to copy codec context to out_stream codecpar context\n");
				return -1;
			}
		}
	}
	printf("==========Output Information==========\n");
	av_dump_format(poFormatCtx, 0, out_filename, 1);
	printf("======================================\n");
	//Open output file
	if (!(OFormat->flags & AVFMT_NOFILE)) {
		if (avio_open(&poFormatCtx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename);
			return -1;
		}
	}
	//Write file header	
	if (avformat_write_header(poFormatCtx, NULL) < 0) {
		printf("Error occurred when opening output file\n");
		return -1;
	}

	//开始写文件
	int num_packets = 0;
	pkt = av_packet_alloc();
	while (1) {
		AVFormatContext* ifmt_ctx;
		int stream_index = 0;
		AVStream* in_stream, * out_stream;

		//Get an AVPacket
		if (av_compare_ts(cur_pts_v, pvFormatCtx->streams[videoindex]->time_base, cur_pts_a, paFormatCtx->streams[audioindex]->time_base) <= 0) {
			ifmt_ctx = pvFormatCtx;
			stream_index = videoindex_out;

			if (av_read_frame(ifmt_ctx, pkt) >= 0) {
				do {
					in_stream = ifmt_ctx->streams[pkt->stream_index];
					out_stream = poFormatCtx->streams[stream_index];

					if (pkt->stream_index == videoindex) {
						//FIX:No PTS (Example: Raw H.264)
						//Simple Write PTS
						if (pkt->pts == AV_NOPTS_VALUE) {
							//Write PTS
							AVRational time_base1 = in_stream->time_base;
							//Duration between 2 frames (us)
							int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
							//Parameters
							pkt->pts = (double)(frame_index * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
							pkt->dts = pkt->pts;
							pkt->duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
							frame_index++;
						}
						num_packets = num_packets + 1;
						cur_pts_v = pkt->pts;
						break;
					}
				} while (av_read_frame(ifmt_ctx, pkt) >= 0);
			}
			else {
				break;
			}
		}
		else {
			ifmt_ctx = paFormatCtx;
			stream_index = audioindex_out;
			if (av_read_frame(ifmt_ctx, pkt) >= 0) {
				do {
					in_stream = ifmt_ctx->streams[pkt->stream_index];
					out_stream = poFormatCtx->streams[stream_index];

					if (pkt->stream_index == audioindex) {

						//FIX:No PTS
						//Simple Write PTS
						if (pkt->pts == AV_NOPTS_VALUE) {
							//Write PTS
							AVRational time_base1 = in_stream->time_base;
							//Duration between 2 frames (us)
							int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
							//Parameters
							pkt->pts = (double)(frame_index * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
							pkt->dts = pkt->pts;
							pkt->duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
							frame_index++;
						}
						cur_pts_a = pkt->pts;

						break;
					}
				} while (av_read_frame(ifmt_ctx, pkt) >= 0);
			}
			else {
				break;
			}

		}
		//Convert PTS/DTS
		pkt->pts = av_rescale_q_rnd(pkt->pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base);
		pkt->pos = -1;
		pkt->stream_index = stream_index;

		printf("Write 1 Packet. size:%5d\tpts:%lld\n", pkt->size, pkt->pts);
		//Write
		if (av_interleaved_write_frame(poFormatCtx, pkt) < 0) {
			printf("Error muxing packet\n");
			break;
		}
		
		if (num_packets > 300) {
			printf("录制完毕");
			break;
		}
	}
	av_packet_free(&pkt);
	//Write file trailer
	av_write_trailer(poFormatCtx);

	avformat_close_input(&pvFormatCtx);
	avformat_close_input(&paFormatCtx);
	/* close output */
	if (poFormatCtx && !(OFormat->flags & AVFMT_NOFILE))
		avio_close(poFormatCtx->pb);
	avformat_free_context(poFormatCtx);
	if (ret < 0 && ret != AVERROR_EOF) {
		printf("Error occurred.\n");
		return -1;
	}

	//avformat_close_input(&poFormatCtx);//关闭之后就不用free了
	//avformat_close_input(&pvFormatCtx);//关闭之后就不用free了
	//avformat_close_input(&paFormatCtx);//关闭之后就不用free了
	return 0;
}

存在问题

已解决问题

【问题一:avformat_write_header时报错】

Could not find tag for codec pcm_s16le in stream #1, codec not currently supported in container 

解决方法:将输出的文件格式由mp4改为mkv;

【问题二:时间戳对不上】
自带播放器解码有问题,推荐使用 VLC。

标签:复用器,pkt,stream,avformat,mkv,codec,av,include,FFmpeg
来源: https://www.cnblogs.com/zjacky/p/16496823.html