
王纲 QSV Hardware Encoding


#ifndef PCH_H
#define PCH_H
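// FFmpeg is a C library: wrap its headers in extern "C" so the C++ compiler uses C linkage.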
extern "C"
{
    #include "libavutil/opt.h"
    #include "libavutil/channel_layout.h"
    #include "libavutil/common.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/mathematics.h"
    #include "libavutil/samplefmt.h"
    #include "libavutil/time.h"
    #include "libavutil/fifo.h"
    #include "libavcodec/avcodec.h"
    #include "libavcodec/qsv.h"
    #include "libavformat/avformat.h"
//    #include "libavformat/url.h"
    #include "libavformat/avio.h"
//    #include "libavfilter/avcodec.h"
//    #include "libavfilter/avfiltergraph.h"
    #include "libavfilter/avfilter.h"
    #include "libavfilter/buffersink.h"
    #include "libavfilter/buffersrc.h"
    #include "libswscale/swscale.h"
    #include "libswresample/swresample.h"
    #include "libavdevice/avdevice.h"
}
#endif
pch.h
// Transcode.cpp : Defines the entry point for the console application.
//
/* Copyright (c) 2018-2028 by www.chungen90.com. All rights reserved.
   This file gives a simple example of encoding with QSV.
   For any questions, you can join the QQ group for help,
   QQ group number: 127903734 or 766718184.
*/
//#include "stdafx.h"

#include "pch.h"
#include <string>
#include <iostream>
#include <memory>
#include <iostream>
#include<fstream>
#include <Winsock2.h> 
#include <Windows.h>

using namespace std;
AVFormatContext * context = NULL;
AVFormatContext* outputContext;
int64_t  lastPts = 0;
int64_t  lastDts = 0;
int64_t lastFrameRealtime = 0;

int64_t firstPts = AV_NOPTS_VALUE;
int64_t startTime = 0;

AVCodecContext*    outPutEncContext = NULL;
AVCodecContext *decoderContext = NULL;
#define SrcWidth 1920
#define SrcHeight 1080
#define DstWidth 1280
#define DstHeight 720

struct SwsContext* pSwsContext;
class SwsScaleContext
{
public:
    SwsScaleContext()
    {
    
    }
    void SetSrcResolution(int width , int height)
    {
        srcWidth = width;
        srcHeight = height;
    }

    void SetDstResolution(int width , int height)
    {
        dstWidth = width;
        dstHeight = height;
    }
    void SetFormat(AVPixelFormat iformat, AVPixelFormat oformat)
    {
        this->iformat = iformat;
        this->oformat = oformat;
    }
public:
    int srcWidth;
    int srcHeight;
    int dstWidth;
    int dstHeight;
    AVPixelFormat iformat;
    AVPixelFormat oformat;    
};

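// AVIO interrupt callback: returning non-zero would abort blocking I/O operations;
// this example never interrupts, so it always returns 0.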
int interrupt_cb(void *ctx)
{
    return 0;
}


void Init()
{
    av_register_all();
    avfilter_register_all();
    avformat_network_init();
    avdevice_register_all();
    av_log_set_level(AV_LOG_ERROR);
}

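// Open the input file (or stream) and probe its stream information.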
int OpenInput(char *fileName)
{
     context = avformat_alloc_context();
     context->interrupt_callback.callback = interrupt_cb;
     AVInputFormat *ifmt=NULL;
     AVDictionary *format_opts =  NULL;

    int ret = avformat_open_input(&context, fileName, ifmt, &format_opts);
    if(ret < 0)
    {
        return  ret;
    }
    ret = avformat_find_stream_info(context,NULL);
    av_dump_format(context, 0, fileName, 0);
    if(ret >= 0) 
    {
        std::cout <<"open input stream successfully" << endl;
    }
    return ret;
}



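// Read the next packet from the input; returns NULL on EOF or on a read error.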
AVPacket *ReadPacketFromSource()
{
    //AVPacket> packet(static_cast<AVPacket*>(av_malloc(sizeof(AVPacket))), [&](AVPacket *p) { av_packet_free(&p); av_freep(&p); });
    AVPacket *packet = (AVPacket*)av_malloc(sizeof(AVPacket));
    av_init_packet(packet);
    int ret = av_read_frame(context, packet);
    if(ret >= 0)
    {        
        return packet;
    }
    else
    {
        av_packet_free(&packet);
        return NULL;
    }
}

void CloseInput()
{
    if(context != NULL)
    {
        avformat_close_input(&context);
    }
}

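// Create an MPEG-TS muxer for the output file, add streams and write the container header.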
int OpenOutput(char *fileName)
{
    int ret = 0;
     ret  = avformat_alloc_output_context2(&outputContext, NULL, "mpegts", fileName);
    if(ret < 0)
    {
        goto Error;
    }
    ret = avio_open2(&outputContext->pb, fileName, AVIO_FLAG_READ_WRITE,NULL, NULL);    
    if(ret < 0)
    {
        goto Error;
    }

    for(int i = 0; i < context->nb_streams; i++)
    {
        AVStream * stream = avformat_new_stream(outputContext, outPutEncContext->codec);
        if(stream == NULL)
        {
            ret = AVERROR(ENOMEM);
            goto Error;
        }
        stream->codec = outPutEncContext;
    }
     av_dump_format(outputContext, 0, fileName, 1);
     ret = avformat_write_header(outputContext, NULL);
    if(ret < 0)
    {
        goto Error;
    }
    if(ret >= 0)
        cout << "open output stream successfully" << endl;
    return ret ;
Error:
    if(outputContext)
    {
        avformat_free_context(outputContext);
        outputContext = NULL;
    }
    return ret ;
}

void CloseOutput()
{
    if(outputContext != NULL)
    {
        av_write_trailer(outputContext);
        for(int i = 0 ; i < outputContext->nb_streams; i++)
        {
            AVCodecContext *codecContext = outputContext->streams[i]->codec;
            avcodec_close(codecContext);
        }
        //avformat_close_input(&outputContext);
    }
}

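// Create and open the Intel Quick Sync Video H.264 encoder (h264_qsv).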
int InitEncoderCodec( int iWidth, int iHeight)
{
    AVCodec *  pH264Codec = avcodec_find_encoder_by_name("h264_qsv");
    if(NULL == pH264Codec)
    {
        printf("%s", "avcodec_find_encoder failed");
        return  -1;
    }
    outPutEncContext = avcodec_alloc_context3(pH264Codec);
    outPutEncContext->gop_size = 30;
    //outPutEncContext->framerate = 30;
    outPutEncContext->has_b_frames = 0;
    outPutEncContext->max_b_frames = 0;
    //outPutEncContext = avcodec_alloc_context3(pH264Codec);
    outPutEncContext->codec_id = pH264Codec->id;
    outPutEncContext->time_base.num =1;
    outPutEncContext->time_base.den = 25;
    outPutEncContext->pix_fmt            = *pH264Codec->pix_fmts;
    outPutEncContext->width              =  iWidth;
    outPutEncContext->height             = iHeight;

    AVDictionary *options = NULL;
    outPutEncContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    
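    // h264_qsv private options: a small async_depth keeps encode latency low,
    // and look_ahead=0 disables look-ahead rate control.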
    av_opt_set(outPutEncContext->priv_data,"async_depth","1",0);
    av_opt_set(outPutEncContext->priv_data,"max_dec_frame_buffering","1",0);
    av_opt_set(outPutEncContext->priv_data,"look_ahead","0",0);

    int ret = avcodec_open2(outPutEncContext, pH264Codec, &options);
    AVQSVContext *qsv = (AVQSVContext *)outPutEncContext->hwaccel_context;
    if (ret < 0)
    {
        printf("%s", "open codec failed");
        return  ret;
    }
    return 1;
}

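// Open a decoder for the first input stream, reusing the stream's codec context (old FFmpeg API).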
int InitDecodeCodec(AVCodecID codecId)
{
    AVCodec *codec = avcodec_find_decoder(codecId);
    if(!codec)
    {
        return -1;
    }
    decoderContext = context->streams[0]->codec;
    if (!decoderContext) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
        decoderContext->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames

    int ret = avcodec_open2(decoderContext, codec, NULL);
    return ret;

}

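// Decode one packet; returns true only when a complete frame was produced.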
bool DecodeVideo(AVPacket* packet, AVFrame* frame)
{
    int gotFrame = 0;
    int hr = avcodec_decode_video2(decoderContext, frame, &gotFrame, packet);
    if(hr >= 0 && gotFrame != 0)
    {
        return true;
    }
    return false;
}

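// Build a swscale context that converts decoded frames to the encoder's resolution and pixel format.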
int InitSwsContext(struct SwsContext** pSwsContext,SwsScaleContext *swsScaleContext)
{
    *pSwsContext = sws_getContext(swsScaleContext->srcWidth, swsScaleContext->srcHeight, swsScaleContext->iformat,
                                  swsScaleContext->dstWidth, swsScaleContext->dstHeight, swsScaleContext->oformat,
                                  SWS_POINT, NULL, NULL, NULL);
    if(*pSwsContext == NULL)
    {
        return 0;
    }
    return 1;
}
int InitVideoFrame(AVFrame *frame,AVCodecContext* codecContext)
{
    frame->format = codecContext->pix_fmt;
    frame->width  = codecContext->width;
    frame->height = codecContext->height;
    return av_image_alloc(frame->data, frame->linesize, codecContext->width, codecContext->height,
                         codecContext->pix_fmt, 32);
}
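// Allocate the picture buffer for the scaled frame that will be fed to the encoder.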
int InitSwsFrame(AVFrame *pSwsFrame,int iWidth, int iHeight)
{
    int numBytes = av_image_get_buffer_size(outPutEncContext->pix_fmt, iWidth, iHeight, 1);
    uint8_t *pSwpBuffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    av_image_fill_arrays(pSwsFrame->data, pSwsFrame->linesize, pSwpBuffer, outPutEncContext->pix_fmt, iWidth, iHeight, 1);
    pSwsFrame->width = iWidth;
    pSwsFrame->height = iHeight;
    pSwsFrame->format = outPutEncContext->pix_fmt;
    return 1;
}


int main(int argc, char* argv[])
{
    string fileInput="D:\\test1.ts";

    string fileOutput = "D:\\test-hardTranscode.ts";
    int64_t count = 0;

    Init();
    if(OpenInput((char *)fileInput.c_str()) < 0)
    {
        cout << "Open file Input failed!" << endl;
        Sleep(10000);
        return 0;
    }

    int ret = InitDecodeCodec(context->streams[0]->codecpar->codec_id);
    if(ret <0)
    {
        cout << "InitDecodeCodec failed!" << endl;
        Sleep(10000);
        return 0;
    }

    
    ret = InitEncoderCodec(DstWidth,DstHeight);
    if(ret < 0)
    {
        cout << "open eccoder failed ret is " << ret<<endl;
        cout << "InitEncoderCodec failed!" << endl;
        Sleep(10000);
        return 0;
    }

    if(OpenOutput((char *)fileOutput.c_str()) < 0)
    {
        cout << "Open file Output failed!" << endl;
        Sleep(10000);
        return 0;
    }
    string rtmpAddr = fileOutput;
    cout << "output address: " << rtmpAddr << endl;
    double timebase = av_q2d(context->streams[0]->time_base);
    AVStream *in_stream = context->streams[0];
    AVStream *out_stream = outputContext->streams[0];

    AVFrame *pSrcFrame = av_frame_alloc();
    int got_output = 0;
    InitVideoFrame(pSrcFrame,outPutEncContext);

    AVFrame * pSwsFrame = av_frame_alloc();
    // Scale the decoded frame down to the encoder resolution (DstWidth x DstHeight)
    // so that the frame handed to h264_qsv matches outPutEncContext.
    InitSwsFrame(pSwsFrame, DstWidth, DstHeight);
    SwsScaleContext swsScaleContext;
    swsScaleContext.SetSrcResolution(context->streams[0]->codec->width, context->streams[0]->codec->height);
    swsScaleContext.SetDstResolution(DstWidth, DstHeight);
    swsScaleContext.SetFormat(context->streams[0]->codec->pix_fmt, outPutEncContext->pix_fmt);
    InitSwsContext(&pSwsContext,&swsScaleContext);
    int64_t  timeRecord = 0;
    int64_t  firstPacketTime = 0;
    int64_t outLastTime = av_gettime();
    int64_t inLastTime = av_gettime();
    int64_t videoCount = 0;

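        // Transcode loop: read a packet, decode it, scale/convert the frame to the
        // encoder's pixel format (NV12 for QSV), encode with h264_qsv, and mux to MPEG-TS.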
        while(true)
        {
            outLastTime = av_gettime();
            AVPacket *packet = ReadPacketFromSource();
            
            if(packet)
            {
                if(DecodeVideo(packet,pSrcFrame))
                {
                    sws_scale(pSwsContext, (const uint8_t *const *)pSrcFrame->data,
                  pSrcFrame->linesize, 0,context->streams[0]->codec->height, (uint8_t *const *)pSwsFrame->data, pSwsFrame->linesize);
                    AVPacket *pTmpPkt = (AVPacket *)av_malloc(sizeof(AVPacket));
                    av_init_packet(pTmpPkt);
                    pTmpPkt->data = NULL;
                    pTmpPkt->size = 0;                    
                    ret = avcodec_encode_video2(outPutEncContext, pTmpPkt, pSwsFrame, &got_output);                    
                    if(ret >= 0 && got_output)
                    {                        
                        pTmpPkt->pts = pTmpPkt->dts = 3600 * count++;                        
                        int ret = av_write_frame(outputContext, pTmpPkt);                            
                        
                    }
                    av_packet_free(&pTmpPkt);
                }
                av_packet_free(&packet);
            }
            else break;            
        }
        
        CloseInput();
        CloseOutput();
        std::cout <<"Transcode file end!" << endl;
    

    while(true)
    {
        Sleep(10000);
    }
    
    return 0;
}
main.cpp

 

Source: https://www.cnblogs.com/zeliangzhang/p/15821705.html