
[6] Android MediaPlayer Overall Architecture Source Code Analysis - [start Playback Request Handling Flow] [Part 6] [02]


Continuing from the previous installment: [6] Android MediaPlayer Overall Architecture Source Code Analysis - [start Playback Request Handling Flow] [Part 6] [01]
The Android source version analyzed in this series: [Android 10.0]

[Subsection numbering continues from the previous installment.]
5.1. Implementation analysis of GetVideoCodingTypeFromMime(mime, &compressionFormat):
Maps the given MIME type to the corresponding OMX video coding enum.

// [frameworks/av/media/libstagefright/ACodec.cpp]
status_t ACodec::GetVideoCodingTypeFromMime(
        const char *mime, OMX_VIDEO_CODINGTYPE *codingType) {
    // Simply matches the mime against the kVideoCodingMapEntry mapping table and
    // converts it to the enum OMX recognizes; for the AVC example this yields OMX_VIDEO_CodingAVC.
    for (size_t i = 0;
         i < sizeof(kVideoCodingMapEntry) / sizeof(kVideoCodingMapEntry[0]);
         ++i) {
        if (!strcasecmp(mime, kVideoCodingMapEntry[i].mMime)) {
            // On a match, record the coding type and return OK.
            *codingType = kVideoCodingMapEntry[i].mVideoCodingType;
            return OK;
        }
    }

    *codingType = OMX_VIDEO_CodingUnused;

    return ERROR_UNSUPPORTED;
}

Declaration of the VideoCodingMapEntry struct and its mapping table:

// [frameworks/av/media/libstagefright/ACodec.cpp]
static const struct VideoCodingMapEntry {
    const char *mMime;
    OMX_VIDEO_CODINGTYPE mVideoCodingType;
} kVideoCodingMapEntry[] = {
    { MEDIA_MIMETYPE_VIDEO_AVC, OMX_VIDEO_CodingAVC },
    { MEDIA_MIMETYPE_VIDEO_HEVC, OMX_VIDEO_CodingHEVC },
    { MEDIA_MIMETYPE_VIDEO_MPEG4, OMX_VIDEO_CodingMPEG4 },
    { MEDIA_MIMETYPE_VIDEO_H263, OMX_VIDEO_CodingH263 },
    { MEDIA_MIMETYPE_VIDEO_MPEG2, OMX_VIDEO_CodingMPEG2 },
    { MEDIA_MIMETYPE_VIDEO_VP8, OMX_VIDEO_CodingVP8 },
    { MEDIA_MIMETYPE_VIDEO_VP9, OMX_VIDEO_CodingVP9 },
    { MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, OMX_VIDEO_CodingDolbyVision },
    { MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC, OMX_VIDEO_CodingImageHEIC },
};
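
For reference, a minimal sketch of the lookup in action for the AVC example (not AOSP code; it assumes a context from which the method is accessible, and that MEDIA_MIMETYPE_VIDEO_AVC is "video/avc" from MediaDefs.h):

// Hypothetical caller: map the AVC MIME type to its OMX coding enum.
OMX_VIDEO_CODINGTYPE codingType;
status_t err = ACodec::GetVideoCodingTypeFromMime(
        MEDIA_MIMETYPE_VIDEO_AVC /* "video/avc" */, &codingType);
if (err == OK) {
    // codingType is now OMX_VIDEO_CodingAVC
}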

5.2. Implementation analysis of setVideoPortFormatType(kPortIndexInput, compressionFormat, OMX_COLOR_FormatUnused):
Sets the format type on the video input buffer port. Note that the method's last parameter, usingNativeBuffers, defaults to false.
Note: for the AVC example, compressionFormat is OMX_VIDEO_CodingAVC.

// [frameworks/av/media/libstagefright/ACodec.cpp]
status_t ACodec::setVideoPortFormatType(
        OMX_U32 portIndex,
        OMX_VIDEO_CODINGTYPE compressionFormat,
        OMX_COLOR_FORMATTYPE colorFormat,
        bool usingNativeBuffers) {
    // Create the OMX parameter struct
    OMX_VIDEO_PARAM_PORTFORMATTYPE format;
    // Initialize it
    InitOMXParams(&format);
    // Fill in the fields
    format.nPortIndex = portIndex;
    format.nIndex = 0;
    bool found = false;
	
    // kMaxIndicesToCheck = 32, used when enumerating supported formats and profiles
    for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
        // Loop to query the parameters the underlying component (e.g. the H.264
        // decoder SoftAVCDec) supports; on failure, return the error.
        // For the SoftAVCDec example: the component only lets this query succeed
        // when format.nIndex is 0 (any other index returns an error), and reports:
        // format->eCompressionFormat = mCodingType; format->eColorFormat = OMX_COLOR_FormatUnused;
        format.nIndex = index;
        status_t err = mOMXNode->getParameter(
                OMX_IndexParamVideoPortFormat,
                &format, sizeof(format));

        if (err != OK) {
            return err;
        }
        // The query succeeded

        // substitute back flexible color format to codec supported format
        OMX_U32 flexibleEquivalent;
        // When the compression format is unspecified, check whether the component's
        // color format has a flexible equivalent and substitute it.
        // IsFlexibleColorFormat() is analyzed below.
        if (compressionFormat == OMX_VIDEO_CodingUnused
                && IsFlexibleColorFormat(
                        mOMXNode, format.eColorFormat, usingNativeBuffers, &flexibleEquivalent)
                && colorFormat == flexibleEquivalent) {
            ALOGI("[%s] using color format %#x in place of %#x",
                    mComponentName.c_str(), format.eColorFormat, colorFormat);
            // When the substitution applies, adopt the component's own color format directly.
            colorFormat = format.eColorFormat;
        }

        // The following assertion is violated by TI's video decoder.
        // CHECK_EQ(format.nIndex, index);
		
        // This TI component special case is not analyzed here
        if (!strcmp("OMX.TI.Video.encoder", mComponentName.c_str())) {
            if (portIndex == kPortIndexInput
                    && colorFormat == format.eColorFormat) {
                // eCompressionFormat does not seem right.
                found = true;
                break;
            }
            if (portIndex == kPortIndexOutput
                    && compressionFormat == format.eCompressionFormat) {
                // eColorFormat does not seem right.
                found = true;
                break;
            }
        }

        if (format.eCompressionFormat == compressionFormat
            && format.eColorFormat == colorFormat) {
            // Only when both values match is the format considered found. For the
            // SoftAVCDec example, compressionFormat is OMX_VIDEO_CodingAVC and colorFormat
            // is OMX_COLOR_FormatUnused, which is exactly what the query above reported.
            found = true;
            break;
        }

        if (index == kMaxIndicesToCheck) {
            ALOGW("[%s] stopping checking formats after %u: %s(%x)/%s(%x)",
                    mComponentName.c_str(), index,
                    asString(format.eCompressionFormat), format.eCompressionFormat,
                    asString(format.eColorFormat), format.eColorFormat);
        }
    }

    if (!found) {
        return UNKNOWN_ERROR;
    }
	
    // Once found, push the parameter back down to the component.
    // For the SoftAVCDec example: the component merely checks that the passed-in
    // coding type and color format match what it supports; per the analysis above
    // they always do, so this is effectively a no-op that returns OK (an error otherwise).
    status_t err = mOMXNode->setParameter(
            OMX_IndexParamVideoPortFormat, &format, sizeof(format));

    return err;
}
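
The loop above is the standard OMX capability-enumeration idiom: query OMX_IndexParamVideoPortFormat repeatedly with an increasing nIndex, and the component reports one supported (compression format, color format) pair per index until it returns an error. A minimal sketch of the idiom in isolation (assuming mOMXNode and the port index are in scope):

// Enumerate every (compression, color) pair the output port advertises.
OMX_VIDEO_PARAM_PORTFORMATTYPE fmt;
InitOMXParams(&fmt);
fmt.nPortIndex = kPortIndexOutput;
for (OMX_U32 i = 0; ; ++i) {
    fmt.nIndex = i;
    if (mOMXNode->getParameter(
            OMX_IndexParamVideoPortFormat, &fmt, sizeof(fmt)) != OK) {
        break;  // no more entries
    }
    ALOGV("format #%u: compression=%#x color=%#x", i,
            (unsigned)fmt.eCompressionFormat, (unsigned)fmt.eColorFormat);
}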

Implementation analysis of IsFlexibleColorFormat():
Checks whether a color format has a flexible YUV equivalent that can be substituted when the compression format is unspecified.

// [frameworks/av/media/libstagefright/omx/OMXUtils.cpp]
// static
bool IsFlexibleColorFormat(
         const sp<IOMXNode> &omxNode,
         uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent) {
    // Create and initialize the OMX parameter struct
    DescribeColorFormat2Params describeParams;
    InitOMXParams(&describeParams);
    describeParams.eColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
    // reasonable dummy values
    describeParams.nFrameWidth = 128;
    describeParams.nFrameHeight = 128;
    describeParams.nStride = 128;
    describeParams.nSliceHeight = 128;
    // For the SoftAVCDec example this is false
    describeParams.bUsingNativeBuffers = (OMX_BOOL)usingNativeBuffers;

    CHECK(flexibleEquivalent != NULL);

    if (!DescribeColorFormat(omxNode, describeParams)) {
        return false;
    }

    const MediaImage2 &img = describeParams.sMediaImage;
    if (img.mType == MediaImage2::MEDIA_IMAGE_TYPE_YUV) {
        // For YUV images there must be exactly three planes, with a full-resolution Y plane
        if (img.mNumPlanes != 3
                || img.mPlane[img.Y].mHorizSubsampling != 1
                || img.mPlane[img.Y].mVertSubsampling != 1) {
            return false;
        }

        // YUV 420
        if (img.mPlane[img.U].mHorizSubsampling == 2
                && img.mPlane[img.U].mVertSubsampling == 2
                && img.mPlane[img.V].mHorizSubsampling == 2
                && img.mPlane[img.V].mVertSubsampling == 2) {
            // possible flexible YUV420 format
            if (img.mBitDepth <= 8) {
                // Only when every condition holds do we return true and set the output.
                // In other words: any 4:2:0 YUV layout with a bit depth of at most 8
                // can be exposed as the flexible YUV420 format.
               *flexibleEquivalent = OMX_COLOR_FormatYUV420Flexible;
               return true;
            }
        }
    }
    return false;
}
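
As a concrete illustration, here is roughly what the check yields for the SoftAVCDec case discussed above (a sketch; omxNode is assumed to be in scope):

// A plain planar YUV420 format has a flexible equivalent:
OMX_U32 flexibleEquivalent;
if (IsFlexibleColorFormat(omxNode, OMX_COLOR_FormatYUV420Planar,
        false /* usingNativeBuffers */, &flexibleEquivalent)) {
    // flexibleEquivalent == OMX_COLOR_FormatYUV420Flexible
}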

Implementation of DescribeColorFormat(omxNode, describeParams):

// [frameworks/av/media/libstagefright/omx/OMXUtils.cpp]
bool DescribeColorFormat(
        const sp<IOMXNode> &omxNode,
        DescribeColorFormat2Params &describeParams)
{
    // For the SoftAVCDec example: the component supports neither of the two extension
    // indices below, so that path is not analyzed here
    OMX_INDEXTYPE describeColorFormatIndex;
    if (omxNode->getExtensionIndex(
            "OMX.google.android.index.describeColorFormat",
            &describeColorFormatIndex) == OK) {
        DescribeColorFormatParams describeParamsV1(describeParams);
        if (omxNode->getParameter(
                describeColorFormatIndex,
                &describeParamsV1, sizeof(describeParamsV1)) == OK) {
            describeParams.initFromV1(describeParamsV1);
            return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
        }
    } else if (omxNode->getExtensionIndex(
            "OMX.google.android.index.describeColorFormat2", &describeColorFormatIndex) == OK
               && omxNode->getParameter(
                       describeColorFormatIndex, &describeParams, sizeof(describeParams)) == OK) {
        return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
    }

    // Fall back to the default description:
    return DescribeDefaultColorFormat(describeParams);
}

Implementation analysis of DescribeDefaultColorFormat(describeParams):
Background:
YUV formats come in two broad families: packed and planar.
Packed formats store the Y, U, and V components in a single array, typically grouping a few neighboring pixels into a macro-pixel;
planar formats store the three components in three separate arrays, like three stacked planes.
The difference between YUV420, YUV420P, and YUV420SP:
YUV420: packed YUV420.
YUV420P: YUV420 Planar; the Y, U, and V data are stored in separate planes.
YUV420SP: YUV420 semi-planar; same total data size as YUV420 Planar, but the U and V samples are interleaved in one plane (see the byte-layout sketch below).
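
To make the layouts concrete, a byte-level sketch for a tiny 4x4 frame (16 Y samples, 4 U, 4 V; each letter is one byte, stride equals width for simplicity):

// I420 (YUV420P):   YYYYYYYYYYYYYYYY UUUU VVVV   (separate U plane, then V plane)
// YV12:             YYYYYYYYYYYYYYYY VVVV UUUU   (separate V plane, then U plane)
// NV12 (YUV420SP):  YYYYYYYYYYYYYYYY UVUVUVUV    (one interleaved chroma plane, U first)
// NV21 (YUV420SP):  YYYYYYYYYYYYYYYY VUVUVUVU    (one interleaved chroma plane, V first)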

// [frameworks/av/media/libstagefright/omx/OMXUtils.cpp]
bool DescribeDefaultColorFormat(DescribeColorFormat2Params &params) {
    // MediaImage2 describes the layout of a single video/image frame
    MediaImage2 &image = params.sMediaImage;
    // Zero out the whole struct
    memset(&image, 0, sizeof(image));
	
    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
    // number of planes
    image.mNumPlanes = 0;

    const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
    // Dimensions of the largest plane, i.e. the frame width/height
    image.mWidth = params.nFrameWidth;
    image.mHeight = params.nFrameHeight;

    // As the AOSP comment says, only YUV420 color formats are handled here
    // only supporting YUV420
    if (fmt != OMX_COLOR_FormatYUV420Planar &&
        fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
        fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
        fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar &&
        fmt != (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YV12) {
        ALOGW("do not know color format 0x%x = %d", fmt, fmt);
        if (fmt == OMX_COLOR_FormatYUV420Planar16) {
            ALOGW("Cannot describe color format OMX_COLOR_FormatYUV420Planar16");
        }
        return false;
    }

    // Compatibility fix for stride and slice height
    // TEMPORARY FIX for some vendors that advertise sliceHeight as 0
    if (params.nStride != 0 && params.nSliceHeight == 0) {
        ALOGW("using sliceHeight=%u instead of what codec advertised (=0)",
                params.nFrameHeight);
        params.nSliceHeight = params.nFrameHeight;
    }

    // Validate the values and guard against integer overflow further down
    // we need stride and slice-height to be non-zero and sensible. These values were chosen to
    // prevent integer overflows further down the line, and do not indicate support for
    // 32kx32k video.
    if (params.nStride == 0 || params.nSliceHeight == 0
            || params.nStride > 32768 || params.nSliceHeight > 32768) {
        ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
                fmt, fmt, params.nStride, params.nSliceHeight);
        return false;
    }

    // Set up the YUV format description
    // set-up YUV format
    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
    // three planes: Y, U, V
    image.mNumPlanes = 3;
    // bit depth per sample
    image.mBitDepth = 8;
    // bit depth the component allocates per sample
    image.mBitDepthAllocated = 8;
    // Set up the Y plane:
    // byte offset of the plane's first pixel, i.e. where the Y data starts
    image.mPlane[image.Y].mOffset = 0;
    // column increment in bytes
    image.mPlane[image.Y].mColInc = 1;
    // row increment in bytes (equals the stride)
    image.mPlane[image.Y].mRowInc = params.nStride;
    // horizontal subsampling relative to the largest plane
    image.mPlane[image.Y].mHorizSubsampling = 1;
    // vertical subsampling relative to the largest plane
    image.mPlane[image.Y].mVertSubsampling = 1;

    switch ((int)fmt) {
        case HAL_PIXEL_FORMAT_YV12:
            // As shown above, this is false for the SoftAVCDec example
            if (params.bUsingNativeBuffers) {
                // When true (native buffers):
                // align the luma (Y) stride to 16 bytes
                size_t ystride = align(params.nStride, 16);
                // the chroma stride is half the luma stride, also 16-byte aligned
                size_t cstride = align(params.nStride / 2, 16);
                // then fill in the per-plane fields
                image.mPlane[image.Y].mRowInc = ystride;

                image.mPlane[image.V].mOffset = ystride * params.nSliceHeight;
                image.mPlane[image.V].mColInc = 1;
                image.mPlane[image.V].mRowInc = cstride;
                image.mPlane[image.V].mHorizSubsampling = 2;
                image.mPlane[image.V].mVertSubsampling = 2;

                image.mPlane[image.U].mOffset = image.mPlane[image.V].mOffset
                        + (cstride * params.nSliceHeight / 2);
                image.mPlane[image.U].mColInc = 1;
                image.mPlane[image.U].mRowInc = cstride;
                image.mPlane[image.U].mHorizSubsampling = 2;
                image.mPlane[image.U].mVertSubsampling = 2;
                break;
            } else {
                // fall through as YV12 is used for YUV420Planar by some codecs
                FALLTHROUGH_INTENDED;
            }

        case OMX_COLOR_FormatYUV420Planar:
        case OMX_COLOR_FormatYUV420PackedPlanar:
            // Compute the per-plane fields for the planar layout
            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
            image.mPlane[image.U].mColInc = 1;
            image.mPlane[image.U].mRowInc = params.nStride / 2;
            image.mPlane[image.U].mHorizSubsampling = 2;
            image.mPlane[image.U].mVertSubsampling = 2;

            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset
                    + (params.nStride * params.nSliceHeight / 4);
            image.mPlane[image.V].mColInc = 1;
            image.mPlane[image.V].mRowInc = params.nStride / 2;
            image.mPlane[image.V].mHorizSubsampling = 2;
            image.mPlane[image.V].mVertSubsampling = 2;
            break;

        case OMX_COLOR_FormatYUV420SemiPlanar:
            // FIXME: NV21 for sw-encoder, NV12 for decoder and hw-encoder
        case OMX_COLOR_FormatYUV420PackedSemiPlanar:
            // NV12
            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
            image.mPlane[image.U].mColInc = 2;
            image.mPlane[image.U].mRowInc = params.nStride;
            image.mPlane[image.U].mHorizSubsampling = 2;
            image.mPlane[image.U].mVertSubsampling = 2;

            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset + 1;
            image.mPlane[image.V].mColInc = 2;
            image.mPlane[image.V].mRowInc = params.nStride;
            image.mPlane[image.V].mHorizSubsampling = 2;
            image.mPlane[image.V].mVertSubsampling = 2;
            break;

        default:
            TRESPASS();
    }
    // Note: TODO a detailed walkthrough of these per-plane computations is deferred;
    // in short, each color format maps to its own YUV plane layout.
    return true;
}
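
As a worked example of the planar branch above, plugging in the 128x128 dummy values that IsFlexibleColorFormat() uses (nStride = nSliceHeight = 128) with OMX_COLOR_FormatYUV420Planar yields:

// Y: offset 0,     colInc 1, rowInc 128, subsampling 1x1
// U: offset 16384, colInc 1, rowInc 64,  subsampling 2x2   (offset = 128 * 128)
// V: offset 20480, colInc 1, rowInc 64,  subsampling 2x2   (offset = 16384 + 128 * 128 / 4)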

5.3. Implementation analysis of setSupportedOutputFormat(!haveNativeWindow /* getLegacyFlexibleFormat */):
Handles flexible-format substitution and sets the optimal output format.
Note: for the SoftAVCDec example, getLegacyFlexibleFormat is true.

// [frameworks/av/media/libstagefright/ACodec.cpp]

// Set optimal output format. OMX component lists output formats in the order
// of preference, but this got more complicated since the introduction of flexible
// YUV formats. We support a legacy behavior for applications that do not use
// surface output, do not specify an output format, but expect a "usable" standard
// OMX format. SW readable and standard formats must be flex-YUV.
//
// Suggested preference order:
// - optimal format for texture rendering (mediaplayer behavior)
// - optimal SW readable & texture renderable format (flex-YUV support)
// - optimal SW readable non-renderable format (flex-YUV bytebuffer support)
// - legacy "usable" standard formats
//
// For legacy support, we prefer a standard format, but will settle for a SW readable
// flex-YUV format.
status_t ACodec::setSupportedOutputFormat(bool getLegacyFlexibleFormat) {
    // Create the OMX parameter structs and initialize the first
    OMX_VIDEO_PARAM_PORTFORMATTYPE format, legacyFormat;
    InitOMXParams(&format);
    // output buffer port index
    format.nPortIndex = kPortIndexOutput;

    // Initialize the legacy-format holder
    InitOMXParams(&legacyFormat);
    // this field will change when we find a suitable legacy format
    legacyFormat.eColorFormat = OMX_COLOR_FormatUnused;

    // Loop over the component's formats looking for a suitable "legacy" color format
    for (OMX_U32 index = 0; ; ++index) {
        format.nIndex = index;
        // Query the underlying component. For the SoftAVCDec example the result is:
        // format->eCompressionFormat = OMX_VIDEO_CodingUnused; format->eColorFormat = OMX_COLOR_FormatYUV420Planar;
        status_t err = mOMXNode->getParameter(
                OMX_IndexParamVideoPortFormat, &format, sizeof(format));
        if (err != OK) {
            // no more formats, pick legacy format if found
            if (legacyFormat.eColorFormat != OMX_COLOR_FormatUnused) {
                 memcpy(&format, &legacyFormat, sizeof(format));
                 break;
            }
            return err;
        }
        if (format.eCompressionFormat != OMX_VIDEO_CodingUnused) {
            return OMX_ErrorBadParameter;
        }
        if (!getLegacyFlexibleFormat) {
            break;
        }
        // Per the analysis above, eColorFormat is OMX_COLOR_FormatYUV420Planar, so we break here
        // standard formats that were exposed to users before
        if (format.eColorFormat == OMX_COLOR_FormatYUV420Planar
                || format.eColorFormat == OMX_COLOR_FormatYUV420PackedPlanar
                || format.eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar
                || format.eColorFormat == OMX_COLOR_FormatYUV420PackedSemiPlanar
                || format.eColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar) {
            break;
        }
        // find best legacy non-standard format
        OMX_U32 flexibleEquivalent;
        if (legacyFormat.eColorFormat == OMX_COLOR_FormatUnused
                && IsFlexibleColorFormat(
                        mOMXNode, format.eColorFormat, false /* usingNativeBuffers */,
                        &flexibleEquivalent)
                && flexibleEquivalent == OMX_COLOR_FormatYUV420Flexible) {
            memcpy(&legacyFormat, &format, sizeof(format));
        }
    }
    // Push the chosen format down to the component. For the SoftAVCDec example the
    // component merely re-checks that the coding type and color format match what it
    // supports; since the values came from the component itself they necessarily match, so this returns OK.
    return mOMXNode->setParameter(
            OMX_IndexParamVideoPortFormat, &format, sizeof(format));
}

5.4. Implementation analysis of setVideoFormatOnPort(kPortIndexInput, width, height, compressionFormat, frameRateFloat):
Sets the video format on the (input) buffer port. The method's last parameter defaults to -1.

// [frameworks/av/media/libstagefright/ACodec.cpp]
status_t ACodec::setVideoFormatOnPort(
        OMX_U32 portIndex,
        int32_t width, int32_t height, OMX_VIDEO_CODINGTYPE compressionFormat,
        float frameRate) {
    // Create and initialize the parameter struct
    OMX_PARAM_PORTDEFINITIONTYPE def;
    InitOMXParams(&def);
    def.nPortIndex = portIndex;

    // OMX video port definition: basic video info such as width/height, color format, coding type, bitrate, frame rate, etc.
    OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;

    // Queries the component via the OMXNodeInstance proxy. For the SoftAVCDec example,
    // this returns the port definition of the port selected by portIndex (copied out with memcpy).
    status_t err = mOMXNode->getParameter(
            OMX_IndexParamPortDefinition, &def, sizeof(def));
    if (err != OK) {
        return err;
    }
	
    if (portIndex == kPortIndexInput) {
        // For the input port, re-validate the per-buffer size (in bytes);
        // enforce a minimum of 64 KB.
        // XXX Need a (much) better heuristic to compute input buffer sizes.
        const size_t X = 64 * 1024;
        if (def.nBufferSize < X) {
            def.nBufferSize = X;
        }
    }

    // The requested port's domain must be video; otherwise fail
    if (def.eDomain != OMX_PortDomainVideo) {
        ALOGE("expected video port, got %s(%d)", asString(def.eDomain), def.eDomain);
        return FAILED_TRANSACTION;
    }

    // Set the raw frame width and height
    video_def->nFrameWidth = width;
    video_def->nFrameHeight = height;

    if (portIndex == kPortIndexInput) {
        // For the input port:
        // set the compression format
        video_def->eCompressionFormat = compressionFormat;
        // and leave the color format unused
        video_def->eColorFormat = OMX_COLOR_FormatUnused;
        if (frameRate >= 0) {
            // Frame rate: the float fps (e.g. 30) is multiplied by 2^16, because OMX
            // expresses xFramerate in Q16.16 fixed point.
            video_def->xFramerate = (OMX_U32)(frameRate * 65536.0f);
        }
    }

    // Pushes the updated definition back to the component via the OMXNodeInstance proxy.
    // For the SoftAVCDec example this simply stores the reconfigured values.
    err = mOMXNode->setParameter(
            OMX_IndexParamPortDefinition, &def, sizeof(def));

    return err;
}
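
The xFramerate conversion above is plain Q16 fixed point (the integer fps sits in the upper 16 bits). A quick sanity check with 30 fps:

// Q16 round trip for a 30 fps stream.
float frameRate = 30.0f;
OMX_U32 xFramerate = (OMX_U32)(frameRate * 65536.0f);  // 1966080 == 30 << 16
float fpsBack = xFramerate / 65536.0f;                 // back to 30.0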

5.5. Implementation analysis of setColorAspectsForVideoDecoder(width, height, haveNativeWindow | usingSwRenderer, msg, outputFormat):
Configures the color aspects description, e.g. the color matrix coefficient type.
Note: per the earlier analysis, with the stock SoftAVCDec software decoder haveNativeWindow is false and usingSwRenderer is true, so usingNativeWindow is true.

// [frameworks/av/media/libstagefright/ACodec.cpp]
status_t ACodec::setColorAspectsForVideoDecoder(
        int32_t width, int32_t height, bool usingNativeWindow,
        const sp<AMessage> &configFormat, sp<AMessage> &outputFormat) {
    // Create and initialize the color aspects description struct
    DescribeColorAspectsParams params;
    InitOMXParams(&params);
    // output buffer port index
    params.nPortIndex = kPortIndexOutput;

    // See section 5.5.1
    getColorAspectsFromFormat(configFormat, params.sAspects);
    if (usingNativeWindow) {
        // When using a native window,
        // reset the default codec color aspects if needed.
        // See section 5.5.2.
        setDefaultCodecColorAspectsIfNeeded(params.sAspects, width, height);
        // The default aspects will be set back to the output format during the
        // getFormat phase of configure(). Set non-Unspecified values back into the
        // format, in case component does not support this enumeration.
        // Store the aspects into the output format.
        // See section 5.5.3.
        setColorAspectsIntoFormat(params.sAspects, outputFormat);
    }

    // Initialize the color-aspects description extension index.
    // See section 5.5.4.
    (void)initDescribeColorAspectsIndex();
	
    // Communicate the current color aspects to the underlying codec.
    // See section 5.5.5.
    return setCodecColorAspects(params);
}
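
For orientation in the 5.5.x subsections below, ColorAspects (declared in media/hardware/VideoAPI.h) bundles the four color properties being passed around; summarized here from that header, not quoted verbatim:

// struct ColorAspects {
//     Range        mRange;         // full vs. limited range
//     Primaries    mPrimaries;     // color primaries (e.g. BT.601, BT.709)
//     Transfer     mTransfer;      // transfer characteristics (gamma/EOTF)
//     MatrixCoeffs mMatrixCoeffs;  // RGB-to-YUV matrix coefficients
// };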

5.5.1. Implementation analysis of getColorAspectsFromFormat(configFormat, params.sAspects):
configFormat is the original input video format; the aspects argument is passed by reference.

// [frameworks/av/media/libstagefright/foundation/ColorUtils.cpp]
// static
void ColorUtils::getColorAspectsFromFormat(const sp<AMessage> &format, ColorAspects &aspects) {
    int32_t range, standard, transfer;
    // Extract the color config from the original video format.
    // See section 5.5.1.1.
    getColorConfigFromFormat(format, &range, &standard, &transfer);

    // Convert the platform color config into the ColorAspects the codec understands.
    // See section 5.5.1.2.
    if (convertPlatformColorAspectsToCodecAspects(
            range, standard, transfer, aspects) != OK) {
        ALOGW("Ignoring illegal color aspects(R:%d(%s), S:%d(%s), T:%d(%s))",
                range, asString((ColorRange)range),
                standard, asString((ColorStandard)standard),
                transfer, asString((ColorTransfer)transfer));
        // Invalid values were converted to unspecified !params!, but otherwise were not changed
        // For encoders, we leave these as is. For decoders, we will use default values.
    }
    ALOGV("Got color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
          "from format (out:R:%d(%s), S:%d(%s), T:%d(%s))",
            aspects.mRange, asString(aspects.mRange),
            aspects.mPrimaries, asString(aspects.mPrimaries),
            aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
            aspects.mTransfer, asString(aspects.mTransfer),
            range, asString((ColorRange)range),
            standard, asString((ColorStandard)standard),
            transfer, asString((ColorTransfer)transfer));
}

5.5.1.1. Implementation analysis of getColorConfigFromFormat(format, &range, &standard, &transfer):
This simply reads the corresponding color configuration out of the original video format.
Because the remaining content is too long for this installment, it continues in a separate part:
TODO [6] Android MediaPlayer Overall Architecture Source Code Analysis - [start Playback Request Handling Flow] [Part 6] [03]

Source: https://blog.csdn.net/u012430727/article/details/116805212