
yolov5s.pt -> onnx -> rknn


Failed attempt
https://github.com/soloIife/yolov5_for_rknn
The models bundled with the rknn toolkit examples can all be converted to rknn, but yolov5s could not be converted this way.

Successful attempt
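The conversion script below starts from an ONNX file, so the .pt -> .onnx step has to happen first; the original post does not show it. A rough sketch of that export, assuming the model is pulled through torch.hub and exported with plain torch.onnx.export (in practice the export.py script in the ultralytics/yolov5 repo is the usual route, and its flags differ between versions; for custom weights, torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt', autoshape=False) loads them instead):

import torch

# autoshape=False returns the raw detection model rather than the preprocessing wrapper,
# which is what we want to trace for ONNX export.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
model.eval()

dummy = torch.zeros(1, 3, 640, 640)  # NCHW dummy input, 640x640 is the default yolov5 size
torch.onnx.export(model, dummy, 'yolov5s.onnx',
                  opset_version=11,  # adjust to whatever opset your toolkit version accepts
                  input_names=['images'],
                  output_names=['output'])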

import yaml
from rknn.api import RKNN
import cv2

_model_load_dict = {
    'caffe': 'load_caffe',
    'tensorflow': 'load_tensorflow',
    'tflite': 'load_tflite',
    'onnx': 'load_onnx',
    'darknet': 'load_darknet',
    'pytorch': 'load_pytorch',
    'mxnet': 'load_mxnet',
    'rknn': 'load_rknn',
    }

yaml_file = './config.yaml'


def main():
    with open(yaml_file, 'r') as F:
        config = yaml.safe_load(F)  # safe_load avoids the PyYAML Loader warning/error
    # print('config is:')
    # print(config)

    model_type = config['running']['model_type']
    print('model_type is {}'.format(model_type))  # check the model type

    rknn = RKNN(verbose=True)



    # configure the conversion (normalization, quantization, target platform)
    print('--> config model')
    rknn.config(**config['config'])
    print('done')


    print('--> Loading model')
    load_function = getattr(rknn, _model_load_dict[model_type])
    ret = load_function(**config['parameters'][model_type])
    if ret != 0:
        print('Load yolo failed! Ret = {}'.format(ret))
        exit(ret)
    print('done')

    ####
    #print('hybrid_quantization')
    #ret = rknn.hybrid_quantization_step1(dataset=config['build']['dataset'])


    if model_type != 'rknn':
        print('--> Building model')
        ret = rknn.build(**config['build'])
        print('acc_eval')
        #rknn.accuracy_analysis(inputs='./dataset.txt', target='rk3399pro')
        #print('acc_eval done!')

        if ret != 0:
            print('Build yolo failed!')
            exit(ret)
    else:
        print('--> skip Building model step, because the model is already rknn')


    # export the RKNN model
    if config['running']['export'] is True:
        print('--> Export RKNN model')
        ret = rknn.export_rknn(**config['export_rknn'])
        if ret != 0:
            print('Export RKNN model failed')
            exit(ret)
    else:
        print('--> skip Export model')
    exit()  # the original script stops here, so the runtime / inference / eval steps below never run

    # initialize the runtime environment on the target
    print('--> Init runtime environment')
    ret = rknn.init_runtime(**config['init_runtime'])
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')


    print('--> load img')
    img = cv2.imread(config['img']['path'])
    print('img shape is {}'.format(img.shape))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    inputs = [img]
    print(inputs[0][0:10,0,0])
    # run inference
    if config['running']['inference'] is True:
        print('--> Running model')
        config['inference']['inputs'] = inputs
        #print(config['inference'])
        outputs = rknn.inference(inputs)
        #outputs = rknn.inference(config['inference'])
        print('len of output {}'.format(len(outputs)))
        print('outputs[0] shape is {}'.format(outputs[0].shape))
        print(outputs[0][0][0:2])
    else:
        print('--> skip inference')
    # evaluate model performance
    if config['running']['eval_perf'] is True:
        print('--> Begin evaluate model performance')
        config['inference']['inputs'] = inputs
        perf_results = rknn.eval_perf(inputs=[img])
    else:
        print('--> skip eval_perf')


if __name__ == '__main__':
    main()
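The script is driven entirely by config.yaml, reproduced below. Before kicking off a (potentially slow) quantized build, it can help to sanity-check which stages are enabled and which model file will be loaded; a minimal check using the same keys as the script above:

import yaml

with open('./config.yaml') as f:
    cfg = yaml.safe_load(f)

print(cfg['running'])                                    # which stages are enabled
print(cfg['parameters'][cfg['running']['model_type']])   # model file(s) that will be loaded
print(cfg['export_rknn']['export_path'])                 # where the .rknn will be written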


running:
  model_type: onnx       # type of the model to convert
  export: True
  inference: False
  eval_perf: True


parameters:
  caffe:
    model: './mobilenet_v2.prototxt'
    proto: 'caffe' #lstm_caffe
    blobs: './mobilenet_v2.caffemodel'
  
  tensorflow:
    tf_pb: './ssd_mobilenet_v1_coco_2017_11_17.pb'
    inputs: ['FeatureExtractor/MobilenetV1/MobilenetV1/Conv2d_0/BatchNorm/batchnorm/mul_1']
    outputs: ['concat', 'concat_1']
    input_size_list: [[300, 300, 3]]

  tflite:
    model: './sample/tflite/mobilenet_v1/mobilenet_v1.tflite'

  onnx:      # path of the ONNX model to convert
    model: './best.onnx'   #best_op.onnx   #best_noop.onnx

    #C:\Users\HP\Desktop\CODE\yolov5_for_rknn-master\weights\best.onnx

  darknet:
    model: './yolov3-tiny.cfg'
    weight: './yolov3.weights'

  pytorch:
    model: './yolov5.pt'
    input_size_list: [[3, 512, 512]]

  mxnet:
    symbol: 'resnext50_32x4d-symbol.json'
    params: 'resnext50_32x4d-4ecf62e2.params'
    input_size_list: [[3, 224, 224]]

  rknn:
    path: './bestrk.rknn'

config:
  #mean_value: [[0,0,0]]
  #std_value: [[58.82,58.82,58.82]]
  channel_mean_value: '0 0 0 255' # 123.675 116.28 103.53 58.395 # 0 0 0 255
  reorder_channel: '0 1 2' # '2 1 0'
  need_horizontal_merge: False
  batch_size: 1
  epochs: -1
  target_platform: ['rk3399pro']
  quantized_dtype: 'asymmetric_quantized-u8'
#asymmetric_quantized-u8,dynamic_fixed_point-8,dynamic_fixed_point-16
  optimization_level: 3

build:
  do_quantization: True
  dataset: './dataset.txt' # '/home/zen/rknn_convert/quant_data/hand_dataset/pic_path_less.txt'
  pre_compile: False

export_rknn:
  export_path: './best_noop1.rknn'

init_runtime:
  target: rk3399pro
  device_id: null
  perf_debug: False
  eval_mem: False
  async_mode: False

img: &img
  path: './test.jpg'

inference:
  inputs: *img
  data_type: 'uint8'
  data_format: 'nhwc' # 'nchw', 'nhwc'
  inputs_pass_through: null

eval_perf:
  inputs: *img
  data_type: 'uint8'
  data_format: 'nhwc'
  is_print: True
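For reference, channel_mean_value is given as 'M0 M1 M2 S' and the toolkit normalizes each input pixel as (x - M) / S, so '0 0 0 255' scales the uint8 image to [0, 1], which matches yolov5's own preprocessing; reorder_channel: '0 1 2' leaves the channel order untouched, consistent with the BGR->RGB conversion already done in the script. A NumPy sketch of the equivalent normalization (illustrative only; the toolkit applies this internally):

import numpy as np

def rknn_style_normalize(img_u8, mean=(0, 0, 0), scale=255.0):
    # mirrors channel_mean_value: '0 0 0 255'  ->  (pixel - mean) / scale
    return (img_u8.astype(np.float32) - np.asarray(mean, dtype=np.float32)) / scale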

RKNN Toolkit 1.6.0 was tried first but could not handle the Slice op; reinstalling version 1.7.0 solved the problem.
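A quick way to confirm which toolkit version is actually installed (the distribution name 'rknn-toolkit' is an assumption here; check pip list if it differs):

# assumption: the wheel registers itself as 'rknn-toolkit'; adjust the name if needed
import pkg_resources
print(pkg_resources.get_distribution('rknn-toolkit').version)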

Source: https://blog.csdn.net/highoooo/article/details/120175189