
Auto-labeling with a YOLO pre-trained model


Preface: expecting truly accurate automatic labeling is not realistic. If a model could already label everything accurately, there would be no point in training one!

So the method in this post is: pre-train a model on a small dataset, use it to label the remaining images automatically, and finally fine-tune.

 

 

 (The screenshot above shows my folder layout; place your own pre-trained model files in the location it shows.)
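
A sketch of that layout, reconstructed from the model paths used in the code below (the exact folder names in the screenshot are an assumption):

previousTrainModel/
    myVLPChar_yolov3_21000.weights   (pre-trained weights)
    myVLPChar_yolov3.cfg             (network config)
    myVLPCharData.names              (class names)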

The code below loads and runs the YOLO model. Enough talk, here it is!

# coding=utf-8
'''
author : Helen
date : 2020-11-12 16:15
'''

import cv2
import numpy as np
import os
import xml.dom.minidom
# import pypinyin
import time


def ModelYoloV3(frame, confidence=0.5, threshold=0.4):
    # Model file paths
    yolo_dir = './previousTrainModel'  # directory holding the YOLO files
    weightsPath = os.path.join(yolo_dir, 'myVLPChar_yolov3_21000.weights')  # weights file
    configPath = os.path.join(yolo_dir, 'myVLPChar_yolov3.cfg')  # network config file
    labelsPath = os.path.join(yolo_dir, 'myVLPCharData.names')  # class label names

    print("[INFO] loading YOLO from disk...")  # progress message

    # Load the network from the Darknet config and weights
    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
    # net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    # net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
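    # Optional: assuming your OpenCV build was compiled with CUDA support, the commented
    # lines above can instead select the GPU backend:
    # net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    # net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)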

    CONFIDENCE = confidence  # minimum probability to keep a detection (default 0.5)
    THRESHOLD = threshold  # non-maximum suppression threshold (default 0.4)

    # Copy the frame, convert it to a blob and feed it to the network's input layer
    img = frame.copy()
    blobImg = cv2.dnn.blobFromImage(img, 1.0 / 255.0, (416, 416), None, True,
                                    False)  # the net expects blob input: scale to [0, 1], resize to 416x416, swap BGR->RGB
    net.setInput(blobImg)  # hand the blob to the input layer
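    # blobImg now has shape (1, 3, 416, 416): one image in CHW order, values scaled to [0, 1]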

    # Get the names of the output layers and run a forward pass
    outInfo = net.getUnconnectedOutLayersNames()  # YOLOv3 predicts at several scales; outInfo lists one output layer name per scale, for net.forward
    start = time.time()
    layerOutputs = net.forward(outInfo)  # per-layer outputs, each a 2-D array of candidate detections
    end = time.time()
    print("[INFO] YOLO took {:.6f} seconds".format(end - start))  # report inference time

    # Image size
    (H, W) = img.shape[:2]
    # Filter layerOutputs
    # each detection row is: [center_x, center_y, width, height, objectness, N-class score data]
    # the filtered results go into:
    boxes = []  # all bounding boxes (from every output layer)
    confidences = []  # their confidences (probabilities)
    classIDs = []  # their class IDs
    rectsAndClasses = []  # final boxes that pass both thresholds, with label, confidence and colour

    # 1) Drop low-confidence detections
    for out in layerOutputs:  # each output layer
        for detection in out:  # each candidate box
            # confidence of the best class
            scores = detection[5:]  # per-class scores
            classID = np.argmax(scores)  # the class with the highest score is the predicted class
            confidence = scores[classID]  # its score is the detection confidence

            # keep only sufficiently confident detections
            if confidence > CONFIDENCE:
                box = detection[0:4] * np.array([W, H, W, H])  # scale the box back to image coordinates
                (centerX, centerY, width, height) = box.astype("int")
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
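                # e.g. with W = H = 416 and detection[0:4] = [0.5, 0.5, 0.2, 0.3]:
                # box = [208, 208, 83.2, 124.8], so (width, height) = (83, 124) and the
                # top-left corner is x = int(208 - 83 / 2) = 166, y = int(208 - 124 / 2) = 146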
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # 2) Apply non-maximum suppression (NMS) to prune overlapping boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONFIDENCE, THRESHOLD)  # idxs holds the indices of the boxes kept after NMS
    # Read the class label list
    with open(labelsPath, 'rt') as f:
        labels = f.read().rstrip('\n').split('\n')
    # Draw the detections
    np.random.seed(42)
    COLORS = np.random.randint(0, 255, size=(len(labels), 3),
                               dtype="uint8")  # one random colour per class; each colour is an RGB triple, hence size (len(labels), 3)
    if len(idxs) > 0:
        for i in idxs.flatten():  # idxs may be shaped (N, 1), so flatten it before iterating
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])

            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 3)  # box outline, 3 px thick
            text = "{}: {:.4f}".format(labels[int(classIDs[i])], confidences[i])
            cv2.putText(img, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color,
                        2)  # FONT_HERSHEY_SIMPLEX font, scale 0.5, thickness 2 px
            print(labels[int(classIDs[i])], ":", confidences[i])
            rectsAndClasses.append([x, y, w, h, labels[int(classIDs[i])], confidences[i], color])
    return img, rectsAndClasses  # return the annotated image and the list of post-NMS boxes


def genXML(imgName, shape, labels, classes, xmlPath):  # write one Pascal VOC style XML annotation file

    new_txtname = imgName.split('.')[0]  # image file name without extension

    # Create an empty DOM document
    doc = xml.dom.minidom.Document()
    # Create the root node, named annotation
    annotation = doc.createElement('annotation')  # root node
    # Attach the root node to the document
    doc.appendChild(annotation)

    # folder node
    folder = doc.createElement('folder')  # create an element named folder
    # write its content
    folder_text = doc.createTextNode('JPEGImages')  # text to put inside the folder node
    folder.appendChild(folder_text)  # a createTextNode object is appended as text content; a createElement object would be appended as a child node
    annotation.appendChild(folder)  # then attach the finished folder node as a child of annotation

    # filename node
    filename = doc.createElement('filename')
    filename_text = doc.createTextNode(str(new_txtname) + '.jpg')
    filename.appendChild(filename_text)
    #
    annotation.appendChild(filename)

    # path node
    path = doc.createElement('path')
    path_text = doc.createTextNode('E:\\darknet-master\\build\\darknet\\myVLPCharData\\JPEGImages\\%s.jpg' % new_txtname)  # path inside the darknet project; change this to your own
    path.appendChild(path_text)
    #
    annotation.appendChild(path)

    # source node
    source = doc.createElement('source')
    #
    database = doc.createElement('database')
    database_text = doc.createTextNode('Unknown')
    database.appendChild(database_text)
    #
    source.appendChild(database)
    #
    annotation.appendChild(source)

    # size node
    size = doc.createElement('size')

    width = doc.createElement('width')
    width_text = doc.createTextNode(str(shape[1]))
    width.appendChild(width_text)
    size.appendChild(width)

    height = doc.createElement('height')
    height_text = doc.createTextNode(str(shape[0]))
    height.appendChild(height_text)
    size.appendChild(height)

    depth = doc.createElement('depth')
    depth_text = doc.createTextNode(str(shape[-1]))
    depth.appendChild(depth_text)
    size.appendChild(depth)
    #
    annotation.appendChild(size)

    # segmented node
    segmented = doc.createElement('segmented')
    segmented_text = doc.createTextNode('0')
    segmented.appendChild(segmented_text)
    #
    annotation.appendChild(segmented)

    # object nodes, one per detected box
    for [y1, y2, x1, x2], pChar in zip(labels, classes):  # box coordinates and class name
        object = doc.createElement('object')

        name = doc.createElement('name')
        name_text = doc.createTextNode(pChar)  # the label's name, i.e. the class name
        name.appendChild(name_text)
        object.appendChild(name)

        pose = doc.createElement('pose')
        pose_text = doc.createTextNode("Unspecified")
        pose.appendChild(pose_text)
        object.appendChild(pose)

        truncated = doc.createElement('truncated')
        truncated_text = doc.createTextNode("0")
        truncated.appendChild(truncated_text)
        object.appendChild(truncated)

        difficult = doc.createElement('difficult')
        difficult_text = doc.createTextNode("0")
        difficult.appendChild(difficult_text)
        object.appendChild(difficult)

        bndbox = doc.createElement('bndbox')
        #
        xmin = doc.createElement('xmin')
        xmin_text = doc.createTextNode(str(x1))
        xmin.appendChild(xmin_text)
        bndbox.appendChild(xmin)
        #
        ymin = doc.createElement('ymin')
        ymin_text = doc.createTextNode(str(y1))
        ymin.appendChild(ymin_text)
        bndbox.appendChild(ymin)
        #
        xmax = doc.createElement('xmax')
        xmax_text = doc.createTextNode(str(x2))
        xmax.appendChild(xmax_text)
        bndbox.appendChild(xmax)
        #
        ymax = doc.createElement('ymax')
        ymax_text = doc.createTextNode(str(y2))
        ymax.appendChild(ymax_text)
        bndbox.appendChild(ymax)
        #
        object.appendChild(bndbox)
        #
        annotation.appendChild(object)

    # Write the document out as an XML file
    if not os.path.exists(xmlPath):
        os.mkdir(xmlPath)
    with open(os.path.join(xmlPath, '%s.xml' % new_txtname), 'w+') as fp:
        doc.writexml(fp, indent='\n', addindent='\t', newl='', encoding='utf-8')
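
# For one detected box, genXML writes a Pascal VOC style file shaped like the example
# below (the file name and numeric values here are illustrative only):
#   <annotation>
#       <folder>JPEGImages</folder>
#       <filename>0001.jpg</filename>
#       <path>E:\darknet-master\build\darknet\myVLPCharData\JPEGImages\0001.jpg</path>
#       <source><database>Unknown</database></source>
#       <size><width>416</width><height>312</height><depth>3</depth></size>
#       <segmented>0</segmented>
#       <object>
#           <name>A</name>
#           <pose>Unspecified</pose>
#           <truncated>0</truncated>
#           <difficult>0</difficult>
#           <bndbox><xmin>35</xmin><ymin>48</ymin><xmax>72</xmax><ymax>110</ymax></bndbox>
#       </object>
#   </annotation>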


def run(imgPath):
    filesName = os.listdir(imgPath)  # names of all images in the folder
    for f in filesName:
        p = os.path.join(imgPath, f)  # full path of a single image
        frame = cv2.imread(p)
        classesImg, rectsAndClasses = ModelYoloV3(frame)
        cv2.imshow("Labels", classesImg)

        labels = []
        classes = []
        for x, y, w, h, cn, confidence, color in rectsAndClasses:
            X1 = x
            Y1 = y
            X2 = x + w
            Y2 = y + h
            labels.append([Y1, Y2, X1, X2])  # box coordinates
            classes.append(cn)  # class name
        if labels:
            # Write the XML once per image, after all boxes have been collected
            # args: image name, image shape, box coordinate list, class name list, directory for the XML files
            genXML(f, frame.shape, labels, classes, 'E:\\darknet-master\\build\\xml')
        cv2.waitKey(10)

class AutoLabels:

    def __init__(self):
        pass


if __name__ == '__main__':

    path = 'E:\\darknet-master\\build\\image'
    run(path)
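
The generated XML can be opened and corrected in an annotation tool such as labelImg. To then fine-tune a darknet YOLO model, the VOC-style XML still has to be converted into YOLO .txt labels (one "class x_center y_center width height" line per box, normalised to the image size). Below is a minimal sketch of that step; voc_xml_to_yolo_txt and the output folder are illustrative names I am assuming, not part of the original script:

import os
import xml.etree.ElementTree as ET

def voc_xml_to_yolo_txt(xml_dir, txt_dir, names_path):
    # Class names must be read in the same order as the .names file used for training
    with open(names_path, 'rt') as f:
        names = f.read().rstrip('\n').split('\n')
    if not os.path.exists(txt_dir):
        os.mkdir(txt_dir)
    for xml_name in os.listdir(xml_dir):
        if not xml_name.endswith('.xml'):
            continue
        root = ET.parse(os.path.join(xml_dir, xml_name)).getroot()
        size = root.find('size')
        W = float(size.find('width').text)
        H = float(size.find('height').text)
        lines = []
        for obj in root.iter('object'):
            cls_id = names.index(obj.find('name').text)  # class index in the .names file
            bb = obj.find('bndbox')
            xmin, ymin = float(bb.find('xmin').text), float(bb.find('ymin').text)
            xmax, ymax = float(bb.find('xmax').text), float(bb.find('ymax').text)
            # YOLO label format: class x_center y_center width height, normalised to [0, 1]
            cx = (xmin + xmax) / 2.0 / W
            cy = (ymin + ymax) / 2.0 / H
            bw = (xmax - xmin) / W
            bh = (ymax - ymin) / H
            lines.append('%d %.6f %.6f %.6f %.6f' % (cls_id, cx, cy, bw, bh))
        with open(os.path.join(txt_dir, xml_name.replace('.xml', '.txt')), 'w') as f:
            f.write('\n'.join(lines) + '\n')

# Example call (paths assumed from the script above):
# voc_xml_to_yolo_txt('E:\\darknet-master\\build\\xml', 'E:\\darknet-master\\build\\labels',
#                     './previousTrainModel/myVLPCharData.names')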

Reference blog: https://www.cnblogs.com/study-/p/13959391.html

 

Source: https://www.cnblogs.com/buxian/p/13964606.html