2021-10-01
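
This post is a single Python script: it runs YOLOv3 detection (a PyTorch-YOLOv3-style project, judging from the imports) over the images in data/custom/dd and, for each detection, broadcasts JSON commands over UDP port 7870 that drive step_x/step_z stepper motors and a pump, centering on the detected object and then washing it.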

from __future__ import division

import argparse
import datetime
import os
import random
import socket
import sys
import time

import cv2
import numpy as np

# se=ser.Serial("/dev/ttyUSB0",115200,timeout=1)

from models import *
from utils.utils import *
from utils.datasets import *
from utils.augmentations import *
from utils.transforms import *

from PIL import Image

import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable

import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator

i = 1
cap = cv2.VideoCapture(0)  # camera handle; the capture loop below is commented out
array_of_img = []
start = time.time()
directory_name = r'output'
# Broadcast socket: motion and pump commands are sent as JSON strings to UDP port 7870.
dest = ('<broadcast>', 7870)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

washtime = []
thex = []
they = []
area = 0
count = 0
str1 = ""
filename = 'test_text.txt'
flag = 0
# to = '{"target":"step_z","num":"100"}'
# y = '{"target":"step_x","num":"100"}'
centerx = 300
centery = 300
# 2000 steps take about six seconds
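# The move commands below send num = 10*delta steps, so computetime(delta)
# budgets 6 s of motion per 2000 physical steps plus a 2 s margin.
# Example: delta = 200 -> 2000 steps sent -> computetime(200) = 1*6 + 2 = 8 s.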
def computetime(x):
    total = int(abs(x)/200)
    total = total*6
    total = total+2
    return total


if __name__ == "__main__":
    time.sleep(5)
    start = time.time()
    # s.sendto(to.encode(),dest)
    # while(cap.isOpened()):
    # count=count+1
    # ret, frame = cap.read()
    # cv2.imshow('frame',frame)
    # if cv2.waitKey(30) == ord('q'):
    # cv2.imwrite('data/custom/dd/'+str(i)+".jpg",frame)
    # if cv2.waitKey(30) == ord('q'):
    # ret, frame = cap.read()

    # frame=cv2.imread('data/custom/dd/'+str(i)+".jpg")
    # break
    # When everything done, release the capture

    # cap.release()

    # cv2.destroyAllWindows()
    parser = argparse.ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="data/custom/dd", help="path to dataset")
    parser.add_argument("--model_def", type=str, default="config/yolov3-custom.cfg",
                        help="path to model definition file")
    parser.add_argument("--weights_path", type=str, default="checkpoints/ckpt_86.pth", help="path to weights file")
    parser.add_argument("--class_path", type=str, default="data/custom/classes.names", help="path to class label file")
    parser.add_argument("--conf_thres", type=float, default=0.8, help="object confidence threshold")
    parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression")
    parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
    parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation")
    parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
    parser.add_argument("--checkpoint_model", type=str, default="checkpoints/ckpt_86.pth",
                        help="path to checkpoint model")
    opt = parser.parse_args()
    print(opt)

    #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device('cpu')
    os.makedirs("output", exist_ok=True)

    # Set up model
    model = Darknet(opt.model_def, img_size=opt.img_size).to(device)

    if opt.weights_path.endswith(".weights"):
        # Load darknet weights
        model.load_darknet_weights(opt.weights_path)
    else:
        # Load checkpoint weights
        model.load_state_dict(torch.load(opt.weights_path, map_location='cpu'))  # CPU environment

    model.eval()  # Set in evaluation mode

    dataloader = DataLoader(
        ImageFolder(opt.image_folder,
                    transform=transforms.Compose([DEFAULT_TRANSFORMS, Resize(opt.img_size)])),
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.n_cpu,
    )

    classes = load_classes(opt.class_path)  # Extracts class labels from file

    Tensor = torch.cuda.FloatTensor if device.type == "cuda" else torch.FloatTensor  # match the (CPU) device chosen above

    imgs = []  # Stores image paths
    img_detections = []  # Stores detections for each image index

    print("\nPerforming object detection:")
    prev_time = time.time()
    for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
        # Configure input
        input_imgs = Variable(input_imgs.type(Tensor))

        # Get detections
        with torch.no_grad():
            detections = model(input_imgs)
            detections = non_max_suppression(detections, opt.conf_thres, opt.nms_thres)
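            # Each kept detection is a 7-tuple: (x1, y1, x2, y2, conf, cls_conf, cls_pred).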

        # Log progress
        current_time = time.time()
        inference_time = datetime.timedelta(seconds=current_time - prev_time)
        prev_time = current_time
        print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time))

        # Save image and detections
        imgs.extend(img_paths)
        img_detections.extend(detections)

    # Bounding-box colors
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]

    print("\nSaving images:")

    # Iterate through images and save plot of detections
    for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):

        print("(%d) Image: '%s'" % (img_i, path))

        # Create plot
        img = np.array(Image.open(path))
        fig, ax = plt.subplots(1)
        ax.imshow(img)

        # Draw bounding boxes and labels of detections
        if detections is not None:
            flag = 1
            # Rescale boxes to original image
            detections = rescale_boxes(detections, opt.img_size, img.shape[:2])

            unique_labels = detections[:, -1].cpu().unique()
            n_cls_preds = len(unique_labels)
            bbox_colors = random.sample(colors, n_cls_preds)
            alltime = 0
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                str1=""
                str2 = ""

                print("\t+ Label: %s, Conf: %.5f" % (classes[int(cls_pred)], cls_conf.item()))

                box_w = x2 - x1
                box_h = y2 - y1
                centx = int((x1 + x2) * 0.1 / 2)  # box center x, scaled by 0.1
                centy = int((y1 + y2) * 0.1 / 2)  # box center y, scaled by 0.1
                thex.append(centx)
                they.append(centy)
                str1 += '{"target":"step_z","num":'
#==========================================================================================
# ==========================================================================================
# ==========================================================================================
                str1_time = computetime(centy-centery)
                print("1 time:"+str(str1_time))
                str1 += str(10*(centy-centery))
                str1 += "}"
                s.sendto(str1.encode(), dest)
                time.sleep(abs(str1_time))
                str2 += '{"target":"step_x","num":'
                str2_time = computetime(centy-centery)
                print("2 time:"+str(str1_time))

                str2 +=str(10*(centx-centerx))
                str2+= "}"
                s.sendto(str2.encode(),dest)
                time.sleep(abs(str2_time))
                area = box_h * box_w
                if area > 0:
                    print("begin wash")
                    alltime = 5
                if classes[int(cls_pred)] == "solid":
                    alltime += 7
                str3 = ""
                str3 += '{"target":"pump","num":1000}'  # turn the pump on; the sleep below sets the wash duration
                s.sendto(str3.encode(), dest)
                time.sleep(alltime)
                str3 = ""
                str3 += '{"target":"pump","num":0}'  # turn the pump off
                s.sendto(str3.encode(), dest)
                print("x and y: " + str((x2 + x1) / 2) + "," + str((y1 + y2) / 2))

                print("x:")
                print(int((x1 + x2) / 2))
                print("y:")
                print(int((y1 + y2) / 2))
                print(int(box_w * 0.1))


                color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
                # Create a Rectangle patch
                bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none")
                # print(int(box_w) * int(box_h) * 0.01)
                str1 = '{"target":"step_z","num":'  # vertical move back toward the reference point
                str1 += str(10 * (centery - centy))
                str1 += "}"
                s.sendto(str1.encode(), dest)
                time.sleep(abs(str1_time))
                str2 = '{"target":"step_x","num":'  # horizontal move back
                str2 += str(10 * (centerx - centx))
                str2 += "}"
                s.sendto(str2.encode(), dest)
                time.sleep(abs(str2_time))
                print(str1)
                print(str2)
                ax.add_patch(bbox)
                # Add label
                plt.text(
                    x1,
                    y1,
                    s=classes[int(cls_pred)],
                    color="white",
                    verticalalignment="top",
                    bbox={"color": color, "pad": 0},
                )

        # Save generated image with detections
        plt.axis("off")
        plt.gca().xaxis.set_major_locator(NullLocator())
        plt.gca().yaxis.set_major_locator(NullLocator())
        filename = os.path.basename(path).split(".")[0]
        output_path = os.path.join("output", f"{filename}.png")
        plt.savefig(output_path, bbox_inches="tight", pad_inches=0.0)

        plt.close()
    end = time.time()
    print(end - start)


    total = int(end - start)
    with open("time.txt", 'w+') as file_object:  # 如果不存在将自动创建
        file_object.write(str(total) + "\n")


    # pictureSocket.socket_client()
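
For reference, the messages the script broadcasts form a small JSON-over-UDP protocol: each datagram is a single object whose "target" is "step_z", "step_x", or "pump" and whose "num" is a step count or pump state. Below is a minimal sketch of a listener for that protocol; the port and message shapes are taken from the script above, but the dispatch behavior is an illustrative assumption, not the actual motor-controller firmware.

import json
import socket

PORT = 7870  # same broadcast port the detection script sends to

def handle_command(cmd):
    # Hypothetical dispatch; the real controller's behavior is not shown in the post.
    target = cmd["target"]
    num = int(cmd["num"])  # "num" arrives either as a number or a numeric string
    if target in ("step_x", "step_z"):
        print("move %s by %d steps" % (target, num))
    elif target == "pump":
        print("pump on" if num > 0 else "pump off")

if __name__ == "__main__":
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(("", PORT))  # accept broadcast datagrams from any sender
    while True:
        data, _addr = sock.recvfrom(1024)
        handle_command(json.loads(data.decode()))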

Source: https://blog.csdn.net/qq_41358574/article/details/120576879