How to remove a specific tag/sticker/object from an image using OpenCV?
I have hundreds of jewelry product images. Some of them carry a "best seller" tag, and the position of the tag varies from image to image. I want to iterate over all of the images and, if an image has this tag, remove it. The resulting image should render the background over the removed object's pixels.
Example of an image with the tag/sticker/object:
Tag/sticker/object to be removed:
What I have tried so far:

import cv2
import numpy as np

img = cv2.imread('./images/001.jpg')
sticker = cv2.imread('./images/tag.png', 1)

# absdiff requires both arrays to have the same shape and type, and it only
# compares pixels at fixed positions, so it cannot locate a tag whose
# position changes from image to image
diff_im = cv2.absdiff(img, sticker)
I expect the resulting image to look like this:
Solution:
Here's an approach using a modified Template Matching technique. This is the overall strategy:
> Load the template, convert to grayscale, perform Canny edge detection
> Load the original image, convert to grayscale
> Continuously rescale the image, apply template matching using edges, and keep track of the correlation coefficient (a higher value means a better match)
> Find the coordinates of the best-fit bounding box, then erase the unwanted ROI
To begin, we load the template and perform Canny edge detection. Applying template matching on edges instead of the raw image removes color-variation differences and gives a more robust result. Extracted edges from the template image:
Next, we continuously scale down the image and apply template matching on the resized image, maintaining the aspect ratio with each resize (using an old answer). Here's a visualization of the strategy:
The reason we resize the image is that standard template matching with cv2.matchTemplate is not robust and may give false positives when the dimensions of the template and the image do not match. To overcome this dimension issue, we use the following modified approach:
> Continuously resize the input image at various smaller scales
> Apply template matching with cv2.matchTemplate and keep track of the largest correlation coefficient
> The ratio/scale with the largest correlation coefficient gives the best-matched ROI (the sketch right after this list also uses this score to decide whether the tag is present at all)
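Because only some of the hundreds of product photos actually carry the tag, it also helps to decide, per image, whether anything should be erased at all. This is not part of the answer's code; one hedged way to do it is to run essentially the same multi-scale search with the normalized method cv2.TM_CCOEFF_NORMED, whose scores fall in [-1, 1], and compare the best score against a cutoff (the 0.5 below is a made-up value that needs tuning on a few labelled examples):

import cv2
import numpy as np

def best_match_score(gray, edge_template):
    # Sketch only: multi-scale search like the full script below, but with
    # cv2.TM_CCOEFF_NORMED so the score is bounded and easy to threshold
    tH, tW = edge_template.shape[:2]
    best = -1.0
    for scale in np.linspace(0.2, 1.0, 20)[::-1]:
        new_w = int(gray.shape[1] * scale)
        new_h = int(gray.shape[0] * scale)
        # Stop once the rescaled image becomes smaller than the template
        if new_h < tH or new_w < tW:
            break
        resized = cv2.resize(gray, (new_w, new_h), interpolation=cv2.INTER_AREA)
        edges = cv2.Canny(resized, 50, 200)
        result = cv2.matchTemplate(edges, edge_template, cv2.TM_CCOEFF_NORMED)
        best = max(best, cv2.minMaxLoc(result)[1])
    return best

# Hypothetical usage, with `gray` and `template` prepared as in the full script:
# TAG_THRESHOLD = 0.5  # made-up value; tune on images known to have / lack the tag
# has_tag = best_match_score(gray, template) > TAG_THRESHOLD

Images whose best score never clears the cutoff can simply be copied through unchanged.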
Once we have the ROI, we can "erase" the logo by filling the rectangle with white:
cv2.rectangle(final, (start_x, start_y), (end_x, end_y), (255,255,255), -1)
Full code:

import cv2
import numpy as np

# Resizes an image and maintains aspect ratio
def maintain_aspect_ratio_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # Grab the image size and initialize dimensions
    dim = None
    (h, w) = image.shape[:2]

    # Return original image if no need to resize
    if width is None and height is None:
        return image

    # We are resizing height if width is none
    if width is None:
        # Calculate the ratio of the height and construct the dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # We are resizing width if height is none
    else:
        # Calculate the ratio of the width and construct the dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    # Return the resized image
    return cv2.resize(image, dim, interpolation=inter)

# Load template, convert to grayscale, perform Canny edge detection
template = cv2.imread('template.PNG')
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
template = cv2.Canny(template, 50, 200)
(tH, tW) = template.shape[:2]
cv2.imshow("template", template)

# Load original image, convert to grayscale
original_image = cv2.imread('1.jpg')
final = original_image.copy()
gray = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
found = None

# Dynamically rescale image for better template matching
for scale in np.linspace(0.2, 1.0, 20)[::-1]:

    # Resize image to scale and keep track of ratio
    resized = maintain_aspect_ratio_resize(gray, width=int(gray.shape[1] * scale))
    r = gray.shape[1] / float(resized.shape[1])

    # Stop if template image size is larger than resized image
    if resized.shape[0] < tH or resized.shape[1] < tW:
        break

    # Detect edges in resized image and apply template matching
    canny = cv2.Canny(resized, 50, 200)
    detected = cv2.matchTemplate(canny, template, cv2.TM_CCOEFF)
    (_, max_val, _, max_loc) = cv2.minMaxLoc(detected)

    # Uncomment this section for visualization
    '''
    clone = np.dstack([canny, canny, canny])
    cv2.rectangle(clone, (max_loc[0], max_loc[1]), (max_loc[0] + tW, max_loc[1] + tH), (0,255,0), 2)
    cv2.imshow('visualize', clone)
    cv2.waitKey(0)
    '''

    # Keep track of correlation value
    # Higher correlation means better match
    if found is None or max_val > found[0]:
        found = (max_val, max_loc, r)

# Compute coordinates of bounding box
(_, max_loc, r) = found
(start_x, start_y) = (int(max_loc[0] * r), int(max_loc[1] * r))
(end_x, end_y) = (int((max_loc[0] + tW) * r), int((max_loc[1] + tH) * r))

# Draw bounding box on ROI to remove
cv2.rectangle(original_image, (start_x, start_y), (end_x, end_y), (0,255,0), 2)
cv2.imshow('detected', original_image)

# Erase unwanted ROI (Fill ROI with white)
cv2.rectangle(final, (start_x, start_y), (end_x, end_y), (255,255,255), -1)
cv2.imshow('final', final)
cv2.waitKey(0)
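The question asks for the background to be rendered over the removed pixels rather than a fixed color. If the background is plain white, the white fill above is already equivalent; for a textured or gradient background, a hedged alternative (not part of the original answer) is to build a mask from the detected box and let OpenCV's inpainting reconstruct that area from its surroundings. The padding and inpaint radius below are arbitrary choices:

import cv2
import numpy as np

# Sketch only: reuses `final` and the box coordinates computed above
pad = 3  # hypothetical margin to also cover the tag's anti-aliased border
mask = np.zeros(final.shape[:2], dtype=np.uint8)
cv2.rectangle(mask, (start_x - pad, start_y - pad), (end_x + pad, end_y + pad), 255, -1)

# Reconstruct the masked region from the surrounding pixels (radius 3, Telea method)
inpainted = cv2.inpaint(final, mask, 3, cv2.INPAINT_TELEA)
cv2.imshow('inpainted', inpainted)
cv2.waitKey(0)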
Tags: python, opencv, image, image-processing, computer-vision. Source: https://codeday.me/bug/20191013/1907245.html