Automatic Video Annotation in Python with OpenCV Object Tracking
First, a look at the result.
1. To use OpenCV's object tracking algorithms you first need a working OpenCV environment.
This article uses three packages: opencv-python==4.4.0.46, opencv-contrib-python==4.4.0.46, and lxml.
They can be installed with:
pip install opencv-python==4.4.0.46
pip install opencv-contrib-python==4.4.0.46
pip install lxml
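As a quick sanity check (a minimal sketch, assuming only that the three packages above are installed), you can confirm that the contrib tracking API is actually available before running the annotation script:

import cv2

# Print the installed OpenCV version; the contrib build is required for the tracking API.
print(cv2.__version__)  # expected: 4.4.0

# In 4.4 both constructors live directly under cv2;
# in newer OpenCV releases the MultiTracker API moved to cv2.legacy.
tracker = cv2.TrackerCSRT_create()
trackers = cv2.MultiTracker_create()
print(type(tracker), type(trackers))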
2. How to use it (the core of steps (1)-(4) is also shown in the minimal sketch after this list):
(1) Press "s" (with the keyboard in English input mode) to start annotating.
(2) Press the digit 1-9 on the numeric keypad that matches the label you want; the label numbers and names are customizable, but must be defined in advance.
(3) Draw a box around the target.
(4) Press the space bar to continue.
Repeat steps (1)-(4) to track and draw multiple targets.
Press "r" (English input mode) to clear all drawings.
Press "q" (English input mode) to quit.
When a tracked target is lost, all drawings are cleared automatically.
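Before the full script, here is a stripped-down sketch of that interaction loop. It keeps only steps (1), (3) and (4) and skips the label digits and XML output; "video.avi" is a placeholder file name, not part of the original article.

import cv2

vs = cv2.VideoCapture("video.avi")     # placeholder path
trackers = cv2.MultiTracker_create()   # one MultiTracker holds every selected target

while True:
    ret, frame = vs.read()
    if not ret:
        break
    ok, boxes = trackers.update(frame)  # advance every tracker on the new frame
    for b in boxes:
        x, y, w, h = [int(v) for v in b]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow("frame", frame)
    key = cv2.waitKey(30)
    if key == ord("s"):
        # (1) pause; (3) draw the box; (4) Enter/space in selectROI resumes playback
        box = cv2.selectROI("frame", frame, fromCenter=False, showCrosshair=True)
        trackers.add(cv2.TrackerCSRT_create(), frame, box)
    elif key == ord("q"):
        break

vs.release()
cv2.destroyAllWindows()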
import cv2
import os
import time
from lxml import etree

# video path
vs = cv2.VideoCapture('peaple.avi')
# custom labels
label = {1: "people", 2: "car", 3: "camera"}
# image save path; be sure to use an absolute path!
imgpath = r"c:\users\bgt\desktop\opencv\img"
# xml save path; be sure to use an absolute path!
xmlpath = r"c:\users\bgt\desktop\opencv\xml"
# make the display window resizable
cv2.namedWindow("frame", 0)
# set the window width and height
cv2.resizeWindow("frame", 618, 416)


# class that builds a VOC-style XML annotation
class GEN_Annotations:
    def __init__(self, json_info):
        self.root = etree.Element("annotation")
        child1 = etree.SubElement(self.root, "folder")
        child1.text = str(json_info["pic_dirname"])
        child2 = etree.SubElement(self.root, "filename")
        child2.text = str(json_info["filename"])
        child3 = etree.SubElement(self.root, "path")
        child3.text = str(json_info["pic_path"])
        child4 = etree.SubElement(self.root, "source")
        child5 = etree.SubElement(child4, "database")
        child5.text = "my name is bgt"

    def set_size(self, width, height, channel):
        size = etree.SubElement(self.root, "size")
        widthn = etree.SubElement(size, "width")
        widthn.text = str(width)
        heightn = etree.SubElement(size, "height")
        heightn.text = str(height)
        channeln = etree.SubElement(size, "depth")
        channeln.text = str(channel)
        segmented = etree.SubElement(self.root, "segmented")
        segmented.text = "0"

    def savefile(self, filename):
        tree = etree.ElementTree(self.root)
        tree.write(filename, pretty_print=True, xml_declaration=False, encoding='utf-8')

    def add_pic_attr(self, label, x0, y0, x1, y1):
        obj = etree.SubElement(self.root, "object")
        namen = etree.SubElement(obj, "name")
        namen.text = label
        pose = etree.SubElement(obj, "pose")
        pose.text = "unspecified"
        truncated = etree.SubElement(obj, "truncated")
        truncated.text = "0"
        difficult = etree.SubElement(obj, "difficult")
        difficult.text = "0"
        bndbox = etree.SubElement(obj, "bndbox")
        xminn = etree.SubElement(bndbox, "xmin")
        xminn.text = str(x0)
        yminn = etree.SubElement(bndbox, "ymin")
        yminn.text = str(y0)
        xmaxn = etree.SubElement(bndbox, "xmax")
        xmaxn.text = str(x1)
        ymaxn = etree.SubElement(bndbox, "ymax")
        ymaxn.text = str(y1)


# function that writes one VOC XML file for the current frame
def voc_opencv_xml(pic_dirname, filename, pic_path, width, height, depth,
                   boxes, label, label_a, save="1.xml"):
    json_info = {}
    json_info["pic_dirname"] = pic_dirname
    json_info["filename"] = filename
    json_info["pic_path"] = pic_path
    anno = GEN_Annotations(json_info)
    anno.set_size(width, height, depth)
    for box in range(len(boxes)):
        x, y, w, h = [int(v) for v in boxes[box]]
        anno.add_pic_attr(label[label_a[box]], x, y, x + w, y + h)
    anno.savefile(save)


if __name__ == '__main__':
    label_a = []
    contents = os.path.split(imgpath)[1]
    trackers = cv2.MultiTracker_create()
    while True:
        # take the timestamp once so the image and its xml share the same base name
        stamp = str(time.time()).replace(".", "_")
        filename_jpg = stamp + ".jpg"
        filename_xml = stamp + ".xml"
        path_filename_jpg = os.path.join(imgpath, filename_jpg)
        path_filename_xml = os.path.join(xmlpath, filename_xml)
        ret, frame = vs.read()
        if not ret:
            break
        success, boxes = trackers.update(frame)
        if len(boxes) > 0:
            # save the clean frame before anything is drawn on it
            cv2.imwrite(path_filename_jpg, frame)
            judge = True
        else:
            judge = False
        if not success:
            print("target lost")
            trackers = cv2.MultiTracker_create()
            label_a = []
            judge = False
        if judge:
            voc_opencv_xml(contents, filename_jpg, path_filename_jpg,
                           frame.shape[1], frame.shape[0], frame.shape[2],
                           boxes, label, label_a, path_filename_xml)
            for box in range(len(boxes)):
                x, y, w, h = [int(v) for v in boxes[box]]
                cv2.putText(frame, label[label_a[box]], (x, y),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 1)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow('frame', frame)
        var = cv2.waitKey(30)
        if var == ord('s'):
            # show the label table, then wait for a digit key (1-9)
            cv2.putText(frame, str(label), (50, 50),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 255, 0), 2)
            cv2.imshow('frame', frame)
            var = cv2.waitKey(0)
            if 1 <= var - 48 <= len(label):
                label_a.append(int(var - 48))
                box = cv2.selectROI("frame", frame, fromCenter=False, showCrosshair=True)
                tracker = cv2.TrackerCSRT_create()
                trackers.add(tracker, frame, box)
        elif var == ord("r"):
            # clear every tracker and its label
            trackers = cv2.MultiTracker_create()
            label_a = []
        elif var == ord('q'):
            # quit
            break
    vs.release()
    cv2.destroyAllWindows()
3. The resulting images and XML files are in VOC format. Both are named with a timestamp so that files with the same name never overwrite each other.
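If you want to spot-check one of the generated annotation files before the next step, here is a minimal read-back sketch using lxml; the timestamp file name below is made up for illustration and the directory is the xmlpath from the script.

from lxml import etree

# hypothetical example of a timestamp-named output file
tree = etree.parse(r"c:\users\bgt\desktop\opencv\xml\1650000000_123456.xml")
root = tree.getroot()

# image name and size recorded in the annotation
print(root.findtext("filename"), root.findtext("size/width"), root.findtext("size/height"))

# one <object> element per tracked target in that frame
for obj in root.findall("object"):
    name = obj.findtext("name")
    xmin = obj.findtext("bndbox/xmin")
    ymin = obj.findtext("bndbox/ymin")
    xmax = obj.findtext("bndbox/xmax")
    ymax = obj.findtext("bndbox/ymax")
    print(name, xmin, ymin, xmax, ymax)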
4. Finally, open the collected images and XML files in labelImg for a final check and any fine-tuning.
This concludes the article on automatic video annotation in Python using OpenCV's object tracking algorithms. For more on OpenCV object tracking and automatic video annotation, please search the earlier articles or browse the related articles. Thank you for your continued support!