Learning to Build a Raspberry Pi Vision Car with OpenCV Object Tracking
Object tracking demo

Process:

I. Initialization
def motor_init():
    global l_motor, r_motor
    # turn the motor pins into PWM outputs at 100 Hz and start them at 0% duty cycle
    l_motor = GPIO.PWM(l_motor, 100)
    r_motor = GPIO.PWM(r_motor, 100)
    l_motor.start(0)
    r_motor.start(0)

def direction_init():
    # all direction and motor pins are plain digital outputs
    GPIO.setup(left_back, GPIO.OUT)
    GPIO.setup(left_front, GPIO.OUT)
    GPIO.setup(l_motor, GPIO.OUT)
    GPIO.setup(right_front, GPIO.OUT)
    GPIO.setup(right_back, GPIO.OUT)
    GPIO.setup(r_motor, GPIO.OUT)

def servo_init():
    global pwm_servo
    # PCA9685 board drives the two camera servos
    pwm_servo = Adafruit_PCA9685.PCA9685()

def init():
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    direction_init()
    servo_init()
    motor_init()
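For context, these functions refer to pin-number constants that are declared at module level in the complete listing at the end of the article (BCM numbering):

# BCM pin numbers, as declared in the complete listing
l_motor = 18       # left motor PWM pin
left_front = 22
left_back = 27
r_motor = 23       # right motor PWM pin
right_front = 25
right_back = 24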
II. Motion control functions
def front(speed):
    # both wheels forward
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 1)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 1)
    GPIO.output(right_back, 0)

def back(speed):
    # both wheels backward
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 1)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 1)

def left(speed):
    # pivot left: left wheel backward, right wheel forward
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 1)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 1)
    GPIO.output(right_back, 0)

def right(speed):
    # pivot right: left wheel forward, right wheel backward
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 1)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 1)

def stop():
    # zero duty cycle and clear all direction pins
    l_motor.ChangeDutyCycle(0)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(0)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 0)
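A quick bench test of these functions, in the same spirit as the commented-out test lines at the end of the complete listing (a minimal sketch; it assumes the imports, pin constants and functions from that listing):

# quick bench test of the motion functions
init()
front(50)         # forward at 50% duty cycle
time.sleep(1)
left(50)          # pivot left for a second
time.sleep(1)
stop()
GPIO.cleanup()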
III. Servo angle control
def set_servo_angle(channel, angle):
    angle = 4096 * ((angle * 11) + 500) / 20000
    pwm_servo.set_pwm_freq(50)   # servo frequency: 50 Hz
    pwm_servo.set_pwm(channel, 0, int(angle))
set_servo_angle(4, 110)   # top servo, lengthwise  (0: back, 180: front)
set_servo_angle(5, 90)    # bottom servo, crosswise (0: left, 180: right)
Channel 4 above is the top servo (the one that tilts the camera up and down).
Channel 5 is the bottom servo (the one that pans the camera left and right).
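As a sanity check of the pulse-width formula in set_servo_angle (my reading of the code, not something stated in the article): at 50 Hz one PWM period is 20,000 µs and the PCA9685 splits it into 4096 ticks, so angle * 11 + 500 is the pulse width in microseconds and 4096 * pulse / 20000 converts it to ticks. For example:

angle = 90
pulse_us = angle * 11 + 500          # 1490 µs, roughly the servo's center position
ticks = 4096 * pulse_us / 20000      # about 305 of the 4096 ticks in a 20 ms period
print(int(ticks))                    # 305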
IV. Camera and image processing
# 1 image process
img, contours = image_processing()
width, height = 160, 120
camera = cv2.VideoCapture(0)
camera.set(3, width)    # property 3: frame width
camera.set(4, height)   # property 4: frame height
1. Open the camera

Open the camera and set the capture frame size.

The reason for the small frame: a small frame keeps processing fast enough for real-time control.
# capture the frames
ret, frame = camera.read()
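Before settling on a resolution, a throughput check like the following can confirm that the small frame really does keep things real-time (a standalone, hypothetical helper, not part of the article's code; run it on its own before the main script opens the camera):

# standalone frame-rate check for a given capture size
import time
import cv2

cam = cv2.VideoCapture(0)
cam.set(3, 160)   # frame width
cam.set(4, 120)   # frame height

start, frames = time.time(), 0
while frames < 100:
    ret, frame = cam.read()
    if not ret:
        break
    frames += 1
print('fps: %.1f' % (frames / (time.time() - start)))
cam.release()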
2. Convert the image to grayscale
# to gray
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)
3. Gaussian blur (denoising)
# gaussian blur
blur = cv2.GaussianBlur(gray, (5, 5), 0)
4. Brightness enhancement

cv2.convertScaleAbs computes alpha * src + beta and saturates the result to 8 bits, so alpha = 1.5 stretches the contrast and beta = 30 lifts the overall brightness.

# brighten: out = 1.5*src + 30, clipped to [0, 255]
blur = cv2.convertScaleAbs(blur, None, 1.5, 30)
5. Binarization

With cv2.THRESH_BINARY_INV, pixels darker than the threshold of 150 become white (255) and everything else becomes black, so the tracker picks out dark objects against a brighter background.

# to binary (inverted threshold at 150)
ret, binary = cv2.threshold(blur, 150, 255, cv2.THRESH_BINARY_INV)
cv2.imshow('binary', binary)
6. Morphological closing

A closing operation (dilation followed by erosion) with a 17x17 rectangular kernel fills small holes and gaps inside the white regions so that each object becomes one solid blob.

# close: fill small holes inside the white regions
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 17))
close = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
cv2.imshow('close', close)
7. Extract contours

# get contours (mode 1 == cv2.RETR_LIST)
binary_c, contours, hierarchy = cv2.findContours(close, 1, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(image, contours, -1, (255, 0, 255), 2)
cv2.imshow('image', image)
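Note: the three-value unpacking above assumes OpenCV 3.x; on OpenCV 4.x, cv2.findContours returns only (contours, hierarchy). A version-agnostic variant (a small adjustment on my part, not from the original code; close is the image produced by the previous step):

# findContours returns (image, contours, hierarchy) on OpenCV 3.x but only
# (contours, hierarchy) on 4.x; the contour list is always second-to-last
ret = cv2.findContours(close, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
contours = ret[-2]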
Code
def image_processing():
    # capture a frame
    ret, frame = camera.read()
    # image is the frame that the contours will be drawn on
    image = frame
    cv2.imshow('frame', frame)
    # to gray
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('gray', gray)
    # gaussian blur
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    # brighten
    blur = cv2.convertScaleAbs(blur, None, 1.5, 30)
    # to binary
    ret, binary = cv2.threshold(blur, 150, 255, cv2.THRESH_BINARY_INV)
    cv2.imshow('binary', binary)
    # close
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 17))
    close = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
    cv2.imshow('close', close)
    # get contours (OpenCV 3.x signature)
    binary_c, contours, hierarchy = cv2.findContours(close, 1, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(image, contours, -1, (255, 0, 255), 2)
    cv2.imshow('image', image)
    return frame, contours
V. Get the coordinates of the largest contour
Because several objects may appear at once, we only track the largest one (deep-learning classification could tell them apart, but I haven't got that far yet; I'll add it once I have) and obtain its coordinates.
# 2 get coordinates
x, y = get_coord(img, contours)
def get_coord(img, contours):
    image = img.copy()
    try:
        # keep only the largest contour by area
        contour = max(contours, key=cv2.contourArea)
        cv2.drawContours(image, contour, -1, (255, 0, 255), 2)
        cv2.imshow('new_frame', image)
        # centroid from the image moments
        m = cv2.moments(contour)
        x = int(m['m10'] / m['m00'])
        y = int(m['m01'] / m['m00'])
        print(x, y)
        return x, y
    except:
        print('no objects')
        return 0, 0
get_coord returns the coordinates of the largest contour:
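For reference, the coordinates are the contour's centroid, derived from the image moments computed by cv2.moments (standard OpenCV behaviour, not something specific to this project): x = m10 / m00 and y = m01 / m00, where m00 is the contour area and m10, m01 are the first-order moments along x and y.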
VI. Movement
Based on the coordinates fed back, the program works out where the object is and drives the car accordingly (the zones are summarized after the four cases below).
# 3 move
move(x, y)
1. No contour detected (stand still)
if x == 0 and y == 0:
    stop()
2. Go forward

An object is detected and it lies straight ahead (in the middle 1/2 of the frame), so the car drives forward.
# go ahead
elif width/4 < x and x < (width - width/4):
    front(70)
3. Turn left

The object is in the left 1/4 of the frame.
# left
elif x < width/4:
    left(50)
4. Turn right

The object is in the right 1/4 of the frame.
# right
elif x > (width - width/4):
    right(50)
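Putting the cases together: with width = 160 the frame is split at x = 40 and x = 120 (width/4 and width - width/4), and x == 0 with y == 0 means "no object", which maps to stop().

x:  0 ......... 40 ................ 120 ......... 160
    |  left(50)  |     front(70)     |  right(50)  |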
Code
def move(x, y):
    global second
    # stop: no object found
    if x == 0 and y == 0:
        stop()
    # go ahead: object in the middle half of the frame
    elif width/4 < x and x < (width - width/4):
        front(70)
    # left: object in the left quarter
    elif x < width/4:
        left(50)
    # right: object in the right quarter
    elif x > (width - width/4):
        right(50)
Complete code
# object tracking
import RPi.GPIO as GPIO
import time
import Adafruit_PCA9685
import numpy as np
import cv2

second = 0

width, height = 160, 120
camera = cv2.VideoCapture(0)
camera.set(3, width)
camera.set(4, height)

# BCM pin numbers
l_motor = 18
left_front = 22
left_back = 27
r_motor = 23
right_front = 25
right_back = 24

def motor_init():
    global l_motor, r_motor
    l_motor = GPIO.PWM(l_motor, 100)
    r_motor = GPIO.PWM(r_motor, 100)
    l_motor.start(0)
    r_motor.start(0)

def direction_init():
    GPIO.setup(left_back, GPIO.OUT)
    GPIO.setup(left_front, GPIO.OUT)
    GPIO.setup(l_motor, GPIO.OUT)
    GPIO.setup(right_front, GPIO.OUT)
    GPIO.setup(right_back, GPIO.OUT)
    GPIO.setup(r_motor, GPIO.OUT)

def servo_init():
    global pwm_servo
    pwm_servo = Adafruit_PCA9685.PCA9685()

def init():
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    direction_init()
    servo_init()
    motor_init()

def front(speed):
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 1)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 1)
    GPIO.output(right_back, 0)

def back(speed):
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 1)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 1)

def left(speed):
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 1)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 1)
    GPIO.output(right_back, 0)

def right(speed):
    l_motor.ChangeDutyCycle(speed)
    GPIO.output(left_front, 1)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(speed)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 1)

def stop():
    l_motor.ChangeDutyCycle(0)
    GPIO.output(left_front, 0)
    GPIO.output(left_back, 0)
    r_motor.ChangeDutyCycle(0)
    GPIO.output(right_front, 0)
    GPIO.output(right_back, 0)

def set_servo_angle(channel, angle):
    angle = 4096 * ((angle * 11) + 500) / 20000
    pwm_servo.set_pwm_freq(50)   # servo frequency: 50 Hz
    pwm_servo.set_pwm(channel, 0, int(angle))

def image_processing():
    # capture a frame
    ret, frame = camera.read()
    image = frame
    cv2.imshow('frame', frame)
    # to gray
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('gray', gray)
    # gaussian blur
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    # brighten
    blur = cv2.convertScaleAbs(blur, None, 1.5, 30)
    # to binary
    ret, binary = cv2.threshold(blur, 150, 255, cv2.THRESH_BINARY_INV)
    cv2.imshow('binary', binary)
    # close
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 17))
    close = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
    cv2.imshow('close', close)
    # get contours (OpenCV 3.x signature)
    binary_c, contours, hierarchy = cv2.findContours(close, 1, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(image, contours, -1, (255, 0, 255), 2)
    cv2.imshow('image', image)
    return frame, contours

def get_coord(img, contours):
    image = img.copy()
    try:
        contour = max(contours, key=cv2.contourArea)
        cv2.drawContours(image, contour, -1, (255, 0, 255), 2)
        cv2.imshow('new_frame', image)
        # centroid from the image moments
        m = cv2.moments(contour)
        x = int(m['m10'] / m['m00'])
        y = int(m['m01'] / m['m00'])
        print(x, y)
        return x, y
    except:
        print('no objects')
        return 0, 0

def move(x, y):
    global second
    # stop
    if x == 0 and y == 0:
        stop()
    # go ahead
    elif width/4 < x and x < (width - width/4):
        front(70)
    # left
    elif x < width/4:
        left(50)
    # right
    elif x > (width - width/4):
        right(50)

if __name__ == '__main__':
    init()
    set_servo_angle(4, 110)   # top servo, lengthwise  (0: back, 180: front)
    set_servo_angle(5, 90)    # bottom servo, crosswise (0: left, 180: right)
    while 1:
        # 1 image process
        img, contours = image_processing()
        # 2 get coordinates
        x, y = get_coord(img, contours)
        # 3 move
        move(x, y)
        # waitKey is required, otherwise the camera windows won't update
        if cv2.waitKey(1) & 0xFF == ord('q'):
            stop()
            GPIO.cleanup()
            break
        #front(50)
        #back(50)
        #left(50)
        #right(50)
        #time.sleep(1)
        #stop()
The detection is based on the largest contour rather than deep-learning classification, so it is easily confused by other objects; I will keep improving it after I have studied deep learning. Comments and ideas from readers are welcome.