Python AI: A Detailed Look at Moving Object Detection with Gaussian Mixture Models
[AI Project] Moving Object Detection with a Gaussian Mixture Model
This project detects moving people or objects in a video by separating the moving foreground from the background.
Let's get rolling, folks!!!
Original video
Extraction with the Gaussian algorithm
import cv2
import numpy as np


# A single Gaussian component of the per-pixel mixture model.
class gaussian:
    def __init__(self):
        self.mean = np.zeros((1, 3))
        self.covariance = 0
        self.weight = 0
        self.next = None
        self.previous = None


# Per-pixel node holding the linked list of Gaussian components.
class node:
    def __init__(self):
        self.pixel_s = None
        self.pixel_r = None
        self.no_of_components = 0
        self.next = None


class node1:
    def __init__(self):
        self.gauss = None
        self.no_of_comp = 0
        self.next = None


covariance0 = 11.0


def create_gaussian(info1, info2, info3):
    ptr = gaussian()
    if ptr is not None:
        ptr.mean[0, 0] = info1
        ptr.mean[0, 1] = info2
        ptr.mean[0, 2] = info3
        ptr.covariance = covariance0
        ptr.weight = 0.002
        ptr.next = None
        ptr.previous = None
    return ptr


def create_node(info1, info2, info3):
    n_ptr = node()
    if n_ptr is not None:
        n_ptr.next = None
        n_ptr.no_of_components = 1
        n_ptr.pixel_s = n_ptr.pixel_r = create_gaussian(info1, info2, info3)
    return n_ptr


list_node = []


def insert_end_node(n):
    list_node.append(n)


list_gaussian = []


def insert_end_gaussian(n):
    list_gaussian.append(n)


def delete_gaussian(n):
    list_gaussian.remove(n)


# Simple running-average background model used for the live preview.
class process:
    def __init__(self, alpha, firstframe):
        self.alpha = alpha
        self.background = firstframe

    def get_value(self, frame):
        self.background = frame * self.alpha + self.background * (1 - self.alpha)
        return cv2.absdiff(self.background.astype(np.uint8), frame)


def denoise(frame):
    frame = cv2.medianBlur(frame, 5)
    frame = cv2.GaussianBlur(frame, (5, 5), 0)
    return frame


capture = cv2.VideoCapture('1.mp4')
ret, orig_frame = capture.read()
if ret is True:
    value1 = process(0.1, denoise(orig_frame))
    run = True
else:
    run = False

while run:
    ret, frame = capture.read()
    value = False
    if ret is True:
        cv2.imshow('input', denoise(frame))
        grayscale = value1.get_value(denoise(frame))
        ret, mask = cv2.threshold(grayscale, 15, 255, cv2.THRESH_BINARY)
        cv2.imshow('mask', mask)
        key = cv2.waitKey(10) & 0xff
    else:
        break
    if key == 27:
        break

# Full per-pixel mixture-of-Gaussians update (only entered when value is True).
if value == True:
    orig_frame = cv2.resize(orig_frame, (340, 260), interpolation=cv2.INTER_CUBIC)
    orig_frame = cv2.cvtColor(orig_frame, cv2.COLOR_BGR2GRAY)
    orig_image_row = len(orig_frame)
    orig_image_col = len(orig_frame[0])
    bin_frame = np.zeros((orig_image_row, orig_image_col))
    value = []
    for i in range(0, orig_image_row):
        for j in range(0, orig_image_col):
            n_ptr = create_node(orig_frame[i][0], orig_frame[i][1], orig_frame[i][2])
            if n_ptr is not None:
                n_ptr.pixel_s.weight = 1.0
                insert_end_node(n_ptr)
            else:
                print("error")
                exit(0)

    nl = orig_image_row
    nc = orig_image_col

    dell = np.array((1, 3))
    mal_dist = 0.0
    temp_cov = 0.0
    alpha = 0.002
    ct = 0.05
    cf = 0.1
    cfbar = 1.0 - cf
    alpha_bar = 1.0 - alpha
    prune = -alpha * ct
    cthr = 0.00001
    var = 0.0
    mug = 0.0
    mur = 0.0
    mub = 0.0
    dr = 0.0
    db = 0.0
    dg = 0.0
    rval = 0.0
    gval = 0.0
    bval = 0.0

    while 1:
        duration3 = 0.0
        count = 0
        count1 = 0
        list_node1 = list_node
        counter = 0
        duration = cv2.getTickCount()
        for i in range(0, nl):
            r_ptr = orig_frame[i]
            b_ptr = bin_frame[i]
            for j in range(0, nc):
                sum = 0.0
                sum1 = 0.0
                close = False
                background = 0
                rval = r_ptr[0][0]
                gval = r_ptr[0][0]
                bval = r_ptr[0][0]
                start = list_node1[counter].pixel_s
                rear = list_node1[counter].pixel_r
                ptr = start
                temp_ptr = None
                # Cap the number of Gaussian components per pixel at 4.
                if list_node1[counter].no_of_components > 4:
                    delete_gaussian(rear)
                    list_node1[counter].no_of_components -= 1
                for k in range(0, list_node1[counter].no_of_components):
                    weight = ptr.weight
                    mult = alpha / weight
                    weight = weight * alpha_bar + prune
                    if close == False:
                        mur = ptr.mean[0, 0]
                        mug = ptr.mean[0, 1]
                        mub = ptr.mean[0, 2]
                        dr = rval - mur
                        dg = gval - mug
                        db = bval - mub
                        var = ptr.covariance
                        mal_dist = dr * dr + dg * dg + db * db
                        # Pixel explained by a high-weight component -> background.
                        if (sum < cfbar) and (mal_dist < 16.0 * var * var):
                            background = 255
                        # Matching component: update weight, mean and covariance.
                        if mal_dist < (9.0 * var * var):
                            weight = weight + alpha
                            if mult > 20.0 * alpha:
                                mult = 20.0 * alpha
                            close = True
                            ptr.mean[0, 0] = mur + mult * dr
                            ptr.mean[0, 1] = mug + mult * dg
                            ptr.mean[0, 2] = mub + mult * db
                            temp_cov = var + mult * (mal_dist - var)
                            if temp_cov < 5.0:
                                ptr.covariance = 5.0
                            elif temp_cov > 20.0:
                                ptr.covariance = 20.0
                            else:
                                ptr.covariance = temp_cov
                            temp_ptr = ptr
                    # Prune components whose weight has decayed too far.
                    if weight < -prune:
                        ptr = delete_gaussian(ptr)
                        weight = 0
                        list_node1[counter].no_of_components -= 1
                    else:
                        sum += weight
                        ptr.weight = weight
                    ptr = ptr.next
                # No component matched: create a new Gaussian for this pixel.
                if close == False:
                    ptr = gaussian()
                    ptr.weight = alpha
                    ptr.mean[0, 0] = rval
                    ptr.mean[0, 1] = gval
                    ptr.mean[0, 2] = bval
                    ptr.covariance = covariance0
                    ptr.next = None
                    ptr.previous = None
                    insert_end_gaussian(ptr)
                    list_gaussian.append(ptr)
                    temp_ptr = ptr
                    list_node1[counter].no_of_components += 1
                # Re-normalise the component weights of this pixel.
                ptr = start
                while ptr != None:
                    ptr.weight = ptr.weight / sum
                    ptr = ptr.next
                # Bubble the updated component towards the front so the list
                # stays sorted by weight.
                while temp_ptr != None and temp_ptr.previous != None:
                    if temp_ptr.weight <= temp_ptr.previous.weight:
                        break
                    else:
                        next = temp_ptr.next
                        previous = temp_ptr.previous
                        if start == previous:
                            start = temp_ptr
                        previous.next = next
                        temp_ptr.previous = previous.previous
                        temp_ptr.next = previous
                        if previous.previous != None:
                            previous.previous.next = temp_ptr
                        if next != None:
                            next.previous = previous
                        else:
                            rear = previous
                        previous.previous = temp_ptr
                    temp_ptr = temp_ptr.previous
                list_node1[counter].pixel_s = start
                list_node1[counter].pixel_r = rear
                counter = counter + 1

capture.release()
cv2.destroyAllWindows()
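In this hand-rolled version, every pixel keeps a short linked list of Gaussian components sorted by weight. Each incoming pixel value is tested against the components with a squared-distance threshold: a match updates that component's weight, mean and covariance, no match spawns a new low-weight component, and components whose weight decays below the pruning threshold are dropped. A pixel counts as background only when it is explained by one of the high-weight components; everything else ends up in the foreground mask.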
createBackgroundSubtractorMOG2
- Background subtraction (BS) is a common and widely used technique for generating a foreground mask (i.e., a binary image containing the pixels that belong to moving objects in the scene) with a static camera.
- As the name suggests, BS computes the foreground mask by performing a subtraction between the current frame and a background model that contains the static part of the scene, or more generally, everything that can be considered background given the characteristics of the observed scene.
Background modeling consists of two main steps:
- Background initialization;
- Background update.
In the first step, an initial model of the background is computed; in the second, that model is updated to adapt to possible changes in the scene.
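As a minimal sketch of these two steps (an illustration, not code from the original article), the background can be initialized from the first frame and then updated with OpenCV's running-average helper cv2.accumulateWeighted, assuming the same '1.mp4' test clip used elsewhere in this post:

import cv2
import numpy as np

cap = cv2.VideoCapture('1.mp4')
ret, frame = cap.read()
if not ret:
    raise SystemExit('cannot read the input video')

# Step 1: background initialization - start from the first frame.
background = np.float32(frame)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Step 2: background update - blend the current frame into the model.
    cv2.accumulateWeighted(frame, background, 0.05)
    # Foreground mask: pixels that differ strongly from the current background.
    diff = cv2.absdiff(frame, cv2.convertScaleAbs(background))
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)
    cv2.imshow('mask', mask)
    if cv2.waitKey(30) & 0xFF == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()

The learning rate (0.05 here) controls how quickly the model absorbs scene changes: larger values adapt faster but also let slow-moving objects fade into the background.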
import cv2

# Build a VideoCapture object for the input clip.
cap = cv2.VideoCapture('1.mp4')

# Create a background subtractor.
# createBackgroundSubtractorMOG2() accepts a detectShadows argument:
# detectShadows=True detects shadows, False does not. The default is True.
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()          # read the video
    if not ret:
        break
    fgmask = fgbg.apply(frame)       # background subtraction
    cv2.imshow('frame', fgmask)      # show the segmentation result
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
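The raw MOG2 mask is often noisy, and with detectShadows=True it also contains gray shadow pixels (value 127). One possible post-processing step, not part of the original article, is to threshold the shadows away and clean the mask with a small morphological opening:

import cv2

cap = cv2.VideoCapture('1.mp4')
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(frame)
    # MOG2 marks shadow pixels with the value 127; keep only confident
    # foreground pixels (value 255).
    _, fgmask = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
    # A morphological opening removes small specks of noise from the mask.
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    cv2.imshow('clean mask', fgmask)
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()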
Summary
Like and comment, folks!!!
This concludes the article on moving object detection with Gaussian mixture models in Python. For more on Gaussian mixture models and motion detection in Python, search the earlier articles or browse the related articles below. We hope you will keep supporting us!