OpenCV提供了一个称为BackgroundSubtractor的类,包括:KNN、MOG2、GMG
1.MOG2背景分割器
import numpy as np
import cv2

# MOG2 background subtractor demo: the model learns the static background
# frame by frame and outputs a foreground mask for moving objects.
cap = cv2.VideoCapture(0)
mog = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera unplugged or stream ended; mog.apply(None) would crash.
        break
    fgmask = mog.apply(frame)
    cv2.imshow('frame', fgmask)
    # Exit on ESC. The original (commented-out) test `waitKey(30) & 0xff`
    # broke on the very first frame: waitKey returns -1 when no key is
    # pressed, and -1 & 0xff == 255 is truthy.
    if cv2.waitKey(30) & 0xff == 27:
        break
cap.release()
cv2.destroyAllWindows()
2.KNN分割器目标检测
import cv2
import numpy as np

# KNN background subtractor with shadow detection enabled: shadow pixels
# receive a distinct gray label instead of pure-white foreground.
knn = cv2.createBackgroundSubtractorKNN(detectShadows=True)
# Elliptical kernel used to dilate the binary mask so nearby fragments
# merge into a single contour.
es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 12))
camera = cv2.VideoCapture("movie.mpg")


def drawCnt(fn, cnt):
    """Draw a cyan bounding rectangle on `fn` for contours whose area
    exceeds 1400 px² (filters out small noise blobs)."""
    if cv2.contourArea(cnt) > 1400:
        (x, y, w, h) = cv2.boundingRect(cnt)
        cv2.rectangle(fn, (x, y), (x + w, y + h), (255, 255, 0), 2)


while True:
    ret, frame = camera.read()
    if not ret:
        break
    fg = knn.apply(frame.copy())
    # Mask the original frame with the foreground mask, then clean up:
    # blur -> threshold -> dilate to obtain solid blobs for contouring.
    fg_bgr = cv2.cvtColor(fg, cv2.COLOR_GRAY2BGR)
    bw_and = cv2.bitwise_and(fg_bgr, frame)
    draw = cv2.cvtColor(bw_and, cv2.COLOR_BGR2GRAY)
    draw = cv2.GaussianBlur(draw, (21, 21), 0)
    draw = cv2.threshold(draw, 10, 255, cv2.THRESH_BINARY)[1]
    draw = cv2.dilate(draw, es, iterations=2)
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x
    # but only (contours, hierarchy) in 4.x; slicing the last two elements
    # works with both, whereas the original 3-value unpacking raises
    # ValueError on OpenCV 4.x.
    contours, hierarchy = cv2.findContours(
        draw.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    for c in contours:
        drawCnt(frame, c)
    cv2.imshow("motion detection", frame)
    # Display at ~12 fps; quit when 'q' is pressed.
    if cv2.waitKey(int(1000 / 12)) & 0xff == ord("q"):
        break
camera.release()
cv2.destroyAllWindows()
效果图:
3.均值漂移(Meanshift)和CAMShift
Meanshift是一种目标跟踪算法,该算法寻找概率函数离散样本的最大密度,并且重新计算在下一帧中的最大密度,该算法给出了目标的移动方向。
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
# Capture the first frame to initialize the tracking window.
ret, frame = cap.read()
if not ret:
    # Fail fast instead of slicing a None frame below.
    raise IOError("cannot read initial frame from capture device")
# Initial ROI as row / height / column / width (hard-coded).
r, h, c, w = 10, 200, 10, 200
# meanShift expects the window as (x, y, w, h).
track_window = (c, r, w, h)
# Extract the ROI whose color distribution will be tracked.
roi = frame[r:r + h, c:c + w]
# BUG FIX: the histogram must be computed from the ROI, not the whole
# frame — the original converted `frame`, so back-projection modeled the
# entire scene's colors instead of the target's.
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# Mask keeping only the hue/saturation/value range we want to track.
mask = cv2.inRange(hsv_roi, np.array((100., 30., 32.)),
                   np.array((180., 120., 255.)))
# Hue histogram of the ROI (hue range is 0-180 in OpenCV).
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
# Termination criteria: 10 iterations or a shift of less than 1 pixel.
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
while True:
    ret, frame = cap.read()
    if ret:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Back-project the ROI histogram: each pixel gets the probability
        # of belonging to the tracked color distribution.
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        # Shift the window to the local density maximum.
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)
        # Draw the updated window on the frame.
        x, y, w, h = track_window
        img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
        cv2.imshow('img2', img2)
        k = cv2.waitKey(60) & 0xff
        if k == 27:  # ESC quits
            break
    else:
        break
cv2.destroyAllWindows()
cap.release()
CAMShift算法是对meanshift算法的改进版本:
由于meanshift算法窗口的大小并不能与被跟踪帧中的目标大小一起变化,所以Gary Bradski在1998年发表了一篇论文来提高均值漂移的精度,并称为 连续自适应均值漂移(Continuously Adaptive Meanshift, CAMShift)算法。该算法与均值漂移算法相似,但是当均值漂移收敛时会调节跟踪窗口尺寸,并根据目标的具体朝向来绘制旋转矩形,这个矩形会与被跟踪对象一起旋转。
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
# Take the first frame of the video to set up the tracker.
ret, frame = cap.read()
if not ret:
    # Fail fast instead of slicing a None frame below.
    raise IOError("cannot read initial frame from capture device")
# Initial window location: row / height / column / width (hard-coded).
r, h, c, w = 300, 200, 400, 300
track_window = (c, r, w, h)
roi = frame[r:r + h, c:c + w]
# BUG FIX: build the histogram from the ROI, not the whole frame — the
# original converted `frame`, so the model described the entire scene.
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((100., 30., 32.)),
                   np.array((180., 120., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
# Termination criteria: 10 iterations or a shift of less than 1 pixel.
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
while True:
    ret, frame = cap.read()
    if ret:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        # CamShift additionally adapts the window size/orientation; `ret`
        # is a rotated rectangle ((cx, cy), (w, h), angle).
        ret, track_window = cv2.CamShift(dst, track_window, term_crit)
        # Convert the rotated rect to 4 corner points and draw it.
        pts = cv2.boxPoints(ret)
        # np.int0 was removed in NumPy 2.0; np.intp is the portable alias.
        pts = pts.astype(np.intp)
        img2 = cv2.polylines(frame, [pts], True, 255, 2)
        cv2.imshow('img2', img2)
        k = cv2.waitKey(60) & 0xff
        if k == 27:  # ESC quits
            break
    else:
        break
cv2.destroyAllWindows()
cap.release()