I looked for references to try out object tracking with Python + OpenCV.
- The basic approach seems to be tracking an object with MeanShift, and using CamShift as an improved variant that also adapts the window size and orientation (a short sketch of the API difference follows below).
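Before the full scripts, here is a minimal sketch of my own (not from the referenced sources) showing how the two calls differ: cv2.meanShift only returns a shifted fixed-size window, while cv2.CamShift also returns a rotated rectangle. The back-projection image and the starting window here are assumed placeholders.

import cv2

# Stop after 10 iterations or when the window moves less than 1 px.
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

def track_step(backproj, win):
    # backproj: single-channel back-projection image, win: (x, y, w, h) tuple (assumed inputs).
    # meanShift shifts the fixed-size window toward the density peak.
    iters, win_ms = cv2.meanShift(backproj, win, term_crit)
    # CamShift additionally returns a rotated rect (center, size, angle),
    # so the tracked box can grow, shrink, and rotate with the object.
    rot_rect, win_cs = cv2.CamShift(backproj, win, term_crit)
    return win_ms, rot_rect, win_cs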
import cv2
import numpy as np

capture = cv2.VideoCapture("TestVideo.mp4")
histogram = None
# Stop CamShift after 15 iterations or when the window moves less than 0.5 px.
terminal = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 15, 0.5)

while capture.isOpened():
    ret, frame = capture.read()
    if not ret:
        break
    draw = frame.copy()

    if histogram is not None:
        # Back-project the ROI's hue histogram onto the frame, then let
        # CamShift move/resize the search window toward the density peak.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], histogram, [0, 180], 1)
        ret, (x, y, w, h) = cv2.CamShift(dst, (x, y, w, h), terminal)
        cv2.rectangle(draw, (x, y), (x + w, y + h), (0, 255, 0), 2)
        result = np.hstack((draw, cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)))
    else:
        cv2.putText(draw, "Target", (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                    1, (0, 0, 255), 1, cv2.LINE_AA)
        result = draw

    cv2.imshow("CamShift Tracking", result)
    key = cv2.waitKey(10) & 0xff
    if key == 27:          # Esc: quit
        break
    elif key == ord(' '):  # Space: select a new ROI to track
        x, y, w, h = cv2.selectROI("CamShift Tracking", frame, False)
        if w and h:
            # Build a hue histogram of the selected ROI as the tracking model.
            roi = frame[y:y + h, x:x + w]
            roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            histogram = cv2.calcHist([roi], [0], None, [180], [0, 180])
            cv2.normalize(histogram, histogram, 0, 255, cv2.NORM_MINMAX)
        else:
            histogram = None

capture.release()
cv2.destroyAllWindows()
import cv2
import numpy as np

roi = None
drag_start = None
mouse_status = 0
tracking_start = False

def onMouse(event, x, y, flags, param=None):
    # Mouse callback: drag with the left button to define the ROI rectangle.
    global roi, drag_start, mouse_status, tracking_start
    if event == cv2.EVENT_LBUTTONDOWN:
        drag_start = (x, y)
        roi = (0, 0, 0, 0)        # reset in case the ROI is being re-selected
        tracking_start = False
    elif event == cv2.EVENT_MOUSEMOVE:
        if flags & cv2.EVENT_FLAG_LBUTTON:
            xmin = min(x, drag_start[0])
            ymin = min(y, drag_start[1])
            xmax = max(x, drag_start[0])
            ymax = max(y, drag_start[1])
            roi = (xmin, ymin, xmax, ymax)
            mouse_status = 1      # dragging
    elif event == cv2.EVENT_LBUTTONUP:
        mouse_status = 2          # selection finished

cv2.namedWindow('meanShift tracking')
cv2.setMouseCallback('meanShift tracking', onMouse)

cap = cv2.VideoCapture('TestVideo.mp4')
if not cap.isOpened():
    print('Error opening video')

height, width = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                 int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
roi_mask = np.zeros((height, width), dtype=np.uint8)
term_crit = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 10, 1)

while True:
    ret, meanframe = cap.read()
    if not ret:
        break
    camframe = meanframe.copy()
    hsv = cv2.cvtColor(meanframe, cv2.COLOR_BGR2HSV)
    # Checks if array elements lie between the elements of two other arrays.
    mask = cv2.inRange(hsv, (0., 60., 32.), (180., 255., 255.))

    if mouse_status == 1:
        x1, y1, x2, y2 = roi
        cv2.rectangle(meanframe, (x1, y1), (x2, y2), (255, 0, 0), 2)

    if mouse_status == 2:
        print('Initializing...', end=' ')
        mouse_status = 0
        x1, y1, x2, y2 = roi
        if (np.abs(x1 - x2) < 10) or (np.abs(y1 - y2) < 10):
            print('failed. Too small ROI. (Width: %d, Height: %d)'
                  % (np.abs(x1 - x2), np.abs(y1 - y2)))
            continue
        mask_roi = mask[y1:y2, x1:x2]
        hsv_roi = hsv[y1:y2, x1:x2]
        hist_roi = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
        cv2.normalize(hist_roi, hist_roi, 0, 255, cv2.NORM_MINMAX)
        track_window1 = (x1, y1, x2 - x1, y2 - y1)   # window for meanShift
        track_window2 = (x1, y1, x2 - x1, y2 - y1)   # window for CamShift
        tracking_start = True
        print('Done.')

    if tracking_start:
        # Calculates the back projection of a histogram.
        backP = cv2.calcBackProject([hsv], [0], hist_roi, [0, 180], 1)
        backP &= mask
        cv2.imshow('backP', backP)

        # Finds an object on a back projection image.
        ret, track_window1 = cv2.meanShift(backP, track_window1, term_crit)
        x, y, w, h = track_window1
        cv2.rectangle(meanframe, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # Finds an object center, size, and orientation.
        track_box, track_window2 = cv2.CamShift(backP, track_window2, term_crit)
        x, y, w, h = track_window2
        cv2.rectangle(camframe, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.ellipse(camframe, track_box, (0, 255, 255), 2)
        pts = cv2.boxPoints(track_box)  # Finds the four vertices of a rotated rect.
        pts = np.int32(pts)             # np.int0 was removed in newer NumPy versions
        dst = cv2.polylines(camframe, [pts], True, (0, 0, 255), 2)

    cv2.imshow('meanShift tracking', meanframe)
    cv2.imshow('CamShift tracking', camframe)
    key = cv2.waitKey(25)
    if key == 27:
        break

if cap.isOpened():
    cap.release()
cv2.destroyAllWindows()
Code I found:
gist.github.com/pknowledge/70ea4e0aa30f728eb2a7235edd41b99a
import numpy as np
import cv2 as cv

cap = cv.VideoCapture('TestVideo.mp4')

# take first frame of the video
ret, frame = cap.read()

# setup initial location of window
x, y, width, height = 880, 185, 50, 50
track_window = (x, y, width, height)

# set up the ROI for tracking
roi = frame[y:y + height, x:x + width]
hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX)

# Setup the termination criteria: either 10 iterations or move by at least 1 pt
term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)
cv.imshow('roi', roi)

while True:
    ret, frame = cap.read()
    if ret:
        hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        dst = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        # apply meanshift to get the new location
        ret, track_window = cv.meanShift(dst, track_window, term_crit)

        # Draw it on image
        x, y, w, h = track_window
        final_image = cv.rectangle(frame, (x, y), (x + w, y + h), 255, 3)
        cv.imshow('dst', dst)
        cv.imshow('final_image', final_image)

        k = cv.waitKey(30) & 0xff
        if k == 27:
            break
    else:
        break

cap.release()
cv.destroyAllWindows()
References I looked at first:
deep-learning-study.tistory.com/275
- MeanShift
- Histogram backprojection
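Since every example above depends on histogram backprojection, here is a minimal sketch of my own (not from the referenced post) of just the backprojection step; the image path and ROI coordinates are assumed placeholders.

import cv2
import numpy as np

frame = cv2.imread('frame.png')    # any BGR image (assumed path)
x, y, w, h = 100, 100, 50, 50      # assumed ROI inside the image

# Build a hue histogram from the ROI.
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roi_hist = cv2.calcHist([hsv[y:y + h, x:x + w]], [0], None, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Each pixel in back_proj becomes the (scaled) histogram bin value of its hue,
# i.e. how strongly that pixel's colour matches the ROI. MeanShift/CamShift
# then climb toward the densest region of this probability map.
back_proj = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
cv2.imshow('back projection', back_proj)
cv2.waitKey(0)
cv2.destroyAllWindows()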