본문 바로가기
개발

파이썬 opencv를 이용한 multi tracking 예제 (Blob, 객체 감지)

by 화악 2022. 5. 26.
반응형

이번에는 꽤나 긴 코드로 파이썬으로 객체감지를 하고 multi tracking 고속도로 자동차를 인식후 line을 넘어가면 카운트하는 기초부분의 blob tracking의 예제를 소개하려고 합니다. 사실 이부분은 파이썬을 처음 사용하시거나 입문 단계이면 좀 어려운 코드일 수 있습니다. C++로 작성된 blob tracking github 소스를  multi tracking을 위해 제가 python으로 변환한 예제코드입니다. 이해보다는 흐름을 보고 직접 테스트해보고 여러방향으로 변환해 사용하시기 바랍니다.

 

 

 

코드


 

blob class 

import cv2
import sys
import random
import math
# Open the input video; frames are read in the main loop below.
cap = cv2.VideoCapture('test44.mp4')
blobs= []  # every blob ever tracked, grown/updated across frames

firstFrame= True  # first iteration adopts all detections as new tracks

# BGR color constants used for drawing.
SCALAR_BLACK = (0,0,0)
SCALAR_WHITE = (255,255,255)
SCALAR_BLUE = (255,0,0)
SCALAR_GREEN = (0,200,0)
SCALAR_RED = (0,0,255)

class blob:
    """A tracked moving object detected in a video frame.

    Wraps the bounding rectangle of a contour and keeps a history of
    center positions so the next position can be extrapolated for
    frame-to-frame matching.

    Fixes vs. the original: the block of ``global`` declarations in
    ``__init__`` was removed — it served no purpose except leaking
    ``cx``/``cy`` into the module namespace — and the five copy-pasted
    prediction branches were collapsed into one equivalent loop.
    """

    def __init__(self, contour):
        """Initialize blob geometry from an OpenCV contour.

        Args:
            contour: point array accepted by ``cv2.boundingRect``
                (e.g. the output of ``cv2.convexHull``).
        """
        # Axis-aligned bounding rectangle [x, y, w, h] of the contour.
        x, y, w, h = cv2.boundingRect(contour)
        self.boundingRect = [x, y, w, h]

        # Center of the rect; (2x+w)/2 == x + w/2, kept as floats.
        cx = (2 * x + w) / 2
        cy = (2 * y + h) / 2
        self.centerPosition = [cx, cy]

        # History of center positions, one entry per matched frame.
        self.centerPositions = [self.centerPosition]

        self.predictedNextPosition = []  # filled by predictNextPosition()
        self.Contour = []                # contour copied in by the matcher

        self.diagonal = math.sqrt(w * w + h * h)  # rect diagonal length
        self.aspectRatio = (w / (h * 1.0))        # width / height ratio
        self.area = w * h

        self.blobTracked = True    # set False once the blob is lost
        self.matchNewBlob = True   # matched to a current-frame blob?
        self.numMatch = 0          # consecutive frames without a match

    def predictNextPosition(self):
        """Extrapolate the next center position from the history.

        Uses a weighted average of up to the last four frame-to-frame
        deltas (most recent delta weighted highest), exactly matching
        the original per-count branches: divisors 1, 3, 6, 10 for
        1..4 usable deltas.
        """
        positions = self.centerPositions
        n = len(positions)
        last = positions[-1]

        if n == 1:
            # No motion history yet: predict "stays in place".
            self.predictedNextPosition = [last[0], last[1]]
            return

        # Use at most the 4 most recent deltas, weighted k, k-1, ..., 1.
        k = min(n - 1, 4)
        sumOfXChanges = 0
        sumOfYChanges = 0
        for i in range(1, k + 1):
            weight = k + 1 - i  # most recent delta gets weight k
            sumOfXChanges += (positions[n - i][0] - positions[n - i - 1][0]) * weight
            sumOfYChanges += (positions[n - i][1] - positions[n - i - 1][1]) * weight
        divisor = k * (k + 1) // 2  # triangular number: 1, 3, 6, 10
        deltaX = sumOfXChanges / divisor
        deltaY = sumOfYChanges / divisor
        self.predictedNextPosition = [last[0] + deltaX, last[1] + deltaY]

먼저 객체를 정의하는 class 입니다. blob을 정의하고 다음 포지션을 계산하는 함수를 class 내부에 선언합니다.

 

blob tracking 함수들

 

def matchCurrentFrameBlobsToExistingBlobs(blobs,currentFrameBlobs):
    """Match this frame's detections against the tracked blobs.

    Each tracked blob first predicts where it should appear next; every
    current-frame blob is then assigned to the nearest prediction if the
    distance is under half of its own diagonal, otherwise it starts a new
    track.  Tracks unmatched for 10 consecutive frames stop being tracked.
    Returns the updated blob list.
    """
    # Reset the match flags and predict each existing blob's next position.
    for tracked in blobs:
        tracked.matchNewBlob = False
        tracked.predictNextPosition()

    for frameBlob in currentFrameBlobs:
        bestIndex = 0
        bestDistance = 1000.0  # anything farther counts as "no match"
        for index, candidate in enumerate(blobs):
            if candidate.blobTracked:
                # Distance from this detection to the candidate's
                # predicted next position.
                dist = distanceBetweenPoints(frameBlob.centerPositions[-1],
                                             candidate.predictedNextPosition)
                if dist < bestDistance:
                    bestDistance = dist
                    bestIndex = index
        # Accept the match only if the motion is smaller than half the
        # detection's bounding-rect diagonal; otherwise it is a new blob.
        if bestDistance < frameBlob.diagonal * 0.5:
            blobs = addBlobToExistingBlobs(frameBlob, blobs, bestIndex)
        else:
            blobs, frameBlob = addNewBlob(frameBlob, blobs)

    # Age out blobs that found no match this frame.
    for tracked in blobs:
        if not tracked.matchNewBlob:
            tracked.numMatch = tracked.numMatch + 1
        if tracked.numMatch >= 10:
            tracked.blobTracked = False
    return blobs

현재 프레임의 blob과 이전 존재했던 blob 간의 match 후 blob정보를 갱신,추가 하는 함수입니다. multi tracking 이므로 여러 객체를 인식하기에 가장 중요한 함수입니다. 

 

 

def distanceBetweenPoints(pos1,pos2):
    """Euclidean distance between two [x, y] points.

    An empty ``pos2`` is treated as the origin, so in that case the
    distance of ``pos1`` from (0, 0) is returned.
    """
    if pos2 == []:
        dx, dy = pos1[0], pos1[1]
    else:
        dx = pos2[0] - pos1[0]
        dy = pos2[1] - pos1[1]
    return math.sqrt(dx ** 2 + dy ** 2)

 

각 객체들 사이의 거리 계산을 위한 함수입니다. math 라이브러리의 sqrt로 객체 간의 거리를 계산해 return 해줍니다.

 

 

def addBlobToExistingBlobs(currentFrameBlob, blobs, intIndex):
    """Fold a current-frame detection into the tracked blob at intIndex.

    The tracked blob keeps its position history but takes over the fresh
    contour, rectangle and shape measurements, and is re-marked as
    matched and tracked.  Returns the (mutated) blob list.
    """
    matched = blobs[intIndex]
    matched.Contour = currentFrameBlob.Contour
    matched.boundingRect = currentFrameBlob.boundingRect
    # Extend the history with the newest observed center position.
    matched.centerPositions.append(currentFrameBlob.centerPositions[-1])
    matched.diagonal = currentFrameBlob.diagonal
    matched.aspectRatio = currentFrameBlob.aspectRatio
    matched.blobTracked = True
    matched.matchNewBlob = True
    return blobs

이미 존재했던 blob에 프레임당 새 blob 정보를 추가해 갱신하는 함수입니다. index로 새로운 정보를 갱신합니다.

 

def addNewBlob(currentFrameBlob, blobs):
    """Start tracking a brand-new blob.

    BUG FIX: the original ignored its ``Blobs`` parameter and appended to
    the module-level global ``blobs`` instead — it only worked because
    every caller happened to pass that same global list.  The blob is now
    appended to the list actually passed in.

    Returns the updated list and the (marked) blob, the tuple shape the
    matcher expects.
    """
    currentFrameBlob.matchNewBlob = True
    blobs.append(currentFrameBlob)
    return blobs, currentFrameBlob

 

새로운 blob frameblob -> blob으로 정보 갱신하는 함수

 

def drawBlobInfoOnImage(blobs,m1):
    """Draw a blue rectangle on image ``m1`` around each still-tracked blob."""
    for tracked in blobs:
        if tracked.blobTracked:
            x, y, w, h = tracked.boundingRect
            cv2.rectangle(m1, (x, y), (x + w, y + h), SCALAR_BLUE, 2)

인식된 blob frame 영상에 그려주는 함수

 

 

def checkIfBlobsCrossedTheLine(blobs,linePosition,linePosition2,mortarCount):
    """Return (passLinea, passLinea2): whether a tracked blob crossed the
    first / second count line between the previous and current frame.

    A crossing means the blob's center x moved from <= linePosition to
    > linePosition in one frame step; a snapshot image is written each time.

    NOTE(review): ``mortarCount`` is accepted but never used.
    NOTE(review): ``imgPre`` and the global ``hh`` are not defined anywhere
    in this file, so the crossing branch raises NameError when first taken —
    presumably they come from the larger original script; confirm before use.
    """

    passLinea=False
    passLinea2 = False
    global hh  # running counter used to number the saved snapshot files

    for blob in blobs:
        # Only blobs that are still tracked and have at least two recorded
        # positions can be tested for a crossing.
        if (blob.blobTracked == True and len(blob.centerPositions) >= 2):

            prevFrameIndex= len(blob.centerPositions) - 2
            currFrameIndex= len(blob.centerPositions) - 1

            # [-2] is the x coordinate of an [x, y] center position.
            if (passLinea==False and blob.centerPositions[prevFrameIndex][-2] <= linePosition and blob.centerPositions[currFrameIndex][-2] > linePosition) :

                # Save a snapshot of the frame at the moment of crossing.
                cv2.imwrite("TILE" +str(hh)+".jpg", imgPre)

                hh= hh+1
                passLinea = True
                # NOTE(review): the second-line check only runs for wide
                # blobs (w > 400) and only on the same frame the first line
                # was crossed — verify this nesting is intended.
                if (blob.boundingRect[2] > 400):
                    if (passLinea2 == False and blob.centerPositions[prevFrameIndex][-2] <= linePosition2 and
                                blob.centerPositions[currFrameIndex][-2] > linePosition2):
                        passLinea2 = True
                        cv2.imwrite("2CountABefore" + ".jpg", imgPre)
            else:
                passLinea =False
                passLinea2=False


    return passLinea,passLinea2

이전 프레임과 현재프레임 blob의 정보를 비교해 linePosition을 넘은 blob만 카운팅 하는 함수입니다. 예제로 따지면 자동차 수를 count 하는 함수가 되겠네요

 

 

# ---------------------------------------------------------------------------
# Main loop: detect moving objects by frame differencing and track them.
# NOTE(review): cap.read() is called three times per iteration (imgP,
# preimgP, imgReset), so two of every three frames are skipped and
# ``imgReset`` is never used — confirm this is intended.  The return flags
# are ignored, so the loop crashes when the video runs out, and there is
# no quit-key handling.
# ---------------------------------------------------------------------------
while True:


    _, imgP = cap.read()
    _, preimgP = cap.read()

    _ , imgReset =cap.read()

    #imgP= cv2.flip(imgP,3)
    #preimgP=cv2.flip(preimgP,3)

    # Work on fixed-size frames so the pixel thresholds below are
    # independent of the input resolution.
    imgPResize = cv2.resize(imgP, (1000, 1000))
    preimgPResize = cv2.resize(preimgP, (1000, 1000))


    imgPGray = cv2.cvtColor(imgPResize, cv2.COLOR_BGR2GRAY)
    imgPPGray = cv2.cvtColor(preimgPResize, cv2.COLOR_BGR2GRAY)

    # Blur before differencing to suppress pixel noise.
    imgPGaussian = cv2.GaussianBlur(imgPGray, (5, 5), 0)
    imgPPGaussian = cv2.GaussianBlur(imgPPGray, (5, 5), 0)

    # Absolute difference of consecutive frames: moving pixels light up.
    imgPdiff = cv2.absdiff(imgPGaussian, imgPPGaussian)

    _, imgPBinary = cv2.threshold(imgPdiff, 35, 255, cv2.THRESH_BINARY)

    mask = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

    # Morphological passes (dilate twice, erode once, five rounds) to merge
    # fragments of the same moving object into one solid blob.
    for i in range(0, 5):
        imgPBinary=cv2.dilate(imgPBinary,mask,iterations = 1)
        imgPBinary=cv2.dilate(imgPBinary,mask,iterations = 1)
        imgPBinary=cv2.erode(imgPBinary,mask, iterations = 1)

    # NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4.x
    # returns only (contours, hierarchy).
    imageC, contoursP, hierarchy = cv2.findContours(imgPBinary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # extract contours

    imgContour = cv2.drawContours(imgPBinary, contoursP, -1, SCALAR_WHITE, -1)

    # NOTE(review): this loop only keeps the last contour's rect and its
    # drawing call is commented out — effectively dead code.
    for cnt in contoursP:

        x,y,w,h=cv2.boundingRect(cnt)
        #cv2.rectangle(imgPResize,(x,y),(x+w,y+h),(0,255,0),2)

    # NOTE(review): ``global`` at module level has no effect.
    global convexHulls
    convexHulls = []

    # Convex hulls give cleaner bounding boxes than the raw contours.
    for i in range(len(contoursP)):
        convexHull = cv2.convexHull(contoursP[i])
        convexHulls.append(convexHull)

    # Keep only hulls whose bounding box looks like a real object:
    # wider than 20 px and between 30 and 200 px tall.
    frameBlob = []
    for i in range(len(convexHulls)):
        checkBlob = blob(convexHulls[i])

        if ( checkBlob.boundingRect[2] > 20 and checkBlob.boundingRect[3]> 30 and
                    checkBlob.boundingRect[3] <200 ) :
            #print(checkBlob.boundingRect[2])
            frameBlob.append(checkBlob)




    # First frame: adopt every detection as a new track; afterwards match
    # the detections against the existing tracks.
    if (firstFrame == True):
        for i in frameBlob:
            blobs.append(i)
    else:
        blobs = matchCurrentFrameBlobsToExistingBlobs(blobs, frameBlob)

    drawBlobInfoOnImage(blobs, imgPResize)



    firstFrame=False



    imgPContour = cv2.drawContours(imgPBinary, contoursP, -1, (255, 255, 255), -1)



    cv2.imshow("a",imgContour)
    cv2.imshow("b",imgPResize)




    frameBlob.clear()

    # ~10 ms delay per frame; key presses are ignored.
    cv2.waitKey(10)

프로젝트의 메인 부분입니다. absdiff 차영상을 통해 움직이는 객체를 인식한 후 blob class로 정의한 뒤, blob의 크기를 정해 어떤 크기의 객체를 인식할지 정해주고, 인식된 blob을 multi tracking하며 네모 박스를 그려주는 코드입니다.

 

import cv2
import sys
import random
import math
# Open the input video; frames are read in the main loop below.
cap = cv2.VideoCapture('test44.mp4')
blobs= []  # every blob ever tracked, grown/updated across frames

firstFrame= True  # first iteration adopts all detections as new tracks

# BGR color constants used for drawing.
SCALAR_BLACK = (0,0,0)
SCALAR_WHITE = (255,255,255)
SCALAR_BLUE = (255,0,0)
SCALAR_GREEN = (0,200,0)
SCALAR_RED = (0,0,255)

class blob:
    """A tracked moving object detected in a video frame.

    Wraps the bounding rectangle of a contour and keeps a history of
    center positions so the next position can be extrapolated for
    frame-to-frame matching.

    Fixes vs. the original: the block of ``global`` declarations in
    ``__init__`` was removed — it served no purpose except leaking
    ``cx``/``cy`` into the module namespace — and the five copy-pasted
    prediction branches were collapsed into one equivalent loop.
    """

    def __init__(self, contour):
        """Initialize blob geometry from an OpenCV contour.

        Args:
            contour: point array accepted by ``cv2.boundingRect``
                (e.g. the output of ``cv2.convexHull``).
        """
        # Axis-aligned bounding rectangle [x, y, w, h] of the contour.
        x, y, w, h = cv2.boundingRect(contour)
        self.boundingRect = [x, y, w, h]

        # Center of the rect; (2x+w)/2 == x + w/2, kept as floats.
        cx = (2 * x + w) / 2
        cy = (2 * y + h) / 2
        self.centerPosition = [cx, cy]

        # History of center positions, one entry per matched frame.
        self.centerPositions = [self.centerPosition]

        self.predictedNextPosition = []  # filled by predictNextPosition()
        self.Contour = []                # contour copied in by the matcher

        self.diagonal = math.sqrt(w * w + h * h)  # rect diagonal length
        self.aspectRatio = (w / (h * 1.0))        # width / height ratio
        self.area = w * h

        self.blobTracked = True    # set False once the blob is lost
        self.matchNewBlob = True   # matched to a current-frame blob?
        self.numMatch = 0          # consecutive frames without a match

    def predictNextPosition(self):
        """Extrapolate the next center position from the history.

        Uses a weighted average of up to the last four frame-to-frame
        deltas (most recent delta weighted highest), exactly matching
        the original per-count branches: divisors 1, 3, 6, 10 for
        1..4 usable deltas.
        """
        positions = self.centerPositions
        n = len(positions)
        last = positions[-1]

        if n == 1:
            # No motion history yet: predict "stays in place".
            self.predictedNextPosition = [last[0], last[1]]
            return

        # Use at most the 4 most recent deltas, weighted k, k-1, ..., 1.
        k = min(n - 1, 4)
        sumOfXChanges = 0
        sumOfYChanges = 0
        for i in range(1, k + 1):
            weight = k + 1 - i  # most recent delta gets weight k
            sumOfXChanges += (positions[n - i][0] - positions[n - i - 1][0]) * weight
            sumOfYChanges += (positions[n - i][1] - positions[n - i - 1][1]) * weight
        divisor = k * (k + 1) // 2  # triangular number: 1, 3, 6, 10
        deltaX = sumOfXChanges / divisor
        deltaY = sumOfYChanges / divisor
        self.predictedNextPosition = [last[0] + deltaX, last[1] + deltaY]

def drawBlobInfoOnImage(blobs,m1):
    """Draw a blue (BGR (255, 0, 0)) rectangle on ``m1`` around each tracked blob."""
    for tracked in blobs:
        if tracked.blobTracked:
            x, y, w, h = tracked.boundingRect
            cv2.rectangle(m1, (x, y), (x + w, y + h), (255,0,0), 2)


def distanceBetweenPoints(pos1,pos2):
    """Euclidean distance between two [x, y] points.

    An empty ``pos2`` is treated as the origin, so in that case the
    distance of ``pos1`` from (0, 0) is returned.
    """
    if pos2 == []:
        dx, dy = pos1[0], pos1[1]
    else:
        dx = pos2[0] - pos1[0]
        dy = pos2[1] - pos1[1]
    return math.sqrt(dx ** 2 + dy ** 2)

def matchCurrentFrameBlobsToExistingBlobs(blobs,currentFrameBlobs):
    """Match this frame's detections against the tracked blobs.

    Each tracked blob first predicts where it should appear next; every
    current-frame blob is then assigned to the nearest prediction if the
    distance is under half of its own diagonal, otherwise it starts a new
    track.  Tracks unmatched for 10 consecutive frames stop being tracked.
    Returns the updated blob list.
    """
    # Reset the match flags and predict each existing blob's next position.
    for tracked in blobs:
        tracked.matchNewBlob = False
        tracked.predictNextPosition()

    for frameBlob in currentFrameBlobs:
        bestIndex = 0
        bestDistance = 1000.0  # anything farther counts as "no match"
        for index, candidate in enumerate(blobs):
            if candidate.blobTracked:
                # Distance from this detection to the candidate's
                # predicted next position.
                dist = distanceBetweenPoints(frameBlob.centerPositions[-1],
                                             candidate.predictedNextPosition)
                if dist < bestDistance:
                    bestDistance = dist
                    bestIndex = index
        # Accept the match only if the motion is smaller than half the
        # detection's bounding-rect diagonal; otherwise it is a new blob.
        if bestDistance < frameBlob.diagonal * 0.5:
            blobs = addBlobToExistingBlobs(frameBlob, blobs, bestIndex)
        else:
            blobs, frameBlob = addNewBlob(frameBlob, blobs)

    # Age out blobs that found no match this frame.
    for tracked in blobs:
        if not tracked.matchNewBlob:
            tracked.numMatch = tracked.numMatch + 1
        if tracked.numMatch >= 10:
            tracked.blobTracked = False
    return blobs

def addBlobToExistingBlobs(currentFrameBlob, blobs, intIndex):
    """Fold a current-frame detection into the tracked blob at intIndex.

    The tracked blob keeps its position history but takes over the fresh
    contour, rectangle and shape measurements, and is re-marked as
    matched and tracked.  Returns the (mutated) blob list.
    """
    matched = blobs[intIndex]
    matched.Contour = currentFrameBlob.Contour
    matched.boundingRect = currentFrameBlob.boundingRect
    # Extend the history with the newest observed center position.
    matched.centerPositions.append(currentFrameBlob.centerPositions[-1])
    matched.diagonal = currentFrameBlob.diagonal
    matched.aspectRatio = currentFrameBlob.aspectRatio
    matched.blobTracked = True
    matched.matchNewBlob = True
    return blobs

def addNewBlob(currentFrameBlob, blobs):
    """Start tracking a brand-new blob.

    BUG FIX: the original ignored its ``Blobs`` parameter and appended to
    the module-level global ``blobs`` instead — it only worked because
    every caller happened to pass that same global list.  The blob is now
    appended to the list actually passed in.

    Returns the updated list and the (marked) blob, the tuple shape the
    matcher expects.
    """
    currentFrameBlob.matchNewBlob = True
    blobs.append(currentFrameBlob)
    return blobs, currentFrameBlob

# ---------------------------------------------------------------------------
# Main loop: detect moving objects by frame differencing and track them.
# NOTE(review): cap.read() is called three times per iteration (imgP,
# preimgP, imgReset), so two of every three frames are skipped and
# ``imgReset`` is never used — confirm this is intended.  The return flags
# are ignored, so the loop crashes when the video runs out, and there is
# no quit-key handling.
# ---------------------------------------------------------------------------
while True:


    _, imgP = cap.read()
    _, preimgP = cap.read()

    _ , imgReset =cap.read()

    #imgP= cv2.flip(imgP,3)
    #preimgP=cv2.flip(preimgP,3)

    # Work on fixed-size frames so the pixel thresholds below are
    # independent of the input resolution.
    imgPResize = cv2.resize(imgP, (1000, 1000))
    preimgPResize = cv2.resize(preimgP, (1000, 1000))


    imgPGray = cv2.cvtColor(imgPResize, cv2.COLOR_BGR2GRAY)
    imgPPGray = cv2.cvtColor(preimgPResize, cv2.COLOR_BGR2GRAY)

    # Blur before differencing to suppress pixel noise.
    imgPGaussian = cv2.GaussianBlur(imgPGray, (5, 5), 0)
    imgPPGaussian = cv2.GaussianBlur(imgPPGray, (5, 5), 0)

    # Absolute difference of consecutive frames: moving pixels light up.
    imgPdiff = cv2.absdiff(imgPGaussian, imgPPGaussian)

    _, imgPBinary = cv2.threshold(imgPdiff, 35, 255, cv2.THRESH_BINARY)

    mask = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

    # Morphological passes (dilate twice, erode once, five rounds) to merge
    # fragments of the same moving object into one solid blob.
    for i in range(0, 5):
        imgPBinary=cv2.dilate(imgPBinary,mask,iterations = 1)
        imgPBinary=cv2.dilate(imgPBinary,mask,iterations = 1)
        imgPBinary=cv2.erode(imgPBinary,mask, iterations = 1)

    # NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4.x
    # returns only (contours, hierarchy).
    imageC, contoursP, hierarchy = cv2.findContours(imgPBinary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # extract contours

    imgContour = cv2.drawContours(imgPBinary, contoursP, -1, SCALAR_WHITE, -1)

    # NOTE(review): this loop only keeps the last contour's rect and its
    # drawing call is commented out — effectively dead code.
    for cnt in contoursP:

        x,y,w,h=cv2.boundingRect(cnt)
        #cv2.rectangle(imgPResize,(x,y),(x+w,y+h),(0,255,0),2)

    # NOTE(review): ``global`` at module level has no effect.
    global convexHulls
    convexHulls = []

    # Convex hulls give cleaner bounding boxes than the raw contours.
    for i in range(len(contoursP)):
        convexHull = cv2.convexHull(contoursP[i])
        convexHulls.append(convexHull)

    # Keep only hulls whose bounding box looks like a real object:
    # wider than 20 px and between 30 and 200 px tall.
    frameBlob = []
    for i in range(len(convexHulls)):
        checkBlob = blob(convexHulls[i])

        if ( checkBlob.boundingRect[2] > 20 and checkBlob.boundingRect[3]> 30 and
                    checkBlob.boundingRect[3] <200 ) :
            #print(checkBlob.boundingRect[2])
            frameBlob.append(checkBlob)




    # First frame: adopt every detection as a new track; afterwards match
    # the detections against the existing tracks.
    if (firstFrame == True):
        for i in frameBlob:
            blobs.append(i)
    else:
        blobs = matchCurrentFrameBlobsToExistingBlobs(blobs, frameBlob)

    drawBlobInfoOnImage(blobs, imgPResize)



    firstFrame=False



    imgPContour = cv2.drawContours(imgPBinary, contoursP, -1, (255, 255, 255), -1)



    cv2.imshow("a",imgContour)
    cv2.imshow("b",imgPResize)




    frameBlob.clear()

    # ~10 ms delay per frame; key presses are ignored.
    cv2.waitKey(10)

 

전체 코드입니다.

 

multi tracking 예제코드 어디에 사용할까


 

정말 여러가지형식으로 변환하여 사용할 수 있습니다. 보행자 tracking , 고속도로 차량 파악, 컨베이어벨트를 타는 제품 count등 여러방면으로 쓰일 수 있으니 변형시켜 사용하시길 바랍니다. 후에 차영상 blur등의 opencv 함수 사용은 다른 포스팅에서 자세히 다뤄 보도록하겠습니다. 

반응형

댓글