Python/Corner/tracker.py

#! /usr/bin/env python

"""
This script demonstrates detection and tracking of
Harris corners.

NOT COMPLETE
"""

import numpy as np
import cv2 as cv

# Load images and convert to greyscale
I1c = cv.imread("frame27.jpeg")
I1 = cv.cvtColor(I1c, cv.COLOR_BGR2GRAY)
I2c = cv.imread("frame28.jpeg")
I2 = cv.cvtColor(I2c, cv.COLOR_BGR2GRAY)

# Settings for the Sobel filter
scale = 1
delta = 0
ddepth = cv.CV_16S

# Calculate derivatives
# - work in signed integers: the frames are uint8, so a plain subtraction
#   would wrap around wherever I2 < I1, and convertScaleAbs would discard
#   the sign of the gradients needed for the motion estimate below
It = I2.astype(np.int32) - I1.astype(np.int32)
Ix = cv.Sobel(I1, ddepth, 1, 0, ksize=3, scale=scale, delta=delta, borderType=cv.BORDER_DEFAULT)
Iy = cv.Sobel(I1, ddepth, 0, 1, ksize=3, scale=scale, delta=delta, borderType=cv.BORDER_DEFAULT)

# Debug Output (image file) - absolute gradient magnitudes, for viewing only
test = cv.addWeighted(cv.convertScaleAbs(Ix), 0.5, cv.convertScaleAbs(Iy), 0.5, 0)
cv.imwrite("sobeledges.jpeg",test)

# Harris Corners - dst is the Harris heuristic for each pixel
# TODO: Look up and tune parameters
dst = cv.cornerHarris(I1,2,3,0.04)

# Debug Output (image file)
harris = I1c.copy()
harris[dst>0.01*dst.max()]=[0,0,255]
cv.imwrite("harris.jpeg",harris)


def filterFP(l,r=36):
    """
    Given a list l=[(h,x,y)] of corners (x,y) with Harris strength h,
    sorted by increasing strength, return a new list in which every point
    whose squared distance to a stronger retained point is at most r has
    been removed (so r is a squared suppression radius, here 6**2 pixels).
    """
    if not l: return l
    else:
        (h0,x0,y0) = l[-1]
        l0 = [ (h,x,y) for (h,x,y) in l[:-1] if ((x-x0)**2+(y-y0)**2 > r) ]
        l1 = filterFP(l0,r)
        l1.append(l[-1])
        return l1
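
# Tiny sanity check of filterFP on a hand-made list (illustrative only): the
# weaker corner at (1,1) lies within the suppression radius of the stronger
# corner at (0,0), so only the stronger one should survive.
assert filterFP([(0.1, 1, 1), (0.9, 0, 0)]) == [(0.9, 0, 0)]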

# Get sorted list of Harris Corners with strength
fpt = dst>0.01*dst.max()
fpt = np.transpose(fpt.nonzero())
fplist = [ (dst[x,y],x,y) for (x,y) in fpt ]
fplist.sort()
fplist = filterFP(fplist)

# Debug Output (image file)
harris = I1c.copy()
fpl = [ (x,y) for (h,x,y) in fplist ]
for i in fpl:
    harris[i] = [0,0,255]
cv.imwrite("harris-filtered.jpeg",harris)

# Element-wise products of the derivatives
# - promote the Sobel output to int32 so the products cannot overflow int16
Ix = Ix.astype(np.int32)
Iy = Iy.astype(np.int32)
Ixx = Ix*Ix
Ixy = Ix*Iy
Iyy = Iy*Iy
Ixt = Ix*It
Iyt = Iy*It

# Sanity check: these shapes should all be identical; whether they are depends
# on the border handling used when filtering.
print(I1.shape)
print(It.shape)
print(Ix.shape)
print(Ixt.shape)
print(Ixx.shape)
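
# Make the expectation explicit rather than relying on reading the printout
# (purely a sanity check; all four should match I1.shape with these settings).
assert I1.shape == It.shape == Ix.shape == Ixx.shape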

# Calculate the G matrix and b vector for Algorithm 4.1
# - we consider only the very strongest feature point
(h,x,y) = fplist[-1]
print( f"({x},{y}) has strength {h}" )

# - make the window, clipped at the image border (a negative slice index
#   would otherwise silently wrap around to the far side of the image)
wsize = 5
ws = wsize // 2
mask = np.zeros(I1.shape, dtype=bool)
mask[max(x-ws,0):x+ws+1, max(y-ws,0):y+ws+1] = True

# - calculate G and b
G = np.array( [ [ Ixx[mask].sum(), Ixy[mask].sum() ], 
                [ Ixy[mask].sum(), Iyy[mask].sum() ] ] )
b = np.array( [ Ixt[mask].sum(), Iyt[mask].sum() ] ) 
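# (G is the sum over the window of the gradient outer products and b the sum
#  of the spatio-temporal products, so the estimate below solves G u = -b for
#  the window's translation between the two frames.)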

print( f"G = {G}" )
print( f"b = {b}" )

# Motion estimate in best feature
Ginv = np.linalg.inv(G)
u = - Ginv @ b
print( f"G inverse {Ginv}" )
print( f"Motion vector u is {u}" )

# TODO: validate
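# A minimal validation sketch (assumption: the estimate is only trusted when
# the smaller eigenvalue of G is a reasonable fraction of the larger one; the
# 0.01 threshold is an arbitrary placeholder, not a tuned value).
eigvals = np.linalg.eigvalsh(G)
print( f"Eigenvalues of G: {eigvals}" )
if eigvals.min() < 0.01 * eigvals.max():
    print("Warning: G is ill-conditioned; the motion estimate may be unreliable")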

# Draw motion vector
# - u is (horizontal, vertical) in image terms, while (x,y) above is
#   (row, column), so reverse u before adding; the factor 25 only scales
#   the arrow for visibility
pos0 = np.array([x,y],dtype=int)
pos1 = np.round(pos0 + 25*u[::-1]).astype(int)
print ( f"{pos0} -> {pos1}" )
colour = (0, 255, 0)
thickness = 1
# NOTE: OpenCV writes the horizontal co-ordinate first
test = cv.arrowedLine(harris, (y,x), (pos1[1],pos1[0]), colour, thickness)
test[x,y] = (0,0,255)
cv.imwrite("motion.jpeg",test)