How do I compute matching features between high-resolution images?

I am trying to match SIFT features between two images. The features were detected with OpenCV:

sift = cv2.xfeatures2d.SIFT_create()
kp, desc = sift.detectAndCompute(img, None)

Both images seem to contain lots of features, around 15,000 each, shown as green dots:

[image: detected keypoints drawn as green dots on both images]

But after matching them I only retain 87 matches, and some of those are outliers.

[image: the 87 surviving matches between the two images]

I'm trying to figure out if I'm doing something wrong. My code for matching the two images is:

import cv2
import numpy as np

def match(this_filename, this_desc, this_kp, othr_filename, othr_desc, othr_kp):

    E_RANSAC_PROB = 0.999  # unused in this function
    F_RANSAC_PROB = 0.999
    E_PROJ_ERROR = 15.0    # unused in this function
    F_PROJ_ERROR = 15.0
    LOWE_RATIO = 0.9
    # FLANN Matcher
    # FLANN_INDEX_KDTREE = 1 # 1? https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html#basics-of-brute-force-matcher
    # index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    # search_params = dict(checks=50)   # or pass empty dictionary
    # flann = cv2.FlannBasedMatcher(index_params, search_params)
    # matcherij = flann.knnMatch(this_desc, othr_desc, k=2)
    # matcherji = flann.knnMatch(othr_desc, this_desc, k=2)

    # BF Matcher
    this_matches = {}
    othr_matches = {}


    bf = cv2.BFMatcher()
    matcherij = bf.knnMatch(this_desc, othr_desc, k=2)
    matcherji = bf.knnMatch(othr_desc, this_desc, k=2)

    matchesij = []
    matchesji = []

    for m, n in matcherij:
        if m.distance < LOWE_RATIO*n.distance:
            matchesij.append((m.queryIdx, m.trainIdx))

    for m, n in matcherji:
        if m.distance < LOWE_RATIO*n.distance:
            matchesji.append((m.trainIdx, m.queryIdx))


    # Make sure matches are symmetric
    symmetric = set(matchesij).intersection(set(matchesji))
    symmetric = list(symmetric)

    this_matches[othr_filename] = [ (a, b) for (a, b) in symmetric ]
    othr_matches[this_filename] = [ (b, a) for (a, b) in symmetric ]

    if len(this_matches[othr_filename]) == 0:
        print("no symmetric matches")
        return 0

    src = np.array([ this_kp[index[0]].pt for index in this_matches[othr_filename] ])
    dst = np.array([ othr_kp[index[1]].pt for index in this_matches[othr_filename] ])

    # retain inliers that satisfy the epipolar constraint x2^T . F . x1 == 0
    F, inliers = cv2.findFundamentalMat(src, dst, cv2.FM_RANSAC, F_PROJ_ERROR, F_RANSAC_PROB)

    if F is None or inliers is None:
        print("no F matrix estimated")
        return 0

    inliers = inliers.ravel()

    this_matches[othr_filename] = [ this_matches[othr_filename][x] for x in range(len(inliers)) if inliers[x] ]
    othr_matches[this_filename] = [ othr_matches[this_filename][x] for x in range(len(inliers)) if inliers[x] ]

    return this_matches, othr_matches, inliers.sum()
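For context, a minimal driver for this function might look like the sketch below. It is an illustration only, reusing the imports added above; the filenames come from the zip linked underneath, and match() returns 0 on failure or a (this_matches, othr_matches, inlier_count) tuple on success.

img1 = cv2.imread('IMG_1596.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('IMG_1598.png', cv2.IMREAD_GRAYSCALE)

sift = cv2.xfeatures2d.SIFT_create()
kp1, desc1 = sift.detectAndCompute(img1, None)
kp2, desc2 = sift.detectAndCompute(img2, None)

result = match('IMG_1596.png', desc1, kp1, 'IMG_1598.png', desc2, kp2)
if result != 0:
    this_matches, othr_matches, n_inliers = result
    print(n_inliers, "inlier matches")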

Here are the two original images: https://www.dropbox.com/s/pvi247be2ds0noc/images.zip?dl=0

Metage answered 7/10, 2020 at 14:46 Comment(2)
I'm not sure what your eventual goal is, but you may wish to consider using a neural network, or at least a perceptual hashing algorithm, to measure the similarity between these images; those methods (especially the neural network) are more robust than the older SIFT / SURF keypoint methods. – Sudhir
This is for structure from motion. It's a pretty typical step, and my code works for low-resolution images, so I'm confused about what is going wrong with these images. – Metage

I don't get why your code filters out matches whose best-to-second-best distance ratio is above 0.9 (LOWE_RATIO). The points are already matched; by filtering them out you reduce the matched features from around 15,000 to 839, and then the inlier detector recognizes only 87 of them as inliers.
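To quantify this, you can count how many pairs survive different ratio thresholds; a minimal sketch, assuming this_desc and othr_desc are the SIFT descriptors from the question:

import cv2

bf = cv2.BFMatcher()
pairs = bf.knnMatch(this_desc, othr_desc, k=2)

for ratio in (1.0, 0.9, 0.75):  # 1.0 effectively disables the ratio test
    kept = [m for m, n in pairs if m.distance < ratio * n.distance]
    print(ratio, len(kept))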


In addition, using the code below, which relies on ORB (Oriented FAST and Rotated BRIEF), I got 500 keypoints and 158 matches that make sense. I believe it could be a good alternative to SIFT:

import cv2
from matplotlib import pyplot as plt

img1 = cv2.imread('IMG_1598.png', cv2.IMREAD_GRAYSCALE)  # queryImage
img2 = cv2.imread('IMG_1596.png', cv2.IMREAD_GRAYSCALE)  # trainImage
# (IMREAD_GRAYSCALE is the correct imread flag; COLOR_BGR2GRAY is a cvtColor conversion code)
# Initiate ORB detector
orb = cv2.ORB_create()

# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(des1, des2)

# Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)
# Draw all matches, best first.
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, flags=2)

plt.imshow(img3)
plt.savefig('foo.png')  # save before show(); show() may clear the figure
plt.show()

and the matches are like this: [image: ORB matches drawn between the two images]
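Note that crossCheck=True makes BFMatcher keep only mutual nearest neighbours, which plays roughly the same role as the symmetry check in the question's code; that is why no separate ratio test is needed in this snippet.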

Petrify answered 10/10, 2020 at 10:47 Comment(0)

High resolution is not always a good thing in image processing, so I just followed this tutorial and added a median filter. The results, shown below, were not that bad.

import cv2 as cv

im1  = cv.imread('IMG_1596.png')
gry1 = cv.cvtColor(im1, cv.COLOR_BGR2GRAY)
gry1 = cv.medianBlur(gry1, ksize = 5)

im2  = cv.imread('IMG_1598.png')
gry2 = cv.cvtColor(im2, cv.COLOR_BGR2GRAY)
gry2 = cv.medianBlur(gry2, ksize = 5)

# Initiate ORB detector
orb = cv.ORB_create()

# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(gry1,None)
kp2, des2 = orb.detectAndCompute(gry2,None)

# create BFMatcher object
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)

# Match descriptors.
matches = bf.match(des1,des2)

# Sort them in the order of their distance.
matches = sorted(matches, key = lambda x:x.distance)

im3 = cv.drawMatches(im1,kp1,im2,kp2,matches,None,flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
cv.imwrite("ORB_RESULTS.png", im3)

len(matches)
>>> 121

[image: ORB matches after median filtering]
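On the same point that high resolution is not always helpful, downscaling before detection is another cheap option to try; a minimal sketch, assuming the same files (the 0.25 factor is an arbitrary choice, not something from the original answer):

import cv2 as cv

def load_small(path, scale=0.25):
    # read, convert to grayscale, and shrink; INTER_AREA suits downscaling
    gry = cv.cvtColor(cv.imread(path), cv.COLOR_BGR2GRAY)
    return cv.resize(gry, None, fx=scale, fy=scale, interpolation=cv.INTER_AREA)

gry1 = load_small('IMG_1596.png')
gry2 = load_small('IMG_1598.png')
# the rest of the pipeline (detection and matching) is unchanged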

# Initiate SIFT detector
sift = cv.SIFT_create()

# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(gry1,None)
kp2, des2 = sift.detectAndCompute(gry2,None)

# BFMatcher with default params
bf = cv.BFMatcher()
matches = bf.knnMatch(des1,des2,k=2)

# Apply ratio test
good = []
for m,n in matches:
    if m.distance < 0.75*n.distance:
        good.append([m])
        
# cv.drawMatchesKnn expects list of lists as matches.
im3 = cv.drawMatchesKnn(im1,kp1,im2,kp2,good,None,flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
cv.imwrite("SIFT_RESULTS.png", im3)
len(good)
>>> 183

[image: SIFT matches that pass the ratio test]
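Since the question's end goal is structure from motion, these good matches can be fed back into the RANSAC step; a rough sketch, assuming kp1, kp2, and good from the SIFT snippet above (the threshold values are arbitrary):

import numpy as np

# each element of `good` is a one-element list [m], as drawMatchesKnn expects
src = np.float32([kp1[m.queryIdx].pt for [m] in good])
dst = np.float32([kp2[m.trainIdx].pt for [m] in good])

F, mask = cv.findFundamentalMat(src, dst, cv.FM_RANSAC, 3.0, 0.999)
print("inliers:", 0 if mask is None else int(mask.sum()))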

# Two possible FLANN indexes: a KD-tree for float descriptors (SIFT/SURF)
# and LSH for binary descriptors (ORB/BRIEF/BRISK).
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)

FLANN_INDEX_LSH = 6
index_params = dict(algorithm = FLANN_INDEX_LSH,
                    table_number = 6,      # 12
                    key_size = 12,         # 20
                    multi_probe_level = 1) # 2
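For completeness: the LSH index applies to binary descriptors, so it pairs with ORB rather than with the SIFT descriptors used below; a minimal sketch, assuming the gry1 and gry2 images from above and the LSH index_params just defined:

orb = cv.ORB_create()
kp1_b, des1_b = orb.detectAndCompute(gry1, None)
kp2_b, des2_b = orb.detectAndCompute(gry2, None)

flann_lsh = cv.FlannBasedMatcher(index_params, dict(checks=50))
matches_b = flann_lsh.knnMatch(des1_b, des2_b, k=2)

# LSH can return fewer than k neighbours, so guard the ratio test
good_b = [p[0] for p in matches_b if len(p) == 2 and p[0].distance < 0.7 * p[1].distance]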

# Initiate SIFT detector, matched with FLANN below
sift = cv.SIFT_create()

# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(gry1,None)
kp2, des2 = sift.detectAndCompute(gry2,None)

# FLANN parameters
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict()
flann = cv.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des1,des2,k=2)

# Need to draw only good matches, so create a mask
matchesMask = [[0,0] for i in range(len(matches))]

# ratio test as per Lowe's paper
for i,(m,n) in enumerate(matches):
    if m.distance < 0.7*n.distance:
        matchesMask[i]=[1,0]
draw_params = dict(matchColor = (0,255,0),
                   singlePointColor = (255,0,0),
                   matchesMask = matchesMask,
                   flags = cv.DrawMatchesFlags_DEFAULT)
im3 = cv.drawMatchesKnn(im1,kp1,im2,kp2,matches,None,**draw_params)
cv.imwrite("SIFT_w_FLANN_RESULTS.png", im3)

[image: SIFT + FLANN matches, with good matches drawn in green]

Connaught answered 10/10, 2020 at 11:41 Comment(0)
