Commit d53a24a9 authored by Jay's avatar Jay Committed by jay
Browse files

Updates for review comments. Also swapped in template-based matching from OpenCV.

parent 35ef8154
Loading
Loading
Loading
Loading
+12 −8
Original line number Diff line number Diff line
@@ -389,6 +389,10 @@ class CandidateGraph(nx.Graph):
            if clean_keys:
                matches, mask = edge._clean(clean_keys)

            subpixel = False
            if 'subpixel' in clean_keys:
                subpixel = True

            kp1 = self.node[source].keypoints
            kp2 = self.node[destination].keypoints
            pt_idx = 0
@@ -400,19 +404,19 @@ class CandidateGraph(nx.Graph):
                m1 = (source, int(row['source_idx']))
                m2 = (destination, int(row['destination_idx']))

                values.append([kp1.iloc[m1_pid]['x'],
                               kp1.iloc[m1_pid]['y'],
                values.append([kp1.loc[m1_pid]['x'],
                               kp1.loc[m1_pid]['y'],
                               m1,
                               pt_idx,
                               source,
                               idx])

                kp2x = kp2.iloc[m2_pid]['x']
                kp2y = kp2.iloc[m2_pid]['y']

                if 'subpixel' in clean_keys:
                    kp2x += row['x_offset']
                    kp2y += row['y_offset']
                if subpixel:
                    kp2x = kp2.loc[m2_pid]['x'] + row['x_offset']
                    kp2y = kp2.loc[m2_pid]['y'] + row['y_offset']
                else:
                    kp2x = kp2.loc[m2_pid]['x']
                    kp2y = kp2.loc[m2_pid]['y']

                values.append([kp2x,
                               kp2y,
+0 −12
Original line number Diff line number Diff line
@@ -39,18 +39,6 @@ class TestNode(unittest.TestCase):
        # Convex hull computation is checked lower in the hull computation
        self.assertRaises(AttributeError, self.node.coverage_ratio)

    def test_provenance(self):
        image = self.node.get_array()
        self.node.extract_features(image, extractor_parameters={'nfeatures':10})
        self.node.extract_features(image, extractor_parameters={'nfeatures':15})
        p0 = self.node.provenance[0]
        p1 = self.node.provenance[1]
        print(self.node.provenance)
        self.assertEqual(len(self.node.provenance.keys()), 2)
        self.assertNotEqual(find_in_dict(p0, 'nfeatures'),
                            find_in_dict(p1, 'nfeatures'))


    def test_isis_serial(self):
        serial = self.node.isis_serial
        self.assertEqual(None, serial)
+32 −24
Original line number Diff line number Diff line
import warnings

import cv2
import pandas as pd

@@ -10,8 +12,7 @@ DEFAULT_FLANN_PARAMETERS = dict(algorithm=FLANN_INDEX_KDTREE,
                                trees=3)


def pattern_match(template, image, upsampling=16,
                  func=match_template):
def pattern_match(template, image, upsampling=16,func=cv2.TM_CCOEFF_NORMED, error_check=False):
    """
    Call an arbitrary pattern matcher

@@ -29,6 +30,12 @@ def pattern_match(template, image, upsampling=16,

    func : object
           The function to be used to perform the template based matching
           Options: {cv2.TM_CCORR_NORMED, cv2.TM_CCOEFF_NORMED, cv2.TM_SQDIFF_NORMED}
           In testing the first two options perform significantly better with Apollo data.

    error_check : bool
                  If True, also apply a different matcher and test that the values
                  are not too divergent.  Default, False.

    Returns
    -------
@@ -42,34 +49,35 @@ def pattern_match(template, image, upsampling=16,
    strength : float
               The strength of the correlation in the range [-1, 1].
    """
    if upsampling < 1:
        raise ValueError

    u_template = zoom(template, upsampling)
    u_image = zoom(image, upsampling, )
    # Find the the upper left origin of the template in the image
    match = func(u_image, u_template)
    y, x = np.unravel_index(np.argmax(match), match.shape)
    different = {cv2.TM_SQDIFF_NORMED: cv2.TM_CCOEFF_NORMED,
                 cv2.TM_CCORR_NORMED: cv2.TM_SQDIFF_NORMED,
                 cv2.TM_CCOEFF_NORMED: cv2.TM_SQDIFF_NORMED}

    # Resample the match back to the native image resolution
    x /= upsampling
    y /= upsampling
    if upsampling < 1:
        raise ValueError

    # Offset from the UL origin to the image center
    x += (template.shape[1] / 2)
    y += (template.shape[0] / 2)
    u_template = zoom(template, upsampling, order=1)
    u_image = zoom(image, upsampling, order=1)

    # Compute the offset to adjust the image match point location
    ideal_y = image.shape[0] / 2
    ideal_x = image.shape[1] / 2
    result = cv2.matchTemplate(u_image, u_template, method=func)
    min_corr, max_corr, min_loc, max_loc = cv2.minMaxLoc(result)
    if func == cv2.TM_SQDIFF or func == cv2.TM_SQDIFF_NORMED:
        x,y = (min_loc[0], min_loc[1])
    else:
        x, y = (max_loc[0], max_loc[1])

    x = ideal_x - x
    y = ideal_y - y
    # Compute the idealized shift (image center)
    ideal_y = u_image.shape[0] / 2
    ideal_x = u_image.shape[1] / 2

    # Find the maximum correlation
    strength = np.max(match)
    # Compute the shift from template upper left to template center
    y += (u_template.shape[0] / 2)
    x += (u_template.shape[1] / 2)

    return x, y, strength
    x = (ideal_x - x) / upsampling
    y = (ideal_y - y) / upsampling
    return x, y, max_corr


class FlannMatcher(object):
@@ -168,7 +176,7 @@ class FlannMatcher(object):
                                    i.queryIdx,
                                    i.distance))
                else:
                    raise ValueError('Likely self neighbor in query!')
                    warnings.warn('Likely self neighbor in query!')
        return pd.DataFrame(matched, columns=['source_image', 'source_idx',
                                              'destination_image', 'destination_idx',
                                              'distance'])
+9 −0
Original line number Diff line number Diff line
@@ -31,6 +31,10 @@ class DistanceRatio(Observable):
             mask are assumed to have passed the ratio test.  Else
             False.

    References
    ----------
    [Lowe2004]_

    """

    def __init__(self, matches):
@@ -126,6 +130,11 @@ class SpatialSuppression(Observable):

    domain : tuple
             The (x,y) extent of the input domain

    References
    ----------
    [Gauglitz2011]_

    """

    def __init__(self, df, domain, min_radius=2, k=250, error_k=0.1):
+1 −1
Original line number Diff line number Diff line
@@ -45,7 +45,7 @@ def clip_roi(img, center, img_size):
                          x_start:x_start + x_stop + 1]
    else:
        clipped_img = img.read_array(pixels=[x_start, y_start,
                                             x_stop, y_stop])
                                             x_stop + 1, y_stop + 1])
    return clipped_img


Loading