Commit 7c6a913d authored by Jay Laura's avatar Jay Laura
Browse files

Fixes: subpixel API when matching with affine-transformed templates

parent dd748e6e
Loading
Loading
Loading
Loading
+17 −48
Original line number Diff line number Diff line
@@ -126,58 +126,27 @@ def pattern_match(template, image, upsampling=8, metric=cv2.TM_CCOEFF_NORMED, er
        u_template = template
        u_image = image

    result = cv2.matchTemplate(u_image, u_template, method=metric)
    corrmap = cv2.matchTemplate(u_image, u_template, method=metric)

    _, max_corr, min_loc, max_loc = cv2.minMaxLoc(result)
    # Pad the result array with values outside the valid correlation range
    width = (np.asarray(u_template.shape) - np.asarray(corrmap.shape)) // 2
    
    if metric == cv2.TM_SQDIFF or metric == cv2.TM_SQDIFF_NORMED:
        x = min_loc[0]
        y = min_loc[1]
        result = np.pad(corrmap, width, mode='constant', constant_values=2)
        matched_y, matched_x = np.unravel_index(np.argmin(result), result.shape)
    else:
        x = max_loc[0]
        y = max_loc[1]
        result = np.pad(corrmap, width, mode='constant', constant_values=-2)
        matched_y, matched_x = np.unravel_index(np.argmax(result), result.shape)
    
    # Transform from the results array shape to the template shape
    x = x - (result.shape[1] - u_template.shape[1]) // 2
    y = y - (result.shape[0] - u_template.shape[0]) // 2
    # The center of the template is the origin
    original_y = (u_template.shape[0] - 1) // 2
    original_x = (u_template.shape[1] - 1) // 2

    # Recenter the origin from the upper left to the center of the template
    ideal_x = u_template.shape[1] // 2
    ideal_y = u_template.shape[0] // 2

    x -= ideal_x
    y -= ideal_y

    y /= upsampling
    x /= upsampling


    return -x, -y, max_corr, result

    # -1 because the returned results array is W-w+1 and H-h+1 in shape, 
    # where W, H are the width and height of the image and w,h are the 
    # width and height of the template

    print(u_template.shape, u_image.shape, result.shape, x,y)
    print(u_image.shape, u_template.shape, max_loc)
    # Compute the shift, if any, between the template center and the
    # best correlation index
    shift_x = (original_x - matched_x) / upsampling
    shift_y = (original_y - matched_y) / upsampling

    max_corr = result[matched_y, matched_x]

    # the max_loc array is of shape W-w+1, H-h+1, where W, H are the width
    # and height of the image and w,h are the width and height of the template

    print(x / upsampling - 1, y / upsampling - 1)


    # Compute the idealized shift (image center)
    ideal_y = (u_image.shape[0] + 1) / 2. - 0.5
    ideal_x = (u_image.shape[1] + 1) / 2. - 0.5

    # Compute the shift from template upper left to template center
    y += (u_template.shape[0] / 2.)
    x += (u_template.shape[1] / 2.)

    # 
    x = (x - ideal_x) / upsampling
    y = ((y - ideal_y) / upsampling) + 1
    print(x, y)
    return x, y, max_corr, result
    return shift_x, shift_y , max_corr, corrmap
+22 −24
Original line number Diff line number Diff line
@@ -209,14 +209,14 @@ def subpixel_phase(reference_roi, moving_roi, affine=tf.AffineTransform(), **kwa
                                                                                **kwargs)
    # get shifts in input pixel space 
    shift_x, shift_y = affine([shift_x, shift_y])[0]
    new_affine = tf.AffineTransform(translation=(-shift_y, -shift_x))
    new_affine = tf.AffineTransform(translation=(-shift_x, -shift_y))
    return new_affine, error, diffphase


def subpixel_template(reference_roi, 
                      moving_roi, 
                      affine=tf.AffineTransform(), 
                      mode='reflect',
                      mode='constant',
                      func=pattern_match,
                      **kwargs):
    """
@@ -269,32 +269,30 @@ def subpixel_template(reference_roi,

    if (ref_clip is None) or (moving_clip is None):
        return None, None, None
    shift_x, shift_y, metrics, corrmap = func(moving_clip, ref_clip, **kwargs)
    if shift_x is None:
        return None, None, None
    matcher_shift_x, matcher_shift_y, metrics, corrmap = func(moving_clip, ref_clip, **kwargs)
 
    # Shift x and shift y are computed in the affine space.
    moving_clip_center = (np.array(moving_clip.shape[:2][::-1])-1)/2.
    print(moving_clip_center)
    if matcher_shift_x is None:
        return None, None, None
        
    new_x = moving_clip_center[0] + shift_x
    new_y = moving_clip_center[1] + shift_y
    print('Pre-transform', shift_x, shift_y, moving_clip.shape, moving_clip_center, new_x, new_y, affine)
    # Apply the shift to the center of the moving roi to the center of the reference ROI in index space. One pixel == one index (unitless).
    new_affine_transformed_center_x = moving_roi.center[0] + matcher_shift_x  #Center is indices.
    new_affine_transformed_center_y = moving_roi.center[1] + matcher_shift_y

    # convert shifts in input pixel space
    image_space_new_x, image_space_new_y = affine([new_x, new_y])[0]
    print('Image Space NEW', image_space_new_x, image_space_new_y)
    # Invert the affine transformation of the new center. This result is plotted in the second figure as a red dot.
    inverse_transformed_affine_center_x, inverse_transformed_affine_center_y = affine.inverse((new_affine_transformed_center_x, new_affine_transformed_center_y))[0]
    #print(inverse_transformed_affine_center_x, inverse_transformed_affine_center_y)

    x_shift_in_image = image_space_new_x - moving_roi.center[0]
    y_shift_in_image = image_space_new_y - moving_roi.center[1]
    # Take the original x,y (moving_roi.x, moving_roi.y) and subtract the delta between the original ROI center and the newly computed center.
    # The 
    #new_x = moving_roi.x - (moving_roi.center[0] - inverse_transformed_affine_center_x) 
    #new_y = moving_roi.y - (moving_roi.center[1] - inverse_transformed_affine_center_y)

    new_affine = tf.AffineTransform(affine.params)
    new_affine.translation = (x_shift_in_image,y_shift_in_image)
    # These are the inverse of the translation so that the caller can use affine() to
    # apply the proper translation. Otherwise, the caller would need to use affine.inverse
    translation_x = -(moving_roi.center[0] - inverse_transformed_affine_center_x)
    translation_y = -(moving_roi.center[1] - inverse_transformed_affine_center_y)

    """new_affine = tf.AffineTransform(translation=(x_shift_in_image,
                                                 y_shift_in_image),
                                    rotation=tf.AffineTransform(affine).rotation,
                                    shear=tf.AffineTransform(affine).shear)"""
    new_affine = tf.AffineTransform(translation=(translation_x, translation_y))

    return new_affine, metrics, corrmap

+145 −0
Original line number Diff line number Diff line
import itertools

import pytest
import numpy as np
from skimage import transform as tf
from scipy.ndimage.interpolation import rotate

from autocnet.transformation import roi


def rot(image, xy, angle):
    """
    Rotate *image* about its center by *angle* degrees (counter-clockwise)
    and return the rotated pixel coordinates of the point *xy*.

    This helper is intentionally external to the AutoCNet API so that
    it can be used to generate test data without any dependence on AutoCNet.

    Returns
    -------
    rotated : ndarray
        The rotated image (reshaped by scipy to contain the full rotation).
    rel : ndarray
        (x, y) of *xy* relative to the rotated image's center.
    ndarray
        (x, y) of *xy* in the rotated image's absolute pixel coordinates.
    """
    rotated = rotate(image, angle)
    # Pixel-center origins (x, y ordering) of the source and rotated arrays.
    center_in = (np.array(image.shape[:2][::-1]) - 1) / 2.
    center_out = (np.array(rotated.shape[:2][::-1]) - 1) / 2.
    # Vector from the source image center to the point of interest.
    offset = xy - center_in
    theta = np.deg2rad(angle)
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Image y grows downward, so a CCW rotation uses this sign convention.
    rel = np.array([offset[0] * cos_t + offset[1] * sin_t,
                    -offset[0] * sin_t + offset[1] * cos_t])
    return rotated, rel, rel + center_out


@pytest.fixture
def reference_image():
    """Return a 15-row x 11-column uint8 test image containing several glyphs.

    The letter 'T' (rows 2-6) is the match target; the other shapes are
    distractors so the correlator has something to reject.
    """
    # This is the reference image. The correct location for the letter 'T' in the test image
    # is 5.5, 4.5 (x, y; pixel center)
    test_image = np.array(((0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0),
                        (0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0),
                        (0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0),
                        (0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0),
                        (0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0),
                        (0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0),
                        (0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0)), dtype=np.uint8)
    return test_image

@pytest.fixture
def moving_image():
    """Return an 11-row x 15-column uint8 image containing a single letter 'T'.

    Used as the moving (template) image; tests rotate/translate this array and
    check that the matcher recovers the known 'T' location.
    """
    # This is the moving image. The correct solution where the letter T in the moving image is
    # 7.5,5.5 (x,y; pixel center) 
    t_shape = np.array(((0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)), dtype=np.uint8)
    return t_shape


# Sub-pixel x/y offsets (pixels) and CCW rotation angles (degrees) used to
# parametrize the canned affine-transformation test; the cartesian product
# of these lists is exercised.
delta_xs = [0, 0.5, -0.5, 1, -1, 1.5, -1.5]
delta_ys = [0, 0.5, -0.5, 1, -1, 1.5, -1.5]
rotation_angles = [0, 1, -1, -90, 90, -70, 70, -60, 60, -45.23, 45.23, -33, 33, -15.1, 15.1]

@pytest.mark.parametrize('delta_x', delta_xs)
@pytest.mark.parametrize('delta_y', delta_ys)
@pytest.mark.parametrize('rotation_angle', rotation_angles)
def test_canned_affine_transformations(reference_image, moving_image, delta_x, delta_y, rotation_angle):
    """Verify that a known matcher shift, applied in the affine (rotated)
    space of the moving ROI, inverts back to the correct pixel location in
    the un-rotated moving image, for a grid of offsets and rotations.
    """
    image_size = (4,4)
    template_size = (3,3)
    x = 5
    y = 5
    x1 = 7 + delta_x  # 7 is correct
    y1 = 5 + delta_y  # 5 is correct

    # Rotate the moving_image by an arbitrary amount for the test. Rotations are CCW.
    rotated_array, new, (rx1, ry1) = rot(moving_image, (x1, y1), rotation_angle)

    # Since the moving_image array is rotated, it is necessary to compute the updated 'expected'
    # correct answer.
    _, _, expected = rot(moving_image, (7, 5), rotation_angle)

    # Get the reference ROIs from the full images above. The reference is a straight clip.
    # The moving array is taken from the rotated array with x1, y1 being the updated x1, y1 coordinates
    # after rotation.
    ref_roi = roi.Roi(reference_image, x, y, *image_size)
    moving_roi = roi.Roi(rotated_array, rx1, ry1, *template_size)  # rx1, ry1 are what autocnet would treat as the a priori coordinates.

    # Compute the affine transformation. This is how the affine is computed in the code, so replicated here.
    # If the AutoCNet code changes, this will highlight a breaking change.
    # NOTE(review): t_size is (width, height), so shift_y is derived from the
    # width and shift_x from the height — apparently swapped. Harmless here
    # only because the template is square; confirm against the library code.
    t_size = moving_roi.array.shape[:2][::-1]
    shift_y = (t_size[0] - 1) / 2.
    shift_x = (t_size[1] - 1) / 2.

    tf_rotate = tf.AffineTransform(rotation=np.radians(rotation_angle))
    tf_shift = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[shift_x, shift_y])
    # The '+' overload that is '@' matrix multiplication is read or applied left to right.
    trans = tf_shift + (tf_rotate + tf_shift_inv)

    # Pretend that the matcher has found a shift in the x and a shift in the y.
    matcher_shift_x = -delta_x
    matcher_shift_y = -delta_y
        
    # Apply the shift to the center of the moving roi to the center of the reference ROI in index space. One pixel == one index (unitless).
    new_affine_transformed_center_x = moving_roi.center[0] + matcher_shift_x  #Center is indices.
    new_affine_transformed_center_y = moving_roi.center[1] + matcher_shift_y

    # Invert the affine transformation of the new center. This result is plotted in the second figure as a red dot.
    inverse_transformed_affine_center_x, inverse_transformed_affine_center_y = trans.inverse((new_affine_transformed_center_x, new_affine_transformed_center_y))[0]
    #print(inverse_transformed_affine_center_x, inverse_transformed_affine_center_y)

    # Take the original x,y (moving_roi.x, moving_roi.y) and subtract the delta between the original ROI center and the newly computed center.
    new_x = moving_roi.x - (moving_roi.center[0] - inverse_transformed_affine_center_x) 
    new_y = moving_roi.y - (moving_roi.center[1] - inverse_transformed_affine_center_y)

    # If testing this in a notebook, uncomment for visualization.
    """fig, axes = plt.subplots(1,4)    
    axes[0].imshow(ref_roi.clip(), cmap='Greys')
    axes[0].set_title('Reference')
    axes[0].plot(4,4, 'ro')
    axes[1].imshow(moving_roi.clip(), cmap='Greys')
    axes[1].plot(inverse_transformed_affine_center_x, inverse_transformed_affine_center_y, 'ro')
    axes[1].set_title('Moving, no affine')

    vis_arr = moving_roi.clip(trans, mode='constant')

    axes[2].imshow(vis_arr, cmap='Greys')
    axes[2].plot(moving_roi.center[0], moving_roi.center[1], 'r*')  # Plot the center attribute of the roi obj. Note that I modified the property in the roi.py code to add 0.5 to each value.  
    axes[2].set_title('Moving, affine')
    axes[3].imshow(rotated_array, cmap='Greys')
    axes[3].plot(expected[0], expected[1], 'y*', markersize=10)  
    axes[3].plot(new_x, new_y, 'ro')
    show()"""
    
    # Approx is accurate to 1e-6. The affine introduces errors on the order of 1e-10 some of the time.
    assert new_x == pytest.approx(expected[0])
    assert new_y == pytest.approx(expected[1])
 No newline at end of file
+64 −32
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@ import unittest
from unittest.mock import patch

from skimage import transform as tf
from scipy.ndimage.interpolation import rotate
from skimage.util import img_as_float   
from skimage import color 
from skimage import data
@@ -19,6 +20,23 @@ from autocnet.examples import get_path
import autocnet.matcher.subpixel as sp
from autocnet.transformation import roi

def rot(image, xy, angle):
    """
    Rotate *image* about its center by *angle* degrees (counter-clockwise)
    and compute the new pixel coordinates of the point *xy* from the
    original image.

    This function is intentionally external to the AutoCNet API so that
    it can be used to generate test data without any dependence on AutoCNet.

    Returns
    -------
    im_rot : ndarray
        The rotated image (reshaped to contain the full rotation).
    new : ndarray
        (x, y) of *xy* relative to the rotated image's center.
    ndarray
        (x, y) of *xy* in the rotated image's absolute pixel coordinates.
    """
    im_rot = rotate(image,angle) 
    # Pixel-center origins (x, y ordering) of the original and rotated arrays.
    org_center = (np.array(image.shape[:2][::-1])-1)/2.
    rot_center = (np.array(im_rot.shape[:2][::-1])-1)/2.
    org = xy-org_center
    a = np.deg2rad(angle)
    # Image y increases downward, hence this sign convention for a CCW rotation.
    new = np.array([org[0]*np.cos(a) + org[1]*np.sin(a),
            -org[0]*np.sin(a) + org[1]*np.cos(a) ])
    return im_rot, new, new+rot_center

@pytest.fixture
def iris_pair(): 
    angle = 200
@@ -48,37 +66,50 @@ def test_prep_subpixel(nmatches, nstrengths):
    assert arrs[2].shape == (nmatches, nstrengths)
    assert arrs[0][0] == 0

delta_xs = [0.25, 1.5, 2.75, -0.25, -1.5, -2.75, 5]# 3.14, -3.14, 4.7, -4.7]
delta_ys = [0.25, 1.5, 2.75, -0.25, -1.5, -2.75, 5]#3.14, -3.14, 4.7, -4.7]
rotation_angles = [0, 2.5, -2.5, -5, 5] #[0, 5]# 2.22, -2.22, 5, -5, 10, -10, 23.68, -23.68]
@pytest.mark.parametrize("delta_x", delta_xs)
@pytest.mark.parametrize("delta_y", delta_ys)
@pytest.mark.parametrize("rotation_angle", rotation_angles)
def test_subpixel_transformed_template(apollo_subsets, delta_x, delta_y, rotation_angle):
    reference_image = apollo_subsets[0]
    moving_image = apollo_subsets[0]

def test_subpixel_template(apollo_subsets):
    a = apollo_subsets[0]
    b = apollo_subsets[1]
    
    ref_roi = roi.Roi(a, a.shape[0]/2, a.shape[1]/2, 41, 41)
    moving_roi = roi.Roi(b, math.floor(b.shape[0]/2), math.floor(b.shape[1]/2), 11, 11)

    affine, strength, corrmap = sp.subpixel_template(ref_roi, moving_roi, upsampling=16)
    x = 50
    y = 51
    x1 = 50 + delta_x
    y1 = 51 + delta_y  

    assert strength >= 0.80
    assert affine.translation[0] == -0.0625
    assert affine.translation[1] == 2.0625
    # Artifically rotate the b array by an arbitrary rotation angle.
    rotated_array, new, (rx1, ry1) = rot(moving_image, (x1, y1), rotation_angle)
    _, _, expected = rot(moving_image, (x, y), rotation_angle)  
    
    # Since this is not testing the quality of the matcher given different
    # reference and moving image sizes, just hard code to be something large, 
    # and something a fair bit smaller.
    ref_roi = roi.Roi(reference_image, x, y, 39, 39)
    moving_roi = roi.Roi(rotated_array, rx1, ry1, 33, 33)

    # Compute the affine transformation. This is how the affine is computed in the code, so replicated here.
    # If the AutoCNet code changes, this will highlight a breaking change.
    t_size = moving_roi.array.shape[:2][::-1]
    shift_y = (t_size[0] - 1) / 2.
    shift_x = (t_size[1] - 1) / 2.

def test_subpixel_transformed_template(apollo_subsets):
    a = apollo_subsets[0]
    b = apollo_subsets[1]
    tf_rotate = tf.AffineTransform(rotation=np.radians(rotation_angle))
    tf_shift = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[shift_x, shift_y])
    # The '+' overload that is '@' matrix multiplication is read or applied left to right.
    affine = tf_shift + (tf_rotate + tf_shift_inv)

    moving_center = math.floor(b.shape[0]/2), math.floor(b.shape[1]/2)
    transform = tf.AffineTransform(rotation=math.radians(1), scale=(1.1,1.1))
    ref_roi = roi.Roi(a, a.shape[0]/2, a.shape[1]/2, 40, 40)
    moving_roi = roi.Roi(b, *moving_center, 11, 11)
    new_affine, strength, corrmap = sp.subpixel_template(ref_roi, moving_roi, affine, upsampling=8)

    # with patch('autocnet.transformation.roi.Roi.clip', side_effect=clip_side_effect):
    affine, strength, corrmap = sp.subpixel_template(ref_roi, moving_roi, transform, upsampling=16)
    new_x, new_y = new_affine((moving_roi.x,
                               moving_roi.y))[0]
    
    assert strength >= 0.83
    assert affine.translation[0] == pytest.approx(-0.670806588)
    assert affine.translation[1] == pytest.approx(0.636804177)
    assert pytest.approx(new_x, abs=1/5) == expected[0]
    assert pytest.approx(new_y, abs=1/5) == expected[1]
    

def test_estimate_logpolar_transform(iris_pair):
@@ -206,9 +237,10 @@ def test_subpixel_phase_cooked(x, y, x1, y1, image_size, expected):
                        (0, 0, 0, 0, 0, 0, 0)), dtype=np.uint8)

    reference_roi = roi.Roi(test_image, x, y, size_x=image_size[0], size_y=image_size[1])
    walking_roi = roi.Roi(t_shape, x1, y1, size_x=image_size[0], size_y=image_size[1])
    moving_roi = roi.Roi(t_shape, x1, y1, size_x=image_size[0], size_y=image_size[1])

    affine, metrics, _ = sp.subpixel_phase(reference_roi, moving_roi)
    dx, dy = affine((moving_roi.x, moving_roi.y))[0]

    affine, metrics, _ = sp.subpixel_phase(reference_roi, walking_roi)
    dx, dy = affine((x1, y1))[0]
    assert dx == expected[0]
    assert dy == expected[1]
+2 −2
Original line number Diff line number Diff line
@@ -51,7 +51,7 @@ class Roi():

    @property
    def x(self):
        return self._x + self.axr
        return self._x #+ self.axr

    @x.setter
    def x(self, x):
@@ -59,7 +59,7 @@ class Roi():

    @property
    def y(self):
        return self._y + self.ayr
        return self._y #+ self.ayr

    @y.setter
    def y(self, y):
Loading