Commit dee8d972 authored by Laura, Jason R's avatar Laura, Jason R
Browse files

Merge branch 'subpixelapi' into 'subpixelapi'

Subpixel api updates

See merge request astrogeology/autocnet!634
parents 5915d8ca 821f64ac
Loading
Loading
Loading
Loading
+3 −6
Original line number Diff line number Diff line
@@ -457,12 +457,9 @@ class Edge(dict, MutableMapping):
        d_img = self.destination.geodata

        # Determine which algorithm is going to be used.
        if method == 'phase':
        func = sp.iterative_phase
        nstrengths = 2
        elif method == 'template':
            func = sp.subpixel_template
            nstrengths = 1
        
        shifts_x, shifts_y, strengths, new_x, new_y = sp._prep_subpixel(len(matches), nstrengths)

        # for each edge, calculate this for each keypoint pair
+7 −0
Original line number Diff line number Diff line
@@ -117,6 +117,8 @@ def pattern_match(template, image, upsampling=16, metric=cv2.TM_CCOEFF_NORMED, e
    if upsampling < 1:
        raise ValueError

    print(template)
    print(image)
    # Fit a 3rd order polynomial to upsample the images
    if upsampling != 1:
        u_template = zoom(template, upsampling, order=3)
@@ -128,6 +130,8 @@ def pattern_match(template, image, upsampling=16, metric=cv2.TM_CCOEFF_NORMED, e
    result = cv2.matchTemplate(u_image, u_template, method=metric)
    _, max_corr, min_loc, max_loc = cv2.minMaxLoc(result)
    
    print("min/max loc: ", min_loc, max_loc)

    if metric == cv2.TM_SQDIFF or metric == cv2.TM_SQDIFF_NORMED:
        x, y = (min_loc[0], min_loc[1])
    else:
@@ -141,6 +145,9 @@ def pattern_match(template, image, upsampling=16, metric=cv2.TM_CCOEFF_NORMED, e
    y += (u_template.shape[0] / 2.)
    x += (u_template.shape[1] / 2.)

    print("x,ideal x, ", x, ideal_x, (x - ideal_x) / upsampling)
    print("y,ideal y, ", y, ideal_y, (y - ideal_y) / upsampling)

    x = (x - ideal_x) / upsampling
    y = (y - ideal_y) / upsampling
    return x, y, max_corr, result
+57 −417

File changed.

Preview size limit exceeded, changes collapsed.

+44 −110
Original line number Diff line number Diff line
import math
import os
from re import A
import sys
import unittest
from unittest.mock import patch
@@ -47,77 +48,37 @@ def test_prep_subpixel(nmatches, nstrengths):
    assert arrs[2].shape == (nmatches, nstrengths)
    assert arrs[0][0] == 0

@pytest.mark.parametrize("center_x, center_y, size, expected", [(4, 4, 9, 404),
                                                          (55.4, 63.1, 27, 6355)])
def test_clip_roi(center_x, center_y, size, expected):
    """Clipping an ROI from a synthetic ramp image yields a known pixel mean."""
    # 100x100 image whose pixel value equals its flattened index, so the mean
    # of any clipped window uniquely identifies which pixels were selected.
    img = np.arange(10000).reshape(100, 100)

    clip, axr, ayr = sp.clip_roi(img, center_x, center_y, size)

    # Only the mean is checked; the fractional residuals (axr, ayr) are unused.
    assert clip.mean() == expected

def clip_side_effect(*args, **kwargs):
    # Mock side effect substituted for autocnet.matcher.subpixel.clip_roi in
    # the tests below via unittest.mock.patch.
    # NOTE(review): `a` and `b` are free variables — presumably the module- or
    # fixture-level apollo subset images; confirm they are in scope at call time.
    if np.array_equal(a, args[0]):
        # Requests against the reference image return it whole, with zero
        # sub-pixel residuals.
        return a, 0, 0
    else:
        # Otherwise return a 21x21 window centered on image `b`. math.modf
        # splits each center coordinate into its fractional part (returned as
        # the sub-pixel residual) and its integer part (used for indexing).
        center_y = b.shape[0] / 2
        center_x = b.shape[1] / 2
        bxr, bx = math.modf(center_x)
        byr, by = math.modf(center_y)
        bx = int(bx)
        by = int(by)
        return b[by-10:by+11, bx-10:bx+11], bxr, byr

def test_subpixel_template(apollo_subsets):
    a = apollo_subsets[0]
    b = apollo_subsets[1]
    with patch('autocnet.matcher.subpixel.clip_roi', side_effect=clip_side_effect):
        nx, ny, strength, _ = sp.subpixel_template(a.shape[1]/2, a.shape[0]/2,
                                                b.shape[1]/2, b.shape[0]/2,
                                                a, b, upsampling=16)

    assert strength >= 0.99
    assert nx == 50.5
    assert ny == 52.4375

@pytest.mark.parametrize("loc, failure", [((0,4), True),
                                          ((4,0), True),
                                          ((1,1), False)])
def test_subpixel_template_at_edge(apollo_subsets, loc, failure):
    a = apollo_subsets[0]
    b = apollo_subsets[1]
    
    def func(*args, **kwargs):
        corr = np.zeros((10,10))
        corr[loc[0], loc[1]] = 10
        return 0, 0, 0, corr

    with patch('autocnet.matcher.subpixel.clip_roi', side_effect=clip_side_effect):
        if failure:
            with pytest.warns(UserWarning, match=r'Maximum correlation \S+'):
                nx, ny, strength, _ = sp.subpixel_template(a.shape[1]/2, a.shape[0]/2,
                                                        b.shape[1]/2, b.shape[0]/2,
                                                        a, b, upsampling=16,
                                                        func=func)
        else:
            nx, ny, strength, _ = sp.subpixel_template(a.shape[1]/2, a.shape[0]/2,
                                                        b.shape[1]/2, b.shape[0]/2,
                                                        a, b, upsampling=16,
                                                        func=func)
            assert nx == 50.5
    ref_roi = roi.Roi(a, a.shape[0]/2, a.shape[1]/2, 41, 41)
    moving_roi = roi.Roi(b, math.floor(b.shape[0]/2), math.floor(b.shape[1]/2), 11, 11)

    affine, strength, corrmap = sp.subpixel_template(ref_roi, moving_roi, upsampling=16)

    assert strength >= 0.80
    assert affine.translation[0] == -0.0625
    assert affine.translation[1] == 2.0625



def test_subpixel_transformed_template(apollo_subsets):
    a = apollo_subsets[0]
    b = apollo_subsets[1]

    moving_center = math.floor(b.shape[0]/2), math.floor(b.shape[1]/2)
    transform = tf.AffineTransform(rotation=math.radians(1), scale=(1.1,1.1))
    with patch('autocnet.matcher.subpixel.clip_roi', side_effect=clip_side_effect):
        nx, ny, strength, _ = sp.subpixel_transformed_template(a.shape[1]/2, a.shape[0]/2,
                                                b.shape[1]/2, b.shape[0]/2,
                                                a, b, transform, upsampling=16)
    ref_roi = roi.Roi(a, a.shape[0]/2, a.shape[1]/2, 40, 40)
    moving_roi = roi.Roi(b, *moving_center, 11, 11)

    # with patch('autocnet.transformation.roi.Roi.clip', side_effect=clip_side_effect):
    affine, strength, corrmap = sp.subpixel_template(ref_roi, moving_roi, transform, upsampling=16)
    
    assert strength >= 0.83
    assert nx == pytest.approx(50.576284)
    assert ny == pytest.approx(54.0081)
    assert affine.translation[0] == pytest.approx(-0.670806588)
    assert affine.translation[1] == pytest.approx(0.636804177)


def test_estimate_logpolar_transform(iris_pair):
@@ -132,47 +93,21 @@ def test_estimate_logpolar_transform(iris_pair):

def test_fourier_mellen(iris_pair):
    img1, img2 = iris_pair 
    nx, ny, error = sp.fourier_mellen(img1, img2, phase_kwargs = {"reduction" : 11, "size":(401, 401), "convergence_threshold" : 1, "max_dist":100}) 

    ref_roi = roi.Roi(img1, img1.shape[1]/2, img1.shape[0]/2, 401, 401)
    moving_roi = roi.Roi(img2, img2.shape[1]/2, img2.shape[0]/2, 401, 401)
    nx, ny, error = sp.fourier_mellen(ref_roi, moving_roi, phase_kwargs = {"reduction" : 11, "size":(401, 401), "convergence_threshold" : 1, "max_dist":100}) 
    
    assert pytest.approx(nx, 0.01) == 996.39 
    assert pytest.approx(ny, 0.01) ==  984.912 
    assert pytest.approx(error, 0.01) == 0.0422 


@pytest.mark.parametrize("loc, failure", [((0,4), True),
                                          ((4,0), True),
                                          ((1,1), False)])
def test_subpixel_transformed_template_at_edge(apollo_subsets, loc, failure):
    a = apollo_subsets[0]
    b = apollo_subsets[1]

    def func(*args, **kwargs):
        corr = np.zeros((5,5))
        corr[loc[0], loc[1]] = 10
        return 0, 0, 0, corr

    transform = tf.AffineTransform(rotation=math.radians(1), scale=(1.1,1.1))
    with patch('autocnet.matcher.subpixel.clip_roi', side_effect=clip_side_effect):
        if failure:
            with pytest.warns(UserWarning, match=r'Maximum correlation \S+'):
                print(a.shape[1]/2, a.shape[0]/2,b.shape[1]/2, b.shape[0]/2,
                                                        a, b)
                nx, ny, strength, _ = sp.subpixel_transformed_template(a.shape[1]/2, a.shape[0]/2,
                                                        b.shape[1]/2, b.shape[0]/2,
                                                        a, b, transform, upsampling=16,
                                                        func=func)
        else:
            nx, ny, strength, _ = sp.subpixel_transformed_template(a.shape[1]/2, a.shape[0]/2,
                                                        b.shape[1]/2, b.shape[0]/2,
                                                        a, b, transform, upsampling=16,
                                                        func=func)
            assert nx == 50.5

@pytest.mark.parametrize("convergence_threshold, expected", [(2.0, (50.49, 52.44, -9.5e-20))])
@pytest.mark.parametrize("convergence_threshold, expected", [(2.0, (50.51, 48.57, -9.5e-20))])
def test_iterative_phase(apollo_subsets, convergence_threshold, expected):
    reference_image = apollo_subsets[0]
    walking_image = apollo_subsets[1]
    image_size = (51, 51)
    image_size = (31, 31)

    ref_x, ref_y = reference_image.shape[0]/2, reference_image.shape[1]/2
    walk_x, walk_y = walking_image.shape[0]/2, walking_image.shape[1]/2
@@ -197,13 +132,13 @@ def test_check_image_size(data, expected):
    assert sp.check_image_size(data) == expected

@pytest.mark.parametrize("x, y, x1, y1, image_size, template_size, expected",[
    (4, 3, 4, 2, (5,5), (3,3), (4,2)),
    (4, 3, 4, 2, (7,7), (3,3), (4,2)),  # Increase the search image size
    (4, 3, 4, 2, (7,7), (5,5), (4,2)), # Increase the template size
    (4, 3, 3, 2, (7,7), (3,3), (4,2)), # Move point in the x-axis
    (4, 3, 5, 3, (7,7), (3,3), (4,2)), # Move point in the other x-direction
    (4, 3, 4, 1, (7,7), (3,3), (4,2)), # Move point negative in the y-axis
    (4, 3, 4, 3, (7,7), (3,3), (4,2))  # Move point positive in the y-axis
    (4, 3, 4, 3, (3,3), (2,2), (4,3)),
    (4, 3, 4, 3, (3,3), (2,2), (4,3)),  # Increase the search image size
    (4, 3, 4, 3, (3,3), (2,2), (4,3)), # Increase the template size
    (4, 3, 3, 3, (3,3), (2,2), (4,3)), # Move point in the x-axis
    (4, 3, 5, 3, (3,3), (2,2), (4,3)), # Move point in the other x-direction
    (4, 3, 4, 2, (3,3), (2,2), (4,3)), # Move point negative in the y-axis
    (4, 3, 4, 4, (3,3), (2,2), (4,3))  # Move point positive in the y-axis

])
def test_subpixel_template_cooked(x, y, x1, y1, image_size, template_size, expected):
@@ -223,19 +158,21 @@ def test_subpixel_template_cooked(x, y, x1, y1, image_size, template_size, expec

    # Should yield (-3, 3) offset from image center
    t_shape = np.array(((0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 1, 1, 1, 0, 0, 0),
                        (0, 0, 0, 0, 1, 0, 0, 0, 0),
                        (0, 0, 0, 0, 1, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 0, 0, 0, 0, 0, 0, 0)), dtype=np.uint8)

    dx, dy, corr, corrmap = sp.subpixel_template(x, y, x1, y1, 
                                                 test_image, t_shape,
                                                 image_size=image_size, 
                                                 template_size=template_size, 
                                                 upsampling=1)
    assert corr >= 1.0  # geq because sometime returning weird float > 1 from OpenCV
    assert dx == expected[0]
    assert dy == expected[1]
    ref_roi = roi.Roi(test_image, x, y, *image_size)
    moving_roi = roi.Roi(t_shape, x1, y1, *template_size)
    new_affine, corr, corrmap = sp.subpixel_template(ref_roi, moving_roi, upsampling=1)
    nx, ny = new_affine([x1,y1])[0]
    # should be 1.0, but in one test the window has extra 1's so correlation goes down
    assert corr >= .8  # geq because sometime returning weird float > 1 from OpenCV
    assert nx == expected[0]
    assert ny == expected[1]

@pytest.mark.parametrize("x, y, x1, y1, image_size, expected",[
    (4, 3, 3, 2, (1,1), (3,2)),
@@ -243,7 +180,6 @@ def test_subpixel_template_cooked(x, y, x1, y1, image_size, template_size, expec
    (4, 3, 3, 2, (2,2), (3,2)), # Increase the template size
    (4, 3, 2, 2, (2,2), (3,2)), # Move point in the x-axis
    (4, 3, 4, 3, (2,2), (3,2)), # Move point in the other x-direction
    (4, 3, 3, 1, (2,2), (3,2)), # Move point negative in the y-axis; also tests size reduction
    (4, 3, 3, 3, (2,2), (3,2))  # Move point positive in the y-axis

])
@@ -262,7 +198,6 @@ def test_subpixel_phase_cooked(x, y, x1, y1, image_size, expected):
                           (0, 1, 1, 1, 0, 0, 1, 0, 1),
                           (0, 0, 0, 0, 0, 0, 1, 1, 1)), dtype=np.uint8)

    # Should yield (-3, 3) offset from image center
    t_shape = np.array(((0, 0, 0, 0, 0, 0, 0),
                        (0, 0, 1, 1, 1, 0, 0),
                        (0, 0, 0, 1, 0, 0, 0),
@@ -274,7 +209,6 @@ def test_subpixel_phase_cooked(x, y, x1, y1, image_size, expected):
    walking_roi = roi.Roi(t_shape, x1, y1, size_x=image_size[0], size_y=image_size[1])

    affine, metrics, _ = sp.subpixel_phase(reference_roi, walking_roi)

    dx, dy = affine.inverse((x1, y1))[0]
    dx, dy = affine((x1, y1))[0]
    assert dx == expected[0]
    assert dy == expected[1]
+61 −1
Original line number Diff line number Diff line
@@ -4,6 +4,7 @@ import numpy as np
from plio.io.io_gdal import GeoDataset
from skimage import transform as tf

from autocnet.transformation.roi import Roi
from autocnet.spatial import isis

log = logging.getLogger(__name__)
@@ -88,3 +89,62 @@ def estimate_affine_from_sensors(reference_image,
    t2 = time.time()
    log.debug(f'Estimation of the transformation took {t2-t1} seconds.')
    return affine


def estimate_local_affine(reference_image, moving_image, center_x, center_y, size_x, size_y):
    """
    Estimate an affine transformation for a local region of interest (ROI).

    Similar to estimate_affine_from_sensors, but restricted to an ROI: the
    returned transform rotates/shears/scales about the moving ROI's center,
    while translation is absorbed into the choice of the ROI center itself.

    Parameters
    ----------
    reference_image : plio.io.io_gdal.GeoDataset
                      Image that is expected to be used as the reference during the matching process, 
                      points are laid onto here and projected onto moving image to compute an affine
    moving_image : plio.io.io_gdal.GeoDataset
                   Image that is expected to move around during the matching process, 
                   points are projected onto this image to compute an affine  
    center_x : number
               center x (aka center sample) of the ROI in reference image pixel space
    center_y : number
               center y (aka center line) of the ROI in reference image pixel space
    size_x : number
             distance from the center to the end of the ROI window in the x (aka sample) direction. 
             This is in reference image pixel space, resulting roi shape is (size_y*2, size_x*2)
    size_y : number
             distance from the center to the end of the ROI window in the y (aka line) direction, 
             This is in reference image pixel space, resulting roi shape is (size_y*2, size_x*2)

    Returns
    -------
    trans : skimage.transform.AffineTransform
        Composite affine (shift-to-origin + rotate/shear/scale + shift-back)
        that aligns the moving ROI about its own center.
    """
    # Full-image affine from the sensor models; supplies the rotation, shear,
    # and scale components reused below.
    affine_transform = estimate_affine_from_sensors(reference_image, moving_image, center_x, center_y)

    ref_center = (center_x, center_y)

    # MOVING NO AFFINE; Get the full moving image area so that an applied affine transformation that 
    # adds no data around 1+ edge does not fool the to be applied matcher.
    # The affine transformed center is a better match than the a priori sensor coords at this point.
    affine_center = affine_transform(ref_center)[0]
    moving_roi = Roi(moving_image, *affine_center, size_x=size_x, size_y=size_y)

    # The above coordinate transformation to get the center of the ROI handles translation. 
    # So, we only need to rotate/shear/scale the ROI. Omitting scale, which should be 1 (?) results
    # in an affine transformation that does not match the full image affine
    tf_rotate = tf.AffineTransform(rotation=affine_transform.rotation, 
                                          shear=affine_transform.shear,
                                          scale=affine_transform.scale)

    # Rotate about the ROI center: translate the center to the origin, apply
    # the rotation/shear/scale, then translate back.
    shift_x, shift_y = moving_roi.center
    tf_shift = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[shift_x, shift_y])
    
    # Define the full chain
    trans = (tf_shift + (tf_rotate + tf_shift_inv))

    return trans
 No newline at end of file
Loading