Commit e1b653a1 authored by Jay Laura's avatar Jay Laura
Browse files

Updates for trying to get a local transformation going

parent a8675137
Loading
Loading
Loading
Loading
+20 −12
Original line number Diff line number Diff line
@@ -126,25 +126,33 @@ def pattern_match(template, image, upsampling=8, metric=cv2.TM_CCOEFF_NORMED, er
        u_template = template
        u_image = image

    h, w = u_template.shape[:2]

    print(u_image)
    print(u_template)

    result = cv2.matchTemplate(u_image, u_template, method=metric)
    print(result)

    _, max_corr, min_loc, max_loc = cv2.minMaxLoc(result)
    
    if metric == cv2.TM_SQDIFF or metric == cv2.TM_SQDIFF_NORMED:
        x = (min_loc[0] + w//2) / upsampling
        y = (min_loc[1] + h//2) / upsampling
        x = min_loc[0]
        y = min_loc[1]
    else:
        x = ((max_loc[0] + w//2)) / upsampling
        y = ((max_loc[1] + h//2)) / upsampling
        x = max_loc[0]
        y = max_loc[1]

    print('things', x, y, max_loc, h, w)
    return x, y, max_corr, result
    # Transform from the results array shape to the template shape
    x = x - (result.shape[1] - u_template.shape[1]) // 2
    y = y - (result.shape[0] - u_template.shape[0]) // 2

    # Recenter the origin from the upper left to the center of the template
    ideal_x = u_template.shape[1] // 2
    ideal_y = u_template.shape[0] // 2

    x -= ideal_x
    y -= ideal_y

    y /= upsampling
    x /= upsampling


    return -x, -y, max_corr, result

    # -1 because the returned results array is W-w+1 and H-h+1 in shape, 
    # where W, H are the width and height of the image and w,h are the 
+25 −7
Original line number Diff line number Diff line
@@ -263,7 +263,6 @@ def subpixel_template(reference_roi,

    ref_clip = reference_roi.clip()
    moving_clip = moving_roi.clip(affine)
    print(affine)
    if moving_clip.var() == 0:
        warnings.warn('Input ROI has no variance.')
        return [None] * 3
@@ -273,10 +272,29 @@ def subpixel_template(reference_roi,
    shift_x, shift_y, metrics, corrmap = func(moving_clip, ref_clip, **kwargs)
    if shift_x is None:
        return None, None, None
    print(shift_x, shift_y)
    # get shifts in input pixel space
    shift_x, shift_y = affine.inverse([shift_x, shift_y])[0]
    new_affine = tf.AffineTransform(translation=(-shift_x, -shift_y))

    # Shift x and shift y are computed in the affine space.
    moving_clip_center = (np.array(moving_clip.shape[:2][::-1])-1)/2.
    print(moving_clip_center)

    new_x = moving_clip_center[0] + shift_x
    new_y = moving_clip_center[1] + shift_y
    print('Pre-transform', shift_x, shift_y, moving_clip.shape, moving_clip_center, new_x, new_y, affine)

    # convert shifts in input pixel space
    image_space_new_x, image_space_new_y = affine([new_x, new_y])[0]
    print('Image Space NEW', image_space_new_x, image_space_new_y)

    x_shift_in_image = image_space_new_x - moving_roi.center[0]
    y_shift_in_image = image_space_new_y - moving_roi.center[1]

    new_affine = tf.AffineTransform(affine.params)
    new_affine.translation = (x_shift_in_image,y_shift_in_image)

    """new_affine = tf.AffineTransform(translation=(x_shift_in_image,
                                                 y_shift_in_image),
                                    rotation=tf.AffineTransform(affine).rotation,
                                    shear=tf.AffineTransform(affine).shear)"""

    return new_affine,  metrics, corrmap

@@ -1815,7 +1833,7 @@ def subpixel_register_point_smart(pointid,
                     'status': False}
            else:
                metric = maxcorr
                new_x, new_y = updated_affine([measure.sample, measure.line])[0]
                new_x, new_y = updated_affine.inverse([measure.sample, measure.line])[0]
                dist = np.linalg.norm([measure.line-new_x, 
                                      measure.sample-new_y])
                cost = cost_func(dist, metric)
+6 −4
Original line number Diff line number Diff line
@@ -141,10 +141,12 @@ def estimate_local_affine(reference_image, moving_image, center_x, center_y, siz

    # This rotates about the center of the moving ROI
    shift_x, shift_y = moving_roi.center
    tf_shift = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[shift_x, shift_y])
    tf_shift = tf.SimilarityTransform(translation=[shift_x, shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    
    # Define the full chain
    trans = (tf_shift + (tf_rotate + tf_shift_inv))
    # Define the full chain by multiplying the transformations (read right
    # to left): shift to the center, apply the rotation, then shift back
    # to the origin.
    trans = tf_shift_inv + tf_rotate + tf_shift

    return trans
 No newline at end of file
+3 −5
Original line number Diff line number Diff line
@@ -135,7 +135,7 @@ class Roi():
    @property
    def center(self):
        ie = self.image_extent
        return (ie[1] - ie[0])/2., (ie[3]-ie[2])/2.
        return ((ie[1] - ie[0])-1)/2., ((ie[3]-ie[2])-1)/2.

    @property
    def is_valid(self):
@@ -165,7 +165,7 @@ class Roi():
            data = self.data.read_array(pixels=pixels)
        return img_as_float32(data)

    def clip(self, affine=None, dtype=None, mode="reflect"):
    def clip(self, affine=None, dtype=None, mode="constant"):
        """
        Compatibility function that makes a call to the array property.
        Warning: The dtype passed in via this function resets the dtype attribute of this
@@ -191,9 +191,7 @@ class Roi():
            #                      f"{((self.size_y * 2) + 1, (self.size_x * 2) + 1)} was asked for. Select, " +
            #                      "a smaller region of interest" )


            #Affine transform the larger, moving array
            transformed_array = tf.warp(array_to_warp, affine, order=3, mode=mode)
            transformed_array = tf.warp(array_to_warp, affine.inverse, order=3, mode=mode, cval=0)
            return transformed_array

        return img_as_float32(self.array)