Commit a5bd3e17 authored by Kelvin Rodriguez's avatar Kelvin Rodriguez
Browse files

removed some functions and added others

parent 8fcbafa2
Loading
Loading
Loading
Loading
+3 −6
Original line number Diff line number Diff line
@@ -457,12 +457,9 @@ class Edge(dict, MutableMapping):
        d_img = self.destination.geodata

        # Determine which algorithm is going to be used.
        if method == 'phase':
        func = sp.iterative_phase
        nstrengths = 2
        elif method == 'template':
            func = sp.subpixel_template
            nstrengths = 1
        
        shifts_x, shifts_y, strengths, new_x, new_y = sp._prep_subpixel(len(matches), nstrengths)

        # for each edge, calculate this for each keypoint pair
+18 −321
Original line number Diff line number Diff line
@@ -61,6 +61,7 @@ def check_geom_func(func):

    raise Exception(f"{func} not a valid geometry function.")


def check_match_func(func):
    match_funcs = {
        "classic": subpixel_template_classic,
@@ -77,6 +78,7 @@ def check_match_func(func):

    raise Exception(f"{func} not a valid matching function.")


# TODO: look into KeyPoint.size and perhaps use to determine an appropriately-sized search/template.
def _prep_subpixel(nmatches, nstrengths=2):
    """
@@ -118,6 +120,7 @@ def _prep_subpixel(nmatches, nstrengths=2):

    return shifts_x, shifts_y, strengths, new_x, new_y


def check_image_size(imagesize):
    """
    Given an x,y tuple, ensure that the values
@@ -145,6 +148,7 @@ def check_image_size(imagesize):
    y = floor(y/2)
    return x,y


def clip_roi(img, center_x, center_y, size_x=200, size_y=200, dtype="uint64"):
    """
    Given an input image, clip a square region of interest
@@ -197,6 +201,7 @@ def clip_roi(img, center_x, center_y, size_x=200, size_y=200, dtype="uint64"):
            return None, 0, 0
    return subarray, axr, ayr


def subpixel_phase(reference_roi, walking_roi, affine=None, **kwargs):
    """
    Apply the spectral domain matcher to a search and template image. To
@@ -260,180 +265,8 @@ def subpixel_phase(reference_roi, walking_roi, affine=None, **kwargs):
    new_affine = tf.AffineTransform(translation=(shift_x, shift_y))
    return new_affine, error, diffphase

def subpixel_transformed_template(sx, sy, dx, dy,
                                  s_img, d_img,
                                  transform,
                                  image_size=(251, 251),
                                  template_size=(51, 51),
                                  template_buffer=5,
                                  func=pattern_match,
                                  verbose=False,
                                  **kwargs):
    """
    Uses a pattern-matcher on subsets of two images determined from the passed-in keypoints and optional sizes to
    compute an x and y offset from the search keypoint to the template keypoint and an associated strength.

    The destination template is warped (rotation/shear from ``transform``) into the
    source image's orientation before matching, and the resulting shift is projected
    back into destination image space.

    Parameters
    ----------
    sx : Numeric
         Source X coordinate

    sy : Numeric
         Source y coordinate

    dx : Numeric
         The destination x coordinate

    dy : Numeric
         The destination y coordinate

    s_img : GeoDataset
            The source image GeoDataset

    d_img : GeoDataset
            The destination image GeoDataset

    transform : object
                A skimage transform object that has scale. The transform object is
                used to project the template into the image.

    image_size : tuple
                 (xsize, ysize) of the image that is searched within (this should be larger
                 than the template size)

    template_size : tuple
                    (xsize, ysize) of the template to iterate over the image in order
                    to identify the area(s) of highest correlation.

    template_buffer : int
                      The inverse buffer applied to the transformed template image. When
                      the warp is applied to project from the template into the image, some
                      amount of no data exists around the edges. This variable is used to clip
                      some number of pixels off the edges of the template. The higher the rotation
                      the higher this value should be.

    func : callable
           The function used to pattern match

    verbose : bool
              If true, generate plots of the matches

    Returns
    -------
    dx : float
         Updated destination x coordinate, or None on failure

    dy : float
         Updated destination y coordinate, or None on failure

    metrics : float
              Strength of the correspondence in the range [-1, 1]

    corrmap : ndarray
              An n,m array of correlation coefficients

    See Also
    --------
    autocnet.matcher.naive_template.pattern_match : for the kwargs that can be passed to the matcher
    autocnet.matcher.naive_template.pattern_match_autoreg : for the kwargs that can be passed to the autoreg style matcher
    """
    image_size = check_image_size(image_size)
    template_size = check_image_size(template_size)

    template_size_x = int(template_size[0] * transform.scale[0])
    template_size_y = int(template_size[1] * transform.scale[1])

    s_roi = roi.Roi(s_img, sx, sy, size_x=image_size[0], size_y=image_size[1])
    d_roi = roi.Roi(d_img, dx, dy, size_x=template_size_x, size_y=template_size_y)

    if not s_roi.is_valid or not d_roi.is_valid:
        return [None] * 4

    # dtype lookups are best-effort; a non-ISIS label simply yields None.
    try:
        s_image_dtype = isis.isis2np_types[pvl.load(s_img.file_name)["IsisCube"]["Core"]["Pixels"]["Type"]]
    except Exception:
        s_image_dtype = None

    try:
        d_template_dtype = isis.isis2np_types[pvl.load(d_img.file_name)["IsisCube"]["Core"]["Pixels"]["Type"]]
    except Exception:
        d_template_dtype = None

    s_image = bytescale(s_roi.array)
    d_template = bytescale(d_roi.array)

    if verbose:
        fig, axs = plt.subplots(1, 5, figsize=(20, 10))
        # Plot of the original image and template
        axs[0].imshow(s_image, cmap='Greys')
        axs[0].set_title('Destination')
        axs[1].imshow(d_template, cmap='Greys')
        axs[1].set_title('Original Source')

    # Build the transformation chain: shift to the origin, rotate/shear, shift back,
    # so the rotation happens about the center of the ROI.
    shift_x, shift_y = d_roi.center

    tf_rotate = tf.AffineTransform(rotation=transform.rotation, shear=transform.shear)
    tf_shift = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[shift_x, shift_y])

    # Define the full chain and the inverse
    trans = (tf_shift + (tf_rotate + tf_shift_inv))
    itrans = trans.inverse

    # Now apply the affine transformation
    transformed_roi = tf.warp(d_template,
                              itrans,
                              order=3)

    # Scale the source arr to the destination array size.
    # NOTE(review): transform.scale is unpacked as (y, x) here — skimage's
    # AffineTransform.scale is (sx, sy); confirm the intended ordering upstream.
    scale_y, scale_x = transform.scale
    template_shape_y, template_shape_x = d_template.shape
    # BUG FIX: resize expects (rows, cols) = (y, x), and the y axis must be
    # scaled by scale_y (the original used scale_x for both axes).
    scaled_roi = tf.resize(transformed_roi, (int(template_shape_y/scale_y), int(template_shape_x/scale_x)))

    # Clip the transformed template to avoid no data around the edges
    buffered_template = scaled_roi[template_buffer:-template_buffer, template_buffer:-template_buffer]

    # Apply the matcher on the transformed array
    shift_x, shift_y, metrics, corrmap = func(bytescale(buffered_template), s_image, **kwargs)

    # Hard check here to see if we are on the absolute edge of the template.
    # max_coord is (x, y); corrmap.shape is (rows, cols) = (y, x), so x is
    # compared against shape[1] and y against shape[0] (the original had these swapped).
    max_coord = np.unravel_index(corrmap.argmax(), corrmap.shape)[::-1]
    if 0 in max_coord or corrmap.shape[1]-1 == max_coord[0] or corrmap.shape[0]-1 == max_coord[1]:
        warnings.warn('Maximum correlation is at the edge of the template. Results are ambiguous.', UserWarning)
        return [None] * 4

    if verbose:
        axs[2].imshow(transformed_roi, cmap='Greys')
        axs[2].set_title('Affine Transformed Source')
        axs[3].imshow(buffered_template, cmap='Greys')
        axs[3].set_title('Scaled and Buffered Source')
        axs[4].imshow(corrmap)
        axs[4].set_title('Correlation')
        plt.show()

    # Project the center into the affine space
    projected_center = itrans(d_roi.center)[0]

    # Shifts need to be scaled back into full resolution, affine space
    shift_x *= scale_x
    shift_y *= scale_y

    # Apply the shifts (computed using the warped image) to the affine space center
    new_projected_x = projected_center[0] - shift_x
    new_projected_y = projected_center[1] - shift_y

    # Project the updated location back into image space
    new_unprojected_x, new_unprojected_y = trans([new_projected_x, new_projected_y])[0]

    # Apply the shift
    dx = d_roi.x - (d_roi.center[0] - new_unprojected_x)
    dy = d_roi.y - (d_roi.center[1] - new_unprojected_y)

    return dx, dy, metrics, corrmap


def subpixel_template(reference_roi, moving_roi, affine=tf.AffineTransform(), mode='reflect',
                      func=pattern_match,
                      **kwargs):
    """
    Apply a template matcher to the moving ROI, after warping it by the seed
    affine, and return an updated affine encoding the measured sub-pixel shift.

    Parameters
    ----------
    reference_roi : Roi
                    The ROI that is held fixed during matching.

    moving_roi : Roi
                 The ROI that is warped and matched against the reference.

    affine : skimage.transform.AffineTransform
             scikit-image Affine transformation, used as a seed transform that
             is applied to the moving ROI before matching.

    mode : str
           Mode passed into warp used to determine how to pad null pixels from
           the affine transform. See skimage.transform.warp.

    func : callable
           Some subpixel template matching function.

    Returns
    -------
    new_affine : skimage.transform.AffineTransform
                 Translation-only affine encoding the measured shift, or None
                 on failure.

    metrics : float
              Strength of the correspondence.

    corrmap : ndarray
              An n,m array of correlation coefficients.
    """
    ref_clip = reference_roi.clip()
    moving_clip = moving_roi.clip()

    # Check for unusable clips BEFORE warping/statistics: the original code
    # called tf.warp/var() first, which raises on a None clip.
    if (ref_clip is None) or (moving_clip is None):
        return None, None, None

    moving_clip = tf.warp(moving_clip, affine, order=3, mode=mode)

    if moving_clip.var() == 0:
        warnings.warn('Input ROI has no variance.')
        return [None] * 3

    shift_x, shift_y, metrics, corrmap = func(img_as_float32(moving_clip), img_as_float32(ref_clip), **kwargs)

    if shift_x is None:
        return None, None, None

    # Get shifts in input pixel space by undoing the seed affine.
    shift_x, shift_y = affine.inverse([shift_x, shift_y])[0]
    new_affine = tf.AffineTransform(translation=(shift_x, shift_y))

    return new_affine, metrics, corrmap


def subpixel_template(sx, sy, dx, dy,
                      s_img, d_img,
                      image_size=(251, 251),
                      template_size=(51, 51),
                      func=pattern_match,
                      verbose=False,
                      **kwargs):
    """
    Uses a pattern-matcher on subsets of two images determined from the passed-in keypoints and optional sizes to
    compute an x and y offset from the search keypoint to the template keypoint and an associated strength.

    Parameters
    ----------
    sx : Numeric
         Source X coordinate

    sy : Numeric
         Source y coordinate

    dx : Numeric
         The destination x coordinate

    dy : Numeric
         The destination y coordinate

    s_img : GeoDataset
            The source image GeoDataset

    d_img : GeoDataset
            The destination image GeoDataset

    image_size : tuple
                 (xsize, ysize) of the image that is searched within (this should be larger
                 than the template size)

    template_size : tuple
                    (xsize, ysize) of the template to iterate over the image in order
                    to identify the area(s) of highest correlation.

    func : callable
           The function used to pattern match

    verbose : bool
              If true, generate plots of the matches

    Returns
    -------
    dx : float
         Updated destination x coordinate, or None on failure

    dy : float
         Updated destination y coordinate, or None on failure

    metrics : float
              Strength of the correspondence in the range [-1, 1]

    corrmap : ndarray
              An n,m array of correlation coefficients

    See Also
    --------
    autocnet.matcher.naive_template.pattern_match : for the kwargs that can be passed to the matcher
    autocnet.matcher.naive_template.pattern_match_autoreg : for the kwargs that can be passed to the autoreg style matcher
    """
    image_size = check_image_size(image_size)
    template_size = check_image_size(template_size)

    template_size_x = template_size[0]
    template_size_y = template_size[1]

    s_roi = roi.Roi(s_img, sx, sy, size_x=image_size[0], size_y=image_size[1])
    d_roi = roi.Roi(d_img, dx, dy, size_x=template_size_x, size_y=template_size_y)

    # A zero-variance template cannot be matched meaningfully.
    if d_roi.variance == 0:
        return [None] * 4

    if not s_roi.is_valid or not d_roi.is_valid:
        return [None] * 4

    s_image = s_roi.array
    d_template = d_roi.array

    if (s_image is None) or (d_template is None):
        return None, None, None, None

    # Apply the matcher function
    shift_x, shift_y, metrics, corrmap = func(d_template, s_image, **kwargs)

    if verbose:
        fig, axs = plt.subplots(1, 3, figsize=(20, 10))
        axs[0].imshow(s_image, cmap='Greys')
        axs[1].imshow(d_template, cmap='Greys')
        # BUG FIX: subplots(1, 3) only has axes 0-2; the original indexed axs[3]
        # which raised an IndexError whenever verbose=True.
        axs[2].imshow(corrmap)
        plt.show()

    # Hard check here to see if we are on the absolute edge of the template.
    # max_coord is (x, y); corrmap.shape is (rows, cols) = (y, x), so x is
    # compared against shape[1] and y against shape[0] (the original had these swapped).
    max_coord = np.unravel_index(corrmap.argmax(), corrmap.shape)[::-1]
    if 0 in max_coord or corrmap.shape[1]-1 == max_coord[0] or corrmap.shape[0]-1 == max_coord[1]:
        warnings.warn('Maximum correlation is at the edge of the template. Results are ambiguous.', UserWarning)
        return [None] * 4

    # Apply the shift to the center of the ROI object
    dx = d_roi.x - shift_x
    dy = d_roi.y - shift_y

    return dx, dy, metrics, corrmap

def subpixel_ciratefi(sx, sy, dx, dy, s_img, d_img, search_size=251, template_size=51, **kwargs):
    """
    Uses a pattern-matcher on subsets of two images determined from the passed-in keypoints and optional sizes to
@@ -668,6 +386,7 @@ def subpixel_ciratefi(sx, sy, dx, dy, s_img, d_img, search_size=251, template_si
    dy += (y_offset + t_roi.ayr)
    return dx, dy, strength


def iterative_phase(reference_roi, walking_roi, affine=None, reduction=11, convergence_threshold=0.1, max_dist=50, **kwargs):
    """
    Iteratively apply a subpixel phase matcher to source (s_img) and destination (d_img)
@@ -737,29 +456,7 @@ def iterative_phase(reference_roi, walking_roi, affine=None, reduction=11, conve
    walking_roi.x, walking_roi.y = dsample, dline
    return subpixel_affine, error, diffphase

'''def estimate_affine_transformation(destination_coordinates, source_coordinates):
    """
    Given a set of destination control points compute the affine transformation
    required to project the source control points into the destination.

    Parameters
    ----------
    destination_coordinates : array-like
                              An n,2 data structure containing the destination control points

    source_coordinates : array-like
                         An n,2 data structure containing the source control points

    Returns
    -------
     : object
       An skimage affine transform object
    """
    destination_coordinates = np.asarray(destination_coordinates)
    source_coordinates = np.asarray(source_coordinates)

    return tf.estimate_transform('affine', destination_coordinates, source_coordinates)
'''
def geom_match_simple(reference_image,
                       moving_image,
                       bcenter_x,
+61 −1
Original line number Diff line number Diff line
@@ -4,6 +4,7 @@ import numpy as np
from plio.io.io_gdal import GeoDataset
from skimage import transform as tf

from autocnet.transformation.roi import Roi
from autocnet.spatial import isis

log = logging.getLogger(__name__)
@@ -87,3 +88,62 @@ def estimate_affine_from_sensors(reference_image,
    t2 = time.time()
    log.debug(f'Estimation of the transformation took {t2-t1} seconds.')
    return affine


def estimate_local_affine(reference_image, moving_image, center_x, center_y, size_x, size_y):
    """
    estimate_local_affine

    similar to estimate_affine_from_sensors, but for regions of interest (ROI).

    Parameters
    ----------
    reference_image : plio.io.io_gdal.GeoDataset
                      Image that is expected to be used as the reference during the matching process, 
                      points are laid onto here and projected onto moving image to compute an affine
    moving_image : plio.io.io_gdal.GeoDataset
                   Image that is expected to move around during the matching process, 
                   points are projected onto this image to compute an affine  
    center_x : number
               center x (aka center sample) of the ROI in reference image pixel space
    center_y : number
               center y (aka center line) of the ROI in reference image pixel space
    size_x : number
             distance from the center to the end of the ROI window in the x (aka sample) direction. 
             This is in reference image pixel space, resulting roi shape is (sizey*2, sizex*2)
    size_y : number
             distance from the center to the end of the ROI window in the y (aka line) direction, 
             This is in reference image pixel space, resulting roi shape is (sizey*2, sizex*2)

    Returns
    -------
    affine
        Affine matrix to transform the moving image onto the center image
    """
    # get initial affine
    affine_transform = estimate_affine_from_sensors(reference_image, moving_image, center_x, center_y)

    ref_center = (center_x, center_y)

    # MOVING NO AFFINE; Get the full moving image area so that an applied affine transformation that 
    # adds no data around 1+ edge does not fool the to be applied matcher.
    # The affine transformed center is a better match than the a priori sensor coords at this point.
    affine_center = affine_transform(ref_center)[0]
    moving_roi = Roi(moving_image, *affine_center, size_x=size_x, size_y=size_y)

    # The above coordinate transformation to get the center of the ROI handles translation. 
    # So, we only need to rotate/shear/scale the ROI. Omitting scale, which should be 1 (?) results
    # in an affine transformation that does not match the full image affine
    tf_rotate = tf.AffineTransform(rotation=affine_transform.rotation, 
                                          shear=affine_transform.shear,
                                          scale=affine_transform.scale)

    # This rotates about the center of the image
    shift_x, shift_y = moving_roi.center
    tf_shift = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[shift_x, shift_y])
    
    # Define the full chain
    trans = (tf_shift + (tf_rotate + tf_shift_inv))

    return trans
 No newline at end of file