Commit 15515191 authored by Jay's avatar Jay
Browse files

Working affine transformation

parent d03aa207
Loading
Loading
Loading
Loading
+65 −54
Original line number Diff line number Diff line
@@ -18,7 +18,6 @@ import cv2
from skimage import transform as tf
from skimage import registration
from skimage import filters
from skimage.util import img_as_float32
from scipy import fftpack
from sqlalchemy.sql.expression import bindparam

@@ -31,10 +30,9 @@ import pvl
import PIL
from PIL import Image

from autocnet.matcher.naive_template import pattern_match, pattern_match_autoreg
from autocnet.matcher.naive_template import pattern_match
from autocnet.matcher.mutual_information import mutual_information_match
from autocnet.matcher import ciratefi
from autocnet.matcher.mutual_information import mutual_information
from autocnet.spatial import isis
from autocnet.io.db.model import Measures, Points, Images, JsonEncoder
from autocnet.graph.node import NetworkNode
@@ -42,6 +40,7 @@ from autocnet.transformation import roi
from autocnet.transformation.affine import estimate_local_affine
from autocnet import spatial
from autocnet.utils.utils import bytescale
from autocnet.utils.serializers import JsonEncoder

from sqlalchemy import inspect

@@ -263,9 +262,22 @@ def subpixel_template(reference_roi,
    autocnet.matcher.naive_template.pattern_match_autoreg : for the kwargs that can be passed to the autoreg style matcher
    """

    try:
        s_image_dtype = isis.isis2np_types[pvl.load(reference_roi.data.file_name)["IsisCube"]["Core"]["Pixels"]["Type"]]
    except:
        s_image_dtype = None

    try:
        d_template_dtype = isis.isis2np_types[pvl.load(moving_roi.data.file_name)["IsisCube"]["Core"]["Pixels"]["Type"]]
    except:
        d_template_dtype = None
    
    
    # In ISIS, the reference image is the search and moving image is the pattern.
    ref_clip = reference_roi.clip()
    moving_clip = moving_roi.clip(affine)
    reference_roi.clip(dtype = s_image_dtype)
    ref_clip = reference_roi.clipped_array
    moving_roi.clip(affine=affine, dtype=d_template_dtype)
    moving_clip = moving_roi.clipped_array
    
    if moving_clip.var() == 0:
        warnings.warn('Input ROI has no variance.')
@@ -274,25 +286,19 @@ def subpixel_template(reference_roi,
    if (ref_clip is None) or (moving_clip is None):
        return None, None, None

    # Just takes 2d arrays, no idea about affines
    matcher_shift_x, matcher_shift_y, metrics, corrmap = func(moving_clip, ref_clip, **kwargs)
    matcher_shift_x, matcher_shift_y, metrics, corrmap = func(moving_clip, ref_clip, upsampling=16, **kwargs)
    if matcher_shift_x is None:
        return None, None, None

    # Apply the shift to the center of the moving roi to the center of the reference ROI in index space. One pixel == one index (unitless).
    # All this does is adjust from the upper left of the maximum correlation to the origin of the 2d array.
    new_affine_transformed_center_x = moving_roi.clip_center - matcher_shift_x  #Center is indices.
    new_affine_transformed_center_y = moving_roi.clip_center- matcher_shift_y

    # Invert the affine transformation of the new center. This result is plotted in the second figure as a red dot.
    inverse_transformed_affine_center_x, inverse_transformed_affine_center_y = affine.inverse((new_affine_transformed_center_x, new_affine_transformed_center_y))[0]
    # shift_x, shift_y are in the affine transformed space. They are relative to center of the affine transformed ROI.
    # Apply the shift to the center of the moving roi to the center of the reference ROI in index space.
    new_center_x = moving_roi.clip_center[0] + matcher_shift_x
    new_center_y = moving_roi.clip_center[1] + matcher_shift_y
    
    # Take the original x,y (moving_roi.x, moving_roi.y) and subtract the delta between the original ROI center and the newly computed center.
    translation_x = - (moving_roi.clip_center - inverse_transformed_affine_center_x) 
    translation_y = - (moving_roi.clip_center - inverse_transformed_affine_center_y)
    new_x, new_y = moving_roi.clip_coordinate_to_image_coordinate(new_center_x, new_center_y)
    
    new_affine = tf.AffineTransform(translation=(translation_x,
                                                 translation_y))
    new_affine = tf.AffineTransform(translation=(-(moving_roi.x - new_x),
                                                 -(moving_roi.y - new_y)))
    
    return new_affine, metrics, corrmap
    
@@ -1078,13 +1084,11 @@ def subpixel_register_point(pointid,
    if isinstance(pointid, Points):
        pointid = pointid.id

    t1 = time.time()
    with ncg.session_scope() as session:
        measures = session.query(Measures).filter(Measures.pointid == pointid).order_by(Measures.id).all()
        point = session.query(Points).filter(Points.id == pointid).one()
        reference_index = point.reference_index
        t2 = time.time()
        print(f'Query took {t2-t1} seconds to find the measures and reference measure.')

        # Get the reference measure. Previously this was index 0, but now it is a database tracked attribute
        source = measures[reference_index]

@@ -1100,8 +1104,7 @@ def subpixel_register_point(pointid,
        sourceres = session.query(Images).filter(Images.id == sourceid).one()
        source_node = NetworkNode(node_id=sourceid, image_path=sourceres.path)
        source_node.parent = ncg
        t3 = time.time()
        print(f'Query for the image to use as source took {t3-t2} seconds.')

        print(f'Attempting to subpixel register {len(measures)-1} measures for point {pointid}')
        nodes = {}
        for measure in measures:
@@ -1195,21 +1198,15 @@ def subpixel_register_point(pointid,
    updated_measures.append(source)

    if use_cache:
        t4 = time.time()
        ncg.redis_queue.rpush(ncg.measure_update_queue,
                              *[json.dumps(measure.to_dict(_hide=[]), cls=JsonEncoder) for measure in updated_measures])
        ncg.redis_queue.incr(ncg.measure_update_counter, amount=len(updated_measures))
        t5 = time.time()
        print(f'Cache load took {t5-t4} seconds')
    else:
        t4 = time.time()
        # Commit the updates back into the DB
        with ncg.session_scope() as session:
            for m in updated_measures:
                ins = inspect(m)
                session.add(m)
        t5 = time.time()
        print(f'Database update took {t5-t4} seconds.')
    return resultlog

def subpixel_register_points(subpixel_template_kwargs={'image_size':(251,251)},
@@ -1628,6 +1625,7 @@ def subpixel_register_point_smart(pointid,

    Parameters
    ----------
    
    pointid : int or obj
              The identifier of the point in the DB or a Points object

@@ -1722,13 +1720,13 @@ def subpixel_register_point_smart(pointid,
                                source.aprioriline, 
                                size_x=size_x, 
                                size_y=size_y, 
                                buffer=10)
                                buffer=0)
        moving_roi = roi.Roi(destination_node.geodata, 
                             measure.apriorisample, 
                             measure.aprioriline, 
                             size_x=size_x, 
                             size_y=size_y, 
                             buffer=10)
                             buffer=20)

        try:
            baseline_affine = estimate_local_affine(reference_roi,
@@ -1743,8 +1741,12 @@ def subpixel_register_point_smart(pointid,
            updated_measures.append([None, None, m])
            continue

        reference_clip = reference_roi.clip()
        moving_clip = moving_roi.clip(baseline_affine)
        reference_roi.clip()
        reference_clip = reference_roi.clipped_array

        moving_roi.clip(affine=baseline_affine)
        moving_clip = moving_roi.clipped_array

        if np.isnan(reference_clip).any() or np.isnan(moving_clip).any():
            print('Unable to process due to NaN values in the input data.')
            m = {'id': measure.id,
@@ -1761,11 +1763,14 @@ def subpixel_register_point_smart(pointid,
            updated_measures.append([None, None, m])
            continue

        # Compute the a priori template and MI correlations with no shifts allowed. 
        _, baseline_corr, _ = subpixel_template(reference_roi, 
                                                moving_roi,
                                                affine=baseline_affine)
        
        baseline_mi = 0 #mutual_information_match(reference_roi, moving_roi, affine=affine)
        baseline_mi = 0 #mutual_information_match(reference_roi, 
                        #                         moving_roi, 
                        #                         affine=baseline_affine)
        
        print(f'Baseline MI: {baseline_mi} | Baseline Corr: {baseline_corr}')
        
@@ -1777,15 +1782,15 @@ def subpixel_register_point_smart(pointid,
                                    source.aprioriline,
                                    size_x=match_kwargs['image_size'][0],
                                    size_y=match_kwargs['image_size'][1], 
                                    buffer=10)
                                    buffer=0)
            moving_roi = roi.Roi(destination_node.geodata,
                                 measure.apriorisample,
                                 measure.aprioriline,
                                 size_x=match_kwargs['template_size'][0],
                                 size_y=match_kwargs['template_size'][1], 
                                 buffer=10)
                                 buffer=20)
            
            try:
            """try:
               affine = estimate_local_affine(reference_roi, moving_roi)
            except Exception as e:
                print(e)
@@ -1795,10 +1800,11 @@ def subpixel_register_point_smart(pointid,
                     'status':False,
                     'choosername':chooser}
                updated_measures.append([None, None, m])
                continue
                continue"""
            
            # Updated so that the affine used is computed a single time.
            updated_affine, maxcorr, temp_corrmap = subpixel_template(reference_roi,
            # Has not scale or shear or rotation.
            updated_affine, maxcorr, _ = subpixel_template(reference_roi,
                                                           moving_roi,
                                                           affine=baseline_affine)
            
@@ -2025,7 +2031,7 @@ def validate_candidate_measure(measure_to_register,
        sample = measure_to_register['sample']
        line = measure_to_register['line']

        print(f'Validating measure: {measure_to_register_id} on image: {source_image.name}')
        print(f'Validating measure: {measure_to_register_id} on image: {source_imageid}')

        dists = []
        for parameter in parameters:
@@ -2050,7 +2056,7 @@ def validate_candidate_measure(measure_to_register,
                print('Unable to transform image to reference space. Likely too close to the edge of the non-reference image. Setting ignore=True')
                return [np.inf] * len(parameters)

            updated_affine, maxcorr, temp_corrmap = subpixel_template(reference_roi,
            updated_affine, maxcorr, _ = subpixel_template(reference_roi,
                                                           moving_roi,
                                                           affine=affine)
            if updated_affine is None:
@@ -2100,8 +2106,9 @@ def smart_register_point(pointid, parameters=[], shared_kwargs={}, ncg=None, Ses

    Parameters
    ----------
    pointid : int
              The id of the point to register
    pointid : int or object
              The id of the point to register or a point object from which the
              id is accessed.

    parameters : list
                 A list of dict subpixel registration kwargs, {template_size: (x,x), image_size: (y,y)}
@@ -2125,17 +2132,18 @@ def smart_register_point(pointid, parameters=[], shared_kwargs={}, ncg=None, Ses
                            building approach

    """
    if isinstance(pointid, Points):
        pointid = pointid.id
    measure_results = subpixel_register_point_smart(pointid, ncg=ncg, parameters=parameters, **shared_kwargs)
    measures_to_update, measures_to_set_false = decider(measure_results)

    #print()
    #print(f'Found {len(measures_to_update)} measures that found subpixel registration consensus. Running validation now...')
    print(f'Found {len(measures_to_update)} measures that found subpixel registration consensus. Running validation now...')
    # Validate that the new position has consensus
    for measure in measures_to_update:
        #print()
        reprojection_distances = validate_candidate_measure(measure, parameters=parameters, ncg=ncg, **shared_kwargs)
        if np.sum(np.array(reprojection_distances) < 1) < 2:
        #if reprojection_distance > 1:
            print(f"Measure {measure['id']} failed validation. Setting ignore=True for this measure.")
            measures_to_set_false.append(measure['id'])

@@ -2167,4 +2175,7 @@ def smart_register_point(pointid, parameters=[], shared_kwargs={}, ncg=None, Ses
            resp = conn.execute(
                stmt, measures_to_set_false
            )
    print(f'Updated measures: {json.dumps(measures_to_update, indent=2, cls=JsonEncoder)}')
    print(f'Ignoring measures: {measures_to_set_false}')

    return measures_to_update, measures_to_set_false
+8 −17
Original line number Diff line number Diff line
@@ -79,15 +79,12 @@ def estimate_affine_from_sensors(reference_image,
            dst_gcps.append((xs[i], ys[i]))
            base_gcps.append((base_x, base_y))
            
    log.debug(f'base_gcps: {base_gcps}')
    log.debug(f'dst_gcps: {dst_gcps}')

    if len(dst_gcps) < 3:
        raise ValueError(f'Unable to find enough points to compute an affine transformation. Found {len(dst_corners)} points, but need at least 3.')

    affine = tf.estimate_transform('affine', np.array([*base_gcps]), np.array([*dst_gcps]))
    t2 = time.time()
    log.debug(f'Estimation of the transformation took {t2-t1} seconds.')
    log.debug(f'Estimation of local affine took {t2-t1} seconds.')
    return affine


@@ -112,16 +109,11 @@ def estimate_local_affine(reference_roi, moving_roi):
    """
    # get initial affine
    roi_buffer = reference_roi.buffer
    size_x = reference_roi.clip_center + roi_buffer
    size_y = reference_roi.clip_center + roi_buffer
    size_x = 60# reference_roi.size_x + roi_buffer
    size_y = 60# reference_roi.size_y + roi_buffer
    
    affine_transform = estimate_affine_from_sensors(reference_roi.data, moving_roi.data, reference_roi.x, reference_roi.y, size_x=size_x, size_y=size_y)
    ref_center = (reference_roi.x, reference_roi.y)

    # MOVING NO AFFINE; Get the full moving image area so that an applied affine transformation that 
    # adds no data around 1+ edge does not fool the to be applied matcher.
    # The affine transformed center is a better match than the a priori sensor coords at this point.
    affine_center = affine_transform(ref_center)[0]

    # The above coordinate transformation to get the center of the ROI handles translation. 
    # So, we only need to rotate/shear/scale the ROI. Omitting scale, which should be 1 (?) results
@@ -131,8 +123,8 @@ def estimate_local_affine(reference_roi, moving_roi):
                                   scale=affine_transform.scale)

    # This rotates about the center of the image
    shift_x = moving_roi.clip_center
    shift_y = moving_roi.clip_center
    shift_x, shift_y = (30.5, 30.5) #moving_roi.center
        
    tf_shift = tf.SimilarityTransform(translation=[shift_x, shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    
@@ -140,5 +132,4 @@ def estimate_local_affine(reference_roi, moving_roi):
    # this is 'shift to the center', apply the rotation, shift back
    # to the origin.
    trans = tf_shift_inv + tf_rotate + tf_shift

    return trans
+91 −25
Original line number Diff line number Diff line
@@ -40,8 +40,12 @@ class Roi():
                  on instantiation, set to (). When clip is called and the clipped_array
                  variable is set, the clip_center is set to the center of the, potentially
                  affine transformed, clipped_array.
    
    affine : object
             a scikit image affine transformation object that is applied when clipping. The default,
             identity matrix results in no transformation.
    """
    def __init__(self, data, x, y, size_x=200, size_y=200, ndv=None, ndv_threshold=0.5, buffer=5):
    def __init__(self, data, x, y, size_x=200, size_y=200, ndv=None, ndv_threshold=0.5, buffer=5, affine=tf.AffineTransform()):
        if not isinstance(data, GeoDataset):
            raise TypeError('Error: data object must be a plio GeoDataset')
        self.data = data
@@ -54,11 +58,20 @@ class Roi():
        self.buffer = buffer
        self.clipped_array = None
        self.clip_center = ()
        self.affine = affine

    @property
    def center(self):
        """The ROI anchor as an ``(x, y)`` tuple in full-image coordinates."""
        x_coord = self.x
        y_coord = self.y
        return (x_coord, y_coord)

    @property
    def affine(self):
        """The scikit-image affine transform applied when clipping this ROI.

        Returns
        -------
        object
            The transform most recently assigned to this ROI (the
            constructor default is an identity ``tf.AffineTransform()``).
        """
        return self._affine

    @affine.setter
    def affine(self, affine):
        # A property setter is always invoked with exactly one value via
        # attribute assignment, so the previous default argument
        # (affine=tf.AffineTransform()) was dead code and needlessly built
        # a transform object at class-definition time. Removing it is
        # interface-neutral for all callers.
        self._affine = affine

    @property
    def x(self):
        return self._whole_x + self._remainder_x
@@ -167,6 +180,37 @@ class Roi():
        """
        return self.clip()

    def clip_coordinate_to_image_coordinate(self, x, y):
        """
        Take a passed coordinate in a clipped array from an ROI and return the coordinate
        in the full image.

        The mapping is performed in three steps: (1) offset the clip-local
        coordinate by the clip start to move into the warped read-array frame,
        (2) map through ``self.affine``, and (3) add the offset between the
        read-array center and the ROI's full-image anchor.

        Parameters
        ----------
        x : float
            The x coordinate in the clipped array to be transformed into full image coordinates

        y : float
            The y coordinate in the clipped array to be transformed into full image coordinates

        Returns
        -------
        x_in_image_space : float
                           The transformed x in image coordinate space

        y_in_image_space : float
                           The transformed y in image coordinate space
        """
        # _clip_start_x/_clip_start_y locate the clipped window inside the
        # (possibly affine-warped) read array; they are set by clip().
        # NOTE(review): clip() only sets them on its affine branch — confirm
        # this method is never called after a non-affine clip().
        x_in_affine_space = x + self._clip_start_x
        y_in_affine_space = y + self._clip_start_y

        # NOTE(review): this applies the forward affine, while clip() warps
        # with self.affine and reads centers via self.affine.inverse —
        # confirm forward (not .inverse) is the intended direction here.
        x_in_clip_space, y_in_clip_space = self.affine((x_in_affine_space,
                                                                y_in_affine_space))[0]

        # _roi_x_to_clip_center/_roi_y_to_clip_center (set in clip()) are the
        # offsets from the read-array center to the ROI anchor (self.x, self.y).
        x_in_image_space = x_in_clip_space + self._roi_x_to_clip_center
        y_in_image_space = y_in_clip_space + self._roi_y_to_clip_center

        return x_in_image_space, y_in_image_space

    def clip(self, size_x=None, size_y=None, affine=None, dtype=None, mode="reflect"):
        """
@@ -207,6 +251,45 @@ class Roi():
        # This data is an nd-array that is larger than originally requested, because it may be affine warped.
        data = self.data.read_array(pixels=pixels, dtype=dtype)

        data_center = np.array(data.shape[::-1]) / 2.  # Location within a pixel
        # 12 - 5.5; 12 - 5.5, self.x, self.y = coordinates in the full image and data center are 1/2 the total size of the read array
        self._roi_x_to_clip_center = self.x - data_center[0]
        self._roi_y_to_clip_center = self.y - data_center[1]

        if affine:
            self.affine = affine
            # The cval is being set to the mean of the array,
            warped_data = tf.warp(data, 
                         self.affine, #.inverse, 
                         order=3, 
                         mode='reflect')

            # Confirm inverse! 
            self.warped_array_center = self.affine.inverse(data_center)[0]
            
            # Warped center coordinate - offset from pixel center to pixel edge - desired size
            self._clip_start_x = self.warped_array_center[0] - 0.5 - self.size_x
            self._clip_start_y = self.warped_array_center[1] - 0.5 - self.size_y

            # Now that the whole pixel array has been warped, interpolate the array to align pixel edges
            xi = np.linspace(self._clip_start_x, 
                             self._clip_start_x + (self.size_x * 2) + 1, 
                             (self.size_x * 2) + 1) 
            yi = np.linspace(self._clip_start_y, 
                             self._clip_start_y + (self.size_y * 2) + 1, 
                             (self.size_y * 2) + 1) 
            
            # the xi, yi are intentionally handed in backward, because the map_coordinates indexes column major
            pixel_locked = ndimage.map_coordinates(warped_data, 
                                        np.meshgrid(yi, xi, indexing='ij'),
                                        mode=mode,
                                        order=3)

            self.clip_center = (np.array(pixel_locked.shape)[::-1]) / 2.0

            self.clipped_array = img_as_float32(pixel_locked)
        else:

            # Now that the whole pixel array has been read, interpolate the array to align pixel edges
            xi = np.linspace(self._remainder_x, 
                            ((self.buffer*2) + self._remainder_x + (self.size_x*2)), 
@@ -221,27 +304,10 @@ class Roi():
                                        mode=mode,
                                        order=3)

        if affine:
            # The cval is being set to the mean of the array,
            pixel_locked = tf.warp(pixel_locked, 
                         affine, #.inverse, 
                         order=3, 
                         mode='reflect')

            self.original_array_center =  (np.array(pixel_locked.shape)[::-1] - 1) / 2.0
            # Return order is x,y
            self.clip_center = affine.inverse(self.original_array_center)[0]
            
            # +1 handles non-inclusive end indexing on the nd-array; indexing order is y, x
            pixel_locked = pixel_locked[floor(self.clip_center[1])-self.size_y:ceil(self.clip_center[1])+self.size_y,
                                        floor(self.clip_center[0])-self.size_x:ceil(self.clip_center[0])+self.size_x]
            
            self.clipped_array = img_as_float32(pixel_locked)
            return
        else:
            if self.buffer != 0:
                pixel_locked = pixel_locked[self.buffer:-self.buffer, 
                                            self.buffer:-self.buffer]
            self.clip_center = tuple(np.array(pixel_locked.shape)[::-1] / 2.)
            self.original_array_center = self.clip_center
            self.warped_array_center = self.clip_center
            self.clipped_array = img_as_float32(pixel_locked)