Commit 27705fe7 authored by Rodriguez, Kelvin's avatar Rodriguez, Kelvin
Browse files

Merge branch 'roi' into 'main'

Updates for affine changes and adds debug logging.

See merge request astrogeology/autocnet!660
parents c088c045 e1085baa
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -37,9 +37,11 @@ release.
### Added
- [`pool_pre_ping`](https://docs.sqlalchemy.org/en/14/core/pooling.html#disconnect-handling-pessimistic) to the sqlalchemy engine connection to handle instances where hundreds of connections are simultaneously connecting to the database.
- verbose option to the smart subpixel matcher that will visualize the reference and moving ROIs in order to better support single point visualization.
- Debug logging to place_points_in_overlap

### Changed
- Estimation of the affine transformation no longer needs to use points completely within the destination (moving) image. Negative values are still valid for affine estimation and the sensor model is not constrained to within the image.
- to_isis method on the network candidate graph returns both the dataframe (existing functionality) and the filelist (new functionality).

### Fixed
- Fixed connection issues where too many connections to AWS RDS were causing connection failures by adding an exponential sleep over five retries.
+4 −1
Original line number Diff line number Diff line
@@ -2122,6 +2122,9 @@ class NetworkCandidateGraph(CandidateGraph):
        df : pd.DataFrame
             The pandas dataframe that is passed to plio to generate the control network.

        fpaths : list
                 of paths to the images being included in the control network

        """
        # Read the cnet from the db
        df = io_controlnetwork.db_to_df(self.engine, **db_kwargs)
@@ -2152,7 +2155,7 @@ class NetworkCandidateGraph(CandidateGraph):

        # Even though this method writes, having a non-None return
        # lets a user work with the data that is passed to plio
        return df
        return df, fpaths

    def update_from_jigsaw(self, path, pointid_func=lambda x: int(x.split('_')[-1])):
        """
+1 −1
Original line number Diff line number Diff line
@@ -395,7 +395,7 @@ def subpixel_register_point(pointid,
                             size_y=match_kwargs['template_size'][1],
                             buffer=5)

        affine = estimate_local_affine(reference_roi, moving_roi)
        baseline_affine = estimate_local_affine(reference_roi, moving_roi)

        # Updated so that the affine used is computed a single time.
        # Has no scale, shear, or rotation.
+11 −9
Original line number Diff line number Diff line
@@ -167,6 +167,7 @@ def place_points_in_overlap(overlap,

    log.info(f'Attempting to place measures in {len(nodes)} images.')
    for v in valid:
        log.debug(f'Valid point: {v}')
        lon = v[0]
        lat = v[1]

@@ -176,6 +177,7 @@ def place_points_in_overlap(overlap,

        # Need to get the first node and then convert from lat/lon to image space
        for reference_index, node in enumerate(nodes):
            log.debug(f'Starting with reference_index: {reference_index}')
            # reference_index is the index into the list of measures for the image that is not shifted and is set at the
            # reference against which all other images are registered.
            if cam_type == "isis":
@@ -200,14 +202,13 @@ def place_points_in_overlap(overlap,
            if image_roi.variance == 0:
                log.warning(f'Failed to find interesting features in image {node.image_name}.')
                continue
            image = image_roi.clip()

            # Extract the most interesting feature in the search window
            interesting = extract_most_interesting(image)
            image_roi.clip()
            interesting = extract_most_interesting(image_roi.clipped_array)
            if interesting is not None:
                # We have found an interesting feature and have identified the reference point.
                break

        log.debug(f'Current reference index: {reference_index}.')
        if interesting is None:
            log.warning('Unable to find an interesting point, falling back to the a priori pointing')
            newsample = sample
@@ -268,6 +269,7 @@ def place_points_in_overlap(overlap,
            updated_lon, updated_lat = og2oc(updated_lon_og, updated_lat_og, semi_major, semi_minor)

        point_geom = shapely.geometry.Point(x, y, z)
        log.debug(f'Creating point with reference_index: {reference_index}')
        point = Points(identifier=identifier,
                       overlapid=overlap.id,
                       apriori=point_geom,
@@ -298,6 +300,7 @@ def place_points_in_overlap(overlap,
                    # a measure fails to be placed.
                    if current_index < reference_index:
                        reference_index -= 1
                    log.debug('Reference de-incremented.')
                    continue

            point.measures.append(Measures(sample=sample,
@@ -308,7 +311,8 @@ def place_points_in_overlap(overlap,
                                           serial=node.isis_serial,
                                           measuretype=3,
                                           choosername='place_points_in_overlap'))

        log.debug(f'Current reference index in code: {reference_index}.')
        log.debug(f'Current reference index on point: {point.reference_index}')
        if len(point.measures) >= 2:
            points.append(point)
    log.info(f'Able to place {len(points)} points.')
@@ -434,9 +438,9 @@ def place_points_in_image(image,

        # Extract ORB features in a sub-image around the desired point
        image_roi = roi.Roi(node.geodata, sample, line, size_x=size, size_y=size)
        image = image_roi.clip()
        image_roi.clip()
        try:
            interesting = extract_most_interesting(image)
            interesting = extract_most_interesting(image.clipped_array)
        except:
            continue

@@ -508,7 +512,6 @@ def place_points_in_image(image,
                       cam_type=cam_type)

        for node in nodes:
            insert = True
            if cam_type == "csm":
                image_coord = node.camera.groundToImage(gnd)
                sample, line = image_coord.samp, image_coord.line
@@ -518,7 +521,6 @@ def place_points_in_image(image,
                except CalledProcessError as e:
                    if 'Requested position does not project in camera model' in e.stderr:
                        log.exception(f'interesting point ({lon},{lat}) does not project to image {node["image_path"]}')
                        insert = False

            point.measures.append(Measures(sample=sample,
                                           line=line,
+40 −37
Original line number Diff line number Diff line
@@ -66,14 +66,18 @@ class Roi():
        self.ndv = ndv
        self._ndv_threshold = ndv_threshold
        self.buffer = buffer
        self.clipped_array = None
        self.clip_center = ()
        self.affine = affine

    @property
    def center(self):
        return (self.x, self.y)

    @property
    def clip_center(self):
        if not getattr(self, '_clip_center', None):
            self.clip()
        return self._clip_center

    @property
    def affine(self):
        return self._affine
@@ -170,8 +174,6 @@ class Roi():
        """
        if self.ndv == None:
            return True
        if len(self.clipped_array) == 0:
            return False
        # Check if we have any ndv values this will return an inverted array
        # where all no data values are true, we need to then invert the array
        # and return the all result. This ensures that a valid array will return
@@ -181,14 +183,16 @@ class Roi():

    @property
    def variance(self):
        return np.var(self.array)
        return np.var(self.clipped_array)

    @property
    def array(self):
    def clipped_array(self):
        """
        The clipped array associated with this ROI.
        """
        return self.clip()
        if not hasattr(self, "_clipped_array"):
            self.clip()
        return self._clipped_array

    def clip_coordinate_to_image_coordinate(self, x, y):
        """
@@ -307,9 +311,9 @@ class Roi():
                                        mode=mode,
                                        order=3)

            self.clip_center = (np.array(pixel_locked.shape)[::-1]) / 2.0
            self._clip_center = (np.array(pixel_locked.shape)[::-1]) / 2.0

            self.clipped_array = img_as_float32(pixel_locked)
            self._clipped_array = img_as_float32(pixel_locked)
        else:

            # Now that the whole pixel array has been read, interpolate the array to align pixel edges
@@ -329,7 +333,6 @@ class Roi():
            if self.buffer != 0:
                pixel_locked = pixel_locked[self.buffer:-self.buffer,
                                            self.buffer:-self.buffer]
            self.clip_center = tuple(np.array(pixel_locked.shape)[::-1] / 2.)
            self._clip_center = tuple(np.array(pixel_locked.shape)[::-1] / 2.)
            self.warped_array_center = self.clip_center
            self.clipped_array = img_as_float32(pixel_locked)
            self._clipped_array = img_as_float32(pixel_locked)