Commit ca7a5527 authored by Kristin Berry's avatar Kristin Berry
Browse files

Merge pull request #54 from jlaura/master

Adds disk suppression and fixes symmetry issue
parents 7fe9a1ca 7542bc18
Loading
Loading
Loading
Loading
+63 −17
Original line number Diff line number Diff line
import math
import warnings
from collections import MutableMapping

@@ -9,6 +10,7 @@ from autocnet.cg.cg import convex_hull_ratio
from autocnet.cg.cg import overlapping_polygon_area
from autocnet.matcher import health
from autocnet.matcher import outlier_detector as od
from autocnet.matcher import suppression_funcs as spf
from autocnet.matcher import subpixel as sp
from autocnet.transformation.transformations import FundamentalMatrix, Homography
from autocnet.vis.graph_view import plot_edge
@@ -111,7 +113,7 @@ class Edge(dict, MutableMapping):
        else:
            raise AttributeError('No matches have been computed for this edge.')

    def ratio_check(self, ratio=0.8, clean_keys=[]):
    def ratio_check(self, clean_keys=[], **kwargs):
        if hasattr(self, 'matches'):

            if clean_keys:
@@ -119,13 +121,14 @@ class Edge(dict, MutableMapping):
            else:
                mask = pd.Series(True, self.matches.index)


            self.distance_ratio = od.DistanceRatio(self.matches)
            self.distance_ratio.compute(ratio, mask=mask, mask_name=None)
            self.distance_ratio.compute(mask=mask, **kwargs)

            # Setup to be notified
            self.distance_ratio._notify_subscribers(self.distance_ratio)

            self.masks = ('ratio', mask)
            self.masks = ('ratio', self.distance_ratio.mask)
        else:
            raise AttributeError('No matches have been computed for this edge.')

@@ -253,17 +256,16 @@ class Edge(dict, MutableMapping):
                      The maximum (positive) value that a pixel can shift in the y direction
                      without being considered an outlier
        """

        matches = self.matches
        self.subpixel_offsets = pd.DataFrame(0, index=matches.index, columns=['x_offset',
                                                                              'y_offset',
                                                                              'correlation',
                                                                              's_idx', 'd_idx'])
        for column in ['x_offset', 'y_offset', 'correlation']:
            if not column in self.matches.columns:
                self.matches[column] = 0

        # Build up a composite mask from all of the user specified masks
        if clean_keys:
            matches, mask = self._clean(clean_keys)

        # Grab the full images, or handles
        if tiled is True:
            s_img = self.source.handle
            d_img = self.destination.handle
@@ -282,25 +284,21 @@ class Edge(dict, MutableMapping):
            # Get the template and search window
            s_template = sp.clip_roi(s_img, s_keypoint, template_size)
            d_search = sp.clip_roi(d_img, d_keypoint, search_size)

            try:
                x_off, y_off, strength = sp.subpixel_offset(s_template, d_search, upsampling=upsampling)
                self.subpixel_offsets.loc[idx] = [x_off, y_off, strength,s_idx, d_idx]
                x_offset, y_offset, strength = sp.subpixel_offset(s_template, d_search, upsampling=upsampling)
                self.matches.loc[idx, ('x_offset', 'y_offset', 'correlation')] = [x_offset, y_offset, strength]
            except:
                warnings.warn('Template-Search size mismatch, failing for this correspondence point.')
                continue

        self.subpixel_offsets.to_sparse(fill_value=0.0)

        # Compute the mask for correlations less than the threshold
        threshold_mask = self.subpixel_offsets['correlation'] >= threshold
        threshold_mask = self.matches['correlation'] >= threshold

        # Compute the mask for the point shifts that are too large
        subp= self.subpixel_offsets
        query_string = 'x_offset <= -{0} or x_offset >= {0} or y_offset <= -{1} or y_offset >= {1}'.format(max_x_shift,
                                                                                                           max_y_shift)
        sp_shift_outliers = subp.query(query_string)
        shift_mask = pd.Series(True, index=self.subpixel_offsets.index)
        sp_shift_outliers = self.matches.query(query_string)
        shift_mask = pd.Series(True, index=self.matches.index)
        shift_mask[sp_shift_outliers.index] = False

        # Generate the composite mask and write the masks to the mask data structure
@@ -309,6 +307,54 @@ class Edge(dict, MutableMapping):
        self.masks = ('threshold', threshold_mask)
        self.masks = ('subpixel', mask)

    def suppress(self, func=spf.correlation, clean_keys=[], **kwargs):
        """
        Apply a disc based suppression algorithm to get a good spatial
        distribution of high quality points, where the user defines some
        function to be used as the quality metric.

        Parameters
        ----------
        func : object
               A function that returns a scalar value to be used
               as the strength of a given row in the matches data
               frame.

        clean_keys : list
                     of mask keys to be used to reduce the total size
                     of the matches dataframe.

        **kwargs : dict
                   Forwarded to the SpatialSuppression constructor on first
                   call; on later calls, only keys that match existing
                   attributes of the cached suppression object are applied.

        Raises
        ------
        AttributeError
            If matches have not yet been computed for this edge.
        """
        if not hasattr(self, 'matches'):
            raise AttributeError('This edge does not yet have any matches computed.')

        # Build up a composite mask from all of the user specified masks
        if clean_keys:
            matches, mask = self._clean(clean_keys)
        else:
            matches = self.matches
        # Suppression domain is the full source image extent.
        # NOTE(review): assumes raster_size is (width, height) as
        # SpatialSuppression expects — confirm against outlier_detector.
        domain = self.source.handle.raster_size

        # Massage the dataframe into the correct structure
        # Attach source keypoint coordinates to each match row, then score
        # every row with the user-supplied strength function.
        coords = self.source.keypoints[['x', 'y']]
        merged = matches.merge(coords, left_on=['source_idx'], right_index=True)
        merged['strength'] = merged.apply(func, axis=1)

        if not hasattr(self, 'suppression'):
            # Instantiate the suppression object and suppress matches
            self.suppression = od.SpatialSuppression(merged, domain, **kwargs)
            self.suppression.suppress()
        else:
            # Reuse the cached suppression object: update only recognized
            # attributes, then re-run suppression with the new settings.
            for k, v in kwargs.items():
                if hasattr(self.suppression, k):
                    setattr(self.suppression, k, v)
            self.suppression.suppress()

        if clean_keys:
            # Inflate the suppression result back to the full match set:
            # the suppression mask is aligned with the cleaned (True) rows,
            # so overwrite exactly those positions in the composite mask.
            mask[mask == True] = self.suppression.mask
        else:
            mask = self.suppression.mask
        self.masks = ('suppression', mask)

    def coverage_ratio(self, clean_keys=[]):
        """
        Compute the ratio $area_{convexhull} / area_{imageoverlap}$.
+43 −26
Original line number Diff line number Diff line
@@ -9,11 +9,11 @@ import pandas as pd
from autocnet.control.control import C
from autocnet.fileio import io_json
from autocnet.matcher.matcher import FlannMatcher
import autocnet.matcher.suppression_funcs as spf
from autocnet.graph.edge import Edge
from autocnet.graph.node import Node
from autocnet.vis.graph_view import plot_graph


class CandidateGraph(nx.Graph):
    """
    A NetworkX derived directed graph to store candidate overlap images.
@@ -55,9 +55,15 @@ class CandidateGraph(nx.Graph):

        nx.relabel_nodes(self, node_labels, copy=False)


        # Add the Edge class as a edge data structure
        for s, d, edge in self.edges_iter(data=True):
            if s < d:
                self.edge[s][d] = Edge(self.node[s], self.node[d])
            else:
                self.remove_edge(s, d)
                self.add_edge(d, s)
                self.edge[d][s] = Edge(self.node[d], self.node[s])

    @classmethod
    def from_graph(cls, graph):
@@ -205,26 +211,33 @@ class CandidateGraph(nx.Graph):
        Parameters
        ----------
        k : int
            The number of matches, minus 1, to find per feature.  For example
            k=5 will find the 4 nearest neighbors for every extracted feature.
            If None,  k = (2 * the number of edges connecting a node) +1
            The number of matches to find per feature.
        """
        degree = self.degree()
        # Instantiate a single flann matcher to be resused for all nodes

        self._fl = FlannMatcher()
        for i, node in self.nodes_iter(data=True):

            # Grab the descriptors
            if not hasattr(node, 'descriptors'):
                raise AttributeError('Descriptors must be extracted before matching can occur.')
            self._fl.add(node.descriptors, key=i)
            descriptors = node.descriptors
            # Load the neighbors of the current node into the FLANN matcher
            neighbors = self.neighbors(i)
            for n in neighbors:
                neighbor_descriptors = self.node[n].descriptors
                self._fl.add(neighbor_descriptors, n)
            self._fl.train()

        for i, node in self.nodes_iter(data=True):
            if k is None:
                k = (degree[i] * 2) + 1
            descriptors = node.descriptors
                k = (self.degree(i) * 2)

            # Query and then empty the FLANN matcher for the next node
            matches = self._fl.query(descriptors, i, k=k)
            self.add_matches(matches)

            self._fl.clear()

    def add_matches(self, matches):
        """
        Adds match data to a node and attributes the data to the
@@ -249,7 +262,7 @@ class CandidateGraph(nx.Graph):

                if hasattr(edge, 'matches'):
                    df = edge.matches
                    edge.matches = pd.concat([df, dest_group], ignore_index=True)
                    edge.matches = df.append(dest_group, ignore_index=True)
                else:
                    edge.matches = dest_group

@@ -260,12 +273,12 @@ class CandidateGraph(nx.Graph):
        for s, d, edge in self.edges_iter(data=True):
            edge.symmetry_check()

    def ratio_checks(self, ratio=0.8, clean_keys=[]):
    def ratio_checks(self, clean_keys=[], **kwargs):
        """
        Perform a ratio check on all edges in the graph
        """
        for s, d, edge in self.edges_iter(data=True):
            edge.ratio_check(ratio=ratio, clean_keys=clean_keys)
            edge.ratio_check(clean_keys=clean_keys)

    def compute_homographies(self, clean_keys=[], **kwargs):
        """
@@ -296,14 +309,18 @@ class CandidateGraph(nx.Graph):
            edge.compute_fundamental_matrix(clean_keys=clean_keys, **kwargs)

    def subpixel_register(self, clean_keys=[], threshold=0.8, upsampling=10,
                                 template_size=9, search_size=27):
                                 template_size=9, search_size=27, tiled=False):
         """
         Compute subpixel offsets for all edges using identical parameters
         """
         for s, d, edge in self.edges_iter(data=True):
             edge.subpixel_register(clean_keys=clean_keys, threshold=threshold,
                                    upsampling=upsampling, template_size=template_size,
                                    search_size=search_size)
                                    search_size=search_size, tiled=tiled)

    def suppress(self, clean_keys=[], func=spf.correlation, **kwargs):
        """
        Run disc based spatial suppression on every edge in the graph.

        Parameters
        ----------
        clean_keys : list
                     of mask keys used to pre-filter each edge's matches
                     before suppression is applied.

        func : object
               Callable applied per match row to compute a strength score;
               forwarded unchanged to each edge's suppress method.
        """
        # Delegate to the per-edge implementation with identical arguments.
        for src, dst, current_edge in self.edges_iter(data=True):
            current_edge.suppress(clean_keys=clean_keys, func=func, **kwargs)

    def to_filelist(self):
        """
@@ -376,8 +393,9 @@ class CandidateGraph(nx.Graph):
            if clean_keys:
                matches, mask = edge._clean(clean_keys)

            subpixel = False
            if 'subpixel' in clean_keys:
                offsets = edge.subpixel_offsets
                subpixel = True

            kp1 = self.node[source].keypoints
            kp2 = self.node[destination].keypoints
@@ -390,19 +408,20 @@ class CandidateGraph(nx.Graph):
                m1 = (source, int(row['source_idx']))
                m2 = (destination, int(row['destination_idx']))

                values.append([kp1.iloc[m1_pid]['x'],
                               kp1.iloc[m1_pid]['y'],
                values.append([kp1.loc[m1_pid]['x'],
                               kp1.loc[m1_pid]['y'],
                               m1,
                               pt_idx,
                               source,
                               idx])

                kp2x = kp2.iloc[m2_pid]['x']
                kp2y = kp2.iloc[m2_pid]['y']
                if subpixel:
                    kp2x = kp2.loc[m2_pid]['x'] + row['x_offset']
                    kp2y = kp2.loc[m2_pid]['y'] + row['y_offset']
                else:
                    kp2x = kp2.loc[m2_pid]['x']
                    kp2y = kp2.loc[m2_pid]['y']

                if 'subpixel' in clean_keys:
                    kp2x += offsets['x_offset'].values[i]
                    kp2y += offsets['y_offset'].values[i]
                values.append([kp2x,
                               kp2y,
                               m2,
@@ -499,8 +518,6 @@ class CandidateGraph(nx.Graph):
        with open(filename, 'wb') as f:
            pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)

    # TODO: The Edge object requires a get method in order to be plottable, probably Node as well.
    # This is a function of being a dict in NetworkX
    def plot(self, ax=None, **kwargs):
        """
        Plot the graph object
+67 −27
Original line number Diff line number Diff line
@@ -7,6 +7,7 @@ from scipy.misc import bytescale
from autocnet.fileio.io_gdal import GeoDataset
from autocnet.matcher import feature_extractor as fe
from autocnet.matcher import outlier_detector as od
from autocnet.matcher import suppression_funcs as spf
from autocnet.cg.cg import convex_hull_ratio
from autocnet.utils.isis_serial_numbers import generate_serial_number
from autocnet.vis.graph_view import plot_node
@@ -35,20 +36,12 @@ class Node(dict, MutableMapping):
    isis_serial : str
                  If the input images have PVL headers, generate an
                  ISIS compatible serial number

     provenance : dict
                  With key equal to an autoincrementing integer and value
                  equal to a dict of parameters used to generate this
                  realization.
    """

    def __init__(self, image_name=None, image_path=None):
        self.image_name = image_name
        self.image_path = image_path
        self._masks = set()
        self._mask_arrays = {}
        self.provenance = {}
        self._pid = 0

    def __repr__(self):
        return """
@@ -79,12 +72,35 @@ class Node(dict, MutableMapping):

    @property
    def masks(self):
        mask_lookup = {'suppression': 'suppression'}
        if not hasattr(self, '_masks'):
            self._masks = pd.DataFrame()
        # If the mask is coming form another object that tracks
        # state, dynamically draw the mask from the object.
        for c in self._masks.columns:
            if c in mask_lookup:
                self._masks[c] = getattr(self, mask_lookup[c]).mask
        return self._masks

    @masks.setter
    def masks(self, v):
        self._masks.add(v[0])
        self._mask_arrays[v[0]] = v[1]
        column_name = v[0]
        boolean_mask = v[1]
        self.masks[column_name] = boolean_mask

    @property
    def isis_serial(self):
        """
        Generate and cache an ISIS compatible serial number using the data
        file associated with this node.  This assumes that the data file
        has a PVL header.

        Returns
        -------
        : str or None
          The serial number, or None if generation fails (e.g. the file is
          missing or lacks a PVL header).
        """
        if not hasattr(self, '_isis_serial'):
            try:
                self._isis_serial = generate_serial_number(self.image_path)
            # A bare `except:` would also swallow SystemExit and
            # KeyboardInterrupt; narrow to Exception while preserving the
            # best-effort fall-back to None.
            except Exception:
                self._isis_serial = None
        return self._isis_serial

    def get_array(self, band=1):
        """
@@ -126,13 +142,25 @@ class Node(dict, MutableMapping):
        self._nkeypoints = len(self.keypoints)
        self.descriptors = descriptors.astype(np.float32)

        self.provenance[self._pid] = {'detector': 'sift',
                                      'parameters':kwargs}
        self._pid += 1
    def suppress(self, func=spf.response, **kwargs):
        if not hasattr(self, 'keypoints'):
            raise AttributeError('No keypoints extracted for this node.')

        domain = self.handle.raster_size
        self.keypoints['strength'] = self.keypoints.apply(func, axis=1)

    def anms(self, nfeatures=100, robust=0.9):
        mask = od.adaptive_non_max_suppression(self.keypoints,nfeatures,robust)
        self.masks = ('anms', mask)
        if not hasattr(self, 'suppression'):
            # Instantiate a suppression object and suppress keypoints
            self.suppression = od.SpatialSuppression(self.keypoints, domain, **kwargs)
            self.suppression.suppress()
        else:
            # Update the suppression object attributes and process
            for k, v in kwargs.items():
                if hasattr(self.suppression, k):
                    setattr(self.suppression, k, v)
            self.suppression.suppress()

        self.masks = ('suppression', self.suppression.mask)

    def coverage_ratio(self, clean_keys=[]):
        """
@@ -159,16 +187,28 @@ class Node(dict, MutableMapping):
    def plot(self, clean_keys=[], **kwargs):  # pragma: no cover
        return plot_node(self, clean_keys=clean_keys, **kwargs)

    @property
    def isis_serial(self):
    def _clean(self, clean_keys):
        """
        Generate an ISIS compatible serial number using the data file
        associated with this node.  This assumes that the data file
        has a PVL header.
        Given a list of clean keys compute the
        mask of valid matches

        Parameters
        ----------
        clean_keys : list
                     of columns names (clean keys)

        Returns
        -------
        matches : dataframe
                  A masked view of the matches dataframe

        mask : series
                    A boolean series to inflate back to the full match set
        """
        if not hasattr(self, '_isis_serial'):
            try:
                self._isis_serial = generate_serial_number(self.image_path)
            except:
                self._isis_serial = None
        return self._isis_serial
 No newline at end of file
        if not hasattr(self, 'keypoints'):
            raise AttributeError('Keypoints have not been extracted for this node.')
        panel = self.masks
        mask = panel[clean_keys].all(axis=1)
        matches = self.keypoints[mask]
        return matches, mask
+0 −18
Original line number Diff line number Diff line
@@ -39,24 +39,6 @@ class TestNode(unittest.TestCase):
        # Convex hull computation is checked lower in the hull computation
        self.assertRaises(AttributeError, self.node.coverage_ratio)

    def test_provenance(self):
        image = self.node.get_array()
        self.node.extract_features(image, extractor_parameters={'nfeatures':10})
        self.node.extract_features(image, extractor_parameters={'nfeatures':15})
        p0 = self.node.provenance[0]
        p1 = self.node.provenance[1]
        print(self.node.provenance)
        self.assertEqual(len(self.node.provenance.keys()), 2)
        self.assertNotEqual(find_in_dict(p0, 'nfeatures'),
                            find_in_dict(p1, 'nfeatures'))

    def test_anms(self):
        image = self.node.get_array()
        self.node.extract_features(image, extractor_parameters={'nfeatures':100})
        self.node.anms(nfeatures=10)
        self.assertIn('anms', self.node.masks)
        self.assertTrue(sum(self.node._mask_arrays['anms']), 10)

    def test_isis_serial(self):
        serial = self.node.isis_serial
        self.assertEqual(None, serial)
+55 −39
Original line number Diff line number Diff line
import warnings

import cv2
import pandas as pd

@@ -10,8 +12,7 @@ DEFAULT_FLANN_PARAMETERS = dict(algorithm=FLANN_INDEX_KDTREE,
                                trees=3)


def pattern_match(template, image, upsampling=16,
                  func=match_template):
def pattern_match(template, image, upsampling=16,func=cv2.TM_CCOEFF_NORMED, error_check=False):
    """
    Call an arbitrary pattern matcher

@@ -29,6 +30,12 @@ def pattern_match(template, image, upsampling=16,

    func : object
           The function to be used to perform the template based matching
           Options: {cv2.TM_CCORR_NORMED, cv2.TM_CCOEFF_NORMED, cv2.TM_SQDIFF_NORMED}
           In testing the first two options perform significantly better with Apollo data.

    error_check : bool
                  If True, also apply a different matcher and test that the values
                  are not too divergent.  Default, False.

    Returns
    -------
@@ -42,34 +49,35 @@ def pattern_match(template, image, upsampling=16,
    strength : float
               The strength of the correlation in the range [-1, 1].
    """
    if upsampling < 1:
        raise ValueError

    u_template = zoom(template, upsampling)
    u_image = zoom(image, upsampling, )
    # Find the the upper left origin of the template in the image
    match = func(u_image, u_template)
    y, x = np.unravel_index(np.argmax(match), match.shape)
    different = {cv2.TM_SQDIFF_NORMED: cv2.TM_CCOEFF_NORMED,
                 cv2.TM_CCORR_NORMED: cv2.TM_SQDIFF_NORMED,
                 cv2.TM_CCOEFF_NORMED: cv2.TM_SQDIFF_NORMED}

    # Resample the match back to the native image resolution
    x /= upsampling
    y /= upsampling
    if upsampling < 1:
        raise ValueError

    # Offset from the UL origin to the image center
    x += (template.shape[1] / 2)
    y += (template.shape[0] / 2)
    u_template = zoom(template, upsampling, order=1)
    u_image = zoom(image, upsampling, order=1)

    # Compute the offset to adjust the image match point location
    ideal_y = image.shape[0] / 2
    ideal_x = image.shape[1] / 2
    result = cv2.matchTemplate(u_image, u_template, method=func)
    min_corr, max_corr, min_loc, max_loc = cv2.minMaxLoc(result)
    if func == cv2.TM_SQDIFF or func == cv2.TM_SQDIFF_NORMED:
        x,y = (min_loc[0], min_loc[1])
    else:
        x, y = (max_loc[0], max_loc[1])

    x = ideal_x - x
    y = ideal_y - y
    # Compute the idealized shift (image center)
    ideal_y = u_image.shape[0] / 2
    ideal_x = u_image.shape[1] / 2

    # Find the maximum correlation
    strength = np.max(match)
    # Compute the shift from template upper left to template center
    y += (u_template.shape[0] / 2)
    x += (u_template.shape[1] / 2)

    return x, y, strength
    x = (ideal_x - x) / upsampling
    y = (ideal_y - y) / upsampling
    return x, y, max_corr


class FlannMatcher(object):
@@ -92,10 +100,10 @@ class FlannMatcher(object):

    def __init__(self, flann_parameters=DEFAULT_FLANN_PARAMETERS):
        self._flann_matcher = cv2.FlannBasedMatcher(flann_parameters, {})
        self.image_indices = {}
        self.image_index_counter = 0
        self.nid_lookup = {}
        self.node_counter = 0

    def add(self, descriptor, key):
    def add(self, descriptor, nid):
        """
        Add a set of descriptors to the matcher and add the image
        index key to the image_indices attribute
@@ -105,12 +113,21 @@ class FlannMatcher(object):
        descriptor : ndarray
                     The descriptor to be added

        key : hashable
              The identifier for this image, e.g. the image name
        nid : int
              The node ids
        """
        self._flann_matcher.add([descriptor])
        self.image_indices[self.image_index_counter] = key
        self.image_index_counter += 1
        self.nid_lookup[self.node_counter] = nid
        self.node_counter += 1

    def clear(self):
        """
        Reset this matcher to its freshly constructed state: zero the node
        id bookkeeping and drop every trained descriptor set from the
        underlying FLANN index.
        """
        # The bookkeeping reset and index clear are independent operations.
        self.nid_lookup = {}
        self.node_counter = 0
        self._flann_matcher.clear()

    def train(self):
        """
@@ -144,23 +161,22 @@ class FlannMatcher(object):
        matched = []
        for m in matches:
            for i in m:
                # This checks for self neighbor and never allows them into the graph
                if self.image_indices[i.imgIdx] == query_image:
                    continue

                # Ensure ordering in the source / destination
                if query_image < self.image_indices[i.imgIdx]:
                source = query_image
                destination = self.nid_lookup[i.imgIdx]
                if source < destination:
                    matched.append((query_image,
                                    i.queryIdx,
                                    self.image_indices[i.imgIdx],
                                    destination,
                                    i.trainIdx,
                                    i.distance))
                else:
                    matched.append((self.image_indices[i.imgIdx],
                elif source > destination:
                    matched.append((destination,
                                    i.trainIdx,
                                    query_image,
                                    i.queryIdx,
                                    i.distance))
                else:
                    warnings.warn('Likely self neighbor in query!')
        return pd.DataFrame(matched, columns=['source_image', 'source_idx',
                                              'destination_image', 'destination_idx',
                                              'distance'])
Loading