Commit 0bae531c authored by Jay's avatar Jay Committed by jay
Browse files

Fixes several bugs and adds disk-based suppression for subpixel registration results.

parent 03715ec5
Loading
Loading
Loading
Loading
+84 −5
Original line number Diff line number Diff line
import math
import warnings
from collections import MutableMapping

@@ -111,7 +112,7 @@ class Edge(dict, MutableMapping):
        else:
            raise AttributeError('No matches have been computed for this edge.')

    def ratio_check(self, ratio=0.8, clean_keys=[]):
    def ratio_check(self, clean_keys=[], **kwargs):
        if hasattr(self, 'matches'):

            if clean_keys:
@@ -119,13 +120,14 @@ class Edge(dict, MutableMapping):
            else:
                mask = pd.Series(True, self.matches.index)


            self.distance_ratio = od.DistanceRatio(self.matches)
            self.distance_ratio.compute(ratio, mask=mask, mask_name=None)
            self.distance_ratio.compute(mask=mask, **kwargs)

            # Setup to be notified
            self.distance_ratio._notify_subscribers(self.distance_ratio)

            self.masks = ('ratio', mask)
            self.masks = ('ratio', self.distance_ratio.mask)
        else:
            raise AttributeError('No matches have been computed for this edge.')

@@ -253,7 +255,6 @@ class Edge(dict, MutableMapping):
                      The maximum (positive) value that a pixel can shift in the y direction
                      without being considered an outlier
        """

        matches = self.matches
        self.subpixel_offsets = pd.DataFrame(0, index=matches.index, columns=['x_offset',
                                                                              'y_offset',
@@ -264,6 +265,7 @@ class Edge(dict, MutableMapping):
        if clean_keys:
            matches, mask = self._clean(clean_keys)

        # Grab the full images, or handles
        if tiled is True:
            s_img = self.source.handle
            d_img = self.destination.handle
@@ -289,7 +291,6 @@ class Edge(dict, MutableMapping):
            except:
                warnings.warn('Template-Search size mismatch, failing for this correspondence point.')
                continue

        self.subpixel_offsets.to_sparse(fill_value=0.0)

        # Compute the mask for correlations less than the threshold
@@ -309,6 +310,84 @@ class Edge(dict, MutableMapping):
        self.masks = ('threshold', threshold_mask)
        self.masks = ('subpixel', mask)

    def suppress(self, min_radius=1, k=100, error_k=0.1):
        """
        Suppress subpixel registered points so that approximately
        k +/- (k * error_k) points, with good spatial distribution, remain.

        Binary-searches a suppression radius: candidate points are sorted
        by correlation and accepted greedily unless a previously accepted
        point already covers their grid cell.

        Adds a suppression mask to the edge mask dataframe.

        Parameters
        ----------
        min_radius : int
                     The lowest acceptable radius value for points

        k : int
            The desired number of output points

        error_k : float
                  [0,1) The acceptable epsilon around k
        """
        xy_extent = self.source.handle.xy_extent[1]
        # The largest useful suppression radius is a quarter of the
        # smaller image dimension.
        max_radius = min(xy_extent) / 4
        # BUG FIX: previously `k = 100` was reassigned here, silently
        # ignoring the caller-supplied k parameter.

        sp_mask = self.masks['subpixel']
        sp_values = self.subpixel_offsets[sp_mask]

        # Attach image-space coordinates to each surviving subpixel match and
        # order by correlation so the strongest points are considered first.
        coordinates = self.source.keypoints.iloc[sp_values['s_idx']][['x', 'y']]
        merged = pd.merge(sp_values, coordinates, left_on='s_idx',
                          how='left', right_index=True).sort_values(by='correlation')

        previous_cell_size = 0
        # Initialized before the loop so the cycling guard below cannot
        # break out with `result` unbound (previously a latent NameError).
        result = []

        while True:
            r = (min_radius + max_radius) / 2
            cell_size = int(r / math.sqrt(2))

            # To prevent cycling once the binary search converges
            if cell_size == previous_cell_size:
                break
            previous_cell_size = cell_size

            # Setup to store results
            result = []
            # Compute the bin edges and assign points to the appropriate bins
            x_edges = np.arange(0, xy_extent[0], int(xy_extent[0] / cell_size))
            y_edges = np.arange(0, xy_extent[1], int(xy_extent[1] / cell_size))
            # np.bool is removed in NumPy >= 1.24; the builtin bool is the
            # supported spelling for a boolean dtype.
            grid = np.zeros((len(y_edges), len(x_edges)), dtype=bool)

            xbins = np.digitize(merged['x'], bins=x_edges)
            ybins = np.digitize(merged['y'], bins=y_edges)

            for i, (idx, p) in enumerate(merged.iterrows()):
                x_center = xbins[i]
                y_center = ybins[i]
                # Accept the point only if its cell is not yet covered.
                if not grid[y_center - 1, x_center - 1]:
                    result.append(idx)
                    if len(result) > k:
                        # Search the lower half, the radius is too big
                        max_radius = r
                        break

                    # Cover the surrounding cells so nearby points are rejected.
                    # NOTE(review): the 5-cell halo is fixed regardless of r;
                    # confirm this matches the intended suppression footprint.
                    grid[y_center - 5: y_center + 5,
                         x_center - 5: x_center + 5] = True

            # Check break conditions
            if k - k * error_k < len(result) < k + k * error_k:
                break
            elif len(result) < k:
                # Search the upper half, the radius is too small
                min_radius = r
            elif abs(max_radius - min_radius) < 5:
                break

        # NOTE(review): `result` holds index *labels* from iterrows(); using
        # .iloc assumes those labels coincide with positions — verify against
        # the subpixel_offsets index semantics.
        mask = pd.Series(False, self.masks.index)
        mask.iloc[np.array(result)] = True

        self.masks = ('suppression', mask)

    def coverage_ratio(self, clean_keys=[]):
        """
        Compute the ratio $area_{convexhull} / area_{imageoverlap}$.
+28 −14
Original line number Diff line number Diff line
@@ -55,9 +55,15 @@ class CandidateGraph(nx.Graph):

        nx.relabel_nodes(self, node_labels, copy=False)


        # Add the Edge class as a edge data structure
        for s, d, edge in self.edges_iter(data=True):
            if s < d:
                self.edge[s][d] = Edge(self.node[s], self.node[d])
            else:
                self.remove_edge(s, d)
                self.add_edge(d, s)
                self.edge[d][s] = Edge(self.node[d], self.node[s])

    @classmethod
    def from_graph(cls, graph):
@@ -209,22 +215,32 @@ class CandidateGraph(nx.Graph):
            k=5 will find the 4 nearest neighbors for every extracted feature.
            If None,  k = (2 * the number of edges connecting a node) +1
        """
        degree = self.degree()
        # Instantiate a single flann matcher to be resused for all nodes

        self._fl = FlannMatcher()
        for i, node in self.nodes_iter(data=True):

            # Grab the descriptors
            if not hasattr(node, 'descriptors'):
                raise AttributeError('Descriptors must be extracted before matching can occur.')
            self._fl.add(node.descriptors, key=i)
            descriptors = node.descriptors

            # Load the neighbors of the current node into the FLANN matcher
            neighbors = self.neighbors(i)
            for n in neighbors:
                neighbor_descriptors = self.node[n].descriptors
                self._fl.add(neighbor_descriptors, n)
            self._fl.train()

        for i, node in self.nodes_iter(data=True):
            if k is None:
                k = (degree[i] * 2) + 1
            descriptors = node.descriptors
                k = (self.degree(i) * 2)

            # Query and then empty the FLANN matcher for the next node
            matches = self._fl.query(descriptors, i, k=k)
            self.add_matches(matches)

            self._fl.clear()

    def add_matches(self, matches):
        """
        Adds match data to a node and attributes the data to the
@@ -249,7 +265,7 @@ class CandidateGraph(nx.Graph):

                if hasattr(edge, 'matches'):
                    df = edge.matches
                    edge.matches = pd.concat([df, dest_group], ignore_index=True)
                    edge.matches = df.append(dest_group, ignore_index=True)
                else:
                    edge.matches = dest_group

@@ -260,12 +276,12 @@ class CandidateGraph(nx.Graph):
        for s, d, edge in self.edges_iter(data=True):
            edge.symmetry_check()

    def ratio_checks(self, ratio=0.8, clean_keys=[]):
    def ratio_checks(self, clean_keys=[], **kwargs):
        """
        Perform a ratio check on all edges in the graph
        """
        for s, d, edge in self.edges_iter(data=True):
            edge.ratio_check(ratio=ratio, clean_keys=clean_keys)
            edge.ratio_check(clean_keys=clean_keys)

    def compute_homographies(self, clean_keys=[], **kwargs):
        """
@@ -296,14 +312,14 @@ class CandidateGraph(nx.Graph):
            edge.compute_fundamental_matrix(clean_keys=clean_keys, **kwargs)

    def subpixel_register(self, clean_keys=[], threshold=0.8, upsampling=10,
                                 template_size=9, search_size=27):
                                 template_size=9, search_size=27, tiled=False):
         """
         Compute subpixel offsets for all edges using identical parameters
         """
         for s, d, edge in self.edges_iter(data=True):
             edge.subpixel_register(clean_keys=clean_keys, threshold=threshold,
                                    upsampling=upsampling, template_size=template_size,
                                    search_size=search_size)
                                    search_size=search_size, tiled=tiled)

    def to_filelist(self):
        """
@@ -499,8 +515,6 @@ class CandidateGraph(nx.Graph):
        with open(filename, 'wb') as f:
            pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)

    # TODO: The Edge object requires a get method in order to be plottable, probably Node as well.
    # This is a function of being a dict in NetworkX
    def plot(self, ax=None, **kwargs):
        """
        Plot the graph object
+24 −16
Original line number Diff line number Diff line
@@ -92,10 +92,10 @@ class FlannMatcher(object):

    def __init__(self, flann_parameters=DEFAULT_FLANN_PARAMETERS):
        self._flann_matcher = cv2.FlannBasedMatcher(flann_parameters, {})
        self.image_indices = {}
        self.image_index_counter = 0
        self.nid_lookup = {}
        self.node_counter = 0

    def add(self, descriptor, key):
    def add(self, descriptor, nid):
        """
        Add a set of descriptors to the matcher and add the image
        index key to the image_indices attribute
@@ -105,12 +105,21 @@ class FlannMatcher(object):
        descriptor : ndarray
                     The descriptor to be added

        key : hashable
              The identifier for this image, e.g. the image name
        nid : int
              The node ids
        """
        self._flann_matcher.add([descriptor])
        self.image_indices[self.image_index_counter] = key
        self.image_index_counter += 1
        self.nid_lookup[self.node_counter] = nid
        self.node_counter += 1

    def clear(self):
        """
        Remove all descriptors from the underlying FLANN matcher and
        reset the node-id bookkeeping so the matcher can be reused
        for the next node's query.
        """
        # Drop all trained descriptor sets from the cv2 matcher.
        self._flann_matcher.clear()
        # Reset the imgIdx -> node id mapping and its counter.
        self.nid_lookup = dict()
        self.node_counter = 0

    def train(self):
        """
@@ -144,23 +153,22 @@ class FlannMatcher(object):
        matched = []
        for m in matches:
            for i in m:
                # This checks for self neighbor and never allows them into the graph
                if self.image_indices[i.imgIdx] == query_image:
                    continue

                # Ensure ordering in the source / destination
                if query_image < self.image_indices[i.imgIdx]:
                source = query_image
                destination = self.nid_lookup[i.imgIdx]
                if source < destination:
                    matched.append((query_image,
                                    i.queryIdx,
                                    self.image_indices[i.imgIdx],
                                    destination,
                                    i.trainIdx,
                                    i.distance))
                else:
                    matched.append((self.image_indices[i.imgIdx],
                elif source > destination:
                    matched.append((destination,
                                    i.trainIdx,
                                    query_image,
                                    i.queryIdx,
                                    i.distance))
                else:
                    raise ValueError('Likely self neighbor in query!')
        return pd.DataFrame(matched, columns=['source_image', 'source_idx',
                                              'destination_image', 'destination_idx',
                                              'distance'])
+7 −7
Original line number Diff line number Diff line
@@ -42,7 +42,7 @@ class DistanceRatio(object):
    def nvalid(self):
        return self.mask.sum()

    def compute(self, ratio, mask=None, mask_name=None, single=False):
    def compute(self, ratio=0.8, mask=None, mask_name=None, single=False):
        """
        Compute and return a mask for a matches dataframe
        using Lowe's ratio test.  If keypoints have a single
@@ -74,20 +74,19 @@ class DistanceRatio(object):
            return res

        self.single = single

        if mask is not None:
            self.mask = mask.copy()
            new_mask = self.matches[mask].groupby('source_idx')['distance'].transform(func).astype('bool')
            self.mask[mask==True] = new_mask
        else:
            new_mask = self.matches.groupby('source_idx')['distance'].transform(func).astype('bool')
            self.mask = new_mask.copy()
            self.mask = self.matches.groupby('source_idx')['distance'].transform(func).astype('bool')

        state_package = {'ratio': ratio,
                         'mask': self.mask.copy(),
                         'clean_keys': mask_name,
                         'single': single
                         }

        self._action_stack.append(state_package)
        self._current_action_stack = len(self._action_stack) - 1

@@ -152,7 +151,6 @@ class DistanceRatio(object):
        # Reset attributes (could also cache)
        self._notify_subscribers(self)


def self_neighbors(matches):
    """
    Returns a pandas data series intended to be used as a mask. Each row
@@ -197,8 +195,10 @@ def mirroring_test(matches):
                 otherwise, they will be false. Keypoints with only one match will be False. Removes
                 duplicate rows.
    """
    duplicates = matches.duplicated(keep='first').astype(bool)
    return duplicates
    duplicate_mask = matches.duplicated(subset=['source_idx', 'destination_idx', 'distance'],
                                    keep='last')

    return duplicate_mask


def compute_fundamental_matrix(kp1, kp2, method='ransac', reproj_threshold=5.0, confidence=0.99):
+0 −1
Original line number Diff line number Diff line
@@ -46,7 +46,6 @@ def clip_roi(img, center, img_size):
    else:
        clipped_img = img.read_array(pixels=[x_start, y_start,
                                             x_stop, y_stop])

    return clipped_img


Loading