Commit ad42e3d3 authored by Jay's avatar Jay Committed by jay
Browse files

Update to how the fundamental matrix computes error. Minor fixes to syntax.

parent 22185cc0
Loading
Loading
Loading
Loading
+20 −14
Original line number Diff line number Diff line
@@ -90,10 +90,9 @@ class Edge(dict, MutableMapping):
    def ratio_check(self, clean_keys=[], **kwargs):
        if hasattr(self, 'matches'):

            _, mask = self._clean(clean_keys)

            matches, mask = self._clean(clean_keys)

            self.distance_ratio = od.DistanceRatio(self.matches)
            self.distance_ratio = od.DistanceRatio(matches)
            self.distance_ratio.compute(mask=mask, **kwargs)

            # Setup to be notified
@@ -111,23 +110,27 @@ class Edge(dict, MutableMapping):
            raise AttributeError('Matches have not been computed for this edge')
            return

        all_source_keypoints = self.source.get_keypoint_coordinates(matches['source_idx'])
        all_destin_keypoints = self.destination.get_keypoint_coordinates(matches['destination_idx'])
        matches, mask = self._clean(clean_keys)
        s_keypoints = self.source.get_keypoint_coordinates(matches['source_idx']).values
        d_keypoints = self.destination.get_keypoint_coordinates(matches['destination_idx']).values
        transformation_matrix, fundam_mask = od.compute_fundamental_matrix(s_keypoints,
                                                                           d_keypoints,

        s_keypoints = self.source.get_keypoint_coordinates(index=matches['source_idx'],
                                                           homogeneous=True)
        d_keypoints = self.destination.get_keypoint_coordinates(index=matches['destination_idx'],
                                                                homogeneous=True)

        transformation_matrix, fundam_mask = od.compute_fundamental_matrix(s_keypoints.values,
                                                                           d_keypoints.values,
                                                                           **kwargs)
        try:
            fundam_mask = fundam_mask.ravel()
        except:
            return

        # Convert the truncated RANSAC mask back into a full length mask
        mask[mask] = fundam_mask

        self.fundamental_matrix = FundamentalMatrix(transformation_matrix,
                                                    all_source_keypoints,
                                                    all_destin_keypoints,
                                                    s_keypoints,
                                                    d_keypoints,
                                                    mask=mask)

        # Subscribe the health watcher to the fundamental matrix observable
@@ -164,8 +167,8 @@ class Edge(dict, MutableMapping):

        matches, mask = self._clean(clean_keys)

        s_keypoints = self.source.get_keypoint_coordinates(matches['source_idx'])
        d_keypoints = self.destination.get_keypoint_coordinates(matches['destination_idx'])
        s_keypoints = self.source.get_keypoint_coordinates(index=matches['source_idx'])
        d_keypoints = self.destination.get_keypoint_coordinates(index=matches['destination_idx'])

        transformation_matrix, ransac_mask = od.compute_homography(s_keypoints.values,
                                                                   d_keypoints.values,
@@ -284,6 +287,9 @@ class Edge(dict, MutableMapping):
                           as the strength of a given row in the matches data
                           frame.

        suppression_args : tuple
                           Arguments to be passed on to the suppression function

        clean_keys : list
                     of mask keys to be used to reduce the total size
                     of the matches dataframe.
@@ -297,7 +303,7 @@ class Edge(dict, MutableMapping):
        # Massage the dataframe into the correct structure
        coords = self.source.get_keypoint_coordinates()
        merged = matches.merge(coords, left_on=['source_idx'], right_index=True)
        merged['strength'] = merged.apply(suppression_func, axis=1)
        merged['strength'] = merged.apply(suppression_func, axis=1, args=([self]))

        if not hasattr(self, 'suppression'):
            # Instantiate the suppression object and suppress matches
+1 −1
Original line number Diff line number Diff line
@@ -144,7 +144,7 @@ class Node(dict, MutableMapping):
        """
        if hasattr(self, '_keypoints'):
            try:
                return self._keypoints.loc[index]
                return self._keypoints.iloc[index]
            except:
                return self._keypoints
        else:
+5 −7
Original line number Diff line number Diff line
@@ -53,7 +53,7 @@ class DistanceRatio(Observable):
    def nvalid(self):
        return self.mask.sum()

    def compute(self, ratio=0.9, mask=None, mask_name=None, single=False):
    def compute(self, ratio=0.95, mask=None, mask_name=None, single=False):
        """
        Compute and return a mask for a matches dataframe
        using Lowe's ratio test.  If keypoints have a single
@@ -147,7 +147,8 @@ class SpatialSuppression(Observable):
        self.max_radius = max(domain)
        self.min_radius = min_radius
        self.domain = domain
        self.mask = None
        self.mask = pd.Series(False, index=self.df.index)

        self.k = k
        self._error_k = error_k

@@ -174,7 +175,7 @@ class SpatialSuppression(Observable):

    def suppress(self):
        """
        Suppress subpixel registered points to that k +- k * error_k
        Suppress subpixel registered points so that k +- k * error_k
        points, with good spatial distribution, remain
        """
        process = True
@@ -258,8 +259,6 @@ class SpatialSuppression(Observable):
                warnings.warn('Unable to optimally solve.  Returning with {} points'.format(len(result)))
                process = False


        self.mask = pd.Series(False, self.df.index)
        self.mask.loc[list(result)] = True
        state_package = {'mask':self.mask,
                         'k': self.k,
@@ -314,7 +313,7 @@ def mirroring_test(matches):
                 otherwise, they will be false. Keypoints with only one match will be False. Removes
                 duplicate rows.
    """
    duplicate_mask = matches.duplicated(subset=['source_idx', 'destination_idx', 'distance'])
    duplicate_mask = matches.duplicated(subset=['source_idx', 'destination_idx', 'distance'], keep='last')
    return duplicate_mask


@@ -363,7 +362,6 @@ def compute_fundamental_matrix(kp1, kp2, method='ransac', reproj_threshold=5.0,
    else:
        raise ValueError("Unknown outlier detection method.  Choices are: 'ransac', 'lmeds', or 'normal'.")


    transformation_matrix, mask = cv2.findFundamentalMat(kp1,
                                                     kp2,
                                                     method_,
+15 −4
Original line number Diff line number Diff line
def response(row):
import numpy as np


def response(row, edge):
    """
    Suppression function mapping a row's 'response' value directly to
    'strength'.  The ``edge`` argument exists only for interface
    uniformity with the other suppression functions and is ignored.
    """
    strength = row['response']
    return strength


def correlation(row):
def correlation(row, edge):
    """
    Suppression function mapping a row's 'correlation' value directly to
    'strength'.  The ``edge`` argument exists only for interface
    uniformity with the other suppression functions and is ignored.
    """
    strength = row['correlation']
    return strength


def distance(row):
def distance(row, edge):
    """
    Suppression function that converts 'distance' into 'strength'
    """
    return row['distance']
    return 1 / row['distance']


def error(row, edge):
    """
    Suppression function that converts the reprojection error of the
    edge's fundamental matrix into 'strength'.

    Parameters
    ----------
    row : Series
          A row of the matches dataframe; ``row.name`` is used as the
          positional index into the edge's error vector.

    edge : object
           An edge expected to expose a ``fundamental_matrix.error``
           indexable attribute — TODO confirm against Edge/FundamentalMatrix.

    Returns
    -------
    : float
      The inverse of the reprojection error, or NaN when the error is
      unavailable (e.g. no fundamental matrix computed, or bad index).
    """
    key = row.name
    # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
    # catch only ordinary runtime errors (missing attribute, bad index).
    try:
        return 1 / edge.fundamental_matrix.error.iloc[key]
    except Exception:
        return np.NaN
+41 −37
Original line number Diff line number Diff line
@@ -7,7 +7,7 @@ import pandas as pd
import pysal as ps

from autocnet.matcher.outlier_detector import compute_fundamental_matrix
from autocnet.utils.utils import make_homogeneous
from autocnet.utils.utils import make_homogeneous, normalize_vector


class TransformationMatrix(np.ndarray):
@@ -248,19 +248,48 @@ class FundamentalMatrix(TransformationMatrix):
            except:
                pass

    def compute_error(self, x1, x2, mask=None):
    @property
    def error(self):
        """
        Give this homography, compute the planar reprojection error
        between points a and b.
        Using the currently unmasked correspondences, compute the reprojection
        error.

        Returns
        -------
        : ndarray
          The current error

        See Also
        --------
        compute_error : The method called to compute element-wise error.
        """
        x = self.x1.loc[self.mask]
        x1 = self.x2.loc[self.mask]
        return self.compute_error(self.x1, self.x2)

    def compute_error(self, x, x1):
        """
        Given a set of matches and a known fundamental matrix,
        compute distance between all match points and the associated
        epipolar lines.

        Ideal error is defined by $x^{\intercal}Fx = 0$,
        where $x$ are all matchpoints in a given image and
        $x^{\intercal}F$ defines the standard form of the
        epipolar line in the second image.

        The distance between a point and the associated epipolar
        line is computed as: $d = \frac{\lvert ax_{0} + by_{0} + c \rvert}{\sqrt{a^{2} + b^{2}}}$.

        Parameters
        ----------

        a : ndarray
            n,2 array of x,y coordinates
        x : dataframe
            n,3 dataframe of homogeneous coordinates

        b : ndarray
            n,2 array of x,y coordinates
        x1 : dataframe
            n,3 dataframe of homogeneous coordinates with the same
            length as argument x

        mask : Series
               Index to be used in the returned dataframe
@@ -275,37 +304,12 @@ class FundamentalMatrix(TransformationMatrix):
             df.x_rms, df.y_rms, and df.total_rms, respectively.
        """

        if mask is not None:
            mask = mask
        else:
            mask = self.mask
        index = mask[mask == True].index

        x1 = self.x1.iloc[index].values
        x2 = self.x2.iloc[index].values
        err = np.zeros(x1.shape[0])

        # TODO: Vectorize the error computation
        for i, j in enumerate(x1):
            a = self[0, 0] * j[0] + self[0, 1] * j[1] + self[0, 2]
            b = self[1, 0] * j[0] + self[1, 1] * j[1] + self[1, 2]
            c = self[2, 0] * j[0] + self[2, 1] * j[1] + self[2, 2]

            s2 = 1 / (a * a + b * b)
            d2 = x2[i][0] * a + x2[i][1] * b + c

            a = self[0, 0] * x2[i][0] + self[0, 1] * x2[i][1] + self[0, 2]
            b = self[1, 0] * x2[i][0] + self[1, 1] * x2[i][1] + self[1, 2]
            c = self[2, 0] * x2[i][0] + self[2, 1] * x2[i][1] + self[2, 2]

            s1 = 1 / (a * a + b * b)
            d1 = j[0] * a + j[1] * b + c

            err[i] = max(d1 * d1 * s1, d2 * d2 * s2)
        #Normalize the vector
        l_norms = normalize_vector(x.dot(self.T))

        error = pd.DataFrame(err, columns=['Reprojection Error'], index=index)
        F_error = np.abs(np.sum(l_norms * x1, axis=1))

        return error
        return F_error

    def recompute_matrix(self):
        raise NotImplementedError
Loading