Commit fec9cd9a authored by Jay's avatar Jay Committed by jay
Browse files

Bug fixed in subpixel.

parent 319c642d
Loading
Loading
Loading
Loading
+6 −10
Original line number Diff line number Diff line
@@ -379,26 +379,22 @@ class GeoDataset(object):
        if not pixels:
            array = band.ReadAsArray().astype(dtype)
        else:
            xstart = pixels[0]
            # Check that the read start is not outside of the image
            xstart, ystart, xextent, yextent = pixels
            if xstart < 0:
                xstart = 0

            ystart = pixels[1]
            if ystart < 0:
                ystart = 0

            xmax, ymax = map(int, self.xy_extent[1])
            if pixels[2] > xmax:
            if xstart + pixels[2] > xmax:
                xextent = xmax - xstart
            else:
                xextent = pixels[2] - xstart

            if pixels[3] > ymax:
            if ystart + pixels[3] > ymax:
                yextent = ymax - ystart
            else:
                yextent = pixels[3] - ystart
            array = band.ReadAsArray(xstart, ystart,
                                          xextent, yextent).astype(dtype)

            array = band.ReadAsArray(xstart, ystart, xextent, yextent).astype(dtype)
        return array


+70 −8
Original line number Diff line number Diff line
import os
import warnings

import networkx as nx
import pandas as pd
@@ -11,6 +12,7 @@ from scipy.misc import bytescale
from autocnet.control.control import C
from autocnet.fileio import io_json
from autocnet.fileio.io_gdal import GeoDataset
from autocnet.matcher.matcher import FlannMatcher
from autocnet.matcher import feature_extractor as fe
from autocnet.matcher import outlier_detector as od
from autocnet.matcher import subpixel as sp
@@ -63,6 +65,32 @@ class Edge(object):
    def subpixel_offsets(self, v):
        self._subpixel_offsets = v

    def keypoints(self, clean_keys=None):
        """
        Return views of the source and destination keypoint dataframes,
        optionally filtered by a set of previously computed cleaning masks.

        Parameters
        ----------
        clean_keys : list
                     Keys into self._mask_arrays; the corresponding boolean
                     masks are AND-ed together and applied to self.matches
                     before the keypoints are looked up.  None (or an empty
                     list) applies no masking.

        Returns
        -------
        s_kps : dataframe
                Keypoints of the source node for the surviving matches
        d_kps : dataframe
                Keypoints of the destination node for the surviving matches
        """
        matches = self.matches

        # Build up a composite mask from all of the user specified masks.
        # NOTE: dtype=bool (not the np.bool alias, which was removed in
        # NumPy 1.24) — the product of boolean masks is their logical AND.
        if clean_keys:
            mask = np.prod([self._mask_arrays[i] for i in clean_keys],
                           axis=0, dtype=bool)
            matches = matches[mask]

        # Now that we know the matches, build a pair of dataframes that are
        # the truncated keypoints
        s_kps = self.source.keypoints.iloc[matches['source_idx']]
        d_kps = self.destination.keypoints.iloc[matches['destination_idx']]
        return s_kps, d_kps

    def symmetry_check(self):
        if hasattr(self, 'matches'):
            mask = od.mirroring_test(self.matches)
@@ -122,8 +150,8 @@ class Edge(object):
        self.masks = ('ransac', mask)
        self.homography = transformation_matrix

    def compute_subpixel_offset(self, clean_keys=[], threshold=0.8, upsampling=10,
                                 template_size=9, search_size=39):
    def compute_subpixel_offset(self, clean_keys=[], threshold=0.8, upsampling=16,
                                 template_size=19, search_size=53):
        """
        For the entire graph, compute the subpixel offsets using pattern-matching and add the result
        as an attribute to each edge of the graph.
@@ -176,7 +204,11 @@ class Edge(object):
            s_template = sp.clip_roi(self.source.handle, s_keypoint, template_size)
            d_search = sp.clip_roi(self.destination.handle, d_keypoint, search_size)

            try:
                edge_offsets[i] = sp.subpixel_offset(s_template, d_search, upsampling=upsampling)
            except:
                warnings.warn('Template-Search size mismatch, failing for this correspondence point.')
                continue

        # Compute the mask for correlations less than the threshold
        threshold_mask = edge_offsets[edge_offsets[:, -1] >= threshold]
@@ -341,10 +373,17 @@ class Node(object):

        """
        keypoint_objs, descriptors = fe.extract_features(array, **kwargs)
        keypoints = np.empty((len(keypoint_objs), 4),dtype=np.float32)
        keypoints = np.empty((len(keypoint_objs), 7),dtype=np.float32)
        for i, kpt in enumerate(keypoint_objs):
            keypoints[i] = kpt.pt[0], kpt.pt[1], kpt.response, kpt.size  # y, x
        self.keypoints = pd.DataFrame(keypoints, columns=['x', 'y', 'response', 'size'])
            octave = kpt.octave & 8
            layer = (kpt.octave >> 8) & 255
            if octave < 128:
                octave = octave
            else:
                octave = (-128 | octave)
            keypoints[i] = kpt.pt[0], kpt.pt[1], kpt.response, kpt.size, kpt.angle, octave, layer  # y, x
        self.keypoints = pd.DataFrame(keypoints, columns=['x', 'y', 'response', 'size',
                                                          'angle', 'octave', 'layer'])
        self._nkeypoints = len(self.keypoints)
        self.descriptors = descriptors.astype(np.float32)

@@ -547,6 +586,29 @@ class CandidateGraph(nx.Graph):
            node.extract_features(image, method=method,
                                extractor_parameters=extractor_parameters)

    def match_features(self, k=3):
        """
        Apply feature matching across all connected edges in the graph.

        Parameters
        ----------
        k : int
            The number of matches, minus 1, to find per feature.  For example
            k=5 will find the 4 nearest neighbors for every extracted feature.
        """
        # Build a Fast Approximate Nearest Neighbor KD-Tree over the
        # descriptors of every node in the graph.
        matcher = FlannMatcher()
        for node_id, node in self.nodes_iter(data=True):
            if not hasattr(node, 'descriptors'):
                raise AttributeError('Descriptors must be extracted before matching can occur.')
            matcher.add(node.descriptors, key=node_id)
        matcher.train()

        # Query the trained matcher once per node and attach the resulting
        # matches to the graph.
        for node_id, node in self.nodes_iter(data=True):
            self.add_matches(matcher.query(node.descriptors, node_id, k=k))

    def add_matches(self, matches):
        """
        Adds match data to a node and attributes the data to the
+4 −10
Original line number Diff line number Diff line
@@ -3,14 +3,14 @@ import pandas as pd

import numpy as np
from skimage.feature import match_template
from scipy.misc import imresize
from scipy.ndimage.interpolation import zoom

FLANN_INDEX_KDTREE = 1  # Algorithm to set centers,
DEFAULT_FLANN_PARAMETERS = dict(algorithm=FLANN_INDEX_KDTREE,
                                trees=3)


def pattern_match(template, image, upsampling=10,
def pattern_match(template, image, upsampling=16,
                  func=match_template):
    """
    Call an arbitrary pattern matcher
@@ -45,14 +45,8 @@ def pattern_match(template, image, upsampling=10,
    if upsampling < 1:
        raise ValueError

    u_template = imresize(template, (template.shape[0] * upsampling,
                                   template.shape[1] * upsampling),
                        interp='bicubic')

    u_image = imresize(image, (image.shape[0] * upsampling,
                             image.shape[1] * upsampling),
                     interp='bicubic')

    u_template = zoom(template, upsampling)
    u_image = zoom(image, upsampling, )
    # Find the the upper left origin of the template in the image
    match = func(u_image, u_template)
    y, x = np.unravel_index(np.argmax(match), match.shape)
+15 −17
Original line number Diff line number Diff line
@@ -4,6 +4,7 @@ from autocnet.matcher import matcher

# TODO: look into KeyPoint.size and perhaps use to determine an appropriately-sized search/template.


def clip_roi(img, center, img_size):
    """
    Given an input image, clip a square region of interest
@@ -32,16 +33,16 @@ def clip_roi(img, center, img_size):

    i = int((img_size - 1) / 2)

    y, x = map(int, center)
    x, y = map(int, center)

    y_start = y - i
    y_stop = y + i
    x_start = x - i
    x_stop = x + i
    x_stop = (x + i) - x_start
    y_stop = (y + i) - y_start

    if isinstance(img, np.ndarray):
        clipped_img = img[y_start:y_stop,
                          x_start:x_stop]
        clipped_img = img[y_start:y_start + y_stop + 1,
                          x_start:x_start + x_stop + 1]
    else:
        clipped_img = img.read_array(pixels=[x_start, y_start,
                                             x_stop, y_stop])
@@ -49,7 +50,7 @@ def clip_roi(img, center, img_size):
    return clipped_img


def subpixel_offset(template, search, upsampling=10):
def subpixel_offset(template, search, upsampling=16):
    """
    Uses a pattern-matcher on subsets of two images determined from the passed-in keypoints and optional sizes to
    compute an x and y offset from the search keypoint to the template keypoint and an associated strength.
@@ -64,16 +65,13 @@ def subpixel_offset(template, search, upsampling=10):
                The amount to upsample the image. 
    Returns
    -------
    : tuple
      The returned tuple is of form: (x_offset, y_offset, strength). The offsets are from the search to the template
      keypoint.
    x_offset : float
               Shift in the x-dimension
    y_offset : float
               Shift in the y-dimension
    strength : float
               Strength of the correspondence in the range [-1, 1]
    """

    try:
        results = matcher.pattern_match(template, search, upsampling=upsampling)
        return results
    except ValueError:
        # the match fails if the template or search point is near an edge of the image
        # TODO: come up with a better solution?
        print('Can not subpixel match point.')
        return
    x_offset, y_offset, strength = matcher.pattern_match(template, search, upsampling=upsampling)
    return x_offset, y_offset, strength
+3 −3
Original line number Diff line number Diff line
@@ -16,13 +16,13 @@ class TestSubPixel(unittest.TestCase):

    def test_clip_roi(self):
        """Clip square regions of interest out of a synthetic 100x100 ramp image."""
        # The span previously contained both the pre- and post-commit diff
        # lines interleaved (duplicate `center` assignments and conflicting
        # expected means); only the committed (new) assertions are kept.
        img = np.arange(10000).reshape(100, 100)

        # Integer center, 9x9 clip
        center = (4, 4)
        clip = sp.clip_roi(img, center, 9)
        self.assertEqual(clip.mean(), 404)

        # Float center (truncated to int inside clip_roi), 27x27 clip
        center = (55.4, 63.1)
        clip = sp.clip_roi(img, center, 27)
        self.assertEqual(clip.mean(), 6355.0)

        # img_size 10 is expected to raise — presumably even clip sizes are
        # invalid; confirm against clip_roi's validation
        self.assertRaises(ValueError, sp.clip_roi, img, center, 10)
 No newline at end of file
Loading