Loading autocnet/graph/network.py +23 −5 Original line number Diff line number Diff line Loading @@ -298,7 +298,8 @@ class CandidateGraph(nx.Graph): attributes['homography'] = transformation_matrix attributes['ransac'] = mask def compute_subpixel_offsets(self, clean_keys=[], threshold=0.8, upsampling=10): def compute_subpixel_offsets(self, clean_keys=[], threshold=0.8, upsampling=10, template_size=9, search_size=27): """ For the entire graph, compute the subpixel offsets using pattern-matching and add the result as an attribute to each edge of the graph. Loading @@ -313,6 +314,16 @@ class CandidateGraph(nx.Graph): On the range [-1, 1]. Values less than or equal to this threshold are masked and can be considered outliers upsampling : int The multiplier to the template and search shapes to upsample for subpixel accuracy template_size : int The size of the template in pixels, must be odd search_size : int The size of the search """ for source, destination, attributes in self.edges_iter(data=True): Loading @@ -336,11 +347,18 @@ class CandidateGraph(nx.Graph): for i, (idx, row) in enumerate(matches.iterrows()): s_idx = int(row['source_idx']) d_idx = int(row['destination_idx']) src_keypoint = self.node[source]['keypoints'][s_idx] dest_keypoint = self.node[destination]['keypoints'][d_idx] # Compute the subpixel offset edge_offsets[i] = sp.subpixel_offset(src_keypoint, dest_keypoint, src_image, dest_image, upsampling=upsampling) s_node = self.node[source] d_node = self.node[destination] s_keypoint = s_node['keypoints'][s_idx].pt d_keypoint = d_node['keypoints'][d_idx].pt # Get the template and search windows s_template = sp.clip_roi(src_image, s_keypoint, template_size) d_search = sp.clip_roi(dest_image, d_keypoint, search_size) edge_offsets[i] = sp.subpixel_offset(s_template, d_search, upsampling=upsampling) # Compute the mask for correlations less than the threshold threshold_mask = edge_offsets[edge_offsets[:,-1] >= threshold] Loading autocnet/matcher/subpixel.py +42 
−20 Original line number Diff line number Diff line import numpy as np from autocnet.matcher import matcher # TODO: look into KeyPoint.size and perhaps use to determine an appropriately-sized search/template. def subpixel_offset(template_kp, search_kp, template_img, search_img, template_size=9, search_size=27, upsampling=10): def clip_roi(img, center, img_size): """ Given an input image, clip a square region of interest centered on some pixel at some size. Parameters ---------- img : ndarray or file handle The input image to be clipped center : tuple (y,x) coordinates to center the roi img_size : int Odd, total image size Returns ------- clipped_img : ndarray The clipped image """ if img_size % 2 == 0: raise ValueError('Image size must be odd.') i = (img_size - 1) / 2 y, x = map(int, center) if isinstance(img, np.ndarray): clipped_img = img[y - i:y + i, x - i:x + i] return clipped_img def subpixel_offset(template, search, upsampling=10): """ Uses a pattern-matcher on subsets of two images determined from the passed-in keypoints and optional sizes to compute an x and y offset from the search keypoint to the template keypoint and an associated strength. Loading Loading @@ -32,26 +68,12 @@ def subpixel_offset(template_kp, search_kp, template_img, search_img, The returned tuple is of form: (x_offset, y_offset, strength). The offsets are from the search to the template keypoint. 
""" if template_size % 2 == 0 or search_size %2 == 0: raise ValueError('The search and template images must have an odd number of lines and samples') # Get the x,y coordinates temp_x, temp_y = map(int, template_kp.pt) search_x, search_y = map(int, search_kp.pt) # Convert desired template and search sizes to offsets to get the bounding box t = int(template_size/2) # index offset for template s = int(search_size/2) # index offset for search template = template_img[temp_y-t:temp_y+t, temp_x-t:temp_x+t] search = search_img[search_y-s:search_y+s, search_x-s:search_x+s] results = (None, None, None) try: results = matcher.pattern_match(template, search, upsampling=upsampling) return results except ValueError: # the match fails if the template or search point is near an edge of the image # TODO: come up with a better solution? print('Template Keypoint ({},{}) cannot be pattern matched'.format(str(temp_x), str(temp_y))) return results print('Can not subpixel match point.') return autocnet/matcher/tests/test_feature_extractor.py +8 −8 Original line number Diff line number Diff line Loading @@ -14,10 +14,10 @@ from autocnet.fileio import io_gdal class TestFeatureExtractor(unittest.TestCase): @classmethod def setUpClass(self): self.dataset = io_gdal.GeoDataset(get_path('AS15-M-0295_SML.png')) self.data_array = self.dataset.read_array(dtype='uint8') self.parameters = {"nfeatures" : 10, def setUpClass(cls): cls.dataset = io_gdal.GeoDataset(get_path('AS15-M-0295_SML.png')) cls.data_array = cls.dataset.read_array(dtype='uint8') cls.parameters = {"nfeatures": 10, "nOctaveLayers": 3, "contrastThreshold": 0.02, "edgeThreshold": 10, Loading autocnet/matcher/tests/test_subpixel.py 0 → 100644 +28 −0 Original line number Diff line number Diff line import os import sys import unittest sys.path.append(os.path.abspath('..')) import numpy as np from .. 
import subpixel as sp class TestSubPixel(unittest.TestCase): def setup(self): pass def test_clip_roi(self): img = np.arange(10000).reshape(100,100) center = (30,30) clip = sp.clip_roi(img, center, 9) self.assertEqual(clip.mean(), 2979.5) center = (55.4, 63.1) clip = sp.clip_roi(img, center, 27) self.assertEqual(clip.mean(), 5512.5) self.assertRaises(ValueError, sp.clip_roi, img, center, 10) No newline at end of file Loading
autocnet/graph/network.py +23 −5 Original line number Diff line number Diff line Loading @@ -298,7 +298,8 @@ class CandidateGraph(nx.Graph): attributes['homography'] = transformation_matrix attributes['ransac'] = mask def compute_subpixel_offsets(self, clean_keys=[], threshold=0.8, upsampling=10): def compute_subpixel_offsets(self, clean_keys=[], threshold=0.8, upsampling=10, template_size=9, search_size=27): """ For the entire graph, compute the subpixel offsets using pattern-matching and add the result as an attribute to each edge of the graph. Loading @@ -313,6 +314,16 @@ class CandidateGraph(nx.Graph): On the range [-1, 1]. Values less than or equal to this threshold are masked and can be considered outliers upsampling : int The multiplier to the template and search shapes to upsample for subpixel accuracy template_size : int The size of the template in pixels, must be odd search_size : int The size of the search """ for source, destination, attributes in self.edges_iter(data=True): Loading @@ -336,11 +347,18 @@ class CandidateGraph(nx.Graph): for i, (idx, row) in enumerate(matches.iterrows()): s_idx = int(row['source_idx']) d_idx = int(row['destination_idx']) src_keypoint = self.node[source]['keypoints'][s_idx] dest_keypoint = self.node[destination]['keypoints'][d_idx] # Compute the subpixel offset edge_offsets[i] = sp.subpixel_offset(src_keypoint, dest_keypoint, src_image, dest_image, upsampling=upsampling) s_node = self.node[source] d_node = self.node[destination] s_keypoint = s_node['keypoints'][s_idx].pt d_keypoint = d_node['keypoints'][d_idx].pt # Get the template and search windows s_template = sp.clip_roi(src_image, s_keypoint, template_size) d_search = sp.clip_roi(dest_image, d_keypoint, search_size) edge_offsets[i] = sp.subpixel_offset(s_template, d_search, upsampling=upsampling) # Compute the mask for correlations less than the threshold threshold_mask = edge_offsets[edge_offsets[:,-1] >= threshold] Loading
import numpy as np

from autocnet.matcher import matcher

# TODO: look into KeyPoint.size and perhaps use to determine an
# appropriately-sized search/template.


def clip_roi(img, center, img_size):
    """
    Clip a square region of interest out of an image, centered on a pixel.

    Parameters
    ----------
    img : ndarray
        The input image to be clipped.  (The original docstring also
        mentions file handles, but only ndarrays are actually handled.)
    center : tuple
        (y, x) coordinates to center the ROI on; floats are truncated
        toward zero.
    img_size : int
        Total size of the ROI; must be odd.

    Returns
    -------
    clipped_img : ndarray
        The clipped image.

    Raises
    ------
    ValueError
        If ``img_size`` is even.
    TypeError
        If ``img`` is not a numpy ndarray.
    """
    if img_size % 2 == 0:
        raise ValueError('Image size must be odd.')
    # BUG FIX: use floor division — '/' yields a float in Python 3,
    # and float slice indices raise a TypeError.
    i = (img_size - 1) // 2
    y, x = map(int, center)
    if not isinstance(img, np.ndarray):
        # BUG FIX: the original fell through with 'clipped_img' unbound
        # (UnboundLocalError); fail with a clear message instead.
        raise TypeError('img must be a numpy ndarray')
    # NOTE(review): the upper bound is exclusive, so the clip is
    # (img_size - 1) pixels square, not img_size — the accompanying
    # tests pin this behavior (means 2979.5 / 5512.5), so it is
    # preserved here.  Widening to y + i + 1 would change results;
    # TODO confirm intended window size with the author.
    return img[y - i:y + i, x - i:x + i]


def subpixel_offset(template, search, upsampling=10):
    """
    Use the pattern matcher to compute a subpixel offset between a
    template chip and a search chip.

    Parameters
    ----------
    template : ndarray
        The template image chip (the smaller window).
    search : ndarray
        The search image chip (the larger window) in which the template
        is to be located.
    upsampling : int
        The multiplier applied to the template and search shapes to
        upsample for subpixel accuracy.

    Returns
    -------
    tuple or None
        (x_offset, y_offset, strength), the offsets from the search to
        the template keypoint, or None when the match fails.
    """
    try:
        return matcher.pattern_match(template, search, upsampling=upsampling)
    except ValueError:
        # pattern_match raises ValueError when the chips are degenerate,
        # e.g. clipped at an image edge.  Best-effort: report and skip.
        print('Can not subpixel match point.')
        # BUG FIX (clarity): return None explicitly rather than a bare
        # 'return', so the failure contract is visible to callers.
        return None
autocnet/matcher/tests/test_feature_extractor.py +8 −8 Original line number Diff line number Diff line Loading @@ -14,10 +14,10 @@ from autocnet.fileio import io_gdal class TestFeatureExtractor(unittest.TestCase): @classmethod def setUpClass(self): self.dataset = io_gdal.GeoDataset(get_path('AS15-M-0295_SML.png')) self.data_array = self.dataset.read_array(dtype='uint8') self.parameters = {"nfeatures" : 10, def setUpClass(cls): cls.dataset = io_gdal.GeoDataset(get_path('AS15-M-0295_SML.png')) cls.data_array = cls.dataset.read_array(dtype='uint8') cls.parameters = {"nfeatures": 10, "nOctaveLayers": 3, "contrastThreshold": 0.02, "edgeThreshold": 10, Loading
import os
import sys
import unittest

# NOTE(review): the relative import below does not use this path hack;
# kept to preserve existing behavior when the file is run as a script
# from the tests directory.
sys.path.append(os.path.abspath('..'))

import numpy as np

from .. import subpixel as sp


class TestSubPixel(unittest.TestCase):
    """Unit tests for the subpixel matcher helpers."""

    def setUp(self):
        # BUG FIX: was named 'setup', which unittest never calls; the
        # correct lifecycle hook is 'setUp'.  It is a no-op either way.
        pass

    def test_clip_roi(self):
        """clip_roi extracts the expected window and rejects even sizes."""
        img = np.arange(10000).reshape(100, 100)

        # Integer center.
        clip = sp.clip_roi(img, (30, 30), 9)
        self.assertEqual(clip.mean(), 2979.5)

        # Float center coordinates are truncated toward zero.
        center = (55.4, 63.1)
        clip = sp.clip_roi(img, center, 27)
        self.assertEqual(clip.mean(), 5512.5)

        # Even ROI sizes are rejected.
        self.assertRaises(ValueError, sp.clip_roi, img, center, 10)