def compute_subpixel_offsets(self):
    """
    For each edge in the graph, compute the subpixel offsets between every
    matched keypoint pair using pattern matching, store the per-edge result
    as the edge's 'subpixel_offsets' attribute, and collect all per-edge
    arrays.

    Returns
    -------
    subpixel_offsets : list
        A list containing one ndarray per graph edge; each row of an array
        is the (x_offset, y_offset, strength) tuple returned by
        sp.subpixel_offset for one matched keypoint pair.
    """
    subpixel_offsets = []
    for source, destination, attributes in self.edges_iter(data=True):
        matches = attributes['matches']  # DataFrame of keypoint index pairs
        src_image = self.node[source]['image']
        dest_image = self.node[destination]['image']
        edge_offsets = []
        # One offset per matched keypoint pair on this edge.
        # (The original wrapped iterrows() in enumerate() and never used
        # the extra index.)
        for idx, row in matches.iterrows():
            s_idx = int(row['source_idx'])
            d_idx = int(row['destination_idx'])
            src_keypoint = self.node[source]['keypoints'][s_idx]
            dest_keypoint = self.node[destination]['keypoints'][d_idx]
            edge_offsets.append(sp.subpixel_offset(src_keypoint,
                                                   dest_keypoint,
                                                   src_image,
                                                   dest_image))
        # Build the array once and share it between the edge attribute and
        # the aggregate return value (the original built it twice).
        edge_array = np.array(edge_offsets)
        attributes['subpixel_offsets'] = edge_array
        subpixel_offsets.append(edge_array)
    return subpixel_offsets
# TODO: look into KeyPoint.size and perhaps use to determine an
# appropriately-sized search/template.
def subpixel_offset(template_kp, search_kp, template_img, search_img,
                    template_size=9, search_size=27):
    """
    Uses a pattern-matcher on subsets of two images determined from the
    passed-in keypoints and optional sizes to compute an x and y offset
    from the search keypoint to the template keypoint and an associated
    strength.

    Parameters
    ----------
    template_kp : KeyPoint
        The KeyPoint to match the search_kp to.
    search_kp : KeyPoint
        The KeyPoint to match to the template_kp.
    template_img : ndarray
        The entire image that the template chip to match to will be taken
        out of.
    search_img : ndarray
        The entire image that the search chip to match to the template chip
        will be taken out of.
    template_size : int
        The length of one side of the square subset of the template image
        that will actually be used for the subpixel registration.
        Default is 9. Must be odd.
    search_size : int
        The length of one side of the square subset of the search image
        that will be used for subpixel registration. Default is 27.
        Must be odd.

    Returns
    -------
    : tuple
        (x_offset, y_offset, strength); the offsets are from the search to
        the template keypoint. (None, None, None) when the match fails
        because a chip falls off an image edge.
    """
    # Integer pixel coordinates of each keypoint.
    temp_x, temp_y = map(int, template_kp.pt)
    search_x, search_y = map(int, search_kp.pt)

    # Half-widths used to window out the chips. For an odd size, slicing
    # [c-h : c+h+1] yields exactly `size` pixels centered on the keypoint;
    # the original omitted the +1 and silently produced even-sized chips,
    # violating the documented "must be odd" contract.
    t = int(template_size / 2)  # index offset for template
    s = int(search_size / 2)    # index offset for search

    template = template_img[temp_y - t:temp_y + t + 1,
                            temp_x - t:temp_x + t + 1]
    search = search_img[search_y - s:search_y + s + 1,
                        search_x - s:search_x + s + 1]

    results = (None, None, None)
    try:
        results = matcher.pattern_match(template, search)
    except ValueError:
        # The match fails if the template or search chip lies near an edge
        # of the image.
        # TODO: come up with a better solution?
        print('Template Keypoint ({},{}) cannot be pattern matched'.format(str(temp_x), str(temp_y)))
    return results
def compute_subpixel_offsets(self):
    """
    Walk every edge of the graph, pattern-match each matched keypoint pair
    to a subpixel offset, attach the per-edge result array to the edge as
    its 'subpixel_offsets' attribute, and return the per-edge arrays.

    Returns
    -------
    subpixel_offsets : list
        One ndarray per graph edge, holding that edge's subpixel offsets.
    """
    all_offsets = []
    for u, v, edge_data in self.edges_iter(data=True):
        u_node = self.node[u]
        v_node = self.node[v]
        # One sp.subpixel_offset result per matched keypoint pair.
        pairwise = [
            sp.subpixel_offset(u_node['keypoints'][int(row['source_idx'])],
                               v_node['keypoints'][int(row['destination_idx'])],
                               u_node['image'],
                               v_node['image'])
            for _, row in edge_data['matches'].iterrows()
        ]
        edge_data['subpixel_offsets'] = np.array(pairwise)
        all_offsets.append(np.array(pairwise))
    return all_offsets
autocnet/graph/tests/test_network.py +0 −1 Original line number Diff line number Diff line Loading @@ -41,7 +41,6 @@ class TestCandidateGraph(unittest.TestCase): self.assertIsInstance(node['descriptors'][0], np.ndarray) self.assertEquals(self.graph.get_keypoints(node_number), node['keypoints']) def tearDown(self): try: os.remove('test_graph_to_json.json') Loading
# TODO: look into KeyPoint.size and perhaps use to determine an
# appropriately-sized search/template.
# TODO: do not allow even sizes
def subpixel_offset(template_kp, search_kp, template_img, search_img,
                    template_size=9, search_size=27):
    """
    Pattern-match a chip centered on search_kp against a chip centered on
    template_kp and report the resulting shift and match strength.

    Parameters
    ----------
    template_kp : KeyPoint
        Keypoint the search chip is registered against.
    search_kp : KeyPoint
        Keypoint whose neighborhood is matched to the template chip.
    template_img : ndarray
        Full image the template chip is cut from.
    search_img : ndarray
        Full image the search chip is cut from.
    template_size : int
        Side length of the square template chip. Default 9; must be odd.
    search_size : int
        Side length of the square search chip. Default 27; must be odd.

    Returns
    -------
    : tuple
        (x_offset, y_offset, strength) measured from the search keypoint
        to the template keypoint, or (None, None, None) when matching
        fails near an image edge.
    """
    temp_x, temp_y = (int(c) for c in template_kp.pt)
    search_x, search_y = (int(c) for c in search_kp.pt)

    # Half-widths that bound the chips cut from each image.
    half_t = template_size // 2
    half_s = search_size // 2

    template = template_img[temp_y - half_t:temp_y + half_t,
                            temp_x - half_t:temp_x + half_t]
    search = search_img[search_y - half_s:search_y + half_s,
                        search_x - half_s:search_x + half_s]

    try:
        return matcher.pattern_match(template, search)
    except ValueError:
        # Chips that run off an image edge cannot be matched.
        # TODO: come up with a better solution?
        print('Template Keypoint ({},{}) cannot be pattern matched'.format(str(temp_x), str(temp_y)))
        return (None, None, None)
functional_tests/test_two_image.py +4 −0 Original line number Diff line number Diff line Loading @@ -77,6 +77,10 @@ class TestTwoImageMatching(unittest.TestCase): cg.compute_homographies(clean_keys=['symmetry', 'ratio']) #compute subpixel offsets for the entire graph offsets = cg.compute_subpixel_offsets() self.assertEqual(len(offsets), cg.number_of_edges()) # Step: And create a C object cnet = cg.to_cnet(clean_keys=['symmetry', 'ratio', 'ransac']) # Step update the serial numbers Loading