def compute_subpixel_offsets(self):
    """
    Compute subpixel offsets for every matched keypoint pair on every edge.

    For each edge, each row of the edge's ``matches`` DataFrame supplies
    integer indices (``source_idx`` / ``destination_idx``) into the source
    and destination nodes' keypoint lists; the two keypoints and their
    parent images are passed to ``sp.subpixel_offset``.

    Side Effects
    ------------
    Stores the per-match results on each edge as the ``subpixel_offsets``
    attribute, a list parallel to the rows of the edge's ``matches``
    DataFrame.  (The previous implementation only printed the list and
    discarded it — this resolves the "TODO: finish me" marker.)
    """
    for source, destination, attributes in self.edges_iter(data=True):
        # Matches for this edge; rows carry integer indices into each
        # node's keypoint list.
        matches = attributes['matches']
        src_image = self.node[source]['image']
        dest_image = self.node[destination]['image']

        offsets = []
        for _, row in matches.iterrows():
            s_idx = int(row['source_idx'])
            d_idx = int(row['destination_idx'])
            src_keypoint = self.node[source]['keypoints'][s_idx]
            dest_keypoint = self.node[destination]['keypoints'][d_idx]
            offsets.append(sp.subpixel_offset(src_keypoint, dest_keypoint,
                                              src_image, dest_image))

        # Persist the results on the edge so downstream steps
        # (e.g. to_cnet) can consume them.
        attributes['subpixel_offsets'] = offsets
def subpixel_offset(template_kp, search_kp, template_img, search_img,
                    template_size=9, search_size=27):
    """
    Compute the subpixel offset for one matched keypoint pair.

    Clips a ``template_size`` x ``template_size`` window centered on the
    template keypoint from ``template_img`` and pattern-matches it against
    a ``search_size`` x ``search_size`` window centered on the search
    keypoint in ``search_img``.

    Parameters
    ----------
    template_kp, search_kp : object
        Keypoints exposing a ``.pt`` (x, y) coordinate pair
        (e.g. ``cv2.KeyPoint``).
    template_img, search_img : ndarray
        2-D, row-major (y, x) image arrays the keypoints were detected in.
    template_size, search_size : int, optional
        Odd window edge lengths, so each window centers exactly on the
        keypoint's pixel.

    Returns
    -------
    object
        Result of ``matcher.pattern_match(template, search)`` —
        presumably an (x, y, strength) triple per the module notes;
        verify against ``matcher.pattern_match``.

    Raises
    ------
    ValueError
        If either window size is even.  (Previously only noted in a
        comment; now enforced.)
    """
    if template_size % 2 == 0 or search_size % 2 == 0:
        raise ValueError('template_size and search_size must be odd.')

    # Keypoint coordinates are floats; truncate to the containing pixel.
    temp_x, temp_y = map(int, template_kp.pt)
    search_x, search_y = map(int, search_kp.pt)

    # Half-widths of the windows.
    t = template_size // 2
    s = search_size // 2

    # Upper bound is +half+1 so the slice spans the full odd-sized window
    # centered on the keypoint.  (The original +half dropped the last
    # row/column, yielding an even-sized, off-center window.)
    template = template_img[temp_y - t:temp_y + t + 1,
                            temp_x - t:temp_x + t + 1]
    search = search_img[search_y - s:search_y + s + 1,
                        search_x - s:search_x + s + 1]

    # Delegate the actual pattern match.
    return matcher.pattern_match(template, search)
def compute_subpixel_offsets(self):
    """
    For every edge, compute a subpixel offset for each keypoint match
    and print the per-edge list of results.

    NOTE(review): the results are printed, not stored — the original
    carries a "TODO: finish me" marker, so persisting them on the edge
    is still outstanding.
    """
    for src_node, dst_node, edge_attrs in self.edges_iter(data=True):
        match_df = edge_attrs['matches']
        image_a = self.node[src_node]['image']
        image_b = self.node[dst_node]['image']

        per_edge = []
        for _, match_row in match_df.iterrows():
            # Rows index into each node's keypoint list.
            kp_a = self.node[src_node]['keypoints'][int(match_row['source_idx'])]
            kp_b = self.node[dst_node]['keypoints'][int(match_row['destination_idx'])]
            per_edge.append(sp.subpixel_offset(kp_a, kp_b, image_a, image_b))
        print(per_edge)
    return
autocnet/graph/tests/test_network.py +0 −1 Original line number Diff line number Diff line Loading @@ -41,7 +41,6 @@ class TestCandidateGraph(unittest.TestCase): self.assertIsInstance(node['descriptors'][0], np.ndarray) self.assertEquals(self.graph.get_keypoints(node_number), node['keypoints']) def tearDown(self): try: os.remove('test_graph_to_json.json') Loading
def subpixel_offset(template_kp, search_kp, template_img, search_img,
                    template_size=9, search_size=27):
    """
    Pattern-match a window around ``template_kp`` in ``template_img``
    against a larger window around ``search_kp`` in ``search_img``.

    Window edge lengths are ``template_size`` and ``search_size``; per
    the module notes, even sizes are unsupported.  Keypoints must expose
    a ``.pt`` (x, y) pair.  Returns whatever ``matcher.pattern_match``
    yields for the two windows.
    """
    # Truncate the float keypoint coordinates to pixel indices.
    tx, ty = (int(c) for c in template_kp.pt)
    sx, sy = (int(c) for c in search_kp.pt)

    # Half-widths used to bound each window.
    half_template = template_size // 2
    half_search = search_size // 2

    window = template_img[ty - half_template:ty + half_template,
                          tx - half_template:tx + half_template]
    region = search_img[sy - half_search:sy + half_search,
                        sx - half_search:sx + half_search]

    return matcher.pattern_match(window, region)
functional_tests/test_two_image.py +2 −0 Original line number Diff line number Diff line Loading @@ -77,6 +77,8 @@ class TestTwoImageMatching(unittest.TestCase): self.assertIn(len(matches.loc[mask]), range(75,101)) cg.compute_homographies(clean_keys=['symmetry', 'ratio']) cg.compute_subpixel_offsets() # Step: And create a C object cnet = cg.to_cnet(clean_keys=['symmetry', 'ratio', 'ransac']) Loading