Commit efc569c7 authored by Jay, committed by jay
Browse files

Added support for downsampling

parent f5a300cd
Loading
Loading
Loading
Loading
+38 −22
Original line number Diff line number Diff line
@@ -184,16 +184,18 @@ class CandidateGraph(nx.Graph):
        newy_size = int(array.shape[1] / downsampling)

        resized_array = imresize(array, (newx_size, newy_size), interp='bicubic')
        self.node[nodeindex]['image'] = bytescale(resized_array)
        self.node[nodeindex]['image_downsampling'] = downsampling
        return bytescale(resized_array)

    def extract_features(self, extractor_parameters={}, downsampling=1):
    def extract_features(self, method='orb', extractor_parameters={}, downsampling=1):
        """
        Extracts features from each image in the graph and uses the result to assign the
        node attributes for 'handle', 'image', 'keypoints', and 'descriptors'.

        Parameters
        ----------
        method : {'orb', 'sift', 'fast'}
                 The descriptor method to be used

        extractor_parameters : dict
                               A dictionary containing OpenCV SIFT parameters names and values.

@@ -202,9 +204,14 @@ class CandidateGraph(nx.Graph):
        """
        for node, attributes in self.nodes_iter(data=True):
            self.get_geodataset(node)
            self.get_array(node, downsampling=downsampling)
            attributes['keypoints'], attributes['descriptors'] = fe.extract_features(attributes['image'],
                                                                                     extractor_parameters)
            attributes['downsampling'] = downsampling
            image = self.get_array(node, downsampling=downsampling)
            keypoints, descriptors = fe.extract_features(image,
                                                         method=method,
                                                         extractor_parameters=extractor_parameters)

            attributes['keypoints'] = keypoints
            attributes['descriptors'] = descriptors.astype(np.float32, copy=False)

    def add_matches(self, matches):
        """
@@ -337,29 +344,35 @@ class CandidateGraph(nx.Graph):
                matches = matches[mask]
                full_mask = np.where(mask == True)

            src_image = self.node[source]['image']
            dest_image = self.node[destination]['image']

            # Preallocate the numpy array to avoid appending and type conversion
            edge_offsets = np.empty((len(matches),3))

            s_node = self.node[source]
            d_node = self.node[destination]

            s_image = s_node['handle']
            d_image = d_node['handle']

            # for each edge, calculate this for each keypoint pair
            for i, (idx, row) in enumerate(matches.iterrows()):
                s_idx = int(row['source_idx'])
                d_idx = int(row['destination_idx'])

                s_node = self.node[source]
                d_node = self.node[destination]
                s_keypoint = [s_node['keypoints'][s_idx].pt[0] * s_node['downsampling'],
                              s_node['keypoints'][s_idx].pt[1] * s_node['downsampling']]

                s_keypoint = s_node['keypoints'][s_idx].pt
                d_keypoint = d_node['keypoints'][d_idx].pt
                d_keypoint = [d_node['keypoints'][d_idx].pt[0] * d_node['downsampling'],
                              d_node['keypoints'][d_idx].pt[1] * d_node['downsampling']]

                # Get the template and search windows
                s_template = sp.clip_roi(src_image, s_keypoint, template_size)
                d_search = sp.clip_roi(dest_image, d_keypoint, search_size)
                s_template = sp.clip_roi(s_image, s_keypoint, template_size * s_node['downsampling'])
                d_search = sp.clip_roi(d_image, d_keypoint, search_size * d_node['downsampling'])

                edge_offsets[i] = sp.subpixel_offset(s_template, d_search, upsampling=upsampling)

            # The destination node is the node that is subpixel registered, so downsample there
            edge_offsets[:, :2] /= d_node['downsampling']

            # Compute the mask for correlations less than the threshold
            threshold_mask = edge_offsets[edge_offsets[:, -1] >= threshold]

@@ -424,6 +437,9 @@ class CandidateGraph(nx.Graph):
        for source, destination, attributes in self.edges_iter(data=True):
            matches = attributes['matches']

            s_downsampling = self.node[source]['downsampling']
            d_downsampling = self.node[destination]['downsampling']

            # Merge all of the masks
            if clean_keys:
                mask = np.prod([attributes[i] for i in clean_keys], axis=0, dtype=np.bool)
@@ -441,18 +457,18 @@ class CandidateGraph(nx.Graph):
                m1 = (source, int(row['source_idx']))
                m2 = (destination, int(row['destination_idx']))

                values.append([kp1[m1[1]].pt[0],
                               kp1[m1[1]].pt[1],
                values.append([kp1[m1[1]].pt[0] * s_downsampling,
                               kp1[m1[1]].pt[1] * s_downsampling,
                               m1,
                               pt_idx,
                               source])

                kp2x = kp2[m2[1]].pt[0]
                kp2y = kp2[m2[1]].pt[1]
                kp2x = kp2[m2[1]].pt[0] * d_downsampling
                kp2y = kp2[m2[1]].pt[1] * d_downsampling

                if 'subpixel' in clean_keys:
                    kp2x += offsets['x_offset'].values[i]
                    kp2y += offsets['y_offset'].values[i]
                    kp2x += (offsets['x_offset'].values[i] * d_downsampling)
                    kp2y += (offsets['y_offset'].values[i] * d_downsampling)
                values.append([kp2x,
                               kp2y,
                               m2,
+6 −1
Original line number Diff line number Diff line
@@ -34,12 +34,17 @@ class TestCandidateGraph(unittest.TestCase):
        except:
            pass

    def test_get_array(self):
        # get_array with default downsampling should hand back the
        # full-resolution image as an 8-bit array.
        idx = self.graph.node_name_map['AS15-M-0297_SML.png']
        arr = self.graph.get_array(idx)
        self.assertEqual(np.uint8, arr.dtype)
        self.assertEqual((1012, 1012), arr.shape)

    def test_extract_features(self):
        # also tests get_geodataset() and get_keypoints
        self.graph.extract_features(extractor_parameters={'nfeatures':10})
        node_number = self.graph.node_name_map['AS15-M-0297_SML.png']
        node = self.graph.node[node_number]
        self.assertEquals(len(node['image']), 1012)
        self.assertEquals(len(node['keypoints']), 10)
        self.assertEquals(len(node['descriptors']), 10)
        self.assertIsInstance(node['keypoints'][0], type(cv2.KeyPoint()))