Commit b3939d1c authored by jlaura's avatar jlaura
Browse files

Merge pull request #84 from acpaquette/config

Fleshed out config file and black box
parents 4c30ea63 af123513
Loading
Loading
Loading
Loading
+48 −15
Original line number Diff line number Diff line
import sys
import os

import argparse

sys.path.insert(0, os.path.abspath('../autocnet'))


from autocnet.utils.utils import find_in_dict
from autocnet.graph.network import CandidateGraph
from autocnet.fileio.io_controlnetwork import to_isis, write_filelist
from autocnet.fileio.io_yaml import read_yaml


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file', action='store', help='Provide the name of the file list/adjacency list')
@@ -12,40 +20,65 @@ def parse_arguments():
    return args

def match_images(args, config_dict):
    """Match the images listed in ``args.input_file`` using the candidate
    graph pipeline configured by ``config_dict``, then write an ISIS
    control network (``.net``) and an image file list (``.lis``).

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments; must provide ``input_file`` and
        ``output_file``.
    config_dict : dict
        Nested configuration (see ``image_match_config.yml``), searched
        with ``find_in_dict`` for individual settings.
    """
    input_path = find_in_dict(config_dict, 'inputfile_path') + args.input_file
    output_path = find_in_dict(config_dict, 'outputfile_path') + args.output_file

    # Prefer an adjacency list; fall back to a plain file list when the
    # input cannot be parsed as an adjacency structure.
    try:
        cg = CandidateGraph.from_adjacency(input_path,
                                           basepath=find_in_dict(config_dict, 'basepath'))
    except Exception:
        cg = CandidateGraph.from_filelist(input_path)

    # Extract features (e.g. SIFT) from every image node.
    cg.extract_features(method=config_dict['extract_features']['method'],
                        extractor_parameters=find_in_dict(config_dict, 'extractor_parameters'))

    # Match features between adjacent images, keeping k nearest neighbors.
    cg.match_features(k=config_dict['match_features']['k'])

    # Outlier detection: symmetry check, then ratio check on each edge.
    cg.apply_func_to_edges('symmetry_check')
    cg.apply_func_to_edges('ratio_check',
                           ratio=find_in_dict(config_dict, 'ratio'),
                           mask_name=find_in_dict(config_dict, 'mask_name'),
                           single=find_in_dict(config_dict, 'single'))

    # Fundamental-matrix estimation on matches that survived the
    # symmetry and ratio checks.
    fund_config = find_in_dict(config_dict, 'fundamental_matrices')
    cg.apply_func_to_edges('compute_fundamental_matrix',
                           clean_keys=fund_config['clean_keys'],
                           method=fund_config['method'],
                           reproj_threshold=find_in_dict(config_dict, 'reproj_threshold'),
                           confidence=find_in_dict(config_dict, 'confidence'))

    # Refine surviving match coordinates to subpixel accuracy.
    cg.apply_func_to_edges('subpixel_register',
                           clean_keys=find_in_dict(config_dict, 'subpixel_register')['clean_keys'],
                           template_size=find_in_dict(config_dict, 'template_size'),
                           threshold=find_in_dict(config_dict, 'threshold'),
                           search_size=find_in_dict(config_dict, 'search_size'),
                           max_x_shift=find_in_dict(config_dict, 'max_x_shift'),
                           max_y_shift=find_in_dict(config_dict, 'max_y_shift'),
                           tiled=find_in_dict(config_dict, 'tiled'),
                           upsampling=find_in_dict(config_dict, 'upsampling'),
                           error_check=find_in_dict(config_dict, 'error_check'))

    # Thin the remaining matches via suppression.
    suppress_config = find_in_dict(config_dict, 'suppress')
    cg.apply_func_to_edges('suppress',
                           clean_keys=suppress_config['clean_keys'],
                           k=suppress_config['k'],
                           min_radius=find_in_dict(config_dict, 'min_radius'),
                           error_k=find_in_dict(config_dict, 'error_k'))

    # Convert the graph to a control network keyed on ISIS serial numbers.
    cnet = cg.to_cnet(clean_keys=find_in_dict(config_dict, 'cnet_conversion')['clean_keys'],
                      isis_serials=True)

    # Emit the ISIS-consumable artifacts: a file list and a control network.
    filelist = cg.to_filelist()
    write_filelist(filelist, output_path + '.lis')

    to_isis(output_path + '.net', cnet,
            mode='wb',
            networkid=find_in_dict(config_dict, 'networkid'),
            targetname=find_in_dict(config_dict, 'targetname'),
            description=find_in_dict(config_dict, 'description'),
            username=find_in_dict(config_dict, 'username'))

if __name__ == '__main__':
    # Load the pipeline settings, then run the matcher on the CLI inputs.
    config = read_yaml('image_match_config.yml')
    command_line_args = parse_arguments()
    match_images(command_line_args, config)
 No newline at end of file
+69 −3
Original line number Diff line number Diff line
# Configuration for the image matching pipeline (image_match_config.yml).

system_paths:
    basepath: /home/acpaquette/Desktop/
    inputfile_path: /home/acpaquette/autocnet/autocnet/examples/Apollo15/
    outputfile_path: /home/acpaquette/autocnet/autocnet/examples/Apollo15/

extract_features:
    method: sift
    extractor_parameters:
        nfeatures: 1000

match_features:
    k: 50

# Any clean keys being passed in require that the corresponding method has
# already been applied to the candidate graph object.

ratio_check:
    clean_keys:
        -

# Keyword arguments
    ratio: 0.8
    mask_name: None    # NOTE(review): YAML parses this as the string "None", not null — confirm downstream handling
    single: False


fundamental_matrices:
    clean_keys:
        - ratio
        - symmetry

# Keyword arguments
    method: ransac
    reproj_threshold: 5.0
    confidence: 0.99

subpixel_register:
    clean_keys:
        - ratio
        - symmetry
        - fundamental
    template_size: 5
    threshold: 0.8
    search_size: 15
    max_x_shift: 1.0
    max_y_shift: 1.0
    tiled: False

# Keyword arguments
    upsampling: 16
    error_check: False

suppress:
    clean_keys:
        - fundamental

# Keyword arguments
    min_radius: 2
    k: 50
    error_k: 0.1

cnet_conversion:
    clean_keys:
        - subpixel
to_isis:
    networkid: None      # NOTE(review): string "None", not null — confirm downstream handling
    targetname: Moon
    description: None    # NOTE(review): string "None", not null — confirm downstream handling
    username: DEFAULTUSERNAME
+57 −114

File changed.

Preview size limit exceeded, changes collapsed.

+19 −40

File changed.

Preview size limit exceeded, changes collapsed.

+1 −0
Original line number Diff line number Diff line
%% Cell type:code id: tags:

``` python
import os # get file path
import sys

sys.path.insert(0, os.path.abspath('../..'))

from scipy.misc import bytescale # store image array

import autocnet

from autocnet.examples import get_path # get file path
from autocnet.fileio.io_gdal import GeoDataset # set handle, get image as array
from autocnet.graph.network import CandidateGraph #construct adjacency graph
from autocnet.matcher import feature_extractor as fe # extract features from image
from autocnet.matcher.matcher import FlannMatcher # match features between images
from autocnet.utils import visualization as vis
```

%% Cell type:code id: tags:

``` python
# display graphs in separate window to be able to change size
%pylab qt4
# displays graphs in notebook
# %pylab inline
```

%% Output

    Populating the interactive namespace from numpy and matplotlib

%% Cell type:markdown id: tags:

Set up for visualization : Construct an adjacency graph with features extracted
-----------------------------------------------------------------------------------------

%% Cell type:code id: tags:

``` python
adjacency_dict = {"../examples/Apollo15/AS15-M-0297_SML.png"
                  : ["../examples/Apollo15/AS15-M-0298_SML.png"],
                  "../examples/Apollo15/AS15-M-0298_SML.png"
                  : ["../examples/Apollo15/AS15-M-0297_SML.png"]}
adjacencyGraph = CandidateGraph.from_adjacency(adjacency_dict)
```

%% Cell type:code id: tags:

``` python
n = adjacencyGraph.node[0]
n.convex_hull_ratio()
```

%% Output

    ---------------------------------------------------------------------------
    AttributeError                            Traceback (most recent call last)
    <ipython-input-4-1127c95d25db> in <module>()
          1 n = adjacencyGraph.node[0]
    ----> 2 n.convex_hull_ratio()

    /Users/jlaura/github/autocnet/autocnet/graph/network.py in convex_hull_ratio(self)
         97         ideal_area = self.handle.pixel_area
         98         if not hasattr(self, 'keypoints'):
    ---> 99             raise AttributeError('Keypoints must be extracted already, they have not been.')
        100
        101         ratio = convex_hull_ratio(keypoints, ideal_area)
    AttributeError: Keypoints must be extracted already, they have not been.

%% Cell type:code id: tags:

``` python
adjacencyGraph.edge[0][1]
```

%% Output

    <autocnet.graph.network.Edge at 0x11b570b70>

%% Cell type:code id: tags:

``` python
adjacencyGraph.extract_features(method='sift', extractor_parameters={'nfeatures':25})
imageName1 = adjacencyGraph.node[0]['image_name']
imageName2 = adjacencyGraph.node[1]['image_name']
print(imageName1)
print(imageName2)
```

%% Cell type:markdown id: tags:

Use visualization utility plotFeatures() to plot the features of a single image
-----------------------------------------------------------------------------------------
In this example, we plot both images to open in separate windows
1. Features found in AS15-M-0298_SML.png
2. Features found in AS15-M-0297_SML.png

%% Cell type:code id: tags:

``` python
plt.figure(0)
keypoints1 = adjacencyGraph.get_keypoints(imageName1)
vis.plotFeatures(imageName1, keypoints1)

plt.figure(1)
keypoints2 = adjacencyGraph.get_keypoints(imageName2)
vis.plotFeatures(imageName2, keypoints2)

plt.show()
```

%% Cell type:code id: tags:

``` python
hull = adjacencyGraph.covered_area(1)
```

%% Cell type:code id: tags:

``` python
plt.figure(0)
keypoints2 = adjacencyGraph.get_keypoints(imageName2)
# NOTE(review): image 1 is drawn with image 2's keypoints here — confirm intentional
vis.plotFeatures(imageName1, keypoints2)
# Collect the (x, y) pixel coordinates of every keypoint into an array
kp2 = np.empty((len(keypoints2), 2))
for i, j in enumerate(keypoints2):
    kp2[i] = j.pt[0], j.pt[1]
# Draw the convex hull boundary (computed in the previous cell) as a dashed red line
plt.plot(kp2[hull.vertices,0], kp2[hull.vertices,1], 'r--', lw=2)
# Total pixel area of the image, then the fraction of it covered by the hull
print(adjacencyGraph.node[0]['handle'].pixel_area)
print(hull.volume / adjacencyGraph.node[0]['handle'].pixel_area )
```

%% Cell type:code id: tags:

``` python
print(hull.volume)
```

%% Cell type:code id: tags:

``` python
type(adjacencyGraph[0][1])
adjacencyGraph[0][1]
type(adjacencyGraph.edge[0][1])
print(adjacencyGraph.edge[0][1])
```

%% Cell type:code id: tags:

``` python
plt.close(0)
plt.close(1)
```

%% Cell type:markdown id: tags:

Use visualization utility plotAdjacencyGraphFeatures() to plot the features on all images of the graph in a single figure.
--------------------------------------------------------------------------------------------------------------------------------

%% Cell type:code id: tags:

``` python
vis.plotAdjacencyGraphFeatures(adjacencyGraph, featurePointSize=7)
```

%% Cell type:code id: tags:

``` python
plt.close()
```

%% Cell type:markdown id: tags:

Set up for visualization : Find matches in Adjacency Graph
-----------------------------------------------------------------

%% Cell type:code id: tags:

``` python
# Apply a FLANN (approximate nearest-neighbor) matcher
matcher = FlannMatcher()

# Loop through the nodes on the graph and add each node's feature
# descriptors to the matcher, keyed by node id
for node, attributes in adjacencyGraph.nodes_iter(data=True):
    matcher.add(attributes['descriptors'], key=node)

# Build the KD-Tree index over the accumulated feature descriptors
matcher.train()

# Query the matcher for each node's k=2 nearest-neighbor matches.
# The matches are returned as pandas DataFrames and added to the
# adjacency graph
for node, attributes in adjacencyGraph.nodes_iter(data=True):
    descriptors = attributes['descriptors']
    matches = matcher.query(descriptors, node, k=2)
    adjacencyGraph.add_matches(matches)
```

%% Cell type:markdown id: tags:

Use visualization utility plotAdjacencyGraphMatches() to plot the matches between two images of the graph in a single figure.
------------------------------------------------------------------------------------------------------------------------------------

%% Cell type:code id: tags:

``` python
vis.plotAdjacencyGraphMatches(imageName1,
                              imageName2,
                              adjacencyGraph,
                              aspectRatio=0.44,
                              featurePointSize=3,
                              lineWidth=1,
                              saveToFile='myimage.png')
plt.figure(0)
img = plt.imread('myimage.png')
plt.imshow(img)


vis.plotAdjacencyGraphMatches(imageName1,
                              imageName2,
                              adjacencyGraph,
                              aspectRatio=0.44,
                              featurePointSize=10,
                              lineWidth=3,
                              saveToFile='myimage.png')
plt.figure(1)
img = plt.imread('myimage.png')
plt.imshow(img)
```

%% Cell type:markdown id: tags:

Below is an earlier attempt at plotting images within the same display box.<br>
Features are plotted.<br>
Lines are not drawn.

%% Cell type:code id: tags:

``` python
plt.figure(2)
vis.plotAdjacencyGraphMatchesSingleDisplay(imageName1, imageName2, adjacencyGraph)
```

%% Cell type:code id: tags:

``` python
plt.close(0)
plt.close(1)
plt.close(2)
```

%% Cell type:code id: tags:

``` python

```