Commit 3d2d42a4 authored by Jay; committed by jay
Browse files

HDF reads/writes of keypoints and descriptors.

parent 4b352e81
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -27,7 +27,7 @@ install:
  - conda info -a

  # Create a virtual env and install dependencies
  - conda create -y -q -n test-env python=$TRAVIS_PYTHON_VERSION nose numpy pillow scipy pandas networkx scikit-image sqlalchemy numexpr dill
  - conda create -y -q -n test-env python=$TRAVIS_PYTHON_VERSION nose numpy pillow scipy pandas networkx scikit-image sqlalchemy numexpr dill cython
  # Activate the env
  - source activate test-env

+1 −0
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@ Development Team
* Jeannie Backer <jwbacker@usgs.gov>
* Dyer Lytle <dmlytle@usgs.gov>
* Kelvin Rodriguez <krodriguez@usgs.gov>
* Adam Paquette <acpaquette@usgs.gov>

Contributors
------------
+6 −1
Original line number Diff line number Diff line
@@ -3,6 +3,9 @@ import numpy as np
import pandas as pd


# Compression filter name handed to h5py dataset creation.
DEFAULT_COMPRESSION = 'gzip'
# gzip aggressiveness; valid range is 0 (no compression) to 9 (maximum).
DEFAULT_COMPRESSION_VALUE = 8  # 0 - 9

class HDFDataset(h5.File):
    """
    Read / Write an HDF5 dataset using h5py.  If HDF5 is compiled with
@@ -35,7 +38,6 @@ class HDFDataset(h5.File):
        z : ndarray
            a numpy structured array representation of df
        """

        v = df.values
        cols = df.columns
        types = [(cols[i], df[k].dtype.type) for (i, k) in enumerate(cols)]
@@ -55,6 +57,9 @@ class HDFDataset(h5.File):
        sarray : array
                 numpy structured array

        index_column : str
                       The name of the index column.  Default: 'index'

        Returns
        -------
         : dataframe
+19 −2
Original line number Diff line number Diff line
import os
import unittest

import numpy as np
@@ -8,8 +9,24 @@ from .. import io_hdf

class TestHDF(unittest.TestCase):
    """
    Round-trip tests for HDFDataset's DataFrame <-> structured-array
    conversion helpers.
    """

    @classmethod
    def setUpClass(cls):
        # One shared on-disk HDF file and one known structured array /
        # DataFrame pair used by every test in this class.
        cls.hdf = io_hdf.HDFDataset('test_io_hdf.hdf', mode='w')
        cls.x = np.array([(0, 2., 'String'), (1, 3., "String2")],
                         dtype=[('index', 'i8'), ('bar', 'f4'), ('baz', 'O')])
        cls.df = pd.DataFrame(cls.x[['bar', 'baz']], index=cls.x['index'],
                              columns=['bar', 'baz'])

    @classmethod
    def tearDownClass(cls):
        # Remove the scratch HDF file created in setUpClass.
        os.remove('test_io_hdf.hdf')

    def test_df_sarray(self):
        # DataFrame -> structured array should reproduce the source array.
        # (Removed a leftover `self.assertTrue(False)` placeholder that made
        # this test fail unconditionally before the real assertion ran.)
        converted = self.hdf.df_to_sarray(self.df.reset_index())
        np.testing.assert_array_equal(converted, self.x)

    def test_sarray_df(self):
        # Structured array -> DataFrame should reproduce the source frame.
        # (Removed a leftover `self.assertTrue(False)` placeholder.)
        converted = self.hdf.sarray_to_df(self.x)
        self.assertTrue((self.df == converted).all().all())

+60 −1
Original line number Diff line number Diff line
import itertools
import os
import dill as pickle
import warnings

import networkx as nx
import numpy as np
import pandas as pd

from autocnet.fileio.io_gdal import GeoDataset
from autocnet.fileio import io_hdf
from autocnet.control.control import C
from autocnet.fileio import io_json
from autocnet.matcher.matcher import FlannMatcher
@@ -241,6 +243,63 @@ class CandidateGraph(nx.Graph):
            node.extract_features(image, method=method,
                                  extractor_parameters=extractor_parameters)

    def save_features(self, out_path, nodes=None):
        """
        Save the features (keypoints and descriptors) for the
        specified nodes.

        Parameters
        ----------
        out_path : str
                   Location of the output file.  If the file exists,
                   features are appended.  Otherwise, the file is created.

        nodes : list
                of nodes to save features for.  If None or empty, save
                for all nodes.  (Default of None replaces a mutable-list
                default argument; behavior for callers is unchanged.)
        """
        # Append when the file already exists so repeated saves accumulate.
        mode = 'a' if os.path.exists(out_path) else 'w'
        hdf = io_hdf.HDFDataset(out_path, mode=mode)
        try:
            # A truthy `nodes` restricts iteration to those nodes;
            # otherwise every node in the graph is saved.
            if nodes:
                node_iter = self.nodes_iter(nodes, data=True)
            else:
                node_iter = self.nodes_iter(data=True)
            for i, n in node_iter:
                n.save_features(hdf)
        finally:
            # Explicitly close the HDF5 handle; rebinding the name to None
            # (as the original did) only drops the reference and does not
            # guarantee a prompt flush/close of the file.
            hdf.close()

    def load_features(self, in_path, nodes=None):
        """
        Load features (keypoints and descriptors) for the
        specified nodes.

        Parameters
        ----------
        in_path : str
                  Location of the input file.

        nodes : list
                of nodes to load features for.  If None or empty, load
                features for all nodes.  (Default of None replaces a
                mutable-list default argument; behavior for callers is
                unchanged.)
        """
        hdf = io_hdf.HDFDataset(in_path, 'r')
        try:
            # A truthy `nodes` restricts iteration to those nodes;
            # otherwise features are loaded for every node in the graph.
            if nodes:
                node_iter = self.nodes_iter(nodes, data=True)
            else:
                node_iter = self.nodes_iter(data=True)
            for i, n in node_iter:
                n.load_features(hdf)
        finally:
            # Explicitly close the HDF5 handle rather than relying on
            # garbage collection via `hdf = None`.
            hdf.close()

    def match_features(self, k=None):
        """
        For all connected edges in the graph, apply feature matching
Loading