Commit cb0cc71b authored by Laura, Jason R
Browse files

Updated graph module to use logging

parent 579cccd0
Loading
Loading
Loading
Loading
+9 −5
Original line number Diff line number Diff line
@@ -5,7 +5,7 @@ import copy
import os
import json
import sys
import warnings
import logging

from io import StringIO 
from contextlib import redirect_stdout
@@ -20,6 +20,8 @@ from autocnet.utils.utils import import_func
from autocnet.utils.serializers import JsonEncoder, object_hook
from autocnet.io.db.model import JobsHistory

log = logging.getLogger(__name__)

def parse_args():  # pragma: no cover
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--host', help='The host URL for the redis queue to to pull messages from.')
@@ -178,7 +180,7 @@ def manage_messages(args, queue):
        
        if msg is None:
            if args['queue'] == False:
                warnings.warn('Expected to process a cluster job, but the message queue is empty.')
                log.warning('Expected to process a cluster job, but the message queue is empty.')
                return
            elif args['queue'] == True:
                print(f'Completed processing from queue: {queue}.')
@@ -197,8 +199,8 @@ def manage_messages(args, queue):
        with redirect_stdout(stdout):
            # Apply the algorithm
            response = process(msgdict)
            # Should go to a logger someday!
            print(response)
            # Should go to a logger someday! (today is that day!)
            log.info(response)
            
        out = stdout.getvalue()
        # print to get everything on the logs in the directory
@@ -226,6 +228,8 @@ def manage_messages(args, queue):

def main():  # pragma: no cover
    """Entry point: parse CLI args, configure logging, and drain the redis queue."""
    args = vars(parse_args())
    # Logging verbosity is environment-driven; falls back to INFO when
    # AUTOCNET_LOGLEVEL is unset.
    loglevel = os.environ.get("AUTOCNET_LOGLEVEL", "INFO")
    logging.basicConfig(level=loglevel)
    # Connect to the redis message queue and process its messages.
    queue = StrictRedis(host=args['host'], port=args['port'], db=0)
    manage_messages(args, queue)
+9 −6
Original line number Diff line number Diff line
from collections import defaultdict, MutableMapping, Counter
from functools import wraps, singledispatch
import json
import warnings
import logging

import numpy as np
import pandas as pd
@@ -29,6 +29,9 @@ from autocnet.io.db.wrappers import DbDataFrame
from plio.io.io_gdal import GeoDataset
from csmapi import csmapi

# set up the logging file
log = logging.getLogger(__name__)

class Edge(dict, MutableMapping):
    """
    Attributes
@@ -176,7 +179,7 @@ class Edge(dict, MutableMapping):
        tar_desc = self.destination.descriptors

        if not 'xm' in ref_kps.columns:
            warnings.warn('To ring match body centered coordinates (xm, ym, zm) must be in the keypoints')
            log.warning('To ring match body centered coordinates (xm, ym, zm) must be in the keypoints')
            return
        ref_feats = ref_kps[['x', 'y', 'xm', 'ym', 'zm']].values
        tar_feats = tar_kps[['x', 'y', 'xm', 'ym', 'zm']].values
@@ -234,7 +237,7 @@ class Edge(dict, MutableMapping):
        node = getattr(self, on)
        camera = getattr(node, 'camera')
        if camera is None:
            warnings.warn('Unable to project matches without a sensor model.')
            log.warning('Unable to project matches without a sensor model.')
            return

        matches = self.matches
@@ -274,7 +277,7 @@ class Edge(dict, MutableMapping):
    def overlap_check(self):
        """Creates a mask for matches on the overlap"""
        if not (self["source_mbr"] and self["destin_mbr"]):
            warnings.warn(
            log.warning(
                "Cannot use overlap constraint, minimum bounding rectangles"
                " have not been computed for one or more Nodes")
            return
@@ -378,7 +381,7 @@ class Edge(dict, MutableMapping):
                of reprojective error indexed to the matches data frame
        """
        if self.fundamental_matrix is None:
            warnings.warn('No fundamental matrix has been compute for this edge.')
            log.error('No fundamental matrix has been compute for this edge.')

        matches, mask = self.clean(clean_keys)
        s_keypoints, d_keypoints = self.get_match_coordinates(clean_keys=clean_keys)
@@ -699,7 +702,7 @@ class Edge(dict, MutableMapping):
            except:
                smbr = self.source.geodata.xy_extent
                dmbr = self.source.geodata.xy_extent
                warnings.warn("Overlap between {} and {} could not be "
                log.warning("Overlap between {} and {} could not be "
                                "computed.  Using the full image extents".format(self.source['image_name'],
                                                      self.destination['image_name']))
                smbr = [smbr[0][0], smbr[1][0], smbr[0][1], smbr[1][1]]
+13 −11
Original line number Diff line number Diff line
@@ -7,7 +7,7 @@ import os
from shutil import copyfile
import threading
from time import gmtime, strftime, time
import warnings
import logging
from itertools import combinations

import networkx as nx
@@ -58,6 +58,8 @@ from autocnet.spatial.isis import point_info
from autocnet.spatial.surface import GdalDem, EllipsoidDem
from autocnet.transformation.spatial import reproject, og2oc

# set up the logging file
log = logging.getLogger(__name__)
#np.warnings.filterwarnings('ignore')

# The total number of pixels squared that can fit into the keys number of GB of RAM for SIFT.
@@ -237,7 +239,7 @@ class CandidateGraph(nx.Graph):
            if fp and fp.IsValid():
                valid_datasets.append(i)
            else:
                warnings.warn(
                log.warning(
                    'Missing or invalid geospatial data for {}'.format(i.base_name))

        # Grab the footprints and test for intersection
@@ -250,7 +252,7 @@ class CandidateGraph(nx.Graph):
                    adjacency_dict[i.file_name].append(j.file_name)
                    adjacency_dict[j.file_name].append(i.file_name)
            except:
                warnings.warn(
                log.warning(
                    'Failed to calculate intersection between {} and {}'.format(i, j))
        return cls.from_adjacency(adjacency_dict)

@@ -377,7 +379,7 @@ class CandidateGraph(nx.Graph):
            else:
                image_path = image_name
            if not os.path.exists(image_path):
                warnings.warn("Cannot find {}".format(image_path))
                log.warning("Cannot find {}".format(image_path))
                return
            n = self.graph["node_counter"]
            self.graph["node_counter"] += 1
@@ -395,7 +397,7 @@ class CandidateGraph(nx.Graph):
        if new_node is not None and adj is not None:
            for adj_img in adj:
                if adj_img not in self.graph["node_name_map"].keys():
                    warnings.warn("{} not found in the graph".format(adj_img))
                    log.warning("{} not found in the graph".format(adj_img))
                    continue
                new_idx = new_node["node_id"]
                adj_idx = self.graph["node_name_map"][adj_img]
@@ -1007,7 +1009,7 @@ class CandidateGraph(nx.Graph):
        """

        if not self.is_connected():
            warnings.warn(
            log.warning(
                'The given graph is not complete and may yield garbage.')

        for s, d, edge in self.edges.data('edge'):
@@ -1630,7 +1632,7 @@ class NetworkCandidateGraph(CandidateGraph):
        Push messages to the redis queue for DB objects e.g., Points, Measures
        """
        if filters and query_string:
            warnings.warn('Use of filters and query_string are mutually exclusive.')
            log.warning('Use of filters and query_string are mutually exclusive.')

        with self.session_scope() as session:
            # Support either an SQL query string, or a simple dict based query
@@ -1943,7 +1945,7 @@ class NetworkCandidateGraph(CandidateGraph):
        fpaths = [self.nodes[i]['data']['image_path'] for i in ids]
        for f in self.files:
            if f not in fpaths:
                warnings.warn(f'{f} in candidate graph but not in output network.')
                log.warning(f'{f} in candidate graph but not in output network.')

        # Remap the df columns back to ISIS
        df.rename(columns={'pointtype':'pointType',
@@ -2038,7 +2040,7 @@ class NetworkCandidateGraph(CandidateGraph):
        elif os.path.exists(filelist):
            filelist = io_utils.file_to_list(filelist)
        else:
            warnings.warn('Unable to parse the passed filelist')
           log.warning('Unable to parse the passed filelist')

        if clear_db:
            self.clear_db()
@@ -2257,7 +2259,7 @@ class NetworkCandidateGraph(CandidateGraph):
                    try:
                        session.execute(f'ALTER SEQUENCE {t}_id_seq RESTART WITH 1')
                    except Exception as e:
                        warnings.warn(f'Failed to reset primary id sequence for table {t}')
                        log.warning(f'Failed to reset primary id sequence for table {t}')

    def cnet_to_db(self, cnet):
        """
@@ -2559,7 +2561,7 @@ class NetworkCandidateGraph(CandidateGraph):
                                          walltime='00:20:00',
                                          chunksize=1000,
                                          exclude=None):
        warnings.warn('This function is not well tested. No tests currently exists \
        log.warning('This function is not well tested. No tests currently exists \
        in the test suite for this version of the function.')

        # Setup the redis queue
+7 −5
Original line number Diff line number Diff line
from collections import defaultdict, MutableMapping
import itertools
import os
import warnings
import logging

from csmapi import csmapi
import numpy as np
@@ -22,6 +22,8 @@ from autocnet.io import keypoints as io_keypoints
from autocnet.vis.graph_view import plot_node
from autocnet.utils import utils

# set up the logging file
log = logging.getLogger(__name__)

class Node(dict, MutableMapping):
    """
@@ -395,7 +397,7 @@ class Node(dict, MutableMapping):
    def project_keypoints(self):
        if self.camera is None:
            # Without a camera, it is not possible to project
            warnings.warn('Unable to project points, no camera available.')
            log.warning('Unable to project points, no camera available.')
            return False
        # Project the sift keypoints to the ground
        def func(row, args):
@@ -444,7 +446,7 @@ class Node(dict, MutableMapping):
                   PATH to the directory for output and base file name
        """
        if self.keypoints.empty:
            warnings.warn('Node {} has not had features extracted.'.format(self['node_id']))
            log.warning('Node {} has not had features extracted.'.format(self['node_id']))
            return

        io_keypoints.to_npy(self.keypoints, self.descriptors,
@@ -524,7 +526,7 @@ class NetworkNode(Node):
        try:
            fp, cam_type = self.footprint
        except Exception as e:
            warnings.warn('Unable to generate image footprint.\n{}'.format(e))
            log.warning('Unable to generate image footprint.\n{}'.format(e))
            fp = cam_type = None
        # Create the image
        i = Images(name=self['image_name'],
@@ -638,7 +640,7 @@ class NetworkNode(Node):
            with open(isdpath, 'w') as f:
                json.dump(response, f)
        except Exception as e:
            warnings.warn('Failed to write JSON ISD for image {}.\n{}'.format(self['image_path'], e))
           log.warning('Failed to write JSON ISD for image {}.\n{}'.format(self['image_path'], e))
        isd = csmapi.Isd(self['image_path'])
        plugin = csmapi.Plugin.findPlugin('UsgsAstroPluginCSM')
        self._camera = plugin.constructModelFromISD(isd, model_name)
+24 −3
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@ from unittest.mock import patch

import numpy as np
import pytest
import logging

from autocnet.utils.serializers import JsonEncoder, object_hook
from autocnet.graph import cluster_submit
@@ -12,6 +13,7 @@ from autocnet.graph.node import NetworkNode
from autocnet.graph.edge import NetworkEdge
from autocnet.io.db.model import Points, JobsHistory

log = logging.getLogger(__name__)

@pytest.fixture
def args():
@@ -102,9 +104,11 @@ def test_finalize_message_from_work_queue(args, queue, simple_message):
    cluster_submit.finalize_message_from_work_queue(queue, args['working_queue'], remove_key)
    assert queue.llen(args['working_queue']) == 0
    
def test_no_msg(caplog, args, queue):
    """An empty message queue should produce a warning log, not an exception.

    The diff residue previously left two interleaved definitions here (the
    old pytest.warns-based test plus the new caplog-based one), which is not
    valid Python; only the caplog version is kept.
    """
    cluster_submit.manage_messages(args, queue)
    expected_log = 'Expected to process a cluster job, but the message queue is empty.'
    assert expected_log in caplog.text


# Classes and funcs for testing job submission.
@@ -167,6 +171,23 @@ def test_process_row(along, func, msg_additions, mocker):
    
    cluster_submit._instantiate_row.assert_called_once()

def _do_something(log_level):
    # Test helper (leading underscore keeps it out of pytest collection):
    # emit one message at the named level, e.g. _do_something('warning')
    # calls log.warning('Logging at the warning').
    # The bare @pytest.mark.parametrize() decorator was removed: parametrize
    # requires argnames/argvalues, and this helper is not a test function.
    return getattr(log, log_level)(f'Logging at the {log_level}')

def test_do_something(caplog):
    """Every level emitted by _do_something should be captured at that level.

    Fixes versus the original:
    - caplog.set_level(DEBUG) is required; without it the logger's default
      WARNING effective level drops the 'info' and 'debug' records and those
      iterations asserted nothing.
    - caplog.clear() is moved out of the record loop — clearing while
      iterating caplog.records mutates the list being iterated.
    - Assert that at least one record was captured per level so the check
      cannot pass vacuously.
    """
    # NOTE(review): assumes `os` is imported at the top of this test module —
    # the original code already reads os.environ here; confirm the import.
    log_levels = ["critical", "error", "warning", "info", "debug"]

    for level in log_levels:
        os.environ["AUTOCNET_LOGLEVEL"] = level
        # Capture everything so even debug-level records reach caplog.
        caplog.set_level(logging.DEBUG)
        _do_something(os.environ["AUTOCNET_LOGLEVEL"])

        assert caplog.records, f'No log records captured for level {level}'
        for record in caplog.records:
            # Compare case-insensitively: the env var is lowercase, the
            # record levelname is uppercase.
            assert os.environ["AUTOCNET_LOGLEVEL"].upper() == record.levelname.upper()
        caplog.clear()


@pytest.mark.parametrize("along, func, msg_additions",[
                        ([1,2,3,4,5], _do_nothing, {})
                        ])
Loading