Unverified Commit 579cccd0 authored by gsn9's avatar gsn9 Committed by GitHub
Browse files

Logging for matcher module (#624)

* updated criatefi.py to use logging over warnings, as well as adding some logs where errors are raised.

* added logging to matcher module

* removed unused imports

* removed import
parent ddcca3ce
Loading
Loading
Loading
Loading
+24 −6
Original line number Diff line number Diff line
import math
import warnings
import logging
from bisect import bisect_left

import cv2
@@ -11,6 +11,7 @@ from scipy.ndimage.interpolation import zoom

import autocnet.utils.utils as util

log = logging.getLogger(__name__)

def cifi(template, search_image, thresh=90, use_percentile=True,
         radii=list(range(1,12)), scales=[0.5, 0.57, 0.66,  0.76, 0.87, 1.0], verbose=False):
@@ -75,6 +76,7 @@ def cifi(template, search_image, thresh=90, use_percentile=True,

    radii = np.asarray(radii)
    if not radii.size or not np.any(radii):
        log.error('Input radii list is empty')
        raise ValueError('Input radii list is empty')

    scales = np.asarray(scales)
@@ -82,10 +84,11 @@ def cifi(template, search_image, thresh=90, use_percentile=True,
        raise ValueError('Input scales list is empty')

    if max(radii) > max(template.shape)/2:
        warnings.warn('Max Radii is larger than original template, this may produce sub-par results.'
        log.warning('Max Radii is larger than original template, this may produce sub-par results.'
                      'Max radii: {} max template dimension: {}'.format(max(radii), max(template.shape)))

    if thresh < -1. or thresh > 1. and not use_percentile:
        log.error(f'Thresholds must be in range [-1,1] when not using percentiles. Got: {thresh}')
        raise ValueError('Thresholds must be in range [-1,1] when not using percentiles. Got: {}'
                         .format(thresh))

@@ -162,7 +165,7 @@ def cifi(template, search_image, thresh=90, use_percentile=True,
    fg_candidate_pixels = np.array([(y, x) for (y, x), coeff in np.ndenumerate(coeffs) if coeff >= thresh])

    if fg_candidate_pixels.size == 0:
        warnings.warn('Cifi returned empty set.')
        log.warning('Cifi returned empty set.')

    if verbose: # pragma: no cover
        plt.imshow(coeffs, interpolation='none')
@@ -244,25 +247,30 @@ def rafi(template, search_image, candidate_pixels, best_scales, thresh=95,
    # check inputs for validity

    if search_image.shape < template.shape:
        log.error('Template Image size error')
        raise ValueError('Template Image is smaller than Search Image for template of'
                         'size: {} and search image of size: {}'
                         .format(template.shape, search_image.shape))

    candidate_pixels = np.asarray(candidate_pixels)
    if not candidate_pixels.size or not np.any(candidate_pixels):
        log.error('empty pixel list error')
        raise ValueError('cadidate pixel list is empty')

    best_scales = np.asarray(best_scales, dtype=np.float32)
    if not best_scales.size or not np.any(best_scales):
        log.error('scale list empty')
        raise ValueError('best_scale list is empty')

    if best_scales.shape != search_image.shape:
        log.error('Error:: image shape does not match scale')
        raise ValueError('Search image and scales must be of the same shape '
                         'got: best scales shape: {}, search image shape: {}'
                         .format(best_scales.shape, search_image.shape))

    radii = np.asarray(radii, dtype=int)
    if not radii.size or not np.any(radii):
        log.error('Error:: input radii list empty')
        raise ValueError('Input radii list is empty')

    best_scales = np.asarray(best_scales, dtype=float)
@@ -270,14 +278,16 @@ def rafi(template, search_image, candidate_pixels, best_scales, thresh=95,
        raise ValueError('Input best_scales list is empty')

    if max(radii) > max(template.shape)/2:
        warnings.warn('Max Radii is larger than original template, this mat produce sub-par results.'
        log.warning('Max Radii is larger than original template, this may produce sub-par results.'
                      'Max radii: {} max template dimension: {}'.format(max(radii), max(template.shape)))

    if thresh < -1. or thresh > 1. and not use_percentile:
        log.error(f'Thresholds must be in range [-1,1] when not using percentiles. Got: {thresh}')
        raise ValueError('Thresholds must be in range [-1,1] when not using percentiles. Got: {}'
                         .format(thresh))

    if alpha <= 0:
        log.error(f'Alpha: {alpha} not >= 0')
        raise ValueError('Alpha must be >= 0')
    alpha %= 2*math.pi

@@ -317,7 +327,7 @@ def rafi(template, search_image, candidate_pixels, best_scales, thresh=95,
            scaled_center_y, scaled_center_x = (math.floor(scaled_img.shape[0]/2),
                                                math.floor(scaled_img.shape[1]/2))
        except:
            warnings.warn('{}\' window is to small to use for scale {} at resulting size'
            log.warning('{}\' window is to small to use for scale {} at resulting size'
                          .format((y, x), best_scales[y, x], scaled_img.shape))
            rafi_alpha_means[i] = np.negative(np.ones(len(alpha_list)))
            continue
@@ -365,7 +375,7 @@ def rafi(template, search_image, candidate_pixels, best_scales, thresh=95,
    best_rotation = best_rotation[rafi_mask]

    if sg_candidate_points.size == 0:
        warnings.warn('Second filter Rafi returned empty set.')
        log.warning('Second filter Rafi returned empty set.')

    if verbose: # pragma: no cover
        plt.imshow(image_pixels, interpolation='none')
@@ -441,37 +451,45 @@ def tefi(template, search_image, candidate_pixels, best_scales, best_angles,
    # check all inputs for validity, probably a better way to do this

    if search_image.shape < template.shape:
        log.error('Error:: template image size is smaller than searched image size')
        raise ValueError('Template Image is smaller than Search Image for template of'
                         'size: {} and search image of size: {}'
                         .format(template.shape, search_image.shape))

    candidate_pixels = np.asarray(candidate_pixels)
    if not candidate_pixels.size or not np.any(candidate_pixels):
        log.error('Error: pixel list is empty')
        raise ValueError('cadidate pixel list is empty')

    best_scales = np.asarray(best_scales, dtype=np.float32)
    if not best_scales.size or not np.any(best_scales):
        log.error('Error: best scale list is empty')
        raise ValueError('best_scale list is empty')

    if best_scales.shape != search_image.shape:
        log.error('Error: search image and scale are not of the same shape')
        raise ValueError('Search image and scales must be of the same shape '
                         'got: best scales shape: {}, search image shape: {}'
                         .format(best_scales.shape, search_image.shape))

    best_angles = np.asarray(best_angles, dtype=np.float32)
    if not best_angles.size or not np.any(best_angles):
        log.error('Error: input best angle list is empty')
        raise ValueError('Input best angle list is empty')

    best_scales = np.asarray(best_scales, dtype=float)
    if not best_scales.size or not np.any(best_scales):
        log.error('Error: input best scale list is empty')
        raise ValueError('Input best_scales list is empty')

    if thresh < -1. or thresh > 1. and not use_percentile:
        log.error(f'Error: threshold out of range')
        raise ValueError('Thresholds must be in range [-1,1] when not using percentiles. Got: {}'
                         .format(thresh))

    # Check inputs
    if upsampling < 1:
        log.error('Error: upsampling not >= 1')
        raise ValueError('Upsampling must be >= 1, got {}'.format(upsampling))

    tefi_coeffs = np.zeros(candidate_pixels.shape[0])
+4 −2
Original line number Diff line number Diff line
import warnings
import logging

import numpy as np
import pandas as pd
@@ -8,6 +8,8 @@ import cv2
FLANN_INDEX_KDTREE = 1  # Algorithm to set centers,
DEFAULT_FLANN_PARAMETERS = dict(algorithm=FLANN_INDEX_KDTREE, trees=3)

log = logging.getLogger(__name__)

def match(edge, k=2, **kwargs):
    """
    Given two sets of descriptors, utilize a FLANN (Approximate Nearest
@@ -208,7 +210,7 @@ class FlannMatcher(object):
                                    qid,
                                    j.distance))
                else:
                    warnings.warn('Likely self neighbor in query!')
                    log.warning('Likely self neighbor in query!')
        return pd.DataFrame(matched, columns=['source_image', 'source_idx',
                                              'destination_image', 'destination_idx',
                                              'distance']).astype(np.float32)
+4 −3
Original line number Diff line number Diff line
from collections import deque
import math
import warnings
import logging

import numpy as np
import pandas as pd

log = logging.getLogger(__name__)

def distance_ratio(edge, matches, ratio=0.8, single=False):
    """
@@ -112,7 +113,7 @@ def spatial_suppression(df, bounds, xkey='x', ykey='y', k=60, error_k=0.05, nste
        # Binary search
        mid_idx = int((min_idx + max_idx) / 2)
        if min_idx == mid_idx or mid_idx == max_idx:
            warnings.warn('Unable to optimally solve.')
            log.warning('Unable to optimally solve.')
            process = False
        else:
            # Setup to store results
@@ -160,7 +161,7 @@ def spatial_suppression(df, bounds, xkey='x', ykey='y', k=60, error_k=0.05, nste
            max_idx = mid_idx
            if max_idx == 0:
                process = False
                warnings.warn('Unable to retrieve {} points. Consider reducing the amount of points you request(k)'.format(k))
                log.warning('Unable to retrieve {} points. Consider reducing the amount of points you request(k)'.format(k))
            if min_idx == max_idx:
                process = False
        elif len(result) > k + k * error_k:
+6 −4
Original line number Diff line number Diff line
@@ -24,7 +24,9 @@ from autocnet.transformation import roi
from autocnet.matcher.subpixel import geom_match_simple
from autocnet.utils.utils import bytescale

import warnings
import logging

log = logging.getLogger(__name__)

def generate_ground_points(Session, ground_mosaic, nspts_func=lambda x: int(round(x,1)*1), ewpts_func=lambda x: int(round(x,1)*4), size=(100,100)):
    """
@@ -53,7 +55,7 @@ def generate_ground_points(Session, ground_mosaic, nspts_func=lambda x: int(roun
    if isinstance(ground_mosaic, str):
        ground_mosaic = GeoDataset(ground_mosaic)

    warnings.warn('This function is not well tested. No tests currently exist \
    log.warning('This function is not well tested. No tests currently exist \
    in the test suite for this version of the function.')

    session = Session()
@@ -377,7 +379,7 @@ def propagate_control_network(Session,
               and cartesian) of successfully propagated points

    """
    warnings.warn('This function is not well tested. No tests currently exist \
    log.warning('This function is not well tested. No tests currently exist \
    in the test suite for this version of the function.')

    match_func = check_match_func(match_func)
@@ -440,7 +442,7 @@ def propagate_control_network(Session,
        res = session.query(Points).filter(spatial_intersects).all()

        if len(res) > 1:
            warnings.warn(f"There is more than one point at lon: {lon}, lat: {lat}")
           log.warning(f"There is more than one point at lon: {lon}, lat: {lat}")

        elif len(res) == 1:
            # update existing point with new measures
+4 −2
Original line number Diff line number Diff line
import warnings
import logging

log = logging.getLogger(__name__)

try:
    import cudasift as cs
@@ -12,7 +14,7 @@ def extract_features(array, nfeatures=None, **kwargs):
    if not nfeatures:
        nfeatures = int(max(array.shape) / 1.25)
    else:
        warnings.warn('NFeatures specified with the CudaSift implementation.  Please ensure the distribution of keypoints is what you expect.')
        log.warning('NFeatures specified with the CudaSift implementation.  Please ensure the distribution of keypoints is what you expect.')

    siftdata = cs.PySiftData(nfeatures)
    cs.ExtractKeypoints(array, siftdata, **kwargs)
Loading