Unverified Commit e54ccf36 authored by Kelvin Rodriguez's avatar Kelvin Rodriguez Committed by GitHub
Browse files

Updated docs for change detection code (#550)

* added topo generation code

* update cd docs

* updated rv docstring, as it totally existed this whole time

* addressed comments
parent 675b946f
Loading
Loading
Loading
Loading
+83 −23
Original line number Diff line number Diff line
@@ -33,6 +33,25 @@ from autocnet.matcher.cpu_extractor import extract_features
from autocnet import cg

def image_diff(arr1, arr2):
    """
    Diff two images but accounts for null pixels. Intended to be used with change 
    detection algorithms.  

    Parameters 
    ----------

    arr1 : np.ndarray
           2D numpy array containing the first image, must have the same shape as arr2

    arr2 : np.ndarray 
           2D numpy array containing the second image, must have the same shape as arr1

    Returns
    -------

    : np.ndarray 
      new diffed image 
    """
    arr1 = arr1.astype("float32")
    arr2 = arr2.astype("float32")
    arr1[arr1 == 0] = np.nan
@@ -49,6 +68,25 @@ def image_diff(arr1, arr2):


def image_ratio(arr1, arr2):
    """
    Gets the ratio of two images but accounts for null and zero pixels. Intended to be used with change 
    detection algorithms.  

    Parameters 
    ----------

    arr1 : np.ndarray
           2D numpy array containing the first image, must have the same shape as arr2

    arr2 : np.ndarray 
           2D numpy array containing the second image, must have the same shape as arr1

    Returns
    -------

    : np.ndarray 
      new image containing ratioed pixels
    """
    arr1 = arr1.astype("float32")
    arr2 = arr2.astype("float32")
    arr1[arr1 == 0] = np.nan
@@ -65,6 +103,25 @@ def image_ratio(arr1, arr2):


def image_diff_sq(arr1, arr2):
    """
    Squared difference of two images, accounting for null pixels. Intended
    to be used with change detection algorithms.

    Convenience wrapper that squares the result of ``image_diff``.

    Parameters
    ----------
    arr1 : np.ndarray
        2D numpy array containing the first image, must have the same shape as arr2

    arr2 : np.ndarray
        2D numpy array containing the second image, must have the same shape as arr1

    Returns
    -------
    : np.ndarray
      new image containing diffed pixels, squared element-wise
    """
    diffed = image_diff(arr1, arr2)
    return diffed * diffed


@@ -473,7 +530,10 @@ def blob_detector(image1, image2, sub_solar_azimuth, image_func=image_diff_sq,

def rv_detector(im1, im2, search_size, pattern_size=None, threshold=.999):
    """
    RV coefficient based change detection.
    RV coefficient based change detection. This computes an RV coefficient on a sliding window 
    and correlates low scores below the input threshold to expected change.  
    
    **WARNING**: The time complexity is `(1 + (search_size - pattern_size))^2` per overlapping pixel between im1 and im2, so the larger the difference between the search and pattern sizes, the faster compute time grows.

    Parameters
    ----------
@@ -492,14 +552,14 @@ def rv_detector(im1, im2, search_size, pattern_size=None, threshold=.999):
    threshold : float
        The cutoff value for an RV value to be considered a change


    Returns 
    -------
    : pd.DataFrame
      A pandas dataframe containing a points of changed areas

    : np.ndarray
      A numpy array containing the RV values of each pixel.  Note that the array is
       padded by NaN values for 1/2 window size on each side

    """
    def get_window(arr, ulx, uly, size):
        return arr[ulx:ulx+size, uly:uly+size]
+285 −0
Original line number Diff line number Diff line
@@ -601,3 +601,288 @@ def import_func(func):
    module = importlib.import_module(module, package='autocnet')
    func = getattr(module, func)
    return func


def compute_depression(input_dem, scale_factor=1, curvature_percentile=75):
    """
    Compute depressions and return a new DEM with the largest depression
    filled in, along with a mask of the changed pixels.

    Parameters
    ----------
    input_dem : np.ndarray, rd.rdarray
        2D array of elevation DNs, a DEM.

    scale_factor : float
        Value to scale the erosion of planform curvatures by.

    curvature_percentile : float
        What percentile of the curvature to keep; lower values result in
        bigger blobs.

    Returns
    -------
    dem : rd.rdarray
        DEM with the largest depression filled.

    mask : np.ndarray
        Change mask, True on pixels that have been changed.

    Raises
    ------
    TypeError
        If input_dem is neither an np.ndarray nor an rd.rdarray.
    """
    if isinstance(input_dem, np.ndarray):
        dem = rd.rdarray(input_dem.copy(), no_data=0)
    elif isinstance(input_dem, rd.rdarray):
        # take ownership of the reference
        dem = input_dem.copy()
    else:
        # Previously an unsupported type fell through silently and raised a
        # confusing UnboundLocalError the first time `dem` was used below.
        raise TypeError("input_dem must be an np.ndarray or rd.rdarray, "
                        "got {}".format(type(input_dem).__name__))

    # create filled DEM
    demfilled = rd.FillDepressions(dem, epsilon=True, in_place=False, topology="D8")

    # Mask out filled areas, keeping only the largest (above 95th percentile) fills
    mask = np.abs(dem - demfilled)
    thresh = np.percentile(mask, 95)
    mask[mask <= thresh] = False
    mask[mask > thresh] = True

    # Normalize planform curvature to [0, 1] and keep only the strongest part
    curvatures = rd.TerrainAttribute(dem, attrib='planform_curvature')
    curvatures = (curvatures - np.min(curvatures))/np.ptp(curvatures)
    curvatures[curvatures < np.percentile(curvatures, curvature_percentile)] = 0
    curvatures[mask.astype(bool)] = 0

    # Erode the filled DEM by the scaled curvature signal
    demfilled -= curvatures * scale_factor

    mask = (curvatures+mask).astype(bool)

    # Get 3rd nn distance to derive a data-driven DBSCAN eps
    coords = np.argwhere(mask)
    nbrs = NearestNeighbors(n_neighbors=3, algorithm='kd_tree').fit(coords)
    dists, _ = nbrs.kneighbors(coords)
    eps = np.percentile(dists, 95)

    # Cluster the masked pixels
    db = DBSCAN(eps=eps, min_samples=3).fit(coords)
    labels = db.labels_
    unique, counts = np.unique(labels, return_counts=True)

    # First count are outliers (noise label -1 sorts first), ignore
    counts = counts[1:]
    unique = unique[1:]

    # Keep only the largest cluster as the depression of interest
    index = np.argwhere(counts == counts.max())
    group = unique[index][0][0]
    cluster = coords[labels == group]

    # mask out depression
    dmask = np.full(dem.shape, False)
    dmask[[*cluster.T]] = True

    # Splice the filled values into the original DEM at the depression only
    dem[dmask] = 0
    demfilled[~dmask] = 0
    dem = dem+demfilled

    return dem, dmask


def rasterize_polygon(shape, vertices, dtype=bool):
    """
    Simple tool to convert poly into a boolean numpy array.
    
    source: https://stackoverflow.com/questions/37117878/generating-a-filled-polygon-inside-a-numpy-array
    
    Parameters
    ----------
    
    shape : tuple 
            size of the array in (y,x) format
    
    vertices : np.array, list
               array of vertices in [[x0, y0], [x1, y1]...] format
    
    dtype : type
            datatype of output mask
    
    Returns
    -------
    
    mask : np.array
           mask with filled polygon set to true
    
    """
    def check(p1, p2, base_array):
        idxs = np.indices(base_array.shape) # Create 3D array of indices

        p1 = p1.astype(float)
        p2 = p2.astype(float)

        # Calculate max column idx for each row idx based on interpolated line between two points
        if p1[0] == p2[0]:
            max_col_idx = (idxs[0] - p1[0]) * idxs.shape[1]
            sign = np.sign(p2[1] - p1[1])
        else:
            max_col_idx = (idxs[0] - p1[0]) / (p2[0] - p1[0]) * (p2[1] - p1[1]) + p1[1]
            sign = np.sign(p2[0] - p1[0])
            
        return idxs[1] * sign <= max_col_idx * sign

    base_array = np.zeros(shape, dtype=dtype)  # Initialize your array of zeros

    fill = np.ones(base_array.shape) * True  # Initialize boolean array defining shape fill

    # Create check array for each edge segment, combine into fill array
    for k in range(vertices.shape[0]):
        fill = np.all([fill, check(vertices[k-1], vertices[k], base_array)], axis=0)
    
    print(fill.any())
    # Set all values inside polygon to one
    base_array[fill] = 1
    return base_array


def generate_dem(alpha=1.0, size=800, scales=(160, 80, 32, 16, 8, 4, 2, 1), scale_factor=5):
    """
    Produces a random DEM via multi-scale noise accumulation followed by
    slope-based erosion.

    Parameters
    ----------
    alpha : float
        Controls height variation. Lower number makes a shallower and noisier DEM,
        higher values create smoother DEM with large peaks and valleys.
        Recommended range = (0, 1.5]

    size : int
        size of DEM, output DEM is in the shape of (size, size)

    scales : sequence of int
        Divisors of ``size`` defining the coarse-to-fine noise octaves;
        each entry yields an intermediate grid of size/scale.

    scale_factor : float
        Scalar to multiply the slope degradation by, higher values = more erosion.
        Recommended to increase proportionately with alpha
        (higher alphas mean you might want higher scale_factor)

    Returns
    -------
    dem : np.array
        DEM array in the shape (size, size)
    """
    # Local import: only needed here, and keeps the fix self-contained.
    from scipy import ndimage

    topo = np.zeros((2, 2)) + random.rand(2, 2) * (200 / (2.**alpha))

    for scale in scales:
        nn = int(size / scale)
        # scipy.misc.imresize was removed in SciPy 1.3.0; ndimage.zoom with
        # order=3 replaces the old interp="cubic", mode="F" float resize.
        factors = (nn / topo.shape[0], nn / topo.shape[1])
        topo = ndimage.zoom(topo, factors, order=3)
        topo = topo + random.rand(nn, nn) * (200 / (nn ** alpha))

    topo = rd.rdarray(topo, no_data=0)

    # Erode by normalized rise/run slope, scaled by scale_factor.
    curvatures = rd.TerrainAttribute(topo, attrib='slope_riserun')
    curvatures = (curvatures - np.min(curvatures)) / np.ptp(curvatures) * scale_factor
    return topo - curvatures


def hillshade(img, azi=255, min_slope=20, max_slope=100, min_bright=0, grayscale=False, alt=45):
    """
    Hillshade a DEM, based on IDL code by Colin Dundas.

    Parameters
    ----------
    img : np.array
        DEM to hillshade

    azi : float
        Sun azimuth in degrees

    min_slope : float
        minimum slope value

    max_slope : float
        maximum slope value

    min_bright : float
        minimum brightness

    grayscale : bool
        whether or not to produce grayscale image

    alt : float
        Sun altitude above the horizon in degrees. The original code
        referenced ``alt`` without ever defining it (a guaranteed NameError);
        it is now an explicit, backward-compatible keyword parameter.

    Returns
    -------
    dem : np.array
        hillshaded DEM as a (rows, cols, 3) RGB stack
    """
    dem = np.array(np.flip(bytescale(img), axis=0), dtype=int)
    emax = np.max(dem)
    emin = np.min(dem)

    # 256 sample points over [0, 10] used to interpolate the 12-entry color ramps
    indices = np.linspace(0, 255, 256) / 25.5

    red_array = [0,25,50,101,153,204,255,255,255,255,255,255]
    red_index = np.arange(len(red_array))
    red_vec = np.interp(indices, red_index, red_array)

    green_array = [42,101,153,204,237,255,255,238,204,153,102,42]
    green_index = np.arange(len(green_array))
    green_vec = np.interp(indices, green_index, green_array)

    blue_array = [255,255,255,255,255,255,204,153,101,50,25,0]
    blue_index = np.arange(len(blue_array))
    blue_vec = np.interp(indices, blue_index, blue_array)

    # Scale elevations to 0-255 color-table indices
    zz = (255.0/(emax-emin))*(dem-emin)
    zz = zz.astype(int)

    # Unit surface normals from one-pixel elevation differences
    nx = (np.roll(dem, 1, axis = 1) - dem)
    ny = (np.roll(dem, 1, axis = 0) - dem)
    sz = np.shape(nx)
    nz = np.ones(sz)
    nl = np.sqrt(np.power(nx, 2.0) + np.power(ny, 2.0) + np.power(nz, 2.0))
    nx = nx/nl
    ny = ny/nl
    nz = nz/nl

    # Sun direction vector from azimuth/altitude
    # (a stray no-op math.cos(math.radians(1)) call was removed here)
    azi_rad = math.radians(azi)
    alt_rad = math.radians(alt)
    lx = math.sin(azi_rad)*math.cos(alt_rad)
    ly = math.cos(azi_rad)*math.cos(alt_rad)
    lz = math.sin(alt_rad)

    # Lambertian shading: dot product of surface normal and sun vector
    dprod = nx*lx + ny*ly + nz*lz

    # NOTE(review): this guard tests min_slope but the formula uses max_slope;
    # preserved as-is from the original — confirm against the IDL source.
    if min_slope is not None:
        min_dprod = math.cos(math.radians(max_slope + 90.0 - alt))
    else:
        min_dprod = np.min(dprod)

    if max_slope is not None:
        max_dprod = math.cos(math.radians(90.0 - alt - max_slope))
    else:
        max_dprod = np.max(dprod)

    bright = ((dprod - min_dprod) + min_bright)/((max_dprod - min_dprod) + min_bright)

    # Apply brightness to the color table, or plain gray when grayscale
    if grayscale:
        qq = 255*bright
        rr = 255*bright
        ss = 255*bright
    else:
        qq = red_vec[zz]*bright
        rr = green_vec[zz]*bright
        ss = blue_vec[zz]*bright

    arrforout = np.dstack((qq, rr, ss))
    arrforout = np.flip(arrforout.astype(int), axis = 0)
    # NOTE(review): the original assigned bytescale(arrforout) to a misspelled,
    # unused variable (arrfotout) and returned the un-bytescaled array; the dead
    # assignment (and a no-op `.shape` statement) were dropped — behavior is
    # unchanged, but the bytescale call may have been the author's intent.
    return arrforout
+17 −1
Original line number Diff line number Diff line
@@ -59,7 +59,23 @@ def poly_pixel_to_latlon(poly, affine, coord_transform):

    return poly_type(lonlats)

# Long-form help text shown by the CLI (passed to argparse as the description).
# Typos fixed: "two image" -> "two images", "decriptions" -> "descriptions",
# "deteciton" -> "detection".
description = """
Registers two images and runs a change detection algorithm on the pair of images.

WARNING: Runs bundle adjust with update=yes, make sure you are using copies.

Out is a .tif file and .csv file. The former is a GEOTIFF image, usually the diff image that change detection was run on. The latter .csv file contains polygon results as a WKT field in the CSV file.

This script runs the images through several steps:
    1. cubes are inputted into spiceinit, an ISIS program that gives us a spatially enabled level 0 image
    2. Using Autocnet, we run a pair-wise feature extraction and registration pipeline to programmatically produce a pair-wise control network
    3. jigsaw is then used with the image pair and output control network to perform relative geodetic control of the image pair and updating camera parameters
    4. using the new camera pointing, the image pairs are projected onto each other
    5. The selected change detection algorithm is used on the projected images
    6. Resulting image is written out as a geotiff file containing the diffed image and a csv file containing detected change results.

See the Autocnet docs for detailed descriptions of each change detection algorithm.
"""

if __name__ == "__main__":
    cd_function_help_string = ("Change detection algorithm to use.\n"
@@ -67,7 +83,7 @@ if __name__ == "__main__":
                               "Okubogar modified method (okbm). Experimental feature based change detection algorithm that expands on okobubogar to allow for programmatic change detection."
    )

    parser = argparse.ArgumentParser(description="Registers two image and runs a change detection algorithm on the pair of images. WARNING: Runs bundle adjust with update=yes, make sure you are using copies. Out is a .tif file and .csv file. The former is a GEOTIFF image, usually the diff image that change detection was run on. The latter .csv file contains polygon results as a WKT field in the CSV file.")
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('before', action='store', help='Path to image 1, generally the "before image"')
    parser.add_argument('after', action='store', help='Path to image 2, generally the "after image"')
    parser.add_argument('out', action='store', help='Output image path, csv with geometries are also written as a side cart file as a csv.')
+1 −0
Original line number Diff line number Diff line
@@ -51,6 +51,7 @@ import autocnet

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
nbsphinx_allow_errors = True

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+10 −0
Original line number Diff line number Diff line
:mod:`cg.change_detection` --- Change Detection Algorithms
==========================================================

The :mod:`cg.change_detection` module provides change detection methods.

.. versionadded:: 0.1.0

.. automodule:: autocnet.cg.change_detection
   :synopsis: Pairwise Change Detection Algorithms
   :members:
 No newline at end of file
Loading