Commit c82ec480 authored by Claudio Gheller's avatar Claudio Gheller
Browse files

First commit

parent 93a55ae8
Loading
Loading
Loading
Loading
+166 −0
Original line number Original line Diff line number Diff line
import numpy as np
from scipy.ndimage import gaussian_filter
import math
import sys
import random
import matplotlib
# bugfix: the backend must be selected BEFORE matplotlib.pyplot is imported;
# calling use("Agg") afterwards is unreliable on older matplotlib versions and
# pyplot may fail to import on display-less (batch/cluster) nodes otherwise.
matplotlib.use("Agg")
import matplotlib.pyplot as plt

def read_image(infile):
    """Read a raw binary image of float64 samples.

    Parameters
    ----------
    infile : str
        Path to a flat binary file containing float64 pixel values.

    Returns
    -------
    list
        A single-element list holding the 1-D float64 array; the caller
        reshapes it to (resolution_y, resolution_x).
    """
    with open(infile, 'rb') as f1:
        image = np.fromfile(f1, dtype=np.float64)
        # Diagnostic: report the dynamic range of the raw data.
        print(np.amin(image), np.amax(image))

    return [np.array(image, dtype=np.float64)]

# Reference force values, one per synthetic image, for several measurement
# campaigns; exactly one list is active at a time.
# 12/07/19
#refvalues = ([0.013034,0.024206,0.040278,0.077616,0.11809,0.182476,0.238336,0.297136,0.346822,0.419636,0.47138,0.575358,0.634354,0.708442,0.76097,0.873474,0.98833,1.16718,1.29801,1.404438,1.880032])
# 18/08/17
# NOTE(review): 1.1780 breaks the otherwise monotonically increasing sequence
# (0.0992 ... 0.1249); it looks like a typo for 0.1178 -- verify against the
# original 18/08/17 measurement table.
refvalues = ([0.0000, 0.0199, 0.0374, 0.0580, 0.0746, 0.0992, 1.1780, 0.1249, 0.1755, 0.2127, 0.2545, 0.3189, 0.4106, 0.5416, 0.7088])
# 21/01/20
#refvalues = ([1.0329, 1.2153, 1.4229, 0.1422, 1.8228, 0.2065, 0.2803, 0.0244, 0.3328, 0.4142, 0.5566, 0.6644, 0.0641, 0.8193, 0.0917])

# Directory holding the raw binary input images; one path active at a time.
#inputdir ='/m100_scratch/userexternal/nsanvita/temp/'
#inputdir ='/m100_scratch/userexternal/nsanvita/exp_data/180817_oldled/'
#inputdir ='/m100_scratch/userexternal/nsanvita/exp_data/180817_oldled_10mm_64px/'
inputdir = '/m100_scratch/userexternal/nsanvita/exp_data/new_led_21-01-2020/'
#inputdir = '/m100_scratch/userexternal/nsanvita/exp_data/180817_oldled_10mm_16px/'
#inputdir = '/m100_scratch/userexternal/nsanvita/exp_data/180817_oldled_10mm_128px/'
# Noise/background/blur parameters for each supported image resolution.
# The 32 px entry carries the "version 4" positive-background settings; the
# other resolutions keep the older negative-background configuration.
resolution_params = {
    16:  {'edge_radius': 7.5,  'sigmanoise': 0.025, 'sigmablur': 0.75,
          'background': -0.2, 'sigmabackground': 0.0},
    32:  {'edge_radius': 15.5, 'sigmanoise': 0.055, 'sigmablur': 1.0,
          'background': 0.2,  'sigmabackground': 0.05},
    64:  {'edge_radius': 31,   'sigmanoise': 0.080, 'sigmablur': 1.2,
          'background': -0.2, 'sigmabackground': 0.0},
    128: {'edge_radius': 62,   'sigmanoise': 0.095, 'sigmablur': 2.0,
          'background': -0.2, 'sigmabackground': 0.0},
}

# For every synthetic image: read it, decorate it with an edge ring, noise,
# background and blur, then dump it as raw float64 and as PNGs.
for img in range(1, 16):
    infile = inputdir + 'particleone_synth_' + str(img) + '.bin'
    print(infile)

    numberofimages = 1
    resolution_y = 32
    resolution_x = 32

    # bugfix: an unsupported resolution previously fell through and raised a
    # NameError much later; fail early with a clear message instead.
    try:
        params = resolution_params[resolution_x]
    except KeyError:
        sys.exit("Unsupported resolution: %d" % resolution_x)
    edge_radius = params['edge_radius']
    sigmanoise = params['sigmanoise']
    sigmablur = params['sigmablur']
    background = params['background']
    sigmabackground = params['sigmabackground']
    # Settings shared by every resolution.
    edge_brightness = 0.25
    writeimages = 1
    remove_external_values = 1
    fth = 0.25

    images = np.zeros((numberofimages, resolution_y, resolution_x))
    images_orig = np.zeros((numberofimages, resolution_y, resolution_x))

    image = read_image(infile)

    index = 0
    images[index, :, :] = np.reshape(image, (resolution_y, resolution_x))
    images_orig[index, :, :] = images[index, :, :]
    # NOTE(review): index is always 0, so every image is compared against
    # refvalues[0]; presumably refvalues[img-1] was intended -- confirm.
    refvalue = refvalues[index]

    # Add a randomly modulated bright ring of one-pixel thickness at
    # edge_radius from the image centre.
    if edge_radius > 0:
        for ii in range(resolution_y):
            for jj in range(resolution_x):
                dist = math.sqrt((ii - 0.5*resolution_y)**2 + (jj - 0.5*resolution_x)**2)
                if edge_radius - 1 <= dist <= edge_radius:
                    edge_int = random.uniform(edge_brightness - 0.1, edge_brightness + 0.1)
                    images[index, ii, jj] = edge_int

    # A negative edge_radius means: blank everything OUTSIDE |edge_radius|.
    if edge_radius < 0:
        edge_radius_abs = abs(edge_radius)
        for ii in range(resolution_y):
            for jj in range(resolution_x):
                dist = math.sqrt((ii - 0.5*resolution_y)**2 + (jj - 0.5*resolution_x)**2)
                if dist > edge_radius_abs:
                    images[index, ii, jj] = 0.0

    # Add pixel-wise gaussian noise, clipping the result to [0, 1].
    if sigmanoise > 0:
        dirtyimg = np.random.normal(loc=0.0, scale=sigmanoise,
                                    size=images[index, :, :].shape)
        images[index, :, :] = np.clip(dirtyimg + images[index, :, :], 0, 1)

    # Add a constant background level, scaled down linearly below the force
    # threshold fth.  bugfix: the sample size is now taken from the image
    # itself; it previously reused dirtyimg.shape, which raised a NameError
    # whenever sigmanoise <= 0.
    if background > 0.0:
        background0 = (background / fth) * refvalue
        if refvalue > fth:
            background0 = background
        dirtyimg = np.random.normal(loc=background0, scale=sigmabackground,
                                    size=images[index, :, :].shape)
        images[index, :, :] = np.clip(dirtyimg + images[index, :, :], 0, 1)

    # Add gaussian blur (zero-padded borders).
    if sigmablur > 0:
        dirtyimg = np.empty_like(images[index, :, :])
        gaussian_filter(input=images[index, :, :], sigma=sigmablur,
                        output=dirtyimg, mode='constant', cval=0.0, truncate=4.0)
        print("max ", np.max(images[index, :, :]), np.max(dirtyimg))
        images[index, :, :] = dirtyimg

    # Zero out everything just beyond the edge ring.
    if remove_external_values == 1:
        edge_radius_abs = abs(edge_radius) + 1
        for ii in range(resolution_y):
            for jj in range(resolution_x):
                dist = math.sqrt((ii - 0.5*resolution_y)**2 + (jj - 0.5*resolution_x)**2)
                if dist > edge_radius_abs:
                    images[index, ii, jj] = 0.0

    # Write the processed image as raw float64 (same bytes the previous
    # f1.write(...) produced, but without leaving the handle management to
    # manual open/close).
    outfile = "bin/noise_newback_vs4_" + str(img) + ".bin"
    images[index, :, :].tofile(outfile)

    # Write one-pixel-per-point PNGs of the noisy and original images.
    if writeimages == 1:
        dpi = 1
        height, width = images[index, :, :].shape
        fig = plt.figure(figsize=(width, height), dpi=dpi)
        ax = fig.add_axes([0, 0, 1, 1])
        ax.axis('off')
        ax.imshow(images[index, :, :], cmap='gray', interpolation='none')
        fig.savefig("images/noise" + str(img) + ".png", dpi=dpi)
        ax.imshow(images_orig[index, :, :], cmap='gray', interpolation='none')
        fig.savefig("images/orig" + str(img) + ".png", dpi=dpi)
        # bugfix: close the figure; the loop previously leaked one figure
        # per image.
        plt.close(fig)

Compare/balance.py

0 → 100644
+38 −0
Original line number Original line Diff line number Diff line
import numpy as np
import math
import sys


def _read_table(path, nrows, ncols):
    """Read nrows records of ncols floats from a text file.

    Each record is one whitespace-separated data line followed by one
    separator line, which is discarded (this mirrors the original
    double-readline format).
    """
    values = np.zeros((nrows, ncols))
    # bugfix: use a context manager so the file is closed even on a
    # malformed line (the original open/close pair leaked on exceptions).
    with open(path, 'r') as f:
        for i in range(nrows):
            fields = f.readline().split()
            for j in range(ncols):
                values[i, j] = float(fields[j])
            f.readline()  # skip the separator line between records
    return values


numberofcontacts = 3
image_set = 7
valuesf = _read_table('test-force.txt', image_set, numberofcontacts)  # force magnitudes
valuesb = _read_table('test-beta.txt', image_set, numberofcontacts)   # contact angles (radians)

# Net force balance per image: sum the contact-force vectors given as
# (magnitude, angle) pairs and print the resulting x/y components.
for i in range(image_set):
    resx = 0.0
    resy = 0.0
    for j in range(numberofcontacts):
        resx = resx + valuesf[i, j] * math.cos(valuesb[i, j])
        resy = resy + valuesf[i, j] * math.sin(valuesb[i, j])
    print(resx, resy)

Compare/cnnmodel.py

0 → 100644
+92 −0
Original line number Original line Diff line number Diff line
# import the necessary packages
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Reshape
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import UpSampling2D
from tensorflow.keras.layers import add
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
import numpy as np
from inputparameters import *

class CNN:
    """Convolutional regression network mapping one image to contact forces."""

    @staticmethod
    def build(width, height, depth, filters=(32, 64), eta=0.7):
        """Build and compile the CNN.

        Parameters
        ----------
        width, height, depth : int
            Input image dimensions, "channels last".
        filters : tuple of int
            Filter count of each convolutional stage.
        eta : float
            Used BOTH as the Dropout rate and as the Adam learning rate.
            NOTE(review): sharing one value for both is suspicious -- 0.7 is
            a plausible dropout rate but a very large learning rate; confirm
            this is intentional.

        Returns
        -------
        A compiled tf.keras Model with `numberofcontacts` linear outputs
        (numberofcontacts comes from `inputparameters`).
        """
        padd = "same"
        #padd = "valid"

        # "channels last" input shape; BatchNormalization therefore acts on
        # the trailing (channel) axis.
        inputShape = (height, width, depth)
        chanDim = -1

        # define the input to the CNN
        inputs = Input(shape=inputShape)
        x = inputs

        # Convolutional stages: the first is a plain conv+BN; later stages
        # add pooling, dropout and a 1x1-projected residual connection.
        index = 0
        for f in filters:
            if index == 0:
                # Conv2D may be changed to SeparableConv2D
                x = Conv2D(f, (3, 3), activation="relu", strides=1, padding=padd)(x)
                x = BatchNormalization(axis=chanDim)(x)
                previous_block_activation = x
            else:
                # apply a CONV => RELU => BN => POOLING => DROPOUT operation
                x = Conv2D(f, (3, 3), activation="relu", strides=1, padding=padd)(x)
                x = BatchNormalization(axis=chanDim)(x)
                x = MaxPooling2D((2, 2), padding=padd)(x)
                x = Dropout(eta)(x)
                # Project the residual to the pooled resolution and add it.
                residual = Conv2D(f, 1, strides=2, padding=padd)(previous_block_activation)
                x = add([x, residual])
                previous_block_activation = x
            index = index + 1

        # Regression head: flatten, then three shrinking dense layers with
        # dropout (identical structure to the original repeated blocks).
        x = Flatten()(x)
        for units in (64, 32, 16):
            x = Dropout(eta)(x)
            x = Dense(units, activation="relu")(x)

        # Output layer: one linear value per contact force.
        Noutputs = numberofcontacts
        x = Dropout(eta)(x)
        outputs = Dense(Noutputs, activation="linear")(x)

        cnn = Model(inputs, outputs, name="autoencoder")
        # bugfix: `lr` is the deprecated TF1/old-Keras keyword; tf.keras
        # spells it `learning_rate`.
        opt = Adam(learning_rate=eta)
        cnn.compile(optimizer=opt, loss="mean_squared_error",
                    metrics=["mean_squared_error"])

        # return the compiled network
        return (cnn)
+91 −0
Original line number Original line Diff line number Diff line
import numpy as np
import matplotlib.pyplot as plt

import tensorflow as tf
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
from tensorflow import keras
from inputparameters import *
from  load_data_Images import *
import glob
import math
import sys
import time
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from cnnmodel import *
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--hyper_file', type=str, help='Read the hyperparameter file')
args = parser.parse_args()
restartfile_evaluate = args.hyper_file
print(restartfile_evaluate)

time0 = time.time()

# Load data (load_data_Images, seed and comm come from the star imports).
trainXNoisy, trainsize = load_data_Images(seed, comm, 0)
trainXNoisy = np.expand_dims(trainXNoisy, axis=-1)
timemodel1 = time.time()

# Load the trained network from the checkpoint given on the command line.
cnn = keras.models.load_model(restartfile_evaluate)
cnn.summary()
timemodel2 = time.time()
timemodel = timemodel2 - timemodel1

# Calculate forces and time the inference.
timeeval1 = time.time()
y_pred = cnn.predict(trainXNoisy)
timeeval2 = time.time()
timeeval = timeeval2 - timeeval1

# Error statistics against the reference forces in refvalues.
# bugfix: initialise the accumulators unconditionally so the averages below
# do not raise NameError when startcontact is neither 2 nor 3.
err = 0.0
err_abs = 0.0

if startcontact == 2:
    # bugfix: context manager guarantees test.txt is closed on any exit path.
    with open('test.txt', 'w') as f:
        for j in range(y_pred.shape[0]):
            for i in range(y_pred.shape[1]):
                f.write(str(j)+' '+str(i)+' '+str(y_pred[j,i])+'\n')
            if refvalues[j] > 0.0:
                print("Disk %3d:\t%5.3f\t%5.3f\t%5.3f\t%5.3f\t%5.3f" % (j,refvalues[j],y_pred[j,0],y_pred[j,1],y_pred[j,0]-refvalues[j],abs(y_pred[j,0]-refvalues[j])/refvalues[j]))
                err = err + abs(y_pred[j,0]-refvalues[j])/refvalues[j]
                err_abs = err_abs + abs(y_pred[j,0]-refvalues[j])
            else:
                print("Disk %3d:\t%5.3f\t%5.3f\t%5.3f\t%5.3f\t%5.3f" % (j,refvalues[j],y_pred[j,0],y_pred[j,1],y_pred[j,0]-refvalues[j],abs(y_pred[j,0]-refvalues[j])))
            f.write(' \n')

if startcontact == 3:
    print("Disk id\t\tRef\tCalc\terror\trel err")
    for j in range(y_pred.shape[0]):
        # Calculated force: the largest of the three contact outputs.
        f1 = max([y_pred[j,0], y_pred[j,1], y_pred[j,2]])
        if refvalues[j] > 0.0:
            print("Disk %3d:\t%5.3f\t%5.3f\t%5.3f\t%5.3f" % (j,refvalues[j],f1,f1-refvalues[j],abs(f1-refvalues[j])/refvalues[j]))
            err = err + abs(f1-refvalues[j])/refvalues[j]
            err_abs = err_abs + abs(f1-refvalues[j])
        else:
            print("Disk %3d:\t%5.3f\t%5.3f\t%5.3f\t%5.3f" % (j,refvalues[j],f1,f1-refvalues[j],abs(f1-refvalues[j])))

# Averages exclude one disk -- presumably the zero-force reference entry
# skipped by the refvalues[j] > 0.0 test above; confirm.
err = err/(y_pred.shape[0]-1)
err_abs = err_abs/(y_pred.shape[0]-1)
print()
print('Average percentage error:', err)
print('Average absolute error:', err_abs)

print("Timings: ", timemodel, timeeval)
+86 −0
Original line number Original line Diff line number Diff line
# Set verbosity
verbose = 1
# Define global input parameters:
# seed is passed to load_data_Images (presumably an RNG seed); comm is the
# second argument -- presumably an MPI communicator placeholder, unused while
# USE_MPI is 0 -- confirm against load_data_Images.
seed = 1111
comm = 0 

# Data files
imagepath = '/m100_scratch/userexternal/nsanvita/Disks_paper2/expdata/20mm_clean_190323/'
fileroot_image = "particleone_s_"
numberoffiles = 22
# Reference force value for each of the 22 images; compared against the
# network predictions when computing error statistics.
refvalues = ([0.000, 0.031, 0.044, 0.081, 0.115, 0.157, 0.220, 0.258, 0.331, 0.402, 0.457, 0.559, 0.653, 0.775, 0.843, 0.973, 1.106, 1.326, 1.479, 1.725, 1.859, 1.963])

# Dataset properties
# startcontact selects the error-statistics branch in the evaluation script
# (2: first output column, 3: max of three contact outputs).
startcontact = 3
endcontact = 3
#numberofcontacts = 6

# Image properties
resolution_x = 64
resolution_y = 64

USE_MPI = 0
# Noise/blur applied at load time; both disabled here.
sigmanoise = 0.0#0.090
sigmablur = 0
# beta
writeimages = 1
remove_edge_radius = 29.0
#alpha
# Force threshold separating the low/high calibration regimes below.
f_knee = 0.40

# generic calibration
# calibration for f <= f_knee
calibration_factor_low = 0.87
background_low = 0.0
# calibration for f > f_knee
calibration_factor_high = 1.15
background_high = 0.0

## 18/08/17 oldled (15 particles)
## calibration for f <= f_knee
#calibration_factor_low = 1.29
#background_low = -0.09
## calibration for f > f_knee
#calibration_factor_high = 1.29
#background_high = -0.09

## 12/07/19 (21 particles)
## calibration for f <= f_knee
#calibration_factor_low = 0.50
#background_low = -0.015
## calibration for f > f_knee
#calibration_factor_high = 1.175
#background_high = -0.30

## 21/01/20 (15 particles) OLD
## calibration for f <= f_knee
#calibration_factor_low = 0.71
#background_low = -0.069
## calibration for f > f_knee
#calibration_factor_high = 1.33
#background_high = -0.349

## 21/01/20 (15 particles) NEW
## calibration for f <= f_knee
#calibration_factor_low = 0.864
#background_low = -0.1
##calibration for f > f_knee
#calibration_factor_high = 1.34
#background_high = -0.35

## 2/10/22 (10 particles) synt
## calibration for f <= f_knee
#calibration_factor_low = 1.0#1.072
#background_low = 0.0#-0.0276
## calibration for f > f_knee
#calibration_factor_high = 1.0
#background_high = 0.0

## gabriella_10mm_loading1 (18 particles) - cal1
## calibration for f <= f_knee
#calibration_factor_low = 0.775
#background_low = -0.061
## calibration for f > f_knee
#calibration_factor_high = 1.108
#background_high = -0.122
Loading