Commit 6f30f5bc authored by Marco Molinaro's avatar Marco Molinaro
Browse files

uncommitted changes found on production server (vlkb.ia2.inaf.it)

parent b4037861
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
FROM registry.fedoraproject.org/fedora:latest

# Apache httpd + mod_wsgi (Python 3) front-end with the pandas/PyTables/Pyro4
# stack used by the SED-fitting service.
# FIX: the two near-identical `dnf install` lines created a redundant layer;
# merged into the superset install (systemd httpd python3-mod_wsgi pip).
RUN dnf -y update &&\
    dnf -y install systemd httpd python3-mod_wsgi pip &&\
    dnf clean all &&\
    pip install pandas tables Pyro4

COPY wsgi.conf /etc/httpd/conf.d/
EXPOSE 80

# Only the LAST ENTRYPOINT in a Dockerfile takes effect, so the two httpd
# entries above it were dead; the effective entrypoint was /bin/bash (debug
# shell). Kept the httpd alternative as a comment for production use.
# ENTRYPOINT /usr/sbin/httpd -DFOREGROUND
ENTRYPOINT /bin/bash

+7 −5
Original line number Diff line number Diff line
#!/usr/bin/env python3

from urllib.parse import unquote
import pandas as pd

import time
def query_out(parameters):
    """Run a pandas `DataFrame.query` expression against the model table.

    Parameters
    ----------
    parameters : str
        URL-encoded query expression (spaces and %27-encoded quotes are
        stripped before decoding).

    Returns
    -------
    pandas.DataFrame
        The rows of sim_total.dat matching the decoded expression.
    """
    t1 = time.time()
    # Strip blanks and encoded quotes, then URL-decode the expression.
    parsequery = parameters.replace(' ', '')
    query1 = parsequery.replace('%27', '')
    query_final = unquote(query1)
    # FIX: the old HDF5 read (sedmodels.h5) and its query were dead code --
    # their result was immediately overwritten by the CSV query below.
    dataset = pd.read_csv('/var/www/wsgi-scripts/sim_total.dat', sep=' ')
    myquery = dataset.query(query_final)
    t2 = time.time()
    # Elapsed time goes to the server log via stdout.
    print(t2 - t1)

    return myquery
+337 −22
Original line number Diff line number Diff line
@@ -5,36 +5,353 @@ Created on Fri Mar 4 15:06:40 2022

@author: smordini
"""

import Pyro4
import Pyro4.util
import socket
from urllib.parse import unquote

import pandas as pd
import logging
from idl_to_py_lib import *
import numpy as np
import sys

# Show full remote tracebacks from the Pyro4 daemon in local errors.
sys.excepthook = Pyro4.util.excepthook

# FIX: removed a stray `@Pyro4.expose` that sat between two imports
# (a decorator must precede a def/class; here it was a SyntaxError).
# Root logger writes UTF-8 timestamped records to the sedfit error log.
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
handler = logging.FileHandler('/home/sedmods/vlkb-sedmods/sedfit_error.log', 'a', 'utf-8')
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
root_logger.addHandler(handler)

# FIX: removed a stale duplicate `class QueryMaker` fragment (left over from
# the merged diff) that eagerly loaded /sed-data/sedmodels.h5 here; it was
# shadowed by the real class definition below and only wasted import time.
# Resolve a fully qualified host name for the Pyro4 daemon to bind to.
if socket.gethostname().find('.') >= 0:
    hostname = socket.gethostname()
else:
    hostname = socket.gethostbyaddr(socket.gethostname())[0] + ".local"


@Pyro4.expose
class QueryMaker(object):
    """Pyro4-exposed SED-fitting service.

    The model grid is read once from sim_total.dat at import time and shared
    (read-only) by every remote call to query_out().
    """

    # Model grid shared by all requests; loaded once at import time.
    dataset = pd.read_csv('/var/www/wsgi-scripts/sim_total.dat', sep=' ')

    def query_out(self, parameters):
        """Fit an observed SED, encoded in *parameters*, against the model grid.

        Parameters
        ----------
        parameters : str
            URL-encoded, '_'-separated list of ten Python literals:
            wavelengths, fluxes, flux errors, flux flags, distance,
            prefilter threshold, SED weights, a `local` flag (currently
            unused), wavelengths to use, and the delta-chi2 width.

        Returns
        -------
        str
            JSON (pandas 'split' orientation) describing the selected
            models, or a human-readable error message.
        """
        try:
            # Normalise the raw query: strip blanks and encoded quotes, then
            # URL-decode it.
            parsequery = parameters.replace(' ', '')
            query1 = parsequery.replace('%27', '')
            query_final = unquote(query1)
            query_log = query_final.replace('_', ' ')
            query_list = query_final.split('_')
            if len(query_list) == 0:
                output = 'Deamon service running correctly'
                return output
            # SECURITY NOTE(review): eval() on remote input -- acceptable
            # only on a trusted network; consider ast.literal_eval.
            w_in = eval(query_list[0])
            f_in = eval(query_list[1])
            df_in = eval(query_list[2])
            fflag_in = eval(query_list[3])
            distance = eval(query_list[4])
            prefilter_thresh = eval(query_list[5])
            sed_weights = eval(query_list[6])
            local = eval(query_list[7])
            use_wl = eval(query_list[8])
            delta_chi2 = eval(query_list[9])
        except Exception:
            logging.exception('Error occurred in reading/importing function parameters')
            # FIX: previously fell through and crashed on undefined names.
            return 'Error occurred in reading/importing function parameters'
        logging.info(query_log)
        if distance < 0:
            logging.error('Negative value for distance; program interrupted')
            mystr = 'The distance is set to a negative value. Please provide a positive value.'
            return mystr

        # Physical model parameters returned to the caller.
        phys_names = ['clump_mass', 'compact_mass_fraction', 'clump_upper_age',
                      'dust_temp', 'bolometric_luminosity', 'random_sample',
                      'n_star_tot', 'm_star_tot', 'n_star_zams', 'm_star_zams',
                      'l_star_tot', 'l_star_zams',
                      'zams_luminosity_1', 'zams_mass_1', 'zams_temperature_1',
                      'zams_luminosity_2', 'zams_mass_2', 'zams_temperature_2',
                      'zams_luminosity_3', 'zams_mass_3', 'zams_temperature_3']
        phys_par = [tt.upper() for tt in phys_names]
        jy2mjy = 1000.   # model fluxes are stored in mJy
        d_ref = 1000.    # reference distance of the model grid
        ref_wave = [3.4, 3.6, 4.5, 4.6, 5.8, 8.0, 12., 22., 24.0, 70., 100., 160., 250., 350., 500., 870., 1100.]
        col_names = ['WISE1', 'I1', 'I2', 'WISE2', 'I3', 'I4', 'WISE3', 'WISE4', 'M1', 'PACS1', 'PACS2', 'PACS3', 'SPIR1', 'SPIR2', 'SPIR3', 'LABOC', 'BOL11']

        fac_resc = (distance / d_ref) ** 2.
        delta = 1 - (prefilter_thresh)
        # Keep only the input bands that exist in the reference grid.
        q12, q21 = match(w_in, ref_wave)
        w = [w_in[i] for i in q12]
        f = [f_in[i] for i in q12]
        d = [df_in[i] for i in q12]
        ff = [fflag_in[i] for i in q12]

        # Sort all per-band arrays by increasing wavelength.
        wwll = [i for i in w]
        w.sort()
        wl = []
        fl = []
        df = []
        fflag = []
        use_wave = []
        for ww in w:
            q = wwll.index(ww)
            wl.append(wwll[q])
            fl.append(f[q])
            df.append(d[q])
            fflag.append(ff[q])
            use_wave.append(use_wl[q])

        # Column-name strings for the observed bands and physical parameters.
        par_str = ''
        par_str_arr = []
        ret_par_str = ''
        for t in range(len(ref_wave)):
            for k in wl:
                if ref_wave[t] - 0.05 < k and ref_wave[t] + 0.05 > k:
                    par_str = par_str + col_names[t] + ','
                    ret_par_str = ret_par_str + col_names[t] + ','
                    par_str_arr.append(col_names[t].lower())
        par_str = par_str[:-1].upper()
        ret_par_str = ret_par_str[:-1].lower()
        phys_str = ','.join(phys_par).upper()
        ret_phys_par = ','.join(phys_par).lower()
        phys_par_arr = [p.lower() for p in phys_par]

        if use_wave != 0:  # NOTE(review): use_wave is a list, so this branch is effectively always taken
            # Prefilter the grid on flux intervals around each used band.
            query = ""
            for bb in use_wave:
                if bb in wl:
                    qband = wl.index(bb)
                else:
                    # FIX: the log call used wl.index(bb), which raises
                    # ValueError exactly when bb is missing; report bb itself.
                    logging.error('Reference wavelength required: ' + str(bb) + ' not found in data file; wavelength excluded from fit procedure')
                    continue
                if bb in ref_wave:
                    qrefband = ref_wave.index(bb)
                else:
                    qrefband = -1
                qqueryband, qdummy = match(ref_wave, wl)
                ul_str = ''
                if 0 in fflag:
                    qulband = [i for i, e in enumerate(fflag) if e == 0]
                    nqulband = fflag.count(0)
                    ul_str = ' and '
                    for t in range(nqulband):
                        # FIX: IDL-style parentheses (col_names(...), fl(...))
                        # would call the lists and raise TypeError.
                        ul_str = ul_str + '(' + col_names[qqueryband[qulband[t]]] + "<'" + tostring(fl[qulband[t]] * jy2mjy * fac_resc) + "') and "
                    if fflag[qband] == 1:
                        ul_str = ul_str[0:len(ul_str) - 4]
                    if fflag[qband] == 0:
                        ul_str = ul_str[4:len(ul_str) - 4]
                # NOTE(review): ul_str is built but never appended to `query`,
                # as in the original -- confirm whether that is intentional.
                if fflag[qband] == 1:
                    query = query + (str(remove_char(pad0_num(tostring(float(fl[qband] * jy2mjy * fac_resc * (1 - (delta ** 2.))))), '+') + "<" + col_names[qrefband] + "<" + remove_char(pad0_num(tostring(float(fl[qband] * jy2mjy * fac_resc * (1 + (delta ** 2.))))), '+') + ' or '))
                if fflag[qband] == 0:
                    # Fluxes are multiplied by 1000 because model fluxes are in milliJy.
                    query = query + (str(col_names[qrefband] + "<" + remove_char(pad0_num(tostring(float(fl[qband] * jy2mjy * fac_resc))), '+')) + ' or ')
            query_final = query[:-4]
            try:
                dmodels = QueryMaker.dataset.query(query_final)
            except Exception:
                logging.exception('Error occurred while querying dataset with band intervals')
                # FIX: previously fell through and crashed on undefined dmodels.
                return 'Error occurred while querying dataset with band intervals'
        else:
            # Prefilter on the bolometric luminosity computed from the observed SED.
            lum = lbol(wl, fl, distance)
            # The rescaling factor has to stay at 1 when luminosity is used.
            fac_resc = 1.
            # FIX: was assigned to the misspelled `query_fianl`, so the query
            # below ran against the raw input expression instead.
            query_final = remove_char(pad0_num(tostring(float(lum * fac_resc * (1 - (delta ** 2.))))), '+') + "< bolometric_luminosity <" + remove_char(pad0_num(tostring(float(lum * fac_resc * (1 + (delta ** 2.))))), '+')
            try:
                dmodels = QueryMaker.dataset.query(query_final)
            except Exception:
                logging.exception('Error occurred while querying dataset with bolometric luminosity interval')
                return 'Error occurred while querying dataset with bolometric luminosity interval'

        nlines = len(dmodels)
        if nlines < 1:
            logging.info('Model selection not performed. Program interrupted')
            output = 'Model selection not performed. Retry with different paramenters'
            return output
        logging.info('Model selection completed, obtained ' + str(nlines) + ' models.')
        n_sel = len(dmodels['cluster_id'])

        # Per-band chi2 weights: detections in each regime (MIR <25um,
        # FIR 25-250um, mm >250um) share sqrt(N)/w; limits get 9999.
        flag = [int(x) for x in fflag]
        ul_flag = np.zeros(len(flag))
        dyd = np.zeros(len(flag))
        if sed_weights == 0:
            sed_weights = [3. / 7., 1. / 7., 3. / 7.]
        renorm = sum(sed_weights)
        w1 = np.sqrt(sed_weights[0] / renorm)
        w2 = np.sqrt(sed_weights[1] / renorm)
        w3 = np.sqrt(sed_weights[2] / renorm)
        qmir = [i for i in range(len(wl)) if wl[i] < 25]
        qfir = [i for i in range(len(wl)) if 25 <= wl[i] <= 250]
        qmm = [i for i in range(len(wl)) if wl[i] > 250]
        # Unified the three copy-pasted regime blocks of the original.
        for regime, weight in ((qmir, w1), (qfir, w2), (qmm, w3)):
            if len(regime) == 0:
                continue
            detections = [qq for qq in regime if flag[qq] == 1]
            limits = [qq for qq in regime if flag[qq] != 1]
            if len(detections) > 0:
                for qq in regime:
                    dyd[qq] = np.sqrt(len(detections)) / weight
            for qq in limits:
                dyd[qq] = 9999.  # i.e. it's an upper/lower limit
                ul_flag[qq] = 1
        dyd = [dd / min(dyd) for dd in dyd]
        dyd = [dd * ee / ll for dd, ee, ll in zip(dyd, df, fl)]
        dyd = [dd * ee for dd, ee in zip(dyd, fl)]

        # Chi2 over a grid of n_sel models x nstep trial distances.
        nstep = 10
        dist_arr = np.arange(nstep)
        dist_arr = [distance * (1 - (delta ** 2.)) + dd * (distance * (1 + (delta ** 2.)) - distance * (1 - (delta ** 2.))) / nstep for dd in dist_arr]
        nw = len(wl)
        matrix_models = np.zeros([n_sel, nstep, nw])
        matrix_fluxes = np.zeros([n_sel, nstep, nw])
        matrix_dfluxes = np.zeros([n_sel, nstep, nw])
        for i in range(nstep):
            for k in range(nw):
                matrix_models[:, i, k] = dmodels[par_str_arr[k].upper()] / 1000 * ((d_ref / dist_arr[i]) ** 2.)
        for k in range(nw):
            matrix_fluxes[:, :, k] = fl[k]
            matrix_dfluxes[:, :, k] = dyd[k]
        # Vectorized; same arithmetic as the original per-element loops.
        dmat = ((matrix_models - matrix_fluxes) ** 2) / (matrix_dfluxes ** 2)
        matrix_chi2 = np.sum(dmat, 2)
        dchi2 = delta_chi2 if delta_chi2 != 0 else 1
        chi2min = np.min(matrix_chi2)
        qchi2 = np.argwhere(matrix_chi2 <= chi2min + dchi2)
        nqchi2 = len(qchi2)

        # Collect the selected models, keeping the best chi2 per cluster_id.
        # (Replaces two duplicated 25-line append/replace blocks.)
        model_cols = ['cluster_id'] + phys_names
        par = {key: [] for key in model_cols + ['d', 'chi2', 'wmod', 'fmod']}
        logging.info('Selected ' + str(nqchi2) + ' models before duplicate removal.')
        for i in range(nqchi2):
            row = qchi2[i][0]
            final_dist = dist_arr[qchi2[i][1]]
            values = {key: dmodels[key].iloc[row] for key in model_cols}
            values['d'] = final_dist
            values['chi2'] = matrix_chi2[tuple(qchi2[i])]
            values['wmod'] = list(ref_wave)
            # Model fluxes rescaled from the reference to the trial distance.
            values['fmod'] = [dmodels[cc].iloc[row] / 1000 * ((d_ref / final_dist) ** 2) for cc in col_names]
            if values['cluster_id'] not in par['cluster_id']:
                for key in par:
                    par[key].append(values[key])
            else:
                j = par['cluster_id'].index(values['cluster_id'])
                if par['chi2'][j] > values['chi2']:
                    for key in par:
                        par[key][j] = values[key]
        logging.info('Sedfit procedure completed. Obtained ' + str(len(par['cluster_id'])) + ' models after duplicate removal.')
        pd_dict = pd.DataFrame.from_dict(par)
        test1 = pd_dict.to_json(orient='split')
        test2 = str(test1)
        logging.info('Procedure completed.')
        return test2

# Publish QueryMaker on the Pyro4 name server as "test.query" and serve
# requests forever. FIX: removed a diff hunk-header line ("@@ ... @@") that
# had been pasted into the code here and broke the syntax.
daemon = Pyro4.Daemon(hostname)
ns = Pyro4.locateNS()
uri = daemon.register(QueryMaker)
ns.register("test.query", uri)
print("Ready. Object uri=", uri)

daemon.requestLoop()
 No newline at end of file

+5 −6
Original line number Diff line number Diff line
#!/usr/bin/env python3
# WSGI front-end prelude for the pandas-backed query service.
import sys

# Log the interpreter version at import time (visible in the httpd log).
print(f'python version {sys.version}')

import pandas

# Make the deployed query module importable, then pull in the entry point.
sys.path.insert(0, "/var/www/wsgi-scripts/")
from hdf_query import query_out
@@ -10,18 +11,16 @@ def application(environ, start_response):
    # NOTE(review): fragment of application(environ, start_response); the
    # `def` line and the definitions of `status` and `var1` lie above this
    # view -- presumably status = '200 OK' and var1 comes from QUERY_STRING;
    # TODO confirm against the full file.
    # Run the pandas query and serialize the resulting DataFrame as JSON
    # ('split' orientation), then stringify it for byte-encoding.
    test=query_out(var1)
    test1=test.to_json(orient='split')
    test2=str(test1)
#    test1=str(test)

    # Encode the JSON payload as the WSGI byte-string response body.
    output=bytes(test2,'utf-8')
    output1 = b'Hello beautiful World!'  # placeholder; appears unused in this fragment


    getstring = environ['QUERY_STRING']  # read but apparently unused below


    # Plain-text response with an explicit Content-Length.
    response_headers = [('Content-type', 'text/plain'),
                        ('Content-Length', str(len(output)))]
#    response_headers = [('Content-Disposition', 'attachment; filename= export.csv')]
#    test.headers['Content-Disposition']='attachment; filename= export.csv'
#    test.headers['Content-type']= 'text/csv'

    start_response(status, response_headers)

    return [output]
+6 −11
Original line number Diff line number Diff line
#!/usr/bin/env python3
# WSGI front-end that forwards queries to the Pyro4 SED-fitting daemon.
import sys

# Make locally deployed modules importable.
sys.path.insert(0, "/var/www/html/")
#from parquet_query import query_out
import Pyro4
# FIX: pandas was imported twice; kept a single import.
# NOTE(review): pandas appears unused in this file's visible code -- it may
# be needed for deserializing remote results; confirm before removing.
import pandas

def application(environ, start_response):
    """WSGI entry point: proxy the raw query string to the Pyro4 daemon.

    Looks up the remote ``QueryMaker`` object registered as ``test.query``
    on the Pyro4 name server, forwards this request's QUERY_STRING, and
    returns the daemon's JSON reply as a plain-text response.
    """
    status = '200 OK'
    query_in = str(environ['QUERY_STRING'])
    query_maker = Pyro4.Proxy("PYRONAME:test.query")
    # FIX: the remote query_out() was invoked twice and the first result was
    # discarded -- one round-trip is enough. Also dropped the unused
    # `getstring` local.
    test2 = query_maker.query_out(query_in)
    output = bytes(test2, 'utf-8')
    response_headers = [('Content-type', 'text/plain'),
                        ('Content-Length', str(len(output)))]
    start_response(status, response_headers)

    return [output]