Commits on Source (18)
@@ -229,6 +229,12 @@ git-tree-sha1 = "85b66005c9d16d3d27c9d06fd3be5e25074128da"
uuid = "525bcba6-941b-5504-bd06-fd0dc1a4d2eb"
version = "0.16.7"
[[FileIO]]
deps = ["Pkg", "Requires", "UUIDs"]
git-tree-sha1 = "cfb694feaddf4f0381ef3cc9d4c0d8fc6b7e2ea7"
uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549"
version = "1.9.0"
[[FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
@@ -263,7 +269,7 @@ uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.18"
[[GFit]]
deps = ["CMPFit", "DataStructures", "Dates", "Distributions", "ExprTools", "LsqFit", "PrettyTables", "Printf", "Random", "Statistics"]
deps = ["CMPFit", "DataStructures", "Dates", "Dierckx", "Distributions", "LsqFit", "MacroTools", "PrettyTables", "Printf", "ProgressMeter", "QuadGK", "Random", "Statistics"]
path = "/home/gcalderone/.julia/dev/GFit"
uuid = "ac42e6ba-c0bf-4fcf-827d-deea44b16255"
version = "0.1.0"
@@ -501,8 +507,14 @@ version = "1.0.1"
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[ProgressMeter]]
deps = ["Distributed", "Printf"]
git-tree-sha1 = "afadeba63d90ff223a6a48d2009434ecee2ec9e8"
uuid = "92933f4c-e287-5a05-a399-4b506db050ca"
version = "1.7.1"
[[QSFit]]
deps = ["CMPFit", "Cosmology", "DSP", "DataStructures", "Dates", "DelimitedFiles", "Dierckx", "FITSIO", "GFit", "GFitViewer", "Pkg", "Printf", "QuadGK", "Statistics", "Unitful", "UnitfulAstro"]
deps = ["CMPFit", "Cosmology", "DSP", "DataStructures", "Dates", "DelimitedFiles", "Dierckx", "FITSIO", "GFit", "GFitViewer", "Pkg", "Printf", "Statistics", "Unitful", "UnitfulAstro"]
path = "/home/gcalderone/.julia/dev/QSFit"
uuid = "fea25315-b951-4667-83c9-50e53efab241"
version = "0.1.0"
......
@@ -7,6 +7,7 @@ version = "0.1.0"
CMPFit = "a5944310-3432-5d93-8bb2-e3b5eb62a58f"
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
Dierckx = "39dd38d3-220a-591b-8e3c-4c3a8c710a94"
FileIO = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549"
GFit = "ac42e6ba-c0bf-4fcf-827d-deea44b16255"
GFitViewer = "5d93b50e-5cc7-4feb-a740-ec85257caa01"
Gnuplot = "dc211083-a33a-5b79-959f-2ff34033469d"
......
@@ -8,8 +8,9 @@ using Revise, Statistics, Serialization, Dierckx, DataFrames
using QSFit, GFit, Gnuplot, GFitViewer, MyAstroUtils
using CL_1ES_1927p654
using Dates
using TextParse
mkdir("output")
epoch_filenames = Vector{String}()
for (root, dirs, files) in walkdir("AT2018zf")
for file in files
@@ -19,8 +20,6 @@ for (root, dirs, files) in walkdir("AT2018zf")
end
end
function read_spec(epoch_id; kw...)
if epoch_id == "B03"
file = "boller2003.txt"
@@ -28,11 +27,11 @@ function read_spec(epoch_id; kw...)
file = epoch_filenames[epoch_id]
end
println(file)
spec = Spectrum(Val(:ASCII), file, columns=[1,2]; kw...);
spec = Spectrum(Val(:ASCII), file, columns=[1,2]; label="$epoch_id: $file", kw...);
if isa(epoch_id, Number)
spec.flux ./= all_scale[epoch_id]
end
spec.err .= 0.05 .* spec.flux;
spec.err .= 0.05 .* median(spec.flux);
return spec
end
@@ -47,41 +46,45 @@ I tried 180, 200 and 250, and the [OIII] luminosity does not change
=#
spec = read_spec("B03", resolution=200.)
add_spec!(source, spec);
(model, bestfit) = fit(source);
viewer(model, source, bestfit, showcomps=[:qso_cont, :galaxy, :balmer])
println( bestfit[:OIII_5007].norm.val )
res = fit(source);
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer],
filename="output/results_boller.html")
println(res.bestfit[:OIII_5007].norm.val )
# 0.00029085925218136
println( bestfit[:OIII_5007].norm.val / bestfit[:galaxy].norm.val )
println(res.bestfit[:OIII_5007].norm.val / res.bestfit[:galaxy].norm.val )
# 59.406476855719376
all_scale = fill(1., length(epoch_filenames))
all_fwhm = fill(NaN,length(epoch_filenames))
for id in 1:length(epoch_filenames)
spec = read_spec(id)
all_scale[id] *= median(spec.flux) / 25000
try
source = QSO{q1927p654}("1ES 1927+654 ($id)", Z, ebv=EBV);
source.options[:min_spectral_coverage][:OIII_5007] = 0.5
# source.options[:host_template] = "/home/gcalderone/Mbi1.30Zm1.49T00.0700_iTp0.00_baseFe_linear_FWHM_2.51"
if !isfile("output/scale_fwhm.dat")
all_scale = fill(1., length(epoch_filenames))
all_fwhm = fill(NaN,length(epoch_filenames))
for id in 1:length(epoch_filenames)
spec = read_spec(id)
add_spec!(source, spec);
(model, bestfit) = fit(source);
viewer(model, source, bestfit, showcomps=[:qso_cont, :galaxy, :balmer],
filename="output/results_$(id).html")
# Update all_scale and store FWHM
@info bestfit[:OIII_5007].norm.val
all_scale[id] *= bestfit[:OIII_5007].norm.val
all_fwhm[ id] = bestfit[:OIII_5007].fwhm.val
catch
all_scale[id] *= median(spec.flux) / 25000
try
source = QSO{q1927p654}("1ES 1927+654 ($id)", Z, ebv=EBV);
source.options[:min_spectral_coverage][:OIII_5007] = 0.5
# source.options[:host_template] = "/home/gcalderone/Mbi1.30Zm1.49T00.0700_iTp0.00_baseFe_linear_FWHM_2.51"
spec = read_spec(id)
add_spec!(source, spec);
res = fit(source);
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer],
filename="output/results_$(id).html")
# Update all_scale and store FWHM
@info res.bestfit[:OIII_5007].norm.val
all_scale[id] *= res.bestfit[:OIII_5007].norm.val
all_fwhm[ id] = res.bestfit[:OIII_5007].fwhm.val
catch
end
end
all_scale[.!isfinite.(all_fwhm)] .= NaN
serialize("output/scale_fwhm.dat", (all_scale, all_fwhm))
else
(all_scale, all_fwhm) = deserialize("output/scale_fwhm.dat")
end
all_scale[.!isfinite.(all_fwhm)] .= NaN
serialize("output/scale_fwhm.dat", (all_scale, all_fwhm))
@gp :prenorm "set key bottom right" "set grid" xlab="Epoch" ylab="Value" :-
@gp :- :prenorm all_scale./1e-18 "w lp t '[OIII] norm.'" :-
@@ -106,6 +109,7 @@ for id in 1:length(epoch_filenames)
x = s.λ;
y = s.flux;
y .-= median(y)
y ./= maximum(y)
@gp :- :allepochs x./(1+Z) y "u 1:(\$2+$id):($id) w l notit lc pal" :-
end
@gp :- :allepochs yr=[1, 29]
@@ -115,60 +119,83 @@ save(:allepochs, term="png size 800,2000", output="output/all_epochs_zoom.png")
dict_chosen_epochs = Dict(
:vecchi => [17, 10, 21, 23],
:lowres => [8,9,10,11,12,14,15,16,18,21,22,23,24,25],
:highres => [5, 7, 13, 17, 20],
:all => collect(5:26))
#=
TODO:
- neglect 9 for limited spectral coverage (it lacks the blue part)?
- the other odd ones are 3 and 16
=#
:vecchi => [17, 10, 21, 23],
:lowres => [8,9,10,11,12,14,15,16,18,21,22,23,24,25],
:highres => [5, 7, 13, 17, 20],
:limited => [1,2,3,13],
:all => collect(5:26))
# deleteat!(dict_chosen_epochs[:all], 4) #insufficient coverage in na_Hg
# Can't use a resolution smaller than 150 km/s, otherwise some lines
# will be neglected because of insufficient spectral coverage
resolution = Dict(
:lowres => 350.,
:highres => 150.,
:all => NaN)
for job in [:highres, :lowres]
chosen_epochs = dict_chosen_epochs[job]
source = QSO{q1927p654}("1ES 1927+654", Z, ebv=EBV);
source.options[:min_spectral_coverage][:OIII_5007] = 0.5
@gp :zoom "set grid" :-
for id in 1:length(chosen_epochs)
spec = read_spec(chosen_epochs[id], resolution=get(resolution, job, NaN))
add_spec!(source, spec);
@gp :- :zoom xr=[4750,5150] spec.λ ./ (1 + source.z) spec.flux "w l t '$(chosen_epochs[id])'"
end
(model, bestfit) = multi_fit(source);
viewer(model, source, bestfit, showcomps=[:qso_cont, :galaxy, :balmer],
filename="output/results_$(job).html")
viewer(model, source, bestfit, showcomps=[:qso_cont, :galaxy, :balmer],
filename="output/results_$(job)_rebin4.html", rebin=4)
models = Dict()
for id in 1:length(chosen_epochs)
models[(id, :x)] = domain(model[id])[:]
models[(id, :y)] = model[id]()
models[(id, :l5100)] = 5100 .* Spline1D(domain(model[id])[:], model[id]())(5100.)
for cname in [:qso_cont, :OIII_5007,
:br_Ha, :na_Ha, :bb_Ha,
:br_Hb, :na_Hb, :bb_Hb]
models[(id, cname)] = model[id](cname)
:highres => 150.)
job = :all
chosen_epochs = dict_chosen_epochs[job]
Nloop = 6
(all_scale, all_fwhm) = deserialize("output/scale_fwhm.dat")
for loop in 1:Nloop
file = "output/results_$(job)_$(loop).dat"
if !isfile(file)
source = QSO{q1927p654}("1ES 1927+654", Z, ebv=EBV);
source.options[:min_spectral_coverage][:OIII_5007] = 0.5
@gp :zoom "set grid" :-
for id in chosen_epochs
spec = read_spec(id, resolution=get(resolution, job, NaN))
add_spec!(source, spec);
@gp :- :zoom xr=[4750,5150] spec.λ ./ (1 + source.z) spec.flux "w l t '$(id)'"
end
res = multi_fit(source);
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer],
filename="output/results_$(job)_$(loop).html")
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer],
filename="output/results_$(job)_$(loop)_rebin4.html", rebin=4)
# Find best normalization for [OIII]
model2 = deepcopy(res.model);
for id in 1:length(chosen_epochs)
for cname in collect(keys(model2[id]))
if cname == :OIII_5007
model2[id][cname].norm.fixed = false
else
freeze(model2[id], cname)
end
end
end
mzer = GFit.cmpfit()
mzer.config.ftol = mzer.config.gtol = mzer.config.xtol = 1.e-6
bestfit2 = fit!(model2, source.data, minimizer=mzer);
OIII_norm = fill(0., length(chosen_epochs))
for id in 1:length(chosen_epochs)
OIII_norm[id] = bestfit2[id][:OIII_5007].norm.val
all_scale[chosen_epochs[id]] *= bestfit2[id][:OIII_5007].norm.val
end
serialize(file, (res, all_scale, OIII_norm))
else
(res, all_scale, OIII_norm) = deserialize(file);
end
serialize("output/results_$(job).dat", (models, bestfit))
end
OIII_norm_evol = fill(NaN, length(chosen_epochs))
for loop in 1:Nloop
file = "output/results_$(job)_$(loop).dat"
(res, all_scale, OIII_norm) = deserialize(file);
OIII_norm_evol = hcat(OIII_norm_evol, OIII_norm)
println(res.bestfit.cost / res.bestfit.dof)
end
OIII_norm_evol = OIII_norm_evol[:,2:end]
@gp "set grid" :-
for id in 1:length(chosen_epochs)
@gp :- 1:Nloop OIII_norm_evol[id, :] "w lp t '$(chosen_epochs[id])'"
end
function cont_at(bestfit, λ)
A1 = bestfit[:qso_cont].norm.val - bestfit[:qso_cont].norm.unc
@@ -193,85 +220,105 @@ function cont_at(bestfit, λ)
end
tabellone = DataFrame(epoch=Int[], date=String[], galaxy=Float64[],
contslope=Float64[], cont5100=Float64[], l5100=Float64[],
bb_Ha_norm=Float64[], bb_Ha_fwhm=Float64[], bb_Ha_voff=Float64[],
br_Ha_norm=Float64[], br_Ha_fwhm=Float64[], br_Ha_voff=Float64[],
na_Ha_norm=Float64[], na_Ha_fwhm=Float64[], na_Ha_voff=Float64[],
bb_Hb_norm=Float64[], bb_Hb_fwhm=Float64[], bb_Hb_voff=Float64[],
br_Hb_norm=Float64[], br_Hb_fwhm=Float64[], br_Hb_voff=Float64[],
na_Hb_norm=Float64[], na_Hb_fwhm=Float64[], na_Hb_voff=Float64[],
oiii_fwhm=Float64[], oiii_voff=Float64[])
for id in 1:length(bestfit.preds)
push!(tabellone,
(id, epoch_filenames[id][27:end-4],
bestfit[id][:galaxy].norm.val,
bestfit[id][:qso_cont].alpha.val, cont_at(bestfit[id], 4000)[1], 5100 * models[(id,:l5100)],
bestfit[id][:bb_Ha].norm.patched, bestfit[id][:bb_Ha].fwhm.patched, bestfit[id][:bb_Ha].voff.patched,
bestfit[id][:br_Ha].norm.patched, bestfit[id][:br_Ha].fwhm.patched, bestfit[id][:br_Ha].voff.patched,
bestfit[id][:na_Ha].norm.patched, bestfit[id][:na_Ha].fwhm.patched, bestfit[id][:na_Ha].voff.patched,
bestfit[id][:bb_Hb].norm.patched, bestfit[id][:bb_Hb].fwhm.patched, bestfit[id][:bb_Hb].voff.patched,
bestfit[id][:br_Hb].norm.patched, bestfit[id][:br_Hb].fwhm.patched, bestfit[id][:br_Hb].voff.patched,
bestfit[id][:na_Hb].norm.patched, bestfit[id][:na_Hb].fwhm.patched, bestfit[id][:na_Hb].voff.patched,
bestfit[id][:OIII_5007].fwhm.patched, bestfit[id][:OIII_5007].voff.patched))
tab = DataFrame(epoch=Int[], date=String[], instr=String[], galaxy=Float64[],
contslope=Float64[],
c3500=Float64[], c5100=Float64[],
bb_Ha_norm=Float64[], bb_Ha_fwhm=Float64[], bb_Ha_voff=Float64[],
br_Ha_norm=Float64[], br_Ha_fwhm=Float64[], br_Ha_voff=Float64[],
na_Ha_norm=Float64[], na_Ha_fwhm=Float64[], na_Ha_voff=Float64[],
bb_Hb_norm=Float64[], bb_Hb_fwhm=Float64[], bb_Hb_voff=Float64[],
br_Hb_norm=Float64[], br_Hb_fwhm=Float64[], br_Hb_voff=Float64[],
na_Hb_norm=Float64[], na_Hb_fwhm=Float64[], na_Hb_voff=Float64[],
oiii_fwhm=Float64[], oiii_voff=Float64[])
for id in 1:length(res.bestfit.preds)
push!(tab,
(id, epoch_filenames[chosen_epochs[id]][27:end-4], "",
res.bestfit[id][:galaxy].norm.val,
res.bestfit[id][:qso_cont].alpha.val,
cont_at(res.bestfit[id], 3500)[1],
cont_at(res.bestfit[id], 5100)[1],
res.bestfit[id][:bb_Ha].norm.patched, res.bestfit[id][:bb_Ha].fwhm.patched, res.bestfit[id][:bb_Ha].voff.patched,
res.bestfit[id][:br_Ha].norm.patched, res.bestfit[id][:br_Ha].fwhm.patched, res.bestfit[id][:br_Ha].voff.patched,
res.bestfit[id][:na_Ha].norm.patched, res.bestfit[id][:na_Ha].fwhm.patched, res.bestfit[id][:na_Ha].voff.patched,
res.bestfit[id][:bb_Hb].norm.patched, res.bestfit[id][:bb_Hb].fwhm.patched, res.bestfit[id][:bb_Hb].voff.patched,
res.bestfit[id][:br_Hb].norm.patched, res.bestfit[id][:br_Hb].fwhm.patched, res.bestfit[id][:br_Hb].voff.patched,
res.bestfit[id][:na_Hb].norm.patched, res.bestfit[id][:na_Hb].fwhm.patched, res.bestfit[id][:na_Hb].voff.patched,
res.bestfit[id][:OIII_5007].fwhm.patched, res.bestfit[id][:OIII_5007].voff.patched))
end
dd = Date.(tabellone.date, Ref("yyyymmdd"))
tabellone[!, :day] .= getproperty.(dd - dd[1], :value)
tab[!, :l3500] .= [3500 .* Spline1D(domain(res.model[id])[:], res.model[id]())(3500.) for id in 1:length(chosen_epochs)]
tab[!, :l5100] .= [5100 .* Spline1D(domain(res.model[id])[:], res.model[id]())(5100.) for id in 1:length(chosen_epochs)]
tab[!, :Ha] .= tab.br_Ha_norm .+ tab.bb_Ha_norm .+ tab.na_Ha_norm
tab[!, :Hb] .= tab.br_Hb_norm .+ tab.bb_Hb_norm .+ tab.na_Hb_norm
dd = Date.(tab.date, Ref("yyyymmdd"))
tab[!, :day] .= getproperty.(dd - Date("2018-03-06") .+ Day(72), :value)
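A minimal sketch of the :day computation above, using only the standard-library Dates (the toy date strings are illustrative; the broadcast dot on the subtraction is added here for clarity):

    using Dates
    dd  = Date.(["20180306", "20180517"], Ref("yyyymmdd"))             # parse yyyymmdd strings
    day = getproperty.(dd .- Date("2018-03-06") .+ Day(72), :value)    # Day periods -> plain Ints
    # day == [72, 144]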
(d, c) = csvread("misc/tabula-2019-Trakhtenbrot-mod.csv", ',', header_exists=true)
tmp = DataFrame(collect(d), Symbol.(c))
tmp[!, :Date] .= string.(tmp.Date)
tab[!, :instr] .= ""
for i in 1:nrow(tab)
j = findfirst(tmp.Date .== tab[i, :date])
isnothing(j) && continue
tab[i, :instr] = tmp[j, :Inst]
end
f = FITS("1ES_1927p654_results_$(job).fits", "w")
write(f, tabellone)
write(f, tab)
close(f)
@gp( [bestfit[id][:qso_cont].alpha.val for id in 1:length(chosen_epochs)],
[bestfit[id][:br_Ha].norm.patched for id in 1:length(chosen_epochs)],
"w l")
@gp(:-,
[bestfit[id][:qso_cont].alpha.val for id in 1:length(chosen_epochs)],
[bestfit[id][:br_Hb].norm.patched for id in 1:length(chosen_epochs)],
"w l")
tab[!, :pt] .= 0
for i in 1:nrow(tab)
j = findfirst(sort(unique(tab.instr)) .== tab[i, :instr])
isnothing(j) && continue
tab[i, :pt] = j
end
cm = countmap(tab.instr)
setindex!.(Ref(cm), 1:length(cm), collect(keys(cm))[sortperm(collect(values(cm)), rev=true)])
tab.pt .= getindex.(Ref(cm), tab.instr)
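The three lines above replace the alphabetical instrument-to-point-type mapping with a frequency-ranked one, so the most common instrument gets gnuplot point type 1. A self-contained sketch of the idiom, assuming countmap comes from StatsBase (it may also be re-exported by one of the packages already loaded):

    using StatsBase                                      # for countmap
    instr = ["LT", "NOT", "LT", "GTC", "LT", "NOT"]      # toy instrument column
    cm = countmap(instr)                                 # Dict("LT"=>3, "NOT"=>2, "GTC"=>1)
    ranked = collect(keys(cm))[sortperm(collect(values(cm)), rev=true)]
    setindex!.(Ref(cm), 1:length(cm), ranked)            # overwrite counts with ranks: most frequent -> 1
    pt = getindex.(Ref(cm), instr)                       # per-row point type: [1, 2, 1, 3, 1, 2]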
@gp( [bestfit[id][:qso_cont].alpha.val for id in 1:length(chosen_epochs)],
[cont_at(bestfit[id], 5100)[1] for id in 1:length(chosen_epochs)],
"w l")
@gp( [cont_at(bestfit[id], 5100)[1] for id in 1:length(chosen_epochs)],
[bestfit[id][:br_Ha].norm.patched for id in 1:length(chosen_epochs)],
"w l")
@gp tab.contslope tab.Ha tab.pt "w lp t 'Ha' pt var ps 2" ylog=true
@gp :- tab.contslope tab.Hb tab.pt "w lp t 'Hb' pt var ps 2"
@gp :- tab.contslope tab.c5100 tab.pt "w lp t 'c5100' pt var ps 2"
@gp :- tab.contslope tab.l5100 tab.pt "w lp t 'l5100' pt var ps 2"
@gp :- tab.contslope tab.c3500 tab.pt "w lp t 'c3500' pt var ps 2"
@gp :- tab.contslope tab.l3500 tab.pt "w lp t 'l3500' pt var ps 2"
@gp( [models[(id, :l5100)] for id in 1:length(chosen_epochs)],
[bestfit[id][:br_Ha].norm.patched for id in 1:length(chosen_epochs)],
"w l")
@gp xlog=true ylog=true
@gp :- tab.c5100 tab.Ha tab.pt "w lp t 'c5100' pt var ps 2"
@gp :- tab.l5100 tab.Ha tab.pt "w lp t 'l5100' pt var ps 2"
@gp tabellone.day [cont_at(bestfit[id], 5100)[1] for id in 1:length(chosen_epochs)] "w l" ylog=true
@gp :- tabellone.day [bestfit[id][:br_Ha].norm.patched for id in 1:length(chosen_epochs)] .* 1e2 "w l"
@gp :- tabellone.day [bestfit[id][:br_Hb].norm.patched for id in 1:length(chosen_epochs)] .* 1e2 "w l"
@gp :- tabellone.day [models[(id, :l5100)] for id in 1:length(chosen_epochs)] "w l"
@gp tab.day tab.c5100 tab.pt "w lp t 'c5100' pt var ps 2" ylog=true
@gp :- tab.day tab.l5100 tab.pt "w lp t 'l5100' pt var ps 2"
@gp :- tab.day tab.Ha tab.pt "w lp t 'Ha' pt var ps 2"
@gp :- tab.day tab.Hb tab.pt "w lp t 'Hb' pt var ps 2"
@gp tabellone.day -3 .+ 1e-3 .*[cont_at(bestfit[id], 5100)[1] for id in 1:length(chosen_epochs)] "w lp"
@gp :- tabellone.day [bestfit[id][:qso_cont].alpha.val for id in 1:length(chosen_epochs)] "w lp"
@gp tab.day tab.contslope tab.pt "w lp t 'Slope' pt var ps 2"
@gp "set grid"
for id in 1:length(chosen_epochs)
#@gp :- models[(id, :x)] models[(id, :y)] "w l t '$id' lw 3"
@gp :- models[(id, :x)] models[(id, :qso_cont)] "w l t '$id' lw 2"
for i in 1:nrow(tab)
ss = "w l t '$(tab[i, :date])' dt $(tab[i, :pt]) lw 2"
@gp :- domain(res.model[i])[:] res.model[i](:qso_cont) ss
end
save(:prenorm, term="png size 800,600", output="output/evolution.png")
@gp "set grid"
for id in 1:length(chosen_epochs)
println(bestfit[id][:OIII_5007])
x = models[(id, :x)]
y = models[(id, :y)]
y0 = y .- models[(id, :OIII_5007)]
#@gp :- x y0 .- Spline1D(x, y0)(5007.) "w l t '$id'"
@gp :- x y .- Spline1D(x, y0)(5007.) "w l t '$id'"
for i in 1:length(chosen_epochs)
println(res.bestfit[i][:OIII_5007])
x = domain(res.model[i])[:]
y = res.model[i]()
y0 = y .- res.model[i](:OIII_5007)
ss = "w l t '$(tab[i, :date])' dt $(tab[i, :pt]) lw 2"
@gp :- x y .- Spline1D(x, y0)(5007.) ss
end
save(:prenorm, term="png size 800,600", output="output/evolution_oiii_norm.png")
@@ -2,7 +2,7 @@ module CL_1ES_1927p654
using QSFit, DataStructures, GFit, Statistics, Dierckx
import QSFit: AbstractSpectralLine, DefaultRecipe, CombinedLine, NarrowLine, BroadLine, BroadBaseLine, ComboBroadLine,
default_options, qso_cont_component, line_component, line_group_name, known_spectral_lines, fit, multi_fit, spectral_coverage
default_options, qso_cont_component, line_component, line_group_name, known_spectral_lines, fit, multi_fit, spectral_coverage, reduce
abstract type q1927p654 <: DefaultRecipe end
......
@@ -10,7 +10,7 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
preds = Vector{Prediction}()
for id in 1:Nspec
λ = source.domain[id][:]
pred = Prediction(source.domain[id], :Continuum => Reducer(sum, [:qso_cont]),
pred = Prediction(source.domain[id], :Continuum => reducer_sum([:qso_cont]),
:qso_cont => QSFit.qso_cont_component(TRecipe))
if source.options[:instr_broadening]
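Throughout the recipe files below, the explicit GFit Reducer constructor is replaced by the reducer_sum and @reducer helpers. A before/after sketch using only forms that appear in this changeset; it assumes a GFit model in scope as in the surrounding recipe code, and the old calls are kept as comments since the two forms target different GFit versions:

    # Old API (removed):
    #   add!(model, :main => Reducer(sum, [:Continuum, :Iron]))
    #   add!(model, :Iron => Reducer(() -> [0.], Symbol[]))
    # New API (added):
    add!(model, :main => reducer_sum([:Continuum, :Iron]))
    add!(model, :Iron => @reducer(() -> [0.]))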
@@ -30,14 +30,12 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
# Host galaxy template
if source.options[:use_host_template] &&
(minimum(λ) .< 5500 .< maximum(λ))
add!(model[id], :Continuum => Reducer(sum, [:qso_cont, :galaxy]),
add!(model[id], :Continuum => reducer_sum([:qso_cont, :galaxy]),
:galaxy => QSFit.hostgalaxy(source.options[:host_template]))
model[id][:galaxy].norm.val = Spline1D(λ, source.data[id].val, k=1, bc="error")(5500.)
if id != ref_id
model[id][:galaxy].norm.fixed = true
patch!(model) do m
m[id][:galaxy].norm = m[ref_id][:galaxy].norm
end
@patch! model m -> m[id][:galaxy].norm = m[ref_id][:galaxy].norm
end
end
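The same mechanical migration applies to parameter patching: patch!(model) do m ... end blocks become @patch! macro calls, and when the patch only involves components of a single prediction the prediction itself is passed (e.g. @patch! model[id] ...). A before/after sketch taken from the hunk above, assuming a multi-prediction GFit model in scope:

    # Old API (removed):
    #   patch!(model) do m
    #       m[id][:galaxy].norm = m[ref_id][:galaxy].norm
    #   end
    # New API (added):
    @patch! model m -> m[id][:galaxy].norm = m[ref_id][:galaxy].norm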
@@ -45,7 +43,7 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
if source.options[:use_balmer]
tmp = [:qso_cont, :balmer]
(:galaxy in keys(model[id])) && push!(tmp, :galaxy)
add!(model[id], :Continuum => Reducer(sum, tmp),
add!(model[id], :Continuum => reducer_sum(tmp),
:balmer => QSFit.balmercont(0.1, 0.5))
c = model[id][:balmer]
c.norm.val = 0.1
@@ -55,9 +53,7 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
c.ratio.fixed = false
c.ratio.low = 0.1
c.ratio.high = 1
patch!(model) do m
m[id][:balmer].norm *= m[id][:qso_cont].norm
end
@patch! model[id] m -> m[:balmer].norm *= m[:qso_cont].norm
end
end
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
@@ -128,13 +124,13 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
end
end
if length(iron_components) > 0
add!(model[id], :Iron => Reducer(sum, iron_components))
add!(model[id], :main => Reducer(sum, [:Continuum, :Iron]))
add!(model[id], :Iron => reducer_sum(iron_components))
add!(model[id], :main => reducer_sum([:Continuum, :Iron]))
evaluate!(model)
bestfit = fit!(model, only_id=id, source.data, minimizer=mzer); show(source.log, bestfit)
else
add!(model[id], :Iron => Reducer(() -> [0.], Symbol[]))
add!(model[id], :main => Reducer(sum, [:Continuum, :Iron]))
add!(model[id], :Iron => @reducer(() -> [0.]))
add!(model[id], :main => reducer_sum([:Continuum, :Iron]))
end
(:ironuv in keys(model[id])) && freeze(model[id], :ironuv)
(:ironoptbr in keys(model[id])) && freeze(model[id], :ironoptbr)
@@ -151,9 +147,9 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
resid = source.data[id].val - model[id]() # will be used to guess line normalization
add!(model[id], source.line_comps[id])
for (group, lnames) in QSFit.invert_dictionary(source.line_names[id])
add!(model[id], group => Reducer(sum, lnames))
add!(model[id], group => reducer_sum(lnames))
end
add!(model[id], :main => Reducer(sum, [:Continuum, :Iron, line_groups[id]...]))
add!(model[id], :main => reducer_sum([:Continuum, :Iron, line_groups[id]...]))
if haskey(model[id], :MgII_2798)
model[id][:MgII_2798].voff.low = -1000
@@ -199,50 +195,42 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
if haskey(model[id], :OIII_4959) &&
haskey(model[id], :OIII_5007)
model[id][:OIII_4959].voff.fixed = true
patch!(model) do m
m[id][:OIII_4959].voff = m[id][:OIII_5007].voff
end
@patch! model[id] m -> m[:OIII_4959].voff = m[:OIII_5007].voff
end
if haskey(model[id], :NII_6549) &&
haskey(model[id], :NII_6583)
model[id][:NII_6549].voff.fixed = true
patch!(model) do m
m[id][:NII_6549].voff = m[id][:NII_6583].voff
end
@patch! model[id] m -> m[:NII_6549].voff = m[:NII_6583].voff
end
if haskey(model[id], :OIII_5007_bw) &&
haskey(model[id], :OIII_5007)
patch!(model) do m
m[id][:OIII_5007_bw].voff += m[id][:OIII_5007].voff
m[id][:OIII_5007_bw].fwhm += m[id][:OIII_5007].fwhm
@patch! model[id] m -> begin
m[:OIII_5007_bw].voff += m[:OIII_5007].voff
m[:OIII_5007_bw].fwhm += m[:OIII_5007].fwhm
end
end
if haskey(model[id], :br_Hb) &&
haskey(model[id], :bb_Hb)
# Ensure luminosity at peak of the broad base component is
# smaller than the associated broad component:
model[id][:bb_Hb].norm.high = 1
model[id][:bb_Hb].norm.val = 0.5
patch!(model) do m
m[id][:bb_Hb].norm *= m[id][:br_Hb].norm / m[id][:br_Hb].fwhm * m[id][:bb_Hb].fwhm
end
@patch! model[id] m -> m[:bb_Hb].norm *= m[:br_Hb].norm / m[:br_Hb].fwhm * m[:bb_Hb].fwhm
end
if haskey(model[id], :br_Ha) &&
haskey(model[id], :bb_Ha)
# Ensure luminosity at peak of the broad base component is
# smaller than the associated broad component:
model[id][:bb_Ha].norm.high = 1
model[id][:bb_Ha].norm.val = 0.5
patch!(model) do m
m[id][:bb_Ha].norm *= m[id][:br_Ha].norm / m[id][:br_Ha].fwhm * m[id][:bb_Ha].fwhm
end
@patch! model[id] m -> m[:bb_Ha].norm *= m[:br_Ha].norm / m[:br_Ha].fwhm * m[:bb_Ha].fwhm
end
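A quick numerical check of the constraint the two patches above are meant to enforce: for profiles whose norm is the integrated flux, the peak scales as norm/FWHM, so with the free bb norm bounded in [0, 1] the patched broad-base peak can never exceed the broad-component peak. Plain Julia, no QSFit API involved; the numbers are illustrative:

    peak(norm, fwhm) = norm / fwhm                     # peak of an integrated-norm profile ∝ norm/FWHM
    br_norm, br_fwhm = 1.0, 3000.0                     # broad component
    bb_free, bb_fwhm = 0.5, 9000.0                     # broad base: free norm ≤ 1 (norm.high = 1)
    bb_norm = bb_free * br_norm / br_fwhm * bb_fwhm    # same expression as in the patch
    @assert peak(bb_norm, bb_fwhm) <= peak(br_norm, br_fwhm)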
# Avoid division by zero
model[id][:br_Ha].norm.low = 1.e-10
model[id][:na_Hg].norm.fixed = 1
model[id][:na_Hg].fwhm.fixed = 1
model[id][:na_Hg].voff.fixed = 1
@@ -251,17 +239,17 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
model[id][:bb_Hg].voff.fixed = 1
model[id][:br_Hg].fwhm.fixed = 1
model[id][:br_Hg].voff.fixed = 1
patch!(model) do m
m[id][:na_Hg].norm = m[id][:br_Hg].norm * (m[id][:na_Ha].norm / m[id][:br_Ha].norm)
m[id][:na_Hg].voff = m[id][:na_Ha].voff
m[id][:na_Hg].fwhm = m[id][:na_Ha].fwhm
@patch! model[id] m -> begin
m[:na_Hg].norm = m[:br_Hg].norm * (m[:na_Ha].norm / m[:br_Ha].norm)
m[:na_Hg].voff = m[:na_Ha].voff
m[:na_Hg].fwhm = m[:na_Ha].fwhm
m[id][:bb_Hg].norm = m[id][:br_Hg].norm * (m[id][:bb_Ha].norm / m[id][:br_Ha].norm)
m[id][:bb_Hg].voff = m[id][:bb_Ha].voff
m[id][:bb_Hg].fwhm = m[id][:bb_Ha].fwhm
m[:bb_Hg].norm = m[:br_Hg].norm * (m[:bb_Ha].norm / m[:br_Ha].norm)
m[:bb_Hg].voff = m[:bb_Ha].voff
m[:bb_Hg].fwhm = m[:bb_Ha].fwhm
m[id][:br_Hg].voff = m[id][:br_Ha].voff
m[id][:br_Hg].fwhm = m[id][:br_Ha].fwhm
m[:br_Hg].voff = m[:br_Ha].voff
m[:br_Hg].fwhm = m[:br_Ha].fwhm
end
model[id][:na_Hb].norm.fixed = 1
@@ -272,17 +260,17 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
model[id][:bb_Hb].voff.fixed = 1
model[id][:br_Hb].fwhm.fixed = 1
model[id][:br_Hb].voff.fixed = 1
patch!(model) do m
m[id][:na_Hb].norm = m[id][:br_Hb].norm * (m[id][:na_Ha].norm / m[id][:br_Ha].norm)
m[id][:na_Hb].voff = m[id][:na_Ha].voff
m[id][:na_Hb].fwhm = m[id][:na_Ha].fwhm
@patch! model[id] m -> begin
m[:na_Hb].norm = m[:br_Hb].norm * (m[:na_Ha].norm / m[:br_Ha].norm)
m[:na_Hb].voff = m[:na_Ha].voff
m[:na_Hb].fwhm = m[:na_Ha].fwhm
m[id][:bb_Hb].norm = m[id][:br_Hb].norm * (m[id][:bb_Ha].norm / m[id][:br_Ha].norm)
m[id][:bb_Hb].voff = m[id][:bb_Ha].voff
m[id][:bb_Hb].fwhm = m[id][:bb_Ha].fwhm
m[:bb_Hb].norm = m[:br_Hb].norm * (m[:bb_Ha].norm / m[:br_Ha].norm)
m[:bb_Hb].voff = m[:bb_Ha].voff
m[:bb_Hb].fwhm = m[:bb_Ha].fwhm
m[id][:br_Hb].voff = m[id][:br_Ha].voff
m[id][:br_Hb].fwhm = m[id][:br_Ha].fwhm
m[:br_Hb].voff = m[:br_Ha].voff
m[:br_Hb].fwhm = m[:br_Ha].fwhm
end
model[id][:OIII_5007].norm.fixed = 1
@@ -304,8 +292,8 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
tmp[Symbol(:unk, j)] = line_component(TRecipe, QSFit.UnkLine(5e3))
tmp[Symbol(:unk, j)].norm_integrated = source.options[:norm_integrated]
end
add!(model[id], :UnkLines => Reducer(sum, collect(keys(tmp))), tmp)
add!(model[id], :main => Reducer(sum, [:Continuum, :Iron, line_groups[id]..., :UnkLines]))
add!(model[id], :UnkLines => reducer_sum(collect(keys(tmp))), tmp)
add!(model[id], :main => reducer_sum([:Continuum, :Iron, line_groups[id]..., :UnkLines]))
evaluate!(model)
for j in 1:source.options[:n_unk]
freeze(model[id], Symbol(:unk, j))
@@ -314,8 +302,8 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
else
# Here we need a :UnkLines reducer, even when n_unk is 0
for id in 1:Nspec
add!(model[id], :UnkLines => Reducer(() -> [0.], Symbol[]))
add!(model[id], :main => Reducer(sum, [:Continuum, :Iron, line_groups[id]...]))
add!(model[id], :UnkLines => @reducer(() -> [0.]))
add!(model[id], :main => reducer_sum([:Continuum, :Iron, line_groups[id]...]))
end
end
evaluate!(model)
@@ -384,7 +372,7 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
end
end
end
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer)
# Disable "unknown" lines whose normalization uncertainty is larger
# than 3 times the normalization
@@ -407,11 +395,9 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
end
if needs_fitting
println(source.log, "\nRe-run fit...")
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer)
end
println(source.log, "\nFinal model and bestfit:")
show(source.log, model)
println(source.log)
show(source.log, bestfit)
@@ -419,5 +405,5 @@ function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
println(source.log, "\nElapsed time: $elapsed s")
QSFit.close_log(source)
return (model, bestfit)
return reduce(source, model, bestfit)
end
@@ -6,7 +6,7 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
# Initialize components and guess initial values
println(source.log, "\nFit continuum components...")
λ = source.domain[id][:]
model = Model(source.domain[id], :Continuum => Reducer(sum, [:qso_cont]),
model = Model(source.domain[id], :Continuum => reducer_sum([:qso_cont]),
:qso_cont => QSFit.qso_cont_component(TRecipe))
if source.options[:instr_broadening]
@@ -20,7 +20,7 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
# Host galaxy template
if source.options[:use_host_template] &&
(minimum(λ) .< 5500 .< maximum(λ))
add!(model, :Continuum => Reducer(sum, [:qso_cont, :galaxy]),
add!(model, :Continuum => reducer_sum([:qso_cont, :galaxy]),
:galaxy => QSFit.hostgalaxy(source.options[:host_template]))
model[:galaxy].norm.val = Spline1D(λ, source.data[id].val, k=1, bc="error")(5500.)
end
@@ -29,7 +29,7 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
if source.options[:use_balmer]
tmp = [:qso_cont, :balmer]
(:galaxy in keys(model)) && push!(tmp, :galaxy)
add!(model, :Continuum => Reducer(sum, tmp),
add!(model, :Continuum => reducer_sum(tmp),
:balmer => QSFit.balmercont(0.1, 0.5))
c = model[:balmer]
c.norm.val = 0.1
@@ -39,11 +39,9 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
c.ratio.fixed = false
c.ratio.low = 0.1
c.ratio.high = 1
patch!(model) do m
m[:balmer].norm *= m[:qso_cont].norm
end
@patch! model m -> m[:balmer].norm *= m[:qso_cont].norm
end
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
# QSO continuum renormalization
@@ -107,13 +105,13 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
end
end
if length(iron_components) > 0
add!(model, :Iron => Reducer(sum, iron_components))
add!(model, :main => Reducer(sum, [:Continuum, :Iron]))
add!(model, :Iron => reducer_sum(iron_components))
add!(model, :main => reducer_sum([:Continuum, :Iron]))
evaluate!(model)
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
else
add!(model, :Iron => Reducer(() -> [0.], Symbol[]))
add!(model, :main => Reducer(sum, [:Continuum, :Iron]))
add!(model, :Iron => @reducer(() -> [0.]))
add!(model, :main => reducer_sum([:Continuum, :Iron]))
end
(:ironuv in keys(model)) && freeze(model, :ironuv)
(:ironoptbr in keys(model)) && freeze(model, :ironoptbr)
@@ -127,9 +125,9 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
resid = source.data[id].val - model() # will be used to guess line normalization
add!(model, source.line_comps[id])
for (group, lnames) in QSFit.invert_dictionary(source.line_names[id])
add!(model, group => Reducer(sum, lnames))
add!(model, group => reducer_sum(lnames))
end
add!(model, :main => Reducer(sum, [:Continuum, :Iron, line_groups...]))
add!(model, :main => reducer_sum([:Continuum, :Iron, line_groups...]))
if haskey(model, :MgII_2798)
model[:MgII_2798].voff.low = -1000
@@ -174,46 +172,97 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
# Patch parameters
if haskey(model, :OIII_4959) &&
haskey(model, :OIII_5007)
model[:OIII_4959].norm.fixed = true
model[:OIII_4959].voff.fixed = true
patch!(model) do m
@patch! model m -> begin
m[:OIII_4959].norm = m[:OIII_5007].norm / 3
m[:OIII_4959].voff = m[:OIII_5007].voff
end
end
if haskey(model, :OIII_5007_bw) &&
haskey(model, :OIII_5007)
@patch! model m -> begin
m[:OIII_5007_bw].voff += m[:OIII_5007].voff
m[:OIII_5007_bw].fwhm += m[:OIII_5007].fwhm
end
end
if haskey(model, :OI_6300) &&
haskey(model, :OI_6364)
# model[:OI_6300].norm.fixed = true
model[:OI_6300].voff.fixed = true
@patch! model m -> begin
# m[:OI_6300].norm = m[:OI_6364].norm / 3
m[:OI_6300].voff = m[:OI_6364].voff
end
end
if haskey(model, :NII_6549) &&
haskey(model, :NII_6583)
# model[:NII_6549].norm.fixed = true
model[:NII_6549].voff.fixed = true
patch!(model) do m
@patch! model m -> begin
# m[:NII_6549].norm = m[:NII_6583].norm / 3
m[:NII_6549].voff = m[:NII_6583].voff
end
end
if haskey(model, :OIII_5007_bw) &&
haskey(model, :OIII_5007)
patch!(model) do m
m[:OIII_5007_bw].voff += m[:OIII_5007].voff
m[:OIII_5007_bw].fwhm += m[:OIII_5007].fwhm
if haskey(model, :SII_6716) &&
haskey(model, :SII_6731)
# model[:SII_6716].norm.fixed = true
model[:SII_6716].voff.fixed = true
@patch! model m -> begin
# m[:SII_6716].norm = m[:SII_6731].norm / 1.5
m[:SII_6716].voff = m[:SII_6731].voff
end
end
if haskey(model, :na_Ha) &&
haskey(model, :na_Hb)
model[:na_Hb].voff.fixed = true
@patch! model m -> m[:na_Hb].voff = m[:na_Ha].voff
end
# The following are required to avoid degeneracy with iron
# template
if haskey(model, :Hg) &&
haskey(model, :br_Hb)
model[:Hg].voff.fixed = true
model[:Hg].fwhm.fixed = true
@patch! model m -> begin
m[:Hg].voff = m[:br_Hb].voff
m[:Hg].fwhm = m[:br_Hb].fwhm
end
end
if haskey(model, :br_Hg) &&
haskey(model, :br_Hb)
model[:br_Hg].voff.fixed = true
model[:br_Hg].fwhm.fixed = true
@patch! model m -> begin
m[:br_Hg].voff = m[:br_Hb].voff
m[:br_Hg].fwhm = m[:br_Hb].fwhm
end
end
if haskey(model, :na_Hg) &&
haskey(model, :na_Hb)
model[:na_Hg].voff.fixed = true
model[:na_Hg].fwhm.fixed = true
@patch! model m -> begin
m[:na_Hg].voff = m[:na_Hb].voff
m[:na_Hg].fwhm = m[:na_Hb].fwhm
end
end
# Ensure luminosity at peak of the broad base component is
# smaller than the associated broad component:
if haskey(model, :br_Hb) &&
haskey(model, :bb_Hb)
# Ensure luminosity at peak of the broad base component is
# smaller than the associated broad component:
model[:bb_Hb].norm.high = 1
model[:bb_Hb].norm.val = 0.5
patch!(model) do m
m[:bb_Hb].norm *= m[:br_Hb].norm / m[:br_Hb].fwhm * m[:bb_Hb].fwhm
end
@patch! model m -> m[:bb_Hb].norm *= m[:br_Hb].norm / m[:br_Hb].fwhm * m[:bb_Hb].fwhm
end
if haskey(model, :br_Ha) &&
haskey(model, :bb_Ha)
# Ensure luminosity at peak of the broad base component is
# smaller than the associated broad component:
model[:bb_Ha].norm.high = 1
model[:bb_Ha].norm.val = 0.5
patch!(model) do m
m[:bb_Ha].norm *= m[:br_Ha].norm / m[:br_Ha].fwhm * m[:bb_Ha].fwhm
end
@patch! model m -> m[:bb_Ha].norm *= m[:br_Ha].norm / m[:br_Ha].fwhm * m[:bb_Ha].fwhm
end
# Avoid division by zero
@@ -227,7 +276,7 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
model[:bb_Hg].voff.fixed = 1
model[:br_Hg].fwhm.fixed = 1
model[:br_Hg].voff.fixed = 1
patch!(model) do m
@patch! model m -> begin
m[:na_Hg].norm = m[:br_Hg].norm * (m[:na_Ha].norm / m[:br_Ha].norm)
m[:na_Hg].voff = m[:na_Ha].voff
m[:na_Hg].fwhm = m[:na_Ha].fwhm
@@ -248,7 +297,7 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
model[:bb_Hb].voff.fixed = 1
model[:br_Hb].fwhm.fixed = 1
model[:br_Hb].voff.fixed = 1
patch!(model) do m
@patch! model m -> begin
m[:na_Hb].norm = m[:br_Hb].norm * (m[:na_Ha].norm / m[:br_Ha].norm)
m[:na_Hb].voff = m[:na_Ha].voff
m[:na_Hb].fwhm = m[:na_Ha].fwhm
@@ -274,8 +323,8 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
tmp[Symbol(:unk, j)] = line_component(TRecipe, QSFit.UnkLine(5e3))
tmp[Symbol(:unk, j)].norm_integrated = source.options[:norm_integrated]
end
add!(model, :UnkLines => Reducer(sum, collect(keys(tmp))), tmp)
add!(model, :main => Reducer(sum, [:Continuum, :Iron, line_groups..., :UnkLines]))
add!(model, :UnkLines => reducer_sum(collect(keys(tmp))), tmp)
add!(model, :main => reducer_sum([:Continuum, :Iron, line_groups..., :UnkLines]))
evaluate!(model)
for j in 1:source.options[:n_unk]
freeze(model, Symbol(:unk, j))
@@ -321,6 +370,7 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
end
evaluate!(model)
# ----------------------------------------------------------------
# Last run with all parameters free
println(source.log, "\nLast run with all parameters free...")
thaw(model, :qso_cont)
@@ -341,7 +391,7 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
freeze(model, cname)
end
end
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer)
# Disable "unknown" lines whose normalization uncertainty is larger
# than 3 times the normalization
@@ -362,11 +412,9 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
end
if needs_fitting
println(source.log, "\nRe-run fit...")
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer)
end
println(source.log, "\nFinal model and bestfit:")
show(source.log, model)
println(source.log)
show(source.log, bestfit)
@@ -374,5 +422,5 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
println(source.log, "\nElapsed time: $elapsed s")
QSFit.close_log(source)
return (model, bestfit)
return reduce(source, model, bestfit)
end