Commits on Source (27)
@@ -4,6 +4,7 @@ authors = ["Giorgio Calderone <giorgio.calderone@gmail.com>"]
version = "0.1.0"
[deps]
CMPFit = "a5944310-3432-5d93-8bb2-e3b5eb62a58f"
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
Dierckx = "39dd38d3-220a-591b-8e3c-4c3a8c710a94"
GFit = "ac42e6ba-c0bf-4fcf-827d-deea44b16255"
@@ -77,7 +77,6 @@ chosen_epoch = 17
file = epochs[chosen_epoch]
source = QSO{q1927p654}("1ES 1927+654 ($chosen_epoch)", 0.019422, ebv=0.077);
source.options[:min_spectral_coverage][:OIII_5007] = 0.5
source.options[:norm_integrated] = false
# source.options[:host_template] = "/home/gcalderone/Mbi1.30Zm1.49T00.0700_iTp0.00_baseFe_linear_FWHM_2.51"
add_spec!(source, Spectrum(Val(:ASCII), file, columns=[1,2]));
source.data[1].val .*= 1e17;
@@ -93,12 +92,12 @@ source.options[:min_spectral_coverage][:OIII_5007] = 0.5
add_spec!(source, Spectrum(Val(:ASCII), "AT2018zf/AT2018zf_optspec_20180716.dat", columns=[1,2]));
add_spec!(source, Spectrum(Val(:ASCII), "AT2018zf/AT2018zf_optspec_20180528.dat", columns=[1,2]));
add_spec!(source, Spectrum(Val(:ASCII), "AT2018zf/AT2018zf_optspec_20180812.dat", columns=[1,2]));
source.options[:norm_integrated] = false
add_spec!(source, Spectrum(Val(:ASCII), "AT2018zf/AT2018zf_optspec_20181113.dat", columns=[1,2]));
for id in 1:length(source.domain)
source.data[id].val .*= 1e17;
source.data[id].unc .= 0.05 .* source.data[id].val;
end
(model, bestfit) = multiepoch_fit(source);
(model, bestfit) = multi_fit(source);
viewer(model, source.data, bestfit, selcomps=[:qso_cont, :galaxy, :balmer])
```
@@ -107,6 +106,6 @@ viewer(model, source.data, bestfit, selcomps=[:qso_cont, :galaxy, :balmer])
The default QSFit analysis recipes are implemented here:
- [single epoch](https://github.com/gcalderone/QSFit.jl/blob/master/src/DefaultRecipe.jl)
- [multi epoch](https://github.com/gcalderone/QSFit.jl/blob/master/src/DefaultRecipe_multiepoch.jl)
- [multi epoch](https://github.com/gcalderone/QSFit.jl/blob/master/src/DefaultRecipe_multi.jl)
This package implements a custom recipe named `q1927p654` whose code is available in `src/CL_1ES_1927p654.jl`.
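For reference, a custom recipe is essentially an abstract subtype of `DefaultRecipe` with specialized methods. A minimal, hypothetical skeleton is sketched below (the module name, the dictionary element type and the two example lines are illustrative only; see `src/CL_1ES_1927p654.jl` for the actual implementation):

```julia
module MyCustomRecipe

using QSFit, GFit, DataStructures
import QSFit: AbstractSpectralLine, DefaultRecipe, NarrowLine, BroadLine,
              known_spectral_lines

# A recipe is an abstract subtype of DefaultRecipe; QSFit methods such as
# known_spectral_lines, fit and multi_fit can then be specialized for it.
abstract type my_recipe <: DefaultRecipe end

function known_spectral_lines(::Type{T}) where T <: my_recipe
    # Build the list of emission lines to be fitted by this recipe (illustrative subset)
    list = OrderedDict{Symbol, AbstractSpectralLine}()
    list[:HeII     ] = BroadLine( 4686.)
    list[:OIII_5007] = NarrowLine(5008.240)
    return list
end

end
```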
using Pkg
Pkg.activate(".")
Z = 0.019422
EBV = 0.077
using Revise, Statistics, Serialization, Dierckx, DataFrames
using QSFit, GFit, Gnuplot, GFitViewer, MyAstroUtils
using CL_1ES_1927p654
using Dates
epoch_filenames = Vector{String}()
for (root, dirs, files) in walkdir("AT2018zf")
for file in files
if file[end-3:end] == ".dat"
push!(epoch_filenames, root * "/" * file)
end
end
end
function read_spec(epoch_id; kw...)
if epoch_id == "B03"
file = "boller2003.txt"
else
file = epoch_filenames[epoch_id]
end
println(file)
spec = Spectrum(Val(:ASCII), file, columns=[1,2]; kw...);
if isa(epoch_id, Number)
spec.flux ./= all_scale[epoch_id]
end
spec.err .= 0.05 .* spec.flux;
return spec
end
source = QSO{q1927p654}("1ES 1927+654 (Boller03)", Z, ebv=EBV);
source.options[:min_spectral_coverage][:OIII_5007] = 0.35
# source.options[:host_template] = "/home/gcalderone/Mbi1.30Zm1.49T00.0700_iTp0.00_baseFe_linear_FWHM_2.51"
#=
Resolution: from Sect. 3 it is 6 / 5007 * 3e5 ≈ 360 km/s;
however, with this value the [OIII] FWHM hits the 100 km/s lower limit.
I tried 180, 200 and 250, and the [OIII] luminosity does not change.
=#
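# Quick check of the number quoted above (throw-away snippet):
# a 6 Å width at 5007 Å corresponds to 6 / 5007 * 3e5 ≈ 360 km/s.
println("R ≈ ", round(6 / 5007 * 3e5, digits=1), " km/s")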
spec = read_spec("B03", resolution=200.)
add_spec!(source, spec);
(model, bestfit) = fit(source);
viewer(model, source, bestfit, showcomps=[:qso_cont, :galaxy, :balmer])
println( bestfit[:OIII_5007].norm.val )
# 0.00029085925218136
println( bestfit[:OIII_5007].norm.val / bestfit[:galaxy].norm.val )
# 59.406476855719376
all_scale = fill(1., length(epoch_filenames))
all_fwhm = fill(NaN,length(epoch_filenames))
for id in 1:length(epoch_filenames)
spec = read_spec(id)
all_scale[id] *= median(spec.flux) / 25000
try
source = QSO{q1927p654}("1ES 1927+654 ($id)", Z, ebv=EBV);
source.options[:min_spectral_coverage][:OIII_5007] = 0.5
# source.options[:host_template] = "/home/gcalderone/Mbi1.30Zm1.49T00.0700_iTp0.00_baseFe_linear_FWHM_2.51"
spec = read_spec(id)
add_spec!(source, spec);
(model, bestfit) = fit(source);
viewer(model, source, bestfit, showcomps=[:qso_cont, :galaxy, :balmer],
filename="output/results_$(id).html")
# Update all_scale and store FWHM
@info bestfit[:OIII_5007].norm.val
all_scale[id] *= bestfit[:OIII_5007].norm.val
all_fwhm[ id] = bestfit[:OIII_5007].fwhm.val
catch
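# a failed fit leaves all_fwhm[id] = NaN; those epochs are masked just below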
end
end
all_scale[.!isfinite.(all_fwhm)] .= NaN
serialize("output/scale_fwhm.dat", (all_scale, all_fwhm))
@gp :prenorm "set key bottom right" "set grid" xlab="Epoch" ylab="Value" :-
@gp :- :prenorm all_scale./1e-18 "w lp t '[OIII] norm.'" :-
@gp :- :prenorm all_fwhm./1000 "w lp t '[OIII] FWHM (x1000 km/s)'"
save(:prenorm, term="png size 800,600", output="output/oiii_norm_fwhm.png")
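# For an observed [OIII] FWHM of 400 (and 900) km/s, plot the instrumental resolution
# (expressed as a Gaussian sigma, i.e. FWHM/2.355) required for each possible intrinsic
# FWHM, assuming the two widths add in quadrature.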
w = 10 .^(2:0.01:log10(400));
r = sqrt.(400^2 .- w.^2) ./ 2.355;
@gp w r "w l t '400 km/s'"
w = 10 .^(2:0.01:log10(900));
r = sqrt.(900^2 .- w.^2) ./ 2.355;
@gp :- w r "w l t '900 km/s'"
@gp :- "set grid" xlab="Actual FWHM" ylab="Instr. resolution"
# Ensure no sample is negative
@gp :allepochs "set grid" xr=[3e3,1e4] cbr=[1,29] :-
@gp :- :allepochs cblabel="Epoch" xlab="Rest frame wavelength [A]" ylab="Flux density [arb. units]"
for id in 1:length(epoch_filenames)
s = read_spec(id)
x = s.λ;
y = s.flux;
y .-= median(y)
@gp :- :allepochs x./(1+Z) y "u 1:(\$2+$id):($id) w l notit lc pal" :-
end
@gp :- :allepochs yr=[1, 29]
save(:allepochs, term="png size 800,600", output="output/all_epochs.png")
@gp :- :allepochs xr=[4900, 5100]
save(:allepochs, term="png size 800,2000", output="output/all_epochs_zoom.png")
dict_chosen_epochs = Dict(
:vecchi => [17, 10, 21, 23],
:lowres => [8,9,10,11,12,14,15,16,18,21,22,23,24,25],
:highres => [5, 7, 13, 17, 20],
:all => collect(5:26))
#=
TODO:
- neglect 9 for limited spectral coverage (it lacks the blue part)?
- the other odd-looking epochs are 3 and 16
=#
# Can't use a resolution smaller than 150 km/s, otherwise some lines
# will be neglected because of insufficient spectral coverage
resolution = Dict(
:lowres => 350.,
:highres => 150.,
:all => NaN)
for job in [:highres, :lowres]
chosen_epochs = dict_chosen_epochs[job]
source = QSO{q1927p654}("1ES 1927+654", Z, ebv=EBV);
source.options[:min_spectral_coverage][:OIII_5007] = 0.5
@gp :zoom "set grid" :-
for id in 1:length(chosen_epochs)
spec = read_spec(chosen_epochs[id], resolution=get(resolution, job, NaN))
add_spec!(source, spec);
@gp :- :zoom xr=[4750,5150] spec.λ ./ (1 + source.z) spec.flux "w l t '$(chosen_epochs[id])'"
end
(model, bestfit) = multi_fit(source);
viewer(model, source, bestfit, showcomps=[:qso_cont, :galaxy, :balmer],
filename="output/results_$(job).html")
viewer(model, source, bestfit, showcomps=[:qso_cont, :galaxy, :balmer],
filename="output/results_$(job)_rebin4.html", rebin=4)
models = Dict()
for id in 1:length(chosen_epochs)
models[(id, :x)] = domain(model[id])[:]
models[(id, :y)] = model[id]()
models[(id, :l5100)] = 5100 .* Spline1D(domain(model[id])[:], model[id]())(5100.)
for cname in [:qso_cont, :OIII_5007,
:br_Ha, :na_Ha, :bb_Ha,
:br_Hb, :na_Hb, :bb_Hb]
models[(id, cname)] = model[id](cname)
end
end
serialize("output/results_$(job).dat", (models, bestfit))
end
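# cont_at(bestfit, λ): value of λ times the :qso_cont power law at wavelength λ,
# together with a rough uncertainty taken from the extrema of the four
# (norm ± unc) × (power-law term with alpha ± unc) combinations.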
function cont_at(bestfit, λ)
A1 = bestfit[:qso_cont].norm.val - bestfit[:qso_cont].norm.unc
A = bestfit[:qso_cont].norm.val
A2 = bestfit[:qso_cont].norm.val + bestfit[:qso_cont].norm.unc
if A1 > A2
A1, A2 = A2, A1
end
@assert A1 <= A2
B1 = (λ / bestfit[:qso_cont].x0.val)^(bestfit[:qso_cont].alpha.val - bestfit[:qso_cont].alpha.unc)
B = (λ / bestfit[:qso_cont].x0.val)^(bestfit[:qso_cont].alpha.val)
B2 = (λ / bestfit[:qso_cont].x0.val)^(bestfit[:qso_cont].alpha.val + bestfit[:qso_cont].alpha.unc)
if B1 > B2
B1, B2 = B2, B1
end
@assert B1 <= B2
dd = extrema([A1*B1, A1*B2, A2*B1, A2*B2])
return λ * A * B, λ * (dd[2]-dd[1])
end
tabellone = DataFrame(epoch=Int[], date=String[], galaxy=Float64[],
contslope=Float64[], cont5100=Float64[], l5100=Float64[],
bb_Ha_norm=Float64[], bb_Ha_fwhm=Float64[], bb_Ha_voff=Float64[],
br_Ha_norm=Float64[], br_Ha_fwhm=Float64[], br_Ha_voff=Float64[],
na_Ha_norm=Float64[], na_Ha_fwhm=Float64[], na_Ha_voff=Float64[],
bb_Hb_norm=Float64[], bb_Hb_fwhm=Float64[], bb_Hb_voff=Float64[],
br_Hb_norm=Float64[], br_Hb_fwhm=Float64[], br_Hb_voff=Float64[],
na_Hb_norm=Float64[], na_Hb_fwhm=Float64[], na_Hb_voff=Float64[],
oiii_fwhm=Float64[], oiii_voff=Float64[])
for id in 1:length(bestfit.preds)
push!(tabellone,
(id, epoch_filenames[id][27:end-4],
bestfit[id][:galaxy].norm.val,
bestfit[id][:qso_cont].alpha.val, cont_at(bestfit[id], 4000)[1], 5100 * models[(id,:l5100)],
bestfit[id][:bb_Ha].norm.patched, bestfit[id][:bb_Ha].fwhm.patched, bestfit[id][:bb_Ha].voff.patched,
bestfit[id][:br_Ha].norm.patched, bestfit[id][:br_Ha].fwhm.patched, bestfit[id][:br_Ha].voff.patched,
bestfit[id][:na_Ha].norm.patched, bestfit[id][:na_Ha].fwhm.patched, bestfit[id][:na_Ha].voff.patched,
bestfit[id][:bb_Hb].norm.patched, bestfit[id][:bb_Hb].fwhm.patched, bestfit[id][:bb_Hb].voff.patched,
bestfit[id][:br_Hb].norm.patched, bestfit[id][:br_Hb].fwhm.patched, bestfit[id][:br_Hb].voff.patched,
bestfit[id][:na_Hb].norm.patched, bestfit[id][:na_Hb].fwhm.patched, bestfit[id][:na_Hb].voff.patched,
bestfit[id][:OIII_5007].fwhm.patched, bestfit[id][:OIII_5007].voff.patched))
end
dd = Date.(tabellone.date, Ref("yyyymmdd"))
tabellone[!, :day] .= getproperty.(dd .- dd[1], :value)
f = FITS("1ES_1927p654_results_$(job).fits", "w")
write(f, tabellone)
close(f)
@gp( [bestfit[id][:qso_cont].alpha.val for id in 1:length(chosen_epochs)],
[bestfit[id][:br_Ha].norm.patched for id in 1:length(chosen_epochs)],
"w l")
@gp(:-,
[bestfit[id][:qso_cont].alpha.val for id in 1:length(chosen_epochs)],
[bestfit[id][:br_Hb].norm.patched for id in 1:length(chosen_epochs)],
"w l")
@gp( [bestfit[id][:qso_cont].alpha.val for id in 1:length(chosen_epochs)],
[cont_at(bestfit[id], 5100)[1] for id in 1:length(chosen_epochs)],
"w l")
@gp( [cont_at(bestfit[id], 5100)[1] for id in 1:length(chosen_epochs)],
[bestfit[id][:br_Ha].norm.patched for id in 1:length(chosen_epochs)],
"w l")
@gp( [models[(id, :l5100)] for id in 1:length(chosen_epochs)],
[bestfit[id][:br_Ha].norm.patched for id in 1:length(chosen_epochs)],
"w l")
@gp tabellone.day [cont_at(bestfit[id], 5100)[1] for id in 1:length(chosen_epochs)] "w l" ylog=true
@gp :- tabellone.day [bestfit[id][:br_Ha].norm.patched for id in 1:length(chosen_epochs)] .* 1e2 "w l"
@gp :- tabellone.day [bestfit[id][:br_Hb].norm.patched for id in 1:length(chosen_epochs)] .* 1e2 "w l"
@gp :- tabellone.day [models[(id, :l5100)] for id in 1:length(chosen_epochs)] "w l"
@gp tabellone.day -3 .+ 1e-3 .*[cont_at(bestfit[id], 5100)[1] for id in 1:length(chosen_epochs)] "w lp"
@gp :- tabellone.day [bestfit[id][:qso_cont].alpha.val for id in 1:length(chosen_epochs)] "w lp"
@gp "set grid"
for id in 1:length(chosen_epochs)
#@gp :- models[(id, :x)] models[(id, :y)] "w l t '$id' lw 3"
@gp :- models[(id, :x)] models[(id, :qso_cont)] "w l t '$id' lw 2"
end
save(:prenorm, term="png size 800,600", output="output/evolution.png")
@gp "set grid"
for id in 1:length(chosen_epochs)
println(bestfit[id][:OIII_5007])
x = models[(id, :x)]
y = models[(id, :y)]
y0 = y .- models[(id, :OIII_5007)]
#@gp :- x y0 .- Spline1D(x, y0)(5007.) "w l t '$id'"
@gp :- x y .- Spline1D(x, y0)(5007.) "w l t '$id'"
end
save(:prenorm, term="png size 800,600", output="output/evolution_oiii_norm.png")
@@ -2,7 +2,7 @@ module CL_1ES_1927p654
using QSFit, DataStructures, GFit, Statistics, Dierckx
import QSFit: AbstractSpectralLine, DefaultRecipe, CombinedLine, NarrowLine, BroadLine, BroadBaseLine, ComboBroadLine,
default_options, qso_cont_component, line_component, line_group_name, known_spectral_lines, fit, multiepoch_fit, spectral_coverage
default_options, qso_cont_component, line_component, line_group_name, known_spectral_lines, fit, multi_fit, spectral_coverage
abstract type q1927p654 <: DefaultRecipe end
@@ -59,11 +59,11 @@ function known_spectral_lines(::Type{T}) where T <: q1927p654
list[:NeIII_3869 ] = NarrowLine( 3869.81 )
#list[:Hd ] = BroadLine( 4102.89 )
list[:Hg ] = CombinedLine( 4341.68 )
list[:Hg_base ] = BroadBaseLine( 4341.68 )
#list[:OIII_4363 ] = NarrowLine( 4363.00 ) # TODO: Check wavelength is correct
list[:bb_Hg ] = BroadBaseLine( 4341.68 )
list[:OIII_4363 ] = NarrowLine( 4363.00 ) # TODO: Check wavelength is correct
list[:HeII ] = BroadLine( 4686. )
list[:Hb ] = CombinedLine( 4862.68 )
list[:Hb_base ] = BroadBaseLine( 4862.68 )
list[:bb_Hb ] = BroadBaseLine( 4862.68 )
list[:OIII_4959 ] = NarrowLine( 4960.295)
list[:OIII_5007 ] = NarrowLine( 5008.240) # AsymmNarrowLine
#list[:OIII_5007_bw] = NarrowLine( 5008.240)
@@ -72,7 +72,7 @@ function known_spectral_lines(::Type{T}) where T <: q1927p654
list[:OI_6364 ] = NarrowLine( 6364.00 ) # TODO: Check wavelength is correct
list[:NII_6549 ] = NarrowLine( 6549.86 )
list[:Ha ] = CombinedLine( 6564.61 )
list[:Ha_base ] = BroadBaseLine( 6564.61 )
list[:bb_Ha ] = BroadBaseLine( 6564.61 )
list[:NII_6583 ] = NarrowLine( 6585.27 )
list[:SII_6716 ] = NarrowLine( 6718.29 )
list[:SII_6731 ] = NarrowLine( 6732.67 )
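# Sum all component evaluations and scale the result by a per-epoch calibration factor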
calibsum(calib, args...) = calib .* (.+(args...))
function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
Nspec = length(source.domain)
elapsed = time()
mzer = GFit.cmpfit()
mzer.config.ftol = mzer.config.gtol = mzer.config.xtol = 1.e-6
# Arrays containing best fit values to be constrained across epochs
galaxy_best = Vector{Float64}()
galaxy_unc = Vector{Float64}()
OIII_best = Vector{Float64}()
OIII_unc = Vector{Float64}()
# Initialize components and guess initial values
println(source.log, "\nFit continuum components...")
preds = Vector{Prediction}()
@@ -37,10 +28,17 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
λ = source.domain[id][:]
# Host galaxy template
if source.options[:use_host_template]
if source.options[:use_host_template] &&
(minimum(λ) .< 5500 .< maximum(λ))
add!(model[id], :Continuum => Reducer(sum, [:qso_cont, :galaxy]),
:galaxy => QSFit.hostgalaxy(source.options[:host_template]))
model[id][:galaxy].norm.val = Spline1D(λ, source.data[id].val, k=1, bc="error")(5500.)
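# Tie the host-galaxy normalization of every epoch to that of the reference epoch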
if id != ref_id
model[id][:galaxy].norm.fixed = true
patch!(model) do m
m[id][:galaxy].norm = m[ref_id][:galaxy].norm
end
end
end
# Balmer continuum and pseudo-continuum
@@ -55,29 +53,26 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
c.norm.high = 0.5
c.ratio.val = 0.5
c.ratio.fixed = false
c.ratio.low = 0.3
c.ratio.low = 0.1
c.ratio.high = 1
patch!(model) do m
m[id][:balmer].norm *= m[id][:qso_cont].norm
end
end
end
for id in 1:Nspec
bestfit = GFit.fit!(model, only_id=id, source.data, minimizer=mzer); show(source.log, bestfit)
push!(galaxy_best, bestfit[id][:galaxy].norm.val)
push!(galaxy_unc , bestfit[id][:galaxy].norm.unc)
end
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
# QSO continuum renormalization
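# Lower the continuum normalization until it lies above ~90% of the data points
# (i.e. ~90% of the residuals are negative), giving up once it has dropped below
# 1/5 of its initial value.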
for id in 1:Nspec
freeze(model[id], :qso_cont)
c = model[id][:qso_cont]
initialnorm = c.norm.val
if c.norm.val > 0
println(source.log, "$id: Cont. norm. (before): ", c.norm.val)
while true
residuals = (model[id]() - source.data[id].val) ./ source.data[id].unc
(count(residuals .< 0) / length(residuals) > 0.9) && break
(c.norm.val < initialnorm / 5) && break # give up
c.norm.val *= 0.99
evaluate!(model)
end
@@ -105,7 +100,7 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
threshold = get(source.options[:min_spectral_coverage], :ironuv, source.options[:min_spectral_coverage][:default])
if coverage >= threshold
add!(model[id], :ironuv => comp)
model[:ironuv].norm.val = 0.5
model[id][:ironuv].norm.val = 0.5
push!(iron_components, :ironuv)
else
println(source.log, "Ignoring ironuv component on prediction $id (threshold: $threshold)")
@@ -136,7 +131,7 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
add!(model[id], :Iron => Reducer(sum, iron_components))
add!(model[id], :main => Reducer(sum, [:Continuum, :Iron]))
evaluate!(model)
bestfit = GFit.fit!(model, only_id=id, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, only_id=id, source.data, minimizer=mzer); show(source.log, bestfit)
else
add!(model[id], :Iron => Reducer(() -> [0.], Symbol[]))
add!(model[id], :main => Reducer(sum, [:Continuum, :Iron]))
@@ -223,26 +218,47 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
end
end
patch!(model) do m
m[id][:Ha_base].fwhm += m[id][:br_Ha].fwhm
if haskey(model[id], :br_Hb) &&
haskey(model[id], :bb_Hb)
# Ensure the peak luminosity of the broad base component is
# smaller than that of the associated broad component:
model[id][:bb_Hb].norm.high = 1
model[id][:bb_Hb].norm.val = 0.5
patch!(model) do m
m[id][:bb_Hb].norm *= m[id][:br_Hb].norm / m[id][:br_Hb].fwhm * m[id][:bb_Hb].fwhm
end
end
if haskey(model[id], :br_Ha) &&
haskey(model[id], :bb_Ha)
# Ensure the peak luminosity of the broad base component is
# smaller than that of the associated broad component:
model[id][:bb_Ha].norm.high = 1
model[id][:bb_Ha].norm.val = 0.5
patch!(model) do m
m[id][:bb_Ha].norm *= m[id][:br_Ha].norm / m[id][:br_Ha].fwhm * m[id][:bb_Ha].fwhm
end
end
model[id][:na_Hg].norm.fixed = 1
model[id][:na_Hg].fwhm.fixed = 1
model[id][:na_Hg].voff.fixed = 1
model[id][:Hg_base].norm.fixed = 1
model[id][:Hg_base].fwhm.fixed = 1
model[id][:Hg_base].voff.fixed = 1
model[id][:br_Hg].fwhm.fixed = 1
model[id][:br_Hg].voff.fixed = 1
# Avoid division by zero
model[id][:br_Ha].norm.low = 1.e-10
model[id][:na_Hg].norm.fixed = 1
model[id][:na_Hg].fwhm.fixed = 1
model[id][:na_Hg].voff.fixed = 1
model[id][:bb_Hg].norm.fixed = 1
model[id][:bb_Hg].fwhm.fixed = 1
model[id][:bb_Hg].voff.fixed = 1
model[id][:br_Hg].fwhm.fixed = 1
model[id][:br_Hg].voff.fixed = 1
patch!(model) do m
m[id][:na_Hg].norm = m[id][:br_Hg].norm * (m[id][:na_Ha].norm / m[id][:br_Ha].norm)
m[id][:na_Hg].norm = m[id][:br_Hg].norm * (m[id][:na_Ha].norm / m[id][:br_Ha].norm)
m[id][:na_Hg].voff = m[id][:na_Ha].voff
m[id][:na_Hg].fwhm = m[id][:na_Ha].fwhm
m[id][:Hg_base].norm = m[id][:br_Hg].norm * (m[id][:Ha_base].norm / m[id][:br_Ha].norm)
m[id][:Hg_base].voff = m[id][:Ha_base].voff
m[id][:Hg_base].fwhm = m[id][:Ha_base].fwhm
m[id][:bb_Hg].norm = m[id][:br_Hg].norm * (m[id][:bb_Ha].norm / m[id][:br_Ha].norm)
m[id][:bb_Hg].voff = m[id][:bb_Ha].voff
m[id][:bb_Hg].fwhm = m[id][:bb_Ha].fwhm
m[id][:br_Hg].voff = m[id][:br_Ha].voff
m[id][:br_Hg].fwhm = m[id][:br_Ha].fwhm
@@ -251,27 +267,28 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
model[id][:na_Hb].norm.fixed = 1
model[id][:na_Hb].fwhm.fixed = 1
model[id][:na_Hb].voff.fixed = 1
model[id][:Hb_base].norm.fixed = 1
model[id][:Hb_base].fwhm.fixed = 1
model[id][:Hb_base].voff.fixed = 1
model[id][:bb_Hb].norm.fixed = 1
model[id][:bb_Hb].fwhm.fixed = 1
model[id][:bb_Hb].voff.fixed = 1
model[id][:br_Hb].fwhm.fixed = 1
model[id][:br_Hb].voff.fixed = 1
patch!(model) do m
m[id][:na_Hb].norm = m[id][:br_Hb].norm * (m[id][:na_Ha].norm / m[id][:br_Ha].norm)
m[id][:na_Hb].voff = m[id][:na_Ha].voff
m[id][:na_Hb].fwhm = m[id][:na_Ha].fwhm
m[id][:na_Hb].norm = m[id][:br_Hb].norm * (m[id][:na_Ha].norm / m[id][:br_Ha].norm)
m[id][:na_Hb].voff = m[id][:na_Ha].voff
m[id][:na_Hb].fwhm = m[id][:na_Ha].fwhm
m[id][:Hb_base].norm = m[id][:br_Hb].norm * (m[id][:Ha_base].norm / m[id][:br_Ha].norm)
m[id][:Hb_base].voff = m[id][:Ha_base].voff
m[id][:Hb_base].fwhm = m[id][:Ha_base].fwhm
m[id][:bb_Hb].norm = m[id][:br_Hb].norm * (m[id][:bb_Ha].norm / m[id][:br_Ha].norm)
m[id][:bb_Hb].voff = m[id][:bb_Ha].voff
m[id][:bb_Hb].fwhm = m[id][:bb_Ha].fwhm
m[id][:br_Hb].voff = m[id][:br_Ha].voff
m[id][:br_Hb].fwhm = m[id][:br_Ha].fwhm
m[id][:br_Hb].voff = m[id][:br_Ha].voff
m[id][:br_Hb].fwhm = m[id][:br_Ha].fwhm
end
bestfit = GFit.fit!(model, only_id=id, source.data, minimizer=mzer); show(source.log, bestfit)
push!(OIII_best, bestfit[id][:OIII_5007].norm.val)
push!(OIII_unc , bestfit[id][:OIII_5007].norm.unc)
model[id][:OIII_5007].norm.fixed = 1
model[id][:OIII_5007].norm.val = 1.
bestfit = fit!(model, only_id=id, source.data, minimizer=mzer); show(source.log, bestfit)
for lname in line_names[id]
freeze(model[id], lname)
@@ -282,9 +299,9 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
println(source.log, "\nFit unknown emission lines...")
if source.options[:n_unk] > 0
for id in 1:Nspec
tmp = OrderedDict{Symbol, AbstractComponent}()
tmp = OrderedDict{Symbol, GFit.AbstractComponent}()
for j in 1:source.options[:n_unk]
tmp[Symbol(:unk, j)] = line_component(TRecipe, UnkLine(5e3))
tmp[Symbol(:unk, j)] = line_component(TRecipe, QSFit.UnkLine(5e3))
tmp[Symbol(:unk, j)].norm_integrated = source.options[:norm_integrated]
end
add!(model[id], :UnkLines => Reducer(sum, collect(keys(tmp))), tmp)
@@ -298,7 +315,7 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
# Here we need a :UnkLines reducer, even when n_unk is 0
for id in 1:Nspec
add!(model[id], :UnkLines => Reducer(() -> [0.], Symbol[]))
add!(model[id], :main => Reducer(sum, [:Continuum, :Iron, line_groups[id]..., :UnkLines]))
add!(model[id], :main => Reducer(sum, [:Continuum, :Iron, line_groups[id]...]))
end
end
evaluate!(model)
@@ -337,56 +354,14 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
model[id][cname].center.low = λ[iadd] - λ[iadd]/10. # allow the center to shift by 10%
model[id][cname].center.high = λ[iadd] + λ[iadd]/10.
thaw(model, cname)
bestfit = GFit.fit!(model, only_id=id, source.data, minimizer=mzer); show(source.log, bestfit)
freeze(model, cname)
thaw(model[id], cname)
bestfit = fit!(model, only_id=id, source.data, minimizer=mzer); show(source.log, bestfit)
freeze(model[id], cname)
end
end
evaluate!(model)
# ----------------------------------------------------------------
# Constrain component normalization across epochs. Note:
# reference spectrum must have reliable estimation of all common
# components
rval = [galaxy_best[ref_id], OIII_best[ref_id]]
runc = [galaxy_unc[ ref_id], OIII_unc[ ref_id]]
@assert count((rval .!= 0) .& (runc .!= 0)) == 2
# Estimate calibration in all epochs w.r.t. reference epoch
for id in 1:Nspec
if id != ref_id
val = [galaxy_best[id], OIII_best[id]]
unc = [galaxy_unc[ id], OIII_unc[ id]]
j = findall((val .!= 0) .& (unc .!= 0))
R = val[j] ./ rval[j]
R_unc = (unc[j] ./ val[j] .+ runc[j] ./ rval[j]) .* R
ratio = sum(R ./ R_unc) ./ sum(1 ./ R_unc)
for cname in keys(preds[id].cevals)
if :norm in propertynames(model[cname])
model[cname].norm.val /= ratio
end
end
model[id][:galaxy].norm.fixed = true
model[id][:OIII_5007].norm.fixed = true
add!(model[id],
:main => Reducer(calibsum, [:calib, :Continuum, :Iron, line_groups[id]..., :UnkLines]),
:calib => ratio)
end
end
evaluate!(model)
for id in 1:Nspec
if id != ref_id
patch!(model) do m
m[id][ :galaxy].norm = m[ref_id][ :galaxy].norm
m[id][:OIII_5007].norm = m[ref_id][:OIII_5007].norm
end
end
end
evaluate!(model)
# Last run with all parameters free
println(source.log, "\nLast run with all parameters free...")
for id in 1:Nspec
@@ -408,11 +383,8 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
freeze(model[id], cname)
end
end
if id != ref_id
thaw(model[id], :calib) # parameter is fixed in preds[ref_id]
end
end
bestfit = GFit.fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
# Disable "unknown" lines whose normalization uncertainty is larger
# than 3 times the normalization
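# A sketch of that check (hypothetical, the actual loop lies outside this hunk):
# every :unkN component whose norm.unc exceeds 3 * norm.val is zeroed and frozen,
# and needs_fitting is set so that the model is re-fitted below.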
@@ -435,7 +407,7 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
end
if needs_fitting
println(source.log, "\nRe-run fit...")
bestfit = GFit.fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
end
println(source.log, "\nFinal model and bestfit:")
@@ -447,6 +419,5 @@ function multiepoch_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p6
println(source.log, "\nElapsed time: $elapsed s")
QSFit.close_log(source)
QSFit.populate_metadata!(source, model)
return (model, bestfit)
end
@@ -18,7 +18,8 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
c.norm.val = Spline1D(λ, source.data[id].val, k=1, bc="error")(c.x0.val)
# Host galaxy template
if source.options[:use_host_template]
if source.options[:use_host_template] &&
(minimum(λ) .< 5500 .< maximum(λ))
add!(model, :Continuum => Reducer(sum, [:qso_cont, :galaxy]),
:galaxy => QSFit.hostgalaxy(source.options[:host_template]))
model[:galaxy].norm.val = Spline1D(λ, source.data[id].val, k=1, bc="error")(5500.)
@@ -36,23 +37,25 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
c.norm.high = 0.5
c.ratio.val = 0.5
c.ratio.fixed = false
c.ratio.low = 0.3
c.ratio.low = 0.1
c.ratio.high = 1
patch!(model) do m
m[:balmer].norm *= m[:qso_cont].norm
end
end
bestfit = GFit.fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
# QSO continuum renormalization
freeze(model, :qso_cont)
c = model[:qso_cont]
if c.norm.val > 0
println(source.log, "Cont. norm. (before): ", c.norm.val)
initialnorm = c.norm.val
while true
residuals = (model() - source.data[id].val) ./ source.data[id].unc
(count(residuals .< 0) / length(residuals) > 0.9) && break
(c.norm.val < initialnorm / 5) && break # give up
c.norm.val *= 0.99
evaluate!(model)
end
@@ -107,7 +110,7 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
add!(model, :Iron => Reducer(sum, iron_components))
add!(model, :main => Reducer(sum, [:Continuum, :Iron]))
evaluate!(model)
bestfit = GFit.fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
else
add!(model, :Iron => Reducer(() -> [0.], Symbol[]))
add!(model, :main => Reducer(sum, [:Continuum, :Iron]))
@@ -191,53 +194,74 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
end
end
patch!(model) do m
m[:Ha_base].fwhm += m[:br_Ha].fwhm
if haskey(model, :br_Hb) &&
haskey(model, :bb_Hb)
# Ensure the peak luminosity of the broad base component is
# smaller than that of the associated broad component:
model[:bb_Hb].norm.high = 1
model[:bb_Hb].norm.val = 0.5
patch!(model) do m
m[:bb_Hb].norm *= m[:br_Hb].norm / m[:br_Hb].fwhm * m[:bb_Hb].fwhm
end
end
if haskey(model, :br_Ha) &&
haskey(model, :bb_Ha)
# Ensure the peak luminosity of the broad base component is
# smaller than that of the associated broad component:
model[:bb_Ha].norm.high = 1
model[:bb_Ha].norm.val = 0.5
patch!(model) do m
m[:bb_Ha].norm *= m[:br_Ha].norm / m[:br_Ha].fwhm * m[:bb_Ha].fwhm
end
end
model[:na_Hg].norm.fixed = 1
model[:na_Hg].fwhm.fixed = 1
model[:na_Hg].voff.fixed = 1
model[:Hg_base].norm.fixed = 1
model[:Hg_base].fwhm.fixed = 1
model[:Hg_base].voff.fixed = 1
model[:br_Hg].fwhm.fixed = 1
model[:br_Hg].voff.fixed = 1
# Avoid division by zero
model[:br_Ha].norm.low = 1.e-10
model[:na_Hg].norm.fixed = 1
model[:na_Hg].fwhm.fixed = 1
model[:na_Hg].voff.fixed = 1
model[:bb_Hg].norm.fixed = 1
model[:bb_Hg].fwhm.fixed = 1
model[:bb_Hg].voff.fixed = 1
model[:br_Hg].fwhm.fixed = 1
model[:br_Hg].voff.fixed = 1
patch!(model) do m
m[:na_Hg].norm = m[:br_Hg].norm * (m[:na_Ha].norm / m[:br_Ha].norm)
m[:na_Hg].norm = m[:br_Hg].norm * (m[:na_Ha].norm / m[:br_Ha].norm)
m[:na_Hg].voff = m[:na_Ha].voff
m[:na_Hg].fwhm = m[:na_Ha].fwhm
m[:Hg_base].norm = m[:br_Hg].norm * (m[:Ha_base].norm / m[:br_Ha].norm)
m[:Hg_base].voff = m[:Ha_base].voff
m[:Hg_base].fwhm = m[:Ha_base].fwhm
m[:bb_Hg].norm = m[:br_Hg].norm * (m[:bb_Ha].norm / m[:br_Ha].norm)
m[:bb_Hg].voff = m[:bb_Ha].voff
m[:bb_Hg].fwhm = m[:bb_Ha].fwhm
m[:br_Hg].voff = m[:br_Ha].voff
m[:br_Hg].fwhm = m[:br_Ha].fwhm
end
model[:na_Hb].norm.fixed = 1
model[:na_Hb].fwhm.fixed = 1
model[:na_Hb].voff.fixed = 1
model[:Hb_base].norm.fixed = 1
model[:Hb_base].fwhm.fixed = 1
model[:Hb_base].voff.fixed = 1
model[:br_Hb].fwhm.fixed = 1
model[:br_Hb].voff.fixed = 1
model[:na_Hb].norm.fixed = 1
model[:na_Hb].fwhm.fixed = 1
model[:na_Hb].voff.fixed = 1
model[:bb_Hb].norm.fixed = 1
model[:bb_Hb].fwhm.fixed = 1
model[:bb_Hb].voff.fixed = 1
model[:br_Hb].fwhm.fixed = 1
model[:br_Hb].voff.fixed = 1
patch!(model) do m
m[:na_Hb].norm = m[:br_Hb].norm * (m[:na_Ha].norm / m[:br_Ha].norm)
m[:na_Hb].voff = m[:na_Ha].voff
m[:na_Hb].fwhm = m[:na_Ha].fwhm
m[:Hb_base].norm = m[:br_Hb].norm * (m[:Ha_base].norm / m[:br_Ha].norm)
m[:Hb_base].voff = m[:Ha_base].voff
m[:Hb_base].fwhm = m[:Ha_base].fwhm
m[:br_Hb].voff = m[:br_Ha].voff
m[:br_Hb].fwhm = m[:br_Ha].fwhm
m[:na_Hb].norm = m[:br_Hb].norm * (m[:na_Ha].norm / m[:br_Ha].norm)
m[:na_Hb].voff = m[:na_Ha].voff
m[:na_Hb].fwhm = m[:na_Ha].fwhm
m[:bb_Hb].norm = m[:br_Hb].norm * (m[:bb_Ha].norm / m[:br_Ha].norm)
m[:bb_Hb].voff = m[:bb_Ha].voff
m[:bb_Hb].fwhm = m[:bb_Ha].fwhm
m[:br_Hb].voff = m[:br_Ha].voff
m[:br_Hb].fwhm = m[:br_Ha].fwhm
end
bestfit = GFit.fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
for lname in line_names
freeze(model, lname)
end
@@ -245,9 +269,9 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
# Add unknown lines
println(source.log, "\nFit unknown emission lines...")
if source.options[:n_unk] > 0
tmp = OrderedDict{Symbol, AbstractComponent}()
tmp = OrderedDict{Symbol, GFit.AbstractComponent}()
for j in 1:source.options[:n_unk]
tmp[Symbol(:unk, j)] = line_component(TRecipe, UnkLine(5e3))
tmp[Symbol(:unk, j)] = line_component(TRecipe, QSFit.UnkLine(5e3))
tmp[Symbol(:unk, j)].norm_integrated = source.options[:norm_integrated]
end
add!(model, :UnkLines => Reducer(sum, collect(keys(tmp))), tmp)
@@ -291,7 +315,7 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
model[cname].center.high = λ[iadd] + λ[iadd]/10.
thaw(model, cname)
bestfit = GFit.fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
freeze(model, cname)
end
end
@@ -317,7 +341,7 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
freeze(model, cname)
end
end
bestfit = GFit.fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
# Disable "unknown" lines whose normalization uncertainty is larger
# than 3 times the normalization
@@ -338,7 +362,7 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
end
if needs_fitting
println(source.log, "\nRe-run fit...")
bestfit = GFit.fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
bestfit = fit!(model, source.data, minimizer=mzer); show(source.log, bestfit)
end
println(source.log, "\nFinal model and bestfit:")
@@ -350,6 +374,5 @@ function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
println(source.log, "\nElapsed time: $elapsed s")
QSFit.close_log(source)
QSFit.populate_metadata!(source, model)
return (model, bestfit)
end