https://ui.adsabs.harvard.edu/abs/2018MNRAS.480.3898N/abstract
https://ui.adsabs.harvard.edu/abs/2018MNRAS.480.4468R/abstract
# CL_1ES_1927p654
## Pre-requisites
......
function read_epochs()
out = DataFrame(filename=String[], date=String[], day=Int[],
telescope=String[], instr=String[],
scale=Float64[], scale_rel_unc=Float64[],
OIII_fwhm=Float64[], OIII_fwhm_unc=Float64[])
for (root, dirs, files) in walkdir("AT2018zf")
for file in files
if file[end-3:end] == ".dat"
filename = root * "/" * file
date = filename[27:end-4]
push!(out, (filename, date, 0, "", "", NaN, NaN, NaN, NaN))
end
end
end
@assert issorted(out.date)
dd = Date.(out.date, Ref("yyyymmdd"))
out[!, :day] .= getproperty.(dd - dd[1], :value)
@assert issorted(out.day)
(d, c) = csvread("tabula-2019-Trakhtenbrot-mod.csv", ',', header_exists=true)
tmp = DataFrame(collect(d), Symbol.(c))
tmp[!, :Date] .= string.(tmp.Date)
for i in 1:nrow(out)
j = findfirst(tmp.Date .== out[i, :date])
if isnothing(j)
@warn "Can't find epoch $(out[i, :date]) in tabula-2019-Trakhtenbrot"
continue
end
out[i, :telescope] = tmp[j, :Telescope]
out[i, :instr] = tmp[j, :Inst]
end
out.day .+= 72 # 2018-03-06 corresponds to day 72
return out
end
# function read_spec(epoch::DataFrameRow; kw...)
# irow = findfirst(epochs.date .== date)
# @assert !isnothing(irow)
# file = epochs[irow, :filename]
# spec = Spectrum(Val(:ASCII), file, columns=[1,2]; label="$date: $file", kw...);
# spec.flux ./= epochs[irow, :scale]
# spec.err ./= epochs[irow, :scale]
# return spec
# end
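# Read the spectrum of a single epoch and divide both flux and uncertainty by the
# per-epoch calibration factor `epoch.scale` (first guessed from the median flux and
# then refined against the [OIII]λ5007 fit in estimate_single_epoch_scale!), so that
# all epochs end up on a common flux scale.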
function read_spec(epoch::DataFrameRow; kw...)
file = epoch.filename
spec = Spectrum(Val(:ASCII), file, columns=[1,2]; label="$(epoch.date): $file", kw...);
spec.flux ./= epoch.scale
spec.err ./= epoch.scale
return spec
end
function boller03_analysis(opt)
file = opt.path * "/boller.dat"
if !isfile(file)
source = QSO{q1927p654}("1ES 1927+654 (Boller+03)", opt.z, ebv=opt.ebv);
#=
Resolution: from Sect. 3 it is 6 / 5007 * 3e5 ~ 360 km/s,
however with this value the [OIII] FWHM hits the lower limit of 100 km/s.
I tried 180, 200 and 250, and the [OIII] luminosity does not change.
=#
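# For reference, the arithmetic quoted above: Δv = Δλ / λ * c ≈ (6 / 5007) * 3e5 ≈ 360 km/s,
# i.e. a 6 Å instrumental FWHM at the [OIII]λ5007 wavelength expressed in velocity units.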
spec = Spectrum(Val(:ASCII), "boller2003.txt", columns=[1,2]; label="Boller03", resolution=200.)
source.options[:min_spectral_coverage][:OIII_5007] = 0.35
add_spec!(source, spec);
res = qsfit(source);
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer],
filename="$(opt.path)/results_boller.html")
JLD2.save_object(file, res)
else
res = JLD2.load_object(file)
end
@assert res.model[:OIII_5007].norm.val == 0.00029042977121756283
@assert res.model[:OIII_5007].norm.val / res.model[:galaxy].norm.val == 60.07373299362222
return res
end
function estimate_single_epoch_scale!(opt, epochs)
file = opt.path * "/" * "SE_scale.dat"
if !isfile(file)
# Estimate first guess calibration factor
for irow in 1:nrow(epochs)
epochs[irow, :scale] = 1.
spec = read_spec(epochs[irow, :])
epochs[irow, :scale] = median(spec.flux)
end
epochs[:, :scale] .= median(epochs.scale) / 25_000
@assert all(epochs[:, :scale] .== 5.7866e-20)
for irow in 1:nrow(epochs)
date = epochs[irow, :date]
source = QSO{q1927p654}("1ES 1927+654 ($(date))", opt.z, ebv=opt.ebv);
source.options[:min_spectral_coverage][:OIII_5007] = 0.35
spec = read_spec(epochs[irow, :])
add_spec!(source, spec);
res = qsfit(source);
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer],
filename="$(opt.path)/results_$(date).html")
# Update scale and OIII_fwhm
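# Multiplying the common first-guess scale by the fitted [OIII]λ5007 normalization
# implies that, when read_spec() later divides the flux by `scale`, the [OIII] line has
# ~unit normalization in every epoch, i.e. the epochs are cross-calibrated under the
# assumption of a constant [OIII] flux.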
epochs[irow, :scale] *= res.model[:OIII_5007].norm.val
epochs[irow, :scale_rel_unc] = res.model[:OIII_5007].norm.unc / res.model[:OIII_5007].norm.val
epochs[irow, :OIII_fwhm] = res.model[:OIII_5007].fwhm.val
epochs[irow, :OIII_fwhm_unc] = res.model[:OIII_5007].fwhm.unc
end
@assert all(isnan.(epochs[.!isfinite.(epochs.OIII_fwhm), :scale]))
JLD2.save_object(file, epochs)
else
empty!(epochs)
append!(epochs, JLD2.load_object(file))
end
end
function multi_epoch_analysis!(opt, _epochs, job; Nloop = 6)
estimate_single_epoch_scale!(opt, _epochs)
epochs = _epochs[opt.subsets[job], :]
out = fill(NaN, nrow(epochs))
for loop in 1:Nloop
file = "$(opt.path)/results_$(job)_$(loop).dat"
if !isfile(file)
source = QSO{q1927p654}("1ES 1927+654", opt.z, ebv=opt.ebv);
source.options[:min_spectral_coverage][:OIII_5007] = 0.5
source.options[:wavelength_range] = [3500, 6900]
@gp :zoom "set grid" :-
for irow in 1:nrow(epochs)
spec = read_spec(epochs[irow, :])
add_spec!(source, spec);
@gp :- :zoom xr=[4750,5150] spec.λ ./ (1 + source.z) spec.flux "w l t '$(epochs[irow, :date])'"
end
res = qsfit_multi(source);
JLD2.save_object(file, res)
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer], filename="$(opt.path)/results_$(job)_$(loop).html")
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer], filename="$(opt.path)/results_$(job)_$(loop)_rebin4.html", rebin=4)
else
@info file
res = JLD2.load_object(file);
println(loop, " ", res.fitres.gofstat / res.fitres.dof, " ", res.fitres.elapsed / 3600.)
end
OIII_norm = Float64[]
for i in 1:nrow(epochs)
push!(OIII_norm, res.multi[i][:OIII_5007].norm.val)
epochs[i, :scale] *= ((1 + res.multi[i][:OIII_5007].norm.val) / 2)
end
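# The update above is a damped iteration: the scale is multiplied by the mean of 1 and
# the fitted [OIII] normalization, so each iteration moves the calibration halfway
# toward the value that would make the fitted normalization exactly 1.  At convergence
# the [OIII] normalization is ~1 in all epochs (see the calib_*.png plots below).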
out = hcat(out, OIII_norm)
f = open("$(opt.path)/scales_$(job)_$(loop).txt", "w")
show(f, epochs)
close(f)
end
out = out[:,2:end]
@gp "set grid" "set key outside top horiz" xlab="Iteration" ylab="[OIII]λ5007 norm. (arb. units)" :-
for i in 1:nrow(epochs)
@gp :- 1:Nloop out[i, :] "w lp t '$(epochs[i, :date])'" :-
end
@gp :- xr=[1,6]
save(term="pngcairo size 1000,700", output="$(opt.path)/calib_$(job).png")
@gp :- yr=[0.96, 1.04]
save(term="pngcairo size 1000,700", output="$(opt.path)/calib_$(job)_zoom.png")
_epochs[opt.subsets[job], :scale] .= epochs.scale
return out
end
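# cont_at(bestfit, λ): λL_λ of the AGN power-law continuum at wavelength λ, with a
# rough uncertainty given by λ times the full spread (max - min) of the four
# combinations of (norm ± unc) and (λ/x0)^(alpha ± unc).
# Example usage (see the wrap-up below):  c5100, c5100_unc = cont_at(res.multi[id], 5100.)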
function cont_at(bestfit, λ)
A1 = bestfit[:qso_cont].norm.val - bestfit[:qso_cont].norm.unc
A = bestfit[:qso_cont].norm.val
A2 = bestfit[:qso_cont].norm.val + bestfit[:qso_cont].norm.unc
if A1 > A2
A1, A2 = A2, A1
end
@assert A1 <= A2
B1 = (λ / bestfit[:qso_cont].x0.val)^(bestfit[:qso_cont].alpha.val - bestfit[:qso_cont].alpha.unc)
B = (λ / bestfit[:qso_cont].x0.val)^(bestfit[:qso_cont].alpha.val)
B2 = (λ / bestfit[:qso_cont].x0.val)^(bestfit[:qso_cont].alpha.val + bestfit[:qso_cont].alpha.unc)
if B1 > B2
B1, B2 = B2, B1
end
@assert B1 <= B2
dd = extrema([A1*B1, A1*B2, A2*B1, A2*B2])
return λ * A * B, λ * (dd[2]-dd[1])
end
@@ -8,165 +8,46 @@ using Dates
using TextParse
import StatsBase, JLD2
if readlines(`hostname`)[1] != "giorgio-pc"
Gnuplot.options.term="sixelgd size 800,600"
end
function read_epochs()
out = DataFrame(id=Int[], filename=String[], date=String[], scale=Float64[], OIII_fwhm=Float64[])
for (root, dirs, files) in walkdir("AT2018zf")
for file in files
if file[end-3:end] == ".dat"
filename = root * "/" * file
date = filename[27:end-4]
push!(out, (nrow(out)+1, filename, date, NaN, NaN))
end
end
end
@assert issorted(out.date)
return out
end
function read_spec(id; kw...)
if id == "B03"
file = "boller2003.txt"
else
irow = findfirst(epochs.id .== id)
file = epochs[irow, :filename]
end
println(file)
spec = Spectrum(Val(:ASCII), file, columns=[1,2]; label="$id: $file", kw...);
if isa(id, Number)
spec.flux ./= epochs[irow, :scale]
spec.err ./= epochs[irow, :scale]
end
return spec
end
function analyze_boller03()
file = opt.path * "/boller.dat"
if !isfile(file)
source = QSO{q1927p654}("1ES 1927+654 (Boller03)", opt.z, ebv=opt.ebv);
source.options[:min_spectral_coverage][:OIII_5007] = 0.35
# source.options[:host_template] = "/home/gcalderone/Mbi1.30Zm1.49T00.0700_iTp0.00_baseFe_linear_FWHM_2.51"
#=
Resolution: from Sect. 3 it is 6 / 5007 * 3e5 ~ 360 km/s,
however with this value the [OIII] FWHM is 100 km/s (the lower limit).
I tried 180, 200 and 250, and the [OIII] luminosity does not change.
=#
spec = read_spec("B03", resolution=200.)
add_spec!(source, spec);
res = qsfit(source);
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer],
filename="$(opt.path)/results_boller.html")
JLD2.save_object(file, res)
else
res = JLD2.load_object(file)
end
@assert res.model[:OIII_5007].norm.val == 0.00029042977121756283
@assert res.model[:OIII_5007].norm.val / res.model[:galaxy].norm.val == 60.07373299362222
return res
end
function estimate_SE_scale!()
file = opt.path * "/" * "SE_scale.dat"
if !isfile(file)
for irow in 1:nrow(epochs)
@info irow
id = epochs[irow, :id]
epochs[irow, :scale] = 1.
spec = read_spec(id)
epochs[irow, :scale] = median(spec.flux) / 25000
source = QSO{q1927p654}("1ES 1927+654 ($id)", opt.z, ebv=opt.ebv);
source.options[:min_spectral_coverage][:OIII_5007] = 0.5
# source.options[:host_template] = "/home/gcalderone/Mbi1.30Zm1.49T00.0700_iTp0.00_baseFe_linear_FWHM_2.51"
spec = read_spec(id)
add_spec!(source, spec);
res = qsfit(source);
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer],
filename="$(opt.path)/results_$(id).html")
# Update all_scale and store FWHM
@info res.model[:OIII_5007].norm.val
epochs[irow, :scale] *= res.model[:OIII_5007].norm.val
epochs[irow, :OIII_fwhm] = res.model[:OIII_5007].fwhm.val
end
@assert all(isnan.(epochs[.!isfinite.(epochs.OIII_fwhm), :scale]))
JLD2.save_object(file, epochs)
else
empty!(epochs)
append!(epochs, JLD2.load_object(file))
end
end
function analyze_job(job; Nloop = 6)
out = fill(NaN, length(epoch_ids[job]))
estimate_SE_scale!()
for loop in 1:Nloop
file = "$(opt.path)/results_$(job)_$(loop).dat"
if !isfile(file)
source = QSO{q1927p654}("1ES 1927+654", opt.z, ebv=opt.ebv);
source.options[:min_spectral_coverage][:OIII_5007] = 0.5
source.options[:wavelength_range] = [3500, 6900]
@gp :zoom "set grid" :-
for id in epoch_ids[job]
spec = read_spec(id, resolution=get(resolution, job, NaN))
add_spec!(source, spec);
@gp :- :zoom xr=[4750,5150] spec.λ ./ (1 + source.z) spec.flux "w l t '$(id)'"
end
res = qsfit_multi(source);
JLD2.save_object(file, res)
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer], filename="$(opt.path)/results_$(job)_$(loop).html")
viewer(res, showcomps=[:qso_cont, :galaxy, :balmer], filename="$(opt.path)/results_$(job)_$(loop)_rebin4.html", rebin=4)
else
@info file
res = JLD2.load_object(file);
println(loop, " ", res.fitres.gofstat / res.fitres.dof, " ", res.fitres.elapsed / 3600.)
end
OIII_norm = Float64[]
for i in 1:length(epoch_ids[job])
id = epoch_ids[job][i]
push!(OIII_norm, res.multi[i][:OIII_5007].norm.val)
epochs[epochs.id .== id, :scale] *= ((1 + res.multi[i][:OIII_5007].norm.val) / 2)
end
out = hcat(out, OIII_norm)
end
out = out[:,2:end]
@gp "set grid" :-
for i in 1:length(epoch_ids[job])
id = epoch_ids[job][i]
@gp :- 1:Nloop out[i, :] "w lp t '$(id)'" :-
end
@gp
return out
end
include("funcs.jl")
term_hw = "pngcairo size 1000,700 fontscale 1.5"
term_fw = "pngcairo size 1200,600 fontscale 1.3"
const opt = (
z = 0.01942,
ebv = 0.077,
path = "output")
z = 0.01942,
ebv = 0.077,
path = "output",
subsets = Dict())
try; mkdir(opt.path); catch; end
const epochs = read_epochs()
analyze_boller03()
estimate_SE_scale!()
mkpath(opt.path)
@gp :prenorm "set key bottom right" "set grid" xlab="Epoch" ylab="Value" :-
@gp :- :prenorm epochs.scale./1e-18 "w lp t '[OIII] norm.'" :-
@gp :- :prenorm epochs.OIII_fwhm./1000 "w lp t '[OIII] FWHM (x1000 km/s)'"
save(:prenorm, term="png size 800,600", output="$(opt.path)/oiii_norm_fwhm.png")
const epochs = read_epochs()
boller03_analysis(opt)
estimate_single_epoch_scale!(opt, epochs)
f = open("$(opt.path)/scales_SE.txt", "w")
show(f, epochs)
close(f)
@gp "set key top right" "set grid" xlab="Epoch" ylab="Value" :-
@gp :- 1:nrow(epochs) epochs.scale./1e-18 epochs.scale./1e-18 "w lp t '[OIII]λ5007 norm. (arbitrary scale)' lc rgb 'red'" :-
@gp :- 1:nrow(epochs) epochs.scale./1e-18 epochs.scale./1e-18 .* epochs.scale_rel_unc "w yerr notit pt 0 lc rgb 'red'" :-
@gp :- 1:nrow(epochs) epochs.OIII_fwhm./1000 "w lp t '[OIII]λ5007 FWHM (x1000 km/s)' lc rgb 'blue'" :-
@gp :- 1:nrow(epochs) epochs.OIII_fwhm./1000 epochs.OIII_fwhm_unc./1000 "w yerr notit pt 0 lc rgb 'blue'"
i = findall(epochs.instr .== "FLOYDS")
@gp :- (1:nrow(epochs))[i] epochs.scale[i]./1e-18 "w p t 'FLOYDS spectrograph' pt 6 ps 3 lc rgb 'black'"
@gp :- (1:nrow(epochs))[i] epochs.OIII_fwhm[i]./1000 "w p notit pt 6 ps 3 lc rgb 'black'"
i = findfirst(epochs.date .== "20180727")
@gp :- yr=[-0.3, 1.7]
@gp :- "set arrow 1 from $i,-0.2 to $i,0 filled size screen 0.03,15,1 lw 3 lc rgb 'black'"
save(term=term_hw, output="$(opt.path)/oiii_norm_fwhm_SE.png")
#=
Check the loci of true source FWHM compatible with the observed
FWHM of ~400 and ~900 km/s at different instrument resolutions. The
instrument resolutions (Y axis), when projected onto the X axis, should
identify a single value for the "true" FWHM.
=#
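# Assumed relation (Gaussian broadening added in quadrature):
#   FWHM_obs^2 = FWHM_true^2 + (2.355 * resolution)^2
# hence the instrumental resolution (as a Gaussian sigma, in km/s) compatible with a
# true FWHM w is  r = sqrt(FWHM_obs^2 - w^2) / 2.355, which is what is plotted below.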
w = 10 .^(2:0.01:log10(400));
r = sqrt.(400^2 .- w.^2) ./ 2.355;
@gp w r "w l t '400 km/s'"
@@ -174,80 +55,44 @@ r = sqrt.(900^2 .- w.^2) ./ 2.355;
w = 10 .^(2:0.01:log10(900));
r = sqrt.(900^2 .- w.^2) ./ 2.355;
@gp :- w r "w l t '900 km/s'"
@gp :- "set grid" xlab="Actual FWHM" ylab="Instr. resolution"
@gp :- "set grid" xlab="True FWHM" ylab="Instr. resolution"
# Ensure no sample is negative
@gp :allepochs "set grid" xr=[3e3,1e4] cbr=[1,29] :-
@gp :- :allepochs cblabel="Epoch" xlab="Rest frame wavelength [A]" ylab="Flux density [arb. units]"
for id in epochs.id
s = read_spec(id)
for i in 1:nrow(epochs)
s = read_spec(epochs[i, :])
x = s.λ;
y = s.flux;
y .-= median(y)
y ./= maximum(y)
@gp :- :allepochs x ./ (1 + opt.z) y "u 1:(\$2+$id):($id) w l notit lc pal" :-
@gp :- :allepochs x ./ (1 + opt.z) y "u 1:(\$2+$i):($i) w l notit lc pal" :-
end
@gp :- :allepochs yr=[1, 29]
save(:allepochs, term="png size 800,600", output="$(opt.path)/all_epochs.png")
@gp :- :allepochs yr=[0, 29]
save(:allepochs, term=term_hw, output="$(opt.path)/all_epochs.png")
@gp :- :allepochs xr=[4900, 5100]
save(:allepochs, term="png size 800,2000", output="$(opt.path)/all_epochs_zoom.png")
epoch_ids = Dict(
:vecchi => [17, 10, 21, 23],
:lowres => [8,9,10,11,12,14,15,16,18,21,22,23,24,25],
:highres => [5, 7, 13, 17, 20],
:test => [7, 13],
:limited => [1,2,3,13],
:all => collect(5:26),
:floyds => [6, 8, 9, 10, 11, 12, 14, 15, 16, 18, 19, 21, 22, 23])
# deleteat!(epoch_ids[:all], 4) #insufficient coverage in na_Hg
# Can't use a resolution smaller than 150 km/s, otherwise some lines
# will be neglected because of the spectral coverage check
resolution = Dict()
# :lowres => 350.,
# :highres => 150.)
analyze_job(:test)
analyze_job(:highres)
analyze_job(:all)
analyze_job(:floyds)
function cont_at(bestfit, λ)
A1 = bestfit[:qso_cont].norm.val - bestfit[:qso_cont].norm.unc
A = bestfit[:qso_cont].norm.val
A2 = bestfit[:qso_cont].norm.val + bestfit[:qso_cont].norm.unc
if A1 > A2
A1, A2 = A2, A1
end
@assert A1 <= A2
B1 = (λ / bestfit[:qso_cont].x0.val)^(bestfit[:qso_cont].alpha.val - bestfit[:qso_cont].alpha.unc)
B = (λ / bestfit[:qso_cont].x0.val)^(bestfit[:qso_cont].alpha.val)
B2 = (λ / bestfit[:qso_cont].x0.val)^(bestfit[:qso_cont].alpha.val + bestfit[:qso_cont].alpha.unc)
if B1 > B2
B1, B2 = B2, B1
end
@assert B1 <= B2
dd = extrema([A1*B1, A1*B2, A2*B1, A2*B2])
return λ * A * B, λ * (dd[2]-dd[1])
end
save(:allepochs, term=term_hw, output="$(opt.path)/all_epochs_zoom.png")
# Wrap-up
chosen_job = :all
chosen_iloop = 5
# Define epoch subsets
opt.subsets[:lowres] = [8,9,10,11,12,14,15,16,18,21,22,23,24,25]
opt.subsets[:highres] = [5, 7, 13, 17, 20]
opt.subsets[:test] = [7, 13]
opt.subsets[:floyds] = [6, 8, 9, 10, 11, 12, 14, 15, 16, 18, 19, 21, 22, 23, 24, 25, 26]
multi_epoch_analysis!(opt, epochs, :test)
multi_epoch_analysis!(opt, epochs, :highres)
multi_epoch_analysis!(opt, epochs, :floyds)
# Prepare final table
chosen_job = :floyds
chosen_iloop = 6
res = JLD2.load_object("output/results_$(chosen_job)_$(chosen_iloop).dat")
tab = DataFrame(epoch=Int[], date=String[], instr=String[], galaxy=Float64[],
tab = DataFrame(epoch=Int[], date=String[], day=Int[], instr=String[], galaxy=Float64[],
contslope=Float64[],
c3500=Float64[], c5100=Float64[],
Ha_bb_norm=Float64[], Ha_bb_fwhm=Float64[], Ha_bb_voff=Float64[],
@@ -259,9 +104,10 @@ tab = DataFrame(epoch=Int[], date=String[], instr=String[], galaxy=Float64[],
oiii_fwhm=Float64[], oiii_voff=Float64[])
for id in 1:length(res.multi)
j = opt.subsets[chosen_job][id]
push!(tab,
(epoch_ids[chosen_job][id], string(split(res.source.specs[id].label, "/")[2])[18:end-4], "",
res.multi[id][:galaxy].norm.val,
(j, epochs[j, :date], epochs[j, :day], epochs[j, :instr],
res.multi[id][:galaxy].norm.patched,
res.multi[id][:qso_cont].alpha.val,
cont_at(res.multi[id], 3500)[1],
cont_at(res.multi[id], 5100)[1],
@@ -279,26 +125,12 @@ tab[!, :l5100] .= [5100 .* Spline1D(domain(res.multi[id])[:], res.multi[id]())(5
tab[!, :Ha] .= tab.Ha_br_norm .+ tab.Ha_bb_norm .+ tab.Ha_na_norm
tab[!, :Hb] .= tab.Hb_br_norm .+ tab.Hb_bb_norm .+ tab.Hb_na_norm
dd = Date.(tab.date, Ref("yyyymmdd"))
tab[!, :day] .= getproperty.(dd - Date("2018-03-06") .+ Day(72), :value)
(d, c) = csvread("misc/tabula-2019-Trakhtenbrot-mod.csv", ',', header_exists=true)
tmp = DataFrame(collect(d), Symbol.(c))
tmp[!, :Date] .= string.(tmp.Date)
tab[!, :instr] .= ""
for i in 1:nrow(tab)
j = findfirst(tmp.Date .== tab[i, :date])
isnothing(j) && continue
tab[i, :instr] = tmp[j, :Inst]
end
f = FITS("$(opt.path)/1ES_1927p654_results.fits", "w")
write(f, tab)
close(f)
# Assign different plot symbols to each instrument
tab[!, :pt] .= 0
for i in 1:nrow(tab)
j = findfirst(sort(unique(tab.instr)) .== tab[i, :instr])
@@ -310,36 +142,22 @@ setindex!.(Ref(cm), 1:length(cm), collect(keys(cm))[sortperm(collect(values(cm))
tab.pt .= getindex.(Ref(cm), tab.instr)
@gp tab.contslope tab.Ha tab.pt "w lp t 'Ha' pt var ps 2" ylog=true
@gp :- tab.contslope tab.Hb tab.pt "w lp t 'Hb' pt var ps 2"
@gp :- tab.contslope tab.c5100 tab.pt "w lp t 'c5100' pt var ps 2"
@gp :- tab.contslope tab.l5100 tab.pt "w lp t 'l5100' pt var ps 2"
@gp :- tab.contslope tab.c3500 tab.pt "w lp t 'c3500' pt var ps 2"
@gp :- tab.contslope tab.l3500 tab.pt "w lp t 'l3500' pt var ps 2"
@gp xlog=true ylog=true
@gp :- tab.c5100 tab.Ha tab.pt "w lp t 'c5100' pt var ps 2"
@gp :- tab.l5100 tab.Ha tab.pt "w lp t 'l5100' pt var ps 2"
@gp tab.day tab.c5100 tab.pt "w lp t 'c5100' pt var ps 2" ylog=true
@gp :- tab.day tab.l5100 tab.pt "w lp t 'l5100' pt var ps 2"
@gp :- tab.day tab.Ha tab.pt "w lp t 'Ha' pt var ps 2"
@gp :- tab.day tab.Hb tab.pt "w lp t 'Hb' pt var ps 2"
@gp tab.day tab.contslope tab.pt "w lp t 'Slope' pt var ps 2"
@gp "set grid"
# PLOTS
# Assessment of quality of cross calibration
@gp "set grid" xlab="Rest-frame wavelength [A]" ylab="L_{/Symbol l} [arb. units]"
@gp :- "set key out box" linetypes(:darkrainbow, lw=3)
for i in 1:nrow(tab)
ss = "w l t '$(tab[i, :date])' dt $(tab[i, :pt]) lw 2"
@gp :- domain(res.multi[i])[:] res.multi[i](:qso_cont) ss
ss = "w l t '$(tab[i, :date])' dt $(tab[i, :pt])"
@gp :- domain(res.multi[i])[:] res.multi[i]() ss
end
save(:prenorm, term="png size 800,600", output="$(opt.path)/evolution.png")
i = 1
@gp :- domain(res.multi[i])[:] res.multi[i](:galaxy) "w l t 'Host galaxy' lc rgb 'black'"
save(term=term_fw, output="$(opt.path)/evolution.png")
@gp "set grid"
@gp "set grid" xlab="Rest-frame wavelength [A]" ylab="L_{/Symbol l} - cont. at 5007A [arb. units]"
@gp :- "set key out box" linetypes(:darkrainbow, lw=3) xr=[4600,5300]
for i in 1:length(res.multi)
println(res.multi[i][:OIII_5007])
x = domain(res.multi[i])[:]
@@ -348,4 +166,66 @@ for i in 1:length(res.multi)
ss = "w l t '$(tab[i, :date])' dt $(tab[i, :pt]) lw 2"
@gp :- x y .- Spline1D(x, y0)(5007.) ss
end
save(:prenorm, term="png size 800,600", output="$(opt.path)/evolution_oiii_norm.png")
save(term="pngcairo size 1200,600 fontscale 1.2", output="$(opt.path)/evolution_oiii_norm.png")
@gp :- xr=[4900, 5100]
save(term=term_fw, output="$(opt.path)/evolution_oiii_norm_zoom.png")
# Trends
@gp "set grid" xlab="Cont. slope" ylab="Line luminosity [arb. units]" linetypes(:darkrainbow, lw=3)
@gp :- tab.contslope tab.Ha "w l notit lc rgb 'gray'" ylog=true
@gp :- tab.contslope tab.Hb "w l notit lc rgb 'gray'"
@gp :- tab.contslope tab.Ha 1:nrow(tab) "w p t 'H{/Symbol a}' pt 1 ps 3 lc var" ylog=true
@gp :- tab.contslope tab.Hb 1:nrow(tab) "w p t 'H{/Symbol b}' pt 2 ps 3 lc var"
i = findfirst(tab.date .== "20180727")
@gp :- tab.contslope[i] tab.Ha[i] "w p notit pt 6 ps 4 lc rgb 'black'"
@gp :- tab.contslope[i] tab.Hb[i] "w p notit pt 6 ps 4 lc rgb 'black'"
@gp :- yr=[2, 50]
save(term=term_fw, output="$(opt.path)/slope_vs_lines.png")
@gp "set grid" xlab="Cont. slope" ylab="AGN cont. luminosity [arb. units]" linetypes(:darkrainbow, lw=3)
@gp :- tab.contslope tab.c3500 "w l notit lc rgb 'gray'" ylog=true
@gp :- tab.contslope tab.c5100 "w l notit dt 2 lc rgb 'gray'"
@gp :- tab.contslope tab.c3500 1:nrow(tab) "w p t '{/Symbol l}L_{/Symbol l} at 3500A' pt 1 ps 3 lc var"
@gp :- tab.contslope tab.c5100 1:nrow(tab) "w p t '{/Symbol l}L_{/Symbol l} at 5100A' pt 2 ps 3 lc var"
@gp :- yr=[300, 2500]
i = findfirst(tab.date .== "20180727")
@gp :- tab.contslope[i] tab.c3500[i] "w p notit pt 6 ps 4 lc rgb 'black'"
@gp :- tab.contslope[i] tab.c5100[i] "w p notit pt 6 ps 4 lc rgb 'black'"
save(term=term_fw, output="$(opt.path)/slope_vs_cont.png")
@gp "set grid" "set key left" xlab="{/Symbol l}L_{/Symbol l} [arb. units]" ylab="H{/Symbol a} luminosity [arb. units]" linetypes(:darkrainbow, lw=3) xlog=true ylog=true
@gp :- tab.c3500 tab.Ha "w l notit lc rgb 'gray'"
@gp :- tab.c5100 tab.Ha "w l notit dt 2 lc rgb 'gray'"
@gp :- tab.c3500 tab.Ha 1:nrow(tab) "w p t '{/Symbol l}L_{/Symbol l} at 3500A' pt 1 ps 3 lc var"
@gp :- tab.c5100 tab.Ha 1:nrow(tab) "w p t '{/Symbol l}L_{/Symbol l} at 5100A' pt 2 ps 3 lc var"
@gp :- xr=[300, 2.5e3] yr=[6,40]
i = findfirst(tab.date .== "20180727")
@gp :- tab.c3500[i] tab.Ha[i] "w p notit pt 6 ps 4 lc rgb 'black'"
@gp :- tab.c5100[i] tab.Ha[i] "w p notit pt 6 ps 4 lc rgb 'black'"
save(term=term_fw, output="$(opt.path)/cont_vs_lines.png")
@gp "set grid" xlab="Days since 2018-03-06" ylab="L_{/Symbol l} [arb. units]" linetypes(:darkrainbow, lw=3) ylog=true
@gp :- tab.day tab.c3500 "w l notit lc rgb 'gray'"
@gp :- tab.day tab.c5100 "w l notit dt 2 lc rgb 'gray'"
@gp :- tab.day tab.Ha "w l notit dt 3 lc rgb 'gray'"
@gp :- tab.day tab.Hb "w l notit dt 4 lc rgb 'gray'"
@gp :- tab.day tab.c3500 1:nrow(tab) "w p t 'Cont. {/Symbol l}L_{/Symbol l} at 3500A' pt 1 ps 3 lc var"
@gp :- tab.day tab.c5100 1:nrow(tab) "w p t 'Cont. {/Symbol l}L_{/Symbol l} at 5100A' pt 2 ps 3 lc var"
@gp :- tab.day tab.Ha 1:nrow(tab) "w p t 'H{/Symbol a} int. lum.' pt 3 ps 3 lc var"
@gp :- tab.day tab.Hb 1:nrow(tab) "w p t 'H{/Symbol b} int. lum.' pt 4 ps 3 lc var"
@gp :- yr=[1, 1e4]
i = findfirst(tab.date .== "20180727")
@gp :- tab.day[[i,i]] tab.c3500[[i,i]] "w p notit pt 6 ps 4 lc rgb 'black'"
@gp :- tab.day[[i,i]] tab.c5100[[i,i]] "w p notit pt 6 ps 4 lc rgb 'black'"
@gp :- tab.day[[i,i]] tab.Ha[[i,i]] "w p notit pt 6 ps 4 lc rgb 'black'"
@gp :- tab.day[[i,i]] tab.Hb[[i,i]] "w p notit pt 6 ps 4 lc rgb 'black'"
save(term=term_fw, output="$(opt.path)/evolution_day.png")
@@ -114,6 +114,11 @@ end
function guess_emission_lines!(source::QSO{T}, pspec::PreparedSpectrum, model::Model) where T <: q1927p654
QSFit.guess_emission_lines!(parent_recipe(source), pspec, model)
end
function qsfit_multi(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
elapsed = time()
@assert length(source.specs) > 1
@@ -181,11 +186,11 @@ function qsfit_multi(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
model = multi[id]
pspec = pspecs[id]
QSFit.add_emission_lines!(source, pspec, model)
guess_emission_lines!(source, pspec, model)
QSFit.guess_emission_lines!(source, pspec, model)
QSFit.add_patch_functs!(source, pspec, model)
multi[id][:OIII_5007].norm.val = 1.
# multi[id][:OIII_5007].norm.fixed = true
# multi[id][:OIII_5007].norm.fixed = true
end
fitres = fit!(source, multi, pspecs)
for id in 1:length(pspecs)
@@ -250,30 +255,4 @@ function qsfit_multi(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
end
#=
function line_component(::Type{T}, line::AsymmNarrowLine) where T <: q1927p654
comp = QSFit.SpecLineAsymmGauss(line.λ)
comp.fwhm.val = 5e2
comp.fwhm.low = 100
comp.fwhm.high = 2e3
comp.voff.low = -1e3
comp.voff.high = 1e3
return comp
end
function line_component(::Type{T}, line::ComboBroadLine) where T <: q1927p654
comp = QSFit.SpecLineAsymmGauss(line.λ)
comp.fwhm.val = 5e3
comp.fwhm.low = 900
comp.fwhm.high = 1.5e4
comp.voff.low = -3e3
comp.voff.high = 3e3
return comp
end
=#
# include("Recipe_single.jl")
# include("Recipe_multi.jl")
end
function multi_fit(source::QSO{TRecipe}; ref_id=1) where TRecipe <: q1927p654
Nspec = length(source.domain)
elapsed = time()
mzer = GFit.cmpfit()
mzer.Δfitstat_theshold = 1.e-5
# Initialize components and guess initial values
println(logio(source), "\nFit continuum components...")
multi = MultiModel()
for id in 1:Nspec
λ = source.domain[id][:]
model = Model(source.domain[id],
:qso_cont => QSFit.qso_cont_component(TRecipe),
:Continuum => SumReducer([:qso_cont]))
# TODO if source.options[:instr_broadening]
# TODO GFit.set_instr_response!(model, (l, f) -> instrumental_broadening(l, f, source.spectra[id].resolution))
# TODO end
c = model[:qso_cont]
c.x0.val = median(λ)
c.norm.val = Spline1D(λ, source.data[id].val, k=1, bc="error")(c.x0.val)
push!(multi, model)
end
for id in 1:Nspec
model = multi[id]
λ = source.domain[id][:]
# Host galaxy template
if source.options[:use_host_template] &&
(minimum(λ) .< 5500 .< maximum(λ))
model[:galaxy] = QSFit.hostgalaxy(source.options[:host_template])
model[:Continuum] = SumReducer([:qso_cont, :galaxy])
# Split total flux between continuum and host galaxy
vv = Spline1D(λ, source.data[id].val, k=1, bc="error")(5500.)
model[:galaxy].norm.val = 1/2 * vv
model[:qso_cont].x0.val *= 1/2 * vv / Spline1D(λ, model(:qso_cont), k=1, bc="error")(5500.)
if id != ref_id
@patch! multi[id][:galaxy].norm = multi[ref_id][:galaxy].norm
end
end
# Balmer continuum and pseudo-continuum
if source.options[:use_balmer]
tmp = [:qso_cont, :balmer]
(:galaxy in keys(model)) && push!(tmp, :galaxy)
model[:balmer] = QSFit.balmercont(0.1, 0.5)
model[:Continuum] = SumReducer(tmp)
c = model[:balmer]
c.norm.val = 0.1
c.norm.fixed = false
c.norm.high = 0.5
c.ratio.val = 0.5
c.ratio.fixed = false
c.ratio.low = 0.1
c.ratio.high = 1
@patch! model[:balmer].norm *= model[:qso_cont].norm
end
end
bestfit = fit!(multi, source.data, minimizer=mzer); show(logio(source), bestfit)
# QSO continuum renormalization
for id in 1:Nspec
model = multi[id]
freeze(model, :qso_cont)
c = model[:qso_cont]
initialnorm = c.norm.val
if c.norm.val > 0
println(logio(source), "$id: Cont. norm. (before): ", c.norm.val)
while true
residuals = (model() - source.data[id].val) ./ source.data[id].unc
(count(residuals .< 0) / length(residuals) > 0.9) && break
(c.norm.val < initialnorm / 5) && break # give up
c.norm.val *= 0.99
evaluate!(model)
end
println(logio(source), "$id : Cont. norm. (after) : ", c.norm.val)
else
println(logio(source), "$id: Skipping cont. renormalization")
end
freeze(model, :qso_cont)
(:galaxy in keys(model)) && freeze(model, :galaxy)
(:balmer in keys(model)) && freeze(model, :balmer)
end
evaluate!(multi)
# Fit iron templates
println(logio(source), "\nFit iron templates...")
for id in 1:Nspec
model = multi[id]
λ = source.domain[id][:]
iron_components = Vector{Symbol}()
if source.options[:use_ironuv]
fwhm = 3000.
source.options[:instr_broadening] || (fwhm = sqrt(fwhm^2 + (source.spectra[id].resolution * 2.355)^2))
comp = QSFit.ironuv(fwhm)
(_1, _2, coverage) = spectral_coverage(λ, source.spectra[id].resolution, comp)
threshold = get(source.options[:min_spectral_coverage], :ironuv, source.options[:min_spectral_coverage][:default])
if coverage >= threshold
model[:ironuv] = comp
model[:ironuv].norm.val = 0.5
push!(iron_components, :ironuv)
else
println(logio(source), "Ignoring ironuv component on prediction $id (threshold: $threshold)")
end
end
if source.options[:use_ironopt]
fwhm = 3000.
source.options[:instr_broadening] || (fwhm = sqrt(fwhm^2 + (source.spectra[id].resolution * 2.355)^2))
comp = QSFit.ironopt_broad(fwhm)
(_1, _2, coverage) = spectral_coverage(λ, source.spectra[id].resolution, comp)
threshold = get(source.options[:min_spectral_coverage], :ironopt, source.options[:min_spectral_coverage][:default])
if coverage >= threshold
fwhm = 500.
source.options[:instr_broadening] || (fwhm = sqrt(fwhm^2 + (source.spectra[id].resolution * 2.355)^2))
model[:ironoptbr] = comp
model[:ironoptna] = QSFit.ironopt_narrow(fwhm)
model[:ironoptbr].norm.val = 0.1 # TODO: guess a sensible value
model[:ironoptna].norm.val = 0.0
freeze(model, :ironoptna) # will be freed during last run
push!(iron_components, :ironoptbr, :ironoptna)
else
println(logio(source), "Ignoring ironopt component on prediction $id (threshold: $threshold)")
end
end
if length(iron_components) > 0
model[:Iron] = SumReducer(iron_components)
model[:main] = SumReducer([:Continuum, :Iron])
evaluate!(model)
bestfit = fit!(model, source.data[id], minimizer=mzer); show(logio(source), bestfit)
else
model[:Iron] = @expr m -> [0.]
model[:main] = SumReducer([:Continuum, :Iron])
end
(:ironuv in keys(model)) && freeze(model, :ironuv)
(:ironoptbr in keys(model)) && freeze(model, :ironoptbr)
(:ironoptna in keys(model)) && freeze(model, :ironoptna)
end
evaluate!(multi)
# Add emission lines
line_names = [collect(keys(source.line_names[id])) for id in 1:Nspec]
line_groups = [unique(collect(values(source.line_names[id]))) for id in 1:Nspec]
println(logio(source), "\nFit known emission lines...")
for id in 1:Nspec
model = multi[id]
λ = source.domain[id][:]
resid = source.data[id].val - model() # will be used to guess line normalization
for (cname, comp) in source.line_comps[id]
model[cname] = comp
end
for (group, lnames) in QSFit.invert_dictionary(source.line_names[id])
model[group] = SumReducer(lnames)
end
model[:main] = SumReducer([:Continuum, :Iron, line_groups[id]...])
if haskey(model, :MgII_2798)
model[:MgII_2798].voff.low = -1000
model[:MgII_2798].voff.high = 1000
end
if haskey(model, :OIII_5007_bw)
model[:OIII_5007_bw].fwhm.val = 500
model[:OIII_5007_bw].fwhm.low = 1e2
model[:OIII_5007_bw].fwhm.high = 1e3
model[:OIII_5007_bw].voff.low = 0
model[:OIII_5007_bw].voff.high = 2e3
end
for cname in line_names[id]
model[cname].norm_integrated = source.options[:norm_integrated]
end
# Guess values
evaluate!(model)
for cname in line_names[id]
c = model[cname]
resid_at_line = Spline1D(λ, resid, k=1, bc="nearest")(c.center.val)
c.norm.val *= abs(resid_at_line) / maximum(model(cname))
# If instrumental broadening is not used and the line profile
# is a Gaussian one, take the spectral resolution into account.
# This is significantly faster than convolving with an
# instrument response but has some limitations:
# - works only with Gaussian profiles;
# - all components must be additive (i.e. no absorptions)
# - further narrow components (besides known emission lines)
# will not be corrected for instrumental resolution
if !source.options[:instr_broadening]
if isa(c, QSFit.SpecLineGauss)
c.spec_res_kms = source.spectra[id].resolution
else
println(logio(source), "Line $cname is not a Gaussian profile: Can't take spectral resolution into account")
end
end
end
# Patch parameters
@patch! begin
# model[:OIII_4959].norm = model[:OIII_5007].norm / 3
model[:OIII_4959].voff = model[:OIII_5007].voff
end
@patch! begin
model[:OIII_5007_bw].voff += model[:OIII_5007].voff
model[:OIII_5007_bw].fwhm += model[:OIII_5007].fwhm
end
@patch! begin
# model[:OI_6300].norm = model[:OI_6364].norm / 3
model[:OI_6300].voff = model[:OI_6364].voff
end
@patch! begin
# model[:NII_6549].norm = model[:NII_6583].norm / 3
model[:NII_6549].voff = model[:NII_6583].voff
end
@patch! begin
# model[:SII_6716].norm = model[:SII_6731].norm / 1.5
model[:SII_6716].voff = model[:SII_6731].voff
end
@patch! model[:na_Hb].voff = model[:na_Ha].voff
# The following are required to avoid degeneracy with iron
# template
@patch! begin
model[:Hg].voff = model[:br_Hb].voff
model[:Hg].fwhm = model[:br_Hb].fwhm
end
@patch! begin
model[:br_Hg].voff = model[:br_Hb].voff
model[:br_Hg].fwhm = model[:br_Hb].fwhm
end
@patch! begin
model[:na_Hg].voff = model[:na_Hb].voff
model[:na_Hg].fwhm = model[:na_Hb].fwhm
end
# Ensure the peak luminosity of the broad base component is
# smaller than that of the associated broad component:
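# (For a profile whose normalization is the integrated luminosity the peak scales as
# norm / FWHM.  With the patch below the effective bb norm becomes
# bb.norm * br.norm * bb.fwhm / br.fwhm, hence peak(bb) ∝ bb.norm * peak(br), and since
# bb.norm ≤ 1 the broad-base peak cannot exceed the broad-component peak.)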
if haskey(model, :br_Hb) &&
haskey(model, :bb_Hb)
model[:bb_Hb].norm.high = 1
model[:bb_Hb].norm.val = 0.5
@patch! model[:bb_Hb].norm *= model[:br_Hb].norm / model[:br_Hb].fwhm * model[:bb_Hb].fwhm
end
if haskey(model, :br_Ha) &&
haskey(model, :bb_Ha)
model[:bb_Ha].norm.high = 1
model[:bb_Ha].norm.val = 0.5
@patch! model[:bb_Ha].norm *= model[:br_Ha].norm / model[:br_Ha].fwhm * model[:bb_Ha].fwhm
end
# Force Hg and Hb lines to have the same shape as Ha
model[:br_Ha].norm.low = 1.e-10 # avoid division by zero
@patch! begin
model[:na_Hg].norm = model[:br_Hg].norm * (model[:na_Ha].norm / model[:br_Ha].norm)
model[:na_Hg].voff = model[:na_Ha].voff
model[:na_Hg].fwhm = model[:na_Ha].fwhm
model[:bb_Hg].norm = model[:br_Hg].norm * (model[:bb_Ha].norm / model[:br_Ha].norm)
model[:bb_Hg].voff = model[:bb_Ha].voff
model[:bb_Hg].fwhm = model[:bb_Ha].fwhm
model[:br_Hg].voff = model[:br_Ha].voff
model[:br_Hg].fwhm = model[:br_Ha].fwhm
end
@patch! begin
model[:na_Hb].norm = model[:br_Hb].norm * (model[:na_Ha].norm / model[:br_Ha].norm)
model[:na_Hb].voff = model[:na_Ha].voff
model[:na_Hb].fwhm = model[:na_Ha].fwhm
model[:bb_Hb].norm = model[:br_Hb].norm * (model[:bb_Ha].norm / model[:br_Ha].norm)
model[:bb_Hb].voff = model[:bb_Ha].voff
model[:bb_Hb].fwhm = model[:bb_Ha].fwhm
model[:br_Hb].voff = model[:br_Ha].voff
model[:br_Hb].fwhm = model[:br_Ha].fwhm
end
model[:OIII_5007].norm.fixed = 1
model[:OIII_5007].norm.val = 1.
bestfit = fit!(model, source.data[id], minimizer=mzer); show(logio(source), bestfit)
for lname in line_names[id]
freeze(model, lname)
end
end
# Add unknown lines
println(logio(source), "\nFit unknown emission lines...")
if source.options[:n_unk] > 0
for id in 1:Nspec
model = multi[id]
tmp = OrderedDict{Symbol, GFit.AbstractComponent}()
for j in 1:source.options[:n_unk]
tmp[Symbol(:unk, j)] = line_component(TRecipe, QSFit.UnkLine(5e3))
tmp[Symbol(:unk, j)].norm_integrated = source.options[:norm_integrated]
end
for (cname, comp) in tmp
model[cname] = comp
end
model[:UnkLines] = SumReducer(collect(keys(tmp)))
model[:main] = SumReducer([:Continuum, :Iron, line_groups[id]..., :UnkLines])
evaluate!(model)
for j in 1:source.options[:n_unk]
freeze(model, Symbol(:unk, j))
end
end
else
# Here we need a :UnkLines reducer, even when n_unk is 0
for id in 1:Nspec
model = multi[id]
model[:UnkLines] = @expr m -> [0.]
model[:main] = SumReducer([:Continuum, :Iron, line_groups[id]...])
end
end
evaluate!(multi)
# Set "unknown" line center wavelength where there is a maximum in
# the fit residuals, and re-run a fit.
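# The placement is greedy: at each step the normalized residuals are computed, the
# regions already used (within 1 Å), the avoidance regions and the spectrum edges are
# zeroed out, a line is centred on the largest remaining positive residual, fitted on
# its own and then frozen, until n_unk lines have been placed or no positive residual
# is left.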
for id in 1:Nspec
model = multi[id]
λ = source.domain[id][:]
λunk = Vector{Float64}()
while true
(length(λunk) >= source.options[:n_unk]) && break
evaluate!(model)
Δ = (source.data[id].val - model()) ./ source.data[id].unc
# Avoid considering again the same region (within 1A) TODO: within resolution
for l in λunk
Δ[findall(abs.(l .- λ) .< 1)] .= 0.
end
# Avoidance regions
for rr in source.options[:unk_avoid]
Δ[findall(rr[1] .< λ .< rr[2])] .= 0.
end
# Do not add lines too close to the edges since these may
# affect the qso_cont fitting
Δ[findall((λ .< minimum(λ)*1.02) .|
(λ .> maximum(λ)*0.98))] .= 0.
iadd = argmax(Δ)
(Δ[iadd] <= 0) && break # No residual is greater than 0, skip further residuals....
push!(λunk, λ[iadd])
cname = Symbol(:unk, length(λunk))
model[cname].norm.val = 1.
model[cname].center.val = λ[iadd]
model[cname].center.low = λ[iadd] - λ[iadd]/10. # allow to shift 10%
model[cname].center.high = λ[iadd] + λ[iadd]/10.
thaw(model, cname)
bestfit = fit!(model, source.data[id], minimizer=mzer); show(logio(source), bestfit)
freeze(model, cname)
end
end
evaluate!(multi)
# ----------------------------------------------------------------
# Last run with all parameters free
println(logio(source), "\nLast run with all parameters free...")
for id in 1:Nspec
model = multi[id]
thaw(model, :qso_cont)
(:galaxy in keys(model)) && thaw(model, :galaxy)
(:balmer in keys(model)) && thaw(model, :balmer)
(:ironuv in keys(model)) && thaw(model, :ironuv)
(:ironoptbr in keys(model)) && thaw(model, :ironoptbr)
(:ironoptna in keys(model)) && thaw(model, :ironoptna)
for lname in line_names[id]
thaw(model, lname)
end
for j in 1:source.options[:n_unk]
cname = Symbol(:unk, j)
if model[cname].norm.val > 0
thaw(model, cname)
else
freeze(model, cname)
end
end
end
bestfit = fit!(multi, source.data, minimizer=mzer)
# Disable "unknown" lines whose normalization uncertainty is larger
# than 3 times the normalization
needs_fitting = false
for id in 1:Nspec
model = multi[id]
for ii in 1:source.options[:n_unk]
cname = Symbol(:unk, ii)
isfixed(model, cname) && continue
if bestfit[id][cname].norm.val == 0.
freeze(model, cname)
needs_fitting = true
println(logio(source), "Disabling $cname (norm. = 0)")
elseif bestfit[id][cname].norm.unc / bestfit[id][cname].norm.val > 3
model[cname].norm.val = 0.
freeze(model, cname)
needs_fitting = true
println(logio(source), "Disabling $cname (unc. / norm. > 3)")
end
end
end
if needs_fitting
println(logio(source), "\nRe-run fit...")
bestfit = fit!(multi, source.data, minimizer=mzer)
end
println(logio(source))
show(logio(source), bestfit)
out = QSFit.QSFitMultiResults(source, multi, bestfit)
elapsed = time() - elapsed
println(logio(source), "\nElapsed time: $elapsed s")
QSFit.close_logio(source)
return out
end
function minimizer(source::QSO{T}) where T <: DefaultRecipe
mzer = GFit.cmpfit()
mzer.Δfitstat_theshold = 1.e-5
return mzer
end
function add_qso_continuum!(source::QSO{T}, model::Model; id=1) where T <: DefaultRecipe
λ = domain(model)[:]
comp = QSFit.powerlaw(3000)
comp.x0.val = median(λ)
comp.norm.val = Spline1D(λ, source.data[id].val, k=1, bc="error")(comp.x0.val)
comp.alpha.val = -1.5
comp.alpha.low = -3
comp.alpha.high = 1
model[:qso_cont] = comp
model[:Continuum] = SumReducer([:qso_cont])
end
function add_host_galaxy!(source::QSO{T}, model::Model; id=1) where T <: DefaultRecipe
λ = domain(model)[:]
if source.options[:use_host_template] &&
(minimum(λ) .< 5500 .< maximum(λ))
model[:galaxy] = QSFit.hostgalaxy(source.options[:host_template])
model[:Continuum] = SumReducer([:qso_cont, :galaxy])
# Split total flux between continuum and host galaxy
vv = Spline1D(λ, source.data[id].val, k=1, bc="error")(5500.)
model[:galaxy].norm.val = 1/2 * vv
model[:qso_cont].x0.val *= 1/2 * vv / Spline1D(λ, model(:qso_cont), k=1, bc="error")(5500.)
end
end
function add_balmer_cont!(source::QSO{T}, model::Model; id=1) where T <: DefaultRecipe
if source.options[:use_balmer]
tmp = [:qso_cont, :balmer]
(:galaxy in keys(model)) && push!(tmp, :galaxy)
model[:balmer] = QSFit.balmercont(0.1, 0.5)
model[:Continuum] = SumReducer(tmp)
c = model[:balmer]
c.norm.val = 0.1
c.norm.fixed = false
c.norm.high = 0.5
c.ratio.val = 0.5
c.ratio.fixed = false
c.ratio.low = 0.1
c.ratio.high = 1
@patch! model[:balmer].norm *= model[:qso_cont].norm
end
end
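# renorm_cont!: after the first continuum fit, lower the power-law normalization in 1%
# steps until the model lies below the data in more than 90% of the spectral bins (or
# the normalization has dropped below 1/5 of its initial value), so that the continuum
# does not over-shoot the data before the emission components are added.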
function renorm_cont!(source::QSO{T}, model::Model; id=1) where T <: DefaultRecipe
freeze(model, :qso_cont)
c = model[:qso_cont]
initialnorm = c.norm.val
if c.norm.val > 0
println(logio(source), "Cont. norm. (before): ", c.norm.val)
while true
residuals = (model() - source.data[id].val) ./ source.data[id].unc
(count(residuals .< 0) / length(residuals) > 0.9) && break
(c.norm.val < initialnorm / 5) && break # give up
c.norm.val *= 0.99
evaluate!(model)
end
println(logio(source), "Cont. norm. (after) : ", c.norm.val)
else
println(logio(source), "Skipping cont. renormalization")
end
end
function add_iron_uv!(source::QSO{T}, model::Model; id=1) where T <: DefaultRecipe
λ = domain(model)[:]
if source.options[:use_ironuv]
fwhm = 3000.
source.options[:instr_broadening] || (fwhm = sqrt(fwhm^2 + (source.spectra[id].resolution * 2.355)^2))
comp = QSFit.ironuv(fwhm)
(_1, _2, coverage) = spectral_coverage(λ, source.spectra[id].resolution, comp)
threshold = get(source.options[:min_spectral_coverage], :ironuv, source.options[:min_spectral_coverage][:default])
if coverage >= threshold
model[:ironuv] = comp
model[:ironuv].norm.val = 0.5
push!(model[:Iron].list, :ironuv)
else
println(logio(source), "Ignoring ironuv component (threshold: $threshold)")
end
end
end
function add_iron_opt!(source::QSO{T}, model::Model; id=1) where T <: DefaultRecipe
λ = domain(model)[:]
if source.options[:use_ironopt]
fwhm = 3000.
source.options[:instr_broadening] || (fwhm = sqrt(fwhm^2 + (source.spectra[id].resolution * 2.355)^2))
comp = QSFit.ironopt_broad(fwhm)
(_1, _2, coverage) = spectral_coverage(λ, source.spectra[id].resolution, comp)
threshold = get(source.options[:min_spectral_coverage], :ironopt, source.options[:min_spectral_coverage][:default])
if coverage >= threshold
fwhm = 500.
source.options[:instr_broadening] || (fwhm = sqrt(fwhm^2 + (source.spectra[id].resolution * 2.355)^2))
model[:ironoptbr] = comp
model[:ironoptna] = QSFit.ironopt_narrow(fwhm)
model[:ironoptbr].norm.val = 0.1 # TODO: guess a sensible value
model[:ironoptna].norm.val = 0.0
freeze(model, :ironoptna) # will be freed during last run
push!(model[:Iron].list, :ironoptbr)
push!(model[:Iron].list, :ironoptna)
else
println(logio(source), "Ignoring ironopt component (threshold: $threshold)")
end
end
end
function add_emission_lines!(source::QSO{T}, model::Model; id=1) where T <: DefaultRecipe
line_groups = unique(collect(values(source.line_names[id])))
for (cname, comp) in source.line_comps[id]
comp.norm_integrated = source.options[:norm_integrated]
model[cname] = comp
end
for (group, lnames) in QSFit.invert_dictionary(source.line_names[id])
model[group] = SumReducer(lnames)
end
model[:main] = SumReducer([:Continuum, :Iron, line_groups...])
if haskey(model, :MgII_2798)
model[:MgII_2798].voff.low = -1000
model[:MgII_2798].voff.high = 1000
end
if haskey(model, :OIII_5007_bw)
model[:OIII_5007_bw].fwhm.val = 500
model[:OIII_5007_bw].fwhm.low = 1e2
model[:OIII_5007_bw].fwhm.high = 1e3
model[:OIII_5007_bw].voff.low = 0
model[:OIII_5007_bw].voff.high = 2e3
end
evaluate!(model)
end
function guess_emission_lines_values!(source::QSO{T}, model::Model; id=1) where T <: DefaultRecipe
λ = domain(model)[:]
resid = source.data[id].val - model(:Continuum) .- model(:Iron)
for cname in keys(source.line_names[id])
c = model[cname]
resid_at_line = Spline1D(λ, resid, k=1, bc="nearest")(c.center.val)
c.norm.val *= abs(resid_at_line) / maximum(model(cname))
# If instrumental broadening is not used and the line profile
# is a Gaussian one, take the spectral resolution into account.
# This is significantly faster than convolving with an
# instrument response but has some limitations:
# - works only with Gaussian profiles;
# - all components must be additive (i.e. no absorptions)
# - further narrow components (besides known emission lines)
# will not be corrected for instrumental resolution
if !source.options[:instr_broadening]
if isa(c, QSFit.SpecLineGauss)
c.spec_res_kms = source.spectra[id].resolution
else
println(logio(source), "Line $cname is not a Gaussian profile: Can't take spectral resolution into account")
end
end
end
evaluate!(model)
end
function add_patch_functs!(source::QSO{T}, model::Model; id=1) where T <: DefaultRecipe
# Patch parameters
@patch! begin
# model[:OIII_4959].norm = model[:OIII_5007].norm / 3
model[:OIII_4959].voff = model[:OIII_5007].voff
end
@patch! begin
model[:OIII_5007_bw].voff += model[:OIII_5007].voff
model[:OIII_5007_bw].fwhm += model[:OIII_5007].fwhm
end
@patch! begin
# model[:OI_6300].norm = model[:OI_6364].norm / 3
model[:OI_6300].voff = model[:OI_6364].voff
end
@patch! begin
# model[:NII_6549].norm = model[:NII_6583].norm / 3
model[:NII_6549].voff = model[:NII_6583].voff
end
@patch! begin
# model[:SII_6716].norm = model[:SII_6731].norm / 1.5
model[:SII_6716].voff = model[:SII_6731].voff
end
@patch! model[:na_Hb].voff = model[:na_Ha].voff
# The following are required to avoid degeneracy with iron
# template
@patch! begin
model[:Hg].voff = model[:br_Hb].voff
model[:Hg].fwhm = model[:br_Hb].fwhm
end
@patch! begin
model[:br_Hg].voff = model[:br_Hb].voff
model[:br_Hg].fwhm = model[:br_Hb].fwhm
end
@patch! begin
model[:na_Hg].voff = model[:na_Hb].voff
model[:na_Hg].fwhm = model[:na_Hb].fwhm
end
# Ensure the peak luminosity of the broad base component is
# smaller than that of the associated broad component:
if haskey(model, :br_Hb) &&
haskey(model, :bb_Hb)
model[:bb_Hb].norm.high = 1
model[:bb_Hb].norm.val = 0.5
@patch! model[:bb_Hb].norm *= model[:br_Hb].norm / model[:br_Hb].fwhm * model[:bb_Hb].fwhm
end
if haskey(model, :br_Ha) &&
haskey(model, :bb_Ha)
model[:bb_Ha].norm.high = 1
model[:bb_Ha].norm.val = 0.5
@patch! model[:bb_Ha].norm *= model[:br_Ha].norm / model[:br_Ha].fwhm * model[:bb_Ha].fwhm
end
end
function add_unknown_lines!(source::QSO{T}, model::Model; id=1) where T <: DefaultRecipe
(source.options[:n_unk] == 0) && (return nothing)  # nothing to do
λ = domain(model)[:]
mzer = minimizer(source)
line_groups = unique(collect(values(source.line_names[id])))
tmp = OrderedDict{Symbol, GFit.AbstractComponent}()
for j in 1:source.options[:n_unk]
tmp[Symbol(:unk, j)] = line_component(T, QSFit.UnkLine(5e3))
tmp[Symbol(:unk, j)].norm_integrated = source.options[:norm_integrated]
end
for (cname, comp) in tmp
model[cname] = comp
end
model[:UnkLines] = SumReducer(collect(keys(tmp)))
model[:main] = SumReducer([:Continuum, :Iron, line_groups..., :UnkLines])
evaluate!(model)
for j in 1:source.options[:n_unk]
freeze(model, Symbol(:unk, j))
end
evaluate!(model)
# Set "unknown" line center wavelength where there is a maximum in
# the fit residuals, and re-run a fit.
λunk = Vector{Float64}()
while true
(length(λunk) >= source.options[:n_unk]) && break
evaluate!(model)
Δ = (source.data[id].val - model()) ./ source.data[id].unc
# Avoid considering again the same region (within 1A) TODO: within resolution
for l in λunk
Δ[findall(abs.(l .- λ) .< 1)] .= 0.
end
# Avoidance regions
for rr in source.options[:unk_avoid]
Δ[findall(rr[1] .< λ .< rr[2])] .= 0.
end
# Do not add lines too close to the edges since these may
# affect the qso_cont fitting
Δ[findall((λ .< minimum(λ)*1.02) .|
(λ .> maximum(λ)*0.98))] .= 0.
iadd = argmax(Δ)
(Δ[iadd] <= 0) && break # No residual is greater than 0, skip further residuals....
push!(λunk, λ[iadd])
cname = Symbol(:unk, length(λunk))
model[cname].norm.val = 1.
model[cname].center.val = λ[iadd]
model[cname].center.low = λ[iadd] - λ[iadd]/10. # allow to shift 10%
model[cname].center.high = λ[iadd] + λ[iadd]/10.
thaw(model, cname)
bestfit = fit!(model, source.data[id], minimizer=mzer); show(logio(source), bestfit)
freeze(model, cname)
end
evaluate!(model)
end
function neglect_weak_features!(source::QSO{T}, model::Model, bestfit; id=1) where T <: DefaultRecipe
# Disable "unknown" lines whose normalization uncertainty is larger
# than 3 times the normalization
needs_fitting = false
for ii in 1:source.options[:n_unk]
cname = Symbol(:unk, ii)
isfixed(model, cname) && continue
if bestfit[cname].norm.val == 0.
freeze(model, cname)
needs_fitting = true
println(logio(source), "Disabling $cname (norm. = 0)")
elseif bestfit[cname].norm.unc / bestfit[cname].norm.val > 3
model[cname].norm.val = 0.
freeze(model, cname)
needs_fitting = true
println(logio(source), "Disabling $cname (unc. / norm. > 3)")
end
end
return needs_fitting
end
function fit(source::QSO{TRecipe}; id=1) where TRecipe <: q1927p654
elapsed = time()
mzer = default_minimizer(TRecipe)
model = Model(source.domain[id])
# TODO if source.options[:instr_broadening]
# TODO GFit.set_instr_response!(model[1], (l, f) -> instrumental_broadening(l, f, source.spectra[id].resolution))
# TODO end
println(logio(source), "\nFit continuum components...")
add_qso_continuum!(source, model)
add_host_galaxy!(source, model)
add_balmer_cont!(source, model)
bestfit = fit!(model, source.data[id], minimizer=mzer); show(logio(source), bestfit)
renorm_cont!(source, model)
freeze(model, :qso_cont)
haskey(model, :galaxy) && freeze(model, :galaxy)
haskey(model, :balmer) && freeze(model, :balmer)
evaluate!(model)
println(logio(source), "\nFit iron templates...")
model[:Iron] = SumReducer([])
model[:main] = SumReducer([:Continuum, :Iron])
add_iron_uv!( source, model)
add_iron_opt!(source, model)
evaluate!(model)
if length(model[:Iron].list) > 0
bestfit = fit!(model, source.data[id], minimizer=mzer); show(logio(source), bestfit)
haskey(model, :ironuv ) && freeze(model, :ironuv)
haskey(model, :ironoptbr) && freeze(model, :ironoptbr)
haskey(model, :ironoptna) && freeze(model, :ironoptna)
end
evaluate!(model)
println(logio(source), "\nFit known emission lines...")
add_emission_lines!(source, model)
guess_emission_lines_values!(source, model)
add_patch_functs!(source, model)
# Force Hg and Hb lines to have the same shape as Ha
model[:br_Ha].norm.low = 1.e-10 # avoid division by zero
@patch! begin
model[:na_Hg].norm = model[:br_Hg].norm * (model[:na_Ha].norm / model[:br_Ha].norm)
model[:na_Hg].voff = model[:na_Ha].voff
model[:na_Hg].fwhm = model[:na_Ha].fwhm
model[:bb_Hg].norm = model[:br_Hg].norm * (model[:bb_Ha].norm / model[:br_Ha].norm)
model[:bb_Hg].voff = model[:bb_Ha].voff
model[:bb_Hg].fwhm = model[:bb_Ha].fwhm
model[:br_Hg].voff = model[:br_Ha].voff
model[:br_Hg].fwhm = model[:br_Ha].fwhm
end
@patch! begin
model[:na_Hb].norm = model[:br_Hb].norm * (model[:na_Ha].norm / model[:br_Ha].norm)
model[:na_Hb].voff = model[:na_Ha].voff
model[:na_Hb].fwhm = model[:na_Ha].fwhm
model[:bb_Hb].norm = model[:br_Hb].norm * (model[:bb_Ha].norm / model[:br_Ha].norm)
model[:bb_Hb].voff = model[:bb_Ha].voff
model[:bb_Hb].fwhm = model[:bb_Ha].fwhm
model[:br_Hb].voff = model[:br_Ha].voff
model[:br_Hb].fwhm = model[:br_Ha].fwhm
end
bestfit = fit!(model, source.data[id], minimizer=mzer); show(logio(source), bestfit)
for lname in keys(source.line_names[id])
freeze(model, lname)
end
println(logio(source), "\nFit unknown emission lines...")
add_unknown_lines!(source, model)
println(logio(source), "\nLast run with all parameters free...")
thaw(model, :qso_cont)
(:galaxy in keys(model)) && thaw(model, :galaxy)
(:balmer in keys(model)) && thaw(model, :balmer)
(:ironuv in keys(model)) && thaw(model, :ironuv)
(:ironoptbr in keys(model)) && thaw(model, :ironoptbr)
(:ironoptna in keys(model)) && thaw(model, :ironoptna)
for lname in keys(source.line_names[id])
thaw(model, lname)
end
for j in 1:source.options[:n_unk]
cname = Symbol(:unk, j)
if model[cname].norm.val > 0
thaw(model, cname)
else
freeze(model, cname)
end
end
bestfit = fit!(model, source.data[id], minimizer=mzer)
if neglect_weak_features!(source, model, bestfit)
println(logio(source), "\nRe-run fit...")
bestfit = fit!(model, source.data[id], minimizer=mzer)
end
println(logio(source))
show(logio(source), bestfit)
out = QSFit.QSFitResults(source, model, bestfit)
elapsed = time() - elapsed
println(logio(source), "\nElapsed time: $elapsed s")
QSFit.close_logio(source)
return out
end
Date,MJD start,dt,Telescope,Inst,Exp
20180306,58183.5,72,MDM Hiltner 2.4m,OSMOS,600
20180308,58185.5,74,MDM Hiltner 2.4m,OSMOS,1800
20180309,58186.5,75,MDM Hiltner 2.4m,OSMOS,2700
20180313,58190.0,79,Liverpool Telescope 2m,SPRAT,600
20180323,58200.5,89,APO 3.5m,DIS,300
20180423,58231.6,119,Las Cumbres 2m,FLOYDS,1800
20180424,58232.5,120,Shane 3m,Kast,1500
20180507,58245.6,133,Las Cumbres 2m,FLOYDS,1800
20180514,58252.5,140,Las Cumbres 2m,FLOYDS,1800
20180528,58266.5,153,Las Cumbres 2m,FLOYDS,1800
20180603,58272.6,159,Las Cumbres 2m,FLOYDS,1800
20180611,58280.6,167,Las Cumbres 2m,FLOYDS,1800
20180613,58282.5,169,APO 3.5m,DIS,600
20180624,58293.3,180,Las Cumbres 2m,FLOYDS,1800
20180628,58297.5,184,Las Cumbres 2m,FLOYDS,3600
20180706,58305.5,192,Las Cumbres 2m,FLOYDS,3600
20180716,58315.5,201,Keck,LRIS,900
20180717,58316.5,202,Las Cumbres 2m,FLOYDS,3600
20180727,58326.5,212,Las Cumbres 2m,FLOYDS,3600
20180811,58341.0,227,Hale200 inch,DBSP,1200
20180812,58342.4,228,Las Cumbres 2m,FLOYDS,3600
20180908,58369.3,254,Las Cumbres 2m,FLOYDS,3600
20181113,58435.2,319,Las Cumbres 2m,FLOYDS,3600
20180828,58358.4,244,HST,COS,2866
20180828,58358.5,244,HST,STIS,2740
20190319,0,119,Las Cumbres 2m,FLOYDS,1800
20190406,0,119,Las Cumbres 2m,FLOYDS,1800
20190519,0,119,Las Cumbres 2m,FLOYDS,1800