Commit 063e6344 authored by Alice Donini

add dl1 to dl2 scripts

parent 74e9668b
import os
from os.path import join
import pandas as pd
import numpy as np
from utils import (
    get_runs_database,
    get_db,
    parse_arguments,
    get_config,
    create_DL1_list,
    manage_submission,
)
# Argument parser
args = parse_arguments(description="DL1 to DL2 converter",
                       add_run=True,
                       add_job=True,
                       )
config_file = get_config(args.config)
database_file = get_db(config_file['db'])
# Look into the database and get the run list
databaseRuns = get_runs_database(args, database_file)
final_run_list = pd.DataFrame(columns=['path'])
# If a txt file with runs and nights is given, use it instead of searching the database
if args.runlist is not None:
    rl = np.genfromtxt(args.runlist, usecols=(0, 1), dtype=int)
    runs = rl[:, 0]
    nights = rl[:, 1]
    for night in np.unique(nights):
        run_list = create_DL1_list(args, config_file, runs, str(night))
        final_run_list = pd.concat([final_run_list, run_list])
else:
    for night in config_file['dl1_data']['night']:
        run_list = create_DL1_list(args, config_file, databaseRuns, str(night))
        final_run_list = pd.concat([final_run_list, run_list])
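# Note on the runlist option above: the text file is assumed to hold two integer
# columns per line, run number and night (YYYYMMDD). Illustrative example
# (the run numbers are made up):
#   2967  20210911
#   2968  20210912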
if args.verbose:
    print("Final run list to be analysed:")
    print(final_run_list)
# Output directory
out_dir = config_file['base_dir'] + '/DL2/' + args.source_name
# Path to model directory
models = config_file['path_models']
# Check if the output directory exists, if not, create it
if not os.path.isdir(out_dir):
    try:
        os.makedirs(out_dir, exist_ok=True)
        print(f'Output directory {out_dir} created successfully.')
    except OSError:
        print(f'Output directory {out_dir} can not be created.')
else:
    print(f'DL2 data will be saved in {out_dir}.')
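# For illustration: with the base_dir of the example configuration further below
# and a source named Crab, out_dir would resolve to
# /fefs/aswg/workspace/alice.donini/Analysis/data/DL2/Crab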
# If a txt file with runs and nights is given, use it instead of searching the database
if args.runlist is not None:
    for run in final_run_list.index:
        night = final_run_list.loc[run, 'night']
        dl1_file = final_run_list.loc[run, 'path']
        path = str(night) + '/' + config_file['dl1_data']['version'] + '/' + config_file['dl1_data']['cleaning']
        if args.outdir is not None:
            dl2_output_path = args.outdir
        else:
            dl2_output_path = join(out_dir, str(path))
        dl2_cmd = (
            f'lstchain_dl1_to_dl2 '
            f'--input-file {dl1_file} '
            f'--output-dir {dl2_output_path} '
            f'--path-models {models}'
        )
        if args.config_analysis is not None:
            dl2_cmd += f' --config {args.config_analysis}'
        # Send interactive, sbatch or dry run
        if args.verbose:
            print('\n')
            print(dl2_cmd)
        if not args.dry:
            if args.submit:
                # Create the script that will be submitted and return its name in scriptname
                scriptname = manage_submission(args, config_file, dl2_cmd, run, level="2")
                os.system("sbatch " + scriptname)
            else:
                print('Interactive conversion of the DL1 to DL2.\n')
                os.system(dl2_cmd)
else:
    for run in final_run_list.index:
        night = database_file.loc[run, 'day']
        dl1_file = final_run_list.loc[run, 'path']
        path = str(night) + '/' + config_file['dl1_data']['version'] + '/' + config_file['dl1_data']['cleaning']
        if args.outdir is not None:
            dl2_output_path = args.outdir
        else:
            dl2_output_path = join(out_dir, str(path))
        dl2_cmd = (
            f'lstchain_dl1_to_dl2 '
            f'--input-file {dl1_file} '
            f'--output-dir {dl2_output_path} '
            f'--path-models {models}'
        )
        if args.config_analysis is not None:
            dl2_cmd += f' --config {args.config_analysis}'
        # Send interactive, sbatch or dry run
        if args.verbose:
            print('\n')
            print(dl2_cmd)
        if not args.dry:
            if args.submit:
                # Create the script that will be submitted and return its name in scriptname
                scriptname = manage_submission(args, config_file, dl2_cmd, run, level="2")
                os.system("sbatch " + scriptname)
            else:
                print('Interactive conversion of the DL1 to DL2.\n')
                os.system(dl2_cmd)
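# Illustrative invocation (the script filename and the exact option spellings are
# defined by utils.parse_arguments and may differ):
#   python dl1_to_dl2.py --config config.yaml --source-name Crab --verbose --submit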
#!/bin/sh
#SBATCH -p short,long
#SBATCH -J jobname
#SBATCH --mem=70g
#SBATCH -N 1
#SBATCH --exclusive
#SBATCH -o logfile
ulimit -l unlimited
ulimit -s unlimited
ulimit -a
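# The lstchain_dl1_to_dl2 command generated by the converter is presumably appended
# below this header by manage_submission before the job is submitted, e.g.
# (illustrative paths):
# lstchain_dl1_to_dl2 --input-file /path/to/dl1_Run02967.h5 --output-dir /path/to/DL2 --path-models ../models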
# Directory where job files are written
jobmanager: ../jobmanager
# Database file name
db: database.csv
# LST real data path (don't modify it)
data_folder: /fefs/aswg/data/real
# path to main data folder of the user
# change it according to your working environment
base_dir: /fefs/aswg/workspace/alice.donini/Analysis/data
# Path to personal directory where output data will be saved.
# Uncomment and modify in case you want to use a non standard path
#output_folder: ../DL2/Crab
# Directory where config files are stored
#config_folder: ./
# Path to trained RF files
path_models: ../models
# Values for automatic selection of DL1 data
dl1_data:
  DL1_dir: /fefs/aswg/data/real/DL1  # path to DL1 directory
  night: [20210911, 20210912]  # day(s) of observation (more than one is possible)
  version: v0.9.1  # v0.7.3, v0.8.4, v0.9, v0.9.1
  cleaning: tailcut84
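# Illustrative layout implied by the converter script and the values above:
# DL1 input is expected under <DL1_dir>/<night>/<version>/<cleaning>, e.g.
#   /fefs/aswg/data/real/DL1/20210911/v0.9.1/tailcut84
# and DL2 output is written to <base_dir>/DL2/<source>/<night>/<version>/<cleaning>, e.g.
#   /fefs/aswg/workspace/alice.donini/Analysis/data/DL2/Crab/20210911/v0.9.1/tailcut84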