"""Convert DL1 files to DL2 with lstchain_dl1_to_dl2, run interactively or submitted via sbatch."""

import os
from os.path import join

import pandas as pd
import numpy as np

from utils import (
    get_runs_database,
    get_db,
    parse_arguments,
    get_config,
    create_DL1_list,
    manage_submission,
)

# Argument parser
args = parse_arguments(
    description="DL1 to DL2 converter",
    add_run=True,
    add_job=True,
)

config_file = get_config(args.config)
database_file = get_db(config_file['db'])

# Look into the database and get the run list
databaseRuns = get_runs_database(args, database_file)

final_run_list = pd.DataFrame(columns=['path'])

# If a txt file with runs and nights is given, use it instead of searching the database.
# The file is expected to contain two integer columns: run number and night.
if args.runlist is not None:
    rl = np.genfromtxt(args.runlist, usecols=(0, 1), dtype=int)
    runs = rl[:, 0]
    nights = rl[:, 1]
    for night in np.unique(nights):
        run_list = create_DL1_list(args, config_file, runs, str(night))
        final_run_list = pd.concat([final_run_list, run_list])
else:
    for night in config_file['dl1_data']['night']:
        run_list = create_DL1_list(args, config_file, databaseRuns, str(night))
        final_run_list = pd.concat([final_run_list, run_list])

if args.verbose:
    print("Final run list to be analysed:")
    print(final_run_list)

# Output directory
out_dir = config_file['base_dir'] + '/DL2/' + args.source_name
# Path to the directory with the trained models
models = config_file['path_models']

# Check if the output directory exists; if not, create it
if not os.path.isdir(out_dir):
    try:
        os.makedirs(out_dir, exist_ok=True)
        print(f'Output directory {out_dir} created successfully.')
    except OSError:
        print(f'Output directory {out_dir} can not be created.')
else:
    print(f'DL2 data will be saved in {out_dir}.')

for run in final_run_list.index:
    # The night comes from the runlist file if one was given, otherwise from the database
    if args.runlist is not None:
        night = str(final_run_list.loc[run, 'night'])
    else:
        night = str(database_file.loc[run, 'day'])
    dl1_file = final_run_list.loc[run, 'path']

    path = night + '/' + config_file['dl1_data']['version'] + '/' + config_file['dl1_data']['cleaning']
    if args.outdir is not None:
        dl2_output_path = args.outdir
    else:
        dl2_output_path = join(out_dir, path)

    dl2_cmd = (
        f'lstchain_dl1_to_dl2 '
        f'--input-file {dl1_file} '
        f'--output-dir {dl2_output_path} '
        f'--path-models {models}'
    )
    if args.config_analysis is not None:
        dl2_cmd += f' --config {args.config_analysis}'

    # Send interactive, sbatch or dry run
    if args.verbose:
        print('\n')
        print(dl2_cmd)

    if not args.dry:
        if args.submit:
            # Create the script that will be submitted and keep its name in scriptname
            scriptname = manage_submission(args, config_file, dl2_cmd, run, level="2")
            os.system("sbatch " + scriptname)
        else:
            print('Interactive conversion of the DL1 to DL2.\n')
            os.system(dl2_cmd)