Loading Untitled.ipynb 0 → 100644 +0 −0 Original line number Diff line number Diff line run_pleiadi 0 → 100644 +34 −0 Original line number Diff line number Diff line #!/bin/bash #SBATCH --nodes=4 #SBATCH --ntasks-per-node=2 #SBATCH --cpus-per-task=18 #SBATCH --time=01:00:00 #SBATCH --job-name=dADP-test #SBATCH --account=ulearn #SBATCH --partition=pleiadi #SBATCH --output=out_pleiadi #SBATCH --error=err_pleiadi #SBATCH --mem=230G cd $SLURM_SUBMIT_DIR module restore dev_pleiadi source /u/ftomba/my_envs/dadac-dev/bin/activate make clean make export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK} export OMP_PLACES=cores export OMP_PROC_BIND=close #export PSM2_MQ_SENDREQS_MAX=268435456 #export PSM2_MQ_RECVREQS_MAX=268435456 rm bb/* #time mpirun -n ${SLURM_NTASKS} --map-by ppr:1:node:PE=${SLURM_CPUS_PER_TASK} main time mpirun -n ${SLURM_NTASKS} --map-by ppr:1:socket:PE=${SLURM_CPUS_PER_TASK} main #time mpirun -n ${SLURM_NTASKS} main #time python3 check.py src/tree/tree.c +1 −1 Original line number Diff line number Diff line Loading @@ -3515,7 +3515,7 @@ clusters_t Heuristic1(global_context_t *ctx, int verbose) */ DB_PRINT("rank %d proc %d\n", ctx -> mpi_rank, proc_points); //DB_PRINT("rank %d proc %d\n", ctx -> mpi_rank, proc_points); MPI_Allreduce(MPI_IN_PLACE, &completed, 1, MPI_INT, MPI_SUM, ctx -> mpi_communicator); completed = completed == ctx -> world_size ? 1 : 0; /* copy cluster idx into buffer */ Loading Loading
#!/bin/bash
# SLURM batch script: build and run the dADP MPI test on the 'pleiadi' partition.
# 4 nodes x 2 MPI ranks (one per socket), 18 OpenMP threads per rank.
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=2
#SBATCH --cpus-per-task=18
#SBATCH --time=01:00:00
#SBATCH --job-name=dADP-test
#SBATCH --account=ulearn
#SBATCH --partition=pleiadi
#SBATCH --output=out_pleiadi
#SBATCH --error=err_pleiadi
#SBATCH --mem=230G

# Work from the submission directory; abort if it is unreachable so that
# the build and the 'rm bb/*' below never run in the wrong directory.
cd "${SLURM_SUBMIT_DIR:?SLURM_SUBMIT_DIR is not set}" || exit 1

# Restore the saved module collection and enter the project virtualenv.
module restore dev_pleiadi
source /u/ftomba/my_envs/dadac-dev/bin/activate

# Rebuild from scratch; stop the job early if the build fails.
make clean
make || exit 1

# One OpenMP thread per allocated core, pinned close to the parent rank.
export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK}"
export OMP_PLACES=cores
export OMP_PROC_BIND=close

#export PSM2_MQ_SENDREQS_MAX=268435456
#export PSM2_MQ_RECVREQS_MAX=268435456

# Clear previous run artifacts; -f so an empty bb/ does not raise an error.
rm -f bb/*

#time mpirun -n ${SLURM_NTASKS} --map-by ppr:1:node:PE=${SLURM_CPUS_PER_TASK} main
time mpirun -n "${SLURM_NTASKS}" --map-by "ppr:1:socket:PE=${SLURM_CPUS_PER_TASK}" main
#time mpirun -n ${SLURM_NTASKS} main
#time python3 check.py
src/tree/tree.c +1 −1 Original line number Diff line number Diff line Loading @@ -3515,7 +3515,7 @@ clusters_t Heuristic1(global_context_t *ctx, int verbose) */ DB_PRINT("rank %d proc %d\n", ctx -> mpi_rank, proc_points); //DB_PRINT("rank %d proc %d\n", ctx -> mpi_rank, proc_points); MPI_Allreduce(MPI_IN_PLACE, &completed, 1, MPI_INT, MPI_SUM, ctx -> mpi_communicator); completed = completed == ctx -> world_size ? 1 : 0; /* copy cluster idx into buffer */ Loading