Commit d7bff9b0 authored by Mulas, Giacomo's avatar Mulas, Giacomo
Browse files

First working implementation of parallelism in scales over MPI

parent cf88c51b
Loading
Loading
Loading
Loading
+369 −205
Original line number Diff line number Diff line
@@ -12,6 +12,11 @@
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#ifndef MPI_VERSION
#include <mpi.h>
#endif
#endif

#ifndef INCLUDE_TYPES_H_
#include "../include/types.h"
@@ -61,14 +66,16 @@ int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConf
 *  \param data_file: `string` Name of the input data file.
 *  \param output_path: `string` Directory to write the output files in.
 */
void cluster(const string& config_file, const string& data_file, const string& output_path) {
void cluster(const string& config_file, const string& data_file, const string& output_path, const mixMPI *mpidata) {
  chrono::time_point<chrono::high_resolution_clock> t_start = chrono::high_resolution_clock::now();
  chrono::duration<double> elapsed;
  string message;
  string timing_name = output_path + "/c_timing.log";
  string timing_name = output_path + "/c_timing_mpi"+ to_string(mpidata->rank) +".log";
  FILE *timing_file = fopen(timing_name.c_str(), "w");
  Logger *time_logger = new Logger(LOG_DEBG, timing_file);
  Logger *logger = new Logger(LOG_INFO);
  // the following only happens on MPI process 0
  if (mpidata->rank == 0) {
    logger->log("INFO: making legacy configuration...", LOG_INFO);
    ScattererConfiguration *sconf = NULL;
    try {
@@ -100,7 +107,7 @@ void cluster(const string& config_file, const string& data_file, const string& o
      ScatteringAngles *p_scattering_angles = new ScatteringAngles(gconf);
      double wp = sconf->wp;
      FILE *output = fopen((output_path + "/c_OCLU").c_str(), "w");
    ClusterIterationData *cid = new ClusterIterationData(gconf, sconf);
      ClusterIterationData *cid = new ClusterIterationData(gconf, sconf, mpidata);
      const int ndi = cid->c4->nsph * cid->c4->nlim;
      np_int ndit = 2 * ndi;
      logger->log("INFO: Size of matrices to invert: " + to_string((int64_t)ndit) + " x " + to_string((int64_t)ndit) +".\n");
@@ -187,6 +194,15 @@ void cluster(const string& config_file, const string& data_file, const string& o
	logger->log(message);
	time_logger->log(message);

	// here go the calls that send data to be duplicated on other MPI processes from process 0 to others, using MPI broadcasts, but only if MPI is actually used
#ifdef MPI_VERSION
	if (mpidata->mpirunning) {
	  gconf->mpibcast(mpidata);
	  sconf->mpibcast(mpidata);	    
	  cid->mpibcast(mpidata);
	  p_scattering_angles->mpibcast(mpidata);
	}	
#endif
	// Create this variable and initialise it with a default here, so that it is defined anyway, with or without OpenMP support enabled
	int ompnumthreads = 1;

@@ -211,9 +227,9 @@ void cluster(const string& config_file, const string& data_file, const string& o
	  } else {
	    // this is not thread 0, so do create fresh copies of all local variables
	    cid_2 = new ClusterIterationData(*cid);
	  output_2 = fopen((output_path + "/c_OCLU_" + to_string(myompthread)).c_str(), "w");
	    output_2 = fopen((output_path + "/c_OCLU_" + to_string(mpidata->rank) + "_" + to_string(myompthread)).c_str(), "w");
	    tppoanp_2 = new fstream;
	  tppoanp_2->open((output_path + "/c_TPPOAN_" + to_string(myompthread)).c_str(), ios::out | ios::binary);
	    tppoanp_2->open((output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(myompthread)).c_str(), ios::out | ios::binary);
	  }
	  fstream &tppoan_2 = *tppoanp_2;
	  // make sure all threads align here: I don't want the following loop to accidentally start for thread 0, possibly modifying some variables before they are copied by all other threads
@@ -221,7 +237,7 @@ void cluster(const string& config_file, const string& data_file, const string& o
	  if (myompthread==0) logger->log("Syncing OpenMP threads and starting the loop on wavelengths\n");
	  // ok, now I can actually start the parallel calculations
#pragma omp for
	for (jxi488 = 2; jxi488 <= nxi; jxi488++) {
	  for (jxi488 = cid_2->firstxi; jxi488 <= cid_2->lastxi; jxi488++) {
	    int jer = cluster_jxi488_cycle(jxi488, sconf, gconf, p_scattering_angles, cid_2, output_2, output_path, *tppoanp_2, logger);
	  }

@@ -245,11 +261,11 @@ void cluster(const string& config_file, const string& data_file, const string& o
	  // thread 0 already wrote on global files, skip it and take care of appending the others
	  for (int ri = 1; ri < ompnumthreads; ri++) {
	    // Giovanni, please add code here in this loop to reopen the temporary files, re-read them and append them respectively to the global output and tppoan files, before closing them
	  string partial_file_name = output_path + "/c_OCLU_" + to_string(ri);
	  message = "Copying ASCII output of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... ";
	    string partial_file_name = output_path + "/c_OCLU_" + to_string(mpidata->rank) + "_" + to_string(ri);
	    message = "Copying ASCII output in MPI process " + to_string(mpidata->rank) + " of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... ";
	    logger->log(message, LOG_DEBG);
	    FILE *partial_output = fopen(partial_file_name.c_str(), "r");
	  char c = fgetc(partial_output);
	    int c = fgetc(partial_output);
	    while (c != EOF) {
	      fputc(c, output);
	      c = fgetc(partial_output);
@@ -257,8 +273,8 @@ void cluster(const string& config_file, const string& data_file, const string& o
	    fclose(partial_output);
	    remove(partial_file_name.c_str());
	    logger->log("done.\n", LOG_DEBG);
	  partial_file_name = output_path + "/c_TPPOAN_" + to_string(ri);
	  message = "Copying binary output of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... ";
	    partial_file_name = output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(ri);
	    message = "Copying binary output in MPI process " + to_string(mpidata->rank) + " of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... ";
	    logger->log(message, LOG_DEBG);
	    fstream partial_tppoan;
	    partial_tppoan.open(partial_file_name.c_str(), ios::in | ios::binary);
@@ -274,6 +290,38 @@ void cluster(const string& config_file, const string& data_file, const string& o
	    logger->log("done.\n", LOG_DEBG);
	  }
	}
#endif
	// here go the code to append the files written in MPI processes > 0 to the ones on MPI process 0
#ifdef MPI_VERSION
	if (mpidata->mpirunning) {
	  // only go through this if MPI has been actually used
	  for (int rr=1; rr<mpidata->nprocs; rr++) {
	    // get the data from process rr
	    // how many openmp threads did process rr use?
	    int remotethreads;
	    MPI_Recv(&remotethreads, 1, MPI_INT, rr, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	    for (int ri=0; ri<remotethreads; ri++) {
	      // first get the ASCII local file
	      int c = 0;
	      MPI_Recv(&c, 1, MPI_INT, rr, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	      while (c != EOF) {
		fputc(c, output);
		MPI_Recv(&c, 1, MPI_INT, rr, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	      }
	      // now get the binary local file
	      long buffer_size = 0;
	      // get the size of the buffer
	      MPI_Recv(&buffer_size, 1, MPI_LONG, rr, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	      // allocate the buffer
	      char *binary_buffer = new char[buffer_size];
	      // actually receive the buffer
	      MPI_Recv(binary_buffer, buffer_size, MPI_CHAR, rr, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	      // we can write it to disk
	      tppoan.write(binary_buffer, buffer_size);
	      delete[] binary_buffer;
	    }
	  }
	}
#endif
	tppoanp->close();
	delete tppoanp;
@@ -297,6 +345,122 @@ void cluster(const string& config_file, const string& data_file, const string& o
    logger->log(message);
    logger->log("Finished: output written to " + output_path + "/c_OCLU\n");
    time_logger->log(message);
  }

#ifdef MPI_VERSION
  else {
    // here go the code for MPI processes other than 0
    // copy gconf, sconf, cid and p_scattering_angles from MPI process 0
    GeometryConfiguration *gconf = new GeometryConfiguration(mpidata);
    ScattererConfiguration *sconf = new ScattererConfiguration(mpidata);
    ClusterIterationData *cid = new ClusterIterationData(mpidata);
    ScatteringAngles *p_scattering_angles = new ScatteringAngles(mpidata);
    // open separate files for other MPI processes
    // File *output = fopen((output_path + "/c_OCLU_mpi"+ to_string(mpidata->rank)).c_str(), "w");
    // fstream *tppoanp = new fstream;
    // fstream &tppoan = *tppoanp;
    // string tppoan_name = output_path + "/c_TPPOAN_mpi"+ to_string(mpidata->rank);
    // tppoan.open(tppoan_name.c_str(), ios::out | ios::binary);
    // Create this variable and initialise it with a default here, so that it is defined anyway, with or without OpenMP support enabled
    int ompnumthreads = 1;

#pragma omp parallel
    {
      // Create and initialise this variable here, so that if OpenMP is enabled it is local to the thread, and if OpenMP is not enabled it has a well-defined value anyway
      int myompthread = 0;
#ifdef _OPENMP
      // If OpenMP is enabled, give actual values to myompthread and ompnumthreads, and open thread-local output files
      myompthread = omp_get_thread_num();
      if (myompthread == 0) ompnumthreads = omp_get_num_threads();
#endif
      // To test parallelism, I will now start feeding this function with "clean" copies of the parameters, so that they will not be changed by previous iterations, and each one will behave as the first one. Define all (empty) variables here, so they have the correct scope, then they get different definitions depending on thread number
      ClusterIterationData *cid_2 = NULL;
      FILE *output_2 = NULL;
      fstream *tppoanp_2 = NULL;
      // for threads other than the 0, create distinct copies of all relevant data, while for thread 0 just define new references / pointers to the original ones
      if (myompthread == 0) {
	cid_2 = cid;
	// output_2 = output;
	// tppoanp_2 = tppoanp;
      } else {
	// this is not thread 0, so do create fresh copies of all local variables
	cid_2 = new ClusterIterationData(*cid);
      }
      output_2 = fopen((output_path + "/c_OCLU_" + to_string(mpidata->rank) + "_" + to_string(myompthread)).c_str(), "w");
      tppoanp_2 = new fstream;
      tppoanp_2->open((output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(myompthread)).c_str(), ios::out | ios::binary);
      fstream &tppoan_2 = *tppoanp_2;
      // make sure all threads align here: I don't want the following loop to accidentally start for thread 0, possibly modifying some variables before they are copied by all other threads
#pragma omp barrier
      if (myompthread==0) logger->log("Syncing OpenMP threads and starting the loop on wavelengths\n");
      // ok, now I can actually start the parallel calculations
#pragma omp for
      for (int jxi488 = cid_2->firstxi; jxi488 <= cid_2->lastxi; jxi488++) {
	int jer = cluster_jxi488_cycle(jxi488, sconf, gconf, p_scattering_angles, cid_2, output_2, output_path, *tppoanp_2, logger);
      }

#pragma omp barrier
      // only threads different from 0 have to free local copies of variables
      if (myompthread != 0) {
	delete cid_2;
      }
      fclose(output_2);
      tppoanp_2->close();
      delete tppoanp_2;
#pragma omp barrier
      {
	message = "INFO: Closing thread-local output files of thread " + to_string(myompthread) + " and syncing threads.\n";
	logger->log(message);
      }
    } // closes pragma omp parallel
#pragma omp barrier
    {
      // tell MPI process 0 how many threads we have on this process (not necessarily the same across all processes)
      MPI_Send(&ompnumthreads, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
      // reopen local files, send them all to MPI process 0
      for (int ri = 0; ri < ompnumthreads; ri++) {
	// Giovanni, please add code here in this loop to reopen the temporary files, re-read them and append them respectively to the global output and tppoan files, before closing them
	string partial_file_name = output_path + "/c_OCLU_" + to_string(mpidata->rank) + "_" + to_string(ri);
	message = "Copying ASCII output in MPI process " + to_string(mpidata->rank) + " of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... ";
	logger->log(message, LOG_DEBG);
	FILE *partial_output = fopen(partial_file_name.c_str(), "r");
	int c = 0;
	while (c != EOF) {
	  c = fgetc(partial_output);
	  MPI_Send(&c, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
	}
	fclose(partial_output);
	remove(partial_file_name.c_str());
	logger->log("done.\n", LOG_DEBG);
	partial_file_name = output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(ri);
	message = "Copying binary output in MPI process " + to_string(mpidata->rank) + " of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... ";
	logger->log(message, LOG_DEBG);
	fstream partial_tppoan;
	partial_tppoan.open(partial_file_name.c_str(), ios::in | ios::binary);
	partial_tppoan.seekg(0, ios::end);
	long buffer_size = partial_tppoan.tellg();
	char *binary_buffer = new char[buffer_size];
	partial_tppoan.seekg(0, ios::beg);
	partial_tppoan.read(binary_buffer, buffer_size);
	// tell MPI process 0 how large is the buffer
	MPI_Send(&buffer_size, 1, MPI_LONG, 0, 1, MPI_COMM_WORLD);
	// actually send the buffer
	MPI_Send(binary_buffer, buffer_size, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
	// tppoan.write(binary_buffer, buffer_size);
	partial_tppoan.close();
	delete[] binary_buffer;
	remove(partial_file_name.c_str());
	logger->log("done.\n", LOG_DEBG);
      }
    }
    // Clean memory
    delete cid;
    delete p_scattering_angles;
    delete sconf;
    delete gconf;

  }
#endif
  fclose(timing_file);
  delete time_logger;
  delete logger;
+18 −2
Original line number Diff line number Diff line
@@ -29,9 +29,13 @@
#include "../include/Configuration.h"
#endif

#ifndef INCLUDE_COMMONS_H_
#include "../include/Commons.h"
#endif

using namespace std;

extern void cluster(const string& config_file, const string& data_file, const string& output_path);
extern void cluster(const string& config_file, const string& data_file, const string& output_path, const mixMPI *mpidata);

/*! \brief Main program entry point.
 *
@@ -46,6 +50,14 @@ extern void cluster(const string& config_file, const string& data_file, const st
 * \return result: `int` An exit code passed to the OS (0 for successful execution).
 */
int main(int argc, char **argv) {
#ifdef MPI_VERSION
	int ierr = MPI_Init(&argc, &argv);
	// create and initialise class with essential MPI data
	mixMPI *mpidata = new mixMPI(MPI_COMM_WORLD);
#else
	// create the class with dummy data if we are not using MPI at all
	mixMPI *mpidata = new mixMPI();
#endif
  	string config_file = "../../test_data/cluster/DEDFB";
	string data_file = "../../test_data/cluster/DCLU";
	string output_path = ".";
@@ -54,6 +66,10 @@ int main(int argc, char **argv) {
		data_file = string(argv[2]);
		output_path = string(argv[3]);
	}
	cluster(config_file, data_file, output_path);
	cluster(config_file, data_file, output_path, mpidata);
#ifdef MPI_VERSION
	MPI_Finalize();
#endif
	delete mpidata;
	return 0;
}
+183 −2
Original line number Diff line number Diff line
@@ -19,6 +19,12 @@
#ifndef INCLUDE_COMMONS_H_
#define INCLUDE_COMMONS_H_

#ifdef USE_MPI
#include <mpi.h>
#endif

class mixMPI;

/*! \brief Representation of the FORTRAN C1 common blocks.
 *
 * C1 common blocks are used to store vector field expansions and geometric
@@ -110,6 +116,20 @@ public:
   */
  C1(const C1& rhs);

#ifdef MPI_VERSION
  /*! \brief C1 instance constructor copying all contents off MPI broadcast from MPI process 0
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  C1(const mixMPI *mpidata);

  /*! \brief send C1 instance from MPI process 0 via MPI broadcasts to all other processes
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  void mpibcast(const mixMPI *mpidata);
#endif

  //! \brief C1 instance destroyer.
  ~C1();
};
@@ -153,6 +173,21 @@ public:

  //! \brief C2 instance destroyer.
  ~C2();

#ifdef MPI_VERSION
  /*! \brief C2 instance constructor copying all contents off MPI broadcast from MPI process 0
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  C2(const mixMPI *mpidata);

  /*! \brief send C2 instance from MPI process 0 via MPI broadcasts to all other processes
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  void mpibcast(const mixMPI *mpidata);
#endif

};

/*! \brief Representation of the FORTRAN C3 blocks.
@@ -183,6 +218,21 @@ public:
  /*! \brief C3 instance destroyer.
   */
  ~C3();

#ifdef MPI_VERSION
  /*! \brief C3 instance constructor copying all contents off MPI broadcast from MPI process 0
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  C3(const mixMPI *mpidata);

  /*! \brief send C3 instance from MPI process 0 via MPI broadcasts to all other processes
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  void mpibcast(const mixMPI *mpidata);
#endif

};

/*! \brief Representation of the FORTRAN C4 blocks.
@@ -229,6 +279,21 @@ public:
  /*! \brief C4 instance destroyer.
   */
  ~C4();

#ifdef MPI_VERSION
  /*! \brief C4 instance constructor copying all contents off MPI broadcast from MPI process 0
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  C4(const mixMPI *mpidata);

  /*! \brief send C4 instance from MPI process 0 via MPI broadcasts to all other processes
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  void mpibcast(const mixMPI *mpidata);
#endif

};

/*! \brief Vectors and matrices that are specific to cluster C1 blocks.
@@ -317,6 +382,21 @@ public:

  //! \brief C1_AddOns instance destroyer.
  ~C1_AddOns();

#ifdef MPI_VERSION
  /*! \brief C1_AddOns instance constructor copying all contents off MPI broadcast from MPI process 0
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  C1_AddOns(const mixMPI *mpidata);

  /*! \brief send C1_AddOns instance from MPI process 0 via MPI broadcasts to all other processes
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  void mpibcast(const mixMPI *mpidata);
#endif

};

/*! \brief Representation of the FORTRAN C6 blocks.
@@ -343,6 +423,21 @@ public:
  /*! \brief C6 instance destroyer.
   */
  ~C6();

#ifdef MPI_VERSION
  /*! \brief C6 instance constructor copying all contents off MPI broadcast from MPI process 0
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  C6(const mixMPI *mpidata);

  /*! \brief send C6 instance from MPI process 0 via MPI broadcasts to all other processes
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  void mpibcast(const mixMPI *mpidata);
#endif

};

/*! \brief Representation of the FORTRAN C9 blocks.
@@ -384,6 +479,51 @@ public:
  /*! \brief C9 instance destroyer.
   */
  ~C9();

#ifdef MPI_VERSION
  /*! \brief C9 instance constructor copying all contents off MPI broadcast from MPI process 0
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  C9(const mixMPI *mpidata);

  /*! \brief send C9 instance from MPI process 0 via MPI broadcasts to all other processes
   *
   * \param mpidata: `mixMPI *` pointer to MPI data structure.
   */
  void mpibcast(const mixMPI *mpidata);
#endif

};

/*! \brief A lightweight structure carrying the essential MPI state (rank, size, init flag).
 *
 * An instance of this class is created once at program start-up and passed down
 * to the computation routines, so they can behave correctly whether MPI is in
 * use or not (a dummy single-process instance is built when MPI is disabled).
 */
class mixMPI {
public:
  //! \brief Flag recording whether MPI was actually initialised; used to guard MPI calls (false when built/run without MPI).
  bool mpirunning;
  //! \brief Rank of this process in the MPI communicator (0 when MPI is not used).
  int rank;
  //! \brief Total number of MPI processes in the communicator (1 when MPI is not used).
  int nprocs;

  /*! \brief Empty mixMPI instance constructor.
   *
   * Fills the structure with dummy single-process defaults for builds or runs
   * that do not use MPI at all.
   */
  mixMPI();

  /*! \brief mixMPI instance constructor from an actual MPI communicator.
   *
   * \param comm: `MPI_Comm` Communicator from which the rank and process count are queried.
   */
#ifdef MPI_VERSION
  mixMPI(MPI_Comm comm);
#endif
  
  /*! \brief mixMPI instance constructor copying its contents from a preexisting object.
   *
   * \param rhs: `mixMPI&` Reference to the instance whose contents are copied.
   */
  mixMPI(const mixMPI& rhs);

  /*! \brief mixMPI instance destroyer.
   */
  ~mixMPI();
};

/*! \brief A data structure representing the information used for a single scale
@@ -455,12 +595,32 @@ public:
  //! \brief Wave number.
  double wn;
  double xip;
  int number_of_scales;
  int xiblock;
  int firstxi;
  int lastxi;

  ClusterIterationData(GeometryConfiguration *gconf, ScattererConfiguration *sconf);
  ClusterIterationData(GeometryConfiguration *gconf, ScattererConfiguration *sconf, const mixMPI *mpidata);
  
  ClusterIterationData(const ClusterIterationData& rhs);

#ifdef MPI_VERSION
  ClusterIterationData(const mixMPI *mpidata);

  /*! \brief Broadcast over MPI the ClusterIterationData instance from MPI process 0 to all others.
   *
   * When using MPI, the initial ClusterIterationData instance created by MPI process 0
   * needs to be replicated on all other processes. This function sends it using
   * MPI broadcast calls. The MPI broadcast calls in this function must match those
   * in the constructor using the mixMPI pointer.
   *
   * \param mpidata: `mixMPI *` Pointer to the mpi structure used to do the MPI broadcast.
   */
  void mpibcast(const mixMPI *mpidata);
#endif

  ~ClusterIterationData();

};

/*! \brief A data structure representing the angles to be evaluated in the problem.
@@ -562,6 +722,27 @@ public:
   * \param rhs: `ScatteringAngles&` Reference to the ScatteringAngles object to be copied.
   */
  ScatteringAngles(const ScatteringAngles &rhs);

#ifdef MPI_VERSION
  /*! \brief ScatteringAngles copy from MPI broadcast.
   *
   * \param mpidata: `mixMPI *` Pointer to the mpidata instance used to copy the data.
   */
  ScatteringAngles(const mixMPI *mpidata);

    /*! \brief Broadcast over MPI the ScatteringAngles instance from MPI process 0 to all others.
   *
   * When using MPI, the initial ScatteringAngles instance created by MPI process 0
   * needs to be replicated on all other processes. This function sends it using
   * MPI broadcast calls. The MPI broadcast calls in this function must match those
   * in the constructor using the mixMPI pointer.
   *
   * \param mpidata: `mixMPI *` Pointer to the mpi structure used to do the MPI broadcast.
   */
  void mpibcast(const mixMPI *mpidata);
#endif

};


#endif
+42 −0
Original line number Diff line number Diff line
@@ -30,6 +30,8 @@
#ifndef INCLUDE_CONFIGURATION_H_
#define INCLUDE_CONFIGURATION_H_

class mixMPI;

/**
 * \brief A class to represent the configuration of the scattering geometry.
 *
@@ -199,6 +201,25 @@ public:
   */
  GeometryConfiguration(const GeometryConfiguration& rhs);

#ifdef MPI_VERSION
  /*! \brief Build a scattering geometry configuration structure copying it via MPI from MPI process 0.
   *
   * \param mpidata: `mixMPI *` Pointer to the mpidata instance to use for the MPI communications.
   */
  GeometryConfiguration(const mixMPI *mpidata);

  /*! \brief Broadcast over MPI the GeometryConfiguration instance from MPI process 0 to all others.
   *
   * When using MPI, the initial GeometryConfiguration instance created by MPI process 0
   * needs to be replicated on all other processes. This function sends it using
   * MPI broadcast calls. The MPI broadcast calls in this function must match those
   * in the constructor using the mixMPI pointer.
   *
   * \param mpidata: `mixMPI *` Pointer to the mpi structure used to do the MPI broadcast.
   */
  void mpibcast(const mixMPI *mpidata);
#endif

  /*! \brief Destroy a GeometryConfiguration instance.
   */
  ~GeometryConfiguration();
@@ -245,6 +266,7 @@ public:
   * \return scale: `double` The Z coordinate of the requested sphere.
   */
  double get_sph_z(int index) { return _sph_z[index]; }

};

/**
@@ -402,6 +424,25 @@ public:
   */
  ScattererConfiguration(const ScattererConfiguration& rhs);

#ifdef MPI_VERSION
  /*! \brief Build a scatterer configuration structure copying it via MPI from MPI process 0.
   *
   * \param mpidata: `mixMPI *` Pointer to the mpidata instance to use for the MPI communications.
   */
  ScattererConfiguration(const mixMPI *mpidata);

  /*! \brief Broadcast over MPI the ScattererConfiguration instance from MPI process 0 to all others.
   *
   * When using MPI, the initial ScattererConfiguration instance created by MPI process 0
   * needs to be replicated on all other processes. This function sends it using
   * MPI broadcast calls. The MPI broadcast calls in this function must match those
   * in the constructor using the mixMPI pointer.
   *
   * \param mpidata: `mixMPI *` Pointer to the mpi structure used to do the MPI broadcast.
   */
  void mpibcast(const mixMPI *mpidata);
#endif

  /*! \brief Destroy a scatterer configuration instance.
   */
  ~ScattererConfiguration();
@@ -561,6 +602,7 @@ public:
   * \return result: `bool` True, if the two instances are equal, false otherwise.
   */
  bool operator ==(const ScattererConfiguration &other);

};

#endif
+669 −7

File changed.

Preview size limit exceeded, changes collapsed.

Loading