Commit 64e4ba86 authored by Giovanni La Mura's avatar Giovanni La Mura
Browse files

Use ClusterOutputInfo in sequential logic of np_cluster

parent ffb886f7
Loading
Loading
Loading
Loading
+276 −402

File changed.

Preview size limit exceeded, changes collapsed.

+2 −2
Original line number Diff line number Diff line
@@ -458,13 +458,13 @@ public:
   *
   * \param sc: `ScattererConfiguration *` Pointer to a `ScattererConfiguration` instance.
   * \param gc: `GeometryConfiguration *` Pointer to a `GeometryConfiguration` instance.
   * \param mpidata: `mixMPI*` Pointer to a mixMPI instance.
   * \param mpidata: `const mixMPI*` Pointer to a mixMPI instance.
   * \param first_xi: `int` Index of the first scale in output (optional, default is 0).
  * \param xi_length: `int` Number of scales to be included in output (optional, default is all).
   */   
  ClusterOutputInfo(
    ScattererConfiguration *sc, GeometryConfiguration *gc,
    mixMPI *mpidata, int first_xi = 0, int xi_length = 0
    const mixMPI *mpidata, int first_xi = 0, int xi_length = 0
  );

  /*! \brief `ClusterOutputInfo` instance destroyer.
+13 −13
Original line number Diff line number Diff line
@@ -57,7 +57,7 @@ using namespace std;
// >> ClusterOutputInfo CLASS IMPLEMENTATION <<
ClusterOutputInfo::ClusterOutputInfo(
  ScattererConfiguration *sc, GeometryConfiguration *gc,
  mixMPI *mpidata, int first_xi, int xi_length
  const mixMPI *mpidata, int first_xi, int xi_length
) {
  nsph = gc->number_of_spheres;
  li = gc->li;
@@ -621,7 +621,7 @@ int ClusterOutputInfo::insert(const ClusterOutputInfo &rhs) {
    memcpy(vec_dir_sas22 + offset, rhs.vec_dir_sas22, chunk_size * sizeof(dcomplex));
    memcpy(vec_dir_muls + 16 * offset, rhs.vec_dir_muls, 16 * chunk_size * sizeof(double));
    memcpy(vec_dir_mulslr + 16 * offset, rhs.vec_dir_mulslr, 16 * chunk_size * sizeof(double));
    // Insert vectors whose sizes depend on wavelengths and directions.
    // Insert vectors whose sizes depend on wavelengths and directions
    offset = (xi1 - 1) * ndirs;
    chunk_size = rhs.xi_block_size * ndirs;
    memcpy(vec_dir_sat11 + offset, rhs.vec_dir_sat11, chunk_size * sizeof(dcomplex));
@@ -728,7 +728,7 @@ int ClusterOutputInfo::write_hdf5(const std::string &output) {
int ClusterOutputInfo::write_legacy(const std::string &output) {
  const dcomplex cc0 = 0.0 + I * 0.0;
  int result = 0;
  FILE *p_outfile = fopen(output.c_str(), "a");
  FILE *p_outfile = fopen(output.c_str(), "w");
  if (p_outfile != NULL) {
    if (vec_jxi[0] == 1) {
      // Write the preamble of c_OCLU.
@@ -961,11 +961,11 @@ int ClusterOutputInfo::write_legacy(const std::string &output) {
      // Differential directional loop
      // Loop sorting (outer to inner) is:
      // THETA_INC - PHI_INC - THETA_SCAT - PHI_SCAT
      int dir_index = 0;
      for (int jth = 0; jth < _num_theta; jth++) {
	for (int jph = 0; jph < _num_phi; jph++) {
	  for (int jths = 0; jths < _num_thetas; jths++) {
	    for (int jphs = 0; jphs < _num_phis; jphs++) {
	      int dir_index = jphs + _num_phis * jths + _num_phis * _num_thetas * jph + _num_phis * _num_thetas * _num_phi * jth;
	      fprintf(
		      p_outfile, "********** JTH =%3d, JPH =%3d, JTHS =%3d, JPHS =%3d ********************\n",
		      jth + 1, jph + 1, jths + 1, jphs + 1
@@ -1103,7 +1103,7 @@ int ClusterOutputInfo::write_legacy(const std::string &output) {
	      fprintf(
		      p_outfile, "  RE(FSAC(1,1))/RE(TFSAS)=%15.7lE, IM(FSAC(1,1))/IM(TFSAS)=%15.7lE\n",
		      real(vec_dir_fsac11[sat_dir_index]) / real(vec_fsat[jxi]),
		      imag(vec_dir_fsac11[sat_dir_index]) / real(vec_fsat[jxi])
		      imag(vec_dir_fsac11[sat_dir_index]) / imag(vec_fsat[jxi])
	      );
	      fprintf(
		      p_outfile, "  QSCHU=%15.7lE, PSCHU=%15.7lE, S0MAG=%15.7lE\n",
@@ -1204,7 +1204,7 @@ int ClusterOutputInfo::write_legacy(const std::string &output) {
	      fprintf(
		      p_outfile, "  RE(FSAC(2,2))/RE(TFSAS)=%15.7lE, IM(FSAC(2,2))/IM(TFSAS)=%15.7lE\n",
		      real(vec_dir_fsac22[sat_dir_index]) / real(vec_fsat[jxi]),
		      imag(vec_dir_fsac22[sat_dir_index]) / real(vec_fsat[jxi])
		      imag(vec_dir_fsac22[sat_dir_index]) / imag(vec_fsat[jxi])
	      );
	      fprintf(
		      p_outfile, "  QSCHU=%15.7lE, PSCHU=%15.7lE, S0MAG=%15.7lE\n",
@@ -1262,7 +1262,6 @@ int ClusterOutputInfo::write_legacy(const std::string &output) {
			vec_dir_tqszc2[sat_dir_index]
		);
	      } // end goto190 switch

	      fprintf(
		      p_outfile, "  (RE(FSAC(1,1))-RE(FSAC(2,2)))/RE(FSAC(1,1))=%15.7lE\n",
		      (real(vec_dir_fsac11[sat_dir_index]) - real(vec_dir_fsac22[sat_dir_index])) / real(vec_dir_fsac11[sat_dir_index])
@@ -1273,7 +1272,7 @@ int ClusterOutputInfo::write_legacy(const std::string &output) {
	      );
	      fprintf(p_outfile, "  MULC\n");
	      for (int i = 0; i < 4; i++) {
		int mulc_dir_index = 16 * jxi * dir_index + 4 * i;
		int mulc_dir_index = 16 * jxi * ndirs + 16 * dir_index + 4 * i;
		fprintf(
			p_outfile, "        %15.7lE%15.7lE%15.7lE%15.7lE\n",
			vec_dir_mulc[mulc_dir_index],
@@ -1284,7 +1283,7 @@ int ClusterOutputInfo::write_legacy(const std::string &output) {
	      } // i mulc loop
	      fprintf(p_outfile, "  MULCLR\n");
	      for (int i = 0; i < 4; i++) {
		int mulc_dir_index = 16 * jxi * dir_index + 4 * i;
		int mulc_dir_index = 16 * jxi * ndirs + 16 * dir_index + 4 * i;
		fprintf(
			p_outfile, "        %15.7lE%15.7lE%15.7lE%15.7lE\n",
			vec_dir_mulclr[mulc_dir_index],
@@ -1322,6 +1321,7 @@ int ClusterOutputInfo::write_legacy(const std::string &output) {
		  );
		}
	      } // end of if (iavm != 0) switch
	      dir_index++;
	    } // jphs loop
	  } // jths loop
	} // jph loop
@@ -1447,9 +1447,9 @@ int ClusterOutputInfo::mpireceive(const mixMPI *mpidata, int pid) {
    MPI_Recv(vec_dir_muls + 16 * offset, 16 * chunk_size, MPI_C_DOUBLE_COMPLEX, 0, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Recv(vec_dir_mulslr + 16 * offset, 16 * chunk_size, MPI_C_DOUBLE_COMPLEX, 0, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    // Receive vectors whose sizes depend on directions.
    // Receive vectors whose sizes depend on directions and scales.
    MPI_Recv(&chunk_size, 1, MPI_INT32_T, pid, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    offset = (xi1 - 1) * chunk_size;
    offset = (xi1 - 1) * ndirs * chunk_size;
    MPI_Recv(vec_dir_sat11 + offset, chunk_size, MPI_C_DOUBLE_COMPLEX, 0, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Recv(vec_dir_sat21 + offset, chunk_size, MPI_C_DOUBLE_COMPLEX, 0, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Recv(vec_dir_sat12 + offset, chunk_size, MPI_C_DOUBLE_COMPLEX, 0, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
@@ -1637,8 +1637,8 @@ int ClusterOutputInfo::mpisend(const mixMPI *mpidata) {
    MPI_Send(vec_dir_muls, 16 * chunk_size, MPI_C_DOUBLE_COMPLEX, 0, 10, MPI_COMM_WORLD);
    MPI_Send(vec_dir_mulslr, 16 * chunk_size, MPI_C_DOUBLE_COMPLEX, 0, 10, MPI_COMM_WORLD);

    // Send vectors whose sizes depend on directions.
    chunk_size = ndirs * xi_block_size;
    // Send vectors whose sizes depend on directions and scales.
    chunk_size = xi_block_size * ndirs;
    MPI_Send(&chunk_size, 1, MPI_INT32_T, 0, 10, MPI_COMM_WORLD);
    MPI_Send(vec_dir_sat11, chunk_size, MPI_C_DOUBLE_COMPLEX, 0, 10, MPI_COMM_WORLD);
    MPI_Send(vec_dir_sat21, chunk_size, MPI_C_DOUBLE_COMPLEX, 0, 10, MPI_COMM_WORLD);