Commit 3752471c authored by Giovanni La Mura's avatar Giovanni La Mura
Browse files

Rewind ASCII files after getting their size and skip EOF character transmission

parent 287269a5
Loading
Loading
Loading
Loading
+43 −37
Original line number | Diff line number | Diff line
@@ -13,7 +13,7 @@
   limitations under the License.
 */

/*! \file cluster.cpp
/*! \file cluster.cpp
 *
 * \brief Implementation of the calculation for a cluster of spheres.
 */
@@ -82,6 +82,7 @@ int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConf
void cluster(const string& config_file, const string& data_file, const string& output_path, const mixMPI *mpidata) {
  chrono::time_point<chrono::high_resolution_clock> t_start = chrono::high_resolution_clock::now();
  chrono::duration<double> elapsed;
  string message;
  string timing_name = output_path + "/c_timing_mpi"+ to_string(mpidata->rank) +".log";
  FILE *timing_file = fopen(timing_name.c_str(), "w");
  Logger *time_logger = new Logger(LOG_DEBG, timing_file);
@@ -271,12 +272,8 @@ void cluster(const string& config_file, const string& data_file, const string& o
#pragma omp barrier
	{
	  // thread 0 already wrote on global files, skip it and take care of appending the others
	  for (int ri = 0; ri < ompnumthreads; ri++) {
	    // still, we need to remove all c_OCLU_RANK_0 files
	  for (int ri = 1; ri < ompnumthreads; ri++) {
	    string partial_file_name = output_path + "/c_OCLU_" + to_string(mpidata->rank) + "_" + to_string(ri);
	    if (ri == 0) {
	      remove(partial_file_name.c_str());
	    } else {
	    string message = "Copying ASCII output in MPI process " + to_string(mpidata->rank) + " of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... ";
	    logger->log(message, LOG_DEBG);
	    FILE *partial_output = fopen(partial_file_name.c_str(), "r");
@@ -305,7 +302,6 @@ void cluster(const string& config_file, const string& data_file, const string& o
	    logger->log("done.\n", LOG_DEBG);
	  }
	}
	}
#endif
	// here go the code to append the files written in MPI processes > 0 to the ones on MPI process 0
#ifdef MPI_VERSION
@@ -317,6 +313,7 @@ void cluster(const string& config_file, const string& data_file, const string& o
	    int remotethreads;
	    MPI_Recv(&remotethreads, 1, MPI_INT, rr, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	    for (int ri=0; ri<remotethreads; ri++) {
	      // first get the ASCII local file
	      char *chunk_buffer;
	      int chunk_buffer_size = -1;
	      MPI_Recv(&chunk_buffer_size, 1, MPI_INT, rr, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
@@ -327,6 +324,7 @@ void cluster(const string& config_file, const string& data_file, const string& o
		delete[] chunk_buffer;
		MPI_Recv(&chunk_buffer_size, 1, MPI_INT, rr, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	      }
	      fputc(10, output);

	      // now get the binary local file
	      long buffer_size = 0;
@@ -446,11 +444,18 @@ void cluster(const string& config_file, const string& data_file, const string& o
	partial_output.open(partial_file_name.c_str(), ios::in | ios::binary);
	partial_output.seekg(0, ios::end);
	const long partial_output_size = partial_output.tellg();
	partial_output.close();
	partial_output.open(partial_file_name.c_str(), ios::in | ios::binary);
	int chunk_buffer_size = 25165824; // Length of char array  with 24Mb size
	char *chunk_buffer = new char[chunk_buffer_size]();
	int full_chunks = (int)(partial_output_size / chunk_buffer_size);
	for (int fi = 0; fi < full_chunks; fi++) {
	  partial_output.read(chunk_buffer, chunk_buffer_size);
	  // If EOF is reached, do not send EOF character.
	  long ptr_position = partial_output.tellg();
	  if (ptr_position == partial_output_size) {
	    chunk_buffer_size--;
	  }
	  // Send the size of the buffer that is being transmitted (Node-0 does not know whether it is full or not)
	  MPI_Send(&chunk_buffer_size, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
	  // Actually send the file contents to Node-0
@@ -459,7 +464,7 @@ void cluster(const string& config_file, const string& data_file, const string& o
	long ptr_position = partial_output.tellg();
	if (ptr_position < partial_output_size) {
	  // Send the last partial buffer
	  chunk_buffer_size = partial_output_size - ptr_position;
	  chunk_buffer_size = partial_output_size - ptr_position - 1;
	  partial_output.read(chunk_buffer, chunk_buffer_size);
	  // Send the size of the buffer that is being transmitted (Node-0 does not know whether it is full or not)
	  MPI_Send(&chunk_buffer_size, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
@@ -472,6 +477,7 @@ void cluster(const string& config_file, const string& data_file, const string& o
	partial_output.close();
	delete[] chunk_buffer;
	remove(partial_file_name.c_str());
	logger->log("done.\n", LOG_DEBG);
	
	partial_file_name = output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(ri);
	message = "Copying binary output in MPI process " + to_string(mpidata->rank) + " of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... ";