Removed MPI.

Fixed memory leak.

git-svn-id: svn://136.177.114.72/svn_GW/phreeqc3/branches/concrete@10727 1feff8c3-07ed-0310-ac33-dd36852eb9cd
David L Parkhurst 2016-01-21 16:29:14 +00:00
parent 7fbf8779ce
commit 7f842da63e
2 changed files with 12 additions and 87 deletions
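
For context on the leak fix: the transport.cpp hunks below free the per-cell work arrays (v_m, v_m_il, J_ij, J_ij_il) held in the ct array before ct itself is released. A minimal sketch of that pattern follows; it assumes free_check_null() frees a non-NULL pointer and returns NULL (matching its use throughout the diff), and the struct fields are placeholders, not the real PHREEQC definitions.

#include <cstdlib>

// Assumed behavior of PHREEQC's free_check_null(): free a non-NULL pointer
// and return NULL so the result can be assigned back to the pointer.
static void *free_check_null(void *ptr)
{
    if (ptr != NULL)
        free(ptr);
    return NULL;
}

// Placeholder structs standing in for PHREEQC's V_M, J_ij, and CT types.
struct V_M  { double D; };
struct J_ij { double tot1; };
struct CT
{
    struct V_M  *v_m,  *v_m_il;
    struct J_ij *J_ij, *J_ij_il;
};

// The pattern added by this commit: release each cell's arrays before
// freeing the ct array itself, otherwise those blocks leak.
static void free_transport_arrays(struct CT *ct, int all_cells)
{
    for (int i = 0; i < all_cells; i++)
    {
        ct[i].v_m     = (struct V_M *)  free_check_null(ct[i].v_m);
        ct[i].v_m_il  = (struct V_M *)  free_check_null(ct[i].v_m_il);
        ct[i].J_ij    = (struct J_ij *) free_check_null(ct[i].J_ij);
        ct[i].J_ij_il = (struct J_ij *) free_check_null(ct[i].J_ij_il);
    }
    free(ct);
}

int main()
{
    // Allocate two cells with one dummy array each, then clean up.
    struct CT *ct = (struct CT *) calloc(2, sizeof(struct CT));
    ct[0].v_m  = (struct V_M *)  calloc(4, sizeof(struct V_M));
    ct[1].J_ij = (struct J_ij *) calloc(4, sizeof(struct J_ij));
    free_transport_arrays(ct, 2);
    return 0;
}
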

View File

@@ -13,10 +13,7 @@
#include "cxxKinetics.h"
//#include <sys/signal.h>
//#include <fenv.h>
#if defined(USE_MPI)
#include <mpi.h>
int worker_tasks(int *task_number, void * cookie);
#endif
/* ----------------------------------------------------------------------
* MAIN
* ---------------------------------------------------------------------- */
@@ -51,7 +48,7 @@ main(int argc, char *argv[])
tmpDbgFlag |= _CRTDBG_LEAK_CHECK_DF;
///tmpDbgFlag |= _CRTDBG_CHECK_ALWAYS_DF;
_CrtSetDbgFlag(tmpDbgFlag);
//_crtBreakAlloc = 31195;
//_crtBreakAlloc = 185092;
#endif
#ifdef SKIP
//Set the x86 floating-point control word according to what
@@ -75,71 +72,10 @@ unsigned int cwOriginal = _controlfp(cw, MCW_EM); //Set it.
//Restore the original value when done:
//_controlfp(cwOriginal, MCW_EM);
#endif
#if defined(USE_MPI)
int mpi_tasks, mpi_myself;
if (MPI_Init(&argc, &argv) != MPI_SUCCESS)
{
return EXIT_FAILURE;
}
if (MPI_Comm_size(MPI_COMM_WORLD, &mpi_tasks) != MPI_SUCCESS)
{
return EXIT_FAILURE;
}
if (MPI_Comm_rank(MPI_COMM_WORLD, &mpi_myself) != MPI_SUCCESS)
{
return EXIT_FAILURE;
}
if (mpi_myself > 0)
{
for (;;)
{
//MPI_Status mpi_status;
int task_no;
MPI_Bcast(&task_no, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (task_no > 0)
{
Parallelizer *phreeqcrm_ptr = new Parallelizer(task_no, MPI_COMM_WORLD);
phreeqcrm_ptr->SetMpiWorkerCallbackC(worker_tasks);
phreeqcrm_ptr->SetMpiWorkerCallbackCookie(phreeqcrm_ptr);
phreeqcrm_ptr->MpiWorker();
delete phreeqcrm_ptr;
//{
// std::ostringstream os;
// os << "Worker " << mpi_myself << " deleting Parallelizer. " << std::endl;
// std::cerr << os.str();
//}
}
else
{
break;
}
}
{
std::ostringstream os;
os << "End worker " << mpi_myself << std::endl;
std::cerr << os.str();
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
#endif
Phreeqc phreeqc_instance;
int return_value = phreeqc_instance.main_method(argc, argv);
#if defined(USE_MPI)
int task_no = 0;
MPI_Bcast(&task_no, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
{
std::ostringstream os;
os << "End master " << mpi_myself << std::endl;
std::cerr << os.str();
}
#endif
return return_value;
}
//#define TEST_COPY
@@ -999,22 +935,3 @@ open_output_file(char *query, char *default_name, std::ios_base::openmode mode,
}
#endif
#ifdef USE_MPI
int worker_tasks(int *task_number, void * cookie)
{
Parallelizer *phreeqcrm_ptr = (Parallelizer *) cookie;
Phreeqc *phreeqc_ptr = phreeqcrm_ptr->GetPhreeqcPtr();
if (*task_number == 1000)
{
phreeqcrm_ptr->Phreeqc2RM(phreeqc_ptr);
}
else if (*task_number == 1001)
{
phreeqcrm_ptr->RM2Phreeqc(phreeqc_ptr);
}
else
{
}
return 0;
}
#endif

View File

@@ -105,7 +105,6 @@ transport(void)
ct[i].A_ij_il = 0.0;
ct[i].Dz2c_il = 0.0;
ct[i].mixf_il = 0.0;
ct[i].A_ij_il = 0.0;
ct[i].J_ij_count_spec = -1;
ct[i].J_ij_il_count_spec = -1;
ct[i].v_m = NULL;
@@ -900,6 +899,13 @@ transport(void)
sol_D[i].spec = (struct spec *) free_check_null(sol_D[i].spec);
}
sol_D = (struct sol_D *) free_check_null(sol_D);
for (int i = 0; i < all_cells; i++)
{
ct[i].v_m = (struct V_M *) free_check_null(ct[i].v_m);
ct[i].v_m_il = (struct V_M *) free_check_null(ct[i].v_m_il);
ct[i].J_ij = (struct J_ij *) free_check_null(ct[i].J_ij);
ct[i].J_ij_il = (struct J_ij *) free_check_null(ct[i].J_ij_il);
}
ct = (struct CT *) free_check_null(ct);
moles_added = (struct MOLES_ADDED *) free_check_null(moles_added);
}
@@ -2198,7 +2204,9 @@ find_J(int icell, int jcell, LDBLE mixf, LDBLE DDt, int stagnant)
if (dV_dcell && !find_current)
goto dV_dcell2;
ct[icell].v_m = ct[icell].v_m_il = NULL;
ct[icell].v_m = (struct V_M *) free_check_null(ct[icell].v_m);
ct[icell].v_m_il = (struct V_M *) free_check_null(ct[icell].v_m_il);
//ct[icell].v_m = ct[icell].v_m_il = NULL;
if (stagnant)
{
if (!il_calcs && (cell_data[icell].por < multi_Dpor_lim
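
The find_J hunk above replaces the bare NULL assignment of ct[icell].v_m and ct[icell].v_m_il with free_check_null() calls, so blocks allocated on a previous pass are released before the pointers are cleared. A minimal illustration of the difference (hypothetical buffer, not PHREEQC code), assuming the same free-and-return-NULL behavior:

#include <cstdlib>

int main()
{
    double *buf = (double *) malloc(10 * sizeof(double));

    // Leak: overwriting a live pointer with NULL orphans the allocation.
    // buf = NULL;

    // Fix applied in find_J: free the old block first, then clear the pointer.
    free(buf);
    buf = NULL;

    return 0;
}
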