Commit

Update update_grid.cc
jpollin98 committed May 1, 2024
1 parent 9a01604 commit 860190d
Showing 1 changed file with 55 additions and 57 deletions.
112 changes: 55 additions & 57 deletions update_grid.cc
@@ -1253,81 +1253,79 @@ void update_grid(FILE *estimators_file, const int nts, const int nts_prev, const
  // generate a loop over all mgi
  if constexpr (DETAILED_BF_ESTIMATORS_ON) {
    for (int mgi = 0; mgi < grid::get_npts_model(); mgi++) {
      const int assoc_cells = grid::get_numassociatedcells(mgi);
      if (assoc_cells > 0) {
        const double deltaV =
            grid::get_modelcell_assocvolume_tmin(mgi) * pow(globals::timesteps[nts_prev].mid / globals::tmin, 3);

        const double estimator_normfactor = 1 / deltaV / deltat / globals::nprocs;

        radfield::normalise_bf_estimators(mgi, estimator_normfactor / H);
      }
    }
  }
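  // A note on the factors above: deltaV is the cell's associated volume at tmin scaled by
  // (t_mid / t_min)^3, i.e. the volume at the midpoint of the previous timestep assuming homologous
  // expansion, so estimator_normfactor = 1 / (deltaV * deltat * nprocs) turns the estimator totals
  // (presumably summed over all MPI ranks) into per-volume, per-time rates. The additional division
  // by H is presumably the Planck-constant factor expected by radfield::normalise_bf_estimators.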

  for (int mgi = 0; mgi < grid::get_npts_model(); mgi++) {
    /// Check if this task should work on the current model grid cell.
    /// If yes, update the cell and write out the estimators

    /// place line in here

    if (mgi >= nstart && mgi < nstart + ndo) {
      // use_cellhist = false;
      // cellhistory_reset(-99, true);

      struct heatingcoolingrates heatingcoolingrates = {};
      update_grid_cell(mgi, nts, nts_prev, titer, tratmid, deltat, &heatingcoolingrates);

      // maybe want to add omp ordered here if the modelgrid cells should be output in order
      // use_cellhist = true;
      // cellhistory_reset(mgi, true);
#ifdef _OPENMP
#pragma omp critical(estimators_file)
#endif
      { write_to_estimators_file(estimators_file, mgi, nts, titer, &heatingcoolingrates); }
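      // The named critical section above ensures that only one OpenMP thread at a time appends to
      // estimators_file, so per-cell output lines are not interleaved when this loop runs in
      // parallel (the cells may still be written out of order, as the comment above notes).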

    } else if (grid::get_numassociatedcells(mgi) > 0) {
      /// else, only reset gammaestimator to zero. This allows us to do a global MPI
      /// communication after update_grid to synchronize gammaestimator
      /// and write a contiguous restart file with grid properties
      if constexpr (USE_LUT_PHOTOION) {
        zero_gammaestimator(mgi);
      }
    }
  }  /// end parallel for loop over all modelgrid cells
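  // Each rank only evolves its assigned block of cells, mgi in [nstart, nstart + ndo); for cells
  // owned by other ranks only gammaestimator is zeroed (when USE_LUT_PHOTOION is enabled) so that,
  // as the comments above explain, the later global MPI communication and the contiguous restart
  // file stay consistent.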

  /// Now after all the relevant tasks of update_grid have been finished activate
  /// the use of the cellhistory for all OpenMP tasks, in what follows (update_packets)
  use_cellhist = true;
  }  /// end OpenMP parallel section

  // alternative way to write out estimators. this keeps the modelgrid cells in order but
  // heatingrates are not valid.
  // #ifdef _OPENMP
  // for (int n = nstart; n < nstart+nblock; n++) {
  //   write_to_estimators_file(n,nts);
  // }
  // #endif

  /// Assign the minimum of thread private mps to the global variable max_path_step
  globals::max_path_step = mps[0];
  for (int i = 1; i < get_max_threads(); i++) {
    if (mps[i] < globals::max_path_step) {
      globals::max_path_step = mps[i];
    }
  }
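  // mps[] appears to hold each thread's own limit on the maximum packet path step, so taking the
  // minimum over all threads (and capping it at rmax / 10 just below) keeps the most restrictive value.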

  globals::max_path_step = fmin(globals::max_path_step, globals::rmax / 10.);
  printout("max_path_step %g\n", globals::max_path_step);

  const time_t time_update_grid_end_thisrank = time(nullptr);
  printout("finished update grid on this rank at time %ld\n", time_update_grid_end_thisrank);

#ifdef MPI_ON
  MPI_Barrier(MPI_COMM_WORLD);
#endif
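  // With MPI enabled, the barrier above makes every rank wait here, so the printout below can split
  // the elapsed time into the work this rank spent in update_grid and the time it then waited for
  // the other ranks to finish.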
  printout(
      "timestep %d: time after update grid for all processes %ld (rank %d took %lds, waited "
      "%lds, total %lds)\n",
      nts, time(nullptr), my_rank, time_update_grid_end_thisrank - sys_time_start_update_grid,
      time(nullptr) - time_update_grid_end_thisrank, time(nullptr) - sys_time_start_update_grid);
}
