The FARGO_THORIN code developer's guide
EnergySources.c
1 /**
2  * @file EnergySources.c
3  *
4  * @brief Subroutines related to the heating/cooling source terms,
5  * numerical solver for the energy equation and radiative diffusion.
6  *
7  * @author Ondřej Chrenko <chrenko@sirrah.troja.mff.cuni.cz>
8  *
9  * @details Calculates the individual energy source terms
10  * according to Chrenko et al. (2017). Then solves the energy
11  * equation in a linearised implicit form using the successive
12  * over-relaxation (SOR) method (see Appendix A in Chrenko et al. 2017).
13  *
14  * @section LICENSE
15  * Copyright (c) 2017 Ondřej Chrenko. See the LICENSE file of the
16  * distribution.
17  *
18  */
19 
20 #include "fargo.h"
21 
22 #define SORMAXITERS 1000
23 #define SOREPS 1.0e-8
24 
25 /* Opacity table according to Bell & Lin (1994), see also Keith & Wardle (2014) */
26 static real kappa0[7] = {2.0e-4, 2.0e16, 0.1, 2.0e81, 1.0e-8, 1.0e-36, 1.5e20};
27 static real a[7] = {0.0, 0.0, 0.0, 1.0, 2.0/3.0, 1.0/3.0, 1.0};
28 static real b[7] = {2.0, -7.0, 0.5, -24.0, 3.0, 10.0, -5.0/2.0};
29 
30 static PolarGrid *GradTemperRad, *GradTemperTheta, *GradTemperMagnitude;
31 static PolarGrid *DiffCoefCentered, *DiffCoefIfaceRad, *DiffCoefIfaceTheta;
32 static PolarGrid *Opacity, *VolumeDensity;
33 static PolarGrid *Flaring, *Qirradiation;
34 
35 static PolarGrid *DiscretizationCoefA, *DiscretizationCoefB;
36 static PolarGrid *MatrixNexttoTemperl, *MatrixNexttoTemperlip, *MatrixNexttoTemperlim;
37 static PolarGrid *MatrixNexttoTemperljp, *MatrixNexttoTemperljm, *RightHandSide;
38 
39 static real CV; // specific heat capacity at constant volume
40 static real omegabest, domega; // optimum relaxation parameter for SOR and its small increment
41 static int Niterbest; // minimum number of iterations reached while optimising the relaxation parameter
42 static int jchess1st, jchess2nd;
43 
44 /** Initialises the polar arrays associated with the heating/cooling
45  * processes */
46 void InitRadiatDiffusionFields ()
47 {
48  /* all polar grids are within the scope of this file, apart from Qminus
49  which is a global variable (see global.h) */
50  Qminus = CreatePolarGrid (NRAD, NSEC, "qminus");
51  GradTemperRad = CreatePolarGrid (NRAD, NSEC, "gradtemperr");
52  GradTemperTheta = CreatePolarGrid (NRAD, NSEC, "gradtempert");
53  GradTemperMagnitude = CreatePolarGrid (NRAD, NSEC, "gradtemperm");
54  DiffCoefCentered = CreatePolarGrid (NRAD, NSEC, "diffcoefc");
55  DiffCoefIfaceRad = CreatePolarGrid (NRAD, NSEC, "diffcoefr");
56  DiffCoefIfaceTheta = CreatePolarGrid (NRAD, NSEC, "diffcoeft");
57  Opacity = CreatePolarGrid (NRAD, NSEC, "opacity");
58  VolumeDensity = CreatePolarGrid (NRAD, NSEC, "volumedensity");
59  Flaring = CreatePolarGrid (NRAD, NSEC, "flaring");
60  Qirradiation = CreatePolarGrid (NRAD, NSEC, "qirradiation");
61  DiscretizationCoefA = CreatePolarGrid (NRAD, NSEC, "discretcoefa");
62  DiscretizationCoefB = CreatePolarGrid (NRAD, NSEC, "discretcoefb");
63  MatrixNexttoTemperl = CreatePolarGrid (NRAD, NSEC, "matrixl");
64  MatrixNexttoTemperlip = CreatePolarGrid (NRAD, NSEC, "matrixlip");
65  MatrixNexttoTemperlim = CreatePolarGrid (NRAD, NSEC, "matrixlim");
66  MatrixNexttoTemperljp = CreatePolarGrid (NRAD, NSEC, "matrixljp");
67  MatrixNexttoTemperljm = CreatePolarGrid (NRAD, NSEC, "matrixljm");
68  RightHandSide = CreatePolarGrid (NRAD, NSEC, "righthandside");
69  if (Write_Qbalance) Qbalance = CreatePolarGrid (NRAD, NSEC, "qbalance");
70  CV = GASCONST/(MOLWEIGHT*(ADIABIND - 1.0));
71 }
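Orientation note: the constant set on the last line follows the ideal-gas relation c_V = R/(mu*(gamma-1)) and is what later converts between temperature and internal energy per unit area, e = Sigma*c_V*T (see the end of ImplicitRadiativeDiffusion()). The standalone sketch below uses illustrative SI-like numbers, not the code units of FARGO_THORIN:

#include <stdio.h>

/* Minimal sketch of the ideal-gas heat capacity and the e = Sigma*cV*T
   conversion.  All numbers are illustrative, not code units. */
int main (void)
{
  double Rgas = 8.314;      /* gas constant [J mol^-1 K^-1], illustrative      */
  double mu = 2.35e-3;      /* mean molecular weight [kg mol^-1], illustrative */
  double gamma = 1.4;       /* adiabatic index, illustrative                   */
  double cV = Rgas/(mu*(gamma - 1.0));
  double Sigma = 2000.0, T = 150.0;  /* hypothetical surface density and temperature */
  printf ("cV = %g, internal energy per unit area e = %g\n", cV, Sigma*cV*T);
  return 0;
}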
72 
73 /** Estimate of the heat loss due to radiation
74  * escape in the vertical direction with respect to the midplane.
75  * See Eq. (9) in Chrenko et al. (2017). */
76 void CalculateQminus (Rho)
77 PolarGrid *Rho;
78 {
79  int nr, ns, i, j, l;
80  real *rho, *opacity, *qminus, *temper;
81  real tau, taueff;
82  /* ----- */
83  nr = Rho->Nrad;
84  ns = Rho->Nsec;
85  rho = Rho->Field;
86  opacity = Opacity->Field;
87  temper = Temperature->Field;
88  qminus = Qminus->Field;
89 #pragma omp parallel for default(none) shared(nr,ns,opacity,rho,temper,qminus,OPACITYDROP) \
90  private(i,j,l,tau,taueff)
91  for (i=0; i<nr; i++) {
92  for (j=0; j<ns; j++) {
93  l = j + i*ns;
94  tau = OPACITYDROP*0.5*opacity[l]*rho[l]; /* the surface density is used here (see D'Angelo 2003), not the volume density */
95  taueff = EffectiveOpticalDepth (tau);
96  qminus[l] = 2.0*STEFANBOLTZMANN*pow(temper[l],3.0)/taueff; /* !!! this is NOT pure qrad (which is propto T^4),
97  but it's a useful form for the implicit inversion of the energy eq. */
98  }
99  }
100 }
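A note on the factoring above: the physical cooling rate is Q^- = 2*sigma_SB*T^4/tau_eff, but qminus stores Q^-/T = 2*sigma_SB*T^3/tau_eff so that the T^4 dependence can be linearised around the old temperature, sigma_SB*T^4 ~ sigma_SB*T_old^3*(4*T_new - 3*T_old); this is where the 4.0*bfac*qminus[l] and 3.0*qminus[l]*temper[l] terms assembled later in ImplicitRadiativeDiffusion() come from. A single-cell sketch of that linearised implicit step, with made-up numbers and the diffusion/compression terms omitted:

#include <math.h>
#include <stdio.h>

/* Single-cell sketch of the linearised implicit heating/cooling update,
     T_new = (T_old + bfac*(Qplus + 3*q*T_old)) / (1 + 4*bfac*q),
   where q = 2*sigma*T_old^3/tau_eff and bfac = dt/(Sigma*cV).
   All numbers are illustrative. */
int main (void)
{
  double sigma = 5.67e-8, taueff = 10.0;
  double Sigma = 2000.0, cV = 1.0e4, dt = 100.0, Qplus = 1.0e2, Told = 100.0;
  double q = 2.0*sigma*pow (Told, 3.0)/taueff;
  double bfac = dt/(Sigma*cV);
  double Tnew = (Told + bfac*(Qplus + 3.0*q*Told))/(1.0 + 4.0*bfac*q);
  printf ("T_old = %g  ->  T_new = %g\n", Told, Tnew);
  return 0;
}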
101 
102 /** Calculates the stellar irradiation source term.
103  * See Eq. (13) in Chrenko et al. (2017). */
104 void CalculateQirr (Rho)
105 PolarGrid *Rho;
106 {
107  int nr, ns, i, j, l;
108  real *rho, *opacity, *qirr, *flaring;
109  real Teff4, Rstar2, tau, taueff, Tirr4, r2inv;
110  static real Tirr4fac=0.0;
111  /* ----- */
112  if (Tirr4fac==0.0) {
113  Teff4 = EFFECTIVETEMPERATURE/T2SI;
114  Teff4 = pow(Teff4, 4.0);
115  Rstar2 = 6.957e8/AU_SI; // 1 solar radius in code units
116  Rstar2 = pow(STELLARRADIUS*Rstar2,2.0);
117  Tirr4fac = (1.0 - DISCALBEDO)*Teff4*Rstar2;
118  }
119  nr = Rho->Nrad;
120  ns = Rho->Nsec;
121  rho = Rho->Field;
122  opacity = Opacity->Field;
123  qirr = Qirradiation->Field;
124  flaring = Flaring->Field;
125 #pragma omp parallel for default(none) \
126  shared(nr,ns,rho,opacity,Rmed2,qirr,Tirr4fac,flaring,OPACITYDROP) \
127  private(i,j,l,tau,taueff,Tirr4,r2inv)
128  for (i=0; i<nr; i++) {
129  r2inv = 1.0/Rmed2[i];
130  for (j=0; j<ns; j++) {
131  l = j + i*ns;
132  if (flaring[l] > 0.0) {
133  tau = OPACITYDROP*0.5*opacity[l]*rho[l];
134  taueff = EffectiveOpticalDepth (tau);
135  Tirr4 = Tirr4fac*r2inv*flaring[l];
136  qirr[l] = 2.0*STEFANBOLTZMANN*Tirr4/taueff;
137  } else {
138  qirr[l] = 0.0;
139  }
140  }
141  }
142 }
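To unpack the cached factor Tirr4fac: the irradiation temperature entering Eq. (13) is T_irr^4 = (1 - albedo)*T_eff^4*(R_star/r)^2*sin(phi_grazing), and the source term is Q_irr = 2*sigma_SB*T_irr^4/tau_eff, with sin(phi) supplied by CalculateFlaring(). A standalone sketch with illustrative SI numbers (not code units):

#include <math.h>
#include <stdio.h>

/* Illustrative evaluation of the stellar irradiation source term.
   All numbers are made up for demonstration. */
int main (void)
{
  double sigma = 5.67e-8;                             /* Stefan-Boltzmann (SI)        */
  double Teff = 4300.0, Rstar = 6.957e8, r = 7.5e11;  /* hypothetical star and orbit  */
  double albedo = 0.5, sinphi = 0.05, taueff = 20.0;  /* hypothetical disc values     */
  double Tirr4 = (1.0 - albedo)*pow (Teff, 4.0)*pow (Rstar/r, 2.0)*sinphi;
  printf ("Tirr = %g K, Qirr = %g W m^-2\n", pow (Tirr4, 0.25), 2.0*sigma*Tirr4/taueff);
  return 0;
}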
143 
144 /** Calculates the sine of the grazing angle by reconstructing
145  * the surface from the pressure scale height.
146  * See Eq. (15) in Chrenko et al. (2017). */
147 void CalculateFlaring ()
148 {
149  int i,j,l,lim,lip,nr,ns;
150  real csin, csout, Hin, Hout, dHdr, H;
151  real *flaring, *cs;
152  static real Rstar=0.0;
153  if (Rstar==0.0) Rstar = STELLARRADIUS*6.957e8/AU_SI;
154  flaring = Flaring->Field;
155  cs = SoundSpeed->Field;
156  nr = SoundSpeed->Nrad;
157  ns = SoundSpeed->Nsec;
158 #pragma omp parallel for default(none) \
159  shared(nr,ns,cs,Rmed,Rinf,Rsup,InvDiffRsup,Rstar, \
160  flaring,SQRT_ADIABIND_INV,InvRmed,OmegaInv) \
161  private(i,j,l,lip,lim,csin,csout,Hin,Hout,dHdr,H)
162  for (i=1; i<nr-1; i++) {
163  for (j=0; j<ns; j++) {
164  l = j + i*ns;
165  lip = l + ns;
166  lim = l - ns;
167  csin = 0.5*(cs[l] + cs[lim]);
168  csout = 0.5*(cs[l] + cs[lip]);
169  Hin = csin*pow(Rinf[i],1.5)*SQRT_ADIABIND_INV;
170  Hout = csout*pow(Rsup[i],1.5)*SQRT_ADIABIND_INV;
171  dHdr = (Hout - Hin)*InvDiffRsup[i];
172  H = cs[l]*OmegaInv[i]*SQRT_ADIABIND_INV;
173  flaring[l] = atan(dHdr) - atan((H-0.4*Rstar)*InvRmed[i]); // Baillié & Charnoz (2014)
174  if (flaring[l] >= 0.0) {
175  flaring[l] = sin(flaring[l]);
176  } else { // surface parts tilted away from the incoming starlight
177  flaring[l] = 0.0;
178  }
179  if (i==1) { // extrapolate to the boundary ring i=0 (the ring i=nr-1 is handled below)
180  H = cs[lim]*OmegaInv[i-1]*SQRT_ADIABIND_INV;
181  flaring[lim] = atan(dHdr) - atan((H-0.4*Rstar)/Rmed[i-1]); // we adopt the closest derivative
182  if (flaring[lim] >= 0.0) {
183  flaring[lim] = sin(flaring[lim]);
184  } else {
185  flaring[lim] = 0.0;
186  }
187  }
188  if (i==nr-2) {
189  H = cs[lip]*OmegaInv[i+1]*SQRT_ADIABIND_INV;
190  flaring[lip] = atan(dHdr) - atan((H-0.4*Rstar)/Rmed[i+1]); // we adopt the closest derivative
191  if (flaring[lip] >= 0.0) {
192  flaring[lip] = sin(flaring[lip]);
193  } else {
194  flaring[lip] = 0.0;
195  }
196  }
197  }
198  }
199 }
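The reconstruction above evaluates the grazing angle as phi = atan(dH/dr) - atan((H - 0.4*R_star)/r), following Baillié & Charnoz (2014), and keeps sin(phi) only where phi > 0 (surface elements exposed to starlight). A minimal standalone sketch with hypothetical numbers in arbitrary consistent units:

#include <math.h>
#include <stdio.h>

/* Sketch of the grazing-angle reconstruction used in CalculateFlaring().
   Hin/Hout are pressure scale heights at the inner/outer interface of a ring;
   all numbers are illustrative. */
int main (void)
{
  double Rin = 0.95, Rout = 1.05, Rmed = 1.0, Rstar = 0.005;
  double Hin = 0.045, Hout = 0.052, H = 0.048;
  double dHdr = (Hout - Hin)/(Rout - Rin);
  double phi = atan (dHdr) - atan ((H - 0.4*Rstar)/Rmed);
  double sinphi = (phi > 0.0) ? sin (phi) : 0.0;   /* shadowed surface if phi <= 0 */
  printf ("grazing angle = %g rad, sin(phi) = %g\n", phi, sinphi);
  return 0;
}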
200 
201 /** The main numerical solver of the energy equation. */
202 void ImplicitRadiativeDiffusion (Rho, EnergyInt, EnergyNew, dt)
203 PolarGrid *Rho, *EnergyInt, *EnergyNew;
204 real dt;
205 {
206  real *A, *B, *Ml, *Mlip, *Mlim, *Mljp, *Mljm, *RHS;
207  real *temper, *rho, *energynew, *cs, *Dr, *Dt;
208  real *qplus, *qminus, *divergence, *qirr;
209  int nr, ns, i, j, l, lip, ljp, Niter, k;
210  real dxtheta, invdxtheta2, H, afac, bfac;
211  static boolean first=YES;
212  static int Niterlast;
213  static real omega;
214  /* ----- */
215  nr = Rho->Nrad;
216  ns = Rho->Nsec;
217  rho = Rho->Field;
218  energynew = EnergyNew->Field;
219  temper = Temperature->Field;
220  cs = SoundSpeed->Field;
221  Dr = DiffCoefIfaceRad->Field;
222  Dt = DiffCoefIfaceTheta->Field;
223  A = DiscretizationCoefA->Field;
224  B = DiscretizationCoefB->Field;
225  Ml = MatrixNexttoTemperl->Field;
226  Mlip = MatrixNexttoTemperlip->Field;
227  Mlim = MatrixNexttoTemperlim->Field;
228  Mljp = MatrixNexttoTemperljp->Field;
229  Mljm = MatrixNexttoTemperljm->Field;
230  RHS = RightHandSide->Field;
231  divergence = DivergenceVelocity->Field;
232  qplus = Qplus->Field;
233  qminus = Qminus->Field;
234  qirr = Qirradiation->Field;
235  ComputeTemperatureField (Rho, EnergyInt); /* use the intermediate energy (updated in SubStep2) to get T and cs */
236  ComputeSoundSpeed (Rho, EnergyInt);
237  MidplaneVolumeDensity (Rho); /* use new cs to convert the surface density to the volume density */
238  OpacityProfile (); /* use new temperature and vol.dens to calc. the opacity */
239  CalculateQminus (Rho); /* estimate the vertical cooling term (z-direction radiation escape) */
240  if (StellarIrradiation) { /* for stellar irradiated discs: */
241  CalculateFlaring (); /* - check which parts are exposed to the incoming radiation */
242  CalculateQirr (Rho); /* - calculate the heating contribution */
243  }
244  DiffusionCoefs (); /* calculate the diffusion coefficients */
245  /* To update a centered temperature value using SOR, we need the value
246  * to be surrounded by 4 averaged diffusion coefs (one per interface)
247  * and the temperature field in adjacent cells must be known.
248  * Update is not possible at the innermost and outermost ring on each CPU.
249  * For this reason and for MPI-parallelization of the SOR method, we
250  * restrict the computation to the active mesh of each processor
251  * (thus everything is fine on middle CPUs, the inner/outer ring problem
252  * remains only on the inner/outer CPU).
253  * After SOR, the temperature in overlapping zones will be synchronized,
254  * leaving old values of T only in the inner ring of inner CPU and in the outer ring
255  * of outer CPU. T is updated in these rings by adopting the neighbouring value. */
256 #pragma omp parallel default(none) \
257  shared(nr, ns, One_or_active, MaxMO_or_active, Rmed, Rinf, OmegaInv, SQRT_ADIABIND_INV, \
258  InvDiffRmed, InvDiffRsup, A, B, Dr, Dt, cs, dt, rho, Ml, Mlip, qirr, StellarIrradiation, \
259  Mlim, Mljp, Mljm, RHS, CV, divergence, qminus, temper, qplus, ADIABIND, AccretHeating, heatsrc_max, heatsrc_index, heatsrc) \
260  private(i, j, l, lip, ljp, k, dxtheta, invdxtheta2, H, afac, bfac)
261  {
262 #pragma omp for
263  for (i=1; i<nr; i++) { // parts of matrices for SOR (no active mesh restriction so far!)
264  dxtheta = 2.0*PI/(real)ns*Rmed[i];
265  invdxtheta2 = 1.0/dxtheta/dxtheta; // this is 1/(dtheta*Rmed[i])^2
266  for (j=0; j<ns; j++) {
267  l = j + i*ns;
268  A[l] = Dr[l]*Rinf[i]*InvDiffRmed[i];
269  H = cs[l]*OmegaInv[i]*SQRT_ADIABIND_INV;
270  B[l] = Dt[l]*invdxtheta2*2.0*H;
271  }
272  }
273 #pragma omp for
274  for (i=One_or_active; i<MaxMO_or_active; i++) { // final matrices for SOR, active mesh restriction
275  for (j=0; j<ns; j++) {
276  l = j + i*ns;
277  lip = l + ns;
278  ljp = l + 1;
279  if (j == ns-1) ljp = i*ns;
280  H = cs[l]*OmegaInv[i]*SQRT_ADIABIND_INV;
281  bfac = dt/(rho[l]*CV);
282  afac = bfac*InvDiffRsup[i]*2.0*H/Rmed[i];
283  Ml[l] = 1.0 + divergence[l]*(ADIABIND-1.0)*dt + 4.0*bfac*qminus[l] + afac*(A[lip] + A[l]) + bfac*(B[ljp] + B[l]);
284  Mlip[l] = - afac*A[lip]; // because of A[lip], the previous cycle is not restricted to active mesh
285  Mlim[l] = - afac*A[l];
286  Mljp[l] = - bfac*B[ljp];
287  Mljm[l] = - bfac*B[l];
288  RHS[l] = temper[l] + bfac*(qplus[l] + 3.0*qminus[l]*temper[l]);
289  if (StellarIrradiation) RHS[l] += bfac*qirr[l];
290  if (AccretHeating) {
291  for (k=0; k<heatsrc_max; k++) {
292  if (l==heatsrc_index[k]) RHS[l] += bfac*heatsrc[k];
293  }
294  }
295  }
296  }
297  } /* #end of omp parallel section */
298  if (first) {
299  first = NO;
300  IterateRelaxationParameter ();
301  omega = omegabest;
302  Niterlast = Niterbest;
303  }
304  omega += domega; // always try to change the relax. parameter a little (domega set in IterateRelaxationParameter())
305  if (omega > 1.999999) { // stay in the [1,2) interval
306  omega = 1.999999;
307  domega = -domega;
308  }
309  if (omega < 1.0) {
310  omega = 1.0;
311  domega = -domega;
312  }
313  Niter = SuccessiveOverrelaxation (omega, YES); // solve the implicit eq. with SOR; YES means that the program will crash when exceeding SORMAXITERS
314  if (Niter > Niterlast) domega = - domega; // if the performance is worse than in the previous case, apply the opposite change next time
315  Niterlast = Niter; // (in this way, omega oscillates around the optimum value)
316  if (CPU_Number > 1) SynchronizeOverlapFields (temper, nr, CPUOVERLAP); /* fill the overlapping ghost zones with values from the neighbouring CPU's active mesh */
317  if (CPU_Rank == 0) { // get the temperature in the innermost and outermost ring by adopting the neighbouring value
318  for (j=0; j<ns; j++) {
319  temper[j] = temper[j+ns];
320  }
321  }
322  if (CPU_Rank == CPU_Number-1) {
323  for (j=0; j<ns; j++) {
324  temper[j+(nr-1)*ns] = temper[j+(nr-2)*ns];
325  }
326  }
327  for (i=0; i<nr; i++) { // get the energy from the updated and synchronised temperature
328  for (j=0; j<ns; j++) {
329  l = j + i*ns;
330  energynew[l] = rho[l]*temper[l]*CV;
331  }
332  }
333 }
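Two things are worth spelling out about the routine above. First, the discretisation couples each active cell to its four neighbours, so the implicit update is a sparse linear system whose five nonzero matrix entries per row are stored in Ml, Mlip, Mlim, Mljp and Mljm, with RightHandSide as the constant vector; SuccessiveOverrelaxation() then solves it iteratively. Second, the relaxation parameter is tuned on the fly: omega is nudged by domega on every call and the sign of domega is flipped whenever the iteration count gets worse, so omega oscillates around the optimum found by IterateRelaxationParameter() while staying inside [1, 2). A condensed, self-contained sketch of just that bookkeeping (fake_sor() is a made-up stand-in for the real solver):

#include <stdio.h>

/* Sketch of the adaptive relaxation-parameter bookkeeping used above.
   fake_sor() pretends the iteration count is minimal near omega = 1.8. */
static int fake_sor (double omega)
{
  return 10 + (int)(100.0*(omega - 1.8)*(omega - 1.8));
}

static void adapt_omega (double *omega, double *domega, int *niterlast)
{
  *omega += *domega;                                /* nudge omega a little        */
  if (*omega > 1.999999) { *omega = 1.999999; *domega = -(*domega); }
  if (*omega < 1.0)      { *omega = 1.0;      *domega = -(*domega); }
  int niter = fake_sor (*omega);                    /* solve the implicit system   */
  if (niter > *niterlast) *domega = -(*domega);     /* worse -> reverse next time  */
  *niterlast = niter;
}

int main (void)
{
  double omega = 1.5, domega = 0.05;
  int niterlast = fake_sor (omega);
  for (int k = 0; k < 20; k++) adapt_omega (&omega, &domega, &niterlast);
  printf ("omega settled near %g\n", omega);
  return 0;
}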
334 
335 /** When solving the energy equation for the first time,
336  * the function spans through various values of the SOR
337  * parameter in order to find its best value to start with. */
338 void IterateRelaxationParameter () // note: no explicit MPI stuff here -> everything is synchronized in the reduction routines of SOR
339 {
340  real omegamin=1.0, omegamax=1.999999, omega=1.0; // parameter from interval [1,2)
341  int nr, ns, n, i, j, l, nsplit=9, Niter = SORMAXITERS, count=0;
342  real *temper, *tempbckp;
343  /* ----- */
344  nr = Temperature->Nrad;
345  ns = Temperature->Nsec;
346  temper = Temperature->Field;
347  tempbckp = (real *) malloc(sizeof(real) * (nr + 3) * (ns + 1) + 5); // bckp field with the same size as the field of PolarGrid
348  for (i=0; i<nr; i++) {
349  for (j=0; j<ns; j++) {
350  l = j + i*ns;
351  tempbckp[l] = temper[l]; // backup temperature
352  }
353  }
354  domega = (omegamax - omegamin)/nsplit; // increment to create nsplit intervals bounded by nsplit+1 values
355  Niterbest = SORMAXITERS; // max no. of iterations in SOR is limited by SORMAXITERS (so we use upper limit as the first guess)
356  omegabest = 1.0; // Gauss-Seidel value as the first guess for relax. parameter
357  while (YES) { // infinite loop (till return is called)
358  for (n=0; n<=nsplit; n++) {
359  Niter = SuccessiveOverrelaxation (omega, NO); // NO means that the program won't crash when exceeding SORMAXITERS, it will instead try the next omega
360  if (Niter == Niterbest) count++; // count the number of omegas in the sub-interval that reach the best convergence
361  if (Niter < Niterbest) {
362  Niterbest = Niter; // keep track of the lowest number of iterations
363  omegabest = omega; // corresponding to the best relax. parameter
364  }
365  omega += domega; // try next value of omega
366  for (i=0; i<nr; i++) { // return the original temperature values so that we solve the same problem again
367  for (j=0; j<ns; j++) {
368  l = j + i*ns;
369  temper[l] = tempbckp[l];
370  }
371  }
372  }
373  if (Niterbest == SORMAXITERS) { // this means that no call of SOR converged (must increase SORMAXITERS)
374  masterprint ("Warning! SOR did not converge when estimating the initial over-relaxation parameter.\n");
375  masterprint ("Try using larger SORMAXITERS. Terminating now...\n");
376  prs_exit(1);
377  }
378  if (count==nsplit+1) { // plateau = all parameters from the tested interval converge equally fast
379  domega = omegamax-omegamin; // the width of the sub-interval will be used to oscillate omega around the optimum value
380  masterprint ("\tInitial over-relaxation parameter: omega = %#.6g, no. of iterations = %d\n", omegabest, Niterbest);
381  free (tempbckp); // deallocate the bckp field
382  return; // leave the routine
383  } else {
384  count = 0; // reset counter for the next interval of omegas
385  }
386  omegamin = omegabest - domega; // repeat the test on a sub-interval of the previous range
387  omegamax = omegabest + domega;
388  if (omegamin < 1.0) omegamin = 1.0;
389  if (omegamax > 1.999999) omegamax = 1.999999;
390  domega = (omegamax - omegamin)/nsplit;
391  omega = omegamin;
392  }
393 }
394 
395 /** The SOR method algorithm inspired by the one from Numerical Recipes */
396 int SuccessiveOverrelaxation (omega, errcheck)
397 real omega;
398 boolean errcheck;
399 {
400  int n, i, j, l, nr, ns, ipass, jchess;
401  int lip, lim, ljp, ljm;
402  real resid, normres0 = 0.0, normres, temp;
403  real *temper, *Ml, *Mlip, *Mlim, *Mljp, *Mljm, *RHS;
404  static boolean first=YES;
405  /* ----- */
406  temper = Temperature->Field;
407  nr = Temperature->Nrad;
408  ns = Temperature->Nsec;
409  Ml = MatrixNexttoTemperl->Field;
410  Mlip = MatrixNexttoTemperlip->Field;
411  Mlim = MatrixNexttoTemperlim->Field;
412  Mljp = MatrixNexttoTemperljp->Field;
413  Mljm = MatrixNexttoTemperljm->Field;
414  RHS = RightHandSide->Field;
415  if (first) {
416  first = NO;
417  if (CPU_Number > 1) {
418  ChessBoardIndexing ();
419  } else {
420  jchess1st = 0;
421  jchess2nd = 1;
422  }
423  }
424  for (i=One_or_active; i<MaxMO_or_active; i++) {
425  for (j=0; j<ns; j++) {
426  l = j + i*ns;
427  lip = l + ns;
428  lim = l - ns;
429  ljp = l + 1;
430  if (j == ns-1) ljp = i*ns;
431  ljm = l - 1;
432  if (j == 0) ljm = i*ns + ns-1;
433  resid = temper[l]*Ml[l] + temper[lip]*Mlip[l] \
434  + temper[lim]*Mlim[l] + temper[ljp]*Mljp[l] \
435  + temper[ljm]*Mljm[l] - RHS[l];
436  normres0 += fabs(resid);
437  }
438  }
439  MPI_Allreduce (&normres0, &temp, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
440  normres0 = temp;
441  for (n=1; n<=SORMAXITERS; n++) {
442  normres = 0.0;
443  for (ipass = 1; ipass <= 2; ipass++) { // the array is swept through like a chessboard (black cells in one pass, white cells in the other)
444  if (ipass == 1) jchess = jchess1st;
445  if (ipass == 2) jchess = jchess2nd;
446  // jchess alternates between 0 and 1, changing with each increment of the index 'i'; its initial value differs between the two passes so that all cells of the chessboard are covered
447 #pragma omp parallel for default(none) \
448  shared(One_or_active, MaxMO_or_active, ns, temper, Ml, Mlip, \
449  Mlim, Mljp, Mljm, RHS, omega) \
450  private(i, j, l, lip, lim, ljp, ljm, resid) \
451  firstprivate(jchess) reduction(+ : normres)
452  for (i=One_or_active; i<MaxMO_or_active; i++) {
453  for (j=jchess; j<ns; j+=2) { // increment of 'j' index is 2
454  l = j + i*ns;
455  lip = l + ns;
456  lim = l - ns;
457  ljp = l + 1;
458  if (j == ns-1) ljp = i*ns;
459  ljm = l - 1;
460  if (j == 0) ljm = i*ns + ns-1;
461  resid = temper[l]*Ml[l] + temper[lip]*Mlip[l] \
462  + temper[lim]*Mlim[l] + temper[ljp]*Mljp[l] \
463  + temper[ljm]*Mljm[l] - RHS[l];
464  normres += fabs(resid);
465  temper[l] -= omega*resid/Ml[l];
466  }
467  jchess = 1 - jchess; // change the starting value of 'j' for the next increment of 'i'
468  }
469  if (CPU_Number > 1) SynchronizeOverlapFields (temper, nr, 1); // must update the 1st ring of each ghost zone neighbouring to the active mesh
470  }
471  MPI_Allreduce (&normres, &temp, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
472  normres = temp;
473  if (normres < SOREPS*normres0) return n;
474  }
475  if (errcheck) {
476  masterprint ("Error!\n");
477  masterprint ("The SOR solution of the radiative diffusion equation did not converge.\n");
478  masterprint ("Terminating now...\n");
479  prs_exit (1);
480  }
481  return n;
482 }
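For orientation, this is standard odd-even (red-black) SOR with the stopping rule ||resid|| < SOREPS*||resid_0||, where resid_0 is the residual of the initial guess. The self-contained toy below applies the same sweep structure and stopping rule to a small 1-D tridiagonal system (illustration only, not FARGO_THORIN code):

#include <math.h>
#include <stdio.h>

#define N      32
#define MAXIT  1000
#define EPS    1.0e-8

/* Toy odd-even SOR for the 1-D system  2*x[i] - x[i-1] - x[i+1] = 1,
   with x[0] = x[N-1] = 0 held fixed.  Illustration only. */
int main (void)
{
  double x[N] = {0.0}, omega = 1.7, res0 = 0.0, res;
  int i, n, parity;
  for (i = 1; i < N-1; i++) res0 += fabs (2.0*x[i] - x[i-1] - x[i+1] - 1.0);
  for (n = 1; n <= MAXIT; n++) {
    res = 0.0;
    for (parity = 0; parity < 2; parity++) {     /* odd cells first, then even cells */
      for (i = 1 + parity; i < N-1; i += 2) {
        double resid = 2.0*x[i] - x[i-1] - x[i+1] - 1.0;
        res += fabs (resid);
        x[i] -= omega*resid/2.0;                 /* 2.0 is the diagonal element */
      }
    }
    if (res < EPS*res0) break;
  }
  printf ("converged after %d iterations, x[N/2] = %g\n", n, x[N/2]);
  return 0;
}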
483 
484 /** Function ensures the odd-even ordering of the SOR method
485  * when the grid is split on multiple CPUs. */
486 void ChessBoardIndexing ()
487 {
488  int send[3], recv[3], prevcpu, nextcpu, nractive;
489  /* ----- */
490  prevcpu = CPU_Rank-1;
491  nextcpu = CPU_Rank+1;
492  if (CPU_Master) {
493  send[0] = 0; // inner CPU starts with odd cells during the 1st sweep
494  send[1] = 1; // and with even cells during the 2nd sweep
495  nractive = NRAD - CPUOVERLAP - 1; // master has only one ghost zone, but also the innermost ring is skipped in the SOR method
496  if (nractive % 2 == 0) {
497  send[2] = 0; // for even number of active rings, next CPU keeps the same indices
498  } else {
499  send[2] = 1; // for odd number of active rings, next CPU swaps the indices
500  }
501  MPI_Send (&send, 3, MPI_INT, nextcpu, 0, MPI_COMM_WORLD);
502  }
503  if (CPU_Rank > 0) {
504  MPI_Recv (&recv, 3, MPI_INT, prevcpu, 0, MPI_COMM_WORLD, &fargostat);
505  if (recv[2] == 0) {
506  send[0] = recv[0];
507  send[1] = recv[1];
508  } else {
509  send[0] = recv[1];
510  send[1] = recv[0];
511  }
512  if (CPU_Rank < (CPU_Number-1)) { // outer CPU does not have to send anything
513  nractive = NRAD - 2*CPUOVERLAP; // middle CPUs have two ghost zones
514  if (nractive % 2 == 0) {
515  send[2] = 0;
516  } else {
517  send[2] = 1;
518  }
519  MPI_Send (&send, 3, MPI_INT, nextcpu, 0, MPI_COMM_WORLD);
520  }
521  }
522  jchess1st = send[0];
523  jchess2nd = send[1];
524 }
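In effect, the relay above keeps the global red-black colouring consistent across the radial domain decomposition: rank 0 starts with the parities (0, 1), and every following rank either keeps or swaps the pair depending on whether its predecessor covered an even or odd number of active rings. A serial sketch of that rule alone, with hypothetical ring counts and no MPI:

#include <stdio.h>

/* Serial sketch of the parity relay performed by ChessBoardIndexing().
   The active ring counts below are hypothetical. */
int main (void)
{
  int nractive[4] = {61, 58, 58, 61};     /* made-up active ring counts per CPU */
  int j1 = 0, j2 = 1;
  for (int rank = 0; rank < 4; rank++) {
    printf ("CPU %d: jchess1st = %d, jchess2nd = %d\n", rank, j1, j2);
    if (nractive[rank] % 2 != 0) { int t = j1; j1 = j2; j2 = t; }  /* odd count -> swap */
  }
  return 0;
}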
525 
526 /** Calculation of the diffusion coefficients */
527 void DiffusionCoefs ()
528 {
529  int nr, ns, i, j, l, lim, ljm;
530  real *voldens, *temper, *opacity, *D, *Dr, *Dt, *gradtmag;
531  real gradt, s, fluxlimiter, fac;
532  /* ----- */
533  voldens = VolumeDensity->Field;
534  temper = Temperature->Field;
535  nr = Temperature->Nrad;
536  ns = Temperature->Nsec;
537  opacity = Opacity->Field;
538  gradtmag = GradTemperMagnitude->Field;
539  D = DiffCoefCentered->Field;
540  Dr = DiffCoefIfaceRad->Field;
541  Dt = DiffCoefIfaceTheta->Field;
542  TemperatureGradient (); /* compute the magnitude of temperature gradient at cell centers */
543 #pragma omp parallel default(none) \
544  shared(nr,ns,voldens,opacity,temper,gradtmag,D,Dr,Dt) \
545  private(i,j,l,lim,ljm,gradt,s,fluxlimiter,fac)
546  {
547 #pragma omp for
548  for (i=0; i<nr; i++) {
549  for (j=0; j<ns; j++) {
550  l = j + i*ns;
551  if (i==0) {
552  gradt = gradtmag[l+ns]; /* for inner/outer ring, |gradT| is not available. Adopt the radially neighbouring value. */
553  } else if (i==nr-1) { /* this hopefully does not produce a big error because we use it only to estimate the flux limiter */
554  gradt = gradtmag[l-ns];
555  } else {
556  gradt = gradtmag[l];
557  }
558  fac = 1.0/(voldens[l]*opacity[l]);
559  s = 4.0*gradt*fac/temper[l];
560  fluxlimiter = FluxLimiterValue (s);
561  D[l] = fluxlimiter*16.0*STEFANBOLTZMANN*pow(temper[l],3.0)*fac;
562  }
563  }
564 #pragma omp for
565  for (i=1; i<nr; i++) { /* calc. average diff. coefficient at interfaces where possible */
566  for (j=0; j<ns; j++) { /* (0th ring is skipped for reasons similar to gradT calculation) */
567  l = j + i*ns;
568  lim = l - ns;
569  ljm = l - 1;
570  if (j==0) ljm = i*ns+ns-1;
571  Dr[l] = 0.5*(D[l] + D[lim]);
572  Dt[l] = 0.5*(D[l] + D[ljm]);
573  }
574  }
575  } // #end of the omp parallel section
576 }
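The centered coefficient computed above is the flux-limited diffusion coefficient D = lambda(s)*16*sigma_SB*T^3/(kappa*rho), where s = 4*|grad T|/(kappa*rho*T) is fed to the Kley (1989) limiter. A standalone evaluation with illustrative SI-like numbers (the limiter is re-implemented locally so the snippet compiles on its own):

#include <math.h>
#include <stdio.h>

/* Same piecewise form as FluxLimiterValue(). */
static double flux_limiter (double s)
{
  if (s <= 2.0) return 2.0/(3.0 + sqrt (9.0 + 10.0*s*s));
  return 10.0/(10.0*s + 9.0 + sqrt (81.0 + 180.0*s));
}

/* Illustrative flux-limited diffusion coefficient; all numbers are made up. */
int main (void)
{
  double sigma = 5.67e-8, T = 120.0, kappa = 0.05, rho = 1.0e-7, gradT = 1.0e-9;
  double s = 4.0*gradT/(kappa*rho*T);
  double lambda = flux_limiter (s);
  double D = lambda*16.0*sigma*pow (T, 3.0)/(kappa*rho);
  printf ("s = %g, lambda = %g, D = %g\n", s, lambda, D);
  return 0;
}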
577 
578 /** Finds the temperature gradients and their magnitude
579  * over the mesh. */
580 void TemperatureGradient ()
581 {
582  int i, j, l, lim, ljm, lip, ljp, ns, nr;
583  real *temper, *gradtr, *gradtt, *gradtmag;
584  real dxtheta, invdxtheta, r, t;
585  /* ----- */
586  temper = Temperature->Field;
587  gradtr = GradTemperRad->Field;
588  gradtt = GradTemperTheta->Field;
589  gradtmag = GradTemperMagnitude->Field;
590  nr = Temperature->Nrad;
591  ns = Temperature->Nsec;
592 #pragma omp parallel default(none) \
593  shared(nr,ns,temper,InvDiffRmed,Rmed,gradtr,gradtt,gradtmag) \
594  private(i,j,l,lim,ljm,lip,ljp,dxtheta,invdxtheta,r,t)
595  {
596 #pragma omp for
597  for (i=1; i<nr; i++) { /* on each CPU, radial gradT exists at all interfaces besides the 0th */
598  dxtheta = 2.0*PI/(real)ns*Rmed[i]; /* azimuthal gradT exists even for i=0 but we won't need it anyway */
599  invdxtheta = 1.0/dxtheta;
600  for (j=0; j<ns; j++) {
601  l = j + i*ns;
602  lim = l - ns;
603  /* dT/dr = ( T(i,j) - T(i-1,j) )/( Rmed[i] - Rmed[i-1] ) */
604  gradtr[l] = (temper[l] - temper[lim])*InvDiffRmed[i];
605  ljm = l - 1;
606  if (j==0) ljm = i*ns+ns-1;
607  /* !polar coordinates --> 1/r * dT/dtheta */
608  gradtt[l] = (temper[l] - temper[ljm])*invdxtheta; // don't put an extra Rmed[i] here (is already in invdxtheta) !
609  }
610  }
611 #pragma omp for
612  for (i=1; i<nr-1; i++) { /* magnitude of gradT exists at all cell centers which are surrounded by 4 gradT values (one per interface) */
613  for (j=0; j<ns; j++) {
614  l = j + i*ns;
615  lip = l + ns;
616  ljp = l + 1;
617  if (j == ns-1) ljp = i*ns;
618  r = 0.5*(gradtr[lip]+gradtr[l]);
619  t = 0.5*(gradtt[ljp]+gradtt[l]);
620  gradtmag[l] = sqrt(r*r + t*t);
621  }
622  }
623  } // end of the omp parallel section
624 }
625 
626 /** Translates the surface density into
627  * the midplane volume density using the local
628  * pressure scale height. */
629 void MidplaneVolumeDensity (Rho)
630 PolarGrid *Rho;
631 {
632  int nr, ns, i, j, l;
633  real *rho, *voldens, *cs, H;
634  /* ----- */
635  nr = Rho->Nrad;
636  ns = Rho->Nsec;
637  rho = Rho->Field;
638  cs = SoundSpeed->Field;
639  voldens = VolumeDensity->Field;
640 #pragma omp parallel for default(none) shared(nr,ns,cs,Rmed,rho,voldens,SQRT_ADIABIND_INV,OmegaInv) \
641  private(i,j,l,H)
642  for (i=0; i<nr; i++) {
643  for (j=0; j<ns; j++) {
644  l = j + i*ns;
645  H = cs[l]*OmegaInv[i]*SQRT_ADIABIND_INV;
646  /* we estimate the midplane volume density from the vertically integrated
647  surface density (see e.g. Kley & Crida 2008, Pierens 2015) */
648  voldens[l] = rho[l]/H*SQRT2PI_INV;
649  }
650  }
651 }
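The conversion assumes a vertically isothermal, Gaussian layer: rho_mid = Sigma/(sqrt(2*pi)*H) with H = c_s/(Omega*sqrt(ADIABIND)), exactly as in the loop above. A one-off numerical check with illustrative values (not code units):

#include <math.h>
#include <stdio.h>

/* Midplane density of a Gaussian vertical profile; illustrative numbers. */
int main (void)
{
  double Sigma = 200.0, cs = 6.5e2, Omega = 2.0e-7, gamma = 1.4;  /* made-up values */
  double H = cs/(Omega*sqrt (gamma));
  double pi = acos (-1.0);
  printf ("H = %g, rho_mid = %g\n", H, Sigma/(sqrt (2.0*pi)*H));
  return 0;
}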
652 
653 /** Fills the opacity polar grid, either with
654  * a fixed parametric value or using the Bell & Lin (1994)
655  * opacity table. */
656 void OpacityProfile ()
657 {
658  int nr, ns, i, j, l;
659  real kappa1, kappa2, kappa3, tmp1, tmp2;
660  real *opacity, *temper, *voldens;
661  real T, Dens;
662  static boolean FillParametricOpacity = YES;
663  /* ----- */
664  opacity = Opacity->Field;
665  temper = Temperature->Field;
666  nr = Temperature->Nrad;
667  ns = Temperature->Nsec;
668  voldens = VolumeDensity->Field;
669  if (PARAMETRICOPACITY > 0.0) {
670  if (FillParametricOpacity) { /* if the parametric opacity is used ... */
671  FillParametricOpacity = NO; /* fill the array only once ... */
672  for (i=0; i<nr; i++) {
673  for (j=0; j<ns; j++) {
674  l = j + i*ns;
675  opacity[l] = PARAMETRICOPACITY*OPA2CU;
676  }
677  }
678  } else {
679  return; /* ... and exit the function in subsequent calls */
680  }
681  }
682 #pragma omp parallel for default(none) shared(nr,ns,temper,voldens,opacity,kappa0,a,b) \
683  private(i,j,l,T,Dens,kappa1,kappa2,kappa3,tmp1,tmp2)
684  for (i=0; i<nr; i++) {
685  for (j=0; j<ns; j++) {
686  l = j + i*ns;
687  T = temper[l]*T2SI; /* temperature transformed from code units to Kelvins */
688  Dens = voldens[l]*RHO2CGS; /* transform to cgs */
689  /* transitions calculated using eq. (12) in Keith & Wardle (2014);
690  * comparisons performed in cgs */
691  if (T <= 2286.8*pow(Dens,0.0408)) {
692  kappa1 = kappa0[0] * pow(Dens,a[0]) * pow(T,b[0]);
693  kappa2 = kappa0[1] * pow(Dens,a[1]) * pow(T,b[1]);
694  kappa3 = kappa0[2] * pow(Dens,a[2]) * pow(T,b[2]);
695  tmp1 = 1.0/(kappa1*kappa1) + 1.0/(kappa2*kappa2);
696  tmp1 = 1.0/(tmp1*tmp1);
697  tmp2 = pow(kappa3/(1.0 + 1.0e22*pow(T,-10.0)), 4.0);
698  opacity[l] = pow(tmp1+tmp2, 0.25);
699  }
700  else if (T > 2286.8*pow(Dens,0.0408) && T <= 2029.8*pow(Dens,0.0123)) {
701  opacity[l] = kappa0[3] * pow(Dens,a[3]) * pow(T,b[3]);
702  }
703  else if (T > 2029.8*pow(Dens,0.0123) && T <= 1.0e5*pow(Dens,0.0476)) {
704  opacity[l] = kappa0[4] * pow(Dens,a[4]) * pow(T,b[4]);
705  }
706  else if (T > 1.0e5*pow(Dens,0.0476) && T <= (31195.2)*pow(Dens,0.0533)) {
707  opacity[l] = kappa0[5] * pow(Dens,a[5]) * pow(T,b[5]);
708  }
709  else {
710  opacity[l] = kappa0[6] * pow(Dens,a[6]) * pow(T,b[6]);
711  }
712  opacity[l] = opacity[l]*OPA2CU;
713  }
714  }
715 }
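The least obvious part of the table above is the low-temperature branch, where the ice, ice-sublimation and dust-grain opacities are blended smoothly (a smooth minimum of the first two regimes combined with a smooth switch-on of the third, following Keith & Wardle 2014) instead of being switched abruptly. The blend in isolation, evaluated in cgs with illustrative input:

#include <math.h>
#include <stdio.h>

/* Smooth low-temperature blend of the first three Bell & Lin (1994) regimes,
   as used in OpacityProfile().  T in K; the result is in cm^2/g.
   (The density exponents of these three regimes are zero, so only T enters.) */
int main (void)
{
  double T = 120.0;                            /* made-up midplane temperature */
  double k1 = 2.0e-4*pow (T, 2.0);             /* ice grains                   */
  double k2 = 2.0e16*pow (T, -7.0);            /* ice sublimation              */
  double k3 = 0.1*pow (T, 0.5);                /* dust grains                  */
  double tmp1 = 1.0/(k1*k1) + 1.0/(k2*k2);
  tmp1 = 1.0/(tmp1*tmp1);
  double tmp2 = pow (k3/(1.0 + 1.0e22*pow (T, -10.0)), 4.0);
  printf ("blended kappa = %g cm^2/g\n", pow (tmp1 + tmp2, 0.25));
  return 0;
}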
716 
717 /** Calculates the flux limiter
718  * according to Kley (1989) */
719 real FluxLimiterValue (s)
720 real s;
721 {
722  real tmp, value;
723  if (s <= 2.0) {
724  tmp = 3.0 + sqrt(9.0+10.0*s*s);
725  value = 2.0/tmp;
726  } else {
727  tmp = 10.0*s + 9.0 + sqrt(81.0+180.0*s);
728  value = 10.0/tmp;
729  }
730  return value;
731 }
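Two limits are worth noting for the limiter above: lambda -> 1/3 as s -> 0 (recovering the classical diffusion flux) and lambda -> 1/s as s -> infinity (capping the radiative flux at the free-streaming value). A quick self-contained numerical check:

#include <math.h>
#include <stdio.h>

static double flux_limiter (double s)   /* same piecewise form as above */
{
  if (s <= 2.0) return 2.0/(3.0 + sqrt (9.0 + 10.0*s*s));
  return 10.0/(10.0*s + 9.0 + sqrt (81.0 + 180.0*s));
}

int main (void)
{
  double svals[5] = {1.0e-6, 1.0, 2.0, 10.0, 1.0e4};
  for (int i = 0; i < 5; i++)
    printf ("s = %-10g lambda = %-12g s*lambda = %g\n",
            svals[i], flux_limiter (svals[i]), svals[i]*flux_limiter (svals[i]));
  return 0;
}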
732 
733 /** Calculates the effective optical depth
734  * in a simple gray model of the disk's vertical
735  * structure; see Hubeny (1990). */
736 real EffectiveOpticalDepth (tau)
737 real tau;
738 {
739  real value;
740  if (StellarIrradiation) {
741  value = 0.375*tau + 0.5 + 0.25/tau;
742  } else {
743  value = 0.375*tau + 0.25*sqrt(3.0) + 0.25/tau;
744  }
745  return value;
746 }
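The effective depth above, tau_eff = 3*tau/8 + 1/2 + 1/(4*tau) in the irradiated case (with 1/2 replaced by sqrt(3)/4 otherwise), interpolates between the optically thick regime, where the vertical cooling Q^- = 2*sigma_SB*T^4/tau_eff scales as 1/tau, and the optically thin regime, where it scales as tau (since tau_eff ~ 1/(4*tau) for tau << 1). A quick illustrative check:

#include <math.h>
#include <stdio.h>

/* Effective optical depth of the gray vertical model (irradiated variant)
   and the resulting vertical cooling rate.  Illustrative numbers only. */
int main (void)
{
  double sigma = 5.67e-8, T = 100.0;
  double tauvals[3] = {1.0e-3, 1.0, 1.0e3};
  for (int i = 0; i < 3; i++) {
    double tau = tauvals[i];
    double taueff = 0.375*tau + 0.5 + 0.25/tau;
    printf ("tau = %-8g tau_eff = %-10g Q- = %g\n",
            tau, taueff, 2.0*sigma*pow (T, 4.0)/taueff);
  }
  return 0;
}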
747 
748 /** For a MPI-split grid, synchronizes the values
749  * in a requested number 'nsync' of the overlapping
750  * radial rings. */
751 void SynchronizeOverlapFields (field, nr, nsync)
752 real *field;
753 int nr, nsync;
754 {
755  MPI_Request req1, req2, req3, req4;
756  static real *SendBufferInner, *SendBufferOuter;
757  static real *RecvBufferInner, *RecvBufferOuter;
758  static boolean allocate=YES;
759  int prevcpu, nextcpu, size;
760  int sendoffsetin, sendoffsetout, recvoffsetin, recvoffsetout;
761  /* ----- */
762  if (nsync > CPUOVERLAP) {
763  printf ("Error! Requested number of rings to be synchronized by the MPI communication is larger than CPUOVERLAP.\n");
764  printf ("Terminating now...\n");
765  prs_exit (1);
766  }
767  if (allocate) {
768  allocate=NO;
769  SendBufferInner = malloc (CPUOVERLAP*NSEC*sizeof(real)); // buffers allocated with the max size
770  SendBufferOuter = malloc (CPUOVERLAP*NSEC*sizeof(real));
771  RecvBufferInner = malloc (CPUOVERLAP*NSEC*sizeof(real));
772  RecvBufferOuter = malloc (CPUOVERLAP*NSEC*sizeof(real));
773  }
774  size = nsync*NSEC;
775  prevcpu = CPU_Rank-1;
776  nextcpu = CPU_Rank+1;
777  sendoffsetin = CPUOVERLAP*NSEC;
778  sendoffsetout = (nr - CPUOVERLAP - nsync)*NSEC;
779  recvoffsetin = (CPUOVERLAP - nsync)*NSEC;
780  recvoffsetout = (nr - CPUOVERLAP)*NSEC;
781  if (CPU_Rank > 0) memcpy (SendBufferInner, field+sendoffsetin, size*sizeof(real));
782  if (CPU_Rank < CPU_Number-1) memcpy (SendBufferOuter, field+sendoffsetout, size*sizeof(real));
783  if (CPU_Rank % 2 == 0) {
784  if (CPU_Rank > 0) {
785  MPI_Isend (SendBufferInner, size, MPI_DOUBLE, prevcpu, 0, MPI_COMM_WORLD, &req1);
786  MPI_Irecv (RecvBufferInner, size, MPI_DOUBLE, prevcpu, 0, MPI_COMM_WORLD, &req2);
787  }
788  if (CPU_Rank < CPU_Number-1) {
789  MPI_Isend (SendBufferOuter, size, MPI_DOUBLE, nextcpu, 0, MPI_COMM_WORLD, &req3);
790  MPI_Irecv (RecvBufferOuter, size, MPI_DOUBLE, nextcpu, 0, MPI_COMM_WORLD, &req4);
791  }
792  } else {
793  if (CPU_Rank < CPU_Number-1) {
794  MPI_Irecv (RecvBufferOuter, size, MPI_DOUBLE, nextcpu, 0, MPI_COMM_WORLD, &req3);
795  MPI_Isend (SendBufferOuter, size, MPI_DOUBLE, nextcpu, 0, MPI_COMM_WORLD, &req4);
796  }
797  if (CPU_Rank > 0) {
798  MPI_Irecv (RecvBufferInner, size, MPI_DOUBLE, prevcpu, 0, MPI_COMM_WORLD, &req1);
799  MPI_Isend (SendBufferInner, size, MPI_DOUBLE, prevcpu, 0, MPI_COMM_WORLD, &req2);
800  }
801  }
802  if (CPU_Rank > 0) {
803  MPI_Wait (&req1, &fargostat);
804  MPI_Wait (&req2, &fargostat);
805  memcpy (field+recvoffsetin, RecvBufferInner, size*sizeof(real));
806  }
807  if (CPU_Rank < CPU_Number-1) {
808  MPI_Wait (&req3, &fargostat);
809  MPI_Wait (&req4, &fargostat);
810  memcpy (field+recvoffsetout, RecvBufferOuter, size*sizeof(real));
811  }
812 }
813 
814 /** Writes an input file for the 'torquemap' code written
815  * by Bertram Bitsch. */
816 void CreateTorqueMapInfile (istep, Surfdens)
817 int istep;
818 PolarGrid *Surfdens;
819 {
820  real *cs, *temper, *pressure, *surfdens;
821  real CS[MAX1D], T[MAX1D], P[MAX1D], Sigma[MAX1D]; // need to create global azimuthally averaged fields
822  real Rho, tmpr, Opacity, Viscalpha, H;
823  real Omega, kappa1, kappa2, kappa3, tmp1, tmp2;
824  int i;
825  char name[256];
826  FILE *output;
827  // static boolean first=YES;
828  /* ---- */
829  cs = SoundSpeed->Field;
830  temper = Temperature->Field;
831  pressure = Pressure->Field;
832  surfdens = Surfdens->Field;
833  if (CPU_Master) {
834  sprintf (name, "%storquemap_infile%d.dat", OUTPUTDIR, istep);
835  output = fopenp (name, "w");
836  }
837  mpi_make1Dprofile (cs, CS);
838  mpi_make1Dprofile (temper, T);
839  mpi_make1Dprofile (pressure, P);
840  mpi_make1Dprofile (surfdens, Sigma);
841  if (CPU_Master) {
842  masterprint("Writing torque map input file ...");
843  for (i=0; i<GLOBALNRAD; i++) {
844  Omega = pow(GlobalRmed[i],-1.5);
845  H = CS[i]/Omega/sqrt(ADIABIND);
846  Rho = Sigma[i]/(sqrt(2.0*PI)*H)*RHO2CGS;
847  tmpr = T[i]*T2SI;
848  // H[i] = CS[i]/Omega/sqrt(ADIABIND);
849  // Rho[i] = Sigma[i]/(2.0*H[i]);
850  /* OPACITY PART IN CGS ---> */
851  if (tmpr <= 2286.8*pow(Rho,0.0408)) {
852  kappa1 = kappa0[0] * pow(Rho,a[0]) * pow(tmpr,b[0]);
853  kappa2 = kappa0[1] * pow(Rho,a[1]) * pow(tmpr,b[1]);
854  kappa3 = kappa0[2] * pow(Rho,a[2]) * pow(tmpr,b[2]);
855  tmp1 = 1.0/(kappa1*kappa1) + 1.0/(kappa2*kappa2);
856  tmp1 = 1.0/(tmp1*tmp1);
857  tmp2 = pow(kappa3/(1.0 + 1.0e22*pow(tmpr,-10.0)), 4.0);
858  Opacity = pow(tmp1+tmp2, 0.25);
859  }
860  else if (tmpr > 2286.8*pow(Rho,0.0408) && tmpr <= 2029.8*pow(Rho,0.0123)) {
861  Opacity = kappa0[3] * pow(Rho,a[3]) * pow(tmpr,b[3]);
862  }
863  else if (tmpr > 2029.8*pow(Rho,0.0123) && tmpr <= 1.0e5*pow(Rho,0.0476)) {
864  Opacity = kappa0[4] * pow(Rho,a[4]) * pow(tmpr,b[4]);
865  }
866  else if (tmpr > 1.0e5*pow(Rho,0.0476) && tmpr <= (31195.2)*pow(Rho,0.0533)) {
867  Opacity = kappa0[5] * pow(Rho,a[5]) * pow(tmpr,b[5]);
868  }
869  else {
870  Opacity = kappa0[6] * pow(Rho,a[6]) * pow(tmpr,b[6]);
871  }
872  if (PARAMETRICOPACITY > 0.0) Opacity = PARAMETRICOPACITY;
873  /* <--- */
874  if (ViscosityAlpha) {
875  Viscalpha = ALPHAVISCOSITY;
876  } else {
877  Viscalpha = VISCOSITY/(H*H*Omega);
878  }
879  /* output everything in cgs, only radial coordinate and H should be in AU */
880  fprintf (output, "%#.18g\t%#.18g\t%#.18g\t%#.18g\t%#.18g\t%#.18g\t%#.18g\t%#.18g\n",\
881  GlobalRmed[i], Rho, tmpr, P[i]*PRESS2CGS, Opacity, Sigma[i]*SIGMA2CGS, Viscalpha, H);
882  }
883  fclose (output);
884  masterprint(" done\n");
885  }
886 }
887 
Definition: LowTasks.c:40