Rheolef  7.2
an efficient C++ finite element environment
 
solver_mumps.cc
// direct solver MUMPS, mpi implementation
//
// Note: why no seq implementation based on mumps?
//
// 1. mumps has a pure seq library implementation.
//    The seq and mpi libraries cannot be linked together, because they both
//    define the same dmumps() function.
// 2. but, when using mpi, the mpi implementation is unable to solve efficiently,
//    in parallel, some independent sparse linear systems (e.g. a sparse mass
//    matrix local to an element), because the right-hand side may be merged
//    on proc=0 for an obscure reason.
//
// Thus rheolef uses mumps for the mpi-only implementation of the direct solver
// and uses another library (e.g. umfpack) for the seq direct solver.
//
// The present 'seq' implementation has been tested and is valid:
// the code is maintained (but no longer instantiated) for two reasons:
// 1. it can be used when mpi is not present, as a seq solver with the pure seq impl of mumps
// 2. mumps may provide a distributed RHS in the future:
//    https://listes.ens-lyon.fr/sympa/arc/mumps-users
//    From: Alfredo Buttari <alfredo.buttari@enseeiht.fr>
//    Date: Mon, 31 Jan 2011 19:07:03 +0100
//    Subject: Re: [mumps-users] Scalability of MUMPS solution phase
//    Jack,
//    yes, adding support for distributed RHS is on our TODO list but I
//    can't say at this moment when it will be available.
//
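// Typical application code does not call this backend directly: it goes through
// the generic 'solver' front-end, which dispatches to MUMPS in the distributed
// (mpi) case, as described above. A minimal usage sketch (assembly and error
// handling omitted):
//
//   csr<Float> a  = ...;           // assembled sparse matrix
//   vec<Float> b  = ...;           // right-hand side
//   solver     sa (a);             // factorization (MUMPS when distributed)
//   vec<Float> x  = sa.solve (b);  // forward/backward substitutions
//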
#include "rheolef/config.h"
#ifdef _RHEOLEF_HAVE_MUMPS
#include "solver_mumps.h"

#undef DEBUG_MUMPS_SCOTCH
#ifdef DEBUG_MUMPS_SCOTCH
#undef _RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH // has failed on some linear systems: prefer seq-scotch when available
#endif // DEBUG_MUMPS_SCOTCH

namespace rheolef {
using namespace std;
// =========================================================================
// 1. local functions
// =========================================================================
// count non-zero entries of the upper part of A
// 1) diagonal block case
template<class T, class M>
static
typename csr<T,M>::size_type
nnz_dia_upper (const csr<T,M>& a)
{
  typedef typename csr<T,M>::size_type size_type;
  size_type dia_nnz = 0;
  typename csr<T,M>::const_iterator dia_ia = a.begin();
  for (size_type i = 0, n = a.nrow(); i < n; i++) {
    for (typename csr<T,M>::const_data_iterator p = dia_ia[i]; p < dia_ia[i+1]; p++) {
      size_type j = (*p).first;
      if (j >= i) dia_nnz++;
    }
  }
  return dia_nnz;
}
// 2) extra-diagonal block case, for distributed csr
template<class T, class M>
static
typename csr<T,M>::size_type
nnz_ext_upper (const csr<T,M>& a)
{
  return 0;
}
#ifdef _RHEOLEF_HAVE_MPI
template<class T>
static
typename csr<T,distributed>::size_type
nnz_ext_upper (const csr<T,distributed>& a)
{
  typedef typename csr<T,distributed>::size_type size_type;
  size_type first_i = a.row_ownership().first_index();
  size_type ext_nnz = 0;
  typename csr<T,distributed>::const_iterator ext_ia = a.ext_begin();
  for (size_type i = 0, n = a.nrow(); i < n; i++) {
    for (typename csr<T,distributed>::const_data_iterator p = ext_ia[i]; p < ext_ia[i+1]; p++) {
      size_type dis_i = first_i + i;
      size_type dis_j = a.jext2dis_j ((*p).first);
      if (dis_j >= dis_i) ext_nnz++;
    }
  }
  return ext_nnz;
}
#endif // _RHEOLEF_HAVE_MPI
template<class T, class M>
static
void
csr2mumps_struct_seq (const csr<T,M>& a, vector<int>& row, vector<int>& col, bool use_symmetry)
{
  typedef typename csr<T,M>::size_type size_type;
  typename csr<T,M>::const_iterator dia_ia = a.begin();
  for (size_type i = 0, n = a.nrow(), q = 0; i < n; i++) {
    for (typename csr<T,M>::const_data_iterator p = dia_ia[i]; p < dia_ia[i+1]; p++) {
      size_type j = (*p).first;
      if (!use_symmetry || j >= i) {
        row[q] = i + 1;
        col[q] = j + 1;
        q++;
      }
    }
  }
}
template<class T, class M>
static
void
csr2mumps_values_seq (const csr<T,M>& a, vector<T>& val, bool use_symmetry)
{
  typedef typename csr<T,M>::size_type size_type;
  typename csr<T,M>::const_iterator dia_ia = a.begin();
  for (size_type i = 0, n = a.nrow(), q = 0; i < n; i++) {
    for (typename csr<T,M>::const_data_iterator p = dia_ia[i]; p < dia_ia[i+1]; p++) {
      size_type j = (*p).first;
      if (!use_symmetry || j >= i) {
        val[q] = (*p).second;
        q++;
      }
    }
  }
}
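// Example: for the symmetric 2x2 matrix a = [4 1; 1 3], only the upper
// triangle is passed to mumps, in 1-based coordinate (COO) format:
//   row = {1, 1, 2},  col = {1, 2, 2},  val = {4, 1, 3}
// whereas the unsymmetric case would list all four entries.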
#ifdef _RHEOLEF_HAVE_MPI
template<class T>
static
void
csr2mumps_struct_dis (const csr<T,sequential>& a, vector<int>& row, vector<int>& col, bool use_symmetry, bool)
{
  // dummy case: for the compiler to be happy
}
template<class T>
static
void
csr2mumps_struct_dis (const csr<T,distributed>& a, vector<int>& row, vector<int>& col, bool use_symmetry, bool drop_ext_nnz)
{
  typedef typename csr<T,distributed>::size_type size_type;
  typename csr<T,distributed>::const_iterator dia_ia = a.begin();
  typename csr<T,distributed>::const_iterator ext_ia = a.ext_begin();
  size_type dis_nr  = a.dis_nrow();
  size_type dis_nc  = a.dis_ncol();
  size_type nc      = a.ncol();
  size_type first_i = a.row_ownership().first_index();
  for (size_type i = 0, n = a.nrow(), q = 0; i < n; i++) {
    for (typename csr<T,distributed>::const_data_iterator p = dia_ia[i]; p < dia_ia[i+1]; p++) {
      size_type j = (*p).first;
      size_type dis_i = first_i + i;
      size_type dis_j = first_i + j;
      assert_macro (j < nc, "invalid matrix");
      assert_macro (dis_i < dis_nr && dis_j < dis_nc, "invalid matrix");
      if (!use_symmetry || dis_j >= dis_i) {
        row[q] = dis_i + 1;
        col[q] = dis_j + 1;
        q++;
      }
    }
    if (drop_ext_nnz) continue;
    for (typename csr<T,distributed>::const_data_iterator p = ext_ia[i]; p < ext_ia[i+1]; p++) {
      size_type dis_i = first_i + i;
      size_type dis_j = a.jext2dis_j ((*p).first);
      assert_macro (dis_i < dis_nr && dis_j < dis_nc, "invalid matrix");
      if (!use_symmetry || dis_j >= dis_i) {
        row[q] = dis_i + 1;
        col[q] = dis_j + 1;
        q++;
      }
    }
  }
}
template<class T>
static
void
csr2mumps_values_dis (const csr<T,sequential>& a, vector<T>& val, bool use_symmetry, bool)
{
  // dummy case: for the compiler to be happy
}
template<class T>
static
void
csr2mumps_values_dis (const csr<T,distributed>& a, vector<T>& val, bool use_symmetry, bool drop_ext_nnz)
{
#ifdef TO_CLEAN
  std::fill (val.begin(), val.end(), std::numeric_limits<T>::max());
#endif // TO_CLEAN

  typedef typename csr<T,distributed>::size_type size_type;
  size_type first_i = a.row_ownership().first_index();
  typename csr<T,distributed>::const_iterator dia_ia = a.begin();
  typename csr<T,distributed>::const_iterator ext_ia = a.ext_begin();
  for (size_type i = 0, n = a.nrow(), q = 0; i < n; i++) {
    for (typename csr<T,distributed>::const_data_iterator p = dia_ia[i]; p < dia_ia[i+1]; p++) {
      size_type dis_i = first_i + i;
      size_type dis_j = first_i + (*p).first;
      if (!use_symmetry || dis_j >= dis_i) {
#ifdef TO_CLEAN
        check_macro (q < val.size(), "invalid index");
#endif // TO_CLEAN
        val[q] = (*p).second;
        q++;
      }
    }
    if (drop_ext_nnz) continue;
    for (typename csr<T,distributed>::const_data_iterator p = ext_ia[i]; p < ext_ia[i+1]; p++) {
      size_type dis_i = first_i + i;
      size_type dis_j = a.jext2dis_j ((*p).first);
      if (!use_symmetry || dis_j >= dis_i) {
#ifdef TO_CLEAN
        check_macro (q < val.size(), "invalid index");
#endif // TO_CLEAN
        val[q] = (*p).second;
        q++;
      }
    }
  }
#ifdef TO_CLEAN
  T val_min = std::numeric_limits<T>::max(), val_max = 0;
  for (size_t q = 0; q < val.size(); q++) {
    val_min = std::min (val_min, abs(val[q]));
    val_max = std::max (val_max, abs(val[q]));
  }
  warning_macro ("val_min="<<val_min<<", val_max="<<val_max);
#endif // TO_CLEAN
}
#endif // _RHEOLEF_HAVE_MPI
// =========================================================================
// 2. the class interface
// =========================================================================
template<class T, class M>
solver_mumps_rep<T,M>::solver_mumps_rep (const csr<T,M>& a, const solver_option& opt)
  : solver_abstract_rep<T,M>(opt),
    _has_mumps_instance(false),
    _drop_ext_nnz(false),
    _mumps_par(),
    _row(),
    _col(),
    _val(),
    _a00(0),
    _det()
{
  update_values (a);
}
template<class T, class M>
void
solver_mumps_rep<T,M>::update_values (const csr<T,M>& a)
{
  size_type nproc = communicator().size();
  if (a.dis_nrow() <= 1) {
    // skip 0 or 1 dis_nrow, where mumps core dumps
    if (a.nrow() == 1) {
      _a00 = (*(a.begin()[0])).second;
    }
    _has_mumps_instance = false;
    return;
  }
  _has_mumps_instance = true;
  // ----------------------------------
  // step 0 : init a mumps instance
  // ----------------------------------
  bool use_symmetry          = a.is_symmetric();
  bool use_definite_positive = a.is_definite_positive();
  _mumps_par.par = 1; // use parallel
#ifdef _RHEOLEF_HAVE_MPI
  _mumps_par.comm_fortran = MPI_Comm_c2f (a.row_ownership().comm());
#endif // _RHEOLEF_HAVE_MPI
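  // mumps symmetry flag: sym=0 unsymmetric, sym=1 symmetric positive definite,
  // sym=2 general symmetric (numerical pivoting allowed).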
  if (use_symmetry && !use_definite_positive) {
    // symmetric, invertible, but possibly indefinite: may have both negative and positive eigenvalues
    // => leads to a cpu cost similar to the s.d.p. case, but without the assumption of positive or negative:
    _mumps_par.sym = 2;
  } else if (use_symmetry && use_definite_positive) {
#if 0
    // sym def pos
    // a) standard approach
    //    Note: could fail in scalapack when sym def negative
    _mumps_par.sym = 1;
    // b) second approach: from AmeButGue-2011-mumps, page 15:
    //    Another approach to suppress numerical pivoting which works with ScaLAPACK
    //    for both positive definite and negative definite matrices consists in setting SYM=2 and
    //    CNTL(1)=0.0D0 (recommended strategy).
    _mumps_par.sym = 2;
    _mumps_par.cntl[1-1] = 0.0;
#endif // 0
    // c) third approach: do not use the knowledge of positive or negative, since
    //    this knowledge is contained in the bilinear forms defined by the user;
    //    use only the general symmetry:
    //    tests show a cpu cost very similar to the two previous approaches,
    //    and the user does not need to know about positive_definite or negative_definite
    _mumps_par.sym = 2;
  } else {
    // unsymmetric, invertible
    _mumps_par.sym = 0;
  }
  _mumps_par.job = -1;
  dmumps_c (&_mumps_par);
  check_macro (_mumps_par.infog[1-1] == 0, "mumps: an error has occurred");
  // ----------------------------------
  // step 1 : symbolic factorization
  // ----------------------------------
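  // note: the mumps documentation numbers ICNTL/CNTL/INFOG entries from 1
  // (fortran convention); the C interface stores them 0-based, hence the
  // systematic icntl[k-1] indexing below.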
  if (base::option().verbose_level != 0) {
    _mumps_par.icntl[1-1] = 1; // error output
    _mumps_par.icntl[2-1] = 1; // verbose output
    _mumps_par.icntl[3-1] = 1; // global infos
    _mumps_par.icntl[4-1] = base::option().verbose_level; // verbosity level
    // strcpy (_mumps_par.write_problem, "dump_mumps"); // dump sparse structure
  } else {
    _mumps_par.icntl[1-1] = -1;
    _mumps_par.icntl[2-1] = -1;
    _mumps_par.icntl[3-1] = -1;
    _mumps_par.icntl[4-1] =  0;
  }
  if (base::option().compute_determinant) {
    _mumps_par.icntl[33-1] = 1;
  }
  _mumps_par.icntl[5-1] = 0; // matrix is in assembled format
#if defined(_RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH) || defined(_RHEOLEF_HAVE_MUMPS_WITH_SCOTCH)
  _mumps_par.icntl[7-1] = 3; // sequential ordering: 3==scotch
#elif defined(_RHEOLEF_HAVE_MUMPS_WITH_PARMETIS) || defined(_RHEOLEF_HAVE_MUMPS_WITH_METIS)
  _mumps_par.icntl[7-1] = 5; // sequential ordering: 5==metis
#else
  _mumps_par.icntl[7-1] = 7; // ordering: 7==auto
#endif // _RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH
  _mumps_par.icntl[14-1] = 40; // default 20 seems to be insufficient in some cases
  _mumps_par.icntl[29-1] = 0;  // 0: auto; 1: ptscotch; 2: parmetis
  _mumps_par.icntl[22-1] = 0;  // 0: in-core; 1: out-of-core TODO
  if (is_distributed<M>::value) {
    _mumps_par.icntl[18-1] = 3; // distributed

#if defined(_RHEOLEF_HAVE_MUMPS_WITH_SCOTCH) || defined(_RHEOLEF_HAVE_MUMPS_WITH_METIS)
    _mumps_par.icntl[28-1] = 1; // sequential ordering (preferred, more robust)
#elif defined(_RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH) || defined(_RHEOLEF_HAVE_MUMPS_WITH_PARMETIS)
    _mumps_par.icntl[28-1] = 2; // parallel ordering (less robust, but faster)
#endif // _RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH || _RHEOLEF_HAVE_MUMPS_WITH_PARMETIS

#ifdef _RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH
    _mumps_par.icntl[29-1] = 1; // ptscotch=1
#elif defined(_RHEOLEF_HAVE_MUMPS_WITH_PARMETIS)
    _mumps_par.icntl[29-1] = 2; // parmetis=2
#else
    _mumps_par.icntl[29-1] = 0; // automatic choice
#endif // _RHEOLEF_HAVE_MUMPS_WITH_PTSCOTCH
  } else {
    _mumps_par.icntl[18-1] = 0; // sequential
  }
  // load the matrix in mumps
  _mumps_par.n = a.dis_nrow();
  size_type dia_nnz = ((use_symmetry) ? nnz_dia_upper(a) : a.nnz());
  size_type ext_nnz = ((use_symmetry) ? nnz_ext_upper(a) : a.ext_nnz());
  size_type dis_nnz = dia_nnz + (_drop_ext_nnz ? 0 : ext_nnz);
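  // in the distributed case the matrix entries are supplied locally by each
  // process (nz_loc, irn_loc, jcn_loc), matching icntl(18)=3 above; in the
  // sequential case the whole matrix is supplied on the host (nz, irn, jcn).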
  if (is_distributed<M>::value) { // distributed
#ifdef _RHEOLEF_HAVE_MPI
    _row.resize (dis_nnz);
    _col.resize (dis_nnz);
    csr2mumps_struct_dis (a, _row, _col, use_symmetry, _drop_ext_nnz);
    _mumps_par.nz_loc  = dis_nnz;
    _mumps_par.irn_loc = _row.begin().operator->();
    _mumps_par.jcn_loc = _col.begin().operator->();
#endif // _RHEOLEF_HAVE_MPI
  } else { // sequential
    _row.resize (dia_nnz);
    _col.resize (dia_nnz);
    csr2mumps_struct_seq (a, _row, _col, use_symmetry);
    _mumps_par.nz = dia_nnz;
#if (_RHEOLEF_MUMPS_VERSION_MAJOR >= 5)
    _mumps_par.nnz = dia_nnz;
#endif
    _mumps_par.irn = _row.begin().operator->();
    _mumps_par.jcn = _col.begin().operator->();
  }
  _mumps_par.job = 1;
  dmumps_c (&_mumps_par);
  trace_macro ("symbolic: ordering effectively used = " << _mumps_par.infog[7-1]); // 3=scotch, etc
  check_macro (_mumps_par.infog[1-1] == 0, "mumps: error infog(1)="<<_mumps_par.infog[1-1]<<" has occurred (see mumps/infog(1) documentation; infog(2)="<<_mumps_par.infog[2-1]<<")");
  if (a.dis_nrow() <= 1) {
    // skip 0 or 1 dis_nrow, where mumps core dumps
    if (a.nrow() == 1) {
      _a00 = (*(a.begin()[0])).second;
    }
    return;
  }
  // ----------------------------------
  // step 2 : numeric factorization
  // ----------------------------------
  if (is_distributed<M>::value) { // distributed
#ifdef _RHEOLEF_HAVE_MPI
    _val.resize (dis_nnz);
    csr2mumps_values_dis (a, _val, use_symmetry, _drop_ext_nnz);
    _mumps_par.a_loc = _val.begin().operator->();
#endif // _RHEOLEF_HAVE_MPI
  } else { // sequential
    _val.resize (dia_nnz);
    csr2mumps_values_seq (a, _val, use_symmetry);
    _mumps_par.a = _val.begin().operator->();
  }
  _mumps_par.job = 2;
  bool finished = false;
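  // icntl(14) is the percentage of extra working space allowed over the mumps
  // estimate; infog(1) = -8 or -9 means the integer or real workspace was too
  // small, so the allowance is increased and the factorization retried.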
  while (!finished && _mumps_par.icntl[14-1] <= 2000) {
    dmumps_c (&_mumps_par);
    if ((_mumps_par.infog[1-1] == -8 || _mumps_par.infog[1-1] == -9) &&
         _mumps_par.infog[2-1] >= 0) {
      // not enough working space:
      // the default 20 seems to be insufficient in some cases (periodic, surfacic, p-laplacian)
      _mumps_par.icntl[14-1] += 10;
      if (_mumps_par.icntl[14-1] > 100) {
        dis_warning_macro ("numeric factorization: increase working space of "<<_mumps_par.icntl[14-1]<<"% and retry...");
      }
    } else {
      finished = true;
    }
  }
  check_macro (_mumps_par.infog[1-1] == 0,
       "solver failed: mumps error infog(1)=" <<_mumps_par.infog[1-1]
    << ", infog(2)=" <<_mumps_par.infog[2-1]
    << ", icntl(14)="<<_mumps_par.icntl[14-1]
    << ", icntl(23)="<<_mumps_par.icntl[23-1]);
  if (_mumps_par.icntl[33-1] != 0) {
    // get determinant from infos:
    _det.mantissa = _mumps_par.rinfog[12-1];
    // imag part _mumps_par.rinfog[13-1] is zero as matrix is real here
    _det.exponant = _mumps_par.infog[34-1];
    _det.base     = 2;
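    // the determinant equals mantissa * base^exponant, i.e. it can be rebuilt
    // as std::ldexp (_det.mantissa, _det.exponant) when it fits in a double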
  }
}
template<class T, class M>
vec<T,M>
solver_mumps_rep<T,M>::solve (const vec<T,M>& b) const
{
  if (b.dis_size() <= 1) {
    // skip 0 or 1 dis_nrow, where mumps core dumps
    if (b.size() == 1) {
      return vec<T,M>(b.ownership(), b[0]/_a00);
    } else {
      return vec<T,M>(b.ownership());
    }
  }
  // ----------------------------------
  // step 3 : define rhs & solve
  // ----------------------------------
  vec<T,M>    x;
  vector<T>   sol;
  vector<int> perm;
  size_type   sol_size = 0;
  vec<T,M>    b_host;
  T   dummy;  // mumps fails with zero sizes and null pointers...
  int idummy;
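  // in the distributed case mumps returns the solution distributed over the
  // processes (icntl(21)=1): info(23) entries per process in sol_loc, with
  // isol_loc giving their 1-based global indices; step 3.3 below remaps them
  // onto b.ownership.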
  if (is_distributed<M>::value) { // distributed
    // 3.1a. merge the rhs b on proc=0, as required by distributed mumps (why ?)
    _mumps_par.icntl[21-1] = 1; // 1: solution is distributed
    _mumps_par.icntl[10-1] = 0; // nstep of iterative refinement (not in the distributed version?)
    communicator comm = b.ownership().comm();
    distributor host_ownership (b.dis_size(), comm, comm.rank() == 0 ? b.dis_size() : 0);
    b_host.resize (host_ownership);
    size_type first_i = b.ownership().first_index();
    for (size_type i = 0, n = b.size(); i < n; i++) {
      size_type dis_i = first_i + i;
      b_host.dis_entry(dis_i) = b[i];
    }
    b_host.dis_entry_assembly();
    if (comm.rank() == 0) {
      _mumps_par.nrhs = 1;
      _mumps_par.rhs  = b_host.begin().operator->();
    }
    sol_size = _mumps_par.info[23-1];
    sol.resize  (sol_size);
    perm.resize (sol_size);
    _mumps_par.lsol_loc = sol_size;
    _mumps_par.sol_loc  = (sol_size > 0) ? sol.begin().operator->()  : &dummy;
    _mumps_par.isol_loc = (sol_size > 0) ? perm.begin().operator->() : &idummy;
  } else { // sequential
    // 3.1b. in-place resolution: copy b into x and pass x as the rhs
    _mumps_par.icntl[21-1] = 0; // 0: the solution goes on proc=0, in place in the rhs
    _mumps_par.icntl[10-1] = 0; // nstep of iterative refinement (not in the distributed version?)
    x = b;
    _mumps_par.nrhs = 1;
    _mumps_par.rhs  = x.begin().operator->();
  }
  // 3.2. run the mumps solver
  _mumps_par.icntl[9-1] = 1; // solve A*x=b : icntl(9)=1 ; otherwise solve A^t*x=b TODO
  _mumps_par.job = 3;
  dmumps_c (&_mumps_par);
  check_macro (_mumps_par.infog[1-1] >= 0,
       "solver failed: mumps error infog(1)="<<_mumps_par.infog[1-1]
    << ", infog(2)="<<_mumps_par.infog[2-1]);
  if (_mumps_par.infog[1-1] > 0) {
    warning_macro ("mumps warning infog(1)="<<_mumps_par.infog[1-1]
      << ", infog(2)="<<_mumps_par.infog[2-1]);
  }
  if (is_distributed<M>::value) { // distributed
    // 3.3. redistribute the solution as b.ownership
    x.resize (b.ownership());
    for (size_t i = 0; i < sol_size; i++) {
      size_type dis_i = perm[i] - 1;
      assert_macro (dis_i < x.dis_size(), "invalid index");
      x.dis_entry(dis_i) = sol[i];
    }
    x.dis_entry_assembly();
  }
  return x;
}
template<class T, class M>
vec<T,M>
solver_mumps_rep<T,M>::trans_solve (const vec<T,M>& b) const
{
  error_macro ("not yet");
  return vec<T,M>();
}
template<class T, class M>
solver_mumps_rep<T,M>::~solver_mumps_rep ()
{
  if (!_has_mumps_instance) return;
  _has_mumps_instance = false;
  // ----------------------------------
  // step F : close the mumps instance
  // ----------------------------------
  _mumps_par.job = -2;
  dmumps_c (&_mumps_par);
  check_macro (_mumps_par.infog[1-1] == 0, "mumps: an error has occurred");
}
// ----------------------------------------------------------------------------
// instantiation in library
// ----------------------------------------------------------------------------
// TODO: the code is only valid here for T=double

template class solver_mumps_rep<double,sequential>;

#ifdef _RHEOLEF_HAVE_MPI
template class solver_mumps_rep<double,distributed>;
#endif // _RHEOLEF_HAVE_MPI

} // namespace rheolef
#endif // _RHEOLEF_HAVE_MUMPS