getfem-commits
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Getfem-commits] (no subject)


From: Konstantinos Poulios
Subject: [Getfem-commits] (no subject)
Date: Tue, 1 Aug 2017 16:27:18 -0400 (EDT)

branch: devel-logari81
commit b9bc748c6c59950555af47a80c73ac51a789f167
Author: Konstantinos Poulios <address@hidden>
Date:   Tue Aug 1 22:26:52 2017 +0200

    normalize source code whitespace
---
 src/bgeot_convex_ref.cc            |   80 +-
 src/bgeot_ftool.cc                 |  328 ++---
 src/bgeot_geometric_trans.cc       |  422 +++---
 src/getfem/bgeot_ftool.h           |   57 +-
 src/getfem/bgeot_geometric_trans.h |   65 +-
 src/getfem/bgeot_mesh_structure.h  |   32 +-
 src/getfem/getfem_mesh_fem.h       |   12 +-
 src/getfem_assembling_tensors.cc   |  326 ++---
 src/getfem_fem.cc                  |  100 +-
 src/getfem_generic_assembly.cc     | 2773 ++++++++++++++++++------------------
 src/getfem_models.cc               |  179 ++-
 src/getfem_nonlinear_elasticity.cc |  254 ++--
 12 files changed, 2318 insertions(+), 2310 deletions(-)

diff --git a/src/bgeot_convex_ref.cc b/src/bgeot_convex_ref.cc
index 613ab8c..53c779f 100644
--- a/src/bgeot_convex_ref.cc
+++ b/src/bgeot_convex_ref.cc
@@ -46,7 +46,7 @@ namespace bgeot {
       }
     }
   }
-  
+
   /* ********************************************************************* */
   /*       Point tab storage.                                              */
   /* ********************************************************************* */
@@ -74,8 +74,8 @@ namespace bgeot {
     }
     stored_point_tab_key(const stored_point_tab *p) : pspt(p) {}
   };
-  
-  
+
+
   pstored_point_tab store_point_tab(const stored_point_tab &spt) {
     dal::pstatic_stored_object_key
       pk = std::make_shared<stored_point_tab_key>(&spt);
@@ -90,7 +90,7 @@ namespace bgeot {
   }
 
   /* should be called on the basic_convex_ref */
-  const mesh_structure* convex_of_reference::simplexified_convex() const {    
+  const mesh_structure* convex_of_reference::simplexified_convex() const {
     if (psimplexified_convex.get() == 0) {
       psimplexified_convex = std::make_shared<mesh_structure>();
       // dal::singleton<cleanup_simplexified_convexes>::instance()
@@ -120,7 +120,7 @@ namespace bgeot {
       if (N > o.N) return false;
       if (K < o.K) return true;
       if (K > o.K) return false;
-      if (nf < o.nf) return true;      
+      if (nf < o.nf) return true;
       return false;
     }
     convex_of_reference_key(int t, dim_type NN, short_type KK = 0,
@@ -172,7 +172,7 @@ namespace bgeot {
         std::fill(normals_[0].begin(), normals_[0].end(),
                   scalar_type(1.0)/sqrt(scalar_type(NN)));
       base_node c(NN);  c.fill(0.0);
-      
+
       if (KK == 0) {
         c.fill(1.0/(NN+1));
         convex<base_node>::points()[0] = c;
@@ -204,7 +204,7 @@ namespace bgeot {
     dal::add_stored_object(pk, p, p->structure(), p->pspt(),
                            dal::PERMANENT_STATIC_OBJECT);
     pconvex_ref p1 = basic_convex_ref(p);
-    if (p != p1) add_dependency(p, p1); 
+    if (p != p1) add_dependency(p, p1);
     return p;
   }
 
@@ -220,7 +220,7 @@ namespace bgeot {
     { return pllref->is_in(pt); }
     scalar_type is_in_face(short_type f, const base_node& pt) const
     { return pllref->is_in_face(f, pt); }
-    
+
     Q2_incomplete_of_ref_(dim_type nc) {
       GMM_ASSERT1(nc == 2 || nc == 3, "Sorry exist only in dimension 2 or 3");
       pllref = parallelepiped_of_reference(nc);
@@ -228,13 +228,13 @@ namespace bgeot {
       convex<base_node>::points().resize(cvs->nb_points());
       normals_.resize(nc == 2 ? 4: 6);
       basic_convex_ref_ = parallelepiped_of_reference(nc);
-      
+
       if(nc==2) {
         sc(normals_[0]) =  1, 0;
         sc(normals_[1]) = -1, 0;
         sc(normals_[2]) =  0, 1;
         sc(normals_[3]) =  0,-1;
-        
+
         convex<base_node>::points()[0] = base_node(0.0, 0.0);
         convex<base_node>::points()[1] = base_node(0.5, 0.0);
         convex<base_node>::points()[2] = base_node(1.0, 0.0);
@@ -243,7 +243,7 @@ namespace bgeot {
         convex<base_node>::points()[5] = base_node(0.0, 1.0);
         convex<base_node>::points()[6] = base_node(0.5, 1.0);
         convex<base_node>::points()[7] = base_node(1.0, 1.0);
-        
+
       } else {
         sc(normals_[0]) =  1, 0, 0;
         sc(normals_[1]) = -1, 0, 0;
@@ -251,7 +251,7 @@ namespace bgeot {
         sc(normals_[3]) =  0,-1, 0;
         sc(normals_[4]) =  0, 0, 1;
         sc(normals_[5]) =  0, 0,-1;
-        
+
         convex<base_node>::points()[0] = base_node(0.0, 0.0, 0.0);
         convex<base_node>::points()[1] = base_node(0.5, 0.0, 0.0);
         convex<base_node>::points()[2] = base_node(1.0, 0.0, 0.0);
@@ -260,12 +260,12 @@ namespace bgeot {
         convex<base_node>::points()[5] = base_node(0.0, 1.0, 0.0);
         convex<base_node>::points()[6] = base_node(0.5, 1.0, 0.0);
         convex<base_node>::points()[7] = base_node(1.0, 1.0, 0.0);
-        
+
         convex<base_node>::points()[8] = base_node(0.0, 0.0, 0.5);
         convex<base_node>::points()[9] = base_node(1.0, 0.0, 0.5);
         convex<base_node>::points()[10] = base_node(0.0, 1.0, 0.5);
         convex<base_node>::points()[11] = base_node(1.0, 1.0, 0.5);
-        
+
         convex<base_node>::points()[12] = base_node(0.0, 0.0, 1.0);
         convex<base_node>::points()[13] = base_node(0.5, 0.0, 1.0);
         convex<base_node>::points()[14] = base_node(1.0, 0.0, 1.0);
@@ -278,10 +278,10 @@ namespace bgeot {
       ppoints = store_point_tab(convex<base_node>::points());
     }
   };
-  
-  
+
+
   DAL_SIMPLE_KEY(Q2_incomplete_reference_key_, dim_type);
-  
+
   pconvex_ref Q2_incomplete_reference(dim_type nc) {
      dal::pstatic_stored_object_key
       pk = std::make_shared<Q2_incomplete_reference_key_>(nc);
@@ -291,7 +291,7 @@ namespace bgeot {
     dal::add_stored_object(pk, p, p->structure(), p->pspt(),
                            dal::PERMANENT_STATIC_OBJECT);
      pconvex_ref p1 = basic_convex_ref(p);
-    if (p != p1) add_dependency(p, p1); 
+    if (p != p1) add_dependency(p, p1);
     return p;
   }
 
@@ -306,9 +306,9 @@ namespace bgeot {
       // negative if the point is on the side of the face where the element is
       GMM_ASSERT1(pt.size() == 3, "Dimensions mismatch");
       if (f == 0)
-       return -pt[2];
+        return -pt[2];
       else
-       return gmm::vect_sp(normals_[f], pt) - sqrt(2.)/2.;
+        return gmm::vect_sp(normals_[f], pt) - sqrt(2.)/2.;
     }
     scalar_type is_in(const base_node& pt) const {
       // return a negative number if pt is in the convex
@@ -316,18 +316,18 @@ namespace bgeot {
       for (short_type i = 1; i < 5; ++i) r = std::max(r, is_in_face(i, pt));
       return r;
     }
-    
+
     pyramid_of_ref_(dim_type k) {
       GMM_ASSERT1(k == 1 || k == 2,
-                 "Sorry exist only in degree 1 or 2, not " << k);
+                  "Sorry exist only in degree 1 or 2, not " << k);
 
       cvs = pyramidal_structure(k);
       convex<base_node>::points().resize(cvs->nb_points());
       normals_.resize(cvs->nb_faces());
       if (k == 1)
-       auto_basic = true;
+        auto_basic = true;
       else
-       basic_convex_ref_ = pyramidal_element_of_reference(1);
+        basic_convex_ref_ = pyramidal_element_of_reference(1);
 
       sc(normals_[0]) =  0., 0., -1.;
       sc(normals_[1]) =  0.,-1.,  1.;
@@ -336,8 +336,8 @@ namespace bgeot {
       sc(normals_[4]) = -1., 0.,  1.;
 
       for (size_type i = 0; i < normals_.size(); ++i)
-       gmm::scale(normals_[i], 1. / gmm::vect_norm2(normals_[i]));
-      
+        gmm::scale(normals_[i], 1. / gmm::vect_norm2(normals_[i]));
+
       if (k==1) {
         convex<base_node>::points()[0]  = base_node(-1.0, -1.0, 0.0);
         convex<base_node>::points()[1]  = base_node( 1.0, -1.0, 0.0);
@@ -363,10 +363,10 @@ namespace bgeot {
       ppoints = store_point_tab(convex<base_node>::points());
     }
   };
-  
-  
+
+
   DAL_SIMPLE_KEY(pyramidal_reference_key_, dim_type);
-  
+
   pconvex_ref pyramidal_element_of_reference(dim_type k) {
      dal::pstatic_stored_object_key
       pk = std::make_shared<pyramidal_reference_key_>(k);
@@ -376,7 +376,7 @@ namespace bgeot {
     dal::add_stored_object(pk, p, p->structure(), p->pspt(),
                            dal::PERMANENT_STATIC_OBJECT);
     pconvex_ref p1 = basic_convex_ref(p);
-    if (p != p1) add_dependency(p, p1); 
+    if (p != p1) add_dependency(p, p1);
     return p;
   }
 
@@ -389,7 +389,7 @@ namespace bgeot {
 
   struct product_ref_ : public convex_of_reference {
     pconvex_ref cvr1, cvr2;
-    
+
     scalar_type is_in(const base_node &pt) const {
       dim_type n1 = cvr1->structure()->dim(), n2 = cvr2->structure()->dim();
       base_node pt1(n1), pt2(n2);
@@ -412,10 +412,10 @@ namespace bgeot {
       else return cvr2->is_in_face(short_type(f - 
cvr1->structure()->nb_faces()), pt2);
     }
 
-    product_ref_(pconvex_ref a, pconvex_ref b) { 
+    product_ref_(pconvex_ref a, pconvex_ref b) {
       if (a->structure()->dim() < b->structure()->dim())
-        GMM_WARNING1("Illegal convex: swap your operands: dim(cv1)=" << 
-                    int(a->structure()->dim()) << " < dim(cv2)=" << 
+        GMM_WARNING1("Illegal convex: swap your operands: dim(cv1)=" <<
+                    int(a->structure()->dim()) << " < dim(cv2)=" <<
                     int(b->structure()->dim()));
       cvr1 = a; cvr2 = b;
       *((convex<base_node> *)(this)) = convex_direct_product(*a, *b);
@@ -451,14 +451,14 @@ namespace bgeot {
                                                     b->structure()),
                            p->pspt(), dal::PERMANENT_STATIC_OBJECT);
     pconvex_ref p1 = basic_convex_ref(p);
-    if (p != p1) add_dependency(p, p1); 
+    if (p != p1) add_dependency(p, p1);
     return p;
   }
 
   pconvex_ref parallelepiped_of_reference(dim_type nc, dim_type k) {
     if (nc <= 1) return simplex_of_reference(nc,k);
     return convex_ref_product(parallelepiped_of_reference(dim_type(nc-1),k),
-                             simplex_of_reference(k));
+                              simplex_of_reference(k));
   }
 
   pconvex_ref prism_of_reference(dim_type nc) {
@@ -483,7 +483,7 @@ namespace bgeot {
     scalar_type is_in_face(short_type f, const base_node &pt) const {
       const base_node &x0 = (f ? convex<base_node>::points()[f-1]
                              : convex<base_node>::points().back());
-      return gmm::vect_sp(pt-x0, normals()[f]); 
+      return gmm::vect_sp(pt-x0, normals()[f]);
     }
     equilateral_simplex_of_ref_(size_type N) {
       pconvex_ref prev = equilateral_simplex_of_reference(dim_type(N-1));
@@ -508,7 +508,7 @@ namespace bgeot {
       }
       gmm::scale(G, scalar_type(1)/scalar_type(N+1));
       for (size_type f=0; f < N+1; ++f) {
-        normals_[f] = G - convex<base_node>::points()[f]; 
+        normals_[f] = G - convex<base_node>::points()[f];
         gmm::scale(normals_[f], 1/gmm::vect_norm2(normals_[f]));
       }
       ppoints = store_point_tab(convex<base_node>::points());
@@ -534,9 +534,9 @@ namespace bgeot {
   public:
     scalar_type is_in(const base_node &) const
     { GMM_ASSERT1(false, "Information not available here"); }
-    scalar_type is_in_face(short_type, const base_node &) const 
+    scalar_type is_in_face(short_type, const base_node &) const
     { GMM_ASSERT1(false, "Information not available here"); }
-  
+
     generic_dummy_(dim_type d, size_type n, short_type nf) {
       cvs = generic_dummy_structure(d, n, nf);
       auto_basic = true;
diff --git a/src/bgeot_ftool.cc b/src/bgeot_ftool.cc
index 9f6e78a..85b3c25 100644
--- a/src/bgeot_ftool.cc
+++ b/src/bgeot_ftool.cc
@@ -37,20 +37,20 @@ namespace bgeot {
       { ist.get(c); if (toupper(c) == toupper(st[i])) i++; else i = 0; }
     if (ist.eof()) return false; else return true;
   }
-  
-#define get_c__(r, c) {        ist.get(c);                                     
\
-    if (ist.eof()) { if (!st.size()) st.push_back('\n'); return r; }   \
+
+#define get_c__(r, c) { ist.get(c);                                        \
+    if (ist.eof()) { if (!st.size()) st.push_back('\n'); return r; }       \
     if (to_up) c = char(toupper(c)); }
 
-#define sdouble__(c, e) {  st.push_back(c); get_c__(5, d); \
-    if (d == e) { st.push_back(e); return 6; }            \
-    else { ist.putback(d); return 5; } }                  \
+#define sdouble__(c, e) { st.push_back(c); get_c__(5, d);  \
+    if (d == e) { st.push_back(e); return 6; }             \
+    else { ist.putback(d); return 5; } }                   \
 
   int get_token(std::istream &ist, std::string &st,
-               bool ignore_cr, bool to_up, bool read_un_pm, int *linenb) {
+                bool ignore_cr, bool to_up, bool read_un_pm, int *linenb) {
     st.clear();
     char c = char(-1), d, e;
-   
+
     get_c__(0, c);
 
     for(;;) { // Go through spaces, commentaries and '...'
@@ -58,39 +58,39 @@ namespace bgeot {
       if (isspace(c)) { while (isspace(c)) get_c__(0, c); }
       else if (c == '%') { while (c != '\n') get_c__(0, c); }
       else if (c == '.') {
-       if (ist.eof()) break; else {
-         get_c__(0, d);
-         if (d == '.'  && !ist.eof()) {
-           get_c__(0, e);
-           if (e == '.') {
-             while (c != '\n') get_c__(0, c);
-             if (linenb) (*linenb)++; 
-             get_c__(0, c);
-           }
-           else { ist.putback(e); ist.putback(d); break; }
-         }
-         else { ist.putback(d); break; }
-       }
+        if (ist.eof()) break; else {
+          get_c__(0, d);
+          if (d == '.'  && !ist.eof()) {
+            get_c__(0, e);
+            if (e == '.') {
+              while (c != '\n') get_c__(0, c);
+              if (linenb) (*linenb)++;
+              get_c__(0, c);
+            }
+            else { ist.putback(e); ist.putback(d); break; }
+          }
+          else { ist.putback(d); break; }
+        }
       }
       else break;
     }
 
     if (read_un_pm)
       if (c == '-' || c == '+') { // reading a number beginning with '+' or '-'
-       get_c__(2, d);
-       if (isdigit(d) || d == '.') { st.push_back(c); c = d; }
-       else ist.putback(d);
+        get_c__(2, d);
+        if (isdigit(d) || d == '.') { st.push_back(c); c = d; }
+        else ist.putback(d);
       }
 
     if (isdigit(c) || c == '.') { // reading a number
       while (isdigit(c) || c == '.' || c == 'e'  || c == 'E') {
-       st.push_back(c); 
-       if (c == 'e' || c == 'E') {
-         get_c__(2, c);
-         if (c == '+' || c == '-') st.push_back(c);
-         else ist.putback(c);
-       } 
-       get_c__(2, c);
+        st.push_back(c);
+        if (c == 'e' || c == 'E') {
+          get_c__(2, c);
+          if (c == '+' || c == '-') st.push_back(c);
+          else ist.putback(c);
+        }
+        get_c__(2, c);
       }
       ist.putback(c);
       return 2;
@@ -99,9 +99,9 @@ namespace bgeot {
     if (c == '\"') { // reading a string
       get_c__(3, c);
       while (true) {
-       if (c == '\"' || c == '\n') return 3;
-       if (c == '\\') { st.push_back(c); get_c__(3, c); }
-       st.push_back(c);
+        if (c == '\"' || c == '\n') return 3;
+        if (c == '\\') { st.push_back(c); get_c__(3, c); }
+        st.push_back(c);
         get_c__(3, c);
       }
       return 3;
@@ -110,9 +110,9 @@ namespace bgeot {
     if (c == '\'') { // reading a string
       get_c__(3, c);
       while (true) {
-       if (c == '\'' || c == '\n') return 3;
-       if (c == '\\') { st.push_back(c); get_c__(3, c); }
-       st.push_back(c);
+        if (c == '\'' || c == '\n') return 3;
+        if (c == '\\') { st.push_back(c); get_c__(3, c); }
+        st.push_back(c);
         get_c__(3, c);
       }
       return 3;
@@ -132,7 +132,7 @@ namespace bgeot {
     if (c == '=') sdouble__(c, '=');
     if (c == '~') sdouble__(c, '=');
     if (c == '<') sdouble__(c, '=');
-    if (c == '>') sdouble__(c, '=');   
+    if (c == '>') sdouble__(c, '=');
 
     st.push_back(c); return 5; // return the symbol read.
   }
@@ -140,11 +140,11 @@ namespace bgeot {
   std::istream& operator>>(std::istream& is, const skip& t) {
     char c;
     int i = 0;
-    while (!is.get(c).eof() && isspace(c)) /*continue*/;    
+    while (!is.get(c).eof() && isspace(c)) /*continue*/;
     for (i=0; t.s[i]; ++i) {
       if (i) is.get(c);
       GMM_ASSERT1(toupper(c) == toupper(t.s[i]) && !is.eof(),
-                 "expected token '" << t.s << "' not found");
+                  "expected token '" << t.s << "' not found");
     }
     return is;
   }
@@ -159,44 +159,44 @@ namespace bgeot {
     else if (b[i]) return -1;
     else return 0;
   }
-  
+
   void md_param::parse_error(const std::string &t) {
     GMM_ASSERT1(false, "Parse error reading "
-               << current_file << " line " << current_line << " near " << t);
+                << current_file << " line " << current_line << " near " << t);
   }
 
   void md_param::syntax_error(const std::string &t) {
     GMM_ASSERT1(false, "Error reading "
-               << current_file << " line " << current_line << " : " << t);
+                << current_file << " line " << current_line << " : " << t);
   }
 
   int md_param::get_next_token(std::istream &f) {
     static int token_type = 0;
     if (!token_is_valid)
       token_type = get_token(f, temp_string, false, false, false,
-                            &current_line);
+                             &current_line);
     token_is_valid = false;
     return token_type;
   }
 
-  void md_param::valid_token(void) { token_is_valid = true; }
+  void md_param::valid_token() { token_is_valid = true; }
 
   std::ostream &operator <<(std::ostream &o, const md_param::param_value& p) {
     switch (p.type_of_param()) {
     case md_param::REAL_VALUE : o << p.real(); break;
     case md_param::STRING_VALUE : o << '\'' << p.string() << '\''; break;
-    case md_param::ARRAY_VALUE : 
+    case md_param::ARRAY_VALUE :
       o << "[";
       if (p.array().size()) o << p.array()[0];
       for (unsigned i = 1; i < p.array().size(); ++i)
-       o << ", " << p.array()[i];
+        o << ", " << p.array()[i];
       o << "]";
     }
     return o;
   }
 
   md_param::param_value md_param::read_expression(std::istream &f,
-                                                 bool skipped) {
+                                                  bool skipped) {
     param_value result;
     int i = get_next_token(f);
     if (i == 2) { // a number
@@ -206,63 +206,63 @@ namespace bgeot {
       result = param_value(temp_string);
       int j = get_next_token(f);
       while (j == 3) {
-       result.string() += temp_string;
-       j = get_next_token(f);
+        result.string() += temp_string;
+        j = get_next_token(f);
       }
       valid_token();
     }
     else if (i == 4) { // a parameter name
       std::string name(temp_string);
       if (parameters.find(name) != parameters.end())
-       result = parameters[name];
+        result = parameters[name];
       else if (!skipped) {
-       std::stringstream s; s << "Parameter " << name << " not found";
-       syntax_error(s.str());
+        std::stringstream s; s << "Parameter " << name << " not found";
+        syntax_error(s.str());
       }
     }
     else if (i == 5) { // unary operators, parentheses and arrays
       switch (temp_string[0]) {
       case '(' :
-       {
-         result = read_expression_list(f, skipped);
-         int j = get_next_token(f);
-         if (j != 5 || temp_string[0] != ')') parse_error(temp_string);
-       }
-       break;
+        {
+          result = read_expression_list(f, skipped);
+          int j = get_next_token(f);
+          if (j != 5 || temp_string[0] != ')') parse_error(temp_string);
+        }
+        break;
       case '+' :
-       result = read_expression(f, skipped);
-       if (result.type_of_param() != REAL_VALUE)
-         syntax_error("Sorry, unary + does not support string "
-                      "or array values");
-       break;
+        result = read_expression(f, skipped);
+        if (result.type_of_param() != REAL_VALUE)
+          syntax_error("Sorry, unary + does not support string "
+                       "or array values");
+        break;
       case '-' :
-       result = read_expression(f, skipped);
-       if (result.type_of_param() != REAL_VALUE)
-         syntax_error("Sorry, unary - does not support string "
-                        "or array values");
-       result.real() *= -1.0;
-       break;
-      case '~' : 
-       result = read_expression(f, skipped);
-       if (result.type_of_param() != REAL_VALUE)
-         syntax_error("Sorry, unary ! does not support string "
-                        "or array values");
-       result.real() = !(result.real());
-       break;
+        result = read_expression(f, skipped);
+        if (result.type_of_param() != REAL_VALUE)
+          syntax_error("Sorry, unary - does not support string "
+                         "or array values");
+        result.real() *= -1.0;
+        break;
+      case '~' :
+        result = read_expression(f, skipped);
+        if (result.type_of_param() != REAL_VALUE)
+          syntax_error("Sorry, unary ! does not support string "
+                         "or array values");
+        result.real() = !(result.real());
+        break;
       case '[' :
-       {
-         bool first = true;
-         result = param_value(ARRAY_VALUE);
-         while (true) {
-           int j = get_next_token(f);
-           if (j == 5 && temp_string[0] == ']') break;
-           if (!first && temp_string[0] != ',') parse_error(temp_string);
-           if (first) valid_token();
-           result.array().push_back(read_expression_list(f, skipped));
-           first = false;
-         }
-       }
-       break;
+        {
+          bool first = true;
+          result = param_value(ARRAY_VALUE);
+          while (true) {
+            int j = get_next_token(f);
+            if (j == 5 && temp_string[0] == ']') break;
+            if (!first && temp_string[0] != ',') parse_error(temp_string);
+            if (first) valid_token();
+            result.array().push_back(read_expression_list(f, skipped));
+            first = false;
+          }
+        }
+        break;
       default : parse_error(temp_string);
       }
     }
@@ -283,25 +283,25 @@ namespace bgeot {
       }
     if (i == 6)
       switch (c) {
-      case '<' : prior = 3; op =  7; return; // <= 
-      case '>' : prior = 3; op =  8; return; // >= 
-      case '=' : prior = 3; op =  9; return; // == 
-      case '~' : prior = 3; op = 10; return; // != 
-      case '&' : prior = 4; op = 11; return; // && 
+      case '<' : prior = 3; op =  7; return; // <=
+      case '>' : prior = 3; op =  8; return; // >=
+      case '=' : prior = 3; op =  9; return; // ==
+      case '~' : prior = 3; op = 10; return; // !=
+      case '&' : prior = 4; op = 11; return; // &&
       case '|' : prior = 4; op = 12; return; // ||
       }
     prior = op = 0;
   }
 
   void md_param::do_bin_op(std::vector<md_param::param_value> &value_list,
-                       std::vector<int> &op_list,
-                       std::vector<int> &prior_list) {
+                           std::vector<int> &op_list,
+                           std::vector<int> &prior_list) {
     param_value &p1(*(value_list.end() - 2));
     param_value &p2(*(value_list.end() - 1));
     if (p1.type_of_param() != REAL_VALUE || p2.type_of_param() != REAL_VALUE)
       syntax_error("Sorry, binary operators does not support string "
-                    "or array values");
-    
+                   "or array values");
+
     switch (op_list.back()) {
     case 1  : p1.real() *= p2.real(); break;
     case 2  : p1.real() /= p2.real(); break;
@@ -321,7 +321,7 @@ namespace bgeot {
 
 
   md_param::param_value md_param::read_expression_list(std::istream &f,
-                                                      bool skipped) {
+                                                       bool skipped) {
     std::vector<param_value> value_list;
     value_list.push_back(read_expression(f, skipped));
     std::vector<int> op_list, prior_list;
@@ -329,7 +329,7 @@ namespace bgeot {
     operator_priority_ftool(i, temp_string[0], prior, op);
     while (op) {
       while (!prior_list.empty() && prior_list.back() <= prior)
-       do_bin_op(value_list, op_list, prior_list);
+        do_bin_op(value_list, op_list, prior_list);
 
       value_list.push_back(read_expression(f, skipped));
       op_list.push_back(op);
@@ -356,30 +356,30 @@ namespace bgeot {
     if (temp_string == "if") {
       param_value p = read_expression_list(f, skipped);
       if (p.type_of_param() != REAL_VALUE)
-       syntax_error("if instruction needs a condition");
+        syntax_error("if instruction needs a condition");
       bool b = (p.real() != 0.0);
       int j = read_instruction_list(f, !b || skipped);
       if (j == 0) syntax_error("Unterminated if");
       if (j == 2) {
-       int k = read_instruction_list(f, b || skipped);
-       if (k != 1) syntax_error("Unterminated else");
+        int k = read_instruction_list(f, b || skipped);
+        if (k != 1) syntax_error("Unterminated else");
       }
       if (j == 3) {
-       int k = 0;
-       do {
-         if (b) skipped = true;
-         p = read_expression_list(f, skipped);
-         if (p.type_of_param() != REAL_VALUE)
-           syntax_error("elseif instruction needs a condition");
-         b = (p.real() != 0.0);
-         k = read_instruction_list(f, !b || skipped);
-         if (k == 2) {
-           k = read_instruction_list(f, b || skipped);
-           break;
-         }
-       } while (k == 3);
-       if (k != 1) syntax_error("Unterminated elseif");
-      }     
+        int k = 0;
+        do {
+          if (b) skipped = true;
+          p = read_expression_list(f, skipped);
+          if (p.type_of_param() != REAL_VALUE)
+            syntax_error("elseif instruction needs a condition");
+          b = (p.real() != 0.0);
+          k = read_instruction_list(f, !b || skipped);
+          if (k == 2) {
+            k = read_instruction_list(f, b || skipped);
+            break;
+          }
+        } while (k == 3);
+        if (k != 1) syntax_error("Unterminated elseif");
+      }
       return 0;
     }
     if (temp_string == "error") {
@@ -409,85 +409,85 @@ namespace bgeot {
     if (read_instruction_list(f) > 1)
       syntax_error("Parameter file terminated by an else");
   }
-  
+
   void md_param::read_command_line(int argc, char *argv[]) {
     gmm::standard_locale sl;
     for (int aa = 1; aa < argc; aa++) {
       if (argv[aa][0] != '-') {
-       current_file = std::string(argv[aa]);
-       std::ifstream f1(current_file.c_str());
-       if (f1) { read_param_file(f1); f1.close(); }
-       else {
-         std::string r = current_file;
-         current_file += ".param";
-         std::ifstream f2(current_file.c_str());
-         if (f2) { read_param_file(f2); f2.close(); }
-         else GMM_ASSERT1(false,  "Parameter file " << r << "not found");
-       }
+        current_file = std::string(argv[aa]);
+        std::ifstream f1(current_file.c_str());
+        if (f1) { read_param_file(f1); f1.close(); }
+        else {
+          std::string r = current_file;
+          current_file += ".param";
+          std::ifstream f2(current_file.c_str());
+          if (f2) { read_param_file(f2); f2.close(); }
+          else GMM_ASSERT1(false,  "Parameter file " << r << "not found");
+        }
       }
       else if (argv[aa][1] == 'd') {
-       current_file = "command line";
-       if (strlen(argv[aa]) == 2)
-         { std::stringstream ss(argv[++aa]); read_param_file(ss); }
-       else 
-         { std::stringstream ss(&(argv[aa][2])); read_param_file(ss); }
+        current_file = "command line";
+        if (strlen(argv[aa]) == 2)
+          { std::stringstream ss(argv[++aa]); read_param_file(ss); }
+        else
+          { std::stringstream ss(&(argv[aa][2])); read_param_file(ss); }
       }
     }
   }
-  
+
   double md_param::real_value(const std::string &name, const char *comment) {
     if (parameters.find(name) == parameters.end()) {
       if (comment == 0) return 0.0;
       else {
-       double f;
-       gmm::standard_locale sl;
-       cout << "No parameter " << name << " found, please enter its value\n";
-       cout << comment << " : "; cin >> f;
-       parameters[name] = param_value(f);
+        double f;
+        gmm::standard_locale sl;
+        cout << "No parameter " << name << " found, please enter its value\n";
+        cout << comment << " : "; cin >> f;
+        parameters[name] = param_value(f);
       }
     }
     param_value &p(parameters[name]);
     GMM_ASSERT1(p.type_of_param() == REAL_VALUE,
-               "Parameter " << name << " is not real");
+                "Parameter " << name << " is not real");
     return p.real();
   }
-  
+
   long md_param::int_value(const std::string &name, const char *comment) {
     if (parameters.find(name) == parameters.end()) {
       if (comment == 0) return 0;
       else {
-       long f;
-       gmm::standard_locale sl;
-       cout << "No parameter " << name << " found, please enter its value\n";
-       cout << comment << " : "; cin >> f;
-       parameters[name] = param_value(double(f));
+        long f;
+        gmm::standard_locale sl;
+        cout << "No parameter " << name << " found, please enter its value\n";
+        cout << comment << " : "; cin >> f;
+        parameters[name] = param_value(double(f));
       }
     }
     param_value &p(parameters[name]);
     GMM_ASSERT1(p.type_of_param() == REAL_VALUE,
-               "Parameter " << name << " is not real");
+                "Parameter " << name << " is not real");
     return long(p.real());
   }
-  
+
   const std::string &md_param::string_value(const std::string &name,
-                                    const char *comment) {
+                                            const char *comment) {
     static const std::string empty_string;
     if (parameters.find(name) == parameters.end()) {
       if (comment == 0) return empty_string;
       else {
-       std::string s;
-       gmm::standard_locale sl;
-       cout << "No parameter " << name << " found, please enter its value\n";
-       cout << comment << " : "; cin >> s;
-       parameters[name] = param_value(s);
+        std::string s;
+        gmm::standard_locale sl;
+        cout << "No parameter " << name << " found, please enter its value\n";
+        cout << comment << " : "; cin >> s;
+        parameters[name] = param_value(s);
       }
     }
     param_value &p(parameters[name]);
     GMM_ASSERT1(p.type_of_param() == STRING_VALUE, "Parameter " << name
-               << " is not a character string");
+                << " is not a character string");
     return p.string();
   }
-  
+
   const std::vector<md_param::param_value> &
   md_param::array_value(const std::string &name, const char *comment) {
 
@@ -495,16 +495,16 @@ namespace bgeot {
     if (parameters.find(name) == parameters.end()) {
       if (comment == 0) return empty_array;
       else {
-       std::string s;
-       gmm::standard_locale sl;
-       cout << "No parameter " << name << " found, please enter its value\n";
-       cout << comment << " : "; cin >> s;
-       parameters[name] = param_value(s);
+        std::string s;
+        gmm::standard_locale sl;
+        cout << "No parameter " << name << " found, please enter its value\n";
+        cout << comment << " : "; cin >> s;
+        parameters[name] = param_value(s);
       }
     }
     param_value &p(parameters[name]);
     GMM_ASSERT1(p.type_of_param() == ARRAY_VALUE, "Parameter " << name
-               << " is not an array");
+                << " is not an array");
     return p.array();
   }
 }
diff --git a/src/bgeot_geometric_trans.cc b/src/bgeot_geometric_trans.cc
index 54ed28e..ef2bad7 100644
--- a/src/bgeot_geometric_trans.cc
+++ b/src/bgeot_geometric_trans.cc
@@ -32,35 +32,35 @@ namespace bgeot {
     DEFINE_STATIC_THREAD_LOCAL(std::vector<scalar_type>, v);
     return v;
   }
-  
+
   std::vector<scalar_type>& __aux2(){
     DEFINE_STATIC_THREAD_LOCAL(std::vector<scalar_type>, v);
     return v;
   }
-  
+
   std::vector<scalar_type>& __aux3(){
     DEFINE_STATIC_THREAD_LOCAL(std::vector<scalar_type>, v);
     return v;
   }
-  
+
   std::vector<int>& __ipvt_aux(){
     DEFINE_STATIC_THREAD_LOCAL(std::vector<int>, vi);
     return vi;
   }
-  
+
   // Optimized matrix mult for small matrices. To be verified.
   // Multiply the matrix A of size MxN by B of size NxP in C of size MxP
   void mat_mult(const scalar_type *A, const scalar_type *B, scalar_type *C,
-               size_type M, size_type N, size_type P) {
+                size_type M, size_type N, size_type P) {
     if (N != 0) {
       auto itC = C; auto itB = B;
       for (size_type j = 0; j < P; ++j, itB += N)
-       for (size_type i = 0; i < M; ++i, ++itC) {
-         auto itA = A+i, itB1 = itB;
-         *itC = (*itA) * (*itB1);
-         for (size_type k = 1; k < N; ++k)
-           { itA += M; ++itB1; *itC += (*itA) * (*itB1); }
-       }
+        for (size_type i = 0; i < M; ++i, ++itC) {
+          auto itA = A+i, itB1 = itB;
+          *itC = (*itA) * (*itB1);
+          for (size_type k = 1; k < N; ++k)
+            { itA += M; ++itB1; *itC += (*itA) * (*itB1); }
+        }
     } else std::fill(C, C+M*P, scalar_type(0));
   }
 
@@ -68,42 +68,42 @@ namespace bgeot {
   // Multiply the matrix A of size MxN by the transpose of B of size PxN
   // in C of size MxP
   void mat_tmult(const scalar_type *A, const scalar_type *B, scalar_type *C,
-                size_type M, size_type N, size_type P) {
+                 size_type M, size_type N, size_type P) {
     auto itC = C;
     switch (N) {
     case 0 : std::fill(C, C+M*P, scalar_type(0)); break;
-    case 1 : 
+    case 1 :
       for (size_type j = 0; j < P; ++j)
-       for (size_type i = 0; i < M; ++i, ++itC) {
-         auto itA = A+i, itB = B+j;
-         *itC = (*itA) * (*itB);
-       }
+        for (size_type i = 0; i < M; ++i, ++itC) {
+          auto itA = A+i, itB = B+j;
+          *itC = (*itA) * (*itB);
+        }
       break;
     case 2 :
       for (size_type j = 0; j < P; ++j)
-       for (size_type i = 0; i < M; ++i, ++itC) {
-         auto itA = A+i, itB = B+j;
-         *itC = (*itA) * (*itB);
-         itA += M; itB += P; *itC += (*itA) * (*itB);
-       }
+        for (size_type i = 0; i < M; ++i, ++itC) {
+          auto itA = A+i, itB = B+j;
+          *itC = (*itA) * (*itB);
+          itA += M; itB += P; *itC += (*itA) * (*itB);
+        }
       break;
     case 3 :
       for (size_type j = 0; j < P; ++j)
-       for (size_type i = 0; i < M; ++i, ++itC) {
-         auto itA = A+i, itB = B+j;
-         *itC = (*itA) * (*itB);
-         itA += M; itB += P; *itC += (*itA) * (*itB);
-         itA += M; itB += P; *itC += (*itA) * (*itB);
-       }
+        for (size_type i = 0; i < M; ++i, ++itC) {
+          auto itA = A+i, itB = B+j;
+          *itC = (*itA) * (*itB);
+          itA += M; itB += P; *itC += (*itA) * (*itB);
+          itA += M; itB += P; *itC += (*itA) * (*itB);
+        }
       break;
     default :
       for (size_type j = 0; j < P; ++j)
-       for (size_type i = 0; i < M; ++i, ++itC) {
-         auto itA = A+i, itB = B+j;
-         *itC = (*itA) * (*itB);
-         for (size_type k = 1; k < N; ++k)
-           { itA += M; itB += P; *itC += (*itA) * (*itB); }
-       }
+        for (size_type i = 0; i < M; ++i, ++itC) {
+          auto itA = A+i, itB = B+j;
+          *itC = (*itA) * (*itB);
+          for (size_type k = 1; k < N; ++k)
+            { itA += M; itB += P; *itC += (*itA) * (*itB); }
+        }
     }
   }
 
@@ -112,34 +112,34 @@ namespace bgeot {
 
   // Optimized lu_factor for small square matrices
   size_type lu_factor(scalar_type *A, std::vector<int> &ipvt,
-                     size_type N) {
+                      size_type N) {
     size_type info(0), i, j, jp, N_1 = N-1;
-      
+
     if (N) {
       for (j = 0; j < N_1; ++j) {
-       auto it = A + (j*(N+1));
-       scalar_type max = gmm::abs(*it); jp = j;
-       for (i = j+1; i < N; ++i) {
-         scalar_type ap = gmm::abs(*(++it));
-         if (ap > max) { jp = i; max = ap; }
-       }
-       ipvt[j] = int(jp + 1);
-       
-       if (max == scalar_type(0)) { info = j + 1; break; }
+        auto it = A + (j*(N+1));
+        scalar_type max = gmm::abs(*it); jp = j;
+        for (i = j+1; i < N; ++i) {
+          scalar_type ap = gmm::abs(*(++it));
+          if (ap > max) { jp = i; max = ap; }
+        }
+        ipvt[j] = int(jp + 1);
+
+        if (max == scalar_type(0)) { info = j + 1; break; }
         if (jp != j) {
-         auto it1 = A+jp, it2 = A+j;
-         for (i = 0; i < N; ++i, it1+=N, it2+=N) std::swap(*it1, *it2);
-       }
-       it = A + (j*(N+1)); max = *it++;
-       for (i = j+1; i < N; ++i) *it++ /= max; 
-       auto it22 = A + (j*N + j+1), it11 = it22;
-       auto it3 = A + ((j+1)*N+j);
-       for (size_type l = j+1; l < N; ++l) {
-         it11 += N;
-         auto it1 = it11, it2 = it22;
-         scalar_type a = *it3; it3 += N;
-         for (size_type k = j+1; k < N; ++k) *it1++ -= *it2++ * a;
-       }
+          auto it1 = A+jp, it2 = A+j;
+          for (i = 0; i < N; ++i, it1+=N, it2+=N) std::swap(*it1, *it2);
+        }
+        it = A + (j*(N+1)); max = *it++;
+        for (i = j+1; i < N; ++i) *it++ /= max;
+        auto it22 = A + (j*N + j+1), it11 = it22;
+        auto it3 = A + ((j+1)*N+j);
+        for (size_type l = j+1; l < N; ++l) {
+          it11 += N;
+          auto it1 = it11, it2 = it22;
+          scalar_type a = *it3; it3 += N;
+          for (size_type k = j+1; k < N; ++k) *it1++ -= *it2++ * a;
+        }
 
       }
       ipvt[N-1] = int(N);
@@ -148,7 +148,7 @@ namespace bgeot {
   }
 
   static void lower_tri_solve(const scalar_type *T, scalar_type *x, int N,
-                             bool is_unit) {
+                              bool is_unit) {
     scalar_type x_j;
     for (int j = 0; j < N; ++j) {
       auto itc = T + j*N, it = itc+(j+1), ite = itc+N;
@@ -160,7 +160,7 @@ namespace bgeot {
   }
 
   static void upper_tri_solve(const scalar_type *T, scalar_type *x, int N,
-                             bool is_unit) {
+                              bool is_unit) {
     scalar_type x_j;
     for (int j = N - 1; j >= 0; --j) {
       auto itc = T + j*N, it = itc, ite = itc+j;
@@ -170,8 +170,8 @@ namespace bgeot {
     }
   }
 
-  static void lu_solve(const scalar_type *LU, const std::vector<int> &ipvt, 
-                      scalar_type *x, scalar_type *b, int N) {
+  static void lu_solve(const scalar_type *LU, const std::vector<int> &ipvt,
+                       scalar_type *x, scalar_type *b, int N) {
     std::copy(b, b+N, x);
     for(int i = 0; i < N; ++i)
       { int perm = ipvt[i]-1; if(i != perm) std::swap(x[i], x[perm]); }
@@ -180,7 +180,7 @@ namespace bgeot {
   }
 
   scalar_type lu_det(const scalar_type *LU, const std::vector<int> &ipvt,
-                    size_type N) {
+                     size_type N) {
     scalar_type det(1);
     for (size_type j = 0; j < N; ++j) det *= *(LU+j*(N+1));
     for(int i = 0; i < int(N); ++i) if (i != ipvt[i]-1) { det = -det; }
@@ -191,26 +191,26 @@ namespace bgeot {
     switch (N) {
     case 1: return *A;
     case 2: return (*A) * (A[3]) - (A[1]) * (A[2]);
-    case 3: 
+    case 3:
       {
-       scalar_type a0 = A[4]*A[8] - A[5]*A[7], a3 = A[5]*A[6] - A[3]*A[8];
-       scalar_type a6 = A[3]*A[7] - A[4]*A[6];
-       return A[0] * a0 + A[1] * a3 + A[2] * a6;
+        scalar_type a0 = A[4]*A[8] - A[5]*A[7], a3 = A[5]*A[6] - A[3]*A[8];
+        scalar_type a6 = A[3]*A[7] - A[4]*A[6];
+        return A[0] * a0 + A[1] * a3 + A[2] * a6;
       }
     default:
       {
-       size_type NN = N*N;
-       if (__aux1().size() < NN) __aux1().resize(N*N);
-       std::copy(A, A+NN, __aux1().begin());
-       __ipvt_aux().resize(N);
-       lu_factor(&(*(__aux1().begin())), __ipvt_aux(), N);
-       return lu_det(&(*(__aux1().begin())), __ipvt_aux(), N);
+        size_type NN = N*N;
+        if (__aux1().size() < NN) __aux1().resize(N*N);
+        std::copy(A, A+NN, __aux1().begin());
+        __ipvt_aux().resize(N);
+        lu_factor(&(*(__aux1().begin())), __ipvt_aux(), N);
+        return lu_det(&(*(__aux1().begin())), __ipvt_aux(), N);
       }
     }
   }
 
   void lu_inverse(const scalar_type *LU, const std::vector<int> &ipvt,
-                 scalar_type *A, size_type N) {
+                  scalar_type *A, size_type N) {
     __aux2().resize(N); gmm::clear(__aux2());
     __aux3().resize(N);
     for(size_type i = 0; i < N; ++i) {
@@ -224,44 +224,44 @@ namespace bgeot {
     switch (N) {
     case 1:
       {
-       scalar_type det = *A;
-       GMM_ASSERT1(det != scalar_type(0), "Non invertible matrix");
-       *A = scalar_type(1)/det;
-       return det;
+        scalar_type det = *A;
+        GMM_ASSERT1(det != scalar_type(0), "Non invertible matrix");
+        *A = scalar_type(1)/det;
+        return det;
       }
     case 2:
       {
-       scalar_type a = *A, b = A[2], c = A[1], d = A[3];
-       scalar_type det = a * d - b * c;
-       GMM_ASSERT1(det != scalar_type(0), "Non invertible matrix");
-       *A++ =  d/det;  *A++ /= -det; *A++ /= -det;  *A =  a/det;
-       return det;
+        scalar_type a = *A, b = A[2], c = A[1], d = A[3];
+        scalar_type det = a * d - b * c;
+        GMM_ASSERT1(det != scalar_type(0), "Non invertible matrix");
+        *A++ =  d/det;  *A++ /= -det; *A++ /= -det;  *A =  a/det;
+        return det;
       }
     case 3:
       {
-       scalar_type a0 = A[4]*A[8] - A[5]*A[7], a1 = A[5]*A[6] - A[3]*A[8];
-       scalar_type a2 = A[3]*A[7] - A[4]*A[6];
-       scalar_type det =  A[0] * a0 + A[1] * a1 + A[2] * a2;
-       GMM_ASSERT1(det != scalar_type(0), "Non invertible matrix");
-       scalar_type a3 = (A[2]*A[7] - A[1]*A[8]), a6 = (A[1]*A[5] - A[2]*A[4]);
-       scalar_type a4 = (A[0]*A[8] - A[2]*A[6]), a7 = (A[2]*A[3] - A[0]*A[5]);
-       scalar_type a5 = (A[1]*A[6] - A[0]*A[7]), a8 = (A[0]*A[4] - A[1]*A[3]);
-       *A++ = a0 / det; *A++ = a3 / det; *A++ = a6 / det;
-       *A++ = a1 / det; *A++ = a4 / det; *A++ = a7 / det;
-       *A++ = a2 / det; *A++ = a5 / det; *A++ = a8 / det;
-       return det;
+        scalar_type a0 = A[4]*A[8] - A[5]*A[7], a1 = A[5]*A[6] - A[3]*A[8];
+        scalar_type a2 = A[3]*A[7] - A[4]*A[6];
+        scalar_type det =  A[0] * a0 + A[1] * a1 + A[2] * a2;
+        GMM_ASSERT1(det != scalar_type(0), "Non invertible matrix");
+        scalar_type a3 = (A[2]*A[7] - A[1]*A[8]), a6 = (A[1]*A[5] - A[2]*A[4]);
+        scalar_type a4 = (A[0]*A[8] - A[2]*A[6]), a7 = (A[2]*A[3] - A[0]*A[5]);
+        scalar_type a5 = (A[1]*A[6] - A[0]*A[7]), a8 = (A[0]*A[4] - A[1]*A[3]);
+        *A++ = a0 / det; *A++ = a3 / det; *A++ = a6 / det;
+        *A++ = a1 / det; *A++ = a4 / det; *A++ = a7 / det;
+        *A++ = a2 / det; *A++ = a5 / det; *A++ = a8 / det;
+        return det;
       }
     default:
       {
-       size_type NN = N*N;
-       if (__aux1().size() < NN) __aux1().resize(NN);
-       std::copy(A, A+NN, __aux1().begin());
-       __ipvt_aux().resize(N);
-       size_type info = lu_factor(&(*(__aux1().begin())), __ipvt_aux(), N);
-       if (doassert) GMM_ASSERT1(!info, "Non invertible matrix, pivot = "
-                                 << info);
-       if (!info) lu_inverse(&(*(__aux1().begin())), __ipvt_aux(), A, N);
-       return lu_det(&(*(__aux1().begin())), __ipvt_aux(), N);
+        size_type NN = N*N;
+        if (__aux1().size() < NN) __aux1().resize(NN);
+        std::copy(A, A+NN, __aux1().begin());
+        __ipvt_aux().resize(N);
+        size_type info = lu_factor(&(*(__aux1().begin())), __ipvt_aux(), N);
+        if (doassert) GMM_ASSERT1(!info, "Non invertible matrix, pivot = "
+                                  << info);
+        if (!info) lu_inverse(&(*(__aux1().begin())), __ipvt_aux(), A, N);
+        return lu_det(&(*(__aux1().begin())), __ipvt_aux(), N);
       }
     }
   }
@@ -276,14 +276,14 @@ namespace bgeot {
     if (N && P && Q) {
       auto itK = K.begin();
       for (size_type j = 0; j < Q; ++j) {
-       auto itpc_j = pc.begin() + j*P, itG_b = G.begin();
-       for (size_type i = 0; i < N; ++i, ++itG_b) {
-         auto itG = itG_b, itpc = itpc_j;
-         register scalar_type a = *(itG) * (*itpc);
-         for (size_type k = 1; k < P; ++k)
-           { itG += N; a += *(itG) * (*++itpc); }
-         *itK++ = a;
-       }
+        auto itpc_j = pc.begin() + j*P, itG_b = G.begin();
+        for (size_type i = 0; i < N; ++i, ++itG_b) {
+          auto itG = itG_b, itpc = itpc_j;
+          register scalar_type a = *(itG) * (*itpc);
+          for (size_type k = 1; k < P; ++k)
+            { itG += N; a += *(itG) * (*++itpc); }
+          *itK++ = a;
+        }
       }
     } else gmm::clear(K);
   }
@@ -305,7 +305,7 @@ namespace bgeot {
     return xreal_;
   }
 
-  void geotrans_interpolation_context::compute_J(void) const {
+  void geotrans_interpolation_context::compute_J() const {
     GMM_ASSERT1(have_G() && have_pgt(), "Unable to compute J\n");
     size_type P = pgt_->structure()->dim();
     const base_matrix &KK = K();
@@ -320,21 +320,21 @@ namespace bgeot {
       case 1: J__ = *it; break;
       case 2: J__ = (*it) * (it[3]) - (it[1]) * (it[2]); break;
       case 3:
-       {
-         B_.base_resize(P, P); // co-factors
-         auto itB = B_.begin();
-         scalar_type a0 = itB[0] = it[4]*it[8] - it[5]*it[7];
-         scalar_type a1 = itB[1] = it[5]*it[6] - it[3]*it[8];
-         scalar_type a2 = itB[2] = it[3]*it[7] - it[4]*it[6];
-         J__ = it[0] * a0 + it[1] * a1 + it[2] * a2;
-       } break;
+        {
+          B_.base_resize(P, P); // co-factors
+          auto itB = B_.begin();
+          scalar_type a0 = itB[0] = it[4]*it[8] - it[5]*it[7];
+          scalar_type a1 = itB[1] = it[5]*it[6] - it[3]*it[8];
+          scalar_type a2 = itB[2] = it[3]*it[7] - it[4]*it[6];
+          J__ = it[0] * a0 + it[1] * a1 + it[2] * a2;
+        } break;
       default:
-       B_factors.base_resize(P, P); // store factorization for B computation
-       gmm::copy(gmm::transposed(KK), B_factors);
-       ipvt.resize(P);
-       bgeot::lu_factor(&(*(B_factors.begin())), ipvt, P);
-       J__ = bgeot::lu_det(&(*(B_factors.begin())), ipvt, P);
-       break;
+        B_factors.base_resize(P, P); // store factorization for B computation
+        gmm::copy(gmm::transposed(KK), B_factors);
+        ipvt.resize(P);
+        bgeot::lu_factor(&(*(B_factors.begin())), ipvt, P);
+        J__ = bgeot::lu_det(&(*(B_factors.begin())), ipvt, P);
+        break;
       }
       J_ = gmm::abs(J__);
     }
@@ -347,11 +347,11 @@ namespace bgeot {
       size_type P = pgt_->structure()->dim();
       K_.base_resize(N(), P);
       if (have_pgp()) {
-       pgt_->compute_K_matrix(*G_, pgp_->grad(ii_), K_);
+        pgt_->compute_K_matrix(*G_, pgp_->grad(ii_), K_);
       } else {
-       PC.base_resize(pgt_->nb_points(), P);
+        PC.base_resize(pgt_->nb_points(), P);
         pgt_->poly_vector_grad(xref(), PC);
-       pgt_->compute_K_matrix(*G_, PC, K_);
+        pgt_->compute_K_matrix(*G_, PC, K_);
       }
       have_K_ = true;
     }
@@ -368,29 +368,29 @@ namespace bgeot {
       if (P != N_) {
         gmm::mult(KK, B_factors, B_);
       } else {
-       switch(P) {
-       case 1: B_(0, 0) = scalar_type(1) / J__;  break;
-       case 2:
-         {
-           auto it = &(*(KK.begin())); auto itB = &(*(B_.begin()));
-           *itB++ = it[3] / J__; *itB++ = -it[2] / J__; 
-           *itB++ = -it[1] / J__; *itB = (*it) / J__;
-         } break;
-       case 3:
-         {
-           auto it = &(*(KK.begin())); auto itB = &(*(B_.begin()));
-           *itB++ /= J__; *itB++ /= J__; *itB++ /= J__; 
-           *itB++ = (it[2]*it[7] - it[1]*it[8]) / J__;
-           *itB++ = (it[0]*it[8] - it[2]*it[6]) / J__;
-           *itB++ = (it[1]*it[6] - it[0]*it[7]) / J__;
-           *itB++ = (it[1]*it[5] - it[2]*it[4]) / J__;
-           *itB++ = (it[2]*it[3] - it[0]*it[5]) / J__;
-           *itB   = (it[0]*it[4] - it[1]*it[3]) / J__;
-         } break;
-       default:
-         bgeot::lu_inverse(&(*(B_factors.begin())), ipvt, &(*(B_.begin())), P);
-         break;
-       }
+        switch(P) {
+        case 1: B_(0, 0) = scalar_type(1) / J__;  break;
+        case 2:
+          {
+            auto it = &(*(KK.begin())); auto itB = &(*(B_.begin()));
+            *itB++ = it[3] / J__; *itB++ = -it[2] / J__;
+            *itB++ = -it[1] / J__; *itB = (*it) / J__;
+          } break;
+        case 3:
+          {
+            auto it = &(*(KK.begin())); auto itB = &(*(B_.begin()));
+            *itB++ /= J__; *itB++ /= J__; *itB++ /= J__;
+            *itB++ = (it[2]*it[7] - it[1]*it[8]) / J__;
+            *itB++ = (it[0]*it[8] - it[2]*it[6]) / J__;
+            *itB++ = (it[1]*it[6] - it[0]*it[7]) / J__;
+            *itB++ = (it[1]*it[5] - it[2]*it[4]) / J__;
+            *itB++ = (it[2]*it[3] - it[0]*it[5]) / J__;
+            *itB   = (it[0]*it[4] - it[1]*it[3]) / J__;
+          } break;
+        default:
+          bgeot::lu_inverse(&(*(B_factors.begin())), ipvt, &(*(B_.begin())), P);
+          break;
+        }
       }
       have_B_ = true;
     }
@@ -423,7 +423,7 @@ namespace bgeot {
           gmm::mult(G(), pgp_->hessian(ii_), Htau);
         } else {
           /* very inefficient of course... */
-         PC.base_resize(pgt()->nb_points(), P*P);
+          PC.base_resize(pgt()->nb_points(), P*P);
           pgt()->poly_vector_hess(xref(), PC);
           gmm::mult(G(), PC, Htau);
         }
@@ -468,7 +468,7 @@ namespace bgeot {
       for (size_type i = 0; i < cvr->points()[ip].size(); ++i)
         if (gmm::abs(cvr->points()[ip][i]) > 1e-10
             && gmm::abs(cvr->points()[ip][i]-1.0) > 1e-10
-           && gmm::abs(cvr->points()[ip][i]+1.0) > 1e-10)
+            && gmm::abs(cvr->points()[ip][i]+1.0) > 1e-10)
           { vertex = false; break; }
       if (vertex) vertices_.push_back(ip);
     }
@@ -490,10 +490,10 @@ namespace bgeot {
       dim_type n = dim();
       grad_.resize(R);
       for (size_type i = 0; i < R; ++i) {
-       grad_[i].resize(n);
-       for (dim_type j = 0; j < n; ++j) {
-         grad_[i][j] = trans[i]; grad_[i][j].derivative(j);
-       }
+        grad_[i].resize(n);
+        for (dim_type j = 0; j < n; ++j) {
+          grad_[i][j] = trans[i]; grad_[i][j].derivative(j);
+        }
       }
     }
 
@@ -502,16 +502,16 @@ namespace bgeot {
       dim_type n = dim();
       hess_.resize(R);
       for (size_type i = 0; i < R; ++i) {
-       hess_[i].resize(n*n);
-       for (dim_type j = 0; j < n; ++j) {
-         for (dim_type k = 0; k < n; ++k) {
-           hess_[i][j+k*n] = trans[i];
-           hess_[i][j+k*n].derivative(j); hess_[i][j+k*n].derivative(k);
-         }
-       }
+        hess_[i].resize(n*n);
+        for (dim_type j = 0; j < n; ++j) {
+          for (dim_type k = 0; k < n; ++k) {
+            hess_[i][j+k*n] = trans[i];
+            hess_[i][j+k*n].derivative(j); hess_[i][j+k*n].derivative(k);
+          }
+        }
       }
     }
-    
+
     virtual void poly_vector_val(const base_node &pt, base_vector &val) const {
       val.resize(nb_points());
       for (size_type k = 0; k < nb_points(); ++k)
@@ -519,7 +519,7 @@ namespace bgeot {
     }
 
     virtual void poly_vector_val(const base_node &pt,
-                                const convex_ind_ct &ind_ct,
+                                 const convex_ind_ct &ind_ct,
                                  base_vector &val) const {
       size_type nb_funcs=ind_ct.size();
       val.resize(nb_funcs);
@@ -537,7 +537,7 @@ namespace bgeot {
     }
 
     virtual void poly_vector_grad(const base_node &pt,
-                                 const convex_ind_ct &ind_ct,
+                                  const convex_ind_ct &ind_ct,
                                   base_matrix &pc) const {
       if (!(grad_.size())) compute_grad_();
       FUNC PP;
@@ -556,7 +556,7 @@ namespace bgeot {
         for (dim_type n = 0; n < dim(); ++n) {
           for (dim_type m = 0; m <= n; ++m)
             pc(i, n*dim()+m) = pc(i, m*dim()+n) =
-             to_scalar(hess_[i][m*dim()+n].eval(pt.begin()));
+              to_scalar(hess_[i][m*dim()+n].eval(pt.begin()));
         }
     }
 
@@ -767,7 +767,7 @@ namespace bgeot {
 
 
   /* ******************************************************************** */
-  /*   Incomplete Q2 geometric transformation for n=2 or 3.              */
+  /*    Incomplete Q2 geometric transformation for n=2 or 3.              */
   /* ******************************************************************** */
   /* By Yao Koutsawa  <address@hidden> 2012-12-10                  */
 
@@ -778,7 +778,7 @@ namespace bgeot {
       is_lin = false;
       complexity_ = 2;
       trans.resize(R);
-      
+
       if (nc == 2) {
         std::stringstream s
           ( "1 - 2*x^2*y - 2*x*y^2 + 2*x^2 + 5*x*y + 2*y^2 - 3*x - 3*y;"
@@ -789,7 +789,7 @@ namespace bgeot {
             "2*x*x*y - 2*x*y*y - x*y + 2*y*y - y;"
             "4*(x*y - x*x*y);"
             "2*x*x*y + 2*x*y*y - 3*x*y;");
-        
+
         for (int i = 0; i < 8; ++i)
           trans[i] = bgeot::read_base_poly(2, s);
       } else {
@@ -819,27 +819,27 @@ namespace bgeot {
           "2*x^2*y*z - 2*x*y^2*z - 2*x*y*z^2 + 2*y^2*z + 2*y*z^2 + x*y*z - 3*y*z;"
            "4*( - x^2*y*z + x*y*z);"
            "2*x^2*y*z + 2*x*y^2*z + 2*x*y*z^2 - 5*x*y*z;");
-        
+
         for (int i = 0; i < 20; ++i)
           trans[i] = bgeot::read_base_poly(3, s);
       }
       fill_standard_vertices();
     }
   };
-  
+
   static pgeometric_trans
     Q2_incomplete_gt(gt_param_list& params,
                      std::vector<dal::pstatic_stored_object> &dependencies) {
     GMM_ASSERT1(params.size() == 1, "Bad number of parameters : "
-               << params.size() << " should be 1.");
+                << params.size() << " should be 1.");
     GMM_ASSERT1(params[0].type() == 0, "Bad type of parameters");
     int n = int(::floor(params[0].num() + 0.01));
     GMM_ASSERT1(n == 2 || n == 3, "Bad parameter, expected value 2 or 3");
-    
+
     dependencies.push_back(Q2_incomplete_reference(dim_type(n)));
     return std::make_shared<Q2_incomplete_trans_>(dim_type(n));
   }
-  
+
   pgeometric_trans Q2_incomplete_geotrans(dim_type nc) {
     std::stringstream name;
     name << "GT_Q2_INCOMPLETE(" << nc << ")";
@@ -847,7 +847,7 @@ namespace bgeot {
   }
 
   /* ******************************************************************** */
-  /*   Pyramidal geometric transformation of order k=1 or 2.             */
+  /*    Pyramidal geometric transformation of order k=1 or 2.             */
   /* ******************************************************************** */
 
   struct pyramidal_trans_: public fraction_geometric_trans  {
@@ -859,55 +859,55 @@ namespace bgeot {
       trans.resize(R);
 
       if (k == 1) {
-       base_rational_fraction Q(read_base_poly(3, "x*y"),    // Q = xy/(1-z)
-                                read_base_poly(3, "1-z"));
-       trans[0] = (read_base_poly(3, "1-x-y-z") + Q)*0.25;
-       trans[1] = (read_base_poly(3, "1+x-y-z") - Q)*0.25;
-       trans[2] = (read_base_poly(3, "1-x+y-z") - Q)*0.25;
-       trans[3] = (read_base_poly(3, "1+x+y-z") + Q)*0.25;
-       trans[4] = read_base_poly(3, "z");
+        base_rational_fraction Q(read_base_poly(3, "x*y"),    // Q = xy/(1-z)
+                                 read_base_poly(3, "1-z"));
+        trans[0] = (read_base_poly(3, "1-x-y-z") + Q)*0.25;
+        trans[1] = (read_base_poly(3, "1+x-y-z") - Q)*0.25;
+        trans[2] = (read_base_poly(3, "1-x+y-z") - Q)*0.25;
+        trans[3] = (read_base_poly(3, "1+x+y-z") + Q)*0.25;
+        trans[4] = read_base_poly(3, "z");
       } else if (k == 2) {
         base_poly xi0  = read_base_poly(3, "(1-z-x)*0.5");
         base_poly xi1  = read_base_poly(3, "(1-z-y)*0.5");
         base_poly xi2  = read_base_poly(3, "(1-z+x)*0.5");
         base_poly xi3  = read_base_poly(3, "(1-z+y)*0.5");
-       base_poly x    = read_base_poly(3, "x");
-       base_poly y    = read_base_poly(3, "y");
-       base_poly z    = read_base_poly(3, "z");
-       base_poly ones = read_base_poly(3, "1");
-       base_poly un_z = read_base_poly(3, "1-z");
-       base_rational_fraction Q(read_base_poly(3, "1"), un_z); // Q = 1/(1-z)
-       trans[ 0] = Q*Q*xi0*xi1*(x*y-z*un_z);
-       trans[ 1] = Q*Q*xi0*xi1*xi2*(xi1*2.-un_z)*4.;
-       trans[ 2] = Q*Q*xi1*xi2*(-x*y-z*un_z);
-       trans[ 3] = Q*Q*xi3*xi0*xi1*(xi0*2.-un_z)*4.;
-       trans[ 4] = Q*Q*xi0*xi1*xi2*xi3*16.;
-       trans[ 5] = Q*Q*xi1*xi2*xi3*(xi2*2.-un_z)*4.;
-       trans[ 6] = Q*Q*xi3*xi0*(-x*y-z*un_z);
-       trans[ 7] = Q*Q*xi2*xi3*xi0*(xi3*2.-un_z)*4.;
-       trans[ 8] = Q*Q*xi2*xi3*(x*y-z*un_z);
-       trans[ 9] = Q*z*xi0*xi1*4.;
-       trans[10] = Q*z*xi1*xi2*4.;
-       trans[11] = Q*z*xi3*xi0*4.;
-       trans[12] = Q*z*xi2*xi3*4.;
-       trans[13] = read_base_poly(3, "z*(2*z-1)");
+        base_poly x    = read_base_poly(3, "x");
+        base_poly y    = read_base_poly(3, "y");
+        base_poly z    = read_base_poly(3, "z");
+        base_poly ones = read_base_poly(3, "1");
+        base_poly un_z = read_base_poly(3, "1-z");
+        base_rational_fraction Q(read_base_poly(3, "1"), un_z); // Q = 1/(1-z)
+        trans[ 0] = Q*Q*xi0*xi1*(x*y-z*un_z);
+        trans[ 1] = Q*Q*xi0*xi1*xi2*(xi1*2.-un_z)*4.;
+        trans[ 2] = Q*Q*xi1*xi2*(-x*y-z*un_z);
+        trans[ 3] = Q*Q*xi3*xi0*xi1*(xi0*2.-un_z)*4.;
+        trans[ 4] = Q*Q*xi0*xi1*xi2*xi3*16.;
+        trans[ 5] = Q*Q*xi1*xi2*xi3*(xi2*2.-un_z)*4.;
+        trans[ 6] = Q*Q*xi3*xi0*(-x*y-z*un_z);
+        trans[ 7] = Q*Q*xi2*xi3*xi0*(xi3*2.-un_z)*4.;
+        trans[ 8] = Q*Q*xi2*xi3*(x*y-z*un_z);
+        trans[ 9] = Q*z*xi0*xi1*4.;
+        trans[10] = Q*z*xi1*xi2*4.;
+        trans[11] = Q*z*xi3*xi0*4.;
+        trans[12] = Q*z*xi2*xi3*4.;
+        trans[13] = read_base_poly(3, "z*(2*z-1)");
       }
       fill_standard_vertices();
     }
   };
-  
+
   static pgeometric_trans
     pyramidal_gt(gt_param_list& params,
                      std::vector<dal::pstatic_stored_object> &dependencies) {
     GMM_ASSERT1(params.size() == 1, "Bad number of parameters : "
-               << params.size() << " should be 1.");
+                << params.size() << " should be 1.");
     GMM_ASSERT1(params[0].type() == 0, "Bad type of parameters");
     int k = int(::floor(params[0].num() + 0.01));
-    
+
     dependencies.push_back(pyramidal_element_of_reference(dim_type(k)));
     return std::make_shared<pyramidal_trans_>(dim_type(k));
   }
-  
+
   pgeometric_trans pyramidal_geotrans(short_type k) {
     static short_type k_ = -1;
     static pgeometric_trans pgt = 0;
@@ -916,7 +916,7 @@ namespace bgeot {
       name << "GT_PYRAMID(" << k << ")";
       pgt = geometric_trans_descriptor(name.str());
     }
-    return pgt;  
+    return pgt;
   }
 
   /* ******************************************************************** */
@@ -1004,7 +1004,7 @@ namespace bgeot {
   void add_geometric_trans_name
     (std::string name, dal::naming_system<geometric_trans>::pfunction f) {
     dal::singleton<geometric_trans_naming_system>::instance().add_suffix(name,
-                                                                        f);
+                                                                         f);
   }
 
   pgeometric_trans geometric_trans_descriptor(std::string name) {
diff --git a/src/getfem/bgeot_ftool.h b/src/getfem/bgeot_ftool.h
index 8b752c2..4390914 100644
--- a/src/getfem/bgeot_ftool.h
+++ b/src/getfem/bgeot_ftool.h
@@ -69,8 +69,8 @@ namespace bgeot
    *  allows to consider the carriage return as a space character.
    */
   int get_token(std::istream &ist, std::string &st,
-               bool ignore_cr = true, bool to_up = true, 
-               bool read_un_pm = true, int *linenb = 0);
+                bool ignore_cr = true, bool to_up = true,
+                bool read_un_pm = true, int *linenb = 0);
 
   struct skip {
     const char *s;
@@ -84,11 +84,11 @@ namespace bgeot
   int casecmp(const char *a, const char *b, unsigned n=unsigned(-1));
 
   inline int casecmp(const std::string& a, const char *b,
-                    unsigned n=unsigned(-1))
+                     unsigned n=unsigned(-1))
   { return casecmp(a.c_str(),b,n); }
 
   inline int casecmp(const std::string& a, const std::string& b,
-                    unsigned n=unsigned(-1))
+                     unsigned n=unsigned(-1))
   { return casecmp(a.c_str(), b.c_str(),n); }
 
   inline int casecmp(char a, char b)
@@ -97,14 +97,14 @@ namespace bgeot
   /* ********************************************************************* */
   /*       Read a parameter file.                                          */
   /* ********************************************************************* */
- 
+
   // The associated langage has approximatively the following grammar:
   //
   // 'instruction' := 'parameter_name' '=' 'expression';
   //              or 'if' 'expression'
   //                 'instruction_list'
   //                 [ 'else' 'instruction_list' ]
-  //                 'end'          
+  //                 'end'
   // 'expression' := '[' 'expression' ',' ... ']'
   //              or 'parameter_name'
   //              or 'numeric_value'
@@ -136,23 +136,23 @@ namespace bgeot
       double real_value;
       std::string string_value;
       std::vector<param_value> array_value;
-      
+
     public :
-      param_type type_of_param(void) const { return pt; }
-      double &real(void) { return real_value; }
-      double real(void) const { return real_value; }
-      std::string &string(void) { return string_value; }
-      const std::string &string(void) const { return string_value; }
-      std::vector<param_value> &array(void) { return array_value; }
-      const std::vector<param_value> &array(void) const { return array_value; }
+      param_type type_of_param() const { return pt; }
+      double &real() { return real_value; }
+      double real() const { return real_value; }
+      std::string &string() { return string_value; }
+      const std::string &string() const { return string_value; }
+      std::vector<param_value> &array() { return array_value; }
+      const std::vector<param_value> &array() const { return array_value; }
       param_value(double e = 0.0) : pt(REAL_VALUE), real_value(e) {}
       param_value(std::string s) : pt(STRING_VALUE), real_value(0.0),
-                                  string_value(s) {}
+                                   string_value(s) {}
       param_value(char *s) :  pt(STRING_VALUE), real_value(0.0),
-                             string_value(s) {}
+                              string_value(s) {}
       param_value(param_type p): pt(p), real_value(0.0) {}
     };
-    
+
   protected :
 
     std::map<std::string, param_value> parameters;
@@ -161,7 +161,7 @@ namespace bgeot
     std::string current_file;
 
     int get_next_token(std::istream &f);
-    void valid_token(void);
+    void valid_token();
     std::string temp_string;
     param_value read_expression_list(std::istream &f, bool skipped);
     param_value read_expression(std::istream &f, bool skipped);
@@ -170,31 +170,30 @@ namespace bgeot
     void parse_error(const std::string &t);
     void syntax_error(const std::string &t);
     void do_bin_op(std::vector<md_param::param_value> &value_list,
-                  std::vector<int> &op_list, std::vector<int> &prior_list);
-    
+                   std::vector<int> &op_list, std::vector<int> &prior_list);
+
   public :
-      
 
     double real_value(const std::string &name, const char *comment = 0);
     long int_value(const std::string &name, const char *comment = 0);
     const std::string &string_value(const std::string &name,
-                                   const char *comment = 0);
+                                    const char *comment = 0);
     const std::vector<param_value> &array_value(const std::string &name,
-                                               const char *comment = 0);
+                                                const char *comment = 0);
     void add_int_param(const std::string &name, long e)
     { parameters[name] = param_value(double(e)); }
     void add_real_param(const std::string &name, double e)
     { parameters[name] = param_value(e); }
     void add_string_param(const std::string &name, const std::string &s)
     { parameters[name] = param_value(s); }
- 
+
     // todo : add the access to the arrays
-    
+
     void read_param_file(std::istream &f);
 
-    /** Read the parameters on the command line. If a name is found the 
-     * corresponding .param file is searched. If a -dNOM=VALUE is found
-     * (or -d NOM=VALUE), it is evaluated.
+    /** Read the parameters on the command line. If a name is found the
+     *  corresponding .param file is searched. If a -dNOM=VALUE is found
+     *  (or -d NOM=VALUE), it is evaluated.
      */
     void read_command_line(int argc, char *argv[]);
   };
@@ -203,7 +202,7 @@ namespace bgeot
 
 
 namespace ftool {
- 
+
   // For compatibility with Getfem 2.0
 
   using bgeot::md_param;
diff --git a/src/getfem/bgeot_geometric_trans.h 
b/src/getfem/bgeot_geometric_trans.h
index 06ce13f..849f541 100644
--- a/src/getfem/bgeot_geometric_trans.h
+++ b/src/getfem/bgeot_geometric_trans.h
@@ -115,17 +115,17 @@ namespace bgeot {
   public :
 
     /// Dimension of the reference element.
-    dim_type dim(void) const { return cvr->structure()->dim(); }
+    dim_type dim() const { return cvr->structure()->dim(); }
     /// True if the transformation is linear (affine in fact).
-    bool is_linear(void) const { return is_lin; }
+    bool is_linear() const { return is_lin; }
     /// Number of geometric nodes.
-    size_type nb_points(void) const { return cvr->nb_points(); }
+    size_type nb_points() const { return cvr->nb_points(); }
     /// Pointer on the convex of reference.
-    pconvex_ref convex_ref(void) const { return cvr; }
+    pconvex_ref convex_ref() const { return cvr; }
     /// Structure of the reference element.
-    pconvex_structure structure(void) const { return cvr->structure(); }
+    pconvex_structure structure() const { return cvr->structure(); }
     /// Basic structure of the reference element.
-    pconvex_structure basic_structure(void) const
+    pconvex_structure basic_structure() const
     { return bgeot::basic_structure(cvr->structure()); }
     /// Gives the value of the functions vector at a certain point.
     virtual void poly_vector_val(const base_node &pt, base_vector &val) const 
= 0;
@@ -142,17 +142,17 @@ namespace bgeot {
     /// compute K matrix from multiplication of G with gradient
     virtual void compute_K_matrix(const base_matrix &G, const base_matrix &pc, 
base_matrix &K) const;
     /// Gives the number of vertices.
-    size_type nb_vertices(void) const { return vertices_.size(); }
+    size_type nb_vertices() const { return vertices_.size(); }
     /// Gives the indices of vertices between the nodes.
-    const std::vector<size_type> &vertices(void) const { return vertices_; }
+    const std::vector<size_type> &vertices() const { return vertices_; }
     /// Gives the array of geometric nodes (on reference convex)
-    const stored_point_tab &geometric_nodes(void) const
+    const stored_point_tab &geometric_nodes() const
     { return cvr->points(); }
     /// Gives the array of geometric nodes (on reference convex)
-    pstored_point_tab pgeometric_nodes(void) const
+    pstored_point_tab pgeometric_nodes() const
     { return cvr->pspt(); }
     /// Gives the array of the normals to faces (on reference convex)
-    const std::vector<base_small_vector> &normals(void) const
+    const std::vector<base_small_vector> &normals() const
     { return cvr->normals(); }
     /** Apply the geometric transformation to point pt,
         PTAB contains the points of the real convex */
@@ -161,7 +161,7 @@ namespace bgeot {
     base_node transform(const base_node &pt, const base_matrix &G) const;
     /** Compute the gradient at point x, pc is resized to [nb_points() x dim()]
         if the transformation is linear, x is not used at all */
-    size_type complexity(void) const { return complexity_; }
+    size_type complexity() const { return complexity_; }
     virtual ~geometric_trans()
     { DAL_STORED_OBJECT_DEBUG_DESTROYED(this, "Geometric transformation"); }
     geometric_trans()
@@ -233,7 +233,7 @@ namespace bgeot {
 
      List of possible names:
      GT_PK(N,K)   : Transformation on simplexes, dim N, degree K
-     
+
      GT_QK(N,K)   : Transformation on parallelepipeds, dim N, degree K
      GT_PRISM(N,K)          : Transformation on prisms, dim N, degree K
      GT_Q2_INCOMPLETE(N)    : Q2 incomplete transformation in dim N=2 or 3.
@@ -409,7 +409,7 @@ namespace bgeot {
     mutable base_vector aux1, aux2;
     mutable std::vector<int> ipvt;
     mutable bool have_J_, have_B_, have_B3_, have_B32_, have_K_;
-    void compute_J(void) const;
+    void compute_J() const;
   public:
     bool have_xref() const { return !xref_.empty(); }
     bool have_xreal() const { return !xreal_.empty(); }
@@ -434,47 +434,50 @@ namespace bgeot {
     const base_matrix& G() const { return *G_; }
     /** get the Jacobian of the geometric trans (taken at point @c xref() ) */
     scalar_type J() const { if (!have_J_) compute_J(); return J_; }
-    size_type N() const { if (have_G()) return G().nrows();
+    size_type N() const {
+      if (have_G()) return G().nrows();
       else if (have_xreal()) return xreal_.size();
-      else GMM_ASSERT2(false, "cannot get N"); return 0; }
+      else GMM_ASSERT2(false, "cannot get N");
+      return 0;
+    }
     size_type ii() const { return ii_; }
     bgeot::pgeotrans_precomp pgp() const { return pgp_; }
     /** change the current point (assuming a geotrans_precomp_ is used) */
     void set_ii(size_type ii__) {
       if (ii_ != ii__) {
-       if (pgt_ && !pgt()->is_linear())
-         { have_K_ = have_B_ = have_B3_ = have_B32_ = have_J_ = false; }
-       xref_.resize(0); xreal_.resize(0);
-       ii_=ii__;
+        if (pgt_ && !pgt()->is_linear())
+          { have_K_ = have_B_ = have_B3_ = have_B32_ = have_J_ = false; }
+        xref_.resize(0); xreal_.resize(0);
+        ii_=ii__;
       }
     }
     /** change the current point (coordinates given in the reference convex) */
     void set_xref(const base_node& P);
     void change(bgeot::pgeotrans_precomp pgp__,
-               size_type ii__,
-               const base_matrix& G__) {
+                size_type ii__,
+                const base_matrix& G__) {
       G_ = &G__; pgt_ = pgp__->get_trans(); pgp_ = pgp__;
       pspt_ = pgp__->get_ppoint_tab(); ii_ = ii__;
       have_J_ = have_B_ = have_B3_ = have_B32_ = have_K_ = false;
       xref_.resize(0); xreal_.resize(0);
     }
     void change(bgeot::pgeometric_trans pgt__,
-               bgeot::pstored_point_tab pspt__,
-               size_type ii__,
-               const base_matrix& G__) {
+                bgeot::pstored_point_tab pspt__,
+                size_type ii__,
+                const base_matrix& G__) {
       G_ = &G__; pgt_ = pgt__; pgp_ = 0; pspt_ = pspt__; ii_ = ii__;
       have_J_ = have_B_ = have_B3_ = have_B32_ = have_K_ = false;
       xref_.resize(0); xreal_.resize(0);
     }
     void change(bgeot::pgeometric_trans pgt__,
-               const base_node& xref__,
-               const base_matrix& G__) {
+                const base_node& xref__,
+                const base_matrix& G__) {
       xref_ = xref__; G_ = &G__; pgt_ = pgt__; pgp_ = 0; pspt_ = 0;
       ii_ = size_type(-1);
       have_J_ = have_B_ = have_B3_ = have_B32_ = have_K_ = false;
       xreal_.resize(0);
     }
-    
+
     geotrans_interpolation_context()
       : G_(0), pgt_(0), pgp_(0), pspt_(0), ii_(size_type(-1)),
       have_J_(false), have_B_(false), have_B3_(false), have_B32_(false),
@@ -502,7 +505,7 @@ namespace bgeot {
 
   /* Function allowing the add of an geometric transformation method outwards
      of getfem_integration.cc */
-  
+
   typedef dal::naming_system<geometric_trans>::param_list gt_param_list;
 
   void APIDECL add_geometric_trans_name
@@ -519,12 +522,12 @@ namespace bgeot {
   // Optimized matrix mult for small matrices.
   // Multiply the matrix A of size MxN by B of size NxP in C of size MxP
   void mat_mult(const scalar_type *A, const scalar_type *B, scalar_type *C,
-               size_type M, size_type N, size_type P);
+                size_type M, size_type N, size_type P);
   // Optimized matrix mult for small matrices.
   // Multiply the matrix A of size MxN by the transpose of B of size PxN
   // in C of size MxP
   void mat_tmult(const scalar_type *A, const scalar_type *B, scalar_type *C,
-                size_type M, size_type N, size_type P);
+                 size_type M, size_type N, size_type P);
 
 }  /* end of namespace bgeot.                                             */
 
diff --git a/src/getfem/bgeot_mesh_structure.h 
b/src/getfem/bgeot_mesh_structure.h
index 50e46ed..1e954c4 100644
--- a/src/getfem/bgeot_mesh_structure.h
+++ b/src/getfem/bgeot_mesh_structure.h
@@ -50,9 +50,9 @@ namespace bgeot {
     pconvex_structure cstruct;       /* type of convex.                  */
     ind_pt_ct pts;                   /* point list indices.               */
 
-    pconvex_structure structure(void) const { return cstruct; }
-    pconvex_structure &structure(void) { return cstruct; }
-    mesh_convex_structure(void) : cstruct(0) {}
+    pconvex_structure structure() const { return cstruct; }
+    pconvex_structure &structure() { return cstruct; }
+    mesh_convex_structure() : cstruct(0) {}
   };
 
   struct convex_face
@@ -88,18 +88,18 @@ namespace bgeot {
   public :
 
     /// Return the list of valid convex IDs
-    const dal::bit_vector &convex_index(void) const
+    const dal::bit_vector &convex_index() const
       { return convex_tab.index(); }
     /// Return the list of valid convex IDs of a given dimension
     dal::bit_vector convex_index(dim_type) const;
     /// The total number of convexes in the mesh
-    size_type nb_convex(void) const { return convex_tab.card(); }
+    size_type nb_convex() const { return convex_tab.card(); }
     /// The number of convex indexes from 0 to the index of the last convex
     size_type nb_allocated_convex() const
       { return convex_tab.index().last_true()+1; }
     /// Return true if i is in convex_index()
     bool is_convex_valid(size_type i) { return (convex_tab.index())[i]; }
-    size_type nb_max_points(void) const { return points_tab.size(); }
+    size_type nb_max_points() const { return points_tab.size(); }
     /// Return true if the point i is used by at least one convex
     bool is_point_valid(size_type i) const { return !(points_tab[i].empty()); }
     /** Return a container to the list of points attached to convex ic.
@@ -156,7 +156,7 @@ namespace bgeot {
     void to_faces(dim_type n);
     /** build a new mesh, such that its convexes are the edges of the
         convexes of the previous one */
-    void to_edges(void);
+    void to_edges();
 
     size_type nb_convex_with_edge(size_type i1, size_type i2);
     void convex_with_edge(size_type i1, size_type i2,
@@ -185,13 +185,13 @@ namespace bgeot {
     ind_pt_face_ct ind_points_of_face_of_convex(size_type ic,
                                                 short_type f) const;
 
-    size_type memsize(void) const;
+    size_type memsize() const;
     /** Reorder the convex IDs and point IDs, such that there is no
         hole in their numbering. */
-    void optimize_structure(void);
+    void optimize_structure();
     /// erase the mesh
-    void clear(void);
-    void stat(void);
+    void clear();
+    void stat();
 
     /** Return in s a list of neighbours of a given convex face.
         @param ic the convex id.
@@ -201,14 +201,14 @@ namespace bgeot {
     void neighbours_of_convex(size_type ic, short_type f, ind_set &s) const;
 
     /** Return in s a list of neighbours of a given convex sharing the
-       intersection of a given list of faces
+        intersection of a given list of faces
         @param ic the convex id.
         @param f the face number of the convex.
         @param s the resulting ind_set.
      */
     void neighbours_of_convex(size_type ic,
-                             const std::vector<short_type> &ftab,
-                             ind_set &s) const;
+                              const std::vector<short_type> &ftab,
+                              ind_set &s) const;
 
     /** Return a list of neighbours of a given convex.
         @param ic the convex id.
@@ -253,7 +253,7 @@ namespace bgeot {
 
   /** Return the cuthill_mc_kee ordering on the convexes */
   void APIDECL cuthill_mckee_on_convexes(const bgeot::mesh_structure &ms,
-                                        std::vector<size_type> &cmk);
+                                         std::vector<size_type> &cmk);
 
   template<class ITER>
     bool mesh_structure::is_convex_having_points(size_type ic,
@@ -329,7 +329,7 @@ namespace bgeot {
     }
     edge_list_elt(size_type ii, size_type jj, size_type ic = 0) : cv(ic)
     { i = std::min(ii, jj); j = std::max(ii, jj); }
-    edge_list_elt(void) {}
+    edge_list_elt() {}
   };
 
   typedef dal::dynamic_tree_sorted<edge_list_elt> edge_list;
diff --git a/src/getfem/getfem_mesh_fem.h b/src/getfem/getfem_mesh_fem.h
index 2ec585a..90cd7e2 100644
--- a/src/getfem/getfem_mesh_fem.h
+++ b/src/getfem/getfem_mesh_fem.h
@@ -461,7 +461,7 @@ namespace getfem {
         @param f the face number.
     */
     virtual size_type nb_basic_dof_of_face_of_element(size_type cv,
-                                                     short_type f) const {
+                                                      short_type f) const {
       context_check(); if (!dof_enumeration_made) enumerate_dof();
       pfem pf = f_elems[cv];
       return dof_structure.structure_of_convex(cv)->nb_points_of_face(f)
@@ -638,8 +638,8 @@ namespace getfem {
   void slice_vector_on_basic_dof_of_element(const mesh_fem &mf,
                                             const VEC1 &vec,
                                             size_type cv, VEC2 &coeff,
-                                           size_type qmult1 = size_type(-1),
-                                           size_type qmult2 = size_type(-1)) {
+                                            size_type qmult1 = size_type(-1),
+                                            size_type qmult2 = size_type(-1)) {
     if (qmult1 == size_type(-1)) {
       size_type nbdof = mf.nb_basic_dof();
       qmult1 = gmm::vect_size(vec) / nbdof;
@@ -652,15 +652,15 @@ namespace getfem {
     size_type qmultot = qmult1*qmult2;
     auto &ct = mf.ind_scalar_basic_dof_of_element(cv);
     gmm::resize(coeff, ct.size()*qmultot);
-    
+
     auto it = ct.begin();
     auto itc = coeff.begin();
     if (qmultot == 1) {
       for (; it != ct.end(); ++it) *itc++ = vec[*it];
     } else {
       for (; it != ct.end(); ++it) {
-       auto itv = vec.begin()+(*it)*qmult1;
-       for (size_type m = 0; m < qmultot; ++m) *itc++ = *itv++;
+        auto itv = vec.begin()+(*it)*qmult1;
+        for (size_type m = 0; m < qmultot; ++m) *itc++ = *itv++;
       }
     }
   }
diff --git a/src/getfem_assembling_tensors.cc b/src/getfem_assembling_tensors.cc
index 4dd48e2..9c28557 100644
--- a/src/getfem_assembling_tensors.cc
+++ b/src/getfem_assembling_tensors.cc
@@ -26,11 +26,11 @@ extern "C" void daxpy_(const int *n, const double *alpha, 
const double *x,
                        const int *incx, double *y, const int *incy);
 
 namespace getfem {
-  size_type vdim_specif_list::nb_mf() const { 
+  size_type vdim_specif_list::nb_mf() const {
     return std::count_if(begin(),end(),
       std::mem_fun_ref(&vdim_specif::is_mf_ref));
   }
-  size_type vdim_specif_list::nbelt() const { 
+  size_type vdim_specif_list::nbelt() const {
     size_type sz = 1;
     for (const_iterator it = begin(); it != end(); ++it) sz *= (*it).dim;
     return sz;
@@ -39,10 +39,10 @@ namespace getfem {
     (size_type cv, tensor_ranges& r, std::vector<tensor_strides >& str) const {
       stride_type s = 1, cnt = 0;
       str.resize(size());
-      r.resize(size());      
+      r.resize(size());
       for (const_iterator it = begin(); it != end(); ++it, ++cnt) {
         if ((*it).is_mf_ref()) {
-          r[cnt] = unsigned((*it).pmf->nb_basic_dof_of_element(cv)); 
+          r[cnt] = unsigned((*it).pmf->nb_basic_dof_of_element(cv));
           //mesh_fem::ind_dof_ct::const_iterator ii
           //       = (*it).pmf->ind_basic_dof_of_element(cv).begin();
           str[cnt].resize(r[cnt]);
@@ -65,7 +65,7 @@ namespace getfem {
       child(i).merge_required_shape(tensor_shape(child(i).ranges()));
     }
   }
-  void ATN::set_number(unsigned &gcnt) { 
+  void ATN::set_number(unsigned &gcnt) {
     if (number_ == unsigned(-1)) {
       for (unsigned i=0; i < nchilds(); ++i) child(i).set_number(gcnt);
       number_ = ++gcnt;
@@ -76,7 +76,7 @@ namespace getfem {
     return child(0).is_zero_size();
   }
 
-  /* 
+  /*
   general class for tensor who store their data
   */
   class ATN_tensor_w_data : public ATN_tensor {
@@ -94,12 +94,12 @@ namespace getfem {
     tr.init_strides();
     if (tr.card() > 10000000) {
       cerr << "warning, a tensor of size " << tr.card()
-        << " will be created, it needs " 
+        << " will be created, it needs "
         << tr.card()*sizeof(scalar_type) << " bytes of memory\n";
     }
     if (tr.card() == 0) {
       cerr << "WARNING: tensor " << name()
-        << " will be created with a size of " 
+        << " will be created with a size of "
         << ranges() << " and 0 non-null elements!" << endl;
     }
     data.resize(tr.card());
@@ -108,7 +108,7 @@ namespace getfem {
   }
 
 
-  /* 
+  /*
   general class for the computation of a reduced product of tensors
   (templated by the number of product tensors)
 
@@ -120,7 +120,7 @@ namespace getfem {
   class ATN_reduced_tensor : public ATN_tensor_w_data {
     /* used for specification of tensors and reduction indices , see below */
     reduced_tensor_arg_type red;
-    bgeot::tensor_reduction tred;    
+    bgeot::tensor_reduction tred;
   public:
     void check_shape_update(size_type , dim_type) {
       shape_updated_ = false;
@@ -135,9 +135,9 @@ namespace getfem {
           std::string s = red_n(i);
           if (s.size() != child(i).ranges().size()) {
             ASM_THROW_TENSOR_ERROR("wrong number of indexes for the "
-              << int(i+1) 
+              << int(i+1)
               << "th argument of the reduction "
-              << name() 
+              << name()
               << " (ranges=" << child(i).ranges() << ")");
           }
           for (size_type j=0; j < s.length(); ++j) {
@@ -147,7 +147,7 @@ namespace getfem {
       }
     }
     void update_childs_required_shape() {
-      /* pourrait etre mieux, cf les commentaires de la fonction 
+      /* pourrait etre mieux, cf les commentaires de la fonction
       tensor_reduction::required_shape */
       for (dim_type n=0; n < nchilds(); ++n) {
         tensor_shape ts(child(n).ranges());
@@ -157,32 +157,32 @@ namespace getfem {
         for (unsigned i=0; i < rn.size(); ++i)
           if (s[i] != ' ') {
             size_type p = s.find(s[i]);
-            if (p != size_type(-1) && p < i && rn[p] != rn[i]) 
+            if (p != size_type(-1) && p < i && rn[p] != rn[i])
               ASM_THROW_TENSOR_ERROR("can't reduce the diagonal of a tensor "
               "of size " << rn << " with '"
               << s << "'");
           }
-       //cerr << "ts = " << child(n).ranges() << ", red="
-       //     << red[n].second << "\n";
-       bgeot::tensor_reduction::diag_shape(ts, red[n].second);
-       // cerr << "REDUCTION '" << red[n].second
-       //        << "' -> sending required to child#" << int(n)
-       //      << " " << child(n).name() << ":" << endl;
-       // cerr << ts << endl;
-       child(n).merge_required_shape(ts);
-       // cerr << "------>required shape is now: "
-       //      << child(n).required_shape() << endl;
+          //cerr << "ts = " << child(n).ranges() << ", red="
+          //     << red[n].second << "\n";
+          bgeot::tensor_reduction::diag_shape(ts, red[n].second);
+          // cerr << "REDUCTION '" << red[n].second
+          //        << "' -> sending required to child#" << int(n)
+          //        << " " << child(n).name() << ":" << endl;
+          // cerr << ts << endl;
+          child(n).merge_required_shape(ts);
+          // cerr << "------>required shape is now: "
+          //      << child(n).required_shape() << endl;
       }
     }
 
-    /* 
-    r is a container of pair<vtensor&,std::string> 
+    /*
+    r is a container of pair<vtensor&,std::string>
     where the strings specify the reduction indices:
 
-    if a_{ik}b_{kjl} is reduced against k and l, then the strings are 
+    if a_{ik}b_{kjl} is reduced against k and l, then the strings are
     " k" and "k l"
     */
-    ATN_reduced_tensor(reduced_tensor_arg_type& r) : red(r) { 
+    ATN_reduced_tensor(reduced_tensor_arg_type& r) : red(r) {
       for (size_type i=0; i < r.size(); ++i) add_child(*red[i].first);
     }
 
@@ -197,19 +197,19 @@ namespace getfem {
     void reinit_() {
       tred.clear();
       for (dim_type i=0; i < red.size(); ++i) {
-        // cerr << "ATN_reduced_tensor::reinit : insertion of r(" << red_n(i) 
+        // cerr << "ATN_reduced_tensor::reinit : insertion of r(" << red_n(i)
         //      << "), tr[" << red[i].first->ranges() << "\n"
         //      << red[i].first->tensor() << endl;*/
         // if (red[i].first->ranges().size() != red_n(i).length()) {
         // ASM_THROW_TENSOR_ERROR("wrong number of indexes for the "
         //                        << int(i+1)
-        //                        << "th argument of the reduction " << name() 
+        //                        << "th argument of the reduction " << name()
         //                        << " (ranges=" << red[i].first->ranges()
         //                        << ")");
         // }
         tred.insert(red[i].first->tensor(), red_n(i));
       }
-      /* reserve the memory for the output 
+      /* reserve the memory for the output
       the memory is set to zero since the reduction may only affect a
       subset of this tensor hence a part of it would not be initialized
       */
@@ -227,7 +227,7 @@ namespace getfem {
   };
 
 
-  /* slice tensor: 
+  /* slice tensor:
   no need of a temporary storage for the slice, since direct access
   can be provided via strides.
   */
@@ -236,7 +236,7 @@ namespace getfem {
     size_type slice_idx;
   public:
     ATN_sliced_tensor(ATN_tensor& a, dim_type slice_dim_,
-      size_type slice_idx_) : 
+      size_type slice_idx_) :
     slice_dim(slice_dim_), slice_idx(slice_idx_)  { add_child(a); }
     void check_shape_update(size_type , dim_type) {
       if ((shape_updated_ = child(0).is_shape_updated())) {
@@ -250,7 +250,7 @@ namespace getfem {
       }
     }
     void update_childs_required_shape() {
-      tensor_shape ts = req_shape; 
+      tensor_shape ts = req_shape;
       ts.set_ndim_noclean(dim_type(ts.ndim()+1));
       ts.shift_dim_num_ge(slice_dim,+1);
       ts.push_mask(tensor_mask(child(0).ranges()[slice_dim],
@@ -280,8 +280,8 @@ namespace getfem {
       if ((shape_updated_ = child(0).is_shape_updated())) {
         if (reorder.size() != child(0).ranges().size())
           ASM_THROW_TENSOR_ERROR("can't reorder tensor '" << name()
-                                << "' of dimensions " << child(0).ranges() << 
-                                " with this permutation: " << vref(reorder));
+                                 << "' of dimensions " << child(0).ranges()
+                                 << " with this permutation: " << 
vref(reorder));
         r_.resize(reorder.size());
         std::fill(r_.begin(), r_.end(), dim_type(-1));
 
@@ -305,14 +305,14 @@ namespace getfem {
   };
 
   /* diagonal tensor: take the "diagonal" of a tensor
-  (ie diag(t(i,j,k), {i,k}) == t(i,j,i)) 
+  (ie diag(t(i,j,k), {i,k}) == t(i,j,i))
 
-  /!\ the number of dimensions DO NOT change 
+  /!\ the number of dimensions DO NOT change
   */
   class ATN_diagonal_tensor : public ATN_tensor {
     dim_type i1, i2;
   public:
-    ATN_diagonal_tensor(ATN_tensor& a, dim_type i1_, dim_type i2_) : 
+    ATN_diagonal_tensor(ATN_tensor& a, dim_type i1_, dim_type i2_) :
       i1(i1_), i2(i2_) { add_child(a); }
   private:
     void check_shape_update(size_type , dim_type) {
@@ -320,7 +320,7 @@ namespace getfem {
         if (i1 >= child(0).ranges().size() || i2 >= child(0).ranges().size() ||
           i1 == i2 || child(0).ranges()[i1] != child(0).ranges()[i2])
           ASM_THROW_TENSOR_ERROR("can't take the diagonal of a tensor of "
-          "sizes " << child(0).ranges() << 
+          "sizes " << child(0).ranges() <<
           " at indexes " << int(i1) << " and "
           << int(i2));
         r_ = child(0).ranges();
@@ -337,7 +337,7 @@ namespace getfem {
   };
 
   /* called (if possible, i.e. if not an exact integration) for each
-  integration point during mat_elem->compute() */    
+  integration point during mat_elem->compute() */
   struct computed_tensor_integration_callback
     : public mat_elem_integration_callback {
       bgeot::tensor_reduction red;
@@ -345,11 +345,11 @@ namespace getfem {
       std::vector<TDIter> tensor_bases; /* each tref of 'red' has a   */
       /* reference into this vector */
       virtual void exec(bgeot::base_tensor &t, bool first, scalar_type c) {
-        if (first) { 
+        if (first) {
           resize_t(t);
           std::fill(t.begin(), t.end(), 0.);
           was_called = true;
-        }      
+        }
         assert(t.size());
         for (unsigned k=0; k!=eltm.size(); ++k) { /* put in the 'if (first)' ? 
*/
           tensor_bases[k] = const_cast<TDIter>(&(*eltm[k]->begin()));
@@ -360,11 +360,11 @@ namespace getfem {
           &one, (double*)&(t[0]), &one);
       }
       void resize_t(bgeot::base_tensor &t) {
-        bgeot::multi_index r; 
+        bgeot::multi_index r;
         if (red.reduced_range.size())
           r.assign(red.reduced_range.begin(), red.reduced_range.end());
         else { r.resize(1); r[0]=1; }
-        t.adjust_sizes(r);      
+        t.adjust_sizes(r);
       }
   };
 
@@ -392,7 +392,7 @@ namespace getfem {
       NONLIN=7, DATA=8 } op_type;
     typedef enum { PLAIN_SHAPE = 0, VECTORIZED_SHAPE = 1,
       MATRIXIZED_SHAPE = 2 } field_shape_type;
-    op_type op; /* the numerical values indicates the number 
+    op_type op; /* the numerical values indicates the number
                 of dimensions in the tensor */
     field_shape_type vshape; /* VECTORIZED_SHAPE if vectorization was required
                              (adds an addiational dimension to the tensor
@@ -416,9 +416,9 @@ namespace getfem {
       field_shape_type fst) :
     nlt(0), pmf(pmf_), owner(ow), data(0), op(op_), vshape(fst) { }
     mf_comp(mf_comp_vect *ow, const std::vector<const mesh_fem*> vmf,
-      pnonlinear_elem_term nlt_) : 
+      pnonlinear_elem_term nlt_) :
     nlt(nlt_), pmf(vmf[0]), owner(ow), data(0),
-      auxmf(vmf.begin()+1, vmf.end()), op(NONLIN), 
+      auxmf(vmf.begin()+1, vmf.end()), op(NONLIN),
       vshape(PLAIN_SHAPE) { }
     mf_comp(mf_comp_vect *ow, ATN_tensor *t) :
       nlt(0), pmf(0), owner(ow), data(t), op(DATA), vshape(PLAIN_SHAPE) {}
@@ -456,8 +456,8 @@ namespace getfem {
         }
         break;
       case DATA:
-        for (unsigned i=0; i < data->ranges().size(); ++i) 
-          if (!only_reduced || !reduced(i)) 
+        for (unsigned i=0; i < data->ranges().size(); ++i)
+          if (!only_reduced || !reduced(i))
             rng.push_back(data->ranges()[i]);
         break;
       case NORMAL:
@@ -477,7 +477,7 @@ namespace getfem {
         unsigned d = 0;
         if (!only_reduced || !reduced(d))
           rng.push_back(unsigned(pmf->nb_basic_dof_of_element(cv)));
-        ++d; 
+        ++d;
         if (vshape == mf_comp::VECTORIZED_SHAPE) {
           if (!only_reduced || !reduced(d)) rng.push_back(pmf->get_qdim());
           ++d;
@@ -489,7 +489,7 @@ namespace getfem {
           }
           ++d;
           if (!only_reduced || !reduced(d)) 
rng.push_back(dim_type(pmf->get_qdims()[1]));
-          ++d;   
+          ++d;
         }
 
         if (op == GRAD || op == HESS) {
@@ -515,11 +515,11 @@ namespace getfem {
     pintegration_method pim;
     bgeot::pgeometric_trans pgt;
     base_tensor t;
-    std::vector<scalar_type> data;     
+    std::vector<scalar_type> data;
     TDIter data_base;
     stride_type tsize;
     dal::bit_vector req_bv;  /* bit_vector of values the mat_elem has to 
compute
-                             (useful when only a subset is required from the 
+                             (useful when only a subset is required from the
                              possibly very large elementary tensor) */
     bool has_inline_reduction; /* true if used with reductions inside the 
comp, for example:
                                "comp(Grad(#1)(:,i).Grad(#2)(:,i))" */
@@ -528,20 +528,20 @@ namespace getfem {
     /* if inline reduction are to be done, but were not possible (i.e. if exact
     integration was used) then a fallback is used: apply the reduction
     afterward, on the large expanded tensor */
-    bgeot::tensor_reduction fallback_red; 
+    bgeot::tensor_reduction fallback_red;
     bool fallback_red_uptodate;
     TDIter fallback_base;
 
     size_type cv_shape_update;
     //mat_elem_inline_reduction inline_red;
   public:
-    ATN_computed_tensor(const mf_comp_vect &mfcomp_) : 
-      mfcomp(mfcomp_), pmec(0), pme(0), pim(0), pgt(0), data_base(0) { 
+    ATN_computed_tensor(const mf_comp_vect &mfcomp_) :
+      mfcomp(mfcomp_), pmec(0), pme(0), pim(0), pgt(0), data_base(0) {
         has_inline_reduction = false;
         bool in_data = false;
         for (size_type i=0; i < mfcomp.size(); ++i) {
           if (mfcomp[i].reduction.size() || mfcomp[i].op == mf_comp::DATA) {
-            has_inline_reduction = true; 
+            has_inline_reduction = true;
             if (mfcomp[i].op == mf_comp::DATA) { add_child(*mfcomp[i].data); 
in_data = true; }
           }
           if (mfcomp[i].op != mf_comp::DATA && in_data) {
@@ -551,7 +551,7 @@ namespace getfem {
         }
     }
 
-  private:    
+  private:
     /* mostly for non-linear terms, such as a 3x3x3x3 tensor which may have
     many symmetries or many null elements..  in that case, it is preferable
     for getfem_mat_elem to handle only a sufficient subset of the tensor,
@@ -562,7 +562,7 @@ namespace getfem {
       assert(d < rng.size());
       tensor_strides v;
       index_type r = rng[d];
-      tensor_mask m; m.set_full(d, r);     
+      tensor_mask m; m.set_full(d, r);
       v.resize(r);
       for (index_type i=0; i < r; ++i) v[i] = s*i;
       assert(tref.masks().size() == tref.strides().size());
@@ -575,7 +575,7 @@ namespace getfem {
     /* append a vectorized dimension to tref -- works also for cases
     where target_dim > 1
     */
-    stride_type add_vdim(const tensor_ranges& rng, dim_type d, 
+    stride_type add_vdim(const tensor_ranges& rng, dim_type d,
       index_type target_dim, stride_type s,
       tensor_ref &tref) {
         assert(d < rng.size()-1);
@@ -606,13 +606,13 @@ namespace getfem {
     /* append a matrixized dimension to tref -- works also for cases
     where target_dim > 1 (in that case the rows are "vectorized")
 
-    for example, the Base(RT0) in 2D (3 dof, target_dim=2) is: 
-    [0 1 2; 
+    for example, the Base(RT0) in 2D (3 dof, target_dim=2) is:
+    [0 1 2;
     3 4 5]
 
 
     if we set it in a mesh_fem of qdim = 3x2 , we produce the sparse
-    elementary tensor 9x3x2 = 
+    elementary tensor 9x3x2 =
 
     x x x y y y
     0 . . 3 . . <- phi1
@@ -626,7 +626,7 @@ namespace getfem {
     . . 2 . . 5 <- phi9
 
     */
-    stride_type add_mdim(const tensor_ranges& rng, dim_type d, 
+    stride_type add_mdim(const tensor_ranges& rng, dim_type d,
       index_type target_dim, stride_type s, tensor_ref &tref) {
         assert(d < rng.size()-2);
 
@@ -634,7 +634,7 @@ namespace getfem {
         index_type r = rng[d], q=rng[d+1], p=rng[d+2];
         index_type qmult = (q*p)/target_dim;
 
-        assert(r % q == 0); 
+        assert(r % q == 0);
         assert(p % target_dim == 0);
         assert(r % (p/target_dim) == 0);
 
@@ -680,18 +680,18 @@ namespace getfem {
         case mf_comp::HESS: pme2 = mat_elem_hessian(fem); break;
         case mf_comp::NORMAL: pme2 = mat_elem_unit_normal(); break;
         case mf_comp::GRADGT:
-        case mf_comp::GRADGTINV: 
+        case mf_comp::GRADGTINV:
           pme2 = mat_elem_grad_geotrans(mfcomp[i].op == mf_comp::GRADGTINV);
           break;
         case mf_comp::NONLIN: {
-          std::vector<pfem> ftab(1+mfcomp[i].auxmf.size()); 
+          std::vector<pfem> ftab(1+mfcomp[i].auxmf.size());
           ftab[0] = fem;
-          for (unsigned k=0; k < mfcomp[i].auxmf.size(); ++k) 
+          for (unsigned k=0; k < mfcomp[i].auxmf.size(); ++k)
             ftab[k+1] = mfcomp[i].auxmf[k]->fem_of_element(cv);
           pme2 = mat_elem_nonlinear(mfcomp[i].nlt, ftab);
                               } break;
         case mf_comp::DATA: /*ignore*/;
-        } 
+        }
         if (pme == 0) pme = pme2;
         else pme = mat_elem_product(pme, pme2);
       }
@@ -702,8 +702,8 @@ namespace getfem {
 
 
 
-    size_type 
-      push_back_mfcomp_dimensions(size_type cv, const mf_comp& mc, 
+    size_type
+      push_back_mfcomp_dimensions(size_type cv, const mf_comp& mc,
       unsigned &d, const bgeot::tensor_ranges &rng,
       bgeot::tensor_ref &tref, size_type tsz=1) {
         if (mc.op == mf_comp::NONLIN) {
@@ -716,7 +716,7 @@ namespace getfem {
           d += tref.ndim();
         } else if (mc.op == mf_comp::NORMAL) {
           tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
-        } else if (mc.op == mf_comp::GRADGT || 
+        } else if (mc.op == mf_comp::GRADGT ||
           mc.op == mf_comp::GRADGTINV) {
             tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
             tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
@@ -764,17 +764,17 @@ namespace getfem {
         tensor_ranges rng;
         unsigned d = 0;
         mfcomp[i].push_back_dimensions(cv,rng);
-        push_back_mfcomp_dimensions(cv,mfcomp[i], d, rng, tref); 
+        push_back_mfcomp_dimensions(cv,mfcomp[i], d, rng, tref);
         assert(tref.ndim() == rng.size() && d == rng.size());
-        if (mfcomp[i].reduction.size() == 0) 
+        if (mfcomp[i].reduction.size() == 0)
           mfcomp[i].reduction.insert(size_type(0), tref.ndim(), ' ');
         if (mfcomp[i].op != mf_comp::DATA) /* should already have the correct 
base */
           tref.set_base(icb.tensor_bases[i]);
         tref.update_idx2mask();
         if (mfcomp[i].reduction.size() != tref.ndim()) {
-          ASM_THROW_TENSOR_ERROR("wrong number of indices for the "<< int(i+1) 
-            << "th argument of the reduction "<< name() 
-            << " (expected " << int(tref.ndim()) 
+          ASM_THROW_TENSOR_ERROR("wrong number of indices for the "<< int(i+1)
+            << "th argument of the reduction "<< name()
+            << " (expected " << int(tref.ndim())
             << " indexes, got "
             << mfcomp[i].reduction.size());
         }
@@ -782,7 +782,7 @@ namespace getfem {
       }
       icb.red.prepare();
       icb.red.result(tensor());
-      r_.resize(tensor().ndim()); 
+      r_.resize(tensor().ndim());
       for (dim_type i=0; i < tensor().ndim(); ++i) r_[i] = tensor().dim(i);
       tsize = tensor().card();
       //cerr << "update_shape_with_inline_reduction: tensor=" << tensor()
@@ -819,23 +819,23 @@ namespace getfem {
         if  (current_cv == size_type(-1)) {
           shape_updated_ = true; fem_changed = true;
         } else {
-          fem_changed = fem_changed || 
-            (mfcomp[i].pmf->fem_of_element(current_cv) != 
+          fem_changed = fem_changed ||
+            (mfcomp[i].pmf->fem_of_element(current_cv) !=
             mfcomp[i].pmf->fem_of_element(cv));
-          /* for FEM with non-constant nb_dof.. */ 
+          /* for FEM with non-constant nb_dof.. */
           shape_updated_ = shape_updated_ ||
-            (mfcomp[i].pmf->nb_basic_dof_of_element(current_cv) != 
-            mfcomp[i].pmf->nb_basic_dof_of_element(cv)); 
+            (mfcomp[i].pmf->nb_basic_dof_of_element(current_cv) !=
+            mfcomp[i].pmf->nb_basic_dof_of_element(cv));
         }
       }
       if (shape_updated_) {
         r_.resize(0);
         /* get the new ranges */
-        for (unsigned i=0; i < mfcomp.size(); ++i) 
+        for (unsigned i=0; i < mfcomp.size(); ++i)
           mfcomp[i].push_back_dimensions(cv, r_, true);
       }
       if (fem_changed || shape_updated_) {
-        /* build the new mat_elem structure */      
+        /* build the new mat_elem structure */
         update_pmat_elem(cv);
       }
       if (shape_updated_ || fem_changed || pgt != pgt2 || pim != pim2) {
@@ -898,17 +898,17 @@ namespace getfem {
           size_type fullsz = 1;
           for (unsigned j=0; j < mfcomp[i].data->ranges().size(); ++j)
             fullsz *= mfcomp[i].data->ranges()[j];
-          if (fullsz != size_type(mfcomp[i].data->tensor().card())) 
+          if (fullsz != size_type(mfcomp[i].data->tensor().card()))
             ASM_THROW_TENSOR_ERROR("aaarg inline reduction will explode with 
non-full tensors. "
             "Complain to the author, I was too lazy to do that properly");
         }
       }
       icb.was_called = false;
       if (face == dim_type(-1)) {
-        pmec->gen_compute(t, mim.linked_mesh().points_of_convex(cv), cv, 
+        pmec->gen_compute(t, mim.linked_mesh().points_of_convex(cv), cv,
           has_inline_reduction ? &icb : 0);
       } else {
-        pmec->gen_compute_on_face(t, mim.linked_mesh().points_of_convex(cv), 
face, cv, 
+        pmec->gen_compute_on_face(t, mim.linked_mesh().points_of_convex(cv), 
face, cv,
           has_inline_reduction ? &icb : 0);
       }
 
@@ -930,7 +930,7 @@ namespace getfem {
     tensor_ranges e_r;
     std::vector< tensor_strides > e_str;
   public:
-    ATN_tensor_from_dofs_data(const base_asm_data *basm_, 
+    ATN_tensor_from_dofs_data(const base_asm_data *basm_,
       const vdim_specif_list& d) :
     basm(basm_), vdim(d) {
     }
@@ -963,7 +963,7 @@ namespace getfem {
     }
   };
 
-  /* enforce symmetry of a 2D tensor 
+  /* enforce symmetry of a 2D tensor
   (requiring only the upper-triangle of its child and
   duplicating it) */
   class ATN_symmetrized_tensor : public ATN_tensor_w_data {
@@ -972,7 +972,7 @@ namespace getfem {
     ATN_symmetrized_tensor(ATN_tensor& a) { add_child(a); }
     void check_shape_update(size_type , dim_type) {
       if ((shape_updated_ = child(0).is_shape_updated())) {
-        if (child(0).ranges().size() != 2 || 
+        if (child(0).ranges().size() != 2 ||
           child(0).ranges()[0] != child(0).ranges()[1])
           ASM_THROW_TENSOR_ERROR("can't symmetrize a non-square tensor "
           "of sizes " << child(0).ranges());
@@ -981,7 +981,7 @@ namespace getfem {
     }
     void update_childs_required_shape() {
       tensor_shape ts = req_shape;
-      tensor_shape ts2 = req_shape; 
+      tensor_shape ts2 = req_shape;
       index_set perm(2); perm[0] = 1; perm[1] = 0; ts2.permute(perm);
       ts.merge(ts2, false);
       tensor_mask dm; dm.set_triangular(ranges()[0],0,1);
@@ -1014,7 +1014,7 @@ namespace getfem {
   public:
     ATN_unary_op_tensor(ATN_tensor& a) { add_child(a); }
     void check_shape_update(size_type , dim_type) {
-      if ((shape_updated_ = (ranges() != child(0).ranges())))      
+      if ((shape_updated_ = (ranges() != child(0).ranges())))
         r_ = child(0).ranges();
     }
   private:
@@ -1046,12 +1046,12 @@ namespace getfem {
       if ((shape_updated_ = child(0).is_shape_updated()))
         r_ = child(0).ranges();
       for (size_type i=1; i < nchilds(); ++i)
-        if (ranges() != child(i).ranges()) 
-          ASM_THROW_TENSOR_ERROR("can't add two tensors of sizes " << 
+        if (ranges() != child(i).ranges())
+          ASM_THROW_TENSOR_ERROR("can't add two tensors of sizes " <<
           ranges() << " and " << child(i).ranges());
     }
-    void apply_scale(scalar_type s) { 
-      for (size_type i=0; i < scales.size(); ++i) scales[i] *= s; 
+    void apply_scale(scalar_type s) {
+      for (size_type i=0; i < scales.size(); ++i) scales[i] *= s;
     }
     ATN_tensors_sum_scaled* is_tensors_sum_scaled() { return this; }
   private:
@@ -1085,7 +1085,7 @@ namespace getfem {
     multi_tensor_iterator mti;
     int sgn; /* v+t or v-t ? */
   public:
-    ATN_tensor_scalar_add(ATN_tensor& a, scalar_type v_, int sgn_) : 
+    ATN_tensor_scalar_add(ATN_tensor& a, scalar_type v_, int sgn_) :
       v(v_), sgn(sgn_) { add_child(a); }
     void check_shape_update(size_type , dim_type) {
       if ((shape_updated_ = child(0).is_shape_updated()))
@@ -1110,14 +1110,14 @@ namespace getfem {
   class ATN_print_tensor : public ATN {
     std::string name;
   public:
-    ATN_print_tensor(ATN_tensor& a, std::string n_) 
+    ATN_print_tensor(ATN_tensor& a, std::string n_)
       : name(n_) { add_child(a); }
   private:
     void reinit_() {}
     void exec_(size_type cv, dim_type face) {
       multi_tensor_iterator mti(child(0).tensor(), true);
       cout << "------- > evaluation of " << name << ", at" << endl;
-      cout << "convex " << cv; 
+      cout << "convex " << cv;
       if (face != dim_type(-1)) cout << ", face " << int(face);
       cout << endl;
       cout << "  size   = " << child(0).ranges() << endl;
@@ -1132,13 +1132,13 @@ namespace getfem {
   };
 
 
-  /* 
+  /*
   -------------------
   analysis of the supplied string
   -----------------
   */
 
-  std::string asm_tokenizer::syntax_err_print() {      
+  std::string asm_tokenizer::syntax_err_print() {
     std::string s;
     if (tok_pos - err_msg_mark > 80) err_msg_mark = tok_pos - 40;
     if (str.length() - err_msg_mark < 80) s = tok_substr(err_msg_mark, 
str.length());
@@ -1151,17 +1151,17 @@ namespace getfem {
     gmm::standard_locale sl;
     curr_tok_ival = -1;
     while (tok_pos < str.length() && isspace(str[tok_pos])) ++tok_pos;
-    if (tok_pos == str.length()) { 
+    if (tok_pos == str.length()) {
       curr_tok_type = END; tok_len = 0;
-    } else if (strchr("{}(),;:=-.*/+", str[tok_pos])) { 
+    } else if (strchr("{}(),;:=-.*/+", str[tok_pos])) {
       curr_tok_type = tok_type_enum(str[tok_pos]); tok_len = 1;
-    } else if (str[tok_pos] == '$' || str[tok_pos] == '#' || str[tok_pos] == 
'%') { 
-      curr_tok_type = str[tok_pos] == '$' ? ARGNUM_SELECTOR : 
+    } else if (str[tok_pos] == '$' || str[tok_pos] == '#' || str[tok_pos] == 
'%') {
+      curr_tok_type = str[tok_pos] == '$' ? ARGNUM_SELECTOR :
         (str[tok_pos] == '#' ? MFREF : IMREF);
-    tok_len = 1; 
+    tok_len = 1;
     curr_tok_ival = 0;
-    while (isdigit(str[tok_pos+tok_len])) { 
-      curr_tok_ival*=10; 
+    while (isdigit(str[tok_pos+tok_len])) {
+      curr_tok_ival*=10;
       curr_tok_ival += str[tok_pos+tok_len] - '0';
       ++tok_len;
     }
@@ -1184,7 +1184,7 @@ namespace getfem {
 
   const mesh_fem& generic_assembly::do_mf_arg_basic() {
     if (tok_type() != MFREF) ASM_THROW_PARSE_ERROR("expecting mesh_fem 
reference");
-    if (tok_mfref_num() >= mftab.size()) 
+    if (tok_mfref_num() >= mftab.size())
       ASM_THROW_PARSE_ERROR("reference to a non-existant mesh_fem #" << 
tok_mfref_num()+1);
     const mesh_fem& mf_ = *mftab[tok_mfref_num()]; advance();
     return mf_;
@@ -1198,7 +1198,7 @@ namespace getfem {
       multimf->resize(1); (*multimf)[0] = &mf_;
       while (advance_if(COMMA)) {
         if (tok_type() != MFREF) ASM_THROW_PARSE_ERROR("expecting mesh_fem 
reference");
-        if (tok_mfref_num() >= mftab.size()) 
+        if (tok_mfref_num() >= mftab.size())
           ASM_THROW_PARSE_ERROR("reference to a non-existant mesh_fem #" << 
tok_mfref_num()+1);
         multimf->push_back(mftab[tok_mfref_num()]); advance();
       }
@@ -1218,7 +1218,7 @@ namespace getfem {
         } else if (tok_type() == IDENT) {
           if ((tok().length()==1 && isalpha(tok()[0])) || islower(tok()[0])) {
             s.push_back(tok()[0]); advance(); j++;
-          } else ASM_THROW_PARSE_ERROR("invalid reduction index '" << tok() << 
+          } else ASM_THROW_PARSE_ERROR("invalid reduction index '" << tok() <<
             "', only lower case characters allowed");
         }
       } while (advance_if(COMMA));
@@ -1245,7 +1245,7 @@ namespace getfem {
     meshes).
     */
     if (tok_type() == IMREF) {
-      if (tok_imref_num() >= imtab.size()) 
+      if (tok_imref_num() >= imtab.size())
         ASM_THROW_PARSE_ERROR("reference to a non-existant mesh_im %" << 
tok_imref_num()+1);
       what.set_im(*imtab[tok_imref_num()]); advance();
       accept(COMMA, "expecting ','");
@@ -1255,18 +1255,18 @@ namespace getfem {
     do {
       if (tok_type() == CLOSE_PAR) break;
       if (tok_type() != IDENT) ASM_THROW_PARSE_ERROR("expecting Base or Grad 
or Hess, Normal, etc..");
-      std::string f = tok(); 
+      std::string f = tok();
       const mesh_fem *pmf = 0;
       if (f.compare("Base")==0 || f.compare("vBase")==0 || 
f.compare("mBase")==0) {
-        pmf = &do_mf_arg(); 
+        pmf = &do_mf_arg();
         what.push_back(mf_comp(&what, pmf, mf_comp::BASE, get_shape(f)));
       } else if (f.compare("Grad")==0 || f.compare("vGrad")==0 || 
f.compare("mGrad")==0) {
-        pmf = &do_mf_arg(); 
+        pmf = &do_mf_arg();
         what.push_back(mf_comp(&what, pmf, mf_comp::GRAD, get_shape(f)));
       } else if (f.compare("Hess")==0 || f.compare("vHess")==0 || 
f.compare("mHess")==0) {
-        pmf = &do_mf_arg(); 
+        pmf = &do_mf_arg();
         what.push_back(mf_comp(&what, pmf, mf_comp::HESS, get_shape(f)));
-      } else if (f.compare("NonLin")==0) {     
+      } else if (f.compare("NonLin")==0) {
         size_type num = 0; /* default value */
         advance();
         if (tok_type() == ARGNUM_SELECTOR) { num = tok_argnum(); advance(); }
@@ -1281,10 +1281,10 @@ namespace getfem {
         f.compare("GradGTInv") == 0) {
           advance();
           accept(OPEN_PAR,"expecting '('"); accept(CLOSE_PAR,"expecting ')'");
-          pmf = 0; 
-          what.push_back(mf_comp(&what, pmf, 
+          pmf = 0;
+          what.push_back(mf_comp(&what, pmf,
             f.compare("GradGT") == 0 ?
-            mf_comp::GRADGT : 
+            mf_comp::GRADGT :
           mf_comp::GRADGTINV, mf_comp::PLAIN_SHAPE));
       } else {
         if (vars.find(f) != vars.end()) {
@@ -1308,7 +1308,7 @@ namespace getfem {
 
   void generic_assembly::do_dim_spec(vdim_specif_list& lst) {
     lst.resize(0);
-    accept(OPEN_PAR, "expecting '('");    
+    accept(OPEN_PAR, "expecting '('");
     while (true) {
       if (tok_type() == IDENT) {
         if (tok().compare("mdim")==0) 
lst.push_back(vdim_specif(do_mf_arg().linked_mesh().dim()));
@@ -1335,7 +1335,7 @@ namespace getfem {
       if (tok_type() != ARGNUM_SELECTOR)
         ASM_THROW_PARSE_ERROR("expecting dataset number");
       datanum = tok_argnum();
-      advance(); 
+      advance();
     }
     if (datanum >= indata.size())
       ASM_THROW_PARSE_ERROR("wrong dataset number: " << datanum);
@@ -1343,8 +1343,8 @@ namespace getfem {
     vdim_specif_list sz;
     do_dim_spec(sz);
 
-    if (sz.nbelt() != indata[datanum]->vect_size()) 
-      ASM_THROW_PARSE_ERROR("invalid size for data argument " << datanum+1 << 
+    if (sz.nbelt() != indata[datanum]->vect_size())
+      ASM_THROW_PARSE_ERROR("invalid size for data argument " << datanum+1 <<
       " real size is " << indata[datanum]->vect_size()
       << " expected size is " << sz.nbelt());
     return 
record(std::make_unique<ATN_tensor_from_dofs_data>(indata[datanum].get(), sz));
@@ -1365,7 +1365,7 @@ namespace getfem {
           } else if (tok_type() == IDENT) {
             if ((tok().length()==1 && isalpha(tok()[0])) || islower(tok()[0])) 
{
               s.push_back(tok()[0]); advance(); j++;
-            } else ASM_THROW_PARSE_ERROR("invalid reduction index '" << tok() 
<< 
+            } else ASM_THROW_PARSE_ERROR("invalid reduction index '" << tok() 
<<
               "', only lower case chars allowed");
           }
         } while (advance_if(COMMA));
@@ -1375,7 +1375,7 @@ namespace getfem {
   }
 
   /*
-  ( expr ) 
+  ( expr )
   variable
   comp(..)
   data(data)
@@ -1396,7 +1396,7 @@ namespace getfem {
       } else if (tok().compare("data")==0) {
         advance(); t.assign(do_data());
       } else if (tok().compare("sym")==0) {
-        advance(); 
+        advance();
         tnode t2 = do_expr();
         if (t2.type() != tnode::TNTENSOR)
           ASM_THROW_PARSE_ERROR("can't symmetrise a scalar!");
@@ -1451,7 +1451,7 @@ namespace getfem {
             dim_type(j))));
           check_permut.add(j);
           reorder.push_back(dim_type(j));
-        } else { 
+        } else {
           check_permut.add(i);
           reorder.push_back(dim_type(i));
         }
@@ -1463,14 +1463,14 @@ namespace getfem {
         cerr << check_permut << endl;
         cerr << vref(reorder) << endl;
         ASM_THROW_PARSE_ERROR("you did not give a real permutation:"
-                             << vref(reorder));
+                              << vref(reorder));
       }
       t = tnode(record(std::make_unique<ATN_permuted_tensor>(*t.tensor(), 
reorder)));
     }
     return t;
   }
 
-  /* 
+  /*
   term := prod_trans*prod_trans/prod_trans ...
   */
   tnode generic_assembly::do_term() {
@@ -1484,14 +1484,18 @@ namespace getfem {
       else break;
       tnode t2 = do_prod();
       if (mult == false && t.type() == tnode::TNCONST
-        && t2.type() == tnode::TNTENSOR) 
+        && t2.type() == tnode::TNTENSOR)
         ASM_THROW_PARSE_ERROR("can't divide a constant by a tensor");
       if (t.type() == tnode::TNTENSOR && t2.type() == tnode::TNTENSOR) {
         ASM_THROW_PARSE_ERROR("tensor term-by-term productor division not "
           "implemented yet! are you sure you need it ?");
       } else if (t.type() == tnode::TNCONST && t2.type() == tnode::TNCONST) {
-        if (mult) t.assign(t.xval()*t2.xval());
-        else   { t2.check0(); t.assign(t.xval()/t2.xval()); }
+        if (mult)
+          t.assign(t.xval()*t2.xval());
+        else {
+          t2.check0();
+          t.assign(t.xval()/t2.xval());
+        }
       } else {
         if (t.type() != tnode::TNTENSOR) std::swap(t,t2);
         scalar_type v = t2.xval();
@@ -1510,7 +1514,7 @@ namespace getfem {
     return t;
   }
 
-  /* 
+  /*
   expr := term + term - term + ...
   suboptimal for things like t1+1-2-1 (which gives (((t1+1)-2)-1) )
   ... could be fixed but noone needs that i guess
@@ -1532,20 +1536,20 @@ namespace getfem {
       tnode t2 = do_term();
       if (t.type() == tnode::TNTENSOR && t2.type() == tnode::TNTENSOR) {
         if (!t.tensor()->is_tensors_sum_scaled() || t.tensor()->is_frozen()) {
-          
t.assign(record(std::make_unique<ATN_tensors_sum_scaled>(*t.tensor(), +1))); 
+          
t.assign(record(std::make_unique<ATN_tensors_sum_scaled>(*t.tensor(), +1)));
         }
         t.tensor()->is_tensors_sum_scaled()
           ->push_scaled_tensor(*t2.tensor(), scalar_type(plus));
       } else if (t.type() == tnode::TNCONST && t2.type() == tnode::TNCONST) {
         t.assign(t.xval()+t2.xval()*plus);
       } else {
-        int tsgn = 1;  
+        int tsgn = 1;
         if (t.type() != tnode::TNTENSOR)
         { std::swap(t,t2); if (plus<0) tsgn = -1; }
         else if (plus<0) t2.assign(-t2.xval());
         t.assign(record(std::make_unique<ATN_tensor_scalar_add>(*t.tensor(), 
t2.xval(),
           tsgn)));
-      } 
+      }
     }
     pop_mark();
     return t;
@@ -1581,7 +1585,7 @@ namespace getfem {
       tok_type() == OPEN_PAR) {
         if (tok_type() == ARGNUM_SELECTOR) {
           arg_num = tok_argnum();
-          advance(); 
+          advance();
         } else { arg_num = 0; }
 
         do_dim_spec(vds);
@@ -1609,14 +1613,16 @@ namespace getfem {
           /* if we are allowed to dynamically create matrices */
           if (outmat[arg_num] == 0) {
             if (mat_fact != 0)
-              outmat[arg_num] = 
std::shared_ptr<base_asm_mat>(std::shared_ptr<base_asm_mat>(), 
mat_fact->create_mat(vds[0].pmf->nb_dof(),
-                                                                               
                                     vds[1].pmf->nb_dof()));
+              outmat[arg_num] = std::shared_ptr<base_asm_mat>
+                                (std::shared_ptr<base_asm_mat>(),
+                                 mat_fact->create_mat(vds[0].pmf->nb_dof(),
+                                                      vds[1].pmf->nb_dof()));
             else ASM_THROW_PARSE_ERROR("output matrix $" << arg_num+1
               << " does not exist");
           }
         } else ASM_THROW_PARSE_ERROR("not a valid output statement");
 
-        accept(PLUS); 
+        accept(PLUS);
         accept(EQUAL);
     } else if (advance_if(EQUAL)) {
       what = wALIAS;
@@ -1628,7 +1634,7 @@ namespace getfem {
 
     switch (what) {
     case wPRINT: {
-      record_out(std::make_unique<ATN_print_tensor>(*t.tensor(), 
tok_substr(print_mark, 
+      record_out(std::make_unique<ATN_print_tensor>(*t.tensor(), 
tok_substr(print_mark,
         tok_mark())));
                  } break;
     case wOUTPUT_ARRAY: {
@@ -1636,8 +1642,8 @@ namespace getfem {
                         } break;
     case wOUTPUT_MATRIX: {
       record_out(outmat[arg_num]->build_output_tensor(*t.tensor(),
-                                                     *vds[0].pmf,
-                                                     *vds[1].pmf));
+                                                      *vds[0].pmf,
+                                                      *vds[1].pmf));
                          } break;
     case wALIAS: {
       vars[ident] = t.tensor(); t.tensor()->freeze();
@@ -1649,7 +1655,7 @@ namespace getfem {
 
   struct atn_number_compare {
     bool operator()(const std::unique_ptr<ATN_tensor> &a,
-                   const std::unique_ptr<ATN_tensor> &b) {
+                    const std::unique_ptr<ATN_tensor> &b) {
       assert(a.get() && b.get());
       return (a->number() < b->number());
     }
@@ -1657,7 +1663,7 @@ namespace getfem {
 
   struct outvar_compare {
     bool operator()(const std::unique_ptr<ATN> &a,
-                   const std::unique_ptr<ATN> &b) {
+                    const std::unique_ptr<ATN> &b) {
       assert(a.get() && b.get());
       return (a->number() < b->number());
     }
@@ -1672,10 +1678,10 @@ namespace getfem {
     if (tok_type() != END) ASM_THROW_PARSE_ERROR("unexpected token: '"
       << tok() << "'");
     if (outvars.size() == 0) cerr << "warning: assembly without output\n";
-    
+
     /* reordering of atn_tensors and outvars */
     unsigned gcnt = 0;
-    for (size_type i=0; i < outvars.size(); ++i) 
+    for (size_type i=0; i < outvars.size(); ++i)
       outvars[i]->set_number(gcnt);
 
     std::sort(atn_tensors.begin(), atn_tensors.end(), atn_number_compare());
@@ -1699,10 +1705,10 @@ namespace getfem {
       update_shapes =  (update_shapes || atn_tensors[i]->is_shape_updated());
       /* if (atn_tensors[i]->is_shape_updated()) {
       cerr << "[cv=" << cv << ",f=" << int(face) << "], shape_updated: "
-      << typeid(*atn_tensors[i]).name() 
+      << typeid(*atn_tensors[i]).name()
       << " [" << atn_tensors[i]->name()
       << "]\n  -> r=" << atn_tensors[i]->ranges() << "\n    ";
-      } 
+      }
       */
     }
 
@@ -1761,9 +1767,9 @@ namespace getfem {
   shape modifications during the assembly (since this can be
   very expensive) */
   static void get_convex_order(const dal::bit_vector& cvlst,
-    const std::vector<const mesh_im *>& imtab, 
-    const std::vector<const mesh_fem *>& mftab, 
-    const dal::bit_vector& candidates, 
+    const std::vector<const mesh_im *>& imtab,
+    const std::vector<const mesh_fem *>& mftab,
+    const dal::bit_vector& candidates,
     std::vector<size_type>& cvorder) {
       cvorder.reserve(candidates.card()); cvorder.resize(0);
 
@@ -1775,7 +1781,7 @@ namespace getfem {
               if (!mftab[i]->convex_index().is_in(cv)) {
                 ok = false;
                 // ASM_THROW_ERROR("the convex " << cv << " has no FEM for the 
#"
-                //                               << i+1 << " mesh_fem");       
  
+                //                               << i+1 << " mesh_fem");
               }
             }
             if (ok) {
@@ -1803,7 +1809,7 @@ namespace getfem {
       if (&imtab[i]->linked_mesh() != &m)
         ASM_THROW_ERROR("the mesh_fem/mesh_im live on different meshes!");
     }
-    if (imtab.size() == 0) 
+    if (imtab.size() == 0)
       ASM_THROW_ERROR("no integration method !");
   }
 
@@ -1812,7 +1818,7 @@ namespace getfem {
     r.from_mesh(imtab.at(0)->linked_mesh());
     r.error_if_not_homogeneous();
 
- 
+
     consistency_check();
     get_convex_order(imtab.at(0)->convex_index(), imtab, mftab, r.index(), cv);
     parse();
@@ -1820,10 +1826,10 @@ namespace getfem {
     for (size_type i=0; i < cv.size(); ++i) {
       mesh_region::face_bitset nf = r[cv[i]];
       dim_type f = dim_type(-1);
-      while (nf.any()) 
+      while (nf.any())
       {
         if (nf[0]) exec(cv[i],f);
-        nf >>= 1; 
+        nf >>= 1;
         f++;
       }
     }
diff --git a/src/getfem_fem.cc b/src/getfem_fem.cc
index 43b6440..7b45fb8 100644
--- a/src/getfem_fem.cc
+++ b/src/getfem_fem.cc
@@ -80,12 +80,12 @@ namespace getfem {
 
   // Specific multiplication for fem_interpolation_context use.
   static inline void spec_mat_tmult_(const base_tensor &g, const base_matrix 
&B,
-                                    base_tensor &t) {
+                                     base_tensor &t) {
     size_type P = B.nrows(), N = B.ncols();
     size_type M = t.adjust_sizes_changing_last(g, P);
     bgeot::mat_tmult(&(*(g.begin())), &(*(B.begin())), &(*(t.begin())),M,N,P);
   }
-  
+
   void fem_interpolation_context::pfp_base_value(base_tensor& t,
                                                  const pfem_precomp &pfp__) {
     const pfem &pf__ = pfp__->get_pfem();
@@ -171,7 +171,7 @@ namespace getfem {
           {
             base_tensor u;
             // u.mat_transp_reduction(pfp__->grad(ii()), B(), 2);
-           spec_mat_tmult_(pfp__->grad(ii()), B(), u);
+            spec_mat_tmult_(pfp__->grad(ii()), B(), u);
             t.mat_transp_reduction(u, K(), 1);
           }
           break;
@@ -179,13 +179,13 @@ namespace getfem {
           {
             base_tensor u;
             // u.mat_transp_reduction(pfp__->grad(ii()), B(), 2);
-           spec_mat_tmult_(pfp__->grad(ii()), B(), u);
+            spec_mat_tmult_(pfp__->grad(ii()), B(), u);
             t.mat_transp_reduction(u, B(), 1);
           }
           break;
         default:
-         // t.mat_transp_reduction(pfp__->grad(ii()), B(), 2);
-         spec_mat_tmult_(pfp__->grad(ii()), B(), t);
+          // t.mat_transp_reduction(pfp__->grad(ii()), B(), 2);
+          spec_mat_tmult_(pfp__->grad(ii()), B(), t);
         }
         if (!(pf__->is_equivalent())) {
           set_pfp(pfp__);
@@ -211,7 +211,7 @@ namespace getfem {
             {
               base_tensor u;
               // u.mat_transp_reduction(pfp_->grad(ii()), B(), 2);
-             spec_mat_tmult_(pfp_->grad(ii()), B(), u);
+              spec_mat_tmult_(pfp_->grad(ii()), B(), u);
               t.mat_transp_reduction(u, K(), 1);
             }
             break;
@@ -219,20 +219,20 @@ namespace getfem {
             {
               base_tensor u;
               // u.mat_transp_reduction(pfp_->grad(ii()), B(), 2);
-             spec_mat_tmult_(pfp_->grad(ii()), B(), u);
+              spec_mat_tmult_(pfp_->grad(ii()), B(), u);
               t.mat_transp_reduction(u, B(), 1);
             }
             break;
           default:
-           // t.mat_transp_reduction(pfp_->grad(ii()), B(), 2);
-           spec_mat_tmult_(pfp_->grad(ii()), B(), t);
+            // t.mat_transp_reduction(pfp_->grad(ii()), B(), 2);
+            spec_mat_tmult_(pfp_->grad(ii()), B(), t);
           }
-          
+
         } else {
           base_tensor u;
           pf()->grad_base_value(xref(), u);
           if (u.size()) { /* only if the FEM can provide grad_base_value */
-           // t.mat_transp_reduction(u, B(), 2);
+            // t.mat_transp_reduction(u, B(), 2);
             spec_mat_tmult_(u, B(), t);
             switch(pf()->vectorial_type()) {
             case virtual_fem::VECTORIAL_PRIMAL_TYPE:
@@ -346,7 +346,7 @@ namespace getfem {
     size_type xfem_index;
     bool all_faces;
 
-    dof_description(void)
+    dof_description()
     { linkable = true; all_faces = false; coord_index = 0; xfem_index = 0; }
   };
 
@@ -417,7 +417,7 @@ namespace getfem {
     return &(tab[tab.add_norepeat(l)]);
   }
 
-  size_type reserve_xfem_index(void) {
+  size_type reserve_xfem_index() {
     static size_type ind = 100;
     return ind += 1000;
   }
@@ -646,14 +646,14 @@ namespace getfem {
      add_node(d, pt, faces);
   }
 
-  void virtual_fem::init_cvs_node(void) {
+  void virtual_fem::init_cvs_node() {
     cvs_node->init_for_adaptative(cvr->structure());
     cv_node = bgeot::convex<base_node>(cvs_node);
     face_tab.resize(0);
     pspt_valid = false;
   }
 
-  void virtual_fem::unfreeze_cvs_node(void) {
+  void virtual_fem::unfreeze_cvs_node() {
     cv_node.structure() = cvs_node;
     pspt_valid = false;
   }
@@ -687,7 +687,7 @@ namespace getfem {
     debug_name_ = f.debug_name_;
     face_tab = f.face_tab;
   }
-  
+
   /* ******************************************************************** */
   /*        PK class.                                                         
*/
   /* ******************************************************************** */
@@ -1230,7 +1230,7 @@ namespace getfem {
   /* ******************************************************************** */
 
   // local dof numeration for K=1:
-  //    4 
+  //    4
   //   /|||
   //  / || |
   // 2-|--|-3
@@ -1271,7 +1271,7 @@ namespace getfem {
     } else if (k == 1) {
       p->base().resize(5);
       bgeot::base_rational_fraction // Q = xy/(1-z)
-       Q(bgeot::read_base_poly(3, "x*y"), bgeot::read_base_poly(3, "1-z"));
+        Q(bgeot::read_base_poly(3, "x*y"), bgeot::read_base_poly(3, "1-z"));
       p->base()[0] = (bgeot::read_base_poly(3, "1-x-y-z") + Q)*0.25;
       p->base()[1] = (bgeot::read_base_poly(3, "1+x-y-z") - Q)*0.25;
       p->base()[2] = (bgeot::read_base_poly(3, "1-x+y-z") - Q)*0.25;
@@ -1297,7 +1297,7 @@ namespace getfem {
       base_poly ones = bgeot::read_base_poly(3, "1");
       base_poly un_z = bgeot::read_base_poly(3, "1-z");
       bgeot::base_rational_fraction Q(bgeot::read_base_poly(3, "1"), un_z);
-      
+
       p->base()[ 0] = Q*Q*xi0*xi1*(x*y-z*un_z);
       p->base()[ 1] = -Q*Q*xi0*xi1*xi2*y*4.;
       p->base()[ 2] = Q*Q*xi1*xi2*(-x*y-z*un_z);
@@ -1312,7 +1312,7 @@ namespace getfem {
       p->base()[11] = Q*z*xi3*xi0*4.;
       p->base()[12] = Q*z*xi2*xi3*4.;
       p->base()[13] = bgeot::read_base_poly(3, "z*(2*z-1)");
-      
+
       p->add_node(lag_dof, base_small_vector(-1.0, -1.0, 0.0));
       p->add_node(lag_dof, base_small_vector( 0.0, -1.0, 0.0));
       p->add_node(lag_dof, base_small_vector( 1.0, -1.0, 0.0));
@@ -1329,14 +1329,15 @@ namespace getfem {
       p->add_node(lag_dof, base_small_vector( 0.0,  0.0, 1.0));
 
     } else GMM_ASSERT1(false, "Sorry, pyramidal Lagrange fem "
-                      "implemented only for degree 0, 1 or 2");
-    
+                       "implemented only for degree 0, 1 or 2");
+
     return pfem(p);
   }
-  
-  
-  static pfem pyramidal_pk_fem(fem_param_list &params,
-                     std::vector<dal::pstatic_stored_object> &dependencies) {
+
+
+  static pfem pyramidal_pk_fem
+  (fem_param_list &params,
+   std::vector<dal::pstatic_stored_object> &dependencies) {
     GMM_ASSERT1(params.size() <= 1, "Bad number of parameters");
     short_type k = 2;
     if (params.size() > 0) {
@@ -1349,8 +1350,9 @@ namespace getfem {
     return p;
   }
 
-  static pfem pyramidal_disc_pk_fem(fem_param_list &params,
-                    std::vector<dal::pstatic_stored_object> &dependencies) {
+  static pfem pyramidal_disc_pk_fem
+  (fem_param_list &params,
+   std::vector<dal::pstatic_stored_object> &dependencies) {
     GMM_ASSERT1(params.size() <= 1, "Bad number of parameters");
     short_type k = 2;
     if (params.size() > 0) {
@@ -1713,10 +1715,10 @@ namespace getfem {
 
   struct P1_wabbfoafla_ : public PK_fem_
   { // idem elt prec mais avec raccord lagrange. A faire en dim. quelconque ..
-    P1_wabbfoafla_(void);
+    P1_wabbfoafla_();
   };
 
-  P1_wabbfoafla_::P1_wabbfoafla_(void) : PK_fem_(2, 1) {
+  P1_wabbfoafla_::P1_wabbfoafla_() : PK_fem_(2, 1) {
     unfreeze_cvs_node();
     es_degree = 2;
     base_node pt(2); pt.fill(0.5);
@@ -2927,7 +2929,7 @@ namespace getfem {
   struct hermite_segment__ : public fem<base_poly> {
     virtual void mat_trans(base_matrix &M, const base_matrix &G,
                            bgeot::pgeometric_trans pgt) const;
-    hermite_segment__(void);
+    hermite_segment__();
   };
 
   void hermite_segment__::mat_trans(base_matrix &M,
@@ -2962,7 +2964,7 @@ namespace getfem {
   // Hermite element on the segment. when the real element lies in
   // a 2 or 3 dimensional domain, the element should still work if
   // the tangent coincides.
-  hermite_segment__::hermite_segment__(void) {
+  hermite_segment__::hermite_segment__() {
     base_node pt(1);
     cvr = bgeot::simplex_of_reference(1);
     dim_ = cvr->structure()->dim();
@@ -2992,7 +2994,7 @@ namespace getfem {
   struct hermite_triangle__ : public fem<base_poly> {
     virtual void mat_trans(base_matrix &M, const base_matrix &G,
                            bgeot::pgeometric_trans pgt) const;
-    hermite_triangle__(void);
+    hermite_triangle__();
   };
 
   void hermite_triangle__::mat_trans(base_matrix &M,
@@ -3017,7 +3019,7 @@ namespace getfem {
     }
   }
 
-  hermite_triangle__::hermite_triangle__(void) {
+  hermite_triangle__::hermite_triangle__() {
     cvr = bgeot::simplex_of_reference(2);
     dim_ = cvr->structure()->dim();
     init_cvs_node();
@@ -3065,7 +3067,7 @@ namespace getfem {
   struct hermite_tetrahedron__ : public fem<base_poly> {
     virtual void mat_trans(base_matrix &M, const base_matrix &G,
                            bgeot::pgeometric_trans pgt) const;
-    hermite_tetrahedron__(void);
+    hermite_tetrahedron__();
   };
 
   void hermite_tetrahedron__::mat_trans(base_matrix &M,
@@ -3088,7 +3090,7 @@ namespace getfem {
     }
   }
 
-  hermite_tetrahedron__::hermite_tetrahedron__(void) {
+  hermite_tetrahedron__::hermite_tetrahedron__() {
     cvr = bgeot::simplex_of_reference(3);
     dim_ = cvr->structure()->dim();
     init_cvs_node();
@@ -3168,7 +3170,7 @@ namespace getfem {
   struct argyris_triangle__ : public fem<base_poly> {
     virtual void mat_trans(base_matrix &M, const base_matrix &G,
                            bgeot::pgeometric_trans pgt) const;
-    argyris_triangle__(void);
+    argyris_triangle__();
   };
 
   void argyris_triangle__::mat_trans(base_matrix &M,
@@ -3252,7 +3254,7 @@ namespace getfem {
     }
   }
 
-  argyris_triangle__::argyris_triangle__(void) {
+  argyris_triangle__::argyris_triangle__() {
     cvr = bgeot::simplex_of_reference(2);
     dim_ = cvr->structure()->dim();
     init_cvs_node();
@@ -3339,7 +3341,7 @@ namespace getfem {
   struct morley_triangle__ : public fem<base_poly> {
     virtual void mat_trans(base_matrix &M, const base_matrix &G,
                            bgeot::pgeometric_trans pgt) const;
-    morley_triangle__(void);
+    morley_triangle__();
   };
 
   void morley_triangle__::mat_trans(base_matrix &M,
@@ -3398,7 +3400,7 @@ namespace getfem {
     }
   }
 
-  morley_triangle__::morley_triangle__(void) {
+  morley_triangle__::morley_triangle__() {
     cvr = bgeot::simplex_of_reference(2);
     dim_ = cvr->structure()->dim();
     init_cvs_node();
@@ -3446,8 +3448,8 @@ namespace getfem {
       if (alpha != scalar_type(0)) {
         base_node G =
           gmm::mean_value(cv_node.points().begin(), cv_node.points().end());
-       for (size_type i=0; i < cv_node.nb_points(); ++i)
-         cv_node.points()[i] = (1-alpha)*cv_node.points()[i] + alpha*G;
+        for (size_type i=0; i < cv_node.nb_points(); ++i)
+          cv_node.points()[i] = (1-alpha)*cv_node.points()[i] + alpha*G;
         for (size_type d = 0; d < nc; ++d) {
           base_poly S(1,2);
           S[0] = -alpha * G[d] / (1-alpha);
@@ -3557,23 +3559,23 @@ namespace getfem {
     /* Identifying P1-simplexes.                                          */
     if (nbp == n+1)
       if (pgt->basic_structure() == bgeot::simplex_structure(dim_type(n)))
-       { name << "FEM_PK" << suffix << "("; found = true; }
+        { name << "FEM_PK" << suffix << "("; found = true; }
 
     /* Identifying Q1-parallelepiped.                                     */
     if (!found && nbp == (size_type(1) << n))
       if (pgt->basic_structure()==bgeot::parallelepiped_structure(dim_type(n)))
-       { name << "FEM_QK" << suffix << "("; found = true; }
+        { name << "FEM_QK" << suffix << "("; found = true; }
 
     /* Identifying Q1-prisms.                                             */
     if (!found && nbp == 2 * n)
       if (pgt->basic_structure() == bgeot::prism_structure(dim_type(n)))
-       { name << "FEM_PK_PRISM" << suffix << "("; found = true; }
-    
+        { name << "FEM_PK_PRISM" << suffix << "("; found = true; }
+
     /* Identifying pyramids.                                              */
     if (!found && nbp == 5)
       if (pgt->basic_structure() == bgeot::pyramidal_structure(1)) {
-       name << "FEM_PYRAMID" << suffix << "_LAGRANGE(";
-       found = true; spec_dim = false;
+        name << "FEM_PYRAMID" << suffix << "_LAGRANGE(";
+        found = true; spec_dim = false;
       }
 
     // To be completed
diff --git a/src/getfem_generic_assembly.cc b/src/getfem_generic_assembly.cc
index 99dd345..8e11456 100644
--- a/src/getfem_generic_assembly.cc
+++ b/src/getfem_generic_assembly.cc
@@ -374,10 +374,10 @@ namespace getfem {
     { return is_copied ? tensor_copied->org_tensor() : t; }
     base_tensor &org_tensor()
     { return is_copied ? tensor_copied->org_tensor() : t; }
-    
+
     const base_tensor &tensor() const
     { return (is_copied ? tensor_copied->tensor() : t); }
-    
+
     base_tensor &tensor()
     { return (is_copied ? tensor_copied->tensor() : t); }
 
@@ -394,28 +394,28 @@ namespace getfem {
       is_copied = true; sparsity_ = t_.sparsity_; qdim_ = t_.qdim_;
       t = t_.org_tensor(); tensor_copied = &(t_);
     }
-    
+
     inline void adjust_sizes(const bgeot::multi_index &ssizes)
     { t.adjust_sizes(ssizes); }
 
     inline void adjust_sizes()
     { if (t.sizes().size() || t.size() != 1) t.init(); }
-    
+
     inline void adjust_sizes(size_type i)
     { if (t.sizes().size() != 1 || t.sizes()[0] != i) t.init(i); }
 
     inline void adjust_sizes(size_type i, size_type j) {
       if (t.sizes().size() != 2 || t.sizes()[0] != i || t.sizes()[1] != j)
-       t.init(i, j);
+        t.init(i, j);
     }
-    
+
     inline void adjust_sizes(size_type i, size_type j, size_type k) {
       if (t.sizes().size() != 3 || t.sizes()[0] != i || t.sizes()[1] != j
           || t.sizes()[2] != k)
         t.init(i, j, k);
     }
     inline void adjust_sizes(size_type i, size_type j,
-                            size_type k, size_type l) {
+                             size_type k, size_type l) {
       if (t.sizes().size() != 3 || t.sizes()[0] != i || t.sizes()[1] != j
           || t.sizes()[2] != k || t.sizes()[3] != l)
        t.init(i, j, k, l);
@@ -436,7 +436,7 @@ namespace getfem {
     void init_fourth_order_tensor(size_type n, size_type m,
                                   size_type l, size_type k)
     { set_to_original(); t.adjust_sizes(n, m, l, k); }
-    
+
     const bgeot::multi_index &sizes() const { return t.sizes(); }
 
     assembly_tensor() : is_copied(false), sparsity_(0), tensor_copied(0) {}
@@ -477,7 +477,7 @@ namespace getfem {
     inline const base_tensor &tensor() const { return t.tensor(); }
     inline base_tensor &tensor() { return t.tensor(); }
     int sparsity() const { return t.sparsity(); }
-    
+
     inline size_type nb_test_functions() const {
       if (test_function_type == size_type(-1)) return 0;
       return test_function_type  - (test_function_type >= 2 ? 1 : 0);
@@ -504,7 +504,7 @@ namespace getfem {
       if (test0 && test1 && (test0 == test1 ||
                              test0 >= 3 || test1 >= 3))
         ga_throw_error(expr, pos,
-                      "Incompatibility of test functions in product.");
+                       "Incompatibility of test functions in product.");
       GMM_ASSERT1(test0 != size_type(-1) && test1 != size_type(-1),
                   "internal error");
 
@@ -563,13 +563,13 @@ namespace getfem {
     { t.init_third_order_tensor(n, m, l); test_function_type = 0; }
 
     inline void init_fourth_order_tensor(size_type n, size_type m,
-                                        size_type l, size_type k)
+                                         size_type l, size_type k)
     { t.init_fourth_order_tensor(n, m, l, k); test_function_type = 0; }
 
     ga_tree_node()
       : node_type(GA_NODE_VOID), test_function_type(-1), qdim1(0), qdim2(0),
-       nbc1(0), nbc2(0), nbc3(0), pos(0), der1(0), der2(0),
-       symmetric_op(false), hash_value(0) {}
+        nbc1(0), nbc2(0), nbc3(0), pos(0), der1(0), der2(0),
+        symmetric_op(false), hash_value(0) {}
     ga_tree_node(GA_NODE_TYPE ty, size_type p)
       : node_type(ty), test_function_type(-1),
         qdim1(0), qdim2(0), nbc1(0), nbc2(0), nbc3(0),
@@ -645,9 +645,9 @@ namespace getfem {
 
     void add_sub_tree(ga_tree &sub_tree) {
       if (current_node &&
-         (current_node->node_type == GA_NODE_PARAMS ||
-          current_node->node_type == GA_NODE_INTERPOLATE_FILTER ||
-          current_node->node_type == GA_NODE_C_MATRIX)) {
+          (current_node->node_type == GA_NODE_PARAMS ||
+           current_node->node_type == GA_NODE_INTERPOLATE_FILTER ||
+           current_node->node_type == GA_NODE_C_MATRIX)) {
         GMM_ASSERT1(sub_tree.root, "Invalid tree operation");
         current_node->children.push_back(sub_tree.root);
         sub_tree.root->parent = current_node;
@@ -941,7 +941,7 @@ namespace getfem {
         if (pnode1->t.sizes()[i] != pnode2->t.sizes()[i]) return false;
       for (size_type i = 0; i < pnode1->tensor().size(); ++i)
         if (gmm::abs(pnode1->tensor()[i] - pnode2->tensor()[i]) > 1E-25)
-         return false;
+          return false;
       break;
     case GA_NODE_C_MATRIX:
       if (pnode1->nbc1 != pnode2->nbc1 || pnode1->nbc2 != pnode2->nbc2 ||
@@ -1045,7 +1045,7 @@ namespace getfem {
   }
 
   static void verify_tree(const pga_tree_node pnode,
-                         const pga_tree_node parent) {
+                          const pga_tree_node parent) {
     GMM_ASSERT1(pnode->parent == parent,
                 "Invalid tree node " << pnode->node_type);
     for (size_type i = 0; i < pnode->children.size(); ++i)
@@ -1086,9 +1086,9 @@ namespace getfem {
         size_type n0 = pnode->tensor_proper_size(0);
         size_type n1 = pnode->tensor_proper_size(1);
         size_type n2 = ((pnode->tensor_order() > 2) ?
-                       pnode->tensor_proper_size(2) : 1);
+                        pnode->tensor_proper_size(2) : 1);
         size_type n3 = ((pnode->tensor_order() > 3) ?
-                       pnode->tensor_proper_size(3) : 1);
+                        pnode->tensor_proper_size(3) : 1);
         if (n3 > 1) str << "[";
         for (size_type l = 0; l < n3; ++l) {
           if (l != 0) str << ",";
@@ -1757,24 +1757,24 @@ namespace getfem {
               if (sub_tree.root->node_type == GA_NODE_C_MATRIX) {
                 // nested format
                 if (r_type != GA_COMMA && r_type != GA_RBRACKET)
-                 // in the nested format only "," and "]" are expected
+                  // in the nested format only "," and "]" are expected
                   ga_throw_error(expr, pos-1, "Bad explicit "
                                  "vector/matrix/tensor format.")
                 else if (sub_tree.root->nbc3 != 1)
-                 // the sub-tensor to be merged cannot be of fourth order
+                  // the sub-tensor to be merged cannot be of fourth order
                   ga_throw_error(expr, pos-1, "Definition of explicit "
                                  "tensors is limitted to the fourth order. "
                                  "Limit exceeded.")
                 else if (foundsemi || // Cannot mix with the non-nested format.
                          (sub_tree.root->children.size() > 1 &&
-                         // The sub-tensor cannot be a column vector [a;b],
+                          // The sub-tensor cannot be a column vector [a;b],
                           sub_tree.root->nbc1 == 1))
-                 // the nested format only accepts row vectors [a,b]
-                 // and converts them to column vectors internally
+                  // the nested format only accepts row vectors [a,b]
+                  // and converts them to column vectors internally
                   ga_throw_error(expr, pos-1, "Bad explicit "
                                  "vector/matrix/tensor format.")  // (see 
below)
 
-               // convert a row vector [a,b] to a column vector [a;b]
+                // convert a row vector [a,b] to a column vector [a;b]
                 if (sub_tree.root->children.size() == sub_tree.root->nbc1)
                   sub_tree.root->nbc1 = 1;
 
@@ -2083,11 +2083,11 @@ namespace getfem {
 
       std::map<const mesh_fem *, base_tensor>
         xfem_plus_base,  xfem_plus_grad,  xfem_plus_hess,
-       xfem_minus_base, xfem_minus_grad, xfem_minus_hess;
+        xfem_minus_base, xfem_minus_grad, xfem_minus_hess;
       std::map<const mesh_fem *, std::list<ga_if_hierarchy>>
-       xfem_plus_base_hierarchy,  xfem_plus_grad_hierarchy,
-       xfem_plus_hess_hierarchy,  xfem_minus_base_hierarchy,
-       xfem_minus_grad_hierarchy, xfem_minus_hess_hierarchy;
+        xfem_plus_base_hierarchy,  xfem_plus_grad_hierarchy,
+        xfem_plus_hess_hierarchy,  xfem_minus_base_hierarchy,
+        xfem_minus_grad_hierarchy, xfem_minus_hess_hierarchy;
 
       std::map<std::string, std::set<std::string>> transformations;
       std::set<std::string> transformations_der;
@@ -2172,15 +2172,15 @@ namespace getfem {
 
     friend void ga_define_function(const std::string &name, size_type nbargs,
                                    const std::string &expr,
-                                  const std::string &der1,
+                                   const std::string &der1,
                                    const std::string &der2);
     friend void ga_define_function(const std::string &name,
-                                  pscalar_func_onearg f,
+                                   pscalar_func_onearg f,
                                    const std::string &der);
     friend void ga_define_function(const std::string &name,
-                                  pscalar_func_twoargs f,
+                                   pscalar_func_twoargs f,
                                    const std::string &der1,
-                                  const std::string &der2);
+                                   const std::string &der2);
   public:
     scalar_type operator()(scalar_type t_, scalar_type u_ = 0.) const {
       switch(ftype_) {
@@ -2204,7 +2204,7 @@ namespace getfem {
       if (ftype_ == 1) {
         for (size_type i = 0; i < workspace.thrd_cast().nb_trees(); ++i) {
           const ga_workspace::tree_description &
-           td = workspace.thrd_cast().tree_info(i);
+            td = workspace.thrd_cast().tree_info(i);
           if (!(ga_is_affine(*(td.ptree), workspace, varname, "")))
             return false;
         }
@@ -2223,7 +2223,7 @@ namespace getfem {
     pscalar_func_twoargs f2() const {return f2_;}
 
     ga_predef_function() : expr_(""), derivative1_(""),
-                          derivative2_(""), gis(nullptr) {}
+                           derivative2_(""), gis(nullptr) {}
     ga_predef_function(pscalar_func_onearg f, size_type dtype__ = 0,
                        const std::string &der = "")
       : ftype_(0), dtype_(dtype__), nbargs_(1), f1_(f), expr_(""),
@@ -2423,22 +2423,22 @@ namespace getfem {
                     base_tensor &result) const {
       size_type N = args[0]->sizes()[0];
       if (N) {
-       __mat_aux1().base_resize(N, N);
-       gmm::copy(args[0]->as_vector(), __mat_aux1().as_vector());
-       scalar_type det = bgeot::lu_inverse(__mat_aux1());
-       if (det == scalar_type(0))
-         gmm::clear(result.as_vector());
-       else {
-         auto it = result.begin();
-         auto ita = __mat_aux1().begin();
-         for (size_type j = 0; j < N; ++j, ++ita) {
-           auto itaa = ita;
-           *it = (*itaa) * det; ++it;
-           for (size_type i = 1; i < N; ++i, ++it)
-             { itaa += N; *it = (*itaa) * det; }
-         }
-         GA_DEBUG_ASSERT(it == result.end(), "Internal error");
-       }
+        __mat_aux1().base_resize(N, N);
+        gmm::copy(args[0]->as_vector(), __mat_aux1().as_vector());
+        scalar_type det = bgeot::lu_inverse(__mat_aux1());
+        if (det == scalar_type(0))
+          gmm::clear(result.as_vector());
+        else {
+          auto it = result.begin();
+          auto ita = __mat_aux1().begin();
+          for (size_type j = 0; j < N; ++j, ++ita) {
+            auto itaa = ita;
+            *it = (*itaa) * det; ++it;
+            for (size_type i = 1; i < N; ++i, ++it)
+              { itaa += N; *it = (*itaa) * det; }
+          }
+          GA_DEBUG_ASSERT(it == result.end(), "Internal error");
+        }
       }
     }
 
@@ -2453,18 +2453,18 @@ namespace getfem {
         gmm::clear(result.as_vector());
       else {
         auto it = result.begin();
-       auto ita = __mat_aux1().begin(), ita_l = ita;
+        auto ita = __mat_aux1().begin(), ita_l = ita;
         for (size_type l = 0; l < N; ++l, ++ita_l) {
-         auto ita_lk = ita_l, ita_jk = ita;
-         for (size_type k = 0; k < N; ++k, ita_lk += N, ita_jk += N) {
-           auto ita_j = ita;
+          auto ita_lk = ita_l, ita_jk = ita;
+          for (size_type k = 0; k < N; ++k, ita_lk += N, ita_jk += N) {
+            auto ita_j = ita;
             for (size_type j = 0; j < N; ++j, ++ita_j, ++ita_jk) {
-             auto ita_ji = ita_j, ita_li = ita_l;
+              auto ita_ji = ita_j, ita_li = ita_l;
               for (size_type i = 0; i < N; ++i, ++it, ita_ji += N, ita_li += N)
                 *it = ((*ita_ji) * (*ita_lk) - (*ita_jk) * (*ita_li)) * det;
-           }
-         }
-       }
+            }
+          }
+        }
         GA_DEBUG_ASSERT(it == result.end(), "Internal error");
       }
     }
@@ -2498,15 +2498,15 @@ namespace getfem {
       auto it = result.begin();
       auto ita = __mat_aux1().begin(), ita_l = ita;
       for (size_type l = 0; l < N; ++l, ++ita_l) {
-       auto ita_k = ita;
+        auto ita_k = ita;
         for (size_type k = 0; k < N; ++k, ita_k += N) {
-         auto ita_lj = ita_l;
+          auto ita_lj = ita_l;
           for (size_type j = 0; j < N; ++j, ++ita_lj) {
-           auto ita_ik = ita_k;
+            auto ita_ik = ita_k;
             for (size_type i = 0; i < N; ++i, ++it, ++ita_ik)
               *it = -(*ita_ik) * (*ita_lj);
-         }
-       }
+          }
+        }
       }
       GA_DEBUG_ASSERT(it == result.end(), "Internal error");
     }
@@ -2528,7 +2528,7 @@ namespace getfem {
               for (size_type j = 0; j < N; ++j)
                 for (size_type i = 0; i < N; ++i, ++it)
                   *it = __mat_aux1()(i,k)*__mat_aux1()(l,m)*__mat_aux1()(n,j)
-                   + __mat_aux1()(i,m)*__mat_aux1()(m,k)*__mat_aux1()(l,j);
+                    + __mat_aux1()(i,m)*__mat_aux1()(m,k)*__mat_aux1()(l,j);
       GA_DEBUG_ASSERT(it == result.end(), "Internal error");
     }
   };
@@ -2600,7 +2600,7 @@ namespace getfem {
     PREDEF_FUNCTIONS["sinc"] = ga_predef_function(ga_sinc, 1,
                                                   "DER_PDFUNC_SINC");
     PREDEF_FUNCTIONS["DER_PDFUNC_SINC"] =ga_predef_function(ga_der_sinc, 1,
-                                                           "DER2_PDFUNC_SINC");
+                                                            
"DER2_PDFUNC_SINC");
     PREDEF_FUNCTIONS["DER2_PDFUNC_SINC"] = ga_predef_function(ga_der2_sinc);
 
 
@@ -2618,11 +2618,11 @@ namespace getfem {
       ga_predef_function(ga_der_atan, 2, "-2*t/sqr(1+t*t)");
     PREDEF_FUNCTIONS["DER_PDFUNC1_ATAN2"] =
       ga_predef_function(ga_der_t_atan2, 2, "-2*t*u/sqr(sqr(u)+sqr(t))",
-                        "(sqrt(t)-sqr(u))/sqr(sqr(u)+sqr(t))");
+                         "(sqrt(t)-sqr(u))/sqr(sqr(u)+sqr(t))");
     PREDEF_FUNCTIONS["DER_PDFUNC2_ATAN2"] =
       ga_predef_function(ga_der_u_atan2, 2,
-                        "(sqrt(t)-sqr(u))/sqr(sqr(u)+sqr(t))",
-                        "2*t*u/sqr(sqr(u)+sqr(t))");
+                         "(sqrt(t)-sqr(u))/sqr(sqr(u)+sqr(t))",
+                         "2*t*u/sqr(sqr(u)+sqr(t))");
 
 
     // Error functions
@@ -2716,10 +2716,10 @@ namespace getfem {
     for (size_type thread = 0; thread < num_threads(); ++thread)
     {
       F.workspace(thread).add_fixed_size_variable("t", gmm::sub_interval(0,1),
-                                                 F.t(thread));
+                                                  F.t(thread));
       if (nbargs == 2)
         F.workspace(thread).add_fixed_size_variable("u", 
gmm::sub_interval(0,1),
-                                                   F.u(thread));
+                                                    F.u(thread));
       F.workspace(thread).add_function_expression(expr);
       ga_compile_function(F.workspace(thread), (*F.gis)(thread), true);
     }
@@ -2817,15 +2817,15 @@ namespace getfem {
       GA_DEBUG_INFO("Instruction: Slice local dofs");
       GMM_ASSERT1(qmult1 != 0 && qmult2 != 0, "Internal error");
       slice_vector_on_basic_dof_of_element(mf, U, ctx.convex_num(),
-                                          coeff, qmult1, qmult2);
+                                           coeff, qmult1, qmult2);
       return 0;
     }
     ga_instruction_slice_local_dofs(const mesh_fem &mf_, const base_vector &U_,
                                     const fem_interpolation_context &ctx_,
                                     base_vector &coeff_,
-                                   size_type qmult1_, size_type qmult2_)
+                                    size_type qmult1_, size_type qmult2_)
       : mf(mf_), U(U_), ctx(ctx_), coeff(coeff_),
-       qmult1(qmult1_), qmult2(qmult2_) {}
+        qmult1(qmult1_), qmult2(qmult2_) {}
   };
 
   struct ga_instruction_update_pfp : public ga_instruction {
@@ -2840,7 +2840,7 @@ namespace getfem {
         size_type cv = ctx.is_convex_num_valid()
                      ? ctx.convex_num() : mf.convex_index().first_true();
         pfem pf = mf.fem_of_element(cv);
-       if (!pfp || pf != pfp->get_pfem() ||
+        if (!pfp || pf != pfp->get_pfem() ||
             ctx.pgp()->get_ppoint_tab() != pfp->get_ppoint_tab()) {
           pfp = fp_pool(pf, ctx.pgp()->get_ppoint_tab());
         }
@@ -2861,19 +2861,19 @@ namespace getfem {
     const fem_interpolation_context &ctx;
     size_type qdim;
     const mesh_fem *mfn, **mfg;
-    
+
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: adapt first index of tensor");
       const mesh_fem &mf = *(mfg ? *mfg : mfn);
       GA_DEBUG_ASSERT(mfg ? *mfg : mfn, "Internal error");
       size_type cv_1 = ctx.is_convex_num_valid()
-       ? ctx.convex_num() : mf.convex_index().first_true();
+        ? ctx.convex_num() : mf.convex_index().first_true();
       pfem pf = mf.fem_of_element(cv_1);
       GMM_ASSERT1(pf, "An element without finite element method defined");
       size_type Qmult = qdim / pf->target_dim();
       size_type s = pf->nb_dof(cv_1) * Qmult;
       if (t.sizes()[0] != s)
-       { bgeot::multi_index mi = t.sizes(); mi[0] = s; t.adjust_sizes(mi); }
+        { bgeot::multi_index mi = t.sizes(); mi[0] = s; t.adjust_sizes(mi); }
       return 0;
     }
 
@@ -2891,18 +2891,18 @@ namespace getfem {
       GA_DEBUG_INFO("Instruction: adapt second index of tensor");
       const mesh_fem &mf = *(mfg ? *mfg : mfn);
       size_type cv_1 = ctx.is_convex_num_valid()
-       ? ctx.convex_num() : mf.convex_index().first_true();
+        ? ctx.convex_num() : mf.convex_index().first_true();
       pfem pf = mf.fem_of_element(cv_1);
       GMM_ASSERT1(pf, "An element without finite element methode defined");
       size_type Qmult = qdim / pf->target_dim();
       size_type s = pf->nb_dof(cv_1) * Qmult;
       if (t.sizes()[1] != s)
-       { bgeot::multi_index mi = t.sizes(); mi[1] = s; t.adjust_sizes(mi); }
+        { bgeot::multi_index mi = t.sizes(); mi[1] = s; t.adjust_sizes(mi); }
       return 0;
     }
 
     ga_instruction_second_ind_tensor(base_tensor &t_,
-                                    fem_interpolation_context &ctx_,
+                                     fem_interpolation_context &ctx_,
                                      size_type qdim_, const mesh_fem *mfn_,
                                      const mesh_fem **mfg_)
       : ga_instruction_first_ind_tensor(t_, ctx_, qdim_, mfn_, mfg_) {}
@@ -2916,15 +2916,15 @@ namespace getfem {
     const mesh_fem *mfn1, **mfg1;
     size_type qdim2;
     const mesh_fem *mfn2, **mfg2;
-    
+
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: adapt two first indices of tensor");
       const mesh_fem &mf1 = *(mfg1 ? *mfg1 : mfn1);
       const mesh_fem &mf2 = *(mfg2 ? *mfg2 : mfn2);
       size_type cv_1 = ctx1.is_convex_num_valid()
-       ? ctx1.convex_num() : mf1.convex_index().first_true();
+        ? ctx1.convex_num() : mf1.convex_index().first_true();
       size_type cv_2 = ctx2.is_convex_num_valid()
-       ? ctx2.convex_num() : mf2.convex_index().first_true();
+        ? ctx2.convex_num() : mf2.convex_index().first_true();
       pfem pf1 = mf1.fem_of_element(cv_1);
       GMM_ASSERT1(pf1, "An element without finite element method defined");
       pfem pf2 = mf2.fem_of_element(cv_2);
@@ -2934,9 +2934,9 @@ namespace getfem {
       size_type Qmult2 = qdim2 / pf2->target_dim();
       size_type s2 = pf2->nb_dof(cv_2) * Qmult2;
       if (t.sizes()[0] != s1 || t.sizes()[1] != s2) {
-       bgeot::multi_index mi = t.sizes();
-       mi[0] = s1; mi[1] = s2;
-       t.adjust_sizes(mi);
+        bgeot::multi_index mi = t.sizes();
+        mi[0] = s1; mi[1] = s2;
+        t.adjust_sizes(mi);
       }
       return 0;
     }
@@ -3267,41 +3267,41 @@ namespace getfem {
       size_type ndof = Z.sizes()[0];
       if (!ndof) { gmm::clear(t.as_vector()); return 0; }
       GA_DEBUG_ASSERT(t.size() == qdim, "dimensions mismatch");
-      
+
       if (qdim == 1) {
-       GA_DEBUG_ASSERT(gmm::vect_size(coeff) == ndof,
-                       "Wrong size for coeff vector");
-       auto itc = coeff.begin(); auto itZ = Z.begin();
-       a = (*itc++) * (*itZ++);
-       while (itc != coeff.end()) a += (*itc++) * (*itZ++);
+        GA_DEBUG_ASSERT(gmm::vect_size(coeff) == ndof,
+                        "Wrong size for coeff vector");
+        auto itc = coeff.begin(); auto itZ = Z.begin();
+        a = (*itc++) * (*itZ++);
+        while (itc != coeff.end()) a += (*itc++) * (*itZ++);
       } else {
-       size_type target_dim = Z.sizes()[1];
-       if (target_dim == 1) {
-         GA_DEBUG_ASSERT(gmm::vect_size(coeff) == ndof*qdim,
-                         "Wrong size for coeff vector");
-         auto itc = coeff.begin(); auto itZ = Z.begin();
-         for (auto it = t.begin(); it != t.end(); ++it)
-           *it = (*itc++) * (*itZ);
-         ++itZ;
-         for (size_type j = 1; j < ndof; ++j, ++itZ) {
-           for (auto it = t.begin(); it != t.end(); ++it)
-             *it += (*itc++) * (*itZ);
-         }
-       } else {
-         size_type Qmult = qdim / target_dim;
-         GA_DEBUG_ASSERT(gmm::vect_size(coeff) == ndof*Qmult,
-                         "Wrong size for coeff vector");
-         
-         gmm::clear(t.as_vector());
-         auto itc = coeff.begin();
-         for (size_type j = 0; j < ndof; ++j) {
-           auto it = t.begin();
-           for (size_type q = 0; q < Qmult; ++q, ++itc) {
-             for (size_type r = 0; r < target_dim; ++r)
-               *it++ += (*itc) * Z[j + r*ndof];
-           }
-         }
-       }
+        size_type target_dim = Z.sizes()[1];
+        if (target_dim == 1) {
+          GA_DEBUG_ASSERT(gmm::vect_size(coeff) == ndof*qdim,
+                          "Wrong size for coeff vector");
+          auto itc = coeff.begin(); auto itZ = Z.begin();
+          for (auto it = t.begin(); it != t.end(); ++it)
+            *it = (*itc++) * (*itZ);
+          ++itZ;
+          for (size_type j = 1; j < ndof; ++j, ++itZ) {
+            for (auto it = t.begin(); it != t.end(); ++it)
+              *it += (*itc++) * (*itZ);
+          }
+        } else {
+          size_type Qmult = qdim / target_dim;
+          GA_DEBUG_ASSERT(gmm::vect_size(coeff) == ndof*Qmult,
+                          "Wrong size for coeff vector");
+
+          gmm::clear(t.as_vector());
+          auto itc = coeff.begin();
+          for (size_type j = 0; j < ndof; ++j) {
+            auto it = t.begin();
+            for (size_type q = 0; q < Qmult; ++q, ++itc) {
+              for (size_type r = 0; r < target_dim; ++r)
+                *it++ += (*itc) * Z[j + r*ndof];
+            }
+          }
+        }
       }
       return 0;
     }
@@ -3319,44 +3319,44 @@ namespace getfem {
       if (!ndof) { gmm::clear(t.as_vector()); return 0; }
       size_type N = Z.sizes()[2];
       if (qdim == 1) {
-       GA_DEBUG_ASSERT(t.size() == N, "dimensions mismatch");
-       GA_DEBUG_ASSERT(coeff.size() == ndof, "Wrong size for coeff vector");
-       auto itZ = Z.begin();
-       for (auto it = t.begin(); it != t.end(); ++it) {
-         auto itc = coeff.begin();
-         *it =  (*itc++) * (*itZ++);
-         while (itc != coeff.end()) *it += (*itc++) * (*itZ++);
-       }
+        GA_DEBUG_ASSERT(t.size() == N, "dimensions mismatch");
+        GA_DEBUG_ASSERT(coeff.size() == ndof, "Wrong size for coeff vector");
+        auto itZ = Z.begin();
+        for (auto it = t.begin(); it != t.end(); ++it) {
+          auto itc = coeff.begin();
+          *it =  (*itc++) * (*itZ++);
+          while (itc != coeff.end()) *it += (*itc++) * (*itZ++);
+        }
       } else {
-       size_type target_dim = Z.sizes()[1];
-       if (target_dim == 1) {
-         GA_DEBUG_ASSERT(t.size() == N*qdim, "dimensions mismatch");
-         GA_DEBUG_ASSERT(coeff.size() == ndof*qdim,
-                         "Wrong size for coeff vector");
-         for (size_type q = 0; q < qdim; ++q) {
-           auto itZ = Z.begin(); auto it = t.begin() + q;
-           for (size_type k = 0; k < N; ++k) {
-             if (k)  it += qdim;
-             auto itc = coeff.begin() + q;
-             *it = (*itc) * (*itZ++);
-             for (size_type j = 1; j < ndof; ++j)
-               { itc += qdim; *it += (*itc) * (*itZ++); }
-           }
-         }
-       } else {
-         size_type Qmult = qdim / target_dim;
-         GA_DEBUG_ASSERT(t.size() == N*qdim, "dimensions mismatch");
-         GA_DEBUG_ASSERT(coeff.size() == ndof*Qmult,
-                         "Wrong size for coeff vector");
-         gmm::clear(t.as_vector());
-         for (size_type q = 0; q < Qmult; ++q) {
-           auto itZ = Z.begin();
-           for (size_type k = 0; k < N; ++k)
-             for (size_type r = 0; r < target_dim; ++r)
-               for (size_type j = 0; j < ndof; ++j)
-                 t[r + q*target_dim + k*qdim] += coeff[j*Qmult+q] * (*itZ++);
-         }
-       }
+        size_type target_dim = Z.sizes()[1];
+        if (target_dim == 1) {
+          GA_DEBUG_ASSERT(t.size() == N*qdim, "dimensions mismatch");
+          GA_DEBUG_ASSERT(coeff.size() == ndof*qdim,
+                          "Wrong size for coeff vector");
+          for (size_type q = 0; q < qdim; ++q) {
+            auto itZ = Z.begin(); auto it = t.begin() + q;
+            for (size_type k = 0; k < N; ++k) {
+              if (k)  it += qdim;
+              auto itc = coeff.begin() + q;
+              *it = (*itc) * (*itZ++);
+              for (size_type j = 1; j < ndof; ++j)
+                { itc += qdim; *it += (*itc) * (*itZ++); }
+            }
+          }
+        } else {
+          size_type Qmult = qdim / target_dim;
+          GA_DEBUG_ASSERT(t.size() == N*qdim, "dimensions mismatch");
+          GA_DEBUG_ASSERT(coeff.size() == ndof*Qmult,
+                          "Wrong size for coeff vector");
+          gmm::clear(t.as_vector());
+          for (size_type q = 0; q < Qmult; ++q) {
+            auto itZ = Z.begin();
+            for (size_type k = 0; k < N; ++k)
+              for (size_type r = 0; r < target_dim; ++r)
+                for (size_type j = 0; j < ndof; ++j)
+                  t[r + q*target_dim + k*qdim] += coeff[j*Qmult+q] * (*itZ++);
+          }
+        }
       }
       return 0;
     }
@@ -3411,7 +3411,7 @@ namespace getfem {
               for (size_type r = 0; r < target_dim; ++r)
                 for (size_type j = 0; j < ndof; ++j, ++it)
                   t[r + q*target_dim + kl*qdim] += coeff[j*Qmult+q] * (*it);
-          } 
+          }
         }
       }
       return 0;
@@ -3472,50 +3472,50 @@ namespace getfem {
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: value of test functions");
       if (qdim == 1) {
-       std::copy(Z.begin(), Z.end(), t.begin());
+        std::copy(Z.begin(), Z.end(), t.begin());
       } else {
-       size_type target_dim = Z.sizes()[1];
-       size_type Qmult = qdim / target_dim;
-       if (Qmult == 1) {
-         std::copy(Z.begin(), Z.end(), t.begin());
-       } else {
-         if (target_dim == 1) {
-           size_type ndof = Z.sizes()[0];
-           GA_DEBUG_ASSERT(t.size() == Z.size() * Qmult * Qmult,
-                           "Wrong size for base vector");
-           std::fill(t.begin(), t.end(), scalar_type(0));
-           auto itZ = Z.begin();
-           size_type s = t.sizes()[0], sss = s+1;
-           
-           // Performs t(i*Qmult+j, k*Qmult + j) = Z(i,k);
-           auto it = t.begin();
-           for (size_type i = 0; i < ndof; ++i, ++itZ) {
-             if (i) it += Qmult;
-             auto it2 = it;
-             *it2 = *itZ;
-             for (size_type j = 1; j < Qmult; ++j) { it2 += sss; *it2 = *itZ; }
-           }
-         } else {
-           size_type ndof = Z.sizes()[0];
-           GA_DEBUG_ASSERT(t.size() == Z.size() * Qmult * Qmult,
-                           "Wrong size for base vector");
-           std::fill(t.begin(), t.end(), scalar_type(0));
-           auto itZ = Z.begin();
-           size_type s = t.sizes()[0], ss = s * Qmult, sss = s+1;
-           
-           // Performs t(i*Qmult+j, k*Qmult + j) = Z(i,k);
-           for (size_type k = 0; k < target_dim; ++k) {
-             auto it = t.begin() + (ss * k);
-             for (size_type i = 0; i < ndof; ++i, ++itZ) {
-               if (i) it += Qmult;
-               auto it2 = it;
-               *it2 = *itZ;
-               for (size_type j = 1; j < Qmult; ++j)
-                 { it2 += sss; *it2 = *itZ; }
-             }
-           }
-         }
-       }
+        size_type target_dim = Z.sizes()[1];
+        size_type Qmult = qdim / target_dim;
+        if (Qmult == 1) {
+          std::copy(Z.begin(), Z.end(), t.begin());
+        } else {
+          if (target_dim == 1) {
+            size_type ndof = Z.sizes()[0];
+            GA_DEBUG_ASSERT(t.size() == Z.size() * Qmult * Qmult,
+                            "Wrong size for base vector");
+            std::fill(t.begin(), t.end(), scalar_type(0));
+            auto itZ = Z.begin();
+            size_type s = t.sizes()[0], sss = s+1;
+
+            // Performs t(i*Qmult+j, k*Qmult + j) = Z(i,k);
+            auto it = t.begin();
+            for (size_type i = 0; i < ndof; ++i, ++itZ) {
+              if (i) it += Qmult;
+              auto it2 = it;
+              *it2 = *itZ;
+              for (size_type j = 1; j < Qmult; ++j) { it2 += sss; *it2 = *itZ; 
}
+            }
+          } else {
+            size_type ndof = Z.sizes()[0];
+            GA_DEBUG_ASSERT(t.size() == Z.size() * Qmult * Qmult,
+                            "Wrong size for base vector");
+            std::fill(t.begin(), t.end(), scalar_type(0));
+            auto itZ = Z.begin();
+            size_type s = t.sizes()[0], ss = s * Qmult, sss = s+1;
+
+            // Performs t(i*Qmult+j, k*Qmult + j) = Z(i,k);
+            for (size_type k = 0; k < target_dim; ++k) {
+              auto it = t.begin() + (ss * k);
+              for (size_type i = 0; i < ndof; ++i, ++itZ) {
+                if (i) it += Qmult;
+                auto it2 = it;
+                *it2 = *itZ;
+                for (size_type j = 1; j < Qmult; ++j)
+                  { it2 += sss; *it2 = *itZ; }
+              }
+            }
+          }
+        }
       }
       return 0;
     }
@@ -3529,55 +3529,55 @@ namespace getfem {
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: gradient of test functions");
       if (qdim == 1) {
-       std::copy(Z.begin(), Z.end(), t.begin());
+        std::copy(Z.begin(), Z.end(), t.begin());
       } else {
-       size_type target_dim = Z.sizes()[1];
-       size_type Qmult = qdim / target_dim;
-       if (Qmult == 1) {
-         std::copy(Z.begin(), Z.end(), t.begin());
-       } else {
-         if (target_dim == 1) {
-           size_type ndof = Z.sizes()[0];
-           size_type N = Z.sizes()[2];
-           GA_DEBUG_ASSERT(t.size() == Z.size() * Qmult * Qmult,
-                           "Wrong size for gradient vector");
-           std::fill(t.begin(), t.end(), scalar_type(0));
-           base_tensor::const_iterator itZ = Z.begin();
-           size_type s = t.sizes()[0], sss = s+1, ssss = s*target_dim*Qmult;
-           
-           // Performs t(i*Qmult+j, k*Qmult + j, l) = Z(i,k,l);
-           for (size_type l = 0; l < N; ++l) {
-             base_tensor::iterator it = t.begin() + (ssss*l);
-             for (size_type i = 0; i < ndof; ++i, ++itZ) {
-               if (i) it += Qmult;
-               base_tensor::iterator it2 = it;
-               *it2 = *itZ;
-               for (size_type j = 1; j < Qmult; ++j) { it2+=sss; *it2=*itZ; }
-             }
-           }
-         } else {
-           size_type ndof = Z.sizes()[0];
-           size_type N = Z.sizes()[2];
-           GA_DEBUG_ASSERT(t.size() == Z.size() * Qmult * Qmult,
-                           "Wrong size for gradient vector");
-           std::fill(t.begin(), t.end(), scalar_type(0));
-           base_tensor::const_iterator itZ = Z.begin();
-           size_type s = t.sizes()[0], ss = s * Qmult, sss = s+1;
-           size_type ssss = ss*target_dim;
-           
-           // Performs t(i*Qmult+j, k*Qmult + j, l) = Z(i,k,l);
-           for (size_type l = 0; l < N; ++l)
-             for (size_type k = 0; k < target_dim; ++k) {
-               base_tensor::iterator it = t.begin() + (ss * k + ssss*l);
-               for (size_type i = 0; i < ndof; ++i, ++itZ) {
-                 if (i) it += Qmult;
-                 base_tensor::iterator it2 = it;
-                 *it2 = *itZ;
-                 for (size_type j = 1; j < Qmult; ++j) { it2+=sss; *it2=*itZ; }
-               }
-             }
-         }
-       }
+        size_type target_dim = Z.sizes()[1];
+        size_type Qmult = qdim / target_dim;
+        if (Qmult == 1) {
+          std::copy(Z.begin(), Z.end(), t.begin());
+        } else {
+          if (target_dim == 1) {
+            size_type ndof = Z.sizes()[0];
+            size_type N = Z.sizes()[2];
+            GA_DEBUG_ASSERT(t.size() == Z.size() * Qmult * Qmult,
+                            "Wrong size for gradient vector");
+            std::fill(t.begin(), t.end(), scalar_type(0));
+            base_tensor::const_iterator itZ = Z.begin();
+            size_type s = t.sizes()[0], sss = s+1, ssss = s*target_dim*Qmult;
+
+            // Performs t(i*Qmult+j, k*Qmult + j, l) = Z(i,k,l);
+            for (size_type l = 0; l < N; ++l) {
+              base_tensor::iterator it = t.begin() + (ssss*l);
+              for (size_type i = 0; i < ndof; ++i, ++itZ) {
+                if (i) it += Qmult;
+                base_tensor::iterator it2 = it;
+                *it2 = *itZ;
+                for (size_type j = 1; j < Qmult; ++j) { it2+=sss; *it2=*itZ; }
+              }
+            }
+          } else {
+            size_type ndof = Z.sizes()[0];
+            size_type N = Z.sizes()[2];
+            GA_DEBUG_ASSERT(t.size() == Z.size() * Qmult * Qmult,
+                            "Wrong size for gradient vector");
+            std::fill(t.begin(), t.end(), scalar_type(0));
+            base_tensor::const_iterator itZ = Z.begin();
+            size_type s = t.sizes()[0], ss = s * Qmult, sss = s+1;
+            size_type ssss = ss*target_dim;
+
+            // Performs t(i*Qmult+j, k*Qmult + j, l) = Z(i,k,l);
+            for (size_type l = 0; l < N; ++l)
+              for (size_type k = 0; k < target_dim; ++k) {
+                base_tensor::iterator it = t.begin() + (ss * k + ssss*l);
+                for (size_type i = 0; i < ndof; ++i, ++itZ) {
+                  if (i) it += Qmult;
+                  base_tensor::iterator it2 = it;
+                  *it2 = *itZ;
+                  for (size_type j = 1; j < Qmult; ++j) { it2+=sss; *it2=*itZ; 
}
+                }
+              }
+          }
+        }
       }
       return 0;
     }
@@ -3594,27 +3594,27 @@ namespace getfem {
     // Z(ndof) --> t(qdim*ndof,qdim*target_dim)
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: vectorized value of test functions");
-      
+
       size_type ndof = Z.sizes()[0];
       GA_DEBUG_ASSERT(t.size() == Z.size() * qdim * qdim,
-                     "Wrong size for base vector");
+                      "Wrong size for base vector");
       // std::fill(t.begin(), t.end(), scalar_type(0)); // Factorized
       auto itZ = Z.begin();
       size_type s = t.sizes()[0], sss = s+1;
-      
+
       // Performs t(i*qdim+j, k*qdim + j) = Z(i,k);
       auto it = t.begin();
       for (size_type i = 0; i < ndof; ++i, ++itZ) {
-       if (i) it += qdim;
-       auto it2 = it;
-       *it2 = *itZ;
-       for (size_type j = 1; j < qdim; ++j) { it2 += sss; *it2 = *itZ; }
+        if (i) it += qdim;
+        auto it2 = it;
+        *it2 = *itZ;
+        for (size_type j = 1; j < qdim; ++j) { it2 += sss; *it2 = *itZ; }
       }
       return 0;
     }
 
     ga_instruction_copy_vect_val_base(base_tensor &tt, const base_tensor &Z_,
-                                     size_type q) : t(tt), Z(Z_), qdim(q) {}
+                                      size_type q) : t(tt), Z(Z_), qdim(q) {}
   };
 
   struct ga_instruction_copy_vect_grad_base
@@ -3625,26 +3625,26 @@ namespace getfem {
       size_type ndof = Z.sizes()[0];
       size_type N = Z.sizes()[2];
       GA_DEBUG_ASSERT(t.size() == Z.size() * qdim * qdim,
-                     "Wrong size for gradient vector");
+                      "Wrong size for gradient vector");
       // std::fill(t.begin(), t.end(), scalar_type(0)); // Factorized
       base_tensor::const_iterator itZ = Z.begin();
       size_type s = t.sizes()[0], sss = s+1, ssss = s*qdim;
-      
+
       // Performs t(i*qdim+j, k*qdim + j, l) = Z(i,k,l);
       for (size_type l = 0; l < N; ++l) {
-       base_tensor::iterator it = t.begin() + (ssss*l);
-       for (size_type i = 0; i < ndof; ++i, ++itZ) {
-         if (i) it += qdim;
-         base_tensor::iterator it2 = it;
-         *it2 = *itZ;
-         for (size_type j = 1; j < qdim; ++j) { it2+=sss; *it2=*itZ; }
-       }
+        base_tensor::iterator it = t.begin() + (ssss*l);
+        for (size_type i = 0; i < ndof; ++i, ++itZ) {
+          if (i) it += qdim;
+          base_tensor::iterator it2 = it;
+          *it2 = *itZ;
+          for (size_type j = 1; j < qdim; ++j) { it2+=sss; *it2=*itZ; }
+        }
       }
       return 0;
     }
 
     ga_instruction_copy_vect_grad_base(base_tensor &tt, const base_tensor &Z_,
-                                      size_type q)
+                                       size_type q)
       : ga_instruction_copy_vect_val_base(tt,Z_,q) {}
   };
 
@@ -3669,7 +3669,7 @@ namespace getfem {
         for (size_type klm = 0; klm < NNdim; ++klm) {
           base_tensor::iterator it = t.begin() + (ss * klm);
           for (size_type i = 0; i < ndof; ++i, ++itZ) {
-           if (i) it += Qmult;
+            if (i) it += Qmult;
             base_tensor::iterator it2 = it;
             *it2 = *itZ;
             for (size_type j = 1; j < Qmult; ++j) { it2 += sss; *it2 = *itZ; }
@@ -4447,7 +4447,7 @@ namespace getfem {
     }
 
     ga_instruction_deviator(base_tensor &t_, const base_tensor &tc1_,
-                           size_type n_)
+                            size_type n_)
       : t(t_), tc1(tc1_), n(n_) {}
   };
 
@@ -4761,11 +4761,11 @@ namespace getfem {
       }
       // auto it = t.begin(); // Unoptimized version.
       // for (size_type i = 0; i < s1; ++i)
-      //       for (size_type j = 0; j < s2; ++j, ++it) {
-      //         *it = scalar_type(0);
-      //         for (size_type k = 0; k < nn; ++k)
-      //           *it += tc1[i+k*s1] * tc2[j+k*s2];
-      //       }
+      //   for (size_type j = 0; j < s2; ++j, ++it) {
+      //     *it = scalar_type(0);
+      //     for (size_type k = 0; k < nn; ++k)
+      //       *it += tc1[i+k*s1] * tc2[j+k*s2];
+      //   }
 #endif
       return 0;
     }
@@ -4780,26 +4780,26 @@ namespace getfem {
     size_type n, q;
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: reduction operation of size " << n*q <<
-                   " optimized for vectorized second tensor of type 2");
+                    " optimized for vectorized second tensor of type 2");
       size_type nn = n*q, s1 = tc1.size()/nn, s2 = tc2.size()/nn, s2_q = s2/q;
       size_type s1_qq = s1*q, s2_qq = s2*q;
       GA_DEBUG_ASSERT(t.size() == s1*s2, "Internal error");
-      
+
       auto it = t.begin(), it1 = tc1.begin();
       for (size_type i = 0; i < s1; ++i, ++it1) {
-       auto it2 = tc2.begin();
-               for (size_type j = 0; j < s2_q; ++j) {
-         if (j) it2+=q;
-         auto itt1 = it1;
-         for (size_type l = 0; l < q; ++l, ++it) {
-           if (l) itt1 += s1;
-           auto ittt1 = itt1, ittt2 = it2;
-           *it = *ittt1 * (*ittt2);
-           for (size_type m = 1; m < n; ++m) {
-             ittt1 += s1_qq, ittt2 += s2_qq; *it += *ittt1 * (*ittt2);
-           }
-         }
-       }
+        auto it2 = tc2.begin();
+        for (size_type j = 0; j < s2_q; ++j) {
+          if (j) it2+=q;
+          auto itt1 = it1;
+          for (size_type l = 0; l < q; ++l, ++it) {
+            if (l) itt1 += s1;
+            auto ittt1 = itt1, ittt2 = it2;
+            *it = *ittt1 * (*ittt2);
+            for (size_type m = 1; m < n; ++m) {
+              ittt1 += s1_qq, ittt2 += s2_qq; *it += *ittt1 * (*ittt2);
+            }
+          }
+        }
       }
       // base_tensor u = t;
       // ga_instruction_reduction toto(t, tc1, tc2, n*q);
@@ -4808,11 +4808,11 @@ namespace getfem {
       return 0;
     }
     ga_instruction_reduction_opt0_2(base_tensor &t_, base_tensor &tc1_,
-                                   base_tensor &tc2_, size_type n_,
-                                   size_type q_)
+                                    base_tensor &tc2_, size_type n_,
+                                    size_type q_)
       : t(t_), tc1(tc1_), tc2(tc2_), n(n_), q(q_) {}
   };
-  
+
   // Performs Ani Bmi -> Cmn
   template <int N>
   struct ga_instruction_reduction_opt0_2_unrolled : public ga_instruction {
@@ -4820,31 +4820,31 @@ namespace getfem {
     size_type q;
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: unrolled reduction operation of size " << N*q
-                   << " optimized for vectorized second tensor of type 2");
+                    << " optimized for vectorized second tensor of type 2");
       size_type nn = N*q, s1 = tc1.size()/nn, s2 = tc2.size()/nn, s2_q = s2/q;
       size_type s1_qq = s1*q, s2_qq = s2*q;
       GA_DEBUG_ASSERT(t.size() == s1*s2, "Internal error");
-      
+
       auto it = t.begin(), it1 = tc1.begin();
       for (size_type i = 0; i < s1; ++i, ++it1) {
-       auto it2 = tc2.begin();
-               for (size_type j = 0; j < s2_q; ++j) {
-         if (j) it2+=q;
-         auto itt1 = it1;
-         for (size_type l = 0; l < q; ++l, ++it) {
-           if (l) itt1 += s1;
-           auto ittt1 = itt1, ittt2 = it2;
-           *it = *ittt1 * (*ittt2);
-           for (size_type m = 1; m < N; ++m) {
-             ittt1 += s1_qq, ittt2 += s2_qq; *it += *ittt1 * (*ittt2);
-           }
-         }
-       }
+        auto it2 = tc2.begin();
+        for (size_type j = 0; j < s2_q; ++j) {
+          if (j) it2+=q;
+          auto itt1 = it1;
+          for (size_type l = 0; l < q; ++l, ++it) {
+            if (l) itt1 += s1;
+            auto ittt1 = itt1, ittt2 = it2;
+            *it = *ittt1 * (*ittt2);
+            for (size_type m = 1; m < N; ++m) {
+              ittt1 += s1_qq, ittt2 += s2_qq; *it += *ittt1 * (*ittt2);
+            }
+          }
+        }
       }
       return 0;
     }
     ga_instruction_reduction_opt0_2_unrolled(base_tensor &t_, base_tensor 
&tc1_,
-                                            base_tensor &tc2_, size_type q_)
+                                             base_tensor &tc2_, size_type q_)
       : t(t_), tc1(tc1_), tc2(tc2_), q(q_) {}
   };
 
@@ -4854,26 +4854,26 @@ namespace getfem {
     base_tensor &t, &tc1, &tc2;
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: unrolled reduction operation of size " << N*Q
-                   << " optimized for vectorized second tensor of type 2");
+                    << " optimized for vectorized second tensor of type 2");
       size_type s1 = tc1.size()/(N*Q), s2 = tc2.size()/(N*Q), s2_q = s2/Q;
       size_type s1_qq = s1*Q, s2_qq = s2*Q;
       GA_DEBUG_ASSERT(t.size() == s1*s2, "Internal error");
-      
+
       auto it = t.begin(), it1 = tc1.begin();
       for (size_type i = 0; i < s1; ++i, ++it1) {
-       auto it2 = tc2.begin();
-               for (size_type j = 0; j < s2_q; ++j) {
-         if (j) it2+=Q;
-         auto itt1 = it1;
-         for (size_type l = 0; l < Q; ++l, ++it) {
-           if (l) itt1 += s1;
-           auto ittt1 = itt1, ittt2 = it2;
-           *it = *ittt1 * (*ittt2);
-           for (size_type m = 1; m < N; ++m) {
-             ittt1 += s1_qq, ittt2 += s2_qq; *it += *ittt1 * (*ittt2);
-           }
-         }
-       }
+        auto it2 = tc2.begin();
+        for (size_type j = 0; j < s2_q; ++j) {
+          if (j) it2+=Q;
+          auto itt1 = it1;
+          for (size_type l = 0; l < Q; ++l, ++it) {
+            if (l) itt1 += s1;
+            auto ittt1 = itt1, ittt2 = it2;
+            *it = *ittt1 * (*ittt2);
+            for (size_type m = 1; m < N; ++m) {
+              ittt1 += s1_qq, ittt2 += s2_qq; *it += *ittt1 * (*ittt2);
+            }
+          }
+        }
       }
       return 0;
     }
@@ -4888,30 +4888,30 @@ namespace getfem {
     size_type n, q;
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: reduction operation of size " << n*q <<
-                   " optimized for vectorized second tensor of type 2");
+                    " optimized for vectorized second tensor of type 2");
       size_type nn = n*q, s1 = tc1.size()/nn, s2 = tc2.size()/nn;
       size_type s1_q = s1/q, s1_qq = s1*q, s2_qq = s2*q;
       GA_DEBUG_ASSERT(t.size() == s1*s2, "Internal error");
 
       auto it = t.begin();
       for (size_type i = 0; i < s1_q; ++i)  {
-       auto it1 = tc1.begin() + i*q;
-       for (size_type l = 0; l < q; ++l) {
-         auto it2 = tc2.begin() + l*s2;
-         for (size_type j = 0; j < s2; ++j, ++it, ++it2) {
-           auto itt1 = it1, itt2 = it2;
-           *it = *itt1 * (*itt2);
-           for (size_type m = 1; m < n; ++m) {
-             itt1 += s1_qq, itt2 += s2_qq; *it += *itt1 * (*itt2);
-           }
-         }
-       }
+        auto it1 = tc1.begin() + i*q;
+        for (size_type l = 0; l < q; ++l) {
+          auto it2 = tc2.begin() + l*s2;
+          for (size_type j = 0; j < s2; ++j, ++it, ++it2) {
+            auto itt1 = it1, itt2 = it2;
+            *it = *itt1 * (*itt2);
+            for (size_type m = 1; m < n; ++m) {
+              itt1 += s1_qq, itt2 += s2_qq; *it += *itt1 * (*itt2);
+            }
+          }
+        }
       }
       return 0;
     }
     ga_instruction_reduction_opt2_0(base_tensor &t_, base_tensor &tc1_,
-                                   base_tensor &tc2_, size_type n_,
-                                   size_type q_)
+                                    base_tensor &tc2_, size_type n_,
+                                    size_type q_)
       : t(t_), tc1(tc1_), tc2(tc2_), n(n_), q(q_) { }
   };
 
@@ -4922,28 +4922,28 @@ namespace getfem {
     size_type q;
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: unrolled reduction operation of size " << N*q
-                   << " optimized for vectorized second tensor of type 2");
+                    << " optimized for vectorized second tensor of type 2");
       size_type nn = N*q, s1 = tc1.size()/nn, s2 = tc2.size()/nn;
       size_type s1_q = s1/q, s1_qq = s1*q, s2_qq = s2*q;
       GA_DEBUG_ASSERT(t.size() == s1*s2, "Internal error");
 
       auto it = t.begin(), it1 = tc1.begin();
       for (size_type i = 0; i < s1_q; ++i, it1 += q)  {
-       for (size_type l = 0; l < q; ++l) {
-         auto it2 = tc2.begin() + l*s2;
-         for (size_type j = 0; j < s2; ++j, ++it, ++it2) {
-           auto itt1 = it1, itt2 = it2;
-           *it = *itt1 * (*itt2);
-           for (size_type m = 1; m < N; ++m) {
-             itt1 += s1_qq, itt2 += s2_qq; *it += *itt1 * (*itt2);
-           }
-         }
-       }
+        for (size_type l = 0; l < q; ++l) {
+          auto it2 = tc2.begin() + l*s2;
+          for (size_type j = 0; j < s2; ++j, ++it, ++it2) {
+            auto itt1 = it1, itt2 = it2;
+            *it = *itt1 * (*itt2);
+            for (size_type m = 1; m < N; ++m) {
+              itt1 += s1_qq, itt2 += s2_qq; *it += *itt1 * (*itt2);
+            }
+          }
+        }
       }
       return 0;
     }
     ga_instruction_reduction_opt2_0_unrolled(base_tensor &t_, base_tensor 
&tc1_,
-                                            base_tensor &tc2_, size_type q_)
+                                             base_tensor &tc2_, size_type q_)
       : t(t_), tc1(tc1_), tc2(tc2_), q(q_) {}
   };
 
@@ -4953,23 +4953,23 @@ namespace getfem {
     base_tensor &t, &tc1, &tc2;
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: unrolled reduction operation of size " << N*Q
-                   << " optimized for vectorized second tensor of type 2");
+                    << " optimized for vectorized second tensor of type 2");
       size_type s1 = tc1.size()/(N*Q), s2 = tc2.size()/(N*Q);
       size_type s1_q = s1/Q, s1_qq = s1*Q, s2_qq = s2*Q;
       GA_DEBUG_ASSERT(t.size() == s1*s2, "Internal error");
 
       auto it = t.begin(), it1 = tc1.begin();
       for (size_type i = 0; i < s1_q; ++i, it1 += Q)  {
-       for (size_type l = 0; l < Q; ++l) {
-         auto it2 = tc2.begin() + l*s2;
-         for (size_type j = 0; j < s2; ++j, ++it, ++it2) {
-           auto itt1 = it1, itt2 = it2;
-           *it = *itt1 * (*itt2);
-           for (size_type m = 1; m < N; ++m) {
-             itt1 += s1_qq, itt2 += s2_qq; *it += *itt1 * (*itt2);
-           }
-         }
-       }
+        for (size_type l = 0; l < Q; ++l) {
+          auto it2 = tc2.begin() + l*s2;
+          for (size_type j = 0; j < s2; ++j, ++it, ++it2) {
+            auto itt1 = it1, itt2 = it2;
+            *it = *itt1 * (*itt2);
+            for (size_type m = 1; m < N; ++m) {
+              itt1 += s1_qq, itt2 += s2_qq; *it += *itt1 * (*itt2);
+            }
+          }
+        }
       }
       return 0;
     }
@@ -4984,24 +4984,24 @@ namespace getfem {
     size_type nn;
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: reduction operation of size " << nn <<
-                   " optimized for vectorized second tensor of type 1");
+                    " optimized for vectorized second tensor of type 1");
       size_type ss1=tc1.size(), s1 = ss1/nn, s2=tc2.size()/nn, s2_n=s2/nn;
-      
+
       auto it = t.begin(), it1 = tc1.begin();
       for (size_type i = 0; i < s1; ++i, ++it1) {
-       auto it2 = tc2.begin();
-       for (size_type j = 0; j < s2_n; ++j) {
-         if (j) it2 += nn;
-         auto itt1 = it1;
-         *it++ = (*itt1) * (*it2);
-         for (size_type k = 1; k < nn; ++k)
-           { itt1 += s1; *it++ = (*itt1) * (*it2); }
-       }
+        auto it2 = tc2.begin();
+        for (size_type j = 0; j < s2_n; ++j) {
+          if (j) it2 += nn;
+          auto itt1 = it1;
+          *it++ = (*itt1) * (*it2);
+          for (size_type k = 1; k < nn; ++k)
+            { itt1 += s1; *it++ = (*itt1) * (*it2); }
+        }
       }
       return 0;
     }
     ga_instruction_reduction_opt0_1(base_tensor &t_, base_tensor &tc1_,
-                                   base_tensor &tc2_, size_type n_)
+                                    base_tensor &tc2_, size_type n_)
       : t(t_), tc1(tc1_), tc2(tc2_), nn(n_) {}
   };
 
@@ -5022,18 +5022,18 @@ namespace getfem {
     base_tensor &t, &tc1, &tc2;
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: unrolled reduction operation of size " << N
-                   << " optimized for vectorized second tensor of type 1");
+                    << " optimized for vectorized second tensor of type 1");
       size_type s1 = tc1.size()/N, s2 = tc2.size()/N;
       auto it = t.begin(), it1 = tc1.begin();
       for (size_type i = 0; i < s1; ++i, ++it1) {
-       auto it2 = tc2.begin(), it2e = it2 + s2;
-       for (; it2 != it2e; it2 += N, it += N)
-         reduc_elem_unrolled_opt1_<N>(it, it1, *it2, s1);
+        auto it2 = tc2.begin(), it2e = it2 + s2;
+        for (; it2 != it2e; it2 += N, it += N)
+          reduc_elem_unrolled_opt1_<N>(it, it1, *it2, s1);
       }
       return 0;
     }
     ga_instruction_reduction_opt0_1_unrolled(base_tensor &t_, base_tensor 
&tc1_,
-                                            base_tensor &tc2_)
+                                             base_tensor &tc2_)
       : t(t_), tc1(tc1_), tc2(tc2_) {}
   };
 
@@ -5043,7 +5043,7 @@ namespace getfem {
     size_type nn;
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: reduction operation of size " << nn <<
-                   " optimized for both vectorized tensor of type 1");
+                    " optimized for both vectorized tensor of type 1");
       size_type s1 = tc1.size()/nn, s2 = tc2.size()/nn, s2_1 = s2+1;
       GA_DEBUG_ASSERT(t.size() == s2*s1, "Internal error");
       size_type ss1 = s1/nn, ss2 = s2/nn;
@@ -5051,24 +5051,24 @@ namespace getfem {
       // std::fill(t.begin(), t.end(), scalar_type(0)); // Factorized
       auto it2 = tc2.begin();
       for (size_type j = 0; j < ss2; ++j) {
-       if (j) it2 += nn;
-       auto it1 = tc1.begin(), it = t.begin() + j*nn;
-       for (size_type i = 0; i < ss1; ++i) {
-         if (i) { it1 += nn, it += s2*nn; }
-                 scalar_type a = (*it1) * (*it2);
-         auto itt = it;
-         *itt = a; itt += s2_1; *itt = a;
-                 for (size_type k = 2; k < nn; ++k) { itt += s2_1; *itt = a; }
-               }
-      }      
+        if (j) it2 += nn;
+        auto it1 = tc1.begin(), it = t.begin() + j*nn;
+        for (size_type i = 0; i < ss1; ++i) {
+          if (i) { it1 += nn, it += s2*nn; }
+          scalar_type a = (*it1) * (*it2);
+          auto itt = it;
+          *itt = a; itt += s2_1; *itt = a;
+          for (size_type k = 2; k < nn; ++k) { itt += s2_1; *itt = a; }
+        }
+      }
       return 0;
     }
     ga_instruction_reduction_opt1_1(base_tensor &t_, base_tensor &tc1_,
-                                   base_tensor &tc2_, size_type n_)
+                                    base_tensor &tc2_, size_type n_)
       : t(t_), tc1(tc1_), tc2(tc2_), nn(n_) {}
   };
 
-  
+
 
   template<int N> inline scalar_type reduc_elem_unrolled__
   (base_tensor::iterator &it1, base_tensor::iterator &it2,
@@ -5189,9 +5189,9 @@ namespace getfem {
   (assembly_tensor &t_, assembly_tensor &tc1_, assembly_tensor &tc2_,
    size_type n, bool &to_clear) {
     base_tensor &t = t_.tensor(), &tc1 = tc1_.tensor(), &tc2 = tc2_.tensor();
-   
+
     if (tc1_.sparsity() == 1 && tc2_.sparsity() == 1 &&
-       tc1_.qdim() == n && tc2_.qdim() == n) {
+        tc1_.qdim() == n && tc2_.qdim() == n) {
       to_clear = true;
       t_.set_sparsity(10, tc1_.qdim());
       return std::make_shared<ga_instruction_reduction_opt1_1>(t, tc1, tc2, n);
@@ -5200,173 +5200,173 @@ namespace getfem {
     if (tc2_.sparsity() == 1) {
       switch(n) {
       case 2:
-       return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<2>>
-         (t, tc1, tc2);
+        return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<2>>
+          (t, tc1, tc2);
       case 3:
-       return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<3>>
-         (t, tc1, tc2);
+        return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<3>>
+          (t, tc1, tc2);
       case 4:
-       return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<4>>
-         (t, tc1, tc2);
+        return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<4>>
+          (t, tc1, tc2);
       case 5:
-       return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<5>>
-         (t, tc1, tc2);
+        return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<5>>
+          (t, tc1, tc2);
       default:
-       return std::make_shared<ga_instruction_reduction_opt0_1>(t,tc1,tc2, n);
+        return std::make_shared<ga_instruction_reduction_opt0_1>(t,tc1,tc2, n);
       }
     }
         if (tc2_.sparsity() == 2) {
       size_type q2 = tc2.sizes()[1];
       size_type n2 = (tc2.sizes().size() > 2) ? tc2.sizes()[1] : 1;
       if (n2*q2 == n) {
-       switch (n2) {
-       case 1:
-         switch (q2) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<1>>
-             (t, tc1, tc2, q2);
-         }
-       case 2:
-         switch (q2) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<2>>
-             (t, tc1, tc2, q2);
-         }
-       case 3:
-         switch (q2) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<3>>
-             (t, tc1, tc2, q2);
-         }
-       case 4:
-         return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<4>>
-           (t, tc1, tc2, q2);
-       case 5:
-         return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<5>>
-           (t, tc1, tc2, q2);
-       default:
-         return std::make_shared<ga_instruction_reduction_opt0_2>
-           (t,tc1,tc2,n2,q2);
-       }
+        switch (n2) {
+        case 1:
+          switch (q2) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<1>>
+              (t, tc1, tc2, q2);
+          }
+        case 2:
+          switch (q2) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<2>>
+              (t, tc1, tc2, q2);
+          }
+        case 3:
+          switch (q2) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<3>>
+              (t, tc1, tc2, q2);
+          }
+        case 4:
+          return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<4>>
+            (t, tc1, tc2, q2);
+        case 5:
+          return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<5>>
+            (t, tc1, tc2, q2);
+        default:
+          return std::make_shared<ga_instruction_reduction_opt0_2>
+            (t,tc1,tc2,n2,q2);
+        }
       }
     }
     if (tc1_.sparsity() == 2) {
       size_type q1 = tc1.sizes()[1];
       size_type n1 = (tc1.sizes().size() > 2) ? tc1.sizes()[1] : 1;
       if (n1*q1 == n) {
-       switch (n1) {
-       case 1:
-         switch (q1) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<1>>
-             (t, tc1, tc2, q1);
-         }
-       case 2:
-         switch (q1) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<2>>
-             (t, tc1, tc2, q1);
-         }
-       case 3:
-         switch (q1) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<3>>
-             (t, tc1, tc2, q1);
-         }
-         return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<3>>
-           (t, tc1, tc2, q1);
-       case 4:
-         return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<4>>
-           (t, tc1, tc2, q1);
-       case 5:
-         return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<5>>
-           (t, tc1, tc2, q1);
-       default:
-         return std::make_shared<ga_instruction_reduction_opt2_0>
-           (t,tc1,tc2, n1, q1);
-       }
+        switch (n1) {
+        case 1:
+          switch (q1) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<1>>
+              (t, tc1, tc2, q1);
+          }
+        case 2:
+          switch (q1) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<2>>
+              (t, tc1, tc2, q1);
+          }
+        case 3:
+          switch (q1) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<3>>
+              (t, tc1, tc2, q1);
+          }
+          return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<3>>
+            (t, tc1, tc2, q1);
+        case 4:
+          return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<4>>
+            (t, tc1, tc2, q1);
+        case 5:
+          return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<5>>
+            (t, tc1, tc2, q1);
+        default:
+          return std::make_shared<ga_instruction_reduction_opt2_0>
+            (t,tc1,tc2, n1, q1);
+        }
       }
     }
 
     switch(n) {
     case  2 : return std::make_shared<ga_instruction_reduction_unrolled< 2>>
-                    (t, tc1, tc2);
+                     (t, tc1, tc2);
     case  3 : return std::make_shared<ga_instruction_reduction_unrolled< 3>>
-                    (t, tc1, tc2);
+                     (t, tc1, tc2);
     case  4 : return std::make_shared<ga_instruction_reduction_unrolled< 4>>
-                    (t, tc1, tc2);
+                     (t, tc1, tc2);
     case  5 : return std::make_shared<ga_instruction_reduction_unrolled< 5>>
                      (t, tc1, tc2);
     case  6 : return std::make_shared<ga_instruction_reduction_unrolled< 6>>
@@ -5388,11 +5388,11 @@ namespace getfem {
     case 14 : return std::make_shared<ga_instruction_reduction_unrolled<14>>
                      (t, tc1, tc2);
     case 15 : return std::make_shared<ga_instruction_reduction_unrolled<15>>
-                    (t, tc1, tc2);
+                     (t, tc1, tc2);
     case 16 : return std::make_shared<ga_instruction_reduction_unrolled<16>>
-                    (t, tc1, tc2);
+                     (t, tc1, tc2);
     default : return std::make_shared<ga_instruction_reduction>
-                    (t, tc1, tc2, n);
+                     (t, tc1, tc2, n);
     }
   }
 
@@ -5402,7 +5402,7 @@ namespace getfem {
     base_tensor &t = t_.tensor(), &tc1 = tc1_.tensor(), &tc2 = tc2_.tensor();
 
     if (tc1_.sparsity() == 1 && tc2_.sparsity() == 1 &&
-       tc1_.qdim() == n && tc2_.qdim() == n) {
+        tc1_.qdim() == n && tc2_.qdim() == n) {
       to_clear = true;
       t_.set_sparsity(10, tc1_.qdim());
       return std::make_shared<ga_instruction_reduction_opt1_1>(t,tc1,tc2,n);
@@ -5410,163 +5410,163 @@ namespace getfem {
     if (tc2_.sparsity() == 1) {
       switch(n) {
       case 2:
-       return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<2>>
-         (t, tc1, tc2);
+        return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<2>>
+          (t, tc1, tc2);
       case 3:
-       return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<3>>
-         (t, tc1, tc2);
+        return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<3>>
+          (t, tc1, tc2);
       case 4:
-       return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<4>>
-         (t, tc1, tc2);
+        return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<4>>
+          (t, tc1, tc2);
       case 5:
-       return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<5>>
-         (t, tc1, tc2);
+        return std::make_shared<ga_instruction_reduction_opt0_1_unrolled<5>>
+          (t, tc1, tc2);
       default:
-       return std::make_shared<ga_instruction_reduction_opt0_1>(t,tc1,tc2, n);
+        return std::make_shared<ga_instruction_reduction_opt0_1>(t,tc1,tc2, n);
       }
     }
     if (tc2_.sparsity() == 2) {
       size_type q2 = tc2.sizes()[1];
       size_type n2 = (tc2.sizes().size() > 2) ? tc2.sizes()[1] : 1;
       if (n2*q2 == n) {
-       switch (n2) {
-       case 1:
-         switch (q2) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<1>>
-             (t, tc1, tc2, q2);
-         }
-       case 2:
-         switch (q2) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<2>>
-             (t, tc1, tc2, q2);
-         }
-       case 3:
-         switch (q2) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<3>>
-             (t, tc1, tc2, q2);
-         }
-       case 4:
-         return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<4>>
-           (t, tc1, tc2, q2);
-       case 5:
-         return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<5>>
-           (t, tc1, tc2, q2);
-       default:
-         return std::make_shared<ga_instruction_reduction_opt0_2>
-           (t,tc1,tc2,n2,q2);
-       }
+        switch (n2) {
+        case 1:
+          switch (q2) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<1,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<1>>
+              (t, tc1, tc2, q2);
+          }
+        case 2:
+          switch (q2) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<2,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<2>>
+              (t, tc1, tc2, q2);
+          }
+        case 3:
+          switch (q2) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt0_2_dunrolled<3,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<3>>
+              (t, tc1, tc2, q2);
+          }
+        case 4:
+          return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<4>>
+            (t, tc1, tc2, q2);
+        case 5:
+          return std::make_shared<ga_instruction_reduction_opt0_2_unrolled<5>>
+            (t, tc1, tc2, q2);
+        default:
+          return std::make_shared<ga_instruction_reduction_opt0_2>
+            (t,tc1,tc2,n2,q2);
+        }
       }
     }
     if (tc1_.sparsity() == 2) {
       size_type q1 = tc1.sizes()[1];
       size_type n1 = (tc1.sizes().size() > 2) ? tc1.sizes()[1] : 1;
       if (n1*q1 == n) {
-       switch (n1) {
-       case 1:
-         switch (q1) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<1>>
-             (t, tc1, tc2, q1);
-         }
-       case 2:
-         switch (q1) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<2>>
-             (t, tc1, tc2, q1);
-         }
-       case 3:
-         switch (q1) {
-         case 2:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,2>>
-             (t, tc1, tc2);
-         case 3:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,3>>
-             (t, tc1, tc2);
-         case 4:
-           return
-             std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,4>>
-             (t, tc1, tc2);
-         default :
-           return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<3>>
-             (t, tc1, tc2, q1);
-         }
-         return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<3>>
-           (t, tc1, tc2, q1);
-       case 4:
-         return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<4>>
-           (t, tc1, tc2, q1);
-       case 5:
-         return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<5>>
-           (t, tc1, tc2, q1);
-       default:
-         return std::make_shared<ga_instruction_reduction_opt2_0>
-           (t,tc1,tc2, n1, q1);
-       }
+        switch (n1) {
+        case 1:
+          switch (q1) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<1,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<1>>
+              (t, tc1, tc2, q1);
+          }
+        case 2:
+          switch (q1) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<2,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<2>>
+              (t, tc1, tc2, q1);
+          }
+        case 3:
+          switch (q1) {
+          case 2:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,2>>
+              (t, tc1, tc2);
+          case 3:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,3>>
+              (t, tc1, tc2);
+          case 4:
+            return
+              std::make_shared<ga_instruction_reduction_opt2_0_dunrolled<3,4>>
+              (t, tc1, tc2);
+          default :
+            return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<3>>
+              (t, tc1, tc2, q1);
+          }
+          return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<3>>
+            (t, tc1, tc2, q1);
+        case 4:
+          return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<4>>
+            (t, tc1, tc2, q1);
+        case 5:
+          return std::make_shared<ga_instruction_reduction_opt2_0_unrolled<5>>
+            (t, tc1, tc2, q1);
+        default:
+          return std::make_shared<ga_instruction_reduction_opt2_0>
+            (t,tc1,tc2, n1, q1);
+        }
       }
     }
 
@@ -5869,7 +5869,7 @@ namespace getfem {
         const base_tensor &t1 = *(components[i]);
         if (t1.size() > 1) {
           GA_DEBUG_ASSERT(t1.size() == s, "Wrong sizes, " << t1.size()
-                         << " != " << s);
+                          << " != " << s);
           for (size_type j = 0; j < s; ++j) *it++ = t1[j];
         } else {
           for (size_type j = 0; j < s; ++j) *it++ = t1[0];
@@ -6363,22 +6363,22 @@ namespace getfem {
       GA_DEBUG_INFO("Instruction: vector term assembly for fem variable");
       if (ipt == 0 || interpolate) {
         elem.resize(t.size());
-       auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
-       size_type nd = ((t.size()) >> 2);
-       for (size_type i = 0; i < nd; ++i) {
-         *it++ = (*itt++) * coeff; *it++ = (*itt++) * coeff;
-         *it++ = (*itt++) * coeff; *it++ = (*itt++) * coeff;
-       }
-       for (; it != ite;) *it++ = (*itt++) * coeff;
+        auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
+        size_type nd = ((t.size()) >> 2);
+        for (size_type i = 0; i < nd; ++i) {
+          *it++ = (*itt++) * coeff; *it++ = (*itt++) * coeff;
+          *it++ = (*itt++) * coeff; *it++ = (*itt++) * coeff;
+        }
+        for (; it != ite;) *it++ = (*itt++) * coeff;
         // gmm::copy(gmm::scaled(t.as_vector(), coeff), elem);
       } else {
-       auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
-       size_type nd = ((t.size()) >> 2);
-       for (size_type i = 0; i < nd; ++i) {
-         *it++ += (*itt++) * coeff; *it++ += (*itt++) * coeff;
-         *it++ += (*itt++) * coeff; *it++ += (*itt++) * coeff;
-       }
-       for (; it != ite;) *it++ += (*itt++) * coeff;
+        auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
+        size_type nd = ((t.size()) >> 2);
+        for (size_type i = 0; i < nd; ++i) {
+          *it++ += (*itt++) * coeff; *it++ += (*itt++) * coeff;
+          *it++ += (*itt++) * coeff; *it++ += (*itt++) * coeff;
+        }
+        for (; it != ite;) *it++ += (*itt++) * coeff;
         // gmm::add(gmm::scaled(t.as_vector(), coeff), elem);
       }
       if (ipt == nbpt-1 || interpolate) {
@@ -6395,11 +6395,11 @@ namespace getfem {
         auto &ct = mf.ind_scalar_basic_dof_of_element(cv_1);
         size_type qmult = mf.get_qdim();
         if (qmult > 1) qmult /= mf.fem_of_element(cv_1)->target_dim();
-       size_type ifirst = I.first();
-       auto ite = elem.begin();
-       for (auto itc = ct.begin(); itc != ct.end(); ++itc)
-         for (size_type q = 0; q < qmult; ++q)
-           V[ifirst+(*itc)+q] += *ite++;
+        size_type ifirst = I.first();
+        auto ite = elem.begin();
+        for (auto itc = ct.begin(); itc != ct.end(); ++itc)
+          for (size_type q = 0; q < qmult; ++q)
+            V[ifirst+(*itc)+q] += *ite++;
         GMM_ASSERT1(ite == elem.end(), "Internal error");
       }
       return 0;
@@ -6444,8 +6444,8 @@ namespace getfem {
       return 0;
      }
     ga_instruction_assignment(base_tensor &t_, base_vector &V_,
-                             const fem_interpolation_context &ctx_,
-                             const im_data *imd_)
+                              const fem_interpolation_context &ctx_,
+                              const im_data *imd_)
       : t(t_), V(V_), ctx(ctx_), imd(imd_) {}
   };
 
@@ -6457,9 +6457,9 @@ namespace getfem {
     base_vector::const_iterator it = elem.cbegin();
     for (const size_type &dof2 : dofs2)
       for (const size_type &dof1 : dofs1) {
-       if (gmm::abs(*it) > threshold)
-         K(dof1, dof2) += *it;
-       ++it;
+        if (gmm::abs(*it) > threshold)
+          K(dof1, dof2) += *it;
+        ++it;
       }
   }
 
@@ -6469,7 +6469,7 @@ namespace getfem {
   //   size_type bb = *((const size_type *)(b));
   //   return  int((*the_indto_sort)[aa]) - int((*the_indto_sort)[bb]);
   // }
-  
+
   inline void add_elem_matrix_
   (gmm::col_matrix<gmm::rsvector<scalar_type>> &K,
    const std::vector<size_type> &dofs1, const std::vector<size_type> &dofs2,
@@ -6478,12 +6478,12 @@ namespace getfem {
     size_type maxest = (N+1) * std::max(dofs1.size(), dofs2.size());
     size_type s1 = dofs1.size(), s2 = dofs2.size();
     gmm::elt_rsvector_<scalar_type> ev;
-    
+
     dofs1_sort.resize(s1);
     for (size_type i = 0; i < s1; ++i) { // insertion sort
       size_type j = i, k = j-1;
       while (j > 0 && dofs1[i] < dofs1[dofs1_sort[k]])
-       { dofs1_sort[j] = dofs1_sort[k]; j--; k--; }
+        { dofs1_sort[j] = dofs1_sort[k]; j--; k--; }
       dofs1_sort[j] = i;
     }
 
@@ -6497,49 +6497,49 @@ namespace getfem {
       if (j) it += s1;
       std::vector<gmm::elt_rsvector_<scalar_type>> &col = K[dofs2[j]];
       size_type nb = col.size();
-      
+
       if (nb == 0) {
-       col.reserve(maxest);
-       for (size_type i = 0; i < s1; ++i) {
-         size_type k = dofs1_sort[i]; ev.e = *(it+k);
-         if (gmm::abs(ev.e) > threshold) { ev.c=dofs1[k]; col.push_back(ev); }
-       }
+        col.reserve(maxest);
+        for (size_type i = 0; i < s1; ++i) {
+          size_type k = dofs1_sort[i]; ev.e = *(it+k);
+          if (gmm::abs(ev.e) > threshold) { ev.c=dofs1[k]; col.push_back(ev); }
+        }
       } else { // column merge
-       size_type ind = 0;
-       for (size_type i = 0; i < s1; ++i) {
-         size_type k = dofs1_sort[i]; ev.e = *(it+k);
-         if (gmm::abs(ev.e) > threshold) {
-           ev.c = dofs1[k]; 
-
-           size_type count = nb - ind, step, l;
-           while (count > 0) {
-             step = count / 2; l = ind + step;
-             if (col[l].c < ev.c) { ind = ++l; count -= step + 1; }
-             else count = step;
-           }
-           
-           auto itc = col.begin() + ind;
-           if (ind != nb && itc->c == ev.c) itc->e += ev.e;
-           else {
-             if (nb - ind > 1100)
-               GMM_WARNING2("Inefficient addition of element in rsvector with "
-                            << col.size() - ind << " non-zero entries");
-             col.push_back(ev);
-             if (ind != nb) {
-               itc = col.begin() + ind;
-               auto ite = col.end(); --ite; auto itee = ite; 
-               for (; ite != itc; --ite) { --itee; *ite = *itee; }
-               *itc = ev;
-             }
-             ++nb;
-           }
-           ++ind; 
-         }
-       }
-      }
-    }
-  }
-  
+        size_type ind = 0;
+        for (size_type i = 0; i < s1; ++i) {
+          size_type k = dofs1_sort[i]; ev.e = *(it+k);
+          if (gmm::abs(ev.e) > threshold) {
+            ev.c = dofs1[k];
+
+            size_type count = nb - ind, step, l;
+            while (count > 0) {
+              step = count / 2; l = ind + step;
+              if (col[l].c < ev.c) { ind = ++l; count -= step + 1; }
+              else count = step;
+            }
+
+            auto itc = col.begin() + ind;
+            if (ind != nb && itc->c == ev.c) itc->e += ev.e;
+            else {
+              if (nb - ind > 1100)
+                GMM_WARNING2("Inefficient addition of element in rsvector with "
+                             << col.size() - ind << " non-zero entries");
+              col.push_back(ev);
+              if (ind != nb) {
+                itc = col.begin() + ind;
+                auto ite = col.end(); --ite; auto itee = ite;
+                for (; ite != itc; --ite) { --itee; *ite = *itee; }
+                *itc = ev;
+              }
+              ++nb;
+            }
+            ++ind;
+          }
+        }
+      }
+    }
+  }
+
 
   template <class MAT = model_real_sparse_matrix>
   struct ga_instruction_matrix_assembly : public ga_instruction {
@@ -6559,32 +6559,32 @@ namespace getfem {
       GA_DEBUG_INFO("Instruction: matrix term assembly");
       if (ipt == 0 || interpolate) {
         elem.resize(t.size());
-       auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
-       scalar_type e = coeff*alpha1*alpha2;
-       size_type nd = ((t.size()) >> 2);
-       for (size_type i = 0; i < nd; ++i) {
-         *it++ = (*itt++) * e; *it++ = (*itt++) * e;
-         *it++ = (*itt++) * e; *it++ = (*itt++) * e;
-       }
-       for (; it != ite;) *it++ = (*itt++) * e;
+        auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
+        scalar_type e = coeff*alpha1*alpha2;
+        size_type nd = ((t.size()) >> 2);
+        for (size_type i = 0; i < nd; ++i) {
+          *it++ = (*itt++) * e; *it++ = (*itt++) * e;
+          *it++ = (*itt++) * e; *it++ = (*itt++) * e;
+        }
+        for (; it != ite;) *it++ = (*itt++) * e;
         // gmm::copy(gmm::scaled(t.as_vector(), coeff*alpha1*alpha2), elem);
       } else {
-       // Faster than a daxpy blas call on my config
-       auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
-       scalar_type e = coeff*alpha1*alpha2;
-       size_type nd = ((t.size()) >> 2);
-       for (size_type i = 0; i < nd; ++i) {
-         *it++ += (*itt++) * e; *it++ += (*itt++) * e;
-         *it++ += (*itt++) * e; *it++ += (*itt++) * e;
-       }
-       for (; it != ite;) *it++ += (*itt++) * e;
+        // Faster than a daxpy blas call on my config
+        auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
+        scalar_type e = coeff*alpha1*alpha2;
+        size_type nd = ((t.size()) >> 2);
+        for (size_type i = 0; i < nd; ++i) {
+          *it++ += (*itt++) * e; *it++ += (*itt++) * e;
+          *it++ += (*itt++) * e; *it++ += (*itt++) * e;
+        }
+        for (; it != ite;) *it++ += (*itt++) * e;
         // gmm::add(gmm::scaled(t.as_vector(), coeff*alpha1*alpha2), elem);
       }
       if (ipt == nbpt-1 || interpolate) {
         const mesh_fem *pmf1 = mfg1 ? *mfg1 : mfn1;
         const mesh_fem *pmf2 = mfg2 ? *mfg2 : mfn2;
         bool reduced = (pmf1 && pmf1->is_reduced())
-         || (pmf2 && pmf2->is_reduced());
+          || (pmf2 && pmf2->is_reduced());
         MAT &K = reduced ? Kr : Kn;
         const gmm::sub_interval &I1 = reduced ? Ir1 : In1;
         const gmm::sub_interval &I2 = reduced ? Ir2 : In2;
@@ -6594,14 +6594,14 @@ namespace getfem {
         if (ninf == scalar_type(0)) return 0;
 
         size_type s1 = t.sizes()[0], s2 = t.sizes()[1];
-       size_type cv1 = pmf1 ? ctx1.convex_num() : s1;
-       size_type cv2 = pmf2 ? ctx2.convex_num() : s2;
-       size_type N = 1;
+        size_type cv1 = pmf1 ? ctx1.convex_num() : s1;
+        size_type cv2 = pmf2 ? ctx2.convex_num() : s2;
+        size_type N = 1;
 
         dofs1.assign(s1, I1.first());
         if (pmf1) {
           if (!(ctx1.is_convex_num_valid())) return 0;
-         N = ctx1.N();
+          N = ctx1.N();
           auto &ct1 = pmf1->ind_scalar_basic_dof_of_element(cv1);
           size_type qmult1 = pmf1->get_qdim();
           if (qmult1 > 1) qmult1 /= pmf1->fem_of_element(cv1)->target_dim();
@@ -6617,37 +6617,37 @@ namespace getfem {
         } else
           for (size_type i=0; i < s1; ++i) dofs1[i] += i;
 
-       if (pmf1 == pmf2 && cv1 == cv2) {
-         if (I1.first() == I2.first()) {
-           add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf*1E-14, N);
-         } else {
-           dofs2.resize(dofs1.size());
-           for (size_type i = 0; i < dofs1.size(); ++i)
-             dofs2[i] =  dofs1[i] + I2.first() - I1.first();
-           add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
-         }
-       } else {
-         dofs2.assign(s2, I2.first());
-         if (pmf2) {
-           if (!(ctx2.is_convex_num_valid())) return 0;
-           N = std::max(N, ctx2.N());
-           auto &ct2 = pmf2->ind_scalar_basic_dof_of_element(cv2);
-           size_type qmult2 = pmf2->get_qdim();
-           if (qmult2 > 1) qmult2 /= pmf2->fem_of_element(cv2)->target_dim();
-           auto itd = dofs2.begin();
-           if (qmult2 == 1) {
-             for (auto itt = ct2.begin(); itt != ct2.end(); ++itt)
-               *itd++ += *itt;
-           } else {
-             for (auto itt = ct2.begin(); itt != ct2.end(); ++itt)
-               for (size_type q = 0; q < qmult2; ++q)
+        if (pmf1 == pmf2 && cv1 == cv2) {
+          if (I1.first() == I2.first()) {
+            add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf*1E-14, N);
+          } else {
+            dofs2.resize(dofs1.size());
+            for (size_type i = 0; i < dofs1.size(); ++i)
+              dofs2[i] =  dofs1[i] + I2.first() - I1.first();
+            add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
+          }
+        } else {
+          dofs2.assign(s2, I2.first());
+          if (pmf2) {
+            if (!(ctx2.is_convex_num_valid())) return 0;
+            N = std::max(N, ctx2.N());
+            auto &ct2 = pmf2->ind_scalar_basic_dof_of_element(cv2);
+            size_type qmult2 = pmf2->get_qdim();
+            if (qmult2 > 1) qmult2 /= pmf2->fem_of_element(cv2)->target_dim();
+            auto itd = dofs2.begin();
+            if (qmult2 == 1) {
+              for (auto itt = ct2.begin(); itt != ct2.end(); ++itt)
+                *itd++ += *itt;
+            } else {
+              for (auto itt = ct2.begin(); itt != ct2.end(); ++itt)
+                for (size_type q = 0; q < qmult2; ++q)
                   *itd++ += *itt + q;
-           }
-         } else
-           for (size_type i=0; i < s2; ++i) dofs2[i] += i;
+            }
+          } else
+            for (size_type i=0; i < s2; ++i) dofs2[i] += i;
 
-         add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
-       }
+          add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
+        }
       }
       return 0;
     }
@@ -6686,25 +6686,25 @@ namespace getfem {
                     "scalar fems");
       if (ipt == 0) {
         elem.resize(t.size());
-       auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
-       scalar_type e = coeff*alpha1*alpha2;
-       size_type nd = ((t.size()) >> 2);
-       for (size_type i = 0; i < nd; ++i) {
-         *it++ = (*itt++) * e; *it++ = (*itt++) * e;
-         *it++ = (*itt++) * e; *it++ = (*itt++) * e;
-       }
-       for (; it != ite;) *it++ = (*itt++) * e;
+        auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
+        scalar_type e = coeff*alpha1*alpha2;
+        size_type nd = ((t.size()) >> 2);
+        for (size_type i = 0; i < nd; ++i) {
+          *it++ = (*itt++) * e; *it++ = (*itt++) * e;
+          *it++ = (*itt++) * e; *it++ = (*itt++) * e;
+        }
+        for (; it != ite;) *it++ = (*itt++) * e;
         // gmm::copy(gmm::scaled(t.as_vector(), coeff*alpha1*alpha2), elem);
       } else {
-       // Faster than a daxpy blas call on my config
-       auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
-       scalar_type e = coeff*alpha1*alpha2;
-       size_type nd = ((t.size()) >> 2);
-       for (size_type i = 0; i < nd; ++i) {
-         *it++ += (*itt++) * e; *it++ += (*itt++) * e;
-         *it++ += (*itt++) * e; *it++ += (*itt++) * e;
-       }
-       for (; it != ite;) *it++ += (*itt++) * e;
+        // Faster than a daxpy blas call on my config
+        auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
+        scalar_type e = coeff*alpha1*alpha2;
+        size_type nd = ((t.size()) >> 2);
+        for (size_type i = 0; i < nd; ++i) {
+          *it++ += (*itt++) * e; *it++ += (*itt++) * e;
+          *it++ += (*itt++) * e; *it++ += (*itt++) * e;
+        }
+        for (; it != ite;) *it++ += (*itt++) * e;
         // gmm::add(gmm::scaled(t.as_vector(), coeff*alpha1*alpha2), elem);
       }
       if (ipt == nbpt-1) {
@@ -6717,28 +6717,28 @@ namespace getfem {
         if (cv1 == size_type(-1)) return 0;
         auto &ct1 = pmf1->ind_scalar_basic_dof_of_element(cv1);
         GA_DEBUG_ASSERT(ct1.size() == t.sizes()[0], "Internal error");
-       dofs1.resize(ct1.size());
-       for (size_type i = 0; i < ct1.size(); ++i)
-         dofs1[i] = ct1[i] + I1.first();
+        dofs1.resize(ct1.size());
+        for (size_type i = 0; i < ct1.size(); ++i)
+          dofs1[i] = ct1[i] + I1.first();
 
         if (pmf2 == pmf1 && cv1 == cv2) {
-         if (I1.first() == I2.first()) {
-           add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf*1E-14, N);
-         } else {
-           dofs2.resize(dofs1.size());
-           for (size_type i = 0; i < dofs1.size(); ++i)
-             dofs2[i] =  dofs1[i] + I2.first() - I1.first();
-           add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
-         }
-       } else {
-         if (cv2 == size_type(-1)) return 0;
-         auto &ct2 = pmf2->ind_scalar_basic_dof_of_element(cv2);
-         GA_DEBUG_ASSERT(ct2.size() == t.sizes()[1], "Internal error");
-         dofs2.resize(ct2.size());
-         for (size_type i = 0; i < ct2.size(); ++i)
-           dofs2[i] = ct2[i] + I2.first();
-         add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
-       }
+          if (I1.first() == I2.first()) {
+            add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf*1E-14, N);
+          } else {
+            dofs2.resize(dofs1.size());
+            for (size_type i = 0; i < dofs1.size(); ++i)
+              dofs2[i] =  dofs1[i] + I2.first() - I1.first();
+            add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
+          }
+        } else {
+          if (cv2 == size_type(-1)) return 0;
+          auto &ct2 = pmf2->ind_scalar_basic_dof_of_element(cv2);
+          GA_DEBUG_ASSERT(ct2.size() == t.sizes()[1], "Internal error");
+          dofs2.resize(ct2.size());
+          for (size_type i = 0; i < ct2.size(); ++i)
+            dofs2[i] = ct2[i] + I2.first();
+          add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
+        }
       }
       return 0;
     }
@@ -6773,29 +6773,29 @@ namespace getfem {
                         "vector fems");
       if (ipt == 0) {
         elem.resize(t.size());
-       auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
-       scalar_type e = coeff*alpha1*alpha2;
-       size_type nd = ((t.size()) >> 3);
-       for (size_type i = 0; i < nd; ++i) {
-         *it++ = (*itt++) * e; *it++ = (*itt++) * e;
-         *it++ = (*itt++) * e; *it++ = (*itt++) * e;
-         *it++ = (*itt++) * e; *it++ = (*itt++) * e;
-         *it++ = (*itt++) * e; *it++ = (*itt++) * e;
-       }
-       for (; it != ite;) *it++ = (*itt++) * e;
+        auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
+        scalar_type e = coeff*alpha1*alpha2;
+        size_type nd = ((t.size()) >> 3);
+        for (size_type i = 0; i < nd; ++i) {
+          *it++ = (*itt++) * e; *it++ = (*itt++) * e;
+          *it++ = (*itt++) * e; *it++ = (*itt++) * e;
+          *it++ = (*itt++) * e; *it++ = (*itt++) * e;
+          *it++ = (*itt++) * e; *it++ = (*itt++) * e;
+        }
+        for (; it != ite;) *it++ = (*itt++) * e;
         // gmm::copy(gmm::scaled(t.as_vector(), coeff*alpha1*alpha2), elem);
       } else {
-       // (Far) faster than a daxpy blas call on my config.
-       auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
-       scalar_type e = coeff*alpha1*alpha2;
-       size_type nd = ((t.size()) >> 3);
-       for (size_type i = 0; i < nd; ++i) {
-         *it++ += (*itt++) * e; *it++ += (*itt++) * e;
-         *it++ += (*itt++) * e; *it++ += (*itt++) * e;
-         *it++ += (*itt++) * e; *it++ += (*itt++) * e;
-         *it++ += (*itt++) * e; *it++ += (*itt++) * e;
-       }
-       for (; it != ite;) *it++ += (*itt++) * e;
+        // (Far) faster than a daxpy blas call on my config.
+        auto itt = t.begin(); auto it = elem.begin(), ite = elem.end();
+        scalar_type e = coeff*alpha1*alpha2;
+        size_type nd = ((t.size()) >> 3);
+        for (size_type i = 0; i < nd; ++i) {
+          *it++ += (*itt++) * e; *it++ += (*itt++) * e;
+          *it++ += (*itt++) * e; *it++ += (*itt++) * e;
+          *it++ += (*itt++) * e; *it++ += (*itt++) * e;
+          *it++ += (*itt++) * e; *it++ += (*itt++) * e;
+        }
+        for (; it != ite;) *it++ += (*itt++) * e;
         // gmm::add(gmm::scaled(t.as_vector(), coeff*alpha1*alpha2), elem);
       }
       if (ipt == nbpt-1) {
@@ -6816,28 +6816,28 @@ namespace getfem {
           for (size_type q = 0; q < qmult1; ++q)
             *itd++ += *itt + q;
 
-       if (pmf2 == pmf1 && cv1 == cv2) {
-         if (I1.first() == I2.first()) {
-           add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf*1E-14, N);
-         } else {
-           dofs2.resize(dofs1.size());
-           for (size_type i = 0; i < dofs1.size(); ++i)
-             dofs2[i] =  dofs1[i] + I2.first() - I1.first();
-           add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
-         }
-       } else {
-         if (cv2 == size_type(-1)) return 0;
-         auto &ct2 = pmf2->ind_scalar_basic_dof_of_element(cv2);
-         size_type qmult2 = pmf2->get_qdim();
-         if (qmult2 > 1) qmult2 /= pmf2->fem_of_element(cv2)->target_dim();
-         dofs2.assign(s2, I2.first());
-         itd = dofs2.begin();
-         for (auto itt = ct2.begin(); itt != ct2.end(); ++itt)
-           for (size_type q = 0; q < qmult2; ++q)
-             *itd++ += *itt + q;
-         
-         add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
-       }
+        if (pmf2 == pmf1 && cv1 == cv2) {
+          if (I1.first() == I2.first()) {
+            add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf*1E-14, N);
+          } else {
+            dofs2.resize(dofs1.size());
+            for (size_type i = 0; i < dofs1.size(); ++i)
+              dofs2[i] =  dofs1[i] + I2.first() - I1.first();
+            add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
+          }
+        } else {
+          if (cv2 == size_type(-1)) return 0;
+          auto &ct2 = pmf2->ind_scalar_basic_dof_of_element(cv2);
+          size_type qmult2 = pmf2->get_qdim();
+          if (qmult2 > 1) qmult2 /= pmf2->fem_of_element(cv2)->target_dim();
+          dofs2.assign(s2, I2.first());
+          itd = dofs2.begin();
+          for (auto itt = ct2.begin(); itt != ct2.end(); ++itt)
+            for (size_type q = 0; q < qmult2; ++q)
+              *itd++ += *itt + q;
+
+          add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf*1E-14, N);
+        }
       }
       return 0;
     }
@@ -6869,25 +6869,25 @@ namespace getfem {
     mutable std::vector<size_type> dofs1, dofs2, dofs1_sort;
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: matrix term assembly for standard "
-                   "vector fems optimized for format 10 qdim 2");
+                    "vector fems optimized for format 10 qdim 2");
       size_type s1 = t.sizes()[0], s2 = t.sizes()[1], s1_q = 2*s1;
       size_type ss1 = s1/2, ss2 = s2/2;
       scalar_type e = coeff*alpha1*alpha2;
       if (ipt == 0) {
         elem.resize(ss1*ss2);
-       auto itel = elem.begin();
-       for (size_type j = 0; j < ss2; ++j) {
-         auto it = t.begin() + j*s1_q;
-         for (size_type i = 0; i < ss1; ++i, it += 2)
-           *itel++ = (*it) * e;
-       }
+        auto itel = elem.begin();
+        for (size_type j = 0; j < ss2; ++j) {
+          auto it = t.begin() + j*s1_q;
+          for (size_type i = 0; i < ss1; ++i, it += 2)
+            *itel++ = (*it) * e;
+        }
       } else {
-       auto itel = elem.begin();
-       for (size_type j = 0; j < ss2; ++j) {
-         auto it = t.begin() + j*s1_q;
-         for (size_type i = 0; i < ss1; ++i, it += 2)
-           *itel++ += (*it) * e;
-       }
+        auto itel = elem.begin();
+        for (size_type j = 0; j < ss2; ++j) {
+          auto it = t.begin() + j*s1_q;
+          for (size_type i = 0; i < ss1; ++i, it += 2)
+            *itel++ += (*it) * e;
+        }
       }
       if (ipt == nbpt-1) {
         GA_DEBUG_ASSERT(I1.size() && I2.size(), "Internal error");
@@ -6896,35 +6896,35 @@ namespace getfem {
         if (ninf == scalar_type(0)) return 0;
         size_type N = ctx1.N();
         size_type cv1 = ctx1.convex_num(), cv2 = ctx2.convex_num();
-       size_type i1 = I1.first(), i2 = I2.first();
+        size_type i1 = I1.first(), i2 = I2.first();
         if (cv1 == size_type(-1)) return 0;
         auto &ct1 = pmf1->ind_scalar_basic_dof_of_element(cv1);
-       dofs1.resize(ss1);
-       for (size_type i = 0; i < ss1; ++i) dofs1[i] = i1 + ct1[i];
-       
-       if (pmf2 == pmf1 && cv1 == cv2) {
-         if (i1 == i2) {
-           add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf, N);
-           for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
-           add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf, N);
-         } else {
-           dofs2.resize(ss2);
-           for (size_type i = 0; i < ss2; ++i) dofs2[i] = i2 + ct1[i];
-           add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
-           for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
-           for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
-           add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
-         }
-       } else {
-         if (cv2 == size_type(-1)) return 0;
-         auto &ct2 = pmf2->ind_scalar_basic_dof_of_element(cv2);
-         dofs2.resize(ss2);
-         for (size_type i = 0; i < ss2; ++i) dofs2[i] = i2 + ct2[i];
-         add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
-         for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
-         for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
-         add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
-       }
+        dofs1.resize(ss1);
+        for (size_type i = 0; i < ss1; ++i) dofs1[i] = i1 + ct1[i];
+
+        if (pmf2 == pmf1 && cv1 == cv2) {
+          if (i1 == i2) {
+            add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf, N);
+            for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
+            add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf, N);
+          } else {
+            dofs2.resize(ss2);
+            for (size_type i = 0; i < ss2; ++i) dofs2[i] = i2 + ct1[i];
+            add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
+            for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
+            for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
+            add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
+          }
+        } else {
+          if (cv2 == size_type(-1)) return 0;
+          auto &ct2 = pmf2->ind_scalar_basic_dof_of_element(cv2);
+          dofs2.resize(ss2);
+          for (size_type i = 0; i < ss2; ++i) dofs2[i] = i2 + ct2[i];
+          add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
+          for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
+          for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
+          add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
+        }
       }
       return 0;
     }
@@ -6956,25 +6956,25 @@ namespace getfem {
     mutable std::vector<size_type> dofs1, dofs2, dofs1_sort;
     virtual int exec() {
       GA_DEBUG_INFO("Instruction: matrix term assembly for standard "
-                   "vector fems optimized for format 10 qdim 3");
+                    "vector fems optimized for format 10 qdim 3");
       size_type s1 = t.sizes()[0], s2 = t.sizes()[1], s1_q = 3*s1;
       size_type ss1 = s1/3, ss2 = s2/3;
       scalar_type e = coeff*alpha1*alpha2;
       if (ipt == 0) {
         elem.resize(ss1*ss2);
-       auto itel = elem.begin();
-       for (size_type j = 0; j < ss2; ++j) {
-         auto it = t.begin() + j*s1_q;
-         for (size_type i = 0; i < ss1; ++i, it += 3)
-           *itel++ = (*it) * e;
-       }
+        auto itel = elem.begin();
+        for (size_type j = 0; j < ss2; ++j) {
+          auto it = t.begin() + j*s1_q;
+          for (size_type i = 0; i < ss1; ++i, it += 3)
+            *itel++ = (*it) * e;
+        }
       } else {
-       auto itel = elem.begin();
-       for (size_type j = 0; j < ss2; ++j) {
-         auto it = t.begin() + j*s1_q;
-         for (size_type i = 0; i < ss1; ++i, it += 3)
-           *itel++ += (*it) * e;
-       }
+        auto itel = elem.begin();
+        for (size_type j = 0; j < ss2; ++j) {
+          auto it = t.begin() + j*s1_q;
+          for (size_type i = 0; i < ss1; ++i, it += 3)
+            *itel++ += (*it) * e;
+        }
       }
       if (ipt == nbpt-1) {
         GA_DEBUG_ASSERT(I1.size() && I2.size(), "Internal error");
@@ -6983,43 +6983,43 @@ namespace getfem {
         if (ninf == scalar_type(0)) return 0;
         size_type N = ctx1.N();
         size_type cv1 = ctx1.convex_num(), cv2 = ctx2.convex_num();
-       size_type i1 = I1.first(), i2 = I2.first();
+        size_type i1 = I1.first(), i2 = I2.first();
         if (cv1 == size_type(-1)) return 0;
         auto &ct1 = pmf1->ind_scalar_basic_dof_of_element(cv1);
-       dofs1.resize(ss1);
-       for (size_type i = 0; i < ss1; ++i) dofs1[i] = i1 + ct1[i];
-       
-       if (pmf2 == pmf1 && cv1 == cv2) {
-         if (i1 == i2) {
-           add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf, N);
-           for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
-           add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf, N);
-           for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
-           add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf, N);
-         } else {
-           dofs2.resize(ss2);
-           for (size_type i = 0; i < ss2; ++i) dofs2[i] = i2 + ct1[i];
-           add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
-           for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
-           for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
-           add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
-           for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
-           for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
-           add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
-         }
-       } else {
-         if (cv2 == size_type(-1)) return 0;
-         auto &ct2 = pmf2->ind_scalar_basic_dof_of_element(cv2);
-         dofs2.resize(ss2);
-         for (size_type i = 0; i < ss2; ++i) dofs2[i] = i2 + ct2[i];
-         add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
-         for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
-         for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
-         add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
-         for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
-         for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
-         add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
-       }
+        dofs1.resize(ss1);
+        for (size_type i = 0; i < ss1; ++i) dofs1[i] = i1 + ct1[i];
+
+        if (pmf2 == pmf1 && cv1 == cv2) {
+          if (i1 == i2) {
+            add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf, N);
+            for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
+            add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf, N);
+            for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
+            add_elem_matrix_(K, dofs1, dofs1, dofs1_sort, elem, ninf, N);
+          } else {
+            dofs2.resize(ss2);
+            for (size_type i = 0; i < ss2; ++i) dofs2[i] = i2 + ct1[i];
+            add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
+            for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
+            for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
+            add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
+            for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
+            for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
+            add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
+          }
+        } else {
+          if (cv2 == size_type(-1)) return 0;
+          auto &ct2 = pmf2->ind_scalar_basic_dof_of_element(cv2);
+          dofs2.resize(ss2);
+          for (size_type i = 0; i < ss2; ++i) dofs2[i] = i2 + ct2[i];
+          add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
+          for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
+          for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
+          add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
+          for (size_type i = 0; i < ss1; ++i) (dofs1[i])++;
+          for (size_type i = 0; i < ss2; ++i) (dofs2[i])++;
+          add_elem_matrix_(K, dofs1, dofs2, dofs1_sort, elem, ninf, N);
+        }
       }
       return 0;
     }
@@ -7498,7 +7498,7 @@ namespace getfem {
                               const std::string &expr,
                               size_type add_derivative_order,
                               bool function_expr, size_type for_interpolation,
-                             const std::string varname_interpolation) {
+                              const std::string varname_interpolation) {
     if (tree.root) {
 
       // Eliminate the term if it corresponds to disabled variables
@@ -7514,17 +7514,17 @@ namespace getfem {
       //      ga_print_node(tree.root, cout); cout << endl;
       bool remain = true;
       size_type order = 0, ind_tree = 0;
-      
+
       if (for_interpolation)
-       order = size_type(-1) - add_derivative_order;
+        order = size_type(-1) - add_derivative_order;
       else {
-       switch(tree.root->test_function_type) {
-       case 0: order = 0; break;
-       case 1: order = 1; break;
-       case 3: order = 2; break;
-       default: GMM_ASSERT1(false, "Inconsistent term "
-                            << tree.root->test_function_type);
-       }
+        switch(tree.root->test_function_type) {
+        case 0: order = 0; break;
+        case 1: order = 1; break;
+        case 3: order = 2; break;
+        default: GMM_ASSERT1(false, "Inconsistent term "
+                             << tree.root->test_function_type);
+        }
       }
 
       bool found = false;
@@ -7538,7 +7538,7 @@ namespace getfem {
             trees[i].interpolate_name_test2.compare
             (tree.root->interpolate_name_test2) == 0 &&
             trees[i].rg == &rg && trees[i].interpolation == for_interpolation 
&&
-           trees[i].varname_interpolation.compare(varname_interpolation)==0) {
+            trees[i].varname_interpolation.compare(varname_interpolation)==0) {
           ga_tree &ftree = *(trees[i].ptree);
 
           ftree.insert_node(ftree.root, GA_NODE_OP);
@@ -7565,8 +7565,8 @@ namespace getfem {
         trees.back().interpolate_name_test1 = root->interpolate_name_test1;
         trees.back().interpolate_name_test2 = root->interpolate_name_test2;
         trees.back().order = order;
-       trees.back().interpolation = for_interpolation;
-       trees.back().varname_interpolation = varname_interpolation;
+        trees.back().interpolation = for_interpolation;
+        trees.back().varname_interpolation = varname_interpolation;
        }
 
       if (for_interpolation == 0 && order < add_derivative_order) {
@@ -7715,7 +7715,7 @@ namespace getfem {
       GMM_ASSERT1(tree.root->nb_test_functions() == 0,
                   "Invalid expression containing test functions");
       add_tree(tree, m, mim, rg, expr, order+1, false, (before ? 1 : 2),
-              varname);
+               varname);
     }
   }
 
@@ -7861,7 +7861,7 @@ namespace getfem {
 
     if (order == 2) {
       if (K.use_count()) {
-       gmm::clear(*K);
+        gmm::clear(*K);
         gmm::resize(*K, max_dof, max_dof);
       }
       gmm::clear(unreduced_K);
@@ -7950,7 +7950,7 @@ namespace getfem {
                       model_real_row_sparse_matrix M(I1.size(), I2.size());
                       gmm::mult(gmm::sub_matrix(unreduced_K, uI1, uI2),
                                 mf2->extension_matrix(), M);
-                      gmm::add(M, gmm::sub_matrix(*K, I1, I2));                
      
+                      gmm::add(M, gmm::sub_matrix(*K, I1, I2));
                     }
                     vars_mat_done.insert(p);
                   }
@@ -8043,7 +8043,7 @@ namespace getfem {
 
     case GA_NODE_INTERPOLATE_FILTER:
       c += 1.73*ga_hash_code(pnode->interpolate_name)
-       + 2.486*double(pnode->nbc1 + 1);
+        + 2.486*double(pnode->nbc1 + 1);
       break;
     case GA_NODE_INTERPOLATE_DERIVATIVE:
       c += 2.321*ga_hash_code(pnode->interpolate_name_der);
@@ -8336,13 +8336,13 @@ namespace getfem {
         int ndt = ((pnode->node_type == GA_NODE_INTERPOLATE) ? 1 : 0)
           + ((pnode->node_type == GA_NODE_ELEMENTARY) ? 2 : 0)
           + ((pnode->node_type == GA_NODE_XFEM_PLUS) ? 3 : 0)
-         + ((pnode->node_type == GA_NODE_XFEM_MINUS) ? 4 : 0);
+          + ((pnode->node_type == GA_NODE_XFEM_MINUS) ? 4 : 0);
         std::string op__name =
           (pnode->node_type == GA_NODE_INTERPOLATE) ? "Interpolation" : ""
           + (pnode->node_type == GA_NODE_ELEMENTARY) ?
-            "Elementary transformation" : ""
+             "Elementary transformation" : ""
           + (pnode->node_type == GA_NODE_XFEM_PLUS) ? "Xfem_plus" : ""
-         + (pnode->node_type == GA_NODE_XFEM_MINUS) ? "Xfem_minus" : "";
+          + (pnode->node_type == GA_NODE_XFEM_MINUS) ? "Xfem_minus" : "";
 
         std::string name = pnode->name;
         size_type prefix_id = ga_parse_prefix_operator(name);
@@ -8818,10 +8818,10 @@ namespace getfem {
             for (size_type j = 0; j < mi.back(); ++j)
               if (pnode->op_type == GA_SYM)
                 pnode->tensor()(j, i) = 0.5*(child0->tensor()(j,i)
-                                            + child0->tensor()(i,j));
+                                             + child0->tensor()(i,j));
               else
                 pnode->tensor()(j, i) = 0.5*(child0->tensor()(j,i)
-                                            - child0->tensor()(i,j));
+                                             - child0->tensor()(i,j));
           tree.clear_children(pnode);
         } else if (child0->node_type == GA_NODE_ZERO) {
           pnode->node_type = GA_NODE_ZERO;
@@ -8902,7 +8902,7 @@ namespace getfem {
             if (dim0 == 2) {
               scalar_type tr(0);
               gmm::copy(child0->tensor().as_vector(),
-                       pnode->tensor().as_vector());
+                        pnode->tensor().as_vector());
               for (size_type i = 0; i < N; ++i)
                 tr += child0->tensor()(i,i);
               for (size_type i = 0; i < N; ++i)
@@ -9037,11 +9037,11 @@ namespace getfem {
           } else if (child0->tensor().size() == 1) {
             pnode->t = child1->t;
             gmm::scale(pnode->tensor().as_vector(),
-                      scalar_type(child0->tensor()[0]));
+                       scalar_type(child0->tensor()[0]));
           } else if (child1->tensor().size() == 1) {
             pnode->t = child0->t;
             gmm::scale(pnode->tensor().as_vector(),
-                      scalar_type(child1->tensor()[0]));
+                       scalar_type(child1->tensor()[0]));
           } else {
             if (dim0+dim1 > 6)
               ga_throw_error(expr, pnode->pos, "Unauthorized "
@@ -9128,7 +9128,7 @@ namespace getfem {
               for (size_type j = 0; j < n; ++j)
                 for (size_type k = 0; k < p; ++k)
                   pnode->tensor()(i,k) += child0->tensor()(i,j)
-                   * child1->tensor()(j,k);
+                                          * child1->tensor()(j,k);
           }
           else if (dim0 == 4 && dim1 == 2) {
             size_type m=child0->tensor().size(0), n=child0->tensor().size(1);
@@ -9138,7 +9138,7 @@ namespace getfem {
                              "Incompatible sizes in tensor-matrix "
                              "multiplication (" << o << "," << p << " != "
                              << child1->tensor().size(0) << ","
-                            << child1->tensor().size(1) << ").");
+                             << child1->tensor().size(1) << ").");
             pnode->init_matrix_tensor(m,n);
             gmm::clear(pnode->tensor().as_vector());
             for (size_type i = 0; i < m; ++i)
@@ -9146,7 +9146,7 @@ namespace getfem {
                 for (size_type k = 0; k < o; ++k)
                   for (size_type l = 0; l < p; ++l)
                     pnode->tensor()(i,j) += child0->tensor()(i,j,k,l)
-                     * child1->tensor()(k,l);
+                                            * child1->tensor()(k,l);
           } else ga_throw_error(expr, pnode->pos,
                                  "Unauthorized multiplication.");
           tree.clear_children(pnode);
@@ -9211,12 +9211,12 @@ namespace getfem {
             tree.clear_children(pnode);
           } else if (child0->node_type == GA_NODE_CONSTANT &&
                      child0->tensor().size() == 1 &&
-                    child0->tensor()[0] == scalar_type(1)) {
+                     child0->tensor()[0] == scalar_type(1)) {
             tree.replace_node_by_child(pnode, 1);
             pnode = child1;
           } else if (child1->node_type == GA_NODE_CONSTANT &&
                      child1->tensor().size() == 1 &&
-                    child1->tensor()[0] == scalar_type(1)) {
+                     child1->tensor()[0] == scalar_type(1)) {
             tree.replace_node_by_child(pnode, 0);
             pnode = child0;
           }
@@ -9257,7 +9257,7 @@ namespace getfem {
           tree.clear_children(pnode);
         } else if (child1->node_type == GA_NODE_CONSTANT &&
                    child1->tensor().size() == 1 &&
-                  child1->tensor()[0] == scalar_type(1)) {
+                   child1->tensor()[0] == scalar_type(1)) {
           tree.replace_node_by_child(pnode, 0);
           pnode = child0;
         }
@@ -9336,7 +9336,7 @@ namespace getfem {
                   for (size_type k = 0; k < nbc2; ++k)
                     for (size_type l = 0; l < nbc1; ++l)
                       pnode->tensor()(i,j,k,l)
-                       = pnode->children[n++]->tensor()[0];
+                        = pnode->children[n++]->tensor()[0];
           }
         }
         if (all_cte) tree.clear_children(pnode);
@@ -9537,7 +9537,7 @@ namespace getfem {
                 for (size_type i = 0; i < n; ++i)
                   for (size_type j = 0; j < n; ++j)
                     pnode->tensor()(i,j)
-                     = ((i == j) ? scalar_type(1) : scalar_type(0));
+                      = ((i == j) ? scalar_type(1) : scalar_type(0));
               } else {
                 pnode->t.adjust_sizes(workspace.qdims(name));
                 gmm::copy(workspace.value(name), pnode->tensor().as_vector());
@@ -9627,7 +9627,7 @@ namespace getfem {
           ga_throw_error(expr, child1->pos, "X stands for the coordinates on "
                          "the real elements. It accepts only one index.");
         if (!(child1->node_type == GA_NODE_CONSTANT) ||
-           child1->tensor().size() != 1)
+            child1->tensor().size() != 1)
           ga_throw_error(expr, child1->pos, "Index for X has to be constant "
                          "and of size 1.");
         child0->nbc1 = size_type(round(child1->tensor()[0]));
@@ -9737,15 +9737,15 @@ namespace getfem {
             if (s1 == s2) {
               for (size_type i = 0; i < s1; ++i)
                 pnode->tensor()[i] = F(child1->tensor()[i],
-                                      child2->tensor()[i]);
+                                       child2->tensor()[i]);
             } else if (s1 == 1) {
               for (size_type i = 0; i < s2; ++i)
                 pnode->tensor()[i] = F(child1->tensor()[0],
-                                      child2->tensor()[i]);
+                                       child2->tensor()[i]);
             } else {
               for (size_type i = 0; i < s1; ++i)
                 pnode->tensor()[i] = F(child1->tensor()[i],
-                                      child2->tensor()[0]);
+                                       child2->tensor()[0]);
             }
           }
           tree.clear_children(pnode);
@@ -9862,7 +9862,7 @@ namespace getfem {
           if (all_cte) {
             pnode->node_type = GA_NODE_CONSTANT;
             OP.second_derivative(args, child0->der1, child0->der2,
-                                pnode->tensor());
+                                 pnode->tensor());
             tree.clear_children(pnode);
           }
         } else {
@@ -9879,7 +9879,7 @@ namespace getfem {
         all_cte = (child0->node_type == GA_NODE_CONSTANT);
         // cout << "child0->tensor_order() = " << child0->tensor_order();
         // cout << endl << "child0->t.sizes() = "
-       //      << child0->t.sizes() << endl;
+        //      << child0->t.sizes() << endl;
         if (pnode->children.size() != child0->tensor_order() + 1)
           ga_throw_error(expr, pnode->pos, "Bad number of indices.");
         for (size_type i = 1; i < pnode->children.size(); ++i)
@@ -10864,7 +10864,7 @@ namespace getfem {
         if (mark1) {
           if (pnode->children[0]->node_type == GA_NODE_CONSTANT)
             gmm::scale(pnode->children[0]->tensor().as_vector(),
-                      scalar_type(-1));
+                       scalar_type(-1));
           else {
             if (mark0) {
               tree.duplicate_with_subtraction(pnode);
@@ -10947,7 +10947,7 @@ namespace getfem {
               // Inline extension if the derivative is affine (for instance
               // for sqr)
               ga_predef_function_tab::const_iterator
-               itp = PREDEF_FUNCTIONS.find(child0->name);
+                itp = PREDEF_FUNCTIONS.find(child0->name);
               const ga_predef_function &Fp = itp->second;
               if (Fp.is_affine("t")) {
                 scalar_type b = Fp(scalar_type(0));
@@ -10956,7 +10956,7 @@ namespace getfem {
                 pnode->op_type = GA_MULT;
                 child0->init_scalar_tensor(a);
                 child0->node_type = ((a == scalar_type(0)) ?
-                                    GA_NODE_ZERO : GA_NODE_CONSTANT);
+                                     GA_NODE_ZERO : GA_NODE_CONSTANT);
                 if (b != scalar_type(0)) {
                   tree.insert_node(pnode, GA_NODE_OP);
                   pnode->parent->op_type = (b > 0) ? GA_PLUS : GA_MINUS;
@@ -10965,7 +10965,7 @@ namespace getfem {
                   pnode_cte->node_type = GA_NODE_CONSTANT;
                   pnode_cte->t = pnode->t;
                   std::fill(pnode_cte->tensor().begin(),
-                           pnode_cte->tensor().end(), gmm::abs(b));
+                            pnode_cte->tensor().end(), gmm::abs(b));
                   pnode = pnode->parent;
                 }
               }
@@ -11395,7 +11395,7 @@ namespace getfem {
           is_uniform = true;
           pctx1->invalid_convex_num();
           pctx2->invalid_convex_num();
-        } 
+        }
       } else if (mf1 || mfg1) {
         pgai = std::make_shared<ga_instruction_first_ind_tensor>
           (pnode->tensor(), *pctx1, pnode->qdim1, mf1, mfg1);
@@ -11419,22 +11419,22 @@ namespace getfem {
         // ga_print_node(pnode, cout);
         // cout << " and "; ga_print_node(*it, cout); cout << endl;
         if (sub_tree_are_equal(pnode, *it, workspace, 1)) {
-         pnode->t.set_to_copy((*it)->t);
+          pnode->t.set_to_copy((*it)->t);
           return;
         }
         if (sub_tree_are_equal(pnode, *it, workspace, 2)) {
           // cout << "confirmed with transpose" << endl;
           if (pnode->nb_test_functions() == 2) {
-           if (pgai) { // resize instruction if needed
-             if (is_uniform)
-               { pgai->exec(); }
-             else { rmi.instructions.push_back(std::move(pgai)); }
-           }
+            if (pgai) { // resize instruction if needed
+              if (is_uniform)
+                { pgai->exec(); }
+              else { rmi.instructions.push_back(std::move(pgai)); }
+            }
             pgai = std::make_shared<ga_instruction_transpose_test>
               (pnode->tensor(), (*it)->tensor());
-           rmi.instructions.push_back(std::move(pgai));
+            rmi.instructions.push_back(std::move(pgai));
           } else {
-           pnode->t.set_to_copy((*it)->t);
+            pnode->t.set_to_copy((*it)->t);
           }
           return;
         }
@@ -11450,10 +11450,10 @@ namespace getfem {
     if (pgai) { // resize instruction if needed and no equivalent node detected
       if (is_uniform) { pgai->exec(); }
       else {
-       if (mfg1 || mfg2)
-         rmi.instructions.push_back(std::move(pgai));
-       else
-         rmi.elt_instructions.push_back(std::move(pgai));
+        if (mfg1 || mfg2)
+          rmi.instructions.push_back(std::move(pgai));
+        else
+          rmi.elt_instructions.push_back(std::move(pgai));
       }
     }
 
@@ -11531,7 +11531,7 @@ namespace getfem {
       GMM_ASSERT1(!function_case,
                   "No use of element_K is allowed in functions");
       pgai = std::make_shared<ga_instruction_element_K>(pnode->tensor(),
-                                                       gis.ctx);
+                                                        gis.ctx);
       rmi.instructions.push_back(std::move(pgai));
       break;
 
@@ -11539,7 +11539,7 @@ namespace getfem {
       GMM_ASSERT1(!function_case,
                   "No use of element_B is allowed in functions");
       pgai = std::make_shared<ga_instruction_element_B>(pnode->tensor(),
-                                                       gis.ctx);
+                                                        gis.ctx);
       rmi.instructions.push_back(std::move(pgai));
       break;
 
@@ -11613,7 +11613,7 @@ namespace getfem {
         if (imd) {
           pgai = std::make_shared<ga_instruction_extract_local_im_data>
             (pnode->tensor(), *imd, workspace.value(pnode->name),
-            gis.pai, gis.ctx, workspace.qdim(pnode->name));
+             gis.pai, gis.ctx, workspace.qdim(pnode->name));
           rmi.instructions.push_back(std::move(pgai));
         } else {
           GMM_ASSERT1(mf, "Internal error");
@@ -11628,13 +11628,13 @@ namespace getfem {
             rmi.local_dofs[pnode->name] = base_vector(1);
             extend_variable_in_gis(workspace, pnode->name, gis);
             // cout << "local dof of " << pnode->name << endl;
-           size_type qmult2 = mf->get_qdim();
-           if (qmult2 > 1 && !(mf->is_uniformly_vectorized()))
-             qmult2 = size_type(-1);
+            size_type qmult2 = mf->get_qdim();
+            if (qmult2 > 1 && !(mf->is_uniformly_vectorized()))
+              qmult2 = size_type(-1);
             pgai = std::make_shared<ga_instruction_slice_local_dofs>
               (*mf, *(gis.extended_vars[pnode->name]), gis.ctx,
                rmi.local_dofs[pnode->name],
-              workspace.qdim(pnode->name) / mf->get_qdim(), qmult2);
+               workspace.qdim(pnode->name) / mf->get_qdim(), qmult2);
             rmi.elt_instructions.push_back(std::move(pgai));
           }
 
@@ -11643,9 +11643,9 @@ namespace getfem {
             rmi.pfps[mf] = 0;
             pgai = std::make_shared<ga_instruction_update_pfp>
               (*mf, rmi.pfps[mf], gis.ctx, gis.fp_pool);
-           if (mf->is_uniform())
-             rmi.begin_instructions.push_back(std::move(pgai));
-           else
+            if (mf->is_uniform())
+              rmi.begin_instructions.push_back(std::move(pgai));
+            else
               rmi.instructions.push_back(std::move(pgai));
           }
 
@@ -11734,8 +11734,8 @@ namespace getfem {
           switch (pnode->node_type) {
           case GA_NODE_VAL: // --> t(target_dim*Qmult)
             pgai = std::make_shared<ga_instruction_val>
-             (pnode->tensor(), rmi.base[mf], rmi.local_dofs[pnode->name],
-              workspace.qdim(pnode->name));
+              (pnode->tensor(), rmi.base[mf], rmi.local_dofs[pnode->name],
+               workspace.qdim(pnode->name));
             break;
           case GA_NODE_GRAD: // --> t(target_dim*Qmult,N)
             pgai = std::make_shared<ga_instruction_grad>
@@ -11797,7 +11797,7 @@ namespace getfem {
               ga_instruction_set::elementary_trans_info &eti
                 = rmi.elementary_trans_infos[pnode->elementary_name];
               pgai =
-               std::make_shared<ga_instruction_elementary_transformation_val>
+                std::make_shared<ga_instruction_elementary_transformation_val>
                 (pnode->tensor(), rmi.base[mf],
                  rmi.local_dofs[pnode->name], workspace.qdim(pnode->name),
                  workspace.elementary_transformation(pnode->elementary_name),
@@ -11809,7 +11809,7 @@ namespace getfem {
               ga_instruction_set::elementary_trans_info &eti
                 = rmi.elementary_trans_infos[pnode->elementary_name];
               pgai =
-               std::make_shared<ga_instruction_elementary_transformation_grad>
+                std::make_shared<ga_instruction_elementary_transformation_grad>
                 (pnode->tensor(), rmi.grad[mf],
                  rmi.local_dofs[pnode->name], workspace.qdim(pnode->name),
                  workspace.elementary_transformation(pnode->elementary_name),
@@ -11821,7 +11821,7 @@ namespace getfem {
               ga_instruction_set::elementary_trans_info &eti
                 = rmi.elementary_trans_infos[pnode->elementary_name];
               pgai =
-               std::make_shared<ga_instruction_elementary_transformation_hess>
+                std::make_shared<ga_instruction_elementary_transformation_hess>
                 (pnode->tensor(), rmi.hess[mf],
                  rmi.local_dofs[pnode->name], workspace.qdim(pnode->name),
                  workspace.elementary_transformation(pnode->elementary_name),
@@ -11833,7 +11833,7 @@ namespace getfem {
               ga_instruction_set::elementary_trans_info &eti
                 = rmi.elementary_trans_infos[pnode->elementary_name];
               pgai =
-              std::make_shared<ga_instruction_elementary_transformation_diverg>
+               
std::make_shared<ga_instruction_elementary_transformation_diverg>
                 (pnode->tensor(), rmi.grad[mf],
                  rmi.local_dofs[pnode->name], workspace.qdim(pnode->name),
                  workspace.elementary_transformation(pnode->elementary_name),
@@ -11864,27 +11864,27 @@ namespace getfem {
         }
 
         if (pnode->node_type == GA_NODE_INTERPOLATE_VAL) {
-         // --> t(target_dim*Qmult)
+          // --> t(target_dim*Qmult)
           pgai = std::make_shared<ga_instruction_interpolate_val>
             (pnode->tensor(), m2, mfn, mfg, Un, Ug, *pctx,
-            workspace.qdim(pnode->name),
+             workspace.qdim(pnode->name),
              gis.ipt, gis.fp_pool, rmi.interpolate_infos[intn]);
         } else if (pnode->node_type == GA_NODE_INTERPOLATE_GRAD) {
-         // --> t(target_dim*Qmult,N)
+          // --> t(target_dim*Qmult,N)
           pgai = std::make_shared<ga_instruction_interpolate_grad>
             (pnode->tensor(), m2, mfn, mfg, Un, Ug, *pctx,
-            workspace.qdim(pnode->name),
+             workspace.qdim(pnode->name),
              gis.ipt, gis.fp_pool, rmi.interpolate_infos[intn]);
         } else if (pnode->node_type == GA_NODE_INTERPOLATE_HESS) {
-         // --> t(target_dim*Qmult,N,N)
+          // --> t(target_dim*Qmult,N,N)
           pgai = std::make_shared<ga_instruction_interpolate_hess>
             (pnode->tensor(), m2, mfn, mfg, Un, Ug, *pctx,
-            workspace.qdim(pnode->name),
+             workspace.qdim(pnode->name),
              gis.ipt, gis.fp_pool, rmi.interpolate_infos[intn]);
         } else { // --> t(1)
           pgai = std::make_shared<ga_instruction_interpolate_diverg>
             (pnode->tensor(), m2, mfn, mfg, Un, Ug, *pctx,
-            workspace.qdim(pnode->name),
+             workspace.qdim(pnode->name),
              gis.ipt, gis.fp_pool, rmi.interpolate_infos[intn]);
         }
         rmi.instructions.push_back(std::move(pgai));
@@ -11995,7 +11995,7 @@ namespace getfem {
           case GA_NODE_XFEM_PLUS_HESS_TEST:
             if (rmi.xfem_plus_hess.count(mf) == 0 ||
                 !(if_hierarchy.is_compatible(rmi.xfem_plus_hess_hierarchy[mf]))
-               ) {
+                ) {
               rmi.xfem_plus_hess_hierarchy[mf].push_back(if_hierarchy);
               pgai = std::make_shared<ga_instruction_xfem_plus_hess_base>
                 (rmi.xfem_plus_hess[mf], gis.ctx, *mf, rmi.pfps[mf]);
@@ -12017,90 +12017,90 @@ namespace getfem {
           // The copy of the real_base_value
           switch(pnode->node_type) {
           case GA_NODE_VAL_TEST:
-           // --> t(Qmult*ndof,Qmult*target_dim)
-           if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized()) {
-             pnode->t.set_sparsity(1, mf->get_qdim());
-             tensor_to_clear = true;
-             pgai = std::make_shared<ga_instruction_copy_vect_val_base>
-               (pnode->tensor(), rmi.base[mf], mf->get_qdim());
-           } else {
-             pgai = std::make_shared<ga_instruction_copy_val_base>
-               (pnode->tensor(), rmi.base[mf], mf->get_qdim());
-           }
+            // --> t(Qmult*ndof,Qmult*target_dim)
+            if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized()) {
+              pnode->t.set_sparsity(1, mf->get_qdim());
+              tensor_to_clear = true;
+              pgai = std::make_shared<ga_instruction_copy_vect_val_base>
+                (pnode->tensor(), rmi.base[mf], mf->get_qdim());
+            } else {
+              pgai = std::make_shared<ga_instruction_copy_val_base>
+                (pnode->tensor(), rmi.base[mf], mf->get_qdim());
+            }
             break;
           case GA_NODE_GRAD_TEST:
-           // --> t(Qmult*ndof,Qmult*target_dim,N)
-           if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized()) {
-             pnode->t.set_sparsity(2, mf->get_qdim());
-             tensor_to_clear = true;
-             pgai = std::make_shared<ga_instruction_copy_vect_grad_base>
-               (pnode->tensor(), rmi.grad[mf], mf->get_qdim());
-           } else {
-             pgai = std::make_shared<ga_instruction_copy_grad_base>
-               (pnode->tensor(), rmi.grad[mf], mf->get_qdim());
-           }
-           break;
+            // --> t(Qmult*ndof,Qmult*target_dim,N)
+            if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized()) {
+              pnode->t.set_sparsity(2, mf->get_qdim());
+              tensor_to_clear = true;
+              pgai = std::make_shared<ga_instruction_copy_vect_grad_base>
+                (pnode->tensor(), rmi.grad[mf], mf->get_qdim());
+            } else {
+              pgai = std::make_shared<ga_instruction_copy_grad_base>
+                (pnode->tensor(), rmi.grad[mf], mf->get_qdim());
+            }
+            break;
           case GA_NODE_HESS_TEST:
-           // --> t(Qmult*ndof,Qmult*target_dim,N,N)
-           pgai = std::make_shared<ga_instruction_copy_hess_base>
-             (pnode->tensor(), rmi.hess[mf], mf->get_qdim());
-           if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
-             pnode->t.set_sparsity(3, mf->get_qdim());
+            // --> t(Qmult*ndof,Qmult*target_dim,N,N)
+            pgai = std::make_shared<ga_instruction_copy_hess_base>
+              (pnode->tensor(), rmi.hess[mf], mf->get_qdim());
+            if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
+              pnode->t.set_sparsity(3, mf->get_qdim());
             break;
           case GA_NODE_DIVERG_TEST:
-           // --> t(Qmult*ndof)
+            // --> t(Qmult*ndof)
             pgai = std::make_shared<ga_instruction_copy_diverg_base>
               (pnode->tensor(), rmi.grad[mf], mf->get_qdim());
             break;
           case GA_NODE_XFEM_PLUS_VAL_TEST:
-           // -->t(Qmult*ndof,Qmult*target_dim)
+            // -->t(Qmult*ndof,Qmult*target_dim)
             pgai = std::make_shared<ga_instruction_copy_val_base>
               (pnode->tensor(), rmi.xfem_plus_base[mf], mf->get_qdim());
-           if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
-             pnode->t.set_sparsity(1, mf->get_qdim());
+            if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
+              pnode->t.set_sparsity(1, mf->get_qdim());
             break;
           case GA_NODE_XFEM_PLUS_GRAD_TEST:
-           // --> t(Qmult*ndof,Qmult*target_dim,N)
+            // --> t(Qmult*ndof,Qmult*target_dim,N)
             pgai = std::make_shared<ga_instruction_copy_grad_base>
               (pnode->tensor(), rmi.xfem_plus_grad[mf], mf->get_qdim());
-           if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
-             pnode->t.set_sparsity(2, mf->get_qdim());
+            if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
+              pnode->t.set_sparsity(2, mf->get_qdim());
             break;
           case GA_NODE_XFEM_PLUS_HESS_TEST:
-           // --> t(Qmult*ndof,Qmult*target_dim,N,N)
+            // --> t(Qmult*ndof,Qmult*target_dim,N,N)
             pgai = std::make_shared<ga_instruction_copy_hess_base>
               (pnode->tensor(), rmi.xfem_plus_hess[mf], mf->get_qdim());
-           if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
-             pnode->t.set_sparsity(3, mf->get_qdim());
+            if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
+              pnode->t.set_sparsity(3, mf->get_qdim());
             break;
           case GA_NODE_XFEM_PLUS_DIVERG_TEST:
-           // --> t(Qmult*ndof)
+            // --> t(Qmult*ndof)
             pgai = std::make_shared<ga_instruction_copy_diverg_base>
               (pnode->tensor(), rmi.xfem_plus_grad[mf], mf->get_qdim());
             break;
           case GA_NODE_XFEM_MINUS_VAL_TEST:
-           // -->t(Qmult*ndof,Qmult*target_dim)
+            // -->t(Qmult*ndof,Qmult*target_dim)
             pgai = std::make_shared<ga_instruction_copy_val_base>
               (pnode->tensor(), rmi.xfem_minus_base[mf], mf->get_qdim());
-           if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
-             pnode->t.set_sparsity(1, mf->get_qdim());
+            if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
+              pnode->t.set_sparsity(1, mf->get_qdim());
             break;
           case GA_NODE_XFEM_MINUS_GRAD_TEST:
-           // --> t(Qmult*ndof,Qmult*target_dim,N)
+            // --> t(Qmult*ndof,Qmult*target_dim,N)
             pgai = std::make_shared<ga_instruction_copy_grad_base>
               (pnode->tensor(), rmi.xfem_minus_grad[mf], mf->get_qdim());
-           if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
-             pnode->t.set_sparsity(2, mf->get_qdim());
+            if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
+              pnode->t.set_sparsity(2, mf->get_qdim());
             break;
           case GA_NODE_XFEM_MINUS_HESS_TEST:
-           // --> t(Qmult*ndof,Qmult*target_dim,N,N)
+            // --> t(Qmult*ndof,Qmult*target_dim,N,N)
             pgai = std::make_shared<ga_instruction_copy_hess_base>
               (pnode->tensor(), rmi.xfem_minus_hess[mf], mf->get_qdim());
-           if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
-             pnode->t.set_sparsity(3, mf->get_qdim());
+            if (mf->get_qdim() > 1 && mf->is_uniformly_vectorized())
+              pnode->t.set_sparsity(3, mf->get_qdim());
             break;
           case GA_NODE_XFEM_MINUS_DIVERG_TEST:
-           // --> t(Qmult*ndof)
+            // --> t(Qmult*ndof)
             pgai = std::make_shared<ga_instruction_copy_diverg_base>
               (pnode->tensor(), rmi.xfem_minus_grad[mf], mf->get_qdim());
             break;
@@ -12109,7 +12109,7 @@ namespace getfem {
               ga_instruction_set::elementary_trans_info &eti
                 = rmi.elementary_trans_infos[pnode->elementary_name];
               pgai =
-            std::make_shared<ga_instruction_elementary_transformation_val_base>
+             
std::make_shared<ga_instruction_elementary_transformation_val_base>
                 (pnode->tensor(), rmi.base[mf], mf->get_qdim(),
                  workspace.elementary_transformation(pnode->elementary_name),
                  *mf, gis.ctx, eti.M, &(eti.mf), eti.icv);
@@ -12120,7 +12120,7 @@ namespace getfem {
               ga_instruction_set::elementary_trans_info &eti
                 = rmi.elementary_trans_infos[pnode->elementary_name];
               pgai =
-           std::make_shared<ga_instruction_elementary_transformation_grad_base>
+            
std::make_shared<ga_instruction_elementary_transformation_grad_base>
                 (pnode->tensor(), rmi.grad[mf], mf->get_qdim(),
                  workspace.elementary_transformation(pnode->elementary_name),
                  *mf, gis.ctx, eti.M, &(eti.mf), eti.icv);
@@ -12131,7 +12131,7 @@ namespace getfem {
               ga_instruction_set::elementary_trans_info &eti
                 = rmi.elementary_trans_infos[pnode->elementary_name];
               pgai =
-           std::make_shared<ga_instruction_elementary_transformation_hess_base>
+            
std::make_shared<ga_instruction_elementary_transformation_hess_base>
                 (pnode->tensor(), rmi.hess[mf], mf->get_qdim(),
                  workspace.elementary_transformation(pnode->elementary_name),
                  *mf, gis.ctx, eti.M, &(eti.mf), eti.icv);
@@ -12142,7 +12142,7 @@ namespace getfem {
               ga_instruction_set::elementary_trans_info &eti
                 = rmi.elementary_trans_infos[pnode->elementary_name];
               pgai =
-         std::make_shared<ga_instruction_elementary_transformation_diverg_base>
+          
std::make_shared<ga_instruction_elementary_transformation_diverg_base>
                 (pnode->tensor(), rmi.grad[mf], mf->get_qdim(),
                  workspace.elementary_transformation(pnode->elementary_name),
                  *mf, gis.ctx, eti.M, &(eti.mf), eti.icv);
@@ -12174,25 +12174,25 @@ namespace getfem {
           // --> t(Qmult*ndof,Qmult*target_dim)
           pgai = std::make_shared<ga_instruction_interpolate_val_base>
             (pnode->tensor(), m2, mfn, mfg, gis.ipt,
-            workspace.qdim(pnode->name), rmi.interpolate_infos[intn],
-            gis.fp_pool);
+             workspace.qdim(pnode->name), rmi.interpolate_infos[intn],
+             gis.fp_pool);
         } else if (pnode->node_type == GA_NODE_INTERPOLATE_GRAD_TEST) {
            // --> t(Qmult*ndof,Qmult*target_dim,N)
           pgai = std::make_shared<ga_instruction_interpolate_grad_base>
             (pnode->tensor(), m2, mfn, mfg, gis.ipt,
-            workspace.qdim(pnode->name),
+             workspace.qdim(pnode->name),
              rmi.interpolate_infos[intn], gis.fp_pool);
         } else if (pnode->node_type == GA_NODE_INTERPOLATE_HESS_TEST) {
            // --> t(Qmult*ndof,Qmult*target_dim,N,N)
           pgai = std::make_shared<ga_instruction_interpolate_hess_base>
             (pnode->tensor(), m2, mfn, mfg, gis.ipt,
-            workspace.qdim(pnode->name),
+             workspace.qdim(pnode->name),
              rmi.interpolate_infos[intn], gis.fp_pool);
         } else { // if (pnode->node_type == GA_NODE_INTERPOLATE_DIVERG_TEST) {
            // --> t(Qmult*ndof)
           pgai = std::make_shared<ga_instruction_interpolate_diverg_base>
             (pnode->tensor(), m2, mfn, mfg, gis.ipt,
-            workspace.qdim(pnode->name),
+             workspace.qdim(pnode->name),
              rmi.interpolate_infos[intn], gis.fp_pool);
         }
         rmi.instructions.push_back(std::move(pgai));
@@ -12214,9 +12214,9 @@ namespace getfem {
            pgai = std::make_shared<ga_instruction_add>
              (pnode->tensor(), child0->tensor(), child1->tensor());
          }
-        if (child0->t.sparsity() == child1->t.sparsity()
-            && child0->t.qdim() == child1->t.qdim())
-          pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
+         if (child0->t.sparsity() == child1->t.sparsity()
+             && child0->t.qdim() == child1->t.qdim())
+           pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
          rmi.instructions.push_back(std::move(pgai));
          break;
 
@@ -12232,9 +12232,9 @@ namespace getfem {
            pgai = std::make_shared<ga_instruction_sub>
              (pnode->tensor(), child0->tensor(), child1->tensor());
          }
-        if (child0->t.sparsity() == child1->t.sparsity()
-            && child0->t.qdim() == child1->t.qdim())
-          pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
+         if (child0->t.sparsity() == child1->t.sparsity()
+             && child0->t.qdim() == child1->t.qdim())
+           pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
          rmi.instructions.push_back(std::move(pgai));
          break;
 
@@ -12247,7 +12247,7 @@ namespace getfem {
            pgai = std::make_shared<ga_instruction_scalar_mult>
              (pnode->tensor(), child0->tensor(), minus);
          }
-        pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
+         pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
          rmi.instructions.push_back(std::move(pgai));
          break;
 
@@ -12270,15 +12270,15 @@ namespace getfem {
                  (pnode->tensor()[0], child0->tensor()[0], 
child1->tensor()[0]);
              }
              else if (child0->tensor().size() == 1) {
-              pnode->t.set_sparsity(child1->t.sparsity(), child1->t.qdim());
+               pnode->t.set_sparsity(child1->t.sparsity(), child1->t.qdim());
                pgai = std::make_shared<ga_instruction_scalar_mult>
                  (pnode->tensor(), child1->tensor(), child0->tensor()[0]);
-            }
+             }
              else if (child1->tensor().size() == 1) {
-              pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
+               pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
                pgai = std::make_shared<ga_instruction_scalar_mult>
                  (pnode->tensor(), child0->tensor(), child1->tensor()[0]);
-            }
+             }
              else if (pnode->test_function_type < 3) {
                if (child0->tensor_proper_size() == 1) {
                  if (is_uniform) // Unrolled instruction
@@ -12334,13 +12334,13 @@ namespace getfem {
                      pgai = std::make_shared<ga_instruction_simple_tmult>
                        (pnode->tensor(), child0->tensor(), child1->tensor());
                  } else {
-                  if (is_uniform) // Unrolled instruction
-                    pgai = ga_uniform_instruction_reduction_switch
-                      (pnode->t, child1->t, child0->t, s2, tensor_to_clear);
-                  else // Unrolled instruction
-                    pgai = ga_instruction_reduction_switch
-                      (pnode->t, child1->t, child0->t, s2, tensor_to_clear);
-                }
+                   if (is_uniform) // Unrolled instruction
+                     pgai = ga_uniform_instruction_reduction_switch
+                       (pnode->t, child1->t, child0->t, s2, tensor_to_clear);
+                   else // Unrolled instruction
+                     pgai = ga_instruction_reduction_switch
+                       (pnode->t, child1->t, child0->t, s2, tensor_to_clear);
+                 }
                } else {
                  if (child0->tensor_proper_size() == s2)
                    pgai = ga_uniform_instruction_reduction_switch
@@ -12426,7 +12426,7 @@ namespace getfem {
            pgai = std::make_shared<ga_instruction_scalar_scalar_div>
              (pnode->tensor()[0], child0->tensor()[0], child1->tensor()[0]);
          } else if (child1->tensor().size() == 1) {
-          pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
+           pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
            pgai = std::make_shared<ga_instruction_scalar_div>
              (pnode->tensor(), child0->tensor(), child1->tensor()[0]);
          } else GMM_ASSERT1(false, "Internal error");
@@ -12434,7 +12434,7 @@ namespace getfem {
          break;
 
        case GA_PRINT:
-        pnode->t.set_to_copy(child0->t);
+         pnode->t.set_to_copy(child0->t);
          pgai = std::make_shared<ga_instruction_print_tensor>
            (pnode->tensor(), child0, gis.ctx, gis.nbpt, gis.ipt);
          rmi.instructions.push_back(std::move(pgai));
@@ -12446,7 +12446,7 @@ namespace getfem {
              (pnode->tensor(), child0->tensor());
            rmi.instructions.push_back(std::move(pgai));
          } else {
-          pnode->t.set_to_copy(child0->t);
+           pnode->t.set_to_copy(child0->t);
          }
          break;
 
@@ -12456,7 +12456,7 @@ namespace getfem {
              (pnode->tensor(), child0->tensor());
            rmi.instructions.push_back(std::move(pgai));
          } else {
-          pnode->t.set_to_copy(child0->t);
+           pnode->t.set_to_copy(child0->t);
          }
          break;
 
@@ -12471,13 +12471,13 @@ namespace getfem {
        case GA_TRACE:
          {
            size_type N = (child0->tensor_proper_size() == 1) ? 1:size0.back();
-          if (N == 1) {
-            pnode->t.set_to_copy(child0->t);
-          } else { 
-            pgai = std::make_shared<ga_instruction_trace>
-              (pnode->tensor(), child0->tensor(), N);
-            rmi.instructions.push_back(std::move(pgai));
-          }
+           if (N == 1) {
+             pnode->t.set_to_copy(child0->t);
+           } else {
+             pgai = std::make_shared<ga_instruction_trace>
+               (pnode->tensor(), child0->tensor(), N);
+             rmi.instructions.push_back(std::move(pgai));
+           }
          }
          break;
 
@@ -12496,15 +12496,15 @@ namespace getfem {
            pgai = std::make_shared<ga_instruction_scalar_scalar_mult>
              (pnode->tensor()[0], child0->tensor()[0], child1->tensor()[0]);
          } else if (child0->tensor().size() == 1) {
-          pnode->t.set_sparsity(child1->t.sparsity(), child1->t.qdim());
+           pnode->t.set_sparsity(child1->t.sparsity(), child1->t.qdim());
            pgai = std::make_shared<ga_instruction_scalar_mult>
              (pnode->tensor(), child1->tensor(), child0->tensor()[0]);
-        }
+         }
          else if (child1->tensor().size() == 1) {
-          pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
+           pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
            pgai = std::make_shared<ga_instruction_scalar_mult>
              (pnode->tensor(), child0->tensor(), child1->tensor()[0]);
-        }
+         }
          else if (child1->test_function_type == 0)
            pgai = std::make_shared<ga_instruction_dotmult>
              (pnode->tensor(), child0->tensor(), child1->tensor());
@@ -12527,7 +12527,7 @@ namespace getfem {
            pgai = std::make_shared<ga_instruction_scalar_scalar_div>
              (pnode->tensor()[0], child0->tensor()[0], child1->tensor()[0]);
          } else if (child1->tensor().size() == 1) {
-          pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
+           pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
            pgai = std::make_shared<ga_instruction_scalar_div>
              (pnode->tensor(), child0->tensor(), child1->tensor()[0]);
          } else if (child1->test_function_type == 0) {
@@ -12543,15 +12543,15 @@ namespace getfem {
            pgai = std::make_shared<ga_instruction_scalar_scalar_mult>
              (pnode->tensor()[0], child0->tensor()[0], child1->tensor()[0]);
          } else if (child0->tensor().size() == 1) {
-          pnode->t.set_sparsity(child1->t.sparsity(), child1->t.qdim());
+           pnode->t.set_sparsity(child1->t.sparsity(), child1->t.qdim());
            pgai = std::make_shared<ga_instruction_scalar_mult>
              (pnode->tensor(), child1->tensor(), child0->tensor()[0]);
-        }
+         }
          else if (child1->tensor().size() == 1) {
-          pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
+           pnode->t.set_sparsity(child0->t.sparsity(), child0->t.qdim());
            pgai = std::make_shared<ga_instruction_scalar_mult>
              (pnode->tensor(), child0->tensor(), child1->tensor()[0]);
-        }
+         }
          else if (child1->test_function_type == 0) {
            if (is_uniform) // Unrolled instruction
              pgai = ga_uniform_instruction_simple_tmult
@@ -12619,7 +12619,7 @@ namespace getfem {
                       = &(pnode->children[n++]->tensor()[0]);
           }
           pgai = std::make_shared<ga_instruction_simple_c_matrix>
-           (pnode->tensor(), components);
+            (pnode->tensor(), components);
         }
         rmi.instructions.push_back(std::move(pgai));
       }
@@ -12628,7 +12628,7 @@ namespace getfem {
     case GA_NODE_PARAMS:
       if (child0->node_type == GA_NODE_RESHAPE) {
         pgai = std::make_shared<ga_instruction_copy_tensor>(pnode->tensor(),
-                                                           child1->tensor());
+                                                            child1->tensor());
         rmi.instructions.push_back(std::move(pgai));
       } else if (child0->node_type == GA_NODE_PREDEF_FUNC) {
 
@@ -12661,28 +12661,28 @@ namespace getfem {
             if (F.ftype() == 0)
               pgai = std::make_shared<ga_instruction_eval_func_2arg_1res>
                 (pnode->tensor()[0], child1->tensor()[0], child2->tensor()[0],
-                F.f2());
+                 F.f2());
             else
               pgai = std::make_shared<ga_instruction_eval_func_2arg_1res_expr>
                 (pnode->tensor()[0], child1->tensor()[0], child2->tensor()[0],
-                F);
+                 F);
           } else if (child1->tensor().size() == 1) {
             if (F.ftype() == 0)
               pgai =
-               std::make_shared<ga_instruction_eval_func_2arg_first_scalar>
+                std::make_shared<ga_instruction_eval_func_2arg_first_scalar>
                 (pnode->tensor(), child1->tensor(), child2->tensor(), F.f2());
             else
               pgai =
-             std::make_shared<ga_instruction_eval_func_2arg_first_scalar_expr>
+              std::make_shared<ga_instruction_eval_func_2arg_first_scalar_expr>
                 (pnode->tensor(), child1->tensor(), child2->tensor(), F);
           } else if (child2->tensor().size() == 1) {
             if (F.ftype() == 0)
               pgai =
-               std::make_shared<ga_instruction_eval_func_2arg_second_scalar>
+                std::make_shared<ga_instruction_eval_func_2arg_second_scalar>
                 (pnode->tensor(), child1->tensor(), child2->tensor(), F.f2());
             else
               pgai =
-             std::make_shared<ga_instruction_eval_func_2arg_second_scalar_expr>
+              
std::make_shared<ga_instruction_eval_func_2arg_second_scalar_expr>
                 (pnode->tensor(), child1->tensor(), child2->tensor(), F);
           } else {
             if (F.ftype() == 0)
@@ -12718,7 +12718,7 @@ namespace getfem {
              (pnode->tensor(), OP, args, child0->der1, child0->der2);
         } else {
           pgai = std::make_shared<ga_instruction_eval_OP>(pnode->tensor(),
-                                                         OP, args);
+                                                          OP, args);
         }
         rmi.instructions.push_back(std::move(pgai));
 
@@ -12735,7 +12735,7 @@ namespace getfem {
           for (size_type i = 0; i < child0->tensor_order(); ++i) {
             if (pnode->children[i+1]->node_type != GA_NODE_ALLINDICES)
               mi1[i+nb_test]
-               = size_type(round(pnode->children[i+1]->tensor()[0])- 1);
+                = size_type(round(pnode->children[i+1]->tensor()[0])- 1);
             else
               indices.push_back(i+nb_test);
           }
@@ -12753,12 +12753,11 @@ namespace getfem {
     if (tensor_to_clear) {
       gmm::clear(pnode->tensor().as_vector());
       if (!is_uniform) {
-       pgai = std::make_shared<ga_instruction_clear_tensor>(pnode->tensor());
-       rmi.elt_instructions.push_back(std::move(pgai));
-      } 
+        pgai = std::make_shared<ga_instruction_clear_tensor>(pnode->tensor());
+        rmi.elt_instructions.push_back(std::move(pgai));
+      }
     }
     rmi.node_list[pnode->hash_value].push_back(pnode);
-    
   }
 
   static void ga_compile_function(ga_workspace &workspace,
@@ -12827,7 +12826,7 @@ namespace getfem {
     for (size_type i = 0; i < pnode->children.size(); ++i)
       found = ga_node_used_interpolates(pnode->children[i], workspace,
                                         interpolates, interpolates_der)
-       || found;
+        || found;
     return found;
   }
 
@@ -12870,7 +12869,7 @@ namespace getfem {
         if (rmi.transformations[transname].count(nodename) == 0) {
           auto&& inin = rmi.interpolate_infos[transname];
           pga_instruction pgai =
-           std::make_shared<ga_instruction_update_group_info>
+            std::make_shared<ga_instruction_update_group_info>
             (workspace, gis, inin, nodename, inin.groups_info[nodename]);
           rmi.instructions.push_back(std::move(pgai));
           rmi.transformations[transname].insert(nodename);
@@ -12921,201 +12920,201 @@ namespace getfem {
     gis.whole_instructions.clear();
     for (size_type version : std::array<size_type, 3>{1, 0, 2}) {
       for (size_type i = 0; i < workspace.nb_trees(); ++i) {
-       ga_workspace::tree_description &td = workspace.tree_info(i);
-       
-       if ((version == td.interpolation) &&
-           ((version == 0 && td.order == order) || // Assembly
-            ((version > 0 && (td.order == size_type(-1) || // Assignment
-                               td.order == size_type(-2) - order))))) {
-         ga_tree *added_tree = 0;
-         if (td.interpolation) {
-           gis.interpolation_trees.push_back(*(td.ptree));
-           added_tree = &(gis.interpolation_trees.back());
-         } else {
-           gis.trees.push_back(*(td.ptree));
-           added_tree = &(gis.trees.back());
-         }
-
-         // Semantic analysis mainly to evaluate fixed size variables and data
-         ga_semantic_analysis("", *added_tree, workspace,
-                              td.mim->linked_mesh().dim(),
-                              ref_elt_dim_of_mesh(td.mim->linked_mesh()),
-                              true, false);
-         pga_tree_node root = added_tree->root;
-         if (root) {
-           // Compile tree
-           // cout << "Will compile "; ga_print_node(root, cout); cout << endl;
-
-           ga_instruction_set::region_mim rm(td.mim, td.rg);
-           ga_instruction_set::region_mim_instructions &rmi
-             = gis.whole_instructions[rm];
-           rmi.m = td.m;
-           // rmi.interpolate_infos.clear();
-           ga_compile_interpolate_trans(root, workspace, gis, rmi, *(td.m));
-           ga_compile_node(root, workspace, gis, rmi, *(td.m), false,
-                           rmi.current_hierarchy);
-           // cout << "compilation finished "; ga_print_node(root, cout);
-           // cout << endl;
-
-           if (version > 0) { // Assignment OR interpolation
-             if(td.varname_interpolation.size() != 0) {// assignment
-               auto *imd
-                 = workspace.associated_im_data(td.varname_interpolation);
-               auto &V = const_cast<model_real_plain_vector &>
+        ga_workspace::tree_description &td = workspace.tree_info(i);
+
+        if ((version == td.interpolation) &&
+            ((version == 0 && td.order == order) || // Assembly
+             ((version > 0 && (td.order == size_type(-1) || // Assignment
+                                td.order == size_type(-2) - order))))) {
+          ga_tree *added_tree = 0;
+          if (td.interpolation) {
+            gis.interpolation_trees.push_back(*(td.ptree));
+            added_tree = &(gis.interpolation_trees.back());
+          } else {
+            gis.trees.push_back(*(td.ptree));
+            added_tree = &(gis.trees.back());
+          }
+
+          // Semantic analysis mainly to evaluate fixed size variables and data
+          ga_semantic_analysis("", *added_tree, workspace,
+                               td.mim->linked_mesh().dim(),
+                               ref_elt_dim_of_mesh(td.mim->linked_mesh()),
+                               true, false);
+          pga_tree_node root = added_tree->root;
+          if (root) {
+            // Compile tree
+            // cout << "Will compile "; ga_print_node(root, cout); cout << 
endl;
+
+            ga_instruction_set::region_mim rm(td.mim, td.rg);
+            ga_instruction_set::region_mim_instructions &rmi
+              = gis.whole_instructions[rm];
+            rmi.m = td.m;
+            // rmi.interpolate_infos.clear();
+            ga_compile_interpolate_trans(root, workspace, gis, rmi, *(td.m));
+            ga_compile_node(root, workspace, gis, rmi, *(td.m), false,
+                            rmi.current_hierarchy);
+            // cout << "compilation finished "; ga_print_node(root, cout);
+            // cout << endl;
+
+            if (version > 0) { // Assignment OR interpolation
+              if(td.varname_interpolation.size() != 0) {// assignment
+                auto *imd
+                  = workspace.associated_im_data(td.varname_interpolation);
+                auto &V = const_cast<model_real_plain_vector &>
             (workspace.value(td.varname_interpolation));
-               GMM_ASSERT1(imd, "Internal error");
-               auto pgai = std::make_shared<ga_instruction_assignment>
+                GMM_ASSERT1(imd, "Internal error");
+                auto pgai = std::make_shared<ga_instruction_assignment>
             (root->tensor(), V, gis.ctx, imd);
-               rmi.instructions.push_back(std::move(pgai));
-        }
-           } else { // assembly
-             // Addition of an assembly instruction
-             pga_instruction pgai;
-             switch(order) {
-             case 0:
-               pgai = std::make_shared<ga_instruction_scalar_assembly>
-                 (root->tensor(), workspace.assembled_potential(), gis.coeff);
-               break;
-             case 1:
-               {
-                 const mesh_fem *mf=workspace.associated_mf(root->name_test1);
-                 const mesh_fem **mfg = 0;
-                 add_interval_to_gis(workspace, root->name_test1, gis);
-                 
-                 if (mf) {
-                   const std::string &intn1 = root->interpolate_name_test1;
-                   const gmm::sub_interval *Ir = 0, *In = 0;
-                   if (intn1.size() &&
-                       workspace.variable_group_exists(root->name_test1)) {
-                     ga_instruction_set::variable_group_info &vgi =
-                       rmi.interpolate_infos[intn1]
-                       .groups_info[root->name_test1];
-                     Ir = &(vgi.Ir);
-                     In = &(vgi.In);
-                     mfg = &(vgi.mf);
-                     mf = 0;
-                   } else {
-                     Ir = &(gis.var_intervals[root->name_test1]);
-                     In = &(workspace.interval_of_variable(root->name_test1));
-                   }
-                   fem_interpolation_context &ctx
-                     = intn1.size() ? rmi.interpolate_infos[intn1].ctx
-                     : gis.ctx;
-                   bool interpolate
-                     = (!intn1.empty() && intn1.compare("neighbour_elt")!=0);
-                   pgai = std::make_shared<ga_instruction_fem_vector_assembly>
-                     (root->tensor(), workspace.unreduced_vector(),
-                      workspace.assembled_vector(), ctx, *Ir, *In, mf, mfg,
-                      gis.coeff, gis.nbpt, gis.ipt, interpolate);
-                 } else {
-                   pgai = std::make_shared<ga_instruction_vector_assembly>
-                     (root->tensor(), workspace.assembled_vector(),
-                      workspace.interval_of_variable(root->name_test1),
-                      gis.coeff);
-                 }
-               }
-               break;
-             case 2:
-               {
-                 const mesh_fem *mf1=workspace.associated_mf(root->name_test1);
-                 const mesh_fem *mf2=workspace.associated_mf(root->name_test2);
-                 const mesh_fem **mfg1 = 0, **mfg2 = 0;
-                 const std::string &intn1 = root->interpolate_name_test1;
-                 const std::string &intn2 = root->interpolate_name_test2;
-                 fem_interpolation_context &ctx1
-                   = intn1.empty() ? gis.ctx
-                   : rmi.interpolate_infos[intn1].ctx;
-                 fem_interpolation_context &ctx2
-                   = intn2.empty() ? gis.ctx
-                   : rmi.interpolate_infos[intn2].ctx;
-                 bool interpolate
-                   = (!intn1.empty() && intn1.compare("neighbour_elt")!=0)
-                   || (!intn2.empty() && intn2.compare("neighbour_elt")!=0);
-                 
-                 add_interval_to_gis(workspace, root->name_test1, gis);
-                 add_interval_to_gis(workspace, root->name_test2, gis);
-                 
-                 const gmm::sub_interval *Ir1 = 0, *In1 = 0, *Ir2 = 0, *In2=0;
-                 const scalar_type *alpha1 = 0, *alpha2 = 0;
-                 
-                 if (!intn1.empty() &&
-                     workspace.variable_group_exists(root->name_test1)) {
-                   ga_instruction_set::variable_group_info &vgi =
-                     rmi.interpolate_infos[intn1]
-                     .groups_info[root->name_test1];
-                   Ir1 = &(vgi.Ir);
-                   In1 = &(vgi.In);
-                   mfg1 = &(vgi.mf);
-                   mf1 = 0;
-                   alpha1 = &(vgi.alpha);
-                 } else {
-                   alpha1 = &(workspace.factor_of_variable(root->name_test1));
-                   Ir1 = &(gis.var_intervals[root->name_test1]);
-                   In1 = &(workspace.interval_of_variable(root->name_test1));
-                 }
-                 
-                 if (!intn2.empty() &&
-                     workspace.variable_group_exists(root->name_test2)) {
-                   ga_instruction_set::variable_group_info &vgi =
-                     rmi.interpolate_infos[intn2]
-                     .groups_info[root->name_test2];
-                   Ir2 = &(vgi.Ir);
-                   In2 = &(vgi.In);
-                   mfg2 = &(vgi.mf);
-                   mf2 = 0;
-                   alpha2 = &(vgi.alpha);
-                 } else {
-                   alpha2 = &(workspace.factor_of_variable(root->name_test2));
-                   Ir2 = &(gis.var_intervals[root->name_test2]);
-                   In2 = &(workspace.interval_of_variable(root->name_test2));
-                 }
-                 
-                 if (!interpolate && mfg1 == 0 && mfg2 == 0 && mf1 && mf2
-                     && mf1->get_qdim() == 1 && mf2->get_qdim() == 1
-                     && !(mf1->is_reduced()) && !(mf2->is_reduced())) {
-                   pgai = std::make_shared
-                     <ga_instruction_matrix_assembly_standard_scalar<>>
-                     (root->tensor(), workspace.assembled_matrix(), ctx1, ctx2,
-                      *In1, *In2, mf1, mf2,
-                      gis.coeff, *alpha1, *alpha2, gis.nbpt, gis.ipt);
-                 } else if (!interpolate && mfg1 == 0 && mfg2==0 && mf1 && mf2
-                            && !(mf1->is_reduced()) && !(mf2->is_reduced())) {
-                   if (root->sparsity() == 10 && root->t.qdim()==2)
-                     pgai = std::make_shared
-                       <ga_instruction_matrix_assembly_standard_vector_opt10_2>
-                       (root->tensor(), workspace.assembled_matrix(),ctx1,ctx2,
-                        *In1, *In2, mf1, mf2,
-                        gis.coeff, *alpha1, *alpha2, gis.nbpt, gis.ipt);
-                   else if (root->sparsity() == 10 && root->t.qdim()==3)
-                     pgai = std::make_shared
-                       <ga_instruction_matrix_assembly_standard_vector_opt10_3>
-                       (root->tensor(), workspace.assembled_matrix(),ctx1,ctx2,
-                        *In1, *In2, mf1, mf2,
-                        gis.coeff, *alpha1, *alpha2, gis.nbpt, gis.ipt);
-                   else
-                     pgai = std::make_shared
-                       <ga_instruction_matrix_assembly_standard_vector<>>
-                       (root->tensor(), workspace.assembled_matrix(),ctx1,ctx2,
-                        *In1, *In2, mf1, mf2,
-                        gis.coeff, *alpha1, *alpha2, gis.nbpt, gis.ipt);
-                   
-                 } else {
-                   pgai = std::make_shared<ga_instruction_matrix_assembly<>>
-                     (root->tensor(), workspace.unreduced_matrix(),
-                      workspace.assembled_matrix(), ctx1, ctx2,
-                      *Ir1, *In1, *Ir2, *In2, mf1, mfg1, mf2, mfg2,
-                      gis.coeff, *alpha1, *alpha2, gis.nbpt, gis.ipt,
-                      interpolate);
-                 }
-                 break;
-               }
-             }
-             if (pgai)
-               gis.whole_instructions[rm].instructions.push_back
-                 (std::move(pgai));
-           }
-         }
-       }
+                rmi.instructions.push_back(std::move(pgai));
+        }
+            } else { // assembly
+              // Addition of an assembly instruction
+              pga_instruction pgai;
+              switch(order) {
+              case 0:
+                pgai = std::make_shared<ga_instruction_scalar_assembly>
+                  (root->tensor(), workspace.assembled_potential(), gis.coeff);
+                break;
+              case 1:
+                {
+                  const mesh_fem *mf=workspace.associated_mf(root->name_test1);
+                  const mesh_fem **mfg = 0;
+                  add_interval_to_gis(workspace, root->name_test1, gis);
+
+                  if (mf) {
+                    const std::string &intn1 = root->interpolate_name_test1;
+                    const gmm::sub_interval *Ir = 0, *In = 0;
+                    if (intn1.size() &&
+                        workspace.variable_group_exists(root->name_test1)) {
+                      ga_instruction_set::variable_group_info &vgi =
+                        rmi.interpolate_infos[intn1]
+                        .groups_info[root->name_test1];
+                      Ir = &(vgi.Ir);
+                      In = &(vgi.In);
+                      mfg = &(vgi.mf);
+                      mf = 0;
+                    } else {
+                      Ir = &(gis.var_intervals[root->name_test1]);
+                      In = &(workspace.interval_of_variable(root->name_test1));
+                    }
+                    fem_interpolation_context &ctx
+                      = intn1.size() ? rmi.interpolate_infos[intn1].ctx
+                      : gis.ctx;
+                    bool interpolate
+                      = (!intn1.empty() && intn1.compare("neighbour_elt")!=0);
+                    pgai = std::make_shared<ga_instruction_fem_vector_assembly>
+                      (root->tensor(), workspace.unreduced_vector(),
+                       workspace.assembled_vector(), ctx, *Ir, *In, mf, mfg,
+                       gis.coeff, gis.nbpt, gis.ipt, interpolate);
+                  } else {
+                    pgai = std::make_shared<ga_instruction_vector_assembly>
+                      (root->tensor(), workspace.assembled_vector(),
+                       workspace.interval_of_variable(root->name_test1),
+                       gis.coeff);
+                  }
+                }
+                break;
+              case 2:
+                {
+                  const mesh_fem 
*mf1=workspace.associated_mf(root->name_test1);
+                  const mesh_fem 
*mf2=workspace.associated_mf(root->name_test2);
+                  const mesh_fem **mfg1 = 0, **mfg2 = 0;
+                  const std::string &intn1 = root->interpolate_name_test1;
+                  const std::string &intn2 = root->interpolate_name_test2;
+                  fem_interpolation_context &ctx1
+                    = intn1.empty() ? gis.ctx
+                    : rmi.interpolate_infos[intn1].ctx;
+                  fem_interpolation_context &ctx2
+                    = intn2.empty() ? gis.ctx
+                    : rmi.interpolate_infos[intn2].ctx;
+                  bool interpolate
+                    = (!intn1.empty() && intn1.compare("neighbour_elt")!=0)
+                    || (!intn2.empty() && intn2.compare("neighbour_elt")!=0);
+
+                  add_interval_to_gis(workspace, root->name_test1, gis);
+                  add_interval_to_gis(workspace, root->name_test2, gis);
+
+                  const gmm::sub_interval *Ir1 = 0, *In1 = 0, *Ir2 = 0, *In2=0;
+                  const scalar_type *alpha1 = 0, *alpha2 = 0;
+
+                  if (!intn1.empty() &&
+                      workspace.variable_group_exists(root->name_test1)) {
+                    ga_instruction_set::variable_group_info &vgi =
+                      rmi.interpolate_infos[intn1]
+                      .groups_info[root->name_test1];
+                    Ir1 = &(vgi.Ir);
+                    In1 = &(vgi.In);
+                    mfg1 = &(vgi.mf);
+                    mf1 = 0;
+                    alpha1 = &(vgi.alpha);
+                  } else {
+                    alpha1 = &(workspace.factor_of_variable(root->name_test1));
+                    Ir1 = &(gis.var_intervals[root->name_test1]);
+                    In1 = &(workspace.interval_of_variable(root->name_test1));
+                  }
+
+                  if (!intn2.empty() &&
+                      workspace.variable_group_exists(root->name_test2)) {
+                    ga_instruction_set::variable_group_info &vgi =
+                      rmi.interpolate_infos[intn2]
+                      .groups_info[root->name_test2];
+                    Ir2 = &(vgi.Ir);
+                    In2 = &(vgi.In);
+                    mfg2 = &(vgi.mf);
+                    mf2 = 0;
+                    alpha2 = &(vgi.alpha);
+                  } else {
+                    alpha2 = &(workspace.factor_of_variable(root->name_test2));
+                    Ir2 = &(gis.var_intervals[root->name_test2]);
+                    In2 = &(workspace.interval_of_variable(root->name_test2));
+                  }
+
+                  if (!interpolate && mfg1 == 0 && mfg2 == 0 && mf1 && mf2
+                      && mf1->get_qdim() == 1 && mf2->get_qdim() == 1
+                      && !(mf1->is_reduced()) && !(mf2->is_reduced())) {
+                    pgai = std::make_shared
+                      <ga_instruction_matrix_assembly_standard_scalar<>>
+                      (root->tensor(), workspace.assembled_matrix(), ctx1, 
ctx2,
+                       *In1, *In2, mf1, mf2,
+                       gis.coeff, *alpha1, *alpha2, gis.nbpt, gis.ipt);
+                  } else if (!interpolate && mfg1 == 0 && mfg2==0 && mf1 && mf2
+                             && !(mf1->is_reduced()) && !(mf2->is_reduced())) {
+                    if (root->sparsity() == 10 && root->t.qdim()==2)
+                      pgai = std::make_shared
+                        
<ga_instruction_matrix_assembly_standard_vector_opt10_2>
+                        (root->tensor(), 
workspace.assembled_matrix(),ctx1,ctx2,
+                         *In1, *In2, mf1, mf2,
+                         gis.coeff, *alpha1, *alpha2, gis.nbpt, gis.ipt);
+                    else if (root->sparsity() == 10 && root->t.qdim()==3)
+                      pgai = std::make_shared
+                        
<ga_instruction_matrix_assembly_standard_vector_opt10_3>
+                        (root->tensor(), 
workspace.assembled_matrix(),ctx1,ctx2,
+                         *In1, *In2, mf1, mf2,
+                         gis.coeff, *alpha1, *alpha2, gis.nbpt, gis.ipt);
+                    else
+                      pgai = std::make_shared
+                        <ga_instruction_matrix_assembly_standard_vector<>>
+                        (root->tensor(), 
workspace.assembled_matrix(),ctx1,ctx2,
+                         *In1, *In2, mf1, mf2,
+                         gis.coeff, *alpha1, *alpha2, gis.nbpt, gis.ipt);
+
+                  } else {
+                    pgai = std::make_shared<ga_instruction_matrix_assembly<>>
+                      (root->tensor(), workspace.unreduced_matrix(),
+                       workspace.assembled_matrix(), ctx1, ctx2,
+                       *Ir1, *In1, *Ir2, *In2, mf1, mfg1, mf2, mfg2,
+                       gis.coeff, *alpha1, *alpha2, gis.nbpt, gis.ipt,
+                       interpolate);
+                  }
+                  break;
+                }
+              }
+              if (pgai)
+                gis.whole_instructions[rm].instructions.push_back
+                  (std::move(pgai));
+            }
+          }
+        }
       }
     }
   }
@@ -13212,8 +13211,8 @@ namespace getfem {
             gmm::clear(workspace.assembled_tensor().as_vector());
             if (ii == 0) {
               for (size_type j = 0; j < gilb.size(); ++j) j += gilb[j]->exec();
-             for (size_type j = 0; j < gile.size(); ++j) j += gile[j]->exec();
-           }
+              for (size_type j = 0; j < gile.size(); ++j) j += gile[j]->exec();
+            }
             for (size_type j = 0; j < gil.size(); ++j) j += gil[j]->exec();
             gic.store_result(v.cv(), ind[ii], workspace.assembled_tensor());
           }
@@ -13270,13 +13269,13 @@ namespace getfem {
 #if 0
       if (gilb.size()) cout << "Begin instructions\n";
       for (size_type j = 0; j < gilb.size(); ++j)
-       cout << typeid(*(gilb[j])).name() << endl;
+        cout << typeid(*(gilb[j])).name() << endl;
       if (gile.size()) cout << "\nElement instructions\n";
       for (size_type j = 0; j < gile.size(); ++j)
-       cout << typeid(*(gile[j])).name() << endl;
+        cout << typeid(*(gile[j])).name() << endl;
       cout << "\nGauss pt instructions\n";
       for (size_type j = 0; j < gil.size(); ++j)
-       cout << typeid(*(gil[j])).name() << endl;
+        cout << typeid(*(gil[j])).name() << endl;
 #endif
       const mesh_region &region = *(instr.first.region());
 
@@ -13289,12 +13288,12 @@ namespace getfem {
       bgeot::pgeotrans_precomp pgp = 0;
       bool first_gp = true;
       for (getfem::mr_visitor v(region, m, true); !v.finished(); ++v) {
-       if (mim.convex_index().is_in(v.cv())) {
+        if (mim.convex_index().is_in(v.cv())) {
           // cout << "proceed with elt " << v.cv() << " face " << v.f() << 
endl;
           if (v.cv() != old_cv) {
             pgt = m.trans_of_convex(v.cv());
-           pim = mim.int_method_of_element(v.cv());
-           m.points_of_convex(v.cv(), G);
+            pim = mim.int_method_of_element(v.cv());
+            m.points_of_convex(v.cv(), G);
 
             if (pim->type() == IM_NONE) continue;
             GMM_ASSERT1(pim->type() == IM_APPROX, "Sorry, exact methods cannot 
"
@@ -13303,18 +13302,18 @@ namespace getfem {
             pspt = pai->pintegration_points();
             if (pspt->size()) {
               if (pgp && gis.pai == pai && pgt_old == pgt) {
-               gis.ctx.change(pgp, 0, 0, G, v.cv(), v.f());
+                gis.ctx.change(pgp, 0, 0, G, v.cv(), v.f());
               } else {
                 if (pai->is_built_on_the_fly()) {
                   gis.ctx.change(pgt, 0, (*pspt)[0], G, v.cv(), v.f());
-                 pgp = 0;
+                  pgp = 0;
                 } else {
-                 pgp = gis.gp_pool(pgt, pspt);
+                  pgp = gis.gp_pool(pgt, pspt);
                   gis.ctx.change(pgp, 0, 0, G, v.cv(), v.f());
                 }
-               pgt_old = pgt; gis.pai = pai;
+                pgt_old = pgt; gis.pai = pai;
               }
-              if (gis.need_elt_size) 
+              if (gis.need_elt_size)
                 gis.elt_size = convex_radius_estimate(pgt, G)*scalar_type(2);
             }
             old_cv = v.cv();
@@ -13332,15 +13331,15 @@ namespace getfem {
               first_ind = pai->ind_first_point_on_face(v.f());
             }
             for (gis.ipt = 0; gis.ipt < gis.nbpt; ++(gis.ipt)) {
-             if (pgp) gis.ctx.set_ii(first_ind+gis.ipt);
+              if (pgp) gis.ctx.set_ii(first_ind+gis.ipt);
               else gis.ctx.set_xref((*pspt)[first_ind+gis.ipt]);
               if (gis.ipt == 0 || !(pgt->is_linear())) {
                 J = gis.ctx.J();
                 // Computation of unit normal vector in case of a boundary
                 if (v.f() != short_type(-1)) {
-                 gis.Normal.resize(G.nrows());
-                 un.resize(pgt->dim());
-                 gmm::copy(pgt->normals()[v.f()], un);
+                  gis.Normal.resize(G.nrows());
+                  un.resize(pgt->dim());
+                  gmm::copy(pgt->normals()[v.f()], un);
                   gmm::mult(gis.ctx.B(), un, gis.Normal);
                   scalar_type nup = gmm::vect_norm2(gis.Normal);
                   J *= nup;
@@ -13353,9 +13352,9 @@ namespace getfem {
                 for (size_type j = 0; j < gilb.size(); ++j) j+=gilb[j]->exec();
                 first_gp = false;
               }
-             if (gis.ipt == 0) {
-               for (size_type j = 0; j < gile.size(); ++j) j+=gile[j]->exec();
-             }
+              if (gis.ipt == 0) {
+                for (size_type j = 0; j < gile.size(); ++j) j+=gile[j]->exec();
+              }
               for (size_type j = 0; j < gil.size(); ++j) j+=gil[j]->exec();
               GA_DEBUG_INFO("");
             }
@@ -13851,7 +13850,7 @@ namespace getfem {
     // working locally. This means a specific assembly.
     model_real_sparse_matrix M(mf.nb_dof(), mf.nb_dof());
     asm_mass_matrix(M, mim, mf, region);
-       
+
     ga_workspace workspace(md);
     size_type nbdof = md.nb_dof();
     gmm::sub_interval I(nbdof, mf.nb_dof());
@@ -13871,12 +13870,12 @@ namespace getfem {
     getfem::base_vector loc_U;
     for (mr_visitor v(region, mf.linked_mesh(), true); !v.finished(); ++v) {
       if (mf.convex_index().is_in(v.cv())) {
-       size_type nd = mf.nb_basic_dof_of_element(v.cv());
-       loc_M.base_resize(nd, nd); gmm::resize(loc_U, nd);
-       gmm::sub_index J(mf.ind_basic_dof_of_element(v.cv()));
-       gmm::copy(gmm::sub_matrix(M, J, J), loc_M);
-       gmm::lu_solve(loc_M, loc_U, gmm::sub_vector(F, J));
-       gmm::copy(loc_U, gmm::sub_vector(result, J));
+        size_type nd = mf.nb_basic_dof_of_element(v.cv());
+        loc_M.base_resize(nd, nd); gmm::resize(loc_U, nd);
+        gmm::sub_index J(mf.ind_basic_dof_of_element(v.cv()));
+        gmm::copy(gmm::sub_matrix(M, J, J), loc_M);
+        gmm::lu_solve(loc_M, loc_U, gmm::sub_vector(F, J));
+        gmm::copy(loc_U, gmm::sub_vector(result, J));
       }
     }
     MPI_SUM_VECTOR(result);
@@ -14251,9 +14250,9 @@ namespace getfem {
         cv = cv_y;
         ret_type = 1;
       } else {
-       cv = cv_x;
-       P_ref = ctx_x.xref();
-       ret_type = 1;
+        cv = cv_x;
+        P_ref = ctx_x.xref();
+        ret_type = 1;
       }
       GMM_ASSERT1(!compute_derivatives,
                   "No derivative for this transformation");
@@ -14292,28 +14291,28 @@ namespace getfem {
   (ga_workspace &workspace, const std::string &name,
    std::map<size_type, size_type> &elt_corr) {
     GMM_ASSERT1(workspace.interpolate_transformation_exists(name),
-               "Unknown transformation");
+                "Unknown transformation");
     auto pit=workspace.interpolate_transformation(name).get();
     auto cpext
       = dynamic_cast<const interpolate_transformation_element_extrapolation *>
       (pit);
     GMM_ASSERT1(cpext,
-               "The transformation is not of element extrapolation type");
+                "The transformation is not of element extrapolation type");
     const_cast<interpolate_transformation_element_extrapolation *>(cpext)
       ->set_correspondance(elt_corr);
   }
-    
+
   void set_element_extrapolation_correspondance
   (model &md, const std::string &name,
    std::map<size_type, size_type> &elt_corr) {
     GMM_ASSERT1(md.interpolate_transformation_exists(name),
-               "Unknown transformation");
+                "Unknown transformation");
     auto pit=md.interpolate_transformation(name).get();
     auto cpext
       = dynamic_cast<const interpolate_transformation_element_extrapolation *>
       (pit);
     GMM_ASSERT1(cpext,
-               "The transformation is not of element extrapolation type");
+                "The transformation is not of element extrapolation type");
     const_cast<interpolate_transformation_element_extrapolation *>(cpext)
       ->set_correspondance(elt_corr);
   }
diff --git a/src/getfem_models.cc b/src/getfem_models.cc
index 0aad4e0..6aca69a 100644
--- a/src/getfem_models.cc
+++ b/src/getfem_models.cc
@@ -96,7 +96,7 @@ namespace getfem {
       ("neighbour_elt", interpolate_transformation_neighbour_instance());
   }
 
-  void model::var_description::set_size(void) {
+  void model::var_description::set_size() {
     n_temp_iter = 0;
     default_iter = 0;
     if (is_complex)
@@ -145,7 +145,7 @@ namespace getfem {
     return nit;
   }
 
-  void model::var_description::clear_temporaries(void) {
+  void model::var_description::clear_temporaries() {
     n_temp_iter = 0;
     default_iter = 0;
     if (is_complex)
@@ -374,7 +374,7 @@ namespace getfem {
         }
   }
 
-  void model::actualize_sizes(void) const {
+  void model::actualize_sizes() const {
     // cout << "begin act size" << endl;
     bool actualized = false;
     getfem::local_guard lock = locks_.get_lock();
@@ -434,7 +434,7 @@ namespace getfem {
         default : break;
         }
       }
-    
+
       if (vdescr.pim_data != 0
           && vdescr.v_num < vdescr.pim_data->version_number()) {
         // const im_data *pimd = vdescr.pim_data;
@@ -570,7 +570,7 @@ namespace getfem {
                              MM);
                     termadded = true;
                   }
-                } else if (multname.compare(term.var1) == 0 && 
+                } else if (multname.compare(term.var1) == 0 &&
                            vname.compare(term.var2) == 0) {
                   if (!bupd) {
                     brick.terms_to_be_computed = true;
@@ -583,7 +583,7 @@ namespace getfem {
                   else
                     gmm::add(gmm::transposed(brick.rmatlist[j]), MM);
                   termadded = true;
-                  
+
                 } else if (multname.compare(term.var2) == 0 &&
                            vname.compare(term.var1) == 0) {
                   if (!bupd) {
@@ -1177,7 +1177,7 @@ namespace getfem {
       if (is_complex())
         set_complex_variable(varname)[0] = complex_type(t);
       else
-        set_real_variable(varname)[0] = t; 
+        set_real_variable(varname)[0] = t;
     }
   }
 
@@ -1201,7 +1201,7 @@ namespace getfem {
       }
   }
 
-  void model::shift_variables_for_time_integration(void) {
+  void model::shift_variables_for_time_integration() {
     for (VAR_SET::iterator it = variables.begin();
          it != variables.end(); ++it)
       if (it->second.is_variable && it->second.ptsc)
@@ -1224,7 +1224,7 @@ namespace getfem {
     time_integration = 1;
   }
 
-  void model::copy_init_time_derivative(void) {
+  void model::copy_init_time_derivative() {
 
     for (VAR_SET::iterator it = variables.begin();
          it != variables.end(); ++it)
@@ -1983,7 +1983,7 @@ namespace getfem {
   }
 
 
-  void model::set_dispatch_coeff(void) {
+  void model::set_dispatch_coeff() {
     for (dal::bv_visitor ib(active_bricks); !ib.finished(); ++ib) {
       brick_description &brick = bricks[ib];
       if (brick.pdispatch)
@@ -1992,7 +1992,7 @@ namespace getfem {
     }
   }
 
-  void model::first_iter(void) {
+  void model::first_iter() {
     context_check(); if (act_size_to_be_done) actualize_sizes();
     for (VAR_SET::iterator it = variables.begin(); it != variables.end(); ++it)
       it->second.clear_temporaries();
@@ -2016,7 +2016,7 @@ namespace getfem {
     }
   }
 
-  void model::next_iter(void) {
+  void model::next_iter() {
     context_check(); if (act_size_to_be_done) actualize_sizes();
     set_dispatch_coeff();
 
@@ -2097,8 +2097,8 @@ namespace getfem {
   }
 
   void model::add_assembly_assignments(const std::string &varname,
-                                      const std::string &expr, size_type rg,
-                                      size_type order, bool before) {
+                                       const std::string &expr, size_type rg,
+                                       size_type order, bool before) {
     GMM_ASSERT1(order < 3 || order == size_type(-1), "Bad order value");
     const im_data *imd = pim_data_of_variable(varname);
     GMM_ASSERT1(imd != 0, "Only applicable to im_data");
@@ -2300,7 +2300,7 @@ namespace getfem {
     }
   }
 
-  void model::update_affine_dependent_variables(void) {
+  void model::update_affine_dependent_variables() {
     for (VAR_SET::iterator it = variables.begin(); it != variables.end(); ++it)
       if (it->second.is_affine_dependent) {
         VAR_SET::iterator it2 = variables.find(it->second.org_name);
@@ -2343,11 +2343,11 @@ namespace getfem {
           { detected = true; break; }
 
       if (detected) {
-       int ifo = -1;
-       for (auto &pmim :  brick.mims)
-         ifo = std::max(ifo, mf->linked_mesh().region(region)
-                        .region_is_faces_of(m, brick.region,
-                                            pmim->linked_mesh()));
+        int ifo = -1;
+        for (auto &pmim :  brick.mims)
+          ifo = std::max(ifo, mf->linked_mesh().region(region)
+                              .region_is_faces_of(m, brick.region,
+                                                  pmim->linked_mesh()));
         GMM_ASSERT1(ifo >= 0, "The given region is only partially covered by "
                     "region of brick " << brick.pbr->brick_name()
                     << ". Please subdivise the region");
@@ -2366,7 +2366,6 @@ namespace getfem {
               result = workspace.extract_Neumann_term(varname);
           }
         }
-      
       }
     }
     return result;
@@ -2693,19 +2692,19 @@ namespace getfem {
         {
           exception.run([&]
           {
-           if (version & BUILD_RHS)
-             GMM_TRACE2("Global generic assembly RHS");
-           if (version & BUILD_MATRIX)
-             GMM_TRACE2("Global generic assembly tangent term");
+            if (version & BUILD_RHS)
+              GMM_TRACE2("Global generic assembly RHS");
+            if (version & BUILD_MATRIX)
+              GMM_TRACE2("Global generic assembly tangent term");
 
             ga_workspace workspace(*this);
 
-           for (const auto &ad : assignments)
-             workspace.add_assignment_expression
-               (ad.varname, ad.expr, ad.region, ad.order, ad.before);
+            for (const auto &ad : assignments)
+              workspace.add_assignment_expression
+                (ad.varname, ad.expr, ad.region, ad.order, ad.before);
 
             for (const auto &ge : generic_expressions)
-             workspace.add_expression(ge.expr, ge.mim, ge.region);
+              workspace.add_expression(ge.expr, ge.mim, ge.region);
 
             if (version & BUILD_RHS) {
               if (is_complex()) {
@@ -3305,11 +3304,11 @@ model_complex_plain_vector &
         size_type nbgdof = md.nb_dof();
         ga_workspace workspace(md, true);
         GMM_TRACE2(name << ": generic source term assembly");
-        workspace.add_expression(expr, *(mims[0]), region);  
+        workspace.add_expression(expr, *(mims[0]), region);
         model::varnamelist vlmd; md.variable_list(vlmd);
         for (size_type i = 0; i < vlmd.size(); ++i)
           if (md.is_disabled_variable(vlmd[i]))
-            nbgdof = std::max(nbgdof, 
+            nbgdof = std::max(nbgdof,
                               workspace.interval_of_variable(vlmd[i]).last());
         GMM_TRACE2(name << ": generic matrix assembly");
         model_real_plain_vector V(nbgdof);
@@ -3397,7 +3396,7 @@ model_complex_plain_vector &
       vl.push_back(directvarname);
       dl.push_back(directdataname);
     } else directvarname = "";
-    
+
     pbrick pbr = std::make_shared<gen_source_term_assembly_brick>
       (expr, brickname, vl_test1, directvarname, directdataname);
     model::termlist tl;
@@ -3448,10 +3447,10 @@ model_complex_plain_vector &
         model::varnamelist vlmd; md.variable_list(vlmd);
         for (size_type i = 0; i < vlmd.size(); ++i)
           if (md.is_disabled_variable(vlmd[i]))
-            nbgdof = std::max(nbgdof, 
+            nbgdof = std::max(nbgdof,
                               workspace.interval_of_variable(vlmd[i]).last());
         GMM_TRACE2(name << ": generic matrix assembly");
-        model_real_sparse_matrix R(nbgdof, nbgdof); 
+        model_real_sparse_matrix R(nbgdof, nbgdof);
         workspace.set_assembled_matrix(R);
         workspace.assembly(2);
         for (size_type i = 0; i < vl_test1.size(); ++i) {
@@ -3841,7 +3840,7 @@ model_complex_plain_vector &
                     "Bad format generic elliptic brick coefficient");
     }
 
-    generic_elliptic_brick(void) {
+    generic_elliptic_brick() {
       set_flags("Generic elliptic", true /* is linear*/,
                 true /* is symmetric */, true /* is coercive */,
                 true /* is real */, true /* is complex */);
@@ -3898,7 +3897,7 @@ model_complex_plain_vector &
         if (mf) qdim_data = mf->get_qdim() * (n / mf->nb_dof());
         else  qdim_data = n;
       }
-      
+
       if (qdim == 1) {
         if (qdim_data != 1) {
           GMM_ASSERT1(qdim_data == gmm::sqr(dim),
@@ -3913,7 +3912,7 @@ model_complex_plain_vector &
           if (qdim_data == gmm::sqr(dim))
             expr = 
"((Reshape("+dataname+",meshdim,meshdim))*Grad_"+varname+"):Grad_"
               +test_varname;
-          else if (qdim_data == gmm::sqr(gmm::sqr(dim))) 
+          else if (qdim_data == gmm::sqr(gmm::sqr(dim)))
             expr = 
"((Reshape("+dataname+",meshdim,meshdim,meshdim,meshdim))*Grad_"
               +varname+"):Grad_"+test_varname;
           else GMM_ASSERT1(false, "Wrong data size for generic elliptic 
brick");
@@ -4046,7 +4045,7 @@ model_complex_plain_vector &
 
 
 
-    source_term_brick(void) {
+    source_term_brick() {
       set_flags("Source term", true /* is linear*/,
                 true /* is symmetric */, true /* is coercive */,
                 true /* is real */, true /* is complex */,
@@ -4150,7 +4149,7 @@ model_complex_plain_vector &
                                       build_version) const override {
       md.add_external_load(ib, gmm::vect_norm1(vecl[0]));
     }
-    
+
 
     virtual void asm_complex_tangent_terms(const model &md, size_type /* ib */,
                                            const model::varnamelist &vl,
@@ -4199,7 +4198,7 @@ model_complex_plain_vector &
       md.add_external_load(ib, gmm::vect_norm1(vecl[0]));
     }
 
-    normal_source_term_brick(void) {
+    normal_source_term_brick() {
       set_flags("Normal source term", true /* is linear*/,
                 true /* is symmetric */, true /* is coercive */,
                 true /* is real */, true /* is complex */,
@@ -4334,7 +4333,7 @@ model_complex_plain_vector &
         GMM_TRACE2("Mass term assembly for Dirichlet condition");
         if (H_version || normal_component) {
           ga_workspace workspace;
-               gmm::sub_interval Imult(0, mf_mult.nb_dof()), Iu(0, 
mf_u.nb_dof());
+          gmm::sub_interval Imult(0, mf_mult.nb_dof()), Iu(0, mf_u.nb_dof());
           base_vector u(mf_u.nb_dof());
           base_vector mult(mf_mult.nb_dof());
           workspace.add_fem_variable("u", mf_u, Iu, u);
@@ -4347,12 +4346,12 @@ model_complex_plain_vector &
                           "Test_mult . (A . Test2_u)"
                           :
                           "Test_mult. (Reshape(A, qdim(u), qdim(u)) . 
Test2_u)";
-          } else if (normal_component){
-                 expression = "Test_mult . (Test2_u . Normal)";
+          } else if (normal_component) {
+            expression = "Test_mult . (Test2_u . Normal)";
           }
           workspace.add_expression(expression, mim, rg);
           workspace.set_assembled_matrix(*B);
-               workspace.assembly(2);
+          workspace.assembly(2);
         } else {
           asm_mass_matrix(*B, mim, mf_mult, mf_u, rg);
         }
@@ -4485,12 +4484,12 @@ model_complex_plain_vector &
         }
         GMM_TRACE2("Mass term assembly for Dirichlet condition");
         if (H_version) {
-         if (mf_u.get_qdim() == 1)
-           asm_real_or_complex_1_param_mat(*B, mim, mf_mult, mf_H, *H, rg,
-                                           "(A*Test_u).Test2_u");
-         else
-           asm_real_or_complex_1_param_mat(*B, mim, mf_mult, mf_H, *H, rg,
-                           "(Reshape(A,qdim(u),qdim(u))*Test2_u).Test_u");
+          if (mf_u.get_qdim() == 1)
+            asm_real_or_complex_1_param_mat(*B, mim, mf_mult, mf_H, *H, rg,
+                                            "(A*Test_u).Test2_u");
+          else
+            asm_real_or_complex_1_param_mat(*B, mim, mf_mult, mf_H, *H, rg,
+                            "(Reshape(A,qdim(u),qdim(u))*Test2_u).Test_u");
           // if (mf_H)
           //   asm_real_or_complex_1_param
           //     (*B, mim, mf_mult, *mf_H, *H, rg, (mf_u.get_qdim() == 1) ?
@@ -4507,16 +4506,16 @@ model_complex_plain_vector &
           //      "M(#1,#2)+=sym(comp(vBase(#1).vBase(#2))(:,i,:,j).F(i,j));");
         }
         else if (normal_component) {
-         ga_workspace workspace;
-         gmm::sub_interval Imult(0, mf_mult.nb_dof()), Iu(0, mf_u.nb_dof());
-         base_vector mult(mf_mult.nb_dof()), u(mf_u.nb_dof());
-         workspace.add_fem_variable("mult", mf_mult, Imult, mult);
-         workspace.add_fem_variable("u", mf_u, Iu, u);
-         workspace.add_expression("Test_mult.(Test2_u.Normal)", mim, rg);
-         model_real_sparse_matrix BB(mf_mult.nb_dof(), mf_u.nb_dof());
-         workspace.set_assembled_matrix(BB);
-         workspace.assembly(2);
-         gmm::add(BB, *B);
+          ga_workspace workspace;
+          gmm::sub_interval Imult(0, mf_mult.nb_dof()), Iu(0, mf_u.nb_dof());
+          base_vector mult(mf_mult.nb_dof()), u(mf_u.nb_dof());
+          workspace.add_fem_variable("mult", mf_mult, Imult, mult);
+          workspace.add_fem_variable("u", mf_u, Iu, u);
+          workspace.add_expression("Test_mult.(Test2_u.Normal)", mim, rg);
+          model_real_sparse_matrix BB(mf_mult.nb_dof(), mf_u.nb_dof());
+          workspace.set_assembled_matrix(BB);
+          workspace.assembly(2);
+          gmm::add(BB, *B);
 
           // generic_assembly assem;
           // if (mf_mult.get_qdim() == 1)
@@ -4936,14 +4935,14 @@ model_complex_plain_vector &
       }
     }
 
-    
+
     virtual std::string declare_volume_assembly_string
     (const model &, size_type, const model::varnamelist &,
      const model::varnamelist &) const {
       return std::string();
     }
 
-    simplification_Dirichlet_condition_brick(void) {
+    simplification_Dirichlet_condition_brick() {
       set_flags("Dirichlet with simplification brick",
                 true /* is linear*/,
                 true /* is symmetric */, true /* is coercive */,
@@ -4981,20 +4980,20 @@ model_complex_plain_vector &
     GMM_ASSERT1(order == 0, "Wrong expression of the Neumann term");
     model::varnamelist vl, vl_test1, vl_test2, dl;
     bool is_lin = workspace.used_variables(vl, vl_test1, vl_test2, dl, 1);
-    
+
     std::string condition = "("+varname + (datag.size() ? "-("+datag+"))":")");
     std::string gamma = "(("+datagamma0+")*element_size)";
     std::string r = "(1/"+gamma+")";
     std::string expr = "("+r+"*"+condition+"-("+Neumannterm+")).Test_"+varname;
     if (theta_ != scalar_type(0)) {
       std::string derivative_Neumann = workspace.extract_order1_term(varname);
-      if (derivative_Neumann.size()) 
+      if (derivative_Neumann.size())
         expr+="-"+theta+"*"+condition+".("+derivative_Neumann+")";
     }
 
     // cout << "Nitsche expression : " << expr << endl;
     // cout << "is_lin : " << int(is_lin) << endl;
-    
+
     if (is_lin) {
       return add_linear_generic_assembly_brick
         (md, mim, expr, region, false, false,
@@ -5017,7 +5016,7 @@ model_complex_plain_vector &
     GMM_ASSERT1(order == 0, "Wrong expression of the Neumann term");
     model::varnamelist vl, vl_test1, vl_test2, dl;
     bool is_lin = workspace.used_variables(vl, vl_test1, vl_test2, dl, 1);
-    
+
     std::string condition = "("+varname+".Normal"
       + (datag.size() ? "-("+datag+"))":")");
     std::string gamma = "(("+datagamma0+")*element_size)";
@@ -5026,7 +5025,7 @@ model_complex_plain_vector &
       +"))*(Normal.Test_"+varname+")";
     if (theta_ != scalar_type(0)) {
       std::string derivative_Neumann = workspace.extract_order1_term(varname);
-      if (derivative_Neumann.size()) 
+      if (derivative_Neumann.size())
         expr+="-"+theta+"*"+condition+"*Normal.("
           +derivative_Neumann+")";
     }
@@ -5052,7 +5051,7 @@ model_complex_plain_vector &
     GMM_ASSERT1(order == 0, "Wrong expression of the Neumann term");
     model::varnamelist vl, vl_test1, vl_test2, dl;
     bool is_lin = workspace.used_variables(vl, vl_test1, vl_test2, dl, 1);
-    
+
     std::string condition = "(("+dataH+")*"+varname
       + (datag.size() ? "-("+datag+"))":")");
     std::string gamma = "(("+datagamma0+")*element_size)";
@@ -5061,7 +5060,7 @@ model_complex_plain_vector &
       +"))*(("+dataH+")*Test_"+varname+")";
     if (theta_ != scalar_type(0)) {
       std::string derivative_Neumann = workspace.extract_order1_term(varname);
-      if (derivative_Neumann.size()) 
+      if (derivative_Neumann.size())
         expr+="-"+theta+"*"+condition+"*(("+dataH+")*("
           +derivative_Neumann+"))";
     }
@@ -5445,7 +5444,7 @@ model_complex_plain_vector &
         GMM_ASSERT1(false, "Bad format Helmholtz brick coefficient");
     }
 
-    Helmholtz_brick(void) {
+    Helmholtz_brick() {
       set_flags("Helmholtz", true /* is linear*/,
                 true /* is symmetric */, true /* is coercive */,
                 true /* is real */, true /* is complex */);
@@ -5565,7 +5564,7 @@ model_complex_plain_vector &
         asm_homogeneous_qu_term(matl[0], mim, mf_u, *A, rg);
     }
 
-    Fourier_Robin_brick(void) {
+    Fourier_Robin_brick() {
       set_flags("Fourier Robin condition", true /* is linear*/,
                 true /* is symmetric */, true /* is coercive */,
                 true /* is real */, true /* is complex */,
@@ -5928,7 +5927,7 @@ model_complex_plain_vector &
       return std::string();
     }
 
-    explicit_rhs_brick(void) {
+    explicit_rhs_brick() {
       set_flags("Explicit rhs brick",
                 true /* is linear*/,
                 true /* is symmetric */, true /* is coercive */,
@@ -5989,9 +5988,9 @@ model_complex_plain_vector &
         model::varnamelist vlmd; md.variable_list(vlmd);
         for (size_type i = 0; i < vlmd.size(); ++i)
           if (md.is_disabled_variable(vlmd[i]))
-            nbgdof = std::max(nbgdof, 
+            nbgdof = std::max(nbgdof,
                               workspace.interval_of_variable(vlmd[i]).last());
-        model_real_sparse_matrix R(nbgdof, nbgdof); 
+        model_real_sparse_matrix R(nbgdof, nbgdof);
         workspace.set_assembled_matrix(R);
         workspace.assembly(2);
         scalar_type alpha = scalar_type(1)
@@ -6049,18 +6048,18 @@ model_complex_plain_vector &
    size_type region, const std::string &dataname3) {
     std::string test_varname
       = "Test_" + sup_previous_and_dot_to_varname(varname);
-    
+
     std::string expr1 = "((("+dataexpr1+")*(Div_"+varname+"-Div_"+dataname3
       +"))*Id(meshdim)+(2*("+dataexpr2+"))*(Sym(Grad_"+varname
       +")-Sym(Grad_"+dataname3+"))):Grad_" +test_varname;
     std::string expr2 = "(Div_"+varname+"*(("+dataexpr1+")*Id(meshdim))"
       +"+(2*("+dataexpr2+"))*Sym(Grad_"+varname+")):Grad_"+test_varname;
-    
+
     ga_workspace workspace(md, true);
     workspace.add_expression(expr2, mim, region);
     model::varnamelist vl, vl_test1, vl_test2, dl;
     bool is_lin = workspace.used_variables(vl, vl_test1, vl_test2, dl, 2);
-    
+
     if (is_lin) {
       pbrick pbr = std::make_shared<iso_lin_elasticity_new_brick>
         (expr2, dataname3);
@@ -6082,18 +6081,18 @@ model_complex_plain_vector &
    size_type region) {
     std::string test_varname
       = "Test_" + sup_previous_and_dot_to_varname(varname);
-    
+
     std::string mu = "(("+data_E+")/(2*(1+("+data_nu+"))))";
     std::string lambda = 
"(("+data_E+")*("+data_nu+")/((1+("+data_nu+"))*(1-2*("
       +data_nu+"))))";
     std::string expr = lambda+"*Div_"+varname+"*Div_"+test_varname
       + "+"+mu+"*(Grad_"+varname+"+Grad_"+varname+"'):Grad_"+test_varname;
-    
+
     ga_workspace workspace(md, true);
     workspace.add_expression(expr, mim, region);
     model::varnamelist vl, vl_test1, vl_test2, dl;
     bool is_lin = workspace.used_variables(vl, vl_test1, vl_test2, dl, 2);
-    
+
     if (is_lin) {
       return add_linear_generic_assembly_brick
         (md, mim, expr, region, false, false, "Linearized isotropic 
elasticity");
@@ -6110,7 +6109,7 @@ model_complex_plain_vector &
    size_type region) {
     std::string test_varname
       = "Test_" + sup_previous_and_dot_to_varname(varname);
-    
+
     const mesh_fem *mfu = md.pmesh_fem_of_variable(varname);
     GMM_ASSERT1(mfu, "The variable should be a fem variable");
     size_type N = mfu->linked_mesh().dim();
@@ -6122,12 +6121,12 @@ model_complex_plain_vector &
       lambda = "(("+data_E+")*("+data_nu+")/((1-sqr("+data_nu+"))))";
     std::string expr = lambda+"*Div_"+varname+"*Div_"+test_varname
       + "+"+mu+"*(Grad_"+varname+"+Grad_"+varname+"'):Grad_"+test_varname;
-    
+
     ga_workspace workspace(md, true);
     workspace.add_expression(expr, mim, region);
     model::varnamelist vl, vl_test1, vl_test2, dl;
     bool is_lin = workspace.used_variables(vl, vl_test1, vl_test2, dl, 2);
-    
+
     if (is_lin) {
       return add_linear_generic_assembly_brick
         (md, mim, expr, region, false, false, "Linearized isotropic 
elasticity");
@@ -6150,7 +6149,7 @@ model_complex_plain_vector &
       const model_real_plain_vector *lambda=&(md.real_variable(data_lambda));
       const mesh_fem *mf_mu = md.pmesh_fem_of_variable(data_mu);
       const model_real_plain_vector *mu = &(md.real_variable(data_mu));
-      
+
       size_type sl = gmm::vect_size(*lambda);
       if (mf_lambda) sl = sl * mf_lambda->get_qdim() / mf_lambda->nb_dof();
       size_type sm = gmm::vect_size(*mu);
@@ -6160,7 +6159,7 @@ model_complex_plain_vector &
       GMM_ASSERT1(mf_lambda == mf_mu,
                   "The two Lame coefficients should be described on the same "
                   "finite element method.");
-      
+
       if (mf_lambda) {
         getfem::interpolation_von_mises_or_tresca(mf_u, mf_vm,
                                                   md.real_variable(varname), 
VM,
@@ -6300,7 +6299,7 @@ model_complex_plain_vector &
     { }
 
 
-    linear_incompressibility_brick(void) {
+    linear_incompressibility_brick() {
       set_flags("Linear incompressibility brick",
                 true /* is linear*/,
                 true /* is symmetric */, false /* is coercive */,
@@ -6449,7 +6448,7 @@ model_complex_plain_vector &
       return std::string();
     }
 
-    mass_brick(void) {
+    mass_brick() {
       set_flags("Mass brick", true /* is linear*/,
                 true /* is symmetric */, true /* is coercive */,
                 true /* is real */, true /* is complex */,
@@ -6631,7 +6630,7 @@ model_complex_plain_vector &
       return std::string();
     }
 
-    basic_d_on_dt_brick(void) {
+    basic_d_on_dt_brick() {
       set_flags("Basic d/dt brick", true /* is linear*/,
                 true /* is symmetric */, true /* is coercive */,
                 true /* is real */, true /* is complex */,
@@ -6806,7 +6805,7 @@ model_complex_plain_vector &
       return std::string();
     }
 
-    basic_d2_on_dt2_brick(void) {
+    basic_d2_on_dt2_brick() {
       set_flags("Basic d2/dt2 brick", true /* is linear*/,
                 true /* is symmetric */, true /* is coercive */,
                 true /* is real */, true /* is complex */,
@@ -7205,7 +7204,7 @@ model_complex_plain_vector &
         md.reset_default_iter_of_variables(vl);
     }
 
-    midpoint_dispatcher(void) : virtual_dispatcher(2)
+    midpoint_dispatcher() : virtual_dispatcher(2)
     { id_num = act_counter(); }
 
   };
diff --git a/src/getfem_nonlinear_elasticity.cc 
b/src/getfem_nonlinear_elasticity.cc
index d8ce08d..b5b8baa 100644
--- a/src/getfem_nonlinear_elasticity.cc
+++ b/src/getfem_nonlinear_elasticity.cc
@@ -44,55 +44,55 @@ namespace getfem {
   }
 
   struct compute_invariants {
-    
+
     const base_matrix &E;
     base_matrix Einv;
     size_type N;
     scalar_type i1_, i2_, i3_, j1_, j2_;
     bool i1_c, i2_c, i3_c, j1_c, j2_c;
 
-    base_matrix di1, di2, di3, dj1, dj2; 
+    base_matrix di1, di2, di3, dj1, dj2;
     bool di1_c, di2_c, di3_c, dj1_c, dj2_c;
 
-    base_tensor ddi1, ddi2, ddi3, ddj1, ddj2; 
+    base_tensor ddi1, ddi2, ddi3, ddj1, ddj2;
     bool ddi1_c, ddi2_c, ddi3_c, ddj1_c, ddj2_c;
 
 
     /* First invariant tr(E) */
-    void compute_i1(void) {
+    void compute_i1() {
       i1_ = gmm::mat_trace(E);
       i1_c = true;
     }
 
-    void compute_di1(void) {
+    void compute_di1() {
       gmm::resize(di1, N, N);
       gmm::copy(gmm::identity_matrix(), di1);
       di1_c = true;
     }
 
-    void compute_ddi1(void) { // not very useful, null tensor
-      ddi1 = base_tensor(N, N, N, N); 
+    void compute_ddi1() { // not very useful, null tensor
+      ddi1 = base_tensor(N, N, N, N);
       ddi1_c = true;
     }
 
-    inline scalar_type i1(void)
+    inline scalar_type i1()
     { if (!i1_c) compute_i1(); return i1_; }
 
-    inline const base_matrix &grad_i1(void)
+    inline const base_matrix &grad_i1()
     { if (!di1_c) compute_di1(); return di1; }
 
-    inline const base_tensor &sym_grad_grad_i1(void)
+    inline const base_tensor &sym_grad_grad_i1()
     { if (!ddi1_c) compute_ddi1(); return ddi1; }
 
 
     /* Second invariant (tr(E)^2 - tr(E^2))/2 */
-    void compute_i2(void) {
+    void compute_i2() {
       i2_ = (gmm::sqr(gmm::mat_trace(E))
              - frobenius_product_trans(E, E)) / scalar_type(2);
       i2_c = true;
     }
 
-    void compute_di2(void) {
+    void compute_di2() {
       gmm::resize(di2, N, N);
       gmm::copy(gmm::identity_matrix(), di2);
       gmm::scale(di2, i1());
@@ -101,7 +101,7 @@ namespace getfem {
       di2_c = true;
     }
 
-    void compute_ddi2(void) {
+    void compute_ddi2() {
       ddi2 = base_tensor(N, N, N, N);
       for (size_type i = 0; i < N; ++i)
         for (size_type k = 0; k < N; ++k)
@@ -114,23 +114,23 @@ namespace getfem {
       ddi2_c = true;
     }
 
-    inline scalar_type i2(void)
+    inline scalar_type i2()
     { if (!i2_c) compute_i2(); return i2_; }
 
-    inline const base_matrix &grad_i2(void)
+    inline const base_matrix &grad_i2()
     { if (!di2_c) compute_di2(); return di2; }
 
-    inline const base_tensor &sym_grad_grad_i2(void)
+    inline const base_tensor &sym_grad_grad_i2()
     { if (!ddi2_c) compute_ddi2(); return ddi2; }
 
     /* Third invariant det(E) */
-    void compute_i3(void) {
+    void compute_i3() {
       Einv = E;
       i3_ = bgeot::lu_inverse(&(*(Einv.begin())), gmm::mat_nrows(Einv));
       i3_c = true;
     }
 
-    void compute_di3(void) {
+    void compute_di3() {
       scalar_type det = i3();
       // gmm::resize(di3, N, N);
       // gmm::copy(gmm::transposed(E), di3);
@@ -140,7 +140,7 @@ namespace getfem {
       di3_c = true;
     }
 
-    void compute_ddi3(void) {
+    void compute_ddi3() {
       ddi3 = base_tensor(N, N, N, N);
       scalar_type det = i3() / scalar_type(2); // computes also E inverse.
       for (size_type i = 0; i < N; ++i)
@@ -152,36 +152,36 @@ namespace getfem {
       ddi3_c = true;
     }
 
-    inline scalar_type i3(void)
+    inline scalar_type i3()
     { if (!i3_c) compute_i3(); return i3_; }
 
-    inline const base_matrix &grad_i3(void)
+    inline const base_matrix &grad_i3()
     { if (!di3_c) compute_di3(); return di3; }
 
-    inline const base_tensor &sym_grad_grad_i3(void)
+    inline const base_tensor &sym_grad_grad_i3()
     { if (!ddi3_c) compute_ddi3(); return ddi3; }
 
     /* Invariant j1(E) = i1(E)*i3(E)^(-1/3) */
-    void compute_j1(void) {
+    void compute_j1() {
       j1_ = i1() * ::pow(gmm::abs(i3()), -scalar_type(1) / scalar_type(3));
       j1_c = true;
     }
 
-    void compute_dj1(void) {
+    void compute_dj1() {
       dj1 = grad_i1();
       gmm::add(gmm::scaled(grad_i3(), -i1() / (scalar_type(3) * i3())), dj1);
       gmm::scale(dj1, ::pow(gmm::abs(i3()), -scalar_type(1) / scalar_type(3)));
       dj1_c = true;
     }
 
-    void compute_ddj1(void) {
-      const base_matrix &di1_ = grad_i1(); 
+    void compute_ddj1() {
+      const base_matrix &di1_ = grad_i1();
       const base_matrix &di3_ = grad_i3();
       scalar_type coeff1 = scalar_type(1) / (scalar_type(3)*i3());
       scalar_type coeff2 = scalar_type(4) * coeff1 * coeff1 * i1();
       ddj1 = sym_grad_grad_i3();
       gmm::scale(ddj1.as_vector(), -i1() * coeff1);
-      
+
       for (size_type i = 0; i < N; ++i)
          for (size_type j = 0; j < N; ++j)
            for (size_type k = 0; k < N; ++k)
@@ -195,30 +195,30 @@ namespace getfem {
       ddj1_c = true;
     }
 
-    inline scalar_type j1(void)
+    inline scalar_type j1()
     { if (!j1_c) compute_j1(); return j1_; }
 
-    inline const base_matrix &grad_j1(void)
+    inline const base_matrix &grad_j1()
     { if (!dj1_c) compute_dj1(); return dj1; }
 
-    inline const base_tensor &sym_grad_grad_j1(void)
+    inline const base_tensor &sym_grad_grad_j1()
     { if (!ddj1_c) compute_ddj1(); return ddj1; }
 
     /* Invariant j2(E) = i2(E)*i3(E)^(-2/3) */
-    void compute_j2(void) {
+    void compute_j2() {
       j2_ = i2() * ::pow(gmm::abs(i3()), -scalar_type(2) / scalar_type(3));
       j2_c = true;
     }
 
-    void compute_dj2(void) {
+    void compute_dj2() {
       dj2 = grad_i2();
       gmm::add(gmm::scaled(grad_i3(), -scalar_type(2) * i2() / (scalar_type(3) 
* i3())), dj2);
       gmm::scale(dj2, ::pow(gmm::abs(i3()), -scalar_type(2) / scalar_type(3)));
       dj2_c = true;
     }
 
-    void compute_ddj2(void) {
-      const base_matrix &di2_ = grad_i2(); 
+    void compute_ddj2() {
+      const base_matrix &di2_ = grad_i2();
       const base_matrix &di3_ = grad_i3();
       scalar_type coeff1 = scalar_type(2) / (scalar_type(3)*i3());
       scalar_type coeff2 = scalar_type(5) * coeff1 * coeff1 * i2()
@@ -226,7 +226,7 @@ namespace getfem {
       ddj2 = sym_grad_grad_i2();
       gmm::add(gmm::scaled(sym_grad_grad_i3().as_vector(), -i2() * coeff1),
                ddj2.as_vector());
-      
+
       for (size_type i = 0; i < N; ++i)
          for (size_type j = 0; j < N; ++j)
            for (size_type k = 0; k < N; ++k)
@@ -241,13 +241,13 @@ namespace getfem {
     }
 
 
-    inline scalar_type j2(void)
+    inline scalar_type j2()
     { if (!j2_c) compute_j2(); return j2_; }
-   
-    inline const base_matrix &grad_j2(void)
+
+    inline const base_matrix &grad_j2()
     { if (!dj2_c) compute_dj2(); return dj2; }
 
-    inline const base_tensor &sym_grad_grad_j2(void)
+    inline const base_tensor &sym_grad_grad_j2()
     { if (!ddj2_c) compute_ddj2(); return ddj2; }
 
 
@@ -261,7 +261,7 @@ namespace getfem {
   };
 
 
- 
+
 
 
   /* Symmetry check */
@@ -272,8 +272,8 @@ namespace getfem {
       for (size_type m = 0; m < N; ++m)
         for (size_type l = 0; l < N; ++l)
           for (size_type k = 0; k < N; ++k) {
-            if (gmm::abs(t(n,m,l,k) - t(l,k,n,m))>1e-5) flags &= (~1); 
-            if (gmm::abs(t(n,m,l,k) - t(m,n,l,k))>1e-5) flags &= (~2); 
+            if (gmm::abs(t(n,m,l,k) - t(l,k,n,m))>1e-5) flags &= (~1);
+            if (gmm::abs(t(n,m,l,k) - t(m,n,l,k))>1e-5) flags &= (~2);
             if (gmm::abs(t(n,m,l,k) - t(n,m,k,l))>1e-5) flags &= (~4);
           }
     return flags;
@@ -288,9 +288,9 @@ namespace getfem {
     do {
       gmm::fill_random(Phi);
       d = bgeot::lu_det(&(*(Phi.begin())), N);
-    } while (d < scalar_type(0.01)); 
+    } while (d < scalar_type(0.01));
     gmm::mult(gmm::transposed(Phi),Phi,E);
-    gmm::scale(E,-1.); gmm::add(gmm::identity_matrix(),E); 
+    gmm::scale(E,-1.); gmm::add(gmm::identity_matrix(),E);
     gmm::scale(E,-0.5);
   }
 
@@ -303,25 +303,25 @@ namespace getfem {
       random_E(E); random_E(DE);
       gmm::scale(DE, h);
       gmm::add(E, DE, E2);
-      
+
       base_matrix sigma1(N,N), sigma2(N,N);
       getfem::base_tensor tdsigma(N,N,N,N);
       base_matrix dsigma(N,N);
       gmm::copy(E, E2); gmm::add(DE, E2);
       sigma(E, sigma1, param, scalar_type(1));
       sigma(E2, sigma2, param, scalar_type(1));
-      
+
       scalar_type d = strain_energy(E2, param, scalar_type(1))
         - strain_energy(E, param, scalar_type(1));
       scalar_type d2 = 0;
-      for (size_type i=0; i < N; ++i) 
+      for (size_type i=0; i < N; ++i)
         for (size_type j=0; j < N; ++j) d2 += sigma1(i,j)*DE(i,j);
       if (gmm::abs(d-d2)/(gmm::abs(d)+1e-40) > 1e-4) {
         cout << "Test " << count << " wrong derivative of strain_energy, d="
              << d/h << ", d2=" << d2/h << endl;
         ok = false;
       }
-      
+
       grad_sigma(E,tdsigma,param, scalar_type(1));
       for (size_type i=0; i < N; ++i) {
         for (size_type j=0; j < N; ++j) {
@@ -344,9 +344,9 @@ namespace getfem {
     }
     GMM_ASSERT1(ok, "Derivative test has failed");
   }
-    
+
   void abstract_hyperelastic_law::cauchy_updated_lagrangian
-  (const base_matrix& F, const base_matrix &E, 
+  (const base_matrix& F, const base_matrix &E,
    base_matrix &cauchy_stress, const base_vector &params,
    scalar_type det_trans) const
   {
@@ -358,7 +358,7 @@ namespace getfem {
     gmm::mult(aux,gmm::transposed(F),cauchy_stress);
     gmm::scale(cauchy_stress,scalar_type(1.0/det_trans)); //cauchy = 
1/J*F*PK2*F^T
   }
-  
+
 
   void abstract_hyperelastic_law::grad_sigma_updated_lagrangian
   (const base_matrix& F, const base_matrix& E,
@@ -381,13 +381,13 @@ namespace getfem {
                 {    for(size_type n = 0; n < N; ++n)
                     for(size_type p = 0; p < N; ++p)
                       for(size_type q = 0; q < N; ++q)
-                        grad_sigma_ul(i,j,k,l)+= 
+                        grad_sigma_ul(i,j,k,l)+=
                           F(i,m)*F(j,n)*F(k,p)*F(l,q)*Cse(m,n,p,q);
                 }
               grad_sigma_ul(i,j,k,l) *= mult;
             }
   }
-  
+
   scalar_type SaintVenant_Kirchhoff_hyperelastic_law::strain_energy
   (const base_matrix &E, const base_vector &params, scalar_type det_trans) 
const {
         // should be optimized, maybe deriving sigma from strain energy
@@ -397,7 +397,7 @@ namespace getfem {
     return gmm::sqr(gmm::mat_trace(E)) * params[0] / scalar_type(2)
     + gmm::mat_euclidean_norm_sqr(E) * params[1];
   }
-  
+
   void SaintVenant_Kirchhoff_hyperelastic_law::sigma
   (const base_matrix &E, base_matrix &result,const base_vector &params, 
scalar_type det_trans) const {
     gmm::copy(gmm::identity_matrix(), result);
@@ -421,7 +421,7 @@ namespace getfem {
       }
   }
 
-  void 
SaintVenant_Kirchhoff_hyperelastic_law::grad_sigma_updated_lagrangian(const 
base_matrix& F, 
+  void 
SaintVenant_Kirchhoff_hyperelastic_law::grad_sigma_updated_lagrangian(const 
base_matrix& F,
     const base_matrix& E,
     const base_vector &params,
     scalar_type det_trans,
@@ -430,7 +430,7 @@ namespace getfem {
     size_type N = E.ncols();
     base_tensor Cse(N,N,N,N);
     grad_sigma(E,Cse,params,det_trans);
-    base_matrix Cinv(N,N); // left Cauchy-Green deform. tens. 
+    base_matrix Cinv(N,N); // left Cauchy-Green deform. tens.
     gmm::mult(F,gmm::transposed(F),Cinv);
     scalar_type mult=1.0/det_trans;
     for(size_type i = 0; i < N; ++i)
@@ -441,7 +441,7 @@ namespace getfem {
             params[1]*(Cinv(i,k)*Cinv(j,l) + Cinv(i,l)*Cinv(j,k)))*mult;
   }
 
-  
SaintVenant_Kirchhoff_hyperelastic_law::SaintVenant_Kirchhoff_hyperelastic_law(void)
 {
+  
SaintVenant_Kirchhoff_hyperelastic_law::SaintVenant_Kirchhoff_hyperelastic_law()
 {
     nb_params_ = 2;
   }
 
@@ -451,7 +451,7 @@ namespace getfem {
     GMM_ASSERT1(false, "To be done");
     return 0;
   }
-  
+
   void membrane_elastic_law::sigma
   (const base_matrix &E, base_matrix &result,const base_vector &params, 
scalar_type det_trans) const {
     // should be optimized, maybe deriving sigma from strain energy
@@ -462,7 +462,7 @@ namespace getfem {
       for (size_type j = 0; j < N; ++j) {
         result(i,j)=0.0;
         for (size_type k = 0; k < N; ++k)
-          for (size_type l = 0; l < N; ++l) 
+          for (size_type l = 0; l < N; ++l)
             result(i,j)+=tt(i,j,k,l)*E(k,l);
       }
     // add pretension in X' direction
@@ -471,7 +471,7 @@ namespace getfem {
     if(params[5]!=0) result(1,1)+=params[5];
     // cout<<"sigma="<<result<<endl;
   }
-  
+
   void membrane_elastic_law::grad_sigma
   (const base_matrix & /* E */, base_tensor &result,
    const base_vector &params, scalar_type) const {
@@ -486,7 +486,7 @@ namespace getfem {
     // result(0,0,1,0) = 0;
     result(0,0,1,1) = params[1]*params[0]/(1-params[1]*poisonXY);
     result(1,1,0,0) = params[1]*params[0]/(1-params[1]*poisonXY);
-    // result(1,1,0,1) = 0;out
+    // result(1,1,0,1) = 0;
     // result(1,1,1,0) = 0;
     result(1,1,1,1) = params[2]/(1-params[1]*poisonXY);
     // result(0,1,0,0) = 0;
@@ -550,7 +550,7 @@ namespace getfem {
 
 // shouldn't negative det_trans be handled here???
       if (det_trans <= scalar_type(0))
-          gmm::add(gmm::scaled(C, 1e200), result);
+        gmm::add(gmm::scaled(C, 1e200), result);
     }
   }
 
@@ -699,7 +699,7 @@ namespace getfem {
   {
     nb_params_ = 2;
   }
-  
+
 
 
   scalar_type generalized_Blatz_Ko_hyperelastic_law::strain_energy
@@ -788,8 +788,8 @@ namespace getfem {
 
     typedef const base_matrix * pointer_base_matrix__;
     pointer_base_matrix__ di[3];
-    di[0] = &(ci.grad_i1()); 
-    di[1] = &(ci.grad_i2()); 
+    di[0] = &(ci.grad_i1());
+    di[1] = &(ci.grad_i2());
     di[2] = &(ci.grad_i3());
 
     for (size_type j = 0; j < N; ++j)
@@ -806,7 +806,7 @@ namespace getfem {
 //                 "Fourth order tensor not symmetric : " << result);
   }
 
-  
generalized_Blatz_Ko_hyperelastic_law::generalized_Blatz_Ko_hyperelastic_law(void)
 {
+  
generalized_Blatz_Ko_hyperelastic_law::generalized_Blatz_Ko_hyperelastic_law() {
     nb_params_ = 5;
     base_vector V(5);
     V[0] = 1.0;  V[1] = 1.0, V[2] = 1.5; V[3] = -0.5; V[4] = 1.5;
@@ -828,7 +828,7 @@ namespace getfem {
     gmm::add(gmm::identity_matrix(), C);
     scalar_type det = bgeot::lu_det(&(*(C.begin())), N);
     return a * gmm::mat_trace(C)
-      + b * (gmm::sqr(gmm::mat_trace(C)) - 
+      + b * (gmm::sqr(gmm::mat_trace(C)) -
              gmm::mat_euclidean_norm_sqr(C))/scalar_type(2)
       + c * det - d * log(det) / scalar_type(2) + e;
   }
@@ -853,7 +853,7 @@ namespace getfem {
     gmm::add(gmm::scaled(C, -scalar_type(2) * b), result);
     if (det_trans <= scalar_type(0))
       gmm::add(gmm::scaled(C, 1e200), result);
-          else {
+    else {
       scalar_type det = bgeot::lu_inverse(&(*(C.begin())), N);
       gmm::add(gmm::scaled(C, scalar_type(2) * c * det - d), result);
     }
@@ -879,7 +879,7 @@ namespace getfem {
         result(i, j, j, i) -= b2;
         for (size_type  k = 0; k < N; ++k)
           for (size_type  l = 0; l < N; ++l)
-            result(i, j, k, l) += 
+            result(i, j, k, l) +=
               (C(i, k)*C(l, j) + C(i, l)*C(k, j)) * (d-scalar_type(2)*det*c)
               + (C(i, j) * C(k, l)) * det*c*scalar_type(4);
       }
@@ -906,7 +906,7 @@ namespace getfem {
   (const base_matrix &E, const base_vector &params, scalar_type det_trans) 
const {
     GMM_ASSERT1(gmm::mat_nrows(E) == 2, "Plane strain law is for 2D only.");
     base_matrix E3D(3,3);
-    E3D(0,0)=E(0,0); E3D(1,0)=E(1,0); E3D(0,1)=E(0,1); E3D(1,1)=E(1,1); 
+    E3D(0,0)=E(0,0); E3D(1,0)=E(1,0); E3D(0,1)=E(0,1); E3D(1,1)=E(1,1);
     return pl->strain_energy(E3D, params, det_trans);
   }
 
@@ -952,7 +952,7 @@ namespace getfem {
   struct nonlinear_elasticity_brick : public virtual_brick {
 
     phyperelastic_law AHL;
-    
+
     virtual void asm_real_tangent_terms(const model &md, size_type /* ib */,
                                         const model::varnamelist &vl,
                                         const model::varnamelist &dl,
@@ -1011,9 +1011,9 @@ namespace getfem {
     }
 
   };
-  
+
   //=========================================================================
-  //  Add a nonlinear elasticity brick.  
+  //  Add a nonlinear elasticity brick.
   //=========================================================================
 
   // Deprecated brick
@@ -1031,11 +1031,11 @@ namespace getfem {
   }
 
   //=========================================================================
-  //  Von Mises or Tresca stress computation.  
+  //  Von Mises or Tresca stress computation.
   //=========================================================================
 
   void compute_Von_Mises_or_Tresca(model &md,
-                                   const std::string &varname, 
+                                   const std::string &varname,
                                    const phyperelastic_law &AHL,
                                    const std::string &dataname,
                                    const mesh_fem &mf_vm,
@@ -1047,12 +1047,12 @@ namespace getfem {
     const model_real_plain_vector &u = md.real_variable(varname);
     const mesh_fem *mf_params = md.pmesh_fem_of_variable(dataname);
     const model_real_plain_vector &params = md.real_variable(dataname);
-    
+
     size_type sl = gmm::vect_size(params);
     if (mf_params) sl = sl * mf_params->get_qdim() / mf_params->nb_dof();
     GMM_ASSERT1(sl == AHL->nb_params(), "Wrong number of coefficients for "
                 "the nonlinear constitutive elastic law");
-    
+
     unsigned N = unsigned(mf_u.linked_mesh().dim());
     unsigned NP = unsigned(AHL->nb_params()), NFem = mf_u.get_qdim();
     model_real_plain_vector GRAD(mf_vm.nb_dof()*NFem*N);
@@ -1086,7 +1086,7 @@ namespace getfem {
         //jyh : compute ez, normal on deformed surface
         for (unsigned int l = 0; l <NFem; ++l)  {
           ez[l]=0;
-          for (unsigned int m = 0; m <NFem; ++m) 
+          for (unsigned int m = 0; m <NFem; ++m)
             for (unsigned int n = 0; n <NFem; ++n){
               ez[l]+=levi_civita(l,m,n)*gradphi(m,0)*gradphi(n,1);
             }
@@ -1096,14 +1096,14 @@ namespace getfem {
       }
       gmm::mult(gradphi, sigmahathat, aux);
       gmm::mult(aux, gmm::transposed(gradphi), sigma);
-      
+
       /* jyh : complete gradphi for virtual 3rd dim (perpendicular to
          deformed surface, same thickness) */
       if (NFem == 3 && N == 2) {
         gmm::resize(gradphi,NFem,NFem);
-        for (unsigned int ll = 0; ll <NFem; ++ll) 
-          for (unsigned int ii = 0; ii <NFem; ++ii) 
-            for (unsigned int jj = 0; jj <NFem; ++jj) 
+        for (unsigned int ll = 0; ll <NFem; ++ll)
+          for (unsigned int ii = 0; ii <NFem; ++ii)
+            for (unsigned int jj = 0; jj <NFem; ++jj)
               gradphi(ll,2)+=(levi_civita(ll,ii,jj)*gradphi(ii,0)
                               *gradphi(jj,1))/normEz;
         //jyh : end complete graphi
@@ -1129,7 +1129,7 @@ namespace getfem {
 
 
   void compute_sigmahathat(model &md,
-                           const std::string &varname, 
+                           const std::string &varname,
                            const phyperelastic_law &AHL,
                            const std::string &dataname,
                            const mesh_fem &mf_sigma,
@@ -1143,13 +1143,13 @@ namespace getfem {
     if (mf_params) sl = sl * mf_params->get_qdim() / mf_params->nb_dof();
     GMM_ASSERT1(sl == AHL->nb_params(), "Wrong number of coefficients for "
                 "the nonlinear constitutive elastic law");
-    
+
     unsigned N = unsigned(mf_u.linked_mesh().dim());
     unsigned NP = unsigned(AHL->nb_params()), NFem = mf_u.get_qdim();
     GMM_ASSERT1(mf_sigma.nb_dof() > 0, "Bad mf_sigma");
     size_type qqdim = mf_sigma.get_qdim();
     size_type ratio = N*N / qqdim;
-    
+
     GMM_ASSERT1(((ratio == 1) || (ratio == N*N)) &&
                 (gmm::vect_size(SIGMA) == mf_sigma.nb_dof()*ratio),
                 "The vector has not the good size");
@@ -1169,7 +1169,7 @@ namespace getfem {
     base_matrix E(N, N), gradphi(NFem,N),gradphit(N,NFem), Id(N, N),
       sigmahathat(N,N),aux(NFem,N), sigma(NFem,NFem),
       IdNFem(NFem, NFem);
-    
+
 
     base_vector p(NP);
     if (!mf_params) gmm::copy(params, p);
@@ -1205,7 +1205,7 @@ namespace getfem {
     }
   }
 
-  
+
 
   // ----------------------------------------------------------------------
   //
@@ -1214,7 +1214,7 @@ namespace getfem {
   // ----------------------------------------------------------------------
 
   struct nonlinear_incompressibility_brick : public virtual_brick {
-    
+
     virtual void asm_real_tangent_terms(const model &md, size_type,
                                         const model::varnamelist &vl,
                                         const model::varnamelist &dl,
@@ -1224,7 +1224,7 @@ namespace getfem {
                                         model::real_veclist &veclsym,
                                         size_type region,
                                         build_version version) const {
-      
+
       GMM_ASSERT1(matl.size() == 2,  "Wrong number of terms for nonlinear "
                   "incompressibility brick");
       GMM_ASSERT1(dl.size() == 0, "Nonlinear incompressibility brick need no "
@@ -1257,7 +1257,7 @@ namespace getfem {
     }
 
 
-    nonlinear_incompressibility_brick(void) {
+    nonlinear_incompressibility_brick() {
       set_flags("Nonlinear incompressibility brick",
                 false /* is linear*/,
                 true /* is symmetric */, false /* is coercive */,
@@ -1305,7 +1305,7 @@ namespace getfem {
       ga_init_scalar_(sizes);
       return true;
     }
-    
+
     // Value : (Trace(M))^2 - Trace(M^2))/2
     void value(const arg_list &args, base_tensor &result) const {
       size_type N = args[0]->sizes()[0];
@@ -1332,7 +1332,7 @@ namespace getfem {
           *it = ((i == j) ? tr : scalar_type(0)) - t[i*N+j];
       GMM_ASSERT1(it == result.end(), "Internal error");
     }
-    
+
     // Second derivative : \delta_{ij}\delta_{kl} - \delta_{il}\delta_{jk}
     void second_derivative(const arg_list &args, size_type, size_type,
                            base_tensor &result) const { // To be verified
@@ -1347,7 +1347,7 @@ namespace getfem {
   };
 
 
-  // Matrix_j1 Operator 
+  // Matrix_j1 Operator
   struct matrix_j1_operator : public ga_nonlinear_operator {
     bool result_size(const arg_list &args, bgeot::multi_index &sizes) const {
       if (args.size() != 1 || args[0]->sizes().size() != 2
@@ -1355,7 +1355,7 @@ namespace getfem {
       ga_init_scalar_(sizes);
       return true;
     }
-    
+
     // Value : Trace(M)/(det(M)^1/3)
     void value(const arg_list &args, base_tensor &result) const {
       size_type N = args[0]->sizes()[0];
@@ -1390,7 +1390,7 @@ namespace getfem {
       } else
         std::fill(result.begin(), result.end(), 1.E200);
     }
-    
+
     // Second derivative : (-\delta_{ij}M^{-T}_{kl} + Trace(M)*M^{-T}_{ik}M^{-T}_{lj}
     //                      -\delta_{kl}M^{-T}_{ij} + 
Trace(M)*M^{-T}_{ij}M^{-T}_{kl}/3)/(3det(M)^1/3)
     void second_derivative(const arg_list &args, size_type, size_type,
@@ -1413,13 +1413,13 @@ namespace getfem {
                        + tr*M(j,i)*M(k,l)/ scalar_type(3))
                   / (scalar_type(3)*pow(det, scalar_type(1)/scalar_type(3)));
         GMM_ASSERT1(it == result.end(), "Internal error");
-      } else 
+      } else
         std::fill(result.begin(), result.end(), 1.E200);
     }
   };
 
 
-  // Matrix_j2 Operator 
+  // Matrix_j2 Operator
   struct matrix_j2_operator : public ga_nonlinear_operator {
     bool result_size(const arg_list &args, bgeot::multi_index &sizes) const {
       if (args.size() != 1 || args[0]->sizes().size() != 2
@@ -1427,7 +1427,7 @@ namespace getfem {
       ga_init_scalar_(sizes);
       return true;
     }
-    
+
     // Value : i2(M)/(det(M)^2/3)
     void value(const arg_list &args, base_tensor &result) const {
       size_type N = args[0]->sizes()[0];
@@ -1471,7 +1471,7 @@ namespace getfem {
             / pow(det, scalar_type(2)/scalar_type(3));
       GMM_ASSERT1(it == result.end(), "Internal error");
     }
-    
+
     // Second derivative
     void second_derivative(const arg_list &args, size_type, size_type,
                            base_tensor &result) const { // To be verified
@@ -1510,7 +1510,7 @@ namespace getfem {
       ga_init_square_matrix_(sizes, args[0]->sizes()[1]);
       return true;
     }
-    
+
     // Value : F^{T}F
     void value(const arg_list &args, base_tensor &result) const {
       // to be verified
@@ -1540,7 +1540,7 @@ namespace getfem {
             }
       GMM_ASSERT1(it == result.end(), "Internal error");
     }
-    
+
     // Second derivative :
     // A{ijklop}=delta{ok}delta{li}delta{pj} + delta{ok}delta{pi}delta{lj}
     // comes from (H,K) -> H^{T}K + K^{T}H
@@ -1571,7 +1571,7 @@ namespace getfem {
       ga_init_square_matrix_(sizes, args[0]->sizes()[0]);
       return true;
     }
-    
+
     // Value : FF^{T}
     void value(const arg_list &args, base_tensor &result) const {
       // to be verified
@@ -1601,7 +1601,7 @@ namespace getfem {
             }
       GMM_ASSERT1(it == result.end(), "Internal error");
     }
-    
+
     // Second derivative :
     // A{ijklop}=delta{ki}delta{lp}delta{oj} + delta{oi}delta{pl}delta{kj}
     // comes from (H,K) -> HK^{T} + KH^{T}
@@ -1633,7 +1633,7 @@ namespace getfem {
       ga_init_square_matrix_(sizes, args[0]->sizes()[1]);
       return true;
     }
-    
+
     // Value : F^{T}F
     void value(const arg_list &args, base_tensor &result) const {
       // to be verified
@@ -1664,7 +1664,7 @@ namespace getfem {
             }
       GMM_ASSERT1(it == result.end(), "Internal error");
     }
-    
+
     // Second derivative :
     // A{ijklop}=(delta{ok}delta{li}delta{pj} + delta{ok}delta{pi}delta{lj})/2
     // comes from (H,K) -> (H^{T}K + K^{T}H)/2
@@ -1695,12 +1695,12 @@ namespace getfem {
       if (args.size() != 2 || args[0]->sizes().size() != 2
           || args[1]->sizes().size() != 2
           || args[0]->sizes()[0] !=  args[0]->sizes()[1]
-          || args[1]->sizes()[0] !=  args[0]->sizes()[1] 
+          || args[1]->sizes()[0] !=  args[0]->sizes()[1]
           || args[1]->sizes()[1] !=  args[0]->sizes()[1]) return false;
       ga_init_square_matrix_(sizes, args[0]->sizes()[1]);
       return true;
     }
-    
+
     // Value : (I+Grad_u)\sigma(I+Grad_u')/det(I+Grad_u)
     void value(const arg_list &args, base_tensor &result) const {
       size_type N = args[0]->sizes()[0];
@@ -1759,7 +1759,7 @@ namespace getfem {
       }
       GMM_ASSERT1(it == result.end(), "Internal error");
     }
-    
+
     // Second derivative : to be implemented
     void second_derivative(const arg_list &, size_type, size_type,
                            base_tensor &) const {
@@ -1771,7 +1771,7 @@ namespace getfem {
   struct AHL_wrapper_sigma : public ga_nonlinear_operator {
     phyperelastic_law AHL;
     bool result_size(const arg_list &args, bgeot::multi_index &sizes) const {
-      if (args.size() != 2 || args[0]->sizes().size() != 2 
+      if (args.size() != 2 || args[0]->sizes().size() != 2
           || args[1]->size() != AHL->nb_params()
           || args[0]->sizes()[0] != args[0]->sizes()[1]) return false;
       ga_init_square_matrix_(sizes, args[0]->sizes()[0]);
@@ -1814,7 +1814,7 @@ namespace getfem {
                   "law with respect to its parameters is not available.");
 
       AHL->grad_sigma(E, grad_sigma, params, det);
-      
+
       base_tensor::iterator it = result.begin();
       for (size_type l = 0; l < N; ++l)
         for (size_type k = 0; k < N; ++k)
@@ -1842,7 +1842,7 @@ namespace getfem {
   struct AHL_wrapper_potential : public ga_nonlinear_operator {
     phyperelastic_law AHL;
     bool result_size(const arg_list &args, bgeot::multi_index &sizes) const {
-      if (args.size() != 2 || args[0]->sizes().size() != 2 
+      if (args.size() != 2 || args[0]->sizes().size() != 2
           || args[1]->size() != AHL->nb_params()
           || args[0]->sizes()[0] != args[0]->sizes()[1]) return false;
       ga_init_scalar_(sizes);
@@ -1891,7 +1891,7 @@ namespace getfem {
     // Second derivative :
     void second_derivative(const arg_list &args, size_type nder1,
                            size_type nder2, base_tensor &result) const {
-      
+
       size_type N = args[0]->sizes()[0];
       base_vector params(AHL->nb_params());
       gmm::copy(args[1]->as_vector(), params);
@@ -1909,7 +1909,7 @@ namespace getfem {
 
       AHL->sigma(E, sigma, params, det);
       AHL->grad_sigma(E, grad_sigma, params, det);
-      
+
       base_tensor::iterator it = result.begin();
       for (size_type l = 0; l < N; ++l)
         for (size_type k = 0; k < N; ++k)
@@ -1935,13 +1935,13 @@ namespace getfem {
   // lambda Tr(E)Id + 2*mu*E with E = (Grad_u+Grad_u'+Grad_u'Grad_u)/2
   struct Saint_Venant_Kirchhoff_sigma : public ga_nonlinear_operator {
     bool result_size(const arg_list &args, bgeot::multi_index &sizes) const {
-      if (args.size() != 2 || args[0]->sizes().size() != 2 
+      if (args.size() != 2 || args[0]->sizes().size() != 2
           || args[1]->size() != 2
           || args[0]->sizes()[0] != args[0]->sizes()[1]) return false;
       ga_init_square_matrix_(sizes, args[0]->sizes()[0]);
       return true;
     }
-    
+
     // Value : Tr(E) + 2*mu*E
     void value(const arg_list &args, base_tensor &result) const {
       size_type N = args[0]->sizes()[0];
@@ -1952,7 +1952,7 @@ namespace getfem {
       gmm::add(Gu, E); gmm::add(gmm::transposed(Gu), E);
       gmm::scale(E, scalar_type(0.5));
       scalar_type trE = gmm::mat_trace(E);
-      
+
       base_tensor::iterator it = result.begin();
       for (size_type j = 0; j < N; ++j)
         for (size_type i = 0; i < N; ++i, ++it) {
@@ -2010,7 +2010,7 @@ namespace getfem {
       }
       GMM_ASSERT1(it == result.end(), "Internal error");
     }
-    
+
     // Second derivative : not implemented
     void second_derivative(const arg_list &, size_type, size_type,
                            base_tensor &) const {
@@ -2020,11 +2020,11 @@ namespace getfem {
 
 
 
-  static bool init_predef_operators(void) {
+  static bool init_predef_operators() {
 
     ga_predef_operator_tab &PREDEF_OPERATORS
       = dal::singleton<ga_predef_operator_tab>::instance();
-    
+
     PREDEF_OPERATORS.add_method
       ("Matrix_i2", std::make_shared<matrix_i2_operator>());
     PREDEF_OPERATORS.add_method
@@ -2073,7 +2073,7 @@ namespace getfem {
       ("Plane_Strain_Generalized_Blatz_Ko_potential",
        std::make_shared<AHL_wrapper_potential>
        (std::make_shared<plane_strain_hyperelastic_law>(gbklaw)));
-    
+
     phyperelastic_law cigelaw
       = std::make_shared<Ciarlet_Geymonat_hyperelastic_law>();
     PREDEF_OPERATORS.add_method
@@ -2090,9 +2090,9 @@ namespace getfem {
       ("Plane_Strain_Ciarlet_Geymonat_potential",
        std::make_shared<AHL_wrapper_potential>
        (std::make_shared<plane_strain_hyperelastic_law>(cigelaw)));
-    
+
     phyperelastic_law morilaw
-      = std::make_shared<Mooney_Rivlin_hyperelastic_law>();    
+      = std::make_shared<Mooney_Rivlin_hyperelastic_law>();
     PREDEF_OPERATORS.add_method
       ("Incompressible_Mooney_Rivlin_sigma",
        std::make_shared<AHL_wrapper_sigma>(morilaw));
@@ -2126,7 +2126,7 @@ namespace getfem {
       ("Plane_Strain_Compressible_Mooney_Rivlin_potential",
        std::make_shared<AHL_wrapper_potential>
        (std::make_shared<plane_strain_hyperelastic_law>(cmorilaw)));
-    
+
     phyperelastic_law ineolaw
       = std::make_shared<Mooney_Rivlin_hyperelastic_law>(false, true);
     PREDEF_OPERATORS.add_method
@@ -2162,7 +2162,7 @@ namespace getfem {
       ("Plane_Strain_Compressible_Neo_Hookean_potential",
        std::make_shared<AHL_wrapper_potential>
        (std::make_shared<plane_strain_hyperelastic_law>(cneolaw)));
-    
+
     phyperelastic_law cneobolaw
       = std::make_shared<Neo_Hookean_hyperelastic_law>(true);
     PREDEF_OPERATORS.add_method
@@ -2239,7 +2239,7 @@ namespace getfem {
 
 
   size_type add_finite_strain_elasticity_brick
-  (model &md, const mesh_im &mim, const std::string &lawname, 
+  (model &md, const mesh_im &mim, const std::string &lawname,
    const std::string &varname, const std::string &params,
    size_type region) {
     std::string test_varname = "Test_" + 
sup_previous_and_dot_to_varname(varname);
@@ -2281,11 +2281,11 @@ namespace getfem {
   }
 
   void compute_finite_strain_elasticity_Von_Mises
-    (model &md, const std::string &lawname, const std::string &varname, 
+    (model &md, const std::string &lawname, const std::string &varname,
      const std::string &params, const mesh_fem &mf_vm,
      model_real_plain_vector &VM, const mesh_region &rg) {
     size_type N = mf_vm.linked_mesh().dim();
-    
+
     std::string adapted_lawname = adapt_law_name(lawname, N);
 
     std::string expr = "sqrt(3/2)*Norm(Deviator(Cauchy_stress_from_PK2("



reply via email to

[Prev in Thread] Current Thread [Next in Thread]