#ifndef GOOSEFEM_ELEMENT_H
#define GOOSEFEM_ELEMENT_H
size_t nelem = conn.shape(0);
size_t nne = conn.shape(1);
size_t ndim = nodevec.shape(1);

#pragma omp parallel for
for (size_t e = 0; e < nelem; ++e) {
    for (size_t m = 0; m < nne; ++m) {
        for (size_t i = 0; i < ndim; ++i) {
            elemvec(e, m, i) = nodevec(conn(e, m), i);
        }
    }
}
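// Usage sketch (illustrative only; the mesh type is one possible choice):
//
//     GooseFEM::Mesh::Quad4::Regular mesh(3, 3);
//     auto elemvec = GooseFEM::Element::asElementVector(mesh.conn(), mesh.coor());
//     // elemvec(e, m, i): coordinate "i" of local node "m" of element "e"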
size_t nelem = conn.shape(0);
size_t nne = conn.shape(1);
size_t ndim = elemvec.shape(2);
size_t nnode = xt::amax(conn)() + 1;

for (size_t e = 0; e < nelem; ++e) {
    for (size_t m = 0; m < nne; ++m) {
        for (size_t i = 0; i < ndim; ++i) {
            nodevec(conn(e, m), i) += elemvec(e, m, i);
        }
    }
}
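// Note: in contrast to asElementVector() there is no "#pragma omp parallel for"
// here: elements that share a node accumulate into the same entry of "nodevec",
// so naively parallelising the element loop would introduce a data race.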
size_t ndof = xt::amax(dofs)() + 1;

for (auto& i : dofs) {
    exists[i]++; // count occurrences per DOF index (body assumed; elided in this excerpt)
}

for (auto& i : dofs) {
    if (exists[i] == 0) {
        return false;
    }
}
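// Example: DOF numbers {0, 1, 2, 4} are not sequential (index 3 is never used,
// leaving a "hole"), whereas {0, 1, 2, 3} are.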
size_t nelem = elemmat.shape(0);
size_t N = elemmat.shape(1);

double eps = std::numeric_limits<double>::epsilon();

#pragma omp parallel for
for (size_t e = 0; e < nelem; ++e) {
    for (size_t i = 0; i < N; ++i) {
        for (size_t j = 0; j < N; ++j) {
            if (std::abs(elemmat(e, i, j)) > eps) {
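// An entry only disqualifies the matrix as diagonal if it exceeds machine
// precision, so matrices that are diagonal up to round-off still pass.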
return derived_cast().m_nelem;

return derived_cast().m_nip;
template <class T, class R>

template <size_t rank, class T>
return std::array<size_t, 3>{derived_cast().m_nelem, D::s_nne, D::s_ndim};

return std::array<size_t, 3>{derived_cast().m_nelem, D::s_nne, arg};

return std::array<size_t, 3>{
    derived_cast().m_nelem, D::s_nne * D::s_ndim, D::s_nne * D::s_ndim};
template <size_t rank = 0>

std::array<size_t, 2 + rank> shape;
shape[0] = derived_cast().m_nelem;
shape[1] = derived_cast().m_nip;
std::fill(shape.begin() + 2, shape.end(), derived_cast().m_tdim);
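// e.g. rank = 2 for a 2-d element type (m_tdim == 2) gives
// shape == {nelem, nip, 2, 2}: one 2nd-order tensor per integration point.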
shape[0] = derived_cast().m_nelem;
shape[1] = derived_cast().m_nip;
std::fill(shape.begin() + 2, shape.end(), derived_cast().m_tdim);
template <size_t trank>

std::array<size_t, 2 + trank> shape;
shape[0] = derived_cast().m_nelem;
shape[1] = derived_cast().m_nip;

shape[0] = derived_cast().m_nelem;
shape[1] = derived_cast().m_nip;
return std::array<size_t, 2>{derived_cast().m_nelem, derived_cast().m_nip};

return std::array<size_t, 3>{derived_cast().m_nelem, derived_cast().m_nip, D::s_tdim};

return std::array<size_t, 3>{derived_cast().m_nelem, derived_cast().m_nip, arg};
template <size_t rank = 0, class R>

template <size_t rank = 0, class R>
xt::noalias(derived_cast().m_x) = x;
derived_cast().compute_dN_impl();

return derived_cast().m_dNx;

return derived_cast().m_vol;
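// Usage sketch (illustrative; "quad" stands for any concrete quadrature type,
// e.g. GooseFEM::Element::Quad4::Quadrature; the variable name is hypothetical):
//
//     quad.update_x(x_deformed);    // triggers compute_dN_impl() internally
//     auto& dNx = quad.GradN();     // gradients w.r.t. the updated positions
//     auto& dV = quad.dV();         // updated integration volumes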
template <class T, class R>

template <class T, class R>

template <class T, class R>

template <class T, class R>

template <class T, class R>

template <class T, class R>

template <class T, class R>
derived_cast().int_gradN_dot_tensor4_dot_gradNT_dV_impl(qtensor, elemmat);

template <class T, class R>

derived_cast().int_gradN_dot_tensor4_dot_gradNT_dV_impl(qtensor, elemmat);

derived_cast().compute_dN_impl();

friend class QuadratureBase<D>;
template <class T, class R>

auto nelem = derived_cast().m_nelem;
auto nip = derived_cast().m_nip;
auto& N = derived_cast().m_N;

#pragma omp parallel for
for (size_t e = 0; e < nelem; ++e) {

    for (size_t q = 0; q < nip; ++q) {

        for (size_t m = 0; m < D::s_nne; ++m) {
            for (size_t i = 0; i < n; ++i) {
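// The innermost statement (elided in this excerpt) is expected to read
//
//     qvector(e, q, i) += N(q, m) * elemvec(e, m, i);
//
// i.e. the shape functions interpolate the nodal vector to each integration
// point; "n" (declared in elided code) is the number of vector components.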
template <class T, class R>

auto nelem = derived_cast().m_nelem;
auto nip = derived_cast().m_nip;
auto& dNx = derived_cast().m_dNx;

#pragma omp parallel for
for (size_t e = 0; e < nelem; ++e) {

    auto ue = xt::adapt(&elemvec(e, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());

    for (size_t q = 0; q < nip; ++q) {

        auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
        auto graduq = xt::adapt(&qtensor(e, q, 0, 0), xt::xshape<D::s_tdim, D::s_tdim>());

        for (size_t m = 0; m < D::s_nne; ++m) {
            for (size_t i = 0; i < D::s_ndim; ++i) {
                for (size_t j = 0; j < D::s_ndim; ++j) {
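// Presumed innermost statement (elided here):
//
//     graduq(i, j) += dNxq(m, i) * ue(m, j);
//
// the dyadic product (grad u)_ij = dN_m/dx_i * u_mj per integration point.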
template <class T, class R>

auto nelem = derived_cast().m_nelem;
auto nip = derived_cast().m_nip;
auto& dNx = derived_cast().m_dNx;

#pragma omp parallel for
for (size_t e = 0; e < nelem; ++e) {

    auto ue = xt::adapt(&elemvec(e, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());

    for (size_t q = 0; q < nip; ++q) {

        auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
        auto graduq = xt::adapt(&qtensor(e, q, 0, 0), xt::xshape<D::s_tdim, D::s_tdim>());

        for (size_t m = 0; m < D::s_nne; ++m) {
            for (size_t i = 0; i < D::s_ndim; ++i) {
                for (size_t j = 0; j < D::s_ndim; ++j) {
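// Transposed variant of gradN_vector_impl(); the presumed accumulation swaps
// the tensor indices:
//
//     graduq(j, i) += dNxq(m, i) * ue(m, j);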
template <class T, class R>

auto nelem = derived_cast().m_nelem;
auto nip = derived_cast().m_nip;
auto& dNx = derived_cast().m_dNx;

#pragma omp parallel for
for (size_t e = 0; e < nelem; ++e) {

    auto ue = xt::adapt(&elemvec(e, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());

    for (size_t q = 0; q < nip; ++q) {

        auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
        auto epsq = xt::adapt(&qtensor(e, q, 0, 0), xt::xshape<D::s_tdim, D::s_tdim>());

        for (size_t m = 0; m < D::s_nne; ++m) {
            for (size_t i = 0; i < D::s_ndim; ++i) {
                for (size_t j = 0; j < D::s_ndim; ++j) {
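// Symmetrised variant: the (elided) accumulation presumably assigns both
// index orders,
//
//     epsq(i, j) += 0.5 * dNxq(m, i) * ue(m, j);
//     epsq(j, i) += 0.5 * dNxq(m, i) * ue(m, j);
//
// yielding the small-strain tensor eps_ij = 0.5 * (du_i/dx_j + du_j/dx_i).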
template <class T, class R>

auto nelem = derived_cast().m_nelem;
auto nip = derived_cast().m_nip;
auto& N = derived_cast().m_N;
auto& vol = derived_cast().m_vol;

#pragma omp parallel for
for (size_t e = 0; e < nelem; ++e) {

    for (size_t q = 0; q < nip; ++q) {

        for (size_t m = 0; m < D::s_nne; ++m) {
            for (size_t i = 0; i < n; ++i) {
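// Presumed innermost statement (elided here):
//
//     elemvec(e, m, i) += N(q, m) * qvector(e, q, i) * vol(e, q);
//
// the quadrature approximation of the integral of N_m * v_i over the element.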
template <class T, class R>

auto nelem = derived_cast().m_nelem;
auto nip = derived_cast().m_nip;
auto& N = derived_cast().m_N;
auto& vol = derived_cast().m_vol;

#pragma omp parallel for
for (size_t e = 0; e < nelem; ++e) {

    auto Me = xt::adapt(
        &elemmat(e, 0, 0), xt::xshape<D::s_nne * D::s_ndim, D::s_nne * D::s_ndim>());

    for (size_t q = 0; q < nip; ++q) {

        auto Ne = xt::adapt(&N(q, 0), xt::xshape<D::s_nne>());

        for (size_t m = 0; m < D::s_nne; ++m) {
            for (size_t n = 0; n < D::s_nne; ++n) {
                for (size_t i = 0; i < D::s_ndim; ++i) {
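// Presumed accumulation (elided here), with "rho" shorthand for qscalar(e, q):
//
//     Me(m * D::s_ndim + i, n * D::s_ndim + i) += Ne(m) * rho * Ne(n) * vol(e, q);
//
// the classic consistent "mass" matrix M_(mi)(ni) = integral of N_m * rho * N_n dV.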
template <class T, class R>

auto nelem = derived_cast().m_nelem;
auto nip = derived_cast().m_nip;
auto& dNx = derived_cast().m_dNx;
auto& vol = derived_cast().m_vol;

#pragma omp parallel for
for (size_t e = 0; e < nelem; ++e) {

    auto fe = xt::adapt(&elemvec(e, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());

    for (size_t q = 0; q < nip; ++q) {

        auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
        auto sigq = xt::adapt(&qtensor(e, q, 0, 0), xt::xshape<D::s_tdim, D::s_tdim>());

        for (size_t m = 0; m < D::s_nne; ++m) {
            for (size_t i = 0; i < D::s_ndim; ++i) {
                for (size_t j = 0; j < D::s_ndim; ++j) {
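// Presumed innermost statement (elided here):
//
//     fe(m, j) += dNxq(m, i) * sigq(i, j) * vol(e, q);
//
// i.e. f_mj = integral of dN_m/dx_i * sig_ij dV: the standard internal-force
// contribution of a stress-like 2nd-order tensor.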
template <class T, class R>
void int_gradN_dot_tensor4_dot_gradNT_dV_impl(const T& qtensor, R& elemmat) const
{
    auto nelem = derived_cast().m_nelem;
    auto nip = derived_cast().m_nip;
    auto& dNx = derived_cast().m_dNx;
    auto& vol = derived_cast().m_vol;

#pragma omp parallel for
    for (size_t e = 0; e < nelem; ++e) {

        auto K = xt::adapt(
            &elemmat(e, 0, 0), xt::xshape<D::s_nne * D::s_ndim, D::s_nne * D::s_ndim>());

        for (size_t q = 0; q < nip; ++q) {

            auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
            auto Cq = xt::adapt(
                &qtensor(e, q, 0, 0, 0, 0),
                xt::xshape<D::s_tdim, D::s_tdim, D::s_tdim, D::s_tdim>());

            for (size_t m = 0; m < D::s_nne; ++m) {
                for (size_t n = 0; n < D::s_nne; ++n) {
                    for (size_t i = 0; i < D::s_ndim; ++i) {
                        for (size_t j = 0; j < D::s_ndim; ++j) {
                            for (size_t k = 0; k < D::s_ndim; ++k) {
                                for (size_t l = 0; l < D::s_ndim; ++l) {
                                    K(m * D::s_ndim + j, n * D::s_ndim + k) +=
                                        dNxq(m, i) * Cq(i, j, k, l) * dNxq(n, l) * vol(e, q);
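// (The right-hand side above is completed under the standard assumption for
// this integral: K_(mj)(nk) = integral of dN_m/dx_i * C_ijkl * dN_n/dx_l dV,
// with the index pairs (m, j) and (n, k) flattened to matrix rows/columns.)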
void compute_dN_impl()
{
    auto nelem = derived_cast().m_nelem;
    auto nip = derived_cast().m_nip;
    auto& vol = derived_cast().m_vol;
    auto& w = derived_cast().m_w;
    auto& dNxi = derived_cast().m_dNxi;
    auto& dNx = derived_cast().m_dNx;
    auto& x = derived_cast().m_x;

    auto J = array_type::tensor<double, 2>::from_shape({D::s_ndim, D::s_ndim});
    auto Jinv = array_type::tensor<double, 2>::from_shape({D::s_ndim, D::s_ndim});

    for (size_t e = 0; e < nelem; ++e) {

        auto xe = xt::adapt(&x(e, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());

        for (size_t q = 0; q < nip; ++q) {

            auto dNxiq = xt::adapt(&dNxi(q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
            auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());

            // assemble the Jacobian J_ij = dN_m/dxi_i * x_mj
            // (loop body assumed; it and the zero-initialisation of J and dNxq
            // are elided in this excerpt)
            for (size_t m = 0; m < D::s_nne; ++m) {
                for (size_t i = 0; i < D::s_ndim; ++i) {
                    for (size_t j = 0; j < D::s_ndim; ++j) {
                        J(i, j) += dNxiq(m, i) * xe(m, j);
                    }
                }
            }

            double Jdet = detail::tensor<D::s_ndim>::inv(J, Jinv);

            // chain rule: dN_m/dx_i = Jinv_ij * dN_m/dxi_j (body likewise assumed)
            for (size_t m = 0; m < D::s_nne; ++m) {
                for (size_t i = 0; i < D::s_ndim; ++i) {
                    for (size_t j = 0; j < D::s_ndim; ++j) {
                        dNxq(m, i) += Jinv(i, j) * dNxiq(m, j);
                    }
                }
            }
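// The remainder (elided) presumably sets the integration volume from the
// quadrature weight and the Jacobian determinant:
//
//     vol(e, q) = w(q) * Jdet;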