FrictionQPotFEM 0.23.3
Loading...
Searching...
No Matches
Element.h
Go to the documentation of this file.
1
9#ifndef GOOSEFEM_ELEMENT_H
10#define GOOSEFEM_ELEMENT_H
11
12#include "Allocate.h"
13#include "config.h"
14#include "detail.h"
15
16namespace GooseFEM {
17
21namespace Element {
22
33 const array_type::tensor<double, 2>& nodevec)
34{
35 size_t nelem = conn.shape(0);
36 size_t nne = conn.shape(1);
37 size_t ndim = nodevec.shape(1);
38
39 array_type::tensor<double, 3> elemvec = xt::empty<double>({nelem, nne, ndim});
40
41#pragma omp parallel for
42 for (size_t e = 0; e < nelem; ++e) {
43 for (size_t m = 0; m < nne; ++m) {
44 for (size_t i = 0; i < ndim; ++i) {
45 elemvec(e, m, i) = nodevec(conn(e, m), i);
46 }
47 }
48 }
49
50 return elemvec;
51}
52
64 const array_type::tensor<double, 3>& elemvec)
65{
66 size_t nelem = conn.shape(0);
67 size_t nne = conn.shape(1);
68 size_t ndim = elemvec.shape(2);
69 size_t nnode = xt::amax(conn)() + 1;
70
71 GOOSEFEM_ASSERT(elemvec.shape(0) == nelem);
72 GOOSEFEM_ASSERT(elemvec.shape(1) == nne);
73
74 array_type::tensor<double, 2> nodevec = xt::zeros<double>({nnode, ndim});
75
76 for (size_t e = 0; e < nelem; ++e) {
77 for (size_t m = 0; m < nne; ++m) {
78 for (size_t i = 0; i < ndim; ++i) {
79 nodevec(conn(e, m), i) += elemvec(e, m, i);
80 }
81 }
82 }
83
84 return nodevec;
85}
86
93template <class E>
94inline bool isSequential(const E& dofs)
95{
96 size_t ndof = xt::amax(dofs)() + 1;
97
98 array_type::tensor<int, 1> exists = xt::zeros<int>({ndof});
99
100 for (auto& i : dofs) {
101 exists[i]++;
102 }
103
104 for (auto& i : dofs) {
105 if (exists[i] == 0) {
106 return false;
107 }
108 }
109
110 return true;
111}
112
121{
122 GOOSEFEM_ASSERT(elemmat.shape(1) == elemmat.shape(2));
123
124 size_t nelem = elemmat.shape(0);
125 size_t N = elemmat.shape(1);
126
127 double eps = std::numeric_limits<double>::epsilon();
128
129#pragma omp parallel for
130 for (size_t e = 0; e < nelem; ++e) {
131 for (size_t i = 0; i < N; ++i) {
132 for (size_t j = 0; j < N; ++j) {
133 if (i != j) {
134 if (std::abs(elemmat(e, i, j)) > eps) {
135 return false;
136 }
137 }
138 }
139 }
140 }
141
142 return true;
143}
144
148template <class D>
150public:
154 using derived_type = D;
155
161 auto nelem() const
162 {
163 return derived_cast().m_nelem;
164 }
165
171 auto nne() const
172 {
173 return D::s_nne;
174 }
175
181 auto ndim() const
182 {
183 return D::s_ndim;
184 }
185
191 auto tdim() const
192 {
193 return D::s_tdim;
194 }
195
201 auto nip() const
202 {
203 return derived_cast().m_nip;
204 }
205
    /// Broadcast a "qscalar" (`arg`, shape [#nelem, #nip]) to a "qtensor" (`ret`,
    /// written in place). Only the shape of `arg` is checked here.
    template <class T, class R>
    void asTensor(const T& arg, R& ret) const
    {
        GOOSEFEM_ASSERT(xt::has_shape(arg, this->shape_qscalar()));
        GooseFEM::asTensor(arg, ret);
    }
219
    /// Broadcast a "qscalar" to a "qtensor" of compile-time rank `rank`,
    /// with tensor dimension taken from the derived class (`m_tdim`).
    template <size_t rank, class T>
    auto AsTensor(const T& arg) const
    {
        return GooseFEM::AsTensor<rank>(arg, derived_cast().m_tdim);
    }
231
    /// Broadcast a "qscalar" to a "qtensor" of run-time rank `rank`,
    /// with tensor dimension taken from the derived class (`m_tdim`).
    template <class T>
    auto AsTensor(size_t rank, const T& arg) const
    {
        return GooseFEM::AsTensor(rank, arg, derived_cast().m_tdim);
    }
244
250 auto shape_elemvec() const -> std::array<size_t, 3>
251 {
252 return std::array<size_t, 3>{derived_cast().m_nelem, D::s_nne, D::s_ndim};
253 }
254
261 auto shape_elemvec(size_t arg) const -> std::array<size_t, 3>
262 {
263 return std::array<size_t, 3>{derived_cast().m_nelem, D::s_nne, arg};
264 }
265
271 auto shape_elemmat() const -> std::array<size_t, 3>
272 {
273 return std::array<size_t, 3>{
274 derived_cast().m_nelem, D::s_nne * D::s_ndim, D::s_nne * D::s_ndim};
275 }
276
284 template <size_t rank = 0>
285 auto shape_qtensor() const -> std::array<size_t, 2 + rank>
286 {
287 std::array<size_t, 2 + rank> shape;
288 shape[0] = derived_cast().m_nelem;
289 shape[1] = derived_cast().m_nip;
290 std::fill(shape.begin() + 2, shape.end(), derived_cast().m_tdim);
291 return shape;
292 }
293
301 auto shape_qtensor(size_t rank) const -> std::vector<size_t>
302 {
303 std::vector<size_t> shape(2 + rank);
304 shape[0] = derived_cast().m_nelem;
305 shape[1] = derived_cast().m_nip;
306 std::fill(shape.begin() + 2, shape.end(), derived_cast().m_tdim);
307 return shape;
308 }
309
319 template <size_t trank>
320 auto shape_qtensor(size_t rank, size_t arg) const -> std::array<size_t, 2 + trank>
321 {
322 GOOSEFEM_ASSERT(trank == rank);
323 std::array<size_t, 2 + trank> shape;
324 shape[0] = derived_cast().m_nelem;
325 shape[1] = derived_cast().m_nip;
326 std::fill(shape.begin() + 2, shape.end(), arg);
327 return shape;
328 }
329
338 auto shape_qtensor(size_t rank, size_t arg) const -> std::vector<size_t>
339 {
340 std::vector<size_t> shape(2 + rank);
341 shape[0] = derived_cast().m_nelem;
342 shape[1] = derived_cast().m_nip;
343 std::fill(shape.begin() + 2, shape.end(), arg);
344 return shape;
345 }
346
351 auto shape_qscalar() const -> std::array<size_t, 2>
352 {
353 return std::array<size_t, 2>{derived_cast().m_nelem, derived_cast().m_nip};
354 }
355
360 auto shape_qvector() const -> std::array<size_t, 3>
361 {
362 return std::array<size_t, 3>{derived_cast().m_nelem, derived_cast().m_nip, D::s_tdim};
363 }
364
370 auto shape_qvector(size_t arg) const -> std::array<size_t, 3>
371 {
372 return std::array<size_t, 3>{derived_cast().m_nelem, derived_cast().m_nip, arg};
373 }
374
382 template <class R>
383 auto allocate_elemvec() const
384 {
386 }
387
395 template <class R>
396 auto allocate_elemvec(R val) const
397 {
399 ret.fill(val);
400 return ret;
401 }
402
410 template <class R>
411 auto allocate_elemmat() const
412 {
414 }
415
423 template <class R>
424 auto allocate_elemmat(R val) const
425 {
427 ret.fill(val);
428 return ret;
429 }
430
440 template <size_t rank = 0, class R>
441 auto allocate_qtensor() const
442 {
443 return array_type::tensor<R, 2 + rank>::from_shape(this->shape_qtensor<rank>());
444 }
445
455 template <size_t rank = 0, class R>
456 auto allocate_qtensor(R val) const
457 {
458 auto ret = array_type::tensor<R, 2 + rank>::from_shape(this->shape_qtensor<rank>());
459 ret.fill(val);
460 return ret;
461 }
462
472 template <class R>
473 auto allocate_qtensor(size_t rank) const
474 {
475 return xt::xarray<R>::from_shape(this->shape_qtensor(rank));
476 }
477
487 template <class R>
488 auto allocate_qtensor(size_t rank, R val) const
489 {
490 auto ret = xt::xarray<R>::from_shape(this->shape_qtensor(rank));
491 ret.fill(val);
492 return ret;
493 }
494
    /// Allocate (uninitialised) storage for a "qscalar" (a "qtensor" of rank 0).
    template <class R>
    auto allocate_qscalar() const
    {
        return this->allocate_qtensor<0, R>();
    }
507
    /// Allocate storage for a "qscalar" (a "qtensor" of rank 0),
    /// initialised everywhere to `val`.
    template <class R>
    auto allocate_qscalar(R val) const
    {
        return this->allocate_qtensor<0, R>(val);
    }
520
521private:
    /// CRTP helper: cast `this` to the derived class (mutable access).
    auto derived_cast() -> derived_type&
    {
        return *static_cast<derived_type*>(this);
    }

    /// CRTP helper: cast `this` to the derived class (read-only access).
    auto derived_cast() const -> const derived_type&
    {
        return *static_cast<const derived_type*>(this);
    }
531};
532
542template <class D>
544public:
548 using derived_type = D;
549
    /// Overwrite the stored nodal positions (`m_x`, same shape required)
    /// and recompute the shape-function gradients and volumes.
    template <class T>
    void update_x(const T& x)
    {
        GOOSEFEM_ASSERT(xt::has_shape(x, derived_cast().m_x.shape()));
        xt::noalias(derived_cast().m_x) = x;
        derived_cast().compute_dN_impl();
    }
567
572 auto GradN() const -> const array_type::tensor<double, 4>&
573 {
574 return derived_cast().m_dNx;
575 }
576
581 auto dV() const -> const array_type::tensor<double, 2>&
582 {
583 return derived_cast().m_vol;
584 }
585
594 template <class T>
595 auto InterpQuad_vector(const T& elemvec) const -> array_type::tensor<double, 3>
596 {
597 size_t n = elemvec.shape(2);
599 derived_cast().interpQuad_vector_impl(elemvec, qvector);
600 return qvector;
601 }
602
    /// Same as InterpQuad_vector(), but writing to the preallocated output `qvector`.
    template <class T, class R>
    void interpQuad_vector(const T& elemvec, R& qvector) const
    {
        derived_cast().interpQuad_vector_impl(elemvec, qvector);
    }
614
    /// Allocate the output and compute the product of shape-function gradients
    /// and a nodal vector (see gradN_vector_impl() for the exact contraction).
    template <class T>
    auto GradN_vector(const T& elemvec) const -> array_type::tensor<double, 4>
    {
        auto qtensor = array_type::tensor<double, 4>::from_shape(this->template shape_qtensor<2>());
        derived_cast().gradN_vector_impl(elemvec, qtensor);
        return qtensor;
    }
638
    /// Same as GradN_vector(), but writing to the preallocated output `qtensor`.
    template <class T, class R>
    void gradN_vector(const T& elemvec, R& qtensor) const
    {
        derived_cast().gradN_vector_impl(elemvec, qtensor);
    }
650
    /// Allocate the output and compute the transposed variant of GradN_vector()
    /// (see gradN_vector_T_impl()).
    template <class T>
    auto GradN_vector_T(const T& elemvec) const -> array_type::tensor<double, 4>
    {
        auto qtensor = array_type::tensor<double, 4>::from_shape(this->template shape_qtensor<2>());
        derived_cast().gradN_vector_T_impl(elemvec, qtensor);
        return qtensor;
    }
670
    /// Same as GradN_vector_T(), but writing to the preallocated output `qtensor`.
    template <class T, class R>
    void gradN_vector_T(const T& elemvec, R& qtensor) const
    {
        derived_cast().gradN_vector_T_impl(elemvec, qtensor);
    }
682
    /// Allocate the output and compute the symmetric variant of GradN_vector()
    /// (see symGradN_vector_impl()).
    template <class T>
    auto SymGradN_vector(const T& elemvec) const -> array_type::tensor<double, 4>
    {
        auto qtensor = array_type::tensor<double, 4>::from_shape(this->template shape_qtensor<2>());
        derived_cast().symGradN_vector_impl(elemvec, qtensor);
        return qtensor;
    }
703
    /// Same as SymGradN_vector(), but writing to the preallocated output `qtensor`.
    template <class T, class R>
    void symGradN_vector(const T& elemvec, R& qtensor) const
    {
        derived_cast().symGradN_vector_impl(elemvec, qtensor);
    }
715
728 template <class T>
729 auto Int_N_vector_dV(const T& qvector) const -> array_type::tensor<double, 3>
730 {
731 size_t n = qvector.shape(2);
733 derived_cast().int_N_vector_dV_impl(qvector, elemvec);
734 return elemvec;
735 }
736
    /// Same as Int_N_vector_dV(), but writing to the preallocated output `elemvec`.
    template <class T, class R>
    void int_N_vector_dV(const T& qvector, R& elemvec) const
    {
        derived_cast().int_N_vector_dV_impl(qvector, elemvec);
    }
748
767 template <class T>
768 auto Int_N_scalar_NT_dV(const T& qscalar) const -> array_type::tensor<double, 3>
769 {
771 derived_cast().int_N_scalar_NT_dV_impl(qscalar, elemmat);
772 return elemmat;
773 }
774
    /// Same as Int_N_scalar_NT_dV(), but writing to the preallocated output `elemmat`.
    template <class T, class R>
    void int_N_scalar_NT_dV(const T& qscalar, R& elemmat) const
    {
        derived_cast().int_N_scalar_NT_dV_impl(qscalar, elemmat);
    }
786
804 template <class T>
806 {
808 derived_cast().int_gradN_dot_tensor2_dV_impl(qtensor, elemvec);
809 return elemvec;
810 }
811
    /// Same as Int_gradN_dot_tensor2_dV(), but writing to the preallocated output `elemvec`.
    template <class T, class R>
    void int_gradN_dot_tensor2_dV(const T& qtensor, R& elemvec) const
    {
        derived_cast().int_gradN_dot_tensor2_dV_impl(qtensor, elemvec);
    }
823
824 // Integral of the dot product
825 // elemmat(m*2+j, n*2+k) += dNdx(m,i) * qtensor(i,j,k,l) * dNdx(n,l) * dV
826
846 template <class T>
847 auto Int_gradN_dot_tensor4_dot_gradNT_dV(const T& qtensor) const
849 {
851 derived_cast().int_gradN_dot_tensor4_dot_gradNT_dV_impl(qtensor, elemmat);
852 return elemmat;
853 }
854
    /// Same as Int_gradN_dot_tensor4_dot_gradNT_dV(), but writing to the
    /// preallocated output `elemmat`.
    template <class T, class R>
    void int_gradN_dot_tensor4_dot_gradNT_dV(const T& qtensor, R& elemmat) const
    {
        derived_cast().int_gradN_dot_tensor4_dot_gradNT_dV_impl(qtensor, elemmat);
    }
866
867protected:
872 {
873 derived_cast().compute_dN_impl();
874 }
875
876private:
    /// CRTP helper: cast `this` to the derived class (mutable access).
    auto derived_cast() -> derived_type&
    {
        return *static_cast<derived_type*>(this);
    }

    /// CRTP helper: cast `this` to the derived class (read-only access).
    auto derived_cast() const -> const derived_type&
    {
        return *static_cast<const derived_type*>(this);
    }
886
887 friend class QuadratureBase<D>;
888
    /// Implementation of InterpQuad_vector(): interpolate a nodal vector to the
    /// integration points, qvector(e, q, i) = sum_m N(q, m) * elemvec(e, m, i).
    /// The number of components `n` is taken from the input (need not equal #ndim).
    /// NOTE(review): the raw-pointer indexing `fq[m * n + i]` assumes row-major
    /// contiguous storage of `elemvec` — confirm for the types passed as T/R.
    template <class T, class R>
    void interpQuad_vector_impl(const T& elemvec, R& qvector) const
    {
        size_t n = elemvec.shape(2);
        GOOSEFEM_ASSERT(xt::has_shape(elemvec, this->shape_elemvec(n)));
        GOOSEFEM_ASSERT(xt::has_shape(qvector, this->shape_qvector(n)));

        auto nelem = derived_cast().m_nelem;
        auto nip = derived_cast().m_nip;
        auto& N = derived_cast().m_N;

        // output is accumulated below: start from zero
        qvector.fill(0.0);

#pragma omp parallel for
        for (size_t e = 0; e < nelem; ++e) {

            auto fq = &elemvec(e, 0, 0);

            for (size_t q = 0; q < nip; ++q) {

                auto Nq = &N(q, 0);
                auto tq = &qvector(e, q, 0);

                for (size_t m = 0; m < D::s_nne; ++m) {
                    for (size_t i = 0; i < n; ++i) {
                        tq[i] += Nq[m] * fq[m * n + i];
                    }
                }
            }
        }
    }
920
    /// Implementation of GradN_vector():
    /// qtensor(e, q, i, j) = sum_m dNx(e, q, m, i) * elemvec(e, m, j).
    template <class T, class R>
    void gradN_vector_impl(const T& elemvec, R& qtensor) const
    {
        GOOSEFEM_ASSERT(xt::has_shape(elemvec, this->shape_elemvec()));
        GOOSEFEM_ASSERT(xt::has_shape(qtensor, this->template shape_qtensor<2>()));

        auto nelem = derived_cast().m_nelem;
        auto nip = derived_cast().m_nip;
        auto& dNx = derived_cast().m_dNx;

        // output is accumulated below: start from zero
        qtensor.fill(0.0);

#pragma omp parallel for
        for (size_t e = 0; e < nelem; ++e) {

            // fixed-shape views into this element's data (no copies)
            auto ue = xt::adapt(&elemvec(e, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());

            for (size_t q = 0; q < nip; ++q) {

                auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
                auto graduq = xt::adapt(&qtensor(e, q, 0, 0), xt::xshape<D::s_tdim, D::s_tdim>());

                for (size_t m = 0; m < D::s_nne; ++m) {
                    for (size_t i = 0; i < D::s_ndim; ++i) {
                        for (size_t j = 0; j < D::s_ndim; ++j) {
                            graduq(i, j) += dNxq(m, i) * ue(m, j);
                        }
                    }
                }
            }
        }
    }
953
    /// Implementation of GradN_vector_T(): transpose of gradN_vector_impl(),
    /// qtensor(e, q, j, i) = sum_m dNx(e, q, m, i) * elemvec(e, m, j).
    template <class T, class R>
    void gradN_vector_T_impl(const T& elemvec, R& qtensor) const
    {
        GOOSEFEM_ASSERT(xt::has_shape(elemvec, this->shape_elemvec()));
        GOOSEFEM_ASSERT(xt::has_shape(qtensor, this->template shape_qtensor<2>()));

        auto nelem = derived_cast().m_nelem;
        auto nip = derived_cast().m_nip;
        auto& dNx = derived_cast().m_dNx;

        // output is accumulated below: start from zero
        qtensor.fill(0.0);

#pragma omp parallel for
        for (size_t e = 0; e < nelem; ++e) {

            auto ue = xt::adapt(&elemvec(e, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());

            for (size_t q = 0; q < nip; ++q) {

                auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
                auto graduq = xt::adapt(&qtensor(e, q, 0, 0), xt::xshape<D::s_tdim, D::s_tdim>());

                for (size_t m = 0; m < D::s_nne; ++m) {
                    for (size_t i = 0; i < D::s_ndim; ++i) {
                        for (size_t j = 0; j < D::s_ndim; ++j) {
                            // note the swapped indices relative to gradN_vector_impl()
                            graduq(j, i) += dNxq(m, i) * ue(m, j);
                        }
                    }
                }
            }
        }
    }
986
    /// Implementation of SymGradN_vector(): symmetric part of the gradient,
    /// eps = 0.5 * (gradu + gradu^T), accumulated entry-wise below.
    template <class T, class R>
    void symGradN_vector_impl(const T& elemvec, R& qtensor) const
    {
        GOOSEFEM_ASSERT(xt::has_shape(elemvec, this->shape_elemvec()));
        GOOSEFEM_ASSERT(xt::has_shape(qtensor, this->template shape_qtensor<2>()));

        auto nelem = derived_cast().m_nelem;
        auto nip = derived_cast().m_nip;
        auto& dNx = derived_cast().m_dNx;

        // output is accumulated below: start from zero
        qtensor.fill(0.0);

#pragma omp parallel for
        for (size_t e = 0; e < nelem; ++e) {

            auto ue = xt::adapt(&elemvec(e, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());

            for (size_t q = 0; q < nip; ++q) {

                auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
                auto epsq = xt::adapt(&qtensor(e, q, 0, 0), xt::xshape<D::s_tdim, D::s_tdim>());

                for (size_t m = 0; m < D::s_nne; ++m) {
                    for (size_t i = 0; i < D::s_ndim; ++i) {
                        for (size_t j = 0; j < D::s_ndim; ++j) {
                            // half the dyad goes to (i, j), half to (j, i):
                            // symmetrises the accumulated gradient
                            epsq(i, j) += 0.5 * dNxq(m, i) * ue(m, j);
                            epsq(j, i) += 0.5 * dNxq(m, i) * ue(m, j);
                        }
                    }
                }
            }
        }
    }
1020
    /// Implementation of Int_N_vector_dV():
    /// elemvec(e, m, i) = sum_q N(q, m) * qvector(e, q, i) * dV(e, q).
    /// The number of components `n` is taken from the input.
    /// NOTE(review): the raw-pointer indexing `f[m * n + i]` assumes row-major
    /// contiguous storage of `elemvec` — confirm for the types passed as T/R.
    template <class T, class R>
    void int_N_vector_dV_impl(const T& qvector, R& elemvec) const
    {
        size_t n = qvector.shape(2);
        GOOSEFEM_ASSERT(xt::has_shape(qvector, this->shape_qvector(n)));
        GOOSEFEM_ASSERT(xt::has_shape(elemvec, this->shape_elemvec(n)));

        auto nelem = derived_cast().m_nelem;
        auto nip = derived_cast().m_nip;
        auto& N = derived_cast().m_N;
        auto& vol = derived_cast().m_vol;

        // output is accumulated below: start from zero
        elemvec.fill(0.0);

#pragma omp parallel for
        for (size_t e = 0; e < nelem; ++e) {

            auto f = &elemvec(e, 0, 0);

            for (size_t q = 0; q < nip; ++q) {

                auto Ne = &N(q, 0);
                auto tq = &qvector(e, q, 0);
                auto& volq = vol(e, q);

                for (size_t m = 0; m < D::s_nne; ++m) {
                    for (size_t i = 0; i < n; ++i) {
                        f[m * n + i] += Ne[m] * tq[i] * volq;
                    }
                }
            }
        }
    }
1054
    /// Implementation of Int_N_scalar_NT_dV():
    /// elemmat(e, m * ndim + i, n * ndim + i) += N(q, m) * qscalar(e, q) * N(q, n) * dV(e, q).
    /// Only the per-component diagonal blocks are filled (i == i coupling).
    template <class T, class R>
    void int_N_scalar_NT_dV_impl(const T& qscalar, R& elemmat) const
    {
        GOOSEFEM_ASSERT(xt::has_shape(qscalar, this->shape_qscalar()));
        GOOSEFEM_ASSERT(xt::has_shape(elemmat, this->shape_elemmat()));

        auto nelem = derived_cast().m_nelem;
        auto nip = derived_cast().m_nip;
        auto& N = derived_cast().m_N;
        auto& vol = derived_cast().m_vol;

        // output is accumulated below: start from zero
        elemmat.fill(0.0);

#pragma omp parallel for
        for (size_t e = 0; e < nelem; ++e) {

            // fixed-shape view of this element's matrix (no copy)
            auto Me = xt::adapt(
                &elemmat(e, 0, 0), xt::xshape<D::s_nne * D::s_ndim, D::s_nne * D::s_ndim>());

            for (size_t q = 0; q < nip; ++q) {

                auto Ne = xt::adapt(&N(q, 0), xt::xshape<D::s_nne>());
                auto& volq = vol(e, q);
                auto& rho = qscalar(e, q);

                // M(m * D::s_ndim + i, n * D::s_ndim + i) += N(m) * scalar * N(n) * dV
                for (size_t m = 0; m < D::s_nne; ++m) {
                    for (size_t n = 0; n < D::s_nne; ++n) {
                        for (size_t i = 0; i < D::s_ndim; ++i) {
                            Me(m * D::s_ndim + i, n * D::s_ndim + i) += Ne(m) * rho * Ne(n) * volq;
                        }
                    }
                }
            }
        }
    }
1091
1092 template <class T, class R>
1093 void int_gradN_dot_tensor2_dV_impl(const T& qtensor, R& elemvec) const
1094 {
1095 GOOSEFEM_ASSERT(xt::has_shape(qtensor, this->template shape_qtensor<2>()));
1096 GOOSEFEM_ASSERT(xt::has_shape(elemvec, this->shape_elemvec()));
1097
1098 auto nelem = derived_cast().m_nelem;
1099 auto nip = derived_cast().m_nip;
1100 auto& dNx = derived_cast().m_m_dNx;
1101 auto& vol = derived_cast().m_vol;
1102
1103 elemvec.fill(0.0);
1104
1105#pragma omp parallel for
1106 for (size_t e = 0; e < nelem; ++e) {
1107
1108 auto fe = xt::adapt(&elemvec(e, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
1109
1110 for (size_t q = 0; q < nip; ++q) {
1111
1112 auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
1113 auto sigq = xt::adapt(&qtensor(e, q, 0, 0), xt::xshape<D::s_tdim, D::s_tdim>());
1114 auto& volq = vol(e, q);
1115
1116 for (size_t m = 0; m < D::s_nne; ++m) {
1117 for (size_t i = 0; i < D::s_ndim; ++i) {
1118 for (size_t j = 0; j < D::s_ndim; ++j) {
1119 fe(m, j) += dNxq(m, i) * sigq(i, j) * volq;
1120 }
1121 }
1122 }
1123 }
1124 }
1125 }
1126
    /// Implementation of Int_gradN_dot_tensor4_dot_gradNT_dV():
    /// elemmat(e, m * ndim + j, n * ndim + k) +=
    ///     dNx(e, q, m, i) * qtensor(e, q, i, j, k, l) * dNx(e, q, n, l) * dV(e, q).
    template <class T, class R>
    void int_gradN_dot_tensor4_dot_gradNT_dV_impl(const T& qtensor, R& elemmat) const
    {
        GOOSEFEM_ASSERT(xt::has_shape(qtensor, this->template shape_qtensor<4>()));
        GOOSEFEM_ASSERT(xt::has_shape(elemmat, this->shape_elemmat()));

        auto nelem = derived_cast().m_nelem;
        auto nip = derived_cast().m_nip;
        auto& dNx = derived_cast().m_dNx;
        auto& vol = derived_cast().m_vol;

        // output is accumulated below: start from zero
        elemmat.fill(0.0);

#pragma omp parallel for
        for (size_t e = 0; e < nelem; ++e) {

            auto K = xt::adapt(
                &elemmat(e, 0, 0), xt::xshape<D::s_nne * D::s_ndim, D::s_nne * D::s_ndim>());

            for (size_t q = 0; q < nip; ++q) {

                auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
                auto Cq = xt::adapt(
                    &qtensor(e, q, 0, 0, 0, 0),
                    xt::xshape<D::s_tdim, D::s_tdim, D::s_tdim, D::s_tdim>());
                auto& volq = vol(e, q);

                // contract over i and l; (m, j) rows and (n, k) columns
                for (size_t m = 0; m < D::s_nne; ++m) {
                    for (size_t n = 0; n < D::s_nne; ++n) {
                        for (size_t i = 0; i < D::s_ndim; ++i) {
                            for (size_t j = 0; j < D::s_ndim; ++j) {
                                for (size_t k = 0; k < D::s_ndim; ++k) {
                                    for (size_t l = 0; l < D::s_ndim; ++l) {
                                        K(m * D::s_ndim + j, n * D::s_ndim + k) +=
                                            dNxq(m, i) * Cq(i, j, k, l) * dNxq(n, l) * volq;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
1171
1172 void compute_dN_impl()
1173 {
1174 auto nelem = derived_cast().m_nelem;
1175 auto nip = derived_cast().m_nip;
1176 auto& vol = derived_cast().m_vol;
1177 auto& w = derived_cast().m_w;
1178 auto& dNxi = derived_cast().m_dNxi;
1179 auto& dNx = derived_cast().m_dNx;
1180 auto& x = derived_cast().m_x;
1181
1182 dNx.fill(0.0);
1183
1184#pragma omp parallel
1185 {
1186 auto J = array_type::tensor<double, 2>::from_shape({D::s_ndim, D::s_ndim});
1187 auto Jinv = array_type::tensor<double, 2>::from_shape({D::s_ndim, D::s_ndim});
1188
1189#pragma omp for
1190 for (size_t e = 0; e < nelem; ++e) {
1191
1192 auto xe = xt::adapt(&x(e, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
1193
1194 for (size_t q = 0; q < nip; ++q) {
1195
1196 auto dNxiq = xt::adapt(&dNxi(q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
1197 auto dNxq = xt::adapt(&dNx(e, q, 0, 0), xt::xshape<D::s_nne, D::s_ndim>());
1198
1199 J.fill(0.0);
1200
1201 for (size_t m = 0; m < D::s_nne; ++m) {
1202 for (size_t i = 0; i < D::s_ndim; ++i) {
1203 for (size_t j = 0; j < D::s_ndim; ++j) {
1204 J(i, j) += dNxiq(m, i) * xe(m, j);
1205 }
1206 }
1207 }
1208
1209 double Jdet = detail::tensor<D::s_ndim>::inv(J, Jinv);
1210
1211 for (size_t m = 0; m < D::s_nne; ++m) {
1212 for (size_t i = 0; i < D::s_ndim; ++i) {
1213 for (size_t j = 0; j < D::s_ndim; ++j) {
1214 dNxq(m, i) += Jinv(i, j) * dNxiq(m, i);
1215 }
1216 }
1217 }
1218
1219 vol(e, q) = w(q) * Jdet;
1220 }
1221 }
1222 }
1223 }
1224};
1225
1226} // namespace Element
1227} // namespace GooseFEM
1228
1229#endif
Common allocation methods.
CRTP base class for interpolation and quadrature for a generic element in Cartesian coordinates.
Definition: Element.h:543
auto Int_gradN_dot_tensor2_dV(const T &qtensor) const -> array_type::tensor< double, 3 >
Element-by-element: integral of the dot product of the shape function gradients with a second order t...
Definition: Element.h:805
auto Int_N_scalar_NT_dV(const T &qscalar) const -> array_type::tensor< double, 3 >
Element-by-element: integral of the scalar product of the shape function with a scalar.
Definition: Element.h:768
void gradN_vector(const T &elemvec, R &qtensor) const
Same as GradN_vector(), but writing to preallocated return.
Definition: Element.h:646
auto SymGradN_vector(const T &elemvec) const -> array_type::tensor< double, 4 >
The symmetric output of GradN_vector().
Definition: Element.h:697
void symGradN_vector(const T &elemvec, R &qtensor) const
Same as SymGradN_vector(), but writing to preallocated return.
Definition: Element.h:711
void int_gradN_dot_tensor2_dV(const T &qtensor, R &elemvec) const
Same as Int_gradN_dot_tensor2_dV(), but writing to preallocated return.
Definition: Element.h:819
auto InterpQuad_vector(const T &elemvec) const -> array_type::tensor< double, 3 >
Interpolate element vector and evaluate at each quadrature point.
Definition: Element.h:595
void int_gradN_dot_tensor4_dot_gradNT_dV(const T &qtensor, R &elemmat) const
Same as Int_gradN_dot_tensor4_dot_gradNT_dV(), but writing to preallocated return.
Definition: Element.h:862
auto dV() const -> const array_type::tensor< double, 2 > &
Integration volume.
Definition: Element.h:581
void gradN_vector_T(const T &elemvec, R &qtensor) const
Same as GradN_vector_T(), but writing to preallocated return.
Definition: Element.h:678
auto GradN_vector_T(const T &elemvec) const -> array_type::tensor< double, 4 >
The transposed output of GradN_vector().
Definition: Element.h:664
void int_N_vector_dV(const T &qvector, R &elemvec) const
Same as Int_N_vector_dV(), but writing to preallocated return.
Definition: Element.h:744
auto Int_gradN_dot_tensor4_dot_gradNT_dV(const T &qtensor) const -> array_type::tensor< double, 3 >
Element-by-element: integral of the dot products of the shape function gradients with a fourth order ...
Definition: Element.h:847
auto GradN_vector(const T &elemvec) const -> array_type::tensor< double, 4 >
Element-by-element: dyadic product of the shape function gradients and a nodal vector.
Definition: Element.h:632
void interpQuad_vector(const T &elemvec, R &qvector) const
Same as InterpQuad_vector(), but writing to preallocated return.
Definition: Element.h:610
void compute_dN()
Update the shape function gradients (called when the nodal positions are updated).
Definition: Element.h:871
void int_N_scalar_NT_dV(const T &qscalar, R &elemmat) const
Same as Int_N_scalar_NT_dV(), but writing to preallocated return.
Definition: Element.h:782
void update_x(const T &x)
Update the nodal positions.
Definition: Element.h:561
auto GradN() const -> const array_type::tensor< double, 4 > &
Shape function gradients (in global coordinates).
Definition: Element.h:572
auto Int_N_vector_dV(const T &qvector) const -> array_type::tensor< double, 3 >
Element-by-element: integral of a continuous vector-field.
Definition: Element.h:729
CRTP base class for quadrature.
Definition: Element.h:149
auto shape_qscalar() const -> std::array< size_t, 2 >
Get the shape of a "qscalar" (a "qtensor" of rank 0)
Definition: Element.h:351
auto shape_qtensor(size_t rank, size_t arg) const -> std::vector< size_t >
Get the shape of a "qtensor" of a certain rank (0 = scalar, 1, vector, 2 = 2nd-order tensor,...
Definition: Element.h:338
auto shape_elemvec(size_t arg) const -> std::array< size_t, 3 >
Get the shape of an "elemvec".
Definition: Element.h:261
auto shape_qvector() const -> std::array< size_t, 3 >
Get the shape of a "qvector" (a "qtensor" of rank 1)
Definition: Element.h:360
auto nip() const
Number of integration points.
Definition: Element.h:201
auto allocate_elemmat() const
Get an allocated array_type::tensor to store a "elemmat".
Definition: Element.h:411
auto nelem() const
Number of elements.
Definition: Element.h:161
auto nne() const
Number of nodes per element.
Definition: Element.h:171
void asTensor(const T &arg, R &ret) const
Convert "qscalar" to "qtensor" of certain rank.
Definition: Element.h:214
auto shape_qtensor() const -> std::array< size_t, 2+rank >
Get the shape of a "qtensor" of a certain rank (0 = scalar, 1, vector, 2 = 2nd-order tensor,...
Definition: Element.h:285
auto allocate_elemvec() const
Get an allocated array_type::tensor to store a "elemvec".
Definition: Element.h:383
auto shape_elemmat() const -> std::array< size_t, 3 >
Get the shape of an "elemmat".
Definition: Element.h:271
auto allocate_elemvec(R val) const
Get an allocated and initialised xt::xarray to store a "elemvec".
Definition: Element.h:396
auto shape_qvector(size_t arg) const -> std::array< size_t, 3 >
Get the shape of a "qvector" (a "qtensor" of rank 1)
Definition: Element.h:370
auto allocate_elemmat(R val) const
Get an allocated and initialised xt::xarray to store a "elemmat".
Definition: Element.h:424
auto shape_elemvec() const -> std::array< size_t, 3 >
Get the shape of an "elemvec".
Definition: Element.h:250
auto allocate_qscalar(R val) const
Get an allocated and initialised xt::xarray to store a "qscalar" (a "qtensor" of rank 0).
Definition: Element.h:516
auto AsTensor(const T &arg) const
Convert "qscalar" to "qtensor" of certain rank.
Definition: Element.h:227
auto tdim() const
Number of dimensions for integration point tensors.
Definition: Element.h:191
auto allocate_qtensor(size_t rank) const
Get an allocated xt::xarray to store a "qtensor" of a certain rank (0 = scalar, 1,...
Definition: Element.h:473
auto shape_qtensor(size_t rank) const -> std::vector< size_t >
Get the shape of a "qtensor" of a certain rank (0 = scalar, 1, vector, 2 = 2nd-order tensor,...
Definition: Element.h:301
auto allocate_qtensor() const
Get an allocated array_type::tensor to store a "qtensor" of a certain rank (0 = scalar,...
Definition: Element.h:441
auto allocate_qtensor(size_t rank, R val) const
Get an allocated and initialised xt::xarray to store a "qtensor" of a certain rank (0 = scalar,...
Definition: Element.h:488
auto shape_qtensor(size_t rank, size_t arg) const -> std::array< size_t, 2+trank >
Get the shape of a "qtensor" of a certain rank (0 = scalar, 1, vector, 2 = 2nd-order tensor,...
Definition: Element.h:320
auto ndim() const
Number of dimensions for node vectors.
Definition: Element.h:181
auto allocate_qscalar() const
Get an allocated array_type::tensor to store a "qscalar" (a "qtensor" of rank 0).
Definition: Element.h:503
auto AsTensor(size_t rank, const T &arg) const
Convert "qscalar" to "qtensor" of certain rank.
Definition: Element.h:240
auto allocate_qtensor(R val) const
Get an allocated and initialised array_type::tensor to store a "qtensor" of a certain rank (0 = scala...
Definition: Element.h:456
D derived_type
Underlying type.
Definition: Element.h:154
#define GOOSEFEM_ASSERT(expr)
All assertions are implemented as:
Definition: config.h:96
array_type::tensor< double, 1 > w()
Integration point weights.
Definition: ElementHex8.h:92
bool isDiagonal(const array_type::tensor< double, 3 > &elemmat)
Check that all of the matrices stored per elemmat (shape: [nelem, nne * ndim, nne * ndim]) are diagon...
Definition: Element.h:120
array_type::tensor< double, 3 > asElementVector(const array_type::tensor< size_t, 2 > &conn, const array_type::tensor< double, 2 > &nodevec)
Convert nodal vector with ("nodevec", shape:[nnode, ndim]) to nodal vector stored per element ("elemv...
Definition: Element.h:31
bool isSequential(const E &dofs)
Check that DOFs leave no holes.
Definition: Element.h:94
array_type::tensor< double, 2 > assembleNodeVector(const array_type::tensor< size_t, 2 > &conn, const array_type::tensor< double, 3 > &elemvec)
Assemble nodal vector stored per element ("elemvec", shape [nelem, nne, ndim]) to nodal vector ("node...
Definition: Element.h:62
xt::xtensor< T, N > tensor
Fixed (static) rank array.
Definition: config.h:176
Toolbox to perform finite element computations.
Definition: Allocate.h:14
auto AsTensor(const T &arg, const S &shape)
"Broadcast" a scalar stored in an array (e.g.
Definition: Allocate.h:167
void asTensor(const T &arg, R &ret)
"Broadcast" a scalar stored in an array (e.g.
Definition: Allocate.h:153