diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..c78e6b2cfd --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# `pre-commit run --all-files` (https://github.com/Ferrite-FEM/Ferrite.jl/pull/928) +68e1ab15bea4618f76b9ed1d850e2ce33375e266 diff --git a/.github/workflows/Check.yml b/.github/workflows/Check.yml new file mode 100644 index 0000000000..08dc6b83e0 --- /dev/null +++ b/.github/workflows/Check.yml @@ -0,0 +1,57 @@ +name: Code checks + +on: + pull_request: + push: + branches: ["master"] + +jobs: + + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1 + + explicit-imports: + runs-on: ubuntu-latest + name: "ExplicitImports.jl" + steps: + - uses: actions/checkout@v4 + # - uses: julia-actions/setup-julia@v2 + # with: + # version: '1' + - uses: julia-actions/cache@v2 + - uses: julia-actions/julia-buildpkg@v1 + - name: Install dependencies + shell: julia {0} + run: | + # Add ExplicitImports.jl and packages that Ferrite has extensions for + using Pkg + Pkg.add([ + PackageSpec(name = "ExplicitImports", version = "1.6"), + PackageSpec(name = "Metis"), + PackageSpec(name = "BlockArrays"), + ]) + - name: ExplicitImports.jl code checks + shell: julia --project {0} + run: | + using Ferrite, ExplicitImports, Metis, BlockArrays + # Check Ferrite + allow_unanalyzable = (ColoringAlgorithm,) # baremodules + check_no_implicit_imports(Ferrite; allow_unanalyzable) + check_no_stale_explicit_imports(Ferrite; allow_unanalyzable) + check_all_qualified_accesses_via_owners(Ferrite) + check_no_self_qualified_accesses(Ferrite) + # Check extension modules + for ext in (:FerriteBlockArrays, :FerriteMetis) + extmod = Base.get_extension(Ferrite, ext) + if extmod !== nothing + check_no_implicit_imports(extmod) + check_no_stale_explicit_imports(extmod) + check_all_qualified_accesses_via_owners(extmod) + check_no_self_qualified_accesses(extmod) + else + @warn "$(ext) extensions not available." 
+ end + end diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4679271fad..8bf808aeff 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: continue-on-error: ${{ matrix.julia-version == 'nightly' }} strategy: matrix: - julia-version: ['1.6', '1', 'nightly'] + julia-version: ['1.9', '1', 'nightly'] os: ['ubuntu-latest'] include: - os: windows-latest @@ -23,33 +23,30 @@ jobs: julia-version: '1' steps: - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@v1 + - uses: julia-actions/setup-julia@v2 with: version: ${{ matrix.julia-version }} + - uses: julia-actions/cache@v2 - uses: julia-actions/julia-buildpkg@v1 - uses: julia-actions/julia-runtest@v1 - uses: julia-actions/julia-processcoverage@v1 with: directories: 'src,ext' - - uses: codecov/codecov-action@v3 + - uses: codecov/codecov-action@v4 with: file: lcov.info + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} docs: name: Documentation runs-on: ubuntu-latest timeout-minutes: 60 steps: - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@v1 + - uses: julia-actions/setup-julia@v2 with: version: '1' - - uses: actions/cache@v3 - with: - path: | - ~/.julia/artifacts - ~/.julia/packages - ~/.julia/registries - key: .julia-docs-${{ hashFiles('docs/Project.toml', 'docs/Manifest.toml') }} + - uses: julia-actions/cache@v2 - name: Install dependencies run: julia --project=docs -e 'using Pkg; Pkg.instantiate(); Pkg.precompile()' - name: Build and deploy diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..6c219d29c1 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,11 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.2.0 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-toml + - id: check-yaml + - id: end-of-file-fixer + - id: mixed-line-ending + - id: trailing-whitespace diff --git a/CHANGELOG.md b/CHANGELOG.md index aa3b9a2a31..d0e2f25619 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -75,23 +75,23 @@ more discussion). + QuadratureRule{RefTetrahedron}(quadrature_order) ``` -- **Quadrature for face integration (FaceValues)**: replace `QuadratureRule{dim-1, +- **Quadrature for face integration (FacetValues)**: replace `QuadratureRule{dim-1, reference_shape}(quadrature_order)` with - `FaceQuadratureRule{reference_shape}(quadrature_order)`. + `FacetQuadratureRule{reference_shape}(quadrature_order)`. Examples: ```diff - # Quadrature for the faces of a quadrilateral + # Quadrature for the facets of a quadrilateral - QuadratureRule{1, RefCube}(quadrature_order) - + FaceQuadratureRule{RefQuadrilateral}(quadrature_order) + + FacetQuadratureRule{RefQuadrilateral}(quadrature_order) - # Quadrature for the faces of a triangle + # Quadrature for the facets of a triangle - QuadratureRule{1, RefTetrahedron}(quadrature_order) - + FaceQuadratureRule{RefTriangle}(quadrature_order) + + FacetQuadratureRule{RefTriangle}(quadrature_order) - # Quadrature for the faces of a hexhedron + # Quadrature for the facets of a hexhedron - QuadratureRule{2, RefCube}(quadrature_order) - + FaceQuadratureRule{RefHexahedron}(quadrature_order) + + FacetQuadratureRule{RefHexahedron}(quadrature_order) ``` - **CellValues**: replace usage of `CellScalarValues` *and* `CellVectorValues` with @@ -148,28 +148,28 @@ more discussion). + cv = CellValues(qr, ip_function, ip_geometry) ``` -- **FaceValues**: replace usage of `FaceScalarValues` *and* `FaceVectorValues` with - `FaceValues`. 
For vector valued problems the interpolation passed to `CellValues` should +- **FacetValues**: replace usage of `FaceScalarValues` *and* `FaceVectorValues` with + `FacetValues`. For vector valued problems the interpolation passed to `FacetValues` should be vectorized to a `VectorizedInterpolation` (see above). The input quadrature rule should - be a `FaceQuadratureRule` instead of a `QuadratureRule`. + be a `FacetQuadratureRule` instead of a `QuadratureRule`. Examples: ```diff - # FaceValues for a scalar problem with triangle elements + # FacetValues for a scalar problem with triangle elements - qr = QuadratureRule{1, RefTetrahedron}(quadrature_order) - ip = Lagrange{2, RefTetrahedron, 1}() - cv = FaceScalarValues(qr, ip) - + qr = FaceQuadratureRule{RefTriangle}(quadrature_order) + + qr = FacetQuadratureRule{RefTriangle}(quadrature_order) + ip = Lagrange{RefTriangle, 1}() - + cv = FaceValues(qr, ip) + + cv = FacetValues(qr, ip) # FaceValues for a vector problem with hexahedronal elements - qr = QuadratureRule{2, RefCube}(quadrature_order) - ip = Lagrange{3, RefCube, 1}() - cv = FaceVectorValues(qr, ip) - + qr = FaceQuadratureRule{RefHexahedron}(quadrature_order) + + qr = FacetQuadratureRule{RefHexahedron}(quadrature_order) + ip = Lagrange{RefHexahedron, 1}() ^ 3 - + cv = FaceValues(qr, ip) + + cv = FacetValues(qr, ip) ``` - **DofHandler construction**: it is now required to pass the interpolation explicitly when @@ -190,13 +190,69 @@ more discussion). + add!(dh, :u, Lagrange{RefTriangle, 1}()) ``` +- **Boundary conditions**: The entity enclosing a cell was previously called `face`, but is now + denoted a `facet`. When applying boundary conditions, rename `getfaceset` to `getfacetset` and + `addfaceset!` to `addfacetset!`. These sets are now described by `FacetIndex` instead of `FaceIndex`. + When looping over the `facets` of a cell, change `nfaces` to `nfacets`. + + Examples: + ```diff + # Dirichlet boundary conditions + - addfaceset!(grid, "dbc", x -> x[1] ≈ 1.0) + + addfacetset!(grid, "dbc", x -> x[1] ≈ 1.0) + + - dbc = Dirichlet(:u, getfaceset(grid, "dbc"), Returns(0.0)) + + dbc = Dirichlet(:u, getfacetset(grid, "dbc"), Returns(0.0)) + + # Neumann boundary conditions + - for facet in 1:nfaces(cell) + - if (cellid(cell), facet) ∈ getfaceset(grid, "Neumann Boundary") + + for facet in 1:nfacets(cell) + + if (cellid(cell), facet) ∈ getfacetset(grid, "Neumann Boundary") + # ... + ``` + +- **VTK Export**: The VTK export has been changed ([#692][github-692]). + ```diff + - vtk_grid(name, dh) do vtk + - vtk_point_data(vtk, dh, a) + - vtk_point_data(vtk, nodal_data, "my node data") + - vtk_point_data(vtk, proj, projected_data, "my projected data") + - vtk_cell_data(vtk, proj, projected_data, "my projected data") + + VTKGridFile(name, dh) do vtk + + write_solution(vtk, dh, a) + + write_node_data(vtk, nodal_data, "my node data") + + write_projection(vtk, proj, projected_data, "my projected data") + + write_cell_data(vtk, cell_data, "my projected data") + end + ``` + When using a `paraview_collection` for e.g. multiple timesteps, + the `VTKGridFile` object can be used instead of the type previously returned + from `vtk_grid`. + +- **Sparsity pattern and global matrix construction**: since there is now explicit support + for working with the sparsity pattern before instantiating a matrix, the function + `create_sparsity_pattern` has been removed. To recover the old functionality that returns a + sparse matrix from the DofHandler directly, use `allocate_matrix` instead. + + Examples: + ```diff + # Create sparse matrix from DofHandler + - K = create_sparsity_pattern(dh) + + K = allocate_matrix(dh) + + # Create condensed sparse matrix from DofHandler + ConstraintHandler + - K = create_sparsity_pattern(dh, ch) + + K = allocate_matrix(dh, ch) + ``` +
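As a minimal end-to-end sketch of how the renamed facet functions and the new matrix allocation fit together (the grid size, field name, and set name below are arbitrary placeholders, not taken from the changelog):
```julia
using Ferrite

# Hypothetical small setup combining the new facet- and matrix-related names.
grid = generate_grid(Triangle, (2, 2))
addfacetset!(grid, "dbc", x -> x[1] ≈ 1.0)       # previously addfaceset!

dh = DofHandler(grid)
add!(dh, :u, Lagrange{RefTriangle, 1}())
close!(dh)

ch = ConstraintHandler(dh)
add!(ch, Dirichlet(:u, getfacetset(grid, "dbc"), Returns(0.0)))  # previously getfaceset
close!(ch)

K = allocate_matrix(dh, ch)   # previously create_sparsity_pattern(dh, ch)
```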
### Added - `InterfaceValues` for computing jumps and averages over interfaces. ([#743][github-743]) - `InterfaceIterator` and `InterfaceCache` for iterating over interfaces. ([#747][github-747]) -- `FaceQuadratureRule` implementation for `RefPrism` and `RefPyramid`. ([#779][github-779]) +- `FacetQuadratureRule` implementation for `RefPrism` and `RefPyramid`. ([#779][github-779]) - The `DofHandler` now support selectively adding fields on sub-domains (rather than the full domain). This new functionality is included with the new `SubDofHandler` struct, @@ -212,8 +268,8 @@ more discussion). `Ferrite.RefSimplex{dim}`. ([#679][github-679]) - New methods for adding entitysets that are located on the boundary of the grid: - `addboundaryfaceset!`, `addboundaryedgeset!`, and `addboundaryvertexset!`. These work - similar to `addfaceset!`, `addedgeset!`, and `addvertexset!`, but filters out all + `addboundaryfacetset!` and `addboundaryvertexset!`. These work + similarly to `addfacetset!` and `addvertexset!`, but filter out all instances not on the boundary (this can be used to avoid accidental inclusion of internal entities in sets used for boundary conditions, for example). ([#606][github-606]) @@ -233,19 +289,20 @@ more discussion). - `CellValues` now support (vector) interpolations with dimension different from the spatial dimension. ([#651][github-651]) -- `FaceQuadratureRule` have been added and should be used for `FaceValues`. A - `FaceQuadratureRule` for integration of the faces of e.g. a triangle can be constructed by - `FaceQuadratureRule{RefTriangle}(order)` (similar to how `QuadratureRule` is constructed). +- `FacetQuadratureRule` has been added and should be used for `FacetValues`. A + `FacetQuadratureRule` for integration of the facets of e.g. a triangle can be constructed by + `FacetQuadratureRule{RefTriangle}(order)` (similar to how `QuadratureRule` is constructed). ([#716][github-716]) -- New methods `shape_value(::Interpolation, ξ::Vec, i::Int)` and - `shape_gradient(::Interpolation, ξ::Vec, i::Int)` for evaluating the value/gradient of the - `i`th shape function of an interpolation in local reference coordinate `ξ`. Note that - these methods return the value/gradient wrt. the reference coordinate `ξ`, whereas the - corresponding methods for `CellValues` etc return the value/gradient wrt the spatial - coordinate `x`. ([#721][github-721]) +- New functions `Ferrite.reference_shape_value(::Interpolation, ξ::Vec, i::Int)` and + `Ferrite.reference_shape_gradient(::Interpolation, ξ::Vec, i::Int)` for evaluating the + value/gradient of the `i`th shape function of an interpolation in local reference + coordinate `ξ`. These methods are public but not exported. (Note that these methods return + the value/gradient wrt. the reference coordinate `ξ`, whereas the corresponding methods + for `CellValues` etc return the value/gradient wrt the spatial coordinate `x`.) + ([#721][github-721]) -- `FaceIterator` and `FaceCache` have been added. These work similarly to `CellIterator` and +- `FacetIterator` and `FacetCache` have been added. These work similarly to `CellIterator` and `CellCache` but are used to iterate over (boundary) face sets instead.
These simplify boundary integrals in general, and in particular Neumann boundary conditions are more convenient to implement now that you can loop directly over the face set instead of @@ -254,19 +311,41 @@ more discussion). - The `ConstraintHandler` now support adding Dirichlet boundary conditions on discontinuous interpolations. ([#729][github-729]) -- All keyword arguments to `vtk_grid` are now passed on to `WriteVTK.vtk_grid` (only - `compress` was supported earlier). ([#687][github-687]) - - `collect_periodic_faces` now have a keyword argument `tol` that can be used to relax the default tolerance when necessary. ([#749][github-749]) - VTK export now work with `QuadraticHexahedron` elements. ([#714][github-714]) +- The function `bounding_box(::AbstractGrid)` has been added. It computes the bounding box for + a given grid (based on its node coordinates), and returns the minimum and maximum vertices + of the bounding box. ([#880][github-880]) + +- Support for working with sparsity patterns has been added. This means that Ferrite exposes + the intermediate "state" between the DofHandler and the instantiated matrix as the new + struct `SparsityPattern`. This makes it possible to insert custom equations or couplings in + the pattern before instantiating the matrix. The function `create_sparsity_pattern` has + been removed. The new function `allocate_matrix` is instead used to instantiate the + matrix. Refer to the documentation for more details. ([#888][github-888]) + + **To upgrade**: if you want to recover the old functionality and don't need to work with + the pattern, replace any usage of `create_sparsity_pattern` with `allocate_matrix`. + +- A new function, `geometric_interpolation`, is exported, which gives the geometric interpolation + for each cell type. This is equivalent to the deprecated `Ferrite.default_interpolation` function. + ([#953][github-953]) + +- CellValues and FacetValues can now store and map second order gradients (Hessians). The number + of gradients computed in CellValues/FacetValues is specified using the keyword arguments + `update_gradients::Bool` (default true) and `update_hessians::Bool` (default false) in the + constructors, i.e. `CellValues(...; update_hessians=true)`. ([#938][github-938]) + +- `L2Projector` supports projecting on grids with mixed celltypes. ([#949][github-949]) + ### Changed -- `create_sparsity_pattern` now supports cross-element dof coupling by passing kwarg - `topology` along with an optional `cross_coupling` matrix that behaves similar to - the `coupling` kwarg. ([#710][github-#710]) +- It is now possible to create sparsity patterns with interface couplings, see the new + function `add_interface_entries!` and the rework of sparsity pattern construction. + ([#710][github-#710]) - The `AbstractCell` interface has been reworked. This change should not affect user code, but may in some cases be relevant for code parsing external mesh files. In particular, the @@ -276,7 +355,7 @@ more discussion). **To upgrade** replace any usage of `Cell{...}(...)` with calls to the concrete implementations. -- The default geometric mapping in `CellValues` and `FaceValues` have changed. The new +- The default geometric mapping in `CellValues` and `FacetValues` has changed. The new default is to always use `Lagrange{refshape, 1}()`, i.e. linear Lagrange polynomials, for the geometric interpolation. Previously, the function interpolation was (re) used also for the geometry interpolation. ([#695][github-695])
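For illustration, a short sketch of the difference (the quadratic function interpolation here is an arbitrary example choice):
```julia
using Ferrite

ip = Lagrange{RefTriangle, 2}()      # quadratic function interpolation
qr = QuadratureRule{RefTriangle}(2)

# New default: the geometry is interpolated with Lagrange{RefTriangle, 1}(),
# independently of the function interpolation.
cv = CellValues(qr, ip)

# Passing the geometric interpolation explicitly recovers the old behavior
# of (re)using the function interpolation for the geometry.
cv_old = CellValues(qr, ip, ip)
```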
@@ -312,27 +391,65 @@ more discussion). `Lagrange{RefTriangle}(order)`, etc. - `CellScalarValues` and `CellVectorValues` have been merged into `CellValues`, - `FaceScalarValues` and `FaceVectorValues` have been merged into `FaceValues`, and + `FaceScalarValues` and `FaceVectorValues` have been merged into `FacetValues`, and `PointScalarValues` and `PointVectorValues` have been merged into `PointValues`. The differentiation between scalar and vector have thus been moved to the interpolation (see above). Note that previously `CellValues`, `FaceValues`, and `PointValues` where abstract - types, but they are now concrete implementations with *different type parameters*. - ([#708][github-708]) + types, but they are now concrete implementations with *different type parameters*, except + `FaceValues`, which is now `FacetValues`. ([#708][github-708]) **To upgrade**, for scalar problems, it is enough to replace `CellScalarValues` with - `CellValues`, `FaceScalarValues` with `FaceValues` and `PointScalarValues` with + `CellValues`, `FaceScalarValues` with `FacetValues` and `PointScalarValues` with `PointValues`, respectively. For vector problems, make sure to vectorize the interpolation (see above) and then replace `CellVectorValues` with `CellValues`, `FaceVectorValues` with - `FaceValues`, and `PointVectorValues` with `PointValues`. + `FacetValues`, and `PointVectorValues` with `PointValues`. -- The quadrature rule passed to `FaceValues` should now be of type `FaceQuadratureRule` +- The quadrature rule passed to `FacetValues` should now be of type `FacetQuadratureRule` rather than of type `QuadratureRule`. ([#716][github-716]) - **To upgrade** replace the quadrature rule passed to `FaceValues` with a - `FaceQuadratureRule`. + **To upgrade** replace the quadrature rule passed to `FacetValues` with a + `FacetQuadratureRule`. - Checking if a face `(ele_id, local_face_id) ∈ faceset` has been previously implemented by type piracy. In order to be invariant to the underlying `Set` datatype as well as omitting type piracy, ([#835][github-835]) implemented `isequal` and `hash` for `BoundaryIndex` datatypes. +- **VTK export**: Ferrite no longer extends `WriteVTK.vtk_grid` and associated functions, + instead the new type `VTKGridFile` should be used. New methods exist for writing to + a `VTKGridFile`, e.g. `write_solution`, `write_cell_data`, `write_node_data`, and `write_projection`. + See [#692][github-692]. + +- **Definitions**: Previously, `face` and `edge` referred to codimension 1 entities relative to the reference shape. + In Ferrite v1, `volume`, `face`, `edge`, and `vertex` refer to 3, 2, 1, and 0 dimensional entities, + and `facet` replaces the old definition of `face`. No direct replacement for `edges` exists. + See [#789][github-789] and [#914][github-914]. + The main implications of this change are + * `FaceIndex` -> `FacetIndex` (`FaceIndex` still exists, but has a different meaning) + * `FaceValues` -> `FacetValues` + * `nfaces` -> `nfacets` (`nfaces` is now an internal method with different meaning) + * `addfaceset!` -> `addfacetset!` + * `getfaceset` -> `getfacetset` + + Furthermore, subtypes of `Interpolation` should now define `vertexdof_indices`, `edgedof_indices`, + `facedof_indices`, `volumedof_indices` (and similar) according to these definitions. + +- `Ferrite.getdim` has been split into `Ferrite.getrefdim` for getting the dimension of the reference shape + and `Ferrite.getspatialdim` for getting the spatial dimension (of the grid).
([#943][github-943]) + +- `Ferrite.getfielddim(::AbstractDofHandler, args...)` has been renamed to `Ferrite.n_components`. + ([#943][github-943]) + +- The constructor for `ExclusiveTopology` only accepts an `AbstractGrid` as input, + removing the alternative of providing a `Vector{<:AbstractCell}`, as knowing the + spatial dimension is required for correct code paths. + Furthermore, it uses a new internal data structure, `ArrayOfVectorViews`, to store the neighborhood + information more efficiently. The datatype for the neighborhood has thus changed to a view of a vector, + instead of the now removed `EntityNeighborhood` container. This also applies to `vertex_star_stencils`. + ([#974][github-974]). + +- `project(::L2Projector, data, qr_rhs)` now expects data to be indexed by the cellid, as opposed to + the index in the vector of cellids passed to the `L2Projector`. The data may be passed as an + `AbstractDict{Int, <:AbstractVector}`, as an alternative to `AbstractArray{<:AbstractVector}`. + ([#949][github-949]) + ### Deprecated - The rarely (if ever) used methods of `function_value`, `function_gradient`, @@ -353,6 +470,8 @@ more discussion). - `transform!` have been deprecated in favor of `transform_coordinates!`. ([#754][github-754]) +- `Ferrite.default_interpolation` has been deprecated in favor of `geometric_interpolation`. ([#953][github-953]) + ### Removed - `MixedDofHandler` + `FieldHandler` have been removed in favor of `DofHandler` + @@ -376,6 +495,17 @@ more discussion). now you can still use them by prefixing `Ferrite.`, e.g. `Ferrite.getweights`.) ([#754][github-754]) +- The `onboundary` function (and the associated `boundary_matrix` property of the `Grid` + datastructure) has been removed ([#924][github-924]). Instead of first checking + `onboundary` and then checking whether a facet belongs to a specific facetset, check the + facetset directly. For example: + ```diff + - if onboundary(cell, local_face_id) && (cell_id, local_face_id) in getfaceset(grid, "traction_boundary") + + if (cell_id, local_face_id) in getfacetset(grid, "traction_boundary") + # integrate the "traction_boundary" boundary + end + ``` + ### Fixed - Benchmarks now work with master branch.
([#751][github-#751], [#855][github-#855]) @@ -854,6 +984,7 @@ poking into Ferrite internals: [github-684]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/684 [github-687]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/687 [github-688]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/688 +[github-692]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/692 [github-694]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/694 [github-695]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/695 [github-697]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/697 @@ -889,5 +1020,15 @@ poking into Ferrite internals: [github-756]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/756 [github-759]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/759 [github-779]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/779 +[github-789]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/789 [github-835]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/835 [github-855]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/855 +[github-880]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/880 +[github-888]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/888 +[github-914]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/914 +[github-924]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/924 +[github-938]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/938 +[github-943]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/943 +[github-949]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/949 +[github-953]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/953 +[github-974]: https://github.com/Ferrite-FEM/Ferrite.jl/pull/974 diff --git a/Project.toml b/Project.toml index c18ee1634a..af5e162e41 100644 --- a/Project.toml +++ b/Project.toml @@ -4,8 +4,10 @@ version = "0.3.14" [deps] EnumX = "4e289a0a-7415-4d19-859d-a7e5c4648b56" +ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" NearestNeighbors = "b8a86587-4115-5ab1-83bc-aa920d37bbce" +OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" Preferences = "21216c6a-2e73-6563-6e65-726566657250" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" @@ -22,15 +24,16 @@ FerriteBlockArrays = "BlockArrays" FerriteMetis = "Metis" [compat] -BlockArrays = "0.16" +BlockArrays = "0.16, 1" EnumX = "1" Metis = "1.3" NearestNeighbors = "0.4" +OrderedCollections = "1" Preferences = "1" Reexport = "1" Tensors = "1.14" WriteVTK = "1.13" -julia = "1.6" +julia = "1.9" [extras] BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" @@ -39,10 +42,10 @@ FerriteGmsh = "4f95f4f8-b27c-4ae5-9a39-ea55e634e36b" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" Gmsh = "705231aa-382f-11e9-3f0c-b7cb4346fdeb" IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" -JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b" Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" Metis = "2679e427-3c69-5b7f-982b-ece356f1e94b" NBInclude = "0db19996-df87-5ea3-a455-e3a50d440464" +Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SHA = "ea8e919c-243c-51af-8825-aaa63cd721ce" @@ -50,4 +53,4 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" [targets] -test = ["BlockArrays", "Downloads", "FerriteGmsh", "ForwardDiff", "Gmsh", "IterativeSolvers", "JET", "Metis", "NBInclude", "ProgressMeter", "Random", "SHA", "Test", "TimerOutputs", "Logging"] +test = ["BlockArrays", "Downloads", "FerriteGmsh", "ForwardDiff", 
"Gmsh", "IterativeSolvers", "Metis", "Pkg", "NBInclude", "ProgressMeter", "Random", "SHA", "Test", "TimerOutputs", "Logging"] diff --git a/README.md b/README.md index c10bc0e73b..3377e83b05 100644 --- a/README.md +++ b/README.md @@ -7,9 +7,7 @@ ![Build Status](https://github.com/Ferrite-FEM/Ferrite.jl/workflows/CI/badge.svg?event=push) [![codecov.io](http://codecov.io/github/Ferrite-FEM/Ferrite.jl/coverage.svg?branch=master)](http://codecov.io/github/Ferrite-FEM/Ferrite.jl?branch=master) -A simple finite element toolbox written in Julia. - -**Note:** This package was originally called JuAFEM.jl, but has now been renamed to Ferrite.jl. +A finite element toolbox written in Julia. ## Documentation @@ -41,6 +39,13 @@ If you encounter what you think is a bug please report it, see Please keep in mind that we are part of the Julia community and adhere to the [Julia Community Standards][standards]. +## Related packages +The following registered packages are part of the `Ferrite.jl` ecosystem in addition to Ferrite itself: +* [Tensors.jl][Tensors]: Used throughout Ferrite for efficient tensor manipulation. +* [FerriteViz.jl][FerriteViz]: [Makie.jl][Makie]-based visualization of Ferrite data. +* [FerriteGmsh.jl][FerriteGmsh]: Create, interact with, and import [Gmsh][Gmsh] meshes into Ferrite. +* [FerriteMeshParser.jl][FerriteMeshParser]: Parse the mesh from Abaqus input files into a Ferrite mesh. + [docs-stable-img]: https://img.shields.io/badge/docs-latest%20release-blue [docs-stable-url]: http://ferrite-fem.github.io/Ferrite.jl/ @@ -49,3 +54,10 @@ Please keep in mind that we are part of the Julia community and adhere to the [julia-slack]: https://julialang.org/slack/ [julia-zulip]: https://julialang.zulipchat.com/ [gh-discussion]: https://github.com/Ferrite-FEM/Ferrite.jl/discussions/new + +[Tensors]: https://github.com/Ferrite-FEM/Tensors.jl +[FerriteViz]: https://github.com/Ferrite-FEM/FerriteViz.jl +[FerriteGmsh]: https://github.com/Ferrite-FEM/FerriteGmsh.jl +[FerriteMeshParser]: https://github.com/Ferrite-FEM/FerriteMeshParser.jl +[Makie]: https://docs.makie.org/stable/ +[Gmsh]: https://gmsh.info/ diff --git a/benchmark/benchmarks-assembly.jl b/benchmark/benchmarks-assembly.jl index 6744804907..619185f602 100644 --- a/benchmark/benchmarks-assembly.jl +++ b/benchmark/benchmarks-assembly.jl @@ -14,7 +14,7 @@ for spatial_dim ∈ 1:3 grid = generate_grid(geo_type, tuple(repeat([2], spatial_dim)...)); topology = ExclusiveTopology(grid) - ip_geo = Ferrite.default_interpolation(geo_type) + ip_geo = Ferrite.geometric_interpolation(geo_type) ref_type = FerriteBenchmarkHelper.getrefshape(geo_type) # Nodal interpolation tests @@ -61,26 +61,26 @@ for spatial_dim ∈ 1:3 LAGRANGE_SUITE["petrov-galerkin"]["pressure-velocity"] = @benchmarkable FerriteAssemblyHelper._generalized_petrov_galerkin_assemble_local_matrix($grid, $cvv, shape_divergence, $csv, shape_value, *) if spatial_dim > 1 - qr_face = FaceQuadratureRule{ref_type}(2*order-1) - fsv = FaceValues(qr_face, ip, ip_geo); - fsv2 = FaceValues(qr_face, ip, ip_geo); + qr_facet = FacetQuadratureRule{ref_type}(2*order-1) + fsv = FacetValues(qr_facet, ip, ip_geo); + fsv2 = FacetValues(qr_facet, ip, ip_geo); LAGRANGE_SUITE["ritz-galerkin"]["face-flux"] = @benchmarkable FerriteAssemblyHelper._generalized_ritz_galerkin_assemble_local_matrix($grid, $fsv, shape_gradient, shape_value, *) LAGRANGE_SUITE["petrov-galerkin"]["face-flux"] = @benchmarkable FerriteAssemblyHelper._generalized_petrov_galerkin_assemble_local_matrix($grid, $fsv, shape_gradient, $fsv2, 
shape_value, *) - + ip = DiscontinuousLagrange{ref_type, order}() - isv = InterfaceValues(qr_face, ip, ip_geo); - isv2 = InterfaceValues(qr_face, ip, ip_geo); + isv = InterfaceValues(qr_facet, ip, ip_geo); + isv2 = InterfaceValues(qr_facet, ip, ip_geo); dh = DofHandler(grid) add!(dh, :u, ip) close!(dh) LAGRANGE_SUITE["ritz-galerkin"]["interface-{grad}⋅[[val]]"] = @benchmarkable FerriteAssemblyHelper._generalized_ritz_galerkin_assemble_interfaces($dh, $isv, shape_gradient_average, shape_value_jump, ⋅) LAGRANGE_SUITE["petrov-galerkin"]["interface-{grad}⋅[[val]]"] = @benchmarkable FerriteAssemblyHelper._generalized_petrov_galerkin_assemble_interfaces($dh, $isv, shape_gradient_average, $isv2, shape_value_jump, ⋅) - + LAGRANGE_SUITE["ritz-galerkin"]["interface-interior-penalty"] = @benchmarkable FerriteAssemblyHelper._generalized_ritz_galerkin_assemble_interfaces($dh, $isv, shape_value_jump, shape_value_jump, ⋅) LAGRANGE_SUITE["petrov-galerkin"]["interface-interior-penalty"] = @benchmarkable FerriteAssemblyHelper._generalized_petrov_galerkin_assemble_interfaces($dh, $isv, shape_value_jump, $isv2, shape_value_jump, ⋅) - + end end end diff --git a/benchmark/benchmarks-boundary-conditions.jl b/benchmark/benchmarks-boundary-conditions.jl index 284a3e7a19..6964196f70 100644 --- a/benchmark/benchmarks-boundary-conditions.jl +++ b/benchmark/benchmarks-boundary-conditions.jl @@ -13,7 +13,7 @@ for spatial_dim ∈ [2] geo_type = Quadrilateral grid = generate_grid(geo_type, ntuple(x->2, spatial_dim)); ref_type = FerriteBenchmarkHelper.getrefshape(geo_type) - ip_geo = Ferrite.default_interpolation(geo_type) + ip_geo = Ferrite.geometric_interpolation(geo_type) order = 2 # assemble a mass matrix to apply BCs on (because its cheap) @@ -25,19 +25,17 @@ for spatial_dim ∈ [2] close!(dh); ch = ConstraintHandler(dh); - ∂Ω = union(getfaceset.((grid, ), ["left"])...); + ∂Ω = union(getfacetset.((grid, ), ["left"])...); dbc = Dirichlet(:u, ∂Ω, (x, t) -> 0) add!(ch, dbc); close!(ch); # Non-symmetric application M, f = FerriteAssemblyHelper._assemble_mass(dh, cellvalues, false); - DIRICHLET_SUITE["global"]["apply!(M,f,APPLY_TRANSPOSE)"] = @benchmarkable apply!($M, $f, $ch; strategy=$(Ferrite.APPLY_TRANSPOSE)); - DIRICHLET_SUITE["global"]["apply!(M,f,APPLY_INPLACE)"] = @benchmarkable apply!($M, $f, $ch; strategy=$(Ferrite.APPLY_INPLACE)); + DIRICHLET_SUITE["global"]["apply!(M,f)"] = @benchmarkable apply!($M, $f, $ch); # Symmetric application M, f = FerriteAssemblyHelper._assemble_mass(dh, cellvalues, true); - DIRICHLET_SUITE["global"]["apply!(M_sym,f,APPLY_TRANSPOSE)"] = @benchmarkable apply!($M, $f, $ch; strategy=$(Ferrite.APPLY_TRANSPOSE)); - DIRICHLET_SUITE["global"]["apply!(M_sym,f,APPLY_INPLACE)"] = @benchmarkable apply!($M, $f, $ch; strategy=$(Ferrite.APPLY_INPLACE)); + DIRICHLET_SUITE["global"]["apply!(M_sym,f)"] = @benchmarkable apply!($M, $f, $ch); DIRICHLET_SUITE["global"]["apply!(f)"] = @benchmarkable apply!($f, $ch); DIRICHLET_SUITE["global"]["apply_zero!(f)"] = @benchmarkable apply!($f, $ch); diff --git a/benchmark/helper.jl b/benchmark/helper.jl index 8595058565..369681b1db 100644 --- a/benchmark/helper.jl +++ b/benchmark/helper.jl @@ -1,6 +1,7 @@ module FerriteBenchmarkHelper using Ferrite +using LinearAlgebra: Symmetric function geo_types_for_spatial_dim(spatial_dim) spatial_dim == 1 && return [Line, QuadraticLine] @@ -40,22 +41,22 @@ function _generalized_ritz_galerkin_assemble_local_matrix(grid::Ferrite.Abstract Ke end -function _generalized_ritz_galerkin_assemble_local_matrix(grid::Ferrite.AbstractGrid, 
facevalues::FaceValues, f_shape, f_test, op) - n_basefuncs = getnbasefunctions(facevalues) +function _generalized_ritz_galerkin_assemble_local_matrix(grid::Ferrite.AbstractGrid, facetvalues::FacetValues, f_shape, f_test, op) + n_basefuncs = getnbasefunctions(facetvalues) f = zeros(n_basefuncs) X = getcoordinates(grid, 1) - for face in 1:nfaces(getcells(grid)[1]) - reinit!(facevalues, X, face) + for facet in 1:nfacets(getcells(grid)[1]) + reinit!(facetvalues, X, facet) - for q_point in 1:getnquadpoints(facevalues) - n = getnormal(facevalues, q_point) - dΓ = getdetJdV(facevalues, q_point) + for q_point in 1:getnquadpoints(facetvalues) + n = getnormal(facetvalues, q_point) + dΓ = getdetJdV(facetvalues, q_point) for i in 1:n_basefuncs - test = f_test(facevalues, q_point, i) + test = f_test(facetvalues, q_point, i) for j in 1:n_basefuncs - shape = f_shape(facevalues, q_point, j) + shape = f_shape(facetvalues, q_point, j) f[i] += op(test, shape) ⋅ n * dΓ end end @@ -118,25 +119,25 @@ function _generalized_petrov_galerkin_assemble_local_matrix(grid::Ferrite.Abstra Ke end -function _generalized_petrov_galerkin_assemble_local_matrix(grid::Ferrite.AbstractGrid, facevalues_shape::FaceValues, f_shape, facevalues_test::FaceValues, f_test, op) - n_basefuncs_shape = getnbasefunctions(facevalues_shape) - n_basefuncs_test = getnbasefunctions(facevalues_test) +function _generalized_petrov_galerkin_assemble_local_matrix(grid::Ferrite.AbstractGrid, facetvalues_shape::FacetValues, f_shape, facetvalues_test::FacetValues, f_test, op) + n_basefuncs_shape = getnbasefunctions(facetvalues_shape) + n_basefuncs_test = getnbasefunctions(facetvalues_test) f = zeros(n_basefuncs_test) X_shape = getcoordinates(grid, 1) X_test = getcoordinates(grid, 1) - for face in 1:nfaces(getcells(grid)[1]) - reinit!(facevalues_shape, X_shape, face) - reinit!(facevalues_test, X_test, face) + for facet in 1:nfacets(getcells(grid)[1]) + reinit!(facetvalues_shape, X_shape, facet) + reinit!(facetvalues_test, X_test, facet) - for q_point in 1:getnquadpoints(facevalues_shape) - n = getnormal(facevalues_test, q_point) - dΓ = getdetJdV(facevalues_test, q_point) + for q_point in 1:getnquadpoints(facetvalues_shape) + n = getnormal(facetvalues_test, q_point) + dΓ = getdetJdV(facetvalues_test, q_point) for i in 1:n_basefuncs_test - test = f_test(facevalues_test, q_point, i) + test = f_test(facetvalues_test, q_point, i) for j in 1:n_basefuncs_shape - shape = f_shape(facevalues_shape, q_point, j) + shape = f_shape(facetvalues_shape, q_point, j) f[i] += op(test, shape) ⋅ n * dΓ end end @@ -177,7 +178,7 @@ function _assemble_mass(dh, cellvalues, sym) Me = zeros(n_basefuncs, n_basefuncs) fe = zeros(n_basefuncs) - M = sym ? create_symmetric_sparsity_pattern(dh) : create_sparsity_pattern(dh); + M = sym ? 
Symmetric(allocate_matrix(dh)) : allocate_matrix(dh); f = zeros(ndofs(dh)) assembler = start_assemble(M, f); diff --git a/docs/Makefile b/docs/Makefile index b44d74b850..6646f84817 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -5,4 +5,7 @@ default: liveserver liveserver: julia --project=${SRCDIR} ${SRCDIR}/liveserver.jl -.PHONY: default liveserver +clean: + julia ${SRCDIR}/clean.jl + +.PHONY: default liveserver clean diff --git a/docs/Manifest.toml b/docs/Manifest.toml index 2c9f66289c..080b04599b 100644 --- a/docs/Manifest.toml +++ b/docs/Manifest.toml @@ -1,13 +1,18 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.9.3" +julia_version = "1.10.4" manifest_format = "2.0" -project_hash = "8722b6a67abf1b81e2c7808ddfcfb1d737cd0040" +project_hash = "2283fea1dfcfa0f2c82cdecbc5750875c26d6009" [[deps.ADTypes]] -git-tree-sha1 = "5d2e21d7b0d8c22f67483ef95ebdc39c0e6b6003" +git-tree-sha1 = "3a6511b6e54550bcbc986c560921a8cd7761fcd8" uuid = "47edcb42-4c32-4615-8424-f2b9edc5f35b" -version = "0.2.4" +version = "1.5.1" +weakdeps = ["ChainRulesCore", "EnzymeCore"] + + [deps.ADTypes.extensions] + ADTypesChainRulesCoreExt = "ChainRulesCore" + ADTypesEnzymeCoreExt = "EnzymeCore" [[deps.ANSIColoredPrinters]] git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c" @@ -15,15 +20,36 @@ uuid = "a4c015fc-c6ff-483c-b24f-f7ea428134e9" version = "0.0.1" [[deps.AbstractTrees]] -git-tree-sha1 = "faa260e4cb5aba097a73fab382dd4b5819d8ec8c" +git-tree-sha1 = "2d9c9a55f9c93e8887ad391fbae72f8ef55e1177" uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" -version = "0.4.4" +version = "0.4.5" + +[[deps.Accessors]] +deps = ["CompositionsBase", "ConstructionBase", "Dates", "InverseFunctions", "LinearAlgebra", "MacroTools", "Markdown", "Test"] +git-tree-sha1 = "c0d491ef0b135fd7d63cbc6404286bc633329425" +uuid = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" +version = "0.1.36" + + [deps.Accessors.extensions] + AccessorsAxisKeysExt = "AxisKeys" + AccessorsIntervalSetsExt = "IntervalSets" + AccessorsStaticArraysExt = "StaticArrays" + AccessorsStructArraysExt = "StructArrays" + AccessorsUnitfulExt = "Unitful" + + [deps.Accessors.weakdeps] + AxisKeys = "94b1ba4f-4ee9-5380-92f1-94cde586c3c5" + IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953" + Requires = "ae029012-a4dd-5104-9daa-d747884805df" + StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" + StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" + Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" [[deps.Adapt]] deps = ["LinearAlgebra", "Requires"] -git-tree-sha1 = "76289dc51920fdc6e0013c872ba9551d54961c24" +git-tree-sha1 = "6a55b747d1812e699320963ffde36f1ebdda4099" uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -version = "3.6.2" +version = "4.0.4" weakdeps = ["StaticArrays"] [deps.Adapt.extensions] @@ -35,21 +61,24 @@ version = "1.1.1" [[deps.ArnoldiMethod]] deps = ["LinearAlgebra", "Random", "StaticArrays"] -git-tree-sha1 = "62e51b39331de8911e4a7ff6f5aaf38a5f4cc0ae" +git-tree-sha1 = "d57bd3762d308bded22c3b82d033bff85f6195c6" uuid = "ec485272-7323-5ecc-a04f-4719b315124d" -version = "0.2.0" +version = "0.4.0" [[deps.ArrayInterface]] -deps = ["Adapt", "LinearAlgebra", "Requires", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "f83ec24f76d4c8f525099b2ac475fc098138ec31" +deps = ["Adapt", "LinearAlgebra", "SparseArrays", "SuiteSparse"] +git-tree-sha1 = "ed2ec3c9b483842ae59cd273834e5b46206d6dda" uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" -version = "7.4.11" +version = "7.11.0" [deps.ArrayInterface.extensions] ArrayInterfaceBandedMatricesExt = 
"BandedMatrices" ArrayInterfaceBlockBandedMatricesExt = "BlockBandedMatrices" ArrayInterfaceCUDAExt = "CUDA" + ArrayInterfaceCUDSSExt = "CUDSS" + ArrayInterfaceChainRulesExt = "ChainRules" ArrayInterfaceGPUArraysCoreExt = "GPUArraysCore" + ArrayInterfaceReverseDiffExt = "ReverseDiff" ArrayInterfaceStaticArraysCoreExt = "StaticArraysCore" ArrayInterfaceTrackerExt = "Tracker" @@ -57,21 +86,18 @@ version = "7.4.11" BandedMatrices = "aae01518-5342-5314-be14-df237901396f" BlockBandedMatrices = "ffab5731-97b5-5995-9138-79e8c1846df0" CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" + CUDSS = "45b445bb-4962-46a0-9369-b4df9d0f772e" + ChainRules = "082447d4-558c-5d27-93f4-14fc19e9eca2" GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" + ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" StaticArraysCore = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" -[[deps.ArrayInterfaceCore]] -deps = ["LinearAlgebra", "SnoopPrecompile", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "e5f08b5689b1aad068e01751889f2f615c7db36d" -uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" -version = "0.1.29" - [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra"] -git-tree-sha1 = "9a731850434825d183af39c6e6cd0a1c32dd7e20" +git-tree-sha1 = "600078184f7de14b3e60efe13fc0ba5c59f6dca5" uuid = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" -version = "1.4.2" +version = "1.10.0" weakdeps = ["SparseArrays"] [deps.ArrayLayouts.extensions] @@ -101,9 +127,9 @@ uuid = "f1be7e48-bf82-45af-a471-ae754a193061" version = "0.2.20" [[deps.BitFlags]] -git-tree-sha1 = "43b1a4a8f797c1cddadf60499a8a077d4af2cd2d" +git-tree-sha1 = "0691e34b3bb8be9307330f88d1a3c3f25466c24d" uuid = "d1d4a3ce-64b1-5f1a-9ba4-7e7e69966f35" -version = "0.1.7" +version = "0.1.9" [[deps.BitTwiddlingConvenienceFunctions]] deps = ["Static"] @@ -113,33 +139,39 @@ version = "0.1.5" [[deps.BlockArrays]] deps = ["ArrayLayouts", "FillArrays", "LinearAlgebra"] -git-tree-sha1 = "54cd829dd26330c42e1cf9df68470dd4df602c61" +git-tree-sha1 = "5c0ffe1dff8cb7112de075f1b1cb32191675fcba" uuid = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -version = "0.16.38" +version = "1.1.0" + + [deps.BlockArrays.extensions] + BlockArraysBandedMatricesExt = "BandedMatrices" + + [deps.BlockArrays.weakdeps] + BandedMatrices = "aae01518-5342-5314-be14-df237901396f" [[deps.Bzip2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "19a35467a82e236ff51bc17a3a44b69ef35185a2" +git-tree-sha1 = "9e2a6b69137e6969bab0152632dcb3bc108c8bdd" uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0" -version = "1.0.8+0" +version = "1.0.8+1" [[deps.CPUSummary]] deps = ["CpuId", "IfElse", "PrecompileTools", "Static"] -git-tree-sha1 = "601f7e7b3d36f18790e2caf83a882d88e9b71ff1" +git-tree-sha1 = "585a387a490f1c4bd88be67eea15b93da5e85db7" uuid = "2a0fbf3d-bb9c-48f3-b0a9-814d99fd7ab9" -version = "0.2.4" +version = "0.2.5" [[deps.Cairo_jll]] -deps = ["Artifacts", "Bzip2_jll", "CompilerSupportLibraries_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = "4b859a208b2397a7a623a03449e4636bdb17bcf2" +deps = ["Artifacts", "Bzip2_jll", "CompilerSupportLibraries_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] +git-tree-sha1 = "a2f1c8c668c8e3cb4cca4e57a8efdb09067bb3fd" uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a" -version = 
"1.16.1+1" +version = "1.18.0+2" [[deps.ChainRulesCore]] deps = ["Compat", "LinearAlgebra"] -git-tree-sha1 = "e0af648f0692ec1691b5d094b8724ba1346281cf" +git-tree-sha1 = "71acdbf594aab5bbb2cec89b208c41b4c411e49f" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -version = "1.18.0" +version = "1.24.0" weakdeps = ["SparseArrays"] [deps.ChainRulesCore.extensions] @@ -153,21 +185,21 @@ version = "0.1.12" [[deps.CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] -git-tree-sha1 = "cd67fc487743b2f0fd4380d4cbd3a24660d0eec8" +git-tree-sha1 = "59939d8a997469ee05c4b4944560a820f9ba0d73" uuid = "944b1d66-785c-5afd-91f1-9de20f533193" -version = "0.7.3" +version = "0.7.4" [[deps.ColorSchemes]] deps = ["ColorTypes", "ColorVectorSpace", "Colors", "FixedPointNumbers", "PrecompileTools", "Random"] -git-tree-sha1 = "67c1f244b991cad9b0aa4b7540fb758c2488b129" +git-tree-sha1 = "4b270d6465eb21ae89b732182c20dc165f8bf9f2" uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4" -version = "3.24.0" +version = "3.25.0" [[deps.ColorTypes]] deps = ["FixedPointNumbers", "Random"] -git-tree-sha1 = "eb7f0f8307f71fac7c606984ea5fb2817275d6e4" +git-tree-sha1 = "b10d0b65641d57b8b4d5e234446582de5047050d" uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f" -version = "0.11.4" +version = "0.11.5" [[deps.ColorVectorSpace]] deps = ["ColorTypes", "FixedPointNumbers", "LinearAlgebra", "Requires", "Statistics", "TensorCore"] @@ -181,9 +213,9 @@ weakdeps = ["SpecialFunctions"] [[deps.Colors]] deps = ["ColorTypes", "FixedPointNumbers", "Reexport"] -git-tree-sha1 = "fc08e5930ee9a4e03f84bfb5211cb54e7769758a" +git-tree-sha1 = "362a287c3aa50601b0bc359053d5c2468f0e7ce0" uuid = "5ae59095-9a9b-59fe-a467-6f913c188581" -version = "0.12.10" +version = "0.12.11" [[deps.CommonSolve]] git-tree-sha1 = "0eee5eb66b1cf62cd6ad1b460238e60e4b09400c" @@ -197,10 +229,10 @@ uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" version = "0.3.0" [[deps.Compat]] -deps = ["UUIDs"] -git-tree-sha1 = "8a62af3e248a8c4bad6b32cbbe663ae02275e32c" +deps = ["TOML", "UUIDs"] +git-tree-sha1 = "b1c55339b7c6c350ee89f2c1604299660525b248" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "4.10.0" +version = "4.15.0" weakdeps = ["Dates", "LinearAlgebra"] [deps.Compat.extensions] @@ -209,7 +241,16 @@ weakdeps = ["Dates", "LinearAlgebra"] [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" -version = "1.0.5+0" +version = "1.1.1+0" + +[[deps.CompositionsBase]] +git-tree-sha1 = "802bb88cd69dfd1509f6670416bd4434015693ad" +uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b" +version = "0.1.2" +weakdeps = ["InverseFunctions"] + + [deps.CompositionsBase.extensions] + CompositionsBaseInverseFunctionsExt = "InverseFunctions" [[deps.ConcreteStructs]] git-tree-sha1 = "f749037478283d372048690eb3b5f92a79432b34" @@ -218,15 +259,15 @@ version = "0.2.3" [[deps.ConcurrentUtilities]] deps = ["Serialization", "Sockets"] -git-tree-sha1 = "5372dbbf8f0bdb8c700db5367132925c0771ef7e" +git-tree-sha1 = "6cbbd4d241d7e6579ab354737f4dd95ca43946e1" uuid = "f0e56b4a-5159-44fe-b623-3e5288b988bb" -version = "2.2.1" +version = "2.4.1" [[deps.ConstructionBase]] deps = ["LinearAlgebra"] -git-tree-sha1 = "c53fc348ca4d40d7b371e71fd52251839080cbc9" +git-tree-sha1 = "260fd2400ed2dab602a7c15cf10c1933c59930a2" uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9" -version = "1.5.4" +version = "1.5.5" [deps.ConstructionBase.extensions] ConstructionBaseIntervalSetsExt = "IntervalSets" @@ -237,9 +278,9 @@ version = "1.5.4" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" [[deps.Contour]] 
-git-tree-sha1 = "d05d9e7b7aedff4e5b51a029dced05cfb6125781" +git-tree-sha1 = "439e35b0b36e2e5881738abc8857bd92ad6ff9a8" uuid = "d38c429a-6771-53c6-b99e-75d170b6e991" -version = "0.6.2" +version = "0.6.3" [[deps.CpuId]] deps = ["Markdown"] @@ -248,15 +289,15 @@ uuid = "adafc99b-e345-5852-983c-f28acb93d879" version = "0.3.1" [[deps.DataAPI]] -git-tree-sha1 = "8da84edb865b0b5b0100c0666a9bc9a0b71c553c" +git-tree-sha1 = "abe83f3a2f1b857aac70ef8b269080af17764bbe" uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" -version = "1.15.0" +version = "1.16.0" [[deps.DataStructures]] deps = ["Compat", "InteractiveUtils", "OrderedCollections"] -git-tree-sha1 = "3dbd312d370723b6bb43ba9d02fc36abade4518d" +git-tree-sha1 = "1d0a14036acb104d9e89698bd408f63ab58cdc82" uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" -version = "0.18.15" +version = "0.18.20" [[deps.DataValueInterfaces]] git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" @@ -274,14 +315,16 @@ uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" version = "1.9.1" [[deps.DiffEqBase]] -deps = ["ArrayInterface", "ChainRulesCore", "DataStructures", "DocStringExtensions", "EnumX", "EnzymeCore", "FastBroadcast", "ForwardDiff", "FunctionWrappers", "FunctionWrappersWrappers", "LinearAlgebra", "Logging", "Markdown", "MuladdMacro", "Parameters", "PreallocationTools", "PrecompileTools", "Printf", "RecursiveArrayTools", "Reexport", "Requires", "SciMLBase", "SciMLOperators", "Setfield", "SparseArrays", "Static", "StaticArraysCore", "Statistics", "Tricks", "TruncatedStacktraces", "ZygoteRules"] -git-tree-sha1 = "95b6df71e218379a831874215b0effaac791d7d7" +deps = ["ArrayInterface", "ConcreteStructs", "DataStructures", "DocStringExtensions", "EnumX", "EnzymeCore", "FastBroadcast", "FastClosures", "ForwardDiff", "FunctionWrappers", "FunctionWrappersWrappers", "LinearAlgebra", "Logging", "Markdown", "MuladdMacro", "Parameters", "PreallocationTools", "PrecompileTools", "Printf", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "Setfield", "SparseArrays", "Static", "StaticArraysCore", "Statistics", "Tricks", "TruncatedStacktraces"] +git-tree-sha1 = "2c6b7bf16fd850c551a765e313e7522ba455cbfd" uuid = "2b5f629d-d688-5b77-993f-72d75c75574e" -version = "6.133.1" +version = "6.151.4" [deps.DiffEqBase.extensions] + DiffEqBaseCUDAExt = "CUDA" + DiffEqBaseChainRulesCoreExt = "ChainRulesCore" DiffEqBaseDistributionsExt = "Distributions" - DiffEqBaseEnzymeExt = "Enzyme" + DiffEqBaseEnzymeExt = ["ChainRulesCore", "Enzyme"] DiffEqBaseGeneralizedGeneratedExt = "GeneralizedGenerated" DiffEqBaseMPIExt = "MPI" DiffEqBaseMeasurementsExt = "Measurements" @@ -289,9 +332,10 @@ version = "6.133.1" DiffEqBaseReverseDiffExt = "ReverseDiff" DiffEqBaseTrackerExt = "Tracker" DiffEqBaseUnitfulExt = "Unitful" - DiffEqBaseZygoteExt = "Zygote" [deps.DiffEqBase.weakdeps] + CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" GeneralizedGenerated = "6b9d7cbe-bcb9-11e9-073f-15a7a543e2eb" @@ -301,7 +345,6 @@ version = "6.133.1" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" - Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [[deps.DiffResults]] deps = ["StaticArraysCore"] @@ -315,11 +358,47 @@ git-tree-sha1 = "23163d55f885173722d1e4cf0f6110cdbaf7e272" uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" version = "1.15.1" 
+[[deps.DifferentiationInterface]] +deps = ["ADTypes", "Compat", "DocStringExtensions", "FillArrays", "LinearAlgebra", "PackageExtensionCompat", "SparseArrays", "SparseMatrixColorings"] +git-tree-sha1 = "23c6df13ad8fcffde4b0596d798911d2e309fc2c" +uuid = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" +version = "0.5.5" + + [deps.DifferentiationInterface.extensions] + DifferentiationInterfaceChainRulesCoreExt = "ChainRulesCore" + DifferentiationInterfaceDiffractorExt = "Diffractor" + DifferentiationInterfaceEnzymeExt = "Enzyme" + DifferentiationInterfaceFastDifferentiationExt = "FastDifferentiation" + DifferentiationInterfaceFiniteDiffExt = "FiniteDiff" + DifferentiationInterfaceFiniteDifferencesExt = "FiniteDifferences" + DifferentiationInterfaceForwardDiffExt = "ForwardDiff" + DifferentiationInterfacePolyesterForwardDiffExt = "PolyesterForwardDiff" + DifferentiationInterfaceReverseDiffExt = "ReverseDiff" + DifferentiationInterfaceSymbolicsExt = "Symbolics" + DifferentiationInterfaceTapirExt = "Tapir" + DifferentiationInterfaceTrackerExt = "Tracker" + DifferentiationInterfaceZygoteExt = ["Zygote", "ForwardDiff"] + + [deps.DifferentiationInterface.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + Diffractor = "9f5e2b26-1114-432f-b630-d3fe2085c51c" + Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" + FastDifferentiation = "eb9bf01b-bf85-4b60-bf87-ee5de06c00be" + FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" + FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" + ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" + PolyesterForwardDiff = "98d1487c-24ca-40b6-b7ab-df2af84e126b" + ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" + Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" + Tapir = "07d77754-e150-4737-8c94-cd238a1fb45b" + Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" + Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" + [[deps.Distances]] deps = ["LinearAlgebra", "Statistics", "StatsAPI"] -git-tree-sha1 = "5225c965635d8c21168e32a12954675e7bea1151" +git-tree-sha1 = "66c4c81f259586e8f002eacebc177e1fb06363b0" uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" -version = "0.10.10" +version = "0.10.11" weakdeps = ["ChainRulesCore", "SparseArrays"] [deps.Distances.extensions] @@ -337,16 +416,16 @@ uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" version = "0.9.3" [[deps.Documenter]] -deps = ["ANSIColoredPrinters", "AbstractTrees", "Base64", "Dates", "DocStringExtensions", "Downloads", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "MarkdownAST", "Pkg", "PrecompileTools", "REPL", "RegistryInstances", "SHA", "Test", "Unicode"] -git-tree-sha1 = "147a3cbb6ddcd9448fe5e6c426b347efc68f9c86" +deps = ["ANSIColoredPrinters", "AbstractTrees", "Base64", "CodecZlib", "Dates", "DocStringExtensions", "Downloads", "Git", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "MarkdownAST", "Pkg", "PrecompileTools", "REPL", "RegistryInstances", "SHA", "TOML", "Test", "Unicode"] +git-tree-sha1 = "76deb8c15f37a3853f13ea2226b8f2577652de05" uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -version = "1.1.1" +version = "1.5.0" [[deps.DocumenterCitations]] -deps = ["AbstractTrees", "Bibliography", "Documenter", "Markdown", "MarkdownAST", "OrderedCollections", "Unicode"] -git-tree-sha1 = "0c5c141a66807796d580ef4fe592647132832f39" +deps = ["AbstractTrees", "Bibliography", "Dates", "Documenter", "Logging", "Markdown", "MarkdownAST", "OrderedCollections", "Unicode"] +git-tree-sha1 = "c72ee44a4242d8ad932062e7476880243635ce6d" uuid = 
"daee34ce-89f3-4625-b898-19384cb65244" -version = "1.2.1" +version = "1.3.3" [[deps.Downloads]] deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] @@ -359,10 +438,13 @@ uuid = "4e289a0a-7415-4d19-859d-a7e5c4648b56" version = "1.0.4" [[deps.EnzymeCore]] -deps = ["Adapt"] -git-tree-sha1 = "d8701002a745c450c03b890f10d53636d1a8a7ea" +git-tree-sha1 = "3a3177ba05b4763234819060fb6c2e1613379ca6" uuid = "f151be2c-9106-41f4-ab19-57ee4f262869" -version = "0.6.2" +version = "0.7.6" +weakdeps = ["Adapt"] + + [deps.EnzymeCore.extensions] + AdaptExt = "Adapt" [[deps.EpollShim_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] @@ -372,21 +454,21 @@ version = "0.0.20230411+0" [[deps.ExceptionUnwrapping]] deps = ["Test"] -git-tree-sha1 = "e90caa41f5a86296e014e148ee061bd6c3edec96" +git-tree-sha1 = "dcb08a0d93ec0b1cdc4af184b26b591e9695423a" uuid = "460bff9d-24e4-43bc-9d9f-a8973cb893f4" -version = "0.1.9" +version = "0.1.10" [[deps.Expat_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "4558ab818dcceaab612d1bb8c19cee87eda2b83c" +git-tree-sha1 = "1c6317308b9dc757616f0b5cb379db10494443a7" uuid = "2e619515-83b5-522b-bb60-26c02a35a201" -version = "2.5.0+0" +version = "2.6.2+0" [[deps.ExponentialUtilities]] deps = ["Adapt", "ArrayInterface", "GPUArraysCore", "GenericSchur", "LinearAlgebra", "PrecompileTools", "Printf", "SparseArrays", "libblastrampoline_jll"] -git-tree-sha1 = "602e4585bcbd5a25bc06f514724593d13ff9e862" +git-tree-sha1 = "8e18940a5ba7f4ddb41fe2b79b6acaac50880a86" uuid = "d4d017d3-3776-5f7e-afef-a10c40355c18" -version = "1.25.0" +version = "1.26.1" [[deps.ExprTools]] git-tree-sha1 = "27415f162e6028e81c72b82ef756bf321213b6ec" @@ -400,10 +482,10 @@ uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" version = "0.4.1" [[deps.FFMPEG_jll]] -deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "PCRE2_jll", "Pkg", "Zlib_jll", "libaom_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"] -git-tree-sha1 = "74faea50c1d007c85837327f6775bea60b5492dd" +deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "PCRE2_jll", "Zlib_jll", "libaom_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"] +git-tree-sha1 = "466d45dc38e15794ec7d5d63ec03d776a9aff36e" uuid = "b22a6f82-2f65-5046-a5b2-351ab43fb4e5" -version = "4.4.2+2" +version = "4.4.4+1" [[deps.FLTK_jll]] deps = ["Artifacts", "Fontconfig_jll", "FreeType2_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll", "Xorg_libXfixes_jll", "Xorg_libXft_jll", "Xorg_libXinerama_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] @@ -413,9 +495,9 @@ version = "1.3.8+0" [[deps.FastBroadcast]] deps = ["ArrayInterface", "LinearAlgebra", "Polyester", "Static", "StaticArrayInterface", "StrideArraysCore"] -git-tree-sha1 = "9d77cb1caf03e67514ba60bcfc47c6e131b1950c" +git-tree-sha1 = "2be93e36303143c6fffd07e2222bbade35194d9e" uuid = "7034ab61-46d4-4ed7-9d0f-46aef9175898" -version = "0.2.7" +version = "0.3.3" [[deps.FastClosures]] git-tree-sha1 = "acebe244d53ee1b461970f8910c235b259e772ef" @@ -424,12 +506,12 @@ version = "0.3.2" [[deps.FastLapackInterface]] deps = ["LinearAlgebra"] -git-tree-sha1 = "b12f05108e405dadcc2aff0008db7f831374e051" +git-tree-sha1 = "cbf5edddb61a43669710cbc2241bc08b36d9e660" uuid = "29a986be-02c6-4525-aec4-84b980013641" -version = "2.0.0" +version = "2.0.4" 
[[deps.Ferrite]] -deps = ["EnumX", "LinearAlgebra", "NearestNeighbors", "Preferences", "Reexport", "SparseArrays", "StaticArrays", "Tensors", "WriteVTK"] +deps = ["EnumX", "ForwardDiff", "LinearAlgebra", "NearestNeighbors", "OrderedCollections", "Preferences", "Reexport", "SparseArrays", "StaticArrays", "Tensors", "WriteVTK"] path = ".." uuid = "c061ca5d-56c9-439f-9c0e-210fe06d3992" version = "0.3.14" @@ -444,41 +526,46 @@ version = "0.3.14" [[deps.FerriteGmsh]] deps = ["Ferrite", "Gmsh"] -git-tree-sha1 = "702427f9f6b2d3e39da3bfab4eea7d02f459e404" +git-tree-sha1 = "0b4c93ea344bdbf5788758a6f214fdc1c3176f2f" uuid = "4f95f4f8-b27c-4ae5-9a39-ea55e634e36b" -version = "1.0.1" +version = "1.1.1" [[deps.FerriteMeshParser]] deps = ["Ferrite"] -git-tree-sha1 = "8b948577bc4066e9c8693438fd511309c7383761" +git-tree-sha1 = "54a647bf423475c6a54d1960bf694880953d27e9" uuid = "0f8c756f-80dd-4a75-85c6-b0a5ab9d4620" -version = "0.1.7" +version = "0.2.0" [[deps.FileIO]] deps = ["Pkg", "Requires", "UUIDs"] -git-tree-sha1 = "299dc33549f68299137e51e6d49a13b5b1da9673" +git-tree-sha1 = "82d8afa92ecf4b52d78d869f038ebfb881267322" uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" -version = "1.16.1" +version = "1.16.3" [[deps.FileWatching]] uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" [[deps.FillArrays]] -deps = ["LinearAlgebra", "Random"] -git-tree-sha1 = "35f0c0f345bff2c6d636f95fdb136323b5a796ef" +deps = ["LinearAlgebra"] +git-tree-sha1 = "0653c0a2396a6da5bc4766c43041ef5fd3efbe57" uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" -version = "1.7.0" -weakdeps = ["SparseArrays", "Statistics"] +version = "1.11.0" [deps.FillArrays.extensions] + FillArraysPDMatsExt = "PDMats" FillArraysSparseArraysExt = "SparseArrays" FillArraysStatisticsExt = "Statistics" + [deps.FillArrays.weakdeps] + PDMats = "90014a1f-27ba-587c-ab20-58faa44d9150" + SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" + [[deps.FiniteDiff]] deps = ["ArrayInterface", "LinearAlgebra", "Requires", "Setfield", "SparseArrays"] -git-tree-sha1 = "c6e4a1fbe73b31a3dea94b1da449503b8830c306" +git-tree-sha1 = "2de436b72c3422940cbe1367611d137008af7ec3" uuid = "6a86dc24-6348-571c-b903-95158fe2bd41" -version = "2.21.1" +version = "2.23.1" [deps.FiniteDiff.extensions] FiniteDiffBandedMatricesExt = "BandedMatrices" @@ -492,21 +579,20 @@ version = "2.21.1" [[deps.FixedPointNumbers]] deps = ["Statistics"] -git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc" +git-tree-sha1 = "05882d6995ae5c12bb5f36dd2ed3f61c98cbb172" uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93" -version = "0.8.4" +version = "0.8.5" [[deps.Fontconfig_jll]] -deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"] -git-tree-sha1 = "21efd19106a55620a188615da6d3d06cd7f6ee03" +deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Zlib_jll"] +git-tree-sha1 = "db16beca600632c95fc8aca29890d83788dd8b23" uuid = "a3f928ae-7b40-5064-980b-68af3947d34b" -version = "2.13.93+0" +version = "2.13.96+0" -[[deps.Formatting]] -deps = ["Printf"] -git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8" -uuid = "59287772-0a20-5a39-b81b-1366585eb4c0" -version = "0.4.2" +[[deps.Format]] +git-tree-sha1 = "9c68794ef81b08086aeb32eeaf33531668d5f5fc" +uuid = "1fa38f19-a742-5d3f-a2b9-30dd87b9d5f8" +version = "1.3.7" [[deps.ForwardDiff]] deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", 
"Random", "SpecialFunctions"] @@ -520,15 +606,15 @@ weakdeps = ["StaticArrays"] [[deps.FreeType2_jll]] deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Zlib_jll"] -git-tree-sha1 = "d8db6a5a2fe1381c1ea4ef2cab7c69c2de7f9ea0" +git-tree-sha1 = "5c1d8ae0efc6c2e7b1fc502cbe25def8f661b7bc" uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7" -version = "2.13.1+0" +version = "2.13.2+0" [[deps.FriBidi_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "aa31987c2ba8704e23c6c8ba8a4f769d5d7e4f91" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "1ed150b39aebcc805c26b93a8d0122c940f64ce2" uuid = "559328eb-81f9-559d-9380-de523a88c83c" -version = "1.0.10+0" +version = "1.0.14+0" [[deps.FunctionWrappers]] git-tree-sha1 = "d62485945ce5ae9c0c48f124a84998d755bae00e" @@ -546,10 +632,10 @@ deps = ["Random"] uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.GLFW_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"] -git-tree-sha1 = "d972031d28c8c8d9d7b41a536ad7bb0c2579caca" +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"] +git-tree-sha1 = "ff38ba61beff76b8f4acad8ab0c97ef73bb670cb" uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89" -version = "3.3.8+0" +version = "3.3.9+0" [[deps.GLU_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg"] @@ -560,31 +646,31 @@ version = "9.0.1+0" [[deps.GMP_jll]] deps = ["Artifacts", "Libdl"] uuid = "781609d7-10c4-51f6-84f2-b8444358ff6d" -version = "6.2.1+2" +version = "6.2.1+6" [[deps.GPUArraysCore]] deps = ["Adapt"] -git-tree-sha1 = "2d6ca471a6c7b536127afccfa7564b5b39227fe0" +git-tree-sha1 = "ec632f177c0d990e64d955ccc1b8c04c485a0950" uuid = "46192b85-c4d5-4398-a991-12ede77f4527" -version = "0.1.5" +version = "0.1.6" [[deps.GR]] -deps = ["Artifacts", "Base64", "DelimitedFiles", "Downloads", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Preferences", "Printf", "Random", "Serialization", "Sockets", "TOML", "Tar", "Test", "UUIDs", "p7zip_jll"] -git-tree-sha1 = "27442171f28c952804dede8ff72828a96f2bfc1f" +deps = ["Artifacts", "Base64", "DelimitedFiles", "Downloads", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Preferences", "Printf", "Random", "Serialization", "Sockets", "TOML", "Tar", "Test", "p7zip_jll"] +git-tree-sha1 = "3e527447a45901ea392fe12120783ad6ec222803" uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" -version = "0.72.10" +version = "0.73.6" [[deps.GR_jll]] deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "FreeType2_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Qt6Base_jll", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = "025d171a2847f616becc0f84c8dc62fe18f0f6dd" +git-tree-sha1 = "182c478a179b267dd7a741b6f8f4c3e0803795d6" uuid = "d2c73de3-f751-5644-a686-071e5b155ba9" -version = "0.72.10+0" +version = "0.73.6+0" [[deps.GenericSchur]] deps = ["LinearAlgebra", "Printf"] -git-tree-sha1 = "fb69b2a645fa69ba5f474af09221b9308b160ce6" +git-tree-sha1 = "af49a0851f8113fcfae2ef5027c6d49d0acec39b" uuid = "c145ed77-6b09-5dd9-b285-bf645a82121e" -version = "0.5.3" +version = "0.5.4" [[deps.Gettext_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"] @@ -592,17 +678,29 @@ git-tree-sha1 = "9b02998aba7bf074d14de89f9d37ca24a1a0b046" uuid = "78b55507-aeef-58d4-861c-77aaff3498b1" 
version = "0.21.0+0" +[[deps.Git]] +deps = ["Git_jll"] +git-tree-sha1 = "04eff47b1354d702c3a85e8ab23d539bb7d5957e" +uuid = "d7ba0133-e1db-5d97-8f8c-041e4b3a1eb2" +version = "1.3.1" + +[[deps.Git_jll]] +deps = ["Artifacts", "Expat_jll", "JLLWrappers", "LibCURL_jll", "Libdl", "Libiconv_jll", "OpenSSL_jll", "PCRE2_jll", "Zlib_jll"] +git-tree-sha1 = "d18fb8a1f3609361ebda9bf029b60fd0f120c809" +uuid = "f8c6e375-362e-5223-8a59-34ff63f689eb" +version = "2.44.0+2" + [[deps.Glib_jll]] deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE2_jll", "Zlib_jll"] -git-tree-sha1 = "e94c92c7bf4819685eb80186d51c43e71d4afa17" +git-tree-sha1 = "7c82e6a6cd34e9d935e9aa4051b66c6ff3af59ba" uuid = "7746bdde-850d-59dc-9ae8-88ece973131d" -version = "2.76.5+0" +version = "2.80.2+0" [[deps.Gmsh]] deps = ["gmsh_jll"] -git-tree-sha1 = "4d4dedef84147934837c683538467cea54c44d44" +git-tree-sha1 = "6d815101e62722f4e323514c9fc704007d4da2e3" uuid = "705231aa-382f-11e9-3f0c-b7cb4346fdeb" -version = "0.2.2" +version = "0.3.1" [[deps.Graphite2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -612,9 +710,9 @@ version = "1.3.14+0" [[deps.Graphs]] deps = ["ArnoldiMethod", "Compat", "DataStructures", "Distributed", "Inflate", "LinearAlgebra", "Random", "SharedArrays", "SimpleTraits", "SparseArrays", "Statistics"] -git-tree-sha1 = "899050ace26649433ef1af25bc17a815b3db52b7" +git-tree-sha1 = "334d300809ae0a68ceee3444c6e99ded412bf0b3" uuid = "86223c79-3864-5bf0-83f7-82e725a168b6" -version = "1.9.0" +version = "1.11.1" [[deps.Grisu]] git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2" @@ -622,16 +720,16 @@ uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe" version = "1.0.2" [[deps.HDF5_jll]] -deps = ["Artifacts", "JLLWrappers", "LibCURL_jll", "Libdl", "OpenSSL_jll", "Pkg", "Zlib_jll"] -git-tree-sha1 = "4cc2bb72df6ff40b055295fdef6d92955f9dede8" +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "LibCURL_jll", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "OpenSSL_jll", "TOML", "Zlib_jll", "libaec_jll"] +git-tree-sha1 = "82a471768b513dc39e471540fdadc84ff80ff997" uuid = "0234f1f7-429e-5d53-9886-15a909be8d59" -version = "1.12.2+2" +version = "1.14.3+3" [[deps.HTTP]] deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"] -git-tree-sha1 = "5eab648309e2e060198b45820af1a37182de3cce" +git-tree-sha1 = "d1d712be3164d61d1fb98e7ce9bcbc6cc06b45ed" uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" -version = "1.10.0" +version = "1.10.8" [[deps.HarfBuzz_jll]] deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "Graphite2_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg"] @@ -645,11 +743,17 @@ git-tree-sha1 = "eb8fed28f4994600e29beef49744639d985a04b2" uuid = "3e5b6fbb-0976-4d2c-9146-d79de83f2fb0" version = "0.1.16" +[[deps.Hwloc_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "ca0f6bf568b4bfc807e7537f081c81e35ceca114" +uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8" +version = "2.10.0+0" + [[deps.IOCapture]] deps = ["Logging", "Random"] -git-tree-sha1 = "d75853a0bdbfb1ac815478bacd89cd27b550ace6" +git-tree-sha1 = "b6d6bfdd7ce25b0f9b2f6b3dd56b2673a66c8770" uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89" -version = "0.2.3" +version = "0.2.5" [[deps.IfElse]] git-tree-sha1 = 
"debdd00ffef04665ccbb3e150747a77560e8fad1" @@ -657,20 +761,30 @@ uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173" version = "0.1.1" [[deps.Inflate]] -git-tree-sha1 = "ea8031dea4aff6bd41f1df8f2fdfb25b33626381" +git-tree-sha1 = "d1b1b796e47d94588b3757fe84fbf65a5ec4a80d" uuid = "d25df0c9-e2be-5dd7-82c8-3ad0b3e990b9" -version = "0.1.4" +version = "0.1.5" [[deps.IntelOpenMP_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "ad37c091f7d7daf900963171600d7c1c5c3ede32" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "be50fe8df3acbffa0274a744f1a99d29c45a57f4" uuid = "1d5cc7b8-4909-519e-a0f8-d0f5ad9712d0" -version = "2023.2.0+0" +version = "2024.1.0+0" [[deps.InteractiveUtils]] deps = ["Markdown"] uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" +[[deps.InverseFunctions]] +deps = ["Test"] +git-tree-sha1 = "e7cbed5032c4c397a6ac23d1493f3289e01231c4" +uuid = "3587e190-3f89-42d0-90ee-14403ec27112" +version = "0.1.14" +weakdeps = ["Dates"] + + [deps.InverseFunctions.extensions] + DatesExt = "Dates" + [[deps.IrrationalConstants]] git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2" uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" @@ -678,9 +792,9 @@ version = "0.2.2" [[deps.IterativeSolvers]] deps = ["LinearAlgebra", "Printf", "Random", "RecipesBase", "SparseArrays"] -git-tree-sha1 = "1169632f425f79429f245113b775a0e3d121457c" +git-tree-sha1 = "59545b0a2b27208b0650df0a46b8e3019f85055b" uuid = "42fd0dbc-a981-5370-80f2-aaf504508153" -version = "0.9.2" +version = "0.9.4" [[deps.IteratorInterfaceExtensions]] git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856" @@ -689,9 +803,9 @@ version = "1.0.0" [[deps.JLFzf]] deps = ["Pipe", "REPL", "Random", "fzf_jll"] -git-tree-sha1 = "9fb0b890adab1c0a4a475d4210d51f228bfc250d" +git-tree-sha1 = "a53ebe394b71470c7f97c2e7e170d51df21b17af" uuid = "1019f520-868f-41f5-a6de-eb00f4b6a39c" -version = "0.1.6" +version = "0.1.7" [[deps.JLLWrappers]] deps = ["Artifacts", "Preferences"] @@ -707,9 +821,15 @@ version = "0.21.4" [[deps.JSON3]] deps = ["Dates", "Mmap", "Parsers", "PrecompileTools", "StructTypes", "UUIDs"] -git-tree-sha1 = "95220473901735a0f4df9d1ca5b171b568b2daa3" +git-tree-sha1 = "eb3edce0ed4fa32f75a0a11217433c31d56bd48b" uuid = "0f8b85d8-7281-11e9-16c2-39a750bddbf1" -version = "1.13.2" +version = "1.14.0" + + [deps.JSON3.extensions] + JSON3ArrowExt = ["ArrowTypes"] + + [deps.JSON3.weakdeps] + ArrowTypes = "31f734f8-188a-4ce0-8406-c8a06bd891cd" [[deps.JSONSchema]] deps = ["Downloads", "JSON", "JSON3", "URIs"] @@ -719,27 +839,27 @@ version = "1.3.0" [[deps.JpegTurbo_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "6f2675ef130a300a112286de91973805fcc5ffbc" +git-tree-sha1 = "c84a835e1a09b289ffcd2271bf2a337bbdda6637" uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8" -version = "2.1.91+0" +version = "3.0.3+0" [[deps.KLU]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse_jll"] -git-tree-sha1 = "884c2968c2e8e7e6bf5956af88cb46aa745c854b" +git-tree-sha1 = "07649c499349dad9f08dde4243a4c597064663e9" uuid = "ef3ab10e-7fda-4108-b977-705223b18434" -version = "0.4.1" +version = "0.6.0" [[deps.Krylov]] deps = ["LinearAlgebra", "Printf", "SparseArrays"] -git-tree-sha1 = "17e462054b42dcdda73e9a9ba0c67754170c88ae" +git-tree-sha1 = "267dad6b4b7b5d529c76d40ff48d33f7e94cb834" uuid = "ba0b0d4f-ebba-5204-a429-3ac8c609bfb7" -version = "0.9.4" +version = "0.9.6" [[deps.LAME_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "f6250b16881adf048549549fba48b1161acdac8c" +deps = ["Artifacts", "JLLWrappers", 
"Libdl"] +git-tree-sha1 = "170b660facf5df5de098d866564877e119141cbd" uuid = "c1c5ebd0-6772-5130-a774-d5fcae4a789d" -version = "3.100.1+0" +version = "3.100.2+0" [[deps.LERC_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -748,27 +868,27 @@ uuid = "88015f11-f218-50d7-93a8-a6af411a945d" version = "3.0.0+1" [[deps.LLVMOpenMP_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "f689897ccbe049adb19a065c495e75f372ecd42b" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "d986ce2d884d49126836ea94ed5bfb0f12679713" uuid = "1d63c593-3942-5779-bab2-d838dc0a180e" -version = "15.0.4+0" +version = "15.0.7+0" [[deps.LZO_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "e5b909bcf985c5e2605737d2ce278ed791b89be6" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "70c5da094887fd2cae843b8db33920bac4b6f07d" uuid = "dd4b983a-f0e5-5f8d-a1b7-129d4a5fb1ac" -version = "2.10.1+0" +version = "2.10.2+0" [[deps.LaTeXStrings]] -git-tree-sha1 = "f2355693d6778a178ade15952b7ac47a4ff97996" +git-tree-sha1 = "50901ebc375ed41dbf8058da26f9de442febbbec" uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" -version = "1.3.0" +version = "1.3.1" [[deps.Latexify]] -deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "OrderedCollections", "Printf", "Requires"] -git-tree-sha1 = "f428ae552340899a935973270b8d98e5a31c49fe" +deps = ["Format", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "OrderedCollections", "Requires"] +git-tree-sha1 = "e0b5cd21dc1b44ec6e64f351976f961e6f31d6c4" uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" -version = "0.16.1" +version = "0.16.3" [deps.Latexify.extensions] DataFramesExt = "DataFrames" @@ -780,20 +900,32 @@ version = "0.16.1" [[deps.LayoutPointers]] deps = ["ArrayInterface", "LinearAlgebra", "ManualMemory", "SIMDTypes", "Static", "StaticArrayInterface"] -git-tree-sha1 = "88b8f66b604da079a627b6fb2860d3704a6729a1" +git-tree-sha1 = "62edfee3211981241b57ff1cedf4d74d79519277" uuid = "10f19ff3-798f-405d-979b-55457f8fc047" -version = "0.1.14" +version = "0.1.15" [[deps.LazilyInitializedFields]] -git-tree-sha1 = "410fe4739a4b092f2ffe36fcb0dcc3ab12648ce1" +git-tree-sha1 = "8f7f3cabab0fd1800699663533b6d5cb3fc0e612" uuid = "0e77f7df-68c5-4e49-93ce-4cd80f5598bf" -version = "1.2.1" +version = "1.2.2" + +[[deps.LazyArrays]] +deps = ["ArrayLayouts", "FillArrays", "LinearAlgebra", "MacroTools", "SparseArrays"] +git-tree-sha1 = "fb43bbe51db62510b032b85e157ea87d77b2fa07" +uuid = "5078a376-72f3-5289-bfd5-ec5146d43c02" +version = "2.1.0" + + [deps.LazyArrays.extensions] + LazyArraysBandedMatricesExt = "BandedMatrices" + LazyArraysBlockArraysExt = "BlockArrays" + LazyArraysBlockBandedMatricesExt = "BlockBandedMatrices" + LazyArraysStaticArraysExt = "StaticArrays" -[[deps.Lazy]] -deps = ["MacroTools"] -git-tree-sha1 = "1370f8202dac30758f3c345f9909b97f53d87d3f" -uuid = "50d2b5c4-7a5e-59d5-8109-a42b560f39c0" -version = "0.15.1" + [deps.LazyArrays.weakdeps] + BandedMatrices = "aae01518-5342-5314-be14-df237901396f" + BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" + BlockBandedMatrices = "ffab5731-97b5-5995-9138-79e8c1846df0" + StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" [[deps.LazyArtifacts]] deps = ["Artifacts", "Pkg"] @@ -802,21 +934,26 @@ uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" [[deps.LibCURL]] deps = ["LibCURL_jll", "MozillaCACerts_jll"] uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" -version = "0.6.3" +version = "0.6.4" [[deps.LibCURL_jll]] deps = ["Artifacts", "LibSSH2_jll", 
"Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" -version = "7.84.0+0" +version = "8.4.0+0" [[deps.LibGit2]] -deps = ["Base64", "NetworkOptions", "Printf", "SHA"] +deps = ["Base64", "LibGit2_jll", "NetworkOptions", "Printf", "SHA"] uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" +[[deps.LibGit2_jll]] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll"] +uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5" +version = "1.6.4+0" + [[deps.LibSSH2_jll]] deps = ["Artifacts", "Libdl", "MbedTLS_jll"] uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" -version = "1.10.2+0" +version = "1.11.0+1" [[deps.Libdl]] uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" @@ -828,10 +965,10 @@ uuid = "e9f186c6-92d2-5b65-8a66-fee21dc1b490" version = "3.2.2+1" [[deps.Libgcrypt_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll", "Pkg"] -git-tree-sha1 = "64613c82a59c120435c067c2b809fc61cf5166ae" +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll"] +git-tree-sha1 = "9fd170c4bbfd8b935fdc5f8b7aa33532c991a673" uuid = "d4300ac3-e22c-5743-9152-c294e39db1e4" -version = "1.8.7+0" +version = "1.8.11+0" [[deps.Libglvnd_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll"] @@ -840,10 +977,10 @@ uuid = "7e76a0d4-f3c7-5321-8279-8d96eeed0f29" version = "1.6.0+0" [[deps.Libgpg_error_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "c333716e46366857753e273ce6a69ee0945a6db9" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "fbb1f2bef882392312feb1ede3615ddc1e9b99ed" uuid = "7add5ba3-2f88-524e-9cd5-f83b8a55f7b8" -version = "1.42.0+0" +version = "1.49.0+0" [[deps.Libiconv_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] @@ -852,10 +989,10 @@ uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531" version = "1.17.0+0" [[deps.Libmount_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "9c30530bf0effd46e15e0fdcf2b8636e78cbbd73" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "0c4f9c4f1a50d8f35048fa0532dabbadf702f81e" uuid = "4b2f31a3-9ecc-558c-b454-b3730dcb73e9" -version = "2.35.0+0" +version = "2.40.1+0" [[deps.Libtiff_jll]] deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "LERC_jll", "Libdl", "XZ_jll", "Zlib_jll", "Zstd_jll"] @@ -864,16 +1001,16 @@ uuid = "89763e89-9b03-5906-acba-b20f662cd828" version = "4.5.1+1" [[deps.Libuuid_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "7f3efec06033682db852f8b3bc3c1d2b0a0ab066" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "5ee6203157c120d79034c748a2acba45b82b8807" uuid = "38a345b3-de98-5d2b-a5d3-14cd9215e700" -version = "2.36.0+0" +version = "2.40.1+0" [[deps.LightXML]] deps = ["Libdl", "XML2_jll"] -git-tree-sha1 = "e129d9391168c677cd4800f5c0abb1ed8cb3794f" +git-tree-sha1 = "3a994404d3f6709610701c7dabfc03fed87a81f8" uuid = "9c8b4983-aa76-5018-a973-4c85ecc9e179" -version = "0.9.0" +version = "0.9.1" [[deps.LineSearches]] deps = ["LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "Printf"] @@ -892,16 +1029,18 @@ uuid = "18c40d15-f7cd-5a6d-bc92-87468d86c5db" version = "5.0.0+0" [[deps.LinearSolve]] -deps = ["ArrayInterface", "ConcreteStructs", "DocStringExtensions", "EnumX", "EnzymeCore", "FastLapackInterface", "GPUArraysCore", "InteractiveUtils", "KLU", "Krylov", "Libdl", "LinearAlgebra", "MKL_jll", "PrecompileTools", "Preferences", "RecursiveFactorization", "Reexport", "Requires", "SciMLBase", "SciMLOperators", "Setfield", "SparseArrays", "Sparspak", 
"SuiteSparse", "UnPack"] -git-tree-sha1 = "158e45dd35cec1ecade0e554c0104ee89e772d82" +deps = ["ArrayInterface", "ChainRulesCore", "ConcreteStructs", "DocStringExtensions", "EnumX", "FastLapackInterface", "GPUArraysCore", "InteractiveUtils", "KLU", "Krylov", "LazyArrays", "Libdl", "LinearAlgebra", "MKL_jll", "Markdown", "PrecompileTools", "Preferences", "RecursiveFactorization", "Reexport", "SciMLBase", "SciMLOperators", "Setfield", "SparseArrays", "Sparspak", "StaticArraysCore", "UnPack"] +git-tree-sha1 = "7648cc20100504f4b453917aacc8520e9c0ecfb3" uuid = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" -version = "2.11.1" +version = "2.30.1" [deps.LinearSolve.extensions] LinearSolveBandedMatricesExt = "BandedMatrices" LinearSolveBlockDiagonalsExt = "BlockDiagonals" LinearSolveCUDAExt = "CUDA" - LinearSolveEnzymeExt = "Enzyme" + LinearSolveCUDSSExt = "CUDSS" + LinearSolveEnzymeExt = ["Enzyme", "EnzymeCore"] + LinearSolveFastAlmostBandedMatricesExt = ["FastAlmostBandedMatrices"] LinearSolveHYPREExt = "HYPRE" LinearSolveIterativeSolversExt = "IterativeSolvers" LinearSolveKernelAbstractionsExt = "KernelAbstractions" @@ -914,7 +1053,10 @@ version = "2.11.1" BandedMatrices = "aae01518-5342-5314-be14-df237901396f" BlockDiagonals = "0a1fb500-61f7-11e9-3c65-f5ef3456f9f0" CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" + CUDSS = "45b445bb-4962-46a0-9369-b4df9d0f772e" Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" + EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869" + FastAlmostBandedMatrices = "9d29842c-ecb8-4973-b1e9-a27b1157504e" HYPRE = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771" IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c" @@ -925,21 +1067,21 @@ version = "2.11.1" [[deps.Literate]] deps = ["Base64", "IOCapture", "JSON", "REPL"] -git-tree-sha1 = "ae5703dde29228490f03cbd64c47be8131819485" +git-tree-sha1 = "596df2daea9c27da81eee63ef2cf101baf10c24c" uuid = "98b081ad-f1c9-55d3-8b20-4c87d4299306" -version = "2.15.0" +version = "2.18.0" [[deps.LiveServer]] deps = ["HTTP", "LoggingExtras", "MIMEs", "Pkg", "Sockets", "Test"] -git-tree-sha1 = "24d05efe53436b22a42bf2ae459f47c48b0c2603" +git-tree-sha1 = "1e46b873b8ef176e23ee43f96e72cd45c20bafb4" uuid = "16fef848-5104-11e9-1b77-fb7a48bbb589" -version = "1.2.7" +version = "1.3.1" [[deps.LogExpFunctions]] deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "7d6dd4e9212aebaeed356de34ccf262a3cd415aa" +git-tree-sha1 = "a2d09619db4e765091ee5c6ffe8872849de0feea" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.26" +version = "0.3.28" [deps.LogExpFunctions.extensions] LogExpFunctionsChainRulesCoreExt = "ChainRulesCore" @@ -961,10 +1103,10 @@ uuid = "e6f89c97-d47a-5376-807f-9c37f3926c36" version = "1.0.3" [[deps.LoopVectorization]] -deps = ["ArrayInterface", "ArrayInterfaceCore", "CPUSummary", "CloseOpenIntervals", "DocStringExtensions", "HostCPUFeatures", "IfElse", "LayoutPointers", "LinearAlgebra", "OffsetArrays", "PolyesterWeave", "PrecompileTools", "SIMDTypes", "SLEEFPirates", "Static", "StaticArrayInterface", "ThreadingUtilities", "UnPack", "VectorizationBase"] -git-tree-sha1 = "c88a4afe1703d731b1c4fdf4e3c7e77e3b176ea2" +deps = ["ArrayInterface", "CPUSummary", "CloseOpenIntervals", "DocStringExtensions", "HostCPUFeatures", "IfElse", "LayoutPointers", "LinearAlgebra", "OffsetArrays", "PolyesterWeave", "PrecompileTools", "SIMDTypes", "SLEEFPirates", "Static", "StaticArrayInterface", "ThreadingUtilities", "UnPack", "VectorizationBase"] +git-tree-sha1 = 
"8f6786d8b2b3248d79db3ad359ce95382d5a6df8" uuid = "bdcacae8-1622-11e9-2a5c-532679323890" -version = "0.12.165" +version = "0.12.170" weakdeps = ["ChainRulesCore", "ForwardDiff", "SpecialFunctions"] [deps.LoopVectorization.extensions] @@ -983,10 +1125,10 @@ uuid = "6c6e2e6c-3030-632d-7369-2d6c69616d65" version = "0.1.4" [[deps.MKL_jll]] -deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"] -git-tree-sha1 = "eb006abbd7041c28e0d16260e50a24f8f9104913" +deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "oneTBB_jll"] +git-tree-sha1 = "80b2833b56d466b3858d565adcd16a4a05f2089b" uuid = "856f044c-d86e-5d09-b602-aeab76dc8ba7" -version = "2023.2.0+0" +version = "2024.1.0+0" [[deps.MMG_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "LinearElasticity_jll", "Pkg", "SCOTCH_jll"] @@ -994,11 +1136,29 @@ git-tree-sha1 = "70a59df96945782bb0d43b56d0fbfdf1ce2e4729" uuid = "86086c02-e288-5929-a127-40944b0018b7" version = "5.6.0+0" +[[deps.MPICH_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"] +git-tree-sha1 = "4099bb6809ac109bfc17d521dad33763bcf026b7" +uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4" +version = "4.2.1+1" + +[[deps.MPIPreferences]] +deps = ["Libdl", "Preferences"] +git-tree-sha1 = "c105fe467859e7f6e9a852cb15cb4301126fac07" +uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267" +version = "0.1.11" + +[[deps.MPItrampoline_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"] +git-tree-sha1 = "8c35d5420193841b2f367e658540e8d9e0601ed0" +uuid = "f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748" +version = "5.4.0+0" + [[deps.MacroTools]] deps = ["Markdown", "Random"] -git-tree-sha1 = "9ee1618cbf5240e6d4e0371d6f24065083f60c48" +git-tree-sha1 = "2fa9ee3e63fd3a4f7a9a4f4744a52f4856de82df" uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" -version = "0.5.11" +version = "0.5.13" [[deps.ManualMemory]] git-tree-sha1 = "bcaef4fc7a0cfe2cba636d84cda54b5e4e4ca3cd" @@ -1015,34 +1175,46 @@ git-tree-sha1 = "465a70f0fc7d443a00dcdc3267a497397b8a3899" uuid = "d0879d2d-cac2-40c8-9cee-1863dc0c7391" version = "0.1.2" +[[deps.MaybeInplace]] +deps = ["ArrayInterface", "LinearAlgebra", "MacroTools", "SparseArrays"] +git-tree-sha1 = "1b9e613f2ca3b6cdcbfe36381e17ca2b66d4b3a1" +uuid = "bb5d69b7-63fc-4a16-80bd-7e42200c7bdb" +version = "0.1.3" + [[deps.MbedTLS]] -deps = ["Dates", "MbedTLS_jll", "MozillaCACerts_jll", "Random", "Sockets"] -git-tree-sha1 = "03a9b9718f5682ecb107ac9f7308991db4ce395b" +deps = ["Dates", "MbedTLS_jll", "MozillaCACerts_jll", "NetworkOptions", "Random", "Sockets"] +git-tree-sha1 = "c067a280ddc25f196b5e7df3877c6b226d390aaf" uuid = "739be429-bea8-5141-9913-cc70e7f3736d" -version = "1.1.7" +version = "1.1.9" [[deps.MbedTLS_jll]] deps = ["Artifacts", "Libdl"] uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.2+0" +version = "2.28.2+1" [[deps.Measures]] git-tree-sha1 = "c13304c81eec1ed3af7fc20e75fb6b26092a1102" uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e" version = "0.3.2" +[[deps.MicrosoftMPI_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "f12a29c4400ba812841c6ace3f4efbb6dbb3ba01" +uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf" +version = "10.1.4+2" + [[deps.Missings]] deps = ["DataAPI"] -git-tree-sha1 = "f66bdc5de519e8f8ae43bdc598782d35a25b1272" +git-tree-sha1 = "ec4f7fbeab05d7747bdf98eb74d130a2a2ed298d" uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" -version = 
"1.1.0" +version = "1.2.0" [[deps.Mmap]] uuid = "a63ad114-7e13-5084-954f-fe012c677804" [[deps.MozillaCACerts_jll]] uuid = "14a3606d-f60d-562e-9121-12d972cd8159" -version = "2022.10.11" +version = "2023.1.10" [[deps.MuladdMacro]] git-tree-sha1 = "cac9cc5499c25554cba55cd3c30543cff5ca4fab" @@ -1055,12 +1227,6 @@ git-tree-sha1 = "a0b464d183da839699f4c79e7606d9d186ec172c" uuid = "d41bc354-129a-5804-8e4c-c37616107c6c" version = "7.8.3" -[[deps.NLsolve]] -deps = ["Distances", "LineSearches", "LinearAlgebra", "NLSolversBase", "Printf", "Reexport"] -git-tree-sha1 = "019f12e9a1a7880459d0173c182e6a99365d7ac1" -uuid = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" -version = "4.5.1" - [[deps.NaNMath]] deps = ["OpenLibm_jll"] git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4" @@ -1069,27 +1235,45 @@ version = "1.0.2" [[deps.NearestNeighbors]] deps = ["Distances", "StaticArrays"] -git-tree-sha1 = "2c3726ceb3388917602169bed973dbc97f1b51a8" +git-tree-sha1 = "91a67b4d73842da90b526011fa85c5c4c9343fe0" uuid = "b8a86587-4115-5ab1-83bc-aa920d37bbce" -version = "0.4.13" +version = "0.4.18" [[deps.NetworkOptions]] uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" version = "1.2.0" [[deps.NonlinearSolve]] -deps = ["ADTypes", "ArrayInterface", "ConcreteStructs", "DiffEqBase", "EnumX", "FiniteDiff", "ForwardDiff", "LineSearches", "LinearAlgebra", "LinearSolve", "PrecompileTools", "RecursiveArrayTools", "Reexport", "SciMLBase", "SimpleNonlinearSolve", "SparseArrays", "SparseDiffTools", "StaticArraysCore", "UnPack"] -git-tree-sha1 = "9203b3333c9610664de2e8cbc23cfd726663df7d" +deps = ["ADTypes", "ArrayInterface", "ConcreteStructs", "DiffEqBase", "FastBroadcast", "FastClosures", "FiniteDiff", "ForwardDiff", "LazyArrays", "LineSearches", "LinearAlgebra", "LinearSolve", "MaybeInplace", "PrecompileTools", "Preferences", "Printf", "RecursiveArrayTools", "Reexport", "SciMLBase", "SimpleNonlinearSolve", "SparseArrays", "SparseDiffTools", "StaticArraysCore", "SymbolicIndexingInterface", "TimerOutputs"] +git-tree-sha1 = "40325dcea1cb84a108efe05966bbb1f4b98e5eea" uuid = "8913a72c-1f9b-4ce2-8d82-65094dcecaec" -version = "2.4.0" +version = "3.13.0" [deps.NonlinearSolve.extensions] + NonlinearSolveBandedMatricesExt = "BandedMatrices" NonlinearSolveFastLevenbergMarquardtExt = "FastLevenbergMarquardt" + NonlinearSolveFixedPointAccelerationExt = "FixedPointAcceleration" NonlinearSolveLeastSquaresOptimExt = "LeastSquaresOptim" + NonlinearSolveMINPACKExt = "MINPACK" + NonlinearSolveNLSolversExt = "NLSolvers" + NonlinearSolveNLsolveExt = "NLsolve" + NonlinearSolveSIAMFANLEquationsExt = "SIAMFANLEquations" + NonlinearSolveSpeedMappingExt = "SpeedMapping" + NonlinearSolveSymbolicsExt = "Symbolics" + NonlinearSolveZygoteExt = "Zygote" [deps.NonlinearSolve.weakdeps] + BandedMatrices = "aae01518-5342-5314-be14-df237901396f" FastLevenbergMarquardt = "7a0df574-e128-4d35-8cbd-3d84502bf7ce" + FixedPointAcceleration = "817d07cb-a79a-5c30-9a31-890123675176" LeastSquaresOptim = "0fc2ff8b-aaa3-5acd-a817-1944a5e08891" + MINPACK = "4854310b-de5a-5eb6-a2a5-c1dee2bd17f9" + NLSolvers = "337daf1e-9722-11e9-073e-8b9effe078ba" + NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" + SIAMFANLEquations = "084e46ad-d928-497d-ad5e-07fa361a48c4" + SpeedMapping = "f1835b91-879b-4a3f-a438-e4baacf14412" + Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" + Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [[deps.OCCT_jll]] deps = ["Artifacts", "FreeType2_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll", "Xorg_libXfixes_jll", 
"Xorg_libXft_jll", "Xorg_libXinerama_jll", "Xorg_libXrender_jll"] @@ -1098,10 +1282,13 @@ uuid = "baad4e97-8daa-5946-aac2-2edac59d34e1" version = "7.6.2+2" [[deps.OffsetArrays]] -deps = ["Adapt"] -git-tree-sha1 = "2ac17d29c523ce1cd38e27785a7d23024853a4bb" +git-tree-sha1 = "e64b4f5ea6b7389f6f046d13d4896a8f9c1ba71e" uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -version = "1.12.10" +version = "1.14.0" +weakdeps = ["Adapt"] + + [deps.OffsetArrays.extensions] + OffsetArraysAdaptExt = "Adapt" [[deps.Ogg_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -1112,24 +1299,30 @@ version = "1.3.5+1" [[deps.OpenBLAS_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" -version = "0.3.21+4" +version = "0.3.23+4" [[deps.OpenLibm_jll]] deps = ["Artifacts", "Libdl"] uuid = "05823500-19ac-5b8b-9628-191a04bc5112" -version = "0.8.1+0" +version = "0.8.1+2" + +[[deps.OpenMPI_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"] +git-tree-sha1 = "e25c1778a98e34219a00455d6e4384e017ea9762" +uuid = "fe0851c0-eecd-5654-98d4-656369965a5c" +version = "4.1.6+0" [[deps.OpenSSL]] deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "OpenSSL_jll", "Sockets"] -git-tree-sha1 = "51901a49222b09e3743c65b8847687ae5fc78eb2" +git-tree-sha1 = "38cb508d080d21dc1128f7fb04f20387ed4c0af4" uuid = "4d8831e6-92b7-49fb-bdf8-b643e874388c" -version = "1.4.1" +version = "1.4.3" [[deps.OpenSSL_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "a12e56c72edee3ce6b96667745e6cbbe5498f200" +git-tree-sha1 = "a028ee3cb5641cccc4c24e90c36b0a4f7707bdf5" uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" -version = "1.1.23+0" +version = "3.0.14+0" [[deps.OpenSpecFun_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] @@ -1139,9 +1332,15 @@ version = "0.5.5+0" [[deps.Optim]] deps = ["Compat", "FillArrays", "ForwardDiff", "LineSearches", "LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "PositiveFactorizations", "Printf", "SparseArrays", "StatsBase"] -git-tree-sha1 = "01f85d9269b13fedc61e63cc72ee2213565f7a72" +git-tree-sha1 = "d9b79c4eed437421ac4285148fcadf42e0700e89" uuid = "429524aa-4258-5aef-a3af-852621145aeb" -version = "1.7.8" +version = "1.9.4" + + [deps.Optim.extensions] + OptimMOIExt = "MathOptInterface" + + [deps.Optim.weakdeps] + MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" [[deps.Opus_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -1150,20 +1349,20 @@ uuid = "91d4177d-7536-5919-b921-800302f37372" version = "1.3.2+0" [[deps.OrderedCollections]] -git-tree-sha1 = "2e73fe17cac3c62ad1aebe70d44c963c3cfdc3e3" +git-tree-sha1 = "dfdf5519f235516220579f949664f1bf44e741c5" uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" -version = "1.6.2" +version = "1.6.3" [[deps.OrdinaryDiffEq]] -deps = ["ADTypes", "Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "DocStringExtensions", "ExponentialUtilities", "FastBroadcast", "FastClosures", "FiniteDiff", "ForwardDiff", "FunctionWrappersWrappers", "IfElse", "InteractiveUtils", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "LoopVectorization", "MacroTools", "MuladdMacro", "NLsolve", "NonlinearSolve", "Polyester", "PreallocationTools", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLNLSolve", "SciMLOperators", "SimpleNonlinearSolve", "SimpleUnPack", "SparseArrays", "SparseDiffTools", "StaticArrayInterface", "StaticArrays", 
"TruncatedStacktraces"] -git-tree-sha1 = "def999a7447854f0e9ca9fdda235e04a65916b76" +deps = ["ADTypes", "Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "DocStringExtensions", "EnumX", "ExponentialUtilities", "FastBroadcast", "FastClosures", "FillArrays", "FiniteDiff", "ForwardDiff", "FunctionWrappersWrappers", "IfElse", "InteractiveUtils", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "MacroTools", "MuladdMacro", "NonlinearSolve", "Polyester", "PreallocationTools", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SciMLStructures", "SimpleNonlinearSolve", "SimpleUnPack", "SparseArrays", "SparseDiffTools", "StaticArrayInterface", "StaticArrays", "TruncatedStacktraces"] +git-tree-sha1 = "b4cde20f0e8c67fd35863794d5e548722f7bb71d" uuid = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -version = "6.58.0" +version = "6.84.0" [[deps.PCRE2_jll]] deps = ["Artifacts", "Libdl"] uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15" -version = "10.42.0+0" +version = "10.42.0+1" [[deps.PackageExtensionCompat]] git-tree-sha1 = "fb28e33b8a95c4cee25ce296c817d89cc2e53518" @@ -1179,9 +1378,9 @@ version = "0.12.3" [[deps.Parsers]] deps = ["Dates", "PrecompileTools", "UUIDs"] -git-tree-sha1 = "716e24b21538abc91f6205fd1d8363f39b442851" +git-tree-sha1 = "8489905bcdbcfac64d1daa51ca07c0d8f0283821" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.7.2" +version = "2.8.1" [[deps.Pipe]] git-tree-sha1 = "6842804e7867b115ca9de748a0cf6b364523c16d" @@ -1190,32 +1389,32 @@ version = "1.3.0" [[deps.Pixman_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LLVMOpenMP_jll", "Libdl"] -git-tree-sha1 = "64779bc4c9784fee475689a1752ef4d5747c5e87" +git-tree-sha1 = "35621f10a7531bc8fa58f74610b1bfb70a3cfc6b" uuid = "30392449-352a-5448-841d-b1acce4e97dc" -version = "0.42.2+0" +version = "0.43.4+0" [[deps.Pkg]] deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" -version = "1.9.2" +version = "1.10.0" [[deps.PlotThemes]] deps = ["PlotUtils", "Statistics"] -git-tree-sha1 = "1f03a2d339f42dca4a4da149c7e15e9b896ad899" +git-tree-sha1 = "6e55c6841ce3411ccb3457ee52fc48cb698d6fb0" uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a" -version = "3.1.0" +version = "3.2.0" [[deps.PlotUtils]] deps = ["ColorSchemes", "Colors", "Dates", "PrecompileTools", "Printf", "Random", "Reexport", "Statistics"] -git-tree-sha1 = "f92e1315dadf8c46561fb9396e525f7200cdc227" +git-tree-sha1 = "7b1a9df27f072ac4c9c7cbe5efb198489258d1f5" uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" -version = "1.3.5" +version = "1.4.1" [[deps.Plots]] -deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "JLFzf", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "PrecompileTools", "Preferences", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "RelocatableFolders", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs", "UnicodeFun", "UnitfulLatexify", "Unzip"] -git-tree-sha1 = "ccee59c6e48e6f2edf8a5b64dc817b6729f99eb5" +deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "JLFzf", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "PrecompileTools", "Printf", "REPL", "Random", 
"RecipesBase", "RecipesPipeline", "Reexport", "RelocatableFolders", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs", "UnicodeFun", "UnitfulLatexify", "Unzip"] +git-tree-sha1 = "442e1e7ac27dd5ff8825c3fa62fbd1e86397974b" uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -version = "1.39.0" +version = "1.40.4" [deps.Plots.extensions] FileIOExt = "FileIO" @@ -1233,9 +1432,9 @@ version = "1.39.0" [[deps.Polyester]] deps = ["ArrayInterface", "BitTwiddlingConvenienceFunctions", "CPUSummary", "IfElse", "ManualMemory", "PolyesterWeave", "Requires", "Static", "StaticArrayInterface", "StrideArraysCore", "ThreadingUtilities"] -git-tree-sha1 = "398f91235beaac50445557c937ecb0145d171842" +git-tree-sha1 = "b3e2bae88cf07baf0a051fe09666b8ef97aefe93" uuid = "f517fe37-dbe3-4b94-8317-1923a5111588" -version = "0.7.8" +version = "0.7.14" [[deps.PolyesterWeave]] deps = ["BitTwiddlingConvenienceFunctions", "CPUSummary", "IfElse", "Static", "ThreadingUtilities"] @@ -1250,10 +1449,10 @@ uuid = "85a6dd25-e78a-55b7-8502-1745935b8125" version = "0.2.4" [[deps.PreallocationTools]] -deps = ["Adapt", "ArrayInterface", "ForwardDiff", "Requires"] -git-tree-sha1 = "f739b1b3cc7b9949af3b35089931f2b58c289163" +deps = ["Adapt", "ArrayInterface", "ForwardDiff"] +git-tree-sha1 = "406c29a7f46706d379a3bce45671b4e3a39ddfbc" uuid = "d236fae5-4411-538c-8e31-a6e3d9e00b46" -version = "0.4.12" +version = "0.4.22" [deps.PreallocationTools.extensions] PreallocationToolsReverseDiffExt = "ReverseDiff" @@ -1263,15 +1462,15 @@ version = "0.4.12" [[deps.PrecompileTools]] deps = ["Preferences"] -git-tree-sha1 = "03b4c25b43cb84cee5c90aa9b5ea0a78fd848d2f" +git-tree-sha1 = "5aa36f7049a63a1528fe8f7c3f2113413ffd4e1f" uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a" -version = "1.2.0" +version = "1.2.1" [[deps.Preferences]] deps = ["TOML"] -git-tree-sha1 = "00805cd429dcb4870060ff49ef443486c262e38e" +git-tree-sha1 = "9306f6085165d270f7e3db02af26a400d580f5c6" uuid = "21216c6a-2e73-6563-6e65-726566657250" -version = "1.4.1" +version = "1.4.3" [[deps.Printf]] deps = ["Unicode"] @@ -1279,22 +1478,22 @@ uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" [[deps.ProgressMeter]] deps = ["Distributed", "Printf"] -git-tree-sha1 = "00099623ffee15972c16111bcf84c58a0051257c" +git-tree-sha1 = "763a8ceb07833dd51bb9e3bbca372de32c0605ad" uuid = "92933f4c-e287-5a05-a399-4b506db050ca" -version = "1.9.0" +version = "1.10.0" [[deps.Qt6Base_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "OpenSSL_jll", "Vulkan_Loader_jll", "Xorg_libSM_jll", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Xorg_libxcb_jll", "Xorg_xcb_util_cursor_jll", "Xorg_xcb_util_image_jll", "Xorg_xcb_util_keysyms_jll", "Xorg_xcb_util_renderutil_jll", "Xorg_xcb_util_wm_jll", "Zlib_jll", "libinput_jll", "xkbcommon_jll"] -git-tree-sha1 = "7c29f0e8c575428bd84dc3c72ece5178caa67336" +git-tree-sha1 = "492601870742dcd38f233b23c3ec629628c1d724" uuid = "c0090381-4147-56d7-9ebc-da0b1113ec56" -version = "6.5.2+2" +version = "6.7.1+1" [[deps.REPL]] deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" [[deps.Random]] -deps = ["SHA", "Serialization"] +deps = ["SHA"] uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" [[deps.RecipesBase]] @@ -1310,28 +1509,34 @@ uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c" version = "0.6.12" [[deps.RecursiveArrayTools]] -deps = ["Adapt", "ArrayInterface", "DocStringExtensions", "GPUArraysCore", "IteratorInterfaceExtensions", 
"LinearAlgebra", "RecipesBase", "Requires", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "Tables"] -git-tree-sha1 = "d7087c013e8a496ff396bae843b1e16d9a30ede8" +deps = ["Adapt", "ArrayInterface", "DocStringExtensions", "GPUArraysCore", "IteratorInterfaceExtensions", "LinearAlgebra", "RecipesBase", "SparseArrays", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "Tables"] +git-tree-sha1 = "3400ce27995422fb88ffcd3af9945565aad947f0" uuid = "731186ca-8d62-57ce-b412-fbd966d074cd" -version = "2.38.10" +version = "3.23.1" [deps.RecursiveArrayTools.extensions] + RecursiveArrayToolsFastBroadcastExt = "FastBroadcast" + RecursiveArrayToolsForwardDiffExt = "ForwardDiff" RecursiveArrayToolsMeasurementsExt = "Measurements" RecursiveArrayToolsMonteCarloMeasurementsExt = "MonteCarloMeasurements" + RecursiveArrayToolsReverseDiffExt = ["ReverseDiff", "Zygote"] RecursiveArrayToolsTrackerExt = "Tracker" RecursiveArrayToolsZygoteExt = "Zygote" [deps.RecursiveArrayTools.weakdeps] + FastBroadcast = "7034ab61-46d4-4ed7-9d0f-46aef9175898" + ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" Measurements = "eff96d63-e80a-5855-80a2-b1b0885c5ab7" MonteCarloMeasurements = "0987c9cc-fe09-11e8-30f0-b96dd679fdca" + ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [[deps.RecursiveFactorization]] deps = ["LinearAlgebra", "LoopVectorization", "Polyester", "PrecompileTools", "StrideArraysCore", "TriangularSolve"] -git-tree-sha1 = "2b6d4a40339aa02655b1743f4cd7c03109f520c1" +git-tree-sha1 = "6db1a75507051bc18bfa131fbc7c3f169cc4b2f6" uuid = "f2c3362d-daeb-58d1-803e-2bc74f2840b4" -version = "0.2.20" +version = "0.2.23" [[deps.Reexport]] git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" @@ -1358,9 +1563,9 @@ version = "1.3.0" [[deps.RuntimeGeneratedFunctions]] deps = ["ExprTools", "SHA", "Serialization"] -git-tree-sha1 = "6aacc5eefe8415f47b3e34214c1d79d2674a0ba2" +git-tree-sha1 = "04c968137612c4a5629fa531334bb81ad5680f00" uuid = "7e49a35a-f44a-4d26-94aa-eba1b4ca6b47" -version = "0.5.12" +version = "0.5.13" [[deps.SCOTCH_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] @@ -1374,9 +1579,9 @@ version = "0.7.0" [[deps.SIMD]] deps = ["PrecompileTools"] -git-tree-sha1 = "0e270732477b9e551d884e6b07e23bb2ec947790" +git-tree-sha1 = "2803cab51702db743f3fda07dd1745aadfbf43bd" uuid = "fdea26ae-647d-5447-a871-4b548cad5224" -version = "3.4.5" +version = "3.5.0" [[deps.SIMDTypes]] git-tree-sha1 = "330289636fb8107c5f32088d2741e9fd7a061a5c" @@ -1385,45 +1590,52 @@ version = "0.1.0" [[deps.SLEEFPirates]] deps = ["IfElse", "Static", "VectorizationBase"] -git-tree-sha1 = "4b8586aece42bee682399c4c4aee95446aa5cd19" +git-tree-sha1 = "3aac6d68c5e57449f5b9b865c9ba50ac2970c4cf" uuid = "476501e8-09a2-5ece-8869-fb82de89a1fa" -version = "0.6.39" +version = "0.6.42" [[deps.SciMLBase]] -deps = ["ADTypes", "ArrayInterface", "ChainRulesCore", "CommonSolve", "ConstructionBase", "Distributed", "DocStringExtensions", "EnumX", "FillArrays", "FunctionWrappersWrappers", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "Markdown", "PrecompileTools", "Preferences", "RecipesBase", "RecursiveArrayTools", "Reexport", "RuntimeGeneratedFunctions", "SciMLOperators", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "Tables", "TruncatedStacktraces", "ZygoteRules"] -git-tree-sha1 = "151c322c309d879d114d1c0bee69c61d5933357f" +deps = ["ADTypes", "Accessors", "ArrayInterface", "CommonSolve", 
"ConstructionBase", "Distributed", "DocStringExtensions", "EnumX", "FunctionWrappersWrappers", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "Markdown", "PrecompileTools", "Preferences", "Printf", "RecipesBase", "RecursiveArrayTools", "Reexport", "RuntimeGeneratedFunctions", "SciMLOperators", "SciMLStructures", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "Tables"] +git-tree-sha1 = "7a6c5c8c38d2e37f45d4686c3598c20c1aebf48e" uuid = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -version = "2.4.3" +version = "2.41.3" [deps.SciMLBase.extensions] + SciMLBaseChainRulesCoreExt = "ChainRulesCore" + SciMLBaseMakieExt = "Makie" + SciMLBasePartialFunctionsExt = "PartialFunctions" SciMLBasePyCallExt = "PyCall" SciMLBasePythonCallExt = "PythonCall" SciMLBaseRCallExt = "RCall" SciMLBaseZygoteExt = "Zygote" [deps.SciMLBase.weakdeps] + ChainRules = "082447d4-558c-5d27-93f4-14fc19e9eca2" + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" + PartialFunctions = "570af359-4316-4cb7-8c74-252c00c2016b" PyCall = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0" PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d" RCall = "6f49c342-dc21-5d91-9882-a32aef131414" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" -[[deps.SciMLNLSolve]] -deps = ["DiffEqBase", "LineSearches", "NLsolve", "Reexport", "SciMLBase"] -git-tree-sha1 = "765b788339abd7d983618c09cfc0192e2b6b15fd" -uuid = "e9a6253c-8580-4d32-9898-8661bb511710" -version = "0.1.9" - [[deps.SciMLOperators]] -deps = ["ArrayInterface", "DocStringExtensions", "Lazy", "LinearAlgebra", "Setfield", "SparseArrays", "StaticArraysCore", "Tricks"] -git-tree-sha1 = "65c2e6ced6f62ea796af251eb292a0e131a3613b" +deps = ["ArrayInterface", "DocStringExtensions", "LinearAlgebra", "MacroTools", "Setfield", "SparseArrays", "StaticArraysCore"] +git-tree-sha1 = "10499f619ef6e890f3f4a38914481cc868689cd5" uuid = "c0aeaf25-5076-4817-a8d5-81caf7dfa961" -version = "0.3.6" +version = "0.3.8" + +[[deps.SciMLStructures]] +deps = ["ArrayInterface"] +git-tree-sha1 = "6ab4beaf88dcdd2639bead916f2347f81dcacd0e" +uuid = "53ae85a6-f571-4167-b2af-e1d143709226" +version = "1.3.0" [[deps.Scratch]] deps = ["Dates"] -git-tree-sha1 = "30449ee12237627992a99d5e30ae63e4d78cd24a" +git-tree-sha1 = "3bac05bc7e74a75fd9cba4295cde4045d9fe2386" uuid = "6c6a2e73-6563-6170-7368-637461726353" -version = "1.2.0" +version = "1.2.1" [[deps.Serialization]] uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" @@ -1450,16 +1662,22 @@ uuid = "777ac1f9-54b0-4bf8-805c-2214025038e7" version = "1.1.0" [[deps.SimpleNonlinearSolve]] -deps = ["ArrayInterface", "DiffEqBase", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "PackageExtensionCompat", "PrecompileTools", "Reexport", "SciMLBase", "StaticArraysCore"] -git-tree-sha1 = "15ff97fa4881133caa324dacafe28b5ac47ad8a2" +deps = ["ADTypes", "ArrayInterface", "ConcreteStructs", "DiffEqBase", "DiffResults", "DifferentiationInterface", "FastClosures", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "MaybeInplace", "PrecompileTools", "Reexport", "SciMLBase", "Setfield", "StaticArraysCore"] +git-tree-sha1 = "913754ccbbc78720a4542b56a6bdfbab1c84c8f2" uuid = "727e6d20-b764-4bd8-a329-72de5adea6c7" -version = "0.1.23" +version = "1.10.0" [deps.SimpleNonlinearSolve.extensions] - SimpleNonlinearSolveNNlibExt = "NNlib" + SimpleNonlinearSolveChainRulesCoreExt = "ChainRulesCore" + SimpleNonlinearSolveReverseDiffExt = "ReverseDiff" + SimpleNonlinearSolveTrackerExt = "Tracker" + SimpleNonlinearSolveZygoteExt = "Zygote" 
[deps.SimpleNonlinearSolve.weakdeps] - NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" + Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" + Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [[deps.SimpleTraits]] deps = ["InteractiveUtils", "MacroTools"] @@ -1472,41 +1690,46 @@ git-tree-sha1 = "58e6353e72cde29b90a69527e56df1b5c3d8c437" uuid = "ce78b400-467f-4804-87d8-8f486da07d0a" version = "1.1.0" -[[deps.SnoopPrecompile]] -deps = ["Preferences"] -git-tree-sha1 = "e760a70afdcd461cf01a575947738d359234665c" -uuid = "66db9d55-30c0-4569-8b51-7e840670fc0c" -version = "1.0.3" - [[deps.Sockets]] uuid = "6462fe0b-24de-5631-8697-dd941f90decc" [[deps.SortingAlgorithms]] deps = ["DataStructures"] -git-tree-sha1 = "5165dfb9fd131cf0c6957a3a7605dede376e7b63" +git-tree-sha1 = "66e0a8e672a0bdfca2c3f5937efb8538b9ddc085" uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" -version = "1.2.0" +version = "1.2.1" [[deps.SparseArrays]] deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"] uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +version = "1.10.0" [[deps.SparseDiffTools]] deps = ["ADTypes", "Adapt", "ArrayInterface", "Compat", "DataStructures", "FiniteDiff", "ForwardDiff", "Graphs", "LinearAlgebra", "PackageExtensionCompat", "Random", "Reexport", "SciMLOperators", "Setfield", "SparseArrays", "StaticArrayInterface", "StaticArrays", "Tricks", "UnPack", "VertexSafeGraphs"] -git-tree-sha1 = "336fd944a1bbb8873bfa8171387608ca93317d68" +git-tree-sha1 = "469f51f8c4741ce944be2c0b65423b518b1405b0" uuid = "47a9eef4-7e08-11e9-0b38-333d64bd3804" -version = "2.8.0" +version = "2.19.0" [deps.SparseDiffTools.extensions] SparseDiffToolsEnzymeExt = "Enzyme" + SparseDiffToolsPolyesterExt = "Polyester" + SparseDiffToolsPolyesterForwardDiffExt = "PolyesterForwardDiff" SparseDiffToolsSymbolicsExt = "Symbolics" SparseDiffToolsZygoteExt = "Zygote" [deps.SparseDiffTools.weakdeps] Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" + Polyester = "f517fe37-dbe3-4b94-8317-1923a5111588" + PolyesterForwardDiff = "98d1487c-24ca-40b6-b7ab-df2af84e126b" Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" +[[deps.SparseMatrixColorings]] +deps = ["ADTypes", "Compat", "DocStringExtensions", "LinearAlgebra", "Random", "SparseArrays"] +git-tree-sha1 = "eed2446b3c3dd58f6ded3168998b8b2cb3fc9229" +uuid = "0a514795-09f3-496d-8182-132a7b665d35" +version = "0.3.3" + [[deps.Sparspak]] deps = ["Libdl", "LinearAlgebra", "Logging", "OffsetArrays", "Printf", "SparseArrays", "Test"] git-tree-sha1 = "342cf4b449c299d8d1ceaf00b7a49f4fbc7940e7" @@ -1515,9 +1738,9 @@ version = "0.3.9" [[deps.SpecialFunctions]] deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] -git-tree-sha1 = "e2cfc4012a19088254b3950b85c3c1d8882d864d" +git-tree-sha1 = "2f5d4697f21388cbe1ff299430dd169ef97d7e14" uuid = "276daf66-3868-5448-9aa4-cd146d93841b" -version = "2.3.1" +version = "2.4.0" weakdeps = ["ChainRulesCore"] [deps.SpecialFunctions.extensions] @@ -1525,15 +1748,15 @@ weakdeps = ["ChainRulesCore"] [[deps.Static]] deps = ["IfElse"] -git-tree-sha1 = "f295e0a1da4ca425659c57441bcb59abb035a4bc" +git-tree-sha1 = "d2fdac9ff3906e27f7a618d47b676941baa6c80c" uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3" -version = "0.8.8" +version = "0.8.10" [[deps.StaticArrayInterface]] deps = ["ArrayInterface", "Compat", "IfElse", "LinearAlgebra", "PrecompileTools", "Requires", "SparseArrays", 
"Static", "SuiteSparse"] -git-tree-sha1 = "03fec6800a986d191f64f5c0996b59ed526eda25" +git-tree-sha1 = "5d66818a39bb04bf328e92bc933ec5b4ee88e436" uuid = "0d7ed370-da01-4f52-bd93-41d350b8b718" -version = "1.4.1" +version = "1.5.0" weakdeps = ["OffsetArrays", "StaticArrays"] [deps.StaticArrayInterface.extensions] @@ -1541,24 +1764,25 @@ weakdeps = ["OffsetArrays", "StaticArrays"] StaticArrayInterfaceStaticArraysExt = "StaticArrays" [[deps.StaticArrays]] -deps = ["LinearAlgebra", "Random", "StaticArraysCore"] -git-tree-sha1 = "0adf069a2a490c47273727e029371b31d44b72b2" +deps = ["LinearAlgebra", "PrecompileTools", "Random", "StaticArraysCore"] +git-tree-sha1 = "6e00379a24597be4ae1ee6b2d882e15392040132" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.6.5" -weakdeps = ["Statistics"] +version = "1.9.5" +weakdeps = ["ChainRulesCore", "Statistics"] [deps.StaticArrays.extensions] + StaticArraysChainRulesCoreExt = "ChainRulesCore" StaticArraysStatisticsExt = "Statistics" [[deps.StaticArraysCore]] -git-tree-sha1 = "36b3d696ce6366023a0ea192b4cd442268995a0d" +git-tree-sha1 = "192954ef1208c7019899fbf8049e717f92959682" uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" -version = "1.4.2" +version = "1.4.3" [[deps.Statistics]] deps = ["LinearAlgebra", "SparseArrays"] uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" -version = "1.9.0" +version = "1.10.0" [[deps.StatsAPI]] deps = ["LinearAlgebra"] @@ -1568,15 +1792,15 @@ version = "1.7.0" [[deps.StatsBase]] deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] -git-tree-sha1 = "1d77abd07f617c4868c33d4f5b9e1dbb2643c9cf" +git-tree-sha1 = "5cf7606d6cef84b543b483848d4ae08ad9832b21" uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" -version = "0.34.2" +version = "0.34.3" [[deps.StrideArraysCore]] -deps = ["ArrayInterface", "CloseOpenIntervals", "IfElse", "LayoutPointers", "ManualMemory", "SIMDTypes", "Static", "StaticArrayInterface", "ThreadingUtilities"] -git-tree-sha1 = "f02eb61eb5c97b48c153861c72fbbfdddc607e06" +deps = ["ArrayInterface", "CloseOpenIntervals", "IfElse", "LayoutPointers", "LinearAlgebra", "ManualMemory", "SIMDTypes", "Static", "StaticArrayInterface", "ThreadingUtilities"] +git-tree-sha1 = "25349bf8f63aa36acbff5e3550a86e9f5b0ef682" uuid = "7792a7ef-975c-4747-a70f-980b88e8d1da" -version = "0.4.17" +version = "0.5.6" [[deps.StringEncodings]] deps = ["Libiconv_jll"] @@ -1595,15 +1819,15 @@ deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" [[deps.SuiteSparse_jll]] -deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"] +deps = ["Artifacts", "Libdl", "libblastrampoline_jll"] uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" -version = "5.10.1+6" +version = "7.2.1+1" [[deps.SymbolicIndexingInterface]] -deps = ["DocStringExtensions"] -git-tree-sha1 = "f8ab052bfcbdb9b48fad2c80c873aa0d0344dfe5" +deps = ["Accessors", "ArrayInterface", "RuntimeGeneratedFunctions", "StaticArraysCore"] +git-tree-sha1 = "a5f6f138b740c9d93d76f0feddd3092e6ef002b7" uuid = "2efcf032-c050-4f8e-a9bb-153293bab1f5" -version = "0.2.2" +version = "0.3.22" [[deps.TOML]] deps = ["Dates"] @@ -1635,9 +1859,9 @@ version = "0.1.1" [[deps.Tensors]] deps = ["ForwardDiff", "LinearAlgebra", "PrecompileTools", "SIMD", "StaticArrays", "Statistics"] -git-tree-sha1 = "3b0c974579e89b0dd35a6ee6a9f10caf5e304d6c" +git-tree-sha1 = "957f256fb380cad64cae4da39e562ecfb5c3fec9" uuid = "48a634ad-e948-5137-8d70-aa71f2a747f4" -version = 
"1.16.0" +version = "1.16.1" [[deps.Test]] deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] @@ -1651,21 +1875,24 @@ version = "0.5.2" [[deps.TimerOutputs]] deps = ["ExprTools", "Printf"] -git-tree-sha1 = "f548a9e9c490030e545f72074a41edfd0e5bcdd7" +git-tree-sha1 = "5a13ae8a41237cff5ecf34f73eb1b8f42fff6531" uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" -version = "0.5.23" +version = "0.5.24" [[deps.TranscodingStreams]] -deps = ["Random", "Test"] -git-tree-sha1 = "9a6ae7ed916312b41236fcef7e0af564ef934769" +git-tree-sha1 = "d73336d81cafdc277ff45558bb7eaa2b04a8e472" uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" -version = "0.9.13" +version = "0.10.10" +weakdeps = ["Random", "Test"] + + [deps.TranscodingStreams.extensions] + TestExt = ["Test", "Random"] [[deps.TriangularSolve]] deps = ["CloseOpenIntervals", "IfElse", "LayoutPointers", "LinearAlgebra", "LoopVectorization", "Polyester", "Static", "VectorizationBase"] -git-tree-sha1 = "31eedbc0b6d07c08a700e26d31298ac27ef330eb" +git-tree-sha1 = "66c68a20907800c0b7c04ff8a6164115e8747de2" uuid = "d5829a12-d9aa-46ab-831f-fb7c9ab06edf" -version = "0.1.19" +version = "0.2.0" [[deps.Tricks]] git-tree-sha1 = "eae1bb484cd63b36999ee58be2de6c178105112f" @@ -1703,18 +1930,15 @@ version = "0.4.1" [[deps.Unitful]] deps = ["Dates", "LinearAlgebra", "Random"] -git-tree-sha1 = "a72d22c7e13fe2de562feda8645aa134712a87ee" +git-tree-sha1 = "dd260903fdabea27d9b6021689b3cd5401a57748" uuid = "1986cc42-f94f-5a68-af5c-568840ba703d" -version = "1.17.0" +version = "1.20.0" +weakdeps = ["ConstructionBase", "InverseFunctions"] [deps.Unitful.extensions] ConstructionBaseUnitfulExt = "ConstructionBase" InverseFunctionsUnitfulExt = "InverseFunctions" - [deps.Unitful.weakdeps] - ConstructionBase = "187b0558-2788-49d3-abe0-74a17ed4e7c9" - InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112" - [[deps.UnitfulLatexify]] deps = ["LaTeXStrings", "Latexify", "Unitful"] git-tree-sha1 = "e2d817cc500e960fdbafcf988ac8436ba3208bfd" @@ -1733,9 +1957,9 @@ version = "1.0.1" [[deps.VectorizationBase]] deps = ["ArrayInterface", "CPUSummary", "HostCPUFeatures", "IfElse", "LayoutPointers", "Libdl", "LinearAlgebra", "SIMDTypes", "Static", "StaticArrayInterface"] -git-tree-sha1 = "b182207d4af54ac64cbc71797765068fdeff475d" +git-tree-sha1 = "e863582a41c5731f51fd050563ae91eb33cf09be" uuid = "3d5dd08c-fd9d-11e8-17fa-ed2836048c2f" -version = "0.21.64" +version = "0.21.68" [[deps.VertexSafeGraphs]] deps = ["Graphs"] @@ -1757,21 +1981,21 @@ version = "1.21.0+1" [[deps.Wayland_protocols_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "4528479aa01ee1b3b4cd0e6faef0e04cf16466da" +git-tree-sha1 = "93f43ab61b16ddfb2fd3bb13b3ce241cafb0e6c9" uuid = "2381bf8a-dfd0-557d-9999-79630e7b1b91" -version = "1.25.0+0" +version = "1.31.0+0" [[deps.WriteVTK]] deps = ["Base64", "CodecZlib", "FillArrays", "LightXML", "TranscodingStreams", "VTKBase"] -git-tree-sha1 = "41f0dc2a8f6fd860c266b91fd5cdf4fead65ae69" +git-tree-sha1 = "46664bb833f24e4fe561192e3753c9168c3b71b2" uuid = "64499a7a-5c06-52f2-abe2-ccb03c286192" -version = "1.18.1" +version = "1.19.2" [[deps.XML2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Zlib_jll"] -git-tree-sha1 = "24b81b59bd35b3c42ab84fa589086e19be919916" +git-tree-sha1 = "52ff2af32e591541550bd753c0da8b9bc92bb9d9" uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" -version = "2.11.5+0" +version = "2.12.7+0" [[deps.XSLT_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "Pkg", "XML2_jll", 
"Zlib_jll"] @@ -1781,21 +2005,21 @@ version = "1.1.34+0" [[deps.XZ_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "cf2c7de82431ca6f39250d2fc4aacd0daa1675c0" +git-tree-sha1 = "ac88fb95ae6447c8dda6a5503f3bafd496ae8632" uuid = "ffd25f8a-64ca-5728-b0f7-c24cf3aae800" -version = "5.4.4+0" +version = "5.4.6+0" [[deps.Xorg_libICE_jll]] -deps = ["Libdl", "Pkg"] -git-tree-sha1 = "e5becd4411063bdcac16be8b66fc2f9f6f1e8fe5" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "326b4fea307b0b39892b3e85fa451692eda8d46c" uuid = "f67eecfb-183a-506d-b269-f58e52b52d7c" -version = "1.0.10+1" +version = "1.1.1+0" [[deps.Xorg_libSM_jll]] -deps = ["Libdl", "Pkg", "Xorg_libICE_jll"] -git-tree-sha1 = "4a9d9e4c180e1e8119b5ffc224a7b59d3a7f7e18" +deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libICE_jll"] +git-tree-sha1 = "3796722887072218eabafb494a13c963209754ce" uuid = "c834827a-8449-5923-a945-d239c165b7dd" -version = "1.2.3+0" +version = "1.2.4+0" [[deps.Xorg_libX11_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libxcb_jll", "Xorg_xtrans_jll"] @@ -1822,10 +2046,10 @@ uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05" version = "1.1.4+0" [[deps.Xorg_libXext_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] -git-tree-sha1 = "b7c0aa8c376b31e4852b360222848637f481f8c3" +deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libX11_jll"] +git-tree-sha1 = "d2d1a5c49fae4ba39983f63de6afcbea47194e85" uuid = "1082639a-0dae-5f34-9b06-72781eeb8cb3" -version = "1.3.4+4" +version = "1.3.6+0" [[deps.Xorg_libXfixes_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] @@ -1858,10 +2082,10 @@ uuid = "ec84b674-ba8e-5d96-8ba1-2a689ba10484" version = "1.5.2+4" [[deps.Xorg_libXrender_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] -git-tree-sha1 = "19560f30fd49f4d4efbe7002a1037f8c43d43b96" +deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libX11_jll"] +git-tree-sha1 = "47e45cd78224c53109495b3e324df0c37bb61fbe" uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa" -version = "0.9.10+4" +version = "0.9.11+0" [[deps.Xorg_libpthread_stubs_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] @@ -1871,9 +2095,9 @@ version = "0.1.1+0" [[deps.Xorg_libxcb_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"] -git-tree-sha1 = "b4bfde5d5b652e22b9c790ad00af08b6d042b97d" +git-tree-sha1 = "bcd466676fef0878338c61e655629fa7bbc69d8e" uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b" -version = "1.15.0+0" +version = "1.17.0+0" [[deps.Xorg_libxkbfile_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libX11_jll"] @@ -1937,26 +2161,20 @@ version = "1.5.0+0" [[deps.YAML]] deps = ["Base64", "Dates", "Printf", "StringEncodings"] -git-tree-sha1 = "e6330e4b731a6af7959673621e91645eb1356884" +git-tree-sha1 = "80c3218f29cbc47111ac87e7be5e69cc05c6dd36" uuid = "ddb6d928-2868-570f-bddf-ab3f9cf99eb6" -version = "0.4.9" +version = "0.4.11" [[deps.Zlib_jll]] deps = ["Libdl"] uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -version = "1.2.13+0" +version = "1.2.13+1" [[deps.Zstd_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "49ce682769cd5de6c72dcf1b94ed7790cd08974c" +git-tree-sha1 = "e678132f07ddb5bfa46857f0d7620fb9be675d3b" uuid = "3161d3a3-bdf6-5164-811a-617609db77b4" -version = "1.5.5+0" - -[[deps.ZygoteRules]] -deps = ["ChainRulesCore", "MacroTools"] -git-tree-sha1 = "977aed5d006b840e2e40c0b48984f7463109046d" -uuid = "700de1a5-db45-46bc-99cf-38207098b444" 
-version = "0.2.3" +version = "1.5.6+0" [[deps.eudev_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "gperf_jll"] @@ -1965,16 +2183,16 @@ uuid = "35ca27e7-8b34-5b7f-bca9-bdc33f59eb06" version = "3.2.9+0" [[deps.fzf_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "47cf33e62e138b920039e8ff9f9841aafe1b733e" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "a68c9655fbe6dfcab3d972808f1aafec151ce3f8" uuid = "214eeab7-80f7-51ab-84ad-2988db7cef09" -version = "0.35.1+0" +version = "0.43.0+0" [[deps.gmsh_jll]] -deps = ["Artifacts", "Cairo_jll", "CompilerSupportLibraries_jll", "FLTK_jll", "FreeType2_jll", "GLU_jll", "GMP_jll", "HDF5_jll", "JLLWrappers", "JpegTurbo_jll", "LLVMOpenMP_jll", "Libdl", "Libglvnd_jll", "METIS_jll", "MMG_jll", "OCCT_jll", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll", "Xorg_libXfixes_jll", "Xorg_libXft_jll", "Xorg_libXinerama_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = "d4cf3bb87fa0669f569e51f6f06cd083771bab65" +deps = ["Artifacts", "Cairo_jll", "CompilerSupportLibraries_jll", "FLTK_jll", "FreeType2_jll", "GLU_jll", "GMP_jll", "HDF5_jll", "JLLWrappers", "JpegTurbo_jll", "LLVMOpenMP_jll", "Libdl", "Libglvnd_jll", "METIS_jll", "MMG_jll", "OCCT_jll", "Xorg_libX11_jll", "Xorg_libXext_jll", "Xorg_libXfixes_jll", "Xorg_libXft_jll", "Xorg_libXinerama_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] +git-tree-sha1 = "bdc2fa0a123008ad941cabb0ad88c571e696af2e" uuid = "630162c2-fc9b-58b3-9910-8442a8a132e6" -version = "4.10.2+1" +version = "4.13.0+1" [[deps.gperf_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -1982,11 +2200,17 @@ git-tree-sha1 = "3516a5630f741c9eecb3720b1ec9d8edc3ecc033" uuid = "1a1c6b14-54f6-533d-8383-74cd7377aa70" version = "3.1.1+0" +[[deps.libaec_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "46bf7be2917b59b761247be3f317ddf75e50e997" +uuid = "477f73a3-ac25-53e9-8cc3-50b2fa2566f0" +version = "1.1.2+0" + [[deps.libaom_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "3a2ea60308f0996d26f1e5354e10c24e9ef905d4" +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "1827acba325fdcdf1d2647fc8d5301dd9ba43a9d" uuid = "a4ae2306-e953-59d6-aa16-d00cac43593b" -version = "3.4.0+0" +version = "3.9.0+0" [[deps.libass_jll]] deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "HarfBuzz_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] @@ -1997,7 +2221,7 @@ version = "0.15.1+0" [[deps.libblastrampoline_jll]] deps = ["Artifacts", "Libdl"] uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" -version = "5.8.0+0" +version = "5.8.0+1" [[deps.libevdev_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -2018,10 +2242,10 @@ uuid = "36db933b-70db-51c0-b978-0f229ee0e533" version = "1.18.0+0" [[deps.libpng_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] -git-tree-sha1 = "94d180a6d2b5e55e447e2d27a29ed04fe79eb30c" +deps = ["Artifacts", "JLLWrappers", "Libdl", "Zlib_jll"] +git-tree-sha1 = "d7015d2e18a5fd9a4f47de711837e980519781a4" uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f" -version = "1.6.38+0" +version = "1.6.43+1" [[deps.libvorbis_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Ogg_jll", "Pkg"] @@ -2038,12 +2262,18 @@ version = "1.1.6+0" [[deps.nghttp2_jll]] deps = ["Artifacts", "Libdl"] uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" -version = "1.48.0+0" +version = "1.52.0+1" + +[[deps.oneTBB_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = 
"7d0ea0f4895ef2f5cb83645fa689e52cb55cf493" +uuid = "1317d2d5-d96f-522e-a858-c73665f53c3e" +version = "2021.12.0+0" [[deps.p7zip_jll]] deps = ["Artifacts", "Libdl"] uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" -version = "17.4.0+0" +version = "17.4.0+2" [[deps.x264_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] diff --git a/docs/Project.toml b/docs/Project.toml index 876724cc7f..4b6b129d19 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -20,3 +20,4 @@ StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" Tensors = "48a634ad-e948-5137-8d70-aa71f2a747f4" TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed" +WriteVTK = "64499a7a-5c06-52f2-abe2-ccb03c286192" diff --git a/docs/clean.jl b/docs/clean.jl new file mode 100644 index 0000000000..ea0f052fef --- /dev/null +++ b/docs/clean.jl @@ -0,0 +1,20 @@ +#!/usr/bin/env julia + +# Removes all files (likely) generated by invoking docs/make.jl + +const DIR = @__DIR__ +const ARTIFACTS = String[] + +append!(ARTIFACTS, + # Untracked files in build directory + eachline(`git ls-files --other --directory $(joinpath(DIR, "build"))`), + # Untracked files in examples/tutorials/howto/gallery generated by Literate.jl + let literate_output = joinpath.(DIR, "src", ["examples", "tutorials", "howto", "gallery"]) + eachline(`git ls-files --other --directory $(literate_output)`) + end, +) + +for artifact in ARTIFACTS + @info "Removing $artifact" + rm(artifact; recursive=true, force=true) +end diff --git a/docs/download_resources.jl b/docs/download_resources.jl index c7c482f6fc..2dfbeffb2f 100644 --- a/docs/download_resources.jl +++ b/docs/download_resources.jl @@ -11,6 +11,8 @@ for (file, url) in [ "transient_heat_colorbar.svg" => "https://raw.githubusercontent.com/Ferrite-FEM/Ferrite.jl/gh-pages/assets/transient_heat_colorbar.svg", "porous_media.gif" => "https://raw.githubusercontent.com/Ferrite-FEM/Ferrite.jl/gh-pages/assets/porous_media.gif", "porous_media_0p25.inp" => "https://raw.githubusercontent.com/Ferrite-FEM/Ferrite.jl/gh-pages/assets/porous_media_0p25.inp", + "reactive_surface.gif" => "https://raw.githubusercontent.com/Ferrite-FEM/Ferrite.jl/gh-pages/assets/reactive_surface.gif", + "nsdiffeq.gif" => "https://raw.githubusercontent.com/Ferrite-FEM/Ferrite.jl/gh-pages/assets/nsdiffeq.gif", ] afile = joinpath(directory, file) if !isfile(afile) diff --git a/docs/liveserver.jl b/docs/liveserver.jl index f2c099eaa2..a589e910c1 100755 --- a/docs/liveserver.jl +++ b/docs/liveserver.jl @@ -14,12 +14,14 @@ push!(ARGS, "liveserver") # Run LiveServer.servedocs(...) import LiveServer LiveServer.servedocs(; + host = "0.0.0.0", # Documentation root where make.jl and src/ are located foldername = joinpath(repo_root, "docs"), # Extra source folder to watch for changes include_dirs = [ - # Watch the src folder so docstrings can be Revise'd + # Watch the src and ext folder so docstrings can be Revise'd joinpath(repo_root, "src"), + joinpath(repo_root, "ext"), ], skip_dirs = [ # Skip the folder where Literate.jl output is written. 
This is needed diff --git a/docs/logo.jl b/docs/logo.jl index 08e58b2038..bd8605f156 100755 --- a/docs/logo.jl +++ b/docs/logo.jl @@ -18,7 +18,7 @@ function ferrite_logo(; bounding_box=true, mesh=true) ## Tessalation success(`neper -T -dim 2 -n 6 -id 4 -reg 1`) ## Meshing - success(`neper -M n6-id4.tess -dim 2 -rcl 2`) + success(`neper -M n6-id4.tess -dim 2 -order 1 -rcl 2`) ## Read the mesh grid = redirect_stdout(devnull) do saved_file_to_grid("n6-id4.msh") @@ -30,12 +30,12 @@ function ferrite_logo(; bounding_box=true, mesh=true) println(io, "\\node [] (N$(i)) at $(n.x.data) {};") end colormap = Dict( - "1" => "julia-purple", - "2" => "julia-red", - "3" => "julia-red", - "4" => "julia-blue", - "5" => "julia-purple", - "6" => "julia-green" + "face1" => "julia-purple", + "face2" => "julia-red", + "face3" => "julia-red", + "face4" => "julia-blue", + "face5" => "julia-purple", + "face6" => "julia-green" ) for (setk, setv) in grid.cellsets color = colormap[setk] diff --git a/docs/make.jl b/docs/make.jl index f3a68d1689..7cdeb1852b 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -10,7 +10,10 @@ if liveserver @timeit dto "Revise.revise()" Revise.revise() end -using Documenter, DocumenterCitations, Ferrite, FerriteGmsh, FerriteMeshParser +using Documenter, DocumenterCitations, Ferrite, FerriteGmsh, FerriteMeshParser, SparseArrays, LinearAlgebra + +using BlockArrays +const FerriteBlockArrays = Base.get_extension(Ferrite, :FerriteBlockArrays) const is_ci = haskey(ENV, "GITHUB_ACTIONS") @@ -39,7 +42,7 @@ bibtex_plugin = CitationBibliography( ), sitename = "Ferrite.jl", doctest = false, - warnonly = true, + warnonly = is_ci ? false : [:cross_references], # Local build exception required for Literate's `@__NBVIEWER_ROOT_URL__` draft = liveserver, pages = Any[ "Home" => "index.md", @@ -56,26 +59,30 @@ bibtex_plugin = CitationBibliography( "tutorials/stokes-flow.md", "tutorials/porous_media.md", "tutorials/ns_vs_diffeq.md", + "tutorials/reactive_surface.md", "tutorials/linear_shell.md", "tutorials/dg_heat_equation.md", ], "Topic guides" => [ "Topic guide overview" => "topics/index.md", "topics/fe_intro.md", + "topics/reference_shapes.md", "topics/FEValues.md", "topics/degrees_of_freedom.md", + "topics/sparse_matrix.md", "topics/assembly.md", "topics/boundary_conditions.md", "topics/constraints.md", "topics/grid.md", "topics/export.md" ], - "Reference" => [ + "API reference" => [ "Reference overview" => "reference/index.md", "reference/quadrature.md", "reference/interpolations.md", "reference/fevalues.md", "reference/dofhandler.md", + "reference/sparsity_pattern.md", "reference/assembly.md", "reference/boundary_conditions.md", "reference/grid.md", @@ -96,7 +103,7 @@ bibtex_plugin = CitationBibliography( # "gallery/topology_optimization.md", # ], "devdocs/index.md", - "references.md", + "cited-literature.md", ], plugins = [ bibtex_plugin, diff --git a/docs/old_examples/README.md b/docs/old_examples/README.md index fec8fe738d..2dd9293b2b 100644 --- a/docs/old_examples/README.md +++ b/docs/old_examples/README.md @@ -1,2 +1,2 @@ The examples in this folder might be out of date. -For up to date examples, see the documentation. \ No newline at end of file +For up to date examples, see the documentation. 
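The `docs/make.jl` hunk above loads `BlockArrays` and then fetches Ferrite's `FerriteBlockArrays` extension module with `Base.get_extension`, so the extension's docstrings are available to Documenter. A minimal sketch of that pattern, for illustration only and assuming `BlockArrays` is available in the active environment (the `@warn` fallback is an illustrative assumption, not part of the diff):

```julia
# Sketch of the package-extension lookup used in docs/make.jl above.
# Loading the weak dependency (BlockArrays) makes Julia load the
# FerriteBlockArrays extension; Base.get_extension then returns that module.
using Ferrite, BlockArrays

const FerriteBlockArrays = Base.get_extension(Ferrite, :FerriteBlockArrays)
if FerriteBlockArrays === nothing
    # get_extension returns `nothing` if the extension has not been loaded
    @warn "FerriteBlockArrays extension not available"
end
```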
diff --git a/docs/src/assets/custom.css b/docs/src/assets/custom.css index 1515ad1f31..1daa0f58cd 100644 --- a/docs/src/assets/custom.css +++ b/docs/src/assets/custom.css @@ -1,5 +1,9 @@ html.theme--documenter-light body div#documenter nav.docs-sidebar a.docs-logo img, -html.theme--documenter-dark body div#documenter nav.docs-sidebar a.docs-logo img { +html.theme--documenter-dark body div#documenter nav.docs-sidebar a.docs-logo img, +html.theme--catppuccin-latte body div#documenter nav.docs-sidebar a.docs-logo img, +html.theme--catppuccin-frappe body div#documenter nav.docs-sidebar a.docs-logo img, +html.theme--catppuccin-macchiato body div#documenter nav.docs-sidebar a.docs-logo img, +html.theme--catppuccin-mocha body div#documenter nav.docs-sidebar a.docs-logo img { max-height: 10rem; } div.docs-package-name { @@ -7,15 +11,3 @@ div.docs-package-name { font-weight: normal !important; font-size: 1.7rem !important; } -details.admonition.collapsible summary.admonition-header::before { - content: "\f055"; -} -details[open].admonition.collapsible summary.admonition-header::before { - content: "\f056"; -} -details.admonition.collapsible summary.admonition-header { - list-style: none; -} -details.admonition.collapsible { - background-color: inherit; -} diff --git a/docs/src/assets/references.bib b/docs/src/assets/references.bib index efbb3fd1c5..c240c43833 100644 --- a/docs/src/assets/references.bib +++ b/docs/src/assets/references.bib @@ -64,7 +64,7 @@ @phdthesis{Cenanovic2017 year={2017}, } @misc{Kirby2017, - title={A general approach to transforming finite elements}, + title={A general approach to transforming finite elements}, author={Robert C. Kirby}, year={2017}, eprint={1706.09017}, @@ -80,7 +80,7 @@ @article{SimMie:1992:act pages = {41-104}, year = {1992}, issn = {0045-7825}, -doi = {https://doi.org/10.1016/0045-7825(92)90170-O}, +doi = {10.1016/0045-7825(92)90170-O}, url = {https://www.sciencedirect.com/science/article/pii/004578259290170O}, author = {J.C. Simo and C. Miehe}, } @@ -117,9 +117,76 @@ @article{Mu:2014:IP pages = {432-440}, year = {2014}, issn = {0377-0427}, -doi = {https://doi.org/10.1016/j.cam.2013.06.003}, +doi = {10.1016/j.cam.2013.06.003}, url = {https://www.sciencedirect.com/science/article/pii/S0377042713002999}, author = {Lin Mu and Junping Wang and Yanqiu Wang and Xiu Ye}, keywords = {Discontinuous Galerkin, Finite element, Interior penalty, Second-order elliptic equations, Hybrid mesh}, abstract = {This paper provides a theoretical foundation for interior penalty discontinuous Galerkin methods for second-order elliptic equations on very general polygonal or polyhedral meshes. The mesh can be composed of any polygons or polyhedra that satisfy certain shape regularity conditions characterized in a recent paper by two of the authors, Wang and Ye (2012) [11]. The usual H1-conforming finite element methods on such meshes are either very complicated or impossible to implement in practical computation. The interior penalty discontinuous Galerkin method provides a simple and effective alternative approach which is efficient and robust. 
Results with such general meshes have important application in computational sciences.} } +@article{Jin:1984:sgq, + title = {Symmetric gaussian quadrature formulae for tetrahedronal regions}, + journal = {Computer Methods in Applied Mechanics and Engineering}, + volume = {43}, + number = {3}, + pages = {349-353}, + year = {1984}, + issn = {0045-7825}, + doi = {10.1016/0045-7825(84)90072-0}, + url = {https://www.sciencedirect.com/science/article/pii/0045782584900720}, + author = {Jinyun Yu}, + abstract = {Quadrature formulae of degrees 2 to 6 are presented for the numerical integration of a function over tetrahedronal regions. The formulae presented are of Gaussian type and fully symmetric with respect to the four vertices of the tetrahedron.} +} +@article{Dun:1985:hde, + title={High degree efficient symmetrical Gaussian quadrature rules for the triangle}, + author={Dunavant, D.A.}, + journal={International journal for numerical methods in engineering}, + volume={21}, + number={6}, + pages={1129--1148}, + year={1985}, + publisher={Wiley Online Library}, + doi={10.1002/nme.1620210612}, + url={https://onlinelibrary.wiley.com/doi/abs/10.1002/nme.1620210612}, +} +@article{WitVin:2015:isq, + title={On the identification of symmetric quadrature rules for finite element methods}, + author={Witherden, Freddie D and Vincent, Peter E}, + journal={Computers \& Mathematics with Applications}, + volume={69}, + number={10}, + pages={1232--1241}, + year={2015}, + publisher={Elsevier} +} +@article{Keast:1986:mtq, + title={Moderate-degree tetrahedral quadrature formulas}, + author={Keast, Patrick}, + journal={Computer methods in applied mechanics and engineering}, + volume={55}, + number={3}, + pages={339--348}, + year={1986}, + publisher={Elsevier}, + doi={10.1016/0045-7825(86)90059-9}, + url={https://www.sciencedirect.com/science/article/pii/0045782586900599}, +} +@article{RanTur:1992:snq, + title={Simple nonconforming quadrilateral Stokes element}, + author={Rannacher, Rolf and Turek, Stefan}, + journal={Numerical Methods for Partial Differential Equations}, + volume={8}, + number={2}, + pages={97--111}, + year={1992}, + publisher={Wiley Online Library} +} +@article{CroRav:1973:cnf, + title={Conforming and nonconforming finite element methods for solving the stationary Stokes equations I}, + author={Crouzeix, Michel and Raviart, P-A}, + journal={Revue fran{\c{c}}aise d'automatique informatique recherche op{\'e}rationnelle. 
Math{\'e}matique}, + volume={7}, + number={R3}, + pages={33--75}, + year={1973}, + publisher={EDP Sciences} +} diff --git a/docs/src/references.md b/docs/src/cited-literature.md similarity index 91% rename from docs/src/references.md rename to docs/src/cited-literature.md index 4c502ec27a..7cb4edbf20 100644 --- a/docs/src/references.md +++ b/docs/src/cited-literature.md @@ -1,4 +1,4 @@ -# References +# Cited literature ```@bibliography ``` diff --git a/docs/src/devdocs/FEValues.md b/docs/src/devdocs/FEValues.md index 06bc778058..e329e25c86 100644 --- a/docs/src/devdocs/FEValues.md +++ b/docs/src/devdocs/FEValues.md @@ -4,11 +4,12 @@ * `AbstractValues` * `AbstractCellValues` * [`CellValues`](@ref) - * `AbstractFaceValues` - * [`FaceValues`](@ref) - * [`BCValues`](@ref) + * `AbstractFacetValues` + * [`FacetValues`](@ref) + * [`BCValues`](@ref Ferrite.BCValues) * [`PointValues`](@ref) - + * [`InterfaceValues`](@ref) + ## Internal types ```@docs @@ -18,9 +19,17 @@ Ferrite.FunctionValues Ferrite.BCValues ``` +## Internal utilities +```@docs +Ferrite.embedding_det +Ferrite.shape_value_type +Ferrite.shape_gradient_type +Ferrite.ValuesUpdateFlags +``` + ## Custom FEValues Custom FEValues, `fe_v::AbstractValues`, should normally implement the [`reinit!`](@ref) method. Subtypes of `AbstractValues` have default implementations for some functions, but require some lower-level access functions, specifically - + * [`function_value`](@ref), requires * [`shape_value`](@ref) * [`getnquadpoints`](@ref) @@ -30,10 +39,10 @@ Custom FEValues, `fe_v::AbstractValues`, should normally implement the [`reinit! * [`getnquadpoints`](@ref) * [`getnbasefunctions`](@ref) * [`spatial_coordinate`](@ref), requires - * [`geometric_value`](@ref) - * [`getngeobasefunctions`](@ref) + * [`geometric_value`](@ref Ferrite.geometric_value) + * `getngeobasefunctions` * [`getnquadpoints`](@ref) - + ### Array bounds * Asking for the `n`th quadrature point must be inside array bounds if `1 <= n <= getnquadpoints(fe_v)`. (`checkquadpoint` can, alternatively, be dispatched to check that `n` is inbounds.) diff --git a/docs/src/devdocs/dofhandler.md b/docs/src/devdocs/dofhandler.md index 96a5cd6c04..fd28e81c71 100644 --- a/docs/src/devdocs/dofhandler.md +++ b/docs/src/devdocs/dofhandler.md @@ -3,9 +3,9 @@ ## Type definitions Dof handlers are subtypes of `AbstractDofhandler{sdim}`, i.e. they are -parametrized by the spatial dimension. Internally a helper struct [`InterpolationInfo`](@ref) is utilized to enforce type stability during -dof distribution, because the interpolations are not available as concrete -types. +parametrized by the spatial dimension. Internally a helper struct +[`InterpolationInfo`](@ref Ferrite.InterpolationInfo) is utilized to enforce type stability +during dof distribution, because the interpolations are not available as concrete types. ```@docs Ferrite.InterpolationInfo @@ -16,15 +16,14 @@ Ferrite.SurfaceOrientationInfo ## Internal API -The main entry point for dof distribution is [`__close!`](@ref). +The main entry point for dof distribution is [`__close!`](@ref Ferrite.__close!). ```@docs Ferrite.__close! Ferrite.get_grid -Ferrite.find_field(dh::DofHandler, field_name::Symbol) +Ferrite.find_field +Ferrite._find_field Ferrite._close_subdofhandler! Ferrite._distribute_dofs_for_cell! Ferrite.permute_and_push! -Ferrite.cross_element_coupling! 
-Ferrite._add_cross_coupling ``` diff --git a/docs/src/devdocs/elements.md b/docs/src/devdocs/elements.md index a719de9cc6..88c799665a 100644 --- a/docs/src/devdocs/elements.md +++ b/docs/src/devdocs/elements.md @@ -2,41 +2,44 @@ ## Type definitions -Elements or cells are subtypes of `AbstractCell{dim,N,M}`. They are parametrized by -the dimension of their nodes via `dim`, the number of nodes `N` and the number -of faces `M`. +Elements or cells are subtypes of `AbstractCell{<:AbstractRefShape}`. As shown, they are parametrized +by the associated reference element. ### Required methods to implement for all subtypes of `AbstractCell` to define a new element +```@docs +Ferrite.get_node_ids +``` + +### Common utilities and definitions when working with grids internally. + +First we have some topological queries on the element + ```@docs Ferrite.vertices(::Ferrite.AbstractCell) Ferrite.edges(::Ferrite.AbstractCell) -Ferrite.reference_faces(::Ferrite.AbstractRefShape) Ferrite.faces(::Ferrite.AbstractCell) -Ferrite.default_interpolation(::Ferrite.AbstractCell) +Ferrite.facets(::Ferrite.AbstractCell) +Ferrite.boundaryfunction(::Type{<:Ferrite.BoundaryIndex}) +Ferrite.reference_vertices(::Ferrite.AbstractCell) +Ferrite.reference_edges(::Ferrite.AbstractCell) +Ferrite.reference_faces(::Ferrite.AbstractCell) ``` -### Common utilities and definitions when working with grids internally. +and some generic utils which are commonly found in finite element codes ```@docs Ferrite.BoundaryIndex -Ferrite.boundaryfunction(::Type{<:Ferrite.BoundaryIndex}) Ferrite.get_coordinate_eltype(::Ferrite.AbstractGrid) Ferrite.get_coordinate_eltype(::Node) Ferrite.toglobal Ferrite.sortface +Ferrite.sortface_fast Ferrite.sortedge -Ferrite.getfaceedges -Ferrite.getfacevertices -Ferrite.getedgevertices -Ferrite.getfaceinstances -Ferrite.getedgeinstances -Ferrite.getvertexinstances -Ferrite.filterfaces -Ferrite.filteredges -Ferrite.filtervertices -Ferrite.element_to_face_transformation -Ferrite.face_to_element_transformation +Ferrite.sortedge_fast +Ferrite.element_to_facet_transformation +Ferrite.facet_to_element_transformation Ferrite.InterfaceOrientationInfo Ferrite.transform_interface_points! +Ferrite.get_transformation_matrix ``` diff --git a/docs/src/devdocs/index.md b/docs/src/devdocs/index.md index 9c16b83d7c..bfd680dcc8 100644 --- a/docs/src/devdocs/index.md +++ b/docs/src/devdocs/index.md @@ -5,5 +5,5 @@ developing the library. ```@contents Depth = 1 -Pages = ["reference_cells.md", "interpolations.md", "elements.md", "FEValues.md", "dofhandler.md", "performance.md"] +Pages = ["reference_cells.md", "interpolations.md", "elements.md", "FEValues.md", "dofhandler.md", "performance.md", "special_datastructures.md"] ``` diff --git a/docs/src/devdocs/interpolations.md b/docs/src/devdocs/interpolations.md index 540d288280..22c45ac3ef 100644 --- a/docs/src/devdocs/interpolations.md +++ b/docs/src/devdocs/interpolations.md @@ -8,16 +8,17 @@ parametrized by the reference element and its characteristic order. 
### Fallback methods applicable for all subtypes of `Interpolation` ```@docs -Ferrite.getdim(::Interpolation) Ferrite.getrefshape(::Interpolation) Ferrite.getorder(::Interpolation) -Ferrite.shape_gradient(::Interpolation, ::Vec, ::Int) -Ferrite.shape_gradient_and_value +Ferrite.reference_shape_gradient(::Interpolation, ::Vec, ::Int) +Ferrite.reference_shape_gradient_and_value(::Interpolation, ::Vec, ::Int) +Ferrite.reference_shape_hessian_gradient_and_value(::Interpolation, ::Vec, ::Int) Ferrite.boundarydof_indices Ferrite.dirichlet_boundarydof_indices -Ferrite.shape_values! -Ferrite.shape_gradients! -Ferrite.shape_gradients_and_values! +Ferrite.reference_shape_values! +Ferrite.reference_shape_gradients! +Ferrite.reference_shape_gradients_and_values! +Ferrite.reference_shape_hessians_gradients_and_values! ``` ### Required methods to implement for all subtypes of `Interpolation` to define a new finite element @@ -25,7 +26,7 @@ Ferrite.shape_gradients_and_values! Depending on the dimension of the reference element the following functions have to be implemented ```@docs -Ferrite.shape_value(::Interpolation, ::Vec, ::Int) +Ferrite.reference_shape_value(::Interpolation, ::Vec, ::Int) Ferrite.vertexdof_indices(::Interpolation) Ferrite.dirichlet_vertexdof_indices(::Interpolation) Ferrite.facedof_indices(::Interpolation) @@ -34,7 +35,7 @@ Ferrite.facedof_interior_indices(::Interpolation) Ferrite.edgedof_indices(::Interpolation) Ferrite.dirichlet_edgedof_indices(::Interpolation) Ferrite.edgedof_interior_indices(::Interpolation) -Ferrite.celldof_interior_indices(::Interpolation) +Ferrite.volumedof_interior_indices(::Interpolation) Ferrite.getnbasefunctions(::Interpolation) Ferrite.reference_coordinates(::Interpolation) Ferrite.is_discontinuous(::Interpolation) @@ -43,7 +44,7 @@ Ferrite.mapping_type ``` for all entities which exist on that reference element. The dof functions default to having no -dofs defined on a specific entity. Hence, not overloading of the dof functions will result in an -element with zero dofs. Also, it should always be double checked that everything is consistent as +dofs defined on a specific entity. Hence, not overloading of the dof functions will result in an +element with zero dofs. Also, it should always be double checked that everything is consistent as specified in the docstring of the corresponding function, as inconsistent implementations can lead to bugs which are really difficult to track down. 
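The interpolation dev-docs above rename the reference-element evaluation API (`shape_value` → `reference_shape_value`, `shape_gradient` → `reference_shape_gradient`, and so on). A short usage sketch of the renamed entry points, relying only on the signatures listed in the `@docs` blocks of the diff; the concrete interpolation and the evaluation point are arbitrary examples:

```julia
# Evaluate reference shape functions and gradients with the renamed API.
using Ferrite

ip = Lagrange{RefQuadrilateral, 1}()   # any concrete interpolation
ξ  = Vec{2}((0.0, 0.0))                # a point on the reference element

for i in 1:Ferrite.getnbasefunctions(ip)
    Nᵢ  = Ferrite.reference_shape_value(ip, ξ, i)     # previously shape_value
    ∇Nᵢ = Ferrite.reference_shape_gradient(ip, ξ, i)  # previously shape_gradient
    @show i, Nᵢ, ∇Nᵢ
end
```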
diff --git a/docs/src/devdocs/reference_cells.md b/docs/src/devdocs/reference_cells.md index e48e99264c..cda4889eac 100644 --- a/docs/src/devdocs/reference_cells.md +++ b/docs/src/devdocs/reference_cells.md @@ -15,3 +15,23 @@ Ferrite.RefTetrahedron Ferrite.RefHexahedron Ferrite.RefPrism ``` + +### Required methods to implement for all subtypes of `AbstractRefShape` to define a new reference shape + +```@docs +Ferrite.reference_vertices(::Type{<:Ferrite.AbstractRefShape}) +Ferrite.reference_edges(::Type{<:Ferrite.AbstractRefShape}) +Ferrite.reference_faces(::Type{<:Ferrite.AbstractRefShape}) +``` + +which automatically defines + + +```@docs +Ferrite.reference_facets(::Type{<:Ferrite.AbstractRefShape}) +``` + +### Applicable methods to `AbstractRefShape`s +```@docs +Ferrite.getrefdim(::Type{<:Ferrite.AbstractRefShape}) +``` diff --git a/docs/src/devdocs/special_datastructures.md b/docs/src/devdocs/special_datastructures.md new file mode 100644 index 0000000000..e642e5a1be --- /dev/null +++ b/docs/src/devdocs/special_datastructures.md @@ -0,0 +1,16 @@ +# Special data structures + +## `ArrayOfVectorViews` +`ArrayOfVectorViews` is a data structure representing an `Array` of +vector views (specifically `SubArray{T, 1} where T`). By arranging all +data (of type `T`) continuously in memory, this will significantly reduce +the garbage collection time compared to using an `Array{AbstractVector{T}}`. While the data in each view can be mutated, the length of each view is +fixed after construction. +This data structure provides two features not provided by `ArraysOfArrays.jl`: Support of matrices and higher order arrays for storing vectors +of different dimensions and efficient construction when the number of elements in each view is not known in advance. + +```@docs +Ferrite.ArrayOfVectorViews +Ferrite.ConstructionBuffer +Ferrite.push_at_index! +``` diff --git a/docs/src/literate-gallery/helmholtz.jl b/docs/src/literate-gallery/helmholtz.jl index 6f8faa6de0..bd48a17a59 100644 --- a/docs/src/literate-gallery/helmholtz.jl +++ b/docs/src/literate-gallery/helmholtz.jl @@ -15,7 +15,7 @@ # ```math # n \cdot \nabla u = g_2 \quad x \in \Gamma_2 # ``` -# +# # Here Γ₁ is the union of the top and the right boundary of the square, # while Γ₂ is the union of the bottom and the left boundary. # @@ -55,9 +55,9 @@ grid = generate_grid(Quadrilateral, (150, 150)) ip = Lagrange{RefQuadrilateral, 1}() qr = QuadratureRule{RefQuadrilateral}(2) -qr_face = FaceQuadratureRule{RefQuadrilateral}(2) +qr_facet = FacetQuadratureRule{RefQuadrilateral}(2) cellvalues = CellValues(qr, ip); -facevalues = FaceValues(qr_face, ip); +facetvalues = FacetValues(qr_facet, ip); dh = DofHandler(grid) add!(dh, :u, ip) @@ -80,14 +80,14 @@ end; dbcs = ConstraintHandler(dh) # The (strong) Dirichlet boundary condition can be handled automatically by the Ferrite library. 
-dbc = Dirichlet(:u, union(getfaceset(grid, "top"), getfaceset(grid, "right")), (x,t) -> u_ana(x)) +dbc = Dirichlet(:u, union(getfacetset(grid, "top"), getfacetset(grid, "right")), (x,t) -> u_ana(x)) add!(dbcs, dbc) close!(dbcs) update!(dbcs, 0.0) -K = create_sparsity_pattern(dh); +K = allocate_matrix(dh); -function doassemble(cellvalues::CellValues, facevalues::FaceValues, +function doassemble(cellvalues::CellValues, facetvalues::FacetValues, K::SparseMatrixCSC, dh::DofHandler) b = 1.0 f = zeros(ndofs(dh)) @@ -134,37 +134,36 @@ function doassemble(cellvalues::CellValues, facevalues::FaceValues, # \int_{\Gamma_2} δu g_2 \, d\Gamma # ``` #+ - for face in 1:nfaces(cell) - if onboundary(cell, face) && - ((cellcount, face) ∈ getfaceset(grid, "left") || - (cellcount, face) ∈ getfaceset(grid, "bottom")) - reinit!(facevalues, cell, face) - for q_point in 1:getnquadpoints(facevalues) - coords_qp = spatial_coordinate(facevalues, q_point, coords) - n = getnormal(facevalues, q_point) + for facet in 1:nfacets(cell) + if (cellcount, facet) ∈ getfacetset(grid, "left") || + (cellcount, facet) ∈ getfacetset(grid, "bottom") + reinit!(facetvalues, cell, facet) + for q_point in 1:getnquadpoints(facetvalues) + coords_qp = spatial_coordinate(facetvalues, q_point, coords) + n = getnormal(facetvalues, q_point) g_2 = gradient(u_ana, coords_qp) ⋅ n - dΓ = getdetJdV(facevalues, q_point) + dΓ = getdetJdV(facetvalues, q_point) for i in 1:n_basefuncs - δu = shape_value(facevalues, q_point, i) + δu = shape_value(facetvalues, q_point, i) fe[i] += (δu * g_2) * dΓ end end end end - + celldofs!(global_dofs, cell) assemble!(assembler, global_dofs, fe, Ke) end return K, f end; -K, f = doassemble(cellvalues, facevalues, K, dh); +K, f = doassemble(cellvalues, facetvalues, K, dh); apply!(K, f, dbcs) u = Symmetric(K) \ f; -vtkfile = vtk_grid("helmholtz", dh) -vtk_point_data(vtkfile, dh, u) -vtk_save(vtkfile) +vtk = VTKGridFile("helmholtz", dh) +write_solution(vtk, dh, u) +close(vtk) using Test #src #src this test catches unexpected changes in the result over time. 
#src the true maximum is slightly bigger then 1.0 diff --git a/docs/src/literate-gallery/landau.jl b/docs/src/literate-gallery/landau.jl index e28eb75f8e..cb0919e420 100644 --- a/docs/src/literate-gallery/landau.jl +++ b/docs/src/literate-gallery/landau.jl @@ -68,7 +68,7 @@ function ThreadCache(dpc::Int, nodespercell, cvP::CellValues, modelparams, elpot element_coords = zeros(Vec{3, Float64}, nodespercell) potfunc = x -> elpotential(x, cvP, modelparams) gradconf = GradientConfig(potfunc, zeros(dpc), Chunk{12}()) - hessconf = HessianConfig(potfunc, zeros(dpc), Chunk{12}()) + hessconf = HessianConfig(potfunc, zeros(dpc), Chunk{4}()) return ThreadCache(cvP, element_indices, element_dofs, element_gradient, element_hessian, element_coords, potfunc, gradconf, hessconf) end @@ -98,14 +98,14 @@ function LandauModel(α, G, gridsize, left::Vec{DIM, T}, right::Vec{DIM, T}, elp startingconditions!(dofvector, dofhandler) boundaryconds = ConstraintHandler(dofhandler) #boundary conditions can be added but aren't necessary for optimization - #add!(boundaryconds, Dirichlet(:P, getfaceset(grid, "left"), (x, t) -> [0.0,0.0,0.53], [1,2,3])) - #add!(boundaryconds, Dirichlet(:P, getfaceset(grid, "right"), (x, t) -> [0.0,0.0,-0.53], [1,2,3])) + #add!(boundaryconds, Dirichlet(:P, getfacetset(grid, "left"), (x, t) -> [0.0,0.0,0.53], [1,2,3])) + #add!(boundaryconds, Dirichlet(:P, getfacetset(grid, "right"), (x, t) -> [0.0,0.0,-0.53], [1,2,3])) close!(boundaryconds) update!(boundaryconds, 0.0) apply!(dofvector, boundaryconds) - hessian = create_sparsity_pattern(dofhandler) + hessian = allocate_matrix(dofhandler) dpc = ndofs_per_cell(dofhandler) cpc = length(grid.cells[1].nodes) caches = [ThreadCache(dpc, cpc, copy(cvP), ModelParams(α, G), elpotential) for t=1:nthreads()] @@ -113,10 +113,10 @@ function LandauModel(α, G, gridsize, left::Vec{DIM, T}, right::Vec{DIM, T}, elp end # utility to quickly save a model -function Ferrite.vtk_save(path, model, dofs=model.dofs) - vtkfile = vtk_grid(path, model.dofhandler) - vtk_point_data(vtkfile, model.dofhandler, dofs) - vtk_save(vtkfile) +function save_landau(path, model, dofs=model.dofs) + VTKGridFile(path, model.dofhandler) do vtk + write_solution(vtk, model.dofhandler, dofs) + end end # ## Assembly @@ -193,7 +193,7 @@ function minimize!(model; kwargs...) dh = model.dofhandler dofs = model.dofs ∇f = fill(0.0, length(dofs)) - ∇²f = create_sparsity_pattern(dh) + ∇²f = allocate_matrix(dh) function g!(storage, x) ∇F!(storage, x, model) apply_zero!(storage, model.boundaryconds) @@ -253,9 +253,9 @@ left = Vec{3}((-75.,-25.,-2.)) right = Vec{3}((75.,25.,2.)) model = LandauModel(α, G, (50, 50, 2), left, right, element_potential) -vtk_save("landauorig", model) +save_landau("landauorig", model) @time minimize!(model) -vtk_save("landaufinal", model) +save_landau("landaufinal", model) # as we can see this runs very quickly even for relatively large gridsizes. # The key to get high performance like this is to minimize the allocations inside the threaded loops, diff --git a/docs/src/literate-gallery/quasi_incompressible_hyperelasticity.jl b/docs/src/literate-gallery/quasi_incompressible_hyperelasticity.jl index cc709ff59a..ef0a162b9f 100644 --- a/docs/src/literate-gallery/quasi_incompressible_hyperelasticity.jl +++ b/docs/src/literate-gallery/quasi_incompressible_hyperelasticity.jl @@ -4,13 +4,13 @@ #- #md # !!! 
tip #md # This example is also available as a Jupyter notebook: -#md # [`quasi_incompressible_hyperelasticity.ipynb`](@__NBVIEWER_ROOT_URL__/examples/quasi_incompressible_hyperelasticity.ipynb) +#md # [`quasi_incompressible_hyperelasticity.ipynb`](@__NBVIEWER_ROOT_URL__/gallery/quasi_incompressible_hyperelasticity.ipynb) #- # ## Introduction # # In this example we study quasi- or nearly-incompressible hyperelasticity using the stable Taylor-Hood approximation. In spirit, this example is the nonlinear analogue of -# [`incompressible_elasticity`](@__NBVIEWER_ROOT_URL__/examples/incompressible_elasticity.ipynb) and the incompressible analogue of -# [`hyperelasticity`](@__NBVIEWER_ROOT_URL__/examples/hyperelasticity.ipynb). Much of the code therefore follows from the above two examples. +# [`incompressible_elasticity`](@__NBVIEWER_ROOT_URL__/tutorials/incompressible_elasticity.ipynb) and the incompressible analogue of +# [`hyperelasticity`](@__NBVIEWER_ROOT_URL__/tutorials/hyperelasticity.ipynb). Much of the code therefore follows from the above two examples. # The problem is formulated in the undeformed or reference configuration with the displacement $\mathbf{u}$ and pressure $p$ being the unknown fields. We now briefly outline # the formulation. Consider the standard hyperelasticity problem # @@ -72,7 +72,7 @@ # ## Implementation # We now get to the actual code. First, we import the respective packages -using Ferrite, Tensors, ProgressMeter +using Ferrite, Tensors, ProgressMeter, WriteVTK using BlockArrays, SparseArrays, LinearAlgebra # and the corresponding `struct` to store our material properties. @@ -86,10 +86,10 @@ end # to later assign Dirichlet boundary conditions function importTestGrid() grid = generate_grid(Tetrahedron, (5, 5, 5), zero(Vec{3}), ones(Vec{3})); - addfaceset!(grid, "myBottom", x -> norm(x[2]) ≈ 0.0); - addfaceset!(grid, "myBack", x -> norm(x[3]) ≈ 0.0); - addfaceset!(grid, "myRight", x -> norm(x[1]) ≈ 1.0); - addfaceset!(grid, "myLeft", x -> norm(x[1]) ≈ 0.0); + addfacetset!(grid, "myBottom", x -> norm(x[2]) ≈ 0.0); + addfacetset!(grid, "myBack", x -> norm(x[3]) ≈ 0.0); + addfacetset!(grid, "myRight", x -> norm(x[1]) ≈ 1.0); + addfacetset!(grid, "myLeft", x -> norm(x[1]) ≈ 0.0); return grid end; @@ -98,16 +98,16 @@ end; function create_values(interpolation_u, interpolation_p) ## quadrature rules qr = QuadratureRule{RefTetrahedron}(4) - face_qr = FaceQuadratureRule{RefTetrahedron}(4) + facet_qr = FacetQuadratureRule{RefTetrahedron}(4) - ## cell and facevalues for u + ## cell and facetvalues for u cellvalues_u = CellValues(qr, interpolation_u) - facevalues_u = FaceValues(face_qr, interpolation_u) + facetvalues_u = FacetValues(facet_qr, interpolation_u) ## cellvalues for p cellvalues_p = CellValues(qr, interpolation_p) - return cellvalues_u, cellvalues_p, facevalues_u + return cellvalues_u, cellvalues_p, facetvalues_u end; # We now create the function for Ψ* @@ -145,10 +145,10 @@ end; # of the loading. 
function create_bc(dh) dbc = ConstraintHandler(dh) - add!(dbc, Dirichlet(:u, getfaceset(dh.grid, "myLeft"), (x,t) -> zero(Vec{1}), [1])) - add!(dbc, Dirichlet(:u, getfaceset(dh.grid, "myBottom"), (x,t) -> zero(Vec{1}), [2])) - add!(dbc, Dirichlet(:u, getfaceset(dh.grid, "myBack"), (x,t) -> zero(Vec{1}), [3])) - add!(dbc, Dirichlet(:u, getfaceset(dh.grid, "myRight"), (x,t) -> t*ones(Vec{1}), [1])) + add!(dbc, Dirichlet(:u, getfacetset(dh.grid, "myLeft"), (x,t) -> zero(Vec{1}), [1])) + add!(dbc, Dirichlet(:u, getfacetset(dh.grid, "myBottom"), (x,t) -> zero(Vec{1}), [2])) + add!(dbc, Dirichlet(:u, getfacetset(dh.grid, "myBack"), (x,t) -> zero(Vec{1}), [3])) + add!(dbc, Dirichlet(:u, getfacetset(dh.grid, "myRight"), (x,t) -> t*ones(Vec{1}), [1])) close!(dbc) Ferrite.update!(dbc, 0.0) return dbc @@ -252,8 +252,8 @@ function assemble_global!(K::SparseMatrixCSC, f, cellvalues_u::CellValues, np = getnbasefunctions(cellvalues_p) ## start_assemble resets K and f - fe = PseudoBlockArray(zeros(nu + np), [nu, np]) # local force vector - ke = PseudoBlockArray(zeros(nu + np, nu + np), [nu, np], [nu, np]) # local stiffness matrix + fe = BlockedArray(zeros(nu + np), [nu, np]) # local force vector + ke = BlockedArray(zeros(nu + np, nu + np), [nu, np], [nu, np]) # local stiffness matrix assembler = start_assemble(K, f) ## Loop over all cells in the grid @@ -284,7 +284,7 @@ function solve(interpolation_u, interpolation_p) ## Create the DofHandler and CellValues dh = create_dofhandler(grid, interpolation_u, interpolation_p) - cellvalues_u, cellvalues_p, facevalues_u = create_values(interpolation_u, interpolation_p) + cellvalues_u, cellvalues_p, facetvalues_u = create_values(interpolation_u, interpolation_p) ## Create the DirichletBCs dbc = create_bc(dh) @@ -296,7 +296,7 @@ function solve(interpolation_u, interpolation_p) apply!(w, dbc) ## Create the sparse matrix and residual vector - K = create_sparsity_pattern(dh) + K = allocate_matrix(dh) f = zeros(_ndofs) ## We run the simulation parameterized by a time like parameter. `Tf` denotes the final value @@ -305,8 +305,8 @@ function solve(interpolation_u, interpolation_p) Δt = 0.1; NEWTON_TOL = 1e-8 - pvd = paraview_collection("hyperelasticity_incomp_mixed.pvd"); - for t ∈ 0.0:Δt:Tf + pvd = paraview_collection("hyperelasticity_incomp_mixed"); + for (step, t) ∈ enumerate(0.0:Δt:Tf) ## Perform Newton iterations Ferrite.update!(dbc, t) apply!(w, dbc) @@ -334,13 +334,12 @@ function solve(interpolation_u, interpolation_p) end; ## Save the solution fields - vtk_grid("hyperelasticity_incomp_mixed_$t.vtu", dh) do vtkfile - vtk_point_data(vtkfile, dh, w) - vtk_save(vtkfile) - pvd[t] = vtkfile + VTKGridFile("hyperelasticity_incomp_mixed_$step", grid) do vtk + write_solution(vtk, dh, w) + pvd[t] = vtk end end; - vtk_save(pvd); + close(pvd); vol_def = calculate_volume_deformed_mesh(w, dh, cellvalues_u); print("Deformed volume is $vol_def") return vol_def; diff --git a/docs/src/literate-gallery/topology_optimization.jl b/docs/src/literate-gallery/topology_optimization.jl index a1f26d730f..4445e4a90a 100644 --- a/docs/src/literate-gallery/topology_optimization.jl +++ b/docs/src/literate-gallery/topology_optimization.jl @@ -9,15 +9,15 @@ #- #md # !!! tip #md # This example is also available as a Jupyter notebook: -#md # [`topology_optimization.ipynb`](@__NBVIEWER_ROOT_URL__/examples/topology_optimization.ipynb). +#md # [`topology_optimization.ipynb`](@__NBVIEWER_ROOT_URL__/gallery/topology_optimization.ipynb). 
#- # # ## Introduction # -# Topology optimization is the task of finding structures that are mechanically ideal. +# Topology optimization is the task of finding structures that are mechanically ideal. # In this example we cover the bending beam, where we specify a load, boundary conditions and the total mass. Then, our # objective is to find the most suitable geometry within the design space minimizing the compliance (i.e. the inverse stiffness) of the structure. -# We shortly introduce our simplified model for regular meshes. A detailed derivation of the method and advanced techniques +# We shortly introduce our simplified model for regular meshes. A detailed derivation of the method and advanced techniques # can be found in [JanHacJun2019regularizedthermotopopt](@cite) and # [BlaJanJun2022taylorwlsthermotopopt](@cite). # @@ -26,17 +26,17 @@ # means bulk material. Then, we use a SIMP ansatz (solid isotropic material with penalization) for the stiffness tensor # $C(\chi) = \chi^p C_0$, where $C_0$ is the stiffness of the bulk material. The SIMP exponent $p>1$ ensures that the # model prefers the density values void and bulk before the intermediate values. The variational formulation then yields -# the modified Gibbs energy +# the modified Gibbs energy # ```math # G = \int_{\Omega} \frac{1}{2} \chi^p \varepsilon : C : \varepsilon \; \text{d}V - \int_{\Omega} \boldsymbol{f} \cdot \boldsymbol{u} \; \text{d}V - \int_{\partial\Omega} \boldsymbol{t} \cdot \boldsymbol{u} \; \text{d}A. # ``` # Furthermore, we receive the evolution equation of the density # and the additional Neumann boundary condition in the strong form # ```math -# p_\chi + \eta \dot{\chi} + \lambda + \gamma - \beta \nabla^2 \chi \ni 0 \quad \forall \textbf{x} \in \Omega, +# p_\chi + \eta \dot{\chi} + \lambda + \gamma - \beta \nabla^2 \chi \ni 0 \quad \forall \textbf{x} \in \Omega, # ``` # ```math -# \beta \nabla \chi \cdot \textbf{n} = 0 \quad \forall \textbf{x} \in \partial \Omega, +# \beta \nabla \chi \cdot \textbf{n} = 0 \quad \forall \textbf{x} \in \partial \Omega, # ``` # with the thermodynamic driving force # ```math @@ -57,8 +57,8 @@ # ```math # \nabla \chi_p \cdot \textbf{n} = \frac{1}{\Delta h} (\chi_w - \chi_e) = 0 # ``` -# from which follows $\chi_w = \chi_e$. Thus for boundary elements we can replace the value for the missing neighbor by the value of the opposite neighbor. -# In order to find the corresponding neighbor elements, we will make use of Ferrites grid topology funcionalities. +# from which follows $\chi_w = \chi_e$. Thus for boundary elements we can replace the value for the missing neighbor by the value of the opposite neighbor. +# In order to find the corresponding neighbor elements, we will make use of Ferrites grid topology funcionalities. # # ## Commented Program # We now solve the problem in Ferrite. What follows is a program spliced with comments. @@ -70,16 +70,16 @@ using Ferrite, SparseArrays, LinearAlgebra, Tensors, Printf # to the left face set, called `clamped`. On the right face, we create a small set `traction`, where we # will later apply a force in negative y-direction. 
-function create_grid(n) +function create_grid(n) corners = [Vec{2}((0.0, 0.0)), Vec{2}((2.0, 0.0)), Vec{2}((2.0, 1.0)), Vec{2}((0.0, 1.0))] grid = generate_grid(Quadrilateral, (2*n, n), corners); - + ## node-/facesets for boundary conditions addnodeset!(grid, "clamped", x -> x[1] ≈ 0.0) - addfaceset!(grid, "traction", x -> x[1] ≈ 2.0 && norm(x[2]-0.5) <= 0.05); + addfacetset!(grid, "traction", x -> x[1] ≈ 2.0 && norm(x[2]-0.5) <= 0.05); return grid end #md nothing # hide @@ -89,14 +89,14 @@ end function create_values() ## quadrature rules qr = QuadratureRule{RefQuadrilateral}(2) - face_qr = FaceQuadratureRule{RefQuadrilateral}(2) + facet_qr = FacetQuadratureRule{RefQuadrilateral}(2) - ## cell and facevalues for u + ## cell and facetvalues for u ip = Lagrange{RefQuadrilateral,1}()^2 cellvalues = CellValues(qr, ip) - facevalues = FaceValues(face_qr, ip) - - return cellvalues, facevalues + facetvalues = FacetValues(facet_qr, ip) + + return cellvalues, facetvalues end function create_dofhandler(grid) @@ -106,7 +106,7 @@ function create_dofhandler(grid) return dh end -function create_bc(dh) +function create_bc(dh) dbc = ConstraintHandler(dh) add!(dbc, Dirichlet(:u, getnodeset(dh.grid, "clamped"), (x,t) -> zero(Vec{2}), [1,2])) close!(dbc) @@ -122,14 +122,14 @@ end struct MaterialParameters{T, S <: SymmetricTensor{4, 2, T}} C::S - χ_min::T + χ_min::T p::T β::T η::T end #md nothing # hide -function MaterialParameters(E, ν, χ_min, p, β, η) +function MaterialParameters(E, ν, χ_min, p, β, η) δ(i,j) = i == j ? 1.0 : 0.0 # helper function G = E / 2(1 + ν) # =μ @@ -144,13 +144,13 @@ end # `MaterialState`. We add a constructor to initialize the struct. The function `update_material_states!` # updates the density values once we calculated the new values. -mutable struct MaterialState{T, S <: AbstractArray{SymmetricTensor{2, 2, T}, 1}} +mutable struct MaterialState{T, S <: AbstractArray{SymmetricTensor{2, 2, T, 3}, 1}} χ::T # density ε::S # strain in each quadrature point end function MaterialState(ρ, n_qp) - return MaterialState(ρ, Array{SymmetricTensor{2,2,Float64},1}(undef, n_qp)) + return MaterialState(ρ, Array{SymmetricTensor{2,2,Float64,3},1}(undef, n_qp)) end function update_material_states!(χn1, states, dh) @@ -163,7 +163,7 @@ end # Next, we define a function to calculate the driving forces for all elements. # For this purpose, we iterate through all elements and calculate the average strain in each # element. Then, we compute the driving force from the formula introduced at the beginning. -# We create a second function to collect the density in each element. +# We create a second function to collect the density in each element. function compute_driving_forces(states, mp, dh, χn) pΨ = zeros(length(states)) @@ -185,29 +185,41 @@ function compute_densities(states, dh) end #md nothing # hide -# Now we calculate the Laplacian. For this purpose, we will later create the grid topology of -# the grid by using the function `ExclusiveTopology`. Then we iterate through each face of each element, +# For the Laplacian we need some neighboorhood information which is constant throughout the analysis so we compute it once and cache it. +# We iterate through each face of each element, # obtaining the neighboring element by using the `getneighborhood` function. For boundary faces, # the function call will return an empty object. In that case we use the dictionary to instead find the opposite -# face, as discussed in the introduction. Then, the approximation of the Laplacian reduces to the sum below. 
+# face, as discussed in the introduction. -function approximate_laplacian(dh, topology, χn, Δh) - ∇²χ = zeros(getncells(dh.grid)) - _nfaces = nfaces(dh.grid.cells[1]) +function cache_neighborhood(dh, topology) + nbgs = Vector{Vector{Int}}(undef, getncells(dh.grid)) + _nfacets = nfacets(dh.grid.cells[1]) opp = Dict(1=>3, 2=>4, 3=>1, 4=>2) - nbg = zeros(Int,_nfaces) - + for element in CellIterator(dh) + nbg = zeros(Int,_nfacets) i = cellid(element) - for j in 1:_nfaces - nbg_cellid = getcells(getneighborhood(topology, dh.grid, FaceIndex(i,j))) + for j in 1:_nfacets + nbg_cellid = getneighborhood(topology, dh.grid, FacetIndex(i,j)) if(!isempty(nbg_cellid)) - nbg[j] = first(nbg_cellid) # assuming only one face neighbor per cell + nbg[j] = first(nbg_cellid)[1] # assuming only one face neighbor per cell else # boundary face - nbg[j] = first(getcells(getneighborhood(topology, dh.grid, FaceIndex(i,opp[j])))) + nbg[j] = first(getneighborhood(topology, dh.grid, FacetIndex(i,opp[j])))[1] end end - + + nbgs[i] = nbg + end + + return nbgs +end +#md nothing # hide + +# Now we calculate the Laplacian using the previously cached neighboorhood information. +function approximate_laplacian(nbgs, χn, Δh) + ∇²χ = zeros(length(nbgs)) + for i in 1:length(nbgs) + nbg = nbgs[i] ∇²χ[i] = (χn[nbg[1]]+χn[nbg[2]]+χn[nbg[3]]+χn[nbg[4]]-4*χn[i])/(Δh^2) end @@ -218,30 +230,30 @@ end # For the iterative computation of the solution, a function is needed to update the densities in each element. # To ensure that the mass is kept constant, we have to calculate the constraint # parameter $\lambda$, which we do via the bisection method. We repeat the calculation -# until the difference between the average density (calculated from the element-wise trial densities) and the target density nearly vanishes. +# until the difference between the average density (calculated from the element-wise trial densities) and the target density nearly vanishes. # By using the extremal values of $\Delta \chi$ as the starting interval, we guarantee that the method converges eventually. -function compute_χn1(χn, Δχ, ρ, ηs, χ_min) +function compute_χn1(χn, Δχ, ρ, ηs, χ_min) n_el = length(χn) - + χ_trial = zeros(n_el) ρ_trial = 0.0 - + λ_lower = minimum(Δχ) - ηs λ_upper = maximum(Δχ) + ηs λ_trial = 0.0 - + while(abs(ρ-ρ_trial)>1e-7) for i in 1:n_el Δχt = 1/ηs * (Δχ[i] - λ_trial) - χ_trial[i] = maximum([χ_min, minimum([1.0, χn[i]+Δχt])]) + χ_trial[i] = max(χ_min, min(1.0, χn[i]+Δχt)) end - + ρ_trial = 0.0 for i in 1:n_el - ρ_trial += χ_trial[i]/n_el + ρ_trial += χ_trial[i]/n_el end - + if(ρ_trial > ρ) λ_lower = λ_trial elseif(ρ_trial < ρ) @@ -256,66 +268,66 @@ end # Lastly, we use the following helper function to compute the average driving force, which is later # used to normalize the driving forces. This makes the used material parameters and numerical parameters independent -# of the problem. +# of the problem. function compute_average_driving_force(mp, pΨ, χn) n = length(pΨ) w = zeros(n) - + for i in 1:n w[i] = (χn[i]-mp.χ_min)*(1-χn[i]) end - + p_Ω = sum(w.*pΨ)/sum(w) # average driving force - + return p_Ω end #md nothing # hide -# Finally, we put everything together to update the density. The loop ensures the stability of the +# Finally, we put everything together to update the density. The loop ensures the stability of the # updated solution. 
-function update_density(dh, states, mp, ρ, topology, Δh) +function update_density(dh, states, mp, ρ, neighboorhoods, Δh) n_j = Int(ceil(6*mp.β/(mp.η*Δh^2))) # iterations needed for stability - χn = compute_densities(states, dh) # old density field + χn = compute_densities(states, dh) # old density field χn1 = zeros(length(χn)) - + for j in 1:n_j - ∇²χ = approximate_laplacian(dh, topology, χn, Δh) # Laplacian + ∇²χ = approximate_laplacian(neighboorhoods, χn, Δh) # Laplacian pΨ = compute_driving_forces(states, mp, dh, χn) # driving forces p_Ω = compute_average_driving_force(mp, pΨ, χn) # average driving force - - Δχ = pΨ/p_Ω + mp.β*∇²χ - χn1 = compute_χn1(χn, Δχ, ρ, mp.η, mp.χ_min) + Δχ = pΨ/p_Ω + mp.β*∇²χ + + χn1 = compute_χn1(χn, Δχ, ρ, mp.η, mp.χ_min) if(j 10 error("Reached maximum Newton iterations, aborting") break end - + ## current guess u .= un .+ Δu - K, r = doassemble!(cellvalues, facevalues, K, grid, dh, mp, u, states); - norm_r = norm(r[Ferrite.free_dofs(dbc)]) + K, r = doassemble!(cellvalues, facetvalues, K, grid, dh, mp, u, states); + norm_r = norm(r[Ferrite.free_dofs(dbc)]) if (norm_r) < NEWTON_TOL break - end + end apply_zero!(K, r, dbc) ΔΔu = Symmetric(K) \ r - + apply_zero!(ΔΔu, dbc) Δu .+= ΔΔu - end # of loop while NR-Iteration + end # of loop while NR-Iteration ## calculate compliance compliance = 1/2 * u' * K * u - + if(it==1) compliance_0 = compliance end - + ## check convergence criterium (twice!) if(abs(compliance-compliance_n)/compliance < tol) if(conv) @@ -477,46 +489,46 @@ function topopt(ra,ρ,n,filename; output=:false) else conv = :false end - + ## update density - χ = update_density(dh, states, mp, ρ, topology, Δh) - + χ = update_density(dh, states, mp, ρ, neighboorhoods, Δh) + ## update old displacement, density and compliance un .= u Δu .= 0.0 update_material_states!(χ, states, dh) compliance_n = compliance - + ## output during calculation if(output) i = @sprintf("%3.3i", it) filename_it = string(filename, "_", i) - vtk_grid(filename_it, grid) do vtk - vtk_cell_data(vtk, χ, "density") + VTKGridFile(filename_it, grid) do vtk + write_cell_data(vtk, χ, "density") end end end ## export converged results if(!output) - vtk_grid(filename, grid) do vtk - vtk_cell_data(vtk, χ, "density") + VTKGridFile(filename, grid) do vtk + write_cell_data(vtk, χ, "density") end end @printf "Rel. stiffness: %.4f \n" compliance^(-1)/compliance_0^(-1) - + return end #md nothing # hide # Lastly, we call our main function and compare the results. To create the -# complete output with all iteration steps, it is possible to set the output +# complete output with all iteration steps, it is possible to set the output # parameter to `true`. 
-topopt(0.02, 0.5, 60, "small_radius"; output=:false); -topopt(0.03, 0.5, 60, "large_radius"; output=:false); -##topopt(0.02, 0.5, 60, "topopt_animation"; output=:true); # can be used to create animations +# grid, χ =topopt(0.02, 0.5, 60, "small_radius"; output=:false); +@time topopt(0.03, 0.5, 60, "large_radius"; output=:false); +#topopt(0.02, 0.5, 60, "topopt_animation"; output=:true); # can be used to create animations # We observe, that the stiffness for the lower value of $ra$ is higher, # but also requires more iterations until convergence and finer structures to be manufactured, as can be seen in Figure 2: @@ -529,7 +541,7 @@ topopt(0.03, 0.5, 60, "large_radius"; output=:false); #md # ## References #md # ```@bibliography -#md # Pages = ["gallery/topology_optimization.md"] +#md # Pages = ["topology_optimization.md"] #md # Canonical = false #md # ``` diff --git a/docs/src/literate-howto/postprocessing.jl b/docs/src/literate-howto/postprocessing.jl index 79586f0640..671af15366 100644 --- a/docs/src/literate-howto/postprocessing.jl +++ b/docs/src/literate-howto/postprocessing.jl @@ -8,7 +8,7 @@ #- #md # !!! tip #md # This example is also available as a Jupyter notebook: -#md # [`postprocessing.ipynb`](@__NBVIEWER_ROOT_URL__/examples/postprocessing.ipynb). +#md # [`postprocessing.ipynb`](@__NBVIEWER_ROOT_URL__/howto/postprocessing.ipynb). #- # # ## Introduction @@ -26,12 +26,14 @@ # of the fluxes to the nodes of the mesh. By doing this, we can more easily visualize # integration points quantities. Finally, we visualize the temperature field and the heat fluxes along a cut-line. # -# The L2-projection is defined as follows: Find projection ``q(\boldsymbol{x}) \in L_2(\Omega)`` such that +# The L2-projection is defined as follows: Find projection ``q(\boldsymbol{x}) \in U_h(\Omega)`` such that # ```math -# \int v q \ \mathrm{d}\Omega = \int v d \ \mathrm{d}\Omega \quad \forall v \in L_2(\Omega), +# \int v q \ \mathrm{d}\Omega = \int v d \ \mathrm{d}\Omega \quad \forall v \in U_h(\Omega), # ``` # where ``d`` is the quadrature data to project. Since the flux is a vector the projection function # will be solved with multiple right hand sides, e.g. with ``d = q_x`` and ``d = q_y`` for this 2D problem. +# In this example, we use standard Lagrange interpolations, and the finite element space ``U_h`` is then +# a subset of the ``H^1`` space (continuous functions). # # Ferrite has functionality for doing much of this automatically, as displayed in the code below. # In particular [`L2Projector`](@ref) for assembling the left hand side, and @@ -87,8 +89,8 @@ q_projected = project(projector, q_gp, qr); # To visualize the heat flux, we export the projected field `q_projected` # to a VTK-file, which can be viewed in e.g. [ParaView](https://www.paraview.org/). # The result is also visualized in *Figure 1*. -vtk_grid("heat_equation_flux", grid) do vtk - vtk_point_data(vtk, projector, q_projected, "q") +VTKGridFile("heat_equation_flux", grid) do vtk + write_projection(vtk, projector, q_projected, "q") end; # ## Point Evaluation diff --git a/docs/src/literate-howto/threaded_assembly.jl b/docs/src/literate-howto/threaded_assembly.jl index 2a959212f2..f916938672 100644 --- a/docs/src/literate-howto/threaded_assembly.jl +++ b/docs/src/literate-howto/threaded_assembly.jl @@ -3,7 +3,7 @@ #- #md # !!! tip #md # This example is also available as a Jupyter notebook: -#md # [`threaded_assembly.ipynb`](@__NBVIEWER_ROOT_URL__/examples/threaded_assembly.ipynb). 
+#md # [`threaded_assembly.ipynb`](@__NBVIEWER_ROOT_URL__/howto/threaded_assembly.ipynb). #- # # ## Example of a colored grid @@ -25,9 +25,9 @@ function create_example_2d_grid() grid = generate_grid(Quadrilateral, (10, 10), Vec{2}((0.0, 0.0)), Vec{2}((10.0, 10.0))) colors_workstream = create_coloring(grid; alg=ColoringAlgorithm.WorkStream) colors_greedy = create_coloring(grid; alg=ColoringAlgorithm.Greedy) - vtk_grid("colored", grid) do vtk - vtk_cell_data_colors(vtk, colors_workstream, "workstream-coloring") - vtk_cell_data_colors(vtk, colors_greedy, "greedy-coloring") + VTKGridFile("colored", grid) do vtk + Ferrite.write_cell_colors(vtk, grid, colors_workstream, "workstream-coloring") + Ferrite.write_cell_colors(vtk, grid, colors_greedy, "greedy-coloring") end end @@ -73,33 +73,33 @@ end; # # ScratchValues is a thread-local collection of data that each thread needs to own, # since we need to be able to mutate the data in the threads independently -struct ScratchValues{T, CV <: CellValues, FV <: FaceValues, TT <: AbstractTensor, dim, Ti} +struct ScratchValues{T, CV <: CellValues, FV <: FacetValues, TT <: AbstractTensor, dim, Ti} Ke::Matrix{T} fe::Vector{T} cellvalues::CV - facevalues::FV + facetvalues::FV global_dofs::Vector{Int} ɛ::Vector{TT} coordinates::Vector{Vec{dim, T}} assembler::Ferrite.AssemblerSparsityPattern{T, Ti} end; -# Each thread need its own CellValues and FaceValues (although, for this example we don't use -# the FaceValues) +# Each thread need its own CellValues and FacetValues (although, for this example we don't use +# the FacetValues) function create_values(interpolation_space::Interpolation{refshape}, qr_order::Int) where {dim, refshape<:Ferrite.AbstractRefShape{dim}} ## Interpolations and values quadrature_rule = QuadratureRule{refshape}(qr_order) - face_quadrature_rule = FaceQuadratureRule{refshape}(qr_order) + facet_quadrature_rule = FacetQuadratureRule{refshape}(qr_order) cellvalues = [CellValues(quadrature_rule, interpolation_space) for i in 1:Threads.nthreads()]; - facevalues = [FaceValues(face_quadrature_rule, interpolation_space) for i in 1:Threads.nthreads()]; - return cellvalues, facevalues + facetvalues = [FacetValues(facet_quadrature_rule, interpolation_space) for i in 1:Threads.nthreads()]; + return cellvalues, facetvalues end; # Create a `ScratchValues` for each thread with the thread local data function create_scratchvalues(K, f, dh::DofHandler{dim}, ip) where {dim} nthreads = Threads.nthreads() assemblers = [start_assemble(K, f) for i in 1:nthreads] - cellvalues, facevalues = create_values(ip, 2) + cellvalues, facetvalues = create_values(ip, 2) n_basefuncs = getnbasefunctions(cellvalues[1]) global_dofs = [zeros(Int, ndofs_per_cell(dh)) for i in 1:nthreads] @@ -111,7 +111,7 @@ function create_scratchvalues(K, f, dh::DofHandler{dim}, ip) where {dim} coordinates = [[zero(Vec{dim}) for i in 1:length(dh.grid.cells[1].nodes)] for i in 1:nthreads] - return [ScratchValues(Kes[i], fes[i], cellvalues[i], facevalues[i], global_dofs[i], + return [ScratchValues(Kes[i], fes[i], cellvalues[i], facetvalues[i], global_dofs[i], ɛs[i], coordinates[i], assemblers[i]) for i in 1:nthreads] end; @@ -140,8 +140,8 @@ function assemble_cell!(scratch::ScratchValues, cell::Int, K::SparseMatrixCSC, grid::Grid, dh::DofHandler, C::SymmetricTensor{4, dim}, b::Vec{dim}) where {dim} ## Unpack our stuff from the scratch - Ke, fe, cellvalues, facevalues, global_dofs, ɛ, coordinates, assembler = - scratch.Ke, scratch.fe, scratch.cellvalues, scratch.facevalues, + Ke, fe, cellvalues, 
facetvalues, global_dofs, ɛ, coordinates, assembler = + scratch.Ke, scratch.fe, scratch.cellvalues, scratch.facetvalues, scratch.global_dofs, scratch.ɛ, scratch.coordinates, scratch.assembler fill!(Ke, 0) @@ -182,7 +182,7 @@ function run_assemble() ip = Lagrange{RefHexahedron,1}()^3 dh = create_dofhandler(grid, ip); - K = create_sparsity_pattern(dh); + K = allocate_matrix(dh); C = create_stiffness(Val{3}()); ## compilation doassemble(K, colors, grid, dh, C, ip); diff --git a/docs/src/literate-tutorials/computational_homogenization.jl b/docs/src/literate-tutorials/computational_homogenization.jl index 1a06cf495c..cba4b5e66b 100644 --- a/docs/src/literate-tutorials/computational_homogenization.jl +++ b/docs/src/literate-tutorials/computational_homogenization.jl @@ -9,7 +9,7 @@ #- #md # !!! tip #md # This example is also available as a Jupyter notebook: -#md # [`computational_homogenization.ipynb`](@__NBVIEWER_ROOT_URL__/examples/computational_homogenization.ipynb). +#md # [`computational_homogenization.ipynb`](@__NBVIEWER_ROOT_URL__/tutorials/computational_homogenization.ipynb). #- # # ## Introduction @@ -193,6 +193,7 @@ using Test #src # the [`FerriteGmsh`](https://github.com/Ferrite-FEM/FerriteGmsh.jl) package: using FerriteGmsh + #src notebook: use coarse mesh to decrease build time #src script: use the fine mesh #src markdown: use the coarse mesh to decrease build time, but make it look like the fine @@ -231,7 +232,7 @@ close!(dh); ch_dirichlet = ConstraintHandler(dh) dirichlet = Dirichlet( :u, - union(getfaceset.(Ref(grid), ["left", "right", "top", "bottom"])...), + union(getfacetset.(Ref(grid), ["left", "right", "top", "bottom"])...), (x, t) -> [0, 0], [1, 2] ) @@ -276,8 +277,8 @@ ch = (dirichlet = ch_dirichlet, periodic = ch_periodic); # and the constraint handler. K = ( - dirichlet = create_sparsity_pattern(dh), - periodic = create_sparsity_pattern(dh, ch.periodic), + dirichlet = allocate_matrix(dh), + periodic = allocate_matrix(dh, ch.periodic), ); # We define the fourth order elasticity tensor for the matrix material, and define the @@ -518,16 +519,16 @@ round.(ev; digits=-8) uM = zeros(ndofs(dh)) -vtk_grid("homogenization", dh) do vtk +VTKGridFile("homogenization", dh) do vtk for i in 1:3 ## Compute macroscopic solution apply_analytical!(uM, dh, :u, x -> εᴹ[i] ⋅ x) ## Dirichlet - vtk_point_data(vtk, dh, uM + u.dirichlet[i], "_dirichlet_$i") - vtk_point_data(vtk, projector, σ.dirichlet[i], "σvM_dirichlet_$i") + write_solution(vtk, dh, uM + u.dirichlet[i], "_dirichlet_$i") + write_projection(vtk, projector, σ.dirichlet[i], "σvM_dirichlet_$i") ## Periodic - vtk_point_data(vtk, dh, uM + u.periodic[i], "_periodic_$i") - vtk_point_data(vtk, projector, σ.periodic[i], "σvM_periodic_$i") + write_solution(vtk, dh, uM + u.periodic[i], "_periodic_$i") + write_projection(vtk, projector, σ.periodic[i], "σvM_periodic_$i") end end; diff --git a/docs/src/literate-tutorials/dg_heat_equation.jl b/docs/src/literate-tutorials/dg_heat_equation.jl index c1c1f0fb16..8ee2cc7429 100644 --- a/docs/src/literate-tutorials/dg_heat_equation.jl +++ b/docs/src/literate-tutorials/dg_heat_equation.jl @@ -9,7 +9,7 @@ #- #md # !!! tip #md # This example is also available as a Jupyter notebook: -#md # [`dg_heat_equation.ipynb`](@__NBVIEWER_ROOT_URL__/examples/dg_heat_equation.ipynb). +#md # [`dg_heat_equation.ipynb`](@__NBVIEWER_ROOT_URL__/tutorials/dg_heat_equation.ipynb). 
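The facet-set and matrix-allocation renames above follow one common pattern. A small sketch with an assumed toy grid and a vector-valued field `:u` (both illustrative):

```julia
using Ferrite

grid = generate_grid(Quadrilateral, (10, 10))
dh = DofHandler(grid)
add!(dh, :u, Lagrange{RefQuadrilateral, 1}()^2)
close!(dh)

# getfaceset -> getfacetset; the default facet-set names come from generate_grid.
∂Ω = union(getfacetset.(Ref(grid), ["left", "right", "top", "bottom"])...)
ch = ConstraintHandler(dh)
add!(ch, Dirichlet(:u, ∂Ω, (x, t) -> [0.0, 0.0], [1, 2]))
close!(ch)

# create_sparsity_pattern -> allocate_matrix; with affine constraints (e.g. the
# periodic case above) the ConstraintHandler can be passed: allocate_matrix(dh, ch).
K = allocate_matrix(dh)
```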
#- # # This example was developed @@ -71,7 +71,7 @@ # ```math # \int_\Gamma q \boldsymbol{\phi} \cdot \boldsymbol{n} \,\mathrm{d}\Gamma = \int_\Gamma \llbracket q\rrbracket \cdot \{\boldsymbol{\phi}\} \,\mathrm{d}\Gamma + \int_{\Gamma^0} \{q\} \llbracket \boldsymbol{\phi}\rrbracket \,\mathrm{d}\Gamma^0, # ``` -# where $\Gamma^0 : \Gamma \setminus \partial \Omega$, and the jump of the vector-valued field $\boldsymbol{\phi}$ is defined as +# where $\Gamma^0 : \Gamma \setminus \partial \Omega$, and the jump of the vector-valued field $\boldsymbol{\phi}$ is defined as # ```math # \llbracket \boldsymbol{\phi}\rrbracket = \boldsymbol{\phi}^+ \cdot \boldsymbol{n}^+ + \boldsymbol{\phi}^- \cdot \boldsymbol{n}^-\\ # ``` @@ -99,7 +99,7 @@ # ```math # \int_\Omega [\boldsymbol{\nabla} (u)] \cdot [\boldsymbol{\nabla} (\delta u)] \,\mathrm{d}\Omega + \int_\Gamma \llbracket \hat{u} - u\rrbracket \cdot \{\boldsymbol{\nabla} (\delta u)\} \,\mathrm{d}\Gamma + \int_{\Gamma^0} \{\hat{u} - u\} \llbracket \boldsymbol{\nabla} (\delta u)\rrbracket \,\mathrm{d}\Gamma^0 - \int_\Gamma \llbracket \delta u\rrbracket \cdot \{\hat{\boldsymbol{\sigma}}\} \,\mathrm{d}\Gamma - \int_{\Gamma^0} \{\delta u\} \llbracket \hat{\boldsymbol{\sigma}}\rrbracket \,\mathrm{d}\Gamma^0 = \int_\Omega \delta u \,\mathrm{d}\Omega,\\ # ``` -# The numerical fluxes chosen for the interior penalty method are $\boldsymbol{\hat{\sigma}} = \{\boldsymbol{\nabla} (u)\} - \alpha(\llbracket u\rrbracket)$ on $\Gamma$, $\hat{u} = \{u\}$ on the interfaces between elements $\Gamma^0 : \Gamma \setminus \partial \Omega$, +# The numerical fluxes chosen for the interior penalty method are $\boldsymbol{\hat{\sigma}} = \{\boldsymbol{\nabla} (u)\} - \alpha(\llbracket u\rrbracket)$ on $\Gamma$, $\hat{u} = \{u\}$ on the interfaces between elements $\Gamma^0 : \Gamma \setminus \partial \Omega$, # and $\hat{u} = 0$ on $\partial \Omega$. Such choice results in $\{\hat{\boldsymbol{\sigma}}\} = \{\boldsymbol{\nabla} (u)\} - \alpha(\llbracket u\rrbracket)$, $\llbracket \hat{u}\rrbracket = 0$, $\{\hat{u}\} = \{u\}$, $\llbracket \hat{\boldsymbol{\sigma}}\rrbracket = 0$ and the equation becomes # ```math # \int_\Omega [\boldsymbol{\nabla} (u)] \cdot [\boldsymbol{\nabla} (\delta u)] \,\mathrm{d}\Omega - \int_\Gamma \llbracket u\rrbracket \cdot \{\boldsymbol{\nabla} (\delta u)\} \,\mathrm{d}\Gamma - \int_\Gamma \llbracket \delta u\rrbracket \cdot \{\boldsymbol{\nabla} (u)\} - \llbracket \delta u\rrbracket \cdot \alpha(\llbracket u\rrbracket) \,\mathrm{d}\Gamma = \int_\Omega \delta u \,\mathrm{d}\Omega,\\ @@ -138,21 +138,21 @@ grid = generate_grid(Quadrilateral, ntuple(_ -> 20, dim)); topology = ExclusiveTopology(grid); # ### Trial and test functions -# `CellValues`, `FaceValues`, and `InterfaceValues` facilitate the process of evaluating values and gradients of +# `CellValues`, `FacetValues`, and `InterfaceValues` facilitate the process of evaluating values and gradients of # test and trial functions (among other things). To define # these we need to specify an interpolation space for the shape functions. # We use `DiscontinuousLagrange` functions # based on the two-dimensional reference quadrilateral. We also define a quadrature rule based on # the same reference element. We combine the interpolation and the quadrature rule -# to `CellValues` and `InterfaceValues` object. Note that `InterfaceValues` object contains two `FaceValues` objects which can be used individually. +# to `CellValues` and `InterfaceValues` object. 
Note that `InterfaceValues` object contains two `FacetValues` objects which can be used individually. order = 1; ip = DiscontinuousLagrange{RefQuadrilateral, order}(); qr = QuadratureRule{RefQuadrilateral}(2); -# For `FaceValues` and `InterfaceValues` we use `FaceQuadratureRule` -face_qr = FaceQuadratureRule{RefQuadrilateral}(2); +# For `FacetValues` and `InterfaceValues` we use `FacetQuadratureRule` +facet_qr = FacetQuadratureRule{RefQuadrilateral}(2); cellvalues = CellValues(qr, ip); -facevalues = FaceValues(face_qr, ip); -interfacevalues = InterfaceValues(face_qr, ip); +facetvalues = FacetValues(facet_qr, ip); +interfacevalues = InterfaceValues(facet_qr, ip); # ### Penalty term parameters # We define functions to calculate the diameter of a set of points, used to calculate the characteristic size $h_e$ in the assembly routine. getdistance(p1::Vec{N, T},p2::Vec{N, T}) where {N, T} = norm(p1-p2); @@ -167,20 +167,20 @@ close!(dh); # However, when generating the sparsity pattern we need to pass the topology and the cross-element coupling matrix when we're using # discontinuous interpolations. The cross-element coupling matrix is of size [1,1] in this case as # we have only one field and one DofHandler. -K = create_sparsity_pattern(dh, topology = topology, cross_coupling = trues(1,1)); +K = allocate_matrix(dh, topology = topology, interface_coupling = trues(1,1)); # ### Boundary conditions -# The Dirichlet boundary conditions are treated +# The Dirichlet boundary conditions are treated # as usual by a `ConstraintHandler`. ch = ConstraintHandler(dh) -add!(ch, Dirichlet(:u, getfaceset(grid, "right"), (x, t) -> 1.0)) -add!(ch, Dirichlet(:u, getfaceset(grid, "left"), (x, t) -> -1.0)) +add!(ch, Dirichlet(:u, getfacetset(grid, "right"), (x, t) -> 1.0)) +add!(ch, Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> -1.0)) close!(ch); # Furthermore, we define $\partial \Omega_N$ as the `union` of the face sets with Neumann boundary conditions for later use ∂Ωₙ = union( - getfaceset(grid, "top"), - getfaceset(grid, "bottom"), + getfacetset(grid, "top"), + getfacetset(grid, "bottom"), ); @@ -200,7 +200,7 @@ close!(ch); # * `assemble_interface!` to compute the contribution ``K_i`` of surface integrals over an # interface using `interfacevalues`. # * `assemble_boundary!` to compute the contribution ``f_e`` of surface integrals over a -# boundary face using `facevalues`. +# boundary face using `FacetValues`. function assemble_element!(Ke::Matrix, fe::Vector, cellvalues::CellValues) n_basefuncs = getnbasefunctions(cellvalues) @@ -239,13 +239,13 @@ function assemble_interface!(Ki::Matrix, iv::InterfaceValues, μ::Float64) dΓ = getdetJdV(iv, q_point) ## Loop over test shape functions for i in 1:getnbasefunctions(iv) - ## Multiply the jump by the normal, as the definition used in Ferrite doesn't include the normals. - δu_jump = shape_value_jump(iv, q_point, i) * normal + ## Multiply the jump by the negative normal to get the definition from the theory section. + δu_jump = shape_value_jump(iv, q_point, i) * (-normal) ∇δu_avg = shape_gradient_average(iv, q_point, i) ## Loop over trial shape functions for j in 1:getnbasefunctions(iv) - ## Multiply the jump by the normal, as the definition used in Ferrite doesn't include the normals. - u_jump = shape_value_jump(iv, q_point, j) * normal + ## Multiply the jump by the negative normal to get the definition from the theory section. 
+ u_jump = shape_value_jump(iv, q_point, j) * (-normal) ∇u_avg = shape_gradient_average(iv, q_point, j) ## Add contribution to Ki Ki[i, j] += -(δu_jump ⋅ ∇u_avg + ∇δu_avg ⋅ u_jump)*dΓ + μ * (δu_jump ⋅ u_jump) * dΓ @@ -255,7 +255,7 @@ function assemble_interface!(Ki::Matrix, iv::InterfaceValues, μ::Float64) return Ki end -function assemble_boundary!(fe::Vector, fv::FaceValues) +function assemble_boundary!(fe::Vector, fv::FacetValues) ## Reset to 0 fill!(fe, 0) ## Loop over quadrature points @@ -280,7 +280,7 @@ end # We define the function `assemble_global` to loop over all elements and internal faces # (interfaces), as well as the external faces involved in Neumann boundary conditions. -function assemble_global(cellvalues::CellValues, facevalues::FaceValues, interfacevalues::InterfaceValues, K::SparseMatrixCSC, dh::DofHandler, order::Int, dim::Int) +function assemble_global(cellvalues::CellValues, facetvalues::FacetValues, interfacevalues::InterfaceValues, K::SparseMatrixCSC, dh::DofHandler, order::Int, dim::Int) ## Allocate the element stiffness matrix and element force vector n_basefuncs = getnbasefunctions(cellvalues) Ke = zeros(n_basefuncs, n_basefuncs) @@ -314,17 +314,17 @@ function assemble_global(cellvalues::CellValues, facevalues::FaceValues, interfa assemble!(assembler, interfacedofs(ic), Ki) end ## Loop over domain boundaries with Neumann boundary conditions - for fc in FaceIterator(dh, ∂Ωₙ) + for fc in FacetIterator(dh, ∂Ωₙ) ## Reinitialize face_values_a for this boundary face - reinit!(facevalues, fc) + reinit!(facetvalues, fc) ## Compute boundary face surface integrals contribution - assemble_boundary!(fe, facevalues) + assemble_boundary!(fe, facetvalues) ## Assemble fe into f assemble!(f, celldofs(fc), fe) end return K, f end -K, f = assemble_global(cellvalues, facevalues, interfacevalues, K, dh, order, dim); +K, f = assemble_global(cellvalues, facetvalues, interfacevalues, K, dh, order, dim); #md nothing # hide # ### Solution of the system @@ -334,8 +334,8 @@ K, f = assemble_global(cellvalues, facevalues, interfacevalues, K, dh, order, di apply!(K, f, ch) u = K \ f; -vtk_grid("dg_heat_equation", dh) do vtk - vtk_point_data(vtk, dh, u) +VTKGridFile("dg_heat_equation", dh) do vtk + write_solution(vtk, dh, u) end; ## test the result #src @@ -344,7 +344,7 @@ using Test #src #md # ## References #md # ```@bibliography -#md # Pages = ["tutorials/dg_heat_equation.md"] +#md # Pages = ["dg_heat_equation.md"] #md # Canonical = false #md # ``` diff --git a/docs/src/literate-tutorials/heat_equation.jl b/docs/src/literate-tutorials/heat_equation.jl index 4a82dac7b7..e98fe27c81 100644 --- a/docs/src/literate-tutorials/heat_equation.jl +++ b/docs/src/literate-tutorials/heat_equation.jl @@ -8,7 +8,7 @@ #- #md # !!! tip #md # This example is also available as a Jupyter notebook: -#md # [`heat_equation.ipynb`](@__NBVIEWER_ROOT_URL__/examples/heat_equation.ipynb). +#md # [`heat_equation.ipynb`](@__NBVIEWER_ROOT_URL__/tutorials/heat_equation.ipynb). #- # # ## Introduction @@ -71,9 +71,9 @@ add!(dh, :u, ip) close!(dh); # Now that we have distributed all our dofs we can create our tangent matrix, -# using `create_sparsity_pattern`. This function returns a sparse matrix +# using `allocate_matrix`. This function returns a sparse matrix # with the correct entries stored. 
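The Neumann contribution in the assembly above uses the renamed boundary-iteration API. The following self-contained sketch shows the `FacetIterator`/`FacetValues` pattern in isolation; the toy scalar field and unit boundary flux are assumptions:

```julia
using Ferrite

grid = generate_grid(Quadrilateral, (8, 8))
ip = Lagrange{RefQuadrilateral, 1}()
dh = DofHandler(grid)
add!(dh, :u, ip)
close!(dh)

facet_qr = FacetQuadratureRule{RefQuadrilateral}(2)  # replaces FaceQuadratureRule
fv = FacetValues(facet_qr, ip)                       # replaces FaceValues

∂Ωₙ = union(getfacetset(grid, "top"), getfacetset(grid, "bottom"))
f = zeros(ndofs(dh))
fe = zeros(ndofs_per_cell(dh))
for fc in FacetIterator(dh, ∂Ωₙ)                     # replaces FaceIterator
    reinit!(fv, fc)
    fill!(fe, 0)
    for qp in 1:getnquadpoints(fv)
        dΓ = getdetJdV(fv, qp)
        for i in 1:getnbasefunctions(fv)
            fe[i] += 1.0 * shape_value(fv, qp, i) * dΓ  # unit Neumann flux (assumed)
        end
    end
    assemble!(f, celldofs(fc), fe)
end
```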
-K = create_sparsity_pattern(dh) +K = allocate_matrix(dh) # ### Boundary conditions # In Ferrite constraints like Dirichlet boundary conditions @@ -84,10 +84,10 @@ ch = ConstraintHandler(dh); # homogeneous Dirichlet boundary conditions on the whole boundary, i.e. # the `union` of all the face sets on the boundary. ∂Ω = union( - getfaceset(grid, "left"), - getfaceset(grid, "right"), - getfaceset(grid, "top"), - getfaceset(grid, "bottom"), + getfacetset(grid, "left"), + getfacetset(grid, "right"), + getfacetset(grid, "top"), + getfacetset(grid, "bottom"), ); # Now we are set up to define our constraint. We specify which field @@ -213,8 +213,8 @@ u = K \ f; # ### Exporting to VTK # To visualize the result we export the grid and our field `u` # to a VTK-file, which can be viewed in e.g. [ParaView](https://www.paraview.org/). -vtk_grid("heat_equation", dh) do vtk - vtk_point_data(vtk, dh, u) +VTKGridFile("heat_equation", dh) do vtk + write_solution(vtk, dh, u) end ## test the result #src diff --git a/docs/src/literate-tutorials/hyperelasticity.jl b/docs/src/literate-tutorials/hyperelasticity.jl index 7f4e5ce18b..701ec051e3 100644 --- a/docs/src/literate-tutorials/hyperelasticity.jl +++ b/docs/src/literate-tutorials/hyperelasticity.jl @@ -11,7 +11,7 @@ #- #md # !!! tip #md # This example is also available as a Jupyter notebook: -#md # [`hyperelasticity.ipynb`](@__NBVIEWER_ROOT_URL__/examples/hyperelasticity.ipynb). +#md # [`hyperelasticity.ipynb`](@__NBVIEWER_ROOT_URL__/tutorials/hyperelasticity.ipynb). #- # ## Introduction # @@ -105,7 +105,7 @@ using Ferrite, Tensors, TimerOutputs, ProgressMeter, IterativeSolvers # ``` #md # ```@raw html -#md #
+#md #
#md # #md # Derivation of $\partial \mathbf{P} / \partial \mathbf{F}$ #md # @@ -269,9 +269,9 @@ function assemble_element!(ke, ge, cell, cv, fv, mp, ue, ΓN) end ## Surface integral for the traction - for face in 1:nfaces(cell) - if (cellid(cell), face) in ΓN - reinit!(fv, cell, face) + for facet in 1:nfacets(cell) + if (cellid(cell), facet) in ΓN + reinit!(fv, cell, facet) for q_point in 1:getnquadpoints(fv) t = tn * getnormal(fv, q_point) dΓ = getdetJdV(fv, q_point) @@ -328,9 +328,9 @@ function solve() ## Finite element base ip = Lagrange{RefTetrahedron, 1}()^3 qr = QuadratureRule{RefTetrahedron}(1) - qr_face = FaceQuadratureRule{RefTetrahedron}(1) + qr_facet = FacetQuadratureRule{RefTetrahedron}(1) cv = CellValues(qr, ip) - fv = FaceValues(qr_face, ip) + fv = FacetValues(qr_facet, ip) ## DofHandler dh = DofHandler(grid) @@ -349,9 +349,9 @@ function solve() dbcs = ConstraintHandler(dh) ## Add a homogeneous boundary condition on the "clamped" edge - dbc = Dirichlet(:u, getfaceset(grid, "right"), (x,t) -> [0.0, 0.0, 0.0], [1, 2, 3]) + dbc = Dirichlet(:u, getfacetset(grid, "right"), (x,t) -> [0.0, 0.0, 0.0], [1, 2, 3]) add!(dbcs, dbc) - dbc = Dirichlet(:u, getfaceset(grid, "left"), (x,t) -> rotation(x, t), [1, 2, 3]) + dbc = Dirichlet(:u, getfacetset(grid, "left"), (x,t) -> rotation(x, t), [1, 2, 3]) add!(dbcs, dbc) close!(dbcs) t = 0.5 @@ -359,10 +359,10 @@ function solve() ## Neumann part of the boundary ΓN = union( - getfaceset(grid, "top"), - getfaceset(grid, "bottom"), - getfaceset(grid, "front"), - getfaceset(grid, "back"), + getfacetset(grid, "top"), + getfacetset(grid, "bottom"), + getfacetset(grid, "front"), + getfacetset(grid, "back"), ) ## Pre-allocation of vectors for the solution and Newton increments @@ -374,14 +374,14 @@ function solve() apply!(un, dbcs) ## Create sparse matrix and residual vector - K = create_sparsity_pattern(dh) + K = allocate_matrix(dh) g = zeros(_ndofs) ## Perform Newton iterations newton_itr = -1 NEWTON_TOL = 1e-8 NEWTON_MAXITER = 30 - prog = ProgressMeter.ProgressThresh(NEWTON_TOL, "Solving:") + prog = ProgressMeter.ProgressThresh(NEWTON_TOL; desc = "Solving:") while true; newton_itr += 1 ## Construct the current guess @@ -408,8 +408,8 @@ function solve() ## Save the solution @timeit "export" begin - vtk_grid("hyperelasticity", dh) do vtkfile - vtk_point_data(vtkfile, dh, u) + VTKGridFile("hyperelasticity", dh) do vtk + write_solution(vtk, dh, u) end end diff --git a/docs/src/literate-tutorials/incompressible_elasticity.jl b/docs/src/literate-tutorials/incompressible_elasticity.jl index e69b8a6577..33d64023c7 100644 --- a/docs/src/literate-tutorials/incompressible_elasticity.jl +++ b/docs/src/literate-tutorials/incompressible_elasticity.jl @@ -3,7 +3,7 @@ #- #md # !!! tip #md # This example is also available as a Jupyter notebook: -#md # [`incompressible_elasticity.ipynb`](@__NBVIEWER_ROOT_URL__/examples/incompressible_elasticity.ipynb). +#md # [`incompressible_elasticity.ipynb`](@__NBVIEWER_ROOT_URL__/tutorials/incompressible_elasticity.ipynb). #- # # ## Introduction @@ -33,25 +33,25 @@ function create_cook_grid(nx, ny) Vec{2}(( 0.0, 44.0))] grid = generate_grid(Triangle, (nx, ny), corners) ## facesets for boundary conditions - addfaceset!(grid, "clamped", x -> norm(x[1]) ≈ 0.0) - addfaceset!(grid, "traction", x -> norm(x[1]) ≈ 48.0) + addfacetset!(grid, "clamped", x -> norm(x[1]) ≈ 0.0) + addfacetset!(grid, "traction", x -> norm(x[1]) ≈ 48.0) return grid end; -# Next we define a function to set up our cell- and facevalues. 
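The traction loops touched above switch from `nfaces`/`FaceValues` to `nfacets`/`FacetValues`. A self-contained sketch of that per-cell facet loop, with an assumed toy grid and traction vector:

```julia
using Ferrite

grid = generate_grid(Triangle, (4, 4))
ip = Lagrange{RefTriangle, 1}()^2
dh = DofHandler(grid)
add!(dh, :u, ip)
close!(dh)

fv = FacetValues(FacetQuadratureRule{RefTriangle}(2), ip)
ΓN = getfacetset(grid, "top")
t = Vec{2}((0.0, 1.0))  # assumed traction vector

f = zeros(ndofs(dh))
fe = zeros(ndofs_per_cell(dh))
for cell in CellIterator(dh)
    fill!(fe, 0)
    for facet in 1:nfacets(cell)              # replaces nfaces(cell)
        (cellid(cell), facet) in ΓN || continue
        reinit!(fv, cell, facet)
        for qp in 1:getnquadpoints(fv)
            dΓ = getdetJdV(fv, qp)
            for i in 1:getnbasefunctions(fv)
                fe[i] += (shape_value(fv, qp, i) ⋅ t) * dΓ
            end
        end
    end
    assemble!(f, celldofs(cell), fe)
end
```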
+# Next we define a function to set up our cell- and FacetValues. function create_values(interpolation_u, interpolation_p) ## quadrature rules qr = QuadratureRule{RefTriangle}(3) - face_qr = FaceQuadratureRule{RefTriangle}(3) + facet_qr = FacetQuadratureRule{RefTriangle}(3) - ## cell and facevalues for u + ## cell and FacetValues for u cellvalues_u = CellValues(qr, interpolation_u) - facevalues_u = FaceValues(face_qr, interpolation_u) + facetvalues_u = FacetValues(facet_qr, interpolation_u) ## cellvalues for p cellvalues_p = CellValues(qr, interpolation_p) - return cellvalues_u, cellvalues_p, facevalues_u + return cellvalues_u, cellvalues_p, facetvalues_u end; @@ -69,7 +69,7 @@ end; # We specify a homogeneous Dirichlet bc on the displacement field, `:u`. function create_bc(dh) dbc = ConstraintHandler(dh) - add!(dbc, Dirichlet(:u, getfaceset(dh.grid, "clamped"), x -> zero(x), [1, 2])) + add!(dbc, Dirichlet(:u, getfacetset(dh.grid, "clamped"), x -> zero(x), [1, 2])) close!(dbc) return dbc end; @@ -82,12 +82,12 @@ end # Now to the assembling of the stiffness matrix. This mixed formulation leads to a blocked # element matrix. Since Ferrite does not force us to use any particular matrix type we will -# use a `PseudoBlockArray` from `BlockArrays.jl`. +# use a `BlockedArray` from `BlockArrays.jl`. function doassemble( cellvalues_u::CellValues, cellvalues_p::CellValues, - facevalues_u::FaceValues, + facetvalues_u::FacetValues, K::SparseMatrixCSC, grid::Grid, dh::DofHandler, mp::LinearElasticity ) f = zeros(ndofs(dh)) @@ -95,8 +95,8 @@ function doassemble( nu = getnbasefunctions(cellvalues_u) np = getnbasefunctions(cellvalues_p) - fe = PseudoBlockArray(zeros(nu + np), [nu, np]) # local force vector - ke = PseudoBlockArray(zeros(nu + np, nu + np), [nu, np], [nu, np]) # local stiffness matrix + fe = BlockedArray(zeros(nu + np), [nu, np]) # local force vector + ke = BlockedArray(zeros(nu + np, nu + np), [nu, np], [nu, np]) # local stiffness matrix ## traction vector t = Vec{2}((0.0, 1 / 16)) @@ -106,7 +106,7 @@ function doassemble( for cell in CellIterator(dh) fill!(ke, 0) fill!(fe, 0) - assemble_up!(ke, fe, cell, cellvalues_u, cellvalues_p, facevalues_u, grid, mp, ɛdev, t) + assemble_up!(ke, fe, cell, cellvalues_u, cellvalues_p, facetvalues_u, grid, mp, ɛdev, t) assemble!(assembler, celldofs(cell), fe, ke) end @@ -116,7 +116,7 @@ end; # The element routine integrates the local stiffness and force vector for all elements. # Since the problem results in a symmetric matrix we choose to only assemble the lower part, # and then symmetrize it after the loop over the quadrature points. -function assemble_up!(Ke, fe, cell, cellvalues_u, cellvalues_p, facevalues_u, grid, mp, ɛdev, t) +function assemble_up!(Ke, fe, cell, cellvalues_u, cellvalues_p, facetvalues_u, grid, mp, ɛdev, t) n_basefuncs_u = getnbasefunctions(cellvalues_u) n_basefuncs_p = getnbasefunctions(cellvalues_p) @@ -154,16 +154,16 @@ function assemble_up!(Ke, fe, cell, cellvalues_u, cellvalues_p, facevalues_u, gr symmetrize_lower!(Ke) - ## We integrate the Neumann boundary using the facevalues. - ## We loop over all the faces in the cell, then check if the face - ## is in our `"traction"` faceset. - for face in 1:nfaces(cell) - if onboundary(cell, face) && (cellid(cell), face) ∈ getfaceset(grid, "traction") - reinit!(facevalues_u, cell, face) - for q_point in 1:getnquadpoints(facevalues_u) - dΓ = getdetJdV(facevalues_u, q_point) + ## We integrate the Neumann boundary using the FacetValues. 
+ ## We loop over all the facets in the cell, then check if the facet + ## is in our `"traction"` facetset. + for facet in 1:nfacets(cell) + if (cellid(cell), facet) ∈ getfacetset(grid, "traction") + reinit!(facetvalues_u, cell, facet) + for q_point in 1:getnquadpoints(facetvalues_u) + dΓ = getdetJdV(facetvalues_u, q_point) for i in 1:n_basefuncs_u - δu = shape_value(facevalues_u, q_point, i) + δu = shape_value(facetvalues_u, q_point, i) fe[i] += (δu ⋅ t) * dΓ end end @@ -255,11 +255,11 @@ function solve(ν, interpolation_u, interpolation_p) dbc = create_bc(dh) ## CellValues - cellvalues_u, cellvalues_p, facevalues_u = create_values(interpolation_u, interpolation_p) + cellvalues_u, cellvalues_p, facetvalues_u = create_values(interpolation_u, interpolation_p) ## Assembly and solve - K = create_sparsity_pattern(dh) - K, f = doassemble(cellvalues_u, cellvalues_p, facevalues_u, K, grid, dh, mp) + K = allocate_matrix(dh) + K, f = doassemble(cellvalues_u, cellvalues_p, facetvalues_u, K, grid, dh, mp) apply!(K, f, dbc) u = K \ f @@ -270,13 +270,14 @@ function solve(ν, interpolation_u, interpolation_p) ## Export the solution and the stress filename = "cook_" * (interpolation_u == Lagrange{RefTriangle, 1}()^2 ? "linear" : "quadratic") * "_linear" - vtk_grid(filename, dh) do vtkfile - vtk_point_data(vtkfile, dh, u) + + VTKGridFile(filename, grid) do vtk + write_solution(vtk, dh, u) for i in 1:3, j in 1:3 σij = [x[i, j] for x in σ] - vtk_cell_data(vtkfile, σij, "sigma_$(i)$(j)") + write_cell_data(vtk, σij, "sigma_$(i)$(j)") end - vtk_cell_data(vtkfile, σvM, "sigma von Mise") + write_cell_data(vtk, σvM, "sigma von Mises") end return u end diff --git a/docs/src/literate-tutorials/linear_shell.jl b/docs/src/literate-tutorials/linear_shell.jl index 391378e1d9..6ba45400ba 100644 --- a/docs/src/literate-tutorials/linear_shell.jl +++ b/docs/src/literate-tutorials/linear_shell.jl @@ -4,8 +4,8 @@ #- # ## Introduction # -# In this example we show how shell elements can be analyzed in Ferrite.jl. The shell implemented here comes from the book -# "The finite element method - Linear static and dynamic finite element analysis" by Hughes (1987), and a brief description of it is +# In this example we show how shell elements can be analyzed in Ferrite.jl. The shell implemented here comes from the book +# "The finite element method - Linear static and dynamic finite element analysis" by Hughes (1987), and a brief description of it is # given at the end of this tutorial. The first part of the tutorial explains how to set up the problem. # ## Setting up the problem @@ -15,7 +15,7 @@ using ForwardDiff function main() #wrap everything in a function... # First we generate a flat rectangular mesh. There is currently no built-in function for generating -# shell meshes in Ferrite, so we have to create our own simple mesh generator (see the +# shell meshes in Ferrite, so we have to create our own simple mesh generator (see the # function `generate_shell_grid` further down in this file). #+ nels = (10,10) @@ -23,8 +23,8 @@ size = (10.0, 10.0) grid = generate_shell_grid(nels, size) # Here we define the bi-linear interpolation used for the geometrical description of the shell. -# We also create two quadrature rules for the in-plane and out-of-plane directions. Note that we use -# under integration for the inplane integration, to avoid shear locking. +# We also create two quadrature rules for the in-plane and out-of-plane directions. Note that we use +# under integration for the inplane integration, to avoid shear locking. 
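The blocked local matrices above also pick up the BlockArrays.jl rename from `PseudoBlockArray` to `BlockedArray`. A minimal sketch with placeholder block sizes:

```julia
using BlockArrays

nu, np = 6, 3  # placeholder numbers of displacement and pressure shape functions
fe = BlockedArray(zeros(nu + np), [nu, np])                     # blocked local force vector
ke = BlockedArray(zeros(nu + np, nu + np), [nu, np], [nu, np])  # blocked local stiffness matrix

# Blocks can be addressed with Block indices, e.g. the displacement-pressure coupling block:
view(ke, Block(1), Block(2)) .= 1.0
```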
#+ ip = Lagrange{RefQuadrilateral,1}() qr_inplane = QuadratureRule{RefQuadrilateral}(1) @@ -38,23 +38,23 @@ add!(dh, :u, ip^3) add!(dh, :θ, ip^2) close!(dh) -# In order to apply our boundary conditions, we first need to create some edge- and vertex-sets. This -# is done with `addedgeset!` and `addvertexset!` (similar to `addfaceset!`) +# In order to apply our boundary conditions, we first need to create some facet- and vertex-sets. This +# is done with `addfacetset!` and `addvertexset!` #+ -addedgeset!(grid, "left", (x) -> x[1] ≈ 0.0) -addedgeset!(grid, "right", (x) -> x[1] ≈ size[1]) +addfacetset!(grid, "left", (x) -> x[1] ≈ 0.0) +addfacetset!(grid, "right", (x) -> x[1] ≈ size[1]) addvertexset!(grid, "corner", (x) -> x[1] ≈ 0.0 && x[2] ≈ 0.0 && x[3] ≈ 0.0) # Here we define the boundary conditions. On the left edge, we lock the displacements in the x- and z- directions, and all the rotations. #+ ch = ConstraintHandler(dh) -add!(ch, Dirichlet(:u, getedgeset(grid, "left"), (x, t) -> (0.0, 0.0), [1,3]) ) -add!(ch, Dirichlet(:θ, getedgeset(grid, "left"), (x, t) -> (0.0, 0.0), [1,2]) ) +add!(ch, Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> (0.0, 0.0), [1,3]) ) +add!(ch, Dirichlet(:θ, getfacetset(grid, "left"), (x, t) -> (0.0, 0.0), [1,2]) ) # On the right edge, we also lock the displacements in the x- and z- directions, but apply a precribed rotation. #+ -add!(ch, Dirichlet(:u, getedgeset(grid, "right"), (x, t) -> (0.0, 0.0), [1,3]) ) -add!(ch, Dirichlet(:θ, getedgeset(grid, "right"), (x, t) -> (0.0, pi/10), [1,2]) ) +add!(ch, Dirichlet(:u, getfacetset(grid, "right"), (x, t) -> (0.0, 0.0), [1,3]) ) +add!(ch, Dirichlet(:θ, getfacetset(grid, "right"), (x, t) -> (0.0, pi/10), [1,2]) ) # In order to not get rigid body motion, we lock the y-displacement in one of the corners. #+ @@ -63,7 +63,7 @@ add!(ch, Dirichlet(:θ, getvertexset(grid, "corner"), (x, t) -> (0.0), [2]) ) close!(ch) update!(ch, 0.0) -# Next we define relevant data for the shell, such as shear correction factor and stiffness matrix for the material. +# Next we define relevant data for the shell, such as shear correction factor and stiffness matrix for the material. # In this linear shell, plane stress is assumed, ie $\\sigma_{zz} = 0$. Therefor, the stiffness matrix is 5x5 (opposed to the normal 6x6). #+ κ = 5/6 # Shear correction factor @@ -84,7 +84,7 @@ data = (thickness = 1.0, C = C); #Named tuple nnodes = getnbasefunctions(ip) ndofs_shell = ndofs_per_cell(dh) -K = create_sparsity_pattern(dh) +K = allocate_matrix(dh) f = zeros(Float64, ndofs(dh)) ke = zeros(ndofs_shell, ndofs_shell) @@ -99,7 +99,7 @@ for cell in CellIterator(grid) reinit!(cv, cell) celldofs!(celldofs, dh, cellid(cell)) getcoordinates!(cellcoords, grid, cellid(cell)) - + #Call the element routine integrate_shell!(ke, cv, qr_ooplane, cellcoords, data) @@ -113,14 +113,14 @@ a = K\f # Output results. #+ -vtk_grid("linear_shell", dh) do vtk - vtk_point_data(vtk, dh, a) +VTKGridFile("linear_shell", dh) do vtk + write_solution(vtk, dh, a) end end; #end main functions # Below is the function that creates the shell mesh. It simply generates a 2d-quadrature mesh, and appends -# a third coordinate (z-direction) to the node-positions. +# a third coordinate (z-direction) to the node-positions. function generate_shell_grid(nels, size) _grid = generate_grid(Quadrilateral, nels, Vec((0.0,0.0)), Vec(size)) nodes = [(n.x[1], n.x[2], 0.0) |> Vec{3} |> Node for n in _grid.nodes] @@ -139,7 +139,7 @@ end; #md # !!! 
note #md # This element might experience various locking phenomenas, and should only be seen as a proof of concept. -# ##### Fiber coordinate system +# ##### Fiber coordinate system # The element uses two coordinate systems. The first coordianate system, called the fiber system, is created for each # element node, and is used as a reference frame for the rotations. The function below implements an algorithm that return the # fiber directions, $\boldsymbol{e}^{f}_{a1}$, $\boldsymbol{e}^{f}_{a2}$ and $\boldsymbol{e}^{f}_{a3}$, at each node $a$. @@ -153,7 +153,7 @@ function fiber_coordsys(Ps::Vector{Vec{3,Float64}}) j = 1 if a[1] > a[3]; a[3] = a[1]; j = 2; end if a[2] > a[3]; j = 3; end - + e3 = P e2 = Tensors.cross(P, basevec(Vec{3}, j)) e2 /= norm(e2) @@ -167,7 +167,7 @@ function fiber_coordsys(Ps::Vector{Vec{3,Float64}}) end; -# ##### Lamina coordinate system +# ##### Lamina coordinate system # The second coordinate system is the so called Lamina Coordinate system. It is # created for each integration point, and is defined to be tangent to the # mid-surface. It is in this system that we enforce that plane stress assumption, @@ -205,7 +205,7 @@ end; # A material point in the shell is defined as # ```math # \boldsymbol x(\xi, \eta, \zeta) = \sum_{a=1}^{N_{\text{nodes}}} N_a(\xi, \eta) \boldsymbol{\bar{x}}_{a} + ζ \frac{h}{2} \boldsymbol{\bar{p}_a} -# ``` +# ``` # where $\boldsymbol{\bar{x}}_{a}$ are nodal positions on the mid-surface, and $\boldsymbol{\bar{p}_a}$ is an vector that defines the fiber direction # on the reference surface. $N_a$ arethe shape functions. # @@ -235,13 +235,13 @@ end; # ``` # The displacement field is calculated as: # ```math -# \boldsymbol u = \sum_{a=1}^{N_{\text{nodes}}} N_a \bar{\boldsymbol u}_{a} + +# \boldsymbol u = \sum_{a=1}^{N_{\text{nodes}}} N_a \bar{\boldsymbol u}_{a} + # N_a ζ\frac{h}{2}(\theta_{a2} \boldsymbol e^{f}_{a1} - \theta_{a1} \boldsymbol e^{f}_{a2}) # # ``` # The gradient of the displacement (in the lamina coordinate system), then becomes: # ```math -# \frac{\partial u_{i}}{\partial x_j} = \sum_{m=1}^3 q_{im} \sum_{a=1}^{N_{\text{nodes}}} \frac{\partial N_a}{\partial x_j} \bar{u}_{am} + +# \frac{\partial u_{i}}{\partial x_j} = \sum_{m=1}^3 q_{im} \sum_{a=1}^{N_{\text{nodes}}} \frac{\partial N_a}{\partial x_j} \bar{u}_{am} + # \frac{\partial(N_a ζ)}{\partial x_j} \frac{h}{2} (\theta_{a2} e^{f}_{am1} - \theta_{a1} e^{f}_{am2}) # ``` function strain(dofvec::Vector{T}, N, dNdx, ζ, dζdx, q, ef1, ef2, h) where T @@ -262,7 +262,7 @@ function strain(dofvec::Vector{T}, N, dNdx, ζ, dζdx, q, ef1, ef2, h) where T end; # ##### Main element routine -# Below is the main routine that calculates the stiffness matrix of the shell element. +# Below is the main routine that calculates the stiffness matrix of the shell element. # Since it is a so called degenerate shell element, the code is similar to that for an standard continuum element. 
shape_reference_gradient(cv::CellValues, q_point, i) = cv.fun_values.dNdξ[i, q_point] diff --git a/docs/src/literate-tutorials/ns_vs_diffeq.jl b/docs/src/literate-tutorials/ns_vs_diffeq.jl index f397d03812..8d3455943b 100644 --- a/docs/src/literate-tutorials/ns_vs_diffeq.jl +++ b/docs/src/literate-tutorials/ns_vs_diffeq.jl @@ -1,6 +1,13 @@ +# We check for a divergence free velocity field in the CI #src + if isdefined(Main, :is_ci) #hide + IS_CI = Main.is_ci #hide + else #hide + IS_CI = false #hide + end #hide + nothing #hide # # [Incompressible Navier-Stokes equations via DifferentialEquations.jl](@id tutorial-ins-ordinarydiffeq) # -# ![](https://user-images.githubusercontent.com/9196588/134514213-76d91d34-19ab-47c2-957e-16bb0c8669e1.gif) +# ![nsdiffeq](nsdiffeq.gif) # # # In this example we focus on a simple but visually appealing problem from @@ -11,6 +18,9 @@ # # ## Remarks on DifferentialEquations.jl # +# !!! note "Required Version" +# This example will only work with OrdinaryDiffEq@v6.80.1. or above +# # Many "time step solvers" of [DifferentialEquations.jl](https://github.com/SciML/DifferentialEquations.jl) assume that that the # problem is provided in mass matrix form. The incompressible Navier-Stokes # equations as stated above yield a DAE in this form after applying a spatial @@ -38,13 +48,9 @@ # where $v$ is the unknown velocity field, $p$ the unknown pressure field, # $\nu$ the dynamic viscosity and $\Delta$ the Laplacian. In the derivation we assumed # a constant density of 1 for the fluid and negligible coupling between the velocity components. -# Finally we see that the pressure term appears only in combination with the gradient -# operator, so for any solution $p$ the function $p + c$ is also an admissible solution, if -# we do not impose Dirichlet conditions on the pressure. To resolve this we introduce the -# implicit constraint that $ \int_\Omega p = 0 $. # -# Our setup is derived from [Turek's DFG benchmark](http://www.mathematik.tu-dortmund.de/~featflow/en/benchmarks/cfdbenchmarking/flow/dfg_benchmark1_re20.html). -# We model a channel with size $0.41 \times 2.2$ and a hole of radius $0.05$ centered at $(0.2, 0.2)$. +# Our setup is derived from [Turek's DFG benchmark](http://www.mathematik.tu-dortmund.de/~featflow/en/benchmarks/cfdbenchmarking/flow/dfg_benchmark2_re100.html). +# We model a channel with size $0.41 \times 1.1$ and a hole of radius $0.05$ centered at $(0.2, 0.2)$. # The left side has a parabolic inflow profile, which is ramped up over time, modeled as the time dependent # Dirichlet condition # ```math @@ -55,7 +61,7 @@ # 0 # \end{bmatrix} # ``` -# where $v_{in}(t) = \text{clamp}(t, 0.0, 1.0)$. With a dynamic viscosity of $\nu = 0.001$ +# where $v_{in}(t) = \text{clamp}(t, 0.0, 1.5)$. With a dynamic viscosity of $\nu = 0.001$ # this is enough to induce turbulence behind the cylinder which leads to vortex shedding. The top and bottom of our # channel have no-slip conditions, i.e. $v = [0,0]^{\textrm{T}}$, while the right boundary has the do-nothing boundary condition # $\nu \partial_{\textrm{n}} v - p n = 0$ to model outflow. With these boundary conditions we can choose the zero solution as a @@ -113,7 +119,7 @@ # The full program, without comments, can be found in the next [section](@ref ns_vs_diffeq-plain-program). 
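The remarks on solvers expecting the problem in mass matrix form can be illustrated with a tiny standalone DAE, unrelated to the tutorial's actual system: one ODE coupled to an algebraic constraint, solved with the same `Rodas5P` integrator used further down.

```julia
using OrdinaryDiffEq

# M du/dt = f(u, p, t) with a singular mass matrix: the zero row turns the
# second equation into the algebraic constraint 0 = u[1] - u[2].
M = [1.0 0.0; 0.0 0.0]
f!(du, u, p, t) = (du[1] = -u[1]; du[2] = u[1] - u[2]; nothing)
rhs = ODEFunction(f!; mass_matrix = M)
prob = ODEProblem(rhs, [1.0, 1.0], (0.0, 1.0))
sol = solve(prob, Rodas5P(autodiff = false); abstol = 1e-8, reltol = 1e-8)
```

The zero row forces `u[2] = u[1]` at every step; in the discretized Navier-Stokes system the pressure rows play this algebraic role.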
# # First we load Ferrite and some other packages we need -using Ferrite, SparseArrays, BlockArrays, LinearAlgebra, UnPack, LinearSolve +using Ferrite, SparseArrays, BlockArrays, LinearAlgebra, UnPack, LinearSolve, WriteVTK # Since we do not need the complete DifferentialEquations suite, we just load the required ODE infrastructure, which can also handle # DAEs in mass matrix form. using OrdinaryDiffEq @@ -121,43 +127,62 @@ using OrdinaryDiffEq # We start off by defining our only material parameter. ν = 1.0/1000.0; #dynamic viscosity -# Next a fine 2D rectangular grid has to be generated. We leave the cell size parametric for flexibility when -# playing around with the code. Note that the mesh is pretty fine, leading to a high memory consumption when +# Next a rectangular grid with a cylinder in it has to be generated. +# We use `Gmsh` for the creation of the mesh and `FerriteGmsh` to translate it to a `Ferrite.Grid`. +# Note that the mesh is pretty fine, leading to a high memory consumption when # feeding the equation system to direct solvers. -dim = 2 -cell_scale_factor = 2.0 -x_cells = round(Int, cell_scale_factor*220) -y_cells = round(Int, cell_scale_factor*41) -# CI chokes if the grid is too fine. :) #src -x_cells = round(Int, 55/3) #hide -y_cells = round(Int, 41/3) #hide -grid = generate_grid(Quadrilateral, (x_cells, y_cells), Vec{2}((0.0, 0.0)), Vec{2}((2.2, 0.41))); - -# Next we carve a hole $B_{0.05}(0.2,0.2)$ in the mesh by deleting the cells and update the boundary face sets. -# This code will be replaced once a proper mesh interface is available. -cell_indices = filter(ci->norm(mean(map(i->grid.nodes[i].x-[0.2,0.2], Ferrite.vertices(grid.cells[ci]))))>0.05, 1:length(grid.cells)) -hole_cell_indices = filter(ci->norm(mean(map(i->grid.nodes[i].x-[0.2,0.2], Ferrite.vertices(grid.cells[ci]))))<=0.05, 1:length(grid.cells)); -hole_face_ring = Set{FaceIndex}() -for hci ∈ hole_cell_indices - push!(hole_face_ring, FaceIndex((hci+1, 4))) - push!(hole_face_ring, FaceIndex((hci-1, 2))) - push!(hole_face_ring, FaceIndex((hci-x_cells, 3))) - push!(hole_face_ring, FaceIndex((hci+x_cells, 1))) -end -grid.facesets["hole"] = Set(filter(x->x.idx[1] ∉ hole_cell_indices, collect(hole_face_ring))); -cell_indices_map = map(ci->norm(mean(map(i->grid.nodes[i].x-[0.2,0.2], Ferrite.vertices(grid.cells[ci]))))>0.05 ? indexin([ci], cell_indices)[1] : 0, 1:length(grid.cells)) -grid.cells = grid.cells[cell_indices] -for facesetname in keys(grid.facesets) - grid.facesets[facesetname] = Set(map(fi -> FaceIndex( cell_indices_map[fi.idx[1]] ,fi.idx[2]), collect(grid.facesets[facesetname]))) -end; - -# We test against full development of the flow - so regenerate the grid #src -grid = generate_grid(Quadrilateral, (x_cells, y_cells), Vec{2}((0.0, 0.0)), Vec{2}((0.55, 0.41))); #hide - -# ### Function Space -# To ensure stability we utilize the Taylor-Hood element pair Q2-Q1. -# We have to utilize the same quadrature rule for the pressure as for the velocity, because in the weak form the -# linear pressure term is tested against a quadratic function. +using FerriteGmsh +using FerriteGmsh: Gmsh +Gmsh.initialize() +gmsh.option.set_number("General.Verbosity", 2) +dim = 2; +# We specify first the rectangle, the cylinder, the surface spanned by the cylinder +# and the boolean difference of rectangle and cylinder. 
+if !IS_CI #hide +rect_tag = gmsh.model.occ.add_rectangle(0, 0, 0, 1.1, 0.41) +circle_tag = gmsh.model.occ.add_circle(0.2, 0.2, 0, 0.05) +circle_curve_tag = gmsh.model.occ.add_curve_loop([circle_tag]) +circle_surf_tag = gmsh.model.occ.add_plane_surface([circle_curve_tag]) +gmsh.model.occ.cut([(dim,rect_tag)],[(dim,circle_surf_tag)]); +else #hide +rect_tag = gmsh.model.occ.add_rectangle(0, 0, 0, 0.55, 0.41); #hide +end #hide +nothing #hide +# Now, the geometrical entities need to be synchronized in order to be available outside +# of `gmsh.model.occ` +gmsh.model.occ.synchronize() +# In the next lines, we add the physical groups needed to define boundary conditions. +if !IS_CI #hide +bottomtag = gmsh.model.model.add_physical_group(dim-1,[6],-1,"bottom") +lefttag = gmsh.model.model.add_physical_group(dim-1,[7],-1,"left") +righttag = gmsh.model.model.add_physical_group(dim-1,[8],-1,"right") +toptag = gmsh.model.model.add_physical_group(dim-1,[9],-1,"top") +holetag = gmsh.model.model.add_physical_group(dim-1,[5],-1,"hole"); +else #hide +gmsh.model.model.add_physical_group(dim-1,[4],7,"left") #hide +gmsh.model.model.add_physical_group(dim-1,[3],8,"top") #hide +gmsh.model.model.add_physical_group(dim-1,[2],9,"right") #hide +gmsh.model.model.add_physical_group(dim-1,[1],10,"bottom"); #hide +end #hide +nothing #hide +# Since we want a quad mesh, we specify the meshing algorithm to the quasi structured quad one. +# For a complete list, [see the Gmsh docs](https://gmsh.info/doc/texinfo/gmsh.html#Mesh-options-list). +gmsh.option.setNumber("Mesh.Algorithm",11) +gmsh.option.setNumber("Mesh.MeshSizeFromCurvature",20) +gmsh.option.setNumber("Mesh.MeshSizeMax",0.05) +if IS_CI #hide +gmsh.option.setNumber("Mesh.MeshSizeFromCurvature",20) #hide +gmsh.option.setNumber("Mesh.MeshSizeMax",0.15) #hide +end #hide +# In the next step, the mesh is generated and finally translated. +gmsh.model.mesh.generate(dim) +grid = togrid() +Gmsh.finalize(); + +# ### Function Space +# To ensure stability we utilize the Taylor-Hood element pair Q2-Q1. +# We have to utilize the same quadrature rule for the pressure as for the velocity, because in the weak form the +# linear pressure term is tested against a quadratic function. ip_v = Lagrange{RefQuadrilateral, 2}()^dim qr = QuadratureRule{RefQuadrilateral}(4) cellvalues_v = CellValues(qr, ip_v); @@ -176,23 +201,28 @@ close!(dh); # fluid on this portion of the boundary is fixed to be zero. ch = ConstraintHandler(dh); -nosplip_face_names = ["top", "bottom", "hole"]; +nosplip_facet_names = ["top", "bottom", "hole"]; # No hole for the test present #src -nosplip_face_names = ["top", "bottom"] #hide -∂Ω_noslip = union(getfaceset.((grid, ), nosplip_face_names)...); -noslip_bc = Dirichlet(:v, ∂Ω_noslip, (x, t) -> [0,0], [1,2]) +if IS_CI #hide +nosplip_facet_names = ["top", "bottom"] #hide +end #hide +∂Ω_noslip = union(getfacetset.((grid, ), nosplip_facet_names)...); +noslip_bc = Dirichlet(:v, ∂Ω_noslip, (x, t) -> Vec((0.0,0.0)), [1,2]) add!(ch, noslip_bc); -# The left boundary has a parabolic inflow with peak velocity of 1.0. This +# The left boundary has a parabolic inflow with peak velocity of 1.5. This # ensures that for the given geometry the Reynolds number is 100, which # is already enough to obtain some simple vortex streets. By increasing the # velocity further we can obtain stronger vortices - which may need additional # refinement of the grid. -∂Ω_inflow = getfaceset(grid, "left"); +∂Ω_inflow = getfacetset(grid, "left"); + +# !!! 
note +# The kink in the velocity profile will lead to a discontinuity in the pressure at $t=1$. +# This needs to be considered in the DiffEq `init` by providing the keyword argument `d_discontinuities=[1.0]`. +vᵢₙ(t) = min(t*1.5, 1.5) #inflow velocity -vᵢₙ(t) = clamp(t, 0.0, 1.0)*1.0 #inflow velocity -vᵢₙ(t) = clamp(t, 0.0, 1.0)*0.3 #hide -parabolic_inflow_profile((x,y),t) = [4*vᵢₙ(t)*y*(0.41-y)/0.41^2,0] +parabolic_inflow_profile(x,t) = Vec((4*vᵢₙ(t)*x[2]*(0.41-x[2])/0.41^2, 0.0)) inflow_bc = Dirichlet(:v, ∂Ω_inflow, parabolic_inflow_profile, [1,2]) add!(ch, inflow_bc); @@ -200,7 +230,7 @@ add!(ch, inflow_bc); # cylinder when the weak form has been derived by setting the boundary integral # to zero. It is also called the do-nothing condition. Other outflow conditions # are also possible. -∂Ω_free = getfaceset(grid, "right"); +∂Ω_free = getfacetset(grid, "right"); close!(ch) update!(ch, 0.0); @@ -217,7 +247,7 @@ function assemble_mass_matrix(cellvalues_v::CellValues, cellvalues_p::CellValues n_basefuncs_p = getnbasefunctions(cellvalues_p) n_basefuncs = n_basefuncs_v + n_basefuncs_p v▄, p▄ = 1, 2 - Mₑ = PseudoBlockArray(zeros(n_basefuncs, n_basefuncs), [n_basefuncs_v, n_basefuncs_p], [n_basefuncs_v, n_basefuncs_p]) + Mₑ = BlockedArray(zeros(n_basefuncs, n_basefuncs), [n_basefuncs_v, n_basefuncs_p], [n_basefuncs_v, n_basefuncs_p]) ## It follows the assembly loop as explained in the basic tutorials. mass_assembler = start_assemble(M) @@ -228,6 +258,7 @@ function assemble_mass_matrix(cellvalues_v::CellValues, cellvalues_p::CellValues for q_point in 1:getnquadpoints(cellvalues_v) dΩ = getdetJdV(cellvalues_v, q_point) ## Remember that we assemble a vector mass term, hence the dot product. + ## There is only one time derivative on the left hand side, so only one mass block is non-zero. for i in 1:n_basefuncs_v φᵢ = shape_value(cellvalues_v, q_point, i) for j in 1:n_basefuncs_v @@ -242,7 +273,7 @@ function assemble_mass_matrix(cellvalues_v::CellValues, cellvalues_p::CellValues return M end; -# Next we discuss the assembly of the Stokes matrix. +# Next we discuss the assembly of the Stokes matrix appearing on the right hand side. # Remember that we use the same function spaces for trial and test, hence the # matrix has the following block form # ```math @@ -260,7 +291,7 @@ function assemble_stokes_matrix(cellvalues_v::CellValues, cellvalues_p::CellValu n_basefuncs_p = getnbasefunctions(cellvalues_p) n_basefuncs = n_basefuncs_v + n_basefuncs_p v▄, p▄ = 1, 2 - Kₑ = PseudoBlockArray(zeros(n_basefuncs, n_basefuncs), [n_basefuncs_v, n_basefuncs_p], [n_basefuncs_v, n_basefuncs_p]) + Kₑ = BlockedArray(zeros(n_basefuncs, n_basefuncs), [n_basefuncs_v, n_basefuncs_p], [n_basefuncs_v, n_basefuncs_p]) ## Assembly loop stiffness_assembler = start_assemble(K) @@ -303,14 +334,22 @@ end; # ### Solution of the semi-discretized system via DifferentialEquations.jl # First we assemble the linear portions for efficiency. These matrices are # assumed to be constant over time. -T = 10.0 -Δt₀ = 0.01 +# !!! note +# To obtain the vortex street a small time step is important to resolve +# the small oscillation forming. The mesh size becomes important to +# "only" resolve the smaller vertices forming, but less important for +# the initial formation. 
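The ramped inflow above is a time-dependent Dirichlet condition. A standalone sketch of how such a condition is declared and re-evaluated over time; the coarse grid resolution and the sample times are placeholders:

```julia
using Ferrite

grid = generate_grid(Quadrilateral, (11, 4), Vec((0.0, 0.0)), Vec((1.1, 0.41)))
dh = DofHandler(grid)
add!(dh, :v, Lagrange{RefQuadrilateral, 2}()^2)
close!(dh)

vᵢₙ(t) = min(t * 1.5, 1.5)  # inflow ramped up to its peak value
parabolic_inflow(x, t) = Vec((4 * vᵢₙ(t) * x[2] * (0.41 - x[2]) / 0.41^2, 0.0))

ch = ConstraintHandler(dh)
add!(ch, Dirichlet(:v, getfacetset(grid, "left"), parabolic_inflow, [1, 2]))
close!(ch)

u = zeros(ndofs(dh))
for t in (0.0, 0.5, 1.0)
    update!(ch, t)  # re-evaluate the prescribed values at time t
    apply!(u, ch)   # write them into the (here otherwise zero) solution vector
end
```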
+T = 6.0 +Δt₀ = 0.001 +if IS_CI #hide + Δt₀ = 0.1 #hide +end #hide Δt_save = 0.1 -M = create_sparsity_pattern(dh); +M = allocate_matrix(dh); M = assemble_mass_matrix(cellvalues_v, cellvalues_p, M, dh); -K = create_sparsity_pattern(dh); +K = allocate_matrix(dh); K = assemble_stokes_matrix(cellvalues_v, cellvalues_p, ν, K, dh); # These are our initial conditions. We start from the zero solution, because it @@ -330,45 +369,71 @@ jac_sparsity = sparse(K); # To apply the nonlinear portion of the Navier-Stokes problem we simply hand # over the dof handler and cell values to the right-hand-side (RHS) as a parameter. -# Further the pre-assembled linear part (which is time independent) is -# passed to save some runtime. To apply the time-dependent Dirichlet BCs, we -# also hand over the constraint handler. +# Furthermore the pre-assembled linear part, our Stokes opeartor (which is time independent) +# is passed to save some additional runtime. To apply the time-dependent Dirichlet BCs, we +# also need to hand over the constraint handler. # The basic idea to apply the Dirichlet BCs consistently is that we copy the # current solution `u`, apply the Dirichlet BCs on the copy, evaluate the -# discretized RHS of the Navier-Stokes equations with this vector -# and finally set the RHS to zero on every constraint. This way we obtain a -# correct solution for all dofs which are not Dirichlet constrained. These -# dofs are then corrected in a post-processing step, when evaluating the -# solution vector at specific time points. -# It should be finally noted that this **trick does not work** out of the box -# **for constraining algebraic portion** of the DAE, i.e. if we would like to -# put a Dirichlet BC on pressure dofs. As a workaround we have to set $f_{\textrm{i}} = 1$ -# instead of $f_{\textrm{i}} = 0$, because otherwise the equation system gets singular. -# This is obvious when we remember that our mass matrix is zero for these -# dofs, such that we obtain the equation $0 \cdot \mathrm{d}_t p_{\textrm{i}} = 1 \cdot p_{\textrm{i}}$, which -# now has a unique solution. +# discretized RHS of the Navier-Stokes equations with this vector. +# Furthermore we pass down the Jacobian assembly manually. For the Jacobian we eliminate all +# rows and columns associated with constrained dofs. Also note that we eliminate the mass +# matrix beforehand in a similar fashion. This decouples the time evolution of the constrained +# dofs from the true unknowns. The correct solution is enforced by utilizing step and +# stage limiters. The correct norms are computed by passing down a custom norm which simply +# ignores all constrained dofs. +# +# !!! note +# An alternative strategy is to hook into the nonlinear and linear solvers and enforce +# the solution therein. However, this is not possible at the time of writing this tutorial. 
+# +apply!(M, ch) + struct RHSparams K::SparseMatrixCSC ch::ConstraintHandler dh::DofHandler cellvalues_v::CellValues + u::Vector +end +p = RHSparams(K, ch, dh, cellvalues_v, copy(u₀)) + +function ferrite_limiter!(u, _, p, t) + update!(p.ch, t) + apply!(u, p.ch) end -p = RHSparams(K, ch, dh, cellvalues_v) -function navierstokes!(du,u_uc,p,t) +function navierstokes_rhs_element!(dvₑ, vₑ, cellvalues_v) + n_basefuncs = getnbasefunctions(cellvalues_v) + for q_point in 1:getnquadpoints(cellvalues_v) + dΩ = getdetJdV(cellvalues_v, q_point) + ∇v = function_gradient(cellvalues_v, q_point, vₑ) + v = function_value(cellvalues_v, q_point, vₑ) + for j in 1:n_basefuncs + φⱼ = shape_value(cellvalues_v, q_point, j) + # Note that in Tensors.jl the definition $\textrm{grad} v = \nabla v$ holds. + # With this information it can be quickly shown in index notation that + # ```math + # [(v \cdot \nabla) v]_{\textrm{i}} = v_{\textrm{j}} (\partial_{\textrm{j}} v_{\textrm{i}}) = [v (\nabla v)^{\textrm{T}}]_{\textrm{i}} + # ``` + # where we should pay attentation to the transpose of the gradient. + #+ + dvₑ[j] -= v ⋅ ∇v' ⋅ φⱼ * dΩ + end + end +end + +function navierstokes!(du,u_uc,p::RHSparams,t) # Unpack the struct to save some allocations. #+ - @unpack K,ch,dh,cellvalues_v = p + @unpack K,ch,dh,cellvalues_v,u = p # We start by applying the time-dependent Dirichlet BCs. Note that we are - # not allowed to mutate `u_uc`! We also can not pre-allocate this variable - # if we want to use AD to derive the Jacobian matrix, which appears in the - # utilized implicit Euler. If we hand over the Jacobian analytically to - # the solver, or when utilizing a method which does not require building the - # Jacobian, then we could also hand over a buffer for `u` in our RHSparams - # structure to save the allocations made here. + # not allowed to mutate `u_uc`! Furthermore not that we also can not pre- + # allocate a buffer for this variable variable if we want to use AD to derive + # the Jacobian matrix, which appears in stiff solvers. + # Therefore, for efficiency reasons, we simply pass down the jacobian analytically. #+ - u = copy(u_uc) + u .= u_uc update!(ch, t) apply!(u, ch) @@ -378,116 +443,190 @@ function navierstokes!(du,u_uc,p,t) mul!(du, K, u) # du .= K * u ## nonlinear contribution + v_range = dof_range(dh, :v) n_basefuncs = getnbasefunctions(cellvalues_v) + vₑ = zeros(n_basefuncs) + duₑ = zeros(n_basefuncs) for cell in CellIterator(dh) Ferrite.reinit!(cellvalues_v, cell) - all_celldofs = celldofs(cell) - v_celldofs = all_celldofs[dof_range(dh, :v)] - v_cell = u[v_celldofs] - for q_point in 1:getnquadpoints(cellvalues_v) - dΩ = getdetJdV(cellvalues_v, q_point) - ∇v = function_gradient(cellvalues_v, q_point, v_cell) - v = function_value(cellvalues_v, q_point, v_cell) - for j in 1:n_basefuncs - φⱼ = shape_value(cellvalues_v, q_point, j) - # Note that in Tensors.jl the definition $\textrm{grad} v = \nabla v$ holds. - # With this information it can be quickly shown in index notation that - # ```math - # [(v \cdot \nabla) v]_{\textrm{i}} = v_{\textrm{j}} (\partial_{\textrm{j}} v_{\textrm{i}}) = [v (\nabla v)^{\textrm{T}}]_{\textrm{i}} - # ``` - # where we should pay attentation to the transpose of the gradient. 
- #+ - du[v_celldofs[j]] -= v ⋅ ∇v' ⋅ φⱼ * dΩ + v_celldofs = @view celldofs(cell)[v_range] + vₑ .= @views u[v_celldofs] + fill!(duₑ, 0.0) + navierstokes_rhs_element!(duₑ, vₑ, cellvalues_v) + assemble!(du, v_celldofs, duₑ) + end +end; + +function navierstokes_jac_element!(Jₑ, vₑ, cellvalues_v) + n_basefuncs = getnbasefunctions(cellvalues_v) + for q_point in 1:getnquadpoints(cellvalues_v) + dΩ = getdetJdV(cellvalues_v, q_point) + ∇v = function_gradient(cellvalues_v, q_point, vₑ) + v = function_value(cellvalues_v, q_point, vₑ) + for j in 1:n_basefuncs + φⱼ = shape_value(cellvalues_v, q_point, j) + # Note that in Tensors.jl the definition $\textrm{grad} v = \nabla v$ holds. + # With this information it can be quickly shown in index notation that + # ```math + # [(v \cdot \nabla) v]_{\textrm{i}} = v_{\textrm{j}} (\partial_{\textrm{j}} v_{\textrm{i}}) = [v (\nabla v)^{\textrm{T}}]_{\textrm{i}} + # ``` + # where we should pay attentation to the transpose of the gradient. + #+ + for i in 1:n_basefuncs + φᵢ = shape_value(cellvalues_v, q_point, i) + ∇φᵢ = shape_gradient(cellvalues_v, q_point, i) + Jₑ[j, i] -= (φᵢ ⋅ ∇v' + v ⋅ ∇φᵢ') ⋅ φⱼ * dΩ end end end +end - # For now we have to ignore the evolution of the Dirichlet BCs. - # The DBC dofs in the solution vector will be corrected in a post-processing step. +function navierstokes_jac!(J,u_uc,p,t) + # Unpack the struct to save some allocations. + #+ + @unpack K, ch, dh, cellvalues_v, u = p + + # We start by applying the time-dependent Dirichlet BCs. Note that we are + # not allowed to mutate `u_uc`, so we use our buffer again. + #+ + u .= u_uc + update!(ch, t) + apply!(u, ch) + + # Now we apply the Jacobian of the Navier-Stokes equations. + #+ + ## Linear contribution (Stokes operator) + ## Here we assume that J has exactly the same structure as K by construction + nonzeros(J) .= nonzeros(K) + + assembler = start_assemble(J; fillzero=false) + + ## Assemble variation of the nonlinear term + n_basefuncs = getnbasefunctions(cellvalues_v) + Jₑ = zeros(n_basefuncs, n_basefuncs) + vₑ = zeros(n_basefuncs) + v_range = dof_range(dh, :v) + for cell in CellIterator(dh) + Ferrite.reinit!(cellvalues_v, cell) + v_celldofs = @view celldofs(cell)[v_range] + + vₑ .= @views u[v_celldofs] + fill!(Jₑ, 0.0) + navierstokes_jac_element!(Jₑ, vₑ, cellvalues_v) + assemble!(assembler, v_celldofs, Jₑ) + end + + # Finally we eliminate the constrained dofs from the Jacobian to + # decouple them in the nonlinear solver from the remaining system. #+ - apply_zero!(du, ch) + apply!(J, ch) end; + # Finally, together with our pre-assembled mass matrix, we are now able to # define our problem in mass matrix form. -rhs = ODEFunction(navierstokes!, mass_matrix=M; jac_prototype=jac_sparsity) +rhs = ODEFunction(navierstokes!, mass_matrix=M; jac=navierstokes_jac!, jac_prototype=jac_sparsity) problem = ODEProblem(rhs, u₀, (0.0,T), p); +# All norms must not depend on constrained dofs. A problem with the presented implementation +# is that we are currently unable to strictly enforce constraint everywhere in the internal +# time integration process of [DifferentialEquations.jl](https://github.com/SciML/DifferentialEquations.jl), +# hence the values might differ, resulting in worse error estimates. +# We try to resolve this issue in the future. Volunteers are also welcome to take a look into this! 
+struct FreeDofErrorNorm + ch::ConstraintHandler +end +(fe_norm::FreeDofErrorNorm)(u::Union{AbstractFloat, Complex}, t) = DiffEqBase.ODE_DEFAULT_NORM(u, t) +(fe_norm::FreeDofErrorNorm)(u::AbstractArray, t) = DiffEqBase.ODE_DEFAULT_NORM(u[fe_norm.ch.free_dofs], t) + # Now we can put everything together by specifying how to solve the problem. -# We want to use the adaptive implicit Euler method with our custom linear -# solver, which helps in the enforcement of the Dirichlet BCs. Further we +# We want to use an adaptive variant of the implicit Euler method. Further we # enable the progress bar with the `progress` and `progress_steps` arguments. # Finally we have to communicate the time step length and initialization # algorithm. Since we start with a valid initial state we do not use one of # DifferentialEquations.jl initialization algorithms. -# NOTE: At the time of writing this [no Hessenberg index 2 initialization is implemented](https://github.com/SciML/OrdinaryDiffEq.jl/issues/1019). +# !!! note "DAE initialization" +# At the time of writing this [no Hessenberg index 2 initialization is implemented](https://github.com/SciML/OrdinaryDiffEq.jl/issues/1019). # # To visualize the result we export the grid and our fields # to VTK-files, which can be viewed in [ParaView](https://www.paraview.org/) # by utilizing the corresponding pvd file. -timestepper = ImplicitEuler(linsolve = UMFPACKFactorization(reuse_symbolic=false)) +timestepper = Rodas5P(autodiff=false, step_limiter! = ferrite_limiter!); +# timestepper = ImplicitEuler(nlsolve=NonlinearSolveAlg(OrdinaryDiffEq.NonlinearSolve.NewtonRaphson(autodiff=OrdinaryDiffEq.AutoFiniteDiff()); max_iter=50), step_limiter! = ferrite_limiter!) #src +#NOTE! This is left for future reference #src +# function algebraicmultigrid(W,du,u,p,t,newW,Plprev,Prprev,solverdata) #src +# if newW === nothing || newW #src +# Pl = aspreconditioner(ruge_stuben(convert(AbstractMatrix,W))) #src +# else #src +# Pl = Plprev #src +# end #src +# Pl,nothing #src +# end #src +# timestepper = ImplicitEuler(linsolve = IterativeSolversJL_GMRES(; abstol=1e-8, reltol=1e-6), precs=algebraicmultigrid, concrete_jac=true) #src + +# !!! info "Debugging convergence issues" +# We can obtain some debug information from OrdinaryDiffEq by wrapping the following section into a [debug logger](https://docs.julialang.org/en/v1/stdlib/Logging/#Example:-Enable-debug-level-messages). integrator = init( - problem, timestepper, initializealg=NoInit(), dt=Δt₀, - adaptive=true, abstol=1e-3, reltol=1e-3, + problem, timestepper; initializealg=NoInit(), dt=Δt₀, + adaptive=true, abstol=1e-4, reltol=1e-5, progress=true, progress_steps=1, - saveat=Δt_save); + verbose=true, internalnorm=FreeDofErrorNorm(ch), d_discontinuities=[1.0] +); -pvd = paraview_collection("vortex-street.pvd"); -integrator = TimeChoiceIterator(integrator, 0.0:Δt_save:T) -for (u_uc,t) in integrator - # We ignored the Dirichlet constraints in the solution vector up to now, - # so we have to bring them back now. - #+ - update!(ch, t) - u = copy(u_uc) - apply!(u, ch) - vtk_grid("vortex-street-$t.vtu", dh) do vtk - vtk_point_data(vtk,dh,u) - vtk_save(vtk) + +# !!! note "Export of solution" +# Exporting interpolated solutions of problems containing mass matrices is currently broken. +# Thus, the `intervals` iterator is used. Note that `solve` holds all solutions in the memory. 
+pvd = paraview_collection("vortex-street") +for (step, (u,t)) in enumerate(intervals(integrator)) + VTKGridFile("vortex-street-$step", dh) do vtk + write_solution(vtk, dh, u) pvd[t] = vtk end end -vtk_save(pvd); - -# Test the result for full proper development of the flow #src -using Test #hide -function compute_divergence(dh, u, cellvalues_v) #hide - divv = 0.0 #hide - for cell in CellIterator(dh) #hide - Ferrite.reinit!(cellvalues_v, cell) #hide - for q_point in 1:getnquadpoints(cellvalues_v) #hide - dΩ = getdetJdV(cellvalues_v, q_point) #hide - #hide - all_celldofs = celldofs(cell) #hide - v_celldofs = all_celldofs[dof_range(dh, :v)] #hide - v_cell = u[v_celldofs] #hide - #hide - divv += function_divergence(cellvalues_v, q_point, v_cell) * dΩ #hide - end #hide - end #hide - return divv #hide -end #hide -@testset "INS OrdinaryDiffEq" begin #hide - u = copy(integrator.integrator.u) #hide - apply!(u, ch) #hide - Δdivv = abs(compute_divergence(dh, u, cellvalues_v)) #hide - @test isapprox(Δdivv, 0.0, atol=1e-12) #hide - #hide - Δv = 0.0 #hide - for cell in CellIterator(dh) #hide - Ferrite.reinit!(cellvalues_v, cell) #hide - all_celldofs = celldofs(cell) #hide - v_celldofs = all_celldofs[dof_range(dh, :v)] #hide - v_cell = u[v_celldofs] #hide - coords = getcoordinates(cell) #hide - for q_point in 1:getnquadpoints(cellvalues_v) #hide - dΩ = getdetJdV(cellvalues_v, q_point) #hide - coords_qp = spatial_coordinate(cellvalues_v, q_point, coords) #hide - v = function_value(cellvalues_v, q_point, v_cell) #hide - Δv += norm(v - parabolic_inflow_profile(coords_qp, T))^2*dΩ #hide - end #hide - end #hide - @test isapprox(sqrt(Δv), 0.0, atol=1e-3) #hide -end; #hide +close(pvd); + + +using Test #hide +if IS_CI #hide + function compute_divergence(dh, u, cellvalues_v) #hide + divv = 0.0 #hide + for cell in CellIterator(dh) #hide + Ferrite.reinit!(cellvalues_v, cell) #hide + for q_point in 1:getnquadpoints(cellvalues_v) #hide + dΩ = getdetJdV(cellvalues_v, q_point) #hide + #hide + all_celldofs = celldofs(cell) #hide + v_celldofs = all_celldofs[dof_range(dh, :v)] #hide + v_cell = u[v_celldofs] #hide + #hide + divv += function_divergence(cellvalues_v, q_point, v_cell) * dΩ #hide + end #hide + end #hide + return divv #hide + end #hide + let #hide + u = copy(integrator.u) #hide + Δdivv = abs(compute_divergence(dh, u, cellvalues_v)) #hide + @test isapprox(Δdivv, 0.0, atol=1e-12) #hide + #hide + Δv = 0.0 #hide + for cell in CellIterator(dh) #hide + Ferrite.reinit!(cellvalues_v, cell) #hide + all_celldofs = celldofs(cell) #hide + v_celldofs = all_celldofs[dof_range(dh, :v)] #hide + v_cell = u[v_celldofs] #hide + coords = getcoordinates(cell) #hide + for q_point in 1:getnquadpoints(cellvalues_v) #hide + dΩ = getdetJdV(cellvalues_v, q_point) #hide + coords_qp = spatial_coordinate(cellvalues_v, q_point, coords) #hide + v = function_value(cellvalues_v, q_point, v_cell) #hide + Δv += norm(v - parabolic_inflow_profile(coords_qp, T))^2*dΩ #hide + end #hide + end #hide + @test isapprox(sqrt(Δv), 0.0, atol=1e-3) #hide + end; #hide + nothing #hide +end #hide #md # ## [Plain program](@id ns_vs_diffeq-plain-program) #md # diff --git a/docs/src/literate-tutorials/plasticity.jl b/docs/src/literate-tutorials/plasticity.jl index c1ec97781a..b8240f32df 100644 --- a/docs/src/literate-tutorials/plasticity.jl +++ b/docs/src/literate-tutorials/plasticity.jl @@ -9,7 +9,7 @@ #- #md # !!! tip #md # This example is also available as a Jupyter notebook: -#md # [`plasticity.ipynb`](@__NBVIEWER_ROOT_URL__/examples/plasticity.ipynb). 
+#md # [`plasticity.ipynb`](@__NBVIEWER_ROOT_URL__/tutorials/plasticity.ipynb). #- # # ## Introduction @@ -139,13 +139,13 @@ end function create_values(interpolation) ## setup quadrature rules qr = QuadratureRule{RefTetrahedron}(2) - face_qr = FaceQuadratureRule{RefTetrahedron}(3) + facet_qr = FacetQuadratureRule{RefTetrahedron}(3) - ## cell and facevalues for u + ## cell and facetvalues for u cellvalues_u = CellValues(qr, interpolation) - facevalues_u = FaceValues(face_qr, interpolation) + facetvalues_u = FacetValues(facet_qr, interpolation) - return cellvalues_u, facevalues_u + return cellvalues_u, facetvalues_u end; # ### Add degrees of freedom @@ -161,7 +161,7 @@ function create_bc(dh, grid) dbcs = ConstraintHandler(dh) ## Clamped on the left side dofs = [1, 2, 3] - dbc = Dirichlet(:u, getfaceset(grid, "left"), (x,t) -> [0.0, 0.0, 0.0], dofs) + dbc = Dirichlet(:u, getfacetset(grid, "left"), (x,t) -> [0.0, 0.0, 0.0], dofs) add!(dbcs, dbc) close!(dbcs) return dbcs @@ -229,17 +229,17 @@ function symmetrize_lower!(K) end end; -function doassemble_neumann!(r, dh, faceset, facevalues, t) - n_basefuncs = getnbasefunctions(facevalues) +function doassemble_neumann!(r, dh, faceset, facetvalues, t) + n_basefuncs = getnbasefunctions(facetvalues) re = zeros(n_basefuncs) # element residual vector - for fc in FaceIterator(dh, faceset) + for fc in FacetIterator(dh, faceset) ## Add traction as a negative contribution to the element residual `re`: - reinit!(facevalues, fc) + reinit!(facetvalues, fc) fill!(re, 0) - for q_point in 1:getnquadpoints(facevalues) - dΓ = getdetJdV(facevalues, q_point) + for q_point in 1:getnquadpoints(facetvalues) + dΓ = getdetJdV(facetvalues, q_point) for i in 1:n_basefuncs - δu = shape_value(facevalues, q_point, i) + δu = shape_value(facetvalues, q_point, i) re[i] -= (δu ⋅ t) * dΓ end end @@ -275,14 +275,14 @@ function solve() dh = create_dofhandler(grid, interpolation) # JuaFEM helper function dbcs = create_bc(dh, grid) # create Dirichlet boundary-conditions - cellvalues, facevalues = create_values(interpolation) + cellvalues, facetvalues = create_values(interpolation) ## Pre-allocate solution vectors, etc. n_dofs = ndofs(dh) # total number of dofs u = zeros(n_dofs) # solution vector Δu = zeros(n_dofs) # displacement correction r = zeros(n_dofs) # residual - K = create_sparsity_pattern(dh); # tangent stiffness matrix + K = allocate_matrix(dh); # tangent stiffness matrix ## Create material states. 
One array for each cell, where each element is an array of material- ## states - one for each integration point @@ -311,7 +311,7 @@ function solve() ## Tangent and residual contribution from the cells (volume integral) doassemble!(K, r, cellvalues, dh, material, u, states, states_old); ## Residual contribution from the Neumann boundary (surface integral) - doassemble_neumann!(r, dh, getfaceset(grid, "right"), facevalues, traction) + doassemble_neumann!(r, dh, getfacetset(grid, "right"), facetvalues, traction) norm_r = norm(r[Ferrite.free_dofs(dbcs)]) print("Iteration: $newton_itr \tresidual: $(@sprintf("%.8f", norm_r))\n") @@ -344,10 +344,10 @@ function solve() mises_values[el] /= length(cell_states) # average von Mises stress κ_values[el] /= length(cell_states) # average drag stress end - vtk_grid("plasticity", dh) do vtkfile - vtk_point_data(vtkfile, dh, u) # displacement field - vtk_cell_data(vtkfile, mises_values, "von Mises [Pa]") - vtk_cell_data(vtkfile, κ_values, "Drag stress [Pa]") + VTKGridFile("plasticity", dh) do vtk + write_solution(vtk, dh, u) # displacement field + write_cell_data(vtk, mises_values, "von Mises [Pa]") + write_cell_data(vtk, κ_values, "Drag stress [Pa]") end return u_max, traction_magnitude diff --git a/docs/src/literate-tutorials/porous_media.jl b/docs/src/literate-tutorials/porous_media.jl index ef2ee7e6ce..126222c199 100644 --- a/docs/src/literate-tutorials/porous_media.jl +++ b/docs/src/literate-tutorials/porous_media.jl @@ -1,12 +1,12 @@ -# # Porous media +# # Porous media # Porous media is a two-phase material, consisting of solid parts and a liquid occupying -# the pores inbetween. -# Using the porous media theory, we can model such a material without explicitly -# resolving the microstructure, but by considering the interactions between the -# solid and liquid. In this example, we will additionally consider larger linear -# elastic solid aggregates that are impermeable. Hence, there is no liquids in -# these particles and the only unknown variable is the displacement field `:u`. +# the pores inbetween. +# Using the porous media theory, we can model such a material without explicitly +# resolving the microstructure, but by considering the interactions between the +# solid and liquid. In this example, we will additionally consider larger linear +# elastic solid aggregates that are impermeable. Hence, there is no liquids in +# these particles and the only unknown variable is the displacement field `:u`. # In the porous media, denoted the matrix, we have both the displacement field, # `:u`, as well as the liquid pressure, `:p`, as unknown. The simulation result # is shown below @@ -21,62 +21,62 @@ # \dot{\Phi}(\boldsymbol{\epsilon}, p) + \boldsymbol{w}(p) \cdot \boldsymbol{\nabla} &= 0 # \end{aligned} # ``` -# where -# ``\boldsymbol{\epsilon} = \left[\boldsymbol{u}\otimes\boldsymbol{\nabla}\right]^\mathrm{sym}`` -# The constitutive relationships are +# where +# ``\boldsymbol{\epsilon} = \left[\boldsymbol{u}\otimes\boldsymbol{\nabla}\right]^\mathrm{sym}`` +# The constitutive relationships are # ```math # \begin{aligned} # \boldsymbol{\sigma} &= \boldsymbol{\mathsf{C}}:\boldsymbol{\epsilon} - \alpha p \boldsymbol{I} \\ # \boldsymbol{w} &= - k \boldsymbol{\nabla} p \\ # \Phi &= \phi + \alpha \mathrm{tr}(\boldsymbol{\epsilon}) + \beta p # \end{aligned} -# ``` -# with +# ``` +# with # ``\boldsymbol{\mathsf{C}}=2G \boldsymbol{\mathsf{I}}^\mathrm{dev} + 3K \boldsymbol{I}\otimes\boldsymbol{I}``. 
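+# As a quick side illustration, the constitutive relations above can be evaluated directly with
+# Tensors.jl. All numbers below are placeholder values chosen only to show the structure (and the
+# standard symmetric deviatoric identity is assumed for the deviatoric projector); the material
+# parameters actually used in this tutorial are listed next and set up further down in `Elastic`
+# and `PoroElastic`.
+using Tensors
+G, K, k, α, β, ϕ = 4.0e3, 6.7e3, 5.0e-3, 0.9, 1/15e3, 0.8        # placeholder parameters
+I2 = one(SymmetricTensor{2, 3})
+C = 2G * (one(SymmetricTensor{4, 3}) - (I2 ⊗ I2) / 3) + 3K * (I2 ⊗ I2)
+ϵ = SymmetricTensor{2, 3}((i, j) -> i == j ? 1.0e-3 : 2.0e-4)    # a sample strain state
+p = 10.0                                                         # a sample liquid pressure
+∇p = Vec{3}((0.0, 0.0, -10.0))                                   # a sample pressure gradient
+σ = C ⊡ ϵ - α * p * I2     # total stress
+w = -k * ∇p                # seepage velocity
+Φ = ϕ + α * tr(ϵ) + β * p  # liquid content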
-# The material parameters are then the -# shear modulus, ``G``, -# bulk modulus, ``K``, -# permeability, ``k``, +# The material parameters are then the +# shear modulus, ``G``, +# bulk modulus, ``K``, +# permeability, ``k``, # Biot's coefficient, ``\alpha``, and # liquid compressibility, ``\beta``. -# The porosity, ``\phi``, doesn't enter into the equations +# The porosity, ``\phi``, doesn't enter into the equations # (A different porosity leads to different skeleton stiffness and permeability). # -# +# # The variational (weak) form can then be derived for the variations ``\boldsymbol{\delta u}`` # and ``\delta p`` as # ```math # \begin{aligned} # \int_\Omega \left[\left[\boldsymbol{\delta u}\otimes\boldsymbol{\nabla}\right]^\mathrm{sym}: -# \boldsymbol{\mathsf{C}}:\boldsymbol{\epsilon} - \boldsymbol{\delta u} \cdot \boldsymbol{\nabla} \alpha p\right] \mathrm{d}\Omega +# \boldsymbol{\mathsf{C}}:\boldsymbol{\epsilon} - \boldsymbol{\delta u} \cdot \boldsymbol{\nabla} \alpha p\right] \mathrm{d}\Omega # &= \int_\Gamma \boldsymbol{\delta u} \cdot \boldsymbol{t} \mathrm{d} \Gamma \\ -# \int_\Omega \left[\delta p \left[\alpha \dot{\boldsymbol{u}} \cdot \boldsymbol{\nabla} + \beta \dot{p}\right] + -# \boldsymbol{\nabla}(\delta p) \cdot [k \boldsymbol{\nabla}]\right] \mathrm{d}\Omega -# &= \int_\Gamma \delta p w_\mathrm{n} \mathrm{d} \Gamma +# \int_\Omega \left[\delta p \left[\alpha \dot{\boldsymbol{u}} \cdot \boldsymbol{\nabla} + \beta \dot{p}\right] + +# \boldsymbol{\nabla}(\delta p) \cdot [k \boldsymbol{\nabla}]\right] \mathrm{d}\Omega +# &= \int_\Gamma \delta p w_\mathrm{n} \mathrm{d} \Gamma # \end{aligned} # ``` -# where ``\boldsymbol{t}=\boldsymbol{n}\cdot\boldsymbol{\sigma}`` is the traction and -# ``w_\mathrm{n} = \boldsymbol{n}\cdot\boldsymbol{w}`` is the normal flux. -# +# where ``\boldsymbol{t}=\boldsymbol{n}\cdot\boldsymbol{\sigma}`` is the traction and +# ``w_\mathrm{n} = \boldsymbol{n}\cdot\boldsymbol{w}`` is the normal flux. +# # ### Finite element form -# Discretizing in space using finite elements, we obtain the vector equation -# ``r_i = f_i^\mathrm{int} - f_{i}^\mathrm{ext}`` where ``f^\mathrm{ext}`` are the external -# "forces", and ``f_i^\mathrm{int}`` are the internal "forces". We split this into the -# displacement part ``r_i^\mathrm{u} = f_i^\mathrm{int,u} - f_{i}^\mathrm{ext,u}`` and +# Discretizing in space using finite elements, we obtain the vector equation +# ``r_i = f_i^\mathrm{int} - f_{i}^\mathrm{ext}`` where ``f^\mathrm{ext}`` are the external +# "forces", and ``f_i^\mathrm{int}`` are the internal "forces". 
We split this into the +# displacement part ``r_i^\mathrm{u} = f_i^\mathrm{int,u} - f_{i}^\mathrm{ext,u}`` and # pressure part ``r_i^\mathrm{p} = f_i^\mathrm{int,p} - f_{i}^\mathrm{ext,p}`` -# to obtain the discretized equation system +# to obtain the discretized equation system # ```math # \begin{aligned} -# f_i^\mathrm{int,u} &= \int_\Omega [\boldsymbol{\delta N}^\mathrm{u}_i\otimes\boldsymbol{\nabla}]^\mathrm{sym} : \boldsymbol{\mathsf{C}} : [\boldsymbol{u}\otimes\boldsymbol{\nabla}]^\mathrm{sym} \ -# - [\boldsymbol{\delta N}^\mathrm{u}_i \cdot \boldsymbol{\nabla}] \alpha p \mathrm{d}\Omega +# f_i^\mathrm{int,u} &= \int_\Omega [\boldsymbol{\delta N}^\mathrm{u}_i\otimes\boldsymbol{\nabla}]^\mathrm{sym} : \boldsymbol{\mathsf{C}} : [\boldsymbol{u}\otimes\boldsymbol{\nabla}]^\mathrm{sym} \ +# - [\boldsymbol{\delta N}^\mathrm{u}_i \cdot \boldsymbol{\nabla}] \alpha p \mathrm{d}\Omega # &= \int_\Gamma \boldsymbol{\delta N}^\mathrm{u}_i \cdot \boldsymbol{t} \mathrm{d} \Gamma \\ -# f_i^\mathrm{int,p} &= \int_\Omega \delta N_i^\mathrm{p} [\alpha [\dot{\boldsymbol{u}}\cdot\boldsymbol{\nabla}] + \beta\dot{p}] + \boldsymbol{\nabla}(\delta N_i^\mathrm{p}) \cdot [k \boldsymbol{\nabla}(p)] \mathrm{d}\Omega +# f_i^\mathrm{int,p} &= \int_\Omega \delta N_i^\mathrm{p} [\alpha [\dot{\boldsymbol{u}}\cdot\boldsymbol{\nabla}] + \beta\dot{p}] + \boldsymbol{\nabla}(\delta N_i^\mathrm{p}) \cdot [k \boldsymbol{\nabla}(p)] \mathrm{d}\Omega # &= \int_\Gamma \delta N_i^\mathrm{p} w_\mathrm{n} \mathrm{d} \Gamma # \end{aligned} # ``` # Approximating the time-derivatives, ``\dot{\boldsymbol{u}}\approx \left[\boldsymbol{u}-{}^n\boldsymbol{u}\right]/\Delta t`` -# and ``\dot{p}\approx \left[p-{}^np\right]/\Delta t``, we can implement the finite element equations in the residual form -# ``r_i(\boldsymbol{a}(t), t) = 0`` where the vector ``\boldsymbol{a}`` contains all unknown displacements ``u_i`` and pressures ``p_i``. +# and ``\dot{p}\approx \left[p-{}^np\right]/\Delta t``, we can implement the finite element equations in the residual form +# ``r_i(\boldsymbol{a}(t), t) = 0`` where the vector ``\boldsymbol{a}`` contains all unknown displacements ``u_i`` and pressures ``p_i``. # # The jacobian, ``K_{ij} = \partial r_i/\partial a_j``, is then split into four parts, # ```math @@ -87,17 +87,17 @@ # K_{ij}^\mathrm{pp} &= \frac{\partial r_i^\mathrm{p}}{\partial p_j} = \int_\Omega \delta N_i^\mathrm{p} \frac{N_j^\mathrm{p}}{\Delta t} + \boldsymbol{\nabla}(\delta N_i^\mathrm{p}) \cdot [k \boldsymbol{\nabla}(N_j^\mathrm{p})] \mathrm{d}\Omega # \end{aligned} # ``` -# -# We could assemble one stiffness matrix and one mass matrix, which would be constant, but for simplicity we only consider a single -# system matrix that depends on the time step, and assemble this for each step. The equations are still linear, so no iterations are required. -# +# +# We could assemble one stiffness matrix and one mass matrix, which would be constant, but for simplicity we only consider a single +# system matrix that depends on the time step, and assemble this for each step. The equations are still linear, so no iterations are required. +# # ## Implementation -# We now solve the problem step by step. The full program with fewer comments is found in +# We now solve the problem step by step. 
The full program with fewer comments is found in #md # the final [section](@ref porous-media-plain-program) -# -# Required packages -using Ferrite, FerriteMeshParser, Tensors # +# Required packages +using Ferrite, FerriteMeshParser, Tensors, WriteVTK + # ### Elasticity # We start by defining the elastic material type, containing the elastic stiffness, # for the linear elastic impermeable solid aggregates. @@ -113,8 +113,8 @@ function Elastic(;E=20.e3, ν=0.3) return Elastic(2G*I4dev + K*I4vol) end; -# Next, we define the element routine for the solid aggregates, where we dispatch on the -# `Elastic` material struct. Note that the unused inputs here are used for the porous matrix below. +# Next, we define the element routine for the solid aggregates, where we dispatch on the +# `Elastic` material struct. Note that the unused inputs here are used for the porous matrix below. function element_routine!(Ke, re, material::Elastic, cv, cell, a, args...) reinit!(cv, cell) n_basefuncs = getnbasefunctions(cv) @@ -126,7 +126,7 @@ function element_routine!(Ke, re, material::Elastic, cv, cell, a, args...) for i in 1:n_basefuncs δ∇N = shape_symmetric_gradient(cv, q_point, i) re[i] += (δ∇N ⊡ σ)*dΩ - for j in 1:n_basefuncs + for j in 1:n_basefuncs ∇N = shape_symmetric_gradient(cv, q_point, j) Ke[i, j] += (δ∇N ⊡ material.C ⊡ ∇N) * dΩ end @@ -135,7 +135,7 @@ function element_routine!(Ke, re, material::Elastic, cv, cell, a, args...) end; # ### PoroElasticity -# To define the poroelastic material, we re-use the elastic part from above for +# To define the poroelastic material, we re-use the elastic part from above for # the skeleton, and add the additional required material parameters. struct PoroElastic{T} elastic::Elastic{T} ## Skeleton stiffness @@ -146,8 +146,8 @@ struct PoroElastic{T} end PoroElastic(;elastic, k, ϕ, α, β) = PoroElastic(elastic, k, ϕ, α, β); -# The element routine requires a few more inputs since we have two fields, as well -# as the dependence on the rates of the displacements and pressure. +# The element routine requires a few more inputs since we have two fields, as well +# as the dependence on the rates of the displacements and pressure. # Again, we dispatch on the material type. 
function element_routine!(Ke, re, m::PoroElastic, cvs::Tuple, cell, a, a_old, Δt, sdh) ## Setup cellvalues and give easier names @@ -156,14 +156,14 @@ function element_routine!(Ke, re, m::PoroElastic, cvs::Tuple, cell, a, a_old, Δ dr_u = dof_range(sdh, :u) dr_p = dof_range(sdh, :p) - C = m.elastic.C ## Elastic stiffness + C = m.elastic.C ## Elastic stiffness ## Assemble stiffness and force vectors - for q_point in 1:getnquadpoints(cv_u) + for q_point in 1:getnquadpoints(cv_u) dΩ = getdetJdV(cv_u, q_point) p = function_value(cv_p, q_point, a, dr_p) p_old = function_value(cv_p, q_point, a_old, dr_p) - pdot = (p - p_old)/Δt + pdot = (p - p_old)/Δt ∇p = function_gradient(cv_p, q_point, a, dr_p) ϵ = function_symmetric_gradient(cv_u, q_point, a, dr_u) tr_ϵ_old = function_divergence(cv_u, q_point, a_old, dr_u) @@ -196,7 +196,7 @@ function element_routine!(Ke, re, m::PoroElastic, cvs::Tuple, cell, a, a_old, Δ ∇Np = shape_gradient(cv_p, q_point, jₚ) Np = shape_value(cv_p, q_point, jₚ) Ke[Iₚ,Jₚ] += (δNp * m.β*Np/Δt + m.k * (∇δNp ⋅ ∇Np) ) * dΩ - end + end end end end; @@ -209,8 +209,8 @@ struct FEDomain{M,CV,SDH<:SubDofHandler} sdh::SDH end; -# And then we can loop over a vector of such domains, allowing us to -# loop over each domain, to assemble the contributions from each +# And then we can loop over a vector of such domains, allowing us to +# loop over each domain, to assemble the contributions from each # cell in that domain (given by the `SubDofHandler`'s cellset) function doassemble!(K, r, domains::Vector{<:FEDomain}, a, a_old, Δt) assembler = start_assemble(K, r) @@ -223,9 +223,9 @@ end; # we can then loop over all cells in its cellset. Doing this # in a separate function (instead of a nested loop), ensures # that the calls to the `element_routine` are type stable, -# which can be important for good performance. +# which can be important for good performance. function doassemble!(assembler, domain::FEDomain, a, a_old, Δt) - material = domain.material + material = domain.material cv = domain.cellvalues sdh = domain.sdh n = ndofs_per_cell(sdh) @@ -236,7 +236,7 @@ function doassemble!(assembler, domain::FEDomain, a, a_old, Δt) for cell in CellIterator(sdh) ## copy values from a to ae map!(i->a[i], ae, celldofs(cell)) - map!(i->a_old[i], ae_old, celldofs(cell)) + map!(i->a_old[i], ae_old, celldofs(cell)) fill!(Ke, 0) fill!(re, 0) element_routine!(Ke, re, material, cv, cell, ae, ae_old, Δt, sdh) @@ -245,7 +245,7 @@ function doassemble!(assembler, domain::FEDomain, a, a_old, Δt) end; # ### Mesh import -# In this example, we import the mesh from the Abaqus input file, [`porous_media_0p25.inp`](porous_media_0p25.inp) using `FerriteMeshParser`'s +# In this example, we import the mesh from the Abaqus input file, [`porous_media_0p25.inp`](porous_media_0p25.inp) using `FerriteMeshParser`'s # `get_ferrite_grid` function. We then create one cellset for each phase (solid and porous) # for each element type. These 4 sets will later be used in their own `SubDofHandler` function get_grid() @@ -261,12 +261,12 @@ function get_grid() end; # ### Problem setup -# Define the finite element interpolation, integration, and boundary conditions. +# Define the finite element interpolation, integration, and boundary conditions. 
function setup_problem(;t_rise=0.1, u_max=-0.1) grid = get_grid() - ## Define materials + ## Define materials m_solid = Elastic(;E=20.e3, ν=0.3) m_porous = PoroElastic(;elastic=Elastic(;E=10e3, ν=0.3), β=1/15e3, α=0.9, k=5.0e-3, ϕ=0.8) @@ -275,8 +275,8 @@ function setup_problem(;t_rise=0.1, u_max=-0.1) ipu_tri = Lagrange{RefTriangle,2}()^2 ipp_quad = Lagrange{RefQuadrilateral,1}() ipp_tri = Lagrange{RefTriangle,1}() - - ## Quadrature rules + + ## Quadrature rules qr_quad = QuadratureRule{RefQuadrilateral}(2) qr_tri = QuadratureRule{RefTriangle}(2) @@ -294,11 +294,11 @@ function setup_problem(;t_rise=0.1, u_max=-0.1) ## Solid triangles sdh_solid_tri = SubDofHandler(dh, getcellset(grid,"solid3")) add!(sdh_solid_tri, :u, ipu_tri) - ## Porous quads + ## Porous quads sdh_porous_quad = SubDofHandler(dh, getcellset(grid, "porous4")) add!(sdh_porous_quad, :u, ipu_quad) add!(sdh_porous_quad, :p, ipp_quad) - ## Porous triangles + ## Porous triangles sdh_porous_tri = SubDofHandler(dh, getcellset(grid, "porous3")) add!(sdh_porous_tri, :u, ipu_tri) add!(sdh_porous_tri, :p, ipp_tri) @@ -313,33 +313,34 @@ function setup_problem(;t_rise=0.1, u_max=-0.1) ] ## Boundary conditions - ## Sliding for u, except top which is compressed - ## Sealed for p, except top with prescribed zero pressure - addfaceset!(dh.grid, "sides", x -> x[1] < 1e-6 || x[1] ≈ 5.0) - addfaceset!(dh.grid, "top", x -> x[2]≈10.0) + ## Sliding for u, except top which is compressed + ## Sealed for p, except top with prescribed zero pressure + addfacetset!(dh.grid, "sides", x -> x[1] < 1e-6 || x[1] ≈ 5.0) + addfacetset!(dh.grid, "top", x -> x[2]≈10.0) ch = ConstraintHandler(dh); - add!(ch, Dirichlet(:u, getfaceset(grid, "bottom"), (x, t) -> zero(Vec{1}), [2])) - add!(ch, Dirichlet(:u, getfaceset(grid, "sides"), (x, t) -> zero(Vec{1}), [1])) - add!(ch, Dirichlet(:u, getfaceset(grid, "top"), (x, t) -> u_max*clamp(t/t_rise, 0, 1), [2])) - add!(ch, Dirichlet(:p, getfaceset(grid, "top_p"), (x, t) -> 0.0)) + add!(ch, Dirichlet(:u, getfacetset(grid, "bottom"), (x, t) -> zero(Vec{1}), [2])) + add!(ch, Dirichlet(:u, getfacetset(grid, "sides"), (x, t) -> zero(Vec{1}), [1])) + add!(ch, Dirichlet(:u, getfacetset(grid, "top"), (x, t) -> u_max*clamp(t/t_rise, 0, 1), [2])) + add!(ch, Dirichlet(:p, getfacetset(grid, "top_p"), (x, t) -> 0.0)) close!(ch) return dh, ch, domains end; # ### Solving -# Given the `DofHandler`, `ConstraintHandler`, and `CellValues`, +# Given the `DofHandler`, `ConstraintHandler`, and `CellValues`, # we can solve the problem by stepping through the time history function solve(dh, ch, domains; Δt=0.025, t_total=1.0) - K = create_sparsity_pattern(dh); + K = allocate_matrix(dh) r = zeros(ndofs(dh)) a = zeros(ndofs(dh)) a_old = copy(a) - pvd = paraview_collection("porous_media.pvd"); - for (step, t) in enumerate(0:Δt:t_total) + pvd = paraview_collection("porous_media") + step = 0 + for t in 0:Δt:t_total if t>0 update!(ch, t) - apply!(a, ch) + apply!(a, ch) doassemble!(K, r, domains, a, a_old, Δt) apply_zero!(K, r, ch) Δa = -K\r @@ -347,13 +348,13 @@ function solve(dh, ch, domains; Δt=0.025, t_total=1.0) a .+= Δa copyto!(a_old, a) end - vtk_grid("porous_media-$step", dh) do vtk - vtk_point_data(vtk, dh, a) - vtk_save(vtk) - pvd[step] = vtk + step += 1 + VTKGridFile("porous_media_$step", dh) do vtk + write_solution(vtk, dh, a) + pvd[t] = vtk end end - vtk_save(pvd); + close(pvd); end; # Finally we call the functions to actually run the code @@ -367,4 +368,4 @@ solve(dh, ch, domains); #md # #md # ```julia #md # @__CODE__ -#md # ``` \ No 
newline at end of file +#md # ``` diff --git a/docs/src/literate-tutorials/reactive_surface.jl b/docs/src/literate-tutorials/reactive_surface.jl new file mode 100644 index 0000000000..d1003d5bed --- /dev/null +++ b/docs/src/literate-tutorials/reactive_surface.jl @@ -0,0 +1,335 @@ +# Putting this flag to false reproduces the figure shown in the example #src +# We check for laminar flow development in the CI #src +if isdefined(Main, :is_ci) #hide + IS_CI = Main.is_ci #hide +else #hide + IS_CI = false #hide +end #hide +nothing #hide +# # [Reactive surface](@id tutorial-reactive-surface) +# +# ![](reactive_surface.gif) +# +# *Figure 1*: Reactant concentration field of the Gray-Scott model on the unit sphere. +# +#- +#md # !!! tip +#md # This example is also available as a Jupyter notebook: +#md # [`reactive_surface.ipynb`](@__NBVIEWER_ROOT_URL__/tutorials/reactive_surface.ipynb). +#- +# +# ## Introduction +# +# This tutorial gives a quick tutorial on how to assemble and solve time-dependent problems +# on embedded surfaces. +# +# For this showcase we use the well known Gray-Scott model, which is a well-known reaction-diffusion +# system to study pattern formation. The strong form is given by +# +# ```math +# \begin{aligned} +# \partial_t r_1 &= \nabla \cdot (D_1 \nabla r_1) - r_1*r_2^2 + F *(1 - r_1) \quad \textbf{x} \in \Omega, \\ +# \partial_t r_2 &= \nabla \cdot (D_2 \nabla r_2) + r_1*r_2^2 - r_2*(F + k ) \quad \textbf{x} \in \Omega, +# \end{aligned} +# ``` +# +# where $r_1$ and $r_2$ are the reaction fields, $D_1$ and $D_2$ the diffusion tensors, +# $k$ is the conversion rate, $F$ is the feed rate and $\Omega$ the domain. Depending on the choice of +# parameters a different pattern can be observed. Please also note that the domain does not have a +# boundary. The corresponding weak form can be derived as usual. +# +# For simplicity we will solve the problem with the Lie-Troter-Godunov operator splitting technique with +# the classical reaction-diffusion split. In this method we split our problem in two problems, i.e. a heat +# problem and a pointwise reaction problem, and solve them alternatingly to advance in time. +# +# ## Solver details +# +# The main idea for the Lie-Trotter-Godunov scheme is simple. We can write down the reaction diffusion +# problem in an abstract way as +# ```math +# \partial_t \mathbf{r} = \mathcal{D}\mathbf{r} + R(\mathbf{r}) \quad \textbf{x} \in \Omega +# ``` +# where $\mathcal{D}$ is the diffusion operator and $R$ is the reaction operator. Notice that the right +# hand side is just the sum of two operators. Now with our operator splitting scheme we can advance a +# solution $\mathbf{r}(t_1)$ to $\mathbf{r}(t_2)$ by first solving a heat problem +# ```math +# \partial_t \mathbf{r}^{\mathrm{\mathrm{A}}} = \mathcal{D}\mathbf{r}^{\mathrm{A}} \quad \textbf{x} \in \Omega +# ``` +# with $\mathbf{r}^{\mathrm{A}}(t_1) = \mathbf{r}(t_1)$ on the time interval $t_1$ to $t_2$ and use +# the solution as the initial condition to solve the reaction problem +# ```math +# \partial_t \mathbf{r}^{\mathrm{B}} = R(\mathbf{r}^{\mathrm{B}}) \quad \textbf{x} \in \Omega +# ``` +# with $\mathbf{r}^{\mathrm{B}}(t_1) = \mathbf{r}^{\mathrm{A}}(t_2)$. +# This way we obtain a solution approximation $\mathbf{r}(t_2) \approx \mathbf{r}^{\mathrm{B}}(t_2)$. +# +# !!! note +# The operator splitting itself is an approximation, so even if we solve the subproblems analytically +# we end up with having only a solution approximation. 
We also do not have a beginner friendly reference +# for the theory behind operator splitting and can only refer to the original papers for each method. +# +#- +# ## Commented Program +# +# Now we solve the problem in Ferrite. What follows is a program spliced with comments. +#md # The full program, without comments, can be found in the next [section](@ref reactive_surface-plain-program). +# +# First we load Ferrite, and some other packages we need + +using Ferrite, FerriteGmsh +using BlockArrays, SparseArrays, LinearAlgebra, WriteVTK + +# ### Assembly routines +# Before we head into the assembly, we define a helper struct to control the dispatches. +struct GrayScottMaterial{T} + D₁::T + D₂::T + F::T + k::T +end; + +# The following assembly routines are written analogue to these found in previous tutorials. +function assemble_element_mass!(Me::Matrix, cellvalues::CellValues) + n_basefuncs = getnbasefunctions(cellvalues) + ## The mass matrices between the reactions are not coupled, so we get a blocked-strided matrix. + num_reactants = 2 + r₁range = 1:num_reactants:num_reactants*n_basefuncs + r₂range = 2:num_reactants:num_reactants*n_basefuncs + Me₁ = @view Me[r₁range, r₁range] + Me₂ = @view Me[r₂range, r₂range] + ## Reset to 0 + fill!(Me, 0) + ## Loop over quadrature points + for q_point in 1:getnquadpoints(cellvalues) + ## Get the quadrature weight + dΩ = getdetJdV(cellvalues, q_point) + ## Loop over test shape functions + for i in 1:n_basefuncs + δuᵢ = shape_value(cellvalues, q_point, i) + ## Loop over trial shape functions + for j in 1:n_basefuncs + δuⱼ = shape_value(cellvalues, q_point, j) + ## Add contribution to Ke + Me₁[i,j] += (δuᵢ * δuⱼ) * dΩ + Me₂[i,j] += (δuᵢ * δuⱼ) * dΩ + end + end + end + return nothing +end + +function assemble_element_diffusion!(De::Matrix, cellvalues::CellValues, material::GrayScottMaterial) + n_basefuncs = getnbasefunctions(cellvalues) + D₁ = material.D₁ + D₂ = material.D₂ + ## The diffusion between the reactions is not coupled, so we get a blocked-strided matrix. 
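+    ## With the vectorized interpolation `ip^2` used further down, the element dofs come
+    ## interleaved as [r₁ at node 1, r₂ at node 1, r₁ at node 2, r₂ at node 2, ...], so the
+    ## strided ranges below address the two uncoupled diagonal blocks of the element matrix.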
+ num_reactants = 2 + r₁range = 1:num_reactants:num_reactants*n_basefuncs + r₂range = 2:num_reactants:num_reactants*n_basefuncs + De₁ = @view De[r₁range, r₁range] + De₂ = @view De[r₂range, r₂range] + ## Reset to 0 + fill!(De, 0) + ## Loop over quadrature points + for q_point in 1:getnquadpoints(cellvalues) + ## Get the quadrature weight + dΩ = getdetJdV(cellvalues, q_point) + ## Loop over test shape functions + for i in 1:n_basefuncs + ∇δuᵢ = shape_gradient(cellvalues, q_point, i) + ## Loop over trial shape functions + for j in 1:n_basefuncs + ∇δuⱼ = shape_gradient(cellvalues, q_point, j) + ## Add contribution to De + De₁[i,j] += D₁ * (∇δuᵢ ⋅ ∇δuⱼ) * dΩ + De₂[i,j] += D₂ * (∇δuᵢ ⋅ ∇δuⱼ) * dΩ + end + end + end + return nothing +end + +function assemble_matrices!(M::SparseMatrixCSC, D::SparseMatrixCSC, cellvalues::CellValues, dh::DofHandler, material::GrayScottMaterial) + n_basefuncs = getnbasefunctions(cellvalues) + + ## Allocate the element stiffness matrix and element force vector + Me = zeros(2*n_basefuncs, 2*n_basefuncs) + De = zeros(2*n_basefuncs, 2*n_basefuncs) + + ## Create an assembler + M_assembler = start_assemble(M) + D_assembler = start_assemble(D) + ## Loop over all cells + for cell in CellIterator(dh) + ## Reinitialize cellvalues for this cell + reinit!(cellvalues, cell) + ## Compute element contribution + assemble_element_mass!(Me, cellvalues) + assemble!(M_assembler, celldofs(cell), Me) + + assemble_element_diffusion!(De, cellvalues, material) + assemble!(D_assembler, celldofs(cell), De) + end + return nothing +end; + +# ### Initial condition setup +# Time-dependent problems always need an initial condition from which the time evolution starts. +# In this tutorial we set the concentration of reactant 1 to $1$ and the concentration of reactant +# 2 to $0$ for all nodal dofs with associated coordinate $z \leq 0.9$ on the sphere. Since the +# simulation would be pretty boring with a steady-state initial condition, we introduce some +# heterogeneity by setting the dofs associated with the top part of the sphere (i.e. dofs with $z > 0.9$) +# to store the reactant concentrations of $0.5$ and $0.25$ for the reactants 1 and 2, respectively. +function setup_initial_conditions!(u₀::Vector, cellvalues::CellValues, dh::DofHandler) + u₀ .= ones(ndofs(dh)) + u₀[2:2:end] .= 0.0 + + n_basefuncs = getnbasefunctions(cellvalues) + + for cell in CellIterator(dh) + reinit!(cellvalues, cell) + + coords = getcoordinates(cell) + dofs = celldofs(cell) + uₑ = @view u₀[dofs] + rv₀ₑ = reshape(uₑ, (2, n_basefuncs)) + + for i in 1:n_basefuncs + if coords[i][3] > 0.9 + rv₀ₑ[1, i] = 0.5 + rv₀ₑ[2, i] = 0.25 + end + end + end + + u₀ .+= 0.01*rand(ndofs(dh)) +end; + +# ### Mesh generation +# In this section we define a routine to create a surface mesh with the help of FerriteGmsh.jl. +function create_embedded_sphere(refinements) + gmsh.initialize() + + ## Add a unit sphere in 3D space + gmsh.model.occ.addSphere(0.0,0.0,0.0,1.0) + gmsh.model.occ.synchronize() + + ## Generate nodes and surface elements only, hence we need to pass 2 into generate + gmsh.model.mesh.generate(2) + + ## To get good solution quality refine the elements several times + for _ in 1:refinements + gmsh.model.mesh.refine() + end + + ## Now we create a Ferrite grid out of it. Note that we also call toelements + ## with our surface element dimension to obtain these.
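+    ## (`tonodes` and `toelements` are FerriteGmsh helpers: the former converts the Gmsh nodes to
+    ## Ferrite nodes with three-dimensional coordinates, the latter extracts the elements of the
+    ## requested dimension -- here 2, i.e. the surface elements -- so the grid constructed below is
+    ## a surface embedded in 3D space.)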
+ nodes = tonodes() + elements, _ = toelements(2) + gmsh.finalize() + grid = Grid(elements, nodes); +end + +# ### Simulation routines +# Now we define a function to set up and solve the problem with given feed and conversion rates +# $F$ and $k$, as well as the time step length and for how long we want to solve the model. +function gray_scott_on_sphere(material::GrayScottMaterial, Δt::Real, T::Real, refinements::Integer) + ## We start by setting up grid, dof handler and the matrices for the heat problem. + grid = create_embedded_sphere(refinements) + + ## Next we create our element assembly helper for surface elements. + ## The only change which we need to introduce here is to pass in a geometrical + ## interpolation with the same dimension as the physical space into which our + ## elements are embedded, which in this example is 3. + ip = Lagrange{RefTriangle, 1}() + qr = QuadratureRule{RefTriangle}(2) + cellvalues = CellValues(qr, ip, ip^3); + + ## We have two options to add the reactants to the dof handler, which will give us slightly + ## different resulting dof distributions: + ## A) We can add a scalar-valued interpolation for each reactant. + ## B) We can add one vectorized interpolation whose dimension equals the + ## number of reactants. + ## In this tutorial we opt for B, because the dofs are distributed per cell entity -- or + ## to be specific for this tutorial, we use an isoparametric concept such that the nodes + ## of our grid and the nodes of our solution approximation coincide. This way we can + ## simply reshape the solution vector u into a matrix where the inner index + ## corresponds to the index of the reactant. Note that we will still use the scalar + ## interpolation for the assembly procedure. + dh = DofHandler(grid); + add!(dh, :reactants, ip^2); + close!(dh); + + ## We can save some memory by telling the sparsity pattern that the matrices are not coupled. + M = allocate_matrix(dh; coupling=[true false; false true]) + D = allocate_matrix(dh; coupling=[true false; false true]) + + ## Since the heat problem is linear and has no time-dependent parameters, we precompute the + ## decomposition of the system matrix to speed up the linear system solver. + assemble_matrices!(M, D, cellvalues, dh, material); + A = M + Δt .* D + cholA = cholesky(A) + + ## Now we set up buffers for the time-dependent solution and fill the initial condition. + uₜ = zeros(ndofs(dh)) + uₜ₋₁ = ones(ndofs(dh)) + setup_initial_conditions!(uₜ₋₁, cellvalues, dh) + + ## And prepare output for visualization. + pvd = paraview_collection("reactive-surface") + VTKGridFile("reactive-surface-0", dh) do vtk + write_solution(vtk, dh, uₜ₋₁) + pvd[0.0] = vtk + end + + ## This is now the main solve loop. + F = material.F + k = material.k + for (iₜ, t) ∈ enumerate(Δt:Δt:T) + ## First we solve the heat problem + uₜ .= cholA \ (M * uₜ₋₁) + + ## Then we solve the point-wise reaction problem with the solution of + ## the heat problem as the initial guess. Here, 2 is the number of reactants. + num_individual_reaction_dofs = ndofs(dh) ÷ 2 + rvₜ = reshape(uₜ, (2, num_individual_reaction_dofs)) + for i ∈ 1:num_individual_reaction_dofs + r₁ = rvₜ[1, i] + r₂ = rvₜ[2, i] + rvₜ[1, i] += Δt*( -r₁*r₂^2 + F *(1 - r₁) ) + rvₜ[2, i] += Δt*( r₁*r₂^2 - r₂*(F + k ) ) + end + + ## The solution is then stored every 10th step to vtk files for + ## later visualization purposes.
+ if (iₜ % 10) == 0 + VTKGridFile("reactive-surface-$(iₜ)", dh) do vtk + write_solution(vtk, dh, uₜ₋₁) + pvd[t] = vtk + end + end + + ## Finally we rotate the solution to initialize the next timestep. + uₜ₋₁ .= uₜ + end + + close(pvd); +end + +## This parametrization gives the spot pattern shown in the gif above. +material = GrayScottMaterial(0.00016, 0.00008, 0.06, 0.062) +if !IS_CI #src +gray_scott_on_sphere(material, 10.0, 32000.0, 3) +else #src +gray_scott_on_sphere(material, 10.0, 20.0, 0) #src +end #src +nothing #src + +#md # ## [Plain program](@id reactive_surface-plain-program) +#md # +#md # Here follows a version of the program without any comments. +#md # The file is also available here: [`reactive_surface.jl`](reactive_surface.jl). +#md # +#md # ```julia +#md # @__CODE__ +#md # ``` diff --git a/docs/src/literate-tutorials/stokes-flow.jl b/docs/src/literate-tutorials/stokes-flow.jl index de9b27ab86..ac161f89bd 100644 --- a/docs/src/literate-tutorials/stokes-flow.jl +++ b/docs/src/literate-tutorials/stokes-flow.jl @@ -4,7 +4,7 @@ #- #md # !!! tip #md # This example is also available as a Jupyter notebook: -#md # [`stokes-flow.ipynb`](@__NBVIEWER_ROOT_URL__/examples/stokes-flow.ipynb). +#md # [`stokes-flow.ipynb`](@__NBVIEWER_ROOT_URL__/tutorials/stokes-flow.ipynb). #- # # ![](stokes-flow.png) @@ -146,7 +146,6 @@ using Ferrite, FerriteGmsh, Gmsh, Tensors, LinearAlgebra, SparseArrays using Test #src - # ### Geometry and mesh generation with `Gmsh.jl` # # In the `setup_grid` function below we use the @@ -212,6 +211,15 @@ function setup_grid(h=0.05) ## Finalize the Gmsh library Gmsh.finalize() + ## Temp fix for FerriteGmsh + ## for setname in ["Γ1", "Γ2", "Γ3", "Γ4"] + ## faceset = grid.facesets[setname] + ## edgeset = Set([EdgeIndex(f[1], f[2]) for f in faceset]) + ## grid.edgesets[setname] = edgeset + ## delete!(grid.facesets, setname) + ## end + ## =# + return grid end #md nothing #hide @@ -231,8 +239,8 @@ function setup_fevalues(ipu, ipp, ipg) qr = QuadratureRule{RefTriangle}(2) cvu = CellValues(qr, ipu, ipg) cvp = CellValues(qr, ipp, ipg) - qr_face = FaceQuadratureRule{RefTriangle}(2) - fvp = FaceValues(qr_face, ipp, ipg) + qr_facet = FacetQuadratureRule{RefTriangle}(2) + fvp = FacetValues(qr_facet, ipp, ipg) return cvu, cvp, fvp end #md nothing #hide @@ -288,10 +296,10 @@ function setup_mean_constraint(dh, fvp) assembler = start_assemble() ## All external boundaries set = union( - getfaceset(dh.grid, "Γ1"), - getfaceset(dh.grid, "Γ2"), - getfaceset(dh.grid, "Γ3"), - getfaceset(dh.grid, "Γ4"), + getfacetset(dh.grid, "Γ1"), + getfacetset(dh.grid, "Γ2"), + getfacetset(dh.grid, "Γ3"), + getfacetset(dh.grid, "Γ4"), ) ## Allocate buffers range_p = dof_range(dh, :p) @@ -339,7 +347,7 @@ end # outlet ``\Gamma_3`` to be the mirror. The necessary transformation to apply then becomes a # rotation of ``\pi/2`` radians around the out-of-plane axis. We set up the rotation matrix # `R`, and then compute the mapping between mirror and image faces using -[`collect_periodic_faces`](@ref) where the rotation is applied to the coordinates. In the +[`collect_periodic_facets`](@ref) where the rotation is applied to the coordinates. In the # next step we construct the constraint using the [`PeriodicDirichlet`](@ref) constructor. # We pass the constructor the computed mapping, and also the rotation matrix.
This matrix is # used to rotate the dofs on the mirror surface such that we properly constrain @@ -354,11 +362,11 @@ function setup_constraints(dh, fvp) ch = ConstraintHandler(dh) ## Periodic BC R = rotation_tensor(π / 2) - periodic_faces = collect_periodic_faces(dh.grid, "Γ3", "Γ1", x -> R ⋅ x) + periodic_faces = collect_periodic_facets(dh.grid, "Γ3", "Γ1", x -> R ⋅ x) periodic = PeriodicDirichlet(:u, periodic_faces, R, [1, 2]) add!(ch, periodic) ## Dirichlet BC - Γ24 = union(getfaceset(dh.grid, "Γ2"), getfaceset(dh.grid, "Γ4")) + Γ24 = union(getfacetset(dh.grid, "Γ2"), getfacetset(dh.grid, "Γ4")) dbc = Dirichlet(:u, Γ24, (x, t) -> [0, 0], [1, 2]) add!(ch, dbc) ## Compute mean value constraint and add it @@ -439,8 +447,8 @@ end function check_mean_constraint(dh, fvp, u) #src ## All external boundaries #src set = union( #src - getfaceset(dh.grid, "Γ1"), getfaceset(dh.grid, "Γ2"), #src - getfaceset(dh.grid, "Γ3"), getfaceset(dh.grid, "Γ4"), #src + getfacetset(dh.grid, "Γ1"), getfacetset(dh.grid, "Γ2"), #src + getfacetset(dh.grid, "Γ3"), getfacetset(dh.grid, "Γ4"), #src ) #src range_p = dof_range(dh, :p) #src cc = CellCache(dh) #src @@ -497,7 +505,7 @@ function main() ch = setup_constraints(dh, fvp) ## Global tangent matrix and rhs coupling = [true true; true false] # no coupling between pressure test/trial functions - K = create_sparsity_pattern(dh, ch; coupling=coupling) + K = allocate_matrix(dh, ch; coupling=coupling) f = zeros(ndofs(dh)) ## Assemble system assemble_system!(K, f, dh, cvu, cvp) @@ -506,8 +514,8 @@ function main() u = K \ f apply!(u, ch) ## Export the solution - vtk_grid("stokes-flow", grid) do vtk - vtk_point_data(vtk, dh, u) + VTKGridFile("stokes-flow", grid) do vtk + write_solution(vtk, dh, u) end ## Check the result #src diff --git a/docs/src/literate-tutorials/transient_heat_equation.jl b/docs/src/literate-tutorials/transient_heat_equation.jl index b25a822aa4..c6a89de6f0 100644 --- a/docs/src/literate-tutorials/transient_heat_equation.jl +++ b/docs/src/literate-tutorials/transient_heat_equation.jl @@ -10,7 +10,7 @@ #- #md # !!! tip #md # This example is also available as a Jupyter notebook: -#md # [`transient_heat_equation.ipynb`](@__NBVIEWER_ROOT_URL__/examples/transient_heat_equation.ipynb). +#md # [`transient_heat_equation.ipynb`](@__NBVIEWER_ROOT_URL__/tutorials/transient_heat_equation.ipynb). #- # # ## Introduction @@ -58,7 +58,7 @@ #md # The full program, without comments, can be found in the next [section](@ref heat_equation-plain-program). # # First we load Ferrite, and some other packages we need. -using Ferrite, SparseArrays +using Ferrite, SparseArrays, WriteVTK # We create the same grid as in the heat equation example. grid = generate_grid(Quadrilateral, (100, 100)); @@ -80,8 +80,8 @@ close!(dh); # M_{ij} = \int_{\Omega} v_i \, u_j \ \mathrm{d}\Omega, # ``` # where $u_i$ and $v_j$ are trial and test functions, respectively. -K = create_sparsity_pattern(dh); -M = create_sparsity_pattern(dh); +K = allocate_matrix(dh); +M = allocate_matrix(dh); # We also preallocate the right hand side f = zeros(ndofs(dh)); @@ -95,12 +95,12 @@ t_rise = 100 ch = ConstraintHandler(dh); # Here, we define the boundary condition related to $\partial \Omega_1$. -∂Ω₁ = union(getfaceset.((grid,), ["left", "right"])...) +∂Ω₁ = union(getfacetset.((grid,), ["left", "right"])...) 
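+# (Broadcasting `getfacetset` over the facet set names and splatting the result into `union`
+# merges the "left" and "right" facet sets into a single set for this boundary condition.)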
dbc = Dirichlet(:u, ∂Ω₁, (x, t) -> 0) add!(ch, dbc); # While the next code block corresponds to the linearly increasing temperature description on $\partial \Omega_2$ # until `t=t_rise`, and then keep constant -∂Ω₂ = union(getfaceset.((grid,), ["top", "bottom"])...) +∂Ω₂ = union(getfacetset.((grid,), ["top", "bottom"])...) dbc = Dirichlet(:u, ∂Ω₂, (x, t) -> max_temp * clamp(t / t_rise, 0, 1)) add!(ch, dbc) close!(ch) @@ -182,24 +182,22 @@ A = (Δt .* K) + M; # by `get_rhs_data`. The function returns a `RHSData` struct, which contains all needed information to apply # the boundary conditions solely on the right-hand-side vector of the problem. rhsdata = get_rhs_data(ch, A); -# We set the values at initial time step, denoted by uₙ, to a bubble-shape described by +# We set the values at initial time step, denoted by uₙ, to a bubble-shape described by # $(x_1^2-1)(x_2^2-1)$, such that it is zero at the boundaries and the maximum temperature in the center. uₙ = zeros(length(f)); apply_analytical!(uₙ, dh, :u, x -> (x[1]^2 - 1) * (x[2]^2 - 1) * max_temp); # Here, we apply **once** the boundary conditions to the system matrix `A`. apply!(A, ch); -# To store the solution, we initialize a `paraview_collection` (.pvd) file. -pvd = paraview_collection("transient-heat.pvd"); -t = 0 -vtk_grid("transient-heat-$t", dh) do vtk - vtk_point_data(vtk, dh, uₙ) - vtk_save(vtk) - pvd[t] = vtk +# To store the solution, we initialize a paraview collection (.pvd) file, +pvd = paraview_collection("transient-heat") +VTKGridFile("transient-heat-0", dh) do vtk + write_solution(vtk, dh, uₙ) + pvd[0.0] = vtk end # At this point everything is set up and we can finally approach the time loop. -for t in Δt:Δt:T +for (step, t) in enumerate(Δt:Δt:T) #First of all, we need to update the Dirichlet boundary condition values. update!(ch, t) @@ -211,16 +209,15 @@ for t in Δt:Δt:T #Finally, we can solve the time step and save the solution afterwards. u = A \ b - vtk_grid("transient-heat-$t", dh) do vtk - vtk_point_data(vtk, dh, u) - vtk_save(vtk) + VTKGridFile("transient-heat-$step", dh) do vtk + write_solution(vtk, dh, u) pvd[t] = vtk end #At the end of the time loop, we set the previous solution to the current one and go to the next time step. uₙ .= u end # In order to use the .pvd file we need to store it to the disk, which is done by: -vtk_save(pvd); +close(pvd); #md # ## [Plain program](@id transient_heat_equation-plain-program) #md # diff --git a/docs/src/reference/assembly.md b/docs/src/reference/assembly.md index 79a2a1576e..0a2111aca6 100644 --- a/docs/src/reference/assembly.md +++ b/docs/src/reference/assembly.md @@ -9,8 +9,3 @@ start_assemble assemble! finish_assemble ``` - -```@docs -create_sparsity_pattern -create_symmetric_sparsity_pattern -``` diff --git a/docs/src/reference/boundary_conditions.md b/docs/src/reference/boundary_conditions.md index 950f3fd2d7..ca738dadea 100644 --- a/docs/src/reference/boundary_conditions.md +++ b/docs/src/reference/boundary_conditions.md @@ -12,8 +12,8 @@ Pages = ["boundary_conditions.md"] ConstraintHandler Dirichlet PeriodicDirichlet -collect_periodic_faces -collect_periodic_faces! +collect_periodic_facets +collect_periodic_facets! add! close! update! diff --git a/docs/src/reference/dofhandler.md b/docs/src/reference/dofhandler.md index f93deb2810..3f5b2413e3 100644 --- a/docs/src/reference/dofhandler.md +++ b/docs/src/reference/dofhandler.md @@ -36,8 +36,8 @@ celldofs! 
```@docs CellCache CellIterator -FaceCache -FaceIterator +FacetCache +FacetIterator InterfaceCache InterfaceIterator ``` diff --git a/docs/src/reference/export.md b/docs/src/reference/export.md index 2fc0b26a53..61e5849cb8 100644 --- a/docs/src/reference/export.md +++ b/docs/src/reference/export.md @@ -3,31 +3,34 @@ DocTestSetup = :(using Ferrite) ``` # Postprocessing -## Project to nodes +## Projection of quadrature point data ```@docs -L2Projector +L2Projector(::Ferrite.AbstractGrid) +add!(::L2Projector, ::Ferrite.AbstractVecOrSet{Int}, ::Interpolation; kwargs...) +close!(::L2Projector) +L2Projector(::Interpolation, ::Ferrite.AbstractGrid; kwargs...) project ``` - -# Postprocessing +## Evaluation at points ```@docs +evaluate_at_grid_nodes PointEvalHandler evaluate_at_points -Ferrite.PointValues +PointValues PointIterator PointLocation ``` -```@docs -evaluate_at_grid_nodes -``` - ## VTK Export - ```@docs -vtk_grid(filename::AbstractString, grid::Grid{dim,C,T}; compress::Bool) where {dim,C,T} -vtk_point_data -vtk_cellset -vtk_cell_data_colors +VTKGridFile +write_solution +write_projection +write_cell_data +write_node_data +Ferrite.write_cellset +Ferrite.write_nodeset +Ferrite.write_constraints +Ferrite.write_cell_colors ``` diff --git a/docs/src/reference/fevalues.md b/docs/src/reference/fevalues.md index ad868aed49..f373335ee4 100644 --- a/docs/src/reference/fevalues.md +++ b/docs/src/reference/fevalues.md @@ -6,18 +6,24 @@ DocTestSetup = :(using Ferrite) # FEValues ## Main types -[`CellValues`](@ref) and [`FaceValues`](@ref) are the most common -subtypes of `Ferrite.AbstractValues`. For more details about how +[`CellValues`](@ref) and [`FacetValues`](@ref) are the most common +subtypes of `Ferrite.AbstractValues`. For more details about how these work, please see the related [topic guide](@ref fevalues_topicguide). ```@docs CellValues -FaceValues +FacetValues ``` +!!! warning "Embedded API" + Currently, embedded `FEValues` returns `SArray`s, which behave differently + from the `Tensor`s for normal value. In the future, we expect to return + an `AbstractTensor`, this change may happen in a minor release, and the + API for embedded `FEValues` should therefore be considered experimental. + ## Applicable functions The following functions are applicable to both `CellValues` -and `FaceValues`. +and `FacetValues`. ```@docs reinit! @@ -28,24 +34,27 @@ shape_value(::Ferrite.AbstractValues, ::Int, ::Int) shape_gradient(::Ferrite.AbstractValues, ::Int, ::Int) shape_symmetric_gradient shape_divergence +shape_curl +geometric_value function_value function_gradient function_symmetric_gradient function_divergence +function_curl spatial_coordinate ``` -In addition, there are some methods that are unique for `FaceValues`. +In addition, there are some methods that are unique for `FacetValues`. ```@docs -Ferrite.getcurrentface +Ferrite.getcurrentfacet getnormal ``` ## [InterfaceValues](@id reference-interfacevalues) -All of the methods for [`FaceValues`](@ref) apply for `InterfaceValues` as well. +All of the methods for [`FacetValues`](@ref) apply for `InterfaceValues` as well. 
In addition, there are some methods that are unique for `InterfaceValues`: ```@docs diff --git a/docs/src/reference/grid.md b/docs/src/reference/grid.md index f41a481799..7456e0c527 100644 --- a/docs/src/reference/grid.md +++ b/docs/src/reference/grid.md @@ -13,6 +13,7 @@ CellIndex VertexIndex EdgeIndex FaceIndex +FacetIndex Grid ``` @@ -26,32 +27,34 @@ getnnodes Ferrite.nnodes_per_cell getcellset getnodeset -getfaceset -getedgeset +getfacetset getvertexset transform_coordinates! getcoordinates getcoordinates! -Ferrite.get_node_coordinate +geometric_interpolation(::Ferrite.AbstractCell) +get_node_coordinate +Ferrite.getspatialdim(::Ferrite.AbstractGrid) +Ferrite.getrefdim(::Ferrite.AbstractCell) ``` ### Topology ```@docs -Ferrite.ExclusiveTopology -Ferrite.getneighborhood -Ferrite.faceskeleton -Ferrite.vertex_star_stencils -Ferrite.getstencil +ExclusiveTopology +getneighborhood +facetskeleton +vertex_star_stencils +getstencil ``` ### Grid Sets Utility ```@docs addcellset! -addfaceset! -addboundaryfaceset! -addboundaryedgeset! +addfacetset! +addboundaryfacetset! +addvertexset! addboundaryvertexset! addnodeset! ``` diff --git a/docs/src/reference/interpolations.md b/docs/src/reference/interpolations.md index d42a13ec03..cb16dcc4ea 100644 --- a/docs/src/reference/interpolations.md +++ b/docs/src/reference/interpolations.md @@ -8,7 +8,18 @@ DocTestSetup = :(using Ferrite) ```@docs Interpolation getnbasefunctions -getdim +getrefdim(::Interpolation) getrefshape getorder ``` + +Implemented interpolations: + +```@docs +Lagrange +Serendipity +DiscontinuousLagrange +BubbleEnrichedLagrange +CrouzeixRaviart +RannacherTurek +``` diff --git a/docs/src/reference/quadrature.md b/docs/src/reference/quadrature.md index 3b9be29480..aab24be261 100644 --- a/docs/src/reference/quadrature.md +++ b/docs/src/reference/quadrature.md @@ -7,9 +7,9 @@ DocTestSetup = :(using Ferrite) ```@docs QuadratureRule -FaceQuadratureRule +FacetQuadratureRule getnquadpoints(::QuadratureRule) -getnquadpoints(::FaceQuadratureRule, ::Int) +getnquadpoints(::FacetQuadratureRule, ::Int) getpoints getweights ``` diff --git a/docs/src/reference/sparsity_pattern.md b/docs/src/reference/sparsity_pattern.md new file mode 100644 index 0000000000..c65e86261d --- /dev/null +++ b/docs/src/reference/sparsity_pattern.md @@ -0,0 +1,58 @@ +# Sparsity pattern and sparse matrices + +This is the reference documentation for sparsity patterns and sparse matrix instantiation. +See the topic section on [Sparsity pattern and sparse matrices](@ref topic-sparse-matrix). + +## Sparsity patterns + +### `AbstractSparsityPattern` + +The following applies to all subtypes of `AbstractSparsityPattern`: + +```@docs +Ferrite.AbstractSparsityPattern +init_sparsity_pattern +add_sparsity_entries! +add_cell_entries! +add_interface_entries! +add_constraint_entries! +Ferrite.add_entry! +``` + +### `SparsityPattern` + +```@docs +SparsityPattern(::Int, ::Int) +allocate_matrix(::SparsityPattern) +SparsityPattern +``` + +### `BlockSparsityPattern` + +!!! note "Package extension" + This functionality is only enabled when the package + [BlockArrays.jl](https://github.com/JuliaArrays/BlockArrays.jl) is installed (`pkg> add + BlockArrays`) and loaded (`using BlockArrays`) in the session. 
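+
+A minimal sketch of how this could fit together (hypothetical: `dh` is a `DofHandler` whose dofs
+have been renumbered such that all dofs of the first block come before all dofs of the second, and
+`n_u`/`n_p` are the corresponding block sizes; exact keyword arguments may differ):
+
+```julia
+using Ferrite, BlockArrays
+block_sizes = [n_u, n_p]            # assumed block sizes from the dof numbering
+sp = BlockSparsityPattern(block_sizes)
+add_sparsity_entries!(sp, dh)       # add entries following the sparsity of the discretization
+K = allocate_matrix(sp)             # a blocked matrix with sparse blocks
+```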
+ +```@docs +BlockSparsityPattern(::Vector{Int}) +Main.FerriteBlockArrays.BlockSparsityPattern +allocate_matrix(::Main.FerriteBlockArrays.BlockSparsityPattern) +allocate_matrix(::Type{<:BlockMatrix{T, Matrix{S}}}, sp::Main.FerriteBlockArrays.BlockSparsityPattern) where {T, S <: AbstractMatrix{T}} +``` + +## Sparse matrices + +### Creating matrix from `SparsityPattern` + +```@docs +allocate_matrix(::Type{S}, ::Ferrite.AbstractSparsityPattern) where {Tv, Ti, S <: SparseMatrixCSC{Tv, Ti}} +allocate_matrix(::Type{Symmetric{Tv, S}}, ::Ferrite.AbstractSparsityPattern) where {Tv, Ti, S <: SparseMatrixCSC{Tv, Ti}} +``` + +### Creating matrix from `DofHandler` + +```@docs +allocate_matrix(::Type{MatrixType}, ::DofHandler, args...; kwargs...) where {MatrixType} +allocate_matrix(::DofHandler, args...; kwargs...) +``` diff --git a/docs/src/topics/FEValues.md b/docs/src/topics/FEValues.md index 1a4f836be8..f4abfdd3cd 100644 --- a/docs/src/topics/FEValues.md +++ b/docs/src/topics/FEValues.md @@ -1,16 +1,16 @@ # [FEValues](@id fevalues_topicguide) -A key type of object in `Ferrite.jl` is the so-called `FEValues`, where the most common ones are `CellValues` and `FaceValues`. These objects are used inside the element routines and are used to query the integration weights, shape function values and gradients, and much more; see [`CellValues`](@ref) and [`FaceValues`](@ref). For these values to be correct, it is necessary to reinitialize these for the current cell by using the [`reinit!`](@ref) function. This function maps the values from the reference cell to the actual cell, a process described in detail below, see [Mapping of finite elements](@ref mapping-theory). After that, we show an implementation of a [`SimpleCellValues`](@ref SimpleCellValues) type to illustrate how `CellValues` work for the most standard case, excluding the generalizations and optimization that complicates the actual code. +A key type of object in `Ferrite.jl` is the so-called `FEValues`, where the most common ones are `CellValues` and `FacetValues`. These objects are used inside the element routines and are used to query the integration weights, shape function values and gradients, and much more; see [`CellValues`](@ref) and [`FacetValues`](@ref). For these values to be correct, it is necessary to reinitialize these for the current cell by using the [`reinit!`](@ref) function. This function maps the values from the reference cell to the actual cell, a process described in detail below, see [Mapping of finite elements](@ref mapping_theory). After that, we show an implementation of a [`SimpleCellValues`](@ref SimpleCellValues) type to illustrate how `CellValues` work for the most standard case, excluding the generalizations and optimization that complicates the actual code. ## [Mapping of finite elements](@id mapping_theory) -The shape functions and gradients stored in an `FEValues` object, are reinitialized for each cell by calling the `reinit!` function. -The main part of this calculation, considers how to map the values and derivatives of the shape functions, +The shape functions and gradients stored in an `FEValues` object, are reinitialized for each cell by calling the `reinit!` function. +The main part of this calculation, considers how to map the values and derivatives of the shape functions, defined on the reference cell, to the actual cell. -The geometric mapping of a finite element from the reference coordinates to the real coordinates is shown in the following illustration. 
+The geometric mapping of a finite element from the reference coordinates to the real coordinates is shown in the following illustration. ![mapping_figure](https://raw.githubusercontent.com/Ferrite-FEM/Ferrite.jl/gh-pages/assets/fe_mapping.svg) -This mapping is given by the geometric shape functions, $\hat{N}_i^g(\boldsymbol{\xi})$, such that +This mapping is given by the geometric shape functions, $\hat{N}_i^g(\boldsymbol{\xi})$, such that ```math \begin{align*} \boldsymbol{x}(\boldsymbol{\xi}) =& \sum_{\alpha=1}^N \hat{\boldsymbol{x}}_\alpha \hat{N}_\alpha^g(\boldsymbol{\xi}) \\ @@ -24,8 +24,8 @@ where the defined $\boldsymbol{J}$ is the jacobian of the mapping, and in some c We require that the mapping from reference coordinates to real coordinates is [diffeomorphic](https://en.wikipedia.org/wiki/Diffeomorphism), meaning that we can express $\boldsymbol{x} = \boldsymbol{x}(\boldsymbol{\xi}(\boldsymbol{x}))$, such that ```math \begin{align*} - \frac{\mathrm{d}\boldsymbol{x}}{\mathrm{d}\boldsymbol{x}} = \boldsymbol{I} &= \frac{\mathrm{d}\boldsymbol{x}}{\mathrm{d}\boldsymbol{\xi}} \cdot \frac{\mathrm{d}\boldsymbol{\xi}}{\mathrm{d}\boldsymbol{x}} - \quad\Rightarrow\quad + \frac{\mathrm{d}\boldsymbol{x}}{\mathrm{d}\boldsymbol{x}} = \boldsymbol{I} &= \frac{\mathrm{d}\boldsymbol{x}}{\mathrm{d}\boldsymbol{\xi}} \cdot \frac{\mathrm{d}\boldsymbol{\xi}}{\mathrm{d}\boldsymbol{x}} + \quad\Rightarrow\quad \frac{\mathrm{d}\boldsymbol{\xi}}{\mathrm{d}\boldsymbol{x}} = \left[\frac{\mathrm{d}\boldsymbol{x}}{\mathrm{d}\boldsymbol{\xi}}\right]^{-1} = \boldsymbol{J}^{-1} \end{align*} ``` @@ -34,7 +34,7 @@ Depending on the function interpolation, we may want different types of mappings ### Identity mapping `Ferrite.IdentityMapping` -For scalar fields, we always use scalar base functions. For tensorial fields (non-scalar, e.g. vector-fields), the base functions can be constructed from scalar base functions, by using e.g. `VectorizedInterpolation`. From the perspective of the mapping, however, each component is mapped as an individual scalar base function. And for scalar base functions, we only require that the value of the base function is invariant to the element shape (real coordinate), and only depends on the reference coordinate, i.e. +For scalar fields, we always use scalar base functions. For tensorial fields (non-scalar, e.g. vector-fields), the base functions can be constructed from scalar base functions, by using e.g. `VectorizedInterpolation`. From the perspective of the mapping, however, each component is mapped as an individual scalar base function. And for scalar base functions, we only require that the value of the base function is invariant to the element shape (real coordinate), and only depends on the reference coordinate, i.e. ```math \begin{align*} N(\boldsymbol{x}) &= \hat{N}(\boldsymbol{\xi}(\boldsymbol{x}))\nonumber \\ @@ -42,10 +42,63 @@ For scalar fields, we always use scalar base functions. For tensorial fields (no \end{align*} ``` +Second order gradients of the shape functions are computed as + +```math +\begin{align*} + \mathrm{grad}(\mathrm{grad}(N(\boldsymbol{x}))) = \frac{\mathrm{d}^2 N}{\mathrm{d}\boldsymbol{x}^2} = \boldsymbol{J}^{-T} \cdot \frac{\mathrm{d}^2\hat{N}}{\mathrm{d}\boldsymbol{\xi}^2} \cdot \boldsymbol{J}^{-1} - \boldsymbol{J}^{-T} \cdot\mathrm{grad}(N) \cdot \boldsymbol{\mathcal{H}} \cdot \boldsymbol{J}^{-1} +\end{align*} +``` +!!! 
details "Derivation" + The gradient of the shape functions is obtained using the chain rule: + ```math + \begin{align*} + \frac{\mathrm{d} N}{\mathrm{d}x_i} = \frac{\mathrm{d} \hat N}{\mathrm{d} \xi_r}\frac{\mathrm{d} \xi_r}{\mathrm{d} x_i} = \frac{\mathrm{d} \hat N}{\mathrm{d} \xi_r} J^{-1}_{ri} + \end{align*} + ``` + + For the second order gradients, we first use the product rule on the equation above: + + ```math + \begin{align} + \frac{\mathrm{d}^2 N}{\mathrm{d}x_i \mathrm{d}x_j} = \frac{\mathrm{d}}{\mathrm{d}x_j}\left[\frac{\mathrm{d} \hat N}{\mathrm{d} \xi_r}\right] J^{-1}_{ri} + \frac{\mathrm{d} \hat N}{\mathrm{d} \xi_r} \frac{\mathrm{d}J^{-1}_{ri}}{\mathrm{d}x_j} + \end{align} + ``` + + Using the fact that $\frac{\mathrm{d}\hat{f}(\boldsymbol{\xi})}{\mathrm{d}x_j} = \frac{\mathrm{d}\hat{f}(\boldsymbol{\xi})}{\mathrm{d}\xi_s} J^{-1}_{sj}$, the first term in the equation above can be expressed as: + + ```math + \begin{align*} + \frac{\mathrm{d}}{\mathrm{d}x_j}\left[\frac{\mathrm{d} \hat N}{\mathrm{d} \xi_r}\right] J^{-1}_{ri} = J^{-1}_{sj}\frac{\mathrm{d}}{\mathrm{d}\xi_s}\left[\frac{\mathrm{d} \hat N}{\mathrm{d} \xi_r}\right] J^{-1}_{ri} = J^{-1}_{sj}\left[\frac{\mathrm{d}^2 \hat N}{\mathrm{d} \xi_s\mathrm{d} \xi_r}\right] J^{-1}_{ri} + \end{align*} + ``` + + The second term can be written as: + + ```math + \begin{align*} + \frac{\mathrm{d} \hat N}{\mathrm{d} \xi_r}\frac{\mathrm{d}J^{-1}_{ri}}{\mathrm{d}x_j} = \frac{\mathrm{d} \hat N}{\mathrm{d} \xi_r}\left[\frac{\mathrm{d}J^{-1}_{ri}}{\mathrm{d}\xi_s}\right]J^{-1}_{sj} = \frac{\mathrm{d} \hat N}{\mathrm{d} \xi_r}\left[- J^{-1}_{rk}\mathcal{H}_{kps} J^{-1}_{pi}\right] J^{-1}_{sj} = - \frac{\mathrm{d} \hat N}{\mathrm{d} x_k}\mathcal{H}_{kps} J^{-1}_{pi}J^{-1}_{sj} + \end{align*} + ``` + + where we have used that the inverse of the jacobian can be computed as: + + ```math + \begin{align*} + 0 = \frac{\mathrm{d}}{\mathrm{d}\xi_s} (J_{kr} J^{-1}_{ri} ) = \frac{\mathrm{d}J_{kp}}{\mathrm{d}\xi_s} J^{-1}_{pi} + J_{kr} \frac{\mathrm{d}J^{-1}_{ri}}{\mathrm{d}\xi_s} = 0 \quad \Rightarrow \\ + \end{align*} + ``` + + ```math + \begin{align*} + \frac{\mathrm{d}J^{-1}_{ri}}{\mathrm{d}\xi_s} = - J^{-1}_{rk}\frac{\mathrm{d}J_{kp}}{\mathrm{d}\xi_s} J^{-1}_{pi} = - J^{-1}_{rk}\mathcal{H}_{kps} J^{-1}_{pi}\\ + \end{align*} + ``` + ### Covariant Piola mapping, H(curl) `Ferrite.CovariantPiolaMapping` -The covariant Piola mapping of a vectorial base function preserves the tangential components. For the value, the mapping is defined as +The covariant Piola mapping of a vectorial base function preserves the tangential components. For the value, the mapping is defined as ```math \begin{align*} \boldsymbol{N}(\boldsymbol{x}) = \boldsymbol{J}^{-\mathrm{T}} \cdot \hat{\boldsymbol{N}}(\boldsymbol{\xi}(\boldsymbol{x})) @@ -66,7 +119,7 @@ which yields the gradient, \end{align*} ``` - Except for a few elements, $\boldsymbol{J}$ varies as a function of $\boldsymbol{x}$. The derivative can be calculated as + Except for a few elements, $\boldsymbol{J}$ varies as a function of $\boldsymbol{x}$. 
The derivative can be calculated as ```math \begin{align*} \frac{\mathrm{d} J^{-\mathrm{T}}_{ik}}{\mathrm{d} x_j} &= \frac{\mathrm{d} J^{-\mathrm{T}}_{ik}}{\mathrm{d} J_{mn}} \frac{\mathrm{d} J_{mn}}{\mathrm{d} x_j} = - J_{km}^{-1} J_{in}^{-T} \frac{\mathrm{d} J_{mn}}{\mathrm{d} x_j} \nonumber \\ @@ -77,7 +130,7 @@ which yields the gradient, ### Contravariant Piola mapping, H(div) `Ferrite.ContravariantPiolaMapping` -The covariant Piola mapping of a vectorial base function preserves the normal components. For the value, the mapping is defined as +The contravariant Piola mapping of a vectorial base function preserves the normal components. For the value, the mapping is defined as ```math \begin{align*} \boldsymbol{N}(\boldsymbol{x}) = \frac{\boldsymbol{J}}{\det(\boldsymbol{J})} \cdot \hat{\boldsymbol{N}}(\boldsymbol{\xi}(\boldsymbol{x})) \end{align*} ``` @@ -97,19 +150,19 @@ This gives the gradient ```math \begin{align*} \frac{\mathrm{d} N_i}{\mathrm{d} x_j} &= \frac{\mathrm{d}}{\mathrm{d} x_j} \left[\frac{J_{ik}}{\det(\boldsymbol{J})} \hat{N}_k\right] =\nonumber\\ - &= \frac{\mathrm{d} J_{ik}}{\mathrm{d} x_j} \frac{\hat{N}_k}{\det(\boldsymbol{J})} + &= \frac{\mathrm{d} J_{ik}}{\mathrm{d} x_j} \frac{\hat{N}_k}{\det(\boldsymbol{J})} - \frac{\mathrm{d} \det(\boldsymbol{J})}{\mathrm{d} x_j} \frac{J_{ik} \hat{N}_k}{\det(\boldsymbol{J})^2} + \frac{J_{ik}}{\det(\boldsymbol{J})} \frac{\mathrm{d} \hat{N}_k}{\mathrm{d} \xi_l} J_{lj}^{-1} \\ - &= \mathcal{H}_{ikl} J^{-1}_{lj} \frac{\hat{N}_k}{\det(\boldsymbol{J})} + &= \mathcal{H}_{ikl} J^{-1}_{lj} \frac{\hat{N}_k}{\det(\boldsymbol{J})} - J^{-T}_{mn} \mathcal{H}_{mnl} J^{-1}_{lj} \frac{J_{ik} \hat{N}_k}{\det(\boldsymbol{J})} + \frac{J_{ik}}{\det(\boldsymbol{J})} \frac{\mathrm{d} \hat{N}_k}{\mathrm{d} \xi_l} J_{lj}^{-1} \end{align*} ``` ## [Walkthrough: Creating `SimpleCellValues`](@id SimpleCellValues) -In the following, we walk through how to create a `SimpleCellValues` type which -works similar to `Ferrite.jl`'s `CellValues`, but is not performance optimized and not as general. The main purpose is to explain how the `CellValues` works for the standard case of `IdentityMapping` described above. -Please note that several internal functions are used, and these may change without a major version increment. Please see the [Developer documentation](@ref) for their documentation. +In the following, we walk through how to create a `SimpleCellValues` type which +works similar to `Ferrite.jl`'s `CellValues`, but is not performance optimized and not as general. The main purpose is to explain how the `CellValues` works for the standard case of `IdentityMapping` described above. +Please note that several internal functions are used, and these may change without a major version increment. Please see the [Developer documentation](@ref) for their documentation. ```@eval # Include the example here, but modify the Literate output to suit being embedded diff --git a/docs/src/topics/SimpleCellValues_literate.jl b/docs/src/topics/SimpleCellValues_literate.jl index 06ba44b779..86a36c7e37 100644 --- a/docs/src/topics/SimpleCellValues_literate.jl +++ b/docs/src/topics/SimpleCellValues_literate.jl @@ -2,8 +2,8 @@ using Ferrite, Test # Then, we define a simple version of the cell values object, which only supports -# * Scalar interpolations -# * Identity mapping from reference to physical cell. +# * Scalar interpolations +# * Identity mapping from reference to physical cell. # * The cell shape has the same dimension as the physical space (excludes so-called embedded cells).
struct SimpleCellValues{T, dim} <: Ferrite.AbstractCellValues @@ -11,7 +11,7 @@ struct SimpleCellValues{T, dim} <: Ferrite.AbstractCellValues ## shape function number and q_point the integration point dNdξ::Matrix{Vec{dim,T}} # Precalculated shape gradients in the reference domain, dNdξ[i, q_point] dNdx::Matrix{Vec{dim,T}} # Cache for shape gradients in the physical domain, dNdx[i, q_point] - M::Matrix{T} # Precalculated geometric shape values, M[j, q_point] where j is the + M::Matrix{T} # Precalculated geometric shape values, M[j, q_point] where j is the ## geometric shape function number dMdξ::Matrix{Vec{dim,T}} # Precalculated geometric shape gradients, dMdξ[j, q_point] weights::Vector{T} # Given quadrature weights in the reference domain, weights[q_point] @@ -22,12 +22,12 @@ end; # Next, we create a constructor with the same input as `CellValues` function SimpleCellValues(qr::QuadratureRule, ip_fun::Interpolation, ip_geo::Interpolation) - dim = Ferrite.getdim(ip_fun) + dim = Ferrite.getrefdim(ip_fun) ## Quadrature weights and coordinates (in reference cell) weights = Ferrite.getweights(qr) n_qpoints = length(weights) T = eltype(weights) - + ## Function interpolation n_func_basefuncs = getnbasefunctions(ip_fun) N = zeros(T, n_func_basefuncs, n_qpoints) @@ -42,10 +42,10 @@ function SimpleCellValues(qr::QuadratureRule, ip_fun::Interpolation, ip_geo::Int ## Precalculate function and geometric shape values and gradients for (qp, ξ) in pairs(Ferrite.getpoints(qr)) for i in 1:n_func_basefuncs - dNdξ[i, qp], N[i, qp] = Ferrite.shape_gradient_and_value(ip_fun, ξ, i) + dNdξ[i, qp], N[i, qp] = Ferrite.reference_shape_gradient_and_value(ip_fun, ξ, i) end for i in 1:n_geom_basefuncs - dMdξ[i, qp], M[i, qp] = Ferrite.shape_gradient_and_value(ip_geo, ξ, i) + dMdξ[i, qp], M[i, qp] = Ferrite.reference_shape_gradient_and_value(ip_geo, ξ, i) end end @@ -53,15 +53,15 @@ function SimpleCellValues(qr::QuadratureRule, ip_fun::Interpolation, ip_geo::Int SimpleCellValues(N, dNdξ, dNdx, M, dMdξ, weights, detJdV) end; -# To make our `SimpleCellValues` work in standard Ferrite code, +# To make our `SimpleCellValues` work in standard Ferrite code, # we need to dispatch some access functions: Ferrite.getnbasefunctions(cv::SimpleCellValues) = size(cv.N, 1) Ferrite.getnquadpoints(cv::SimpleCellValues) = size(cv.N, 2) Ferrite.shape_value(cv::SimpleCellValues, q_point::Int, i::Int) = cv.N[i, q_point] Ferrite.shape_gradient(cv::SimpleCellValues, q_point::Int, i::Int) = cv.dNdx[i, q_point]; -# The last step is then to dispatch `reinit!` for our `SimpleCellValues` to calculate -# the cached values `dNdx` and `detJdV` for the current cell according to the +# The last step is then to dispatch `reinit!` for our `SimpleCellValues` to calculate +# the cached values `dNdx` and `detJdV` for the current cell according to the # theory for `IdentityMapping` above. 
function Ferrite.reinit!(cv::SimpleCellValues, x::Vector{Vec{dim,T}}) where {dim,T} for (q_point, w) in pairs(cv.weights) # Loop over each quadrature point @@ -72,7 +72,7 @@ end ## Calculate the correct integration weight for the current q_point cv.detJdV[q_point] = det(J)*w - ## map the shape gradients to the current geometry + ## map the shape gradients to the current geometry Jinv = inv(J) for i in 1:getnbasefunctions(cv) cv.dNdx[i, q_point] = cv.dNdξ[i, q_point] ⋅ Jinv @@ -98,4 +98,3 @@ ue = rand(getnbasefunctions(simple_cv)) q_point = 2 @test function_value(cv, q_point, ue) ≈ function_value(simple_cv, q_point, ue) @test function_gradient(cv, q_point, ue) ≈ function_gradient(simple_cv, q_point, ue) - diff --git a/docs/src/topics/assembly.md b/docs/src/topics/assembly.md index 90d2e506a7..868761af8e 100644 --- a/docs/src/topics/assembly.md +++ b/docs/src/topics/assembly.md @@ -24,34 +24,8 @@ into the sparse matrix `K` directly. Therefore we will instead use an `Assembler` that will help with the assembling of both the global stiffness matrix and the global force vector. It is also often convenient to create the sparse matrix just once, and reuse the allocated matrix. This is useful for e.g. iterative solvers or time dependent problems where the sparse matrix -structure, or [Sparsity Pattern](@ref) will stay the same in every iteration/ -time step. - -## Sparsity Pattern - -Given a `DofHandler` we can obtain the corresponding sparse matrix by using the -[`create_sparsity_pattern`](@ref) function. This will setup a `SparseMatrixCSC` -with stored values on all the places corresponding to the degree of freedom numbering -in the `DofHandler`. This means that when we assemble into the global stiffness matrix -there is no need to change the internal representation of the sparse matrix since the -sparse structure will not change. - -Often the finite element problem is symmetric and will result in a symmetric sparse -matrix. This information is often something that the sparse solver can take advantage of. -If the solver only needs half the matrix there is no need to assemble both halves. -For this purpose there is a [`create_symmetric_sparsity_pattern`](@ref) function that -will only create the upper half of the matrix, and return a `Symmetric` wrapped -`SparseMatrixCSC`. - -Given a `DofHandler` `dh` we can obtain the (symmetric) sparsity pattern as - -```julia -K = create_sparsity_pattern(dh) -K = create_symmetric_sparsity_pattern(dh) -``` - -The returned sparse matrix will be used together with an `Assembler`, which -assembles efficiently into the matrix, without modifying the internal representation. +structure, or [Sparsity Pattern](@ref "Sparsity pattern and sparse matrices") +will stay the same in every iteration/time step. ## `Assembler` @@ -96,7 +70,7 @@ In such cases it is enough to construct the global matrix `K` once. Below is some pseudo-code for how to do this for a time-dependent problem: ```julia -K = create_sparsity_pattern(dh) +K = allocate_matrix(dh) f = zeros(ndofs(dh)) for t in 1:timesteps diff --git a/docs/src/topics/boundary_conditions.md b/docs/src/topics/boundary_conditions.md index 2cfd6b6e9e..242bb44217 100644 --- a/docs/src/topics/boundary_conditions.md +++ b/docs/src/topics/boundary_conditions.md @@ -8,6 +8,11 @@ Every PDE is accompanied with boundary conditions. There are different types of boundary conditions, and they need to be handled in different ways.
Below we discuss how to handle the most common ones, Dirichlet and Neumann boundary conditions, and how to do it `Ferrite`. +While boundary conditions can be applied directly to nodes, vertices, edges, or faces, +they are most commonly applied to [facets](@ref "Reference shapes"). Each facet is described +by a [`FacetIndex`](@ref). +When adding boundary conditions to points instead, vertices are preferred over nodes. + ## Dirichlet Boundary Conditions At a Dirichlet boundary the unknown field is prescribed to a given value. For the discrete @@ -25,9 +30,9 @@ for computing the prescribed value. Example: ```julia dbc1 = Dirichlet( - :u, # Name of the field - getfaceset(grid, "left"), # Part of the boundary - x -> 1.0, # Function mapping coordinate to a prescribed value + :u, # Name of the field + getfacetset(grid, "left"), # Part of the boundary + x -> 1.0, # Function mapping coordinate to a prescribed value ) ``` @@ -40,9 +45,9 @@ function computing the prescribed value should be of the form `f(x)` or `f(x, t) To apply a constraint on multiple face sets in the grid you can use `union` to join them, for example ```julia - left_right = union(getfaceset(grid, "left"), getfaceset(grid, "right")) + left_right = union(getfacetset(grid, "left"), getfacetset(grid, "right")) ``` - creates a new face set containing all faces in the `"left"` and "`right`" face sets, + creates a new facetset containing all facets in the `"left"` and "`right`" facetsets, which can be passed to the `Dirichlet` constructor. By default the constraint is added to all components of the given field. To add the @@ -52,10 +57,10 @@ vector field `:u`: ```julia dbc2 = Dirichlet( - :u, # Name of the field - getfaceset(grid, "left"), # Part of the boundary - x -> [0.0, 0.0], # Function mapping coordinate to prescribed values - [1, 3], # Components + :u, # Name of the field + getfacetset(grid, "left"), # Part of the boundary + x -> [0.0, 0.0], # Function mapping coordinate to prescribed values + [1, 3], # Components ) ``` @@ -96,19 +101,19 @@ For complete examples that use Neumann boundary conditions, please see - [von-Mises-plasticity](@ref tutorial-plasticity) - [Hyperelasticity](@ref tutorial-hyperelasticity) -### Using the `FaceIterator` +### Using the `FacetIterator` A Neumann boundary contribution can be added by iterating over -the relevant `faceset::Set{FaceIndex}` by using the [`FaceIterator`](@ref) +the relevant `facetset` by using the [`FacetIterator`](@ref). 
For a scalar field, this can be done as ```julia grid = generate_grid(Quadrilateral, (3,3)) dh = DofHandler(grid); push!(dh, :u, 1); close!(dh) -fv = FaceValues(QuadratureRule{RefQuadrilateral}(2), Lagrange{RefQuadrilateral, 1}()) +fv = FacetValues(QuadratureRule{RefQuadrilateral}(2), Lagrange{RefQuadrilateral, 1}()) f = zeros(ndofs(dh)) fe = zeros(ndofs_per_cell(dh)) qn = 1.0 # Normal flux -for fc in FaceIterator(dh, getfaceset(grid, "right")) +for fc in FacetIterator(dh, getfacetset(grid, "right")) reinit!(fv, fc) fill!(fe, 0) for q_point in 1:getnquadpoints(fv) @@ -137,13 +142,13 @@ Alternatively, the following code snippet can be included in the element routine to evaluate the boundary integral: ```julia -for face in 1:nfaces(cell) - if (cellid(cell), face) ∈ getfaceset(grid, "Neumann Boundary") - reinit!(facevalues, cell, face) - for q_point in 1:getnquadpoints(facevalues) - dΓ = getdetJdV(facevalues, q_point) - for i in 1:getnbasefunctions(facevalues) - δu = shape_value(facevalues, q_point, i) +for facet in 1:nfacets(cell) + if (cellid(cell), facet) ∈ getfacetset(grid, "Neumann Boundary") + reinit!(facetvalues, cell, facet) + for q_point in 1:getnquadpoints(facetvalues) + dΓ = getdetJdV(facetvalues, q_point) + for i in 1:getnbasefunctions(facetvalues) + δu = shape_value(facetvalues, q_point, i) fe[i] += δu * qn * dΓ end end @@ -154,8 +159,8 @@ end We start by looping over all the faces of the cell, next we check if this particular face is located on our faceset of interest called `"Neumann Boundary"`. If we have determined that the current face is indeed on the boundary and in our faceset, then we -reinitialize `facevalues` for this face, using [`reinit!`](@ref). When `reinit!`ing -`facevalues` we also need to give the face number in addition to the cell. +reinitialize `FacetValues` for this face, using [`reinit!`](@ref). When `reinit!`ing +`FacetValues` we also need to give the face number in addition to the cell. Next we simply loop over the quadrature points of the face, and then loop over all the test functions and assemble the contribution to the force vector. @@ -205,7 +210,7 @@ simply a translation (e.g. sides of a cube) this matrix will be the identity mat In `Ferrite` this type of periodic Dirichlet boundary conditions can be added to the `ConstraintHandler` by constructing an instance of [`PeriodicDirichlet`](@ref). This is usually done it two steps. First we compute the mapping between mirror and image faces using -[`collect_periodic_faces`](@ref). Here we specify the mirror set and image sets (the sets +[`collect_periodic_facets`](@ref). Here we specify the mirror set and image sets (the sets are usually known or can be constructed easily ) and the mapping ``\varphi``. Second we construct the constraint using the `PeriodicDirichlet` constructor. Here we specify which components of the function that should be constrained, and the rotation matrix @@ -223,7 +228,7 @@ ch = ConstraintHandler(dofhandler) # Compute the face mapping φ(x) = x - Vec{2}((1.0, 0.0)) -face_mapping = collect_periodic_faces(grid, "left", "right", φ) +face_mapping = collect_periodic_facets(grid, "left", "right", φ) # Construct the periodic constraint for field :u pdbc = PeriodicDirichlet(:u, face_mapping, [1, 2]) @@ -297,9 +302,9 @@ pdbc = PeriodicDirichlet( ## Initial Conditions -When solving time-dependent problems, initial conditions, different from zero, may be required. -For finite element formulations of ODE-type, -i.e. 
``\boldsymbol{u}'(t) = \boldsymbol{f}(\boldsymbol{u}(t),t)``, +When solving time-dependent problems, initial conditions, different from zero, may be required. +For finite element formulations of ODE-type, +i.e. ``\boldsymbol{u}'(t) = \boldsymbol{f}(\boldsymbol{u}(t),t)``, where ``\boldsymbol{u}(t)`` are the degrees of freedom, initial conditions can be specified by the [`apply_analytical!`](@ref) function. For example, specify the initial pressure as a function of the y-coordinate @@ -319,4 +324,3 @@ See also [Transient heat equation](@ref tutorial-transient-heat-equation) for on equations (DAEs) need extra care during initialization. We refer to the paper ["Consistent Initial Condition Calculation for Differential-Algebraic Systems" by Brown et al.](https://dx.doi.org/10.1137/S1064827595289996) for more details on this matter. - diff --git a/docs/src/topics/constraints.md b/docs/src/topics/constraints.md index 1a29090cd1..c2baa243fa 100644 --- a/docs/src/topics/constraints.md +++ b/docs/src/topics/constraints.md @@ -4,7 +4,7 @@ DocTestSetup = :(using Ferrite) # Constraints -PDEs can in general be subjected to a number of constraints, +PDEs can in general be subjected to a number of constraints, ```math g_I(\underline{a}) = 0, \quad I = 1 \text{ to } n_c @@ -12,7 +12,7 @@ g_I(\underline{a}) = 0, \quad I = 1 \text{ to } n_c where $g$ are (non-linear) constraint equations, $\underline{a}$ is a vector of the degrees of freedom, and $n_c$ is the number of constraints. There are many ways to -enforce these constraints, e.g. penalty methods and Lagrange multiplier methods. +enforce these constraints, e.g. penalty methods and Lagrange multiplier methods. ## Affine constraints @@ -30,22 +30,22 @@ where $a_1$, $a_2$ etc. are system degrees of freedom. In Ferrite, we can accoun ```julia ch = ConstraintHandler(dh) lc1 = AffineConstraint(1, [2 => 5.0, 3 => 3.0], 1) -lc2 = AffineConstraint(1, [3 => 2.0, 5 => 6.0], 0) +lc2 = AffineConstraint(4, [3 => 2.0, 5 => 6.0], 0) add!(ch, lc1) add!(ch, lc2) ``` -Affine constraints will affect the sparsity pattern of the stiffness matrix, and as such, it is important to also include +Affine constraints will affect the sparsity pattern of the stiffness matrix, and as such, it is important to also include the `ConstraintHandler` as an argument when creating the sparsity pattern: ```julia -K = create_sparsity_pattern(dh, ch) +K = allocate_matrix(dh, ch) ``` ### Solving linear problems -To solve the system ``\underline{\underline{K}}\underline{a}=\underline{f}``, account for affine constraints the same way as for +To solve the system ``\underline{\underline{K}}\underline{a}=\underline{f}``, account for affine constraints the same way as for `Dirichlet` boundary conditions; first call `apply!(K, f, ch)`. This will condense `K` and `f` inplace (i.e -no new matrix will be created). Note however that we must also call `apply!` on the solution vector after +no new matrix will be created). Note however that we must also call `apply!` on the solution vector after solving the system to enforce the affine constraints: ```julia @@ -59,17 +59,17 @@ apply!(a, ch) # enforces affine constraints ``` ### Solving nonlinear problems -It is important to check the residual **after** applying boundary conditions when -solving nonlinear problems with affine constraints. -`apply_zero!(K, r, ch)` modifies the residual entries for dofs that are involved -in constraints to account for constraint forces. 
+It is important to check the residual **after** applying boundary conditions when +solving nonlinear problems with affine constraints. +`apply_zero!(K, r, ch)` modifies the residual entries for dofs that are involved +in constraints to account for constraint forces. The following pseudo-code shows a typical pattern for solving a non-linear problem with Newton's method: ```julia a = initial_guess(...) # Make any initial guess for a here, e.g. `a=zeros(ndofs(dh))` apply!(a, ch) # Make the guess fulfill all constraints in `ch` for iter in 1:maxiter doassemble!(K, r, ...) # Assemble the residual, r, and stiffness, K=∂r/∂a. - apply_zero!(K, r, ch) # Modify `K` and `r` to account for the constraints. + apply_zero!(K, r, ch) # Modify `K` and `r` to account for the constraints. check_convergence(r, ...) && break # Only check convergence after `apply_zero!(K, r, ch)` Δa = K \ r # Calculate the (negative) update apply_zero!(Δa, ch) # Change the constrained values in `Δa` such that `a-Δa` diff --git a/docs/src/topics/degrees_of_freedom.md b/docs/src/topics/degrees_of_freedom.md index 151834cb09..c803c250ba 100644 --- a/docs/src/topics/degrees_of_freedom.md +++ b/docs/src/topics/degrees_of_freedom.md @@ -20,13 +20,15 @@ dh = DofHandler(grid) ## Fields Before we can distribute the dofs we need to specify fields. A field is simply the unknown -function(s) we are solving for. To add a field we need a name (a `Symbol`) and we also -need to specify number of components for the field. Here we add a vector field `:u` -(2 components for a 2D problem) and a scalar field `:p`. +function(s) we are solving for. To add a field we need a name (a `Symbol`) and the the +interpolation describing the shape functions for the field. Here we add a scalar field `:p`, +interpolated using linear (degree 1) shape functions on a triangle, and a vector field `:u`, +also interpolated with linear shape functions on a triangle, but raised to the power 2 to +indicate that it is a vector field with 2 components (for a 2D problem). ```@example dofs -add!(dh, :u, Lagrange{2,RefTetrahedron,1}()^2) -add!(dh, :p, Lagrange{2,RefTetrahedron,1}()) +add!(dh, :p, Lagrange{RefTriangle, 1}()) +add!(dh, :u, Lagrange{RefTriangle, 1}()^2) # hide ``` @@ -38,27 +40,7 @@ dofs for the fields we added. close!(dh) ``` -### Specifying interpolation for a field - -In the example above we did not specify which interpolation should be used for our fields -`:u` and `:p`. By default iso-parametric elements will be used meaning that the -interpolation that matches the grid will be used -- for a linear grid a linear -interpolation will be used etc. It is sometimes useful to separate the grid interpolation -from the interpolation that is used to approximate our fields -(e.g. sub- and super-parametric elements). - -We can specify which interpolation that should be used for the approximation when we add -the fields to the dofhandler. For example, here we add our vector field `:u` with a -quadratic interpolation, and our `:p` field with a linear approximation. 
- -```@example dofs -dh = DofHandler(grid) # hide -add!(dh, :u, Lagrange{2,RefTetrahedron,2}()^2) -add!(dh, :p, Lagrange{2,RefTetrahedron,1}()) -# hide -``` - ## Ordering of Dofs ordered in the same order as we add to dofhandler -nodes -> (edges ->) faces -> cells +vertices -> edges -> faces -> volumes diff --git a/docs/src/topics/export.md b/docs/src/topics/export.md index 855b5d62c8..472e6ecad9 100644 --- a/docs/src/topics/export.md +++ b/docs/src/topics/export.md @@ -1,7 +1,7 @@ ```@setup export using Ferrite grid = generate_grid(Triangle, (2, 2)) -dh = DofHandler(grid); add!(dh, :u, Lagrange{2,RefTetrahedron,1}()); close!(dh) +dh = DofHandler(grid); add!(dh, :u, Lagrange{RefTriangle,1}()); close!(dh) u = rand(ndofs(dh)); σ = rand(getncells(grid)) ``` @@ -10,98 +10,52 @@ u = rand(ndofs(dh)); σ = rand(getncells(grid)) When the problem is solved, and the solution vector `u` is known we typically want to visualize it. The simplest way to do this is to write the solution to a VTK-file, which can be viewed in e.g. [`Paraview`](https://www.paraview.org/). -To write VTK-files, Ferrite uses, and extends, functions from the -[`WriteVTK.jl`](https://github.com/jipolanco/WriteVTK.jl) package to simplify +To write VTK-files, Ferrite comes with an export interface with a +[`WriteVTK.jl`](https://github.com/jipolanco/WriteVTK.jl) backend to simplify the exporting. -First we need to create a file, based on the grid. This is done with the -`vtk_grid` function: - +The following structure can be used to write various output to a vtk-file: ```@example export -vtk = vtk_grid("my-solution", grid) -# hide -``` - -Next we have to add data to the file. We may add different kinds of data; -point data using `vtk_point_data` or cell data using -`vtk_cell_data`. Point data is data for each nodal coordinate in the -grid, for example our solution vector. Point data can be either scalars -or vectors. Cell data is -- as the name suggests -- data for each cell. This -can be for example the stress. As an example, lets add a solution vector `u` -as point data, and a vector with stress for each cell, `σ`, as cell data: - +VTKGridFile("my_solution", grid) do vtk + write_solution(vtk, dh, u) +end; +``` +where `write_solution` is just one example of the following functions that can be used: + +* [`write_solution`](@ref) +* [`write_cell_data`](@ref) +* [`write_node_data`](@ref) +* [`write_projection`](@ref) +* [`Ferrite.write_cellset`](@ref) +* [`Ferrite.write_nodeset`](@ref) +* [`Ferrite.write_constraints`](@ref) +* [`Ferrite.write_cell_colors`](@ref) + +Instead of using the `do`-block, it is also possible to do ```@example export -vtk_point_data(vtk, u, "my-point-data") -vtk_cell_data(vtk, σ, "my-cell-data") -# hide +vtk = VTKGridFile("my_solution", grid) +write_solution(vtk, dh, u) +# etc. +close(vtk); ``` -Finally, we need to save the file to disk, using `vtk_save` - -```@example export -vtk_save(vtk) -rm("my-solution.vtu") # hide -``` +The data written by `write_solution`, `write_cell_data`, `write_node_data`, and `write_projection` may be either scalar (`Vector{<:Number}`) or tensor (`Vector{<:AbstractTensor}`) data. -Alternatively, all of the above can be done using a `do` block: +For simulations with multiple time steps, typically one `VTK` (`.vtu`) file is written +for each time step. In order to connect the actual time with each of these files, +the `paraview_collection` function from `WriteVTK.jl` can be used.
This will create +one paraview datafile (`.pvd`) file and one `VTKGridFile` (`.vtu`) for each time step. ```@example export -vtk_grid("my-solution", grid) do vtk - vtk_point_data(vtk, u, "my-point-data") - vtk_cell_data(vtk, σ, "my-cell-data") +using WriteVTK +pvd = paraview_collection("my_results") +for (step, t) in enumerate(range(0, 1, 5)) + # Do calculations to update u + VTKGridFile("my_results_$step", dh) do vtk + write_solution(vtk, dh, u) + pvd[t] = vtk + end end -rm("my-solution.vtu") # hide -``` - -For other functionality, and more information refer to the -[`WriteVTK.jl` README](https://github.com/jipolanco/WriteVTK.jl/blob/master/README.md). -In particular, for exporting the solution at multiple time steps, the -[section on PVD files](https://github.com/jipolanco/WriteVTK.jl#paraview-data-pvd-file-format) -is useful. - -## Exporting with `DofHandler` - -There is an even more convenient way to export a solution vector `u` -- using the -`DofHandler`. The `DofHandler` already contains all of the information needed, -such as the names of our fields and if they are scalar or vector fields. But most -importantly the `DofHandler` knows about the numbering and distribution of -degrees of freedom, and thus knows how to "distribute" the solution vector on -the grid. For example, lets say we have a `DofHandler` `dh` and a solution -vector `u`: - -```@example export -vtk = vtk_grid("my-solution", dh) -vtk_point_data(vtk, dh, u) -vtk_save(vtk) -rm("my-solution.vtu") # hide +close(pvd); ``` - -or with a `do`-block: - -```@example export -vtk_grid("my-solution", dh) do vtk - vtk_point_data(vtk, dh, u) - vtk_cell_data(vtk, σ, "my-cell-data") -end -rm("my-solution.vtu") # hide -``` - -When `vtk_point_data` is used with a `DofHandler` all of the fields will be -written to the VTK file, and the names will be determined by the fieldname -symbol that was used when the field was added to the `DofHandler`. - -## Exporting Boundary Conditions - -There is also a `vtk_point_data` which accepts a `ConstraintHandler`. -This method is useful to verify that the boundary conditions are -applied where they are supposed to. For a `ConstraintHandler` `ch` -we can export the boundary conditions as - -```julia -vtk_grid("boundary-conditions", grid) do vtk - vtk_point_data(vtk, ch) -end -``` - -This will export zero-valued fields with ones on the parts where the -boundary conditions are active. +See [Transient heat equation](@ref tutorial-transient-heat-equation) for an example diff --git a/docs/src/topics/grid.md b/docs/src/topics/grid.md index c232d55eec..f3c3138757 100644 --- a/docs/src/topics/grid.md +++ b/docs/src/topics/grid.md @@ -6,8 +6,8 @@ DocTestSetup = :(using Ferrite) ## Mesh Reading -A Ferrite `Grid` can be generated with the [`generate_grid`](@ref) function. -More advanced meshes can be imported with the +A Ferrite `Grid` can be generated with the [`generate_grid`](@ref) function. +More advanced meshes can be imported with the [`FerriteMeshParser.jl`](https://github.com/Ferrite-FEM/FerriteMeshParser.jl) (from Abaqus input files), or even created and translated with the [`Gmsh.jl`](https://github.com/JuliaFEM/Gmsh.jl) and [`FerriteGmsh.jl`](https://github.com/Ferrite-FEM/FerriteGmsh.jl) package, respectively. @@ -18,7 +18,7 @@ Either, a mesh is created on the fly with the gmsh API or a mesh in `.msh` or `. ```@docs FerriteGmsh.togrid ``` -`FerriteGmsh.jl` supports currently the translation of `cellsets` and `facesets`. 
+`FerriteGmsh.jl` supports currently the translation of `cellsets` and `facetsets`. Such sets are defined in Gmsh as `PhysicalGroups` of dimension `dim` and `dim-1`, respectively. In case only a part of the mesh is the domain, the domain can be specified by providing the keyword argument `domain` the name of the `PhysicalGroups` in the [`FerriteGmsh.togrid`](@ref) function. @@ -36,14 +36,14 @@ For an exemplary usage of `Gmsh.jl` and `FerriteGmsh.jl`, consider the [Stokes f ### FerriteMeshParser `FerriteMeshParser.jl` converts the mesh in an Abaqus input file (`.inp`) to a `Ferrite.Grid` with its function `get_ferrite_grid`. -The translations for most of Abaqus' standard 2d and 3d continuum elements to a `Ferrite.Cell` are defined. +The translations for most of Abaqus' standard 2d and 3d continuum elements to a `Ferrite.AbstractCell` are defined. Custom translations can be given as input, which can be used to import other (custom) elements or to override the default translation. ```@docs FerriteMeshParser.get_ferrite_grid ``` -If you are missing the translation of an Abaqus element that is equivalent to a `Ferrite.Cell`, -consider to open an [issue](https://github.com/Ferrite-FEM/FerriteMeshParser.jl/issues/new) or a pull request. +If you are missing the translation of an Abaqus element that is equivalent to a `Ferrite.AbstractCell`, +consider to open an [issue](https://github.com/Ferrite-FEM/FerriteMeshParser.jl/issues/new) or a pull request. ## `Grid` Datastructure @@ -62,76 +62,38 @@ Consider the following 2D mesh: The cells of the grid can be described in the following way ```julia -julia> cells = [ - Quadrilateral((1,2,5,4)), - Quadrilateral((2,3,6,5)), - Quadrilateral((4,5,8,7)), - Quadrilateral((5,6,9,8)) - ] +cells = [Quadrilateral((1, 2, 5, 4)), + Quadrilateral((2, 3, 6, 5)), + Quadrilateral((4, 5, 8, 7)), + Quadrilateral((5, 6, 9, 8))] ``` -where each Quadrilateral, which is a subtype of `AbstractCell` saves in the field `nodes` the tuple of node IDs. -Additionally, the data structure `Grid` can hold node-, face- and cellsets. -All of these three sets are defined by a dictionary that maps a string key to a `Set`. -For the special case of node- and cellsets the dictionary's value is of type `Set{Int}`, i.e. a keyword is mapped to a node or cell ID, respectively. +where each `Quadrilateral <: AbstractCell` is defined by the tuple of node IDs. +Additionally, the data structure `Grid` contains node-, cell-, facet-, and vertexsets. +Each of these sets is defined by a `Dict{String, OrderedSet}`. -Facesets are a more elaborate construction. They map a `String` key to a `Set{FaceIndex}`, where each `FaceIndex` consists of `(global_cell_id, local_face_id)`. -In order to understand the `local_face_id` properly, one has to consider the reference space of the element, which typically is spanned by a product of the interval ``[-1, 1]`` and in this particular example ``[-1, 1] \times [-1, 1]``. -In this space a local numbering of nodes and faces exists, i.e. +Node- and cellsets are represented by an `OrderedSet{Int}`, giving a set of node or cell ID, respectively. +Facet- and vertexsets are represented by `OrderedSet{<:BoundaryIndex}`, where `BoundaryIndex` is a `FacetIndex` or `VertexIndex` respectively. +`FacetIndex` and `VertexIndex` wraps a `Tuple`, `(global_cell_id, local_facet_id)` and `(global_cell_id, local_vertex_id)`, where the local IDs +are defined according to the reference shapes, see [Reference shapes](@ref). 
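As a concrete illustration of these set types, the following minimal sketch uses a generated version of the same 2 x 2 quadrilateral mesh (rather than the hand-written cell list above); `generate_grid` also creates boundary facetsets such as `"right"` for this grid:

```julia
using Ferrite

# Same 2 x 2 quadrilateral mesh as on this page, but generated directly
grid = generate_grid(Quadrilateral, (2, 2))

# An OrderedSet of FacetIndex entries, each wrapping (global_cell_id, local_facet_id);
# for the right-hand boundary of this mesh it contains FacetIndex(2, 2) and FacetIndex(4, 2)
getfacetset(grid, "right")
```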
-![local element](./assets/local_element.svg) - - -The example shows a local face ID ordering, defined as: +The highlighted facets, i.e. the two edges from node ID 3 to 6 and from 6 to 9, on the right hand side of our test mesh can now be described as ```julia -faces(::Lagrange{2,RefCube,1}) = ((1,2), (2,3), (3,4), (4,1)) +boundary_facets = [(3, 6), (6, 9)] ``` +i.e. by using the node IDs of the reference shape vertices. -Other face ID definitions [can be found in the src files](https://github.com/Ferrite-FEM/Ferrite.jl/blob/8224282ab4d67cb523ef342e4a6ceb1716764ada/src/interpolations.jl#L154) in the corresponding `faces` dispatch. - - -The highlighted face, i.e. the two lines from node ID 3 to 6 and from 6 to 9, on the right hand side of our test mesh can now be described as - -```julia -julia> faces = [ - (3,6), - (6,9) - ] -``` - -The local ID can be constructed based on elements, corresponding faces and chosen interpolation, since the face ordering is interpolation dependent. -```julia -julia> function compute_faceset(cells, global_faces, ip::Interpolation{dim}) where {dim} - local_faces = Ferrite.faces(ip) - nodes_per_face = length(local_faces[1]) - d = Dict{NTuple{nodes_per_face, Int}, FaceIndex}() - for (c, cell) in enumerate(cells) # c is global cell number - for (f, face) in enumerate(local_faces) # f is local face number - # store the global nodes for the particular element, local face combination - d[ntuple(i-> cell.nodes[face[i]], nodes_per_face)] = FaceIndex(c, f) - end - end - - faces = Vector{FaceIndex}() - for face in global_faces - # lookup the element, local face combination for this face - push!(faces, d[face]) - end - - return faces - end - -julia> interpolation = Lagrange{2, RefCube, 1}() - -julia> compute_faceset(cells, faces, interpolation) -Vector{FaceIndex} with 2 elements: - FaceIndex((2, 2)) - FaceIndex((4, 2)) +The first of these can be found as the 2nd facet of the 2nd cell. +```@repl +using Ferrite #hide +Ferrite.facets(Quadrilateral((2, 3, 6, 5))) ``` -Ferrite considers edges only in the three dimensional space. However, they share the concepts of faces in terms of `(global_cell_id,local_edge_id)` identifier. +The unique representation of an entity is given by the sorted version of this tuple. +While we could use this information to construct a facet set, Ferrite can construct this +set by filtering based on the coordinates, using [`addfacetset!`](@ref). ## AbstractGrid @@ -171,11 +133,10 @@ Ferrite.getnnodes(grid::SmallGrid) = length(grid.nodes_test) Ferrite.get_coordinate_eltype(::SmallGrid) = Float64 Ferrite.get_coordinate_type(::SmallGrid{dim}) where dim = Vec{dim,Float64} Ferrite.nnodes_per_cell(grid::SmallGrid, i::Int=1) = Ferrite.nnodes(grid.cells_test[i]) -Ferrite.n_faces_per_cell(grid::SmallGrid) = nfaces(eltype(grid.cells_test)) ``` -These definitions make many of `Ferrite`s functions work out of the box, e.g. you can now call -`getcoordinates(grid, cellid)` on the `SmallGrid`. +These definitions make many of `Ferrite`s functions work out of the box, e.g. you can now call +`getcoordinates(grid, cellid)` on the `SmallGrid`. Now, you would be able to assemble the heat equation example over the new custom `SmallGrid` type. Note that this particular subtype isn't able to handle boundary entity sets and so, you can't describe boundaries with it. @@ -183,6 +144,7 @@ In order to use boundaries, e.g. for Dirichlet constraints in the ConstraintHand ## Topology -Ferrite.jl's `Grid` type offers experimental features w.r.t. topology information. 
The functions [`Ferrite.getneighborhood`](@ref) and [`Ferrite.faceskeleton`](@ref) -are the interface to obtain topological information. The [`Ferrite.getneighborhood`](@ref) can construct lists of directly connected entities based on a given entity (`CellIndex,FaceIndex,EdgeIndex,VertexIndex`). -The [`Ferrite.faceskeleton`](@ref) function can be used to evaluate integrals over material interfaces or computing element interface values such as jumps. +Ferrite.jl's `Grid` type offers experimental features w.r.t. topology information. The functions [`getneighborhood`](@ref) and [`facetskeleton`](@ref) +are the interface to obtain topological information. The [`getneighborhood`](@ref) can construct lists of directly connected entities based on a given entity +(`CellIndex`, `FacetIndex`, `FaceIndex`, `EdgeIndex`, or `VertexIndex`). +The [`facetskeleton`](@ref) function can be used to evaluate integrals over material interfaces or computing element interface values such as jumps. diff --git a/docs/src/topics/index.md b/docs/src/topics/index.md index 6e8a1d7642..f952e09d20 100644 --- a/docs/src/topics/index.md +++ b/docs/src/topics/index.md @@ -6,6 +6,8 @@ This is an overview of the *topic guides*. ```@contents Pages = [ "fe_intro.md", + "reference_shapes.md", + "FEValues.md", "degrees_of_freedom.md", "assembly.md", "boundary_conditions.md", diff --git a/docs/src/topics/reference_shapes.md b/docs/src/topics/reference_shapes.md new file mode 100644 index 0000000000..53d7ca7ce2 --- /dev/null +++ b/docs/src/topics/reference_shapes.md @@ -0,0 +1,89 @@ +# Reference shapes + +The reference shapes in Ferrite are used to define grid cells, +function interpolations (i.e. shape functions), and quadrature rules. +Currently, the following reference shapes are defined + +* `RefLine` +* `RefTriangle` +* `RefQuadrilateral` +* `RefTetrahedron` +* `RefHexahedron` +* `RefPrism` +* `RefPyramid` + +## Entity naming +Ferrite denotes the entities of a reference shape as follows + +| Entity | Description | +| :------- | :---------- | +| Vertex | 0-dimensional entity in the reference shape. | +| Edge | 1-dimensional entity connecting two vertices. | +| Face | 2-dimensional entity enclosed by edges. | +| Volume | 3-dimensional entity enclosed by faces. | + +Note that a node in Ferrite is not the same as a vertex. +Vertices denote endpoints of edges, while nodes may also be located in the middle +of edges (e.g. for a `QuadraticLine` cell). + +To write dimensionally independent code, Ferrite also denotes entities by their +[*codimension*](https://en.wikipedia.org/wiki/Codimension), +defined relative the reference shape dimension. Specifically, Ferrite has the entities + +| Entity | Description | +| :------- | :---------- | +| `Cell` | *0-codimensional* entity, i.e. the same as the reference shape. | +| `Facet` | *1-codimensional* entity defining the boundary of cells. | + +Standard use cases mostly deal with these codimensional entities, +such as [`CellValues`](@ref) and [`FacetValues`](@ref). + +!!! note "Definition of codimension" + In Ferrite, *codimension* is defined relative to the reference dimension of the specific entity. + Note that other finite element codes may define it differently + (e.g. relative the highest reference dimension in the grid). + +## Entity numbering +Each reference shape defines the numbering of its vertices, edges, and faces entities, +where the edge and face entities are defined from their vertex numbers. + +!!! 
note + The numbering and identification of entities is (mostly) for internal use and typically + not something users of Ferrite need to interact with. + +### Example +The `RefQuadrilateral` is defined on the domain ``[-1, 1] \times [-1, 1]`` +in the local ``\xi_1-\xi_2`` coordinate system. + +![local element](./assets/local_element.svg) + +The vertices of a `RefQuadrilateral` are then +```@example +using Ferrite #hide +Ferrite.reference_vertices(RefQuadrilateral) +``` +and its edges are then defined as +```@example +using Ferrite #hide +Ferrite.reference_edges(RefQuadrilateral) +``` +where the numbers refer to the vertex number. +Finally, this reference shape is 2-dimensional, so it only has a single face, +corresponding to the cell itself, +```@example +using Ferrite #hide +Ferrite.reference_faces(RefQuadrilateral) +``` +also defined in terms of its vertices. + +As this is a 2-dimensional reference shape, the facets are the edges, i.e. +```@example +using Ferrite #hide +Ferrite.reference_facets(RefQuadrilateral) +``` + +!!! note "Not public API" + The functions `reference_vertices`, `reference_edges`, `reference_faces`, and `reference_facets` + are not public and only shown here to explain the numbering concept. + The specific ordering may also change, and is therefore only documented in the + [Developer documentation](@ref). diff --git a/docs/src/topics/sparse_matrix.md b/docs/src/topics/sparse_matrix.md new file mode 100644 index 0000000000..6926bc9f34 --- /dev/null +++ b/docs/src/topics/sparse_matrix.md @@ -0,0 +1,184 @@ +# [Sparsity pattern and sparse matrices](@id topic-sparse-matrix) + +An important property of the finite element method is that it results in *sparse matrices* +for the linear systems to be solved. On this page the topic of sparsity and sparse matrices +are discussed. + +```@contents +Pages = ["sparse_matrix.md"] +Depth = 2:2 +``` + +## Sparsity pattern + +The sparse structure of the linear system depends on many factors such as e.g. the weak +form, the discretization, and the choice of interpolation(s). In the end it boils down to +how the degrees of freedom (DoFs) *couple* with each other. The most common reason that two +DoFs couple is because they belong to the same element. Note, however, that this is not +guaranteed to result in a coupling since it depends on the specific weak form that is being +discretized, see e.g. [Increasing the sparsity](@ref). Boundary conditions and constraints +can also result in additional DoF couplings. + +If DoFs `i` and `j` couple, then the computed value in the eventual matrix will be +*structurally nonzero*[^1]. In this case the entry `(i, j)` should be included in the +sparsity pattern. Conversely, if DoFs `i` and `j` *don't* couple, then the computed value +will be *zero*. In this case the entry `(i, j)` should *not* be included in the sparsity +pattern since there is no need to allocate memory for entries that will be zero. + +The sparsity, i.e. the ratio of zero-entries to the total number of entries, is often[^2] +*very* high and taking advantage of this results in huge savings in terms of memory. For +example, in a problem with ``10^6`` DoFs there will be a matrix of size ``10^6 \times +10^6``. If all ``10^{12}`` entries of this matrix had to be stored (0% sparsity) as double +precision (`Float64`, 8 bytes) it would require 8 TB of memory. 
If instead the sparsity is +99.9973% (which is the case when solving the heat equation on a three dimensional hypercube +with linear Lagrange interpolation) this would be reduced to 216 MB. + +[^1]: Structurally nonzero means that there is a possibility of a nonzero value even though + the computed value might become zero in the end for various reasons. + +[^2]: At least for most practical problems using low order interpolations. + + +!!! details "Sparsity pattern example" + + To give an example, in this one-dimensional heat problem (see the [Heat + equation](../tutorials/heat_equation.md) tutorial for the weak form) we have 4 nodes + with 3 elements in between. For simplicitly DoF numbers and node numbers are the same + but this is not true in general since nodes and DoFs can be numbered independently (and + in fact are numbered independently in Ferrite). + + ``` + 1 ----- 2 ----- 3 ----- 4 + ``` + + Assuming we use linear Lagrange interpolation (the "hat functions") this will give the + following connections according to the weak form: + - Trial function 1 couples with test functions 1 and 2 (entries `(1, 1)` and `(1, 2)` + included in the sparsity pattern) + - Trial function 2 couples with test functions 1, 2, and 3 (entries `(2, 1)`, `(2, 2)`, + and `(2, 3)` included in the sparsity pattern) + - Trial function 3 couples with test functions 2, 3, and 4 (entries `(3, 2)`, `(3, 3)`, + and `(3, 4)` included in the sparsity pattern) + - Trial function 4 couples with test functions 3 and 4 (entries `(4, 3)` and `(4, 4)` + included in the sparsity pattern) + + The resulting sparsity pattern would look like this: + + ``` + 4×4 SparseArrays.SparseMatrixCSC{Float64, Int64} with 10 stored entries: + 0.0 0.0 ⋅ ⋅ + 0.0 0.0 0.0 ⋅ + ⋅ 0.0 0.0 0.0 + ⋅ ⋅ 0.0 0.0 + ``` + + Moreover, if the problem is solved with periodic boundary conditions, for example by + constraining the value on the right side to the value on the left side, there will be + additional couplings. In the example above, this means that DoF 4 should be equal to DoF + 1. Since DoF 4 is constrained it has to be eliminated from the system. Existing entries + that include DoF 4 are `(3, 4)`, `(4, 3)`, and `(4, 4)`. Given the simple constraint in + this case we can simply replace DoF 4 with DoF 1 in these entries and we end up with + entries `(3, 1)`, `(1, 3)`, and `(1, 1)`. This results in two new entries: `(3, 1)` and + `(1, 3)` (entry `(1, 1)` is already included). + +## Creating sparsity patterns + +Creating a sparsity pattern can be quite expensive if not done properly and therefore +Ferrite provides efficient methods and data structures for this. In general the sparsity +pattern is not known in advance and has to be created incrementally. To make this +incremental construction efficient it is necessary to use a dynamic data structure which +allow for fast insertions. + +The sparsity pattern also serves as a "matrix builder". When all entries are inserted into +the sparsity pattern the dynamic data structure is typically converted, or "compressed", +into a sparse matrix format such as e.g. the [*compressed sparse row +(CSR)*](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_row_(CSR,_CRS_or_Yale_format)) +format or the [*compressed sparse column +(CSC)*](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS)) +format, where the latter is the default sparse matrix type implemented in the [SparseArrays +standard library](https://github.com/JuliaSparse/SparseArrays.jl). 
These matrix formats +allow for fast linear algebra operations, such as factorizations and matrix-vector +multiplications, that are needed when the linear system is solved. See [Instantiating the +sparse matrix](@ref) for more details. + +In summary, a dynamic structure is more efficient when incrementally building the pattern by +inserting new entries, and a static or compressed structure is more efficient for linear +algebra operations. + +### Basic sparsity patterns construction + +Working with the sparsity pattern explicitly is in many cases not necessary. For basic +usage (e.g. when only one matrix needed, when no customization of the pattern is +required, etc) there exist convenience methods of [`allocate_matrix`](@ref) that return +the matrix directly. Most examples in this documentation don't deal with the sparsity +pattern explicitly because the basic method suffice. +See also [Instantiating the sparse matrix](@ref) for more details. + +### Custom sparsity pattern construction + +In more advanced cases there might be a need for more fine grained control of the sparsity +pattern. The following steps are typically taken when constructing a sparsity pattern in +Ferrite: + + 1. **Initialize an empty pattern:** This can be done by either using the + [`init_sparsity_pattern(dh)`](@ref) function or by using a constructor directly. + `init_sparsity_pattern` will return a default pattern type that is compatible with the + DofHandler. In some cases you might require another type of pattern (for example a + blocked pattern, see [Blocked sparsity pattern](@ref)) and in that case you can use the + constructor directly. + + 2. **Add entries to the pattern:** There are a number of functions that add entries to the + pattern: + - [`add_sparsity_entries!`](@ref) is a convenience method for performing the common + task of calling `add_cell_entries!`, `add_interface_entries!`, and + `add_constraint_entries!` after each other (see below). + - [`add_cell_entries!`](@ref) adds entries for all couplings between the DoFs within + each element. These entries correspond to assembling the standard element matrix and + is thus almost always required. + - [`add_interface_entries!`](@ref) adds entries for couplings between the DoFs in + neighboring elements. These entries are required when integrating along internal + interfaces between elements (e.g. for discontinuous Galerkin methods). + - [`add_constraint_entries!`](@ref) adds entries required from constraints and boundary + conditions in the ConstraintHandler. Note that this operation depends on existing + entries in the pattern and *must* be called as the last operation on the pattern. + - [`Ferrite.add_entry!`](@ref) adds a single entry to the pattern. This can be used if + you need to add custom entries that are not covered by the other functions. + + 3. **Instantiate the matrix:** A sparse matrix can be created from the sparsity pattern + using [`allocate_matrix`](@ref), see [Instantiating the sparse matrix](@ref) below for + more details. + +### Increasing the sparsity + +By default, when creating a sparsity pattern, it is assumed that each DoF within an element +couple with with *all* other DoFs in the element. + +!!! todo + - Discuss the `coupling` keyword argument. + - Discuss the `keep_constrained` keyword argument. + +### Blocked sparsity pattern + +!!! todo + Discuss `BlockSparsityPattern` and `BlockArrays` extension. 
+ +## Instantiating the sparse matrix + +As mentioned above, for many simple cases there is no need to work with the sparsity pattern +directly and using methods of [`allocate_matrix`](@ref) that take the DofHandler as input is +enough, for example: + +```julia +K = allocate_matrix(dh, ch) +``` + +`allocate_matrix` is also used to instantiate a matrix from a sparsity pattern, for example: + +```julia +K = allocate_matrix(sp) +``` + +!!! note "Multiple matrices with the same pattern" + For some problems there is a need for multiple matrices with the same sparsity pattern, + for example a mass matrix and a stiffness matrix. In this case it is more efficient to + create the sparsity pattern once and then instantiate both matrices from it. diff --git a/docs/src/tutorials/index.md b/docs/src/tutorials/index.md index 80f3015721..3fe4bda264 100644 --- a/docs/src/tutorials/index.md +++ b/docs/src/tutorials/index.md @@ -126,9 +126,9 @@ Gmsh. #### [Tutorial 9: Porous media (SubDofHandler)](porous_media.md) -This tutorial introduces how to solve a complex linear problem, where there are different +This tutorial introduces how to solve a complex linear problem, where there are different fields on different subdomains, and different cell types in the grid. This requires using -the `SubDofHandler` interface. +the `SubDofHandler` interface. **Keywords**: Mixed grids, multiple fields, porous media, `SubDofHandler` @@ -145,6 +145,16 @@ for the time-integration. --- +#### [Tutorial 10: Reactive surface](@ref tutorial-reactive-surface) + +In this tutorial a reaction diffusion system on a sphere surface embedded in 3D is solved. +Ferrite is used to assemble the diffusion operators and the mass matrices. The problem is +solved by using the usual first order reaction diffusion operator splitting. + +**Keywords**: embedded elements, operator splitting, gmsh + +--- + #### [Tutorial 11: Linear shell](@ref tutorial-linear-shell) In this tutorial a linear shell element formulation is set up as a two-dimensional domain @@ -152,11 +162,11 @@ embedded in three-dimensional space. This will teach, and perhaps inspire, you o Ferrite can be used for non-standard things and how to add "hacks" that build on top of Ferrite. -**Keywords**: embedding, automatic differentiation +**Keywords**: shell elements, automatic differentiation --- -#### [Tutorial 11: Discontinuous Galerkin heat equation](@ref tutorial-dg-heat-equation) +#### [Tutorial 12: Discontinuous Galerkin heat equation](@ref tutorial-dg-heat-equation) This tutorial guides you through the process of solving the linear stationary heat equation (i.e. Poisson's equation) on a unit square with inhomogeneous Dirichlet and Neumann boundary @@ -167,4 +177,4 @@ example was developed as part of the *Google Summer of Code* funded project ["Di Galerkin Infrastructure For the finite element toolbox Ferrite.jl"](https://summerofcode.withgoogle.com/programs/2023/projects/SLGbRNI5). -**Keywords**: scalar-valued solution, Dirichlet boundary conditions, Discontinuous Galerkin, Interior penalty. +**Keywords**: scalar-valued solution, Dirichlet boundary conditions, Discontinuous Galerkin, Interior penalty diff --git a/ext/FerriteBlockArrays.jl b/ext/FerriteBlockArrays.jl index 90e50a25f2..b7bdb9ef7c 100644 --- a/ext/FerriteBlockArrays.jl +++ b/ext/FerriteBlockArrays.jl @@ -2,59 +2,61 @@ module FerriteBlockArrays using BlockArrays: BlockArray, BlockIndex, BlockMatrix, BlockVector, block, blockaxes, blockindex, blocks, findblockindex -using Ferrite -using Ferrite: addindex!, fillzero! 
- -# TODO: Move into Ferrite and enable for mixed grids / subdomains -function global_dof_range(dh::DofHandler, f::Symbol) - set = Set{Int}() - frange = dof_range(dh, f) - for cc in CellIterator(dh) - union!(set, @view celldofs(cc)[frange]) - end - dofmin, dofmax = extrema(set) - r = dofmin:dofmax - if length(set) != length(r) - error("renumber by blocks you donkey") - end - return r -end +using BlockArrays: Block, BlockArray, BlockIndex, BlockMatrix, BlockVector, block, + blockaxes, blockindex, blocks, findblockindex, undef_blocks +using Ferrite: + Ferrite, BlockSparsityPattern, ConstraintHandler, addindex!, allocate_matrix, assemble!, + fillzero! +using SparseArrays: SparseMatrixCSC + + +############################## +## Instantiating the matrix ## +############################## -################################### -## Creating the sparsity pattern ## -################################### - -# Note: -# Creating the full unblocked matrix and then splitting into blocks inside the BlockArray -# constructor (i.e. by `getindex(::SparseMatrixCSC, ::UnitRange, ::UnitRange)`) is -# consistently faster than creating individual blocks directly. However, the latter approach -# uses less than half of the memory (measured for a 2x2 block system and various problem -# sizes), so might be useful in the future to provide an option on what algorithm to use. - -# TODO: Could potentially extract the element type and matrix type for the individual blocks -# by allowing e.g. create_sparsity_pattern(BlockMatrix{Float32}, ...) but that is not -# even supported by regular pattern right now. -function Ferrite.create_sparsity_pattern(::Type{<:BlockMatrix}, dh, ch; kwargs...) - K = create_sparsity_pattern(dh, ch; kwargs...) - # Infer block sizes from the fields in the DofHandler - block_sizes = [length(global_dof_range(dh, f)) for f in dh.field_names] - return BlockArray(K, block_sizes, block_sizes) +# function Ferrite.allocate_matrix(::Type{B}, dh, ch, ...) where B <: BlockMatrix +# # TODO: Create BSP from the induced field blocks in dh +# end + +# Fill in missing matrix type, this allows allocate_matrix(BlockMatrix, sp) +function Ferrite.allocate_matrix(::Type{<:BlockMatrix}, sp::BlockSparsityPattern) + return allocate_matrix(BlockMatrix{Float64, Matrix{SparseMatrixCSC{Float64, Int}}}, sp) end -function Ferrite.create_sparsity_pattern(B::BlockMatrix, dh, ch; kwargs...) - if !(size(B, 1) == size(B, 2) == ndofs(dh)) - error("size of input matrix ($(size(B))) does not match number of dofs ($(ndofs(dh)))") - end - K = create_sparsity_pattern(dh, ch; kwargs...) - ax = axes(B) - for block_j in blockaxes(B, 2), block_i in blockaxes(B, 1) - range_j = ax[2][block_j] - range_i = ax[1][block_i] - B[block_i, block_j] = K[range_i, range_j] +""" + allocate_matrix(::Type{BlockMatrix}, sp::BlockSparsityPattern) + allocate_matrix(::Type{BlockMatrix{T, Matrix{S}}}, sp::BlockSparsityPattern) + +Instantiate a blocked sparse matrix from the blocked sparsity pattern `sp`. + +The type of the returned matrix is a `BlockMatrix` with blocks of type `S` (defaults to +`SparseMatrixCSC{T, Int}`). + +# Examples +``` +# Create a sparse matrix with default block type +allocate_matrix(BlockMatrix, sparsity_pattern) + +# Create a sparse matrix with blocks of type SparseMatrixCSC{Float32, Int} +allocate_matrix(BlockMatrix{Float32, Matrix{SparseMatrixCSC{Float32, Int}}}, sparsity_pattern) +``` + +!!! 
note "Package extension" + This functionality is only enabled when the package + [BlockArrays.jl](https://github.com/JuliaArrays/BlockArrays.jl) is installed (`pkg> add + BlockArrays`) and loaded (`using BlockArrays`) in the session. +""" +function Ferrite.allocate_matrix(::Type{<:BlockMatrix{T, Matrix{S}}}, sp::BlockSparsityPattern) where {T, S <: AbstractMatrix{T}} + @assert isconcretetype(S) + block_sizes = sp.block_sizes + K = BlockArray(undef_blocks, S, block_sizes, block_sizes) + for j in 1:length(block_sizes), i in 1:length(block_sizes) + K[Block(i), Block(j)] = allocate_matrix(S, sp.blocks[i, j]) end - return B + return K end + ########################################### ## BlockAssembler and associated methods ## ########################################### diff --git a/ext/FerriteMetis.jl b/ext/FerriteMetis.jl index d2a82935ad..4726737eeb 100644 --- a/ext/FerriteMetis.jl +++ b/ext/FerriteMetis.jl @@ -4,7 +4,9 @@ module FerriteMetis # https://github.com/JuliaLang/julia/pull/47749 if VERSION >= v"1.10.0-DEV.90" -using Ferrite +using Ferrite: + Ferrite, CellIterator, ConstraintHandler, DofHandler, DofOrder, celldofs, ndofs, + ndofs_per_cell using Metis.LibMetis: idx_t using Metis: Metis using SparseArrays: sparse @@ -19,7 +21,7 @@ end Fill-reducing permutation order from [Metis.jl](https://github.com/JuliaSparse/Metis.jl). Since computing the permutation involves constructing the structural couplings between all -DoFs the field/component coupling can be provided; see [`create_sparsity_pattern`](@ref) for +DoFs the field/component coupling can be provided; see [`allocate_matrix`](@ref) for details. """ function DofOrder.Ext{Metis}(; @@ -39,7 +41,7 @@ function Ferrite.compute_renumber_permutation( if coupling !== nothing # Set sym = true since Metis.permutation requires a symmetric graph. # TODO: Perhaps just symmetrize it: coupling = coupling' .| coupling - couplings = Ferrite._coupling_to_local_dof_coupling(dh, coupling, #= sym =# true) + couplings = Ferrite._coupling_to_local_dof_coupling(dh, coupling) end # Create the CSR (CSC, but pattern is symmetric so equivalent) using diff --git a/src/CollectionsOfViews.jl b/src/CollectionsOfViews.jl new file mode 100644 index 0000000000..a85691af06 --- /dev/null +++ b/src/CollectionsOfViews.jl @@ -0,0 +1,156 @@ +module CollectionsOfViews + +export ArrayOfVectorViews, push_at_index!, ConstructionBuffer + +# `AdaptiveRange` and `ConstructionBuffer` are used to efficiently build up an `ArrayOfVectorViews` +# when the size of each view is unknown. +struct AdaptiveRange + start::Int + ncurrent::Int + nmax::Int +end + +struct ConstructionBuffer{T, N} + indices::Array{AdaptiveRange, N} + data::Vector{T} + sizehint::Int +end + +""" + ConstructionBuffer(data::Vector, dims::NTuple{N, Int}, sizehint) + +Create a buffer for creating an [`ArrayOfVectorViews`](@ref), representing an array with `N` axes. +`sizehint` sets the number of elements in `data` allocated when a new index is added via `push_at_index!`, +or when the current storage for the index is full, how much many additional elements are reserved for that index. +Any content in `data` is overwritten, but performance is improved by pre-allocating it to a reasonable size or +by `sizehint!`ing it. +""" +function ConstructionBuffer(data::Vector, dims::NTuple{<:Any, Int}, sizehint::Int) + indices = fill(AdaptiveRange(0, 0, 0), dims) + return ConstructionBuffer(indices, empty!(data), sizehint) +end + +""" + push_at_index!(b::ConstructionBuffer, val, indices::Int...) 
+ +`push!` the value `val` to the `Vector` view at the index given by `indices`, typically called +inside the [`ArrayOfVectorViews`](@ref) constructor do-block, but it can also be used when manually +creating a `ConstructionBuffer`. +""" +function push_at_index!(b::ConstructionBuffer, val, indices::Vararg{Int, N}) where {N} + r = getindex(b.indices, indices...) + n = length(b.data) + if r.start == 0 + # `indices...` not previously added, allocate new space for it at the end of `b.data` + resize!(b.data, n + b.sizehint) + b.data[n+1] = val + setindex!(b.indices, AdaptiveRange(n + 1, 1, b.sizehint), indices...) + elseif r.ncurrent == r.nmax + # We have used up our space, move data associated with `indices...` to the end of `b.data` + resize!(b.data, n + r.nmax + b.sizehint) + for i in 1:r.ncurrent + b.data[n + i] = b.data[r.start + i - 1] + end + b.data[n + r.ncurrent + 1] = val + setindex!(b.indices, AdaptiveRange(n + 1, r.ncurrent + 1, r.nmax + b.sizehint), indices...) + else # We have space in an already allocated section + b.data[r.start + r.ncurrent] = val + setindex!(b.indices, AdaptiveRange(r.start, r.ncurrent + 1, r.nmax), indices...) + end + return b +end + +struct ArrayOfVectorViews{T, N} <: AbstractArray{SubArray{T, 1, Vector{T}, Tuple{UnitRange{Int64}}, true}, N} + indices::Vector{Int} + data::Vector{T} + lin_idx::LinearIndices{N, NTuple{N, Base.OneTo{Int}}} + function ArrayOfVectorViews{T, N}(indices::Vector{Int}, data::Vector{T}, lin_idx::LinearIndices{N}) where {T, N} + return new{T, N}(indices, data, lin_idx) + end +end + +# AbstractArray interface (https://docs.julialang.org/en/v1/manual/interfaces/#man-interface-array) +Base.size(cv::ArrayOfVectorViews) = size(cv.lin_idx) +@inline function Base.getindex(cv::ArrayOfVectorViews, linear_index::Int) + @boundscheck checkbounds(cv.lin_idx, linear_index) + return @inbounds view(cv.data, cv.indices[linear_index]:(cv.indices[linear_index+1]-1)) +end +@inline function Base.getindex(cv::ArrayOfVectorViews, idx...) + linear_index = getindex(cv.lin_idx, idx...) + return @inbounds getindex(cv, linear_index) +end +Base.IndexStyle(::Type{<:ArrayOfVectorViews{<:Any, N}}) where N = Base.IndexStyle(Array{Int, N}) + +# Constructors +""" + ArrayOfVectorViews(f!::Function, data::Vector{T}, dims::NTuple{N, Int}; sizehint) + +Create an `ArrayOfVectorViews` to store many vector views of potentially different sizes, +emulating an `Array{Vector{T}, N}` with size `dims`. However, it avoids allocating each vector individually +by storing all data in `data`, and instead of `Vector{T}`, each element is a `typeof(view(data, 1:2))`. + +When the length of each vector is unknown, the `ArrayOfVectorViews` can be created reasonably efficiently +with the following do-block, which creates an intermediate `buffer::ConstructionBuffer` supporting the +[`push_at_index!`](@ref) function. +``` +vector_views = ArrayOfVectorViews(data, dims; sizehint) do buffer + for (ind, val) in some_data + push_at_index!(buffer, val, ind) + end +end +``` +`sizehint` tells how much space to allocate for the index `ind` if no `val` has been added to that index before, +or how much more space to allocate in case all previously allocated space for `ind` has been used up. 
+""" +function ArrayOfVectorViews(f!::F, data::Vector, dims::Tuple; sizehint = nothing) where {F <: Function} + sizehint === nothing && error("Providing sizehint is mandatory") + b = ConstructionBuffer(data, dims, sizehint) + f!(b) + return ArrayOfVectorViews(b) +end + +""" + ArrayOfVectorViews(b::CollectionsOfViews.ConstructionBuffer) + +Creates the `ArrayOfVectorViews` directly from the `ConstructionBuffer` that was manually created and filled. +""" +function ArrayOfVectorViews(b::ConstructionBuffer{T}) where T + indices = Vector{Int}(undef, length(b.indices) + 1) + lin_idx = LinearIndices(b.indices) + data_length = sum(ar.ncurrent for ar in b.indices; init=0) + data = Vector{T}(undef, data_length) + data_index = 1 + for (idx, ar) in pairs(b.indices) + copyto!(data, data_index, b.data, ar.start, ar.ncurrent) + indices[lin_idx[idx]] = data_index + data_index += ar.ncurrent + end + indices[length(indices)] = data_index + # Since user-code in the constructor function has access to `b`, setting dimensions to + # zero here allows GC:ing the data in `b` even in cases when the compiler cannot + # guarantee that it is unreachable. + resize!(b.data, 0); sizehint!(b.data, 0) + isa(b.indices, Vector) && (resize!(b.indices, 0); sizehint!(b.indices, 0)) + return ArrayOfVectorViews(indices, data, lin_idx) +end + +""" + ArrayOfVectorViews(indices::Vector{Int}, data::Vector{T}, lin_idx::LinearIndices{N}; checkargs = true) + +Creates the `ArrayOfVectorViews` directly where the user is responsible for having the correct input data. +Checking of the argument dimensions can be elided by setting `checkargs = false`, but incorrect dimensions +may lead to illegal out of bounds access later. + +`data` is indexed by `indices[i]:indices[i+1]`, where `i = lin_idx[idx...]` and `idx...` are the user-provided +indices to the `ArrayOfVectorViews`. +""" +function ArrayOfVectorViews(indices::Vector{Int}, data::Vector{T}, lin_idx::LinearIndices{N}; checkargs = true) where {T, N} + if checkargs + checkbounds(data, 1:(last(indices) - 1)) + checkbounds(indices, last(lin_idx) + 1) + issorted(indices) || throw(ArgumentError("indices must be weakly increasing")) + end + return ArrayOfVectorViews{T, N}(indices, data, lin_idx) +end + +end diff --git a/src/Dofs/ConstraintHandler.jl b/src/Dofs/ConstraintHandler.jl index 57bfdf6e01..8684e163d3 100644 --- a/src/Dofs/ConstraintHandler.jl +++ b/src/Dofs/ConstraintHandler.jl @@ -1,6 +1,6 @@ # abstract type Constraint end """ - Dirichlet(u::Symbol, ∂Ω::Set, f::Function, components=nothing) + Dirichlet(u::Symbol, ∂Ω::AbstractVecOrSet, f::Function, components=nothing) Create a Dirichlet boundary condition on `u` on the `∂Ω` part of the boundary. `f` is a function of the form `f(x)` or `f(x, t)` @@ -9,14 +9,21 @@ and returns the prescribed value. `components` specify the components of `u` that are prescribed by this condition. By default all components of `u` are prescribed. +The set, `∂Ω`, can be an `AbstractSet` or `AbstractVector` with elements of +type [`FacetIndex`](@ref), [`FaceIndex`](@ref), [`EdgeIndex`](@ref), [`VertexIndex`](@ref), +or `Int`. For most cases, the element type is `FacetIndex`, as shown below. +To constrain a single point, using `VertexIndex` is recommended, but it is also possible +to constrain a specific nodes by giving the node numbers via `Int` elements. +To constrain e.g. an edge in 3d `EdgeIndex` elements can be given. 
+ For example, here we create a -Dirichlet condition for the `:u` field, on the faceset called +Dirichlet condition for the `:u` field, on the facetset called `∂Ω` and the value given by the `sin` function: *Examples* ```jldoctest -# Obtain the faceset from the grid -∂Ω = getfaceset(grid, "boundary-1") +# Obtain the facetset from the grid +∂Ω = getfacetset(grid, "boundary-1") # Prescribe scalar field :s on ∂Ω to sin(t) dbc = Dirichlet(:s, ∂Ω, (x, t) -> sin(t)) @@ -33,14 +40,14 @@ which applies the condition via [`apply!`](@ref) and/or [`apply_zero!`](@ref). """ struct Dirichlet # <: Constraint f::Function # f(x) or f(x,t) -> value(s) - faces::Union{Set{Int},Set{FaceIndex},Set{EdgeIndex},Set{VertexIndex}} + facets::OrderedSet{T} where T <: Union{Int, FacetIndex, FaceIndex, EdgeIndex, VertexIndex} field_name::Symbol components::Vector{Int} # components of the field - local_face_dofs::Vector{Int} - local_face_dofs_offset::Vector{Int} + local_facet_dofs::Vector{Int} + local_facet_dofs_offset::Vector{Int} end -function Dirichlet(field_name::Symbol, faces::Set, f::Function, components=nothing) - return Dirichlet(f, faces, field_name, __to_components(components), Int[], Int[]) +function Dirichlet(field_name::Symbol, facets::AbstractVecOrSet, f::Function, components=nothing) + return Dirichlet(f, convert_to_orderedset(facets), field_name, __to_components(components), Int[], Int[]) end # components=nothing is default and means that all components should be constrained @@ -58,8 +65,8 @@ const DofCoefficients{T} = Vector{Pair{Int,T}} """ AffineConstraint(constrained_dof::Int, entries::Vector{Pair{Int,T}}, b::T) where T -Define an affine/linear constraint to constrain one degree of freedom, `u[i]`, -such that `u[i] = ∑(u[j] * a[j]) + b`, +Define an affine/linear constraint to constrain one degree of freedom, `u[i]`, +such that `u[i] = ∑(u[j] * a[j]) + b`, where `i=constrained_dof` and each element in `entries` are `j => a[j]` """ struct AffineConstraint{T} @@ -74,21 +81,21 @@ end A collection of constraints associated with the dof handler `dh`. `T` is the numeric type for stored values. """ -struct ConstraintHandler{DH<:AbstractDofHandler,T} - dbcs::Vector{Dirichlet} - prescribed_dofs::Vector{Int} - free_dofs::Vector{Int} - inhomogeneities::Vector{T} +mutable struct ConstraintHandler{DH<:AbstractDofHandler,T} + const dbcs::Vector{Dirichlet} + const prescribed_dofs::Vector{Int} + const free_dofs::Vector{Int} + const inhomogeneities::Vector{T} # Store the original constant inhomogeneities for affine constraints used to compute # "effective" inhomogeneities in `update!` and then stored in .inhomogeneities. 
- affine_inhomogeneities::Vector{Union{Nothing,T}} + const affine_inhomogeneities::Vector{Union{Nothing,T}} # `nothing` for pure DBC constraint, otherwise affine constraint - dofcoefficients::Vector{Union{Nothing, DofCoefficients{T}}} + const dofcoefficients::Vector{Union{Nothing, DofCoefficients{T}}} # global dof -> index into dofs and inhomogeneities and dofcoefficients - dofmapping::Dict{Int,Int} - bcvalues::Vector{BCValues{T}} - dh::DH - closed::ScalarWrapper{Bool} + const dofmapping::Dict{Int,Int} + const bcvalues::Vector{BCValues{T}} + const dh::DH + closed::Bool end ConstraintHandler(dh::AbstractDofHandler) = ConstraintHandler(Float64, dh) @@ -97,7 +104,7 @@ function ConstraintHandler(::Type{T}, dh::AbstractDofHandler) where T <: Number @assert isclosed(dh) ConstraintHandler( Dirichlet[], Int[], Int[], T[], Union{Nothing,T}[], Union{Nothing,DofCoefficients{T}}[], - Dict{Int,Int}(), BCValues{T}[], dh, ScalarWrapper(false), + Dict{Int,Int}(), BCValues{T}[], dh, false, ) end @@ -173,11 +180,11 @@ function Base.show(io::IO, ::MIME"text/plain", ch::ConstraintHandler) end end -isclosed(ch::ConstraintHandler) = ch.closed[] +isclosed(ch::ConstraintHandler) = ch.closed free_dofs(ch::ConstraintHandler) = ch.free_dofs prescribed_dofs(ch::ConstraintHandler) = ch.prescribed_dofs -# Equivalent to `copy!(out, setdiff(1:n_entries, diff))`, but requires that +# Equivalent to `copy!(out, setdiff(1:n_entries, diff))`, but requires that # `issorted(diff)` and that `1 ≤ diff[1] ≤ diff[end] ≤ n_entries` function _sorted_setdiff!(out::Vector{Int}, n_entries::Int, diff::Vector{Int}) n_diff = length(diff) @@ -243,7 +250,7 @@ function close!(ch::ConstraintHandler) end end - ch.closed[] = true + ch.closed = true # Compute the prescribed values by calling update!: This should be cheap, and for the # common case where constraints does not depend on time it is annoying and easy to @@ -292,21 +299,21 @@ function add_prescribed_dof!(ch::ConstraintHandler, constrained_dof::Int, inhomo return ch end -# Dirichlet on (face|edge|vertex)set -function _add!(ch::ConstraintHandler, dbc::Dirichlet, bcfaces::Set{Index}, interpolation::Interpolation, field_dim::Int, offset::Int, bcvalue::BCValues, _) where {Index<:BoundaryIndex} - local_face_dofs, local_face_dofs_offset = - _local_face_dofs_for_bc(interpolation, field_dim, dbc.components, offset, dirichlet_boundarydof_indices(eltype(bcfaces))) - copy!(dbc.local_face_dofs, local_face_dofs) - copy!(dbc.local_face_dofs_offset, local_face_dofs_offset) +# Dirichlet on (facet|face|edge|vertex)set +function _add!(ch::ConstraintHandler, dbc::Dirichlet, bcfacets::AbstractVecOrSet{Index}, interpolation::Interpolation, field_dim::Int, offset::Int, bcvalue::BCValues, _) where {Index<:BoundaryIndex} + local_facet_dofs, local_facet_dofs_offset = + _local_facet_dofs_for_bc(interpolation, field_dim, dbc.components, offset, dirichlet_boundarydof_indices(eltype(bcfacets))) + copy!(dbc.local_facet_dofs, local_facet_dofs) + copy!(dbc.local_facet_dofs_offset, local_facet_dofs_offset) # loop over all the faces in the set and add the global dofs to `constrained_dofs` constrained_dofs = Int[] cc = CellCache(ch.dh, UpdateFlags(; nodes=false, coords=false, dofs=true)) - for (cellidx, faceidx) in bcfaces + for (cellidx, facetidx) in bcfacets reinit!(cc, cellidx) - r = local_face_dofs_offset[faceidx]:(local_face_dofs_offset[faceidx+1]-1) - append!(constrained_dofs, cc.dofs[local_face_dofs[r]]) # TODO: for-loop over r and simply push! 
to ch.prescribed_dofs - @debug println("adding dofs $(cc.dofs[local_face_dofs[r]]) to dbc") + r = local_facet_dofs_offset[facetidx]:(local_facet_dofs_offset[facetidx+1]-1) + append!(constrained_dofs, cc.dofs[local_facet_dofs[r]]) # TODO: for-loop over r and simply push! to ch.prescribed_dofs + @debug println("adding dofs $(cc.dofs[local_facet_dofs[r]]) to dbc") end # save it to the ConstraintHandler @@ -318,26 +325,26 @@ function _add!(ch::ConstraintHandler, dbc::Dirichlet, bcfaces::Set{Index}, inter return ch end -# Calculate which local dof index live on each face: -# face `i` have dofs `local_face_dofs[local_face_dofs_offset[i]:local_face_dofs_offset[i+1]-1] -function _local_face_dofs_for_bc(interpolation, field_dim, components, offset, boundaryfunc::F=dirichlet_facedof_indices) where F +# Calculate which local dof index live on each facet: +# facet `i` have dofs `local_facet_dofs[local_facet_dofs_offset[i]:local_facet_dofs_offset[i+1]-1] +function _local_facet_dofs_for_bc(interpolation, field_dim, components, offset, boundaryfunc::F=dirichlet_facetdof_indices) where F @assert issorted(components) - local_face_dofs = Int[] - local_face_dofs_offset = Int[1] - for (_, face) in enumerate(boundaryfunc(interpolation)) - for fdof in face, d in 1:field_dim + local_facet_dofs = Int[] + local_facet_dofs_offset = Int[1] + for (_, facet) in enumerate(boundaryfunc(interpolation)) + for fdof in facet, d in 1:field_dim if d in components - push!(local_face_dofs, (fdof-1)*field_dim + d + offset) + push!(local_facet_dofs, (fdof-1)*field_dim + d + offset) end end - push!(local_face_dofs_offset, length(local_face_dofs) + 1) + push!(local_facet_dofs_offset, length(local_facet_dofs) + 1) end - return local_face_dofs, local_face_dofs_offset + return local_facet_dofs, local_facet_dofs_offset end -function _add!(ch::ConstraintHandler, dbc::Dirichlet, bcnodes::Set{Int}, interpolation::Interpolation, field_dim::Int, offset::Int, bcvalue::BCValues, cellset::Set{Int}=Set{Int}(1:getncells(get_grid(ch.dh)))) +function _add!(ch::ConstraintHandler, dbc::Dirichlet, bcnodes::AbstractVecOrSet{Int}, interpolation::Interpolation, field_dim::Int, offset::Int, bcvalue::BCValues, cellset::AbstractVecOrSet{Int}=OrderedSet{Int}(1:getncells(get_grid(ch.dh)))) grid = get_grid(ch.dh) - if interpolation !== default_interpolation(getcelltype(grid, first(cellset))) + if interpolation !== geometric_interpolation(getcelltype(grid, first(cellset))) @warn("adding constraint to nodeset is not recommended for sub/super-parametric approximations.") end @@ -362,7 +369,7 @@ function _add!(ch::ConstraintHandler, dbc::Dirichlet, bcnodes::Set{Int}, interpo constrained_dofs = Int[] sizehint!(constrained_dofs, ncomps*length(bcnodes)) - sizehint!(dbc.local_face_dofs, length(bcnodes)) + sizehint!(dbc.local_facet_dofs, length(bcnodes)) for node in bcnodes if !visited[node] # either the node belongs to another field handler or it does not have dofs in the constrained field @@ -371,11 +378,11 @@ function _add!(ch::ConstraintHandler, dbc::Dirichlet, bcnodes::Set{Int}, interpo for i in 1:ncomps push!(constrained_dofs, node_dofs[i,node]) end - push!(dbc.local_face_dofs, node) # use this field to store the node idx for each node + push!(dbc.local_facet_dofs, node) # use this field to store the node idx for each node end # save it to the ConstraintHandler - copy!(dbc.local_face_dofs_offset, constrained_dofs) # use this field to store the global dofs + copy!(dbc.local_facet_dofs_offset, constrained_dofs) # use this field to store the global dofs 
push!(ch.dbcs, dbc) push!(ch.bcvalues, bcvalue) for d in constrained_dofs @@ -394,14 +401,14 @@ compute the inhomogeneities. Note that this is called implicitly in `close!(::ConstraintHandler)`. """ function update!(ch::ConstraintHandler, time::Real=0.0) - @assert ch.closed[] + @assert ch.closed for (i, dbc) in pairs(ch.dbcs) # If the BC function only accept one argument, i.e. f(x), we create a wrapper # g(x, t) = f(x) that discards the second parameter so that _update! can always call # the function with two arguments internally. - wrapper_f = hasmethod(dbc.f, Tuple{Any,Any}) ? dbc.f : (x, _) -> dbc.f(x) + wrapper_f = hasmethod(dbc.f, Tuple{get_coordinate_type(get_grid(ch.dh)), typeof(time)}) ? dbc.f : (x, _) -> dbc.f(x) # Function barrier - _update!(ch.inhomogeneities, wrapper_f, dbc.faces, dbc.field_name, dbc.local_face_dofs, dbc.local_face_dofs_offset, + _update!(ch.inhomogeneities, wrapper_f, dbc.facets, dbc.field_name, dbc.local_facet_dofs, dbc.local_facet_dofs_offset, dbc.components, ch.dh, ch.bcvalues[i], ch.dofmapping, ch.dofcoefficients, time) end # Compute effective inhomogeneity for affine constraints with prescribed dofs in the @@ -426,8 +433,8 @@ function update!(ch::ConstraintHandler, time::Real=0.0) return nothing end -# for vertices, faces and edges -function _update!(inhomogeneities::Vector{T}, f::Function, boundary_entities::Set{<:BoundaryIndex}, field::Symbol, local_face_dofs::Vector{Int}, local_face_dofs_offset::Vector{Int}, +# for facets, vertices, faces and edges +function _update!(inhomogeneities::Vector{T}, f::Function, boundary_entities::AbstractVecOrSet{<:BoundaryIndex}, field::Symbol, local_facet_dofs::Vector{Int}, local_facet_dofs_offset::Vector{Int}, components::Vector{Int}, dh::AbstractDofHandler, boundaryvalues::BCValues, dofmapping::Dict{Int,Int}, dofcoefficients::Vector{Union{Nothing,DofCoefficients{T}}}, time::Real) where {T} @@ -436,10 +443,10 @@ function _update!(inhomogeneities::Vector{T}, f::Function, boundary_entities::Se reinit!(cc, cellidx) # no need to reinit!, enough to update current_entity since we only need geometric shape functions M - boundaryvalues.current_entity[] = entityidx + boundaryvalues.current_entity = entityidx - # local dof-range for this face - r = local_face_dofs_offset[entityidx]:(local_face_dofs_offset[entityidx+1]-1) + # local dof-range for this facet + r = local_facet_dofs_offset[entityidx]:(local_facet_dofs_offset[entityidx+1]-1) counter = 1 for location in 1:getnquadpoints(boundaryvalues) x = spatial_coordinate(boundaryvalues, location, cc.coords) @@ -448,7 +455,7 @@ function _update!(inhomogeneities::Vector{T}, f::Function, boundary_entities::Se for i in 1:length(components) # find the global dof - globaldof = cc.dofs[local_face_dofs[r[counter]]] + globaldof = cc.dofs[local_facet_dofs[r[counter]]] counter += 1 dbc_index = dofmapping[globaldof] @@ -464,8 +471,8 @@ function _update!(inhomogeneities::Vector{T}, f::Function, boundary_entities::Se end # for nodes -function _update!(inhomogeneities::Vector{T}, f::Function, ::Set{Int}, field::Symbol, nodeidxs::Vector{Int}, globaldofs::Vector{Int}, - components::Vector{Int}, dh::AbstractDofHandler, facevalues::BCValues, +function _update!(inhomogeneities::Vector{T}, f::Function, ::AbstractVecOrSet{Int}, field::Symbol, nodeidxs::Vector{Int}, globaldofs::Vector{Int}, + components::Vector{Int}, dh::AbstractDofHandler, facetvalues::BCValues, dofmapping::Dict{Int,Int}, dofcoefficients::Vector{Union{Nothing,DofCoefficients{T}}}, time::Real) where T counter = 1 for nodenumber in 
nodeidxs @@ -486,42 +493,6 @@ function _update!(inhomogeneities::Vector{T}, f::Function, ::Set{Int}, field::Sy end end -# Saves the dirichlet boundary conditions to a vtkfile. -# Values will have a 1 where bcs are active and 0 otherwise -function WriteVTK.vtk_point_data(vtkfile, ch::ConstraintHandler) - unique_fields = [] - for dbc in ch.dbcs - push!(unique_fields, dbc.field_name) - end - unique!(unique_fields) - - for field in unique_fields - nd = getfielddim(ch.dh, field) - data = zeros(Float64, nd, getnnodes(get_grid(ch.dh))) - for dbc in ch.dbcs - dbc.field_name != field && continue - if eltype(dbc.faces) <: BoundaryIndex - functype = boundaryfunction(eltype(dbc.faces)) - for (cellidx, faceidx) in dbc.faces - for facenode in functype(getcells(get_grid(ch.dh), cellidx))[faceidx] - for component in dbc.components - data[component, facenode] = 1 - end - end - end - else - for nodeidx in dbc.faces - for component in dbc.components - data[component, nodeidx] = 1 - end - end - end - end - vtk_point_data(vtkfile, data, string(field, "_bc")) - end - return vtkfile -end - """ apply!(K::SparseMatrixCSC, rhs::AbstractVector, ch::ConstraintHandler) @@ -564,12 +535,12 @@ apply! apply_zero!(K::SparseMatrixCSC, rhs::AbstractVector, ch::ConstraintHandler) Adjust the matrix `K` and the right hand side `rhs` to account for prescribed Dirichlet -boundary conditions and affine constraints such that `du = K \\ rhs` gives the expected +boundary conditions and affine constraints such that `du = K \\ rhs` gives the expected result (e.g. `du` zero for all prescribed degrees of freedom). apply_zero!(v::AbstractVector, ch::ConstraintHandler) -Zero-out values in `v` corresponding to prescribed degrees of freedom and update values +Zero-out values in `v` corresponding to prescribed degrees of freedom and update values prescribed by affine constraints, such that if `a` fulfills the constraints, `a ± v` also will. @@ -589,9 +560,9 @@ apply_zero!(ΔΔu, ch) # Make sure values are exactly zero ``` !!! note - The last call to `apply_zero!` is only strictly necessary for affine constraints. - However, even if the Dirichlet boundary conditions should be fulfilled after - `apply!(K, g, ch)`, solvers of linear systems are not exact. + The last call to `apply_zero!` is only strictly necessary for affine constraints. + However, even if the Dirichlet boundary conditions should be fulfilled after + `apply!(K, g, ch)`, solvers of linear systems are not exact. `apply!(ΔΔu, ch)` can be used to make sure the values for the prescribed degrees of freedom are fulfilled exactly. """ @@ -624,13 +595,7 @@ function apply_zero!(K::Union{SparseMatrixCSC,Symmetric}, f::AbstractVector, ch: apply!(K, f, ch, true) end -# For backwards compatibility, not used anymore -@enumx ApplyStrategy Transpose Inplace -const APPLY_TRANSPOSE = ApplyStrategy.Transpose -const APPLY_INPLACE = ApplyStrategy.Inplace - -function apply!(KK::Union{SparseMatrixCSC,Symmetric}, f::AbstractVector, ch::ConstraintHandler, applyzero::Bool=false; - strategy::ApplyStrategy.T=ApplyStrategy.Transpose) +function apply!(KK::Union{SparseMatrixCSC,Symmetric}, f::AbstractVector, ch::ConstraintHandler, applyzero::Bool=false) @assert isclosed(ch) sym = isa(KK, Symmetric) K = sym ? 
KK.data : KK @@ -847,7 +812,7 @@ function add!(ch::ConstraintHandler, dbc::Dirichlet) dbc.field_name in sdh.field_names || continue # Compute the intersection between dbc.set and the cellset of this # SubDofHandler and skip if the set is empty - filtered_set = filter_dbc_set(get_grid(ch.dh), sdh.cellset, dbc.faces) + filtered_set = filter_dbc_set(get_grid(ch.dh), sdh.cellset, dbc.facets) isempty(filtered_set) && continue # Fetch information about the field on this SubDofHandler field_idx = find_field(sdh, dbc.field_name) @@ -864,17 +829,17 @@ function add!(ch::ConstraintHandler, dbc::Dirichlet) error("components $(components) not within range of field :$(dbc.field_name) ($(n_comp) dimension(s))") end # Create BCValues for coordinate evaluation at dof-locations - EntityType = eltype(dbc.faces) # (Face|Edge|Vertex)Index + EntityType = eltype(dbc.facets) # (Facet|Face|Edge|Vertex)Index if EntityType <: Integer - # BCValues are just dummy for nodesets so set to FaceIndex - EntityType = FaceIndex + # BCValues are just dummy for nodesets so set to FacetIndex + EntityType = FacetIndex end CT = getcelltype(sdh) # Same celltype enforced in SubDofHandler constructor - bcvalues = BCValues(interpolation, default_interpolation(CT), EntityType) + bcvalues = BCValues(interpolation, geometric_interpolation(CT), EntityType) # Recreate the Dirichlet(...) struct with the filtered set and call internal add! filtered_dbc = Dirichlet(dbc.field_name, filtered_set, dbc.f, components) _add!( - ch, filtered_dbc, filtered_dbc.faces, interpolation, n_comp, + ch, filtered_dbc, filtered_dbc.facets, interpolation, n_comp, field_offset(sdh, field_idx), bcvalues, sdh.cellset, ) dbc_added = true @@ -895,7 +860,7 @@ end function filter_dbc_set(grid::AbstractGrid, fhset::AbstractSet{Int}, dbcset::AbstractSet{Int}) ret = empty(dbcset) - nodes_in_fhset = Set{Int}() + nodes_in_fhset = OrderedSet{Int}() for cc in CellIterator(grid, fhset, UpdateFlags(; nodes=true, coords=false)) union!(nodes_in_fhset, cc.nodes) end @@ -905,27 +870,27 @@ function filter_dbc_set(grid::AbstractGrid, fhset::AbstractSet{Int}, dbcset::Abs return ret end -struct PeriodicFacePair - mirror::FaceIndex - image::FaceIndex - rotation::UInt8 # relative rotation of the mirror face counter-clockwise the *image* normal (only relevant in 3D) +struct PeriodicFacetPair + mirror::FacetIndex + image::FacetIndex + rotation::UInt8 # relative rotation of the mirror facet counter-clockwise the *image* normal (only relevant in 3D) mirrored::Bool # mirrored => opposite normal vectors end """ - PeriodicDirichlet(u::Symbol, face_mapping, components=nothing) - PeriodicDirichlet(u::Symbol, face_mapping, R::AbstractMatrix, components=nothing) - PeriodicDirichlet(u::Symbol, face_mapping, f::Function, components=nothing) - -Create a periodic Dirichlet boundary condition for the field `u` on the face-pairs given in -`face_mapping`. The mapping can be computed with [`collect_periodic_faces`](@ref). The -constraint ensures that degrees-of-freedom on the mirror face are constrained to the -corresponding degrees-of-freedom on the image face. `components` specify the components of + PeriodicDirichlet(u::Symbol, facet_mapping, components=nothing) + PeriodicDirichlet(u::Symbol, facet_mapping, R::AbstractMatrix, components=nothing) + PeriodicDirichlet(u::Symbol, facet_mapping, f::Function, components=nothing) + +Create a periodic Dirichlet boundary condition for the field `u` on the facet-pairs given in +`facet_mapping`. The mapping can be computed with [`collect_periodic_facets`](@ref). 
The +constraint ensures that degrees-of-freedom on the mirror facet are constrained to the +corresponding degrees-of-freedom on the image facet. `components` specify the components of `u` that are prescribed by this condition. By default all components of `u` are prescribed. If the mapping is not aligned with the coordinate axis (e.g. rotated) a rotation matrix `R` -should be passed to the constructor. This matrix rotates dofs on the mirror face to the -image face. Note that this is only applicable for vector-valued problems. +should be passed to the constructor. This matrix rotates dofs on the mirror facet to the +image facet. Note that this is only applicable for vector-valued problems. To construct an inhomogeneous periodic constraint it is possible to pass a function `f`. Note that this is currently only supported when the periodicity is aligned with the @@ -936,24 +901,24 @@ See the manual section on [Periodic boundary conditions](@ref) for more informat struct PeriodicDirichlet field_name::Symbol components::Vector{Int} # components of the field - face_pairs::Vector{Pair{String,String}} # legacy that will populate face_map on add! - face_map::Vector{PeriodicFacePair} + facet_pairs::Vector{Pair{String,String}} # legacy that will populate facet_map on add! + facet_map::Vector{PeriodicFacetPair} func::Union{Function,Nothing} rotation_matrix::Union{Matrix{Float64},Nothing} end # Default to no inhomogeneity function/rotation -PeriodicDirichlet(fn::Symbol, fp::Union{Vector{<:Pair},Vector{PeriodicFacePair}}, c=nothing) = +PeriodicDirichlet(fn::Symbol, fp::Union{Vector{<:Pair},Vector{PeriodicFacetPair}}, c=nothing) = PeriodicDirichlet(fn, fp, nothing, c) # Basic constructor for the simple case where face_map will be populated in # add!(::ConstraintHandler, ...) instead function PeriodicDirichlet(fn::Symbol, fp::Vector{<:Pair}, f::Union{Function,Nothing}, c=nothing) - face_map = PeriodicFacePair[] # This will be populated in add!(::ConstraintHandler, ...) instead - return PeriodicDirichlet(fn, __to_components(c), fp, face_map, f, nothing) + facet_map = PeriodicFacetPair[] # This will be populated in add!(::ConstraintHandler, ...) instead + return PeriodicDirichlet(fn, __to_components(c), fp, facet_map, f, nothing) end -function PeriodicDirichlet(fn::Symbol, fm::Vector{PeriodicFacePair}, f_or_r::Union{AbstractMatrix,Function,Nothing}, c=nothing) +function PeriodicDirichlet(fn::Symbol, fm::Vector{PeriodicFacetPair}, f_or_r::Union{AbstractMatrix,Function,Nothing}, c=nothing) f = f_or_r isa Function ? f_or_r : nothing rotation_matrix = f_or_r isa AbstractMatrix ? 
f_or_r : nothing components = __to_components(c) @@ -961,11 +926,11 @@ function PeriodicDirichlet(fn::Symbol, fm::Vector{PeriodicFacePair}, f_or_r::Uni end function add!(ch::ConstraintHandler, pdbc::PeriodicDirichlet) - # Legacy code: Might need to build the face_map - is_legacy = !isempty(pdbc.face_pairs) && isempty(pdbc.face_map) + # Legacy code: Might need to build the facet_map + is_legacy = !isempty(pdbc.facet_pairs) && isempty(pdbc.facet_map) if is_legacy - for (mset, iset) in pdbc.face_pairs - collect_periodic_faces!(pdbc.face_map, get_grid(ch.dh), mset, iset, identity) # TODO: Better transform + for (mset, iset) in pdbc.facet_pairs + collect_periodic_facets!(pdbc.facet_map, get_grid(ch.dh), mset, iset, identity) # TODO: Better transform end end field_idx = find_field(ch.dh, pdbc.field_name) @@ -1007,37 +972,37 @@ end function _add!(ch::ConstraintHandler, pdbc::PeriodicDirichlet, interpolation::Interpolation, field_dim::Int, offset::Int, is_legacy::Bool, rotation_matrix::Union{Matrix{T},Nothing}, ::Type{dof_map_t}, iterator_f::F) where {T, dof_map_t, F <: Function} grid = get_grid(ch.dh) - face_map = pdbc.face_map + facet_map = pdbc.facet_map - # Indices of the local dofs for the faces - local_face_dofs, local_face_dofs_offset = - _local_face_dofs_for_bc(interpolation, field_dim, pdbc.components, offset) + # Indices of the local dofs for the facets + local_facet_dofs, local_facet_dofs_offset = + _local_facet_dofs_for_bc(interpolation, field_dim, pdbc.components, offset) mirrored_indices = - mirror_local_dofs(local_face_dofs, local_face_dofs_offset, interpolation, length(pdbc.components)) - rotated_indices = rotate_local_dofs(local_face_dofs, local_face_dofs_offset, interpolation, length(pdbc.components)) + mirror_local_dofs(local_facet_dofs, local_facet_dofs_offset, interpolation, length(pdbc.components)) + rotated_indices = rotate_local_dofs(local_facet_dofs, local_facet_dofs_offset, interpolation, length(pdbc.components)) # Dof map for mirror dof => image dof dof_map = Dict{dof_map_t,dof_map_t}() - mirror_dofs = zeros(Int, ndofs_per_cell(ch.dh)) - image_dofs = zeros(Int, ndofs_per_cell(ch.dh)) - for face_pair in face_map - m = face_pair.mirror - i = face_pair.image + n = ndofs_per_cell(ch.dh, first(facet_map).mirror[1]) + mirror_dofs = zeros(Int, n) + image_dofs = zeros(Int, n) + for facet_pair in facet_map + m = facet_pair.mirror + i = facet_pair.image celldofs!(mirror_dofs, ch.dh, m[1]) celldofs!( image_dofs, ch.dh, i[1]) - mdof_range = local_face_dofs_offset[m[2]] : (local_face_dofs_offset[m[2] + 1] - 1) - idof_range = local_face_dofs_offset[i[2]] : (local_face_dofs_offset[i[2] + 1] - 1) + mdof_range = local_facet_dofs_offset[m[2]] : (local_facet_dofs_offset[m[2] + 1] - 1) + idof_range = local_facet_dofs_offset[i[2]] : (local_facet_dofs_offset[i[2] + 1] - 1) for (md, id) in zip(iterator_f(mdof_range), iterator_f(idof_range)) - mdof = image_dofs[local_face_dofs[id]] + mdof = image_dofs[local_facet_dofs[id]] # Rotate the mirror index - rotated_md = rotated_indices[md, face_pair.rotation + 1] + rotated_md = rotated_indices[md, facet_pair.rotation + 1] # Mirror the mirror index (maybe) :) - mirrored_md = face_pair.mirrored ? mirrored_indices[rotated_md] : rotated_md - cdof = mirror_dofs[local_face_dofs[mirrored_md]] - + mirrored_md = facet_pair.mirrored ? mirrored_indices[rotated_md] : rotated_md + cdof = mirror_dofs[local_facet_dofs[mirrored_md]] if haskey(dof_map, mdof) mdof′ = dof_map[mdof] # @info "$cdof => $mdof, but $mdof => $mdof′, remapping $cdof => $mdof′." 
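A minimal usage sketch of the renamed periodic facet API handled in this hunk, assuming `grid` and `dh` are set up as usual and the grid has facetsets named "left" and "right" (the names are placeholders):

```julia
# Pair mirror facets in "left" with image facets in "right"
facet_map = collect_periodic_facets(grid, "left", "right")

# Constrain both components of :u on the mirror facets to the image facets
pdbc = PeriodicDirichlet(:u, facet_map, [1, 2])

ch = ConstraintHandler(dh)
add!(ch, pdbc)
close!(ch)
```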
@@ -1082,9 +1047,9 @@ function _add!(ch::ConstraintHandler, pdbc::PeriodicDirichlet, interpolation::In Tx = get_coordinate_type(grid) min_x = Tx(i -> typemax(eltype(Tx))) max_x = Tx(i -> typemin(eltype(Tx))) - for facepair in face_map, faceidx in (facepair.mirror, facepair.image) - cellidx, faceidx = faceidx - nodes = faces(grid.cells[cellidx])[faceidx] + for facetpair in facet_map, facet_indices in (facetpair.mirror, facetpair.image) + cellidx, facetidx = facet_indices + nodes = facets(grid.cells[cellidx])[facetidx] union!(all_node_idxs, nodes) for n in nodes x = get_node_coordinate(grid, n) @@ -1096,7 +1061,7 @@ function _add!(ch::ConstraintHandler, pdbc::PeriodicDirichlet, interpolation::In points = construct_cornerish(min_x, max_x) tree = KDTree(Tx[get_node_coordinate(grid, i) for i in all_node_idxs_v]) idxs, _ = NearestNeighbors.nn(tree, points) - corner_set = Set{Int}(all_node_idxs_v[i] for i in idxs) + corner_set = OrderedSet{Int}(all_node_idxs_v[i] for i in idxs) dbc = Dirichlet(pdbc.field_name, corner_set, pdbc.func === nothing ? (x, _) -> pdbc.components * eltype(x)(0) : pdbc.func, @@ -1112,8 +1077,8 @@ function _add!(ch::ConstraintHandler, pdbc::PeriodicDirichlet, interpolation::In foreach(x -> delete!(dof_map, x), chtmp.prescribed_dofs) # Need to reset the internal of this DBC in order to add! it again... - resize!(dbc.local_face_dofs, 0) - resize!(dbc.local_face_dofs_offset, 0) + resize!(dbc.local_facet_dofs, 0) + resize!(dbc.local_facet_dofs_offset, 0) # Add the Dirichlet for the corners add!(ch, dbc) @@ -1123,10 +1088,10 @@ function _add!(ch::ConstraintHandler, pdbc::PeriodicDirichlet, interpolation::In if pdbc.func !== nothing # Create another temp constraint handler if we need to compute inhomogeneities chtmp2 = ConstraintHandler(ch.dh) - all_faces = Set{FaceIndex}() - union!(all_faces, (x.mirror for x in face_map)) - union!(all_faces, (x.image for x in face_map)) - dbc_all = Dirichlet(pdbc.field_name, all_faces, pdbc.func, pdbc.components) + all_facets = OrderedSet{FacetIndex}() + union!(all_facets, (x.mirror for x in facet_map)) + union!(all_facets, (x.image for x in facet_map)) + dbc_all = Dirichlet(pdbc.field_name, all_facets, pdbc.func, pdbc.components) add!(chtmp2, dbc_all); close!(chtmp2) # Call update! here since we need it to construct the affine constraints... # TODO: This doesn't allow for time dependent constraints... @@ -1196,11 +1161,11 @@ end function mirror_local_dofs(_, _, ::Lagrange{RefLine}, ::Int) # For 1D there is nothing to do end -function mirror_local_dofs(local_face_dofs, local_face_dofs_offset, ip::Lagrange{<:Union{RefQuadrilateral,RefTriangle}}, n::Int) +function mirror_local_dofs(local_facet_dofs, local_facet_dofs_offset, ip::Lagrange{<:Union{RefQuadrilateral,RefTriangle}}, n::Int) # For 2D we always permute since Ferrite defines dofs counter-clockwise - ret = collect(1:length(local_face_dofs)) - for (i, f) in enumerate(dirichlet_facedof_indices(ip)) - this_offset = local_face_dofs_offset[i] + ret = collect(1:length(local_facet_dofs)) + for (i, f) in enumerate(dirichlet_facetdof_indices(ip)) + this_offset = local_facet_dofs_offset[i] other_offset = this_offset + n for d in 1:n idx1 = this_offset + (d - 1) @@ -1214,14 +1179,14 @@ function mirror_local_dofs(local_face_dofs, local_face_dofs_offset, ip::Lagrange end # TODO: Can probably be combined with the method above. 
-function mirror_local_dofs(local_face_dofs, local_face_dofs_offset, ip::Lagrange{<:Union{RefHexahedron,RefTetrahedron},O}, n::Int) where O +function mirror_local_dofs(local_facet_dofs, local_facet_dofs_offset, ip::Lagrange{<:Union{RefHexahedron,RefTetrahedron},O}, n::Int) where O @assert 1 <= O <= 2 N = ip isa Lagrange{RefHexahedron} ? 4 : 3 - ret = collect(1:length(local_face_dofs)) + ret = collect(1:length(local_facet_dofs)) # Mirror by changing from counter-clockwise to clockwise - for (i, f) in enumerate(dirichlet_facedof_indices(ip)) - r = local_face_dofs_offset[i]:(local_face_dofs_offset[i+1] - 1) + for (i, f) in enumerate(dirichlet_facetdof_indices(ip)) + r = local_facet_dofs_offset[i]:(local_facet_dofs_offset[i+1] - 1) # 1. Rotate the corners vertex_range = r[1:(N*n)] vlr = @view ret[vertex_range] @@ -1244,39 +1209,23 @@ function mirror_local_dofs(local_face_dofs, local_face_dofs_offset, ip::Lagrange return ret end -if VERSION < v"1.8.0" - function circshift!(x::AbstractVector, shift::Integer) - return circshift!(x, copy(x), shift) - end -else - # See JuliaLang/julia#46759 - const CIRCSHIFT_WRONG_DIRECTION = Base.circshift!([1, 2, 3], 1) != Base.circshift([1, 2, 3], 1) - function circshift!(x::AbstractVector, shift::Integer) - shift = CIRCSHIFT_WRONG_DIRECTION ? -shift : shift - return Base.circshift!(x, shift) - end -end - -circshift!(args...) = Base.circshift!(args...) - - -function rotate_local_dofs(local_face_dofs, local_face_dofs_offset, ip::Lagrange{<:Union{RefQuadrilateral,RefTriangle}}, ncomponents) - return collect(1:length(local_face_dofs)) # TODO: Return range? +function rotate_local_dofs(local_facet_dofs, local_facet_dofs_offset, ip::Lagrange{<:Union{RefQuadrilateral,RefTriangle}}, ncomponents) + return collect(1:length(local_facet_dofs)) # TODO: Return range? end -function rotate_local_dofs(local_face_dofs, local_face_dofs_offset, ip::Lagrange{<:Union{RefHexahedron,RefTetrahedron}, O}, ncomponents) where O +function rotate_local_dofs(local_facet_dofs, local_facet_dofs_offset, ip::Lagrange{<:Union{RefHexahedron,RefTetrahedron}, O}, ncomponents) where O @assert 1 <= O <= 2 N = ip isa Lagrange{RefHexahedron} ? 4 : 3 - ret = similar(local_face_dofs, length(local_face_dofs), N) - ret[:, :] .= 1:length(local_face_dofs) - for f in 1:length(local_face_dofs_offset)-1 - face_range = local_face_dofs_offset[f]:(local_face_dofs_offset[f+1]-1) + ret = similar(local_facet_dofs, length(local_facet_dofs), N) + ret[:, :] .= 1:length(local_facet_dofs) + for f in 1:length(local_facet_dofs_offset)-1 + facet_range = local_facet_dofs_offset[f]:(local_facet_dofs_offset[f+1]-1) for i in 1:(N-1) # 1. Rotate the vertex dofs - vertex_range = face_range[1:(N*ncomponents)] + vertex_range = facet_range[1:(N*ncomponents)] circshift!(@view(ret[vertex_range, i+1]), @view(ret[vertex_range, i]), -ncomponents) # 2. Rotate the edge dofs if O > 1 - edge_range = face_range[(N*ncomponents+1):(2N*ncomponents)] + edge_range = facet_range[(N*ncomponents+1):(2N*ncomponents)] circshift!(@view(ret[edge_range, i+1]), @view(ret[edge_range, i]), -ncomponents) end end @@ -1285,151 +1234,151 @@ function rotate_local_dofs(local_face_dofs, local_face_dofs_offset, ip::Lagrange end """ - collect_periodic_faces(grid::Grid, mset, iset, transform::Union{Function,Nothing}=nothing; tol=1e-12) + collect_periodic_facets(grid::Grid, mset, iset, transform::Union{Function,Nothing}=nothing; tol=1e-12) -Match all mirror faces in `mset` with a corresponding image face in `iset`. 
Return a -dictionary which maps each mirror face to a image face. The result can then be passed to +Match all mirror facets in `mset` with a corresponding image facet in `iset`. Return a +dictionary which maps each mirror facet to a image facet. The result can then be passed to [`PeriodicDirichlet`](@ref). -`mset` and `iset` can be given as a `String` (an existing face set in the grid) or as a -`Set{FaceIndex}` directly. +`mset` and `iset` can be given as a `String` (an existing facet set in the grid) or as a +`AbstractSet{FacetIndex}` directly. -By default this function looks for a matching face in the directions of the coordinate +By default this function looks for a matching facet in the directions of the coordinate system. For other types of periodicities the `transform` function can be used. The -`transform` function is applied on the coordinates of the image face, and is expected to +`transform` function is applied on the coordinates of the image facet, and is expected to transform the coordinates to the matching locations in the mirror set. -The keyword `tol` specifies the tolerance (i.e. distance and deviation in face-normals) -between a image-face and mirror-face, for them to be considered matched. +The keyword `tol` specifies the tolerance (i.e. distance and deviation in facet-normals) +between a image-facet and mirror-facet, for them to be considered matched. -See also: [`collect_periodic_faces!`](@ref), [`PeriodicDirichlet`](@ref). +See also: [`collect_periodic_facets!`](@ref), [`PeriodicDirichlet`](@ref). """ -function collect_periodic_faces(grid::Grid, mset::Union{Set{FaceIndex},String}, iset::Union{Set{FaceIndex},String}, transform::Union{Function,Nothing}=nothing; tol::Float64=1e-12) - return collect_periodic_faces!(PeriodicFacePair[], grid, mset, iset, transform; tol) +function collect_periodic_facets(grid::Grid, mset::Union{AbstractSet{FacetIndex},String}, iset::Union{AbstractSet{FacetIndex},String}, transform::Union{Function,Nothing}=nothing; tol::Float64=1e-12) + return collect_periodic_facets!(PeriodicFacetPair[], grid, mset, iset, transform; tol) end """ - collect_periodic_faces(grid::Grid, all_faces::Union{Set{FaceIndex},String,Nothing}=nothing; tol=1e-12) + collect_periodic_facets(grid::Grid, all_facets::Union{AbstractSet{FacetIndex},String,Nothing}=nothing; tol=1e-12) -Split all faces in `all_faces` into image and mirror sets. For each matching pair, the face -located further along the vector `(1, 1, 1)` becomes the image face. +Split all facets in `all_facets` into image and mirror sets. For each matching pair, the facet +located further along the vector `(1, 1, 1)` becomes the image facet. -If no set is given, all faces on the outer boundary of the grid (i.e. all faces that do not +If no set is given, all facets on the outer boundary of the grid (i.e. all facets that do not have a neighbor) is used. -See also: [`collect_periodic_faces!`](@ref), [`PeriodicDirichlet`](@ref). +See also: [`collect_periodic_facets!`](@ref), [`PeriodicDirichlet`](@ref). 
""" -function collect_periodic_faces(grid::Grid, all_faces::Union{Set{FaceIndex},String,Nothing}=nothing; tol::Float64=1e-12) - return collect_periodic_faces!(PeriodicFacePair[], grid, all_faces; tol) +function collect_periodic_facets(grid::Grid, all_facets::Union{AbstractSet{FacetIndex},String,Nothing}=nothing; tol::Float64=1e-12) + return collect_periodic_facets!(PeriodicFacetPair[], grid, all_facets; tol) end """ - collect_periodic_faces!(face_map::Vector{PeriodicFacePair}, grid::Grid, mset, iset, transform::Union{Function,Nothing}; tol=1e-12) + collect_periodic_facets!(facet_map::Vector{PeriodicFacetPair}, grid::Grid, mset, iset, transform::Union{Function,Nothing}; tol=1e-12) -Same as [`collect_periodic_faces`](@ref) but adds all matches to the existing `face_map`. +Same as [`collect_periodic_facets`](@ref) but adds all matches to the existing `facet_map`. """ -function collect_periodic_faces!(face_map::Vector{PeriodicFacePair}, grid::Grid, mset::Union{Set{FaceIndex},String}, iset::Union{Set{FaceIndex},String}, transform::Union{Function,Nothing}=nothing; tol::Float64=1e-12) - mset = __to_faceset(grid, mset) - iset = __to_faceset(grid, iset) +function collect_periodic_facets!(facet_map::Vector{PeriodicFacetPair}, grid::Grid, mset::Union{AbstractSet{FacetIndex},String}, iset::Union{AbstractSet{FacetIndex},String}, transform::Union{Function,Nothing}=nothing; tol::Float64=1e-12) + mset = __to_facetset(grid, mset) + iset = __to_facetset(grid, iset) if transform === nothing # This method is destructive, hence the copy - __collect_periodic_faces_bruteforce!(face_map, grid, copy(mset), copy(iset), #=known_order=#true, tol) + __collect_periodic_facets_bruteforce!(facet_map, grid, copy(mset), copy(iset), #=known_order=#true, tol) else # This method relies on ordering, hence the collect - __collect_periodic_faces_tree!(face_map, grid, collect(mset), collect(iset), transform, tol) + __collect_periodic_facets_tree!(facet_map, grid, collect(mset), collect(iset), transform, tol) end - return face_map + return facet_map end -function collect_periodic_faces!(face_map::Vector{PeriodicFacePair}, grid::Grid, faceset::Union{Set{FaceIndex},String,Nothing}; tol::Float64=1e-12) - faceset = faceset === nothing ? __collect_boundary_faces(grid) : copy(__to_faceset(grid, faceset)) - if mod(length(faceset), 2) != 0 - error("uneven number of faces") +function collect_periodic_facets!(facet_map::Vector{PeriodicFacetPair}, grid::Grid, facetset::Union{AbstractSet{FacetIndex},String,Nothing}; tol::Float64=1e-12) + facetset = facetset === nothing ? 
__collect_boundary_facets(grid) : copy(__to_facetset(grid, facetset)) + if mod(length(facetset), 2) != 0 + error("uneven number of facets") end - return __collect_periodic_faces_bruteforce!(face_map, grid, faceset, faceset, #=known_order=#false, tol) + return __collect_periodic_facets_bruteforce!(facet_map, grid, facetset, facetset, #=known_order=#false, tol) end -__to_faceset(_, set::Set{FaceIndex}) = set -__to_faceset(grid, set::String) = getfaceset(grid, set) -function __collect_boundary_faces(grid::Grid) - candidates = Dict{Tuple, FaceIndex}() +__to_facetset(_, set::AbstractSet{FacetIndex}) = set +__to_facetset(grid, set::String) = getfacetset(grid, set) +function __collect_boundary_facets(grid::Grid) + candidates = Dict{Tuple, FacetIndex}() for (ci, c) in enumerate(grid.cells) - for (fi, fn) in enumerate(faces(c)) - face = first(sortface(fn)) - if haskey(candidates, face) - delete!(candidates, face) + for (fi, fn) in enumerate(facets(c)) + facet = sortfacet_fast(fn) + if haskey(candidates, facet) + delete!(candidates, facet) else - candidates[face] = FaceIndex(ci, fi) + candidates[facet] = FacetIndex(ci, fi) end end end - return Set{FaceIndex}(values(candidates)) + return OrderedSet{FacetIndex}(values(candidates)) end -function __collect_periodic_faces_tree!(face_map::Vector{PeriodicFacePair}, grid::Grid, mset::Vector{FaceIndex}, iset::Vector{FaceIndex}, transformation::F, tol::Float64) where F <: Function +function __collect_periodic_facets_tree!(facet_map::Vector{PeriodicFacetPair}, grid::Grid, mset::Vector{FacetIndex}, iset::Vector{FacetIndex}, transformation::F, tol::Float64) where F <: Function if length(mset) != length(mset) - error("different number of faces in mirror and image set") + error("different number of facets in mirror and image set") end Tx = get_coordinate_type(grid) mirror_mean_x = Tx[] for (c, f) in mset - fn = faces(grid.cells[c])[f] + fn = facets(grid.cells[c])[f] push!(mirror_mean_x, sum(get_node_coordinate(grid,i) for i in fn) / length(fn)) end # Same dance for the image image_mean_x = Tx[] for (c, f) in iset - fn = faces(grid.cells[c])[f] + fn = facets(grid.cells[c])[f] # Apply transformation to all coordinates push!(image_mean_x, sum(transformation(get_node_coordinate(grid,i))::Tx for i in fn) / length(fn)) end - # Use KDTree to find closest face + # Use KDTree to find closest facet tree = KDTree(image_mean_x) idxs, _ = NearestNeighbors.nn(tree, mirror_mean_x) for (midx, iidx) in zip(eachindex(mset), idxs) - r = __check_periodic_faces_f(grid, mset[midx], iset[iidx], mirror_mean_x[midx], image_mean_x[iidx], transformation, tol) + r = __check_periodic_facets_f(grid, mset[midx], iset[iidx], mirror_mean_x[midx], image_mean_x[iidx], transformation, tol) if r === nothing - error("Could not find matching face for $(mset[midx])") + error("Could not find matching facet for $(mset[midx])") end - push!(face_map, r) + push!(facet_map, r) end # Make sure the mapping is unique - @assert all(x -> in(x, Set{FaceIndex}(p.mirror for p in face_map)), mset) - @assert all(x -> in(x, Set{FaceIndex}(p.image for p in face_map)), iset) - if !allunique(Set{FaceIndex}(p.image for p in face_map)) - error("did not find a unique mapping between faces") + @assert all(x -> in(x, Set{FacetIndex}(p.mirror for p in facet_map)), mset) + @assert all(x -> in(x, Set{FacetIndex}(p.image for p in facet_map)), iset) + if !allunique(Set{FacetIndex}(p.image for p in facet_map)) + error("did not find a unique mapping between facets") end - return face_map + return facet_map end # This method empties 
mset and iset -function __collect_periodic_faces_bruteforce!(face_map::Vector{PeriodicFacePair}, grid::Grid, mset::Set{FaceIndex}, iset::Set{FaceIndex}, known_order::Bool, tol::Float64) +function __collect_periodic_facets_bruteforce!(facet_map::Vector{PeriodicFacetPair}, grid::Grid, mset::AbstractSet{FacetIndex}, iset::AbstractSet{FacetIndex}, known_order::Bool, tol::Float64) if length(mset) != length(iset) - error("different faces in mirror and image") + error("different facets in mirror and image") end while length(mset) > 0 fi = first(mset) found = false for fj in iset fi == fj && continue - r = __check_periodic_faces(grid, fi, fj, known_order, tol) + r = __check_periodic_facets(grid, fi, fj, known_order, tol) r === nothing && continue - push!(face_map, r) + push!(facet_map, r) delete!(mset, fi) delete!(iset, fj) found = true break end - found || error("did not find a corresponding periodic face") + found || error("did not find a corresponding periodic facet") end @assert isempty(mset) && isempty(iset) - return face_map + return facet_map end function __periodic_options(::T) where T <: Vec{2} @@ -1479,22 +1428,22 @@ function circshift_tuple(x::T, n) where T Tuple(circshift!(collect(x), n))::T end -# Check if two faces are periodic. This method assumes that the faces are mirrored and thus +# Check if two facets are periodic. This method assumes that the facets are mirrored and thus # have opposing normal vectors -function __check_periodic_faces(grid::Grid, fi::FaceIndex, fj::FaceIndex, known_order::Bool, tol::Float64) +function __check_periodic_facets(grid::Grid, fi::FacetIndex, fj::FacetIndex, known_order::Bool, tol::Float64) cii, fii = fi - nodes_i = faces(grid.cells[cii])[fii] + nodes_i = facets(grid.cells[cii])[fii] cij, fij = fj - nodes_j = faces(grid.cells[cij])[fij] + nodes_j = facets(grid.cells[cij])[fij] - # 1. Check that normals are opposite TODO: Should use FaceValues here + # 1. Check that normals are opposite TODO: Should use FacetValues here ni = __outward_normal(grid, nodes_i) nj = __outward_normal(grid, nodes_j) if norm(ni + nj) >= tol return nothing end - # 2. Find the periodic direction using the vector between the midpoint of the faces + # 2. Find the periodic direction using the vector between the midpoint of the facets xmi = sum(get_node_coordinate(grid, i) for i in nodes_i) / length(nodes_i) xmj = sum(get_node_coordinate(grid, j) for j in nodes_j) / length(nodes_j) xmij = xmj - xmi @@ -1512,7 +1461,7 @@ function __check_periodic_faces(grid::Grid, fi::FaceIndex, fj::FaceIndex, known_ found || return nothing # 3. Check that the first node of fj have a corresponding node in fi - # In this method faces are mirrored (opposite normal vectors) so reverse the nodes + # In this method facets are mirrored (opposite normal vectors) so reverse the nodes nodes_i = circshift_tuple(reverse(nodes_i), 1) xj = get_node_coordinate(grid, nodes_j[1]) node_rot = 0 @@ -1539,31 +1488,31 @@ function __check_periodic_faces(grid::Grid, fi::FaceIndex, fj::FaceIndex, known_ end # Rotation is only relevant for 3D - if getdim(grid) == 3 + if getspatialdim(grid) == 3 node_rot = mod(node_rot, length(nodes_i)) else node_rot = 0 end - # 5. Faces match! Face below the diagonal become the mirror. + # 5. Facets match! Facet below the diagonal become the mirror. 
if known_order || len > 0 - return PeriodicFacePair(fi, fj, node_rot, true) + return PeriodicFacetPair(fi, fj, node_rot, true) else - return PeriodicFacePair(fj, fi, node_rot, true) + return PeriodicFacetPair(fj, fi, node_rot, true) end end -# This method is quite similar to __check_periodic_faces, but is used when user have passed +# This method is quite similar to __check_periodic_facets, but is used when user have passed # a transformation function and we have then used the KDTree to find the matching pair of -# faces. This function only need to i) check whether faces have aligned or opposite normal +# facets. This function only need to i) check whether facets have aligned or opposite normal # vectors, and ii) compute the relative rotation. -function __check_periodic_faces_f(grid::Grid, fi::FaceIndex, fj::FaceIndex, xmi, xmj, transformation::F, tol::Float64) where F +function __check_periodic_facets_f(grid::Grid, fi::FacetIndex, fj::FacetIndex, xmi, xmj, transformation::F, tol::Float64) where F cii, fii = fi - nodes_i = faces(grid.cells[cii])[fii] + nodes_i = facets(grid.cells[cii])[fii] cij, fij = fj - nodes_j = faces(grid.cells[cij])[fij] + nodes_j = facets(grid.cells[cij])[fij] - # 1. Check if normals are aligned or opposite TODO: Should use FaceValues here + # 1. Check if normals are aligned or opposite TODO: Should use FacetValues here ni = __outward_normal(grid, nodes_i) nj = __outward_normal(grid, nodes_j, transformation) if norm(ni + nj) < tol @@ -1594,13 +1543,13 @@ function __check_periodic_faces_f(grid::Grid, fi::FaceIndex, fj::FaceIndex, xmi, found || return nothing # 3. Rotation is only relevant for 3D. - if getdim(grid) == 3 + if getspatialdim(grid) == 3 node_rot = mod(node_rot, length(nodes_i)) else node_rot = 0 end - return PeriodicFacePair(fi, fj, node_rot, mirror) + return PeriodicFacetPair(fi, fj, node_rot, mirror) end diff --git a/src/Dofs/DofHandler.jl b/src/Dofs/DofHandler.jl index c72828fbb1..4db8f9eb65 100644 --- a/src/Dofs/DofHandler.jl +++ b/src/Dofs/DofHandler.jl @@ -6,39 +6,39 @@ abstract type AbstractDofHandler end Access some grid representation for the dof handler. !!! note - This API function is currently not well-defined. It acts as the interface between + This API function is currently not well-defined. It acts as the interface between distributed assembly and assembly on a single process, because most parts of the functionality can be handled by only acting on the locally owned cell set. """ get_grid(dh::AbstractDofHandler) -struct SubDofHandler{DH} <: AbstractDofHandler +mutable struct SubDofHandler{DH} <: AbstractDofHandler # From constructor - dh::DH - cellset::Set{Int} + const dh::DH + const cellset::OrderedSet{Int} # Populated in add! - field_names::Vector{Symbol} - field_interpolations::Vector{Interpolation} - field_n_components::Vector{Int} # Redundant with interpolations, remove? + const field_names::Vector{Symbol} + const field_interpolations::Vector{Interpolation} + const field_n_components::Vector{Int} # Redundant with interpolations, remove? # Computed in close! - ndofs_per_cell::ScalarWrapper{Int} + ndofs_per_cell::Int # const dof_ranges::Vector{UnitRange{Int}} # TODO: Why not? end """ - SubDofHandler(dh::AbstractDofHandler, cellset::Set{Int}) + SubDofHandler(dh::AbstractDofHandler, cellset::AbstractVecOrSet{Int}) -Create an `sdh::SubDofHandler` from the parent `dh`, pertaining to the -cells in `cellset`. This allows you to add fields to parts of the domain, or using -different interpolations or cell types (e.g. 
`Triangles` and `Quadrilaterals`). All +Create an `sdh::SubDofHandler` from the parent `dh`, pertaining to the +cells in `cellset`. This allows you to add fields to parts of the domain, or using +different interpolations or cell types (e.g. `Triangles` and `Quadrilaterals`). All fields and cell types must be the same in one `SubDofHandler`. After construction any number of discrete fields can be added to the SubDofHandler using [`add!`](@ref). Construction is finalized by calling [`close!`](@ref) on the parent `dh`. # Examples -We assume we have a `grid` containing "Triangle" and "Quadrilateral" cells, -including the cellsets "triangles" and "quadilaterals" for to these cells. +We assume we have a `grid` containing "Triangle" and "Quadrilateral" cells, +including the cellsets "triangles" and "quadilaterals" for to these cells. ```julia dh = DofHandler(grid) @@ -50,10 +50,10 @@ sdh_quad = SubDofHandler(dh, getcellset(grid, "quadilaterals")) ip_quad = Lagrange{RefQuadrilateral, 2}()^2 # vector interpolation for a field u add!(sdh_quad, :u, ip_quad) -close!(dh) # Finalize by closing the parent +close!(dh) # Finalize by closing the parent ``` """ -function SubDofHandler(dh::DH, cellset) where {DH <: AbstractDofHandler} +function SubDofHandler(dh::DH, cellset::AbstractVecOrSet{Int}) where {DH <: AbstractDofHandler} # TODO: Should be an inner constructor. isclosed(dh) && error("DofHandler already closed") # Compute the celltype and make sure all elements have the same one @@ -68,7 +68,7 @@ function SubDofHandler(dh::DH, cellset) where {DH <: AbstractDofHandler} end end # Construct and insert into the parent dh - sdh = SubDofHandler{typeof(dh)}(dh, cellset, Symbol[], Interpolation[], Int[], ScalarWrapper(-1)) + sdh = SubDofHandler{typeof(dh)}(dh, convert_to_orderedset(cellset), Symbol[], Interpolation[], Int[], -1) push!(dh.subdofhandlers, sdh) return sdh end @@ -93,17 +93,17 @@ function _print_field_information(io::IO, mime::MIME"text/plain", sdh::SubDofHan end end -struct DofHandler{dim,G<:AbstractGrid{dim}} <: AbstractDofHandler - subdofhandlers::Vector{SubDofHandler{DofHandler{dim, G}}} - field_names::Vector{Symbol} +mutable struct DofHandler{dim,G<:AbstractGrid{dim}} <: AbstractDofHandler + const subdofhandlers::Vector{SubDofHandler{DofHandler{dim, G}}} + const field_names::Vector{Symbol} # Dofs for cell i are stored in cell_dofs at the range: # cell_dofs_offset[i]:(cell_dofs_offset[i]+ndofs_per_cell(dh, i)-1) - cell_dofs::Vector{Int} - cell_dofs_offset::Vector{Int} - cell_to_subdofhandler::Vector{Int} # maps cell id -> SubDofHandler id - closed::ScalarWrapper{Bool} - grid::G - ndofs::ScalarWrapper{Int} + const cell_dofs::Vector{Int} + const cell_dofs_offset::Vector{Int} + const cell_to_subdofhandler::Vector{Int} # maps cell id -> SubDofHandler id + closed::Bool + const grid::G + ndofs::Int end """ @@ -131,7 +131,7 @@ close!(dh) function DofHandler(grid::G) where {dim, G <: AbstractGrid{dim}} ncells = getncells(grid) sdhs = SubDofHandler{DofHandler{dim, G}}[] - DofHandler{dim, G}(sdhs, Symbol[], Int[], zeros(Int, ncells), zeros(Int, ncells), ScalarWrapper(false), grid, ScalarWrapper(-1)) + DofHandler{dim, G}(sdhs, Symbol[], Int[], zeros(Int, ncells), zeros(Int, ncells), false, grid, -1) end function Base.show(io::IO, mime::MIME"text/plain", dh::DofHandler) @@ -141,7 +141,14 @@ function Base.show(io::IO, mime::MIME"text/plain", dh::DofHandler) else println(io, " Fields:") for fieldname in getfieldnames(dh) - println(io, " ", repr(fieldname), ", dim: ", getfielddim(dh, fieldname)) + ip = 
getfieldinterpolation(dh, find_field(dh, fieldname)) + if ip isa ScalarInterpolation + field_type = "scalar" + elseif ip isa VectorInterpolation + _getvdim(::VectorInterpolation{vdim}) where vdim = vdim + field_type = "Vec{$(_getvdim(ip))}" + end + println(io, " ", repr(fieldname), ", ", field_type) end end if !isclosed(dh) @@ -151,7 +158,7 @@ function Base.show(io::IO, mime::MIME"text/plain", dh::DofHandler) end end -isclosed(dh::AbstractDofHandler) = dh.closed[] +isclosed(dh::AbstractDofHandler) = dh.closed get_grid(dh::DofHandler) = dh.grid """ @@ -159,7 +166,7 @@ get_grid(dh::DofHandler) = dh.grid Return the number of degrees of freedom in `dh` """ -ndofs(dh::AbstractDofHandler) = dh.ndofs[] +ndofs(dh::AbstractDofHandler) = dh.ndofs """ ndofs_per_cell(dh::AbstractDofHandler[, cell::Int=1]) @@ -168,12 +175,20 @@ Return the number of degrees of freedom for the cell with index `cell`. See also [`ndofs`](@ref). """ -function ndofs_per_cell(dh::DofHandler, cell::Int=1) - @boundscheck 1 <= cell <= getncells(get_grid(dh)) - return @inbounds ndofs_per_cell(dh.subdofhandlers[dh.cell_to_subdofhandler[cell]]) +function ndofs_per_cell(dh::DofHandler) + if length(dh.subdofhandlers) > 1 + error("There are more than one subdofhandler. Use `ndofs_per_cell(dh, cellid::Int)` instead.") + end + @assert length(dh.subdofhandlers) != 0 + return @inbounds ndofs_per_cell(dh.subdofhandlers[1]) end -ndofs_per_cell(sdh::SubDofHandler) = sdh.ndofs_per_cell[] -ndofs_per_cell(sdh::SubDofHandler, ::Int) = sdh.ndofs_per_cell[] # for compatibility with DofHandler +function ndofs_per_cell(dh::DofHandler, cell::Int) + sdhidx = dh.cell_to_subdofhandler[cell] + sdhidx ∉ 1:length(dh.subdofhandlers) && return 0 # Dof handler is just defined on a subdomain + return ndofs_per_cell(dh.subdofhandlers[sdhidx]) +end +ndofs_per_cell(sdh::SubDofHandler) = sdh.ndofs_per_cell +ndofs_per_cell(sdh::SubDofHandler, ::Int) = sdh.ndofs_per_cell # for compatibility with DofHandler """ celldofs!(global_dofs::Vector{Int}, dh::AbstractDofHandler, i::Int) @@ -212,31 +227,31 @@ end getfieldnames(dh::DofHandler) getfieldnames(sdh::SubDofHandler) -Return a vector with the unique names of all fields. The order is the sam eas the order in +Return a vector with the unique names of all fields. The order is the same as the order in which they were originally added to the (Sub)DofHandler. Can be used as an iterable over all the fields. """ getfieldnames(dh::DofHandler) = dh.field_names getfieldnames(sdh::SubDofHandler) = sdh.field_names -getfielddim(sdh::SubDofHandler, field_idx::Int) = n_components(sdh.field_interpolations[field_idx])::Int -getfielddim(sdh::SubDofHandler, field_name::Symbol) = getfielddim(sdh, find_field(sdh, field_name)) +n_components(sdh::SubDofHandler, field_idx::Int) = n_components(sdh.field_interpolations[field_idx])::Int +n_components(sdh::SubDofHandler, field_name::Symbol) = n_components(sdh, find_field(sdh, field_name)) """ - getfielddim(dh::DofHandler, field_idxs::NTuple{2,Int}) - getfielddim(dh::DofHandler, field_name::Symbol) - getfielddim(sdh::SubDofHandler, field_idx::Int) - getfielddim(sdh::SubDofHandler, field_name::Symbol) + n_components(dh::DofHandler, field_idxs::NTuple{2,Int}) + n_components(dh::DofHandler, field_name::Symbol) + n_components(sdh::SubDofHandler, field_idx::Int) + n_components(sdh::SubDofHandler, field_name::Symbol) -Return the dimension (number of components) of a given field. The field can be specified by +Return the number of components for a given field. 
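A short sketch of how the changed accessors behave, assuming a closed DofHandler `dh` with a field `:u`:

```julia
ndofs(dh)              # total number of dofs
ndofs_per_cell(dh)     # ok with a single SubDofHandler; errors if there are several
ndofs_per_cell(dh, 1)  # per-cell variant; returns 0 if cell 1 lies outside all subdomains
n_components(dh, :u)   # number of components of field :u (replaces getfielddim)
```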
The field can be specified by its index (see [`find_field`](@ref)) or its name. """ -function getfielddim(dh::DofHandler, field_idxs::NTuple{2, Int}) +function n_components(dh::DofHandler, field_idxs::NTuple{2, Int}) sdh_idx, field_idx = field_idxs - fielddim = getfielddim(dh.subdofhandlers[sdh_idx], field_idx) - return fielddim + n = n_components(dh.subdofhandlers[sdh_idx], field_idx) + return n end -getfielddim(dh::DofHandler, name::Symbol) = getfielddim(dh, find_field(dh, name)) +n_components(dh::DofHandler, name::Symbol) = n_components(dh, find_field(dh, name)) """ add!(sdh::SubDofHandler, name::Symbol, ip::Interpolation) @@ -264,7 +279,7 @@ function add!(sdh::SubDofHandler, name::Symbol, ip::Interpolation) # TODO: warn if interpolation type is not the same? end end - + # Check that interpolation is compatible with cells it it added to refshape_sdh = getrefshape(getcells(sdh.dh.grid, first(sdh.cellset))) if refshape_sdh !== getrefshape(ip) @@ -292,7 +307,7 @@ function add!(dh::DofHandler, name::Symbol, ip::Interpolation) @assert isconcretetype(celltype) if isempty(dh.subdofhandlers) # Create a new SubDofHandler for all cells - sdh = SubDofHandler(dh, Set(1:getncells(get_grid(dh)))) + sdh = SubDofHandler(dh, OrderedSet(1:getncells(get_grid(dh)))) elseif length(dh.subdofhandlers) == 1 # Add to existing SubDofHandler (if it covers all cells) sdh = dh.subdofhandlers[1] @@ -327,7 +342,7 @@ For the `DofHandler` each `SubDofHandler` is visited in the order they were adde For each field in the `SubDofHandler` create dofs for the cell. This means that dofs on a particular cell will be numbered in groups for each field, so first the dofs for field 1 are distributed, then field 2, etc. -For each cell dofs are first distributed on its vertices, then on the interior of edges (if applicable), then on the +For each cell dofs are first distributed on its vertices, then on the interior of edges (if applicable), then on the interior of faces (if applicable), and finally on the cell interior. The entity ordering follows the geometrical ordering found in [`vertices`](@ref), [`faces`](@ref) and [`edges`](@ref). """ @@ -348,16 +363,15 @@ function __close!(dh::DofHandler{dim}) where {dim} # TODO: No need to allocate this vector for fields that don't have vertex dofs vertexdicts = [zeros(Int, getnnodes(get_grid(dh))) for _ in 1:numfields] - # `edgedict` keeps track of the visited edges, this will only be used for a 3D problem. + # `edgedict` keeps track of the visited edges. # An edge is uniquely determined by two global vertices, with global direction going - # from low to high vertex number. - edgedicts = [Dict{Tuple{Int,Int}, Int}() for _ in 1:numfields] + # from low to high vertex node number, see sortedge + edgedicts = [Dict{NTuple{2, Int}, Int}() for _ in 1:numfields] # `facedict` keeps track of the visited faces. We only need to store the first dof we - # add to the face since currently more dofs per face isn't supported. In - # 2D a face (i.e. a line) is uniquely determined by 2 vertices, and in 3D a face (i.e. a - # surface) is uniquely determined by 3 vertices. - facedicts = [Dict{NTuple{dim,Int}, Int}() for _ in 1:numfields] + # add to the face since currently more dofs per face isn't supported. 
+ # A face is uniquely determined by 3 vertex nodes, see sortface + facedicts = [Dict{NTuple{3, Int}, Int}() for _ in 1:numfields] # Set initial values nextdof = 1 # next free dof to distribute @@ -374,8 +388,8 @@ function __close!(dh::DofHandler{dim}) where {dim} facedicts, ) end - dh.ndofs[] = maximum(dh.cell_dofs; init=0) - dh.closed[] = true + dh.ndofs = maximum(dh.cell_dofs; init=0) + dh.closed = true return dh, vertexdicts, edgedicts, facedicts @@ -390,32 +404,29 @@ function _close_subdofhandler!(dh::DofHandler{sdim}, sdh::SubDofHandler, sdh_ind ip_infos = InterpolationInfo[] for interpolation in sdh.field_interpolations ip_info = InterpolationInfo(interpolation) + base_ip = get_base_interpolation(interpolation) begin next_dof_index = 1 - for vdofs ∈ vertexdof_indices(interpolation) + for vdofs ∈ vertexdof_indices(base_ip) for dof_index ∈ vdofs @assert dof_index == next_dof_index "Vertex dof ordering not supported. Please consult the dev docs." next_dof_index += 1 end end - if getdim(interpolation) > 2 - for vdofs ∈ edgedof_interior_indices(interpolation) - for dof_index ∈ vdofs - @assert dof_index == next_dof_index "Edge dof ordering not supported. Please consult the dev docs." - next_dof_index += 1 - end + for vdofs ∈ edgedof_interior_indices(base_ip) + for dof_index ∈ vdofs + @assert dof_index == next_dof_index "Edge dof ordering not supported. Please consult the dev docs." + next_dof_index += 1 end end - if getdim(interpolation) > 1 - for vdofs ∈ facedof_interior_indices(interpolation) - for dof_index ∈ vdofs - @assert dof_index == next_dof_index "Face dof ordering not supported. Please consult the dev docs." - next_dof_index += 1 - end + for vdofs ∈ facedof_interior_indices(base_ip) + for dof_index ∈ vdofs + @assert dof_index == next_dof_index "Face dof ordering not supported. Please consult the dev docs." + next_dof_index += 1 end end - for dof_index ∈ celldof_interior_indices(interpolation) - @assert next_dof_index <= dof_index <= getnbasefunctions(interpolation) "Cell dof ordering not supported. Please consult the dev docs." + for dof_index ∈ volumedof_interior_indices(base_ip) + @assert next_dof_index <= dof_index <= getnbasefunctions(base_ip) "Cell dof ordering not supported. Please consult the dev docs." 
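The ordering being asserted here can be inspected directly on an interpolation. A sketch for the quadratic Lagrange interpolation on a triangle, under the edge/face/volume convention used above (the commented values are the expected ones, not verified output):

```julia
ip = Lagrange{RefTriangle, 2}()
vertexdof_indices(ip)           # expected ((1,), (2,), (3,)): one dof per vertex
edgedof_interior_indices(ip)    # expected ((4,), (5,), (6,)): one interior dof per edge
facedof_interior_indices(ip)    # no interior face dofs for order 2
volumedof_interior_indices(ip)  # no volume dofs for a 2D reference shape
```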
end end push!(ip_infos, ip_info) @@ -436,8 +447,7 @@ function _close_subdofhandler!(dh::DofHandler{sdim}, sdh::SubDofHandler, sdh_ind global_fidxs = Int[findfirst(gname -> gname === lname, dh.field_names) for lname in sdh.field_names] # loop over all the cells, and distribute dofs for all the fields - # TODO: Remove BitSet construction when SubDofHandler ensures sorted collections - for ci in BitSet(sdh.cellset) + for ci in sdh.cellset @debug println("Creating dofs for cell #$ci") # TODO: _check_cellset_intersections can be removed in favor of this assertion @@ -464,7 +474,7 @@ function _close_subdofhandler!(dh::DofHandler{sdim}, sdh::SubDofHandler, sdh_ind if first_cell ndofs_per_cell = length(dh.cell_dofs) - len_cell_dofs_start - sdh.ndofs_per_cell[] = ndofs_per_cell + sdh.ndofs_per_cell = ndofs_per_cell first_cell = false else @assert ndofs_per_cell == length(dh.cell_dofs) - len_cell_dofs_start @@ -487,30 +497,23 @@ function _distribute_dofs_for_cell!(dh::DofHandler{sdim}, cell::AbstractCell, ip ip_info.nvertexdofs, nextdof, ip_info.n_copies, ) - # Distribute dofs for edges (only applicable when dim is 3) - if sdim == 3 && (ip_info.reference_dim == 3 || ip_info.reference_dim == 2) - # Regular 3D element or 2D interpolation embedded in 3D space - nentitydofs = ip_info.reference_dim == 3 ? ip_info.nedgedofs : ip_info.nfacedofs - nextdof = add_edge_dofs( - dh.cell_dofs, cell, edgedict, - nentitydofs, nextdof, - ip_info.adjust_during_distribution, ip_info.n_copies, - ) - end + # Distribute dofs for edges + nextdof = add_edge_dofs( + dh.cell_dofs, cell, edgedict, + ip_info.nedgedofs, nextdof, + ip_info.adjust_during_distribution, ip_info.n_copies, + ) - # Distribute dofs for faces. Filter out 2D interpolations in 3D space, since - # they are added above as edge dofs. - if ip_info.reference_dim == sdim && sdim > 1 - nextdof = add_face_dofs( - dh.cell_dofs, cell, facedict, - ip_info.nfacedofs, nextdof, - ip_info.adjust_during_distribution, ip_info.n_copies, - ) - end + # Distribute dofs for faces. 
+ nextdof = add_face_dofs( + dh.cell_dofs, cell, facedict, + ip_info.nfacedofs, nextdof, + ip_info.adjust_during_distribution, ip_info.n_copies, + ) # Distribute internal dofs for cells - nextdof = add_cell_dofs( - dh.cell_dofs, ip_info.ncelldofs, nextdof, ip_info.n_copies, + nextdof = add_volume_dofs( + dh.cell_dofs, ip_info.nvolumedofs, nextdof, ip_info.n_copies, ) return nextdof @@ -568,7 +571,7 @@ function add_face_dofs(cell_dofs::Vector{Int}, cell::AbstractCell, facedict::Dic sface, orientation = sortface(face) @debug println("\t\tface #$sface, $orientation") nextdof, dofs = get_or_create_dofs!(nextdof, nfacedofs[fi], n_copies, facedict, sface) - permute_and_push!(cell_dofs, dofs, orientation, adjust_during_distribution) + permute_and_push!(cell_dofs, dofs, orientation, adjust_during_distribution, getrefdim(cell)) # TODO: passing rdim of cell is temporary, simply to check if facedofs are internal to cell @debug println("\t\t\tadjusted dofs: $(cell_dofs[(end - nfacedofs[fi]*n_copies + 1):end])") end return nextdof @@ -587,9 +590,9 @@ function add_edge_dofs(cell_dofs::Vector{Int}, cell::AbstractCell, edgedict::Dic return nextdof end -function add_cell_dofs(cell_dofs::CD, ncelldofs::Int, nextdof::Int, n_copies::Int) where {CD} - @debug println("\t\tcelldofs #$nextdof:$(ncelldofs*n_copies-1)") - for _ in 1:ncelldofs, _ in 1:n_copies +function add_volume_dofs(cell_dofs::CD, nvolumedofs::Int, nextdof::Int, n_copies::Int) where {CD} + @debug println("\t\tvolumedofs #$nextdof:$(nvolumedofs*n_copies-1)") + for _ in 1:nvolumedofs, _ in 1:n_copies push!(cell_dofs, nextdof) nextdof += 1 end @@ -681,8 +684,7 @@ Here the unique representation is the sorted node index tuple. Note that in 3D we only need indices to uniquely identify a face, so the unique representation is always a tuple length 3. """ -sortface(face::Tuple{Int,Int}) = sortedge(face) # Face in 2D is the same as edge in 3D. - +function sortface end """ sortface_fast(face::Tuple{Int}) @@ -695,24 +697,24 @@ Here the unique representation is the sorted node index tuple. Note that in 3D we only need indices to uniquely identify a face, so the unique representation is always a tuple length 3. """ -sortface_fast(face::Tuple{Int,Int}) = sortedge_fast(face) # Face in 2D is the same as edge in 3D. +function sortface_fast end """ !!!NOTE TODO implement me. For more details we refer to [1] as we follow the methodology described therein. -[1] Scroggs, M. W., Dokken, J. S., Richardson, C. N., & Wells, G. N. (2022). - Construction of arbitrary order finite element degree-of-freedom maps on - polygonal and polyhedral cell meshes. ACM Transactions on Mathematical +[1] Scroggs, M. W., Dokken, J. S., Richardson, C. N., & Wells, G. N. (2022). + Construction of arbitrary order finite element degree-of-freedom maps on + polygonal and polyhedral cell meshes. ACM Transactions on Mathematical Software (TOMS), 48(2), 1-23. !!!TODO citation via software. !!!TODO Investigate if we can somehow pass the interpolation into this function in a typestable way. 
""" -@inline function permute_and_push!(cell_dofs::Vector{Int}, dofs::StepRange{Int,Int}, orientation::SurfaceOrientationInfo, adjust_during_distribution::Bool) - if adjust_during_distribution && length(dofs) > 1 +@inline function permute_and_push!(cell_dofs::Vector{Int}, dofs::StepRange{Int,Int}, ::SurfaceOrientationInfo, adjust_during_distribution::Bool, rdim::Int) + if rdim==3 && adjust_during_distribution && length(dofs) > 1 error("Dof distribution for interpolations with multiple dofs per face not implemented yet.") end n_copies = step(dofs) @@ -767,8 +769,21 @@ function sortface_fast(face::Tuple{Int,Int,Int,Int}) end -sortface(face::Tuple{Int}) = face, nothing -sortface_fast(face::Tuple{Int}) = face +""" + sortfacet_fast(facet::NTuple{N, Int}) + +Returns the unique representation of the `facet` by sorting its node indices. +Dispatches on `sortedges_fast` or `sortfaces_fast` depending on `N` +""" +function sortfacet_fast end + +# Vertex +sortfacet_fast(facet::Tuple{Int}) = facet +# Edge +sortfacet_fast(facet::NTuple{2, Int}) = sortedge_fast(facet) +# Face +sortfacet_fast(facet::NTuple{3, Int}) = sortface_fast(facet) +sortfacet_fast(facet::NTuple{4, Int}) = sortface_fast(facet) """ find_field(dh::DofHandler, field_name::Symbol)::NTuple{2,Int} @@ -781,7 +796,7 @@ field was found and the 2nd entry is the index of the field within the `SubDofHa Always finds the 1st occurrence of a field within `DofHandler`. See also: [`find_field(sdh::SubDofHandler, field_name::Symbol)`](@ref), -[`_find_field(sdh::SubDofHandler, field_name::Symbol)`](@ref). +[`Ferrite._find_field(sdh::SubDofHandler, field_name::Symbol)`](@ref). """ function find_field(dh::DofHandler, field_name::Symbol) for (sdh_idx, sdh) in pairs(dh.subdofhandlers) @@ -811,7 +826,7 @@ end """ _find_field(sdh::SubDofHandler, field_name::Symbol)::Int -Return the index of the field with name `field_name` in the `SubDofHandler` `sdh`. Return +Return the index of the field with name `field_name` in the `SubDofHandler` `sdh`. Return `nothing` if the field is not found. See also: [`find_field(dh::DofHandler, field_name::Symbol)`](@ref), [`find_field(sdh::SubDofHandler, field_name::Symbol)`](@ref). @@ -898,7 +913,7 @@ getfieldinterpolation(sdh::SubDofHandler, field_idx::Int) = sdh.field_interpolat getfieldinterpolation(sdh::SubDofHandler, field_name::Symbol) = getfieldinterpolation(sdh, find_field(sdh, field_name)) """ - evaluate_at_grid_nodes(dh::AbstractDofHandler, u::Vector{T}, fieldname::Symbol) where T + evaluate_at_grid_nodes(dh::AbstractDofHandler, u::AbstractVector{T}, fieldname::Symbol) where T Evaluate the approximated solution for field `fieldname` at the node coordinates of the grid given the Dof handler `dh` and the solution vector `u`. @@ -907,12 +922,12 @@ Return a vector of length `getnnodes(grid)` where entry `i` contains the evaluat approximation in the coordinate of node `i`. If the field does not live on parts of the grid, the corresponding values for those nodes will be returned as `NaN`s. 
""" -function evaluate_at_grid_nodes(dh::DofHandler, u::Vector, fieldname::Symbol) +function evaluate_at_grid_nodes(dh::DofHandler, u::AbstractVector, fieldname::Symbol) return _evaluate_at_grid_nodes(dh, u, fieldname) end # Internal method that have the vtk option to allocate the output differently -function _evaluate_at_grid_nodes(dh::DofHandler, u::Vector{T}, fieldname::Symbol, ::Val{vtk}=Val(false)) where {T, vtk} +function _evaluate_at_grid_nodes(dh::DofHandler, u::AbstractVector{T}, fieldname::Symbol, ::Val{vtk}=Val(false)) where {T, vtk} # Make sure the field exists fieldname ∈ getfieldnames(dh) || error("Field $fieldname not found.") # Figure out the return type (scalar or vector) @@ -936,7 +951,7 @@ function _evaluate_at_grid_nodes(dh::DofHandler, u::Vector{T}, fieldname::Symbol # Set up CellValues with the local node coords as quadrature points CT = getcelltype(sdh) ip = getfieldinterpolation(sdh, field_idx) - ip_geo = default_interpolation(CT) + ip_geo = geometric_interpolation(CT) local_node_coords = reference_coordinates(ip_geo) qr = QuadratureRule{getrefshape(ip)}(zeros(length(local_node_coords)), local_node_coords) if ip isa VectorizedInterpolation @@ -954,7 +969,7 @@ end # Loop over the cells and use shape functions to compute the value function _evaluate_at_grid_nodes!(data::Union{Vector,Matrix}, sdh::SubDofHandler, - u::Vector{T}, cv::CellValues, drange::UnitRange, ::Type{RT}) where {T, RT} + u::AbstractVector{T}, cv::CellValues, drange::UnitRange, ::Type{RT}) where {T, RT} ue = zeros(T, length(drange)) # TODO: Remove this hack when embedding works... if RT <: Vec && function_interpolation(cv) isa ScalarInterpolation diff --git a/src/Dofs/DofRenumbering.jl b/src/Dofs/DofRenumbering.jl index 704d89a181..43bb28daa1 100644 --- a/src/Dofs/DofRenumbering.jl +++ b/src/Dofs/DofRenumbering.jl @@ -108,7 +108,7 @@ function _renumber!(ch::ConstraintHandler, perm::AbstractVector{<:Integer}) pdofs[i] = perm[pdofs[i]] end empty!(ch.dofmapping) - ch.closed[] = false + ch.closed = false close!(ch) return ch end @@ -146,7 +146,7 @@ DofOrder.FieldWise function compute_renumber_permutation(dh::DofHandler, _, order::DofOrder.FieldWise) field_names = getfieldnames(dh) - field_dims = map(fieldname -> getfielddim(dh, fieldname), dh.field_names) + field_dims = map(fieldname -> n_components(dh, fieldname), dh.field_names) target_blocks = if isempty(order.target_blocks) Int[i for (i, dim) in pairs(field_dims) for _ in 1:dim] else @@ -177,7 +177,7 @@ DofOrder.ComponentWise function compute_renumber_permutation(dh::DofHandler, _, order::DofOrder.ComponentWise) # Note: This assumes fields have the same dimension regardless of subdomain - field_dims = map(fieldname -> getfielddim(dh, fieldname), dh.field_names) + field_dims = map(fieldname -> n_components(dh, fieldname), dh.field_names) target_blocks = if isempty(order.target_blocks) collect(Int, 1:sum(field_dims)) else diff --git a/src/Dofs/apply_analytical.jl b/src/Dofs/apply_analytical.jl index 847fd58f8c..13015a1f77 100644 --- a/src/Dofs/apply_analytical.jl +++ b/src/Dofs/apply_analytical.jl @@ -1,12 +1,12 @@ -function _default_interpolations(dh::DofHandler) +function _geometric_interpolations(dh::DofHandler) sdhs = dh.subdofhandlers getcelltype(i) = typeof(getcells(get_grid(dh), first(sdhs[i].cellset))) - ntuple(i -> default_interpolation(getcelltype(i)), length(sdhs)) + ntuple(i -> geometric_interpolation(getcelltype(i)), length(sdhs)) end """ apply_analytical!( - a::AbstractVector, dh::AbstractDofHandler, fieldname::Symbol, + a::AbstractVector, 
dh::AbstractDofHandler, fieldname::Symbol, f::Function, cellset=1:getncells(get_grid(dh))) Apply a solution `f(x)` by modifying the values in the degree of freedom vector `a` @@ -18,7 +18,7 @@ and for vector fields with dimension `dim`, `f(x)::Vec{dim}`. This function can be used to apply initial conditions for time dependent problems. !!! note - + This function only works for standard nodal finite element interpolations when the function value at the (algebraic) node is equal to the corresponding degree of freedom value. @@ -30,19 +30,20 @@ function apply_analytical!( cellset = 1:getncells(get_grid(dh))) fieldname ∉ getfieldnames(dh) && error("The fieldname $fieldname was not found in the dof handler") - ip_geos = _default_interpolations(dh) + ip_geos = _geometric_interpolations(dh) for (sdh, ip_geo) in zip(dh.subdofhandlers, ip_geos) isnothing(_find_field(sdh, fieldname)) && continue field_idx = find_field(sdh, fieldname) ip_fun = getfieldinterpolation(sdh, field_idx) - field_dim = getfielddim(sdh, field_idx) + field_dim = n_components(sdh, field_idx) celldofinds = dof_range(sdh, fieldname) set_intersection = if length(cellset) == length(sdh.cellset) == getncells(get_grid(dh)) BitSet(1:getncells(get_grid(dh))) else intersect(BitSet(sdh.cellset), BitSet(cellset)) end + isempty(set_intersection) && continue _apply_analytical!(a, dh, celldofinds, field_dim, ip_fun, ip_geo, f, set_intersection) end return a diff --git a/src/Dofs/block_sparsity_pattern.jl b/src/Dofs/block_sparsity_pattern.jl new file mode 100644 index 0000000000..465f5f5223 --- /dev/null +++ b/src/Dofs/block_sparsity_pattern.jl @@ -0,0 +1,131 @@ +######################## +# BlockSparsityPattern # +######################## + +""" + struct BlockSparsityPattern <: AbstractSparsityPattern + +Data structure representing non-zero entries for an eventual *blocked* sparse matrix. + +See the constructor [`BlockSparsityPattern(::Vector{Int})`](@ref +BlockSparsityPattern(::Vector{Int})) for the user-facing documentation. + +# Struct fields + - `nrows::Int`: number of rows + - `ncols::Int`: number of column + - `block_sizes::Vector{Int}`: row and column block sizes + - `blocks::Matrix{SparsityPattern}`: matrix of size `length(block_sizes) × + length(block_sizes)` where `blocks[i, j]` is a [`SparsityPattern`](@ref) corresponding to + block `(i, j)`. + +!!! warning "Internal struct" + The specific implementation of this struct, such as struct fields, type layout and type + parameters, are internal and should not be relied upon. +""" +struct BlockSparsityPattern <: AbstractSparsityPattern + nrows::Int + ncols::Int + block_sizes::Vector{Int} + blocks::Matrix{SparsityPattern} +end + +""" + BlockSparsityPattern(block_sizes::Vector{Int}) + +Create an empty `BlockSparsityPattern` with row and column block sizes given by +`block_sizes`. + +# Examples +```julia +# Create a block sparsity pattern with block size 10 x 5 +sparsity_pattern = BlockSparsityPattern([10, 5]) +``` + +# Methods +The following methods apply to `BlockSparsityPattern` (see their respective documentation +for more details): + + - [`add_sparsity_entries!`](@ref): convenience method for calling + [`add_cell_entries!`](@ref), [`add_interface_entries!`](@ref), and + [`add_constraint_entries!`](@ref). + - [`add_cell_entries!`](@ref): add entries corresponding to DoF couplings within the cells. + - [`add_interface_entries!`](@ref): add entries corresponding to DoF couplings on the + interface between cells. 
+ - [`add_constraint_entries!`](@ref): add entries resulting from constraints. + - [`allocate_matrix`](@ref allocate_matrix(::SparsityPattern)): instantiate a (block) + matrix from the pattern. The default matrix type is `BlockMatrix{Float64, + Matrix{SparseMatrixCSC{Float64, Int}}}`, i.e. a `BlockMatrix`, where the individual + blocks are of type `SparseMatrixCSC{Float64, Int}`. + +!!! note "Package extension" + This functionality is only enabled when the package + [BlockArrays.jl](https://github.com/JuliaArrays/BlockArrays.jl) is installed (`pkg> add + BlockArrays`) and loaded (`using BlockArrays`) in the session. +""" +function BlockSparsityPattern(blk_sizes::AbstractVector{<:Integer}) + block_sizes = collect(Int, blk_sizes) + nrows = ncols = sum(block_sizes) + nblocks = length(block_sizes) + # TODO: Maybe all of these could/should share the same PoolAllocator? + blocks = [SparsityPattern(block_sizes[i], block_sizes[j]) for i in 1:nblocks, j in 1:nblocks] + return BlockSparsityPattern(nrows, ncols, block_sizes, blocks) +end + +getnrows(bsp::BlockSparsityPattern) = bsp.nrows +getncols(bsp::BlockSparsityPattern) = bsp.ncols + +# Compute block index and local index into that block +@inline function _find_block(block_sizes::Vector{Int}, i::Int) + accumulated_block_size = 0 + block_index = 1 + while !(accumulated_block_size < i <= accumulated_block_size + block_sizes[block_index]) + accumulated_block_size += block_sizes[block_index] + block_index += 1 + end + local_index = i - accumulated_block_size + return block_index, local_index +end + +@inline function add_entry!(bsp::BlockSparsityPattern, row::Int, col::Int) + @boundscheck 1 <= row <= getnrows(bsp) && 1 <= col <= getncols(bsp) + row_block, row_local = _find_block(bsp.block_sizes, row) + col_block, col_local = _find_block(bsp.block_sizes, col) + add_entry!(bsp.blocks[row_block, col_block], row_local, col_local) + return +end + +# Helper struct to iterate over the rows. Behaves similar to +# Iterators.flatten([eachrow(bsp.blocks[row_block, col_block) for col_block in 1:nblocks]) +# but we need to add the offset to the iterated values. +struct BSPRowIterator + bsp::BlockSparsityPattern + row::Int + row_block::Int + row_local::Int + function BSPRowIterator(bsp::BlockSparsityPattern, row::Int) + @assert 1 <= row <= getnrows(bsp) + row_block, row_local = _find_block(bsp.block_sizes, row) + return new(bsp, row, row_block, row_local) + end +end + +function Base.iterate(it::BSPRowIterator, state = (1, 1)) + col_block, idx = state + bsp = it.bsp + col_block > length(bsp.block_sizes) && return nothing + block = bsp.blocks[it.row_block, col_block] + colidxs = eachrow(block, it.row_local) + if idx > length(colidxs) + # Advance the col_block and reset idx to 1 + return iterate(it, (col_block + 1, 1)) + else + # Compute global col idx and advance idx + col_local = colidxs[idx] + offset = sum((bsp.block_sizes[i] for i in 1:col_block-1); init = 0) + return offset + col_local, (col_block, idx + 1) + end +end + +# TODO: eltype of the generator do not infer; might need another auxiliary struct. 
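Tying these pieces together, a sketch of the intended blocked workflow. The block sizes `n_u` and `n_p` are hypothetical placeholders, the dofs are assumed to have been renumbered so that each field forms a contiguous block, and the BlockArrays extension must be loaded:

```julia
using Ferrite, BlockArrays
renumber!(dh, DofOrder.FieldWise())     # make the dofs of each field contiguous
bsp = BlockSparsityPattern([n_u, n_p])  # n_u, n_p: number of dofs in each block
add_sparsity_entries!(bsp, dh)
K = allocate_matrix(bsp)  # BlockMatrix with SparseMatrixCSC blocks (default per the docstring above)
```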
+eachrow(bsp::BlockSparsityPattern) = (BSPRowIterator(bsp, row) for row in 1:getnrows(bsp)) +eachrow(bsp::BlockSparsityPattern, row::Int) = BSPRowIterator(bsp, row) diff --git a/src/Dofs/sparsity_pattern.jl b/src/Dofs/sparsity_pattern.jl index 46ef67c3d3..def97b55e4 100644 --- a/src/Dofs/sparsity_pattern.jl +++ b/src/Dofs/sparsity_pattern.jl @@ -1,76 +1,453 @@ +########################### +# AbstractSparsityPattern # +########################### + +""" + Ferrite.AbstractSparsityPattern + +Supertype for sparsity pattern implementations, e.g. [`SparsityPattern`](@ref) and +[`BlockSparsityPattern`](@ref). +""" +abstract type AbstractSparsityPattern end + +""" + getnrows(sp::AbstractSparsityPattern) + +Return the number of rows in the sparsity pattern `sp`. +""" +getnrows(sp::AbstractSparsityPattern) + +""" + getncols(sp::AbstractSparsityPattern) + +Return the number of columns in the sparsity pattern `sp`. +""" +getncols(sp::AbstractSparsityPattern) + +""" + add_entry!(sp::AbstractSparsityPattern, row::Int, col::Int) + +Add an entry to the sparsity pattern `sp` at row `row` and column `col`. +""" +add_entry!(sp::AbstractSparsityPattern, row::Int, col::Int) + +# This is necessary to avoid warning about not importing Base.eachrow when +# adding docstring before the definitions further down. +function eachrow end + +""" + eachrow(sp::AbstractSparsityPattern) + +Return an iterator over the rows of the sparsity pattern `sp`. +Each element of the iterator iterates indices of the stored *columns* for that row. +""" +eachrow(sp::AbstractSparsityPattern) + +""" + eachrow(sp::AbstractSparsityPattern, row::Int) + +Return an iterator over *column* indices in row `row` of the sparsity pattern. + +Conceptually this is equivalent to [`eachrow(sp)[row]`](@ref +eachrow(::AbstractSparsityPattern)). However, the iterator `eachrow(sp)` isn't always +indexable. This method should be used when a specific row needs to be "random access"d. +""" +eachrow(sp::AbstractSparsityPattern, row::Int) + + +################### +# SparsityPattern # +################### + +""" + struct SparsityPattern <: AbstractSparsityPattern + +Data structure representing non-zero entries in the eventual sparse matrix. + +See the constructor [`SparsityPattern(::Int, ::Int)`](@ref) for the user-facing +documentation. + +# Struct fields + - `nrows::Int`: number of rows + - `ncols::Int`: number of column + - `rows::Vector{Vector{Int}}`: vector of length `nrows`, where `rows[i]` is a + *sorted* vector of column indices for non zero entries in row `i`. + +!!! warning "Internal struct" + The specific implementation of this struct, such as struct fields, type layout and type + parameters, are internal and should not be relied upon. +""" +struct SparsityPattern <: AbstractSparsityPattern + nrows::Int + ncols::Int + mempool::PoolAllocator.MemoryPool{Int} + rows::Vector{PoolAllocator.PoolVector{Int}} +end + +""" + SparsityPattern(nrows::Int, ncols::Int; nnz_per_row::Int = 8) + +Create an empty [`SparsityPattern`](@ref) with `nrows` rows and `ncols` columns. +`nnz_per_row` is used as a memory hint for the number of non zero entries per +row. + +`SparsityPattern` is the default sparsity pattern type for the standard DofHandler and is +therefore commonly constructed using [`init_sparsity_pattern`](@ref) instead of with this +constructor. 
+ +# Examples +```julia +# Create a sparsity pattern for an 100 x 100 matrix, hinting at 10 entries per row +sparsity_pattern = SparsityPattern(100, 100; nnz_per_row = 10) +``` + +# Methods +The following methods apply to `SparsityPattern` (see their respective documentation for +more details): + - [`add_sparsity_entries!`](@ref): convenience method for calling + [`add_cell_entries!`](@ref), [`add_interface_entries!`](@ref), and + [`add_constraint_entries!`](@ref). + - [`add_cell_entries!`](@ref): add entries corresponding to DoF couplings within the cells. + - [`add_interface_entries!`](@ref): add entries corresponding to DoF couplings on the + interface between cells. + - [`add_constraint_entries!`](@ref): add entries resulting from constraints. + - [`allocate_matrix`](@ref allocate_matrix(::SparsityPattern)): instantiate a matrix from + the pattern. The default matrix type is `SparseMatrixCSC{Float64, Int}`. +""" +function SparsityPattern(nrows::Int, ncols::Int; nnz_per_row::Int = 8) + mempool = PoolAllocator.MemoryPool{Int}() + rows = Vector{PoolAllocator.PoolVector{Int}}(undef, nrows) + for i in 1:nrows + rows[i] = PoolAllocator.resize(PoolAllocator.malloc(mempool, nnz_per_row), 0) + end + sp = SparsityPattern(nrows, ncols, mempool, rows) + return sp +end + +function Base.show(io::IO, ::MIME"text/plain", sp::SparsityPattern) + iob = IOBuffer() + println(iob, "$(getnrows(sp))×$(getncols(sp)) $(sprint(show, typeof(sp))):") + # Collect min/max/avg entries per row + min_entries = typemax(Int) + max_entries = typemin(Int) + stored_entries = 0 + for r in eachrow(sp) + l = length(r) + stored_entries += l + min_entries = min(min_entries, l) + max_entries = max(max_entries, l) + end + # Print sparsity + sparsity_pct = round( + (getnrows(sp) * getncols(sp) - stored_entries) / (getnrows(sp) * getncols(sp)) * 100 * 1000 + ) / 1000 + println(iob, " - Sparsity: $(sparsity_pct)% ($(stored_entries) stored entries)") + # Print row stats + avg_entries = round(stored_entries / getnrows(sp) * 10) / 10 + println(iob, " - Entries per row (min, max, avg): $(min_entries), $(max_entries), $(avg_entries)") + # Compute memory estimate + @assert getnrows(sp) * sizeof(eltype(sp.rows)) == sizeof(sp.rows) + bytes_used = sizeof(sp.rows) + stored_entries * sizeof(Int) + bytes_allocated = sizeof(sp.rows) + PoolAllocator.mempool_stats(sp.mempool)[2] + print(iob, " - Memory estimate: $(Base.format_bytes(bytes_used)) used, $(Base.format_bytes(bytes_allocated)) allocated") + write(io, seekstart(iob)) + return +end + +getnrows(sp::SparsityPattern) = sp.nrows +getncols(sp::SparsityPattern) = sp.ncols + +@inline function add_entry!(sp::SparsityPattern, row::Int, col::Int) + @boundscheck (1 <= row <= getnrows(sp) && 1 <= col <= getncols(sp)) || throw(BoundsError(sp, (row, col))) + r = @inbounds sp.rows[row] + r = insert_sorted(r, col) + @inbounds sp.rows[row] = r + return +end + +@inline function insert_sorted(x::PoolAllocator.PoolVector{Int}, item::Int) + k = searchsortedfirst(x, item) + if k == length(x) + 1 || @inbounds(x[k]) != item + x = PoolAllocator.insert(x, k, item) + end + return x +end + +eachrow(sp::SparsityPattern) = sp.rows +eachrow(sp::SparsityPattern, row::Int) = sp.rows[row] + + +################################################ +## Adding entries to AbstractSparsityPatterns ## +################################################ + """ - create_sparsity_pattern(dh::DofHandler; coupling, [topology::Union{Nothing, AbstractTopology}], [cross_coupling]) + init_sparsity_pattern(dh::DofHandler; nnz_per_row::Int) 
-Create the sparsity pattern corresponding to the degree of freedom -numbering in the [`DofHandler`](@ref). Return a `SparseMatrixCSC` -with stored values in the correct places. +Initialize an empty [`SparsityPattern`](@ref) with `ndofs(dh)` rows and `ndofs(dh)` columns. -The keyword arguments `coupling` and `cross_coupling` can be used to specify how fields (or components) in the dof -handler couple to each other. `coupling` and `cross_coupling` should be square matrices of booleans with -number of rows/columns equal to the total number of fields, or total number of components, -in the DofHandler with `true` if fields are coupled and `false` if -not. By default full coupling is assumed inside the element with no coupling between elements. +# Keyword arguments + - `nnz_per_row`: memory optimization hint for the number of non-zero entries per row that + will be added to the pattern. +""" +function init_sparsity_pattern( + dh::DofHandler; + # TODO: What is a good estimate for nnz_per_row? + nnz_per_row::Int = 2 * ndofs_per_cell(dh.subdofhandlers[1]), # FIXME + ) + sp = SparsityPattern(ndofs(dh), ndofs(dh); nnz_per_row = nnz_per_row) + return sp +end -If `topology` and `cross_coupling` are passed, dof of fields with discontinuous interpolations are coupled between elements according to `cross_coupling`. +""" + add_sparsity_entries!( + sp::AbstractSparsityPattern, + dh::DofHandler, + ch::Union{ConstraintHandler, Nothing} = nothing; + topology = nothing, + keep_constrained::Bool = true, + coupling = nothing, + interface_coupling = nothing, + ) + +Convenience method for doing the common task of calling [`add_cell_entries!`](@ref), +[`add_interface_entries!`](@ref), and [`add_constraint_entries!`](@ref), depending on what +arguments are passed: + - `add_cell_entries!` is always called + - `add_interface_entries!` is called if `topology` is provided (i.e. not `nothing`) + - `add_constraint_entries!` is called if the ConstraintHandler is provided + +For more details about arguments and keyword arguments, see the respective functions. +""" +function add_sparsity_entries!( + sp::AbstractSparsityPattern, dh::DofHandler, + ch::Union{ConstraintHandler, Nothing} = nothing; + keep_constrained::Bool = true, + coupling::Union{AbstractMatrix{Bool}, Nothing} = nothing, + interface_coupling::Union{AbstractMatrix{Bool}, Nothing} = nothing, + topology = nothing, + ) + # Argument checking + isclosed(dh) || error("the DofHandler must be closed") + if getnrows(sp) < ndofs(dh) || getncols(sp) < ndofs(dh) + error("number of rows ($(getnrows(sp))) or columns ($(getncols(sp))) in the sparsity pattern is smaller than number of dofs ($(ndofs(dh)))") + end + # Add all entries + add_diagonal_entries!(sp) + add_cell_entries!(sp, dh, ch; keep_constrained, coupling) + if topology !== nothing + add_interface_entries!(sp, dh, ch; topology, keep_constrained, interface_coupling) + end + if ch !== nothing + add_constraint_entries!(sp, ch; keep_constrained) + end + return sp +end -See the [Sparsity Pattern](@ref) section of the manual. 
""" -function create_sparsity_pattern(dh::AbstractDofHandler; coupling=nothing, - topology::Union{Nothing, AbstractTopology} = nothing, cross_coupling = nothing) - return _create_sparsity_pattern(dh, nothing, false, true, coupling, topology, cross_coupling) + add_cell_entries!( + sp::AbstractSparsityPattern, + dh::DofHandler, + ch::Union{ConstraintHandler, Nothing} = nothing; + keep_constrained::Bool = true, + coupling::Union{AbstractMatrix{Bool}, Nothing}, = nothing + ) + +Add entries to the sparsity pattern `sp` corresponding to DoF couplings within the cells as +described by the DofHandler `dh`. + +# Keyword arguments + - `keep_constrained`: whether or not entries for constrained DoFs should be kept + (`keep_constrained = true`) or eliminated (`keep_constrained = false`) from the sparsity + pattern. `keep_constrained = false` requires passing the ConstraintHandler `ch`. + - `coupling`: the coupling between fields/components within each cell. By default + (`coupling = nothing`) it is assumed that all DoFs in each cell couple with each other. +""" +function add_cell_entries!( + sp::AbstractSparsityPattern, + dh::DofHandler, ch::Union{ConstraintHandler, Nothing} = nothing; + keep_constrained::Bool = true, coupling::Union{AbstractMatrix{Bool}, Nothing} = nothing, + ) + # Expand coupling from nfields × nfields to ndofs_per_cell × ndofs_per_cell + # TODO: Perhaps this can be done in the loop over SubDofHandlers instead. + if coupling !== nothing + coupling = _coupling_to_local_dof_coupling(dh, coupling) + end + if !keep_constrained + ch === nothing && error("must pass ConstraintHandler when `keep_constrained = true`") + isclosed(ch) || error("the ConstraintHandler must be closed") + ch.dh === dh || error("the DofHandler and the ConstraintHandler's DofHandler must be the same") + end + return _add_cell_entries!(sp, dh, ch, keep_constrained, coupling) end """ - create_symmetric_sparsity_pattern(dh::DofHandler; coupling, topology::Union{Nothing, AbstractTopology}, cross_coupling) + add_interface_entries!( + sp::SparsityPattern, dh::DofHandler, ch::Union{ConstraintHandler, Nothing}; + topology::ExclusiveTopology, keep_constrained::Bool = true, + interface_coupling::AbstractMatrix{Bool}, + ) + +Add entries to the sparsity pattern `sp` corresponding to DoF couplings on the interface +between cells as described by the DofHandler `dh`. + +# Keyword arguments + - `topology`: the topology corresponding to the grid. + - `keep_constrained`: whether or not entries for constrained DoFs should be kept + (`keep_constrained = true`) or eliminated (`keep_constrained = false`) from the sparsity + pattern. `keep_constrained = false` requires passing the ConstraintHandler `ch`. + - `interface_coupling`: the coupling between fields/components across the interface. 
+""" +function add_interface_entries!( + sp::SparsityPattern, dh::DofHandler, ch::Union{ConstraintHandler, Nothing} = nothing; + topology::ExclusiveTopology, keep_constrained::Bool = true, + interface_coupling::AbstractMatrix{Bool}, + ) + if !keep_constrained + ch === nothing && error("must pass ConstraintHandler when `keep_constrained = true`") + isclosed(ch) || error("the ConstraintHandler must be closed") + ch.dh === dh || error("the DofHandler and the ConstraintHandler's DofHandler must be the same") + end + return _add_interface_entries!(sp, dh, ch, topology, keep_constrained, interface_coupling) +end -Create the symmetric sparsity pattern corresponding to the degree of freedom -numbering in the [`DofHandler`](@ref) by only considering the upper -triangle of the matrix. Return a `Symmetric{SparseMatrixCSC}`. +""" + add_constraint_entries!( + sp::AbstractSparsityPattern, ch::ConstraintHandler; + keep_constrained::Bool = true, + ) + +Add all entries resulting from constraints in the ConstraintHandler `ch` to the sparsity +pattern. Note that, since this operation depends on existing entries in the pattern, this +function must be called as the *last* step when creating the sparsity pattern. + +# Keyword arguments + - `keep_constrained`: whether or not entries for constrained DoFs should be kept + (`keep_constrained = true`) or eliminated (`keep_constrained = false`) from the sparsity + pattern. +""" +function add_constraint_entries!( + sp::AbstractSparsityPattern, ch::ConstraintHandler; + keep_constrained::Bool = true, +) + return _add_constraint_entries!(sp, ch.dofcoefficients, ch.dofmapping, keep_constrained) +end -See the [Sparsity Pattern](@ref) section of the manual. +function add_diagonal_entries!(sp::AbstractSparsityPattern) + for d in 1:min(getnrows(sp), getncols(sp)) + add_entry!(sp, d, d) + end + return sp +end + + +############################################################ +# Sparse matrix instantiation from AbstractSparsityPattern # +############################################################ + +""" + allocate_matrix(::Type{SparseMatrixCSC{Tv, Ti}}, sp::SparsityPattern) + +Allocate a sparse matrix of type `SparseMatrixCSC{Tv, Ti}` from the sparsity pattern `sp`. """ -function create_symmetric_sparsity_pattern(dh::AbstractDofHandler; coupling=nothing, - topology::Union{Nothing, AbstractTopology} = nothing, cross_coupling = nothing) - return Symmetric(_create_sparsity_pattern(dh, nothing, true, true, coupling, topology, cross_coupling), :U) +function allocate_matrix(::Type{S}, sp::AbstractSparsityPattern) where {Tv, Ti, S <: SparseMatrixCSC{Tv, Ti}} + return _allocate_matrix(S, sp, #=sym=# false) end """ - create_symmetric_sparsity_pattern(dh::AbstractDofHandler, ch::ConstraintHandler; coupling, topology::Union{Nothing, AbstractTopology}, cross_coupling) + allocate_matrix(::Type{Symmetric{Tv, SparseMatrixCSC{Tv, Ti}}}, sp::SparsityPattern) -Create a symmetric sparsity pattern accounting for affine constraints in `ch`. See -the Affine Constraints section of the manual for further details. +Instantiate a sparse matrix of type `Symmetric{Tv, SparseMatrixCSC{Tv, Ti}}`, i.e. a +`LinearAlgebra.Symmetric`-wrapped `SparseMatrixCSC`, from the sparsity pattern `sp`. The +resulting matrix will only store entries above, and including, the diagonal. 
""" -function create_symmetric_sparsity_pattern(dh::AbstractDofHandler, ch::ConstraintHandler; - keep_constrained::Bool=true, coupling=nothing, topology::Union{Nothing, AbstractTopology} = nothing, - cross_coupling = nothing) - return Symmetric(_create_sparsity_pattern(dh, ch, true, keep_constrained, coupling, topology, cross_coupling), :U) +function allocate_matrix(::Type{Symmetric{Tv, S}}, sp::AbstractSparsityPattern) where {Tv, Ti, S <: SparseMatrixCSC{Tv, Ti}} + return Symmetric(_allocate_matrix(S, sp, #=sym=# true)) end """ - create_sparsity_pattern(dh::AbstractDofHandler, ch::ConstraintHandler; coupling, topology::Union{Nothing, AbstractTopology} = nothing) + allocate_matrix(sp::SparsityPattern) + +Allocate a sparse matrix of type `SparseMatrixCSC{Float64, Int}` from the sparsity pattern +`sp`. + +This method is a shorthand for the equivalent +[`allocate_matrix(SparseMatrixCSC{Float64, Int}, sp)`] +(@ref allocate_matrix(::Type{S}, sp::Ferrite.AbstractSparsityPattern) where {Tv, Ti, S <: SparseMatrixCSC{Tv, Ti}}). +""" +allocate_matrix(sp::SparsityPattern) = allocate_matrix(SparseMatrixCSC{Float64, Int}, sp) -Create a sparsity pattern accounting for affine constraints in `ch`. See -the Affine Constraints section of the manual for further details. """ -function create_sparsity_pattern(dh::AbstractDofHandler, ch::ConstraintHandler; - keep_constrained::Bool=true, coupling=nothing, topology::Union{Nothing, AbstractTopology} = nothing, - cross_coupling = nothing) - return _create_sparsity_pattern(dh, ch, false, keep_constrained, coupling, topology, cross_coupling) + allocate_matrix(MatrixType, dh::DofHandler, args...; kwargs...) + +Allocate a matrix of type `MatrixType` from the DofHandler `dh`. + +This is a convenience method and is equivalent to: + +```julia +sp = init_sparsity_pattern(dh) +add_sparsity_entries!(sp, dh, args...; kwargs...) +allocate_matrix(MatrixType, sp) +```` + +Refer to [`allocate_matrix`](@ref allocate_matrix(::Type{<:Any}, ::SparsityPattern)) for +supported matrix types, and to [`init_sparsity_pattern`](@ref) for details about supported +arguments `args` and keyword arguments `kwargs`. + +!!! note + If more than one sparse matrix is needed (e.g. a stiffness and a mass matrix) it is more + efficient to explicitly create the sparsity pattern instead of using this method, i.e. + use + ```julia + sp = init_sparsity_pattern(dh) + add_sparsity_entries!(sp, dh) + K = allocate_matrix(sp) + M = allocate_matrix(sp) + ``` + instead of + ```julia + K = allocate_matrix(dh) + M = allocate_matrix(dh) + ``` + Note that for some matrix types it is possible to `copy` the instantiated matrix (`M = + copy(K)`) instead. +""" +function allocate_matrix(::Type{MatrixType}, dh::DofHandler, args...; kwargs...) where {MatrixType} + sp = init_sparsity_pattern(dh) + add_sparsity_entries!(sp, dh, args...; kwargs...) + return allocate_matrix(MatrixType, sp) end +""" + allocate_matrix(dh::DofHandler, args...; kwargs...) + +Allocate a matrix of type `SparseMatrixCSC{Float64, Int}` from the DofHandler `dh`. + +This method is a shorthand for the equivalent [`allocate_matrix(SparseMatrixCSC{Float64, Int}, +dh, args...; kwargs...)`](@ref allocate_matrix(::Type{MatrixType}, ::DofHandler, args...; +kwargs...) where {MatrixType}) -- refer to that method for details. +""" +function allocate_matrix(dh::DofHandler, args...; kwargs...) + return allocate_matrix(SparseMatrixCSC{Float64, Int}, dh, args...; kwargs...) 
+end + + +############################## +# Sparsity pattern internals # +############################## + # Compute a coupling matrix of size (ndofs_per_cell × ndofs_per_cell) based on the input # coupling which can be of size i) (nfields × nfields) specifying coupling between fields, # ii) (ncomponents × ncomponents) specifying coupling between components, or iii) # (ndofs_per_cell × ndofs_per_cell) specifying coupling between all local dofs, i.e. a # "template" local matrix. -function _coupling_to_local_dof_coupling(dh::DofHandler, coupling::AbstractMatrix{Bool}, sym::Bool) +function _coupling_to_local_dof_coupling(dh::DofHandler, coupling::AbstractMatrix{Bool}) sz = size(coupling, 1) sz == size(coupling, 2) || error("coupling not square") - sym && (issymmetric(coupling) || error("coupling not symmetric")) # Return one matrix per (potential) sub-domain outs = Matrix{Bool}[] - field_dims = map(fieldname -> getfielddim(dh, fieldname), dh.field_names) + field_dims = map(fieldname -> n_components(dh, fieldname), dh.field_names) for sdh in dh.subdofhandlers out = zeros(Bool, ndofs_per_cell(sdh), ndofs_per_cell(sdh)) @@ -102,40 +479,137 @@ function _coupling_to_local_dof_coupling(dh::DofHandler, coupling::AbstractMatri return outs end -""" - _add_cross_coupling(coupling_sdh, dof_i, dof_j, cell_field_dofs, neighbor_field_dofs, i, j, sym, keep_constrained, ch, cnt, I, J) +function _add_cell_entries!( + sp::AbstractSparsityPattern, dh::DofHandler, ch::Union{ConstraintHandler, Nothing}, + keep_constrained::Bool, coupling::Union{Vector{<:AbstractMatrix{Bool}}, Nothing}, + ) + # Add all connections between dofs for every cell while filtering based + # on a) constraints, and b) field/dof coupling. + cc = CellCache(dh) + for (sdhi, sdh) in pairs(dh.subdofhandlers) + set = BitSet(sdh.cellset) + coupling === nothing || (coupling_sdh = coupling[sdhi]) + for cell_id in set + reinit!(cc, cell_id) + for (i, row) in pairs(cc.dofs) + # a) check constraint for row + !keep_constrained && haskey(ch.dofmapping, row) && continue + # TODO: Extracting the row here and reinserting after the j-loop + # should give some nice speedup + for (j, col) in pairs(cc.dofs) + # b) check coupling between (local) dofs i and j + coupling === nothing || coupling_sdh[i, j] || continue + # a) check constraint for col + !keep_constrained && haskey(ch.dofmapping, col) && continue + # Insert col as a non zero index for this row + add_entry!(sp, row, col) + end + end + end + end + return sp +end -Helper function used to mutate `I` and `J` to add cross-element coupling. -""" -function _add_cross_coupling(coupling_sdh::Matrix{Bool}, dof_i::Int, dof_j::Int, - cell_field_dofs::Union{Vector{Int}, SubArray}, neighbor_field_dofs::Union{Vector{Int}, SubArray}, - i::Int, j::Int, sym::Bool, keep_constrained::Bool, ch::Union{ConstraintHandler, Nothing}, cnt::Int, I::Vector{Int}, J::Vector{Int}) +function _add_constraint_entries!( + sp::AbstractSparsityPattern, dofcoefficients::Vector{Union{DofCoefficients{T}, Nothing}}, + dofmapping::Dict{Int,Int}, keep_constrained::Bool, + ) where {T} - coupling_sdh[dof_i, dof_j] || return cnt + # Return early if there are no non-trivial affine constraints + any(i -> !(i === nothing || isempty(i)), dofcoefficients) || return + + # New entries tracked separately and inserted after since it is not possible to modify + # the datastructure while looping over it. 
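For instance, a single non-trivial affine constraint is enough to trigger this code path. A sketch, with arbitrary dof numbers and assuming a closed DofHandler `dh` with at least that many dofs:

```julia
ch = ConstraintHandler(dh)
add!(ch, AffineConstraint(4, [2 => 0.5, 7 => 0.5], 0.0))  # u_4 = 0.5 u_2 + 0.5 u_7
close!(ch)
sp = init_sparsity_pattern(dh)
# add_constraint_entries! runs last internally, since it expands entries already in the pattern
add_sparsity_entries!(sp, dh, ch)
```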
+ mempool = PoolAllocator.MemoryPool{Int}() + sp′ = Dict{Int, PoolAllocator.PoolVector{Int}}() + + for (row, colidxs) in zip(1:getnrows(sp), eachrow(sp)) # pairs(eachrow(sp)) + row_coeffs = coefficients_for_dof(dofmapping, dofcoefficients, row) + if row_coeffs === nothing + # This row is _not_ constrained, check columns of this row... + !keep_constrained && haskey(dofmapping, row) && continue + for col in colidxs + col_coeffs = coefficients_for_dof(dofmapping, dofcoefficients, col) + if col_coeffs === nothing + # ... this column is _not_ constrained, done. + continue + else + # ... this column _is_ constrained, distribute to columns. + for (col′, _) in col_coeffs + r = get(sp′, row) do + PoolAllocator.resize(PoolAllocator.malloc(mempool, 8), 0) + end + r = insert_sorted(r, col′) + sp′[row] = r + end + end + end + else + # This row _is_ constrained, check columns of this row... + for col in colidxs + col_coeffs = coefficients_for_dof(dofmapping, dofcoefficients, col) + if col_coeffs === nothing + # ... this column is _not_ constrained, distribute to rows. + !keep_constrained && haskey(dofmapping, col) && continue + for (row′, _) in row_coeffs + r = get(sp′, row′) do + PoolAllocator.resize(PoolAllocator.malloc(mempool, 8), 0) + end + r = insert_sorted(r, col) + sp′[row′] = r + end + else + # ... this column _is_ constrained, double-distribute to columns/rows. + for (row′, _) in row_coeffs + !keep_constrained && haskey(dofmapping, row′) && continue + for (col′, _) in col_coeffs + !keep_constrained && haskey(dofmapping, col′) && continue + r = get(sp′, row′) do + PoolAllocator.resize(PoolAllocator.malloc(mempool, 8), 0) + end + r = insert_sorted(r, col′) + sp′[row′] = r + end + end + end + end + end + end + + # Insert new entries into the sparsity pattern + for (row, colidxs) in sp′ + # TODO: Extract row here and just insert_sorted + for col in colidxs + add_entry!(sp, row, col) + end + end + + return sp +end + +function _add_interface_entry(sp::SparsityPattern, + cell_field_dofs::Union{Vector{Int}, SubArray}, neighbor_field_dofs::Union{Vector{Int}, SubArray}, + i::Int, j::Int, keep_constrained::Bool, ch::Union{ConstraintHandler, Nothing}) dofi = cell_field_dofs[i] dofj = neighbor_field_dofs[j] - sym && (dofj > dofi && return cnt) - !keep_constrained && (haskey(ch.dofmapping, dofi) || haskey(ch.dofmapping, dofj)) && return cnt - cnt += 1 - _add_or_grow(cnt, I, J, dofi, dofj) - return cnt + # sym && (dofj > dofi && return cnt) + !keep_constrained && (haskey(ch.dofmapping, dofi) || haskey(ch.dofmapping, dofj)) && return + add_entry!(sp, dofi, dofj) + return end -""" - cross_element_coupling!(dh::DofHandler, topology::ExclusiveTopology, sym::Bool, keep_constrained::Bool, couplings::Union{AbstractVector{<:AbstractMatrix{Bool}},Nothing}, cnt::Int, I::Vector{Int}, J::Vector{Int}) - -Mutates `I, J` to account for cross-element coupling by calling [`_add_cross_coupling`](@ref). -Returns the updated value of `cnt`. - -Used internally for sparsity patterns with cross-element coupling. 
-""" -function cross_element_coupling!(dh::DofHandler, ch::Union{ConstraintHandler, Nothing}, topology::ExclusiveTopology, sym::Bool, keep_constrained::Bool, couplings::AbstractVector{<:AbstractMatrix{Bool}}, cnt::Int, I::Vector{Int}, J::Vector{Int}) - fca = FaceCache(CellCache(dh, UpdateFlags(false, false, true)), Int[], ScalarWrapper(0)) - fcb = FaceCache(CellCache(dh, UpdateFlags(false, false, true)), Int[], ScalarWrapper(0)) - ic = InterfaceCache(fca, fcb, Int[]) - for ic in InterfaceIterator(ic, dh.grid, topology) +function _add_interface_entries!( + sp::SparsityPattern, dh::DofHandler, ch::Union{ConstraintHandler, Nothing}, + topology::ExclusiveTopology, keep_constrained::Bool, + interface_coupling::AbstractMatrix{Bool}, + ) + couplings = _coupling_to_local_dof_coupling(dh, interface_coupling) + for ic in InterfaceIterator(dh, topology) + # TODO: This looks like it can be optimized for the common case where + # the cells are in the same subdofhandler sdhs_idx = dh.cell_to_subdofhandler[cellid.([ic.a, ic.b])] sdhs = dh.subdofhandlers[sdhs_idx] + to_check = Dict{Int, Vector{Int}}() for (i, sdh) in pairs(sdhs) sdh_idx = sdhs_idx[i] coupling_sdh = couplings[sdh_idx] @@ -144,161 +618,64 @@ function cross_element_coupling!(dh::DofHandler, ch::Union{ConstraintHandler, No cell_dofs = celldofs(sdh_idx == 1 ? ic.a : ic.b) cell_field_dofs = @view cell_dofs[dofrange1] for neighbor_field in sdh.field_names - sdh2 = sdhs[i==1 ? 2 : 1] + sdh2 = sdhs[i == 1 ? 2 : 1] neighbor_field ∈ sdh2.field_names || continue dofrange2 = dof_range(sdh2, neighbor_field) neighbor_dofs = celldofs(sdh_idx == 2 ? ic.a : ic.b) neighbor_field_dofs = @view neighbor_dofs[dofrange2] - # Typical coupling procedure - for (j, dof_j) in pairs(dofrange2), (i, dof_i) in pairs(dofrange1) - # This line to avoid coupling the shared dof in continuous interpolations as cross-element. They're coupled in the local coupling matrix. - (cell_field_dofs[i] ∈ neighbor_dofs || neighbor_field_dofs[j] ∈ cell_dofs) && continue - cnt = _add_cross_coupling(coupling_sdh, dof_i, dof_j, cell_field_dofs, neighbor_field_dofs, i, j, sym, keep_constrained, ch, cnt, I, J) - cnt = _add_cross_coupling(coupling_sdh, dof_j, dof_i, neighbor_field_dofs, cell_field_dofs, j, i, sym, keep_constrained, ch, cnt, I, J) + + empty!(to_check) + for (j, dof_j) in enumerate(dofrange2) + for (i, dof_i) in enumerate(dofrange1) + coupling_sdh[dof_i, dof_j] || continue + push!(get!(Vector{Int}, to_check, j), i) + end + end + + for (j, is) in to_check + # Avoid coupling the shared dof in continuous interpolations as cross-element. They're coupled in the local coupling matrix. + neighbor_field_dofs[j] ∈ cell_dofs && continue + for i in is + cell_field_dofs[i] ∈ neighbor_dofs && continue + _add_interface_entry(sp, cell_field_dofs, neighbor_field_dofs, i, j, keep_constrained, ch) + _add_interface_entry(sp, neighbor_field_dofs, cell_field_dofs, j, i, keep_constrained, ch) + end end end end end end - return cnt + return sp end -function _create_sparsity_pattern(dh::AbstractDofHandler, ch#=::Union{ConstraintHandler, Nothing}=#, sym::Bool, keep_constrained::Bool, coupling::Union{AbstractMatrix{Bool},Nothing}, - topology::Union{Nothing, AbstractTopology}, cross_coupling::Union{AbstractMatrix{Bool},Nothing}) - @assert isclosed(dh) - if !keep_constrained - @assert ch !== nothing && isclosed(ch) - end - - couplings = isnothing(coupling) ? nothing : _coupling_to_local_dof_coupling(dh, coupling, sym) - cross_couplings = isnothing(cross_coupling) ? 
nothing : _coupling_to_local_dof_coupling(dh, cross_coupling, sym) - - # Allocate buffers. Compute an upper bound for the buffer length and allocate it all up - # front since they will become large and expensive to re-allocate. The bound is exact - # when keeping constrained dofs (default) and if not it only over-estimates with number - # of entries eliminated by constraints. - max_buffer_length = ndofs(dh) # diagonal elements - for (sdh_idx, sdh) in pairs(dh.subdofhandlers) - set = sdh.cellset - n = ndofs_per_cell(sdh) - entries_per_cell = if coupling === nothing - sym ? div(n * (n + 1), 2) : n^2 - else - coupling_sdh = couplings[sdh_idx] - count(coupling_sdh[i, j] for i in 1:n for j in (sym ? i : 1):n) - end - max_buffer_length += entries_per_cell * length(set) - end - I = Vector{Int}(undef, max_buffer_length) - J = Vector{Int}(undef, max_buffer_length) - global_dofs = Int[] - cnt = 0 - - for (sdh_idx, sdh) in pairs(dh.subdofhandlers) - coupling === nothing || (coupling_sdh = couplings[sdh_idx]) - # TODO: Remove BitSet construction when SubDofHandler ensures sorted collections - set = BitSet(sdh.cellset) - n = ndofs_per_cell(sdh) - resize!(global_dofs, n) - @inbounds for element_id in set - celldofs!(global_dofs, dh, element_id) - for j in eachindex(global_dofs), i in eachindex(global_dofs) - coupling === nothing || coupling_sdh[i, j] || continue - dofi = global_dofs[i] - dofj = global_dofs[j] - sym && (dofi > dofj && continue) - !keep_constrained && (haskey(ch.dofmapping, dofi) || haskey(ch.dofmapping, dofj)) && continue - cnt += 1 - I[cnt] = dofi - J[cnt] = dofj - end +# Internal matrix instantiation for SparseMatrixCSC and Symmetric{SparseMatrixCSC} +function _allocate_matrix(::Type{SparseMatrixCSC{Tv, Ti}}, sp::AbstractSparsityPattern, sym::Bool) where {Tv, Ti} + # 1. Setup colptr + colptr = zeros(Ti, getncols(sp) + 1) + colptr[1] = 1 + for (row, colidxs) in enumerate(eachrow(sp)) + for col in colidxs + sym && row > col && continue + colptr[col+1] += 1 end end - if !isnothing(topology) && !isnothing(cross_coupling) && any(cross_coupling) - cnt = cross_element_coupling!(dh, ch, topology, sym, keep_constrained, cross_couplings, cnt, I, J) - end - # Always add diagonal entries - resize!(I, cnt + ndofs(dh)) - resize!(J, cnt + ndofs(dh)) - @inbounds for d in 1:ndofs(dh) - cnt += 1 - I[cnt] = d - J[cnt] = d - end - @assert length(I) == length(J) == cnt - - K = spzeros!!(Float64, I, J, ndofs(dh), ndofs(dh)) - - # If ConstraintHandler is given, create the condensation pattern due to affine constraints - if ch !== nothing - @assert isclosed(ch) - fill!(K.nzval, 1) - _condense_sparsity_pattern!(K, ch.dofcoefficients, ch.dofmapping, keep_constrained) - fillzero!(K) - end - - return K -end - -# Similar to Ferrite._condense!(K, ch), but only add the non-zero entries to K (that arises from the condensation process) -function _condense_sparsity_pattern!(K::SparseMatrixCSC{T}, dofcoefficients::Vector{Union{Nothing,DofCoefficients{T}}}, dofmapping::Dict{Int,Int}, keep_constrained::Bool) where T - ndofs = size(K, 1) - - # Return early if there are no non-trivial affine constraints - any(i -> !(i === nothing || isempty(i)), dofcoefficients) || return - - # Adding new entries to K is extremely slow, so create a new sparsity triplet for the - # condensed sparsity pattern - N = 2 * length(dofcoefficients) # TODO: Better size estimate for additional condensed sparsity pattern. 
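A standalone sketch of the counting approach used by `_allocate_matrix` (the 3×3 pattern below is hypothetical, stored row-wise as row => sorted column indices):

```julia
rows = [[1, 2], [2], [1, 3]]
ncols = 3
# Count entries per column; a cumulative sum then yields the CSC column pointer.
colptr = zeros(Int, ncols + 1); colptr[1] = 1
for cols in rows, col in cols
    colptr[col + 1] += 1
end
cumsum!(colptr, colptr)                  # colptr == [1, 3, 5, 6]
# Fill rowval with a per-column cursor into the allocated storage.
rowval = Vector{Int}(undef, colptr[end] - 1)
next = copy(colptr)
for (row, cols) in enumerate(rows), col in cols
    rowval[next[col]] = row
    next[col] += 1
end
rowval                                   # == [1, 3, 1, 2, 3]
```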
- I = Int[]; resize!(I, N) - J = Int[]; resize!(J, N) - - cnt = 0 - for col in 1:ndofs - col_coeffs = coefficients_for_dof(dofmapping, dofcoefficients, col) - if col_coeffs === nothing - !keep_constrained && haskey(dofmapping, col) && continue - for ri in nzrange(K, col) - row = K.rowval[ri] - row_coeffs = coefficients_for_dof(dofmapping, dofcoefficients, row) - row_coeffs === nothing && continue - for (d, _) in row_coeffs - cnt += 1 - _add_or_grow(cnt, I, J, d, col) - end - end - else - for ri in nzrange(K, col) - row = K.rowval[ri] - row_coeffs = coefficients_for_dof(dofmapping, dofcoefficients, row) - if row_coeffs === nothing - !keep_constrained && haskey(dofmapping, row) && continue - for (d, _) in col_coeffs - cnt += 1 - _add_or_grow(cnt, I, J, row, d) - end - else - for (d1, _) in col_coeffs - !keep_constrained && haskey(dofmapping, d1) && continue - for (d2, _) in row_coeffs - !keep_constrained && haskey(dofmapping, d2) && continue - cnt += 1 - _add_or_grow(cnt, I, J, d1, d2) - end - end - end - end + cumsum!(colptr, colptr) + nnz = colptr[end] - 1 + # 2. Allocate rowval and nzval now that nnz is known + rowval = Vector{Ti}(undef, nnz) + nzval = zeros(Tv, nnz) + # 3. Populate rowval. Since SparsityPattern is row-based we need to allocate an extra + # work buffer here to keep track of the next index into rowval + nextinds = copy(colptr) + for (row, colidxs) in zip(1:getnrows(sp), eachrow(sp)) # pairs(eachrow(sp)) + for col in colidxs + sym && row > col && continue + k = nextinds[col] + rowval[k] = row + nextinds[col] = k + 1 end end - - resize!(I, cnt) - resize!(J, cnt) - - # Fill the sparse matrix with a non-zero value so that :+ operation does not remove entries with value zero. - K2 = spzeros!!(Float64, I, J, ndofs, ndofs) - fill!(K2.nzval, 1) - - K .+= K2 - - return nothing + @assert all(i -> nextinds[i] == colptr[i + 1], 1:getncols(sp)) + S = SparseMatrixCSC(getnrows(sp), getncols(sp), colptr, rowval, nzval) + return S end diff --git a/src/Export/VTK.jl b/src/Export/VTK.jl index 1b537f9da5..7bc5f0d2bf 100644 --- a/src/Export/VTK.jl +++ b/src/Export/VTK.jl @@ -1,3 +1,66 @@ + +""" + VTKGridFile(filename::AbstractString, grid::AbstractGrid; kwargs...) + VTKGridFile(filename::AbstractString, dh::DofHandler; kwargs...) + +Create a `VTKGridFile` that contains an unstructured VTK grid. +The keyword arguments are forwarded to `WriteVTK.vtk_grid`, see +[Data Formatting Options](https://juliavtk.github.io/WriteVTK.jl/stable/grids/syntax/#Data-formatting-options) + +This file handler can be used to to write data with + +* [`write_solution`](@ref) +* [`write_cell_data`](@ref) +* [`write_projection`](@ref) +* [`write_node_data`](@ref). +* [`Ferrite.write_cellset`](@ref) +* [`Ferrite.write_nodeset`](@ref) +* [`Ferrite.write_constraints`](@ref) + +It is necessary to call `close(::VTKGridFile)` to save the data after writing +to the file handler. Using the supported `do`-block does this automatically: +```julia +VTKGridFile(filename, grid) do vtk + write_solution(vtk, dh, u) + write_cell_data(vtk, celldata) +end +``` +""" +struct VTKGridFile{VTK<:WriteVTK.DatasetFile} + vtk::VTK +end +function VTKGridFile(filename::String, dh::DofHandler; kwargs...) + return VTKGridFile(filename, get_grid(dh); kwargs...) +end +function VTKGridFile(filename::String, grid::AbstractGrid; kwargs...) + vtk = create_vtk_grid(filename, grid; kwargs...) + return VTKGridFile(vtk) +end +# Makes it possible to use the `do`-block syntax +function VTKGridFile(f::Function, args...; kwargs...) 
+ vtk = VTKGridFile(args...; kwargs...) + try + f(vtk) + finally + close(vtk) + end +end + +Base.close(vtk::VTKGridFile) = WriteVTK.vtk_save(vtk.vtk) + +function Base.show(io::IO, ::MIME"text/plain", vtk::VTKGridFile) + open_str = isopen(vtk.vtk) ? "open" : "closed" + filename = vtk.vtk.path + print(io, "VTKGridFile for the $open_str file \"$(filename)\".") +end + +function WriteVTK.collection_add_timestep(pvd::WriteVTK.CollectionFile, datfile::VTKGridFile, time::Real) + WriteVTK.collection_add_timestep(pvd, datfile.vtk, time) +end +function Base.setindex!(pvd::WriteVTK.CollectionFile, datfile::VTKGridFile, time::Real) + WriteVTK.collection_add_timestep(pvd, datfile.vtk, time) +end + cell_to_vtkcell(::Type{Line}) = VTKCellTypes.VTK_LINE cell_to_vtkcell(::Type{QuadraticLine}) = VTKCellTypes.VTK_QUADRATIC_EDGE @@ -47,56 +110,46 @@ nodes_to_vtkorder(cell::QuadraticHexahedron) = [ cell.nodes[27], # interior ] -""" - vtk_grid(filename::AbstractString, grid::Grid; kwargs...) - vtk_grid(filename::AbstractString, dh::DofHandler; kwargs...) - -Create a unstructured VTK grid from `grid` (alternatively from the `grid` stored in `dh`). -Return a `DatasetFile` that data can be appended to, see -[`vtk_point_data`](@ref) and [`vtk_cell_data`](@ref). -The keyword arguments are forwarded to `WriteVTK.vtk_grid`, see -[Data Formatting Options](https://juliavtk.github.io/WriteVTK.jl/stable/grids/syntax/#Data-formatting-options) -""" -function WriteVTK.vtk_grid(filename::AbstractString, grid::Grid{dim,C,T}; kwargs...) where {dim,C,T} - cls = MeshCell[] +function create_vtk_griddata(grid::Grid{dim,C,T}) where {dim,C,T} + cls = WriteVTK.MeshCell[] for cell in getcells(grid) - celltype = Ferrite.cell_to_vtkcell(typeof(cell)) - push!(cls, MeshCell(celltype, nodes_to_vtkorder(cell))) + celltype = cell_to_vtkcell(typeof(cell)) + push!(cls, WriteVTK.MeshCell(celltype, nodes_to_vtkorder(cell))) end coords = reshape(reinterpret(T, getnodes(grid)), (dim, getnnodes(grid))) - return vtk_grid(filename, coords, cls; kwargs...) + return coords, cls end -function WriteVTK.vtk_grid(filename::AbstractString, dh::AbstractDofHandler; kwargs...) - vtk_grid(filename, get_grid(dh); kwargs...) + +function create_vtk_grid(filename::AbstractString, grid::Grid{dim,C,T}; kwargs...) where {dim,C,T} + coords, cls = create_vtk_griddata(grid) + return WriteVTK.vtk_grid(filename, coords, cls; kwargs...) end function toparaview!(v, x::Vec{D}) where D v[1:D] .= x end -function toparaview!(v, x::SecondOrderTensor{D}) where D +function toparaview!(v, x::SecondOrderTensor) tovoigt!(v, x) end -""" - vtk_point_data(vtk, data::Vector{<:AbstractTensor}, name) - -Write the tensor field `data` to the vtk file. Two-dimensional tensors are padded with zeros. - -For second order tensors the following indexing ordering is used: -`[11, 22, 33, 23, 13, 12, 32, 31, 21]`. This is the default Voigt order in Tensors.jl. -""" -function WriteVTK.vtk_point_data( +function _vtk_write_node_data( vtk::WriteVTK.DatasetFile, - data::Vector{S}, + nodedata::Vector{S}, name::AbstractString ) where {O, D, T, M, S <: Union{Tensor{O, D, T, M}, SymmetricTensor{O, D, T, M}}} noutputs = S <: Vec{2} ? 
3 : M # Pad 2D Vec to 3D - npoints = length(data) + npoints = length(nodedata) out = zeros(T, noutputs, npoints) for i in 1:npoints - toparaview!(@view(out[:, i]), data[i]) + toparaview!(@view(out[:, i]), nodedata[i]) end - return vtk_point_data(vtk, out, name; component_names=component_names(S)) + return WriteVTK.vtk_point_data(vtk, out, name; component_names=component_names(S)) +end +function _vtk_write_node_data(vtk::WriteVTK.DatasetFile, nodedata::Vector{<:Real}, name::AbstractString) + return WriteVTK.vtk_point_data(vtk, nodedata, name) +end +function _vtk_write_node_data(vtk::WriteVTK.DatasetFile, nodedata::Matrix{<:Real}, name::AbstractString; component_names=nothing) + return WriteVTK.vtk_point_data(vtk, nodedata, name; component_names=component_names) end function component_names(::Type{S}) where S @@ -113,46 +166,150 @@ function component_names(::Type{S}) where S return names end -function vtk_nodeset(vtk::WriteVTK.DatasetFile, grid::Grid{dim}, nodeset::String) where {dim} +""" + write_solution(vtk::VTKGridFile, dh::AbstractDofHandler, u::Vector, suffix="") + +Save the values at the nodes in the degree of freedom vector `u` to `vtk`. +Each field in `dh` will be saved separately, and `suffix` can be used to append +to the fieldname. + +`u` can also contain tensorial values, but each entry in `u` must correspond to a +degree of freedom in `dh`, see [`write_node_data`](@ref write_node_data) for details. +Use `write_node_data` directly when exporting values that are already +sorted by the nodes in the grid. +""" +function write_solution(vtk::VTKGridFile, dh::AbstractDofHandler, u::Vector, suffix="") + fieldnames = getfieldnames(dh) # all primary fields + for name in fieldnames + data = _evaluate_at_grid_nodes(dh, u, name, #=vtk=# Val(true)) + _vtk_write_node_data(vtk.vtk, data, string(name, suffix)) + end + return vtk +end + +""" + write_projection(vtk::VTKGridFile, proj::L2Projector, vals::Vector, name::AbstractString) + +Project `vals` to the grid nodes with `proj` and save to `vtk`. +""" +function write_projection(vtk::VTKGridFile, proj::L2Projector, vals, name) + data = _evaluate_at_grid_nodes(proj, vals, #=vtk=# Val(true))::Matrix + @assert size(data, 2) == getnnodes(get_grid(proj.dh)) + _vtk_write_node_data(vtk.vtk, data, name; component_names=component_names(eltype(vals))) + return vtk +end + +""" + write_cell_data(vtk::VTKGridFile, celldata::AbstractVector, name::String) + +Write the `celldata` that is ordered by the cells in the grid to the vtk file. +""" +function write_cell_data(vtk::VTKGridFile, celldata, name) + WriteVTK.vtk_cell_data(vtk.vtk, celldata, name) +end + +""" + write_node_data(vtk::VTKGridFile, nodedata::Vector{Real}, name) + write_node_data(vtk::VTKGridFile, nodedata::Vector{<:AbstractTensor}, name) + +Write the `nodedata` that is ordered by the nodes in the grid to `vtk`. + +When `nodedata` contains `Tensors.Vec`s, each component is exported. +Two-dimensional vectors are padded with zeros. + +When `nodedata` contains second order tensors, the index order, +`[11, 22, 33, 23, 13, 12, 32, 31, 21]`, follows the default Voigt order in Tensors.jl. 
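+
+For example, a minimal sketch (the `grid` and the nodal data are assumed to exist already;
+the names are illustrative only):
+```julia
+nodal_stress = [rand(SymmetricTensor{2, 3}) for _ in 1:getnnodes(grid)]
+VTKGridFile("stress", grid) do vtk
+    write_node_data(vtk, nodal_stress, "stress")
+end
+```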
+""" +function write_node_data(vtk::VTKGridFile, nodedata, name) + _vtk_write_node_data(vtk.vtk, nodedata, name) +end + + +""" + write_nodeset(vtk::VTKGridFile, grid::AbstractGrid, nodeset::String) + +Write nodal values of 1 for nodes in `nodeset`, and 0 otherwise +""" +function write_nodeset(vtk, grid::AbstractGrid, nodeset::String) z = zeros(getnnodes(grid)) z[collect(getnodeset(grid, nodeset))] .= 1.0 - vtk_point_data(vtk, z, nodeset) + write_node_data(vtk, z, nodeset) + return vtk end """ - vtk_cellset(vtk, grid::Grid) + write_cellset(vtk, grid::AbstractGrid) + write_cellset(vtk, grid::AbstractGrid, cellset::String) + write_cellset(vtk, grid::AbstractGrid, cellsets::Union{AbstractVector{String},AbstractSet{String}) -Export all cell sets in the grid. Each cell set is exported with -`vtk_cell_data` with value 1 if the cell is in the set, and 0 otherwise. +Write all cell sets in the grid with name according to their keys and +celldata 1 if the cell is in the set, and 0 otherwise. It is also possible to +only export a single `cellset`, or multiple `cellsets`. """ -function vtk_cellset(vtk::WriteVTK.DatasetFile, grid::AbstractGrid, cellsets=keys(grid.cellsets)) +function write_cellset(vtk, grid::AbstractGrid, cellsets=keys(getcellsets(grid))) z = zeros(getncells(grid)) for cellset in cellsets - z .= 0.0 + fill!(z, 0) z[collect(getcellset(grid, cellset))] .= 1.0 - vtk_cell_data(vtk, z, cellset) + write_cell_data(vtk, z, cellset) end return vtk end +write_cellset(vtk, grid::AbstractGrid, cellset::String) = write_cellset(vtk, grid, [cellset]) """ - vtk_cellset(vtk, grid::Grid, cellset::String) + write_constraints(vtk::VTKGridFile, ch::ConstraintHandler) -Export the cell set specified by `cellset` as cell data with value 1 if -the cell is in the set and 0 otherwise. +Saves the dirichlet boundary conditions to a vtkfile. +Values will have a 1 where bcs are active and 0 otherwise """ -vtk_cellset(vtk::WriteVTK.DatasetFile, grid::AbstractGrid, cellset::String) = - vtk_cellset(vtk, grid, [cellset]) +function write_constraints(vtk, ch::ConstraintHandler) + unique_fields = [] + for dbc in ch.dbcs + push!(unique_fields, dbc.field_name) + end + unique!(unique_fields) + for field in unique_fields + nd = n_components(ch.dh, field) + data = zeros(Float64, nd, getnnodes(get_grid(ch.dh))) + for dbc in ch.dbcs + dbc.field_name != field && continue + if eltype(dbc.facets) <: BoundaryIndex + functype = boundaryfunction(eltype(dbc.facets)) + for (cellidx, facetidx) in dbc.facets + for facetnode in functype(getcells(get_grid(ch.dh), cellidx))[facetidx] + for component in dbc.components + data[component, facetnode] = 1 + end + end + end + else + for nodeidx in dbc.facets + for component in dbc.components + data[component, nodeidx] = 1 + end + end + end + end + write_node_data(vtk, data, string(field, "_bc")) + end + return vtk +end -function WriteVTK.vtk_point_data(vtkfile, dh::AbstractDofHandler, u::Vector, suffix="") +""" + write_cell_colors(vtk::VTKGridFile, grid::AbstractGrid, cell_colors, name="coloring") - fieldnames = Ferrite.getfieldnames(dh) # all primary fields +Write cell colors (see [`create_coloring`](@ref)) to a VTK file for visualization. - for name in fieldnames - data = _evaluate_at_grid_nodes(dh, u, name, #=vtk=# Val(true)) - vtk_point_data(vtkfile, data, string(name, suffix)) +In case of coloring a subset, the cells which are not part of the subset are represented as color 0. 
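+
+For example, a minimal sketch (assuming `grid` has been created already; the file name is
+illustrative only):
+```julia
+colors = create_coloring(grid)
+VTKGridFile("coloring", grid) do vtk
+    Ferrite.write_cell_colors(vtk, grid, colors)
+end
+```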
+""" +function write_cell_colors(vtk, grid::AbstractGrid, cell_colors::AbstractVector{<:AbstractVector{<:Integer}}, name="coloring") + color_vector = zeros(Int, getncells(grid)) + for (i, cells_color) in enumerate(cell_colors) + for cell in cells_color + color_vector[cell] = i + end end - - return vtkfile + write_cell_data(vtk, color_vector, name) end diff --git a/src/FEValues/CellValues.jl b/src/FEValues/CellValues.jl index 362f14ab40..0403814b94 100644 --- a/src/FEValues/CellValues.jl +++ b/src/FEValues/CellValues.jl @@ -12,6 +12,11 @@ values of nodal functions, gradients and divergences of nodal functions etc. in By default linear Lagrange interpolation is used. For embedded elements the geometric interpolations should be vectorized to the spatial dimension. +**Keyword arguments:** The following keyword arguments are experimental and may change in future minor releases +* `update_gradients`: Specifies if the gradients of the shape functions should be updated (default true) +* `update_hessians`: Specifies if the hessians of the shape functions should be updated (default false) +* `update_detJdV`: Specifies if the volume associated with each quadrature point should be updated (default true) + **Common methods:** * [`reinit!`](@ref) @@ -41,21 +46,22 @@ struct CellValues{FV, GM, QR, detT} <: AbstractCellValues qr::QR # QuadratureRule detJdV::detT # AbstractVector{<:Number} or Nothing end -function CellValues(::Type{T}, qr::QuadratureRule, ip_fun::Interpolation, ip_geo::VectorizedInterpolation; - update_gradients::Bool = true, update_detJdV::Bool = true) where T - - FunDiffOrder = convert(Int, update_gradients) # Logic must change when supporting update_hessian kwargs - GeoDiffOrder = max(required_geo_diff_order(mapping_type(ip_fun), FunDiffOrder), update_detJdV) +function CellValues(::Type{T}, qr::QuadratureRule, ip_fun::Interpolation, ip_geo::VectorizedInterpolation, + ::ValuesUpdateFlags{FunDiffOrder, GeoDiffOrder, DetJdV}) where {T, FunDiffOrder, GeoDiffOrder, DetJdV} + geo_mapping = GeometryMapping{GeoDiffOrder}(T, ip_geo.ip, qr) fun_values = FunctionValues{FunDiffOrder}(T, ip_fun, qr, ip_geo) - detJdV = update_detJdV ? fill(T(NaN), length(getweights(qr))) : nothing + detJdV = DetJdV ? fill(T(NaN), length(getweights(qr))) : nothing return CellValues(fun_values, geo_mapping, qr, detJdV) end CellValues(qr::QuadratureRule, ip::Interpolation, args...; kwargs...) = CellValues(Float64, qr, ip, args...; kwargs...) -function CellValues(::Type{T}, qr, ip::Interpolation, ip_geo::ScalarInterpolation=default_geometric_interpolation(ip); kwargs...) where T +function CellValues(::Type{T}, qr, ip::Interpolation, ip_geo::ScalarInterpolation; kwargs...) where T return CellValues(T, qr, ip, VectorizedInterpolation(ip_geo); kwargs...) end +function CellValues(::Type{T}, qr::QuadratureRule, ip::Interpolation, ip_geo::VectorizedInterpolation = default_geometric_interpolation(ip); kwargs...) 
where T + return CellValues(T, qr, ip, ip_geo, ValuesUpdateFlags(ip; kwargs...)) +end function Base.copy(cv::CellValues) return CellValues(copy(cv.fun_values), copy(cv.geo_mapping), copy(cv.qr), _copy_or_nothing(cv.detJdV)) @@ -69,18 +75,20 @@ geometric_interpolation(cv::CellValues) = geometric_interpolation(cv.geo_mapping getdetJdV(cv::CellValues, q_point::Int) = cv.detJdV[q_point] getdetJdV(::CellValues{<:Any, <:Any, <:Any, Nothing}, ::Int) = throw(ArgumentError("detJdV is not saved in CellValues")) -# Accessors for function values +# Accessors for function values getnbasefunctions(cv::CellValues) = getnbasefunctions(cv.fun_values) function_interpolation(cv::CellValues) = function_interpolation(cv.fun_values) function_difforder(cv::CellValues) = function_difforder(cv.fun_values) shape_value_type(cv::CellValues) = shape_value_type(cv.fun_values) shape_gradient_type(cv::CellValues) = shape_gradient_type(cv.fun_values) +shape_hessian_type(cv::CellValues) = shape_hessian_type(cv.fun_values) -@propagate_inbounds shape_value(cv::CellValues, i::Int, q_point::Int) = shape_value(cv.fun_values, i, q_point) -@propagate_inbounds shape_gradient(cv::CellValues, i::Int, q_point::Int) = shape_gradient(cv.fun_values, i, q_point) -@propagate_inbounds shape_symmetric_gradient(cv::CellValues, i::Int, q_point::Int) = shape_symmetric_gradient(cv.fun_values, i, q_point) +@propagate_inbounds shape_value(cv::CellValues, q_point::Int, i::Int) = shape_value(cv.fun_values, q_point, i) +@propagate_inbounds shape_gradient(cv::CellValues, q_point::Int, i::Int) = shape_gradient(cv.fun_values, q_point, i) +@propagate_inbounds shape_hessian(cv::CellValues, q_point::Int, i::Int) = shape_hessian(cv.fun_values, q_point, i) +@propagate_inbounds shape_symmetric_gradient(cv::CellValues, q_point::Int, i::Int) = shape_symmetric_gradient(cv.fun_values, q_point, i) -# Access quadrature rule values +# Access quadrature rule values getnquadpoints(cv::CellValues) = getnquadpoints(cv.qr) @inline function _update_detJdV!(detJvec::AbstractVector, q_point::Int, w, mapping) @@ -98,7 +106,7 @@ function reinit!(cv::CellValues, cell::Union{AbstractCell, Nothing}, x::Abstract geo_mapping = cv.geo_mapping fun_values = cv.fun_values n_geom_basefuncs = getngeobasefunctions(geo_mapping) - + check_reinit_sdim_consistency(:CellValues, shape_gradient_type(cv), eltype(x)) if cell === nothing && !isa(mapping_type(fun_values), IdentityMapping) throw(ArgumentError("The cell::AbstractCell input is required to reinit! non-identity function mappings")) @@ -117,7 +125,7 @@ end function Base.show(io::IO, d::MIME"text/plain", cv::CellValues) ip_geo = geometric_interpolation(cv) ip_fun = function_interpolation(cv) - rdim = getdim(ip_geo) + rdim = getrefdim(ip_geo) vdim = isa(shape_value(cv, 1, 1), Vec) ? length(shape_value(cv, 1, 1)) : 0 GradT = shape_gradient_type(cv) sdim = GradT === nothing ? nothing : sdim_from_gradtype(GradT) @@ -125,6 +133,6 @@ function Base.show(io::IO, d::MIME"text/plain", cv::CellValues) print(io, "CellValues(", vstr, ", rdim=$rdim, and sdim=$sdim): ") print(io, getnquadpoints(cv), " quadrature points") print(io, "\n Function interpolation: "); show(io, d, ip_fun) - print(io, "\nGeometric interpolation: "); + print(io, "\nGeometric interpolation: "); sdim === nothing ? 
show(io, d, ip_geo) : show(io, d, ip_geo^sdim) -end \ No newline at end of file +end diff --git a/src/FEValues/FaceValues.jl b/src/FEValues/FaceValues.jl deleted file mode 100644 index 64d944e2df..0000000000 --- a/src/FEValues/FaceValues.jl +++ /dev/null @@ -1,216 +0,0 @@ -""" - FaceValues([::Type{T}], quad_rule::FaceQuadratureRule, func_interpol::Interpolation, [geom_interpol::Interpolation]) - -A `FaceValues` object facilitates the process of evaluating values of shape functions, gradients of shape functions, -values of nodal functions, gradients and divergences of nodal functions etc. on the faces of finite elements. - -**Arguments:** - -* `T`: an optional argument (default to `Float64`) to determine the type the internal data is stored as. -* `quad_rule`: an instance of a [`FaceQuadratureRule`](@ref) -* `func_interpol`: an instance of an [`Interpolation`](@ref) used to interpolate the approximated function -* `geom_interpol`: an optional instance of an [`Interpolation`](@ref) which is used to interpolate the geometry. - By default linear Lagrange interpolation is used. - -**Common methods:** - -* [`reinit!`](@ref) -* [`getnquadpoints`](@ref) -* [`getdetJdV`](@ref) - -* [`shape_value`](@ref) -* [`shape_gradient`](@ref) -* [`shape_symmetric_gradient`](@ref) -* [`shape_divergence`](@ref) - -* [`function_value`](@ref) -* [`function_gradient`](@ref) -* [`function_symmetric_gradient`](@ref) -* [`function_divergence`](@ref) -* [`spatial_coordinate`](@ref) -""" -FaceValues - -struct FaceValues{FV, GM, FQR, detT, nT, V_FV<:AbstractVector{FV}, V_GM<:AbstractVector{GM}} <: AbstractFaceValues - fun_values::V_FV # AbstractVector{FunctionValues} - geo_mapping::V_GM # AbstractVector{GeometryMapping} - fqr::FQR # FaceQuadratureRule - detJdV::detT # AbstractVector{<:Number} - normals::nT # AbstractVector{<:Vec} - current_face::ScalarWrapper{Int} -end - -function FaceValues(::Type{T}, fqr::FaceQuadratureRule, ip_fun::Interpolation, ip_geo::VectorizedInterpolation{sdim} = default_geometric_interpolation(ip_fun); - update_gradients::Bool = true) where {T,sdim} - - FunDiffOrder = convert(Int, update_gradients) # Logic must change when supporting update_hessian kwargs - GeoDiffOrder = max(required_geo_diff_order(mapping_type(ip_fun), FunDiffOrder), 1) - geo_mapping = [GeometryMapping{GeoDiffOrder}(T, ip_geo.ip, qr) for qr in fqr.face_rules] - fun_values = [FunctionValues{FunDiffOrder}(T, ip_fun, qr, ip_geo) for qr in fqr.face_rules] - max_nquadpoints = maximum(qr->length(getweights(qr)), fqr.face_rules) - detJdV = fill(T(NaN), max_nquadpoints) - normals = fill(zero(Vec{sdim, T}) * T(NaN), max_nquadpoints) - return FaceValues(fun_values, geo_mapping, fqr, detJdV, normals, ScalarWrapper(1)) -end - -FaceValues(qr::FaceQuadratureRule, ip::Interpolation, args...) = FaceValues(Float64, qr, ip, args...) 
-function FaceValues(::Type{T}, qr::FaceQuadratureRule, ip::Interpolation, ip_geo::ScalarInterpolation) where T - return FaceValues(T, qr, ip, VectorizedInterpolation(ip_geo)) -end - -function Base.copy(fv::FaceValues) - fun_values = map(copy, fv.fun_values) - geo_mapping = map(copy, fv.geo_mapping) - return FaceValues(fun_values, geo_mapping, copy(fv.fqr), copy(fv.detJdV), copy(fv.normals), copy(fv.current_face)) -end - -getngeobasefunctions(fv::FaceValues) = getngeobasefunctions(get_geo_mapping(fv)) -getnbasefunctions(fv::FaceValues) = getnbasefunctions(get_fun_values(fv)) -getnquadpoints(fv::FaceValues) = @inbounds getnquadpoints(fv.fqr, getcurrentface(fv)) -@propagate_inbounds getdetJdV(fv::FaceValues, q_point) = fv.detJdV[q_point] - -shape_value_type(fv::FaceValues) = shape_value_type(get_fun_values(fv)) -shape_gradient_type(fv::FaceValues) = shape_gradient_type(get_fun_values(fv)) -function_interpolation(fv::FaceValues) = function_interpolation(get_fun_values(fv)) -function_difforder(fv::FaceValues) = function_difforder(get_fun_values(fv)) -geometric_interpolation(fv::FaceValues) = geometric_interpolation(get_geo_mapping(fv)) - -get_geo_mapping(fv::FaceValues) = @inbounds fv.geo_mapping[getcurrentface(fv)] -@propagate_inbounds geometric_value(fv::FaceValues, args...) = geometric_value(get_geo_mapping(fv), args...) - -get_fun_values(fv::FaceValues) = @inbounds fv.fun_values[getcurrentface(fv)] - -@propagate_inbounds shape_value(fv::FaceValues, i::Int, q_point::Int) = shape_value(get_fun_values(fv), i, q_point) -@propagate_inbounds shape_gradient(fv::FaceValues, i::Int, q_point::Int) = shape_gradient(get_fun_values(fv), i, q_point) -@propagate_inbounds shape_symmetric_gradient(fv::FaceValues, i::Int, q_point::Int) = shape_symmetric_gradient(get_fun_values(fv), i, q_point) - -""" - getcurrentface(fv::FaceValues) - -Return the current active face of the `FaceValues` object (from last `reinit!`). -""" -getcurrentface(fv::FaceValues) = fv.current_face[] - -""" - getnormal(fv::FaceValues, qp::Int) - -Return the normal at the quadrature point `qp` for the active face of the -`FaceValues` object(from last `reinit!`). -""" -getnormal(fv::FaceValues, qp::Int) = fv.normals[qp] - -nfaces(fv::FaceValues) = length(fv.geo_mapping) - -function set_current_face!(fv::FaceValues, face_nr::Int) - # Checking face_nr before setting current_face allows us to use @inbounds - # when indexing by getcurrentface(fv) in other places! - checkbounds(Bool, 1:nfaces(fv), face_nr) || throw(ArgumentError("Face index out of range.")) - fv.current_face[] = face_nr -end - -@inline function reinit!(fv::FaceValues, x::AbstractVector, face_nr::Int) - return reinit!(fv, nothing, x, face_nr) -end - -function reinit!(fv::FaceValues, cell::Union{AbstractCell, Nothing}, x::AbstractVector{Vec{dim, T}}, face_nr::Int) where {dim, T} - check_reinit_sdim_consistency(:FaceValues, shape_gradient_type(fv), eltype(x)) - set_current_face!(fv, face_nr) - n_geom_basefuncs = getngeobasefunctions(fv) - if !checkbounds(Bool, x, 1:n_geom_basefuncs) || length(x) != n_geom_basefuncs - throw_incompatible_coord_length(length(x), n_geom_basefuncs) - end - - geo_mapping = get_geo_mapping(fv) - fun_values = get_fun_values(fv) - - if cell === nothing && !isa(mapping_type(fun_values), IdentityMapping) - throw(ArgumentError("The cell::AbstractCell input is required to reinit! 
non-identity function mappings")) - end - - @inbounds for (q_point, w) in pairs(getweights(fv.fqr, face_nr)) - mapping = calculate_mapping(geo_mapping, q_point, x) - J = getjacobian(mapping) - # See the `Ferrite.embedded_det` docstring for more background - weight_norm = weighted_normal(J, getrefshape(geo_mapping.ip), face_nr) - detJ = norm(weight_norm) - detJ > 0.0 || throw_detJ_not_pos(detJ) - @inbounds fv.detJdV[q_point] = detJ * w - @inbounds fv.normals[q_point] = weight_norm / norm(weight_norm) - apply_mapping!(fun_values, q_point, mapping, cell) - end -end - -function Base.show(io::IO, d::MIME"text/plain", fv::FaceValues) - ip_geo = geometric_interpolation(fv) - rdim = getdim(ip_geo) - vdim = isa(shape_value(fv, 1, 1), Vec) ? length(shape_value(fv, 1, 1)) : 0 - sdim = length(shape_gradient(fv, 1, 1)) ÷ length(shape_value(fv, 1, 1)) - vstr = vdim==0 ? "scalar" : "vdim=$vdim" - print(io, "FaceValues(", vstr, ", rdim=$rdim, sdim=$sdim): ") - nqp = getnquadpoints.(fv.fqr.face_rules) - if all(n==first(nqp) for n in nqp) - println(io, first(nqp), " quadrature points per face") - else - println(io, tuple(nqp...), " quadrature points on each face") - end - print(io, " Function interpolation: "); show(io, d, function_interpolation(fv)) - print(io, "\nGeometric interpolation: "); show(io, d, ip_geo^sdim) -end - -""" - BCValues(func_interpol::Interpolation, geom_interpol::Interpolation, boundary_type::Union{Type{<:BoundaryIndex}}) - -`BCValues` stores the shape values at all faces/edges/vertices (depending on `boundary_type`) for the geomatric interpolation (`geom_interpol`), -for each dof-position determined by the `func_interpol`. Used mainly by the `ConstrainHandler`. -""" -struct BCValues{T} - M::Array{T,3} - nqp::Array{Int} - current_entity::ScalarWrapper{Int} -end - -BCValues(func_interpol::Interpolation, geom_interpol::Interpolation, boundary_type::Type{<:BoundaryIndex} = Ferrite.FaceIndex) = - BCValues(Float64, func_interpol, geom_interpol, boundary_type) - -function BCValues(::Type{T}, func_interpol::Interpolation{refshape}, geom_interpol::Interpolation{refshape}, boundary_type::Type{<:BoundaryIndex} = Ferrite.FaceIndex) where {T,dim,refshape <: AbstractRefShape{dim}} - # set up quadrature rules for each boundary entity with dof-positions - # (determined by func_interpol) as the quadrature points - interpolation_coords = reference_coordinates(func_interpol) - - qrs = QuadratureRule{refshape,T,dim}[] - for boundarydofs in dirichlet_boundarydof_indices(boundary_type)(func_interpol) - dofcoords = Vec{dim,T}[] - for boundarydof in boundarydofs - push!(dofcoords, interpolation_coords[boundarydof]) - end - qrf = QuadratureRule{refshape,T}(fill(T(NaN), length(dofcoords)), dofcoords) # weights will not be used - push!(qrs, qrf) - end - - n_boundary_entities = length(qrs) - n_qpoints = n_boundary_entities == 0 ? 0 : maximum(qr->length(getweights(qr)), qrs) # Bound number of qps correctly. 
- n_geom_basefuncs = getnbasefunctions(geom_interpol) - M = fill(zero(T) * T(NaN), n_geom_basefuncs, n_qpoints, n_boundary_entities) - nqp = zeros(Int,n_boundary_entities) - - for n_boundary_entity in 1:n_boundary_entities - for (qp, ξ) in pairs(qrs[n_boundary_entity].points) - shape_values!(@view(M[:, qp, n_boundary_entity]), geom_interpol, ξ) - end - nqp[n_boundary_entity] = length(qrs[n_boundary_entity].points) - end - - BCValues{T}(M, nqp, ScalarWrapper(0)) -end - -getnquadpoints(bcv::BCValues) = bcv.nqp[bcv.current_entity.x] -function spatial_coordinate(bcv::BCValues, q_point::Int, xh::AbstractVector{Vec{dim,T}}) where {dim,T} - n_base_funcs = size(bcv.M, 1) - length(xh) == n_base_funcs || throw_incompatible_coord_length(length(xh), n_base_funcs) - x = zero(Vec{dim,T}) - face = bcv.current_entity[] - @inbounds for i in 1:n_base_funcs - x += bcv.M[i,q_point,face] * xh[i] # geometric_value(fe_v, q_point, i) * xh[i] - end - return x -end diff --git a/src/FEValues/FacetValues.jl b/src/FEValues/FacetValues.jl new file mode 100644 index 0000000000..84781ff469 --- /dev/null +++ b/src/FEValues/FacetValues.jl @@ -0,0 +1,227 @@ +""" + FacetValues([::Type{T}], quad_rule::FacetQuadratureRule, func_interpol::Interpolation, [geom_interpol::Interpolation]) + +A `FacetValues` object facilitates the process of evaluating values of shape functions, gradients of shape functions, +values of nodal functions, gradients and divergences of nodal functions etc. on the facets of finite elements. + +**Arguments:** + +* `T`: an optional argument (default to `Float64`) to determine the type the internal data is stored as. +* `quad_rule`: an instance of a [`FacetQuadratureRule`](@ref) +* `func_interpol`: an instance of an [`Interpolation`](@ref) used to interpolate the approximated function +* `geom_interpol`: an optional instance of an [`Interpolation`](@ref) which is used to interpolate the geometry. + By default linear Lagrange interpolation is used. + +**Keyword arguments:** The following keyword arguments are experimental and may change in future minor releases + +* `update_gradients`: Specifies if the gradients of the shape functions should be updated (default true) +* `update_hessians`: Specifies if the hessians of the shape functions should be updated (default false) + +**Common methods:** + +* [`reinit!`](@ref) +* [`getnquadpoints`](@ref) +* [`getdetJdV`](@ref) + +* [`shape_value`](@ref) +* [`shape_gradient`](@ref) +* [`shape_symmetric_gradient`](@ref) +* [`shape_divergence`](@ref) + +* [`function_value`](@ref) +* [`function_gradient`](@ref) +* [`function_symmetric_gradient`](@ref) +* [`function_divergence`](@ref) +* [`spatial_coordinate`](@ref) +""" +FacetValues + +mutable struct FacetValues{FV, GM, FQR, detT, nT, V_FV<:AbstractVector{FV}, V_GM<:AbstractVector{GM}} <: AbstractFacetValues + const fun_values::V_FV # AbstractVector{FunctionValues} + const geo_mapping::V_GM # AbstractVector{GeometryMapping} + const fqr::FQR # FacetQuadratureRule + const detJdV::detT # AbstractVector{<:Number} + const normals::nT # AbstractVector{<:Vec} + current_facet::Int +end + +function FacetValues(::Type{T}, fqr::FacetQuadratureRule, ip_fun::Interpolation, ip_geo::VectorizedInterpolation{sdim}, + ::ValuesUpdateFlags{FunDiffOrder, GeoDiffOrder}) where {T, sdim, FunDiffOrder, GeoDiffOrder} + + # max(GeoDiffOrder, 1) ensures that we get the jacobian needed to calculate the normal. 
+ geo_mapping = map(qr -> GeometryMapping{max(GeoDiffOrder, 1)}(T, ip_geo.ip, qr), fqr.face_rules) + fun_values = map(qr -> FunctionValues{FunDiffOrder}(T, ip_fun, qr, ip_geo), fqr.face_rules) + max_nquadpoints = maximum(qr -> length(getweights(qr)), fqr.face_rules) + # detJdV always calculated, since we needed to calculate the jacobian anyways for the normal. + detJdV = fill(T(NaN), max_nquadpoints) + normals = fill(zero(Vec{sdim, T}) * T(NaN), max_nquadpoints) + return FacetValues(fun_values, geo_mapping, fqr, detJdV, normals, 1) +end + +FacetValues(qr::FacetQuadratureRule, ip::Interpolation, args...; kwargs...) = FacetValues(Float64, qr, ip, args...; kwargs...) +function FacetValues(::Type{T}, qr::FacetQuadratureRule, ip::Interpolation, ip_geo::ScalarInterpolation; kwargs...) where T + return FacetValues(T, qr, ip, VectorizedInterpolation(ip_geo); kwargs...) +end +function FacetValues(::Type{T}, qr::FacetQuadratureRule, ip::Interpolation, ip_geo::VectorizedInterpolation = default_geometric_interpolation(ip); kwargs...) where T + return FacetValues(T, qr, ip, ip_geo, ValuesUpdateFlags(ip; kwargs...)) +end + +function Base.copy(fv::FacetValues) + fun_values = map(copy, fv.fun_values) + geo_mapping = map(copy, fv.geo_mapping) + return FacetValues(fun_values, geo_mapping, copy(fv.fqr), copy(fv.detJdV), copy(fv.normals), fv.current_facet) +end + +getngeobasefunctions(fv::FacetValues) = getngeobasefunctions(get_geo_mapping(fv)) +getnbasefunctions(fv::FacetValues) = getnbasefunctions(get_fun_values(fv)) +getnquadpoints(fv::FacetValues) = @inbounds getnquadpoints(fv.fqr, getcurrentfacet(fv)) +@propagate_inbounds getdetJdV(fv::FacetValues, q_point) = fv.detJdV[q_point] + +shape_value_type(fv::FacetValues) = shape_value_type(get_fun_values(fv)) +shape_gradient_type(fv::FacetValues) = shape_gradient_type(get_fun_values(fv)) +function_interpolation(fv::FacetValues) = function_interpolation(get_fun_values(fv)) +function_difforder(fv::FacetValues) = function_difforder(get_fun_values(fv)) +geometric_interpolation(fv::FacetValues) = geometric_interpolation(get_geo_mapping(fv)) + +get_geo_mapping(fv::FacetValues) = @inbounds fv.geo_mapping[getcurrentfacet(fv)] +@propagate_inbounds geometric_value(fv::FacetValues, args...) = geometric_value(get_geo_mapping(fv), args...) + +get_fun_values(fv::FacetValues) = @inbounds fv.fun_values[getcurrentfacet(fv)] + +@propagate_inbounds shape_value(fv::FacetValues, q_point::Int, i::Int) = shape_value(get_fun_values(fv), q_point, i) +@propagate_inbounds shape_gradient(fv::FacetValues, q_point::Int, i::Int) = shape_gradient(get_fun_values(fv), q_point, i) +@propagate_inbounds shape_hessian(fv::FacetValues, q_point::Int, i::Int) = shape_hessian(get_fun_values(fv), q_point, i) +@propagate_inbounds shape_symmetric_gradient(fv::FacetValues, q_point::Int, i::Int) = shape_symmetric_gradient(get_fun_values(fv), q_point, i) + +""" + getcurrentfacet(fv::FacetValues) + +Return the current active facet of the `FacetValues` object (from last `reinit!`). +""" +getcurrentfacet(fv::FacetValues) = fv.current_facet[] + +""" + getnormal(fv::FacetValues, qp::Int) + +Return the normal at the quadrature point `qp` for the active facet of the +`FacetValues` object(from last `reinit!`). +""" +getnormal(fv::FacetValues, qp::Int) = fv.normals[qp] + +nfacets(fv::FacetValues) = length(fv.geo_mapping) + +function set_current_facet!(fv::FacetValues, face_nr::Int) + # Checking face_nr before setting current_facet allows us to use @inbounds + # when indexing by getcurrentfacet(fv) in other places! 
+ checkbounds(Bool, 1:nfacets(fv), face_nr) || throw(ArgumentError("Face index out of range.")) + fv.current_facet = face_nr +end + +@inline function reinit!(fv::FacetValues, x::AbstractVector, face_nr::Int) + return reinit!(fv, nothing, x, face_nr) +end + +function reinit!(fv::FacetValues, cell::Union{AbstractCell, Nothing}, x::AbstractVector{Vec{dim, T}}, face_nr::Int) where {dim, T} + check_reinit_sdim_consistency(:FacetValues, shape_gradient_type(fv), eltype(x)) + set_current_facet!(fv, face_nr) + n_geom_basefuncs = getngeobasefunctions(fv) + if !checkbounds(Bool, x, 1:n_geom_basefuncs) || length(x) != n_geom_basefuncs + throw_incompatible_coord_length(length(x), n_geom_basefuncs) + end + + geo_mapping = get_geo_mapping(fv) + fun_values = get_fun_values(fv) + + if cell === nothing && !isa(mapping_type(fun_values), IdentityMapping) + throw(ArgumentError("The cell::AbstractCell input is required to reinit! non-identity function mappings")) + end + + @inbounds for (q_point, w) in pairs(getweights(fv.fqr, face_nr)) + mapping = calculate_mapping(geo_mapping, q_point, x) + J = getjacobian(mapping) + # See the `Ferrite.embedding_det` docstring for more background + weight_norm = weighted_normal(J, getrefshape(geo_mapping.ip), face_nr) + detJ = norm(weight_norm) + detJ > 0.0 || throw_detJ_not_pos(detJ) + @inbounds fv.detJdV[q_point] = detJ * w + @inbounds fv.normals[q_point] = weight_norm / norm(weight_norm) + apply_mapping!(fun_values, q_point, mapping, cell) + end +end + +function Base.show(io::IO, d::MIME"text/plain", fv::FacetValues) + ip_geo = geometric_interpolation(fv) + rdim = getrefdim(ip_geo) + vdim = isa(shape_value(fv, 1, 1), Vec) ? length(shape_value(fv, 1, 1)) : 0 + GradT = shape_gradient_type(fv) + sdim = GradT === nothing ? nothing : sdim_from_gradtype(GradT) + vstr = vdim==0 ? "scalar" : "vdim=$vdim" + print(io, "FacetValues(", vstr, ", rdim=$rdim, sdim=$sdim): ") + nqp = getnquadpoints.(fv.fqr.face_rules) + if all(n==first(nqp) for n in nqp) + println(io, first(nqp), " quadrature points per face") + else + println(io, tuple(nqp...), " quadrature points on each face") + end + print(io, " Function interpolation: "); show(io, d, function_interpolation(fv)) + print(io, "\nGeometric interpolation: "); + sdim === nothing ? show(io, d, ip_geo) : show(io, d, ip_geo^sdim) +end + +""" + BCValues(func_interpol::Interpolation, geom_interpol::Interpolation, boundary_type::Union{Type{<:BoundaryIndex}}) + +`BCValues` stores the shape values at all facet/faces/edges/vertices (depending on `boundary_type`) for the geometric interpolation (`geom_interpol`), +for each dof-position determined by the `func_interpol`. Used mainly by the `ConstraintHandler`. 
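+
+A minimal construction sketch (internal API; the interpolation is only an example):
+```julia
+ip = Lagrange{RefTriangle, 1}()
+bcv = BCValues(ip, ip) # defaults to `FaceIndex`, storing values at the dof positions of each face
+```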
+""" +mutable struct BCValues{T} + const M::Array{T,3} + const nqp::Array{Int} + current_entity::Int +end + +BCValues(func_interpol::Interpolation, geom_interpol::Interpolation, boundary_type::Type{<:BoundaryIndex} = FaceIndex) = + BCValues(Float64, func_interpol, geom_interpol, boundary_type) + +function BCValues(::Type{T}, func_interpol::Interpolation{refshape}, geom_interpol::Interpolation{refshape}, boundary_type::Type{<:BoundaryIndex} = FaceIndex) where {T,dim,refshape <: AbstractRefShape{dim}} + # set up quadrature rules for each boundary entity with dof-positions + # (determined by func_interpol) as the quadrature points + interpolation_coords = reference_coordinates(func_interpol) + + qrs = QuadratureRule{refshape,Vector{T},Vector{Vec{dim,T}}}[] + for boundarydofs in dirichlet_boundarydof_indices(boundary_type)(func_interpol) + dofcoords = Vec{dim,T}[] + for boundarydof in boundarydofs + push!(dofcoords, interpolation_coords[boundarydof]) + end + qrf = QuadratureRule{refshape}(fill(T(NaN), length(dofcoords)), dofcoords) # weights will not be used + push!(qrs, qrf) + end + + n_boundary_entities = length(qrs) + n_qpoints = n_boundary_entities == 0 ? 0 : maximum(qr->length(getweights(qr)), qrs) # Bound number of qps correctly. + n_geom_basefuncs = getnbasefunctions(geom_interpol) + M = fill(zero(T) * T(NaN), n_geom_basefuncs, n_qpoints, n_boundary_entities) + nqp = zeros(Int,n_boundary_entities) + + for n_boundary_entity in 1:n_boundary_entities + for (qp, ξ) in pairs(qrs[n_boundary_entity].points) + reference_shape_values!(@view(M[:, qp, n_boundary_entity]), geom_interpol, ξ) + end + nqp[n_boundary_entity] = length(qrs[n_boundary_entity].points) + end + + BCValues{T}(M, nqp, 0) +end + +getnquadpoints(bcv::BCValues) = bcv.nqp[bcv.current_entity] +function spatial_coordinate(bcv::BCValues, q_point::Int, xh::AbstractVector{Vec{dim,T}}) where {dim,T} + n_base_funcs = size(bcv.M, 1) + length(xh) == n_base_funcs || throw_incompatible_coord_length(length(xh), n_base_funcs) + x = zero(Vec{dim,T}) + face = bcv.current_entity[] + @inbounds for i in 1:n_base_funcs + x += bcv.M[i,q_point,face] * xh[i] # geometric_value(fe_v, q_point, i) * xh[i] + end + return x +end diff --git a/src/FEValues/FunctionValues.jl b/src/FEValues/FunctionValues.jl index 361f9a6619..67cdf5ad44 100644 --- a/src/FEValues/FunctionValues.jl +++ b/src/FEValues/FunctionValues.jl @@ -6,72 +6,96 @@ ################################################################# # Scalar, sdim == rdim sdim rdim -typeof_N( ::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = T -typeof_dNdx(::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Vec{dim, T} -typeof_dNdξ(::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Vec{dim, T} +typeof_N( ::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = T +typeof_dNdx( ::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Vec{dim, T} +typeof_dNdξ( ::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Vec{dim, T} +typeof_d2Ndx2(::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Tensor{2, dim, T} +typeof_d2Ndξ2(::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = 
Tensor{2, dim, T} # Vector, vdim == sdim == rdim vdim sdim rdim -typeof_N( ::Type{T}, ::VectorInterpolation{dim}, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Vec{dim, T} -typeof_dNdx(::Type{T}, ::VectorInterpolation{dim}, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Tensor{2, dim, T} -typeof_dNdξ(::Type{T}, ::VectorInterpolation{dim}, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Tensor{2, dim, T} +typeof_N( ::Type{T}, ::VectorInterpolation{dim}, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Vec{dim, T} +typeof_dNdx( ::Type{T}, ::VectorInterpolation{dim}, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Tensor{2, dim, T} +typeof_dNdξ( ::Type{T}, ::VectorInterpolation{dim}, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Tensor{2, dim, T} +typeof_d2Ndx2(::Type{T}, ::VectorInterpolation{dim}, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Tensor{3, dim, T} +typeof_d2Ndξ2(::Type{T}, ::VectorInterpolation{dim}, ::VectorizedInterpolation{dim, <: AbstractRefShape{dim}}) where {T, dim} = Tensor{3, dim, T} # Scalar, sdim != rdim (TODO: Use Vec if (s|r)dim <= 3?) typeof_N( ::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, sdim, rdim} = T typeof_dNdx(::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, sdim, rdim} = SVector{sdim, T} typeof_dNdξ(::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, sdim, rdim} = SVector{rdim, T} +typeof_d2Ndx2(::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, sdim, rdim} = SMatrix{sdim, sdim, T, sdim*sdim} +typeof_d2Ndξ2(::Type{T}, ::ScalarInterpolation, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, sdim, rdim} = SMatrix{rdim, rdim, T, rdim*rdim} + # Vector, vdim != sdim != rdim (TODO: Use Vec/Tensor if (s|r)dim <= 3?) 
typeof_N( ::Type{T}, ::VectorInterpolation{vdim}, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, vdim, sdim, rdim} = SVector{vdim, T} -typeof_dNdx(::Type{T}, ::VectorInterpolation{vdim}, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, vdim, sdim, rdim} = SMatrix{vdim, sdim, T} -typeof_dNdξ(::Type{T}, ::VectorInterpolation{vdim}, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, vdim, sdim, rdim} = SMatrix{vdim, rdim, T} +typeof_dNdx(::Type{T}, ::VectorInterpolation{vdim}, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, vdim, sdim, rdim} = SMatrix{vdim, sdim, T, vdim*sdim} +typeof_dNdξ(::Type{T}, ::VectorInterpolation{vdim}, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, vdim, sdim, rdim} = SMatrix{vdim, rdim, T, vdim*rdim} +typeof_d2Ndx2(::Type{T}, ::VectorInterpolation{vdim}, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, vdim, sdim, rdim} = SArray{Tuple{vdim, sdim, sdim}, T, 3, vdim*sdim*sdim} +typeof_d2Ndξ2(::Type{T}, ::VectorInterpolation{vdim}, ::VectorizedInterpolation{sdim, <: AbstractRefShape{rdim}}) where {T, vdim, sdim, rdim} = SArray{Tuple{vdim, rdim, rdim}, T, 3, vdim*rdim*rdim} + """ FunctionValues{DiffOrder}(::Type{T}, ip_fun, qr::QuadratureRule, ip_geo::VectorizedInterpolation) -Create a `FunctionValues` object containing the shape values and gradients (up to order `DiffOrder`) -for both the reference cell (precalculated) and the real cell (updated in `reinit!`). +Create a `FunctionValues` object containing the shape values and gradients (up to order `DiffOrder`) +for both the reference cell (precalculated) and the real cell (updated in `reinit!`). """ FunctionValues -struct FunctionValues{DiffOrder, IP, N_t, dNdx_t, dNdξ_t} +struct FunctionValues{DiffOrder, IP, N_t, dNdx_t, dNdξ_t, d2Ndx2_t, d2Ndξ2_t} ip::IP # ::Interpolation Nx::N_t # ::AbstractMatrix{Union{<:Tensor,<:Number}} Nξ::N_t # ::AbstractMatrix{Union{<:Tensor,<:Number}} dNdx::dNdx_t # ::AbstractMatrix{Union{<:Tensor,<:StaticArray}} or Nothing dNdξ::dNdξ_t # ::AbstractMatrix{Union{<:Tensor,<:StaticArray}} or Nothing - function FunctionValues(ip::Interpolation, Nx::N_t, Nξ::N_t, ::Nothing, ::Nothing) where {N_t<:AbstractMatrix} - return new{0, typeof(ip), N_t, Nothing, Nothing}(ip, Nx, Nξ, nothing, nothing) + d2Ndx2::d2Ndx2_t # ::AbstractMatrix{<:Tensor{2}} Hessians of geometric shape functions in ref-domain + d2Ndξ2::d2Ndξ2_t # ::AbstractMatrix{<:Tensor{2}} Hessians of geometric shape functions in ref-domain + function FunctionValues(ip::Interpolation, Nx::N_t, Nξ::N_t, ::Nothing, ::Nothing, ::Nothing, ::Nothing) where {N_t<:AbstractMatrix} + return new{0, typeof(ip), N_t, Nothing, Nothing, Nothing, Nothing}(ip, Nx, Nξ, nothing, nothing, nothing, nothing) + end + function FunctionValues(ip::Interpolation, Nx::N_t, Nξ::N_t, dNdx::AbstractMatrix, dNdξ::AbstractMatrix, ::Nothing, ::Nothing) where {N_t<:AbstractMatrix} + return new{1, typeof(ip), N_t, typeof(dNdx), typeof(dNdξ), Nothing, Nothing}(ip, Nx, Nξ, dNdx, dNdξ, nothing, nothing) end - function FunctionValues(ip::Interpolation, Nx::N_t, Nξ::N_t, dNdx::AbstractMatrix, dNdξ::AbstractMatrix) where {N_t<:AbstractMatrix} - return new{1, typeof(ip), N_t, typeof(dNdx), typeof(dNdξ)}(ip, Nx, Nξ, dNdx, dNdξ) + function FunctionValues(ip::Interpolation, Nx::N_t, Nξ::N_t, dNdx::AbstractMatrix, dNdξ::AbstractMatrix, d2Ndx2::AbstractMatrix, d2Ndξ2::AbstractMatrix) where {N_t<:AbstractMatrix} + return new{2, typeof(ip), N_t, typeof(dNdx), 
typeof(dNdξ), typeof(d2Ndx2), typeof(d2Ndξ2)}(ip, Nx, Nξ, dNdx, dNdξ, d2Ndx2, d2Ndξ2) end end function FunctionValues{DiffOrder}(::Type{T}, ip::Interpolation, qr::QuadratureRule, ip_geo::VectorizedInterpolation) where {DiffOrder, T} n_shape = getnbasefunctions(ip) n_qpoints = getnquadpoints(qr) - + Nξ = zeros(typeof_N(T, ip, ip_geo), n_shape, n_qpoints) Nx = isa(mapping_type(ip), IdentityMapping) ? Nξ : similar(Nξ) + dNdξ = dNdx = d2Ndξ2 = d2Ndx2 = nothing - if DiffOrder == 0 - dNdξ = dNdx = nothing - elseif DiffOrder == 1 + if DiffOrder >= 1 dNdξ = zeros(typeof_dNdξ(T, ip, ip_geo), n_shape, n_qpoints) dNdx = fill(zero(typeof_dNdx(T, ip, ip_geo)) * T(NaN), n_shape, n_qpoints) - else - throw(ArgumentError("Currently only values and gradients can be updated in FunctionValues")) end - fv = FunctionValues(ip, Nx, Nξ, dNdx, dNdξ) + if DiffOrder >= 2 + d2Ndξ2 = zeros(typeof_d2Ndξ2(T, ip, ip_geo), n_shape, n_qpoints) + d2Ndx2 = fill(zero(typeof_d2Ndx2(T, ip, ip_geo)) * T(NaN), n_shape, n_qpoints) + end + + if DiffOrder > 2 + throw(ArgumentError("Currently only values, gradients, and hessians can be updated in FunctionValues")) + end + + fv = FunctionValues(ip, Nx, Nξ, dNdx, dNdξ, d2Ndx2, d2Ndξ2) precompute_values!(fv, getpoints(qr)) # Separate function for qr point update in PointValues return fv end -function precompute_values!(fv::FunctionValues{0}, qr_points::Vector{<:Vec}) - shape_values!(fv.Nξ, fv.ip, qr_points) +function precompute_values!(fv::FunctionValues{0}, qr_points::AbstractVector{<:Vec}) + reference_shape_values!(fv.Nξ, fv.ip, qr_points) +end +function precompute_values!(fv::FunctionValues{1}, qr_points::AbstractVector{<:Vec}) + reference_shape_gradients_and_values!(fv.dNdξ, fv.Nξ, fv.ip, qr_points) end -function precompute_values!(fv::FunctionValues{1}, qr_points::Vector{<:Vec}) - shape_gradients_and_values!(fv.dNdξ, fv.Nξ, fv.ip, qr_points) +function precompute_values!(fv::FunctionValues{2}, qr_points::AbstractVector{<:Vec}) + reference_shape_hessians_gradients_and_values!(fv.d2Ndξ2, fv.dNdξ, fv.Nξ, fv.ip, qr_points) end function Base.copy(v::FunctionValues) @@ -79,12 +103,15 @@ function Base.copy(v::FunctionValues) Nx_copy = v.Nξ === v.Nx ? 
Nξ_copy : copy(v.Nx) # Preserve aliasing dNdx_copy = _copy_or_nothing(v.dNdx) dNdξ_copy = _copy_or_nothing(v.dNdξ) - return FunctionValues(copy(v.ip), Nx_copy, Nξ_copy, dNdx_copy, dNdξ_copy) + d2Ndx2_copy = _copy_or_nothing(v.d2Ndx2) + d2Ndξ2_copy = _copy_or_nothing(v.d2Ndξ2) + return FunctionValues(copy(v.ip), Nx_copy, Nξ_copy, dNdx_copy, dNdξ_copy, d2Ndx2_copy, d2Ndξ2_copy) end getnbasefunctions(funvals::FunctionValues) = size(funvals.Nx, 1) @propagate_inbounds shape_value(funvals::FunctionValues, q_point::Int, base_func::Int) = funvals.Nx[base_func, q_point] @propagate_inbounds shape_gradient(funvals::FunctionValues, q_point::Int, base_func::Int) = funvals.dNdx[base_func, q_point] +@propagate_inbounds shape_hessian(funvals::FunctionValues{2}, q_point::Int, base_func::Int) = funvals.d2Ndx2[base_func, q_point] @propagate_inbounds shape_symmetric_gradient(funvals::FunctionValues, q_point::Int, base_func::Int) = symmetric(shape_gradient(funvals, q_point, base_func)) function_interpolation(funvals::FunctionValues) = funvals.ip @@ -92,6 +119,9 @@ function_difforder(::FunctionValues{DiffOrder}) where DiffOrder = DiffOrder shape_value_type(funvals::FunctionValues) = eltype(funvals.Nx) shape_gradient_type(funvals::FunctionValues) = eltype(funvals.dNdx) shape_gradient_type(::FunctionValues{0}) = nothing +shape_hessian_type(funvals::FunctionValues) = eltype(funvals.d2Ndx2) +shape_hessian_type(::FunctionValues{0}) = nothing +shape_hessian_type(::FunctionValues{1}) = nothing # Checks that the user provides the right dimension of coordinates to reinit! methods to ensure good error messages if not @@ -100,7 +130,7 @@ sdim_from_gradtype(::Type{<:SVector{sdim}}) where sdim = sdim sdim_from_gradtype(::Type{<:SMatrix{<:Any,sdim}}) where sdim = sdim # For performance, these must be fully inferable for the compiler. -# args: valname (:CellValues or :FaceValues), shape_gradient_type, eltype(x) +# args: valname (:CellValues or :FacetValues), shape_gradient_type, eltype(x) function check_reinit_sdim_consistency(valname, gradtype::Type, ::Type{<:Vec{sdim}}) where {sdim} check_reinit_sdim_consistency(valname, Val(sdim_from_gradtype(gradtype)), Val(sdim)) end @@ -110,21 +140,21 @@ function check_reinit_sdim_consistency(valname, ::Val{sdim_val}, ::Val{sdim_x}) throw(ArgumentError("The $valname (sdim=$sdim_val) and coordinates (sdim=$sdim_x) have different spatial dimensions.")) end -# Mapping types -struct IdentityMapping end +# Mapping types +struct IdentityMapping end # Not yet implemented: # struct CovariantPiolaMapping end # PR798 # struct ContravariantPiolaMapping end # PR798 -# struct DoubleCovariantPiolaMapping end -# struct DoubleContravariantPiolaMapping end +# struct DoubleCovariantPiolaMapping end +# struct DoubleContravariantPiolaMapping end mapping_type(fv::FunctionValues) = mapping_type(fv.ip) """ required_geo_diff_order(fun_mapping, fun_diff_order::Int) -Return the required order of geometric derivatives to map -the function values and gradients from the reference cell +Return the required order of geometric derivatives to map +the function values and gradients from the reference cell to the physical cell geometry. """ required_geo_diff_order(::IdentityMapping, fun_diff_order::Int) = fun_diff_order @@ -167,6 +197,29 @@ end return nothing end -# TODO in PR798, apply_mapping! for +@inline function apply_mapping!(funvals::FunctionValues{2}, ::IdentityMapping, q_point::Int, mapping_values, args...) 
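+    # Sketch of the identity implemented below (chain rule, for the rdim == sdim case):
+    # with J = dx/dξ and H = d²x/dξ², the physical Hessian of a shape function is
+    #     d²N/dx² = J⁻ᵀ ⋅ (d²N/dξ² - dN/dx ⋅ H) ⋅ J⁻¹
+    # For vector-valued shape functions the same contraction is performed with
+    # otimesu(Jinv, Jinv) over the two last indices.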
+ Jinv = calculate_Jinv(getjacobian(mapping_values)) + + sdim, rdim = size(Jinv) + (rdim != sdim) && error("apply_mapping! for second order gradients and embedded elements not implemented") + + H = gethessian(mapping_values) + is_vector_valued = first(funvals.Nx) isa Vec + Jinv_otimesu_Jinv = is_vector_valued ? otimesu(Jinv, Jinv) : nothing + @inbounds for j in 1:getnbasefunctions(funvals) + dNdx = dothelper(funvals.dNdξ[j, q_point], Jinv) + if is_vector_valued + d2Ndx2 = (funvals.d2Ndξ2[j, q_point] - dNdx⋅H) ⊡ Jinv_otimesu_Jinv + else + d2Ndx2 = Jinv'⋅(funvals.d2Ndξ2[j, q_point] - dNdx⋅H)⋅Jinv + end + + funvals.dNdx[j, q_point] = dNdx + funvals.d2Ndx2[j, q_point] = d2Ndx2 + end + return nothing +end + +# TODO in PR798, apply_mapping! for # * CovariantPiolaMapping # * ContravariantPiolaMapping diff --git a/src/FEValues/GeometryMapping.jl b/src/FEValues/GeometryMapping.jl index b6346b7550..b5cede1a58 100644 --- a/src/FEValues/GeometryMapping.jl +++ b/src/FEValues/GeometryMapping.jl @@ -1,9 +1,9 @@ """ MappingValues(J, H) -The mapping values are calculated based on a +The mapping values are calculated based on a `geometric_mapping::GeometryMapping` along with the cell coordinates, -and the stored jacobian, `J`, and potentially hessian, `H`, are +and the stored jacobian, `J`, and potentially hessian, `H`, are used when mapping the `FunctionValues` to the current cell during `reinit!`. """ MappingValues @@ -13,14 +13,14 @@ struct MappingValues{JT, HT} H::HT # dJ/dξ # Hessian end -@inline getjacobian(mv::MappingValues{<:Union{AbstractTensor, SMatrix}}) = mv.J -# @inline gethessian(mv::MappingValues{<:Any,<:AbstractTensor}) = mv.H # PR798 +@inline getjacobian(mv::MappingValues{<:Union{AbstractTensor, SMatrix}}) = mv.J +@inline gethessian(mv::MappingValues{<:Any,<:AbstractTensor}) = mv.H """ GeometryMapping{DiffOrder}(::Type{T}, ip_geo, qr::QuadratureRule) -Create a `GeometryMapping` object which contains the geometric +Create a `GeometryMapping` object which contains the geometric * shape values * gradient values (if DiffOrder ≥ 1) @@ -31,11 +31,11 @@ Create a `GeometryMapping` object which contains the geometric GeometryMapping struct GeometryMapping{DiffOrder, IP, M_t, dMdξ_t, d2Mdξ2_t} - ip::IP # ::Interpolation Geometric interpolation + ip::IP # ::Interpolation Geometric interpolation M::M_t # ::AbstractMatrix{<:Number} Values of geometric shape functions dMdξ::dMdξ_t # ::AbstractMatrix{<:Vec} Gradients of geometric shape functions in ref-domain d2Mdξ2::d2Mdξ2_t # ::AbstractMatrix{<:Tensor{2}} Hessians of geometric shape functions in ref-domain - # ::Nothing When not required + # ::Nothing (dMdξ or d2Mdξ2 if not required) function GeometryMapping( ip::IP, M::M_t, ::Nothing, ::Nothing ) where {IP <: ScalarInterpolation, M_t<:AbstractMatrix{<:Number}} @@ -46,12 +46,12 @@ struct GeometryMapping{DiffOrder, IP, M_t, dMdξ_t, d2Mdξ2_t} ) where {IP <: ScalarInterpolation, M_t<:AbstractMatrix{<:Number}, dMdξ_t <: AbstractMatrix{<:Vec}} return new{1, IP, M_t, dMdξ_t, Nothing}(ip, M, dMdξ, nothing) end -#= function GeometryMapping( - ip::IP, M::M_t, dMdξ::dMdξ_t, d2Mdξ2::d2Mdξ2_t) where - {IP <: ScalarInterpolation, M_t<:AbstractMatrix{<:Number}, + function GeometryMapping( + ip::IP, M::M_t, dMdξ::dMdξ_t, d2Mdξ2::d2Mdξ2_t) where + {IP <: ScalarInterpolation, M_t<:AbstractMatrix{<:Number}, dMdξ_t <: AbstractMatrix{<:Vec}, d2Mdξ2_t <: AbstractMatrix{<:Tensor{2}}} return new{2, IP, M_t, dMdξ_t, d2Mdξ2_t}(ip, M, dMdξ, d2Mdξ2) - end =# # PR798 + end end function GeometryMapping{0}(::Type{T}, 
ip::ScalarInterpolation, qr::QuadratureRule) where T n_shape = getnbasefunctions(ip) @@ -63,36 +63,36 @@ end function GeometryMapping{1}(::Type{T}, ip::ScalarInterpolation, qr::QuadratureRule) where T n_shape = getnbasefunctions(ip) n_qpoints = getnquadpoints(qr) - + M = zeros(T, n_shape, n_qpoints) - dMdξ = zeros(Vec{getdim(ip),T}, n_shape, n_qpoints) + dMdξ = zeros(Vec{getrefdim(ip),T}, n_shape, n_qpoints) gm = GeometryMapping(ip, M, dMdξ, nothing) precompute_values!(gm, getpoints(qr)) return gm end -#= function GeometryMapping{2}(::Type{T}, ip::ScalarInterpolation, qr::QuadratureRule) where T +function GeometryMapping{2}(::Type{T}, ip::ScalarInterpolation, qr::QuadratureRule) where T n_shape = getnbasefunctions(ip) n_qpoints = getnquadpoints(qr) - + M = zeros(T, n_shape, n_qpoints) - dMdξ = zeros(Vec{getdim(ip),T}, n_shape, n_qpoints) - d2Mdξ2 = zeros(Tensor{2,getdim(ip),T}, n_shape, n_qpoints) + dMdξ = zeros(Vec{getrefdim(ip),T}, n_shape, n_qpoints) + d2Mdξ2 = zeros(Tensor{2,getrefdim(ip),T}, n_shape, n_qpoints) gm = GeometryMapping(ip, M, dMdξ, d2Mdξ2) precompute_values!(gm, getpoints(qr)) return gm -end =# # PR798 +end -function precompute_values!(gm::GeometryMapping{0}, qr_points::Vector{<:Vec}) - shape_values!(gm.M, gm.ip, qr_points) +function precompute_values!(gm::GeometryMapping{0}, qr_points::AbstractVector{<:Vec}) + reference_shape_values!(gm.M, gm.ip, qr_points) +end +function precompute_values!(gm::GeometryMapping{1}, qr_points::AbstractVector{<:Vec}) + reference_shape_gradients_and_values!(gm.dMdξ, gm.M, gm.ip, qr_points) end -function precompute_values!(gm::GeometryMapping{1}, qr_points::Vector{<:Vec}) - shape_gradients_and_values!(gm.dMdξ, gm.M, gm.ip, qr_points) +function precompute_values!(gm::GeometryMapping{2}, qr_points::AbstractVector{<:Vec}) + reference_shape_hessians_gradients_and_values!(gm.d2Mdξ2, gm.dMdξ, gm.M, gm.ip, qr_points) end -#= function precompute_values!(gm::GeometryMapping{2}, qr_points::Vector{<:Vec}) - shape_hessians_gradients_and_values!(gm.d2Mdξ2, gm.dMdξ, gm.M, gm.ip, qr_points) -end =# # PR798 function Base.copy(v::GeometryMapping) return GeometryMapping(copy(v.ip), copy(v.M), _copy_or_nothing(v.dMdξ), _copy_or_nothing(v.d2Mdξ2)) @@ -117,36 +117,84 @@ end function otimes_returntype(#=typeof(x)=#::Type{<:Vec{dim,Tx}}, #=typeof(dMdξ)=#::Type{<:Vec{dim,TM}}) where {dim, Tx, TM} return Tensor{2,dim,promote_type(Tx,TM)} end -#= function otimes_returntype(#=typeof(x)=#::Type{<:Vec{dim,Tx}}, #=typeof(d2Mdξ2)=#::Type{<:Tensor{2,dim,TM}}) where {dim, Tx, TM} +function otimes_returntype(#=typeof(x)=#::Type{<:Vec{dim,Tx}}, #=typeof(d2Mdξ2)=#::Type{<:Tensor{2,dim,TM}}) where {dim, Tx, TM} return Tensor{3,dim,promote_type(Tx,TM)} -end =# # PR798 +end -@inline function calculate_mapping(::GeometryMapping{0}, q_point, x) +@inline function calculate_mapping(::GeometryMapping{0}, q_point::Int, x::AbstractVector{<:Vec}) return MappingValues(nothing, nothing) end -@inline function calculate_mapping(geo_mapping::GeometryMapping{1}, q_point, x) - fecv_J = zero(otimes_returntype(eltype(x), eltype(geo_mapping.dMdξ))) +@inline function calculate_mapping(geo_mapping::GeometryMapping{1}, q_point::Int, x::AbstractVector{<:Vec}) + J = zero(otimes_returntype(eltype(x), eltype(geo_mapping.dMdξ))) @inbounds for j in 1:getngeobasefunctions(geo_mapping) - #fecv_J += x[j] ⊗ geo_mapping.dMdξ[j, q_point] - fecv_J += otimes_helper(x[j], geo_mapping.dMdξ[j, q_point]) + # J += x[j] ⊗ geo_mapping.dMdξ[j, q_point] + J += otimes_helper(x[j], geo_mapping.dMdξ[j, q_point]) end - return 
MappingValues(fecv_J, nothing) + return MappingValues(J, nothing) end -#= @inline function calculate_mapping(geo_mapping::GeometryMapping{2}, q_point, x) +@inline function calculate_mapping(geo_mapping::GeometryMapping{2}, q_point::Int, x::AbstractVector{<:Vec}) J = zero(otimes_returntype(eltype(x), eltype(geo_mapping.dMdξ))) + sdim, rdim = size(J) + (rdim != sdim) && error("hessian for embedded elements not implemented (rdim=$rdim, sdim=$sdim)") H = zero(otimes_returntype(eltype(x), eltype(geo_mapping.d2Mdξ2))) @inbounds for j in 1:getngeobasefunctions(geo_mapping) J += x[j] ⊗ geo_mapping.dMdξ[j, q_point] H += x[j] ⊗ geo_mapping.d2Mdξ2[j, q_point] end return MappingValues(J, H) -end =# # PR798 +end + +@inline function calculate_mapping(gip::ScalarInterpolation, ξ::Vec, x::AbstractVector{<:Vec}, ::Val{0}) + return MappingValues(nothing, nothing) +end + +@inline function calculate_mapping(gip::ScalarInterpolation, ξ::Vec{rdim,T}, x::AbstractVector{<:Vec{sdim}}, ::Val{1}) where {T,rdim, sdim} + n_basefuncs = getnbasefunctions(gip) + @boundscheck checkbounds(x, Base.OneTo(n_basefuncs)) + + J = zero(otimes_returntype(Vec{sdim,T}, Vec{rdim,T})) + @inbounds for j in 1:n_basefuncs + dMdξ = reference_shape_gradient(gip, ξ, j) + # J += x[j] ⊗ dMdξ # https://github.com/Ferrite-FEM/Tensors.jl/pull/188 + J += otimes_helper(x[j], dMdξ) + end + return MappingValues(J, nothing) +end + +@inline function calculate_mapping(gip::ScalarInterpolation, ξ::Vec{rdim,T}, x::AbstractVector{<:Vec{sdim}}, ::Val{2}) where {T,rdim, sdim} + n_basefuncs = getnbasefunctions(gip) + @boundscheck checkbounds(x, Base.OneTo(n_basefuncs)) + (rdim != sdim) && error("hessian for embedded elements not implemented (rdim=$rdim, sdim=$sdim)") + J = zero(otimes_returntype(Vec{sdim,T}, Vec{rdim,T})) + H = zero(otimes_returntype(eltype(x), typeof(J))) + @inbounds for j in 1:n_basefuncs + d2Mdξ2, dMdξ, _ = reference_shape_hessian_gradient_and_value(gip, ξ, j) + J += x[j] ⊗ dMdξ + H += x[j] ⊗ d2Mdξ2 + end + return MappingValues(J, H) +end calculate_detJ(J::Tensor{2}) = det(J) calculate_detJ(J::SMatrix) = embedding_det(J) +function calculate_jacobian_and_spatial_coordinate(gip::ScalarInterpolation, ξ::Vec{rdim,Tξ}, x::AbstractVector{<:Vec{sdim, Tx}}) where {Tξ, Tx, rdim, sdim} + n_basefuncs = getnbasefunctions(gip) + @boundscheck checkbounds(x, Base.OneTo(n_basefuncs)) + + fecv_J = zero(otimes_returntype(Vec{sdim,Tx}, Vec{rdim,Tξ})) + sx = zero(Vec{sdim, Tx}) + @inbounds for j in 1:n_basefuncs + dMdξ, M = reference_shape_gradient_and_value(gip, ξ, j) + sx += M * x[j] + fecv_J += otimes_helper(x[j], dMdξ) + end + return fecv_J, sx +end + + # Embedded """ diff --git a/src/FEValues/InterfaceValues.jl b/src/FEValues/InterfaceValues.jl index 6b5eac3a13..53c5ecb33a 100644 --- a/src/FEValues/InterfaceValues.jl +++ b/src/FEValues/InterfaceValues.jl @@ -8,18 +8,18 @@ and gradients of shape functions and function on the interfaces between elements The first element of the interface is denoted "here" and the second element "there". **Constructors** -* `InterfaceValues(qr::FaceQuadratureRule, ip::Interpolation)`: same quadrature rule and +* `InterfaceValues(qr::FacetQuadratureRule, ip::Interpolation)`: same quadrature rule and interpolation on both sides, default linear Lagrange geometric interpolation. -* `InterfaceValues(qr::FaceQuadratureRule, ip::Interpolation, ip_geo::Interpolation)`: same +* `InterfaceValues(qr::FacetQuadratureRule, ip::Interpolation, ip_geo::Interpolation)`: same as above but with given geometric interpolation. 
-* `InterfaceValues(qr_here::FaceQuadratureRule, ip_here::Interpolation, qr_there::FaceQuadratureRule, ip_there::Interpolation)`: +* `InterfaceValues(qr_here::FacetQuadratureRule, ip_here::Interpolation, qr_there::FacetQuadratureRule, ip_there::Interpolation)`: different quadrature rule and interpolation on the two sides, default linear Lagrange geometric interpolation. -* `InterfaceValues(qr_here::FaceQuadratureRule, ip_here::Interpolation, ip_geo_here::Interpolation, qr_there::FaceQuadratureRule, ip_there::Interpolation, ip_geo_there::Interpolation)`: +* `InterfaceValues(qr_here::FacetQuadratureRule, ip_here::Interpolation, ip_geo_here::Interpolation, qr_there::FacetQuadratureRule, ip_there::Interpolation, ip_geo_there::Interpolation)`: same as above but with given geometric interpolation. -* `InterfaceValues(fv::FaceValues)`: quadrature rule and interpolations from face values +* `InterfaceValues(fv::FacetValues)`: quadrature rule and interpolations from face values (same on both sides). -* `InterfaceValues(fv_here::FaceValues, fv_there::FaceValues)`: quadrature rule and +* `InterfaceValues(fv_here::FacetValues, fv_there::FacetValues)`: quadrature rule and interpolations from the face values. **Associated methods:** @@ -47,7 +47,7 @@ The first element of the interface is denoted "here" and the second element "the """ InterfaceValues -struct InterfaceValues{FVA <: FaceValues, FVB <: FaceValues} <: AbstractValues +struct InterfaceValues{FVA <: FacetValues, FVB <: FacetValues} <: AbstractValues here::FVA there::FVB function InterfaceValues{FVA, FVB}(here::FVA, there::FVB) where {FVA, FVB} @@ -57,34 +57,34 @@ struct InterfaceValues{FVA <: FaceValues, FVB <: FaceValues} <: AbstractValues end function InterfaceValues( - qr_here::FaceQuadratureRule, ip_here::Interpolation, ipg_here::Interpolation, - qr_there::FaceQuadratureRule, ip_there::Interpolation, ipg_there::Interpolation + qr_here::FacetQuadratureRule, ip_here::Interpolation, ipg_here::Interpolation, + qr_there::FacetQuadratureRule, ip_there::Interpolation, ipg_there::Interpolation ) - # FaceValues constructor enforces that refshape matches for all arguments - here = FaceValues(qr_here, ip_here, ipg_here) - there = FaceValues(qr_there, ip_there, ipg_there) + # FacetValues constructor enforces that refshape matches for all arguments + here = FacetValues(qr_here, ip_here, ipg_here) + there = FacetValues(qr_there, ip_there, ipg_there) return InterfaceValues{typeof(here), typeof(there)}(here, there) end # Same on both sides, default geometric mapping -InterfaceValues(qr_here::FaceQuadratureRule, ip_here::Interpolation) = +InterfaceValues(qr_here::FacetQuadratureRule, ip_here::Interpolation) = InterfaceValues(qr_here, ip_here, deepcopy(qr_here), ip_here) # Same on both sides, given geometric mapping -InterfaceValues(qr_here::FaceQuadratureRule, ip_here::Interpolation, ipg_here::Interpolation) = +InterfaceValues(qr_here::FacetQuadratureRule, ip_here::Interpolation, ipg_here::Interpolation) = InterfaceValues(qr_here, ip_here, ipg_here, deepcopy(qr_here), ip_here, ipg_here) # Different on both sides, default geometric mapping function InterfaceValues( - qr_here::FaceQuadratureRule, ip_here::Interpolation, - qr_there::FaceQuadratureRule, ip_there::Interpolation, + qr_here::FacetQuadratureRule, ip_here::Interpolation, + qr_there::FacetQuadratureRule, ip_there::Interpolation, ) return InterfaceValues( qr_here, ip_here, default_geometric_interpolation(ip_here), qr_there, ip_there, default_geometric_interpolation(ip_there), ) end -# From 
FaceValue(s) -InterfaceValues(facevalues_here::FVA, facevalues_there::FVB = deepcopy(facevalues_here)) where {FVA <: FaceValues, FVB <: FaceValues} = - InterfaceValues{FVA,FVB}(facevalues_here, facevalues_there) +# From FacetValue(s) +InterfaceValues(facetvalues_here::FVA, facetvalues_there::FVB = deepcopy(facetvalues_here)) where {FVA <: FacetValues, FVB <: FacetValues} = + InterfaceValues{FVA,FVB}(facetvalues_here, facetvalues_there) function Base.copy(iv::InterfaceValues) return InterfaceValues(copy(iv.here), copy(iv.there)) @@ -109,38 +109,38 @@ end """ reinit!( iv::InterfaceValues, - cell_here::AbstractCell, coords_here::AbstractVector{Vec{dim, T}}, face_here::Int, - cell_there::AbstractCell, coords_there::AbstractVector{Vec{dim, T}}, face_there::Int + cell_here::AbstractCell, coords_here::AbstractVector{Vec{dim, T}}, facet_here::Int, + cell_there::AbstractCell, coords_there::AbstractVector{Vec{dim, T}}, facet_there::Int ) Update the [`InterfaceValues`](@ref) for the interface between `cell_here` (with cell coordinates `coords_here`) and `cell_there` (with cell coordinates `coords_there`). -`face_here` and `face_there` are the (local) face numbers for the respective cell. +`facet_here` and `facet_there` are the (local) facet numbers for the respective cell. """ function reinit!( iv::InterfaceValues, - cell_here::AbstractCell, coords_here::AbstractVector{Vec{dim, T}}, face_here::Int, - cell_there::AbstractCell, coords_there::AbstractVector{Vec{dim, T}}, face_there::Int + cell_here::AbstractCell, coords_here::AbstractVector{Vec{dim, T}}, facet_here::Int, + cell_there::AbstractCell, coords_there::AbstractVector{Vec{dim, T}}, facet_there::Int ) where {dim, T} # reinit! the here side as normal - reinit!(iv.here, cell_here, coords_here, face_here) - dim == 1 && return reinit!(iv.there, cell_there, coords_there, face_there) + reinit!(iv.here, cell_here, coords_here, facet_here) + dim == 1 && return reinit!(iv.there, cell_there, coords_there, facet_there) # Transform the quadrature points from the here side to the there side - set_current_face!(iv.there, face_there) # Includes boundscheck - interface_transformation = InterfaceOrientationInfo(cell_here, cell_there, face_here, face_there) - quad_points_a = getpoints(iv.here.fqr, face_here) - quad_points_b = getpoints(iv.there.fqr, face_there) + set_current_facet!(iv.there, facet_there) # Includes boundscheck + interface_transformation = InterfaceOrientationInfo(cell_here, cell_there, facet_here, facet_there) + quad_points_a = getpoints(iv.here.fqr, facet_here) + quad_points_b = getpoints(iv.there.fqr, facet_there) transform_interface_points!(quad_points_b, quad_points_a, interface_transformation) # TODO: This is the bottleneck, cache it? @assert length(quad_points_a) <= length(quad_points_b) - + # Re-evaluate shape functions in the transformed quadrature points precompute_values!(get_fun_values(iv.there), quad_points_b) precompute_values!(get_geo_mapping(iv.there), quad_points_b) - + # reinit! the "there" side - reinit!(iv.there, cell_there, coords_there, face_there) + reinit!(iv.there, cell_there, coords_there, facet_there) return iv end @@ -211,13 +211,11 @@ function shape_value_average end """ shape_value_jump(iv::InterfaceValues, qp::Int, i::Int) -Compute the jump of the value of shape function `i` at quadrature point `qp` across the -interface. - -This function uses the definition ``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{here} -\\vec{v}^\\text{there}``.
to obtain the form -``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} ⋅ \\vec{n}^\\text{there} + \\vec{v}^\\text{here} ⋅ \\vec{n}^\\text{here}`` -multiply by the outward facing normal to the first element's side of the interface (which is the default normal for [`getnormal`](@ref) with [`InterfaceValues`](@ref)). +Compute the jump of the value of shape function `i` at quadrature point `qp` across the interface in the default normal direction. +This function uses the definition ``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} -\\vec{v}^\\text{here}``. To obtain the form, +``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} \\cdot \\vec{n}^\\text{there} + \\vec{v}^\\text{here} \\cdot \\vec{n}^\\text{here}``, +multiply by minus the outward facing normal to the first element's side of the interface (which is the default normal for [`getnormal`](@ref) with [`InterfaceValues`](@ref)). """ function shape_value_jump end @@ -232,12 +230,11 @@ function shape_gradient_average end """ shape_gradient_jump(iv::InterfaceValues, qp::Int, i::Int) -Compute the jump of the gradient of shape function `i` at quadrature point `qp` across the -interface. +Compute the jump of the gradient of shape function `i` at quadrature point `qp` across the interface in the default normal direction. -This function uses the definition ``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{here} -\\vec{v}^\\text{there}``. to obtain the form -``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} ⋅ \\vec{n}^\\text{there} + \\vec{v}^\\text{here} ⋅ \\vec{n}^\\text{here}`` -multiply by the outward facing normal to the first element's side of the interface (which is the default normal for [`getnormal`](@ref) with [`InterfaceValues`](@ref)). +This function uses the definition ``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} -\\vec{v}^\\text{here}``. To obtain the form, +``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} ⋅ \\vec{n}^\\text{there} + \\vec{v}^\\text{here} ⋅ \\vec{n}^\\text{here}``, +multiply by minus the outward facing normal to the first element's side of the interface (which is the default normal for [`getnormal`](@ref) with [`InterfaceValues`](@ref)). """ function shape_gradient_jump end @@ -275,7 +272,7 @@ for (func, f_, is_avg) in ( function $(func)(iv::InterfaceValues, qp::Int, i::Int) f_here = $(f_)(iv, qp, i; here = true) f_there = $(f_)(iv, qp, i; here = false) - return $(is_avg ? :((f_here + f_there) / 2) : :(f_here - f_there)) + return $(is_avg ? :((f_here + f_there) / 2) : :(f_there - f_here)) end end end @@ -292,11 +289,11 @@ function function_value_average end function_value_jump(iv::InterfaceValues, q_point::Int, u) function_value_jump(iv::InterfaceValues, q_point::Int, u, dof_range_here, dof_range_there) -Compute the jump of the function value at the quadrature point over the interface. +Compute the jump of the function value at the quadrature point over the interface along the default normal direction. -This function uses the definition ``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{here} -\\vec{v}^\\text{there}``. to obtain the form -``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} ⋅ \\vec{n}^\\text{there} + \\vec{v}^\\text{here} ⋅ \\vec{n}^\\text{here}`` -multiply by the outward facing normal to the first element's side of the interface (which is the default normal for [`getnormal`](@ref) with [`InterfaceValues`](@ref)). +This function uses the definition ``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} -\\vec{v}^\\text{here}``. 
To obtain the form, +``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} ⋅ \\vec{n}^\\text{there} + \\vec{v}^\\text{here} ⋅ \\vec{n}^\\text{here}``, +multiply by minus the outward facing normal to the first element's side of the interface (which is the default normal for [`getnormal`](@ref) with [`InterfaceValues`](@ref)). """ function function_value_jump end @@ -312,11 +309,11 @@ function function_gradient_average end function_gradient_jump(iv::InterfaceValues, q_point::Int, u) function_gradient_jump(iv::InterfaceValues, q_point::Int, u, dof_range_here, dof_range_there) -Compute the jump of the function gradient at the quadrature point over the interface. +Compute the jump of the function gradient at the quadrature point over the interface along the default normal direction. -This function uses the definition ``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{here} -\\vec{v}^\\text{there}``. to obtain the form -``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} ⋅ \\vec{n}^\\text{there} + \\vec{v}^\\text{here} ⋅ \\vec{n}^\\text{here}`` -multiply by the outward facing normal to the first element's side of the interface (which is the default normal for [`getnormal`](@ref) with [`InterfaceValues`](@ref)). +This function uses the definition ``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} -\\vec{v}^\\text{here}``. To obtain the form, +``\\llbracket \\vec{v} \\rrbracket=\\vec{v}^\\text{there} ⋅ \\vec{n}^\\text{there} + \\vec{v}^\\text{here} ⋅ \\vec{n}^\\text{here}``, +multiply by minus the outward facing normal to the first element's side of the interface (which is the default normal for [`getnormal`](@ref) with [`InterfaceValues`](@ref)). """ function function_gradient_jump end @@ -368,7 +365,7 @@ for (func, f_, is_avg) in ( dof_range_there = (1:getnbasefunctions(iv.there)) .+ getnbasefunctions(iv.here) f_here = $(f_)(iv.here, qp, @view(u[dof_range_here])) f_there = $(f_)(iv.there, qp, @view(u[dof_range_there])) - return $(is_avg ? :((f_here + f_there) / 2) : :(f_here - f_there)) + return $(is_avg ? :((f_here + f_there) / 2) : :(f_there - f_here)) end function $(func)( iv::InterfaceValues, qp::Int, @@ -377,7 +374,7 @@ for (func, f_, is_avg) in ( ) f_here = $(f_)(iv.here, qp, u, dof_range_here) f_there = $(f_)(iv.there, qp, u, dof_range_there) - return $(is_avg ? :((f_here + f_there) / 2) : :(f_here - f_there)) + return $(is_avg ? :((f_here + f_there) / 2) : :(f_there - f_here)) end end end @@ -401,7 +398,7 @@ end Relative orientation information for 1D and 2D interfaces in 2D and 3D elements respectively. This information is used to construct the transformation matrix to -transform the quadrature points from face_a to face_b achieving synced +transform the quadrature points from facet_a to facet_b achieving synced spatial coordinates. Face B's orientation relative to Face A's can possibly be flipped (i.e. the vertices indices order is reversed) and the vertices can be rotated against each other. @@ -414,22 +411,22 @@ struct InterfaceOrientationInfo{RefShapeA, RefShapeB} flipped::Bool shift_index::Int lowest_node_shift_index::Int - face_a::Int - face_b::Int + facet_a::Int + facet_b::Int end """ - InterfaceOrientationInfo(cell_a::AbstractCell, cell_b::AbstractCell, face_a::Int, face_b::Int) + InterfaceOrientationInfo(cell_a::AbstractCell, cell_b::AbstractCell, facet_a::Int, facet_b::Int) Return the relative orientation info for face B with regards to face A. Relative orientation is computed using a [`OrientationInfo`](@ref) for each side of the interface. 
""" -function InterfaceOrientationInfo(cell_a::AbstractCell{RefShapeA}, cell_b::AbstractCell{RefShapeB}, face_a::Int, face_b::Int) where {RefShapeA <: AbstractRefShape, RefShapeB <: AbstractRefShape} - OI_a = OrientationInfo(faces(cell_a)[face_a]) - OI_b = OrientationInfo(faces(cell_b)[face_b]) +function InterfaceOrientationInfo(cell_a::AbstractCell{RefShapeA}, cell_b::AbstractCell{RefShapeB}, facet_a::Int, facet_b::Int) where {RefShapeA <: AbstractRefShape, RefShapeB <: AbstractRefShape} + OI_a = OrientationInfo(facets(cell_a)[facet_a]) + OI_b = OrientationInfo(facets(cell_b)[facet_b]) flipped = OI_a.flipped != OI_b.flipped shift_index = OI_b.shift_index - OI_a.shift_index - return InterfaceOrientationInfo{RefShapeA, RefShapeB}(flipped, shift_index, OI_b.shift_index, face_a, face_b) + return InterfaceOrientationInfo{RefShapeA, RefShapeB}(flipped, shift_index, OI_b.shift_index, facet_a, facet_b) end function InterfaceOrientationInfo(_::AbstractCell{RefShapeA}, _::AbstractCell{RefShapeB}, _::Int, _::Int) where {RefShapeA <: AbstractRefShape{1}, RefShapeB <: AbstractRefShape{1}} @@ -447,8 +444,8 @@ If the face is not flipped then the transformation is a function of relative ori get_transformation_matrix function get_transformation_matrix(interface_transformation::InterfaceOrientationInfo{RefShapeA}) where RefShapeA <: AbstractRefShape{3} - face_a = interface_transformation.face_a - facenodes = reference_faces(RefShapeA)[face_a] + facet_a = interface_transformation.facet_a + facenodes = reference_facets(RefShapeA)[facet_a] _get_transformation_matrix(facenodes, interface_transformation) end @@ -485,11 +482,11 @@ end end @inline function _get_transformation_matrix(::NTuple{N,Int}, ::InterfaceOrientationInfo) where N - throw(ArgumentError("transformation is not implemented")) + throw(ArgumentError("transformation is not implemented")) end @doc raw""" - transform_interface_points!(dst::Vector{Vec{3, Float64}}, points::Vector{Vec{3, Float64}}, interface_transformation::InterfaceOrientationInfo) + transform_interface_points!(dst::AbstractVector{Vec{3, Float64}}, points::AbstractVector{Vec{3, Float64}}, interface_transformation::InterfaceOrientationInfo) Transform the points from face A to face B using the orientation information of the interface and store it in the vector dst. For 3D, the faces are transformed into regular polygons such that the rotation angle is the shift in reference node index × 2π ÷ number of edges in face. @@ -552,28 +549,28 @@ y | \ """ transform_interface_points! 
-function transform_interface_points!(dst::Vector{Vec{3, Float64}}, points::Vector{Vec{3, Float64}}, interface_transformation::InterfaceOrientationInfo{RefShapeA, RefShapeB}) where {RefShapeA <: AbstractRefShape{3}, RefShapeB <: AbstractRefShape{3}} - face_a = interface_transformation.face_a - face_b = interface_transformation.face_b +function transform_interface_points!(dst::AbstractVector{Vec{3, Float64}}, points::AbstractVector{Vec{3, Float64}}, interface_transformation::InterfaceOrientationInfo{RefShapeA, RefShapeB}) where {RefShapeA <: AbstractRefShape{3}, RefShapeB <: AbstractRefShape{3}} + facet_a = interface_transformation.facet_a + facet_b = interface_transformation.facet_b M = get_transformation_matrix(interface_transformation) for (idx, point) in pairs(points) - face_point = element_to_face_transformation(point, RefShapeA, face_a) + face_point = element_to_facet_transformation(point, RefShapeA, facet_a) result = M * Vec(face_point[1],face_point[2], 1.0) - dst[idx] = face_to_element_transformation(Vec(result[1],result[2]), RefShapeB, face_b) + dst[idx] = facet_to_element_transformation(Vec(result[1],result[2]), RefShapeB, facet_b) end return nothing end -function transform_interface_points!(dst::Vector{Vec{2, Float64}}, points::Vector{Vec{2, Float64}}, interface_transformation::InterfaceOrientationInfo{RefShapeA, RefShapeB}) where {RefShapeA <: AbstractRefShape{2}, RefShapeB <: AbstractRefShape{2}} - face_a = interface_transformation.face_a - face_b = interface_transformation.face_b +function transform_interface_points!(dst::AbstractVector{Vec{2, Float64}}, points::AbstractVector{Vec{2, Float64}}, interface_transformation::InterfaceOrientationInfo{RefShapeA, RefShapeB}) where {RefShapeA <: AbstractRefShape{2}, RefShapeB <: AbstractRefShape{2}} + facet_a = interface_transformation.facet_a + facet_b = interface_transformation.facet_b flipped = interface_transformation.flipped for (idx, point) in pairs(points) - face_point = element_to_face_transformation(point, RefShapeA, face_a) + face_point = element_to_facet_transformation(point, RefShapeA, facet_a) flipped && (face_point *= -1) - dst[idx] = face_to_element_transformation(face_point, RefShapeB, face_b) + dst[idx] = facet_to_element_transformation(face_point, RefShapeB, facet_b) end return nothing end diff --git a/src/FEValues/PointValues.jl b/src/FEValues/PointValues.jl index ab0f98e353..8168ff73a6 100644 --- a/src/FEValues/PointValues.jl +++ b/src/FEValues/PointValues.jl @@ -28,8 +28,9 @@ function PointValues(cv::CellValues) T = typeof(getdetJdV(cv, 1)) ip_fun = function_interpolation(cv) ip_geo = geometric_interpolation(cv) - update_gradients = function_difforder(cv) == 1 - return PointValues(T, ip_fun, ip_geo; update_gradients) + update_gradients = Val(function_difforder(cv) ≥ 1) + update_hessians = Val(function_difforder(cv) ≥ 2) + return PointValues(T, ip_fun, ip_geo; update_gradients, update_hessians) end function PointValues(ip::Interpolation, ipg::Interpolation = default_geometric_interpolation(ip); kwargs...) return PointValues(Float64, ip, ipg; kwargs...) @@ -39,8 +40,8 @@ function PointValues(::Type{T}, ip::IP, ipg::GIP = default_geometric_interpolati IP <: Interpolation{shape}, GIP <: Interpolation{shape} } - qr = QuadratureRule{shape, T}([one(T)], [zero(Vec{dim, T})]) - cv = CellValues(T, qr, ip, ipg; update_detJdV = false, kwargs...) + qr = QuadratureRule{shape}([one(T)], [zero(Vec{dim, T})]) + cv = CellValues(T, qr, ip, ipg; update_detJdV = Val(false), kwargs...) 
return PointValues{typeof(cv)}(cv) end @@ -62,7 +63,7 @@ function_symmetric_gradient(pv::PointValues, u::AbstractVector, args...) = # reinit! on PointValues must first update N and dNdξ for the new "quadrature point" # and then call the regular reinit! for the wrapped CellValues to update dNdx -function reinit!(pv::PointValues, x::AbstractVector{<:Vec{D}}, ξ::Vec{D}) where {D} +function reinit!(pv::PointValues, x::AbstractVector{<:Vec{sdim}}, ξ::Vec{rdim}) where {sdim, rdim} # Update the quadrature point location qr_points = getpoints(pv.cv.qr) qr_points[1] = ξ diff --git a/src/FEValues/common_values.jl b/src/FEValues/common_values.jl index 181e54b03c..f8e744f975 100644 --- a/src/FEValues/common_values.jl +++ b/src/FEValues/common_values.jl @@ -26,21 +26,40 @@ end end """ - reinit!(cv::CellValues, cell::AbstractCell, x::Vector) - reinit!(cv::CellValues, x::Vector) - reinit!(fv::FaceValues, cell::AbstractCell, x::Vector, face::Int) - reinit!(fv::FaceValues, x::Vector, face::Int) + ValuesUpdateFlags(ip_fun::Interpolation; update_gradients = Val(true), update_hessians = Val(false), update_detJdV = Val(true)) -Update the `CellValues`/`FaceValues` object for a cell or face with coordinates `x`. +Creates a singelton type for specifying what parts of the AbstractValues should be updated. Note that this is internal +API used to get type-stable construction. Keyword arguments in `AbstractValues` constructors are forwarded, and the public API +is passing these as `Bool`, while the `ValuesUpdateFlags` method supports both boolean and `Val(::Bool)` keyword args. +""" +function ValuesUpdateFlags(ip_fun::Interpolation; update_gradients = Val(true), update_hessians = Val(false), update_detJdV = Val(true)) + toval(v::Bool) = Val(v) + toval(V::Val) = V + return ValuesUpdateFlags(ip_fun, toval(update_gradients), toval(update_hessians), toval(update_detJdV)) +end +function ValuesUpdateFlags(ip_fun::Interpolation, ::Val{update_gradients}, ::Val{update_hessians}, ::Val{update_detJdV} + ) where {update_gradients, update_hessians, update_detJdV} + FunDiffOrder = update_hessians ? 2 : (update_gradients ? 1 : 0) + GeoDiffOrder = max(required_geo_diff_order(mapping_type(ip_fun), FunDiffOrder), update_detJdV) + return ValuesUpdateFlags{FunDiffOrder, GeoDiffOrder, update_detJdV}() +end + +""" + reinit!(cv::CellValues, cell::AbstractCell, x::AbstractVector) + reinit!(cv::CellValues, x::AbstractVector) + reinit!(fv::FacetValues, cell::AbstractCell, x::AbstractVector, face::Int) + reinit!(fv::FacetValues, x::AbstractVector, face::Int) + +Update the `CellValues`/`FacetValues` object for a cell or face with coordinates `x`. The derivatives of the shape functions, and the new integration weights are computed. -For interpolations with non-identity mappings, the current `cell` is also required. +For interpolations with non-identity mappings, the current `cell` is also required. """ reinit! """ getnquadpoints(fe_v::AbstractValues) -Return the number of quadrature points. For `FaceValues`, +Return the number of quadrature points. For `FacetValues`, this is the number for the current face. """ function getnquadpoints end @@ -71,7 +90,7 @@ shape_value(fe_v::AbstractValues, q_point::Int, base_function::Int) """ geometric_value(fe_v::AbstractValues, q_point, base_function::Int) -Return the value of the geometric shape function `base_function` evaluated in +Return the value of the geometric shape function `base_function` evaluated in quadrature point `q_point`. 
""" geometric_value(fe_v::AbstractValues, q_point::Int, base_function::Int) @@ -105,6 +124,12 @@ end divergence_from_gradient(grad::Vec) = sum(grad) divergence_from_gradient(grad::Tensor{2}) = tr(grad) +""" + shape_curl(fe_v::AbstractValues, q_point::Int, base_function::Int) + +Return the curl of shape function `base_function` evaluated in +quadrature point `q_point`. +""" function shape_curl(cv::AbstractValues, q_point::Int, base_func::Int) return curl_from_gradient(shape_gradient(cv, q_point, base_func)) end @@ -205,6 +230,38 @@ function function_gradient_init(cv::AbstractValues, ::AbstractVector{T}) where { return zero(T) ⊗ zero(shape_gradient_type(cv)) end +""" + function_hessian(fe_v::AbstractValues{dim}, q_point::Int, u::AbstractVector{<:AbstractFloat}, [dof_range]) + + Compute the hessian of the function in a quadrature point. `u` is a vector with values + for the degrees of freedom. +""" +function function_hessian(fe_v::AbstractValues, q_point::Int, u::AbstractVector, dof_range = eachindex(u)) + n_base_funcs = getnbasefunctions(fe_v) + length(dof_range) == n_base_funcs || throw_incompatible_dof_length(length(dof_range), n_base_funcs) + @boundscheck checkbounds(u, dof_range) + @boundscheck checkquadpoint(fe_v, q_point) + hess = function_hessian_init(fe_v, u) + @inbounds for (i, j) in pairs(dof_range) + hess += shape_hessian(fe_v, q_point, i) * u[j] + end + return hess +end + +""" + shape_hessian_type(fe_v::AbstractValues) + +Return the type of `shape_hessian(fe_v, q_point, base_function)` +""" +function shape_hessian_type(fe_v::AbstractValues) + # Default fallback + return typeof(shape_hessian(fe_v, 1, 1)) +end + +function function_hessian_init(cv::AbstractValues, ::AbstractVector{T}) where {T} + return zero(shape_hessian_type(cv)) * zero(T) +end + """ function_symmetric_gradient(fe_v::AbstractValues, q_point::Int, u::AbstractVector, [dof_range]) @@ -272,7 +329,10 @@ Compute the spatial coordinate in a quadrature point. `x` contains the nodal coordinates of the cell. The coordinate is computed, using the geometric interpolation, as -``\\mathbf{x} = \\sum\\limits_{i = 1}^n M_i (\\mathbf{x}) \\mathbf{\\hat{x}}_i`` +``\\mathbf{x} = \\sum\\limits_{i = 1}^n M_i (\\mathbf{\\xi}) \\mathbf{\\hat{x}}_i``. + +where ``\\xi``is the coordinate of the given quadrature point `q_point` of the associated +quadrature rule. """ function spatial_coordinate(fe_v::AbstractValues, q_point::Int, x::AbstractVector{<:Vec}) n_base_funcs = getngeobasefunctions(fe_v) @@ -285,28 +345,43 @@ function spatial_coordinate(fe_v::AbstractValues, q_point::Int, x::AbstractVecto return vec end +""" + spatial_coordinate(ip::ScalarInterpolation, ξ::Vec, x::AbstractVector{<:Vec{sdim, T}}) + +Compute the spatial coordinate in a given quadrature point. `x` contains the nodal coordinates of the cell. 
+ +The coordinate is computed, using the geometric interpolation, as +``\\mathbf{x} = \\sum\\limits_{i = 1}^n M_i (\\mathbf{\\xi}) \\mathbf{\\hat{x}}_i`` +""" +function spatial_coordinate(interpolation::ScalarInterpolation, ξ::Vec, x::AbstractVector{<:Vec}) + n_basefuncs = getnbasefunctions(interpolation) + @boundscheck checkbounds(x, Base.OneTo(n_basefuncs)) + vec = zero(eltype(x)) + @inbounds for j in 1:n_basefuncs + M = reference_shape_value(interpolation, ξ, j) + vec += M * x[j] + end + return vec +end -# Utility functions used by GeometryMapping, FunctionValues +# Utility functions used by GeometryMapping, FunctionValues _copy_or_nothing(x) = copy(x) _copy_or_nothing(::Nothing) = nothing -function shape_values!(values::AbstractMatrix, ip, qr_points::Vector{<:Vec}) +function reference_shape_values!(values::AbstractMatrix, ip, qr_points::AbstractVector{<:Vec}) for (qp, ξ) in pairs(qr_points) - shape_values!(@view(values[:, qp]), ip, ξ) + reference_shape_values!(@view(values[:, qp]), ip, ξ) end end -function shape_gradients_and_values!(gradients::AbstractMatrix, values::AbstractMatrix, ip, qr_points::Vector{<:Vec}) +function reference_shape_gradients_and_values!(gradients::AbstractMatrix, values::AbstractMatrix, ip, qr_points::AbstractVector{<:Vec}) for (qp, ξ) in pairs(qr_points) - shape_gradients_and_values!(@view(gradients[:, qp]), @view(values[:, qp]), ip, ξ) + reference_shape_gradients_and_values!(@view(gradients[:, qp]), @view(values[:, qp]), ip, ξ) end end -#= PR798 -function shape_hessians_gradients_and_values!(hessians::AbstractMatrix, gradients::AbstractMatrix, values::AbstractMatrix, ip, qr_points::Vector{<:Vec}) +function reference_shape_hessians_gradients_and_values!(hessians::AbstractMatrix, gradients::AbstractMatrix, values::AbstractMatrix, ip, qr_points::AbstractVector{<:Vec}) for (qp, ξ) in pairs(qr_points) - shape_hessians_gradients_and_values!(@view(hessians[:, qp]), @view(gradients[:, qp]), @view(values[:, qp]), ip, ξ) + reference_shape_hessians_gradients_and_values!(@view(hessians[:, qp]), @view(gradients[:, qp]), @view(values[:, qp]), ip, ξ) end end -=# - diff --git a/src/FEValues/face_integrals.jl b/src/FEValues/face_integrals.jl index f2145807cb..d2d26bc6bc 100644 --- a/src/FEValues/face_integrals.jl +++ b/src/FEValues/face_integrals.jl @@ -1,66 +1,66 @@ """ - face_to_element_transformation(point::Vec, ::Type{<:AbstractRefShape}, face::Int) + facet_to_element_transformation(point::Vec, ::Type{<:AbstractRefShape}, facet::Int) -Transform quadrature point from face's reference coordinates to coordinates on the -cell's face, increasing the number of dimensions by one. +Transform quadrature point from the facet's reference coordinates to coordinates on the +cell's facet, increasing the number of dimensions by one. """ -face_to_element_transformation +facet_to_element_transformation """ - element_to_face_transformation(point::AbstractVector, cell::AbstractCell{AbstractRefShape}, face::Int) + element_to_facet_transformation(point::AbstractVector, ::Type{<:AbstractRefShape}, facet::Int) -Transform quadrature point from cell's coordinates to the face's reference coordinates, decreasing the number of dimensions by one. -This is the inverse of `face_to_element_transformation`. +Transform quadrature point from the cell's coordinates to the facet's reference coordinates, decreasing the number of dimensions by one. +This is the inverse of `facet_to_element_transformation`. 
""" -element_to_face_transformation +element_to_facet_transformation """ - weighted_normal(J::AbstractTensor, fv::FaceValues, face::Int) + weighted_normal(J::AbstractTensor, fv::FacetValues, face::Int) weighted_normal(J::AbstractTensor, ::Type{<:AbstractRefShape}, face::Int) -Compute the vector normal to the face weighted by the area ratio between the face and the -reference face. This is computed by taking the cross product of the Jacobian components that -align to the face local axis. +Compute the vector normal to the facet weighted by the area ratio between the facet and the +reference facet. This is computed by taking the cross product of the Jacobian components that +align to the facet's local axis. """ function weighted_normal end """ - create_face_quad_rule(::Type{RefShape}, w::Vector{T}, p::Vector{Vec{N, T}}) - create_face_quad_rule( + create_facet_quad_rule(::Type{RefShape}, w::AbstractVectorä{T}, p::AbstractVectorä{Vec{N, T}}) + create_facet_quad_rule( ::Type{RefShape}, - quad_faces::Vector{Int}, w_quad::Vector{T}, p_quad::Vector{Vec{N, T}}, - tri_faces::Vector{Int}, w_tri::Vector{T}, p_tri::Vector{Vec{N, T}} + quad_faces::AbstractVectorä{Int}, w_quad::AbstractVector{T}, p_quad::AbstractVector{Vec{N, T}}, + tri_faces::AbstractVector{Int}, w_tri::AbstractVector{T}, p_tri::AbstractVector{Vec{N, T}} ) -Create a ["FaceQuadratureRule"](@ref) for the given cell type, weights and points. If the -cell has faces of different shapes (i.e. quadrilaterals and triangles) then each shape's -faces indices, weights and points are passed separately. +Create a ["FacetQuadratureRule"](@ref) for the given cell type, weights and points. If the +cell has facets of different shapes (i.e. quadrilaterals and triangles) then each shape's +facets indices, weights and points are passed separately. 
""" -function create_face_quad_rule(::Type{RefShape}, w::Vector{T}, p::Vector{Vec{N, T}}) where {N, T, RefShape <: AbstractRefShape} - face_quad_rule = QuadratureRule{RefShape, T, getdim(AbstractCell{RefShape})}[] - for face in 1:nfaces(RefShape) - new_points = [face_to_element_transformation(p[i], RefShape, face) for i in 1:length(w)] - push!(face_quad_rule, QuadratureRule{RefShape, T}(w, new_points)) +function create_facet_quad_rule(::Type{RefShape}, w::AbstractVector{T}, p::AbstractVector{Vec{N, T}}) where {N, T, RefShape <: AbstractRefShape} + facet_quad_rule = QuadratureRule{RefShape, Vector{T}, Vector{Vec{N+1, T}}}[] + for facet in 1:nfacets(RefShape) + new_points = [facet_to_element_transformation(p[i], RefShape, facet) for i in 1:length(w)] + push!(facet_quad_rule, QuadratureRule{RefShape}(copy(w), new_points)) end - return FaceQuadratureRule(face_quad_rule) + return FacetQuadratureRule(facet_quad_rule) end # For cells with mixed faces -function create_face_quad_rule( +function create_facet_quad_rule( ::Type{RefShape}, - quad_faces::Vector{Int}, w_quad::Vector{T}, p_quad::Vector{Vec{N, T}}, - tri_faces::Vector{Int}, w_tri::Vector{T}, p_tri::Vector{Vec{N, T}} + quad_facets::AbstractVector{Int}, w_quad::AbstractVector{T}, p_quad::AbstractVector{Vec{N, T}}, + tri_facets::AbstractVector{Int}, w_tri::AbstractVector{T}, p_tri::AbstractVector{Vec{N, T}} ) where {N, T, RefShape <: Union{RefPrism, RefPyramid}} - face_quad_rule = Vector{QuadratureRule{RefShape, T, getdim(AbstractCell{RefShape})}}(undef, nfaces(RefShape)) - for face in quad_faces - new_points = [face_to_element_transformation(p_quad[i], RefShape, face) for i in 1:length(w_quad)] - face_quad_rule[face] = QuadratureRule{RefShape, T}(w_quad, new_points) + facet_quad_rule = Vector{QuadratureRule{RefShape, Vector{T}, Vector{Vec{N+1, T}}}}(undef, nfacets(RefShape)) + for facet in quad_facets + new_points = [facet_to_element_transformation(p_quad[i], RefShape, facet) for i in 1:length(w_quad)] + facet_quad_rule[facet] = QuadratureRule{RefShape}(copy(w_quad), new_points) end - for face in tri_faces - new_points = [face_to_element_transformation(p_tri[i], RefShape, face) for i in 1:length(w_tri)] - face_quad_rule[face] = QuadratureRule{RefShape, T}(w_tri, new_points) + for facet in tri_facets + new_points = [facet_to_element_transformation(p_tri[i], RefShape, facet) for i in 1:length(w_tri)] + facet_quad_rule[facet] = QuadratureRule{RefShape}(copy(w_tri), new_points) end - return FaceQuadratureRule(face_quad_rule) + return FacetQuadratureRule(facet_quad_rule) end ################## @@ -68,24 +68,24 @@ end ################## # Mapping from to 0D node to 1D line vertex. -function face_to_element_transformation(::Union{Vec{0, T},Vec{1, T}}, ::Type{RefLine}, face::Int) where {T} +function facet_to_element_transformation(::Union{Vec{0, T},Vec{1, T}}, ::Type{RefLine}, face::Int) where {T} face == 1 && return Vec{1, T}(( -one(T),)) face == 2 && return Vec{1, T}(( one(T),)) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end # Mapping from 1D line to point. 
-function element_to_face_transformation(point::Vec{1, T}, ::Type{RefLine}, face::Int) where T +function element_to_facet_transformation(point::Vec{1, T}, ::Type{RefLine}, face::Int) where T x = point[] face == 1 && return Vec(-x) face == 2 && return Vec( x) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end function weighted_normal(::Tensor{2,1,T}, ::Type{RefLine}, face::Int) where {T} face == 1 && return Vec{1,T}((-one(T),)) face == 2 && return Vec{1,T}(( one(T),)) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end ########################### @@ -93,23 +93,23 @@ end ########################### # Mapping from 1D line to 2D face of a quadrilateral. -function face_to_element_transformation(point::Vec{1, T}, ::Type{RefQuadrilateral}, face::Int) where T +function facet_to_element_transformation(point::Vec{1, T}, ::Type{RefQuadrilateral}, face::Int) where T x = point[1] face == 1 && return Vec{2, T}(( x, -one(T))) face == 2 && return Vec{2, T}(( one(T), x)) face == 3 && return Vec{2, T}(( -x, one(T))) face == 4 && return Vec{2, T}(( -one(T), -x)) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end # Mapping from 2D face of a quadrilateral to 1D line. -function element_to_face_transformation(point::Vec{2, T}, ::Type{RefQuadrilateral}, face::Int) where T +function element_to_facet_transformation(point::Vec{2, T}, ::Type{RefQuadrilateral}, face::Int) where T x, y = point face == 1 && return Vec( x) face == 2 && return Vec( y) face == 3 && return Vec( -x) face == 4 && return Vec( -y) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end function weighted_normal(J::Tensor{2,2}, ::Type{RefQuadrilateral}, face::Int) @@ -119,7 +119,7 @@ function weighted_normal(J::Tensor{2,2}, ::Type{RefQuadrilateral}, face::Int) face == 3 && return Vec{2}((-J[2,1], J[1,1])) face == 4 && return Vec{2}((-J[2,2], J[1,2])) end - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end ###################### @@ -127,21 +127,21 @@ end ###################### # Mapping from 1D line to 2D face of a triangle. -function face_to_element_transformation(point::Vec{1, T}, ::Type{RefTriangle}, face::Int) where T +function facet_to_element_transformation(point::Vec{1, T}, ::Type{RefTriangle}, face::Int) where T x = (point[1] + one(T)) / 2 face == 1 && return Vec{2, T}(( one(T) - x, x )) face == 2 && return Vec{2, T}(( zero(T), one(T) -x)) face == 3 && return Vec{2, T}(( x, zero(T))) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end # Mapping from 2D face of a triangle to 1D line. 
-function element_to_face_transformation(point::Vec{2, T}, ::Type{RefTriangle}, face::Int) where T +function element_to_facet_transformation(point::Vec{2, T}, ::Type{RefTriangle}, face::Int) where T x, y = point face == 1 && return Vec( one(T) - x * 2) face == 2 && return Vec( one(T) - y * 2 ) face == 3 && return Vec( x * 2 - one(T)) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end function weighted_normal(J::Tensor{2,2}, ::Type{RefTriangle}, face::Int) @@ -150,7 +150,7 @@ function weighted_normal(J::Tensor{2,2}, ::Type{RefTriangle}, face::Int) face == 2 && return Vec{2}((-J[2,2], J[1,2])) face == 3 && return Vec{2}((J[2,1], -J[1,1])) end - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end ######################## @@ -158,7 +158,7 @@ end ######################## # Mapping from 2D quadrilateral to 3D face of a hexahedron. -function face_to_element_transformation(point::Vec{2, T}, ::Type{RefHexahedron}, face::Int) where T +function facet_to_element_transformation(point::Vec{2, T}, ::Type{RefHexahedron}, face::Int) where T x, y = point face == 1 && return Vec{3, T}(( y, x, -one(T))) face == 2 && return Vec{3, T}(( x, -one(T), y)) @@ -166,11 +166,11 @@ function face_to_element_transformation(point::Vec{2, T}, ::Type{RefHexahedron}, face == 4 && return Vec{3, T}(( -x, one(T), y)) face == 5 && return Vec{3, T}((-one(T), y, x)) face == 6 && return Vec{3, T}(( x, y, one(T))) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end # Mapping from 3D face of a hexahedron to 2D quadrilateral. -function element_to_face_transformation(point::Vec{3, T}, ::Type{RefHexahedron}, face::Int) where T +function element_to_facet_transformation(point::Vec{3, T}, ::Type{RefHexahedron}, face::Int) where T x, y, z = point face == 1 && return Vec( y, x) face == 2 && return Vec( x, z) @@ -178,7 +178,7 @@ function element_to_face_transformation(point::Vec{3, T}, ::Type{RefHexahedron}, face == 4 && return Vec( -x, z) face == 5 && return Vec( z, y) face == 6 && return Vec( x, y) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end function weighted_normal(J::Tensor{2,3}, ::Type{RefHexahedron}, face::Int) @@ -190,7 +190,7 @@ function weighted_normal(J::Tensor{2,3}, ::Type{RefHexahedron}, face::Int) face == 5 && return J[:,3] × J[:,2] face == 6 && return J[:,1] × J[:,2] end - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end ######################### @@ -198,23 +198,23 @@ end ######################### # Mapping from 2D triangle to 3D face of a tetrahedon. -function face_to_element_transformation(point::Vec{2, T}, ::Type{RefTetrahedron}, face::Int) where T +function facet_to_element_transformation(point::Vec{2, T}, ::Type{RefTetrahedron}, face::Int) where T x, y = point face == 1 && return Vec{3, T}( (one(T)-x-y, y, zero(T))) face == 2 && return Vec{3, T}( (y, zero(T), one(T)-x-y)) face == 3 && return Vec{3, T}( (x, y, one(T)-x-y)) face == 4 && return Vec{3, T}( (zero(T), one(T)-x-y, y)) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end # Mapping from 3D face of a tetrahedon to 2D triangle. 
-function element_to_face_transformation(point::Vec{3, T}, ::Type{RefTetrahedron}, face::Int) where T +function element_to_facet_transformation(point::Vec{3, T}, ::Type{RefTetrahedron}, face::Int) where T x, y, z = point face == 1 && return Vec( one(T)-x-y, y) face == 2 && return Vec( one(T)-z-x, x) face == 3 && return Vec( x, y) face == 4 && return Vec( one(T)-y-z, z) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end function weighted_normal(J::Tensor{2,3}, ::Type{RefTetrahedron}, face::Int) @@ -224,7 +224,7 @@ function weighted_normal(J::Tensor{2,3}, ::Type{RefTetrahedron}, face::Int) face == 3 && return (J[:,1]-J[:,3]) × (J[:,2]-J[:,3]) face == 4 && return J[:,3] × J[:,2] end - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end ################### @@ -232,7 +232,7 @@ end ################### # Mapping from 2D quadrilateral/triangle to 3D face of a wedge. -function face_to_element_transformation(point::Vec{2, T}, ::Type{RefPrism}, face::Int) where T +function facet_to_element_transformation(point::Vec{2, T}, ::Type{RefPrism}, face::Int) where T # Note that for quadrilaterals the domain is [-1, 1]² but for triangles it is [0, 1]² x, y = point face == 1 && return Vec{3, T}(( one(T)-x-y, y, zero(T))) @@ -240,18 +240,18 @@ function face_to_element_transformation(point::Vec{2, T}, ::Type{RefPrism}, face face == 3 && return Vec{3, T}(( zero(T), one(T)-(one(T)+x)/2, (one(T)+y)/2)) face == 4 && return Vec{3, T}(( one(T)-(one(T)+x)/2, (one(T)+x)/2, (one(T)+y)/2)) face == 5 && return Vec{3, T}(( y, one(T)-x-y, one(T))) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end # Mapping from 3D face of a wedge to 2D triangle or 2D quadrilateral. -function element_to_face_transformation(point::Vec{3, T}, ::Type{RefPrism}, face::Int) where T +function element_to_facet_transformation(point::Vec{3, T}, ::Type{RefPrism}, face::Int) where T x, y, z = point face == 1 && return Vec( one(T)-x-y, y) face == 2 && return Vec( 2*x - one(T), 2*z - one(T) ) face == 3 && return Vec( 2*(one(T) - y) - one(T), 2*z - one(T) ) face == 4 && return Vec( 2*y - one(T), 2*z - one(T) ) face == 5 && return Vec( one(T) - x - y, x) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end function weighted_normal(J::Tensor{2,3}, ::Type{RefPrism}, face::Int) @@ -262,7 +262,7 @@ function weighted_normal(J::Tensor{2,3}, ::Type{RefPrism}, face::Int) face == 4 && return (J[:,2]-J[:,1]) × J[:,3] face == 5 && return J[:,1] × J[:,2] end - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end ##################### @@ -270,25 +270,25 @@ end ##################### # Mapping from 2D face to 3D face of a pyramid. -function face_to_element_transformation(point::Vec{2, T}, ::Type{RefPyramid}, face::Int) where T +function facet_to_element_transformation(point::Vec{2, T}, ::Type{RefPyramid}, face::Int) where T x, y = point face == 1 && return Vec{3, T}(( (y+one(T))/2, (x+one(T))/2, zero(T))) face == 2 && return Vec{3, T}(( y, zero(T), one(T)-x-y)) face == 3 && return Vec{3, T}(( zero(T), one(T)-x-y, y)) face == 4 && return Vec{3, T}(( x+y, y, one(T)-x-y)) face == 5 && return Vec{3, T}(( one(T)-x-y, one(T)-y, y)) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end # Mapping from 3D face of a pyramid to 2D triangle or 2D quadrilateral. 
-function element_to_face_transformation(point::Vec{3, T}, ::Type{RefPyramid}, face::Int) where T +function element_to_facet_transformation(point::Vec{3, T}, ::Type{RefPyramid}, face::Int) where T x, y, z = point face == 1 && return Vec( 2*y - one(T), 2*x - one(T)) face == 2 && return Vec( one(T) - z - x, x) face == 3 && return Vec( one(T) - y - z, z) face == 4 && return Vec( x - y, y) face == 5 && return Vec( one(T) - x - z, z) - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end function weighted_normal(J::Tensor{2,3}, ::Type{RefPyramid}, face::Int) @@ -299,5 +299,5 @@ function weighted_normal(J::Tensor{2,3}, ::Type{RefPyramid}, face::Int) face == 4 && return J[:,2] × (J[:,3]-J[:,1]) face == 5 && return (J[:,3]-J[:,2]) × J[:,1] end - throw(ArgumentError("unknown face number")) + throw(ArgumentError("unknown facet number")) end diff --git a/src/Ferrite.jl b/src/Ferrite.jl index 013421f21f..781de4d6f0 100644 --- a/src/Ferrite.jl +++ b/src/Ferrite.jl @@ -1,17 +1,37 @@ module Ferrite -using Reexport + +using Reexport: @reexport @reexport using Tensors -@reexport using WriteVTK -using LinearAlgebra -using SparseArrays -using StaticArrays -using Base: @propagate_inbounds -using NearestNeighbors -using EnumX +using Base: + @propagate_inbounds +using EnumX: + EnumX, @enumx +using LinearAlgebra: + LinearAlgebra, Symmetric, cholesky, det, norm, pinv, tr +using NearestNeighbors: + NearestNeighbors, KDTree, knn +using OrderedCollections: + OrderedSet +using SparseArrays: + SparseArrays, SparseMatrixCSC, nonzeros, nzrange, rowvals, sparse +using StaticArrays: + StaticArrays, MArray, MMatrix, SArray, SMatrix, SVector +using WriteVTK: + WriteVTK, VTKCellTypes +using Tensors: + Tensors, AbstractTensor, SecondOrderTensor, SymmetricTensor, Tensor, Vec, gradient, + rotation_tensor, symmetric, tovoigt!, hessian, otimesu +using ForwardDiff: + ForwardDiff + +include("CollectionsOfViews.jl") +using .CollectionsOfViews: + CollectionsOfViews, ArrayOfVectorViews, push_at_index!, ConstructionBuffer include("exports.jl") + """ AbstractRefShape{refdim} @@ -33,11 +53,19 @@ const RefTetrahedron = RefSimplex{3} struct RefPrism <: AbstractRefShape{3} end struct RefPyramid <: AbstractRefShape{3} end +""" + Ferrite.getrefdim(RefShape::Type{<:AbstractRefShape}) + +Get the dimension of the reference shape +""" +getrefdim(::Type{<:AbstractRefShape}) # To get correct doc filtering +getrefdim(::Type{<:AbstractRefShape{rdim}}) where rdim = rdim + abstract type AbstractCell{refshape <: AbstractRefShape} end abstract type AbstractValues end abstract type AbstractCellValues <: AbstractValues end -abstract type AbstractFaceValues <: AbstractValues end +abstract type AbstractFacetValues <: AbstractValues end """ Abstract type which is used as identifier for faces, edges and verices @@ -72,7 +100,18 @@ struct VertexIndex <: BoundaryIndex idx::Tuple{Int,Int} # cell and side end +""" +A `FacetIndex` wraps an (Int, Int) and defines a local facet by pointing to a (cell, facet). 
+""" +struct FacetIndex <: BoundaryIndex + idx::Tuple{Int,Int} # cell and side +end + +const AbstractVecOrSet{T} = Union{AbstractSet{T}, AbstractVector{T}} +const IntegerCollection = AbstractVecOrSet{<:Integer} + include("utils.jl") +include("PoolAllocator.jl") # Matrix/Vector utilities include("arrayutils.jl") @@ -84,10 +123,11 @@ include("interpolations.jl") include("Quadrature/quadrature.jl") # FEValues +struct ValuesUpdateFlags{FunDiffOrder, GeoDiffOrder, DetJdV} end # Default constructor in common_values.jl include("FEValues/GeometryMapping.jl") include("FEValues/FunctionValues.jl") include("FEValues/CellValues.jl") -include("FEValues/FaceValues.jl") +include("FEValues/FacetValues.jl") include("FEValues/InterfaceValues.jl") include("FEValues/PointValues.jl") include("FEValues/common_values.jl") @@ -105,6 +145,7 @@ include("Dofs/DofHandler.jl") include("Dofs/ConstraintHandler.jl") include("Dofs/apply_analytical.jl") include("Dofs/sparsity_pattern.jl") +include("Dofs/block_sparsity_pattern.jl") include("Dofs/DofRenumbering.jl") include("iterators.jl") diff --git a/src/Grid/coloring.jl b/src/Grid/coloring.jl index db30efd5d0..85f9723fe2 100644 --- a/src/Grid/coloring.jl +++ b/src/Grid/coloring.jl @@ -68,7 +68,7 @@ end # See Appendix A in https://www.math.colostate.edu/%7Ebangerth/publications/2013-pattern.pdf function workstream_coloring(incidence_matrix, cellset) - + if length(cellset) == 0 return Vector{Int}[] elseif length(cellset) == 1 @@ -179,7 +179,7 @@ Two different algorithms are available, specified with the `alg` keyword argumen - `alg = ColoringAlgorithm.Greedy`: greedy algorithm that works well for structured quadrilateral grids such as e.g. quadrilateral grids from `generate_grid`. -The resulting colors can be visualized using [`vtk_cell_data_colors`](@ref). +The resulting colors can be visualized using [`Ferrite.write_cell_colors`](@ref). !!! note "Cell to color mapping" In a previous version of Ferrite this function returned a dictionary mapping @@ -205,20 +205,3 @@ function create_coloring(g::AbstractGrid, cellset=1:getncells(g); alg::ColoringA error("impossible") end end - -""" - vtk_cell_data_colors(vtkfile, cell_colors, name="coloring") - -Write cell colors (see [`create_coloring`](@ref)) to a VTK file for visualization. - -In case of coloring a subset, the cells which are not part of the subset are represented as color 0. -""" -function vtk_cell_data_colors(vtkfile, cell_colors::AbstractVector{<:AbstractVector{<:Integer}}, name="coloring") - color_vector = zeros(Int, vtkfile.Ncls) - for (i, cells_color) in enumerate(cell_colors) - for cell in cells_color - color_vector[cell] = i - end - end - vtk_cell_data(vtkfile, color_vector, name) -end diff --git a/src/Grid/grid.jl b/src/Grid/grid.jl index ecf92b780e..a1d2113fab 100644 --- a/src/Grid/grid.jl +++ b/src/Grid/grid.jl @@ -16,7 +16,7 @@ Node(x::NTuple{dim,T}) where {dim,T} = Node(Vec{dim,T}(x)) """ get_node_coordinate(::Node) - + Get the value of the node coordinate. 
""" get_node_coordinate(n::Node) = n.x @@ -43,27 +43,52 @@ get_coordinate_eltype(::Node{dim,T}) where {dim,T} = T # abstract type AbstractCell{refshape <: AbstractRefShape} end getrefshape(::AbstractCell{refshape}) where refshape = refshape +getrefshape(::Type{<:AbstractCell{refshape}}) where refshape = refshape nvertices(c::AbstractCell) = length(vertices(c)) nedges( c::AbstractCell) = length(edges(c)) nfaces( c::AbstractCell) = length(faces(c)) -nfaces( ::Type{T}) where {T <: AbstractRefShape} = length(reference_faces(T)) +nfacets( c::AbstractCell) = length(facets(c)) nnodes( c::AbstractCell) = length(get_node_ids(c)) +nvertices(::Type{T}) where {T <: AbstractRefShape} = length(reference_vertices(T)) +nedges( ::Type{T}) where {T <: AbstractRefShape} = length(reference_edges(T)) +nfaces( ::Type{T}) where {T <: AbstractRefShape} = length(reference_faces(T)) +nfacets( ::Type{T}) where {T <: AbstractRefShape} = length(reference_facets(T)) + + +""" + reference_vertices(::Type{<:AbstractRefShape}) + reference_vertices(::AbstractCell) + +Returns a tuple of integers containing the local node indices corresponding to +the vertices (i.e. corners or endpoints) of the cell. +""" +reference_vertices(::Union{Type{<:AbstractRefShape}, AbstractCell}) + """ Ferrite.vertices(::AbstractCell) Returns a tuple with the node indices (of the nodes in a grid) for each vertex in a given cell. -This function induces the [`VertexIndex`](@ref), where the second index +This function induces the [`VertexIndex`](@ref), where the second index corresponds to the local index into this tuple. """ vertices(::AbstractCell) +""" + reference_edges(::Type{<:AbstractRefShape}) + reference_edges(::AbstractCell) + +Returns a tuple of 2-tuples containing the ordered local node indices +(corresponding to the vertices) that define an edge. +""" +reference_edges(::Union{Type{<:AbstractRefShape}, AbstractCell}) + """ Ferrite.edges(::AbstractCell) Returns a tuple of 2-tuples containing the ordered node indices (of the nodes in a grid) corresponding to -the vertices that define an *oriented edge*. This function induces the +the vertices that define an *oriented edge*. This function induces the [`EdgeIndex`](@ref), where the second index corresponds to the local index into this tuple. Note that the vertices are sufficient to define an edge uniquely. @@ -71,23 +96,19 @@ Note that the vertices are sufficient to define an edge uniquely. edges(::AbstractCell) """ - reference_faces(::AbstractRefShape) - -Returns a tuple of n-tuples containing the ordered local node indices corresponding to -the vertices that define an *oriented face*. - -An *oriented face* is a face with the first node having the local index and the other -nodes spanning such that the normal to the face is pointing outwards. + reference_faces(::Type{<:AbstractRefShape}) + reference_faces(::AbstractCell) -Note that the vertices are sufficient to define a face uniquely. +Returns a tuple of n-tuples containing the ordered local node indices +(corresponding to the vertices) that define a face. """ -reference_faces(::AbstractRefShape) +reference_faces(::Union{Type{<:AbstractRefShape}, AbstractCell}) """ Ferrite.faces(::AbstractCell) Returns a tuple of n-tuples containing the ordered node indices (of the nodes in a grid) corresponding to -the vertices that define an *oriented face*. This function induces the +the vertices that define an *oriented face*. This function induces the [`FaceIndex`](@ref), where the second index corresponds to the local index into this tuple. 
An *oriented face* is a face with the first node having the local index and the other @@ -97,18 +118,49 @@ Note that the vertices are sufficient to define a face uniquely. """ faces(::AbstractCell) -function faces(c::AbstractCell{refshape}) where refshape - ns = get_node_ids(c) - rfs = reference_faces(refshape) - return ntuple(i -> getindex.(Ref(ns), rfs[i]), nfaces(refshape))::typeof(rfs) -end +""" + Ferrite.facets(::AbstractCell) + +Returns a tuple of n-tuples containing the ordered node indices (of the nodes in a grid) corresponding to +the vertices that define an oriented facet. This function induces the +[`FacetIndex`](@ref), where the second index corresponds to the local index into this tuple. + +See also [`vertices`](@ref), [`edges`](@ref), and [`faces`](@ref) +""" +facets(::AbstractCell) + +@inline facets(c::AbstractCell{<:AbstractRefShape{1}}) = map(i -> (i,), vertices(c)) # facet always tuple of tuple +@inline facets(c::AbstractCell{<:AbstractRefShape{2}}) = edges(c) +@inline facets(c::AbstractCell{<:AbstractRefShape{3}}) = faces(c) + +""" + Ferrite.reference_facets(::Type{<:AbstractRefShape}) + Ferrite.reference_facets(::AbstractCell) + +Returns a tuple of n-tuples containing the ordered local node indices +(corresponding to the vertices) that define a facet. + +See also [`reference_vertices`](@ref), [`reference_edges`](@ref), and [`reference_faces`](@ref). +""" +reference_facets(::Type{<:AbstractRefShape}) + +@inline reference_facets(refshape::Type{<:AbstractRefShape{1}}) = map(i -> (i,), reference_vertices(refshape)) +@inline reference_facets(refshape::Type{<:AbstractRefShape{2}}) = reference_edges(refshape) +@inline reference_facets(refshape::Type{<:AbstractRefShape{3}}) = reference_faces(refshape) + +@inline reference_faces(::AbstractCell{refshape}) where refshape = reference_faces(refshape) +@inline reference_edges(::AbstractCell{refshape}) where refshape = reference_edges(refshape) +@inline reference_vertices(::AbstractCell{refshape}) where refshape = reference_vertices(refshape) +@inline reference_facets(::AbstractCell{refshape}) where refshape = reference_facets(refshape) """ - Ferrite.default_interpolation(::AbstractCell)::Interpolation + geometric_interpolation(::AbstractCell)::ScalarInterpolation + geometric_interpolation(::Type{<:AbstractCell})::ScalarInterpolation -Returns the interpolation which defines the geometry of a given cell. +Each `AbstractCell` type has a unique geometric interpolation describing its geometry. +This function returns that interpolation, which is always a scalar interpolation. """ -default_interpolation(::AbstractCell) +geometric_interpolation(cell::AbstractCell) = geometric_interpolation(typeof(cell)) """ Ferrite.get_node_ids(c::AbstractCell) @@ -119,121 +171,79 @@ Default implementation: `c.nodes`. """ get_node_ids(c::AbstractCell) = c.nodes -# Default implementations of vertices/edges/faces that work as long as get_node_ids is -# correctly implemented for the cell. +# Default implementations of = vertices/edges/faces that work as long as get_node_ids +# and `reference_` are correctly implemented for the cell / reference shape. 
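For orientation, a minimal sketch (not part of the diff) of what the generic fallbacks below produce, assuming the `Triangle` cell type and the `reference_*` tables defined in this file; the node ids are arbitrary example values:

```julia
using Ferrite

tri = Triangle((10, 20, 30))            # get_node_ids(tri) == (10, 20, 30)

Ferrite.vertices(tri)                   # (10, 20, 30)
Ferrite.edges(tri)                      # ((10, 20), (20, 30), (30, 10))
Ferrite.faces(tri)                      # ((10, 20, 30),)
Ferrite.facets(tri)                     # same as edges(tri): facets of a 2D cell are its edges
Ferrite.reference_facets(RefTriangle)   # ((1, 2), (2, 3), (3, 1))
```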
-# RefLine (refdim = 1): vertices for vertexdofs, faces for BC -function vertices(c::AbstractCell{RefLine}) +function vertices(c::AbstractCell{RefShape}) where RefShape ns = get_node_ids(c) - return (ns[1], ns[2]) # v1, v2 -end -function reference_faces(::Type{RefLine}) - return ((1,), (2,)) # f1, f2 + return map(i -> ns[i], reference_vertices(RefShape)) end -# RefTriangle (refdim = 2): vertices for vertexdofs, faces for facedofs (edgedofs) and BC -function vertices(c::AbstractCell{RefTriangle}) +function edges(c::AbstractCell{RefShape}) where RefShape ns = get_node_ids(c) - return (ns[1], ns[2], ns[3]) # v1, v2, v3 -end -function reference_faces(::Type{RefTriangle}) - return ( - (1, 2), (2, 3), (3, 1), # f1, f2, f3 - ) + return map(reference_edges(RefShape)) do re + map(i -> ns[i], re) + end end -# RefQuadrilateral (refdim = 2): vertices for vertexdofs, faces for facedofs (edgedofs) and BC -function vertices(c::AbstractCell{RefQuadrilateral}) +function faces(c::AbstractCell{RefShape}) where RefShape ns = get_node_ids(c) - return (ns[1], ns[2], ns[3], ns[4]) # v1, v2, v3, v4 -end -function reference_faces(::Type{RefQuadrilateral}) - return ( - (1, 2), (2, 3), (3, 4), (4, 1), # f1, f2, f3, f4 - ) + return map(reference_faces(RefShape)) do rf + map(i -> ns[i], rf) + end end -# RefTetrahedron (refdim = 3): vertices for vertexdofs, edges for edgedofs, faces for facedofs and BC -function vertices(c::AbstractCell{RefTetrahedron}) - ns = get_node_ids(c) - return (ns[1], ns[2], ns[3], ns[4]) # v1, v2, v3, v4 -end -function edges(c::AbstractCell{RefTetrahedron}) - ns = get_node_ids(c) - return ( - (ns[1], ns[2]), (ns[2], ns[3]), (ns[3], ns[1]), # e1, e2, e3 - (ns[1], ns[4]), (ns[2], ns[4]), (ns[3], ns[4]), # e4, e5, e6 - ) -end -function reference_faces(::Type{RefTetrahedron}) - return ( - (1, 3, 2), (1, 2, 4), # f1, f2 - (2, 3, 4), (1, 4, 3), # f3, f4 - ) -end +# RefLine (refdim = 1) +reference_vertices(::Type{RefLine}) = (1, 2) +reference_edges(::Type{RefLine}) = ((1, 2),) # e1 +reference_faces(::Type{RefLine}) = () # - -# RefHexahedron (refdim = 3): vertices for vertexdofs, edges for edgedofs, faces for facedofs and BC -function vertices(c::AbstractCell{RefHexahedron}) - ns = get_node_ids(c) - return ( - ns[1], ns[2], ns[3], ns[4], ns[5], ns[6], ns[7], ns[8], # v1, ..., v8 - ) -end -function edges(c::AbstractCell{RefHexahedron}) - ns = get_node_ids(c) - return ( - (ns[1], ns[2]), (ns[2], ns[3]), (ns[3], ns[4]), (ns[4], ns[1]), # e1, e2, e3, e4 - (ns[5], ns[6]), (ns[6], ns[7]), (ns[7], ns[8]), (ns[8], ns[5]), # e5, e6, e7, e8 - (ns[1], ns[5]), (ns[2], ns[6]), (ns[3], ns[7]), (ns[4], ns[8]), # e9, e10, e11, e12 - ) +# RefTriangle (refdim = 2) +reference_vertices(::Type{RefTriangle}) = (1, 2, 3) +reference_edges(::Type{RefTriangle}) = ((1, 2), (2, 3), (3, 1)) # e1 ... e3 +reference_faces(::Type{RefTriangle}) = ((1, 2, 3),) # f1 + +# RefQuadrilateral (refdim = 2) +reference_vertices(::Type{RefQuadrilateral}) = (1, 2, 3, 4) +reference_edges(::Type{RefQuadrilateral}) = ((1, 2), (2, 3), (3, 4), (4, 1)) # e1 ... e4 +reference_faces(::Type{RefQuadrilateral}) = ((1, 2, 3, 4),) # f1 + +# RefTetrahedron (refdim = 3) +reference_vertices(::Type{RefTetrahedron}) = (1, 2, 3, 4) +reference_edges(::Type{RefTetrahedron}) = ((1, 2), (2, 3), (3, 1), (1, 4), (2, 4), (3, 4)) # e1 ... e6 +reference_faces(::Type{RefTetrahedron}) = ((1, 3, 2), (1, 2, 4), (2, 3, 4), (1, 4, 3)) # f1 ... 
f4 + +# RefHexahedron (refdim = 3) +reference_vertices(::Type{RefHexahedron}) = (1, 2, 3, 4, 5, 6, 7, 8) +function reference_edges(::Type{RefHexahedron}) + return ((1, 2), (2, 3), (3, 4), (4, 1), (5, 6), (6, 7), # e1 ... e6 + (7, 8), (8, 5), (1, 5), (2, 6), (3, 7), (4, 8)) # e7 ... e12 end function reference_faces(::Type{RefHexahedron}) - return ( - (1, 4, 3, 2), (1, 2, 6, 5), # f1, f2 - (2, 3, 7, 6), (3, 4, 8, 7), # f3, f4 - (1, 5, 8, 4), (5, 6, 7, 8), # f5, f6 - ) + return ((1, 4, 3, 2), (1, 2, 6, 5), (2, 3, 7, 6), # f1, f2, f3 + (3, 4, 8, 7), (1, 5, 8, 4), (5, 6, 7, 8)) # f4, f5, f6 end -# RefPrism (refdim = 3): vertices for vertexdofs, edges for edgedofs, faces for facedofs and BC -function vertices(c::AbstractCell{RefPrism}) - ns = get_node_ids(c) - return (ns[1], ns[2], ns[3], ns[4], ns[5], ns[6]) # v1, ..., v6 -end -function edges(c::AbstractCell{RefPrism}) - ns = get_node_ids(c) - return ( - (ns[2], ns[1]), (ns[1], ns[3]), (ns[1], ns[4]), (ns[3], ns[2]), # e1, e2, e3, e4 - (ns[2], ns[5]), (ns[3], ns[6]), (ns[4], ns[5]), (ns[4], ns[6]), # e5, e6, e7, e8 - (ns[6], ns[5]), # e9 - ) +# RefPrism (refdim = 3) +reference_vertices(::Type{RefPrism}) = (1, 2, 3, 4, 5, 6) +function reference_edges(::Type{RefPrism}) + return ((2, 1), (1, 3), (1, 4), (3, 2), (2, 5), # e1, e2, e3, e4, e5 + (3, 6), (4, 5), (4, 6), (6, 5)) # e6, e7, e8, e9 end function reference_faces(::Type{RefPrism}) - return ( - (1, 3, 2), (1, 2, 5, 4), # f1, f2 - (3, 1, 4, 6), (2, 3, 6, 5), # f3, f4 - (4, 5, 6), # f5 - ) + return ((1, 3, 2), (1, 2, 5, 4), (3, 1, 4, 6), # f1, f2, f3 + (2, 3, 6, 5), (4, 5, 6)) # f4, f5 end -# RefPyramid (refdim = 3): vertices for vertexdofs, edges for edgedofs, faces for facedofs and BC -function vertices(c::AbstractCell{RefPyramid}) - ns = get_node_ids(c) - return (ns[1], ns[2], ns[3], ns[4], ns[5],) # v1, ..., v5 -end -function edges(c::AbstractCell{RefPyramid}) - ns = get_node_ids(c) - return ( - (ns[1], ns[2]), (ns[1], ns[3]), (ns[1], ns[5]), (ns[2], ns[4]), - (ns[2], ns[5]), (ns[4], ns[3]), (ns[3], ns[5]), (ns[4], ns[5]), - ) +# RefPyramid (refdim = 3) +reference_vertices(::Type{RefPyramid}) = (1, 2, 3, 4, 5) +function reference_edges(::Type{RefPyramid}) + return ((1, 2), (1, 3), (1, 5), (2, 4), # e1 ... e4 + (2, 5), (4, 3), (3, 5), (4, 5)) # e5 ... 
e8 end function reference_faces(::Type{RefPyramid}) - return ( - (1, 3, 4, 2), (1, 2, 5), # f1, f2 - (1, 5, 3), (2, 4, 5), # f3, f4 - (3, 5, 4), # f5 - ) + return ((1, 3, 4, 2), (1, 2, 5), (1, 5, 3), # f1, f2, f3 + (2, 4, 5), (3, 5, 4)) # f4, f5 end ###################################################### @@ -254,28 +264,25 @@ struct QuadraticHexahedron <: AbstractCell{RefHexahedron} nodes::NTuple{27 struct Wedge <: AbstractCell{RefPrism} nodes::NTuple{ 6, Int} end struct Pyramid <: AbstractCell{RefPyramid} nodes::NTuple{ 5, Int} end -default_interpolation(::Type{Line}) = Lagrange{RefLine, 1}() -default_interpolation(::Type{QuadraticLine}) = Lagrange{RefLine, 2}() -default_interpolation(::Type{Triangle}) = Lagrange{RefTriangle, 1}() -default_interpolation(::Type{QuadraticTriangle}) = Lagrange{RefTriangle, 2}() -default_interpolation(::Type{Quadrilateral}) = Lagrange{RefQuadrilateral, 1}() -default_interpolation(::Type{QuadraticQuadrilateral}) = Lagrange{RefQuadrilateral, 2}() -default_interpolation(::Type{Tetrahedron}) = Lagrange{RefTetrahedron, 1}() -default_interpolation(::Type{QuadraticTetrahedron}) = Lagrange{RefTetrahedron, 2}() -default_interpolation(::Type{Hexahedron}) = Lagrange{RefHexahedron, 1}() -default_interpolation(::Type{QuadraticHexahedron}) = Lagrange{RefHexahedron, 2}() -default_interpolation(::Type{Wedge}) = Lagrange{RefPrism, 1}() -default_interpolation(::Type{Pyramid}) = Lagrange{RefPyramid, 1}() - -# TODO: Remove this, used for Quadrilateral3D -edges(c::Quadrilateral#=3D=#) = faces(c) +geometric_interpolation(::Type{Line}) = Lagrange{RefLine, 1}() +geometric_interpolation(::Type{QuadraticLine}) = Lagrange{RefLine, 2}() +geometric_interpolation(::Type{Triangle}) = Lagrange{RefTriangle, 1}() +geometric_interpolation(::Type{QuadraticTriangle}) = Lagrange{RefTriangle, 2}() +geometric_interpolation(::Type{Quadrilateral}) = Lagrange{RefQuadrilateral, 1}() +geometric_interpolation(::Type{QuadraticQuadrilateral}) = Lagrange{RefQuadrilateral, 2}() +geometric_interpolation(::Type{Tetrahedron}) = Lagrange{RefTetrahedron, 1}() +geometric_interpolation(::Type{QuadraticTetrahedron}) = Lagrange{RefTetrahedron, 2}() +geometric_interpolation(::Type{Hexahedron}) = Lagrange{RefHexahedron, 1}() +geometric_interpolation(::Type{QuadraticHexahedron}) = Lagrange{RefHexahedron, 2}() +geometric_interpolation(::Type{Wedge}) = Lagrange{RefPrism, 1}() +geometric_interpolation(::Type{Pyramid}) = Lagrange{RefPyramid, 1}() # Serendipity interpolation based cells struct SerendipityQuadraticQuadrilateral <: AbstractCell{RefQuadrilateral} nodes::NTuple{ 8, Int} end struct SerendipityQuadraticHexahedron <: AbstractCell{RefHexahedron} nodes::NTuple{20, Int} end -default_interpolation(::Type{SerendipityQuadraticQuadrilateral}) = Serendipity{RefQuadrilateral, 2}() -default_interpolation(::Type{SerendipityQuadraticHexahedron}) = Serendipity{RefHexahedron, 2}() +geometric_interpolation(::Type{SerendipityQuadraticQuadrilateral}) = Serendipity{RefQuadrilateral, 2}() +geometric_interpolation(::Type{SerendipityQuadraticHexahedron}) = Serendipity{RefHexahedron, 2}() """ nvertices_on_face(cell::AbstractCell, local_face_index::Int) @@ -288,7 +295,15 @@ Specifies for each subtype of AbstractCell how many nodes form an edge. 
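As a hypothetical usage note (not part of the diff): `geometric_interpolation` replaces `default_interpolation` and can be called on either the cell type or a cell instance, assuming the mappings added above:

```julia
using Ferrite

Ferrite.geometric_interpolation(Quadrilateral)              # Lagrange{RefQuadrilateral, 1}()
Ferrite.geometric_interpolation(Wedge((1, 2, 3, 4, 5, 6)))  # Lagrange{RefPrism, 1}(), via typeof(cell)
```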
""" nvertices_on_edge(cell::AbstractCell, local_edge_index::Int) = length(edges(cell)[local_edge_index]) -getdim(::Union{AbstractCell{refshape},Type{<:AbstractCell{refshape}}}) where {refdim, refshape <: AbstractRefShape{refdim}} = refdim +""" + Ferrite.getrefdim(cell::AbstractCell) + Ferrite.getrefdim(::Type{<:AbstractCell}) + +Get the reference dimension of the cell, i.e. the dimension of the cell's +reference shape. +""" +getrefdim(c::AbstractCell) = getrefdim(typeof(c)) +getrefdim(::Type{<:AbstractCell{RefShape}}) where RefShape = getrefdim(RefShape) ###################### @@ -299,42 +314,57 @@ abstract type AbstractGrid{dim} end """ Grid{dim, C<:AbstractCell, T<:Real} <: AbstractGrid} -A `Grid` is a collection of `Cells` and `Node`s which covers the computational domain, together with Sets of cells, nodes and faces. -There are multiple helper structures to apply boundary conditions or define subdomains. They are gathered in the `cellsets`, `nodesets`, -`facesets`, `edgesets` and `vertexsets`. +A `Grid` is a collection of `Ferrite.AbstractCell`s and `Ferrite.Node`s which covers the computational domain. +Helper structures for applying boundary conditions or define subdomains are gathered in `cellsets`, `nodesets`, +`facetsets`, and `vertexsets`. # Fields - `cells::Vector{C}`: stores all cells of the grid - `nodes::Vector{Node{dim,T}}`: stores the `dim` dimensional nodes of the grid -- `cellsets::Dict{String,Set{Int}}`: maps a `String` key to a `Set` of cell ids -- `nodesets::Dict{String,Set{Int}}`: maps a `String` key to a `Set` of global node ids -- `facesets::Dict{String,Set{FaceIndex}}`: maps a `String` to a `Set` of `Set{FaceIndex} (global_cell_id, local_face_id)` -- `edgesets::Dict{String,Set{EdgeIndex}}`: maps a `String` to a `Set` of `Set{EdgeIndex} (global_cell_id, local_edge_id` -- `vertexsets::Dict{String,Set{VertexIndex}}`: maps a `String` key to a `Set` of local vertex ids -- `boundary_matrix::SparseMatrixCSC{Bool,Int}`: optional, only needed by `onboundary` to check if a cell is on the boundary, see, e.g. 
Helmholtz example +- `cellsets::Dict{String, OrderedSet{Int}}`: maps a `String` key to an `OrderedSet` of cell ids +- `nodesets::Dict{String, OrderedSet{Int}}`: maps a `String` key to an `OrderedSet` of global node ids +- `facetsets::Dict{String, OrderedSet{FacetIndex}}`: maps a `String` to an `OrderedSet` of `FacetIndex` +- `vertexsets::Dict{String, OrderedSet{VertexIndex}}`: maps a `String` key to an `OrderedSet` of `VertexIndex` """ mutable struct Grid{dim,C<:AbstractCell,T<:Real} <: AbstractGrid{dim} cells::Vector{C} nodes::Vector{Node{dim,T}} # Sets - cellsets::Dict{String,Set{Int}} - nodesets::Dict{String,Set{Int}} - facesets::Dict{String,Set{FaceIndex}} - edgesets::Dict{String,Set{EdgeIndex}} - vertexsets::Dict{String,Set{VertexIndex}} - # Boundary matrix (faces per cell × cell) - boundary_matrix::SparseMatrixCSC{Bool,Int} + cellsets::Dict{String,OrderedSet{Int}} + nodesets::Dict{String,OrderedSet{Int}} + facetsets::Dict{String,OrderedSet{FacetIndex}} + vertexsets::Dict{String,OrderedSet{VertexIndex}} end function Grid(cells::Vector{C}, nodes::Vector{Node{dim,T}}; - cellsets::Dict{String,Set{Int}}=Dict{String,Set{Int}}(), - nodesets::Dict{String,Set{Int}}=Dict{String,Set{Int}}(), - facesets::Dict{String,Set{FaceIndex}}=Dict{String,Set{FaceIndex}}(), - edgesets::Dict{String,Set{EdgeIndex}}=Dict{String,Set{EdgeIndex}}(), - vertexsets::Dict{String,Set{VertexIndex}}=Dict{String,Set{VertexIndex}}(), - boundary_matrix::SparseMatrixCSC{Bool,Int}=spzeros(Bool, 0, 0)) where {dim,C,T} - return Grid(cells, nodes, cellsets, nodesets, facesets, edgesets, vertexsets, boundary_matrix) + cellsets::Dict{String, <:AbstractVecOrSet{Int}}=Dict{String,OrderedSet{Int}}(), + nodesets::Dict{String, <:AbstractVecOrSet{Int}}=Dict{String,OrderedSet{Int}}(), + facetsets::Dict{String, <:AbstractVecOrSet{FacetIndex}}=Dict{String,OrderedSet{FacetIndex}}(), + facesets=nothing, # deprecated + vertexsets::Dict{String, <:AbstractVecOrSet{VertexIndex}}=Dict{String,OrderedSet{VertexIndex}}(), + boundary_matrix = nothing) where {dim,C,T} + if facesets !== nothing + if isempty(facetsets) + @warn "facesets in Grid is deprecated, use facetsets instead" maxlog=1 + for (key, set) in facesets + facetsets[key] = OrderedSet(FacetIndex(cellnr, facenr) for (cellnr, facenr) in set) + end + else + error("facesets are deprecated, use only facetsets") + end + end + if boundary_matrix !== nothing + throw(DeprecationError("`boundary_matrix` is not part of the Grid anymore and thus not a supported keyword argument.")) + end + return Grid( + cells, + nodes, + convert_to_orderedsets(cellsets), + convert_to_orderedsets(nodesets), + convert_to_orderedsets(facetsets), + convert_to_orderedsets(vertexsets), + ) end ########################## @@ -355,7 +385,35 @@ This function takes the local vertex representation (a `VertexIndex`) and looks toglobal(grid::AbstractGrid,vertexidx::VertexIndex) = vertices(getcells(grid,vertexidx[1]))[vertexidx[2]] toglobal(grid::AbstractGrid,vertexidx::Vector{VertexIndex}) = unique(toglobal.((grid,),vertexidx)) -@inline getdim(::AbstractGrid{dim}) where {dim} = dim +""" + Ferrite.getspatialdim(grid::AbstractGrid) + +Get the spatial dimension of the grid, corresponding to the vector dimension of the grid's coordinates. +""" +getspatialdim(::AbstractGrid{sdim}) where sdim = sdim + +""" + get_reference_dimension(grid::AbstractGrid) -> Union{Int, Symbol} + +Get information about the reference dimensions of the cells in the grid. +If all cells have the same reference dimension, `rdim::Int` is returned. 
+For grids with mixed reference dimensions, `:mixed` is returned. +Used internally to dispatch facet-calls to the correct entity when `rdim isa Int`. +""" +get_reference_dimension(g::AbstractGrid) = _get_reference_dimension(getcells(g)) +_get_reference_dimension(::AbstractVector{C}) where C <: AbstractCell{<:AbstractRefShape{rdim}} where rdim = rdim # Fast path for single rdim inferable from eltype +function _get_reference_dimension(cells::AbstractVector{<:AbstractCell}) + # Could make fast-path for eltype being union of cells with different rdims, but @KristofferC recommends against that, + # https://discourse.julialang.org/t/iterating-through-types-of-a-union-in-a-type-stable-manner/58285/3 + # Note, this function is inherently type-instable. + rdims = Set{Int}() + for cell in cells + push!(rdims, getrefdim(cell)) + end + length(rdims) == 1 && return first(rdims) + return :mixed +end + """ getcells(grid::AbstractGrid) getcells(grid::AbstractGrid, v::Union{Int,Vector{Int}} @@ -388,7 +446,13 @@ to a Node. "Returns the number of nodes in the grid." @inline getnnodes(grid::AbstractGrid) = length(grid.nodes) "Returns the number of nodes of the `i`-th cell." -@inline nnodes_per_cell(grid::AbstractGrid, i::Int=1) = nnodes(grid.cells[i]) +function nnodes_per_cell(grid::AbstractGrid) + if !isconcretetype(getcelltype(grid)) + error("There are different celltypes in the `grid`. Use `nnodes_per_cell(grid, cellid::Int)` instead") + end + return nnodes(first(grid.cells)) +end +@inline nnodes_per_cell(grid::AbstractGrid, i::Int) = nnodes(grid.cells[i]) "Return the number type of the nodal coordinates." @inline get_coordinate_eltype(grid::AbstractGrid) = get_coordinate_eltype(first(getnodes(grid))) @@ -396,7 +460,7 @@ to a Node. """ getcellset(grid::AbstractGrid, setname::String) -Returns all cells as cellid in a `Set` of a given `setname`. +Returns all cells as cellid in the set with name `setname`. """ @inline getcellset(grid::AbstractGrid, setname::String) = grid.cellsets[setname] """ @@ -409,7 +473,7 @@ Returns all cellsets of the `grid`. """ getnodeset(grid::AbstractGrid, setname::String) -Returns all nodes as nodeid in a `Set` of a given `setname`. +Returns all nodes as nodeid in the set with name `setname`. """ @inline getnodeset(grid::AbstractGrid, setname::String) = grid.nodesets[setname] """ @@ -420,35 +484,23 @@ Returns all nodesets of the `grid`. @inline getnodesets(grid::AbstractGrid) = grid.nodesets """ - getfaceset(grid::AbstractGrid, setname::String) + getfacetset(grid::AbstractGrid, setname::String) -Returns all faces as `FaceIndex` in a `Set` of a given `setname`. +Returns all faces as `FacetIndex` in the set with name `setname`. """ -@inline getfaceset(grid::AbstractGrid, setname::String) = grid.facesets[setname] +@inline getfacetset(grid::AbstractGrid, setname::String) = grid.facetsets[setname] """ - getfacesets(grid::AbstractGrid) + getfacetsets(grid::AbstractGrid) -Returns all facesets of the `grid`. +Returns all facet sets of the `grid`. """ -@inline getfacesets(grid::AbstractGrid) = grid.facesets +@inline getfacetsets(grid::AbstractGrid) = grid.facetsets -""" - getedgeset(grid::AbstractGrid, setname::String) -Returns all edges as `EdgeIndex` in a `Set` of a given `setname`. -""" -@inline getedgeset(grid::AbstractGrid, setname::String) = grid.edgesets[setname] """ - getedgesets(grid::AbstractGrid) + getvertexset(grid::AbstractGrid, setname::String) -Returns all edge sets of the grid. 
-""" -@inline getedgesets(grid::AbstractGrid) = grid.edgesets - -""" - getedgeset(grid::AbstractGrid, setname::String) - -Returns all vertices as `VertexIndex` in a `Set` of a given `setname`. +Returns all vertices as `VertexIndex` in the set with name `setname`. """ @inline getvertexset(grid::AbstractGrid, setname::String) = grid.vertexsets[setname] """ @@ -458,233 +510,21 @@ Returns all vertex sets of the grid. """ @inline getvertexsets(grid::AbstractGrid) = grid.vertexsets -n_faces_per_cell(grid::Grid) = nfaces(getcelltype(grid)) - # Transformations """ transform_coordinates!(grid::Abstractgrid, f::Function) -Transform all nodes of the `grid` based on some transformation function `f`. +Transform the coordinates of all nodes of the `grid` based on some transformation function `f(x)`. """ -function transform_coordinates!(g::Grid, f::Function) - replace!(n -> Node(f(get_node_coordinate(n))), g.nodes) +function transform_coordinates!(g::AbstractGrid, f::Function) + replace!(n -> Node(f(get_node_coordinate(n))), getnodes(g)) return g end -# Sets - -_check_setname(dict, name) = haskey(dict, name) && throw(ArgumentError("there already exists a set with the name: $name")) -_warn_emptyset(set, name) = length(set) == 0 && @warn("no entities added to the set with name: $name") - -""" - addcellset!(grid::AbstractGrid, name::String, cellid::Union{Set{Int}, Vector{Int}}) - addcellset!(grid::AbstractGrid, name::String, f::function; all::Bool=true) - -Adds a cellset to the grid with key `name`. -Cellsets are typically used to define subdomains of the problem, e.g. two materials in the computational domain. -The `DofHandler` can construct different fields which live not on the whole domain, but rather on a cellset. -`all=true` implies that `f(x)` must return `true` for all nodal coordinates `x` in the cell if the cell -should be added to the set, otherwise it suffices that `f(x)` returns `true` for one node. - -```julia -addcellset!(grid, "left", Set((1,3))) #add cells with id 1 and 3 to cellset left -addcellset!(grid, "right", x -> norm(x[1]) < 2.0 ) #add cell to cellset right, if x[1] of each cell's node is smaller than 2.0 -``` -""" -function addcellset!(grid::AbstractGrid, name::String, cellid::Union{Set{Int},Vector{Int}}) - _check_setname(grid.cellsets, name) - cells = Set(cellid) - _warn_emptyset(cells, name) - grid.cellsets[name] = cells - grid -end - -function addcellset!(grid::AbstractGrid, name::String, f::Function; all::Bool=true) - _check_setname(grid.cellsets, name) - cells = Set{Int}() - for (i, cell) in enumerate(getcells(grid)) - pass = all - for node_idx in cell.nodes - node = grid.nodes[node_idx] - v = f(node.x) - all ? (!v && (pass = false; break)) : (v && (pass = true; break)) - end - pass && push!(cells, i) - end - _warn_emptyset(cells, name) - grid.cellsets[name] = cells - grid -end - -""" - addfaceset!(grid::AbstractGrid, name::String, faceid::Union{Set{FaceIndex},Vector{FaceIndex}}) - addfaceset!(grid::AbstractGrid, name::String, f::Function; all::Bool=true) - -Adds a faceset to the grid with key `name`. -A faceset maps a `String` key to a `Set` of tuples corresponding to `(global_cell_id, local_face_id)`. -Facesets are used to initialize `Dirichlet` structs, that are needed to specify the boundary for the `ConstraintHandler`. -`all=true` implies that `f(x)` must return `true` for all nodal coordinates `x` on the face if the face -should be added to the set, otherwise it suffices that `f(x)` returns `true` for one node. 
- -```julia -addfaceset!(grid, "right", Set(((2,2),(4,2))) #see grid manual example for reference -addfaceset!(grid, "clamped", x -> norm(x[1]) ≈ 0.0) #see incompressible elasticity example for reference -``` -""" -addfaceset!(grid::AbstractGrid, name::String, set::Union{Set{FaceIndex},Vector{FaceIndex}}) = - _addset!(grid, name, set, grid.facesets) -addedgeset!(grid::AbstractGrid, name::String, set::Union{Set{EdgeIndex},Vector{EdgeIndex}}) = - _addset!(grid, name, set, grid.edgesets) -addvertexset!(grid::AbstractGrid, name::String, set::Union{Set{VertexIndex},Vector{VertexIndex}}) = - _addset!(grid, name, set, grid.vertexsets) -function _addset!(grid::AbstractGrid, name::String, _set, dict::Dict) - _check_setname(dict, name) - set = Set(_set) - _warn_emptyset(set, name) - dict[name] = set - grid -end - -addfaceset!(grid::AbstractGrid, name::String, f::Function; all::Bool=true) = - _addset!(grid, name, f, Ferrite.faces, grid.facesets, FaceIndex; all=all) -addedgeset!(grid::AbstractGrid, name::String, f::Function; all::Bool=true) = - _addset!(grid, name, f, Ferrite.edges, grid.edgesets, EdgeIndex; all=all) -addvertexset!(grid::AbstractGrid, name::String, f::Function; all::Bool=true) = - _addset!(grid, name, f, Ferrite.vertices, grid.vertexsets, VertexIndex; all=all) -function _addset!(grid::AbstractGrid, name::String, f::Function, _ftype::Function, dict::Dict, _indextype::Type; all::Bool=true) - _check_setname(dict, name) - _set = Set{_indextype}() - for (cell_idx, cell) in enumerate(getcells(grid)) - for (face_idx, face) in enumerate(_ftype(cell)) - pass = all - for node_idx in face - v = f(grid.nodes[node_idx].x) - all ? (!v && (pass = false; break)) : (v && (pass = true; break)) - end - pass && push!(_set, _indextype(cell_idx, face_idx)) - end - end - _warn_emptyset(_set, name) - dict[name] = _set - grid -end - - -""" - getfaceedges(grid::AbstractGrid, face::FaceIndex) - getfaceedges(cell::AbstractCell, face::FaceIndex) - -Returns the edges represented as `Set{EdgeIndex}` in a given face represented as -`FaceIndex`. - -```julia-repl -julia> using Ferrite; using Ferrite: getfaceedges - -julia> grid = generate_grid(Tetrahedron, (2,1,1)); - -julia> getfaceedges(grid, FaceIndex(4,2)) -Set{EdgeIndex} with 3 elements: - EdgeIndex((4, 4)) - EdgeIndex((4, 5)) - EdgeIndex((4, 1)) -``` -""" -function getfaceedges end - -""" - getfacevertices(grid::AbstractGrid, face::FaceIndex) - getfacevertices(cell::AbstractCell, face::FaceIndex) - -Returns the vertices represented as `Set{VertexIndex}` in a given face represented as -`FaceIndex`. - -```julia-repl -julia> using Ferrite; using Ferrite: getfacevertices - -julia> grid = generate_grid(Tetrahedron, (2,1,1)); - -julia> getfacevertices(grid, FaceIndex(4,2)) -Set{VertexIndex} with 3 elements: - VertexIndex((4, 2)) - VertexIndex((4, 4)) - VertexIndex((4, 1)) -``` -""" -function getfacevertices end - -""" - getedgevertices(grid::AbstractGrid, edge::EdgeIndex) - getedgevertices(cell::AbstractCell, edge::EdgeIndex) - -Returns the vertices represented as `Set{VertexIndex}` in a given edge represented as -`EdgeIndex`. 
- -```julia-repl -julia> using Ferrite; using Ferrite: getedgevertices - -julia> grid = generate_grid(Tetrahedron, (2,1,1)); - -julia> getedgevertices(grid, EdgeIndex(4,2)) -Set{EdgeIndex} with 2 elements: - VertexIndex((4, 2)) - VertexIndex((4, 3)) -``` -""" -function getedgevertices end - -for (func, entity_f, subentity_f, entity_t, subentity_t) in ( - (:getfaceedges, :faces, :edges, :FaceIndex, :EdgeIndex), - (:getfacevertices, :faces, :vertices, :FaceIndex, :VertexIndex), - (:getedgevertices, :edges, :vertices, :EdgeIndex, :VertexIndex), -) - @eval begin - function $(func)(grid::AbstractGrid, entity_idx::$(entity_t)) - cell = getcells(grid)[entity_idx[1]] - return $(func)(cell, entity_idx) - end - function $(func)(cell::AbstractCell, entity_idx::$(entity_t)) - _set = Set{$(subentity_t)}() - subentities = $(subentity_f)(cell) - entity = $(entity_f)(cell)[entity_idx[2]] - for (subentity_idx, subentity) in pairs(subentities) - if all(x -> x in entity, subentity) - push!(_set, $(subentity_t)((entity_idx[1], subentity_idx))) - end - end - return _set - end - end -end - -""" - addnodeset!(grid::AbstractGrid, name::String, nodeid::Union{Vector{Int},Set{Int}}) - addnodeset!(grid::AbstractGrid, name::String, f::Function) - -Adds a `nodeset::Dict{String, Set{Int}}` to the `grid` with key `name`. Has the same interface as `addcellset`. -However, instead of mapping a cell id to the `String` key, a set of node ids is returned. -""" -function addnodeset!(grid::AbstractGrid, name::String, nodeid::Union{Vector{Int},Set{Int}}) - _check_setname(grid.nodesets, name) - grid.nodesets[name] = Set(nodeid) - _warn_emptyset(grid.nodesets[name], name) - grid -end - -function addnodeset!(grid::AbstractGrid, name::String, f::Function) - _check_setname(grid.nodesets, name) - nodes = Set{Int}() - for (i, n) in enumerate(getnodes(grid)) - f(n.x) && push!(nodes, i) - end - grid.nodesets[name] = nodes - _warn_emptyset(grid.nodesets[name], name) - grid -end - """ getcoordinates(grid::AbstractGrid, idx::Union{Int,CellIndex}) getcoordinates(cache::CellCache) - + Get a vector with the coordinates of the cell corresponding to `idx` or `cache` """ @inline function getcoordinates(grid::AbstractGrid, idx::Int) @@ -699,17 +539,17 @@ end """ getcoordinates!(x::Vector{<:Vec}, grid::AbstractGrid, idx::Union{Int,CellIndex}) getcoordinates!(x::Vector{<:Vec}, grid::AbstractGrid, cell::AbstractCell) - + Mutate `x` to the coordinates of the cell corresponding to `idx` or `cell`. 
""" -@inline function getcoordinates!(x::Vector{Vec{dim,T}}, grid::Ferrite.AbstractGrid, cell::Ferrite.AbstractCell) where {dim,T} +@inline function getcoordinates!(x::Vector{Vec{dim,T}}, grid::AbstractGrid, cell::AbstractCell) where {dim,T} node_ids = get_node_ids(cell) @inbounds for i in 1:length(x) x[i] = get_node_coordinate(grid, node_ids[i]) end return x end -@inline function getcoordinates!(x::Vector{Vec{dim,T}}, grid::Ferrite.AbstractGrid, cellid::Int) where {dim,T} +@inline function getcoordinates!(x::Vector{Vec{dim,T}}, grid::AbstractGrid, cellid::Int) where {dim,T} cell = getcells(grid, cellid) getcoordinates!(x, grid, cell) end @@ -717,7 +557,7 @@ end """ get_node_coordinate(grid::AbstractGrid, n::Int) - + Return the coordinate of the `n`th node in `grid` """ get_node_coordinate(grid, n) = get_node_coordinate(getnodes(grid, n)) @@ -739,7 +579,7 @@ function Base.show(io::IO, ::MIME"text/plain", grid::Grid) if isconcretetype(eltype(grid.cells)) typestrs = [repr(eltype(grid.cells))] else - typestrs = sort!(repr.(Set(typeof(x) for x in grid.cells))) + typestrs = sort!(repr.(OrderedSet(typeof(x) for x in grid.cells))) end join(io, typestrs, '/') print(io, " cells and $(getnnodes(grid)) nodes") @@ -752,17 +592,18 @@ Helper function to dispatch on the correct entity from a given boundary index. """ boundaryfunction(::Type{<:BoundaryIndex}) -boundaryfunction(::Type{FaceIndex}) = Ferrite.faces -boundaryfunction(::Type{EdgeIndex}) = Ferrite.edges -boundaryfunction(::Type{VertexIndex}) = Ferrite.vertices +boundaryfunction(::Type{FaceIndex}) = faces +boundaryfunction(::Type{EdgeIndex}) = edges +boundaryfunction(::Type{VertexIndex}) = vertices +boundaryfunction(::Type{FacetIndex}) = facets -for INDEX in (:VertexIndex, :EdgeIndex, :FaceIndex) - @eval begin +for INDEX in (:VertexIndex, :EdgeIndex, :FaceIndex, :FacetIndex) + @eval begin #Constructor ($INDEX)(a::Int, b::Int) = ($INDEX)((a,b)) Base.getindex(I::($INDEX), i::Int) = I.idx[i] - + #To be able to do a,b = faceidx Base.iterate(I::($INDEX), state::Int=1) = (state==3) ? nothing : (I[state], state+1) @@ -802,8 +643,8 @@ end """ SurfaceOrientationInfo -Orientation information for 2D entities. Such an entity can be -possibly flipped (i.e. the defining vertex order is reverse to the +Orientation information for 2D entities. Such an entity can be +possibly flipped (i.e. the defining vertex order is reverse to the spanning vertex order) and the vertices can be rotated against each other. Take for example the faces ``` @@ -817,7 +658,7 @@ which are rotated against each other by 90° (shift index is 1) or the faces | A | | B | 4---3 3---4 ``` -which are flipped against each other. Any combination of these can happen. +which are flipped against each other. Any combination of these can happen. The combination to map this local face to the defining face is encoded with this data structure via ``rotate \\circ flip`` where the rotation is indiced by the shift index. @@ -844,7 +685,7 @@ is called *regular*, indicated by `flipped=false`, while the oriented path ``` is called *inverted*, indicated by `flipped=true`. -2D entities can be flipped (i.e. the defining vertex order is reverse to the +2D entities can be flipped (i.e. the defining vertex order is reverse to the spanning vertex order) and the vertices can be rotated against each other. 
The reference entity is a one with it's first node is the lowest index vertex diff --git a/src/Grid/grid_generators.jl b/src/Grid/grid_generators.jl index 38bebc88f0..3bb67f87b3 100644 --- a/src/Grid/grid_generators.jl +++ b/src/Grid/grid_generators.jl @@ -1,17 +1,3 @@ -function boundaries_to_sparse(boundary) - n = length(boundary) - I = Vector{Int}(undef, n) - J = Vector{Int}(undef, n) - V = Vector{Bool}(undef, n) - for (idx, el) in enumerate(boundary) - cell, face = el.idx - I[idx] = face - J[idx] = cell - V[idx] = true - end - return sparse(I, J, V) -end - """ generate_grid(celltype::Cell, nel::NTuple, [left::Vec, right::Vec) @@ -42,15 +28,14 @@ function generate_grid(::Type{Line}, nel::NTuple{1,Int}, left::Vec{1,T}=Vec{1}(( # Cell faces - boundary = Vector([FaceIndex(1, 1), - FaceIndex(nel_x, 2)]) - - boundary_matrix = boundaries_to_sparse(boundary) + boundary = Vector([FacetIndex(1, 1), + FacetIndex(nel_x, 2)]) # Cell face sets - facesets = Dict("left" => Set{FaceIndex}([boundary[1]]), - "right" => Set{FaceIndex}([boundary[2]])) - return Grid(cells, nodes, facesets=facesets, boundary_matrix=boundary_matrix) + facetsets = Dict("left" => OrderedSet{FacetIndex}([boundary[1]]), + "right" => OrderedSet{FacetIndex}([boundary[2]])) + foreach(s -> sort!(s, by = x -> x.idx), values(facetsets)) + return Grid(cells, nodes, facetsets=facetsets) end # QuadraticLine @@ -72,20 +57,19 @@ function generate_grid(::Type{QuadraticLine}, nel::NTuple{1,Int}, left::Vec{1,T} end # Cell faces - boundary = FaceIndex[FaceIndex(1, 1), - FaceIndex(nel_x, 2)] - - boundary_matrix = boundaries_to_sparse(boundary) + boundary = FacetIndex[FacetIndex(1, 1), + FacetIndex(nel_x, 2)] # Cell face sets - facesets = Dict("left" => Set{FaceIndex}([boundary[1]]), - "right" => Set{FaceIndex}([boundary[2]])) - return Grid(cells, nodes, facesets=facesets, boundary_matrix=boundary_matrix) + facetsets = Dict("left" => OrderedSet{FacetIndex}([boundary[1]]), + "right" => OrderedSet{FacetIndex}([boundary[2]])) + foreach(s -> sort!(s, by = x -> x.idx), values(facetsets)) + return Grid(cells, nodes, facetsets=facetsets) end -function _generate_2d_nodes!(nodes, nx, ny, LL, LR, UR, UL) +function _generate_2d_nodes!(nodes::Vector{Node{2, T}}, nx, ny, LL, LR, UR, UL) where T for i in 0:ny-1 - ratio_bounds = i / (ny-1) + ratio_bounds = convert(T, i) / (ny-1) x0 = LL[1] * (1 - ratio_bounds) + ratio_bounds * UL[1] x1 = LR[1] * (1 - ratio_bounds) + ratio_bounds * UR[1] @@ -94,7 +78,7 @@ function _generate_2d_nodes!(nodes, nx, ny, LL, LR, UR, UL) y1 = LR[2] * (1 - ratio_bounds) + ratio_bounds * UR[2] for j in 0:nx-1 - ratio = j / (nx-1) + ratio = convert(T, j) / (nx-1) x = x0 * (1 - ratio) + ratio * x1 y = y0 * (1 - ratio) + ratio * y1 push!(nodes, Node((x, y))) @@ -134,22 +118,21 @@ function generate_grid(C::Type{Quadrilateral}, nel::NTuple{2,Int}, LL::Vec{2,T}, # Cell faces cell_array = reshape(collect(1:nel_tot),(nel_x, nel_y)) - boundary = FaceIndex[[FaceIndex(cl, 1) for cl in cell_array[:,1]]; - [FaceIndex(cl, 2) for cl in cell_array[end,:]]; - [FaceIndex(cl, 3) for cl in cell_array[:,end]]; - [FaceIndex(cl, 4) for cl in cell_array[1,:]]] - - boundary_matrix = boundaries_to_sparse(boundary) + boundary = FacetIndex[[FacetIndex(cl, 1) for cl in cell_array[:,1]]; + [FacetIndex(cl, 2) for cl in cell_array[end,:]]; + [FacetIndex(cl, 3) for cl in cell_array[:,end]]; + [FacetIndex(cl, 4) for cl in cell_array[1,:]]] # Cell face sets offset = 0 - facesets = Dict{String, Set{FaceIndex}}() - facesets["bottom"] = 
Set{FaceIndex}(boundary[(1:length(cell_array[:,1])) .+ offset]); offset += length(cell_array[:,1]) - facesets["right"] = Set{FaceIndex}(boundary[(1:length(cell_array[end,:])) .+ offset]); offset += length(cell_array[end,:]) - facesets["top"] = Set{FaceIndex}(boundary[(1:length(cell_array[:,end])) .+ offset]); offset += length(cell_array[:,end]) - facesets["left"] = Set{FaceIndex}(boundary[(1:length(cell_array[1,:])) .+ offset]); offset += length(cell_array[1,:]) - - return Grid(cells, nodes, facesets=facesets, boundary_matrix=boundary_matrix) + facetsets = Dict{String, OrderedSet{FacetIndex}}() + facetsets["bottom"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,1])) .+ offset]); offset += length(cell_array[:,1]) + facetsets["right"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[end,:])) .+ offset]); offset += length(cell_array[end,:]) + facetsets["top"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,end])) .+ offset]); offset += length(cell_array[:,end]) + facetsets["left"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[1,:])) .+ offset]); offset += length(cell_array[1,:]) + foreach(s -> sort!(s, by = x -> x.idx), values(facetsets)) + + return Grid(cells, nodes, facetsets=facetsets) end # QuadraticQuadrilateral @@ -173,22 +156,21 @@ function generate_grid(::Type{QuadraticQuadrilateral}, nel::NTuple{2,Int}, LL::V # Cell faces cell_array = reshape(collect(1:nel_tot),(nel_x, nel_y)) - boundary = FaceIndex[[FaceIndex(cl, 1) for cl in cell_array[:,1]]; - [FaceIndex(cl, 2) for cl in cell_array[end,:]]; - [FaceIndex(cl, 3) for cl in cell_array[:,end]]; - [FaceIndex(cl, 4) for cl in cell_array[1,:]]] - - boundary_matrix = boundaries_to_sparse(boundary) + boundary = FacetIndex[[FacetIndex(cl, 1) for cl in cell_array[:,1]]; + [FacetIndex(cl, 2) for cl in cell_array[end,:]]; + [FacetIndex(cl, 3) for cl in cell_array[:,end]]; + [FacetIndex(cl, 4) for cl in cell_array[1,:]]] # Cell face sets offset = 0 - facesets = Dict{String, Set{FaceIndex}}() - facesets["bottom"] = Set{FaceIndex}(boundary[(1:length(cell_array[:,1])) .+ offset]); offset += length(cell_array[:,1]) - facesets["right"] = Set{FaceIndex}(boundary[(1:length(cell_array[end,:])) .+ offset]); offset += length(cell_array[end,:]) - facesets["top"] = Set{FaceIndex}(boundary[(1:length(cell_array[:,end])) .+ offset]); offset += length(cell_array[:,end]) - facesets["left"] = Set{FaceIndex}(boundary[(1:length(cell_array[1,:])) .+ offset]); offset += length(cell_array[1,:]) - - return Grid(cells, nodes, facesets=facesets, boundary_matrix=boundary_matrix) + facetsets = Dict{String, OrderedSet{FacetIndex}}() + facetsets["bottom"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,1])) .+ offset]); offset += length(cell_array[:,1]) + facetsets["right"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[end,:])) .+ offset]); offset += length(cell_array[end,:]) + facetsets["top"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,end])) .+ offset]); offset += length(cell_array[:,end]) + facetsets["left"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[1,:])) .+ offset]); offset += length(cell_array[1,:]) + foreach(s -> sort!(s, by = x -> x.idx), values(facetsets)) + + return Grid(cells, nodes, facetsets=facetsets) end # Hexahedron @@ -216,27 +198,26 @@ function generate_grid(::Type{Hexahedron}, nel::NTuple{3,Int}, left::Vec{3,T}=Ve # Cell faces cell_array = reshape(collect(1:nel_tot),(nel_x, nel_y, nel_z)) - boundary = FaceIndex[[FaceIndex(cl, 1) for cl in cell_array[:,:,1][:]]; - [FaceIndex(cl, 
2) for cl in cell_array[:,1,:][:]]; - [FaceIndex(cl, 3) for cl in cell_array[end,:,:][:]]; - [FaceIndex(cl, 4) for cl in cell_array[:,end,:][:]]; - [FaceIndex(cl, 5) for cl in cell_array[1,:,:][:]]; - [FaceIndex(cl, 6) for cl in cell_array[:,:,end][:]]] - - boundary_matrix = boundaries_to_sparse(boundary) + boundary = FacetIndex[[FacetIndex(cl, 1) for cl in cell_array[:,:,1][:]]; + [FacetIndex(cl, 2) for cl in cell_array[:,1,:][:]]; + [FacetIndex(cl, 3) for cl in cell_array[end,:,:][:]]; + [FacetIndex(cl, 4) for cl in cell_array[:,end,:][:]]; + [FacetIndex(cl, 5) for cl in cell_array[1,:,:][:]]; + [FacetIndex(cl, 6) for cl in cell_array[:,:,end][:]]] # Cell face sets offset = 0 - facesets = Dict{String,Set{FaceIndex}}() - facesets["bottom"] = Set{FaceIndex}(boundary[(1:length(cell_array[:,:,1][:])) .+ offset]); offset += length(cell_array[:,:,1][:]) - facesets["front"] = Set{FaceIndex}(boundary[(1:length(cell_array[:,1,:][:])) .+ offset]); offset += length(cell_array[:,1,:][:]) - facesets["right"] = Set{FaceIndex}(boundary[(1:length(cell_array[end,:,:][:])) .+ offset]); offset += length(cell_array[end,:,:][:]) - facesets["back"] = Set{FaceIndex}(boundary[(1:length(cell_array[:,end,:][:])) .+ offset]); offset += length(cell_array[:,end,:][:]) - facesets["left"] = Set{FaceIndex}(boundary[(1:length(cell_array[1,:,:][:])) .+ offset]); offset += length(cell_array[1,:,:][:]) - facesets["top"] = Set{FaceIndex}(boundary[(1:length(cell_array[:,:,end][:])) .+ offset]); offset += length(cell_array[:,:,end][:]) - - return Grid(cells, nodes, facesets=facesets, boundary_matrix=boundary_matrix) -end + facetsets = Dict{String,OrderedSet{FacetIndex}}() + facetsets["bottom"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,:,1][:])) .+ offset]); offset += length(cell_array[:,:,1][:]) + facetsets["front"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,1,:][:])) .+ offset]); offset += length(cell_array[:,1,:][:]) + facetsets["right"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[end,:,:][:])) .+ offset]); offset += length(cell_array[end,:,:][:]) + facetsets["back"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,end,:][:])) .+ offset]); offset += length(cell_array[:,end,:][:]) + facetsets["left"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[1,:,:][:])) .+ offset]); offset += length(cell_array[1,:,:][:]) + facetsets["top"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,:,end][:])) .+ offset]); offset += length(cell_array[:,:,end][:]) + foreach(s -> sort!(s, by = x -> x.idx), values(facetsets)) + + return Grid(cells, nodes, facetsets=facetsets) +end # Wedge function generate_grid(::Type{Wedge}, nel::NTuple{3,Int}, left::Vec{3,T}=Vec{3}((-1.0,-1.0,-1.0)), right::Vec{3,T}=Vec{3}((1.0,1.0,1.0))) where {T} @@ -257,35 +238,34 @@ function generate_grid(::Type{Wedge}, nel::NTuple{3,Int}, left::Vec{3,T}=Vec{3}( node_array = reshape(collect(1:n_nodes), (n_nodes_x, n_nodes_y, n_nodes_z)) cells = Wedge[] for k in 1:nel_z, j in 1:nel_y, i in 1:nel_x - push!(cells, Wedge((node_array[i,j,k], node_array[i+1,j,k], node_array[i,j+1,k], + push!(cells, Wedge((node_array[i,j,k], node_array[i+1,j,k], node_array[i,j+1,k], node_array[i,j,k+1], node_array[i+1,j,k+1], node_array[i,j+1,k+1]))) # ◺ - push!(cells, Wedge((node_array[i+1,j,k], node_array[i+1,j+1,k], node_array[i,j+1,k], + push!(cells, Wedge((node_array[i+1,j,k], node_array[i+1,j+1,k], node_array[i,j+1,k], node_array[i+1,j,k+1], node_array[i+1,j+1,k+1], node_array[i,j+1,k+1]))) # ◹ end # Order the cells as c_nxyz[2, x, 
y, z] such that we can look up boundary cells c_nxyz = reshape(1:length(cells), (2, nel...)) - @views le = map(x -> FaceIndex(x,3), c_nxyz[1, 1, :, :][:]) - @views ri = map(x -> FaceIndex(x,2), c_nxyz[2, end, :, :][:]) - @views fr = map(x -> FaceIndex(x,2), c_nxyz[1, :, 1, :][:]) - @views ba = map(x -> FaceIndex(x,4), c_nxyz[2, :, end, :][:]) - @views bo = [map(x -> FaceIndex(x,1), c_nxyz[1, :, :, 1][:]) ; map(x -> FaceIndex(x,1), c_nxyz[2, :, :, 1][:])] - @views to = [map(x -> FaceIndex(x,5), c_nxyz[1, :, :, end][:]) ; map(x -> FaceIndex(x,5), c_nxyz[2, :, :, end][:])] - - boundary_matrix = boundaries_to_sparse([le; ri; bo; to; fr; ba]) - - facesets = Dict( - "left" => Set(le), - "right" => Set(ri), - "front" => Set(fr), - "back" => Set(ba), - "bottom" => Set(bo), - "top" => Set(to), + @views le = map(x -> FacetIndex(x,3), c_nxyz[1, 1, :, :][:]) + @views ri = map(x -> FacetIndex(x,2), c_nxyz[2, end, :, :][:]) + @views fr = map(x -> FacetIndex(x,2), c_nxyz[1, :, 1, :][:]) + @views ba = map(x -> FacetIndex(x,4), c_nxyz[2, :, end, :][:]) + @views bo = [map(x -> FacetIndex(x,1), c_nxyz[1, :, :, 1][:]) ; map(x -> FacetIndex(x,1), c_nxyz[2, :, :, 1][:])] + @views to = [map(x -> FacetIndex(x,5), c_nxyz[1, :, :, end][:]) ; map(x -> FacetIndex(x,5), c_nxyz[2, :, :, end][:])] + + facetsets = Dict( + "left" => OrderedSet{FacetIndex}(le), + "right" => OrderedSet{FacetIndex}(ri), + "front" => OrderedSet{FacetIndex}(fr), + "back" => OrderedSet{FacetIndex}(ba), + "bottom" => OrderedSet{FacetIndex}(bo), + "top" => OrderedSet{FacetIndex}(to), ) + foreach(s -> sort!(s, by = x -> x.idx), values(facetsets)) - return Grid(cells, nodes, facesets=facesets, boundary_matrix=boundary_matrix) -end + return Grid(cells, nodes, facetsets=facetsets) +end #Pyramid function generate_grid(::Type{Pyramid}, nel::NTuple{3,Int}, left::Vec{3,T}=Vec{3}((-1.0,-1.0,-1.0)), right::Vec{3,T}=Vec{3}((1.0,1.0,1.0))) where {T} @@ -304,24 +284,24 @@ function generate_grid(::Type{Pyramid}, nel::NTuple{3,Int}, left::Vec{3,T}=Vec{3 #Center node in each "voxel" for k in 1:nel_z, j in 1:nel_y, i in 1:nel_x - midx = 0.5(coords_x[i+1] + coords_x[i]) - midy = 0.5(coords_y[j+1] + coords_y[j]) - midz = 0.5(coords_z[k+1] + coords_z[k]) + midx = (coords_x[i+1] + coords_x[i]) / 2 + midy = (coords_y[j+1] + coords_y[j]) / 2 + midz = (coords_z[k+1] + coords_z[k]) / 2 push!(nodes, Node((midx, midy, midz))) - end + end # Generate cells node_array = reshape(collect(1:n_nodes), (n_nodes_x, n_nodes_y, n_nodes_z)) cells = Pyramid[] - midnodecounter = n_nodes_x*n_nodes_y*n_nodes_z + midnodecounter = n_nodes_x*n_nodes_y*n_nodes_z for k in 1:nel_z, j in 1:nel_y, i in 1:nel_x midnodecounter += 1 pyramid1 = Pyramid((node_array[i,j,k], node_array[i+1,j,k], node_array[i,j+1,k], node_array[i+1,j+1,k], midnodecounter )) # bottom - pyramid2 = Pyramid((node_array[i,j,k], node_array[i,j,k+1], node_array[i+1,j,k], node_array[i+1,j,k+1], midnodecounter )) # front + pyramid2 = Pyramid((node_array[i,j,k], node_array[i,j,k+1], node_array[i+1,j,k], node_array[i+1,j,k+1], midnodecounter )) # front pyramid3 = Pyramid((node_array[i+1,j,k], node_array[i+1,j,k+1], node_array[i+1,j+1,k], node_array[i+1,j+1,k+1], midnodecounter )) # right pyramid4 = Pyramid((node_array[i,j+1,k], node_array[i+1,j+1,k], node_array[i,j+1,k+1], node_array[i+1,j+1,k+1], midnodecounter )) # back - pyramid5 = Pyramid((node_array[i,j,k], node_array[i,j+1,k], node_array[i,j,k+1], node_array[i,j+1,k+1], midnodecounter )) # left - pyramid6 = Pyramid((node_array[i,j,k+1], node_array[i,j+1,k+1], 
node_array[i+1,j,k+1], node_array[i+1,j+1,k+1], midnodecounter )) # top + pyramid5 = Pyramid((node_array[i,j,k], node_array[i,j+1,k], node_array[i,j,k+1], node_array[i,j+1,k+1], midnodecounter )) # left + pyramid6 = Pyramid((node_array[i,j,k+1], node_array[i,j+1,k+1], node_array[i+1,j,k+1], node_array[i+1,j+1,k+1], midnodecounter )) # top push!(cells, pyramid1, pyramid2, pyramid3, pyramid4, pyramid5, pyramid6) end @@ -329,28 +309,27 @@ function generate_grid(::Type{Pyramid}, nel::NTuple{3,Int}, left::Vec{3,T}=Vec{3 ncells_per_voxel = 6 c_nxyz = reshape(1:(prod(nel)*ncells_per_voxel), (ncells_per_voxel, nel...)) - @views le = map(x -> FaceIndex(x,1), c_nxyz[5, 1, :, :][:]) - @views ri = map(x -> FaceIndex(x,1), c_nxyz[3, end, :, :][:]) - @views fr = map(x -> FaceIndex(x,1), c_nxyz[2, :, 1, :][:]) - @views ba = map(x -> FaceIndex(x,1), c_nxyz[4, :, end, :][:]) - @views bo = map(x -> FaceIndex(x,1), c_nxyz[1, :, :, 1][:]) - @views to = map(x -> FaceIndex(x,1), c_nxyz[6, :, :, end][:]) - - boundary_matrix = boundaries_to_sparse([le; ri; bo; to; fr; ba]) - - facesets = Dict( - "left" => Set(le), - "right" => Set(ri), - "front" => Set(fr), - "back" => Set(ba), - "bottom" => Set(bo), - "top" => Set(to), + @views le = map(x -> FacetIndex(x,1), c_nxyz[5, 1, :, :][:]) + @views ri = map(x -> FacetIndex(x,1), c_nxyz[3, end, :, :][:]) + @views fr = map(x -> FacetIndex(x,1), c_nxyz[2, :, 1, :][:]) + @views ba = map(x -> FacetIndex(x,1), c_nxyz[4, :, end, :][:]) + @views bo = map(x -> FacetIndex(x,1), c_nxyz[1, :, :, 1][:]) + @views to = map(x -> FacetIndex(x,1), c_nxyz[6, :, :, end][:]) + + facetsets = Dict( + "left" => OrderedSet{FacetIndex}(le), + "right" => OrderedSet{FacetIndex}(ri), + "front" => OrderedSet{FacetIndex}(fr), + "back" => OrderedSet{FacetIndex}(ba), + "bottom" => OrderedSet{FacetIndex}(bo), + "top" => OrderedSet{FacetIndex}(to), ) + foreach(s -> sort!(s, by = x -> x.idx), values(facetsets)) - return Grid(cells, nodes, facesets=facesets, boundary_matrix=boundary_matrix) -end + return Grid(cells, nodes, facetsets=facetsets) +end -function Ferrite.generate_grid(::Type{SerendipityQuadraticHexahedron}, nel::NTuple{3,Int}, left::Vec{3,T}=Vec{3}((-1.0,-1.0,-1.0)), right::Vec{3,T}=Vec{3}((1.0,1.0,1.0))) where {T} +function generate_grid(::Type{SerendipityQuadraticHexahedron}, nel::NTuple{3,Int}, left::Vec{3,T}=Vec{3}((-1.0,-1.0,-1.0)), right::Vec{3,T}=Vec{3}((1.0,1.0,1.0))) where {T} nel_x = nel[1]; nel_y = nel[2]; nel_z = nel[3]; nel_tot = nel_x*nel_y*nel_z nnode_x = 2nel_x + 1; nnode_y = 2nel_y + 1; nnode_z = 2nel_z + 1 #Note: not the actually number of nodes in x/y/z, just a temporary variables @@ -374,9 +353,9 @@ function Ferrite.generate_grid(::Type{SerendipityQuadraticHexahedron}, nel::NTup # Generate cells cells = SerendipityQuadraticHexahedron[] - for k in 1:2:2nel_z, j in 1:2:2nel_y, i in 1:2:2nel_x + for k in 1:2:2nel_z, j in 1:2:2nel_y, i in 1:2:2nel_x push!(cells, SerendipityQuadraticHexahedron(( - node_array[i,j,k], node_array[i+2,j,k], node_array[i+2,j+2,k], node_array[i,j+2,k], # vertices bot + node_array[i,j,k], node_array[i+2,j,k], node_array[i+2,j+2,k], node_array[i,j+2,k], # vertices bot node_array[i,j,k+2], node_array[i+2,j,k+2], node_array[i+2,j+2,k+2], node_array[i,j+2,k+2], # vertices top node_array[i+1,j,k], node_array[i+2,j+1,k], node_array[i+1,j+2,k], node_array[i,j+1,k], # edges horizontal bottom node_array[i+1,j,k+2], node_array[i+2,j+1,k+2], node_array[i+1,j+2,k+2], node_array[i,j+1,k+2], # edges horizontal top @@ -386,26 +365,25 @@ function 
Ferrite.generate_grid(::Type{SerendipityQuadraticHexahedron}, nel::NTup # Cell faces cell_array = reshape(collect(1:nel_tot),(nel_x, nel_y, nel_z)) - boundary = FaceIndex[[FaceIndex(cl, 1) for cl in cell_array[:,:,1][:]]; - [FaceIndex(cl, 2) for cl in cell_array[:,1,:][:]]; - [FaceIndex(cl, 3) for cl in cell_array[end,:,:][:]]; - [FaceIndex(cl, 4) for cl in cell_array[:,end,:][:]]; - [FaceIndex(cl, 5) for cl in cell_array[1,:,:][:]]; - [FaceIndex(cl, 6) for cl in cell_array[:,:,end][:]]] - - boundary_matrix = Ferrite.boundaries_to_sparse(boundary) + boundary = FacetIndex[[FacetIndex(cl, 1) for cl in cell_array[:,:,1][:]]; + [FacetIndex(cl, 2) for cl in cell_array[:,1,:][:]]; + [FacetIndex(cl, 3) for cl in cell_array[end,:,:][:]]; + [FacetIndex(cl, 4) for cl in cell_array[:,end,:][:]]; + [FacetIndex(cl, 5) for cl in cell_array[1,:,:][:]]; + [FacetIndex(cl, 6) for cl in cell_array[:,:,end][:]]] # Cell face sets offset = 0 - facesets = Dict{String,Set{FaceIndex}}() - facesets["bottom"] = Set{FaceIndex}(boundary[(1:length(cell_array[:,:,1][:])) .+ offset]); offset += length(cell_array[:,:,1][:]) - facesets["front"] = Set{FaceIndex}(boundary[(1:length(cell_array[:,1,:][:])) .+ offset]); offset += length(cell_array[:,1,:][:]) - facesets["right"] = Set{FaceIndex}(boundary[(1:length(cell_array[end,:,:][:])) .+ offset]); offset += length(cell_array[end,:,:][:]) - facesets["back"] = Set{FaceIndex}(boundary[(1:length(cell_array[:,end,:][:])) .+ offset]); offset += length(cell_array[:,end,:][:]) - facesets["left"] = Set{FaceIndex}(boundary[(1:length(cell_array[1,:,:][:])) .+ offset]); offset += length(cell_array[1,:,:][:]) - facesets["top"] = Set{FaceIndex}(boundary[(1:length(cell_array[:,:,end][:])) .+ offset]); offset += length(cell_array[:,:,end][:]) - - return Grid(cells, nodes, facesets=facesets, boundary_matrix=boundary_matrix) + facetsets = Dict{String,OrderedSet{FacetIndex}}() + facetsets["bottom"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,:,1][:])) .+ offset]); offset += length(cell_array[:,:,1][:]) + facetsets["front"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,1,:][:])) .+ offset]); offset += length(cell_array[:,1,:][:]) + facetsets["right"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[end,:,:][:])) .+ offset]); offset += length(cell_array[end,:,:][:]) + facetsets["back"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,end,:][:])) .+ offset]); offset += length(cell_array[:,end,:][:]) + facetsets["left"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[1,:,:][:])) .+ offset]); offset += length(cell_array[1,:,:][:]) + facetsets["top"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[:,:,end][:])) .+ offset]); offset += length(cell_array[:,:,end][:]) + foreach(s -> sort!(s, by = x -> x.idx), values(facetsets)) + + return Grid(cells, nodes, facetsets=facetsets) end # Triangle @@ -428,22 +406,21 @@ function generate_grid(::Type{Triangle}, nel::NTuple{2,Int}, LL::Vec{2,T}, LR::V # Cell faces cell_array = reshape(collect(1:nel_tot),(2, nel_x, nel_y)) - boundary = FaceIndex[[FaceIndex(cl, 1) for cl in cell_array[1,:,1]]; - [FaceIndex(cl, 1) for cl in cell_array[2,end,:]]; - [FaceIndex(cl, 2) for cl in cell_array[2,:,end]]; - [FaceIndex(cl, 3) for cl in cell_array[1,1,:]]] - - boundary_matrix = boundaries_to_sparse(boundary) + boundary = FacetIndex[[FacetIndex(cl, 1) for cl in cell_array[1,:,1]]; + [FacetIndex(cl, 1) for cl in cell_array[2,end,:]]; + [FacetIndex(cl, 2) for cl in cell_array[2,:,end]]; + [FacetIndex(cl, 3) for cl in 
cell_array[1,1,:]]] # Cell face sets offset = 0 - facesets = Dict{String,Set{FaceIndex}}() - facesets["bottom"] = Set{FaceIndex}(boundary[(1:length(cell_array[1,:,1])) .+ offset]); offset += length(cell_array[1,:,1]) - facesets["right"] = Set{FaceIndex}(boundary[(1:length(cell_array[2,end,:])) .+ offset]); offset += length(cell_array[2,end,:]) - facesets["top"] = Set{FaceIndex}(boundary[(1:length(cell_array[2,:,end])) .+ offset]); offset += length(cell_array[2,:,end]) - facesets["left"] = Set{FaceIndex}(boundary[(1:length(cell_array[1,1,:])) .+ offset]); offset += length(cell_array[1,1,:]) - - return Grid(cells, nodes, facesets=facesets, boundary_matrix=boundary_matrix) + facetsets = Dict{String,OrderedSet{FacetIndex}}() + facetsets["bottom"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[1,:,1])) .+ offset]); offset += length(cell_array[1,:,1]) + facetsets["right"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[2,end,:])) .+ offset]); offset += length(cell_array[2,end,:]) + facetsets["top"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[2,:,end])) .+ offset]); offset += length(cell_array[2,:,end]) + facetsets["left"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[1,1,:])) .+ offset]); offset += length(cell_array[1,1,:]) + foreach(s -> sort!(s, by = x -> x.idx), values(facetsets)) + + return Grid(cells, nodes, facetsets=facetsets) end # QuadraticTriangle @@ -468,22 +445,21 @@ function generate_grid(::Type{QuadraticTriangle}, nel::NTuple{2,Int}, LL::Vec{2, # Cell faces cell_array = reshape(collect(1:nel_tot),(2, nel_x, nel_y)) - boundary = FaceIndex[[FaceIndex(cl, 1) for cl in cell_array[1,:,1]]; - [FaceIndex(cl, 1) for cl in cell_array[2,end,:]]; - [FaceIndex(cl, 2) for cl in cell_array[2,:,end]]; - [FaceIndex(cl, 3) for cl in cell_array[1,1,:]]] - - boundary_matrix = boundaries_to_sparse(boundary) + boundary = FacetIndex[[FacetIndex(cl, 1) for cl in cell_array[1,:,1]]; + [FacetIndex(cl, 1) for cl in cell_array[2,end,:]]; + [FacetIndex(cl, 2) for cl in cell_array[2,:,end]]; + [FacetIndex(cl, 3) for cl in cell_array[1,1,:]]] # Cell face sets offset = 0 - facesets = Dict{String,Set{FaceIndex}}() - facesets["bottom"] = Set{FaceIndex}(boundary[(1:length(cell_array[1,:,1])) .+ offset]); offset += length(cell_array[1,:,1]) - facesets["right"] = Set{FaceIndex}(boundary[(1:length(cell_array[2,end,:])) .+ offset]); offset += length(cell_array[2,end,:]) - facesets["top"] = Set{FaceIndex}(boundary[(1:length(cell_array[2,:,end])) .+ offset]); offset += length(cell_array[2,:,end]) - facesets["left"] = Set{FaceIndex}(boundary[(1:length(cell_array[1,1,:])) .+ offset]); offset += length(cell_array[1,1,:]) - - return Grid(cells, nodes, facesets=facesets, boundary_matrix=boundary_matrix) + facetsets = Dict{String,OrderedSet{FacetIndex}}() + facetsets["bottom"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[1,:,1])) .+ offset]); offset += length(cell_array[1,:,1]) + facetsets["right"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[2,end,:])) .+ offset]); offset += length(cell_array[2,end,:]) + facetsets["top"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[2,:,end])) .+ offset]); offset += length(cell_array[2,:,end]) + facetsets["left"] = OrderedSet{FacetIndex}(boundary[(1:length(cell_array[1,1,:])) .+ offset]); offset += length(cell_array[1,1,:]) + foreach(s -> sort!(s, by = x -> x.idx), values(facetsets)) + + return Grid(cells, nodes, facetsets=facetsets) end # Tetrahedron @@ -545,23 +521,22 @@ function generate_grid(::Type{Tetrahedron}, 
cells_per_dim::NTuple{3,Int}, left:: # Order the cells as c_nxyz[n, x, y, z] such that we can look up boundary cells c_nxyz = reshape(1:total_elements, (cells_per_cube, cells_per_dim...)) - @views le = [map(x -> FaceIndex(x,4), c_nxyz[1, 1, :, :][:]) ; map(x -> FaceIndex(x,2), c_nxyz[2, 1, :, :][:])] - @views ri = [map(x -> FaceIndex(x,1), c_nxyz[4, end, :, :][:]) ; map(x -> FaceIndex(x,1), c_nxyz[6, end, :, :][:])] - @views fr = [map(x -> FaceIndex(x,1), c_nxyz[2, :, 1, :][:]) ; map(x -> FaceIndex(x,1), c_nxyz[5, :, 1, :][:])] - @views ba = [map(x -> FaceIndex(x,3), c_nxyz[3, :, end, :][:]) ; map(x -> FaceIndex(x,3), c_nxyz[4, :, end, :][:])] - @views bo = [map(x -> FaceIndex(x,1), c_nxyz[1, :, :, 1][:]) ; map(x -> FaceIndex(x,1), c_nxyz[3, :, :, 1][:])] - @views to = [map(x -> FaceIndex(x,3), c_nxyz[5, :, :, end][:]) ; map(x -> FaceIndex(x,3), c_nxyz[6, :, :, end][:])] - - boundary_matrix = boundaries_to_sparse([le; ri; bo; to; fr; ba]) - - facesets = Dict( - "left" => Set(le), - "right" => Set(ri), - "front" => Set(fr), - "back" => Set(ba), - "bottom" => Set(bo), - "top" => Set(to), + @views le = [map(x -> FacetIndex(x,4), c_nxyz[1, 1, :, :][:]) ; map(x -> FacetIndex(x,2), c_nxyz[2, 1, :, :][:])] + @views ri = [map(x -> FacetIndex(x,1), c_nxyz[4, end, :, :][:]) ; map(x -> FacetIndex(x,1), c_nxyz[6, end, :, :][:])] + @views fr = [map(x -> FacetIndex(x,1), c_nxyz[2, :, 1, :][:]) ; map(x -> FacetIndex(x,1), c_nxyz[5, :, 1, :][:])] + @views ba = [map(x -> FacetIndex(x,3), c_nxyz[3, :, end, :][:]) ; map(x -> FacetIndex(x,3), c_nxyz[4, :, end, :][:])] + @views bo = [map(x -> FacetIndex(x,1), c_nxyz[1, :, :, 1][:]) ; map(x -> FacetIndex(x,1), c_nxyz[3, :, :, 1][:])] + @views to = [map(x -> FacetIndex(x,3), c_nxyz[5, :, :, end][:]) ; map(x -> FacetIndex(x,3), c_nxyz[6, :, :, end][:])] + + facetsets = Dict( + "left" => OrderedSet{FacetIndex}(le), + "right" => OrderedSet{FacetIndex}(ri), + "front" => OrderedSet{FacetIndex}(fr), + "back" => OrderedSet{FacetIndex}(ba), + "bottom" => OrderedSet{FacetIndex}(bo), + "top" => OrderedSet{FacetIndex}(to), ) + foreach(s -> sort!(s, by = x -> x.idx), values(facetsets)) - return Grid(cells, nodes, facesets=facesets, boundary_matrix=boundary_matrix) + return Grid(cells, nodes, facetsets=facetsets) end diff --git a/src/Grid/topology.jl b/src/Grid/topology.jl index 38d69f4b80..c5ace16bce 100644 --- a/src/Grid/topology.jl +++ b/src/Grid/topology.jl @@ -1,4 +1,3 @@ - ############ # Topology # ############ @@ -14,245 +13,259 @@ the given entity is included in the returned list as well. 
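A minimal usage sketch of the facet-set API after this rename (assuming the `getfacetsets` accessor used elsewhere in this changeset; it is qualified here in case it is not exported):

```julia
using Ferrite

grid = generate_grid(Tetrahedron, (2, 2, 2))

# Facet sets are now OrderedSet{FacetIndex} keyed by name (the boundary_matrix is gone)
left = Ferrite.getfacetsets(grid)["left"]
for facet in left
    cellid, local_facet = facet[1], facet[2]
    # e.g. reinit! a FacetValues object for this (cellid, local_facet) pair
end
```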
""" getneighborhood -struct EntityNeighborhood{T<:Union{BoundaryIndex,CellIndex}} - neighbor_info::Vector{T} -end - -EntityNeighborhood(info::T) where T <: BoundaryIndex = EntityNeighborhood([info]) -Base.length(n::EntityNeighborhood) = length(n.neighbor_info) -Base.getindex(n::EntityNeighborhood,i) = getindex(n.neighbor_info,i) -Base.firstindex(n::EntityNeighborhood) = 1 -Base.lastindex(n::EntityNeighborhood) = length(n.neighbor_info) -Base.:(==)(n1::EntityNeighborhood, n2::EntityNeighborhood) = n1.neighbor_info == n2.neighbor_info -Base.iterate(n::EntityNeighborhood, state=1) = iterate(n.neighbor_info,state) - -function Base.show(io::IO, ::MIME"text/plain", n::EntityNeighborhood) - if length(n) == 0 - println(io, "No EntityNeighborhood") - elseif length(n) == 1 - println(io, "$(n.neighbor_info[1])") - else - println(io, "$(n.neighbor_info...)") - end -end abstract type AbstractTopology end """ - ExclusiveTopology(cells::Vector{C}) where C <: AbstractCell - ExclusiveTopology(grid::Grid) + ExclusiveTopology(grid::AbstractGrid) -`ExclusiveTopology` saves topological (connectivity/neighborhood) data of the grid. The constructor works with an `AbstractCell` -vector for all cells that dispatch `vertices`, `faces` and in 3D `edges`. -The struct saves the highest dimensional neighborhood, i.e. if something is connected by a face and an - edge only the face neighborhood is saved. The lower dimensional neighborhood is recomputed, if needed. +The **experimental feature** `ExclusiveTopology` saves topological (connectivity/neighborhood) data of the grid. +Only the highest dimensional neighborhood is saved. I.e., if something is connected by a face and an +edge, only the face neighborhood is saved. The lower dimensional neighborhood is recomputed when calling getneighborhood if needed. # Fields -- `vertex_to_cell::Vector{Set{Int}}`: global vertex id to all cells containing the vertex -- `cell_neighbor::Vector{EntityNeighborhood{CellIndex}}`: cellid to all connected cells -- `face_neighbor::Matrix{EntityNeighborhood,Int}`: `face_neighbor[cellid,local_face_id]` -> neighboring face -- `vertex_neighbor::Matrix{EntityNeighborhood,Int}`: `vertex_neighbor[cellid,local_vertex_id]` -> neighboring vertex -- `edge_neighbor::Matrix{EntityNeighborhood,Int}`: `edge_neighbor[cellid_local_vertex_id]` -> neighboring edge -- `face_skeleton::Union{Vector{FaceIndex}, Nothing}`: - -!!! note Currently mixed-dimensional queries do not work at the moment. They will be added back later. +- `vertex_to_cell::AbstractArray{AbstractVector{Int}, 1}`: global vertex id to all cells containing the vertex +- `cell_neighbor::AbstractArray{AbstractVector{Int}, 1}`: cellid to all connected cells +- `face_neighbor::AbstractArray{AbstractVector{FaceIndex}, 2}`: `face_neighbor[cellid, local_face_id]` -> neighboring faces +- `edge_neighbor::AbstractArray{AbstractVector{EdgeIndex}, 2}`: `edge_neighbor[cellid, local_edge_id]` -> neighboring edges +- `vertex_neighbor::AbstractArray{AbstractVector{VertexIndex}, 2}`: `vertex_neighbor[cellid, local_vertex_id]` -> neighboring vertices +- `face_skeleton::Union{Vector{FaceIndex}, Nothing}`: List of unique faces in the grid given as `FaceIndex` +- `edge_skeleton::Union{Vector{EdgeIndex}, Nothing}`: List of unique edges in the grid given as `EdgeIndex` +- `vertex_skeleton::Union{Vector{VertexIndex}, Nothing}`: List of unique vertices in the grid given as `VertexIndex` + +!!! warning "Limitations" + The implementation only works with conforming grids, i.e. grids without "hanging nodes". 
Non-conforming grids will give unexpected results. + Grids with embedded cells (different reference dimension compared + to the spatial dimension) are not supported, and will error on construction. + """ mutable struct ExclusiveTopology <: AbstractTopology - # maps a global vertex id to all cells containing the vertex - vertex_to_cell::Vector{Set{Int}} - # index of the vector = cell id -> all other connected cells - cell_neighbor::Vector{EntityNeighborhood{CellIndex}} - # face_neighbor[cellid,local_face_id] -> exclusive connected entities (not restricted to one entity) - face_face_neighbor::Matrix{EntityNeighborhood{FaceIndex}} - # vertex_neighbor[cellid,local_vertex_id] -> exclusive connected entities to the given vertex - vertex_vertex_neighbor::Matrix{EntityNeighborhood{VertexIndex}} - # edge_neighbor[cellid,local_edge_id] -> exclusive connected entities of the given edge - edge_edge_neighbor::Matrix{EntityNeighborhood{EdgeIndex}} - # lazy constructed face topology - face_skeleton::Union{Vector{FaceIndex}, Nothing} - # TODO reintroduce the codimensional connectivity, e.g. 3D edge to 2D face -end - -function Base.show(io::IO, ::MIME"text/plain", topology::ExclusiveTopology) - println(io, "ExclusiveTopology\n") - print(io, " Vertex neighbors: $(size(topology.vertex_vertex_neighbor))\n") - print(io, " Face neighbors: $(size(topology.face_face_neighbor))\n") - println(io, " Edge neighbors: $(size(topology.edge_edge_neighbor))") + vertex_to_cell::ArrayOfVectorViews{Int, 1} + cell_neighbor::ArrayOfVectorViews{Int, 1} + # face_face_neighbor[cellid,local_face_id] -> exclusive connected entities (not restricted to one entity) + face_face_neighbor::ArrayOfVectorViews{FaceIndex, 2} + # edge_edge_neighbor[cellid,local_edge_id] -> exclusive connected entities of the given edge + edge_edge_neighbor::ArrayOfVectorViews{EdgeIndex, 2} + # vertex_vertex_neighbor[cellid,local_vertex_id] -> exclusive connected entities to the given vertex + vertex_vertex_neighbor::ArrayOfVectorViews{VertexIndex, 2} + facet_skeleton::Union{Vector{FacetIndex}, Nothing} end -function _num_shared_vertices(cell_a::C1, cell_b::C2) where {C1, C2} - num_shared_vertices = 0 - for vertex ∈ vertices(cell_a) - for vertex_neighbor ∈ vertices(cell_b) - if vertex_neighbor == vertex - num_shared_vertices += 1 - continue - end - end +function ExclusiveTopology(grid::AbstractGrid{sdim}) where sdim + if sdim != get_reference_dimension(grid) + error("ExclusiveTopology does not support embedded cells (i.e. 
reference dimensions different from the spatial dimension)") end - return num_shared_vertices -end + cells = getcells(grid) + nnodes = getnnodes(grid) + ncells = length(cells) + + max_vertices, max_edges, max_faces = _max_nentities_per_cell(cells) + vertex_to_cell = build_vertex_to_cell(cells; max_vertices, nnodes) + cell_neighbor = build_cell_neighbor(grid, cells, vertex_to_cell; ncells) + + # Here we don't use the convenience constructor taking a function, + # since we want to do it simultaneously for 3 data-types + facedata = sizehint!(FaceIndex[], ncells * max_faces * _getsizehint(grid, FaceIndex)) + face_face_neighbor_buf = CollectionsOfViews.ConstructionBuffer(facedata, (ncells, max_faces), _getsizehint(grid, FaceIndex)) + edgedata = sizehint!(EdgeIndex[], ncells * max_edges * _getsizehint(grid, EdgeIndex)) + edge_edge_neighbor_buf = CollectionsOfViews.ConstructionBuffer(edgedata, (ncells, max_edges), _getsizehint(grid, EdgeIndex)) + vertdata = sizehint!(VertexIndex[], ncells * max_vertices * _getsizehint(grid, VertexIndex)) + vertex_vertex_neighbor_buf = CollectionsOfViews.ConstructionBuffer(vertdata, (ncells, max_vertices), _getsizehint(grid, VertexIndex)) -function _exclusive_topology_ctor(cells::Vector{C}, vertex_cell_table::Array{Set{Int}}, vertex_table, face_table, edge_table, cell_neighbor_table) where C <: AbstractCell for (cell_id, cell) in enumerate(cells) - # Gather all cells which are connected via vertices - cell_neighbor_ids = Set{Int}() - for vertex ∈ vertices(cell) - for vertex_cell_id ∈ vertex_cell_table[vertex] - if vertex_cell_id != cell_id - push!(cell_neighbor_ids, vertex_cell_id) - end - end - end - cell_neighbor_table[cell_id] = EntityNeighborhood(CellIndex.(collect(cell_neighbor_ids))) - - # Any of the neighbors is now sorted in the respective categories - for cell_neighbor_id ∈ cell_neighbor_ids - # Buffer neighbor - cell_neighbor = cells[cell_neighbor_id] - # TODO handle mixed-dimensional case - getdim(cell_neighbor) == getdim(cell) || continue - - num_shared_vertices = _num_shared_vertices(cell, cell_neighbor) - - # Simplest case: Only one vertex is shared => Vertex neighbor + for neighbor_cell_id in cell_neighbor[cell_id] + neighbor_cell = cells[neighbor_cell_id] + getrefdim(neighbor_cell) == getrefdim(cell) || error("Not supported") + num_shared_vertices = _num_shared_vertices(cell, neighbor_cell) if num_shared_vertices == 1 - for (lvi, vertex) ∈ enumerate(vertices(cell)) - for (lvi2, vertex_neighbor) ∈ enumerate(vertices(cell_neighbor)) - if vertex_neighbor == vertex - push!(vertex_table[cell_id, lvi].neighbor_info, VertexIndex(cell_neighbor_id, lvi2)) - break - end - end - end - # Shared path + _add_single_vertex_neighbor!(vertex_vertex_neighbor_buf, cell, cell_id, neighbor_cell, neighbor_cell_id) + # Shared edge elseif num_shared_vertices == 2 - if getdim(cell) == 2 - _add_single_face_neighbor!(face_table, cell, cell_id, cell_neighbor, cell_neighbor_id) - elseif getdim(cell) == 3 - _add_single_edge_neighbor!(edge_table, cell, cell_id, cell_neighbor, cell_neighbor_id) - else - @error "Case not implemented." - end - # Shared surface + _add_single_edge_neighbor!(edge_edge_neighbor_buf, cell, cell_id, neighbor_cell, neighbor_cell_id) + # Shared face elseif num_shared_vertices >= 3 - _add_single_face_neighbor!(face_table, cell, cell_id, cell_neighbor, cell_neighbor_id) + _add_single_face_neighbor!(face_face_neighbor_buf, cell, cell_id, neighbor_cell, neighbor_cell_id) else - @error "Found connected elements without shared vertex... Mesh broken?" 
+ error("Found connected elements without shared vertex... Mesh broken?") end end end + face_face_neighbor = ArrayOfVectorViews(face_face_neighbor_buf) + edge_edge_neighbor = ArrayOfVectorViews(edge_edge_neighbor_buf) + vertex_vertex_neighbor = ArrayOfVectorViews(vertex_vertex_neighbor_buf) + return ExclusiveTopology(vertex_to_cell, cell_neighbor, face_face_neighbor, edge_edge_neighbor, vertex_vertex_neighbor, nothing) +end + +function get_facet_facet_neighborhood(t::ExclusiveTopology, g::AbstractGrid) + return _get_facet_facet_neighborhood(t, Val(get_reference_dimension(g))) +end +_get_facet_facet_neighborhood(t::ExclusiveTopology, #=rdim=#::Val{1}) = t.vertex_vertex_neighbor +_get_facet_facet_neighborhood(t::ExclusiveTopology, #=rdim=#::Val{2}) = t.edge_edge_neighbor +_get_facet_facet_neighborhood(t::ExclusiveTopology, #=rdim=#::Val{3}) = t.face_face_neighbor +function _get_facet_facet_neighborhood(::ExclusiveTopology, #=rdim=#::Val{:mixed}) + throw(ArgumentError("get_facet_facet_neightborhood is only supported for grids containing cells with the same reference dimension. + Access the `vertex_vertex_neighbor`, `edge_edge_neighbor`, or `face_face_neighbor` fields explicitly instead.")) end -function ExclusiveTopology(cells::Vector{C}) where C <: AbstractCell - # Setup the cell to vertex table - cell_vertices_table = vertices.(cells) #needs generic interface for <: AbstractCell - vertex_cell_table = Set{Int}[Set{Int}() for _ ∈ 1:maximum(maximum.(cell_vertices_table))] +# Guess of how many neighbors depending on grid dimension and index type. +# This could be possible to optimize further by studying connectivities of non-uniform +# grids, see https://github.com/Ferrite-FEM/Ferrite.jl/pull/974#discussion_r1660838649 +function _getsizehint(g::AbstractGrid, ::Type{IDX}) where IDX + CT = getcelltype(g) + isconcretetype(CT) && return _getsizehint(getrefshape(CT)(), IDX) + rdim = get_reference_dimension(g)::Int + return _getsizehint(RefSimplex{rdim}(), IDX) # Simplex is "worst case", used as default. 
+end +_getsizehint(::AbstractRefShape, ::Type{FaceIndex}) = 1 # Always 1 or zero if not mixed rdim - # Setup vertex to cell connectivity by flipping the cell to vertex table - for (cellid, cell_vertices) in enumerate(cell_vertices_table) - for vertex in cell_vertices - push!(vertex_cell_table[vertex], cellid) +_getsizehint(::AbstractRefShape{1}, ::Type{EdgeIndex}) = 1 +_getsizehint(::AbstractRefShape{2}, ::Type{EdgeIndex}) = 1 +_getsizehint(::AbstractRefShape{3}, ::Type{EdgeIndex}) = 3 # Number for RefTetrahedron +_getsizehint(::RefHexahedron, ::Type{EdgeIndex}) = 1 # Optim for RefHexahedron + +_getsizehint(::AbstractRefShape{1}, ::Type{VertexIndex}) = 1 +_getsizehint(::AbstractRefShape{2}, ::Type{VertexIndex}) = 3 +_getsizehint(::AbstractRefShape{3}, ::Type{VertexIndex}) = 13 +_getsizehint(::RefHypercube, ::Type{VertexIndex}) = 1 # Optim for RefHypercube + +_getsizehint(::AbstractRefShape{1}, ::Type{CellIndex}) = 2 +_getsizehint(::AbstractRefShape{2}, ::Type{CellIndex}) = 12 +_getsizehint(::AbstractRefShape{3}, ::Type{CellIndex}) = 70 +_getsizehint(::RefQuadrilateral, ::Type{CellIndex}) = 8 +_getsizehint(::RefHexahedron, ::Type{CellIndex}) = 26 + +function _num_shared_vertices(cell_a::C1, cell_b::C2) where {C1, C2} + num_shared_vertices = 0 + for vertex ∈ vertices(cell_a) + for vertex_neighbor ∈ vertices(cell_b) + if vertex_neighbor == vertex + num_shared_vertices += 1 + continue + end end end + return num_shared_vertices +end - # Compute correct matrix size - celltype = eltype(cells) - max_vertices = 0 - max_faces = 0 - max_edges = 0 - if isconcretetype(celltype) - dim = getdim(cells[1]) - - max_vertices = nvertices(cells[1]) - dim > 1 && (max_faces = nfaces(cells[1])) - dim > 2 && (max_edges = nedges(cells[1])) +"Return the highest number of vertices, edges, and faces per cell" +function _max_nentities_per_cell(cells::Vector{C}) where C + if isconcretetype(C) + cell = first(cells) + return nvertices(cell), nedges(cell), nfaces(cell) else celltypes = Set(typeof.(cells)) + max_vertices = 0 + max_edges = 0 + max_faces = 0 for celltype in celltypes - celltypeidx = findfirst(x->typeof(x)==celltype,cells) - dim = getdim(cells[celltypeidx]) - - max_vertices = max(max_vertices,nvertices(cells[celltypeidx])) - dim > 1 && (max_faces = max(max_faces, nfaces(cells[celltypeidx]))) - dim > 2 && (max_edges = max(max_edges, nedges(cells[celltypeidx]))) - end - end - - # Setup matrices - vertex_table = Matrix{EntityNeighborhood{VertexIndex}}(undef, length(cells), max_vertices) - for j = 1:size(vertex_table,2) - for i = 1:size(vertex_table,1) - vertex_table[i,j] = EntityNeighborhood{VertexIndex}(VertexIndex[]) + celltypeidx = findfirst(x -> isa(x, celltype), cells) + max_vertices = max(max_vertices, nvertices(cells[celltypeidx])) + max_edges = max(max_edges, nedges(cells[celltypeidx])) + max_faces = max(max_faces, nfaces(cells[celltypeidx])) end + return max_vertices, max_edges, max_faces end - face_table = Matrix{EntityNeighborhood{FaceIndex}}(undef, length(cells), max_faces) - for j = 1:size(face_table,2) - for i = 1:size(face_table,1) - face_table[i,j] = EntityNeighborhood{FaceIndex}(FaceIndex[]) - end - end - edge_table = Matrix{EntityNeighborhood{EdgeIndex}}(undef, length(cells), max_edges) - for j = 1:size(edge_table,2) - for i = 1:size(edge_table,1) - edge_table[i,j] = EntityNeighborhood{EdgeIndex}(EdgeIndex[]) - end - end - cell_neighbor_table = Vector{EntityNeighborhood{CellIndex}}(undef, length(cells)) - - _exclusive_topology_ctor(cells, vertex_cell_table, vertex_table, face_table, edge_table, 
cell_neighbor_table) - - return ExclusiveTopology(vertex_cell_table,cell_neighbor_table,face_table,vertex_table,edge_table,nothing) end -function _add_single_face_neighbor!(face_table, cell::C1, cell_id, cell_neighbor::C2, cell_neighbor_id) where {C1, C2} +function _add_single_face_neighbor!(face_table::ConstructionBuffer, cell::AbstractCell, cell_id::Int, cell_neighbor::AbstractCell, cell_neighbor_id::Int) for (lfi, face) ∈ enumerate(faces(cell)) uniqueface = sortface_fast(face) for (lfi2, face_neighbor) ∈ enumerate(faces(cell_neighbor)) uniqueface2 = sortface_fast(face_neighbor) if uniqueface == uniqueface2 - push!(face_table[cell_id, lfi].neighbor_info, FaceIndex(cell_neighbor_id, lfi2)) + push_at_index!(face_table, FaceIndex(cell_neighbor_id, lfi2), cell_id, lfi) return end end end end -function _add_single_edge_neighbor!(edge_table, cell::C1, cell_id, cell_neighbor::C2, cell_neighbor_id) where {C1, C2} +function _add_single_edge_neighbor!(edge_table::ConstructionBuffer, cell::AbstractCell, cell_id::Int, cell_neighbor::AbstractCell, cell_neighbor_id::Int) for (lei, edge) ∈ enumerate(edges(cell)) uniqueedge = sortedge_fast(edge) for (lei2, edge_neighbor) ∈ enumerate(edges(cell_neighbor)) uniqueedge2 = sortedge_fast(edge_neighbor) if uniqueedge == uniqueedge2 - push!(edge_table[cell_id, lei].neighbor_info, EdgeIndex(cell_neighbor_id, lei2)) + push_at_index!(edge_table, EdgeIndex(cell_neighbor_id, lei2), cell_id, lei) return end end end end +function _add_single_vertex_neighbor!(vertex_table::ConstructionBuffer, cell::AbstractCell, cell_id::Int, cell_neighbor::AbstractCell, cell_neighbor_id::Int) + for (lvi, vertex) ∈ enumerate(vertices(cell)) + for (lvi2, vertex_neighbor) ∈ enumerate(vertices(cell_neighbor)) + if vertex_neighbor == vertex + push_at_index!(vertex_table, VertexIndex(cell_neighbor_id, lvi2), cell_id, lvi) + break + end + end + end +end -getcells(neighbor::EntityNeighborhood{T}) where T <: BoundaryIndex = first.(neighbor.neighbor_info) -getcells(neighbor::EntityNeighborhood{CellIndex}) = getproperty.(neighbor.neighbor_info, :idx) -getcells(neighbors::Vector{T}) where T <: EntityNeighborhood = reduce(vcat, getcells.(neighbors)) -getcells(neighbors::Vector{T}) where T <: BoundaryIndex = getindex.(neighbors,1) +function build_vertex_to_cell(cells; max_vertices, nnodes) + vertex_to_cell = ArrayOfVectorViews(sizehint!(Int[], max_vertices * nnodes), (nnodes,); sizehint = max_vertices) do cov + for (cellid, cell) in enumerate(cells) + for vertex in vertices(cell) + push_at_index!(cov, cellid, vertex) + end + end + end + return vertex_to_cell +end -ExclusiveTopology(grid::AbstractGrid) = ExclusiveTopology(getcells(grid)) +function build_cell_neighbor(grid, cells, vertex_to_cell; ncells) + # In this case, we loop over the cells in order and all neighbors at once. 
+ # Then we can create ArrayOfVectorViews directly without the CollectionsOfViews.ConstructionBuffer + sizehint = _getsizehint(grid, CellIndex) + data = empty!(Vector{Int}(undef, ncells * sizehint)) + + indices = Vector{Int}(undef, ncells + 1) + cell_neighbor_ids = Int[] + n = 1 + for (cell_id, cell) in enumerate(cells) + empty!(cell_neighbor_ids) + for vertex ∈ vertices(cell) + for vertex_cell_id ∈ vertex_to_cell[vertex] + if vertex_cell_id != cell_id + vertex_cell_id ∈ cell_neighbor_ids || push!(cell_neighbor_ids, vertex_cell_id) + end + end + end + indices[cell_id] = n + append!(data, cell_neighbor_ids) + n += length(cell_neighbor_ids) + end + indices[end] = n + sizehint!(data, length(data)) # Tell julia that we won't add more data + return ArrayOfVectorViews(indices, data, LinearIndices(1:ncells)) +end function getneighborhood(top::ExclusiveTopology, grid::AbstractGrid, cellidx::CellIndex, include_self=false) - patch = getcells(top.cell_neighbor[cellidx.idx]) + patch = top.cell_neighbor[cellidx.idx] if include_self - return [patch; cellidx.idx] + return view(push!(collect(patch), cellidx.idx), 1:(length(patch) + 1)) else return patch end end function getneighborhood(top::ExclusiveTopology, grid::AbstractGrid, faceidx::FaceIndex, include_self=false) + neighbors = top.face_face_neighbor[faceidx[1], faceidx[2]] + if include_self + return view(push!(collect(neighbors), faceidx), 1:(length(neighbors) + 1)) + else + return neighbors + end +end + +function getneighborhood(top::ExclusiveTopology, grid::AbstractGrid{2}, edgeidx::EdgeIndex, include_self=false) + neighbors = top.edge_edge_neighbor[edgeidx[1], edgeidx[2]] if include_self - return [top.face_face_neighbor[faceidx[1],faceidx[2]].neighbor_info; faceidx] + return view(push!(collect(neighbors), edgeidx), 1:(length(neighbors) + 1)) else - return top.face_face_neighbor[faceidx[1],faceidx[2]].neighbor_info + return neighbors end end @@ -268,90 +281,114 @@ function getneighborhood(top::ExclusiveTopology, grid::AbstractGrid, vertexidx:: !include_self && local_vertex == vertexidx && continue push!(self_reference_local, local_vertex) end - return self_reference_local + return view(self_reference_local, 1:length(self_reference_local)) end function getneighborhood(top::ExclusiveTopology, grid::AbstractGrid{3}, edgeidx::EdgeIndex, include_self=false) cellid, local_edgeidx = edgeidx[1], edgeidx[2] - cell_edges = edges(getcells(grid,cellid)) + cell_edges = edges(getcells(grid, cellid)) nonlocal_edgeid = cell_edges[local_edgeidx] - cell_neighbors = getneighborhood(top,grid,CellIndex(cellid)) + cell_neighbors = getneighborhood(top, grid, CellIndex(cellid)) self_reference_local = EdgeIndex[] for cellid in cell_neighbors - local_neighbor_edgeid = findfirst(x->issubset(x,nonlocal_edgeid),edges(getcells(grid,cellid)))::Int + local_neighbor_edgeid = findfirst(x -> issubset(x, nonlocal_edgeid), edges(getcells(grid, cellid))) local_neighbor_edgeid === nothing && continue local_edge = EdgeIndex(cellid,local_neighbor_edgeid) push!(self_reference_local, local_edge) end if include_self - return unique([top.edge_edge_neighbor[cellid, local_edgeidx].neighbor_info; self_reference_local; edgeidx]) + neighbors = unique([top.edge_edge_neighbor[cellid, local_edgeidx]; self_reference_local; edgeidx]) else - return unique([top.edge_edge_neighbor[cellid, local_edgeidx].neighbor_info; self_reference_local]) + neighbors = unique([top.edge_edge_neighbor[cellid, local_edgeidx]; self_reference_local]) end + return view(neighbors, 1:length(neighbors)) +end + +function 
getneighborhood(top::ExclusiveTopology, grid::AbstractGrid, facetindex::FacetIndex, include_self=false) + rdim = get_reference_dimension(grid) + return _getneighborhood(Val(rdim), top, grid, facetindex, include_self) +end +_getneighborhood(::Val{1}, top, grid, facetindex::FacetIndex, include_self) = getneighborhood(top, grid, VertexIndex(facetindex...), include_self) +_getneighborhood(::Val{2}, top, grid, facetindex::FacetIndex, include_self) = getneighborhood(top, grid, EdgeIndex(facetindex...), include_self) +_getneighborhood(::Val{3}, top, grid, facetindex::FacetIndex, include_self) = getneighborhood(top, grid, FaceIndex(facetindex...), include_self) +function _getneighborhood(::Val{:mixed}, args...) + throw(ArgumentError("getneighborhood with FacetIndex is is only supported for grids containing cells with a common reference dimension. + For mixed-dimensionality grid, use `VertexIndex`, `EdgeIndex`, and `FaceIndex` explicitly")) end """ - vertex_star_stencils(top::ExclusiveTopology, grid::Grid) -> Vector{Int, EntityNeighborhood{VertexIndex}}() + vertex_star_stencils(top::ExclusiveTopology, grid::Grid) -> AbstractVector{AbstractVector{VertexIndex}} Computes the stencils induced by the edge connectivity of the vertices. """ function vertex_star_stencils(top::ExclusiveTopology, grid::Grid) cells = grid.cells - stencil_table = Dict{Int,EntityNeighborhood{VertexIndex}}() - # Vertex Connectivity - for (global_vertexid,cellset) ∈ enumerate(top.vertex_to_cell) - vertex_neighbors_local = VertexIndex[] - for cell ∈ cellset - neighbor_boundary = getdim(cells[cell]) > 2 ? collect(edges(cells[cell])) : collect(faces(cells[cell])) #get lowest dimension boundary - neighbor_connected_faces = neighbor_boundary[findall(x->global_vertexid ∈ x, neighbor_boundary)] - this_local_vertex = findfirst(i->toglobal(grid, VertexIndex(cell, i)) == global_vertexid, 1:nvertices(cells[cell])) - push!(vertex_neighbors_local, VertexIndex(cell, this_local_vertex)) - other_vertices = findfirst.(x->x!=global_vertexid,neighbor_connected_faces) - any(other_vertices .=== nothing) && continue - neighbor_vertices_global = getindex.(neighbor_connected_faces, other_vertices) - neighbor_vertices_local = [VertexIndex(cell,local_vertex) for local_vertex ∈ findall(x->x ∈ neighbor_vertices_global, vertices(cells[cell]))] - append!(vertex_neighbors_local, neighbor_vertices_local) + stencil_table = ArrayOfVectorViews(VertexIndex[], (getnnodes(grid),); sizehint = 10) do buf + # Vertex Connectivity + for (global_vertexid,cellset) ∈ enumerate(top.vertex_to_cell) + for cell ∈ cellset + neighbor_boundary = edges(cells[cell]) + neighbor_connected_faces = neighbor_boundary[findall(x->global_vertexid ∈ x, neighbor_boundary)] + this_local_vertex = findfirst(i->toglobal(grid, VertexIndex(cell, i)) == global_vertexid, 1:nvertices(cells[cell])) + push_at_index!(buf, VertexIndex(cell, this_local_vertex), global_vertexid) + other_vertices = findfirst.(x->x!=global_vertexid,neighbor_connected_faces) + any(other_vertices .=== nothing) && continue + neighbor_vertices_global = getindex.(neighbor_connected_faces, other_vertices) + neighbor_vertices_local = [VertexIndex(cell,local_vertex) for local_vertex ∈ findall(x->x ∈ neighbor_vertices_global, vertices(cells[cell]))] + for vertex_index in neighbor_vertices_local + push_at_index!(buf, vertex_index, global_vertexid) + end + end end - stencil_table[global_vertexid] = EntityNeighborhood(vertex_neighbors_local) end return stencil_table end """ - getstencil(top::Dict{Int, EntityNeighborhood{VertexIndex}}, 
grid::AbstractGrid, vertex_idx::VertexIndex) -> EntityNeighborhood{VertexIndex} + getstencil(top::ArrayOfVectorViews{VertexIndex, 1}, grid::AbstractGrid, vertex_idx::VertexIndex) -> AbstractVector{VertexIndex} Get an iterateable over the stencil members for a given local entity. """ -function getstencil(top::Dict{Int, EntityNeighborhood{VertexIndex}}, grid::Grid, vertex_idx::VertexIndex) - return top[toglobal(grid, vertex_idx)].neighbor_info +function getstencil(top::ArrayOfVectorViews{VertexIndex, 1}, grid::Grid, vertex_idx::VertexIndex) + return top[toglobal(grid, vertex_idx)] end """ - _faceskeleton(topology::ExclusiveTopology, grid::Grid) -> Iterable{FaceIndex} -Creates an iterateable face skeleton. The skeleton consists of `FaceIndex` that can be used to `reinit` -`FaceValues`. + _create_facet_skeleton(neighborhood::AbstractMatrix{AbstractVector{BI}}) where BI <: Union{FaceIndex, EdgeIndex, VertexIndex} + +Materializes the skeleton from the `neighborhood` information by returning a `Vector{FacetIndex}` describing the +unique facets in the grid. + +*Example:* With `BI=EdgeIndex`, and an edge between cells and 1 and 2, with vertices 2 and 5, could be described by either +`EdgeIndex(1, 2)` or `EdgeIndex(2, 4)`, but only one of these will be in the vector returned by this function. """ -function _faceskeleton(top::ExclusiveTopology, grid::Grid) - cells = getcells(grid) - cell_dim = getdim(first(cells)) - @assert all(cell -> getdim(cell) == cell_dim, cells) "Face skeleton construction requires all the elements to be of the same dimensionality" +function _create_facet_skeleton(neighborhood::ArrayOfVectorViews{BI, 2}) where BI <: Union{FaceIndex, EdgeIndex, VertexIndex} i = 1 - neighborhood = cell_dim == 1 ? top.vertex_vertex_neighbor : top.face_face_neighbor - face_skeleton_local = Array{FaceIndex}(undef, length(neighborhood) - count(neighbors -> !isempty(neighbors) , neighborhood) ÷ 2) - for (idx, face) in pairs(neighborhood) - isempty(face.neighbor_info) || face.neighbor_info[][1] > idx[1] || continue - face_skeleton_local[i] = FaceIndex(idx[1], idx[2]) + skeleton = Vector{FacetIndex}(undef, length(neighborhood) - count(neighbors -> !isempty(neighbors) , values(neighborhood)) ÷ 2) + for (idx, entity) in pairs(neighborhood) + isempty(entity) || entity[][1] > idx[1] || continue + skeleton[i] = FacetIndex(idx[1], idx[2]) i += 1 end - return face_skeleton_local + return skeleton end """ - face_skeleton(top::ExclusiveTopology, grid::Grid) -> Vector{FaceIndex} -Creates an iterateable face skeleton. The skeleton consists of `FaceIndex` that can be used to `reinit` -`FaceValues`. + facetskeleton(top::ExclusiveTopology, grid::AbstractGrid) + +Materializes the skeleton from the `neighborhood` information by returning an iterable over the +unique facets in the grid, described by `FacetIndex`. 
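A minimal usage sketch, assuming a structured quadrilateral grid from `generate_grid` (`facetskeleton` is qualified in case it is not exported):

```julia
using Ferrite

grid = generate_grid(Quadrilateral, (3, 3))
top = ExclusiveTopology(grid)

for facet in Ferrite.facetskeleton(top, grid)
    cellid, local_facet = facet[1], facet[2]
    # every facet of the grid appears exactly once here, which makes this loop
    # suitable for e.g. assembling interface (skeleton) contributions
end
```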
""" -function faceskeleton(top::ExclusiveTopology, grid::Grid) - if top.face_skeleton === nothing - top.face_skeleton = _faceskeleton(top, grid) +function facetskeleton(top::ExclusiveTopology, grid::AbstractGrid) + if top.facet_skeleton === nothing + rdim = get_reference_dimension(grid) + top.facet_skeleton = if rdim == 1 + _create_facet_skeleton(top.vertex_vertex_neighbor) + elseif rdim == 2 + _create_facet_skeleton(top.edge_edge_neighbor) + elseif rdim == 3 + _create_facet_skeleton(top.face_face_neighbor) + else + throw(ArgumentError("facetskeleton not supported for refdim = $rdim")) + end end - return top.face_skeleton + return top.facet_skeleton end diff --git a/src/Grid/utils.jl b/src/Grid/utils.jl index 61c76e04a7..c684409aaf 100644 --- a/src/Grid/utils.jl +++ b/src/Grid/utils.jl @@ -1,342 +1,233 @@ -""" -getfaceinstances(grid::AbstractGrid, topology::ExclusiveTopology, face::FaceIndex) - -Returns all the faces as `Set{FaceIndex}` that share all their vertices with a given face -represented as `FaceIndex`. The returned set includes the input face. - -```julia-repl -julia> using Ferrite; using Ferrite: getfaceinstances - -julia> grid = generate_grid(Tetrahedron, (2,1,1)); - -julia> topology = ExclusiveTopology(grid); - -julia> getfaceinstances(grid, topology, FaceIndex(4,2)) -Set{FaceIndex} with 2 elements: -FaceIndex((6, 4)) -FaceIndex((4, 2)) -``` -""" -function getfaceinstances end - -""" -getedgeinstances(grid::AbstractGrid, topology::ExclusiveTopology, edge::EdgeIndex) - -Returns all the edges as `Set{EdgeIndex}` that share all their vertices with a given edge -represented as `EdgeIndex`. -The returned set includes the input edge. - -```julia-repl -julia> using Ferrite; using Ferrite: getedgeinstances +# Sets -julia> grid = generate_grid(Tetrahedron, (2,1,1)); - -julia> topology = ExclusiveTopology(grid); - -julia> getedgeinstances(grid, topology, EdgeIndex(4,2)) -Set{EdgeIndex} with 3 elements: -EdgeIndex((4, 2)) -EdgeIndex((9, 6)) -EdgeIndex((7, 6)) -``` -""" -function getedgeinstances end +_check_setname(dict, name) = haskey(dict, name) && throw(ArgumentError("there already exists a set with the name: $name")) +_warn_emptyset(set, name) = length(set) == 0 && @warn("no entities added to the set with name: $name") """ -getvertexinstances(grid::AbstractGrid, topology::ExclusiveTopology, vertex::EdgeIndex) - -Returns all the vertices as `Set{::VertexIndex}` that use a given vertex represented as -`VertexIndex` in all cells. -The returned set includes the input vertex. - -```julia-repl -julia> using Ferrite; using Ferrite: getvertexinstances + addcellset!(grid::AbstractGrid, name::String, cellid::AbstractVecOrSet{Int}) + addcellset!(grid::AbstractGrid, name::String, f::function; all::Bool=true) -julia> grid = generate_grid(Tetrahedron,(2,1,1)); +Adds a cellset to the grid with key `name`. +Cellsets are typically used to define subdomains of the problem, e.g. two materials in the computational domain. +The `DofHandler` can construct different fields which live not on the whole domain, but rather on a cellset. +`all=true` implies that `f(x)` must return `true` for all nodal coordinates `x` in the cell if the cell +should be added to the set, otherwise it suffices that `f(x)` returns `true` for one node. 
-julia> topology = ExclusiveTopology(grid); - -julia> getvertexinstances(grid, topology, VertexIndex(4,2)) -Set{VertexIndex} with 8 elements: -VertexIndex((7, 4)) -VertexIndex((10, 4)) -VertexIndex((12, 4)) -VertexIndex((6, 3)) -VertexIndex((4, 2)) -VertexIndex((9, 4)) -VertexIndex((11, 4)) -VertexIndex((8, 4)) +```julia +addcellset!(grid, "left", Set((1,3))) #add cells with id 1 and 3 to cellset left +addcellset!(grid, "right", x -> norm(x[1]) < 2.0 ) #add cell to cellset right, if x[1] of each cell's node is smaller than 2.0 ``` """ -function getvertexinstances end - -for (func, entity_f, entity_t) in ( -(:getvertexinstances, :vertices, :VertexIndex), -(:getedgeinstances, :edges, :EdgeIndex), -(:getfaceinstances, :faces, :FaceIndex), -) -@eval begin - function $(func)(grid::AbstractGrid, topology::ExclusiveTopology, entity::$(entity_t)) - _set = Set{$(entity_t)}() - cells = getcells(grid) - cell = cells[entity[1]] - verts = $(entity_f)(cell)[entity[2]] - # Since we are looking for an entity that share *all* vertices, the first one can be - # used here to query potiential neighbor cells - for cell_idx in topology.vertex_to_cell[verts[1]] # Since all vertices should be shared, the first one can be used here - cell_entities = $(entity_f)(cells[cell_idx]) - for (entity_idx, cell_entity) in pairs(cell_entities) - if all(x -> x in verts, cell_entity) - push!(_set, $(entity_t)((cell_idx, entity_idx))) - end - end - end - return _set - end -end +function addcellset!(grid::AbstractGrid, name::String, cellid::AbstractVecOrSet{Int}) + _addset!(grid, name, cellid, getcellsets(grid)) end -""" -filterfaces(grid::AbstractGrid, faces::Set{FaceIndex}, f::Function; all::Bool=true) - -Returns the faces in `faces` that satisfy `f` as a `Set{FaceIndex}`. -`all=true` implies that `f(x)` must return `true` for all nodal coordinates `x` on the face -if the face should be added to the set, otherwise it suffices that `f(x)` returns `true` for -one node. - -```julia-repl -julia> using Ferrite; using Ferrite: filterfaces - -julia> grid = generate_grid(Tetrahedron, (2,2,2)); - -julia> topology = ExclusiveTopology(grid); - -julia> addboundaryfaceset!(grid, topology, "b", x -> true); +function addcellset!(grid::AbstractGrid, name::String, f::Function; all::Bool=true) + _addset!(grid, name, create_cellset(grid, f; all), getcellsets(grid)) +end -julia> filterfaces(grid, grid.facesets["b"], x -> x[3] ≈ -1) -Set{FaceIndex} with 8 elements: -FaceIndex((7, 1)) -FaceIndex((3, 1)) -FaceIndex((21, 1)) -FaceIndex((13, 1)) -FaceIndex((19, 1)) -FaceIndex((15, 1)) -FaceIndex((1, 1)) -FaceIndex((9, 1)) -``` """ -function filterfaces end + addnodeset!(grid::AbstractGrid, name::String, nodeid::AbstractVecOrSet{Int}) + addnodeset!(grid::AbstractGrid, name::String, f::Function) +Adds a `nodeset::OrderedSet{Int}` to the `grid`'s `nodesets` with key `name`. Has the same interface as `addcellset`. +However, instead of mapping a cell id to the `String` key, a set of node ids is returned. """ -filteredges(grid::AbstractGrid, edges::Set{EdgeIndex}, f::Function; all::Bool=true) - -Returns the edges in `edges` that satisfy `f` as a `Set{EdgeIndex}`. -`all=true` implies that `f(x)` must return `true` for all nodal coordinates `x` on the face -if the face should be added to the set, otherwise it suffices that `f(x)` returns `true` for -one node. 
- -```julia-repl -julia> using Ferrite; using Ferrite: filteredges +addnodeset!(grid::AbstractGrid, name::String, nodeid::AbstractVecOrSet{Int}) = + _addset!(grid, name, nodeid, getnodesets(grid)) -julia> grid = generate_grid(Tetrahedron, (1,1,1)); +addnodeset!(grid::AbstractGrid, name::String, f::Function) = + _addset!(grid, name, create_nodeset(grid, f), getnodesets(grid)) -julia> topology = ExclusiveTopology(grid); - -julia> addboundaryedgeset!(grid, topology, "b", x -> true); - -julia> filteredges(grid, grid.edgesets["b"], x -> x[3] ≈ -1) -Set{EdgeIndex} with 8 elements: -EdgeIndex((1, 2)) -EdgeIndex((3, 2)) -EdgeIndex((4, 3)) -EdgeIndex((1, 3)) -EdgeIndex((3, 3)) -EdgeIndex((1, 1)) -EdgeIndex((3, 1)) -EdgeIndex((2, 3)) -``` """ -function filteredges end - -""" -filtervertices(grid::AbstractGrid, vertices::Set{VertexIndex}, f::Function; all::Bool=true) - -Returns the vertices in `vertices` that satisfy `f` as a `Set{VertexIndex}`. -`all=true` implies that `f(x)` must return `true` for all nodal coordinates `x` on the face -if the face should be added to the set, otherwise it suffices that `f(x)` returns `true` for -one node. + addfacetset!(grid::AbstractGrid, name::String, faceid::AbstractVecOrSet{FacetIndex}) + addfacetset!(grid::AbstractGrid, name::String, f::Function; all::Bool=true) -```julia-repl -julia> using Ferrite; using Ferrite: filtervertices +Adds a facetset to the grid with key `name`. +A facetset maps a `String` key to a `OrderedSet` of tuples corresponding to `(global_cell_id, local_facet_id)`. +Facetsets can be used to initialize `Dirichlet` boundary conditions for the `ConstraintHandler`. +`all=true` implies that `f(x)` must return `true` for all nodal coordinates `x` on the facet if the facet +should be added to the set, otherwise it suffices that `f(x)` returns `true` for one node. -julia> grid = generate_grid(Tetrahedron,(1,1,1)); - -julia> topology = ExclusiveTopology(grid); - -julia> addboundaryvertexset!(grid, topology, "b", x -> true); - -julia> filtervertices(grid, grid.vertexsets["b"], x -> x[3] ≈ -1) -Set{VertexIndex} with 12 elements: -VertexIndex((2, 3)) -VertexIndex((4, 3)) -VertexIndex((4, 1)) -VertexIndex((3, 3)) -VertexIndex((3, 2)) -VertexIndex((1, 1)) -VertexIndex((2, 1)) -VertexIndex((3, 1)) -VertexIndex((1, 3)) -VertexIndex((5, 1)) -VertexIndex((1, 2)) -VertexIndex((6, 1)) +```julia +addfacetset!(grid, "right", Set((FacetIndex(2,2), FacetIndex(4,2)))) #see grid manual example for reference +addfacetset!(grid, "clamped", x -> norm(x[1]) ≈ 0.0) #see incompressible elasticity example for reference ``` """ -function filtervertices end +addfacetset!(grid::AbstractGrid, name::String, set::AbstractVecOrSet{FacetIndex}) = + _addset!(grid, name, set, getfacetsets(grid)) -for (func, entity_f, entity_t) in ( -(:filtervertices, :vertices, :VertexIndex), -(:filteredges, :edges, :EdgeIndex), -(:filterfaces, :faces, :FaceIndex), -) -@eval begin - function $(func)(grid::AbstractGrid, set::Set{$(entity_t)}, f::Function; all::Bool=true) - _set = Set{$(entity_t)}() - cells = getcells(grid) - for entity in set # entities can be edges/vertices in the face/edge - cell = cells[entity[1]] - cell_entities = $(entity_f)(cell) - pass = all - for node_idx in cell_entities[entity[2]] # using cell entities common with boundary face - v = f(grid.nodes[node_idx].x) - all ? 
(!v && (pass = false; break)) : (v && (pass = true; break)) - end - pass && push!(_set, entity) - end - return _set - end -end -end +addfacetset!(grid::AbstractGrid, name::String, f::Function; all::Bool=true) = + _addset!(grid, name, create_facetset(grid, f; all=all), getfacetsets(grid)) """ -addboundaryfaceset!(grid::AbstractGrid, topology::ExclusiveTopology, name::String, f::Function; all::Bool=true) - -Adds a boundary faceset to the grid with key `name`. -A faceset maps a `String` key to a `Set` of tuples corresponding to `(global_cell_id, -local_face_id)`. Facesets are used to initialize `Dirichlet` structs, that are needed to -specify the boundary for the `ConstraintHandler`. `all=true` implies that `f(x)` must return -`true` for all nodal coordinates `x` on the face if the face should be added to the set, -otherwise it suffices that `f(x)` returns `true` for one node. - -```julia-repl -julia> using Ferrite - -julia> grid = generate_grid(Tetrahedron, (1,1,1)); + addvertexset!(grid::AbstractGrid, name::String, faceid::AbstractVecOrSet{FaceIndex}) + addvertexset!(grid::AbstractGrid, name::String, f::Function) -julia> topology = ExclusiveTopology(grid); +Adds a vertexset to the grid with key `name`. +A vertexset maps a `String` key to a `OrderedSet` of tuples corresponding to `(global_cell_id, local_vertex_id)`. +Vertexsets can be used to initialize `Dirichlet` boundary conditions for the `ConstraintHandler`. -julia> addboundaryfaceset!(grid, topology, "b", x -> true); - -julia> grid.facesets["b"] -Set{FaceIndex} with 12 elements: -FaceIndex((3, 1)) -FaceIndex((4, 3)) -FaceIndex((3, 3)) -FaceIndex((4, 1)) -FaceIndex((5, 1)) -FaceIndex((2, 2)) -FaceIndex((1, 4)) -FaceIndex((2, 1)) -FaceIndex((6, 1)) -FaceIndex((6, 3)) -FaceIndex((5, 3)) -FaceIndex((1, 1)) +```julia +addvertexset!(grid, "right", Set((VertexIndex(2,2), VertexIndex(4,2)))) +addvertexset!(grid, "clamped", x -> norm(x[1]) ≈ 0.0) ``` """ -function addboundaryfaceset! end - -""" -addboundaryedgeset!(grid::AbstractGrid, topology::ExclusiveTopology, name::String, f::Function; all::Bool=true) +addvertexset!(grid::AbstractGrid, name::String, set::AbstractVecOrSet{VertexIndex}) = + _addset!(grid, name, set, getvertexsets(grid)) -Adds a boundary edgeset to the grid with key `name`. -An edgeset maps a `String` key to a `Set` of tuples corresponding to `(global_cell_id, -local_edge_id)`. `all=true` implies that `f(x)` must return `true` for all nodal coordinates -`x` on the face if the face should be added to the set, otherwise it suffices that `f(x)` -returns `true` for one node. +addvertexset!(grid::AbstractGrid, name::String, f::Function) = + _addset!(grid, name, create_vertexset(grid, f; all=true), getvertexsets(grid)) -```julia-repl -julia> using Ferrite - -julia> grid = generate_grid(Tetrahedron, (1,1,1)); - -julia> topology = ExclusiveTopology(grid); - -julia> addboundaryedgeset!(grid, topology, "b", x -> true); - -julia> grid.edgesets["b"] -Set{EdgeIndex} with 30 elements: -EdgeIndex((6, 6)) -EdgeIndex((2, 1)) -EdgeIndex((5, 3)) -. -. -. -EdgeIndex((2, 5)) -EdgeIndex((1, 4)) -``` -""" -function addboundaryedgeset! end +function _addset!(grid::AbstractGrid, name::String, _set::AbstractVecOrSet, dict::Dict) + _check_setname(dict, name) + set = convert_to_orderedset(_set) + _warn_emptyset(set, name) + dict[name] = set + grid +end """ addboundaryvertexset!(grid::AbstractGrid, topology::ExclusiveTopology, name::String, f::Function; all::Bool=true) Adds a boundary vertexset to the grid with key `name`. 
-A vertexset maps a `String` key to a `Set` of tuples corresponding to `(global_cell_id, +A vertexset maps a `String` key to an `OrderedSet` of tuples corresponding to `(global_cell_id, local_vertex_id)`. `all=true` implies that `f(x)` must return `true` for all nodal coordinates `x` on the face if the face should be added to the set, otherwise it suffices that `f(x)` returns `true` for one node. +""" +function addboundaryvertexset!(grid::AbstractGrid, top::ExclusiveTopology, name::String, f::Function; kwargs...) + set = create_boundaryvertexset(grid, top, f; kwargs...) + return _addset!(grid, name, set, getvertexsets(grid)) +end -```julia-repl -julia> using Ferrite - -julia> grid = generate_grid(Tetrahedron, (1,1,1)); +""" +addboundaryfacetset!(grid::AbstractGrid, topology::ExclusiveTopology, name::String, f::Function; all::Bool=true) -julia> topology = ExclusiveTopology(grid); +Adds a boundary facetset to the grid with key `name`. +A facetset maps a `String` key to a `OrderedSet` of tuples corresponding to `(global_cell_id, +local_facet_id)`. Facetsets are used to initialize `Dirichlet` structs, that are needed to +specify the boundary for the `ConstraintHandler`. `all=true` implies that `f(x)` must return +`true` for all nodal coordinates `x` on the facet if the facet should be added to the set, +otherwise it suffices that `f(x)` returns `true` for one node. +""" +function addboundaryfacetset!(grid::AbstractGrid, top::ExclusiveTopology, name::String, f::Function; kwargs...) + set = create_boundaryfacetset(grid, top, f; kwargs...) + return _addset!(grid, name, set, getfacetsets(grid)) +end -julia> addboundaryvertexset!(grid, topology, "b", x -> true); +function _create_set(f::Function, grid::AbstractGrid, ::Type{BI}; all=true) where {BI <: BoundaryIndex} + set = OrderedSet{BI}() + # Since we loop over the cells in order the resulting set will be sorted + # lexicographically based on the (cell_idx, entity_idx) tuple + for (cell_idx, cell) in enumerate(getcells(grid)) + for (entity_idx, entity) in enumerate(boundaryfunction(BI)(cell)) + pass = all + for node_idx in entity + v = f(get_node_coordinate(grid, node_idx)) + all ? (!v && (pass = false; break)) : (v && (pass = true; break)) + end + pass && push!(set, BI(cell_idx, entity_idx)) + end + end + return set +end -julia> grid.vertexsets["b"] -Set{VertexIndex} with 24 elements: -VertexIndex((2, 3)) -VertexIndex((5, 2)) -VertexIndex((4, 1)) -. -. -. -VertexIndex((1, 4)) -VertexIndex((4, 4)) -``` -""" -function addboundaryvertexset! end +# Given a boundary index, for example EdgeIndex(2, 1), add this to `set`, as well as any other `EdgeIndex` in the grid +# pointing to the same edge (i.e. 
indices belong to neighboring cells) +function push_entity_instances!(set::OrderedSet{BI}, grid::AbstractGrid, top::ExclusiveTopology, entity::BI) where {BI <: BoundaryIndex} + push!(set, entity) # Add the given entity + cell = getcells(grid, entity[1]) + verts = boundaryfunction(BI)(cell)[entity[2]] + for cell_idx in top.vertex_to_cell[verts[1]]# Since all vertices should be shared, the first one can be used here + cell_entities = boundaryfunction(BI)(getcells(grid, cell_idx)) + for (entity_idx, cell_entity) in pairs(cell_entities) + if all(x -> x in verts, cell_entity) + push!(set, BI(cell_idx, entity_idx)) + end + end + end + return set +end -for (func, entity_f, entity_t, filter_f, instance_f, destination) in ( -(:addboundaryfaceset!, :((_, x)->Set([x])), :FaceIndex, :filterfaces, :getfaceinstances, :(grid.facesets)), -(:addboundaryedgeset!, :getfaceedges, :EdgeIndex, :filteredges, :getedgeinstances, :(grid.edgesets)), -(:addboundaryvertexset!, :getfacevertices, :VertexIndex, :filtervertices, :getvertexinstances, :(grid.vertexsets)), -) -@eval begin - function $(func)(grid::AbstractGrid, topology::ExclusiveTopology, name::String, f::Function; all::Bool=true) - _check_setname($(destination), name) - _set = Set{$(entity_t)}() - for (face_idx, neighborhood) in pairs(topology.face_face_neighbor) - isempty(neighborhood) || continue # Skip any faces with neighbors (not on boundary) - entities = $(entity_f)(grid, FaceIndex((face_idx[1], face_idx[2]))) - for entity in $(filter_f)(grid, entities, f; all=all) - union!(_set, $(instance_f)(grid, topology, entity)) +# Create a `OrderedSet{BI}` whose entities are a subset of facets which do not have neighbors, i.e. that are on the boundary. +# Note that this may include entities from cells incident to the facet, e.g. +# ____ consider the case of a vertex boundary set, with f(x) only true on the right side. Then also the VertexIndex +# |\ | belong to the lower left cell, in its lower right corner, is on the boundary, so this should be added too. +# |_\| That is done by the `push_entity_instances!` function. +function _create_boundaryset(f::Function, grid::AbstractGrid, top::ExclusiveTopology, ::Type{BI}; all = true) where {BI <: BoundaryIndex} + # Function barrier as get_facet_facet_neighborhood is not always type stable + function _makeset(ff_nh) + set = OrderedSet{BI}() + for (ff_nh_idx, neighborhood) in pairs(ff_nh) + # ff_nh_idx::CartesianIndex into AbstractMatrix{AbstractVector{BI}} + isempty(neighborhood) || continue # Skip any facets with neighbors (not on boundary) + cell_idx = ff_nh_idx[1] + facet_nr = ff_nh_idx[2] + cell = getcells(grid, cell_idx) + facet_nodes = facets(cell)[facet_nr] + for (subentity_idx, subentity_nodes) in pairs(boundaryfunction(BI)(cell)) + if Base.all(n -> n in facet_nodes, subentity_nodes) + pass = all + for node_idx in subentity_nodes + v = f(get_node_coordinate(grid, node_idx)) + all ? 
(!v && (pass = false; break)) : (v && (pass = true; break)) + end + pass && push_entity_instances!(set, grid, top, BI(cell_idx, subentity_idx)) + end end end - _warn_emptyset(_set, name) - $(destination)[name] = _set - return grid + return set + end + return _makeset(get_facet_facet_neighborhood(top, grid))::OrderedSet{BI} +end + +function create_cellset(grid::AbstractGrid, f::Function; all::Bool=true) + cells = OrderedSet{Int}() + # Since we loop over the cells in order the resulting set will be sorted + for (i, cell) in enumerate(getcells(grid)) + pass = all + for node_idx in get_node_ids(cell) + v = f(get_node_coordinate(grid, node_idx)) + all ? (!v && (pass = false; break)) : (v && (pass = true; break)) + end + pass && push!(cells, i) end + return cells end +function create_nodeset(grid::AbstractGrid, f::Function) + nodes = OrderedSet{Int}() + for (i, n) in pairs(getnodes(grid)) + f(get_node_coordinate(n)) && push!(nodes, i) + end + return nodes +end +create_vertexset(grid::AbstractGrid, f::Function; kwargs...) = _create_set(f, grid, VertexIndex; kwargs...) +create_edgeset( grid::AbstractGrid, f::Function; kwargs...) = _create_set(f, grid, EdgeIndex; kwargs...) +create_faceset( grid::AbstractGrid, f::Function; kwargs...) = _create_set(f, grid, FaceIndex; kwargs...) +create_facetset( grid::AbstractGrid, f::Function; kwargs...) = _create_set(f, grid, FacetIndex; kwargs...) + +create_boundaryvertexset(grid::AbstractGrid, top::ExclusiveTopology, f::Function; kwargs...) = _create_boundaryset(f, grid, top, VertexIndex; kwargs...) +create_boundaryedgeset( grid::AbstractGrid, top::ExclusiveTopology, f::Function; kwargs...) = _create_boundaryset(f, grid, top, EdgeIndex; kwargs...) +create_boundaryfaceset( grid::AbstractGrid, top::ExclusiveTopology, f::Function; kwargs...) = _create_boundaryset(f, grid, top, FaceIndex; kwargs...) +create_boundaryfacetset( grid::AbstractGrid, top::ExclusiveTopology, f::Function; kwargs...) = _create_boundaryset(f, grid, top, FacetIndex; kwargs...) + +""" + bounding_box(grid::AbstractGrid) + +Computes the axis-aligned bounding box for a given grid, based on its node coordinates. +Returns the minimum and maximum vertex coordinates of the bounding box. +""" +function bounding_box(grid::AbstractGrid{dim}) where {dim} + T = get_coordinate_eltype(grid) + min_vertex = Vec{dim}(i->typemax(T)) + max_vertex = Vec{dim}(i->typemin(T)) + for node in getnodes(grid) + x = get_node_coordinate(node) + _max_tmp = max_vertex # avoid type instability + _min_tmp = min_vertex + max_vertex = Vec{dim}(i -> max(x[i], _max_tmp[i])) + min_vertex = Vec{dim}(i -> min(x[i], _min_tmp[i])) + end + return min_vertex, max_vertex end diff --git a/src/L2_projection.jl b/src/L2_projection.jl index d9bc37c939..69717fc699 100644 --- a/src/L2_projection.jl +++ b/src/L2_projection.jl @@ -1,64 +1,138 @@ - abstract type AbstractProjector end -struct L2Projector <: AbstractProjector - func_ip::Interpolation - geom_ip::Interpolation +mutable struct L2Projector <: AbstractProjector M_cholesky #::SuiteSparse.CHOLMOD.Factor{Float64} dh::DofHandler - set::Vector{Int} + qrs_lhs::Vector{<:QuadratureRule} + qrs_rhs::Vector{<:QuadratureRule} end +isclosed(proj::L2Projector) = isclosed(proj.dh) -""" - L2Projector(func_ip::Interpolation, grid::AbstractGrid; kwargs...) - -Create an `L2Projector` used for projecting quadrature data. `func_ip` -is the function interpolation used for the projection and `grid` the grid -over which the projection is applied. 
+function Base.show(io::IO, ::MIME"text/plain", proj::L2Projector) + dh = proj.dh + print(io, typeof(proj)) + isclosed(proj) || (print(io, " (not closed)"); return nothing) + println(io) + ncells = sum(length(sdh.cellset) for sdh in dh.subdofhandlers) + println(io, " projection on: ", ncells, "/", getncells(get_grid(dh)), " cells in grid") + if length(dh.subdofhandlers) == 1 # Same as before + sdh = dh.subdofhandlers[1] + println(io, " function interpolation: ", only(sdh.field_interpolations)) + println(io, " geometric interpolation: ", geometric_interpolation(getcelltype(sdh))) + else + println(io, " Split into ", length(dh.subdofhandlers), " sets") + end + return nothing +end -Keyword arguments: - - `qr_lhs`: quadrature for the left hand side. Defaults to a quadrature which exactly - integrates a mass matrix with `func_ip` as the interpolation. - - `set`: element set over which the projection applies. Defaults to all elements in the grid. - - `geom_ip`: geometric interpolation. Defaults to the default interpolation for the grid. +""" + L2Projector(grid::AbstractGrid) +Initiate an `L2Projector` for projecting quadrature data onto +a function space. To define the function space, add interpolations for +differents cell sets with `add!` before `close!`ing the projector, +see the example below. The `L2Projector` acts as the integrated left hand side of the projection equation: -Find projection ``u \\in L_2(\\Omega)`` such that +Find projection ``u \\in U_h(\\Omega) \\subset L_2(\\Omega)`` such that ```math -\\int v u \\ \\mathrm{d}\\Omega = \\int v f \\ \\mathrm{d}\\Omega \\quad \\forall v \\in L_2(\\Omega), +\\int v u \\ \\mathrm{d}\\Omega = \\int v f \\ \\mathrm{d}\\Omega \\quad \\forall v \\in U_h(\\Omega), ``` -where ``f`` is the data to project. +where ``f \\in L_2(\\Omega)`` is the data to project. The function space ``U_h(\\Omega)`` +is the finite element approximation given by the interpolations `add!`ed to the `L2Projector`. -Use [`project`](@ref) to integrate the right hand side and solve for the system. +### Example +```julia +proj = L2Projector(grid) +qr_quad = QuadratureRule{RefQuadrilateral}(2) +add!(proj, quad_set, Lagrange{RefQuadrilateral, 1}(); qr_rhs = qr_quad) +qr_tria = QuadratureRule{RefTriangle}(1) +add!(proj, tria_set, Lagrange{RefTriangle, 1}(); qr_rhs = qr_tria) +close!(proj) + +vals = Dict{Int, Vector{Float64}}() # Can also be Vector{Vector}, + # indexed with cellnr +for (set, qr) in ((quad_set, qr_quad), (tria_set, qr_tria)) + nqp = getnquadpoints(qr) + for cellnr in set + vals[cellnr] = rand(nqp) + end +end + +projected = project(proj, vals) +``` +where `projected` can be used in e.g. `evaluate_at_points` with the [`PointEvalHandler`](@ref), +or with [`evaluate_at_grid_nodes`](@ref). +""" +function L2Projector(grid::AbstractGrid) + dh = DofHandler(grid) + return L2Projector(nothing, dh, QuadratureRule[], QuadratureRule[]) +end + +""" + L2Projector(ip::Interpolation, grid::AbstractGrid; [qr_lhs], [set]) + +A quick way to initiate an `L2Projector`, add an interpolation `ip` on the `set` to +it, and then `close!` it so that it can be used to `project`. The optional +keyword argument `set` defaults to all cells in the `grid`, while `qr_lhs` defaults +to a quadrature rule that integrates the mass matrix exactly for the interpolation `ip`. 
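A short sketch of this convenience constructor; since no `qr_rhs` is stored, the quadrature rule of the data is passed explicitly to `project`:

```julia
using Ferrite

grid = generate_grid(Triangle, (10, 10))
proj = L2Projector(Lagrange{RefTriangle, 1}(), grid)  # adds the interpolation on all cells and closes the projector

qr = QuadratureRule{RefTriangle}(2)                   # must match the quadrature points of the data
data = [rand(getnquadpoints(qr)) for _ in 1:getncells(grid)]
projected = project(proj, data, qr)
```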
""" function L2Projector( - func_ip::Interpolation, + ip::Interpolation, grid::AbstractGrid; - qr_lhs::QuadratureRule = _mass_qr(func_ip), - set = 1:getncells(grid), - geom_ip::Interpolation = default_interpolation(getcelltype(grid, first(set))), + qr_lhs::QuadratureRule = _mass_qr(ip), + set = OrderedSet(1:getncells(grid)), + geom_ip = nothing, ) + geom_ip === nothing || @warn("Providing geom_ip is deprecated, the geometric interpolation of the cells with always be used") + proj = L2Projector(grid) + add!(proj, set, ip; qr_lhs, qr_rhs = nothing) + close!(proj) + return proj +end - # TODO: Maybe this should not be allowed? We always assume to project scalar entries. - if func_ip isa VectorizedInterpolation - func_ip = func_ip.ip - end +""" + add!(proj::L2Projector, set::AbstractVecOrSet{Int}, ip::Interpolation; + qr_rhs, [qr_lhs]) - _check_same_celltype(grid, set) +Add an interpolation `ip` on the cells in `set` to the `L2Projector` `proj`. - fe_values_mass = CellValues(qr_lhs, func_ip, geom_ip) +* `qr_rhs` sets the quadrature rule used to later integrate the right-hand-side of the + projection equation, when calling [`project`](@ref). It should match the quadrature points used when + creating the quadrature-point variables to project. +* The *optional* `qr_lhs` sets the quadrature rule used to integrate the left-hand-side of the projection equation, + and defaults to a quadrature rule that integrates the mass-matrix exactly for the given interpolation `ip`. - # Create an internal scalar valued field. This is enough since the projection is done on a component basis, hence a scalar field. - dh = DofHandler(grid) - sdh = SubDofHandler(dh, Set(set)) - add!(sdh, :_, func_ip) # we need to create the field, but the interpolation is not used here - close!(dh) +""" +function add!(proj::L2Projector, set::AbstractVecOrSet{Int}, ip::Interpolation; + qr_rhs::Union{QuadratureRule, Nothing}, qr_lhs::QuadratureRule = _mass_qr(ip) + ) + # Validate user input + isclosed(proj) && error("The L2Projector is already closed") + if qr_rhs !== nothing + getrefshape(ip) == getrefshape(qr_rhs) || error("The reference shape of the interpolation and the qr_rhs must be the same") + push!(proj.qrs_rhs, qr_rhs) + end + getrefshape(ip) == getrefshape(qr_lhs) || error("The reference shape of the interpolation and the qr_lhs must be the same") + + sdh = SubDofHandler(proj.dh, set) + add!(sdh, :_, ip isa VectorizedInterpolation ? ip.ip : ip) + push!(proj.qrs_lhs, qr_lhs) + + return proj +end - M = _assemble_L2_matrix(fe_values_mass, set, dh) # the "mass" matrix - M_cholesky = cholesky(M) +""" + close!(proj::L2Projector) - return L2Projector(func_ip, geom_ip, M_cholesky, dh, collect(set)) +Close `proj` which assembles and calculates the left-hand-side of the projection equation, before doing a Cholesky factorization +of the mass-matrix. 
+""" +function close!(proj::L2Projector) + close!(proj.dh) + M = _assemble_L2_matrix(proj.dh, proj.qrs_lhs) + proj.M_cholesky = cholesky(Symmetric(M)) + return proj end # Quadrature sufficient for integrating a mass matrix @@ -70,23 +144,24 @@ function _mass_qr(::Lagrange{shape, 2}) where {shape <: RefSimplex} end _mass_qr(ip::VectorizedInterpolation) = _mass_qr(ip.ip) -function Base.show(io::IO, ::MIME"text/plain", proj::L2Projector) - println(io, typeof(proj)) - println(io, " projection on: ", length(proj.set), "/", getncells(get_grid(proj.dh)), " cells in grid") - println(io, " function interpolation: ", proj.func_ip) - println(io, " geometric interpolation: ", proj.geom_ip) +function _assemble_L2_matrix(dh::DofHandler, qrs_lhs::Vector{<:QuadratureRule}) + M = Symmetric(allocate_matrix(dh)) + assembler = start_assemble(M) + for (sdh, qr_lhs) in zip(dh.subdofhandlers, qrs_lhs) + ip_fun = only(sdh.field_interpolations) + ip_geo = geometric_interpolation(getcelltype(sdh)) + cv = CellValues(qr_lhs, ip_fun, ip_geo; update_gradients = false) + _assemble_L2_matrix!(assembler, cv, sdh) + end + return M end -function _assemble_L2_matrix(fe_values, set, dh) - - n = Ferrite.getnbasefunctions(fe_values) - M = create_symmetric_sparsity_pattern(dh) - assembler = start_assemble(M) +function _assemble_L2_matrix!(assembler, cellvalues::CellValues, sdh::SubDofHandler) + n = getnbasefunctions(cellvalues) Me = zeros(n, n) - cell_dofs = zeros(Int, n) - function symmetrize_to_lower!(K) + function symmetrize_to_lower!(K::Matrix) for i in 1:size(K, 1) for j in i+1:size(K, 1) K[j, i] = K[i, j] @@ -95,48 +170,49 @@ function _assemble_L2_matrix(fe_values, set, dh) end ## Assemble contributions from each cell - for cellnum in set - celldofs!(cell_dofs, dh, cellnum) - + for cell in CellIterator(sdh) fill!(Me, 0) - Xe = getcoordinates(get_grid(dh), cellnum) - reinit!(fe_values, Xe) + reinit!(cellvalues, cell) ## ∭( v ⋅ u )dΩ - for q_point = 1:getnquadpoints(fe_values) - dΩ = getdetJdV(fe_values, q_point) + for q_point = 1:getnquadpoints(cellvalues) + dΩ = getdetJdV(cellvalues, q_point) for j = 1:n - v = shape_value(fe_values, q_point, j) + v = shape_value(cellvalues, q_point, j) for i = 1:j - u = shape_value(fe_values, q_point, i) + u = shape_value(cellvalues, q_point, i) Me[i, j] += v ⋅ u * dΩ end end end symmetrize_to_lower!(Me) - assemble!(assembler, cell_dofs, Me) + assemble!(assembler, celldofs(cell), Me) end - return M + return assembler end - """ - project(proj::L2Projector, vals, qr_rhs::QuadratureRule) + project(proj::L2Projector, vals, [qr_rhs::QuadratureRule]) Makes a L2 projection of data `vals` to the nodes of the grid using the projector `proj` (see [`L2Projector`](@ref)). `project` integrates the right hand side, and solves the projection ``u`` from the following projection equation: -Find projection ``u \\in L_2(\\Omega)`` such that +Find projection ``u \\in U_h(\\Omega) \\subset L_2(\\Omega)`` such that ```math -\\int v u \\ \\mathrm{d}\\Omega = \\int v f \\ \\mathrm{d}\\Omega \\quad \\forall v \\in L_2(\\Omega), +\\int v u \\ \\mathrm{d}\\Omega = \\int v f \\ \\mathrm{d}\\Omega \\quad \\forall v \\in U_h(\\Omega), ``` -where ``f`` is the data to project, i.e. `vals`. +where ``f \\in L_2(\\Omega)`` is the data to project. The function space ``U_h(\\Omega)`` +is the finite element approximation given by the interpolations in `proj`. + +The data `vals` should be an `AbstractVector` or `AbstractDict` that is indexed by the cell number. 
+Each index in `vals` should give an `AbstractVector` with one element for each cell quadrature point. + +If `proj` was created by calling `L2Projector(ip, grid, set)`, `qr_rhs` must be given. Otherwise, this +is added for each domain when calling `add!(proj, args...)`. -The data `vals` should be a vector, with length corresponding to number of elements, of vectors, -with length corresponding to number of quadrature points per element, matching the number of points in `qr_rhs`. -Alternatively, `vals` can be a matrix, with number of columns corresponding to number of elements, -and number of rows corresponding to number of points in `qr_rhs`. +Alternatively, `vals` can be a matrix, with the column index referring to the cell number, +and the row index corresponding to the quadrature point number. Example (scalar) input data: ```julia vals = [ @@ -155,54 +231,88 @@ vals = [ ``` Supported data types to project are `Number`s and `AbstractTensor`s. -The order of the returned data correspond to the order of the `L2Projector`s internal -`DofHandler`. To export the result, use `vtk_point_data(vtk, proj, projected_data)`. +!!! note + The order of the returned data corresponds to the order of the `L2Projector`'s internal + `DofHandler`. The data can be further analyzed with [`evaluate_at_points`](@ref) and + [`evaluate_at_grid_nodes`](@ref). Use [`write_projection`](@ref) to export the result. + """ -function project(proj::L2Projector, - vars::AbstractVector{<:AbstractVector{T}}, - qr_rhs::QuadratureRule) where T <: Union{Number, AbstractTensor} +function project(proj::L2Projector, vars::Union{AbstractVector, AbstractDict}) + return _project(proj, vars, proj.qrs_rhs) +end +# Old-style providing quadrature rule to project +function project(p::L2Projector, vars::Union{AbstractVector, AbstractDict}, qr_rhs::QuadratureRule) + length(p.dh.subdofhandlers) == 1 || error("For multiple domains, provide the right-hand-side quadrature rule to the L2Projector") + return _project(p, vars, [qr_rhs]) +end +# Providing matrix data instead of Vector / Dict +function project(p::L2Projector, vars::AbstractMatrix, args...) + # TODO: Random access into vars is required for now, hence the collect + return project(p, collect(eachcol(vars)), args...) +end - # For using the deprecated API - fe_values = CellValues(qr_rhs, proj.func_ip, proj.geom_ip) +function _project(proj::L2Projector, vars::Union{AbstractVector{TC}, AbstractDict{Int, TC}}, qrs_rhs::Vector{<:QuadratureRule}) where + {TC <: AbstractVector{T}} where T <: Union{Number, AbstractTensor} - M = T <: AbstractTensor ? length(vars[1][1].data) : 1 + # Sanity checks for user input + isclosed(proj) || error("The L2Projector is not closed") + length(qrs_rhs) == 0 && error("The right-hand-side quadrature rule must be provided, unless already given to the L2Projector") + length(qrs_rhs) == length(proj.dh.subdofhandlers) || error("Number of qrs_rhs must match the number of `add!`ed sets") + for (qr_rhs, sdh) in zip(qrs_rhs, proj.dh.subdofhandlers) + if getrefshape(qr_rhs) !== getrefshape(getcelltype(sdh)) + error("Reference shape of quadrature rule and cells doesn't match. 
Please ensure that `qrs_rhs` has the same order as sets are added to the L2Projector") + end + end + # Catch if old input-style giving vars indexed by the set index, instead of the cell id + if isa(vars, AbstractVector) && length(vars) != getncells(get_grid(proj.dh)) + error("vars is indexed by the cellid, not the index in the set: length(vars) != number of cells") + end - projected_vals = _project(vars, proj, fe_values, M, T)::Vector{T} + M = T <: AbstractTensor ? Tensors.n_components(Tensors.get_base(T)) : 1 - return projected_vals + return _project(proj, qrs_rhs, vars, M, T)::Vector{T} end -function project(p::L2Projector, vars::AbstractMatrix, qr_rhs::QuadratureRule) - # TODO: Random access into vars is required for now, hence the collect - return project(p, collect(eachcol(vars)), qr_rhs) + +function _project(proj::L2Projector, qrs_rhs::Vector{<:QuadratureRule}, vars::Union{AbstractVector, AbstractDict}, M::Integer, ::Type{T}) where T + f = zeros(ndofs(proj.dh), M) + for (sdh, qr_rhs) in zip(proj.dh.subdofhandlers, qrs_rhs) + ip_fun = only(sdh.field_interpolations) + ip_geo = geometric_interpolation(getcelltype(sdh)) + cv = CellValues(qr_rhs, ip_fun, ip_geo; update_gradients = false) + assemble_proj_rhs!(f, cv, sdh, vars) + end + + # solve for the projected nodal values + projected_vals = proj.M_cholesky \ f + + # Recast to original input type + make_T(vals) = T <: AbstractTensor ? T(Tuple(vals)) : vals[1] + return T[make_T(x) for x in Base.eachrow(projected_vals)] end -function _project(vars, proj::L2Projector, fe_values::AbstractValues, M::Integer, ::Type{T}) where {T} +function assemble_proj_rhs!(f::Matrix, cellvalues::CellValues, sdh::SubDofHandler, vars::Union{AbstractVector, AbstractDict}) # Assemble the multi-column rhs, f = ∭( v ⋅ x̂ )dΩ # The number of columns corresponds to the length of the data-tuple in the tensor x̂. - - f = zeros(ndofs(proj.dh), M) - n = getnbasefunctions(fe_values) + M = size(f, 2) + n = getnbasefunctions(cellvalues) fe = zeros(n, M) - - cell_dofs = zeros(Int, n) - nqp = getnquadpoints(fe_values) + nqp = getnquadpoints(cellvalues) get_data(x::AbstractTensor, i) = x.data[i] - get_data(x::Number, i) = x + get_data(x::Number, _) = x ## Assemble contributions from each cell - for (ic,cellnum) in enumerate(proj.set) - celldofs!(cell_dofs, proj.dh, cellnum) + for cell in CellIterator(sdh) fill!(fe, 0) - Xe = getcoordinates(get_grid(proj.dh), cellnum) - cell_vars = vars[ic] - reinit!(fe_values, Xe) + cell_vars = vars[cellid(cell)] + length(cell_vars) == nqp || error("The number of variables per cell doesn't match the number of quadrature points") + reinit!(cellvalues, cell) for q_point = 1:nqp - dΩ = getdetJdV(fe_values, q_point) + dΩ = getdetJdV(cellvalues, q_point) qp_vars = cell_vars[q_point] for i = 1:n - v = shape_value(fe_values, q_point, i) + v = shape_value(cellvalues, q_point, i) for j in 1:M fe[i, j] += v * get_data(qp_vars, j) * dΩ end @@ -210,24 +320,10 @@ function _project(vars, proj::L2Projector, fe_values::AbstractValues, M::Integer end # Assemble cell contribution - for (num, dof) in enumerate(cell_dofs) + for (num, dof) in enumerate(celldofs(cell)) f[dof, :] += fe[num, :] end end - - # solve for the projected nodal values - projected_vals = proj.M_cholesky \ f - - # Recast to original input type - make_T(vals) = T <: AbstractTensor ? 
T(Tuple(vals)) : vals[1] - return T[make_T(x) for x in eachrow(projected_vals)] -end - -function WriteVTK.vtk_point_data(vtk::WriteVTK.DatasetFile, proj::L2Projector, vals::Vector{T}, name::AbstractString) where T - data = _evaluate_at_grid_nodes(proj, vals, #=vtk=# Val(true))::Matrix - @assert size(data, 2) == getnnodes(get_grid(proj.dh)) - vtk_point_data(vtk, data, name; component_names=component_names(T)) - return vtk end evaluate_at_grid_nodes(proj::L2Projector, vals::AbstractVector) = @@ -249,19 +345,24 @@ function _evaluate_at_grid_nodes( nout = S <: Vec{2} ? 3 : M # Pad 2D Vec to 3D data = fill(T(NaN), nout, getnnodes(get_grid(dh))) else - data = fill(NaN * zero(S), getnnodes(get_grid(dh))) + data = fill(T(NaN) * zero(S), getnnodes(get_grid(dh))) + end + for sdh in dh.subdofhandlers + ip = only(sdh.field_interpolations) + gip = geometric_interpolation(getcelltype(sdh)) + RefShape = getrefshape(ip) + local_node_coords = reference_coordinates(gip) + qr = QuadratureRule{RefShape}(zeros(length(local_node_coords)), local_node_coords) + cv = CellValues(qr, ip, gip; update_detJdV = false, update_gradients = false) + _evaluate_at_grid_nodes!(data, cv, sdh, vals) end - ip, gip = proj.func_ip, proj.geom_ip - refdim, refshape = getdim(ip), getrefshape(ip) - local_node_coords = reference_coordinates(gip) - qr = QuadratureRule{refshape}(zeros(length(local_node_coords)), local_node_coords) - cv = CellValues(qr, ip) - # Function barrier - return _evaluate_at_grid_nodes!(data, cv, dh, proj.set, vals) + return data end -function _evaluate_at_grid_nodes!(data, cv, dh, set, u::AbstractVector{S}) where S + +function _evaluate_at_grid_nodes!(data, cv, sdh, u::AbstractVector{S}) where S ue = zeros(S, getnbasefunctions(cv)) - for cell in CellIterator(dh, set) + for cell in CellIterator(sdh) + reinit!(cv, cell) @assert getnquadpoints(cv) == length(cell.nodes) for (i, I) in pairs(cell.dofs) ue[i] = u[I] diff --git a/src/PointEvalHandler.jl b/src/PointEvalHandler.jl index 7e27417d5a..97d57ec150 100644 --- a/src/PointEvalHandler.jl +++ b/src/PointEvalHandler.jl @@ -1,14 +1,15 @@ +Base.@kwdef struct NewtonLineSearchPointFinder{T} + max_iters::Int = 10 + max_line_searches::Int = 5 + residual_tolerance::T = 1e-10 +end + """ PointEvalHandler(grid::Grid, points::AbstractVector{Vec{dim,T}}; kwargs...) where {dim, T} The `PointEvalHandler` can be used for function evaluation in *arbitrary points* in the domain -- not just in quadrature points or nodes. -The `PointEvalHandler` takes the following keyword arguments: - - `search_nneighbors`: How many nodes should be found in the nearest neighbor search for each - point. Usually there is no need to change this setting. Default value: `3`. - - `warn`: Show a warning if a point is not found. Default value: `true`. - The constructor takes a grid and a vector of coordinates for the points. The `PointEvalHandler` computes i) the corresponding cell, and ii) the (local) coordinate within the cell, for each point. 
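+
+A minimal usage sketch (assuming `grid`, a vector of points `points`, a `DofHandler` `dh`
+with a single field `:u`, and a matching solution vector `u` are set up elsewhere):
+```julia
+ph = PointEvalHandler(grid, points)
+point_values = evaluate_at_points(ph, dh, u, :u)
+```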
The fields of the `PointEvalHandler` are: @@ -28,10 +29,10 @@ There are two ways to use the `PointEvalHandler` to evaluate functions: """ PointEvalHandler -struct PointEvalHandler{G,dim,T<:Real} +struct PointEvalHandler{G,T<:Real} grid::G cells::Vector{Union{Nothing, Int}} - local_coords::Vector{Union{Nothing, Vec{dim,T}}} + local_coords::Vector{Union{Nothing, Vec{1,T},Vec{2,T},Vec{3,T}}} end function Base.show(io::IO, ::MIME"text/plain", ph::PointEvalHandler) @@ -45,32 +46,39 @@ function Base.show(io::IO, ::MIME"text/plain", ph::PointEvalHandler) end end -function PointEvalHandler(grid::AbstractGrid, points::AbstractVector{Vec{dim,T}}; search_nneighbors=3, warn=true) where {dim, T} +# Internals: +# `PointEvalHandler` takes the following keyword arguments: +# - `search_nneighbors`: How many nodes should be found in the nearest neighbor search for each +# point. Usually there is no need to change this setting. Default value: `3`. +# - `warn::Bool`: Show a warning if a point is not found. Default value: `true`. +# - `newton_max_iters::Int`: Maximum number of inner Newton iterations. Default value: `10`. +# - `newton_residual_tolerance`: Tolerance for the residual norm to indicate convergence in the +# inner Newton solver. Default value: `1e-10`. +function PointEvalHandler(grid::AbstractGrid{dim}, points::AbstractVector{Vec{dim,T}}; search_nneighbors=3, warn::Bool=true, strategy = NewtonLineSearchPointFinder()) where {dim, T} node_cell_dicts = _get_node_cell_map(grid) - cells, local_coords = _get_cellcoords(points, grid, node_cell_dicts, search_nneighbors, warn) + cells, local_coords = _get_cellcoords(points, grid, node_cell_dicts, search_nneighbors, warn, strategy) return PointEvalHandler(grid, cells, local_coords) end -function _get_cellcoords(points::AbstractVector{Vec{dim,T}}, grid::AbstractGrid, node_cell_dicts::Dict{C,Dict{Int, Vector{Int}}}, search_nneighbors, warn) where {dim, T<:Real, C} - +function _get_cellcoords(points::AbstractVector{Vec{dim,T}}, grid::AbstractGrid, node_cell_dicts::Dict{C,Dict{Int, Vector{Int}}}, search_nneighbors, warn, strategy::NewtonLineSearchPointFinder) where {dim, T<:Real, C} # set up tree structure for finding nearest nodes to points kdtree = KDTree(reinterpret(Vec{dim,T}, getnodes(grid))) - nearest_nodes, _ = knn(kdtree, points, search_nneighbors, true) + nearest_nodes, _ = knn(kdtree, points, search_nneighbors, true) cells = Vector{Union{Nothing, Int}}(nothing, length(points)) - local_coords = Vector{Union{Nothing, Vec{dim, T}}}(nothing, length(points)) + local_coords = Vector{Union{Nothing, Vec{1, T},Vec{2, T},Vec{3, T}}}(nothing, length(points)) for point_idx in 1:length(points) cell_found = false for (CT, node_cell_dict) in node_cell_dicts - geom_interpol = default_interpolation(CT) + geom_interpol = geometric_interpolation(CT) # loop over points for node in nearest_nodes[point_idx] possible_cells = get(node_cell_dict, node, nothing) possible_cells === nothing && continue # if node is not part of the subdofhandler, try the next node for cell in possible_cells cell_coords = getcoordinates(grid, cell) - is_in_cell, local_coord = point_in_cell(geom_interpol, cell_coords, points[point_idx]) + is_in_cell, local_coord = find_local_coordinate(geom_interpol, cell_coords, points[point_idx], strategy; warn) if is_in_cell cell_found = true cells[point_idx] = cell @@ -89,54 +97,85 @@ function _get_cellcoords(points::AbstractVector{Vec{dim,T}}, grid::AbstractGrid, return cells, local_coords end -# check if point is inside a cell based on physical coordinate 
-function point_in_cell(geom_interpol::Interpolation{shape}, cell_coordinates, global_coordinate) where {shape} - converged, x_local = find_local_coordinate(geom_interpol, cell_coordinates, global_coordinate) - if converged - return _check_isoparametric_boundaries(shape, x_local), x_local - else - return false, x_local - end -end - # check if point is inside a cell based on isoparametric coordinate -function _check_isoparametric_boundaries(::Type{RefHypercube{dim}}, x_local::Vec{dim, T}) where {dim, T} - tol = sqrt(eps(T)) - # All in the range [-1, 1] - return all(x -> abs(x) - 1 < tol, x_local) +function check_isoparametric_boundaries(::Type{RefHypercube{dim}}, x_local::Vec{dim, T}, tol) where {dim, T} + # All in the range [-1, 1]^dim + return all(x -> abs(x) - 1 ≤ tol, x_local) end # check if point is inside a cell based on isoparametric coordinate -function _check_isoparametric_boundaries(::Type{RefSimplex{dim}}, x_local::Vec{dim, T}) where {dim, T} - tol = sqrt(eps(T)) +function check_isoparametric_boundaries(::Type{RefSimplex{dim}}, x_local::Vec{dim, T}, tol) where {dim, T} # Positive and below the plane 1 - ξx - ξy - ξz return all(x -> x > -tol, x_local) && sum(x_local) - 1 < tol end +cellcenter(::Type{<:RefHypercube{dim}}, _::Type{T}) where {dim, T} = zero(Vec{dim, T}) +cellcenter(::Type{<:RefSimplex{dim}}, _::Type{T}) where {dim, T} = Vec{dim, T}((ntuple(d->1/3, dim))) + +_solve_helper(A::Tensor{2,dim}, b::Vec{dim}) where {dim} = inv(A) ⋅ b +_solve_helper(A::SMatrix{idim, odim}, b::Vec{idim,T}) where {odim, idim, T} = Vec{odim,T}(pinv(A) * b) + # See https://discourse.julialang.org/t/finding-the-value-of-a-field-at-a-spatial-location-in-juafem/38975/2 -# TODO: should we make iteration params optional keyword arguments? -function find_local_coordinate(interpolation, cell_coordinates::Vector{V}, global_coordinate::V) where {dim, T, V <: Vec{dim, T}} +function find_local_coordinate(interpolation::Interpolation{refshape}, cell_coordinates::Vector{<:Vec{sdim}}, global_coordinate::Vec{sdim}, strategy::NewtonLineSearchPointFinder; warn::Bool = false) where {sdim, refshape} + boundary_tolerance = √(strategy.residual_tolerance) + + T = promote_type(eltype(cell_coordinates[1]), eltype(global_coordinate)) n_basefuncs = getnbasefunctions(interpolation) @assert length(cell_coordinates) == n_basefuncs - local_guess = zero(V) - max_iters = 10 - tol_norm = 1e-10 + local_guess = cellcenter(refshape, T) converged = false - for _ in 1:max_iters - global_guess = zero(V) - J = zero(Tensor{2, dim, T}) - # TODO batched eval after 764 is merged. 
- for j in 1:n_basefuncs - dNdξ, N = shape_gradient_and_value(interpolation, local_guess, j) - global_guess += N * cell_coordinates[j] - J += cell_coordinates[j] ⊗ dNdξ - end + for iter in 1:strategy.max_iters + # Setup J(ξ) and x(ξ) + J, global_guess = calculate_jacobian_and_spatial_coordinate(interpolation, local_guess, cell_coordinates) + # Check if converged residual = global_guess - global_coordinate - if norm(residual) <= tol_norm - converged = true + best_residual_norm = norm(residual) # for line search below + # Early convergence check + if best_residual_norm ≤ strategy.residual_tolerance + converged = check_isoparametric_boundaries(refshape, local_guess, boundary_tolerance) + if converged + @debug println("Local point finder converged in $iter iterations with residual $best_residual_norm to $local_guess") + else + @debug println("Local point finder converged in $iter iterations with residual $best_residual_norm to a point outside the element: $local_guess") + end + break + end + if calculate_detJ(J) ≤ 0.0 + warn && @warn "det(J) negative! Aborting! $(calculate_detJ(J))" break end - local_guess -= inv(J) ⋅ residual + Δξ = _solve_helper(J, residual) # J \ b throws an error. TODO clean up when https://github.com/Ferrite-FEM/Tensors.jl/pull/188 is merged. + # Do line search if the new guess is outside the element + best_index = 1 + new_local_guess = local_guess - Δξ + global_guess = spatial_coordinate(interpolation, new_local_guess, cell_coordinates) + best_residual_norm = norm(global_guess - global_coordinate) + if !check_isoparametric_boundaries(refshape, new_local_guess, boundary_tolerance) + # Search for the residual minimizer, which is still inside the element + for next_index ∈ 2:strategy.max_line_searches + new_local_guess = local_guess - Δξ/2^(next_index-1) + global_guess = spatial_coordinate(interpolation, new_local_guess, cell_coordinates) + residual_norm = norm(global_guess - global_coordinate) + if residual_norm < best_residual_norm && check_isoparametric_boundaries(refshape, new_local_guess, boundary_tolerance) + best_residual_norm = residual_norm + best_index = next_index + end + end + end + local_guess -= Δξ / 2^(best_index-1) + # Late convergence check + if best_residual_norm ≤ strategy.residual_tolerance + converged = check_isoparametric_boundaries(refshape, local_guess, boundary_tolerance) + if converged + @debug println("Local point finder converged in $iter iterations with residual $best_residual_norm to $local_guess") + else + @debug println("Local point finder converged in $iter iterations with residual $best_residual_norm to a point outside the element: $local_guess") + end + break + end + if iter == strategy.max_iters + @debug println("Failed to converge in $(strategy.max_iters) iterations") + end end return converged, local_guess end @@ -180,12 +219,12 @@ function evaluate_at_points(ph::PointEvalHandler, proj::L2Projector, dof_vals::A evaluate_at_points(ph, proj.dh, dof_vals) end -function evaluate_at_points(ph::PointEvalHandler{<:Any, dim, T1}, dh::AbstractDofHandler, dof_vals::AbstractVector{T2}, - fname::Symbol=find_single_field(dh)) where {dim, T1, T2} +function evaluate_at_points(ph::PointEvalHandler{<:Any, T1}, dh::AbstractDofHandler, dof_vals::AbstractVector{T2}, + fname::Symbol=find_single_field(dh)) where {T1, T2} npoints = length(ph.cells) # Figure out the value type by creating a dummy PointValues ip = getfieldinterpolation(dh, find_field(dh, fname)) - pv = PointValues(T1, ip; update_gradients = false) + pv = PointValues(T1, ip; update_gradients = 
Val(false)) zero_val = function_value_init(pv, dof_vals) # Allocate the output as NaNs nanv = convert(typeof(zero_val), NaN * zero_val) @@ -204,7 +243,7 @@ end # values in dof-order. They must be obtained from the same DofHandler that was used for constructing the PointEvalHandler function evaluate_at_points!(out_vals::Vector{T2}, - ph::PointEvalHandler{<:Any, <:Any, T_ph}, + ph::PointEvalHandler{<:Any, T_ph}, dh::DofHandler, dof_vals::Vector{T}, fname::Symbol, @@ -219,8 +258,9 @@ function evaluate_at_points!(out_vals::Vector{T2}, if ip !== nothing dofrange = dof_range(sdh, fname) cellset = sdh.cellset - ip_geo = default_interpolation(getcelltype(sdh)) - pv = PointValues(T_ph, ip, ip_geo; update_gradients = false) + ip_geo = geometric_interpolation(getcelltype(sdh)) + + pv = PointValues(T_ph, ip, ip_geo; update_gradients = Val(false)) _evaluate_at_points!(out_vals, dof_vals, ph, dh, pv, cellset, dofrange) end end @@ -234,7 +274,7 @@ function _evaluate_at_points!( ph::PointEvalHandler, dh::AbstractDofHandler, pv::PointValues, - cellset::Union{Nothing, Set{Int}}, + cellset::Union{Nothing, AbstractSet{Int}}, dofrange::AbstractRange{Int}, ) where {T2,T} diff --git a/src/PoolAllocator.jl b/src/PoolAllocator.jl new file mode 100644 index 0000000000..2ac84ceb71 --- /dev/null +++ b/src/PoolAllocator.jl @@ -0,0 +1,237 @@ +module PoolAllocator + +# Checkmate LanguageServer.jl +const var"@propagate_inbounds" = Base.var"@propagate_inbounds" + +@eval macro $(Symbol("const"))(field) + if VERSION >= v"1.8.0-DEV.1148" + Expr(:const, esc(field)) + else + return esc(field) + end +end + +const PAGE_SIZE = 4 * 1024 * 1024 # 4 MiB + +# A page corresponds to a memory block of size `PAGE_SIZE` bytes. +# Allocations of arrays are views into this block. +mutable struct Page{T} + @const buf::Vector{T} # data buffer (TODO: Memory in recent Julias?) + @const blocksize::Int # blocksize for this page + @const freelist::BitVector # block is free/used + n_free::Int # number of free blocks + function Page{T}(blocksize::Int) where T + @assert isbitstype(T) + buf = Vector{T}(undef, PAGE_SIZE ÷ sizeof(T)) + n_blocks, r = divrem(length(buf), blocksize) + @assert r == 0 + return new{T}(buf, blocksize, trues(n_blocks), n_blocks) + end +end + +# Find a free block and mark it as used +function _malloc(page::Page, size::Int) + if size != page.blocksize + error("malloc: requested size does not match the blocksize of this page") + end + # Return early if the page is already full + if page.n_free == 0 + return nothing + end + # Find the first free block + blockindex = findfirst(page.freelist)::Int + if !@inbounds(page.freelist[blockindex]) + error("malloc: block already in use") + end + @inbounds page.freelist[blockindex] = false + offset = (blockindex - 1) * page.blocksize + page.n_free -= 1 + return offset +end + +# Mark a block as free +function _free(page::Page, offset::Int) + blockindex = offset ÷ page.blocksize + 1 + if @inbounds page.freelist[blockindex] + error("free: block already free'd") + end + @inbounds page.freelist[blockindex] = true + page.n_free += 1 + # TODO: If this page is completely unused it can be collected and reused. 
+ return +end + +# A book is a collection of pages with a specific blocksize +struct Book{T} + blocksize::Int + pages::Vector{Page{T}} +end + +# Find a page with a free block of the requested size +function _malloc(book::Book{T}, size::Int) where {T} + @assert book.blocksize == size + # Check existing pages + for page in book.pages + offset = _malloc(page, size) + if offset !== nothing + return (page, offset) + end + end + # Allocate a new page + page = Page{T}(book.blocksize) + push!(book.pages, page) + # Allocate block in the new page + offset = _malloc(page, size) + @assert offset !== nothing + return (page, offset) +end + +struct MemoryPool{T} + books::Vector{Book{T}} # blocksizes 2, 4, 6, 8, ... + function MemoryPool{T}() where T + mempool = new(Book{T}[]) + return mempool + end +end + +# Free all pages by resizing all page containers to 0 +function free(mempool::MemoryPool) + for i in 1:length(mempool.books) + isassigned(mempool.books, i) || continue + resize!(mempool.books[i].pages, 0) + end + resize!(mempool.books, 0) + return +end + +function mempool_stats(mempool::MemoryPool{T}) where T + bytes_used = 0 + bytes_allocated = 0 + for bookidx in 1:length(mempool.books) + isassigned(mempool.books, bookidx) || continue + book = mempool.books[bookidx] + bytes_allocated += length(book.pages) * PAGE_SIZE + for page in book.pages + bytes_used += count(!, page.freelist) * page.blocksize * sizeof(T) + end + end + return bytes_used, bytes_allocated +end + +function Base.show(io::IO, ::MIME"text/plain", mempool::MemoryPool{T}) where T + n_books = count(i -> isassigned(mempool.books, i), 1:length(mempool.books)) + print(io, "PoolAllocator.MemoryPool{$(T)} with $(n_books) fixed size pools") + n_books == 0 && return + println(io, ":") + for idx in 1:length(mempool.books) + isassigned(mempool.books, idx) || continue + h = mempool.books[idx] + blocksize = h.blocksize + # @assert blocksize == 2^idx + npages = length(h.pages) + n_free = mapreduce(p -> p.n_free, +, h.pages; init=0) + n_tot = npages * PAGE_SIZE ÷ blocksize ÷ sizeof(T) + println(io, " - blocksize: $(blocksize), npages: $(npages), usage: $(n_tot - n_free) / $(n_tot)") + end + return +end + +function bookindex_from_blocksize(blocksize::Int) + return (8 * sizeof(Int) - leading_zeros(blocksize)) % Int +end + +function malloc(mempool::MemoryPool{T}, dims::NTuple{N, Int}) where {T, N} + @assert prod(dims) > 0 + blocksize = nextpow(2, prod(dims)) + bookidx = bookindex_from_blocksize(blocksize) + if length(mempool.books) < bookidx + resize!(mempool.books, bookidx) + end + if !isassigned(mempool.books, bookidx) + book = Book(blocksize, Page{T}[]) + mempool.books[bookidx] = book + else + book = mempool.books[bookidx] + end + page, offset = _malloc(book, blocksize) + + return PoolArray{T, N}(mempool, page, offset, dims) +end + + +# PoolArray is a view into a page that also has a reference to the MemoryPool so that it can +# be resized/reallocated. +struct PoolArray{T, N} <: AbstractArray{T, N} + mempool::MemoryPool{T} + page::Page{T} + offset::Int + size::NTuple{N, Int} +end + +const PoolVector{T} = PoolArray{T, 1} + +# Constructors +function malloc(mempool::MemoryPool, dim1::Int) + return malloc(mempool, (dim1, )) +end +function malloc(mempool::MemoryPool, dim1::Int, dim2::Int, dimx::Int...) 
+ dims = (dim1, dim2, map(Int, dimx)..., ) + return malloc(mempool, dims) +end + +function free(x::PoolArray) + _free(x.page, x.offset) + return +end + +function realloc(x::PoolArray{T}, newsize::Int) where T + @assert newsize > length(x) # TODO: Allow shrinkage? + @assert newsize <= PAGE_SIZE ÷ sizeof(T) # TODO: Might be required + # Find the page for the block to make sure it was allocated in this mempool + # page = find_page(x.mempool, ) + # Allocate the new block + x′ = malloc(x.mempool, newsize) + # Copy the data + copyto!(x′, x) + # Free the old block and return + _free(x.page, x.offset) + return x′ +end + +# AbstractArray interface +Base.size(mv::PoolArray) = mv.size +allocated_length(mv::PoolArray) = mv.page.blocksize +Base.IndexStyle(::Type{<:PoolArray}) = IndexLinear() +@propagate_inbounds function Base.getindex(mv::PoolArray, i::Int) + @boundscheck checkbounds(mv, i) + return @inbounds mv.page.buf[mv.offset + i] +end +@propagate_inbounds function Base.setindex!(mv::PoolArray{T}, v::T, i::Int) where T + @boundscheck checkbounds(mv, i) + @inbounds mv.page.buf[mv.offset + i] = v + return mv +end + +# Utilities needed for the sparsity pattern +@inline function resize(x::PoolVector{T}, n::Int) where T + if n > allocated_length(x) + return realloc(x, n) + else + return PoolVector{T}(x.mempool, x.page, x.offset, (n, )) + end +end + +@inline function insert(x::PoolVector{T}, k::Int, item::T) where T + lx = length(x) + # Make room + x = resize(x, lx + 1) + # Shift elements after the insertion point to the back + @inbounds for i in lx:-1:k + x[i + 1] = x[i] + end + # Insert the new element + @inbounds x[k] = item + return x +end + +end # module PoolAllocator diff --git a/src/Quadrature/gaussquad_prism_table.jl b/src/Quadrature/gaussquad_prism_table.jl index 36ec8b7662..3d7488b0d1 100644 --- a/src/Quadrature/gaussquad_prism_table.jl +++ b/src/Quadrature/gaussquad_prism_table.jl @@ -1,6 +1,6 @@ # Symmetric quadrature rules takes from -# Witherden, Freddie D., and Peter E. Vincent. "On the identification of -# symmetric quadrature rules for finite element methods." Computers & +# Witherden, Freddie D., and Peter E. Vincent. "On the identification of +# symmetric quadrature rules for finite element methods." Computers & # Mathematics with Applications 69.10 (2015): 1232-1241. # Note that the original rule is defined on [-1,1]^3 while our reference prism is defined on [0,1]^3, hence we transform in the end. 
function _get_gauss_prismdata_polyquad(n::Int) @@ -59,7 +59,7 @@ function _get_gauss_prismdata_polyquad(n::Int) -0.79828210803458293816870337538129563058 -0.79828210803458293816870337538129563058 0.57042698070515927206196786842334325646 0.25007428574779395912056494464439239186 ] elseif n == 6 - wx = [ + xw = [ -0.33333333333333333333333333333333333333 -0.33333333333333333333333333333333333333 -0.52662270480497475869798007748379244322 0.22269313409222502250038629788629976598 -0.33333333333333333333333333333333333333 -0.33333333333333333333333333333333333333 0.52662270480497475869798007748379244322 0.22269313409222502250038629788629976598 -0.33333333333333333333333333333333333333 -0.33333333333333333333333333333333333333 -0.99083630081924474869286718300205167763 0.13839610937412451172575590081432159364 @@ -90,7 +90,7 @@ function _get_gauss_prismdata_polyquad(n::Int) -0.59485220654953844609181984939099585377 -0.93064127511790609994722013739838094335 0.80928579325583275231645432806703865787 0.084632875139559356776397936558477141722 ] elseif n == 7 - wx = [ + xw = [ -0.33333333333333333333333333333333333333 -0.33333333333333333333333333333333333333 -0.98022806959089160171504882914128008603 0.11378272445075715563392222208600913026 -0.33333333333333333333333333333333333333 -0.33333333333333333333333333333333333333 0.98022806959089160171504882914128008603 0.11378272445075715563392222208600913026 -0.98332480906795705560590831565479581293 0.96664961813591411121181663130959162585 0 0.024472968692380158977782543188506640362 diff --git a/src/Quadrature/gaussquad_pyramid_table.jl b/src/Quadrature/gaussquad_pyramid_table.jl index 043e03b87d..77f773d2c0 100644 --- a/src/Quadrature/gaussquad_pyramid_table.jl +++ b/src/Quadrature/gaussquad_pyramid_table.jl @@ -1,8 +1,8 @@ # Symmetric quadrature rules takes from -# Witherden, Freddie D., and Peter E. Vincent. "On the identification of -# symmetric quadrature rules for finite element methods." Computers & +# Witherden, Freddie D., and Peter E. Vincent. "On the identification of +# symmetric quadrature rules for finite element methods." Computers & # Mathematics with Applications 69.10 (2015): 1232-1241. -# TODO: Implement quadrature data from: +# TODO: Implement quadrature data from: # https://www.sciencedirect.com/science/article/pii/S0168874X1200203X?via%3Dihub#s0065 function _get_gauss_pyramiddata_polyquad(n::Int) if n == 1 @@ -85,7 +85,7 @@ function _get_gauss_pyramiddata_polyquad(n::Int) else throw(ArgumentError("unsupported order for prism polyquad integration")) end - # + # # The above quadrature rule is defined for a pyramid spanning [-1,1] × [-1,1] × [-1,1], with volume 8/3 and with 5th node in center. # The reference pyramid in ferrite spans [0,1] × [0,1] × [0,1], with volume 1/3 and with 5th node in corner. # Here we map thequadrature points to the pyramid defined in Ferrite. @@ -104,4 +104,4 @@ function _get_gauss_pyramiddata_polyquad(n::Int) end return xw -end \ No newline at end of file +end diff --git a/src/Quadrature/gaussquad_tet_table.jl b/src/Quadrature/gaussquad_tet_table.jl index 4eb7b73ffe..0694caa432 100644 --- a/src/Quadrature/gaussquad_tet_table.jl +++ b/src/Quadrature/gaussquad_tet_table.jl @@ -1,6 +1,5 @@ -# Patrick Keast, MODERATE-DEGREE TETRAHEDRAL QUADRATURE FORMULAS -# http://mech.fsv.cvut.cz/oofem/resources/doc/oofemrefman/gaussintegrationrule_8C_source.html -function _get_gauss_tetdata(n::Int) +# Yu, Jinyun. Symmetric Gaussian Quadrature Formulae for Tetrahedronal Regions. 1984. CMAME. 
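+#
+# Each row of the returned `xw` matrix holds the three reference coordinates of a
+# quadrature point followed by its weight, i.e. `xw[p, 1:3]` is the point and `xw[p, 4]`
+# its weight.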
+function _get_jinyun_tet_quadrature_data(n::Int) if n == 1 a = 1. / 4. w = 1. / 6. @@ -24,6 +23,20 @@ b2 a2 b2 w2 b2 b2 a2 w2 b2 b2 b2 w2] + elseif 4 ≤ n ≤ 6 + throw(ArgumentError("Jinyun's Gauss quadrature rule (RefTetrahedron) is not implemented for orders 4 to 6")) + else + throw(ArgumentError("unsupported quadrature order $n for Jinyun's Gauss quadrature rule (RefTetrahedron). Supported orders are 1 to 3.")) + end + return xw +end + +# Patrick Keast. Moderate-Degree Tetrahedral Quadrature Formulas. 1986. CMAME. +# Minimal points +function _get_keast_a_tet_quadrature_data(n::Int) + if 1 ≤ n ≤ 3 + # The rules of Jinyun and Keast are identical for orders 1 to 3, as stated in the Keast paper. + xw = _get_jinyun_tet_quadrature_data(n) elseif n == 4 a1 = 1. / 4.; w1 = -74. / 5625.; @@ -47,8 +60,76 @@ b3 a3 a3 w3 b3 a3 b3 w3 b3 b3 a3 w3] + elseif n == 5 + w1 = 0.602678571428571597e-2 + a1 = 1. / 3. + b1 = 0. + + w2 = 0.302836780970891856e-1 + a2 = 1. / 4. + + w3 = 0.116452490860289742e-1 + a3 = 1. / 11. + b3 = 8. / 11. + + w4 = 0.109491415613864534e-1 + a4 = 0.665501535736642813e-1 + b4 = 0.433449846426335728e-0 + + xw = [a1 a1 a1 w1 + a1 a1 b1 w1 + a1 b1 a1 w1 + b1 a1 a1 w1 + a2 a2 a2 w2 + a3 a3 a3 w3 + a3 a3 b3 w3 + a3 b3 a3 w3 + b3 a3 a3 w3 + a4 a4 b4 w4 + a4 b4 a4 w4 + a4 b4 b4 w4 + b4 a4 a4 w4 + b4 a4 b4 w4 + b4 b4 a4 w4] + elseif 6 ≤ n ≤ 8 + throw(ArgumentError("Keast's Gauss quadrature rule (RefTetrahedron) is not implemented for orders 6 to 8")) + else + throw(ArgumentError("unsupported order $n for Keast's Gauss quadrature rule (RefTetrahedron). Supported orders are 1 to 5.")) + end + return xw +end + +# Positive points +function _get_keast_b_tet_quadrature_data(n::Int) + if n == 4 + w1 = 0.317460317460317450e-2 + a1 = 1. / 2. + b1 = 0.0 + + w2 = 0.147649707904967828e-1 + a2 = 0.100526765225204467e-0 + b2 = 0.698419704324386603e-0 + + w3 = 0.221397911142651221e-1 + a3 = 0.314372873493192195e-0 + b3 = 0.568813795204234229e-1 + + xw = [a1 a1 b1 w1 + a1 b1 a1 w1 + a1 b1 b1 w1 + b1 a1 a1 w1 + b1 a1 b1 w1 + b1 b1 a1 w1 + a2 a2 a2 w2 + a2 a2 b2 w2 + a2 b2 a2 w2 + b2 a2 a2 w2 + a3 a3 a3 w3 + a3 a3 b3 w3 + a3 b3 a3 w3 + b3 a3 a3 w3] else - throw(ArgumentError("unsupported order for tetraheder gauss-legendre integration")) + xw = _get_keast_a_tet_quadrature_data(n) end return xw end diff --git a/src/Quadrature/gaussquad_tri_table.jl b/src/Quadrature/gaussquad_tri_table.jl index d6af16fed4..7be355ce45 100644 --- a/src/Quadrature/gaussquad_tri_table.jl +++ b/src/Quadrature/gaussquad_tri_table.jl @@ -1,29 +1,23 @@ -# Order 1 to 8 heights points / wheights have been suggested in +# Order 1 to 8 points / weights have been suggested in # Dunavant, D. A. (1985), High degree efficient symmetrical Gaussian quadrature # rules for the triangle. Int. J. Numer. Meth. Engng., 21: 1129–1148. 
doi: # 10.1002/nme.1620210612 # -# Quadrature rules for orders 9 to 20 have been obtained using the -# basix.make_quadrature(basix.CellType.triangle, n) calls of the -# FEniCS / basix python package -# -# see -# https://docs.fenicsproject.org/basix/main/python/_autosummary/basix.html?highlight=quadraturetype#basix.make_quadrature -function _get_gauss_tridata(n::Int) +function _get_dunavant_gauss_tridata(n::Int) if (n == 1) xw=[0.33333333333333 0.33333333333333 1.00000000000000 / 2.0]; elseif (n == 2) xw=[0.16666666666667 0.16666666666667 0.33333333333333 / 2.0 0.16666666666667 0.66666666666667 0.33333333333333 / 2.0 0.66666666666667 0.16666666666667 0.33333333333333 / 2.0]; - elseif (n == 3) + elseif (n == 3) xw=[0.33333333333333 0.33333333333333 -0.56250000000000 / 2.0 0.20000000000000 0.20000000000000 0.52083333333333 / 2.0 0.20000000000000 0.60000000000000 0.52083333333333 / 2.0 0.60000000000000 0.20000000000000 0.52083333333333 / 2.0]; - elseif (n == 4) + elseif (n == 4) xw=[0.44594849091597 0.44594849091597 0.22338158967801 / 2.0 0.44594849091597 0.10810301816807 0.22338158967801 / 2.0 0.10810301816807 0.44594849091597 0.22338158967801 / 2.0 @@ -82,7 +76,24 @@ function _get_gauss_tridata(n::Int) 0.72849239295540 0.26311282963464 0.02723031417443 / 2.0 0.26311282963464 0.00839477740996 0.02723031417443 / 2.0 0.00839477740996 0.72849239295540 0.02723031417443 / 2.0]; - elseif n == 9 + else + throw(ArgumentError("unsupported order for Dunavant's triangle integration")) + end + return xw +end + +# TheseqQuadrature rules for orders 9 to 20 have been obtained using the +# basix.make_quadrature(basix.CellType.triangle, n) calls of the +# FEniCS / basix python package, which corresponds to Gauss-Jacobi rules. +# +# see +# https://docs.fenicsproject.org/basix/main/python/_autosummary/basix.html?highlight=quadraturetype#basix.make_quadrature +# +# The original paper for this rule is: +# Jacobi, Carl Gustav Jakob. "Ueber Gauss neue Methode, die Werthe der Integrale +# näherungsweise zu finden." (1826): 301-308. +function _get_gaussjacobi_tridata(n::Int) + if n == 9 xw=[0.4171034443615992 0.4171034443615992 0.0136554632640511 0.1803581162663707 0.1803581162663707 0.0131563152940090 0.2857065024365867 0.2857065024365867 0.0188581185763976 @@ -833,7 +844,7 @@ function _get_gauss_tridata(n::Int) 0.0080665857041666 0.5861680189969418 0.0018206702056404 0.0001234468122874 0.8135558255123531 0.0003443363125209] else - throw(ArgumentError("unsupported order for triangle gauss-legendre integration")) + throw(ArgumentError("unsupported order for triangle Gauss-Jacobi integration")) end return xw end diff --git a/src/Quadrature/quadrature.jl b/src/Quadrature/quadrature.jl index 5623f89b19..bb32acdb2d 100644 --- a/src/Quadrature/quadrature.jl +++ b/src/Quadrature/quadrature.jl @@ -4,20 +4,27 @@ include("gaussquad_prism_table.jl") include("gaussquad_pyramid_table.jl") include("generate_quadrature.jl") -import Base.Cartesian: @nloops, @nref, @ntuple, @nexprs +using Base.Cartesian: @nloops, @ntuple, @nexprs ################## # QuadratureRule # ################## """ - QuadratureRule{shape}([quad_rule_type::Symbol], order::Int) - QuadratureRule{shape, T}([quad_rule_type::Symbol], order::Int) + QuadratureRule{shape}([::Type{T},] [quad_rule_type::Symbol,] order::Int) + QuadratureRule{shape}(weights::AbstractVector{T}, points::AbstractVector{Vec{rdim, T}}) Create a `QuadratureRule` used for integration on the refshape `shape` (of type [`AbstractRefShape`](@ref)). `order` is the order of the quadrature rule. 
`quad_rule_type` is an optional argument determining the type of quadrature rule, -currently the `:legendre` and `:lobatto` rules are implemented. +currently the `:legendre` and `:lobatto` rules are implemented for hypercubes. +For triangles up to order 8 the default rule is the one by `:dunavant` (see [Dun:1985:hde](@cite)) and for +tetrahedra the default rule is `keast_minimal` (see [Keast:1986:mtq](@cite)). Wedges and pyramids default +to `:polyquad` (see [WitVin:2015:isq](@cite)). +Furthermore we have implemented +* `:gaussjacobi` for triangles (order 9-15) +* `:keast_minimal` (see [Keast:1986:mtq](@cite)) for tetrahedra (order 1-5), containing negative weights +* `:keast_positive` (see [Keast:1986:mtq](@cite)) for tetrahedra (order 1-5), containing only positive weights A `QuadratureRule` is used to approximate an integral on a domain by a weighted sum of function values at specific points: @@ -42,39 +49,39 @@ julia> getpoints(qr) [0.33333333333333, 0.33333333333333] ``` """ -struct QuadratureRule{shape,T,dim} - weights::Vector{T} - points::Vector{Vec{dim,T}} - function QuadratureRule{shape, T}(weights::Vector{T}, points::Vector{Vec{dim, T}}) where {dim, shape <: AbstractRefShape{dim}, T} +struct QuadratureRule{shape, WeightStorageType, PointStorageType} + weights::WeightStorageType # E.g. Vector{Float64} + points::PointStorageType # E.g. Vector{Vec{3, Float64}} + function QuadratureRule{shape}(weights::AbstractVector{T}, points::AbstractVector{Vec{rdim, T}}) where {rdim, shape <: AbstractRefShape{rdim}, T} if length(weights) != length(points) - throw(ArgumentError("number of weights and number of points do not match")) + throw(ArgumentError("number of weights and number of points do not match (#weights=$(length(weights)) != #points=$(length(points)))")) end - new{shape, T, dim}(weights, points) + new{shape, typeof(weights), typeof(points)}(weights, points) end end -function QuadratureRule{shape}(weights::Vector{T}, points::Vector{Vec{dim, T}}) where {dim, shape <: AbstractRefShape{dim}, T} - QuadratureRule{shape, T}(weights, points) -end - +@inline _default_quadrature_rule(::Type{<:RefHypercube}) = :legendre +@inline _default_quadrature_rule(::Union{Type{RefPrism}, Type{RefPyramid}}) = :polyquad +@inline _default_quadrature_rule(::Type{RefTriangle}) = :dunavant +@inline _default_quadrature_rule(::Type{RefTetrahedron}) = :keast_minimal -# Fill in defaults (Float64, :legendre) +# Fill in defaults with T=Float64 function QuadratureRule{shape}(order::Int) where {shape <: AbstractRefShape} - return QuadratureRule{shape, Float64}(order) + return QuadratureRule{shape}(Float64, order) end -function QuadratureRule{shape, T}(order::Int) where {shape <: AbstractRefShape, T} - quad_type = (shape === RefPrism || shape === RefPyramid) ? 
(:polyquad) : (:legendre) - return QuadratureRule{shape, T}(quad_type, order) +function QuadratureRule{shape}(::Type{T}, order::Int) where {shape <: AbstractRefShape, T} + quad_type = _default_quadrature_rule(shape) + return QuadratureRule{shape}(T, quad_type, order) end function QuadratureRule{shape}(quad_type::Symbol, order::Int) where {shape <: AbstractRefShape} - return QuadratureRule{shape, Float64}(quad_type, order) + return QuadratureRule{shape}(Float64, quad_type, order) end # Generate Gauss quadrature rules on hypercubes by doing an outer product # over all dimensions for dim in 1:3 @eval begin - function QuadratureRule{RefHypercube{$dim}, T}(quad_type::Symbol, order::Int) where T + function QuadratureRule{RefHypercube{$dim}}(::Type{T}, quad_type::Symbol, order::Int) where T if quad_type === :legendre p, w = GaussQuadrature.legendre(T, order) elseif quad_type === :lobatto @@ -93,18 +100,24 @@ for dim in 1:3 weights[count] = weight count += 1 end - return QuadratureRule{RefHypercube{$dim}, T}(weights, points) + return QuadratureRule{RefHypercube{$dim}}(weights, points) end end end for dim in 2:3 @eval begin - function QuadratureRule{RefSimplex{$dim}, T}(quad_type::Symbol, order::Int) where T - if $dim == 2 && quad_type === :legendre - data = _get_gauss_tridata(order) - elseif $dim == 3 && quad_type === :legendre - data = _get_gauss_tetdata(order) + function QuadratureRule{RefSimplex{$dim}}(::Type{T}, quad_type::Symbol, order::Int) where T + if $dim == 2 && quad_type === :dunavant + data = _get_dunavant_gauss_tridata(order) + elseif $dim == 2 && quad_type === :gaussjacobi + data = _get_gaussjacobi_tridata(order) + elseif $dim == 3 && quad_type === :jinyun + data = _get_jinyun_tet_quadrature_data(order) + elseif $dim == 3 && quad_type === :keast_minimal + data = _get_keast_a_tet_quadrature_data(order) + elseif $dim == 3 && quad_type === :keast_positive + data = _get_keast_b_tet_quadrature_data(order) else throw(ArgumentError("unsupported quadrature rule")) end @@ -114,14 +127,14 @@ for dim in 2:3 for p in 1:size(data, 1) points[p] = Vec{$dim,T}(@ntuple $dim i -> data[p, i]) end - weights = data[:, $dim + 1] - QuadratureRule{RefSimplex{$dim}, T}(weights, points) + weights = T.(data[:, $dim + 1]) + QuadratureRule{RefSimplex{$dim}}(weights, points) end end end # Grab prism quadrature rule from table -function QuadratureRule{RefPrism, T}(quad_type::Symbol, order::Int) where T +function QuadratureRule{RefPrism}(::Type{T}, quad_type::Symbol, order::Int) where T if quad_type == :polyquad data = _get_gauss_prismdata_polyquad(order) else @@ -133,12 +146,12 @@ function QuadratureRule{RefPrism, T}(quad_type::Symbol, order::Int) where T for p in 1:size(data, 1) points[p] = Vec{3,T}(@ntuple 3 i -> data[p, i]) end - weights = data[:, 4] - QuadratureRule{RefPrism,T}(weights, points) + weights = T.(data[:, 4]) + QuadratureRule{RefPrism}(weights, points) end # Grab pyramid quadrature rule from table -function QuadratureRule{RefPyramid, T}(quad_type::Symbol, order::Int) where T +function QuadratureRule{RefPyramid}(::Type{T}, quad_type::Symbol, order::Int) where T if quad_type == :polyquad data = _get_gauss_pyramiddata_polyquad(order) else @@ -150,83 +163,90 @@ function QuadratureRule{RefPyramid, T}(quad_type::Symbol, order::Int) where T for p in 1:size(data, 1) points[p] = Vec{3,T}(@ntuple 3 i -> data[p, i]) end - weights = data[:, 4] - QuadratureRule{RefPyramid,T}(weights, points) + weights = T.(data[:, 4]) + QuadratureRule{RefPyramid}(weights, points) end ###################### -# FaceQuadratureRule 
# +# FacetQuadratureRule # ###################### """ - FaceQuadratureRule{shape}([quad_rule_type::Symbol], order::Int) - FaceQuadratureRule{shape, T}([quad_rule_type::Symbol], order::Int) + FacetQuadratureRule{shape}([::Type{T},] [quad_rule_type::Symbol,] order::Int) + FacetQuadratureRule{shape}(face_rules::NTuple{<:Any, <:QuadratureRule{shape}}) + FacetQuadratureRule{shape}(face_rules::AbstractVector{<:QuadratureRule{shape}}) -Create a `FaceQuadratureRule` used for integration of the faces of the refshape `shape` (of +Create a `FacetQuadratureRule` used for integration of the faces of the refshape `shape` (of type [`AbstractRefShape`](@ref)). `order` is the order of the quadrature rule. -`quad_rule_type` is an optional argument determining the type of quadrature rule, currently -the `:legendre` and `:lobatto` rules are implemented. +If no symbol is provided, the default `quad_rule_type` for each facet's reference shape is used (see [`QuadratureRule`](@ref)). +For non-default `quad_rule_type`s on cells with mixed facet types (e.g. `RefPrism` and `RefPyramid`), the +`face_rules` must be provided explicitly. -`FaceQuadratureRule` is used as one of the components to create [`FaceValues`](@ref). +`FacetQuadratureRule` is used as one of the components to create [`FacetValues`](@ref). """ -struct FaceQuadratureRule{shape, T, dim} - face_rules::Vector{QuadratureRule{shape, T, dim}} - function FaceQuadratureRule{shape, T, dim}(face_rules::Vector{QuadratureRule{shape, T, dim}}) where {shape, T, dim} - # TODO: Verify length(face_rules) == nfaces(shape) - return new{shape, T, dim}(face_rules) +struct FacetQuadratureRule{shape, FacetRulesType} + face_rules::FacetRulesType # E.g. Tuple{QuadratureRule{RefLine,...}, QuadratureRule{RefLine,...}} + function FacetQuadratureRule{shape}(face_rules::Union{NTuple{<:Any, QRType}, AbstractVector{QRType}}) where {shape, QRType <: QuadratureRule{shape}} + if length(face_rules) != nfacets(shape) + throw(ArgumentError("number of quadrature rules does not match number of facets (#rules=$(length(face_rules)) != #facets=$(nfacets(shape)))")) + end + return new{shape, typeof(face_rules)}(face_rules) end end -function FaceQuadratureRule(face_rules::Vector{QuadratureRule{shape, T, dim}}) where {shape, T, dim} - return FaceQuadratureRule{shape, T, dim}(face_rules) +function FacetQuadratureRule(face_rules::Union{NTuple{<:Any, QRType}, AbstractVector{QRType}}) where {shape, QRType <: QuadratureRule{shape}} + return FacetQuadratureRule{shape}(face_rules) end -# Fill in defaults (Float64, :legendre) -function FaceQuadratureRule{shape}(order::Int) where {shape <: AbstractRefShape} - return FaceQuadratureRule{shape, Float64}(order) +# Fill in defaults T=Float64 +function FacetQuadratureRule{shape}(order::Int) where {shape <: AbstractRefShape} + return FacetQuadratureRule{shape}(Float64, order) end -function FaceQuadratureRule{shape, T}(order::Int) where {shape <: AbstractRefShape, T} - return FaceQuadratureRule{shape, T}(:legendre, order) -end -function FaceQuadratureRule{shape}(quad_type::Symbol, order::Int) where {shape <: AbstractRefShape} - return FaceQuadratureRule{shape, Float64}(quad_type, order) +function FacetQuadratureRule{shape}(quad_type::Symbol, order::Int) where {shape <: AbstractRefShape} + return FacetQuadratureRule{shape}(Float64, quad_type, order) end # For RefShapes with equal face-shapes: generate quad rule for the face shape # and expand to each face -function FaceQuadratureRule{RefLine, T}(::Symbol, ::Int) where T +function 
FacetQuadratureRule{RefLine}(::Type{T}, ::Int) where T w, p = T[1], Vec{0, T}[Vec{0, T}(())] - return create_face_quad_rule(RefLine, w, p) + return create_facet_quad_rule(RefLine, w, p) end -function FaceQuadratureRule{RefQuadrilateral, T}(quad_type::Symbol, order::Int) where T - qr = QuadratureRule{RefLine, T}(quad_type, order) - return create_face_quad_rule(RefQuadrilateral, qr.weights, qr.points) +FacetQuadratureRule{RefQuadrilateral}(::Type{T}, order::Int) where T = FacetQuadratureRule{RefQuadrilateral}(T,_default_quadrature_rule(RefLine),order) +function FacetQuadratureRule{RefQuadrilateral}(::Type{T}, quad_type::Symbol, order::Int) where T + qr = QuadratureRule{RefLine}(T, quad_type, order) + return create_facet_quad_rule(RefQuadrilateral, qr.weights, qr.points) end -function FaceQuadratureRule{RefHexahedron, T}(quad_type::Symbol, order::Int) where T - qr = QuadratureRule{RefQuadrilateral, T}(quad_type, order) - return create_face_quad_rule(RefHexahedron, qr.weights, qr.points) +FacetQuadratureRule{RefHexahedron}(::Type{T}, order::Int) where T = FacetQuadratureRule{RefHexahedron}(T,_default_quadrature_rule(RefQuadrilateral),order) +function FacetQuadratureRule{RefHexahedron}(::Type{T}, quad_type::Symbol, order::Int) where T + qr = QuadratureRule{RefQuadrilateral}(T, quad_type, order) + return create_facet_quad_rule(RefHexahedron, qr.weights, qr.points) end -function FaceQuadratureRule{RefTriangle, T}(quad_type::Symbol, order::Int) where T - qr = QuadratureRule{RefLine, T}(quad_type, order) - # Interval scaled and shifted in face_to_element_transformation from (-1,1) to (0,1) -> half the length -> half quadrature weights - return create_face_quad_rule(RefTriangle, qr.weights/2, qr.points) +FacetQuadratureRule{RefTriangle}(::Type{T}, order::Int) where T = FacetQuadratureRule{RefTriangle}(T,_default_quadrature_rule(RefLine),order) +function FacetQuadratureRule{RefTriangle}(::Type{T}, quad_type::Symbol, order::Int) where T + qr = QuadratureRule{RefLine}(T, quad_type, order) + # Interval scaled and shifted in facet_to_element_transformation from (-1,1) to (0,1) -> half the length -> half quadrature weights + return create_facet_quad_rule(RefTriangle, qr.weights/2, qr.points) end -function FaceQuadratureRule{RefTetrahedron, T}(quad_type::Symbol, order::Int) where T - qr = QuadratureRule{RefTriangle, T}(quad_type, order) - return create_face_quad_rule(RefTetrahedron, qr.weights, qr.points) +FacetQuadratureRule{RefTetrahedron}(::Type{T}, order::Int) where T = FacetQuadratureRule{RefTetrahedron}(T,_default_quadrature_rule(RefTriangle),order) +function FacetQuadratureRule{RefTetrahedron}(::Type{T}, quad_type::Symbol, order::Int) where T + qr = QuadratureRule{RefTriangle}(T, quad_type, order) + return create_facet_quad_rule(RefTetrahedron, qr.weights, qr.points) end -function FaceQuadratureRule{RefPrism, T}(quad_type::Symbol, order::Int) where T - qr_quad = QuadratureRule{RefQuadrilateral, T}(quad_type, order) - qr_tri = QuadratureRule{RefTriangle, T}(quad_type, order) - # Interval scaled and shifted in face_to_element_transformation for quadrilateral faces from (-1,1)² to (0,1)² -> quarter the area -> quarter the quadrature weights - return create_face_quad_rule(RefPrism, [2,3,4], qr_quad.weights/4, qr_quad.points, +FacetQuadratureRule{RefPrism}(::Type{T}, order::Int) where T = _FacetQuadratureRulePrism(T,(_default_quadrature_rule(RefTriangle), _default_quadrature_rule(RefQuadrilateral)),order) +function _FacetQuadratureRulePrism(::Type{T}, quad_types::Tuple{Symbol,Symbol}, order::Int) where T 
+ qr_quad = QuadratureRule{RefQuadrilateral}(T, quad_types[2], order) + qr_tri = QuadratureRule{RefTriangle}(T, quad_types[1], order) + # Interval scaled and shifted in facet_to_element_transformation for quadrilateral faces from (-1,1)² to (0,1)² -> quarter the area -> quarter the quadrature weights + return create_facet_quad_rule(RefPrism, [2,3,4], qr_quad.weights/4, qr_quad.points, [1,5], qr_tri.weights, qr_tri.points) end -function FaceQuadratureRule{RefPyramid, T}(quad_type::Symbol, order::Int) where T - qr_quad = QuadratureRule{RefQuadrilateral, T}(quad_type, order) - qr_tri = QuadratureRule{RefTriangle, T}(quad_type, order) - # Interval scaled and shifted in face_to_element_transformation for quadrilateral faces from (-1,1)² to (0,1)² -> quarter the area -> quarter the quadrature weights - return create_face_quad_rule(RefPyramid, [1], qr_quad.weights/4, qr_quad.points, +FacetQuadratureRule{RefPyramid}(::Type{T}, order::Int) where T = _FacetQuadratureRulePyramid(T,(_default_quadrature_rule(RefTriangle), _default_quadrature_rule(RefQuadrilateral)),order) +function _FacetQuadratureRulePyramid(::Type{T}, quad_types::Tuple{Symbol,Symbol}, order::Int) where T + qr_quad = QuadratureRule{RefQuadrilateral}(T, quad_types[2], order) + qr_tri = QuadratureRule{RefTriangle}(T, quad_types[1], order) + # Interval scaled and shifted in facet_to_element_transformation for quadrilateral faces from (-1,1)² to (0,1)² -> quarter the area -> quarter the quadrature weights + return create_facet_quad_rule(RefPyramid, [1], qr_quad.weights/4, qr_quad.points, [2,3,4,5], qr_tri.weights, qr_tri.points) end @@ -242,15 +262,15 @@ Return the number of quadrature points in `qr`. getnquadpoints(qr::QuadratureRule) = length(getweights(qr)) """ - getnquadpoints(qr::FaceQuadratureRule, face::Int) + getnquadpoints(qr::FacetQuadratureRule, face::Int) Return the number of quadrature points in `qr` for local face index `face`. """ -getnquadpoints(qr::FaceQuadratureRule, face::Int) = getnquadpoints(qr.face_rules[face]) +getnquadpoints(qr::FacetQuadratureRule, face::Int) = getnquadpoints(qr.face_rules[face]) """ getweights(qr::QuadratureRule) - getweights(qr::FaceQuadratureRule, face::Int) + getweights(qr::FacetQuadratureRule, face::Int) Return the weights of the quadrature rule. @@ -266,12 +286,12 @@ julia> getweights(qr) ``` """ getweights(qr::QuadratureRule) = qr.weights -getweights(qr::FaceQuadratureRule, face::Int) = getweights(qr.face_rules[face]) +getweights(qr::FacetQuadratureRule, face::Int) = getweights(qr.face_rules[face]) """ getpoints(qr::QuadratureRule) - getpoints(qr::FaceQuadratureRule, face::Int) + getpoints(qr::FacetQuadratureRule, face::Int) Return the points of the quadrature rule. @@ -287,7 +307,9 @@ julia> getpoints(qr) ``` """ getpoints(qr::QuadratureRule) = qr.points -getpoints(qr::FaceQuadratureRule, face::Int) = getpoints(qr.face_rules[face]) +getpoints(qr::FacetQuadratureRule, face::Int) = getpoints(qr.face_rules[face]) + +getrefshape(::QuadratureRule{RefShape}) where RefShape = RefShape # TODO: This is used in copy(::(Cell|Face)Values), but it it useful to get an actual copy? -Base.copy(qr::Union{QuadratureRule,FaceQuadratureRule}) = qr +Base.copy(qr::Union{QuadratureRule, FacetQuadratureRule}) = qr diff --git a/src/assembler.jl b/src/assembler.jl index 573f4437dc..b86ffa8899 100644 --- a/src/assembler.jl +++ b/src/assembler.jl @@ -24,7 +24,7 @@ Create an `Assembler` object which can be used to assemble element contributions global sparse matrix. 
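The constructors above assemble a `FacetQuadratureRule` from lower-dimensional `QuadratureRule`s, rescaling the weights where the facet parametrization changes the measure. A minimal usage sketch (illustrative only, not part of the patch), assuming the order-only constructor that defaults the element type to `Float64`:

```julia
using Ferrite

fqr = FacetQuadratureRule{RefTriangle}(2)  # one sub-rule per edge of the reference triangle

getnquadpoints(fqr, 1)  # number of quadrature points on facet 1
getweights(fqr, 1)      # weights for facet 1 (note the scaling discussed in the comments above)
getpoints(fqr, 1)       # corresponding quadrature points for facet 1
```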
Use [`assemble!`](@ref) for each element, and [`finish_assemble`](@ref), to finalize the assembly and return the sparse matrix. -Note that giving a sparse matrix as input can be more efficient. See below and +Note that giving a sparse matrix as input can be more efficient. See below and as described in the [manual](@ref man-assembly). !!! note @@ -50,7 +50,7 @@ end Assembles the matrix `Ke` into `a` according to the dofs specified by `rowdofs` and `coldofs`. """ -function assemble!(a::Ferrite.Assembler{T}, rowdofs::AbstractVector{Int}, coldofs::AbstractVector{Int}, Ke::AbstractMatrix{T}) where {T} +function assemble!(a::Assembler{T}, rowdofs::AbstractVector{Int}, coldofs::AbstractVector{Int}, Ke::AbstractMatrix{T}) where {T} nrows = length(rowdofs) ncols = length(coldofs) @@ -241,7 +241,7 @@ function _missing_sparsity_pattern_error(Krow::Int, Kcol::Int) throw(ErrorException( "You are trying to assemble values in to K[$(Krow), $(Kcol)], but K[$(Krow), " * "$(Kcol)] is missing in the sparsity pattern. Make sure you have called `K = " * - "create_sparsity_pattern(dh)` or `K = create_sparsity_pattern(dh, ch)` if you " * + "allocate_matrix(dh)` or `K = allocate_matrix(dh, ch)` if you " * "have affine constraints. This error might also happen if you are using " * "`::AssemblerSparsityPattern` in a threaded assembly loop (you need to create an " * "`assembler::AssemblerSparsityPattern` for each task)." diff --git a/src/deprecations.jl b/src/deprecations.jl index 7f81cf443d..e35db07746 100644 --- a/src/deprecations.jl +++ b/src/deprecations.jl @@ -1,17 +1,51 @@ -Base.@deprecate_binding DirichletBoundaryConditions ConstraintHandler -Base.@deprecate_binding DirichletBoundaryCondition Dirichlet +struct DeprecationError <: Exception + msg::String +end +function DeprecationError(msg::Pair) + io = _iobuffer() + printstyled(io, "`$(msg.first)`", color=:red) + print(io, " is deprecated, use ") + printstyled(io, "`$(msg.second)`", color=:green) + print(io, " instead.") + DeprecationError(takestring(io)) +end + +function Base.showerror(io::IO, err::DeprecationError) + print(io, "DeprecationError: ") + print(io, err.msg) +end + +function _iobuffer() + io = IOBuffer() + ioc = IOContext(io, IOContext(stderr)) + return ioc +end +function takestring(ioc) + String(take!(ioc.io)) +end + +function Base.push!(::AbstractDofHandler, args...) + throw(DeprecationError("push!(dh::AbstractDofHandler, args...)" => "add!(dh, args...)")) +end + +for (a, b) in [(:vertices, :vertexdof_indices), (:faces, :facedof_indices), (:edges, :edgedof_indices)] + @eval function $(a)(::Interpolation) + throw(DeprecationError("$($(a))(ip::Interpolation)" => "`$($(b))(ip)")) + end +end -import Base: push! -@deprecate push!(dh::AbstractDofHandler, args...) add!(dh, args...) +function nfields(::AbstractDofHandler) + throw(DeprecationError("nfields(dh::AbstractDofHandler)" => "length(getfieldnames(dh))")) +end -@deprecate vertices(ip::Interpolation) vertexdof_indices(ip) false -@deprecate faces(ip::Interpolation) facedof_indices(ip) false -@deprecate edges(ip::Interpolation) edgedof_indices(ip) false -@deprecate nfields(dh::AbstractDofHandler) length(getfieldnames(dh)) false -# @deprecate add!(ch::ConstraintHandler, fh::FieldHandler, dbc::Dirichlet) add!(ch, dbc) +export getcoordinates +function getcoordinates(::Node) + throw(DeprecationError("getcoordinates(node::Node)" => "get_node_coordinate(node)")) +end -@deprecate getcoordinates(node::Node) get_node_coordinate(node) true -@deprecate cellcoords!(x::Vector, dh::DofHandler, args...) 
getcoordinates!(x, dh.grid, args...) false +function cellcoords!(x::Vector, dh::DofHandler, args...) + throw(DeprecationError("cellcoords!(x::Vector, dh::DofHandler, args...)" => "getcoordinates!(x, dh.grid, args...)")) +end struct Cell{refdim, nnodes, nfaces} function Cell{refdim, nnodes, nfaces}(nodes) where {refdim, nnodes, nfaces} @@ -45,58 +79,81 @@ struct Cell{refdim, nnodes, nfaces} replacement = Wedge end if replacement === nothing - error("The AbstractCell interface have been changed, see https://github.com/Ferrite-FEM/Ferrite.jl/pull/679") + throw(DeprecationError("The AbstractCell interface have been changed, see https://github.com/Ferrite-FEM/Ferrite.jl/pull/679")) else - Base.depwarn("Use `$(replacement)(nodes)` instead of `Cell{$refdim, $nnodes, $nfaces}(nodes)`.", :Cell) - return replacement(nodes) + throw(DeprecationError("Cell{$refdim, $nnodes, $nfaces}(nodes)" => "$replacement(nodes)")) end end end export Cell -Base.@deprecate_binding Line2D Line -Base.@deprecate_binding Line3D Line -Base.@deprecate_binding Quadrilateral3D Quadrilateral +const Line2D = Cell{2,2,1} +const Line3D = Cell{3,2,0} +const Quadrilateral3D = Cell{3,4,1} export Line2D, Line3D, Quadrilateral3D +using WriteVTK: vtk_grid +export vtk_grid # To give better error + +function WriteVTK.vtk_grid(::String, ::Union{AbstractGrid,AbstractDofHandler}; kwargs...) + throw(DeprecationError( + "The vtk interface has been updated in Ferrite v1.0. " * + "See https://github.com/Ferrite-FEM/Ferrite.jl/pull/692. " * + "Use VTKGridFile to open a vtk file, and the functions " * + "write_solution, write_cell_data, and write_projection to save data." + )) +end + # Deprecation of auto-vectorized methods function add!(dh::DofHandler, name::Symbol, dim::Int) celltype = getcelltype(dh.grid) if !isconcretetype(celltype) error("If you have more than one celltype in Grid, you must use add!(dh::DofHandler, fh::FieldHandler)") end - Base.depwarn( - "`add!(dh::DofHandler, name::Symbol, dim::Int)` is deprecated. Instead, pass the " * - "interpolation explicitly, and vectorize it to `dim` for vector-valued " * - "fields. See CHANGELOG for more details.", - :add!, - ) - ip = default_interpolation(celltype) - add!(dh, name, dim == 1 ? ip : VectorizedInterpolation{dim}(ip)) + io = _iobuffer() + printstyled(io, "`add!(dh::DofHandler, name::Symbol, dim::Int)`", color=:red) + print(io, " is deprecated. Instead, pass the interpolation explicitly, and vectorize it to `dim` for vector-valued fields.") + print(io, " See CHANGELOG for more details.") + throw(DeprecationError(takestring(io))) end function add!(dh::DofHandler, name::Symbol, dim::Int, ip::ScalarInterpolation) - Base.depwarn( - "`add!(dh::DofHandler, name::Symbol, dim::Int, ip::ScalarInterpolation)` is " * - "deprecated. Instead, vectorize the interpolation to the appropriate dimension " * - "and add it (`vip = ip^dim; add!(dh, name, vip)`). See CHANGELOG for more details.", - :add! - ) - add!(dh, name, dim == 1 ? ip : VectorizedInterpolation{dim}(ip)) + io = _iobuffer() + printstyled(io, "`add!(dh::DofHandler, name::Symbol, dim::Int, ip::ScalarInterpolation)`", color=:red) + print(io, " is deprecated. 
Instead, vectorize the interpolation to the appropriate dimension and then `add!` it.") + print(io, " See CHANGELOG for more details.") + throw(DeprecationError(takestring(io))) end # Deprecation of compute_vertex_values -@deprecate compute_vertex_values(nodes::Vector{<:Node}, f::Function) map(n -> f(n.x), nodes) -@deprecate compute_vertex_values(grid::AbstractGrid, f::Function) map(n -> f(n.x), getnodes(grid)) -@deprecate compute_vertex_values(grid::AbstractGrid, v::Vector{Int}, f::Function) map(n -> f(n.x), getnodes(grid, v)) -@deprecate compute_vertex_values(grid::AbstractGrid, set::String, f::Function) map(n -> f(n.x), getnodes(grid, set)) +export compute_vertex_values +function compute_vertex_values(nodes::Vector{<:Node}, f::Function) + throw(DeprecationError("compute_vertex_values(nodes::Vector{<:Node}, f::Function)" => "map(n -> f(n.x), nodes)")) +end +function compute_vertex_values(grid::AbstractGrid, f::Function) + throw(DeprecationError("compute_vertex_values(grid::AbstractGrid, f::Function)" => "map(n -> f(n.x), getnodes(grid))")) +end +function compute_vertex_values(grid::AbstractGrid, v::Vector{Int}, f::Function) + throw(DeprecationError("compute_vertex_values(grid::AbstractGrid, v::Vector{Int}, f::Function)" => "map(n -> f(n.x), getnodes(grid, v))")) +end +function compute_vertex_values(grid::AbstractGrid, set::String, f::Function) + throw(DeprecationError("compute_vertex_values(grid::AbstractGrid, set::String, f::Function)" => "map(n -> f(n.x), getnodes(grid, set))")) +end -@deprecate reshape_to_nodes evaluate_at_grid_nodes +function reshape_to_nodes(args...) + throw(DeprecationError("reshape_to_nodes(args...)" => "evaluate_at_grid_nodes(args...)")) +end -@deprecate start_assemble(f::Vector, K::Union{SparseMatrixCSC, Symmetric}; kwargs...) start_assemble(K, f; kwargs...) +function start_assemble(f::Vector, K::Union{SparseMatrixCSC, Symmetric}; kwargs...) + throw(DeprecationError("start_assemble(f::Vector, K::Union{SparseMatrixCSC, Symmetric}; kwargs...)" => "start_assemble(K, f; kwargs...)")) +end -@deprecate shape_derivative shape_gradient -@deprecate function_derivative function_gradient +function shape_derivative(args...) + throw(DeprecationError("shape_derivative(args...)" => "shape_gradient(args...)")) +end +function function_derivative(args...) + throw(DeprecationError("function_derivative(args...)" => "function_gradient(args...)")) +end # Deprecation of (Cell|Face|Point)(Scalar|Vector)Values. # Define dummy types so that loading old code doesn't directly error, and let @@ -148,7 +205,7 @@ for VT in ( end @eval begin function $(VT)(args...) - error($message) + throw(DeprecationError($message)) end export $(VT) end @@ -156,11 +213,19 @@ end # TODO: Are these needed to be deprecated - harder? 
with the new parameterization # (Cell|Face)Values with vector dofs -const _VectorValues = Union{CellValues{<:FV}, FaceValues{<:FV}} where {FV <: FunctionValues{<:Any,<:VectorInterpolation}} -@deprecate function_value(fe_v::_VectorValues, q_point::Int, u::AbstractVector{Vec{dim,T}}) where {dim,T} function_value(fe_v, q_point, reinterpret(T, u)) -@deprecate function_gradient(fe_v::_VectorValues, q_point::Int, u::AbstractVector{Vec{dim,T}}) where {dim,T} function_gradient(fe_v, q_point, reinterpret(T, u)) -@deprecate function_divergence(fe_v::_VectorValues, q_point::Int, u::AbstractVector{Vec{dim,T}}) where {dim,T} function_divergence(fe_v, q_point, reinterpret(T, u)) -@deprecate function_curl(fe_v::_VectorValues, q_point::Int, u::AbstractVector{Vec{dim,T}}) where {dim,T} function_curl(fe_v, q_point, reinterpret(T, u)) +const _VectorValues = Union{CellValues{<:FV}, FacetValues{<:FV}} where {FV <: FunctionValues{<:Any,<:VectorInterpolation}} +function function_value(::_VectorValues, ::Int, ::AbstractVector{Vec{dim,T}}) where {dim,T} + throw(DeprecationError("function_value(fe_v::VectorValues, q_point::Int, u::AbstractVector{Vec{dim,T}})" => "function_value(fe_v, q_point, reinterpret(T, u))")) +end +function function_gradient(::_VectorValues, ::Int, ::AbstractVector{Vec{dim,T}}) where {dim,T} + throw(DeprecationError("function_gradient(fe_v::VectorValues, q_point::Int, u::AbstractVector{Vec{dim,T}})" => "function_gradient(fe_v, q_point, reinterpret(T, u))")) +end +function function_divergence(::_VectorValues, ::Int, ::AbstractVector{Vec{dim,T}}) where {dim,T} + throw(DeprecationError("function_divergence(fe_v::VectorValues, q_point::Int, u::AbstractVector{Vec{dim,T}})" => "function_divergence(fe_v, q_point, reinterpret(T, u))")) +end +function function_curl(::_VectorValues, ::Int, ::AbstractVector{Vec{dim,T}}) where {dim,T} + throw(DeprecationError("function_curl(fe_v::VectorValues, q_point::Int, u::AbstractVector{Vec{dim,T}})" => "function_curl(fe_v, q_point, reinterpret(T, u))")) +end # New reference shapes struct RefCube end @@ -168,34 +233,27 @@ export RefCube function Lagrange{D, RefCube, O}() where {D, O} shape = D == 1 ? RefLine : D == 2 ? RefQuadrilateral : RefHexahedron - Base.depwarn("`Lagrange{$D, RefCube, $O}()` is deprecated, use `Lagrange{$(shape), $O}()` instead.", :Lagrange) - return Lagrange{shape, O}() + throw(DeprecationError("Lagrange{$D, RefCube, $O}()" => "Lagrange{$(shape), $O}()")) end function Lagrange{2, RefTetrahedron, O}() where {O} - Base.depwarn("`Lagrange{2, RefTetrahedron, $O}()` is deprecated, use `Lagrange{RefTriangle, $O}()` instead.", :Lagrange) - return Lagrange{RefTriangle, O}() + throw(DeprecationError("Lagrange{2, RefTetrahedron, $O}()" => "Lagrange{RefTriangle, $O}()")) end function DiscontinuousLagrange{D, RefCube, O}() where {D, O} shape = D == 1 ? RefLine : D == 2 ? 
RefQuadrilateral : RefHexahedron - Base.depwarn("`DiscontinuousLagrange{$D, RefCube, $O}()` is deprecated, use `DiscontinuousLagrange{$(shape), $O}()` instead.", :DiscontinuousLagrange) - return DiscontinuousLagrange{shape, O}() + throw(DeprecationError("DiscontinuousLagrange{$D, RefCube, $O}()" => "DiscontinuousLagrange{$(shape), $O}()")) end function BubbleEnrichedLagrange{2, RefTetrahedron, O}() where {O} - Base.depwarn("`BubbleEnrichedLagrange{2, RefTetrahedron, $O}()` is deprecated, use `BubbleEnrichedLagrange{RefTriangle, $O}()` instead.", :BubbleEnrichedLagrange) - return BubbleEnrichedLagrange{RefTriangle, O}() + throw(DeprecationError("BubbleEnrichedLagrange{2, RefTetrahedron, $O}()" => "BubbleEnrichedLagrange{RefTriangle, $O}()")) end function DiscontinuousLagrange{2, RefTetrahedron, O}() where {O} - Base.depwarn("`DiscontinuousLagrange{2, RefTetrahedron, $O}()` is deprecated, use `DiscontinuousLagrange{RefTriangle, $O}()` instead.", :DiscontinuousLagrange) - return DiscontinuousLagrange{RefTriangle, O}() + throw(DeprecationError("DiscontinuousLagrange{2, RefTetrahedron, $O}()" => "DiscontinuousLagrange{RefTriangle, $O}()")) end function Serendipity{D, RefCube, O}() where {D, O} shape = D == 1 ? RefLine : D == 2 ? RefQuadrilateral : RefHexahedron - Base.depwarn("`Serendipity{$D, RefCube, $O}()` is deprecated, use `Serendipity{$(shape), $O}()` instead.", :Serendipity) - return Serendipity{shape, O}() + throw(DeprecationError("Serendipity{$D, RefCube, $O}()" => "Serendipity{$(shape), $O}()")) end function CrouzeixRaviart{2, 1}() - Base.depwarn("`CrouzeixRaviart{2, 1}()` is deprecated, use `CrouzeixRaviart{RefTriangle, 1}()` instead.", :CrouzeixRaviart) - return CrouzeixRaviart{RefTriangle, 1}() + throw(DeprecationError("CrouzeixRaviart{2, 1}()" => "CrouzeixRaviart{RefTriangle, 1}()")) end # For the quadrature: Some will be wrong for face integration, so then we warn @@ -204,75 +262,67 @@ end # QuadratureRule{1, RefCube}(...) -> QuadratureRule{RefLine}(...) # QuadratureRule{2, RefCube}(...) -> QuadratureRule{RefQuadrilateral}(...) # QuadratureRule{3, RefCube}(...) -> QuadratureRule{RefHexahedron}(...) -# QuadratureRule{1, RefCube}(...) -> FaceQuadratureRule{RefQuadrilateral}(...) -# QuadratureRule{2, RefCube}(...) -> FaceQuadratureRule{RefHexahedron}(...) +# QuadratureRule{1, RefCube}(...) -> FacetQuadratureRule{RefQuadrilateral}(...) +# QuadratureRule{2, RefCube}(...) -> FacetQuadratureRule{RefHexahedron}(...) function QuadratureRule{D, RefCube}(order::Int) where D shapes = (RefLine, RefQuadrilateral, RefHexahedron) msg = "`QuadratureRule{$D, RefCube}(order::Int)` is deprecated, use `QuadratureRule{$(shapes[D])}(order)` instead" if D == 1 || D == 2 - msg *= " (or `FaceQuadratureRule{$(shapes[D+1])}(order)` if this is a face quadrature rule)" + msg *= " (or `FacetQuadratureRule{$(shapes[D+1])}(order)` if this is a face quadrature rule)" end msg *= "." - Base.depwarn(msg, :QuadratureRule) - return QuadratureRule{shapes[D]}(order) + throw(DeprecationError(msg)) end function QuadratureRule{D, RefCube}(quad_type::Symbol, order::Int) where D shapes = (RefLine, RefQuadrilateral, RefHexahedron) msg = "`QuadratureRule{$D, RefCube}(quad_type::Symbol, order::Int)` is deprecated, use `QuadratureRule{$(shapes[D])}(quad_type, order)` instead" if D == 1 || D == 2 - msg *= " (or `FaceQuadratureRule{$(shapes[D+1])}(quad_type, order)` if this is a face quadrature rule)" + msg *= " (or `FacetQuadratureRule{$(shapes[D+1])}(quad_type, order)` if this is a face quadrature rule)" end msg *= "." 
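The deprecation stubs above spell out the old-to-new constructor mapping for interpolations. A minimal sketch of the replacement, reference-shape based spellings (illustrative only):

```julia
using Ferrite

ip_hex = Lagrange{RefHexahedron, 2}()        # was Lagrange{3, RefCube, 2}()
ip_tri = Lagrange{RefTriangle, 2}()          # was Lagrange{2, RefTetrahedron, 2}()
ip_cr  = CrouzeixRaviart{RefTriangle, 1}()   # was CrouzeixRaviart{2, 1}()
ip_ser = Serendipity{RefQuadrilateral, 2}()  # was Serendipity{2, RefCube, 2}()
```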
- Base.depwarn(msg, :QuadratureRule) - return QuadratureRule{shapes[D]}(quad_type, order) + throw(DeprecationError(msg)) end # QuadratureRule{2, RefTetrahedron}(...) -> QuadratureRule{RefTriangle}(...) # QuadratureRule{3, RefTetrahedron}(...) -> QuadratureRule{RefTetrahedron}(...) -# QuadratureRule{2, RefTetrahedron}(...) -> FaceQuadratureRule{RefTetrahedron}(...) +# QuadratureRule{2, RefTetrahedron}(...) -> FacetQuadratureRule{RefTetrahedron}(...) function QuadratureRule{D, RefTetrahedron}(order::Int) where D shapes = (nothing, RefTriangle, RefTetrahedron) msg = "`QuadratureRule{$D, RefTetrahedron}(order::Int)` is deprecated, use `QuadratureRule{$(shapes[D])}(order)` instead" if D == 2 - msg *= " (or `FaceQuadratureRule{RefTetrahedron)}(order)` if this is a face quadrature rule)" + msg *= " (or `FacetQuadratureRule{RefTetrahedron)}(order)` if this is a face quadrature rule)" end msg *= "." - Base.depwarn(msg, :QuadratureRule) - return QuadratureRule{shapes[D]}(order) + throw(DeprecationError(msg)) end function QuadratureRule{D, RefTetrahedron}(quad_type::Symbol, order::Int) where D shapes = (nothing, RefTriangle, RefTetrahedron) msg = "`QuadratureRule{$D, RefTetrahedron}(quad_type::Symbol, order::Int)` is deprecated, use `QuadratureRule{$(shapes[D])}(quad_type, order)` instead" if D == 2 - msg *= " (or `FaceQuadratureRule{RefTetrahedron)}(order)` if this is a face quadrature rule)" + msg *= " (or `FacetQuadratureRule{RefTetrahedron)}(order)` if this is a face quadrature rule)" end msg *= "." - Base.depwarn(msg, :QuadratureRule) - return QuadratureRule{shapes[D]}(quad_type, order) + throw(DeprecationError(msg)) end -# QuadratureRule{0, RefCube}(...) -> FaceQuadratureRule{RefLine} +# QuadratureRule{0, RefCube}(...) -> FacetQuadratureRule{RefLine} function QuadratureRule{0, RefCube}(order::Int) - msg = "`QuadratureRule{0, RefCube}(order::Int)` is deprecated, use `FaceQuadratureRule{RefLine}(order)` instead." - Base.depwarn(msg, :QuadratureRule) - return FaceQuadratureRule{RefLine}(order) + msg = "`QuadratureRule{0, RefCube}(order::Int)` is deprecated, use `FacetQuadratureRule{RefLine}(order)` instead." + throw(DeprecationError(msg)) end function QuadratureRule{0, RefCube}(quad_type::Symbol, order::Int) - msg = "`QuadratureRule{0, RefCube}(quad_type::Symbol, order::Int)` is deprecated, use `FaceQuadratureRule{RefLine}(quad_type, order)` instead." - Base.depwarn(msg, :QuadratureRule) - return FaceQuadratureRule{RefLine}(quad_type, order) + msg = "`QuadratureRule{0, RefCube}(quad_type::Symbol, order::Int)` is deprecated, use `FacetQuadratureRule{RefLine}(quad_type, order)` instead." + throw(DeprecationError(msg)) end -# QuadratureRule{1, RefTetrahedron}(...) -> FaceQuadratureRule{RefTriangle} +# QuadratureRule{1, RefTetrahedron}(...) -> FacetQuadratureRule{RefTriangle} function QuadratureRule{1, RefTetrahedron}(order::Int) - msg = "`QuadratureRule{1, RefTetrahedron}(order::Int)` is deprecated, use `FaceQuadratureRule{RefTriangle}(order)` instead." - Base.depwarn(msg, :QuadratureRule) - return FaceQuadratureRule{RefTriangle}(order) + msg = "`QuadratureRule{1, RefTetrahedron}(order::Int)` is deprecated, use `FacetQuadratureRule{RefTriangle}(order)` instead." + throw(DeprecationError(msg)) end function QuadratureRule{1, RefTetrahedron}(quad_type::Symbol, order::Int) - msg = "`QuadratureRule{1, RefTetrahedron}(quad_type::Symbol, order::Int)` is deprecated, use `FaceQuadratureRule{RefTriangle}(quad_type, order)` instead." 
- Base.depwarn(msg, :QuadratureRule) - return FaceQuadratureRule{RefTriangle}(quad_type, order) + msg = "`QuadratureRule{1, RefTetrahedron}(quad_type::Symbol, order::Int)` is deprecated, use `FacetQuadratureRule{RefTriangle}(quad_type, order)` instead." + throw(DeprecationError(msg)) end # Catch remaining cases in (Cell|Face)Value constructors @@ -280,38 +330,33 @@ function CellValues( ::Type{T}, qr::QuadratureRule{2, RefTetrahedron, TQ}, ip::Interpolation{RefTriangle}, gip::Interpolation{RefTriangle} = default_geometric_interpolation(ip), ) where {T, TQ} - qr′ = QuadratureRule{2, RefTriangle, T}(qr.weights, qr.points) - Base.depwarn("The input quadrature rule have the wrong reference shape, likely this comes from a constructor like `QuadratureRule{2, RefTetrahedron}(...)` which have been deprecated in favor of `QuadratureRule{RefTriangle}(...)`.", :CellValues) - CellValues(T, qr′, ip, gip) + msg = "The input quadrature rule have the wrong reference shape, likely this comes from a constructor like `QuadratureRule{2, RefTetrahedron}(...)` which have been deprecated in favor of `QuadratureRule{RefTriangle}(...)`." + throw(DeprecationError(msg)) end -function FaceValues(qr::QuadratureRule, ip::Interpolation, +function FacetValues(qr::QuadratureRule, ip::Interpolation, gip::Interpolation = default_geometric_interpolation(ip)) - return FaceValues(Float64, qr, ip, gip) + return FacetValues(Float64, qr, ip, gip) end -function FaceValues( +function FacetValues( ::Type{T}, qr::QuadratureRule{RefLine, TQ}, ip::Interpolation{RefQuadrilateral}, gip::Interpolation{RefQuadrilateral} = default_geometric_interpolation(ip), ) where {T, TQ} - Base.depwarn("The input quadrature rule have the wrong reference shape, likely this comes from a constructor like `QuadratureRule{1, RefCube}(...)` which have been deprecated in favor of `FaceQuadratureRule{RefQuadrilateral}(...)`.", :FaceValues) - qr′ = create_face_quad_rule(RefQuadrilateral, qr.weights, qr.points) - FaceValues(T, qr′, ip, gip) + msg = "The input quadrature rule have the wrong reference shape, likely this comes from a constructor like `QuadratureRule{1, RefCube}(...)` which have been deprecated in favor of `FacetQuadratureRule{RefQuadrilateral}(...)`." + throw(DeprecationError(msg)) end -function FaceValues( +function FacetValues( ::Type{T}, qr::QuadratureRule{RefQuadrilateral, TQ}, ip::Interpolation{RefHexahedron}, gip::Interpolation{RefHexahedron} = default_geometric_interpolation(ip), ) where {T, TQ} - Base.depwarn("The input quadrature rule have the wrong reference shape, likely this comes from a constructor like `QuadratureRule{2, RefCube}(...)` which have been deprecated in favor of `FaceQuadratureRule{RefHexahedron}(...)`.", :FaceValues) - qr′ = create_face_quad_rule(RefHexahedron, qr.weights, qr.points) - FaceValues(T, qr′, ip, gip) + msg = "The input quadrature rule have the wrong reference shape, likely this comes from a constructor like `QuadratureRule{2, RefCube}(...)` which have been deprecated in favor of `FacetQuadratureRule{RefHexahedron}(...)`." 
+ throw(DeprecationError(msg)) end -function FaceValues( +function FacetValues( ::Type{T}, qr::QuadratureRule{RefTriangle, TQ}, ip::Interpolation{RefTetrahedron}, gip::Interpolation{RefTetrahedron} = default_geometric_interpolation(ip), ) where {T, TQ} -@info "fdjsfdsf" - Base.depwarn("The input quadrature rule have the wrong reference shape, likely this comes from a constructor like `QuadratureRule{2, RefTetrahedron}(...)` which have been deprecated in favor of `FaceQuadratureRule{RefTetrahedron}(...)`.", :FaceValues) - qr′ = create_face_quad_rule(RefTetrahedron, qr.weights, qr.points) - FaceValues(T, qr′, ip, gip) + msg = "The input quadrature rule have the wrong reference shape, likely this comes from a constructor like `QuadratureRule{2, RefTetrahedron}(...)` which have been deprecated in favor of `FacetQuadratureRule{RefTetrahedron}(...)`." + throw(DeprecationError(msg)) end # Hide the last unused type param... @@ -328,16 +373,68 @@ function Base.show(io::IO, ::CrouzeixRaviart{shape, order}) where {shape, order} print(io, "CrouzeixRaviart{$(shape), $(order)}()") end -@deprecate value(ip::Interpolation, ξ::Vec) [shape_value(ip, ξ, i) for i in 1:getnbasefunctions(ip)] false -@deprecate derivative(ip::Interpolation, ξ::Vec) [shape_gradient(ip, ξ, i) for i in 1:getnbasefunctions(ip)] false -@deprecate value(ip::Interpolation, i::Int, ξ::Vec) shape_value(ip, ξ, i) false +function value(ip::Interpolation, ξ::Vec) + throw(DeprecationError("value(ip::Interpolation, ξ::Vec)" => "[reference_shape_value(ip, ξ, i) for i in 1:getnbasefunctions(ip)]")) +end +function derivative(ip::Interpolation, ξ::Vec) + throw(DeprecationError("derivative(ip::Interpolation, ξ::Vec)" => "[reference_shape_gradient(ip, ξ, i) for i in 1:getnbasefunctions(ip)]")) +end +function value(ip::Interpolation, i::Int, ξ::Vec) + throw(DeprecationError("value(ip::Interpolation, i::Int, ξ::Vec)" => "reference_shape_value(ip, ξ, i)")) +end export MixedDofHandler function MixedDofHandler(::AbstractGrid) - error("MixedDofHandler is the standard DofHandler in Ferrite now and has been renamed to DofHandler. -Use DofHandler even for mixed grids and fields on subdomains.") + throw(DeprecationError( + "MixedDofHandler is the standard DofHandler in Ferrite now and has been renamed " * + "to DofHandler. Use DofHandler even for mixed grids and fields on subdomains.", + )) +end + +export end_assemble +function end_assemble(args...) + throw(DeprecationError("end_assemble(args...)" => "finish_assemble(args...)")) end -@deprecate end_assemble finish_assemble -@deprecate get_point_values evaluate_at_points -@deprecate transform! transform_coordinates! +export get_point_values +function get_point_values(args...) + throw(DeprecationError("get_point_values(args...)" => "evaluate_at_points(args...)")) +end + +export transform! +function transform!(args...) + throw(DeprecationError("transform!(args...)" => "transform_coordinates!(args...)")) +end + +export addfaceset! # deprecated, export for backwards compatibility. +# Use warn to show for standard users. +function addfaceset!(grid::AbstractGrid, name, set::Union{Set{FaceIndex}, Vector{FaceIndex}}) + msg = "addfaceset! is deprecated, use addfacetset! instead and convert the set to FacetIndex." + throw(DeprecationError(msg)) +end +function addfaceset!(grid, name, f::Function; kwargs...) 
+ throw(DeprecationError("addfaceset!(args...)" => "addfacetset!(args...)")) +end + +export onboundary +function onboundary(::CellCache, ::Int) + throw(DeprecationError("`onboundary` is deprecated, check just the facetset instead of first checking `onboundary`.")) +end + +getdim(args...) = throw(DeprecationError("`Ferrite.getdim` is deprecated, use `getrefdim` or `getspatialdim` instead")) +getfielddim(args...) = throw(DeprecationError("`Ferrite.getfielddim(::AbstractDofHandler, args...) is deprecated, use `n_components` instead")) + +function default_interpolation(::Type{C}) where {C <: AbstractCell} + msg = "Ferrite.default_interpolation is deprecated, use the exported `geometric_interpolation` instead" + throw(DeprecationError(msg)) +end + +export create_sparsity_pattern +function create_sparsity_pattern(args...) + throw(DeprecationError("create_sparsity_pattern(args...)" => "allocate_matrix(args...; kwargs...)")) +end + +export VTKFile +function VTKFile(args...) + throw(DeprecationError("VTKFile(args...)" => "VTKGridFile(args...)")) +end diff --git a/src/exports.jl b/src/exports.jl index dab7e368fb..c49b24eba0 100644 --- a/src/exports.jl +++ b/src/exports.jl @@ -13,6 +13,7 @@ export RefPyramid, BubbleEnrichedLagrange, CrouzeixRaviart, + RannacherTurek, Lagrange, DiscontinuousLagrange, Serendipity, @@ -21,18 +22,19 @@ export # Quadrature QuadratureRule, - FaceQuadratureRule, + FacetQuadratureRule, getnquadpoints, # FEValues AbstractCellValues, - AbstractFaceValues, + AbstractFacetValues, CellValues, - FaceValues, + FacetValues, InterfaceValues, reinit!, shape_value, shape_gradient, + shape_hessian, shape_symmetric_gradient, shape_divergence, shape_curl, @@ -74,9 +76,11 @@ export FaceIndex, EdgeIndex, VertexIndex, + FacetIndex, + geometric_interpolation, ExclusiveTopology, getneighborhood, - faceskeleton, + facetskeleton, vertex_star_stencils, getstencil, getcells, @@ -86,19 +90,15 @@ export getcelltype, getcellset, getnodeset, - getfaceset, - getedgeset, + getfacetset, getvertexset, get_node_coordinate, getcoordinates, getcoordinates!, - onboundary, - nfaces, + nfacets, addnodeset!, - addfaceset!, - addboundaryfaceset!, - addedgeset!, - addboundaryedgeset!, + addfacetset!, + addboundaryfacetset!, addvertexset!, addboundaryvertexset!, addcellset!, @@ -108,7 +108,6 @@ export # Grid coloring create_coloring, ColoringAlgorithm, - vtk_cell_data_colors, # Dofs DofHandler, @@ -118,21 +117,30 @@ export ndofs_per_cell, celldofs!, celldofs, - create_sparsity_pattern, - create_symmetric_sparsity_pattern, dof_range, renumber!, DofOrder, evaluate_at_grid_nodes, apply_analytical!, +# Sparsity pattern + # AbstractSparsityPattern, + SparsityPattern, + BlockSparsityPattern, + init_sparsity_pattern, + add_sparsity_entries!, + add_cell_entries!, + add_interface_entries!, + add_constraint_entries!, + allocate_matrix, + # Constraints ConstraintHandler, Dirichlet, PeriodicDirichlet, - collect_periodic_faces, - collect_periodic_faces!, - PeriodicFacePair, + collect_periodic_facets, + collect_periodic_facets!, + PeriodicFacetPair, AffineConstraint, update!, apply!, @@ -143,13 +151,12 @@ export apply_assemble!, add!, free_dofs, - ApplyStrategy, # iterators CellCache, CellIterator, - FaceCache, - FaceIterator, + FacetCache, + FacetIterator, InterfaceCache, InterfaceIterator, UpdateFlags, @@ -161,13 +168,12 @@ export assemble!, finish_assemble, -# VTK export - vtk_grid, - vtk_point_data, - vtk_cell_data, - vtk_nodeset, - vtk_cellset, - vtk_save, +# exporting data + VTKGridFile, + write_solution, + 
write_cell_data, + write_projection, + write_node_data, # L2 Projection project, diff --git a/src/interpolations.jl b/src/interpolations.jl index 537759b15c..8ce950b1ee 100644 --- a/src/interpolations.jl +++ b/src/interpolations.jl @@ -21,6 +21,9 @@ The following interpolations are implemented: * `Lagrange{RefTriangle,5}` * `BubbleEnrichedLagrange{RefTriangle,1}` * `CrouzeixRaviart{RefTriangle, 1}` +* `CrouzeixRaviart{RefTetrahedron, 1}` +* `RannacherTurek{RefQuadrilateral, 1}` +* `RannacherTurek{RefHexahedron, 1}` * `Lagrange{RefHexahedron,1}` * `Lagrange{RefHexahedron,2}` * `Lagrange{RefTetrahedron,1}` @@ -54,12 +57,6 @@ n_components(::VectorInterpolation{vdim}) where {vdim} = vdim # Number of components that are allowed to prescribe in e.g. Dirichlet BC n_dbc_components(ip::Interpolation) = n_components(ip) -# TODO: Remove: this is a hotfix to apply constraints to embedded elements. -edges(ip::InterpolationByDim{2}) = faces(ip) -edgedof_indices(ip::InterpolationByDim{2}) = facedof_indices(ip) -edgedof_interior_indices(ip::InterpolationByDim{2}) = facedof_interior_indices(ip) -facedof_indices(ip::InterpolationByDim{1}) = vertexdof_indices(ip) - # TODO: Add a fallback that errors if there are multiple dofs per edge/face instead to force # interpolations to opt-out instead of silently do nothing. """ @@ -83,43 +80,19 @@ struct InterpolationInfo nvertexdofs::Vector{Int} nedgedofs::Vector{Int} nfacedofs::Vector{Int} - ncelldofs::Int + nvolumedofs::Int reference_dim::Int adjust_during_distribution::Bool n_copies::Int is_discontinuous::Bool end -function InterpolationInfo(interpolation::InterpolationByDim{3}, n_copies) +function InterpolationInfo(interpolation::Interpolation{shape}, n_copies) where {rdim, shape<:AbstractRefShape{rdim}} InterpolationInfo( [length(i) for i ∈ vertexdof_indices(interpolation)], [length(i) for i ∈ edgedof_interior_indices(interpolation)], [length(i) for i ∈ facedof_interior_indices(interpolation)], - length(celldof_interior_indices(interpolation)), - 3, - adjust_dofs_during_distribution(interpolation), - n_copies, - is_discontinuous(interpolation) - ) -end -function InterpolationInfo(interpolation::InterpolationByDim{2}, n_copies) - InterpolationInfo( - [length(i) for i ∈ vertexdof_indices(interpolation)], - Int[], - [length(i) for i ∈ facedof_interior_indices(interpolation)], - length(celldof_interior_indices(interpolation)), - 2, - adjust_dofs_during_distribution(interpolation), - n_copies, - is_discontinuous(interpolation) - ) -end -function InterpolationInfo(interpolation::InterpolationByDim{1}, n_copies) - InterpolationInfo( - [length(i) for i ∈ vertexdof_indices(interpolation)], - Int[], - Int[], - length(celldof_interior_indices(interpolation)), - 1, + length(volumedof_interior_indices(interpolation)), + rdim, adjust_dofs_during_distribution(interpolation), n_copies, is_discontinuous(interpolation) @@ -127,35 +100,19 @@ function InterpolationInfo(interpolation::InterpolationByDim{1}, n_copies) end InterpolationInfo(interpolation::Interpolation) = InterpolationInfo(interpolation, 1) -# Some redundant information about the geometry of the reference cells. 
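The export changes above swap the sparsity-pattern and VTK entry points. A rough end-to-end sketch of the renamed surface (illustrative only; the `generate_grid`/`close!`/`ndofs` helpers and the do-block form of `VTKGridFile` are assumed here):

```julia
using Ferrite

grid = generate_grid(Quadrilateral, (8, 8))
dh = DofHandler(grid)
add!(dh, :u, Lagrange{RefQuadrilateral, 1}())
close!(dh)

K = allocate_matrix(dh)  # replaces create_sparsity_pattern(dh)
u = zeros(ndofs(dh))

VTKGridFile("example", dh) do vtk   # replaces vtk_grid(...)
    write_solution(vtk, dh, u)      # roughly replaces vtk_point_data(vtk, dh, u)
end
```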
-nfaces(::Interpolation{RefHypercube{dim}}) where {dim} = 2*dim -nfaces(::Interpolation{RefTriangle}) = 3 -nfaces(::Interpolation{RefTetrahedron}) = 4 -nfaces(::Interpolation{RefPrism}) = 5 -nfaces(::Interpolation{RefPyramid}) = 5 - -nedges(::Interpolation{RefLine}) = 0 -nedges(::Interpolation{RefQuadrilateral}) = 0 -nedges(::Interpolation{RefHexahedron}) = 12 -nedges(::Interpolation{RefTriangle}) = 0 -nedges(::Interpolation{RefTetrahedron}) = 6 -nedges(::Interpolation{RefPrism}) = 9 -nedges(::Interpolation{RefPyramid}) = 8 - -nvertices(::Interpolation{RefHypercube{dim}}) where {dim} = 2^dim -nvertices(::Interpolation{RefTriangle}) = 3 -nvertices(::Interpolation{RefTetrahedron}) = 4 -nvertices(::Interpolation{RefPrism}) = 6 -nvertices(::Interpolation{RefPyramid}) = 5 +nvertices(::Interpolation{RefShape}) where RefShape = nvertices(RefShape) +nedges(::Interpolation{RefShape}) where RefShape = nedges(RefShape) +nfaces(::Interpolation{RefShape}) where RefShape = nfaces(RefShape) Base.copy(ip::Interpolation) = ip """ - Ferrite.getdim(::Interpolation) + Ferrite.getrefdim(::Interpolation) Return the dimension of the reference element for a given interpolation. """ -@inline getdim(::Interpolation{shape}) where {dim, shape <: AbstractRefShape{dim}} = dim +getrefdim(::Interpolation) # To make doc-filtering work +@inline getrefdim(::Interpolation{RefShape}) where RefShape = getrefdim(RefShape) """ Ferrite.getrefshape(::Interpolation)::AbstractRefShape @@ -190,64 +147,63 @@ getnbasefunctions(::Interpolation) # celldof: dof that is local to the element """ - shape_values!(values::AbstractArray{T}, ip::Interpolation, ξ::Vec) + reference_shape_values!(values::AbstractArray{T}, ip::Interpolation, ξ::Vec) Evaluate all shape functions of `ip` at once at the reference point `ξ` and store them in `values`. """ -@propagate_inbounds function shape_values!(values::AT, ip::IP, ξ::Vec) where {IP <: Interpolation, AT <: AbstractArray} +@propagate_inbounds function reference_shape_values!(values::AT, ip::IP, ξ::Vec) where {IP <: Interpolation, AT <: AbstractArray} @boundscheck checkbounds(values, 1:getnbasefunctions(ip)) @inbounds for i in 1:getnbasefunctions(ip) - values[i] = shape_value(ip, ξ, i) + values[i] = reference_shape_value(ip, ξ, i) end end """ - shape_gradients!(gradients::AbstractArray, ip::Interpolation, ξ::Vec) + reference_shape_gradients!(gradients::AbstractArray, ip::Interpolation, ξ::Vec) Evaluate all shape function gradients of `ip` at once at the reference point `ξ` and store them in `gradients`. """ -function shape_gradients!(gradients::AT, ip::IP, ξ::Vec) where {IP <: Interpolation, AT <: AbstractArray} +function reference_shape_gradients!(gradients::AT, ip::IP, ξ::Vec) where {IP <: Interpolation, AT <: AbstractArray} @boundscheck checkbounds(gradients, 1:getnbasefunctions(ip)) @inbounds for i in 1:getnbasefunctions(ip) - gradients[i] = shape_gradient(ip, ξ, i) + gradients[i] = reference_shape_gradient(ip, ξ, i) end end """ - shape_gradients_and_values!(gradients::AbstractArray, values::AbstractArray, ip::Interpolation, ξ::Vec) + reference_shape_gradients_and_values!(gradients::AbstractArray, values::AbstractArray, ip::Interpolation, ξ::Vec) Evaluate all shape function gradients and values of `ip` at once at the reference point `ξ` and store them in `values`. 
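The batched `reference_shape_values!`/`reference_shape_gradients!` helpers documented above fill preallocated buffers. A small sketch (illustrative only; `Vec` is re-exported from Tensors.jl, and the helpers are unexported, hence the `Ferrite.` qualification):

```julia
using Ferrite

ip = Lagrange{RefTriangle, 1}()
ξ  = Vec{2}((1 / 3, 1 / 3))

N  = zeros(Float64, getnbasefunctions(ip))
dN = zeros(Vec{2, Float64}, getnbasefunctions(ip))

Ferrite.reference_shape_values!(N, ip, ξ)      # N[i]  = reference_shape_value(ip, ξ, i)
Ferrite.reference_shape_gradients!(dN, ip, ξ)  # dN[i] = reference_shape_gradient(ip, ξ, i)
```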
""" -function shape_gradients_and_values!(gradients::GAT, values::SAT, ip::IP, ξ::Vec) where {IP <: Interpolation, SAT <: AbstractArray, GAT <: AbstractArray} +function reference_shape_gradients_and_values!(gradients::GAT, values::SAT, ip::IP, ξ::Vec) where {IP <: Interpolation, SAT <: AbstractArray, GAT <: AbstractArray} @boundscheck checkbounds(gradients, 1:getnbasefunctions(ip)) @boundscheck checkbounds(values, 1:getnbasefunctions(ip)) @inbounds for i in 1:getnbasefunctions(ip) - gradients[i], values[i] = shape_gradient_and_value(ip, ξ, i) + gradients[i], values[i] = reference_shape_gradient_and_value(ip, ξ, i) end end -#= PR798 """ - shape_hessians_gradients_and_values!(hessians::AbstractVector, gradients::AbstractVector, values::AbstractVector, ip::Interpolation, ξ::Vec) + reference_shape_hessians_gradients_and_values!(hessians::AbstractVector, gradients::AbstractVector, values::AbstractVector, ip::Interpolation, ξ::Vec) Evaluate all shape function hessians, gradients and values of `ip` at once at the reference point `ξ` and store them in `hessians`, `gradients`, and `values`. """ -@propagate_inbounds function shape_hessians_gradients_and_values!(hessians::AbstractVector, gradients::AbstractVector, values::AbstractVector, ip::Interpolation, ξ::Vec) +@propagate_inbounds function reference_shape_hessians_gradients_and_values!(hessians::AbstractVector, gradients::AbstractVector, values::AbstractVector, ip::Interpolation, ξ::Vec) @boundscheck checkbounds(hessians, 1:getnbasefunctions(ip)) @boundscheck checkbounds(gradients, 1:getnbasefunctions(ip)) @boundscheck checkbounds(values, 1:getnbasefunctions(ip)) @inbounds for i in 1:getnbasefunctions(ip) - hessians[i], gradients[i], values[i] = shape_hessian_gradient_and_value(ip, ξ, i) + hessians[i], gradients[i], values[i] = reference_shape_hessian_gradient_and_value(ip, ξ, i) end end -=# + """ - shape_value(ip::Interpolation, ξ::Vec, i::Int) + reference_shape_value(ip::Interpolation, ξ::Vec, i::Int) Evaluate the value of the `i`th shape function of the interpolation `ip` at a point `ξ` on the reference element. The index `i` must @@ -257,39 +213,38 @@ match the index in [`vertices(::Interpolation)`](@ref), [`faces(::Interpolation) For nodal interpolations the indices also must match the indices of [`reference_coordinates(::Interpolation)`](@ref). """ -shape_value(ip::Interpolation, ξ::Vec, i::Int) +reference_shape_value(ip::Interpolation, ξ::Vec, i::Int) """ - shape_gradient(ip::Interpolation, ξ::Vec, i::Int) + reference_shape_gradient(ip::Interpolation, ξ::Vec, i::Int) Evaluate the gradient of the `i`th shape function of the interpolation `ip` in reference coordinate `ξ`. """ -function shape_gradient(ip::Interpolation, ξ::Vec, i::Int) - return Tensors.gradient(x -> shape_value(ip, x, i), ξ) +function reference_shape_gradient(ip::Interpolation, ξ::Vec, i::Int) + return Tensors.gradient(x -> reference_shape_value(ip, x, i), ξ) end """ - shape_gradient_and_value(ip::Interpolation, ξ::Vec, i::Int) + reference_shape_gradient_and_value(ip::Interpolation, ξ::Vec, i::Int) -Optimized version combining the evaluation [`Ferrite.shape_value(::Interpolation)`](@ref) -and [`Ferrite.shape_gradient(::Interpolation)`](@ref). +Optimized version combining the evaluation [`Ferrite.reference_shape_value(::Interpolation)`](@ref) +and [`Ferrite.reference_shape_gradient(::Interpolation)`](@ref). 
""" -function shape_gradient_and_value(ip::Interpolation, ξ::Vec, i::Int) - return gradient(x -> shape_value(ip, x, i), ξ, :all) +function reference_shape_gradient_and_value(ip::Interpolation, ξ::Vec, i::Int) + return gradient(x -> reference_shape_value(ip, x, i), ξ, :all) end -#= PR798 """ - shape_hessian_gradient_and_value(ip::Interpolation, ξ::Vec, i::Int) + reference_shape_hessian_gradient_and_value(ip::Interpolation, ξ::Vec, i::Int) -Optimized version combining the evaluation [`Ferrite.shape_value(::Interpolation)`](@ref), -[`Ferrite.shape_gradient(::Interpolation)`](@ref), and the gradient of the latter. +Optimized version combining the evaluation [`Ferrite.reference_shape_value(::Interpolation)`](@ref), +[`Ferrite.reference_shape_gradient(::Interpolation)`](@ref), and the gradient of the latter. """ -function shape_hessian_gradient_and_value(ip::Interpolation, ξ::Vec, i::Int) - return hessian(x -> shape_value(ip, x, i), ξ, :all) +function reference_shape_hessian_gradient_and_value(ip::Interpolation, ξ::Vec, i::Int) + return hessian(x -> reference_shape_value(ip, x, i), ξ, :all) end -=# + """ reference_coordinates(ip::Interpolation) @@ -299,7 +254,7 @@ and indices corresponding to the indices of a dof in [`vertices`](@ref), [`faces [`edges`](@ref). Only required for nodal interpolations. - + TODO: Separate nodal and non-nodal interpolations. """ reference_coordinates(::Interpolation) @@ -401,24 +356,24 @@ edge dofs are included here. The dofs appearing in the tuple must be continuous and increasing! The first dof must be the computed via "last edge interior dof index + 1", if face dofs exist. """ -facedof_interior_indices(::Interpolation) +facedof_interior_indices(::Interpolation) """ - celldof_interior_indices(ip::Interpolation) + volumedof_interior_indices(ip::Interpolation) -Tuple containing the dof indices associated with the interior of the cell. +Tuple containing the dof indices associated with the interior of a volume. !!! note - The dofs appearing in the tuple must be continuous and increasing! Celldofs are + The dofs appearing in the tuple must be continuous and increasing, volumedofs are enumerated last. 
""" -celldof_interior_indices(::Interpolation) = () +volumedof_interior_indices(::Interpolation) = () # Some helpers to skip boilerplate -edgedof_indices(ip::InterpolationByDim{3}) = ntuple(_ -> (), nedges(ip)) -edgedof_interior_indices(ip::InterpolationByDim{3}) = ntuple(_ -> (), nedges(ip)) -facedof_indices(ip::Union{InterpolationByDim{2}, InterpolationByDim{3}}) = ntuple(_ -> (), nfaces(ip)) -facedof_interior_indices(ip::Union{InterpolationByDim{2}, InterpolationByDim{3}}) = ntuple(_ -> (), nfaces(ip)) +edgedof_indices(ip::Interpolation) = ntuple(_ -> (), nedges(ip)) +edgedof_interior_indices(ip::Interpolation) = ntuple(_ -> (), nedges(ip)) +facedof_indices(ip::Interpolation) = ntuple(_ -> (), nfaces(ip)) +facedof_interior_indices(ip::Interpolation) = ntuple(_ -> (), nfaces(ip)) """ boundarydof_indices(::Type{<:BoundaryIndex}) @@ -427,9 +382,23 @@ Helper function to generically dispatch on the correct dof sets of a boundary en """ boundarydof_indices(::Type{<:BoundaryIndex}) -boundarydof_indices(::Type{FaceIndex}) = Ferrite.facedof_indices -boundarydof_indices(::Type{EdgeIndex}) = Ferrite.edgedof_indices -boundarydof_indices(::Type{VertexIndex}) = Ferrite.vertexdof_indices +boundarydof_indices(::Type{FaceIndex}) = facedof_indices +boundarydof_indices(::Type{EdgeIndex}) = edgedof_indices +boundarydof_indices(::Type{VertexIndex}) = vertexdof_indices + +facetdof_indices(ip::InterpolationByDim{3}) = facedof_indices(ip) +facetdof_indices(ip::InterpolationByDim{2}) = edgedof_indices(ip) +facetdof_indices(ip::InterpolationByDim{1}) = vertexdof_indices(ip) +facetdof_interior_indices(ip::InterpolationByDim{3}) = facedof_interior_indices(ip) +facetdof_interior_indices(ip::InterpolationByDim{2}) = edgedof_interior_indices(ip) +facetdof_interior_indices(ip::InterpolationByDim{1}) = ntuple(_ -> (), nvertices(ip)) +dirichlet_facetdof_indices(ip::InterpolationByDim{3}) = dirichlet_facedof_indices(ip) +dirichlet_facetdof_indices(ip::InterpolationByDim{2}) = dirichlet_edgedof_indices(ip) +dirichlet_facetdof_indices(ip::InterpolationByDim{1}) = dirichlet_vertexdof_indices(ip) + +nfacets(ip::InterpolationByDim{3}) = nfaces(ip) +nfacets(ip::InterpolationByDim{2}) = nedges(ip) +nfacets(ip::InterpolationByDim{1}) = nvertices(ip) """ is_discontinuous(::Interpolation) @@ -448,9 +417,10 @@ Used internally in [`ConstraintHandler`](@ref) and defaults to [`boundarydof_ind """ dirichlet_boundarydof_indices(::Type{<:BoundaryIndex}) -dirichlet_boundarydof_indices(::Type{FaceIndex}) = Ferrite.dirichlet_facedof_indices -dirichlet_boundarydof_indices(::Type{EdgeIndex}) = Ferrite.dirichlet_edgedof_indices -dirichlet_boundarydof_indices(::Type{VertexIndex}) = Ferrite.dirichlet_vertexdof_indices +dirichlet_boundarydof_indices(::Type{FaceIndex}) = dirichlet_facedof_indices +dirichlet_boundarydof_indices(::Type{EdgeIndex}) = dirichlet_edgedof_indices +dirichlet_boundarydof_indices(::Type{VertexIndex}) = dirichlet_vertexdof_indices +dirichlet_boundarydof_indices(::Type{FacetIndex}) = dirichlet_facetdof_indices ######################### # DiscontinuousLagrange # @@ -473,7 +443,7 @@ getnbasefunctions(::DiscontinuousLagrange{shape,order}) where {shape,order} = ge getnbasefunctions(::DiscontinuousLagrange{shape,0}) where {shape} = 1 # This just moves all dofs into the interior of the element. -celldof_interior_indices(ip::DiscontinuousLagrange) = ntuple(i->i, getnbasefunctions(ip)) +volumedof_interior_indices(ip::DiscontinuousLagrange) = ntuple(i->i, getnbasefunctions(ip)) # Mirror the Lagrange element for now to avoid repeating. 
dirichlet_facedof_indices(ip::DiscontinuousLagrange{shape, order}) where {shape, order} = dirichlet_facedof_indices(Lagrange{shape, order}()) @@ -484,8 +454,8 @@ dirichlet_vertexdof_indices(ip::DiscontinuousLagrange{shape, order}) where {shap function reference_coordinates(ip::DiscontinuousLagrange{shape, order}) where {shape, order} return reference_coordinates(Lagrange{shape,order}()) end -function shape_value(::DiscontinuousLagrange{shape, order}, ξ::Vec{dim}, i::Int) where {dim, shape <: AbstractRefShape{dim}, order} - return shape_value(Lagrange{shape, order}(), ξ, i) +function reference_shape_value(::DiscontinuousLagrange{shape, order}, ξ::Vec{dim}, i::Int) where {dim, shape <: AbstractRefShape{dim}, order} + return reference_shape_value(Lagrange{shape, order}(), ξ, i) end # Excepting the L0 element. @@ -501,7 +471,7 @@ function reference_coordinates(ip::DiscontinuousLagrange{RefTetrahedron,0}) return [Vec{3,Float64}((1/4,1/4,1/4))] end -function shape_value(ip::DiscontinuousLagrange{shape, 0}, ::Vec{dim, T}, i::Int) where {dim, shape <: AbstractRefShape{dim}, T} +function reference_shape_value(ip::DiscontinuousLagrange{shape, 0}, ::Vec{dim, T}, i::Int) where {dim, shape <: AbstractRefShape{dim}, T} i > 1 && throw(ArgumentError("no shape function $i for interpolation $ip")) return one(T) end @@ -511,6 +481,11 @@ is_discontinuous(::Type{<:DiscontinuousLagrange}) = true ############ # Lagrange # ############ +""" + Lagrange{refshape, order} <: ScalarInterpolation + +Standard continuous Lagrange polynomials with equidistant node placement. +""" struct Lagrange{shape, order, unused} <: ScalarInterpolation{shape, order} function Lagrange{shape, order}() where {shape <: AbstractRefShape, order} new{shape, order, Nothing}() @@ -538,12 +513,14 @@ getlowerorder(::Lagrange{shape,1}) where {shape} = DiscontinuousLagrange{shape,0 ############################ getnbasefunctions(::Lagrange{RefLine,1}) = 2 +edgedof_indices(::Lagrange{RefLine,1}) = ((1,2),) + function reference_coordinates(::Lagrange{RefLine,1}) return [Vec{1, Float64}((-1.0,)), Vec{1, Float64}(( 1.0,))] end -function shape_value(ip::Lagrange{RefLine, 1}, ξ::Vec{1}, i::Int) +function reference_shape_value(ip::Lagrange{RefLine, 1}, ξ::Vec{1}, i::Int) ξ_x = ξ[1] i == 1 && return (1 - ξ_x) / 2 i == 2 && return (1 + ξ_x) / 2 @@ -555,8 +532,8 @@ end ############################ getnbasefunctions(::Lagrange{RefLine,2}) = 3 -facedof_indices(::Lagrange{RefLine,2}) = ((1,), (2,)) -celldof_interior_indices(::Lagrange{RefLine,2}) = (3,) +edgedof_indices(::Lagrange{RefLine,2}) = ((1,2,3),) +edgedof_interior_indices(::Lagrange{RefLine,2}) = (3,) function reference_coordinates(::Lagrange{RefLine,2}) return [Vec{1, Float64}((-1.0,)), @@ -564,7 +541,7 @@ function reference_coordinates(::Lagrange{RefLine,2}) Vec{1, Float64}(( 0.0,))] end -function shape_value(ip::Lagrange{RefLine, 2}, ξ::Vec{1}, i::Int) +function reference_shape_value(ip::Lagrange{RefLine, 2}, ξ::Vec{1}, i::Int) ξ_x = ξ[1] i == 1 && return ξ_x * (ξ_x - 1) / 2 i == 2 && return ξ_x * (ξ_x + 1) / 2 @@ -577,7 +554,8 @@ end ##################################### getnbasefunctions(::Lagrange{RefQuadrilateral,1}) = 4 -facedof_indices(::Lagrange{RefQuadrilateral,1}) = ((1,2), (2,3), (3,4), (4,1)) +edgedof_indices(::Lagrange{RefQuadrilateral,1}) = ((1,2), (2,3), (3,4), (4,1)) +facedof_indices(ip::Lagrange{RefQuadrilateral,1}) = (ntuple(i->i, getnbasefunctions(ip)),) function reference_coordinates(::Lagrange{RefQuadrilateral,1}) return [Vec{2, Float64}((-1.0, -1.0)), @@ -586,7 +564,7 @@ 
function reference_coordinates(::Lagrange{RefQuadrilateral,1}) Vec{2, Float64}((-1.0, 1.0,))] end -function shape_value(ip::Lagrange{RefQuadrilateral, 1}, ξ::Vec{2}, i::Int) +function reference_shape_value(ip::Lagrange{RefQuadrilateral, 1}, ξ::Vec{2}, i::Int) ξ_x = ξ[1] ξ_y = ξ[2] i == 1 && return (1 - ξ_x) * (1 - ξ_y) / 4 @@ -601,9 +579,10 @@ end ##################################### getnbasefunctions(::Lagrange{RefQuadrilateral,2}) = 9 -facedof_indices(::Lagrange{RefQuadrilateral,2}) = ((1,2, 5), (2,3, 6), (3,4, 7), (4,1, 8)) -facedof_interior_indices(::Lagrange{RefQuadrilateral,2}) = ((5,), (6,), (7,), (8,)) -celldof_interior_indices(::Lagrange{RefQuadrilateral,2}) = (9,) +edgedof_indices(::Lagrange{RefQuadrilateral,2}) = ((1,2, 5), (2,3, 6), (3,4, 7), (4,1, 8)) +edgedof_interior_indices(::Lagrange{RefQuadrilateral,2}) = ((5,), (6,), (7,), (8,)) +facedof_indices(ip::Lagrange{RefQuadrilateral,2}) = (ntuple(i->i, getnbasefunctions(ip)),) +facedof_interior_indices(::Lagrange{RefQuadrilateral,2}) = ((9,)) function reference_coordinates(::Lagrange{RefQuadrilateral,2}) return [Vec{2, Float64}((-1.0, -1.0)), @@ -617,7 +596,7 @@ function reference_coordinates(::Lagrange{RefQuadrilateral,2}) Vec{2, Float64}(( 0.0, 0.0))] end -function shape_value(ip::Lagrange{RefQuadrilateral, 2}, ξ::Vec{2}, i::Int) +function reference_shape_value(ip::Lagrange{RefQuadrilateral, 2}, ξ::Vec{2}, i::Int) ξ_x = ξ[1] ξ_y = ξ[2] i == 1 && return (ξ_x^2 - ξ_x) * (ξ_y^2 - ξ_y) / 4 @@ -637,9 +616,10 @@ end ##################################### getnbasefunctions(::Lagrange{RefQuadrilateral, 3}) = 16 -facedof_indices(::Lagrange{RefQuadrilateral, 3}) = ((1,2, 5,6), (2,3, 7,8), (3,4, 9,10), (4,1, 11,12)) -facedof_interior_indices(::Lagrange{RefQuadrilateral, 3}) = ((5,6), (7,8), (9,10), (11,12)) -celldof_interior_indices(::Lagrange{RefQuadrilateral, 3}) = (13,14,15,16) +edgedof_indices(::Lagrange{RefQuadrilateral, 3}) = ((1,2, 5,6), (2,3, 7,8), (3,4, 9,10), (4,1, 11,12)) +edgedof_interior_indices(::Lagrange{RefQuadrilateral, 3}) = ((5,6), (7,8), (9,10), (11,12)) +facedof_indices(ip::Lagrange{RefQuadrilateral,3}) = (ntuple(i->i, getnbasefunctions(ip)),) +facedof_interior_indices(::Lagrange{RefQuadrilateral, 3}) = ((13,14,15,16,),) function reference_coordinates(::Lagrange{RefQuadrilateral, 3}) return [Vec{2, Float64}((-1.0, -1.0)), @@ -660,7 +640,7 @@ function reference_coordinates(::Lagrange{RefQuadrilateral, 3}) Vec{2, Float64}(( 1/3, 1/3))] end -function shape_value(ip::Lagrange{RefQuadrilateral, 3}, ξ::Vec{2}, i::Int) +function reference_shape_value(ip::Lagrange{RefQuadrilateral, 3}, ξ::Vec{2}, i::Int) # See https://defelement.com/elements/examples/quadrilateral-Q-3.html # Transform domain from [-1, 1] × [-1, 1] to [0, 1] × [0, 1] ξ_x = (ξ[1]+1)/2 @@ -689,7 +669,8 @@ end ################################ getnbasefunctions(::Lagrange{RefTriangle,1}) = 3 -facedof_indices(::Lagrange{RefTriangle,1}) = ((1,2), (2,3), (3,1)) +edgedof_indices(::Lagrange{RefTriangle,1}) = ((1,2), (2,3), (3,1)) +facedof_indices(ip::Lagrange{RefTriangle,1}) = (ntuple(i->i, getnbasefunctions(ip)),) function reference_coordinates(::Lagrange{RefTriangle,1}) return [Vec{2, Float64}((1.0, 0.0)), @@ -697,7 +678,7 @@ function reference_coordinates(::Lagrange{RefTriangle,1}) Vec{2, Float64}((0.0, 0.0))] end -function shape_value(ip::Lagrange{RefTriangle, 1}, ξ::Vec{2}, i::Int) +function reference_shape_value(ip::Lagrange{RefTriangle, 1}, ξ::Vec{2}, i::Int) ξ_x = ξ[1] ξ_y = ξ[2] i == 1 && return ξ_x @@ -711,8 +692,9 @@ end ################################ 
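Drawing on the facet helpers and the renamed `reference_shape_value` methods above, a quick property sketch (illustrative only; the unexported helpers are `Ferrite.`-qualified):

```julia
using Ferrite

ip = Lagrange{RefQuadrilateral, 2}()
ξ  = Vec{2}((0.1, -0.3))  # arbitrary point on the reference square

# Facets dispatch on the reference dimension: edges in 2D, faces in 3D.
Ferrite.nfacets(ip)                                          # 4 edges of RefQuadrilateral
Ferrite.facetdof_indices(ip) == Ferrite.edgedof_indices(ip)  # true for a 2D reference shape

# The nodal Lagrange basis forms a partition of unity on the reference element.
sum(Ferrite.reference_shape_value(ip, ξ, i) for i in 1:getnbasefunctions(ip)) ≈ 1.0  # true
```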
getnbasefunctions(::Lagrange{RefTriangle,2}) = 6 -facedof_indices(::Lagrange{RefTriangle,2}) = ((1,2,4), (2,3,5), (3,1,6)) -facedof_interior_indices(::Lagrange{RefTriangle,2}) = ((4,), (5,), (6,)) +edgedof_indices(::Lagrange{RefTriangle,2}) = ((1,2,4), (2,3,5), (3,1,6)) +edgedof_interior_indices(::Lagrange{RefTriangle,2}) = ((4,), (5,), (6,)) +facedof_indices(ip::Lagrange{RefTriangle,2}) = (ntuple(i->i, getnbasefunctions(ip)),) function reference_coordinates(::Lagrange{RefTriangle,2}) return [Vec{2, Float64}((1.0, 0.0)), @@ -723,7 +705,7 @@ function reference_coordinates(::Lagrange{RefTriangle,2}) Vec{2, Float64}((0.5, 0.0))] end -function shape_value(ip::Lagrange{RefTriangle, 2}, ξ::Vec{2}, i::Int) +function reference_shape_value(ip::Lagrange{RefTriangle, 2}, ξ::Vec{2}, i::Int) ξ_x = ξ[1] ξ_y = ξ[2] γ = 1 - ξ_x - ξ_y @@ -761,7 +743,7 @@ const permdof2DLagrange2Tri345 = Dict{Int,Vector{Int}}( 5 => [6, 21, 1, 11, 15, 18, 20, 19, 16, 12, 7, 2, 3, 4, 5, 8, 9, 10, 13, 14, 17], ) -function facedof_indices(ip::Lagrange2Tri345) +function edgedof_indices(ip::Lagrange2Tri345) order = getorder(ip) order == 1 && return ((1,2), (2,3), (3,1)) order == 2 && return ((1,2,4), (2,3,5), (3,1,6)) @@ -772,7 +754,7 @@ function facedof_indices(ip::Lagrange2Tri345) throw(ArgumentError("Unsupported order $order for Lagrange on triangles.")) end -function facedof_interior_indices(ip::Lagrange2Tri345) +function edgedof_interior_indices(ip::Lagrange2Tri345) order = getorder(ip) order == 1 && return ((), (), ()) order == 2 && return ((4,), (5,), (6,)) @@ -782,11 +764,13 @@ function facedof_interior_indices(ip::Lagrange2Tri345) throw(ArgumentError("Unsupported order $order for Lagrange on triangles.")) end -function celldof_interior_indices(ip::Lagrange2Tri345) +facedof_indices(ip::Lagrange2Tri345) = (ntuple(i->i, getnbasefunctions(ip)),) + +function facedof_interior_indices(ip::Lagrange2Tri345) order = getorder(ip) ncellintdofs = (order + 1) * (order + 2) ÷ 2 - 3 * order totaldofs = getnbasefunctions(ip) - return ntuple(i->totaldofs-ncellintdofs+i, ncellintdofs) + return (ntuple(i->totaldofs-ncellintdofs+i, ncellintdofs),) end function reference_coordinates(ip::Lagrange2Tri345) @@ -800,7 +784,7 @@ function reference_coordinates(ip::Lagrange2Tri345) return permute!(coordpts, permdof2DLagrange2Tri345[order]) end -function shape_value(ip::Lagrange2Tri345, ξ::Vec{2}, i::Int) +function reference_shape_value(ip::Lagrange2Tri345, ξ::Vec{2}, i::Int) if !(0 < i <= getnbasefunctions(ip)) throw(ArgumentError("no shape function $i for interpolation $ip")) end @@ -846,7 +830,7 @@ function reference_coordinates(::Lagrange{RefTetrahedron,1}) Vec{3, Float64}((0.0, 0.0, 1.0))] end -function shape_value(ip::Lagrange{RefTetrahedron, 1}, ξ::Vec{3}, i::Int) +function reference_shape_value(ip::Lagrange{RefTetrahedron, 1}, ξ::Vec{3}, i::Int) ξ_x = ξ[1] ξ_y = ξ[2] ξ_z = ξ[3] @@ -881,7 +865,7 @@ end # http://www.colorado.edu/engineering/CAS/courses.d/AFEM.d/AFEM.Ch09.d/AFEM.Ch09.pdf # http://www.colorado.edu/engineering/CAS/courses.d/AFEM.d/AFEM.Ch10.d/AFEM.Ch10.pdf -function shape_value(ip::Lagrange{RefTetrahedron, 2}, ξ::Vec{3}, i::Int) +function reference_shape_value(ip::Lagrange{RefTetrahedron, 2}, ξ::Vec{3}, i::Int) ξ_x = ξ[1] ξ_y = ξ[2] ξ_z = ξ[3] @@ -917,7 +901,7 @@ function reference_coordinates(::Lagrange{RefHexahedron,1}) Vec{3, Float64}((-1.0, 1.0, 1.0))] end -function shape_value(ip::Lagrange{RefHexahedron, 1}, ξ::Vec{3}, i::Int) +function reference_shape_value(ip::Lagrange{RefHexahedron, 1}, ξ::Vec{3}, i::Int) ξ_x = ξ[1] ξ_y = 
ξ[2] ξ_z = ξ[3] @@ -969,7 +953,7 @@ edgedof_interior_indices(::Lagrange{RefHexahedron,2}) = ( (9,), (10,), (11,), (12,), (13,), (14,), (15,), (16,), (17), (18,), (19,), (20,) ) -celldof_interior_indices(::Lagrange{RefHexahedron,2}) = (27,) +volumedof_interior_indices(::Lagrange{RefHexahedron,2}) = (27,) function reference_coordinates(::Lagrange{RefHexahedron,2}) # vertex @@ -1005,7 +989,7 @@ function reference_coordinates(::Lagrange{RefHexahedron,2}) ] end -function shape_value(ip::Lagrange{RefHexahedron, 2}, ξ::Vec{3, T}, i::Int) where {T} +function reference_shape_value(ip::Lagrange{RefHexahedron, 2}, ξ::Vec{3, T}, i::Int) where {T} # Some local helpers. @inline φ₁(x::T) = -x*(1-x)/2 @inline φ₂(x::T) = (1+x)*(1-x) @@ -1064,7 +1048,7 @@ function reference_coordinates(::Lagrange{RefPrism,1}) Vec{3, Float64}((0.0, 1.0, 1.0))] end -function shape_value(ip::Lagrange{RefPrism,1}, ξ::Vec{3}, i::Int) +function reference_shape_value(ip::Lagrange{RefPrism,1}, ξ::Vec{3}, i::Int) (x,y,z) = ξ i == 1 && return 1-x-y -z*(1-x-y) i == 2 && return x*(1-z) @@ -1083,19 +1067,19 @@ end getnbasefunctions(::Lagrange{RefPrism,2}) = 18 facedof_indices(::Lagrange{RefPrism,2}) = ( - #Vertices| Edges | Face + #Vertices| Edges | Face (1,3,2 , 8,10,7 ), - (1,2,5,4, 7,11,13,9, 16), + (1,2,5,4, 7,11,13,9, 16), (3,1,4,6, 8,9,14,12, 17), (2,3,6,5, 10,12,15,11, 18), (4,5,6 , 13,15,14 ), ) facedof_interior_indices(::Lagrange{RefPrism,2}) = ( - #Vertices| Edges | Face - (), - (16,), - (17,), - (18,), + #Vertices| Edges | Face + (), + (16,), + (17,), + (18,), (), ) edgedof_indices(::Lagrange{RefPrism,2}) = ( @@ -1144,7 +1128,7 @@ function reference_coordinates(::Lagrange{RefPrism,2}) Vec{3, Float64}((1/2, 1/2, 1/2)),] end -function shape_value(ip::Lagrange{RefPrism, 2}, ξ::Vec{3}, i::Int) +function reference_shape_value(ip::Lagrange{RefPrism, 2}, ξ::Vec{3}, i::Int) (x,y,z) = ξ x² = x*x y² = y*y @@ -1177,7 +1161,7 @@ end getnbasefunctions(::Lagrange{RefPyramid,1}) = 5 facedof_indices(::Lagrange{RefPyramid,1}) = ((1,3,4,2), (1,2,5), (1,5,3), (2,4,5), (3,5,4), ) edgedof_indices(::Lagrange{RefPyramid,1}) = ((1,2), (1,3), (1,5), (2,4), (2,5), (4,3), (3,5), (4,5)) - + function reference_coordinates(::Lagrange{RefPyramid,1}) return [Vec{3, Float64}((0.0, 0.0, 0.0)), Vec{3, Float64}((1.0, 0.0, 0.0)), @@ -1186,7 +1170,7 @@ function reference_coordinates(::Lagrange{RefPyramid,1}) Vec{3, Float64}((0.0, 0.0, 1.0))] end -function shape_value(ip::Lagrange{RefPyramid,1}, ξ::Vec{3,T}, i::Int) where T +function reference_shape_value(ip::Lagrange{RefPyramid,1}, ξ::Vec{3,T}, i::Int) where T (x,y,z) = ξ zzero = z ≈ one(T) i == 1 && return zzero ? 
zero(T) : (-x*y+(z-1)*(-x-y-z+1))/(z-1) @@ -1203,28 +1187,28 @@ end getnbasefunctions(::Lagrange{RefPyramid,2}) = 14 facedof_indices(::Lagrange{RefPyramid,2}) = ( - #Vertices | Edges | Face - (1,3,4,2, 7,11,9,6, 14), - (1,2,5 , 6,10,8 ), - (1,5,3 , 7,12,8 ), - (2,4,5 , 9,13,10 ), - (3,5,4 , 12,13,11 ), + #Vertices | Edges | Face + (1,3,4,2, 7,11,9,6, 14), + (1,2,5 , 6,10,8 ), + (1,5,3 , 7,12,8 ), + (2,4,5 , 9,13,10 ), + (3,5,4 , 12,13,11 ), ) facedof_interior_indices(::Lagrange{RefPyramid,2}) = ( - (14,), - (), - (), - (), + (14,), + (), + (), + (), (), ) edgedof_indices(::Lagrange{RefPyramid,2}) = ( - (1,2,6), - (1,3,7), - (1,5,8), - (2,4,9), - (2,5,10), - (4,3,11), - (3,5,12), + (1,2,6), + (1,3,7), + (1,5,8), + (2,4,9), + (2,5,10), + (4,3,11), + (3,5,12), (4,5,13) ) edgedof_interior_indices(::Lagrange{RefPyramid,2}) = ( @@ -1256,7 +1240,7 @@ function reference_coordinates(::Lagrange{RefPyramid,2}) Vec{3, Float64}((0.5, 0.5, 0.0))] end -function shape_value(ip::Lagrange{RefPyramid,2}, ξ::Vec{3,T}, i::Int) where T +function reference_shape_value(ip::Lagrange{RefPyramid,2}, ξ::Vec{3,T}, i::Int) where T (x,y,z) = ξ x² = x*x y² = y*y @@ -1299,8 +1283,9 @@ getnbasefunctions(::BubbleEnrichedLagrange{RefTriangle,1}) = 4 adjust_dofs_during_distribution(::BubbleEnrichedLagrange{RefTriangle,1}) = false vertexdof_indices(::BubbleEnrichedLagrange{RefTriangle,1}) = ((1,), (2,), (3,)) -facedof_indices(::BubbleEnrichedLagrange{RefTriangle,1}) = ((1,2), (2,3), (3,1)) -celldof_interior_indices(::BubbleEnrichedLagrange{RefTriangle,1}) = (4,) +edgedof_indices(::BubbleEnrichedLagrange{RefTriangle,1}) = ((1,2), (2,3), (3,1)) +facedof_indices(ip::BubbleEnrichedLagrange{RefTriangle,1}) = (ntuple(i->i, getnbasefunctions(ip)),) +facedof_interior_indices(::BubbleEnrichedLagrange{RefTriangle,1}) = ((4,),) function reference_coordinates(::BubbleEnrichedLagrange{RefTriangle,1}) return [Vec{2, Float64}((1.0, 0.0)), @@ -1309,7 +1294,7 @@ function reference_coordinates(::BubbleEnrichedLagrange{RefTriangle,1}) Vec{2, Float64}((1/3, 1/3)),] end -function shape_value(ip::BubbleEnrichedLagrange{RefTriangle, 1}, ξ::Vec{2}, i::Int) +function reference_shape_value(ip::BubbleEnrichedLagrange{RefTriangle, 1}, ξ::Vec{2}, i::Int) ξ_x = ξ[1] ξ_y = ξ[2] i == 1 && return ξ_x*(9ξ_y^2 + 9ξ_x*ξ_y - 9ξ_y + 1) @@ -1322,13 +1307,18 @@ end ############### # Serendipity # ############### +""" + Serendipity{refshape, order} <: ScalarInterpolation + +Serendipity element on hypercubes. Currently only second order variants are implemented. +""" struct Serendipity{shape, order, unused} <: ScalarInterpolation{shape,order} function Serendipity{shape, order}() where {shape <: AbstractRefShape, order} new{shape, order, Nothing}() end end -# Note that the edgedofs for high order serendipity elements are defined in terms of integral moments, +# Note that the edgedofs for high order serendipity elements are defined in terms of integral moments, # so no permutation exists in general. See e.g. Scroggs et al. [2022] for an example. 
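For orientation, the `Serendipity` interpolation documented above can be exercised with a short illustrative sketch (not part of the patch itself; it only assumes the exported `QuadratureRule` and `CellValues` constructors used elsewhere in this diff). The second-order quadrilateral variant carries 4 vertex dofs and 4 edge dofs and no interior dof:

```julia
using Ferrite

ip = Serendipity{RefQuadrilateral, 2}()   # 8 basis functions: 4 vertex + 4 edge dofs
qr = QuadratureRule{RefQuadrilateral}(3)  # a matching quadrature rule
cv = CellValues(qr, ip)
@assert getnbasefunctions(cv) == 8
```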
# adjust_dofs_during_distribution(::Serendipity{refshape, order}) where {refshape, order} = false adjust_dofs_during_distribution(::Serendipity{<:Any, 2}) = false @@ -1344,8 +1334,9 @@ vertexdof_indices(::Serendipity{RefHexahedron}) = ((1,),(2,),(3,),(4,),(5,),(6,) getnbasefunctions(::Serendipity{RefQuadrilateral,2}) = 8 getlowerorder(::Serendipity{RefQuadrilateral,2}) = Lagrange{RefQuadrilateral,1}() -facedof_indices(::Serendipity{RefQuadrilateral,2}) = ((1,2,5), (2,3,6), (3,4,7), (4,1,8)) -facedof_interior_indices(::Serendipity{RefQuadrilateral,2}) = ((5,), (6,), (7,), (8,)) +edgedof_indices(::Serendipity{RefQuadrilateral,2}) = ((1,2,5), (2,3,6), (3,4,7), (4,1,8)) +edgedof_interior_indices(::Serendipity{RefQuadrilateral,2}) = ((5,), (6,), (7,), (8,)) +facedof_indices(ip::Serendipity{RefQuadrilateral,2}) = (ntuple(i->i, getnbasefunctions(ip)),) function reference_coordinates(::Serendipity{RefQuadrilateral,2}) return [Vec{2, Float64}((-1.0, -1.0)), @@ -1358,7 +1349,7 @@ function reference_coordinates(::Serendipity{RefQuadrilateral,2}) Vec{2, Float64}((-1.0, 0.0))] end -function shape_value(ip::Serendipity{RefQuadrilateral,2}, ξ::Vec{2}, i::Int) +function reference_shape_value(ip::Serendipity{RefQuadrilateral,2}, ξ::Vec{2}, i::Int) ξ_x = ξ[1] ξ_y = ξ[2] i == 1 && return (1 - ξ_x) * (1 - ξ_y) * (-ξ_x - ξ_y - 1) / 4 @@ -1430,18 +1421,18 @@ function reference_coordinates(::Serendipity{RefHexahedron,2}) end # Inlined to resolve the recursion properly -@inline function shape_value(ip::Serendipity{RefHexahedron, 2}, ξ::Vec{3}, i::Int) +@inline function reference_shape_value(ip::Serendipity{RefHexahedron, 2}, ξ::Vec{3}, i::Int) ξ_x = ξ[1] ξ_y = ξ[2] ξ_z = ξ[3] - i == 1 && return (1 - ξ_x) * (1 - ξ_y) * (1 - ξ_z) / 8 - (shape_value(ip, ξ, 12) + shape_value(ip, ξ, 9) + shape_value(ip, ξ, 17)) / 2 - i == 2 && return (1 + ξ_x) * (1 - ξ_y) * (1 - ξ_z) / 8 - (shape_value(ip, ξ, 9) + shape_value(ip, ξ, 10) + shape_value(ip, ξ, 18)) / 2 - i == 3 && return (1 + ξ_x) * (1 + ξ_y) * (1 - ξ_z) / 8 - (shape_value(ip, ξ, 10) + shape_value(ip, ξ, 11) + shape_value(ip, ξ, 19)) / 2 - i == 4 && return (1 - ξ_x) * (1 + ξ_y) * (1 - ξ_z) / 8 - (shape_value(ip, ξ, 11) + shape_value(ip, ξ, 12) + shape_value(ip, ξ, 20)) / 2 - i == 5 && return (1 - ξ_x) * (1 - ξ_y) * (1 + ξ_z) / 8 - (shape_value(ip, ξ, 16) + shape_value(ip, ξ, 13) + shape_value(ip, ξ, 17)) / 2 - i == 6 && return (1 + ξ_x) * (1 - ξ_y) * (1 + ξ_z) / 8 - (shape_value(ip, ξ, 13) + shape_value(ip, ξ, 14) + shape_value(ip, ξ, 18)) / 2 - i == 7 && return (1 + ξ_x) * (1 + ξ_y) * (1 + ξ_z) / 8 - (shape_value(ip, ξ, 14) + shape_value(ip, ξ, 15) + shape_value(ip, ξ, 19)) / 2 - i == 8 && return (1 - ξ_x) * (1 + ξ_y) * (1 + ξ_z) / 8 - (shape_value(ip, ξ, 15) + shape_value(ip, ξ, 16) + shape_value(ip, ξ, 20)) / 2 + i == 1 && return (1 - ξ_x) * (1 - ξ_y) * (1 - ξ_z) / 8 - (reference_shape_value(ip, ξ, 12) + reference_shape_value(ip, ξ, 9) + reference_shape_value(ip, ξ, 17)) / 2 + i == 2 && return (1 + ξ_x) * (1 - ξ_y) * (1 - ξ_z) / 8 - (reference_shape_value(ip, ξ, 9) + reference_shape_value(ip, ξ, 10) + reference_shape_value(ip, ξ, 18)) / 2 + i == 3 && return (1 + ξ_x) * (1 + ξ_y) * (1 - ξ_z) / 8 - (reference_shape_value(ip, ξ, 10) + reference_shape_value(ip, ξ, 11) + reference_shape_value(ip, ξ, 19)) / 2 + i == 4 && return (1 - ξ_x) * (1 + ξ_y) * (1 - ξ_z) / 8 - (reference_shape_value(ip, ξ, 11) + reference_shape_value(ip, ξ, 12) + reference_shape_value(ip, ξ, 20)) / 2 + i == 5 && return (1 - ξ_x) * (1 - ξ_y) * (1 + ξ_z) / 8 - (reference_shape_value(ip, ξ, 16) + 
reference_shape_value(ip, ξ, 13) + reference_shape_value(ip, ξ, 17)) / 2 + i == 6 && return (1 + ξ_x) * (1 - ξ_y) * (1 + ξ_z) / 8 - (reference_shape_value(ip, ξ, 13) + reference_shape_value(ip, ξ, 14) + reference_shape_value(ip, ξ, 18)) / 2 + i == 7 && return (1 + ξ_x) * (1 + ξ_y) * (1 + ξ_z) / 8 - (reference_shape_value(ip, ξ, 14) + reference_shape_value(ip, ξ, 15) + reference_shape_value(ip, ξ, 19)) / 2 + i == 8 && return (1 - ξ_x) * (1 + ξ_y) * (1 + ξ_z) / 8 - (reference_shape_value(ip, ξ, 15) + reference_shape_value(ip, ξ, 16) + reference_shape_value(ip, ξ, 20)) / 2 i == 9 && return (1 - ξ_x^2) * (1 - ξ_y) * (1 - ξ_z) / 4 i == 10 && return (1 + ξ_x) * (1 - ξ_y^2) * (1 - ξ_z) / 4 i == 11 && return (1 - ξ_x^2) * (1 + ξ_y) * (1 - ξ_z) / 4 @@ -1462,32 +1453,39 @@ end # Crouzeix–Raviart Elements # ############################# """ + CrouzeixRaviart{refshape, order} <: ScalarInterpolation + Classical non-conforming Crouzeix–Raviart element. -For details we refer to the original paper: -M. Crouzeix and P. Raviart. "Conforming and nonconforming finite element -methods for solving the stationary Stokes equations I." ESAIM: Mathematical Modelling -and Numerical Analysis-Modélisation Mathématique et Analyse Numérique 7.R3 (1973): 33-75. +For details we refer to the original paper [CroRav:1973:cnf](@cite). """ struct CrouzeixRaviart{shape, order, unused} <: ScalarInterpolation{shape, order} CrouzeixRaviart{RefTriangle, 1}() = new{RefTriangle, 1, Nothing}() + CrouzeixRaviart{RefTetrahedron, 1}() = new{RefTetrahedron, 1, Nothing}() end +# CR elements are characterized by not having vertex dofs +vertexdof_indices(ip::CrouzeixRaviart) = ntuple(i->(), nvertices(ip)) + +################################################# +# Non-conforming Crouzeix-Raviart dim 2 order 1 # +################################################# +getnbasefunctions(::CrouzeixRaviart{RefTriangle,1}) = 3 + adjust_dofs_during_distribution(::CrouzeixRaviart) = true adjust_dofs_during_distribution(::CrouzeixRaviart{<:Any, 1}) = false -getnbasefunctions(::CrouzeixRaviart) = 3 - -facedof_indices(::CrouzeixRaviart) = ((1,), (2,), (3,)) -facedof_interior_indices(::CrouzeixRaviart) = ((1,), (2,), (3,)) +edgedof_indices(::CrouzeixRaviart{RefTriangle,1}) = ((1,), (2,), (3,)) +edgedof_interior_indices(::CrouzeixRaviart{RefTriangle,1}) = ((1,), (2,), (3,)) +facedof_indices(ip::CrouzeixRaviart{RefTriangle,1}) = (ntuple(i->i, getnbasefunctions(ip)),) -function reference_coordinates(::CrouzeixRaviart) +function reference_coordinates(::CrouzeixRaviart{RefTriangle,1}) return [Vec{2, Float64}((0.5, 0.5)), Vec{2, Float64}((0.0, 0.5)), Vec{2, Float64}((0.5, 0.0))] end -function shape_value(ip::CrouzeixRaviart{RefTriangle, 1}, ξ::Vec{2}, i::Int) +function reference_shape_value(ip::CrouzeixRaviart{RefTriangle, 1}, ξ::Vec{2}, i::Int) ξ_x = ξ[1] ξ_y = ξ[2] i == 1 && return 2*ξ_x + 2*ξ_y - 1 @@ -1496,6 +1494,106 @@ function shape_value(ip::CrouzeixRaviart{RefTriangle, 1}, ξ::Vec{2}, i::Int) throw(ArgumentError("no shape function $i for interpolation $ip")) end +################################################# +# Non-conforming Crouzeix-Raviart dim 3 order 1 # +################################################# +getnbasefunctions(::CrouzeixRaviart{RefTetrahedron,1}) = 4 + +facedof_indices(::CrouzeixRaviart{RefTetrahedron,1}) = ((1,), (2,), (3,), (4,)) +facedof_interior_indices(::CrouzeixRaviart{RefTetrahedron,1}) = ((1,), (2,), (3,), (4,)) + +function reference_coordinates(::CrouzeixRaviart{RefTetrahedron,1}) + return [ + Vec{3, Float64}((1/3, 1/3, 0.0)), + 
Vec{3, Float64}((1/3, 0.0, 1/3)), + Vec{3, Float64}((1/3, 1/3, 1/3)), + Vec{3, Float64}((0.0, 1/3, 1/3)), + ] +end + +function reference_shape_value(ip::CrouzeixRaviart{RefTetrahedron,1}, ξ::Vec{3}, i::Int) + (x,y,z) = ξ + i == 1 && return 1 -3z + i == 2 && return 1 -3y + i == 3 && return 3x +3y +3z -2 + i == 4 && return 1 -3x + return throw(ArgumentError("no shape function $i for interpolation $ip")) +end + +""" + RannacherTurek{refshape, order} <: ScalarInterpolation + +Classical non-conforming Rannacher-Turek element. + +This element is basically the idea from Crouzeix and Raviart applied to +hypercubes. For details see the original paper [RanTur:1992:snq](@cite). +""" +struct RannacherTurek{shape,order} <: ScalarInterpolation{shape,order} end + +# CR-type elements are characterized by not having vertex dofs +vertexdof_indices(ip::RannacherTurek) = ntuple(i->(), nvertices(ip)) + +adjust_dofs_during_distribution(::RannacherTurek) = true +adjust_dofs_during_distribution(::RannacherTurek{<:Any, 1}) = false + +################################# +# Rannacher-Turek dim 2 order 1 # +################################# +getnbasefunctions(::RannacherTurek{RefQuadrilateral,1}) = 4 + +edgedof_indices(::RannacherTurek{RefQuadrilateral,1}) = ((1,), (2,), (3,), (4,)) +edgedof_interior_indices(::RannacherTurek{RefQuadrilateral,1}) = ((1,), (2,), (3,), (4,)) +facedof_indices(ip::RannacherTurek{RefQuadrilateral,1}) = (ntuple(i->i, getnbasefunctions(ip)),) + +function reference_coordinates(::RannacherTurek{RefQuadrilateral,1}) + return [Vec{2, Float64}(( 0.0, -1.0)), + Vec{2, Float64}(( 1.0, 0.0)), + Vec{2, Float64}(( 0.0, 1.0)), + Vec{2, Float64}((-1.0, 0.0))] +end + +function reference_shape_value(ip::RannacherTurek{RefQuadrilateral,1}, ξ::Vec{2,T}, i::Int) where T + (x,y) = ξ + + i == 1 && return -(x+1)^2/4 +(y+1)^2/4 +(x+1)/2 -(y+1) +T(3)/4 + i == 2 && return (x+1)^2/4 -(y+1)^2/4 +(y+1)/2 -T(1)/4 + i == 3 && return -(x+1)^2/4 +(y+1)^2/4 +(x+1)/2 -T(1)/4 + i == 4 && return (x+1)^2/4 -(y+1)^2/4 -(x+1) +(y+1)/2 +T(3)/4 + throw(ArgumentError("no shape function $i for interpolation $ip")) +end + +################################# +# Rannacher-Turek dim 3 order 1 # +################################# +getnbasefunctions(::RannacherTurek{RefHexahedron,1}) = 6 + +edgedof_indices(ip::RannacherTurek{RefHexahedron,1}) = ntuple(i->(), nedges(ip)) +edgedof_interior_indices(ip::RannacherTurek{RefHexahedron,1}) = ntuple(i->(), nedges(ip)) +facedof_indices(::RannacherTurek{RefHexahedron,1}) = ((1,), (2,), (3,), (4,), (5,), (6,)) +facedof_interior_indices(::RannacherTurek{RefHexahedron,1}) = ((1,), (2,), (3,), (4,), (5,), (6,)) + +function reference_coordinates(::RannacherTurek{RefHexahedron,1}) + return [Vec{3, Float64}(( 0.0, 0.0, -1.0)), + Vec{3, Float64}(( 0.0, -1.0, 0.0)), + Vec{3, Float64}(( 1.0, 0.0, 0.0)), + Vec{3, Float64}(( 0.0, 1.0, 0.0)), + Vec{3, Float64}((-1.0, 0.0, 0.0)), + Vec{3, Float64}(( 0.0, 0.0, 1.0)),] +end + +function reference_shape_value(ip::RannacherTurek{RefHexahedron,1}, ξ::Vec{3,T}, i::Int) where T + (x,y,z) = ξ + + i == 1 && return -2((x+1))^2/12 +1(x+1)/3 -2((y+1))^2/12 +1(y+1)/3 +4((z+1))^2/12 -7(z+1)/6 + T(2)/3 + i == 2 && return -2((x+1))^2/12 +1(x+1)/3 +4((y+1))^2/12 -7(y+1)/6 -2((z+1))^2/12 +1(z+1)/3 + T(2)/3 + i == 3 && return 4((x+1))^2/12 -1(x+1)/6 -2((y+1))^2/12 +1(y+1)/3 -2((z+1))^2/12 +1(z+1)/3 - T(1)/3 + i == 4 && return -2((x+1))^2/12 +1(x+1)/3 +4((y+1))^2/12 -1(y+1)/6 -2((z+1))^2/12 +1(z+1)/3 - T(1)/3 + i == 5 && return 4((x+1))^2/12 -7(x+1)/6 -2((y+1))^2/12 +1(y+1)/3 
-2((z+1))^2/12 +1(z+1)/3 + T(2)/3 + i == 6 && return -2((x+1))^2/12 +1(x+1)/3 -2((y+1))^2/12 +1(y+1)/3 +4((z+1))^2/12 -1(z+1)/6 - T(1)/3 + + throw(ArgumentError("no shape function $i for interpolation $ip")) +end + ################################################## # VectorizedInterpolation{<:ScalarInterpolation} # ################################################## @@ -1527,37 +1625,79 @@ end get_n_copies(::VectorizedInterpolation{vdim}) where vdim = vdim InterpolationInfo(ip::VectorizedInterpolation) = InterpolationInfo(ip.ip, get_n_copies(ip)) +# Error when trying to get dof indices from vectorized interpolations. +# Currently, this should only be done for the scalar interpolation. +function _entitydof_indices_vectorized_ip_error(f::Symbol) + throw(ArgumentError(string(f, " is not implemented for VectorizedInterpolations and should be called on the scalar base interpolation"))) +end +vertexdof_indices(::VectorizedInterpolation) = _entitydof_indices_vectorized_ip_error(:vertexdof_indices) +edgedof_indices(::VectorizedInterpolation) = _entitydof_indices_vectorized_ip_error(:edgedof_indices) +facedof_indices(::VectorizedInterpolation) = _entitydof_indices_vectorized_ip_error(:facedof_indices) +edgedof_interior_indices(::VectorizedInterpolation) = _entitydof_indices_vectorized_ip_error(:edgedof_interior_indices) +facedof_interior_indices(::VectorizedInterpolation) = _entitydof_indices_vectorized_ip_error(:facedof_interior_indices) +volumedof_interior_indices(::VectorizedInterpolation) = _entitydof_indices_vectorized_ip_error(:volumedof_interior_indices) + +get_base_interpolation(ip::Interpolation) = ip +get_base_interpolation(ip::VectorizedInterpolation) = ip.ip + function getnbasefunctions(ipv::VectorizedInterpolation{vdim}) where vdim return vdim * getnbasefunctions(ipv.ip) end -function shape_value(ipv::VectorizedInterpolation{vdim, shape}, ξ::Vec{refdim, T}, I::Int) where {vdim, refdim, shape <: AbstractRefShape{refdim}, T} +function reference_shape_value(ipv::VectorizedInterpolation{vdim, shape}, ξ::Vec{refdim, T}, I::Int) where {vdim, refdim, shape <: AbstractRefShape{refdim}, T} i0, c0 = divrem(I - 1, vdim) i = i0 + 1 c = c0 + 1 - v = shape_value(ipv.ip, ξ, i) + v = reference_shape_value(ipv.ip, ξ, i) return Vec{vdim, T}(j -> j == c ?
v : zero(v)) end # vdim == refdim -function shape_gradient_and_value(ipv::VectorizedInterpolation{dim, shape}, ξ::Vec{dim}, I::Int) where {dim, shape <: AbstractRefShape{dim}} - return invoke(shape_gradient_and_value, Tuple{Interpolation, Vec, Int}, ipv, ξ, I) +function reference_shape_gradient_and_value(ipv::VectorizedInterpolation{dim, shape}, ξ::Vec{dim}, I::Int) where {dim, shape <: AbstractRefShape{dim}} + return invoke(reference_shape_gradient_and_value, Tuple{Interpolation, Vec, Int}, ipv, ξ, I) end # vdim != refdim -function shape_gradient_and_value(ipv::VectorizedInterpolation{vdim, shape}, ξ::V, I::Int) where {vdim, refdim, shape <: AbstractRefShape{refdim}, T, V <: Vec{refdim, T}} +function reference_shape_gradient_and_value(ipv::VectorizedInterpolation{vdim, shape}, ξ::V, I::Int) where {vdim, refdim, shape <: AbstractRefShape{refdim}, T, V <: Vec{refdim, T}} + tosvec(v::Vec) = SVector((v...,)) + tovec(sv::SVector) = Vec((sv...)) + val = reference_shape_value(ipv, ξ, I) + grad = ForwardDiff.jacobian(sv -> tosvec(reference_shape_value(ipv, tovec(sv), I)), tosvec(ξ)) + return grad, val +end + +# vdim == refdim +function reference_shape_hessian_gradient_and_value(ipv::VectorizedInterpolation{dim, shape}, ξ::Vec{dim}, I::Int) where {dim, shape <: AbstractRefShape{dim}} + return invoke(reference_shape_hessian_gradient_and_value, Tuple{Interpolation, Vec, Int}, ipv, ξ, I) +end +# vdim != refdim +function reference_shape_hessian_gradient_and_value(ipv::VectorizedInterpolation{vdim, shape}, ξ::V, I::Int) where {vdim, refdim, shape <: AbstractRefShape{refdim}, T, V <: Vec{refdim, T}} + _reference_shape_hessian_gradient_and_value_static_array(ipv, ξ, I) +end +function _reference_shape_hessian_gradient_and_value_static_array(ipv::VectorizedInterpolation{vdim, shape}, ξ::V, I::Int) where {vdim, refdim, shape <: AbstractRefShape{refdim}, T, V <: Vec{refdim, T}} # Load with dual numbers and compute the value - f = x -> shape_value(ipv, x, I) - ξd = Tensors._load(ξ, Tensors.Tag(f, V)) - value_grad = f(ξd) + f = x -> reference_shape_value(ipv, x, I) + ξd = Tensors._load(Tensors._load(ξ, ForwardDiff.Tag(f, V)), ForwardDiff.Tag(f, V)) + value_hess = f(ξd) # Extract the value and gradient - val = Vec{vdim, T}(i -> Tensors.value(value_grad[i])) + val = Vec{vdim, T}(i -> ForwardDiff.value(ForwardDiff.value(value_hess[i]))) grad = zero(MMatrix{vdim, refdim, T}) - for (i, vi) in pairs(value_grad) - p = Tensors.partials(vi) - for (j, pj) in pairs(p) - grad[i, j] = pj + hess = zero(MArray{Tuple{vdim, refdim, refdim}, T}) + for (i, vi) in pairs(value_hess) + hess_values = ForwardDiff.value(vi) + + hess_values_partials = ForwardDiff.partials(hess_values) + for (k, pk) in pairs(hess_values_partials) + grad[i, k] = pk + end + + hess_partials = ForwardDiff.partials(vi) + for (j, partial_j) in pairs(hess_partials) + hess_partials_partials = ForwardDiff.partials(partial_j) + for (k, pk) in pairs(hess_partials_partials) + hess[i, j, k] = pk + end end end - return SMatrix(grad), val + return SArray(hess), SMatrix(grad), val end reference_coordinates(ip::VectorizedInterpolation) = reference_coordinates(ip.ip) @@ -1570,7 +1710,7 @@ is_discontinuous(::Type{<:VectorizedInterpolation{<:Any, <:Any, <:Any, ip}}) whe Get the type of mapping from the reference cell to the real cell for an interpolation `ip`. Subtypes of `ScalarInterpolation` and `VectorizedInterpolation` return `IdentityMapping()`, but other non-scalar interpolations may request different -mapping types. +mapping types. 
""" function mapping_type end diff --git a/src/iterators.jl b/src/iterators.jl index b73a2bbd2e..b37721f4b3 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -33,46 +33,46 @@ cell. The cache is updated for a new cell by calling `reinit!(cache, cellid)` wh - `getnodes(cc)`: get the global node ids of the cell - `getcoordinates(cc)`: get the coordinates of the cell - `celldofs(cc)`: get the global dof ids of the cell - - `reinit!(fev, cc)`: reinitialize [`CellValues`](@ref) or [`FaceValues`](@ref) + - `reinit!(fev, cc)`: reinitialize [`CellValues`](@ref) or [`FacetValues`](@ref) See also [`CellIterator`](@ref). """ -struct CellCache{X,G<:AbstractGrid,DH<:Union{AbstractDofHandler,Nothing}} - flags::UpdateFlags - grid::G +mutable struct CellCache{X,G<:AbstractGrid,DH<:Union{AbstractDofHandler,Nothing}} + const flags::UpdateFlags + const grid::G # Pretty useless to store this since you have it already for the reinit! call, but # needed for the CellIterator(...) workflow since the user doesn't necessarily control # the loop order in the cell subset. - cellid::ScalarWrapper{Int} - nodes::Vector{Int} - coords::Vector{X} - dh::DH - dofs::Vector{Int} + cellid::Int + const nodes::Vector{Int} + const coords::Vector{X} + const dh::DH + const dofs::Vector{Int} end function CellCache(grid::Grid{dim,C,T}, flags::UpdateFlags=UpdateFlags()) where {dim,C,T} - N = nnodes_per_cell(grid) + N = nnodes_per_cell(grid, 1) # nodes and coords will be resized in `reinit!` nodes = zeros(Int, N) coords = zeros(Vec{dim,T}, N) - return CellCache(flags, grid, ScalarWrapper(-1), nodes, coords, nothing, Int[]) + return CellCache(flags, grid, -1, nodes, coords, nothing, Int[]) end function CellCache(dh::DofHandler{dim}, flags::UpdateFlags=UpdateFlags()) where {dim} - N = nnodes_per_cell(get_grid(dh)) + n = ndofs_per_cell(dh.subdofhandlers[1]) # dofs and coords will be resized in `reinit!` + N = nnodes_per_cell(get_grid(dh), 1) nodes = zeros(Int, N) coords = zeros(Vec{dim, get_coordinate_eltype(get_grid(dh))}, N) - n = ndofs_per_cell(dh) celldofs = zeros(Int, n) - return CellCache(flags, get_grid(dh), ScalarWrapper(-1), nodes, coords, dh, celldofs) + return CellCache(flags, get_grid(dh), -1, nodes, coords, dh, celldofs) end function CellCache(sdh::SubDofHandler, flags::UpdateFlags=UpdateFlags()) Tv = get_coordinate_type(sdh.dh.grid) - CellCache(flags, sdh.dh.grid, ScalarWrapper(-1), Int[], Tv[], sdh, Int[]) + CellCache(flags, sdh.dh.grid, -1, Int[], Tv[], sdh, Int[]) end function reinit!(cc::CellCache, i::Int) - cc.cellid[] = i + cc.cellid = i if cc.flags.nodes resize!(cc.nodes, nnodes_per_cell(cc.grid, i)) cellnodes!(cc.nodes, cc.grid, i) @@ -90,20 +90,19 @@ end # reinit! FEValues with CellCache reinit!(cv::CellValues, cc::CellCache) = reinit!(cv, cc.coords) -reinit!(fv::FaceValues, cc::CellCache, f::Int) = reinit!(fv, cc.coords, f) # TODO: Deprecate? +reinit!(fv::FacetValues, cc::CellCache, f::Int) = reinit!(fv, cc.coords, f) # TODO: Deprecate? # Accessor functions (TODO: Deprecate? We are so inconsistent with `getxx` vs `xx`...) getnodes(cc::CellCache) = cc.nodes getcoordinates(cc::CellCache) = cc.coords celldofs(cc::CellCache) = cc.dofs -cellid(cc::CellCache) = cc.cellid[] +cellid(cc::CellCache) = cc.cellid # TODO: This can definitely be deprecated celldofs!(v::Vector, cc::CellCache) = copyto!(v, cc.dofs) # celldofs!(v, cc.dh, cc.cellid[]) # TODO: These should really be replaced with something better... 
-nfaces(cc::CellCache) = nfaces(cc.grid.cells[cc.cellid[]]) -onboundary(cc::CellCache, face::Int) = cc.grid.boundary_matrix[face, cc.cellid[]] +nfacets(cc::CellCache) = nfacets(getcells(cc.grid, cc.cellid)) # TODO: Currently excluded from the docstring below. Should they be public? @@ -111,54 +110,53 @@ onboundary(cc::CellCache, face::Int) = cc.grid.boundary_matrix[face, cc.cellid[] # - `Ferrite.faceid(fc)`: get the current faceid (`faceindex(fc)[2]`) """ - FaceCache(grid::Grid) - FaceCache(dh::AbstractDofHandler) + FacetCache(grid::Grid) + FacetCache(dh::AbstractDofHandler) Create a cache object with pre-allocated memory for the nodes, coordinates, and dofs of a cell suitable for looping over *faces* in a grid. The cache is updated for a new face by -calling `reinit!(cache, fi::FaceIndex)`. +calling `reinit!(cache, fi::FacetIndex)`. -**Methods with `fc::FaceCache`** - - `reinit!(fc, fi)`: reinitialize the cache for face `fi::FaceIndex` +**Methods with `fc::FacetCache`** + - `reinit!(fc, fi)`: reinitialize the cache for face `fi::FacetIndex` - `cellid(fc)`: get the current cellid (`faceindex(fc)[1]`) - `getnodes(fc)`: get the global node ids of the *cell* - `getcoordinates(fc)`: get the coordinates of the *cell* - `celldofs(fc)`: get the global dof ids of the *cell* - - `reinit!(fv, fc)`: reinitialize [`FaceValues`](@ref) + - `reinit!(fv, fc)`: reinitialize [`FacetValues`](@ref) -See also [`FaceIterator`](@ref). +See also [`FacetIterator`](@ref). """ -struct FaceCache{CC<:CellCache} - cc::CC # const for julia > 1.8 - dofs::Vector{Int} # aliasing cc.dofs - current_faceid::ScalarWrapper{Int} +mutable struct FacetCache{CC<:CellCache} + const cc::CC # const for julia > 1.8 + const dofs::Vector{Int} # aliasing cc.dofs + current_facet_id::Int end -function FaceCache(args...) +function FacetCache(args...) cc = CellCache(args...) - FaceCache(cc, cc.dofs, ScalarWrapper(0)) + FacetCache(cc, cc.dofs, 0) end -function reinit!(fc::FaceCache, face::FaceIndex) - cellid, faceid = face +function reinit!(fc::FacetCache, facet::BoundaryIndex) + cellid, facetid = facet reinit!(fc.cc, cellid) - fc.current_faceid[] = faceid + fc.current_facet_id = facetid return nothing end # Delegate methods to the cell cache for op = (:getnodes, :getcoordinates, :cellid, :celldofs) @eval begin - function $op(fc::FaceCache, args...) + function $op(fc::FacetCache, args...) return $op(fc.cc, args...) end end end -# @inline faceid(fc::FaceCache) = fc.current_faceid[] -@inline celldofs!(v::Vector, fc::FaceCache) = celldofs!(v, fc.cc) -# @inline onboundary(fc::FaceCache) = onboundary(fc.cc, faceid(fc)) -# @inline faceindex(fc::FaceCache) = FaceIndex(cellid(fc), faceid(fc)) -@inline function reinit!(fv::FaceValues, fc::FaceCache) - reinit!(fv, fc.cc, fc.current_faceid[]) +# @inline faceid(fc::FacetCache) = fc.current_faceid[] +@inline celldofs!(v::Vector, fc::FacetCache) = celldofs!(v, fc.cc) +# @inline faceindex(fc::FacetCache) = FaceIndex(cellid(fc), faceid(fc)) +@inline function reinit!(fv::FacetValues, fc::FacetCache) + reinit!(fv, fc.cc, fc.current_facet_id) end """ @@ -166,35 +164,35 @@ end InterfaceCache(dh::AbstractDofHandler) Create a cache object with pre-allocated memory for the nodes, coordinates, and dofs of an -interface. The cache is updated for a new cell by calling `reinit!(cache, face_a, face_b)` where -`face_a::FaceIndex` and `face_b::FaceIndex` are the two interface faces. +interface. 
The cache is updated for a new cell by calling `reinit!(cache, facet_a, facet_b)` where +`facet_a::FacetIndex` and `facet_b::FacetIndex` are the two interface faces. **Struct fields of `InterfaceCache`** - - `ic.a :: FaceCache`: face cache for the first face of the interface - - `ic.b :: FaceCache`: face cache for the second face of the interface + - `ic.a :: FacetCache`: face cache for the first face of the interface + - `ic.b :: FacetCache`: face cache for the second face of the interface - `ic.dofs :: Vector{Int}`: global dof ids for the interface (union of `ic.a.dofs` and `ic.b.dofs`) **Methods with `InterfaceCache`** - - `reinit!(cache::InterfaceCache, face_a::FaceIndex, face_b::FaceIndex)`: reinitialize the cache for a new interface + - `reinit!(cache::InterfaceCache, facet_a::FacetIndex, facet_b::FacetIndex)`: reinitialize the cache for a new interface - `interfacedofs(ic)`: get the global dof ids of the interface See also [`InterfaceIterator`](@ref). """ -struct InterfaceCache{FC<:FaceCache} +struct InterfaceCache{FC<:FacetCache} a::FC b::FC dofs::Vector{Int} end function InterfaceCache(gridordh::Union{AbstractGrid, AbstractDofHandler}) - fc_a = FaceCache(gridordh) - fc_b = FaceCache(gridordh) + fc_a = FacetCache(gridordh) + fc_b = FacetCache(gridordh) return InterfaceCache(fc_a, fc_b, Int[]) end -function reinit!(cache::InterfaceCache, face_a::FaceIndex, face_b::FaceIndex) - reinit!(cache.a, face_a) - reinit!(cache.b, face_b) +function reinit!(cache::InterfaceCache, facet_a::BoundaryIndex, facet_b::BoundaryIndex) + reinit!(cache.a, facet_a) + reinit!(cache.b, facet_b) resize!(cache.dofs, length(celldofs(cache.a)) + length(celldofs(cache.b))) for (i, d) in pairs(cache.a.dofs) cache.dofs[i] = d @@ -209,10 +207,10 @@ function reinit!(iv::InterfaceValues, ic::InterfaceCache) return reinit!(iv, getcells(ic.a.cc.grid, cellid(ic.a)), getcoordinates(ic.a), - ic.a.current_faceid[], + ic.a.current_facet_id[], getcells(ic.b.cc.grid, cellid(ic.b)), getcoordinates(ic.b), - ic.b.current_faceid[], + ic.b.current_facet_id[], ) end @@ -225,9 +223,6 @@ getcoordinates(ic::InterfaceCache) = (getcoordinates(ic.a), getcoordinates(ic.b) #################### ## CellIterator ## - -const IntegerCollection = Union{Set{<:Integer}, AbstractVector{<:Integer}} - """ CellIterator(grid::Grid, cellset=1:getncells(grid)) CellIterator(dh::AbstractDofHandler, cellset=1:getncells(dh)) @@ -284,46 +279,48 @@ end @inline _getcache(ci::CellIterator) = ci.cc -## FaceIterator ## +## FacetIterator ## +FaceIterator(args...) = error("FaceIterator is deprecated, use FacetIterator instead") # Leaving flags undocumented as for CellIterator """ - FaceIterator(gridordh::Union{Grid,AbstractDofHandler}, faceset::Set{FaceIndex}) + FacetIterator(gridordh::Union{Grid,AbstractDofHandler}, faceset::AbstractVecOrSet{FacetIndex}) -Create a `FaceIterator` to conveniently iterate over the faces in `faceset`. The elements of -the iterator are [`FaceCache`](@ref)s which are properly `reinit!`ialized. See -[`FaceCache`](@ref) for more details. +Create a `FacetIterator` to conveniently iterate over the faces in `faceset`. The elements of +the iterator are [`FacetCache`](@ref)s which are properly `reinit!`ialized. See +[`FacetCache`](@ref) for more details. -Looping over a `FaceIterator`, i.e.: +Looping over a `FacetIterator`, i.e.: ```julia -for fc in FaceIterator(grid, faceset) +for fc in FacetIterator(grid, faceset) # ... 
end ``` is thus simply convenience for the following equivalent snippet: ```julia -fc = FaceCache(grid) +fc = FacetCache(grid) for faceindex in faceset reinit!(fc, faceindex) # ... end ``` """ -struct FaceIterator{FC<:FaceCache} +struct FacetIterator{FC<:FacetCache} fc::FC - set::Set{FaceIndex} + set::OrderedSet{FacetIndex} end -function FaceIterator(gridordh::Union{Grid,AbstractDofHandler}, - set, flags::UpdateFlags=UpdateFlags()) +function FacetIterator(gridordh::Union{Grid,AbstractDofHandler}, + set::AbstractVecOrSet{FacetIndex}, flags::UpdateFlags=UpdateFlags()) if gridordh isa DofHandler # Keep here to maintain same settings as for CellIterator _check_same_celltype(get_grid(gridordh), set) end - return FaceIterator(FaceCache(gridordh, flags), set) + return FacetIterator(FacetCache(gridordh, flags), set) end -@inline _getcache(fi::FaceIterator) = fi.fc -@inline _getset(fi::FaceIterator) = fi.set +@inline _getcache(fi::FacetIterator) = fi.fc +@inline _getset(fi::FacetIterator) = fi.set + """ InterfaceIterator(grid::Grid, [topology::ExclusiveTopology]) @@ -366,28 +363,26 @@ function InterfaceIterator(gridordh::Union{Grid,AbstractDofHandler}, end # Iterator interface -function Base.iterate(ii::InterfaceIterator, state...) - grid_dim = getdim(ii.grid) - neighborhood = grid_dim == 1 ? ii.topology.vertex_vertex_neighbor : ii.topology.face_face_neighbor +function Base.iterate(ii::InterfaceIterator{<:Any, <:Grid{sdim}}, state...) where sdim + neighborhood = get_facet_facet_neighborhood(ii.topology, ii.grid) # TODO: This could be moved to InterfaceIterator constructor (potentially type-instable for non-union or mixed grids) while true - it = iterate(faceskeleton(ii.topology, ii.grid), state...) + it = iterate(facetskeleton(ii.topology, ii.grid), state...) it === nothing && return nothing - face_a, state = it - if isempty(neighborhood[face_a[1], face_a[2]]) + facet_a, state = it + if isempty(neighborhood[facet_a[1], facet_a[2]]) continue end - neighbors = neighborhood[face_a[1], face_a[2]].neighbor_info + neighbors = neighborhood[facet_a[1], facet_a[2]] length(neighbors) > 1 && error("multiple neighboring faces not supported yet") - neighbor = neighbors[1] - face_b = grid_dim == 1 ? FaceIndex(neighbor[1], neighbor[2]) : neighbor - reinit!(ii.cache, face_a, face_b) + facet_b = neighbors[1] + reinit!(ii.cache, facet_a, facet_b) return (ii.cache, state) end end -# Iterator interface for CellIterator/FaceIterator -const GridIterators{C} = Union{CellIterator{C}, FaceIterator{C}, InterfaceIterator{C}} +# Iterator interface for CellIterator/FacetIterator +const GridIterators{C} = Union{CellIterator{C}, FacetIterator{C}, InterfaceIterator{C}} function Base.iterate(iterator::GridIterators, state_in...) it = iterate(_getset(iterator), state_in...)
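To see the renamed facet-based iteration in action, a minimal sketch (illustrative only; it assumes a small generated grid and uses only the exported API that appears in this diff, namely `generate_grid`, `ExclusiveTopology`, `InterfaceIterator`, and `cellid`):

```julia
using Ferrite

grid = generate_grid(Quadrilateral, (2, 1))  # two cells sharing one facet
topology = ExclusiveTopology(grid)
for ic in InterfaceIterator(grid, topology)
    # `ic` is an InterfaceCache; `ic.a` and `ic.b` are the reinit!-ed FacetCaches
    println(cellid(ic.a), " <-> ", cellid(ic.b))
end
```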
@@ -410,10 +405,10 @@ function _check_same_celltype(grid::AbstractGrid, cellset::IntegerCollection) end end -function _check_same_celltype(grid::AbstractGrid, faceset::Set{FaceIndex}) +function _check_same_celltype(grid::AbstractGrid, facetset::AbstractVecOrSet{<:BoundaryIndex}) isconcretetype(getcelltype(grid)) && return nothing # Short circuit check - celltype = getcelltype(grid, first(faceset)[1]) - if !all(getcelltype(grid, face[1]) == celltype for face in faceset) - error("The cells in the faceset are not all of the same celltype.") + celltype = getcelltype(grid, first(facetset)[1]) + if !all(getcelltype(grid, facet[1]) == celltype for facet in facetset) + error("The cells in the set (set of $(eltype(facetset))) are not all of the same celltype.") end end diff --git a/src/utils.jl b/src/utils.jl index 09e57b0277..84c0f5a908 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -35,10 +35,10 @@ else end end -mutable struct ScalarWrapper{T} - x::T -end +convert_to_orderedset(set::AbstractVector{T}) where T = OrderedSet{T}(set) +convert_to_orderedset(set::AbstractSet{T}) where T = convert(OrderedSet{T}, set) -@inline Base.getindex(s::ScalarWrapper) = s.x -@inline Base.setindex!(s::ScalarWrapper, v) = s.x = v -Base.copy(s::ScalarWrapper{T}) where {T} = ScalarWrapper{T}(copy(s.x)) +function convert_to_orderedsets(namedsets::Dict{String, <: AbstractVecOrSet{T}}) where T + return Dict{String,OrderedSet{T}}(k => convert_to_orderedset(v) for (k,v) in namedsets) +end +convert_to_orderedsets(namedsets::Dict{String, <: OrderedSet}) = namedsets diff --git a/test/PoolAllocator.jl b/test/PoolAllocator.jl new file mode 100644 index 0000000000..4c50003cb7 --- /dev/null +++ b/test/PoolAllocator.jl @@ -0,0 +1,81 @@ +using Test, Ferrite.PoolAllocator + +@testset "PoolAllocator.jl" begin + + # Basic malloc, realloc, free + mempool = PoolAllocator.MemoryPool{Int}() + x = PoolAllocator.malloc(mempool, 1024) + @test x isa PoolAllocator.PoolArray{Int} + x .= 1:1024 + x′ = PoolAllocator.realloc(x, 2048) + @test x′ isa PoolAllocator.PoolArray{Int} + @test_throws ErrorException("free: block already free'd") PoolAllocator.free(x) + @test x′[1:1024] == 1:1024 + PoolAllocator.free(x′) + @test_throws ErrorException("free: block already free'd") PoolAllocator.free(x′) + + # Internal page allocation: exhaust some pages + mempool = PoolAllocator.MemoryPool{Int}() + xs = PoolAllocator.PoolArray{Int}[] + for _ in 1:(PoolAllocator.PAGE_SIZE ÷ 512 ÷ sizeof(Int) * 2 + 1) + x = PoolAllocator.malloc(mempool, 512) + push!(xs, x) + end + @test length(mempool.books[10].pages) == 3 + @test all(!, mempool.books[10].pages[1].freelist) + @test all(!, mempool.books[10].pages[2].freelist) + @test !mempool.books[10].pages[3].freelist[1] + @test all(mempool.books[10].pages[3].freelist[2:end]) + xs′ = PoolAllocator.PoolArray{Int}[] + for x in xs + x′ = PoolAllocator.realloc(x, 1024) + @test_throws ErrorException("free: block already free'd") PoolAllocator.free(x) + push!(xs′, x′) + end + @test length(mempool.books[10].pages) == 3 # TODO + @test all(mempool.books[10].pages[1].freelist) + @test all(mempool.books[10].pages[2].freelist) + @test all(mempool.books[10].pages[3].freelist) + @test length(mempool.books[11].pages) == 5 + @test all(!, mempool.books[11].pages[1].freelist) + @test all(!, mempool.books[11].pages[2].freelist) + @test all(!, mempool.books[11].pages[3].freelist) + @test all(!, mempool.books[11].pages[4].freelist) + @test !mempool.books[11].pages[5].freelist[1] + @test all(mempool.books[11].pages[5].freelist[2:end]) + for x in 
xs′ + PoolAllocator.free(x) + @test_throws ErrorException("free: block already free'd") PoolAllocator.free(x) + end + PoolAllocator.free(mempool) + @test length(mempool.books) == 0 + + # Array functions + mempool = PoolAllocator.MemoryPool{Int}() + x = PoolAllocator.malloc(mempool, 8) + @test length(x) == 8 + x = PoolAllocator.resize(x, 0) + @test length(x) == 0 + x = PoolAllocator.resize(x, 16) + @test length(x) == 16 + x .= 1:16 + x = PoolAllocator.resize(x, 8) + @test x == 1:8 + x = PoolAllocator.resize(x, 8) + x = PoolAllocator.insert(x, 1, -1) + x = PoolAllocator.insert(x, length(x) + 1, -1) + x = PoolAllocator.insert(x, 2, -2) + x = PoolAllocator.insert(x, length(x), -2) + @test x == [-1; -2; 1:8; -2; -1] + + # n-d arrays + mempool = PoolAllocator.MemoryPool{Int}() + A = PoolAllocator.malloc(mempool, 64, 64) + @test size(A) == (64, 64) + B = PoolAllocator.malloc(mempool, (64, 32)) + @test size(B) == (64, 32) + + # Smoke test for `show` + show(devnull, MIME"text/plain"(), mempool) + +end diff --git a/test/blockarrays.jl b/test/blockarrays.jl index 464caae8be..81b1f627c8 100644 --- a/test/blockarrays.jl +++ b/test/blockarrays.jl @@ -12,16 +12,19 @@ using Ferrite, BlockArrays, SparseArrays, Test nd = ndofs(dh) ÷ 3 ch = ConstraintHandler(dh) - periodic_faces = collect_periodic_faces(grid, "top", "bottom") + periodic_faces = collect_periodic_facets(grid, "top", "bottom") add!(ch, PeriodicDirichlet(:u, periodic_faces)) - add!(ch, Dirichlet(:u, union(getfaceset(grid, "left"), getfaceset(grid, "top")), (x, t) -> [0, 0])) - add!(ch, Dirichlet(:p, getfaceset(grid, "left"), (x, t) -> 0)) + add!(ch, Dirichlet(:u, union(getfacetset(grid, "left"), getfacetset(grid, "top")), (x, t) -> [0, 0])) + add!(ch, Dirichlet(:p, getfacetset(grid, "left"), (x, t) -> 0)) close!(ch) update!(ch, 0) - K = create_sparsity_pattern(dh, ch) + K = allocate_matrix(dh, ch) f = zeros(axes(K, 1)) - KB = create_sparsity_pattern(BlockMatrix, dh, ch) + # TODO: allocate_matrix(BlockMatrix, ...) 
should work and default to field blocking + bsp = BlockSparsityPattern([2nd, 1nd]) + add_sparsity_entries!(bsp, dh, ch) + KB = allocate_matrix(BlockMatrix, bsp) @test KB isa BlockMatrix @test blocksize(KB) == (2, 2) @test size(KB[Block(1), Block(1)]) == (2nd, 2nd) @@ -66,16 +69,16 @@ using Ferrite, BlockArrays, SparseArrays, Test # Global application of BC not supported yet @test_throws ErrorException apply!(KB, fB, ch) - # Custom blocking by passing a partially initialized matrix + # Custom blocking perm = invperm([ch.free_dofs; ch.prescribed_dofs]) renumber!(dh, ch, perm) nfree = length(ch.free_dofs) npres = length(ch.prescribed_dofs) - K = create_sparsity_pattern(dh, ch) + K = allocate_matrix(dh, ch) block_sizes = [nfree, npres] - KBtmp = BlockArray(undef_blocks, SparseMatrixCSC{Float64, Int}, block_sizes, block_sizes) - KB = create_sparsity_pattern(KBtmp, dh, ch) - @test KBtmp === KB + bsp = BlockSparsityPattern(block_sizes) + add_sparsity_entries!(bsp, dh, ch) + KB = allocate_matrix(BlockMatrix, bsp) @test blocksize(KB) == (2, 2) @test size(KB[Block(1), Block(1)]) == (nfree, nfree) @test size(KB[Block(2), Block(1)]) == (npres, nfree) diff --git a/test/integration/test_simple_scalar_convergence.jl b/test/integration/test_simple_scalar_convergence.jl index d6387fc0d3..4f7543ebc4 100644 --- a/test/integration/test_simple_scalar_convergence.jl +++ b/test/integration/test_simple_scalar_convergence.jl @@ -1,5 +1,5 @@ using Ferrite, Test -import Ferrite: getdim, default_interpolation +import Ferrite: getrefdim, geometric_interpolation module ConvergenceTestHelper @@ -14,19 +14,28 @@ get_geometry(::Ferrite.Interpolation{RefHexahedron}) = Hexahedron get_geometry(::Ferrite.Interpolation{RefTetrahedron}) = Tetrahedron get_geometry(::Ferrite.Interpolation{RefPyramid}) = Pyramid -get_quadrature_order(::Lagrange{shape, order}) where {shape, order} = 2*order -get_quadrature_order(::Serendipity{shape, order}) where {shape, order} = 2*order -get_quadrature_order(::CrouzeixRaviart{shape, order}) where {shape, order} = 2*order+1 -get_quadrature_order(::BubbleEnrichedLagrange{shape, order}) where {shape, order} = 2*order +get_quadrature_order(::Lagrange{shape, order}) where {shape, order} = max(2*order-1,2) +get_quadrature_order(::Lagrange{RefTriangle, 5}) where {shape, order} = 8 +get_quadrature_order(::Lagrange{RefPrism, order}) where order = 2*order # Don't know why +get_quadrature_order(::Serendipity{shape, order}) where {shape, order} = max(2*order-1,2) +get_quadrature_order(::CrouzeixRaviart{shape, order}) where {shape, order} = max(2*order-1,2) +get_quadrature_order(::RannacherTurek{shape, order}) where {shape, order} = max(2*order-1,2) +get_quadrature_order(::BubbleEnrichedLagrange{shape, order}) where {shape, order} = max(2*order-1,2) get_num_elements(::Ferrite.Interpolation{shape, 1}) where {shape} = 21 get_num_elements(::Ferrite.Interpolation{shape, 2}) where {shape} = 7 get_num_elements(::Ferrite.Interpolation{RefHexahedron, 1}) = 11 +get_num_elements(::Ferrite.RannacherTurek{RefQuadrilateral, 1}) = 15 +get_num_elements(::Ferrite.RannacherTurek{RefHexahedron, 1}) = 13 get_num_elements(::Ferrite.Interpolation{RefHexahedron, 2}) = 4 get_num_elements(::Ferrite.Interpolation{shape, 3}) where {shape} = 8 get_num_elements(::Ferrite.Interpolation{shape, 4}) where {shape} = 5 get_num_elements(::Ferrite.Interpolation{shape, 5}) where {shape} = 3 +get_test_tolerance(ip) = 1e-2 +get_test_tolerance(ip::RannacherTurek) = 4e-2 +get_test_tolerance(ip::CrouzeixRaviart) = 4e-2 + analytical_solution(x) = 
prod(cos, x*π/2) analytical_rhs(x) = -Tensors.laplace(analytical_solution,x) @@ -102,7 +111,7 @@ function check_and_compute_convergence_norms(dh, u, cellvalues, testatol) ∇uₐₙₐ = gradient(x-> prod(cos, x*π/2), x) ∇uₐₚₚᵣₒₓ = function_gradient(cellvalues, q_point, uₑ) ∇L2norm += norm(∇uₐₙₐ-∇uₐₚₚᵣₒₓ)^2*dΩ - + # Pointwise convergence @test uₐₙₐ ≈ uₐₚₚᵣₒₓ atol=testatol end @@ -112,7 +121,7 @@ end # Assemble and solve function solve(dh, ch, cellvalues) - K, f = assemble_global(cellvalues, create_sparsity_pattern(dh), dh); + K, f = assemble_global(cellvalues, allocate_matrix(dh), dh); apply!(K, f, ch) u = K \ f; end @@ -125,7 +134,7 @@ function setup_poisson_problem(grid, interpolation, interpolation_geo, qr) ch = ConstraintHandler(dh); ∂Ω = union( - values(grid.facesets)... + values(Ferrite.getfacetsets(grid))... ); dbc = Dirichlet(:u, ∂Ω, (x, t) -> analytical_solution(x)) add!(ch, dbc); @@ -140,7 +149,7 @@ end # module ConvergenceTestHelper # These test only for convergence within margins @testset "convergence analysis" begin - @testset "$interpolation" for interpolation in ( + @testset failfast=true "$interpolation" for interpolation in ( Lagrange{RefTriangle, 3}(), Lagrange{RefTriangle, 4}(), Lagrange{RefTriangle, 5}(), @@ -154,13 +163,16 @@ end # module ConvergenceTestHelper # BubbleEnrichedLagrange{RefTriangle, 1}(), # - CrouzeixRaviart{RefTriangle, 1}(), + CrouzeixRaviart{RefTriangle,1}(), + CrouzeixRaviart{RefTetrahedron,1}(), + RannacherTurek{RefQuadrilateral,1}(), + RannacherTurek{RefHexahedron,1}(), ) # Generate a grid ... geometry = ConvergenceTestHelper.get_geometry(interpolation) - interpolation_geo = default_interpolation(geometry) + interpolation_geo = geometric_interpolation(geometry) N = ConvergenceTestHelper.get_num_elements(interpolation) - grid = generate_grid(geometry, ntuple(x->N, getdim(geometry))); + grid = generate_grid(geometry, ntuple(x->N, getrefdim(geometry))); # ... a suitable quadrature rule ... qr_order = ConvergenceTestHelper.get_quadrature_order(interpolation) qr = QuadratureRule{getrefshape(interpolation)}(qr_order) @@ -173,7 +185,7 @@ end # These test also for correct convergence rates @testset "convergence rate" begin - @testset "$interpolation" for interpolation in ( + @testset failfast=true "$interpolation" for interpolation in ( Lagrange{RefLine, 1}(), Lagrange{RefLine, 2}(), Lagrange{RefQuadrilateral, 1}(), @@ -184,13 +196,17 @@ end Lagrange{RefHexahedron, 2}(), Lagrange{RefTetrahedron, 2}(), Lagrange{RefPrism, 2}(), + CrouzeixRaviart{RefTriangle,1}(), + CrouzeixRaviart{RefTetrahedron,1}(), + RannacherTurek{RefQuadrilateral,1}(), + RannacherTurek{RefHexahedron,1}(), ) # Generate a grid ... geometry = ConvergenceTestHelper.get_geometry(interpolation) - interpolation_geo = default_interpolation(geometry) + interpolation_geo = geometric_interpolation(geometry) # "Coarse case" N₁ = ConvergenceTestHelper.get_num_elements(interpolation) - grid = generate_grid(geometry, ntuple(x->N₁, getdim(geometry))); + grid = generate_grid(geometry, ntuple(x->N₁, getrefdim(geometry))); # ... a suitable quadrature rule ... 
qr_order = ConvergenceTestHelper.get_quadrature_order(interpolation) qr = QuadratureRule{getrefshape(interpolation)}(qr_order) @@ -198,10 +214,10 @@ end dh, ch, cellvalues = ConvergenceTestHelper.setup_poisson_problem(grid, interpolation, interpolation_geo, qr) u = ConvergenceTestHelper.solve(dh, ch, cellvalues) L2₁, H1₁, _ = ConvergenceTestHelper.check_and_compute_convergence_norms(dh, u, cellvalues, 1e-2) - + # "Fine case" N₂ = 2*N₁ - grid = generate_grid(geometry, ntuple(x->N₂, getdim(geometry))); + grid = generate_grid(geometry, ntuple(x->N₂, getrefdim(geometry))); # ... a suitable quadrature rule ... qr_order = ConvergenceTestHelper.get_quadrature_order(interpolation) qr = QuadratureRule{getrefshape(interpolation)}(qr_order) diff --git a/test/runtests.jl b/test/runtests.jl index 72f7d71e53..a390852754 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -7,6 +7,9 @@ import SHA using Random using LinearAlgebra using SparseArrays +using StaticArrays +using OrderedCollections +using WriteVTK const HAS_EXTENSIONS = isdefined(Base, :get_extension) @@ -17,9 +20,11 @@ if HAS_EXTENSIONS && MODULE_CAN_BE_TYPE_PARAMETER import Metis end -const RUN_JET_TESTS = VERSION >= v"1.9" +const RUN_JET_TESTS = VERSION >= v"1.9" && isempty(VERSION.prerelease) if RUN_JET_TESTS + using Pkg: Pkg + Pkg.add("JET") using JET: @test_call else # Just eat the macro on incompatible versions @@ -31,6 +36,7 @@ end include("test_utils.jl") # Unit tests +include("test_collectionsofviews.jl") include("test_interpolations.jl") include("test_cellvalues.jl") include("test_facevalues.jl") @@ -38,9 +44,12 @@ include("test_interfacevalues.jl") include("test_quadrules.jl") include("test_assemble.jl") include("test_dofs.jl") +include("test_sparsity_patterns.jl") include("test_constraints.jl") include("test_grid_dofhandler_vtk.jl") +include("test_vtk_export.jl") include("test_abstractgrid.jl") +include("test_grid_generators.jl") include("test_grid_addboundaryset.jl") include("test_mixeddofhandler.jl") include("test_l2_projection.jl") @@ -48,10 +57,16 @@ include("test_pointevaluation.jl") # include("test_notebooks.jl") include("test_apply_rhs.jl") include("test_apply_analytical.jl") +include("PoolAllocator.jl") include("test_deprecations.jl") HAS_EXTENSIONS && include("blockarrays.jl") include("test_examples.jl") + @test all(x -> isdefined(Ferrite, x), names(Ferrite)) # Test that all exported symbols are defined +# # See which is not defined if fails +# for name in names(Ferrite) +# isdefined(Ferrite, name) || @warn "Ferrite.$name is not defined but $name is exported" +# end # Integration tests include("integration/test_simple_scalar_convergence.jl") diff --git a/test/test_abstractgrid.jl b/test/test_abstractgrid.jl index 67ee1cd0b7..a069e175d7 100644 --- a/test/test_abstractgrid.jl +++ b/test/test_abstractgrid.jl @@ -18,7 +18,6 @@ Ferrite.get_coordinate_eltype(::SmallGrid) = Float64 Ferrite.get_coordinate_type(::SmallGrid{dim}) where dim = Vec{dim,Float64} Ferrite.nnodes_per_cell(grid::SmallGrid, i::Int=1) = Ferrite.nnodes(grid.cells_test[i]) - Ferrite.n_faces_per_cell(grid::SmallGrid) = nfaces(eltype(grid.cells_test)) nodes = [(-1.0,-1.0); (0.0,-1.0); (1.0,-1.0); (-1.0,0.0); (0.0,0.0); (1.0,0.0); (-1.0,1.0); (0.0,1.0); (1.0,1.0)] cells = (Quadrilateral((1,2,5,4)), Quadrilateral((2,3,6,5)), Quadrilateral((4,5,8,7)), Quadrilateral((5,6,9,8))) @@ -28,11 +27,11 @@ ip = Lagrange{RefQuadrilateral, 1}() qr = QuadratureRule{RefQuadrilateral}(2) cellvalues = CellValues(qr, ip); - + dhs = [DofHandler(grid) for grid in (subtype_grid, 
reference_grid)] u1 = Vector{Float64}(undef, 9) u2 = Vector{Float64}(undef, 9) - ∂Ω = union(getfaceset.((reference_grid, ), ["left", "right", "top", "bottom"])...) + ∂Ω = union(getfacetset.((reference_grid, ), ["left", "right", "top", "bottom"])...) dbc = Dirichlet(:u, ∂Ω, (x, t) -> 0) function doassemble!(cellvalues::CellValues, K::SparseMatrixCSC, dh::DofHandler) @@ -70,7 +69,7 @@ add!(ch, dbc) close!(ch) update!(ch, 0.0) - K = create_sparsity_pattern(dh); + K = allocate_matrix(dh); K, f = doassemble!(cellvalues, K, dh); apply!(K, f, ch) sol = K \ f @@ -82,6 +81,10 @@ @test Ferrite.ndofs(dhs[1]) == Ferrite.ndofs(dhs[2]) @test isapprox(u1,u2,atol=1e-8) + minv, maxv = Ferrite.bounding_box(subtype_grid) + @test minv ≈ Vec((-1.0,-1.0)) + @test maxv ≈ Vec((+1.0,+1.0)) + colors1 = Ferrite.create_coloring(subtype_grid, alg = ColoringAlgorithm.WorkStream) colors2 = Ferrite.create_coloring(reference_grid, alg = ColoringAlgorithm.WorkStream) @test all(colors1 .== colors2) diff --git a/test/test_apply_analytical.jl b/test/test_apply_analytical.jl index 45140a4109..e3b6af7877 100644 --- a/test/test_apply_analytical.jl +++ b/test/test_apply_analytical.jl @@ -9,7 +9,7 @@ RefShape = Ferrite.getrefshape(ip) return B{RefShape,order}() end - getcellorder(CT) = Ferrite.getorder(Ferrite.default_interpolation(CT)) + getcellorder(CT) = Ferrite.getorder(Ferrite.geometric_interpolation(CT)) getcelltypedim(::Type{<:Ferrite.AbstractCell{shape}}) where {dim, shape <: Ferrite.AbstractRefShape{dim}} = dim # Functions to create dof handlers for testing @@ -24,7 +24,7 @@ end dh = DofHandler(grid) - default_ip = Ferrite.default_interpolation(CT) + default_ip = Ferrite.geometric_interpolation(CT) try add!(dh, :u, change_ip_order(default_ip, ip_order_u)^dim) add!(dh, :p, change_ip_order(default_ip, ip_order_p)) @@ -51,8 +51,8 @@ else error("Only dim=1 & 2 supported") end - default_ip_A = Ferrite.default_interpolation(getcelltype(grid, first(getcellset(grid,"A")))) - default_ip_B = Ferrite.default_interpolation(getcelltype(grid, first(getcellset(grid,"B")))) + default_ip_A = Ferrite.geometric_interpolation(getcelltype(grid, first(getcellset(grid,"A")))) + default_ip_B = Ferrite.geometric_interpolation(getcelltype(grid, first(getcellset(grid,"B")))) dh = DofHandler(grid) sdh_A = SubDofHandler(dh, getcellset(grid, "A")) add!(sdh_A, :u, change_ip_order(default_ip_A, ip_order_u)^dim) @@ -99,18 +99,17 @@ for ip_order_p in 1:2 dh = testdh(CT, ip_order_u, ip_order_p) isnothing(dh) && continue # generate_grid not supported for this CT, or reference_coordinates not defined - dim = Ferrite.getdim(dh.grid) num_udofs = length(_global_dof_range(dh, :u)) num_pdofs = length(_global_dof_range(dh, :p)) # Test average value a = zeros(ndofs(dh)) - f(x) = ones(Vec{dim}) + f(x) = ones(Ferrite.get_coordinate_type(dh.grid)) apply_analytical!(a, dh, :u, f) @test sum(a)/length(a) ≈ num_udofs/(num_udofs+num_pdofs) - # If not super/subparametric, compare with ConstraintHandler and node set - if ip_order_u==ip_order_p==getcellorder(CT) + # If not super/subparametric, compare with ConstraintHandler and node set + if ip_order_u==ip_order_p==getcellorder(CT) fill!(a, 0) a_ch = copy(a) fp(x) = norm(x)^2 @@ -126,8 +125,8 @@ apply_analytical!(a, dh, :u, fu) apply_analytical!(a, dh, :p, fp) - @test a ≈ a_ch - end + @test a ≈ a_ch + end end end end @@ -146,6 +145,12 @@ f(x) = ones(Vec{dim}) apply_analytical!(a, dh, :u, f) @test sum(a)/length(a) ≈ num_udofs/(num_udofs+num_pdofs) + + # Repeat test with calls for both subdomains separately + a = 
zeros(ndofs(dh)) + apply_analytical!(a, dh, :u, f, getcellset(dh.grid, "A")) + apply_analytical!(a, dh, :u, f, getcellset(dh.grid, "B")) + @test sum(a)/length(a) ≈ num_udofs/(num_udofs+num_pdofs) end end end diff --git a/test/test_apply_rhs.jl b/test/test_apply_rhs.jl index af53bc7342..d21a8fc6f9 100644 --- a/test/test_apply_rhs.jl +++ b/test/test_apply_rhs.jl @@ -3,48 +3,48 @@ function test_apply_rhs() ip = Lagrange{RefQuadrilateral,1}() qr = QuadratureRule{RefQuadrilateral}(2) cellvalues = CellValues(qr, ip) - + dh = DofHandler(grid) add!(dh, :u, ip) close!(dh) - - K = create_sparsity_pattern(dh) - + + K = allocate_matrix(dh) + ch = ConstraintHandler(dh) - - ∂Ω = union(getfaceset.((grid,), ["left", "right"])...) + + ∂Ω = union(getfacetset.((grid,), ["left", "right"])...) dbc = Dirichlet(:u, ∂Ω, (x, t) -> 0) add!(ch, dbc); - ∂Ω = union(getfaceset.((grid,), ["top", "bottom"])...) + ∂Ω = union(getfacetset.((grid,), ["top", "bottom"])...) dbc = Dirichlet(:u, ∂Ω, (x, t) -> 2) add!(ch, dbc); - + close!(ch) update!(ch, 0.0); - + function doassemble!( cellvalues::CellValues, K::SparseMatrixCSC, dh::DofHandler, ) - + n_basefuncs = getnbasefunctions(cellvalues) Ke = zeros(n_basefuncs, n_basefuncs) fe = zeros(n_basefuncs) - + f = zeros(ndofs(dh)) assembler = start_assemble(K, f) - + @inbounds for cell in CellIterator(dh) fill!(Ke, 0) fill!(fe, 0) - + reinit!(cellvalues, cell) - + for q_point = 1:getnquadpoints(cellvalues) dΩ = getdetJdV(cellvalues, q_point) - + for i = 1:n_basefuncs v = shape_value(cellvalues, q_point, i) ∇v = shape_gradient(cellvalues, q_point, i) @@ -55,17 +55,17 @@ function test_apply_rhs() end end end - + assemble!(assembler, celldofs(cell), fe, Ke) end return K, f end - + K, f = doassemble!(cellvalues, K, dh) - A = create_sparsity_pattern(dh) + A = allocate_matrix(dh) A, g = doassemble!(cellvalues, A, dh) rhsdata = get_rhs_data(ch, A) - + apply!(K, f, ch) apply!(A, ch) # need to apply bcs to A once apply_rhs!(rhsdata, g, ch) @@ -73,6 +73,6 @@ function test_apply_rhs() u₂ = A \ g return u₁, u₂ end - + u1, u2 = test_apply_rhs() @test u1 == u2 diff --git a/test/test_cellvalues.jl b/test/test_cellvalues.jl index f414b4c6cb..488fefdc6d 100644 --- a/test/test_cellvalues.jl +++ b/test/test_cellvalues.jl @@ -1,5 +1,5 @@ @testset "CellValues" begin -@testset "ip=$scalar_interpol quad_rule=$(typeof(quad_rule))" for (scalar_interpol, quad_rule) in ( +@testset "ip=$scalar_interpol" for (scalar_interpol, quad_rule) in ( (Lagrange{RefLine, 1}(), QuadratureRule{RefLine}(2)), (Lagrange{RefLine, 2}(), QuadratureRule{RefLine}(2)), (Lagrange{RefQuadrilateral, 1}(), QuadratureRule{RefQuadrilateral}(2)), @@ -16,68 +16,119 @@ (Lagrange{RefPrism, 2}(), QuadratureRule{RefPrism}(2)), (Lagrange{RefPyramid, 2}(), QuadratureRule{RefPyramid}(2)), ) - - for func_interpol in (scalar_interpol, VectorizedInterpolation(scalar_interpol)) + for func_interpol in (scalar_interpol, VectorizedInterpolation(scalar_interpol)), DiffOrder in 1:2 + (DiffOrder==2 && Ferrite.getorder(func_interpol)==1) && continue #No need to test linear interpolations again geom_interpol = scalar_interpol # Tests below assume this n_basefunc_base = getnbasefunctions(scalar_interpol) - cv = CellValues(quad_rule, func_interpol, geom_interpol) - ndim = Ferrite.getdim(func_interpol) + update_gradients = true + update_hessians = (DiffOrder==2 && Ferrite.getorder(func_interpol) > 1) + cv = CellValues(quad_rule, func_interpol, geom_interpol; update_gradients, update_hessians) + if update_gradients && !update_hessians # Check correct and 
type-stable default constructor + cv_default = @inferred CellValues(quad_rule, func_interpol, geom_interpol) + @test typeof(cv) === typeof(cv_default) + @inferred CellValues(quad_rule, func_interpol, geom_interpol; update_gradients=Val(false), update_detJdV=Val(false)) + end + rdim = Ferrite.getrefdim(func_interpol) n_basefuncs = getnbasefunctions(func_interpol) @test getnbasefunctions(cv) == n_basefuncs - x, n = valid_coordinates_and_normals(func_interpol) - reinit!(cv, x) - @test_call reinit!(cv, x) + coords, n = valid_coordinates_and_normals(func_interpol) + reinit!(cv, coords) + @test_call reinit!(cv, coords) # We test this by applying a given deformation gradient on all the nodes. # Since this is a linear deformation we should get back the exact values # from the interpolation. - u = Vec{ndim, Float64}[zero(Tensor{1,ndim}) for i in 1:n_basefunc_base] - u_scal = zeros(n_basefunc_base) - H = rand(Tensor{2, ndim}) - V = rand(Tensor{1, ndim}) + V, G, H = if func_interpol isa Ferrite.ScalarInterpolation + (rand(), rand(Tensor{1, rdim}), Tensor{2, rdim}((i,j)-> i==j ? rand() : 0.0)) + else + (rand(Tensor{1, rdim}), rand(Tensor{2, rdim}), Tensor{3, rdim}((i,j,k)-> i==j==k ? rand() : 0.0)) + end + + u_funk(x,V,G,H) = begin + if update_hessians + 0.5*x⋅H⋅x + G⋅x + V + else + G⋅x + V + end + end + + _ue = [u_funk(coords[i],V,G,H) for i in 1:n_basefunc_base] + ue = reinterpret(Float64, _ue) + + for i in 1:getnquadpoints(cv) + xqp = spatial_coordinate(cv, i, coords) + Hqp, Gqp, Vqp = Tensors.hessian(x -> u_funk(x,V,G,H), xqp, :all) + + @test function_value(cv, i, ue) ≈ Vqp + @test function_gradient(cv, i, ue) ≈ Gqp + if update_hessians + #Note, the jacobian of the element is constant, which makes the hessian (of the mapping) + #zero. So this is not the optimal test + @test Ferrite.function_hessian(cv, i, ue) ≈ Hqp + end + if func_interpol isa Ferrite.VectorInterpolation + @test function_symmetric_gradient(cv, i, ue) ≈ 0.5(Gqp + Gqp') + @test function_divergence(cv, i, ue) ≈ tr(Gqp) + rdim == 3 && @test function_curl(cv, i, ue) ≈ Ferrite.curl_from_gradient(Gqp) + else + @test function_divergence(cv, i, ue) ≈ sum(Gqp) + end + end + + #Test CellValues when input is a ::Vector{<:Vec} (most of which is deprecated) + ue_vec = [zero(Vec{rdim,Float64}) for i in 1:n_basefunc_base] + G_vector = rand(Tensor{2, rdim}) for i in 1:n_basefunc_base - u[i] = H ⋅ x[i] - u_scal[i] = V ⋅ x[i] + ue_vec[i] = G_vector ⋅ coords[i] end - u_vector = reinterpret(Float64, u) for i in 1:getnquadpoints(cv) if func_interpol isa Ferrite.ScalarInterpolation - @test function_gradient(cv, i, u) ≈ H - @test function_symmetric_gradient(cv, i, u) ≈ 0.5(H + H') - @test function_divergence(cv, i, u_scal) ≈ sum(V) - @test function_divergence(cv, i, u) ≈ tr(H) - @test function_gradient(cv, i, u_scal) ≈ V - ndim == 3 && @test function_curl(cv, i, u) ≈ Ferrite.curl_from_gradient(H) - function_value(cv, i, u) - function_value(cv, i, u_scal) + @test function_gradient(cv, i, ue_vec) ≈ G_vector else# func_interpol isa Ferrite.VectorInterpolation - @test function_gradient(cv, i, u_vector) ≈ H - @test (@test_deprecated function_gradient(cv, i, u)) ≈ H - @test function_symmetric_gradient(cv, i, u_vector) ≈ 0.5(H + H') - @test (@test_deprecated function_symmetric_gradient(cv, i, u)) ≈ 0.5(H + H') - @test function_divergence(cv, i, u_vector) ≈ tr(H) - @test (@test_deprecated function_divergence(cv, i, u)) ≈ tr(H) - if ndim == 3 - @test function_curl(cv, i, u_vector) ≈ Ferrite.curl_from_gradient(H) - @test (@test_deprecated function_curl(cv, i, u)) 
≈ Ferrite.curl_from_gradient(H) + @test_throws Ferrite.DeprecationError function_gradient(cv, i, ue_vec) + @test_throws Ferrite.DeprecationError function_symmetric_gradient(cv, i, ue_vec) + @test_throws Ferrite.DeprecationError function_divergence(cv, i, ue_vec) + if rdim == 3 + @test_throws Ferrite.DeprecationError function_curl(cv, i, ue_vec) end - @test function_value(cv, i, u_vector) ≈ (@test_deprecated function_value(cv, i, u)) + @test_throws Ferrite.DeprecationError function_value(cv, i, ue_vec) #no value to test against end end + #Check if the non-linear mapping is correct + #Only do this for one interpolation because it relies on AD of an "iterative function" + if scalar_interpol === Lagrange{RefQuadrilateral, 2}() + coords_nl = [x+rand(x)*0.01 for x in coords] #add some displacement to nodes + reinit!(cv, coords_nl) + + _ue_nl = [u_funk(coords_nl[i],V,G,H) for i in 1:n_basefunc_base] + ue_nl = reinterpret(Float64, _ue_nl) + + for i in 1:getnquadpoints(cv) + xqp = spatial_coordinate(cv, i, coords_nl) + Hqp, Gqp, Vqp = Tensors.hessian(x -> function_value_from_physical_coord(func_interpol, coords_nl, x, ue_nl), xqp, :all) + @test function_value(cv, i, ue_nl) ≈ Vqp + @test function_gradient(cv, i, ue_nl) ≈ Gqp + if update_hessians + @test Ferrite.function_hessian(cv, i, ue_nl) ≈ Hqp + end + end + reinit!(cv, coords) # reinit back to old coords + end + # Test of volume vol = 0.0 for i in 1:getnquadpoints(cv) vol += getdetJdV(cv,i) end - @test vol ≈ calculate_volume(func_interpol, x) + @test vol ≈ calculate_volume(func_interpol, coords) # Test quadrature rule after reinit! with ref. coords - x = Ferrite.reference_coordinates(func_interpol) - reinit!(cv, x) + coords = Ferrite.reference_coordinates(func_interpol) + reinit!(cv, coords) vol = 0.0 for i in 1:getnquadpoints(cv) vol += getdetJdV(cv,i) @@ -86,7 +137,7 @@ # Test spatial coordinate (after reinit with ref.coords we should get back the quad_points) for (i, qp_x) in pairs(Ferrite.getpoints(quad_rule)) - @test spatial_coordinate(cv, i, x) ≈ qp_x + @test spatial_coordinate(cv, i, coords) ≈ qp_x end @testset "copy(::CellValues)" begin @@ -104,7 +155,7 @@ @test v == vc end end - # Test that qr and detJdV is copied as expected. + # Test that qr and detJdV is copied as expected. # Note that qr remain aliased, as defined by `copy(qr)=qr`, see quadrature.jl.
for fname in (:qr, :detJdV) v = getfield(cv, fname) @@ -116,6 +167,27 @@ end end +@testset "GeometryMapping" begin + grid = generate_grid(Quadrilateral, (1,1)) + cc = first(CellIterator(grid)) + + qr = QuadratureRule{RefQuadrilateral}(1) + ξ = first(Ferrite.getpoints(qr)) + ip = Lagrange{RefQuadrilateral,1}() + + cv0 = CellValues(Float64, qr, ip, ip^2; update_detJdV=false, update_gradients=false, update_hessians=false) + reinit!(cv0, cc) + @test Ferrite.calculate_mapping(cv0.geo_mapping, 1, cc.coords) == Ferrite.calculate_mapping(ip, ξ, cc.coords, Val(0)) + + cv1 = CellValues(Float64, qr, ip, ip^2; update_detJdV=false, update_gradients=true, update_hessians=false) + reinit!(cv1, cc) + @test Ferrite.calculate_mapping(cv1.geo_mapping, 1, cc.coords) == Ferrite.calculate_mapping(ip, ξ, cc.coords, Val(1)) + + cv2 = CellValues(Float64, qr, ip, ip^2; update_detJdV=false, update_gradients=false, update_hessians=true) + reinit!(cv2, cc) + @test Ferrite.calculate_mapping(cv2.geo_mapping, 1, cc.coords) == Ferrite.calculate_mapping(ip, ξ, cc.coords, Val(2)) +end + @testset "#265: error message for incompatible geometric interpolation" begin dim = 1 deg = 1 @@ -140,20 +212,20 @@ end qp = 1 ip = Lagrange{RefTriangle,1}() qr = QuadratureRule{RefTriangle}(1) - qr_f = FaceQuadratureRule{RefTriangle}(1) + qr_f = FacetQuadratureRule{RefTriangle}(1) csv = CellValues(qr, ip) cvv = CellValues(qr, VectorizedInterpolation(ip)) csv_embedded = CellValues(qr, ip, ip^3) - fsv = FaceValues(qr_f, ip) - fvv = FaceValues(qr_f, VectorizedInterpolation(ip)) - fsv_embedded = FaceValues(qr_f, ip, ip^3) - + fsv = FacetValues(qr_f, ip) + fvv = FacetValues(qr_f, VectorizedInterpolation(ip)) + fsv_embedded = FacetValues(qr_f, ip, ip^3) + x, n = valid_coordinates_and_normals(ip) reinit!(csv, x) reinit!(cvv, x) reinit!(fsv, x, 1) reinit!(fvv, x, 1) - + # Wrong number of coordinates xx = [x; x] @test_throws ArgumentError reinit!(csv, xx) @@ -166,7 +238,7 @@ end @test_throws ArgumentError spatial_coordinate(fsv, qp, xx) @test_throws ArgumentError spatial_coordinate(fvv, qp, xx) - # Wrong dimension of coordinates + # Wrong dimension of coordinates @test_throws ArgumentError reinit!(csv_embedded, x) @test_throws ArgumentError reinit!(fsv_embedded, x, 1) @@ -302,11 +374,23 @@ end @test zeros(vdim) == function_gradient(csv3, 1, ue)[:, 3] end end + + @testset "CellValues with hessians" begin + ip = Lagrange{RefQuadrilateral,2}() + qr = QuadratureRule{RefQuadrilateral}(2) + + cv_vector = CellValues(qr, ip^2, ip^3; update_hessians = true) + cv_scalar = CellValues(qr, ip, ip^3; update_hessians = true) + + coords = [Vec{3}((x[1], x[2], 0.0)) for x in Ferrite.reference_coordinates(ip)] + @test_throws ErrorException reinit!(cv_vector, coords) #Not implemented for embedded elements + @test_throws ErrorException reinit!(cv_scalar, coords) + end end @testset "CellValues constructor entry points" begin qr = QuadratureRule{RefTriangle}(1) - + for fun_ip in (Lagrange{RefTriangle, 1}(), Lagrange{RefTriangle, 2}()^2) value_type(T) = fun_ip isa ScalarInterpolation ? T : Vec{2, T} grad_type(T) = fun_ip isa ScalarInterpolation ? 
Vec{2, T} : Tensor{2, 2, T, 4} @@ -356,13 +440,13 @@ end end @testset "CustomCellValues" begin - + @testset "SimpleCellValues" begin include(joinpath(@__DIR__, "../docs/src/topics/SimpleCellValues_literate.jl")) end - + @testset "TestCustomCellValues" begin - + struct TestCustomCellValues{CV<:CellValues} <: Ferrite.AbstractValues cv::CV end @@ -382,7 +466,7 @@ end ae = rand(getnbasefunctions(cv)) q_point = rand(1:getnquadpoints(cv)) cv_custom = TestCustomCellValues(cv) - for fun in (function_value, function_gradient, + for fun in (function_value, function_gradient, function_divergence, function_symmetric_gradient, function_curl) @test fun(cv_custom, q_point, ae) == fun(cv, q_point, ae) end diff --git a/test/test_collectionsofviews.jl b/test/test_collectionsofviews.jl new file mode 100644 index 0000000000..4bca3ff5a2 --- /dev/null +++ b/test/test_collectionsofviews.jl @@ -0,0 +1,46 @@ +@testset "ArrayOfVectorViews" begin + # Create a vector sorting integers into bins and check + test_ints = rand(0:99, 100) + # Create for 3 different sizehints + aovs = map([20, 1, 100]) do sh + Ferrite.ArrayOfVectorViews(Int[], (10,); sizehint=sh) do buf + for v in test_ints + idx = 1 + v ÷ 10 + Ferrite.push_at_index!(buf, v, idx) + end + end + end + # Check correct values for the first one + for (idx, v) in enumerate(aovs[1]) + interval = (10 * (idx-1)):(10 * idx - 1) + @test all(x -> x ∈ interval, v) + @test count(x -> x ∈ interval, test_ints) == length(v) + end + for aov in aovs + @test sum(length, aov; init=0) == length(test_ints) + end + # Check that the result is independent of sizehint + for idx in eachindex(aovs[1]) + for aov in aovs[2:end] + @test aovs[1][idx] == aov[idx] + end + end + + # Create an array containing random tuples and place the tuples + # according to the values. Check for 2d and 3d arrays. + for N in 2:3 + tvals = [ntuple(i->rand(0:9), N) for _ in 1:1000] + aov = Ferrite.ArrayOfVectorViews(NTuple{N, Int}[], (5,5,5)[1:N]; sizehint=10) do buf + for v in tvals + idx = 1 .+ v .÷ 2 + Ferrite.push_at_index!(buf, v, idx...)
+ end + end + @test sum(length, aov; init=0) == length(tvals) + for (idx, v) in pairs(aov) + intervals = map(i -> (2 * (i-1)):(2 * i - 1), idx.I) + @test all(x -> all(map((z, r) -> z ∈ r, x, intervals)), v) + @test count(x -> all(map((z, r) -> z ∈ r, x, intervals)), tvals) == length(v) + end + end +end diff --git a/test/test_constraints.jl b/test/test_constraints.jl index 5b511632e5..1600d87288 100644 --- a/test/test_constraints.jl +++ b/test/test_constraints.jl @@ -2,8 +2,8 @@ @testset "constructors and error checking" begin grid = generate_grid(Triangle, (2, 2)) - Γ = getfaceset(grid, "left") - face_map = collect_periodic_faces(grid, "left", "right") + Γ = getfacetset(grid, "left") + face_map = collect_periodic_facets(grid, "left", "right") dh = DofHandler(grid) add!(dh, :s, Lagrange{RefTriangle,1}()) add!(dh, :v, Lagrange{RefTriangle,1}()^2) @@ -17,7 +17,7 @@ @test_throws ErrorException("components not sorted: [2, 1]") Dirichlet(:u, Γ, (x, t) -> 0, Int[2, 1]) @test_throws ErrorException("components not unique: [2, 2]") Dirichlet(:u, Γ, (x, t) -> 0, Int[2, 2]) @test_throws ErrorException("No dof prescribed for order 0 interpolations") add!(ch, Dirichlet(:z, Γ, (x, t) -> 0)) - for (s, v) in [(:s, :v), (:sd, :vd)] + for (s, v) in [(:s, :v), (:sd, :vd)] ## Scalar dbc = Dirichlet(s, Γ, (x, t) -> 0) add!(ch, dbc) @@ -86,8 +86,9 @@ end add!(dh, :p, Lagrange{RefTriangle,1}()) close!(dh) ch = ConstraintHandler(dh) - dbc1 = Dirichlet(:u, getnodeset(grid, "nodeset"), (x,t) -> x, [1, 2]) - dbc2 = Dirichlet(:p, getnodeset(grid, "nodeset"), (x,t) -> 0, 1) + dbc1 = Dirichlet(:u, getnodeset(grid, "nodeset"), (x, t) -> x, [1, 2]) + # Add type-spec to function, test https://github.com/Ferrite-FEM/Ferrite.jl/issues/1006 + dbc2 = Dirichlet(:p, getnodeset(grid, "nodeset"), (x::Vec, t::Real) -> 0, 1) add!(ch, dbc1) add!(ch, dbc2) close!(ch) @@ -140,8 +141,8 @@ end close!(dh) ch = ConstraintHandler(dh) - add!(ch, Dirichlet(:u, getfaceset(mesh, "bottom"), (x,t)->1.0, 1)) - add!(ch, Dirichlet(:c, getfaceset(mesh, "bottom"), (x,t)->2.0, 1)) + add!(ch, Dirichlet(:u, getfacetset(mesh, "bottom"), (x,t)->1.0, 1)) + add!(ch, Dirichlet(:c, getfacetset(mesh, "bottom"), (x,t)->2.0, 1)) close!(ch) update!(ch) @@ -151,7 +152,7 @@ end @testset "edge bc" begin grid = generate_grid(Hexahedron, (1, 1, 1)) - addedgeset!(grid, "edge", x-> x[1] ≈ -1.0 && x[3] ≈ -1.0) + edge = Ferrite.create_edgeset(grid, x-> x[1] ≈ -1.0 && x[3] ≈ -1.0) dh = DofHandler(grid) add!(dh, :u, Lagrange{RefHexahedron,1}()^3) @@ -159,7 +160,7 @@ end close!(dh) ch = ConstraintHandler(dh) - dbc1 = Dirichlet(:u, getedgeset(grid, "edge"), (x,t) -> x, [1, 2, 3]) + dbc1 = Dirichlet(:u, edge, (x,t) -> x, [1, 2, 3]) add!(ch, dbc1) close!(ch) update!(ch) @@ -169,7 +170,7 @@ end #Shell mesh edge bcs - nodes = [Node{3,Float64}(Vec(0.0,0.0,0.0)), Node{3,Float64}(Vec(1.0,0.0,0.0)), + nodes = [Node{3,Float64}(Vec(0.0,0.0,0.0)), Node{3,Float64}(Vec(1.0,0.0,0.0)), Node{3,Float64}(Vec(1.0,1.0,0.0)), Node{3,Float64}(Vec(0.0,1.0,0.0)), Node{3,Float64}(Vec(2.0,0.0,0.0)), Node{3,Float64}(Vec(2.0,2.0,0.0))] @@ -182,9 +183,9 @@ end add!(dh, :θ, Lagrange{RefQuadrilateral,2}()) close!(dh) - addedgeset!(grid, "edge", x -> x[2] ≈ 0.0) #bottom edge + edge = Ferrite.create_edgeset(grid, x -> x[2] ≈ 0.0) #bottom edge ch = ConstraintHandler(dh) - dbc1 = Dirichlet(:θ, getedgeset(grid, "edge"), (x,t) -> (0.0,), [1]) + dbc1 = Dirichlet(:θ, edge, (x,t) -> (0.0,), [1]) add!(ch, dbc1) close!(ch) update!(ch) @@ -194,20 +195,20 @@ end @testset "discontinuous ip constraints" begin grid = 
generate_grid(Hexahedron, (1, 1, 1)) - addedgeset!(grid, "bottom", x-> x[3] ≈ -1.0) + bottom_edge = Ferrite.create_edgeset(grid, x-> x[3] ≈ -1.0) dh = DofHandler(grid) add!(dh, :u, DiscontinuousLagrange{RefHexahedron,1}()^3) add!(dh, :p, DiscontinuousLagrange{RefHexahedron,1}()) close!(dh) face_ch = ConstraintHandler(dh) - face_dbc = Dirichlet(:u, getfaceset(grid, "bottom"), (x,t) -> x, [1, 2, 3]) + face_dbc = Dirichlet(:u, getfacetset(grid, "bottom"), (x,t) -> x, [1, 2, 3]) add!(face_ch, face_dbc) close!(face_ch) update!(face_ch) edge_ch = ConstraintHandler(dh) - edge_dbc = Dirichlet(:u, getedgeset(grid, "bottom"), (x,t) -> x, [1, 2, 3]) + edge_dbc = Dirichlet(:u, bottom_edge, (x,t) -> x, [1, 2, 3]) add!(edge_ch, edge_dbc) close!(edge_ch) update!(edge_ch) @@ -217,7 +218,7 @@ end # This can be merged with the continuous test or removed. # Shell mesh edge bcs - nodes = [Node{3,Float64}(Vec(0.0,0.0,0.0)), Node{3,Float64}(Vec(1.0,0.0,0.0)), + nodes = [Node{3,Float64}(Vec(0.0,0.0,0.0)), Node{3,Float64}(Vec(1.0,0.0,0.0)), Node{3,Float64}(Vec(1.0,1.0,0.0)), Node{3,Float64}(Vec(0.0,1.0,0.0)), Node{3,Float64}(Vec(2.0,0.0,0.0)), Node{3,Float64}(Vec(2.0,2.0,0.0))] @@ -230,9 +231,9 @@ end add!(dh, :θ, DiscontinuousLagrange{RefQuadrilateral,2}()) close!(dh) - addedgeset!(grid, "bottom", x -> x[2] ≈ 0.0) #bottom edge + bottom_edge = Ferrite.create_edgeset(grid, x -> x[2] ≈ 0.0) edge_ch = ConstraintHandler(dh) - edge_dbc = Dirichlet(:θ, getedgeset(grid, "bottom"), (x,t) -> (0.0,), [1]) + edge_dbc = Dirichlet(:θ, bottom_edge, (x,t) -> (0.0,), [1]) add!(edge_ch, edge_dbc) close!(edge_ch) update!(edge_ch) @@ -267,7 +268,7 @@ end # grid = Grid(cells, nodes, cellsets=cellsets, facesets=facesets) # # Create DofHandler based on grid -# dim = Ferrite.getdim(grid) # 2 +# dim = Ferrite.getspatialdim(grid) # 2 # ip_quad = Lagrange{RefQuadrilateral,1}() # ip_tria = Lagrange{RefTetrahedron,1}() # dh = DofHandler(grid) @@ -285,11 +286,11 @@ end # # Add constraints # ch = ConstraintHandler(dh) -# dA_u = Dirichlet(:u, getfaceset(grid, "A"), (x,t) -> 1.0) -# dA_v = Dirichlet(:v, getfaceset(grid, "A"), (x,t) -> 2.0) -# dB_u = Dirichlet(:u, getfaceset(grid, "B"), (x,t) -> 3.0) # Note, overwrites dA_u on node 3 -# dB_v = Dirichlet(:v, getfaceset(grid, "B"), (x,t) -> 4.0) # :v not on cells with "B"-faces -# dC_v = Dirichlet(:v, getfaceset(grid, "C"), (x,t) -> 5.0) # :v not on cells with "C"-faces +# dA_u = Dirichlet(:u, getfacetset(grid, "A"), (x,t) -> 1.0) +# dA_v = Dirichlet(:v, getfacetset(grid, "A"), (x,t) -> 2.0) +# dB_u = Dirichlet(:u, getfacetset(grid, "B"), (x,t) -> 3.0) # Note, overwrites dA_u on node 3 +# dB_v = Dirichlet(:v, getfacetset(grid, "B"), (x,t) -> 4.0) # :v not on cells with "B"-faces +# dC_v = Dirichlet(:v, getfacetset(grid, "C"), (x,t) -> 5.0) # :v not on cells with "C"-faces # dN_u = Dirichlet(:u, Set(10), (x,t) -> 6.0) # Add on node 10 # @test_logs min_level=Logging.Warn add!(ch, dA_u) # No warning should be issued @@ -334,7 +335,7 @@ end for acs in test_acs ch = ConstraintHandler(dh) - add!(ch, Dirichlet(:u, getfaceset(grid, "left"), (x,t)->0.0)) + add!(ch, Dirichlet(:u, getfacetset(grid, "left"), (x,t)->0.0)) for lc in acs add!(ch, lc) end @@ -343,9 +344,9 @@ end C, g = Ferrite.create_constraint_matrix(ch) # Assemble - K = create_sparsity_pattern(dh, ch) + K = allocate_matrix(dh, ch) f = zeros(ndofs(dh)); f[end] = 1.0 - Kl = create_sparsity_pattern(dh, ch) + Kl = allocate_matrix(dh, ch) fl = copy(f) assembler = start_assemble(Kl, fl) for cell in CellIterator(dh) @@ -384,15 +385,15 @@ end @test a ≈ 
aa ≈ al ≈ a_rhs1 ≈ a_rhs2 end - # Test nonlinear solution procedure (on linear problem) with affine constraints + # Test nonlinear solution procedure (on linear problem) with affine constraints # using standard assembly (i.e. not local condensation) @testset "nonlinear" begin params = (k=1.0, f=1.0, a=1.0, b=0.2, tol=1e-10, maxiter=2) - grid = generate_grid(Line, (2,)); addfaceset!(grid, "center", x->x[1]≈0.0) + grid = generate_grid(Line, (2,)); addfacetset!(grid, "center", x->x[1]≈0.0) dh = DofHandler(grid); add!(dh, :u, Lagrange{RefLine,1}()); close!(dh) function doassemble!(K, r, dh, a, params) - # Spring elements + # Spring elements k = params.k Ke = [k -k; -k k] # Quick and dirty assem @@ -406,21 +407,21 @@ end end ch = ConstraintHandler(dh) - add!(ch, Dirichlet(:u, getfaceset(grid, "center"), (x,t)->Vec{1}((0.0,)))) + add!(ch, Dirichlet(:u, getfacetset(grid, "center"), (x,t)->Vec{1}((0.0,)))) add!(ch, AffineConstraint(1, [3=>params.a], params.b)) close!(ch) - K = create_sparsity_pattern(dh, ch) + K = allocate_matrix(dh, ch) r = zeros(ndofs(dh)) a = zeros(ndofs(dh)) # Nonlinear solution - apply!(a, ch) + apply!(a, ch) for niter = 0:params.maxiter doassemble!(K, r, dh, a, params) apply_zero!(K, r, ch) norm(r) < params.tol && break - Δa = -K\r + Δa = -K\r apply_zero!(Δa, ch) a .+= Δa end @@ -451,10 +452,10 @@ function rotpio2(v, dir=3) return typeof(v)(i -> rv[i]) end -@testset "periodic bc: collect_periodic_faces" begin +@testset "periodic bc: collect_periodic_facets" begin # 1D (TODO: Broken) # grid = generate_grid(Line, (2,)) - # face_map = collect_periodic_faces(grid) + # face_map = collect_periodic_facets(grid) # 2D quad grid @@ -471,62 +472,62 @@ end # 1 1 for grid in (generate_grid(Quadrilateral, (2, 2)), generate_grid(QuadraticQuadrilateral, (2, 2))) correct_map = [ - PeriodicFacePair(FaceIndex(1, 1), FaceIndex(3, 3), 0x00, true), - PeriodicFacePair(FaceIndex(2, 1), FaceIndex(4, 3), 0x00, true), - PeriodicFacePair(FaceIndex(1, 4), FaceIndex(2, 2), 0x00, true), - PeriodicFacePair(FaceIndex(3, 4), FaceIndex(4, 2), 0x00, true), + PeriodicFacetPair(FacetIndex(1, 1), FacetIndex(3, 3), 0x00, true), + PeriodicFacetPair(FacetIndex(2, 1), FacetIndex(4, 3), 0x00, true), + PeriodicFacetPair(FacetIndex(1, 4), FacetIndex(2, 2), 0x00, true), + PeriodicFacetPair(FacetIndex(3, 4), FacetIndex(4, 2), 0x00, true), ] # Brute force path with no boundary info - face_map = collect_periodic_faces(grid) + face_map = collect_periodic_facets(grid) @test issetequal(face_map, correct_map) # Brute force path with boundary info - face_map = collect_periodic_faces(grid, + face_map = collect_periodic_facets(grid, union( - getfaceset(grid, "left"), - getfaceset(grid, "bottom"), + getfacetset(grid, "left"), + getfacetset(grid, "bottom"), ), union( - getfaceset(grid, "right"), - getfaceset(grid, "top"), + getfacetset(grid, "right"), + getfacetset(grid, "top"), ) ) @test issetequal(face_map, correct_map) # Brute force, keeping the mirror/image ordering - face_map = collect_periodic_faces(grid, + face_map = collect_periodic_facets(grid, union( - getfaceset(grid, "right"), - getfaceset(grid, "top"), + getfacetset(grid, "right"), + getfacetset(grid, "top"), ), union( - getfaceset(grid, "left"), - getfaceset(grid, "bottom"), + getfacetset(grid, "left"), + getfacetset(grid, "bottom"), ) ) - @test issetequal(face_map, map(x -> PeriodicFacePair(x.image, x.mirror, x.rotation, x.mirrored), correct_map)) + @test issetequal(face_map, map(x -> PeriodicFacetPair(x.image, x.mirror, x.rotation, x.mirrored), correct_map)) # Known 
pairs with transformation - face_map = collect_periodic_faces(grid, "left", "right", x -> x - Vec{2}((2.0, 0.0))) - collect_periodic_faces!(face_map, grid, "bottom", "top", x -> x - Vec{2}((0.0, 2.0))) + face_map = collect_periodic_facets(grid, "left", "right", x -> x - Vec{2}((2.0, 0.0))) + collect_periodic_facets!(face_map, grid, "bottom", "top", x -> x - Vec{2}((0.0, 2.0))) @test issetequal(face_map, correct_map) # More advanced transformation by rotation - face_map = collect_periodic_faces(grid, "left", "bottom", rotpio2) - collect_periodic_faces!(face_map, grid, "right", "top", rotpio2) + face_map = collect_periodic_facets(grid, "left", "bottom", rotpio2) + collect_periodic_facets!(face_map, grid, "right", "top", rotpio2) @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(3, 4), FaceIndex(1, 1), 0x00, false), - PeriodicFacePair(FaceIndex(1, 4), FaceIndex(2, 1), 0x00, false), - PeriodicFacePair(FaceIndex(2, 2), FaceIndex(4, 3), 0x00, false), - PeriodicFacePair(FaceIndex(4, 2), FaceIndex(3, 3), 0x00, false), + PeriodicFacetPair(FacetIndex(3, 4), FacetIndex(1, 1), 0x00, false), + PeriodicFacetPair(FacetIndex(1, 4), FacetIndex(2, 1), 0x00, false), + PeriodicFacetPair(FacetIndex(2, 2), FacetIndex(4, 3), 0x00, false), + PeriodicFacetPair(FacetIndex(4, 2), FacetIndex(3, 3), 0x00, false), ]) # Rotate and translate - face_map = collect_periodic_faces(grid, "bottom", "left", x -> rotpio2(x) - Vec{2}((0.0, 2.0))) + face_map = collect_periodic_facets(grid, "bottom", "left", x -> rotpio2(x) - Vec{2}((0.0, 2.0))) @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(1, 1), FaceIndex(1, 4), 0x00, true), - PeriodicFacePair(FaceIndex(2, 1), FaceIndex(3, 4), 0x00, true), + PeriodicFacetPair(FacetIndex(1, 1), FacetIndex(1, 4), 0x00, true), + PeriodicFacetPair(FacetIndex(2, 1), FacetIndex(3, 4), 0x00, true), ]) end @@ -548,62 +549,62 @@ end for grid in (generate_grid(Triangle, (2, 2)), generate_grid(QuadraticTriangle, (2, 2))) correct_map = [ - PeriodicFacePair(FaceIndex(1, 1), FaceIndex(6, 2), 0x00, true), - PeriodicFacePair(FaceIndex(3, 1), FaceIndex(8, 2), 0x00, true), - PeriodicFacePair(FaceIndex(1, 3), FaceIndex(4, 1), 0x00, true), - PeriodicFacePair(FaceIndex(5, 3), FaceIndex(8, 1), 0x00, true), + PeriodicFacetPair(FacetIndex(1, 1), FacetIndex(6, 2), 0x00, true), + PeriodicFacetPair(FacetIndex(3, 1), FacetIndex(8, 2), 0x00, true), + PeriodicFacetPair(FacetIndex(1, 3), FacetIndex(4, 1), 0x00, true), + PeriodicFacetPair(FacetIndex(5, 3), FacetIndex(8, 1), 0x00, true), ] # Brute force path with no boundary info - face_map = collect_periodic_faces(grid) + face_map = collect_periodic_facets(grid) @test issetequal(face_map, correct_map) # Brute force path with boundary info - face_map = collect_periodic_faces(grid, + face_map = collect_periodic_facets(grid, union( - getfaceset(grid, "left"), - getfaceset(grid, "bottom"), + getfacetset(grid, "left"), + getfacetset(grid, "bottom"), ), union( - getfaceset(grid, "right"), - getfaceset(grid, "top"), + getfacetset(grid, "right"), + getfacetset(grid, "top"), ) ) @test issetequal(face_map, correct_map) # Brute force, keeping the mirror/image ordering - face_map = collect_periodic_faces(grid, + face_map = collect_periodic_facets(grid, union( - getfaceset(grid, "right"), - getfaceset(grid, "top"), + getfacetset(grid, "right"), + getfacetset(grid, "top"), ), union( - getfaceset(grid, "left"), - getfaceset(grid, "bottom"), + getfacetset(grid, "left"), + getfacetset(grid, "bottom"), ) ) - @test issetequal(face_map, map(x -> PeriodicFacePair(x.image, 
x.mirror, x.rotation, x.mirrored), correct_map)) + @test issetequal(face_map, map(x -> PeriodicFacetPair(x.image, x.mirror, x.rotation, x.mirrored), correct_map)) # Known pairs with transformation - face_map = collect_periodic_faces(grid, "left", "right", x -> x - Vec{2}((2.0, 0.0))) - collect_periodic_faces!(face_map, grid, "bottom", "top", x -> x - Vec{2}((0.0, 2.0))) + face_map = collect_periodic_facets(grid, "left", "right", x -> x - Vec{2}((2.0, 0.0))) + collect_periodic_facets!(face_map, grid, "bottom", "top", x -> x - Vec{2}((0.0, 2.0))) @test issetequal(face_map, correct_map) # More advanced transformation by rotation - face_map = collect_periodic_faces(grid, "left", "bottom", rotpio2) - collect_periodic_faces!(face_map, grid, "right", "top", rotpio2) + face_map = collect_periodic_facets(grid, "left", "bottom", rotpio2) + collect_periodic_facets!(face_map, grid, "right", "top", rotpio2) @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(5, 3), FaceIndex(1, 1), 0x00, false), - PeriodicFacePair(FaceIndex(1, 3), FaceIndex(3, 1), 0x00, false), - PeriodicFacePair(FaceIndex(4, 1), FaceIndex(8, 2), 0x00, false), - PeriodicFacePair(FaceIndex(8, 1), FaceIndex(6, 2), 0x00, false), + PeriodicFacetPair(FacetIndex(5, 3), FacetIndex(1, 1), 0x00, false), + PeriodicFacetPair(FacetIndex(1, 3), FacetIndex(3, 1), 0x00, false), + PeriodicFacetPair(FacetIndex(4, 1), FacetIndex(8, 2), 0x00, false), + PeriodicFacetPair(FacetIndex(8, 1), FacetIndex(6, 2), 0x00, false), ]) # Rotate and translate - face_map = collect_periodic_faces(grid, "bottom", "left", x -> rotpio2(x) - Vec{2}((0.0, 2.0))) + face_map = collect_periodic_facets(grid, "bottom", "left", x -> rotpio2(x) - Vec{2}((0.0, 2.0))) @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(1, 1), FaceIndex(1, 3), 0x00, true), - PeriodicFacePair(FaceIndex(3, 1), FaceIndex(5, 3), 0x00, true), + PeriodicFacetPair(FacetIndex(1, 1), FacetIndex(1, 3), 0x00, true), + PeriodicFacetPair(FacetIndex(3, 1), FacetIndex(5, 3), 0x00, true), ]) end @@ -611,120 +612,120 @@ end # 3D hex grids grid = generate_grid(Hexahedron, (1, 1, 1)) - face_map = collect_periodic_faces(grid) + face_map = collect_periodic_facets(grid) @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(1, 1), FaceIndex(1, 6), 0x00, true), - PeriodicFacePair(FaceIndex(1, 2), FaceIndex(1, 4), 0x03, true), - PeriodicFacePair(FaceIndex(1, 5), FaceIndex(1, 3), 0x00, true), + PeriodicFacetPair(FacetIndex(1, 1), FacetIndex(1, 6), 0x00, true), + PeriodicFacetPair(FacetIndex(1, 2), FacetIndex(1, 4), 0x03, true), + PeriodicFacetPair(FacetIndex(1, 5), FacetIndex(1, 3), 0x00, true), ]) grid = generate_grid(Hexahedron, (2, 2, 2)) - face_map = collect_periodic_faces(grid, "left", "right", x -> x - Vec{3}((2.0, 0.0, 0.0))) - collect_periodic_faces!(face_map, grid, "bottom", "top") - collect_periodic_faces!(face_map, grid, "front", "back") + face_map = collect_periodic_facets(grid, "left", "right", x -> x - Vec{3}((2.0, 0.0, 0.0))) + collect_periodic_facets!(face_map, grid, "bottom", "top") + collect_periodic_facets!(face_map, grid, "front", "back") @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(1, 5), FaceIndex(2, 3), 0x00, true), - PeriodicFacePair(FaceIndex(3, 5), FaceIndex(4, 3), 0x00, true), - PeriodicFacePair(FaceIndex(5, 5), FaceIndex(6, 3), 0x00, true), - PeriodicFacePair(FaceIndex(7, 5), FaceIndex(8, 3), 0x00, true), - PeriodicFacePair(FaceIndex(1, 1), FaceIndex(5, 6), 0x00, true), - PeriodicFacePair(FaceIndex(2, 1), FaceIndex(6, 6), 0x00, true), - PeriodicFacePair(FaceIndex(3, 1), 
FaceIndex(7, 6), 0x00, true), - PeriodicFacePair(FaceIndex(4, 1), FaceIndex(8, 6), 0x00, true), - PeriodicFacePair(FaceIndex(1, 2), FaceIndex(3, 4), 0x03, true), - PeriodicFacePair(FaceIndex(2, 2), FaceIndex(4, 4), 0x03, true), - PeriodicFacePair(FaceIndex(5, 2), FaceIndex(7, 4), 0x03, true), - PeriodicFacePair(FaceIndex(6, 2), FaceIndex(8, 4), 0x03, true), + PeriodicFacetPair(FacetIndex(1, 5), FacetIndex(2, 3), 0x00, true), + PeriodicFacetPair(FacetIndex(3, 5), FacetIndex(4, 3), 0x00, true), + PeriodicFacetPair(FacetIndex(5, 5), FacetIndex(6, 3), 0x00, true), + PeriodicFacetPair(FacetIndex(7, 5), FacetIndex(8, 3), 0x00, true), + PeriodicFacetPair(FacetIndex(1, 1), FacetIndex(5, 6), 0x00, true), + PeriodicFacetPair(FacetIndex(2, 1), FacetIndex(6, 6), 0x00, true), + PeriodicFacetPair(FacetIndex(3, 1), FacetIndex(7, 6), 0x00, true), + PeriodicFacetPair(FacetIndex(4, 1), FacetIndex(8, 6), 0x00, true), + PeriodicFacetPair(FacetIndex(1, 2), FacetIndex(3, 4), 0x03, true), + PeriodicFacetPair(FacetIndex(2, 2), FacetIndex(4, 4), 0x03, true), + PeriodicFacetPair(FacetIndex(5, 2), FacetIndex(7, 4), 0x03, true), + PeriodicFacetPair(FacetIndex(6, 2), FacetIndex(8, 4), 0x03, true), ]) # Rotation grid = generate_grid(Hexahedron, (2, 2, 2)) - face_map = collect_periodic_faces(grid, "left", "front", rotpio2) + face_map = collect_periodic_facets(grid, "left", "front", rotpio2) @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(1, 5), FaceIndex(2, 2), 0x03, false), - PeriodicFacePair(FaceIndex(3, 5), FaceIndex(1, 2), 0x03, false), - PeriodicFacePair(FaceIndex(5, 5), FaceIndex(6, 2), 0x03, false), - PeriodicFacePair(FaceIndex(7, 5), FaceIndex(5, 2), 0x03, false), + PeriodicFacetPair(FacetIndex(1, 5), FacetIndex(2, 2), 0x03, false), + PeriodicFacetPair(FacetIndex(3, 5), FacetIndex(1, 2), 0x03, false), + PeriodicFacetPair(FacetIndex(5, 5), FacetIndex(6, 2), 0x03, false), + PeriodicFacetPair(FacetIndex(7, 5), FacetIndex(5, 2), 0x03, false), ]) # Rotation and translation grid = generate_grid(Hexahedron, (2, 2, 2)) - face_map = collect_periodic_faces(grid, "front", "left", x -> rotpio2(x) - Vec{3}((0.0, 2.0, 0.0))) + face_map = collect_periodic_facets(grid, "front", "left", x -> rotpio2(x) - Vec{3}((0.0, 2.0, 0.0))) @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(1, 2), FaceIndex(1, 5), 0x00, true), - PeriodicFacePair(FaceIndex(2, 2), FaceIndex(3, 5), 0x00, true), - PeriodicFacePair(FaceIndex(5, 2), FaceIndex(5, 5), 0x00, true), - PeriodicFacePair(FaceIndex(6, 2), FaceIndex(7, 5), 0x00, true), + PeriodicFacetPair(FacetIndex(1, 2), FacetIndex(1, 5), 0x00, true), + PeriodicFacetPair(FacetIndex(2, 2), FacetIndex(3, 5), 0x00, true), + PeriodicFacetPair(FacetIndex(5, 2), FacetIndex(5, 5), 0x00, true), + PeriodicFacetPair(FacetIndex(6, 2), FacetIndex(7, 5), 0x00, true), ]) # Test with keyword tol grid = generate_grid(Hexahedron, (2, 2, 2)) - face_map = collect_periodic_faces(grid, "bottom", "top") - face_map_TOL = collect_periodic_faces(grid, "bottom", "top"; tol=1e-10) + face_map = collect_periodic_facets(grid, "bottom", "top") + face_map_TOL = collect_periodic_facets(grid, "bottom", "top"; tol=1e-10) @test face_map == face_map_TOL - collect_periodic_faces!(face_map, grid, "right", "left") - collect_periodic_faces!(face_map_TOL, grid, "right", "left"; tol=1e-10) + collect_periodic_facets!(face_map, grid, "right", "left") + collect_periodic_facets!(face_map_TOL, grid, "right", "left"; tol=1e-10) @test face_map == face_map_TOL #################################################################### # 3D 
tetra grid grid = generate_grid(Tetrahedron, (1, 1, 1)) - face_map = collect_periodic_faces(grid) + face_map = collect_periodic_facets(grid) @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(1, 4), FaceIndex(4, 1), 0x00, true) - PeriodicFacePair(FaceIndex(2, 2), FaceIndex(6, 1), 0x00, true) - PeriodicFacePair(FaceIndex(2, 1), FaceIndex(3, 3), 0x02, true) - PeriodicFacePair(FaceIndex(5, 1), FaceIndex(4, 3), 0x02, true) - PeriodicFacePair(FaceIndex(1, 1), FaceIndex(5, 3), 0x00, true) - PeriodicFacePair(FaceIndex(3, 1), FaceIndex(6, 3), 0x00, true) + PeriodicFacetPair(FacetIndex(1, 4), FacetIndex(4, 1), 0x00, true) + PeriodicFacetPair(FacetIndex(2, 2), FacetIndex(6, 1), 0x00, true) + PeriodicFacetPair(FacetIndex(2, 1), FacetIndex(3, 3), 0x02, true) + PeriodicFacetPair(FacetIndex(5, 1), FacetIndex(4, 3), 0x02, true) + PeriodicFacetPair(FacetIndex(1, 1), FacetIndex(5, 3), 0x00, true) + PeriodicFacetPair(FacetIndex(3, 1), FacetIndex(6, 3), 0x00, true) ]) grid = generate_grid(Tetrahedron, (2, 2, 2)) - face_map = collect_periodic_faces(grid, "left", "right", x -> x - Vec{3}((2.0, 0.0, 0.0))) - collect_periodic_faces!(face_map, grid, "bottom", "top") - collect_periodic_faces!(face_map, grid, "front", "back") + face_map = collect_periodic_facets(grid, "left", "right", x -> x - Vec{3}((2.0, 0.0, 0.0))) + collect_periodic_facets!(face_map, grid, "bottom", "top") + collect_periodic_facets!(face_map, grid, "front", "back") @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(1, 4), FaceIndex(10, 1), 0x00, true) - PeriodicFacePair(FaceIndex(2, 2), FaceIndex(12, 1), 0x00, true) - PeriodicFacePair(FaceIndex(13, 4), FaceIndex(22, 1), 0x00, true) - PeriodicFacePair(FaceIndex(14, 2), FaceIndex(24, 1), 0x00, true) - PeriodicFacePair(FaceIndex(25, 4), FaceIndex(34, 1), 0x00, true) - PeriodicFacePair(FaceIndex(26, 2), FaceIndex(36, 1), 0x00, true) - PeriodicFacePair(FaceIndex(37, 4), FaceIndex(46, 1), 0x00, true) - PeriodicFacePair(FaceIndex(38, 2), FaceIndex(48, 1), 0x00, true) - PeriodicFacePair(FaceIndex(2, 1), FaceIndex(15, 3), 0x02, true) - PeriodicFacePair(FaceIndex(5, 1), FaceIndex(16, 3), 0x02, true) - PeriodicFacePair(FaceIndex(8, 1), FaceIndex(21, 3), 0x02, true) - PeriodicFacePair(FaceIndex(11, 1), FaceIndex(22, 3), 0x02, true) - PeriodicFacePair(FaceIndex(26, 1), FaceIndex(39, 3), 0x02, true) - PeriodicFacePair(FaceIndex(29, 1), FaceIndex(40, 3), 0x02, true) - PeriodicFacePair(FaceIndex(32, 1), FaceIndex(45, 3), 0x02, true) - PeriodicFacePair(FaceIndex(35, 1), FaceIndex(46, 3), 0x02, true) - PeriodicFacePair(FaceIndex(1, 1), FaceIndex(29, 3), 0x00, true) - PeriodicFacePair(FaceIndex(3, 1), FaceIndex(30, 3), 0x00, true) - PeriodicFacePair(FaceIndex(7, 1), FaceIndex(35, 3), 0x00, true) - PeriodicFacePair(FaceIndex(9, 1), FaceIndex(36, 3), 0x00, true) - PeriodicFacePair(FaceIndex(13, 1), FaceIndex(41, 3), 0x00, true) - PeriodicFacePair(FaceIndex(15, 1), FaceIndex(42, 3), 0x00, true) - PeriodicFacePair(FaceIndex(19, 1), FaceIndex(47, 3), 0x00, true) - PeriodicFacePair(FaceIndex(21, 1), FaceIndex(48, 3), 0x00, true) + PeriodicFacetPair(FacetIndex(1, 4), FacetIndex(10, 1), 0x00, true) + PeriodicFacetPair(FacetIndex(2, 2), FacetIndex(12, 1), 0x00, true) + PeriodicFacetPair(FacetIndex(13, 4), FacetIndex(22, 1), 0x00, true) + PeriodicFacetPair(FacetIndex(14, 2), FacetIndex(24, 1), 0x00, true) + PeriodicFacetPair(FacetIndex(25, 4), FacetIndex(34, 1), 0x00, true) + PeriodicFacetPair(FacetIndex(26, 2), FacetIndex(36, 1), 0x00, true) + PeriodicFacetPair(FacetIndex(37, 4), FacetIndex(46, 1), 0x00, 
true) + PeriodicFacetPair(FacetIndex(38, 2), FacetIndex(48, 1), 0x00, true) + PeriodicFacetPair(FacetIndex(2, 1), FacetIndex(15, 3), 0x02, true) + PeriodicFacetPair(FacetIndex(5, 1), FacetIndex(16, 3), 0x02, true) + PeriodicFacetPair(FacetIndex(8, 1), FacetIndex(21, 3), 0x02, true) + PeriodicFacetPair(FacetIndex(11, 1), FacetIndex(22, 3), 0x02, true) + PeriodicFacetPair(FacetIndex(26, 1), FacetIndex(39, 3), 0x02, true) + PeriodicFacetPair(FacetIndex(29, 1), FacetIndex(40, 3), 0x02, true) + PeriodicFacetPair(FacetIndex(32, 1), FacetIndex(45, 3), 0x02, true) + PeriodicFacetPair(FacetIndex(35, 1), FacetIndex(46, 3), 0x02, true) + PeriodicFacetPair(FacetIndex(1, 1), FacetIndex(29, 3), 0x00, true) + PeriodicFacetPair(FacetIndex(3, 1), FacetIndex(30, 3), 0x00, true) + PeriodicFacetPair(FacetIndex(7, 1), FacetIndex(35, 3), 0x00, true) + PeriodicFacetPair(FacetIndex(9, 1), FacetIndex(36, 3), 0x00, true) + PeriodicFacetPair(FacetIndex(13, 1), FacetIndex(41, 3), 0x00, true) + PeriodicFacetPair(FacetIndex(15, 1), FacetIndex(42, 3), 0x00, true) + PeriodicFacetPair(FacetIndex(19, 1), FacetIndex(47, 3), 0x00, true) + PeriodicFacetPair(FacetIndex(21, 1), FacetIndex(48, 3), 0x00, true) ]) # Rotation grid = generate_grid(Tetrahedron, (1, 1, 1)) - face_map = collect_periodic_faces(grid, "left", "front", rotpio2) + face_map = collect_periodic_facets(grid, "left", "front", rotpio2) @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(1, 4), FaceIndex(2, 1), 0x02, false) - PeriodicFacePair(FaceIndex(2, 2), FaceIndex(5, 1), 0x00, false) + PeriodicFacetPair(FacetIndex(1, 4), FacetIndex(2, 1), 0x02, false) + PeriodicFacetPair(FacetIndex(2, 2), FacetIndex(5, 1), 0x00, false) ]) # Rotation and translation grid = generate_grid(Tetrahedron, (1, 1, 1)) - face_map = collect_periodic_faces(grid, "front", "left", x -> rotpio2(rotate(x, Vec{3}((1., 0., 0.)), 3pi/2)) - Vec{3}((0.0, 2.0, 0.0))) + face_map = collect_periodic_facets(grid, "front", "left", x -> rotpio2(rotate(x, Vec{3}((1., 0., 0.)), 3pi/2)) - Vec{3}((0.0, 2.0, 0.0))) @test issetequal(face_map, [ - PeriodicFacePair(FaceIndex(2, 1), FaceIndex(1, 4), 0x01, true) - PeriodicFacePair(FaceIndex(5, 1), FaceIndex(2, 2), 0x01, true) + PeriodicFacetPair(FacetIndex(2, 1), FacetIndex(1, 4), 0x01, true) + PeriodicFacetPair(FacetIndex(5, 1), FacetIndex(2, 2), 0x01, true) ]) end # testset @@ -780,8 +781,8 @@ end # testset add!(dh, :s, Lagrange{RefQuadrilateral,1}()) close!(dh) ch = ConstraintHandler(dh) - face_map = collect_periodic_faces(grid, "left", "right") - collect_periodic_faces!(face_map, grid, "bottom", "top") + face_map = collect_periodic_facets(grid, "left", "right") + collect_periodic_facets!(face_map, grid, "bottom", "top") pbc = PeriodicDirichlet(:s, face_map) add!(ch, pbc) @test get_dof_map(ch) == Dict{Int,Int}( @@ -794,7 +795,7 @@ end # testset # Rotation ch = ConstraintHandler(dh) - face_map = collect_periodic_faces(grid, "left", "bottom", rotpio2) + face_map = collect_periodic_facets(grid, "left", "bottom", rotpio2) pbc = PeriodicDirichlet(:s, face_map) add!(ch, pbc) @test get_dof_map(ch) == Dict{Int,Int}( @@ -805,7 +806,7 @@ end # testset # Rotation and translation ch = ConstraintHandler(dh) - face_map = collect_periodic_faces(grid, "bottom", "left", x -> rotpio2(x) - Vec{2}((0.0, 2.0))) + face_map = collect_periodic_facets(grid, "bottom", "left", x -> rotpio2(x) - Vec{2}((0.0, 2.0))) pbc = PeriodicDirichlet(:s, face_map) add!(ch, pbc) @test get_dof_map(ch) == Dict{Int,Int}( @@ -819,8 +820,8 @@ end # testset add!(dh, :v, 
Lagrange{RefQuadrilateral,1}()^2) close!(dh) ch = ConstraintHandler(dh) - face_map = collect_periodic_faces(grid, "left", "right") - collect_periodic_faces!(face_map, grid, "bottom", "top") + face_map = collect_periodic_facets(grid, "left", "right") + collect_periodic_facets!(face_map, grid, "bottom", "top") pbc = PeriodicDirichlet(:v, face_map, [1, 2]) add!(ch, pbc) @test get_dof_map(ch) == Dict{Int,Int}( @@ -843,7 +844,7 @@ end # testset ) # Rotation without dof rotation - face_map = collect_periodic_faces(grid, "left", "bottom", rotpio2) + face_map = collect_periodic_facets(grid, "left", "bottom", rotpio2) ch = ConstraintHandler(dh) pbc = PeriodicDirichlet(:v, face_map, [1, 2]) add!(ch, pbc) @@ -857,7 +858,7 @@ end # testset ) # Rotation with dof rotation - face_map = collect_periodic_faces(grid, "left", "bottom", rotpio2) + face_map = collect_periodic_facets(grid, "left", "bottom", rotpio2) ch = ConstraintHandler(dh) pbc = PeriodicDirichlet(:v, face_map, rotation_tensor(-π/2), [1, 2]) add!(ch, pbc) @@ -878,7 +879,7 @@ end # testset end # Rotation and translation - face_map = collect_periodic_faces(grid, "bottom", "left", x -> rotpio2(x) - Vec{2}((0.0, 2.0))) + face_map = collect_periodic_facets(grid, "bottom", "left", x -> rotpio2(x) - Vec{2}((0.0, 2.0))) ch = ConstraintHandler(dh) pbc = PeriodicDirichlet(:v, face_map, [1, 2]) add!(ch, pbc) @@ -905,8 +906,8 @@ end # testset add!(dh, :s, Lagrange{RefQuadrilateral,2}()) close!(dh) ch = ConstraintHandler(dh) - face_map = collect_periodic_faces(grid, "left", "right") - collect_periodic_faces!(face_map, grid, "bottom", "top") + face_map = collect_periodic_facets(grid, "left", "right") + collect_periodic_facets!(face_map, grid, "bottom", "top") pbc = PeriodicDirichlet(:s, face_map) add!(ch, pbc) @test get_dof_map(ch) == Dict{Int,Int}( @@ -923,7 +924,7 @@ end # testset # Rotation ch = ConstraintHandler(dh) - face_map = collect_periodic_faces(grid, "left", "bottom", rotpio2) + face_map = collect_periodic_facets(grid, "left", "bottom", rotpio2) pbc = PeriodicDirichlet(:s, face_map) add!(ch, pbc) @test get_dof_map(ch) == Dict{Int,Int}( @@ -936,7 +937,7 @@ end # testset # Rotation and translation ch = ConstraintHandler(dh) - face_map = collect_periodic_faces(grid, "bottom", "left", x -> rotpio2(x) - Vec{2}((0.0, 2.0))) + face_map = collect_periodic_facets(grid, "bottom", "left", x -> rotpio2(x) - Vec{2}((0.0, 2.0))) pbc = PeriodicDirichlet(:s, face_map) add!(ch, pbc) @test get_dof_map(ch) == Dict{Int,Int}( @@ -965,7 +966,7 @@ end # testset add!(dh, :v, Lagrange{RefQuadrilateral,2}()^2) close!(dh) ch = ConstraintHandler(dh) - face_map = collect_periodic_faces(grid, "left", "bottom", rotpio2) + face_map = collect_periodic_facets(grid, "left", "bottom", rotpio2) pbc = PeriodicDirichlet(:v, face_map, rotation_tensor(-π/2), [1, 2]) add!(ch, pbc) close!(ch) @@ -991,7 +992,7 @@ end # testset # 3D hex scalar/vector grid = generate_grid(Hexahedron, (1, 1, 1)) - face_map = collect_periodic_faces(grid) + face_map = collect_periodic_facets(grid) dh = DofHandler(grid) add!(dh, :s, Lagrange{RefHexahedron,1}()) add!(dh, :v, Lagrange{RefHexahedron,1}()^2) @@ -1019,7 +1020,7 @@ end # testset # Rotation ch = ConstraintHandler(dh) - face_map = collect_periodic_faces(grid, "left", "front", rotpio2) + face_map = collect_periodic_facets(grid, "left", "front", rotpio2) pbc = PeriodicDirichlet(:s, face_map) add!(ch, pbc) @test get_dof_map(ch) == Dict{Int,Int}( @@ -1030,7 +1031,7 @@ end # testset ) ch = ConstraintHandler(dh) - face_map = collect_periodic_faces(grid, 
"front", "left", x -> rotpio2(x) - Vec{3}((0.0, 2.0, 0.0))) + face_map = collect_periodic_facets(grid, "front", "left", x -> rotpio2(x) - Vec{3}((0.0, 2.0, 0.0))) pbc = PeriodicDirichlet(:s, face_map) add!(ch, pbc) @test get_dof_map(ch) == Dict{Int,Int}( @@ -1049,23 +1050,23 @@ end # testset compare_by_dbc( dh, - PeriodicDirichlet(:s, collect_periodic_faces(grid, "left", "right")), - Dirichlet(:s, getfaceset(grid, "left"), (x, t) -> 0.), - Dirichlet(:s, getfaceset(grid, "right"), (x, t) -> 0.), + PeriodicDirichlet(:s, collect_periodic_facets(grid, "left", "right")), + Dirichlet(:s, getfacetset(grid, "left"), (x, t) -> 0.), + Dirichlet(:s, getfacetset(grid, "right"), (x, t) -> 0.), ) compare_by_dbc( dh, - PeriodicDirichlet(:v, collect_periodic_faces(grid, "left", "right"), [1, 2]), - Dirichlet(:v, getfaceset(grid, "left"), (x, t) -> [0., 0.], [1, 2]), - Dirichlet(:v, getfaceset(grid, "right"), (x, t) -> [0., 0.], [1, 2]), + PeriodicDirichlet(:v, collect_periodic_facets(grid, "left", "right"), [1, 2]), + Dirichlet(:v, getfacetset(grid, "left"), (x, t) -> [0., 0.], [1, 2]), + Dirichlet(:v, getfacetset(grid, "right"), (x, t) -> [0., 0.], [1, 2]), ) compare_by_dbc( dh, - PeriodicDirichlet(:v, collect_periodic_faces(grid, "left", "right"), [2]), - Dirichlet(:v, getfaceset(grid, "left"), (x, t) -> 0., [2]), - Dirichlet(:v, getfaceset(grid, "right"), (x, t) -> 0., [2]), + PeriodicDirichlet(:v, collect_periodic_facets(grid, "left", "right"), [2]), + Dirichlet(:v, getfacetset(grid, "left"), (x, t) -> 0., [2]), + Dirichlet(:v, getfacetset(grid, "right"), (x, t) -> 0., [2]), ) # 3D tetra scalar @@ -1073,7 +1074,7 @@ end # testset dh = DofHandler(grid) add!(dh, :s, Lagrange{RefTetrahedron,1}()) close!(dh) - face_map = collect_periodic_faces(grid) + face_map = collect_periodic_facets(grid) ch = ConstraintHandler(dh) pbc = PeriodicDirichlet(:s, face_map) add!(ch, pbc) @@ -1087,7 +1088,7 @@ end # testset dh = DofHandler(grid) add!(dh, :v, Lagrange{RefTetrahedron,1}()^2) close!(dh) - face_map = collect_periodic_faces(grid, "left", "right") + face_map = collect_periodic_facets(grid, "left", "right") ch = ConstraintHandler(dh) pbc = PeriodicDirichlet(:v, face_map, [1, 2]) add!(ch, pbc) @@ -1109,7 +1110,7 @@ end # testset add!(dh, :v, Lagrange{RefHexahedron,1}()^3) close!(dh) rot = rotation_tensor(Vec{3}((0., 1., 0.)), π/2) - face_map = collect_periodic_faces(grid, "left", "bottom", x -> rot ⋅ x) + face_map = collect_periodic_facets(grid, "left", "bottom", x -> rot ⋅ x) ch = ConstraintHandler(dh) pbc = PeriodicDirichlet(:v, face_map, rot, [1, 2, 3]) add!(ch, pbc) @@ -1155,80 +1156,80 @@ end # testset # Scalar compare_by_dbc( dh, - PeriodicDirichlet(:s, collect_periodic_faces(grid, "left", "right")), - Dirichlet(:s, getfaceset(grid, "left"), (x,t) -> 0), - Dirichlet(:s, getfaceset(grid, "right"), (x,t) -> 0), + PeriodicDirichlet(:s, collect_periodic_facets(grid, "left", "right")), + Dirichlet(:s, getfacetset(grid, "left"), (x,t) -> 0), + Dirichlet(:s, getfacetset(grid, "right"), (x,t) -> 0), ) compare_by_dbc( dh, - PeriodicDirichlet(:s, collect_periodic_faces(grid, "right", "left")), - Dirichlet(:s, getfaceset(grid, "right"), (x,t) -> 0), - Dirichlet(:s, getfaceset(grid, "left"), (x,t) -> 0), + PeriodicDirichlet(:s, collect_periodic_facets(grid, "right", "left")), + Dirichlet(:s, getfacetset(grid, "right"), (x,t) -> 0), + Dirichlet(:s, getfacetset(grid, "left"), (x,t) -> 0), ) compare_by_dbc( dh, - PeriodicDirichlet(:s, collect_periodic_faces(grid, "bottom", "top")), - Dirichlet(:s, getfaceset(grid, "bottom"), 
(x,t) -> 0), - Dirichlet(:s, getfaceset(grid, "top"), (x,t) -> 0), + PeriodicDirichlet(:s, collect_periodic_facets(grid, "bottom", "top")), + Dirichlet(:s, getfacetset(grid, "bottom"), (x,t) -> 0), + Dirichlet(:s, getfacetset(grid, "top"), (x,t) -> 0), ) compare_by_dbc( dh, - PeriodicDirichlet(:s, collect_periodic_faces(grid, "top", "bottom")), - Dirichlet(:s, getfaceset(grid, "top"), (x,t) -> 0), - Dirichlet(:s, getfaceset(grid, "bottom"), (x,t) -> 0), + PeriodicDirichlet(:s, collect_periodic_facets(grid, "top", "bottom")), + Dirichlet(:s, getfacetset(grid, "top"), (x,t) -> 0), + Dirichlet(:s, getfacetset(grid, "bottom"), (x,t) -> 0), ) if D == 3 compare_by_dbc( dh, - PeriodicDirichlet(:s, collect_periodic_faces(grid, "front", "back")), - Dirichlet(:s, getfaceset(grid, "front"), (x,t) -> 0), - Dirichlet(:s, getfaceset(grid, "back"), (x,t) -> 0), + PeriodicDirichlet(:s, collect_periodic_facets(grid, "front", "back")), + Dirichlet(:s, getfacetset(grid, "front"), (x,t) -> 0), + Dirichlet(:s, getfacetset(grid, "back"), (x,t) -> 0), ) compare_by_dbc( dh, - PeriodicDirichlet(:s, collect_periodic_faces(grid, "back", "front")), - Dirichlet(:s, getfaceset(grid, "back"), (x,t) -> 0), - Dirichlet(:s, getfaceset(grid, "front"), (x,t) -> 0), + PeriodicDirichlet(:s, collect_periodic_facets(grid, "back", "front")), + Dirichlet(:s, getfacetset(grid, "back"), (x,t) -> 0), + Dirichlet(:s, getfacetset(grid, "front"), (x,t) -> 0), ) end # Vector compare_by_dbc( dh, - PeriodicDirichlet(:v, collect_periodic_faces(grid, "left", "right"), collect(1:D)), - Dirichlet(:v, getfaceset(grid, "left"), (x,t) -> fill(0., D), collect(1:D)), - Dirichlet(:v, getfaceset(grid, "right"), (x,t) -> fill(0., D), collect(1:D)), + PeriodicDirichlet(:v, collect_periodic_facets(grid, "left", "right"), collect(1:D)), + Dirichlet(:v, getfacetset(grid, "left"), (x,t) -> fill(0., D), collect(1:D)), + Dirichlet(:v, getfacetset(grid, "right"), (x,t) -> fill(0., D), collect(1:D)), ) compare_by_dbc( dh, - PeriodicDirichlet(:v, collect_periodic_faces(grid, "right", "left"), [D-1]), - Dirichlet(:v, getfaceset(grid, "right"), (x,t) -> 0, [D-1]), - Dirichlet(:v, getfaceset(grid, "left"), (x,t) -> 0, [D-1]), + PeriodicDirichlet(:v, collect_periodic_facets(grid, "right", "left"), [D-1]), + Dirichlet(:v, getfacetset(grid, "right"), (x,t) -> 0, [D-1]), + Dirichlet(:v, getfacetset(grid, "left"), (x,t) -> 0, [D-1]), ) compare_by_dbc( dh, - PeriodicDirichlet(:v, collect_periodic_faces(grid, "bottom", "top"), [1, 2]), - Dirichlet(:v, getfaceset(grid, "bottom"), (x,t) -> [0., 0.], [1, 2]), - Dirichlet(:v, getfaceset(grid, "top"), (x,t) -> [0., 0.], [1, 2]), + PeriodicDirichlet(:v, collect_periodic_facets(grid, "bottom", "top"), [1, 2]), + Dirichlet(:v, getfacetset(grid, "bottom"), (x,t) -> [0., 0.], [1, 2]), + Dirichlet(:v, getfacetset(grid, "top"), (x,t) -> [0., 0.], [1, 2]), ) compare_by_dbc( dh, - PeriodicDirichlet(:v, collect_periodic_faces(grid, "top", "bottom"), [D]), - Dirichlet(:v, getfaceset(grid, "top"), (x,t) -> 0, [D]), - Dirichlet(:v, getfaceset(grid, "bottom"), (x,t) -> 0, [D]), + PeriodicDirichlet(:v, collect_periodic_facets(grid, "top", "bottom"), [D]), + Dirichlet(:v, getfacetset(grid, "top"), (x,t) -> 0, [D]), + Dirichlet(:v, getfacetset(grid, "bottom"), (x,t) -> 0, [D]), ) if D == 3 compare_by_dbc( dh, - PeriodicDirichlet(:v, collect_periodic_faces(grid, "front", "back"), 1:D), - Dirichlet(:v, getfaceset(grid, "front"), (x,t) -> fill(0., D), 1:D), - Dirichlet(:v, getfaceset(grid, "back"), (x,t) -> fill(0., D), 1:D), + 
PeriodicDirichlet(:v, collect_periodic_facets(grid, "front", "back"), 1:D), + Dirichlet(:v, getfacetset(grid, "front"), (x,t) -> fill(0., D), 1:D), + Dirichlet(:v, getfacetset(grid, "back"), (x,t) -> fill(0., D), 1:D), ) compare_by_dbc( dh, - PeriodicDirichlet(:v, collect_periodic_faces(grid, "back", "front"), D), - Dirichlet(:v, getfaceset(grid, "back"), (x,t) -> 0, D), - Dirichlet(:v, getfaceset(grid, "front"), (x,t) -> 0, D), + PeriodicDirichlet(:v, collect_periodic_facets(grid, "back", "front"), D), + Dirichlet(:v, getfacetset(grid, "back"), (x,t) -> 0, D), + Dirichlet(:v, getfacetset(grid, "front"), (x,t) -> 0, D), ) end end @@ -1268,9 +1269,9 @@ end # testset ## u9 = 1 ## where the inhomogeneity of u1 and u5 have to be resolved at runtime. ch1 = ConstraintHandler(dh) - add!(ch1, Dirichlet(:u, getfaceset(grid, "left"), (x, t) -> 0)) - add!(ch1, Dirichlet(:u, getfaceset(grid, "right"), (x, t) -> 1)) - add!(ch1, PeriodicDirichlet(:u, collect_periodic_faces(grid, "bottom", "top"))) + add!(ch1, Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> 0)) + add!(ch1, Dirichlet(:u, getfacetset(grid, "right"), (x, t) -> 1)) + add!(ch1, PeriodicDirichlet(:u, collect_periodic_facets(grid, "bottom", "top"))) close!(ch1) update!(ch1, 0) @@ -1287,16 +1288,16 @@ end # testset ## u8 = 0 ## u9 = 1 ch2 = ConstraintHandler(dh) - add!(ch2, PeriodicDirichlet(:u, collect_periodic_faces(grid, "bottom", "top"))) - add!(ch2, Dirichlet(:u, getfaceset(grid, "left"), (x, t) -> 0)) - add!(ch2, Dirichlet(:u, getfaceset(grid, "right"), (x, t) -> 1)) + add!(ch2, PeriodicDirichlet(:u, collect_periodic_facets(grid, "bottom", "top"))) + add!(ch2, Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> 0)) + add!(ch2, Dirichlet(:u, getfacetset(grid, "right"), (x, t) -> 1)) close!(ch2) update!(ch2, 0) - K1 = create_sparsity_pattern(dh, ch1) + K1 = allocate_matrix(dh, ch1) f1 = zeros(ndofs(dh)) a1 = start_assemble(K1, f1) - K2 = create_sparsity_pattern(dh, ch2) + K2 = allocate_matrix(dh, ch2) f2 = zeros(ndofs(dh)) a2 = start_assemble(K2, f2) @@ -1320,22 +1321,22 @@ end # subtestset @testset "time dependence" begin ## Pure Dirichlet ch1 = ConstraintHandler(dh) - add!(ch1, Dirichlet(:u, getfaceset(grid, "top"), (x, t) -> 3.0t + 2.0)) - add!(ch1, Dirichlet(:u, getfaceset(grid, "bottom"), (x, t) -> 1.5t + 1.0)) - add!(ch1, Dirichlet(:u, getfaceset(grid, "left"), (x, t) -> 1.0t)) - add!(ch1, Dirichlet(:u, getfaceset(grid, "right"), (x, t) -> 2.0t)) + add!(ch1, Dirichlet(:u, getfacetset(grid, "top"), (x, t) -> 3.0t + 2.0)) + add!(ch1, Dirichlet(:u, getfacetset(grid, "bottom"), (x, t) -> 1.5t + 1.0)) + add!(ch1, Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> 1.0t)) + add!(ch1, Dirichlet(:u, getfacetset(grid, "right"), (x, t) -> 2.0t)) close!(ch1) ## Dirichlet with corresponding AffineConstraint on dof 2 and 7 ch2 = ConstraintHandler(dh) add!(ch2, AffineConstraint(7, [8 => 1.0, 9 => 1.0], 2.0)) add!(ch2, AffineConstraint(2, [1 => 0.5, 5 => 0.5], 1.0)) - add!(ch2, Dirichlet(:u, getfaceset(grid, "left"), (x, t) -> 1.0t)) - add!(ch2, Dirichlet(:u, getfaceset(grid, "right"), (x, t) -> 2.0t)) + add!(ch2, Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> 1.0t)) + add!(ch2, Dirichlet(:u, getfacetset(grid, "right"), (x, t) -> 2.0t)) close!(ch2) - K1 = create_sparsity_pattern(dh, ch1) + K1 = allocate_matrix(dh, ch1) f1 = zeros(ndofs(dh)) - K2 = create_sparsity_pattern(dh, ch2) + K2 = allocate_matrix(dh, ch2) f2 = zeros(ndofs(dh)) for t in (1.0, 2.0) @@ -1375,8 +1376,8 @@ end # testset close!(dh) # Dirichlet BC ch_dbc = ConstraintHandler(dh) - 
add!(ch_dbc, Dirichlet(:u, getfaceset(grid, "left"), (x, t) -> 0)) - add!(ch_dbc, Dirichlet(:u, getfaceset(grid, "right"), (x, t) -> 1)) + add!(ch_dbc, Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> 0)) + add!(ch_dbc, Dirichlet(:u, getfacetset(grid, "right"), (x, t) -> 1)) close!(ch_dbc) update!(ch_dbc, 0) # Dirichlet BC as affine constraints @@ -1389,10 +1390,10 @@ end # testset # Periodic constraints (non-local couplings) # ch_p = ConstraintHandler(dh) # TODO: Order matters, but probably shouldn't, see Ferrite-FEM/Ferrite.jl#530 - face_map = collect_periodic_faces(grid, getfaceset(grid, "bottom"), getfaceset(grid, "top")) + face_map = collect_periodic_facets(grid, getfacetset(grid, "bottom"), getfacetset(grid, "top")) add!(ch_p, PeriodicDirichlet(:u, face_map)) - add!(ch_p, Dirichlet(:u, getfaceset(grid, "left"), (x, t) -> 0)) - add!(ch_p, Dirichlet(:u, getfaceset(grid, "right"), (x, t) -> 1)) + add!(ch_p, Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> 0)) + add!(ch_p, Dirichlet(:u, getfacetset(grid, "right"), (x, t) -> 1)) close!(ch_p) update!(ch_p, 0) @@ -1415,7 +1416,7 @@ end # testset for azero in (nothing, false, true) - S = create_sparsity_pattern(dh) + S = allocate_matrix(dh) f = zeros(ndofs(dh)) K_dbc_standard = copy(S) @@ -1428,7 +1429,7 @@ end # testset f_dbc_local = copy(f) assembler_dbc_local = start_assemble(K_dbc_local, f_dbc_local) - S = create_sparsity_pattern(dh, ch_ac) + S = allocate_matrix(dh, ch_ac) K_ac_standard = copy(S) f_ac_standard = copy(f) @@ -1440,7 +1441,7 @@ end # testset f_ac_local = copy(f) assembler_ac_local = start_assemble(K_ac_local, f_ac_local) - S = create_sparsity_pattern(dh, ch_p) + S = allocate_matrix(dh, ch_p) K_p_standard = copy(S) f_p_standard = copy(f) @@ -1489,7 +1490,7 @@ end # testset assemble!(assembler_ac_local, global_dofs, ke, fe) end let ke = copy(ke), fe = copy(fe) - if cellid(cell) in first.(getfaceset(grid, "bottom")) + if cellid(cell) in first.(getfacetset(grid, "bottom")) # Throws for all cells on the image boundary @test_throws ErrorException apply_f!(ke, fe, global_dofs, ch_p) else @@ -1553,9 +1554,9 @@ end # testset @test norm(u_dbc) ≈ 3.8249286998373586 @test norm(u_p) ≈ 3.7828270430540893 end - # vtk_grid("local_application_azero_$(azero)", grid) do vtk - # vtk_point_data(vtk, dh, u_dbc, "_dbc") - # vtk_point_data(vtk, dh, u_p, "_p") + # VTKGridFile("local_application_azero_$(azero)", grid) do vtk + # write_solution(vtk, dh, u_dbc, "_dbc") + # write_solution(vtk, dh, u_p, "_p") # end @test K_dbc_standard \ f_dbc_standard ≈ K_dbc_ch \ f_dbc_ch ≈ K_dbc_local \ f_dbc_local ≈ K_ac_standard \ f_ac_standard ≈ K_ac_ch \ f_ac_ch ≈ K_ac_local \ f_ac_local @@ -1571,10 +1572,10 @@ end # testset add!(dh, :u, Lagrange{RefTriangle,1}()) close!(dh) ch = ConstraintHandler(dh) - add!(ch, Dirichlet(:u, getfaceset(grid, "left"), (x, t) -> 0)) + add!(ch, Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> 0)) close!(ch) - Kfull = create_sparsity_pattern(dh, ch) - K = create_sparsity_pattern(dh, ch; keep_constrained=false) + Kfull = allocate_matrix(dh, ch) + K = allocate_matrix(dh, ch; keep_constrained=false) # Pattern tests nonzero_edges = Set( (i, j) for d in 1:getncells(grid) @@ -1624,7 +1625,7 @@ end # testset add!(dh, :u, Lagrange{RefLine,1}()) close!(dh) ch = ConstraintHandler(dh) - add!(ch, Dirichlet(:u, getfaceset(grid, "left"), x -> 1)) + add!(ch, Dirichlet(:u, getfacetset(grid, "left"), x -> 1)) close!(ch) K1 = rand(3, 3); K1 = sparse(K1'K1) K2 = copy(K1); K2[2:3, 1] .= 42; K2[3, 2] = NaN; K2 = Symmetric(K2) diff --git 
a/test/test_deprecations.jl b/test/test_deprecations.jl index bcd590d6c1..7be2cb94db 100644 --- a/test/test_deprecations.jl +++ b/test/test_deprecations.jl @@ -6,17 +6,17 @@ using Ferrite, Test # Deprecation of auto-selecting the interpolation grid = generate_grid(Quadrilateral, (1, 1)) dh = DofHandler(grid) - @test_deprecated r"interpolation explicitly, and vectorize it" add!(dh, :u, 2) - @test_deprecated r"interpolation explicitly, and vectorize it" add!(dh, :p, 1) + @test_throws Ferrite.DeprecationError add!(dh, :u, 2) + @test_throws Ferrite.DeprecationError add!(dh, :p, 1) close!(dh) - @test ndofs(dh) == 12 + @test ndofs(dh) == 0 # Deprecation of auto-vectorizing dh = DofHandler(grid) ip = Lagrange{RefQuadrilateral,1}() - @test_deprecated r"vectorize the interpolation" add!(dh, :u, 2, ip) - @test_deprecated r"vectorize the interpolation" add!(dh, :p, 1, ip) + @test_throws Ferrite.DeprecationError add!(dh, :u, 2, ip) + @test_throws Ferrite.DeprecationError add!(dh, :p, 1, ip) close!(dh) - @test ndofs(dh) == 12 + @test ndofs(dh) == 0 end @testset "Deprecation of (Cell|Face)(Scalar|Vector)Values" begin @@ -40,52 +40,88 @@ end @testset "Deprecation of old RefShapes" begin # Interpolations for order in 1:2 - @test (@test_deprecated r"RefLine" Lagrange{1, RefCube, order}()) === Lagrange{RefLine, order}() + @test_throws Ferrite.DeprecationError Lagrange{1, RefCube, order}() end for order in 1:5 - @test (@test_deprecated r"RefTriangle" Lagrange{2, RefTetrahedron, order}()) === Lagrange{RefTriangle, order}() + @test_throws Ferrite.DeprecationError Lagrange{2, RefTetrahedron, order}() end for order in 1:2 - @test (@test_deprecated r"RefQuadrilateral" Lagrange{2, RefCube, order}()) === Lagrange{RefQuadrilateral, order}() + @test_throws Ferrite.DeprecationError Lagrange{2, RefCube, order}() end for order in 1:2 - @test (@test_deprecated r"RefHexahedron" Lagrange{3, RefCube, order}()) === Lagrange{RefHexahedron, order}() + @test_throws Ferrite.DeprecationError Lagrange{3, RefCube, order}() end - @test (@test_deprecated r"RefQuadrilateral" Serendipity{2, RefCube, 2}()) === Serendipity{RefQuadrilateral, 2}() - @test (@test_deprecated r"RefHexahedron" Serendipity{3, RefCube, 2}()) === Serendipity{RefHexahedron, 2}() - @test (@test_deprecated r"RefTriangle" CrouzeixRaviart{2, 1}()) === CrouzeixRaviart{RefTriangle, 1}() - @test (@test_deprecated r"RefTriangle" BubbleEnrichedLagrange{2, RefTetrahedron, 1}()) === BubbleEnrichedLagrange{RefTriangle, 1}() + @test_throws Ferrite.DeprecationError Serendipity{2, RefCube, 2}() + @test_throws Ferrite.DeprecationError Serendipity{3, RefCube, 2}() + @test_throws Ferrite.DeprecationError CrouzeixRaviart{2, 1}() + @test_throws Ferrite.DeprecationError BubbleEnrichedLagrange{2, RefTetrahedron, 1}() # Quadrature/(Cell|Face)Value combinations (sometimes warns in the QR constructor, sometimes it the FEValues constructor) function test_combo(constructor, qdim, qshape, qargs, ip) qr = QuadratureRule{qdim, qshape}(qargs...) 
constructor(qr, ip) end - @test (@test_deprecated r"RefLine.*RefQuadrilateral" test_combo(CellValues, 1, RefCube, (1,), Lagrange{RefLine, 1}())) isa CellValues - @test (@test_deprecated r"RefLine.*RefQuadrilateral" test_combo(CellValues, 1, RefCube, (:legendre, 1), Lagrange{RefLine, 1}())) isa CellValues - @test (@test_deprecated r"RefQuadrilateral.*RefHexahedron" test_combo(CellValues, 2, RefCube, (1,), Lagrange{RefQuadrilateral, 1}())) isa CellValues - @test (@test_deprecated r"RefQuadrilateral.*RefHexahedron" test_combo(CellValues, 2, RefCube, (:legendre, 1), Lagrange{RefQuadrilateral, 1}())) isa CellValues - @test (@test_deprecated r"RefHexahedron" test_combo(CellValues, 3, RefCube, (1,), Lagrange{RefHexahedron, 1}())) isa CellValues - @test (@test_deprecated r"RefHexahedron" test_combo(CellValues, 3, RefCube, (:legendre, 1), Lagrange{RefHexahedron, 1}())) isa CellValues - @test (@test_deprecated r"RefLine" test_combo(FaceValues, 0, RefCube, (1,), Lagrange{RefLine, 1}())) isa FaceValues - @test (@test_deprecated r"RefLine" test_combo(FaceValues, 0, RefCube, (:legendre, 1), Lagrange{RefLine, 1}())) isa FaceValues - @test (@test_deprecated r"(RefLine.*RefQuadrilateral)" test_combo(FaceValues, 1, RefCube, (1,), Lagrange{RefQuadrilateral, 1}())) isa FaceValues - @test (@test_deprecated r"likely this comes" test_combo(FaceValues, 1, RefCube, (1,), Lagrange{RefQuadrilateral, 1}())) isa FaceValues - @test (@test_deprecated r"(RefLine.*RefQuadrilateral)" test_combo(FaceValues, 1, RefCube, (:legendre, 1), Lagrange{RefQuadrilateral, 1}())) isa FaceValues - @test (@test_deprecated r"likely this comes" test_combo(FaceValues, 1, RefCube, (:legendre, 1), Lagrange{RefQuadrilateral, 1}())) isa FaceValues - @test (@test_deprecated r"RefQuadrilateral.*RefHexahedron" test_combo(FaceValues, 2, RefCube, (1,), Lagrange{RefHexahedron, 1}())) isa FaceValues - @test (@test_deprecated r"likely this comes" test_combo(FaceValues, 2, RefCube, (1,), Lagrange{RefHexahedron, 1}())) isa FaceValues - @test (@test_deprecated r"RefQuadrilateral.*RefHexahedron" test_combo(FaceValues, 2, RefCube, (:legendre, 1), Lagrange{RefHexahedron, 1}())) isa FaceValues - @test (@test_deprecated r"likely this comes" test_combo(FaceValues, 2, RefCube, (:legendre, 1), Lagrange{RefHexahedron, 1}())) isa FaceValues - @test (@test_deprecated r"RefTriangle" test_combo(FaceValues, 1, RefTetrahedron, (1,), Lagrange{RefTriangle, 1}())) isa FaceValues - @test (@test_deprecated r"RefTriangle" test_combo(FaceValues, 1, RefTetrahedron, (:legendre, 1), Lagrange{RefTriangle, 1}())) isa FaceValues + @test_throws Ferrite.DeprecationError test_combo(CellValues, 1, RefCube, (1,), Lagrange{RefLine, 1}()) + @test_throws Ferrite.DeprecationError test_combo(CellValues, 1, RefCube, (:legendre, 1), Lagrange{RefLine, 1}()) + @test_throws Ferrite.DeprecationError test_combo(CellValues, 2, RefCube, (1,), Lagrange{RefQuadrilateral, 1}()) + @test_throws Ferrite.DeprecationError test_combo(CellValues, 2, RefCube, (:legendre, 1), Lagrange{RefQuadrilateral, 1}()) + @test_throws Ferrite.DeprecationError test_combo(CellValues, 3, RefCube, (1,), Lagrange{RefHexahedron, 1}()) + @test_throws Ferrite.DeprecationError test_combo(CellValues, 3, RefCube, (:legendre, 1), Lagrange{RefHexahedron, 1}()) + @test_throws Ferrite.DeprecationError test_combo(FacetValues, 0, RefCube, (1,), Lagrange{RefLine, 1}()) + @test_throws Ferrite.DeprecationError test_combo(FacetValues, 0, RefCube, (:legendre, 1), Lagrange{RefLine, 1}()) + @test_throws Ferrite.DeprecationError 
test_combo(FacetValues, 1, RefCube, (1,), Lagrange{RefQuadrilateral, 1}()) + @test_throws Ferrite.DeprecationError test_combo(FacetValues, 1, RefCube, (1,), Lagrange{RefQuadrilateral, 1}()) + @test_throws Ferrite.DeprecationError test_combo(FacetValues, 1, RefCube, (:legendre, 1), Lagrange{RefQuadrilateral, 1}()) + @test_throws Ferrite.DeprecationError test_combo(FacetValues, 1, RefCube, (:legendre, 1), Lagrange{RefQuadrilateral, 1}()) + @test_throws Ferrite.DeprecationError test_combo(FacetValues, 2, RefCube, (1,), Lagrange{RefHexahedron, 1}()) + @test_throws Ferrite.DeprecationError test_combo(FacetValues, 2, RefCube, (1,), Lagrange{RefHexahedron, 1}()) + @test_throws Ferrite.DeprecationError test_combo(FacetValues, 2, RefCube, (:legendre, 1), Lagrange{RefHexahedron, 1}()) + @test_throws Ferrite.DeprecationError test_combo(FacetValues, 2, RefCube, (:legendre, 1), Lagrange{RefHexahedron, 1}()) + @test_throws Ferrite.DeprecationError test_combo(FacetValues, 1, RefTetrahedron, (1,), Lagrange{RefTriangle, 1}()) + @test_throws Ferrite.DeprecationError test_combo(FacetValues, 1, RefTetrahedron, (:legendre, 1), Lagrange{RefTriangle, 1}()) end @testset "Ferrite.value and Ferrite.derivative" begin ip = Lagrange{RefQuadrilateral, 1}() ξ = zero(Vec{2}) - @test (@test_deprecated Ferrite.value(ip, ξ)) == [shape_value(ip, ξ, i) for i in 1:getnbasefunctions(ip)] - @test (@test_deprecated Ferrite.derivative(ip, ξ)) == [shape_gradient(ip, ξ, i) for i in 1:getnbasefunctions(ip)] - @test (@test_deprecated Ferrite.value(ip, 1, ξ)) == shape_value(ip, ξ, 1) + @test_throws Ferrite.DeprecationError Ferrite.value(ip, ξ) + @test_throws Ferrite.DeprecationError Ferrite.derivative(ip, ξ) + @test_throws Ferrite.DeprecationError Ferrite.value(ip, 1, ξ) +end + +@testset "facesets" begin + grid = generate_grid(Quadrilateral, (2,2)) + @test_throws Ferrite.DeprecationError addfaceset!(grid, "right_face", x -> x[1] ≈ 1) + @test_throws Ferrite.DeprecationError addfaceset!(grid, "right_face_explicit", Set(Ferrite.FaceIndex(fi[1], fi[2]) for fi in getfacetset(grid, "right"))) +end + +@testset "vtk_grid" begin + # Ensure no MethodError on pre v1. + @test_throws Ferrite.DeprecationError vtk_grid("old", generate_grid(Line, (1,))) +end + +@testset "onboundary" begin + msg = "`onboundary` is deprecated, check just the facetset instead of first checking `onboundary`." + @test_throws Ferrite.DeprecationError(msg) onboundary(first(CellIterator(generate_grid(Line, (2,)))), 1) + msg = "`boundary_matrix` is not part of the Grid anymore and thus not a supported keyword argument." + @test_throws Ferrite.DeprecationError(msg) Grid(Triangle[], Node{2,Float64}[]; boundary_matrix = something) +end + +@testset "getdim" begin + msg = "`Ferrite.getdim` is deprecated, use `getrefdim` or `getspatialdim` instead" + @test_throws Ferrite.DeprecationError(msg) Ferrite.getdim(generate_grid(Line, (1,))) + @test_throws Ferrite.DeprecationError(msg) Ferrite.getdim(Lagrange{RefTriangle,1}()) + @test_throws Ferrite.DeprecationError(msg) Ferrite.getdim(Line((1,2))) +end + +@testset "getfielddim" begin + msg = "`Ferrite.getfielddim(::AbstractDofHandler, args...) 
is deprecated, use `n_components` instead" + dh = close!(add!(DofHandler(generate_grid(Triangle, (1,1))), :u, Lagrange{RefTriangle,1}())) + @test_throws Ferrite.DeprecationError(msg) Ferrite.getfielddim(dh, Ferrite.find_field(dh, :u)) + @test_throws Ferrite.DeprecationError(msg) Ferrite.getfielddim(dh.subdofhandlers[1], :u) +end + +@testset "default_interpolation" begin + @test_throws Ferrite.DeprecationError Ferrite.default_interpolation(Triangle) end end # testset deprecations diff --git a/test/test_dofs.jl b/test/test_dofs.jl index 8264cb1d1b..7cd3e47011 100644 --- a/test/test_dofs.jl +++ b/test/test_dofs.jl @@ -87,7 +87,7 @@ end @testset "Dofs for quad in 3d (shell)" begin -nodes = [Node{3,Float64}(Vec(0.0,0.0,0.0)), Node{3,Float64}(Vec(1.0,0.0,0.0)), +nodes = [Node{3,Float64}(Vec(0.0,0.0,0.0)), Node{3,Float64}(Vec(1.0,0.0,0.0)), Node{3,Float64}(Vec(1.0,1.0,0.0)), Node{3,Float64}(Vec(0.0,1.0,0.0)), Node{3,Float64}(Vec(2.0,0.0,0.0)), Node{3,Float64}(Vec(2.0,2.0,0.0))] @@ -122,12 +122,21 @@ add!(dh, :v, Lagrange{RefQuadrilateral,1}()^2) add!(dh, :s, Lagrange{RefQuadrilateral,1}()) close!(dh) -u = [1.1, 1.2, 2.1, 2.2, 4.1, 4.2, 3.1, 3.2, 1.3, 2.3, 4.3, 3.3] - +u = [1.1, 1.2, 2.1, 2.2, 4.1, 4.2, 3.1, 3.2, 1.3, 2.3, 4.3, 3.3] +uv = @view u[1:end] +# :s on solution s_nodes = evaluate_at_grid_nodes(dh, u, :s) @test s_nodes ≈ [i+0.3 for i=1:4] +# :s on a view into solution +sv_nodes = evaluate_at_grid_nodes(dh, uv, :s) +@test sv_nodes ≈ [i+0.3 for i=1:4] +# :v on solution v_nodes = evaluate_at_grid_nodes(dh, u, :v) @test v_nodes ≈ [Vec{2,Float64}(i -> j+i/10) for j = 1:4] +# :v on a view into solution +vv_nodes = evaluate_at_grid_nodes(dh, uv, :v) +@test vv_nodes ≈ [Vec{2,Float64}(i -> j+i/10) for j = 1:4] + end @testset "renumber!" begin @@ -145,9 +154,9 @@ end add!(sdh2, :u, Lagrange{RefTriangle,1}()) close!(mdh) ch = ConstraintHandler(dh) - add!(ch, Dirichlet(:u, getfaceset(grid, "left"), (x, t) -> 0)) - add!(ch, Dirichlet(:u, getfaceset(grid, "right"), (x, t) -> 2)) - face_map = collect_periodic_faces(grid, "bottom", "top") + add!(ch, Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> 0)) + add!(ch, Dirichlet(:u, getfacetset(grid, "right"), (x, t) -> 2)) + face_map = collect_periodic_facets(grid, "bottom", "top") add!(ch, PeriodicDirichlet(:u, face_map)) close!(ch) update!(ch, 0) @@ -180,12 +189,12 @@ end @test original_dofcoefficients == ch.dofcoefficients # Integration tests - K = create_sparsity_pattern(dh, ch) + K = allocate_matrix(dh, ch) f = zeros(ndofs(dh)) a = start_assemble(K, f) dhp, _, chp = dhmdhch() renumber!(dhp, chp, perm) - Kp = create_sparsity_pattern(dhp, chp) + Kp = allocate_matrix(dhp, chp) fp = zeros(ndofs(dhp)) ap = start_assemble(Kp, fp) for cellid in 1:getncells(dh.grid) @@ -215,8 +224,8 @@ end add!(dh, :s, Lagrange{RefQuadrilateral,1}()) close!(dh) ch = ConstraintHandler(dh) - add!(ch, Dirichlet(:v, getfaceset(grid, "left"), (x, t) -> 0, [2])) - add!(ch, Dirichlet(:s, getfaceset(grid, "left"), (x, t) -> 0)) + add!(ch, Dirichlet(:v, getfacetset(grid, "left"), (x, t) -> 0, [2])) + add!(ch, Dirichlet(:s, getfacetset(grid, "left"), (x, t) -> 0)) add!(ch, AffineConstraint(13, [15 => 0.5, 16 => 0.5], 0.0)) close!(ch) return dh, ch @@ -305,8 +314,8 @@ end add!(sdh2, :v, ip^2) close!(dh) ch = ConstraintHandler(dh) - add!(ch, Dirichlet(:v, getfaceset(grid, "left"), (x, t) -> 0, [2])) - add!(ch, Dirichlet(:s, getfaceset(grid, "left"), (x, t) -> 0)) + add!(ch, Dirichlet(:v, getfacetset(grid, "left"), (x, t) -> 0, [2])) + add!(ch, Dirichlet(:s, getfacetset(grid, "left"), (x, 
t) -> 0)) add!(ch, AffineConstraint(13, [15 => 0.5, 16 => 0.5], 0.0)) close!(ch) return dh, ch @@ -429,11 +438,17 @@ end end return false end + function is_stored(sparsity_pattern::SparsityPattern, i, j) + return findfirst(k -> k == j, sparsity_pattern.rows[i]) !== nothing + end # Full coupling (default) - K = create_sparsity_pattern(dh) + sparsity_pattern = init_sparsity_pattern(dh) + add_sparsity_entries!(sparsity_pattern, dh) + K = allocate_matrix(sparsity_pattern) @test eltype(K) == Float64 for j in 1:ndofs(dh), i in 1:ndofs(dh) + @test is_stored(sparsity_pattern, i, j) @test is_stored(K, i, j) end @@ -443,25 +458,30 @@ end true true # v true false # q ] - K = create_sparsity_pattern(dh; coupling=coupling) - Kch = create_sparsity_pattern(dh, ch; coupling=coupling) - @test K.rowval == Kch.rowval - @test K.colptr == Kch.colptr - KS = create_symmetric_sparsity_pattern(dh; coupling=coupling) - KSch = create_symmetric_sparsity_pattern(dh, ch; coupling=coupling) - @test KS.data.rowval == KSch.data.rowval - @test KS.data.colptr == KSch.data.colptr + sparsity_pattern = init_sparsity_pattern(dh) + add_sparsity_entries!(sparsity_pattern, dh; coupling=coupling) + K = allocate_matrix(sparsity_pattern) + # Kch = allocate_matrix(dh, ch; coupling=coupling) + # @test K.rowval == Kch.rowval + # @test K.colptr == Kch.colptr + # KS = create_symmetric_sparsity_pattern(dh; coupling=coupling) + # KSch = create_symmetric_sparsity_pattern(dh, ch; coupling=coupling) + # @test KS.data.rowval == KSch.data.rowval + # @test KS.data.colptr == KSch.data.colptr for j in udofs, i in Iterators.flatten((vdofs, qdofs)) + @test is_stored(sparsity_pattern, i, j) @test is_stored(K, i, j) - @test is_stored(KS, i, j) == (i <= j) + # @test is_stored(KS, i, j) == (i <= j) end for j in pdofs, i in vdofs + @test is_stored(sparsity_pattern, i, j) @test is_stored(K, i, j) - @test is_stored(KS, i, j) + # @test is_stored(KS, i, j) end for j in pdofs, i in qdofs + @test is_stored(sparsity_pattern, i, j) == (i == j) @test is_stored(K, i, j) == (i == j) - @test is_stored(KS, i, j) == (i == j) + # @test is_stored(KS, i, j) == (i == j) end # Component coupling @@ -471,37 +491,45 @@ end true false true # v2 false true true # q ] - K = create_sparsity_pattern(dh; coupling=coupling) - KS = create_symmetric_sparsity_pattern(dh; coupling=coupling) + sparsity_pattern = init_sparsity_pattern(dh) + add_sparsity_entries!(sparsity_pattern, dh; coupling=coupling) + K = allocate_matrix(sparsity_pattern) + # KS = create_symmetric_sparsity_pattern(dh; coupling=coupling) for j in u1dofs, i in vdofs + @test is_stored(sparsity_pattern, i, j) @test is_stored(K, i, j) - @test is_stored(KS, i, j) == (i <= j) + # @test is_stored(KS, i, j) == (i <= j) end for j in u1dofs, i in qdofs + @test !is_stored(sparsity_pattern, i, j) @test !is_stored(K, i, j) - @test !is_stored(KS, i, j) + # @test !is_stored(KS, i, j) end for j in u2dofs, i in Iterators.flatten((v1dofs, qdofs)) + @test is_stored(sparsity_pattern, i, j) @test is_stored(K, i, j) - @test is_stored(KS, i, j) == (i <= j) + # @test is_stored(KS, i, j) == (i <= j) end for j in u2dofs, i in v2dofs + @test is_stored(sparsity_pattern, i, j) == (i == j) @test is_stored(K, i, j) == (i == j) - @test is_stored(KS, i, j) == (i == j) + # @test is_stored(KS, i, j) == (i == j) end for j in pdofs, i in v1dofs + @test !is_stored(sparsity_pattern, i, j) @test !is_stored(K, i, j) - @test !is_stored(KS, i, j) + # @test !is_stored(KS, i, j) end for j in pdofs, i in Iterators.flatten((v2dofs, qdofs)) + @test 
is_stored(sparsity_pattern, i, j) @test is_stored(K, i, j) - @test is_stored(KS, i, j) == (i <= j) + # @test is_stored(KS, i, j) == (i <= j) end # Error paths - @test_throws ErrorException("coupling not square") create_sparsity_pattern(dh; coupling=[true true]) - @test_throws ErrorException("coupling not symmetric") create_symmetric_sparsity_pattern(dh; coupling=[true true; false true]) - @test_throws ErrorException("could not create coupling") create_symmetric_sparsity_pattern(dh; coupling=falses(100, 100)) + @test_throws ErrorException("coupling not square") allocate_matrix(dh; coupling=[true true]) + # @test_throws ErrorException("coupling not symmetric") create_symmetric_sparsity_pattern(dh; coupling=[true true; false true]) + # @test_throws ErrorException("could not create coupling") create_symmetric_sparsity_pattern(dh; coupling=falses(100, 100)) # Test coupling with subdomains grid = generate_grid(Quadrilateral, (1, 2)) @@ -512,28 +540,35 @@ end sdh2 = SubDofHandler(dh, Set(2)) add!(sdh2, :u, Lagrange{RefQuadrilateral,1}()^2) close!(dh) - K = create_sparsity_pattern(dh; coupling = [true true; true false]) - KS = create_symmetric_sparsity_pattern(dh; coupling = [true true; true false]) + + sparsity_pattern = init_sparsity_pattern(dh) + add_sparsity_entries!(sparsity_pattern, dh; coupling = [true true; true false]) + K = allocate_matrix(sparsity_pattern) + KS = Symmetric(allocate_matrix(dh; #= symmetric=true, =# coupling = [true true; true false])) # Subdomain 1: u and p udofs = celldofs(dh, 1)[dof_range(sdh1, :u)] pdofs = celldofs(dh, 1)[dof_range(sdh1, :p)] for j in udofs, i in Iterators.flatten((udofs, pdofs)) + @test is_stored(sparsity_pattern, i, j) @test is_stored(K, i, j) - @test is_stored(KS, i, j) == (i <= j) + # @test is_stored(KS, i, j) == (i <= j) end for j in pdofs, i in udofs + @test is_stored(sparsity_pattern, i, j) @test is_stored(K, i, j) - @test is_stored(KS, i, j) + # @test is_stored(KS, i, j) end for j in pdofs, i in pdofs + @test is_stored(sparsity_pattern, i, j) == (i == j) @test is_stored(K, i, j) == (i == j) - @test is_stored(KS, i, j) == (i == j) + # @test is_stored(KS, i, j) == (i == j) end # Subdomain 2: u udofs = celldofs(dh, 2)[dof_range(sdh2, :u)] for j in udofs, i in udofs + @test is_stored(sparsity_pattern, i, j) @test is_stored(K, i, j) - @test is_stored(KS, i, j) == (i <= j) + # @test is_stored(KS, i, j) == (i <= j) end end @@ -543,38 +578,38 @@ end # reshape.(Iterators.product(fill([true, false], 9)...) |> collect |> vec .|> collect, Ref((3,3))), [ true true true - true true true - true true true + true true true + true true true ], [ true false false - false true false - false false true + false true false + false false true ], [ true true false - true true true - false true true + true true true + false true true ], # Component coupling [ true true true true - true true true true true true true true - true true true true + true true true true + true true true true ], [ true false false false - false true false false + false true false false false false true false - false false false true + false false false true ], [ true true true false - true true true true true true true true - false true true true + true true true true + false true true true ], ] function is_stored(A, i, j) @@ -589,7 +624,7 @@ end i_dofs = dof_range(sdh, field1_idx) ip1 = sdh.field_interpolations[field1_idx] vdim[1] = typeof(ip1) <: VectorizedInterpolation && size(coupling)[1] == 4 ? 
Ferrite.get_n_copies(ip1) : 1 - for dim1 in 1:vdim[1] + for dim1 in 1:vdim[1] for cell2_idx in neighbors sdh2 = dh.subdofhandlers[dh.cell_to_subdofhandler[cell2_idx]] coupling_idx[2] = 1 @@ -615,18 +650,18 @@ end end end end - function check_coupling(dh, topology, K, coupling, cross_coupling) + function check_coupling(dh, topology, K, coupling, interface_coupling) for cell_idx in eachindex(getcells(dh.grid)) sdh = dh.subdofhandlers[dh.cell_to_subdofhandler[cell_idx]] coupling_idx = [1,1] - cross_coupling_idx = [1,1] + interface_coupling_idx = [1,1] vdim = [1,1] # test inner coupling _check_dofs(K, dh, sdh, cell_idx, coupling, coupling_idx, vdim, [cell_idx], false) # test cross-element coupling - neighborhood = Ferrite.getdim(dh.grid.cells[1]) > 1 ? topology.face_face_neighbor : topology.vertex_vertex_neighbor - neighbors = neighborhood[cell_idx, :] - _check_dofs(K, dh, sdh, cell_idx, cross_coupling, cross_coupling_idx, vdim, [i[1][1] for i in neighbors[.!isempty.(neighbors)]], true) + neighborhood = Ferrite.get_facet_facet_neighborhood(topology, grid) + neighbors = [neighborhood[cell_idx, i] for i in 1:size(neighborhood, 2)] + _check_dofs(K, dh, sdh, cell_idx, interface_coupling, interface_coupling_idx, vdim, [i[1][1] for i in neighbors[.!isempty.(neighbors)]], true) end end grid = generate_grid(Quadrilateral, (2, 2)) @@ -636,17 +671,17 @@ end add!(dh, :p, DiscontinuousLagrange{RefQuadrilateral,1}()) add!(dh, :w, Lagrange{RefQuadrilateral,1}()) close!(dh) - for coupling in couplings, cross_coupling in couplings - K = create_sparsity_pattern(dh; coupling=coupling, topology = topology, cross_coupling = cross_coupling) - all(coupling) && @test K == create_sparsity_pattern(dh, topology = topology, cross_coupling = cross_coupling) - check_coupling(dh, topology, K, coupling, cross_coupling) + for coupling in couplings, interface_coupling in couplings + K = allocate_matrix(dh; coupling=coupling, topology = topology, interface_coupling = interface_coupling) + all(coupling) && @test K == allocate_matrix(dh, topology = topology, interface_coupling = interface_coupling) + check_coupling(dh, topology, K, coupling, interface_coupling) end # Error paths - @test_throws ErrorException("coupling not square") create_sparsity_pattern(dh; coupling=[true true]) - @test_throws ErrorException("coupling not symmetric") create_symmetric_sparsity_pattern(dh; coupling=[true true; false true]) - @test_throws ErrorException("could not create coupling") create_symmetric_sparsity_pattern(dh; coupling=falses(100, 100)) - + @test_throws ErrorException("coupling not square") allocate_matrix(dh; coupling=[true true]) + # @test_throws ErrorException("coupling not symmetric") allocate_matrix(dh; coupling=[true true; false true]) + @test_throws ErrorException("could not create coupling") allocate_matrix(dh; coupling=falses(100, 100)) + # Test coupling with subdomains # Note: `check_coupling` works for this case only because the second domain has dofs from the first domain in order. Otherwise tests like in continuous ip are required. 
grid = generate_grid(Quadrilateral, (2, 1)) @@ -661,10 +696,10 @@ end add!(sdh2, :u, DiscontinuousLagrange{RefQuadrilateral,1}()^2) close!(dh) - for coupling in couplings, cross_coupling in couplings - K = create_sparsity_pattern(dh; coupling=coupling, topology = topology, cross_coupling = cross_coupling) - all(coupling) && @test K == create_sparsity_pattern(dh, topology = topology, cross_coupling = cross_coupling) - check_coupling(dh, topology, K, coupling, cross_coupling) + for coupling in couplings, interface_coupling in couplings + K = allocate_matrix(dh; coupling=coupling, topology = topology, interface_coupling = interface_coupling) + all(coupling) && @test K == allocate_matrix(dh, topology = topology, interface_coupling = interface_coupling) + check_coupling(dh, topology, K, coupling, interface_coupling) end # Testing Crouzeix-Raviart coupling @@ -674,8 +709,83 @@ end add!(dh, :u, CrouzeixRaviart{RefTriangle,1}()) close!(dh) coupling = trues(3,3) - K = create_sparsity_pattern(dh; coupling=coupling, topology = topology, cross_coupling = coupling) - K_cont = create_sparsity_pattern(dh; coupling=coupling, topology = topology, cross_coupling = falses(3,3)) - K_default = create_sparsity_pattern(dh) + K = allocate_matrix(dh; coupling=coupling, topology = topology, interface_coupling = coupling) + K_cont = allocate_matrix(dh; coupling=coupling, topology = topology, interface_coupling = falses(3,3)) + K_default = allocate_matrix(dh) @test K == K_cont == K_default end + + +@testset "shell on solid face" begin + + # Node numbering: + # 3 ____ 4 4 + # | | | + # | | | (Beam attached to facet) + # 1 ____ 2 2 + + dim = 2 + grid = generate_grid(Quadrilateral, (1,1)) + line1 = Line((2,4)) + grid = Grid([grid.cells[1], line1], grid.nodes) + + order = 2 + ip_solid = Lagrange{RefQuadrilateral, order}()#^dim + ip_shell = Lagrange{RefLine, order}() + + dh = DofHandler(grid) + sdh_solid = SubDofHandler(dh, Set(1)) + add!(sdh_solid, :u, ip_solid) + sdh_shell = SubDofHandler(dh, Set(2)) + add!(sdh_shell, :u, ip_shell) + close!(dh) + + dofsquad = zeros(Int, ndofs_per_cell(dh, 1)) + dofsbeam = zeros(Int, ndofs_per_cell(dh, 2)) + + celldofs!(dofsquad, dh, 1) + celldofs!(dofsbeam, dh, 2) + @test dofsbeam == [2, 3, 6] + + # Node numbering: + # 5--------7 + # / /| + # / / | + # 6--------8 | + # | | 3 <-- Shell attached on face (4, 3, 7, 8) + # | | / + # | |/ + # 2--------4 + + dim = 2 + grid = generate_grid(Hexahedron, (1,1,1)) + shell = Quadrilateral((4,3,7,8)) + grid = Grid([grid.cells[1], shell], grid.nodes) + + order = 2 + ip_solid = Lagrange{RefHexahedron, order}()#^dim + ip_shell = Lagrange{RefQuadrilateral, order}() + + dh = DofHandler(grid) + sdh_solid = SubDofHandler(dh, Set(1)) + add!(sdh_solid, :u, ip_solid) + sdh_shell = SubDofHandler(dh, Set(2)) + add!(sdh_shell, :u, ip_shell) + Ferrite.close!(dh) + + dofsolid = zeros(Int, ndofs_per_cell(dh, 1)) + dofsshell = zeros(Int, ndofs_per_cell(dh, 2)) + + celldofs!(dofsolid, dh, 1) + celldofs!(dofsshell, dh, 2) + + #Would be nice to have this utility: + #facedofs!(dofs, dh, FaceIndex(1,4)) + + #Shared node dofs + @test dofsshell[1:4] == [3,4,8,7] + #Shared edge dofs + @test dofsshell[5:8] == [11,20,15,19] + #Shared face dof + @test dofsshell[9] == 24 +end diff --git a/test/test_facevalues.jl b/test/test_facevalues.jl index bb781a51ff..9420eee442 100644 --- a/test/test_facevalues.jl +++ b/test/test_facevalues.jl @@ -1,79 +1,132 @@ -@testset "FaceValues" begin +@testset "FacetValues" begin for (scalar_interpol, quad_rule) in ( - (Lagrange{RefLine, 1}(), 
FaceQuadratureRule{RefLine}(2)), - (Lagrange{RefLine, 2}(), FaceQuadratureRule{RefLine}(2)), - (Lagrange{RefQuadrilateral, 1}(), FaceQuadratureRule{RefQuadrilateral}(2)), - (Lagrange{RefQuadrilateral, 2}(), FaceQuadratureRule{RefQuadrilateral}(2)), - (Lagrange{RefTriangle, 1}(), FaceQuadratureRule{RefTriangle}(2)), - (Lagrange{RefTriangle, 2}(), FaceQuadratureRule{RefTriangle}(2)), - (Lagrange{RefHexahedron, 1}(), FaceQuadratureRule{RefHexahedron}(2)), - (Serendipity{RefQuadrilateral, 2}(), FaceQuadratureRule{RefQuadrilateral}(2)), - (Lagrange{RefTetrahedron, 1}(), FaceQuadratureRule{RefTetrahedron}(2)), - (Lagrange{RefTetrahedron, 2}(), FaceQuadratureRule{RefTetrahedron}(2)), - (Lagrange{RefPyramid, 2}(), FaceQuadratureRule{RefPyramid}(2)), - (Lagrange{RefPrism, 2}(), FaceQuadratureRule{RefPrism}(2)), + (Lagrange{RefLine, 1}(), FacetQuadratureRule{RefLine}(2)), + (Lagrange{RefLine, 2}(), FacetQuadratureRule{RefLine}(2)), + (Lagrange{RefQuadrilateral, 1}(), FacetQuadratureRule{RefQuadrilateral}(2)), + (Lagrange{RefQuadrilateral, 2}(), FacetQuadratureRule{RefQuadrilateral}(2)), + (Lagrange{RefTriangle, 1}(), FacetQuadratureRule{RefTriangle}(2)), + (Lagrange{RefTriangle, 2}(), FacetQuadratureRule{RefTriangle}(2)), + (Lagrange{RefHexahedron, 1}(), FacetQuadratureRule{RefHexahedron}(2)), + (Serendipity{RefQuadrilateral, 2}(), FacetQuadratureRule{RefQuadrilateral}(2)), + (Lagrange{RefTetrahedron, 1}(), FacetQuadratureRule{RefTetrahedron}(2)), + (Lagrange{RefTetrahedron, 2}(), FacetQuadratureRule{RefTetrahedron}(2)), + (Lagrange{RefPyramid, 2}(), FacetQuadratureRule{RefPyramid}(2)), + (Lagrange{RefPrism, 2}(), FacetQuadratureRule{RefPrism}(2)), ) - - for func_interpol in (scalar_interpol, VectorizedInterpolation(scalar_interpol)) + for func_interpol in (scalar_interpol, VectorizedInterpolation(scalar_interpol)), DiffOrder in 1:2 + (DiffOrder==2 && Ferrite.getorder(func_interpol)==1) && continue #No need to test linear interpolations again geom_interpol = scalar_interpol # Tests below assume this n_basefunc_base = getnbasefunctions(scalar_interpol) - fv = FaceValues(quad_rule, func_interpol, geom_interpol) - ndim = Ferrite.getdim(func_interpol) + update_gradients = true + update_hessians = (DiffOrder==2 && Ferrite.getorder(func_interpol) > 1) + fv = FacetValues(quad_rule, func_interpol, geom_interpol; update_gradients, update_hessians) + if update_gradients && !update_hessians # Check correct and type-stable default constructor + fv_default = @inferred FacetValues(quad_rule, func_interpol, geom_interpol) + @test typeof(fv) === typeof(fv_default) + @inferred FacetValues(quad_rule, func_interpol, geom_interpol; update_hessians=Val(true)) + end + + rdim = Ferrite.getrefdim(func_interpol) n_basefuncs = getnbasefunctions(func_interpol) @test getnbasefunctions(fv) == n_basefuncs - xs, n = valid_coordinates_and_normals(func_interpol) - for face in 1:Ferrite.nfaces(func_interpol) - reinit!(fv, xs, face) - @test Ferrite.getcurrentface(fv) == face + coords, n = valid_coordinates_and_normals(func_interpol) + for face in 1:Ferrite.nfacets(func_interpol) + reinit!(fv, coords, face) + @test Ferrite.getcurrentfacet(fv) == face # We test this by applying a given deformation gradient on all the nodes. # Since this is a linear deformation we should get back the exact values # from the interpolation. 
- u = Vec{ndim, Float64}[zero(Tensor{1,ndim}) for i in 1:n_basefunc_base] - u_scal = zeros(n_basefunc_base) - H = rand(Tensor{2, ndim}) - V = rand(Tensor{1, ndim}) + V, G, H = if func_interpol isa Ferrite.ScalarInterpolation + (rand(), rand(Tensor{1, rdim}), Tensor{2, rdim}((i,j)-> i==j ? rand() : 0.0)) + else + (rand(Tensor{1, rdim}), rand(Tensor{2, rdim}), Tensor{3, rdim}((i,j,k)-> i==j==k ? rand() : 0.0)) + end + + u_funk(x,V,G,H) = begin + if update_hessians + 0.5*x⋅H⋅x + G⋅x + V + else + G⋅x + V + end + end + + _ue = [u_funk(coords[i],V,G,H) for i in 1:n_basefunc_base] + ue = reinterpret(Float64, _ue) + + for i in 1:getnquadpoints(fv) + xqp = spatial_coordinate(fv, i, coords) + Hqp, Gqp, Vqp = Tensors.hessian(x -> u_funk(x,V,G,H), xqp, :all) + + @test function_value(fv, i, ue) ≈ Vqp + @test function_gradient(fv, i, ue) ≈ Gqp + if update_hessians + #Note, the jacobian of the element is constant, which makes the hessian (of the mapping) + #zero. So this is not the optimal test + @test Ferrite.function_hessian(fv, i, ue) ≈ Hqp + end + if func_interpol isa Ferrite.VectorInterpolation + @test function_symmetric_gradient(fv, i, ue) ≈ 0.5(Gqp + Gqp') + @test function_divergence(fv, i, ue) ≈ tr(Gqp) + rdim == 3 && @test function_curl(fv, i, ue) ≈ Ferrite.curl_from_gradient(Gqp) + else + @test function_divergence(fv, i, ue) ≈ sum(Gqp) + end + end + + #Test FacetValues when input is a ::Vector{<:Vec} (most of which is deprecated) + ue_vec = [zero(Vec{rdim,Float64}) for i in 1:n_basefunc_base] + G_vector = rand(Tensor{2, rdim}) for i in 1:n_basefunc_base - u[i] = H ⋅ xs[i] - u_scal[i] = V ⋅ xs[i] + ue_vec[i] = G_vector ⋅ coords[i] end - u_vector = reinterpret(Float64, u) + for i in 1:getnquadpoints(fv) - @test getnormal(fv, i) ≈ n[face] if func_interpol isa Ferrite.ScalarInterpolation - @test function_gradient(fv, i, u) ≈ H - @test function_symmetric_gradient(fv, i, u) ≈ 0.5(H + H') - @test function_divergence(fv, i, u_scal) ≈ sum(V) - @test function_divergence(fv, i, u) ≈ tr(H) - @test function_gradient(fv, i, u_scal) ≈ V - ndim == 3 && @test function_curl(fv, i, u) ≈ Ferrite.curl_from_gradient(H) - function_value(fv, i, u) - function_value(fv, i, u_scal) - else # func_interpol isa Ferrite.VectorInterpolation - @test function_gradient(fv, i, u_vector) ≈ H - @test (@test_deprecated function_gradient(fv, i, u)) ≈ H - @test function_symmetric_gradient(fv, i, u_vector) ≈ 0.5(H + H') - @test (@test_deprecated function_symmetric_gradient(fv, i, u)) ≈ 0.5(H + H') - @test function_divergence(fv, i, u_vector) ≈ tr(H) - @test (@test_deprecated function_divergence(fv, i, u)) ≈ tr(H) - if ndim == 3 - @test function_curl(fv, i, u_vector) ≈ Ferrite.curl_from_gradient(H) - @test (@test_deprecated function_curl(fv, i, u)) ≈ Ferrite.curl_from_gradient(H) + @test function_gradient(fv, i, ue_vec) ≈ G_vector + else# func_interpol isa Ferrite.VectorInterpolation + @test_throws Ferrite.DeprecationError function_gradient(fv, i, ue_vec) + @test_throws Ferrite.DeprecationError function_symmetric_gradient(fv, i, ue_vec) + @test_throws Ferrite.DeprecationError function_divergence(fv, i, ue_vec) + if rdim == 3 + @test_throws Ferrite.DeprecationError function_curl(fv, i, ue_vec) + end + @test_throws Ferrite.DeprecationError function_value(fv, i, ue_vec) #no value to test against + end + end + + #Check if the non-linear mapping is correct + #Only do this for one interpolation because it relies on AD on "iterative function" + if scalar_interpol === Lagrange{RefQuadrilateral, 2}() + coords_nl = [x+rand(x)*0.01 for x in coords]
#add some displacement to nodes + reinit!(fv, coords_nl, face) + + _ue_nl = [u_funk(coords_nl[i],V,G,H) for i in 1:n_basefunc_base] + ue_nl = reinterpret(Float64, _ue_nl) + + for i in 1:getnquadpoints(fv) + xqp = spatial_coordinate(fv, i, coords_nl) + Hqp, Gqp, Vqp = Tensors.hessian(x -> function_value_from_physical_coord(func_interpol, coords_nl, x, ue_nl), xqp, :all) + @test function_value(fv, i, ue_nl) ≈ Vqp + @test function_gradient(fv, i, ue_nl) ≈ Gqp + if update_hessians + @test Ferrite.function_hessian(fv, i, ue_nl) ≈ Hqp end - @test function_value(fv, i, u_vector) ≈ (@test_deprecated function_value(fv, i, u)) end + reinit!(fv, coords, face) # reinit back to old coords end + # Test of volume vol = 0.0 for i in 1:getnquadpoints(fv) vol += getdetJdV(fv,i) end let ip_base = func_interpol isa VectorizedInterpolation ? func_interpol.ip : func_interpol - x_face = xs[[Ferrite.facedof_indices(ip_base)[face]...]] - @test vol ≈ calculate_face_area(ip_base, x_face, face) + x_face = coords[[Ferrite.facetdof_indices(ip_base)[face]...]] + @test vol ≈ calculate_facet_area(ip_base, x_face, face) end # Test quadrature rule after reinit! with ref. coords @@ -86,14 +139,14 @@ for (scalar_interpol, quad_rule) in ( @test vol ≈ reference_face_area(func_interpol, face) # Test spatial coordinate (after reinit with ref.coords we should get back the quad_points) - # TODO: Renable somehow after quad rule is no longer stored in FaceValues + # TODO: Re-enable somehow after quad rule is no longer stored in FacetValues #for (i, qp_x) in enumerate(getpoints(quad_rule)) # @test spatial_coordinate(fv, i, x) ≈ qp_x #end end - @testset "copy(::FaceValues)" begin + @testset "copy(::FacetValues)" begin fvc = copy(fv) @test typeof(fv) == typeof(fvc) @@ -110,34 +163,31 @@ for (scalar_interpol, quad_rule) in ( end end end - # Test that qr, detJdV, normals, and current_face are copied as expected. + # Test that fqr, detJdV, and normals are copied as expected. # Note that qr remain aliased, as defined by `copy(qr)=qr`, see quadrature.jl. - # Make it easy to test scalar wrapper equality - _mock_isequal(a, b) = a == b - _mock_isequal(a::T, b::T) where {T<:Ferrite.ScalarWrapper} = a[] == b[] - for fname in (:fqr, :detJdV, :normals, :current_face) + for fname in (:fqr, :detJdV, :normals) v = getfield(fv, fname) vc = getfield(fvc, fname) if fname !== :fqr # Test unaliased @test v !== vc end - @test _mock_isequal(v, vc) + @test v == vc end end end end @testset "show" begin - # Just smoke test to make sure show doesn't error. - fv = FaceValues(FaceQuadratureRule{RefQuadrilateral}(2), Lagrange{RefQuadrilateral,2}()) + # Just smoke test to make sure show doesn't error.
+ fv = FacetValues(FacetQuadratureRule{RefQuadrilateral}(2), Lagrange{RefQuadrilateral,2}()) showstring = sprint(show, MIME"text/plain"(), fv) - @test startswith(showstring, "FaceValues(scalar, rdim=2, sdim=2): 2 quadrature points per face") + @test startswith(showstring, "FacetValues(scalar, rdim=2, sdim=2): 2 quadrature points per face") @test contains(showstring, "Function interpolation: Lagrange{RefQuadrilateral, 2}()") @test contains(showstring, "Geometric interpolation: Lagrange{RefQuadrilateral, 1}()^2") - fv.fqr.face_rules[1] = deepcopy(fv.fqr.face_rules[1]) - push!(Ferrite.getweights(fv.fqr.face_rules[1]), 1) - showstring = sprint(show, MIME"text/plain"(), fv) - @test startswith(showstring, "FaceValues(scalar, rdim=2, sdim=2): (3, 2, 2, 2) quadrature points on each face") + fv2 = copy(fv) + push!(Ferrite.getweights(fv2.fqr.face_rules[1]), 1) + showstring = sprint(show, MIME"text/plain"(), fv2) + @test startswith(showstring, "FacetValues(scalar, rdim=2, sdim=2): (3, 2, 2, 2) quadrature points on each face") end end # of testset diff --git a/test/test_grid_addboundaryset.jl b/test/test_grid_addboundaryset.jl index 494eac6af2..33514c3bda 100644 --- a/test/test_grid_addboundaryset.jl +++ b/test/test_grid_addboundaryset.jl @@ -1,66 +1,61 @@ @testset "grid boundary" begin - function _extractboundary(grid::Ferrite.AbstractGrid{3}, topology::ExclusiveTopology, _ftype::Function, _set::Dict) - _ftype(grid, topology, "b_bottom", x -> x[3] ≈ -1.0) - _ftype(grid, topology, "b_top", x -> x[3] ≈ 1.0) - _ftype(grid, topology, "b_right", x -> x[1] ≈ 1.0) - _ftype(grid, topology, "b_left", x -> x[1] ≈ -1.0) - _ftype(grid, topology, "b_front", x -> x[2] ≈ 1.0) - _ftype(grid, topology, "b_back", x -> x[2] ≈ -1.0) - return union(_set["b_bottom"], _set["b_top"], - _set["b_right"], _set["b_left"], - _set["b_front"], _set["b_back"]) + function _extractboundary(grid::Ferrite.AbstractGrid{3}, topology::ExclusiveTopology, _ftype::Function) + return union(( _ftype(grid, topology, x -> x[3] ≈ -1.0), + _ftype(grid, topology, x -> x[3] ≈ 1.0), + _ftype(grid, topology, x -> x[1] ≈ 1.0), + _ftype(grid, topology, x -> x[1] ≈ -1.0), + _ftype(grid, topology, x -> x[2] ≈ 1.0), + _ftype(grid, topology, x -> x[2] ≈ -1.0))...) end - function _extractboundary(grid::Ferrite.AbstractGrid{2}, topology::ExclusiveTopology, _ftype::Function, _set::Dict) - _ftype(grid, topology, "b_bottom", x -> x[2] ≈ -1.0) - _ftype(grid, topology, "b_top", x -> x[2] ≈ 1.0) - _ftype(grid, topology, "b_right", x -> x[1] ≈ 1.0) - _ftype(grid, topology, "b_left", x -> x[1] ≈ -1.0) - - return union(_set["b_bottom"], _set["b_top"], - _set["b_right"], _set["b_left"]) + function _extractboundary(grid::Ferrite.AbstractGrid{2}, topology::ExclusiveTopology, _ftype::Function) + return union(( _ftype(grid, topology, x -> x[1] ≈ 1.0), + _ftype(grid, topology, x -> x[1] ≈ -1.0), + _ftype(grid, topology, x -> x[2] ≈ 1.0), + _ftype(grid, topology, x -> x[2] ≈ -1.0))...) 
end function extractboundary(grid::Ferrite.AbstractGrid{3}, topology::ExclusiveTopology) - faces = _extractboundary(grid, topology, addboundaryfaceset!, grid.facesets) - edges = _extractboundary(grid, topology, addboundaryedgeset!, grid.edgesets) - vertices = _extractboundary(grid, topology, addboundaryvertexset!, grid.vertexsets) - return union(faces, edges, vertices) + facets = _extractboundary(grid, topology, Ferrite.create_boundaryfacetset) + faces = _extractboundary(grid, topology, Ferrite.create_boundaryfaceset) + edges = _extractboundary(grid, topology, Ferrite.create_boundaryedgeset) + vertices = _extractboundary(grid, topology, Ferrite.create_boundaryvertexset) + return union(facets, faces, edges, vertices) end function extractboundary(grid::Ferrite.AbstractGrid{2}, topology::ExclusiveTopology) - faces = _extractboundary(grid, topology, addboundaryfaceset!, grid.facesets) - vertices = _extractboundary(grid, topology, addboundaryvertexset!, grid.vertexsets) - return union(faces, vertices) + facets = _extractboundary(grid, topology, Ferrite.create_boundaryfacetset) + edges = _extractboundary(grid, topology, Ferrite.create_boundaryedgeset) + vertices = _extractboundary(grid, topology, Ferrite.create_boundaryvertexset) + return union(facets, edges, vertices) end - function _extractboundarycheck(grid::Ferrite.AbstractGrid{3}, _ftype::Function, _set::Dict) - _ftype(grid, "b_bottom_c", x -> x[3] ≈ -1.0) - _ftype(grid, "b_top_c", x -> x[3] ≈ 1.0) - _ftype(grid, "b_right_c", x -> x[1] ≈ 1.0) - _ftype(grid, "b_left_c", x -> x[1] ≈ -1.0) - _ftype(grid, "b_front_c", x -> x[2] ≈ 1.0) - _ftype(grid, "b_back_c", x -> x[2] ≈ -1.0) - return union(_set["b_bottom_c"], _set["b_top_c"], - _set["b_right_c"], _set["b_left_c"], - _set["b_front_c"], _set["b_back_c"]) + function _extractboundarycheck(grid::Ferrite.AbstractGrid{3}, _ftype::Function) + return union(( + _ftype(grid, x -> x[3] ≈ -1.0), + _ftype(grid, x -> x[3] ≈ 1.0), + _ftype(grid, x -> x[1] ≈ 1.0), + _ftype(grid, x -> x[1] ≈ -1.0), + _ftype(grid, x -> x[2] ≈ 1.0), + _ftype(grid, x -> x[2] ≈ -1.0))...) end - function _extractboundarycheck(grid::Ferrite.AbstractGrid{2}, _ftype::Function, _set::Dict) - _ftype(grid, "b_bottom_c", x -> x[2] ≈ -1.0) - _ftype(grid, "b_top_c", x -> x[2] ≈ 1.0) - _ftype(grid, "b_right_c", x -> x[1] ≈ 1.0) - _ftype(grid, "b_left_c", x -> x[1] ≈ -1.0) - - return union(_set["b_bottom_c"], _set["b_top_c"], - _set["b_right_c"], _set["b_left_c"]) + function _extractboundarycheck(grid::Ferrite.AbstractGrid{2}, _ftype::Function) + return union(( + _ftype(grid, x -> x[1] ≈ 1.0), + _ftype(grid, x -> x[1] ≈ -1.0), + _ftype(grid, x -> x[2] ≈ 1.0), + _ftype(grid, x -> x[2] ≈ -1.0))...) 
end function extractboundarycheck(grid::Ferrite.AbstractGrid{3}) - faces = _extractboundarycheck(grid, addfaceset!, grid.facesets) - edges = _extractboundarycheck(grid, addedgeset!, grid.edgesets) - vertices = _extractboundarycheck(grid, addvertexset!, grid.vertexsets) - return union(faces, edges, vertices) + faces = _extractboundarycheck(grid, Ferrite.create_faceset) + facets = _extractboundarycheck(grid, Ferrite.create_facetset) + edges = _extractboundarycheck(grid, Ferrite.create_edgeset) + vertices = _extractboundarycheck(grid, Ferrite.create_vertexset) + return union(facets, faces, edges, vertices) end function extractboundarycheck(grid::Ferrite.AbstractGrid{2}) - faces = _extractboundarycheck(grid, addfaceset!, grid.facesets) - vertices = _extractboundarycheck(grid, addvertexset!, grid.vertexsets) - return union(faces, vertices) + facets = _extractboundarycheck(grid, Ferrite.create_facetset) + edges = _extractboundarycheck(grid, Ferrite.create_edgeset) + vertices = _extractboundarycheck(grid, Ferrite.create_vertexset) + return union(facets, edges, vertices) end + #= @testset "getentities" begin # (8) # (7) +-----+-----+(9) @@ -80,9 +75,9 @@ grid = generate_grid(Triangle, (2, 2)); topology = ExclusiveTopology(grid); for cell in 1:getncells(grid) - @test Ferrite.getfacevertices(grid, FaceIndex(cell, 1)) == Set([VertexIndex(cell, 1), VertexIndex(cell, 2)]) - @test Ferrite.getfacevertices(grid, FaceIndex(cell, 2)) == Set([VertexIndex(cell, 2), VertexIndex(cell, 3)]) - @test Ferrite.getfacevertices(grid, FaceIndex(cell, 3)) == Set([VertexIndex(cell, 3), VertexIndex(cell, 1)]) + @test Ferrite.getedgevertices(grid, EdgeIndex(cell, 1)) == Set([VertexIndex(cell, 1), VertexIndex(cell, 2)]) + @test Ferrite.getedgevertices(grid, EdgeIndex(cell, 2)) == Set([VertexIndex(cell, 2), VertexIndex(cell, 3)]) + @test Ferrite.getedgevertices(grid, EdgeIndex(cell, 3)) == Set([VertexIndex(cell, 3), VertexIndex(cell, 1)]) end # 3D for getfaceedges and getedgevertices grid = generate_grid(Tetrahedron, (2, 2, 2)); @@ -125,19 +120,19 @@ # (2) grid = generate_grid(Triangle, (2, 2)); topology = ExclusiveTopology(grid); - addfaceset!(grid, "all", x->true) + addedgeset!(grid, "all", x->true) addvertexset!(grid, "all", x->true) directions = ["bottom", "top", "left", "right"] conditions = [x->x[2]≈-1, x->x[2]≈1, x->x[1]≈-1, x->x[1]≈1] for diridx in 1:4 - addfaceset!(grid, directions[diridx]*"_nall", conditions[diridx];all=false) + addedgeset!(grid, directions[diridx]*"_nall", conditions[diridx];all=false) addvertexset!(grid, directions[diridx], conditions[diridx];all=true) addvertexset!(grid, directions[diridx]*"_nall", conditions[diridx];all=false) #faces - @test Ferrite.filterfaces(grid, grid.facesets["all"], conditions[diridx];all=true) == - grid.facesets[directions[diridx]] - @test Ferrite.filterfaces(grid, grid.facesets["all"], conditions[diridx];all=false) == - grid.facesets[directions[diridx]*"_nall"] + @test Ferrite.filteredges(grid, grid.edgesets["all"], conditions[diridx];all=true) == + grid.edgesets[directions[diridx]] + @test Ferrite.filteredges(grid, grid.edgesets["all"], conditions[diridx];all=false) == + grid.edgesets[directions[diridx]*"_nall"] #vertices @test Ferrite.filtervertices(grid, grid.vertexsets["all"], conditions[diridx];all=true) == grid.vertexsets[directions[diridx]] @@ -163,11 +158,12 @@ # (2) grid = generate_grid(Triangle, (2, 2)); topology = ExclusiveTopology(grid); - addfaceset!(grid, "all", x->true) + addedgeset!(grid, "all", x->true) addvertexset!(grid, "all", x->true) - @test 
∪([Ferrite.getfaceinstances(grid, topology,face) for face in Ferrite.faceskeleton(topology, grid)]...) == grid.facesets["all"] + @test ∪([Ferrite.getedgeinstances(grid, topology,face) for face in Ferrite.facetskeleton(topology, grid)]...) == grid.edgesets["all"] end - @testset "addboundaryset" for cell_type in [ + =# + @testset "addboundaryset ($cell_type)" for cell_type in [ # Line, # topology construction error # QuadraticLine, # topology construction error @@ -188,8 +184,14 @@ SerendipityQuadraticHexahedron ] # Grid tests - Regression test for https://github.com/Ferrite-FEM/Ferrite.jl/discussions/565 - grid = generate_grid(cell_type, ntuple(i->3, Ferrite.getdim(cell_type))) + grid = generate_grid(cell_type, ntuple(i->3, Ferrite.getrefdim(cell_type))) topology = ExclusiveTopology(grid) @test extractboundary(grid, topology) == extractboundarycheck(grid) + + filter_function(x) = x[1] > 0 + addboundaryvertexset!(grid, topology, "test_boundary_vertexset", filter_function) + @test getvertexset(grid, "test_boundary_vertexset") == Ferrite.create_boundaryvertexset(grid, topology, filter_function) + addboundaryfacetset!(grid, topology, "test_boundary_facetset", filter_function) + @test getfacetset(grid, "test_boundary_facetset") == Ferrite.create_boundaryfacetset(grid, topology, filter_function) end end diff --git a/test/test_grid_dofhandler_vtk.jl b/test/test_grid_dofhandler_vtk.jl index ec359ec0ff..1fb651cc8e 100644 --- a/test/test_grid_dofhandler_vtk.jl +++ b/test/test_grid_dofhandler_vtk.jl @@ -33,15 +33,15 @@ end radius = 2*1.5 addcellset!(grid, "cell-1", [1,]) addcellset!(grid, "middle-cells", x -> norm(x) < radius) - addfaceset!(grid, "middle-faceset", x -> norm(x) < radius) - addfaceset!(grid, "right-faceset", getfaceset(grid, "right")) + addfacetset!(grid, "middle-facetset", x -> norm(x) < radius) + addfacetset!(grid, "right-facetset", getfacetset(grid, "right")) addnodeset!(grid, "middle-nodes", x -> norm(x) < radius) gridfilename = "grid-$(repr(celltype))" - vtk_grid(gridfilename, grid) do vtk - vtk_cellset(vtk, grid, "cell-1") - vtk_cellset(vtk, grid, "middle-cells") - vtk_nodeset(vtk, grid, "middle-nodes") + VTKGridFile(gridfilename, grid) do vtk + Ferrite.write_cellset(vtk, grid, "cell-1") + Ferrite.write_cellset(vtk, grid, "middle-cells") + Ferrite.write_nodeset(vtk, grid, "middle-nodes") end # test the sha of the file @@ -56,17 +56,18 @@ end # Create a DofHandler, add some things, write to file and # then check the resulting sha dofhandler = DofHandler(grid) - ip = Ferrite.default_interpolation(celltype) + ip = geometric_interpolation(celltype) + @test ip == geometric_interpolation(getcells(grid, 1)) # Test ::AbstractCell dispatch add!(dofhandler, :temperature, ip) add!(dofhandler, :displacement, ip^dim) close!(dofhandler) ch = ConstraintHandler(dofhandler) - dbc = Dirichlet(:temperature, getfaceset(grid, "right-faceset"), (x,t)->1) + dbc = Dirichlet(:temperature, getfacetset(grid, "right-facetset"), (x,t)->1) add!(ch, dbc) - dbc = Dirichlet(:temperature, getfaceset(grid, "left"), (x,t)->4) + dbc = Dirichlet(:temperature, getfacetset(grid, "left"), (x,t)->4) add!(ch, dbc) for d in 1:dim - dbc = Dirichlet(:displacement, union(getfaceset(grid, "left")), (x,t) -> d, d) + dbc = Dirichlet(:displacement, union(getfacetset(grid, "left")), (x,t) -> d, d) add!(ch, dbc) end close!(ch) @@ -77,9 +78,9 @@ end apply!(u, ch) dofhandlerfilename = "dofhandler-$(repr(celltype))" - vtk_grid(dofhandlerfilename, dofhandler) do vtk - vtk_point_data(vtk, ch) - vtk_point_data(vtk, dofhandler, u) + 
VTKGridFile(dofhandlerfilename, grid) do vtk + Ferrite.write_constraints(vtk, ch) + write_solution(vtk, dofhandler, u) end # test the sha of the file @@ -91,6 +92,15 @@ end rm(dofhandlerfilename*".vtu") end + minv, maxv = Ferrite.bounding_box(grid) + @test minv ≈ 2left + @test maxv ≈ 2right + + # Consistency check for topological queries + @test Ferrite.reference_vertices(getcells(grid,1)) == Ferrite.reference_vertices(Ferrite.getrefshape(celltype)) + @test Ferrite.reference_edges(getcells(grid,1)) == Ferrite.reference_edges(Ferrite.getrefshape(celltype)) + @test Ferrite.reference_faces(getcells(grid,1)) == Ferrite.reference_faces(Ferrite.getrefshape(celltype)) + @test Ferrite.reference_facets(getcells(grid,1)) == Ferrite.reference_facets(Ferrite.getrefshape(celltype)) end end # of testset @@ -114,10 +124,10 @@ close(csio) vector_data = [Vec{3}(ntuple(i->i, 3)) for j=1:8] filename_3d = "test_vtk_3d" - vtk_grid(filename_3d, grid) do vtk_file - vtk_point_data(vtk_file, sym_tensor_data, "symmetric tensor") - vtk_point_data(vtk_file, tensor_data, "tensor") - vtk_point_data(vtk_file, vector_data, "vector") + VTKGridFile(filename_3d, grid) do vtk + write_node_data(vtk, sym_tensor_data, "symmetric tensor") + write_node_data(vtk, tensor_data, "tensor") + write_node_data(vtk, vector_data, "vector") end # 2D grid @@ -129,11 +139,11 @@ close(csio) vector_data = [Vec{2}(ntuple(i->i, 2)) for j=1:4] filename_2d = "test_vtk_2d" - vtk_grid(filename_2d, grid) do vtk_file - vtk_point_data(vtk_file, sym_tensor_data, "symmetric tensor") - vtk_point_data(vtk_file, tensor_data, "tensor") - vtk_point_data(vtk_file, tensor_data_1D, "tensor_1d") - vtk_point_data(vtk_file, vector_data, "vector") + VTKGridFile(filename_2d, grid) do vtk + write_node_data(vtk, sym_tensor_data, "symmetric tensor") + write_node_data(vtk, tensor_data, "tensor") + write_node_data(vtk, tensor_data_1D, "tensor_1d") + write_node_data(vtk, vector_data, "vector") end # test the shas of the files @@ -181,12 +191,12 @@ end end @test n == div(getncells(grid)*(getncells(grid) + 1), 2) - # FaceCache + # FacetCache grid = generate_grid(Triangle, (3,3)) - fc = FaceCache(grid) - faceindex = first(getfaceset(grid, "left")) - cell_id, face_id = faceindex - reinit!(fc, faceindex) + fc = FacetCache(grid) + facetindex = first(getfacetset(grid, "left")) + cell_id, facet_id = facetindex + reinit!(fc, facetindex) # @test Ferrite.faceindex(fc) == faceindex @test cellid(fc) == cell_id # @test Ferrite.faceid(fc) == face_id @@ -194,18 +204,18 @@ end @test getcoordinates(fc) == getcoordinates(grid, cell_id) @test length(celldofs(fc)) == 0 # Empty because no DofHandler given - # FaceIterator, also tests `reinit!(fv::FaceValues, fc::FaceCache)` + # FacetIterator, also tests `reinit!(fv::FacetValues, fc::FacetCache)` for (dim, celltype) in ((1, Line), (2, Quadrilateral), (3, Hexahedron)) grid = generate_grid(celltype, ntuple(_ -> 3, dim)) ip = Lagrange{Ferrite.RefHypercube{dim}, 1}()^dim - fqr = FaceQuadratureRule{Ferrite.RefHypercube{dim}}(2) - fv = FaceValues(fqr, ip) + fqr = FacetQuadratureRule{Ferrite.RefHypercube{dim}}(2) + fv = FacetValues(fqr, ip) dh = DofHandler(grid); add!(dh, :u, ip); close!(dh) - faceset = getfaceset(grid, "right") + facetset = getfacetset(grid, "right") for dh_or_grid in (grid, dh) - @test first(FaceIterator(dh_or_grid, faceset)) isa FaceCache + @test first(FacetIterator(dh_or_grid, facetset)) isa FacetCache area = 0.0 - for face in FaceIterator(dh_or_grid, faceset) + for face in FacetIterator(dh_or_grid, facetset) reinit!(fv, face) for 
q_point in 1:getnquadpoints(fv) area += getdetJdV(fv, q_point) @@ -237,7 +247,7 @@ end grid = Grid(cells, nodes) ip1 = DiscontinuousLagrange{RefQuadrilateral, 1}() ip2 = DiscontinuousLagrange{RefTriangle, 1}() - dh = DofHandler(grid); + dh = DofHandler(grid); sdh1 = SubDofHandler(dh, Set([1])); add!(sdh1, :u, ip1); sdh2 = SubDofHandler(dh, Set([2])); add!(sdh2, :u, ip2); close!(dh) @@ -248,12 +258,12 @@ end mixed_grid = Grid([Quadrilateral((1, 2, 3, 4)),Triangle((3, 2, 5))], [Node(coord) for coord in zeros(Vec{2,Float64}, 5)]) cellset = Set(1:getncells(mixed_grid)) - faceset = Set(FaceIndex(i, 1) for i in 1:getncells(mixed_grid)) + facetset = Set(FacetIndex(i, 1) for i in 1:getncells(mixed_grid)) @test_throws ErrorException Ferrite._check_same_celltype(mixed_grid, cellset) - @test_throws ErrorException Ferrite._check_same_celltype(mixed_grid, faceset) + @test_throws ErrorException Ferrite._check_same_celltype(mixed_grid, facetset) std_grid = generate_grid(Quadrilateral, (getncells(mixed_grid),1)) @test Ferrite._check_same_celltype(std_grid, cellset) === nothing - @test Ferrite._check_same_celltype(std_grid, faceset) === nothing + @test Ferrite._check_same_celltype(std_grid, facetset) === nothing end @testset "Grid sets" begin @@ -262,46 +272,52 @@ end #Test manual add addcellset!(grid, "cell_set", [1]); addnodeset!(grid, "node_set", [1]) - addfaceset!(grid, "face_set", [FaceIndex(1,1)]) - addedgeset!(grid, "edge_set", [EdgeIndex(1,1)]) + addfacetset!(grid, "face_set", [FacetIndex(1,1)]) addvertexset!(grid, "vert_set", [VertexIndex(1,1)]) #Test function add - addfaceset!(grid, "left_face", (x)-> x[1] ≈ 0.0) - addedgeset!(grid, "left_lower_edge", (x)-> x[1] ≈ 0.0 && x[3] ≈ 0.0) + addfacetset!(grid, "left_face", (x)-> x[1] ≈ 0.0) + left_lower_edge = Ferrite.create_edgeset(grid, (x)-> x[1] ≈ 0.0 && x[3] ≈ 0.0) addvertexset!(grid, "left_corner", (x)-> x[1] ≈ 0.0 && x[2] ≈ 0.0 && x[3] ≈ 0.0) @test 1 in Ferrite.getnodeset(grid, "node_set") - @test FaceIndex(1,5) in getfaceset(grid, "left_face") - @test EdgeIndex(1,4) in getedgeset(grid, "left_lower_edge") + @test FacetIndex(1,5) in getfacetset(grid, "left_face") + @test EdgeIndex(1,4) in left_lower_edge @test VertexIndex(1,1) in getvertexset(grid, "left_corner") end @testset "Grid topology" begin + # Error paths + mixed_rdim_grid = Grid([Triangle((1,2,3)), Line((2,3))], [Node((0.0, 0.0)), Node((1.0, 0.0)), Node((1.0, 1.1))]) + @test_throws ErrorException ExclusiveTopology(mixed_rdim_grid) + top_line = ExclusiveTopology(generate_grid(Line, (3,))) + @test_throws ArgumentError Ferrite.get_facet_facet_neighborhood(top_line, mixed_rdim_grid) + @test_throws ArgumentError Ferrite.getneighborhood(top_line, mixed_rdim_grid, FacetIndex(1,2)) + # # (1) (2) (3) (4) # +---+---+---+ # linegrid = generate_grid(Line,(3,)) linetopo = ExclusiveTopology(linegrid) - @test linetopo.vertex_vertex_neighbor[1,2] == Ferrite.EntityNeighborhood(VertexIndex(2,1)) + @test linetopo.vertex_vertex_neighbor[1,2] == [VertexIndex(2,1)] @test getneighborhood(linetopo, linegrid, VertexIndex(1,2)) == [VertexIndex(2,1)] - @test linetopo.vertex_vertex_neighbor[2,1] == Ferrite.EntityNeighborhood(VertexIndex(1,2)) + @test linetopo.vertex_vertex_neighbor[2,1] == [VertexIndex(1,2)] @test getneighborhood(linetopo, linegrid, VertexIndex(2,1)) == [VertexIndex(1,2)] - @test linetopo.vertex_vertex_neighbor[2,2] == Ferrite.EntityNeighborhood(VertexIndex(3,1)) + @test linetopo.vertex_vertex_neighbor[2,2] == [VertexIndex(3,1)] @test getneighborhood(linetopo, linegrid, VertexIndex(2,2)) == 
[VertexIndex(3,1)] - @test linetopo.vertex_vertex_neighbor[3,1] == Ferrite.EntityNeighborhood(VertexIndex(2,2)) + @test linetopo.vertex_vertex_neighbor[3,1] == [VertexIndex(2,2)] @test getneighborhood(linetopo, linegrid, VertexIndex(3,1)) == [VertexIndex(2,2)] - linefaceskeleton = Ferrite.faceskeleton(linetopo, linegrid) + @test getneighborhood(linetopo, linegrid, FacetIndex(3,1)) == getneighborhood(linetopo, linegrid, VertexIndex(3,1)) + + linefaceskeleton = Ferrite.facetskeleton(linetopo, linegrid) quadlinegrid = generate_grid(QuadraticLine,(3,)) quadlinetopo = ExclusiveTopology(quadlinegrid) - quadlinefaceskeleton = Ferrite.faceskeleton(quadlinetopo, quadlinegrid) + quadlinefaceskeleton = Ferrite.facetskeleton(quadlinetopo, quadlinegrid) # Test faceskeleton @test Set(linefaceskeleton) == Set(quadlinefaceskeleton) == Set([ - FaceIndex(1,1), FaceIndex(1,2), - FaceIndex(2,2), - FaceIndex(3,2), + FacetIndex(1,1), FacetIndex(1,2), FacetIndex(2,2),FacetIndex(3,2), ]) # (11) @@ -315,68 +331,54 @@ end # (2) quadgrid = generate_grid(Quadrilateral,(2,3)) topology = ExclusiveTopology(quadgrid) - faceskeleton = Ferrite.faceskeleton(topology, quadgrid) + faceskeleton = Ferrite.facetskeleton(topology, quadgrid) #test vertex neighbors maps cellid and local vertex id to neighbor id and neighbor local vertex id - @test topology.vertex_vertex_neighbor[1,3] == Ferrite.EntityNeighborhood(VertexIndex(4,1)) - @test topology.vertex_vertex_neighbor[2,4] == Ferrite.EntityNeighborhood(VertexIndex(3,2)) + @test topology.vertex_vertex_neighbor[1,3] == [VertexIndex(4,1)] + @test topology.vertex_vertex_neighbor[2,4] == [VertexIndex(3,2)] @test Set(getneighborhood(topology, quadgrid, VertexIndex(2,4))) == Set([VertexIndex(1,3), VertexIndex(3,2), VertexIndex(4,1)]) - @test topology.vertex_vertex_neighbor[3,3] == Ferrite.EntityNeighborhood(VertexIndex(6,1)) - @test topology.vertex_vertex_neighbor[3,2] == Ferrite.EntityNeighborhood(VertexIndex(2,4)) - @test topology.vertex_vertex_neighbor[4,1] == Ferrite.EntityNeighborhood(VertexIndex(1,3)) - @test topology.vertex_vertex_neighbor[4,4] == Ferrite.EntityNeighborhood(VertexIndex(5,2)) - @test topology.vertex_vertex_neighbor[5,2] == Ferrite.EntityNeighborhood(VertexIndex(4,4)) - @test topology.vertex_vertex_neighbor[6,1] == Ferrite.EntityNeighborhood(VertexIndex(3,3)) + @test topology.vertex_vertex_neighbor[3,3] == [VertexIndex(6,1)] + @test topology.vertex_vertex_neighbor[3,2] == [VertexIndex(2,4)] + @test topology.vertex_vertex_neighbor[4,1] == [VertexIndex(1,3)] + @test topology.vertex_vertex_neighbor[4,4] == [VertexIndex(5,2)] + @test topology.vertex_vertex_neighbor[5,2] == [VertexIndex(4,4)] + @test topology.vertex_vertex_neighbor[6,1] == [VertexIndex(3,3)] @test isempty(getneighborhood(topology, quadgrid, VertexIndex(2,2))) @test length(getneighborhood(topology, quadgrid, VertexIndex(2,4))) == 3 #test face neighbor maps cellid and local face id to neighbor id and neighbor local face id - @test topology.face_face_neighbor[1,2] == Ferrite.EntityNeighborhood(FaceIndex(2,4)) - @test getneighborhood(topology, quadgrid, FaceIndex(1,2)) == [FaceIndex(2,4)] - @test topology.face_face_neighbor[1,3] == Ferrite.EntityNeighborhood(FaceIndex(3,1)) - @test getneighborhood(topology, quadgrid, FaceIndex(1,3)) == [FaceIndex(3,1)] - @test topology.face_face_neighbor[2,3] == Ferrite.EntityNeighborhood(FaceIndex(4,1)) - @test getneighborhood(topology, quadgrid, FaceIndex(2,3)) == [FaceIndex(4,1)] - @test topology.face_face_neighbor[2,4] == Ferrite.EntityNeighborhood(FaceIndex(1,2)) - 
@test getneighborhood(topology, quadgrid, FaceIndex(2,4)) == [FaceIndex(1,2)] - @test topology.face_face_neighbor[3,1] == Ferrite.EntityNeighborhood(FaceIndex(1,3)) - @test getneighborhood(topology, quadgrid, FaceIndex(3,1)) == [FaceIndex(1,3)] - @test topology.face_face_neighbor[3,2] == Ferrite.EntityNeighborhood(FaceIndex(4,4)) - @test getneighborhood(topology, quadgrid, FaceIndex(3,2)) == [FaceIndex(4,4)] - @test topology.face_face_neighbor[3,3] == Ferrite.EntityNeighborhood(FaceIndex(5,1)) - @test getneighborhood(topology, quadgrid, FaceIndex(3,3)) == [FaceIndex(5,1)] - @test topology.face_face_neighbor[4,1] == Ferrite.EntityNeighborhood(FaceIndex(2,3)) - @test getneighborhood(topology, quadgrid, FaceIndex(4,1)) == [FaceIndex(2,3)] - @test topology.face_face_neighbor[4,3] == Ferrite.EntityNeighborhood(FaceIndex(6,1)) - @test getneighborhood(topology, quadgrid, FaceIndex(4,3)) == [FaceIndex(6,1)] - @test topology.face_face_neighbor[4,4] == Ferrite.EntityNeighborhood(FaceIndex(3,2)) - @test getneighborhood(topology, quadgrid, FaceIndex(1,2)) == [FaceIndex(2,4)] - @test topology.face_face_neighbor[5,1] == Ferrite.EntityNeighborhood(FaceIndex(3,3)) - @test getneighborhood(topology, quadgrid, FaceIndex(5,1)) == [FaceIndex(3,3)] - @test topology.face_face_neighbor[5,2] == Ferrite.EntityNeighborhood(FaceIndex(6,4)) - @test getneighborhood(topology, quadgrid, FaceIndex(5,2)) == [FaceIndex(6,4)] - @test topology.face_face_neighbor[5,3] == Ferrite.EntityNeighborhood(Ferrite.BoundaryIndex[]) - @test getneighborhood(topology, quadgrid, FaceIndex(5,3)) == FaceIndex[] - @test topology.face_face_neighbor[5,4] == Ferrite.EntityNeighborhood(Ferrite.BoundaryIndex[]) - @test getneighborhood(topology, quadgrid, FaceIndex(5,4)) == FaceIndex[] - @test topology.face_face_neighbor[6,1] == Ferrite.EntityNeighborhood(FaceIndex(4,3)) - @test getneighborhood(topology, quadgrid, FaceIndex(6,1)) == [FaceIndex(4,3)] - @test topology.face_face_neighbor[6,2] == Ferrite.EntityNeighborhood(Ferrite.BoundaryIndex[]) - @test getneighborhood(topology, quadgrid, FaceIndex(6,2)) == FaceIndex[] - @test topology.face_face_neighbor[6,3] == Ferrite.EntityNeighborhood(Ferrite.BoundaryIndex[]) - @test getneighborhood(topology, quadgrid, FaceIndex(6,3)) == FaceIndex[] - @test topology.face_face_neighbor[6,4] == Ferrite.EntityNeighborhood(FaceIndex(5,2)) - @test getneighborhood(topology, quadgrid, FaceIndex(6,4)) == [FaceIndex(5,2)] - + @test topology.edge_edge_neighbor[1,2] == [EdgeIndex(2,4)] + @test topology.edge_edge_neighbor[1,3] == [EdgeIndex(3,1)] + @test topology.edge_edge_neighbor[2,3] == [EdgeIndex(4,1)] + @test topology.edge_edge_neighbor[2,4] == [EdgeIndex(1,2)] + @test topology.edge_edge_neighbor[3,1] == [EdgeIndex(1,3)] + @test topology.edge_edge_neighbor[3,2] == [EdgeIndex(4,4)] + @test topology.edge_edge_neighbor[3,3] == [EdgeIndex(5,1)] + @test topology.edge_edge_neighbor[4,1] == [EdgeIndex(2,3)] + @test topology.edge_edge_neighbor[4,3] == [EdgeIndex(6,1)] + @test topology.edge_edge_neighbor[4,4] == [EdgeIndex(3,2)] + @test topology.edge_edge_neighbor[5,1] == [EdgeIndex(3,3)] + @test topology.edge_edge_neighbor[5,2] == [EdgeIndex(6,4)] + @test isempty(topology.edge_edge_neighbor[5,3]) + @test isempty(topology.edge_edge_neighbor[5,4]) + @test topology.edge_edge_neighbor[6,1] == [EdgeIndex(4,3)] + @test isempty(topology.edge_edge_neighbor[6,2]) + @test isempty(topology.edge_edge_neighbor[6,3]) + @test topology.edge_edge_neighbor[6,4] == [EdgeIndex(5,2)] + @test getneighborhood(topology, quadgrid, EdgeIndex(6, 4), 
false) == [EdgeIndex(5,2)] + @test Set(getneighborhood(topology, quadgrid, EdgeIndex(6, 4), true)) == Set([EdgeIndex(6, 4), EdgeIndex(5,2)]) + + @test getneighborhood(topology, quadgrid, EdgeIndex(2,4)) == getneighborhood(topology, quadgrid, FacetIndex(2,4)) + quadquadgrid = generate_grid(QuadraticQuadrilateral,(2,3)) quadtopology = ExclusiveTopology(quadquadgrid) - quadfaceskeleton = Ferrite.faceskeleton(quadtopology, quadquadgrid) + quadfaceskeleton = Ferrite.facetskeleton(quadtopology, quadquadgrid) # Test faceskeleton @test Set(faceskeleton) == Set(quadfaceskeleton) == Set([ - FaceIndex(1,1), FaceIndex(1,2), FaceIndex(1,3), FaceIndex(1,4), - FaceIndex(2,1), FaceIndex(2,2), FaceIndex(2,3), - FaceIndex(3,2), FaceIndex(3,3), FaceIndex(3,4), - FaceIndex(4,2), FaceIndex(4,3), - FaceIndex(5,2), FaceIndex(5,3), FaceIndex(5,4), - FaceIndex(6,2), FaceIndex(6,3), + FacetIndex(1,1), FacetIndex(1,2), FacetIndex(1,3), FacetIndex(1,4), + FacetIndex(2,1), FacetIndex(2,2), FacetIndex(2,3), + FacetIndex(3,2), FacetIndex(3,3), FacetIndex(3,4), + FacetIndex(4,2), FacetIndex(4,3), + FacetIndex(5,2), FacetIndex(5,3), FacetIndex(5,4), + FacetIndex(6,2), FacetIndex(6,3), ]) # (8) @@ -395,14 +397,14 @@ end # (11) hexgrid = generate_grid(Hexahedron,(2,2,1)) topology = ExclusiveTopology(hexgrid) - @test topology.edge_edge_neighbor[1,11] == Ferrite.EntityNeighborhood(EdgeIndex(4,9)) + @test topology.edge_edge_neighbor[1,11] == [EdgeIndex(4,9)] @test Set(getneighborhood(topology,hexgrid,EdgeIndex(1,11),true)) == Set([EdgeIndex(4,9),EdgeIndex(2,12),EdgeIndex(3,10),EdgeIndex(1,11)]) @test Set(getneighborhood(topology,hexgrid,EdgeIndex(1,11),false)) == Set([EdgeIndex(4,9),EdgeIndex(2,12),EdgeIndex(3,10)]) - @test topology.edge_edge_neighbor[2,12] == Ferrite.EntityNeighborhood(EdgeIndex(3,10)) + @test topology.edge_edge_neighbor[2,12] == [EdgeIndex(3,10)] @test Set(getneighborhood(topology,hexgrid,EdgeIndex(2,12),true)) == Set([EdgeIndex(3,10),EdgeIndex(1,11),EdgeIndex(4,9),EdgeIndex(2,12)]) @test Set(getneighborhood(topology,hexgrid,EdgeIndex(2,12),false)) == Set([EdgeIndex(3,10),EdgeIndex(1,11),EdgeIndex(4,9)]) - @test topology.edge_edge_neighbor[3,10] == Ferrite.EntityNeighborhood(EdgeIndex(2,12)) - @test topology.edge_edge_neighbor[4,9] == Ferrite.EntityNeighborhood(EdgeIndex(1,11)) + @test topology.edge_edge_neighbor[3,10] == [EdgeIndex(2,12)] + @test topology.edge_edge_neighbor[4,9] == [EdgeIndex(1,11)] @test getneighborhood(topology,hexgrid,FaceIndex((1,3))) == [FaceIndex((2,5))] @test getneighborhood(topology,hexgrid,FaceIndex((1,4))) == [FaceIndex((3,2))] @test getneighborhood(topology,hexgrid,FaceIndex((2,4))) == [FaceIndex((4,2))] @@ -411,6 +413,9 @@ end @test getneighborhood(topology,hexgrid,FaceIndex((3,3))) == [FaceIndex((4,5))] @test getneighborhood(topology,hexgrid,FaceIndex((4,2))) == [FaceIndex((2,4))] @test getneighborhood(topology,hexgrid,FaceIndex((4,5))) == [FaceIndex((3,3))] + @test Set(getneighborhood(topology, hexgrid, FaceIndex((4,5)), true)) == Set([FaceIndex((3,3)), FaceIndex((4,5))]) + + @test getneighborhood(topology, hexgrid, FaceIndex(2,4)) == getneighborhood(topology, hexgrid, FacetIndex(2,4)) # regression for https://github.com/Ferrite-FEM/Ferrite.jl/issues/518 serendipitygrid = generate_grid(SerendipityQuadraticHexahedron,(2,2,1)) @@ -418,13 +423,13 @@ end @test all(stopology.face_face_neighbor .== topology.face_face_neighbor) @test all(stopology.vertex_vertex_neighbor .== topology.vertex_vertex_neighbor) # Test faceskeleton - faceskeleton = Ferrite.faceskeleton(topology, hexgrid) - 
sfaceskeleton = Ferrite.faceskeleton(stopology, serendipitygrid) + faceskeleton = Ferrite.facetskeleton(topology, hexgrid) + sfaceskeleton = Ferrite.facetskeleton(stopology, serendipitygrid) @test Set(faceskeleton) == Set(sfaceskeleton) == Set([ - FaceIndex(1,1), FaceIndex(1,2), FaceIndex(1,3), FaceIndex(1,4), FaceIndex(1,5), FaceIndex(1,6), - FaceIndex(2,1), FaceIndex(2,2), FaceIndex(2,3), FaceIndex(2,4), FaceIndex(2,6), - FaceIndex(3,1), FaceIndex(3,3), FaceIndex(3,4), FaceIndex(3,5), FaceIndex(3,6), - FaceIndex(4,1), FaceIndex(4,3), FaceIndex(4,4), FaceIndex(4,6), + FacetIndex(1,1), FacetIndex(1,2), FacetIndex(1,3), FacetIndex(1,4), FacetIndex(1,5), FacetIndex(1,6), + FacetIndex(2,1), FacetIndex(2,2), FacetIndex(2,3), FacetIndex(2,4), FacetIndex(2,6), + FacetIndex(3,1), FacetIndex(3,3), FacetIndex(3,4), FacetIndex(3,5), FacetIndex(3,6), + FacetIndex(4,1), FacetIndex(4,3), FacetIndex(4,4), FacetIndex(4,6), ]) # +-----+-----+ @@ -439,7 +444,12 @@ end # test for multiple vertex_neighbors as in e.g. ele 3, local vertex 3 (middle node) trigrid = generate_grid(Triangle,(2,2)) topology = ExclusiveTopology(trigrid) - @test topology.vertex_vertex_neighbor[3,3] == Ferrite.EntityNeighborhood([VertexIndex(5,2),VertexIndex(6,1),VertexIndex(7,1)]) + tri_vert_nbset = Set([VertexIndex(5,2), VertexIndex(6,1), VertexIndex(7,1)]) # Exclusive neighbors + @test Set(topology.vertex_vertex_neighbor[3,3]) == tri_vert_nbset + union!(tri_vert_nbset, [VertexIndex(4, 3), VertexIndex(2, 2)]) # Add vertices shared via edges as well + @test Set(getneighborhood(topology, trigrid, VertexIndex(3,3), false)) == tri_vert_nbset + union!(tri_vert_nbset, [VertexIndex(3,3)]) # Add self + @test Set(getneighborhood(topology, trigrid, VertexIndex(3,3), true)) == tri_vert_nbset quadtrigrid = generate_grid(QuadraticTriangle,(2,2)) quadtopology = ExclusiveTopology(trigrid) @@ -447,31 +457,43 @@ end @test all(quadtopology.face_face_neighbor .== topology.face_face_neighbor) @test all(quadtopology.vertex_vertex_neighbor .== topology.vertex_vertex_neighbor) # Test faceskeleton - trifaceskeleton = Ferrite.faceskeleton(topology, trigrid) - quadtrifaceskeleton = Ferrite.faceskeleton(quadtopology, quadtrigrid) + trifaceskeleton = Ferrite.facetskeleton(topology, trigrid) + quadtrifaceskeleton = Ferrite.facetskeleton(quadtopology, quadtrigrid) @test Set(trifaceskeleton) == Set(quadtrifaceskeleton) == Set([ - FaceIndex(1,1), FaceIndex(1,2), FaceIndex(1,3), - FaceIndex(2,1), FaceIndex(2,2), - FaceIndex(3,1), FaceIndex(3,2), - FaceIndex(4,1), FaceIndex(4,2), - FaceIndex(5,2), FaceIndex(5,3), - FaceIndex(6,1), FaceIndex(6,2), - FaceIndex(7,2), - FaceIndex(8,1), FaceIndex(8,2), + FacetIndex(1,1), FacetIndex(1,2), FacetIndex(1,3), + FacetIndex(2,1), FacetIndex(2,2), + FacetIndex(3,1), FacetIndex(3,2), + FacetIndex(4,1), FacetIndex(4,2), + FacetIndex(5,2), FacetIndex(5,3), + FacetIndex(6,1), FacetIndex(6,2), + FacetIndex(7,2), + FacetIndex(8,1), FacetIndex(8,2), ]) # Test tetrahedron faceskeleton tetgrid = generate_grid(Tetrahedron, (1,1,1)) topology = ExclusiveTopology(tetgrid) - tetfaceskeleton = Ferrite.faceskeleton(topology, tetgrid) + tetfaceskeleton = Ferrite.facetskeleton(topology, tetgrid) @test Set(tetfaceskeleton) == Set([ - FaceIndex(1,1), FaceIndex(1,2), FaceIndex(1,3), FaceIndex(1,4), - FaceIndex(2,1), FaceIndex(2,2), FaceIndex(2,3), - FaceIndex(3,1), FaceIndex(3,2), FaceIndex(3,3), - FaceIndex(4,1), FaceIndex(4,2), FaceIndex(4,3), - FaceIndex(5,1), FaceIndex(5,3), FaceIndex(5,4), - FaceIndex(6,1), FaceIndex(6,3), + FacetIndex(1,1), 
FacetIndex(1,2), FacetIndex(1,3), FacetIndex(1,4), + FacetIndex(2,1), FacetIndex(2,2), FacetIndex(2,3), + FacetIndex(3,1), FacetIndex(3,2), FacetIndex(3,3), + FacetIndex(4,1), FacetIndex(4,2), FacetIndex(4,3), + FacetIndex(5,1), FacetIndex(5,3), FacetIndex(5,4), + FacetIndex(6,1), FacetIndex(6,3), ]) -# test mixed grid +# test grids with mixed celltypes of same refdim +# (4)---(5) +# | | \ +# | 1 |2 \ +# (1)---(2)-(3) + tet_quad_grid = Grid( + [Quadrilateral((1, 2, 5, 4)), Triangle((2, 3, 5))], + [Node(Float64.((x, y))) for (x, y) in ((0,0), (1,0), (2,0), (0,1), (1,1))]) + top = ExclusiveTopology(tet_quad_grid) + @test getneighborhood(top, tet_quad_grid, FacetIndex(1, 2)) == [EdgeIndex(2,3)] + @test Set(getneighborhood(top, tet_quad_grid, FacetIndex(1, 2), true)) == Set([EdgeIndex(1,2), EdgeIndex(2,3)]) + +# test grids with mixed refdim cells = [ Hexahedron((1, 2, 3, 4, 5, 6, 7, 8)), Hexahedron((11, 13, 14, 12, 15, 16, 17, 18)), @@ -480,11 +502,12 @@ end ] nodes = [Node(coord) for coord in zeros(Vec{3,Float64}, 18)] grid = Grid(cells, nodes) - topology = ExclusiveTopology(grid) + @test_throws ErrorException ExclusiveTopology(grid) + # topology = ExclusiveTopology(grid) - @test_throws AssertionError("Face skeleton construction requires all the elements to be of the same dimensionality") Ferrite.faceskeleton(topology, grid) - # @test topology.face_face_neighbor[3,4] == Ferrite.EntityNeighborhood(EdgeIndex(1,2)) - # @test topology.edge_edge_neighbor[1,2] == Ferrite.EntityNeighborhood(FaceIndex(3,4)) + # @test_throws ArgumentError Ferrite.facetskeleton(topology, grid) + # @test topology.face_face_neighbor[3,4] == [EdgeIndex(1,2)] + # @test topology.edge_edge_neighbor[1,2] == [FaceIndex(3,4)] # # regression that it doesn't error for boundary faces, see https://github.com/Ferrite-FEM/Ferrite.jl/issues/518 # @test topology.face_face_neighbor[1,6] == topology.face_face_neighbor[1,1] == zero(Ferrite.EntityNeighborhood{FaceIndex}) # @test topology.edge_edge_neighbor[1,1] == topology.edge_edge_neighbor[1,3] == zero(Ferrite.EntityNeighborhood{FaceIndex}) @@ -499,8 +522,11 @@ end ] nodes = [Node(coord) for coord in zeros(Vec{2,Float64}, 18)] grid = Grid(cells, nodes) - topology = ExclusiveTopology(grid) - @test_throws AssertionError("Face skeleton construction requires all the elements to be of the same dimensionality") Ferrite.faceskeleton(topology, grid) + @test_throws ErrorException ExclusiveTopology(grid) + # topology = ExclusiveTopology(grid) + # @test_throws ArgumentError Ferrite.facetskeleton(topology, grid) + # @test_throws ArgumentError getneighborhood(topology, grid, FacetIndex(1,1)) + # @test_throws ArgumentError Ferrite.get_facet_facet_neighborhood(topology, grid) # # +-----+-----+-----+ @@ -524,32 +550,35 @@ end @test issubset([4,5,8], patches[7]) @test issubset([7,4,5,6,9], patches[8]) @test issubset([8,5,6], patches[9]) + @test 5 ∉ getneighborhood(topology, quadgrid, CellIndex(5)) + @test 5 ∈ getneighborhood(topology, quadgrid, CellIndex(5), true) # test star stencils stars = Ferrite.vertex_star_stencils(topology, quadgrid) @test Set(Ferrite.getstencil(stars, quadgrid, VertexIndex(1,1))) == Set([VertexIndex(1,2), VertexIndex(1,4), VertexIndex(1,1)]) @test Set(Ferrite.getstencil(stars, quadgrid, VertexIndex(2,1))) == Set([VertexIndex(1,1), VertexIndex(1,3), VertexIndex(2,2), VertexIndex(2,4), VertexIndex(1,2), VertexIndex(2,1)]) @test Set(Ferrite.getstencil(stars, quadgrid, VertexIndex(5,4))) == Set([VertexIndex(4,2), VertexIndex(4,4), VertexIndex(5,1), VertexIndex(5,3), VertexIndex(7,1),
VertexIndex(7,3), VertexIndex(8,2), VertexIndex(8,4), VertexIndex(4,3), VertexIndex(5,4), VertexIndex(7,2), VertexIndex(8,1)]) - @test Set(Ferrite.toglobal(quadgrid, Ferrite.getstencil(stars, quadgrid, VertexIndex(1,1)))) == Set([1,2,5]) - @test Set(Ferrite.toglobal(quadgrid, Ferrite.getstencil(stars, quadgrid, VertexIndex(2,1)))) == Set([2,1,6,3]) - @test Set(Ferrite.toglobal(quadgrid, Ferrite.getstencil(stars, quadgrid, VertexIndex(5,4)))) == Set([10,6,9,11,14]) - - face_skeleton = Ferrite.faceskeleton(topology, quadgrid) - @test Set(face_skeleton) == Set([FaceIndex(1,1),FaceIndex(1,2),FaceIndex(1,3),FaceIndex(1,4), - FaceIndex(2,1),FaceIndex(2,2),FaceIndex(2,3), - FaceIndex(3,1),FaceIndex(3,2),FaceIndex(3,3), - FaceIndex(4,2),FaceIndex(4,3),FaceIndex(4,4), - FaceIndex(5,2),FaceIndex(5,3),FaceIndex(6,2),FaceIndex(6,3), - FaceIndex(7,2),FaceIndex(7,3),FaceIndex(7,4), - FaceIndex(8,2),FaceIndex(8,3),FaceIndex(9,2),FaceIndex(9,3)]) + @test Set(Ferrite.toglobal.((quadgrid,), Ferrite.getstencil(stars, quadgrid, VertexIndex(1,1)))) == Set([1,2,5]) + @test Set(Ferrite.toglobal.((quadgrid,), Ferrite.getstencil(stars, quadgrid, VertexIndex(2,1)))) == Set([2,1,6,3]) + @test Set(Ferrite.toglobal.((quadgrid,), Ferrite.getstencil(stars, quadgrid, VertexIndex(5,4)))) == Set([10,6,9,11,14]) + + face_skeleton = Ferrite.facetskeleton(topology, quadgrid) + @test Set(face_skeleton) == Set([FacetIndex(1,1),FacetIndex(1,2),FacetIndex(1,3),FacetIndex(1,4), + FacetIndex(2,1),FacetIndex(2,2),FacetIndex(2,3), + FacetIndex(3,1),FacetIndex(3,2),FacetIndex(3,3), + FacetIndex(4,2),FacetIndex(4,3),FacetIndex(4,4), + FacetIndex(5,2),FacetIndex(5,3),FacetIndex(6,2),FacetIndex(6,3), + FacetIndex(7,2),FacetIndex(7,3),FacetIndex(7,4), + FacetIndex(8,2),FacetIndex(8,3),FacetIndex(9,2),FacetIndex(9,3)]) @test length(face_skeleton) == 4*3 + 3*4 quadratic_quadgrid = generate_grid(QuadraticQuadrilateral,(3,3)) quadgrid_topology = ExclusiveTopology(quadratic_quadgrid) - quadface_skeleton = Ferrite.faceskeleton(topology, quadgrid) - @test quadface_skeleton == face_skeleton + #quadface_skeleton = Ferrite.facetskeleton(topology, quadgrid) + #@test quadface_skeleton == face_skeleton + # add more regression for https://github.com/Ferrite-FEM/Ferrite.jl/issues/518 - @test all(quadgrid_topology.face_face_neighbor .== topology.face_face_neighbor) + @test all(quadgrid_topology.edge_edge_neighbor .== topology.edge_edge_neighbor) @test all(quadgrid_topology.vertex_vertex_neighbor .== topology.vertex_vertex_neighbor) quadratic_patches = Vector{Int}[Ferrite.getneighborhood(quadgrid_topology, quadratic_quadgrid, CellIndex(i)) for i in 1:getncells(quadratic_quadgrid)] @test all(patches .== quadratic_patches) @@ -564,8 +593,8 @@ end # +-----+-----+-----+ # test application: integrate jump across element boundary 5 ip = DiscontinuousLagrange{RefQuadrilateral, 1}()^2 - qr_face = FaceQuadratureRule{RefQuadrilateral}(2) - iv = InterfaceValues(qr_face, ip) + qr_facet = FacetQuadratureRule{RefQuadrilateral}(2) + iv = InterfaceValues(qr_facet, ip) dh = DofHandler(quadgrid) add!(dh, :u, ip) close!(dh) @@ -673,7 +702,7 @@ end @testset "1d" begin grid = generate_grid(Line, (2,)) - + Ferrite.vertexdof_indices(::VectorLagrangeTest{RefLine,1,2}) = ((1,2),(3,4)) dh1 = DofHandler(grid) add!(dh1, :u, VectorLagrangeTest{RefLine,1,2}()) @@ -716,4 +745,18 @@ end close!(dh2) @test dh1.cell_dofs == dh2.cell_dofs end + + @testset "paraview_collection" begin + grid = generate_grid(Triangle, (2,2)) + celldata = rand(getncells(grid)) + pvd = 
WriteVTK.paraview_collection("collection") + vtk1 = VTKGridFile("file1", grid) + write_cell_data(vtk1, celldata, "celldata") + @assert isopen(vtk1.vtk) + pvd[0.5] = vtk1 + @test !isopen(vtk1.vtk) # Should be closed when adding it + vtk2 = VTKGridFile("file2", grid) + WriteVTK.collection_add_timestep(pvd, vtk2, 1.0) + @test !isopen(vtk2.vtk) # Should be closed when adding it + end end diff --git a/test/test_grid_generators.jl b/test/test_grid_generators.jl new file mode 100644 index 0000000000..618455eba8 --- /dev/null +++ b/test/test_grid_generators.jl @@ -0,0 +1,24 @@ +# Helper function to test grid generation for a given floating-point type +function test_generate_grid(T::Type) + # Define the cell types to test + cell_types = [ + Line, QuadraticLine, + Quadrilateral, QuadraticQuadrilateral, Triangle, QuadraticTriangle, + Hexahedron, Wedge, Pyramid, Tetrahedron, SerendipityQuadraticHexahedron] + + # Loop over all cell types and test grid generation + for CT in cell_types + rdim = Ferrite.getrefdim(CT) + nels = ntuple(i -> 2, rdim) + left = - ones(Vec{rdim,T}) + right = ones(Vec{rdim,T}) + grid = generate_grid(CT, nels, left, right) + @test isa(grid, Grid{rdim, CT, T}) + end +end + +# Run tests for different floating-point types +@testset "Generate Grid Tests" begin + test_generate_grid(Float64) + test_generate_grid(Float32) +end diff --git a/test/test_interfacevalues.jl b/test/test_interfacevalues.jl index 79d401ddbc..44254fee5a 100644 --- a/test/test_interfacevalues.jl +++ b/test/test_interfacevalues.jl @@ -2,7 +2,7 @@ function test_interfacevalues(grid::Ferrite.AbstractGrid, iv::InterfaceValues; tol = 0) ip_here = Ferrite.function_interpolation(iv.here) ip_there = Ferrite.function_interpolation(iv.there) - ndim = Ferrite.getdim(ip_here) + rdim = Ferrite.getrefdim(ip_here) n_basefuncs = getnbasefunctions(ip_here) + getnbasefunctions(ip_there) @test getnbasefunctions(iv) == n_basefuncs @@ -33,15 +33,15 @@ @test shapevalue ≈ shape_value(iv.there, qp, i - getnbasefunctions(iv.here)) @test shapegrad ≈ shape_gradient(iv.there, qp, i - getnbasefunctions(iv.here)) - @test shape_jump ≈ -shapevalue - @test shapegrad_jump ≈ -shapegrad + @test shape_jump ≈ shapevalue + @test shapegrad_jump ≈ shapegrad else normal = getnormal(iv, qp) @test shapevalue ≈ shape_value(iv.here, qp, i) @test shapegrad ≈ shape_gradient(iv.here, qp, i) - @test shape_jump ≈ shapevalue - @test shapegrad_jump ≈ shapegrad + @test shape_jump ≈ -shapevalue + @test shapegrad_jump ≈ -shapegrad end @test shape_avg ≈ 0.5 * shapevalue @@ -52,16 +52,16 @@ @test_throws ErrorException("Invalid base function $(n_basefuncs + 1). Interface has only $(n_basefuncs) base functions") shape_value_jump(iv, 1, n_basefuncs + 1) @test_throws ErrorException("Invalid base function $(n_basefuncs + 1). 
Interface has only $(n_basefuncs) base functions") shape_gradient_average(iv, 1, n_basefuncs + 1) - # Test function* copied from facevalues tests + # Test function* copied from facetvalues tests nbf_a = Ferrite.getngeobasefunctions(iv.here) nbf_b = Ferrite.getngeobasefunctions(iv.there) for here in (true, false) - u_a = Vec{ndim, Float64}[zero(Tensor{1,ndim}) for i in 1: nbf_a] - u_b = Vec{ndim, Float64}[zero(Tensor{1,ndim}) for i in 1: nbf_b] + u_a = zeros(Vec{rdim, Float64}, nbf_a) + u_b = zeros(Vec{rdim, Float64}, nbf_b) u_scal_a = zeros(nbf_a) u_scal_b = zeros(nbf_b) - H = rand(Tensor{2, ndim}) - V = rand(Tensor{1, ndim}) + H = rand(Tensor{2, rdim}) + V = rand(Tensor{1, rdim}) for i in 1:nbf_a xs = coords_here u_a[i] = H ⋅ xs[i] @@ -103,49 +103,52 @@ vol += getdetJdV(iv, i) end xs = here ? coords_here : coords_there - face = here ? Ferrite.getcurrentface(iv.here) : Ferrite.getcurrentface(iv.there) + face = here ? Ferrite.getcurrentfacet(iv.here) : Ferrite.getcurrentfacet(iv.there) func_interpol = here ? ip_here : ip_there let ip_base = func_interpol isa VectorizedInterpolation ? func_interpol.ip : func_interpol - x_face = xs[[Ferrite.dirichlet_facedof_indices(ip_base)[face]...]] - @test vol ≈ calculate_face_area(ip_base, x_face, face) + x_face = xs[[Ferrite.dirichlet_facetdof_indices(ip_base)[face]...]] + @test vol ≈ calculate_facet_area(ip_base, x_face, face) end end end end getcelltypedim(::Type{<:Ferrite.AbstractCell{shape}}) where {dim, shape <: Ferrite.AbstractRefShape{dim}} = dim for (cell_shape, scalar_interpol, quad_rule) in ( - (Line, DiscontinuousLagrange{RefLine, 1}(), FaceQuadratureRule{RefLine}(2)), - (QuadraticLine, DiscontinuousLagrange{RefLine, 2}(), FaceQuadratureRule{RefLine}(2)), - (Quadrilateral, DiscontinuousLagrange{RefQuadrilateral, 1}(), FaceQuadratureRule{RefQuadrilateral}(2)), - (QuadraticQuadrilateral, DiscontinuousLagrange{RefQuadrilateral, 2}(), FaceQuadratureRule{RefQuadrilateral}(2)), - (Triangle, DiscontinuousLagrange{RefTriangle, 1}(), FaceQuadratureRule{RefTriangle}(2)), - (QuadraticTriangle, DiscontinuousLagrange{RefTriangle, 2}(), FaceQuadratureRule{RefTriangle}(2)), - (Hexahedron, DiscontinuousLagrange{RefHexahedron, 1}(), FaceQuadratureRule{RefHexahedron}(2)), - # (QuadraticQuadrilateral, Serendipity{RefQuadrilateral, 2}(), FaceQuadratureRule{RefQuadrilateral}(2)), - (Tetrahedron, DiscontinuousLagrange{RefTetrahedron, 1}(), FaceQuadratureRule{RefTetrahedron}(2)), - # (QuadraticTetrahedron, Lagrange{RefTetrahedron, 2}(), FaceQuadratureRule{RefTetrahedron}(2)), - (Wedge, DiscontinuousLagrange{RefPrism, 1}(), FaceQuadratureRule{RefPrism}(2)), - (Pyramid, DiscontinuousLagrange{RefPyramid, 1}(), FaceQuadratureRule{RefPyramid}(2)), + #TODO: update interfaces for lines + (Line, DiscontinuousLagrange{RefLine, 1}(), FacetQuadratureRule{RefLine}(2)), + (QuadraticLine, DiscontinuousLagrange{RefLine, 2}(), FacetQuadratureRule{RefLine}(2)), + (Quadrilateral, DiscontinuousLagrange{RefQuadrilateral, 1}(), FacetQuadratureRule{RefQuadrilateral}(2)), + (QuadraticQuadrilateral, DiscontinuousLagrange{RefQuadrilateral, 2}(), FacetQuadratureRule{RefQuadrilateral}(2)), + (Triangle, DiscontinuousLagrange{RefTriangle, 1}(), FacetQuadratureRule{RefTriangle}(2)), + (QuadraticTriangle, DiscontinuousLagrange{RefTriangle, 2}(), FacetQuadratureRule{RefTriangle}(2)), + (Hexahedron, DiscontinuousLagrange{RefHexahedron, 1}(), FacetQuadratureRule{RefHexahedron}(2)), + # (QuadraticQuadrilateral, Serendipity{RefQuadrilateral, 2}(), FacetQuadratureRule{RefQuadrilateral}(2)), + 
(Tetrahedron, DiscontinuousLagrange{RefTetrahedron, 1}(), FacetQuadratureRule{RefTetrahedron}(2)), + # (QuadraticTetrahedron, Lagrange{RefTetrahedron, 2}(), FacetQuadratureRule{RefTetrahedron}(2)), + (Wedge, DiscontinuousLagrange{RefPrism, 1}(), FacetQuadratureRule{RefPrism}(2)), + (Pyramid, DiscontinuousLagrange{RefPyramid, 1}(), FacetQuadratureRule{RefPyramid}(2)), ) dim = getcelltypedim(cell_shape) grid = generate_grid(cell_shape, ntuple(i -> 2, dim)) ip = scalar_interpol isa DiscontinuousLagrange ? Lagrange{Ferrite.getrefshape(scalar_interpol), Ferrite.getorder(scalar_interpol)}() : scalar_interpol @testset "faces nodes indices" begin cell = getcells(grid, 1) - geom_ip_faces_indices = Ferrite.facedof_indices(ip) - Ferrite.getdim(ip) > 1 && (geom_ip_faces_indices = Tuple([face[collect(face .∉ Ref(interior))] for (face, interior) in [(geom_ip_faces_indices[i], Ferrite.facedof_interior_indices(ip)[i]) for i in 1:nfaces(ip)]])) - faces_indices = Ferrite.reference_faces(Ferrite.getrefshape(Ferrite.default_interpolation(typeof(cell)))) + geom_ip_facets_indices = Ferrite.facetdof_indices(ip) + Ferrite.getrefdim(ip) > 1 && (geom_ip_facets_indices = Tuple([facet[collect(facet .∉ Ref(interior))] for (facet, interior) in [(geom_ip_facets_indices[i], Ferrite.facetdof_interior_indices(ip)[i]) for i in 1:Ferrite.nfacets(ip)]])) + facets_indices = Ferrite.reference_facets(Ferrite.getrefshape(Ferrite.geometric_interpolation(typeof(cell)))) node_ids = Ferrite.get_node_ids(cell) - @test getindex.(Ref(node_ids), collect.(faces_indices)) == Ferrite.faces(cell) == getindex.(Ref(node_ids), collect.(geom_ip_faces_indices)) + cellfacets = Ferrite.facets(cell) + @test getindex.(Ref(node_ids), collect.(facets_indices)) == cellfacets == getindex.(Ref(node_ids), collect.(geom_ip_facets_indices)) end @testset "error paths" begin cell = getcells(grid, 1) dim == 1 && @test_throws ErrorException("1D elements don't use transformations for interfaces.") Ferrite.InterfaceOrientationInfo(cell,cell,1,1) - @test_throws ArgumentError("unknown face number") Ferrite.element_to_face_transformation(Vec{dim,Float64}(ntuple(_->0.0, dim)), Ferrite.getrefshape(cell), 100) - @test_throws ArgumentError("unknown face number") Ferrite.face_to_element_transformation(Vec{dim-1,Float64}(ntuple(_->0.0, dim-1)), Ferrite.getrefshape(cell), 100) + @test_throws ArgumentError("unknown facet number") Ferrite.element_to_facet_transformation(Vec{dim,Float64}(ntuple(_->0.0, dim)), Ferrite.getrefshape(cell), 100) + @test_throws ArgumentError("unknown facet number") Ferrite.facet_to_element_transformation(Vec{dim-1,Float64}(ntuple(_->0.0, dim-1)), Ferrite.getrefshape(cell), 100) end + func_interpol = scalar_interpol for func_interpol in (scalar_interpol, VectorizedInterpolation(scalar_interpol)) - iv = cell_shape ∈ (QuadraticLine, QuadraticQuadrilateral, QuadraticTriangle, QuadraticTetrahedron) ? + iv = cell_shape ∈ (QuadraticLine, QuadraticQuadrilateral, QuadraticTriangle, QuadraticTetrahedron) ? 
InterfaceValues(quad_rule, func_interpol, ip) : InterfaceValues(quad_rule, func_interpol) test_interfacevalues(grid, iv) end @@ -158,29 +161,29 @@ points = Vec{2, Float64}.([[0.0, 0.844948974278318], [0.205051025721682, 0.694948974278318], [0.487979589711327, 0.487979589711327], [0.0, 0.355051025721682], [0.29202041028867254, 0.29202041028867254], [0.694948974278318, 0.205051025721682], [0.0, 0.0], [0.355051025721682, 0.0], [0.844948974278318, 0.0]]) # Weights resulted in 4 times the volume [-1, 1] -> so /4 to get [0, 1] weights = [0.096614387479324, 0.308641975308642, 0.087870061825481, 0.187336229804627, 0.677562036939952, 0.308641975308642, 0.049382716049383, 0.187336229804627, 0.096614387479324] / 4 - quad_rule = Ferrite.create_face_quad_rule(RefTetrahedron, weights, points) + quad_rule = Ferrite.create_facet_quad_rule(RefTetrahedron, weights, points) dim = getcelltypedim(cell_shape) grid = generate_grid(cell_shape, ntuple(i -> 2, dim)) @testset "faces nodes indices" begin ip = scalar_interpol isa DiscontinuousLagrange ? Lagrange{Ferrite.getrefshape(scalar_interpol), Ferrite.getorder(scalar_interpol)}() : scalar_interpol cell = getcells(grid, 1) - geom_ip_faces_indices = Ferrite.facedof_indices(ip) - Ferrite.getdim(ip) > 1 && (geom_ip_faces_indices = Tuple([face[collect(face .∉ Ref(interior))] for (face, interior) in [(geom_ip_faces_indices[i], Ferrite.facedof_interior_indices(ip)[i]) for i in 1:nfaces(ip)]])) - faces_indices = Ferrite.reference_faces(Ferrite.getrefshape(Ferrite.default_interpolation(typeof(cell)))) + geom_ip_facets_indices = Ferrite.facetdof_indices(ip) + Ferrite.getrefdim(ip) > 1 && (geom_ip_facets_indices = Tuple([facet[collect(facet .∉ Ref(interior))] for (facet, interior) in [(geom_ip_facets_indices[i], Ferrite.facedof_interior_indices(ip)[i]) for i in 1:Ferrite.nfaces(ip)]])) + facets_indices = Ferrite.reference_facets(Ferrite.getrefshape(Ferrite.geometric_interpolation(typeof(cell)))) node_ids = Ferrite.get_node_ids(cell) - @test getindex.(Ref(node_ids), collect.(faces_indices)) == Ferrite.faces(cell) == getindex.(Ref(node_ids), collect.(geom_ip_faces_indices)) + @test getindex.(Ref(node_ids), collect.(facets_indices)) == Ferrite.faces(cell) == getindex.(Ref(node_ids), collect.(geom_ip_facets_indices)) end @testset "error paths" begin cell = getcells(grid, 1) - @test_throws ArgumentError("unknown face number") Ferrite.element_to_face_transformation(Vec{dim,Float64}(ntuple(_->0.0, dim)), Ferrite.getrefshape(cell), 100) - @test_throws ArgumentError("unknown face number") Ferrite.face_to_element_transformation(Vec{dim-1,Float64}(ntuple(_->0.0, dim-1)), Ferrite.getrefshape(cell), 100) + @test_throws ArgumentError("unknown facet number") Ferrite.element_to_facet_transformation(Vec{dim,Float64}(ntuple(_->0.0, dim)), Ferrite.getrefshape(cell), 100) + @test_throws ArgumentError("unknown facet number") Ferrite.facet_to_element_transformation(Vec{dim-1,Float64}(ntuple(_->0.0, dim-1)), Ferrite.getrefshape(cell), 100) end for func_interpol in (scalar_interpol, VectorizedInterpolation(scalar_interpol)) iv = InterfaceValues(quad_rule, func_interpol) test_interfacevalues(grid, iv; tol = 5*eps(Float64)) end end - # @testset "Mixed elements 2D grids" begin # TODO: this shouldn't work because it should change the FaceValues object + # @testset "Mixed elements 2D grids" begin # TODO: this shouldn't work because it should change the FacetValues object # dim = 2 # nodes = [Node((-1.0, 0.0)), Node((0.0, 0.0)), Node((1.0, 0.0)), Node((-1.0, 1.0)), Node((0.0, 1.0))] # cells = [ @@ 
-191,15 +194,15 @@ # grid = Grid(cells, nodes) # topology = ExclusiveTopology(grid) # test_interfacevalues(grid, - # DiscontinuousLagrange{RefQuadrilateral, 1}(), FaceQuadratureRule{RefQuadrilateral}(2), - # DiscontinuousLagrange{RefTriangle, 1}(), FaceQuadratureRule{RefTriangle}(2)) + # DiscontinuousLagrange{RefQuadrilateral, 1}(), FacetQuadratureRule{RefQuadrilateral}(2), + # DiscontinuousLagrange{RefTriangle, 1}(), FacetQuadratureRule{RefTriangle}(2)) # end @testset "Unordered nodes 3D" begin dim = 2 - nodes = [Node((-1.0, 0.0, 0.0)), Node((0.0, 0.0, 0.0)), Node((1.0, 0.0, 0.0)), - Node((-1.0, 1.0, 0.0)), Node((0.0, 1.0, 0.0)), Node((1.0, 1.0, 0.0)), - Node((-1.0, 0.0, 1.0)), Node((0.0, 0.0, 1.0)), Node((1.0, 0.0, 1.0)), - Node((-1.0, 1.0, 1.0)), Node((0.0, 1.0, 1.0)), Node((1.0, 1.0, 1.0)), + nodes = [Node((-1.0, 0.0, 0.0)), Node((0.0, 0.0, 0.0)), Node((1.0, 0.0, 0.0)), + Node((-1.0, 1.0, 0.0)), Node((0.0, 1.0, 0.0)), Node((1.0, 1.0, 0.0)), + Node((-1.0, 0.0, 1.0)), Node((0.0, 0.0, 1.0)), Node((1.0, 0.0, 1.0)), + Node((-1.0, 1.0, 1.0)), Node((0.0, 1.0, 1.0)), Node((1.0, 1.0, 1.0)), ] cells = [ Hexahedron((1,2,5,4,7,8,11,10)), @@ -208,14 +211,14 @@ grid = Grid(cells, nodes) test_interfacevalues(grid, - InterfaceValues(FaceQuadratureRule{RefHexahedron}(2), DiscontinuousLagrange{RefHexahedron, 1}())) + InterfaceValues(FacetQuadratureRule{RefHexahedron}(2), DiscontinuousLagrange{RefHexahedron, 1}())) end @testset "Interface dof_range" begin grid = generate_grid(Quadrilateral,(3,3)) ip_u = DiscontinuousLagrange{RefQuadrilateral, 1}()^2 ip_p = DiscontinuousLagrange{RefQuadrilateral, 1}() - qr_face = FaceQuadratureRule{RefQuadrilateral}(2) - iv = InterfaceValues(qr_face, ip_p) + qr_facet = FacetQuadratureRule{RefQuadrilateral}(2) + iv = InterfaceValues(qr_facet, ip_p) @test iv == InterfaceValues(iv.here, iv.there) dh = DofHandler(grid) add!(dh, :u, ip_u) @@ -226,20 +229,18 @@ @test dof_range(ic, :p) == (9:12, 25:28) end # Test copy - iv = InterfaceValues(FaceQuadratureRule{RefQuadrilateral}(2), DiscontinuousLagrange{RefQuadrilateral, 1}()) + iv = InterfaceValues(FacetQuadratureRule{RefQuadrilateral}(2), DiscontinuousLagrange{RefQuadrilateral, 1}()) ivc = copy(iv) @test typeof(iv) == typeof(ivc) for fname in fieldnames(typeof(iv)) v = getfield(iv, fname) - v isa Ferrite.ScalarWrapper && continue vc = getfield(ivc, fname) if hasmethod(pointer, Tuple{typeof(v)}) @test pointer(v) != pointer(vc) end - v isa FaceValues && continue + v isa FacetValues && continue for fname in fieldnames(typeof(vc)) v2 = getfield(v, fname) - v2 isa Ferrite.ScalarWrapper && continue vc2 = getfield(vc, fname) if hasmethod(pointer, Tuple{typeof(v2)}) @test pointer(v2) != pointer(vc2) @@ -252,7 +253,7 @@ @test_throws ArgumentError("transformation is not implemented") Ferrite.get_transformation_matrix(it) end @testset "show" begin - iv = InterfaceValues(FaceQuadratureRule{RefQuadrilateral}(2), Lagrange{RefQuadrilateral,2}()) + iv = InterfaceValues(FacetQuadratureRule{RefQuadrilateral}(2), Lagrange{RefQuadrilateral,2}()) showstring = sprint(show, MIME"text/plain"(), iv) @test contains(showstring, "InterfaceValues with") end diff --git a/test/test_interpolations.jl b/test/test_interpolations.jl index 25c6905154..c8a45c1b05 100644 --- a/test/test_interpolations.jl +++ b/test/test_interpolations.jl @@ -1,3 +1,5 @@ +using Ferrite: reference_shape_value, reference_shape_gradient + @testset "interpolations" begin @testset "Value Type $value_type" for value_type in (Float32, Float64) @@ -38,10 +40,13 @@ # 
BubbleEnrichedLagrange{RefTriangle, 1}(), # - CrouzeixRaviart{RefTriangle, 1}(), + CrouzeixRaviart{RefTriangle,1}(), + CrouzeixRaviart{RefTetrahedron,1}(), + RannacherTurek{RefQuadrilateral,1}(), + RannacherTurek{RefHexahedron,1}(), ) # Test of utility functions - ref_dim = Ferrite.getdim(interpolation) + ref_dim = Ferrite.getrefdim(interpolation) ref_shape = Ferrite.getrefshape(interpolation) func_order = Ferrite.getorder(interpolation) @test typeof(interpolation) <: Interpolation{ref_shape,func_order} @@ -51,8 +56,8 @@ @testset "transform face points" begin # Test both center point and random points on the face ref_coord = Ferrite.reference_coordinates(Lagrange{ref_shape, 1}()) - for face in 1:nfaces(interpolation) - face_nodes = Ferrite.reference_faces(ref_shape)[face] + for face in 1:nfacets(interpolation) + face_nodes = Ferrite.reference_facets(ref_shape)[face] center_coord = [0.0 for _ in 1:ref_dim] rand_coord = [0.0 for _ in 1:ref_dim] rand_weights = rand(length(face_nodes)) @@ -63,8 +68,8 @@ end for point in (center_coord, rand_coord) vec_point = Vec{ref_dim}(point) - cell_to_face = Ferrite.element_to_face_transformation(vec_point, ref_shape, face) - face_to_cell = Ferrite.face_to_element_transformation(cell_to_face, ref_shape, face) + cell_to_face = Ferrite.element_to_facet_transformation(vec_point, ref_shape, face) + face_to_cell = Ferrite.facet_to_element_transformation(cell_to_face, ref_shape, face) @test vec_point ≈ face_to_cell end end @@ -72,7 +77,7 @@ n_basefuncs = getnbasefunctions(interpolation) coords = Ferrite.reference_coordinates(interpolation) @test length(coords) == n_basefuncs - f(x) = [shape_value(interpolation, Tensor{1, ref_dim}(x), i) for i in 1:n_basefuncs] + f(x) = [reference_shape_value(interpolation, Tensor{1, ref_dim}(x), i) for i in 1:n_basefuncs] #TODO prefer this test style after 1.6 is removed from CI # @testset let x = sample_random_point(ref_shape) # not compatible with Julia 1.6 @@ -80,65 +85,52 @@ random_point_testset = @testset "Random point test" begin # Check gradient evaluation @test vec(ForwardDiff.jacobian(f, Array(x))') ≈ - reinterpret(value_type, [shape_gradient(interpolation, x, i) for i in 1:n_basefuncs]) + reinterpret(value_type, [reference_shape_gradient(interpolation, x, i) for i in 1:n_basefuncs]) # Check partition of unity at random point. 
- @test sum([shape_value(interpolation, x, i) for i in 1:n_basefuncs]) ≈ 1.0 + @test sum([reference_shape_value(interpolation, x, i) for i in 1:n_basefuncs]) ≈ 1.0 # Check if the important functions are consistent - @test_throws ArgumentError shape_value(interpolation, x, n_basefuncs+1) + @test_throws ArgumentError reference_shape_value(interpolation, x, n_basefuncs+1) # Idempotency test - @test shape_value(interpolation, x, n_basefuncs) == shape_value(interpolation, x, n_basefuncs) + @test reference_shape_value(interpolation, x, n_basefuncs) == reference_shape_value(interpolation, x, n_basefuncs) end # Remove after 1.6 is removed from CI (see above) # Show coordinate in case failure (see issue #811) !isempty(random_point_testset.results) && println("^^^^^Random point test failed at $x for $interpolation !^^^^^") - # Test whether we have for each entity corresponding dof indices (possibly empty) - @test length(Ferrite.vertexdof_indices(interpolation)) == Ferrite.nvertices(interpolation) - if ref_dim > 1 - @test length(Ferrite.facedof_indices(interpolation)) == Ferrite.nfaces(interpolation) - @test length(Ferrite.facedof_interior_indices(interpolation)) == Ferrite.nfaces(interpolation) - elseif ref_dim > 2 - @test length(Ferrite.edgedof_indices(interpolation)) == Ferrite.nedges(interpolation) - @test length(Ferrite.edgedof_interior_indices(interpolation)) == Ferrite.nedges(interpolation) - end - # We have at least as many edge/face dofs as we have edge/face interior dofs - if ref_dim > 1 - @test all(length.(Ferrite.facedof_interior_indices(interpolation)) .<= length.(Ferrite.facedof_indices(interpolation))) - elseif ref_dim > 2 - @test all(length.(Ferrite.edgedof_interior_indices(interpolation)) .<= length.(Ferrite.edgedof_indices(interpolation))) - end - # The total number of dofs must match the number of base functions - totaldofs = sum(length.(Ferrite.vertexdof_indices(interpolation));init=0) - if ref_dim > 1 - totaldofs += sum(length.(Ferrite.facedof_interior_indices(interpolation));init=0) - end - if ref_dim > 2 - totaldofs += sum(length.(Ferrite.edgedof_interior_indices(interpolation));init=0) - end - totaldofs += length(Ferrite.celldof_interior_indices(interpolation)) - @test totaldofs == n_basefuncs - - # The dof indices are valid. 
- @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.vertexdof_indices(interpolation)]) - if ref_dim > 1 - @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.facedof_indices(interpolation)]) - @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.facedof_interior_indices(interpolation)]) - elseif ref_dim > 2 - @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.edgedof_indices(interpolation)]) - @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.edgedof_interior_indices(interpolation)]) - end - @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.celldof_interior_indices(interpolation)]) + # Test whether we have for each entity corresponding dof indices (possibly empty) + @test length(Ferrite.vertexdof_indices(interpolation)) == Ferrite.nvertices(interpolation) + @test length(Ferrite.facedof_indices(interpolation)) == Ferrite.nfaces(interpolation) + @test length(Ferrite.facedof_interior_indices(interpolation)) == Ferrite.nfaces(interpolation) + @test length(Ferrite.edgedof_indices(interpolation)) == Ferrite.nedges(interpolation) + @test length(Ferrite.edgedof_interior_indices(interpolation)) == Ferrite.nedges(interpolation) + # We have at least as many edge/face dofs as we have edge/face interior dofs + @test all(length.(Ferrite.facedof_interior_indices(interpolation)) .<= length.(Ferrite.facedof_indices(interpolation))) + @test all(length.(Ferrite.edgedof_interior_indices(interpolation)) .<= length.(Ferrite.edgedof_indices(interpolation))) + # The total number of dofs must match the number of base functions + totaldofs = sum(length.(Ferrite.vertexdof_indices(interpolation));init=0) + totaldofs += sum(length.(Ferrite.facedof_interior_indices(interpolation));init=0) + totaldofs += sum(length.(Ferrite.edgedof_interior_indices(interpolation));init=0) + totaldofs += length(Ferrite.volumedof_interior_indices(interpolation)) + @test totaldofs == n_basefuncs + + # The dof indices are valid. + @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.vertexdof_indices(interpolation)]) + @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.facedof_indices(interpolation)]) + @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.facedof_interior_indices(interpolation)]) + @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.edgedof_indices(interpolation)]) + @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.edgedof_interior_indices(interpolation)]) + @test all([all(0 .< i .<= n_basefuncs) for i ∈ Ferrite.volumedof_interior_indices(interpolation)]) # Check for evaluation type correctness of interpolation @testset "return type correctness dof $dof" for dof in 1:n_basefuncs - @test (@inferred shape_value(interpolation, x, dof)) isa value_type - @test (@inferred shape_gradient(interpolation, x, dof)) isa Vec{ref_dim, value_type} + @test (@inferred reference_shape_value(interpolation, x, dof)) isa value_type + @test (@inferred reference_shape_gradient(interpolation, x, dof)) isa Vec{ref_dim, value_type} end # Check for dirac delta property of interpolation @testset "dirac delta property of dof $dof" for dof in 1:n_basefuncs for k in 1:n_basefuncs - N_dof = shape_value(interpolation, coords[dof], k) + N_dof = reference_shape_value(interpolation, coords[dof], k) if k == dof @test N_dof ≈ 1.0 else @@ -148,47 +140,54 @@ end end - # Test that facedof_indices(...) 
return in counter clockwise order (viewing from the outside) - if interpolation isa Lagrange - function __outward_normal(coords::Vector{<:Vec{1}}, nodes) - n = coords[nodes[1]] - return n / norm(n) - end - function __outward_normal(coords::Vector{<:Vec{2}}, nodes) - p1 = coords[nodes[1]] - p2 = coords[nodes[2]] - n = Vec{2}((p2[2] - p1[2], - p2[1] + p1[1])) - return n / norm(n) - end - function __outward_normal(coords::Vector{<:Vec{3}}, nodes) - p1 = coords[nodes[1]] - p2 = coords[nodes[2]] - p3 = coords[nodes[3]] - n = (p3 - p2) × (p1 - p2) - return n / norm(n) - end - for (facenodes, normal) in zip(Ferrite.facedof_indices(interpolation), reference_normals(interpolation)) - @test __outward_normal(coords, facenodes) ≈ normal - end + # Test that facedof_indices(...) return in counter clockwise order (viewing from the outside) + if interpolation isa Lagrange + function __outward_normal(coords::Vector{<:Vec{1}}, nodes) + n = coords[nodes[1]] + return n / norm(n) + end + function __outward_normal(coords::Vector{<:Vec{2}}, nodes) + p1 = coords[nodes[1]] + p2 = coords[nodes[2]] + n = Vec{2}((p2[2] - p1[2], - p2[1] + p1[1])) + return n / norm(n) + end + function __outward_normal(coords::Vector{<:Vec{3}}, nodes) + p1 = coords[nodes[1]] + p2 = coords[nodes[2]] + p3 = coords[nodes[3]] + n = (p3 - p2) × (p1 - p2) + return n / norm(n) + end + _bfunc = if ref_dim == 3 + Ferrite.facedof_indices(interpolation) + elseif ref_dim == 2 + Ferrite.edgedof_indices(interpolation) + elseif ref_dim == 1 + Ferrite.vertexdof_indices(interpolation) end + for (facenodes, normal) in zip(_bfunc, reference_normals(interpolation)) + @test __outward_normal(coords, facenodes) ≈ normal + end + end - # regression for https://github.com/Ferrite-FEM/Ferrite.jl/issues/520 - interpolation_type = typeof(interpolation).name.wrapper - if func_order > 1 && interpolation_type != Ferrite.Serendipity - first_order = interpolation_type{ref_shape,1}() - for (highorderface, firstorderface) in zip(Ferrite.facedof_indices(interpolation), Ferrite.facedof_indices(first_order)) - for (h_node, f_node) in zip(highorderface, firstorderface) - @test h_node == f_node - end + # regression for https://github.com/Ferrite-FEM/Ferrite.jl/issues/520 + #=interpolation_type = typeof(interpolation).name.wrapper + if func_order > 1 && interpolation_type != Ferrite.Serendipity + first_order = interpolation_type{ref_shape,1}() + for (highorderface, firstorderface) in zip(Ferrite.facedof_indices(interpolation), Ferrite.facedof_indices(first_order)) + for (h_node, f_node) in zip(highorderface, firstorderface) + @test h_node == f_node end - if ref_dim > 2 - for (highorderedge, firstorderedge) in zip(Ferrite.edgedof_indices(interpolation), Ferrite.edgedof_indices(first_order)) - for (h_node, f_node) in zip(highorderedge, firstorderedge) - @test h_node == f_node - end + end + if ref_dim > 2 + for (highorderedge, firstorderedge) in zip(Ferrite.edgedof_indices(interpolation), Ferrite.edgedof_indices(first_order)) + for (h_node, f_node) in zip(highorderedge, firstorderedge) + @test h_node == f_node end end end + end=# # VectorizedInterpolation v_interpolation_1 = interpolation^2 @@ -202,8 +201,8 @@ # Check for evaluation type correctness of vectorized interpolation v_interpolation_3 = interpolation^ref_dim @testset "vectorized case of return type correctness of dof $dof" for dof in 1:n_basefuncs - @test @inferred(shape_value(v_interpolation_1, x, dof)) isa Vec{2, value_type} - @test @inferred(shape_gradient(v_interpolation_3, x, dof)) isa Tensor{2, ref_dim, value_type} 
+ @test @inferred(reference_shape_value(v_interpolation_1, x, dof)) isa Vec{2, value_type} + @test @inferred(reference_shape_gradient(v_interpolation_3, x, dof)) isa Tensor{2, ref_dim, value_type} end end # correctness testset @@ -225,4 +224,46 @@ @test Ferrite.is_discontinuous(d_ip_t) == true end +@testset "Correctness of AD of embedded interpolations" begin + ip = Lagrange{RefHexahedron,2}()^3 + ξ = rand(Vec{3,Float64}) + for I in 1:getnbasefunctions(ip) + #Call StaticArray-version + H_sa, G_sa, V_sa = Ferrite._reference_shape_hessian_gradient_and_value_static_array(ip, ξ, I) + #Call tensor AD version + H, G, V = Ferrite.reference_shape_hessian_gradient_and_value(ip, ξ, I) + + @test V ≈ V_sa + @test G ≈ G_sa + @test H ≈ H_sa + end + + ips = Lagrange{RefQuadrilateral,2}() + vdim = 3 + ipv = ips^vdim + ξ = rand(Vec{2, Float64}) + for ipv_ind in 1:getnbasefunctions(ipv) + ips_ind, v_ind = fldmod1(ipv_ind, vdim) + H, G, V = Ferrite.reference_shape_hessian_gradient_and_value(ipv, ξ, ipv_ind) + h, g, v = Ferrite.reference_shape_hessian_gradient_and_value(ips, ξ, ips_ind) + @test h ≈ H[v_ind, :, :] + @test g ≈ G[v_ind, :] + @test v ≈ V[v_ind] + end +end + +@testset "Errors for entitydof_indices on VectorizedInterpolations" begin + ip = Lagrange{RefQuadrilateral,2}()^2 + @test_throws ArgumentError Ferrite.vertexdof_indices(ip) + @test_throws ArgumentError Ferrite.edgedof_indices(ip) + @test_throws ArgumentError Ferrite.facedof_indices(ip) + @test_throws ArgumentError Ferrite.facetdof_indices(ip) + + @test_throws ArgumentError Ferrite.edgedof_interior_indices(ip) + @test_throws ArgumentError Ferrite.facedof_interior_indices(ip) + @test_throws ArgumentError Ferrite.volumedof_interior_indices(ip) + @test_throws ArgumentError Ferrite.facetdof_interior_indices(ip) +end + + end # testset diff --git a/test/test_l2_projection.jl b/test/test_l2_projection.jl index 183d92fcc4..ea72b99980 100644 --- a/test/test_l2_projection.jl +++ b/test/test_l2_projection.jl @@ -31,7 +31,7 @@ function test_projection(order, refshape) qp_values = analytical(f) # Now recover the nodal values using a L2 projection. 
- proj = L2Projector(ip, grid; geom_ip=ip_geom) + proj = L2Projector(ip, grid) point_vars = project(proj, qp_values, qr) qp_values_matrix = reduce(hcat, qp_values) point_vars_2 = project(proj, qp_values_matrix, qr) @@ -99,29 +99,29 @@ function test_projection(order, refshape) else bad_order = 1 end - @test_throws LinearAlgebra.PosDefException L2Projector(ip, grid; qr_lhs=QuadratureRule{refshape}(bad_order), geom_ip=ip_geom) + @test_throws LinearAlgebra.PosDefException L2Projector(ip, grid; qr_lhs=QuadratureRule{refshape}(bad_order)) end -# Test a mixed grid, where only a subset of the cells contains a field -function test_projection_mixedgrid() - # generate a mesh with 1 quadrilateral and 2 triangular elements - dim = 2 - nodes = Node{dim, Float64}[] - push!(nodes, Node((0.0, 0.0))) - push!(nodes, Node((1.0, 0.0))) - push!(nodes, Node((2.0, 0.0))) - push!(nodes, Node((0.0, 1.0))) - push!(nodes, Node((1.0, 1.0))) - push!(nodes, Node((2.0, 1.0))) - - cells = Ferrite.AbstractCell[] - push!(cells, Quadrilateral((1,2,5,4))) - push!(cells, Triangle((2,3,6))) - push!(cells, Triangle((2,6,5))) +function make_mixedgrid_l2_tests() + # generate a mesh with 2 quadrilateral and 2 triangular elements + # 5 --- 6 --- 7 --- 8 + # | 1 | 2/3 | 4 | + # 1 --- 2 --- 3 --- 4 + nodes = [Node(Float64.((x,y))) for (x, y) in + # 1, 2, 3, 4, 5, 6, 7, 8 + ((0, 0), (1, 0), (2, 0), (3, 0), (0, 1), (1, 1), (2, 1), (3, 1))] + + cells = [Quadrilateral((1, 2, 6, 5)), Triangle((2, 7, 6)), Triangle((2, 3, 7)), Quadrilateral((3, 4, 8, 7))] quadset = 1:1 triaset = 2:3 - mesh = Grid(cells, nodes) + quadset_right = 4:4 + return Grid(cells, nodes), quadset, triaset, quadset_right +end + +# Test a mixed grid, where only a subset of the cells contains a field +function test_projection_subset_of_mixedgrid() + mesh, quadset, triaset, quadset_right = make_mixedgrid_l2_tests() order = 2 ip = Lagrange{RefQuadrilateral, order}() @@ -133,18 +133,30 @@ function test_projection_mixedgrid() # use a SymmetricTensor here for testing the symmetric version of project f(x) = SymmetricTensor{2,2,Float64}((1 + x[1]^2, 2x[2]^2, x[1]*x[2])) xe = getcoordinates(mesh, 1) - + # analytical values - qp_values = [[f(spatial_coordinate(cv, qp, xe)) for qp in 1:getnquadpoints(cv)]] - qp_values_matrix = reduce(hcat, qp_values) + qp_value = [f(spatial_coordinate(cv, qp, xe)) for qp in 1:getnquadpoints(cv)] + qp_values = Vector{typeof(qp_value)}(undef, getncells(mesh)) + qp_values[1] = copy(qp_value) + qp_values_matrix = fill(zero(eltype(qp_value)), getnquadpoints(cv), getncells(mesh)) + qp_values_matrix[:, 1] .= qp_value + qp_values_dict = Dict(1 => copy(qp_value)) # Now recover the nodal values using a L2 projection. # Assume f would only exist on the first cell, we project it to the nodes of the # 1st cell while ignoring the rest of the domain. 
NaNs should be stored in all # nodes that do not belong to the 1st cell - proj = L2Projector(ip, mesh; geom_ip=ip_geom, set=quadset) + proj = L2Projector(ip, mesh; set=quadset) point_vars = project(proj, qp_values, qr) point_vars_2 = project(proj, qp_values_matrix, qr) + point_vars_3 = project(proj, qp_values_dict, qr) + projection_at_nodes = evaluate_at_grid_nodes(proj, point_vars) + for cellid in quadset + for nodeid in mesh.cells[cellid].nodes + x = mesh.nodes[nodeid].x + @test projection_at_nodes[nodeid] ≈ f(x) + end + end ae = zeros(3, length(point_vars)) for i in 1:3 @@ -152,29 +164,39 @@ function test_projection_mixedgrid() end ae = reinterpret(reshape, SymmetricTensor{2,2,Float64,3}, ae) @test point_vars ≈ point_vars_2 ≈ ae + @test point_vars_3 ≈ ae # Do the same thing but for the triangle set - order = 2 ip = Lagrange{RefTriangle, order}() ip_geom = Lagrange{RefTriangle, 1}() qr = QuadratureRule{RefTriangle}(4) cv = CellValues(qr, ip, ip_geom) nqp = getnquadpoints(cv) - qp_values_tria = [zeros(SymmetricTensor{2,2}, nqp) for _ in triaset] - qp_values_matrix_tria = [zero(SymmetricTensor{2,2}) for _ in 1:nqp, _ in triaset] + qp_values_tria = [SymmetricTensor{2,2,Float64,3}[] for _ in 1:getncells(mesh)] + qp_values_matrix_tria = [zero(SymmetricTensor{2,2}) * NaN for _ in 1:nqp, _ in 1:getncells(mesh)] + qp_values_dict = Dict{Int, Vector{SymmetricTensor{2,2,Float64,3}}}() for (ic, cellid) in enumerate(triaset) xe = getcoordinates(mesh, cellid) # analytical values qp_values = [f(spatial_coordinate(cv, qp, xe)) for qp in 1:getnquadpoints(cv)] - qp_values_tria[ic] = qp_values - qp_values_matrix_tria[:, ic] .= qp_values + qp_values_tria[cellid] = qp_values + qp_values_matrix_tria[:, cellid] .= qp_values + qp_values_dict[cellid] = qp_values end #tria - proj = L2Projector(ip, mesh; geom_ip=ip_geom, set=triaset) + proj = L2Projector(ip, mesh; set=triaset) point_vars = project(proj, qp_values_tria, qr) point_vars_2 = project(proj, qp_values_matrix_tria, qr) + projection_at_nodes = evaluate_at_grid_nodes(proj, point_vars) + for cellid in triaset + for nodeid in mesh.cells[cellid].nodes + x = mesh.nodes[nodeid].x + @test projection_at_nodes[nodeid] ≈ f(x) + end + end + ae = zeros(3, length(point_vars)) for i in 1:3 apply_analytical!(@view(ae[i, :]), proj.dh, :_, x -> f(x).data[i], triaset) @@ -183,6 +205,122 @@ function test_projection_mixedgrid() @test point_vars ≈ point_vars_2 ≈ ae end +function calculate_function_value_in_qpoints!(qp_data, sdh, cv, dofvector::Vector) + for cell in CellIterator(sdh) + qvector = qp_data[cellid(cell)] + ae = dofvector[celldofs(cell)] + resize!(qvector, getnquadpoints(cv)) + for q_point in 1:getnquadpoints(cv) + qvector[q_point] = function_value(cv, q_point, ae) + end + end + return qp_data +end + +function test_add_projection_grid() + grid = generate_grid(Triangle, (3,3)) + set1 = Set(1:getncells(grid)÷2) + set2 = setdiff(1:getncells(grid), set1) + + dh = DofHandler(grid) + ip = Lagrange{RefTriangle, 1}() + sdh1 = SubDofHandler(dh, set1) + add!(sdh1, :u, ip) + sdh2 = SubDofHandler(dh, set2) + add!(sdh2, :u, ip) + close!(dh) + + solution = zeros(ndofs(dh)) + apply_analytical!(solution, dh, :u, x -> x[1]^2 - x[2]^2) + + qr = QuadratureRule{RefTriangle}(2) + cv = CellValues(qr, ip, ip) + + # Fill qp_data with the interpolated values + qp_data = [Float64[] for _ in 1:getncells(grid)] + for (sdh, cv_) in ((sdh1, cv), (sdh2, cv)) + calculate_function_value_in_qpoints!(qp_data, sdh, cv_, solution) + end + + # Build the first L2Projector with two different sets + proj1 
= L2Projector(grid) + add!(proj1, set1, ip; qr_rhs = qr) + add!(proj1, set2, ip; qr_rhs = qr) + close!(proj1) + + # Build the second L2Projector with a single set using the convenience function + proj2 = L2Projector(ip, grid) + + # Project both cases + projected1 = project(proj1, qp_data) + projected2 = project(proj2, qp_data, qr) + + # Evaluate at grid nodes to keep same numbering following the grid (dof distribution may be different) + solution_at_nodes = evaluate_at_grid_nodes(dh, solution, :u) + projected1_at_nodes = evaluate_at_grid_nodes(proj1, projected1) + projected2_at_nodes = evaluate_at_grid_nodes(proj2, projected2) + + @test projected1_at_nodes ≈ solution_at_nodes + @test projected2_at_nodes ≈ solution_at_nodes +end + +function test_projection_mixedgrid() + grid, quadset_left, triaset, quadset_right = make_mixedgrid_l2_tests() + quadset_full = union(Set(quadset_left), quadset_right) + @assert getncells(grid) == length(triaset) + length(quadset_full) + # Test both the case with one cell excluded from projection and the case with the full grid included + for quadset in (quadset_left, quadset_full) + dh = DofHandler(grid) + sdh_quad = SubDofHandler(dh, quadset) + ip_quad = Lagrange{RefQuadrilateral, 1}() + add!(sdh_quad, :u, ip_quad) + sdh_tria = SubDofHandler(dh, triaset) + ip_tria = Lagrange{RefTriangle, 1}() + add!(sdh_tria, :u, ip_tria) + close!(dh) + + solution = zeros(ndofs(dh)) + apply_analytical!(solution, dh, :u, x -> x[1]^2 - x[2]^2) + + qr_quad = QuadratureRule{RefQuadrilateral}(2) + cv_quad = CellValues(qr_quad, ip_quad, ip_quad) + qr_tria = QuadratureRule{RefTriangle}(2) + cv_tria = CellValues(qr_tria, ip_tria, ip_tria) + + # Fill qp_data with the interpolated values + qp_data = [Float64[] for _ in 1:getncells(grid)] + for (sdh, cv) in ((sdh_quad, cv_quad), (sdh_tria, cv_tria)) + calculate_function_value_in_qpoints!(qp_data, sdh, cv, solution) + end + + # Finally, let's build the L2Projector and check if we can project back the solution + proj = L2Projector(grid) + add!(proj, triaset, ip_tria; qr_rhs = qr_tria) + add!(proj, quadset, ip_quad; qr_rhs = qr_quad) + close!(proj) + + # Quadrature rules must be in the same order as ip's are added to proj. + projected = project(proj, qp_data) + + # Evaluate at grid nodes to keep same numbering following the grid (dof distribution may be different) + solution_at_nodes = evaluate_at_grid_nodes(dh, solution, :u) + projected_at_nodes = evaluate_at_grid_nodes(proj, projected) + + # If part of the grid is excluded from the projection, nodes in that region will be NaN. + # So we only want to check those nodes attached to cells in the cellsets.
+ active_nodes = Set{Int}() + for cell in CellIterator(grid, union(quadset, triaset)) + for n in Ferrite.getnodes(cell) + push!(active_nodes, n) + end + end + check_nodes = collect(active_nodes) + + @test projected_at_nodes[check_nodes] ≈ solution_at_nodes[check_nodes] + + end +end + function test_export(;subset::Bool) grid = generate_grid(Quadrilateral, (2, 1)) qr = QuadratureRule{RefQuadrilateral}(2) @@ -267,24 +405,85 @@ function test_export(;subset::Bool) end mktempdir() do tmp - fname = vtk_grid(joinpath(tmp, "projected"), grid) do vtk - vtk_point_data(vtk, p, p_scalar, "p_scalar") - vtk_point_data(vtk, p, p_vec, "p_vec") - vtk_point_data(vtk, p, p_tens, "p_tens") - vtk_point_data(vtk, p, p_stens, "p_stens") + fname = joinpath(tmp, "projected") + VTKGridFile(fname, grid) do vtk + write_projection(vtk, p, p_scalar, "p_scalar") + write_projection(vtk, p, p_vec, "p_vec") + write_projection(vtk, p, p_tens, "p_tens") + write_projection(vtk, p, p_stens, "p_stens") + end + # The following test may fail due to floating point inaccuracies + # These could occur due to e.g. changes in system architecture. + if Sys.islinux() && Sys.ARCH === :x86_64 + @test bytes2hex(open(SHA.sha1, fname*".vtu", "r")) == ( + subset ? "b3fef3de9f38ca9ddd92f2f67a1606d07ca56d67" : + "bc2ec8f648f9b8bccccf172c1fc48bf03340329b" + ) end - @test bytes2hex(open(SHA.sha1, fname[1], "r")) in ( - subset ? ("261cfe21de7a478e14f455e783694651a91eeb60", "b3fef3de9f38ca9ddd92f2f67a1606d07ca56d67") : - ("3b8ffb444db1b4cee1246a751da88136116fe49b", "bc2ec8f648f9b8bccccf172c1fc48bf03340329b") - ) end + end -function test_show() +function test_show_l2() grid = generate_grid(Triangle, (2,2)) ip = Lagrange{RefTriangle, 1}() proj = L2Projector(ip, grid) @test repr("text/plain", proj) == repr(typeof(proj)) * "\n projection on: 8/8 cells in grid\n function interpolation: Lagrange{RefTriangle, 1}()\n geometric interpolation: Lagrange{RefTriangle, 1}()\n" + + # Multi-domain setup + proj2 = L2Projector(grid) + @test sprint(show, MIME"text/plain"(), proj2) == "L2Projector (not closed)" + qr_rhs = QuadratureRule{RefTriangle}(2) + add!(proj2, Set(1:2), ip; qr_rhs) + add!(proj2, Set(3:4), ip; qr_rhs) + close!(proj2) + showstr = sprint(show, MIME"text/plain"(), proj2) + @test contains(showstr, "L2Projector") + @test contains(showstr, "4/8 cells in grid") + @test contains(showstr, "Split into 2 sets") +end + +function test_l2proj_errorpaths() + grid = generate_grid(Triangle, (2,3)) + ip = Lagrange{RefTriangle, 1}() + proj = L2Projector(grid) # Multiple subdomains + proj1 = L2Projector(ip, grid; set=collect(1:4)) # Single sub-domain case + qr_tria = QuadratureRule{RefTriangle}(2) + qr_quad = QuadratureRule{RefQuadrilateral}(2) + + # Providing wrong quadrature rules + exception_rhs = ErrorException("The reference shape of the interpolation and the qr_rhs must be the same") + exception_lhs = ErrorException("The reference shape of the interpolation and the qr_lhs must be the same") + @test_throws exception_rhs add!(proj, Set(1:2), ip; qr_rhs = qr_quad) + @test_throws exception_lhs add!(proj, Set(1:2), ip; qr_rhs = qr_tria, qr_lhs = qr_quad) + + # Build up a 2-domain case + add!(proj, Set(1:2), ip; qr_rhs = qr_tria) + add!(proj, Set(3:4), ip; qr_rhs = qr_tria) + data_valid = Dict(i => rand(getnquadpoints(qr_tria)) for i in 1:4) + + # Try projecting when not closed + @test_throws ErrorException("The L2Projector is not closed") project(proj, data_valid) + close!(proj) + + # Not giving quadrature rule + noquad_exception = ErrorException("The right-hand-side 
quadrature rule must be provided, unless already given to the L2Projector") + @test_throws noquad_exception project(proj1, data_valid) + # Providing wrong quadrature rule to project + wrongquad_exception = ErrorException("Reference shape of quadrature rule and cells doesn't match. Please ensure that `qrs_rhs` has the same order as sets are added to the L2Projector") + @test_throws wrongquad_exception project(proj1, data_valid, qr_quad) + + # Giving data indexed by set index instead of cell index + data_invalid = [rand(getnquadpoints(qr_tria)) for _ in 1:4] + invalid_data_exception = ErrorException("vars is indexed by the cellid, not the index in the set: length(vars) != number of cells") + @test_throws invalid_data_exception project(proj1, data_invalid, qr_tria) + # Giving data with too many or too few quadrature points + data_invalid2 = [rand(getnquadpoints(qr_tria) + 1) for _ in 1:getncells(grid)] + data_invalid3 = [rand(getnquadpoints(qr_tria) - 1) for _ in 1:getncells(grid)] + wrongnqp_exception = ErrorException("The number of variables per cell doesn't match the number of quadrature points") + @test_throws wrongnqp_exception project(proj1, data_invalid2, qr_tria) + @test_throws wrongnqp_exception project(proj1, data_invalid3, qr_tria) + end @testset "Test L2-Projection" begin @@ -292,8 +491,11 @@ end test_projection(1, RefTriangle) test_projection(2, RefQuadrilateral) test_projection(2, RefTriangle) + test_projection_subset_of_mixedgrid() + test_add_projection_grid() test_projection_mixedgrid() test_export(subset=false) test_export(subset=true) - test_show() + test_show_l2() + test_l2proj_errorpaths() end diff --git a/test/test_mixeddofhandler.jl b/test/test_mixeddofhandler.jl index bd02cebb98..264324d4b0 100644 --- a/test/test_mixeddofhandler.jl +++ b/test/test_mixeddofhandler.jl @@ -32,7 +32,7 @@ function test_1d_bar_beam() sdh1 = SubDofHandler(dh, Set(3)) add!(sdh1, :u, ip^2) add!(sdh1, :θ, ip) - sdh2 = SubDofHandler(dh, Set((1,2))) + sdh2 = SubDofHandler(dh, OrderedSet((1,2))) add!(sdh2, :u, ip^2) close!(dh) @test ndofs(dh) == 8 @@ -110,7 +110,7 @@ function test_2d_mixed_1_el() @test ndofs(dh) == 12 @test ndofs_per_cell(dh, 1) == 12 @test celldofs(dh, 1) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] - + @test Set(Ferrite.getfieldnames(dh)) == Set(Ferrite.getfieldnames(dh.subdofhandlers[1])) end @@ -133,6 +133,8 @@ function test_2d_mixed_2_el() @test ndofs_per_cell(dh.subdofhandlers[1]) == 12 @test ndofs_per_cell(dh, 2) == 9 @test ndofs_per_cell(dh.subdofhandlers[2]) == 9 + @test_throws ErrorException ndofs_per_cell(dh) + @test_throws ErrorException Ferrite.nnodes_per_cell(grid) @test celldofs(dh, 1) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] @test celldofs(dh, 2) == [5, 6, 3, 4, 13, 14, 11, 10, 15] end @@ -293,8 +295,8 @@ function test_2_element_heat_eq() # Create two Dirichlet boundary conditions - one for each field. 
ch = ConstraintHandler(dh); - ∂Ω1 = getfaceset(grid, "left") - ∂Ω2 = getfaceset(grid, "right") + ∂Ω1 = getfacetset(grid, "left") + ∂Ω2 = getfacetset(grid, "right") dbc1 = Dirichlet(:u, ∂Ω1, (x, t) -> 0) dbc2 = Dirichlet(:u, ∂Ω2, (x, t) -> 0) add!(ch, dbc1); @@ -333,7 +335,7 @@ function test_2_element_heat_eq() end end - K = create_sparsity_pattern(dh) + K = allocate_matrix(dh) f = zeros(ndofs(dh)); assembler = start_assemble(K, f); # Use the same assemble function since it is the same weak form for both cell-types @@ -355,11 +357,11 @@ function test_2_element_heat_eq() gridfilename = "mixed_grid" addcellset!(grid, "cell-1", [1,]) addcellset!(grid, "cell-2", [2,]) - vtk_grid(gridfilename, grid) do vtk - vtk_cellset(vtk, grid, "cell-1") - vtk_cellset(vtk, grid, "cell-2") - vtk_point_data(vtk, dh, u) - # vtk_point_data(vtk, ch) #FIXME + VTKGridFile(gridfilename, grid) do vtk + Ferrite.write_cellset(vtk, grid, "cell-1") + Ferrite.write_cellset(vtk, grid, "cell-2") + write_solution(vtk, dh, u) + # Ferrite.write_constraints(vtk, ch) #FIXME end sha = bytes2hex(open(SHA.sha1, gridfilename*".vtu")) @test sha in ("e96732c000b0b385db7444f002461468b60b3b2c", "7b26edc27b5e59a2f60907374cd5a5790cc37a6a") @@ -384,7 +386,7 @@ function test_element_order() dh = DofHandler(grid); # Note the jump in cell numbers - sdh_tri = SubDofHandler(dh, Set((1,3))) + sdh_tri = SubDofHandler(dh, OrderedSet((1,3))) add!(sdh_tri, :u, Lagrange{RefTriangle,1}()^2) sdh_quad = SubDofHandler(dh, Set(2)) add!(sdh_quad, :u, Lagrange{RefQuadrilateral,1}()^2) @@ -415,8 +417,8 @@ function test_field_on_subdomain() close!(dh) # retrieve field dimensions - @test Ferrite.getfielddim(dh, :v) == 2 - @test Ferrite.getfielddim(dh, :s) ==1 + @test Ferrite.n_components(dh, :v) == 2 + @test Ferrite.n_components(dh, :s) ==1 # find field in SubDofHandler @test Ferrite.find_field(dh.subdofhandlers[1], :v) == 1 @@ -428,12 +430,12 @@ end function test_evaluate_at_grid_nodes() # 5_______6 - # |\ | + # |\ | # | \ | # 3______\4 # | | # | | - # 1_______2 + # 1_______2 nodes = [Node((0.0, 0.0)), Node((1.0, 0.0)), @@ -445,8 +447,8 @@ function test_evaluate_at_grid_nodes() Triangle((3,4,6)), Triangle((3,6,5))] mesh = Grid(cells, nodes) - addcellset!(mesh, "quads", Set{Int}((1,))) - addcellset!(mesh, "tris", Set{Int}((2, 3))) + addcellset!(mesh, "quads", Set((1,))) + addcellset!(mesh, "tris", OrderedSet((2, 3))) ip_quad = Lagrange{RefQuadrilateral,1}() ip_tri = Lagrange{RefTriangle,1}() @@ -459,18 +461,34 @@ function test_evaluate_at_grid_nodes() add!(sdh_quad, :s, ip_quad) # scalar field :s only on quad close!(dh) - u = collect(1.:16.) + u = collect(1.:16.) + uv = @view u[1:end] + # :s on thesolution s_nodes = evaluate_at_grid_nodes(dh, u, :s) @test s_nodes[1:4] ≈ [13., 14., 16., 15.] @test all(isnan.(s_nodes[5:6])) - v_nodes = evaluate_at_grid_nodes(dh, u, :v) + # :s on a view into solution + sv_nodes = evaluate_at_grid_nodes(dh, uv, :s) + @test sv_nodes[1:4] ≈ [13., 14., 16., 15.] 
+ @test all(isnan.(sv_nodes[5:6])) + # :v on the solution + v_nodes = evaluate_at_grid_nodes(dh, u, :v) @test v_nodes ≈ hcat( [9., 10., 0.], [11., 12., 0.], [1., 2., 0.], [3., 4., 0.], [7., 8., 0.], [5., 6., 0.]) + + # :v on a view into solution + vv_nodes = evaluate_at_grid_nodes(dh, uv, :v) + @test vv_nodes ≈ hcat( [9., 10., 0.], + [11., 12., 0.], + [1., 2., 0.], + [3., 4., 0.], + [7., 8., 0.], + [5., 6., 0.]) end function test_mixed_grid_show() @@ -479,18 +497,18 @@ function test_mixed_grid_show() @test occursin("2 Quadrilateral/Triangle cells", str) end -# regression tests for https://github.com/KristofferC/JuAFEM.jl/issues/315 +# regression tests for https://github.com/KristofferC/JuAFEM.jl/issues/315 function test_subparametric_quad() #linear geometry grid = generate_grid(Quadrilateral, (1,1)) ip = Lagrange{RefQuadrilateral,2}() - + dh = DofHandler(grid) add!(dh, :u, ip^2) close!(dh) - + ch = ConstraintHandler(dh) - dbc1 = Dirichlet(:u, getfaceset(grid, "left"), (x, t) -> 0.0, 2) + dbc1 = Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> 0.0, 2) add!(ch, dbc1) close!(ch) update!(ch, 1.0) @@ -503,13 +521,13 @@ function test_subparametric_triangle() grid = generate_grid(Triangle, (1,1)) ip = Lagrange{RefTriangle,2}() - + dh = DofHandler(grid) add!(dh, :u, ip^2) close!(dh) - + ch = ConstraintHandler(dh) - dbc1 = Dirichlet(:u, getfaceset(grid, "left"), (x, t) -> 0.0, 2) + dbc1 = Dirichlet(:u, getfacetset(grid, "left"), (x, t) -> 0.0, 2) add!(ch, dbc1) close!(ch) update!(ch, 1.0) @@ -519,8 +537,8 @@ end function test_celliterator_subdomain() for celltype in (Line, Quadrilateral, Hexahedron) - ip = Ferrite.default_interpolation(celltype) - dim = Ferrite.getdim(ip) + ip = Ferrite.geometric_interpolation(celltype) + dim = Ferrite.getrefdim(ip) grid = generate_grid(celltype, ntuple(i->i==1 ? 
2 : 1, dim)) # 2 cells dh = DofHandler(grid) sdh = SubDofHandler(dh, Set(2)) # only cell 2, cell 1 is not part of dh @@ -535,12 +553,12 @@ end function test_separate_fields_on_separate_domains() # 5_______6 - # |\ | + # |\ | # | \ | # 3______\4 # | | # | | - # 1_______2 + # 1_______2 # Given: a vector field :q defined on the quad and a scalar field :t defined on the triangles nodes = [Node((0.0, 0.0)), Node((1.0, 0.0)), @@ -552,8 +570,8 @@ function test_separate_fields_on_separate_domains() Triangle((3,4,5)), Triangle((4,6,5))] mesh = Grid(cells, nodes) - addcellset!(mesh, "quads", Set{Int}((1,))) - addcellset!(mesh, "tris", Set{Int}((2, 3))) + addcellset!(mesh, "quads", Set((1,))) + addcellset!(mesh, "tris", OrderedSet((2, 3))) ip_tri = Lagrange{RefTriangle,1}() ip_quad = Lagrange{RefQuadrilateral,1}() @@ -576,8 +594,8 @@ end function test_unique_cellsets() grid = generate_grid(Quadrilateral, (2, 1)) - set_u = Set(1:2) - set_v = Set(1:1) + set_u = OrderedSet(1:2) + set_v = OrderedSet(1:1) ip = Lagrange{RefQuadrilateral,1}() @@ -605,7 +623,7 @@ function test_show() sdh_tri = SubDofHandler(dh, Set(2)) add!(sdh_tri, :u, Lagrange{RefTriangle, 1}()^2) close!(dh) - @test repr("text/plain", dh) == repr(typeof(dh)) * "\n Fields:\n :u, dim: 2\n Total dofs: 10" + @test repr("text/plain", dh) == repr(typeof(dh)) * "\n Fields:\n :u, Vec{2}\n Total dofs: 10" @test repr("text/plain", dh.subdofhandlers[1]) == string( repr("text/plain", typeof(dh.subdofhandlers[1])), "\n Cell type: Quadrilateral\n Fields:\n :u, ", repr("text/plain", dh.subdofhandlers[1].field_interpolations[1]), "\n Dofs per cell: 8\n") @@ -633,12 +651,38 @@ function test_vtk_export() close!(dh) u = collect(1:ndofs(dh)) filename = "mixed_2d_grid" - vtk_grid(filename, dh) do vtk - vtk_point_data(vtk, dh, u) + VTKGridFile(filename, dh) do vtk + write_solution(vtk, dh, u) end sha = bytes2hex(open(SHA.sha1, filename*".vtu")) @test sha == "339ab8a8a613c2f38af684cccd695ae816671607" - rm(filename*".vtu") # clean up + rm(filename*".vtu") # clean up +end + +function test_celliterator_on_true_subdomain_smoketest() + grid = generate_grid(Hexahedron, (2,2,2)) + + dh = DofHandler(grid) + sdh = SubDofHandler(dh, [1,2,3]) + ip = Lagrange{RefHexahedron,1}() + add!(sdh, :u, ip) + close!(dh) + + # The following statements just check that the iterator + # does not crash at least. 
Regression for #966 + for cell in CellIterator(sdh) + end + + for cell in CellIterator(dh, [1,2,3]) + end + + for cell in CellIterator(dh) + if cellid(cell) <= 3 + @test length(celldofs(cell)) == getnbasefunctions(ip) + else + @test length(celldofs(cell)) == 0 + end + end end @testset "DofHandler" begin @@ -668,4 +712,5 @@ end test_celliterator_subdomain() test_show() test_vtk_export() + test_celliterator_on_true_subdomain_smoketest() end diff --git a/test/test_notebooks.jl b/test/test_notebooks.jl index 9982369afc..6ae90aae2a 100644 --- a/test/test_notebooks.jl +++ b/test/test_notebooks.jl @@ -55,5 +55,3 @@ module Cook end end end - - diff --git a/test/test_pointevaluation.jl b/test/test_pointevaluation.jl index 607b5d00b6..347290a16c 100644 --- a/test/test_pointevaluation.jl +++ b/test/test_pointevaluation.jl @@ -1,20 +1,29 @@ -function scalar_field() +using Ferrite, Test + +function test_pe_scalar_field() # isoparametric approximation - mesh = generate_grid(QuadraticQuadrilateral, (20, 20)) - f(x) = x[1]^2 + mesh = generate_grid(QuadraticQuadrilateral, (3, 3)) + perturbate_standard_grid!(mesh, 1/10) + + f(x) = x[1]+x[2] ip_f = Lagrange{RefQuadrilateral,2}() # function interpolation ip_g = Lagrange{RefQuadrilateral,2}() # geometry interpolation + # points where we want to retrieve field values + points = Vec{2,Float64}[] + # compute values in quadrature points - qr = QuadratureRule{RefQuadrilateral}(3) # exactly approximate quadratic field + qr = QuadratureRule{RefQuadrilateral}(3) # exactly integrate field cv = CellValues(qr, ip_f, ip_g) qp_vals = [Vector{Float64}(undef, getnquadpoints(cv)) for _ in 1:getncells(mesh)] for cellid in eachindex(mesh.cells) xe = getcoordinates(mesh, cellid) reinit!(cv, xe) for qp in 1:getnquadpoints(cv) - qp_vals[cellid][qp] = f(spatial_coordinate(cv, qp, xe)) + x = spatial_coordinate(cv, qp, xe) + qp_vals[cellid][qp] = f(x) + push!(points, x) end end @@ -22,11 +31,10 @@ function scalar_field() projector = L2Projector(ip_f, mesh) projector_vals = project(projector, qp_vals, qr) - # points where we want to retrieve field values - points = [Vec((x, 0.52)) for x in range(0.0; stop=1.0, length=100)] - # set up PointEvalHandler and retrieve values ph = PointEvalHandler(mesh, points) + @test all(x -> x !== nothing, ph.cells) + vals = evaluate_at_points(ph, projector, projector_vals) @test f.(points) ≈ vals @@ -35,18 +43,59 @@ function scalar_field() # @test f.(points) ≈ vals end -function vector_field() +function test_pe_embedded() + mesh = generate_grid(QuadraticQuadrilateral, (3, 3)) + perturbate_standard_grid!(mesh, 1/10) + mesh = Grid(mesh.cells, map(x->Node(Vec((x.x[1], x.x[2], x.x[1]+x.x[2]))), mesh.nodes)) + + f(x) = x[1]+x[2] + + ip_f = Lagrange{RefQuadrilateral,2}() # function interpolation + ip_g = Lagrange{RefQuadrilateral,2}()^3 # geometry interpolation + + # points where we want to retrieve field values + points = Vec{3,Float64}[] + + # compute values in quadrature points + qr = QuadratureRule{RefQuadrilateral}(3) # exactly integrate quadratic field + cv = CellValues(qr, ip_f, ip_g) + qp_vals = [Vector{Float64}(undef, getnquadpoints(cv)) for _ in 1:getncells(mesh)] + for cellid in eachindex(mesh.cells) + xe = getcoordinates(mesh, cellid) + reinit!(cv, xe) + for qp in 1:getnquadpoints(cv) + x = spatial_coordinate(cv, qp, xe) + qp_vals[cellid][qp] = f(x) + push!(points, x) + end + end + + # do a L2Projection for getting values in dofs + # @test_throws MethodError projector = L2Projector(ip_f, mesh; geom_ip=ip_g) + projector = L2Projector(ip_f, mesh) + 
projector_vals = project(projector, qp_vals, qr) + + # set up PointEvalHandler and retrieve values + ph = PointEvalHandler(mesh, points) + @test all(x -> x !== nothing, ph.cells) + + vals = evaluate_at_points(ph, projector, projector_vals) + @test f.(points) ≈ vals +end + +function test_pe_vector_field() ## vector field # isoparametric approximation - mesh = generate_grid(QuadraticQuadrilateral, (20, 20)) - f(x) = Vec((x[1]^2, x[1])) + mesh = generate_grid(QuadraticQuadrilateral, (3, 3)) + perturbate_standard_grid!(mesh, 1/10) + f(x) = Vec((x[1], x[2])) nodal_vals = [f(p.x) for p in mesh.nodes] - ip_f = Lagrange{RefQuadrilateral,2}() # function interpolation + ip_f = Lagrange{RefQuadrilateral,2}()^2 # function interpolation ip_g = Lagrange{RefQuadrilateral,2}() # geometry interpolation # compute values in quadrature points - qr = QuadratureRule{RefQuadrilateral}(3) # exactly approximate quadratic field + qr = QuadratureRule{RefQuadrilateral}(3) # exactly integrate field cv = CellValues(qr, ip_f, ip_g) qp_vals = [Vector{Vec{2,Float64}}(undef, getnquadpoints(cv)) for i=1:getncells(mesh)] for cellid in eachindex(mesh.cells) @@ -67,6 +116,7 @@ function vector_field() # set up PointEvalHandler and retrieve values ph = PointEvalHandler(mesh, points) + @test all(x -> x !== nothing, ph.cells) vals = evaluate_at_points(ph, projector, projector_vals) @test f.(points) ≈ vals @@ -75,14 +125,15 @@ function vector_field() # @test f.(points) ≈ vals end -function superparametric() +function test_pe_superparametric() # superparametric approximation - mesh = generate_grid(Quadrilateral, (20, 20)) - f(x) = x*x[1] + mesh = generate_grid(Quadrilateral, (3, 3)) + perturbate_standard_grid!(mesh, 1/10) + f(x) = x ip_f = Lagrange{RefQuadrilateral,2}() # function interpolation # compute values in quadrature points - qr = QuadratureRule{RefQuadrilateral}(3) # exactly approximate quadratic field + qr = QuadratureRule{RefQuadrilateral}(3) # exactly integrate field cv = CellValues(qr, ip_f) qp_vals = [Vector{Vec{2,Float64}}(undef, getnquadpoints(cv)) for i=1:getncells(mesh)] for cellid in eachindex(mesh.cells) @@ -102,14 +153,16 @@ function superparametric() # set up PointEvalHandler and retrieve values ph = PointEvalHandler(mesh, points) + @test all(x -> x !== nothing, ph.cells) vals = evaluate_at_points(ph, projector, projector_vals) # can recover a quadratic field by a quadratic approximation @test f.(points) ≈ vals end -function dofhandler() - mesh = generate_grid(Quadrilateral, (2,2)) +function test_pe_dofhandler() + mesh = generate_grid(Quadrilateral, (2, 2)) + perturbate_standard_grid!(mesh, 1/10) dof_vals = [1., 2., 5., 4., 3., 6., 8., 7., 9.] 
points = [node.x for node in mesh.nodes] # same as nodes @@ -118,6 +171,7 @@ function dofhandler() close!(dh) ph = PointEvalHandler(mesh, points) + @test all(x -> x !== nothing, ph.cells) vals = evaluate_at_points(ph, dh, dof_vals, :s) @test vals ≈ 1.0:9.0 @@ -126,41 +180,15 @@ function dofhandler() # @test vals ≈ 1.0:9.0 end -function dofhandler2(;three_dimensional=true) - # Computes the L2 projection of a quadratic field exactly - # but not using L2Projector since we want the DofHandler dofs - if (three_dimensional) - mesh = generate_grid(Hexahedron, (10, 10, 10)) - f_s = x -> 1.0 + x[1] + x[2] + x[1] * x[2] + x[2] * x[3] - f_v = x -> Vec{3}((1.0 + x[1] + x[2] + x[1] * x[2], 2.0 - x[1] - x[2] - x[1] * x[2], 4.0 + x[1] - x[2] + x[3] - x[1] * x[3] - x[2] * x[3])) - points = [Vec((x, x, x)) for x in range(0; stop=1, length=100)] - ip_f = Lagrange{RefHexahedron,2}() - ip_f_v = ip_f^3 - qr = QuadratureRule{RefHexahedron}(3) - else - mesh = generate_grid(Quadrilateral, (20, 20)) - f_s = x -> 1.0 + x[1] + x[2] + x[1] * x[2] - f_v = x -> Vec{2}((1.0 + x[1] + x[2] + x[1] * x[2], 2.0 - x[1] - x[2] - x[1] * x[2])) - points = [Vec((x, x, )) for x in range(0; stop=1, length=100)] - ip_f = Lagrange{RefQuadrilateral,2}() - ip_f_v = ip_f^2 - qr = QuadratureRule{RefQuadrilateral}(3) - end - - csv = CellValues(qr, ip_f) - cvv = CellValues(qr, ip_f_v) - dh = DofHandler(mesh); - add!(dh, :s, ip_f) - add!(dh, :v, ip_f_v) - close!(dh) - M = create_sparsity_pattern(dh) +function _pointeval_dofhandler2_manual_projection(dh, csv, cvv, f_s, f_v) + M = allocate_matrix(dh) f = zeros(ndofs(dh)) asm = start_assemble(M, f) me = zeros(ndofs_per_cell(dh), ndofs_per_cell(dh)) fe = zeros(ndofs_per_cell(dh)) s_dofs = dof_range(dh, :s) v_dofs = dof_range(dh, :v) - + for cell in CellIterator(dh) fill!(me, 0) fill!(fe, 0) @@ -188,7 +216,43 @@ function dofhandler2(;three_dimensional=true) end assemble!(asm, celldofs(cell), me, fe) end - uh = M \ f + return M \ f +end + + +function test_pe_dofhandler2(;three_dimensional=true) + # Computes the L2 projection of a quadratic field exactly + # but not using L2Projector since we want the DofHandler dofs + if (three_dimensional) + mesh = generate_grid(Hexahedron, (3, 3, 3)) + perturbate_standard_grid!(mesh, 1/10) + f_s = x -> 1.0 + x[1] + x[2] + x[1] * x[2] + x[2] * x[3] + f_v = x -> Vec{3}((1.0 + x[1] + x[2] + x[1] * x[2], 2.0 - x[1] - x[2] - x[1] * x[2], 4.0 + x[1] - x[2] + x[3] - x[1] * x[3] - x[2] * x[3])) + points = [Vec((x, x, x)) for x in range(0; stop=1, length=100)] + ip_f = Lagrange{RefHexahedron,2}() + ip_f_v = ip_f^3 + qr = QuadratureRule{RefHexahedron}(3) + else + mesh = generate_grid(Quadrilateral, (3, 3)) + perturbate_standard_grid!(mesh, 1/10) + f_s = x -> 1.0 + x[1] + x[2] + x[1] * x[2] + f_v = x -> Vec{2}((1.0 + x[1] + x[2] + x[1] * x[2], 2.0 - x[1] - x[2] - x[1] * x[2])) + points = [Vec((x, x, )) for x in range(0; stop=1, length=100)] + ip_f = Lagrange{RefQuadrilateral,2}() + ip_f_v = ip_f^2 + qr = QuadratureRule{RefQuadrilateral}(3) + end + + csv = CellValues(qr, ip_f) + cvv = CellValues(qr, ip_f_v) + dh = DofHandler(mesh); + add!(dh, :s, ip_f) + add!(dh, :v, ip_f_v) + close!(dh) + + s_dofs = dof_range(dh, :s) + v_dofs = dof_range(dh, :v) + uh = _pointeval_dofhandler2_manual_projection(dh, csv, cvv, f_s, f_v) ph = PointEvalHandler(mesh, points) @test all(x -> x !== nothing, ph.cells) @@ -218,16 +282,16 @@ function dofhandler2(;three_dimensional=true) end end -function mixed_grid() - ## Mixed grid where not all cells have the same fields +function 
test_pe_mixed_grid() + ## Mixed grid where not all cells have the same fields # 5_______6 - # |\ | + # |\ | # | \ | # 3______\4 # | | # | | - # 1_______2 + # 1_______2 nodes = [Node((0.0, 0.0)), Node((1.0, 0.0)), @@ -252,16 +316,17 @@ function mixed_grid() # compute values in quadrature points for quad qr = QuadratureRule{RefQuadrilateral}(2) cv = CellValues(qr, ip_quad) - qp_vals_quads = [Vector{Float64}(undef, getnquadpoints(cv)) for cell in getcellset(mesh, "quads")] - for (local_cellid, global_cellid) in enumerate(getcellset(mesh, "quads")) + qp_vals_quads = OrderedDict(cell => Vector{Float64}(undef, getnquadpoints(cv)) for cell in getcellset(mesh, "quads")) + for global_cellid in getcellset(mesh, "quads") xe = getcoordinates(mesh, global_cellid) reinit!(cv, xe) + cell_vals = qp_vals_quads[global_cellid] for qp in 1:getnquadpoints(cv) - qp_vals_quads[local_cellid][qp] = f(spatial_coordinate(cv, qp, xe)) + cell_vals[qp] = f(spatial_coordinate(cv, qp, xe)) end end - # construct projector + # construct projector projector = L2Projector(ip_quad, mesh; set=getcellset(mesh, "quads")) points = [Vec((x, 2x)) for x in range(0.0; stop=1.0, length=10)] @@ -269,6 +334,7 @@ function mixed_grid() # first alternative: L2Projection to dofs projector_values = project(projector, qp_vals_quads, qr) ph = PointEvalHandler(mesh, points) + @test all(x -> x !== nothing, ph.cells) vals = evaluate_at_points(ph, projector, projector_values) @test vals[1:5] ≈ f.(points[1:5]) @test all(isnan, vals[6:end]) @@ -284,13 +350,15 @@ function mixed_grid() dof_vals = [1., 1., 2., 2., 4., 4., 3., 3., 6., 6., 5., 5.] points = [node.x for node in mesh.nodes] ph = PointEvalHandler(mesh, points) + @test all(x -> x !== nothing, ph.cells) vals = evaluate_at_points(ph, dh, dof_vals, :v) - @test vals == [Vec((i, i)) for i=1.0:6.0] + @test vals ≈ [Vec((i, i)) for i=1.0:6.0] end -function oneD() +function test_pe_oneD() # isoparametric approximation mesh = generate_grid(Line, (2,)) + perturbate_standard_grid!(mesh, 1/10) f(x) = x[1] nodal_vals = [f(p.x) for p in mesh.nodes] @@ -317,6 +385,7 @@ function oneD() # set up PointEvalHandler and retrieve values ph = PointEvalHandler(mesh, points) + @test all(x -> x !== nothing, ph.cells) vals = evaluate_at_points(ph, projector, projector_values) @test f.(points) ≈ vals @@ -326,25 +395,46 @@ function oneD() # @test f.(points) ≈ vals end -function first_point_missing() +function test_pe_first_point_missing() mesh = generate_grid(Quadrilateral, (1, 1)) points = [Vec(2.0, 0.0), Vec(0.0, 0.0)] ph = PointEvalHandler(mesh, points; warn=false) - + @test isnothing(ph.local_coords[1]) - @test ph.local_coords[2] == Vec(0.0, 0.0) + @test ph.local_coords[2] ≈ Vec(0.0, 0.0) end @testset "PointEvalHandler" begin - scalar_field() - vector_field() - dofhandler() - dofhandler2(;three_dimensional=false) - dofhandler2(;three_dimensional=true) - superparametric() - mixed_grid() - oneD() - first_point_missing() + @testset "scalar field" begin + test_pe_scalar_field() + test_pe_embedded() + end + + @testset "vector field" begin + test_pe_vector_field() + end + + @testset "dofhandler interaction" begin + test_pe_dofhandler() + test_pe_dofhandler2(;three_dimensional=false) + test_pe_dofhandler2(;three_dimensional=true) + end + + @testset "superparametric" begin + test_pe_superparametric() + end + + @testset "mixed grid" begin + test_pe_mixed_grid() + end + + @testset "1D" begin + test_pe_oneD() + end + + @testset "failure cases" begin + test_pe_first_point_missing() + end end @testset "PointValues" begin @@ -352,7 
+442,7 @@ end x = Vec{2,Float64}.([(0.0, 0.0), (2.0, 0.5), (2.5, 2.5), (0.5, 2.0)]) ξ₁ = Vec{2,Float64}((0.12, -0.34)) ξ₂ = Vec{2,Float64}((0.56, -0.78)) - qr = QuadratureRule{RefQuadrilateral,Float64}([2.0, 2.0], [ξ₁, ξ₂]) + qr = QuadratureRule{RefQuadrilateral}([2.0, 2.0], [ξ₁, ξ₂]) # PointScalarValues csv = CellValues(qr, ip_f) diff --git a/test/test_quadrules.jl b/test/test_quadrules.jl index 13d1155294..d6240e58c4 100644 --- a/test/test_quadrules.jl +++ b/test/test_quadrules.jl @@ -1,5 +1,8 @@ +using Ferrite: reference_shape_value + @testset "Quadrature testing" begin ref_tet_vol(dim) = 1 / factorial(dim) + ref_prism_vol() = 1 / 2 ref_square_vol(dim) = 2^dim function integrate(qr::QuadratureRule, f::Function) @@ -11,17 +14,14 @@ end # Hypercube - for (dim, shape) = ((1, RefLine), (2, RefQuadrilateral), (3, RefHexahedron)) - for order in (1,2,3,4) - f = (x, p) -> sum([x[i]^p for i in 1:length(x)]) - # Legendre - qr = QuadratureRule{shape}(:legendre, order) - @test integrate(qr, (x) -> f(x, 2*order-1)) < 1e-14 - @test sum(qr.weights) ≈ ref_square_vol(dim) - @test sum(Ferrite.getweights(qr)) ≈ ref_square_vol(dim) - # Lobatto - if order > 1 - qr = QuadratureRule{shape}(:lobatto, order) + @testset "Exactness for integration on hypercube of $rulename" for (rulename, orderrange) in [ + (:legendre, 1:4), + (:lobatto, 2:4), + ] + for (dim, shape) = ((1, RefLine), (2, RefQuadrilateral), (3, RefHexahedron)) + for order in orderrange + f = (x, p) -> sum([x[i]^p for i in 1:length(x)]) + qr = QuadratureRule{shape}(rulename, order) @test integrate(qr, (x) -> f(x, 2*order-1)) < 1e-14 @test sum(qr.weights) ≈ ref_square_vol(dim) @test sum(Ferrite.getweights(qr)) ≈ ref_square_vol(dim) @@ -30,30 +30,58 @@ end @test_throws ArgumentError QuadratureRule{RefLine}(:einstein, 2) - # Tetrahedron - g = (x) -> sqrt(sum(x)) - dim = 2 - for order in 1:15 - qr = QuadratureRule{RefTriangle}(:legendre, order) - # http://www.wolframalpha.com/input/?i=integrate+sqrt(x%2By)+from+x+%3D+0+to+1,+y+%3D+0+to+1-x - @test integrate(qr, g) - 0.4 < 0.01 - @test sum(qr.weights) ≈ ref_tet_vol(dim) + # Triangle + # http://www.wolframalpha.com/input/?i=integrate+sqrt(x%2By)+from+x+%3D+0+to+1,+y+%3D+0+to+1-x + @testset "Exactness for integration on triangles of $rulename" for (rulename, orderrange) in [ + (:dunavant, 1:8), + (:gaussjacobi, 9:15), + ] + g = (x) -> sqrt(sum(x)) + dim = 2 + for order in orderrange + qr = QuadratureRule{RefTriangle}(rulename, order) + @test integrate(qr, g) - 0.4 < 0.01 + @test sum(qr.weights) ≈ ref_tet_vol(dim) + end end @test_throws ArgumentError QuadratureRule{RefTriangle}(:einstein, 2) @test_throws ArgumentError QuadratureRule{RefTriangle}(0) - dim = 3 - for order in (1, 2, 3, 4) - qr = QuadratureRule{RefTetrahedron}(:legendre, order) - # Table 1: - # http://www.m-hikari.com/ijma/ijma-2011/ijma-1-4-2011/venkateshIJMA1-4-2011.pdf - @test integrate(qr, g) - 0.14 < 0.01 - @test sum(qr.weights) ≈ ref_tet_vol(dim) + # Tetrahedron + # Table 1: + # http://www.m-hikari.com/ijma/ijma-2011/ijma-1-4-2011/venkateshIJMA1-4-2011.pdf + @testset "Exactness for integration on tetrahedra of $rulename" for (rulename, orderrange) in [ + (:jinyun, 1:3), + (:keast_minimal, 1:5), + (:keast_positive, 1:5) + ] + g = (x) -> sqrt(sum(x)) + dim = 3 + for order in orderrange + qr = QuadratureRule{RefTetrahedron}(rulename, order) + @test integrate(qr, g) - 0.14 < 0.01 + @test sum(qr.weights) ≈ ref_tet_vol(dim) + end end @test_throws ArgumentError QuadratureRule{RefTetrahedron}(:einstein, 2) @test_throws ArgumentError 
QuadratureRule{RefTetrahedron}(0) - @testset "Quadrature rules for $ref_cell" for ref_cell in ( + # Wedge + # ∫ √(x₁ + x₂) x₃² + @testset "Exactness for integration on prisms of $rulename" for (rulename, orderrange) in [ + (:polyquad, 1:10), + ] + g = (x) -> √(x[1] + x[2])*x[3]^2 + for order in 1:10 + qr = QuadratureRule{RefPrism}(:polyquad, order) + @test integrate(qr, g) - 2/15 < 0.01 + @test sum(qr.weights) ≈ ref_prism_vol() + end + end + @test_throws ArgumentError QuadratureRule{RefPrism}(:einstein, 2) + @test_throws ArgumentError QuadratureRule{RefPrism}(0) + + @testset "Generic quadrature rule properties for $ref_cell" for ref_cell in ( Line, Quadrilateral, Triangle, @@ -72,8 +100,8 @@ ccoords = getcoordinates(grid, cellid) Vec_t = Vec{dim,Float64} Vec_face_t = Vec{dim-1,Float64} - for lfaceid in nfaces(refshape) - facenodes = Ferrite.faces(cell)[lfaceid] + for lfaceid in Ferrite.nfacets(refshape) + facenodes = Ferrite.facets(cell)[lfaceid] fcoords = zeros(Vec_t, length(facenodes)) for (i,nodeid) in enumerate(facenodes) x = grid.nodes[nodeid].x @@ -83,16 +111,16 @@ ipface = Lagrange{getfacerefshape(cell,lfaceid),1}() ξface = rand(Vec_face_t)/4 - ξcell = Ferrite.face_to_element_transformation(ξface, refshape, lfaceid) + ξcell = Ferrite.facet_to_element_transformation(ξface, refshape, lfaceid) xface = zero(Vec_t) for i in 1:getnbasefunctions(ipface) - xface += Ferrite.shape_value(ipface, ξface, i) * fcoords[i] + xface += reference_shape_value(ipface, ξface, i) * fcoords[i] end xcell = zero(Vec_t) for i in 1:getnbasefunctions(ipcell) - xcell += shape_value(ipcell, ξcell, i) * ccoords[i] + xcell += reference_shape_value(ipcell, ξcell, i) * ccoords[i] end @test xcell ≈ xface @@ -100,12 +128,111 @@ end end - @testset "$ref_cell unknown face error path" begin + @testset "$ref_cell unknown facet error path" begin for face in (-1, 0, 100) - err = ArgumentError("unknown face number") + err = ArgumentError("unknown facet number") @test_throws err Ferrite.weighted_normal(Tensor{2,dim}(zeros(dim^2)), refshape, face) pt = Vec{dim-1, Float64}(ntuple(i -> 0.0, dim-1)) - @test_throws err Ferrite.face_to_element_transformation(pt, refshape, face) + @test_throws err Ferrite.facet_to_element_transformation(pt, refshape, face) + end + end + + @testset "Type checks for $refshape (T=$T)" for T in (Float32, Float64) + qr = QuadratureRule{refshape}(T, 1) + qrw = Ferrite.getweights(qr) + qrp = Ferrite.getpoints(qr) + @test qrw isa Vector + @test qrp isa Vector + @test eltype(qrw) === T + @test eltype(eltype(qrp)) === T + + sqr = QuadratureRule{refshape}( + SVector{length(qrw)}(qrw), SVector{length(qrp)}(qrp) + ) + sqrw = Ferrite.getweights(sqr) + sqrp = Ferrite.getpoints(sqr) + @test sqrw isa SVector + @test sqrp isa SVector + @test eltype(sqrw) === T + @test eltype(eltype(sqrp)) === T + + fqr = FacetQuadratureRule{refshape}(T, 1) + for f in 1:nfacets(refshape) + fqrw = Ferrite.getweights(fqr, f) + fqrp = Ferrite.getpoints(fqr, f) + @test fqrw isa Vector + @test fqrp isa Vector + @test eltype(fqrw) === T + @test eltype(eltype(fqrp)) === T + end + + function sqr_for_facet(fqr, f) + fqrw = Ferrite.getweights(fqr, f) + fqrp = Ferrite.getpoints(fqr, f) + return QuadratureRule{refshape}( + SVector{length(qrw)}(fqrw), + SVector{length(qrp)}(fqrp), + ) + end + + sfqr = FacetQuadratureRule( + ntuple(f->sqr_for_facet(fqr, f), nfacets(refshape)) + ) + for f in 1:nfacets(refshape) + sfqrw = Ferrite.getweights(sfqr,f) + sfqrp = Ferrite.getpoints(sfqr, f) + @test sfqrw isa SVector + @test sfqrp isa SVector + @test 
eltype(sfqrw) === T + @test eltype(eltype(sfqrp)) === T + end + + sfqr2 = FacetQuadratureRule( + [sqr_for_facet(fqr, f) for f in 1:nfacets(refshape)] + ) + for f in 1:nfacets(refshape) + sfqrw = Ferrite.getweights(sfqr2,f) + sfqrp = Ferrite.getpoints(sfqr2, f) + @test sfqrw isa SVector + @test sfqrp isa SVector + @test eltype(sfqrw) === T + @test eltype(eltype(sfqrp)) === T + end + end + end + + # Check explicitly if the defaults changed, as this might affect users negatively + @testset "Volume defaults for $refshape" for (refshape, sym) in ( + (RefLine, :legendre), + (RefQuadrilateral, :legendre), + (RefHexahedron, :legendre), + (RefTriangle, :dunavant), + (RefTetrahedron, :keast_minimal), + (RefPrism, :polyquad), + (RefPyramid, :polyquad), + ) + for order in 1:3 + qr = QuadratureRule{refshape}(sym, order) + qr_default = QuadratureRule{refshape}(order) + @test Ferrite.getweights(qr) == Ferrite.getweights(qr_default) + @test Ferrite.getpoints(qr) == Ferrite.getpoints(qr_default) + end + end + @testset "Facet defaults for $refshape" for (refshape, sym) in ( + # (RefLine, :legendre), # There is no choice for the rule on lines, as it only is a point eval + (RefQuadrilateral, :legendre), + (RefHexahedron, :legendre), + (RefTriangle, :legendre), + (RefTetrahedron, :dunavant), + # (RefPrism, ...), # Not implemented yet (see discussion in #1007) + # (RefPyramid, ...), # Not implement yet (see discussion in #1007) + ) + for order in 1:3 + fqr = FacetQuadratureRule{refshape}(sym, order) + fqr_default = FacetQuadratureRule{refshape}(order) + for f in 1:nfacets(refshape) + @test Ferrite.getweights(fqr,f) == Ferrite.getweights(fqr_default,f) + @test Ferrite.getpoints(fqr,f) == Ferrite.getpoints(fqr_default,f) end end end diff --git a/test/test_sparsity_patterns.jl b/test/test_sparsity_patterns.jl new file mode 100644 index 0000000000..d9f063d192 --- /dev/null +++ b/test/test_sparsity_patterns.jl @@ -0,0 +1,332 @@ +using Ferrite, Test, SparseArrays, Random + +# Minimal implementation of a custom sparsity pattern +struct TestPattern <: Ferrite.AbstractSparsityPattern + nrowscols::Tuple{Int, Int} + data::Vector{Vector{Int}} + function TestPattern(m::Int, n::Int) + return new((m, n), Vector{Int}[Int[] for _ in 1:m]) + end +end +Ferrite.getnrows(tp::TestPattern) = tp.nrowscols[1] +Ferrite.getncols(tp::TestPattern) = tp.nrowscols[2] +function Ferrite.add_entry!(tp::TestPattern, row::Int, col::Int) + if !(1 <= row <= tp.nrowscols[1] && 1 <= col <= tp.nrowscols[2]) + error("out of bounds") + end + r = tp.data[row] + k = searchsortedfirst(r, col) + if k == lastindex(r) + 1 || r[k] != col + insert!(r, k, col) + end + return +end +Ferrite.eachrow(tp::TestPattern) = tp.data +Ferrite.eachrow(tp::TestPattern, r::Int) = tp.data[r] + +function compare_patterns(p1, px...) + @test all(p -> Ferrite.getnrows(p1) == Ferrite.getnrows(p), px) + @test all(p -> Ferrite.getncols(p1) == Ferrite.getncols(p), px) + for rs in zip(Ferrite.eachrow.((p1, px...,))...) + for cs in zip(rs...) + @test all(c -> cs[1] == c, cs) + end + end +end + +# Compare the storage of SparseMatrixCSC +function compare_matrices(A1, Ax...) + @assert A1 isa SparseMatrixCSC + @assert length(Ax) > 0 + @assert all(A -> A isa SparseMatrixCSC, Ax) + @test all(A -> size(A1) == size(A), Ax) + @test all(A -> A1.colptr == A.colptr, Ax) + @test all(A -> A1.rowval == A.rowval, Ax) + return +end + +function is_stored(dsp::SparsityPattern, i, j) + return findfirst(k -> k == j, dsp.rows[i]) !== nothing +end + +@testset "SparsityPattern" begin + + # Ferrite.add_entry! 
+ for (m, n) in ((5, 5), (3, 5), (5, 3)) + dsp = SparsityPattern(m, n) + @test Ferrite.getnrows(dsp) == m + @test Ferrite.getncols(dsp) == n + for r in randperm(m), c in randperm(n) + @test !is_stored(dsp, r, c) + Ferrite.add_entry!(dsp, r, c) + @test is_stored(dsp, r, c) + end + A = allocate_matrix(dsp) + fill!(A.nzval, 1) + @test A == ones(m, n) + # Error paths + @test_throws BoundsError Ferrite.add_entry!(dsp, 0, 1) + @test_throws BoundsError Ferrite.add_entry!(dsp, 1, 0) + @test_throws BoundsError Ferrite.add_entry!(dsp, m+1, 1) + @test_throws BoundsError Ferrite.add_entry!(dsp, 1, n+1) + end + + function testdhch() + local grid, dh, ch + grid = generate_grid(Quadrilateral, (2, 1)) + dh = DofHandler(grid) + add!(dh, :v, Lagrange{RefQuadrilateral,1}()^2) + add!(dh, :s, Lagrange{RefQuadrilateral,1}()) + close!(dh) + ch = ConstraintHandler(dh) + add!(ch, Dirichlet(:v, getfacetset(grid, "left"), (x, t) -> 0, [2])) + add!(ch, Dirichlet(:s, getfacetset(grid, "left"), (x, t) -> 0)) + add!(ch, AffineConstraint(15, [1 => 0.5, 7 => 0.5], 0.0)) + close!(ch) + return dh, ch + end + + dh, ch = testdhch() + + # Mismatching size + + # Test show method + dsp = SparsityPattern(ndofs(dh), ndofs(dh)) + str = sprint(show, "text/plain", dsp) + @test contains(str, "$(ndofs(dh))×$(ndofs(dh))") + @test contains(str, r" - Sparsity: 100.0% \(0 stored entries\)$"m) + @test contains(str, r" - Entries per row \(min, max, avg\): 0, 0, 0.0$"m) + @test contains(str, r" - Memory estimate: .* used, .* allocated$"m) + add_sparsity_entries!(dsp, dh) + str = sprint(show, "text/plain", dsp) + @test contains(str, "$(ndofs(dh))×$(ndofs(dh))") + @test contains(str, r" - Sparsity: .*% \(252 stored entries\)$"m) + @test contains(str, r" - Entries per row \(min, max, avg\): 12, 18, 14\.0$"m) + @test contains(str, r" - Memory estimate: .* used, .* allocated$"m) + + # Test all the possible entrypoints using SparsityPattern with a DofHandler + compare_matrices( + # Reference matrix from COO representation + let I = Int[], J = Int[] + for c in CellIterator(dh) + for row in c.dofs, col in c.dofs + push!(I, row) + push!(J, col) + end + end + sparse(I, J, zeros(Float64, length(I)), ndofs(dh), ndofs(dh)) + end, + let + A = allocate_matrix(dh) + @test A isa SparseMatrixCSC{Float64, Int} + A + end, + let + A = allocate_matrix(SparseMatrixCSC{Float32, Int}, dh) + @test A isa SparseMatrixCSC{Float32, Int} + A + end, + let + dsp = init_sparsity_pattern(dh) + add_sparsity_entries!(dsp, dh) + @test dsp isa SparsityPattern + A = allocate_matrix(dsp) + @test A isa SparseMatrixCSC{Float64, Int} + A + end, + let + dsp = init_sparsity_pattern(dh) + add_sparsity_entries!(dsp, dh) + A = allocate_matrix(SparseMatrixCSC{Float32, Int32}, dsp) + @test A isa SparseMatrixCSC{Float32, Int32} + A + end, + let + dsp = SparsityPattern(ndofs(dh), ndofs(dh); nnz_per_row = 5) + allocate_matrix(add_sparsity_entries!(dsp, dh)) + end, + ) + + # Test entrypoints with a DofHandler + ConstraintHandler + compare_matrices( + let + A = allocate_matrix(dh, ch) + @test A isa SparseMatrixCSC{Float64, Int} + A + end, + let + A = allocate_matrix(SparseMatrixCSC{Float32, Int}, dh, ch) + @test A isa SparseMatrixCSC{Float32, Int} + A + end, + let + dsp = init_sparsity_pattern(dh) + add_sparsity_entries!(dsp, dh, ch) + @test dsp isa SparsityPattern + A = allocate_matrix(dsp) + @test A isa SparseMatrixCSC{Float64, Int} + A + end, + let + dsp = init_sparsity_pattern(dh) + add_sparsity_entries!(dsp, dh, ch) + A = allocate_matrix(SparseMatrixCSC{Float32, Int32}, dsp) + @test A isa 
SparseMatrixCSC{Float32, Int32} + A + end, + let + dsp = SparsityPattern(ndofs(dh), ndofs(dh)) + allocate_matrix(add_sparsity_entries!(dsp, dh, ch)) + end, + ) + + # Test entrypoints with a DofHandler + coupling + remove constrained + kwargs = (; coupling = [true true; false true], keep_constrained = false) + compare_matrices( + let + A = allocate_matrix(dh, ch; kwargs...) + @test A isa SparseMatrixCSC{Float64, Int} + A + end, + let + A = allocate_matrix(SparseMatrixCSC{Float32, Int}, dh, ch; kwargs...) + @test A isa SparseMatrixCSC{Float32, Int} + A + end, + let + dsp = init_sparsity_pattern(dh) + add_sparsity_entries!(dsp, dh, ch; kwargs...) + @test dsp isa SparsityPattern + A = allocate_matrix(dsp) + @test A isa SparseMatrixCSC{Float64, Int} + A + end, + let + dsp = init_sparsity_pattern(dh) + add_sparsity_entries!(dsp, dh, ch; kwargs...) + A = allocate_matrix(SparseMatrixCSC{Float32, Int32}, dsp) + @test A isa SparseMatrixCSC{Float32, Int32} + A + end, + let + dsp = SparsityPattern(ndofs(dh), ndofs(dh)) + allocate_matrix(add_sparsity_entries!(dsp, dh, ch; kwargs...)) + end, + ) + +end + +@testset "Sparsity pattern generics" begin + + # Test setup + grid = generate_grid(Hexahedron, (5, 5, 5)) + dh = DofHandler(grid) + add!(dh, :u, Lagrange{RefHexahedron, 2}()^3) + add!(dh, :p, Lagrange{RefHexahedron, 1}()) + close!(dh) + ch = ConstraintHandler(dh) + add!(ch, Dirichlet(:p, union(getfacetset.(Ref(grid), ("left", "right", "top", "bottom", "front", "back"),)...), x -> 0)) + add!(ch, PeriodicDirichlet(:u, collect_periodic_facets(grid))) + close!(ch) + + function make_patterns(dh) + nd = ndofs(dh) + tp = TestPattern(nd, nd) + sp = SparsityPattern(nd, nd) + bp = BlockSparsityPattern([nd ÷ 2, nd - nd ÷ 2]) + return tp, sp, bp + end + + # DofHandler + ps = make_patterns(dh) + for p in ps + add_sparsity_entries!(p, dh) + end + compare_patterns(ps...) + + # DofHandler + ConstraintHandler + ps = make_patterns(dh) + for p in ps + add_sparsity_entries!(p, dh, ch) + end + compare_patterns(ps...) + + # DofHandler + ConstraintHandler later + ps = make_patterns(dh) + for p in ps + add_sparsity_entries!(p, dh) + add_constraint_entries!(p, ch) + end + compare_patterns(ps...) + + # Individual pieces + ps = make_patterns(dh) + for p in ps + add_cell_entries!(p, dh) + add_constraint_entries!(p, ch) + end + compare_patterns(ps...) + + # Ignore constrained dofs + ps = make_patterns(dh) + for p in ps + add_sparsity_entries!(p, dh, ch; keep_constrained=false) + # Test that prescribed dofs only have diagonal entry + for row in ch.prescribed_dofs + r = Ferrite.eachrow(p, row) + col, state = iterate(r) + @test col == row + @test iterate(r, state) === nothing + end + end + compare_patterns(ps...) + + # Coupling + ps = make_patterns(dh) + for p in ps + add_sparsity_entries!(p, dh, ch; coupling = [true true; false true]) + end + compare_patterns(ps...) + ps = make_patterns(dh) + for p in ps + coupling = ones(Bool, 4, 4) + coupling[4, 1:3] .= false + add_sparsity_entries!(p, dh, ch; coupling = coupling) + end + compare_patterns(ps...) + ps = make_patterns(dh) + for p in ps + coupling = ones(Bool, ndofs_per_cell(dh), ndofs_per_cell(dh)) + coupling[1:2:(ndofs_per_cell(dh) ÷ 2), :] .= false + add_sparsity_entries!(p, dh, ch; coupling = coupling) + end + compare_patterns(ps...) 
+ + # Error paths + dh_open = DofHandler(grid) + for p in (SparsityPattern(2, 2), TestPattern(2, 2), BlockSparsityPattern([1, 1])) + @test_throws ErrorException("the DofHandler must be closed") add_sparsity_entries!(p, dh_open) + end + for p in (SparsityPattern(ndofs(dh), 2), TestPattern(ndofs(dh), 2), BlockSparsityPattern([2, 2])) + @test_throws ErrorException add_sparsity_entries!(p, dh) + end + for p in (SparsityPattern(2, ndofs(dh)), TestPattern(2, ndofs(dh))) + @test_throws ErrorException add_sparsity_entries!(p, dh) + end + patterns = ( + SparsityPattern(ndofs(dh), ndofs(dh)), + TestPattern(ndofs(dh), ndofs(dh)), + BlockSparsityPattern([ndofs(dh) ÷ 2, ndofs(dh) - ndofs(dh) ÷ 2]), + ) + for p in patterns + @test_throws ErrorException add_sparsity_entries!(p, dh; keep_constrained=false) + end + ch_open = ConstraintHandler(dh) + for p in patterns + @test_throws ErrorException add_sparsity_entries!(p, dh, ch_open; keep_constrained=false) + end + ch_bad = ConstraintHandler(close!(DofHandler(grid))) + for p in patterns + @test_throws ErrorException add_sparsity_entries!(p, dh, ch_bad; keep_constrained=false) + end +end diff --git a/test/test_utils.jl b/test/test_utils.jl index 140954df8b..22f88005d0 100644 --- a/test/test_utils.jl +++ b/test/test_utils.jl @@ -1,5 +1,7 @@ # Some utility functions for testing Ferrite.jl +using Ferrite: reference_shape_value + ##################################### # Volume for the reference elements # ##################################### @@ -12,12 +14,12 @@ reference_face_area(fs::VectorizedInterpolation, f::Int) = reference_face_area(f reference_face_area(fs::Interpolation{Ferrite.RefHypercube{dim}}, face::Int) where {dim} = 2^(dim-1) reference_face_area(fs::Interpolation{RefTriangle}, face::Int) = face == 1 ? sqrt(2) : 1.0 reference_face_area(fs::Interpolation{RefTetrahedron}, face::Int) = face == 3 ? 
sqrt(2 * 1.5) / 2.0 : 0.5 -function reference_face_area(fs::Interpolation{RefPrism}, face::Int) +function reference_face_area(fs::Interpolation{RefPrism}, face::Int) face == 4 && return √2 face ∈ [1,5] && return 0.5 face ∈ [2,3] && return 1.0 end -function reference_face_area(fs::Interpolation{RefPyramid}, face::Int) +function reference_face_area(fs::Interpolation{RefPyramid}, face::Int) face == 1 && return 1.0 face ∈ [2,3] && return 0.5 face ∈ [4,5] && return sqrt(2)/2 @@ -183,7 +185,7 @@ function calculate_volume(::Lagrange{RefHexahedron, 1}, x::Vector{Vec{3, T}}) wh end function calculate_volume(::Lagrange{RefPrism, order}, x::Vector{Vec{3, T}}) where {T, order} - vol = norm((x[4] - x[1]) ⋅ ((x[2] - x[1]) × (x[3] - x[1]))) / 2.0 + vol = norm((x[4] - x[1]) ⋅ ((x[2] - x[1]) × (x[3] - x[1]))) / 2.0 return vol end @@ -202,29 +204,29 @@ function calculate_volume(::Serendipity{RefQuadrilateral, 2}, x::Vector{Vec{2, T return vol end -function calculate_face_area(ip::Union{Lagrange{RefLine}, DiscontinuousLagrange{RefLine}}, x::Vector{<:Vec}, faceindex::Int) +function calculate_facet_area(ip::Union{Lagrange{RefLine}, DiscontinuousLagrange{RefLine}}, x::Vector{<:Vec}, faceindex::Int) return one(eltype(eltype(x))) end -function calculate_face_area(ip::Union{Lagrange{RefQuadrilateral, order}, DiscontinuousLagrange{RefQuadrilateral, order}}, x::Vector{<:Vec}, faceindex::Int) where order +function calculate_facet_area(ip::Union{Lagrange{RefQuadrilateral, order}, DiscontinuousLagrange{RefQuadrilateral, order}}, x::Vector{<:Vec}, faceindex::Int) where order return calculate_volume(Lagrange{RefLine, order}(), x) end -function calculate_face_area(ip::Union{Lagrange{RefTriangle, order}, DiscontinuousLagrange{RefTriangle, order}}, x::Vector{<:Vec}, faceindex::Int) where order +function calculate_facet_area(ip::Union{Lagrange{RefTriangle, order}, DiscontinuousLagrange{RefTriangle, order}}, x::Vector{<:Vec}, faceindex::Int) where order return calculate_volume(Lagrange{RefLine, order}(), x) end -function calculate_face_area(ip::Union{Lagrange{RefHexahedron, order}, DiscontinuousLagrange{RefHexahedron, order}}, x::Vector{<:Vec}, faceindex::Int) where order +function calculate_facet_area(ip::Union{Lagrange{RefHexahedron, order}, DiscontinuousLagrange{RefHexahedron, order}}, x::Vector{<:Vec}, faceindex::Int) where order return calculate_volume(Lagrange{RefQuadrilateral, order}(), x) end -function calculate_face_area(ip::Serendipity{RefQuadrilateral, order}, x::Vector{<:Vec}, faceindex::Int) where order +function calculate_facet_area(ip::Serendipity{RefQuadrilateral, order}, x::Vector{<:Vec}, faceindex::Int) where order return calculate_volume(Lagrange{RefLine, order}(), x) end -function calculate_face_area(p::Union{Lagrange{RefTetrahedron, order}, DiscontinuousLagrange{RefTetrahedron, order}}, x::Vector{<:Vec}, faceindex::Int) where order +function calculate_facet_area(p::Union{Lagrange{RefTetrahedron, order}, DiscontinuousLagrange{RefTetrahedron, order}}, x::Vector{<:Vec}, faceindex::Int) where order return calculate_volume(Lagrange{RefTriangle, order}(), x) end -function calculate_face_area(p::Union{Lagrange{RefPrism, order}, DiscontinuousLagrange{RefPrism, order}}, x::Vector{<:Vec}, faceindex::Int) where order +function calculate_facet_area(p::Union{Lagrange{RefPrism, order}, DiscontinuousLagrange{RefPrism, order}}, x::Vector{<:Vec}, faceindex::Int) where order faceindex ∈ [1,5] && return calculate_volume(Lagrange{RefTriangle, order}(), x) return calculate_volume(Lagrange{RefQuadrilateral, order}(), x) end 
-function calculate_face_area(p::Union{Lagrange{RefPyramid, order}, DiscontinuousLagrange{RefPyramid, order}}, x::Vector{<:Vec}, faceindex::Int) where order +function calculate_facet_area(p::Union{Lagrange{RefPyramid, order}, DiscontinuousLagrange{RefPyramid, order}}, x::Vector{<:Vec}, faceindex::Int) where order faceindex != 1 && return calculate_volume(Lagrange{RefTriangle, order}(), x) return calculate_volume(Lagrange{RefQuadrilateral, order}(), x) end @@ -251,30 +253,8 @@ coords_on_faces(x, ::Serendipity{RefHexahedron, 2}) = check_equal_or_nan(a::Any, b::Any) = a==b || (isnan(a) && isnan(b)) check_equal_or_nan(a::Union{Tensor, Array}, b::Union{Tensor, Array}) = all(check_equal_or_nan.(a, b)) -# Hypercube is simply ⨂ᵈⁱᵐ Line :) -sample_random_point(::Type{Ferrite.RefHypercube{ref_dim}}) where {ref_dim} = Vec{ref_dim}(2.0 .* rand(Vec{ref_dim}) .- 1.0) -# Dirichlet type sampling -function sample_random_point(::Type{Ferrite.RefSimplex{ref_dim}}) where {ref_dim} - ξ = rand(ref_dim+1) - ξₜ = -log.(ξ) - return Vec{ref_dim}(ntuple(i->ξₜ[i], ref_dim) ./ sum(ξₜ)) -end -# Wedge = Triangle ⊗ Line -function sample_random_point(::Type{RefPrism}) - (ξ₁, ξ₂) = sample_random_point(RefTriangle) - ξ₃ = rand(Float64) - return Vec{3}((ξ₁, ξ₂, ξ₃)) -end -# TODO what to do here? The samplig is not uniform... -function sample_random_point(::Type{RefPyramid}) - ξ₃ = (1-1e-3)*rand(Float64) # Derivative is discontinuous at the top - # If we fix a z coordinate we get a Quad with extends (1-ξ₃) - (ξ₁, ξ₂) = (1.0 - ξ₃) .* rand(Vec{2}) - return Vec{3}((ξ₁, ξ₂, ξ₃)) -end - ###################################################### -# Helpers for testing face_to_element_transformation # +# Helpers for testing facet_to_element_transformation # ###################################################### getfacerefshape(::Union{Quadrilateral, Triangle}, ::Int) = RefLine getfacerefshape(::Hexahedron, ::Int) = RefQuadrilateral @@ -282,6 +262,18 @@ getfacerefshape(::Tetrahedron, ::Int) = RefTriangle getfacerefshape(::Pyramid, face::Int) = face == 1 ? RefQuadrilateral : RefTriangle getfacerefshape(::Wedge, face::Int) = face ∈ (1,5) ? RefTriangle : RefQuadrilateral +function perturbate_standard_grid!(grid::Ferrite.AbstractGrid{dim}, strength) where dim + function perturbate(x::Vec{dim}) where dim + for d in 1:dim + if x[d] ≈ 1.0 || x[d] ≈ -1.0 + return x + end + end + return x + Vec{dim}(0.5*strength .* (2 .* rand(Vec{dim}) .- 1.0)) + end + transform_coordinates!(grid, perturbate) +end + ###################################################### # Dummy RefShape to test get_transformation_matrix # ###################################################### @@ -290,7 +282,58 @@ module DummyRefShapes struct RefDodecahedron <: Ferrite.AbstractRefShape{3} end function Ferrite.reference_faces(::Type{RefDodecahedron}) return ( - (1, 5, 4, 3, 2), + (1, 5, 4, 3, 2), ) end end + +# Hypercube is simply ⨂ᵈⁱᵐ Line :) +sample_random_point(::Type{Ferrite.RefHypercube{ref_dim}}) where {ref_dim} = Vec{ref_dim}(ntuple(_ -> 2.0*rand()-1.0, ref_dim)) + +# Dirichlet type sampling +# +# The idea behind this sampling is that the d-Simplex (i.e. a generalized triangle in d dimensions) +# is just a surface in d+1 dimensions, that can be characterized by two constraints: +# 1. All coordinates are between 0 and 1 +# 2. The sum of all coordinates is exactly 1 +# This way we can just sample from the d+1 dimensional hypercube, transform the hypercube +# logarithmically to get a "normal distribution" over our simplex and enforce that the coordinates +# sum to 1. 
By dropping the last coordinate in this sample we finally obtain d numbers which lies in +# the d-simplex. +# +# A nice geometric sketch of this process is given in this stackexchange post: https://stats.stackexchange.com/a/296779 +function sample_random_point(::Type{Ferrite.RefSimplex{ref_dim}}) where {ref_dim} # Note that "ref_dim = d" in the text above + ξₜ = ntuple(_ -> -log(rand()), ref_dim+1) + return Vec{ref_dim}(ntuple(i->ξₜ[i], ref_dim) ./ sum(ξₜ)) +end + +# Wedge = Triangle ⊗ Line +function sample_random_point(::Type{RefPrism}) + (ξ₁, ξ₂) = sample_random_point(RefTriangle) + ξ₃ = rand(Float64) + return Vec{3}((ξ₁, ξ₂, ξ₃)) +end + +# TODO what to do here? The samplig is not uniform... +function sample_random_point(::Type{RefPyramid}) + ξ₃ = (1-1e-3)*rand(Float64) # Derivative is discontinuous at the top + # If we fix a z coordinate we get a Quad with extends (1-ξ₃) + (ξ₁, ξ₂) = (1.0 - ξ₃) .* Vec{2}(ntuple(_ -> rand(), 2)) + return Vec{3}((ξ₁, ξ₂, ξ₃)) +end + +############################################################ +# Inverse parametric mapping ξ = ϕ(x) for testing hessians # +############################################################ +function function_value_from_physical_coord(interpolation::Interpolation, cell_coordinates, X::Vec{dim,T}, ue) where {dim,T} + n_basefuncs = getnbasefunctions(interpolation) + scalar_ip = interpolation isa Ferrite.ScalarInterpolation ? interpolation : interpolation.ip + @assert length(ue) == n_basefuncs + _, ξ = Ferrite.find_local_coordinate(scalar_ip, cell_coordinates, X, Ferrite.NewtonLineSearchPointFinder(residual_tolerance=1e-16)) + u = zero(reference_shape_value(interpolation, ξ, 1)) + for j in 1:n_basefuncs + N = reference_shape_value(interpolation, ξ, j) + u += N * ue[j] + end + return u +end diff --git a/test/test_vtk_export.jl b/test/test_vtk_export.jl new file mode 100644 index 0000000000..4d6a8f4612 --- /dev/null +++ b/test/test_vtk_export.jl @@ -0,0 +1,62 @@ +@testset "VTKGridFile" begin #TODO: Move all vtk tests here + @testset "show(::VTKGridFile)" begin + mktempdir() do tmp + grid = generate_grid(Quadrilateral, (2,2)) + vtk = VTKGridFile(joinpath(tmp, "showfile"), grid) + showstring_open = sprint(show, MIME"text/plain"(), vtk) + @test startswith(showstring_open, "VTKGridFile for the open file") + @test contains(showstring_open, "showfile.vtu") + close(vtk) + showstring_closed = sprint(show, MIME"text/plain"(), vtk) + @test startswith(showstring_closed, "VTKGridFile for the closed file") + @test contains(showstring_closed, "showfile.vtu") + end + end + @testset "cellcolors" begin + mktempdir() do tmp + grid = generate_grid(Quadrilateral, (4, 4)) + colors = create_coloring(grid) + fname = joinpath(tmp, "colors") + VTKGridFile(fname, grid) do vtk + Ferrite.write_cell_colors(vtk, grid, colors) + end + @test bytes2hex(open(SHA.sha1, fname*".vtu")) == "b804d0b064121b672d8e35bcff8446eda361cac3" + end + end + @testset "constraints" begin + mktempdir() do tmp + grid = generate_grid(Tetrahedron, (4, 4, 4)) + dh = DofHandler(grid) + add!(dh, :u, Lagrange{RefTetrahedron, 1}()) + close!(dh) + ch = ConstraintHandler(dh) + add!(ch, Dirichlet(:u, getfacetset(grid, "left"), x -> 0.0)) + addnodeset!(grid, "nodeset", x -> x[1] ≈ 1.0) + add!(ch, Dirichlet(:u, getnodeset(grid, "nodeset"), x -> 0.0)) + close!(ch) + fname = joinpath(tmp, "constraints") + VTKGridFile(fname, grid) do vtk + Ferrite.write_constraints(vtk, ch) + end + @test bytes2hex(open(SHA.sha1, fname*".vtu")) == "31b506bd9729b11992f8bcb79a2191eb65d223bf" + end + end + @testset 
"write_cellset" begin + # More tests in `test_grid_dofhandler_vtk.jl`, this just validates writing all sets in the grid + # which is not tested there, see https://github.com/Ferrite-FEM/Ferrite.jl/pull/948 + mktempdir() do tmp + grid = generate_grid(Quadrilateral, (2,2)) + addcellset!(grid, "set1", 1:2) + addcellset!(grid, "set2", 1:4) + manual = joinpath(tmp, "manual") + auto = joinpath(tmp, "auto") + VTKGridFile(manual, grid) do vtk + Ferrite.write_cellset(vtk, grid, keys(Ferrite.getcellsets(grid))) + end + VTKGridFile(auto, grid) do vtk + Ferrite.write_cellset(vtk, grid) + end + @test bytes2hex(open(SHA.sha1, manual*".vtu")) == bytes2hex(open(SHA.sha1, auto*".vtu")) + end + end +end