using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers,
PartitionedArrays, LSTO_Distributed, SparseMatricesCSR
using GridapSolvers: NewtonSolver
function main(mesh_partition,distribute,el_size,δₓ)
ranks = distribute(LinearIndices((prod(mesh_partition),)))
## Parameters
order = 1
xmax,ymax,zmax=(2.0,1.0,1.0)
dom = (0,xmax,0,ymax,0,zmax)
γ = 0.1
γ_reinit = 0.5
max_steps = floor(Int,minimum(el_size)/3)
tol = 1/(2order^2)/minimum(el_size)
η_coeff = 2
path = dirname(dirname(@__DIR__))*"/results/testing_hyper_elast"
i_am_main(ranks) && ~isdir(path) && mkdir(path)
## FE Setup
model = CartesianDiscreteModel(ranks,mesh_partition,dom,el_size)
Δ = get_Δ(model)
f_Γ_D(x) = (x[1] ≈ 0.0)
f_Γ_N(x) = (x[1] ≈ xmax)
update_labels!(1,model,f_Γ_D,"Gamma_D")
update_labels!(2,model,f_Γ_N,"Gamma_N")
## Triangulations and measures
Ω = Triangulation(model)
dΩ = Measure(Ω,2order)
## Spaces
reffe = ReferenceFE(lagrangian,VectorValue{3,Float64},order)
reffe_scalar = ReferenceFE(lagrangian,Float64,order)
V = TestFESpace(model,reffe;dirichlet_tags=["Gamma_D","Gamma_N"])
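# δₓ enters as a prescribed displacement on Gamma_N, i.e. the loading is displacement controlled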
U = TrialFESpace(V,[VectorValue(0.0,0.0,0.0),VectorValue(δₓ,0.0,0.0)])
V_φ = TestFESpace(model,reffe_scalar)
V_reg = TestFESpace(model,reffe_scalar;dirichlet_tags=["Gamma_N"])
U_reg = TrialFESpace(V_reg,0)
## Create FE functions
# φh = interpolate(gen_lsf(4,0.2),V_φ)
φh = interpolate(x->-1,V_φ) # constant level set (φ ≡ -1), so there is no zero isosurface/interface
## Interpolation and weak form
interp = SmoothErsatzMaterialInterpolation(η = η_coeff*maximum(Δ))
I,H,DH,ρ = interp.I,interp.H,interp.DH,interp.ρ
## Material properties and loading
_E = 1000
ν = 0.3
μ, λ = _E/(2*(1 + ν)), _E*ν/((1 + ν)*(1 - 2*ν))
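# With _E = 1000 and ν = 0.3 this gives μ ≈ 384.6 and λ ≈ 576.9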
## Saint Venant–Kirchhoff law
F(∇u) = one(∇u) + ∇u'                         # deformation gradient
E(F) = 0.5*( F' ⋅ F - one(F) )                # Green-Lagrange strain
Σ(∇u) = λ*tr(E(F(∇u)))*one(∇u)+2*μ*E(F(∇u))   # second Piola-Kirchhoff stress
T(∇u) = F(∇u) ⋅ Σ(∇u)                         # first Piola-Kirchhoff stress
res(u,v,φ,dΩ) = ∫((I ∘ φ)*((T ∘ ∇(u)) ⊙ ∇(v)))dΩ  # ersatz-interpolated residual
## Finite difference solver and level set function
stencil = AdvectionStencil(FirstOrderStencil(3,Float64),model,V_φ,tol,max_steps)
reinit!(stencil,φh,γ_reinit)
## Setup solver and FE operators
Tm = SparseMatrixCSR{0,PetscScalar,PetscInt}
Tv = Vector{PetscScalar}
lin_solver = ElasticitySolver(V)
nl_solver = NewtonSolver(lin_solver;maxiter=50,rtol=1e-8,verbose=i_am_main(ranks))
state_map = NonlinearFEStateMap(
res,U,V,V_φ,U_reg,φh,dΩ;
assem_U = SparseMatrixAssembler(Tm,Tv,U,V),
assem_adjoint = SparseMatrixAssembler(Tm,Tv,V,U),
assem_deriv = SparseMatrixAssembler(Tm,Tv,U_reg,U_reg),
nls = nl_solver, adjoint_ls = lin_solver
)
## Forward solve and output
u = LSTO_Distributed.forward_solve(state_map,φh)
uh = FEFunction(U,u)
write_vtk(Ω,path*"/struc_$δₓ",0,["phi"=>φh,"H(phi)"=>(H ∘ φh),"|nabla(phi))|"=>(norm ∘ ∇(φh)),"uh"=>uh];iter_mod=1)
end
with_mpi() do distribute
mesh_partition = (3,2,2)
el_size = (50,25,25)
hilb_solver_options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true
-ksp_converged_reason -ksp_rtol 1.0e-12 -mat_block_size 3
-mg_levels_ksp_type chebyshev -mg_levels_esteig_ksp_type cg -mg_coarse_sub_pc_type cholesky"
GridapPETSc.with(args=split(hilb_solver_options)) do
main(mesh_partition,distribute,el_size,0.02)
main(mesh_partition,distribute,el_size,0.05)
main(mesh_partition,distribute,el_size,0.1)
end
end
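Running this on 12 MPI ranks gives the following solver output: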
KSP Object: 12 MPI processes
type: cg
maximum iterations=200, initial guess is zero
tolerances: relative=1e-12, absolute=1e-50, divergence=10000.
left preconditioning
using DEFAULT norm type for convergence test
PC Object: 12 MPI processes
type: gamg
PC has not been set up so information may be incomplete
type is MULTIPLICATIVE, levels=0 cycles=unknown
Cycles per PCApply=0
Using externally compute Galerkin coarse grid matrices
GAMG specific options
Threshold for dropping small values in graph on each level =
Threshold scaling factor for each level not specified = 1.
AGG specific options
Number of levels to square graph 0
Number smoothing steps 1
Complexity: grid = 0. operator = 0.
linear system matrix = precond matrix:
Mat Object: 12 MPI processes
type: mpiaij
rows=99372, cols=99372
total: nonzeros=7537680, allocated nonzeros=7537680
total number of mallocs used during MatSetValues calls=0
has attached near null space
using I-node (on process 0) routines: found 2160 nodes, limit used is 5
KSP Object: 12 MPI processes
type: cg
maximum iterations=200, initial guess is zero
tolerances: relative=1e-12, absolute=1e-50, divergence=10000.
left preconditioning
using DEFAULT norm type for convergence test
PC Object: 12 MPI processes
type: gamg
PC has not been set up so information may be incomplete
type is MULTIPLICATIVE, levels=0 cycles=unknown
Cycles per PCApply=0
Using externally compute Galerkin coarse grid matrices
GAMG specific options
Threshold for dropping small values in graph on each level =
Threshold scaling factor for each level not specified = 1.
AGG specific options
Number of levels to square graph 0
Number smoothing steps 1
Complexity: grid = 0. operator = 0.
linear system matrix = precond matrix:
Mat Object: 12 MPI processes
type: mpiaij
rows=99372, cols=99372
total: nonzeros=7537680, allocated nonzeros=7537680
total number of mallocs used during MatSetValues calls=0
has attached near null space
using I-node (on process 0) routines: found 2160 nodes, limit used is 5
--------------- Starting Newton-Raphson solver --------
> Iteration 0 - Residuals: 1.04e+03, 1.00e+00
Linear solve did not converge due to DIVERGED_INDEFINITE_MAT iterations 3
> Iteration 1 - Residuals: 3.28e+02, 3.15e-01
Linear solve did not converge due to DIVERGED_INDEFINITE_MAT iterations 6
> Iteration 2 - Residuals: 2.05e+05, 1.97e+02
Linear solve did not converge due to DIVERGED_INDEFINITE_PC iterations 3
> Iteration 3 - Residuals: 2.13e+05, 2.05e+02
Linear solve did not converge due to DIVERGED_INDEFINITE_PC iterations 2
> Iteration 4 - Residuals: 2.17e+05, 2.09e+02
Linear solve did not converge due to DIVERGED_INDEFINITE_PC iterations 2
> Iteration 5 - Residuals: 1.70e+06, 1.64e+03
... (and so on until error)
My guess is that the tangent (Jacobian) of the Saint Venant–Kirchhoff residual loses positive definiteness (or symmetry) at these prescribed displacements, which would break both CG and GAMG since they assume a symmetric positive definite operator; the DIVERGED_INDEFINITE_MAT/DIVERGED_INDEFINITE_PC messages seem to point that way. I experimented with various PETSc settings and wasn't able to improve the situation.
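For reference, the next thing I plan to try is swapping the CG/GAMG pair inside Newton for a solver that does not require an SPD matrix. This is only a sketch (it assumes PETSc was built with MUMPS and that PETScLinearSolver picks the KSP/PC configuration up from the global options database); the hypothetical solver_options string would replace hilb_solver_options in the driver above:

# Inside main: use a generic PETSc solver instead of ElasticitySolver (CG/GAMG)
lin_solver = PETScLinearSolver()  # KSP/PC taken from the options passed to GridapPETSc.with
nl_solver = NewtonSolver(lin_solver;maxiter=50,rtol=1e-8,verbose=i_am_main(ranks))

# In the driver: GMRES with a direct LU (MUMPS) preconditioner, neither of which assumes SPD
solver_options = "-ksp_type gmres -pc_type lu -pc_factor_mat_solver_type mumps
-ksp_converged_reason -ksp_rtol 1.0e-12"
GridapPETSc.with(args=split(solver_options)) do
main(mesh_partition,distribute,el_size,0.02)
end

If Newton still blows up with this setup, that would suggest the problem is in the nonlinear iteration itself (e.g. the prescribed displacement increment δₓ being too large for a single load step) rather than in the linear solver.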