diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b18674..a150f63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,12 +1,14 @@ # Changelog -## [2.0.0] - Planned +## [2.0.0] - 2026-01-06 -### Breaking -- remove solver + precon API which is not based on precs or directly overloading `\`. +- Remove solver + precon API which is not based on precs or directly overloading `\`. Fully rely on LinearSolve (besides `\`) -- Move AMGBuilder, ILUZeroBuilder etc. to the corresponding packages (depending on the PRs) -- remove "old" SparseMatrixLNK (need to benchmark before) +- Move AMGBuilder etc to corresponding packages (depending on the PRs) +- Keep ILUZeroPreconBuilder, JacobiPreconBuilder +- ExtendableSparseMatrix is now GenericExtendableSparseMatrix{SparseMatrixLNK} +- Changes should be non-breaking if only ExtendableSparseMatrix was used, and no solvers or preconditioners + For those, see LinearSolve.jl. ## [1.7.0] - 2025-02-06 - Bump Pardiso to 1.0 and LinearSolve to 3.0 diff --git a/Project.toml b/Project.toml index 7003cc9..dbf3286 100644 --- a/Project.toml +++ b/Project.toml @@ -1,33 +1,55 @@ name = "ExtendableSparse" uuid = "95c220a8-a1cf-11e9-0c77-dbfce5f500b3" +version = "2.0.0" authors = ["Juergen Fuhrmann ", "Daniel Runge"] -version = "1.7.1" [deps] DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" ILUZero = "88f59080-6952-5380-9ea5-54057fb9a43f" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" +SciMLPublic = "431bcebd-1456-4ced-9d72-93c2757fff0b" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" Sparspak = "e56a9233-b9d6-4f03-8d0f-1825330902ac" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" -SuiteSparse = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [weakdeps] -AMGCLWrap = "4f76b812-4ba5-496d-b042-d70715554288" -AlgebraicMultigrid = "2169fc97-5a83-5252-b627-83903c6c433c" IncompleteLU = "40713840-3770-5561-ab4c-a76e7d0d7895" LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" -Pardiso = "46dd5b70-b6fb-5a00-ae2d-e8fea33afaf2" [extensions] -ExtendableSparseAMGCLWrapExt = "AMGCLWrap" -ExtendableSparseAlgebraicMultigridExt = "AlgebraicMultigrid" ExtendableSparseIncompleteLUExt = "IncompleteLU" -ExtendableSparsePardisoExt = "Pardiso" ExtendableSparseLinearSolveExt = "LinearSolve" +[compat] +AMGCLWrap = "2" +AlgebraicMultigrid = "0.4, 0.5, 0.6, 1" +Aqua = "0.8" +BenchmarkTools = "1" +ChunkSplitters = "2, 3" +DocStringExtensions = "0.8, 0.9" +ExplicitImports = "1" +ExtendableGrids = "1.9" +ForwardDiff = "0.10, 1" +ILUZero = "0.2" +IncompleteLU = "^0.2.1" +InteractiveUtils = "1.11.0" +IterativeSolvers = "0.9" +LinearAlgebra = "1.9" +LinearSolve = "2.36.0, 3.7.1" +Metis = "1" +MultiFloats = "1, 2" +OhMyThreads = "0.6, 0.7, 0.8" +Printf = "1.9" +Random = "1.9" +RecursiveFactorization = "0.2" +SciMLPublic = "1.0.1" +SparseArrays = "1.9" +Sparspak = "0.3.6" +StaticArrays = "1" +Test = "1.9" +julia = "1.9" + [extras] AMGCLWrap = "4f76b812-4ba5-496d-b042-d70715554288" AlgebraicMultigrid = "2169fc97-5a83-5252-b627-83903c6c433c" @@ -38,48 +60,15 @@ ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7" ExtendableGrids = "cfc395e8-590f-11e8-1f13-43a2532b2fa8" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" IncompleteLU = "40713840-3770-5561-ab4c-a76e7d0d7895" +InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" Metis = "2679e427-3c69-5b7f-982b-ece356f1e94b" 
MultiFloats = "bdf0d083-296b-4888-a5b6-7498122e68a5" OhMyThreads = "67456a42-1dca-4109-a031-0a68de7e3ad5" -Pardiso = "46dd5b70-b6fb-5a00-ae2d-e8fea33afaf2" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" RecursiveFactorization = "f2c3362d-daeb-58d1-803e-2bc74f2840b4" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -[compat] -AMGCLWrap = "2" -AlgebraicMultigrid = "0.4, 0.5, 0.6, 1" -DocStringExtensions = "0.8, 0.9" -ExtendableGrids = "1.9" -ILUZero = "0.2" -IncompleteLU = "^0.2.1" -IterativeSolvers = "0.9" -LinearSolve = "2.36.0, 3.7.1" -Pardiso = "0.5.1, 1" -Sparspak = "0.3.6" -StaticArrays = "1.5.24" -julia = "1.9" - [targets] -test = [ - "AMGCLWrap", - "AlgebraicMultigrid", - "Aqua", - "BenchmarkTools", - "ChunkSplitters", - "ExplicitImports", - "ExtendableGrids", - "ForwardDiff", - "IncompleteLU", - "IterativeSolvers", - "LinearSolve", - "Metis", - "MultiFloats", - "OhMyThreads", - "Pardiso", - "Random", - "RecursiveFactorization", - "Test", -] +test = ["AMGCLWrap", "AlgebraicMultigrid", "Aqua", "BenchmarkTools", "ChunkSplitters", "ExplicitImports", "ExtendableGrids", "ForwardDiff", "IncompleteLU", "IterativeSolvers", "LinearSolve", "Metis", "MultiFloats", "OhMyThreads", "Random", "RecursiveFactorization", "Test"] diff --git a/README.md b/README.md index d50e661..b7cb6a7 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![Build status](https://github.com/WIAS-PDELib/ExtendableSparse.jl/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/WIAS-PDELib/ExtendableSparse.jl/actions/workflows/ci.yml?query=branch%3Amaster) [![](https://img.shields.io/badge/docs-stable-blue.svg)](https://WIAS-PDELib.github.io/ExtendableSparse.jl/stable) [![](https://img.shields.io/badge/docs-dev-blue.svg)](https://WIAS-PDELib.github.io/ExtendableSparse.jl/dev) +[![Aqua QA](https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg)](https://github.com/JuliaTesting/Aqua.jl) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3530554.svg)](https://doi.org/10.5281/zenodo.3530554) [![code style: runic](https://img.shields.io/badge/code_style-%E1%9A%B1%E1%9A%A2%E1%9A%BE%E1%9B%81%E1%9A%B2-black)](https://github.com/fredrikekre/Runic.jl) @@ -42,7 +43,7 @@ With the help of [Sparspak.jl](https://github.com/PetrKryslUCSD/Sparspak.jl), th `sparse(A)` creates a standard `SparseMatrixCSC` from a filled `ExtendableSparseMatrix` which can be used e.g. to create preconditioners. So one can instead perform e.g. ``` -LinearSolve.solve(p, KrylovJL_CG(); Pl = ILUZero.ilu0(sparse(A))) +LinearSolve.solve(p, KrylovJL_CG(; precs=ILUZeroPreconBuilder())) ``` ## Rationale @@ -94,27 +95,6 @@ triggering two index searches, one for `getindex!` and another one for `setindex See [Julia issue #15630](https://github.com/JuliaLang/julia/issues/15630) for a discussion on this. -## Factorizations and Preconditioners - -The package provides a common API for factorizations and preconditioners supporting -series of solutions of similar problem as they occur during nonlinear and transient solves. -For details, see the [corresponding documentation](https://WIAS-PDELib.github.io/ExtendableSparse.jl/stable/iter/). - -With the advent of LinearSolve.jl, this functionality probably will be reduced to some core cases. - -### Interfaces to other packages - -The package directly provides interfaces to other sparse matrix solvers and preconditioners. Dependencies on these -packages are handled via [Requires.jl](https://github.com/JuliaPackaging/Requires.jl). 
-Currently, support includes: - - - [Pardiso.jl](https://github.com/JuliaSparse/Pardiso.jl) (both ["project Pardiso"](https://pardiso-project.org) - and [MKL Pardiso](https://software.intel.com/content/www/us/en/develop/documentation/onemkl-developer-reference-fortran/top/sparse-solver-routines/onemkl-pardiso-parallel-direct-sparse-solver-interface.html)) - - [IncompleteLU.jl](https://github.com/haampie/IncompleteLU.jl) - - [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl) (Ruge-Stüben AMG) - -For a similar approach, see [Preconditioners.jl](https://github.com/mohamed82008/Preconditioners.jl) - ## Alternatives You may also evaluate alternatives to this package: diff --git a/docs/Project.toml b/docs/Project.toml index 5a3ae26..88abc0b 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -7,12 +7,9 @@ DocumenterTools = "35a29f4d-8980-5a13-9543-d66fff28ecb8" ExtendableSparse = "95c220a8-a1cf-11e9-0c77-dbfce5f500b3" ILUZero = "88f59080-6952-5380-9ea5-54057fb9a43f" IncompleteLU = "40713840-3770-5561-ab4c-a76e7d0d7895" +InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" MultiFloats = "bdf0d083-296b-4888-a5b6-7498122e68a5" +SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" Sparspak = "e56a9233-b9d6-4f03-8d0f-1825330902ac" - -[compat] -Documenter = "1.0" -IterativeSolvers = "0.9" -LinearSolve = "2.36.0" diff --git a/docs/make.jl b/docs/make.jl index 8dce434..3fc273f 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,4 +1,4 @@ -using Documenter, ExtendableSparse, AlgebraicMultigrid, IncompleteLU, Sparspak, LinearAlgebra +using Documenter, ExtendableSparse, AlgebraicMultigrid, IncompleteLU, Sparspak, LinearAlgebra, SparseArrays, Base, InteractiveUtils function mkdocs() return makedocs(; @@ -10,13 +10,13 @@ function mkdocs() authors = "J. 
Fuhrmann", repo = "https://github.com/WIAS-PDELib/ExtendableSparse.jl", pages = [ - "Home" => "index.md", + "Home" => "home.md", "example.md", "extsparse.md", + "extensions.md", "linearsolve.md", - "internal.md", - "iter.md", - "changes.md", + "misc.md", + "index.md", ] ) end diff --git a/docs/src/extensions.md b/docs/src/extensions.md new file mode 100644 index 0000000..0724ed7 --- /dev/null +++ b/docs/src/extensions.md @@ -0,0 +1,26 @@ +# Matrix extensions + +## AbstractSparseMatrixExtension +```@docs +ExtendableSparse.AbstractSparseMatrixExtension +``` + +## SparseMatrixLNK + +```@autodocs +Modules = [ExtendableSparse] +Pages = ["sparsematrixlnk.jl"] +``` + +## SparseMatrixDILNKC + +```@autodocs +Modules = [ExtendableSparse] +Pages = ["sparsematrixdilnkc.jl"] +``` + +## SparseMatrixDict +```@autodocs +Modules = [ExtendableSparse] +Pages = ["sparsematrixdict.jl"] +``` diff --git a/docs/src/extsparse.md b/docs/src/extsparse.md index e756042..24c7b6f 100644 --- a/docs/src/extsparse.md +++ b/docs/src/extsparse.md @@ -1,20 +1,79 @@ -# Sparse matrix handling +# Extendable matrices +The type hierarchy of extendable matrices in this package is as follows: -## Matrix creation and update API +[`ExtendableSparse.AbstractExtendableSparseMatrixCSC`](@ref) `<: SparseArrays.AbstractSparseMatrixCSC <: SparseArrays.AbstractSparseMatrix <: AbstractMatrix` +The package defines two [subtypes](#Subtypes-of-AbstractExtendableSparseMatrixCSC) of [`ExtendableSparse.AbstractExtendableSparseMatrixCSC`](@ref) which are parametrized by types of [extension matrices](/extensions/#Matrix-extensions): +- [`ExtendableSparse.GenericExtendableSparseMatrixCSC`](@ref) for single threaded assembly +- [`ExtendableSparse.GenericMTExtendableSparseMatrixCSC`](@ref) for multithreaded assembly + +User facing defaults are defined by [type aliases](#Type-aliases): +- `const MTExtendableSparseMatrixCSC = GenericMTExtendableSparseMatrixCSC{SparseMatrixDILNKC}` +- `const STExtendableSparseMatrixCSC = GenericExtendableSparseMatrixCSC{SparseMatrixLNK}` +- `const ExtendableSparseMatrixCSC = STExtendableSparseMatrixCSC` +- `const ExtendableSparseMatrix = ExtendableSparseMatrixCSC` + +## Abstract type +```@docs +ExtendableSparse.AbstractExtendableSparseMatrixCSC +``` +## Subtypes of AbstractExtendableSparseMatrixCSC ```@docs +ExtendableSparse.GenericExtendableSparseMatrixCSC +ExtendableSparse.GenericMTExtendableSparseMatrixCSC +``` + +## Type aliases +```@docs +MTExtendableSparseMatrixCSC +STExtendableSparseMatrixCSC +ExtendableSparseMatrixCSC ExtendableSparseMatrix ``` -```@autodocs -Modules = [ExtendableSparse] -Pages = ["extendable.jl"] + +## Required methods +```@docs +SparseArrays.sparse +ExtendableSparse.rawupdateindex! +ExtendableSparse.flush! +ExtendableSparse.reset! ``` +## AbstractSparseMatrixCSC interface +See [SparseArrways#395](https://github.com/JuliaSparse/SparseArrays.jl/pull/395) for a discussion. + + ```@docs -ExtendableSparse.lu -LinearAlgebra.lu! +SparseArrays.nnz +SparseArrays.nonzeros +SparseArrays.rowvals +SparseArrays.findnz +SparseArrays.dropzeros! +SparseArrays.getcolptr +SparseArrays.SparseMatrixCSC +Base.size +Base.eltype +Base.show +``` + +## Linear Algebra +```@docs +Base.:\ LinearAlgebra.ldiv! +LinearAlgebra.mul! +LinearAlgebra.norm +LinearAlgebra.opnorm +LinearAlgebra.cond +LinearAlgebra.issymmetric +``` + +## Algebraic operations +```@docs +Base.:+ +Base.:- +Base.:* + ``` ## Handling of homogeneous Dirichlet BC @@ -23,10 +82,3 @@ mark_dirichlet eliminate_dirichlet! 
eliminate_dirichlet ``` - -## Test matrix creation - -```@autodocs -Modules = [ExtendableSparse] -Pages = ["sprand.jl"] -``` diff --git a/docs/src/home.md b/docs/src/home.md new file mode 100644 index 0000000..9632708 --- /dev/null +++ b/docs/src/home.md @@ -0,0 +1,3 @@ +```@docs +ExtendableSparse +``` diff --git a/docs/src/index.md b/docs/src/index.md index 9a475a5..de17f70 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,11 +1,3 @@ -````@eval -using Markdown -Markdown.parse(""" -$(read("../../README.md",String)) -""") -```` - - -## Index +# Index ```@index ``` diff --git a/docs/src/internal.md b/docs/src/internal.md deleted file mode 100644 index 477058c..0000000 --- a/docs/src/internal.md +++ /dev/null @@ -1,32 +0,0 @@ -# Internal API - -## Linked List Sparse Matrix format - -```@autodocs -Modules = [ExtendableSparse] -Pages = ["sparsematrixlnk.jl"] -``` - -## Some methods for SparseMatrixCSC - -```@autodocs -Modules = [ExtendableSparse] -Pages = ["sparsematrixcsc.jl"] -``` -## New API -Under development - aimed at multithreading -```@autodocs; canonical = false -Modules = [ExtendableSparse] -Pages = ["abstractsparsematrixextension.jl", - "abstractextendablesparsematrixcsc.jl", - "sparsematrixdilnkc.jl", - "genericextendablesparsematrixcsc.jl", - "genericmtextendablesparsematrixcsc.jl"] -``` - - -## Misc methods - -```@docs -ExtendableSparse.@makefrommatrix :: Tuple{Any} -``` diff --git a/docs/src/iter.md b/docs/src/iter.md deleted file mode 100644 index 3d90dd8..0000000 --- a/docs/src/iter.md +++ /dev/null @@ -1,165 +0,0 @@ -# Factorizations & Preconditioners - -This functionality is deprecated as of version 1.6 an will be removed -with version 2.0. Use the [integration with LinearSolve.jl](/linearsolve/#Integration-with-LinearSolve.jl). - - -## Factorizations - -In this package, preconditioners and LU factorizations are both seen -as complete or approximate _factorizations_. Correspondingly we provide a common API for them. - - -ExtendableSparse.AbstractLUFactorization -```@autodocs -Modules = [ExtendableSparse] -Pages = ["factorizations.jl"] -Order = [:function, :type] -Private = false -``` - -## LU Factorizations - -Handling of the LU factorizations is meant to support -a workflow where sequences of problems are solved based -on the same matrix, where one possibly wants to reuse -existing symbolic factorization data. - -The support comes in two flavors. - - - Using [`factorize!`](@ref) which can work as a drop-in replacement for `lu!`: - -```@example -using ExtendableSparse, LinearAlgebra -A = fdrand(20, 20, 1; matrixtype = ExtendableSparseMatrix) -n = size(A, 1) -b = rand(n) -factorization = SparspakLU() -factorize!(factorization, A) -nm1 = norm(factorization \ b) - -# mock update from Newton etc. -for i = 4:(n - 3) - A[i, i + 3] -= 1.0e-4 -end -factorize!(factorization, A) -nm2 = norm(factorization \ b) -nm1, nm2 -``` - - - Using [`update!`](@ref), where the matrix only needs to be given at construction time: - -```@example -using ExtendableSparse, LinearAlgebra -A = fdrand(20, 20, 1; matrixtype = ExtendableSparseMatrix) -n = size(A, 1) -b = rand(n) -factorization = CholeskyFactorization(A) -nm1 = norm(factorization \ b) - -# mock update from Newton etc. 
-for i = 4:(n - 3) - A[i, i + 3] -= 1.0e-4 - A[i - 3, i] -= 1.0e-4 -end -update!(factorization) -nm2 = norm(factorization \ b) -nm1, nm2 -``` - -```@autodocs -Modules = [ExtendableSparse] -Pages = ["umfpack_lu.jl", "sparspak.jl"] -``` - -```@docs -ExtendableSparse.AbstractLUFactorization -ExtendableSparse.CholeskyFactorization -Base.:\ -``` - -## Preconditioners - -The API is similar to that for LU factorizations. - -The support comes in two flavors. - - - Using [`factorize!`](@ref): - -```@example -using ExtendableSparse, LinearAlgebra -using IterativeSolvers, IncompleteLU -A = fdrand(20, 20, 1; matrixtype = ExtendableSparseMatrix) -n = size(A, 1) -b = rand(n) -preconditioner = ILUTPreconditioner(; droptol = 1.0e-2) -factorize!(preconditioner, A) - -# mock update from Newton etc. -nm1 = norm(bicgstabl(A, b, 1; Pl = preconditioner)) -for i = 4:(n - 3) - A[i, i + 3] -= 1.0e-4 -end -factorize!(preconditioner, A) -nm2 = norm(bicgstabl(A, b, 1; Pl = preconditioner)) -nm1, nm2 -``` - - - Using [`update!`](@ref): - -```@example -using ExtendableSparse, LinearAlgebra -using IterativeSolvers -A = fdrand(20, 20, 1; matrixtype = ExtendableSparseMatrix) -n = size(A, 1) -b = rand(n) -preconditioner = ILU0Preconditioner(A) -nm1 = norm(cg(A, b; Pl = preconditioner)) - -# mock update from Newton etc. -for i = 4:(n - 3) - A[i, i + 3] -= 1.0e-4 - A[i - 3, i] -= 1.0e-4 -end -update!(preconditioner) -nm2 = norm(cg(A, b; Pl = preconditioner)) -nm1, nm2 -``` - -```@docs -ExtendableSparse.AbstractPreconditioner -``` - -```@autodocs -Modules = [ExtendableSparse] -Pages = ["iluzero.jl","ilut.jl","amg.jl","blockpreconditioner.jl"] -``` - -```@docs -ExtendableSparse.AMGCL_AMGPreconditioner -ExtendableSparse.AMGCL_RLXPreconditioner -ExtendableSparse.SA_AMGPreconditioner -ExtendableSparse.RS_AMGPreconditioner -ILUTPreconditioner -``` - - -## Experimental/deprecated - - -```@docs -PardisoLU -MKLPardisoLU -ExtendableSparse.AMGPreconditioner -``` - - -```@autodocs -Modules = [ExtendableSparse] -Pages = ["jacobi.jl","parallel_jacobi.jl","ilu0.jl",] -``` - -## Iteration schemes -```@docs -ExtendableSparse.simple! -``` diff --git a/docs/src/linearsolve.md b/docs/src/linearsolve.md index 7dbf531..41d447b 100644 --- a/docs/src/linearsolve.md +++ b/docs/src/linearsolve.md @@ -6,7 +6,7 @@ The following example uses [`fdrand`](@ref) to create a test matrix and solve the corresponding linear system of equations. ```@example -using ExtendableSparse +using ExtendableSparse: ExtendableSparseMatrix, fdrand A = fdrand(10, 10, 10; matrixtype = ExtendableSparseMatrix) x = ones(1000) b = A * x @@ -18,7 +18,7 @@ This works as well for number types besides `Float64` and related, in this case, by default a LU factorization based on Sparspak is used. ```@example -using ExtendableSparse +using ExtendableSparse: ExtendableSparseMatrix, fdrand using MultiFloats A = fdrand(Float64x2, 10, 10, 10; matrixtype = ExtendableSparseMatrix) x = ones(Float64x2,1000) @@ -38,7 +38,7 @@ AbstractSparseMatrixCSC interface. 
The same problem can be solved via `LinearSolve.jl`: ```@example -using ExtendableSparse +using ExtendableSparse: ExtendableSparseMatrix, fdrand using LinearSolve A = fdrand(10, 10, 10; matrixtype = ExtendableSparseMatrix) x = ones(1000) @@ -48,7 +48,7 @@ sum(y) ``` ```@example -using ExtendableSparse +using ExtendableSparse: ExtendableSparseMatrix, fdrand using LinearSolve using MultiFloats A = fdrand(Float64x2, 10, 10, 10; matrixtype = ExtendableSparseMatrix) @@ -65,7 +65,7 @@ keyword argument](https://docs.sciml.ai/LinearSolve/stable/basics/Preconditioner to the iterative solver specification. ```@example -using ExtendableSparse +using ExtendableSparse: ExtendableSparseMatrix, fdrand using LinearSolve using ExtendableSparse: ILUZeroPreconBuilder A = fdrand(10, 10, 10; matrixtype = ExtendableSparseMatrix) @@ -86,8 +86,6 @@ from some other packages. In the future, these packages may provide this functio ```@docs ExtendableSparse.ILUZeroPreconBuilder ExtendableSparse.ILUTPreconBuilder -ExtendableSparse.SmoothedAggregationPreconBuilder -ExtendableSparse.RugeStubenPreconBuilder ``` In addition, ExtendableSparse implements some preconditioners: @@ -106,7 +104,7 @@ ExtendableSparse.LinearSolvePreconBuilder Block preconditioner constructors are provided as well -```@docs; canonical=false +```@docs ExtendableSparse.BlockPreconBuilder ``` @@ -114,9 +112,8 @@ ExtendableSparse.BlockPreconBuilder The example beloww shows how to create a block Jacobi preconditioner where the blocks are defined by even and odd degrees of freedom, and the diagonal blocks are solved using UMFPACK. ```@example -using ExtendableSparse using LinearSolve -using ExtendableSparse: LinearSolvePreconBuilder, BlockPreconBuilder +using ExtendableSparse: LinearSolvePreconBuilder, BlockPreconBuilder, ExtendableSparseMatrix, fdrand A = fdrand(10, 10, 10; matrixtype = ExtendableSparseMatrix) x = ones(1000) b = A * x @@ -128,24 +125,3 @@ sum(y) ``` `umpfackpreconbuilder` e.g. could be replaced by `SmoothedAggregationPreconBuilder()`. Moreover, this approach works for any `AbstractSparseMatrixCSC`. - - -## Deprecated API -Passing a preconditioner via the `Pl` or `Pr` keyword arguments -will be deprecated in LinearSolve. ExtendableSparse used to -export a number of wrappers for preconditioners from other packages -for this purpose. This approach is deprecated as of v1.6 and will be removed -with v2.0. - -```@example -using ExtendableSparse -using LinearSolve -using SparseArray -using ILUZero -A = fdrand(10, 10, 10; matrixtype = ExtendableSparseMatrix) -x = ones(1000) -b = A * x -y = LinearSolve.solve(LinearProblem(A, b), KrylovJL_CG(); - Pl = ILUZero.ilu0(SparseMatrixCSC(A))).u -sum(y) -``` diff --git a/docs/src/misc.md b/docs/src/misc.md new file mode 100644 index 0000000..bc2d616 --- /dev/null +++ b/docs/src/misc.md @@ -0,0 +1,12 @@ +# Misc methods +## Test matrix generation + +```@autodocs +Modules = [ExtendableSparse] +Pages = ["sprand.jl"] +``` +## Methods for SparseMatrixCSC +```@autodocs +Modules = [ExtendableSparse] +Pages = ["sparsematrixcsc.jl"] +``` diff --git a/ext/ExtendableSparseAMGCLWrapExt.jl b/ext/ExtendableSparseAMGCLWrapExt.jl deleted file mode 100644 index aded655..0000000 --- a/ext/ExtendableSparseAMGCLWrapExt.jl +++ /dev/null @@ -1,68 +0,0 @@ -# The whole extension is deprecated -# TODO remove in v2.0 -module ExtendableSparseAMGCLWrapExt -using ExtendableSparse -using AMGCLWrap - -import ExtendableSparse: @makefrommatrix, AbstractPreconditioner, update! 
- -############################################################################# -mutable struct AMGCL_AMGPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization::AMGCLWrap.AMGPrecon - kwargs - function ExtendableSparse.AMGCL_AMGPreconditioner(; kwargs...) - Base.depwarn( - "AMGCL_AMGPreconditioner() is deprecated. Use LinearSolve with `precs=AMGCLWrap.AMGPreconBuilder()` instead.", - :AMGCL_AMGPreconditioner - ) - precon = new() - precon.kwargs = kwargs - return precon - end -end - - -@eval begin - @makefrommatrix ExtendableSparse.AMGCL_AMGPreconditioner -end - -function update!(precon::AMGCL_AMGPreconditioner) - @inbounds flush!(precon.A) - return precon.factorization = AMGCLWrap.AMGPrecon(precon.A; precon.kwargs...) -end - -allow_views(::AMGCL_AMGPreconditioner) = true -allow_views(::Type{AMGCL_AMGPreconditioner}) = true - -############################################################################# -mutable struct AMGCL_RLXPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization::AMGCLWrap.RLXPrecon - kwargs - function ExtendableSparse.AMGCL_RLXPreconditioner(; kwargs...) - Base.depwarn( - "AMGCL_RLXPreconditioner() is deprecated. Use LinearSolve with `precs=AMGCLWrap.RLXPreconBuilder()` instead.", - :AMGCL_RLXPreconditioner - ) - precon = new() - precon.kwargs = kwargs - return precon - end -end - - -@eval begin - @makefrommatrix ExtendableSparse.AMGCL_RLXPreconditioner -end - -function update!(precon::AMGCL_RLXPreconditioner) - @inbounds flush!(precon.A) - return precon.factorization = AMGCLWrap.RLXPrecon(precon.A; precon.kwargs...) -end - -allow_views(::AMGCL_RLXPreconditioner) = true -allow_views(::Type{AMGCL_RLXPreconditioner}) = true - - -end diff --git a/ext/ExtendableSparseAlgebraicMultigridExt.jl b/ext/ExtendableSparseAlgebraicMultigridExt.jl deleted file mode 100644 index 2dffa6a..0000000 --- a/ext/ExtendableSparseAlgebraicMultigridExt.jl +++ /dev/null @@ -1,117 +0,0 @@ -module ExtendableSparseAlgebraicMultigridExt -using ExtendableSparse -using AlgebraicMultigrid: AlgebraicMultigrid, ruge_stuben, smoothed_aggregation, aspreconditioner -using SparseArrays: SparseMatrixCSC, AbstractSparseMatrixCSC -using LinearAlgebra: I - - -import ExtendableSparse: SmoothedAggregationPreconBuilder -import ExtendableSparse: RugeStubenPreconBuilder - -(b::SmoothedAggregationPreconBuilder)(A::AbstractSparseMatrixCSC, p) = (aspreconditioner(smoothed_aggregation(SparseMatrixCSC(A), Val{b.blocksize}; b.kwargs...)), I) -(b::RugeStubenPreconBuilder)(A::AbstractSparseMatrixCSC, p) = (aspreconditioner(ruge_stuben(SparseMatrixCSC(A), Val{b.blocksize}; b.kwargs...)), I) - - -#### -# Deprecated from here on -# TODO remove in v2.0 - -import ExtendableSparse: @makefrommatrix, AbstractPreconditioner, update! - -###################################################################################### -rswarned = false - -mutable struct RS_AMGPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization::AlgebraicMultigrid.Preconditioner - kwargs - blocksize - function ExtendableSparse.RS_AMGPreconditioner(blocksize = 1; kwargs...) - global rswarned - if !rswarned - @warn "RS_AMGPreconditioner is deprecated. 
Use LinearSolve with `precs=RugeStubenPreconBuilder()` instead" - rswarned = true - end - precon = new() - precon.kwargs = kwargs - precon.blocksize = blocksize - return precon - end -end - - -@eval begin - @makefrommatrix ExtendableSparse.RS_AMGPreconditioner -end - -function update!(precon::RS_AMGPreconditioner) - @inbounds flush!(precon.A) - return precon.factorization = AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(precon.A.cscmatrix, Val{precon.blocksize}; precon.kwargs...)) -end - -allow_views(::RS_AMGPreconditioner) = true -allow_views(::Type{RS_AMGPreconditioner}) = true - - -###################################################################################### -sawarned = false -mutable struct SA_AMGPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization::AlgebraicMultigrid.Preconditioner - kwargs - blocksize - function ExtendableSparse.SA_AMGPreconditioner(blocksize = 1; kwargs...) - global sawarned - if !sawarned - @warn "SA_AMGPreconditioner is deprecated. Use LinearSolve with `precs=SmoothedAggregationPreconBuilder()` instead" - sawarned = true - end - precon = new() - precon.kwargs = kwargs - precon.blocksize = blocksize - return precon - end -end - - -@eval begin - @makefrommatrix ExtendableSparse.SA_AMGPreconditioner -end - -function update!(precon::SA_AMGPreconditioner) - @inbounds flush!(precon.A) - return precon.factorization = AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.smoothed_aggregation(precon.A.cscmatrix, Val{precon.blocksize}; precon.kwargs...)) -end - -allow_views(::SA_AMGPreconditioner) = true -allow_views(::Type{SA_AMGPreconditioner}) = true - -###################################################################################### -# deprecated -# mutable struct AMGPreconditioner <: AbstractPreconditioner -# A::ExtendableSparseMatrix -# factorization::AlgebraicMultigrid.Preconditioner -# max_levels::Int -# max_coarse::Int -# function ExtendableSparse.AMGPreconditioner(; max_levels = 10, max_coarse = 10) -# precon = new() -# precon.max_levels = max_levels -# precon.max_coarse = max_coarse -# precon -# end -# end - - -# @eval begin -# @makefrommatrix ExtendableSparse.AMGPreconditioner -# end - -# function update!(precon::AMGPreconditioner) -# @inbounds flush!(precon.A) -# precon.factorization = AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(precon.A.cscmatrix)) -# end - -# allow_views(::AMGPreconditioner)=true -# allow_views(::Type{AMGPreconditioner})=true - -end diff --git a/ext/ExtendableSparseIncompleteLUExt.jl b/ext/ExtendableSparseIncompleteLUExt.jl index 1a406a4..53df7ac 100644 --- a/ext/ExtendableSparseIncompleteLUExt.jl +++ b/ext/ExtendableSparseIncompleteLUExt.jl @@ -9,36 +9,4 @@ import ExtendableSparse: ILUTPreconBuilder (b::ILUTPreconBuilder)(A::AbstractSparseMatrixCSC, p) = (IncompleteLU.ilu(SparseMatrixCSC(A); τ = b.droptol), I) -import ExtendableSparse: @makefrommatrix, AbstractPreconditioner, update! - - -# Deprecated from here -warned = false -mutable struct ILUTPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization::IncompleteLU.ILUFactorization - droptol::Float64 - function ExtendableSparse.ILUTPreconditioner(; droptol = 1.0e-3) - global warned - if !warned - @warn "ILUTPreconditioner is deprecated. 
Use LinearSolve with `precs=ILUTPreconBuilder()` instead" - warned = true - end - p = new() - p.droptol = droptol - return p - end -end - - -@eval begin - @makefrommatrix ExtendableSparse.ILUTPreconditioner -end - -function update!(precon::ILUTPreconditioner) - A = precon.A - @inbounds flush!(A) - return precon.factorization = IncompleteLU.ilu(A.cscmatrix; τ = precon.droptol) -end - end diff --git a/ext/ExtendableSparsePardisoExt.jl b/ext/ExtendableSparsePardisoExt.jl deleted file mode 100644 index 8582cca..0000000 --- a/ext/ExtendableSparsePardisoExt.jl +++ /dev/null @@ -1,112 +0,0 @@ -module ExtendableSparsePardisoExt -using ExtendableSparse -using LinearAlgebra -using Pardiso -# Deprecated -import ExtendableSparse: @makefrommatrix, update!, AbstractLUFactorization - -abstract type AbstractPardisoLU <: AbstractLUFactorization end - -if Pardiso.PARDISO_LOADED[] - mutable struct PardisoLU <: AbstractPardisoLU - A::Union{ExtendableSparseMatrix, Nothing} - ps::Pardiso.PardisoSolver - phash::UInt64 - iparm::Union{Vector{Int}, Nothing} - dparm::Union{Vector{Float64}, Nothing} - mtype::Union{Int, Nothing} - end - - function ExtendableSparse.PardisoLU(; iparm = nothing, dparm = nothing, mtype = nothing) - return fact = PardisoLU(nothing, Pardiso.PardisoSolver(), 0, iparm, dparm, mtype) - end - -end - -############################################################################################# -mutable struct MKLPardisoLU <: AbstractPardisoLU - A::Union{ExtendableSparseMatrix, Nothing} - ps::Pardiso.MKLPardisoSolver - phash::UInt64 - iparm::Union{Vector{Int}, Nothing} - dparm::Nothing - mtype::Union{Int, Nothing} -end - -function ExtendableSparse.MKLPardisoLU(; iparm = nothing, mtype = nothing) - return fact = MKLPardisoLU(nothing, Pardiso.MKLPardisoSolver(), 0, iparm, nothing, mtype) -end - - -########################################################################################## -function default_initialize!(Tv, fact::AbstractPardisoLU) - iparm = fact.iparm - dparm = fact.dparm - mtype = fact.mtype - # if !isnothing(mtype) - # my_mtype=mtype fix this! 
- # else - - if Tv <: Complex - my_mtype = Pardiso.COMPLEX_NONSYM - else - my_mtype = Pardiso.REAL_NONSYM - end - - Pardiso.set_matrixtype!(fact.ps, my_mtype) - - if !isnothing(iparm) - for i in 1:min(length(iparm), length(fact.ps.iparm)) - Pardiso.set_iparm!(fact.ps, i, iparm[i]) - end - end - - if !isnothing(dparm) - for i in 1:min(length(dparm), length(fact.ps.dparm)) - Pardiso.set_dparm!(fact.ps, i, dparm[i]) - end - end - return fact -end - -function update!(lufact::AbstractPardisoLU) - ps = lufact.ps - flush!(lufact.A) - Acsc = lufact.A.cscmatrix - Tv = eltype(Acsc) - if lufact.phash != lufact.A.phash - default_initialize!(Tv, lufact) - Pardiso.set_phase!(ps, Pardiso.RELEASE_ALL) - Pardiso.pardiso(ps, Tv[], Acsc, Tv[]) - Pardiso.set_phase!(ps, Pardiso.ANALYSIS_NUM_FACT) - lufact.phash = lufact.A.phash - else - Pardiso.set_phase!(ps, Pardiso.NUM_FACT) - end - Pardiso.fix_iparm!(ps, :N) - Pardiso.pardiso(ps, Tv[], Acsc, Tv[]) - return lufact -end - -function LinearAlgebra.ldiv!( - u::AbstractVector, - lufact::AbstractPardisoLU, - v::AbstractVector - ) - ps = lufact.ps - Acsc = lufact.A.cscmatrix - Pardiso.set_phase!(ps, Pardiso.SOLVE_ITERATIVE_REFINE) - Pardiso.pardiso(ps, u, Acsc, v) - return u -end - -function LinearAlgebra.ldiv!(fact::AbstractPardisoLU, v::AbstractVector) - return ldiv!(v, fact, copy(v)) -end - -@eval begin - @makefrommatrix ExtendableSparse.PardisoLU - @makefrommatrix ExtendableSparse.MKLPardisoLU -end - -end diff --git a/src/ExtendableSparse.jl b/src/ExtendableSparse.jl index 5d028b7..00ddbd7 100644 --- a/src/ExtendableSparse.jl +++ b/src/ExtendableSparse.jl @@ -1,240 +1,53 @@ +""" + ExtendableSparse + +$(read(joinpath(@__DIR__, "..", "README.md"), String)) +""" module ExtendableSparse -using DocStringExtensions: DocStringExtensions, SIGNATURES, TYPEDEF, TYPEDFIELDS -using ILUZero: ILUZero, ldiv!, nnz -using LinearAlgebra: LinearAlgebra, Diagonal, Hermitian, Symmetric, Tridiagonal, - cholesky, cholesky!, convert, lu!, mul!, norm, transpose -using SparseArrays: SparseArrays, AbstractSparseMatrix, SparseMatrixCSC, - dropzeros!, findnz, nzrange, sparse, spzeros -using Sparspak: Sparspak, sparspaklu, sparspaklu! +using DocStringExtensions: DocStringExtensions, SIGNATURES, TYPEDEF, TYPEDFIELDS, TYPEDSIGNATURES +using ILUZero: ILUZero +using LinearAlgebra: LinearAlgebra, Diagonal, Hermitian, Symmetric, Tridiagonal, convert, mul!, ldiv! +using SparseArrays: SparseArrays, AbstractSparseMatrix, AbstractSparseMatrixCSC, SparseMatrixCSC +using SparseArrays: dropzeros!, findnz, nzrange, sparse, spzeros, rowvals, getcolptr, nonzeros, nnz +using Sparspak: sparspaklu +using SciMLPublic: @public using StaticArrays: StaticArrays, SMatrix, SVector -using SuiteSparse: SuiteSparse -import SparseArrays: AbstractSparseMatrixCSC, rowvals, getcolptr, nonzeros # Define our own constant here in order to be able to # test things at least a little bit.. 
const USE_GPL_LIBS = Base.USE_GPL_LIBS -if USE_GPL_LIBS - using SuiteSparse -end - - -include("compat.jl") # @public - -include("matrix/sparsematrixcsc.jl") -include("matrix/abstractsparsematrixextension.jl") -include("matrix/sparsematrixlnk.jl") -include("matrix/sparsematrixdilnkc.jl") -include("matrix/abstractextendablesparsematrixcsc.jl") -include("matrix/extendable.jl") -include("matrix/genericmtextendablesparsematrixcsc.jl") -include("matrix/genericextendablesparsematrixcsc.jl") - -""" - ExtendableSparseMatrix - -Aliased to [`ExtendableSparseMatrixCSC`](@ref) -""" -const ExtendableSparseMatrix = ExtendableSparseMatrixCSC - - -""" - MTExtendableSparseMatrixCSC - -Multithreaded extendable sparse matrix (Experimental). - -Aliased to [`GenericMTExtendableSparseMatricCSC`](@ref) with [`SparseMatrixDILNKC`](@ref) -scalar matrix parameter. -""" -const MTExtendableSparseMatrixCSC{Tv, Ti} = GenericMTExtendableSparseMatrixCSC{SparseMatrixDILNKC{Tv, Ti}, Tv, Ti} -MTExtendableSparseMatrixCSC(m, n, args...) = MTExtendableSparseMatrixCSC{Float64, Int64}(m, n, args...) - -""" - STExtendableSparseMatrixCSC - -Single threaded extendable sparse matrix (Experimental). -Aliased to [`GenericExtendableSparseMatricCSC`](@ref) with [`SparseMatrixDILNKC`](@ref) -scalar matrix parameter. -""" -const STExtendableSparseMatrixCSC{Tv, Ti} = GenericExtendableSparseMatrixCSC{SparseMatrixDILNKC{Tv, Ti}, Tv, Ti} -STExtendableSparseMatrixCSC(m, n, args...) = STExtendableSparseMatrixCSC{Float64, Int64}(m, n, args...) +include("sparsematrixcsc.jl") +include("abstractsparsematrixextension.jl") +include("sparsematrixlnk.jl") +include("sparsematrixdilnkc.jl") +include("sparsematrixdict.jl") +include("abstractextendablesparsematrixcsc.jl") +include("genericmtextendablesparsematrixcsc.jl") +include("genericextendablesparsematrixcsc.jl") +include("typealiases.jl") -export ExtendableSparseMatrixCSC, MTExtendableSparseMatrixCSC, STExtendableSparseMatrixCSC, GenericMTExtendableSparseMatrixCSC -export SparseMatrixLNK, ExtendableSparseMatrix, flush!, nnz, updateindex!, rawupdateindex!, colptrs, sparse, reset!, nnznew -export partitioning! +@public AbstractExtendableSparseMatrixCSC, AbstractSparseMatrixExtension +@public GenericExtendableSparseMatrixCSC, GenericMTExtendableSparseMatrixCSC +export MTExtendableSparseMatrixCSC, STExtendableSparseMatrixCSC +export ExtendableSparseMatrix, ExtendableSparseMatrixCSC +export flush!, updateindex!, rawupdateindex!, reset!, nnznew +@public partitioning! export eliminate_dirichlet, eliminate_dirichlet!, mark_dirichlet -include("factorizations/factorizations.jl") - -#include("experimental/Experimental.jl") - -include("factorizations/simple_iteration.jl") -export simple, simple! include("preconbuilders.jl") export LinearSolvePreconBuilder, BlockPreconBuilder, JacobiPreconBuilder +@public ILUZeroPreconBuilder, ILUTPreconBuilder -@public ILUZeroPreconBuilder, ILUTPreconBuilder, SmoothedAggregationPreconBuilder, RugeStubenPreconBuilder - - -include("matrix/sprand.jl") -export sprand!, sprand_sdd!, fdrand, fdrand!, fdrand_coo, solverbenchmark - -export rawupdateindex!, updateindex! - - -##### -# All of the following is deprecated in favor of the precs bases -# API - -export JacobiPreconditioner, - ILU0Preconditioner, - ILUZeroPreconditioner, - PointBlockILUZeroPreconditioner, - ParallelJacobiPreconditioner, - ParallelILU0Preconditioner, - BlockPreconditioner, allow_views - -export AbstractFactorization, LUFactorization, CholeskyFactorization, SparspakLU -export issolver -export factorize!, update! 
- -""" -``` -ILUTPreconditioner(;droptol=1.0e-3) -ILUTPreconditioner(matrix; droptol=1.0e-3) -``` - -Create the [`ILUTPreconditioner`](@ref) wrapping the one -from [IncompleteLU.jl](https://github.com/haampie/IncompleteLU.jl) -For using this, you need to issue `using IncompleteLU`. -""" -function ILUTPreconditioner end -export ILUTPreconditioner - -""" -``` -AMGPreconditioner(;max_levels=10, max_coarse=10) -AMGPreconditioner(matrix;max_levels=10, max_coarse=10) -``` - -Create the [`AMGPreconditioner`](@ref) wrapping the Ruge-Stüben AMG preconditioner from [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl) - -!!! warning - Deprecated in favor of [`RS_AMGPreconditioner`](@ref) - -""" -function AMGPreconditioner end -export AMGPreconditioner - -@deprecate AMGPreconditioner() RS_AMGPreconditioner() -@deprecate AMGPreconditioner(A) RS_AMGPreconditioner(A) - -""" -``` -RS_AMGPreconditioner(;kwargs...) -RS_AMGPreconditioner(matrix;kwargs...) -``` - -Create the [`RS_AMGPreconditioner`](@ref) wrapping the Ruge-Stüben AMG preconditioner from [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl) -For `kwargs` see there. -""" -function RS_AMGPreconditioner end -export RS_AMGPreconditioner - - -""" -``` -SA_AMGPreconditioner(;kwargs...) -SA_AMGPreconditioner(matrix;kwargs...) -``` - -Create the [`SA_AMGPreconditioner`](@ref) wrapping the smoothed aggregation AMG preconditioner from [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl) -For `kwargs` see there. -""" -function SA_AMGPreconditioner end -export SA_AMGPreconditioner - - -""" -``` -AMGCL_AMGPreconditioner(;kwargs...) -AMGCL_AMGPreconditioner(matrix;kwargs...) -``` - -Create the [`AMGCL_AMGPreconditioner`](@ref) wrapping AMG preconditioner from [AMGCLWrap.jl](https://github.com/j-fu/AMGCLWrap.jl) -For `kwargs` see there. -""" -function AMGCL_AMGPreconditioner end -export AMGCL_AMGPreconditioner - - -""" -``` -AMGCL_RLXPreconditioner(;kwargs...) -AMGCL_RLXPreconditioner(matrix;kwargs...) -``` - -Create the [`AMGCL_RLXPreconditioner`](@ref) wrapping RLX preconditioner from [AMGCLWrap.jl](https://github.com/j-fu/AMGCLWrap.jl) -""" -function AMGCL_RLXPreconditioner end -export AMGCL_RLXPreconditioner - - -""" -``` -PardisoLU(;iparm::Vector, - dparm::Vector, - mtype::Int) - -PardisoLU(matrix; iparm,dparm,mtype) -``` - -LU factorization based on pardiso. For using this, you need to issue `using Pardiso` -and have the pardiso library from [pardiso-project.org](https://pardiso-project.org) -[installed](https://github.com/JuliaSparse/Pardiso.jl#pardiso-60). - -The optional keyword arguments `mtype`, `iparm` and `dparm` are -[Pardiso internal parameters](https://github.com/JuliaSparse/Pardiso.jl#readme). - -Forsetting them, one can also access the `PardisoSolver` e.g. like -``` -using Pardiso -plu=PardisoLU() -Pardiso.set_iparm!(plu.ps,5,13.0) -``` -""" -function PardisoLU end -export PardisoLU - - -""" -``` -MKLPardisoLU(;iparm::Vector, mtype::Int) - -MKLPardisoLU(matrix; iparm, mtype) -``` - -LU factorization based on pardiso. For using this, you need to issue `using Pardiso`. -This version uses the early 2000's fork in Intel's MKL library. - -The optional keyword arguments `mtype` and `iparm` are -[Pardiso internal parameters](https://github.com/JuliaSparse/Pardiso.jl#readme). - -For setting them you can also access the `PardisoSolver` e.g. 
like -``` -using Pardiso -plu=MKLPardisoLU() -Pardiso.set_iparm!(plu.ps,5,13.0) -``` -""" -function MKLPardisoLU end -export MKLPardisoLU end # module diff --git a/src/matrix/abstractextendablesparsematrixcsc.jl b/src/abstractextendablesparsematrixcsc.jl similarity index 60% rename from src/matrix/abstractextendablesparsematrixcsc.jl rename to src/abstractextendablesparsematrixcsc.jl index c88770b..2bcf273 100644 --- a/src/matrix/abstractextendablesparsematrixcsc.jl +++ b/src/abstractextendablesparsematrixcsc.jl @@ -1,33 +1,106 @@ """ + abstract type AbstractExtendableSparseMatrixCSC{Tv, Ti} <: AbstractSparseMatrixCSC{Tv, Ti} end + +Abstract supertype for extendable CSC matrices. It implements what is being discussed +as the "AbstractSparseMatrixCSC interface". Subtypes must implement: -- SparseArrays.sparse (may be should be sparse! ?) flush+return SparseMatrixCSC +- `SparseArrays.sparse`: flush+return SparseMatrixCSC - Constructor from SparseMatrixCSC -rawupdateindex! -reset!: empty all internals, just keep size -""" +- [`rawupdateindex!`](@ref) +- [`reset!`](@ref): empty all internals, just keep size +- [`flush!`](@ref): (re)build SparseMatrixCSC, incorporate new entries +Subtypes of this type would contain a SparseMatrixCSC which is used in linear algebra +operations. In addition they would contain data structures for efficiently adding new entries, +like instances or vectors of instances of subtypes of [`AbstractSparseMatrixExtension`](@ref). +""" abstract type AbstractExtendableSparseMatrixCSC{Tv, Ti} <: AbstractSparseMatrixCSC{Tv, Ti} end """ -$(SIGNATURES) + SparseArrays.sparse(A::AbstractExtendableSparseMatrixCSC) + +Return `SparseMatrixCSC` which contains all matrix entries introduced so far. +""" +function SparseArrays.sparse(A::AbstractExtendableSparseMatrixCSC) + throw(MethodError("Missing implementation of `sparse(::$(typeof(A)))`")) +end + +""" + rawupdateindex!(A::AbstractExtendableSparseMatrixCSC,op,v,i,j,part = 1) + +Add or update entry of A: `A[i,j]=op(A[i,j],v)` without checking if a zero +is inserted. The optional parameter part denotes the partition. +""" +function rawupdateindex!(A::AbstractExtendableSparseMatrixCSC, op, v, i, j, part = 1) + throw(MethodError("Missing implementation of `rawupdateindex!(::$(typeof(A)),...)`")) +end + +function flush!(A::AbstractExtendableSparseMatrixCSC) + throw(MethodError("Missing implementation of `flush!(::$(typeof(A)))`")) +end + +function reset!(A::AbstractExtendableSparseMatrixCSC) + throw(MethodError("Missing implementation of `reset!(::$(typeof(A)))`")) +end + + +""" + SparseArrays.nnz(ext::AbstractExtendableSparseMatrixCSC) [`flush!`](@ref) and return number of nonzeros in ext.cscmatrix. """ SparseArrays.nnz(ext::AbstractExtendableSparseMatrixCSC) = nnz(sparse(ext)) """ -$(SIGNATURES) + SparseArrays.nonzeros(ext::AbstractExtendableSparseMatrixCSC) = nonzeros(sparse(ext)) [`flush!`](@ref) and return nonzeros in ext.cscmatrix. """ SparseArrays.nonzeros(ext::AbstractExtendableSparseMatrixCSC) = nonzeros(sparse(ext)) +""" +$(TYPEDSIGNATURES) +""" +function SparseArrays.dropzeros!(ext::AbstractExtendableSparseMatrixCSC) + return dropzeros!(sparse(ext)) +end + + +""" + Base.size(ext::AbstractExtendableSparseMatrixCSC) + +Return size of matrix.
+""" Base.size(ext::AbstractExtendableSparseMatrixCSC) = size(ext.cscmatrix) """ -$(SIGNATURES) + SparseArrays.rowvals(ext::AbstractExtendableSparseMatrixCSC) + +[`flush!`](@ref) and return rowvals in ext.cscmatrix. +""" +SparseArrays.rowvals(ext::AbstractExtendableSparseMatrixCSC) = rowvals(sparse(ext)) + + +""" + SparseArrays.findnz(ext::AbstractExtendableSparseMatrixCSC) + +[`flush!`](@ref) and return findnz(ext.cscmatrix). +""" +SparseArrays.findnz(ext::AbstractExtendableSparseMatrixCSC) = findnz(sparse(ext)) + + +""" + SparseArrays.getcolptr(ext::AbstractExtendableSparseMatrixCSC) + +[`flush!`](@ref) and return colptr of in ext.cscmatrix. +""" +SparseArrays.getcolptr(ext::AbstractExtendableSparseMatrixCSC) = getcolptr(sparse(ext)) + + +""" + Base.eltype(::AbstractExtendableSparseMatrixCSC{Tv, Ti}) Return element type. """ @@ -35,13 +108,16 @@ Base.eltype(::AbstractExtendableSparseMatrixCSC{Tv, Ti}) where {Tv, Ti} = Tv """ -$(SIGNATURES) - - Create SparseMatrixCSC from ExtendableSparseMatrix + SparseArrays.SparseMatrixCSC(A::AbstractExtendableSparseMatrixCSC) +Create SparseMatrixCSC from ExtendableSparseMatrix """ SparseArrays.SparseMatrixCSC(A::AbstractExtendableSparseMatrixCSC) = sparse(A) +""" + Base.show(::IO, ::MIME"text/plain", ext::AbstractExtendableSparseMatrixCSC) +[`flush!`](@ref) and use the show method of SparseMatrixCSC to show the content. +""" function Base.show(io::IO, ::MIME"text/plain", ext::AbstractExtendableSparseMatrixCSC) A = sparse(ext) xnnz = nnz(A) @@ -70,55 +146,30 @@ function Base.show(io::IO, ::MIME"text/plain", ext::AbstractExtendableSparseMatr end -""" -$(SIGNATURES) +SparseArrays._checkbuffers(ext::AbstractExtendableSparseMatrixCSC) = SparseArrays._checkbuffers(sparse(ext)) -[`flush!`](@ref) and return rowvals in ext.cscmatrix. """ -SparseArrays.rowvals(ext::AbstractExtendableSparseMatrixCSC) = rowvals(sparse(ext)) - - -""" -$(SIGNATURES) - -[`flush!`](@ref) and return colptr of in ext.cscmatrix. -""" -SparseArrays.getcolptr(ext::AbstractExtendableSparseMatrixCSC) = getcolptr(sparse(ext)) + Base.:\\(::AbstractExtendableSparseMatrixCSC, b) -""" -$(SIGNATURES) - -[`flush!`](@ref) and return findnz(ext.cscmatrix). -""" -SparseArrays.findnz(ext::AbstractExtendableSparseMatrixCSC) = findnz(sparse(ext)) - - -@static if VERSION >= v"1.7" - SparseArrays._checkbuffers(ext::AbstractExtendableSparseMatrixCSC) = SparseArrays._checkbuffers(sparse(ext)) -end - -""" - A\b - [`\\`](@ref) for ExtendableSparse. It calls the LU factorization form Sparspak.jl, unless GPL components are allowed in the Julia sysimage and the floating point type of the matrix is Float64 or Complex64. In that case, Julias standard `\` is called, which is realized via UMFPACK. 
""" -function LinearAlgebra.:\( +function Base.:\( ext::AbstractExtendableSparseMatrixCSC{Tv, Ti}, b::AbstractVector ) where {Tv, Ti} - return SparspakLU(sparse(ext)) \ b + return sparspaklu(sparse(ext)) \ b end """ -$(SIGNATURES) + Base.:\\(Symmetric(::AbstractExtendableSparseMatrixCSC), b) [`\\`](@ref) for Symmetric{ExtendableSparse} """ -function LinearAlgebra.:\( +function Base.:\( symm_ext::Symmetric{Tm, T}, b::AbstractVector ) where {Tm, Ti, T <: AbstractExtendableSparseMatrixCSC{Tm, Ti}} @@ -126,11 +177,11 @@ function LinearAlgebra.:\( end """ -$(SIGNATURES) + Base.:\\(Hermitian(::AbstractExtendableSparseMatrixCSC), b) [`\\`](@ref) for Hermitian{ExtendableSparse} """ -function LinearAlgebra.:\( +function Base.:\( symm_ext::Hermitian{Tm, T}, b::AbstractVector ) where {Tm, Ti, T <: AbstractExtendableSparseMatrixCSC{Tm, Ti}} @@ -140,7 +191,7 @@ end if USE_GPL_LIBS for (Tv) in (:Float64, :ComplexF64) @eval begin - function LinearAlgebra.:\( + function Base.:\( ext::AbstractExtendableSparseMatrixCSC{$Tv, Ti}, B::AbstractVector ) where {Ti} @@ -149,7 +200,7 @@ if USE_GPL_LIBS end @eval begin - function LinearAlgebra.:\( + function Base.:\( symm_ext::Symmetric{ $Tv, AbstractExtendableSparseMatrixCSC{ @@ -165,7 +216,7 @@ if USE_GPL_LIBS end @eval begin - function LinearAlgebra.:\( + function Base.:\( symm_ext::Hermitian{ $Tv, AbstractExtendableSparseMatrixCSC{ @@ -183,25 +234,39 @@ if USE_GPL_LIBS end # USE_GPL_LIBS """ -$(SIGNATURES) +$(TYPEDSIGNATURES) -[`flush!`](@ref) and ldiv with ext.cscmatrix +[`flush!`](@ref) and ldiv! with ext.cscmatrix """ -function LinearAlgebra.ldiv!(r, ext::AbstractExtendableSparseMatrixCSC, x) +function LinearAlgebra.ldiv!(r::AbstractArray, ext::AbstractExtendableSparseMatrixCSC, x::AbstractArray) return LinearAlgebra.ldiv!(r, sparse(ext), x) end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) [`flush!`](@ref) and multiply with ext.cscmatrix """ -function LinearAlgebra.mul!(r, ext::AbstractExtendableSparseMatrixCSC, x) +function LinearAlgebra.mul!(r::AbstractVecOrMat, ext::AbstractExtendableSparseMatrixCSC, x::AbstractVecOrMat) return LinearAlgebra.mul!(r, sparse(ext), x) end + +# to resolve ambiguity +function LinearAlgebra.mul!(::SparseArrays.AbstractSparseMatrixCSC, ::AbstractExtendableSparseMatrixCSC, ::LinearAlgebra.Diagonal) + throw(MethodError("mul!(::AbstractSparseMatrixCSC, ::AbstractExtendableSparseMatrixCSC, ::Diagonal) is impossible")) + return nothing +end + +# to resolve ambiguity +function LinearAlgebra.mul!(::AbstractMatrix, ::AbstractExtendableSparseMatrixCSC, ::LinearAlgebra.AbstractTriangular) + throw(MethodError("mul!(::AbstractMatrix, ::AbstractExtendableSparseMatrixCSC, ::AbstractTriangular) is impossible")) + return nothing +end + + """ -$(SIGNATURES) +$(TYPEDSIGNATURES) [`flush!`](@ref) and calculate norm from cscmatrix """ @@ -210,7 +275,7 @@ function LinearAlgebra.norm(A::AbstractExtendableSparseMatrixCSC, p::Real = 2) end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) [`flush!`](@ref) and calculate opnorm from cscmatrix """ @@ -219,7 +284,7 @@ function LinearAlgebra.opnorm(A::AbstractExtendableSparseMatrixCSC, p::Real = 2) end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) [`flush!`](@ref) and calculate cond from cscmatrix """ @@ -228,7 +293,7 @@ function LinearAlgebra.cond(A::AbstractExtendableSparseMatrixCSC, p::Real = 2) end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) [`flush!`](@ref) and check for symmetry of cscmatrix """ @@ -237,27 +302,36 @@ function LinearAlgebra.issymmetric(A::AbstractExtendableSparseMatrixCSC) end +""" +$(TYPEDSIGNATURES) +""" function 
Base.:+(A::T, B::T) where {T <: AbstractExtendableSparseMatrixCSC} return T(sparse(A) + sparse(B)) end +""" +$(TYPEDSIGNATURES) +""" function Base.:-(A::T, B::T) where {T <: AbstractExtendableSparseMatrixCSC} return T(sparse(A) - sparse(B)) end +""" +$(TYPEDSIGNATURES) +""" function Base.:*(A::T, B::T) where {T <: AbstractExtendableSparseMatrixCSC} return T(sparse(A) * sparse(B)) end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) """ function Base.:*(d::Diagonal, ext::T) where {T <: AbstractExtendableSparseMatrixCSC} return T(d * sparse(ext)) end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) """ function Base.:*(ext::T, d::Diagonal) where {T <: AbstractExtendableSparseMatrixCSC} return T(sparse(ext) * d) @@ -265,9 +339,7 @@ end """ -$(SIGNATURES) - -Add SparseMatrixCSC matrix and [`ExtendableSparseMatrix`](@ref) ext. +$(TYPEDSIGNATURES) """ function Base.:+(ext::AbstractExtendableSparseMatrixCSC, csc::SparseMatrixCSC) return sparse(ext) + csc @@ -275,39 +347,37 @@ end """ -$(SIGNATURES) - -Subtract SparseMatrixCSC matrix from [`ExtendableSparseMatrix`](@ref) ext. +$(TYPEDSIGNATURES) """ function Base.:-(ext::AbstractExtendableSparseMatrixCSC, csc::SparseMatrixCSC) return sparse(ext) - csc end """ -$(SIGNATURES) - -Subtract [`ExtendableSparseMatrix`](@ref) ext from SparseMatrixCSC. +$(TYPEDSIGNATURES) """ function Base.:-(csc::SparseMatrixCSC, ext::AbstractExtendableSparseMatrixCSC) return csc - sparse(ext) end + """ -$(SIGNATURES) +$(TYPEDSIGNATURES) """ -function SparseArrays.dropzeros!(ext::AbstractExtendableSparseMatrixCSC) - return dropzeros!(sparse(ext)) -end - - function mark_dirichlet(A::AbstractExtendableSparseMatrixCSC; penalty = 1.0e20) return mark_dirichlet(sparse(A); penalty) end +""" +$(TYPEDSIGNATURES) +""" function eliminate_dirichlet(A::T, dirichlet) where {T <: AbstractExtendableSparseMatrixCSC} return T(eliminate_dirichlet(sparse(A), dirichlet)) end +""" +$(TYPEDSIGNATURES) +""" function eliminate_dirichlet!(A::AbstractExtendableSparseMatrixCSC, dirichlet) eliminate_dirichlet!(sparse(A), dirichlet) return A diff --git a/src/matrix/abstractsparsematrixextension.jl b/src/abstractsparsematrixextension.jl similarity index 74% rename from src/matrix/abstractsparsematrixextension.jl rename to src/abstractsparsematrixextension.jl index ca3b972..6e43198 100644 --- a/src/matrix/abstractsparsematrixextension.jl +++ b/src/abstractsparsematrixextension.jl @@ -1,5 +1,5 @@ """ - $(TYPEDEF) + abstract type AbstractSparseMatrixExtension{Tv, Ti} <: AbstractSparseMatrix{Tv, Ti} end Abstract type for sparse matrix extension. @@ -10,10 +10,8 @@ Subtypes T_ext must implement: - `Base.size(ext::T_ext)` - `Base.sum(extmatrices::Vector{T_ext}, csx)`: add csr or csc matrix and extension matrices (one per partition) and return csx matrix - `Base.+(ext::T_ext, csx)` (optional) - Add extension matrix and csc/csr matrix, return csx matrix -- `rawupdateindex!(ext::Text, op, v, i, j, tid) where {Tv, Ti}`: Set `ext[i,j]op=v`, possibly insert new entry into matrix. `tid` is a -task or partition id - +- `rawupdateindex!(ext::Text, op, v, i, j, tid) where {Tv, Ti}`: Set `ext[i,j]op=v`, possibly insert new entry into matrix. 
`tid` is a task or partition id """ abstract type AbstractSparseMatrixExtension{Tv, Ti} <: AbstractSparseMatrix{Tv, Ti} end -Base.:+(ext::AbstractSparseMatrixExtension, csx) = sum([ext], csx) +Base.:+(ext::AbstractSparseMatrixExtension, csx::AbstractSparseMatrix) = sum([ext], csx) diff --git a/src/compat.jl b/src/compat.jl deleted file mode 100644 index ad847b3..0000000 --- a/src/compat.jl +++ /dev/null @@ -1,17 +0,0 @@ -# Copied from MIT licensed -# https://github.com/JuliaDiff/DifferentiationInterface.jl/blob/main/DifferentiationInterface/src/compat.jl -# -macro public(ex) - return if VERSION >= v"1.11.0-DEV.469" - args = if ex isa Symbol - (ex,) - elseif Base.isexpr(ex, :tuple) - ex.args - else - error("something informative") - end - esc(Expr(:public, args...)) - else - nothing - end -end diff --git a/src/experimental/Experimental.jl b/src/experimental/Experimental.jl deleted file mode 100644 index 9a3bb30..0000000 --- a/src/experimental/Experimental.jl +++ /dev/null @@ -1,41 +0,0 @@ -module Experimental -using ExtendableSparse, SparseArrays -using LinearAlgebra -using SparseArrays: AbstractSparseMatrixCSC -import SparseArrays: nonzeros, getcolptr, nzrange -import ExtendableSparse: flush!, reset!, rawupdateindex!, findindex -using ExtendableSparse: ColEntry, AbstractPreconditioner, @makefrommatrix, phash -using ExtendableSparse: AbstractExtendableSparseMatrixCSC, AbstractSparseMatrixExtension -using DocStringExtensions -using Metis -using Base.Threads -import ExtendableSparse: factorize!, update!, partitioning! - -include(joinpath(@__DIR__, "ExtendableSparseMatrixParallel", "ExtendableSparseParallel.jl")) - -include(joinpath(@__DIR__, "ExtendableSparseMatrixParallel", "ilu_Al-Kurdi_Mittal.jl")) -#using .ILUAM -include(joinpath(@__DIR__, "ExtendableSparseMatrixParallel", "pilu_Al-Kurdi_Mittal.jl")) -#using .PILUAM - -include(joinpath(@__DIR__, "ExtendableSparseMatrixParallel", "iluam.jl")) -include(joinpath(@__DIR__, "ExtendableSparseMatrixParallel", "piluam.jl")) - -@eval begin - @makefrommatrix ILUAMPreconditioner - @makefrommatrix PILUAMPreconditioner -end - -function factorize!(p::PILUAMPreconditioner, A::ExtendableSparseMatrixParallel) - p.A = A - update!(p) - return p -end - -export ExtendableSparseMatrixParallel, SuperSparseMatrixLNK -export addtoentry!, reset!, dummy_assembly!, preparatory_multi_ps_less_reverse, fr, addtoentry!, compare_matrices_light -export ILUAMPreconditioner, PILUAMPreconditioner -export reorderlinsys, nnz_noflush - - -end diff --git a/src/experimental/ExtendableSparseMatrixParallel/ExtendableSparseParallel.jl b/src/experimental/ExtendableSparseMatrixParallel/ExtendableSparseParallel.jl deleted file mode 100644 index 593f11d..0000000 --- a/src/experimental/ExtendableSparseMatrixParallel/ExtendableSparseParallel.jl +++ /dev/null @@ -1,473 +0,0 @@ -include("supersparse.jl") -include("preparatory.jl") -#include("prep_time.jl") - -mutable struct ExtendableSparseMatrixParallel{Tv, Ti <: Integer} <: AbstractSparseMatrix{Tv, Ti} - """ - Final matrix data - """ - cscmatrix::SparseMatrixCSC{Tv, Ti} - - """ - Linked list structures holding data of extension, one for each thread - """ - lnkmatrices::Vector{SuperSparseMatrixLNK{Tv, Ti}} - - """ - Number of Nodes per Threads - """ - nnts::Vector{Ti} - - """ - sortednodesperthread[i,j] = local index of the j-th global column in the i-th LNK matrix - (this is used e.g. 
when assembling the matrix) - """ - sortednodesperthread::Matrix{Ti} - - """ - depth+1 x nn matrix, - old_noderegions[i,j] = region in which node j is, in level i - old refers to the fact that j is the 'old index' (i.e. grid index, not matrix index, see 'new_indices') - """ - old_noderegions::Matrix{Ti} - - """ - cellsforpart[i] is a vector containing all cells in the i-th region - cellsforpart has length nt*depth + 1 - """ - cellsforpart::Vector{Vector{Ti}} - - """ - globalindices[i][j] = index in the global (ESMP & CSC) matrix of the j-th column of the i-th LNK matrix - (this maps the local indices (in the LNKs) to the global indices (ESMP & CSC)) - """ - globalindices::Vector{Vector{Ti}} - - """ - For some applications such as the parallel ILU preconditioner, a block form is necessary. - Thus, the columns are reordered and the A[i,i] does not correspond to the i-th node of the grid, - but A[new_indices[i], new_indices[i]] does - """ - new_indices::Vector{Ti} - - """ - Reverse: rev_new_indices[new_indices[i]] = i, for all i - """ - rev_new_indices::Vector{Ti} - - """ - starts[i] gives the first column of the i-th region, i.e. starts[1] = 1 - starts has length nt*depth + 1 - """ - start::Vector{Ti} - - """ - cellparts[i] = region of the i-th cell - """ - cellparts::Vector{Ti} - - """ - Number of threads - """ - nt::Ti - - """ - How often is the separator partitioned? (if never: depth = 1) - """ - depth::Ti - - phash::UInt64 - - """ - Number of rows / number of nodes in grid - """ - n::Ti - - """ - Number of columns / number of nodes in grid (only works for square matrices) - """ - m::Ti - - -end - - -""" -$(SIGNATURES) - -`ExtendableSparseMatrixParallel{Tv, Ti}(mat_cell_node, nc, nn, nt, depth; block_struct = true) where {Tv, Ti <: Integer}` - -Create an ExtendableSparseMatrixParallel based on a grid. -The grid is specified by nc (number of cells), nn (number of nodes) and the `mat_cell_node` (i.e. grid[CellNodes] if ExtendableGrids is used). -Here, `mat_cell_node[k,i]` is the i-th node in the k-th cell. -The matrix structure is made for parallel computations with `nt` threads. -`depth` is the number of partition layers, for depth=1, there are nt parts and 1 separator, for depth=2, the separator is partitioned again -`block_struct=true` means, the matrix should be reordered two have a block structure, this is necessary for parallel ILU, for `false`, the matrix is not reordered -""" -function ExtendableSparseMatrixParallel{Tv, Ti}(mat_cell_node, nc, nn, nt, depth; block_struct = true) where {Tv, Ti <: Integer} - nnts, s, onr, cfp, gi, ni, rni, starts, cellparts, depth = preparatory_multi_ps_less_reverse(mat_cell_node, nc, nn, nt, depth, Ti; block_struct) - csc = spzeros(Tv, Ti, nn, nn) - lnk = [SuperSparseMatrixLNK{Tv, Ti}(nn, nnts[tid]) for tid in 1:nt] - return ExtendableSparseMatrixParallel{Tv, Ti}(csc, lnk, nnts, s, onr, cfp, gi, ni, rni, starts, cellparts, nt, depth, phash(csc), csc.n, csc.m) -end - - -""" -$(SIGNATURES) - -`addtoentry!(A::ExtendableSparseMatrixParallel{Tv, Ti}, i, j, tid, v; known_that_unknown=false) where {Tv, Ti <: Integer}` - -`A[i,j] += v` -This function should be used, if the thread in which the entry appears is known (`tid`). -If the thread is not known, use `addtoentry!(A::ExtendableSparseMatrixParallel{Tv, Ti}, i, j, v; known_that_unknown=false)`, this function calculates `tid`. -If you know that the entry is not yet known to the CSC structure, set `known_that_unknown=true`. 
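For comparison, the plain `ExtendableSparseMatrix`, which stays in the package, supports the same accumulate-or-insert pattern without any partition id. A minimal sequential sketch, assuming `rawupdateindex!` and `flush!` as provided by ExtendableSparse (sizes and values below are made up for illustration):

```
using ExtendableSparse

A = ExtendableSparseMatrix(5, 5)
# A[1,1] += v, inserting the entry if it is not stored yet
ExtendableSparse.rawupdateindex!(A, +, 2.0, 1, 1)
ExtendableSparse.rawupdateindex!(A, +, 3.0, 1, 1)   # accumulates to 5.0
flush!(A)                                           # merge pending entries into the CSC part
@assert A[1, 1] == 5.0
```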
-""" -function addtoentry!(A::ExtendableSparseMatrixParallel{Tv, Ti}, i, j, tid, v; known_that_unknown = false) where {Tv, Ti <: Integer} - if known_that_unknown - A.lnkmatrices[tid][i, A.sortednodesperthread[tid, j]] += v - return - end - - return if updatentryCSC2!(A.cscmatrix, i, j, v) - else - A.lnkmatrices[tid][i, A.sortednodesperthread[tid, j]] += v - end -end - - -""" -$(SIGNATURES) - -`addtoentry!(A::ExtendableSparseMatrixParallel{Tv, Ti}, i, j, v; known_that_unknown=false) where {Tv, Ti <: Integer}` - -A[i,j] += v, using any partition. -If the partition should be specified (for parallel use), use -`function addtoentry!(A::ExtendableSparseMatrixParallel{Tv, Ti}, i, j, tid, v; known_that_unknown=false) where {Tv, Ti <: Integer}`. -""" -function addtoentry!(A::ExtendableSparseMatrixParallel{Tv, Ti}, i, j, v; known_that_unknown = false) where {Tv, Ti <: Integer} - if known_that_unknown - level, tid = last_nz(A.old_noderegions[:, A.rev_new_indices[j]]) - A.lnkmatrices[tid][i, A.sortednodesperthread[tid, j]] += v - return - end - - return if updatentryCSC2!(A.cscmatrix, i, j, v) - else - _, tid = last_nz(A.old_noderegions[:, A.rev_new_indices[j]]) - A.lnkmatrices[tid][i, A.sortednodesperthread[tid, j]] += v - end -end - -#--------------------------------- - -""" -$(SIGNATURES) -`updateindex!(ext::ExtendableSparseMatrixParallel{Tv, Ti}, op, v, i, j) where {Tv, Ti <: Integer` - -Update element of the matrix with operation `op`. -Use this method if the 'thread of the element' is not known, otherwise use `updateindex!(ext, op, v, i, j, tid)`. -""" -function updateindex!( - ext::ExtendableSparseMatrixParallel{Tv, Ti}, - op, - v, - i, - j - ) where {Tv, Ti <: Integer} - k = ExtendableSparse.findindex(ext.cscmatrix, i, j) - if k > 0 - ext.cscmatrix.nzval[k] = op(ext.cscmatrix.nzval[k], v) - return - else - level, tid = last_nz(ext.old_noderegions[:, ext.rev_new_indices[j]]) - updateindex!(ext.lnkmatrices[tid], op, v, i, ext.sortednodesperthread[tid, j]) - end - return ext -end - -""" -$(SIGNATURES) -`updateindex!(ext::ExtendableSparseMatrixParallel{Tv, Ti}, op, v, i, j, tid) where {Tv, Ti <: Integer` - -Update element of the matrix with operation `op`. -Use this method if the 'thread of the element' is known, otherwise use `updateindex!(ext, op, v, i, j)`. -""" -function updateindex!( - ext::ExtendableSparseMatrixParallel{Tv, Ti}, - op, - v, - i, - j, - tid - ) where {Tv, Ti <: Integer} - k = ExtendableSparse.findindex(ext.cscmatrix, i, j) - if k > 0 - ext.cscmatrix.nzval[k] = op(ext.cscmatrix.nzval[k], v) - return - else - updateindex!(ext.lnkmatrices[tid], op, v, i, ext.sortednodesperthread[tid, j]) - end - return ext -end - -""" -$(SIGNATURES) -`rawupdateindex!(ext::ExtendableSparseMatrixParallel{Tv, Ti}, op, v, i, j) where {Tv, Ti <: Integer}` - -Like [`updateindex!`](@ref) but without checking if v is zero. -Use this method if the 'thread of the element' is not known. -""" -function rawupdateindex!( - ext::ExtendableSparseMatrixParallel{Tv, Ti}, - op, - v, - i, - j - ) where {Tv, Ti <: Integer} - k = ExtendableSparse.findindex(ext.cscmatrix, i, j) - if k > 0 - ext.cscmatrix.nzval[k] = op(ext.cscmatrix.nzval[k], v) - else - level, tid = last_nz(ext.old_noderegions[:, ext.rev_new_indices[j]]) - rawupdateindex!(ext.lnkmatrices[tid], op, v, i, ext.sortednodesperthread[tid, j]) - end - return ext -end - -""" -$(SIGNATURES) -`rawupdateindex!(ext::ExtendableSparseMatrixParallel{Tv, Ti}, op, v, i, j, tid) where {Tv, Ti <: Integer}` - -Like [`updateindex!`](@ref) but without checking if v is zero. 
-Use this method if the 'thread of the element' is known -""" -function rawupdateindex!( - ext::ExtendableSparseMatrixParallel{Tv, Ti}, - op, - v, - i, - j, - tid - ) where {Tv, Ti <: Integer} - k = ExtendableSparse.findindex(ext.cscmatrix, i, j) - if k > 0 - ext.cscmatrix.nzval[k] = op(ext.cscmatrix.nzval[k], v) - else - rawupdateindex!(ext.lnkmatrices[tid], op, v, i, ext.sortednodesperthread[tid, j]) - end - return ext -end - -""" -$(SIGNATURES) -``Base.getindex(ext::ExtendableSparseMatrixParallel{Tv, Ti}, i::Integer, j::Integer) where {Tv, Ti <: Integer` - -Find index in CSC matrix and return value, if it exists. -Otherwise, return value from extension. -""" -function Base.getindex( - ext::ExtendableSparseMatrixParallel{Tv, Ti}, - i::Integer, - j::Integer - ) where {Tv, Ti <: Integer} - k = ExtendableSparse.findindex(ext.cscmatrix, i, j) - if k > 0 - return ext.cscmatrix.nzval[k] - end - - level, tid = last_nz(ext.old_noderegions[:, ext.rev_new_indices[j]]) - return ext.lnkmatrices[tid][i, ext.sortednodesperthread[tid, j]] - -end - -""" -$(SIGNATURES) -`Base.setindex!(ext::ExtendableSparseMatrixParallel{Tv, Ti}, v::Union{Number,AbstractVecOrMat}, i::Integer, j::Integer) where {Tv, Ti}` - -Find index in CSC matrix and set value if it exists. Otherwise, -set index in extension if `v` is nonzero. -""" -function Base.setindex!( - ext::ExtendableSparseMatrixParallel{Tv, Ti}, - v::Union{Number, AbstractVecOrMat}, - i::Integer, - j::Integer - ) where {Tv, Ti} - k = ExtendableSparse.findindex(ext.cscmatrix, i, j) - return if k > 0 - ext.cscmatrix.nzval[k] = v - else - level, tid = last_nz(ext.old_noderegions[:, ext.rev_new_indices[j]]) - #@info typeof(tid), typeof(j) - jj = ext.sortednodesperthread[tid, j] - ext.lnkmatrices[tid][i, jj] = v - end -end - - -#------------------------------------ - -""" -$(SIGNATURES) - -Reset matrix, such that CSC and LNK have no non-zero entries. -""" -function reset!(A::ExtendableSparseMatrixParallel{Tv, Ti}) where {Tv, Ti <: Integer} - A.cscmatrix = spzeros(Tv, Ti, A.n, A.m) - return A.lnkmatrices = [SuperSparseMatrixLNK{Tv, Ti}(A.n, A.nnts[tid]) for tid in 1:A.nt] -end - -""" -$(SIGNATURES) - -Compute number of non-zero elements, after flush. -""" -function nnz_flush(ext::ExtendableSparseMatrixParallel) - flush!(ext) - return nnz(ext.cscmatrix) -end - -""" -$(SIGNATURES) - -Compute number of non-zero elements, without flush. -""" -function nnz_noflush(ext::ExtendableSparseMatrixParallel) - return nnz(ext.cscmatrix), sum([ext.lnkmatrices[i].nnz for i in 1:ext.nt]) -end - -function matrixindextype(A::ExtendableSparseMatrixParallel{Tv, Ti}) where {Tv, Ti <: Integer} - return Ti -end - -function matrixvaluetype(A::ExtendableSparseMatrixParallel{Tv, Ti}) where {Tv, Ti <: Integer} - return Tv -end - - -""" -$(SIGNATURES) - -Show matrix, without flushing -""" -function Base.show(io::IO, ::MIME"text/plain", ext::ExtendableSparseMatrixParallel) - #flush!(ext) - xnnzCSC, xnnzLNK = nnz_noflush(ext) - m, n = size(ext) - print( - io, - m, - "×", - n, - " ", - typeof(ext), - " with ", - xnnzCSC, - " stored ", - xnnzCSC == 1 ? "entry" : "entries", - " in CSC and ", - xnnzLNK, - " stored ", - xnnzLNK == 1 ? "entry" : "entries", - " in LNK. 
CSC:" - ) - - if !haskey(io, :compact) - io = IOContext(io, :compact => true) - end - - return if !(m == 0 || n == 0 || xnnzCSC == 0) - print(io, ":\n") - Base.print_array(IOContext(io), ext.cscmatrix) - end -end - -""" -`function entryexists2(CSC, i, j)` - -Find out if CSC already has an nonzero entry at i,j without any allocations -""" -function entryexists2(CSC, i, j) #find out if CSC already has an nonzero entry at i,j - #vals = - #ids = CSC.colptr[j]:(CSC.colptr[j+1]-1) - return i in view(CSC.rowval, CSC.colptr[j]:(CSC.colptr[j + 1] - 1)) -end - -""" -$(SIGNATURES) - -Find out if i,j is non-zero entry in CSC, if yes, update entry with += v and return `true`, if not return `false` -""" -function updatentryCSC2!(CSC::SparseArrays.SparseMatrixCSC{Tv, Ti}, i::Integer, j::Integer, v) where {Tv, Ti <: Integer} - p1 = CSC.colptr[j] - p2 = CSC.colptr[j + 1] - 1 - - searchk = searchsortedfirst(view(CSC.rowval, p1:p2), i) + p1 - 1 - - if (searchk <= p2) && (CSC.rowval[searchk] == i) - CSC.nzval[searchk] += v - return true - else - return false - end -end - - -Base.size(A::ExtendableSparseMatrixParallel) = (A.cscmatrix.m, A.cscmatrix.n) -include("struct_flush.jl") - - -import LinearAlgebra.mul! - -""" -$(SIGNATURES) -```function LinearAlgebra.mul!(y, A, x)``` - -This overwrites the mul! function for A::ExtendableSparseMatrixParallel -""" -function LinearAlgebra.mul!(y::AbstractVector{Tv}, A::ExtendableSparseMatrixParallel{Tv, Ti}, x::AbstractVector{Tv}) where {Tv, Ti <: Integer} - #@info "my matvec" - _, nnzLNK = nnz_noflush(A) - @assert nnzLNK == 0 - #mul!(y, A.cscmatrix, x) - return matvec!(y, A, x) -end - - -""" -$(SIGNATURES) -```function matvec!(y, A, x)``` - -y <- A*x, where y and x are vectors and A is an ExtendableSparseMatrixParallel -this computation is done in parallel, it has the same result as y = A.cscmatrix*x -""" -function matvec!(y::AbstractVector{Tv}, A::ExtendableSparseMatrixParallel{Tv, Ti}, x::AbstractVector{Tv}) where {Tv, Ti <: Integer} - nt = A.nt - depth = A.depth - colptr = A.cscmatrix.colptr - nzv = A.cscmatrix.nzval - rv = A.cscmatrix.rowval - - LinearAlgebra._rmul_or_fill!(y, 0.0) - - for level in 1:depth - @threads for tid::Int64 in 1:nt - for col::Int64 in A.start[(level - 1) * nt + tid]:(A.start[(level - 1) * nt + tid + 1] - 1) - for row::Int64 in colptr[col]:(colptr[col + 1] - 1) # in nzrange(A, col) - y[rv[row]] += nzv[row] * x[col] - end - end - end - end - - - @threads for tid in 1:1 - for col::Int64 in A.start[depth * nt + 1]:(A.start[depth * nt + 2] - 1) - for row::Int64 in colptr[col]:(colptr[col + 1] - 1) #nzrange(A, col) - y[rv[row]] += nzv[row] * x[col] - end - end - end - - return y -end diff --git a/src/experimental/ExtendableSparseMatrixParallel/ilu_Al-Kurdi_Mittal.jl b/src/experimental/ExtendableSparseMatrixParallel/ilu_Al-Kurdi_Mittal.jl deleted file mode 100644 index 75c2341..0000000 --- a/src/experimental/ExtendableSparseMatrixParallel/ilu_Al-Kurdi_Mittal.jl +++ /dev/null @@ -1,267 +0,0 @@ -#module ILUAM -#using LinearAlgebra, SparseArrays - -import LinearAlgebra.ldiv!, LinearAlgebra.\, SparseArrays.nnz - -#@info "ILUAM" - -mutable struct ILUAMPrecon{T, N} - - diag::AbstractVector - nzval::AbstractVector - A::AbstractMatrix - -end - - -function iluAM!(ILU::ILUAMPrecon{Tv, Ti}, A::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti <: Integer} - diag = ILU.diag - nzval = ILU.nzval - - nzval = copy(A.nzval) - diag = Vector{Ti}(undef, n) - ILU.A = A - colptr = A.colptr - rowval = A.rowval - n = A.n - point = zeros(Ti, n) - - for j in 1:n - for v in 
colptr[j]:(colptr[j + 1] - 1) - if rowval[v] == j - diag[j] = v - break - end - #elseif rowval[v] - end - end - - # compute L and U - for j in 1:n - for v in colptr[j]:(colptr[j + 1] - 1) ## start at colptr[j]+1 ?? - point[rowval[v]] = v - end - - for v in colptr[j]:(diag[j] - 1) - i = rowval[v] - #nzval[v] /= nzval[diag[i]] - for w in (diag[i] + 1):(colptr[i + 1] - 1) - k = point[rowval[w]] - if k > 0 - nzval[k] -= nzval[v] * nzval[w] - end - end - end - - for v in (diag[j] + 1):(colptr[j + 1] - 1) - nzval[v] /= nzval[diag[j]] - end - - - for v in colptr[j]:(colptr[j + 1] - 1) - point[rowval[v]] = zero(Ti) - end - end - - return -end - -function iluAM(A::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti <: Integer} - #@info "iluAM" - nzval = copy(A.nzval) - colptr = A.colptr - rowval = A.rowval - #nzval = ILU.nzval - n = A.n # number of columns - point = zeros(Ti, n) #Vector{Ti}(undef, n) - diag = Vector{Ti}(undef, n) - - # find diagonal entries - for j in 1:n - for v in colptr[j]:(colptr[j + 1] - 1) - if rowval[v] == j - diag[j] = v - break - end - #elseif rowval[v] - end - end - - #@info diag[1:20]' - #@info diag[end-20:end]' - - # compute L and U - for j in 1:n - for v in colptr[j]:(colptr[j + 1] - 1) ## start at colptr[j]+1 ?? - point[rowval[v]] = v - end - - for v in colptr[j]:(diag[j] - 1) - i = rowval[v] - #nzval[v] /= nzval[diag[i]] - for w in (diag[i] + 1):(colptr[i + 1] - 1) - k = point[rowval[w]] - if k > 0 - nzval[k] -= nzval[v] * nzval[w] - end - end - end - - for v in (diag[j] + 1):(colptr[j + 1] - 1) - nzval[v] /= nzval[diag[j]] - end - - - for v in colptr[j]:(colptr[j + 1] - 1) - point[rowval[v]] = zero(Ti) - end - end - #nzval, diag - return ILUAMPrecon{Tv, Ti}(diag, nzval, A) -end - -function forward_subst_old!(y, v, nzval, diag, A) - #@info "fso, $(sum(nzval)), $(sum(nzval.^2)), $(sum(diag)), $(A[1,1])" - n = A.n - colptr = A.colptr - rowval = A.rowval - - for i in eachindex(y) - y[i] = zero(Float64) - end - - #y .= 0 - @inbounds for j in 1:n - y[j] += v[j] - for v in (diag[j] + 1):(colptr[j + 1] - 1) - y[rowval[v]] -= nzval[v] * y[j] - end - end - return y -end - - -function backward_subst_old!(x, y, nzval, diag, A) - #@info "bso, $(sum(nzval)), $(sum(nzval.^2)), $(sum(diag)), $(A[1,1])" - n = A.n - colptr = A.colptr - rowval = A.rowval - @inbounds for j in n:-1:1 - x[j] = y[j] / nzval[diag[j]] - - for i in colptr[j]:(diag[j] - 1) - y[rowval[i]] -= nzval[i] * x[j] - end - - end - return x -end - - -function ldiv!(x, ILU::ILUAMPrecon, b) - #t = @elapsed begin - #@info "iluam ldiv 1" - nzval = ILU.nzval - diag = ILU.diag - A = ILU.A - y = copy(b) - #forward_subst!(y, b, ILU) - forward_subst_old!(y, b, nzval, diag, A) - backward_subst_old!(x, y, nzval, diag, A) - #@info "ILUAM:", b[1], y[1], x[1], maximum(abs.(b-A*x)), nnz(A) #, A[10,10] - #, b[1], x[1], y[1]#maximum(abs.(b)), maximum(abs.(x)) - #end - #println("$t") #@info t - return x -end - -function ldiv!(ILU::ILUAMPrecon, b) - #@info "iluam ldiv 2" - nzval = ILU.nzval - diag = ILU.diag - A = ILU.A - y = copy(b) - #forward_subst!(y, b, ILU) - forward_subst_old!(y, b, nzval, diag, A) - backward_subst_old!(b, y, nzval, diag, A) - return b -end - -function \(ilu::ILUAMPrecon{T, N}, b) where {T, N <: Integer} - x = copy(b) - ldiv!(x, ilu, b) - return x -end - -function nnz(ilu::ILUAMPrecon{T, N}) where {T, N <: Integer} - return length(ilu.nzval) -end - -#= -function forward_subst!(y, v, ilu) #::ILUAMPrecon{T,N}) where {T,N<:Integer} - @info "fw" - n = ilu.A.n - nzval = ilu.nzval - diag = ilu.diag - colptr = ilu.A.colptr - rowval = 
ilu.A.rowval - - for i in eachindex(y) - y[i] = zero(Float64) - end - - #y .= 0 - @inbounds for j=1:n - y[j] += v[j] - for v=diag[j]+1:colptr[j+1]-1 - y[rowval[v]] -= nzval[v]*y[j] - end - end - y -end - -function backward_subst!(x, y, ilu) #::ILUAMPrecon{T,N}) where {T,N<:Integer} - @info "bw" - n = ilu.A.n - nzval = ilu.nzval - diag = ilu.diag - colptr = ilu.A.colptr - rowval = ilu.A.rowval - #wrk = copy(y) - @inbounds for j=n:-1:1 - x[j] = y[j] / nzval[diag[j]] - - for i=colptr[j]:diag[j]-1 - y[rowval[i]] -= nzval[i]*x[j] - end - - end - x -end - -function iluam_subst(ILU::ILUAMPrecon, b) - y = copy(b) - forward_subst!(y, b, ILU) - z = copy(b) - backward_subst!(z, y, ILU) - z -end - - - -function iluam_subst_old(ILU::ILUAMPrecon, b) - nzval = ILU.nzval - diag = ILU.diag - A = ILU.A - y = copy(b) - #forward_subst!(y, b, ILU) - forward_subst_old!(y, b, nzval, diag, A) - z = copy(b) - backward_subst_old!(z, y, nzval, diag, A) - #backward_subst!(z, y, ILU) - z -end -=# - - -#end diff --git a/src/experimental/ExtendableSparseMatrixParallel/iluam.jl b/src/experimental/ExtendableSparseMatrixParallel/iluam.jl deleted file mode 100644 index cbc05de..0000000 --- a/src/experimental/ExtendableSparseMatrixParallel/iluam.jl +++ /dev/null @@ -1,34 +0,0 @@ -mutable struct ILUAMPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization::ILUAMPrecon - phash::UInt64 - function ILUAMPreconditioner() - p = new() - p.phash = 0 - return p - end -end - -""" -``` -ILUAMPreconditioner() -ILUAMPreconditioner(matrix) -``` -Incomplete LU preconditioner with zero fill-in using ... . This preconditioner -also calculates and stores updates to the off-diagonal entries and thus delivers better convergence than the [`ILU0Preconditioner`](@ref). -""" -function ILUAMPreconditioner end - -function update!(p::ILUAMPreconditioner) - flush!(p.A) - if p.A.phash != p.phash - p.factorization = iluAM(p.A.cscmatrix) - p.phash = p.A.phash - else - iluam!(p.factorization, p.A.cscmatrix) - end - return p -end - -allow_views(::ILUAMPreconditioner) = true -allow_views(::Type{ILUAMPreconditioner}) = true diff --git a/src/experimental/ExtendableSparseMatrixParallel/pilu_Al-Kurdi_Mittal.jl b/src/experimental/ExtendableSparseMatrixParallel/pilu_Al-Kurdi_Mittal.jl deleted file mode 100644 index 3929ffd..0000000 --- a/src/experimental/ExtendableSparseMatrixParallel/pilu_Al-Kurdi_Mittal.jl +++ /dev/null @@ -1,353 +0,0 @@ -#module PILUAM -#using Base.Threads -#using LinearAlgebra, SparseArrays - -import LinearAlgebra.ldiv!, LinearAlgebra.\, SparseArrays.nnz - -#@info "PILUAM" - -mutable struct PILUAMPrecon{T, N} - - diag::AbstractVector - nzval::AbstractVector - A::AbstractMatrix - start::AbstractVector - nt::Integer - depth::Integer - -end - -function use_vector_par(n, nt, Ti) - point = [Vector{Ti}(undef, n) for tid in 1:nt] - @threads for tid in 1:nt - point[tid] = zeros(Ti, n) - end - return point -end - -function compute_lu!(nzval, point, j0, j1, tid, rowval, colptr, diag, Ti) - for j in j0:(j1 - 1) - for v in colptr[j]:(colptr[j + 1] - 1) - point[tid][rowval[v]] = v - end - - for v in colptr[j]:(diag[j] - 1) - i = rowval[v] - for w in (diag[i] + 1):(colptr[i + 1] - 1) - k = point[tid][rowval[w]] - if k > 0 - nzval[k] -= nzval[v] * nzval[w] - end - end - end - - for v in (diag[j] + 1):(colptr[j + 1] - 1) - nzval[v] /= nzval[diag[j]] - end - - for v in colptr[j]:(colptr[j + 1] - 1) - point[tid][rowval[v]] = zero(Ti) - end - end - return -end - -function piluAM!(ILU::PILUAMPrecon{Tv, Ti}, 
A::ExtendableSparseMatrixParallel{Tv, Ti}) where {Tv, Ti <: Integer} - #@info "piluAM!" - diag = ILU.diag - nzval = ILU.nzval - ILU.A = A - start = ILU.start - - ILU.nt = A.nt - nt = A.nt - - ILU.depth = A.depth - depth = A.depth - - - colptr = A.cscmatrix.colptr - rowval = A.cscmatrix.rowval - n = A.cscmatrix.n # number of columns - diag = Vector{Ti}(undef, n) - nzval = Vector{Tv}(undef, length(rowval)) #copy(A.nzval) - point = use_vector_par(n, A.nt, Int32) - - - @threads for tid in 1:(depth * nt + 1) - for j in start[tid]:(start[tid + 1] - 1) - for v in colptr[j]:(colptr[j + 1] - 1) - nzval[v] = A.cscmatrix.nzval[v] - if rowval[v] == j - diag[j] = v - end - #elseif rowval[v] - end - end - end - - for level in 1:depth - @threads for tid in 1:nt - for j in start[(level - 1) * nt + tid]:(start[(level - 1) * nt + tid + 1] - 1) - for v in colptr[j]:(colptr[j + 1] - 1) - point[tid][rowval[v]] = v - end - - for v in colptr[j]:(diag[j] - 1) - i = rowval[v] - for w in (diag[i] + 1):(colptr[i + 1] - 1) - k = point[tid][rowval[w]] - if k > 0 - nzval[k] -= nzval[v] * nzval[w] - end - end - end - - for v in (diag[j] + 1):(colptr[j + 1] - 1) - nzval[v] /= nzval[diag[j]] - end - - for v in colptr[j]:(colptr[j + 1] - 1) - point[tid][rowval[v]] = zero(Ti) - end - end - end - end - - #point = zeros(Ti, n) #Vector{Ti}(undef, n) - for j in start[depth * nt + 1]:(start[depth * nt + 2] - 1) - for v in colptr[j]:(colptr[j + 1] - 1) - point[1][rowval[v]] = v - end - - for v in colptr[j]:(diag[j] - 1) - i = rowval[v] - for w in (diag[i] + 1):(colptr[i + 1] - 1) - k = point[1][rowval[w]] - if k > 0 - nzval[k] -= nzval[v] * nzval[w] - end - end - end - - for v in (diag[j] + 1):(colptr[j + 1] - 1) - nzval[v] /= nzval[diag[j]] - end - - for v in colptr[j]:(colptr[j + 1] - 1) - point[1][rowval[v]] = zero(Ti) - end - end - - return -end - -function piluAM(A::ExtendableSparseMatrixParallel{Tv, Ti}) where {Tv, Ti <: Integer} - start = A.start - nt = A.nt - depth = A.depth - - colptr = A.cscmatrix.colptr - rowval = A.cscmatrix.rowval - nzval = Vector{Tv}(undef, length(rowval)) #copy(A.nzval) - n = A.cscmatrix.n # number of columns - diag = Vector{Ti}(undef, n) - point = use_vector_par(n, A.nt, Int32) - - # find diagonal entries - # - - #= - for j=1:n - for v=colptr[j]:colptr[j+1]-1 - nzval[v] = A.cscmatrix.nzval[v] - if rowval[v] == j - diag[j] = v - #break - end - #elseif rowval[v] - end - end - =# - - - @threads for tid in 1:(depth * nt + 1) - for j in start[tid]:(start[tid + 1] - 1) - for v in colptr[j]:(colptr[j + 1] - 1) - nzval[v] = A.cscmatrix.nzval[v] - if rowval[v] == j - diag[j] = v - end - #elseif rowval[v] - end - end - end - - - #@info diag[1:20]' - #@info diag[end-20:end]' - - for level in 1:depth - @threads for tid in 1:nt - for j in start[(level - 1) * nt + tid]:(start[(level - 1) * nt + tid + 1] - 1) - for v in colptr[j]:(colptr[j + 1] - 1) - point[tid][rowval[v]] = v - end - - for v in colptr[j]:(diag[j] - 1) - i = rowval[v] - for w in (diag[i] + 1):(colptr[i + 1] - 1) - k = point[tid][rowval[w]] - if k > 0 - nzval[k] -= nzval[v] * nzval[w] - end - end - end - - for v in (diag[j] + 1):(colptr[j + 1] - 1) - nzval[v] /= nzval[diag[j]] - end - - for v in colptr[j]:(colptr[j + 1] - 1) - point[tid][rowval[v]] = zero(Ti) - end - end - end - end - - #point = zeros(Ti, n) #Vector{Ti}(undef, n) - for j in start[depth * nt + 1]:(start[depth * nt + 2] - 1) - for v in colptr[j]:(colptr[j + 1] - 1) - point[1][rowval[v]] = v - end - - for v in colptr[j]:(diag[j] - 1) - i = rowval[v] - for w in (diag[i] + 
1):(colptr[i + 1] - 1) - k = point[1][rowval[w]] - if k > 0 - nzval[k] -= nzval[v] * nzval[w] - end - end - end - - for v in (diag[j] + 1):(colptr[j + 1] - 1) - nzval[v] /= nzval[diag[j]] - end - - for v in colptr[j]:(colptr[j + 1] - 1) - point[1][rowval[v]] = zero(Ti) - end - end - - #nzval, diag - return PILUAMPrecon{Tv, Ti}(diag, nzval, A.cscmatrix, start, nt, depth) -end - -function forward_subst_old!(y, v, nzval, diag, start, nt, depth, A) - #@info "pfso, $(sum(nzval)), $(sum(nzval.^2)), $(sum(diag)), $(A[1,1])" - #@info "fwo" - n = A.n - colptr = A.colptr - rowval = A.rowval - - y .= 0 - - for level in 1:depth - @threads for tid in 1:nt - @inbounds for j in start[(level - 1) * nt + tid]:(start[(level - 1) * nt + tid + 1] - 1) - y[j] += v[j] - for v in (diag[j] + 1):(colptr[j + 1] - 1) - y[rowval[v]] -= nzval[v] * y[j] - end - end - end - end - - return @inbounds for j in start[depth * nt + 1]:(start[depth * nt + 2] - 1) - y[j] += v[j] - for v in (diag[j] + 1):(colptr[j + 1] - 1) - y[rowval[v]] -= nzval[v] * y[j] - end - end - -end - - -function backward_subst_old!(x, y, nzval, diag, start, nt, depth, A) - #@info "pbso, $(sum(nzval)), $(sum(nzval.^2)), $(sum(diag)), $(A[1,1])" - - #@info "bwo" - n = A.n - colptr = A.colptr - rowval = A.rowval - #wrk = copy(y) - - - @inbounds for j in (start[depth * nt + 2] - 1):-1:start[depth * nt + 1] - x[j] = y[j] / nzval[diag[j]] - - for i in colptr[j]:(diag[j] - 1) - y[rowval[i]] -= nzval[i] * x[j] - end - - end - - for level in depth:-1:1 - @threads for tid in 1:nt - @inbounds for j in (start[(level - 1) * nt + tid + 1] - 1):-1:start[(level - 1) * nt + tid] - x[j] = y[j] / nzval[diag[j]] - for i in colptr[j]:(diag[j] - 1) - y[rowval[i]] -= nzval[i] * x[j] - end - end - end - end - - return -end - - -function ldiv!(x, ILU::PILUAMPrecon, b) - #@info "piluam ldiv 1" - nzval = ILU.nzval - diag = ILU.diag - A = ILU.A - start = ILU.start - nt = ILU.nt - depth = ILU.depth - y = copy(b) - #forward_subst!(y, b, ILU) - forward_subst_old!(y, b, nzval, diag, start, nt, depth, A) - backward_subst_old!(x, y, nzval, diag, start, nt, depth, A) - #@info "PILUAM:", b[1], y[1], x[1], maximum(abs.(b-A*x)), nnz(A) #, A[10,10] - #@info "PILUAM:", maximum(abs.(b-A*x)), b[1], x[1], maximum(abs.(b)), maximum(abs.(x)) - return x -end - -function ldiv!(ILU::PILUAMPrecon, b) - #@info "piluam ldiv 2" - nzval = ILU.nzval - diag = ILU.diag - A = ILU.A - start = ILU.start - nt = ILU.nt - depth = ILU.depth - y = copy(b) - #forward_subst!(y, b, ILU) - forward_subst_old!(y, b, nzval, diag, start, nt, depth, A) - backward_subst_old!(b, y, nzval, diag, start, nt, depth, A) - return b -end - -function \(ilu::PILUAMPrecon{T, N}, b) where {T, N <: Integer} - x = copy(b) - ldiv!(x, ilu, b) - return x -end - -function nnz(ilu::PILUAMPrecon{T, N}) where {T, N <: Integer} - return length(ilu.nzval) -end - -#end diff --git a/src/experimental/ExtendableSparseMatrixParallel/piluam.jl b/src/experimental/ExtendableSparseMatrixParallel/piluam.jl deleted file mode 100644 index 9b5ce76..0000000 --- a/src/experimental/ExtendableSparseMatrixParallel/piluam.jl +++ /dev/null @@ -1,35 +0,0 @@ -mutable struct PILUAMPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrixParallel - factorization::PILUAMPrecon - phash::UInt64 - function PILUAMPreconditioner() - p = new() - p.phash = 0 - return p - end -end - -""" -``` -PILUAMPreconditioner() -PILUAMPreconditioner(matrix) -``` -Incomplete LU preconditioner with zero fill-in using ... . 
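A zero fill-in factorization keeps exactly the sparsity pattern of the matrix. As a point of reference for the sequential case, a minimal sketch using ILUZero.jl; `ilu0` and the three-argument `ldiv!` are assumed from that package, and the tridiagonal test matrix is made up for illustration:

```
using SparseArrays, LinearAlgebra, ILUZero

n = 100
A = spdiagm(-1 => fill(-1.0, n - 1), 0 => fill(2.0, n), 1 => fill(-1.0, n - 1))
b = ones(n)

LU = ILUZero.ilu0(A)   # incomplete L and U with the sparsity pattern of A
y = similar(b)
ldiv!(y, LU, b)        # one preconditioner application: y ≈ A \ b only approximately
@show norm(A * y - b)
```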
This preconditioner -also calculates and stores updates to the off-diagonal entries and thus delivers better convergence than the [`ILU0Preconditioner`](@ref). -""" -function PILUAMPreconditioner end - -function update!(p::PILUAMPreconditioner) - #@warn "Should flush now", nnz_noflush(p.A) - flush!(p.A) - if p.A.phash != p.phash - p.factorization = piluAM(p.A) - p.phash = p.A.phash - else - piluAM!(p.factorization, p.A) - end - return p -end - -allow_views(::PILUAMPreconditioner) = true -allow_views(::Type{PILUAMPreconditioner}) = true diff --git a/src/experimental/ExtendableSparseMatrixParallel/preparatory.jl b/src/experimental/ExtendableSparseMatrixParallel/preparatory.jl deleted file mode 100644 index 1e9cc60..0000000 --- a/src/experimental/ExtendableSparseMatrixParallel/preparatory.jl +++ /dev/null @@ -1,936 +0,0 @@ -""" -`function preparatory_multi_ps_less_reverse(mat_cell_node, nc, nn, nt, depth)` - -`nm` is the number of nodes in each dimension (Examples: 2d: nm = (100,100) -> 100 x 100 grid, 3d: nm = (50,50,50) -> 50 x 50 x 50 grid). -`nt` is the number of threads. -`depth` is the number of partition layers, for depth=1, there are nt parts and 1 separator, for depth=2, the separator is partitioned again, leading to 2*nt+1 submatrices... -To assemble the system matrix parallelly, things such as `cellsforpart` (= which thread takes which cells) need to be computed in advance. This is done here. - -This should be somewhere else, longterm -""" -function preparatory_multi_ps_less_reverse( - mat_cell_node, nc, nn, nt, depth, Ti; - sequential = false, assembly = :cellwise, - minsize_sepa = 10, do_print = false, check_partition = false, ne = 0, ce = [], mat_edge_node = [], block_struct = true - ) - #grid = getgrid(nm; x0, x1) - adepth = 0 - if sequential - (allcells, start, cellparts, adepth) = grid_to_graph_cellwise_nogrid!(mat_cell_node, nc, nn, nt, depth; minsize_sepa, do_print) #) - else - (allcells, start, cellparts, adepth) = grid_to_graph_cellwise_par_nogrid!(mat_cell_node, nc, nn, nt, depth; minsize_sepa, do_print) - end - - if (adepth != depth) && do_print - @info "The requested depth of partitioning is too high. The depth is set to $adepth." - end - depth = adepth - - if assembly == :cellwise - cfp = bettercellsforpart(cellparts, depth * nt + 1) - - else - edgeparts = edgewise_partition_from_cellwise_partition(nc, ne, ce, cellparts) - cfp = bettercellsforpart(edgeparts, depth * nt + 1) - end - - - if check_partition - if assembly == :cellwise - validate_partition(nn, mat_cell_node, cellparts, start, allcells, nt, depth, assembly) - else - validate_partition(nn, mat_edge_node, cellparts, start, allcells, nt, depth, assembly) - end - end - - #@info length.(cfp) - #@info minimum(cellparts), maximum(cellparts), nt, depth - - (nnts, s, onr, gi, ni, rni, starts) = get_nnnts_and_sortednodesperthread_and_noderegs_from_cellregs_ps_less_reverse_nopush( - cellparts, allcells, start, nn, Ti, nt, depth; block_struct - ) - - - return nnts, s, onr, cfp, gi, ni, rni, starts, cellparts, adepth -end - - -""" -`function get_nnnts_and_sortednodesperthread_and_noderegs_from_cellregs_ps_less_reverse_nopush(cellregs, allcells, start, nn, Ti, nt)` - -After the cellregions (partitioning of the grid) of the grid have been computed, other things have to be computed, such as `sortednodesperthread` a depth+1 x num_nodes matrix, here `sortednodesperthreads[i,j]` is the point at which the j-th node appears in the i-th level matrix in the corresponding submatrix. 
-`cellregs` contains the partition for each cell. -Furthermore, `nnts` (number of nodes of the threads) is computed, which contain for each thread the number of nodes that are contained in the cells of that thread. -`allcells` and `start` together behave like the rowval and colptr arrays of a CSC matrix, such that `allcells[start[j]:start[j+1]-1]` are all cells that contain the j-th node. -`nn` is the number of nodes in the grid. -`Ti` is the type (Int64,...) of the elements in the created arrays. -`nt` is the number of threads. -`block_struct=true` means, the matrix should be reordered two have a block structure, this is necessary for parallel ILU, for `false`, the matrix is not reordered -""" -function get_nnnts_and_sortednodesperthread_and_noderegs_from_cellregs_ps_less_reverse_nopush(cellregs, allcells, start, nn, Ti, nt, depth; block_struct = true) - - #num_matrices = maximum(cellregs) - #depth = Int(floor((num_matrices-1)/nt)) - - #loop over each node, get the cellregion of the cell (the one not in the separator) write the position of that node inside the cellregions sorted ranking into a long vector - #nnts = [zeros(Ti, nt+1) for i=1:depth+1] - nnts = zeros(Ti, nt) - #noderegs_max_tmp = 0 - old_noderegions = zeros(Ti, (depth + 1, nn)) - - # Count nodes per thread: - tmp = zeros(depth + 1) - for j in 1:nn - cells = @view allcells[start[j]:(start[j + 1] - 1)] - sortedcellregs = unique(sort(cellregs[cells])) - #tmp = [] - tmpctr = 1 - for cr in sortedcellregs - crmod = (cr - 1) % nt + 1 - level = Int(ceil(cr / nt)) - #nnts[crmod] += 1 - old_noderegions[level, j] = crmod - if !(crmod in tmp[1:(tmpctr - 1)]) - nnts[crmod] += 1 - #sortednodesperthread[crmod,j] = nnts[crmod] #nnts[i][cr] - #push!(tmp, crmod) - if tmpctr > depth + 1 - @info "Cellregs: ", sortedcellregs - @info "Levels : ", Int.(ceil.(sortedcellregs / nt)) - @info "PartsMod: ", ((sortedcellregs .- 1) .% nt) .+ 1 - end - tmp[tmpctr] = crmod - tmpctr += 1 - end - end - end - - # Reorder inidices to receive a block structure: - # Taking the original matrix [a_ij] and mapping each i and j to new_indices[i] and new_indices[j], gives a block structure - # the reverse is also defined rev_new_indices[new_indices[k]] = k - # From now on we will only use this new ordering - counter_for_reorder = zeros(Ti, depth * nt + 1) - for j in 1:nn - level, reg = last_nz(old_noderegions[:, j]) - counter_for_reorder[(level - 1) * nt + reg] += 1 #(reg-1)*depth + level] += 1 - end - - starts = vcat([0], cumsum(counter_for_reorder)) - counter_for_reorder2 = zeros(Ti, depth * nt + 1) - new_indices = Vector{Ti}(undef, nn) - rev_new_indices = Vector{Ti}(undef, nn) - origin = Vector{Ti}(undef, nn) - for j in 1:nn - level, reg = last_nz(old_noderegions[:, j]) - counter_for_reorder2[(level - 1) * nt + reg] += 1 - origin[j] = reg - new_indices[j] = starts[(level - 1) * nt + reg] + counter_for_reorder2[(level - 1) * nt + reg] - rev_new_indices[new_indices[j]] = j - end - starts .+= 1 - - if !block_struct - new_indices = collect(1:nn) - rev_new_indices = collect(1:nn) - starts = [] - end - - # Build sortednodesperthread and globalindices array: - # They are inverses of each other: globalindices[tid][sortednodeperthread[tid][j]] = j - # Note that j has to be a `new index` - - sortednodesperthread = zeros(Ti, (nt, nn)) #vvcons(Ti, nnts) - globalindices = vvcons(Ti, nnts) - gictrs = zeros(Ti, nt) - - for nj in 1:nn - oj = rev_new_indices[nj] - cells = @view allcells[start[oj]:(start[oj + 1] - 1)] - sortedcellregs = unique(sort(cellregs[cells])) - #tmp = [] - 
tmpctr = 1 - for cr in sortedcellregs - crmod = (cr - 1) % nt + 1 - #level = Int(ceil(cr/nt)) - if !(crmod in tmp[1:(tmpctr - 1)]) - gictrs[crmod] += 1 # , level] += 1 - sortednodesperthread[crmod, nj] = gictrs[crmod] - globalindices[crmod][gictrs[crmod]] = nj - #push!(tmp, crmod) - tmp[tmpctr] = crmod - tmpctr += 1 - end - end - end - - return nnts, sortednodesperthread, old_noderegions, globalindices, new_indices, rev_new_indices, starts -end - - -""" -`function separate!(cellregs, nc, ACSC, nt, level0, ctr_sepanodes)` - -This function partitions the separator, which is done if `depth`>1 (see `grid_to_graph_ps_multi!` and/or `preparatory_multi_ps`). -`cellregs` contains the regions/partitions/colors of each cell. -`nc` is the number of cells in the grid. -`ACSC` is the adjacency matrix of the graph of the (separator-) grid (vertex in graph is cell in grid, edge in graph means two cells share a node) stored as a CSC. -`nt` is the number of threads. -`level0` is the separator-partitoning level, if the (first) separator is partitioned, level0 = 1, in the next iteration, level0 = 2... -`preparatory_multi_ps` is the number of separator-cells. -""" -function separate!(cellregs, ACSC, nt, level0, ctr_sepanodes, ri, gi, do_print) - # current number of cells treated - nc2 = size(ACSC, 1) - - indptr = collect(1:(nc2 + 1)) - indices = zeros(Int64, nc2) - rowval = zeros(Int64, nc2) - - indptrT = collect(1:(ctr_sepanodes + 1)) - indicesT = zeros(Int64, ctr_sepanodes) - rowvalT = zeros(Int64, ctr_sepanodes) - - for i in 1:ctr_sepanodes - j = ri[i] - indices[j] = i - indicesT[i] = j - rowval[j] = 1 - rowvalT[i] = 1 - end - - - R = SparseMatrixCSC(ctr_sepanodes, nc2, indptr, indices, rowval) - RT = SparseMatrixCSC(nc2, ctr_sepanodes, indptrT, indicesT, rowvalT) - # current adjacency matrix, taken as a part of the given one ACSC - RART = dropzeros(R) * ACSC * dropzeros(RT) - - cellregs2 = Metis.partition(RART, nt) - - - for i in 1:ctr_sepanodes - if cellregs[gi[i]] < level0 * nt + 1 - @warn "cell treated in this iteration was not a separator-cell last iteration" - end - cellregs[gi[i]] = level0 * nt + cellregs2[i] - end - - # how many cells are in the separator of the new partition (which is only computed on the separator of the old partition) - new_ctr_sepanodes = 0 - ri2 = Vector{Int64}(undef, ctr_sepanodes) - gi2 = Vector{Int64}(undef, ctr_sepanodes) - - for tid in 1:nt - for i in 1:ctr_sepanodes - if cellregs2[i] == tid - neighbors = RART.rowval[RART.colptr[i]:(RART.colptr[i + 1] - 1)] - rows = gi[vcat(neighbors, [i])] - #counts how many different regions (besides) the separator are adjacent to the current cell - x = how_many_different_below(cellregs[rows], (level0 + 1) * nt + 1) - if x > 1 - cellregs[gi[i]] = (level0 + 1) * nt + 1 - new_ctr_sepanodes += 1 - gi2[new_ctr_sepanodes] = gi[i] - ri2[new_ctr_sepanodes] = i - end - end - end - end - - - ri2 = ri2[1:new_ctr_sepanodes] - gi2 = gi2[1:new_ctr_sepanodes] - - if do_print - @info "At level $(level0 + 1), we found $new_ctr_sepanodes cells that have to be treated in the next iteration!" - end - - return RART, new_ctr_sepanodes, ri2, gi2 -end - - -""" -`function grid_to_graph_ps_multi_nogrid!(nc, nn, mat_cell_node, nt, depth)` - -The function assigns colors/partitions to each cell in the `grid`. First, the grid is partitioned into `nt` partitions. If `depth` > 1, the separator is partitioned again... -The grid is specified by nc (number of cells), nn (number of nodes) and the mat_cell_node (i.e. grid[CellNodes] if ExtendableGrids is used). 
-Here, `mat_cell_node[k,i]` is the i-th node in the k-th cell. -`nt` is the number of threads. -`depth` is the number of partition layers, for depth=1, there are nt parts and 1 separator, for depth=2, the separator is partitioned again, leading to 2*nt+1 submatrices... -""" -function grid_to_graph_cellwise_nogrid!(nc, nn, mat_cell_node, nt, depth; minsize_sepa = 10, do_print = false) - A = SparseMatrixLNK{Int64, Int64}(nc, nc) - number_cells_per_node = zeros(Int64, nn) - for j in 1:nc - for node_id in mat_cell_node[:, j] - number_cells_per_node[node_id] += 1 - end - end - allcells = zeros(Int64, sum(number_cells_per_node)) - start = ones(Int64, nn + 1) - start[2:end] += cumsum(number_cells_per_node) - number_cells_per_node .= 0 - for j in 1:nc - for node_id in mat_cell_node[:, j] - allcells[start[node_id] + number_cells_per_node[node_id]] = j - number_cells_per_node[node_id] += 1 - end - end - - for j in 1:nn - cells = @view allcells[start[j]:(start[j + 1] - 1)] - for (i, id1) in enumerate(cells) - for id2 in cells[(i + 1):end] - A[id1, id2] = 1 - A[id2, id1] = 1 - end - end - end - - ACSC = SparseArrays.SparseMatrixCSC(A) - - partition = Metis.partition(ACSC, nt) - cellregs = copy(partition) - - sn = Vector{Int64}(undef, nc) - gi = Vector{Int64}(undef, nc) - ctr_sepanodes = 0 - - for tid in 1:nt - for j in 1:nc - if cellregs[j] == tid - rows = vcat(ACSC.rowval[ACSC.colptr[j]:(ACSC.colptr[j + 1] - 1)], [j]) - if how_many_different_below(cellregs[rows], nt + 1) > 1 - cellregs[j] = nt + 1 #+ctr_sepanodes - ctr_sepanodes += 1 - sn[ctr_sepanodes] = j - gi[ctr_sepanodes] = j - end - end - end - end - - sn = sn[1:ctr_sepanodes] - gi = gi[1:ctr_sepanodes] - - if do_print - @info "At level $(1), we found $ctr_sepanodes cells that have to be treated in the next iteration!" - end - - RART = copy(ACSC) - actual_depth = 1 - for level in 1:(depth - 1) - RART, ctr_sepanodes, sn, gi = separate!(cellregs, RART, nt, level, ctr_sepanodes, sn, gi, do_print) - actual_depth += 1 - if ctr_sepanodes < minsize_sepa - break - end - end - - return allcells, start, cellregs, actual_depth -end - - -""" -`function grid_to_graph_ps_multi_par_nogrid!(nc, nn, mat_cell_node, nt, depth)` - -Same result as `grid_to_graph_ps_multi_nogrid!`, but computed on multiple threads. 
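The core step described here is small enough to sketch: assemble the cell-to-cell connectivity graph (two cells are neighbors when they share a node) and hand it to Metis. The 4-cell chain below is made up for illustration; `Metis.partition(ACSC, nt)` is the same call these functions use, and Metis.jl is assumed to be available:

```
using SparseArrays, Metis

# mat_cell_node[k, i] is the k-th node of cell i (tiny 1D chain: 4 cells, 5 nodes)
mat_cell_node = [1 2 3 4;
                 2 3 4 5]
nc, nn, nt = 4, 5, 2

# two cells are adjacent in the cell graph if they share at least one node
I_, J_ = Int[], Int[]
for j in 1:nn
    cells = findall(i -> j in view(mat_cell_node, :, i), 1:nc)
    for a in cells, b in cells
        a != b && (push!(I_, a); push!(J_, b))
    end
end
ACSC = sparse(I_, J_, ones(Int, length(I_)), nc, nc)

cellregs = Metis.partition(ACSC, nt)   # one part id in 1:nt per cell
# Cells whose neighborhood touches more than one part become separator cells,
# exactly as in the loops over `cellregs` in these functions.
```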
-""" -function grid_to_graph_cellwise_par_nogrid!(cn, nc, nn, nt, depth; minsize_sepa = 10, do_print = false) - As = [ExtendableSparseMatrix{Int64, Int64}(nc, nc) for tid in 1:nt] - number_cells_per_node = zeros(Int64, nn) - - - for j in 1:nc - tmp = view(cn, :, j) - for node_id in tmp - number_cells_per_node[node_id] += 1 - end - end - - - allcells = zeros(Int64, sum(number_cells_per_node)) - start = ones(Int64, nn + 1) - start[2:end] += cumsum(number_cells_per_node) - number_cells_per_node .= 0 - - for j in 1:nc - tmp = view(cn, :, j) - for node_id in tmp - allcells[start[node_id] + number_cells_per_node[node_id]] = j - number_cells_per_node[node_id] += 1 - end - end - - node_range = get_starts(nn, nt) - Threads.@threads for tid in 1:nt - for j in node_range[tid]:(node_range[tid + 1] - 1) - cells = @view allcells[start[j]:(start[j + 1] - 1)] - l = length(cells) - for (i, id1) in enumerate(cells) - ce = view(cells, (i + 1):l) - for id2 in ce - As[tid][id1, id2] = 1 - As[tid][id2, id1] = 1 - end - end - end - ExtendableSparse.flush!(As[tid]) - end - - ACSC = add_all_par!(As).cscmatrix - - cellregs = Metis.partition(ACSC, nt) - - sn = [Vector{Int64}(undef, Int(ceil(nc / nt))) for tid in 1:nt] - ctr_sepanodess = zeros(Int64, nt) - - @threads for tid in 1:nt - for j in 1:nc - if cellregs[j] == tid - rows = vcat(ACSC.rowval[ACSC.colptr[j]:(ACSC.colptr[j + 1] - 1)], [j]) - if how_many_different_below(cellregs[rows], nt + 1) > 1 - cellregs[j] = nt + 1 #+ctr_sepanodes - ctr_sepanodess[tid] += 1 - sn[tid][ctr_sepanodess[tid]] = j - end - end - end - end - - for tid in 1:nt - sn[tid] = sn[tid][1:ctr_sepanodess[tid]] - end - ctr_sepanodes = sum(ctr_sepanodess) - sn = vcat(sn...) - gi = copy(sn) - - if do_print - @info "At level $(1), we found $ctr_sepanodes cells that have to be treated in the next iteration!" 
- end - - RART = ACSC - actual_depth = 1 - for level in 1:(depth - 1) - RART, ctr_sepanodes, sn, gi = separate!(cellregs, RART, nt, level, ctr_sepanodes, sn, gi, do_print) - actual_depth += 1 - if ctr_sepanodes < minsize_sepa - break - end - end - - #grid[CellRegions] = cellregs - #grid - return allcells, start, cellregs, actual_depth -end - -""" -function grid_to_graph_cellwise!(grid, nt, depth; minsize_sepa=10, do_print=false) - A = SparseMatrixLNK{Int64, Int64}(num_cells(grid), num_cells(grid)) - number_cells_per_node = zeros(Int64, num_nodes(grid)) - for j=1:num_cells(grid) - for node_id in grid[CellNodes][:,j] - number_cells_per_node[node_id] += 1 - end - end - allcells = zeros(Int64, sum(number_cells_per_node)) - start = ones(Int64, num_nodes(grid)+1) - start[2:end] += cumsum(number_cells_per_node) - number_cells_per_node .= 0 - for j=1:num_cells(grid) - for node_id in grid[CellNodes][:,j] - allcells[start[node_id] + number_cells_per_node[node_id]] = j - number_cells_per_node[node_id] += 1 - end - end - - for j=1:num_nodes(grid) - cells = @view allcells[start[j]:start[j+1]-1] - for (i,id1) in enumerate(cells) - for id2 in cells[i+1:end] - A[id1,id2] = 1 - A[id2,id1] = 1 - end - end - end - - ACSC = SparseArrays.SparseMatrixCSC(A) - - partition = Metis.partition(ACSC, nt) - cellregs = copy(partition) - - sn = Vector{Int64}(undef, num_cells(grid)) - gi = Vector{Int64}(undef, num_cells(grid)) - ctr_sepanodes = 0 - - for tid=1:nt - for j=1:num_cells(grid) - if cellregs[j] == tid - rows = vcat(ACSC.rowval[ACSC.colptr[j]:(ACSC.colptr[j+1]-1)], [j]) - if how_many_different_below(cellregs[rows], nt+1) > 1 - cellregs[j] = nt+1 #+ctr_sepanodes - ctr_sepanodes += 1 - sn[ctr_sepanodes] = j - gi[ctr_sepanodes] = j - end - end - end - end - - sn = sn[1:ctr_sepanodes] - gi = gi[1:ctr_sepanodes] - - if do_print - @info "At level (1), we found ctr_sepanodes cells that have to be treated in the next iteration!" 
- end - - RART = copy(ACSC) - actual_depth = 1 - for level=1:depth-1 - RART, ctr_sepanodes, sn, gi = separate!(cellregs, RART, nt, level, ctr_sepanodes, sn, gi, do_print) - actual_depth += 1 - if ctr_sepanodes < minsize_sepa - break - end - end - - return allcells, start, cellregs, actual_depth -end - - -function grid_to_graph_edgewise!(grid, nt, depth; minsize_sepa=10, do_print=false) - ce = grid[CellEdges] - A = SparseMatrixLNK{Int64, Int64}(num_edges(grid), num_edges(grid)) - number_edges_per_node = zeros(Int64, num_nodes(grid)) - - for i=1:num_edges(grid) - for node_id in grid[EdgeNodes][:,i] - number_edges_per_node[node_id] += 1 - end - end - - alledges = zeros(Int64, sum(number_edges_per_node)) - start = ones(Int64, num_nodes(grid)+1) - start[2:end] += cumsum(number_edges_per_node) - number_edges_per_node .= 0 - - for j=1:num_edges(grid) - for node_id in grid[EdgeNodes][:,j] - alledges[start[node_id] + number_edges_per_node[node_id]] = j - number_edges_per_node[node_id] += 1 - end - end - - for j=1:num_nodes(grid) - edges = @view alledges[start[j]:start[j+1]-1] - for (i,id1) in enumerate(edges) - for id2 in edges[i+1:end] - A[id1,id2] = 1 - A[id2,id1] = 1 - end - end - end - - ACSC = SparseArrays.SparseMatrixCSC(A) - - partition = Metis.partition(ACSC, nt) - - sn = Vector{Int64}(undef, num_edges(grid)) - gi = Vector{Int64}(undef, num_edges(grid)) - ctr_sepanodes = 0 - - edgeregs = copy(partition) - for tid=1:nt - for j=1:num_edges(grid) - if edgeregs[j] == tid - rows = vcat(ACSC.rowval[ACSC.colptr[j]:(ACSC.colptr[j+1]-1)], [j]) - if how_many_different_below(edgeregs[rows], nt+1) > 1 - edgeregs[j] = nt+1 #+ctr_sepanodes - ctr_sepanodes += 1 - sn[ctr_sepanodes] = j - gi[ctr_sepanodes] = j - end - end - end - end - - sn = sn[1:ctr_sepanodes] - gi = gi[1:ctr_sepanodes] - - if do_print - @info "At level (1), we found ctr_sepanodes cells that have to be treated in the next iteration!" 
- end - - RART = copy(ACSC) - actual_depth = 1 - for level=1:depth-1 - RART, ctr_sepanodes, sn, gi = separate!(edgeregs, RART, nt, level, ctr_sepanodes, sn, gi, do_print) - actual_depth += 1 - if ctr_sepanodes < minsize_sepa - break - end - end - - return alledges, start, edgeregs, actual_depth -end - -function grid_to_graph_edgewise_par!(grid, nt, depth; minsize_sepa=10, do_print=false) - ce = grid[CellEdges] - cn = grid[EdgeNodes] - - As = [ExtendableSparseMatrix{Int64, Int64}(num_edges(grid), num_edges(grid)) for tid=1:nt] - number_edges_per_node = zeros(Int64, num_nodes(grid)) - - - for j=1:num_edges(grid) - tmp = view(cn, :, j) - for node_id in tmp - number_edges_per_node[node_id] += 1 - end - end - - - alledges = zeros(Int64, sum(number_edges_per_node)) - start = ones(Int64, num_nodes(grid)+1) - start[2:end] += cumsum(number_edges_per_node) - number_edges_per_node .= 0 - - for j=1:num_edges(grid) - tmp = view(cn, :, j) - for node_id in tmp - alledges[start[node_id] + number_edges_per_node[node_id]] = j - number_edges_per_node[node_id] += 1 - end - end - - node_range = get_starts(num_nodes(grid), nt) - Threads.@threads for tid=1:nt - for j in node_range[tid]:node_range[tid+1]-1 - edges = @view alledges[start[j]:start[j+1]-1] - l = length(edges) - for (i,id1) in enumerate(edges) - ce = view(edges, i+1:l) - for id2 in ce - As[tid][id1,id2] = 1 - As[tid][id2,id1] = 1 - - end - end - end - ExtendableSparse.flush!(As[tid]) - end - - ACSC = add_all_par!(As).cscmatrix - - cellregs = Metis.partition(ACSC, nt) - - sn = [Vector{Int64}(undef, Int(ceil(num_cells(grid)/nt))) for tid=1:nt] - ctr_sepanodess = zeros(Int64, nt) - - @threads for tid=1:nt - for j=1:num_edges(grid) - if cellregs[j] == tid - rows = vcat(ACSC.rowval[ACSC.colptr[j]:(ACSC.colptr[j+1]-1)], [j]) - if how_many_different_below(cellregs[rows], nt+1) > 1 - cellregs[j] = nt+1 #+ctr_sepanodes - ctr_sepanodess[tid] += 1 - sn[tid][ctr_sepanodess[tid]] = j - end - end - end - end - - for tid=1:nt - sn[tid] = sn[tid][1:ctr_sepanodess[tid]] - end - ctr_sepanodes = sum(ctr_sepanodess) - sn = vcat(sn...) - gi = copy(sn) - - if do_print - @info "At level (1), we found ctr_sepanodes edges that have to be treated in the next iteration!" - end - - RART = ACSC - actual_depth = 1 - for level=1:depth-1 - RART, ctr_sepanodes, sn, gi = separate!(cellregs, RART, nt, level, ctr_sepanodes, sn, gi, do_print) - actual_depth += 1 - if ctr_sepanodes < minsize_sepa - break - end - end - - #grid[CellRegions] = cellregs - #grid - return alledges, start, cellregs, actual_depth -end -""" - -function edgewise_partition_from_cellwise_partition(nc, ne, ce, cellregs) - #ce = grid[CellEdges] - edgeregs = maximum(cellregs) * ones(Int64, ne) - - for icell in 1:nc - tmp = cellregs[icell] - for iedge in ce[:, icell] - if tmp < edgeregs[iedge] - edgeregs[iedge] = tmp - end - end - end - - return edgeregs -end - -""" -`function add_all_par!(As)` - -Add LNK matrices (stored in a vector) parallelly (tree structure). -The result is stored in the first LNK matrix. -""" -function add_all_par!(As) - nt = length(As) - depth = Int(floor(log2(nt))) - endpoint = nt - for level in 1:depth - - @threads for tid in 1:(2^(depth - level)) - #@info "$level, $tid" - start = tid + 2^(depth - level) - while start <= endpoint - As[tid] += As[start] - start += 2^(depth - level) - end - end - endpoint = 2^(depth - level) - end - return As[1] - -end - - -""" -`function vvcons(Ti, lengths)` - -`lengths` is a vector of integers. 
-The function creates a vector of zero vectors of type `Ti` of length `lengths[i]`. -""" -function vvcons(Ti, lengths) - x::Vector{Vector{Ti}} = [zeros(Ti, i) for i in lengths] - return x -end - - -""" -`function bettercellsforpart(xx, upper)` - -`xx` are the CellRegions (i.e. the color/partition of each cell). -`upper` is the number of partitions (upper=depth*nt+1). -The function returns a vector e.g. [v1, v2, v3, v4, v5]. -The element v1 would be the list of cells that are in partition 1 etc. -The function is basically a faster findall. -""" -function bettercellsforpart(xx, upper) - ctr = zeros(Int64, upper) - for x in xx - ctr[x] += 1 - end - cfp = vvcons(Int64, ctr) - ctr .= 1 - for (i, x) in enumerate(xx) - cfp[x][ctr[x]] = i - ctr[x] += 1 - end - return cfp -end - - -function get_starts(n, nt) - ret = ones(Int64, nt + 1) - ret[end] = n + 1 - for i in nt:-1:2 - ret[i] = ret[i + 1] - Int(round(ret[i + 1] / i)) #Int(round(n/nt))-1 - end - return ret -end - -function last_nz(x) - n = length(x) - for j in n:-1:1 - if x[j] != 0 - return (j, x[j]) - end - end - return -end - - -function how_many_different_below(x0, y; u = 0) - x = copy(x0) - z = unique(x) - t = findall(w -> w < y, z) - t = findall(w -> w > u, z[t]) - return length(t) -end - - -function lookat_grid_to_graph_ps_multi!(nm, nt, depth) - grid = getgrid(nm) - A = SparseMatrixLNK{Int64, Int64}(num_cells(grid), num_cells(grid)) - number_cells_per_node = zeros(Int64, num_nodes(grid)) - for j in 1:num_cells(grid) - for node_id in grid[CellNodes][:, j] - number_cells_per_node[node_id] += 1 - end - end - allcells = zeros(Int64, sum(number_cells_per_node)) - start = ones(Int64, num_nodes(grid) + 1) - start[2:end] += cumsum(number_cells_per_node) - number_cells_per_node .= 0 - for j in 1:num_cells(grid) - for node_id in grid[CellNodes][:, j] - allcells[start[node_id] + number_cells_per_node[node_id]] = j - number_cells_per_node[node_id] += 1 - end - end - - for j in 1:num_nodes(grid) - cells = @view allcells[start[j]:(start[j + 1] - 1)] - for (i, id1) in enumerate(cells) - for id2 in cells[(i + 1):end] - A[id1, id2] = 1 - A[id2, id1] = 1 - end - end - end - - ACSC = SparseArrays.SparseMatrixCSC(A) - - partition = Metis.partition(ACSC, nt) - cellregs = copy(partition) - - sn = [] - gi = [] - ctr_sepanodes = 0 - for j in 1:num_cells(grid) - rows = ACSC.rowval[ACSC.colptr[j]:(ACSC.colptr[j + 1] - 1)] - if minimum(partition[rows]) != maximum(partition[rows]) - cellregs[j] = nt + 1 - ctr_sepanodes += 1 - push!(sn, j) - push!(gi, j) - end - end - RART = ACSC - #sn = 1:num_cells(grid) - #gi = 1:num_cells(grid) - for level in 1:(depth - 1) - RART, ctr_sepanodes, sn, gi = separate_careful!(cellregs, num_cells(grid), RART, nt, level, ctr_sepanodes, sn, gi) - if ctr_sepanodes == 0 - return RART - end - end - - - #return allcells, start, cellregs - return RART -end - - -function adjacencies(grid) - A = SparseMatrixLNK{Int64, Int64}(num_cells(grid), num_cells(grid)) - number_cells_per_node = zeros(Int64, num_nodes(grid)) - for j in 1:num_cells(grid) - for node_id in grid[CellNodes][:, j] - number_cells_per_node[node_id] += 1 - end - end - allcells = zeros(Int64, sum(number_cells_per_node)) - start = ones(Int64, num_nodes(grid) + 1) - start[2:end] += cumsum(number_cells_per_node) - number_cells_per_node .= 0 - for j in 1:num_cells(grid) - for node_id in grid[CellNodes][:, j] - allcells[start[node_id] + number_cells_per_node[node_id]] = j - number_cells_per_node[node_id] += 1 - end - end - - for j in 1:num_nodes(grid) - cells = @view 
allcells[start[j]:(start[j + 1] - 1)] - for (i, id1) in enumerate(cells) - for id2 in cells[(i + 1):end] - A[id1, id2] = 1 - A[id2, id1] = 1 - end - end - end - - return allcells, start, SparseArrays.SparseMatrixCSC(A) -end - -function check_adjacencies(nm) - grid = getgrid(nm) - allcells, start, A = adjacencies(grid) - - i = 1 - cells1 = sort(vcat([i], A.rowval[A.colptr[i]:(A.colptr[i + 1] - 1)])) #adjacent cells - nodes2 = grid[CellNodes][:, i] - cells2 = sort(unique(vcat([allcells[start[j]:(start[j + 1] - 1)] for j in nodes2]...))) - - @info cells1 - @info cells2 - return @info maximum(abs.(cells1 - cells2)) - - -end - -#= -function check_partition(nm, nt, depth) - grid = getgrid(nm) - - (allcells, start, cellregs, adepth, ACSC) = grid_to_graph_ps_multi!(grid, nt, depth; minsize_sepa=10, do_print=true)#) - - if (adepth != depth) - @info "The requested depth of partitioning is too high. The depth is set to $adepth." - end - depth = adepth - - validate_partition(num_nodes(grid), num_cells(grid), grid, cellregs, start, allcells, nt, depth, ACSC) -end -=# - -function validate_partition(nn, mat, cellregs, start, allcells, nt, depth, assemblytype) - violation_ctr = 0 - - if assemblytype == :cellwise - key = CellNodes - else - key = EdgeNodes - end - - for j in 1:nn - cells = @view allcells[start[j]:(start[j + 1] - 1)] - sortedcellregs = unique(sort(cellregs[cells])) - levels = Int.(ceil.(sortedcellregs / nt)) - - for i in 1:(depth + 1) - ids_lev = findall(x -> x == i, levels) - if length(ids_lev) > 1 - violation_ctr += 1 - - if violation_ctr == 1 - @info "Node Id : $j (we only show one violation)" - @info "Cellregs: $sortedcellregs" - @info "Levels : $levels" - - loc = findall(x -> x == 4, Int.(ceil.(cellregs[allcells[start[j]:(start[j + 1] - 1)]] / nt))) - cells_at_level4 = allcells[loc .+ (start[j] - 1)] - @info cells_at_level4, cellregs[cells_at_level4] - @info mat[:, cells_at_level4[1]], mat[:, cells_at_level4[2]] - end - end - end - end - return @info "We found $violation_ctr violation(s)" -end diff --git a/src/experimental/ExtendableSparseMatrixParallel/struct_flush.jl b/src/experimental/ExtendableSparseMatrixParallel/struct_flush.jl deleted file mode 100644 index 73a7955..0000000 --- a/src/experimental/ExtendableSparseMatrixParallel/struct_flush.jl +++ /dev/null @@ -1,265 +0,0 @@ -function flush!(A::ExtendableSparseMatrixParallel; do_dense = false, keep_zeros = true) - _, nnzLNK = nnz_noflush(A) - - if nnzLNK == 0 - return - end - - if !do_dense - A.cscmatrix = A.cscmatrix + sparse_flush!(A; keep_zeros) - - else - if keep_zeros - A.cscmatrix = dense_flush_keepzeros!(A.lnkmatrices, A.old_noderegions, A.sortednodesperthread, A.nt, A.rev_new_indices) - else - A.cscmatrix = dense_flush_removezeros!(A.lnkmatrices, A.old_noderegions, A.sortednodesperthread, A.nt, A.rev_new_indices) - end - end - A.phash = phash(A.cscmatrix) - return A.lnkmatrices = [SuperSparseMatrixLNK{matrixvaluetype(A), matrixindextype(A)}(A.n, A.nnts[tid]) for tid in 1:A.nt] - -end - -""" -`CSC_RLNK_plusequals_less3_reordered_super!` from `plusequals.jl` -""" -function sparse_flush!(A::ExtendableSparseMatrixParallel; keep_zeros = true) - - #dropzeros!( - return plus_remap(A.lnkmatrices, A.cscmatrix, A.globalindices; keep_zeros) - #) - -end - - -""" -`CSC_RLNK_si_oc_ps_dz_less_reordered` from `conversion.jl` -""" -function dense_flush_keepzeros!( - As::Vector{SuperSparseMatrixLNK{Tv, Ti}}, - onr, s, nt, rni - ) where {Tv, Ti <: Integer} - - nnz = sum([As[i].nnz for i in 1:nt]) #you could also subtract the diagonal entries 
from shared columns, since those are definitely double - indptr = zeros(Ti, As[1].m + 1) - indices = zeros(Ti, nnz) #sum(As.nnz)) - data = zeros(Float64, nnz) #sum(As.nnz)) - ctr = 1 - eqctr = 0 - tmp = zeros(Ti, size(onr)[1]) - - #@warn [As[i].nnz for i=1:nt], [As[i].n for i=1:nt], [As[i].m for i=1:nt] - #@info maximum.([As[i].colptr for i=1:nt]) - - for nj in 1:As[1].m - indptr[nj] = ctr - oj = rni[nj] - regionctr = 1 - jc = 0 - nrr = view(onr, :, oj) - tmp .= 0 - for region in nrr #nrr #[:,j] - regmod = region #(region-1)%nt+1 - if (region > 0) & !(region in tmp) - k = s[regmod, nj] - if regionctr == 1 - while k > 0 - if As[regmod].rowval[k] != 0 - if ctr > nnz - @info "ctr > nnz, $nj, $oj" - end - indices[ctr] = As[regmod].rowval[k] - data[ctr] = As[regmod].nzval[k] - - for jcc in 1:jc - if indices[ctr - jcc] > indices[ctr - jcc + 1] - tmp_i = indices[ctr - jcc + 1] - tmp_d = data[ctr - jcc + 1] - indices[ctr - jcc + 1] = indices[ctr - jcc] - data[ctr - jcc + 1] = data[ctr - jcc] - - indices[ctr - jcc] = tmp_i - data[ctr - jcc] = tmp_d - else - break - end - end - - ctr += 1 - jc += 1 - end - k = As[regmod].colptr[k] - end - else - while k > 0 - if As[regmod].rowval[k] != 0 - indices[ctr] = As[regmod].rowval[k] - data[ctr] = As[regmod].nzval[k] - - for jcc in 1:jc - if indices[ctr - jcc] > indices[ctr - jcc + 1] - tmp_i = indices[ctr - jcc + 1] - tmp_d = data[ctr - jcc + 1] - indices[ctr - jcc + 1] = indices[ctr - jcc] - data[ctr - jcc + 1] = data[ctr - jcc] - - indices[ctr - jcc] = tmp_i - data[ctr - jcc] = tmp_d - elseif indices[ctr - jcc] == indices[ctr - jcc + 1] - data[ctr - jcc] += data[ctr - jcc + 1] - eqctr += 1 - - for jccc in 1:jcc - indices[ctr - jcc + jccc] = indices[ctr - jcc + jccc + 1] - data[ctr - jcc + jccc] = data[ctr - jcc + jccc + 1] - end - - ctr -= 1 - jc -= 1 - - break - else - break - end - end - - ctr += 1 - jc += 1 - end - k = As[regmod].colptr[k] - end - - end - tmp[regionctr] = region - regionctr += 1 - - end - - end - - end - - #@warn ctr/nnz - - indptr[end] = ctr - resize!(indices, ctr - 1) - resize!(data, ctr - 1) - - - return SparseArrays.SparseMatrixCSC( - As[1].m, As[1].m, indptr, indices, data - ) - -end - - -function dense_flush_removezeros!( - As::Vector{SuperSparseMatrixLNK{Tv, Ti}}, - onr, s, nt, rni - ) where {Tv, Ti <: Integer} - - nnz = sum([As[i].nnz for i in 1:nt]) #you could also subtract the diagonal entries from shared columns, since those are definitely double - indptr = zeros(Ti, As[1].m + 1) - indices = zeros(Ti, nnz) #sum(As.nnz)) - data = zeros(Float64, nnz) #sum(As.nnz)) - ctr = 1 - eqctr = 0 - tmp = zeros(Ti, size(onr)[1]) - - for nj in 1:As[1].m - indptr[nj] = ctr - oj = rni[nj] - regionctr = 1 - jc = 0 - nrr = view(onr, :, oj) - tmp .= 0 - for region in nrr #nrr #[:,j] - regmod = region #(region-1)%nt+1 - if (region > 0) & !(region in tmp) - k = s[regmod, nj] - if regionctr == 1 - while k > 0 - if As[regmod].nzval[k] != 0.0 - indices[ctr] = As[regmod].rowval[k] - data[ctr] = As[regmod].nzval[k] - - for jcc in 1:jc - if indices[ctr - jcc] > indices[ctr - jcc + 1] - tmp_i = indices[ctr - jcc + 1] - tmp_d = data[ctr - jcc + 1] - indices[ctr - jcc + 1] = indices[ctr - jcc] - data[ctr - jcc + 1] = data[ctr - jcc] - - indices[ctr - jcc] = tmp_i - data[ctr - jcc] = tmp_d - else - break - end - end - - ctr += 1 - jc += 1 - end - k = As[regmod].colptr[k] - end - else - while k > 0 - if As[regmod].nzval[k] != 0.0 - indices[ctr] = As[regmod].rowval[k] - data[ctr] = As[regmod].nzval[k] - - for jcc in 1:jc - if indices[ctr - jcc] > indices[ctr 
- jcc + 1] - tmp_i = indices[ctr - jcc + 1] - tmp_d = data[ctr - jcc + 1] - indices[ctr - jcc + 1] = indices[ctr - jcc] - data[ctr - jcc + 1] = data[ctr - jcc] - - indices[ctr - jcc] = tmp_i - data[ctr - jcc] = tmp_d - elseif indices[ctr - jcc] == indices[ctr - jcc + 1] - data[ctr - jcc] += data[ctr - jcc + 1] - eqctr += 1 - - for jccc in 1:jcc - indices[ctr - jcc + jccc] = indices[ctr - jcc + jccc + 1] - data[ctr - jcc + jccc] = data[ctr - jcc + jccc + 1] - end - - ctr -= 1 - jc -= 1 - - break - else - break - end - end - - ctr += 1 - jc += 1 - end - k = As[regmod].colptr[k] - end - - end - tmp[regionctr] = region - regionctr += 1 - - end - - end - - end - - #@warn ctr/nnz - - indptr[end] = ctr - resize!(indices, ctr - 1) - resize!(data, ctr - 1) - - - return SparseArrays.SparseMatrixCSC( - As[1].m, As[1].m, indptr, indices, data - ) - -end diff --git a/src/experimental/ExtendableSparseMatrixParallel/supersparse.jl b/src/experimental/ExtendableSparseMatrixParallel/supersparse.jl deleted file mode 100644 index 5dc88f0..0000000 --- a/src/experimental/ExtendableSparseMatrixParallel/supersparse.jl +++ /dev/null @@ -1,785 +0,0 @@ -mutable struct SuperSparseMatrixLNK{Tv, Ti <: Integer} <: AbstractSparseMatrix{Tv, Ti} - """ - Number of rows - """ - m::Ti - - """ - Number of columns - """ - n::Ti - - """ - Number of nonzeros - """ - nnz::Ti - - """ - Length of arrays - """ - nentries::Ti - - """ - Linked list of column entries. Initial length is n, - it grows with each new entry. - - colptr[index] contains the next - index in the list or zero, in the later case terminating the list which - starts at index 1<=j<=n for each column j. - """ - colptr::Vector{Ti} - - """ - Row numbers. For each index it contains the zero (initial state) - or the row numbers corresponding to the column entry list in colptr. - - Initial length is n, - it grows with each new entry. - """ - rowval::Vector{Ti} - - """ - Nonzero entry values corresponding to each pair - (colptr[index],rowval[index]) - - Initial length is n, it grows with each new entry. - """ - nzval::Vector{Tv} - - """ - (Unsorted) list of all columns with non-zero entries - """ - collnk::Vector{Ti} - - # counts the number of columns in use - colctr::Ti -end - - -function SparseArrays.SparseMatrixCSC(A::SuperSparseMatrixLNK{Tv, Ti})::SparseArrays.SparseMatrixCSC where {Tv, Ti <: Integer} - return SparseArrays.SparseMatrixCSC(SparseMatrixLNK{Tv, Ti}(A.m, A.n, A.nnz, A.nentries, A.colptr, A.rowval, A.nzval)) - -end - -function SuperSparseMatrixLNK{Tv, Ti}(m, n) where {Tv, Ti <: Integer} - return SuperSparseMatrixLNK{Tv, Ti}(m, n, 0, n, zeros(Ti, n), zeros(Ti, n), zeros(Tv, n), zeros(Ti, n), 0) -end - - -function ExtendableSparse.findindex(lnk::SuperSparseMatrixLNK, i, j) - if !((1 <= i <= lnk.m) & (1 <= j <= lnk.n)) - throw(BoundsError(lnk, (i, j))) - end - - k = j - k0 = j - while k > 0 - if lnk.rowval[k] == i - return k, 0 - end - k0 = k - k = lnk.colptr[k] - end - return 0, k0 -end - -""" -Return tuple containing size of the matrix. 
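The linked-list column layout documented above (the list for column `j` starts at slot `j`, and `colptr[k]` points to the next slot of the same column, with `0` terminating the list) is easiest to see on a toy example. The following standalone sketch uses hand-built arrays and a hypothetical helper name; it is not part of the package API, only an illustration of the traversal that `findindex` performs:

```
# Column 1 of a 3x3 matrix holds (1,1)=1.0 and (3,1)=2.0; its list starts at
# slot 1 and continues at slot 4 (colptr[1] == 4), where colptr[4] == 0 ends it.
colptr = [4, 0, 0, 0]
rowval = [1, 0, 0, 3]
nzval  = [1.0, 0.0, 0.0, 2.0]

function find_in_column(colptr, rowval, i, j)
    k = j
    while k > 0
        rowval[k] == i && return k   # found entry (i,j) at slot k
        k = colptr[k]                # follow the link to the next slot
    end
    return 0
end

find_in_column(colptr, rowval, 3, 1)  # returns 4, so the stored value is nzval[4] == 2.0
```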
-""" -Base.size(lnk::SuperSparseMatrixLNK) = (lnk.m, lnk.n) - -""" -Return value stored for entry or zero if not found -""" -function Base.getindex(lnk::SuperSparseMatrixLNK{Tv, Ti}, i, j) where {Tv, Ti} - k, k0 = findindex(lnk, i, j) - if k == 0 - return zero(Tv) - else - return lnk.nzval[k] - end -end - -function addentry!(lnk::SuperSparseMatrixLNK, i, j, k, k0) - # increase number of entries - lnk.nentries += 1 - if length(lnk.nzval) < lnk.nentries - newsize = Int(ceil(5.0 * lnk.nentries / 4.0)) - resize!(lnk.nzval, newsize) - resize!(lnk.rowval, newsize) - resize!(lnk.colptr, newsize) - end - - # Append entry if not found - lnk.rowval[lnk.nentries] = i - - # Shift the end of the list - lnk.colptr[lnk.nentries] = 0 - lnk.colptr[k0] = lnk.nentries - - # Update number of nonzero entries - lnk.nnz += 1 - return lnk.nentries -end - -""" -Update value of existing entry, otherwise extend matrix if v is nonzero. -""" -function Base.setindex!(lnk::SuperSparseMatrixLNK, v, i, j) - if !((1 <= i <= lnk.m) & (1 <= j <= lnk.n)) - throw(BoundsError(lnk, (i, j))) - end - - # Set the first column entry if it was not yet set. - if lnk.rowval[j] == 0 && !iszero(v) - lnk.colctr += 1 - lnk.collnk[lnk.colctr] = j - lnk.rowval[j] = i - lnk.nzval[j] = v - lnk.nnz += 1 - return lnk - end - - k, k0 = findindex(lnk, i, j) - if k > 0 - lnk.nzval[k] = v - return lnk - end - if !iszero(v) - k = addentry!(lnk, i, j, k, k0) - lnk.nzval[k] = v - end - return lnk -end - -""" -Update element of the matrix with operation `op`. -It assumes that `op(0,0)==0`. If `v` is zero, no new -entry is created. -""" -function updateindex!(lnk::SuperSparseMatrixLNK{Tv, Ti}, op, v, i, j) where {Tv, Ti} - # Set the first column entry if it was not yet set. - if lnk.rowval[j] == 0 && !iszero(v) - lnk.colctr += 1 - lnk.collnk[lnk.colctr] = j - lnk.rowval[j] = i - lnk.nzval[j] = op(lnk.nzval[j], v) - lnk.nnz += 1 - return lnk - end - k, k0 = findindex(lnk, i, j) - if k > 0 - lnk.nzval[k] = op(lnk.nzval[k], v) - return lnk - end - if !iszero(v) - k = addentry!(lnk, i, j, k, k0) - lnk.nzval[k] = op(zero(Tv), v) - end - return lnk -end - -function rawupdateindex!(lnk::SuperSparseMatrixLNK{Tv, Ti}, op, v, i, j) where {Tv, Ti} - # Set the first column entry if it was not yet set. 
- if lnk.rowval[j] == 0 - lnk.colctr += 1 - lnk.collnk[lnk.colctr] = j - lnk.rowval[j] = i - lnk.nzval[j] = op(lnk.nzval[j], v) - lnk.nnz += 1 - return lnk - end - k, k0 = findindex(lnk, i, j) - if k > 0 - lnk.nzval[k] = op(lnk.nzval[k], v) - return lnk - end - if !iszero(v) - k = addentry!(lnk, i, j, k, k0) - lnk.nzval[k] = op(zero(Tv), v) - end - return lnk -end - -#= -mutable struct ColEntry{Tv, Ti <: Integer} - rowval::Ti - nzval::Tv -end - -# Comparison method for sorting -Base.isless(x::ColEntry, y::ColEntry) = (x.rowval < y.rowval) -=# - -function get_column!(col::Vector{ColEntry{Tv, Ti}}, lnk::SuperSparseMatrixLNK{Tv, Ti}, j::Ti)::Ti where {Tv, Ti <: Integer} - k = j - ctr = zero(Ti) - while k > 0 - if abs(lnk.nzval[k]) > 0 - ctr += 1 - col[ctr] = ColEntry(lnk.rowval[k], lnk.nzval[k]) - end - k = lnk.colptr[k] - end - sort!(col, 1, ctr, Base.QuickSort, Base.Forward) - return ctr -end - - -function remove_doubles!(col, coll) - #input_ctr = 1 - last = 1 - for j in 2:coll - if col[j].rowval == col[last].rowval - col[last].nzval += col[j].nzval - else - last += 1 - if last != j - col[last] = col[j] - end - end - end - return last -end - -function get_column_removezeros!(col::Vector{ColEntry{Tv, Ti}}, lnks::Vector{SuperSparseMatrixLNK{Tv, Ti}}, js, tids, length)::Ti where {Tv, Ti <: Integer} - ctr = zero(Ti) - for i in 1:length - tid = tids[i] - k = js[i] - #for (tid,j) in zip(tids, js) #j0:j1 - #tid = tids[j] - #k = j - while k > 0 - if abs(lnks[tid].nzval[k]) > 0 - ctr += 1 - col[ctr] = ColEntry(lnks[tid].rowval[k], lnks[tid].nzval[k]) - end - k = lnks[tid].colptr[k] - end - end - - sort!(col, 1, ctr, Base.QuickSort, Base.Forward) - ctr = remove_doubles!(col, ctr) - #print_col(col, ctr) - return ctr - -end - -function get_column_keepzeros!(col::Vector{ColEntry{Tv, Ti}}, lnks::Vector{SuperSparseMatrixLNK{Tv, Ti}}, js, tids, length)::Ti where {Tv, Ti <: Integer} - ctr = zero(Ti) - for i in 1:length - tid = tids[i] - k = js[i] - #for (tid,j) in zip(tids, js) #j0:j1 - #tid = tids[j] - #k = j - while k > 0 - #if abs(lnks[tid].nzval[k]) > 0 - ctr += 1 - col[ctr] = ColEntry(lnks[tid].rowval[k], lnks[tid].nzval[k]) - #end - k = lnks[tid].colptr[k] - end - end - - sort!(col, 1, ctr, Base.QuickSort, Base.Forward) - ctr = remove_doubles!(col, ctr) - #print_col(col, ctr) - return ctr - -end - -function merge_into!(rowval::Vector{Ti}, nzval::Vector{Tv}, C::SparseArrays.SparseMatrixCSC{Tv, Ti}, col::Vector{ColEntry{Tv, Ti}}, J::Ti, coll::Ti, ptr1::Ti) where {Tv, Ti <: Integer} - j_min = 1 - numshifts = 0 - j_last = 0 - last_row = 0 - - #@warn "MERGING $J" - - #rowval0 = copy(C.rowval[C.colptr[J]:C.colptr[J+1]-1]) - #endptr = C.colptr[J+1] - - for (di, i) in enumerate(C.colptr[J]:(C.colptr[J + 1] - 1)) - for j in j_min:coll - #if col[j].rowval == last_row - # #@info "!! 
col j rowval == last row" - #end - if col[j].rowval < C.rowval[i] #ptr1+di+numshifts] #i+numshifts] - if col[j].rowval == last_row - #@info "$(ptr1+di+numshifts) : backwards EQUALITY: " - nzval[ptr1 + di + numshifts] += col[j].nzval - else - #@info "$(ptr1+di+numshifts) : Insert from col: j=$j" - #shift_e!(C.rowval, C.nzval, 1, i+numshifts, C.colptr[end]-1) - rowval[ptr1 + di + numshifts] = col[j].rowval - nzval[ptr1 + di + numshifts] = col[j].nzval - numshifts += 1 - #endptr += 1 - end - j_last = j - elseif col[j].rowval > C.rowval[i] #if col[j].rowval - #@info "$(ptr1+di+numshifts) : Insert from C: i=$i" - rowval[ptr1 + di + numshifts] = C.rowval[i] - nzval[ptr1 + di + numshifts] = C.nzval[i] - j_min = j - break - else - #@info "$(ptr1+di+numshifts) : normal EQUALITY: i=$i, j=$j" - rowval[ptr1 + di + numshifts] = C.rowval[i] - nzval[ptr1 + di + numshifts] = C.nzval[i] + col[j].nzval - #numshifts += 1 - j_min = j + 1 - j_last = j - - if j == coll - #@info "$(ptr1+di+numshifts+1) → $(ptr1+numshifts+(C.colptr[J+1]-C.colptr[J]))" - rowval[(ptr1 + di + numshifts + 1):(ptr1 + numshifts + (C.colptr[J + 1] - C.colptr[J]))] = view(C.rowval, (i + 1):(C.colptr[J + 1] - 1)) #C.rowval[i:C.colptr[J+1]-1] - nzval[(ptr1 + di + numshifts + 1):(ptr1 + numshifts + (C.colptr[J + 1] - C.colptr[J]))] = view(C.nzval, (i + 1):(C.colptr[J + 1] - 1)) #C.nzval[i:C.colptr[J+1]-1] - - #@info "FINISH" - return numshifts - end - - break - end - - if j == coll - #@info "$(ptr1+di+numshifts) → $(ptr1+numshifts+(C.colptr[J+1]-C.colptr[J]))" - rowval[(ptr1 + di + numshifts):(ptr1 + numshifts + (C.colptr[J + 1] - C.colptr[J]))] = view(C.rowval, i:(C.colptr[J + 1] - 1)) #C.rowval[i:C.colptr[J+1]-1] - nzval[(ptr1 + di + numshifts):(ptr1 + numshifts + (C.colptr[J + 1] - C.colptr[J]))] = view(C.nzval, i:(C.colptr[J + 1] - 1)) #C.nzval[i:C.colptr[J+1]-1] - - #@info "FINISH" - return numshifts - end - - last_row = col[j].rowval - end - end - endptr = ptr1 + numshifts + (C.colptr[J + 1] - C.colptr[J]) - last_row = 0 - numshifts_old = numshifts - numshifts = 0 - #start_ptr = endptr - 1 #C.colptr[J+1]-1 - if j_last > 0 - last_row = col[j_last].rowval - end - - if j_last != coll - for j in (j_last + 1):coll - if col[j].rowval != last_row - numshifts += 1 - #shift_e!(C.rowval, C.nzval, 1, start_ptr+numshifts, C.colptr[end]-1) - #for k=start_ptr+numshifts: - #@info "$(endptr+numshifts) : after..." - rowval[endptr + numshifts] = col[j].rowval - nzval[endptr + numshifts] = col[j].nzval - last_row = rowval[endptr + numshifts] - #colptr[J+1:end] .+= 1 - else - nzval[endptr + numshifts] += col[j].nzval - end - end - end - - return numshifts + numshifts_old - -end - - -function print_col(col, coll) - v = zeros((2, coll)) - for j in 1:coll - v[1, j] = col[j].rowval - v[2, j] = col[j].nzval - end - return @info v -end - - -""" -$(SIGNATURES) - -Add the matrices `lnks` of type SuperSparseMatrixLNK onto the SparseMatrixCSC `csc`. -`gi[i]` maps the indices in `lnks[i]` to the indices of `csc`. -""" -function plus_remap(lnks::Vector{SuperSparseMatrixLNK{Tv, Ti}}, csc::SparseArrays.SparseMatrixCSC, gi::Vector{Vector{Ti}}; keep_zeros = true) where {Tv, Ti <: Integer} - nt = length(lnks) - - if keep_zeros - get_col! = get_column_keepzeros! - else - get_col! = get_column_removezeros! - end - lnkscols = vcat([lnks[i].collnk[1:lnks[i].colctr] for i in 1:nt]...) - supersparsecolumns = vcat([gi[i][lnks[i].collnk[1:lnks[i].colctr]] for i in 1:nt]...) 
- num_cols = sum([lnks[i].colctr for i in 1:nt]) - tids = Vector{Ti}(undef, num_cols) - ctr = 0 - for i in 1:nt - for j in 1:lnks[i].colctr - ctr += 1 - tids[ctr] = i - end - end - - - sortedcolumnids = sortperm(supersparsecolumns) - sortedcolumns = supersparsecolumns[sortedcolumnids] - sortedcolumns = vcat(sortedcolumns, [Ti(csc.n + 1)]) - - coll = sum([lnks[i].nnz for i in 1:nt]) - nnz_sum = length(csc.rowval) + coll - colptr = Vector{Ti}(undef, csc.n + 1) - rowval = Vector{Ti}(undef, nnz_sum) - nzval = Vector{Tv}(undef, nnz_sum) - colptr[1] = one(Ti) - - if csc.m < coll - coll = csc.m - end - - col = [ColEntry{Tv, Ti}(0, zero(Tv)) for i in 1:coll] - numshifts = 0 - - colptr[1:sortedcolumns[1]] = view(csc.colptr, 1:sortedcolumns[1]) - rowval[1:(csc.colptr[sortedcolumns[1]] - 1)] = view(csc.rowval, 1:(csc.colptr[sortedcolumns[1]] - 1)) - nzval[1:(csc.colptr[sortedcolumns[1]] - 1)] = view(csc.nzval, 1:(csc.colptr[sortedcolumns[1]] - 1)) - - J = 1 - i0 = 0 - #lj_last = [] - #tid_last = [] - lj_last = Vector{Ti}(undef, nt) - tid_last = Vector{Ti}(undef, nt) - ctr_last = 1 - gj_last = 0 - for J in 1:(length(sortedcolumns) - 1) - gj_now = sortedcolumns[J] - gj_next = sortedcolumns[J + 1] - - lj_last[ctr_last] = lnkscols[sortedcolumnids[J]] - tid_last[ctr_last] = tids[sortedcolumnids[J]] - - if gj_now != gj_next - #@info typeof(lnks) - # do stuff from gj_last to gj_now / from last_lj to J - #@info lj_last, tid_last - coll = get_col!(col, lnks, lj_last, tid_last, ctr_last) - - nns = merge_into!(rowval, nzval, csc, col, gj_now, coll, colptr[gj_now] - one(Ti)) - numshifts += nns - - - colptr[(gj_now + 1):sortedcolumns[J + 1]] = csc.colptr[(gj_now + 1):sortedcolumns[J + 1]] .+ (-csc.colptr[gj_now] + colptr[gj_now] + nns) - - rowval[colptr[gj_now + 1]:(colptr[sortedcolumns[J + 1]] - 1)] = view(csc.rowval, csc.colptr[gj_now + 1]:(csc.colptr[sortedcolumns[J + 1]] - 1)) - nzval[colptr[gj_now + 1]:(colptr[sortedcolumns[J + 1]] - 1)] = view(csc.nzval, csc.colptr[gj_now + 1]:(csc.colptr[sortedcolumns[J + 1]] - 1)) - - #rowval[colptr[gj_now+1]:colptr[sortedcolumns[J+1]]-1] = csc.rowval[csc.colptr[gj_now+1]:csc.colptr[sortedcolumns[J+1]]-1] - #nzval[colptr[gj_now+1]:colptr[sortedcolumns[J+1]]-1] = csc.nzval[csc.colptr[gj_now+1]:csc.colptr[sortedcolumns[J+1]]-1] - - - #for k=csc.colptr[gj_now+1]:csc.colptr[sortedcolumns[J+1]]-1 - # k2 = k+(-csc.colptr[gj_now]+colptr[gj_now] + nns) - # rowval[k2] = csc.rowval[k] - # nzval[k2] = csc.nzval[k] - #end - - gj_last = gj_now - ctr_last = 0 #tids[sortedcolumnids[J]]] - end - - ctr_last += 1 - - - end - - - resize!(rowval, length(csc.rowval) + numshifts) - resize!(nzval, length(csc.rowval) + numshifts) - - - return SparseArrays.SparseMatrixCSC(csc.m, csc.n, colptr, rowval, nzval) - - - #for ... - # take many columns together if necessary in `get_column` - #end - - -end - - -""" -$(SIGNATURES) - -Add the SuperSparseMatrixLNK `lnk` onto the SparseMatrixCSC `csc`. -`gi` maps the indices in `lnk` to the indices of `csc`. 
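The role of the `gi` index remapping used by both `plus_remap` methods can be illustrated with plain `SparseArrays` operations. This is only a conceptual sketch with made-up sizes and mapping, not the optimized column merge implemented here:

```
using SparseArrays

gi = [2, 5, 7]                                   # local index k maps to global index gi[k]
Alocal  = sparse([1, 2, 3], [1, 2, 3], [1.0, 2.0, 3.0], 3, 3)
Aglobal = spzeros(8, 8)

for (i, j, v) in zip(findnz(Alocal)...)
    Aglobal[gi[i], gi[j]] += v                   # add local entries at remapped positions
end
```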
-""" -function plus_remap(lnk::SuperSparseMatrixLNK{Tv, Ti}, csc::SparseArrays.SparseMatrixCSC, gi::Vector{Ti}) where {Tv, Ti <: Integer} - - #@info lnk.collnk[1:lnk.colctr] - - - supersparsecolumns = gi[lnk.collnk[1:lnk.colctr]] - sortedcolumnids = sortperm(supersparsecolumns) - sortedcolumns = supersparsecolumns[sortedcolumnids] - #sortedcolumns = vcat([1], sortedcolumns) - #@info typeof(supersparsecolumns), typeof(sortedcolumns) - - sortedcolumns = vcat(sortedcolumns, [Ti(csc.n + 1)]) - - #@info typeof(supersparsecolumns), typeof(sortedcolumns) - - - #@info supersparsecolumns - #@info sortedcolumns - #@info lnk.collnk[1:length(sortedcolumns)-1] - #@info lnk.collnk[sortedcolumnids[1:length(sortedcolumns)-1]] - - col = [ColEntry{Tv, Ti}(0, zero(Tv)) for i in 1:csc.m] - - #@info sortedcolumnids - - nnz_sum = length(csc.rowval) + lnk.nnz - colptr = Vector{Ti}(undef, csc.n + 1) - rowval = Vector{Ti}(undef, nnz_sum) - nzval = Vector{Tv}(undef, nnz_sum) - colptr[1] = one(Ti) - - #first part: columns between 1 and first column of lnk - - colptr[1:sortedcolumns[1]] = view(csc.colptr, 1:sortedcolumns[1]) - rowval[1:(csc.colptr[sortedcolumns[1]] - 1)] = view(csc.rowval, 1:(csc.colptr[sortedcolumns[1]] - 1)) - nzval[1:(csc.colptr[sortedcolumns[1]] - 1)] = view(csc.nzval, 1:(csc.colptr[sortedcolumns[1]] - 1)) - - numshifts = 0 - - for J in 1:(length(sortedcolumns) - 1) - i = sortedcolumns[J] - - coll = get_column!(col, lnk, lnk.collnk[sortedcolumnids[J]]) - #@info typeof(i), typeof(coll), typeof(colptr), typeof(colptr[i]), typeof(colptr[i]-1) - nns = merge_into!(rowval, nzval, csc, col, i, coll, colptr[i] - one(Ti)) - numshifts += nns - - - colptr[(i + 1):sortedcolumns[J + 1]] = csc.colptr[(i + 1):sortedcolumns[J + 1]] .+ (-csc.colptr[i] + colptr[i] + nns) - rowval[colptr[i + 1]:(colptr[sortedcolumns[J + 1]] - 1)] = view(csc.rowval, csc.colptr[i + 1]:(csc.colptr[sortedcolumns[J + 1]] - 1)) - nzval[colptr[i + 1]:(colptr[sortedcolumns[J + 1]] - 1)] = view(csc.nzval, csc.colptr[i + 1]:(csc.colptr[sortedcolumns[J + 1]] - 1)) - - #= - for k=csc.colptr[i+1]:csc.colptr[sortedcolumns[J+1]]-1 - k2 = k+(-csc.colptr[i]+colptr[i] + nns) - rowval[k2] = csc.rowval[k] - nzval[k2] = csc.nzval[k] - end - =# - end - - - resize!(rowval, length(csc.rowval) + numshifts) - resize!(nzval, length(csc.rowval) + numshifts) - - - return SparseArrays.SparseMatrixCSC(csc.m, csc.n, colptr, rowval, nzval) - -end - - -""" - -function plus(lnk::SparseMatrixLNK{Tv, Ti}, csc::SparseArrays.SparseMatrixCSC) where {Tv, Ti <: Integer} - if lnk.nnz == 0 - return csc - elseif length(csc.rowval) == 0 - return SparseMatrixCSC(lnk) - else - return lnk + csc - end -end - -function plus(lnk::SuperSparseMatrixLNK{Tv, Ti}, csc::SparseArrays.SparseMatrixCSC) where {Tv, Ti <: Integer} - gi = collect(1:csc.n) - - - supersparsecolumns = gi[lnk.collnk[1:lnk.colctr]] - sortedcolumnids = sortperm(supersparsecolumns) - sortedcolumns = supersparsecolumns[sortedcolumnids] - #sortedcolumns = vcat([1], sortedcolumns) - sortedcolumns = vcat(sortedcolumns, [csc.n+1]) - - col = [ColEntry{Tv, Ti}(0, zero(Tv)) for i=1:csc.m] - - #@info sortedcolumnids - - nnz_sum = length(csc.rowval) + lnk.nnz - colptr = Vector{Ti}(undef, csc.n+1) - rowval = Vector{Ti}(undef, nnz_sum) - nzval = Vector{Tv}(undef, nnz_sum) - colptr[1] = one(Ti) - - #first part: columns between 1 and first column of lnk - - colptr[1:sortedcolumns[1]] = view(csc.colptr, 1:sortedcolumns[1]) - rowval[1:csc.colptr[sortedcolumns[1]]-1] = view(csc.rowval, 1:csc.colptr[sortedcolumns[1]]-1) - 
nzval[1:csc.colptr[sortedcolumns[1]]-1] = view(csc.nzval, 1:csc.colptr[sortedcolumns[1]]-1) - - numshifts = 0 - - for J=1:length(sortedcolumns)-1 - #@info ">>>>>>> J <<<<<<<<<<<<<<<" - # insert new added column here / dummy - i = sortedcolumns[J] - coll = get_column!(col, lnk, i) - #print_col(col, coll) - - nns = merge_into!(rowval, nzval, csc, col, i, coll, colptr[i]-1) - - numshifts += nns - #j = colptr[i] #sortedcolumns[J]] - #rowval[j] = J - #nzval[j] = J - # insertion end - - #colptr[i+1] = colptr[i] + csc.colptr[i+1]-csc.colptr[i] + numshifts - - #a = i+1 - #b = sortedcolumns[J+1] - #@info a, b - - - #colptr[i+1:sortedcolumns[J+1]] = (csc.colptr[i+1:sortedcolumns[J+1]]-csc.colptr[i:sortedcolumns[J+1]-1]).+(colptr[i] + nns) - - colptr[i+1:sortedcolumns[J+1]] = csc.colptr[i+1:sortedcolumns[J+1]].+(-csc.colptr[i]+colptr[i] + nns) - - - rowval[colptr[i+1]:colptr[sortedcolumns[J+1]]-1] = view(csc.rowval, csc.colptr[i+1]:csc.colptr[sortedcolumns[J+1]]-1) - nzval[colptr[i+1]:colptr[sortedcolumns[J+1]]-1] = view(csc.nzval, csc.colptr[i+1]:csc.colptr[sortedcolumns[J+1]]-1) - - - #= - - @info csc.colptr[a:b] - - colptr[a:b] = csc.colptr[a:b].+numshifts - - #colptr[i+2:sortedcolumns[J+1]] = csc.colptr[i+2:sortedcolumns[J+1]].+numshifts - @info i, J, colptr[i+2], colptr[sortedcolumns[J+1]], csc.colptr[i+2], csc.colptr[sortedcolumns[J+1]] - @info i, J, colptr[a], colptr[b], csc.colptr[a], csc.colptr[b] - rowval[colptr[i+2]:colptr[sortedcolumns[J+1]]] = view(csc.rowval, csc.colptr[i+2]:csc.colptr[sortedcolumns[J+1]]) - nzval[colptr[i+2]:colptr[sortedcolumns[J+1]]] = view(csc.nzval, csc.colptr[i+2]:csc.colptr[sortedcolumns[J+1]]) - #rowval[colptrsortedcolumns[J+1]] - =# - end - - #@info colptr - - resize!(rowval, length(csc.rowval)+numshifts) - resize!(nzval, length(csc.rowval)+numshifts) - - - SparseMatrixCSC(csc.m, csc.n, colptr, rowval, nzval) - - - -end - -function plus_loop(lnk::SuperSparseMatrixLNK{Tv, Ti}, csc::SparseArrays.SparseMatrixCSC) where {Tv, Ti <: Integer} - gi = collect(1:csc.n) - - supersparsecolumns = gi[lnk.collnk[1:lnk.colctr]] - sortedcolumnids = sortperm(supersparsecolumns) - sortedcolumns = supersparsecolumns[sortedcolumnids] - #sortedcolumns = vcat([1], sortedcolumns) - sortedcolumns = vcat(sortedcolumns, [csc.n+1]) - - col = [ColEntry{Tv, Ti}(0, zero(Tv)) for i=1:csc.m] - - #@info sortedcolumnids - - nnz_sum = length(csc.rowval) + lnk.nnz - colptr = Vector{Ti}(undef, csc.n+1) - rowval = Vector{Ti}(undef, nnz_sum) - nzval = Vector{Tv}(undef, nnz_sum) - colptr[1] = one(Ti) - - #first part: columns between 1 and first column of lnk - - colptr[1:sortedcolumns[1]] = view(csc.colptr, 1:sortedcolumns[1]) - rowval[1:csc.colptr[sortedcolumns[1]]-1] = view(csc.rowval, 1:csc.colptr[sortedcolumns[1]]-1) - nzval[1:csc.colptr[sortedcolumns[1]]-1] = view(csc.nzval, 1:csc.colptr[sortedcolumns[1]]-1) - - numshifts = 0 - - for J=1:length(sortedcolumns)-1 - i = sortedcolumns[J] - coll = get_column!(col, lnk, i) - - nns = merge_into!(rowval, nzval, csc, col, i, coll, colptr[i]-1) - numshifts += nns - - colptr[i+1:sortedcolumns[J+1]] = csc.colptr[i+1:sortedcolumns[J+1]].+(-csc.colptr[i]+colptr[i] + nns) - - - for k=csc.colptr[i+1]:csc.colptr[sortedcolumns[J+1]]-1 - k2 = k+(-csc.colptr[i]+colptr[i] + nns) - rowval[k2] = csc.rowval[k] - nzval[k2] = csc.nzval[k] - end - - - end - - #@info colptr - - resize!(rowval, length(csc.rowval)+numshifts) - resize!(nzval, length(csc.rowval)+numshifts) - - - SparseMatrixCSC(csc.m, csc.n, colptr, rowval, nzval) - - - -end - - -function twodisjointsets(n, k) 
- A = rand(1:n, k) - B = zeros(Int64, k) - done = false - ctr = 0 - while ctr != k - v = rand(1:n) - if !(v in A) - ctr += 1 - B[ctr] = v - end - end - - A, B -end - -function distinct(x, n) - y = zeros(typeof(x[1]), n) - ctr = 0 - while ctr != n - v = rand(x) - if !(v in y[1:ctr]) - ctr += 1 - y[ctr] = v - end - end - y -end -""" - -function mean(x) - return sum(x) / length(x) -end - -function form(x) - return [minimum(x), mean(x), maximum(x)] -end diff --git a/src/factorizations/blockpreconditioner.jl b/src/factorizations/blockpreconditioner.jl deleted file mode 100644 index 3b5fbf2..0000000 --- a/src/factorizations/blockpreconditioner.jl +++ /dev/null @@ -1,126 +0,0 @@ -mutable struct BlockPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization - phash::UInt64 - partitioning::Union{Nothing, Vector{AbstractVector}} - facts::Vector - function BlockPreconditioner(; partitioning = nothing, factorization = ExtendableSparse.LUFactorization) - p = new() - p.phash = 0 - p.partitioning = partitioning - p.factorization = factorization - return p - end -end - - -""" - BlockPreconditioner(;partitioning, factorization=LUFactorization) - -Create a block preconditioner from partition of unknowns given by `partitioning`, a vector of AbstractVectors describing the -indices of the partitions of the matrix. For a matrix of size `n x n`, e.g. partitioning could be `[ 1:n÷2, (n÷2+1):n]` -or [ 1:2:n, 2:2:n]. -Factorization is a callable (Function or struct) which allows to create a factorization (with `ldiv!` methods) from a submatrix of A. -""" -function BlockPreconditioner end - -""" - allow_views(::preconditioner_type) - -Factorizations on matrix partitions within a block preconditioner may or may not work with array views. -E.g. the umfpack factorization cannot work with views, while ILUZeroPreconditioner can. - Implementing a method for `allow_views` returning `false` resp. `true` allows to dispatch to the proper case. 
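For concreteness, the two partitionings mentioned in the `BlockPreconditioner` docstring above look as follows for `n = 8` (values purely illustrative):

```
n = 8
contiguous  = [1:n÷2, (n÷2 + 1):n]   # [1:4, 5:8]
interleaved = [1:2:n, 2:2:n]         # [1, 3, 5, 7] and [2, 4, 6, 8]
```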
-""" -allow_views(::Any) = false - - -function update!(precon::BlockPreconditioner) - flush!(precon.A) - nall = sum(length, precon.partitioning) - n = size(precon.A, 1) - if nall != n - @warn "sum(length,partitioning)=$(nall) but n=$(n)" - end - - if isnothing(precon.partitioning) - partitioning = [1:n] - end - - np = length(precon.partitioning) - precon.facts = Vector{Any}(undef, np) - return Threads.@threads for ipart in 1:np - factorization = deepcopy(precon.factorization) - AP = precon.A[precon.partitioning[ipart], precon.partitioning[ipart]] - FP = factorization(AP) - precon.facts[ipart] = FP - end -end - - -function LinearAlgebra.ldiv!(p::BlockPreconditioner, v) - partitioning = p.partitioning - facts = p.facts - np = length(partitioning) - - if allow_views(p.factorization) - Threads.@threads for ipart in 1:np - ldiv!(facts[ipart], view(v, partitioning[ipart])) - end - else - Threads.@threads for ipart in 1:np - vv = v[partitioning[ipart]] - ldiv!(facts[ipart], vv) - view(v, partitioning[ipart]) .= vv - end - end - return v -end - -function LinearAlgebra.ldiv!(u, p::BlockPreconditioner, v) - partitioning = p.partitioning - facts = p.facts - np = length(partitioning) - if allow_views(p.factorization) - Threads.@threads for ipart in 1:np - ldiv!(view(u, partitioning[ipart]), facts[ipart], view(v, partitioning[ipart])) - end - else - Threads.@threads for ipart in 1:np - uu = u[partitioning[ipart]] - ldiv!(uu, facts[ipart], v[partitioning[ipart]]) - view(u, partitioning[ipart]) .= uu - end - end - return u -end - -Base.eltype(p::BlockPreconditioner) = eltype(p.facts[1]) - - -""" - BlockPreconBuilder(;precs=UMFPACKPreconBuilder(), - partitioning = A -> [1:size(A,1)] - -Return callable object constructing a left block Jacobi preconditioner -from partition of unknowns. - -- `partitioning(A)` shall return a vector of AbstractVectors describing the indices of the partitions of the matrix. - For a matrix of size `n x n`, e.g. partitioning could be `[ 1:n÷2, (n÷2+1):n]` or [ 1:2:n, 2:2:n]. - -- `precs(A,p)` shall return a left precondioner for a matrix block. -""" -Base.@kwdef mutable struct BlockPreconBuilder - precs = UMFPACKPreconBuilder() - partitioning = A -> [1:size(A, 1)] -end - -function (blockprecs::BlockPreconBuilder)(A, p) - (; precs, partitioning) = blockprecs - factorization = A -> precs(A, p)[1] - bp = BlockPreconditioner(A; partitioning = partitioning(A), factorization) - return (bp, LinearAlgebra.I) -end - -""" - Allow array for precs => different precoms -""" diff --git a/src/factorizations/cholmod_cholesky.jl b/src/factorizations/cholmod_cholesky.jl deleted file mode 100644 index 3efa90b..0000000 --- a/src/factorizations/cholmod_cholesky.jl +++ /dev/null @@ -1,39 +0,0 @@ -mutable struct CholeskyFactorization <: AbstractLUFactorization - A::Union{ExtendableSparseMatrix, Nothing} - fact::Union{SuiteSparse.CHOLMOD.Factor, Nothing} - phash::UInt64 - A64::Any -end - -cholwarned = false -""" -CholeskyFactorization(;valuetype=Float64, indextype=Int64) -CholeskyFactorization(matrix) - -Default Cholesky factorization via cholmod. -""" -function CholeskyFactorization() - global cholwarned - if !cholwarned - @warn "ExtendableSparse.CholeskyFactorization is deprecated. 
Use LinearSolve.CholeskyFactorization` instead" - cholwarned = true - end - return CholeskyFactorization(nothing, nothing, 0, nothing) -end - -function update!(cholfact::CholeskyFactorization) - A = cholfact.A - flush!(A) - if A.phash != cholfact.phash - cholfact.A64 = Symmetric(A.cscmatrix) - cholfact.fact = cholesky(cholfact.A64) - cholfact.phash = A.phash - else - cholfact.A64.data.nzval .= A.cscmatrix.nzval - cholfact.fact = cholesky!(cholfact.fact, cholfact.A64) - end - return cholfact -end - -LinearAlgebra.ldiv!(fact::CholeskyFactorization, v) = fact.fact \ v -LinearAlgebra.ldiv!(u, fact::CholeskyFactorization, v) = u .= fact.fact \ v diff --git a/src/factorizations/factorizations.jl b/src/factorizations/factorizations.jl deleted file mode 100644 index a692fc1..0000000 --- a/src/factorizations/factorizations.jl +++ /dev/null @@ -1,188 +0,0 @@ -""" - $(TYPEDEF) - -Abstract type for a factorization with ExtandableSparseMatrix. - -This type is meant to be a "type flexible" (with respect to the matrix element type) -and lazily construcdet (can be constructed without knowing the matrix, and updated later) -LU factorization or preconditioner. It wraps different concrete, type fixed factorizations -which shall provide the usual `ldiv!` methods. - -Any such preconditioner/factorization `MyFact` should have the following fields -```` - A::ExtendableSparseMatrix - factorization - phash::UInt64 -```` -and provide methods -```` - MyFact(;kwargs...) - update!(precon::MyFact) -```` - -The idea is that, depending if the matrix pattern has changed, -different steps are needed to update the preconditioner. - - -""" -abstract type AbstractFactorization end - -""" -$(TYPEDEF) - -Abstract subtype for preconditioners -""" -abstract type AbstractPreconditioner <: AbstractFactorization end - -""" -$(TYPEDEF) - -Abstract subtype for (full) LU factorizations -""" -abstract type AbstractLUFactorization <: AbstractFactorization end - -""" -``` -issolver(factorization) -``` - -Determine if factorization is a solver or not -""" -issolver(::AbstractLUFactorization) = true -issolver(::AbstractPreconditioner) = false - - -"""" - @makefrommatrix(fact) - -For an AbstractFactorization `MyFact`, provide methods -``` - MyFact(A::ExtendableSparseMatrix; kwargs...) - MyFact(A::SparseMatrixCSC; kwargs...) -``` -""" -macro makefrommatrix(fact) - return quote - function $(esc(fact))(A::ExtendableSparseMatrix; kwargs...) - return factorize!($(esc(fact))(; kwargs...), A) - end - function $(esc(fact))(A::SparseMatrixCSC; kwargs...) - return $(esc(fact))(ExtendableSparseMatrix(A); kwargs...) - end - end -end - -include("ilu0.jl") -include("iluzero.jl") -include("parallel_jacobi.jl") -include("parallel_ilu0.jl") -include("sparspak.jl") -include("blockpreconditioner.jl") -include("jacobi.jl") - -@eval begin - @makefrommatrix ILU0Preconditioner - @makefrommatrix ILUZeroPreconditioner - @makefrommatrix PointBlockILUZeroPreconditioner - @makefrommatrix JacobiPreconditioner - @makefrommatrix ParallelJacobiPreconditioner - @makefrommatrix ParallelILU0Preconditioner - @makefrommatrix SparspakLU - @makefrommatrix UpdateteableBlockpreconditioner - @makefrommatrix BlockPreconditioner -end - -""" -``` -factorize!(factorization, matrix) -``` - -Update or create factorization, possibly reusing information from the current state. -This method is aware of pattern changes. 
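As a hedged sketch of the workflow these docstrings describe, using the removed 1.x factorization API and the `LUFactorization` defined further below (matrix values are made up; the UMFPACK-backed `LUFactorization` requires a GPL build, per the notes above):

```
using ExtendableSparse, LinearAlgebra

n = 100
A = ExtendableSparseMatrix(n, n)
for i in 1:n
    A[i, i] = 4.0
    i > 1 && (A[i, i - 1] = A[i - 1, i] = -1.0)
end

lufact = LUFactorization()     # lazily constructed, no matrix attached yet
factorize!(lufact, A)          # flush A and compute the factorization
b = ones(n)
x = lufact \ b

A[1, 1] = 8.0                  # change values, keep the pattern
factorize!(lufact, A)          # unchanged pattern hash -> factorization is updated in place
x = lufact \ b
```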
-""" -function factorize!(p::AbstractFactorization, A::ExtendableSparseMatrix) - p.A = A - update!(p) - return p -end -factorize!(p::AbstractFactorization, A::SparseMatrixCSC) = factorize!(p, ExtendableSparseMatrix(A)) - - -""" -``` -lu!(factorization, matrix) -``` - -Update LU factorization, possibly reusing information from the current state. -This method is aware of pattern changes. - -If `nothing` is passed as first parameter, [`factorize!`](@ref) is called. -""" -function LinearAlgebra.lu!(lufact::AbstractFactorization, A::ExtendableSparseMatrix) - return factorize!(lufact, A) -end - -""" -``` -lu(matrix) -``` - -Create LU factorization. It calls the LU factorization form Sparspak.jl, unless GPL components -are allowed in the Julia sysimage and the floating point type of the matrix is Float64 or Complex64. -In that case, Julias standard `lu` is called, which is realized via UMFPACK. -""" -function lu end - -if USE_GPL_LIBS - function LinearAlgebra.lu(A::ExtendableSparseMatrix) - return factorize!(LUFactorization(), A) - end -else - function LinearAlgebra.lu(A::ExtendableSparseMatrix) - return factorize!(SparspakLU(), A) - end -end # USE_GPL_LIBS - -""" -``` - lufact\rhs -``` - -Solve LU factorization problem. -""" -function Base.:\(lufact::AbstractLUFactorization, v) - return ldiv!(similar(v), lufact, v) -end - -""" -``` -update!(factorization) -``` -Update factorization after matrix update. -""" -update!(::AbstractFactorization) - -""" -``` -ldiv!(u,factorization,v) -ldiv!(factorization,v) -``` - -Solve factorization. -""" -LinearAlgebra.ldiv!(u, fact::AbstractFactorization, v) = ldiv!(u, fact.factorization, v) -LinearAlgebra.ldiv!(fact::AbstractFactorization, v) = ldiv!(fact.factorization, v) - - -if USE_GPL_LIBS - #requires SuiteSparse which is not available in non-GPL builds - include("umfpack_lu.jl") - include("cholmod_cholesky.jl") - - @eval begin - @makefrommatrix LUFactorization - @makefrommatrix CholeskyFactorization - end -else - const LUFactorization = SparspakLU -end diff --git a/src/factorizations/ilu0.jl b/src/factorizations/ilu0.jl deleted file mode 100644 index 8ca3bfb..0000000 --- a/src/factorizations/ilu0.jl +++ /dev/null @@ -1,137 +0,0 @@ -mutable struct _ILU0Preconditioner{Tv, Ti} - cscmatrix::SparseMatrixCSC{Tv, Ti} - xdiag::Vector{Tv} - idiag::Vector{Ti} -end - - -function ilu0(cscmatrix::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} - colptr = cscmatrix.colptr - rowval = cscmatrix.rowval - nzval = cscmatrix.nzval - n = cscmatrix.n - - # Find main diagonal index and - # copy main diagonal values - idiag = Vector{Ti}(undef, n) - @inbounds for j in 1:n - @inbounds for k in colptr[j]:(colptr[j + 1] - 1) - i = rowval[k] - if i == j - idiag[j] = k - break - end - end - end - - xdiag = Vector{Tv}(undef, n) - @inbounds for j in 1:n - xdiag[j] = one(Tv) / nzval[idiag[j]] - @inbounds for k in (idiag[j] + 1):(colptr[j + 1] - 1) - i = rowval[k] - for l in colptr[i]:(colptr[i + 1] - 1) - if rowval[l] == j - xdiag[i] -= nzval[l] * xdiag[j] * nzval[k] - break - end - end - end - end - return _ILU0Preconditioner(cscmatrix, xdiag, idiag) -end - -function ilu0!(p::_ILU0Preconditioner{Tv, Ti}, cscmatrix::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} - colptr = cscmatrix.colptr - rowval = cscmatrix.rowval - nzval = cscmatrix.nzval - n = cscmatrix.n - idiag = p.idiag - xdiag = p.xdiag - - @inbounds for j in 1:n - xdiag[j] = one(Tv) / nzval[idiag[j]] - @inbounds for k in (idiag[j] + 1):(colptr[j + 1] - 1) - i = rowval[k] - for l in colptr[i]:(colptr[i + 1] - 1) - if rowval[l] == j - xdiag[i] -= 
nzval[l] * xdiag[j] * nzval[k] - break - end - end - end - end - p.cscmatrix = cscmatrix - return p -end - - -function LinearAlgebra.ldiv!(u, p::_ILU0Preconditioner{Tv, Ti}, v) where {Tv, Ti} - colptr = p.cscmatrix.colptr - rowval = p.cscmatrix.rowval - nzval = p.cscmatrix.nzval - n = p.cscmatrix.n - idiag = p.idiag - xdiag = p.xdiag - - for j in 1:n - u[j] = xdiag[j] * v[j] - end - - for j in n:-1:1 - for k in (idiag[j] + 1):(colptr[j + 1] - 1) - i = rowval[k] - u[i] -= xdiag[i] * nzval[k] * u[j] - end - end - - for j in 1:n - for k in colptr[j]:(idiag[j] - 1) - i = rowval[k] - u[i] -= xdiag[i] * nzval[k] * u[j] - end - end - return u -end - -function LinearAlgebra.ldiv!(p::_ILU0Preconditioner{Tv, Ti}, v) where {Tv, Ti} - return ldiv!(copy(v), p, v) -end - - -mutable struct ILU0Preconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization::_ILU0Preconditioner - phash::UInt64 - function ILU0Preconditioner() - p = new() - p.phash = 0 - return p - end -end - -""" -``` -ILU0Preconditioner() -ILU0Preconditioner(matrix) -``` - -Incomplete LU preconditioner with zero fill-in, without modification of off-diagonal entries, so it delivers -slower convergende than [`ILUZeroPreconditioner`](@ref). -""" -function ILU0Preconditioner end - - -function update!(p::ILU0Preconditioner) - flush!(p.A) - Tv = eltype(p.A) - if p.A.phash != p.phash - p.factorization = ilu0(p.A.cscmatrix) - p.phash = p.A.phash - else - ilu0!(p.factorization, p.A.cscmatrix) - end - return p -end - -allow_views(::ILU0Preconditioner) = true -allow_views(::Type{ILU0Preconditioner}) = true diff --git a/src/factorizations/iluzero.jl b/src/factorizations/iluzero.jl deleted file mode 100644 index 4d8b038..0000000 --- a/src/factorizations/iluzero.jl +++ /dev/null @@ -1,98 +0,0 @@ -iluzerowarned = false -mutable struct ILUZeroPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization::ILUZero.ILU0Precon - phash::UInt64 - function ILUZeroPreconditioner() - global iluzerowarned - if !iluzerowarned - @warn "ILUZeroPreconditioner is deprecated. Use LinearSolve with `precs=ILUZeroPreconBuilder()` instead" - iluzerowarned = true - end - p = new() - p.phash = 0 - return p - end -end - -""" -``` -ILUZeroPreconditioner() -ILUZeroPreconditioner(matrix) -``` -Incomplete LU preconditioner with zero fill-in using [ILUZero.jl](https://github.com/mcovalt/ILUZero.jl). This preconditioner -also calculates and stores updates to the off-diagonal entries and thus delivers better convergence than the [`ILU0Preconditioner`](@ref). -""" -function ILUZeroPreconditioner end - -function update!(p::ILUZeroPreconditioner) - flush!(p.A) - if p.A.phash != p.phash - p.factorization = ILUZero.ilu0(p.A.cscmatrix) - p.phash = p.A.phash - else - ILUZero.ilu0!(p.factorization, p.A.cscmatrix) - end - return p -end - -allow_views(::ILUZeroPreconditioner) = true -allow_views(::Type{ILUZeroPreconditioner}) = true - - -biluzerowarned = false -mutable struct PointBlockILUZeroPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization::ILUZero.ILU0Precon - phash::UInt64 - blocksize::Int - function PointBlockILUZeroPreconditioner(; blocksize = 1) - global biluzerowarned - if !biluzerowarned - @warn "PointBlockILUZeroPreconditioner is deprecated. 
Use LinearSolve with `precs=ILUZeroPreconBuilder(; blocksize=$(blocksize))` instead" - biluzerowarned = true - end - p = new() - p.phash = 0 - p.blocksize = blocksize - return p - end -end - -""" -``` -PointBlockILUZeroPreconditioner(;blocksize) -PointBlockILUZeroPreconditioner(matrix;blocksize) -``` -Incomplete LU preconditioner with zero fill-in using [ILUZero.jl](https://github.com/mcovalt/ILUZero.jl). This preconditioner -also calculates and stores updates to the off-diagonal entries and thus delivers better convergence than the [`ILU0Preconditioner`](@ref). -""" -function PointBlockILUZeroPreconditioner end - -function update!(p::PointBlockILUZeroPreconditioner) - flush!(p.A) - Ab = pointblock(p.A.cscmatrix, p.blocksize) - if p.A.phash != p.phash - p.factorization = ILUZero.ilu0(Ab, SVector{p.blocksize, eltype(p.A)}) - p.phash = p.A.phash - else - ILUZero.ilu0!(p.factorization, Ab) - end - return p -end - - -function LinearAlgebra.ldiv!(p::PointBlockILUZeroPreconditioner, v) - vv = reinterpret(SVector{p.blocksize, eltype(v)}, v) - LinearAlgebra.ldiv!(vv, p.factorization, vv) - return v -end - -function LinearAlgebra.ldiv!(u, p::PointBlockILUZeroPreconditioner, v) - LinearAlgebra.ldiv!( - reinterpret(SVector{p.blocksize, eltype(u)}, u), - p.factorization, - reinterpret(SVector{p.blocksize, eltype(v)}, v) - ) - return u -end diff --git a/src/factorizations/jacobi.jl b/src/factorizations/jacobi.jl deleted file mode 100644 index 36b1be2..0000000 --- a/src/factorizations/jacobi.jl +++ /dev/null @@ -1,68 +0,0 @@ -mutable struct _JacobiPreconditioner{Tv} - invdiag::Vector{Tv} -end - -function jacobi(A::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} - invdiag = Array{Tv, 1}(undef, A.n) - n = A.n - @inbounds for i in 1:n - invdiag[i] = one(Tv) / A[i, i] - end - return _JacobiPreconditioner(invdiag) -end - -function jacobi!(p::_JacobiPreconditioner{Tv}, A::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} - n = A.n - @inbounds for i in 1:n - p.invdiag[i] = one(Tv) / A[i, i] - end - return p -end - -mutable struct JacobiPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization::Union{_JacobiPreconditioner, Nothing} - phash::UInt64 - function JacobiPreconditioner() - p = new() - p.factorization = nothing - p.phash = 0 - return p - end -end - -function LinearAlgebra.ldiv!(u, p::_JacobiPreconditioner, v) - n = length(p.invdiag) - for i in 1:n - @inbounds u[i] = p.invdiag[i] * v[i] - end - return -end - -LinearAlgebra.ldiv!(p::_JacobiPreconditioner, v) = ldiv!(v, p, v) - - -""" -``` -JacobiPreconditioner() -JacobiPreconditioner(matrix) -``` - -Jacobi preconditioner. 
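The action of the Jacobi preconditioner defined above reduces to an elementwise product with the inverted diagonal. A standalone sketch with a toy matrix and plain `SparseArrays`, mirroring what `jacobi` stores and what its `ldiv!` computes:

```
using SparseArrays, LinearAlgebra

A = sparse([4.0 -1.0 0.0; -1.0 4.0 -1.0; 0.0 -1.0 4.0])
invdiag = 1 ./ diag(A)      # what `jacobi` precomputes
v = [1.0, 2.0, 3.0]
u = invdiag .* v            # what `ldiv!(u, p, v)` returns
```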
-""" -function JacobiPreconditioner end - -function update!(p::JacobiPreconditioner) - flush!(p.A) - Tv = eltype(p.A) - if p.A.phash != p.phash || isnothing(p.factorization) - p.factorization = jacobi(p.A.cscmatrix) - p.phash = p.A.phash - else - jacobi!(p.factorization, p.A.cscmatrix) - end - return p -end - -allow_views(::JacobiPreconditioner) = true -allow_views(::Type{JacobiPreconditioner}) = true diff --git a/src/factorizations/parallel_ilu0.jl b/src/factorizations/parallel_ilu0.jl deleted file mode 100644 index eac1609..0000000 --- a/src/factorizations/parallel_ilu0.jl +++ /dev/null @@ -1,224 +0,0 @@ -mutable struct _ParallelILU0Preconditioner{Tv, Ti} - A::SparseMatrixCSC{Tv, Ti} - xdiag::Vector{Tv} - idiag::Vector{Ti} - coloring::Vector{Vector{Ti}} - coloring_index::Vector{Vector{Ti}} - coloring_index_reverse::Vector{Vector{Ti}} -end - -function pilu0(A0::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} - coloring = graphcol(A0) - coloring_index, coloring_index_reverse = coloringindex(coloring) - A = ExtendableSparseMatrix(reordermatrix(A0, coloring)).cscmatrix - - colptr = A.colptr - rowval = A.rowval - nzval = A.nzval - n = A.n - xdiag = Vector{Tv}(undef, n) - idiag = Vector{Ti}(undef, n) - - # Find main diagonal index and - # copy main diagonal values - @inbounds for j in 1:n - @inbounds for k in colptr[j]:(colptr[j + 1] - 1) - i = rowval[k] - if i == j - idiag[j] = k - break - end - end - end - - @inbounds for j in 1:n - xdiag[j] = one(Tv) / nzval[idiag[j]] - @inbounds for k in (idiag[j] + 1):(colptr[j + 1] - 1) - i = rowval[k] - for l in colptr[i]:(colptr[i + 1] - 1) - if rowval[l] == j - xdiag[i] -= nzval[l] * xdiag[j] * nzval[k] - break - end - end - end - end - return _ParallelILU0Preconditioner(A, xdiag, idiag, coloring, coloring_index, coloring_index_reverse) -end - -function LinearAlgebra.ldiv!( - u::AbstractVector, - precon::_ParallelILU0Preconditioner{Tv, Ti}, - v::AbstractVector - ) where {Tv, Ti} - A = precon.A - colptr = A.colptr - rowval = A.rowval - n = A.n - nzval = A.nzval - xdiag = precon.xdiag - idiag = precon.idiag - - coloring = precon.coloring - coloring_index = precon.coloring_index - coloring_index_reverse = precon.coloring_index_reverse - - Threads.@threads for j in 1:n - u[j] = xdiag[j] * v[j] - end - - for indset in coloring_index_reverse - Threads.@threads for j in indset - for k in (idiag[j] + 1):(colptr[j + 1] - 1) - i = rowval[k] - u[i] -= xdiag[i] * nzval[k] * u[j] - end - end - end - for indset in coloring_index - Threads.@threads for j in indset - for k in colptr[j]:(idiag[j] - 1) - i = rowval[k] - u[i] -= xdiag[i] * nzval[k] * u[j] - end - end - end - return -end - -function LinearAlgebra.ldiv!( - precon::_ParallelILU0Preconditioner{Tv, Ti}, - v::AbstractVector - ) where {Tv, Ti} - return ldiv!(v, precon, v) -end - -# Returns an independent set of the graph of a matrix -# Reference: https://research.nvidia.com/sites/default/files/pubs/2015-05_Parallel-Graph-Coloring/nvr-2015-001.pdf -function indset(A::SparseMatrixCSC{Tv, Ti}, W::StridedVector) where {Tv, Ti} - # Random numbers for all vertices - lenW = length(W) - # r = sample(1:lenW, lenW, replace = false) - r = rand(lenW) - @inbounds for i in 1:lenW - if W[i] == 0 - r[i] = 0 - end - end - # Empty independent set - S = zeros(Int, lenW) - # Get independent set by comparing random number of vertex with the random - # numbers of all neighbor vertices - @inbounds Threads.@threads for i in 1:lenW - if W[i] != 0 - j = A.rowval[A.colptr[i]:(A.colptr[i + 1] - 1)] - if all(x -> x == 1, r[i] .>= r[j]) - S[i] = 
W[i] - end - end - end - # Remove zero entries and return independent set - return filter!(x -> x ≠ 0, S) -end - -# Returns coloring of the graph of a matrix -# Reference: https://research.nvidia.com/sites/default/files/pubs/2015-05_Parallel-Graph-Coloring/nvr-2015-001.pdf -function graphcol(A::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} - # Empty list for coloring - C = Vector{Ti}[] - # Array of vertices - W = Ti[1:size(A)[1];] - # Get all independent sets of the graph of the matrix - while any(W .!= 0) - # Get independent set - S = indset(A + transpose(A), W) - push!(C, S) - # Remove entries in S from W - @inbounds for s in S - W[s] = 0 - end - end - # Return coloring - return C -end - -# Reorders a sparse matrix with provided coloring -function reordermatrix( - A::SparseMatrixCSC{Tv, Ti}, - coloring - ) where {Tv, Ti} - c = collect(Iterators.flatten(coloring)) - return A[c, :][:, c] -end - -# Reorders a linear system with provided coloring -function reorderlinsys( - A::SparseMatrixCSC{Tv, Ti}, - b::Vector{Tv}, - coloring - ) where {Tv, Ti} - c = collect(Iterators.flatten(coloring)) - return A[c, :][:, c], b[c] -end - -# Returns an array with the same structure of the input coloring and ordered -# entries 1:length(coloring) and an array with the structure of -# reverse(coloring) and ordered entries length(coloring):-1:1 -function coloringindex(coloring) - # First array - c = deepcopy(coloring) - cnt = 1 - @inbounds for i in 1:length(c) - @inbounds for j in 1:length(c[i]) - c[i][j] = cnt - cnt += 1 - end - end - # Second array - cc = deepcopy(reverse(coloring)) - @inbounds for i in 1:length(cc) - @inbounds for j in 1:length(cc[i]) - cnt -= 1 - cc[i][j] = cnt - end - end - # Return both - return c, cc -end - - -################################################################# -mutable struct ParallelILU0Preconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - phash::UInt64 - factorization::_ParallelILU0Preconditioner - - function ParallelILU0Preconditioner() - p = new() - p.phash = 0 - return p - end -end - - -""" -``` -ParallelILU0Preconditioner() -ParallelILU0Preconditioner(matrix) -``` - -Parallel ILU preconditioner with zero fill-in. 
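To make the coloring-based reordering above concrete, here is a small standalone sketch; the coloring is chosen by hand (in the package it comes from `graphcol`), and the permutation is applied exactly as in `reordermatrix`:

```
using SparseArrays

A = sparse([4.0 -1.0 0.0 0.0; -1.0 4.0 -1.0 0.0; 0.0 -1.0 4.0 -1.0; 0.0 0.0 -1.0 4.0])
coloring = [[1, 3], [2, 4]]               # two independent sets of the matrix graph
c = collect(Iterators.flatten(coloring))  # permutation [1, 3, 2, 4]
Areordered = A[c, :][:, c]                # same reordering as reordermatrix(A, coloring)
```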
-""" -function ParallelILU0Preconditioner end - -function update!(p::ParallelILU0Preconditioner) - flush!(p.A) - Tv = eltype(p.A) - if p.A.phash != p.phash - p.factorization = pilu0(p.A.cscmatrix) - p.phash = p.A.phash - else - pilu0!(p.factorization, p.A.cscmatrix) - end - return p -end diff --git a/src/factorizations/parallel_jacobi.jl b/src/factorizations/parallel_jacobi.jl deleted file mode 100644 index 68f444a..0000000 --- a/src/factorizations/parallel_jacobi.jl +++ /dev/null @@ -1,69 +0,0 @@ -mutable struct _ParallelJacobiPreconditioner{Tv} - invdiag::Vector{Tv} -end - -function pjacobi(A::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} - invdiag = Array{Tv, 1}(undef, A.n) - n = A.n - Threads.@threads for i in 1:n - invdiag[i] = one(Tv) / A[i, i] - end - return _ParallelJacobiPreconditioner(invdiag) -end - -function pjacobi!(p::_ParallelJacobiPreconditioner{Tv}, A::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} - n = A.n - Threads.@threads for i in 1:n - p.invdiag[i] = one(Tv) / A[i, i] - end - return p -end - - -function LinearAlgebra.ldiv!(u, p::_ParallelJacobiPreconditioner, v) - n = length(p.invdiag) - for i in 1:n - @inbounds u[i] = p.invdiag[i] * v[i] - end - return -end - -LinearAlgebra.ldiv!(p::_ParallelJacobiPreconditioner, v) = ldiv!(v, p, v) - - -mutable struct ParallelJacobiPreconditioner <: AbstractPreconditioner - A::ExtendableSparseMatrix - factorization::Union{_ParallelJacobiPreconditioner, Nothing} - phash::UInt64 - function ParallelJacobiPreconditioner() - p = new() - p.factorization = nothing - p.phash = 0 - return p - end -end - -""" -``` -ParallelJacobiPreconditioner() -ParallelJacobiPreconditioner(matrix) -``` - -ParallelJacobi preconditioner. -""" -function ParallelJacobiPreconditioner end - -function update!(p::ParallelJacobiPreconditioner) - flush!(p.A) - Tv = eltype(p.A) - if p.A.phash != p.phash || isnothing(p.factorization) - p.factorization = pjacobi(p.A.cscmatrix) - p.phash = p.A.phash - else - pjacobi!(p.factorization, p.A.cscmatrix) - end - return p -end - -allow_views(::ParallelJacobiPreconditioner) = true -allow_views(::Type{ParallelJacobiPreconditioner}) = true diff --git a/src/factorizations/simple_iteration.jl b/src/factorizations/simple_iteration.jl deleted file mode 100644 index 1516012..0000000 --- a/src/factorizations/simple_iteration.jl +++ /dev/null @@ -1,47 +0,0 @@ -""" -```` -simple!(u,A,b; - abstol::Real = zero(real(eltype(b))), - reltol::Real = sqrt(eps(real(eltype(b)))), - log=false, - maxiter=100, - P=nothing - ) -> solution, [history] -```` - -Simple iteration scheme ``u_{i+1}= u_i - P^{-1} (A u_i -b)`` with similar API as the methods in IterativeSolvers.jl. - -""" -function simple!( - u, - A, - b; - abstol::Real = zero(real(eltype(b))), - reltol::Real = sqrt(eps(real(eltype(b)))), - log = false, - maxiter = 100, - Pl = nothing - ) - res = A * u - b # initial residual - upd = similar(res) - r0 = norm(res) # residual norm - history = [r0] # initialize history recording - for i in 1:maxiter - ldiv!(upd, Pl, res) - u .-= upd # solve preconditioning system and update solution - mul!(res, A, u) # calculate residual - res .-= b - r = norm(res) # residual norm - push!(history, r) # record in history - if (r / r0) < reltol || r < abstol # check for tolerance - break - end - end - if log - return u, Dict(:resnorm => history) - else - return u - end -end - -simple(A, b; kwargs...) = simple!(zeros(eltype(b), length(b)), A, b; kwargs...) 
diff --git a/src/factorizations/sparspak.jl b/src/factorizations/sparspak.jl deleted file mode 100644 index 6f99bda..0000000 --- a/src/factorizations/sparspak.jl +++ /dev/null @@ -1,28 +0,0 @@ -mutable struct SparspakLU <: AbstractLUFactorization - A::Union{ExtendableSparseMatrix, Nothing} - factorization::Union{Sparspak.SparseSolver.SparseSolver, Nothing} - phash::UInt64 -end - -function SparspakLU() - return fact = SparspakLU(nothing, nothing, 0) -end - -""" -``` -SparspakLU() -SparspakLU(matrix) -``` - -LU factorization based on [Sparspak.jl](https://github.com/PetrKryslUCSD/Sparspak.jl) (P.Krysl's Julia re-implementation of Sparspak by George & Liu) -""" -function SparspakLU end - -function update!(lufact::SparspakLU) - flush!(lufact.A) - return if lufact.A.phash != lufact.phash - lufact.factorization = sparspaklu(lufact.A.cscmatrix) - else - sparspaklu!(lufact.factorization, lufact.A.cscmatrix) - end -end diff --git a/src/factorizations/umfpack_lu.jl b/src/factorizations/umfpack_lu.jl deleted file mode 100644 index 3e0bc18..0000000 --- a/src/factorizations/umfpack_lu.jl +++ /dev/null @@ -1,27 +0,0 @@ -mutable struct LUFactorization <: AbstractLUFactorization - A::Union{Nothing, ExtendableSparseMatrix} - factorization::Union{Nothing, SuiteSparse.UMFPACK.UmfpackLU} - phash::UInt64 -end - -LUFactorization() = LUFactorization(nothing, nothing, 0) -""" -``` -LUFactorization() -LUFactorization(matrix) -``` - -Default LU Factorization. Maps to Sparspak.jl for non-GPL builds, otherwise to UMFPACK. -""" -function LUFactorization end - -function update!(lufact::LUFactorization) - flush!(lufact.A) - if lufact.A.phash != lufact.phash - lufact.factorization = LinearAlgebra.lu(lufact.A.cscmatrix) - lufact.phash = lufact.A.phash - else - lufact.factorization = lu!(lufact.factorization, lufact.A.cscmatrix) - end - return lufact -end diff --git a/src/matrix/genericextendablesparsematrixcsc.jl b/src/genericextendablesparsematrixcsc.jl similarity index 55% rename from src/matrix/genericextendablesparsematrixcsc.jl rename to src/genericextendablesparsematrixcsc.jl index 0c0f649..5d4ce96 100644 --- a/src/matrix/genericextendablesparsematrixcsc.jl +++ b/src/genericextendablesparsematrixcsc.jl @@ -1,15 +1,23 @@ +""" + $(TYPEDEF) + +Single threaded extendable sparse matrix parametrized by sparse matrix extension. + +Fields: +- `cscmatrix`: a SparseMatrixCSC containing existing matrix entries +- `xmatrix`: instance of an [`AbstractSparseMatrixExtension`](@ref) which is used to collect new entries +""" mutable struct GenericExtendableSparseMatrixCSC{Tm <: AbstractSparseMatrixExtension, Tv, Ti <: Integer} <: AbstractExtendableSparseMatrixCSC{Tv, Ti} - """ - Final matrix data - """ cscmatrix::SparseMatrixCSC{Tv, Ti} - - """ - Matrix for new entries - """ xmatrix::Tm end +function GenericExtendableSparseMatrixCSC{Tm}(::Type{Tv}, m::Integer, n::Integer) where {Tm <: AbstractSparseMatrixExtension, Tv} + return GenericExtendableSparseMatrixCSC( + spzeros(Tv, Int, m, n), + Tm{Tv, Int}(m, n) + ) +end function GenericExtendableSparseMatrixCSC{Tm, Tv, Ti}(m::Integer, n::Integer) where {Tm <: AbstractSparseMatrixExtension, Tv, Ti <: Integer} return GenericExtendableSparseMatrixCSC( @@ -18,9 +26,36 @@ function GenericExtendableSparseMatrixCSC{Tm, Tv, Ti}(m::Integer, n::Integer) wh ) end +function GenericExtendableSparseMatrixCSC{Tm, Tv, Ti}(A::SparseMatrixCSC{Tv, Ti}) where {Tm <: AbstractSparseMatrixExtension, Tv, Ti <: Integer} + return GenericExtendableSparseMatrixCSC{Tm, Tv, Ti}( + SparseMatrixCSC(A), + Tm(size(A)...) 
+ ) +end + +""" + $(TYPEDSIGNATURES) +""" +function Base.similar(m::GenericExtendableSparseMatrixCSC{Tm, Tv, Ti}) where {Tm, Tv, Ti} + return GenericExtendableSparseMatrixCSC{Tm, Tv, Ti}(size(m)...) +end +""" + $(TYPEDSIGNATURES) +""" +function Base.similar(m::GenericExtendableSparseMatrixCSC{Tm, Tv, Ti}, ::Type{T}) where {Tm, Tv, Ti, T} + return GenericExtendableSparseMatrixCSC{Tm, T, Ti}(size(m)...) +end + + +""" + $(TYPEDSIGNATURES) +""" nnznew(ext::GenericExtendableSparseMatrixCSC) = nnz(ext.xmatrix) +""" + $(TYPEDSIGNATURES) +""" function reset!(ext::GenericExtendableSparseMatrixCSC{Tm, Tv, Ti}) where {Tm, Tv, Ti} m, n = size(ext.cscmatrix) ext.cscmatrix = spzeros(Tv, Ti, m, n) @@ -29,6 +64,9 @@ function reset!(ext::GenericExtendableSparseMatrixCSC{Tm, Tv, Ti}) where {Tm, Tv end +""" + $(TYPEDSIGNATURES) +""" function flush!(ext::GenericExtendableSparseMatrixCSC{Tm, Tv, Ti}) where {Tm, Tv, Ti} if nnz(ext.xmatrix) > 0 ext.cscmatrix = ext.xmatrix + ext.cscmatrix @@ -37,14 +75,35 @@ function flush!(ext::GenericExtendableSparseMatrixCSC{Tm, Tv, Ti}) where {Tm, Tv return ext end +""" + $(TYPEDSIGNATURES) +""" function SparseArrays.sparse(ext::GenericExtendableSparseMatrixCSC) flush!(ext) return ext.cscmatrix end +""" + $(TYPEDSIGNATURES) +""" function Base.setindex!( ext::GenericExtendableSparseMatrixCSC, - v::Union{Number, AbstractVecOrMat}, + v::Any, + i::Integer, + j::Integer + ) + k = findindex(ext.cscmatrix, i, j) + return if k > 0 + ext.cscmatrix.nzval[k] = v + else + setindex!(ext.xmatrix, v, i, j) + end +end + +# to resolve ambiguity +function Base.setindex!( + ext::GenericExtendableSparseMatrixCSC, + v::AbstractVecOrMat, i::Integer, j::Integer ) @@ -57,6 +116,9 @@ function Base.setindex!( end +""" + $(TYPEDSIGNATURES) +""" function Base.getindex( ext::GenericExtendableSparseMatrixCSC, i::Integer, @@ -70,12 +132,16 @@ function Base.getindex( end end +""" + $(TYPEDSIGNATURES) +""" function rawupdateindex!( ext::GenericExtendableSparseMatrixCSC, op, v, i, - j + j, + part = 1 ) k = findindex(ext.cscmatrix, i, j) return if k > 0 @@ -85,6 +151,9 @@ function rawupdateindex!( end end +""" + $(TYPEDSIGNATURES) +""" function updateindex!( ext::GenericExtendableSparseMatrixCSC, op, diff --git a/src/matrix/genericmtextendablesparsematrixcsc.jl b/src/genericmtextendablesparsematrixcsc.jl similarity index 59% rename from src/matrix/genericmtextendablesparsematrixcsc.jl rename to src/genericmtextendablesparsematrixcsc.jl index c2a39de..f376783 100644 --- a/src/matrix/genericmtextendablesparsematrixcsc.jl +++ b/src/genericmtextendablesparsematrixcsc.jl @@ -1,14 +1,26 @@ +""" + $(TYPEDEF) + + +Extendable sparse matrix parametrized by sparse matrix extension allowing multithreaded assembly and +parallel matrix-vector multiplication. + +Fields: +- `cscmatrix`: a SparseMatrixCSC containing existing matrix entries +- `xmatrices`: vector of instances of [`AbstractSparseMatrixExtension`](@ref) used to collect new entries +- `colparts`: vector describing colors of the partitions of the unknowns +- `partnodes`: vector describing partition of the unknowns + +It is assumed that the set of unknowns is partitioned, and the partitioning is colored in such a way that +several partitions of the same color can be handled by different threads, both during matrix assembly (which +in general would use a partition of e.g. finite elements compatible to the partitioning of the nodes) and during +matrix-vector multiplication. 
This approach is compatible with the current choice of the standard Julia +sparse ecosystem which prefers compressed colume storage (CSC) over compressed row storage (CSR). + +""" mutable struct GenericMTExtendableSparseMatrixCSC{Tm <: AbstractSparseMatrixExtension, Tv, Ti <: Integer} <: AbstractExtendableSparseMatrixCSC{Tv, Ti} - """ - Final matrix data - """ cscmatrix::SparseMatrixCSC{Tv, Ti} - - """ - Vector of dictionaries for new entries - """ xmatrices::Vector{Tm} - colparts::Vector{Ti} partnodes::Vector{Ti} end @@ -22,6 +34,11 @@ function GenericMTExtendableSparseMatrixCSC{Tm, Tv, Ti}(n, m, p::Integer = 1) wh ) end +""" + $(TYPEDSIGNATURES) + +Set node partitioning. +""" function partitioning!(ext::GenericMTExtendableSparseMatrixCSC{Tm, Tv, Ti}, colparts, partnodes) where {Tm, Tv, Ti} ext.partnodes = partnodes ext.colparts = colparts @@ -29,6 +46,9 @@ function partitioning!(ext::GenericMTExtendableSparseMatrixCSC{Tm, Tv, Ti}, colp end +""" + $(TYPEDSIGNATURES) +""" function reset!(ext::GenericMTExtendableSparseMatrixCSC{Tm, Tv, Ti}, p::Integer) where {Tm, Tv, Ti} m, n = size(ext.cscmatrix) ext.cscmatrix = spzeros(Tv, Ti, m, n) @@ -38,11 +58,17 @@ function reset!(ext::GenericMTExtendableSparseMatrixCSC{Tm, Tv, Ti}, p::Integer) return ext end +""" + $(TYPEDSIGNATURES) +""" function reset!(ext::GenericMTExtendableSparseMatrixCSC) return reset!(ext, length(ext.xmatrices)) end +""" + $(TYPEDSIGNATURES) +""" function flush!(ext::GenericMTExtendableSparseMatrixCSC{Tm, Tv, Ti}) where {Tm, Tv, Ti} ext.cscmatrix = Base.sum(ext.xmatrices, ext.cscmatrix) np = length(ext.xmatrices) @@ -52,14 +78,35 @@ function flush!(ext::GenericMTExtendableSparseMatrixCSC{Tm, Tv, Ti}) where {Tm, end +""" + $(TYPEDSIGNATURES) +""" function SparseArrays.sparse(ext::GenericMTExtendableSparseMatrixCSC) flush!(ext) return ext.cscmatrix end +""" + $(TYPEDSIGNATURES) +""" +function Base.setindex!( + ext::GenericMTExtendableSparseMatrixCSC, + v::Any, + i::Integer, + j::Integer + ) + k = findindex(ext.cscmatrix, i, j) + return if k > 0 + ext.cscmatrix.nzval[k] = v + else + error("use rawupdateindex! 
for new entries into GenericMTExtendableSparseMatrixCSC") + end +end + +# to resolve ambiguity function Base.setindex!( ext::GenericMTExtendableSparseMatrixCSC, - v::Union{Number, AbstractVecOrMat}, + v::AbstractVecOrMat, i::Integer, j::Integer ) @@ -71,6 +118,10 @@ function Base.setindex!( end end + +""" + $(TYPEDSIGNATURES) +""" function Base.getindex( ext::GenericMTExtendableSparseMatrixCSC, i::Integer, @@ -86,9 +137,15 @@ function Base.getindex( end end +""" + $(TYPEDSIGNATURES) +""" nnznew(ext::GenericMTExtendableSparseMatrixCSC) = sum(nnz, ext.xmatrices) +""" + $(TYPEDSIGNATURES) +""" function rawupdateindex!( ext::GenericMTExtendableSparseMatrixCSC, op, @@ -106,6 +163,9 @@ function rawupdateindex!( end +""" + $(TYPEDSIGNATURES) +""" function updateindex!( ext::GenericMTExtendableSparseMatrixCSC, op, @@ -124,11 +184,14 @@ end # Needed in 1.9 -function Base.:*(ext::GenericMTExtendableSparseMatrixCSC{Tm, TA} where {Tm <: ExtendableSparse.AbstractSparseMatrixExtension}, x::Union{StridedVector, BitVector}) where {TA} +function Base.:*(ext::GenericMTExtendableSparseMatrixCSC{Tm, TA} where {Tm <: AbstractSparseMatrixExtension}, x::Union{StridedVector, BitVector}) where {TA} return mul!(similar(x), ext, x) end -function LinearAlgebra.mul!(r, ext::GenericMTExtendableSparseMatrixCSC, x) +""" + $(TYPEDSIGNATURES) +""" +function LinearAlgebra.mul!(r::AbstractVecOrMat, ext::GenericMTExtendableSparseMatrixCSC, x::AbstractVecOrMat) flush!(ext) A = ext.cscmatrix colparts = ext.colparts @@ -148,3 +211,14 @@ function LinearAlgebra.mul!(r, ext::GenericMTExtendableSparseMatrixCSC, x) end return r end + +# to resolve ambiguity +function LinearAlgebra.mul!(::SparseArrays.AbstractSparseMatrixCSC, ::GenericMTExtendableSparseMatrixCSC, ::LinearAlgebra.Diagonal) + throw(MethodError("mul!(::AbstractSparseMatrixCSC, ::GenericMTExtendableSparseMatrixCSC,::Diagonal) is impossible")) + return nothing +end + +function LinearAlgebra.mul!(::AbstractMatrix, ::GenericMTExtendableSparseMatrixCSC, ::LinearAlgebra.AbstractTriangular) + throw(MethodError("mul!(::AbstractMatrix, ::GenericMTExtendableSparseMatrixCSC, ::AbstractTriangular) is impossible")) + return nothing +end diff --git a/src/matrix/extendable.jl b/src/matrix/extendable.jl deleted file mode 100644 index e1d3f5e..0000000 --- a/src/matrix/extendable.jl +++ /dev/null @@ -1,325 +0,0 @@ -################################################################## -""" -$(TYPEDEF) - -Extendable sparse matrix. A nonzero entry of this matrix is contained -either in cscmatrix, or in lnkmatrix, never in both. - -$(TYPEDFIELDS) -""" -mutable struct ExtendableSparseMatrixCSC{Tv, Ti <: Integer} <: AbstractExtendableSparseMatrixCSC{Tv, Ti} - """ - Final matrix data - """ - cscmatrix::SparseMatrixCSC{Tv, Ti} - - """ - Linked list structure holding data of extension - """ - lnkmatrix::Union{SparseMatrixLNK{Tv, Ti}, Nothing} - - """ - Pattern hash - """ - phash::UInt64 -end - - -""" -``` -ExtendableSparseMatrixCSC(Tv,Ti,m,n) -ExtendableSparseMatrixCSC(Tv,m,n) -ExtendableSparseMatrixCSC(m,n) -``` -Create empty ExtendableSparseMatrixCSC. This is equivalent to `spzeros(m,n)` for -`SparseMartrixCSC`. 
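
The matrix hunks above define the assembly API of the new generic extendable types: `setindex!`, `updateindex!`, and `rawupdateindex!` write into the extension, `flush!` merges the extension into the CSC part, and `sparse` returns the plain `SparseMatrixCSC`. A minimal sketch of that workflow, using the backward-compatible `ExtendableSparseMatrix` alias introduced later in this diff; exported names are assumed, otherwise qualify with `ExtendableSparse.`:

```julia
using ExtendableSparse, SparseArrays

n = 4
A = ExtendableSparseMatrix(n, n)   # empty n x n matrix, analogous to spzeros(n, n)

A[1, 1] = 2.0                      # setindex! puts a structurally new entry into the extension
updateindex!(A, +, -1.0, 1, 2)     # op-based update with a single index search; creates no entry for zero v
rawupdateindex!(A, +, 0.0, 2, 2)   # like updateindex!, but records the entry even when v is zero

flush!(A)                          # merge the extension into the CSC part
Acsc = sparse(A)                   # plain SparseMatrixCSC with the assembled pattern
```

For the multithreaded `GenericMTExtendableSparseMatrixCSC`, structurally new entries must go through `rawupdateindex!`; plain `setindex!` on a missing entry throws, as the method above shows.
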
- -""" - -function ExtendableSparseMatrixCSC{Tv, Ti}(m, n) where {Tv, Ti <: Integer} - return ExtendableSparseMatrixCSC{Tv, Ti}(spzeros(Tv, Ti, m, n), nothing, 0) -end - -function ExtendableSparseMatrixCSC( - valuetype::Type{Tv}, - indextype::Type{Ti}, - m, - n - ) where {Tv, Ti <: Integer} - return ExtendableSparseMatrixCSC{Tv, Ti}(m, n) -end - -function ExtendableSparseMatrixCSC(valuetype::Type{Tv}, m, n) where {Tv} - return ExtendableSparseMatrixCSC{Tv, Int}(m, n) -end - -ExtendableSparseMatrixCSC(m, n) = ExtendableSparseMatrixCSC{Float64, Int}(m, n) - -""" -$(SIGNATURES) - -Create ExtendableSparseMatrixCSC from SparseMatrixCSC -""" -function ExtendableSparseMatrixCSC(csc::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti <: Integer} - return ExtendableSparseMatrixCSC{Tv, Ti}(csc, nothing, phash(csc)) -end - -function ExtendableSparseMatrixCSC{Tv, Ti}(csc::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti <: Integer} - return ExtendableSparseMatrixCSC{Tv, Ti}(csc, nothing, phash(csc)) -end - -""" -$(SIGNATURES) - - Create ExtendableSparseMatrixCSC from Diagonal -""" -ExtendableSparseMatrixCSC(D::Diagonal) = ExtendableSparseMatrixCSC(sparse(D)) - -""" -$(SIGNATURES) - - Create ExtendableSparseMatrixCSC from AbstractMatrix, dropping all zero entries. - This is the equivalent to `sparse(A)`. -""" -ExtendableSparseMatrixCSC(A::AbstractMatrix) = ExtendableSparseMatrixCSC(sparse(A)) - -""" - ExtendableSparseMatrixCSC(I,J,V) - ExtendableSparseMatrixCSC(I,J,V,m,n) - ExtendableSparseMatrixCSC(I,J,V,combine::Function) - ExtendableSparseMatrixCSC(I,J,V,m,n,combine::Function) - -Create ExtendableSparseMatrixCSC from triplet (COO) data. -""" -ExtendableSparseMatrixCSC(I, J, V::AbstractVector) = ExtendableSparseMatrixCSC(sparse(I, J, V)) - -function ExtendableSparseMatrixCSC(I, J, V::AbstractVector, m, n) - return ExtendableSparseMatrixCSC(sparse(I, J, V, m, n)) -end - -function ExtendableSparseMatrixCSC(I, J, V::AbstractVector, combine::Function) - return ExtendableSparseMatrixCSC(sparse(I, J, V, combine)) -end - -function ExtendableSparseMatrixCSC(I, J, V::AbstractVector, m, n, combine::Function) - return ExtendableSparseMatrixCSC(sparse(I, J, V, m, n, combine)) -end - -# THese are probably too much... -# function Base.transpose(A::ExtendableSparseMatrixCSC) -# flush!(A) -# ExtendableSparseMatrixCSC(Base.transpose(sparse(A))) -# end -# function Base.adjoint(A::ExtendableSparseMatrixCSC) -# flush!(A) -# ExtendableSparseMatrixCSC(Base.adjoint(sparse(A))) -# end -# function SparseArrays.sparse(text::LinearAlgebra.Transpose{Tv,ExtendableSparseMatrixCSC{Tv,Ti}}) where {Tv,Ti} -# transpose(sparse(parent(text))) -# end - - -""" -$(SIGNATURES) - -Create similar but empty extendableSparseMatrix -""" -function Base.similar(m::ExtendableSparseMatrixCSC{Tv, Ti}) where {Tv, Ti} - return ExtendableSparseMatrixCSC{Tv, Ti}(size(m)...) -end - -function Base.similar(m::ExtendableSparseMatrixCSC{Tv, Ti}, ::Type{T}) where {Tv, Ti, T} - return ExtendableSparseMatrixCSC{T, Ti}(size(m)...) -end - -""" -$(SIGNATURES) - -Update element of the matrix with operation `op`. -This can replace the following code and save one index -search during access: - -```@example -using ExtendableSparse # hide -A=ExtendableSparseMatrixCSC(3,3) -A[1,2]+=0.1 -A -``` - -```@example -using ExtendableSparse # hide - -A=ExtendableSparseMatrixCSC(3,3) -updateindex!(A,+,0.1,1,2) -A -``` - -If `v` is zero, no new entry is created. 
-""" - -function updateindex!( - ext::ExtendableSparseMatrixCSC{Tv, Ti}, - op, - v, - i, - j - ) where {Tv, Ti <: Integer} - k = findindex(ext.cscmatrix, i, j) - if k > 0 - ext.cscmatrix.nzval[k] = op(ext.cscmatrix.nzval[k], v) - else - if ext.lnkmatrix == nothing - ext.lnkmatrix = SparseMatrixLNK{Tv, Ti}(ext.cscmatrix.m, ext.cscmatrix.n) - end - updateindex!(ext.lnkmatrix, op, v, i, j) - end - return ext -end - -""" -$(SIGNATURES) -Like [`updateindex!`](@ref) but without -checking if v is zero. -""" -function rawupdateindex!( - ext::ExtendableSparseMatrixCSC{Tv, Ti}, - op, - v, - i, - j, - part = 1 - ) where {Tv, Ti <: Integer} - k = findindex(ext.cscmatrix, i, j) - if k > 0 - ext.cscmatrix.nzval[k] = op(ext.cscmatrix.nzval[k], v) - else - if ext.lnkmatrix == nothing - ext.lnkmatrix = SparseMatrixLNK{Tv, Ti}(ext.cscmatrix.m, ext.cscmatrix.n) - end - rawupdateindex!(ext.lnkmatrix, op, v, i, j) - end - return ext -end - -""" -$(SIGNATURES) - -Find index in CSC matrix and set value if it exists. Otherwise, -set index in extension if `v` is nonzero. -""" -function Base.setindex!( - ext::ExtendableSparseMatrixCSC{Tv, Ti}, - v::Union{Number, AbstractVecOrMat}, - i::Integer, - j::Integer - ) where {Tv, Ti} - k = findindex(ext.cscmatrix, i, j) - return if k > 0 - ext.cscmatrix.nzval[k] = v - else - if ext.lnkmatrix == nothing - ext.lnkmatrix = SparseMatrixLNK{Tv, Ti}(ext.cscmatrix.m, ext.cscmatrix.n) - end - ext.lnkmatrix[i, j] = v - end -end - -""" -$(SIGNATURES) - -Find index in CSC matrix and return value, if it exists. -Otherwise, return value from extension. -""" -function Base.getindex( - ext::ExtendableSparseMatrixCSC{Tv, Ti}, - i::Integer, - j::Integer - ) where {Tv, Ti <: Integer} - k = findindex(ext.cscmatrix, i, j) - if k > 0 - return ext.cscmatrix.nzval[k] - elseif ext.lnkmatrix == nothing - return zero(Tv) - else - v = zero(Tv) - v = ext.lnkmatrix[i, j] - end -end - - -""" -$(SIGNATURES) - -If there are new entries in extension, create new CSC matrix by adding the -cscmatrix and linked list matrix and reset the linked list based extension. -""" -function flush!(ext::ExtendableSparseMatrixCSC) - if ext.lnkmatrix != nothing && nnz(ext.lnkmatrix) > 0 - ext.cscmatrix = ext.lnkmatrix + ext.cscmatrix - ext.lnkmatrix = nothing - ext.phash = phash(ext.cscmatrix) - end - return ext -end - - -function SparseArrays.sparse(ext::ExtendableSparseMatrixCSC) - flush!(ext) - return ext.cscmatrix -end - - -""" -$(SIGNATURES) - -Reset ExtenableSparseMatrix into state similar to that after creation. -""" -function reset!(A::ExtendableSparseMatrixCSC) - A.cscmatrix = spzeros(size(A)...) - return A.lnkmatrix = nothing -end - - -""" -$(SIGNATURES) -""" -function Base.copy(S::ExtendableSparseMatrixCSC) - return if isnothing(S.lnkmatrix) - ExtendableSparseMatrixCSC(copy(S.cscmatrix), nothing, S.phash) - else - ExtendableSparseMatrixCSC(copy(S.cscmatrix), copy(S.lnkmatrix), S.phash) - end -end - -""" - pointblock(matrix,blocksize) - -Create a pointblock matrix. 
-""" -function pointblock(A0::ExtendableSparseMatrixCSC{Tv, Ti}, blocksize) where {Tv, Ti} - A = SparseMatrixCSC(A0) - colptr = A.colptr - rowval = A.rowval - nzval = A.nzval - n = A.n - block = zeros(Tv, blocksize, blocksize) - nblock = n ÷ blocksize - b = SMatrix{blocksize, blocksize}(block) - Tb = typeof(b) - Ab = ExtendableSparseMatrixCSC{Tb, Ti}(nblock, nblock) - - - for i in 1:n - for k in colptr[i]:(colptr[i + 1] - 1) - j = rowval[k] - iblock = (i - 1) ÷ blocksize + 1 - jblock = (j - 1) ÷ blocksize + 1 - ii = (i - 1) % blocksize + 1 - jj = (j - 1) % blocksize + 1 - block[ii, jj] = nzval[k] - rawupdateindex!(Ab, +, SMatrix{blocksize, blocksize}(block), iblock, jblock) - block[ii, jj] = zero(Tv) - end - end - return flush!(Ab) -end diff --git a/src/preconbuilders.jl b/src/preconbuilders.jl index 08e2596..93d4994 100644 --- a/src/preconbuilders.jl +++ b/src/preconbuilders.jl @@ -45,6 +45,39 @@ function LinearAlgebra.ldiv!( return Y end +""" + pointblock(matrix,blocksize) + +Create a pointblock matrix. +""" +function pointblock(A0::ExtendableSparseMatrixCSC{Tv, Ti}, blocksize) where {Tv, Ti} + A = SparseMatrixCSC(A0) + colptr = A.colptr + rowval = A.rowval + nzval = A.nzval + n = A.n + block = zeros(Tv, blocksize, blocksize) + nblock = n ÷ blocksize + b = SMatrix{blocksize, blocksize}(block) + Tb = typeof(b) + Ab = ExtendableSparseMatrixCSC{Tb, Ti}(nblock, nblock) + + + for i in 1:n + for k in colptr[i]:(colptr[i + 1] - 1) + j = rowval[k] + iblock = (i - 1) ÷ blocksize + 1 + jblock = (j - 1) ÷ blocksize + 1 + ii = (i - 1) % blocksize + 1 + jj = (j - 1) % blocksize + 1 + block[ii, jj] = nzval[k] + rawupdateindex!(Ab, +, SMatrix{blocksize, blocksize}(block), iblock, jblock) + block[ii, jj] = zero(Tv) + end + end + return flush!(Ab) +end + function (b::ILUZeroPreconBuilder)(A0, p) A = SparseMatrixCSC(size(A0)..., getcolptr(A0), rowvals(A0), nonzeros(A0)) return if b.blocksize == 1 @@ -67,40 +100,197 @@ end (::ILUTPreconBuilder)(A, p) = error("import IncompleteLU.jl in order to use ILUTBuilder") +mutable struct BlockPreconditioner + A::AbstractMatrix + factorization + partitioning::Union{Nothing, Vector{AbstractVector}} + facts::Vector + function BlockPreconditioner(A; partitioning = nothing, factorization = nothing) + p = new() + p.A = A + p.partitioning = partitioning + p.factorization = factorization + update!(p) + return p + end +end + + """ - SmoothedAggregationPreconBuilder(;blocksize=1, kwargs...) + BlockPreconditioner(;partitioning, factorization) + +Create a block preconditioner from partition of unknowns given by `partitioning`, a vector of AbstractVectors describing the +indices of the partitions of the matrix. For a matrix of size `n x n`, e.g. partitioning could be `[ 1:n÷2, (n÷2+1):n]` +or [ 1:2:n, 2:2:n]. +Factorization is a callable (Function or struct) which allows to create a factorization (with `ldiv!` methods) from a submatrix of A. +""" +function BlockPreconditioner end -Return callable object constructing a left smoothed aggregation algebraic multigrid preconditioner -using [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl). +""" + allow_views(::preconditioner_type) -Needs `import AlgebraicMultigrid` to trigger the corresponding extension. +Factorizations on matrix partitions within a block preconditioner may or may not work with array views. +E.g. the umfpack factorization cannot work with views, while ILUZeroPreconditioner can. + Implementing a method for `allow_views` returning `false` resp. 
`true` allows to dispatch to the proper case. """ -struct SmoothedAggregationPreconBuilder{Tk} - blocksize::Int - kwargs::Tk +allow_views(::Any) = false + + +function update!(precon::BlockPreconditioner) + flush!(precon.A) + nall = sum(length, precon.partitioning) + n = size(precon.A, 1) + if nall != n + @warn "sum(length,partitioning)=$(nall) but n=$(n)" + end + + if isnothing(precon.partitioning) + partitioning = [1:n] + end + + np = length(precon.partitioning) + precon.facts = Vector{Any}(undef, np) + return Threads.@threads for ipart in 1:np + factorization = deepcopy(precon.factorization) + AP = precon.A[precon.partitioning[ipart], precon.partitioning[ipart]] + FP = factorization(AP) + precon.facts[ipart] = FP + end +end + + +function LinearAlgebra.ldiv!(p::BlockPreconditioner, v) + partitioning = p.partitioning + facts = p.facts + np = length(partitioning) + + if allow_views(p.factorization) + Threads.@threads for ipart in 1:np + ldiv!(facts[ipart], view(v, partitioning[ipart])) + end + else + Threads.@threads for ipart in 1:np + vv = v[partitioning[ipart]] + ldiv!(facts[ipart], vv) + view(v, partitioning[ipart]) .= vv + end + end + return v end -function SmoothedAggregationPreconBuilder(; blocksize = 1, kwargs...) - return SmoothedAggregationPreconBuilder(blocksize, kwargs) +function LinearAlgebra.ldiv!(u, p::BlockPreconditioner, v) + partitioning = p.partitioning + facts = p.facts + np = length(partitioning) + if allow_views(p.factorization) + Threads.@threads for ipart in 1:np + ldiv!(view(u, partitioning[ipart]), facts[ipart], view(v, partitioning[ipart])) + end + else + Threads.@threads for ipart in 1:np + uu = u[partitioning[ipart]] + ldiv!(uu, facts[ipart], v[partitioning[ipart]]) + view(u, partitioning[ipart]) .= uu + end + end + return u end -(::SmoothedAggregationPreconBuilder)(A, p) = error("import AlgebraicMultigrid in order to use SmoothedAggregationPreconBuilder") +Base.eltype(p::BlockPreconditioner) = eltype(p.facts[1]) + """ - RugeStubenPreconBuilder(;blocksize=1, kwargs...) + BlockPreconBuilder(;precs=UMFPACKPreconBuilder(), + partitioning = A -> [1:size(A,1)] + +Return callable object constructing a left block Jacobi preconditioner +from partition of unknowns. -Return callable object constructing a left algebraic multigrid preconditioner after Ruge & Stüben -using [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl). +- `partitioning(A)` shall return a vector of AbstractVectors describing the indices of the partitions of the matrix. + For a matrix of size `n x n`, e.g. partitioning could be `[ 1:n÷2, (n÷2+1):n]` or [ 1:2:n, 2:2:n]. -Needs `import AlgebraicMultigrid` to trigger the corresponding extension. +- `precs(A,p)` shall return a left precondioner for a matrix block. """ -struct RugeStubenPreconBuilder{Tk} - blocksize::Int - kwargs::Tk +Base.@kwdef mutable struct BlockPreconBuilder + precs = UMFPACKPreconBuilder() + partitioning = A -> [1:size(A, 1)] +end + +function (blockprecs::BlockPreconBuilder)(A, p) + (; precs, partitioning) = blockprecs + factorization = A -> precs(A, p)[1] + bp = BlockPreconditioner(A; partitioning = partitioning(A), factorization) + return (bp, LinearAlgebra.I) end -function RugeStubenPreconBuilder(; blocksize = 1, kwargs...) 
- return RugeStubenPreconBuilder(blocksize, kwargs) +""" + Allow array for precs => different precoms +""" + + +mutable struct _JacobiPreconditioner{Tv} + invdiag::Vector{Tv} end -(::RugeStubenPreconBuilder)(A, p) = error("import AlgebraicMultigrid in order to use RugeStubenAMGBuilder") +function jacobi(A::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} + invdiag = Array{Tv, 1}(undef, A.n) + n = A.n + @inbounds for i in 1:n + invdiag[i] = one(Tv) / A[i, i] + end + return _JacobiPreconditioner(invdiag) +end + +function jacobi!(p::_JacobiPreconditioner{Tv}, A::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} + n = A.n + @inbounds for i in 1:n + p.invdiag[i] = one(Tv) / A[i, i] + end + return p +end + +mutable struct JacobiPreconditioner + A::AbstractMatrix + factorization::Union{_JacobiPreconditioner, Nothing} + function JacobiPreconditioner(A) + p = new() + p.A = A + p.factorization = nothing + update!(p) + return p + end +end + +function LinearAlgebra.ldiv!(u, p::_JacobiPreconditioner, v) + n = length(p.invdiag) + for i in 1:n + @inbounds u[i] = p.invdiag[i] * v[i] + end + return u +end + +LinearAlgebra.ldiv!(p::_JacobiPreconditioner, v) = ldiv!(v, p, v) + + +""" +``` +JacobiPreconditioner() +JacobiPreconditioner(matrix) +``` + +Jacobi preconditioner. +""" +function JacobiPreconditioner end + +function update!(p::JacobiPreconditioner) + flush!(p.A) + Tv = eltype(p.A) + p.factorization = jacobi(SparseMatrixCSC(p.A)) + return p +end + +LinearAlgebra.ldiv!(u, fact::JacobiPreconditioner, v) = ldiv!(u, fact.factorization, v) +LinearAlgebra.ldiv!(fact::JacobiPreconditioner, v) = ldiv!(fact.factorization, v) + +allow_views(::JacobiPreconditioner) = true +allow_views(::Type{JacobiPreconditioner}) = true diff --git a/src/matrix/sparsematrixcsc.jl b/src/sparsematrixcsc.jl similarity index 98% rename from src/matrix/sparsematrixcsc.jl rename to src/sparsematrixcsc.jl index 36dd7c2..8065610 100644 --- a/src/matrix/sparsematrixcsc.jl +++ b/src/sparsematrixcsc.jl @@ -1,5 +1,5 @@ """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Return index corresponding to entry [i,j] in the array of nonzeros, if the entry exists, otherwise, return 0. @@ -23,7 +23,7 @@ function findindex(csc::SparseMatrixCSC{T}, i, j) where {T} end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Update element of the matrix with operation `op` without introducing new nonzero elements. @@ -59,7 +59,7 @@ function updateindex!(csc::SparseMatrixCSC{Tv, Ti}, op, v, i, j) where {Tv, Ti < end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Trivial flush! method for allowing to run the code with both `ExtendableSparseMatrix` and `SparseMatrixCSC`. @@ -67,7 +67,7 @@ Trivial flush! method for allowing to run the code with both `ExtendableSparseMa flush!(csc::SparseMatrixCSC) = csc """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Hash of csc matrix pattern. """ diff --git a/src/matrix/sparsematrixdict.jl b/src/sparsematrixdict.jl similarity index 62% rename from src/matrix/sparsematrixdict.jl rename to src/sparsematrixdict.jl index 744c1df..6d8c5d6 100644 --- a/src/matrix/sparsematrixdict.jl +++ b/src/sparsematrixdict.jl @@ -1,36 +1,128 @@ """ $(TYPEDEF) -Sparse matrix where entries are organized as dictionary. +[`AbstractSparseMatrixExtension`](@ref) extension where entries are organized as dictionary. +This is meant as an example implementation to show how a sparse matrix +extension could be implemented. As dictionary access tends to be slow, it +is not meant for general use. 
+ +An advantage of this format is the fact that it avoids to store a vector of the length of unknowns +indicating the first column indices, avoiding storage overhead during parallel assembly. + +$(TYPEDFIELDS) """ mutable struct SparseMatrixDict{Tv, Ti} <: AbstractSparseMatrixExtension{Tv, Ti} + """ + Number of rows + """ m::Ti + + """ + Number of columns + """ n::Ti + + """ + Dictionary with pairs of integers as keys containing values + """ values::Dict{Pair{Ti, Ti}, Tv} + SparseMatrixDict{Tv, Ti}(m, n) where {Tv, Ti} = new(m, n, Dict{Pair{Ti, Ti}, Tv}()) end +""" + $(TYPEDSIGNATURES) +""" +function SparseMatrixDict(Acsc::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} + A = SparseMatrixDict{Tv, Ti}(size(Acsc)...) + rows = rowvals(Acsc) + vals = nonzeros(Acsc) + m, n = size(Acsc) + for j in 1:n + for k in nzrange(Acsc, j) + A[rows[k], j] = vals[k] + end + end + return A +end + +""" + $(TYPEDSIGNATURES) +""" +function SparseMatrixDict( + valuetype::Type{Tv}, indextype::Type{Ti}, m, + n + ) where {Tv, Ti <: Integer} + return SparseMatrixDict{Tv, Ti}(m, n) +end + + +""" + $(TYPEDSIGNATURES) +""" +SparseMatrixDict(valuetype::Type{Tv}, m, n) where {Tv} = SparseMatrixDict(Tv, Int, m, n) + + +""" + $(TYPEDSIGNATURES) +""" +SparseMatrixDict(m, n) = SparseMatrixDict(Float64, m, n) + +""" + $(TYPEDSIGNATURES) +""" function reset!(m::SparseMatrixDict{Tv, Ti}) where {Tv, Ti} return m.values = Dict{Pair{Ti, Ti}, Tv}() end +""" + $(TYPEDSIGNATURES) +""" function Base.setindex!(m::SparseMatrixDict, v, i, j) return m.values[Pair(i, j)] = v end +""" + $(TYPEDSIGNATURES) +""" function rawupdateindex!(m::SparseMatrixDict{Tv, Ti}, op, v, i, j) where {Tv, Ti} p = Pair(i, j) return m.values[p] = op(get(m.values, p, zero(Tv)), v) end + +""" + $(TYPEDSIGNATURES) +""" +function updateindex!(m::SparseMatrixDict{Tv, Ti}, op, v, i, j) where {Tv, Ti} + p = Pair(i, j) + v1 = op(get(m.values, p, zero(Tv)), v) + if !iszero(v1) + m.values[p] = v1 + end + return v1 +end + +""" + $(TYPEDSIGNATURES) +""" function Base.getindex(m::SparseMatrixDict{Tv}, i, j) where {Tv} return get(m.values, Pair(i, j), zero(Tv)) end +""" + $(TYPEDSIGNATURES) +""" Base.size(m::SparseMatrixDict) = (m.m, m.n) +""" + $(TYPEDSIGNATURES) +""" SparseArrays.nnz(m::SparseMatrixDict) = length(m.values) +""" + $(TYPEDSIGNATURES) +""" function SparseArrays.sparse(mat::SparseMatrixDict{Tv, Ti}) where {Tv, Ti} l = length(mat.values) I = Vector{Ti}(undef, l) @@ -50,19 +142,23 @@ function SparseArrays.sparse(mat::SparseMatrixDict{Tv, Ti}) where {Tv, Ti} end end +""" + $(TYPEDSIGNATURES) +""" function Base.:+(dictmatrix::SparseMatrixDict{Tv, Ti}, cscmatrix::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} lnew = length(dictmatrix.values) if lnew > 0 (; colptr, nzval, rowval, m, n) = cscmatrix l = lnew + nnz(cscmatrix) + I = Vector{Ti}(undef, l) J = Vector{Ti}(undef, l) V = Vector{Tv}(undef, l) i = 1 for icsc in 1:(length(colptr) - 1) for j in colptr[icsc]:(colptr[icsc + 1] - 1) - I[i] = icsc - J[i] = rowval[j] + I[i] = rowval[j] + J[i] = icsc V[i] = nzval[j] i = i + 1 end @@ -74,6 +170,8 @@ function Base.:+(dictmatrix::SparseMatrixDict{Tv, Ti}, cscmatrix::SparseMatrixCS V[i] = v i = i + 1 end + + @assert l == i - 1 @static if VERSION >= v"1.10" return SparseArrays.sparse!(I, J, V, m, n, +) else @@ -83,6 +181,9 @@ function Base.:+(dictmatrix::SparseMatrixDict{Tv, Ti}, cscmatrix::SparseMatrixCS return cscmatrix end +""" + $(TYPEDSIGNATURES) +""" function Base.sum(dictmatrices::Vector{SparseMatrixDict{Tv, Ti}}, cscmatrix::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} lnew = sum(m -> 
length(m.values), dictmatrices) if lnew > 0 diff --git a/src/matrix/sparsematrixdilnkc.jl b/src/sparsematrixdilnkc.jl similarity index 92% rename from src/matrix/sparsematrixdilnkc.jl rename to src/sparsematrixdilnkc.jl index a394bcc..081bd29 100644 --- a/src/matrix/sparsematrixdilnkc.jl +++ b/src/sparsematrixdilnkc.jl @@ -1,9 +1,18 @@ """ $(TYPEDEF) - Modification of SparseMatrixLNK where the pointer to first index of -column j is stored in a dictionary. - """ +[`AbstractSparseMatrixExtension`](@ref) where entries are stored as linked list, but +where -- in difference to [`SparseMatrixLNK`](@ref) -- the pointer to first index +of column j is stored in a dictionary. + +Thus this format -- similar to [`SparseMatrixDict`](@ref) -- avoids to store a vector of the length of unknowns +indicating the first column indices, avoiding storage overhead during parallel assembly. + +Via the type alias [`MTExtendableSparseMatrixCSC`](@ref), this extension is used as default for handling +parallel assembly. + +$(TYPEDFIELDS) +""" mutable struct SparseMatrixDILNKC{Tv, Ti <: Integer} <: AbstractSparseMatrixExtension{Tv, Ti} """ Number of rows @@ -54,7 +63,7 @@ mutable struct SparseMatrixDILNKC{Tv, Ti <: Integer} <: AbstractSparseMatrixExte end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor of empty matrix. """ @@ -63,7 +72,7 @@ function SparseMatrixDILNKC{Tv, Ti}(m, n) where {Tv, Ti <: Integer} end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor of empty matrix. """ @@ -75,21 +84,21 @@ function SparseMatrixDILNKC( end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor of empty matrix. """ SparseMatrixDILNKC(valuetype::Type{Tv}, m, n) where {Tv} = SparseMatrixDILNKC(Tv, Int, m, n) """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor of empty matrix. """ SparseMatrixDILNKC(m, n) = SparseMatrixDILNKC(Float64, m, n) """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor from SparseMatrixCSC. @@ -108,7 +117,7 @@ function SparseMatrixDILNKC(csc::SparseArrays.SparseMatrixCSC{Tv, Ti}) where { end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Find index in matrix. """ @@ -133,7 +142,7 @@ function findindex(lnk::SparseMatrixDILNKC, i, j) end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Return value stored for entry or zero if not found """ @@ -147,7 +156,7 @@ function Base.getindex(lnk::SparseMatrixDILNKC{Tv, Ti}, i, j) where {Tv, Ti} end """ - $(SIGNATURES) + $(TYPEDSIGNATURES) Add entry. """ @@ -181,7 +190,7 @@ function addentry!(lnk::SparseMatrixDILNKC, i, j, k, k0) end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Update value of existing entry, otherwise extend matrix if v is nonzero. """ @@ -203,7 +212,7 @@ function Base.setindex!(lnk::SparseMatrixDILNKC, v, i, j) end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Update element of the matrix with operation `op`. It assumes that `op(0,0)==0`. If `v` is zero, no new @@ -223,7 +232,7 @@ function updateindex!(lnk::SparseMatrixDILNKC{Tv, Ti}, op, v, i, j) where {Tv, T end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Update element of the matrix with operation `op`. It assumes that `op(0,0)==0`. If `v` is zero a new entry @@ -241,14 +250,14 @@ function rawupdateindex!(lnk::SparseMatrixDILNKC{Tv, Ti}, op, v, i, j) where {Tv end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Return tuple containing size of the matrix. """ Base.size(lnk::SparseMatrixDILNKC) = (lnk.m, lnk.n) """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Return number of nonzero entries. 
""" @@ -256,7 +265,7 @@ SparseArrays.nnz(lnk::SparseMatrixDILNKC) = lnk.nnz """ - $(SIGNATURES) + $(TYPEDSIGNATURES) Add lnk and csc via interim COO (coordinate) format, i.e. arrays I,J,V. """ function add_via_COO( @@ -297,7 +306,7 @@ end """ - $(SIGNATURES) + $(TYPEDSIGNATURES) Add lnk and csc without creation of intermediate data. (to be fixed) """ @@ -396,7 +405,7 @@ end """ - $(SIGNATURES) + $(TYPEDSIGNATURES) Add SparseMatrixCSC matrix and [`SparseMatrixDILNKC`](@ref) lnk, returning a SparseMatrixCSC """ @@ -455,7 +464,7 @@ end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor from SparseMatrixDILNKC. @@ -477,7 +486,7 @@ function Base.copy(S::SparseMatrixDILNKC) S.nentries, copy(S.colptr), copy(S.colstart), - copy(S.rowvals), + copy(S.rowval), copy(S.nzval) ) end diff --git a/src/matrix/sparsematrixlnk.jl b/src/sparsematrixlnk.jl similarity index 93% rename from src/matrix/sparsematrixlnk.jl rename to src/sparsematrixlnk.jl index 76f5e0b..83d897f 100644 --- a/src/matrix/sparsematrixlnk.jl +++ b/src/sparsematrixlnk.jl @@ -1,7 +1,8 @@ """ $(TYPEDEF) -Struct to hold sparse matrix in the linked list format. +[`AbstractSparseMatrixExtension`](@ref) where entries are organized in the linked list +sparse matrix format. This was the standard structure in ExtendableSparse v1.x. Modeled after the linked list sparse matrix format described in the [whitepaper](https://www-users.cs.umn.edu/~saad/software/SPARSKIT/paper.ps) @@ -15,6 +16,11 @@ The advantage of the linked list structure is the fact that upon insertion of a new entry, the arrays describing the structure can grow at their respective ends and can be conveniently updated via `push!`. No copying of existing data is necessary. +Via the type aliases [`STExtendableSparseMatrixCSC`](@ref), [`ExtendableSparseMatrixCSC`](@ref), +and [`ExtendableSparseMatrix`](@ref) this extension is used as default for handling +scalar assembly. + + $(TYPEDFIELDS) """ mutable struct SparseMatrixLNK{Tv, Ti <: Integer} <: AbstractSparseMatrixExtension{Tv, Ti} @@ -67,7 +73,7 @@ mutable struct SparseMatrixLNK{Tv, Ti <: Integer} <: AbstractSparseMatrixExtensi end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor of empty matrix. """ @@ -76,7 +82,7 @@ function SparseMatrixLNK{Tv, Ti}(m, n) where {Tv, Ti <: Integer} end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor of empty matrix. """ @@ -88,21 +94,21 @@ function SparseMatrixLNK( end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor of empty matrix. """ SparseMatrixLNK(valuetype::Type{Tv}, m, n) where {Tv} = SparseMatrixLNK(Tv, Int, m, n) """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor of empty matrix. """ SparseMatrixLNK(m, n) = SparseMatrixLNK(Float64, m, n) """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor from SparseMatrixCSC. @@ -138,7 +144,7 @@ function findindex(lnk::SparseMatrixLNK{Tv, Ti}, i, j) where {Tv, Ti} end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Return value stored for entry or zero if not found """ @@ -174,7 +180,7 @@ function addentry!(lnk::SparseMatrixLNK, i, j, k, k0) end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Update value of existing entry, otherwise extend matrix if v is nonzero. """ @@ -204,7 +210,7 @@ function Base.setindex!(lnk::SparseMatrixLNK, v, i, j) end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Update element of the matrix with operation `op`. It assumes that `op(0,0)==0`. 
If `v` is zero, no new @@ -231,7 +237,7 @@ function updateindex!(lnk::SparseMatrixLNK{Tv, Ti}, op, v, i, j) where {Tv, Ti} end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Update element of the matrix with operation `op`. It assumes that `op(0,0)==0`. If `v` is zero a new entry @@ -256,28 +262,19 @@ function rawupdateindex!(lnk::SparseMatrixLNK{Tv, Ti}, op, v, i, j) where {Tv, T end """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Return tuple containing size of the matrix. """ Base.size(lnk::SparseMatrixLNK) = (lnk.m, lnk.n) """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Return number of nonzero entries. """ SparseArrays.nnz(lnk::SparseMatrixLNK) = lnk.nnz -""" -$(SIGNATURES) - -Dummy flush! method for SparseMatrixLNK. Just -used in test methods -""" -function flush!(lnk::SparseMatrixLNK{Tv, Ti}) where {Tv, Ti} - return lnk -end # Struct holding pair of value and row # number, for sorting @@ -290,7 +287,7 @@ end Base.isless(x::ColEntry, y::ColEntry) = (x.rowval < y.rowval) """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Add SparseMatrixCSC matrix and [`SparseMatrixLNK`](@ref) lnk, returning a SparseMatrixCSC """ @@ -392,7 +389,7 @@ end Base.:+(csc::SparseMatrixCSC, lnk::SparseMatrixLNK) = lnk + csc """ -$(SIGNATURES) +$(TYPEDSIGNATURES) Constructor from SparseMatrixLNK. @@ -402,18 +399,14 @@ function SparseArrays.SparseMatrixCSC(lnk::SparseMatrixLNK)::SparseMatrixCSC return lnk + csc end -rowvals(S::SparseMatrixLNK) = getfield(S, :rowval) -getcolptr(S::SparseMatrixLNK) = getfield(S, :colptr) -nonzeros(S::SparseMatrixLNK) = getfield(S, :nzval) - function Base.copy(S::SparseMatrixLNK) return SparseMatrixLNK( size(S, 1), size(S, 2), S.nnz, S.nentries, - copy(getcolptr(S)), - copy(rowvals(S)), - copy(nonzeros(S)) + copy(S.colptr), + copy(S.rowval), + copy(S.nzval) ) end diff --git a/src/matrix/sprand.jl b/src/sprand.jl similarity index 78% rename from src/matrix/sprand.jl rename to src/sprand.jl index 2ff7d21..6cec921 100644 --- a/src/matrix/sprand.jl +++ b/src/sprand.jl @@ -69,20 +69,20 @@ function fdrand!( error("Matrix size mismatch") end - _flush!(m::ExtendableSparseMatrix) = flush!(m) + _flush!(m::AbstractExtendableSparseMatrixCSC) = flush!(m) _flush!(m::SparseMatrixCSC) = m - _flush!(m::SparseMatrixLNK) = m - _flush!(m::AbstractMatrix) = m + _flush!(m::AbstractSparseMatrixExtension) = m _nonzeros(m::Matrix) = vec(m) _nonzeros(m::ExtendableSparseMatrix) = nonzeros(m) _nonzeros(m::SparseMatrixLNK) = m.nzval + _nonzeros(m::SparseMatrixDILNKC) = m.nzval _nonzeros(m::SparseMatrixCSC) = nonzeros(m) zero!(A::AbstractMatrix{T}) where {T} = A .= zero(T) zero!(A::SparseMatrixCSC{T, Ti}) where {T, Ti} = _nonzeros(A) .= zero(T) - zero!(A::ExtendableSparseMatrix{T, Ti}) where {T, Ti} = _nonzeros(A) .= zero(T) - zero!(A::SparseMatrixLNK{T, Ti}) where {T, Ti} = _nonzeros(A) .= zero(T) + zero!(A::AbstractExtendableSparseMatrixCSC) = reset!(A) + zero!(A::AbstractSparseMatrixExtension) = nothing zero!(A) @@ -130,9 +130,8 @@ end """ $(SIGNATURES) -Create SparseMatrixCSC via COO intermedite arrays +Create SparseMatrixCSC via COO intermedite arrays. Just for benchmarking. 
""" - function fdrand_coo(T, nx, ny = 1, nz = 1; rand = () -> rand()) N = nx * ny * nz I = zeros(Int64, 0) @@ -185,6 +184,7 @@ function fdrand_coo(T, nx, ny = 1, nz = 1; rand = () -> rand()) end return sparse(I, J, V) end + """ $(SIGNATURES) @@ -236,13 +236,14 @@ function fdrand( symmetric = true ) where {T} N = nx * ny * nz + if matrixtype == :COO A = fdrand_coo(T, nx, ny, nz; rand = rand) else - if matrixtype == ExtendableSparseMatrix - A = ExtendableSparseMatrix(T, N, N) - elseif matrixtype == SparseMatrixLNK - A = SparseMatrixLNK(T, N, N) + if matrixtype <: AbstractExtendableSparseMatrixCSC + A = matrixtype(T, N, N) + elseif matrixtype <: AbstractSparseMatrixExtension + A = matrixtype(T, N, N) elseif matrixtype == SparseMatrixCSC A = spzeros(T, N, N) elseif matrixtype == Tridiagonal @@ -260,69 +261,3 @@ function fdrand( end fdrand(nx, ny = 1, nz = 1; kwargs...) = fdrand(Float64, nx, ny, nz; kwargs...) - -### for use with LinearSolve.jl -function solverbenchmark( - T, - solver, - nx, - ny = 1, - nz = 1; - symmetric = false, - matrixtype = ExtendableSparseMatrix, - seconds = 0.5, - repeat = 1, - tol = sqrt(eps(Float64)) - ) - A = fdrand(T, nx, ny, nz; symmetric, matrixtype) - n = size(A, 1) - x = rand(n) - b = A * x - u = solver(A, b) - nrm = norm(u - x, 1) / n - if nrm > tol - error("solution inaccurate: $((nx, ny, nz)), |u-exact|=$nrm") - end - secs = 0.0 - nsol = 0 - tmin = 1.0e30 - while secs < seconds - t = @elapsed solver(A, b) - secs += t - tmin = min(tmin, t) - nsol += 1 - end - return tmin -end - -function solverbenchmark( - T, - solver; - dim = 1, - nsizes = 10, - sizes = [10 * 2^i for i in 1:nsizes], - symmetric = false, - matrixtype = ExtendableSparseMatrix, - seconds = 0.1, - tol = sqrt(eps(Float64)) - ) - if dim == 1 - ns = sizes - elseif dim == 2 - ns = [(Int(ceil(x^(1 / 2))), Int(ceil(x^(1 / 2)))) for x in sizes] - elseif dim == 3 - ns = [ - (Int(ceil(x^(1 / 3))), Int(ceil(x^(1 / 3))), Int(ceil(x^(1 / 3)))) - for - x in sizes - ] - end - times = zeros(0) - sizes = zeros(Int, 0) - for s in ns - t = solverbenchmark(T, solver, s...; symmetric, matrixtype, seconds, tol) - push!(times, t) - push!(sizes, prod(s)) - end - return sizes, times -end diff --git a/src/typealiases.jl b/src/typealiases.jl new file mode 100644 index 0000000..bac491a --- /dev/null +++ b/src/typealiases.jl @@ -0,0 +1,49 @@ +""" + MTExtendableSparseMatrixCSC + +Multithreaded extendable sparse matrix (Experimental). + +Aliased to [`GenericMTExtendableSparseMatrixCSC`](@ref) with [`SparseMatrixDILNKC`](@ref) scalar matrix parameter. +""" +const MTExtendableSparseMatrixCSC{Tv, Ti} = GenericMTExtendableSparseMatrixCSC{SparseMatrixDILNKC{Tv, Ti}, Tv, Ti} +MTExtendableSparseMatrixCSC(m, n, args...) = MTExtendableSparseMatrixCSC{Float64, Int64}(m, n, args...) + +""" + STExtendableSparseMatrixCSC + +Single threaded extendable sparse matrix (Experimental). + +Aliased to [`GenericExtendableSparseMatrixCSC`](@ref) with [`SparseMatrixLNK`](@ref) scalar matrix parameter. +""" +const STExtendableSparseMatrixCSC{Tv, Ti} = GenericExtendableSparseMatrixCSC{SparseMatrixLNK{Tv, Ti}, Tv, Ti} +STExtendableSparseMatrixCSC(::Type{Tv}, m::Number, n::Number) where {Tv} = STExtendableSparseMatrixCSC{Tv, Int64}(m, n) +STExtendableSparseMatrixCSC(m::Number, n::Number) = STExtendableSparseMatrixCSC(Float64, m, n) +function STExtendableSparseMatrixCSC(A::AbstractSparseMatrixCSC{Tv, Ti}) where {Tv, Ti <: Integer} + return GenericExtendableSparseMatrixCSC( + SparseMatrixCSC(A), + SparseMatrixLNK{Tv, Ti}(size(A)...) 
+ ) +end +STExtendableSparseMatrixCSC(D::Diagonal) = STExtendableSparseMatrixCSC(sparse(D)) +STExtendableSparseMatrixCSC(I, J, V::AbstractVector) = STExtendableSparseMatrixCSC(sparse(I, J, V)) +STExtendableSparseMatrixCSC(I, J, V::AbstractVector, m, n) = STExtendableSparseMatrixCSC(sparse(I, J, V, m, n)) +STExtendableSparseMatrixCSC(I, J, V::AbstractVector, combine::Function) = STExtendableSparseMatrixCSC(sparse(I, J, V, combine)) +STExtendableSparseMatrixCSC(I, J, V::AbstractVector, m, n, combine::Function) = STExtendableSparseMatrixCSC(sparse(I, J, V, m, n, combine)) + + +""" + ExtendableSparseMatrixCSC + +Aliased to [`STExtendableSparseMatrixCSC`](@ref) to ensure backward compatibility +to ExtendableSparse v1.x. +""" +const ExtendableSparseMatrixCSC = STExtendableSparseMatrixCSC + + +""" + ExtendableSparseMatrix + +Aliased to [`STExtendableSparseMatrixCSC`](@ref) to ensure backward compatibility +to ExtendableSparse v1.x. +""" +const ExtendableSparseMatrix = STExtendableSparseMatrixCSC diff --git a/test/alltests.jl b/test/alltests.jl index 18e076e..c57225f 100644 --- a/test/alltests.jl +++ b/test/alltests.jl @@ -1,19 +1,46 @@ +using Aqua, ExplicitImports + using LinearAlgebra using SparseArrays using ExtendableSparse -#using ExtendableSparse.Experimental using Printf using BenchmarkTools - using MultiFloats using ForwardDiff -using ExplicitImports +using Random + +function Random.rand( + rng::AbstractRNG, + ::Random.SamplerType{ForwardDiff.Dual{T, V, N}} + ) where {T, V, N} + return ForwardDiff.Dual{T, V, N}(rand(rng, T)) +end + @testset "ExplicitImports" begin @test ExplicitImports.check_no_implicit_imports(ExtendableSparse) === nothing + @test ExplicitImports.check_all_explicit_imports_via_owners(ExtendableSparse) === nothing + @test ExplicitImports.check_all_explicit_imports_are_public(ExtendableSparse, ignore = (:AbstractSparseMatrixCSC, :getcolptr)) === nothing @test ExplicitImports.check_no_stale_explicit_imports(ExtendableSparse) === nothing + @test ExplicitImports.check_all_qualified_accesses_via_owners(ExtendableSparse) === nothing + @test ExplicitImports.check_all_qualified_accesses_are_public( + ExtendableSparse, + ignore = (:AbstractSparseMatrixCSC, :AbstractTriangular, :getcolptr, :Forward, :USE_GPL_LIBS, :_checkbuffers, :print_array, :sparse!) 
+ ) === nothing + @test ExplicitImports.check_no_self_qualified_accesses(ExtendableSparse) === nothing +end + +@testset "Aqua" begin + Aqua.test_all(ExtendableSparse) end +@testset "UndocumentedNames" begin + if isdefined(Docs, :undocumented_names) # >=1.11 + @test isempty(Docs.undocumented_names(ExtendableSparse)) + end +end + + @testset "Parallel" begin include("test_parallel.jl") @@ -32,16 +59,6 @@ end end end -# @testset "ExperimentalParallel" begin -# include("ExperimentalParallel.jl") -# for d=[1,2,3] -# for N in [10,rand(30:40),50] -# ExperimentalParallel.test_correctness_build(N,d) -# end -# end -# end - - @testset "Constructors" begin include("test_constructors.jl") end @@ -82,40 +99,10 @@ end include("test_linearsolve.jl") end -@testset "Preconditioners" begin - include("test_preconditioners.jl") -end - @testset "Symmetric" begin include("test_symmetric.jl") end -@testset "ExtendableSparse.LUFactorization" begin - include("test_default_lu.jl") -end - -@testset "Sparspak" begin - include("test_sparspak.jl") -end - -if ExtendableSparse.USE_GPL_LIBS - @testset "Cholesky" begin - include("test_default_cholesky.jl") - end -end - -@testset "block" begin +@testset "Block" begin include("test_block.jl") end - -#@testset "parilu0" begin include("test_parilu0.jl") end - - -# @testset "mkl-pardiso" begin if !Sys.isapple() -# include("test_mklpardiso.jl") -# end end - - -# if Pardiso.PARDISO_LOADED[] -# @testset "pardiso" begin include("test_pardiso.jl") end -# end diff --git a/test/test_assembly.jl b/test/test_assembly.jl index 16cc9fb..eef557f 100644 --- a/test/test_assembly.jl +++ b/test/test_assembly.jl @@ -2,9 +2,11 @@ module test_assembly using Test using SparseArrays using ExtendableSparse +using ExtendableSparse: GenericExtendableSparseMatrixCSC +using ExtendableSparse: SparseMatrixLNK, SparseMatrixDILNKC, SparseMatrixDict -function test(; m = 1000, n = 1000, xnnz = 5000, nsplice = 1) - A = ExtendableSparseMatrix{Float64, Int64}(m, n) +function test(Tm; m = 1000, n = 1000, xnnz = 5000, nsplice = 1) + A = GenericExtendableSparseMatrixCSC{Tm}{Float64, Int64}(m, n) m, n = size(A) S = spzeros(m, n) for isplice in 1:nsplice @@ -33,24 +35,28 @@ function test(; m = 1000, n = 1000, xnnz = 5000, nsplice = 1) end return true end +for Tm in [SparseMatrixLNK, SparseMatrixDict, SparseMatrixDILNKC] + @show Tm + @test test(Tm; m = 10, n = 10, xnnz = 5) + @test test(Tm; m = 100, n = 100, xnnz = 500, nsplice = 2) + @test test(Tm; m = 1000, n = 1000, xnnz = 5000, nsplice = 3) -@test test(m = 10, n = 10, xnnz = 5) -@test test(m = 100, n = 100, xnnz = 500, nsplice = 2) -@test test(m = 1000, n = 1000, xnnz = 5000, nsplice = 3) + @test test(Tm; m = 20, n = 10, xnnz = 5) + @test test(Tm; m = 200, n = 100, xnnz = 500, nsplice = 2) + @test test(Tm; m = 2000, n = 1000, xnnz = 5000, nsplice = 3) -@test test(m = 20, n = 10, xnnz = 5) -@test test(m = 200, n = 100, xnnz = 500, nsplice = 2) -@test test(m = 2000, n = 1000, xnnz = 5000, nsplice = 3) + @test test(Tm; m = 10, n = 20, xnnz = 5) + @test test(Tm; m = 100, n = 200, xnnz = 500, nsplice = 2) + @test test(Tm; m = 1000, n = 2000, xnnz = 5000, nsplice = 3) -@test test(m = 10, n = 20, xnnz = 5) -@test test(m = 100, n = 200, xnnz = 500, nsplice = 2) -@test test(m = 1000, n = 2000, xnnz = 5000, nsplice = 3) + for irun in 1:10 + m = rand((1:10000)) + n = rand((1:10000)) + nnz = rand((1:10000)) + nsplice = rand((1:5)) + @test test(Tm; m = m, n = n, xnnz = nnz, nsplice = nsplice) + end -for irun in 1:10 - m = rand((1:10000)) - n = rand((1:10000)) - nnz = 
rand((1:10000)) - nsplice = rand((1:5)) - @test test(m = m, n = n, xnnz = nnz, nsplice = nsplice) end + end diff --git a/test/test_block.jl b/test/test_block.jl index 010971e..f147cb3 100644 --- a/test/test_block.jl +++ b/test/test_block.jl @@ -1,6 +1,7 @@ module test_block using Test using ExtendableSparse +using ExtendableSparse: BlockPreconditioner, jacobi using ILUZero, AlgebraicMultigrid using IterativeSolvers using LinearAlgebra @@ -19,32 +20,16 @@ function main(; n = 100) @test sol ≈ sol0 - sol = cg(A, b, Pl = BlockPreconditioner(A; partitioning)) - @test sol ≈ sol0 - - sol = cg(A, b, Pl = BlockPreconditioner(A; partitioning, factorization = ilu0)) @test sol ≈ sol0 - sol = cg(A, b, Pl = BlockPreconditioner(A; partitioning, factorization = ILUZeroPreconditioner)) - @test sol ≈ sol0 - - sol = cg(A, b, Pl = BlockPreconditioner(A; partitioning, factorization = ILU0Preconditioner)) - @test sol ≈ sol0 - - sol = cg(A, b, Pl = BlockPreconditioner(A; partitioning, factorization = JacobiPreconditioner)) - @test sol ≈ sol0 - - - sol = cg(A, b, Pl = BlockPreconditioner(A; partitioning, factorization = AMGPreconditioner)) + sol = cg(A, b, Pl = BlockPreconditioner(A; partitioning, factorization = jacobi)) @test sol ≈ sol0 sol = cg(A, b, Pl = BlockPreconditioner(A; partitioning, factorization = sparspaklu)) @test sol ≈ sol0 - sol = cg(A, b, Pl = BlockPreconditioner(A; partitioning, factorization = SparspakLU)) - return @test sol ≈ sol0 - + return end main(n = 100) diff --git a/test/test_constructors.jl b/test/test_constructors.jl index 50e4fc1..3efb653 100644 --- a/test/test_constructors.jl +++ b/test/test_constructors.jl @@ -2,21 +2,17 @@ module test_constructors using Test using LinearAlgebra using ExtendableSparse +using ExtendableSparse: GenericExtendableSparseMatrixCSC +using ExtendableSparse: SparseMatrixLNK, SparseMatrixDILNKC, SparseMatrixDict using SparseArrays using Random using MultiFloats using ForwardDiff const Dual64 = ForwardDiff.Dual{Float64, Float64, 1} -function Random.rand( - rng::AbstractRNG, - ::Random.SamplerType{ForwardDiff.Dual{T, V, N}} - ) where {T, V, N} - return ForwardDiff.Dual{T, V, N}(rand(rng, T)) -end -function test_construct(T) - m = ExtendableSparseMatrix(T, 10, 10) +function test_construct(Text, T) + m = GenericExtendableSparseMatrixCSC{Text}(T, 10, 10) return eltype(m) == T end @@ -25,20 +21,25 @@ function test_sprand(T) return eltype(m) == T end -function test_transient_construction(; m = 10, n = 10, d = 0.1) +function test_transient_construction(Text; m = 10, n = 10, d = 0.1) csc = sprand(m, n, d) - lnk = SparseMatrixLNK(csc) + lnk = Text(csc) csc2 = SparseMatrixCSC(lnk) return csc2 == csc end function test() + exttypes = [SparseMatrixLNK, SparseMatrixDict, SparseMatrixDILNKC] + m1 = ExtendableSparseMatrix(10, 10) @test eltype(m1) == Float64 - @test test_construct(Float16) - @test test_construct(Float32) - @test test_construct(Float64x2) - @test test_construct(Dual64) + + for T in exttypes + @test test_construct(T, Float16) + @test test_construct(T, Float32) + @test test_construct(T, Float64x2) + @test test_construct(T, Dual64) + end acsc = sprand(10, 10, 0.5) @test sparse(ExtendableSparseMatrix(acsc)) == acsc @@ -58,11 +59,13 @@ function test() @test test_sprand(Float64x2) @test test_sprand(Dual64) - for irun in 1:10 - m = rand((1:1000)) - n = rand((1:1000)) - d = 0.3 * rand() - @test test_transient_construction(m = m, n = n, d = d) + for T in exttypes + for irun in 1:10 + m = rand((1:1000)) + n = rand((1:1000)) + d = 0.3 * rand() + @test 
test_transient_construction(T; m = m, n = n, d = d) + end end return end diff --git a/test/test_copymethods.jl b/test/test_copymethods.jl index e131a76..e087995 100644 --- a/test/test_copymethods.jl +++ b/test/test_copymethods.jl @@ -1,22 +1,18 @@ module test_copymethods using Test using ExtendableSparse +using ExtendableSparse: GenericExtendableSparseMatrixCSC +using ExtendableSparse: SparseMatrixLNK, SparseMatrixDILNKC, SparseMatrixDict using SparseArrays using Random using MultiFloats using ForwardDiff const Dual64 = ForwardDiff.Dual{Float64, Float64, 1} -function Random.rand( - rng::AbstractRNG, - ::Random.SamplerType{ForwardDiff.Dual{T, V, N}} - ) where {T, V, N} - return ForwardDiff.Dual{T, V, N}(rand(rng, T)) -end -function test(T) +function test(Tm, T) Xcsc = sprand(T, 10_000, 10_000, 0.01) - Xlnk = SparseMatrixLNK(Xcsc) + Xlnk = Tm(Xcsc) Xext = ExtendableSparseMatrix(Xcsc) t0 = @elapsed copy(Xcsc) t1 = @elapsed copy(Xlnk) @@ -30,7 +26,9 @@ function test(T) end return true end -test(Float64) -test(Float64x2) -test(Dual64) +for Tm in [SparseMatrixLNK, SparseMatrixDict, SparseMatrixDILNKC] + @test test(Tm, Float64) + @test test(Tm, Float64x2) + @test test(Tm, Dual64) +end end diff --git a/test/test_default_cholesky.jl b/test/test_default_cholesky.jl deleted file mode 100644 index 5323695..0000000 --- a/test/test_default_cholesky.jl +++ /dev/null @@ -1,16 +0,0 @@ -module test_default_cholesky -using Test -using ExtendableSparse -using SparseArrays -using LinearAlgebra - -include("test_lu.jl") - -@test test_lu1(Float64, 10, 10, 10, lufac = CholeskyFactorization()) -@test test_lu1(Float64, 25, 40, 1, lufac = CholeskyFactorization()) -@test test_lu1(Float64, 1000, 1, 1, lufac = CholeskyFactorization()) - -@test test_lu2(Float64, 10, 10, 10, lufac = CholeskyFactorization()) -@test test_lu2(Float64, 25, 40, 1, lufac = CholeskyFactorization()) -@test test_lu2(Float64, 1000, 1, 1, lufac = CholeskyFactorization()) -end diff --git a/test/test_default_lu.jl b/test/test_default_lu.jl deleted file mode 100644 index 9224bd6..0000000 --- a/test/test_default_lu.jl +++ /dev/null @@ -1,16 +0,0 @@ -module test_default_lu -using Test -using ExtendableSparse -using SparseArrays -using LinearAlgebra - -include("test_lu.jl") - -@test test_lu1(Float64, 10, 10, 10) -@test test_lu1(Float64, 25, 40, 1) -@test test_lu1(Float64, 1000, 1, 1) - -@test test_lu2(Float64, 10, 10, 10) -@test test_lu2(Float64, 25, 40, 1) -@test test_lu2(Float64, 1000, 1, 1) -end diff --git a/test/test_linearsolve.jl b/test/test_linearsolve.jl index 536b9fe..8756567 100644 --- a/test/test_linearsolve.jl +++ b/test/test_linearsolve.jl @@ -10,7 +10,6 @@ using MultiFloats using AMGCLWrap using ILUZero, IncompleteLU, AlgebraicMultigrid -import Pardiso f64(x::ForwardDiff.Dual{T}) where {T} = Float64(ForwardDiff.value(x)) f64(x::Number) = Float64(x) @@ -62,9 +61,6 @@ factorizations = [ KLUFactorization(reuse_symbolic = false), ] -if !Sys.isapple() - push!(factorizations, MKLPardisoFactorize()) -end @testset "Factorizations" begin @@ -87,9 +83,9 @@ allprecs = [ ExtendableSparse.ILUZeroPreconBuilder(), ExtendableSparse.ILUZeroPreconBuilder(; blocksize = 2), ExtendableSparse.ILUTPreconBuilder(), - ExtendableSparse.JacobiPreconBuilder(), - ExtendableSparse.SmoothedAggregationPreconBuilder(), - ExtendableSparse.RugeStubenPreconBuilder(), + # ExtendableSparse.JacobiPreconBuilder(), + SmoothedAggregationPreconBuilder(), + RugeStubenPreconBuilder(), ] @testset "iterations" begin diff --git a/test/test_lu.jl b/test/test_lu.jl deleted file mode 100644 
index 33af985..0000000 --- a/test/test_lu.jl +++ /dev/null @@ -1,45 +0,0 @@ -using ForwardDiff -using MultiFloats -f64(x::ForwardDiff.Dual{T}) where {T} = Float64(ForwardDiff.value(x)) -f64(x::Number) = Float64(x) -const Dual64 = ForwardDiff.Dual{Float64, Float64, 1} - -function test_lu1(T, k, l, m; lufac = ExtendableSparse.LUFactorization()) - A = fdrand(k, l, m; rand = () -> 1, matrixtype = ExtendableSparseMatrix) - b = rand(k * l * m) - x1 = A \ b - for i in 1:(k * l * m) - A[i, i] += 1.0 - end - x2 = A \ b - - Aext = fdrand(T, k, l, m; rand = () -> 1, matrixtype = ExtendableSparseMatrix) - lu!(lufac, Aext) - a1 = deepcopy(Aext.cscmatrix) - x1ext = lufac \ T.(b) - - for i in 1:(k * l * m) - Aext[i, i] += 1.0 - end - update!(lufac) - a2 = deepcopy(Aext.cscmatrix) - x2ext = lufac \ T.(b) - - atol = 100 * sqrt(f64(max(eps(T), eps(Float64)))) - return isapprox(norm(x1 - f64.(x1ext)) / norm(x1), 0; atol) && - isapprox(norm(x2 - f64.(x2ext)) / norm(x2), 0; atol) -end - -function test_lu2(T, k, l, m; lufac = ExtendableSparse.LUFactorization()) - Aext = fdrand(T, k, l, m; rand = () -> 1, matrixtype = ExtendableSparseMatrix) - b = rand(k * l * m) - lu!(lufac, Aext) - x1ext = lufac \ T.(b) - for i in 4:(k * l * m - 3) - Aext[i, i + 3] -= 1.0e-4 - Aext[i - 3, i] -= 1.0e-4 - end - lufac = lu!(lufac, Aext) - x2ext = lufac \ T.(b) - return all(x1ext .< x2ext) -end diff --git a/test/test_mklpardiso.jl b/test/test_mklpardiso.jl deleted file mode 100644 index 86dacc7..0000000 --- a/test/test_mklpardiso.jl +++ /dev/null @@ -1,25 +0,0 @@ -module test_mklpardiso -using Test -using ExtendableSparse -using SparseArrays -using LinearAlgebra -using Pardiso - -include("test_lu.jl") -Base.eps(ComplexF64) = eps(Float64) - -@test test_lu1(Float64, 10, 10, 10, lufac = MKLPardisoLU()) -@test test_lu1(Float64, 25, 40, 1, lufac = MKLPardisoLU()) -@test test_lu1(Float64, 1000, 1, 1, lufac = MKLPardisoLU()) - -@test test_lu1(ComplexF64, 10, 10, 10, lufac = MKLPardisoLU()) -@test test_lu1(ComplexF64, 25, 40, 1, lufac = MKLPardisoLU()) -@test test_lu1(ComplexF64, 1000, 1, 1, lufac = MKLPardisoLU()) - - -@test test_lu2(Float64, 10, 10, 10, lufac = MKLPardisoLU()) -@test test_lu2(Float64, 25, 40, 1, lufac = MKLPardisoLU()) -@test test_lu2(Float64, 1000, 1, 1, lufac = MKLPardisoLU()) - - -end diff --git a/test/test_operations.jl b/test/test_operations.jl index 7fb43dc..97c97b0 100644 --- a/test/test_operations.jl +++ b/test/test_operations.jl @@ -3,6 +3,7 @@ using Test using SparseArrays using LinearAlgebra using ExtendableSparse +using ExtendableSparse: sprand_sdd!, SparseMatrixLNK ##################################################################### function test_addition(; m = 10, n = 10, d = 0.1) diff --git a/test/test_parallel.jl b/test/test_parallel.jl index e80d358..308c7a8 100644 --- a/test/test_parallel.jl +++ b/test/test_parallel.jl @@ -1,6 +1,7 @@ module test_parallel using ExtendableSparse, SparseArrays +using ExtendableSparse: partitioning! 
# using ExtendableSparse.Experimental using BenchmarkTools using ExtendableGrids diff --git a/test/test_pardiso.jl b/test/test_pardiso.jl deleted file mode 100644 index 1bc07b7..0000000 --- a/test/test_pardiso.jl +++ /dev/null @@ -1,17 +0,0 @@ -module test_pardiso -using Test -using ExtendableSparse -using SparseArrays -using LinearAlgebra -using Pardiso - -include("test_lu.jl") - -@test test_lu1(Float64, 10, 10, 10, lufac = PardisoLU()) -@test test_lu1(Float64, 25, 40, 1, lufac = PardisoLU()) -@test test_lu1(Float64, 1000, 1, 1, lufac = PardisoLU()) - -@test test_lu2(Float64, 10, 10, 10, lufac = PardisoLU()) -@test test_lu2(Float64, 25, 40, 1, lufac = PardisoLU()) -@test test_lu2(Float64, 1000, 1, 1, lufac = PardisoLU()) -end diff --git a/test/test_parilu0.jl b/test/test_parilu0.jl deleted file mode 100644 index b6562e0..0000000 --- a/test/test_parilu0.jl +++ /dev/null @@ -1,28 +0,0 @@ -module test_parilu0 -using Test -using ExtendableSparse -using IterativeSolvers -using LinearAlgebra - -function test(n) - A = ExtendableSparseMatrix(n, n) - sprand_sdd!(A) - flush!(A) - A = A.cscmatrix - b = A * ones(n) - P_par = ParallelILU0Preconditioner(A) - A_reord, b_reord = reorderlinsys(A, b, P_par.factorization.coloring) - P_ser = ILU0Preconditioner(A_reord) - sol_ser, hist_ser = gmres(A_reord, b_reord; Pl = P_ser, log = true) - sol_par, hist_par = gmres(A_reord, b_reord; Pl = P_par, log = true) - @show sol_ser - @show sol_par - @show hist_ser.iters - @show hist_par.iters - return sol_ser ≈ sol_par && hist_ser.iters == hist_par.iters -end - -@test test(10) -@test test(100) -@test test(1000) -end diff --git a/test/test_preconditioners.jl b/test/test_preconditioners.jl deleted file mode 100644 index 66bbf4f..0000000 --- a/test/test_preconditioners.jl +++ /dev/null @@ -1,67 +0,0 @@ -module test_preconditioners -using Test -using ExtendableSparse -using AlgebraicMultigrid -using AMGCLWrap -using IncompleteLU -using IterativeSolvers -using LinearAlgebra - -function test_precon(Precon, k, l, m; maxiter = 10000, symmetric = true) - A = fdrand(k, l, m; matrixtype = ExtendableSparseMatrix, symmetric) - b = ones(size(A, 2)) - exact = A \ b - Pl = Precon(A) - it, hist = simple(A, b; Pl = Pl, maxiter = maxiter, reltol = 1.0e-10, log = true) - r = hist[:resnorm] - nr = length(r) - tail = min(100, length(r) ÷ 2) - return all(x -> x < 1, r[(end - tail):end] ./ r[(end - tail - 1):(end - 1)]), norm(it - exact) -end - -function test_precon2(precon, k, l, m; maxiter = 10000, symmetric = true) - A = fdrand(k, l, m; matrixtype = ExtendableSparseMatrix, symmetric) - b = ones(size(A, 2)) - exact = A \ b - @show typeof(precon) - factorize!(precon, A) - @time it, hist = simple(A, b; Pl = precon, maxiter = maxiter, reltol = 1.0e-10, log = true) - r = hist[:resnorm] - nr = length(r) - tail = min(100, length(r) ÷ 2) - return all(x -> x < 1, r[(end - tail):end] ./ r[(end - tail - 1):(end - 1)]), norm(it - exact) -end - -@test all(test_precon(ILU0Preconditioner, 20, 20, 20) .≤ (true, 4.0e-5)) -@test all(test_precon(ILUZeroPreconditioner, 20, 20, 20) .≤ (true, 4.0e-5)) -@test all(test_precon(JacobiPreconditioner, 20, 20, 20) .≤ (true, 3.0e-4)) -@test all(test_precon(ParallelJacobiPreconditioner, 20, 20, 20) .≤ (true, 3.0e-4)) -@test all(test_precon(ILUTPreconditioner, 20, 20, 20) .≤ (true, 5.0e-5)) -@test all(test_precon(RS_AMGPreconditioner, 20, 20, 20) .≤ (true, 1.0e-5)) -@test all(test_precon(SA_AMGPreconditioner, 20, 20, 20) .≤ (true, 1.0e-5)) -@test all(test_precon(AMGCL_AMGPreconditioner, 20, 20, 20) .≤ (true, 1.0e-5)) 
-@test all(test_precon(AMGCL_RLXPreconditioner, 20, 20, 20) .≤ (true, 4.0e-5))
-
-@test all(test_precon(ILU0Preconditioner, 20, 20, 20; symmetric = false) .≤ (true, 4.0e-5))
-@test all(test_precon(ILUZeroPreconditioner, 20, 20, 20; symmetric = false) .≤ (true, 4.0e-5))
-@test all(test_precon(JacobiPreconditioner, 20, 20, 20; symmetric = false) .≤ (true, 3.0e-4))
-@test all(
-    test_precon(ParallelJacobiPreconditioner, 20, 20, 20; symmetric = false) .≤
-        (true, 3.0e-4)
-)
-@test all(test_precon(ILUTPreconditioner, 20, 20, 20; symmetric = false) .≤ (true, 5.0e-5))
-#@test all(test_precon(AMGPreconditioner,20,20,20,symmetric=false).≤ (true, 1e-5))
-#@test all(test_precon(AMGCL_AMGPreconditioner,20,20,20,symmetric=false).≤ (true, 1e-5))
-#@test all(test_precon(AMGCL_RLXPreconditioner,20,20,20,symmetric=false).≤ (true, 5e-5))
-
-@test all(test_precon2(ILU0Preconditioner(), 20, 20, 20) .≤ (true, 4.0e-5))
-@test all(test_precon2(ILUZeroPreconditioner(), 20, 20, 20) .≤ (true, 4.0e-5))
-@test all(test_precon2(JacobiPreconditioner(), 20, 20, 20) .≤ (true, 3.0e-4))
-@test all(test_precon2(ParallelJacobiPreconditioner(), 20, 20, 20) .≤ (true, 3.0e-4))
-@test all(test_precon2(ILUTPreconditioner(), 20, 20, 20) .≤ (true, 5.0e-5))
-@test all(test_precon2(RS_AMGPreconditioner(), 20, 20, 20) .≤ (true, 1.0e-5))
-@test all(test_precon2(SA_AMGPreconditioner(), 20, 20, 20) .≤ (true, 1.0e-5))
-@test all(test_precon2(AMGCL_AMGPreconditioner(), 20, 20, 20) .≤ (true, 1.0e-5))
-@test all(test_precon2(AMGCL_RLXPreconditioner(), 20, 20, 20) .≤ (true, 4.0e-5))
-
-end
diff --git a/test/test_sparspak.jl b/test/test_sparspak.jl
deleted file mode 100644
index f5a1c68..0000000
--- a/test/test_sparspak.jl
+++ /dev/null
@@ -1,20 +0,0 @@
-module test_sparspak
-
-using Test
-using ExtendableSparse
-using SparseArrays
-using LinearAlgebra
-
-include("test_lu.jl")
-
-for T in [Float32, Float64, Float64x1, Float64x2, Dual64]
-    println("$T:")
-    @test test_lu1(T, 10, 10, 10, lufac = SparspakLU())
-    @test test_lu1(T, 25, 40, 1, lufac = SparspakLU())
-    @test test_lu1(T, 1000, 1, 1, lufac = SparspakLU())
-
-    @test test_lu2(T, 10, 10, 10, lufac = SparspakLU())
-    @test test_lu2(T, 25, 40, 1, lufac = SparspakLU())
-    @test test_lu2(T, 1000, 1, 1, lufac = SparspakLU())
-end
-end
diff --git a/test/test_symmetric.jl b/test/test_symmetric.jl
index 686a32d..d4a0bc5 100644
--- a/test/test_symmetric.jl
+++ b/test/test_symmetric.jl
@@ -1,6 +1,7 @@
 module test_symmetric
 using Test
 using ExtendableSparse
+using ExtendableSparse: sprand_sdd!
 using LinearAlgebra
 
 ##############################################
diff --git a/test/test_timings.jl b/test/test_timings.jl
index 7c5e89a..cce5dda 100644
--- a/test/test_timings.jl
+++ b/test/test_timings.jl
@@ -1,21 +1,24 @@
 module test_timings
 using Test
 using ExtendableSparse
+using ExtendableSparse: SparseMatrixLNK
 using SparseArrays
 using Random
 using MultiFloats
 using ForwardDiff
 using BenchmarkTools
 using Printf
+using ExtendableSparse: GenericExtendableSparseMatrixCSC
+using ExtendableSparse: SparseMatrixLNK, SparseMatrixDILNKC, SparseMatrixDict
 
 const Dual64 = ForwardDiff.Dual{Float64, Float64, 1}
 
-function test(T, k, l, m)
+function test(Tm, T, k, l, m)
     t1 = @belapsed fdrand($T, $k, $l, $m, matrixtype = $SparseMatrixCSC) seconds = 0.1
-    t2 = @belapsed fdrand($T, $k, $l, $m, matrixtype = $ExtendableSparseMatrix) seconds = 0.1
-    t3 = @belapsed fdrand($T, $k, $l, $m, matrixtype = $SparseMatrixLNK) seconds = 0.1
+    t2 = @belapsed fdrand($T, $k, $l, $m, matrixtype = $GenericExtendableSparseMatrixCSC{$(Tm)}) seconds = 0.1
+    t3 = @belapsed fdrand($T, $k, $l, $m, matrixtype = $Tm) seconds = 0.1
     @printf(
-        "%s (%d,%d,%d): CSC %.4f EXT %.4f LNK %.4f\n",
+        "%s (%d,%d,%d): CSC %.4f Extendable %.4f Extension %.4f\n",
         string(T),
         k,
         l,
@@ -34,16 +37,18 @@ function test(T, k, l, m)
     return true
 end
 
-@test test(Float64, 1000, 1, 1)
-@test test(Float64, 100, 100, 1)
-@test test(Float64, 20, 20, 20)
+for Tm in [SparseMatrixLNK, SparseMatrixDict, SparseMatrixDILNKC]
+    @show Tm
+    @test test(Tm, Float64, 1000, 1, 1)
+    @test test(Tm, Float64, 100, 100, 1)
+    @test test(Tm, Float64, 20, 20, 20)
 
-@test test(Float64x2, 1000, 1, 1)
-@test test(Float64x2, 100, 100, 1)
-@test test(Float64x2, 20, 20, 20)
-
-@test test(Dual64, 1000, 1, 1)
-@test test(Dual64, 100, 100, 1)
-@test test(Dual64, 20, 20, 20)
+    @test test(Tm, Float64x2, 1000, 1, 1)
+    @test test(Tm, Float64x2, 100, 100, 1)
+    @test test(Tm, Float64x2, 20, 20, 20)
+    @test test(Tm, Dual64, 1000, 1, 1)
+    @test test(Tm, Dual64, 100, 100, 1)
+    @test test(Tm, Dual64, 20, 20, 20)
+end
 
 end
diff --git a/test/test_updates.jl b/test/test_updates.jl
index 0077334..9fd51e3 100644
--- a/test/test_updates.jl
+++ b/test/test_updates.jl
@@ -1,14 +1,16 @@
 module test_updates
 using Test
 using ExtendableSparse
+using ExtendableSparse: GenericExtendableSparseMatrixCSC
+using ExtendableSparse: SparseMatrixLNK, SparseMatrixDILNKC, SparseMatrixDict
 using SparseArrays
 using Random
 using MultiFloats
 using ForwardDiff
 
 const Dual64 = ForwardDiff.Dual{Float64, Float64, 1}
-function test(T)
-    A = ExtendableSparseMatrix(T, 10, 10)
+function test(Tm, T)
+    A = GenericExtendableSparseMatrixCSC{Tm}(T, 10, 10)
     @test nnz(A) == 0
     A[1, 3] = 5
     updateindex!(A, +, 6.0, 4, 5)
@@ -24,7 +26,9 @@ function test(T)
     return @test nnz(A) == 3
 end
 
-test(Float64)
-test(Float64x2)
-test(Dual64)
+for Tm in [SparseMatrixLNK, SparseMatrixDict, SparseMatrixDILNKC]
+    test(Tm, Float64)
+    test(Tm, Float64x2)
+    test(Tm, Dual64)
+end
 end