From 588c0fc4d1dfbc472e7ad6204700d3c078e28172 Mon Sep 17 00:00:00 2001 From: Roger-luo Date: Thu, 2 Aug 2018 14:42:05 +0800 Subject: [PATCH] move LuxurySparse.jl from Yao.jl. license: MIT authors: Roger-luo years: 2018 fix travis deprecations fix typo: statify->staticize rename to staticize --- .codecov.yml | 1 + .gitignore | 16 +++ .travis.yml | 35 ++++++ LICENSE.md | 22 ++++ Project.toml | 17 +++ README.md | 14 +++ REQUIRE | 3 + appveyor.yml | 47 +++++++ benchmarks/README.md | 1 + docs/README.md | 1 + docs/make.jl | 1 + docs/src/index.md | 3 + src/Core.jl | 69 +++++++++++ src/IMatrix.jl | 23 ++++ src/LuxurySparse.jl | 25 ++++ src/PermMatrix.jl | 76 ++++++++++++ src/arraymath.jl | 74 +++++++++++ src/conversions.jl | 82 +++++++++++++ src/kronecker.jl | 285 +++++++++++++++++++++++++++++++++++++++++++ src/linalg.jl | 162 ++++++++++++++++++++++++ src/promotions.jl | 24 ++++ src/staticize.jl | 43 +++++++ test/IMatrix.jl | 66 ++++++++++ test/PermMatrix.jl | 67 ++++++++++ test/kronecker.jl | 31 +++++ test/linalg.jl | 71 +++++++++++ test/runtests.jl | 22 ++++ test/staticize.jl | 42 +++++++ 28 files changed, 1323 insertions(+) create mode 100644 .codecov.yml create mode 100644 .gitignore create mode 100644 .travis.yml create mode 100644 LICENSE.md create mode 100644 Project.toml create mode 100644 README.md create mode 100644 REQUIRE create mode 100644 appveyor.yml create mode 100644 benchmarks/README.md create mode 100644 docs/README.md create mode 100644 docs/make.jl create mode 100644 docs/src/index.md create mode 100644 src/Core.jl create mode 100644 src/IMatrix.jl create mode 100644 src/LuxurySparse.jl create mode 100644 src/PermMatrix.jl create mode 100644 src/arraymath.jl create mode 100644 src/conversions.jl create mode 100644 src/kronecker.jl create mode 100644 src/linalg.jl create mode 100644 src/promotions.jl create mode 100644 src/staticize.jl create mode 100644 test/IMatrix.jl create mode 100644 test/PermMatrix.jl create mode 100644 test/kronecker.jl create mode 100644 test/linalg.jl create mode 100644 test/runtests.jl create mode 100644 test/staticize.jl diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 0000000..69cb760 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1 @@ +comment: false diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..78fd494 --- /dev/null +++ b/.gitignore @@ -0,0 +1,16 @@ +*.jl.cov +*.jl.*.cov +*.jl.mem + +docs/build/ +docs/site/ + +*.ipynb_checkpoints +**/*.ipynb_checkpoints +**/**/*.ipynb_checkpoints + +_*.dat +*.swp +__pycache__/ + +Manifest.toml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..221207b --- /dev/null +++ b/.travis.yml @@ -0,0 +1,35 @@ +## Documentation: http://docs.travis-ci.com/user/languages/julia/ +language: julia +os: + - linux + - osx +julia: + - 0.7 + - nightly +notifications: + email: false +git: + depth: 99999999 + +## uncomment the following lines to allow failures on nightly julia +## (tests will run but not make your overall status red) +#matrix: +# allow_failures: +# - julia: nightly + +## uncomment and modify the following lines to manually install system packages +#addons: +# apt: # apt-get for linux +# packages: +# - gfortran +#before_script: # homebrew for mac +# - if [ $TRAVIS_OS_NAME = osx ]; then brew install gcc; fi + +## uncomment the following lines to override the default test script +#script: +# - julia -e 'Pkg.clone(pwd()); Pkg.build("LuxurySparse"); Pkg.test("LuxurySparse"; coverage=true)' +after_success: + # push coverage results to Coveralls + - julia -e 
'using Pkg; cd(Pkg.dir("LuxurySparse")); Pkg.add("Coverage"); using Coverage; Coveralls.submit(Coveralls.process_folder())'
  # push coverage results to Codecov
  - julia -e 'using Pkg; cd(Pkg.dir("LuxurySparse")); Pkg.add("Coverage"); using Coverage; Codecov.submit(Codecov.process_folder())'
diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 0000000..0f9ebe7
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,22 @@
+The LuxurySparse.jl package is licensed under the MIT "Expat" License:
+
+> Copyright (c) 2018: Roger-luo.
+>
+> Permission is hereby granted, free of charge, to any person obtaining a copy
+> of this software and associated documentation files (the "Software"), to deal
+> in the Software without restriction, including without limitation the rights
+> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+> copies of the Software, and to permit persons to whom the Software is
+> furnished to do so, subject to the following conditions:
+>
+> The above copyright notice and this permission notice shall be included in all
+> copies or substantial portions of the Software.
+>
+> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+> SOFTWARE.
+>
diff --git a/Project.toml b/Project.toml
new file mode 100644
index 0000000..1a7c1d0
--- /dev/null
+++ b/Project.toml
@@ -0,0 +1,17 @@
+name = "LuxurySparse"
+uuid = "72cad168-9621-11e8-1b1e-73228c0c6ca7"
+authors = ["Roger-luo "]
+version = "0.0.0"
+
+[deps]
+Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
+
+[extras]
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+
+[targets]
+test = ["Test"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..38d3220
--- /dev/null
+++ b/README.md
@@ -0,0 +1,14 @@
+# LuxurySparse
+
+[![Build Status](https://travis-ci.org/QuantumBFS/LuxurySparse.jl.svg?branch=master)](https://travis-ci.org/QuantumBFS/LuxurySparse.jl)
+
+This package provides
+
+* a generalized permutation matrix type, `PermMatrix`,
+* an identity matrix type, `IMatrix`,
+
+with high-performance type conversion, `kron`, and multiplication operations.
+
+`LuxurySparse.jl` is still a part of [Yao.jl](https://github.com/QuantumBFS/Yao.jl) (a quantum circuit simulation package).
+Its documentation is maintained [here](https://quantumbfs.github.io/Yao.jl/latest/man/luxurysparse/).
+It will be moved out of `Yao.jl` completely and will start accepting PRs soon.
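+
+## Example
+
+A minimal usage sketch (illustrative only, assuming the API exported by `src/LuxurySparse.jl` in this patch):
+
+```julia
+using LuxurySparse
+using LinearAlgebra, SparseArrays
+
+pm = PermMatrix([2, 3, 4, 1], [0.1, 0.2, 0.3, 0.4])  # generalized permutation matrix
+id = IMatrix(4)                                      # identity matrix, size carried in the type
+
+pm * collect(1.0:4.0)   # permute-and-multiply, O(n)
+kron(id, pm)            # stays a PermMatrix rather than becoming dense/CSC
+SparseMatrixCSC(pm)     # cheap conversion to a CSC sparse matrix
+Matrix(pm)              # dense conversion
+```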
diff --git a/REQUIRE b/REQUIRE new file mode 100644 index 0000000..ea43e53 --- /dev/null +++ b/REQUIRE @@ -0,0 +1,3 @@ +julia 0.7 +Compat +StaticArrays diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 0000000..015080a --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,47 @@ +environment: + matrix: + - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.6/julia-0.7-latest-win32.exe" + - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.6/julia-0.7-latest-win64.exe" + - JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x86/julia-latest-win32.exe" + - JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x64/julia-latest-win64.exe" + +## uncomment the following lines to allow failures on nightly julia +## (tests will run but not make your overall status red) +#matrix: +# allow_failures: +# - JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x86/julia-latest-win32.exe" +# - JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x64/julia-latest-win64.exe" + +branches: + only: + - master + - /release-.*/ + +notifications: + - provider: Email + on_build_success: false + on_build_failure: false + on_build_status_changed: false + +install: + - ps: "[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12" +# If there's a newer build queued for the same PR, cancel this one + - ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod ` + https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | ` + Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { ` + throw "There are newer queued builds for this pull request, failing early." } +# Download most recent Julia Windows binary + - ps: (new-object net.webclient).DownloadFile( + $env:JULIA_URL, + "C:\projects\julia-binary.exe") +# Run installer silently, output to C:\projects\julia + - C:\projects\julia-binary.exe /S /D=C:\projects\julia + +build_script: +# Need to convert from shallow to complete for Pkg.clone to work + - IF EXIST .git\shallow (git fetch --unshallow) + - C:\projects\julia\bin\julia -e "versioninfo(); + Pkg.clone(pwd(), \"LuxurySparse\"); Pkg.build(\"LuxurySparse\")" + +test_script: + - C:\projects\julia\bin\julia -e "Pkg.test(\"LuxurySparse\")" diff --git a/benchmarks/README.md b/benchmarks/README.md new file mode 100644 index 0000000..3878b2f --- /dev/null +++ b/benchmarks/README.md @@ -0,0 +1 @@ +# Benchmark diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..4652540 --- /dev/null +++ b/docs/README.md @@ -0,0 +1 @@ +# LuxurySparse Documentation diff --git a/docs/make.jl b/docs/make.jl new file mode 100644 index 0000000..3e47c68 --- /dev/null +++ b/docs/make.jl @@ -0,0 +1 @@ +using Documenter diff --git a/docs/src/index.md b/docs/src/index.md new file mode 100644 index 0000000..df42681 --- /dev/null +++ b/docs/src/index.md @@ -0,0 +1,3 @@ +# LuxurySparse + +A luxury sparse library for julia language. diff --git a/src/Core.jl b/src/Core.jl new file mode 100644 index 0000000..a8ae3d6 --- /dev/null +++ b/src/Core.jl @@ -0,0 +1,69 @@ +""" + swaprows!(v::VecOrMat, i::Int, j::Int[, f1, f2]) -> VecOrMat + +swap row i and row j of v inplace, with f1, f2 factors applied on i and j (before swap). +""" +function swaprows! 
end
+
+"""
+    swapcols!(v::VecOrMat, i::Int, j::Int[, f1, f2]) -> VecOrMat
+
+swap col `i` and col `j` of `v` in place, with factors `f1`, `f2` applied to `i` and `j` (before the swap).
+"""
+function swapcols! end
+
+"""
+    u1rows!(state::VecOrMat, i::Int, j::Int, a, b, c, d) -> VecOrMat
+
+apply the 2x2 matrix `[a b; c d]` (a u1 gate) to row `i` and row `j` of `state` in place.
+"""
+function u1rows! end
+
+"""
+    mulcol!(v::Vector, i::Int, f) -> VecOrMat
+
+multiply col `i` of `v` by `f` in place.
+"""
+function mulcol! end
+
+"""
+    mulrow!(v::Vector, i::Int, f) -> VecOrMat
+
+multiply row `i` of `v` by `f` in place.
+"""
+function mulrow! end
+
+
+"""
+    matvec(x::VecOrMat) -> MatOrVec
+
+Return a vector if the input matrix is a column vector, otherwise return the input untouched.
+"""
+function matvec end
+
+"""
+    notdense(M) -> Bool
+
+Return true if a matrix is not dense.
+
+Note:
+It is not exactly the same as `issparse`, e.g. `Diagonal`, `IMatrix` and `PermMatrix` are all notdense but not issparse.
+"""
+function notdense end
+
+notdense(M)::Bool = issparse(M)
+@static if VERSION >= v"0.7-"
+notdense(x::Transpose) = notdense(parent(x))
+notdense(x::Adjoint) = notdense(parent(x))
+end
+
+"""A faster `invperm`."""
+function fast_invperm(order)
+    v = similar(order)
+    @inbounds @simd for i=1:length(order)
+        v[order[i]] = i
+    end
+    v
+end
+
+dropzeros!(A::Diagonal) = A
diff --git a/src/IMatrix.jl b/src/IMatrix.jl
new file mode 100644
index 0000000..e14a99d
--- /dev/null
+++ b/src/IMatrix.jl
@@ -0,0 +1,23 @@
+"""
+    IMatrix{N, Tv}()
+    IMatrix{N}() = IMatrix{N, Bool}()
+    IMatrix(A::AbstractMatrix{T}) where T -> IMatrix
+
+Identity matrix with its size `N` carried in the type and `Bool` as the default element type; both `*` and `kron` are optimized.
+"""
+struct IMatrix{N, Tv} <: AbstractMatrix{Tv} end
+IMatrix{N}() where N = IMatrix{N, Bool}()
+IMatrix(N::Int) = IMatrix{N}()
+
+size(A::IMatrix{N}, i::Int) where N = N
+size(A::IMatrix{N}) where N = (N, N)
+getindex(A::IMatrix{N, T}, i::Integer, j::Integer) where {N, T} = T(i==j)
+
+####### sparse matrix ######
+nnz(M::IMatrix{N}) where N = N
+nonzeros(M::IMatrix{N, T}) where {N, T} = ones(T, N)
+ishermitian(D::IMatrix) = true
+notdense(::IMatrix) = true
+
+similar(::IMatrix{N, Tv}, ::Type{T}) where {N, Tv, T} = IMatrix{N, T}()
+copyto!(A::IMatrix{N}, B::IMatrix{N}) where N = A
diff --git a/src/LuxurySparse.jl b/src/LuxurySparse.jl
new file mode 100644
index 0000000..b4351f1
--- /dev/null
+++ b/src/LuxurySparse.jl
@@ -0,0 +1,25 @@
+module LuxurySparse
+
+using LinearAlgebra, SparseArrays, Random
+using StaticArrays: SVector, SMatrix
+
+import Base: copyto!
+import LinearAlgebra: ishermitian
+import SparseArrays: SparseMatrixCSC, nnz, nonzeros, dropzeros!, findnz
+import Base: getindex, size, similar, copy, show
+
+export PermMatrix, pmrand, IMatrix, I, fast_invperm, notdense
+export staticize, SSparseMatrixCSC, SDiagonal
+
+include("Core.jl")
+include("IMatrix.jl")
+include("PermMatrix.jl")
+
+include("conversions.jl")
+include("promotions.jl")
+include("staticize.jl")
+include("arraymath.jl")
+include("linalg.jl")
+include("kronecker.jl")
+
+end
diff --git a/src/PermMatrix.jl b/src/PermMatrix.jl
new file mode 100644
index 0000000..c61a429
--- /dev/null
+++ b/src/PermMatrix.jl
@@ -0,0 +1,76 @@
+"""
+    PermMatrix{Tv, Ti}(perm::Vector{Ti}, vals::Vector{Tv}) where {Tv, Ti<:Integer}
+    PermMatrix(perm::Vector{Ti}, vals::Vector{Tv}) where {Tv, Ti}
+    PermMatrix(ds::AbstractMatrix)
+
+PermMatrix represents a special kind of linear operator: Permute and Multiply, which means `M * v = vals .* v[perm]`.
+Optimizations are used to make it much faster than `SparseMatrixCSC`.
+ +* `perm` is the permutation order, +* `vals` is the multiplication factor. + +[Generalized Permutation Matrix](https://en.wikipedia.org/wiki/Generalized_permutation_matrix) +""" +struct PermMatrix{Tv, Ti<:Integer, Vv<:AbstractVector{Tv}, Vi<:AbstractVector{Ti}} <: AbstractMatrix{Tv} + perm::Vi # new orders + vals::Vv # multiplied values. + + function PermMatrix{Tv, Ti, Vv, Vi}(perm::Vi, vals::Vv) where {Tv, Ti<:Integer, Vv<:AbstractVector{Tv}, Vi<:AbstractVector{Ti}} + if length(perm) != length(vals) + throw(DimensionMismatch("permutation ($(length(perm))) and multiply ($(length(vals))) length mismatch.")) + end + new{Tv, Ti, Vv, Vi}(perm, vals) + end +end + +function PermMatrix{Tv, Ti}(perm, vals) where {Tv, Ti<:Integer} + PermMatrix{Tv, Ti, Vector{Tv}, Vector{Ti}}(Vector{Ti}(perm), Vector{Tv}(vals)) +end + +function PermMatrix(perm::Vi, vals::Vv) where {Tv, Ti<:Integer, Vv<:AbstractVector{Tv}, Vi<:AbstractVector{Ti}} + PermMatrix{Tv,Ti, Vv, Vi}(perm, vals) +end + +################# Array Functions ################## + +size(M::PermMatrix) = (length(M.perm), length(M.perm)) +function size(A::PermMatrix, d::Integer) + if d < 1 + throw(ArgumentError("dimension must be ≥ 1, got $d")) + elseif d<=2 + return length(A.perm) + else + return 1 + end +end +getindex(M::PermMatrix, i::Integer, j::Integer) = M.perm[i] == j ? M.vals[i] : 0 +copyto!(A::PermMatrix, B::PermMatrix) = (copyto!(A.perm, B.perm); copyto!(A.vals, B.vals); A) + +""" + pmrand(T::Type, n::Int) -> PermMatrix + +Return random PermMatrix. +""" +function pmrand end + +pmrand(::Type{T}, n::Int) where T = PermMatrix(randperm(n), randn(T, n)) +pmrand(n::Int) = pmrand(Float64, n) + +similar(x::PermMatrix{Tv, Ti}) where {Tv, Ti} = PermMatrix{Tv, Ti}(similar(x.perm), similar(x.vals)) +similar(x::PermMatrix{Tv, Ti}, ::Type{T}) where {Tv, Ti, T} = PermMatrix{T, Ti}(similar(x.perm), similar(x.vals, T)) + +# TODO: rewrite this +# function show(io::IO, M::PermMatrix) +# println("PermMatrix") +# for item in zip(M.perm, M.vals) +# i, p = item +# println("- ($i) * $p") +# end +# end + +######### sparse array interfaces ######### +nnz(M::PermMatrix) = length(M.vals) +nonzeros(M::PermMatrix) = M.vals +dropzeros!(M::PermMatrix) = M +notdense(::PermMatrix) = true +notdense(::Diagonal) = true diff --git a/src/arraymath.jl b/src/arraymath.jl new file mode 100644 index 0000000..7725a83 --- /dev/null +++ b/src/arraymath.jl @@ -0,0 +1,74 @@ +import Base: conj, copy, real, ctranspose, imag +import Compat.LinearAlgebra: transpose, transpose!, ctranspose! + +@static if VERSION >= v"0.7-" + import LinearAlgebra: adjoint!, adjoint +end + + +# IMatrix +for func in (:conj, :real, :ctranspose, :transpose, :adjoint, :copy) + @eval ($func)(M::IMatrix{N, T}) where {N, T} = IMatrix{N, T}() +end +for func in (:ctranspose!, :adjoint!, :transpose!) 
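+    # in-place transpose/adjoint of the identity matrix is a no-op, so just return M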
+ @eval ($func)(M::IMatrix) = M +end +imag(M::IMatrix{N, T}) where {N, T} = Diagonal(zeros(T,N)) + +# PermMatrix +for func in (:conj, :real, :imag) + @eval ($func)(M::PermMatrix) = PermMatrix(M.perm, ($func)(M.vals)) +end +copy(M::PermMatrix) = PermMatrix(copy(M.perm), copy(M.vals)) + +function transpose(M::PermMatrix) + new_perm = fast_invperm(M.perm) + return PermMatrix(new_perm, M.vals[new_perm]) +end + +adjoint(S::PermMatrix{<:Real}) = transpose(S) +adjoint(S::PermMatrix{<:Complex}) = conj(transpose(S)) + +# scalar +import Base: *, /, ==, +, -, ≈ +*(A::IMatrix{N, T}, B::Number) where {N, T} = Diagonal(fill(promote_type(T, eltype(B))(B), N)) +*(B::Number, A::IMatrix{N, T}) where {N, T} = Diagonal(fill(promote_type(T, eltype(B))(B), N)) +/(A::IMatrix{N, T}, B::Number) where {N, T} = Diagonal(fill(promote_type(T, eltype(B))(1/B), N)) + +*(A::PermMatrix, B::Number) = PermMatrix(A.perm, A.vals*B) +*(B::Number, A::PermMatrix) = A*B +/(A::PermMatrix, B::Number) = PermMatrix(A.perm, A.vals/B) +#+(A::PermMatrix, B::PermMatrix) = PermMatrix(A.dv+B.dv, A.ev+B.ev) +#-(A::PermMatrix, B::PermMatrix) = PermMatrix(A.dv-B.dv, A.ev-B.ev) + + +const IDP = Union{Diagonal, PermMatrix, IMatrix} +for op in [:+, :-, :(==), :≈] + + @eval begin + $op(A::IDP, B::SparseMatrixCSC) = $op(SparseMatrixCSC(A), B) + $op(B::SparseMatrixCSC, A::IDP) = $op(B, SparseMatrixCSC(A)) + + # intra-IDP + $op(A::PermMatrix, B::IDP) = $op(SparseMatrixCSC(A), SparseMatrixCSC(B)) + $op(A::IDP, B::PermMatrix) = $op(SparseMatrixCSC(A), SparseMatrixCSC(B)) + $op(A::PermMatrix, B::PermMatrix) = $op(SparseMatrixCSC(A), SparseMatrixCSC(B)) + end + + # intra-ID + if op in [:+, :-] + @eval begin + $op(d1::Diagonal, d2::IMatrix) = Diagonal($op(d1.diag, diag(d2))) + $op(d1::IMatrix, d2::Diagonal) = Diagonal($op(diag(d1), d2.diag)) + end + else + @eval begin + $op(d1::IMatrix, d2::Diagonal) = $op(diag(d1), d2.diag) + $op(d1::Diagonal, d2::IMatrix) = $op(d1.diag, diag(d2)) + $op(d1::IMatrix{Na}, d2::IMatrix{Nb}) where {Na, Nb} = $op(Na, Nb) + end + end + +end ++(d1::IMatrix{Na, Ta}, d2::IMatrix{Nb, Tb}) where {Na, Nb, Ta, Tb} = d1==d2 ? Diagonal(fill(promote_types(Ta, Tb)(2), Na)) : throw(DimensionMismatch()) +-(d1::IMatrix{Na, Ta}, d2::IMatrix{Nb, Tb}) where {Na, Ta, Nb, Tb} = d1==d2 ? spzeros(promote_types(Ta, Tb), Na, Na) : throw(DimensionMismatch()) diff --git a/src/conversions.jl b/src/conversions.jl new file mode 100644 index 0000000..ae5fe08 --- /dev/null +++ b/src/conversions.jl @@ -0,0 +1,82 @@ +################## To IMatrix ###################### +IMatrix{N, T}(::AbstractMatrix) where {N, T} = IMatrix{N, T}() +IMatrix{N}(A::AbstractMatrix{T}) where {N, T} = IMatrix{N, T}() +IMatrix(A::AbstractMatrix{T}) where T = IMatrix{size(A, 1) == size(A,2) ? 
size(A, 2) : throw(DimensionMismatch()), T}() + +################## To Diagonal ###################### +@static if VERSION >= v"0.7-" + Diagonal{T, V}(A::AbstractMatrix{T}) where {T, V <: AbstractVector{T}, N} = Diagonal{T, V}(convert(V, diag(A))) +end + +for MAT in [:PermMatrix, :IMatrix] + @eval Diagonal(A::$MAT) = Diagonal(diag(A)) +end +Diagonal{T}(A::AbstractMatrix{T}) where T = Diagonal(A) +Diagonal{T}(::IMatrix{N}) where {T, N} = Diagonal{T}(ones(T, N)) + +################## To SparseMatrixCSC ###################### +SparseMatrixCSC{Tv, Ti}(A::IMatrix{N}) where {Tv, Ti <: Integer, N} = SparseMatrixCSC{Tv, Ti}(I, N, N) +SparseMatrixCSC{Tv}(A::IMatrix) where Tv = SparseMatrixCSC{Tv, Int}(A) +SparseMatrixCSC(A::IMatrix{N, T}) where {N, T} = SparseMatrixCSC{T, Int}(I, N, N) +function SparseMatrixCSC(M::PermMatrix) + n = size(M, 1) + #SparseMatrixCSC(n, n, collect(1:n+1), M.perm, M.vals) + order = invperm(M.perm) + SparseMatrixCSC(n, n, collect(1:n+1), order, M.vals[order]) +end +SparseMatrixCSC{Tv, Ti}(M::PermMatrix{Tv, Ti}) where {Tv, Ti} = SparseMatrixCSC(M) + +function SparseMatrixCSC(M::Diagonal) + n = size(M, 1) + SparseMatrixCSC(n, n, collect(1:n+1), collect(1:n), M.diag) +end +SparseMatrixCSC{Tv, Ti}(M::Diagonal{Tv}) where {Tv, Ti} = SparseMatrixCSC(M) + +################## To Dense ###################### +Matrix{T}(::IMatrix{N}) where {T, N} = Matrix{T}(I, N, N) +Matrix(::IMatrix{N, T}) where {N, T} = Matrix{T}(I, N, N) + +function Matrix{T}(X::PermMatrix) where T + n = size(X, 1) + Mf = zeros(T, n, n) + @simd for i=1:n + @inbounds Mf[i, X.perm[i]] = X.vals[i] + end + return Mf +end +Matrix(X::PermMatrix{T}) where T = Matrix{T}(X) + +################## To PermMatrix ###################### +PermMatrix{Tv, Ti}(::IMatrix{N}) where {Tv, Ti, N} = PermMatrix{Tv, Ti}(Vector{Ti}(1:N), ones(Tv, N)) +PermMatrix{Tv}(X::IMatrix) where Tv = PermMatrix{Tv, Int}(X) +PermMatrix(X::IMatrix{N, T}) where {N, T} = PermMatrix{T, Int}(X) + +PermMatrix{Tv, Ti}(A::PermMatrix) where {Tv, Ti} = PermMatrix(Vector{Ti}(A.perm), Vector{Tv}(A.vals)) + +function _findnz(A::AbstractMatrix) + I = findall(!iszero, A) + getindex.(I, 1), getindex.(I, 2), A[I] +end + +_findnz(A::AbstractSparseArray) = findnz(A) + +function PermMatrix{Tv, Ti}(A::AbstractMatrix) where {Tv, Ti} + i, j, v = _findnz(A) + j == collect(1:size(A, 2)) || throw(ArgumentError("This is not a PermMatrix")) + order = invperm(i) + PermMatrix{Tv, Ti}(Vector{Ti}(order), Vector{Tv}(v[order])) +end +PermMatrix(A::AbstractMatrix{T}) where T = PermMatrix{T, Int}(A) +PermMatrix(A::SparseMatrixCSC{Tv, Ti}) where {Tv, Ti} = PermMatrix{Tv, Ti}(A) # inherit indice type +PermMatrix{Tv, Ti}(A::Diagonal{Tv}) where {Tv, Ti} = PermMatrix(Vector{Ti}(1:size(A, 1)), A.diag) +#PermMatrix(A::Diagonal{T}) where T = PermMatrix{T, Int}(A) +# lazy implementation +function PermMatrix{Tv, Ti, Vv, Vi}(A::AbstractMatrix) where {Tv, Ti<:Integer, Vv<:AbstractVector{Tv}, Vi<:AbstractVector{Ti}} + pm = PermMatrix(PermMatrix{Tv, Ti}(A)) + PermMatrix(Vi(pm.perm), Vv(pm.vals)) +end + +import Base: convert +convert(T::Type{<:PermMatrix}, m::AbstractMatrix) = m isa T ? m : T(m) +convert(T::Type{<:IMatrix}, m::AbstractMatrix) = m isa T ? m : T(m) +convert(T::Type{<:Diagonal}, m::AbstractMatrix) = m isa T ? 
m : T(m) diff --git a/src/kronecker.jl b/src/kronecker.jl new file mode 100644 index 0000000..bc9ae2a --- /dev/null +++ b/src/kronecker.jl @@ -0,0 +1,285 @@ +function irepeat(v::AbstractVector, n::Int) + nV = length(v) + res = similar(v, nV*n) + @inbounds for j = 1:nV + vj = v[j] + base = (j-1)*n + @inbounds @simd for i = 1:n + res[base+i] = vj + end + end + res +end + +function orepeat(v::AbstractVector, n::Int) + nV = length(v) + res = similar(v, nV*n) + @inbounds for i = 1:n + base = (i-1)*nV + @inbounds @simd for j = 1:nV + res[base+j] = v[j] + end + end + res +end + + +# TODO: since 0.7 transpose is different, we don't take transpose serious here. +####### kronecker product ########### +import Base: kron +# TODO: if IMatrix{1}, do nothing +kron(A::IMatrix{Na, Ta}, B::IMatrix{Nb, Tb}) where {Na, Nb, Ta, Tb}= IMatrix{Na*Nb, promote_type(Ta, Tb)}() +kron(A::IMatrix{Na}, B::Diagonal) where Na = Diagonal(orepeat(B.diag, Na)) +kron(B::Diagonal, A::IMatrix{Na}) where Na = Diagonal(irepeat(B.diag, Na)) +kron(A::IMatrix{1}, B::AbstractMatrix) = B +kron(A::IMatrix{1}, B::PermMatrix) = B +kron(A::IMatrix{1}, B::Diagonal) = B +kron(A::IMatrix{1}, B::SparseMatrixCSC) = B +kron(A::IMatrix{1}, B::IMatrix) = B + +####### diagonal kron ######## +kron(A::Diagonal, B::Diagonal) = Diagonal(kron(A.diag, B.diag)) +kron(A::StridedMatrix, B::Diagonal) = kron(A, PermMatrix(B)) +kron(A::Diagonal, B::StridedMatrix) = kron(PermMatrix(A), B) +kron(A::Diagonal, B::SparseMatrixCSC) = kron(PermMatrix(A), B) +kron(A::SparseMatrixCSC, B::Diagonal) = kron(A, PermMatrix(B)) + + +function kron(A::AbstractMatrix{Tv}, B::IMatrix{Nb}) where {Nb, Tv} + mA, nA = size(A) + nzval = Vector{Tv}(undef, Nb*mA*nA) + rowval = Vector{Int}(undef, Nb*mA*nA) + colptr = collect(1:mA:Nb*mA*nA+1) + @inbounds for j = 1:nA + source = A[:,j] + startbase = (j-1)*Nb*mA - mA + @inbounds for j2 = 1:Nb + start = startbase + j2*mA + row = j2-Nb + @inbounds @simd for i = 1:mA + nzval[start+i] = source[i] + rowval[start+i] = row+Nb*i + end + end + end + SparseMatrixCSC(mA*Nb, nA*Nb, colptr, rowval, nzval) +end + +function kron(A::IMatrix{Na}, B::AbstractMatrix{Tv}) where {Na, Tv} + mB, nB = size(B) + rowval = Vector{Int}(undef, nB*mB*Na) + nzval = Vector{Tv}(undef, nB*mB*Na) + @inbounds for j in 1:Na + r0 = (j-1)*mB + @inbounds for j2 in 1:nB + start = ((j-1)*nB+j2-1)*mB + @inbounds @simd for i in 1:mB + rowval[start+i] = r0+i + nzval[start+i] = B[i,j2] + end + end + end + colptr = collect(1:mB:nB*mB*Na+1) + SparseMatrixCSC(mB*Na, Na*nB, colptr, rowval, nzval) +end + +function kron(A::IMatrix{Na}, B::SparseMatrixCSC{T}) where {Na, T} + mB, nB = size(B) + nV = nnz(B) + nzval = Vector{T}(undef, Na*nV) + rowval = Vector{Int}(undef, Na*nV) + colptr = Vector{Int}(undef, nB*Na+1) + nzval = Vector{T}(undef, Na*nV) + colptr[1] = 1 + @inbounds for i = 1:Na + r0 = (i-1)*mB + start = nV*(i-1) + @inbounds @simd for k = 1:nV + rowval[start+k] = B.rowval[k]+r0 + nzval[start+k] = B.nzval[k] + end + colbase = (i-1)*nB + @inbounds @simd for j=2:nB+1 + colptr[colbase+j] = B.colptr[j]+start + end + end + SparseMatrixCSC(mB*Na, nB*Na, colptr, rowval, nzval) +end + +function kron(A::SparseMatrixCSC{T}, B::IMatrix{Nb}) where {T, Nb} + mA, nA = size(A) + nV = nnz(A) + rowval = Vector{Int}(undef, Nb*nV) + colptr = Vector{Int}(undef, nA*Nb+1) + nzval = Vector{T}(undef, Nb*nV) + z=1 + colptr[1] = 1 + @inbounds for i in 1:nA + rstart = A.colptr[i] + rend = A.colptr[i+1]-1 + colbase = (i-1)*Nb+1 + @inbounds for k in 1:Nb + irow_Nb = k - Nb + @inbounds @simd for r in rstart:rend 
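+                # kron(A, I_Nb): the stored entry A[row, col] is copied to result row (row-1)*Nb + k of result column (col-1)*Nb + k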
+ rowval[z] = A.rowval[r]*Nb+irow_Nb + nzval[z] = A.nzval[r] + z+=1 + end + colptr[colbase+k] = z + end + end + SparseMatrixCSC(mA*Nb, nA*Nb, colptr, rowval, nzval) +end + +function kron(A::PermMatrix{T}, B::IMatrix) where T + nA = size(A, 1) + nB = size(B, 1) + vals = Vector{T}(undef, nB*nA) + perm = Vector{Int}(undef, nB*nA) + @inbounds for i = 1:nA + start = (i-1)*nB + permAi = (A.perm[i]-1)*nB + val = A.vals[i] + @inbounds @simd for j = 1:nB + perm[start+j] = permAi + j + vals[start+j] = val + end + end + PermMatrix(perm, vals) +end + +function kron(A::IMatrix, B::PermMatrix{Tv, Ti}) where {Tv, Ti <: Integer} + nA = size(A, 1) + nB = size(B, 1) + perm = Vector{Int}(undef, nB*nA) + vals = Vector{Tv}(undef, nB*nA) + @inbounds for i = 1:nA + start = (i-1)*nB + @inbounds @simd for j = 1:nB + perm[start+j] = start +B.perm[j] + vals[start+j] = B.vals[j] + end + end + PermMatrix(perm, vals) +end + + +function kron(A::StridedMatrix{Tv}, B::PermMatrix{Tb}) where {Tv, Tb} + mA, nA = size(A) + nB = size(B, 1) + perm = fast_invperm(B.perm) + nzval = Vector{promote_type(Tv, Tb)}(undef, mA*nA*nB) + rowval = Vector{Int}(undef, mA*nA*nB) + colptr = collect(1:mA:nA*nB*mA+1) + z = 1 + @inbounds for j = 1:nA + @inbounds for j2 = 1:nB + p2 = perm[j2] + val2 = B.vals[p2] + ir = p2 + @inbounds @simd for i = 1:mA + nzval[z] = A[i, j]*val2 # merge + rowval[z] = ir + z += 1 + ir += nB + end + end + end + SparseMatrixCSC(mA*nB, nA*nB, colptr, rowval, nzval) +end + +function kron(A::PermMatrix{Ta}, B::StridedMatrix{Tb}) where {Tb, Ta} + mB, nB = size(B) + nA = size(A, 1) + perm = fast_invperm(A.perm) + nzval = Vector{promote_type(Ta, Tb)}(undef, mB*nA*nB) + rowval = Vector{Int}(undef, mB*nA*nB) + colptr = collect(1:mB:nA*nB*mB+1) + z = 1 + @inbounds for j = 1:nA + colbase = (j-1)*nB + p1 = perm[j] + val2 = A.vals[p1] + ir = (p1-1)*mB + @inbounds for j2 = 1:nB + @inbounds @simd for i2 = 1:mB + nzval[z] = B[i2, j2]*val2 # merge + rowval[z] = ir+i2 + z += 1 + end + end + end + SparseMatrixCSC(nA*mB, nA*nB, colptr, rowval, nzval) +end + +function kron(A::PermMatrix, B::PermMatrix) + nA = size(A, 1) + nB = size(B, 1) + vals = kron(A.vals, B.vals) + perm = Vector{Int}(undef, nB*nA) + @inbounds for i = 1:nA + start = (i-1)*nB + permAi = (A.perm[i]-1)*nB + @inbounds @simd for j = 1:nB + perm[start+j] = permAi +B.perm[j] + end + end + PermMatrix(perm, vals) +end + +kron(A::PermMatrix, B::Diagonal) = kron(A, PermMatrix(B)) +kron(A::Diagonal, B::PermMatrix) = kron(PermMatrix(A), B) + +function kron(A::PermMatrix{Ta}, B::SparseMatrixCSC{Tb}) where {Ta, Tb} + nA = size(A, 1) + mB, nB = size(B) + nV = nnz(B) + perm = fast_invperm(A.perm) + nzval = Vector{promote_type(Ta, Tb)}(undef, nA*nV) + rowval = Vector{Int}(undef, nA*nV) + colptr = Vector{Int}(undef, nA*nB+1) + colptr[1] = 1 + @inbounds @simd for i in 1:nA + start_row = (i-1)*nV + start_ri = (perm[i]-1)*mB + v0 = A.vals[perm[i]] + @inbounds @simd for j = 1:nV + nzval[start_row+j] = B.nzval[j]*v0 + rowval[start_row+j] = B.rowval[j] + start_ri + end + start_col = (i-1)*nB+1 + start_ci = (i-1)*nV + @inbounds @simd for j = 1:nB + colptr[start_col+j] = B.colptr[j+1] + start_ci + end + end + SparseMatrixCSC(mB*nA, nB*nA, colptr, rowval, nzval) +end + +function kron(A::SparseMatrixCSC{T}, B::PermMatrix{Tb}) where {T, Tb} + nB = size(B, 1) + mA, nA = size(A) + nV = nnz(A) + perm = fast_invperm(B.perm) + rowval = Vector{Int}(undef, nB*nV) + colptr = Vector{Int}(undef, nA*nB+1) + nzval = Vector{promote_type(T, Tb)}(undef, nB*nV) + z=1 + colptr[z] = 1 + @inbounds for i in 1:nA + 
rstart = A.colptr[i] + rend = A.colptr[i+1]-1 + @inbounds for k in 1:nB + irow = perm[k] + bval = B.vals[irow] + irow_nB = irow - nB + @inbounds @simd for r in rstart:rend + rowval[z] = A.rowval[r]*nB+irow_nB + nzval[z] = A.nzval[r]*bval + z+=1 + end + colptr[(i-1)*nB+k+1] = z + end + end + SparseMatrixCSC(mA*nB, nA*nB, colptr, rowval, nzval) +end diff --git a/src/linalg.jl b/src/linalg.jl new file mode 100644 index 0000000..caf3f5b --- /dev/null +++ b/src/linalg.jl @@ -0,0 +1,162 @@ +import Base: inv +import Compat.LinearAlgebra: det, diag, logdet + +####### linear algebra ###### +inv(M::IMatrix) = M +det(M::IMatrix) = 1 +diag(M::IMatrix{N, T}) where {N, T} = ones(T, N) +logdet(M::IMatrix) = 0 + +#det(M::PermMatrix) = parity(M.perm)*prod(M.vals) +function inv(M::PermMatrix) + new_perm = fast_invperm(M.perm) + return PermMatrix(new_perm, 1.0./M.vals[new_perm]) +end + +####### multiply ########### +*(A::IMatrix{N}, B::AbstractVector) where N = size(A, 2) == size(B, 1) ? B : + throw(DimensionMismatch("matrix A has dimensions $(size(A)), matrix B has dimensions $((size(B, 1), 1))")) + +for MATTYPE in [:AbstractMatrix, :StridedMatrix, :Diagonal, :SparseMatrixCSC, :Matrix, :PermMatrix] + @eval *(A::IMatrix{N}, B::$MATTYPE) where N = N == size(B, 1) ? B : + throw(DimensionMismatch("matrix A has dimensions $(size(A)), matrix B has dimensions $(size(B))")) + + @eval *(A::$MATTYPE, B::IMatrix{N}) where N = size(A, 2) == N ? A : + throw(DimensionMismatch("matrix A has dimensions $(size(A)), matrix B has dimensions $(size(B))")) +end + +# TODO: use Adjoint to fix this in v0.7 +*(A::RowVector, B::IMatrix) = size(A, 1) == size(B, 1) ? B : + throw(DimensionMismatch("matrix A has dimensions $(size(A)), matrix B has dimensions $(size(B))")) + +*(A::IMatrix, B::IMatrix) = size(A, 2) == size(B, 1) ? A : + throw(DimensionMismatch("matrix A has dimensions $(size(A)), matrix B has dimensions $(size(B))")) + + +########## Multiplication ############# + +# NOTE: making them dry? +# to vector +function *(A::PermMatrix{Ta}, X::AbstractVector{Tx}) where {Ta, Tx} + nX = length(X) + nX == size(A, 2) || throw(DimensionMismatch()) + v = similar(X, promote_type(Ta, Tx)) + @simd for i = 1:nX + @inbounds v[i] = A.vals[i]*X[A.perm[i]] + end + v +end + +function *(X::RowVector{Tx}, A::PermMatrix{Ta}) where {Tx, Ta} + nX = length(X) + nX == size(A, 1) || throw(DimensionMismatch()) + v = similar(X, promote_type(Tx, Ta)) + @simd for i = 1:nX + @inbounds v[A.perm[i]] = A.vals[i]*X[i] + end + v +end + +# to diagonal +function *(D::Diagonal{Td}, A::PermMatrix{Ta}) where {Td, Ta} + T = Base.promote_op(*, Td, Ta) + PermMatrix(A.perm, A.vals .* D.diag) +end + +function *(A::PermMatrix{Ta}, D::Diagonal{Td}) where {Td, Ta} + T = Base.promote_op(*, Td, Ta) + PermMatrix(A.perm, A.vals .* view(D.diag, A.perm)) +end + +# to self +function *(A::PermMatrix, B::PermMatrix) + size(A, 1) == size(B, 1) || throw(DimensionMismatch()) + PermMatrix(B.perm[A.perm], A.vals.*view(B.vals, A.perm)) +end + +# to matrix +function *(A::PermMatrix, X::AbstractMatrix) + size(X, 1) == size(A, 2) || throw(DimensionMismatch()) + return @views A.vals .* X[A.perm, :] # this may be inefficient for sparse CSC matrix. +end + +function *(X::AbstractMatrix, A::PermMatrix) + mX, nX = size(X) + nX == size(A, 1) || throw(DimensionMismatch()) + return @views (A.vals' .* X)[:, fast_invperm(A.perm)] +end + +@static if VERSION >= v"0.7-" +# NOTE: this is just a temperory fix for v0.7. We should overload mul! 
in +# the future (when we start to drop v0.6) to enable buildin lazy evaluation. + +*(x::Adjoint{<:Any, <:AbstractVector}, D::PermMatrix) = Matrix(x) * D +*(x::Transpose{<:Any, <:AbstractVector}, D::PermMatrix) = Matrix(x) * D +*(A::Adjoint{<:Any, <:AbstractArray}, D::PermMatrix) = Adjoint(adjoint(D) * parent(A)) +*(A::Transpose{<:Any, <:AbstractArray}, D::PermMatrix) = Transpose(transpose(D) * parent(A)) +*(A::Adjoint{<:Any, <:PermMatrix}, D::PermMatrix) = adjoint(parent(A)) * D +*(A::Transpose{<:Any, <:PermMatrix}, D::PermMatrix) = transpose(parent(A)) * D +*(A::PermMatrix, D::Adjoint{<:Any, <:PermMatrix}) = A * adjoint(parent(D)) +*(A::PermMatrix, D::Transpose{<:Any, <:PermMatrix}) = A * transpose(parent(D)) + +# for MAT in [:AbstractArray, :Matrix, :SparseMatrixCSC, :PermMatrix] +# @eval begin +# *(A::Adjoint{<:Any, <:$MAT}, D::PermMatrix) = copy(A) * D +# *(A::Transpose{<:Any, <:$MAT}, D::PermMatrix) = copy(A) * D +# *(A::PermMatrix, D::Adjoint{<:Any, <:$MAT}) = A * copy(D) +# *(A::PermMatrix, D::Transpose{<:Any, <:$MAT}) = A * copy(D) +# end +# end + +############### Transpose, Adjoint for IMatrix ############### +for MAT in [:AbstractArray, :AbstractVector, :Matrix, :SparseMatrixCSC, :PermMatrix, :IMatrix] + @eval *(A::Adjoint{<:Any, <:$MAT}, D::IMatrix) = Adjoint(D*parent(A)) + @eval *(A::Transpose{<:Any, <:$MAT}, D::IMatrix) = Transpose(D*parent(A)) + if MAT != :AbstactVector + @eval *(A::IMatrix, D::Transpose{<:Any, <:$MAT}) = Transpose(parent(D)*A) + @eval *(A::IMatrix, D::Adjoint{<:Any, <:$MAT}) = Adjoint(parent(D)*A) + end +end + +end + +# to sparse +function *(A::PermMatrix, X::SparseMatrixCSC) + nA = size(A, 1) + mX, nX = size(X) + mX == nA || throw(DimensionMismatch()) + perm = fast_invperm(A.perm) + nzval = similar(X.nzval) + rowval = similar(X.rowval) + @inbounds for j = 1:nA + @inbounds @simd for k = X.colptr[j]:X.colptr[j+1]-1 + r = perm[X.rowval[k]] + nzval[k] = X.nzval[k]*A.vals[r] + rowval[k] = r + end + end + SparseMatrixCSC(mX, nX, X.colptr, rowval, nzval) +end + +function *(X::SparseMatrixCSC, A::PermMatrix) + nA = size(A, 1) + mX, nX = size(X) + nX == nA || throw(DimensionMismatch()) + perm = fast_invperm(A.perm) + nzval = similar(X.nzval) + colptr = similar(X.colptr) + rowval = similar(X.rowval) + colptr[1] = 1 + z = 1 + @inbounds for j = 1:nA + pk = perm[j] + va = A.vals[pk] + @inbounds @simd for k = X.colptr[pk]:X.colptr[pk+1]-1 + nzval[z] = X.nzval[k]*va + rowval[z] = X.rowval[k] + z+=1 + end + colptr[j+1] = z + end + SparseMatrixCSC(mX, nX, colptr, rowval, nzval) +end diff --git a/src/promotions.jl b/src/promotions.jl new file mode 100644 index 0000000..6e82472 --- /dev/null +++ b/src/promotions.jl @@ -0,0 +1,24 @@ +import Base: promote_rule + +# SparseMatrixCSC +promote_rule(::Type{SparseMatrixCSC{Tv, Ti}}, ::Type{Matrix{T}}) where {Tv, Ti, T} = Matrix{promote_type(T, Tv)} + +# IMatrix +promote_rule(::Type{IMatrix{N, T}}, ::Type{PermMatrix{Tv, Ti, Vv, Vi}}) where {N, T, Tv, Ti, Vv, Vi} = (TT=promote_type(T, Tv); PermMatrix{TT, Ti, Vector{TT}, Vi}) +promote_rule(::Type{IMatrix{N, T}}, ::Type{SparseMatrixCSC{Tv, Ti}}) where {N, T, Tv, Ti} = SparseMatrixCSC{promote_type(T, Tv), Ti} +promote_rule(::Type{IMatrix{M, TA}}, ::Type{Matrix{TB}}) where {M, TA, TB} = Array{TB, 2} + +# PermMatrix +promote_rule(::Type{PermMatrix{TvA, TiA}}, ::Type{SparseMatrixCSC{TvB, TiB}}) where {TvA, TiA, TvB, TiB} = + SparseMatrixCSC{promote_type(TvA, TvB), promote_type(TiA, TiB)} +promote_rule(::Type{PermMatrix{Tv, Ti}}, ::Type{Matrix{T}}) where {Tv, Ti, T} = + 
Array{promote_type(Tv, T), 2}
+
+# Diagonal
+@static if VERSION < v"0.7-"
+promote_rule(::Type{IMatrix{N, TA}}, ::Type{Diagonal{TB}}) where {N, TA, TB} = Diagonal{promote_type(TA, TB)}
+promote_rule(::Type{Diagonal{Tv}}, ::Type{Matrix{T}}) where {Tv, T} = Matrix{promote_type(Tv, T)}
+promote_rule(::Type{Diagonal{T}}, ::Type{SparseMatrixCSC{Tv, Ti}}) where {T, Tv, Ti} = SparseMatrixCSC{promote_type(T, Tv), Ti}
+else
+# TODO: support this promotion for v0.7
+end
diff --git a/src/staticize.jl b/src/staticize.jl
new file mode 100644
index 0000000..e5868ec
--- /dev/null
+++ b/src/staticize.jl
@@ -0,0 +1,43 @@
+struct SSparseMatrixCSC{Tv,Ti<:Integer, NNZ, NP} <: AbstractSparseMatrix{Tv,Ti}
+    m::Int                   # Number of rows
+    n::Int                   # Number of columns
+    colptr::SVector{NP,Ti}   # Column i is in colptr[i]:(colptr[i+1]-1)
+    rowval::SVector{NNZ,Ti}  # Row indices of nonzeros
+    nzval::SVector{NNZ,Tv}   # Nonzero values
+
+    function SSparseMatrixCSC{Tv,Ti, NNZ, NP}(m::Integer, n::Integer, colptr::SVector{NP, Ti}, rowval::SVector{NNZ,Ti},
+                                nzval::SVector{NNZ,Tv}) where {Tv,Ti<:Integer, NNZ, NP}
+        m < 0 && throw(ArgumentError("number of rows (m) must be ≥ 0, got $m"))
+        n < 0 && throw(ArgumentError("number of columns (n) must be ≥ 0, got $n"))
+        new(Int(m), Int(n), colptr, rowval, nzval)
+    end
+end
+
+function SSparseMatrixCSC(m::Integer, n::Integer, colptr::SVector, rowval::SVector, nzval::SVector)
+    Tv = eltype(nzval)
+    Ti = promote_type(eltype(colptr), eltype(rowval))
+    SSparseMatrixCSC{Tv,Ti,length(nzval),n+1}(m, n, colptr, rowval, nzval)
+end
+
+
+struct SDiagonal{T, N} <: AbstractMatrix{T}
+    diag::SVector{N, T}
+end
+SDiagonal(V::AbstractVector{T}) where T = SDiagonal{T, length(V)}(SVector{length(V)}(V))
+
+######### staticize ##########
+"""
+    staticize(A::AbstractMatrix) -> AbstractMatrix
+
+transform a matrix into its static (StaticArrays-backed) form.
+""" +function staticize end + +staticize(A::AbstractMatrix) = SMatrix{size(A,1), size(A,2)}(A) +staticize(A::AbstractVector) = SVector{length(A)}(A) +staticize(A::Diagonal) = SDiagonal(SVector{size(A,1)}(A.diag)) +staticize(A::PermMatrix) = PermMatrix(SVector{size(A,1)}(A.perm), SVector{size(A, 1)}(A.vals)) + +function staticize(A::SparseMatrixCSC) + SSparseMatrixCSC(A.m, A.n, SVector{length(A.colptr)}(A.colptr), SVector{length(A.rowval)}(A.rowval), SVector{length(A.nzval)}(A.nzval)) +end diff --git a/test/IMatrix.jl b/test/IMatrix.jl new file mode 100644 index 0000000..e194ed4 --- /dev/null +++ b/test/IMatrix.jl @@ -0,0 +1,66 @@ +using Test, Random +import LuxurySparse: IMatrix, PermMatrix + +Random.seed!(2) + +p1 = IMatrix{4}() +sp = sprand(ComplexF64, 4,4, 0.5) +ds = rand(ComplexF64, 4,4) +pm = PermMatrix([2,3,4,1], randn(4)) +v = [0.5, 0.3im, 0.2, 1.0] +dv = Diagonal(v) + +@testset "basic" begin + @test p1==copy(p1) + @test eltype(p1) == Bool + @test size(p1) == (4, 4) + @test size(p1, 1) == size(p1, 2) == 4 + @test Matrix(p1) == [1 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 1] +end + +@testset "conversion" begin + for mat in [p1, pm, dv] + @test mat == SparseMatrixCSC(mat) + @test mat == Matrix(mat) + @test mat == typeof(mat)(Matrix(mat)) + @test mat == typeof(mat)(SparseMatrixCSC(mat)) + end + for mat in [p1, pm, dv] + @test mat == PermMatrix(mat) + @test mat == typeof(mat)(PermMatrix(mat)) + end + @test Diagonal(p1) == p1 + @test p1 == typeof(p1)(Diagonal(p1)) +end + +@testset "sparse" begin + @test nnz(p1) == 4 + @test nonzeros(p1) == ones(4) +end + +@testset "linalg" begin + for op in [conj, real, transpose, copy, inv] + @test op(p1) == Matrix(I, 4, 4) + @test typeof(op(p1)) == typeof(p1) + end + @test imag(p1) == zeros(4, 4) + @test p1' == Matrix(I, 4, 4) + + # This will be lazy evaluated in 0.7+ + @static if VERSION < v"0.7-" + @test typeof(p1') == typeof(p1) + end + + @test ishermitian(p1) +end + +@testset "elementary" begin + @test all(isapprox.(conj(p1), conj(Matrix(p1)))) + @test all(isapprox.(real(p1), real(Matrix(p1)))) + @test all(isapprox.(imag(p1), imag(Matrix(p1)))) +end + +@testset "basicmath" begin + @test p1*2im == Matrix(p1)*2im + @test p1/2.0 == Matrix(p1)/2.0 +end diff --git a/test/PermMatrix.jl b/test/PermMatrix.jl new file mode 100644 index 0000000..64a76ad --- /dev/null +++ b/test/PermMatrix.jl @@ -0,0 +1,67 @@ +using Test, Random +import LuxurySparse: PermMatrix, pmrand + +Random.seed!(2) +p1 = PermMatrix([1,4,2,3],[0.1, 0.2, 0.4im, 0.5]) +p2 = PermMatrix([2,1,4,3],[0.1, 0.2, 0.4, 0.5]) +#p3 = PermMatrix([4,1,2,3],[0.5, 0.4im, 0.3, 0.2]) +p3 = pmrand(4) +sp = sprand(4, 4, 0.3) +v = [0.5, 0.3im, 0.2, 1.0] + +@testset "basic" begin + @test p1==copy(p1) + @test eltype(p1) == ComplexF64 + @test eltype(p2) == Float64 + @test eltype(p3) == Float64 + @test size(p1) == (4, 4) + @test size(p3) == (4, 4) + @test size(p1, 1) == size(p1, 2) == 4 + @test Matrix(p1) == [0.1 0 0 0; 0 0 0 0.2; 0 0.4im 0 0; 0 0 0.5 0] +end + +@testset "sparse" begin + @test nnz(p1) == 4 + @test nonzeros(p1) == p1.vals + @test dropzeros!(p1) == p1 +end + +@testset "linalg" begin + @test inv(p1) * p1 ≈ Matrix(I, 4, 4) + @test p1*transpose(p1) == diagm(0=>p1.vals.^2) + #@test p1*adjoint(p1) == diagm(0=>abs.(p1.vals).^2) + #@test all(isapprox.(adjoint(p3), transpose(conj(Matrix(p3))))) + @test p1*p1' == diagm(0=>abs.(p1.vals).^2) + @test all(isapprox.(p3', transpose(conj(Matrix(p3))))) +end + +@testset "mul" begin + @test p3*p2 == SparseMatrixCSC(p3)*p2 == Matrix(p3)*p2 + + # Multiply vector + @test p3*v == 
Matrix(p3)*v + @test v'*p3 == v'*Matrix(p3) + @test p3*collect(1:4) == p3.perm.*p3.vals + + # Diagonal matrices + Dv = Diagonal(v) + @test p3*Dv == Matrix(p3)*Dv + @test Dv*p3 == Dv*Matrix(p3) +end + +@testset "elementary" begin + @test all(isapprox.(conj(p1), conj(Matrix(p1)))) + @test all(isapprox.(real(p1), real(Matrix(p1)))) + @test all(isapprox.(imag(p1), imag(Matrix(p1)))) +end + +@testset "basicmath" begin + @test p1*2 == Matrix(p1)*2 + @test p1/2 == Matrix(p1)/2 +end + +@testset "memorysafe" begin + @test p1 == PermMatrix([1,4,2,3],[0.1, 0.2, 0.4im, 0.5]) + @test p2 == PermMatrix([2,1,4,3],[0.1, 0.2, 0.4, 0.5]) + @test v == [0.5, 0.3im, 0.2, 1.0] +end diff --git a/test/kronecker.jl b/test/kronecker.jl new file mode 100644 index 0000000..ad6d14d --- /dev/null +++ b/test/kronecker.jl @@ -0,0 +1,31 @@ +using Test, Random +import LuxurySparse: IMatrix, PermMatrix + +Random.seed!(2) + +p1 = IMatrix{4}() +sp = sprand(ComplexF64, 4,4, 0.5) +ds = rand(ComplexF64, 4,4) +pm = PermMatrix([2,3,4,1], randn(4)) +v = [0.5, 0.3im, 0.2, 1.0] +dv = Diagonal(v) + + +@testset "kron" begin + for source in [p1, sp, ds, dv, pm] + for target in [p1, sp, ds, dv, pm] + lres = kron(source, target) + rres = kron(target, source) + flres = kron(Matrix(source), Matrix(target)) + frres = kron(Matrix(target), Matrix(source)) + @test lres == flres + @test rres == frres + @test eltype(lres) == eltype(flres) + @test eltype(rres) == eltype(frres) + if !(target === ds && source === ds) + @test !(typeof(lres) <: StridedMatrix) + @test !(typeof(rres) <: StridedMatrix) + end + end + end +end diff --git a/test/linalg.jl b/test/linalg.jl new file mode 100644 index 0000000..dc5d425 --- /dev/null +++ b/test/linalg.jl @@ -0,0 +1,71 @@ +using Test +using LinearAlgebra +import LuxurySparse: IMatrix, PermMatrix, notdense + +Random.seed!(2) + +p1 = IMatrix{4}() +sp = sprand(ComplexF64, 4,4, 0.5) +ds = rand(ComplexF64, 4,4) +pm = PermMatrix([2,3,4,1], randn(4)) +v = [0.5, 0.3im, 0.2, 1.0] +dv = Diagonal(v) + +@testset "invdet" begin + ####### linear algebra ###### + @test inv(p1) == inv(Matrix(p1)) + @test det(p1) == det(Matrix(p1)) + @test diag(p1) == diag(Matrix(p1)) + @test logdet(p1) == 0 + @test inv(pm) == inv(Matrix(pm)) + + for m in [pm, sp, p1, dv] + @test m |> notdense + @test m' |> notdense + @test transpose(m) |> notdense + end + for m in [ds, v] + @test m |> notdense == false + @test m' |> notdense == false + @test transpose(m) |> notdense == false + end +end + +@testset "multiply" begin + for source_ in [p1, sp, ds, dv, pm] + for target in [p1, sp, ds, dv, pm] + for source in [source_, source_', transpose(source_)] + lres = source * target + rres = target * source + flres = Matrix(source) * Matrix(target) + frres = Matrix(target) * Matrix(source) + @test lres ≈ flres + @test rres ≈ frres + if !(target === p1 || parent(source) === p1) + @test eltype(lres) == eltype(flres) + @test eltype(rres) == eltype(frres) + end + if (target |> notdense) && (parent(source) |> notdense) + @test lres |> notdense + @test lres |> notdense + end + end + end + end +end + +@testset "mul-vector" begin + # permutation multiply + lres = transpose(conj(v))*pm #! v' not realized! 
+ rres = pm*v + flres = v' * Matrix(pm) + frres = Matrix(pm) * v + @test lres == flres + @test rres == frres + @test eltype(lres) == eltype(flres) + @test eltype(rres) == eltype(frres) + + # IMatrix + @test v'*p1 == v' + @test p1*v == v +end diff --git a/test/runtests.jl b/test/runtests.jl new file mode 100644 index 0000000..0dc9993 --- /dev/null +++ b/test/runtests.jl @@ -0,0 +1,22 @@ +using Test +using LinearAlgebra, SparseArrays + +@testset "IMatrix" begin + include("IMatrix.jl") +end + +@testset "PermMatrix" begin + include("PermMatrix.jl") +end + +@testset "kronecker" begin + include("kronecker.jl") +end + +@testset "linalg" begin + include("linalg.jl") +end + +@testset "staticize" begin +include("staticize.jl") +end diff --git a/test/staticize.jl b/test/staticize.jl new file mode 100644 index 0000000..c1dfee3 --- /dev/null +++ b/test/staticize.jl @@ -0,0 +1,42 @@ +using Test +using LinearAlgebra + +using LuxurySparse +import LuxurySparse: staticize +using StaticArrays: SVector, SMatrix + +Random.seed!(2) + +using Compat.Test +@testset "staticize" begin + m = pmrand(ComplexF64, 4) + sm = m |> staticize + @test sm.perm isa SVector + @test sm.vals isa SVector + @test sm.perm == m.perm + @test sm.vals == m.vals + + m = sprand(ComplexF64, 4,4, 0.5) + sm = m |> staticize + @test sm.colptr isa SVector + @test sm.rowval isa SVector + @test sm.nzval isa SVector + @test sm.nzval == m.nzval + @test sm.rowval == m.rowval + @test sm.colptr == m.colptr + + m = Diagonal(randn(ComplexF64, 4)) + sm = m |> staticize + @test sm.diag isa SVector + @test sm.diag == m.diag + + m = randn(ComplexF64, 4) + sm = m |> staticize + @test sm isa SVector + @test sm == m + + m = randn(ComplexF64, 4, 4) + sm = m |> staticize + @test sm isa SMatrix + @test sm == m +end
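
Not part of the patch itself: a brief sketch of how the `staticize` API introduced in `src/staticize.jl` is intended to be used, with all names as defined in the files above.

```julia
using LuxurySparse, SparseArrays, LinearAlgebra

pm  = pmrand(ComplexF64, 4)   # random PermMatrix with ordinary Vector storage
spm = staticize(pm)           # same PermMatrix, now backed by SVectors

sp  = sprand(ComplexF64, 4, 4, 0.5)
ssp = staticize(sp)           # SSparseMatrixCSC with static colptr/rowval/nzval

d  = Diagonal(randn(ComplexF64, 4))
sd = staticize(d)             # SDiagonal wrapping an SVector
```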