diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index cffef64b0..6ad294e18 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -7,26 +7,18 @@ on:
jobs:
build:
- strategy:
- matrix:
- os: [ macOS-latest, windows-latest ]
- runs-on: ${{matrix.os}}
- timeout-minutes: 40
+ runs-on: windows-latest
+ timeout-minutes: 20
steps:
- - uses: actions/checkout@v3.0.0
- - uses: actions/setup-java@v3.0.0
+ - uses: actions/checkout@v3
+ - uses: actions/setup-java@v3.5.1
with:
- java-version: 11
- distribution: liberica
- - name: Cache konan
- uses: actions/cache@v3.0.1
- with:
- path: ~/.konan
- key: ${{ runner.os }}-gradle-${{ hashFiles('*.gradle.kts') }}
- restore-keys: |
- ${{ runner.os }}-gradle-
+ java-version: '11'
+ distribution: 'liberica'
+ cache: 'gradle'
- name: Gradle Wrapper Validation
uses: gradle/wrapper-validation-action@v1.0.4
- - uses: gradle/gradle-build-action@v2.1.5
+ - name: Gradle Build
+ uses: gradle/gradle-build-action@v2.4.2
with:
- arguments: build
+ arguments: test jvmTest
diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml
index 2abaf0b9f..ba1f5d1e3 100644
--- a/.github/workflows/pages.yml
+++ b/.github/workflows/pages.yml
@@ -22,7 +22,7 @@ jobs:
key: ${{ runner.os }}-gradle-${{ hashFiles('*.gradle.kts') }}
restore-keys: |
${{ runner.os }}-gradle-
- - uses: gradle/gradle-build-action@v2.1.5
+ - uses: gradle/gradle-build-action@v2.4.2
with:
arguments: dokkaHtmlMultiModule --no-parallel
- uses: JamesIves/github-pages-deploy-action@v4.3.0
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 794881b09..31d539cdd 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -15,7 +15,7 @@ jobs:
runs-on: ${{matrix.os}}
steps:
- uses: actions/checkout@v3.0.0
- - uses: actions/setup-java@v3.0.0
+ - uses: actions/setup-java@v3.10.0
with:
java-version: 11
distribution: liberica
@@ -26,26 +26,25 @@ jobs:
key: ${{ runner.os }}-gradle-${{ hashFiles('*.gradle.kts') }}
restore-keys: |
${{ runner.os }}-gradle-
- - uses: gradle/wrapper-validation-action@v1.0.4
- name: Publish Windows Artifacts
if: matrix.os == 'windows-latest'
- uses: gradle/gradle-build-action@v2.1.5
+ uses: gradle/gradle-build-action@v2.4.2
with:
arguments: |
- releaseAll
- -Ppublishing.enabled=true
- -Ppublishing.sonatype=false
+ publishAllPublicationsToSpaceRepository
+ -Ppublishing.targets=all
-Ppublishing.space.user=${{ secrets.SPACE_APP_ID }}
-Ppublishing.space.token=${{ secrets.SPACE_APP_SECRET }}
- name: Publish Mac Artifacts
if: matrix.os == 'macOS-latest'
- uses: gradle/gradle-build-action@v2.1.5
+ uses: gradle/gradle-build-action@v2.4.2
with:
arguments: |
- releaseMacosX64
- releaseIosArm64
- releaseIosX64
- -Ppublishing.enabled=true
- -Ppublishing.sonatype=false
+ publishMacosX64PublicationToSpaceRepository
+ publishMacosArm64PublicationToSpaceRepository
+ publishIosX64PublicationToSpaceRepository
+ publishIosArm64PublicationToSpaceRepository
+ publishIosSimulatorArm64PublicationToSpaceRepository
+ -Ppublishing.targets=all
-Ppublishing.space.user=${{ secrets.SPACE_APP_ID }}
 -Ppublishing.space.token=${{ secrets.SPACE_APP_SECRET }}
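Note: the `-Ppublishing.space.user` / `-Ppublishing.space.token` properties passed above are consumed by the kscience gradle-tools plugin. As an illustration only (this wiring is an assumption, not part of the patch), the equivalent plain `maven-publish` configuration would look roughly like this:

```kotlin
// Sketch only: shows how the workflow's -Ppublishing.space.* properties could map to
// repository credentials; the real mapping lives inside the kscience gradle-tools plugin.
publishing {
    repositories {
        maven {
            name = "Space"
            url = uri("https://maven.pkg.jetbrains.space/spc/p/sci/maven")
            credentials {
                username = findProperty("publishing.space.user") as? String
                password = findProperty("publishing.space.token") as? String
            }
        }
    }
}
```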
diff --git a/.gitignore b/.gitignore
index 5ddd846a8..96a556ae1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,9 +3,9 @@ build/
out/
.idea/
-
-
.vscode/
+.fleet/
+
# Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored)
!gradle-wrapper.jar
@@ -19,4 +19,5 @@ out/
!/.idea/copyright/
!/.idea/scopes/
-/kotlin-js-store/yarn.lock
+/gradle/yarn.lock
+
diff --git a/.idea/copyright/kmath.xml b/.idea/copyright/kmath.xml
index 17e44e4d0..840e0c87c 100644
--- a/.idea/copyright/kmath.xml
+++ b/.idea/copyright/kmath.xml
@@ -1,6 +1,7 @@
-
-
-
-
-
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/copyright/profiles_settings.xml b/.idea/copyright/profiles_settings.xml
index b538bdf41..1c10bd6f5 100644
--- a/.idea/copyright/profiles_settings.xml
+++ b/.idea/copyright/profiles_settings.xml
@@ -1,5 +1,5 @@
-
+
diff --git a/.space.kts b/.space.kts
index c9500e967..ce52a2f5c 100644
--- a/.space.kts
+++ b/.space.kts
@@ -1,3 +1,48 @@
+import kotlin.io.path.readText
+
+val projectName = "kmath"
+
job("Build") {
- gradlew("openjdk:11", "build")
+    //Perform only JVM tests
+ gradlew("spc.registry.jetbrains.space/p/sci/containers/kotlin-ci:1.0.3", "test", "jvmTest")
+}
+
+job("Publish") {
+ startOn {
+ gitPush { enabled = false }
+ }
+ container("spc.registry.jetbrains.space/p/sci/containers/kotlin-ci:1.0.3") {
+ env["SPACE_USER"] = "{{ project:space_user }}"
+ env["SPACE_TOKEN"] = "{{ project:space_token }}"
+ kotlinScript { api ->
+
+ val spaceUser = System.getenv("SPACE_USER")
+ val spaceToken = System.getenv("SPACE_TOKEN")
+
+ // write the version to the build directory
+ api.gradlew("version")
+
+ //read the version from build file
+ val version = java.nio.file.Path.of("build/project-version.txt").readText()
+
+ val revisionSuffix = if (version.endsWith("SNAPSHOT")) {
+ "-" + api.gitRevision().take(7)
+ } else {
+ ""
+ }
+
+ api.space().projects.automation.deployments.start(
+ project = api.projectIdentifier(),
+ targetIdentifier = TargetIdentifier.Key(projectName),
+ version = version+revisionSuffix,
+ // automatically update deployment status based on the status of a job
+ syncWithAutomationJob = true
+ )
+ api.gradlew(
+ "publishAllPublicationsToSpaceRepository",
+ "-Ppublishing.space.user=\"$spaceUser\"",
+ "-Ppublishing.space.token=\"$spaceToken\"",
+ )
+ }
+ }
}
\ No newline at end of file
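Note: the Publish job above relies on a Gradle `version` task that writes the resolved project version to `build/project-version.txt` (provided by the kscience gradle-tools plugin). A minimal stand-in with the same contract, shown only to illustrate that assumption:

```kotlin
// Illustrative sketch of the contract the .space.kts script depends on:
// running `gradlew version` must leave the version string in build/project-version.txt.
val versionFile = layout.buildDirectory.file("project-version.txt")

tasks.register("version") {
    outputs.file(versionFile)
    doLast {
        versionFile.get().asFile.writeText(project.version.toString())
    }
}
```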
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4852f474a..2f011881f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,13 +1,10 @@
# KMath
-## [Unreleased]
+## Unreleased
+
### Added
-- Autodiff for generic algebra elements in core!
-- Algebra now has an obligatory `bufferFactory` (#477).
### Changed
-- Kotlin 1.7
-- `LazyStructure` `deffered` -> `async` to comply with coroutines code style
### Deprecated
@@ -17,7 +14,34 @@
### Security
-## [0.3.0]
+## 0.3.1 - 2023-04-09
+
+### Added
+- Wasm support for `memory`, `core`, `complex` and `functions` modules.
+- Generic builders for `BufferND` and `MutableBufferND`
+- `NamedMatrix` - matrix with symbol-based indexing
+- `Expression` with default arguments
+- Type-aliases for numbers like `Float64`
+- Autodiff for generic algebra elements in core!
+- Algebra now has an obligatory `bufferFactory` (#477).
+
+### Changed
+- Geometry uses type-safe angles
+- Tensor operations switched to prefix notation
+- Row-wise and column-wise ND shapes in the core
+- Shape is read-only
+- Major refactor of tensors (only minor API changes)
+- Kotlin 1.8.20
+- `LazyStructure` `deffered` -> `async` to comply with coroutines code style
+- Default `dot` operation in tensor algebra no longer supports broadcasting. Instead, a `matmul` operation is added to `DoubleTensorAlgebra`.
+- Multik went MPP
+
+### Removed
+- Trajectory moved to https://github.com/SciProgCentre/maps-kt
+- Polynomials moved to https://github.com/SciProgCentre/kmath-polynomial
+
+## 0.3.0
+
### Added
- `ScaleOperations` interface
- `Field` extends `ScaleOperations`
@@ -42,8 +66,8 @@
- `contentEquals` with tolerance: #364
- Compilation to TeX for MST: #254
-
### Changed
+- Annotations moved to `space.kscience.kmath`
- Exponential operations merged with hyperbolic functions
- Space is replaced by Group. Space is reserved for vector spaces.
- VectorSpace is now a vector space
@@ -75,11 +99,9 @@
- Rework of histograms.
- `UnivariateFunction` -> `Function1D`, `MultivariateFunction` -> `FunctionND`
-
### Deprecated
- Specialized `DoubleBufferAlgebra`
-
### Removed
- Nearest in Domain. To be implemented in geometry package.
- Number multiplication and division in main Algebra chain
@@ -90,15 +112,12 @@
- Second generic from DifferentiableExpression
- Algebra elements are completely removed. Use algebra contexts instead.
-
### Fixed
- Ring inherits RingOperations, not GroupOperations
- Univariate histogram filling
+## 0.2.0
-### Security
-
-## [0.2.0]
### Added
- `fun` annotation for SAM interfaces in library
- Explicit `public` visibility for all public APIs
@@ -118,7 +137,6 @@
- New `MatrixFeature` interfaces for matrix decompositions
- Basic Quaternion vector support in `kmath-complex`.
-
### Changed
- Package changed from `scientifik` to `space.kscience`
- Gradle version: 6.6 -> 6.8.2
@@ -143,7 +161,6 @@
- `symbol` method in `Algebra` renamed to `bindSymbol` to avoid ambiguity
- Add `out` projection to `Buffer` generic
-
### Removed
- `kmath-koma` module because it doesn't support Kotlin 1.4.
- Support of `legacy` JS backend (we will support only IR)
@@ -152,11 +169,11 @@
- `Real` class
- StructureND identity and equals
-
### Fixed
- `symbol` method in `MstExtendedField` (https://github.com/mipt-npm/kmath/pull/140)
-## [0.1.4]
+## 0.1.4
+
### Added
- Functional Expressions API
- Mathematical Syntax Tree, its interpreter and API
@@ -174,7 +191,6 @@
- Full hyperbolic functions support and default implementations within `ExtendedField`
- Norm support for `Complex`
-
### Changed
- `readAsMemory` now has `throws IOException` in JVM signature.
- Several functions taking functional types were made `inline`.
@@ -186,10 +202,9 @@
- Gradle version: 6.3 -> 6.6
- Moved probability distributions to commons-rng and to `kmath-prob`
-
### Fixed
- Missing copy method in Memory implementation on JS (https://github.com/mipt-npm/kmath/pull/106)
- D3.dim value in `kmath-dimensions`
- Multiplication in integer rings in `kmath-core` (https://github.com/mipt-npm/kmath/pull/101)
- Commons RNG compatibility (https://github.com/mipt-npm/kmath/issues/93)
-- Multiplication of BigInt by scalar
\ No newline at end of file
+- Multiplication of BigInt by scalar
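Note on the `dot`/`matmul` changelog entry above: a rough usage sketch of the explicit matrix product. The `fromArray` constructor and the infix `matmul` form are assumptions based on the changelog wording, not verified against the released 0.3.1 API:

```kotlin
// Sketch only: per the changelog, dot no longer broadcasts and matmul is the
// explicit matrix product in DoubleTensorAlgebra (reached via Double.tensorAlgebra).
import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.tensorAlgebra

fun main() = with(Double.tensorAlgebra) {
    val a = fromArray(ShapeND(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0))
    val b = fromArray(ShapeND(2, 2), doubleArrayOf(5.0, 6.0, 7.0, 8.0))
    println(a matmul b)
}
```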
diff --git a/README.md b/README.md
index b9d36df50..7c1f759c1 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,8 @@
[](https://confluence.jetbrains.com/display/ALL/JetBrains+on+GitHub)
[](https://zenodo.org/badge/latestdoi/129486382)
-
+
[](https://search.maven.org/search?q=g:%22space.kscience%22)
-[](https://maven.pkg.jetbrains.space/mipt-npm/p/sci/maven/space/kscience/)
+[](https://maven.pkg.jetbrains.space/spc/p/sci/maven/space/kscience/)
# KMath
@@ -11,7 +11,7 @@ analog to Python's NumPy library. Later we found that kotlin is much more flexib
architecture designs. In contrast to `numpy` and `scipy` it is modular and has a lightweight core. The `numpy`-like
experience could be achieved with [kmath-for-real](/kmath-for-real) extension module.
-[Documentation site (**WIP**)](https://mipt-npm.github.io/kmath/)
+[Documentation site (**WIP**)](https://SciProgCentre.github.io/kmath/)
## Publications and talks
@@ -214,28 +214,6 @@ One can still use generic algebras though.
>
> **Maturity**: EXPERIMENTAL
-### [kmath-polynomial](kmath-polynomial)
->
->
-> **Maturity**: PROTOTYPE
->
-> **Features:**
-> - [polynomial abstraction](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/Polynomial.kt) : Abstraction for polynomial spaces.
-> - [rational function abstraction](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/RationalFunction.kt) : Abstraction for rational functions spaces.
-> - ["list" polynomials](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/ListRationalFunction.kt) : List implementation of univariate polynomials.
-> - ["list" rational functions](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/ListPolynomial.kt) : List implementation of univariate rational functions.
-> - ["list" polynomials and rational functions constructors](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/listConstructors.kt) : Constructors for list polynomials and rational functions.
-> - ["list" polynomials and rational functions utilities](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/listUtil.kt) : Utilities for list polynomials and rational functions.
-> - ["numbered" polynomials](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/NumberedRationalFunction.kt) : Numbered implementation of multivariate polynomials.
-> - ["numbered" rational functions](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/NumberedPolynomial.kt) : Numbered implementation of multivariate rational functions.
-> - ["numbered" polynomials and rational functions constructors](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/numberedConstructors.kt) : Constructors for numbered polynomials and rational functions.
-> - ["numbered" polynomials and rational functions utilities](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/numberedUtil.kt) : Utilities for numbered polynomials and rational functions.
-> - ["labeled" polynomials](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/LabeledRationalFunction.kt) : Labeled implementation of multivariate polynomials.
-> - ["labeled" rational functions](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/LabeledPolynomial.kt) : Labeled implementation of multivariate rational functions.
-> - ["labeled" polynomials and rational functions constructors](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/labeledConstructors.kt) : Constructors for labeled polynomials and rational functions.
-> - ["labeled" polynomials and rational functions utilities](kmath-polynomial/src/commonMain/kotlin/space/kscience/kmath/functions/labeledUtil.kt) : Utilities for labeled polynomials and rational functions.
-
-
### [kmath-stat](kmath-stat)
>
>
@@ -262,11 +240,6 @@ One can still use generic algebras though.
> - [linear algebra operations](kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt) : Advanced linear algebra operations like LU decomposition, SVD, etc.
-### [kmath-trajectory](kmath-trajectory)
-> Path and trajectory optimization
->
-> **Maturity**: PROTOTYPE
-
### [kmath-viktor](kmath-viktor)
>
>
@@ -324,5 +297,4 @@ Gradle `6.0+` is required for multiplatform artifacts.
The project requires a lot of additional work. The most important thing we need is a feedback about what features are
required the most. Feel free to create feature requests. We are also welcome to code contributions, especially in issues
-marked with
-[waiting for a hero](https://github.com/mipt-npm/kmath/labels/waiting%20for%20a%20hero) label.
\ No newline at end of file
+marked with [waiting for a hero](https://github.com/SciProgCentre/kmath/labels/waiting%20for%20a%20hero) label.
\ No newline at end of file
diff --git a/benchmarks/build.gradle.kts b/benchmarks/build.gradle.kts
index 22712816d..24471a9e4 100644
--- a/benchmarks/build.gradle.kts
+++ b/benchmarks/build.gradle.kts
@@ -1,10 +1,11 @@
@file:Suppress("UNUSED_VARIABLE")
+import org.jetbrains.kotlin.gradle.tasks.KotlinJvmCompile
import space.kscience.kmath.benchmarks.addBenchmarkProperties
plugins {
kotlin("multiplatform")
- kotlin("plugin.allopen")
+ alias(spclibs.plugins.kotlin.plugin.allopen)
id("org.jetbrains.kotlinx.benchmark")
}
@@ -15,6 +16,8 @@ repositories {
mavenCentral()
}
+val multikVersion: String by rootProject.extra
+
kotlin {
jvm()
@@ -26,6 +29,9 @@ kotlin {
all {
languageSettings {
progressiveMode = true
+ optIn("kotlin.contracts.ExperimentalContracts")
+ optIn("kotlin.ExperimentalUnsignedTypes")
+ optIn("space.kscience.kmath.UnstableKMathAPI")
}
}
@@ -39,7 +45,9 @@ kotlin {
implementation(project(":kmath-dimensions"))
implementation(project(":kmath-for-real"))
implementation(project(":kmath-tensors"))
- implementation("org.jetbrains.kotlinx:kotlinx-benchmark-runtime:0.4.2")
+ implementation(project(":kmath-multik"))
+ implementation("org.jetbrains.kotlinx:multik-default:$multikVersion")
+ implementation(spclibs.kotlinx.benchmark.runtime)
}
}
@@ -51,7 +59,6 @@ kotlin {
implementation(project(":kmath-kotlingrad"))
implementation(project(":kmath-viktor"))
implementation(project(":kmath-jafama"))
- implementation(project(":kmath-multik"))
implementation(projects.kmath.kmathTensorflow)
implementation("org.tensorflow:tensorflow-core-platform:0.4.0")
implementation("org.nd4j:nd4j-native:1.0.0-M1")
@@ -138,12 +145,10 @@ benchmark {
commonConfiguration()
include("ViktorLogBenchmark")
}
-}
-// Fix kotlinx-benchmarks bug
-afterEvaluate {
- val jvmBenchmarkJar by tasks.getting(org.gradle.jvm.tasks.Jar::class) {
- duplicatesStrategy = DuplicatesStrategy.EXCLUDE
+ configurations.register("integration") {
+ commonConfiguration()
+ include("IntegrationBenchmark")
}
}
@@ -151,11 +156,11 @@ kotlin.sourceSets.all {
with(languageSettings) {
optIn("kotlin.contracts.ExperimentalContracts")
optIn("kotlin.ExperimentalUnsignedTypes")
- optIn("space.kscience.kmath.misc.UnstableKMathAPI")
+ optIn("space.kscience.kmath.UnstableKMathAPI")
}
}
-tasks.withType {
+tasks.withType<KotlinJvmCompile> {
kotlinOptions {
jvmTarget = "11"
freeCompilerArgs = freeCompilerArgs + "-Xjvm-default=all" + "-Xlambdas=indy"
@@ -163,7 +168,7 @@ tasks.withType {
}
readme {
- maturity = ru.mipt.npm.gradle.Maturity.EXPERIMENTAL
+ maturity = space.kscience.gradle.Maturity.EXPERIMENTAL
}
addBenchmarkProperties()
diff --git a/benchmarks/src/jsMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt b/benchmarks/src/jsMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt
index 126a2e648..cb07e489a 100644
--- a/benchmarks/src/jsMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt
+++ b/benchmarks/src/jsMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -9,8 +9,8 @@ import kotlinx.benchmark.Benchmark
import kotlinx.benchmark.Blackhole
import kotlinx.benchmark.Scope
import kotlinx.benchmark.State
+import space.kscience.kmath.UnstableKMathAPI
import space.kscience.kmath.expressions.*
-import space.kscience.kmath.operations.Algebra
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.bindSymbol
import space.kscience.kmath.operations.invoke
@@ -94,6 +94,7 @@ class ExpressionsInterpretersBenchmark {
}
private val mst = node.toExpression(DoubleField)
+ @OptIn(UnstableKMathAPI::class)
private val wasm = node.wasmCompileToExpression(DoubleField)
private val estree = node.estreeCompileToExpression(DoubleField)
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ArrayBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ArrayBenchmark.kt
index ff933997f..abfc8cbf2 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ArrayBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ArrayBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BigIntBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BigIntBenchmark.kt
index 188a48ca7..d07b7b4df 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BigIntBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BigIntBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -10,7 +10,7 @@ import kotlinx.benchmark.Blackhole
import org.openjdk.jmh.annotations.Benchmark
import org.openjdk.jmh.annotations.Scope
import org.openjdk.jmh.annotations.State
-import space.kscience.kmath.misc.UnstableKMathAPI
+import space.kscience.kmath.UnstableKMathAPI
import space.kscience.kmath.operations.BigIntField
import space.kscience.kmath.operations.JBigIntegerField
import space.kscience.kmath.operations.invoke
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BufferBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BufferBenchmark.kt
index 39819d407..c2616303b 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BufferBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BufferBenchmark.kt
@@ -1,39 +1,80 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.benchmarks
import kotlinx.benchmark.Benchmark
+import kotlinx.benchmark.Blackhole
import kotlinx.benchmark.Scope
import kotlinx.benchmark.State
import space.kscience.kmath.complex.Complex
+import space.kscience.kmath.complex.ComplexField
import space.kscience.kmath.complex.complex
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.structures.Buffer
import space.kscience.kmath.structures.DoubleBuffer
-import space.kscience.kmath.structures.MutableBuffer
+import space.kscience.kmath.structures.getDouble
+import space.kscience.kmath.structures.permute
@State(Scope.Benchmark)
internal class BufferBenchmark {
+
+ @Benchmark
+ fun doubleArrayReadWrite(blackhole: Blackhole) {
+ val buffer = DoubleArray(size) { it.toDouble() }
+ var res = 0.0
+ (0 until size).forEach {
+ res += buffer[it]
+ }
+ blackhole.consume(res)
+ }
+
@Benchmark
- fun genericDoubleBufferReadWrite() {
+ fun doubleBufferReadWrite(blackhole: Blackhole) {
val buffer = DoubleBuffer(size) { it.toDouble() }
+ var res = 0.0
+ (0 until size).forEach {
+ res += buffer[it]
+ }
+ blackhole.consume(res)
+ }
+
+ @Benchmark
+ fun bufferViewReadWrite(blackhole: Blackhole) {
+ val buffer = DoubleBuffer(size) { it.toDouble() }.permute(reversedIndices)
+ var res = 0.0
+ (0 until size).forEach {
+ res += buffer[it]
+ }
+ blackhole.consume(res)
+ }
+ @Benchmark
+ fun bufferViewReadWriteSpecialized(blackhole: Blackhole) {
+ val buffer = DoubleBuffer(size) { it.toDouble() }.permute(reversedIndices)
+ var res = 0.0
(0 until size).forEach {
- buffer[it]
+ res += buffer.getDouble(it)
}
+ blackhole.consume(res)
}
@Benchmark
- fun complexBufferReadWrite() {
- val buffer = MutableBuffer.complex(size / 2) { Complex(it.toDouble(), -it.toDouble()) }
+ fun complexBufferReadWrite(blackhole: Blackhole) = ComplexField {
+ val buffer = Buffer.complex(size / 2) { Complex(it.toDouble(), -it.toDouble()) }
+ var res = zero
(0 until size / 2).forEach {
- buffer[it]
+ res += buffer[it]
}
+
+ blackhole.consume(res)
}
private companion object {
private const val size = 100
+ private val reversedIndices = IntArray(size){it}.apply { reverse() }
}
}
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
index 7ceecb5ab..7cbe83113 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -13,7 +13,6 @@ import space.kscience.kmath.commons.linear.CMLinearSpace
import space.kscience.kmath.ejml.EjmlLinearSpaceDDRM
import space.kscience.kmath.linear.invoke
import space.kscience.kmath.linear.linearSpace
-import space.kscience.kmath.multik.multikAlgebra
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensorflow.produceWithTF
@@ -78,7 +77,7 @@ internal class DotBenchmark {
}
@Benchmark
- fun multikDot(blackhole: Blackhole) = with(DoubleField.multikAlgebra) {
+ fun multikDot(blackhole: Blackhole) = with(multikAlgebra) {
blackhole.consume(matrix1 dot matrix2)
}
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt
index db3524e67..4df5f372f 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/IntegrationBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/IntegrationBenchmark.kt
new file mode 100644
index 000000000..6cc649fe9
--- /dev/null
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/IntegrationBenchmark.kt
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.benchmarks
+
+import org.openjdk.jmh.annotations.Benchmark
+import org.openjdk.jmh.annotations.Scope
+import org.openjdk.jmh.annotations.State
+import org.openjdk.jmh.infra.Blackhole
+import space.kscience.kmath.complex.Complex
+import space.kscience.kmath.complex.algebra
+import space.kscience.kmath.integration.gaussIntegrator
+import space.kscience.kmath.integration.integrate
+import space.kscience.kmath.integration.value
+import space.kscience.kmath.operations.algebra
+
+
+@State(Scope.Benchmark)
+internal class IntegrationBenchmark {
+
+ @Benchmark
+ fun doubleIntegration(blackhole: Blackhole) {
+ val res = Double.algebra.gaussIntegrator.integrate(0.0..1.0, intervals = 1000) { x: Double ->
+ //sin(1 / x)
+ 1/x
+ }.value
+ blackhole.consume(res)
+ }
+
+ @Benchmark
+ fun complexIntegration(blackhole: Blackhole) = with(Complex.algebra) {
+ val res = gaussIntegrator.integrate(0.0..1.0, intervals = 1000) { x: Double ->
+// sin(1 / x) + i * cos(1 / x)
+ 1/x - i/x
+ }.value
+ blackhole.consume(res)
+ }
+}
\ No newline at end of file
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/JafamaBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/JafamaBenchmark.kt
index 5d4eee7c0..041f7e92a 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/JafamaBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/JafamaBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt
index 4ff687aac..f7aac8199 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/NDFieldBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/NDFieldBenchmark.kt
index 89673acd4..fb8d845e8 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/NDFieldBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/NDFieldBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -13,11 +13,8 @@ import org.jetbrains.kotlinx.multik.api.Multik
import org.jetbrains.kotlinx.multik.api.ones
import org.jetbrains.kotlinx.multik.ndarray.data.DN
import org.jetbrains.kotlinx.multik.ndarray.data.DataType
-import space.kscience.kmath.multik.multikAlgebra
-import space.kscience.kmath.nd.BufferedFieldOpsND
-import space.kscience.kmath.nd.StructureND
-import space.kscience.kmath.nd.ndAlgebra
-import space.kscience.kmath.nd.one
+import space.kscience.kmath.UnsafeKMathAPI
+import space.kscience.kmath.nd.*
import space.kscience.kmath.nd4j.nd4j
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.tensors.core.DoubleTensor
@@ -43,7 +40,7 @@ internal class NDFieldBenchmark {
}
@Benchmark
- fun multikAdd(blackhole: Blackhole) = with(multikField) {
+ fun multikAdd(blackhole: Blackhole) = with(multikAlgebra) {
var res: StructureND = one(shape)
repeat(n) { res += 1.0 }
blackhole.consume(res)
@@ -70,9 +67,10 @@ internal class NDFieldBenchmark {
blackhole.consume(res)
}
+ @OptIn(UnsafeKMathAPI::class)
@Benchmark
- fun multikInPlaceAdd(blackhole: Blackhole) = with(DoubleField.multikAlgebra) {
- val res = Multik.ones(shape, DataType.DoubleDataType).wrap()
+ fun multikInPlaceAdd(blackhole: Blackhole) = with(multikAlgebra) {
+ val res = Multik.ones(shape.asArray(), DataType.DoubleDataType).wrap()
repeat(n) { res += 1.0 }
blackhole.consume(res)
}
@@ -87,11 +85,10 @@ internal class NDFieldBenchmark {
private companion object {
private const val dim = 1000
private const val n = 100
- private val shape = intArrayOf(dim, dim)
+ private val shape = ShapeND(dim, dim)
private val specializedField = DoubleField.ndAlgebra
private val genericField = BufferedFieldOpsND(DoubleField)
private val nd4jField = DoubleField.nd4j
- private val multikField = DoubleField.multikAlgebra
private val viktorField = DoubleField.viktorAlgebra
}
}
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/TensorAlgebraBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/TensorAlgebraBenchmark.kt
index 38e064e53..c4382374a 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/TensorAlgebraBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/TensorAlgebraBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -13,6 +13,8 @@ import space.kscience.kmath.linear.linearSpace
import space.kscience.kmath.linear.matrix
import space.kscience.kmath.linear.symmetric
import space.kscience.kmath.operations.DoubleField
+import space.kscience.kmath.tensors.core.symEigJacobi
+import space.kscience.kmath.tensors.core.symEigSvd
import space.kscience.kmath.tensors.core.tensorAlgebra
import kotlin.random.Random
@@ -27,11 +29,11 @@ internal class TensorAlgebraBenchmark {
@Benchmark
fun tensorSymEigSvd(blackhole: Blackhole) = with(Double.tensorAlgebra) {
- blackhole.consume(matrix.symEigSvd(1e-10))
+ blackhole.consume(symEigSvd(matrix, 1e-10))
}
@Benchmark
fun tensorSymEigJacobi(blackhole: Blackhole) = with(Double.tensorAlgebra) {
- blackhole.consume(matrix.symEigJacobi(50, 1e-10))
+ blackhole.consume(symEigJacobi(matrix, 50, 1e-10))
}
}
\ No newline at end of file
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ViktorBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ViktorBenchmark.kt
index 0e92a703e..90f3cb765 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ViktorBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ViktorBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -10,7 +10,7 @@ import kotlinx.benchmark.Blackhole
import kotlinx.benchmark.Scope
import kotlinx.benchmark.State
import org.jetbrains.bio.viktor.F64Array
-import space.kscience.kmath.nd.Shape
+import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.nd.ndAlgebra
import space.kscience.kmath.nd.one
@@ -49,7 +49,7 @@ internal class ViktorBenchmark {
private companion object {
private const val dim = 1000
private const val n = 100
- private val shape = Shape(dim, dim)
+ private val shape = ShapeND(dim, dim)
// automatically build context most suited for given type.
private val doubleField = DoubleField.ndAlgebra
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ViktorLogBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ViktorLogBenchmark.kt
index 7bb0b876e..4ec4605ed 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ViktorLogBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ViktorLogBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -10,7 +10,7 @@ import kotlinx.benchmark.Blackhole
import kotlinx.benchmark.Scope
import kotlinx.benchmark.State
import org.jetbrains.bio.viktor.F64Array
-import space.kscience.kmath.nd.Shape
+import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.nd.ndAlgebra
import space.kscience.kmath.nd.one
import space.kscience.kmath.operations.DoubleField
@@ -49,7 +49,7 @@ internal class ViktorLogBenchmark {
private companion object {
private const val dim = 1000
private const val n = 100
- private val shape = Shape(dim, dim)
+ private val shape = ShapeND(dim, dim)
// automatically build context most suited for given type.
private val doubleField = DoubleField.ndAlgebra
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/globals.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/globals.kt
new file mode 100644
index 000000000..f6d278d83
--- /dev/null
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/globals.kt
@@ -0,0 +1,11 @@
+/*
+ * Copyright 2018-2022 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.benchmarks
+
+import org.jetbrains.kotlinx.multik.default.DefaultEngine
+import space.kscience.kmath.multik.MultikDoubleAlgebra
+
+val multikAlgebra = MultikDoubleAlgebra(DefaultEngine())
\ No newline at end of file
diff --git a/build.gradle.kts b/build.gradle.kts
index d8c591799..aed79909c 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -1,6 +1,10 @@
+import space.kscience.gradle.isInDevelopment
+import space.kscience.gradle.useApache2Licence
+import space.kscience.gradle.useSPCTeam
+
plugins {
- id("ru.mipt.npm.gradle.project")
- id("org.jetbrains.kotlinx.kover") version "0.5.0"
+ id("space.kscience.gradle.project")
+ id("org.jetbrains.kotlinx.kover") version "0.6.0"
}
allprojects {
@@ -11,7 +15,7 @@ allprojects {
}
group = "space.kscience"
- version = "0.3.1-dev-1"
+ version = "0.3.1"
}
subprojects {
@@ -31,7 +35,7 @@ subprojects {
localDirectory.set(kotlinDir)
remoteUrl.set(
- java.net.URL("https://github.com/mipt-npm/kmath/tree/master/${this@subprojects.name}/$kotlinDirPath")
+ java.net.URL("https://github.com/SciProgCentre/kmath/tree/master/${this@subprojects.name}/$kotlinDirPath")
)
}
@@ -51,26 +55,26 @@ subprojects {
}
}
}
-
- plugins.withId("org.jetbrains.kotlin.multiplatform") {
- configure {
- sourceSets {
- val commonTest by getting {
- dependencies {
- implementation(projects.testUtils)
- }
- }
- }
- }
- }
}
readme.readmeTemplate = file("docs/templates/README-TEMPLATE.md")
ksciencePublish {
- github("kmath", addToRelease = false)
- space()
- sonatype()
+ pom("https://github.com/SciProgCentre/kmath") {
+ useApache2Licence()
+ useSPCTeam()
+ }
+ github("kmath", "SciProgCentre")
+ space(
+ if (isInDevelopment) {
+ "https://maven.pkg.jetbrains.space/spc/p/sci/dev"
+ } else {
+ "https://maven.pkg.jetbrains.space/spc/p/sci/maven"
+ }
+ )
+ sonatype("https://oss.sonatype.org")
}
-apiValidation.nonPublicMarkers.add("space.kscience.kmath.misc.UnstableKMathAPI")
+apiValidation.nonPublicMarkers.add("space.kscience.kmath.UnstableKMathAPI")
+
+val multikVersion by extra("0.2.0")
diff --git a/buildSrc/build.gradle.kts b/buildSrc/build.gradle.kts
index 20611e92d..734f60091 100644
--- a/buildSrc/build.gradle.kts
+++ b/buildSrc/build.gradle.kts
@@ -1,12 +1,8 @@
plugins {
- kotlin("jvm") version "1.7.0"
`kotlin-dsl`
`version-catalog`
- alias(npmlibs.plugins.kotlin.plugin.serialization)
}
-java.targetCompatibility = JavaVersion.VERSION_11
-
repositories {
mavenLocal()
maven("https://repo.kotlin.link")
@@ -14,20 +10,25 @@ repositories {
gradlePluginPortal()
}
-val toolsVersion = npmlibs.versions.tools.get()
-val kotlinVersion = npmlibs.versions.kotlin.asProvider().get()
-val benchmarksVersion = npmlibs.versions.kotlinx.benchmark.get()
+val toolsVersion = spclibs.versions.tools.get()
+val kotlinVersion = spclibs.versions.kotlin.asProvider().get()
+val benchmarksVersion = spclibs.versions.kotlinx.benchmark.get()
dependencies {
- api("ru.mipt.npm:gradle-tools:$toolsVersion")
- api(npmlibs.atomicfu.gradle)
+ api("space.kscience:gradle-tools:$toolsVersion")
//plugins form benchmarks
- api("org.jetbrains.kotlinx:kotlinx-benchmark-plugin:$benchmarksVersion")
- api("org.jetbrains.kotlin:kotlin-allopen:$kotlinVersion")
+ api("org.jetbrains.kotlinx:kotlinx-benchmark-plugin:0.4.7")
+ //api("org.jetbrains.kotlin:kotlin-allopen:$kotlinVersion")
//to be used inside build-script only
- implementation(npmlibs.kotlinx.serialization.json)
+ //implementation(spclibs.kotlinx.serialization.json)
+ implementation("com.fasterxml.jackson.module:jackson-module-kotlin:2.14.+")
}
-kotlin.sourceSets.all {
- languageSettings.optIn("kotlin.OptIn")
+kotlin{
+ jvmToolchain{
+ languageVersion.set(JavaLanguageVersion.of(11))
+ }
+ sourceSets.all {
+ languageSettings.optIn("kotlin.OptIn")
+ }
}
diff --git a/buildSrc/settings.gradle.kts b/buildSrc/settings.gradle.kts
index bce265510..e6b69b0b3 100644
--- a/buildSrc/settings.gradle.kts
+++ b/buildSrc/settings.gradle.kts
@@ -2,6 +2,7 @@
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
+rootProject.name = "kmath"
enableFeaturePreview("TYPESAFE_PROJECT_ACCESSORS")
@@ -26,8 +27,8 @@ dependencyResolutionManagement {
}
versionCatalogs {
- create("npmlibs") {
- from("ru.mipt.npm:version-catalog:$toolsVersion")
+ create("spclibs") {
+ from("space.kscience:version-catalog:$toolsVersion")
}
}
}
diff --git a/buildSrc/src/main/kotlin/space/kscience/kmath/benchmarks/JmhReport.kt b/buildSrc/src/main/kotlin/space/kscience/kmath/benchmarks/JmhReport.kt
index eaa0f59d8..3a4fcdc79 100644
--- a/buildSrc/src/main/kotlin/space/kscience/kmath/benchmarks/JmhReport.kt
+++ b/buildSrc/src/main/kotlin/space/kscience/kmath/benchmarks/JmhReport.kt
@@ -1,13 +1,10 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.benchmarks
-import kotlinx.serialization.Serializable
-
-@Serializable
data class JmhReport(
val jmhVersion: String,
val benchmark: String,
@@ -37,7 +34,6 @@ data class JmhReport(
val scoreUnit: String
}
- @Serializable
data class PrimaryMetric(
override val score: Double,
override val scoreError: Double,
@@ -48,7 +44,6 @@ data class JmhReport(
val rawData: List>? = null,
) : Metric
- @Serializable
data class SecondaryMetric(
override val score: Double,
override val scoreError: Double,
diff --git a/buildSrc/src/main/kotlin/space/kscience/kmath/benchmarks/addBenchmarkProperties.kt b/buildSrc/src/main/kotlin/space/kscience/kmath/benchmarks/addBenchmarkProperties.kt
index dc9327348..a3a475885 100644
--- a/buildSrc/src/main/kotlin/space/kscience/kmath/benchmarks/addBenchmarkProperties.kt
+++ b/buildSrc/src/main/kotlin/space/kscience/kmath/benchmarks/addBenchmarkProperties.kt
@@ -1,21 +1,22 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.benchmarks
+import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
+import com.fasterxml.jackson.module.kotlin.readValue
import kotlinx.benchmark.gradle.BenchmarksExtension
-import kotlinx.serialization.decodeFromString
-import kotlinx.serialization.json.Json
import org.gradle.api.Project
-import ru.mipt.npm.gradle.KScienceReadmeExtension
+import space.kscience.gradle.KScienceReadmeExtension
import java.time.LocalDateTime
import java.time.ZoneId
import java.time.format.DateTimeFormatter
import java.time.format.DateTimeFormatterBuilder
import java.time.format.SignStyle
import java.time.temporal.ChronoField.*
+import java.util.*
private val ISO_DATE_TIME: DateTimeFormatter = DateTimeFormatterBuilder().run {
parseCaseInsensitive()
@@ -45,12 +46,14 @@ private val ISO_DATE_TIME: DateTimeFormatter = DateTimeFormatterBuilder().run {
private fun noun(number: Number, singular: String, plural: String) = if (number.toLong() == 1L) singular else plural
+private val jsonMapper = jacksonObjectMapper()
+
fun Project.addBenchmarkProperties() {
val benchmarksProject = this
rootProject.subprojects.forEach { p ->
p.extensions.findByType(KScienceReadmeExtension::class.java)?.run {
benchmarksProject.extensions.findByType(BenchmarksExtension::class.java)?.configurations?.forEach { cfg ->
- property("benchmark${cfg.name.capitalize()}") {
+ property("benchmark${cfg.name.replaceFirstChar { if (it.isLowerCase()) it.titlecase(Locale.getDefault()) else it.toString() }}") {
val launches = benchmarksProject.buildDir.resolve("reports/benchmarks/${cfg.name}")
val resDirectory = launches.listFiles()?.maxByOrNull {
@@ -60,8 +63,7 @@ fun Project.addBenchmarkProperties() {
if (resDirectory == null || !(resDirectory.resolve("jvm.json")).exists()) {
"> **Can't find appropriate benchmark data. Try generating readme files after running benchmarks**."
} else {
- val reports =
-                        Json.decodeFromString<List<JmhReport>>(resDirectory.resolve("jvm.json").readText())
+                    val reports: List<JmhReport> = jsonMapper.readValue<List<JmhReport>>(resDirectory.resolve("jvm.json"))
buildString {
appendLine("")
diff --git a/buildSrc/src/main/kotlin/space/kscience/kmath/ejml/codegen/ejmlCodegen.kt b/buildSrc/src/main/kotlin/space/kscience/kmath/ejml/codegen/ejmlCodegen.kt
index 7c23d8ea0..d973ebae4 100644
--- a/buildSrc/src/main/kotlin/space/kscience/kmath/ejml/codegen/ejmlCodegen.kt
+++ b/buildSrc/src/main/kotlin/space/kscience/kmath/ejml/codegen/ejmlCodegen.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -385,7 +385,7 @@ import org.ejml.sparse.csc.factory.LinearSolverFactory_DSCC
import org.ejml.sparse.csc.factory.LinearSolverFactory_FSCC
import space.kscience.kmath.linear.*
import space.kscience.kmath.linear.Matrix
-import space.kscience.kmath.misc.UnstableKMathAPI
+import space.kscience.kmath.UnstableKMathAPI
import space.kscience.kmath.nd.StructureFeature
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.FloatField
diff --git a/docs/images/KM.svg b/docs/images/KM.svg
index 6f80e4d08..55a4339b1 100644
--- a/docs/images/KM.svg
+++ b/docs/images/KM.svg
@@ -1,6 +1,6 @@
diff --git a/docs/images/KM_mono.svg b/docs/images/KM_mono.svg
index 8f8e470b2..f1194f887 100644
--- a/docs/images/KM_mono.svg
+++ b/docs/images/KM_mono.svg
@@ -1,6 +1,6 @@
diff --git a/docs/images/KMath.svg b/docs/images/KMath.svg
index f751d8eb9..509a184bc 100644
--- a/docs/images/KMath.svg
+++ b/docs/images/KMath.svg
@@ -1,6 +1,6 @@
diff --git a/docs/images/KMath_mono.svg b/docs/images/KMath_mono.svg
index 8ca6c5e84..e781979e2 100644
--- a/docs/images/KMath_mono.svg
+++ b/docs/images/KMath_mono.svg
@@ -1,6 +1,6 @@
diff --git a/docs/templates/ARTIFACT-TEMPLATE.md b/docs/templates/ARTIFACT-TEMPLATE.md
index 1bac2a8ff..a3e47e693 100644
--- a/docs/templates/ARTIFACT-TEMPLATE.md
+++ b/docs/templates/ARTIFACT-TEMPLATE.md
@@ -3,10 +3,12 @@
The Maven coordinates of this project are `${group}:${name}:${version}`.
**Gradle:**
-```gradle
+```groovy
repositories {
maven { url 'https://repo.kotlin.link' }
mavenCentral()
+ // development and snapshot versions
+ maven { url 'https://maven.pkg.jetbrains.space/spc/p/sci/dev' }
}
dependencies {
@@ -18,6 +20,8 @@ dependencies {
repositories {
maven("https://repo.kotlin.link")
mavenCentral()
+ // development and snapshot versions
+ maven("https://maven.pkg.jetbrains.space/spc/p/sci/dev")
}
dependencies {
diff --git a/docs/templates/README-TEMPLATE.md b/docs/templates/README-TEMPLATE.md
index 1633f3ff1..d7d5a806d 100644
--- a/docs/templates/README-TEMPLATE.md
+++ b/docs/templates/README-TEMPLATE.md
@@ -1,6 +1,6 @@
[](https://confluence.jetbrains.com/display/ALL/JetBrains+on+GitHub)
[](https://zenodo.org/badge/latestdoi/129486382)
-
+
[](https://search.maven.org/search?q=g:%22space.kscience%22)
[](https://maven.pkg.jetbrains.space/mipt-npm/p/sci/maven/space/kscience/)
@@ -11,7 +11,7 @@ analog to Python's NumPy library. Later we found that kotlin is much more flexib
architecture designs. In contrast to `numpy` and `scipy` it is modular and has a lightweight core. The `numpy`-like
experience could be achieved with [kmath-for-real](/kmath-for-real) extension module.
-[Documentation site (**WIP**)](https://mipt-npm.github.io/kmath/)
+[Documentation site (**WIP**)](https://SciProgCentre.github.io/kmath/)
## Publications and talks
diff --git a/examples/build.gradle.kts b/examples/build.gradle.kts
index aa5c1f47a..7f2abc852 100644
--- a/examples/build.gradle.kts
+++ b/examples/build.gradle.kts
@@ -1,3 +1,5 @@
+import org.jetbrains.kotlin.gradle.tasks.KotlinJvmCompile
+
plugins {
kotlin("jvm")
}
@@ -8,6 +10,8 @@ repositories {
maven("https://maven.pkg.jetbrains.space/kotlin/p/kotlin/kotlin-js-wrappers")
}
+val multikVersion: String by rootProject.extra
+
dependencies {
implementation(project(":kmath-ast"))
implementation(project(":kmath-kotlingrad"))
@@ -16,7 +20,6 @@ dependencies {
implementation(project(":kmath-commons"))
implementation(project(":kmath-complex"))
implementation(project(":kmath-functions"))
- implementation(project(":kmath-polynomial"))
implementation(project(":kmath-optimization"))
implementation(project(":kmath-stat"))
implementation(project(":kmath-viktor"))
@@ -30,7 +33,10 @@ dependencies {
implementation(project(":kmath-jafama"))
//multik
implementation(project(":kmath-multik"))
+ implementation("org.jetbrains.kotlinx:multik-default:$multikVersion")
+ //datetime
+ implementation("org.jetbrains.kotlinx:kotlinx-datetime:0.4.0")
implementation("org.nd4j:nd4j-native:1.0.0-beta7")
@@ -44,29 +50,28 @@ dependencies {
// } else
implementation("org.nd4j:nd4j-native-platform:1.0.0-beta7")
- // multik implementation
- implementation("org.jetbrains.kotlinx:multik-default:0.1.0")
-
implementation("org.slf4j:slf4j-simple:1.7.32")
// plotting
implementation("space.kscience:plotlykt-server:0.5.0")
}
-kotlin.sourceSets.all {
- with(languageSettings) {
- optIn("kotlin.contracts.ExperimentalContracts")
- optIn("kotlin.ExperimentalUnsignedTypes")
- optIn("space.kscience.kmath.misc.UnstableKMathAPI")
+kotlin {
+ jvmToolchain(11)
+ sourceSets.all {
+ languageSettings {
+ optIn("kotlin.contracts.ExperimentalContracts")
+ optIn("kotlin.ExperimentalUnsignedTypes")
+ optIn("space.kscience.kmath.UnstableKMathAPI")
+ }
}
}
-tasks.withType {
+tasks.withType<KotlinJvmCompile> {
kotlinOptions {
- jvmTarget = "11"
freeCompilerArgs = freeCompilerArgs + "-Xjvm-default=all" + "-Xopt-in=kotlin.RequiresOptIn" + "-Xlambdas=indy"
}
}
readme {
- maturity = ru.mipt.npm.gradle.Maturity.EXPERIMENTAL
+ maturity = space.kscience.gradle.Maturity.EXPERIMENTAL
}
diff --git a/examples/notebooks/Naive classifier.ipynb b/examples/notebooks/Naive classifier.ipynb
new file mode 100644
index 000000000..937f5b6c6
--- /dev/null
+++ b/examples/notebooks/Naive classifier.ipynb
@@ -0,0 +1,418 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "source": [
+ "%use kmath(0.3.1-dev-5)\n",
+ "%use plotly(0.5.0)\n",
+ "@file:DependsOn(\"space.kscience:kmath-commons:0.3.1-dev-5\")"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "lQbSB87rNAn9lV6poArVWW",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "//Uncomment to work in Jupyter classic or DataLore\n",
+ "//Plotly.jupyter.notebook()"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "0UP158hfccGgjQtHz0wAi6",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# The model\n",
+ "\n",
+ "Defining the input data format, the statistic abstraction and the statistic implementation based on a weighted sum of elements."
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "class XYValues(val xValues: DoubleArray, val yValues: DoubleArray) {\n",
+ " init {\n",
+ " require(xValues.size == yValues.size)\n",
+ " }\n",
+ "}\n",
+ "\n",
+ "fun interface XYStatistic {\n",
+ " operator fun invoke(values: XYValues): Double\n",
+ "}\n",
+ "\n",
+ "class ConvolutionalXYStatistic(val weights: DoubleArray) : XYStatistic {\n",
+ " override fun invoke(values: XYValues): Double {\n",
+ " require(weights.size == values.yValues.size)\n",
+ " val norm = values.yValues.sum()\n",
+ " return values.yValues.zip(weights) { value, weight -> value * weight }.sum()/norm\n",
+ " }\n",
+ "}"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "Zhgz1Ui91PWz0meJiQpHol",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# Generator\n",
+ "Generate sample data for parabolas and hyperbolas"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "fun generateParabolas(xValues: DoubleArray, a: Double, b: Double, c: Double): XYValues {\n",
+ " val yValues = xValues.map { x -> a * x * x + b * x + c }.toDoubleArray()\n",
+ " return XYValues(xValues, yValues)\n",
+ "}\n",
+ "\n",
+ "fun generateHyperbols(xValues: DoubleArray, gamma: Double, x0: Double, y0: Double): XYValues {\n",
+ " val yValues = xValues.map { x -> y0 + gamma / (x - x0) }.toDoubleArray()\n",
+ " return XYValues(xValues, yValues)\n",
+ "}"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "val xValues = (1.0..10.0).step(1.0).toDoubleArray()\n",
+ "\n",
+ "val xy = generateHyperbols(xValues, 1.0, 0.0, 0.0)\n",
+ "\n",
+ "Plotly.plot {\n",
+ " scatter {\n",
+ " this.x.doubles = xValues\n",
+ " this.y.doubles = xy.yValues\n",
+ " }\n",
+ "}"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "ZE2atNvFzQsCvpAF8KK4ch",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Create a default statistic with uniform weights"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "val statistic = ConvolutionalXYStatistic(DoubleArray(xValues.size){1.0})\n",
+ "statistic(xy)"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "EA5HaydTddRKYrtAUwd29h",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import kotlin.random.Random\n",
+ "\n",
+ "val random = Random(1288)\n",
+ "\n",
+ "val parabolas = buildList{\n",
+ " repeat(500){\n",
+ " add(\n",
+ " generateParabolas(\n",
+ " xValues, \n",
+ " random.nextDouble(), \n",
+ " random.nextDouble(), \n",
+ " random.nextDouble()\n",
+ " )\n",
+ " )\n",
+ " }\n",
+ "}\n",
+ "\n",
+ "val hyperbolas: List = buildList{\n",
+ " repeat(500){\n",
+ " add(\n",
+ " generateHyperbols(\n",
+ " xValues, \n",
+ " random.nextDouble()*10, \n",
+ " random.nextDouble(), \n",
+ " random.nextDouble()\n",
+ " )\n",
+ " )\n",
+ " }\n",
+ "}"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "t5t6IYmD7Q1ykeo9uijFfQ",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "Plotly.plot { \n",
+ " scatter { \n",
+ " x.doubles = xValues\n",
+ " y.doubles = parabolas[257].yValues\n",
+ " }\n",
+ " scatter { \n",
+ " x.doubles = xValues\n",
+ " y.doubles = hyperbolas[252].yValues\n",
+ " }\n",
+ " }"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "oXB8lmju7YVYjMRXITKnhO",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "Plotly.plot { \n",
+ " histogram { \n",
+ " name = \"parabolae\"\n",
+ " x.numbers = parabolas.map { statistic(it) }\n",
+ " }\n",
+ " histogram { \n",
+ " name = \"hyperbolae\"\n",
+ " x.numbers = hyperbolas.map { statistic(it) }\n",
+ " }\n",
+ "}"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "8EIIecUZrt2NNrOkhxG5P0",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "val lossFunction: (XYStatistic) -> Double = { statistic ->\n",
+ " - abs(parabolas.sumOf { statistic(it) } - hyperbolas.sumOf { statistic(it) })\n",
+ "}"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "h7UmglJW5zXkAfKHK40oIL",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Using commons-math optimizer to optimize weights"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import org.apache.commons.math3.optim.*\n",
+ "import org.apache.commons.math3.optim.nonlinear.scalar.*\n",
+ "import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.*\n",
+ "\n",
+ "val optimizer = SimplexOptimizer(1e-1, Double.MAX_VALUE)\n",
+ "\n",
+ "val result = optimizer.optimize(\n",
+ " ObjectiveFunction { point ->\n",
+ " lossFunction(ConvolutionalXYStatistic(point))\n",
+ " },\n",
+ " NelderMeadSimplex(xValues.size),\n",
+ " InitialGuess(DoubleArray(xValues.size){ 1.0 }),\n",
+ " GoalType.MINIMIZE,\n",
+ " MaxEval(100000)\n",
+ ")"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "0EG3K4aCUciMlgGQKPvJ57",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Print resulting weights of optimization"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "result.point"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "LelUlY0ZSlJEO9yC6SLk5B",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "Plotly.plot { \n",
+ " scatter { \n",
+ " y.doubles = result.point\n",
+ " }\n",
+ "}"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "AuFOq5t9KpOIkGrOLsVXNf",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# The resulting statistic distribution"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "val resultStatistic = ConvolutionalXYStatistic(result.point)\n",
+ "Plotly.plot { \n",
+ " histogram { \n",
+ " name = \"parabolae\"\n",
+ " x.numbers = parabolas.map { resultStatistic(it) }\n",
+ " }\n",
+ " histogram { \n",
+ " name = \"hyperbolae\"\n",
+ " x.numbers = hyperbolas.map { resultStatistic(it) }\n",
+ " }\n",
+ "}"
+ ],
+ "execution_count": null,
+ "outputs": [],
+ "metadata": {
+ "datalore": {
+ "node_id": "zvmq42DRdM5mZ3SpzviHwI",
+ "type": "CODE",
+ "hide_input_from_viewers": false,
+ "hide_output_from_viewers": false
+ }
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [],
+ "metadata": {
+ "collapsed": false
+ }
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Kotlin",
+ "language": "kotlin",
+ "name": "kotlin"
+ },
+ "datalore": {
+ "version": 1,
+ "computation_mode": "JUPYTER",
+ "package_manager": "pip",
+ "base_environment": "default",
+ "packages": []
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/examples/src/main/kotlin/space/kscience/kmath/ast/astRendering.kt b/examples/src/main/kotlin/space/kscience/kmath/ast/astRendering.kt
index c4f263f97..e85bab8d8 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/ast/astRendering.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/ast/astRendering.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/ast/expressions.kt b/examples/src/main/kotlin/space/kscience/kmath/ast/expressions.kt
index 907f1bbe4..cacb6683e 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/ast/expressions.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/ast/expressions.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/ast/kotlingradSupport.kt b/examples/src/main/kotlin/space/kscience/kmath/ast/kotlingradSupport.kt
index dec3bfb81..b443e639d 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/ast/kotlingradSupport.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/ast/kotlingradSupport.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/ast/symjaSupport.kt b/examples/src/main/kotlin/space/kscience/kmath/ast/symjaSupport.kt
index 7e09faeff..92ee1781b 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/ast/symjaSupport.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/ast/symjaSupport.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/expressions/autodiff.kt b/examples/src/main/kotlin/space/kscience/kmath/expressions/autodiff.kt
new file mode 100644
index 000000000..863d6be7a
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/expressions/autodiff.kt
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.expressions
+
+import space.kscience.kmath.UnstableKMathAPI
+// Only kmath-core is needed.
+
+// Let's declare some variables
+val x by symbol
+val y by symbol
+val z by symbol
+
+@OptIn(UnstableKMathAPI::class)
+fun main() {
+ // Let's define some random expression.
+ val someExpression = Double.autodiff.differentiate {
+ // We bind variables `x` and `y` to the builder scope,
+ val x = bindSymbol(x)
+ val y = bindSymbol(y)
+
+ // Then we use the bindings to define expression `xy + x + y - 1`
+ x * y + x + y - 1
+ }
+
+ // Then we can evaluate it at any point (here, (-1, -1)):
+ println(someExpression(x to -1.0, y to -1.0))
+ // >>> -2.0
+
+ // We can also construct its partial derivatives:
+ val dxExpression = someExpression.derivative(x) // ∂/∂x. Must be `y+1`
+ val dyExpression = someExpression.derivative(y) // ∂/∂y. Must be `x+1`
+ val dxdxExpression = someExpression.derivative(x, x) // ∂^2/∂x^2. Must be `0`
+
+ // We can evaluate them as well
+ println(dxExpression(x to 57.0, y to 6.0))
+ // >>> 7.0
+ println(dyExpression(x to -1.0, y to 179.0))
+ // >>> 0.0
+ println(dxdxExpression(x to 239.0, y to 30.0))
+ // >>> 0.0
+
+ // You can also provide extra arguments that obviously won't affect the result:
+ println(dxExpression(x to 57.0, y to 6.0, z to 42.0))
+ // >>> 7.0
+ println(dyExpression(x to -1.0, y to 179.0, z to 0.0))
+ // >>> 0.0
+ println(dxdxExpression(x to 239.0, y to 30.0, z to 100_000.0))
+ // >>> 0.0
+
+ // But if you forget to specify a bound symbol's value, an exception is thrown:
+ println( runCatching { someExpression(z to 4.0) } )
+ // >>> Failure(java.lang.IllegalStateException: Symbol 'x' is not supported in ...)
+
+ // The reason is that the expression is evaluated lazily,
+ // and each `bindSymbol` operation actually substitutes the provided symbol with the corresponding value.
+
+ // For example, let there be an expression
+ val simpleExpression = Double.autodiff.differentiate {
+ val x = bindSymbol(x)
+ x pow 2
+ }
+ // When you evaluate it via
+ simpleExpression(x to 1.0, y to 57.0, z to 179.0)
+ // the lambda above is evaluated in the context of the map `{x: 1.0, y: 57.0, z: 179.0}`.
+ // When x is bound, you can think of it as the substitution `x -> 1.0`.
+ // The other values are simply unused, which is not a problem.
+ // But if the corresponding value is not provided,
+ // the variable cannot be bound, so an exception is thrown.
+
+ // There is also a function `bindSymbolOrNull` that fixes the problem:
+ val fixedExpression = Double.autodiff.differentiate {
+ val x = bindSymbolOrNull(x) ?: const(8.0)
+ x pow -2
+ }
+ println(fixedExpression())
+ // >>> 0.015625
+ // It works!
+
+ // The expression builder provides a number of operations (a short illustrative sketch follows below):
+ // 1. Constant bindings (via `const` and `number`).
+ // 2. Variable bindings (via `bindSymbol`, `bindSymbolOrNull`).
+ // 3. Arithmetic operations (via `+`, `-`, `*`, and `/`).
+ // 4. Exponentiation (via `pow` or `power`).
+ // 5. `exp` and `ln`.
+ // 6. Trigonometric functions (`sin`, `cos`, `tan`, `cot`).
+ // 7. Inverse trigonometric functions (`asin`, `acos`, `atan`, `acot`).
+ // 8. Hyperbolic functions and inverse hyperbolic functions.
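+
+ // A small additional sketch (added illustration, not part of the original walk-through), assuming the
+ // builder really exposes `sin` and `exp` as listed above:
+ val trigExpression = Double.autodiff.differentiate {
+ val x = bindSymbol(x)
+ sin(x) * exp(x)
+ }
+ println(trigExpression(x to 0.0))
+ // >>> 0.0, since sin(0) * exp(0) = 0
+ val trigDerivative = trigExpression.derivative(x) // ∂/∂x. Must be `(sin x + cos x) * e^x`
+ println(trigDerivative(x to 0.0))
+ // >>> 1.0, since (sin 0 + cos 0) * e^0 = 1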
+}
diff --git a/examples/src/main/kotlin/space/kscience/kmath/fit/chiSquared.kt b/examples/src/main/kotlin/space/kscience/kmath/fit/chiSquared.kt
index 63e57bd8c..258ed0c84 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/fit/chiSquared.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/fit/chiSquared.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -7,10 +7,9 @@ package space.kscience.kmath.fit
import kotlinx.html.br
import kotlinx.html.h3
-import space.kscience.kmath.commons.expressions.DSProcessor
import space.kscience.kmath.commons.optimization.CMOptimizer
import space.kscience.kmath.distributions.NormalDistribution
-import space.kscience.kmath.expressions.chiSquaredExpression
+import space.kscience.kmath.expressions.autodiff
import space.kscience.kmath.expressions.symbol
import space.kscience.kmath.operations.asIterable
import space.kscience.kmath.operations.toList
@@ -18,10 +17,11 @@ import space.kscience.kmath.optimization.FunctionOptimizationTarget
import space.kscience.kmath.optimization.optimizeWith
import space.kscience.kmath.optimization.resultPoint
import space.kscience.kmath.optimization.resultValue
+import space.kscience.kmath.random.RandomGenerator
import space.kscience.kmath.real.DoubleVector
import space.kscience.kmath.real.map
import space.kscience.kmath.real.step
-import space.kscience.kmath.stat.RandomGenerator
+import space.kscience.kmath.stat.chiSquaredExpression
import space.kscience.plotly.*
import space.kscience.plotly.models.ScatterMode
import space.kscience.plotly.models.TraceValues
@@ -67,7 +67,7 @@ suspend fun main() {
val yErr = y.map { sqrt(it) }//RealVector.same(x.size, sigma)
// compute differentiable chi^2 sum for given model ax^2 + bx + c
- val chi2 = DSProcessor.chiSquaredExpression(x, y, yErr) { arg ->
+ val chi2 = Double.autodiff.chiSquaredExpression(x, y, yErr) { arg ->
//bind variables to autodiff context
val a = bindSymbol(a)
val b = bindSymbol(b)
diff --git a/examples/src/main/kotlin/space/kscience/kmath/fit/qowFit.kt b/examples/src/main/kotlin/space/kscience/kmath/fit/qowFit.kt
index d52976671..fe7f48b72 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/fit/qowFit.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/fit/qowFit.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -7,21 +7,18 @@ package space.kscience.kmath.fit
import kotlinx.html.br
import kotlinx.html.h3
-import space.kscience.kmath.commons.expressions.DSProcessor
import space.kscience.kmath.data.XYErrorColumnarData
import space.kscience.kmath.distributions.NormalDistribution
import space.kscience.kmath.expressions.Symbol
+import space.kscience.kmath.expressions.autodiff
import space.kscience.kmath.expressions.binding
import space.kscience.kmath.expressions.symbol
import space.kscience.kmath.operations.asIterable
import space.kscience.kmath.operations.toList
-import space.kscience.kmath.optimization.QowOptimizer
-import space.kscience.kmath.optimization.chiSquaredOrNull
-import space.kscience.kmath.optimization.fitWith
-import space.kscience.kmath.optimization.resultPoint
+import space.kscience.kmath.optimization.*
+import space.kscience.kmath.random.RandomGenerator
import space.kscience.kmath.real.map
import space.kscience.kmath.real.step
-import space.kscience.kmath.stat.RandomGenerator
import space.kscience.plotly.*
import space.kscience.plotly.models.ScatterMode
import kotlin.math.abs
@@ -32,6 +29,8 @@ import kotlin.math.sqrt
private val a by symbol
private val b by symbol
private val c by symbol
+private val d by symbol
+private val e by symbol
/**
@@ -63,17 +62,23 @@ suspend fun main() {
val result = XYErrorColumnarData.of(x, y, yErr).fitWith(
QowOptimizer,
- DSProcessor,
- mapOf(a to 0.9, b to 1.2, c to 2.0)
+ Double.autodiff,
+ mapOf(a to 0.9, b to 1.2, c to 2.0, d to 1.0, e to 0.0),
+ OptimizationParameters(a, b, c, d)
) { arg ->
//bind variables to autodiff context
val a by binding
val b by binding
//Include default value for c if it is not provided as a parameter
val c = bindSymbolOrNull(c) ?: one
- a * arg.pow(2) + b * arg + c
+ val d by binding
+ val e by binding
+
+ a * arg.pow(2) + b * arg + c + d * arg.pow(3) + e / arg
}
+ println("Resulting chi2/dof: ${result.chiSquaredOrNull}/${result.dof}")
+
//display a page with plot and numerical results
val page = Plotly.page {
plot {
@@ -89,7 +94,7 @@ suspend fun main() {
scatter {
mode = ScatterMode.lines
x(x)
- y(x.map { result.model(result.resultPoint + (Symbol.x to it)) })
+ y(x.map { result.model(result.startPoint + result.resultPoint + (Symbol.x to it)) })
name = "fit"
}
}
@@ -98,7 +103,7 @@ suspend fun main() {
+"Fit result: ${result.resultPoint}"
}
h3 {
- +"Chi2/dof = ${result.chiSquaredOrNull!! / (x.size - 3)}"
+ +"Chi2/dof = ${result.chiSquaredOrNull!! / result.dof}"
}
}
diff --git a/examples/src/main/kotlin/space/kscience/kmath/functions/integrate.kt b/examples/src/main/kotlin/space/kscience/kmath/functions/integrate.kt
index 59eaba5bd..e8534d002 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/functions/integrate.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/functions/integrate.kt
@@ -1,10 +1,15 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.functions
+import space.kscience.kmath.complex.Complex
+import space.kscience.kmath.complex.ComplexField
+import space.kscience.kmath.complex.ComplexField.div
+import space.kscience.kmath.complex.ComplexField.minus
+import space.kscience.kmath.complex.algebra
import space.kscience.kmath.integration.gaussIntegrator
import space.kscience.kmath.integration.integrate
import space.kscience.kmath.integration.value
@@ -20,4 +25,12 @@ fun main() {
//the value is nullable because in some cases the integration could not succeed
println(result.value)
+
+
+ repeat(100000) {
+ Complex.algebra.gaussIntegrator.integrate(0.0..1.0, intervals = 1000) { x: Double ->
+// sin(1 / x) + i * cos(1 / x)
+ 1 / x - ComplexField.i / x
+ }.value
+ }
}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/functions/interpolate.kt b/examples/src/main/kotlin/space/kscience/kmath/functions/interpolate.kt
index 8dbc7b7a4..b4ce6ad2d 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/functions/interpolate.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/functions/interpolate.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/functions/interpolateSquare.kt b/examples/src/main/kotlin/space/kscience/kmath/functions/interpolateSquare.kt
index a50df0931..7bcd96990 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/functions/interpolateSquare.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/functions/interpolateSquare.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/functions/matrixIntegration.kt b/examples/src/main/kotlin/space/kscience/kmath/functions/matrixIntegration.kt
index 4f99aeb47..baba2eb28 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/functions/matrixIntegration.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/functions/matrixIntegration.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -12,23 +12,21 @@ import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.nd.structureND
import space.kscience.kmath.nd.withNdAlgebra
import space.kscience.kmath.operations.algebra
-import space.kscience.kmath.operations.invoke
+import kotlin.math.pow
-fun main(): Unit = Double.algebra {
- withNdAlgebra(2, 2) {
+fun main(): Unit = Double.algebra.withNdAlgebra(2, 2) {
- //Produce a diagonal StructureND
- fun diagonal(v: Double) = structureND { (i, j) ->
- if (i == j) v else 0.0
- }
+ //Produce a diagonal StructureND
+ fun diagonal(v: Double) = structureND { (i, j) ->
+ if (i == j) v else 0.0
+ }
- //Define a function in a nd space
- val function: (Double) -> StructureND = { x: Double -> 3 * x.pow(2) + 2 * diagonal(x) + 1 }
+ //Define a function in a nd space
+ val function: (Double) -> StructureND = { x: Double -> 3 * x.pow(2) + 2 * diagonal(x) + 1 }
- //get the result of the integration
- val result = gaussIntegrator.integrate(0.0..10.0, function = function)
+ //get the result of the integration
+ val result = gaussIntegrator.integrate(0.0..10.0, function = function)
- //the value is nullable because in some cases the integration could not succeed
- println(result.value)
- }
-}
\ No newline at end of file
+ //the value is nullable because in some cases the integration could not succeed
+ println(result.value)
+}
diff --git a/examples/src/main/kotlin/space/kscience/kmath/functions/polynomials.kt b/examples/src/main/kotlin/space/kscience/kmath/functions/polynomials.kt
deleted file mode 100644
index c65ca589d..000000000
--- a/examples/src/main/kotlin/space/kscience/kmath/functions/polynomials.kt
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Copyright 2018-2021 KMath contributors.
- * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
- */
-
-@file:Suppress("LocalVariableName")
-
-package space.kscience.kmath.functions
-
-import space.kscience.kmath.expressions.Symbol
-import space.kscience.kmath.expressions.symbol
-import space.kscience.kmath.operations.algebra
-import space.kscience.kmath.operations.invoke
-
-
-/**
- * Shows [ListPolynomial]s' and [ListRationalFunction]s' capabilities.
- */
-fun listPolynomialsExample() {
- // [ListPolynomial] is a representation of a univariate polynomial as a list of coefficients from the least term to
- // the greatest term. For example,
- val polynomial1: ListPolynomial = ListPolynomial(listOf(2, -3, 1))
- // represents polynomial 2 + (-3) x + x^2
-
- // There are also shortcut fabrics:
- val polynomial2: ListPolynomial = ListPolynomial(2, -3, 1)
- println(polynomial1 == polynomial2) // true
- // and even
- val polynomial3: ListPolynomial = 57.asListPolynomial()
- val polynomial4: ListPolynomial = ListPolynomial(listOf(57))
- println(polynomial3 == polynomial4) // true
-
- val polynomial5: ListPolynomial = ListPolynomial(3, -1)
- // For every ring there can be provided a polynomial ring:
- Int.algebra.listPolynomialSpace {
- println(-polynomial5 == ListPolynomial(-3, 1)) // true
- println(polynomial1 + polynomial5 == ListPolynomial(5, -4, 1)) // true
- println(polynomial1 - polynomial5 == ListPolynomial(-1, -2, 1)) // true
- println(polynomial1 * polynomial5 == ListPolynomial(6, -11, 6, -1)) // true
- }
- // You can even write
- val x: ListPolynomial = ListPolynomial(0.0, 1.0)
- val polynomial6: ListPolynomial = ListPolynomial(2.0, -3.0, 1.0)
- Double.algebra.listPolynomialSpace {
- println(2 - 3 * x + x * x == polynomial6)
- println(2.0 - 3.0 * x + x * x == polynomial6)
- }
-
- // Also there are some utilities for polynomials:
- println(polynomial1.substitute(Int.algebra, 1) == 0) // true, because 2 + (-3) * 1 + 1^2 = 0
- println(polynomial1.substitute(Int.algebra, polynomial5) == polynomial1) // true, because 2 + (-3) * (3-x) + (3-x)^2 = 2 - 3x + x^2
- println(polynomial1.derivative(Int.algebra) == ListPolynomial(-3, 2)) // true, (2 - 3x + x^2)' = -3 + 2x
- println(polynomial1.nthDerivative(Int.algebra, 2) == 2.asListPolynomial()) // true, (2 - 3x + x^2)'' = 2
-
- // Lastly, there are rational functions and some other utilities:
- Double.algebra.listRationalFunctionSpace {
- val rationalFunction1: ListRationalFunction = ListRationalFunction(listOf(2.0, -3.0, 1.0), listOf(3.0, -1.0))
- // It's just (2 - 3x + x^2)/(3 - x)
-
- val rationalFunction2 : ListRationalFunction = ListRationalFunction(listOf(5.0, -4.0, 1.0), listOf(3.0, -1.0))
- // It's just (5 - 4x + x^2)/(3 - x)
-
- println(rationalFunction1 + 1 == rationalFunction2)
- }
-}
-
-/**
- * Shows [NumberedPolynomial]s' and [NumberedRationalFunction]s' capabilities.
- */
-fun numberedPolynomialsExample() {
- // Consider polynomial
- // 3 + 5 x_2 - 7 x_1^2 x_3
- // Consider, for example, its term -7 x_1^2 x_3. -7 is a coefficient of the term, whereas (2, 0, 1, 0, 0, ...) is
- // description of degrees of variables x_1, x_2, ... in the term. Such description with removed leading zeros
- // [2, 0, 1] is called "signature" of the term -7 x_1^2 x_3.
-
- val polynomial1: NumberedPolynomial
- with(Int.algebra) {
- // [NumberedPolynomial] is a representation of a multivariate polynomial, that stores terms in a map with terms'
- // signatures as the map's keys and terms' coefficients as corresponding values. For example,
- polynomial1 = NumberedPolynomial(
- mapOf(
- listOf() to 3,
- listOf(0u, 1u) to 5,
- listOf(2u, 0u, 1u) to -7,
- )
- )
- // represents polynomial 3 + 5 x_2 - 7 x_1^2 x_3
-
- // This `NumberedPolynomial` function needs context of either ring of constant (as `Int.algebra` in this example)
- // or space of NumberedPolynomials over it. To understand why it is like this see documentations of functions
- // NumberedPolynomial and NumberedPolynomialWithoutCheck
-
- // There are also shortcut fabrics:
- val polynomial2: NumberedPolynomial = NumberedPolynomial(
- listOf() to 3,
- listOf(0u, 1u) to 5,
- listOf(2u, 0u, 1u) to -7,
- )
- println(polynomial1 == polynomial2) // true
- // and even
- val polynomial3: NumberedPolynomial = 57.asNumberedPolynomial() // This one actually does not algebraic context!
- val polynomial4: NumberedPolynomial = NumberedPolynomial(listOf() to 57)
- println(polynomial3 == polynomial4) // true
-
- numberedPolynomialSpace {
- // Also there is DSL for constructing NumberedPolynomials:
- val polynomial5: NumberedPolynomial = NumberedPolynomialDSL1 {
- 3 {}
- 5 { 1 inPowerOf 1u }
- -7 with { 0 pow 2u; 2 pow 1u }
- // `pow` and `inPowerOf` are the same
- // `with` is omittable
- }
- println(polynomial1 == polynomial5) // true
-
- // Unfortunately the DSL does not work good in bare context of constants' ring, so for now it's disabled and
- // works only in NumberedPolynomialSpace and NumberedRationalFunctionSpace
- }
- }
-
- val polynomial6: NumberedPolynomial = Int.algebra {
- NumberedPolynomial(
- listOf() to 7,
- listOf(0u, 1u) to -5,
- listOf(2u, 0u, 1u) to 0,
- listOf(0u, 0u, 0u, 4u) to 4,
- )
- }
- // For every ring there can be provided a polynomial ring:
- Int.algebra.numberedPolynomialSpace {
- println(
- -polynomial6 == NumberedPolynomial(
- listOf() to -7,
- listOf(0u, 1u) to 5,
- listOf(2u, 0u, 1u) to 0,
- listOf(0u, 0u, 0u, 4u) to (-4),
- )
- ) // true
- println(
- polynomial1 + polynomial6 == NumberedPolynomial(
- listOf() to 10,
- listOf(0u, 1u) to 0,
- listOf(2u, 0u, 1u) to -7,
- listOf(0u, 0u, 0u, 4u) to 4,
- )
- ) // true
- println(
- polynomial1 - polynomial6 == NumberedPolynomial(
- listOf() to -4,
- listOf(0u, 1u) to 10,
- listOf(2u, 0u, 1u) to -7,
- listOf(0u, 0u, 0u, 4u) to -4,
- )
- ) // true
-
- polynomial1 * polynomial6 // Multiplication works too
- }
-
- Double.algebra.numberedPolynomialSpace {
- // You can even write
- val x_1: NumberedPolynomial = NumberedPolynomial(listOf(1u) to 1.0)
- val x_2: NumberedPolynomial = NumberedPolynomial(listOf(0u, 1u) to 1.0)
- val x_3: NumberedPolynomial = NumberedPolynomial(listOf(0u, 0u, 1u) to 1.0)
- val polynomial7: NumberedPolynomial = NumberedPolynomial(
- listOf() to 3.0,
- listOf(0u, 1u) to 5.0,
- listOf(2u, 0u, 1u) to -7.0,
- )
- Double.algebra.listPolynomialSpace {
- println(3 + 5 * x_2 - 7 * x_1 * x_1 * x_3 == polynomial7)
- println(3.0 + 5.0 * x_2 - 7.0 * x_1 * x_1 * x_3 == polynomial7)
- }
- }
-
- Int.algebra.numberedPolynomialSpace {
- val x_4: NumberedPolynomial = NumberedPolynomial(listOf(0u, 0u, 0u, 4u) to 1)
- // Also there are some utilities for polynomials:
- println(polynomial1.substitute(mapOf(0 to 1, 1 to -2, 2 to -1)) == 0.asNumberedPolynomial()) // true,
- // because it's substitution x_1 -> 1, x_2 -> -2, x_3 -> -1,
- // so 3 + 5 x_2 - 7 x_1^2 x_3 = 3 + 5 * (-2) - 7 * 1^2 * (-1) = 3 - 10 + 7 = 0
- println(
- polynomial1.substitute(mapOf(1 to x_4)) == NumberedPolynomial(
- listOf() to 3,
- listOf(0u, 1u) to 5,
- listOf(2u, 0u, 1u) to -7,
- )
- ) // true, because it's substitution x_2 -> x_4, so result is 3 + 5 x_4 - 7 x_1^2 x_3
- println(
- polynomial1.derivativeWithRespectTo(Int.algebra, 1) ==
- NumberedPolynomial(listOf() to 5)
- ) // true, d/dx_2 (3 + 5 x_2 - 7 x_1^2 x_3) = 5
- }
-
- // Lastly, there are rational functions and some other utilities:
- Double.algebra.numberedRationalFunctionSpace {
- val rationalFunction1: NumberedRationalFunction = NumberedRationalFunction(
- NumberedPolynomial(
- listOf() to 2.0,
- listOf(1u) to -3.0,
- listOf(2u) to 1.0,
- ),
- NumberedPolynomial(
- listOf() to 3.0,
- listOf(1u) to -1.0,
- )
- )
- // It's just (2 - 3x + x^2)/(3 - x) where x = x_1
-
- val rationalFunction2: NumberedRationalFunction = NumberedRationalFunction(
- NumberedPolynomial(
- listOf() to 5.0,
- listOf(1u) to -4.0,
- listOf(2u) to 1.0,
- ),
- NumberedPolynomial(
- listOf() to 3.0,
- listOf(1u) to -1.0,
- )
- )
- // It's just (5 - 4x + x^2)/(3 - x) where x = x_1
-
- println(rationalFunction1 + 1 == rationalFunction2)
- }
-}
-
-/**
- * Shows [LabeledPolynomial]s' and [LabeledRationalFunction]s' capabilities.
- */
-fun labeledPolynomialsExample() {
- val x by symbol
- val y by symbol
- val z by symbol
- val t by symbol
-
- // Consider polynomial
- // 3 + 5 y - 7 x^2 z
- // Consider, for example, its term -7 x^2 z. -7 is a coefficient of the term, whereas matching (x -> 2, z -> 3) is
- // description of degrees of variables x_1, x_2, ... in the term. Such description is called "signature" of the
- // term -7 x_1^2 x_3.
-
- val polynomial1: LabeledPolynomial
- with(Int.algebra) {
- // [LabeledPolynomial] is a representation of a multivariate polynomial, that stores terms in a map with terms'
- // signatures as the map's keys and terms' coefficients as corresponding values. For example,
- polynomial1 = LabeledPolynomial(
- mapOf(
- mapOf() to 3,
- mapOf(y to 1u) to 5,
- mapOf(x to 2u, z to 1u) to -7,
- )
- )
- // represents polynomial 3 + 5 y - 7 x^2 z
-
- // This `LabeledPolynomial` function needs context of either ring of constant (as `Int.algebra` in this example)
- // or space of LabeledPolynomials over it. To understand why it is like this see documentations of functions
- // LabeledPolynomial and LabeledPolynomialWithoutCheck
-
- // There are also shortcut fabrics:
- val polynomial2: LabeledPolynomial = LabeledPolynomial(
- mapOf() to 3,
- mapOf(y to 1u) to 5,
- mapOf(x to 2u, z to 1u) to -7,
- )
- println(polynomial1 == polynomial2) // true
- // and even
- val polynomial3: LabeledPolynomial = 57.asLabeledPolynomial() // This one actually does not algebraic context!
- val polynomial4: LabeledPolynomial = LabeledPolynomial(mapOf() to 57)
- println(polynomial3 == polynomial4) // true
-
- labeledPolynomialSpace {
- // Also there is DSL for constructing NumberedPolynomials:
- val polynomial5: LabeledPolynomial = LabeledPolynomialDSL1 {
- 3 {}
- 5 { y inPowerOf 1u }
- -7 with { x pow 2u; z pow 1u }
- // `pow` and `inPowerOf` are the same
- // `with` is omittable
- }
- println(polynomial1 == polynomial5) // true
-
- // Unfortunately the DSL does not work good in bare context of constants' ring, so for now it's disabled and
- // works only in NumberedPolynomialSpace and NumberedRationalFunctionSpace
- }
- }
-
- val polynomial6: LabeledPolynomial = Int.algebra {
- LabeledPolynomial(
- mapOf() to 7,
- mapOf(y to 1u) to -5,
- mapOf(x to 2u, z to 1u) to 0,
- mapOf(t to 4u) to 4,
- )
- }
- // For every ring there can be provided a polynomial ring:
- Int.algebra.labeledPolynomialSpace {
- println(
- -polynomial6 == LabeledPolynomial(
- mapOf() to -7,
- mapOf(y to 1u) to 5,
- mapOf(x to 2u, z to 1u) to 0,
- mapOf(t to 4u) to -4,
- )
- ) // true
- println(
- polynomial1 + polynomial6 == LabeledPolynomial(
- mapOf() to 10,
- mapOf(y to 1u) to 0,
- mapOf(x to 2u, z to 1u) to -7,
- mapOf(t to 4u) to 4,
- )
- ) // true
- println(
- polynomial1 - polynomial6 == LabeledPolynomial(
- mapOf() to -4,
- mapOf(y to 1u) to 10,
- mapOf(x to 2u, z to 1u) to -7,
- mapOf(t to 4u) to -4,
- )
- ) // true
-
- polynomial1 * polynomial6 // Multiplication works too
- }
-
- Double.algebra.labeledPolynomialSpace {
- // You can even write
- val polynomial7: LabeledPolynomial = LabeledPolynomial(
- mapOf() to 3.0,
- mapOf(y to 1u) to 5.0,
- mapOf(x to 2u, z to 1u) to -7.0,
- )
- Double.algebra.listPolynomialSpace {
- println(3 + 5 * y - 7 * x * x * z == polynomial7)
- println(3.0 + 5.0 * y - 7.0 * x * x * z == polynomial7)
- }
- }
-
- Int.algebra.labeledPolynomialSpace {
- // Also there are some utilities for polynomials:
- println(polynomial1.substitute(mapOf(x to 1, y to -2, z to -1)) == 0.asLabeledPolynomial()) // true,
- // because it's substitution x -> 1, y -> -2, z -> -1,
- // so 3 + 5 y - 7 x^2 z = 3 + 5 * (-2) - 7 * 1^2 * (-1) = 3 - 10 + 7 = 0
- println(
- polynomial1.substitute(mapOf(y to t.asPolynomial())) == LabeledPolynomial(
- mapOf() to 3,
- mapOf(t to 1u) to 5,
- mapOf(x to 2u, z to 1u) to -7,
- )
- ) // true, because it's substitution y -> t, so result is 3 + 5 t - 7 x^2 z
- println(
- polynomial1.derivativeWithRespectTo(Int.algebra, y) == LabeledPolynomial(mapOf() to 5)
- ) // true, d/dy (3 + 5 y - 7 x^2 z) = 5
- }
-
- // Lastly, there are rational functions and some other utilities:
- Double.algebra.labeledRationalFunctionSpace {
- val rationalFunction1: LabeledRationalFunction = LabeledRationalFunction(
- LabeledPolynomial(
- mapOf() to 2.0,
- mapOf(x to 1u) to -3.0,
- mapOf(x to 2u) to 1.0,
- ),
- LabeledPolynomial(
- mapOf() to 3.0,
- mapOf(x to 1u) to -1.0,
- )
- )
- // It's just (2 - 3x + x^2)/(3 - x)
-
- val rationalFunction2: LabeledRationalFunction = LabeledRationalFunction(
- LabeledPolynomial(
- mapOf() to 5.0,
- mapOf(x to 1u) to -4.0,
- mapOf(x to 2u) to 1.0,
- ),
- LabeledPolynomial(
- mapOf() to 3.0,
- mapOf(x to 1u) to -1.0,
- )
- )
- // It's just (5 - 4x + x^2)/(3 - x)
-
- println(rationalFunction1 + 1 == rationalFunction2)
- }
-}
-
-fun main() {
- println("ListPolynomials:")
- listPolynomialsExample()
- println()
-
- println("NumberedPolynomials:")
- numberedPolynomialsExample()
- println()
-
- println("ListPolynomials:")
- labeledPolynomialsExample()
- println()
-}
diff --git a/examples/src/main/kotlin/space/kscience/kmath/jafama/JafamaDemo.kt b/examples/src/main/kotlin/space/kscience/kmath/jafama/JafamaDemo.kt
index 9c3d0fdbe..52451e9f3 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/jafama/JafamaDemo.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/jafama/JafamaDemo.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/linear/dotPerformance.kt b/examples/src/main/kotlin/space/kscience/kmath/linear/dotPerformance.kt
index a2d7d7c27..79eddc6c3 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/linear/dotPerformance.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/linear/dotPerformance.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -7,8 +7,10 @@ package space.kscience.kmath.linear
import space.kscience.kmath.operations.algebra
import kotlin.random.Random
-import kotlin.system.measureTimeMillis
+import kotlin.time.ExperimentalTime
+import kotlin.time.measureTime
+@OptIn(ExperimentalTime::class)
fun main() {
val random = Random(12224)
val dim = 1000
@@ -21,7 +23,7 @@ fun main() {
if (i <= j) random.nextDouble() else 0.0
}
- val time = measureTimeMillis {
+ val time = measureTime {
with(Double.algebra.linearSpace) {
repeat(10) {
matrix1 dot matrix2
diff --git a/examples/src/main/kotlin/space/kscience/kmath/linear/gradient.kt b/examples/src/main/kotlin/space/kscience/kmath/linear/gradient.kt
index a01ea7fe2..52ed8f05f 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/linear/gradient.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/linear/gradient.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/operations/BigIntDemo.kt b/examples/src/main/kotlin/space/kscience/kmath/operations/BigIntDemo.kt
index 51f439612..2447d06ed 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/operations/BigIntDemo.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/operations/BigIntDemo.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/operations/complexDemo.kt b/examples/src/main/kotlin/space/kscience/kmath/operations/complexDemo.kt
index 285b8d000..77cfca4ae 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/operations/complexDemo.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/operations/complexDemo.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/operations/mixedNDOperations.kt b/examples/src/main/kotlin/space/kscience/kmath/operations/mixedNDOperations.kt
index 62c9c8076..4a5d783e1 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/operations/mixedNDOperations.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/operations/mixedNDOperations.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -8,14 +8,14 @@ package space.kscience.kmath.operations
import space.kscience.kmath.commons.linear.CMLinearSpace
import space.kscience.kmath.linear.matrix
import space.kscience.kmath.nd.DoubleBufferND
-import space.kscience.kmath.nd.Shape
+import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.nd.Structure2D
import space.kscience.kmath.nd.ndAlgebra
import space.kscience.kmath.viktor.ViktorStructureND
import space.kscience.kmath.viktor.viktorAlgebra
fun main() {
- val viktorStructure: ViktorStructureND = DoubleField.viktorAlgebra.structureND(Shape(2, 2)) { (i, j) ->
+ val viktorStructure: ViktorStructureND = DoubleField.viktorAlgebra.structureND(ShapeND(2, 2)) { (i, j) ->
if (i == j) 2.0 else 0.0
}
diff --git a/examples/src/main/kotlin/space/kscience/kmath/series/DateTimeSeries.kt b/examples/src/main/kotlin/space/kscience/kmath/series/DateTimeSeries.kt
new file mode 100644
index 000000000..ca10fc290
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/series/DateTimeSeries.kt
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.series
+
+import kotlinx.datetime.Instant
+import space.kscience.kmath.operations.algebra
+import space.kscience.kmath.operations.bufferAlgebra
+import kotlin.time.Duration
+
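+// Builds a series algebra whose labels are instants: sample offset i is mapped to `zero + step * i`,
+// and a label is mapped back to a (fractional) offset by dividing the elapsed time by the step.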
+fun SeriesAlgebra.Companion.time(zero: Instant, step: Duration) = MonotonicSeriesAlgebra(
+ bufferAlgebra = Double.algebra.bufferAlgebra,
+ offsetToLabel = { zero + step * it },
+ labelToOffset = { (it - zero) / step }
+)
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/series/analyzeDif.kt b/examples/src/main/kotlin/space/kscience/kmath/series/analyzeDif.kt
new file mode 100644
index 000000000..0e10f1a9a
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/series/analyzeDif.kt
@@ -0,0 +1,56 @@
+package space.kscience.kmath.series
+
+
+import kotlinx.html.h1
+import kotlinx.html.p
+import space.kscience.kmath.operations.algebra
+import space.kscience.kmath.operations.bufferAlgebra
+import space.kscience.kmath.operations.toList
+import space.kscience.kmath.stat.KMComparisonResult
+import space.kscience.kmath.stat.ksComparisonStatistic
+import space.kscience.kmath.structures.Buffer
+import space.kscience.kmath.structures.slice
+import space.kscience.plotly.*
+import kotlin.math.PI
+
+fun main() = with(Double.algebra.bufferAlgebra.seriesAlgebra()) {
+
+
+ fun Plot.plotSeries(name: String, buffer: Buffer<Double>) {
+ scatter {
+ this.name = name
+ x.numbers = buffer.labels
+ y.numbers = buffer.toList()
+ }
+ }
+
+
+ val s1 = series(100) { sin(2 * PI * it / 100) + 1.0 }
+
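+ // Take the 20..50 window of s1 and move it so that it starts at offset 40.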
+ val s2 = s1.slice(20..50).moveTo(40)
+
+ val s3: Buffer<Double> = s1.zip(s2) { l, r -> l + r } //s1 + s2
+ val s4 = s3.map { ln(it) }
+
+ val kmTest: KMComparisonResult = ksComparisonStatistic(s1, s2)
+
+ Plotly.page {
+ h1 { +"This is my plot" }
+ p {
+ +"Kolmogorov-Smirnov test for s1 and s2: ${kmTest.value}"
+ }
+ plot {
+ plotSeries("s1", s1)
+ plotSeries("s2", s2)
+ plotSeries("s3", s3)
+ plotSeries("s4", s4)
+ layout {
+ xaxis {
+ range(0.0..100.0)
+ }
+ }
+ }
+
+ }.makeFile()
+
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/stat/DistributionBenchmark.kt b/examples/src/main/kotlin/space/kscience/kmath/stat/DistributionBenchmark.kt
index 8e3cdf86f..031955e15 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/stat/DistributionBenchmark.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/stat/DistributionBenchmark.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -10,6 +10,7 @@ import kotlinx.coroutines.async
import kotlinx.coroutines.runBlocking
import org.apache.commons.rng.sampling.distribution.BoxMullerNormalizedGaussianSampler
import org.apache.commons.rng.simple.RandomSource
+import space.kscience.kmath.random.RandomGenerator
import space.kscience.kmath.samplers.GaussianSampler
import java.time.Duration
import java.time.Instant
@@ -35,7 +36,7 @@ private suspend fun runKMathChained(): Duration {
return Duration.between(startTime, Instant.now())
}
-private fun runApacheDirect(): Duration {
+private fun runCMDirect(): Duration {
val rng = RandomSource.create(RandomSource.MT, 123L)
val sampler = CMGaussianSampler.of(
@@ -64,7 +65,7 @@ private fun runApacheDirect(): Duration {
* Comparing chain sampling performance with direct sampling performance
*/
fun main(): Unit = runBlocking(Dispatchers.Default) {
- val directJob = async { runApacheDirect() }
+ val directJob = async { runCMDirect() }
val chainJob = async { runKMathChained() }
println("KMath Chained: ${chainJob.await()}")
println("Apache Direct: ${directJob.await()}")
diff --git a/examples/src/main/kotlin/space/kscience/kmath/stat/DistributionDemo.kt b/examples/src/main/kotlin/space/kscience/kmath/stat/DistributionDemo.kt
index 15654971f..8e6d096ed 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/stat/DistributionDemo.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/stat/DistributionDemo.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -9,6 +9,7 @@ import kotlinx.coroutines.runBlocking
import space.kscience.kmath.chains.Chain
import space.kscience.kmath.chains.combineWithState
import space.kscience.kmath.distributions.NormalDistribution
+import space.kscience.kmath.random.RandomGenerator
private data class AveragingChainState(var num: Int = 0, var value: Double = 0.0)
diff --git a/examples/src/main/kotlin/space/kscience/kmath/structures/ComplexND.kt b/examples/src/main/kotlin/space/kscience/kmath/structures/ComplexND.kt
index d55f3df09..86d7c0d89 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/structures/ComplexND.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/structures/ComplexND.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/structures/NDField.kt b/examples/src/main/kotlin/space/kscience/kmath/structures/NDField.kt
index d6ff1dceb..ba8f047a8 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/structures/NDField.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/structures/NDField.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -29,7 +29,7 @@ fun main() {
Nd4j.zeros(0)
val dim = 1000
val n = 1000
- val shape = Shape(dim, dim)
+ val shape = ShapeND(dim, dim)
// specialized nd-field for Double. It works as generic Double field as well.
diff --git a/examples/src/main/kotlin/space/kscience/kmath/structures/StreamDoubleFieldND.kt b/examples/src/main/kotlin/space/kscience/kmath/structures/StreamDoubleFieldND.kt
index 548fb16c1..2ce2c21a6 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/structures/StreamDoubleFieldND.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/structures/StreamDoubleFieldND.kt
@@ -1,10 +1,11 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.structures
+import space.kscience.kmath.PerformancePitfall
import space.kscience.kmath.nd.*
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.ExtendedField
@@ -16,11 +17,11 @@ import java.util.stream.IntStream
* A demonstration implementation of NDField over Real using Java [java.util.stream.DoubleStream] for parallel
* execution.
*/
-class StreamDoubleFieldND(override val shape: IntArray) : FieldND,
+class StreamDoubleFieldND(override val shape: ShapeND) : FieldND,
NumbersAddOps>,
ExtendedField> {
- private val strides = DefaultStrides(shape)
+ private val strides = ColumnStrides(shape)
override val elementAlgebra: DoubleField get() = DoubleField
override val zero: BufferND by lazy { structureND(shape) { zero } }
override val one: BufferND by lazy { structureND(shape) { one } }
@@ -30,17 +31,19 @@ class StreamDoubleFieldND(override val shape: IntArray) : FieldND.buffer: DoubleBuffer
get() = when {
!shape.contentEquals(this@StreamDoubleFieldND.shape) -> throw ShapeMismatchException(
this@StreamDoubleFieldND.shape,
shape
)
- this is BufferND && this.indices == this@StreamDoubleFieldND.strides -> this.buffer as DoubleBuffer
+
+ this is BufferND && indices == this@StreamDoubleFieldND.strides -> this.buffer as DoubleBuffer
else -> DoubleBuffer(strides.linearSize) { offset -> get(strides.index(offset)) }
}
- override fun structureND(shape: Shape, initializer: DoubleField.(IntArray) -> Double): BufferND {
+ override fun structureND(shape: ShapeND, initializer: DoubleField.(IntArray) -> Double): BufferND {
val array = IntStream.range(0, strides.linearSize).parallel().mapToDouble { offset ->
val index = strides.index(offset)
DoubleField.initializer(index)
@@ -49,6 +52,7 @@ class StreamDoubleFieldND(override val shape: IntArray) : FieldND.map(
transform: DoubleField.(Double) -> Double,
): BufferND {
@@ -56,6 +60,7 @@ class StreamDoubleFieldND(override val shape: IntArray) : FieldND.mapIndexed(
transform: DoubleField.(index: IntArray, Double) -> Double,
): BufferND {
@@ -69,6 +74,7 @@ class StreamDoubleFieldND(override val shape: IntArray) : FieldND,
right: StructureND,
@@ -105,4 +111,4 @@ class StreamDoubleFieldND(override val shape: IntArray) : FieldND): BufferND = arg.map { atanh(it) }
}
-fun DoubleField.ndStreaming(vararg shape: Int): StreamDoubleFieldND = StreamDoubleFieldND(shape)
+fun DoubleField.ndStreaming(vararg shape: Int): StreamDoubleFieldND = StreamDoubleFieldND(ShapeND(shape))
diff --git a/examples/src/main/kotlin/space/kscience/kmath/structures/StructureReadBenchmark.kt b/examples/src/main/kotlin/space/kscience/kmath/structures/StructureReadBenchmark.kt
index de36c664d..e6ff0ee28 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/structures/StructureReadBenchmark.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/structures/StructureReadBenchmark.kt
@@ -1,20 +1,23 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.structures
+import space.kscience.kmath.PerformancePitfall
import space.kscience.kmath.nd.BufferND
-import space.kscience.kmath.nd.DefaultStrides
+import space.kscience.kmath.nd.ColumnStrides
+import space.kscience.kmath.nd.ShapeND
import kotlin.system.measureTimeMillis
@Suppress("ASSIGNED_BUT_NEVER_ACCESSED_VARIABLE")
+@OptIn(PerformancePitfall::class)
fun main() {
val n = 6000
val array = DoubleArray(n * n) { 1.0 }
val buffer = DoubleBuffer(array)
- val strides = DefaultStrides(intArrayOf(n, n))
+ val strides = ColumnStrides(ShapeND(n, n))
val structure = BufferND(strides, buffer)
measureTimeMillis {
diff --git a/examples/src/main/kotlin/space/kscience/kmath/structures/StructureWriteBenchmark.kt b/examples/src/main/kotlin/space/kscience/kmath/structures/StructureWriteBenchmark.kt
index dea7095a8..14c058417 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/structures/StructureWriteBenchmark.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/structures/StructureWriteBenchmark.kt
@@ -1,20 +1,27 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.structures
+import space.kscience.kmath.nd.BufferND
+import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.nd.StructureND
-import space.kscience.kmath.nd.mapToBuffer
+import space.kscience.kmath.operations.mapToBuffer
import kotlin.system.measureTimeMillis
+private inline fun BufferND.mapToBufferND(
+ bufferFactory: BufferFactory = BufferFactory.auto(),
+ crossinline block: (T) -> R,
+): BufferND = BufferND(indices, buffer.mapToBuffer(bufferFactory, block))
+
@Suppress("UNUSED_VARIABLE")
fun main() {
val n = 6000
- val structure = StructureND.buffered(intArrayOf(n, n), Buffer.Companion::auto) { 1.0 }
- structure.mapToBuffer { it + 1 } // warm-up
- val time1 = measureTimeMillis { val res = structure.mapToBuffer { it + 1 } }
+ val structure = StructureND.buffered(ShapeND(n, n), Buffer.Companion::auto) { 1.0 }
+ structure.mapToBufferND { it + 1 } // warm-up
+ val time1 = measureTimeMillis { val res = structure.mapToBufferND { it + 1 } }
println("Structure mapping finished in $time1 millis")
val array = DoubleArray(n * n) { 1.0 }
diff --git a/examples/src/main/kotlin/space/kscience/kmath/structures/buffers.kt b/examples/src/main/kotlin/space/kscience/kmath/structures/buffers.kt
index 889ea99bd..2ac0dc6a4 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/structures/buffers.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/structures/buffers.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/structures/typeSafeDimensions.kt b/examples/src/main/kotlin/space/kscience/kmath/structures/typeSafeDimensions.kt
index c28b566b9..1ba0e3503 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/structures/typeSafeDimensions.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/structures/typeSafeDimensions.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticDifficultTest.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticDifficultTest.kt
new file mode 100644
index 000000000..e6f575262
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticDifficultTest.kt
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.LevenbergMarquardt.StaticLm
+
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.nd.component1
+import space.kscience.kmath.tensors.LevenbergMarquardt.funcDifficultForLm
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.LMInput
+import space.kscience.kmath.tensors.core.levenbergMarquardt
+import kotlin.math.roundToInt
+
+fun main() {
+ val NData = 200
+ var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
+ for (i in 0 until NData) {
+ t_example[i, 0] = t_example[i, 0] * (i + 1) - 104
+ }
+
+ val Nparams = 15
+ var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
+ for (i in 0 until Nparams) {
+ p_example[i, 0] = p_example[i, 0] + i - 25
+ }
+
+ val exampleNumber = 1
+
+ var y_hat = funcDifficultForLm(t_example, p_example, exampleNumber)
+
+ var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
+ for (i in 0 until Nparams) {
+ p_init[i, 0] = (p_example[i, 0] + 0.9)
+ }
+
+ var t = t_example
+ val y_dat = y_hat
+ val weight = 1.0 / Nparams * 1.0 - 0.085
+ val dp = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
+ ).as2D()
+ var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+ p_min = p_min.div(1.0 / -50.0)
+ var p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+ p_max = p_max.div(1.0 / 50.0)
+ val opts = doubleArrayOf(3.0, 10000.0, 1e-6, 1e-6, 1e-6, 1e-6, 1e-2, 11.0, 9.0, 1.0)
+// val opts = doubleArrayOf(3.0, 10000.0, 1e-6, 1e-6, 1e-6, 1e-6, 1e-3, 11.0, 9.0, 1.0)
+
+ val inputData = LMInput(::funcDifficultForLm,
+ p_init.as2D(),
+ t,
+ y_dat,
+ weight,
+ dp,
+ p_min.as2D(),
+ p_max.as2D(),
+ opts[1].toInt(),
+ doubleArrayOf(opts[2], opts[3], opts[4], opts[5]),
+ doubleArrayOf(opts[6], opts[7], opts[8]),
+ opts[9].toInt(),
+ 10,
+ 1)
+
+ val result = DoubleTensorAlgebra.levenbergMarquardt(inputData)
+
+ println("Parameters:")
+ for (i in 0 until result.resultParameters.shape.component1()) {
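+ // Round to four decimal places for printing.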
+ val x = (result.resultParameters[i, 0] * 10000).roundToInt() / 10000.0
+ print("$x ")
+ }
+ println()
+
+ println("Y true and y received:")
+ var y_hat_after = funcDifficultForLm(t_example, result.resultParameters, exampleNumber)
+ for (i in 0 until y_hat.shape.component1()) {
+ val x = (y_hat[i, 0] * 10000).roundToInt() / 10000.0
+ val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
+ println("$x $y")
+ }
+
+ println("Сhi_sq:")
+ println(result.resultChiSq)
+ println("Number of iterations:")
+ println(result.iterations)
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticEasyTest.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticEasyTest.kt
new file mode 100644
index 000000000..507943031
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticEasyTest.kt
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.LevenbergMarquardt.StaticLm
+
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.nd.component1
+import space.kscience.kmath.tensors.LevenbergMarquardt.funcDifficultForLm
+import space.kscience.kmath.tensors.LevenbergMarquardt.funcEasyForLm
+import space.kscience.kmath.tensors.LevenbergMarquardt.getStartDataForFuncEasy
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.LMInput
+import space.kscience.kmath.tensors.core.levenbergMarquardt
+import kotlin.math.roundToInt
+
+fun main() {
+ val startedData = getStartDataForFuncEasy()
+ val inputData = LMInput(::funcEasyForLm,
+ DoubleTensorAlgebra.ones(ShapeND(intArrayOf(4, 1))).as2D(),
+ startedData.t,
+ startedData.y_dat,
+ startedData.weight,
+ startedData.dp,
+ startedData.p_min,
+ startedData.p_max,
+ startedData.opts[1].toInt(),
+ doubleArrayOf(startedData.opts[2], startedData.opts[3], startedData.opts[4], startedData.opts[5]),
+ doubleArrayOf(startedData.opts[6], startedData.opts[7], startedData.opts[8]),
+ startedData.opts[9].toInt(),
+ 10,
+ startedData.example_number)
+
+ val result = DoubleTensorAlgebra.levenbergMarquardt(inputData)
+
+ println("Parameters:")
+ for (i in 0 until result.resultParameters.shape.component1()) {
+ val x = (result.resultParameters[i, 0] * 10000).roundToInt() / 10000.0
+ print("$x ")
+ }
+ println()
+
+ println("Y true and y received:")
+ var y_hat_after = funcEasyForLm(startedData.t, result.resultParameters, startedData.example_number)
+ for (i in 0 until startedData.y_dat.shape.component1()) {
+ val x = (startedData.y_dat[i, 0] * 10000).roundToInt() / 10000.0
+ val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
+ println("$x $y")
+ }
+
+ println("Сhi_sq:")
+ println(result.resultChiSq)
+ println("Number of iterations:")
+ println(result.iterations)
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticMiddleTest.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticMiddleTest.kt
new file mode 100644
index 000000000..0659db103
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticMiddleTest.kt
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.LevenbergMarquardt.StaticLm
+
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.nd.component1
+import space.kscience.kmath.tensors.LevenbergMarquardt.funcMiddleForLm
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.LMInput
+import space.kscience.kmath.tensors.core.levenbergMarquardt
+import kotlin.math.roundToInt
+fun main() {
+ val NData = 100
+ var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
+ for (i in 0 until NData) {
+ t_example[i, 0] = t_example[i, 0] * (i + 1)
+ }
+
+ val Nparams = 20
+ var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
+ for (i in 0 until Nparams) {
+ p_example[i, 0] = p_example[i, 0] + i - 25
+ }
+
+ val exampleNumber = 1
+
+ var y_hat = funcMiddleForLm(t_example, p_example, exampleNumber)
+
+ var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
+ for (i in 0 until Nparams) {
+ p_init[i, 0] = (p_example[i, 0] + 0.9)
+ }
+
+ var t = t_example
+ val y_dat = y_hat
+ val weight = 1.0
+ val dp = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
+ ).as2D()
+ var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+ p_min = p_min.div(1.0 / -50.0)
+ var p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+ p_max = p_max.div(1.0 / 50.0)
+ val opts = doubleArrayOf(3.0, 7000.0, 1e-5, 1e-5, 1e-5, 1e-5, 1e-5, 11.0, 9.0, 1.0)
+
+ val inputData = LMInput(::funcMiddleForLm,
+ p_init.as2D(),
+ t,
+ y_dat,
+ weight,
+ dp,
+ p_min.as2D(),
+ p_max.as2D(),
+ opts[1].toInt(),
+ doubleArrayOf(opts[2], opts[3], opts[4], opts[5]),
+ doubleArrayOf(opts[6], opts[7], opts[8]),
+ opts[9].toInt(),
+ 10,
+ 1)
+
+ val result = DoubleTensorAlgebra.levenbergMarquardt(inputData)
+
+ println("Parameters:")
+ for (i in 0 until result.resultParameters.shape.component1()) {
+ val x = (result.resultParameters[i, 0] * 10000).roundToInt() / 10000.0
+ print("$x ")
+ }
+ println()
+
+ println("Y true and y received:")
+ var y_hat_after = funcMiddleForLm(t_example, result.resultParameters, exampleNumber)
+ for (i in 0 until y_hat.shape.component1()) {
+ val x = (y_hat[i, 0] * 10000).roundToInt() / 10000.0
+ val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
+ println("$x $y")
+ }
+
+ println("Сhi_sq:")
+ println(result.resultChiSq)
+ println("Number of iterations:")
+ println(result.iterations)
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamLm.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamLm.kt
new file mode 100644
index 000000000..b2818ef2a
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamLm.kt
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.LevenbergMarquardt.StreamingLm
+
+import kotlinx.coroutines.delay
+import kotlinx.coroutines.flow.*
+import space.kscience.kmath.nd.*
+import space.kscience.kmath.tensors.LevenbergMarquardt.StartDataLm
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.zeros
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.LMInput
+import space.kscience.kmath.tensors.core.levenbergMarquardt
+import kotlin.random.Random
+import kotlin.reflect.KFunction3
+
+fun streamLm(lm_func: (MutableStructure2D<Double>, MutableStructure2D<Double>, Int) -> (MutableStructure2D<Double>),
+ startData: StartDataLm, launchFrequencyInMs: Long, numberOfLaunches: Int): Flow<MutableStructure2D<Double>> = flow{
+
+ var example_number = startData.example_number
+ var p_init = startData.p_init
+ var t = startData.t
+ var y_dat = startData.y_dat
+ val weight = startData.weight
+ val dp = startData.dp
+ val p_min = startData.p_min
+ val p_max = startData.p_max
+ val opts = startData.opts
+
+ var steps = numberOfLaunches
+ val isEndless = (steps <= 0)
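+ // A non-positive numberOfLaunches keeps the flow emitting indefinitely.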
+
+ val inputData = LMInput(lm_func,
+ p_init,
+ t,
+ y_dat,
+ weight,
+ dp,
+ p_min,
+ p_max,
+ opts[1].toInt(),
+ doubleArrayOf(opts[2], opts[3], opts[4], opts[5]),
+ doubleArrayOf(opts[6], opts[7], opts[8]),
+ opts[9].toInt(),
+ 10,
+ example_number)
+
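+ // Each launch runs LM on the current data, emits the fitted parameters, then simulates
+ // fresh noisy observations and warm-starts the next run from the previous solution.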
+ while (isEndless || steps > 0) {
+ val result = DoubleTensorAlgebra.levenbergMarquardt(inputData)
+ emit(result.resultParameters)
+ delay(launchFrequencyInMs)
+ inputData.realValues = generateNewYDat(y_dat, 0.1)
+ inputData.startParameters = result.resultParameters
+ if (!isEndless) steps -= 1
+ }
+}
+
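+/** Returns a copy of [y_dat] with every entry perturbed by uniform noise from [-delta, delta). */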
+fun generateNewYDat(y_dat: MutableStructure2D<Double>, delta: Double): MutableStructure2D<Double>{
+ val n = y_dat.shape.component1()
+ val y_dat_new = zeros(ShapeND(intArrayOf(n, 1))).as2D()
+ for (i in 0 until n) {
+ val randomEps = Random.nextDouble(delta + delta) - delta
+ y_dat_new[i, 0] = y_dat[i, 0] + randomEps
+ }
+ return y_dat_new
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamingLmTest.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamingLmTest.kt
new file mode 100644
index 000000000..c9dd5029e
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamingLmTest.kt
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.LevenbergMarquardt.StreamingLm
+
+import space.kscience.kmath.nd.*
+import space.kscience.kmath.tensors.LevenbergMarquardt.*
+import kotlin.math.roundToInt
+
+suspend fun main(){
+ val startData = getStartDataForFuncDifficult()
+ // Create the flow:
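+ // Zero delay between launches, 100 launches in total.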
+ val lmFlow = streamLm(::funcDifficultForLm, startData, 0, 100)
+ var initialTime = System.currentTimeMillis()
+ var lastTime: Long
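+ // Time between consecutive emissions, used to estimate the cost of one launch.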
+ val launches = mutableListOf<Long>()
+ // Launch the flow
+ lmFlow.collect { parameters ->
+ lastTime = System.currentTimeMillis()
+ launches.add(lastTime - initialTime)
+ initialTime = lastTime
+ for (i in 0 until parameters.shape.component1()) {
+ val x = (parameters[i, 0] * 10000).roundToInt() / 10000.0
+ print("$x ")
+ if (i == parameters.shape.component1() - 1) println()
+ }
+ }
+
+ println("Average without first is: ${launches.subList(1, launches.size - 1).average()}")
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/functionsToOptimize.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/functionsToOptimize.kt
new file mode 100644
index 000000000..7ccb37ed0
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/functionsToOptimize.kt
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.LevenbergMarquardt
+
+import space.kscience.kmath.nd.MutableStructure2D
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.nd.component1
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.max
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.plus
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.pow
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.times
+import space.kscience.kmath.tensors.core.asDoubleTensor
+
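+/** Bundles everything an LM example needs: observed data, initial guess, parameter bounds and solver options. */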
+public data class StartDataLm (
+ var lm_matx_y_dat: MutableStructure2D<Double>,
+ var example_number: Int,
+ var p_init: MutableStructure2D<Double>,
+ var t: MutableStructure2D<Double>,
+ var y_dat: MutableStructure2D<Double>,
+ var weight: Double,
+ var dp: MutableStructure2D<Double>,
+ var p_min: MutableStructure2D<Double>,
+ var p_max: MutableStructure2D<Double>,
+ var consts: MutableStructure2D<Double>,
+ var opts: DoubleArray
+)
+
+fun funcEasyForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, exampleNumber: Int): MutableStructure2D<Double> {
+ val m = t.shape.component1()
+ var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(m, 1)))
+
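+ // exampleNumber selects the model: 1) p0 * exp(-t / p1) + p2 * t * exp(-t / p3),
+ // 2) a quartic polynomial in t / max(t), 3) exponential decay plus a sine term.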
+ if (exampleNumber == 1) {
+ y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0]))).times(p[0, 0]) + t.times(p[2, 0]).times(
+ DoubleTensorAlgebra.exp((t.times(-1.0 / p[3, 0])))
+ )
+ }
+ else if (exampleNumber == 2) {
+ val mt = t.max()
+ y_hat = (t.times(1.0 / mt)).times(p[0, 0]) +
+ (t.times(1.0 / mt)).pow(2).times(p[1, 0]) +
+ (t.times(1.0 / mt)).pow(3).times(p[2, 0]) +
+ (t.times(1.0 / mt)).pow(4).times(p[3, 0])
+ }
+ else if (exampleNumber == 3) {
+ y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0])))
+ .times(p[0, 0]) + DoubleTensorAlgebra.sin((t.times(1.0 / p[3, 0]))).times(p[2, 0])
+ }
+
+ return y_hat.as2D()
+}
+
+fun funcMiddleForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, exampleNumber: Int): MutableStructure2D<Double> {
+ val m = t.shape.component1()
+ var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1)))
+
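+ // A harder target: a weighted sum of the scaled argument, composed with funcEasyForLm five times.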
+ val mt = t.max()
+ for(i in 0 until p.shape.component1()){
+ y_hat += (t.times(1.0 / mt)).times(p[i, 0])
+ }
+
+ for(i in 0 until 5){
+ y_hat = funcEasyForLm(y_hat.as2D(), p, exampleNumber).asDoubleTensor()
+ }
+
+ return y_hat.as2D()
+}
+
+fun funcDifficultForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, exampleNumber: Int): MutableStructure2D<Double> {
+ val m = t.shape.component1()
+ var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1)))
+
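+ // Like funcMiddleForLm, but each of the four compositions also shifts the argument by t.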
+ val mt = t.max()
+ for(i in 0 until p.shape.component1()){
+ y_hat = y_hat.plus( (t.times(1.0 / mt)).times(p[i, 0]) )
+ }
+
+ for(i in 0 until 4){
+ y_hat = funcEasyForLm((y_hat.as2D() + t).as2D(), p, exampleNumber).asDoubleTensor()
+ }
+
+ return y_hat.as2D()
+}
+
+
+fun getStartDataForFuncDifficult(): StartDataLm {
+ val NData = 200
+ var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
+ for (i in 0 until NData) {
+ t_example[i, 0] = t_example[i, 0] * (i + 1) - 104
+ }
+
+ val Nparams = 15
+ var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
+ for (i in 0 until Nparams) {
+ p_example[i, 0] = p_example[i, 0] + i - 25
+ }
+
+ val exampleNumber = 1
+
+ var y_hat = funcDifficultForLm(t_example, p_example, exampleNumber)
+
+ var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
+ for (i in 0 until Nparams) {
+ p_init[i, 0] = (p_example[i, 0] + 0.9)
+ }
+
+ var t = t_example
+ val y_dat = y_hat
+ val weight = 1.0 / Nparams * 1.0 - 0.085
+ val dp = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
+ ).as2D()
+ var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+ p_min = p_min.div(1.0 / -50.0) // lower bound: -50 for every parameter
+ var p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+ p_max = p_max.div(1.0 / 50.0) // upper bound: +50 for every parameter
+ val consts = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
+ ).as2D()
+ val opts = doubleArrayOf(3.0, 10000.0, 1e-2, 1e-3, 1e-2, 1e-2, 1e-2, 11.0, 9.0, 1.0)
+
+ return StartDataLm(y_dat, 1, p_init, t, y_dat, weight, dp, p_min.as2D(), p_max.as2D(), consts, opts)
+}
+
+fun getStartDataForFuncMiddle(): StartDataLm {
+ val NData = 100
+ var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
+ for (i in 0 until NData) {
+ t_example[i, 0] = t_example[i, 0] * (i + 1)
+ }
+
+ val Nparams = 20
+ var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
+ for (i in 0 until Nparams) {
+ p_example[i, 0] = p_example[i, 0] + i - 25
+ }
+
+ val exampleNumber = 1
+
+ var y_hat = funcMiddleForLm(t_example, p_example, exampleNumber)
+
+ var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
+ for (i in 0 until Nparams) {
+ p_init[i, 0] = (p_example[i, 0] + 10.0)
+ }
+ var t = t_example
+ val y_dat = y_hat
+ val weight = 1.0
+ val dp = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
+ ).as2D()
+ var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+ p_min = p_min.div(1.0 / -50.0) // lower bound: -50 for every parameter
+ var p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+ p_max = p_max.div(1.0 / 50.0) // upper bound: +50 for every parameter
+ val consts = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
+ ).as2D()
+ val opts = doubleArrayOf(3.0, 10000.0, 1e-5, 1e-5, 1e-5, 1e-5, 1e-5, 11.0, 9.0, 1.0)
+
+ var example_number = 1
+
+ return StartDataLm(y_dat, example_number, p_init, t, y_dat, weight, dp, p_min.as2D(), p_max.as2D(), consts, opts)
+}
+
+fun getStartDataForFuncEasy(): StartDataLm {
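+ // A fixed set of 100 reference observations used as the fitting target for example 1.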
+ val lm_matx_y_dat = doubleArrayOf(
+ 19.6594, 18.6096, 17.6792, 17.2747, 16.3065, 17.1458, 16.0467, 16.7023, 15.7809, 15.9807,
+ 14.7620, 15.1128, 16.0973, 15.1934, 15.8636, 15.4763, 15.6860, 15.1895, 15.3495, 16.6054,
+ 16.2247, 15.9854, 16.1421, 17.0960, 16.7769, 17.1997, 17.2767, 17.5882, 17.5378, 16.7894,
+ 17.7648, 18.2512, 18.1581, 16.7037, 17.8475, 17.9081, 18.3067, 17.9632, 18.2817, 19.1427,
+ 18.8130, 18.5658, 18.0056, 18.4607, 18.5918, 18.2544, 18.3731, 18.7511, 19.3181, 17.3066,
+ 17.9632, 19.0513, 18.7528, 18.2928, 18.5967, 17.8567, 17.7859, 18.4016, 18.9423, 18.4959,
+ 17.8000, 18.4251, 17.7829, 17.4645, 17.5221, 17.3517, 17.4637, 17.7563, 16.8471, 17.4558,
+ 17.7447, 17.1487, 17.3183, 16.8312, 17.7551, 17.0942, 15.6093, 16.4163, 15.3755, 16.6725,
+ 16.2332, 16.2316, 16.2236, 16.5361, 15.3721, 15.3347, 15.5815, 15.6319, 14.4538, 14.6044,
+ 14.7665, 13.3718, 15.0587, 13.8320, 14.7873, 13.6824, 14.2579, 14.2154, 13.5818, 13.8157
+ )
+
+ var example_number = 1
+ val p_init = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(4, 1)), doubleArrayOf(5.0, 2.0, 0.2, 10.0)
+ ).as2D()
+
+ var t = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(100, 1))).as2D()
+ for (i in 0 until 100) {
+ t[i, 0] = t[i, 0] * (i + 1)
+ }
+
+ val y_dat = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(100, 1)), lm_matx_y_dat
+ ).as2D()
+
+ val weight = 4.0
+
+ val dp = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
+ ).as2D()
+
+ val p_min = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(4, 1)), doubleArrayOf(-50.0, -20.0, -2.0, -100.0)
+ ).as2D()
+
+ val p_max = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(4, 1)), doubleArrayOf(50.0, 20.0, 2.0, 100.0)
+ ).as2D()
+
+ val consts = BroadcastDoubleTensorAlgebra.fromArray(
+ ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
+ ).as2D()
+
+ val opts = doubleArrayOf(3.0, 100.0, 1e-3, 1e-3, 1e-1, 1e-1, 1e-2, 11.0, 9.0, 1.0)
+
+ return StartDataLm(y_dat, example_number, p_init, t, y_dat, weight, dp, p_min, p_max, consts, opts)
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
index b42602988..2c570ea34 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
@@ -1,14 +1,17 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.tensors
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.nd.contentEquals
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensor
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
-
+import space.kscience.kmath.tensors.core.randomNormal
+import space.kscience.kmath.tensors.core.randomNormalLike
import kotlin.math.abs
// OLS estimator using SVD
@@ -21,10 +24,10 @@ fun main() {
DoubleTensorAlgebra {
// take coefficient vector from normal distribution
val alpha = randomNormal(
- intArrayOf(5),
+ ShapeND(5),
randSeed
) + fromArray(
- intArrayOf(5),
+ ShapeND(5),
doubleArrayOf(1.0, 2.5, 3.4, 5.0, 10.1)
)
@@ -32,27 +35,29 @@ fun main() {
// also take sample of size 20 from normal distribution for x
val x = randomNormal(
- intArrayOf(20, 5),
+ ShapeND(20, 5),
randSeed
)
// calculate y and add gaussian noise (N(0, 0.05))
val y = x dot alpha
- y += y.randomNormalLike(randSeed) * 0.05
+ y += randomNormalLike(y, randSeed) * 0.05
// now restore the coefficient vector with OSL estimator with SVD
- val (u, singValues, v) = x.svd()
+ val (u, singValues, v) = svd(x)
// we have to make sure the singular values of the matrix are not close to zero
println("Singular values:\n$singValues")
// inverse Sigma matrix can be restored from singular values with diagonalEmbedding function
- val sigma = diagonalEmbedding(singValues.map{ if (abs(it) < 1e-3) 0.0 else 1.0/it })
+ val sigma = diagonalEmbedding(singValues.map { if (abs(it) < 1e-3) 0.0 else 1.0 / it })
- val alphaOLS = v dot sigma dot u.transpose() dot y
- println("Estimated alpha:\n" +
- "$alphaOLS")
+ val alphaOLS = v dot sigma dot u.transposed() dot y
+ println(
+ "Estimated alpha:\n" +
+ "$alphaOLS"
+ )
// figure out MSE of approximation
fun mse(yTrue: DoubleTensor, yPred: DoubleTensor): Double {
@@ -60,7 +65,7 @@ fun main() {
require(yTrue.shape contentEquals yPred.shape)
val diff = yTrue - yPred
- return diff.dot(diff).sqrt().value()
+ return sqrt(diff.dot(diff)).value()
}
println("MSE: ${mse(alpha, alphaOLS)}")
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
index aced0cf7d..fb774a39d 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
@@ -1,12 +1,12 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.tensors
-import space.kscience.kmath.tensors.core.tensorAlgebra
-import space.kscience.kmath.tensors.core.withBroadcast
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.tensors.core.*
// simple PCA
@@ -16,49 +16,49 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with
// assume x is range from 0 until 10
val x = fromArray(
- intArrayOf(10),
+ ShapeND(10),
DoubleArray(10) { it.toDouble() }
)
// take y dependent on x with noise
- val y = 2.0 * x + (3.0 + x.randomNormalLike(seed) * 1.5)
+ val y = 2.0 * x + (3.0 + randomNormalLike(x, seed) * 1.5)
println("x:\n$x")
println("y:\n$y")
// stack them into single dataset
- val dataset = stack(listOf(x, y)).transpose()
+ val dataset = stack(listOf(x, y)).transposed()
// normalize both x and y
- val xMean = x.mean()
- val yMean = y.mean()
+ val xMean = mean(x)
+ val yMean = mean(y)
- val xStd = x.std()
- val yStd = y.std()
+ val xStd = std(x)
+ val yStd = std(y)
- val xScaled = (x - xMean) / xStd
- val yScaled = (y - yMean) / yStd
+ val xScaled: DoubleTensor = (x - xMean) / xStd
+ val yScaled: DoubleTensor = (y - yMean) / yStd
// save means ans standard deviations for further recovery
val mean = fromArray(
- intArrayOf(2),
+ ShapeND(2),
doubleArrayOf(xMean, yMean)
)
println("Means:\n$mean")
val std = fromArray(
- intArrayOf(2),
+ ShapeND(2),
doubleArrayOf(xStd, yStd)
)
println("Standard deviations:\n$std")
// calculate the covariance matrix of scaled x and y
- val covMatrix = cov(listOf(xScaled, yScaled))
+ val covMatrix = covariance(listOf(xScaled.asDoubleTensor1D(), yScaled.asDoubleTensor1D()))
println("Covariance matrix:\n$covMatrix")
// and find out eigenvector of it
- val (_, evecs) = covMatrix.symEig()
- val v = evecs[0]
+ val (_, evecs) = symEig(covMatrix)
+ val v = evecs.getTensor(0)
println("Eigenvector:\n$v")
// reduce dimension of dataset
@@ -68,7 +68,7 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with
// we can restore original data from reduced data;
// for example, find 7th element of dataset.
val n = 7
- val restored = (datasetReduced[n] dot v.view(intArrayOf(1, 2))) * std + mean
- println("Original value:\n${dataset[n]}")
+ val restored = (datasetReduced.getTensor(n) dot v.view(ShapeND(1, 2))) * std + mean
+ println("Original value:\n${dataset.getTensor(n)}")
println("Restored value:\n$restored")
}
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/dataSetNormalization.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/dataSetNormalization.kt
index a436ae1c3..45c2ff120 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/dataSetNormalization.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/dataSetNormalization.kt
@@ -1,10 +1,12 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.tensors
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.tensors.core.randomNormal
import space.kscience.kmath.tensors.core.tensorAlgebra
import space.kscience.kmath.tensors.core.withBroadcast
@@ -13,17 +15,17 @@ import space.kscience.kmath.tensors.core.withBroadcast
fun main() = Double.tensorAlgebra.withBroadcast { // work in context with broadcast methods
// take dataset of 5-element vectors from normal distribution
- val dataset = randomNormal(intArrayOf(100, 5)) * 1.5 // all elements from N(0, 1.5)
+ val dataset = randomNormal(ShapeND(100, 5)) * 1.5 // all elements from N(0, 1.5)
dataset += fromArray(
- intArrayOf(5),
+ ShapeND(5),
doubleArrayOf(0.0, 1.0, 1.5, 3.0, 5.0) // row means
)
// find out mean and standard deviation of each column
- val mean = dataset.mean(0, false)
- val std = dataset.std(0, false)
+ val mean = mean(dataset, 0, false)
+ val std = std(dataset, 0, false)
println("Mean:\n$mean")
println("Standard deviation:\n$std")
@@ -35,8 +37,8 @@ fun main() = Double.tensorAlgebra.withBroadcast { // work in context with broad
// now we can scale dataset with mean normalization
val datasetScaled = (dataset - mean) / std
- // find out mean and std of scaled dataset
+ // find out mean and standard deviation of scaled dataset
- println("Mean of scaled:\n${datasetScaled.mean(0, false)}")
- println("Mean of scaled:\n${datasetScaled.std(0, false)}")
+ println("Mean of scaled:\n${mean(datasetScaled, 0, false)}")
+ println("Mean of scaled:\n${std(datasetScaled, 0, false)}")
}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt
index f465fc424..238696cf9 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt
@@ -1,10 +1,11 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.tensors
+import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.DoubleTensor
import space.kscience.kmath.tensors.core.tensorAlgebra
import space.kscience.kmath.tensors.core.withBroadcast
@@ -15,13 +16,13 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear
// set true value of x
val trueX = fromArray(
- intArrayOf(4),
+ ShapeND(4),
doubleArrayOf(-2.0, 1.5, 6.8, -2.4)
)
// and A matrix
val a = fromArray(
- intArrayOf(4, 4),
+ ShapeND(4, 4),
doubleArrayOf(
0.5, 10.5, 4.5, 1.0,
8.5, 0.9, 12.8, 0.1,
@@ -40,7 +41,7 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear
// solve `Ax = b` system using LUP decomposition
// get P, L, U such that PA = LU
- val (p, l, u) = a.lu()
+ val (p, l, u) = lu(a)
// check P is permutation matrix
println("P:\n$p")
@@ -64,9 +65,9 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear
// this function returns solution x of a system lx = b, l should be lower triangular
fun solveLT(l: DoubleTensor, b: DoubleTensor): DoubleTensor {
val n = l.shape[0]
- val x = zeros(intArrayOf(n))
+ val x = zeros(ShapeND(n))
for (i in 0 until n) {
- x[intArrayOf(i)] = (b[intArrayOf(i)] - l[i].dot(x).value()) / l[intArrayOf(i, i)]
+ x[intArrayOf(i)] = (b[intArrayOf(i)] - l.getTensor(i).dot(x).value()) / l[intArrayOf(i, i)]
}
return x
}
@@ -75,7 +76,7 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear
// solveLT(l, b) function can be easily adapted for upper triangular matrix by the permutation matrix revMat
// create it by placing ones on side diagonal
- val revMat = u.zeroesLike()
+ val revMat = zeroesLike(u)
val n = revMat.shape[0]
for (i in 0 until n) {
revMat[intArrayOf(i, n - 1 - i)] = 1.0
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/multik.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/multik.kt
index f2d1f0b41..67e0631e7 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/multik.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/multik.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -7,11 +7,14 @@ package space.kscience.kmath.tensors
import org.jetbrains.kotlinx.multik.api.Multik
import org.jetbrains.kotlinx.multik.api.ndarray
-import space.kscience.kmath.multik.multikAlgebra
+import org.jetbrains.kotlinx.multik.default.DefaultEngine
+import space.kscience.kmath.multik.MultikDoubleAlgebra
import space.kscience.kmath.nd.one
-import space.kscience.kmath.operations.DoubleField
-fun main(): Unit = with(DoubleField.multikAlgebra) {
+
+val multikAlgebra = MultikDoubleAlgebra(DefaultEngine())
+
+fun main(): Unit = with(multikAlgebra) {
val a = Multik.ndarray(intArrayOf(1, 2, 3)).asType<Double>().wrap()
val b = Multik.ndarray(doubleArrayOf(1.0, 2.0, 3.0)).wrap()
one(a.shape) - a + b * 3.0
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt
index 5c41ab0f1..8fd5ae5ad 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt
@@ -1,15 +1,15 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.tensors
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.nd.contentEquals
+import space.kscience.kmath.operations.asIterable
import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.DoubleTensor
-import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.copyArray
+import space.kscience.kmath.tensors.core.*
import kotlin.math.sqrt
const val seed = 100500L
@@ -48,7 +48,7 @@ fun reluDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
class ReLU : Activation(::relu, ::reluDer)
fun sigmoid(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
- 1.0 / (1.0 + (-x).exp())
+ 1.0 / (1.0 + exp((-x)))
}
fun sigmoidDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
@@ -67,22 +67,22 @@ class Dense(
private val weights: DoubleTensor = DoubleTensorAlgebra {
randomNormal(
- intArrayOf(inputUnits, outputUnits),
+ ShapeND(inputUnits, outputUnits),
seed
) * sqrt(2.0 / (inputUnits + outputUnits))
}
- private val bias: DoubleTensor = DoubleTensorAlgebra { zeros(intArrayOf(outputUnits)) }
+ private val bias: DoubleTensor = DoubleTensorAlgebra { zeros(ShapeND(outputUnits)) }
override fun forward(input: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra {
(input dot weights) + bias
}
override fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
- val gradInput = outputError dot weights.transpose()
+ val gradInput = outputError dot weights.transposed()
- val gradW = input.transpose() dot outputError
- val gradBias = outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble()
+ val gradW = input.transposed() dot outputError
+ val gradBias = mean(structureND = outputError, dim = 0, keepDim = false) * input.shape[0].toDouble()
weights -= learningRate * gradW
bias -= learningRate * gradBias
@@ -106,17 +106,16 @@ fun accuracy(yPred: DoubleTensor, yTrue: DoubleTensor): Double {
}
// neural network class
-@OptIn(ExperimentalStdlibApi::class)
class NeuralNetwork(private val layers: List<Layer>) {
private fun softMaxLoss(yPred: DoubleTensor, yTrue: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra {
- val onesForAnswers = yPred.zeroesLike()
- yTrue.copyArray().forEachIndexed { index, labelDouble ->
+ val onesForAnswers = zeroesLike(yPred)
+ yTrue.source.asIterable().forEachIndexed { index, labelDouble ->
val label = labelDouble.toInt()
onesForAnswers[intArrayOf(index, label)] = 1.0
}
- val softmaxValue = yPred.exp() / yPred.exp().sum(dim = 1, keepDim = true)
+ val softmaxValue = exp(yPred) / exp(yPred).sum(dim = 1, keepDim = true)
(-onesForAnswers + softmaxValue) / (yPred.shape[0].toDouble())
}
@@ -163,7 +162,7 @@ class NeuralNetwork(private val layers: List) {
for ((xBatch, yBatch) in iterBatch(xTrain, yTrain)) {
train(xBatch, yBatch)
}
- println("Accuracy:${accuracy(yTrain, predict(xTrain).argMax(1, true).asDouble())}")
+ println("Accuracy:${accuracy(yTrain, predict(xTrain).argMax(1, true).toDoubleTensor())}")
}
}
@@ -174,7 +173,6 @@ class NeuralNetwork(private val layers: List) {
}
-@OptIn(ExperimentalStdlibApi::class)
fun main() = BroadcastDoubleTensorAlgebra {
val features = 5
val sampleSize = 250
@@ -182,19 +180,19 @@ fun main() = BroadcastDoubleTensorAlgebra {
//val testSize = sampleSize - trainSize
// take sample of features from normal distribution
- val x = randomNormal(intArrayOf(sampleSize, features), seed) * 2.5
+ val x = randomNormal(ShapeND(sampleSize, features), seed) * 2.5
x += fromArray(
- intArrayOf(5),
+ ShapeND(5),
doubleArrayOf(0.0, -1.0, -2.5, -3.0, 5.5) // row means
)
// define class like '1' if the sum of features > 0 and '0' otherwise
val y = fromArray(
- intArrayOf(sampleSize, 1),
+ ShapeND(sampleSize, 1),
DoubleArray(sampleSize) { i ->
- if (x[i].sum() > 0.0) {
+ if (x.getTensor(i).sum() > 0.0) {
1.0
} else {
0.0
@@ -230,7 +228,7 @@ fun main() = BroadcastDoubleTensorAlgebra {
val prediction = model.predict(xTest)
// process raw prediction via argMax
- val predictionLabels = prediction.argMax(1, true).asDouble()
+ val predictionLabels = prediction.argMax(1, true).toDoubleTensor()
// find out accuracy
val acc = accuracy(yTest, predictionLabels)
diff --git a/gradle.properties b/gradle.properties
index 6b45ee49f..fee75d428 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -3,13 +3,14 @@
# Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
#
kotlin.code.style=official
-kotlin.jupyter.add.scanner=false
kotlin.mpp.stability.nowarn=true
kotlin.native.ignoreDisabledTargets=true
-//kotlin.incremental.js.ir=true
org.gradle.configureondemand=true
-org.gradle.parallel=true
org.gradle.jvmargs=-Xmx4096m
-toolsVersion=0.11.8-kotlin-1.7.10
+toolsVersion=0.14.8-kotlin-1.8.20
+
+
+org.gradle.parallel=true
+org.gradle.workers.max=4
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index aa991fcea..fae08049a 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,5 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-7.4.2-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.1.1-bin.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
diff --git a/kmath-ast/README.md b/kmath-ast/README.md
index c6da64982..d85a18e1c 100644
--- a/kmath-ast/README.md
+++ b/kmath-ast/README.md
@@ -10,7 +10,7 @@ Extensions to MST API: transformations, dynamic compilation and visualization.
## Artifact:
-The Maven coordinates of this project are `space.kscience:kmath-ast:0.3.1-dev-1`.
+The Maven coordinates of this project are `space.kscience:kmath-ast:0.4.0-dev-1`.
**Gradle Groovy:**
```groovy
@@ -20,7 +20,7 @@ repositories {
}
dependencies {
- implementation 'space.kscience:kmath-ast:0.3.1-dev-1'
+ implementation 'space.kscience:kmath-ast:0.4.0-dev-1'
}
```
**Gradle Kotlin DSL:**
@@ -31,7 +31,7 @@ repositories {
}
dependencies {
- implementation("space.kscience:kmath-ast:0.3.1-dev-1")
+ implementation("space.kscience:kmath-ast:0.4.0-dev-1")
}
```
diff --git a/kmath-ast/build.gradle.kts b/kmath-ast/build.gradle.kts
index f49c2767a..7cdb745f0 100644
--- a/kmath-ast/build.gradle.kts
+++ b/kmath-ast/build.gradle.kts
@@ -1,67 +1,63 @@
plugins {
- kotlin("multiplatform")
- id("ru.mipt.npm.gradle.common")
+ id("space.kscience.gradle.mpp")
}
-kotlin.js {
- nodejs {
- testTask {
- useMocha().timeout = "0"
- }
- }
+kscience{
+ jvm()
+ js()
+ native()
- browser {
- testTask {
- useMocha().timeout = "0"
- }
+ dependencies {
+ api(projects.kmathCore)
+ api("com.github.h0tk3y.betterParse:better-parse:0.4.4")
}
-}
-kotlin.sourceSets {
- filter { it.name.contains("test", true) }
- .map(org.jetbrains.kotlin.gradle.plugin.KotlinSourceSet::languageSettings)
- .forEach { it.optIn("space.kscience.kmath.misc.UnstableKMathAPI") }
+ testDependencies {
+ implementation(projects.kmathComplex)
+ }
- commonMain {
- dependencies {
- api("com.github.h0tk3y.betterParse:better-parse:0.4.4")
- api(project(":kmath-core"))
- }
+ dependencies(jsMain) {
+ implementation(npm("astring", "1.7.5"))
+ implementation(npm("binaryen", "101.0.0"))
+ implementation(npm("js-base64", "3.6.1"))
}
- commonTest {
- dependencies {
- implementation(project(":kmath-complex"))
- }
+ dependencies(jvmMain){
+ implementation("org.ow2.asm:asm-commons:9.2")
}
- jsMain {
- dependencies {
- implementation(npm("astring", "1.7.5"))
- implementation(npm("binaryen", "101.0.0"))
- implementation(npm("js-base64", "3.6.1"))
+}
+
+kotlin {
+ js {
+ nodejs {
+ testTask {
+ useMocha().timeout = "0"
+ }
}
- }
- jvmMain {
- dependencies {
- implementation("org.ow2.asm:asm-commons:9.2")
+ browser {
+ testTask {
+ useMocha().timeout = "0"
+ }
}
}
-}
-//Workaround for https://github.com/Kotlin/dokka/issues/1455
-tasks.dokkaHtml {
- dependsOn(tasks.build)
+ sourceSets {
+ filter { it.name.contains("test", true) }
+ .map(org.jetbrains.kotlin.gradle.plugin.KotlinSourceSet::languageSettings)
+ .forEach { it.optIn("space.kscience.kmath.UnstableKMathAPI") }
+ }
}
-if (System.getProperty("space.kscience.kmath.ast.dump.generated.classes") == "1")
- tasks.jvmTest {
+if (System.getProperty("space.kscience.kmath.ast.dump.generated.classes") == "1") {
+ tasks.withType<Test> {
jvmArgs("-Dspace.kscience.kmath.ast.dump.generated.classes=1")
}
+}
readme {
- maturity = ru.mipt.npm.gradle.Maturity.EXPERIMENTAL
+ maturity = space.kscience.gradle.Maturity.EXPERIMENTAL
propertyByTemplate("artifact", rootProject.file("docs/templates/ARTIFACT-TEMPLATE.md"))
feature(
diff --git a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/TypedMst.kt b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/TypedMst.kt
index 8a8b8797d..e82f7a3ab 100644
--- a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/TypedMst.kt
+++ b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/TypedMst.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -7,7 +7,6 @@ package space.kscience.kmath.ast
import space.kscience.kmath.expressions.Expression
import space.kscience.kmath.expressions.Symbol
-import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.operations.Algebra
import space.kscience.kmath.operations.NumericAlgebra
@@ -16,7 +15,6 @@ import space.kscience.kmath.operations.NumericAlgebra
*
* @param T the type.
*/
-@UnstableKMathAPI
public sealed interface TypedMst<out T> {
/**
* A node containing a unary operation.
@@ -133,7 +131,6 @@ public sealed interface TypedMst {
/**
* Interprets the [TypedMst] node with this [Algebra] and [arguments].
*/
-@UnstableKMathAPI
public fun <T> TypedMst<T>.interpret(algebra: Algebra<T>, arguments: Map<Symbol, T>): T = when (this) {
is TypedMst.Unary -> algebra.unaryOperation(operation, interpret(algebra, arguments))
@@ -158,7 +155,6 @@ public fun <T> TypedMst<T>.interpret(algebra: Algebra<T>, arguments: Map<Symbol, T>): T = when (this) {
-@UnstableKMathAPI
public fun <T> TypedMst<T>.interpret(algebra: Algebra<T>, vararg arguments: Pair<Symbol, T>): T = interpret(
algebra,
when (arguments.size) {
@@ -171,7 +167,6 @@ public fun <T> TypedMst<T>.interpret(algebra: Algebra<T>, vararg arguments: Pair<Symbol, T>): T = interpret(
/**
* Interpret this [TypedMst] node as expression.
*/
-@UnstableKMathAPI
public fun <T> TypedMst<T>.toExpression(algebra: Algebra<T>): Expression<T> = Expression { arguments ->
interpret(algebra, arguments)
}
diff --git a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/evaluateConstants.kt b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/evaluateConstants.kt
index 71fb154c9..8fc5a6aaf 100644
--- a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/evaluateConstants.kt
+++ b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/evaluateConstants.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@@ -7,7 +7,6 @@ package space.kscience.kmath.ast
import space.kscience.kmath.expressions.MST
import space.kscience.kmath.expressions.Symbol
-import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.operations.Algebra
import space.kscience.kmath.operations.NumericAlgebra
import space.kscience.kmath.operations.bindSymbolOrNull
@@ -15,7 +14,6 @@ import space.kscience.kmath.operations.bindSymbolOrNull
/**
* Evaluates constants in given [MST] for given [algebra] at the same time with converting to [TypedMst].
*/
-@UnstableKMathAPI
public fun <T> MST.evaluateConstants(algebra: Algebra<T>): TypedMst<T> = when (this) {
is MST.Numeric -> TypedMst.Constant(
(algebra as? NumericAlgebra<T>)?.number(value) ?: error("Numeric nodes are not supported by $algebra"),
diff --git a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt
index 012a6e65f..2c9a2a9ad 100644
--- a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt
+++ b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
diff --git a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt
index 2df3d3cc7..5a338afed 100644
--- a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt
+++ b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt
@@ -1,12 +1,10 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.ast.rendering
-import space.kscience.kmath.misc.UnstableKMathAPI
-
/**
* [SyntaxRenderer] implementation for LaTeX.
*
@@ -25,7 +23,6 @@ import space.kscience.kmath.misc.UnstableKMathAPI
*
* @author Iaroslav Postovalov
*/
-@UnstableKMathAPI
public object LatexSyntaxRenderer : SyntaxRenderer {
override fun render(node: MathSyntax, output: Appendable): Unit = output.run {
fun render(syntax: MathSyntax) = render(syntax, output)
diff --git a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt
index 8b5819b84..bfd9aeef9 100644
--- a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt
+++ b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt
@@ -1,12 +1,10 @@
/*
- * Copyright 2018-2021 KMath contributors.
+ * Copyright 2018-2022 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.ast.rendering
-import space.kscience.kmath.misc.UnstableKMathAPI
-
/**
* [SyntaxRenderer] implementation for MathML.
*
@@ -14,7 +12,6 @@ import space.kscience.kmath.misc.UnstableKMathAPI
*
* @author Iaroslav Postovalov
*/
-@UnstableKMathAPI
public object MathMLSyntaxRenderer : SyntaxRenderer {
override fun render(node: MathSyntax, output: Appendable) {
output.append("