diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index f39e12a12..9a9f04621 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -13,9 +13,11 @@ jobs:
- name: Checkout the repo
uses: actions/checkout@v2
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: DeLaGuardo/setup-graalvm@4.0
with:
- java-version: 11
+ graalvm: 21.1.0
+ java: java11
+ arch: amd64
- name: Add msys to path
if: matrix.os == 'windows-latest'
run: SETX PATH "%PATH%;C:\msys64\mingw64\bin"
diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml
index 82b0fb303..86fdac6a6 100644
--- a/.github/workflows/pages.yml
+++ b/.github/workflows/pages.yml
@@ -15,24 +15,8 @@ jobs:
uses: actions/setup-java@v1
with:
java-version: 11
- - name: Cache gradle
- uses: actions/cache@v2
- with:
- path: ~/.gradle/caches
- key: ubuntu-20.04-gradle-${{ hashFiles('*.gradle.kts') }}
- restore-keys: |
- ubuntu-20.04-gradle-
- - name: Cache konan
- uses: actions/cache@v2
- with:
- path: ~/.konan
- key: ${{ runner.os }}-gradle-${{ hashFiles('*.gradle.kts') }}
- restore-keys: |
- ${{ runner.os }}-gradle-
- name: Build
- run: |
- ./gradlew dokkaHtmlMultiModule --no-daemon --no-parallel --stacktrace
- mv build/dokka/htmlMultiModule/-modules.html build/dokka/htmlMultiModule/index.html
+ run: ./gradlew dokkaHtmlMultiModule --no-daemon --no-parallel --stacktrace
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@4.1.0
with:
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index ca374574e..c5c110e89 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -18,9 +18,11 @@ jobs:
- name: Checkout the repo
uses: actions/checkout@v2
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: DeLaGuardo/setup-graalvm@4.0
with:
- java-version: 11
+ graalvm: 21.1.0
+ java: java11
+ arch: amd64
- name: Add msys to path
if: matrix.os == 'windows-latest'
run: SETX PATH "%PATH%;C:\msys64\mingw64\bin"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c41eda374..9c6b14b95 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,20 +10,31 @@
- Blocking chains and Statistics
- Multiplatform integration
- Integration for any Field element
-- Extendend operations for ND4J fields
+- Extended operations for ND4J fields
+- Jupyter Notebook integration module (kmath-jupyter)
+- `@PerformancePitfall` annotation to mark possibly slow API
+- BigInt operation performance improvement and fixes by @zhelenskiy (#328)
### Changed
- Exponential operations merged with hyperbolic functions
- Space is replaced by Group. Space is reserved for vector spaces.
- VectorSpace is now a vector space
- Buffer factories for primitives moved to MutableBuffer.Companion
-- NDStructure and NDAlgebra to StructureND and AlgebraND respectively
-- Real -> Double
+- Rename `NDStructure` and `NDAlgebra` to `StructureND` and `AlgebraND` respectively
+- `Real` -> `Double`
- DataSets are moved from functions to core
- Redesign advanced Chain API
-- Redesign MST. Remove MSTExpression.
-- Move MST to core
+- Redesign `MST`. Remove `MstExpression`.
+- Move `MST` to core
- Separated benchmarks and examples
+- Rewrite `kmath-ejml` without `ejml-simple` artifact, support sparse matrices
+- Promote stability of kmath-ast and kmath-kotlingrad to EXPERIMENTAL.
+- ColumnarData returns nullable column
+- `MST` is made sealed interface
+- Replace `MST.Symbolic` by `Symbol`, `Symbol` now implements MST
+- Remove Any restriction on polynomials
+- Add `out` variance to type parameters of `StructureND` and its implementations where possible
+- Rename `DifferentiableMstExpression` to `KotlingradExpression`
### Deprecated
@@ -33,6 +44,7 @@
- `contentEquals` from Buffer. It moved to the companion.
- MSTExpression
- Expression algebra builders
+- Complex and Quaternion no longer are elements.
### Fixed
- Ring inherits RingOperations, not GroupOperations
diff --git a/README.md b/README.md
index 773eb6398..9117582ac 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,7 @@ KMath is a modular library. Different modules provide different features with di
* **PROTOTYPE**. On this level there are no compatibility guarantees. All methods and classes form those modules could break any moment. You can still use it, but be sure to fix the specific version.
* **EXPERIMENTAL**. The general API is decided, but some changes could be made. Volatile API is marked with `@UnstableKmathAPI` or other stability warning annotations.
-* **DEVELOPMENT**. API breaking genrally follows semantic versioning ideology. There could be changes in minor versions, but not in patch versions. API is protected with [binary-compatibility-validator](https://github.com/Kotlin/binary-compatibility-validator) tool.
+* **DEVELOPMENT**. API breaking generally follows semantic versioning ideology. There could be changes in minor versions, but not in patch versions. API is protected with [binary-compatibility-validator](https://github.com/Kotlin/binary-compatibility-validator) tool.
* **STABLE**. The API stabilized. Breaking changes are allowed only in major releases.
@@ -91,7 +91,7 @@ KMath is a modular library. Different modules provide different features with di
* ### [kmath-ast](kmath-ast)
>
>
-> **Maturity**: PROTOTYPE
+> **Maturity**: EXPERIMENTAL
>
> **Features:**
> - [expression-language](kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt) : Expression language and its parser
@@ -154,9 +154,9 @@ performance calculations to code generation.
> **Maturity**: PROTOTYPE
>
> **Features:**
-> - [ejml-vector](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlVector.kt) : The Point implementation using SimpleMatrix.
-> - [ejml-matrix](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlMatrix.kt) : The Matrix implementation using SimpleMatrix.
-> - [ejml-linear-space](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlLinearSpace.kt) : The LinearSpace implementation using SimpleMatrix.
+> - [ejml-vector](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlVector.kt) : Point implementations.
+> - [ejml-matrix](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlMatrix.kt) : Matrix implementation.
+> - [ejml-linear-space](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlLinearSpace.kt) : LinearSpace implementations.
@@ -200,12 +200,23 @@ One can still use generic algebras though.
> **Maturity**: PROTOTYPE
-* ### [kmath-kotlingrad](kmath-kotlingrad)
+* ### [kmath-jupyter](kmath-jupyter)
>
>
> **Maturity**: PROTOTYPE
+* ### [kmath-kotlingrad](kmath-kotlingrad)
+> Functions, integration and interpolation
+>
+> **Maturity**: EXPERIMENTAL
+>
+> **Features:**
+> - [differentiable-mst-expression](kmath-kotlingrad/src/main/kotlin/space/kscience/kmath/kotlingrad/DifferentiableMstExpression.kt) : MST based DifferentiableExpression.
+> - [differentiable-mst-expression](kmath-kotlingrad/src/main/kotlin/space/kscience/kmath/kotlingrad/DifferentiableMstExpression.kt) : Conversions between Kotlin∇'s SFun and MST
+
+
+
* ### [kmath-memory](kmath-memory)
> An API and basic implementation for arranging objects in a continous memory block.
>
@@ -230,6 +241,18 @@ One can still use generic algebras though.
> **Maturity**: EXPERIMENTAL
+* ### [kmath-tensors](kmath-tensors)
+>
+>
+> **Maturity**: PROTOTYPE
+>
+> **Features:**
+> - [tensor algebra](kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt) : Basic linear algebra operations on tensors (plus, dot, etc.)
+> - [tensor algebra with broadcasting](kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/BroadcastDoubleTensorAlgebra.kt) : Basic linear algebra operations implemented with broadcasting.
+> - [linear algebra operations](kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt) : Advanced linear algebra operations like LU decomposition, SVD, etc.
+
+
+
* ### [kmath-viktor](kmath-viktor)
>
>
@@ -270,8 +293,8 @@ repositories {
}
dependencies {
- api("space.kscience:kmath-core:0.3.0-dev-7")
- // api("space.kscience:kmath-core-jvm:0.3.0-dev-7") for jvm-specific version
+ api("space.kscience:kmath-core:0.3.0-dev-11")
+ // api("space.kscience:kmath-core-jvm:0.3.0-dev-11") for jvm-specific version
}
```
diff --git a/benchmarks/build.gradle.kts b/benchmarks/build.gradle.kts
index 88f034a2a..98ffc5a96 100644
--- a/benchmarks/build.gradle.kts
+++ b/benchmarks/build.gradle.kts
@@ -9,14 +9,10 @@ sourceSets.register("benchmarks")
repositories {
mavenCentral()
- jcenter()
maven("https://repo.kotlin.link")
maven("https://clojars.org/repo")
- maven("https://dl.bintray.com/egor-bogomolov/astminer/")
- maven("https://dl.bintray.com/hotkeytlt/maven")
maven("https://jitpack.io")
- maven {
- setUrl("http://logicrunch.research.it.uu.se/maven/")
+ maven("http://logicrunch.research.it.uu.se/maven") {
isAllowInsecureProtocol = true
}
}
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BigIntBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BigIntBenchmark.kt
index 2076aedc7..749cd5e75 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BigIntBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/BigIntBenchmark.kt
@@ -10,20 +10,19 @@ import kotlinx.benchmark.Blackhole
import org.openjdk.jmh.annotations.Benchmark
import org.openjdk.jmh.annotations.Scope
import org.openjdk.jmh.annotations.State
-import space.kscience.kmath.operations.BigInt
-import space.kscience.kmath.operations.BigIntField
-import space.kscience.kmath.operations.JBigIntegerField
-import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.misc.UnstableKMathAPI
+import space.kscience.kmath.operations.*
+import java.math.BigInteger
-private fun BigInt.pow(power: Int): BigInt = modPow(BigIntField.number(power), BigInt.ZERO)
+@UnstableKMathAPI
@State(Scope.Benchmark)
internal class BigIntBenchmark {
val kmNumber = BigIntField.number(Int.MAX_VALUE)
val jvmNumber = JBigIntegerField.number(Int.MAX_VALUE)
- val largeKmNumber = BigIntField { number(11).pow(100_000) }
- val largeJvmNumber = JBigIntegerField { number(11).pow(100_000) }
+ val largeKmNumber = BigIntField { number(11).pow(100_000U) }
+ val largeJvmNumber: BigInteger = JBigIntegerField { number(11).pow(100_000) }
val bigExponent = 50_000
@Benchmark
@@ -36,6 +35,16 @@ internal class BigIntBenchmark {
blackhole.consume(jvmNumber + jvmNumber + jvmNumber)
}
+ @Benchmark
+ fun kmAddLarge(blackhole: Blackhole) = BigIntField {
+ blackhole.consume(largeKmNumber + largeKmNumber + largeKmNumber)
+ }
+
+ @Benchmark
+ fun jvmAddLarge(blackhole: Blackhole) = JBigIntegerField {
+ blackhole.consume(largeJvmNumber + largeJvmNumber + largeJvmNumber)
+ }
+
@Benchmark
fun kmMultiply(blackhole: Blackhole) = BigIntField {
blackhole.consume(kmNumber * kmNumber * kmNumber)
@@ -56,13 +65,33 @@ internal class BigIntBenchmark {
blackhole.consume(largeJvmNumber*largeJvmNumber)
}
-// @Benchmark
-// fun kmPower(blackhole: Blackhole) = BigIntField {
-// blackhole.consume(kmNumber.pow(bigExponent))
-// }
-//
-// @Benchmark
-// fun jvmPower(blackhole: Blackhole) = JBigIntegerField {
-// blackhole.consume(jvmNumber.pow(bigExponent))
-// }
+ @Benchmark
+ fun kmPower(blackhole: Blackhole) = BigIntField {
+ blackhole.consume(kmNumber.pow(bigExponent.toUInt()))
+ }
+
+ @Benchmark
+ fun jvmPower(blackhole: Blackhole) = JBigIntegerField {
+ blackhole.consume(jvmNumber.pow(bigExponent))
+ }
+
+ @Benchmark
+ fun kmParsing16(blackhole: Blackhole) = JBigIntegerField {
+ blackhole.consume("0x7f57ed8b89c29a3b9a85c7a5b84ca3929c7b7488593".parseBigInteger())
+ }
+
+ @Benchmark
+ fun kmParsing10(blackhole: Blackhole) = JBigIntegerField {
+ blackhole.consume("236656783929183747565738292847574838922010".parseBigInteger())
+ }
+
+ @Benchmark
+ fun jvmParsing10(blackhole: Blackhole) = JBigIntegerField {
+ blackhole.consume("236656783929183747565738292847574838922010".toBigInteger(10))
+ }
+
+ @Benchmark
+ fun jvmParsing16(blackhole: Blackhole) = JBigIntegerField {
+ blackhole.consume("7f57ed8b89c29a3b9a85c7a5b84ca3929c7b7488593".toBigInteger(16))
+ }
}
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
index 23e73cb5f..2c5a03a97 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
@@ -10,7 +10,7 @@ import kotlinx.benchmark.Blackhole
import kotlinx.benchmark.Scope
import kotlinx.benchmark.State
import space.kscience.kmath.commons.linear.CMLinearSpace
-import space.kscience.kmath.ejml.EjmlLinearSpace
+import space.kscience.kmath.ejml.EjmlLinearSpaceDDRM
import space.kscience.kmath.linear.LinearSpace
import space.kscience.kmath.linear.invoke
import space.kscience.kmath.operations.DoubleField
@@ -29,8 +29,8 @@ internal class DotBenchmark {
val cmMatrix1 = CMLinearSpace { matrix1.toCM() }
val cmMatrix2 = CMLinearSpace { matrix2.toCM() }
- val ejmlMatrix1 = EjmlLinearSpace { matrix1.toEjml() }
- val ejmlMatrix2 = EjmlLinearSpace { matrix2.toEjml() }
+ val ejmlMatrix1 = EjmlLinearSpaceDDRM { matrix1.toEjml() }
+ val ejmlMatrix2 = EjmlLinearSpaceDDRM { matrix2.toEjml() }
}
@Benchmark
@@ -42,14 +42,14 @@ internal class DotBenchmark {
@Benchmark
fun ejmlDot(blackhole: Blackhole) {
- EjmlLinearSpace {
+ EjmlLinearSpaceDDRM {
blackhole.consume(ejmlMatrix1 dot ejmlMatrix2)
}
}
@Benchmark
fun ejmlDotWithConversion(blackhole: Blackhole) {
- EjmlLinearSpace {
+ EjmlLinearSpaceDDRM {
blackhole.consume(matrix1 dot matrix2)
}
}
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt
index 942fba308..15cd14399 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/ExpressionsInterpretersBenchmark.kt
@@ -11,8 +11,6 @@ import kotlinx.benchmark.Scope
import kotlinx.benchmark.State
import space.kscience.kmath.asm.compileToExpression
import space.kscience.kmath.expressions.*
-import space.kscience.kmath.misc.Symbol
-import space.kscience.kmath.misc.symbol
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.bindSymbol
import space.kscience.kmath.operations.invoke
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt
index d1803e389..7bb32af28 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt
@@ -11,25 +11,26 @@ import kotlinx.benchmark.Scope
import kotlinx.benchmark.State
import space.kscience.kmath.commons.linear.CMLinearSpace
import space.kscience.kmath.commons.linear.inverse
-import space.kscience.kmath.ejml.EjmlLinearSpace
-import space.kscience.kmath.ejml.inverse
+import space.kscience.kmath.ejml.EjmlLinearSpaceDDRM
+import space.kscience.kmath.linear.InverseMatrixFeature
import space.kscience.kmath.linear.LinearSpace
import space.kscience.kmath.linear.inverseWithLup
import space.kscience.kmath.linear.invoke
+import space.kscience.kmath.nd.getFeature
import kotlin.random.Random
@State(Scope.Benchmark)
internal class MatrixInverseBenchmark {
- companion object {
- val random = Random(1224)
- const val dim = 100
+ private companion object {
+ private val random = Random(1224)
+ private const val dim = 100
private val space = LinearSpace.real
//creating invertible matrix
- val u = space.buildMatrix(dim, dim) { i, j -> if (i <= j) random.nextDouble() else 0.0 }
- val l = space.buildMatrix(dim, dim) { i, j -> if (i >= j) random.nextDouble() else 0.0 }
- val matrix = space { l dot u }
+ private val u = space.buildMatrix(dim, dim) { i, j -> if (i <= j) random.nextDouble() else 0.0 }
+ private val l = space.buildMatrix(dim, dim) { i, j -> if (i >= j) random.nextDouble() else 0.0 }
+ private val matrix = space { l dot u }
}
@Benchmark
@@ -46,8 +47,8 @@ internal class MatrixInverseBenchmark {
@Benchmark
fun ejmlInverse(blackhole: Blackhole) {
- with(EjmlLinearSpace) {
- blackhole.consume(inverse(matrix))
+ with(EjmlLinearSpaceDDRM) {
+ blackhole.consume(matrix.getFeature>()?.inverse)
}
}
}
diff --git a/build.gradle.kts b/build.gradle.kts
index 4e0b6f256..4de6d8bad 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -1,44 +1,37 @@
plugins {
id("ru.mipt.npm.gradle.project")
+ kotlin("jupyter.api") apply false
}
allprojects {
repositories {
- jcenter()
maven("https://clojars.org/repo")
- maven("https://dl.bintray.com/egor-bogomolov/astminer/")
- maven("https://dl.bintray.com/hotkeytlt/maven")
maven("https://jitpack.io")
- maven("http://logicrunch.research.it.uu.se/maven/") {
+ maven("http://logicrunch.research.it.uu.se/maven") {
isAllowInsecureProtocol = true
}
+ maven("https://oss.sonatype.org/content/repositories/snapshots")
mavenCentral()
}
group = "space.kscience"
- version = "0.3.0-dev-7"
+ version = "0.3.0-dev-13"
}
subprojects {
if (name.startsWith("kmath")) apply()
afterEvaluate {
- tasks.withType {
- dokkaSourceSets.all {
- val readmeFile = File(this@subprojects.projectDir, "./README.md")
- if (readmeFile.exists())
- includes.setFrom(includes + readmeFile.absolutePath)
+ tasks.withType {
+ dependsOn(tasks.getByName("assemble"))
- arrayOf(
- "http://ejml.org/javadoc/",
- "https://commons.apache.org/proper/commons-math/javadocs/api-3.6.1/",
- "https://deeplearning4j.org/api/latest/"
- ).map { java.net.URL("${it}package-list") to java.net.URL(it) }.forEach { (a, b) ->
- externalDocumentationLink {
- packageListUrl.set(a)
- url.set(b)
- }
- }
+ dokkaSourceSets.all {
+ val readmeFile = File(this@subprojects.projectDir, "README.md")
+ if (readmeFile.exists()) includes.setFrom(includes + readmeFile.absolutePath)
+ externalDocumentationLink("http://ejml.org/javadoc/")
+ externalDocumentationLink("https://commons.apache.org/proper/commons-math/javadocs/api-3.6.1/")
+ externalDocumentationLink("https://deeplearning4j.org/api/latest/")
+ externalDocumentationLink("https://kotlin.github.io/kotlinx.coroutines/kotlinx-coroutines-core/")
}
}
}
diff --git a/buildSrc/build.gradle.kts b/buildSrc/build.gradle.kts
new file mode 100644
index 000000000..7ca4df19d
--- /dev/null
+++ b/buildSrc/build.gradle.kts
@@ -0,0 +1,5 @@
+plugins {
+ `kotlin-dsl`
+}
+
+repositories.mavenCentral()
diff --git a/buildSrc/src/main/kotlin/space/kscience/kmath/ejml/codegen/ejmlCodegen.kt b/buildSrc/src/main/kotlin/space/kscience/kmath/ejml/codegen/ejmlCodegen.kt
new file mode 100644
index 000000000..5da7d0f67
--- /dev/null
+++ b/buildSrc/src/main/kotlin/space/kscience/kmath/ejml/codegen/ejmlCodegen.kt
@@ -0,0 +1,425 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+@file:Suppress("KDocUnresolvedReference")
+
+package space.kscience.kmath.ejml.codegen
+
+import org.intellij.lang.annotations.Language
+import java.io.File
+
+private fun Appendable.appendEjmlVector(type: String, ejmlMatrixType: String) {
+ @Language("kotlin") val text = """/**
+ * [EjmlVector] specialization for [$type].
+ */
+public class Ejml${type}Vector(public override val origin: M) : EjmlVector<$type, M>(origin) {
+ init {
+ require(origin.numRows == 1) { "The origin matrix must have only one row to form a vector" }
+ }
+
+ public override operator fun get(index: Int): $type = origin[0, index]
+}"""
+ appendLine(text)
+ appendLine()
+}
+
+private fun Appendable.appendEjmlMatrix(type: String, ejmlMatrixType: String) {
+ val text = """/**
+ * [EjmlMatrix] specialization for [$type].
+ */
+public class Ejml${type}Matrix(public override val origin: M) : EjmlMatrix<$type, M>(origin) {
+ public override operator fun get(i: Int, j: Int): $type = origin[i, j]
+}"""
+ appendLine(text)
+ appendLine()
+}
+
+private fun Appendable.appendEjmlLinearSpace(
+ type: String,
+ kmathAlgebra: String,
+ ejmlMatrixParentTypeMatrix: String,
+ ejmlMatrixType: String,
+ ejmlMatrixDenseType: String,
+ ops: String,
+ denseOps: String,
+ isDense: Boolean,
+) {
+ @Language("kotlin") val text = """/**
+ * [EjmlLinearSpace] implementation based on [CommonOps_$ops], [DecompositionFactory_${ops}] operations and
+ * [${ejmlMatrixType}] matrices.
+ */
+public object EjmlLinearSpace${ops} : EjmlLinearSpace<${type}, ${kmathAlgebra}, $ejmlMatrixType>() {
+ /**
+ * The [${kmathAlgebra}] reference.
+ */
+ public override val elementAlgebra: $kmathAlgebra get() = $kmathAlgebra
+
+ @Suppress("UNCHECKED_CAST")
+ public override fun Matrix<${type}>.toEjml(): Ejml${type}Matrix<${ejmlMatrixType}> = when {
+ this is Ejml${type}Matrix<*> && origin is $ejmlMatrixType -> this as Ejml${type}Matrix<${ejmlMatrixType}>
+ else -> buildMatrix(rowNum, colNum) { i, j -> get(i, j) }
+ }
+
+ @Suppress("UNCHECKED_CAST")
+ public override fun Point<${type}>.toEjml(): Ejml${type}Vector<${ejmlMatrixType}> = when {
+ this is Ejml${type}Vector<*> && origin is $ejmlMatrixType -> this as Ejml${type}Vector<${ejmlMatrixType}>
+ else -> Ejml${type}Vector(${ejmlMatrixType}(size, 1).also {
+ (0 until it.numRows).forEach { row -> it[row, 0] = get(row) }
+ })
+ }
+
+ public override fun buildMatrix(
+ rows: Int,
+ columns: Int,
+ initializer: ${kmathAlgebra}.(i: Int, j: Int) -> ${type},
+ ): Ejml${type}Matrix<${ejmlMatrixType}> = ${ejmlMatrixType}(rows, columns).also {
+ (0 until rows).forEach { row ->
+ (0 until columns).forEach { col -> it[row, col] = elementAlgebra.initializer(row, col) }
+ }
+ }.wrapMatrix()
+
+ public override fun buildVector(
+ size: Int,
+ initializer: ${kmathAlgebra}.(Int) -> ${type},
+ ): Ejml${type}Vector<${ejmlMatrixType}> = Ejml${type}Vector(${ejmlMatrixType}(size, 1).also {
+ (0 until it.numRows).forEach { row -> it[row, 0] = elementAlgebra.initializer(row) }
+ })
+
+ private fun T.wrapMatrix() = Ejml${type}Matrix(this)
+ private fun T.wrapVector() = Ejml${type}Vector(this)
+
+ public override fun Matrix<${type}>.unaryMinus(): Matrix<${type}> = this * elementAlgebra { -one }
+
+ public override fun Matrix<${type}>.dot(other: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
+ val out = ${ejmlMatrixType}(1, 1)
+ CommonOps_${ops}.mult(toEjml().origin, other.toEjml().origin, out)
+ return out.wrapMatrix()
+ }
+
+ public override fun Matrix<${type}>.dot(vector: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
+ val out = ${ejmlMatrixType}(1, 1)
+ CommonOps_${ops}.mult(toEjml().origin, vector.toEjml().origin, out)
+ return out.wrapVector()
+ }
+
+ public override operator fun Matrix<${type}>.minus(other: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
+ val out = ${ejmlMatrixType}(1, 1)
+
+ CommonOps_${ops}.add(
+ elementAlgebra.one,
+ toEjml().origin,
+ elementAlgebra { -one },
+ other.toEjml().origin,
+ out,${
+ if (isDense) "" else
+ """
+ null,
+ null,"""
+ }
+ )
+
+ return out.wrapMatrix()
+ }
+
+ public override operator fun Matrix<${type}>.times(value: ${type}): Ejml${type}Matrix<${ejmlMatrixType}> {
+ val res = ${ejmlMatrixType}(1, 1)
+ CommonOps_${ops}.scale(value, toEjml().origin, res)
+ return res.wrapMatrix()
+ }
+
+ public override fun Point<${type}>.unaryMinus(): Ejml${type}Vector<${ejmlMatrixType}> {
+ val res = ${ejmlMatrixType}(1, 1)
+ CommonOps_${ops}.changeSign(toEjml().origin, res)
+ return res.wrapVector()
+ }
+
+ public override fun Matrix<${type}>.plus(other: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
+ val out = ${ejmlMatrixType}(1, 1)
+
+ CommonOps_${ops}.add(
+ elementAlgebra.one,
+ toEjml().origin,
+ elementAlgebra.one,
+ other.toEjml().origin,
+ out,${
+ if (isDense) "" else
+ """
+ null,
+ null,"""
+ }
+ )
+
+ return out.wrapMatrix()
+ }
+
+ public override fun Point<${type}>.plus(other: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
+ val out = ${ejmlMatrixType}(1, 1)
+
+ CommonOps_${ops}.add(
+ elementAlgebra.one,
+ toEjml().origin,
+ elementAlgebra.one,
+ other.toEjml().origin,
+ out,${
+ if (isDense) "" else
+ """
+ null,
+ null,"""
+ }
+ )
+
+ return out.wrapVector()
+ }
+
+ public override fun Point<${type}>.minus(other: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
+ val out = ${ejmlMatrixType}(1, 1)
+
+ CommonOps_${ops}.add(
+ elementAlgebra.one,
+ toEjml().origin,
+ elementAlgebra { -one },
+ other.toEjml().origin,
+ out,${
+ if (isDense) "" else
+ """
+ null,
+ null,"""
+ }
+ )
+
+ return out.wrapVector()
+ }
+
+ public override fun ${type}.times(m: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> = m * this
+
+ public override fun Point<${type}>.times(value: ${type}): Ejml${type}Vector<${ejmlMatrixType}> {
+ val res = ${ejmlMatrixType}(1, 1)
+ CommonOps_${ops}.scale(value, toEjml().origin, res)
+ return res.wrapVector()
+ }
+
+ public override fun ${type}.times(v: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> = v * this
+
+ @UnstableKMathAPI
+ public override fun getFeature(structure: Matrix<${type}>, type: KClass): F? {
+ structure.getFeature(type)?.let { return it }
+ val origin = structure.toEjml().origin
+
+ return when (type) {
+ ${
+ if (isDense)
+ """ InverseMatrixFeature::class -> object : InverseMatrixFeature<${type}> {
+ override val inverse: Matrix<${type}> by lazy {
+ val res = origin.copy()
+ CommonOps_${ops}.invert(res)
+ res.wrapMatrix()
+ }
+ }
+
+ DeterminantFeature::class -> object : DeterminantFeature<${type}> {
+ override val determinant: $type by lazy { CommonOps_${ops}.det(origin) }
+ }
+
+ SingularValueDecompositionFeature::class -> object : SingularValueDecompositionFeature<${type}> {
+ private val svd by lazy {
+ DecompositionFactory_${ops}.svd(origin.numRows, origin.numCols, true, true, false)
+ .apply { decompose(origin.copy()) }
+ }
+
+ override val u: Matrix<${type}> by lazy { svd.getU(null, false).wrapMatrix() }
+ override val s: Matrix<${type}> by lazy { svd.getW(null).wrapMatrix() }
+ override val v: Matrix<${type}> by lazy { svd.getV(null, false).wrapMatrix() }
+ override val singularValues: Point<${type}> by lazy { ${type}Buffer(svd.singularValues) }
+ }
+
+ QRDecompositionFeature::class -> object : QRDecompositionFeature<${type}> {
+ private val qr by lazy {
+ DecompositionFactory_${ops}.qr().apply { decompose(origin.copy()) }
+ }
+
+ override val q: Matrix<${type}> by lazy {
+ qr.getQ(null, false).wrapMatrix() + OrthogonalFeature
+ }
+
+ override val r: Matrix<${type}> by lazy { qr.getR(null, false).wrapMatrix() + UFeature }
+ }
+
+ CholeskyDecompositionFeature::class -> object : CholeskyDecompositionFeature<${type}> {
+ override val l: Matrix<${type}> by lazy {
+ val cholesky =
+ DecompositionFactory_${ops}.chol(structure.rowNum, true).apply { decompose(origin.copy()) }
+
+ cholesky.getT(null).wrapMatrix() + LFeature
+ }
+ }
+
+ LupDecompositionFeature::class -> object : LupDecompositionFeature<${type}> {
+ private val lup by lazy {
+ DecompositionFactory_${ops}.lu(origin.numRows, origin.numCols).apply { decompose(origin.copy()) }
+ }
+
+ override val l: Matrix<${type}> by lazy {
+ lup.getLower(null).wrapMatrix() + LFeature
+ }
+
+ override val u: Matrix<${type}> by lazy {
+ lup.getUpper(null).wrapMatrix() + UFeature
+ }
+
+ override val p: Matrix<${type}> by lazy { lup.getRowPivot(null).wrapMatrix() }
+ }""" else """ QRDecompositionFeature::class -> object : QRDecompositionFeature<$type> {
+ private val qr by lazy {
+ DecompositionFactory_${ops}.qr(FillReducing.NONE).apply { decompose(origin.copy()) }
+ }
+
+ override val q: Matrix<${type}> by lazy {
+ qr.getQ(null, false).wrapMatrix() + OrthogonalFeature
+ }
+
+ override val r: Matrix<${type}> by lazy { qr.getR(null, false).wrapMatrix() + UFeature }
+ }
+
+ CholeskyDecompositionFeature::class -> object : CholeskyDecompositionFeature<${type}> {
+ override val l: Matrix<${type}> by lazy {
+ val cholesky =
+ DecompositionFactory_${ops}.cholesky().apply { decompose(origin.copy()) }
+
+ (cholesky.getT(null) as ${ejmlMatrixParentTypeMatrix}).wrapMatrix() + LFeature
+ }
+ }
+
+ LUDecompositionFeature::class, DeterminantFeature::class, InverseMatrixFeature::class -> object :
+ LUDecompositionFeature<${type}>, DeterminantFeature<${type}>, InverseMatrixFeature<${type}> {
+ private val lu by lazy {
+ DecompositionFactory_${ops}.lu(FillReducing.NONE).apply { decompose(origin.copy()) }
+ }
+
+ override val l: Matrix<${type}> by lazy {
+ lu.getLower(null).wrapMatrix() + LFeature
+ }
+
+ override val u: Matrix<${type}> by lazy {
+ lu.getUpper(null).wrapMatrix() + UFeature
+ }
+
+ override val inverse: Matrix<${type}> by lazy {
+ var a = origin
+ val inverse = ${ejmlMatrixDenseType}(1, 1)
+ val solver = LinearSolverFactory_${ops}.lu(FillReducing.NONE)
+ if (solver.modifiesA()) a = a.copy()
+ val i = CommonOps_${denseOps}.identity(a.numRows)
+ solver.solve(i, inverse)
+ inverse.wrapMatrix()
+ }
+
+ override val determinant: $type by lazy { elementAlgebra.number(lu.computeDeterminant().real) }
+ }"""
+ }
+
+ else -> null
+ }?.let(type::cast)
+ }
+
+ /**
+ * Solves for *x* in the following equation: *x = [a] -1 · [b]*.
+ *
+ * @param a the base matrix.
+ * @param b n by p matrix.
+ * @return the solution for *x* that is n by p.
+ */
+ public fun solve(a: Matrix<${type}>, b: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
+ val res = ${ejmlMatrixType}(1, 1)
+ CommonOps_${ops}.solve(${ejmlMatrixType}(a.toEjml().origin), ${ejmlMatrixType}(b.toEjml().origin), res)
+ return res.wrapMatrix()
+ }
+
+ /**
+ * Solves for *x* in the following equation: *x = [a] -1 · [b]*.
+ *
+ * @param a the base matrix.
+ * @param b n by p vector.
+ * @return the solution for *x* that is n by p.
+ */
+ public fun solve(a: Matrix<${type}>, b: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
+ val res = ${ejmlMatrixType}(1, 1)
+ CommonOps_${ops}.solve(${ejmlMatrixType}(a.toEjml().origin), ${ejmlMatrixType}(b.toEjml().origin), res)
+ return Ejml${type}Vector(res)
+ }
+}"""
+ appendLine(text)
+ appendLine()
+}
+
+
+/**
+ * Generates routine EJML classes.
+ */
+fun ejmlCodegen(outputFile: String): Unit = File(outputFile).run {
+ parentFile.mkdirs()
+
+ writer().use {
+ it.appendLine("/*")
+ it.appendLine(" * Copyright 2018-2021 KMath contributors.")
+ it.appendLine(" * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.")
+ it.appendLine(" */")
+ it.appendLine()
+ it.appendLine("/* This file is generated with buildSrc/src/main/kotlin/space/kscience/kmath/ejml/codegen/ejmlCodegen.kt */")
+ it.appendLine()
+ it.appendLine("package space.kscience.kmath.ejml")
+ it.appendLine()
+ it.appendLine("""import org.ejml.data.*
+import org.ejml.dense.row.CommonOps_DDRM
+import org.ejml.dense.row.CommonOps_FDRM
+import org.ejml.dense.row.factory.DecompositionFactory_DDRM
+import org.ejml.dense.row.factory.DecompositionFactory_FDRM
+import org.ejml.sparse.FillReducing
+import org.ejml.sparse.csc.CommonOps_DSCC
+import org.ejml.sparse.csc.CommonOps_FSCC
+import org.ejml.sparse.csc.factory.DecompositionFactory_DSCC
+import org.ejml.sparse.csc.factory.DecompositionFactory_FSCC
+import org.ejml.sparse.csc.factory.LinearSolverFactory_DSCC
+import org.ejml.sparse.csc.factory.LinearSolverFactory_FSCC
+import space.kscience.kmath.linear.*
+import space.kscience.kmath.linear.Matrix
+import space.kscience.kmath.misc.UnstableKMathAPI
+import space.kscience.kmath.nd.StructureFeature
+import space.kscience.kmath.operations.DoubleField
+import space.kscience.kmath.operations.FloatField
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.structures.DoubleBuffer
+import space.kscience.kmath.structures.FloatBuffer
+import kotlin.reflect.KClass
+import kotlin.reflect.cast""")
+ it.appendLine()
+ it.appendEjmlVector("Double", "DMatrix")
+ it.appendEjmlVector("Float", "FMatrix")
+ it.appendEjmlMatrix("Double", "DMatrix")
+ it.appendEjmlMatrix("Float", "FMatrix")
+ it.appendEjmlLinearSpace("Double", "DoubleField", "DMatrix", "DMatrixRMaj", "DMatrixRMaj", "DDRM", "DDRM", true)
+ it.appendEjmlLinearSpace("Float", "FloatField", "FMatrix", "FMatrixRMaj", "FMatrixRMaj", "FDRM", "FDRM", true)
+
+ it.appendEjmlLinearSpace(
+ type = "Double",
+ kmathAlgebra = "DoubleField",
+ ejmlMatrixParentTypeMatrix = "DMatrix",
+ ejmlMatrixType = "DMatrixSparseCSC",
+ ejmlMatrixDenseType = "DMatrixRMaj",
+ ops = "DSCC",
+ denseOps = "DDRM",
+ isDense = false,
+ )
+
+ it.appendEjmlLinearSpace(
+ type = "Float",
+ kmathAlgebra = "FloatField",
+ ejmlMatrixParentTypeMatrix = "FMatrix",
+ ejmlMatrixType = "FMatrixSparseCSC",
+ ejmlMatrixDenseType = "FMatrixRMaj",
+ ops = "FSCC",
+ denseOps = "FDRM",
+ isDense = false,
+ )
+ }
+}
diff --git a/docs/templates/ARTIFACT-TEMPLATE.md b/docs/templates/ARTIFACT-TEMPLATE.md
index 01d9c51da..1bac2a8ff 100644
--- a/docs/templates/ARTIFACT-TEMPLATE.md
+++ b/docs/templates/ARTIFACT-TEMPLATE.md
@@ -6,8 +6,7 @@ The Maven coordinates of this project are `${group}:${name}:${version}`.
```gradle
repositories {
maven { url 'https://repo.kotlin.link' }
- maven { url 'https://dl.bintray.com/hotkeytlt/maven' }
- maven { url "https://dl.bintray.com/kotlin/kotlin-eap" } // include for builds based on kotlin-eap
+ mavenCentral()
}
dependencies {
@@ -18,8 +17,7 @@ dependencies {
```kotlin
repositories {
maven("https://repo.kotlin.link")
- maven("https://dl.bintray.com/kotlin/kotlin-eap") // include for builds based on kotlin-eap
- maven("https://dl.bintray.com/hotkeytlt/maven") // required for a
+ mavenCentral()
}
dependencies {
diff --git a/docs/templates/README-TEMPLATE.md b/docs/templates/README-TEMPLATE.md
index 99951b4d6..6bb1e9085 100644
--- a/docs/templates/README-TEMPLATE.md
+++ b/docs/templates/README-TEMPLATE.md
@@ -40,7 +40,7 @@ KMath is a modular library. Different modules provide different features with di
* **PROTOTYPE**. On this level there are no compatibility guarantees. All methods and classes form those modules could break any moment. You can still use it, but be sure to fix the specific version.
* **EXPERIMENTAL**. The general API is decided, but some changes could be made. Volatile API is marked with `@UnstableKmathAPI` or other stability warning annotations.
-* **DEVELOPMENT**. API breaking genrally follows semantic versioning ideology. There could be changes in minor versions, but not in patch versions. API is protected with [binary-compatibility-validator](https://github.com/Kotlin/binary-compatibility-validator) tool.
+* **DEVELOPMENT**. API breaking generally follows semantic versioning ideology. There could be changes in minor versions, but not in patch versions. API is protected with [binary-compatibility-validator](https://github.com/Kotlin/binary-compatibility-validator) tool.
* **STABLE**. The API stabilized. Breaking changes are allowed only in major releases.
diff --git a/examples/build.gradle.kts b/examples/build.gradle.kts
index 56feee9dc..d095db1ba 100644
--- a/examples/build.gradle.kts
+++ b/examples/build.gradle.kts
@@ -4,14 +4,11 @@ plugins {
repositories {
mavenCentral()
- jcenter()
maven("https://repo.kotlin.link")
maven("https://clojars.org/repo")
- maven("https://dl.bintray.com/egor-bogomolov/astminer/")
- maven("https://dl.bintray.com/hotkeytlt/maven")
maven("https://jitpack.io")
- maven{
- setUrl("http://logicrunch.research.it.uu.se/maven/")
+ maven("https://maven.pkg.jetbrains.space/kotlin/p/kotlin/kotlin-js-wrappers")
+ maven("http://logicrunch.research.it.uu.se/maven") {
isAllowInsecureProtocol = true
}
}
@@ -28,6 +25,7 @@ dependencies {
implementation(project(":kmath-dimensions"))
implementation(project(":kmath-ejml"))
implementation(project(":kmath-nd4j"))
+ implementation(project(":kmath-tensors"))
implementation(project(":kmath-for-real"))
@@ -45,7 +43,7 @@ dependencies {
implementation("org.slf4j:slf4j-simple:1.7.30")
// plotting
- implementation("space.kscience:plotlykt-server:0.4.0-dev-2")
+ implementation("space.kscience:plotlykt-server:0.4.0")
}
kotlin.sourceSets.all {
@@ -59,7 +57,7 @@ kotlin.sourceSets.all {
tasks.withType {
kotlinOptions{
jvmTarget = "11"
- freeCompilerArgs = freeCompilerArgs + "-Xjvm-default=all"
+ freeCompilerArgs = freeCompilerArgs + "-Xjvm-default=all" + "-Xopt-in=kotlin.RequiresOptIn"
}
}
diff --git a/examples/src/main/kotlin/space/kscience/kmath/ast/expressions.kt b/examples/src/main/kotlin/space/kscience/kmath/ast/expressions.kt
index 918134e04..d5a82590f 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/ast/expressions.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/ast/expressions.kt
@@ -6,8 +6,8 @@
package space.kscience.kmath.ast
import space.kscience.kmath.expressions.MstField
+import space.kscience.kmath.expressions.Symbol.Companion.x
import space.kscience.kmath.expressions.interpret
-import space.kscience.kmath.misc.Symbol.Companion.x
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.bindSymbol
import space.kscience.kmath.operations.invoke
diff --git a/examples/src/main/kotlin/space/kscience/kmath/ast/kotlingradSupport.kt b/examples/src/main/kotlin/space/kscience/kmath/ast/kotlingradSupport.kt
index 25f42f5a9..420b23f9f 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/ast/kotlingradSupport.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/ast/kotlingradSupport.kt
@@ -8,8 +8,8 @@ package space.kscience.kmath.ast
import space.kscience.kmath.asm.compileToExpression
import space.kscience.kmath.expressions.derivative
import space.kscience.kmath.expressions.invoke
+import space.kscience.kmath.expressions.symbol
import space.kscience.kmath.kotlingrad.toDiffExpression
-import space.kscience.kmath.misc.symbol
import space.kscience.kmath.operations.DoubleField
/**
diff --git a/examples/src/main/kotlin/space/kscience/kmath/commons/fit/fitWithAutoDiff.kt b/examples/src/main/kotlin/space/kscience/kmath/commons/fit/fitWithAutoDiff.kt
index 028985260..5e64235e3 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/commons/fit/fitWithAutoDiff.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/commons/fit/fitWithAutoDiff.kt
@@ -10,7 +10,7 @@ import kotlinx.html.h3
import space.kscience.kmath.commons.optimization.chiSquared
import space.kscience.kmath.commons.optimization.minimize
import space.kscience.kmath.distributions.NormalDistribution
-import space.kscience.kmath.misc.symbol
+import space.kscience.kmath.expressions.symbol
import space.kscience.kmath.optimization.FunctionOptimization
import space.kscience.kmath.optimization.OptimizationResult
import space.kscience.kmath.real.DoubleVector
diff --git a/examples/src/main/kotlin/space/kscience/kmath/functions/integrate.kt b/examples/src/main/kotlin/space/kscience/kmath/functions/integrate.kt
index 7cdf7bef6..f60b1ab45 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/functions/integrate.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/functions/integrate.kt
@@ -5,7 +5,8 @@
package space.kscience.kmath.functions
-import space.kscience.kmath.integration.process
+import space.kscience.kmath.integration.gaussIntegrator
+import space.kscience.kmath.integration.integrate
import space.kscience.kmath.integration.value
import space.kscience.kmath.operations.DoubleField
import kotlin.math.pow
@@ -15,7 +16,7 @@ fun main() {
val function: UnivariateFunction = { x -> 3 * x.pow(2) + 2 * x + 1 }
//get the result of the integration
- val result = DoubleField.process(0.0..10.0, function = function)
+ val result = DoubleField.gaussIntegrator.integrate(0.0..10.0, function = function)
//the value is nullable because in some cases the integration could not succeed
println(result.value)
diff --git a/examples/src/main/kotlin/space/kscience/kmath/functions/interpolate.kt b/examples/src/main/kotlin/space/kscience/kmath/functions/interpolate.kt
new file mode 100644
index 000000000..8dbc7b7a4
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/functions/interpolate.kt
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.functions
+
+import space.kscience.kmath.interpolation.SplineInterpolator
+import space.kscience.kmath.interpolation.interpolatePolynomials
+import space.kscience.kmath.operations.DoubleField
+import space.kscience.kmath.structures.DoubleBuffer
+import space.kscience.plotly.Plotly
+import space.kscience.plotly.UnstablePlotlyAPI
+import space.kscience.plotly.makeFile
+import space.kscience.plotly.models.functionXY
+import space.kscience.plotly.scatter
+import kotlin.math.PI
+import kotlin.math.sin
+
+@OptIn(UnstablePlotlyAPI::class)
+fun main() {
+ val data = (0..10).map {
+ val x = it.toDouble() / 5 * PI
+ x to sin(x)
+ }
+
+ val polynomial: PiecewisePolynomial = SplineInterpolator(
+ DoubleField, ::DoubleBuffer
+ ).interpolatePolynomials(data)
+
+ val function = polynomial.asFunction(DoubleField, 0.0)
+
+ val cmInterpolate = org.apache.commons.math3.analysis.interpolation.SplineInterpolator().interpolate(
+ data.map { it.first }.toDoubleArray(),
+ data.map { it.second }.toDoubleArray()
+ )
+
+ Plotly.plot {
+ scatter {
+ name = "interpolated"
+ x.numbers = data.map { it.first }
+ y.numbers = x.doubles.map { function(it) }
+ }
+ scatter {
+ name = "original"
+ functionXY(0.0..(2 * PI), 0.1) { sin(it) }
+ }
+ scatter {
+ name = "cm"
+ x.numbers = data.map { it.first }
+ y.numbers = x.doubles.map { cmInterpolate.value(it) }
+ }
+ }.makeFile()
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/functions/interpolateSquare.kt b/examples/src/main/kotlin/space/kscience/kmath/functions/interpolateSquare.kt
new file mode 100644
index 000000000..33973c880
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/functions/interpolateSquare.kt
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.functions
+
+import space.kscience.kmath.interpolation.SplineInterpolator
+import space.kscience.kmath.interpolation.interpolatePolynomials
+import space.kscience.kmath.operations.DoubleField
+import space.kscience.kmath.real.step
+import space.kscience.kmath.structures.map
+import space.kscience.plotly.Plotly
+import space.kscience.plotly.UnstablePlotlyAPI
+import space.kscience.plotly.makeFile
+import space.kscience.plotly.models.functionXY
+import space.kscience.plotly.scatter
+
+@OptIn(UnstablePlotlyAPI::class)
+fun main() {
+ val function: UnivariateFunction = { x ->
+ if (x in 30.0..50.0) {
+ 1.0
+ } else {
+ 0.0
+ }
+ }
+ val xs = 0.0..100.0 step 0.5
+ val ys = xs.map(function)
+
+ val polynomial: PiecewisePolynomial = SplineInterpolator.double.interpolatePolynomials(xs, ys)
+
+ val polyFunction = polynomial.asFunction(DoubleField, 0.0)
+
+ Plotly.plot {
+ scatter {
+ name = "interpolated"
+ functionXY(25.0..55.0, 0.1) { polyFunction(it) }
+ }
+ scatter {
+ name = "original"
+ functionXY(25.0..55.0, 0.1) { function(it) }
+ }
+ }.makeFile()
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/functions/matrixIntegration.kt b/examples/src/main/kotlin/space/kscience/kmath/functions/matrixIntegration.kt
index 206ba3054..2619d3d74 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/functions/matrixIntegration.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/functions/matrixIntegration.kt
@@ -5,7 +5,8 @@
package space.kscience.kmath.functions
-import space.kscience.kmath.integration.process
+import space.kscience.kmath.integration.gaussIntegrator
+import space.kscience.kmath.integration.integrate
import space.kscience.kmath.integration.value
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.nd.nd
@@ -24,7 +25,7 @@ fun main(): Unit = DoubleField {
val function: (Double) -> StructureND = { x: Double -> 3 * number(x).pow(2) + 2 * diagonal(x) + 1 }
//get the result of the integration
- val result = process(0.0..10.0, function = function)
+ val result = gaussIntegrator.integrate(0.0..10.0, function = function)
//the value is nullable because in some cases the integration could not succeed
println(result.value)
diff --git a/examples/src/main/kotlin/space/kscience/kmath/structures/NDField.kt b/examples/src/main/kotlin/space/kscience/kmath/structures/NDField.kt
index cc1f5f680..501bf98db 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/structures/NDField.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/structures/NDField.kt
@@ -5,6 +5,7 @@
package space.kscience.kmath.structures
+import kotlinx.coroutines.DelicateCoroutinesApi
import kotlinx.coroutines.GlobalScope
import org.nd4j.linalg.factory.Nd4j
import space.kscience.kmath.nd.*
@@ -22,6 +23,7 @@ internal inline fun measureAndPrint(title: String, block: () -> Unit) {
println("$title completed in $time millis")
}
+@OptIn(DelicateCoroutinesApi::class)
fun main() {
// initializing Nd4j
Nd4j.zeros(0)
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt
new file mode 100644
index 000000000..74795cc68
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+
+
+// Dataset normalization
+
+fun main() = BroadcastDoubleTensorAlgebra { // work in context with broadcast methods
+ // take dataset of 5-element vectors from normal distribution
+ val dataset = randomNormal(intArrayOf(100, 5)) * 1.5 // all elements from N(0, 1.5)
+
+ dataset += fromArray(
+ intArrayOf(5),
+ doubleArrayOf(0.0, 1.0, 1.5, 3.0, 5.0) // rows means
+ )
+
+
+ // find out mean and standard deviation of each column
+ val mean = dataset.mean(0, false)
+ val std = dataset.std(0, false)
+
+ println("Mean:\n$mean")
+ println("Standard deviation:\n$std")
+
+ // also we can calculate other statistics such as minimum and maximum of rows
+ println("Minimum:\n${dataset.min(0, false)}")
+ println("Maximum:\n${dataset.max(0, false)}")
+
+ // now we can scale dataset with mean normalization
+ val datasetScaled = (dataset - mean) / std
+
+ // find out mean and std of scaled dataset
+
+ println("Mean of scaled:\n${datasetScaled.mean(0, false)}")
+ println("Standard deviation of scaled:\n${datasetScaled.std(0, false)}")
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt
new file mode 100644
index 000000000..6453ca44e
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.DoubleTensor
+
+// solving linear system with LUP decomposition
+
+fun main() = BroadcastDoubleTensorAlgebra {// work in context with linear operations
+
+ // set true value of x
+ val trueX = fromArray(
+ intArrayOf(4),
+ doubleArrayOf(-2.0, 1.5, 6.8, -2.4)
+ )
+
+ // and A matrix
+ val a = fromArray(
+ intArrayOf(4, 4),
+ doubleArrayOf(
+ 0.5, 10.5, 4.5, 1.0,
+ 8.5, 0.9, 12.8, 0.1,
+ 5.56, 9.19, 7.62, 5.45,
+ 1.0, 2.0, -3.0, -2.5
+ )
+ )
+
+ // calculate y value
+ val b = a dot trueX
+
+ // check out A and b
+ println("A:\n$a")
+ println("b:\n$b")
+
+ // solve `Ax = b` system using LUP decomposition
+
+ // get P, L, U such that PA = LU
+ val (p, l, u) = a.lu()
+
+ // check that P is permutation matrix
+ println("P:\n$p")
+ // L is lower triangular matrix and U is upper triangular matrix
+ println("L:\n$l")
+ println("U:\n$u")
+ // and PA = LU
+ println("PA:\n${p dot a}")
+ println("LU:\n${l dot u}")
+
+ /* Ax = b;
+ PAx = Pb;
+ LUx = Pb;
+ let y = Ux, then
+ Ly = Pb -- this system can be easily solved, since the matrix L is lower triangular;
+ Ux = y can be solved the same way, since the matrix U is upper triangular
+ */
+
+
+
+ // this function returns solution x of a system lx = b, l should be lower triangular
+ fun solveLT(l: DoubleTensor, b: DoubleTensor): DoubleTensor {
+ val n = l.shape[0]
+ val x = zeros(intArrayOf(n))
+ for (i in 0 until n) {
+ x[intArrayOf(i)] = (b[intArrayOf(i)] - l[i].dot(x).value()) / l[intArrayOf(i, i)]
+ }
+ return x
+ }
+
+ val y = solveLT(l, p dot b)
+
+ // solveLT(l, b) function can be easily adapted for upper triangular matrix by the permutation matrix revMat
+ // create it by placing ones on side diagonal
+ val revMat = u.zeroesLike()
+ val n = revMat.shape[0]
+ for (i in 0 until n) {
+ revMat[intArrayOf(i, n - 1 - i)] = 1.0
+ }
+
+ // solution of system ux = b, u should be upper triangular
+ fun solveUT(u: DoubleTensor, b: DoubleTensor): DoubleTensor = revMat dot solveLT(
+ revMat dot u dot revMat, revMat dot b
+ )
+
+ val x = solveUT(u, y)
+
+ println("True x:\n$trueX")
+ println("x found with LU method:\n$x")
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt
new file mode 100644
index 000000000..b262bee02
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.DoubleTensor
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.toDoubleArray
+import kotlin.math.sqrt
+
+const val seed = 100500L
+
+// Simple feedforward neural network with backpropagation training
+
+// interface of network layer
+interface Layer {
+ fun forward(input: DoubleTensor): DoubleTensor
+ fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor
+}
+
+// activation layer
+open class Activation(
+ val activation: (DoubleTensor) -> DoubleTensor,
+ val activationDer: (DoubleTensor) -> DoubleTensor,
+) : Layer {
+ override fun forward(input: DoubleTensor): DoubleTensor {
+ return activation(input)
+ }
+
+ override fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor {
+ return DoubleTensorAlgebra { outputError * activationDer(input) }
+ }
+}
+
+fun relu(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ x.map { if (it > 0) it else 0.0 }
+}
+
+fun reluDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ x.map { if (it > 0) 1.0 else 0.0 }
+}
+
+// activation layer with relu activator
+class ReLU : Activation(::relu, ::reluDer)
+
+fun sigmoid(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ 1.0 / (1.0 + (-x).exp())
+}
+
+fun sigmoidDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ sigmoid(x) * (1.0 - sigmoid(x))
+}
+
+// activation layer with sigmoid activator
+class Sigmoid : Activation(::sigmoid, ::sigmoidDer)
+
+// dense layer
+class Dense(
+ private val inputUnits: Int,
+ private val outputUnits: Int,
+ private val learningRate: Double = 0.1,
+) : Layer {
+
+ private val weights: DoubleTensor = DoubleTensorAlgebra {
+ randomNormal(
+ intArrayOf(inputUnits, outputUnits),
+ seed
+ ) * sqrt(2.0 / (inputUnits + outputUnits))
+ }
+
+ private val bias: DoubleTensor = DoubleTensorAlgebra { zeros(intArrayOf(outputUnits)) }
+
+ override fun forward(input: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra {
+ (input dot weights) + bias
+ }
+
+ override fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ val gradInput = outputError dot weights.transpose()
+
+ val gradW = input.transpose() dot outputError
+ val gradBias = outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble()
+
+ weights -= learningRate * gradW
+ bias -= learningRate * gradBias
+
+ gradInput
+ }
+
+}
+
+// simple accuracy equal to the proportion of correct answers
+fun accuracy(yPred: DoubleTensor, yTrue: DoubleTensor): Double {
+ check(yPred.shape contentEquals yTrue.shape)
+ val n = yPred.shape[0]
+ var correctCnt = 0
+ for (i in 0 until n) {
+ if (yPred[intArrayOf(i, 0)] == yTrue[intArrayOf(i, 0)]) {
+ correctCnt += 1
+ }
+ }
+ return correctCnt.toDouble() / n.toDouble()
+}
+
+// neural network class
+@OptIn(ExperimentalStdlibApi::class)
+class NeuralNetwork(private val layers: List) {
+ private fun softMaxLoss(yPred: DoubleTensor, yTrue: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra {
+
+ val onesForAnswers = yPred.zeroesLike()
+ yTrue.toDoubleArray().forEachIndexed { index, labelDouble ->
+ val label = labelDouble.toInt()
+ onesForAnswers[intArrayOf(index, label)] = 1.0
+ }
+
+ val softmaxValue = yPred.exp() / yPred.exp().sum(dim = 1, keepDim = true)
+
+ (-onesForAnswers + softmaxValue) / (yPred.shape[0].toDouble())
+ }
+
+
+ private fun forward(x: DoubleTensor): List {
+ var input = x
+
+ return buildList {
+ layers.forEach { layer ->
+ val output = layer.forward(input)
+ add(output)
+ input = output
+ }
+ }
+ }
+
+ private fun train(xTrain: DoubleTensor, yTrain: DoubleTensor) {
+ val layerInputs = buildList {
+ add(xTrain)
+ addAll(forward(xTrain))
+ }
+
+ var lossGrad = softMaxLoss(layerInputs.last(), yTrain)
+
+ layers.zip(layerInputs).reversed().forEach { (layer, input) ->
+ lossGrad = layer.backward(input, lossGrad)
+ }
+ }
+
+ fun fit(xTrain: DoubleTensor, yTrain: DoubleTensor, batchSize: Int, epochs: Int) = DoubleTensorAlgebra {
+ fun iterBatch(x: DoubleTensor, y: DoubleTensor): Sequence> = sequence {
+ val n = x.shape[0]
+ val shuffledIndices = (0 until n).shuffled()
+ for (i in 0 until n step batchSize) {
+ val excerptIndices = shuffledIndices.drop(i).take(batchSize).toIntArray()
+ val batch = x.rowsByIndices(excerptIndices) to y.rowsByIndices(excerptIndices)
+ yield(batch)
+ }
+ }
+
+ for (epoch in 0 until epochs) {
+ println("Epoch ${epoch + 1}/$epochs")
+ for ((xBatch, yBatch) in iterBatch(xTrain, yTrain)) {
+ train(xBatch, yBatch)
+ }
+ println("Accuracy:${accuracy(yTrain, predict(xTrain).argMax(1, true))}")
+ }
+ }
+
+ fun predict(x: DoubleTensor): DoubleTensor {
+ return forward(x).last()
+ }
+
+}
+
+
+@OptIn(ExperimentalStdlibApi::class)
+fun main() = BroadcastDoubleTensorAlgebra {
+ val features = 5
+ val sampleSize = 250
+ val trainSize = 180
+ //val testSize = sampleSize - trainSize
+
+ // take sample of features from normal distribution
+ val x = randomNormal(intArrayOf(sampleSize, features), seed) * 2.5
+
+ x += fromArray(
+ intArrayOf(5),
+ doubleArrayOf(0.0, -1.0, -2.5, -3.0, 5.5) // rows means
+ )
+
+
+ // define class like '1' if the sum of features > 0 and '0' otherwise
+ val y = fromArray(
+ intArrayOf(sampleSize, 1),
+ DoubleArray(sampleSize) { i ->
+ if (x[i].sum() > 0.0) {
+ 1.0
+ } else {
+ 0.0
+ }
+ }
+ )
+
+ // split train and test
+ val trainIndices = (0 until trainSize).toList().toIntArray()
+ val testIndices = (trainSize until sampleSize).toList().toIntArray()
+
+ val xTrain = x.rowsByIndices(trainIndices)
+ val yTrain = y.rowsByIndices(trainIndices)
+
+ val xTest = x.rowsByIndices(testIndices)
+ val yTest = y.rowsByIndices(testIndices)
+
+ // build model
+ val layers = buildList {
+ add(Dense(features, 64))
+ add(ReLU())
+ add(Dense(64, 16))
+ add(ReLU())
+ add(Dense(16, 2))
+ add(Sigmoid())
+ }
+ val model = NeuralNetwork(layers)
+
+ // fit it with train data
+ model.fit(xTrain, yTrain, batchSize = 20, epochs = 10)
+
+ // make prediction
+ val prediction = model.predict(xTest)
+
+ // process raw prediction via argMax
+ val predictionLabels = prediction.argMax(1, true)
+
+ // find out accuracy
+ val acc = accuracy(yTest, predictionLabels)
+ println("Test accuracy:$acc")
+
+}
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
new file mode 100644
index 000000000..b42602988
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.DoubleTensor
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+
+import kotlin.math.abs
+
+// OLS estimator using SVD
+
+fun main() {
+ //seed for random
+ val randSeed = 100500L
+
+ // work in context with linear operations
+ DoubleTensorAlgebra {
+ // take coefficient vector from normal distribution
+ val alpha = randomNormal(
+ intArrayOf(5),
+ randSeed
+ ) + fromArray(
+ intArrayOf(5),
+ doubleArrayOf(1.0, 2.5, 3.4, 5.0, 10.1)
+ )
+
+ println("Real alpha:\n$alpha")
+
+ // also take sample of size 20 from normal distribution for x
+ val x = randomNormal(
+ intArrayOf(20, 5),
+ randSeed
+ )
+
+ // calculate y and add gaussian noise (N(0, 0.05))
+ val y = x dot alpha
+ y += y.randomNormalLike(randSeed) * 0.05
+
+ // now restore the coefficient vector with OLS estimator with SVD
+ val (u, singValues, v) = x.svd()
+
+ // we have to make sure the singular values of the matrix are not close to zero
+ println("Singular values:\n$singValues")
+
+
+ // inverse Sigma matrix can be restored from singular values with diagonalEmbedding function
+ val sigma = diagonalEmbedding(singValues.map{ if (abs(it) < 1e-3) 0.0 else 1.0/it })
+
+ val alphaOLS = v dot sigma dot u.transpose() dot y
+ println("Estimated alpha:\n" +
+ "$alphaOLS")
+
+ // figure out MSE of approximation
+ fun mse(yTrue: DoubleTensor, yPred: DoubleTensor): Double {
+ require(yTrue.shape.size == 1)
+ require(yTrue.shape contentEquals yPred.shape)
+
+ val diff = yTrue - yPred
+ return diff.dot(diff).sqrt().value()
+ }
+
+ println("MSE: ${mse(alpha, alphaOLS)}")
+ }
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
new file mode 100644
index 000000000..411e048d7
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+
+
+// simple PCA
+
+fun main(): Unit = BroadcastDoubleTensorAlgebra { // work in context with broadcast methods
+ val seed = 100500L
+
+ // assume x is range from 0 until 10
+ val x = fromArray(
+ intArrayOf(10),
+ (0 until 10).toList().map { it.toDouble() }.toDoubleArray()
+ )
+
+ // take y dependent on x with noise
+ val y = 2.0 * x + (3.0 + x.randomNormalLike(seed) * 1.5)
+
+ println("x:\n$x")
+ println("y:\n$y")
+
+ // stack them into single dataset
+ val dataset = stack(listOf(x, y)).transpose()
+
+ // normalize both x and y
+ val xMean = x.mean()
+ val yMean = y.mean()
+
+ val xStd = x.std()
+ val yStd = y.std()
+
+ val xScaled = (x - xMean) / xStd
+ val yScaled = (y - yMean) / yStd
+
+ // save means and standard deviations for further recovery
+ val mean = fromArray(
+ intArrayOf(2),
+ doubleArrayOf(xMean, yMean)
+ )
+ println("Means:\n$mean")
+
+ val std = fromArray(
+ intArrayOf(2),
+ doubleArrayOf(xStd, yStd)
+ )
+ println("Standard deviations:\n$std")
+
+ // calculate the covariance matrix of scaled x and y
+ val covMatrix = cov(listOf(xScaled, yScaled))
+ println("Covariance matrix:\n$covMatrix")
+
+ // and find out eigenvector of it
+ val (_, evecs) = covMatrix.symEig()
+ val v = evecs[0]
+ println("Eigenvector:\n$v")
+
+ // reduce dimension of dataset
+ val datasetReduced = v dot stack(listOf(xScaled, yScaled))
+ println("Reduced data:\n$datasetReduced")
+
+ // we can restore original data from reduced data.
+ // for example, find 7th element of dataset
+ val n = 7
+ val restored = (datasetReduced[n] dot v.view(intArrayOf(1, 2))) * std + mean
+ println("Original value:\n${dataset[n]}")
+ println("Restored value:\n$restored")
+}
diff --git a/kmath-ast/README.md b/kmath-ast/README.md
index eedba16fa..b0f2d59e5 100644
--- a/kmath-ast/README.md
+++ b/kmath-ast/README.md
@@ -1,6 +1,6 @@
# Module kmath-ast
-Abstract syntax tree expression representation and related optimizations.
+Performance and visualization extensions to MST API.
- [expression-language](src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt) : Expression language and its parser
- [mst-jvm-codegen](src/jvmMain/kotlin/space/kscience/kmath/asm/asm.kt) : Dynamic MST to JVM bytecode compiler
@@ -10,30 +10,28 @@ Abstract syntax tree expression representation and related optimizations.
## Artifact:
-The Maven coordinates of this project are `space.kscience:kmath-ast:0.3.0-dev-7`.
+The Maven coordinates of this project are `space.kscience:kmath-ast:0.3.0-dev-11`.
**Gradle:**
```gradle
repositories {
maven { url 'https://repo.kotlin.link' }
- maven { url 'https://dl.bintray.com/hotkeytlt/maven' }
- maven { url "https://dl.bintray.com/kotlin/kotlin-eap" } // include for builds based on kotlin-eap
+ mavenCentral()
}
dependencies {
- implementation 'space.kscience:kmath-ast:0.3.0-dev-7'
+ implementation 'space.kscience:kmath-ast:0.3.0-dev-11'
}
```
**Gradle Kotlin DSL:**
```kotlin
repositories {
maven("https://repo.kotlin.link")
- maven("https://dl.bintray.com/kotlin/kotlin-eap") // include for builds based on kotlin-eap
- maven("https://dl.bintray.com/hotkeytlt/maven") // required for a
+ mavenCentral()
}
dependencies {
- implementation("space.kscience:kmath-ast:0.3.0-dev-7")
+ implementation("space.kscience:kmath-ast:0.3.0-dev-11")
}
```
@@ -41,21 +39,26 @@ dependencies {
### On JVM
-`kmath-ast` JVM module supports runtime code generation to eliminate overhead of tree traversal. Code generator builds
-a special implementation of `Expression` with implemented `invoke` function.
+`kmath-ast` JVM module supports runtime code generation to eliminate overhead of tree traversal. Code generator builds a
+special implementation of `Expression` with implemented `invoke` function.
For example, the following builder:
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
+import space.kscience.kmath.asm.*
+
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
-… leads to generation of bytecode, which can be decompiled to the following Java class:
+... leads to generation of bytecode, which can be decompiled to the following Java class:
```java
package space.kscience.kmath.asm.generated;
import java.util.Map;
+
import kotlin.jvm.functions.Function2;
import space.kscience.kmath.asm.internal.MapIntrinsics;
import space.kscience.kmath.expressions.Expression;
@@ -65,7 +68,7 @@ public final class AsmCompiledExpression_45045_0 implements Expression {
private final Object[] constants;
public final Double invoke(Map arguments) {
- return (Double)((Function2)this.constants[0]).invoke((Double)MapIntrinsics.getOrFail(arguments, "x"), 2);
+ return (Double) ((Function2) this.constants[0]).invoke((Double) MapIntrinsics.getOrFail(arguments, "x"), 2);
}
public AsmCompiledExpression_45045_0(Object[] constants) {
@@ -77,8 +80,8 @@ public final class AsmCompiledExpression_45045_0 implements Expression {
#### Known issues
-- The same classes may be generated and loaded twice, so it is recommended to cache compiled expressions to avoid
- class loading overhead.
+- The same classes may be generated and loaded twice, so it is recommended to cache compiled expressions to avoid class
+ loading overhead.
- This API is not supported by non-dynamic JVM implementations (like TeaVM and GraalVM) because of using class loaders.
### On JS
@@ -86,6 +89,10 @@ public final class AsmCompiledExpression_45045_0 implements Expression {
A similar feature is also available on JS.
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
+import space.kscience.kmath.estree.*
+
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
@@ -93,18 +100,22 @@ The code above returns expression implemented with such a JS function:
```js
var executable = function (constants, arguments) {
- return constants[1](constants[0](arguments, "x"), 2);
+ return constants[1](constants[0](arguments, "x"), 2);
};
```
+JS also supports very experimental expression optimization with [WebAssembly](https://webassembly.org/) IR generation.
+Currently, only expressions inside `DoubleField` and `IntRing` are supported.
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
import space.kscience.kmath.wasm.*
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
-An example of emitted WASM IR in the form of WAT:
+An example of emitted Wasm IR in the form of WAT:
```lisp
(func $executable (param $0 f64) (result f64)
@@ -129,9 +140,11 @@ Example usage:
```kotlin
import space.kscience.kmath.ast.*
import space.kscience.kmath.ast.rendering.*
+import space.kscience.kmath.misc.*
+@OptIn(UnstableKMathAPI::class)
public fun main() {
- val mst = "exp(sqrt(x))-asin(2*x)/(2e10+x^3)/(-12)".parseMath()
+ val mst = "exp(sqrt(x))-asin(2*x)/(2e10+x^3)/(12)+x^(2/3)".parseMath()
val syntax = FeaturedMathRendererWithPostProcess.Default.render(mst)
val latex = LatexSyntaxRenderer.renderWithStringBuilder(syntax)
println("LaTeX:")
@@ -145,13 +158,78 @@ public fun main() {
Result LaTeX:
-![](http://chart.googleapis.com/chart?cht=tx&chl=e%5E%7B%5Csqrt%7Bx%7D%7D-%5Cfrac%7B%5Cfrac%7B%5Coperatorname%7Bsin%7D%5E%7B-1%7D%5C,%5Cleft(2%5C,x%5Cright)%7D%7B2%5Ctimes10%5E%7B10%7D%2Bx%5E%7B3%7D%7D%7D%7B-12%7D)
+![](https://latex.codecogs.com/gif.latex?%5Coperatorname{exp}%5C,%5Cleft(%5Csqrt{x}%5Cright)-%5Cfrac{%5Cfrac{%5Coperatorname{arcsin}%5C,%5Cleft(2%5C,x%5Cright)}{2%5Ctimes10^{10}%2Bx^{3}}}{12}+x^{2/3})
-Result MathML (embedding MathML is not allowed by GitHub Markdown):
+Result MathML (can be used with MathJax or other renderers):
+
+
```html
-ex-sin-12x2×1010+x3-12
+
```
+
+
It is also possible to create custom algorithms of render, and even add support of other markup languages
(see API reference).
diff --git a/kmath-ast/build.gradle.kts b/kmath-ast/build.gradle.kts
index b4a0b28ac..508374d82 100644
--- a/kmath-ast/build.gradle.kts
+++ b/kmath-ast/build.gradle.kts
@@ -18,6 +18,10 @@ kotlin.js {
}
kotlin.sourceSets {
+ filter { it.name.contains("test", true) }
+ .map(org.jetbrains.kotlin.gradle.plugin.KotlinSourceSet::languageSettings)
+ .forEach { it.useExperimentalAnnotation("space.kscience.kmath.misc.UnstableKMathAPI") }
+
commonMain {
dependencies {
api("com.github.h0tk3y.betterParse:better-parse:0.4.2")
@@ -54,7 +58,7 @@ tasks.dokkaHtml {
}
readme {
- maturity = ru.mipt.npm.gradle.Maturity.PROTOTYPE
+ maturity = ru.mipt.npm.gradle.Maturity.EXPERIMENTAL
propertyByTemplate("artifact", rootProject.file("docs/templates/ARTIFACT-TEMPLATE.md"))
feature(
diff --git a/kmath-ast/docs/README-TEMPLATE.md b/kmath-ast/docs/README-TEMPLATE.md
index b38311ea1..80ea31642 100644
--- a/kmath-ast/docs/README-TEMPLATE.md
+++ b/kmath-ast/docs/README-TEMPLATE.md
@@ -1,6 +1,6 @@
# Module kmath-ast
-Abstract syntax tree expression representation and related optimizations.
+Performance and visualization extensions to MST API.
${features}
@@ -10,21 +10,26 @@ ${artifact}
### On JVM
-`kmath-ast` JVM module supports runtime code generation to eliminate overhead of tree traversal. Code generator builds
-a special implementation of `Expression` with implemented `invoke` function.
+`kmath-ast` JVM module supports runtime code generation to eliminate overhead of tree traversal. Code generator builds a
+special implementation of `Expression` with implemented `invoke` function.
For example, the following builder:
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
+import space.kscience.kmath.asm.*
+
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
-… leads to generation of bytecode, which can be decompiled to the following Java class:
+... leads to generation of bytecode, which can be decompiled to the following Java class:
```java
package space.kscience.kmath.asm.generated;
import java.util.Map;
+
import kotlin.jvm.functions.Function2;
import space.kscience.kmath.asm.internal.MapIntrinsics;
import space.kscience.kmath.expressions.Expression;
@@ -34,7 +39,7 @@ public final class AsmCompiledExpression_45045_0 implements Expression {
private final Object[] constants;
public final Double invoke(Map arguments) {
- return (Double)((Function2)this.constants[0]).invoke((Double)MapIntrinsics.getOrFail(arguments, "x"), 2);
+ return (Double) ((Function2) this.constants[0]).invoke((Double) MapIntrinsics.getOrFail(arguments, "x"), 2);
}
public AsmCompiledExpression_45045_0(Object[] constants) {
@@ -46,8 +51,8 @@ public final class AsmCompiledExpression_45045_0 implements Expression {
#### Known issues
-- The same classes may be generated and loaded twice, so it is recommended to cache compiled expressions to avoid
- class loading overhead.
+- The same classes may be generated and loaded twice, so it is recommended to cache compiled expressions to avoid class
+ loading overhead.
- This API is not supported by non-dynamic JVM implementations (like TeaVM and GraalVM) because of using class loaders.
### On JS
@@ -55,6 +60,10 @@ public final class AsmCompiledExpression_45045_0 implements Expression {
A similar feature is also available on JS.
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
+import space.kscience.kmath.estree.*
+
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
@@ -62,18 +71,22 @@ The code above returns expression implemented with such a JS function:
```js
var executable = function (constants, arguments) {
- return constants[1](constants[0](arguments, "x"), 2);
+ return constants[1](constants[0](arguments, "x"), 2);
};
```
+JS also supports very experimental expression optimization with [WebAssembly](https://webassembly.org/) IR generation.
+Currently, only expressions inside `DoubleField` and `IntRing` are supported.
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
import space.kscience.kmath.wasm.*
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
-An example of emitted WASM IR in the form of WAT:
+An example of emitted Wasm IR in the form of WAT:
```lisp
(func \$executable (param \$0 f64) (result f64)
@@ -98,9 +111,11 @@ Example usage:
```kotlin
import space.kscience.kmath.ast.*
import space.kscience.kmath.ast.rendering.*
+import space.kscience.kmath.misc.*
+@OptIn(UnstableKMathAPI::class)
public fun main() {
- val mst = "exp(sqrt(x))-asin(2*x)/(2e10+x^3)/(-12)".parseMath()
+ val mst = "exp(sqrt(x))-asin(2*x)/(2e10+x^3)/(12)+x^(2/3)".parseMath()
val syntax = FeaturedMathRendererWithPostProcess.Default.render(mst)
val latex = LatexSyntaxRenderer.renderWithStringBuilder(syntax)
println("LaTeX:")
@@ -114,13 +129,78 @@ public fun main() {
Result LaTeX:
-![](http://chart.googleapis.com/chart?cht=tx&chl=e%5E%7B%5Csqrt%7Bx%7D%7D-%5Cfrac%7B%5Cfrac%7B%5Coperatorname%7Bsin%7D%5E%7B-1%7D%5C,%5Cleft(2%5C,x%5Cright)%7D%7B2%5Ctimes10%5E%7B10%7D%2Bx%5E%7B3%7D%7D%7D%7B-12%7D)
+![](https://latex.codecogs.com/gif.latex?%5Coperatorname{exp}%5C,%5Cleft(%5Csqrt{x}%5Cright)-%5Cfrac{%5Cfrac{%5Coperatorname{arcsin}%5C,%5Cleft(2%5C,x%5Cright)}{2%5Ctimes10^{10}%2Bx^{3}}}{12}+x^{2/3})
-Result MathML (embedding MathML is not allowed by GitHub Markdown):
+Result MathML (can be used with MathJax or other renderers):
+
+
```html
-ex-sin-12x2×1010+x3-12
+
```
+
+
It is also possible to create custom algorithms of render, and even add support of other markup languages
(see API reference).
diff --git a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt
index d2e92c37f..5201fec38 100644
--- a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt
+++ b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt
@@ -17,6 +17,7 @@ import com.github.h0tk3y.betterParse.lexer.regexToken
import com.github.h0tk3y.betterParse.parser.ParseResult
import com.github.h0tk3y.betterParse.parser.Parser
import space.kscience.kmath.expressions.MST
+import space.kscience.kmath.expressions.StringSymbol
import space.kscience.kmath.operations.FieldOperations
import space.kscience.kmath.operations.GroupOperations
import space.kscience.kmath.operations.PowerOperations
@@ -29,7 +30,6 @@ import space.kscience.kmath.operations.RingOperations
* @author Iaroslav Postovalov
*/
public object ArithmeticsEvaluator : Grammar() {
- // TODO replace with "...".toRegex() when better-parse 0.4.1 is released
private val num: Token by regexToken("[\\d.]+(?:[eE][-+]?\\d+)?".toRegex())
private val id: Token by regexToken("[a-z_A-Z][\\da-z_A-Z]*".toRegex())
private val lpar: Token by literalToken("(")
@@ -43,7 +43,7 @@ public object ArithmeticsEvaluator : Grammar() {
private val ws: Token by regexToken("\\s+".toRegex(), ignore = true)
private val number: Parser by num use { MST.Numeric(text.toDouble()) }
- private val singular: Parser by id use { MST.Symbolic(text) }
+ private val singular: Parser by id use { StringSymbol(text) }
private val unaryFunction: Parser by (id and -lpar and parser(ArithmeticsEvaluator::subSumChain) and -rpar)
.map { (id, term) -> MST.Unary(id.text, term) }
diff --git a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt
index 1c82bd6e7..01717b0f9 100644
--- a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt
+++ b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt
@@ -5,6 +5,8 @@
package space.kscience.kmath.ast.rendering
+import space.kscience.kmath.misc.UnstableKMathAPI
+
/**
* [SyntaxRenderer] implementation for LaTeX.
*
@@ -23,6 +25,7 @@ package space.kscience.kmath.ast.rendering
*
* @author Iaroslav Postovalov
*/
+@UnstableKMathAPI
public object LatexSyntaxRenderer : SyntaxRenderer {
public override fun render(node: MathSyntax, output: Appendable): Unit = output.run {
fun render(syntax: MathSyntax) = render(syntax, output)
@@ -115,7 +118,11 @@ public object LatexSyntaxRenderer : SyntaxRenderer {
render(node.right)
}
- is FractionSyntax -> {
+ is FractionSyntax -> if (node.infix) {
+ render(node.left)
+ append('/')
+ render(node.right)
+ } else {
append("\\frac{")
render(node.left)
append("}{")
diff --git a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt
index decd4ba46..cda8e2322 100644
--- a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt
+++ b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt
@@ -5,6 +5,8 @@
package space.kscience.kmath.ast.rendering
+import space.kscience.kmath.misc.UnstableKMathAPI
+
/**
* [SyntaxRenderer] implementation for MathML.
*
@@ -12,14 +14,18 @@ package space.kscience.kmath.ast.rendering
*
* @author Iaroslav Postovalov
*/
+@UnstableKMathAPI
public object MathMLSyntaxRenderer : SyntaxRenderer {
public override fun render(node: MathSyntax, output: Appendable) {
- output.append("