diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index f39e12a12..9a9f04621 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -13,9 +13,11 @@ jobs:
- name: Checkout the repo
uses: actions/checkout@v2
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: DeLaGuardo/setup-graalvm@4.0
with:
- java-version: 11
+ graalvm: 21.1.0
+ java: java11
+ arch: amd64
- name: Add msys to path
if: matrix.os == 'windows-latest'
run: SETX PATH "%PATH%;C:\msys64\mingw64\bin"
diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml
index 82b0fb303..5892b3c4c 100644
--- a/.github/workflows/pages.yml
+++ b/.github/workflows/pages.yml
@@ -12,9 +12,11 @@ jobs:
- name: Checkout the repo
uses: actions/checkout@v2
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: DeLaGuardo/setup-graalvm@4.0
with:
- java-version: 11
+ graalvm: 21.1.0
+ java: java11
+ arch: amd64
- name: Cache gradle
uses: actions/cache@v2
with:
@@ -30,9 +32,7 @@ jobs:
restore-keys: |
${{ runner.os }}-gradle-
- name: Build
- run: |
- ./gradlew dokkaHtmlMultiModule --no-daemon --no-parallel --stacktrace
- mv build/dokka/htmlMultiModule/-modules.html build/dokka/htmlMultiModule/index.html
+ run: ./gradlew dokkaHtmlMultiModule --no-daemon --no-parallel --stacktrace
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@4.1.0
with:
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index ca374574e..c5c110e89 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -18,9 +18,11 @@ jobs:
- name: Checkout the repo
uses: actions/checkout@v2
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: DeLaGuardo/setup-graalvm@4.0
with:
- java-version: 11
+ graalvm: 21.1.0
+ java: java11
+ arch: amd64
- name: Add msys to path
if: matrix.os == 'windows-latest'
run: SETX PATH "%PATH%;C:\msys64\mingw64\bin"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c41eda374..5542b6563 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,7 +10,8 @@
- Blocking chains and Statistics
- Multiplatform integration
- Integration for any Field element
-- Extendend operations for ND4J fields
+- Extended operations for ND4J fields
+- Jupyter Notebook integration module (kmath-jupyter)
### Changed
- Exponential operations merged with hyperbolic functions
@@ -24,6 +25,7 @@
- Redesign MST. Remove MSTExpression.
- Move MST to core
- Separated benchmarks and examples
+- Rewritten EJML module without ejml-simple
### Deprecated
diff --git a/README.md b/README.md
index 773eb6398..8796d7aac 100644
--- a/README.md
+++ b/README.md
@@ -91,7 +91,7 @@ KMath is a modular library. Different modules provide different features with di
* ### [kmath-ast](kmath-ast)
>
>
-> **Maturity**: PROTOTYPE
+> **Maturity**: EXPERIMENTAL
>
> **Features:**
> - [expression-language](kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt) : Expression language and its parser
@@ -154,9 +154,9 @@ performance calculations to code generation.
> **Maturity**: PROTOTYPE
>
> **Features:**
-> - [ejml-vector](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlVector.kt) : The Point implementation using SimpleMatrix.
-> - [ejml-matrix](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlMatrix.kt) : The Matrix implementation using SimpleMatrix.
-> - [ejml-linear-space](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlLinearSpace.kt) : The LinearSpace implementation using SimpleMatrix.
+> - [ejml-vector](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlVector.kt) : Point implementations.
+> - [ejml-matrix](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlMatrix.kt) : Matrix implementation.
+> - [ejml-linear-space](kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/EjmlLinearSpace.kt) : LinearSpace implementations.
@@ -200,6 +200,12 @@ One can still use generic algebras though.
> **Maturity**: PROTOTYPE
+* ### [kmath-jupyter](kmath-jupyter)
+>
+>
+> **Maturity**: PROTOTYPE
+
+
* ### [kmath-kotlingrad](kmath-kotlingrad)
>
>
@@ -230,6 +236,18 @@ One can still use generic algebras though.
> **Maturity**: EXPERIMENTAL
+* ### [kmath-tensors](kmath-tensors)
+>
+>
+> **Maturity**: PROTOTYPE
+>
+> **Features:**
+> - [tensor algebra](kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt) : Basic linear algebra operations on tensors (plus, dot, etc.)
+> - [tensor algebra with broadcasting](kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/BroadcastDoubleTensorAlgebra.kt) : Basic linear algebra operations implemented with broadcasting.
+> - [linear algebra operations](kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt) : Advanced linear algebra operations like LU decomposition, SVD, etc.
+
+
+
* ### [kmath-viktor](kmath-viktor)
>
>
diff --git a/benchmarks/build.gradle.kts b/benchmarks/build.gradle.kts
index 88f034a2a..98ffc5a96 100644
--- a/benchmarks/build.gradle.kts
+++ b/benchmarks/build.gradle.kts
@@ -9,14 +9,10 @@ sourceSets.register("benchmarks")
repositories {
mavenCentral()
- jcenter()
maven("https://repo.kotlin.link")
maven("https://clojars.org/repo")
- maven("https://dl.bintray.com/egor-bogomolov/astminer/")
- maven("https://dl.bintray.com/hotkeytlt/maven")
maven("https://jitpack.io")
- maven {
- setUrl("http://logicrunch.research.it.uu.se/maven/")
+ maven("http://logicrunch.research.it.uu.se/maven") {
isAllowInsecureProtocol = true
}
}
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
index 23e73cb5f..2c5a03a97 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
@@ -10,7 +10,7 @@ import kotlinx.benchmark.Blackhole
import kotlinx.benchmark.Scope
import kotlinx.benchmark.State
import space.kscience.kmath.commons.linear.CMLinearSpace
-import space.kscience.kmath.ejml.EjmlLinearSpace
+import space.kscience.kmath.ejml.EjmlLinearSpaceDDRM
import space.kscience.kmath.linear.LinearSpace
import space.kscience.kmath.linear.invoke
import space.kscience.kmath.operations.DoubleField
@@ -29,8 +29,8 @@ internal class DotBenchmark {
val cmMatrix1 = CMLinearSpace { matrix1.toCM() }
val cmMatrix2 = CMLinearSpace { matrix2.toCM() }
- val ejmlMatrix1 = EjmlLinearSpace { matrix1.toEjml() }
- val ejmlMatrix2 = EjmlLinearSpace { matrix2.toEjml() }
+ val ejmlMatrix1 = EjmlLinearSpaceDDRM { matrix1.toEjml() }
+ val ejmlMatrix2 = EjmlLinearSpaceDDRM { matrix2.toEjml() }
}
@Benchmark
@@ -42,14 +42,14 @@ internal class DotBenchmark {
@Benchmark
fun ejmlDot(blackhole: Blackhole) {
- EjmlLinearSpace {
+ EjmlLinearSpaceDDRM {
blackhole.consume(ejmlMatrix1 dot ejmlMatrix2)
}
}
@Benchmark
fun ejmlDotWithConversion(blackhole: Blackhole) {
- EjmlLinearSpace {
+ EjmlLinearSpaceDDRM {
blackhole.consume(matrix1 dot matrix2)
}
}
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt
index d1803e389..7bb32af28 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/MatrixInverseBenchmark.kt
@@ -11,25 +11,26 @@ import kotlinx.benchmark.Scope
import kotlinx.benchmark.State
import space.kscience.kmath.commons.linear.CMLinearSpace
import space.kscience.kmath.commons.linear.inverse
-import space.kscience.kmath.ejml.EjmlLinearSpace
-import space.kscience.kmath.ejml.inverse
+import space.kscience.kmath.ejml.EjmlLinearSpaceDDRM
+import space.kscience.kmath.linear.InverseMatrixFeature
import space.kscience.kmath.linear.LinearSpace
import space.kscience.kmath.linear.inverseWithLup
import space.kscience.kmath.linear.invoke
+import space.kscience.kmath.nd.getFeature
import kotlin.random.Random
@State(Scope.Benchmark)
internal class MatrixInverseBenchmark {
- companion object {
- val random = Random(1224)
- const val dim = 100
+ private companion object {
+ private val random = Random(1224)
+ private const val dim = 100
private val space = LinearSpace.real
// creating an invertible matrix
- val u = space.buildMatrix(dim, dim) { i, j -> if (i <= j) random.nextDouble() else 0.0 }
- val l = space.buildMatrix(dim, dim) { i, j -> if (i >= j) random.nextDouble() else 0.0 }
- val matrix = space { l dot u }
+ private val u = space.buildMatrix(dim, dim) { i, j -> if (i <= j) random.nextDouble() else 0.0 }
+ private val l = space.buildMatrix(dim, dim) { i, j -> if (i >= j) random.nextDouble() else 0.0 }
+ private val matrix = space { l dot u }
}
@Benchmark
@@ -46,8 +47,8 @@ internal class MatrixInverseBenchmark {
@Benchmark
fun ejmlInverse(blackhole: Blackhole) {
- with(EjmlLinearSpace) {
- blackhole.consume(inverse(matrix))
+ with(EjmlLinearSpaceDDRM) {
+ blackhole.consume(matrix.getFeature<InverseMatrixFeature<Double>>()?.inverse)
}
}
}
diff --git a/build.gradle.kts b/build.gradle.kts
index 4e0b6f256..760bf1aee 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -1,17 +1,16 @@
plugins {
id("ru.mipt.npm.gradle.project")
+ kotlin("jupyter.api") apply false
}
allprojects {
repositories {
- jcenter()
maven("https://clojars.org/repo")
- maven("https://dl.bintray.com/egor-bogomolov/astminer/")
- maven("https://dl.bintray.com/hotkeytlt/maven")
maven("https://jitpack.io")
- maven("http://logicrunch.research.it.uu.se/maven/") {
+ maven("http://logicrunch.research.it.uu.se/maven") {
isAllowInsecureProtocol = true
}
+ maven("https://maven.pkg.jetbrains.space/public/p/kotlinx-html/maven")
mavenCentral()
}
@@ -23,22 +22,16 @@ subprojects {
if (name.startsWith("kmath")) apply()
afterEvaluate {
- tasks.withType<org.jetbrains.dokka.gradle.DokkaTask> {
- dokkaSourceSets.all {
- val readmeFile = File(this@subprojects.projectDir, "./README.md")
- if (readmeFile.exists())
- includes.setFrom(includes + readmeFile.absolutePath)
+ tasks.withType<org.jetbrains.dokka.gradle.DokkaTaskPartial> {
+ dependsOn(tasks.getByName("assemble"))
- arrayOf(
- "http://ejml.org/javadoc/",
- "https://commons.apache.org/proper/commons-math/javadocs/api-3.6.1/",
- "https://deeplearning4j.org/api/latest/"
- ).map { java.net.URL("${it}package-list") to java.net.URL(it) }.forEach { (a, b) ->
- externalDocumentationLink {
- packageListUrl.set(a)
- url.set(b)
- }
- }
+ dokkaSourceSets.all {
+ val readmeFile = File(this@subprojects.projectDir, "README.md")
+ if (readmeFile.exists()) includes.setFrom(includes + readmeFile.absolutePath)
+ externalDocumentationLink("http://ejml.org/javadoc/")
+ externalDocumentationLink("https://commons.apache.org/proper/commons-math/javadocs/api-3.6.1/")
+ externalDocumentationLink("https://deeplearning4j.org/api/latest/")
+ externalDocumentationLink("https://kotlin.github.io/kotlinx.coroutines/kotlinx-coroutines-core/")
}
}
}
diff --git a/docs/templates/ARTIFACT-TEMPLATE.md b/docs/templates/ARTIFACT-TEMPLATE.md
index 01d9c51da..1bac2a8ff 100644
--- a/docs/templates/ARTIFACT-TEMPLATE.md
+++ b/docs/templates/ARTIFACT-TEMPLATE.md
@@ -6,8 +6,7 @@ The Maven coordinates of this project are `${group}:${name}:${version}`.
```gradle
repositories {
maven { url 'https://repo.kotlin.link' }
- maven { url 'https://dl.bintray.com/hotkeytlt/maven' }
- maven { url "https://dl.bintray.com/kotlin/kotlin-eap" } // include for builds based on kotlin-eap
+ mavenCentral()
}
dependencies {
@@ -18,8 +17,7 @@ dependencies {
```kotlin
repositories {
maven("https://repo.kotlin.link")
- maven("https://dl.bintray.com/kotlin/kotlin-eap") // include for builds based on kotlin-eap
- maven("https://dl.bintray.com/hotkeytlt/maven") // required for a
+ mavenCentral()
}
dependencies {
diff --git a/examples/build.gradle.kts b/examples/build.gradle.kts
index 56feee9dc..1c7caf1b9 100644
--- a/examples/build.gradle.kts
+++ b/examples/build.gradle.kts
@@ -4,14 +4,11 @@ plugins {
repositories {
mavenCentral()
- jcenter()
maven("https://repo.kotlin.link")
maven("https://clojars.org/repo")
- maven("https://dl.bintray.com/egor-bogomolov/astminer/")
- maven("https://dl.bintray.com/hotkeytlt/maven")
maven("https://jitpack.io")
- maven{
- setUrl("http://logicrunch.research.it.uu.se/maven/")
+ maven("https://maven.pkg.jetbrains.space/kotlin/p/kotlin/kotlin-js-wrappers")
+ maven("http://logicrunch.research.it.uu.se/maven") {
isAllowInsecureProtocol = true
}
}
@@ -28,6 +25,7 @@ dependencies {
implementation(project(":kmath-dimensions"))
implementation(project(":kmath-ejml"))
implementation(project(":kmath-nd4j"))
+ implementation(project(":kmath-tensors"))
implementation(project(":kmath-for-real"))
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt
new file mode 100644
index 000000000..6fbf16a91
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+
+
+// Dataset normalization
+
+fun main() {
+
+ // work in context with broadcast methods
+ BroadcastDoubleTensorAlgebra {
+ // take a dataset of 5-element vectors from a normal distribution
+ val dataset = randomNormal(intArrayOf(100, 5)) * 1.5 // all elements from N(0, 1.5)
+
+ dataset += fromArray(
+ intArrayOf(5),
+ doubleArrayOf(0.0, 1.0, 1.5, 3.0, 5.0) // column means
+ )
+
+
+ // find out mean and standard deviation of each column
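+ // mean(0, false) reduces over dimension 0 (rows) and drops the reduced dimension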
+ val mean = dataset.mean(0, false)
+ val std = dataset.std(0, false)
+
+ println("Mean:\n$mean")
+ println("Standard deviation:\n$std")
+
+ // we can also calculate other statistics, such as the minimum and maximum of each column
+ println("Minimum:\n${dataset.min(0, false)}")
+ println("Maximum:\n${dataset.max(0, false)}")
+
+ // now we can standardize the dataset: subtract the mean and divide by the standard deviation
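+ // standard score: z = (x - mean) / std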
+ val datasetScaled = (dataset - mean) / std
+
+ // find out mean and std of scaled dataset
+
+ println("Mean of scaled:\n${datasetScaled.mean(0, false)}")
+ println("Mean of scaled:\n${datasetScaled.std(0, false)}")
+ }
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt
new file mode 100644
index 000000000..78370b517
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.DoubleTensor
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+
+// solving linear system with LUP decomposition
+
+fun main() {
+
+ // work in context with linear operations
+ BroadcastDoubleTensorAlgebra {
+
+ // set true value of x
+ val trueX = fromArray(
+ intArrayOf(4),
+ doubleArrayOf(-2.0, 1.5, 6.8, -2.4)
+ )
+
+ // and A matrix
+ val a = fromArray(
+ intArrayOf(4, 4),
+ doubleArrayOf(
+ 0.5, 10.5, 4.5, 1.0,
+ 8.5, 0.9, 12.8, 0.1,
+ 5.56, 9.19, 7.62, 5.45,
+ 1.0, 2.0, -3.0, -2.5
+ )
+ )
+
+ // calculate the right-hand side b = A x
+ val b = a dot trueX
+
+ // check out A and b
+ println("A:\n$a")
+ println("b:\n$b")
+
+ // solve `Ax = b` system using LUP decomposition
+
+ // get P, L, U such that PA = LU
+ val (p, l, u) = a.lu()
+
+ // check that P is a permutation matrix
+ println("P:\n$p")
+ // L is a lower triangular matrix and U is an upper triangular one
+ println("L:\n$l")
+ println("U:\n$u")
+ // and PA = LU
+ println("PA:\n${p dot a}")
+ println("LU:\n${l dot u}")
+
+ /* Ax = b;
+ PAx = Pb;
+ LUx = Pb;
+ let y = Ux, then
+ Ly = Pb -- this system can be easily solved, since the matrix L is lower triangular;
+ Ux = y can be solved the same way, since the matrix U is upper triangular
+ */
+
+
+
+ // this function returns the solution x of the system lx = b; l must be lower triangular
+ fun solveLT(l: DoubleTensor, b: DoubleTensor): DoubleTensor {
+ val n = l.shape[0]
+ val x = zeros(intArrayOf(n))
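+ // forward substitution: x[i] = (b[i] - sum_{j<i} l[i, j] * x[j]) / l[i, i]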
+ for (i in 0 until n) {
+ x[intArrayOf(i)] = (b[intArrayOf(i)] - l[i].dot(x).value()) / l[intArrayOf(i, i)]
+ }
+ return x
+ }
+
+ val y = solveLT(l, p dot b)
+
+ // the solveLT(l, b) function can be adapted to an upper triangular matrix via the permutation matrix revMat
+ // create it by placing ones on the anti-diagonal
+ val revMat = u.zeroesLike()
+ val n = revMat.shape[0]
+ for (i in 0 until n) {
+ revMat[intArrayOf(i, n - 1 - i)] = 1.0
+ }
+
+ // solves the system ux = b; u must be upper triangular
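+ // revMat dot u dot revMat reverses rows and columns, so the upper triangular u becomes lower triangular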
+ fun solveUT(u: DoubleTensor, b: DoubleTensor): DoubleTensor = revMat dot solveLT(
+ revMat dot u dot revMat, revMat dot b
+ )
+
+ val x = solveUT(u, y)
+
+ println("True x:\n$trueX")
+ println("x founded with LU method:\n$x")
+ }
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt
new file mode 100644
index 000000000..874ac8034
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.DoubleTensor
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.toDoubleArray
+import kotlin.math.sqrt
+
+const val seed = 100500L
+
+// Simple feedforward neural network with backpropagation training
+
+// interface of a network layer
+interface Layer {
+ fun forward(input: DoubleTensor): DoubleTensor
+ fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor
+}
+
+// activation layer
+open class Activation(
+ val activation: (DoubleTensor) -> DoubleTensor,
+ val activationDer: (DoubleTensor) -> DoubleTensor
+) : Layer {
+ override fun forward(input: DoubleTensor): DoubleTensor {
+ return activation(input)
+ }
+
+ override fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor {
+ return DoubleTensorAlgebra { outputError * activationDer(input) }
+ }
+}
+
+fun relu(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ x.map { if (it > 0) it else 0.0 }
+}
+
+fun reluDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ x.map { if (it > 0) 1.0 else 0.0 }
+}
+
+// activation layer with relu activator
+class ReLU : Activation(::relu, ::reluDer)
+
+fun sigmoid(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ 1.0 / (1.0 + (-x).exp())
+}
+
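+// derivative of the sigmoid: sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))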
+fun sigmoidDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ sigmoid(x) * (1.0 - sigmoid(x))
+}
+
+// activation layer with sigmoid activator
+class Sigmoid : Activation(::sigmoid, ::sigmoidDer)
+
+// dense layer
+class Dense(
+ private val inputUnits: Int,
+ private val outputUnits: Int,
+ private val learningRate: Double = 0.1
+) : Layer {
+
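+ // Glorot-style initialization: scale the weights by sqrt(2 / (inputUnits + outputUnits))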
+ private val weights: DoubleTensor = DoubleTensorAlgebra {
+ randomNormal(
+ intArrayOf(inputUnits, outputUnits),
+ seed
+ ) * sqrt(2.0 / (inputUnits + outputUnits))
+ }
+
+ private val bias: DoubleTensor = DoubleTensorAlgebra { zeros(intArrayOf(outputUnits)) }
+
+ override fun forward(input: DoubleTensor): DoubleTensor {
+ return BroadcastDoubleTensorAlgebra { (input dot weights) + bias }
+ }
+
+ override fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
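+ // backpropagation through a dense layer: dL/dInput = outputError dot W^T, dL/dW = input^T dot outputError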
+ val gradInput = outputError dot weights.transpose()
+
+ val gradW = input.transpose() dot outputError
+ val gradBias = outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble()
+
+ weights -= learningRate * gradW
+ bias -= learningRate * gradBias
+
+ gradInput
+ }
+
+}
+
+// simple accuracy equal to the proportion of correct answers
+fun accuracy(yPred: DoubleTensor, yTrue: DoubleTensor): Double {
+ check(yPred.shape contentEquals yTrue.shape)
+ val n = yPred.shape[0]
+ var correctCnt = 0
+ for (i in 0 until n) {
+ if (yPred[intArrayOf(i, 0)] == yTrue[intArrayOf(i, 0)]) {
+ correctCnt += 1
+ }
+ }
+ return correctCnt.toDouble() / n.toDouble()
+}
+
+// neural network class
+class NeuralNetwork(private val layers: List<Layer>) {
+ private fun softMaxLoss(yPred: DoubleTensor, yTrue: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra {
+
+ val onesForAnswers = yPred.zeroesLike()
+ yTrue.toDoubleArray().forEachIndexed { index, labelDouble ->
+ val label = labelDouble.toInt()
+ onesForAnswers[intArrayOf(index, label)] = 1.0
+ }
+
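+ // gradient of cross-entropy with softmax: (softmax(yPred) - oneHot(yTrue)) / batchSize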
+ val softmaxValue = yPred.exp() / yPred.exp().sum(dim = 1, keepDim = true)
+
+ (-onesForAnswers + softmaxValue) / (yPred.shape[0].toDouble())
+ }
+
+ @OptIn(ExperimentalStdlibApi::class)
+ private fun forward(x: DoubleTensor): List<DoubleTensor> {
+ var input = x
+
+ return buildList {
+ layers.forEach { layer ->
+ val output = layer.forward(input)
+ add(output)
+ input = output
+ }
+ }
+ }
+
+ @OptIn(ExperimentalStdlibApi::class)
+ private fun train(xTrain: DoubleTensor, yTrain: DoubleTensor) {
+ val layerInputs = buildList {
+ add(xTrain)
+ addAll(forward(xTrain))
+ }
+
+ var lossGrad = softMaxLoss(layerInputs.last(), yTrain)
+
+ layers.zip(layerInputs).reversed().forEach { (layer, input) ->
+ lossGrad = layer.backward(input, lossGrad)
+ }
+ }
+
+ fun fit(xTrain: DoubleTensor, yTrain: DoubleTensor, batchSize: Int, epochs: Int) = DoubleTensorAlgebra {
+ fun iterBatch(x: DoubleTensor, y: DoubleTensor): Sequence<Pair<DoubleTensor, DoubleTensor>> = sequence {
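+ // mini-batch iteration: shuffle the sample indices and yield consecutive (x, y) slices of batchSize rows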
+ val n = x.shape[0]
+ val shuffledIndices = (0 until n).shuffled()
+ for (i in 0 until n step batchSize) {
+ val excerptIndices = shuffledIndices.drop(i).take(batchSize).toIntArray()
+ val batch = x.rowsByIndices(excerptIndices) to y.rowsByIndices(excerptIndices)
+ yield(batch)
+ }
+ }
+
+ for (epoch in 0 until epochs) {
+ println("Epoch ${epoch + 1}/$epochs")
+ for ((xBatch, yBatch) in iterBatch(xTrain, yTrain)) {
+ train(xBatch, yBatch)
+ }
+ println("Accuracy:${accuracy(yTrain, predict(xTrain).argMax(1, true))}")
+ }
+ }
+
+ fun predict(x: DoubleTensor): DoubleTensor {
+ return forward(x).last()
+ }
+
+}
+
+
+@OptIn(ExperimentalStdlibApi::class)
+fun main() {
+ BroadcastDoubleTensorAlgebra {
+ val features = 5
+ val sampleSize = 250
+ val trainSize = 180
+ val testSize = sampleSize - trainSize
+
+ // take sample of features from normal distribution
+ val x = randomNormal(intArrayOf(sampleSize, features), seed) * 2.5
+
+ x += fromArray(
+ intArrayOf(5),
+ doubleArrayOf(0.0, -1.0, -2.5, -3.0, 5.5) // column means
+ )
+
+
+ // label a sample '1' if the sum of its features is positive and '0' otherwise
+ val y = fromArray(
+ intArrayOf(sampleSize, 1),
+ DoubleArray(sampleSize) { i ->
+ if (x[i].sum() > 0.0) {
+ 1.0
+ } else {
+ 0.0
+ }
+ }
+ )
+
+ // split into train and test parts
+ val trainIndices = (0 until trainSize).toList().toIntArray()
+ val testIndices = (trainSize until sampleSize).toList().toIntArray()
+
+ val xTrain = x.rowsByIndices(trainIndices)
+ val yTrain = y.rowsByIndices(trainIndices)
+
+ val xTest = x.rowsByIndices(testIndices)
+ val yTest = y.rowsByIndices(testIndices)
+
+ // build model
+ val layers = buildList {
+ add(Dense(features, 64))
+ add(ReLU())
+ add(Dense(64, 16))
+ add(ReLU())
+ add(Dense(16, 2))
+ add(Sigmoid())
+ }
+ val model = NeuralNetwork(layers)
+
+ // fit it with train data
+ model.fit(xTrain, yTrain, batchSize = 20, epochs = 10)
+
+ // make prediction
+ val prediction = model.predict(xTest)
+
+ // process raw prediction via argMax
+ val predictionLabels = prediction.argMax(1, true)
+
+ // find out accuracy
+ val acc = accuracy(yTest, predictionLabels)
+ println("Test accuracy:$acc")
+
+ }
+}
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
new file mode 100644
index 000000000..42a0a4ba1
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.DoubleTensor
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+
+import kotlin.math.abs
+
+// OLS estimator using SVD
+
+fun main() {
+ // seed for the random generator
+ val randSeed = 100500L
+
+ // work in context with linear operations
+ DoubleTensorAlgebra {
+ // take coefficient vector from normal distribution
+ val alpha = randomNormal(
+ intArrayOf(5),
+ randSeed
+ ) + fromArray(
+ intArrayOf(5),
+ doubleArrayOf(1.0, 2.5, 3.4, 5.0, 10.1)
+ )
+
+ println("Real alpha:\n$alpha")
+
+ // also take sample of size 20 from normal distribution for x
+ val x = randomNormal(
+ intArrayOf(20, 5),
+ randSeed
+ )
+
+ // calculate y and add gaussian noise (N(0, 0.05))
+ val y = x dot alpha
+ y += y.randomNormalLike(randSeed) * 0.05
+
+ // now restore the coefficient vector with the OLS estimator using SVD
+ val (u, singValues, v) = x.svd()
+
+ // we have to make sure the singular values of the matrix are not close to zero
+ println("Singular values:\n$singValues")
+
+
+ // the inverse Sigma matrix can be restored from the singular values with the diagonalEmbedding function
+ val sigma = diagonalEmbedding(singValues.map { x -> if (abs(x) < 1e-3) 0.0 else 1.0 / x })
+
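+ // pseudo-inverse solution: alpha = V dot Sigma^+ dot U^T dot y, where Sigma^+ inverts only the non-negligible singular values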
+ val alphaOLS = v dot sigma dot u.transpose() dot y
+ println("Estimated alpha:\n" +
+ "$alphaOLS")
+
+ // figure out MSE of approximation
+ fun mse(yTrue: DoubleTensor, yPred: DoubleTensor): Double {
+ require(yTrue.shape.size == 1)
+ require(yTrue.shape contentEquals yPred.shape)
+
+ val diff = yTrue - yPred
+ return diff.dot(diff).sqrt().value()
+ }
+
+ println("MSE: ${mse(alpha, alphaOLS)}")
+ }
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
new file mode 100644
index 000000000..f8ac13d3f
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+
+
+// simple PCA
+
+fun main() {
+ val seed = 100500L
+
+ // work in context with broadcast methods
+ BroadcastDoubleTensorAlgebra {
+
+ // assume x ranges from 0 until 10
+ val x = fromArray(
+ intArrayOf(10),
+ (0 until 10).toList().map { it.toDouble() }.toDoubleArray()
+ )
+
+ // take y dependent on x with noise
+ val y = 2.0 * x + (3.0 + x.randomNormalLike(seed) * 1.5)
+
+ println("x:\n$x")
+ println("y:\n$y")
+
+ // stack them into a single dataset
+ val dataset = stack(listOf(x, y)).transpose()
+
+ // normalize both x and y
+ val xMean = x.mean()
+ val yMean = y.mean()
+
+ val xStd = x.std()
+ val yStd = y.std()
+
+ val xScaled = (x - xMean) / xStd
+ val yScaled = (y - yMean) / yStd
+
+ // save means and standard deviations for further recovery
+ val mean = fromArray(
+ intArrayOf(2),
+ doubleArrayOf(xMean, yMean)
+ )
+ println("Means:\n$mean")
+
+ val std = fromArray(
+ intArrayOf(2),
+ doubleArrayOf(xStd, yStd)
+ )
+ println("Standard deviations:\n$std")
+
+ // calculate the covariance matrix of scaled x and y
+ val covMatrix = cov(listOf(xScaled, yScaled))
+ println("Covariance matrix:\n$covMatrix")
+
+ // and find its eigenvectors
+ val (_, evecs) = covMatrix.symEig()
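+ // take the first eigenvector as the principal direction (assuming symEig puts the dominant component first)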
+ val v = evecs[0]
+ println("Eigenvector:\n$v")
+
+ // reduce dimension of dataset
+ val datasetReduced = v dot stack(listOf(xScaled, yScaled))
+ println("Reduced data:\n$datasetReduced")
+
+ // we can restore the original data from the reduced data
+ // for example, recover the 7th element of the dataset
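+ // inverse transform: multiply the reduced value by v, then undo standardization with * std + mean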
+ val n = 7
+ val restored = (datasetReduced[n] dot v.view(intArrayOf(1, 2))) * std + mean
+ println("Original value:\n${dataset[n]}")
+ println("Restored value:\n$restored")
+ }
+}
diff --git a/kmath-ast/README.md b/kmath-ast/README.md
index eedba16fa..26ee98ba5 100644
--- a/kmath-ast/README.md
+++ b/kmath-ast/README.md
@@ -1,6 +1,6 @@
# Module kmath-ast
-Abstract syntax tree expression representation and related optimizations.
+Performance and visualization extensions to MST API.
- [expression-language](src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt) : Expression language and its parser
- [mst-jvm-codegen](src/jvmMain/kotlin/space/kscience/kmath/asm/asm.kt) : Dynamic MST to JVM bytecode compiler
@@ -16,8 +16,7 @@ The Maven coordinates of this project are `space.kscience:kmath-ast:0.3.0-dev-7`
```gradle
repositories {
maven { url 'https://repo.kotlin.link' }
- maven { url 'https://dl.bintray.com/hotkeytlt/maven' }
- maven { url "https://dl.bintray.com/kotlin/kotlin-eap" } // include for builds based on kotlin-eap
+ mavenCentral()
}
dependencies {
@@ -28,8 +27,7 @@ dependencies {
```kotlin
repositories {
maven("https://repo.kotlin.link")
- maven("https://dl.bintray.com/kotlin/kotlin-eap") // include for builds based on kotlin-eap
- maven("https://dl.bintray.com/hotkeytlt/maven") // required for a
+ mavenCentral()
}
dependencies {
@@ -41,21 +39,26 @@ dependencies {
### On JVM
-`kmath-ast` JVM module supports runtime code generation to eliminate overhead of tree traversal. Code generator builds
-a special implementation of `Expression` with implemented `invoke` function.
+`kmath-ast` JVM module supports runtime code generation to eliminate overhead of tree traversal. Code generator builds a
+special implementation of `Expression` with implemented `invoke` function.
For example, the following builder:
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
+import space.kscience.kmath.asm.*
+
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
-… leads to generation of bytecode, which can be decompiled to the following Java class:
+... leads to generation of bytecode, which can be decompiled to the following Java class:
```java
package space.kscience.kmath.asm.generated;
import java.util.Map;
+
import kotlin.jvm.functions.Function2;
import space.kscience.kmath.asm.internal.MapIntrinsics;
import space.kscience.kmath.expressions.Expression;
@@ -65,7 +68,7 @@ public final class AsmCompiledExpression_45045_0 implements Expression<Double> {
private final Object[] constants;
public final Double invoke(Map arguments) {
- return (Double)((Function2)this.constants[0]).invoke((Double)MapIntrinsics.getOrFail(arguments, "x"), 2);
+ return (Double) ((Function2) this.constants[0]).invoke((Double) MapIntrinsics.getOrFail(arguments, "x"), 2);
}
public AsmCompiledExpression_45045_0(Object[] constants) {
@@ -77,8 +80,8 @@ public final class AsmCompiledExpression_45045_0 implements Expression<Double> {
#### Known issues
-- The same classes may be generated and loaded twice, so it is recommended to cache compiled expressions to avoid
- class loading overhead.
+- The same classes may be generated and loaded twice, so it is recommended to cache compiled expressions to avoid class
+ loading overhead.
- This API is not supported by non-dynamic JVM implementations (like TeaVM and GraalVM) because of using class loaders.
### On JS
@@ -86,6 +89,10 @@ public final class AsmCompiledExpression_45045_0 implements Expression<Double> {
A similar feature is also available on JS.
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
+import space.kscience.kmath.estree.*
+
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
@@ -93,18 +100,22 @@ The code above returns expression implemented with such a JS function:
```js
var executable = function (constants, arguments) {
- return constants[1](constants[0](arguments, "x"), 2);
+ return constants[1](constants[0](arguments, "x"), 2);
};
```
+JS also supports very experimental expression optimization with [WebAssembly](https://webassembly.org/) IR generation.
+Currently, only expressions inside `DoubleField` and `IntRing` are supported.
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
import space.kscience.kmath.wasm.*
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
-An example of emitted WASM IR in the form of WAT:
+An example of emitted Wasm IR in the form of WAT:
```lisp
(func $executable (param $0 f64) (result f64)
@@ -129,7 +140,9 @@ Example usage:
```kotlin
import space.kscience.kmath.ast.*
import space.kscience.kmath.ast.rendering.*
+import space.kscience.kmath.misc.*
+@OptIn(UnstableKMathAPI::class)
public fun main() {
val mst = "exp(sqrt(x))-asin(2*x)/(2e10+x^3)/(-12)".parseMath()
val syntax = FeaturedMathRendererWithPostProcess.Default.render(mst)
@@ -145,13 +158,68 @@ public fun main() {
Result LaTeX:
-![](http://chart.googleapis.com/chart?cht=tx&chl=e%5E%7B%5Csqrt%7Bx%7D%7D-%5Cfrac%7B%5Cfrac%7B%5Coperatorname%7Bsin%7D%5E%7B-1%7D%5C,%5Cleft(2%5C,x%5Cright)%7D%7B2%5Ctimes10%5E%7B10%7D%2Bx%5E%7B3%7D%7D%7D%7B-12%7D)
+![](https://latex.codecogs.com/gif.latex?%5Coperatorname{exp}%5C,%5Cleft(%5Csqrt{x}%5Cright)-%5Cfrac{%5Cfrac{%5Coperatorname{arcsin}%5C,%5Cleft(2%5C,x%5Cright)}{2%5Ctimes10^{10}%2Bx^{3}}}{-12})
Result MathML (embedding MathML is not allowed by GitHub Markdown):
+
+
```html
-ex-sin-12x2×1010+x3-12
+
```
+
+
It is also possible to create custom rendering algorithms and even add support for other markup languages
(see API reference).
diff --git a/kmath-ast/build.gradle.kts b/kmath-ast/build.gradle.kts
index b4a0b28ac..508374d82 100644
--- a/kmath-ast/build.gradle.kts
+++ b/kmath-ast/build.gradle.kts
@@ -18,6 +18,10 @@ kotlin.js {
}
kotlin.sourceSets {
+ filter { it.name.contains("test", true) }
+ .map(org.jetbrains.kotlin.gradle.plugin.KotlinSourceSet::languageSettings)
+ .forEach { it.useExperimentalAnnotation("space.kscience.kmath.misc.UnstableKMathAPI") }
+
commonMain {
dependencies {
api("com.github.h0tk3y.betterParse:better-parse:0.4.2")
@@ -54,7 +58,7 @@ tasks.dokkaHtml {
}
readme {
- maturity = ru.mipt.npm.gradle.Maturity.PROTOTYPE
+ maturity = ru.mipt.npm.gradle.Maturity.EXPERIMENTAL
propertyByTemplate("artifact", rootProject.file("docs/templates/ARTIFACT-TEMPLATE.md"))
feature(
diff --git a/kmath-ast/docs/README-TEMPLATE.md b/kmath-ast/docs/README-TEMPLATE.md
index b38311ea1..80ea31642 100644
--- a/kmath-ast/docs/README-TEMPLATE.md
+++ b/kmath-ast/docs/README-TEMPLATE.md
@@ -1,6 +1,6 @@
# Module kmath-ast
-Abstract syntax tree expression representation and related optimizations.
+Performance and visualization extensions to MST API.
${features}
@@ -10,21 +10,26 @@ ${artifact}
### On JVM
-`kmath-ast` JVM module supports runtime code generation to eliminate overhead of tree traversal. Code generator builds
-a special implementation of `Expression` with implemented `invoke` function.
+`kmath-ast` JVM module supports runtime code generation to eliminate overhead of tree traversal. Code generator builds a
+special implementation of `Expression` with implemented `invoke` function.
For example, the following builder:
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
+import space.kscience.kmath.asm.*
+
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
-… leads to generation of bytecode, which can be decompiled to the following Java class:
+... leads to generation of bytecode, which can be decompiled to the following Java class:
```java
package space.kscience.kmath.asm.generated;
import java.util.Map;
+
import kotlin.jvm.functions.Function2;
import space.kscience.kmath.asm.internal.MapIntrinsics;
import space.kscience.kmath.expressions.Expression;
@@ -34,7 +39,7 @@ public final class AsmCompiledExpression_45045_0 implements Expression<Double> {
private final Object[] constants;
public final Double invoke(Map arguments) {
- return (Double)((Function2)this.constants[0]).invoke((Double)MapIntrinsics.getOrFail(arguments, "x"), 2);
+ return (Double) ((Function2) this.constants[0]).invoke((Double) MapIntrinsics.getOrFail(arguments, "x"), 2);
}
public AsmCompiledExpression_45045_0(Object[] constants) {
@@ -46,8 +51,8 @@ public final class AsmCompiledExpression_45045_0 implements Expression<Double> {
#### Known issues
-- The same classes may be generated and loaded twice, so it is recommended to cache compiled expressions to avoid
- class loading overhead.
+- The same classes may be generated and loaded twice, so it is recommended to cache compiled expressions to avoid class
+ loading overhead.
- This API is not supported by non-dynamic JVM implementations (like TeaVM and GraalVM) because of using class loaders.
### On JS
@@ -55,6 +60,10 @@ public final class AsmCompiledExpression_45045_0 implements Expression<Double> {
A similar feature is also available on JS.
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
+import space.kscience.kmath.estree.*
+
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
@@ -62,18 +71,22 @@ The code above returns expression implemented with such a JS function:
```js
var executable = function (constants, arguments) {
- return constants[1](constants[0](arguments, "x"), 2);
+ return constants[1](constants[0](arguments, "x"), 2);
};
```
+JS also supports very experimental expression optimization with [WebAssembly](https://webassembly.org/) IR generation.
+Currently, only expressions inside `DoubleField` and `IntRing` are supported.
```kotlin
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.*
import space.kscience.kmath.wasm.*
MstField { bindSymbol("x") + 2 }.compileToExpression(DoubleField)
```
-An example of emitted WASM IR in the form of WAT:
+An example of emitted Wasm IR in the form of WAT:
```lisp
(func \$executable (param \$0 f64) (result f64)
@@ -98,9 +111,11 @@ Example usage:
```kotlin
import space.kscience.kmath.ast.*
import space.kscience.kmath.ast.rendering.*
+import space.kscience.kmath.misc.*
+@OptIn(UnstableKMathAPI::class)
public fun main() {
- val mst = "exp(sqrt(x))-asin(2*x)/(2e10+x^3)/(-12)".parseMath()
+ val mst = "exp(sqrt(x))-asin(2*x)/(2e10+x^3)/(12)+x^(2/3)".parseMath()
val syntax = FeaturedMathRendererWithPostProcess.Default.render(mst)
val latex = LatexSyntaxRenderer.renderWithStringBuilder(syntax)
println("LaTeX:")
@@ -114,13 +129,78 @@ public fun main() {
Result LaTeX:
-![](http://chart.googleapis.com/chart?cht=tx&chl=e%5E%7B%5Csqrt%7Bx%7D%7D-%5Cfrac%7B%5Cfrac%7B%5Coperatorname%7Bsin%7D%5E%7B-1%7D%5C,%5Cleft(2%5C,x%5Cright)%7D%7B2%5Ctimes10%5E%7B10%7D%2Bx%5E%7B3%7D%7D%7D%7B-12%7D)
+![](https://latex.codecogs.com/gif.latex?%5Coperatorname{exp}%5C,%5Cleft(%5Csqrt{x}%5Cright)-%5Cfrac{%5Cfrac{%5Coperatorname{arcsin}%5C,%5Cleft(2%5C,x%5Cright)}{2%5Ctimes10^{10}%2Bx^{3}}}{12}+x^{2/3})
-Result MathML (embedding MathML is not allowed by GitHub Markdown):
+Result MathML (can be used with MathJax or other renderers):
+
+
```html
-ex-sin-12x2×1010+x3-12
+
```
+
+
It is also possible to create custom rendering algorithms and even add support for other markup languages
(see API reference).
diff --git a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt
index d2e92c37f..246625d29 100644
--- a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt
+++ b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/parser.kt
@@ -29,7 +29,6 @@ import space.kscience.kmath.operations.RingOperations
* @author Iaroslav Postovalov
*/
public object ArithmeticsEvaluator : Grammar<MST>() {
- // TODO replace with "...".toRegex() when better-parse 0.4.1 is released
private val num: Token by regexToken("[\\d.]+(?:[eE][-+]?\\d+)?".toRegex())
private val id: Token by regexToken("[a-z_A-Z][\\da-z_A-Z]*".toRegex())
private val lpar: Token by literalToken("(")
diff --git a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt
index 1c82bd6e7..01717b0f9 100644
--- a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt
+++ b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/LatexSyntaxRenderer.kt
@@ -5,6 +5,8 @@
package space.kscience.kmath.ast.rendering
+import space.kscience.kmath.misc.UnstableKMathAPI
+
/**
* [SyntaxRenderer] implementation for LaTeX.
*
@@ -23,6 +25,7 @@ package space.kscience.kmath.ast.rendering
*
* @author Iaroslav Postovalov
*/
+@UnstableKMathAPI
public object LatexSyntaxRenderer : SyntaxRenderer {
public override fun render(node: MathSyntax, output: Appendable): Unit = output.run {
fun render(syntax: MathSyntax) = render(syntax, output)
@@ -115,7 +118,11 @@ public object LatexSyntaxRenderer : SyntaxRenderer {
render(node.right)
}
- is FractionSyntax -> {
+ is FractionSyntax -> if (node.infix) {
+ render(node.left)
+ append('/')
+ render(node.right)
+ } else {
append("\\frac{")
render(node.left)
append("}{")
diff --git a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt
index decd4ba46..cda8e2322 100644
--- a/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt
+++ b/kmath-ast/src/commonMain/kotlin/space/kscience/kmath/ast/rendering/MathMLSyntaxRenderer.kt
@@ -5,6 +5,8 @@
package space.kscience.kmath.ast.rendering
+import space.kscience.kmath.misc.UnstableKMathAPI
+
/**
* [SyntaxRenderer] implementation for MathML.
*
@@ -12,14 +14,18 @@ package space.kscience.kmath.ast.rendering
*
* @author Iaroslav Postovalov
*/
+@UnstableKMathAPI
public object MathMLSyntaxRenderer : SyntaxRenderer {
public override fun render(node: MathSyntax, output: Appendable) {
- output.append("