diff --git a/CHANGELOG.md b/CHANGELOG.md
index eea1dd3ea..c31740a31 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,11 +2,14 @@
 ## [Unreleased]
 ### Added
+- Type-aliases for numbers like `Float64`
 - 2D optimal trajectory computation in a separate module `kmath-trajectory`
 - Autodiff for generic algebra elements in core!
 - Algebra now has an obligatory `bufferFactory` (#477).

 ### Changed
+- Tensor operations switched to prefix notation
+- Row-wise and column-wise ND shapes in the core
 - Shape is read-only
 - Major refactor of tensors (only minor API changes)
 - Kotlin 1.7.20
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/TensorAlgebraBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/TensorAlgebraBenchmark.kt
index 3ed7163b3..c4382374a 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/TensorAlgebraBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/TensorAlgebraBenchmark.kt
@@ -13,6 +13,8 @@
 import space.kscience.kmath.linear.linearSpace
 import space.kscience.kmath.linear.matrix
 import space.kscience.kmath.linear.symmetric
 import space.kscience.kmath.operations.DoubleField
+import space.kscience.kmath.tensors.core.symEigJacobi
+import space.kscience.kmath.tensors.core.symEigSvd
 import space.kscience.kmath.tensors.core.tensorAlgebra
 import kotlin.random.Random
@@ -27,11 +29,11 @@ internal class TensorAlgebraBenchmark {

     @Benchmark
     fun tensorSymEigSvd(blackhole: Blackhole) = with(Double.tensorAlgebra) {
-        blackhole.consume(matrix.symEigSvd(1e-10))
+        blackhole.consume(symEigSvd(matrix, 1e-10))
     }

     @Benchmark
     fun tensorSymEigJacobi(blackhole: Blackhole) = with(Double.tensorAlgebra) {
-        blackhole.consume(matrix.symEigJacobi(50, 1e-10))
+        blackhole.consume(symEigJacobi(matrix, 50, 1e-10))
     }
 }
\ No newline at end of file
diff --git a/build.gradle.kts b/build.gradle.kts
index 92379d915..e03bec9af 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -15,7 +15,7 @@ allprojects {
     }

     group = "space.kscience"
-    version = "0.3.1-dev-6"
+    version = "0.3.1-dev-7"
 }

 subprojects {
diff --git a/examples/src/main/kotlin/space/kscience/kmath/structures/StructureWriteBenchmark.kt b/examples/src/main/kotlin/space/kscience/kmath/structures/StructureWriteBenchmark.kt
index 522eeb768..14c058417 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/structures/StructureWriteBenchmark.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/structures/StructureWriteBenchmark.kt
@@ -8,17 +8,20 @@
 package space.kscience.kmath.structures

 import space.kscience.kmath.nd.BufferND
 import space.kscience.kmath.nd.ShapeND
 import space.kscience.kmath.nd.StructureND
-import space.kscience.kmath.operations.map
+import space.kscience.kmath.operations.mapToBuffer
 import kotlin.system.measureTimeMillis

-private inline fun <T, reified R : Any> BufferND<T>.map(block: (T) -> R): BufferND<R> = BufferND(indices, buffer.map(block))
+private inline fun <T, reified R : Any> BufferND<T>.mapToBufferND(
+    bufferFactory: BufferFactory<R> = BufferFactory.auto(),
+    crossinline block: (T) -> R,
+): BufferND<R> = BufferND(indices, buffer.mapToBuffer(bufferFactory, block))

 @Suppress("UNUSED_VARIABLE")
 fun main() {
     val n = 6000
     val structure = StructureND.buffered(ShapeND(n, n), Buffer.Companion::auto) { 1.0 }
-    structure.map { it + 1 } // warm-up
-    val time1 = measureTimeMillis { val res = structure.map { it + 1 } }
+    structure.mapToBufferND { it + 1 } // warm-up
+    val time1 = measureTimeMillis { val res = structure.mapToBufferND { it + 1 } }
     println("Structure mapping finished in $time1 millis")

     val array = DoubleArray(n * n) { 1.0 }
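The benchmark edit above is the CHANGELOG's "prefix notation" change in action: tensor operations that used to be extension methods on the tensor now take it as an explicit first argument. A minimal sketch of the new call style, reusing the benchmark's own setup (the matrix size and seed are illustrative, not from the patch):

```kotlin
import space.kscience.kmath.linear.linearSpace
import space.kscience.kmath.linear.matrix
import space.kscience.kmath.linear.symmetric
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.tensors.core.symEigSvd
import space.kscience.kmath.tensors.core.tensorAlgebra
import kotlin.random.Random

fun main() {
    val random = Random(12224)
    // a random symmetric matrix, built the same way as in the benchmark
    val matrix = DoubleField.linearSpace.matrix(10, 10).symmetric { _, _ -> random.nextDouble() }

    with(Double.tensorAlgebra) {
        // was: matrix.symEigSvd(1e-10); now the tensor is the first argument
        val eig = symEigSvd(matrix, 1e-10)
        println(eig)
    }
}
```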
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
index 40b927215..2c570ea34 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
@@ -5,18 +5,17 @@

 package space.kscience.kmath.tensors

-import space.kscience.kmath.misc.PerformancePitfall
 import space.kscience.kmath.nd.ShapeND
 import space.kscience.kmath.nd.contentEquals
 import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.tensors.core.DoubleTensor
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
-
+import space.kscience.kmath.tensors.core.randomNormal
+import space.kscience.kmath.tensors.core.randomNormalLike
 import kotlin.math.abs

 // OLS estimator using SVD
-@OptIn(PerformancePitfall::class)
 fun main() {
     //seed for random
     val randSeed = 100500L
@@ -42,10 +41,10 @@ fun main() {

     // calculate y and add gaussian noise (N(0, 0.05))
     val y = x dot alpha
-    y += y.randomNormalLike(randSeed) * 0.05
+    y += randomNormalLike(y, randSeed) * 0.05

     // now restore the coefficient vector with OLS estimator with SVD
-    val (u, singValues, v) = x.svd()
+    val (u, singValues, v) = svd(x)

     // we have to make sure the singular values of the matrix are not close to zero
     println("Singular values:\n$singValues")
@@ -66,7 +65,7 @@ fun main() {
         require(yTrue.shape contentEquals yPred.shape)

         val diff = yTrue - yPred
-        return diff.dot(diff).sqrt().value()
+        return sqrt(diff.dot(diff)).value()
     }

     println("MSE: ${mse(alpha, alphaOLS)}")
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
index 78199aa8b..fb774a39d 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
@@ -6,8 +6,7 @@
 package space.kscience.kmath.tensors

 import space.kscience.kmath.nd.ShapeND
-import space.kscience.kmath.tensors.core.tensorAlgebra
-import space.kscience.kmath.tensors.core.withBroadcast
+import space.kscience.kmath.tensors.core.*

 // simple PCA
@@ -22,7 +21,7 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with broadcast methods
     )

     // take y dependent on x with noise
-    val y = 2.0 * x + (3.0 + x.randomNormalLike(seed) * 1.5)
+    val y = 2.0 * x + (3.0 + randomNormalLike(x, seed) * 1.5)

     println("x:\n$x")
     println("y:\n$y")
@@ -31,14 +30,14 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with broadcast methods
     val dataset = stack(listOf(x, y)).transposed()

     // normalize both x and y
-    val xMean = x.mean()
-    val yMean = y.mean()
+    val xMean = mean(x)
+    val yMean = mean(y)

-    val xStd = x.std()
-    val yStd = y.std()
+    val xStd = std(x)
+    val yStd = std(y)

-    val xScaled = (x - xMean) / xStd
-    val yScaled = (y - yMean) / yStd
+    val xScaled: DoubleTensor = (x - xMean) / xStd
+    val yScaled: DoubleTensor = (y - yMean) / yStd

     // save means and standard deviations for further recovery
     val mean = fromArray(
@@ -54,11 +53,11 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with broadcast methods
     println("Standard deviations:\n$std")

     // calculate the covariance matrix of scaled x and y
-    val covMatrix = cov(listOf(xScaled, yScaled))
+    val covMatrix = covariance(listOf(xScaled.asDoubleTensor1D(), yScaled.asDoubleTensor1D()))
     println("Covariance matrix:\n$covMatrix")

     // and find out eigenvector of it
-    val (_, evecs) = covMatrix.symEig()
+    val (_, evecs) = symEig(covMatrix)
     val v = evecs.getTensor(0)
     println("Eigenvector:\n$v")
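The PCA example now reads statistics as prefix calls inside the broadcast tensor-algebra scope. A condensed sketch of its normalization step; the shape, seed, and the `randomNormal(shape, seed)` signature are assumptions based on how the examples in this patch use the API:

```kotlin
import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.*

fun main(): Unit = Double.tensorAlgebra.withBroadcast {
    val seed = 11L
    val x = randomNormal(ShapeND(100), seed)
    val y = 2.0 * x + (3.0 + randomNormalLike(x, seed) * 1.5)

    // prefix-style statistics instead of x.mean() / x.std()
    val xScaled: DoubleTensor = (x - mean(x)) / std(x)
    val yScaled: DoubleTensor = (y - mean(y)) / std(y)

    val covMatrix = covariance(listOf(xScaled.asDoubleTensor1D(), yScaled.asDoubleTensor1D()))
    println(covMatrix)
}
```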
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/dataSetNormalization.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/dataSetNormalization.kt
index 091889e8e..45c2ff120 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/dataSetNormalization.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/dataSetNormalization.kt
@@ -6,6 +6,7 @@
 package space.kscience.kmath.tensors

 import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.tensors.core.randomNormal
 import space.kscience.kmath.tensors.core.tensorAlgebra
 import space.kscience.kmath.tensors.core.withBroadcast
@@ -23,8 +24,8 @@ fun main() = Double.tensorAlgebra.withBroadcast { // work in context with broadcast methods

     // find out mean and standard deviation of each column
-    val mean = dataset.mean(0, false)
-    val std = dataset.std(0, false)
+    val mean = mean(dataset, 0, false)
+    val std = std(dataset, 0, false)

     println("Mean:\n$mean")
     println("Standard deviation:\n$std")
@@ -36,8 +37,8 @@ fun main() = Double.tensorAlgebra.withBroadcast { // work in context with broadcast methods
     // now we can scale dataset with mean normalization
     val datasetScaled = (dataset - mean) / std

-    // find out mean and std of scaled dataset
+    // find out mean and standard deviation of scaled dataset

-    println("Mean of scaled:\n${datasetScaled.mean(0, false)}")
-    println("Mean of scaled:\n${datasetScaled.std(0, false)}")
+    println("Mean of scaled:\n${mean(datasetScaled, 0, false)}")
+    println("Std of scaled:\n${std(datasetScaled, 0, false)}")
 }
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt
index 60716d0ba..238696cf9 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt
@@ -41,7 +41,7 @@ fun main() = Double.tensorAlgebra.withBroadcast { // work in context with linear operations
     // solve `Ax = b` system using LUP decomposition

     // get P, L, U such that PA = LU
-    val (p, l, u) = a.lu()
+    val (p, l, u) = lu(a)

     // check P is permutation matrix
     println("P:\n$p")
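Since `lu` now returns the P, L, U triple from a prefix call, a quick sanity check of the decomposition stays a one-liner per factor. A sketch with an illustrative 2×2 system:

```kotlin
import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.tensorAlgebra
import space.kscience.kmath.tensors.core.withBroadcast

fun main() = Double.tensorAlgebra.withBroadcast {
    val a = fromArray(ShapeND(2, 2), doubleArrayOf(4.0, 3.0, 6.0, 3.0))

    val (p, l, u) = lu(a) // was: a.lu()

    // PA and LU should agree up to round-off
    println(p dot a)
    println(l dot u)
}
```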
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt
index 52b3b556c..8fd5ae5ad 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt
@@ -9,10 +9,7 @@
 import space.kscience.kmath.nd.ShapeND
 import space.kscience.kmath.nd.contentEquals
 import space.kscience.kmath.operations.asIterable
 import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.DoubleTensor
-import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.toDoubleTensor
+import space.kscience.kmath.tensors.core.*
 import kotlin.math.sqrt

 const val seed = 100500L
@@ -51,7 +48,7 @@ fun reluDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
 class ReLU : Activation(::relu, ::reluDer)

 fun sigmoid(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
-    1.0 / (1.0 + (-x).exp())
+    1.0 / (1.0 + exp(-x))
 }

 fun sigmoidDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
@@ -85,7 +82,7 @@ class Dense(
         val gradInput = outputError dot weights.transposed()

         val gradW = input.transposed() dot outputError
-        val gradBias = outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble()
+        val gradBias = mean(structureND = outputError, dim = 0, keepDim = false) * input.shape[0].toDouble()

         weights -= learningRate * gradW
         bias -= learningRate * gradBias
@@ -118,7 +115,7 @@ class NeuralNetwork(private val layers: List<Layer>) {
             onesForAnswers[intArrayOf(index, label)] = 1.0
         }

-        val softmaxValue = yPred.exp() / yPred.exp().sum(dim = 1, keepDim = true)
+        val softmaxValue = exp(yPred) / exp(yPred).sum(dim = 1, keepDim = true)

         (-onesForAnswers + softmaxValue) / (yPred.shape[0].toDouble())
     }
@@ -176,7 +173,6 @@ class NeuralNetwork(private val layers: List<Layer>) {

 }

-@OptIn(ExperimentalStdlibApi::class)
 fun main() = BroadcastDoubleTensorAlgebra {
     val features = 5
     val sampleSize = 250
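The loss gradient above combines the prefix `exp` with a dim-wise `sum`, which is exactly a row-wise softmax. Pulled out as a standalone sketch (the `fromArray` logits are illustrative):

```kotlin
import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
import space.kscience.kmath.tensors.core.DoubleTensor

// exp(yPred) replaces the old yPred.exp(); sum keeps its dim/keepDim form
fun softmaxRows(yPred: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra {
    exp(yPred) / exp(yPred).sum(dim = 1, keepDim = true)
}

fun main() = BroadcastDoubleTensorAlgebra {
    val logits = fromArray(ShapeND(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 0.0, 0.0, 0.0))
    println(softmaxRows(logits))
}
```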
diff --git a/gradle.properties b/gradle.properties
index 216ebf74a..da5867ea4 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -11,3 +11,7 @@
 org.gradle.configureondemand=true
 org.gradle.jvmargs=-Xmx4096m

 toolsVersion=0.13.1-kotlin-1.7.20
+
+
+org.gradle.parallel=true
+org.gradle.workers.max=4
diff --git a/kmath-commons/src/test/kotlin/space/kscience/kmath/commons/optimization/OptimizeTest.kt b/kmath-commons/src/test/kotlin/space/kscience/kmath/commons/optimization/OptimizeTest.kt
index f6819a27f..7c8ba7d27 100644
--- a/kmath-commons/src/test/kotlin/space/kscience/kmath/commons/optimization/OptimizeTest.kt
+++ b/kmath-commons/src/test/kotlin/space/kscience/kmath/commons/optimization/OptimizeTest.kt
@@ -13,12 +13,11 @@
 import space.kscience.kmath.expressions.Symbol.Companion.x
 import space.kscience.kmath.expressions.Symbol.Companion.y
 import space.kscience.kmath.expressions.chiSquaredExpression
 import space.kscience.kmath.expressions.symbol
-import space.kscience.kmath.operations.map
+import space.kscience.kmath.operations.DoubleBufferOps.Companion.map
 import space.kscience.kmath.optimization.*
 import space.kscience.kmath.random.RandomGenerator
 import space.kscience.kmath.structures.DoubleBuffer
 import space.kscience.kmath.structures.asBuffer
-import kotlin.math.pow
 import kotlin.test.Test

 internal class OptimizeTest {
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/expressions/DSAlgebra.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/expressions/DSAlgebra.kt
index fa8bf5e58..e7ade4f66 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/expressions/DSAlgebra.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/expressions/DSAlgebra.kt
@@ -268,7 +268,7 @@ public open class DSRing<T, A>(
     protected fun DS<T, A>.mapData(block: A.(T) -> T): DS<T, A> {
         require(derivativeAlgebra == this@DSRing) { "All derivative operations should be done in the same algebra" }
-        val newData: Buffer<T> = data.map(valueBufferFactory) {
+        val newData: Buffer<T> = data.mapToBuffer(valueBufferFactory) {
             algebra.block(it)
         }
         return DS(newData)
@@ -276,7 +276,7 @@ public open class DSRing<T, A>(
     protected fun DS<T, A>.mapDataIndexed(block: (Int, T) -> T): DS<T, A> {
         require(derivativeAlgebra == this@DSRing) { "All derivative operations should be done in the same algebra" }
-        val newData: Buffer<T> = data.mapIndexed(valueBufferFactory, block)
+        val newData: Buffer<T> = data.mapIndexedToBuffer(valueBufferFactory, block)
         return DS(newData)
     }
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/expressions/specialExpressions.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/expressions/specialExpressions.kt
index 8cfa1b353..59dfeb8ea 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/expressions/specialExpressions.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/expressions/specialExpressions.kt
@@ -11,6 +11,8 @@
 import space.kscience.kmath.structures.Buffer
 import space.kscience.kmath.structures.indices
 import kotlin.jvm.JvmName

+//TODO move to stat
+
 /**
  * Generate a chi squared expression from given x-y-sigma data and inline model. Provides automatic
  * differentiation.
  */
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/DoubleBufferField.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/DoubleBufferField.kt
index 449730e7a..2e6b63a92 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/DoubleBufferField.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/DoubleBufferField.kt
@@ -5,8 +5,6 @@

 package space.kscience.kmath.operations

-import space.kscience.kmath.misc.UnstableKMathAPI
-import space.kscience.kmath.operations.DoubleField.pow
 import space.kscience.kmath.structures.Buffer
 import space.kscience.kmath.structures.DoubleBuffer
@@ -32,9 +30,9 @@ public class DoubleBufferField(public val size: Int) : ExtendedField<Buffer<Double>>, DoubleBufferOps() {
     override fun atanh(arg: Buffer<Double>): DoubleBuffer = super.atanh(arg)

     override fun power(arg: Buffer<Double>, pow: Number): DoubleBuffer = if (pow.isInteger()) {
-        arg.mapInline { it.pow(pow.toInt()) }
+        arg.map { it.pow(pow.toInt()) }
     } else {
-        arg.mapInline {
+        arg.map {
             if (it < 0) throw IllegalArgumentException("Negative argument $it could not be raised to the fractional power")
             it.pow(pow.toDouble())
         }
@@ -42,103 +40,4 @@ public class DoubleBufferField(public val size: Int) : ExtendedField<Buffer<Double>>, DoubleBufferOps() {
     }

     override fun unaryOperationFunction(operation: String): (arg: Buffer<Double>) -> Buffer<Double> =
         super.unaryOperationFunction(operation)
-
-    // override fun number(value: Number): Buffer<Double> = DoubleBuffer(size) { value.toDouble() }
-//
-//    override fun Buffer<Double>.unaryMinus(): Buffer<Double> = DoubleBufferOperations.run {
-//        -this@unaryMinus
-//    }
-//
-//    override fun add(a: Buffer<Double>, b: Buffer<Double>): DoubleBuffer {
-//        require(a.size == size) { "The buffer size ${a.size} does not match context size $size" }
-//        return DoubleBufferOperations.add(a, b)
-//    }
-//
-
-//
-//    override fun multiply(a: Buffer<Double>, b: Buffer<Double>): DoubleBuffer {
-//        require(a.size == size) { "The buffer size ${a.size} does not match context size $size" }
-//        return DoubleBufferOperations.multiply(a, b)
-//    }
-//
-//    override fun divide(a: Buffer<Double>, b: Buffer<Double>): DoubleBuffer {
-//        require(a.size == size) { "The buffer size ${a.size} does not match context size $size" }
-//        return DoubleBufferOperations.divide(a, b)
-//    }
-//
-//    override fun sin(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.sin(arg)
-//    }
-//
-//    override fun cos(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.cos(arg)
-//    }
-//
-//    override fun tan(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.tan(arg)
-//    }
-//
-//    override fun asin(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.asin(arg)
-//    }
-//
-//    override fun acos(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.acos(arg)
-//    }
-//
-//    override fun atan(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.atan(arg)
-//    }
-//
-//    override fun sinh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.sinh(arg)
-//    }
-//
-//    override fun cosh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.cosh(arg)
-//    }
-//
-//    override fun tanh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.tanh(arg)
-//    }
-//
-//    override fun asinh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.asinh(arg)
-//    }
-//
-//    override fun acosh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.acosh(arg)
-//    }
-//
-//    override fun atanh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.atanh(arg)
-//    }
-//
-//    override fun power(arg: Buffer<Double>, pow: Number): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.power(arg, pow)
-//    }
-//
-//    override fun exp(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.exp(arg)
-//    }
-//
-//    override fun ln(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.ln(arg)
-//    }
-
 }
\ No newline at end of file
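The `power` override keeps two code paths: integer exponents work for any base, while fractional exponents reject negative elements. A small sketch of that contract:

```kotlin
import space.kscience.kmath.operations.DoubleBufferField
import space.kscience.kmath.structures.DoubleBuffer

fun main() {
    val field = DoubleBufferField(3)
    val xs = DoubleBuffer(-2.0, 3.0, 4.0)

    with(field) {
        println(power(xs, 2)) // integer exponent: fine, even for -2.0
        // power(xs, 0.5) would throw IllegalArgumentException for the negative element
    }
}
```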
$size" } -// return DoubleBufferOperations.acos(arg) -// } -// -// override fun atan(arg: Buffer): DoubleBuffer { -// require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" } -// return DoubleBufferOperations.atan(arg) -// } -// -// override fun sinh(arg: Buffer): DoubleBuffer { -// require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" } -// return DoubleBufferOperations.sinh(arg) -// } -// -// override fun cosh(arg: Buffer): DoubleBuffer { -// require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" } -// return DoubleBufferOperations.cosh(arg) -// } -// -// override fun tanh(arg: Buffer): DoubleBuffer { -// require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" } -// return DoubleBufferOperations.tanh(arg) -// } -// -// override fun asinh(arg: Buffer): DoubleBuffer { -// require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" } -// return DoubleBufferOperations.asinh(arg) -// } -// -// override fun acosh(arg: Buffer): DoubleBuffer { -// require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" } -// return DoubleBufferOperations.acosh(arg) -// } -// -// override fun atanh(arg: Buffer): DoubleBuffer { -// require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" } -// return DoubleBufferOperations.atanh(arg) -// } -// -// override fun power(arg: Buffer, pow: Number): DoubleBuffer { -// require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" } -// return DoubleBufferOperations.power(arg, pow) -// } -// -// override fun exp(arg: Buffer): DoubleBuffer { -// require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" } -// return DoubleBufferOperations.exp(arg) -// } -// -// override fun ln(arg: Buffer): DoubleBuffer { -// require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" } -// return DoubleBufferOperations.ln(arg) -// } - } \ No newline at end of file diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/DoubleBufferOps.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/DoubleBufferOps.kt index ded8753a3..7ba1a7066 100644 --- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/DoubleBufferOps.kt +++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/DoubleBufferOps.kt @@ -6,10 +6,8 @@ package space.kscience.kmath.operations import space.kscience.kmath.linear.Point -import space.kscience.kmath.structures.Buffer -import space.kscience.kmath.structures.DoubleBuffer -import space.kscience.kmath.structures.MutableBufferFactory -import space.kscience.kmath.structures.asBuffer +import space.kscience.kmath.misc.UnstableKMathAPI +import space.kscience.kmath.structures.* import kotlin.math.* /** @@ -19,10 +17,29 @@ public abstract class DoubleBufferOps : BufferAlgebra, Exte Norm, Double> { override val elementAlgebra: DoubleField get() = DoubleField + override val elementBufferFactory: MutableBufferFactory get() = elementAlgebra.bufferFactory - override fun Buffer.map(block: DoubleField.(Double) -> Double): DoubleBuffer = - mapInline { DoubleField.block(it) } + @Suppress("OVERRIDE_BY_INLINE") + @OptIn(UnstableKMathAPI::class) + final override inline fun Buffer.map(block: DoubleField.(Double) -> Double): DoubleBuffer = + DoubleArray(size) { DoubleField.block(getDouble(it)) 
@@ -101,55 +119,83 @@ public abstract class DoubleBufferOps : BufferAlgebra<Double, DoubleField>, ExtendedFieldOps<Buffer<Double>>,
         } else DoubleBuffer(DoubleArray(left.size) { left[it] / right[it] })
     }

-    override fun sin(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::sin)
+    override fun sin(arg: Buffer<Double>): DoubleBuffer = arg.map { sin(it) }

-    override fun cos(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::cos)
+    override fun cos(arg: Buffer<Double>): DoubleBuffer = arg.map { cos(it) }

-    override fun tan(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::tan)
+    override fun tan(arg: Buffer<Double>): DoubleBuffer = arg.map { tan(it) }

-    override fun asin(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::asin)
+    override fun asin(arg: Buffer<Double>): DoubleBuffer = arg.map { asin(it) }

-    override fun acos(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::acos)
+    override fun acos(arg: Buffer<Double>): DoubleBuffer = arg.map { acos(it) }

-    override fun atan(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::atan)
+    override fun atan(arg: Buffer<Double>): DoubleBuffer = arg.map { atan(it) }

-    override fun sinh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::sinh)
+    override fun sinh(arg: Buffer<Double>): DoubleBuffer = arg.map { sinh(it) }

-    override fun cosh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::cosh)
+    override fun cosh(arg: Buffer<Double>): DoubleBuffer = arg.map { cosh(it) }

-    override fun tanh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::tanh)
+    override fun tanh(arg: Buffer<Double>): DoubleBuffer = arg.map { tanh(it) }

-    override fun asinh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::asinh)
+    override fun asinh(arg: Buffer<Double>): DoubleBuffer = arg.map { asinh(it) }

-    override fun acosh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::acosh)
+    override fun acosh(arg: Buffer<Double>): DoubleBuffer = arg.map { acosh(it) }

-    override fun atanh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::atanh)
+    override fun atanh(arg: Buffer<Double>): DoubleBuffer = arg.map { atanh(it) }

-    override fun exp(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::exp)
+    override fun exp(arg: Buffer<Double>): DoubleBuffer = arg.map { exp(it) }
-    override fun ln(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::ln)
+    override fun ln(arg: Buffer<Double>): DoubleBuffer = arg.map { ln(it) }

     override fun norm(arg: Buffer<Double>): Double = DoubleL2Norm.norm(arg)

-    override fun scale(a: Buffer<Double>, value: Double): DoubleBuffer = a.mapInline { it * value }
+    override fun scale(a: Buffer<Double>, value: Double): DoubleBuffer = a.map { it * value }

     override fun power(arg: Buffer<Double>, pow: Number): Buffer<Double> = if (pow is Int) {
-        arg.mapInline { it.pow(pow) }
+        arg.map { it.pow(pow) }
     } else {
-        arg.mapInline { it.pow(pow.toDouble()) }
+        arg.map { it.pow(pow.toDouble()) }
     }

-    public companion object : DoubleBufferOps() {
-        public inline fun Buffer<Double>.mapInline(block: (Double) -> Double): DoubleBuffer =
-            if (this is DoubleBuffer) {
-                DoubleArray(size) { block(array[it]) }.asBuffer()
-            } else {
-                DoubleArray(size) { block(get(it)) }.asBuffer()
-            }
-    }
+    public companion object : DoubleBufferOps()
 }

 public object DoubleL2Norm : Norm<Point<Double>, Double> {
     override fun norm(arg: Point<Double>): Double = sqrt(arg.fold(0.0) { acc: Double, d: Double -> acc + d.pow(2) })
 }

+public fun DoubleBufferOps.sum(buffer: Buffer<Double>): Double = buffer.reduce(Double::plus)
+
+/**
+ * Sum of elements using given [conversion]
+ */
+public inline fun <T> DoubleBufferOps.sumOf(buffer: Buffer<T>, conversion: (T) -> Double): Double =
+    buffer.fold(0.0) { acc, value -> acc + conversion(value) }
+
+public fun DoubleBufferOps.average(buffer: Buffer<Double>): Double = sum(buffer) / buffer.size
+
+/**
+ * Average of elements using given [conversion]
+ */
+public inline fun <T> DoubleBufferOps.averageOf(buffer: Buffer<T>, conversion: (T) -> Double): Double =
+    sumOf(buffer, conversion) / buffer.size
+
+public fun DoubleBufferOps.dispersion(buffer: Buffer<Double>): Double {
+    val av = average(buffer)
+    return buffer.fold(0.0) { acc, value -> acc + (value - av).pow(2) } / buffer.size
+}
+
+public fun DoubleBufferOps.std(buffer: Buffer<Double>): Double = sqrt(dispersion(buffer))
+
+public fun DoubleBufferOps.covariance(x: Buffer<Double>, y: Buffer<Double>): Double {
+    require(x.size == y.size) { "Expected buffers of the same size, but x.size == ${x.size} and y.size == ${y.size}" }
+    val xMean = average(x)
+    val yMean = average(y)
+    var sum = 0.0
+    x.indices.forEach {
+        sum += (x[it] - xMean) * (y[it] - yMean)
+    }
+    return sum / (x.size - 1)
+}
+
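The new extensions above give descriptive statistics directly on `Double` buffers; note that `dispersion`/`std` divide by n while `covariance` divides by n − 1 (sample covariance). A quick check of the definitions:

```kotlin
import space.kscience.kmath.operations.DoubleBufferOps
import space.kscience.kmath.operations.average
import space.kscience.kmath.operations.covariance
import space.kscience.kmath.operations.std
import space.kscience.kmath.operations.sum
import space.kscience.kmath.structures.DoubleBuffer

fun main() = with(DoubleBufferOps) {
    val x = DoubleBuffer(1.0, 2.0, 3.0, 4.0)
    val y = DoubleBuffer(2.0, 4.0, 6.0, 8.0)

    println(sum(x))           // 10.0
    println(average(x))       // 2.5
    println(std(x))           // population standard deviation
    println(covariance(x, y)) // sample covariance of x and y
}
```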
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/algebraExtensions.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/algebraExtensions.kt
index efadfb3cc..f05ddafb8 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/algebraExtensions.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/operations/algebraExtensions.kt
@@ -5,6 +5,21 @@

 package space.kscience.kmath.operations

+import space.kscience.kmath.misc.PerformancePitfall
+import space.kscience.kmath.structures.Buffer
+
+/**
+ * Returns the sum of all elements in the buffer in this [Group].
+ *
+ * @receiver the algebra that provides addition.
+ * @param data the buffer to sum up.
+ * @return the sum.
+ */
+@PerformancePitfall("Potential boxing access to buffer elements")
+public fun <T> Group<T>.sum(data: Buffer<T>): T = data.fold(zero) { left, right ->
+    add(left, right)
+}
+
 /**
  * Returns the sum of all elements in the iterable in this [Group].
  *
@@ -29,6 +44,18 @@
 public fun <T> Group<T>.sum(data: Sequence<T>): T = data.fold(zero) { left, right ->
     add(left, right)
 }

+/**
+ * Returns an average value of elements in the buffer in this [Group].
+ *
+ * @receiver the algebra that provides addition and division.
+ * @param data the buffer to find average.
+ * @return the average value.
+ * @author Iaroslav Postovalov
+ */
+@PerformancePitfall("Potential boxing access to buffer elements")
+public fun <T, S> S.average(data: Buffer<T>): T where S : Group<T>, S : ScaleOperations<T> =
+    sum(data) / data.size
+
 /**
  * Returns an average value of elements in the iterable in this [Group].
 *
@@ -95,4 +122,3 @@
 public fun <T, S> Sequence<T>.averageWith(space: S): T where S : Group<T>, S : ScaleOperations<T> =
     space.average(this)
-
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/DoubleBuffer.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/DoubleBuffer.kt
index 5a8594616..1696d5055 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/DoubleBuffer.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/DoubleBuffer.kt
@@ -57,6 +57,9 @@ public fun Buffer<Double>.toDoubleArray(): DoubleArray = when (this) {
     else -> DoubleArray(size, ::get)
 }

+/**
+ * Represent this buffer as [DoubleBuffer]. Does not guarantee that changes in the original buffer are reflected on this buffer.
+ */
 public fun Buffer<Double>.toDoubleBuffer(): DoubleBuffer = when (this) {
     is DoubleBuffer -> this
     else -> DoubleArray(size, ::get).asBuffer()
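The `Buffer` overloads of `sum` and `average` work for any `Group` with scaling, at the cost of possibly boxed element access, hence the `@PerformancePitfall` opt-in. For example:

```kotlin
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.average
import space.kscience.kmath.operations.sum
import space.kscience.kmath.structures.asBuffer

@OptIn(PerformancePitfall::class)
fun main() {
    val data = doubleArrayOf(1.0, 2.0, 3.0).asBuffer()
    println(DoubleField.sum(data))     // 6.0
    println(DoubleField.average(data)) // 2.0
}
```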
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/bufferExtensions.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/bufferExtensions.kt
index 0a5d6b964..0a475187f 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/bufferExtensions.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/bufferExtensions.kt
@@ -61,18 +61,18 @@ public fun <T> Buffer<T>.toMutableList(): MutableList<T> = when (this) {
  */
 @UnstableKMathAPI
 public inline fun <reified T> Buffer<T>.toTypedArray(): Array<T> = Array(size, ::get)
-
-/**
- * Create a new buffer from this one with the given mapping function and using [Buffer.Companion.auto] buffer factory.
- */
-public inline fun <T, reified R : Any> Buffer<T>.map(block: (T) -> R): Buffer<R> =
-    Buffer.auto(size) { block(get(it)) }
+//
+///**
+// * Create a new buffer from this one with the given mapping function and using [Buffer.Companion.auto] buffer factory.
+// */
+//public inline fun <T, reified R : Any> Buffer<T>.map(block: (T) -> R): Buffer<R> =
+//    Buffer.auto(size) { block(get(it)) }

 /**
  * Create a new buffer from this one with the given mapping function.
  * Provided [bufferFactory] is used to construct the new buffer.
  */
-public inline fun <T, R> Buffer<T>.map(
+public inline fun <T, R> Buffer<T>.mapToBuffer(
     bufferFactory: BufferFactory<R>,
     crossinline block: (T) -> R,
 ): Buffer<R> = bufferFactory(size) { block(get(it)) }
@@ -81,23 +81,24 @@ public inline fun <T, R> Buffer<T>.mapToBuffer(
  * Create a new buffer from this one with the given mapping (indexed) function.
  * Provided [bufferFactory] is used to construct the new buffer.
  */
-public inline fun <T, R> Buffer<T>.mapIndexed(
+public inline fun <T, R> Buffer<T>.mapIndexedToBuffer(
     bufferFactory: BufferFactory<R>,
     crossinline block: (index: Int, value: T) -> R,
 ): Buffer<R> = bufferFactory(size) { block(it, get(it)) }
-
-/**
- * Create a new buffer from this one with the given indexed mapping function.
- * Provided [BufferFactory] is used to construct the new buffer.
- */
-public inline fun <T, reified R : Any> Buffer<T>.mapIndexed(
-    crossinline block: (index: Int, value: T) -> R,
-): Buffer<R> = Buffer.auto(size) { block(it, get(it)) }
+//
+///**
+// * Create a new buffer from this one with the given indexed mapping function.
+// * Provided [BufferFactory] is used to construct the new buffer.
+// */
+//public inline fun <T, reified R : Any> Buffer<T>.mapIndexed(
+//    crossinline block: (index: Int, value: T) -> R,
+//): Buffer<R> = Buffer.auto(size) { block(it, get(it)) }

 /**
  * Fold given buffer according to [operation]
  */
 public inline fun <T, R> Buffer<T>.fold(initial: R, operation: (acc: R, T) -> R): R {
+    if (size == 0) return initial
     var accumulator = initial
     for (index in this.indices) accumulator = operation(accumulator, get(index))
     return accumulator
@@ -107,18 +108,31 @@ public inline fun <T, R> Buffer<T>.fold(initial: R, operation: (acc: R, T) -> R) {
 /**
  * Fold given buffer according to indexed [operation]
  */
 public inline fun <T, R> Buffer<T>.foldIndexed(initial: R, operation: (index: Int, acc: R, T) -> R): R {
+    if (size == 0) return initial
     var accumulator = initial
     for (index in this.indices) accumulator = operation(index, accumulator, get(index))
     return accumulator
 }

+/**
+ * Reduce a buffer from left to right according to [operation]
+ */
+public inline fun <T> Buffer<T>.reduce(operation: (left: T, value: T) -> T): T {
+    require(size > 0) { "Buffer must have elements" }
+    var current = get(0)
+    for (i in 1 until size) {
+        current = operation(current, get(i))
+    }
+    return current
+}
+
 /**
  * Zip two buffers using given [transform].
  */
 @UnstableKMathAPI
-public inline fun <T1, T2, R : Any> Buffer<T1>.zip(
+public inline fun <T1, T2, R : Any> Buffer<T1>.combineToBuffer(
     other: Buffer<T2>,
-    bufferFactory: BufferFactory<R> = BufferFactory.auto(),
+    bufferFactory: BufferFactory<R>,
     crossinline transform: (T1, T2) -> R,
 ): Buffer<R> {
     require(size == other.size) { "Buffer size mismatch in zip: expected $size but found ${other.size}" }
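`mapToBuffer`/`mapIndexedToBuffer` always take an explicit factory now, and `reduce` requires a non-empty buffer. A sketch, assuming the `BufferFactory.auto()` factory referenced elsewhere in this patch:

```kotlin
import space.kscience.kmath.structures.BufferFactory
import space.kscience.kmath.structures.asBuffer
import space.kscience.kmath.structures.mapToBuffer
import space.kscience.kmath.structures.reduce

fun main() {
    val buffer = doubleArrayOf(1.0, 2.0, 3.0).asBuffer()

    // the factory is explicit where the removed `map` defaulted to Buffer.auto
    val doubled = buffer.mapToBuffer(BufferFactory.auto()) { it * 2 }

    println(doubled.reduce { acc, value -> acc + value }) // 12.0
}
```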
+ */ + +package space.kscience.kmath.structures + + +public typealias Float32 = Float +public typealias Float64 = Double + +public typealias Int8 = Byte +public typealias Int16 = Short +public typealias Int32 = Int +public typealias Int64 = Long + +public typealias UInt8 = UByte +public typealias UInt16 = UShort +public typealias UInt32 = UInt +public typealias UInt64 = ULong \ No newline at end of file diff --git a/kmath-coroutines/src/commonMain/kotlin/space/kscience/kmath/coroutines/coroutinesExtra.kt b/kmath-coroutines/src/commonMain/kotlin/space/kscience/kmath/coroutines/coroutinesExtra.kt index 3f06693b0..7bae388a8 100644 --- a/kmath-coroutines/src/commonMain/kotlin/space/kscience/kmath/coroutines/coroutinesExtra.kt +++ b/kmath-coroutines/src/commonMain/kotlin/space/kscience/kmath/coroutines/coroutinesExtra.kt @@ -81,9 +81,7 @@ public suspend fun AsyncFlow.collect(concurrency: Int, collector: FlowCol public suspend inline fun AsyncFlow.collect( concurrency: Int, crossinline action: suspend (value: T) -> Unit, -): Unit = collect(concurrency, object : FlowCollector { - override suspend fun emit(value: T): Unit = action(value) -}) +): Unit = collect(concurrency, FlowCollector { value -> action(value) }) public inline fun Flow.mapParallel( dispatcher: CoroutineDispatcher = Dispatchers.Default, diff --git a/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/GaussIntegratorRuleFactory.kt b/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/GaussIntegratorRuleFactory.kt index 3845bd2a7..fc76ea819 100644 --- a/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/GaussIntegratorRuleFactory.kt +++ b/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/GaussIntegratorRuleFactory.kt @@ -5,7 +5,7 @@ package space.kscience.kmath.integration -import space.kscience.kmath.operations.map +import space.kscience.kmath.operations.mapToBuffer import space.kscience.kmath.structures.Buffer import space.kscience.kmath.structures.DoubleBuffer import space.kscience.kmath.structures.asBuffer @@ -33,11 +33,11 @@ public fun GaussIntegratorRuleFactory.build( val normalized: Pair, Buffer> = build(numPoints) val length = range.endInclusive - range.start - val points = normalized.first.map(::DoubleBuffer) { + val points = normalized.first.mapToBuffer(::DoubleBuffer) { range.start + length / 2 + length / 2 * it } - val weights = normalized.second.map(::DoubleBuffer) { + val weights = normalized.second.mapToBuffer(::DoubleBuffer) { it * length / 2 } diff --git a/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/SplineIntegrator.kt b/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/SplineIntegrator.kt index 9a8f475c8..c66674fbb 100644 --- a/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/SplineIntegrator.kt +++ b/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/SplineIntegrator.kt @@ -65,9 +65,9 @@ public class SplineIntegrator>( DoubleBuffer(numPoints) { i -> range.start + i * step } } - val values = nodes.map(bufferFactory) { integrand.function(it) } + val values = nodes.mapToBuffer(bufferFactory) { integrand.function(it) } val polynomials = interpolator.interpolatePolynomials( - nodes.map(bufferFactory) { number(it) }, + nodes.mapToBuffer(bufferFactory) { number(it) }, values ) val res = polynomials.integrate(algebra, number(range.start)..number(range.endInclusive)) @@ -93,7 +93,7 @@ public object DoubleSplineIntegrator : UnivariateIntegrator { 
diff --git a/kmath-coroutines/src/commonMain/kotlin/space/kscience/kmath/coroutines/coroutinesExtra.kt b/kmath-coroutines/src/commonMain/kotlin/space/kscience/kmath/coroutines/coroutinesExtra.kt
index 3f06693b0..7bae388a8 100644
--- a/kmath-coroutines/src/commonMain/kotlin/space/kscience/kmath/coroutines/coroutinesExtra.kt
+++ b/kmath-coroutines/src/commonMain/kotlin/space/kscience/kmath/coroutines/coroutinesExtra.kt
@@ -81,9 +81,7 @@ public suspend fun <T> AsyncFlow<T>.collect(concurrency: Int, collector: FlowCollector<T>)
 public suspend inline fun <T> AsyncFlow<T>.collect(
     concurrency: Int,
     crossinline action: suspend (value: T) -> Unit,
-): Unit = collect(concurrency, object : FlowCollector<T> {
-    override suspend fun emit(value: T): Unit = action(value)
-})
+): Unit = collect(concurrency, FlowCollector { value -> action(value) })

 public inline fun <T, R> Flow<T>.mapParallel(
     dispatcher: CoroutineDispatcher = Dispatchers.Default,
diff --git a/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/GaussIntegratorRuleFactory.kt b/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/GaussIntegratorRuleFactory.kt
index 3845bd2a7..fc76ea819 100644
--- a/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/GaussIntegratorRuleFactory.kt
+++ b/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/GaussIntegratorRuleFactory.kt
@@ -5,7 +5,7 @@

 package space.kscience.kmath.integration

-import space.kscience.kmath.operations.map
+import space.kscience.kmath.operations.mapToBuffer
 import space.kscience.kmath.structures.Buffer
 import space.kscience.kmath.structures.DoubleBuffer
 import space.kscience.kmath.structures.asBuffer
@@ -33,11 +33,11 @@ public fun GaussIntegratorRuleFactory.build(
     val normalized: Pair<Buffer<Double>, Buffer<Double>> = build(numPoints)
     val length = range.endInclusive - range.start

-    val points = normalized.first.map(::DoubleBuffer) {
+    val points = normalized.first.mapToBuffer(::DoubleBuffer) {
         range.start + length / 2 + length / 2 * it
     }

-    val weights = normalized.second.map(::DoubleBuffer) {
+    val weights = normalized.second.mapToBuffer(::DoubleBuffer) {
         it * length / 2
     }
diff --git a/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/SplineIntegrator.kt b/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/SplineIntegrator.kt
index 9a8f475c8..c66674fbb 100644
--- a/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/SplineIntegrator.kt
+++ b/kmath-functions/src/commonMain/kotlin/space/kscience/kmath/integration/SplineIntegrator.kt
@@ -65,9 +65,9 @@ public class SplineIntegrator<T : Comparable<T>>(
             DoubleBuffer(numPoints) { i -> range.start + i * step }
         }

-        val values = nodes.map(bufferFactory) { integrand.function(it) }
+        val values = nodes.mapToBuffer(bufferFactory) { integrand.function(it) }
         val polynomials = interpolator.interpolatePolynomials(
-            nodes.map(bufferFactory) { number(it) },
+            nodes.mapToBuffer(bufferFactory) { number(it) },
             values
         )
         val res = polynomials.integrate(algebra, number(range.start)..number(range.endInclusive))
@@ -93,7 +93,7 @@ public object DoubleSplineIntegrator : UnivariateIntegrator<Double> {
             DoubleBuffer(numPoints) { i -> range.start + i * step }
         }

-        val values = nodes.map { integrand.function(it) }
+        val values = nodes.mapToBuffer(::DoubleBuffer) { integrand.function(it) }
         val polynomials = interpolator.interpolatePolynomials(nodes, values)
         val res = polynomials.integrate(DoubleField, range)
         return integrand + IntegrandValue(res) + IntegrandCallsPerformed(integrand.calls + nodes.size)
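Both integrators now build node and value buffers through `mapToBuffer` with an explicit `::DoubleBuffer` factory. The pattern in isolation (the quadratic integrand is illustrative):

```kotlin
import space.kscience.kmath.structures.DoubleBuffer
import space.kscience.kmath.structures.mapToBuffer

fun main() {
    // evenly spaced nodes on [0, 1], mirroring the integrators above
    val numPoints = 5
    val step = 1.0 / (numPoints - 1)
    val nodes = DoubleBuffer(numPoints) { i -> i * step }

    // values buffer via the renamed API, as in DoubleSplineIntegrator
    val values = nodes.mapToBuffer(::DoubleBuffer) { it * it }
    println(values)
}
```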
diff --git a/kmath-histograms/src/commonMain/kotlin/space/kscience/kmath/histogram/UniformHistogramGroupND.kt b/kmath-histograms/src/commonMain/kotlin/space/kscience/kmath/histogram/UniformHistogramGroupND.kt
index f22ea9776..b62780ed5 100644
--- a/kmath-histograms/src/commonMain/kotlin/space/kscience/kmath/histogram/UniformHistogramGroupND.kt
+++ b/kmath-histograms/src/commonMain/kotlin/space/kscience/kmath/histogram/UniformHistogramGroupND.kt
@@ -97,7 +97,7 @@ public class UniformHistogramGroupND<V : Any, A : Field<V>>(
             }
         }
         hBuilder.apply(builder)
-        val values: BufferND<V> = BufferND(ndCounter.indices, ndCounter.buffer.map(valueBufferFactory) { it.value })
+        val values: BufferND<V> = BufferND(ndCounter.indices, ndCounter.buffer.mapToBuffer(valueBufferFactory) { it.value })

         return HistogramND(this, values)
     }
diff --git a/kmath-multik/src/commonTest/kotlin/space/kscience/kmath/multik/MultikNDTest.kt b/kmath-multik/src/commonTest/kotlin/space/kscience/kmath/multik/MultikNDTest.kt
index afd292904..d52420fc6 100644
--- a/kmath-multik/src/commonTest/kotlin/space/kscience/kmath/multik/MultikNDTest.kt
+++ b/kmath-multik/src/commonTest/kotlin/space/kscience/kmath/multik/MultikNDTest.kt
@@ -12,6 +12,7 @@
 import space.kscience.kmath.nd.StructureND
 import space.kscience.kmath.nd.one
 import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.randomNormal
 import space.kscience.kmath.tensors.core.tensorAlgebra
 import kotlin.test.Test
 import kotlin.test.assertTrue
diff --git a/kmath-nd4j/src/main/kotlin/space/kscience/kmath/nd4j/Nd4jTensorAlgebra.kt b/kmath-nd4j/src/main/kotlin/space/kscience/kmath/nd4j/Nd4jTensorAlgebra.kt
index 62bfe90ba..ef7b7f257 100644
--- a/kmath-nd4j/src/main/kotlin/space/kscience/kmath/nd4j/Nd4jTensorAlgebra.kt
+++ b/kmath-nd4j/src/main/kotlin/space/kscience/kmath/nd4j/Nd4jTensorAlgebra.kt
@@ -118,35 +118,35 @@ public sealed interface Nd4jTensorAlgebra<T : Number> : AnalyticTensorAlgebra<T
     override fun StructureND<T>.argMax(dim: Int, keepDim: Boolean): Tensor<Int> =
         ndBase.get().argmax(ndArray, keepDim, dim).asIntStructure()

-    override fun StructureND<T>.mean(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
-        ndArray.mean(keepDim, dim).wrap()
+    override fun mean(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T> =
+        structureND.ndArray.mean(keepDim, dim).wrap()

-    override fun StructureND<T>.exp(): Nd4jArrayStructure<T> = Transforms.exp(ndArray).wrap()
-    override fun StructureND<T>.ln(): Nd4jArrayStructure<T> = Transforms.log(ndArray).wrap()
-    override fun StructureND<T>.sqrt(): Nd4jArrayStructure<T> = Transforms.sqrt(ndArray).wrap()
-    override fun StructureND<T>.cos(): Nd4jArrayStructure<T> = Transforms.cos(ndArray).wrap()
-    override fun StructureND<T>.acos(): Nd4jArrayStructure<T> = Transforms.acos(ndArray).wrap()
-    override fun StructureND<T>.cosh(): Nd4jArrayStructure<T> = Transforms.cosh(ndArray).wrap()
+    override fun exp(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.exp(arg.ndArray).wrap()
+    override fun ln(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.log(arg.ndArray).wrap()
+    override fun sqrt(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.sqrt(arg.ndArray).wrap()
+    override fun cos(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.cos(arg.ndArray).wrap()
+    override fun acos(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.acos(arg.ndArray).wrap()
+    override fun cosh(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.cosh(arg.ndArray).wrap()

-    override fun StructureND<T>.acosh(): Nd4jArrayStructure<T> =
-        Nd4j.getExecutioner().exec(ACosh(ndArray, ndArray.ulike())).wrap()
+    override fun acosh(arg: StructureND<T>): Nd4jArrayStructure<T> =
+        Nd4j.getExecutioner().exec(ACosh(arg.ndArray, arg.ndArray.ulike())).wrap()

-    override fun StructureND<T>.sin(): Nd4jArrayStructure<T> = Transforms.sin(ndArray).wrap()
-    override fun StructureND<T>.asin(): Nd4jArrayStructure<T> = Transforms.asin(ndArray).wrap()
-    override fun StructureND<T>.sinh(): Tensor<T> = Transforms.sinh(ndArray).wrap()
+    override fun sin(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.sin(arg.ndArray).wrap()
+    override fun asin(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.asin(arg.ndArray).wrap()
+    override fun sinh(arg: StructureND<T>): Tensor<T> = Transforms.sinh(arg.ndArray).wrap()

-    override fun StructureND<T>.asinh(): Nd4jArrayStructure<T> =
-        Nd4j.getExecutioner().exec(ASinh(ndArray, ndArray.ulike())).wrap()
+    override fun asinh(arg: StructureND<T>): Nd4jArrayStructure<T> =
+        Nd4j.getExecutioner().exec(ASinh(arg.ndArray, arg.ndArray.ulike())).wrap()

-    override fun StructureND<T>.tan(): Nd4jArrayStructure<T> = Transforms.tan(ndArray).wrap()
-    override fun StructureND<T>.atan(): Nd4jArrayStructure<T> = Transforms.atan(ndArray).wrap()
-    override fun StructureND<T>.tanh(): Nd4jArrayStructure<T> = Transforms.tanh(ndArray).wrap()
-    override fun StructureND<T>.atanh(): Nd4jArrayStructure<T> = Transforms.atanh(ndArray).wrap()
+    override fun tan(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.tan(arg.ndArray).wrap()
+    override fun atan(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.atan(arg.ndArray).wrap()
+    override fun tanh(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.tanh(arg.ndArray).wrap()
+    override fun atanh(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.atanh(arg.ndArray).wrap()

     override fun power(arg: StructureND<T>, pow: Number): StructureND<T> = Transforms.pow(arg.ndArray, pow).wrap()

-    override fun StructureND<T>.ceil(): Nd4jArrayStructure<T> = Transforms.ceil(ndArray).wrap()
-    override fun StructureND<T>.floor(): Nd4jArrayStructure<T> = Transforms.floor(ndArray).wrap()
-    override fun StructureND<T>.std(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
-        ndArray.std(true, keepDim, dim).wrap()
+    override fun ceil(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.ceil(arg.ndArray).wrap()
+    override fun floor(structureND: StructureND<T>): Nd4jArrayStructure<T> = Transforms.floor(structureND.ndArray).wrap()
+    override fun std(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T> =
+        structureND.ndArray.std(true, keepDim, dim).wrap()

     override fun T.div(arg: StructureND<T>): Nd4jArrayStructure<T> = arg.ndArray.rdiv(this).wrap()
     override fun StructureND<T>.div(arg: T): Nd4jArrayStructure<T> = ndArray.div(arg).wrap()
@@ -160,8 +160,8 @@ public sealed interface Nd4jTensorAlgebra<T : Number> : AnalyticTensorAlgebra<T
         ndArray.divi(arg.ndArray)
     }

-    override fun StructureND<T>.variance(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
-        Nd4j.getExecutioner().exec(Variance(ndArray, true, true, dim)).wrap()
+    override fun variance(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T> =
+        Nd4j.getExecutioner().exec(Variance(structureND.ndArray, true, true, dim)).wrap()

     private companion object {
         private val ndBase: ThreadLocal<NDBase> = ThreadLocal.withInitial(::NDBase)
@@ -211,7 +211,7 @@ public object DoubleNd4jTensorAlgebra : Nd4jTensorAlgebra<Double> {
     override fun StructureND<Double>.sum(): Double = ndArray.sumNumber().toDouble()
     override fun StructureND<Double>.min(): Double = ndArray.minNumber().toDouble()
     override fun StructureND<Double>.max(): Double = ndArray.maxNumber().toDouble()
-    override fun StructureND<Double>.mean(): Double = ndArray.meanNumber().toDouble()
-    override fun StructureND<Double>.std(): Double = ndArray.stdNumber().toDouble()
-    override fun StructureND<Double>.variance(): Double = ndArray.varNumber().toDouble()
+    override fun mean(structureND: StructureND<Double>): Double = structureND.ndArray.meanNumber().toDouble()
+    override fun std(structureND: StructureND<Double>): Double = structureND.ndArray.stdNumber().toDouble()
+    override fun variance(structureND: StructureND<Double>): Double = structureND.ndArray.varNumber().toDouble()
 }
diff --git a/kmath-optimization/src/commonMain/tmp/minuit/HessianGradientCalculator.kt b/kmath-optimization/src/commonMain/tmp/minuit/HessianGradientCalculator.kt
index 150d192f9..4ef743955 100644
--- a/kmath-optimization/src/commonMain/tmp/minuit/HessianGradientCalculator.kt
+++ b/kmath-optimization/src/commonMain/tmp/minuit/HessianGradientCalculator.kt
@@ -33,7 +33,7 @@ internal class HessianGradientCalculator(fcn: MnFcn, par: MnUserTransformation,
         val g2: RealVector = gradient.getGradientDerivative()
         val gstep: RealVector = gradient.getStep()
         val fcnmin: Double = par.fval()
-        // std::cout<<"fval: "<<fcnmin<<std::endl;
diff --git a/kmath-optimization/src/commonMain/tmp/minuit/MnPosDef.kt b/kmath-optimization/src/commonMain/tmp/minuit/MnPosDef.kt
         if (err > prec.eps()) {
             return e
         }
-        // std::cout<<"MnPosDef init matrix= "<<err<<std::endl;
diff --git a/kmath-stat/src/commonMain/kotlin/space/kscience/kmath/stat/Mean.kt b/kmath-stat/src/commonMain/kotlin/space/kscience/kmath/stat/Mean.kt
         @Deprecated("Use Long.mean instead")
         public val long: Mean<Long> = Mean(LongRing) { sum, count -> sum / count }

-        public fun evaluate(buffer: Buffer<Double>): Double = Double.mean.evaluateBlocking(buffer)
-        public fun evaluate(buffer: Buffer<Int>): Int = Int.mean.evaluateBlocking(buffer)
-        public fun evaluate(buffer: Buffer<Long>): Long = Long.mean.evaluateBlocking(buffer)
+        public fun evaluate(buffer: Buffer<Double>): Double = DoubleField.mean.evaluateBlocking(buffer)
+        public fun evaluate(buffer: Buffer<Int>): Int = IntRing.mean.evaluateBlocking(buffer)
+        public fun evaluate(buffer: Buffer<Long>): Long = LongRing.mean.evaluateBlocking(buffer)
     }
 }

 //TODO replace with optimized version which respects overflow
-public val Double.Companion.mean: Mean<Double> get() = Mean(DoubleField) { sum, count -> sum / count }
-public val Int.Companion.mean: Mean<Int> get() = Mean(IntRing) { sum, count -> sum / count }
-public val Long.Companion.mean: Mean<Long> get() = Mean(LongRing) { sum, count -> sum / count }
+public val DoubleField.mean: Mean<Double> get() = Mean(DoubleField) { sum, count -> sum / count }
+public val IntRing.mean: Mean<Int> get() = Mean(IntRing) { sum, count -> sum / count }
+public val LongRing.mean: Mean<Long> get() = Mean(LongRing) { sum, count -> sum / count }
diff --git a/kmath-stat/src/jvmTest/kotlin/space/kscience/kmath/stat/StatisticTest.kt b/kmath-stat/src/jvmTest/kotlin/space/kscience/kmath/stat/StatisticTest.kt
index 264d62246..3be7fa314 100644
--- a/kmath-stat/src/jvmTest/kotlin/space/kscience/kmath/stat/StatisticTest.kt
+++ b/kmath-stat/src/jvmTest/kotlin/space/kscience/kmath/stat/StatisticTest.kt
@@ -9,6 +9,7 @@
 import kotlinx.coroutines.flow.first
 import kotlinx.coroutines.flow.last
 import kotlinx.coroutines.flow.take
 import kotlinx.coroutines.runBlocking
+import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.random.RandomGenerator
 import space.kscience.kmath.random.chain
 import space.kscience.kmath.streaming.chunked
@@ -27,26 +28,26 @@ internal class StatisticTest {

     @Test
     fun singleBlockingMean() {
-        val first = runBlocking { chunked.first()}
-        val res = Double.mean(first)
-        assertEquals(0.5,res, 1e-1)
+        val first = runBlocking { chunked.first() }
+        val res = DoubleField.mean(first)
+        assertEquals(0.5, res, 1e-1)
     }

     @Test
     fun singleSuspendMean() = runBlocking {
-        val first = runBlocking { chunked.first()}
-        val res = Double.mean(first)
-        assertEquals(0.5,res, 1e-1)
+        val first = runBlocking { chunked.first() }
+        val res = DoubleField.mean(first)
+        assertEquals(0.5, res, 1e-1)
     }

     @Test
     fun parallelMean() = runBlocking {
-        val average = Double.mean
+        val average = DoubleField.mean
             .flow(chunked) //create a flow from evaluated results
             .take(100) // Take 100 data chunks from the source and accumulate them
             .last() //get 1e5 data samples average

-        assertEquals(0.5,average, 1e-2)
+        assertEquals(0.5, average, 1e-2)
     }
 }
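`Mean` is now looked up on the algebra (`DoubleField.mean`) instead of the numeric companion (`Double.mean`), which the deprecation notices above point at. A blocking sketch using `evaluateBlocking` from the same patch (the import package is assumed from kmath-stat's Mean.kt):

```kotlin
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.stat.mean
import space.kscience.kmath.structures.asBuffer

fun main() {
    val data = doubleArrayOf(0.0, 0.25, 0.5, 0.75, 1.0).asBuffer()
    println(DoubleField.mean.evaluateBlocking(data)) // 0.5
}
```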
diff --git a/kmath-tensorflow/src/test/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowOps.kt b/kmath-tensorflow/src/test/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowOps.kt
index ff11dc8fe..af754f841 100644
--- a/kmath-tensorflow/src/test/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowOps.kt
+++ b/kmath-tensorflow/src/test/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowOps.kt
@@ -13,6 +13,7 @@
 import space.kscience.kmath.nd.structureND
 import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.sum
+import space.kscience.kmath.tensors.core.randomNormal
 import kotlin.test.assertEquals

 @OptIn(UnstableKMathAPI::class)
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt
index 587096500..1a324b200 100644
--- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt
@@ -21,7 +21,7 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> :
     /**
      * @return the mean of all elements in the input tensor.
     */
-    public fun StructureND<T>.mean(): T
+    public fun mean(structureND: StructureND<T>): T

     /**
      * Returns the mean of each row of the input tensor in the given dimension [dim].
@@ -34,12 +34,12 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> :
      * @param keepDim whether the output tensor has [dim] retained or not.
      * @return the mean of each row of the input tensor in the given dimension [dim].
     */
-    public fun StructureND<T>.mean(dim: Int, keepDim: Boolean): Tensor<T>
+    public fun mean(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T>

     /**
      * @return the standard deviation of all elements in the input tensor.
     */
-    public fun StructureND<T>.std(): T
+    public fun std(structureND: StructureND<T>): T

     /**
      * Returns the standard deviation of each row of the input tensor in the given dimension [dim].
@@ -52,12 +52,12 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> :
      * @param keepDim whether the output tensor has [dim] retained or not.
      * @return the standard deviation of each row of the input tensor in the given dimension [dim].
     */
-    public fun StructureND<T>.std(dim: Int, keepDim: Boolean): Tensor<T>
+    public fun std(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T>

     /**
      * @return the variance of all elements in the input tensor.
     */
-    public fun StructureND<T>.variance(): T
+    public fun variance(structureND: StructureND<T>): T

     /**
      * Returns the variance of each row of the input tensor in the given dimension [dim].
@@ -70,80 +70,45 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> :
      * @param keepDim whether the output tensor has [dim] retained or not.
      * @return the variance of each row of the input tensor in the given dimension [dim].
     */
-    public fun StructureND<T>.variance(dim: Int, keepDim: Boolean): Tensor<T>
-
-    //For information: https://pytorch.org/docs/stable/generated/torch.exp.html
-    public fun StructureND<T>.exp(): Tensor<T>
-
-    //For information: https://pytorch.org/docs/stable/generated/torch.log.html
-    public fun StructureND<T>.ln(): Tensor<T>
+    public fun variance(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T>

     //For information: https://pytorch.org/docs/stable/generated/torch.sqrt.html
-    public fun StructureND<T>.sqrt(): Tensor<T>
-
-    //For information: https://pytorch.org/docs/stable/generated/torch.acos.html#torch.cos
-    public fun StructureND<T>.cos(): Tensor<T>
-
-    //For information: https://pytorch.org/docs/stable/generated/torch.acos.html#torch.acos
-    public fun StructureND<T>.acos(): Tensor<T>
-
-    //For information: https://pytorch.org/docs/stable/generated/torch.acosh.html#torch.cosh
-    public fun StructureND<T>.cosh(): Tensor<T>
-
-    //For information: https://pytorch.org/docs/stable/generated/torch.acosh.html#torch.acosh
-    public fun StructureND<T>.acosh(): Tensor<T>
-
-    //For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.sin
-    public fun StructureND<T>.sin(): Tensor<T>
-
-    //For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.asin
-    public fun StructureND<T>.asin(): Tensor<T>
-
-    //For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.sinh
-    public fun StructureND<T>.sinh(): Tensor<T>
-
-    //For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.asinh
-    public fun StructureND<T>.asinh(): Tensor<T>
+    override fun sqrt(arg: StructureND<T>): Tensor<T>

     //For information: https://pytorch.org/docs/stable/generated/torch.atan.html#torch.tan
-    public fun StructureND<T>.tan(): Tensor<T>
+    override fun tan(arg: StructureND<T>): Tensor<T>

     //https://pytorch.org/docs/stable/generated/torch.atan.html#torch.atan
-    public fun StructureND<T>.atan(): Tensor<T>
+    override fun atan(arg: StructureND<T>): Tensor<T>

     //For information: https://pytorch.org/docs/stable/generated/torch.atanh.html#torch.tanh
-    public fun StructureND<T>.tanh(): Tensor<T>
-
-    //For information: https://pytorch.org/docs/stable/generated/torch.atanh.html#torch.atanh
-    public fun StructureND<T>.atanh(): Tensor<T>
+    override fun tanh(arg: StructureND<T>): Tensor<T>

     //For information: https://pytorch.org/docs/stable/generated/torch.ceil.html#torch.ceil
-    public fun StructureND<T>.ceil(): Tensor<T>
+    public fun ceil(arg: StructureND<T>): Tensor<T>

     //For information: https://pytorch.org/docs/stable/generated/torch.floor.html#torch.floor
-    public fun StructureND<T>.floor(): Tensor<T>
+    public fun floor(structureND: StructureND<T>): Tensor<T>

-    override fun sin(arg: StructureND<T>): StructureND<T> = arg.sin()
+    override fun sin(arg: StructureND<T>): StructureND<T>

-    override fun cos(arg: StructureND<T>): StructureND<T> = arg.cos()
+    override fun cos(arg: StructureND<T>): StructureND<T>

-    override fun asin(arg: StructureND<T>): StructureND<T> = arg.asin()
+    override fun asin(arg: StructureND<T>): StructureND<T>

-    override fun acos(arg: StructureND<T>): StructureND<T> = arg.acos()
+    override fun acos(arg: StructureND<T>): StructureND<T>

-    override fun atan(arg: StructureND<T>): StructureND<T> = arg.atan()
+    override fun exp(arg: StructureND<T>): StructureND<T>

-    override fun exp(arg: StructureND<T>): StructureND<T> = arg.exp()
+    override fun ln(arg: StructureND<T>): StructureND<T>

-    override fun ln(arg: StructureND<T>): StructureND<T> = arg.ln()
+    override fun sinh(arg: StructureND<T>): StructureND<T>

-    override fun sinh(arg: StructureND<T>): StructureND<T> = arg.sinh()
+    override fun cosh(arg: StructureND<T>): StructureND<T>

-    override fun cosh(arg: StructureND<T>): StructureND<T> = arg.cosh()
+    override fun asinh(arg: StructureND<T>): StructureND<T>

-    override fun asinh(arg: StructureND<T>): StructureND<T> = arg.asinh()
+    override fun acosh(arg: StructureND<T>): StructureND<T>

-    override fun acosh(arg: StructureND<T>): StructureND<T> = arg.acosh()
-
-    override fun atanh(arg: StructureND<T>): StructureND<T> = arg.atanh()
+    override fun atanh(arg: StructureND<T>): StructureND<T>
 }
\ No newline at end of file
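After this refactoring every analytic reduction names its tensor explicitly, so calls look the same for the scalar and the dim-wise variants. A sketch against the Double tensor algebra:

```kotlin
import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.tensorAlgebra
import space.kscience.kmath.tensors.core.withBroadcast

fun main() = Double.tensorAlgebra.withBroadcast {
    val t = fromArray(ShapeND(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))

    println(mean(t))              // mean of all elements: 3.5
    println(mean(t, 0, false))    // per-column means; keepDim = false drops the reduced dim
    println(variance(t, 1, true)) // per-row variances, dim kept
}
```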
* @return a pair `eigenvalues to eigenvectors` */ - public fun StructureND.symEig(): Pair, StructureND> + public fun symEig(structureND: StructureND): Pair, StructureND> } diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt index 12e9549da..4eed7a2a8 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt @@ -10,7 +10,6 @@ import space.kscience.kmath.misc.UnstableKMathAPI import space.kscience.kmath.nd.* import space.kscience.kmath.structures.* import space.kscience.kmath.tensors.core.internal.toPrettyString -import kotlin.jvm.JvmInline public class OffsetDoubleBuffer( override val origin: DoubleBuffer, @@ -83,9 +82,9 @@ public inline fun OffsetDoubleBuffer.mapInPlace(operation: (Double) -> Double) { * * [DoubleTensor] always uses row-based strides */ -public class DoubleTensor( +public open class DoubleTensor( shape: ShapeND, - override val source: OffsetDoubleBuffer, + final override val source: OffsetDoubleBuffer, ) : BufferedTensor(shape), MutableStructureNDOfDouble { init { @@ -103,7 +102,7 @@ public class DoubleTensor( source[indices.offset(index)] = value } - override fun getDouble(index: IntArray): Double = get(index) + override fun getDouble(index: IntArray): Double = source[indices.offset(index)] override fun setDouble(index: IntArray, value: Double) { set(index, value) @@ -112,62 +111,8 @@ public class DoubleTensor( override fun toString(): String = toPrettyString() } -@JvmInline -public value class DoubleTensor2D(public val tensor: DoubleTensor) : MutableStructureND by tensor, - MutableStructure2D { - - init { - require(tensor.shape.size == 2) { "Only 2D tensors could be cast to 2D" } - } - - override val rowNum: Int get() = shape[0] - override val colNum: Int get() = shape[1] - - override fun get(i: Int, j: Int): Double = tensor.source[i * colNum + j] - - override fun set(i: Int, j: Int, value: Double) { - tensor.source[i * colNum + j] = value - } - - @OptIn(PerformancePitfall::class) - override val rows: List - get() = List(rowNum) { i -> - tensor.source.view(i * colNum, colNum) - } - - - @OptIn(PerformancePitfall::class) - override val columns: List> - get() = List(colNum) { j -> - val indices = IntArray(rowNum) { i -> j + i * colNum } - tensor.source.permute(indices) - } - - @PerformancePitfall - override fun elements(): Sequence> = tensor.elements() - @OptIn(PerformancePitfall::class) - override fun get(index: IntArray): Double = tensor[index] - override val shape: ShapeND get() = tensor.shape -} - -public fun DoubleTensor.asDoubleTensor2D(): DoubleTensor2D = DoubleTensor2D(this) - public fun DoubleTensor.asDoubleBuffer(): OffsetDoubleBuffer = if (shape.size == 1) { source } else { error("Only 1D tensors could be cast to 1D") } - -public inline fun DoubleTensor.forEachMatrix(block: (index: IntArray, matrix: DoubleTensor2D) -> Unit) { - val n = shape.size - check(n >= 2) { "Expected tensor with 2 or more dimensions, got size $n" } - val matrixOffset = shape[n - 1] * shape[n - 2] - val matrixShape = ShapeND(shape[n - 2], shape[n - 1]) - - val size = matrixShape.linearSize - for (i in 0 until linearSize / matrixOffset) { - val offset = i * matrixOffset - val index = indices.index(offset).sliceArray(0 until (shape.size - 2)) - block(index, DoubleTensor(matrixShape, source.view(offset, size)).asDoubleTensor2D()) 
-    }
-}
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor1D.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor1D.kt
new file mode 100644
index 000000000..e74f73800
--- /dev/null
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor1D.kt
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018-2022 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.core
+
+import space.kscience.kmath.misc.PerformancePitfall
+import space.kscience.kmath.nd.MutableStructure1D
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.structures.MutableBuffer
+
+public class DoubleTensor1D(
+    source: OffsetDoubleBuffer,
+) : DoubleTensor(ShapeND(source.size), source), MutableStructure1D<Double> {
+
+    @PerformancePitfall
+    override fun get(index: IntArray): Double = super.get(index)
+
+    @PerformancePitfall
+    override fun set(index: IntArray, value: Double) {
+        super.set(index, value)
+    }
+
+    override val size: Int get() = source.size
+
+    override fun get(index: Int): Double = source[index]
+
+    override fun set(index: Int, value: Double) {
+        source[index] = value
+    }
+
+    override fun copy(): MutableBuffer<Double> = source.copy()
+
+    @PerformancePitfall
+    override fun elements(): Sequence<Pair<IntArray, Double>> = super.elements()
+}
+
+/**
+ * A zero-copy cast to a 1D structure. Changes in the resulting structure are reflected in the original tensor.
+ */
+public fun DoubleTensor.asDoubleTensor1D(): DoubleTensor1D {
+    require(shape.size == 1) { "Only 1D tensors could be cast to 1D" }
+    return DoubleTensor1D(source)
+}
\ No newline at end of file
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor2D.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor2D.kt
new file mode 100644
index 000000000..7c84b91e1
--- /dev/null
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor2D.kt
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2018-2022 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */ + +package space.kscience.kmath.tensors.core + +import space.kscience.kmath.misc.PerformancePitfall +import space.kscience.kmath.nd.MutableStructure2D +import space.kscience.kmath.nd.ShapeND +import space.kscience.kmath.nd.linearSize +import space.kscience.kmath.structures.PermutedMutableBuffer +import space.kscience.kmath.structures.permute + +public class DoubleTensor2D( + override val rowNum: Int, + override val colNum: Int, + source: OffsetDoubleBuffer, +) : DoubleTensor(ShapeND(rowNum, colNum), source), MutableStructure2D { + + override fun get(i: Int, j: Int): Double = source[i * colNum + j] + + @OptIn(PerformancePitfall::class) + override fun get(index: IntArray): Double = getDouble(index) + + override fun set(i: Int, j: Int, value: Double) { + source[i * colNum + j] = value + } + + @OptIn(PerformancePitfall::class) + override val rows: List + get() = List(rowNum) { i -> + source.view(i * colNum, colNum) + } + + + @OptIn(PerformancePitfall::class) + override val columns: List> + get() = List(colNum) { j -> + val indices = IntArray(rowNum) { i -> j + i * colNum } + source.permute(indices) + } + + override val shape: ShapeND get() = super.shape + + @PerformancePitfall + override fun elements(): Sequence> = super.elements() +} + +/** + * A zero-copy cast to 2D structure. Changes in resulting structure are reflected on original tensor. + */ +public fun DoubleTensor.asDoubleTensor2D(): DoubleTensor2D { + require(shape.size == 2) { "Only 2D tensors could be cast to 2D" } + return DoubleTensor2D(shape[0], shape[1], source) +} + + +public inline fun DoubleTensor.forEachMatrix(block: (index: IntArray, matrix: DoubleTensor2D) -> Unit) { + val n = shape.size + check(n >= 2) { "Expected tensor with 2 or more dimensions, got size $n" } + val matrixOffset = shape[n - 1] * shape[n - 2] + val matrixShape = ShapeND(shape[n - 2], shape[n - 1]) + + val size = matrixShape.linearSize + for (i in 0 until linearSize / matrixOffset) { + val offset = i * matrixOffset + val index = indices.index(offset).sliceArray(0 until (shape.size - 2)) + block(index, DoubleTensor(matrixShape, source.view(offset, size)).asDoubleTensor2D()) + } +} diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index ce1778013..997c0a316 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -11,12 +11,12 @@ package space.kscience.kmath.tensors.core import space.kscience.kmath.misc.PerformancePitfall import space.kscience.kmath.misc.UnstableKMathAPI import space.kscience.kmath.nd.* +import space.kscience.kmath.operations.DoubleBufferOps import space.kscience.kmath.operations.DoubleField import space.kscience.kmath.structures.* import space.kscience.kmath.tensors.api.AnalyticTensorAlgebra import space.kscience.kmath.tensors.api.LinearOpsTensorAlgebra import space.kscience.kmath.tensors.api.Tensor -import space.kscience.kmath.tensors.api.TensorPartialDivisionAlgebra import space.kscience.kmath.tensors.core.internal.* import kotlin.math.* @@ -25,7 +25,6 @@ import kotlin.math.* */ @OptIn(PerformancePitfall::class) public open class DoubleTensorAlgebra : - TensorPartialDivisionAlgebra, AnalyticTensorAlgebra, LinearOpsTensorAlgebra { @@ -33,6 +32,8 @@ public open class DoubleTensorAlgebra : override val elementAlgebra: DoubleField get() = 
DoubleField + public val bufferAlgebra: DoubleBufferOps get() = DoubleBufferOps + /** * Applies the [transform] function to each element of the tensor and returns the resulting modified tensor. @@ -98,6 +99,16 @@ public open class DoubleTensorAlgebra : override fun StructureND.value(): Double = valueOrNull() ?: throw IllegalArgumentException("The tensor shape is $shape, but value method is allowed only for shape [1]") + public fun fromBuffer(shape: ShapeND, buffer: Buffer): DoubleTensor { + checkNotEmptyShape(shape) + check(buffer.size > 0) { "Illegal empty buffer provided" } + check(buffer.size == shape.linearSize) { + "Inconsistent shape $shape for buffer of size ${buffer.size} provided" + } + return DoubleTensor(shape, buffer.toDoubleBuffer()) + } + + /** * Constructs a tensor with the specified shape and data. * @@ -105,12 +116,7 @@ public open class DoubleTensorAlgebra : * @param array one-dimensional data array. * @return tensor with the [shape] shape and [array] data. */ - public fun fromArray(shape: ShapeND, array: DoubleArray): DoubleTensor { - checkNotEmptyShape(shape) - checkEmptyDoubleBuffer(array) - checkBufferShapeConsistency(shape, array) - return DoubleTensor(shape, array.asBuffer()) - } + public fun fromArray(shape: ShapeND, array: DoubleArray): DoubleTensor = fromBuffer(shape, array.asBuffer()) /** * Constructs a tensor with the specified shape and initializer. @@ -271,7 +277,7 @@ public open class DoubleTensorAlgebra : override fun StructureND.transposed(i: Int, j: Int): Tensor { val actualI = if (i >= 0) i else shape.size + i - val actualJ = if(j>=0) j else shape.size + j + val actualJ = if (j >= 0) j else shape.size + j return asDoubleTensor().permute( shape.transposed(actualI, actualJ) ) { originIndex -> @@ -498,48 +504,6 @@ public open class DoubleTensorAlgebra : return true } - /** - * Returns a tensor of random numbers drawn from normal distributions with `0.0` mean and `1.0` standard deviation. - * - * @param shape the desired shape for the output tensor. - * @param seed the random seed of the pseudo-random number generator. - * @return tensor of a given shape filled with numbers from the normal distribution - * with `0.0` mean and `1.0` standard deviation. - */ - public fun randomNormal(shape: ShapeND, seed: Long = 0): DoubleTensor = - DoubleTensor(shape, DoubleBuffer.randomNormals(shape.linearSize, seed)) - - /** - * Returns a tensor with the same shape as `input` of random numbers drawn from normal distributions - * with `0.0` mean and `1.0` standard deviation. - * - * @receiver the `input`. - * @param seed the random seed of the pseudo-random number generator. - * @return a tensor with the same shape as `input` filled with numbers from the normal distribution - * with `0.0` mean and `1.0` standard deviation. - */ - public fun Tensor.randomNormalLike(seed: Long = 0): DoubleTensor = - DoubleTensor(shape, DoubleBuffer.randomNormals(shape.linearSize, seed)) - - /** - * Concatenates a sequence of tensors with equal shapes along the first dimension. 
- * - * @param tensors the [List] of tensors with same shapes to concatenate - * @return tensor with concatenation result - */ - public fun stack(tensors: List>): DoubleTensor { - check(tensors.isNotEmpty()) { "List must have at least 1 element" } - val shape = tensors[0].shape - check(tensors.all { it.shape contentEquals shape }) { "Tensors must have same shapes" } - val resShape = ShapeND(tensors.size) + shape -// val resBuffer: List = tensors.flatMap { -// it.asDoubleTensor().source.array.drop(it.asDoubleTensor().bufferStart) -// .take(it.asDoubleTensor().linearSize) -// } - val resBuffer = tensors.map { it.asDoubleTensor().source }.concat() - return DoubleTensor(resShape, resBuffer) - } - /** * Builds tensor from rows of the input tensor. * @@ -631,102 +595,75 @@ public open class DoubleTensorAlgebra : } - override fun StructureND.mean(): Double = sum() / indices.linearSize + override fun mean(structureND: StructureND): Double = structureND.sum() / structureND.indices.linearSize - override fun StructureND.mean(dim: Int, keepDim: Boolean): DoubleTensor = - foldDimToDouble(dim, keepDim) { arr -> - check(dim < dimension) { "Dimension $dim out of range $dimension" } - arr.sum() / shape[dim] + override fun mean(structureND: StructureND, dim: Int, keepDim: Boolean): Tensor = + structureND.foldDimToDouble(dim, keepDim) { arr -> + check(dim < structureND.dimension) { "Dimension $dim out of range ${structureND.dimension}" } + arr.sum() / structureND.shape[dim] } - override fun StructureND.std(): Double = reduceElements { arr -> - val mean = arr.array.sum() / indices.linearSize - sqrt(arr.array.sumOf { (it - mean) * (it - mean) } / (indices.linearSize - 1)) + override fun std(structureND: StructureND): Double = structureND.reduceElements { arr -> + val mean = arr.array.sum() / structureND.indices.linearSize + sqrt(arr.array.sumOf { (it - mean) * (it - mean) } / (structureND.indices.linearSize - 1)) } - override fun StructureND.std(dim: Int, keepDim: Boolean): DoubleTensor = foldDimToDouble( - dim, - keepDim - ) { arr -> - check(dim < dimension) { "Dimension $dim out of range $dimension" } - val mean = arr.sum() / shape[dim] - sqrt(arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1)) - } + override fun std(structureND: StructureND, dim: Int, keepDim: Boolean): Tensor = + structureND.foldDimToDouble( + dim, + keepDim + ) { arr -> + check(dim < structureND.dimension) { "Dimension $dim out of range ${structureND.dimension}" } + val mean = arr.sum() / structureND.shape[dim] + sqrt(arr.sumOf { (it - mean) * (it - mean) } / (structureND.shape[dim] - 1)) + } - override fun StructureND.variance(): Double = reduceElements { arr -> - val linearSize = indices.linearSize + override fun variance(structureND: StructureND): Double = structureND.reduceElements { arr -> + val linearSize = structureND.indices.linearSize val mean = arr.array.sum() / linearSize arr.array.sumOf { (it - mean) * (it - mean) } / (linearSize - 1) } - override fun StructureND.variance(dim: Int, keepDim: Boolean): DoubleTensor = foldDimToDouble( - dim, - keepDim - ) { arr -> - check(dim < dimension) { "Dimension $dim out of range $dimension" } - val mean = arr.sum() / shape[dim] - arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1) - } - - private fun cov(x: StructureND, y: StructureND): Double { - val n = x.shape[0] - return ((x - x.mean()) * (y - y.mean())).mean() * n / (n - 1) - } - - /** - * Returns the covariance matrix `M` of given vectors. 
- * - * `M[i, j]` contains covariance of `i`-th and `j`-th given vectors - * - * @param tensors the [List] of 1-dimensional tensors with same shape - * @return `M`. - */ - public fun cov(tensors: List>): DoubleTensor { - check(tensors.isNotEmpty()) { "List must have at least 1 element" } - val n = tensors.size - val m = tensors[0].shape[0] - check(tensors.all { it.shape contentEquals ShapeND(m) }) { "Tensors must have same shapes" } - val resTensor = DoubleTensor( - ShapeND(n, n), - DoubleBuffer(n * n) { 0.0 } - ) - for (i in 0 until n) { - for (j in 0 until n) { - resTensor[intArrayOf(i, j)] = cov(tensors[i], tensors[j]) - } + override fun variance(structureND: StructureND, dim: Int, keepDim: Boolean): Tensor = + structureND.foldDimToDouble( + dim, + keepDim + ) { arr -> + check(dim < structureND.dimension) { "Dimension $dim out of range ${structureND.dimension}" } + val mean = arr.sum() / structureND.shape[dim] + arr.sumOf { (it - mean) * (it - mean) } / (structureND.shape[dim] - 1) } - return resTensor - } - override fun StructureND.exp(): DoubleTensor = map { exp(it) } - override fun StructureND.ln(): DoubleTensor = map { ln(it) } + override fun exp(arg: StructureND): DoubleTensor = arg.map { this.exp(it) } - override fun StructureND.sqrt(): DoubleTensor = map { sqrt(it) } + override fun ln(arg: StructureND): DoubleTensor = arg.map { this.ln(it) } - override fun StructureND.cos(): DoubleTensor = map { cos(it) } + override fun sqrt(arg: StructureND): DoubleTensor = arg.map { this.sqrt(it) } - override fun StructureND.acos(): DoubleTensor = map { acos(it) } + override fun cos(arg: StructureND): DoubleTensor = arg.map { this.cos(it) } - override fun StructureND.cosh(): DoubleTensor = map { cosh(it) } + override fun acos(arg: StructureND): DoubleTensor = arg.map { this.acos(it) } - override fun StructureND.acosh(): DoubleTensor = map { acosh(it) } + override fun cosh(arg: StructureND): DoubleTensor = arg.map { this.cosh(it) } - override fun StructureND.sin(): DoubleTensor = map { sin(it) } + override fun acosh(arg: StructureND): DoubleTensor = arg.map { this.acosh(it) } - override fun StructureND.asin(): DoubleTensor = map { asin(it) } + override fun sin(arg: StructureND): DoubleTensor = arg.map { this.sin(it) } - override fun StructureND.sinh(): DoubleTensor = map { sinh(it) } + override fun asin(arg: StructureND): DoubleTensor = arg.map { this.asin(it) } - override fun StructureND.asinh(): DoubleTensor = map { asinh(it) } + override fun sinh(arg: StructureND): DoubleTensor = arg.map { this.sinh(it) } - override fun StructureND.tan(): DoubleTensor = map { tan(it) } + override fun asinh(arg: StructureND): DoubleTensor = arg.map { this.asinh(it) } - override fun StructureND.atan(): DoubleTensor = map { atan(it) } + override fun tan(arg: StructureND): DoubleTensor = arg.map { this.tan(it) } - override fun StructureND.tanh(): DoubleTensor = map { tanh(it) } + override fun atan(arg: StructureND): DoubleTensor = arg.map { this.atan(it) } - override fun StructureND.atanh(): DoubleTensor = map { atanh(it) } + override fun tanh(arg: StructureND): DoubleTensor = arg.map { this.tanh(it) } + + override fun atanh(arg: StructureND): DoubleTensor = arg.map { this.atanh(it) } override fun power(arg: StructureND, pow: Number): StructureND = if (pow is Int) { arg.map { it.pow(pow) } @@ -734,115 +671,26 @@ public open class DoubleTensorAlgebra : arg.map { it.pow(pow.toDouble()) } } - override fun StructureND.ceil(): DoubleTensor = map { ceil(it) } + override fun ceil(arg: StructureND): DoubleTensor = 
arg.map { ceil(it) } - override fun StructureND.floor(): DoubleTensor = map { floor(it) } + override fun floor(structureND: StructureND): DoubleTensor = structureND.map { floor(it) } - override fun StructureND.inv(): DoubleTensor = invLU(1e-9) + override fun StructureND.inv(): DoubleTensor = invLU(this, 1e-9) - override fun StructureND.det(): DoubleTensor = detLU(1e-9) + override fun StructureND.det(): DoubleTensor = detLU(this, 1e-9) - /** - * Computes the LU factorization of a matrix or batches of matrices `input`. - * Returns a tuple containing the LU factorization and pivots of `input`. - * - * @param epsilon permissible error when comparing the determinant of a matrix with zero - * @return pair of `factorization` and `pivots`. - * The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor. - * The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows. - */ - public fun StructureND.luFactor(epsilon: Double): Pair = - computeLU(this, epsilon) - ?: throw IllegalArgumentException("Tensor contains matrices which are singular at precision $epsilon") + override fun lu(structureND: StructureND): Triple = + lu(structureND, 1e-9) - /** - * Computes the LU factorization of a matrix or batches of matrices `input`. - * Returns a tuple containing the LU factorization and pivots of `input`. - * Uses an error of ``1e-9`` when calculating whether a matrix is degenerate. - * - * @return pair of `factorization` and `pivots`. - * The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor. - * The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows. - */ - public fun StructureND.luFactor(): Pair = luFactor(1e-9) + override fun cholesky(structureND: StructureND): DoubleTensor = cholesky(structureND, 1e-6) - /** - * Unpacks the data and pivots from a LU factorization of a tensor. - * Given a tensor [luTensor], return tensors `Triple(P, L, U)` satisfying `P dot luTensor = L dot U`, - * with `P` being a permutation matrix or batch of matrices, - * `L` being a lower triangular matrix or batch of matrices, - * `U` being an upper triangular matrix or batch of matrices. - * - * @param luTensor the packed LU factorization data - * @param pivotsTensor the packed LU factorization pivots - * @return triple of `P`, `L` and `U` tensors - */ - public fun luPivot( - luTensor: StructureND, - pivotsTensor: Tensor, - ): Triple { - checkSquareMatrix(luTensor.shape) - check( - luTensor.shape.first(luTensor.shape.size - 2) contentEquals pivotsTensor.shape.first(pivotsTensor.shape.size - 1) || - luTensor.shape.last() == pivotsTensor.shape.last() - 1 - ) { "Inappropriate shapes of input tensors" } - - val n = luTensor.shape.last() - val pTensor = zeroesLike(luTensor) - pTensor - .matrixSequence() - .zip(pivotsTensor.asIntTensor().vectorSequence()) - .forEach { (p, pivot) -> pivInit(p.asDoubleTensor2D(), pivot.as1D(), n) } - - val lTensor = zeroesLike(luTensor) - val uTensor = zeroesLike(luTensor) - - lTensor.matrixSequence() - .zip(uTensor.matrixSequence()) - .zip(luTensor.asDoubleTensor().matrixSequence()) - .forEach { (pairLU, lu) -> - val (l, u) = pairLU - luPivotHelper(l.asDoubleTensor2D(), u.asDoubleTensor2D(), lu.asDoubleTensor2D(), n) - } - - return Triple(pTensor, lTensor, uTensor) - } - - /** - * QR decomposition. - * - * Computes the QR decomposition of a matrix or a batch of matrices, and returns a pair `Q to R` of tensors. 
- * Given a tensor `input`, return tensors `Q to R` satisfying `input == Q dot R`, - * with `Q` being an orthogonal matrix or batch of orthogonal matrices - * and `R` being an upper triangular matrix or batch of upper triangular matrices. - * - * @receiver the `input`. - * @param epsilon the permissible error when comparing tensors for equality. - * Used when checking the positive definiteness of the input matrix or matrices. - * @return a pair of `Q` and `R` tensors. - */ - public fun StructureND.cholesky(epsilon: Double): DoubleTensor { - checkSquareMatrix(shape) - checkPositiveDefinite(asDoubleTensor(), epsilon) - - val n = shape.last() - val lTensor = zeroesLike(this) - - for ((a, l) in asDoubleTensor().matrixSequence().zip(lTensor.matrixSequence())) - for (i in 0 until n) choleskyHelper(a.asDoubleTensor2D(), l.asDoubleTensor2D(), n) - - return lTensor - } - - override fun StructureND.cholesky(): DoubleTensor = cholesky(1e-6) - - override fun StructureND.qr(): Pair { - checkSquareMatrix(shape) - val qTensor = zeroesLike(this) - val rTensor = zeroesLike(this) + override fun qr(structureND: StructureND): Pair { + checkSquareMatrix(structureND.shape) + val qTensor = zeroesLike(structureND) + val rTensor = zeroesLike(structureND) //TODO replace with cycle - asDoubleTensor().matrixSequence() + structureND.asDoubleTensor().matrixSequence() .zip( (qTensor.matrixSequence() .zip(rTensor.matrixSequence())) @@ -854,200 +702,14 @@ public open class DoubleTensorAlgebra : return qTensor to rTensor } - override fun StructureND.svd(): Triple, StructureND, StructureND> = - svd(epsilon = 1e-10) + override fun svd( + structureND: StructureND, + ): Triple, StructureND, StructureND> = + svd(structureND = structureND, epsilon = 1e-10) + override fun symEig(structureND: StructureND): Pair = + symEigJacobi(structureND = structureND, maxIteration = 50, epsilon = 1e-15) - /** - * Singular Value Decomposition. - * - * Computes the singular value decomposition of either a matrix or batch of matrices `input`. - * The singular value decomposition is represented as a triple `Triple(U, S, V)`, - * such that `input == U dot diagonalEmbedding(S) dot V.transpose()`. - * If `input` is a batch of tensors, then U, S, and Vh are also batched with the same batch dimensions as `input. - * - * @receiver the `input`. - * @param epsilon permissible error when calculating the dot product of vectors - * i.e., the precision with which the cosine approaches 1 in an iterative algorithm. - * @return a triple `Triple(U, S, V)`. 
- */ - public fun StructureND.svd(epsilon: Double): Triple, StructureND, StructureND> { - val size = dimension - val commonShape = shape.slice(0 until size - 2) - val (n, m) = shape.slice(size - 2 until size) - val uTensor = zeros(commonShape + ShapeND(min(n, m), n)) - val sTensor = zeros(commonShape + ShapeND(min(n, m))) - val vTensor = zeros(commonShape + ShapeND(min(n, m), m)) - - val matrices = asDoubleTensor().matrices - val uTensors = uTensor.matrices - val sTensorVectors = sTensor.vectors - val vTensors = vTensor.matrices - - for (index in matrices.indices) { - val matrix = matrices[index] - val usv = Triple( - uTensors[index], - sTensorVectors[index], - vTensors[index] - ) - val matrixSize = matrix.shape.linearSize - val curMatrix = DoubleTensor( - matrix.shape, - matrix.source.view(0, matrixSize) - ) - svdHelper(curMatrix, usv, m, n, epsilon) - } - - return Triple(uTensor.transposed(), sTensor, vTensor.transposed()) - } - - override fun StructureND.symEig(): Pair = - symEigJacobi(maxIteration = 50, epsilon = 1e-15) - - /** - * Returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices, - * represented by a pair `eigenvalues to eigenvectors`. - * - * @param epsilon the permissible error when comparing tensors for equality - * and when the cosine approaches 1 in the SVD algorithm. - * @return a pair `eigenvalues to eigenvectors`. - */ - public fun StructureND.symEigSvd(epsilon: Double): Pair> { - //TODO optimize conversion - checkSymmetric(asDoubleTensor(), epsilon) - - fun MutableStructure2D.cleanSym(n: Int) { - for (i in 0 until n) { - for (j in 0 until n) { - if (i == j) { - this[i, j] = sign(this[i, j]) - } else { - this[i, j] = 0.0 - } - } - } - } - - val (u, s, v) = svd(epsilon) - val shp = s.shape + intArrayOf(1) - val utv = u.transposed() matmul v - val n = s.shape.last() - for (matrix in utv.matrixSequence()) { - matrix.asDoubleTensor2D().cleanSym(n) - } - - val eig = (utv dot s.asDoubleTensor().view(shp)).view(s.shape) - return eig to v - } - - public fun StructureND.symEigJacobi(maxIteration: Int, epsilon: Double): Pair { - //TODO optimize conversion - checkSymmetric(asDoubleTensor(), epsilon) - - val size = this.dimension - val eigenvectors = zeros(shape) - val eigenvalues = zeros(shape.slice(0 until size - 1)) - - var eigenvalueStart = 0 - var eigenvectorStart = 0 - for (matrix in asDoubleTensor().matrixSequence()) { - val matrix2D = matrix.asDoubleTensor2D() - val (d, v) = matrix2D.jacobiHelper(maxIteration, epsilon) - - for (i in 0 until matrix2D.rowNum) { - for (j in 0 until matrix2D.colNum) { - eigenvectors.source[eigenvectorStart + i * matrix2D.rowNum + j] = v[i, j] - } - } - - for (i in 0 until matrix2D.rowNum) { - eigenvalues.source[eigenvalueStart + i] = d[i] - } - - eigenvalueStart += this.shape.last() - eigenvectorStart += this.shape.last() * this.shape.last() - } - - return eigenvalues to eigenvectors - } - - /** - * Computes the determinant of a square matrix input, or of each square matrix in a batched input - * using LU factorization algorithm. - * - * @param epsilon the error in the LU algorithm—permissible error when comparing the determinant of a matrix - * with zero. - * @return the determinant. 
- */ - public fun StructureND.detLU(epsilon: Double = 1e-9): DoubleTensor { - checkSquareMatrix(shape) - //TODO check for unnecessary copies - val luTensor = copyToTensor() - val pivotsTensor = setUpPivots() - - val n = shape.size - - val detTensorShape = ShapeND(IntArray(n - 1) { i -> shape[i] }.apply { - set(n - 2, 1) - }) - - val resBuffer = DoubleBuffer(detTensorShape.linearSize) { 0.0 } - - val detTensor = DoubleTensor( - detTensorShape, - resBuffer - ) - - luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).forEachIndexed { index, (lu, pivots) -> - resBuffer[index] = if (luHelper(lu.asDoubleTensor2D(), pivots.as1D(), epsilon)) - 0.0 else luMatrixDet(lu.asDoubleTensor2D(), pivots.as1D()) - } - - return detTensor - } - - /** - * Computes the multiplicative inverse matrix of a square matrix input, or of each square matrix in a batched input - * using LU factorization algorithm. - * Given a square matrix `a`, return the matrix `aInv` satisfying - * `a dot aInv == aInv dot a == eye(a.shape[0])`. - * - * @param epsilon error in the LU algorithm—permissible error when comparing the determinant of a matrix with zero - * @return the multiplicative inverse of a matrix. - */ - public fun StructureND.invLU(epsilon: Double = 1e-9): DoubleTensor { - val (luTensor, pivotsTensor) = luFactor(epsilon) - val invTensor = zeroesLike(luTensor) - - //TODO replace sequence with a cycle - val seq = luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).zip(invTensor.matrixSequence()) - for ((luP, invMatrix) in seq) { - val (lu, pivots) = luP - luMatrixInv(lu.asDoubleTensor2D(), pivots.as1D(), invMatrix.asDoubleTensor2D()) - } - - return invTensor - } - - /** - * LUP decomposition. - * - * Computes the LUP decomposition of a matrix or a batch of matrices. - * Given a tensor `input`, return tensors `Triple(P, L, U)` satisfying `P dot input == L dot U`, - * with `P` being a permutation matrix or batch of matrices, - * `L` being a lower triangular matrix or batch of matrices, - * `U` being an upper triangular matrix or batch of matrices. - * - * @param epsilon permissible error when comparing the determinant of a matrix with zero. - * @return triple of `P`, `L` and `U` tensors. 
- */ - public fun StructureND.lu(epsilon: Double = 1e-9): Triple { - val (lu, pivots) = luFactor(epsilon) - return luPivot(lu, pivots) - } - - override fun StructureND.lu(): Triple = lu(1e-9) } public val Double.Companion.tensorAlgebra: DoubleTensorAlgebra get() = DoubleTensorAlgebra diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/checks.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/checks.kt index dcdf49e1e..f384ed462 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/checks.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/checks.kt @@ -13,6 +13,7 @@ import space.kscience.kmath.tensors.api.Tensor import space.kscience.kmath.tensors.core.DoubleTensor import space.kscience.kmath.tensors.core.DoubleTensorAlgebra import space.kscience.kmath.tensors.core.asDoubleTensor +import space.kscience.kmath.tensors.core.detLU internal fun checkNotEmptyShape(shape: ShapeND) = @@ -26,7 +27,7 @@ internal fun checkEmptyDoubleBuffer(buffer: DoubleArray) = check(buffer.isNotEmp internal fun checkBufferShapeConsistency(shape: ShapeND, buffer: DoubleArray) = check(buffer.size == shape.linearSize) { - "Inconsistent shape ${shape} for buffer of size ${buffer.size} provided" + "Inconsistent shape $shape for buffer of size ${buffer.size} provided" } @PublishedApi @@ -62,7 +63,7 @@ internal fun DoubleTensorAlgebra.checkSymmetric( internal fun DoubleTensorAlgebra.checkPositiveDefinite(tensor: DoubleTensor, epsilon: Double = 1e-6) { checkSymmetric(tensor, epsilon) for (mat in tensor.matrixSequence()) - check(mat.asDoubleTensor().detLU().value() > 0.0) { - "Tensor contains matrices which are not positive definite ${mat.asDoubleTensor().detLU().value()}" + check(detLU(mat.asDoubleTensor()).value() > 0.0) { + "Tensor contains matrices which are not positive definite ${detLU(mat.asDoubleTensor()).value()}" } } \ No newline at end of file diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 44459ad14..cf3697e76 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -227,7 +227,7 @@ internal fun DoubleTensorAlgebra.qrHelper( } } } - r[j, j] = DoubleTensorAlgebra { (v dot v).sqrt().value() } + r[j, j] = DoubleTensorAlgebra { sqrt((v dot v)).value() } for (i in 0 until n) { qM[i, j] = vv[i] / r[j, j] } @@ -250,7 +250,7 @@ internal fun DoubleTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double = 1e-10) while (true) { lastV = v v = b.dot(lastV) - val norm = DoubleTensorAlgebra { (v dot v).sqrt().value() } + val norm = DoubleTensorAlgebra { sqrt((v dot v)).value() } v = v.times(1.0 / norm) if (abs(v.dot(lastV).value()) > 1 - epsilon) { return v @@ -283,12 +283,12 @@ internal fun DoubleTensorAlgebra.svdHelper( if (n > m) { v = svd1d(a, epsilon) u = matrix.dot(v) - norm = DoubleTensorAlgebra { (u dot u).sqrt().value() } + norm = DoubleTensorAlgebra { sqrt((u dot u)).value() } u = u.times(1.0 / norm) } else { u = svd1d(a, epsilon) v = matrix.transposed(0, 1).dot(u) - norm = DoubleTensorAlgebra { (v dot v).sqrt().value() } + norm = DoubleTensorAlgebra { sqrt((v dot v)).value() } v = v.times(1.0 / norm) } diff --git 
a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorOps.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorOps.kt
new file mode 100644
index 000000000..e5dc55f68
--- /dev/null
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorOps.kt
@@ -0,0 +1,370 @@
+/*
+ * Copyright 2018-2022 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.core
+
+import space.kscience.kmath.nd.*
+import space.kscience.kmath.operations.covariance
+import space.kscience.kmath.structures.Buffer
+import space.kscience.kmath.structures.DoubleBuffer
+import space.kscience.kmath.tensors.api.Tensor
+import space.kscience.kmath.tensors.core.internal.*
+import kotlin.math.min
+import kotlin.math.sign
+
+
+/**
+ * Returns a tensor of random numbers drawn from normal distributions with `0.0` mean and `1.0` standard deviation.
+ *
+ * @param shape the desired shape for the output tensor.
+ * @param seed the random seed of the pseudo-random number generator.
+ * @return tensor of a given shape filled with numbers from the normal distribution
+ * with `0.0` mean and `1.0` standard deviation.
+ */
+public fun DoubleTensorAlgebra.randomNormal(shape: ShapeND, seed: Long = 0): DoubleTensor =
+    fromBuffer(shape, DoubleBuffer.randomNormals(shape.linearSize, seed))
+
+/**
+ * Returns a tensor with the same shape as `input` of random numbers drawn from normal distributions
+ * with `0.0` mean and `1.0` standard deviation.
+ *
+ * @receiver the `input`.
+ * @param seed the random seed of the pseudo-random number generator.
+ * @return a tensor with the same shape as `input` filled with numbers from the normal distribution
+ * with `0.0` mean and `1.0` standard deviation.
+ */
+public fun DoubleTensorAlgebra.randomNormalLike(structure: WithShape, seed: Long = 0): DoubleTensor =
+    DoubleTensor(structure.shape, DoubleBuffer.randomNormals(structure.shape.linearSize, seed))
+
+/**
+ * Concatenates a sequence of tensors with equal shapes along the first dimension.
+ *
+ * @param tensors the [List] of tensors with the same shapes to concatenate
+ * @return tensor with concatenation result
+ */
+public fun stack(tensors: List<Tensor<Double>>): DoubleTensor {
+    check(tensors.isNotEmpty()) { "List must have at least 1 element" }
+    val shape = tensors[0].shape
+    check(tensors.all { it.shape contentEquals shape }) { "Tensors must have same shapes" }
+    val resShape = ShapeND(tensors.size) + shape
+    val resBuffer = tensors.map { it.asDoubleTensor().source }.concat()
+    return DoubleTensor(resShape, resBuffer)
+}
+
+/**
+ * Computes the LU factorization of a matrix or batches of matrices `input`.
+ * Returns a tuple containing the LU factorization and pivots of `input`.
+ *
+ * @param epsilon permissible error when comparing the determinant of a matrix with zero; the default is 1e-9.
+ * @return pair of `factorization` and `pivots`.
+ * The `factorization` has the shape ``(*, m, n)``, where ``(*, m, n)`` is the shape of the `input` tensor.
+ * The `pivots` has the shape ``(*, min(m, n))``. `pivots` stores all the intermediate transpositions of rows.
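+ *
+ * A minimal usage sketch (illustrative only; `matrix` stands for any square input in this algebra):
+ * ```
+ * val (lu, pivots) = luFactor(matrix)   // packed factorization and row pivots
+ * val (p, l, u) = luPivot(lu, pivots)   // unpacked, so that p dot matrix == l dot u
+ * ```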
+ */
+public fun DoubleTensorAlgebra.luFactor(
+    structureND: StructureND<Double>,
+    epsilon: Double = 1e-9,
+): Pair<DoubleTensor, IntTensor> =
+    computeLU(structureND, epsilon)
+        ?: throw IllegalArgumentException("Tensor contains matrices which are singular at precision $epsilon")
+
+
+/**
+ * Unpacks the data and pivots from an LU factorization of a tensor.
+ * Given a tensor [luTensor], return tensors `Triple(P, L, U)` satisfying `P dot luTensor = L dot U`,
+ * with `P` being a permutation matrix or batch of matrices,
+ * `L` being a lower triangular matrix or batch of matrices,
+ * `U` being an upper triangular matrix or batch of matrices.
+ *
+ * @param luTensor the packed LU factorization data
+ * @param pivotsTensor the packed LU factorization pivots
+ * @return triple of `P`, `L` and `U` tensors
+ */
+public fun DoubleTensorAlgebra.luPivot(
+    luTensor: StructureND<Double>,
+    pivotsTensor: Tensor<Int>,
+): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
+    checkSquareMatrix(luTensor.shape)
+    check(
+        luTensor.shape.first(luTensor.shape.size - 2) contentEquals pivotsTensor.shape.first(pivotsTensor.shape.size - 1) ||
+                luTensor.shape.last() == pivotsTensor.shape.last() - 1
+    ) { "Inappropriate shapes of input tensors" }
+
+    val n = luTensor.shape.last()
+    val pTensor = zeroesLike(luTensor)
+    pTensor
+        .matrixSequence()
+        .zip(pivotsTensor.asIntTensor().vectorSequence())
+        .forEach { (p, pivot) -> pivInit(p.asDoubleTensor2D(), pivot.as1D(), n) }
+
+    val lTensor = zeroesLike(luTensor)
+    val uTensor = zeroesLike(luTensor)
+
+    lTensor.matrixSequence()
+        .zip(uTensor.matrixSequence())
+        .zip(luTensor.asDoubleTensor().matrixSequence())
+        .forEach { (pairLU, lu) ->
+            val (l, u) = pairLU
+            luPivotHelper(l.asDoubleTensor2D(), u.asDoubleTensor2D(), lu.asDoubleTensor2D(), n)
+        }
+
+    return Triple(pTensor, lTensor, uTensor)
+}
+
+
+/**
+ * LUP decomposition.
+ *
+ * Computes the LUP decomposition of a matrix or a batch of matrices.
+ * Given a tensor `input`, return tensors `Triple(P, L, U)` satisfying `P dot input == L dot U`,
+ * with `P` being a permutation matrix or batch of matrices,
+ * `L` being a lower triangular matrix or batch of matrices,
+ * `U` being an upper triangular matrix or batch of matrices.
+ *
+ * @param epsilon permissible error when comparing the determinant of a matrix with zero.
+ * @return triple of `P`, `L` and `U` tensors.
+ */
+public fun DoubleTensorAlgebra.lu(
+    structureND: StructureND<Double>,
+    epsilon: Double = 1e-9,
+): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
+    val (lu, pivots) = luFactor(structureND, epsilon)
+    return luPivot(lu, pivots)
+}
+
+
+/**
+ * Cholesky decomposition.
+ *
+ * Computes the Cholesky decomposition of a symmetric positive-definite matrix or a batch of such matrices.
+ * Given a tensor `input`, returns the lower triangular tensor `L` (or a batch of `L` matrices) satisfying
+ * `input == L dot L.transposed()`.
+ *
+ * @receiver the `input`.
+ * @param epsilon the permissible error when comparing tensors for equality; the default is 1e-6.
+ * Used when checking the positive definiteness of the input matrix or matrices.
+ * @return the `L` tensor.
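+ *
+ * Illustrative sketch, assuming `sigma` is a symmetric positive-definite matrix:
+ * ```
+ * val low = cholesky(sigma)
+ * val restored = low matmul low.transposed()   // restored eq sigma within epsilon
+ * ```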
+ */
+public fun DoubleTensorAlgebra.cholesky(structureND: StructureND<Double>, epsilon: Double = 1e-6): DoubleTensor {
+    checkSquareMatrix(structureND.shape)
+    checkPositiveDefinite(structureND.asDoubleTensor(), epsilon)
+
+    val n = structureND.shape.last()
+    val lTensor = zeroesLike(structureND)
+
+    for ((a, l) in structureND.asDoubleTensor().matrixSequence().zip(lTensor.matrixSequence()))
+        for (i in 0 until n) choleskyHelper(a.asDoubleTensor2D(), l.asDoubleTensor2D(), n)
+
+    return lTensor
+}
+
+
+/**
+ * Singular Value Decomposition.
+ *
+ * Computes the singular value decomposition of either a matrix or batch of matrices `input`.
+ * The singular value decomposition is represented as a triple `Triple(U, S, V)`,
+ * such that `input == U dot diagonalEmbedding(S) dot V.transposed()`.
+ * If `input` is a batch of tensors, then `U`, `S`, and `V` are also batched with the same batch dimensions as `input`.
+ *
+ * @receiver the `input`.
+ * @param epsilon permissible error when calculating the dot product of vectors,
+ * i.e., the precision with which the cosine approaches 1 in an iterative algorithm.
+ * @return a triple `Triple(U, S, V)`.
+ */
+public fun DoubleTensorAlgebra.svd(
+    structureND: StructureND<Double>,
+    epsilon: Double,
+): Triple<StructureND<Double>, StructureND<Double>, StructureND<Double>> {
+    val size = structureND.dimension
+    val commonShape = structureND.shape.slice(0 until size - 2)
+    val (n, m) = structureND.shape.slice(size - 2 until size)
+    val uTensor = zeros(commonShape + ShapeND(min(n, m), n))
+    val sTensor = zeros(commonShape + ShapeND(min(n, m)))
+    val vTensor = zeros(commonShape + ShapeND(min(n, m), m))
+
+    val matrices = structureND.asDoubleTensor().matrices
+    val uTensors = uTensor.matrices
+    val sTensorVectors = sTensor.vectors
+    val vTensors = vTensor.matrices
+
+    for (index in matrices.indices) {
+        val matrix = matrices[index]
+        val usv = Triple(
+            uTensors[index],
+            sTensorVectors[index],
+            vTensors[index]
+        )
+        val matrixSize = matrix.shape.linearSize
+        val curMatrix = DoubleTensor(
+            matrix.shape,
+            matrix.source.view(0, matrixSize)
+        )
+        svdHelper(curMatrix, usv, m, n, epsilon)
+    }
+
+    return Triple(uTensor.transposed(), sTensor, vTensor.transposed())
+}
+
+/**
+ * Returns eigenvalues and eigenvectors of a real symmetric matrix `input` or a batch of real symmetric matrices,
+ * represented by a pair `eigenvalues to eigenvectors`.
+ *
+ * @param epsilon the permissible error when comparing tensors for equality
+ * and when the cosine approaches 1 in the SVD algorithm.
+ * @return a pair `eigenvalues to eigenvectors`.
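+ *
+ * A usage sketch, assuming `tensorSigma` is symmetric (e.g. `tensor + tensor.transposed()`):
+ * ```
+ * val (eigenvalues, eigenvectors) = symEigSvd(tensorSigma, epsilon = 1e-10)
+ * // tensorSigma eq eigenvectors matmul (diagonalEmbedding(eigenvalues) matmul eigenvectors.transposed())
+ * ```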
+ */
+public fun DoubleTensorAlgebra.symEigSvd(
+    structureND: StructureND<Double>,
+    epsilon: Double,
+): Pair<DoubleTensor, StructureND<Double>> {
+    //TODO optimize conversion
+    checkSymmetric(structureND.asDoubleTensor(), epsilon)
+
+    fun MutableStructure2D<Double>.cleanSym(n: Int) {
+        for (i in 0 until n) {
+            for (j in 0 until n) {
+                if (i == j) {
+                    this[i, j] = sign(this[i, j])
+                } else {
+                    this[i, j] = 0.0
+                }
+            }
+        }
+    }
+
+    val (u, s, v) = svd(structureND, epsilon)
+    val shp = s.shape + intArrayOf(1)
+    val utv = u.transposed() matmul v
+    val n = s.shape.last()
+    for (matrix in utv.matrixSequence()) {
+        matrix.asDoubleTensor2D().cleanSym(n)
+    }
+
+    val eig = (utv dot s.asDoubleTensor().view(shp)).view(s.shape)
+    return eig to v
+}
+
+public fun DoubleTensorAlgebra.symEigJacobi(
+    structureND: StructureND<Double>,
+    maxIteration: Int,
+    epsilon: Double,
+): Pair<DoubleTensor, DoubleTensor> {
+    //TODO optimize conversion
+    checkSymmetric(structureND.asDoubleTensor(), epsilon)
+
+    val size = structureND.dimension
+    val eigenvectors = zeros(structureND.shape)
+    val eigenvalues = zeros(structureND.shape.slice(0 until size - 1))
+
+    var eigenvalueStart = 0
+    var eigenvectorStart = 0
+    for (matrix in structureND.asDoubleTensor().matrixSequence()) {
+        val matrix2D = matrix.asDoubleTensor2D()
+        val (d, v) = matrix2D.jacobiHelper(maxIteration, epsilon)
+
+        for (i in 0 until matrix2D.rowNum) {
+            for (j in 0 until matrix2D.colNum) {
+                eigenvectors.source[eigenvectorStart + i * matrix2D.rowNum + j] = v[i, j]
+            }
+        }
+
+        for (i in 0 until matrix2D.rowNum) {
+            eigenvalues.source[eigenvalueStart + i] = d[i]
+        }
+
+        eigenvalueStart += structureND.shape.last()
+        eigenvectorStart += structureND.shape.last() * structureND.shape.last()
+    }
+
+    return eigenvalues to eigenvectors
+}
+
+/**
+ * Computes the determinant of a square matrix input, or of each square matrix in a batched input,
+ * using the LU factorization algorithm.
+ *
+ * @param epsilon the permissible error in the LU algorithm when comparing the determinant of a matrix
+ * with zero.
+ * @return the determinant.
+ */
+public fun DoubleTensorAlgebra.detLU(structureND: StructureND<Double>, epsilon: Double = 1e-9): DoubleTensor {
+    checkSquareMatrix(structureND.shape)
+    //TODO check for unnecessary copies
+    val luTensor = structureND.copyToTensor()
+    val pivotsTensor = structureND.setUpPivots()
+
+    val n = structureND.shape.size
+
+    val detTensorShape = ShapeND(IntArray(n - 1) { i -> structureND.shape[i] }.apply {
+        set(n - 2, 1)
+    })
+
+    val resBuffer = DoubleBuffer(detTensorShape.linearSize) { 0.0 }
+
+    val detTensor = DoubleTensor(
+        detTensorShape,
+        resBuffer
+    )
+
+    luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).forEachIndexed { index, (lu, pivots) ->
+        resBuffer[index] = if (luHelper(lu.asDoubleTensor2D(), pivots.as1D(), epsilon))
+            0.0 else luMatrixDet(lu.asDoubleTensor2D(), pivots.as1D())
+    }
+
+    return detTensor
+}
+
+/**
+ * Computes the multiplicative inverse matrix of a square matrix input, or of each square matrix in a batched input,
+ * using the LU factorization algorithm.
+ * Given a square matrix `a`, return the matrix `aInv` satisfying
+ * `a dot aInv == aInv dot a == eye(a.shape[0])`.
+ *
+ * @param epsilon the permissible error in the LU algorithm when comparing the determinant of a matrix with zero.
+ * @return the multiplicative inverse of a matrix.
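+ *
+ * Sketch of the contract, assuming `a` is a non-singular square matrix:
+ * ```
+ * val aInv = invLU(a)
+ * // a dot aInv eq eye(a.shape[0]) within the precision of the factorization
+ * ```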
+ */
+public fun DoubleTensorAlgebra.invLU(structureND: StructureND<Double>, epsilon: Double = 1e-9): DoubleTensor {
+    val (luTensor, pivotsTensor) = luFactor(structureND, epsilon)
+    val invTensor = zeroesLike(luTensor)
+
+    //TODO replace sequence with a cycle
+    val seq = luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).zip(invTensor.matrixSequence())
+    for ((luP, invMatrix) in seq) {
+        val (lu, pivots) = luP
+        luMatrixInv(lu.asDoubleTensor2D(), pivots.as1D(), invMatrix.asDoubleTensor2D())
+    }
+
+    return invTensor
+}
+
+/**
+ * Returns the covariance matrix `M` of the given vectors.
+ *
+ * `M[i, j]` contains the covariance of the `i`-th and `j`-th given vectors.
+ *
+ * @param vectors the [List] of 1-dimensional tensors with the same shape
+ * @return `M`.
+ */
+public fun DoubleTensorAlgebra.covariance(vectors: List<Buffer<Double>>): DoubleTensor {
+    check(vectors.isNotEmpty()) { "List must have at least 1 element" }
+    val n = vectors.size
+    val m = vectors[0].size
+    check(vectors.all { it.size == m }) { "Vectors must have same shapes" }
+    val resTensor = DoubleTensor(
+        ShapeND(n, n),
+        DoubleBuffer(n * n) { 0.0 }
+    )
+    for (i in 0 until n) {
+        for (j in 0 until n) {
+            resTensor[intArrayOf(i, j)] = bufferAlgebra.covariance(vectors[i], vectors[j])
+        }
+    }
+    return resTensor
+}
\ No newline at end of file
diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt
index 2ef2d87d6..e4c2c40ea 100644
--- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt
+++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt
@@ -34,73 +34,73 @@ internal class TestDoubleAnalyticTensorAlgebra {
 
     @Test
     fun testExp() = DoubleTensorAlgebra {
-        assertTrue { tensor.exp() eq expectedTensor(::exp) }
+        assertTrue { exp(tensor) eq expectedTensor(::exp) }
     }
 
     @Test
     fun testLog() = DoubleTensorAlgebra {
-        assertTrue { tensor.ln() eq expectedTensor(::ln) }
+        assertTrue { ln(tensor) eq expectedTensor(::ln) }
     }
 
     @Test
     fun testSqrt() = DoubleTensorAlgebra {
-        assertTrue { tensor.sqrt() eq expectedTensor(::sqrt) }
+        assertTrue { sqrt(tensor) eq expectedTensor(::sqrt) }
    }
 
     @Test
     fun testCos() = DoubleTensorAlgebra {
-        assertTrue { tensor.cos() eq expectedTensor(::cos) }
+        assertTrue { cos(tensor) eq expectedTensor(::cos) }
    }
 
     @Test
     fun testCosh() = DoubleTensorAlgebra {
-        assertTrue { tensor.cosh() eq expectedTensor(::cosh) }
+        assertTrue { cosh(tensor) eq expectedTensor(::cosh) }
    }
 
     @Test
     fun testAcosh() = DoubleTensorAlgebra {
-        assertTrue { tensor.acosh() eq expectedTensor(::acosh) }
+        assertTrue { acosh(tensor) eq expectedTensor(::acosh) }
    }
 
     @Test
     fun testSin() = DoubleTensorAlgebra {
-        assertTrue { tensor.sin() eq expectedTensor(::sin) }
+        assertTrue { sin(tensor) eq expectedTensor(::sin) }
    }
 
     @Test
     fun testSinh() = DoubleTensorAlgebra {
-        assertTrue { tensor.sinh() eq expectedTensor(::sinh) }
+        assertTrue { sinh(tensor) eq expectedTensor(::sinh) }
    }
 
     @Test
     fun testAsinh() = DoubleTensorAlgebra {
-        assertTrue { tensor.asinh() eq expectedTensor(::asinh) }
+        assertTrue { asinh(tensor) eq expectedTensor(::asinh) }
    }
 
     @Test
     fun testTan() = DoubleTensorAlgebra {
-        assertTrue { tensor.tan() eq expectedTensor(::tan) }
+        assertTrue { tan(tensor) eq expectedTensor(::tan) }
    }
 
     @Test
     fun testAtan() = DoubleTensorAlgebra {
-        assertTrue { tensor.atan() eq expectedTensor(::atan) }
+        assertTrue {
atan(tensor) eq expectedTensor(::atan) } } @Test fun testTanh() = DoubleTensorAlgebra { - assertTrue { tensor.tanh() eq expectedTensor(::tanh) } + assertTrue { tanh(tensor) eq expectedTensor(::tanh) } } @Test fun testCeil() = DoubleTensorAlgebra { - assertTrue { tensor.ceil() eq expectedTensor(::ceil) } + assertTrue { ceil(tensor) eq expectedTensor(::ceil) } } @Test fun testFloor() = DoubleTensorAlgebra { - assertTrue { tensor.floor() eq expectedTensor(::floor) } + assertTrue { floor(tensor) eq expectedTensor(::floor) } } val shape2 = ShapeND(2, 2) @@ -163,15 +163,15 @@ internal class TestDoubleAnalyticTensorAlgebra { @Test fun testMean() = DoubleTensorAlgebra { - assertTrue { tensor2.mean() == 1.0 } + assertTrue { mean(tensor2) == 1.0 } assertTrue { - tensor2.mean(0, true) eq fromArray( + mean(tensor2, 0, true) eq fromArray( ShapeND(1, 2), doubleArrayOf(-1.0, 3.0) ) } assertTrue { - tensor2.mean(1, false) eq fromArray( + mean(tensor2, 1, false) eq fromArray( ShapeND(2), doubleArrayOf(1.5, 0.5) ) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index d47af4a64..cbd7b9887 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -35,7 +35,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { -7.0 ) ) - val detTensor = tensor.detLU() + val detTensor = detLU(tensor) assertTrue(detTensor.eq(expectedTensor)) @@ -88,7 +88,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { ) ) - val invTensor = tensor.invLU() + val invTensor = invLU(tensor) assertTrue(invTensor.eq(expectedTensor)) } @@ -111,7 +111,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { val tensor = fromArray(shape, buffer) - val (q, r) = tensor.qr() + val (q, r) = qr(tensor) assertTrue { q.shape contentEquals shape } assertTrue { r.shape contentEquals shape } @@ -131,7 +131,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { ) val tensor = fromArray(shape, buffer) - val (p, l, u) = tensor.lu() + val (p, l, u) = lu(tensor) assertTrue { p.shape contentEquals shape } assertTrue { l.shape contentEquals shape } @@ -146,7 +146,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { val sigma = (tensor matmul tensor.transposed()) + diagonalEmbedding( fromArray(ShapeND(2, 5), DoubleArray(10) { 0.1 }) ) - val low = sigma.cholesky() + val low = cholesky(sigma) val sigmChol = low matmul low.transposed() assertTrue(sigma.eq(sigmChol)) } @@ -171,7 +171,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { @Test fun testBatchedSVD() = DoubleTensorAlgebra { val tensor = randomNormal(ShapeND(2, 5, 3), 0) - val (tensorU, tensorS, tensorV) = tensor.svd() + val (tensorU, tensorS, tensorV) = svd(tensor) val tensorSVD = tensorU matmul (diagonalEmbedding(tensorS) matmul tensorV.transposed()) assertTrue(tensor.eq(tensorSVD)) } @@ -180,7 +180,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { fun testBatchedSymEig() = DoubleTensorAlgebra { val tensor = randomNormal(shape = ShapeND(2, 3, 3), 0) val tensorSigma = tensor + tensor.transposed() - val (tensorS, tensorV) = tensorSigma.symEig() + val (tensorS, tensorV) = symEig(tensorSigma) val tensorSigmaCalc = tensorV matmul (diagonalEmbedding(tensorS) matmul tensorV.transposed()) assertTrue(tensorSigma.eq(tensorSigmaCalc)) } @@ -190,7 +190,7 @@ internal class 
TestDoubleLinearOpsTensorAlgebra { private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double = 1e-10) { - val svd = tensor.svd() + val svd = svd(tensor) val tensorSVD = svd.first .dot(