diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt index 4d53d940b..d029348f2 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt @@ -7,22 +7,22 @@ package space.kscience.kmath.tensors import space.kscience.kmath.operations.invoke import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra -import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra + // Dataset normalization fun main() { - // work in context with analytic methods - DoubleAnalyticTensorAlgebra { + // work in context with broadcast methods + BroadcastDoubleTensorAlgebra { // take dataset of 5-element vectors from normal distribution val dataset = randomNormal(intArrayOf(100, 5)) * 1.5 // all elements from N(0, 1.5) - BroadcastDoubleTensorAlgebra { - dataset += fromArray( - intArrayOf(5), - doubleArrayOf(0.0, 1.0, 1.5, 3.0, 5.0) // rows means - ) - } + + dataset += fromArray( + intArrayOf(5), + doubleArrayOf(0.0, 1.0, 1.5, 3.0, 5.0) // rows means + ) + // find out mean and standard deviation of each column val mean = dataset.mean(0, false) @@ -36,7 +36,7 @@ fun main() { println("Maximum:\n${dataset.max(0, false)}") // now we can scale dataset with mean normalization - val datasetScaled = BroadcastDoubleTensorAlgebra { (dataset - mean) / std } + val datasetScaled = (dataset - mean) / std // find out mean and std of scaled dataset diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt index bd8233ccc..c0ece04ca 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt @@ -7,14 +7,14 @@ package space.kscience.kmath.tensors import space.kscience.kmath.operations.invoke import space.kscience.kmath.tensors.core.DoubleTensor -import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra +import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra // solving linear system with LUP decomposition fun main () { // work in context with linear operations - DoubleLinearOpsTensorAlgebra { + BroadcastDoubleTensorAlgebra { // set true value of x val trueX = fromArray( diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt index ea863988c..1998b8d16 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt @@ -8,7 +8,6 @@ package space.kscience.kmath.tensors import space.kscience.kmath.operations.invoke import space.kscience.kmath.tensors.core.DoubleTensor import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra -import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra import space.kscience.kmath.tensors.core.toDoubleArray import kotlin.math.sqrt @@ -48,7 +47,7 @@ fun reluDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra { // activation layer with relu activator class ReLU : Activation(::relu, ::reluDer) -fun sigmoid(x: DoubleTensor): DoubleTensor = 
DoubleAnalyticTensorAlgebra { +fun sigmoid(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra { 1.0 / (1.0 + (-x).exp()) } @@ -83,9 +82,7 @@ class Dense( val gradInput = outputError dot weights.transpose() val gradW = input.transpose() dot outputError - val gradBias = DoubleAnalyticTensorAlgebra { - outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble() - } + val gradBias = outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble() weights -= learningRate * gradW bias -= learningRate * gradBias @@ -110,7 +107,7 @@ fun accuracy(yPred: DoubleTensor, yTrue: DoubleTensor): Double { // neural network class class NeuralNetwork(private val layers: List) { - private fun softMaxLoss(yPred: DoubleTensor, yTrue: DoubleTensor): DoubleTensor = DoubleAnalyticTensorAlgebra { + private fun softMaxLoss(yPred: DoubleTensor, yTrue: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra { val onesForAnswers = yPred.zeroesLike() yTrue.toDoubleArray().forEachIndexed { index, labelDouble -> @@ -118,7 +115,7 @@ class NeuralNetwork(private val layers: List) { onesForAnswers[intArrayOf(index, label)] = 1.0 } - val softmaxValue = BroadcastDoubleTensorAlgebra { yPred.exp() / yPred.exp().sum(dim = 1, keepDim = true) } + val softmaxValue = yPred.exp() / yPred.exp().sum(dim = 1, keepDim = true) (-onesForAnswers + softmaxValue) / (yPred.shape[0].toDouble()) } @@ -177,10 +174,9 @@ class NeuralNetwork(private val layers: List) { } - @OptIn(ExperimentalStdlibApi::class) fun main() { - DoubleTensorAlgebra { + BroadcastDoubleTensorAlgebra { val features = 5 val sampleSize = 250 val trainSize = 180 @@ -188,12 +184,12 @@ fun main() { // take sample of features from normal distribution val x = randomNormal(intArrayOf(sampleSize, features), seed) * 2.5 - BroadcastDoubleTensorAlgebra { - x += fromArray( - intArrayOf(5), - doubleArrayOf(0.0, -1.0, -2.5, -3.0, 5.5) // rows means - ) - } + + x += fromArray( + intArrayOf(5), + doubleArrayOf(0.0, -1.0, -2.5, -3.0, 5.5) // rows means + ) + // define class like '1' if the sum of features > 0 and '0' otherwise val y = fromArray( diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt index 435af35f6..497f63b41 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt @@ -7,8 +7,7 @@ package space.kscience.kmath.tensors import space.kscience.kmath.operations.invoke import space.kscience.kmath.tensors.core.DoubleTensor -import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra -import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra +import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra import kotlin.math.abs @@ -19,7 +18,7 @@ fun main() { val randSeed = 100500L // work in context with linear operations - DoubleLinearOpsTensorAlgebra { + DoubleTensorAlgebra { // take coefficient vector from normal distribution val alpha = randomNormal( intArrayOf(5), @@ -56,12 +55,12 @@ fun main() { "$alphaOLS") // figure out MSE of approximation - fun mse(yTrue: DoubleTensor, yPred: DoubleTensor): Double = DoubleAnalyticTensorAlgebra{ + fun mse(yTrue: DoubleTensor, yPred: DoubleTensor): Double { require(yTrue.shape.size == 1) require(yTrue.shape contentEquals yPred.shape) val diff = yTrue - yPred - diff.dot(diff).sqrt().value() + return diff.dot(diff).sqrt().value() } println("MSE: ${mse(alpha, alphaOLS)}") diff --git 
a/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
index ee25b63a3..d29dbb094 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
@@ -7,9 +7,6 @@ package space.kscience.kmath.tensors
 
 import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra
-import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra
-
 
 // simple PCA
 
@@ -17,8 +14,8 @@ import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra
 fun main(){
     val seed = 100500L
 
-    // work in context with analytic methods
-    DoubleAnalyticTensorAlgebra {
+    // work in context with broadcast methods
+    BroadcastDoubleTensorAlgebra {
 
         // assume x is range from 0 until 10
         val x = fromArray(
@@ -63,7 +60,7 @@ fun main(){
         println("Covariance matrix:\n$covMatrix")
 
         // and find out eigenvector of it
-        val (_, evecs) = DoubleLinearOpsTensorAlgebra {covMatrix.symEig()}
+        val (_, evecs) = covMatrix.symEig()
         val v = evecs[0]
         println("Eigenvector:\n$v")
 
@@ -74,7 +71,7 @@ fun main(){
         // we can restore original data from reduced data.
         // for example, find 7th element of dataset
         val n = 7
-        val restored = BroadcastDoubleTensorAlgebra{(datasetReduced[n] dot v.view(intArrayOf(1, 2))) * std + mean}
+        val restored = (datasetReduced[n] dot v.view(intArrayOf(1, 2))) * std + mean
         println("Original value:\n${dataset[n]}")
         println("Restored value:\n$restored")
     }
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt
index 69e88c28f..1db986e77 100644
--- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt
@@ -11,8 +11,7 @@ package space.kscience.kmath.tensors.api
  *
  * @param T the type of items closed under analytic functions in the tensors.
  */
-public interface AnalyticTensorAlgebra<T> :
-    TensorPartialDivisionAlgebra<T> {
+public interface AnalyticTensorAlgebra<T> : TensorPartialDivisionAlgebra<T> {
 
     /**
      * @return the mean of all elements in the input tensor.
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt
index 4a325ab4e..6bdecfa85 100644
--- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt
@@ -10,8 +10,7 @@ package space.kscience.kmath.tensors.api
  *
  * @param T the type of items closed under division in the tensors.
  */
-public interface LinearOpsTensorAlgebra<T> :
-    TensorPartialDivisionAlgebra<T> {
+public interface LinearOpsTensorAlgebra<T> : TensorPartialDivisionAlgebra<T> {
 
     /**
      * Computes the determinant of a square matrix input, or of each square matrix in a batched input.
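All of the example rewrites above lean on the same change: `BroadcastDoubleTensorAlgebra` now exposes the analytic and linear-algebra operations directly, so the nested algebra contexts disappear. The following is a minimal sketch of the resulting usage pattern (not part of this patch), assuming only entry points already visible in the examples: `randomNormal`, `mean`, `std`, `transpose`, `dot`, and `symEig`.

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra

fun main() = BroadcastDoubleTensorAlgebra {
    // sample a (100, 3) dataset from N(0, 1)
    val x = randomNormal(intArrayOf(100, 3), 0)

    // analytic reductions and broadcast arithmetic, no nested contexts
    val xScaled = (x - x.mean(0, false)) / x.std(0, false)

    // linear operations from the same context: eigendecomposition
    // of the (symmetric) scaled covariance matrix
    val cov = (xScaled.transpose() dot xScaled) * (1.0 / 99.0)
    val (evals, evecs) = cov.symEig()
    println("Eigenvalues:\n$evals")
    println("First principal axis:\n${evecs[0]}")
}
```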
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorPartialDivisionAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorPartialDivisionAlgebra.kt index 921157963..02bf5415d 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorPartialDivisionAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorPartialDivisionAlgebra.kt @@ -11,8 +11,7 @@ package space.kscience.kmath.tensors.api * * @param T the type of items closed under division in the tensors. */ -public interface TensorPartialDivisionAlgebra : - TensorAlgebra { +public interface TensorPartialDivisionAlgebra : TensorAlgebra { /** * Each element of the tensor [other] is divided by this value. diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt index e3143f5a7..41df50cba 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt @@ -6,6 +6,7 @@ package space.kscience.kmath.tensors.core import space.kscience.kmath.structures.DoubleBuffer +import space.kscience.kmath.tensors.core.internal.toPrettyString /** * Default [BufferedTensor] implementation for [Double] values diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/BroadcastDoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/BroadcastDoubleTensorAlgebra.kt index 873ec9027..bc7d90c28 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/BroadcastDoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/BroadcastDoubleTensorAlgebra.kt @@ -7,8 +7,10 @@ package space.kscience.kmath.tensors.core.algebras import space.kscience.kmath.tensors.api.Tensor import space.kscience.kmath.tensors.core.* -import space.kscience.kmath.tensors.core.broadcastTensors -import space.kscience.kmath.tensors.core.broadcastTo +import space.kscience.kmath.tensors.core.internal.array +import space.kscience.kmath.tensors.core.internal.broadcastTensors +import space.kscience.kmath.tensors.core.internal.broadcastTo +import space.kscience.kmath.tensors.core.internal.tensor /** * Basic linear algebra operations implemented with broadcasting. diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/DoubleAnalyticTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/DoubleAnalyticTensorAlgebra.kt deleted file mode 100644 index 23a2fa282..000000000 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/DoubleAnalyticTensorAlgebra.kt +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2018-2021 KMath contributors. - * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
- */ - -package space.kscience.kmath.tensors.core.algebras - -import space.kscience.kmath.tensors.api.AnalyticTensorAlgebra -import space.kscience.kmath.tensors.api.Tensor -import space.kscience.kmath.tensors.core.DoubleTensor -import space.kscience.kmath.tensors.core.tensor -import kotlin.math.* - -public object DoubleAnalyticTensorAlgebra : - AnalyticTensorAlgebra, - DoubleTensorAlgebra() { - - override fun Tensor.mean(): Double = this.fold { it.sum() / tensor.numElements } - - override fun Tensor.mean(dim: Int, keepDim: Boolean): DoubleTensor = - foldDim( - { arr -> - check(dim < dimension) { "Dimension $dim out of range $dimension" } - arr.sum() / shape[dim] - }, - dim, - keepDim - ) - - override fun Tensor.std(): Double = this.fold { arr -> - val mean = arr.sum() / tensor.numElements - sqrt(arr.sumOf { (it - mean) * (it - mean) } / (tensor.numElements - 1)) - } - - override fun Tensor.std(dim: Int, keepDim: Boolean): DoubleTensor = foldDim( - { arr -> - check(dim < dimension) { "Dimension $dim out of range $dimension" } - val mean = arr.sum() / shape[dim] - sqrt(arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1)) - }, - dim, - keepDim - ) - - override fun Tensor.variance(): Double = this.fold { arr -> - val mean = arr.sum() / tensor.numElements - arr.sumOf { (it - mean) * (it - mean) } / (tensor.numElements - 1) - } - - override fun Tensor.variance(dim: Int, keepDim: Boolean): DoubleTensor = foldDim( - { arr -> - check(dim < dimension) { "Dimension $dim out of range $dimension" } - val mean = arr.sum() / shape[dim] - arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1) - }, - dim, - keepDim - ) - - private fun cov(x: DoubleTensor, y:DoubleTensor): Double{ - val n = x.shape[0] - return ((x - x.mean()) * (y - y.mean())).mean() * n / (n - 1) - } - - override fun cov(tensors: List>): DoubleTensor { - check(tensors.isNotEmpty()) { "List must have at least 1 element" } - val n = tensors.size - val m = tensors[0].shape[0] - check(tensors.all { it.shape contentEquals intArrayOf(m) }) { "Tensors must have same shapes" } - val resTensor = DoubleTensor( - intArrayOf(n, n), - DoubleArray(n * n) {0.0} - ) - for (i in 0 until n){ - for (j in 0 until n){ - resTensor[intArrayOf(i, j)] = cov(tensors[i].tensor, tensors[j].tensor) - } - } - return resTensor - } - - override fun Tensor.exp(): DoubleTensor = tensor.map(::exp) - - override fun Tensor.ln(): DoubleTensor = tensor.map(::ln) - - override fun Tensor.sqrt(): DoubleTensor = tensor.map(::sqrt) - - override fun Tensor.cos(): DoubleTensor = tensor.map(::cos) - - override fun Tensor.acos(): DoubleTensor = tensor.map(::acos) - - override fun Tensor.cosh(): DoubleTensor = tensor.map(::cosh) - - override fun Tensor.acosh(): DoubleTensor = tensor.map(::acosh) - - override fun Tensor.sin(): DoubleTensor = tensor.map(::sin) - - override fun Tensor.asin(): DoubleTensor = tensor.map(::asin) - - override fun Tensor.sinh(): DoubleTensor = tensor.map(::sinh) - - override fun Tensor.asinh(): DoubleTensor = tensor.map(::asinh) - - override fun Tensor.tan(): DoubleTensor = tensor.map(::tan) - - override fun Tensor.atan(): DoubleTensor = tensor.map(::atan) - - override fun Tensor.tanh(): DoubleTensor = tensor.map(::tanh) - - override fun Tensor.atanh(): DoubleTensor = tensor.map(::atanh) - - override fun Tensor.ceil(): DoubleTensor = tensor.map(::ceil) - - override fun Tensor.floor(): DoubleTensor = tensor.map(::floor) - -} diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/DoubleLinearOpsTensorAlgebra.kt 
b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/DoubleLinearOpsTensorAlgebra.kt deleted file mode 100644 index 430482613..000000000 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/DoubleLinearOpsTensorAlgebra.kt +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Copyright 2018-2021 KMath contributors. - * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. - */ - -package space.kscience.kmath.tensors.core.algebras - -import space.kscience.kmath.tensors.api.LinearOpsTensorAlgebra -import space.kscience.kmath.nd.as1D -import space.kscience.kmath.nd.as2D -import space.kscience.kmath.tensors.api.Tensor -import space.kscience.kmath.tensors.core.* -import space.kscience.kmath.tensors.core.checkSquareMatrix -import space.kscience.kmath.tensors.core.choleskyHelper -import space.kscience.kmath.tensors.core.cleanSymHelper -import space.kscience.kmath.tensors.core.luHelper -import space.kscience.kmath.tensors.core.luMatrixDet -import space.kscience.kmath.tensors.core.luMatrixInv -import space.kscience.kmath.tensors.core.luPivotHelper -import space.kscience.kmath.tensors.core.pivInit -import kotlin.math.min - -/** - * Implementation of common linear algebra operations on double numbers. - * Implements the LinearOpsTensorAlgebra interface. - */ -public object DoubleLinearOpsTensorAlgebra : - LinearOpsTensorAlgebra, - DoubleTensorAlgebra() { - - override fun Tensor.inv(): DoubleTensor = invLU(1e-9) - - override fun Tensor.det(): DoubleTensor = detLU(1e-9) - - /** - * Computes the LU factorization of a matrix or batches of matrices `input`. - * Returns a tuple containing the LU factorization and pivots of `input`. - * - * @param epsilon permissible error when comparing the determinant of a matrix with zero - * @return pair of `factorization` and `pivots`. - * The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor. - * The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows. - */ - public fun Tensor.luFactor(epsilon: Double): Pair = - computeLU(tensor, epsilon) - ?: throw IllegalArgumentException("Tensor contains matrices which are singular at precision $epsilon") - - /** - * Computes the LU factorization of a matrix or batches of matrices `input`. - * Returns a tuple containing the LU factorization and pivots of `input`. - * Uses an error of ``1e-9`` when calculating whether a matrix is degenerate. - * - * @return pair of `factorization` and `pivots`. - * The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor. - * The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows. - */ - public fun Tensor.luFactor(): Pair = luFactor(1e-9) - - /** - * Unpacks the data and pivots from a LU factorization of a tensor. - * Given a tensor [luTensor], return tensors (P, L, U) satisfying ``P * luTensor = L * U``, - * with `P` being a permutation matrix or batch of matrices, - * `L` being a lower triangular matrix or batch of matrices, - * `U` being an upper triangular matrix or batch of matrices. 
- * - * @param luTensor the packed LU factorization data - * @param pivotsTensor the packed LU factorization pivots - * @return triple of P, L and U tensors - */ - public fun luPivot( - luTensor: Tensor, - pivotsTensor: Tensor - ): Triple { - checkSquareMatrix(luTensor.shape) - check( - luTensor.shape.dropLast(2).toIntArray() contentEquals pivotsTensor.shape.dropLast(1).toIntArray() || - luTensor.shape.last() == pivotsTensor.shape.last() - 1 - ) { "Inappropriate shapes of input tensors" } - - val n = luTensor.shape.last() - val pTensor = luTensor.zeroesLike() - pTensor - .matrixSequence() - .zip(pivotsTensor.tensor.vectorSequence()) - .forEach { (p, pivot) -> pivInit(p.as2D(), pivot.as1D(), n) } - - val lTensor = luTensor.zeroesLike() - val uTensor = luTensor.zeroesLike() - - lTensor.matrixSequence() - .zip(uTensor.matrixSequence()) - .zip(luTensor.tensor.matrixSequence()) - .forEach { (pairLU, lu) -> - val (l, u) = pairLU - luPivotHelper(l.as2D(), u.as2D(), lu.as2D(), n) - } - - return Triple(pTensor, lTensor, uTensor) - } - - /** - * QR decomposition. - * - * Computes the QR decomposition of a matrix or a batch of matrices, and returns a pair `(Q, R)` of tensors. - * Given a tensor `input`, return tensors (Q, R) satisfying ``input = Q * R``, - * with `Q` being an orthogonal matrix or batch of orthogonal matrices - * and `R` being an upper triangular matrix or batch of upper triangular matrices. - * - * @param epsilon permissible error when comparing tensors for equality. - * Used when checking the positive definiteness of the input matrix or matrices. - * @return pair of Q and R tensors. - */ - public fun Tensor.cholesky(epsilon: Double): DoubleTensor { - checkSquareMatrix(shape) - checkPositiveDefinite(tensor, epsilon) - - val n = shape.last() - val lTensor = zeroesLike() - - for ((a, l) in tensor.matrixSequence().zip(lTensor.matrixSequence())) - for (i in 0 until n) choleskyHelper(a.as2D(), l.as2D(), n) - - return lTensor - } - - override fun Tensor.cholesky(): DoubleTensor = cholesky(1e-6) - - override fun Tensor.qr(): Pair { - checkSquareMatrix(shape) - val qTensor = zeroesLike() - val rTensor = zeroesLike() - tensor.matrixSequence() - .zip((qTensor.matrixSequence() - .zip(rTensor.matrixSequence()))).forEach { (matrix, qr) -> - val (q, r) = qr - qrHelper(matrix.asTensor(), q.asTensor(), r.as2D()) - } - - return qTensor to rTensor - } - - override fun Tensor.svd(): Triple = - svd(epsilon = 1e-10) - - /** - * Singular Value Decomposition. - * - * Computes the singular value decomposition of either a matrix or batch of matrices `input`. - * The singular value decomposition is represented as a triple `(U, S, V)`, - * such that ``input = U.dot(diagonalEmbedding(S).dot(V.T))``. - * If input is a batch of tensors, then U, S, and Vh are also batched with the same batch dimensions as input. - * - * @param epsilon permissible error when calculating the dot product of vectors, - * i.e. the precision with which the cosine approaches 1 in an iterative algorithm. - * @return triple `(U, S, V)`. 
- */ - public fun Tensor.svd(epsilon: Double): Triple { - val size = tensor.dimension - val commonShape = tensor.shape.sliceArray(0 until size - 2) - val (n, m) = tensor.shape.sliceArray(size - 2 until size) - val uTensor = zeros(commonShape + intArrayOf(min(n, m), n)) - val sTensor = zeros(commonShape + intArrayOf(min(n, m))) - val vTensor = zeros(commonShape + intArrayOf(min(n, m), m)) - - tensor.matrixSequence() - .zip(uTensor.matrixSequence() - .zip(sTensor.vectorSequence() - .zip(vTensor.matrixSequence()))).forEach { (matrix, USV) -> - val matrixSize = matrix.shape.reduce { acc, i -> acc * i } - val curMatrix = DoubleTensor( - matrix.shape, - matrix.mutableBuffer.array().slice(matrix.bufferStart until matrix.bufferStart + matrixSize) - .toDoubleArray() - ) - svdHelper(curMatrix, USV, m, n, epsilon) - } - - return Triple(uTensor.transpose(), sTensor, vTensor.transpose()) - } - - override fun Tensor.symEig(): Pair = - symEig(epsilon = 1e-15) - - /** - * Returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices, - * represented by a pair (eigenvalues, eigenvectors). - * - * @param epsilon permissible error when comparing tensors for equality - * and when the cosine approaches 1 in the SVD algorithm. - * @return a pair (eigenvalues, eigenvectors) - */ - public fun Tensor.symEig(epsilon: Double): Pair { - checkSymmetric(tensor, epsilon) - val (u, s, v) = tensor.svd(epsilon) - val shp = s.shape + intArrayOf(1) - val utv = u.transpose() dot v - val n = s.shape.last() - for (matrix in utv.matrixSequence()) - cleanSymHelper(matrix.as2D(), n) - - val eig = (utv dot s.view(shp)).view(s.shape) - return eig to v - } - - /** - * Computes the determinant of a square matrix input, or of each square matrix in a batched input - * using LU factorization algorithm. - * - * @param epsilon error in the LU algorithm - permissible error when comparing the determinant of a matrix with zero - * @return the determinant. - */ - public fun Tensor.detLU(epsilon: Double = 1e-9): DoubleTensor { - - checkSquareMatrix(tensor.shape) - val luTensor = tensor.copy() - val pivotsTensor = tensor.setUpPivots() - - val n = shape.size - - val detTensorShape = IntArray(n - 1) { i -> shape[i] } - detTensorShape[n - 2] = 1 - val resBuffer = DoubleArray(detTensorShape.reduce(Int::times)) { 0.0 } - - val detTensor = DoubleTensor( - detTensorShape, - resBuffer - ) - - luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).forEachIndexed { index, (lu, pivots) -> - resBuffer[index] = if (luHelper(lu.as2D(), pivots.as1D(), epsilon)) - 0.0 else luMatrixDet(lu.as2D(), pivots.as1D()) - } - - return detTensor - } - - /** - * Computes the multiplicative inverse matrix of a square matrix input, or of each square matrix in a batched input - * using LU factorization algorithm. - * Given a square matrix `a`, return the matrix `aInv` satisfying - * ``a.dot(aInv) = aInv.dot(a) = eye(a.shape[0])``. - * - * @param epsilon error in the LU algorithm - permissible error when comparing the determinant of a matrix with zero - * @return the multiplicative inverse of a matrix. 
- */ - public fun Tensor.invLU(epsilon: Double = 1e-9): DoubleTensor { - val (luTensor, pivotsTensor) = luFactor(epsilon) - val invTensor = luTensor.zeroesLike() - - val seq = luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).zip(invTensor.matrixSequence()) - for ((luP, invMatrix) in seq) { - val (lu, pivots) = luP - luMatrixInv(lu.as2D(), pivots.as1D(), invMatrix.as2D()) - } - - return invTensor - } - - /** - * LUP decomposition - * - * Computes the LUP decomposition of a matrix or a batch of matrices. - * Given a tensor `input`, return tensors (P, L, U) satisfying ``P * input = L * U``, - * with `P` being a permutation matrix or batch of matrices, - * `L` being a lower triangular matrix or batch of matrices, - * `U` being an upper triangular matrix or batch of matrices. - * - * @param epsilon permissible error when comparing the determinant of a matrix with zero - * @return triple of P, L and U tensors - */ - public fun Tensor.lu(epsilon: Double = 1e-9): Triple { - val (lu, pivots) = this.luFactor(epsilon) - return luPivot(lu, pivots) - } - - override fun Tensor.lu(): Triple = lu(1e-9) - -} \ No newline at end of file diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/DoubleTensorAlgebra.kt index e5d41f856..cb06432d0 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/DoubleTensorAlgebra.kt @@ -5,28 +5,34 @@ package space.kscience.kmath.tensors.core.algebras +import space.kscience.kmath.nd.as1D import space.kscience.kmath.nd.as2D +import space.kscience.kmath.tensors.api.AnalyticTensorAlgebra +import space.kscience.kmath.tensors.api.LinearOpsTensorAlgebra import space.kscience.kmath.tensors.api.TensorPartialDivisionAlgebra import space.kscience.kmath.tensors.api.Tensor import space.kscience.kmath.tensors.core.* -import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra.fold -import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra.foldDim -import space.kscience.kmath.tensors.core.broadcastOuterTensors -import space.kscience.kmath.tensors.core.checkBufferShapeConsistency -import space.kscience.kmath.tensors.core.checkEmptyDoubleBuffer -import space.kscience.kmath.tensors.core.checkEmptyShape -import space.kscience.kmath.tensors.core.checkShapesCompatible -import space.kscience.kmath.tensors.core.checkTranspose -import space.kscience.kmath.tensors.core.checkView -import space.kscience.kmath.tensors.core.dotHelper -import space.kscience.kmath.tensors.core.getRandomNormals -import space.kscience.kmath.tensors.core.minusIndexFrom -import kotlin.math.abs +import space.kscience.kmath.tensors.core.internal.dotHelper +import space.kscience.kmath.tensors.core.internal.getRandomNormals +import space.kscience.kmath.tensors.core.internal.* +import space.kscience.kmath.tensors.core.internal.broadcastOuterTensors +import space.kscience.kmath.tensors.core.internal.checkBufferShapeConsistency +import space.kscience.kmath.tensors.core.internal.checkEmptyDoubleBuffer +import space.kscience.kmath.tensors.core.internal.checkEmptyShape +import space.kscience.kmath.tensors.core.internal.checkShapesCompatible +import space.kscience.kmath.tensors.core.internal.checkSquareMatrix +import space.kscience.kmath.tensors.core.internal.checkTranspose +import 
space.kscience.kmath.tensors.core.internal.checkView +import space.kscience.kmath.tensors.core.internal.minusIndexFrom +import kotlin.math.* /** * Implementation of basic operations over double tensors and basic algebra operations on them. */ -public open class DoubleTensorAlgebra : TensorPartialDivisionAlgebra { +public open class DoubleTensorAlgebra : + TensorPartialDivisionAlgebra, + AnalyticTensorAlgebra, + LinearOpsTensorAlgebra { public companion object : DoubleTensorAlgebra() @@ -311,9 +317,8 @@ public open class DoubleTensorAlgebra : TensorPartialDivisionAlgebra { return DoubleTensor(shape, tensor.mutableBuffer.array(), tensor.bufferStart) } - override fun Tensor.viewAs(other: Tensor): DoubleTensor { - return tensor.view(other.shape) - } + override fun Tensor.viewAs(other: Tensor): DoubleTensor = + tensor.view(other.shape) override infix fun Tensor.dot(other: Tensor): DoubleTensor { if (tensor.shape.size == 1 && other.shape.size == 1) { @@ -565,4 +570,350 @@ public open class DoubleTensorAlgebra : TensorPartialDivisionAlgebra { x.withIndex().maxByOrNull { it.value }?.index!!.toDouble() }, dim, keepDim) + + override fun Tensor.mean(): Double = this.fold { it.sum() / tensor.numElements } + + override fun Tensor.mean(dim: Int, keepDim: Boolean): DoubleTensor = + foldDim( + { arr -> + check(dim < dimension) { "Dimension $dim out of range $dimension" } + arr.sum() / shape[dim] + }, + dim, + keepDim + ) + + override fun Tensor.std(): Double = this.fold { arr -> + val mean = arr.sum() / tensor.numElements + sqrt(arr.sumOf { (it - mean) * (it - mean) } / (tensor.numElements - 1)) + } + + override fun Tensor.std(dim: Int, keepDim: Boolean): DoubleTensor = foldDim( + { arr -> + check(dim < dimension) { "Dimension $dim out of range $dimension" } + val mean = arr.sum() / shape[dim] + sqrt(arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1)) + }, + dim, + keepDim + ) + + override fun Tensor.variance(): Double = this.fold { arr -> + val mean = arr.sum() / tensor.numElements + arr.sumOf { (it - mean) * (it - mean) } / (tensor.numElements - 1) + } + + override fun Tensor.variance(dim: Int, keepDim: Boolean): DoubleTensor = foldDim( + { arr -> + check(dim < dimension) { "Dimension $dim out of range $dimension" } + val mean = arr.sum() / shape[dim] + arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1) + }, + dim, + keepDim + ) + + private fun cov(x: DoubleTensor, y:DoubleTensor): Double{ + val n = x.shape[0] + return ((x - x.mean()) * (y - y.mean())).mean() * n / (n - 1) + } + + override fun cov(tensors: List>): DoubleTensor { + check(tensors.isNotEmpty()) { "List must have at least 1 element" } + val n = tensors.size + val m = tensors[0].shape[0] + check(tensors.all { it.shape contentEquals intArrayOf(m) }) { "Tensors must have same shapes" } + val resTensor = DoubleTensor( + intArrayOf(n, n), + DoubleArray(n * n) {0.0} + ) + for (i in 0 until n){ + for (j in 0 until n){ + resTensor[intArrayOf(i, j)] = cov(tensors[i].tensor, tensors[j].tensor) + } + } + return resTensor + } + + override fun Tensor.exp(): DoubleTensor = tensor.map(::exp) + + override fun Tensor.ln(): DoubleTensor = tensor.map(::ln) + + override fun Tensor.sqrt(): DoubleTensor = tensor.map(::sqrt) + + override fun Tensor.cos(): DoubleTensor = tensor.map(::cos) + + override fun Tensor.acos(): DoubleTensor = tensor.map(::acos) + + override fun Tensor.cosh(): DoubleTensor = tensor.map(::cosh) + + override fun Tensor.acosh(): DoubleTensor = tensor.map(::acosh) + + override fun Tensor.sin(): DoubleTensor = tensor.map(::sin) 
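These reductions are built on `foldDim`: `dim` names the axis to collapse, and `keepDim` only decides whether that axis survives with size 1. The values below follow directly from the definitions above; the snippet itself is a sketch, not part of the patch.

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra

fun main() = DoubleTensorAlgebra {
    val t = fromArray(
        intArrayOf(2, 3),
        doubleArrayOf(
            1.0, 2.0, 3.0,
            4.0, 5.0, 6.0
        )
    )
    println(t.mean())             // 3.5: mean over all six elements
    println(t.mean(0, false))     // shape (3): [2.5, 3.5, 4.5]
    println(t.mean(1, true))      // shape (2, 1): [[2.0], [5.0]]
    println(t.variance(1, false)) // shape (2): [1.0, 1.0] (sample variance, n - 1)
}
```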
+
+    override fun Tensor<Double>.asin(): DoubleTensor = tensor.map(::asin)
+
+    override fun Tensor<Double>.sinh(): DoubleTensor = tensor.map(::sinh)
+
+    override fun Tensor<Double>.asinh(): DoubleTensor = tensor.map(::asinh)
+
+    override fun Tensor<Double>.tan(): DoubleTensor = tensor.map(::tan)
+
+    override fun Tensor<Double>.atan(): DoubleTensor = tensor.map(::atan)
+
+    override fun Tensor<Double>.tanh(): DoubleTensor = tensor.map(::tanh)
+
+    override fun Tensor<Double>.atanh(): DoubleTensor = tensor.map(::atanh)
+
+    override fun Tensor<Double>.ceil(): DoubleTensor = tensor.map(::ceil)
+
+    override fun Tensor<Double>.floor(): DoubleTensor = tensor.map(::floor)
+
+    override fun Tensor<Double>.inv(): DoubleTensor = invLU(1e-9)
+
+    override fun Tensor<Double>.det(): DoubleTensor = detLU(1e-9)
+
+    /**
+     * Computes the LU factorization of a matrix or batches of matrices `input`.
+     * Returns a tuple containing the LU factorization and pivots of `input`.
+     *
+     * @param epsilon permissible error when comparing the determinant of a matrix with zero
+     * @return pair of `factorization` and `pivots`.
+     * The `factorization` has the shape ``(*, m, n)``, where ``(*, m, n)`` is the shape of the `input` tensor.
+     * The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows.
+     */
+    public fun Tensor<Double>.luFactor(epsilon: Double): Pair<DoubleTensor, IntTensor> =
+        computeLU(tensor, epsilon)
+            ?: throw IllegalArgumentException("Tensor contains matrices which are singular at precision $epsilon")
+
+    /**
+     * Computes the LU factorization of a matrix or batches of matrices `input`.
+     * Returns a tuple containing the LU factorization and pivots of `input`.
+     * Uses an error of ``1e-9`` when calculating whether a matrix is degenerate.
+     *
+     * @return pair of `factorization` and `pivots`.
+     * The `factorization` has the shape ``(*, m, n)``, where ``(*, m, n)`` is the shape of the `input` tensor.
+     * The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows.
+     */
+    public fun Tensor<Double>.luFactor(): Pair<DoubleTensor, IntTensor> = luFactor(1e-9)
+
+    /**
+     * Unpacks the data and pivots from an LU factorization of a tensor.
+     * Given a tensor [luTensor], return tensors (P, L, U) satisfying ``P * luTensor = L * U``,
+     * with `P` being a permutation matrix or batch of matrices,
+     * `L` being a lower triangular matrix or batch of matrices,
+     * `U` being an upper triangular matrix or batch of matrices.
+     *
+     * @param luTensor the packed LU factorization data
+     * @param pivotsTensor the packed LU factorization pivots
+     * @return triple of P, L and U tensors
+     */
+    public fun luPivot(
+        luTensor: Tensor<Double>,
+        pivotsTensor: Tensor<Int>
+    ): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
+        checkSquareMatrix(luTensor.shape)
+        check(
+            luTensor.shape.dropLast(2).toIntArray() contentEquals pivotsTensor.shape.dropLast(1).toIntArray() ||
+                    luTensor.shape.last() == pivotsTensor.shape.last() - 1
+        ) { "Inappropriate shapes of input tensors" }
+
+        val n = luTensor.shape.last()
+        val pTensor = luTensor.zeroesLike()
+        pTensor
+            .matrixSequence()
+            .zip(pivotsTensor.tensor.vectorSequence())
+            .forEach { (p, pivot) -> pivInit(p.as2D(), pivot.as1D(), n) }
+
+        val lTensor = luTensor.zeroesLike()
+        val uTensor = luTensor.zeroesLike()
+
+        lTensor.matrixSequence()
+            .zip(uTensor.matrixSequence())
+            .zip(luTensor.tensor.matrixSequence())
+            .forEach { (pairLU, lu) ->
+                val (l, u) = pairLU
+                luPivotHelper(l.as2D(), u.as2D(), lu.as2D(), n)
+            }
+
+        return Triple(pTensor, lTensor, uTensor)
+    }
+
+    /**
+     * Cholesky decomposition.
+     *
+     * Computes the Cholesky decomposition of a symmetric positive-definite matrix or a batch of such matrices.
+     * Given a tensor `input`, return the tensor `L` satisfying ``input = L * L.T``,
+     * with `L` being a lower triangular matrix or batch of lower triangular matrices.
+     *
+     * @param epsilon permissible error when comparing tensors for equality.
+     * Used when checking the positive definiteness of the input matrix or matrices.
+     * @return the lower triangular matrix or batch of lower triangular matrices `L`.
+     */
+    public fun Tensor<Double>.cholesky(epsilon: Double): DoubleTensor {
+        checkSquareMatrix(shape)
+        checkPositiveDefinite(tensor, epsilon)
+
+        val n = shape.last()
+        val lTensor = zeroesLike()
+
+        for ((a, l) in tensor.matrixSequence().zip(lTensor.matrixSequence()))
+            for (i in 0 until n) choleskyHelper(a.as2D(), l.as2D(), n)
+
+        return lTensor
+    }
+
+    override fun Tensor<Double>.cholesky(): DoubleTensor = cholesky(1e-6)
+
+    override fun Tensor<Double>.qr(): Pair<DoubleTensor, DoubleTensor> {
+        checkSquareMatrix(shape)
+        val qTensor = zeroesLike()
+        val rTensor = zeroesLike()
+        tensor.matrixSequence()
+            .zip((qTensor.matrixSequence()
+                .zip(rTensor.matrixSequence()))).forEach { (matrix, qr) ->
+                val (q, r) = qr
+                qrHelper(matrix.asTensor(), q.asTensor(), r.as2D())
+            }
+
+        return qTensor to rTensor
+    }
+
+    override fun Tensor<Double>.svd(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> =
+        svd(epsilon = 1e-10)
+
+    /**
+     * Singular Value Decomposition.
+     *
+     * Computes the singular value decomposition of either a matrix or batch of matrices `input`.
+     * The singular value decomposition is represented as a triple `(U, S, V)`,
+     * such that ``input = U.dot(diagonalEmbedding(S).dot(V.T))``.
+     * If input is a batch of tensors, then U, S, and V are also batched with the same batch dimensions as input.
+     *
+     * @param epsilon permissible error when calculating the dot product of vectors,
+     * i.e. the precision with which the cosine approaches 1 in an iterative algorithm.
+     * @return triple `(U, S, V)`.
+     */
+    public fun Tensor<Double>.svd(epsilon: Double): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
+        val size = tensor.dimension
+        val commonShape = tensor.shape.sliceArray(0 until size - 2)
+        val (n, m) = tensor.shape.sliceArray(size - 2 until size)
+        val uTensor = zeros(commonShape + intArrayOf(min(n, m), n))
+        val sTensor = zeros(commonShape + intArrayOf(min(n, m)))
+        val vTensor = zeros(commonShape + intArrayOf(min(n, m), m))
+
+        tensor.matrixSequence()
+            .zip(uTensor.matrixSequence()
+                .zip(sTensor.vectorSequence()
+                    .zip(vTensor.matrixSequence()))).forEach { (matrix, USV) ->
+                val matrixSize = matrix.shape.reduce { acc, i -> acc * i }
+                val curMatrix = DoubleTensor(
+                    matrix.shape,
+                    matrix.mutableBuffer.array().slice(matrix.bufferStart until matrix.bufferStart + matrixSize)
+                        .toDoubleArray()
+                )
+                svdHelper(curMatrix, USV, m, n, epsilon)
+            }
+
+        return Triple(uTensor.transpose(), sTensor, vTensor.transpose())
+    }
+
+    override fun Tensor<Double>.symEig(): Pair<DoubleTensor, DoubleTensor> =
+        symEig(epsilon = 1e-15)
+
+    /**
+     * Returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices,
+     * represented by a pair (eigenvalues, eigenvectors).
+     *
+     * @param epsilon permissible error when comparing tensors for equality
+     * and when the cosine approaches 1 in the SVD algorithm.
+ * @return a pair (eigenvalues, eigenvectors) + */ + public fun Tensor.symEig(epsilon: Double): Pair { + checkSymmetric(tensor, epsilon) + val (u, s, v) = tensor.svd(epsilon) + val shp = s.shape + intArrayOf(1) + val utv = u.transpose() dot v + val n = s.shape.last() + for (matrix in utv.matrixSequence()) + cleanSymHelper(matrix.as2D(), n) + + val eig = (utv dot s.view(shp)).view(s.shape) + return eig to v + } + + /** + * Computes the determinant of a square matrix input, or of each square matrix in a batched input + * using LU factorization algorithm. + * + * @param epsilon error in the LU algorithm - permissible error when comparing the determinant of a matrix with zero + * @return the determinant. + */ + public fun Tensor.detLU(epsilon: Double = 1e-9): DoubleTensor { + + checkSquareMatrix(tensor.shape) + val luTensor = tensor.copy() + val pivotsTensor = tensor.setUpPivots() + + val n = shape.size + + val detTensorShape = IntArray(n - 1) { i -> shape[i] } + detTensorShape[n - 2] = 1 + val resBuffer = DoubleArray(detTensorShape.reduce(Int::times)) { 0.0 } + + val detTensor = DoubleTensor( + detTensorShape, + resBuffer + ) + + luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).forEachIndexed { index, (lu, pivots) -> + resBuffer[index] = if (luHelper(lu.as2D(), pivots.as1D(), epsilon)) + 0.0 else luMatrixDet(lu.as2D(), pivots.as1D()) + } + + return detTensor + } + + /** + * Computes the multiplicative inverse matrix of a square matrix input, or of each square matrix in a batched input + * using LU factorization algorithm. + * Given a square matrix `a`, return the matrix `aInv` satisfying + * ``a.dot(aInv) = aInv.dot(a) = eye(a.shape[0])``. + * + * @param epsilon error in the LU algorithm - permissible error when comparing the determinant of a matrix with zero + * @return the multiplicative inverse of a matrix. + */ + public fun Tensor.invLU(epsilon: Double = 1e-9): DoubleTensor { + val (luTensor, pivotsTensor) = luFactor(epsilon) + val invTensor = luTensor.zeroesLike() + + val seq = luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).zip(invTensor.matrixSequence()) + for ((luP, invMatrix) in seq) { + val (lu, pivots) = luP + luMatrixInv(lu.as2D(), pivots.as1D(), invMatrix.as2D()) + } + + return invTensor + } + + /** + * LUP decomposition + * + * Computes the LUP decomposition of a matrix or a batch of matrices. + * Given a tensor `input`, return tensors (P, L, U) satisfying ``P * input = L * U``, + * with `P` being a permutation matrix or batch of matrices, + * `L` being a lower triangular matrix or batch of matrices, + * `U` being an upper triangular matrix or batch of matrices. 
+ * + * @param epsilon permissible error when comparing the determinant of a matrix with zero + * @return triple of P, L and U tensors + */ + public fun Tensor.lu(epsilon: Double = 1e-9): Triple { + val (lu, pivots) = this.luFactor(epsilon) + return luPivot(lu, pivots) + } + + override fun Tensor.lu(): Triple = lu(1e-9) + } diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/broadcastUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/broadcastUtils.kt similarity index 94% rename from kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/broadcastUtils.kt rename to kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/broadcastUtils.kt index dfac054b5..6324dc242 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/broadcastUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/broadcastUtils.kt @@ -1,5 +1,11 @@ -package space.kscience.kmath.tensors.core +/* + * Copyright 2018-2021 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. + */ +package space.kscience.kmath.tensors.core.internal + +import space.kscience.kmath.tensors.core.DoubleTensor import kotlin.math.max internal fun multiIndexBroadCasting(tensor: DoubleTensor, resTensor: DoubleTensor, linearSize: Int) { diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/checks.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/checks.kt similarity index 83% rename from kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/checks.kt rename to kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/checks.kt index f8bd5027a..0221c961e 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/checks.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/checks.kt @@ -1,7 +1,12 @@ -package space.kscience.kmath.tensors.core +/* + * Copyright 2018-2021 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
+ */ + +package space.kscience.kmath.tensors.core.internal import space.kscience.kmath.tensors.api.Tensor -import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra +import space.kscience.kmath.tensors.core.DoubleTensor import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra @@ -50,7 +55,7 @@ internal fun DoubleTensorAlgebra.checkSymmetric( "Tensor is not symmetric about the last 2 dimensions at precision $epsilon" } -internal fun DoubleLinearOpsTensorAlgebra.checkPositiveDefinite(tensor: DoubleTensor, epsilon: Double = 1e-6) { +internal fun DoubleTensorAlgebra.checkPositiveDefinite(tensor: DoubleTensor, epsilon: Double = 1e-6) { checkSymmetric(tensor, epsilon) for (mat in tensor.matrixSequence()) check(mat.asTensor().detLU().value() > 0.0) { diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt similarity index 91% rename from kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/linUtils.kt rename to kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 8adbfad39..23909f81e 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -1,12 +1,17 @@ -package space.kscience.kmath.tensors.core +/* + * Copyright 2018-2021 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. + */ + +package space.kscience.kmath.tensors.core.internal import space.kscience.kmath.nd.MutableStructure1D import space.kscience.kmath.nd.MutableStructure2D import space.kscience.kmath.nd.as1D import space.kscience.kmath.nd.as2D import space.kscience.kmath.operations.invoke -import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra -import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra +import space.kscience.kmath.tensors.core.* +import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra import kotlin.math.abs import kotlin.math.min import kotlin.math.sign @@ -114,7 +119,7 @@ internal fun BufferedTensor.setUpPivots(): IntTensor { ) } -internal fun DoubleLinearOpsTensorAlgebra.computeLU( +internal fun DoubleTensorAlgebra.computeLU( tensor: DoubleTensor, epsilon: Double ): Pair? 
{ @@ -218,7 +223,7 @@ internal fun luMatrixInv( } } -internal fun DoubleLinearOpsTensorAlgebra.qrHelper( +internal fun DoubleTensorAlgebra.qrHelper( matrix: DoubleTensor, q: DoubleTensor, r: MutableStructure2D @@ -241,14 +246,14 @@ internal fun DoubleLinearOpsTensorAlgebra.qrHelper( } } } - r[j, j] = DoubleAnalyticTensorAlgebra { (v dot v).sqrt().value() } + r[j, j] = DoubleTensorAlgebra { (v dot v).sqrt().value() } for (i in 0 until n) { qM[i, j] = vv[i] / r[j, j] } } } -internal fun DoubleLinearOpsTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double = 1e-10): DoubleTensor { +internal fun DoubleTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double = 1e-10): DoubleTensor { val (n, m) = a.shape var v: DoubleTensor val b: DoubleTensor @@ -264,7 +269,7 @@ internal fun DoubleLinearOpsTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double while (true) { lastV = v v = b.dot(lastV) - val norm = DoubleAnalyticTensorAlgebra { (v dot v).sqrt().value() } + val norm = DoubleTensorAlgebra { (v dot v).sqrt().value() } v = v.times(1.0 / norm) if (abs(v.dot(lastV).value()) > 1 - epsilon) { return v @@ -272,7 +277,7 @@ internal fun DoubleLinearOpsTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double } } -internal fun DoubleLinearOpsTensorAlgebra.svdHelper( +internal fun DoubleTensorAlgebra.svdHelper( matrix: DoubleTensor, USV: Pair, Pair, BufferedTensor>>, m: Int, n: Int, epsilon: Double @@ -298,12 +303,12 @@ internal fun DoubleLinearOpsTensorAlgebra.svdHelper( if (n > m) { v = svd1d(a, epsilon) u = matrix.dot(v) - norm = DoubleAnalyticTensorAlgebra { (u dot u).sqrt().value() } + norm = DoubleTensorAlgebra { (u dot u).sqrt().value() } u = u.times(1.0 / norm) } else { u = svd1d(a, epsilon) v = matrix.transpose(0, 1).dot(u) - norm = DoubleAnalyticTensorAlgebra { (v dot v).sqrt().value() } + norm = DoubleTensorAlgebra { (v dot v).sqrt().value() } v = v.times(1.0 / norm) } diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorCastsUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/tensorCastsUtils.kt similarity index 88% rename from kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorCastsUtils.kt rename to kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/tensorCastsUtils.kt index 70e3b9c61..67cb0b842 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorCastsUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/tensorCastsUtils.kt @@ -3,11 +3,14 @@ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
*/ -package space.kscience.kmath.tensors.core +package space.kscience.kmath.tensors.core.internal import space.kscience.kmath.nd.MutableBufferND import space.kscience.kmath.structures.asMutableBuffer import space.kscience.kmath.tensors.api.Tensor +import space.kscience.kmath.tensors.core.BufferedTensor +import space.kscience.kmath.tensors.core.DoubleTensor +import space.kscience.kmath.tensors.core.IntTensor import space.kscience.kmath.tensors.core.algebras.TensorLinearStructure internal fun BufferedTensor.asTensor(): IntTensor = diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/utils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/utils.kt similarity index 91% rename from kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/utils.kt rename to kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/utils.kt index 0211342bb..0ffaf39e7 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/utils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/utils.kt @@ -1,9 +1,16 @@ -package space.kscience.kmath.tensors.core +/* + * Copyright 2018-2021 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. + */ + +package space.kscience.kmath.tensors.core.internal import space.kscience.kmath.nd.as1D import space.kscience.kmath.samplers.GaussianSampler import space.kscience.kmath.stat.RandomGenerator import space.kscience.kmath.structures.* +import space.kscience.kmath.tensors.core.BufferedTensor +import space.kscience.kmath.tensors.core.DoubleTensor import kotlin.math.* /** diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorCasts.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorCasts.kt index 2743a5218..814a1bb9b 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorCasts.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorCasts.kt @@ -6,6 +6,7 @@ package space.kscience.kmath.tensors.core import space.kscience.kmath.tensors.api.Tensor +import space.kscience.kmath.tensors.core.internal.tensor /** * Casts [Tensor] to [DoubleTensor] diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestBroadcasting.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestBroadcasting.kt index 6e3b4df60..80c7ab13a 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestBroadcasting.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestBroadcasting.kt @@ -3,6 +3,7 @@ package space.kscience.kmath.tensors.core import space.kscience.kmath.operations.invoke import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra +import space.kscience.kmath.tensors.core.internal.* import kotlin.test.Test import kotlin.test.assertTrue diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt index 3ea19da26..4dcc367ca 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt +++ 
b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt @@ -1,7 +1,6 @@ package space.kscience.kmath.tensors.core import space.kscience.kmath.operations.invoke -import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra import kotlin.math.* import kotlin.test.Test @@ -28,73 +27,73 @@ internal class TestDoubleAnalyticTensorAlgebra { } @Test - fun testExp() = DoubleAnalyticTensorAlgebra { + fun testExp() = DoubleTensorAlgebra { assertTrue { tensor.exp() eq expectedTensor(::exp) } } @Test - fun testLog() = DoubleAnalyticTensorAlgebra { + fun testLog() = DoubleTensorAlgebra { assertTrue { tensor.ln() eq expectedTensor(::ln) } } @Test - fun testSqrt() = DoubleAnalyticTensorAlgebra { + fun testSqrt() = DoubleTensorAlgebra { assertTrue { tensor.sqrt() eq expectedTensor(::sqrt) } } @Test - fun testCos() = DoubleAnalyticTensorAlgebra { + fun testCos() = DoubleTensorAlgebra { assertTrue { tensor.cos() eq expectedTensor(::cos) } } @Test - fun testCosh() = DoubleAnalyticTensorAlgebra { + fun testCosh() = DoubleTensorAlgebra { assertTrue { tensor.cosh() eq expectedTensor(::cosh) } } @Test - fun testAcosh() = DoubleAnalyticTensorAlgebra { + fun testAcosh() = DoubleTensorAlgebra { assertTrue { tensor.acosh() eq expectedTensor(::acosh) } } @Test - fun testSin() = DoubleAnalyticTensorAlgebra { + fun testSin() = DoubleTensorAlgebra { assertTrue { tensor.sin() eq expectedTensor(::sin) } } @Test - fun testSinh() = DoubleAnalyticTensorAlgebra { + fun testSinh() = DoubleTensorAlgebra { assertTrue { tensor.sinh() eq expectedTensor(::sinh) } } @Test - fun testAsinh() = DoubleAnalyticTensorAlgebra { + fun testAsinh() = DoubleTensorAlgebra { assertTrue { tensor.asinh() eq expectedTensor(::asinh) } } @Test - fun testTan() = DoubleAnalyticTensorAlgebra { + fun testTan() = DoubleTensorAlgebra { assertTrue { tensor.tan() eq expectedTensor(::tan) } } @Test - fun testAtan() = DoubleAnalyticTensorAlgebra { + fun testAtan() = DoubleTensorAlgebra { assertTrue { tensor.atan() eq expectedTensor(::atan) } } @Test - fun testTanh() = DoubleAnalyticTensorAlgebra { + fun testTanh() = DoubleTensorAlgebra { assertTrue { tensor.tanh() eq expectedTensor(::tanh) } } @Test - fun testCeil() = DoubleAnalyticTensorAlgebra { + fun testCeil() = DoubleTensorAlgebra { assertTrue { tensor.ceil() eq expectedTensor(::ceil) } } @Test - fun testFloor() = DoubleAnalyticTensorAlgebra { + fun testFloor() = DoubleTensorAlgebra { assertTrue { tensor.floor() eq expectedTensor(::floor) } } @@ -145,7 +144,7 @@ internal class TestDoubleAnalyticTensorAlgebra { } @Test - fun testMean() = DoubleAnalyticTensorAlgebra { + fun testMean() = DoubleTensorAlgebra { assertTrue { tensor2.mean() == 1.0 } assertTrue { tensor2.mean(0, true) eq fromArray( intArrayOf(1, 2), diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index 65070af7f..77748b15e 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -1,7 +1,9 @@ package space.kscience.kmath.tensors.core import space.kscience.kmath.operations.invoke -import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra +import 
space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra +import space.kscience.kmath.tensors.core.internal.array +import space.kscience.kmath.tensors.core.internal.svd1d import kotlin.math.abs import kotlin.test.Test import kotlin.test.assertEquals @@ -10,7 +12,7 @@ import kotlin.test.assertTrue internal class TestDoubleLinearOpsTensorAlgebra { @Test - fun testDetLU() = DoubleLinearOpsTensorAlgebra { + fun testDetLU() = DoubleTensorAlgebra { val tensor = fromArray( intArrayOf(2, 2, 2), doubleArrayOf( @@ -35,7 +37,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { } @Test - fun testDet() = DoubleLinearOpsTensorAlgebra { + fun testDet() = DoubleTensorAlgebra { val expectedValue = 0.019827417 val m = fromArray( intArrayOf(3, 3), doubleArrayOf( @@ -49,7 +51,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { } @Test - fun testDetSingle() = DoubleLinearOpsTensorAlgebra { + fun testDetSingle() = DoubleTensorAlgebra { val expectedValue = 48.151623 val m = fromArray( intArrayOf(1, 1), doubleArrayOf( @@ -61,7 +63,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { } @Test - fun testInvLU() = DoubleLinearOpsTensorAlgebra { + fun testInvLU() = DoubleTensorAlgebra { val tensor = fromArray( intArrayOf(2, 2, 2), doubleArrayOf( @@ -86,14 +88,14 @@ internal class TestDoubleLinearOpsTensorAlgebra { } @Test - fun testScalarProduct() = DoubleLinearOpsTensorAlgebra { + fun testScalarProduct() = DoubleTensorAlgebra { val a = fromArray(intArrayOf(3), doubleArrayOf(1.8, 2.5, 6.8)) val b = fromArray(intArrayOf(3), doubleArrayOf(5.5, 2.6, 6.4)) assertEquals(a.dot(b).value(), 59.92) } @Test - fun testQR() = DoubleLinearOpsTensorAlgebra { + fun testQR() = DoubleTensorAlgebra { val shape = intArrayOf(2, 2, 2) val buffer = doubleArrayOf( 1.0, 3.0, @@ -114,7 +116,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { } @Test - fun testLU() = DoubleLinearOpsTensorAlgebra { + fun testLU() = DoubleTensorAlgebra { val shape = intArrayOf(2, 2, 2) val buffer = doubleArrayOf( 1.0, 3.0, @@ -134,7 +136,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { } @Test - fun testCholesky() = DoubleLinearOpsTensorAlgebra { + fun testCholesky() = DoubleTensorAlgebra { val tensor = randomNormal(intArrayOf(2, 5, 5), 0) val sigma = (tensor dot tensor.transpose()) + diagonalEmbedding( fromArray(intArrayOf(2, 5), DoubleArray(10) { 0.1 }) @@ -145,7 +147,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { } @Test - fun testSVD1D() = DoubleLinearOpsTensorAlgebra { + fun testSVD1D() = DoubleTensorAlgebra { val tensor2 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)) val res = svd1d(tensor2) @@ -156,13 +158,13 @@ internal class TestDoubleLinearOpsTensorAlgebra { } @Test - fun testSVD() = DoubleLinearOpsTensorAlgebra{ + fun testSVD() = DoubleTensorAlgebra{ testSVDFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) } @Test - fun testBatchedSVD() = DoubleLinearOpsTensorAlgebra { + fun testBatchedSVD() = DoubleTensorAlgebra { val tensor = randomNormal(intArrayOf(2, 5, 3), 0) val (tensorU, tensorS, tensorV) = tensor.svd() val tensorSVD = tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose()) @@ -170,7 +172,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { } @Test - fun testBatchedSymEig() = DoubleLinearOpsTensorAlgebra { + fun testBatchedSymEig() = DoubleTensorAlgebra { val tensor = randomNormal(shape = intArrayOf(2, 3, 3), 0) val tensorSigma = tensor + tensor.transpose() val (tensorS, 
tensorV) = tensorSigma.symEig() @@ -182,7 +184,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { } -private fun DoubleLinearOpsTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double = 1e-10): Unit { +private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double = 1e-10): Unit { val svd = tensor.svd() val tensorSVD = svd.first diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt index 132735cc7..c4f9f94b0 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt @@ -8,6 +8,10 @@ import space.kscience.kmath.operations.invoke import space.kscience.kmath.structures.DoubleBuffer import space.kscience.kmath.structures.toDoubleArray import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra +import space.kscience.kmath.tensors.core.internal.array +import space.kscience.kmath.tensors.core.internal.asTensor +import space.kscience.kmath.tensors.core.internal.matrixSequence +import space.kscience.kmath.tensors.core.internal.toBufferedTensor import kotlin.test.Test import kotlin.test.assertEquals import kotlin.test.assertTrue diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt index d782d78d9..ed858259e 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt @@ -3,6 +3,7 @@ package space.kscience.kmath.tensors.core import space.kscience.kmath.operations.invoke import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra +import space.kscience.kmath.tensors.core.internal.array import kotlin.test.Test import kotlin.test.assertFalse import kotlin.test.assertTrue
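Because the linear-algebra entry points moved into `DoubleTensorAlgebra` wholesale, call sites only need the context swap shown in the tests above. Below is a hedged end-to-end sketch of the merged surface, using only operations exercised by these tests (`lu`, `det`, `inv`, `svd`, `diagonalEmbedding`); it is illustrative rather than part of the patch.

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra

fun main() = DoubleTensorAlgebra {
    val m = fromArray(
        intArrayOf(2, 2),
        doubleArrayOf(
            4.0, 3.0,
            6.0, 3.0
        )
    )

    // LUP decomposition: p dot m should equal l dot u
    val (p, l, u) = m.lu()
    println("P dot M:\n${p dot m}")
    println("L dot U:\n${l dot u}")

    // determinant and inverse use the same LU machinery
    println("det = ${m.det().value()}") // -6.0 for this matrix
    println("M dot M^-1:\n${m dot m.inv()}") // approximately the identity

    // SVD reconstruction, as exercised by testBatchedSVD above
    val (tensorU, tensorS, tensorV) = m.svd()
    println("U S V^T:\n${tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose())}")
}
```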