forked from kscience/kmath
Merging implementations together
parent 14ca7cdd31
commit 0920e21d62
@@ -7,22 +7,22 @@ package space.kscience.kmath.tensors

 import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra

 // Dataset normalization

 fun main() {

-    // work in context with analytic methods
-    DoubleAnalyticTensorAlgebra {
+    // work in context with broadcast methods
+    BroadcastDoubleTensorAlgebra {
         // take dataset of 5-element vectors from normal distribution
         val dataset = randomNormal(intArrayOf(100, 5)) * 1.5 // all elements from N(0, 1.5)
-        BroadcastDoubleTensorAlgebra {
-            dataset += fromArray(
-                intArrayOf(5),
-                doubleArrayOf(0.0, 1.0, 1.5, 3.0, 5.0) // rows means
-            )
-        }
+        dataset += fromArray(
+            intArrayOf(5),
+            doubleArrayOf(0.0, 1.0, 1.5, 3.0, 5.0) // rows means
+        )

         // find out mean and standard deviation of each column
         val mean = dataset.mean(0, false)
@@ -36,7 +36,7 @@ fun main() {
         println("Maximum:\n${dataset.max(0, false)}")

         // now we can scale dataset with mean normalization
-        val datasetScaled = BroadcastDoubleTensorAlgebra { (dataset - mean) / std }
+        val datasetScaled = (dataset - mean) / std

         // find out mean and std of scaled dataset

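With the merged algebra, the whole normalization example fits in one `BroadcastDoubleTensorAlgebra` block: the broadcasting arithmetic and the analytic `mean`/`std` calls now come from the same context. A minimal sketch of the resulting flow, using only calls that appear in this diff (the final println is illustrative):

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra

fun main() = BroadcastDoubleTensorAlgebra {
    // dataset of 100 five-element vectors, elements from N(0, 1.5)
    val dataset = randomNormal(intArrayOf(100, 5)) * 1.5
    // shift each column by its own mean; += broadcasts the (5)-shaped tensor over rows
    dataset += fromArray(intArrayOf(5), doubleArrayOf(0.0, 1.0, 1.5, 3.0, 5.0))

    val mean = dataset.mean(0, false) // column means
    val std = dataset.std(0, false)   // column standard deviations
    val datasetScaled = (dataset - mean) / std // mean normalization in one expression
    println("Mean of scaled dataset:\n${datasetScaled.mean(0, false)}")
}
```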
@@ -7,14 +7,14 @@ package space.kscience.kmath.tensors

 import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.tensors.core.DoubleTensor
-import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra
+import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra

 // solving linear system with LUP decomposition

 fun main () {

     // work in context with linear operations
-    DoubleLinearOpsTensorAlgebra {
+    BroadcastDoubleTensorAlgebra {

         // set true value of x
         val trueX = fromArray(
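`BroadcastDoubleTensorAlgebra` now exposes the LU-based routines directly, which is why the LUP example can drop its dedicated linear-ops context. A small sketch of those members in use (the 2x2 system is made up, and solving through `inv()` is illustrative rather than the example's own code):

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra

fun main() = BroadcastDoubleTensorAlgebra {
    // A x = b with a small invertible A
    val a = fromArray(intArrayOf(2, 2), doubleArrayOf(4.0, 3.0, 6.0, 3.0))
    val b = fromArray(intArrayOf(2), doubleArrayOf(10.0, 12.0))

    val (p, l, u) = a.lu() // LUP decomposition: P * A = L * U
    println("P:\n$p\nL:\n$l\nU:\n$u")

    val x = a.inv() dot b  // inverse also comes from the merged algebra
    println("x:\n$x")
}
```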
@@ -8,7 +8,6 @@ package space.kscience.kmath.tensors

 import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.tensors.core.DoubleTensor
 import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra
 import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra
 import space.kscience.kmath.tensors.core.toDoubleArray
 import kotlin.math.sqrt
@@ -48,7 +47,7 @@ fun reluDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
 // activation layer with relu activator
 class ReLU : Activation(::relu, ::reluDer)

-fun sigmoid(x: DoubleTensor): DoubleTensor = DoubleAnalyticTensorAlgebra {
+fun sigmoid(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
     1.0 / (1.0 + (-x).exp())
 }

@@ -83,9 +82,7 @@ class Dense(
         val gradInput = outputError dot weights.transpose()

         val gradW = input.transpose() dot outputError
-        val gradBias = DoubleAnalyticTensorAlgebra {
-            outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble()
-        }
+        val gradBias = outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble()

         weights -= learningRate * gradW
         bias -= learningRate * gradBias
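The bias gradient no longer needs a nested analytic context: `mean` over the batch dimension times the batch size is just the column-wise sum of the per-sample errors. A sketch of that identity with made-up numbers:

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra

fun main() = DoubleTensorAlgebra {
    // outputError with shape (batch = 2, outputs = 3)
    val outputError = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
    // mean over dim 0 times the batch size equals the column-wise sum of errors
    val gradBias = outputError.mean(dim = 0, keepDim = false) * outputError.shape[0].toDouble()
    println(gradBias) // column sums: expect 5.0, 7.0, 9.0
}
```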
@@ -110,7 +107,7 @@ fun accuracy(yPred: DoubleTensor, yTrue: DoubleTensor): Double {

 // neural network class
 class NeuralNetwork(private val layers: List<Layer>) {
-    private fun softMaxLoss(yPred: DoubleTensor, yTrue: DoubleTensor): DoubleTensor = DoubleAnalyticTensorAlgebra {
+    private fun softMaxLoss(yPred: DoubleTensor, yTrue: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra {

         val onesForAnswers = yPred.zeroesLike()
         yTrue.toDoubleArray().forEachIndexed { index, labelDouble ->
@@ -118,7 +115,7 @@ class NeuralNetwork(private val layers: List<Layer>) {
             onesForAnswers[intArrayOf(index, label)] = 1.0
         }

-        val softmaxValue = BroadcastDoubleTensorAlgebra { yPred.exp() / yPred.exp().sum(dim = 1, keepDim = true) }
+        val softmaxValue = yPred.exp() / yPred.exp().sum(dim = 1, keepDim = true)

         (-onesForAnswers + softmaxValue) / (yPred.shape[0].toDouble())
     }
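With broadcasting provided by the enclosing `BroadcastDoubleTensorAlgebra`, the row-wise softmax is a single expression: a `(rows, cols)` tensor divided by its `(rows, 1)` row sums. A standalone sketch of that line (the logits are made up):

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra

fun main() = BroadcastDoubleTensorAlgebra {
    val logits = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 1.0, 1.0, 1.0))
    // keepDim = true keeps the summed axis so the division broadcasts row-wise
    val softmax = logits.exp() / logits.exp().sum(dim = 1, keepDim = true)
    println(softmax.sum(dim = 1, keepDim = false)) // every row now sums to 1.0
}
```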
@@ -177,10 +174,9 @@ class NeuralNetwork(private val layers: List<Layer>) {
     }

-
 @OptIn(ExperimentalStdlibApi::class)
 fun main() {
-    DoubleTensorAlgebra {
+    BroadcastDoubleTensorAlgebra {
         val features = 5
         val sampleSize = 250
         val trainSize = 180
@@ -188,12 +184,12 @@ fun main() {

         // take sample of features from normal distribution
         val x = randomNormal(intArrayOf(sampleSize, features), seed) * 2.5
-        BroadcastDoubleTensorAlgebra {
-            x += fromArray(
-                intArrayOf(5),
-                doubleArrayOf(0.0, -1.0, -2.5, -3.0, 5.5) // rows means
-            )
-        }
+        x += fromArray(
+            intArrayOf(5),
+            doubleArrayOf(0.0, -1.0, -2.5, -3.0, 5.5) // rows means
+        )

         // define class like '1' if the sum of features > 0 and '0' otherwise
         val y = fromArray(
@@ -7,8 +7,7 @@ package space.kscience.kmath.tensors

 import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.tensors.core.DoubleTensor
-import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra
-import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra
+import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra

 import kotlin.math.abs

@@ -19,7 +18,7 @@ fun main() {
     val randSeed = 100500L

     // work in context with linear operations
-    DoubleLinearOpsTensorAlgebra {
+    DoubleTensorAlgebra {
         // take coefficient vector from normal distribution
         val alpha = randomNormal(
             intArrayOf(5),
@@ -56,12 +55,12 @@ fun main() {
                "$alphaOLS")

        // figure out MSE of approximation
-        fun mse(yTrue: DoubleTensor, yPred: DoubleTensor): Double = DoubleAnalyticTensorAlgebra{
+        fun mse(yTrue: DoubleTensor, yPred: DoubleTensor): Double {
            require(yTrue.shape.size == 1)
            require(yTrue.shape contentEquals yPred.shape)

            val diff = yTrue - yPred
-            diff.dot(diff).sqrt().value()
+            return diff.dot(diff).sqrt().value()
        }

        println("MSE: ${mse(alpha, alphaOLS)}")
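Since `sqrt` and `value` are regular members of the merged algebra, `mse` could equally stay an expression-bodied function invoked against `DoubleTensorAlgebra` directly. A sketch of that alternative shape (note the computed quantity is the Euclidean norm of the residual, as in the example above):

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensor
import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra

fun mse(yTrue: DoubleTensor, yPred: DoubleTensor): Double = DoubleTensorAlgebra {
    require(yTrue.shape.size == 1)
    require(yTrue.shape contentEquals yPred.shape)
    val diff = yTrue - yPred
    diff.dot(diff).sqrt().value() // the block's result becomes the return value
}
```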
@@ -7,9 +7,6 @@ package space.kscience.kmath.tensors

 import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra
-import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra
-


 // simple PCA
@@ -17,8 +14,8 @@ import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra
 fun main(){
     val seed = 100500L

-    // work in context with analytic methods
-    DoubleAnalyticTensorAlgebra {
+    // work in context with broadcast methods
+    BroadcastDoubleTensorAlgebra {

         // assume x is range from 0 until 10
         val x = fromArray(
@@ -63,7 +60,7 @@ fun main(){
         println("Covariance matrix:\n$covMatrix")

         // and find out eigenvector of it
-        val (_, evecs) = DoubleLinearOpsTensorAlgebra {covMatrix.symEig()}
+        val (_, evecs) = covMatrix.symEig()
         val v = evecs[0]
         println("Eigenvector:\n$v")

@@ -74,7 +71,7 @@ fun main(){
         // we can restore original data from reduced data.
         // for example, find 7th element of dataset
         val n = 7
-        val restored = BroadcastDoubleTensorAlgebra{(datasetReduced[n] dot v.view(intArrayOf(1, 2))) * std + mean}
+        val restored = (datasetReduced[n] dot v.view(intArrayOf(1, 2))) * std + mean
         println("Original value:\n${dataset[n]}")
         println("Restored value:\n$restored")
     }
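The PCA example now runs `cov`, `symEig`, and the broadcasted restore inside one `BroadcastDoubleTensorAlgebra` block. A condensed sketch of the covariance/eigenvector part, with made-up, perfectly correlated columns:

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra

fun main() = BroadcastDoubleTensorAlgebra {
    val x = fromArray(intArrayOf(5), doubleArrayOf(0.0, 1.0, 2.0, 3.0, 4.0))
    val y = fromArray(intArrayOf(5), doubleArrayOf(1.0, 3.0, 5.0, 7.0, 9.0))
    val covMatrix = cov(listOf(x, y))       // 2x2 covariance matrix
    val (evals, evecs) = covMatrix.symEig() // eigen-decomposition, same context
    println("Eigenvalues:\n$evals")
    println("First eigenvector:\n${evecs[0]}")
}
```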
@@ -11,8 +11,7 @@ package space.kscience.kmath.tensors.api
 *
 * @param T the type of items closed under analytic functions in the tensors.
 */
-public interface AnalyticTensorAlgebra<T> :
-    TensorPartialDivisionAlgebra<T> {
+public interface AnalyticTensorAlgebra<T> : TensorPartialDivisionAlgebra<T> {

     /**
      * @return the mean of all elements in the input tensor.
@@ -10,8 +10,7 @@ package space.kscience.kmath.tensors.api
 *
 * @param T the type of items closed under division in the tensors.
 */
-public interface LinearOpsTensorAlgebra<T> :
-    TensorPartialDivisionAlgebra<T> {
+public interface LinearOpsTensorAlgebra<T> : TensorPartialDivisionAlgebra<T> {

     /**
      * Computes the determinant of a square matrix input, or of each square matrix in a batched input.
@@ -11,8 +11,7 @@ package space.kscience.kmath.tensors.api
 *
 * @param T the type of items closed under division in the tensors.
 */
-public interface TensorPartialDivisionAlgebra<T> :
-    TensorAlgebra<T> {
+public interface TensorPartialDivisionAlgebra<T> : TensorAlgebra<T> {

     /**
      * Each element of the tensor [other] is divided by this value.
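These three declaration cleanups leave the API interfaces in a single chain, which is what lets one implementation class absorb everything. A schematic of the hierarchy as it stands after this commit (member declarations elided):

```kotlin
// schematic only; member declarations elided
public interface TensorAlgebra<T>
public interface TensorPartialDivisionAlgebra<T> : TensorAlgebra<T>
public interface AnalyticTensorAlgebra<T> : TensorPartialDivisionAlgebra<T>
public interface LinearOpsTensorAlgebra<T> : TensorPartialDivisionAlgebra<T>
// DoubleTensorAlgebra (below) implements AnalyticTensorAlgebra<Double>
// and LinearOpsTensorAlgebra<Double> in one class
```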
@@ -6,6 +6,7 @@
 package space.kscience.kmath.tensors.core

 import space.kscience.kmath.structures.DoubleBuffer
+import space.kscience.kmath.tensors.core.internal.toPrettyString

 /**
  * Default [BufferedTensor] implementation for [Double] values
@@ -7,8 +7,10 @@ package space.kscience.kmath.tensors.core.algebras

 import space.kscience.kmath.tensors.api.Tensor
 import space.kscience.kmath.tensors.core.*
-import space.kscience.kmath.tensors.core.broadcastTensors
-import space.kscience.kmath.tensors.core.broadcastTo
+import space.kscience.kmath.tensors.core.internal.array
+import space.kscience.kmath.tensors.core.internal.broadcastTensors
+import space.kscience.kmath.tensors.core.internal.broadcastTo
+import space.kscience.kmath.tensors.core.internal.tensor

 /**
  * Basic linear algebra operations implemented with broadcasting.
@@ -1,116 +0,0 @@
-/*
- * Copyright 2018-2021 KMath contributors.
- * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
- */
-
-package space.kscience.kmath.tensors.core.algebras
-
-import space.kscience.kmath.tensors.api.AnalyticTensorAlgebra
-import space.kscience.kmath.tensors.api.Tensor
-import space.kscience.kmath.tensors.core.DoubleTensor
-import space.kscience.kmath.tensors.core.tensor
-import kotlin.math.*
-
-public object DoubleAnalyticTensorAlgebra :
-    AnalyticTensorAlgebra<Double>,
-    DoubleTensorAlgebra() {
-
-    override fun Tensor<Double>.mean(): Double = this.fold { it.sum() / tensor.numElements }
-
-    override fun Tensor<Double>.mean(dim: Int, keepDim: Boolean): DoubleTensor =
-        foldDim(
-            { arr ->
-                check(dim < dimension) { "Dimension $dim out of range $dimension" }
-                arr.sum() / shape[dim]
-            },
-            dim,
-            keepDim
-        )
-
-    override fun Tensor<Double>.std(): Double = this.fold { arr ->
-        val mean = arr.sum() / tensor.numElements
-        sqrt(arr.sumOf { (it - mean) * (it - mean) } / (tensor.numElements - 1))
-    }
-
-    override fun Tensor<Double>.std(dim: Int, keepDim: Boolean): DoubleTensor = foldDim(
-        { arr ->
-            check(dim < dimension) { "Dimension $dim out of range $dimension" }
-            val mean = arr.sum() / shape[dim]
-            sqrt(arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1))
-        },
-        dim,
-        keepDim
-    )
-
-    override fun Tensor<Double>.variance(): Double = this.fold { arr ->
-        val mean = arr.sum() / tensor.numElements
-        arr.sumOf { (it - mean) * (it - mean) } / (tensor.numElements - 1)
-    }
-
-    override fun Tensor<Double>.variance(dim: Int, keepDim: Boolean): DoubleTensor = foldDim(
-        { arr ->
-            check(dim < dimension) { "Dimension $dim out of range $dimension" }
-            val mean = arr.sum() / shape[dim]
-            arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1)
-        },
-        dim,
-        keepDim
-    )
-
-    private fun cov(x: DoubleTensor, y:DoubleTensor): Double{
-        val n = x.shape[0]
-        return ((x - x.mean()) * (y - y.mean())).mean() * n / (n - 1)
-    }
-
-    override fun cov(tensors: List<Tensor<Double>>): DoubleTensor {
-        check(tensors.isNotEmpty()) { "List must have at least 1 element" }
-        val n = tensors.size
-        val m = tensors[0].shape[0]
-        check(tensors.all { it.shape contentEquals intArrayOf(m) }) { "Tensors must have same shapes" }
-        val resTensor = DoubleTensor(
-            intArrayOf(n, n),
-            DoubleArray(n * n) {0.0}
-        )
-        for (i in 0 until n){
-            for (j in 0 until n){
-                resTensor[intArrayOf(i, j)] = cov(tensors[i].tensor, tensors[j].tensor)
-            }
-        }
-        return resTensor
-    }
-
-    override fun Tensor<Double>.exp(): DoubleTensor = tensor.map(::exp)
-
-    override fun Tensor<Double>.ln(): DoubleTensor = tensor.map(::ln)
-
-    override fun Tensor<Double>.sqrt(): DoubleTensor = tensor.map(::sqrt)
-
-    override fun Tensor<Double>.cos(): DoubleTensor = tensor.map(::cos)
-
-    override fun Tensor<Double>.acos(): DoubleTensor = tensor.map(::acos)
-
-    override fun Tensor<Double>.cosh(): DoubleTensor = tensor.map(::cosh)
-
-    override fun Tensor<Double>.acosh(): DoubleTensor = tensor.map(::acosh)
-
-    override fun Tensor<Double>.sin(): DoubleTensor = tensor.map(::sin)
-
-    override fun Tensor<Double>.asin(): DoubleTensor = tensor.map(::asin)
-
-    override fun Tensor<Double>.sinh(): DoubleTensor = tensor.map(::sinh)
-
-    override fun Tensor<Double>.asinh(): DoubleTensor = tensor.map(::asinh)
-
-    override fun Tensor<Double>.tan(): DoubleTensor = tensor.map(::tan)
-
-    override fun Tensor<Double>.atan(): DoubleTensor = tensor.map(::atan)
-
-    override fun Tensor<Double>.tanh(): DoubleTensor = tensor.map(::tanh)
-
-    override fun Tensor<Double>.atanh(): DoubleTensor = tensor.map(::atanh)
-
-    override fun Tensor<Double>.ceil(): DoubleTensor = tensor.map(::ceil)
-
-    override fun Tensor<Double>.floor(): DoubleTensor = tensor.map(::floor)
-
-}
@@ -1,278 +0,0 @@
-/*
- * Copyright 2018-2021 KMath contributors.
- * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
- */
-
-package space.kscience.kmath.tensors.core.algebras
-
-import space.kscience.kmath.tensors.api.LinearOpsTensorAlgebra
-import space.kscience.kmath.nd.as1D
-import space.kscience.kmath.nd.as2D
-import space.kscience.kmath.tensors.api.Tensor
-import space.kscience.kmath.tensors.core.*
-import space.kscience.kmath.tensors.core.checkSquareMatrix
-import space.kscience.kmath.tensors.core.choleskyHelper
-import space.kscience.kmath.tensors.core.cleanSymHelper
-import space.kscience.kmath.tensors.core.luHelper
-import space.kscience.kmath.tensors.core.luMatrixDet
-import space.kscience.kmath.tensors.core.luMatrixInv
-import space.kscience.kmath.tensors.core.luPivotHelper
-import space.kscience.kmath.tensors.core.pivInit
-import kotlin.math.min
-
-/**
- * Implementation of common linear algebra operations on double numbers.
- * Implements the LinearOpsTensorAlgebra<Double> interface.
- */
-public object DoubleLinearOpsTensorAlgebra :
-    LinearOpsTensorAlgebra<Double>,
-    DoubleTensorAlgebra() {
-
-    override fun Tensor<Double>.inv(): DoubleTensor = invLU(1e-9)
-
-    override fun Tensor<Double>.det(): DoubleTensor = detLU(1e-9)
-
-    /**
-     * Computes the LU factorization of a matrix or batches of matrices `input`.
-     * Returns a tuple containing the LU factorization and pivots of `input`.
-     *
-     * @param epsilon permissible error when comparing the determinant of a matrix with zero
-     * @return pair of `factorization` and `pivots`.
-     * The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor.
-     * The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows.
-     */
-    public fun Tensor<Double>.luFactor(epsilon: Double): Pair<DoubleTensor, IntTensor> =
-        computeLU(tensor, epsilon)
-            ?: throw IllegalArgumentException("Tensor contains matrices which are singular at precision $epsilon")
-
-    /**
-     * Computes the LU factorization of a matrix or batches of matrices `input`.
-     * Returns a tuple containing the LU factorization and pivots of `input`.
-     * Uses an error of ``1e-9`` when calculating whether a matrix is degenerate.
-     *
-     * @return pair of `factorization` and `pivots`.
-     * The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor.
-     * The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows.
-     */
-    public fun Tensor<Double>.luFactor(): Pair<DoubleTensor, IntTensor> = luFactor(1e-9)
-
-    /**
-     * Unpacks the data and pivots from a LU factorization of a tensor.
-     * Given a tensor [luTensor], return tensors (P, L, U) satisfying ``P * luTensor = L * U``,
-     * with `P` being a permutation matrix or batch of matrices,
-     * `L` being a lower triangular matrix or batch of matrices,
-     * `U` being an upper triangular matrix or batch of matrices.
-     *
-     * @param luTensor the packed LU factorization data
-     * @param pivotsTensor the packed LU factorization pivots
-     * @return triple of P, L and U tensors
-     */
-    public fun luPivot(
-        luTensor: Tensor<Double>,
-        pivotsTensor: Tensor<Int>
-    ): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
-        checkSquareMatrix(luTensor.shape)
-        check(
-            luTensor.shape.dropLast(2).toIntArray() contentEquals pivotsTensor.shape.dropLast(1).toIntArray() ||
-                    luTensor.shape.last() == pivotsTensor.shape.last() - 1
-        ) { "Inappropriate shapes of input tensors" }
-
-        val n = luTensor.shape.last()
-        val pTensor = luTensor.zeroesLike()
-        pTensor
-            .matrixSequence()
-            .zip(pivotsTensor.tensor.vectorSequence())
-            .forEach { (p, pivot) -> pivInit(p.as2D(), pivot.as1D(), n) }
-
-        val lTensor = luTensor.zeroesLike()
-        val uTensor = luTensor.zeroesLike()
-
-        lTensor.matrixSequence()
-            .zip(uTensor.matrixSequence())
-            .zip(luTensor.tensor.matrixSequence())
-            .forEach { (pairLU, lu) ->
-                val (l, u) = pairLU
-                luPivotHelper(l.as2D(), u.as2D(), lu.as2D(), n)
-            }
-
-        return Triple(pTensor, lTensor, uTensor)
-    }
-
-    /**
-     * QR decomposition.
-     *
-     * Computes the QR decomposition of a matrix or a batch of matrices, and returns a pair `(Q, R)` of tensors.
-     * Given a tensor `input`, return tensors (Q, R) satisfying ``input = Q * R``,
-     * with `Q` being an orthogonal matrix or batch of orthogonal matrices
-     * and `R` being an upper triangular matrix or batch of upper triangular matrices.
-     *
-     * @param epsilon permissible error when comparing tensors for equality.
-     * Used when checking the positive definiteness of the input matrix or matrices.
-     * @return pair of Q and R tensors.
-     */
-    public fun Tensor<Double>.cholesky(epsilon: Double): DoubleTensor {
-        checkSquareMatrix(shape)
-        checkPositiveDefinite(tensor, epsilon)
-
-        val n = shape.last()
-        val lTensor = zeroesLike()
-
-        for ((a, l) in tensor.matrixSequence().zip(lTensor.matrixSequence()))
-            for (i in 0 until n) choleskyHelper(a.as2D(), l.as2D(), n)
-
-        return lTensor
-    }
-
-    override fun Tensor<Double>.cholesky(): DoubleTensor = cholesky(1e-6)
-
-    override fun Tensor<Double>.qr(): Pair<DoubleTensor, DoubleTensor> {
-        checkSquareMatrix(shape)
-        val qTensor = zeroesLike()
-        val rTensor = zeroesLike()
-        tensor.matrixSequence()
-            .zip((qTensor.matrixSequence()
-                .zip(rTensor.matrixSequence()))).forEach { (matrix, qr) ->
-                val (q, r) = qr
-                qrHelper(matrix.asTensor(), q.asTensor(), r.as2D())
-            }
-
-        return qTensor to rTensor
-    }
-
-    override fun Tensor<Double>.svd(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> =
-        svd(epsilon = 1e-10)
-
-    /**
-     * Singular Value Decomposition.
-     *
-     * Computes the singular value decomposition of either a matrix or batch of matrices `input`.
-     * The singular value decomposition is represented as a triple `(U, S, V)`,
-     * such that ``input = U.dot(diagonalEmbedding(S).dot(V.T))``.
-     * If input is a batch of tensors, then U, S, and Vh are also batched with the same batch dimensions as input.
-     *
-     * @param epsilon permissible error when calculating the dot product of vectors,
-     * i.e. the precision with which the cosine approaches 1 in an iterative algorithm.
-     * @return triple `(U, S, V)`.
-     */
-    public fun Tensor<Double>.svd(epsilon: Double): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
-        val size = tensor.dimension
-        val commonShape = tensor.shape.sliceArray(0 until size - 2)
-        val (n, m) = tensor.shape.sliceArray(size - 2 until size)
-        val uTensor = zeros(commonShape + intArrayOf(min(n, m), n))
-        val sTensor = zeros(commonShape + intArrayOf(min(n, m)))
-        val vTensor = zeros(commonShape + intArrayOf(min(n, m), m))
-
-        tensor.matrixSequence()
-            .zip(uTensor.matrixSequence()
-                .zip(sTensor.vectorSequence()
-                    .zip(vTensor.matrixSequence()))).forEach { (matrix, USV) ->
-                val matrixSize = matrix.shape.reduce { acc, i -> acc * i }
-                val curMatrix = DoubleTensor(
-                    matrix.shape,
-                    matrix.mutableBuffer.array().slice(matrix.bufferStart until matrix.bufferStart + matrixSize)
-                        .toDoubleArray()
-                )
-                svdHelper(curMatrix, USV, m, n, epsilon)
-            }
-
-        return Triple(uTensor.transpose(), sTensor, vTensor.transpose())
-    }
-
-    override fun Tensor<Double>.symEig(): Pair<DoubleTensor, DoubleTensor> =
-        symEig(epsilon = 1e-15)
-
-    /**
-     * Returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices,
-     * represented by a pair (eigenvalues, eigenvectors).
-     *
-     * @param epsilon permissible error when comparing tensors for equality
-     * and when the cosine approaches 1 in the SVD algorithm.
-     * @return a pair (eigenvalues, eigenvectors)
-     */
-    public fun Tensor<Double>.symEig(epsilon: Double): Pair<DoubleTensor, DoubleTensor> {
-        checkSymmetric(tensor, epsilon)
-        val (u, s, v) = tensor.svd(epsilon)
-        val shp = s.shape + intArrayOf(1)
-        val utv = u.transpose() dot v
-        val n = s.shape.last()
-        for (matrix in utv.matrixSequence())
-            cleanSymHelper(matrix.as2D(), n)
-
-        val eig = (utv dot s.view(shp)).view(s.shape)
-        return eig to v
-    }
-
-    /**
-     * Computes the determinant of a square matrix input, or of each square matrix in a batched input
-     * using LU factorization algorithm.
-     *
-     * @param epsilon error in the LU algorithm - permissible error when comparing the determinant of a matrix with zero
-     * @return the determinant.
-     */
-    public fun Tensor<Double>.detLU(epsilon: Double = 1e-9): DoubleTensor {
-
-        checkSquareMatrix(tensor.shape)
-        val luTensor = tensor.copy()
-        val pivotsTensor = tensor.setUpPivots()
-
-        val n = shape.size
-
-        val detTensorShape = IntArray(n - 1) { i -> shape[i] }
-        detTensorShape[n - 2] = 1
-        val resBuffer = DoubleArray(detTensorShape.reduce(Int::times)) { 0.0 }
-
-        val detTensor = DoubleTensor(
-            detTensorShape,
-            resBuffer
-        )
-
-        luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).forEachIndexed { index, (lu, pivots) ->
-            resBuffer[index] = if (luHelper(lu.as2D(), pivots.as1D(), epsilon))
-                0.0 else luMatrixDet(lu.as2D(), pivots.as1D())
-        }
-
-        return detTensor
-    }
-
-    /**
-     * Computes the multiplicative inverse matrix of a square matrix input, or of each square matrix in a batched input
-     * using LU factorization algorithm.
-     * Given a square matrix `a`, return the matrix `aInv` satisfying
-     * ``a.dot(aInv) = aInv.dot(a) = eye(a.shape[0])``.
-     *
-     * @param epsilon error in the LU algorithm - permissible error when comparing the determinant of a matrix with zero
-     * @return the multiplicative inverse of a matrix.
-     */
-    public fun Tensor<Double>.invLU(epsilon: Double = 1e-9): DoubleTensor {
-        val (luTensor, pivotsTensor) = luFactor(epsilon)
-        val invTensor = luTensor.zeroesLike()
-
-        val seq = luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).zip(invTensor.matrixSequence())
-        for ((luP, invMatrix) in seq) {
-            val (lu, pivots) = luP
-            luMatrixInv(lu.as2D(), pivots.as1D(), invMatrix.as2D())
-        }
-
-        return invTensor
-    }
-
-    /**
-     * LUP decomposition
-     *
-     * Computes the LUP decomposition of a matrix or a batch of matrices.
-     * Given a tensor `input`, return tensors (P, L, U) satisfying ``P * input = L * U``,
-     * with `P` being a permutation matrix or batch of matrices,
-     * `L` being a lower triangular matrix or batch of matrices,
-     * `U` being an upper triangular matrix or batch of matrices.
-     *
-     * @param epsilon permissible error when comparing the determinant of a matrix with zero
-     * @return triple of P, L and U tensors
-     */
-    public fun Tensor<Double>.lu(epsilon: Double = 1e-9): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
-        val (lu, pivots) = this.luFactor(epsilon)
-        return luPivot(lu, pivots)
-    }
-
-    override fun Tensor<Double>.lu(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> = lu(1e-9)
-
-}
@@ -5,28 +5,34 @@

 package space.kscience.kmath.tensors.core.algebras

+import space.kscience.kmath.nd.as1D
 import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.tensors.api.AnalyticTensorAlgebra
+import space.kscience.kmath.tensors.api.LinearOpsTensorAlgebra
 import space.kscience.kmath.tensors.api.TensorPartialDivisionAlgebra
 import space.kscience.kmath.tensors.api.Tensor
 import space.kscience.kmath.tensors.core.*
-import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra.fold
-import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra.foldDim
-import space.kscience.kmath.tensors.core.broadcastOuterTensors
-import space.kscience.kmath.tensors.core.checkBufferShapeConsistency
-import space.kscience.kmath.tensors.core.checkEmptyDoubleBuffer
-import space.kscience.kmath.tensors.core.checkEmptyShape
-import space.kscience.kmath.tensors.core.checkShapesCompatible
-import space.kscience.kmath.tensors.core.checkTranspose
-import space.kscience.kmath.tensors.core.checkView
-import space.kscience.kmath.tensors.core.dotHelper
-import space.kscience.kmath.tensors.core.getRandomNormals
-import space.kscience.kmath.tensors.core.minusIndexFrom
-import kotlin.math.abs
+import space.kscience.kmath.tensors.core.internal.dotHelper
+import space.kscience.kmath.tensors.core.internal.getRandomNormals
+import space.kscience.kmath.tensors.core.internal.*
+import space.kscience.kmath.tensors.core.internal.broadcastOuterTensors
+import space.kscience.kmath.tensors.core.internal.checkBufferShapeConsistency
+import space.kscience.kmath.tensors.core.internal.checkEmptyDoubleBuffer
+import space.kscience.kmath.tensors.core.internal.checkEmptyShape
+import space.kscience.kmath.tensors.core.internal.checkShapesCompatible
+import space.kscience.kmath.tensors.core.internal.checkSquareMatrix
+import space.kscience.kmath.tensors.core.internal.checkTranspose
+import space.kscience.kmath.tensors.core.internal.checkView
+import space.kscience.kmath.tensors.core.internal.minusIndexFrom
+import kotlin.math.*

 /**
  * Implementation of basic operations over double tensors and basic algebra operations on them.
 */
-public open class DoubleTensorAlgebra : TensorPartialDivisionAlgebra<Double> {
+public open class DoubleTensorAlgebra :
+    TensorPartialDivisionAlgebra<Double>,
+    AnalyticTensorAlgebra<Double>,
+    LinearOpsTensorAlgebra<Double> {

     public companion object : DoubleTensorAlgebra()

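This is the heart of the merge: `DoubleTensorAlgebra` itself now implements the partial-division, analytic, and linear-ops interfaces, so one context supplies every operation. A sketch of what that buys a call site (all entry points shown are defined in this diff; the data is random):

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra

fun main() = DoubleTensorAlgebra {
    val t = randomNormal(intArrayOf(3, 3), 0L)
    val symmetric = t + t.transpose()   // base tensor arithmetic
    println(symmetric.exp().mean())     // analytic ops, same context
    println(symmetric.detLU().value())  // linear ops, same context
}
```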
@@ -311,9 +317,8 @@ public open class DoubleTensorAlgebra : TensorPartialDivisionAlgebra<Double> {
         return DoubleTensor(shape, tensor.mutableBuffer.array(), tensor.bufferStart)
     }

-    override fun Tensor<Double>.viewAs(other: Tensor<Double>): DoubleTensor {
-        return tensor.view(other.shape)
-    }
+    override fun Tensor<Double>.viewAs(other: Tensor<Double>): DoubleTensor =
+        tensor.view(other.shape)

     override infix fun Tensor<Double>.dot(other: Tensor<Double>): DoubleTensor {
         if (tensor.shape.size == 1 && other.shape.size == 1) {
@@ -565,4 +570,350 @@ public open class DoubleTensorAlgebra : TensorPartialDivisionAlgebra<Double> {
         x.withIndex().maxByOrNull { it.value }?.index!!.toDouble()
     }, dim, keepDim)

+    override fun Tensor<Double>.mean(): Double = this.fold { it.sum() / tensor.numElements }
+
+    override fun Tensor<Double>.mean(dim: Int, keepDim: Boolean): DoubleTensor =
+        foldDim(
+            { arr ->
+                check(dim < dimension) { "Dimension $dim out of range $dimension" }
+                arr.sum() / shape[dim]
+            },
+            dim,
+            keepDim
+        )
+
+    override fun Tensor<Double>.std(): Double = this.fold { arr ->
+        val mean = arr.sum() / tensor.numElements
+        sqrt(arr.sumOf { (it - mean) * (it - mean) } / (tensor.numElements - 1))
+    }
+
+    override fun Tensor<Double>.std(dim: Int, keepDim: Boolean): DoubleTensor = foldDim(
+        { arr ->
+            check(dim < dimension) { "Dimension $dim out of range $dimension" }
+            val mean = arr.sum() / shape[dim]
+            sqrt(arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1))
+        },
+        dim,
+        keepDim
+    )
+
+    override fun Tensor<Double>.variance(): Double = this.fold { arr ->
+        val mean = arr.sum() / tensor.numElements
+        arr.sumOf { (it - mean) * (it - mean) } / (tensor.numElements - 1)
+    }
+
+    override fun Tensor<Double>.variance(dim: Int, keepDim: Boolean): DoubleTensor = foldDim(
+        { arr ->
+            check(dim < dimension) { "Dimension $dim out of range $dimension" }
+            val mean = arr.sum() / shape[dim]
+            arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1)
+        },
+        dim,
+        keepDim
+    )
+
+    private fun cov(x: DoubleTensor, y:DoubleTensor): Double{
+        val n = x.shape[0]
+        return ((x - x.mean()) * (y - y.mean())).mean() * n / (n - 1)
+    }
+
+    override fun cov(tensors: List<Tensor<Double>>): DoubleTensor {
+        check(tensors.isNotEmpty()) { "List must have at least 1 element" }
+        val n = tensors.size
+        val m = tensors[0].shape[0]
+        check(tensors.all { it.shape contentEquals intArrayOf(m) }) { "Tensors must have same shapes" }
+        val resTensor = DoubleTensor(
+            intArrayOf(n, n),
+            DoubleArray(n * n) {0.0}
+        )
+        for (i in 0 until n){
+            for (j in 0 until n){
+                resTensor[intArrayOf(i, j)] = cov(tensors[i].tensor, tensors[j].tensor)
+            }
+        }
+        return resTensor
+    }
+
+    override fun Tensor<Double>.exp(): DoubleTensor = tensor.map(::exp)
+
+    override fun Tensor<Double>.ln(): DoubleTensor = tensor.map(::ln)
+
+    override fun Tensor<Double>.sqrt(): DoubleTensor = tensor.map(::sqrt)
+
+    override fun Tensor<Double>.cos(): DoubleTensor = tensor.map(::cos)
+
+    override fun Tensor<Double>.acos(): DoubleTensor = tensor.map(::acos)
+
+    override fun Tensor<Double>.cosh(): DoubleTensor = tensor.map(::cosh)
+
+    override fun Tensor<Double>.acosh(): DoubleTensor = tensor.map(::acosh)
+
+    override fun Tensor<Double>.sin(): DoubleTensor = tensor.map(::sin)
+
+    override fun Tensor<Double>.asin(): DoubleTensor = tensor.map(::asin)
+
+    override fun Tensor<Double>.sinh(): DoubleTensor = tensor.map(::sinh)
+
+    override fun Tensor<Double>.asinh(): DoubleTensor = tensor.map(::asinh)
+
+    override fun Tensor<Double>.tan(): DoubleTensor = tensor.map(::tan)
+
+    override fun Tensor<Double>.atan(): DoubleTensor = tensor.map(::atan)
+
+    override fun Tensor<Double>.tanh(): DoubleTensor = tensor.map(::tanh)
+
+    override fun Tensor<Double>.atanh(): DoubleTensor = tensor.map(::atanh)
+
+    override fun Tensor<Double>.ceil(): DoubleTensor = tensor.map(::ceil)
+
+    override fun Tensor<Double>.floor(): DoubleTensor = tensor.map(::floor)
+
+    override fun Tensor<Double>.inv(): DoubleTensor = invLU(1e-9)
+
+    override fun Tensor<Double>.det(): DoubleTensor = detLU(1e-9)
+
+    /**
+     * Computes the LU factorization of a matrix or batches of matrices `input`.
+     * Returns a tuple containing the LU factorization and pivots of `input`.
+     *
+     * @param epsilon permissible error when comparing the determinant of a matrix with zero
+     * @return pair of `factorization` and `pivots`.
+     * The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor.
+     * The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows.
+     */
+    public fun Tensor<Double>.luFactor(epsilon: Double): Pair<DoubleTensor, IntTensor> =
+        computeLU(tensor, epsilon)
+            ?: throw IllegalArgumentException("Tensor contains matrices which are singular at precision $epsilon")
+
+    /**
+     * Computes the LU factorization of a matrix or batches of matrices `input`.
+     * Returns a tuple containing the LU factorization and pivots of `input`.
+     * Uses an error of ``1e-9`` when calculating whether a matrix is degenerate.
+     *
+     * @return pair of `factorization` and `pivots`.
+     * The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor.
+     * The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows.
+     */
+    public fun Tensor<Double>.luFactor(): Pair<DoubleTensor, IntTensor> = luFactor(1e-9)
+
+    /**
+     * Unpacks the data and pivots from a LU factorization of a tensor.
+     * Given a tensor [luTensor], return tensors (P, L, U) satisfying ``P * luTensor = L * U``,
+     * with `P` being a permutation matrix or batch of matrices,
+     * `L` being a lower triangular matrix or batch of matrices,
+     * `U` being an upper triangular matrix or batch of matrices.
+     *
+     * @param luTensor the packed LU factorization data
+     * @param pivotsTensor the packed LU factorization pivots
+     * @return triple of P, L and U tensors
+     */
+    public fun luPivot(
+        luTensor: Tensor<Double>,
+        pivotsTensor: Tensor<Int>
+    ): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
+        checkSquareMatrix(luTensor.shape)
+        check(
+            luTensor.shape.dropLast(2).toIntArray() contentEquals pivotsTensor.shape.dropLast(1).toIntArray() ||
+                    luTensor.shape.last() == pivotsTensor.shape.last() - 1
+        ) { "Inappropriate shapes of input tensors" }
+
+        val n = luTensor.shape.last()
+        val pTensor = luTensor.zeroesLike()
+        pTensor
+            .matrixSequence()
+            .zip(pivotsTensor.tensor.vectorSequence())
+            .forEach { (p, pivot) -> pivInit(p.as2D(), pivot.as1D(), n) }
+
+        val lTensor = luTensor.zeroesLike()
+        val uTensor = luTensor.zeroesLike()
+
+        lTensor.matrixSequence()
+            .zip(uTensor.matrixSequence())
+            .zip(luTensor.tensor.matrixSequence())
+            .forEach { (pairLU, lu) ->
+                val (l, u) = pairLU
+                luPivotHelper(l.as2D(), u.as2D(), lu.as2D(), n)
+            }
+
+        return Triple(pTensor, lTensor, uTensor)
+    }
+
+    /**
+     * QR decomposition.
+     *
+     * Computes the QR decomposition of a matrix or a batch of matrices, and returns a pair `(Q, R)` of tensors.
+     * Given a tensor `input`, return tensors (Q, R) satisfying ``input = Q * R``,
+     * with `Q` being an orthogonal matrix or batch of orthogonal matrices
+     * and `R` being an upper triangular matrix or batch of upper triangular matrices.
+     *
+     * @param epsilon permissible error when comparing tensors for equality.
+     * Used when checking the positive definiteness of the input matrix or matrices.
+     * @return pair of Q and R tensors.
+     */
+    public fun Tensor<Double>.cholesky(epsilon: Double): DoubleTensor {
+        checkSquareMatrix(shape)
+        checkPositiveDefinite(tensor, epsilon)
+
+        val n = shape.last()
+        val lTensor = zeroesLike()
+
+        for ((a, l) in tensor.matrixSequence().zip(lTensor.matrixSequence()))
+            for (i in 0 until n) choleskyHelper(a.as2D(), l.as2D(), n)
+
+        return lTensor
+    }
+
+    override fun Tensor<Double>.cholesky(): DoubleTensor = cholesky(1e-6)
+
+    override fun Tensor<Double>.qr(): Pair<DoubleTensor, DoubleTensor> {
+        checkSquareMatrix(shape)
+        val qTensor = zeroesLike()
+        val rTensor = zeroesLike()
+        tensor.matrixSequence()
+            .zip((qTensor.matrixSequence()
+                .zip(rTensor.matrixSequence()))).forEach { (matrix, qr) ->
+                val (q, r) = qr
+                qrHelper(matrix.asTensor(), q.asTensor(), r.as2D())
+            }
+
+        return qTensor to rTensor
+    }
+
+    override fun Tensor<Double>.svd(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> =
+        svd(epsilon = 1e-10)
+
+    /**
+     * Singular Value Decomposition.
+     *
+     * Computes the singular value decomposition of either a matrix or batch of matrices `input`.
+     * The singular value decomposition is represented as a triple `(U, S, V)`,
+     * such that ``input = U.dot(diagonalEmbedding(S).dot(V.T))``.
+     * If input is a batch of tensors, then U, S, and Vh are also batched with the same batch dimensions as input.
+     *
+     * @param epsilon permissible error when calculating the dot product of vectors,
+     * i.e. the precision with which the cosine approaches 1 in an iterative algorithm.
+     * @return triple `(U, S, V)`.
+     */
+    public fun Tensor<Double>.svd(epsilon: Double): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
+        val size = tensor.dimension
+        val commonShape = tensor.shape.sliceArray(0 until size - 2)
+        val (n, m) = tensor.shape.sliceArray(size - 2 until size)
+        val uTensor = zeros(commonShape + intArrayOf(min(n, m), n))
+        val sTensor = zeros(commonShape + intArrayOf(min(n, m)))
+        val vTensor = zeros(commonShape + intArrayOf(min(n, m), m))
+
+        tensor.matrixSequence()
+            .zip(uTensor.matrixSequence()
+                .zip(sTensor.vectorSequence()
+                    .zip(vTensor.matrixSequence()))).forEach { (matrix, USV) ->
+                val matrixSize = matrix.shape.reduce { acc, i -> acc * i }
+                val curMatrix = DoubleTensor(
+                    matrix.shape,
+                    matrix.mutableBuffer.array().slice(matrix.bufferStart until matrix.bufferStart + matrixSize)
+                        .toDoubleArray()
+                )
+                svdHelper(curMatrix, USV, m, n, epsilon)
+            }
+
+        return Triple(uTensor.transpose(), sTensor, vTensor.transpose())
+    }
+
+    override fun Tensor<Double>.symEig(): Pair<DoubleTensor, DoubleTensor> =
+        symEig(epsilon = 1e-15)
+
+    /**
+     * Returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices,
+     * represented by a pair (eigenvalues, eigenvectors).
+     *
+     * @param epsilon permissible error when comparing tensors for equality
+     * and when the cosine approaches 1 in the SVD algorithm.
+     * @return a pair (eigenvalues, eigenvectors)
+     */
+    public fun Tensor<Double>.symEig(epsilon: Double): Pair<DoubleTensor, DoubleTensor> {
+        checkSymmetric(tensor, epsilon)
+        val (u, s, v) = tensor.svd(epsilon)
+        val shp = s.shape + intArrayOf(1)
+        val utv = u.transpose() dot v
+        val n = s.shape.last()
+        for (matrix in utv.matrixSequence())
+            cleanSymHelper(matrix.as2D(), n)
+
+        val eig = (utv dot s.view(shp)).view(s.shape)
+        return eig to v
+    }
+
+    /**
+     * Computes the determinant of a square matrix input, or of each square matrix in a batched input
+     * using LU factorization algorithm.
+     *
+     * @param epsilon error in the LU algorithm - permissible error when comparing the determinant of a matrix with zero
+     * @return the determinant.
+     */
+    public fun Tensor<Double>.detLU(epsilon: Double = 1e-9): DoubleTensor {
+
+        checkSquareMatrix(tensor.shape)
+        val luTensor = tensor.copy()
+        val pivotsTensor = tensor.setUpPivots()
+
+        val n = shape.size
+
+        val detTensorShape = IntArray(n - 1) { i -> shape[i] }
+        detTensorShape[n - 2] = 1
+        val resBuffer = DoubleArray(detTensorShape.reduce(Int::times)) { 0.0 }
+
+        val detTensor = DoubleTensor(
+            detTensorShape,
+            resBuffer
+        )
+
+        luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).forEachIndexed { index, (lu, pivots) ->
+            resBuffer[index] = if (luHelper(lu.as2D(), pivots.as1D(), epsilon))
+                0.0 else luMatrixDet(lu.as2D(), pivots.as1D())
+        }
+
+        return detTensor
+    }
+
+    /**
+     * Computes the multiplicative inverse matrix of a square matrix input, or of each square matrix in a batched input
+     * using LU factorization algorithm.
+     * Given a square matrix `a`, return the matrix `aInv` satisfying
+     * ``a.dot(aInv) = aInv.dot(a) = eye(a.shape[0])``.
+     *
+     * @param epsilon error in the LU algorithm - permissible error when comparing the determinant of a matrix with zero
+     * @return the multiplicative inverse of a matrix.
+     */
+    public fun Tensor<Double>.invLU(epsilon: Double = 1e-9): DoubleTensor {
+        val (luTensor, pivotsTensor) = luFactor(epsilon)
+        val invTensor = luTensor.zeroesLike()
+
+        val seq = luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).zip(invTensor.matrixSequence())
+        for ((luP, invMatrix) in seq) {
+            val (lu, pivots) = luP
+            luMatrixInv(lu.as2D(), pivots.as1D(), invMatrix.as2D())
+        }
+
+        return invTensor
+    }
+
+    /**
+     * LUP decomposition
+     *
+     * Computes the LUP decomposition of a matrix or a batch of matrices.
+     * Given a tensor `input`, return tensors (P, L, U) satisfying ``P * input = L * U``,
+     * with `P` being a permutation matrix or batch of matrices,
+     * `L` being a lower triangular matrix or batch of matrices,
+     * `U` being an upper triangular matrix or batch of matrices.
+     *
+     * @param epsilon permissible error when comparing the determinant of a matrix with zero
+     * @return triple of P, L and U tensors
+     */
+    public fun Tensor<Double>.lu(epsilon: Double = 1e-9): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
+        val (lu, pivots) = this.luFactor(epsilon)
+        return luPivot(lu, pivots)
+    }
+
+    override fun Tensor<Double>.lu(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> = lu(1e-9)

 }
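Because the merged members sit on an open class with a companion object, the block-scoped style keeps working for one-off computations, now against a single algebra. A sketch using the factorization members added above (the 2x2 matrix is made up and chosen to be symmetric positive-definite):

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra

fun main() = DoubleTensorAlgebra {
    val m = fromArray(intArrayOf(2, 2), doubleArrayOf(4.0, 1.0, 1.0, 3.0))
    val l = m.cholesky()                        // m = L * L^T
    println("L * L^T:\n${l dot l.transpose()}") // should reproduce m
    val (eig, _) = m.symEig()
    println("Eigenvalues:\n$eig")
}
```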
@@ -1,5 +1,11 @@
-package space.kscience.kmath.tensors.core
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.core.internal
+
+import space.kscience.kmath.tensors.core.DoubleTensor
 import kotlin.math.max

 internal fun multiIndexBroadCasting(tensor: DoubleTensor, resTensor: DoubleTensor, linearSize: Int) {
@@ -1,7 +1,12 @@
-package space.kscience.kmath.tensors.core
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.core.internal

 import space.kscience.kmath.tensors.api.Tensor
-import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra
+import space.kscience.kmath.tensors.core.DoubleTensor
 import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra


@@ -50,7 +55,7 @@ internal fun DoubleTensorAlgebra.checkSymmetric(
         "Tensor is not symmetric about the last 2 dimensions at precision $epsilon"
     }

-internal fun DoubleLinearOpsTensorAlgebra.checkPositiveDefinite(tensor: DoubleTensor, epsilon: Double = 1e-6) {
+internal fun DoubleTensorAlgebra.checkPositiveDefinite(tensor: DoubleTensor, epsilon: Double = 1e-6) {
    checkSymmetric(tensor, epsilon)
    for (mat in tensor.matrixSequence())
        check(mat.asTensor().detLU().value() > 0.0) {
@@ -1,12 +1,17 @@
-package space.kscience.kmath.tensors.core
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.core.internal

 import space.kscience.kmath.nd.MutableStructure1D
 import space.kscience.kmath.nd.MutableStructure2D
 import space.kscience.kmath.nd.as1D
 import space.kscience.kmath.nd.as2D
 import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra
-import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra
+import space.kscience.kmath.tensors.core.*
+import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra
 import kotlin.math.abs
 import kotlin.math.min
 import kotlin.math.sign
@@ -114,7 +119,7 @@ internal fun <T> BufferedTensor<T>.setUpPivots(): IntTensor {
     )
 }
 
-internal fun DoubleLinearOpsTensorAlgebra.computeLU(
+internal fun DoubleTensorAlgebra.computeLU(
     tensor: DoubleTensor,
     epsilon: Double
 ): Pair<DoubleTensor, IntTensor>? {
@@ -218,7 +223,7 @@ internal fun luMatrixInv(
     }
 }
 
-internal fun DoubleLinearOpsTensorAlgebra.qrHelper(
+internal fun DoubleTensorAlgebra.qrHelper(
     matrix: DoubleTensor,
     q: DoubleTensor,
     r: MutableStructure2D<Double>
@@ -241,14 +246,14 @@ internal fun DoubleLinearOpsTensorAlgebra.qrHelper(
                 }
             }
         }
-        r[j, j] = DoubleAnalyticTensorAlgebra { (v dot v).sqrt().value() }
+        r[j, j] = DoubleTensorAlgebra { (v dot v).sqrt().value() }
         for (i in 0 until n) {
             qM[i, j] = vv[i] / r[j, j]
         }
     }
 }
 
-internal fun DoubleLinearOpsTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double = 1e-10): DoubleTensor {
+internal fun DoubleTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double = 1e-10): DoubleTensor {
     val (n, m) = a.shape
     var v: DoubleTensor
     val b: DoubleTensor
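`svd1d` finds the dominant singular direction by power iteration: repeatedly push the current estimate through the square Gram matrix `b` (built from `a` and its transpose), renormalize, and stop once successive iterates are nearly parallel. The loop from the next hunk, condensed as a sketch (`n`, `b`, and `epsilon` come from the surrounding function; the random start is an assumption):

    // power iteration sketch; converged when v and lastV are nearly parallel
    var v = randomNormal(intArrayOf(n, 1), 0)
    while (true) {
        val lastV = v
        v = b.dot(lastV)                                    // amplify the dominant direction
        val norm = DoubleTensorAlgebra { (v dot v).sqrt().value() }
        v = v.times(1.0 / norm)                             // keep the iterate unit-length
        if (abs(v.dot(lastV).value()) > 1 - epsilon) break  // cosine close to 1: converged
    }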
@@ -264,7 +269,7 @@ internal fun DoubleLinearOpsTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double
     while (true) {
         lastV = v
         v = b.dot(lastV)
-        val norm = DoubleAnalyticTensorAlgebra { (v dot v).sqrt().value() }
+        val norm = DoubleTensorAlgebra { (v dot v).sqrt().value() }
         v = v.times(1.0 / norm)
         if (abs(v.dot(lastV).value()) > 1 - epsilon) {
             return v
@@ -272,7 +277,7 @@ internal fun DoubleLinearOpsTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double
     }
 }
 
-internal fun DoubleLinearOpsTensorAlgebra.svdHelper(
+internal fun DoubleTensorAlgebra.svdHelper(
     matrix: DoubleTensor,
     USV: Pair<BufferedTensor<Double>, Pair<BufferedTensor<Double>, BufferedTensor<Double>>>,
     m: Int, n: Int, epsilon: Double
@@ -298,12 +303,12 @@ internal fun DoubleLinearOpsTensorAlgebra.svdHelper(
         if (n > m) {
             v = svd1d(a, epsilon)
             u = matrix.dot(v)
-            norm = DoubleAnalyticTensorAlgebra { (u dot u).sqrt().value() }
+            norm = DoubleTensorAlgebra { (u dot u).sqrt().value() }
             u = u.times(1.0 / norm)
         } else {
             u = svd1d(a, epsilon)
             v = matrix.transpose(0, 1).dot(u)
-            norm = DoubleAnalyticTensorAlgebra { (v dot v).sqrt().value() }
+            norm = DoubleTensorAlgebra { (v dot v).sqrt().value() }
             v = v.times(1.0 / norm)
         }
 
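Each `svdHelper` pass recovers one singular triple: power iteration yields the right singular vector (or the left one when the matrix is wide), mapping it through the matrix yields the partner vector, and the length of that image is the singular value. The `n > m` branch, condensed as a sketch (`a` is assumed to be the current deflated working copy of `matrix`):

    val v = svd1d(a, epsilon)       // dominant right singular vector of a
    var u = matrix.dot(v)           // its image under the original matrix
    val sigma = DoubleTensorAlgebra { (u dot u).sqrt().value() }  // singular value = |matrix v|
    u = u.times(1.0 / sigma)        // normalized left singular vector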
@@ -3,11 +3,14 @@
  * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
  */
 
-package space.kscience.kmath.tensors.core
+package space.kscience.kmath.tensors.core.internal
 
 import space.kscience.kmath.nd.MutableBufferND
 import space.kscience.kmath.structures.asMutableBuffer
 import space.kscience.kmath.tensors.api.Tensor
+import space.kscience.kmath.tensors.core.BufferedTensor
+import space.kscience.kmath.tensors.core.DoubleTensor
+import space.kscience.kmath.tensors.core.IntTensor
 import space.kscience.kmath.tensors.core.algebras.TensorLinearStructure
 
 internal fun BufferedTensor<Int>.asTensor(): IntTensor =
@@ -1,9 +1,16 @@
-package space.kscience.kmath.tensors.core
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.core.internal
 
 import space.kscience.kmath.nd.as1D
 import space.kscience.kmath.samplers.GaussianSampler
 import space.kscience.kmath.stat.RandomGenerator
 import space.kscience.kmath.structures.*
+import space.kscience.kmath.tensors.core.BufferedTensor
+import space.kscience.kmath.tensors.core.DoubleTensor
 import kotlin.math.*
 
 /**
@@ -6,6 +6,7 @@
 package space.kscience.kmath.tensors.core
 
 import space.kscience.kmath.tensors.api.Tensor
+import space.kscience.kmath.tensors.core.internal.tensor
 
 /**
  * Casts [Tensor<Double>] to [DoubleTensor]
@@ -3,6 +3,7 @@ package space.kscience.kmath.tensors.core
 import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.tensors.core.algebras.BroadcastDoubleTensorAlgebra
 import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.internal.*
 import kotlin.test.Test
 import kotlin.test.assertTrue
 
@@ -1,7 +1,6 @@
 package space.kscience.kmath.tensors.core
 
 import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.algebras.DoubleAnalyticTensorAlgebra
 import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra
 import kotlin.math.*
 import kotlin.test.Test
@@ -28,73 +27,73 @@ internal class TestDoubleAnalyticTensorAlgebra {
     }
 
     @Test
-    fun testExp() = DoubleAnalyticTensorAlgebra {
+    fun testExp() = DoubleTensorAlgebra {
         assertTrue { tensor.exp() eq expectedTensor(::exp) }
     }
 
     @Test
-    fun testLog() = DoubleAnalyticTensorAlgebra {
+    fun testLog() = DoubleTensorAlgebra {
         assertTrue { tensor.ln() eq expectedTensor(::ln) }
     }
 
     @Test
-    fun testSqrt() = DoubleAnalyticTensorAlgebra {
+    fun testSqrt() = DoubleTensorAlgebra {
         assertTrue { tensor.sqrt() eq expectedTensor(::sqrt) }
     }
 
     @Test
-    fun testCos() = DoubleAnalyticTensorAlgebra {
+    fun testCos() = DoubleTensorAlgebra {
         assertTrue { tensor.cos() eq expectedTensor(::cos) }
     }
 
 
     @Test
-    fun testCosh() = DoubleAnalyticTensorAlgebra {
+    fun testCosh() = DoubleTensorAlgebra {
         assertTrue { tensor.cosh() eq expectedTensor(::cosh) }
     }
 
     @Test
-    fun testAcosh() = DoubleAnalyticTensorAlgebra {
+    fun testAcosh() = DoubleTensorAlgebra {
         assertTrue { tensor.acosh() eq expectedTensor(::acosh) }
     }
 
     @Test
-    fun testSin() = DoubleAnalyticTensorAlgebra {
+    fun testSin() = DoubleTensorAlgebra {
         assertTrue { tensor.sin() eq expectedTensor(::sin) }
     }
 
     @Test
-    fun testSinh() = DoubleAnalyticTensorAlgebra {
+    fun testSinh() = DoubleTensorAlgebra {
         assertTrue { tensor.sinh() eq expectedTensor(::sinh) }
     }
 
     @Test
-    fun testAsinh() = DoubleAnalyticTensorAlgebra {
+    fun testAsinh() = DoubleTensorAlgebra {
         assertTrue { tensor.asinh() eq expectedTensor(::asinh) }
     }
 
     @Test
-    fun testTan() = DoubleAnalyticTensorAlgebra {
+    fun testTan() = DoubleTensorAlgebra {
         assertTrue { tensor.tan() eq expectedTensor(::tan) }
     }
 
     @Test
-    fun testAtan() = DoubleAnalyticTensorAlgebra {
+    fun testAtan() = DoubleTensorAlgebra {
         assertTrue { tensor.atan() eq expectedTensor(::atan) }
     }
 
     @Test
-    fun testTanh() = DoubleAnalyticTensorAlgebra {
+    fun testTanh() = DoubleTensorAlgebra {
         assertTrue { tensor.tanh() eq expectedTensor(::tanh) }
     }
 
     @Test
-    fun testCeil() = DoubleAnalyticTensorAlgebra {
+    fun testCeil() = DoubleTensorAlgebra {
         assertTrue { tensor.ceil() eq expectedTensor(::ceil) }
     }
 
     @Test
-    fun testFloor() = DoubleAnalyticTensorAlgebra {
+    fun testFloor() = DoubleTensorAlgebra {
         assertTrue { tensor.floor() eq expectedTensor(::floor) }
     }
 
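With `DoubleAnalyticTensorAlgebra` folded into `DoubleTensorAlgebra`, the element-wise analytic functions exercised above live in the same scope as construction and arithmetic. A minimal sketch combining methods that appear in these tests:

    DoubleTensorAlgebra {
        val t = fromArray(intArrayOf(2), doubleArrayOf(1.0, 2.0))
        t.exp()    // element-wise e^x
        t.ln()     // element-wise natural logarithm
        t.floor()  // element-wise rounding toward negative infinity
    }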
@@ -145,7 +144,7 @@ internal class TestDoubleAnalyticTensorAlgebra {
     }
 
     @Test
-    fun testMean() = DoubleAnalyticTensorAlgebra {
+    fun testMean() = DoubleTensorAlgebra {
         assertTrue { tensor2.mean() == 1.0 }
         assertTrue { tensor2.mean(0, true) eq fromArray(
             intArrayOf(1, 2),
@@ -1,7 +1,9 @@
 package space.kscience.kmath.tensors.core
 
 import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra
+import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.internal.array
+import space.kscience.kmath.tensors.core.internal.svd1d
 import kotlin.math.abs
 import kotlin.test.Test
 import kotlin.test.assertEquals
|
|||||||
internal class TestDoubleLinearOpsTensorAlgebra {
|
internal class TestDoubleLinearOpsTensorAlgebra {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
fun testDetLU() = DoubleLinearOpsTensorAlgebra {
|
fun testDetLU() = DoubleTensorAlgebra {
|
||||||
val tensor = fromArray(
|
val tensor = fromArray(
|
||||||
intArrayOf(2, 2, 2),
|
intArrayOf(2, 2, 2),
|
||||||
doubleArrayOf(
|
doubleArrayOf(
|
||||||
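`detLU()` computes the determinant through the same LUP machinery, and `value()` unwraps the single-element result. A quick worked sketch (for [[1, 2], [3, 4]] the determinant is 1·4 - 2·3 = -2):

    DoubleTensorAlgebra {
        val m = fromArray(intArrayOf(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0))
        val det = m.detLU().value()  // -2.0, up to floating-point error
    }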
@@ -35,7 +37,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     }
 
     @Test
-    fun testDet() = DoubleLinearOpsTensorAlgebra {
+    fun testDet() = DoubleTensorAlgebra {
         val expectedValue = 0.019827417
         val m = fromArray(
             intArrayOf(3, 3), doubleArrayOf(
@@ -49,7 +51,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     }
 
     @Test
-    fun testDetSingle() = DoubleLinearOpsTensorAlgebra {
+    fun testDetSingle() = DoubleTensorAlgebra {
         val expectedValue = 48.151623
         val m = fromArray(
             intArrayOf(1, 1), doubleArrayOf(
@@ -61,7 +63,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     }
 
     @Test
-    fun testInvLU() = DoubleLinearOpsTensorAlgebra {
+    fun testInvLU() = DoubleTensorAlgebra {
         val tensor = fromArray(
             intArrayOf(2, 2, 2),
             doubleArrayOf(
@@ -86,14 +88,14 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     }
 
     @Test
-    fun testScalarProduct() = DoubleLinearOpsTensorAlgebra {
+    fun testScalarProduct() = DoubleTensorAlgebra {
         val a = fromArray(intArrayOf(3), doubleArrayOf(1.8, 2.5, 6.8))
         val b = fromArray(intArrayOf(3), doubleArrayOf(5.5, 2.6, 6.4))
         assertEquals(a.dot(b).value(), 59.92)
     }
 
     @Test
-    fun testQR() = DoubleLinearOpsTensorAlgebra {
+    fun testQR() = DoubleTensorAlgebra {
         val shape = intArrayOf(2, 2, 2)
         val buffer = doubleArrayOf(
             1.0, 3.0,
@@ -114,7 +116,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     }
 
     @Test
-    fun testLU() = DoubleLinearOpsTensorAlgebra {
+    fun testLU() = DoubleTensorAlgebra {
         val shape = intArrayOf(2, 2, 2)
         val buffer = doubleArrayOf(
             1.0, 3.0,
@@ -134,7 +136,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     }
 
     @Test
-    fun testCholesky() = DoubleLinearOpsTensorAlgebra {
+    fun testCholesky() = DoubleTensorAlgebra {
         val tensor = randomNormal(intArrayOf(2, 5, 5), 0)
         val sigma = (tensor dot tensor.transpose()) + diagonalEmbedding(
             fromArray(intArrayOf(2, 5), DoubleArray(10) { 0.1 })
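`testCholesky` manufactures a symmetric positive definite batch the standard way: A·Aᵀ is symmetric positive semi-definite, and a small diagonal shift makes it strictly definite, so the decomposition cannot fail. The same recipe as a standalone sketch (the `cholesky()` call mirrors the test; the 0.1 shift is the test's choice):

    DoubleTensorAlgebra {
        val a = randomNormal(intArrayOf(2, 5, 5), 0)
        val sigma = (a dot a.transpose()) + diagonalEmbedding(
            fromArray(intArrayOf(2, 5), DoubleArray(10) { 0.1 })  // +0.1 on each diagonal
        )
        val low = sigma.cholesky()  // lower-triangular factor for each matrix in the batch
    }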
@@ -145,7 +147,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     }
 
     @Test
-    fun testSVD1D() = DoubleLinearOpsTensorAlgebra {
+    fun testSVD1D() = DoubleTensorAlgebra {
         val tensor2 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
 
         val res = svd1d(tensor2)
@@ -156,13 +158,13 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     }
 
     @Test
-    fun testSVD() = DoubleLinearOpsTensorAlgebra{
+    fun testSVD() = DoubleTensorAlgebra{
         testSVDFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)))
         testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0)))
     }
 
     @Test
-    fun testBatchedSVD() = DoubleLinearOpsTensorAlgebra {
+    fun testBatchedSVD() = DoubleTensorAlgebra {
         val tensor = randomNormal(intArrayOf(2, 5, 3), 0)
         val (tensorU, tensorS, tensorV) = tensor.svd()
         val tensorSVD = tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose())
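The batched SVD test rests on the defining identity A = U·diag(S)·Vᵀ, applied matrix-by-matrix across the batch. As a standalone sketch (the approximate-equality assertion that follows in the test is assumed, as in `testSVDFor`):

    DoubleTensorAlgebra {
        val tensor = randomNormal(intArrayOf(2, 5, 3), 0)
        val (u, s, v) = tensor.svd()
        // should reproduce the input up to numerical error
        val rebuilt = u dot (diagonalEmbedding(s) dot v.transpose())
    }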
@@ -170,7 +172,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     }
 
     @Test
-    fun testBatchedSymEig() = DoubleLinearOpsTensorAlgebra {
+    fun testBatchedSymEig() = DoubleTensorAlgebra {
         val tensor = randomNormal(shape = intArrayOf(2, 3, 3), 0)
         val tensorSigma = tensor + tensor.transpose()
         val (tensorS, tensorV) = tensorSigma.symEig()
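`symEig` relies on the eigendecomposition of a symmetric matrix, A = V·diag(S)·Vᵀ, which is why the test symmetrizes its random input first (any matrix plus its transpose is symmetric). A sketch of the reconstruction (the factor orientation and the exact assertion used by the test are assumptions):

    DoubleTensorAlgebra {
        val tensor = randomNormal(intArrayOf(2, 3, 3), 0)
        val sym = tensor + tensor.transpose()  // force symmetry
        val (s, v) = sym.symEig()
        val rebuilt = v dot (diagonalEmbedding(s) dot v.transpose())  // should match sym
    }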
@@ -182,7 +184,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     }
 
 
-    private fun DoubleLinearOpsTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double = 1e-10): Unit {
+    private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double = 1e-10): Unit {
         val svd = tensor.svd()
 
         val tensorSVD = svd.first
@@ -8,6 +8,10 @@ import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.structures.DoubleBuffer
 import space.kscience.kmath.structures.toDoubleArray
 import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.internal.array
+import space.kscience.kmath.tensors.core.internal.asTensor
+import space.kscience.kmath.tensors.core.internal.matrixSequence
+import space.kscience.kmath.tensors.core.internal.toBufferedTensor
 import kotlin.test.Test
 import kotlin.test.assertEquals
 import kotlin.test.assertTrue
@@ -3,6 +3,7 @@ package space.kscience.kmath.tensors.core
 
 import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.internal.array
 import kotlin.test.Test
 import kotlin.test.assertFalse
 import kotlin.test.assertTrue