forked from kscience/kmath

commit cff563c321 (parent 8286db30af)

Major tensor refactoring
@@ -2,11 +2,14 @@
 
 ## [Unreleased]
 ### Added
+- Type-aliases for numbers like `Float64`
 - 2D optimal trajectory computation in a separate module `kmath-trajectory`
 - Autodiff for generic algebra elements in core!
 - Algebra now has an obligatory `bufferFactory` (#477).
 
 ### Changed
+- Tensor operations switched to prefix notation
+- Row-wise and column-wise ND shapes in the core
 - Shape is read-only
 - Major refactor of tensors (only minor API changes)
 - Kotlin 1.7.20
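The most consequential entry for users is the switch of tensor operations to prefix notation: reductions and element-wise functions are now called as functions of the algebra rather than as methods on the tensor, as the hunks below show. A minimal before/after sketch, assuming the `DoubleTensorAlgebra` API of this version (the tensor values are illustrative):

```kotlin
import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main() = DoubleTensorAlgebra {
    val tensor = fromArray(ShapeND(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0))
    // before this commit: tensor.mean()
    // after this commit:  mean(tensor)
    println(mean(tensor)) // prints 2.5
}
```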
@@ -13,6 +13,8 @@ import space.kscience.kmath.linear.linearSpace
 import space.kscience.kmath.linear.matrix
 import space.kscience.kmath.linear.symmetric
 import space.kscience.kmath.operations.DoubleField
+import space.kscience.kmath.tensors.core.symEigJacobi
+import space.kscience.kmath.tensors.core.symEigSvd
 import space.kscience.kmath.tensors.core.tensorAlgebra
 import kotlin.random.Random
 
@@ -27,11 +29,11 @@ internal class TensorAlgebraBenchmark {
 
     @Benchmark
     fun tensorSymEigSvd(blackhole: Blackhole) = with(Double.tensorAlgebra) {
-        blackhole.consume(matrix.symEigSvd(1e-10))
+        blackhole.consume(symEigSvd(matrix, 1e-10))
     }
 
     @Benchmark
     fun tensorSymEigJacobi(blackhole: Blackhole) = with(Double.tensorAlgebra) {
-        blackhole.consume(matrix.symEigJacobi(50, 1e-10))
+        blackhole.consume(symEigJacobi(matrix, 50, 1e-10))
     }
 }
@@ -15,7 +15,7 @@ allprojects {
     }
 
     group = "space.kscience"
-    version = "0.3.1-dev-6"
+    version = "0.3.1-dev-7"
 }
 
 subprojects {
@@ -8,17 +8,20 @@ package space.kscience.kmath.structures
 import space.kscience.kmath.nd.BufferND
 import space.kscience.kmath.nd.ShapeND
 import space.kscience.kmath.nd.StructureND
-import space.kscience.kmath.operations.map
+import space.kscience.kmath.operations.mapToBuffer
 import kotlin.system.measureTimeMillis
 
-private inline fun <T, reified R: Any> BufferND<T>.map(block: (T) -> R): BufferND<R> = BufferND(indices, buffer.map(block))
+private inline fun <T, reified R : Any> BufferND<T>.mapToBufferND(
+    bufferFactory: BufferFactory<R> = BufferFactory.auto(),
+    crossinline block: (T) -> R,
+): BufferND<R> = BufferND(indices, buffer.mapToBuffer(bufferFactory, block))
 
 @Suppress("UNUSED_VARIABLE")
 fun main() {
     val n = 6000
     val structure = StructureND.buffered(ShapeND(n, n), Buffer.Companion::auto) { 1.0 }
-    structure.map { it + 1 } // warm-up
-    val time1 = measureTimeMillis { val res = structure.map { it + 1 } }
+    structure.mapToBufferND { it + 1 } // warm-up
+    val time1 = measureTimeMillis { val res = structure.mapToBufferND { it + 1 } }
     println("Structure mapping finished in $time1 millis")
     val array = DoubleArray(n * n) { 1.0 }
 
@@ -5,18 +5,17 @@
 
 package space.kscience.kmath.tensors
 
-import space.kscience.kmath.misc.PerformancePitfall
 import space.kscience.kmath.nd.ShapeND
 import space.kscience.kmath.nd.contentEquals
 import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.tensors.core.DoubleTensor
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.randomNormal
+import space.kscience.kmath.tensors.core.randomNormalLike
 import kotlin.math.abs
 
 // OLS estimator using SVD
 
-@OptIn(PerformancePitfall::class)
 fun main() {
     //seed for random
     val randSeed = 100500L
@@ -42,10 +41,10 @@ fun main() {
 
     // calculate y and add gaussian noise (N(0, 0.05))
     val y = x dot alpha
-    y += y.randomNormalLike(randSeed) * 0.05
+    y += randomNormalLike(y, randSeed) * 0.05
 
     // now restore the coefficient vector with OSL estimator with SVD
-    val (u, singValues, v) = x.svd()
+    val (u, singValues, v) = svd(x)
 
     // we have to make sure the singular values of the matrix are not close to zero
     println("Singular values:\n$singValues")
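For reference, the coefficient recovery performed after this hunk is the standard pseudo-inverse solution of the least-squares problem (a textbook identity, not code from the commit):

$$\hat{\alpha} = V \, \Sigma^{-1} U^{\top} y$$

where $U$, $\Sigma$, $V$ are the factors returned by `svd(x)`, and reciprocals are taken only for singular values safely away from zero.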
@@ -66,7 +65,7 @@ fun main() {
     require(yTrue.shape contentEquals yPred.shape)
 
     val diff = yTrue - yPred
-    return diff.dot(diff).sqrt().value()
+    return sqrt(diff.dot(diff)).value()
 }
 
 println("MSE: ${mse(alpha, alphaOLS)}")
@@ -6,8 +6,7 @@
 package space.kscience.kmath.tensors
 
 import space.kscience.kmath.nd.ShapeND
-import space.kscience.kmath.tensors.core.tensorAlgebra
-import space.kscience.kmath.tensors.core.withBroadcast
+import space.kscience.kmath.tensors.core.*
 
 
 // simple PCA
@@ -22,7 +21,7 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with
     )
 
     // take y dependent on x with noise
-    val y = 2.0 * x + (3.0 + x.randomNormalLike(seed) * 1.5)
+    val y = 2.0 * x + (3.0 + randomNormalLike(x, seed) * 1.5)
 
     println("x:\n$x")
     println("y:\n$y")
@@ -31,14 +30,14 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with
     val dataset = stack(listOf(x, y)).transposed()
 
     // normalize both x and y
-    val xMean = x.mean()
-    val yMean = y.mean()
+    val xMean = mean(x)
+    val yMean = mean(y)
 
-    val xStd = x.std()
-    val yStd = y.std()
+    val xStd = std(x)
+    val yStd = std(y)
 
-    val xScaled = (x - xMean) / xStd
-    val yScaled = (y - yMean) / yStd
+    val xScaled: DoubleTensor = (x - xMean) / xStd
+    val yScaled: DoubleTensor = (y - yMean) / yStd
 
     // save means ans standard deviations for further recovery
     val mean = fromArray(
@@ -54,11 +53,11 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with
     println("Standard deviations:\n$std")
 
     // calculate the covariance matrix of scaled x and y
-    val covMatrix = cov(listOf(xScaled, yScaled))
+    val covMatrix = covariance(listOf(xScaled.asDoubleTensor1D(), yScaled.asDoubleTensor1D()))
     println("Covariance matrix:\n$covMatrix")
 
     // and find out eigenvector of it
-    val (_, evecs) = covMatrix.symEig()
+    val (_, evecs) = symEig(covMatrix)
     val v = evecs.getTensor(0)
     println("Eigenvector:\n$v")
 
@@ -6,6 +6,7 @@
 package space.kscience.kmath.tensors
 
 import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.tensors.core.randomNormal
 import space.kscience.kmath.tensors.core.tensorAlgebra
 import space.kscience.kmath.tensors.core.withBroadcast
 
@@ -23,8 +24,8 @@ fun main() = Double.tensorAlgebra.withBroadcast { // work in context with broad
 
 
     // find out mean and standard deviation of each column
-    val mean = dataset.mean(0, false)
-    val std = dataset.std(0, false)
+    val mean = mean(dataset, 0, false)
+    val std = std(dataset, 0, false)
 
     println("Mean:\n$mean")
     println("Standard deviation:\n$std")
@@ -36,8 +37,8 @@ fun main() = Double.tensorAlgebra.withBroadcast { // work in context with broad
     // now we can scale dataset with mean normalization
     val datasetScaled = (dataset - mean) / std
 
-    // find out mean and std of scaled dataset
+    // find out mean and standardDiviation of scaled dataset
 
-    println("Mean of scaled:\n${datasetScaled.mean(0, false)}")
-    println("Mean of scaled:\n${datasetScaled.std(0, false)}")
+    println("Mean of scaled:\n${mean(datasetScaled, 0, false)}")
+    println("Mean of scaled:\n${std(datasetScaled, 0, false)}")
 }
@@ -41,7 +41,7 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear
     // solve `Ax = b` system using LUP decomposition
 
     // get P, L, U such that PA = LU
-    val (p, l, u) = a.lu()
+    val (p, l, u) = lu(a)
 
     // check P is permutation matrix
     println("P:\n$p")
@@ -9,10 +9,7 @@ import space.kscience.kmath.nd.ShapeND
 import space.kscience.kmath.nd.contentEquals
 import space.kscience.kmath.operations.asIterable
 import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.DoubleTensor
-import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.toDoubleTensor
+import space.kscience.kmath.tensors.core.*
 import kotlin.math.sqrt
 
 const val seed = 100500L
@@ -51,7 +48,7 @@ fun reluDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
 class ReLU : Activation(::relu, ::reluDer)
 
 fun sigmoid(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
-    1.0 / (1.0 + (-x).exp())
+    1.0 / (1.0 + exp((-x)))
 }
 
 fun sigmoidDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
@@ -85,7 +82,7 @@ class Dense(
         val gradInput = outputError dot weights.transposed()
 
         val gradW = input.transposed() dot outputError
-        val gradBias = outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble()
+        val gradBias = mean(structureND = outputError, dim = 0, keepDim = false) * input.shape[0].toDouble()
 
         weights -= learningRate * gradW
         bias -= learningRate * gradBias
@@ -118,7 +115,7 @@ class NeuralNetwork(private val layers: List<Layer>) {
             onesForAnswers[intArrayOf(index, label)] = 1.0
         }
 
-        val softmaxValue = yPred.exp() / yPred.exp().sum(dim = 1, keepDim = true)
+        val softmaxValue = exp(yPred) / exp(yPred).sum(dim = 1, keepDim = true)
 
         (-onesForAnswers + softmaxValue) / (yPred.shape[0].toDouble())
     }
@@ -176,7 +173,6 @@ class NeuralNetwork(private val layers: List<Layer>) {
 }
 
 
-@OptIn(ExperimentalStdlibApi::class)
 fun main() = BroadcastDoubleTensorAlgebra {
     val features = 5
     val sampleSize = 250
@@ -11,3 +11,7 @@ org.gradle.configureondemand=true
 org.gradle.jvmargs=-Xmx4096m
 
 toolsVersion=0.13.1-kotlin-1.7.20
+
+
+org.gradle.parallel=true
+org.gradle.workers.max=4
@@ -13,12 +13,11 @@ import space.kscience.kmath.expressions.Symbol.Companion.x
 import space.kscience.kmath.expressions.Symbol.Companion.y
 import space.kscience.kmath.expressions.chiSquaredExpression
 import space.kscience.kmath.expressions.symbol
-import space.kscience.kmath.operations.map
+import space.kscience.kmath.operations.DoubleBufferOps.Companion.map
 import space.kscience.kmath.optimization.*
 import space.kscience.kmath.random.RandomGenerator
 import space.kscience.kmath.structures.DoubleBuffer
 import space.kscience.kmath.structures.asBuffer
-import kotlin.math.pow
 import kotlin.test.Test
 
 internal class OptimizeTest {
@@ -268,7 +268,7 @@ public open class DSRing<T, A>(
 
     protected fun DS<T, A>.mapData(block: A.(T) -> T): DS<T, A> {
        require(derivativeAlgebra == this@DSRing) { "All derivative operations should be done in the same algebra" }
-        val newData: Buffer<T> = data.map(valueBufferFactory) {
+        val newData: Buffer<T> = data.mapToBuffer(valueBufferFactory) {
            algebra.block(it)
        }
        return DS(newData)
@@ -276,7 +276,7 @@ public open class DSRing<T, A>(
 
     protected fun DS<T, A>.mapDataIndexed(block: (Int, T) -> T): DS<T, A> {
        require(derivativeAlgebra == this@DSRing) { "All derivative operations should be done in the same algebra" }
-        val newData: Buffer<T> = data.mapIndexed(valueBufferFactory, block)
+        val newData: Buffer<T> = data.mapIndexedToBuffer(valueBufferFactory, block)
        return DS(newData)
     }
 
@@ -11,6 +11,8 @@ import space.kscience.kmath.structures.Buffer
 import space.kscience.kmath.structures.indices
 import kotlin.jvm.JvmName
 
+//TODO move to stat
+
 /**
  * Generate a chi squared expression from given x-y-sigma data and inline model. Provides automatic
  * differentiation.
@@ -5,8 +5,6 @@
 
 package space.kscience.kmath.operations
 
-import space.kscience.kmath.misc.UnstableKMathAPI
-import space.kscience.kmath.operations.DoubleField.pow
 import space.kscience.kmath.structures.Buffer
 import space.kscience.kmath.structures.DoubleBuffer
 
@@ -32,9 +30,9 @@ public class DoubleBufferField(public val size: Int) : ExtendedField<Buffer<Doub
     override fun atanh(arg: Buffer<Double>): DoubleBuffer = super<DoubleBufferOps>.atanh(arg)
 
     override fun power(arg: Buffer<Double>, pow: Number): DoubleBuffer = if (pow.isInteger()) {
-        arg.mapInline { it.pow(pow.toInt()) }
+        arg.map { it.pow(pow.toInt()) }
     } else {
-        arg.mapInline {
+        arg.map {
             if(it<0) throw IllegalArgumentException("Negative argument $it could not be raised to the fractional power")
             it.pow(pow.toDouble())
         }
@@ -42,103 +40,4 @@ public class DoubleBufferField(public val size: Int) : ExtendedField<Buffer<Doub
 
     override fun unaryOperationFunction(operation: String): (arg: Buffer<Double>) -> Buffer<Double> =
         super<ExtendedField>.unaryOperationFunction(operation)
 
-//    override fun number(value: Number): Buffer<Double> = DoubleBuffer(size) { value.toDouble() }
-//
-//    override fun Buffer<Double>.unaryMinus(): Buffer<Double> = DoubleBufferOperations.run {
-//        -this@unaryMinus
-//    }
-//
-//    override fun add(a: Buffer<Double>, b: Buffer<Double>): DoubleBuffer {
-//        require(a.size == size) { "The buffer size ${a.size} does not match context size $size" }
-//        return DoubleBufferOperations.add(a, b)
-//    }
-//
-
-//
-//    override fun multiply(a: Buffer<Double>, b: Buffer<Double>): DoubleBuffer {
-//        require(a.size == size) { "The buffer size ${a.size} does not match context size $size" }
-//        return DoubleBufferOperations.multiply(a, b)
-//    }
-//
-//    override fun divide(a: Buffer<Double>, b: Buffer<Double>): DoubleBuffer {
-//        require(a.size == size) { "The buffer size ${a.size} does not match context size $size" }
-//        return DoubleBufferOperations.divide(a, b)
-//    }
-//
-//    override fun sin(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.sin(arg)
-//    }
-//
-//    override fun cos(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.cos(arg)
-//    }
-//
-//    override fun tan(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.tan(arg)
-//    }
-//
-//    override fun asin(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.asin(arg)
-//    }
-//
-//    override fun acos(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.acos(arg)
-//    }
-//
-//    override fun atan(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.atan(arg)
-//    }
-//
-//    override fun sinh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.sinh(arg)
-//    }
-//
-//    override fun cosh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.cosh(arg)
-//    }
-//
-//    override fun tanh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.tanh(arg)
-//    }
-//
-//    override fun asinh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.asinh(arg)
-//    }
-//
-//    override fun acosh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.acosh(arg)
-//    }
-//
-//    override fun atanh(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.atanh(arg)
-//    }
-//
-//    override fun power(arg: Buffer<Double>, pow: Number): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.power(arg, pow)
-//    }
-//
-//    override fun exp(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.exp(arg)
-//    }
-//
-//    override fun ln(arg: Buffer<Double>): DoubleBuffer {
-//        require(arg.size == size) { "The buffer size ${arg.size} does not match context size $size" }
-//        return DoubleBufferOperations.ln(arg)
-//    }
 
 }
@@ -6,10 +6,8 @@
 package space.kscience.kmath.operations
 
 import space.kscience.kmath.linear.Point
-import space.kscience.kmath.structures.Buffer
-import space.kscience.kmath.structures.DoubleBuffer
-import space.kscience.kmath.structures.MutableBufferFactory
-import space.kscience.kmath.structures.asBuffer
+import space.kscience.kmath.misc.UnstableKMathAPI
+import space.kscience.kmath.structures.*
 import kotlin.math.*
 
 /**
@@ -19,10 +17,29 @@ public abstract class DoubleBufferOps : BufferAlgebra<Double, DoubleField>, Exte
     Norm<Buffer<Double>, Double> {
 
     override val elementAlgebra: DoubleField get() = DoubleField
 
     override val elementBufferFactory: MutableBufferFactory<Double> get() = elementAlgebra.bufferFactory
 
-    override fun Buffer<Double>.map(block: DoubleField.(Double) -> Double): DoubleBuffer =
-        mapInline { DoubleField.block(it) }
+    @Suppress("OVERRIDE_BY_INLINE")
+    @OptIn(UnstableKMathAPI::class)
+    final override inline fun Buffer<Double>.map(block: DoubleField.(Double) -> Double): DoubleBuffer =
+        DoubleArray(size) { DoubleField.block(getDouble(it)) }.asBuffer()
 
+
+    @OptIn(UnstableKMathAPI::class)
+    @Suppress("OVERRIDE_BY_INLINE")
+    final override inline fun Buffer<Double>.mapIndexed(block: DoubleField.(index: Int, arg: Double) -> Double): DoubleBuffer =
+        DoubleBuffer(size) { DoubleField.block(it, getDouble(it)) }
+
+    @OptIn(UnstableKMathAPI::class)
+    @Suppress("OVERRIDE_BY_INLINE")
+    final override inline fun Buffer<Double>.zip(
+        other: Buffer<Double>,
+        block: DoubleField.(left: Double, right: Double) -> Double,
+    ): DoubleBuffer {
+        require(size == other.size) { "Incompatible buffer sizes. left: ${size}, right: ${other.size}" }
+        return DoubleBuffer(size) { DoubleField.block(getDouble(it), other.getDouble(it)) }
+    }
 
     override fun unaryOperationFunction(operation: String): (arg: Buffer<Double>) -> Buffer<Double> =
         super<ExtendedFieldOps>.unaryOperationFunction(operation)
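Replacing the old `mapInline` helper with a `final inline` override of `map` lets the compiler inline the lambda into a plain `DoubleArray` loop with unboxed `getDouble` access. A minimal usage sketch, assuming the API at this version:

```kotlin
import space.kscience.kmath.operations.DoubleBufferOps
import space.kscience.kmath.structures.DoubleBuffer

fun main() = with(DoubleBufferOps) {
    val buffer = DoubleBuffer(1.0, 2.0, 3.0)
    // The block is inlined into a DoubleArray loop: no boxing of elements.
    val doubled = buffer.map { it * 2 }
    println(doubled.size) // prints 3
}
```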
@@ -30,7 +47,7 @@ public abstract class DoubleBufferOps : BufferAlgebra<Double, DoubleField>, Exte
     override fun binaryOperationFunction(operation: String): (left: Buffer<Double>, right: Buffer<Double>) -> Buffer<Double> =
         super<ExtendedFieldOps>.binaryOperationFunction(operation)
 
-    override fun Buffer<Double>.unaryMinus(): DoubleBuffer = mapInline { -it }
+    override fun Buffer<Double>.unaryMinus(): DoubleBuffer = map { -it }
 
     override fun add(left: Buffer<Double>, right: Buffer<Double>): DoubleBuffer {
         require(right.size == left.size) {
@@ -77,6 +94,7 @@ public abstract class DoubleBufferOps : BufferAlgebra<Double, DoubleField>, Exte
 //    } else RealBuffer(DoubleArray(a.size) { a[it] / kValue })
 //    }
 
+    @UnstableKMathAPI
     override fun multiply(left: Buffer<Double>, right: Buffer<Double>): DoubleBuffer {
         require(right.size == left.size) {
             "The size of the first buffer ${left.size} should be the same as for second one: ${right.size} "
@@ -101,55 +119,83 @@ public abstract class DoubleBufferOps : BufferAlgebra<Double, DoubleField>, Exte
         } else DoubleBuffer(DoubleArray(left.size) { left[it] / right[it] })
     }
 
-    override fun sin(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::sin)
+    override fun sin(arg: Buffer<Double>): DoubleBuffer = arg.map { sin(it) }
 
-    override fun cos(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::cos)
+    override fun cos(arg: Buffer<Double>): DoubleBuffer = arg.map { cos(it) }
 
-    override fun tan(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::tan)
+    override fun tan(arg: Buffer<Double>): DoubleBuffer = arg.map { tan(it) }
 
-    override fun asin(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::asin)
+    override fun asin(arg: Buffer<Double>): DoubleBuffer = arg.map { asin(it) }
 
-    override fun acos(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::acos)
+    override fun acos(arg: Buffer<Double>): DoubleBuffer = arg.map { acos(it) }
 
-    override fun atan(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::atan)
+    override fun atan(arg: Buffer<Double>): DoubleBuffer = arg.map { atan(it) }
 
-    override fun sinh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::sinh)
+    override fun sinh(arg: Buffer<Double>): DoubleBuffer = arg.map { sinh(it) }
 
-    override fun cosh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::cosh)
+    override fun cosh(arg: Buffer<Double>): DoubleBuffer = arg.map { cosh(it) }
 
-    override fun tanh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::tanh)
+    override fun tanh(arg: Buffer<Double>): DoubleBuffer = arg.map { tanh(it) }
 
-    override fun asinh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::asinh)
+    override fun asinh(arg: Buffer<Double>): DoubleBuffer = arg.map { asinh(it) }
 
-    override fun acosh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::acosh)
+    override fun acosh(arg: Buffer<Double>): DoubleBuffer = arg.map { acosh(it) }
 
-    override fun atanh(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::atanh)
+    override fun atanh(arg: Buffer<Double>): DoubleBuffer = arg.map { atanh(it) }
 
-    override fun exp(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::exp)
+    override fun exp(arg: Buffer<Double>): DoubleBuffer = arg.map { exp(it) }
 
-    override fun ln(arg: Buffer<Double>): DoubleBuffer = arg.mapInline(::ln)
+    override fun ln(arg: Buffer<Double>): DoubleBuffer = arg.map { ln(it) }
 
     override fun norm(arg: Buffer<Double>): Double = DoubleL2Norm.norm(arg)
 
-    override fun scale(a: Buffer<Double>, value: Double): DoubleBuffer = a.mapInline { it * value }
+    override fun scale(a: Buffer<Double>, value: Double): DoubleBuffer = a.map { it * value }
 
     override fun power(arg: Buffer<Double>, pow: Number): Buffer<Double> = if (pow is Int) {
-        arg.mapInline { it.pow(pow) }
+        arg.map { it.pow(pow) }
     } else {
-        arg.mapInline { it.pow(pow.toDouble()) }
+        arg.map { it.pow(pow.toDouble()) }
     }
 
-    public companion object : DoubleBufferOps() {
-        public inline fun Buffer<Double>.mapInline(block: (Double) -> Double): DoubleBuffer =
-            if (this is DoubleBuffer) {
-                DoubleArray(size) { block(array[it]) }.asBuffer()
-            } else {
-                DoubleArray(size) { block(get(it)) }.asBuffer()
-            }
-    }
+    public companion object : DoubleBufferOps()
 }
 
 public object DoubleL2Norm : Norm<Point<Double>, Double> {
     override fun norm(arg: Point<Double>): Double = sqrt(arg.fold(0.0) { acc: Double, d: Double -> acc + d.pow(2) })
 }
 
+public fun DoubleBufferOps.sum(buffer: Buffer<Double>): Double = buffer.reduce(Double::plus)
+
+/**
+ * Sum of elements using given [conversion]
+ */
+public inline fun <T> DoubleBufferOps.sumOf(buffer: Buffer<T>, conversion: (T) -> Double): Double =
+    buffer.fold(0.0) { acc, value -> acc + conversion(value) }
+
+public fun DoubleBufferOps.average(buffer: Buffer<Double>): Double = sum(buffer) / buffer.size
+
+/**
+ * Average of elements using given [conversion]
+ */
+public inline fun <T> DoubleBufferOps.averageOf(buffer: Buffer<T>, conversion: (T) -> Double): Double =
+    sumOf(buffer, conversion) / buffer.size
+
+public fun DoubleBufferOps.dispersion(buffer: Buffer<Double>): Double {
+    val av = average(buffer)
+    return buffer.fold(0.0) { acc, value -> acc + (value - av).pow(2) } / buffer.size
+}
+
+public fun DoubleBufferOps.std(buffer: Buffer<Double>): Double = sqrt(dispersion(buffer))
+
+public fun DoubleBufferOps.covariance(x: Buffer<Double>, y: Buffer<Double>): Double {
+    require(x.size == y.size) { "Expected buffers of the same size, but x.size == ${x.size} and y.size == ${y.size}" }
+    val xMean = average(x)
+    val yMean = average(y)
+    var sum = 0.0
+    x.indices.forEach {
+        sum += (x[it] - xMean) * (y[it] - yMean)
+    }
+    return sum / (x.size - 1)
+}
+
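The new extensions put basic descriptive statistics in scope wherever a `DoubleBufferOps` is available. Note the two conventions in the added code: `dispersion` divides by `n` (population variance), while `covariance` divides by `n - 1` (sample covariance). A usage sketch, assuming the API at this version:

```kotlin
import space.kscience.kmath.operations.DoubleBufferOps
import space.kscience.kmath.operations.average
import space.kscience.kmath.operations.covariance
import space.kscience.kmath.operations.std
import space.kscience.kmath.structures.asBuffer

fun main() = with(DoubleBufferOps) {
    val x = doubleArrayOf(1.0, 2.0, 3.0, 4.0).asBuffer()
    val y = doubleArrayOf(2.0, 4.0, 6.0, 8.0).asBuffer()
    println(average(x))       // 2.5
    println(std(x))           // sqrt(1.25), population convention
    println(covariance(x, y)) // 10.0 / 3, sample convention
}
```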
@@ -5,6 +5,21 @@
 
 package space.kscience.kmath.operations
 
+import space.kscience.kmath.misc.PerformancePitfall
+import space.kscience.kmath.structures.Buffer
+
+/**
+ * Returns the sum of all elements in the iterable in this [Group].
+ *
+ * @receiver the algebra that provides addition.
+ * @param data the iterable to sum up.
+ * @return the sum.
+ */
+@PerformancePitfall("Potential boxing access to buffer elements")
+public fun <T> Group<T>.sum(data: Buffer<T>): T = data.fold(zero) { left, right ->
+    add(left, right)
+}
+
 /**
  * Returns the sum of all elements in the iterable in this [Group].
  *
@@ -29,6 +44,18 @@ public fun <T> Group<T>.sum(data: Sequence<T>): T = data.fold(zero) { left, righ
     add(left, right)
 }
 
+/**
+ * Returns an average value of elements in the iterable in this [Group].
+ *
+ * @receiver the algebra that provides addition and division.
+ * @param data the iterable to find average.
+ * @return the average value.
+ * @author Iaroslav Postovalov
+ */
+@PerformancePitfall("Potential boxing access to buffer elements")
+public fun <T, S> S.average(data: Buffer<T>): T where S : Group<T>, S : ScaleOperations<T> =
+    sum(data) / data.size
+
 /**
  * Returns an average value of elements in the iterable in this [Group].
  *
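The buffer overloads mirror the existing `Iterable` and `Sequence` versions but read elements through the generic `Buffer.get`, hence the `@PerformancePitfall` boxing warning. A usage sketch, assuming the API at this version:

```kotlin
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.average
import space.kscience.kmath.operations.sum
import space.kscience.kmath.structures.asBuffer

@OptIn(PerformancePitfall::class)
fun main() {
    val data = doubleArrayOf(1.0, 2.0, 3.0).asBuffer()
    println(DoubleField.sum(data))     // 6.0 (each element is boxed on access)
    println(DoubleField.average(data)) // 2.0
}
```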
@@ -95,4 +122,3 @@ public fun <T, S> Iterable<T>.averageWith(space: S): T where S : Group<T>, S : S
  */
 public fun <T, S> Sequence<T>.averageWith(space: S): T where S : Group<T>, S : ScaleOperations<T> =
     space.average(this)
-
@@ -57,6 +57,9 @@ public fun Buffer<Double>.toDoubleArray(): DoubleArray = when (this) {
     else -> DoubleArray(size, ::get)
 }
 
+/**
+ * Represent this buffer as [DoubleBuffer]. Does not guarantee that changes in the original buffer are reflected on this buffer.
+ */
 public fun Buffer<Double>.toDoubleBuffer(): DoubleBuffer = when (this) {
     is DoubleBuffer -> this
     else -> DoubleArray(size, ::get).asBuffer()
@@ -61,18 +61,18 @@ public fun <T> Buffer<T>.toMutableList(): MutableList<T> = when (this) {
  */
 @UnstableKMathAPI
 public inline fun <reified T> Buffer<T>.toTypedArray(): Array<T> = Array(size, ::get)
 
-/**
- * Create a new buffer from this one with the given mapping function and using [Buffer.Companion.auto] buffer factory.
- */
-public inline fun <T, reified R : Any> Buffer<T>.map(block: (T) -> R): Buffer<R> =
-    Buffer.auto(size) { block(get(it)) }
+//
+///**
+// * Create a new buffer from this one with the given mapping function and using [Buffer.Companion.auto] buffer factory.
+// */
+//public inline fun <T, reified R : Any> Buffer<T>.map(block: (T) -> R): Buffer<R> =
+//    Buffer.auto(size) { block(get(it)) }
 
 /**
  * Create a new buffer from this one with the given mapping function.
  * Provided [bufferFactory] is used to construct the new buffer.
  */
-public inline fun <T, R> Buffer<T>.map(
+public inline fun <T, R> Buffer<T>.mapToBuffer(
     bufferFactory: BufferFactory<R>,
     crossinline block: (T) -> R,
 ): Buffer<R> = bufferFactory(size) { block(get(it)) }
@@ -81,23 +81,24 @@ public inline fun <T, R> Buffer<T>.map(
  * Create a new buffer from this one with the given mapping (indexed) function.
  * Provided [bufferFactory] is used to construct the new buffer.
  */
-public inline fun <T, R> Buffer<T>.mapIndexed(
+public inline fun <T, R> Buffer<T>.mapIndexedToBuffer(
     bufferFactory: BufferFactory<R>,
     crossinline block: (index: Int, value: T) -> R,
 ): Buffer<R> = bufferFactory(size) { block(it, get(it)) }
 
-/**
- * Create a new buffer from this one with the given indexed mapping function.
- * Provided [BufferFactory] is used to construct the new buffer.
- */
-public inline fun <T, reified R : Any> Buffer<T>.mapIndexed(
-    crossinline block: (index: Int, value: T) -> R,
-): Buffer<R> = Buffer.auto(size) { block(it, get(it)) }
+//
+///**
+// * Create a new buffer from this one with the given indexed mapping function.
+// * Provided [BufferFactory] is used to construct the new buffer.
+// */
+//public inline fun <T, reified R : Any> Buffer<T>.mapIndexed(
+//    crossinline block: (index: Int, value: T) -> R,
+//): Buffer<R> = Buffer.auto(size) { block(it, get(it)) }
 
 /**
  * Fold given buffer according to [operation]
  */
 public inline fun <T, R> Buffer<T>.fold(initial: R, operation: (acc: R, T) -> R): R {
+    if (size == 0) return initial
     var accumulator = initial
     for (index in this.indices) accumulator = operation(accumulator, get(index))
     return accumulator
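The new `size == 0` guard in `fold` is an early return; the loop below it would also yield `initial` for an empty buffer, so observable behavior is unchanged. A usage sketch (the import path is assumed from the other hunks of this commit):

```kotlin
import space.kscience.kmath.operations.fold
import space.kscience.kmath.structures.DoubleBuffer

fun main() {
    val buffer = DoubleBuffer(1.0, 2.0, 3.0)
    // Threads the accumulator through the buffer from left to right.
    println(buffer.fold(0.0) { acc, value -> acc + value }) // 6.0
}
```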
@@ -107,18 +108,31 @@ public inline fun <T, R> Buffer<T>.fold(initial: R, operation: (acc: R, T) -> R)
  * Fold given buffer according to indexed [operation]
  */
 public inline fun <T : Any, R> Buffer<T>.foldIndexed(initial: R, operation: (index: Int, acc: R, T) -> R): R {
+    if (size == 0) return initial
     var accumulator = initial
     for (index in this.indices) accumulator = operation(index, accumulator, get(index))
     return accumulator
 }
 
+/**
+ * Reduce a buffer from left to right according to [operation]
+ */
+public inline fun <T> Buffer<T>.reduce(operation: (left: T, value: T) -> T): T {
+    require(size > 0) { "Buffer must have elements" }
+    var current = get(0)
+    for (i in 1 until size) {
+        current = operation(current, get(i))
+    }
+    return current
+}
+
 /**
  * Zip two buffers using given [transform].
  */
 @UnstableKMathAPI
-public inline fun <T1, T2 : Any, reified R : Any> Buffer<T1>.zip(
+public inline fun <T1, T2, R> Buffer<T1>.combineToBuffer(
     other: Buffer<T2>,
-    bufferFactory: BufferFactory<R> = BufferFactory.auto(),
+    bufferFactory: BufferFactory<R>,
     crossinline transform: (T1, T2) -> R,
 ): Buffer<R> {
     require(size == other.size) { "Buffer size mismatch in zip: expected $size but found ${other.size}" }
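`reduce` differs from `fold` in that it seeds the accumulator with the first element and therefore requires a non-empty buffer, while `combineToBuffer` is the renamed `zip` with the buffer factory now mandatory. A usage sketch, assuming the API at this version:

```kotlin
import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.operations.combineToBuffer
import space.kscience.kmath.operations.reduce
import space.kscience.kmath.structures.DoubleBuffer

@OptIn(UnstableKMathAPI::class)
fun main() {
    val a = DoubleBuffer(1.0, 2.0, 3.0)
    val b = DoubleBuffer(10.0, 20.0, 30.0)
    println(a.reduce { left, value -> left + value }) // 6.0
    val sums = a.combineToBuffer(b, ::DoubleBuffer) { x, y -> x + y }
    println(sums[1]) // 22.0
}
```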
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2018-2022 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.structures
+
+
+public typealias Float32 = Float
+public typealias Float64 = Double
+
+public typealias Int8 = Byte
+public typealias Int16 = Short
+public typealias Int32 = Int
+public typealias Int64 = Long
+
+public typealias UInt8 = UByte
+public typealias UInt16 = UShort
+public typealias UInt32 = UInt
+public typealias UInt64 = ULong
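The aliases are plain `typealias` declarations, so `Float64` and `Double` are the same type at every use site; they exist to make numeric width explicit in signatures. A small illustration:

```kotlin
import space.kscience.kmath.structures.Float64
import space.kscience.kmath.structures.Int32

// Float64 is literally Double, so the two mix freely.
fun scale(value: Float64, factor: Int32): Float64 = value * factor

fun main() {
    val x: Double = 2.5
    println(scale(x, 4)) // 10.0
}
```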
@@ -81,9 +81,7 @@ public suspend fun <T> AsyncFlow<T>.collect(concurrency: Int, collector: FlowCol
 public suspend inline fun <T> AsyncFlow<T>.collect(
     concurrency: Int,
     crossinline action: suspend (value: T) -> Unit,
-): Unit = collect(concurrency, object : FlowCollector<T> {
-    override suspend fun emit(value: T): Unit = action(value)
-})
+): Unit = collect(concurrency, FlowCollector<T> { value -> action(value) })
 
 public inline fun <T, R> Flow<T>.mapParallel(
     dispatcher: CoroutineDispatcher = Dispatchers.Default,
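The object expression collapses to a SAM constructor because `FlowCollector` is declared as a `fun` interface in the kotlinx.coroutines versions this commit appears to target. The same pattern in miniature (illustrative, not from the commit):

```kotlin
fun interface Emitter<T> {
    suspend fun emit(value: T)
}

suspend fun demo() {
    // The lambda becomes the single abstract method of the fun interface.
    val printer = Emitter<Int> { value -> println(value) }
    printer.emit(42)
}
```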
@@ -5,7 +5,7 @@
 
 package space.kscience.kmath.integration
 
-import space.kscience.kmath.operations.map
+import space.kscience.kmath.operations.mapToBuffer
 import space.kscience.kmath.structures.Buffer
 import space.kscience.kmath.structures.DoubleBuffer
 import space.kscience.kmath.structures.asBuffer
@@ -33,11 +33,11 @@ public fun GaussIntegratorRuleFactory.build(
     val normalized: Pair<Buffer<Double>, Buffer<Double>> = build(numPoints)
     val length = range.endInclusive - range.start
 
-    val points = normalized.first.map(::DoubleBuffer) {
+    val points = normalized.first.mapToBuffer(::DoubleBuffer) {
         range.start + length / 2 + length / 2 * it
     }
 
-    val weights = normalized.second.map(::DoubleBuffer) {
+    val weights = normalized.second.mapToBuffer(::DoubleBuffer) {
         it * length / 2
     }
 
@@ -65,9 +65,9 @@ public class SplineIntegrator<T : Comparable<T>>(
         DoubleBuffer(numPoints) { i -> range.start + i * step }
     }
 
-    val values = nodes.map(bufferFactory) { integrand.function(it) }
+    val values = nodes.mapToBuffer(bufferFactory) { integrand.function(it) }
     val polynomials = interpolator.interpolatePolynomials(
-        nodes.map(bufferFactory) { number(it) },
+        nodes.mapToBuffer(bufferFactory) { number(it) },
         values
     )
     val res = polynomials.integrate(algebra, number(range.start)..number(range.endInclusive))
@@ -93,7 +93,7 @@ public object DoubleSplineIntegrator : UnivariateIntegrator<Double> {
         DoubleBuffer(numPoints) { i -> range.start + i * step }
     }
 
-    val values = nodes.map { integrand.function(it) }
+    val values = nodes.mapToBuffer(::DoubleBuffer) { integrand.function(it) }
     val polynomials = interpolator.interpolatePolynomials(nodes, values)
     val res = polynomials.integrate(DoubleField, range)
     return integrand + IntegrandValue(res) + IntegrandCallsPerformed(integrand.calls + nodes.size)
@@ -97,7 +97,7 @@ public class UniformHistogramGroupND<V : Any, A : Field<V>>(
             }
         }
         hBuilder.apply(builder)
-        val values: BufferND<V> = BufferND(ndCounter.indices, ndCounter.buffer.map(valueBufferFactory) { it.value })
+        val values: BufferND<V> = BufferND(ndCounter.indices, ndCounter.buffer.mapToBuffer(valueBufferFactory) { it.value })
 
         return HistogramND(this, values)
     }
@@ -12,6 +12,7 @@ import space.kscience.kmath.nd.StructureND
 import space.kscience.kmath.nd.one
 import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.randomNormal
 import space.kscience.kmath.tensors.core.tensorAlgebra
 import kotlin.test.Test
 import kotlin.test.assertTrue
@@ -118,35 +118,35 @@ public sealed interface Nd4jTensorAlgebra<T : Number, A : Field<T>> : AnalyticTe
     override fun StructureND<T>.argMax(dim: Int, keepDim: Boolean): Tensor<Int> =
         ndBase.get().argmax(ndArray, keepDim, dim).asIntStructure()
 
-    override fun StructureND<T>.mean(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
-        ndArray.mean(keepDim, dim).wrap()
+    override fun mean(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T> =
+        structureND.ndArray.mean(keepDim, dim).wrap()
 
-    override fun StructureND<T>.exp(): Nd4jArrayStructure<T> = Transforms.exp(ndArray).wrap()
-    override fun StructureND<T>.ln(): Nd4jArrayStructure<T> = Transforms.log(ndArray).wrap()
-    override fun StructureND<T>.sqrt(): Nd4jArrayStructure<T> = Transforms.sqrt(ndArray).wrap()
-    override fun StructureND<T>.cos(): Nd4jArrayStructure<T> = Transforms.cos(ndArray).wrap()
-    override fun StructureND<T>.acos(): Nd4jArrayStructure<T> = Transforms.acos(ndArray).wrap()
-    override fun StructureND<T>.cosh(): Nd4jArrayStructure<T> = Transforms.cosh(ndArray).wrap()
+    override fun exp(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.exp(arg.ndArray).wrap()
+    override fun ln(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.log(arg.ndArray).wrap()
+    override fun sqrt(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.sqrt(arg.ndArray).wrap()
+    override fun cos(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.cos(arg.ndArray).wrap()
+    override fun acos(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.acos(arg.ndArray).wrap()
+    override fun cosh(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.cosh(arg.ndArray).wrap()
 
-    override fun StructureND<T>.acosh(): Nd4jArrayStructure<T> =
-        Nd4j.getExecutioner().exec(ACosh(ndArray, ndArray.ulike())).wrap()
+    override fun acosh(arg: StructureND<T>): Nd4jArrayStructure<T> =
+        Nd4j.getExecutioner().exec(ACosh(arg.ndArray, arg.ndArray.ulike())).wrap()
 
-    override fun StructureND<T>.sin(): Nd4jArrayStructure<T> = Transforms.sin(ndArray).wrap()
-    override fun StructureND<T>.asin(): Nd4jArrayStructure<T> = Transforms.asin(ndArray).wrap()
-    override fun StructureND<T>.sinh(): Tensor<T> = Transforms.sinh(ndArray).wrap()
+    override fun sin(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.sin(arg.ndArray).wrap()
+    override fun asin(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.asin(arg.ndArray).wrap()
+    override fun sinh(arg: StructureND<T>): Tensor<T> = Transforms.sinh(arg.ndArray).wrap()
 
-    override fun StructureND<T>.asinh(): Nd4jArrayStructure<T> =
-        Nd4j.getExecutioner().exec(ASinh(ndArray, ndArray.ulike())).wrap()
+    override fun asinh(arg: StructureND<T>): Nd4jArrayStructure<T> =
+        Nd4j.getExecutioner().exec(ASinh(arg.ndArray, arg.ndArray.ulike())).wrap()
 
-    override fun StructureND<T>.tan(): Nd4jArrayStructure<T> = Transforms.tan(ndArray).wrap()
-    override fun StructureND<T>.atan(): Nd4jArrayStructure<T> = Transforms.atan(ndArray).wrap()
-    override fun StructureND<T>.tanh(): Nd4jArrayStructure<T> = Transforms.tanh(ndArray).wrap()
-    override fun StructureND<T>.atanh(): Nd4jArrayStructure<T> = Transforms.atanh(ndArray).wrap()
+    override fun tan(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.tan(arg.ndArray).wrap()
+    override fun atan(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.atan(arg.ndArray).wrap()
+    override fun tanh(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.tanh(arg.ndArray).wrap()
+    override fun atanh(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.atanh(arg.ndArray).wrap()
     override fun power(arg: StructureND<T>, pow: Number): StructureND<T> = Transforms.pow(arg.ndArray, pow).wrap()
-    override fun StructureND<T>.ceil(): Nd4jArrayStructure<T> = Transforms.ceil(ndArray).wrap()
-    override fun StructureND<T>.floor(): Nd4jArrayStructure<T> = Transforms.floor(ndArray).wrap()
-    override fun StructureND<T>.std(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
-        ndArray.std(true, keepDim, dim).wrap()
+    override fun ceil(arg: StructureND<T>): Nd4jArrayStructure<T> = Transforms.ceil(arg.ndArray).wrap()
+    override fun floor(structureND: StructureND<T>): Nd4jArrayStructure<T> = Transforms.floor(structureND.ndArray).wrap()
+    override fun std(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T> =
+        structureND.ndArray.std(true, keepDim, dim).wrap()
 
     override fun T.div(arg: StructureND<T>): Nd4jArrayStructure<T> = arg.ndArray.rdiv(this).wrap()
     override fun StructureND<T>.div(arg: T): Nd4jArrayStructure<T> = ndArray.div(arg).wrap()
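With the signatures moved to prefix form, call sites migrate mechanically from `t.exp()` to `exp(t)`. A hedged sketch of a call site, assuming `DoubleNd4jTensorAlgebra` lives in the kmath-nd4j module's `space.kscience.kmath.nd4j` package:

```kotlin
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.nd4j.DoubleNd4jTensorAlgebra
import space.kscience.kmath.operations.invoke

fun describe(t: StructureND<Double>) = DoubleNd4jTensorAlgebra {
    val e = exp(t)            // was: t.exp()
    val m = mean(t, 0, false) // was: t.mean(0, false)
    println("${e.shape} ${m.shape}")
}
```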
@@ -160,8 +160,8 @@ public sealed interface Nd4jTensorAlgebra<T : Number, A : Field<T>> : AnalyticTe
         ndArray.divi(arg.ndArray)
     }
 
-    override fun StructureND<T>.variance(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
-        Nd4j.getExecutioner().exec(Variance(ndArray, true, true, dim)).wrap()
+    override fun variance(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T> =
+        Nd4j.getExecutioner().exec(Variance(structureND.ndArray, true, true, dim)).wrap()
 
     private companion object {
         private val ndBase: ThreadLocal<NDBase> = ThreadLocal.withInitial(::NDBase)
@ -211,7 +211,7 @@ public object DoubleNd4jTensorAlgebra : Nd4jTensorAlgebra<Double, DoubleField> {
|
|||||||
override fun StructureND<Double>.sum(): Double = ndArray.sumNumber().toDouble()
|
override fun StructureND<Double>.sum(): Double = ndArray.sumNumber().toDouble()
|
||||||
override fun StructureND<Double>.min(): Double = ndArray.minNumber().toDouble()
|
override fun StructureND<Double>.min(): Double = ndArray.minNumber().toDouble()
|
||||||
override fun StructureND<Double>.max(): Double = ndArray.maxNumber().toDouble()
|
override fun StructureND<Double>.max(): Double = ndArray.maxNumber().toDouble()
|
||||||
override fun StructureND<Double>.mean(): Double = ndArray.meanNumber().toDouble()
|
override fun mean(structureND: StructureND<Double>): Double = structureND.ndArray.meanNumber().toDouble()
|
||||||
override fun StructureND<Double>.std(): Double = ndArray.stdNumber().toDouble()
|
override fun std(structureND: StructureND<Double>): Double = structureND.ndArray.stdNumber().toDouble()
|
||||||
override fun StructureND<Double>.variance(): Double = ndArray.varNumber().toDouble()
|
override fun variance(structureND: StructureND<Double>): Double = structureND.ndArray.varNumber().toDouble()
|
||||||
}
|
}
|
||||||
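For orientation, a minimal sketch (not part of the commit) of how call sites migrate to the prefix notation introduced above. It uses the `Double.tensorAlgebra` entry point defined at the end of this diff; the import paths are taken from elsewhere in this changeset.

import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.tensorAlgebra

fun main() = with(Double.tensorAlgebra) {
    val t = fromArray(ShapeND(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0))
    // Receiver style before this commit: t.mean(), t.std(0, false)
    println(mean(t))                          // mean of all elements
    println(std(t, dim = 0, keepDim = false)) // per-column standard deviation
}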
@@ -33,7 +33,7 @@ internal class HessianGradientCalculator(fcn: MnFcn, par: MnUserTransformation,
 val g2: RealVector = gradient.getGradientDerivative()
 val gstep: RealVector = gradient.getStep()
 val fcnmin: Double = par.fval()
-// std::cout<<"fval: "<<fcnmin<<std::endl;
+// standardDeviation::cout<<"fval: "<<fcnmin<<standardDeviation::endl;
 val dfmin: Double = 4.0 * precision().eps2() * (abs(fcnmin) + theFcn.errorDef())
 val n: Int = x.getDimension()
 val dgrd: RealVector = ArrayRealVector(n)
@@ -36,7 +36,7 @@ internal object MnPosDef {
 if (err.size() === 1 && err[0, 0] > prec.eps()) {
 return e
 }
-// std::cout<<"MnPosDef init matrix= "<<err<<std::endl;
+// standardDeviation::cout<<"MnPosDef init matrix= "<<err<<standardDeviation::endl;
 val epspdf: Double = max(1e-6, prec.eps2())
 var dgmin: Double = err[0, 0]
 for (i in 0 until err.nrow()) {
@@ -66,11 +66,11 @@ internal object MnPosDef {
 }
 }
 
-// std::cout<<"MnPosDef p: "<<p<<std::endl;
+// standardDeviation::cout<<"MnPosDef p: "<<p<<standardDeviation::endl;
 val eval: RealVector = p.eigenvalues()
 val pmin: Double = eval.getEntry(0)
 var pmax: Double = eval.getEntry(eval.getDimension() - 1)
-// std::cout<<"pmin= "<<pmin<<" pmax= "<<pmax<<std::endl;
+// standardDeviation::cout<<"pmin= "<<pmin<<" pmax= "<<pmax<<standardDeviation::endl;
 pmax = max(abs(pmax), 1.0)
 if (pmin > epspdf * pmax) {
 return e
@@ -81,9 +81,9 @@ internal object MnPosDef {
 err[i, i] = err[i, i] * (1.0 + padd)
 MINUITPlugin.logStatic(java.lang.Double.toString(eval.getEntry(i)))
 }
-// std::cout<<"MnPosDef final matrix: "<<err<<std::endl;
+// standardDeviation::cout<<"MnPosDef final matrix: "<<err<<standardDeviation::endl;
 MINUITPlugin.logStatic("matrix forced pos-def by adding $padd to diagonal")
-// std::cout<<"eigenvalues: "<<eval<<std::endl;
+// standardDeviation::cout<<"eigenvalues: "<<eval<<standardDeviation::endl;
 return MinimumError(err, MnMadePosDef())
 }
 }
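The MnPosDef logic above forces a near-singular error matrix to be positive definite by padding its diagonal when the smallest eigenvalue is too close to zero relative to the largest. A self-contained sketch of that idea follows; the padding rule and the stand-in for prec.eps2() are illustrative assumptions, not the MINUIT code.

import kotlin.math.abs
import kotlin.math.max

// Illustrative only: mirrors the structure of MnPosDef above.
fun forcePositiveDefinite(diagonal: DoubleArray, pmin: Double, pmax: Double): DoubleArray {
    val epspdf = max(1e-6, 2.2e-16)      // assumed stand-in for prec.eps2()
    val bound = max(abs(pmax), 1.0)
    if (pmin > epspdf * bound) return diagonal // already safely positive definite
    val padd = epspdf * bound - pmin           // assumed padding rule
    return DoubleArray(diagonal.size) { i -> diagonal[i] * (1.0 + padd) }
}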
@@ -52,16 +52,16 @@ public class Mean<T>(
 @Deprecated("Use Long.mean instead")
 public val long: Mean<Long> = Mean(LongRing) { sum, count -> sum / count }
 
-public fun evaluate(buffer: Buffer<Double>): Double = Double.mean.evaluateBlocking(buffer)
+public fun evaluate(buffer: Buffer<Double>): Double = DoubleField.mean.evaluateBlocking(buffer)
-public fun evaluate(buffer: Buffer<Int>): Int = Int.mean.evaluateBlocking(buffer)
+public fun evaluate(buffer: Buffer<Int>): Int = IntRing.mean.evaluateBlocking(buffer)
-public fun evaluate(buffer: Buffer<Long>): Long = Long.mean.evaluateBlocking(buffer)
+public fun evaluate(buffer: Buffer<Long>): Long = LongRing.mean.evaluateBlocking(buffer)
 }
 }
 
 
 //TODO replace with optimized version which respects overflow
-public val Double.Companion.mean: Mean<Double> get() = Mean(DoubleField) { sum, count -> sum / count }
+public val DoubleField.mean: Mean<Double> get() = Mean(DoubleField) { sum, count -> sum / count }
-public val Int.Companion.mean: Mean<Int> get() = Mean(IntRing) { sum, count -> sum / count }
+public val IntRing.mean: Mean<Int> get() = Mean(IntRing) { sum, count -> sum / count }
-public val Long.Companion.mean: Mean<Long> get() = Mean(LongRing) { sum, count -> sum / count }
+public val LongRing.mean: Mean<Long> get() = Mean(LongRing) { sum, count -> sum / count }
 
 
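A small sketch of the relocated extension in use: the statistic now hangs off the algebra object instead of the number companion. The import path for the `mean` extension is an assumption; `evaluateBlocking` and `DoubleBuffer` appear elsewhere in this diff.

import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.stat.mean // assumed package for the extension
import space.kscience.kmath.structures.DoubleBuffer

fun main() {
    val buffer = DoubleBuffer(0.0, 1.0, 2.0, 3.0)
    // Before: Double.mean.evaluateBlocking(buffer)
    val m = DoubleField.mean.evaluateBlocking(buffer)
    println(m) // 1.5
}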
@@ -9,6 +9,7 @@ import kotlinx.coroutines.flow.first
 import kotlinx.coroutines.flow.last
 import kotlinx.coroutines.flow.take
 import kotlinx.coroutines.runBlocking
+import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.random.RandomGenerator
 import space.kscience.kmath.random.chain
 import space.kscience.kmath.streaming.chunked
@@ -27,26 +28,26 @@ internal class StatisticTest {
 
 @Test
 fun singleBlockingMean() {
-val first = runBlocking { chunked.first()}
+val first = runBlocking { chunked.first() }
-val res = Double.mean(first)
+val res = DoubleField.mean(first)
-assertEquals(0.5,res, 1e-1)
+assertEquals(0.5, res, 1e-1)
 }
 
 @Test
 fun singleSuspendMean() = runBlocking {
-val first = runBlocking { chunked.first()}
+val first = runBlocking { chunked.first() }
-val res = Double.mean(first)
+val res = DoubleField.mean(first)
-assertEquals(0.5,res, 1e-1)
+assertEquals(0.5, res, 1e-1)
 }
 
 @Test
 fun parallelMean() = runBlocking {
-val average = Double.mean
+val average = DoubleField.mean
 .flow(chunked) //create a flow from evaluated results
 .take(100) // Take 100 data chunks from the source and accumulate them
 .last() //get 1e5 data samples average
 
-assertEquals(0.5,average, 1e-2)
+assertEquals(0.5, average, 1e-2)
 }
 
 }
@@ -13,6 +13,7 @@ import space.kscience.kmath.nd.structureND
 import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.sum
+import space.kscience.kmath.tensors.core.randomNormal
 import kotlin.test.assertEquals
 
 @OptIn(UnstableKMathAPI::class)
@@ -21,7 +21,7 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> :
 /**
 * @return the mean of all elements in the input tensor.
 */
-public fun StructureND<T>.mean(): T
+public fun mean(structureND: StructureND<T>): T
 
 /**
 * Returns the mean of each row of the input tensor in the given dimension [dim].
@@ -34,12 +34,12 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> :
 * @param keepDim whether the output tensor has [dim] retained or not.
 * @return the mean of each row of the input tensor in the given dimension [dim].
 */
-public fun StructureND<T>.mean(dim: Int, keepDim: Boolean): Tensor<T>
+public fun mean(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T>
 
 /**
 * @return the standard deviation of all elements in the input tensor.
 */
-public fun StructureND<T>.std(): T
+public fun std(structureND: StructureND<T>): T
 
 /**
 * Returns the standard deviation of each row of the input tensor in the given dimension [dim].
@@ -52,12 +52,12 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> :
 * @param keepDim whether the output tensor has [dim] retained or not.
 * @return the standard deviation of each row of the input tensor in the given dimension [dim].
 */
-public fun StructureND<T>.std(dim: Int, keepDim: Boolean): Tensor<T>
+public fun std(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T>
 
 /**
 * @return the variance of all elements in the input tensor.
 */
-public fun StructureND<T>.variance(): T
+public fun variance(structureND: StructureND<T>): T
 
 /**
 * Returns the variance of each row of the input tensor in the given dimension [dim].
@@ -70,80 +70,45 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> :
 * @param keepDim whether the output tensor has [dim] retained or not.
 * @return the variance of each row of the input tensor in the given dimension [dim].
 */
-public fun StructureND<T>.variance(dim: Int, keepDim: Boolean): Tensor<T>
+public fun variance(structureND: StructureND<T>, dim: Int, keepDim: Boolean): Tensor<T>
 
-//For information: https://pytorch.org/docs/stable/generated/torch.exp.html
-public fun StructureND<T>.exp(): Tensor<T>
 
-//For information: https://pytorch.org/docs/stable/generated/torch.log.html
-public fun StructureND<T>.ln(): Tensor<T>
 
 //For information: https://pytorch.org/docs/stable/generated/torch.sqrt.html
-public fun StructureND<T>.sqrt(): Tensor<T>
+override fun sqrt(arg: StructureND<T>): Tensor<T>
 
-//For information: https://pytorch.org/docs/stable/generated/torch.acos.html#torch.cos
-public fun StructureND<T>.cos(): Tensor<T>
 
-//For information: https://pytorch.org/docs/stable/generated/torch.acos.html#torch.acos
-public fun StructureND<T>.acos(): Tensor<T>
 
-//For information: https://pytorch.org/docs/stable/generated/torch.acosh.html#torch.cosh
-public fun StructureND<T>.cosh(): Tensor<T>
 
-//For information: https://pytorch.org/docs/stable/generated/torch.acosh.html#torch.acosh
-public fun StructureND<T>.acosh(): Tensor<T>
 
-//For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.sin
-public fun StructureND<T>.sin(): Tensor<T>
 
-//For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.asin
-public fun StructureND<T>.asin(): Tensor<T>
 
-//For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.sinh
-public fun StructureND<T>.sinh(): Tensor<T>
 
-//For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.asinh
-public fun StructureND<T>.asinh(): Tensor<T>
 
 //For information: https://pytorch.org/docs/stable/generated/torch.atan.html#torch.tan
-public fun StructureND<T>.tan(): Tensor<T>
+override fun tan(arg: StructureND<T>): Tensor<T>
 
 //https://pytorch.org/docs/stable/generated/torch.atan.html#torch.atan
-public fun StructureND<T>.atan(): Tensor<T>
+override fun atan(arg: StructureND<T>): Tensor<T>
 
 //For information: https://pytorch.org/docs/stable/generated/torch.atanh.html#torch.tanh
-public fun StructureND<T>.tanh(): Tensor<T>
+override fun tanh(arg: StructureND<T>): Tensor<T>
 
-//For information: https://pytorch.org/docs/stable/generated/torch.atanh.html#torch.atanh
-public fun StructureND<T>.atanh(): Tensor<T>
 
 //For information: https://pytorch.org/docs/stable/generated/torch.ceil.html#torch.ceil
-public fun StructureND<T>.ceil(): Tensor<T>
+public fun ceil(arg: StructureND<T>): Tensor<T>
 
 //For information: https://pytorch.org/docs/stable/generated/torch.floor.html#torch.floor
-public fun StructureND<T>.floor(): Tensor<T>
+public fun floor(structureND: StructureND<T>): Tensor<T>
 
-override fun sin(arg: StructureND<T>): StructureND<T> = arg.sin()
+override fun sin(arg: StructureND<T>): StructureND<T>
 
-override fun cos(arg: StructureND<T>): StructureND<T> = arg.cos()
+override fun cos(arg: StructureND<T>): StructureND<T>
 
-override fun asin(arg: StructureND<T>): StructureND<T> = arg.asin()
+override fun asin(arg: StructureND<T>): StructureND<T>
 
-override fun acos(arg: StructureND<T>): StructureND<T> = arg.acos()
+override fun acos(arg: StructureND<T>): StructureND<T>
 
-override fun atan(arg: StructureND<T>): StructureND<T> = arg.atan()
 
-override fun exp(arg: StructureND<T>): StructureND<T> = arg.exp()
+override fun exp(arg: StructureND<T>): StructureND<T>
 
-override fun ln(arg: StructureND<T>): StructureND<T> = arg.ln()
+override fun ln(arg: StructureND<T>): StructureND<T>
 
-override fun sinh(arg: StructureND<T>): StructureND<T> = arg.sinh()
+override fun sinh(arg: StructureND<T>): StructureND<T>
 
-override fun cosh(arg: StructureND<T>): StructureND<T> = arg.cosh()
+override fun cosh(arg: StructureND<T>): StructureND<T>
 
-override fun asinh(arg: StructureND<T>): StructureND<T> = arg.asinh()
+override fun asinh(arg: StructureND<T>): StructureND<T>
 
-override fun acosh(arg: StructureND<T>): StructureND<T> = arg.acosh()
+override fun acosh(arg: StructureND<T>): StructureND<T>
 
-override fun atanh(arg: StructureND<T>): StructureND<T> = arg.atanh()
+override fun atanh(arg: StructureND<T>): StructureND<T>
 }
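A minimal sketch of the new prefix-style analytic calls against this interface, using the DoubleTensorAlgebra companion refactored later in this diff (imports taken from this changeset):

import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main() = with(DoubleTensorAlgebra) {
    val t = fromArray(ShapeND(2, 3), DoubleArray(6) { it.toDouble() })
    val rowMeans = mean(t, dim = 1, keepDim = false) // was t.mean(1, false)
    val sines = sin(t)                               // was t.sin()
    println(rowMeans)
    println(sines)
}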
@@ -47,7 +47,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
 * @receiver the `input`.
 * @return the batch of `L` matrices.
 */
-public fun StructureND<T>.cholesky(): StructureND<T>
+public fun cholesky(structureND: StructureND<T>): StructureND<T>
 
 /**
 * QR decomposition.
@@ -61,7 +61,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
 * @receiver the `input`.
 * @return pair of `Q` and `R` tensors.
 */
-public fun StructureND<T>.qr(): Pair<StructureND<T>, StructureND<T>>
+public fun qr(structureND: StructureND<T>): Pair<StructureND<T>, StructureND<T>>
 
 /**
 * LUP decomposition
@@ -75,7 +75,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
 * @receiver the `input`.
 * @return triple of P, L and U tensors
 */
-public fun StructureND<T>.lu(): Triple<StructureND<T>, StructureND<T>, StructureND<T>>
+public fun lu(structureND: StructureND<T>): Triple<StructureND<T>, StructureND<T>, StructureND<T>>
 
 /**
 * Singular Value Decomposition.
@@ -91,7 +91,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
 * @receiver the `input`.
 * @return triple `Triple(U, S, V)`.
 */
-public fun StructureND<T>.svd(): Triple<StructureND<T>, StructureND<T>, StructureND<T>>
+public fun svd(structureND: StructureND<T>): Triple<StructureND<T>, StructureND<T>, StructureND<T>>
 
 /**
 * Returns eigenvalues and eigenvectors of a real symmetric matrix `input` or a batch of real symmetric matrices,
@@ -101,6 +101,6 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
 * @receiver the `input`.
 * @return a pair `eigenvalues to eigenvectors`
 */
-public fun StructureND<T>.symEig(): Pair<StructureND<T>, StructureND<T>>
+public fun symEig(structureND: StructureND<T>): Pair<StructureND<T>, StructureND<T>>
 
 }
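A sketch of the prefix-style linear-algebra entry points after this change, destructuring the returned Pair/Triple; the 2x2 symmetric matrix is an illustrative input:

import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main() = with(DoubleTensorAlgebra) {
    val m = fromArray(ShapeND(2, 2), doubleArrayOf(4.0, 1.0, 1.0, 3.0))
    val (p, l, u) = lu(m)              // was m.lu()
    val (eigVals, eigVecs) = symEig(m) // was m.symEig()
    println(p); println(l); println(u)
    println(eigVals); println(eigVecs)
}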
@@ -10,7 +10,6 @@ import space.kscience.kmath.misc.UnstableKMathAPI
 import space.kscience.kmath.nd.*
 import space.kscience.kmath.structures.*
 import space.kscience.kmath.tensors.core.internal.toPrettyString
-import kotlin.jvm.JvmInline
 
 public class OffsetDoubleBuffer(
 override val origin: DoubleBuffer,
@@ -83,9 +82,9 @@ public inline fun OffsetDoubleBuffer.mapInPlace(operation: (Double) -> Double) {
 *
 * [DoubleTensor] always uses row-based strides
 */
-public class DoubleTensor(
+public open class DoubleTensor(
 shape: ShapeND,
-override val source: OffsetDoubleBuffer,
+final override val source: OffsetDoubleBuffer,
 ) : BufferedTensor<Double>(shape), MutableStructureNDOfDouble {
 
 init {
@@ -103,7 +102,7 @@ public class DoubleTensor(
 source[indices.offset(index)] = value
 }
 
-override fun getDouble(index: IntArray): Double = get(index)
+override fun getDouble(index: IntArray): Double = source[indices.offset(index)]
 
 override fun setDouble(index: IntArray, value: Double) {
 set(index, value)
@@ -112,62 +111,8 @@ public class DoubleTensor(
 override fun toString(): String = toPrettyString()
 }
 
-@JvmInline
-public value class DoubleTensor2D(public val tensor: DoubleTensor) : MutableStructureND<Double> by tensor,
-MutableStructure2D<Double> {
 
-init {
-require(tensor.shape.size == 2) { "Only 2D tensors could be cast to 2D" }
-}
 
-override val rowNum: Int get() = shape[0]
-override val colNum: Int get() = shape[1]
 
-override fun get(i: Int, j: Int): Double = tensor.source[i * colNum + j]
 
-override fun set(i: Int, j: Int, value: Double) {
-tensor.source[i * colNum + j] = value
-}
 
-@OptIn(PerformancePitfall::class)
-override val rows: List<OffsetDoubleBuffer>
-get() = List(rowNum) { i ->
-tensor.source.view(i * colNum, colNum)
-}
 
 
-@OptIn(PerformancePitfall::class)
-override val columns: List<PermutedMutableBuffer<Double>>
-get() = List(colNum) { j ->
-val indices = IntArray(rowNum) { i -> j + i * colNum }
-tensor.source.permute(indices)
-}
 
-@PerformancePitfall
-override fun elements(): Sequence<Pair<IntArray, Double>> = tensor.elements()
-@OptIn(PerformancePitfall::class)
-override fun get(index: IntArray): Double = tensor[index]
-override val shape: ShapeND get() = tensor.shape
-}
 
-public fun DoubleTensor.asDoubleTensor2D(): DoubleTensor2D = DoubleTensor2D(this)
 
 public fun DoubleTensor.asDoubleBuffer(): OffsetDoubleBuffer = if (shape.size == 1) {
 source
 } else {
 error("Only 1D tensors could be cast to 1D")
 }
 
-public inline fun DoubleTensor.forEachMatrix(block: (index: IntArray, matrix: DoubleTensor2D) -> Unit) {
-val n = shape.size
-check(n >= 2) { "Expected tensor with 2 or more dimensions, got size $n" }
-val matrixOffset = shape[n - 1] * shape[n - 2]
-val matrixShape = ShapeND(shape[n - 2], shape[n - 1])
 
-val size = matrixShape.linearSize
-for (i in 0 until linearSize / matrixOffset) {
-val offset = i * matrixOffset
-val index = indices.index(offset).sliceArray(0 until (shape.size - 2))
-block(index, DoubleTensor(matrixShape, source.view(offset, size)).asDoubleTensor2D())
-}
-}
@@ -0,0 +1,45 @@
+/*
+* Copyright 2018-2022 KMath contributors.
+* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+*/
+
+package space.kscience.kmath.tensors.core
+
+import space.kscience.kmath.misc.PerformancePitfall
+import space.kscience.kmath.nd.MutableStructure1D
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.structures.MutableBuffer
+
+public class DoubleTensor1D(
+source: OffsetDoubleBuffer,
+) : DoubleTensor(ShapeND(source.size), source), MutableStructure1D<Double> {
+
+@PerformancePitfall
+override fun get(index: IntArray): Double = super<MutableStructure1D>.get(index)
+
+@PerformancePitfall
+override fun set(index: IntArray, value: Double) {
+super<MutableStructure1D>.set(index, value)
+}
+
+override val size: Int get() = source.size
+
+override fun get(index: Int): Double = source[index]
+
+override fun set(index: Int, value: Double) {
+source[index] = value
+}
+
+override fun copy(): MutableBuffer<Double> = source.copy()
+
+@PerformancePitfall
+override fun elements(): Sequence<Pair<IntArray, Double>> = super<MutableStructure1D>.elements()
+}
+
+/**
+* A zero-copy cast to 1D structure. Changes in resulting structure are reflected on original tensor.
+*/
+public fun DoubleTensor.asDoubleTensor1D(): DoubleTensor1D {
+require(shape.size == 1) { "Only 1D tensors could be cast to 1D" }
+return DoubleTensor1D(source)
+}
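A small usage sketch for the new 1D view (fromArray assumed from the algebra refactored below): the cast is zero-copy, so writes through the view mutate the original tensor.

import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
import space.kscience.kmath.tensors.core.asDoubleTensor1D

fun main() = with(DoubleTensorAlgebra) {
    val t = fromArray(ShapeND(3), doubleArrayOf(1.0, 2.0, 3.0))
    val view = t.asDoubleTensor1D()
    view[0] = 10.0 // writes through to t
    println(t)     // first element is now 10.0
}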
@@ -0,0 +1,71 @@
+/*
+* Copyright 2018-2022 KMath contributors.
+* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+*/
+
+package space.kscience.kmath.tensors.core
+
+import space.kscience.kmath.misc.PerformancePitfall
+import space.kscience.kmath.nd.MutableStructure2D
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.nd.linearSize
+import space.kscience.kmath.structures.PermutedMutableBuffer
+import space.kscience.kmath.structures.permute
+
+public class DoubleTensor2D(
+override val rowNum: Int,
+override val colNum: Int,
+source: OffsetDoubleBuffer,
+) : DoubleTensor(ShapeND(rowNum, colNum), source), MutableStructure2D<Double> {
+
+override fun get(i: Int, j: Int): Double = source[i * colNum + j]
+
+@OptIn(PerformancePitfall::class)
+override fun get(index: IntArray): Double = getDouble(index)
+
+override fun set(i: Int, j: Int, value: Double) {
+source[i * colNum + j] = value
+}
+
+@OptIn(PerformancePitfall::class)
+override val rows: List<OffsetDoubleBuffer>
+get() = List(rowNum) { i ->
+source.view(i * colNum, colNum)
+}
+
+
+@OptIn(PerformancePitfall::class)
+override val columns: List<PermutedMutableBuffer<Double>>
+get() = List(colNum) { j ->
+val indices = IntArray(rowNum) { i -> j + i * colNum }
+source.permute(indices)
+}
+
+override val shape: ShapeND get() = super<DoubleTensor>.shape
+
+@PerformancePitfall
+override fun elements(): Sequence<Pair<IntArray, Double>> = super<MutableStructure2D>.elements()
+}
+
+/**
+* A zero-copy cast to 2D structure. Changes in resulting structure are reflected on original tensor.
+*/
+public fun DoubleTensor.asDoubleTensor2D(): DoubleTensor2D {
+require(shape.size == 2) { "Only 2D tensors could be cast to 2D" }
+return DoubleTensor2D(shape[0], shape[1], source)
+}
+
+
+public inline fun DoubleTensor.forEachMatrix(block: (index: IntArray, matrix: DoubleTensor2D) -> Unit) {
+val n = shape.size
+check(n >= 2) { "Expected tensor with 2 or more dimensions, got size $n" }
+val matrixOffset = shape[n - 1] * shape[n - 2]
+val matrixShape = ShapeND(shape[n - 2], shape[n - 1])
+
+val size = matrixShape.linearSize
+for (i in 0 until linearSize / matrixOffset) {
+val offset = i * matrixOffset
+val index = indices.index(offset).sliceArray(0 until (shape.size - 2))
+block(index, DoubleTensor(matrixShape, source.view(offset, size)).asDoubleTensor2D())
+}
+}
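And a sketch of iterating the matrix slices of a higher-rank tensor with the relocated forEachMatrix; each slice is a zero-copy 2D view (fromArray assumed from the algebra below):

import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
import space.kscience.kmath.tensors.core.forEachMatrix

fun main() = with(DoubleTensorAlgebra) {
    val batch = fromArray(ShapeND(2, 2, 2), DoubleArray(8) { it.toDouble() })
    batch.forEachMatrix { index, matrix ->
        // index addresses the batch dimensions; matrix is a 2x2 view
        println("${index.toList()}: ${matrix[0, 0]} .. ${matrix[1, 1]}")
    }
}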
@@ -11,12 +11,12 @@ package space.kscience.kmath.tensors.core
 import space.kscience.kmath.misc.PerformancePitfall
 import space.kscience.kmath.misc.UnstableKMathAPI
 import space.kscience.kmath.nd.*
+import space.kscience.kmath.operations.DoubleBufferOps
 import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.structures.*
 import space.kscience.kmath.tensors.api.AnalyticTensorAlgebra
 import space.kscience.kmath.tensors.api.LinearOpsTensorAlgebra
 import space.kscience.kmath.tensors.api.Tensor
-import space.kscience.kmath.tensors.api.TensorPartialDivisionAlgebra
 import space.kscience.kmath.tensors.core.internal.*
 import kotlin.math.*
 
@@ -25,7 +25,6 @@ import kotlin.math.*
 */
 @OptIn(PerformancePitfall::class)
 public open class DoubleTensorAlgebra :
-TensorPartialDivisionAlgebra<Double, DoubleField>,
 AnalyticTensorAlgebra<Double, DoubleField>,
 LinearOpsTensorAlgebra<Double, DoubleField> {
 
@@ -33,6 +32,8 @@ public open class DoubleTensorAlgebra :
 
 override val elementAlgebra: DoubleField get() = DoubleField
 
+public val bufferAlgebra: DoubleBufferOps get() = DoubleBufferOps
+
 /**
 * Applies the [transform] function to each element of the tensor and returns the resulting modified tensor.
@@ -98,6 +99,16 @@ public open class DoubleTensorAlgebra :
 override fun StructureND<Double>.value(): Double = valueOrNull()
 ?: throw IllegalArgumentException("The tensor shape is $shape, but value method is allowed only for shape [1]")
 
+public fun fromBuffer(shape: ShapeND, buffer: Buffer<Double>): DoubleTensor {
+checkNotEmptyShape(shape)
+check(buffer.size > 0) { "Illegal empty buffer provided" }
+check(buffer.size == shape.linearSize) {
+"Inconsistent shape $shape for buffer of size ${buffer.size} provided"
+}
+return DoubleTensor(shape, buffer.toDoubleBuffer())
+}
+
 /**
 * Constructs a tensor with the specified shape and data.
 *
@@ -105,12 +116,7 @@
 * @param array one-dimensional data array.
 * @return tensor with the [shape] shape and [array] data.
 */
-public fun fromArray(shape: ShapeND, array: DoubleArray): DoubleTensor {
-checkNotEmptyShape(shape)
-checkEmptyDoubleBuffer(array)
-checkBufferShapeConsistency(shape, array)
-return DoubleTensor(shape, array.asBuffer())
-}
+public fun fromArray(shape: ShapeND, array: DoubleArray): DoubleTensor = fromBuffer(shape, array.asBuffer())
 
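The new fromBuffer factory generalizes fromArray to any Buffer<Double>, and fromArray now just wraps its array and delegates. A minimal sketch:

import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.structures.DoubleBuffer
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main() = with(DoubleTensorAlgebra) {
    val fromBuf = fromBuffer(ShapeND(2, 2), DoubleBuffer(1.0, 2.0, 3.0, 4.0))
    val fromArr = fromArray(ShapeND(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0))
    // Both construct the same tensor.
    println(fromBuf)
    println(fromArr)
}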
 /**
 * Constructs a tensor with the specified shape and initializer.
@@ -271,7 +277,7 @@
 
 override fun StructureND<Double>.transposed(i: Int, j: Int): Tensor<Double> {
 val actualI = if (i >= 0) i else shape.size + i
-val actualJ = if(j>=0) j else shape.size + j
+val actualJ = if (j >= 0) j else shape.size + j
 return asDoubleTensor().permute(
 shape.transposed(actualI, actualJ)
 ) { originIndex ->
@@ -498,48 +504,6 @@
 return true
 }
 
-/**
-* Returns a tensor of random numbers drawn from normal distributions with `0.0` mean and `1.0` standard deviation.
-*
-* @param shape the desired shape for the output tensor.
-* @param seed the random seed of the pseudo-random number generator.
-* @return tensor of a given shape filled with numbers from the normal distribution
-* with `0.0` mean and `1.0` standard deviation.
-*/
-public fun randomNormal(shape: ShapeND, seed: Long = 0): DoubleTensor =
-DoubleTensor(shape, DoubleBuffer.randomNormals(shape.linearSize, seed))
-
-/**
-* Returns a tensor with the same shape as `input` of random numbers drawn from normal distributions
-* with `0.0` mean and `1.0` standard deviation.
-*
-* @receiver the `input`.
-* @param seed the random seed of the pseudo-random number generator.
-* @return a tensor with the same shape as `input` filled with numbers from the normal distribution
-* with `0.0` mean and `1.0` standard deviation.
-*/
-public fun Tensor<Double>.randomNormalLike(seed: Long = 0): DoubleTensor =
-DoubleTensor(shape, DoubleBuffer.randomNormals(shape.linearSize, seed))
-
-/**
-* Concatenates a sequence of tensors with equal shapes along the first dimension.
-*
-* @param tensors the [List] of tensors with same shapes to concatenate
-* @return tensor with concatenation result
-*/
-public fun stack(tensors: List<Tensor<Double>>): DoubleTensor {
-check(tensors.isNotEmpty()) { "List must have at least 1 element" }
-val shape = tensors[0].shape
-check(tensors.all { it.shape contentEquals shape }) { "Tensors must have same shapes" }
-val resShape = ShapeND(tensors.size) + shape
-// val resBuffer: List<Double> = tensors.flatMap {
-// it.asDoubleTensor().source.array.drop(it.asDoubleTensor().bufferStart)
-// .take(it.asDoubleTensor().linearSize)
-// }
-val resBuffer = tensors.map { it.asDoubleTensor().source }.concat()
-return DoubleTensor(resShape, resBuffer)
-}
-
 /**
 * Builds tensor from rows of the input tensor.
 *
@@ -631,102 +595,75 @@
 }
 
 
-override fun StructureND<Double>.mean(): Double = sum() / indices.linearSize
+override fun mean(structureND: StructureND<Double>): Double = structureND.sum() / structureND.indices.linearSize
 
-override fun StructureND<Double>.mean(dim: Int, keepDim: Boolean): DoubleTensor =
-foldDimToDouble(dim, keepDim) { arr ->
-check(dim < dimension) { "Dimension $dim out of range $dimension" }
-arr.sum() / shape[dim]
-}
+override fun mean(structureND: StructureND<Double>, dim: Int, keepDim: Boolean): Tensor<Double> =
+structureND.foldDimToDouble(dim, keepDim) { arr ->
+check(dim < structureND.dimension) { "Dimension $dim out of range ${structureND.dimension}" }
+arr.sum() / structureND.shape[dim]
+}
 
-override fun StructureND<Double>.std(): Double = reduceElements { arr ->
-val mean = arr.array.sum() / indices.linearSize
-sqrt(arr.array.sumOf { (it - mean) * (it - mean) } / (indices.linearSize - 1))
-}
+override fun std(structureND: StructureND<Double>): Double = structureND.reduceElements { arr ->
+val mean = arr.array.sum() / structureND.indices.linearSize
+sqrt(arr.array.sumOf { (it - mean) * (it - mean) } / (structureND.indices.linearSize - 1))
+}
 
-override fun StructureND<Double>.std(dim: Int, keepDim: Boolean): DoubleTensor = foldDimToDouble(
-dim,
-keepDim
-) { arr ->
-check(dim < dimension) { "Dimension $dim out of range $dimension" }
-val mean = arr.sum() / shape[dim]
-sqrt(arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1))
-}
+override fun std(structureND: StructureND<Double>, dim: Int, keepDim: Boolean): Tensor<Double> =
+structureND.foldDimToDouble(
+dim,
+keepDim
+) { arr ->
+check(dim < structureND.dimension) { "Dimension $dim out of range ${structureND.dimension}" }
+val mean = arr.sum() / structureND.shape[dim]
+sqrt(arr.sumOf { (it - mean) * (it - mean) } / (structureND.shape[dim] - 1))
+}
 
-override fun StructureND<Double>.variance(): Double = reduceElements { arr ->
-val linearSize = indices.linearSize
+override fun variance(structureND: StructureND<Double>): Double = structureND.reduceElements { arr ->
+val linearSize = structureND.indices.linearSize
 val mean = arr.array.sum() / linearSize
 arr.array.sumOf { (it - mean) * (it - mean) } / (linearSize - 1)
 }
 
-override fun StructureND<Double>.variance(dim: Int, keepDim: Boolean): DoubleTensor = foldDimToDouble(
-dim,
-keepDim
-) { arr ->
-check(dim < dimension) { "Dimension $dim out of range $dimension" }
-val mean = arr.sum() / shape[dim]
-arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1)
-}
+override fun variance(structureND: StructureND<Double>, dim: Int, keepDim: Boolean): Tensor<Double> =
+structureND.foldDimToDouble(
+dim,
+keepDim
+) { arr ->
+check(dim < structureND.dimension) { "Dimension $dim out of range ${structureND.dimension}" }
+val mean = arr.sum() / structureND.shape[dim]
+arr.sumOf { (it - mean) * (it - mean) } / (structureND.shape[dim] - 1)
+}
 
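A short sketch of the dimension-reducing statistics above: keepDim controls whether the reduced axis survives with size one. The shapes in the comments reflect the documented behavior, not verified output.

import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main() = with(DoubleTensorAlgebra) {
    val t = fromArray(ShapeND(2, 3), DoubleArray(6) { it.toDouble() })
    val collapsed = mean(t, dim = 1, keepDim = false) // expected shape [2]
    val kept = mean(t, dim = 1, keepDim = true)       // expected shape [2, 1]
    println(collapsed)
    println(kept)
}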
-private fun cov(x: StructureND<Double>, y: StructureND<Double>): Double {
-val n = x.shape[0]
-return ((x - x.mean()) * (y - y.mean())).mean() * n / (n - 1)
-}
-
-/**
-* Returns the covariance matrix `M` of given vectors.
-*
-* `M[i, j]` contains covariance of `i`-th and `j`-th given vectors
-*
-* @param tensors the [List] of 1-dimensional tensors with same shape
-* @return `M`.
-*/
-public fun cov(tensors: List<StructureND<Double>>): DoubleTensor {
-check(tensors.isNotEmpty()) { "List must have at least 1 element" }
-val n = tensors.size
-val m = tensors[0].shape[0]
-check(tensors.all { it.shape contentEquals ShapeND(m) }) { "Tensors must have same shapes" }
-val resTensor = DoubleTensor(
-ShapeND(n, n),
-DoubleBuffer(n * n) { 0.0 }
-)
-for (i in 0 until n) {
-for (j in 0 until n) {
-resTensor[intArrayOf(i, j)] = cov(tensors[i], tensors[j])
-}
-}
-return resTensor
-}
 
-override fun StructureND<Double>.exp(): DoubleTensor = map { exp(it) }
+override fun exp(arg: StructureND<Double>): DoubleTensor = arg.map { this.exp(it) }
 
-override fun StructureND<Double>.ln(): DoubleTensor = map { ln(it) }
+override fun ln(arg: StructureND<Double>): DoubleTensor = arg.map { this.ln(it) }
 
-override fun StructureND<Double>.sqrt(): DoubleTensor = map { sqrt(it) }
+override fun sqrt(arg: StructureND<Double>): DoubleTensor = arg.map { this.sqrt(it) }
 
-override fun StructureND<Double>.cos(): DoubleTensor = map { cos(it) }
+override fun cos(arg: StructureND<Double>): DoubleTensor = arg.map { this.cos(it) }
 
-override fun StructureND<Double>.acos(): DoubleTensor = map { acos(it) }
+override fun acos(arg: StructureND<Double>): DoubleTensor = arg.map { this.acos(it) }
 
-override fun StructureND<Double>.cosh(): DoubleTensor = map { cosh(it) }
+override fun cosh(arg: StructureND<Double>): DoubleTensor = arg.map { this.cosh(it) }
 
-override fun StructureND<Double>.acosh(): DoubleTensor = map { acosh(it) }
+override fun acosh(arg: StructureND<Double>): DoubleTensor = arg.map { this.acosh(it) }
 
-override fun StructureND<Double>.sin(): DoubleTensor = map { sin(it) }
+override fun sin(arg: StructureND<Double>): DoubleTensor = arg.map { this.sin(it) }
 
-override fun StructureND<Double>.asin(): DoubleTensor = map { asin(it) }
+override fun asin(arg: StructureND<Double>): DoubleTensor = arg.map { this.asin(it) }
 
-override fun StructureND<Double>.sinh(): DoubleTensor = map { sinh(it) }
+override fun sinh(arg: StructureND<Double>): DoubleTensor = arg.map { this.sinh(it) }
 
-override fun StructureND<Double>.asinh(): DoubleTensor = map { asinh(it) }
+override fun asinh(arg: StructureND<Double>): DoubleTensor = arg.map { this.asinh(it) }
 
-override fun StructureND<Double>.tan(): DoubleTensor = map { tan(it) }
+override fun tan(arg: StructureND<Double>): DoubleTensor = arg.map { this.tan(it) }
 
-override fun StructureND<Double>.atan(): DoubleTensor = map { atan(it) }
+override fun atan(arg: StructureND<Double>): DoubleTensor = arg.map { this.atan(it) }
 
-override fun StructureND<Double>.tanh(): DoubleTensor = map { tanh(it) }
+override fun tanh(arg: StructureND<Double>): DoubleTensor = arg.map { this.tanh(it) }
 
-override fun StructureND<Double>.atanh(): DoubleTensor = map { atanh(it) }
+override fun atanh(arg: StructureND<Double>): DoubleTensor = arg.map { this.atanh(it) }
 
 override fun power(arg: StructureND<Double>, pow: Number): StructureND<Double> = if (pow is Int) {
 arg.map { it.pow(pow) }
@@ -734,115 +671,26 @@
 arg.map { it.pow(pow.toDouble()) }
 }
 
-override fun StructureND<Double>.ceil(): DoubleTensor = map { ceil(it) }
+override fun ceil(arg: StructureND<Double>): DoubleTensor = arg.map { ceil(it) }
 
-override fun StructureND<Double>.floor(): DoubleTensor = map { floor(it) }
+override fun floor(structureND: StructureND<Double>): DoubleTensor = structureND.map { floor(it) }
 
-override fun StructureND<Double>.inv(): DoubleTensor = invLU(1e-9)
+override fun StructureND<Double>.inv(): DoubleTensor = invLU(this, 1e-9)
 
-override fun StructureND<Double>.det(): DoubleTensor = detLU(1e-9)
+override fun StructureND<Double>.det(): DoubleTensor = detLU(this, 1e-9)
 
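inv and det keep their receiver form but now delegate to the prefix-style invLU/detLU with the default epsilon of 1e-9 shown above. A quick sketch with a diagonal matrix:

import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main() = with(DoubleTensorAlgebra) {
    val m = fromArray(ShapeND(2, 2), doubleArrayOf(2.0, 0.0, 0.0, 4.0))
    println(m.det()) // determinant via LU factorization
    println(m.inv()) // inverse via LU factorization
}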
/**
|
override fun lu(structureND: StructureND<Double>): Triple<DoubleTensor, DoubleTensor, DoubleTensor> =
|
||||||
* Computes the LU factorization of a matrix or batches of matrices `input`.
|
lu(structureND, 1e-9)
|
||||||
* Returns a tuple containing the LU factorization and pivots of `input`.
|
|
||||||
*
|
|
||||||
* @param epsilon permissible error when comparing the determinant of a matrix with zero
|
|
||||||
* @return pair of `factorization` and `pivots`.
|
|
||||||
* The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor.
|
|
||||||
* The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows.
|
|
||||||
*/
|
|
||||||
public fun StructureND<Double>.luFactor(epsilon: Double): Pair<DoubleTensor, IntTensor> =
|
|
||||||
computeLU(this, epsilon)
|
|
||||||
?: throw IllegalArgumentException("Tensor contains matrices which are singular at precision $epsilon")
|
|
||||||
|
|
||||||
/**
|
override fun cholesky(structureND: StructureND<Double>): DoubleTensor = cholesky(structureND, 1e-6)
|
||||||
* Computes the LU factorization of a matrix or batches of matrices `input`.
|
|
||||||
* Returns a tuple containing the LU factorization and pivots of `input`.
|
|
||||||
* Uses an error of ``1e-9`` when calculating whether a matrix is degenerate.
|
|
||||||
*
|
|
||||||
* @return pair of `factorization` and `pivots`.
|
|
||||||
* The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor.
|
|
||||||
* The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows.
|
|
||||||
*/
|
|
||||||
public fun StructureND<Double>.luFactor(): Pair<DoubleTensor, IntTensor> = luFactor(1e-9)
|
|
||||||
|
|
||||||
/**
|
override fun qr(structureND: StructureND<Double>): Pair<DoubleTensor, DoubleTensor> {
|
||||||
* Unpacks the data and pivots from a LU factorization of a tensor.
|
checkSquareMatrix(structureND.shape)
|
||||||
* Given a tensor [luTensor], return tensors `Triple(P, L, U)` satisfying `P dot luTensor = L dot U`,
|
val qTensor = zeroesLike(structureND)
|
||||||
* with `P` being a permutation matrix or batch of matrices,
|
val rTensor = zeroesLike(structureND)
|
||||||
* `L` being a lower triangular matrix or batch of matrices,
|
|
||||||
* `U` being an upper triangular matrix or batch of matrices.
|
|
||||||
*
|
|
||||||
* @param luTensor the packed LU factorization data
|
|
||||||
* @param pivotsTensor the packed LU factorization pivots
|
|
||||||
* @return triple of `P`, `L` and `U` tensors
|
|
||||||
*/
|
|
||||||
public fun luPivot(
|
|
||||||
luTensor: StructureND<Double>,
|
|
||||||
pivotsTensor: Tensor<Int>,
|
|
||||||
): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
|
|
||||||
checkSquareMatrix(luTensor.shape)
|
|
||||||
check(
|
|
||||||
luTensor.shape.first(luTensor.shape.size - 2) contentEquals pivotsTensor.shape.first(pivotsTensor.shape.size - 1) ||
|
|
||||||
luTensor.shape.last() == pivotsTensor.shape.last() - 1
|
|
||||||
) { "Inappropriate shapes of input tensors" }
|
|
||||||
|
|
||||||
val n = luTensor.shape.last()
|
|
||||||
val pTensor = zeroesLike(luTensor)
|
|
||||||
pTensor
|
|
||||||
.matrixSequence()
|
|
||||||
.zip(pivotsTensor.asIntTensor().vectorSequence())
|
|
||||||
.forEach { (p, pivot) -> pivInit(p.asDoubleTensor2D(), pivot.as1D(), n) }
|
|
||||||
|
|
||||||
val lTensor = zeroesLike(luTensor)
|
|
||||||
val uTensor = zeroesLike(luTensor)
|
|
||||||
|
|
||||||
lTensor.matrixSequence()
|
|
||||||
.zip(uTensor.matrixSequence())
|
|
||||||
.zip(luTensor.asDoubleTensor().matrixSequence())
|
|
||||||
.forEach { (pairLU, lu) ->
|
|
||||||
val (l, u) = pairLU
|
|
||||||
luPivotHelper(l.asDoubleTensor2D(), u.asDoubleTensor2D(), lu.asDoubleTensor2D(), n)
|
|
||||||
}
|
|
||||||
|
|
||||||
return Triple(pTensor, lTensor, uTensor)
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* QR decomposition.
|
|
||||||
*
|
|
||||||
* Computes the QR decomposition of a matrix or a batch of matrices, and returns a pair `Q to R` of tensors.
|
|
||||||
* Given a tensor `input`, return tensors `Q to R` satisfying `input == Q dot R`,
|
|
||||||
* with `Q` being an orthogonal matrix or batch of orthogonal matrices
|
|
||||||
* and `R` being an upper triangular matrix or batch of upper triangular matrices.
|
|
||||||
*
|
|
||||||
* @receiver the `input`.
|
|
||||||
* @param epsilon the permissible error when comparing tensors for equality.
|
|
||||||
* Used when checking the positive definiteness of the input matrix or matrices.
|
|
||||||
* @return a pair of `Q` and `R` tensors.
|
|
||||||
*/
|
|
||||||
public fun StructureND<Double>.cholesky(epsilon: Double): DoubleTensor {
|
|
||||||
checkSquareMatrix(shape)
|
|
||||||
checkPositiveDefinite(asDoubleTensor(), epsilon)
|
|
||||||
|
|
||||||
val n = shape.last()
|
|
||||||
val lTensor = zeroesLike(this)
|
|
||||||
|
|
||||||
for ((a, l) in asDoubleTensor().matrixSequence().zip(lTensor.matrixSequence()))
|
|
||||||
for (i in 0 until n) choleskyHelper(a.asDoubleTensor2D(), l.asDoubleTensor2D(), n)
|
|
||||||
|
|
||||||
return lTensor
|
|
||||||
}
|
|
||||||
|
|
||||||
override fun StructureND<Double>.cholesky(): DoubleTensor = cholesky(1e-6)
|
|
||||||
|
|
||||||
override fun StructureND<Double>.qr(): Pair<DoubleTensor, DoubleTensor> {
|
|
||||||
checkSquareMatrix(shape)
|
|
||||||
val qTensor = zeroesLike(this)
|
|
||||||
val rTensor = zeroesLike(this)
|
|
||||||
|
|
||||||
//TODO replace with cycle
|
//TODO replace with cycle
|
||||||
asDoubleTensor().matrixSequence()
|
structureND.asDoubleTensor().matrixSequence()
|
||||||
.zip(
|
.zip(
|
||||||
(qTensor.matrixSequence()
|
(qTensor.matrixSequence()
|
||||||
.zip(rTensor.matrixSequence()))
|
.zip(rTensor.matrixSequence()))
|
||||||
@ -854,200 +702,14 @@ public open class DoubleTensorAlgebra :
|
|||||||
return qTensor to rTensor
|
return qTensor to rTensor
|
||||||
}
|
}
|
||||||
|
|
||||||
override fun StructureND<Double>.svd(): Triple<StructureND<Double>, StructureND<Double>, StructureND<Double>> =
|
override fun svd(
|
||||||
svd(epsilon = 1e-10)
|
structureND: StructureND<Double>,
|
||||||
|
): Triple<StructureND<Double>, StructureND<Double>, StructureND<Double>> =
|
||||||
|
svd(structureND = structureND, epsilon = 1e-10)
|
||||||
|
|
||||||
|
override fun symEig(structureND: StructureND<Double>): Pair<DoubleTensor, DoubleTensor> =
|
||||||
|
symEigJacobi(structureND = structureND, maxIteration = 50, epsilon = 1e-15)
|
||||||
|
|
||||||
/**
|
|
||||||
* Singular Value Decomposition.
|
|
||||||
*
|
|
||||||
* Computes the singular value decomposition of either a matrix or batch of matrices `input`.
|
|
||||||
* The singular value decomposition is represented as a triple `Triple(U, S, V)`,
|
|
||||||
* such that `input == U dot diagonalEmbedding(S) dot V.transpose()`.
|
|
||||||
* If `input` is a batch of tensors, then U, S, and Vh are also batched with the same batch dimensions as `input.
|
|
||||||
*
|
|
||||||
* @receiver the `input`.
|
|
||||||
* @param epsilon permissible error when calculating the dot product of vectors
|
|
||||||
* i.e., the precision with which the cosine approaches 1 in an iterative algorithm.
|
|
||||||
* @return a triple `Triple(U, S, V)`.
|
|
||||||
*/
|
|
||||||
public fun StructureND<Double>.svd(epsilon: Double): Triple<StructureND<Double>, StructureND<Double>, StructureND<Double>> {
|
|
||||||
val size = dimension
|
|
||||||
val commonShape = shape.slice(0 until size - 2)
|
|
||||||
val (n, m) = shape.slice(size - 2 until size)
|
|
||||||
val uTensor = zeros(commonShape + ShapeND(min(n, m), n))
|
|
||||||
val sTensor = zeros(commonShape + ShapeND(min(n, m)))
|
|
||||||
val vTensor = zeros(commonShape + ShapeND(min(n, m), m))
|
|
||||||
|
|
||||||
val matrices = asDoubleTensor().matrices
|
|
||||||
val uTensors = uTensor.matrices
|
|
||||||
val sTensorVectors = sTensor.vectors
|
|
||||||
val vTensors = vTensor.matrices
|
|
||||||
|
|
||||||
for (index in matrices.indices) {
|
|
||||||
val matrix = matrices[index]
|
|
||||||
val usv = Triple(
|
|
||||||
uTensors[index],
|
|
||||||
sTensorVectors[index],
|
|
||||||
vTensors[index]
|
|
||||||
)
|
|
||||||
val matrixSize = matrix.shape.linearSize
|
|
||||||
val curMatrix = DoubleTensor(
|
|
||||||
matrix.shape,
|
|
||||||
matrix.source.view(0, matrixSize)
|
|
||||||
)
|
|
||||||
svdHelper(curMatrix, usv, m, n, epsilon)
|
|
||||||
}
|
|
||||||
|
|
||||||
return Triple(uTensor.transposed(), sTensor, vTensor.transposed())
|
|
||||||
}
|
|
||||||
|
|
||||||
override fun StructureND<Double>.symEig(): Pair<DoubleTensor, DoubleTensor> =
|
|
||||||
symEigJacobi(maxIteration = 50, epsilon = 1e-15)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices,
|
|
||||||
* represented by a pair `eigenvalues to eigenvectors`.
|
|
||||||
*
|
|
||||||
* @param epsilon the permissible error when comparing tensors for equality
|
|
||||||
* and when the cosine approaches 1 in the SVD algorithm.
|
|
||||||
* @return a pair `eigenvalues to eigenvectors`.
|
|
||||||
*/
|
|
||||||
public fun StructureND<Double>.symEigSvd(epsilon: Double): Pair<DoubleTensor, StructureND<Double>> {
|
|
||||||
//TODO optimize conversion
|
|
||||||
checkSymmetric(asDoubleTensor(), epsilon)
|
|
||||||
|
|
||||||
fun MutableStructure2D<Double>.cleanSym(n: Int) {
|
|
||||||
for (i in 0 until n) {
|
|
||||||
for (j in 0 until n) {
|
|
||||||
if (i == j) {
|
|
||||||
this[i, j] = sign(this[i, j])
|
|
||||||
} else {
|
|
||||||
this[i, j] = 0.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
val (u, s, v) = svd(epsilon)
|
|
||||||
val shp = s.shape + intArrayOf(1)
|
|
||||||
val utv = u.transposed() matmul v
|
|
||||||
val n = s.shape.last()
|
|
||||||
for (matrix in utv.matrixSequence()) {
|
|
||||||
matrix.asDoubleTensor2D().cleanSym(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
val eig = (utv dot s.asDoubleTensor().view(shp)).view(s.shape)
|
|
||||||
return eig to v
|
|
||||||
}
|
|
||||||
|
|
||||||
    public fun StructureND<Double>.symEigJacobi(maxIteration: Int, epsilon: Double): Pair<DoubleTensor, DoubleTensor> {
        //TODO optimize conversion
        checkSymmetric(asDoubleTensor(), epsilon)

        val size = this.dimension
        val eigenvectors = zeros(shape)
        val eigenvalues = zeros(shape.slice(0 until size - 1))

        var eigenvalueStart = 0
        var eigenvectorStart = 0
        for (matrix in asDoubleTensor().matrixSequence()) {
            val matrix2D = matrix.asDoubleTensor2D()
            val (d, v) = matrix2D.jacobiHelper(maxIteration, epsilon)

            for (i in 0 until matrix2D.rowNum) {
                for (j in 0 until matrix2D.colNum) {
                    eigenvectors.source[eigenvectorStart + i * matrix2D.rowNum + j] = v[i, j]
                }
            }

            for (i in 0 until matrix2D.rowNum) {
                eigenvalues.source[eigenvalueStart + i] = d[i]
            }

            eigenvalueStart += this.shape.last()
            eigenvectorStart += this.shape.last() * this.shape.last()
        }

        return eigenvalues to eigenvectors
    }
    /**
     * Computes the determinant of a square matrix input, or of each square matrix in a batched input,
     * using the LU factorization algorithm.
     *
     * @param epsilon the permissible error in the LU algorithm, used when comparing the determinant
     * of a matrix with zero.
     * @return the determinant.
     */
    public fun StructureND<Double>.detLU(epsilon: Double = 1e-9): DoubleTensor {
        checkSquareMatrix(shape)
        //TODO check for unnecessary copies
        val luTensor = copyToTensor()
        val pivotsTensor = setUpPivots()

        val n = shape.size

        val detTensorShape = ShapeND(IntArray(n - 1) { i -> shape[i] }.apply {
            set(n - 2, 1)
        })

        val resBuffer = DoubleBuffer(detTensorShape.linearSize) { 0.0 }

        val detTensor = DoubleTensor(
            detTensorShape,
            resBuffer
        )

        luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).forEachIndexed { index, (lu, pivots) ->
            resBuffer[index] = if (luHelper(lu.asDoubleTensor2D(), pivots.as1D(), epsilon))
                0.0 else luMatrixDet(lu.asDoubleTensor2D(), pivots.as1D())
        }

        return detTensor
    }
    /**
     * Computes the multiplicative inverse matrix of a square matrix input, or of each square matrix in a batched input,
     * using the LU factorization algorithm.
     * Given a square matrix `a`, return the matrix `aInv` satisfying
     * `a dot aInv == aInv dot a == eye(a.shape[0])`.
     *
     * @param epsilon the permissible error in the LU algorithm, used when comparing the determinant of a matrix with zero.
     * @return the multiplicative inverse of a matrix.
     */
    public fun StructureND<Double>.invLU(epsilon: Double = 1e-9): DoubleTensor {
        val (luTensor, pivotsTensor) = luFactor(epsilon)
        val invTensor = zeroesLike(luTensor)

        //TODO replace sequence with a cycle
        val seq = luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).zip(invTensor.matrixSequence())
        for ((luP, invMatrix) in seq) {
            val (lu, pivots) = luP
            luMatrixInv(lu.asDoubleTensor2D(), pivots.as1D(), invMatrix.asDoubleTensor2D())
        }

        return invTensor
    }
    /**
     * LUP decomposition.
     *
     * Computes the LUP decomposition of a matrix or a batch of matrices.
     * Given a tensor `input`, return tensors `Triple(P, L, U)` satisfying `P dot input == L dot U`,
     * with `P` being a permutation matrix or batch of matrices,
     * `L` being a lower triangular matrix or batch of matrices,
     * `U` being an upper triangular matrix or batch of matrices.
     *
     * @param epsilon permissible error when comparing the determinant of a matrix with zero.
     * @return triple of `P`, `L` and `U` tensors.
     */
    public fun StructureND<Double>.lu(epsilon: Double = 1e-9): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
        val (lu, pivots) = luFactor(epsilon)
        return luPivot(lu, pivots)
    }

    override fun StructureND<Double>.lu(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> = lu(1e-9)
}
public val Double.Companion.tensorAlgebra: DoubleTensorAlgebra get() = DoubleTensorAlgebra
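
For orientation, a minimal usage sketch (an editorial aside, not part of the commit): the refactor replaces receiver-style calls such as `t.exp()` with prefix-notation calls resolved inside a tensor-algebra scope. All operations below appear elsewhere in this diff; the tensor values are illustrative.

import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.tensorAlgebra

fun main() = with(Double.tensorAlgebra) {
    val t = fromArray(ShapeND(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0))
    val e = exp(t)   // previously t.exp()
    val mu = mean(t) // previously t.mean()
    println("mean = $mu, exp shape = ${e.shape}")
}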
@ -13,6 +13,7 @@ import space.kscience.kmath.tensors.api.Tensor
 import space.kscience.kmath.tensors.core.DoubleTensor
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
 import space.kscience.kmath.tensors.core.asDoubleTensor
+import space.kscience.kmath.tensors.core.detLU

 internal fun checkNotEmptyShape(shape: ShapeND) =
@ -26,7 +27,7 @@ internal fun checkEmptyDoubleBuffer(buffer: DoubleArray) = check(buffer.isNotEmp

 internal fun checkBufferShapeConsistency(shape: ShapeND, buffer: DoubleArray) =
     check(buffer.size == shape.linearSize) {
-        "Inconsistent shape ${shape} for buffer of size ${buffer.size} provided"
+        "Inconsistent shape $shape for buffer of size ${buffer.size} provided"
     }

 @PublishedApi
@ -62,7 +63,7 @@ internal fun DoubleTensorAlgebra.checkSymmetric(
 internal fun DoubleTensorAlgebra.checkPositiveDefinite(tensor: DoubleTensor, epsilon: Double = 1e-6) {
     checkSymmetric(tensor, epsilon)
     for (mat in tensor.matrixSequence())
-        check(mat.asDoubleTensor().detLU().value() > 0.0) {
-            "Tensor contains matrices which are not positive definite ${mat.asDoubleTensor().detLU().value()}"
+        check(detLU(mat.asDoubleTensor()).value() > 0.0) {
+            "Tensor contains matrices which are not positive definite ${detLU(mat.asDoubleTensor()).value()}"
         }
 }
@ -227,7 +227,7 @@ internal fun DoubleTensorAlgebra.qrHelper(
             }
         }
     }
-    r[j, j] = DoubleTensorAlgebra { (v dot v).sqrt().value() }
+    r[j, j] = DoubleTensorAlgebra { sqrt((v dot v)).value() }
     for (i in 0 until n) {
         qM[i, j] = vv[i] / r[j, j]
     }
@ -250,7 +250,7 @@ internal fun DoubleTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double = 1e-10)
     while (true) {
         lastV = v
         v = b.dot(lastV)
-        val norm = DoubleTensorAlgebra { (v dot v).sqrt().value() }
+        val norm = DoubleTensorAlgebra { sqrt((v dot v)).value() }
         v = v.times(1.0 / norm)
         if (abs(v.dot(lastV).value()) > 1 - epsilon) {
             return v
@ -283,12 +283,12 @@ internal fun DoubleTensorAlgebra.svdHelper(
     if (n > m) {
         v = svd1d(a, epsilon)
         u = matrix.dot(v)
-        norm = DoubleTensorAlgebra { (u dot u).sqrt().value() }
+        norm = DoubleTensorAlgebra { sqrt((u dot u)).value() }
         u = u.times(1.0 / norm)
     } else {
         u = svd1d(a, epsilon)
         v = matrix.transposed(0, 1).dot(u)
-        norm = DoubleTensorAlgebra { (v dot v).sqrt().value() }
+        norm = DoubleTensorAlgebra { sqrt((v dot v)).value() }
         v = v.times(1.0 / norm)
     }
@ -0,0 +1,370 @@
/*
 * Copyright 2018-2022 KMath contributors.
 * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
 */

package space.kscience.kmath.tensors.core

import space.kscience.kmath.nd.*
import space.kscience.kmath.operations.covariance
import space.kscience.kmath.structures.Buffer
import space.kscience.kmath.structures.DoubleBuffer
import space.kscience.kmath.tensors.api.Tensor
import space.kscience.kmath.tensors.core.internal.*
import kotlin.math.min
import kotlin.math.sign
/**
 * Returns a tensor of random numbers drawn from normal distributions with `0.0` mean and `1.0` standard deviation.
 *
 * @param shape the desired shape for the output tensor.
 * @param seed the random seed of the pseudo-random number generator.
 * @return tensor of a given shape filled with numbers from the normal distribution
 * with `0.0` mean and `1.0` standard deviation.
 */
public fun DoubleTensorAlgebra.randomNormal(shape: ShapeND, seed: Long = 0): DoubleTensor =
    fromBuffer(shape, DoubleBuffer.randomNormals(shape.linearSize, seed))

/**
 * Returns a tensor with the same shape as `input` of random numbers drawn from normal distributions
 * with `0.0` mean and `1.0` standard deviation.
 *
 * @param structure the `input`.
 * @param seed the random seed of the pseudo-random number generator.
 * @return a tensor with the same shape as `input` filled with numbers from the normal distribution
 * with `0.0` mean and `1.0` standard deviation.
 */
public fun DoubleTensorAlgebra.randomNormalLike(structure: WithShape, seed: Long = 0): DoubleTensor =
    DoubleTensor(structure.shape, DoubleBuffer.randomNormals(structure.shape.linearSize, seed))
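
A usage sketch for the two samplers above (editorial, not part of the new file; the seed and shape are illustrative):

private fun randomNormalSketch() = DoubleTensorAlgebra {
    val a = randomNormal(ShapeND(2, 3), seed = 42)
    val b = randomNormalLike(a, seed = 42)
    // Same shape and seed, so the deterministic generator should reproduce the draw: a eq b.
}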
/**
 * Stacks a list of tensors with equal shapes along a new first dimension.
 *
 * @param tensors the [List] of tensors with the same shape to stack
 * @return the stacked tensor, whose leading dimension equals `tensors.size`
 */
public fun stack(tensors: List<Tensor<Double>>): DoubleTensor {
    check(tensors.isNotEmpty()) { "List must have at least 1 element" }
    val shape = tensors[0].shape
    check(tensors.all { it.shape contentEquals shape }) { "Tensors must have same shapes" }
    val resShape = ShapeND(tensors.size) + shape
//    val resBuffer: List<Double> = tensors.flatMap {
//        it.asDoubleTensor().source.array.drop(it.asDoubleTensor().bufferStart)
//            .take(it.asDoubleTensor().linearSize)
//    }
    val resBuffer = tensors.map { it.asDoubleTensor().source }.concat()
    return DoubleTensor(resShape, resBuffer)
}
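
A sketch of `stack` in use (editorial; the row values are illustrative):

private fun stackSketch(): DoubleTensor = DoubleTensorAlgebra {
    val row = fromArray(ShapeND(3), doubleArrayOf(1.0, 2.0, 3.0))
    stack(listOf(row, row)) // result has shape (2, 3): a new leading dimension of size tensors.size
}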
/**
 * Computes the LU factorization of a matrix or batches of matrices `input`.
 * Returns a tuple containing the LU factorization and pivots of `input`.
 *
 * @param epsilon permissible error when comparing the determinant of a matrix with zero; the default is 1e-9.
 * @return pair of `factorization` and `pivots`.
 * The `factorization` has the shape `(*, m, n)`, the same as the shape of the `input` tensor.
 * The `pivots` has the shape `(*, min(m, n))` and stores all the intermediate transpositions of rows.
 */
public fun DoubleTensorAlgebra.luFactor(
    structureND: StructureND<Double>,
    epsilon: Double = 1e-9,
): Pair<DoubleTensor, IntTensor> =
    computeLU(structureND, epsilon)
        ?: throw IllegalArgumentException("Tensor contains matrices which are singular at precision $epsilon")
/**
 * Unpacks the data and pivots from a LU factorization of a tensor.
 * Given a tensor [luTensor], return tensors `Triple(P, L, U)` satisfying `P dot luTensor == L dot U`,
 * with `P` being a permutation matrix or batch of matrices,
 * `L` being a lower triangular matrix or batch of matrices,
 * `U` being an upper triangular matrix or batch of matrices.
 *
 * @param luTensor the packed LU factorization data
 * @param pivotsTensor the packed LU factorization pivots
 * @return triple of `P`, `L` and `U` tensors
 */
public fun DoubleTensorAlgebra.luPivot(
    luTensor: StructureND<Double>,
    pivotsTensor: Tensor<Int>,
): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
    checkSquareMatrix(luTensor.shape)
    check(
        luTensor.shape.first(luTensor.shape.size - 2) contentEquals pivotsTensor.shape.first(pivotsTensor.shape.size - 1) ||
                luTensor.shape.last() == pivotsTensor.shape.last() - 1
    ) { "Inappropriate shapes of input tensors" }

    val n = luTensor.shape.last()
    val pTensor = zeroesLike(luTensor)
    pTensor
        .matrixSequence()
        .zip(pivotsTensor.asIntTensor().vectorSequence())
        .forEach { (p, pivot) -> pivInit(p.asDoubleTensor2D(), pivot.as1D(), n) }

    val lTensor = zeroesLike(luTensor)
    val uTensor = zeroesLike(luTensor)

    lTensor.matrixSequence()
        .zip(uTensor.matrixSequence())
        .zip(luTensor.asDoubleTensor().matrixSequence())
        .forEach { (pairLU, lu) ->
            val (l, u) = pairLU
            luPivotHelper(l.asDoubleTensor2D(), u.asDoubleTensor2D(), lu.asDoubleTensor2D(), n)
        }

    return Triple(pTensor, lTensor, uTensor)
}
/**
 * LUP decomposition.
 *
 * Computes the LUP decomposition of a matrix or a batch of matrices.
 * Given a tensor `input`, return tensors `Triple(P, L, U)` satisfying `P dot input == L dot U`,
 * with `P` being a permutation matrix or batch of matrices,
 * `L` being a lower triangular matrix or batch of matrices,
 * `U` being an upper triangular matrix or batch of matrices.
 *
 * @param epsilon permissible error when comparing the determinant of a matrix with zero.
 * @return triple of `P`, `L` and `U` tensors.
 */
public fun DoubleTensorAlgebra.lu(
    structureND: StructureND<Double>,
    epsilon: Double = 1e-9,
): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
    val (lu, pivots) = luFactor(structureND, epsilon)
    return luPivot(lu, pivots)
}
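
How the three LU entry points relate (an editorial sketch; the matrix values are illustrative):

private fun luSketch() = DoubleTensorAlgebra {
    val m = fromArray(ShapeND(2, 2), doubleArrayOf(4.0, 3.0, 6.0, 3.0))
    val (factorization, pivots) = luFactor(m, epsilon = 1e-9)
    val (p, l, u) = luPivot(factorization, pivots)
    // Equivalent one-step call: lu(m). Here p matmul m should equal l matmul u.
}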
/**
 * Cholesky decomposition.
 *
 * Computes the Cholesky decomposition of a symmetric positive-definite matrix or a batch of such matrices.
 * Given a tensor `input`, return the tensor `L` satisfying `input == L dot L.transposed()`,
 * with `L` being a lower triangular matrix or batch of lower triangular matrices.
 *
 * @param structureND the input.
 * @param epsilon the permissible error when comparing tensors for equality; the default is 1e-6.
 * Used when checking the positive definiteness of the input matrix or matrices.
 * @return the `L` tensor.
 */
public fun DoubleTensorAlgebra.cholesky(structureND: StructureND<Double>, epsilon: Double = 1e-6): DoubleTensor {
    checkSquareMatrix(structureND.shape)
    checkPositiveDefinite(structureND.asDoubleTensor(), epsilon)

    val n = structureND.shape.last()
    val lTensor = zeroesLike(structureND)

    for ((a, l) in structureND.asDoubleTensor().matrixSequence().zip(lTensor.matrixSequence()))
        for (i in 0 until n) choleskyHelper(a.asDoubleTensor2D(), l.asDoubleTensor2D(), n)

    return lTensor
}
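
A sketch for `cholesky` (editorial; the matrix is an illustrative symmetric positive-definite example):

private fun choleskySketch() = DoubleTensorAlgebra {
    val sigma = fromArray(ShapeND(2, 2), doubleArrayOf(4.0, 1.0, 1.0, 3.0))
    val low = cholesky(sigma)
    // low is lower triangular, and low matmul low.transposed() restores sigma within epsilon.
}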
/**
 * Singular Value Decomposition.
 *
 * Computes the singular value decomposition of either a matrix or a batch of matrices `input`.
 * The singular value decomposition is represented as a triple `Triple(U, S, V)`,
 * such that `input == U dot diagonalEmbedding(S) dot V.transposed()`.
 * If `input` is a batch of tensors, then `U`, `S`, and `V` are also batched with the same batch dimensions as `input`.
 *
 * @param structureND the input.
 * @param epsilon permissible error when calculating the dot product of vectors,
 * i.e., the precision with which the cosine approaches 1 in an iterative algorithm.
 * @return a triple `Triple(U, S, V)`.
 */
public fun DoubleTensorAlgebra.svd(
    structureND: StructureND<Double>,
    epsilon: Double,
): Triple<StructureND<Double>, StructureND<Double>, StructureND<Double>> {
    val size = structureND.dimension
    val commonShape = structureND.shape.slice(0 until size - 2)
    val (n, m) = structureND.shape.slice(size - 2 until size)
    val uTensor = zeros(commonShape + ShapeND(min(n, m), n))
    val sTensor = zeros(commonShape + ShapeND(min(n, m)))
    val vTensor = zeros(commonShape + ShapeND(min(n, m), m))

    val matrices = structureND.asDoubleTensor().matrices
    val uTensors = uTensor.matrices
    val sTensorVectors = sTensor.vectors
    val vTensors = vTensor.matrices

    for (index in matrices.indices) {
        val matrix = matrices[index]
        val usv = Triple(
            uTensors[index],
            sTensorVectors[index],
            vTensors[index]
        )
        val matrixSize = matrix.shape.linearSize
        val curMatrix = DoubleTensor(
            matrix.shape,
            matrix.source.view(0, matrixSize)
        )
        svdHelper(curMatrix, usv, m, n, epsilon)
    }

    return Triple(uTensor.transposed(), sTensor, vTensor.transposed())
}
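
A sketch for `svd`, mirroring the updated tests below (editorial; the shape is illustrative):

private fun svdSketch() = DoubleTensorAlgebra {
    val tensor = randomNormal(ShapeND(5, 3), 0)
    val (u, s, v) = svd(tensor, 1e-10)
    val restored = u.asDoubleTensor() matmul
            (diagonalEmbedding(s.asDoubleTensor()) matmul v.asDoubleTensor().transposed())
    // tensor eq restored should hold within the epsilon tolerance.
}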
/**
 * Returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices,
 * represented by a pair `eigenvalues to eigenvectors`.
 *
 * @param epsilon the permissible error when comparing tensors for equality
 * and when the cosine approaches 1 in the SVD algorithm.
 * @return a pair `eigenvalues to eigenvectors`.
 */
public fun DoubleTensorAlgebra.symEigSvd(
    structureND: StructureND<Double>,
    epsilon: Double,
): Pair<DoubleTensor, StructureND<Double>> {
    //TODO optimize conversion
    checkSymmetric(structureND.asDoubleTensor(), epsilon)

    fun MutableStructure2D<Double>.cleanSym(n: Int) {
        for (i in 0 until n) {
            for (j in 0 until n) {
                if (i == j) {
                    this[i, j] = sign(this[i, j])
                } else {
                    this[i, j] = 0.0
                }
            }
        }
    }

    val (u, s, v) = svd(structureND, epsilon)
    val shp = s.shape + intArrayOf(1)
    val utv = u.transposed() matmul v
    val n = s.shape.last()
    for (matrix in utv.matrixSequence()) {
        matrix.asDoubleTensor2D().cleanSym(n)
    }

    val eig = (utv dot s.asDoubleTensor().view(shp)).view(s.shape)
    return eig to v
}
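
An editorial sketch for `symEigSvd`, symmetrizing a random matrix first (values illustrative):

private fun symEigSvdSketch() = DoubleTensorAlgebra {
    val t = randomNormal(ShapeND(3, 3), 0)
    val sym = t + t.transposed()
    val (values, vectors) = symEigSvd(sym, epsilon = 1e-10)
    // vectors matmul (diagonalEmbedding(values) matmul vectors.transposed()) restores sym.
}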
public fun DoubleTensorAlgebra.symEigJacobi(
    structureND: StructureND<Double>,
    maxIteration: Int,
    epsilon: Double,
): Pair<DoubleTensor, DoubleTensor> {
    //TODO optimize conversion
    checkSymmetric(structureND.asDoubleTensor(), epsilon)

    val size = structureND.dimension
    val eigenvectors = zeros(structureND.shape)
    val eigenvalues = zeros(structureND.shape.slice(0 until size - 1))

    var eigenvalueStart = 0
    var eigenvectorStart = 0
    for (matrix in structureND.asDoubleTensor().matrixSequence()) {
        val matrix2D = matrix.asDoubleTensor2D()
        val (d, v) = matrix2D.jacobiHelper(maxIteration, epsilon)

        for (i in 0 until matrix2D.rowNum) {
            for (j in 0 until matrix2D.colNum) {
                eigenvectors.source[eigenvectorStart + i * matrix2D.rowNum + j] = v[i, j]
            }
        }

        for (i in 0 until matrix2D.rowNum) {
            eigenvalues.source[eigenvalueStart + i] = d[i]
        }

        eigenvalueStart += structureND.shape.last()
        eigenvectorStart += structureND.shape.last() * structureND.shape.last()
    }

    return eigenvalues to eigenvectors
}
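
An editorial sketch for `symEigJacobi`; the maxIteration and epsilon values are illustrative:

private fun symEigJacobiSketch() = DoubleTensorAlgebra {
    val t = randomNormal(ShapeND(3, 3), 0)
    val sym = t + t.transposed()
    val (d, v) = symEigJacobi(sym, maxIteration = 50, epsilon = 1e-10)
    // d holds the eigenvalues and v the corresponding eigenvectors of sym.
}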
/**
 * Computes the determinant of a square matrix input, or of each square matrix in a batched input,
 * using the LU factorization algorithm.
 *
 * @param epsilon the permissible error in the LU algorithm, used when comparing the determinant
 * of a matrix with zero.
 * @return the determinant.
 */
public fun DoubleTensorAlgebra.detLU(structureND: StructureND<Double>, epsilon: Double = 1e-9): DoubleTensor {
    checkSquareMatrix(structureND.shape)
    //TODO check for unnecessary copies
    val luTensor = structureND.copyToTensor()
    val pivotsTensor = structureND.setUpPivots()

    val n = structureND.shape.size

    val detTensorShape = ShapeND(IntArray(n - 1) { i -> structureND.shape[i] }.apply {
        set(n - 2, 1)
    })

    val resBuffer = DoubleBuffer(detTensorShape.linearSize) { 0.0 }

    val detTensor = DoubleTensor(
        detTensorShape,
        resBuffer
    )

    luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).forEachIndexed { index, (lu, pivots) ->
        resBuffer[index] = if (luHelper(lu.asDoubleTensor2D(), pivots.as1D(), epsilon))
            0.0 else luMatrixDet(lu.asDoubleTensor2D(), pivots.as1D())
    }

    return detTensor
}
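
An editorial sketch for `detLU`, using a 2x2 example so the determinant is 1*4 - 2*3 = -2:

private fun detSketch(): Double = DoubleTensorAlgebra {
    val m = fromArray(ShapeND(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0))
    detLU(m).value() // -2.0
}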
/**
 * Computes the multiplicative inverse matrix of a square matrix input, or of each square matrix in a batched input,
 * using the LU factorization algorithm.
 * Given a square matrix `a`, return the matrix `aInv` satisfying
 * `a dot aInv == aInv dot a == eye(a.shape[0])`.
 *
 * @param epsilon the permissible error in the LU algorithm, used when comparing the determinant of a matrix with zero.
 * @return the multiplicative inverse of a matrix.
 */
public fun DoubleTensorAlgebra.invLU(structureND: StructureND<Double>, epsilon: Double = 1e-9): DoubleTensor {
    val (luTensor, pivotsTensor) = luFactor(structureND, epsilon)
    val invTensor = zeroesLike(luTensor)

    //TODO replace sequence with a cycle
    val seq = luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).zip(invTensor.matrixSequence())
    for ((luP, invMatrix) in seq) {
        val (lu, pivots) = luP
        luMatrixInv(lu.asDoubleTensor2D(), pivots.as1D(), invMatrix.asDoubleTensor2D())
    }

    return invTensor
}
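
An editorial sketch for `invLU`, with the same illustrative matrix:

private fun invSketch() = DoubleTensorAlgebra {
    val m = fromArray(ShapeND(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0))
    val mInv = invLU(m)
    // m matmul mInv approximates the identity matrix within epsilon.
}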
/**
 * Returns the covariance matrix `M` of the given vectors.
 *
 * `M[i, j]` contains the covariance of the `i`-th and `j`-th given vectors.
 *
 * @param vectors the [List] of 1-dimensional tensors with the same shape
 * @return `M`.
 */
public fun DoubleTensorAlgebra.covariance(vectors: List<Buffer<Double>>): DoubleTensor {
    check(vectors.isNotEmpty()) { "List must have at least 1 element" }
    val n = vectors.size
    val m = vectors[0].size
    check(vectors.all { it.size == m }) { "Vectors must have same shapes" }
    val resTensor = DoubleTensor(
        ShapeND(n, n),
        DoubleBuffer(n * n) { 0.0 }
    )
    for (i in 0 until n) {
        for (j in 0 until n) {
            resTensor[intArrayOf(i, j)] = bufferAlgebra.covariance(vectors[i], vectors[j])
        }
    }
    return resTensor
}
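
An editorial sketch for `covariance`; it assumes the vararg `DoubleBuffer(...)` factory from kmath's structures package:

private fun covarianceSketch() = DoubleTensorAlgebra {
    val x = DoubleBuffer(1.0, 2.0, 3.0)
    val y = DoubleBuffer(2.0, 4.0, 6.0)
    val m = covariance(listOf(x, y))
    // m is 2x2; m[intArrayOf(0, 1)] is the covariance of x and y.
}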
@ -34,73 +34,73 @@ internal class TestDoubleAnalyticTensorAlgebra {

     @Test
     fun testExp() = DoubleTensorAlgebra {
-        assertTrue { tensor.exp() eq expectedTensor(::exp) }
+        assertTrue { exp(tensor) eq expectedTensor(::exp) }
     }

     @Test
     fun testLog() = DoubleTensorAlgebra {
-        assertTrue { tensor.ln() eq expectedTensor(::ln) }
+        assertTrue { ln(tensor) eq expectedTensor(::ln) }
     }

     @Test
     fun testSqrt() = DoubleTensorAlgebra {
-        assertTrue { tensor.sqrt() eq expectedTensor(::sqrt) }
+        assertTrue { sqrt(tensor) eq expectedTensor(::sqrt) }
     }

     @Test
     fun testCos() = DoubleTensorAlgebra {
-        assertTrue { tensor.cos() eq expectedTensor(::cos) }
+        assertTrue { cos(tensor) eq expectedTensor(::cos) }
     }

     @Test
     fun testCosh() = DoubleTensorAlgebra {
-        assertTrue { tensor.cosh() eq expectedTensor(::cosh) }
+        assertTrue { cosh(tensor) eq expectedTensor(::cosh) }
     }

     @Test
     fun testAcosh() = DoubleTensorAlgebra {
-        assertTrue { tensor.acosh() eq expectedTensor(::acosh) }
+        assertTrue { acosh(tensor) eq expectedTensor(::acosh) }
     }

     @Test
     fun testSin() = DoubleTensorAlgebra {
-        assertTrue { tensor.sin() eq expectedTensor(::sin) }
+        assertTrue { sin(tensor) eq expectedTensor(::sin) }
     }

     @Test
     fun testSinh() = DoubleTensorAlgebra {
-        assertTrue { tensor.sinh() eq expectedTensor(::sinh) }
+        assertTrue { sinh(tensor) eq expectedTensor(::sinh) }
     }

     @Test
     fun testAsinh() = DoubleTensorAlgebra {
-        assertTrue { tensor.asinh() eq expectedTensor(::asinh) }
+        assertTrue { asinh(tensor) eq expectedTensor(::asinh) }
     }

     @Test
     fun testTan() = DoubleTensorAlgebra {
-        assertTrue { tensor.tan() eq expectedTensor(::tan) }
+        assertTrue { tan(tensor) eq expectedTensor(::tan) }
     }

     @Test
     fun testAtan() = DoubleTensorAlgebra {
-        assertTrue { tensor.atan() eq expectedTensor(::atan) }
+        assertTrue { atan(tensor) eq expectedTensor(::atan) }
     }

     @Test
     fun testTanh() = DoubleTensorAlgebra {
-        assertTrue { tensor.tanh() eq expectedTensor(::tanh) }
+        assertTrue { tanh(tensor) eq expectedTensor(::tanh) }
     }

     @Test
     fun testCeil() = DoubleTensorAlgebra {
-        assertTrue { tensor.ceil() eq expectedTensor(::ceil) }
+        assertTrue { ceil(tensor) eq expectedTensor(::ceil) }
     }

     @Test
     fun testFloor() = DoubleTensorAlgebra {
-        assertTrue { tensor.floor() eq expectedTensor(::floor) }
+        assertTrue { floor(tensor) eq expectedTensor(::floor) }
     }

     val shape2 = ShapeND(2, 2)
@ -163,15 +163,15 @@ internal class TestDoubleAnalyticTensorAlgebra {

     @Test
     fun testMean() = DoubleTensorAlgebra {
-        assertTrue { tensor2.mean() == 1.0 }
+        assertTrue { mean(tensor2) == 1.0 }
         assertTrue {
-            tensor2.mean(0, true) eq fromArray(
+            mean(tensor2, 0, true) eq fromArray(
                 ShapeND(1, 2),
                 doubleArrayOf(-1.0, 3.0)
             )
         }
         assertTrue {
-            tensor2.mean(1, false) eq fromArray(
+            mean(tensor2, 1, false) eq fromArray(
                 ShapeND(2),
                 doubleArrayOf(1.5, 0.5)
             )
@ -35,7 +35,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
                 -7.0
             )
         )
-        val detTensor = tensor.detLU()
+        val detTensor = detLU(tensor)

         assertTrue(detTensor.eq(expectedTensor))

@ -88,7 +88,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
             )
         )

-        val invTensor = tensor.invLU()
+        val invTensor = invLU(tensor)
         assertTrue(invTensor.eq(expectedTensor))
     }

@ -111,7 +111,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {

         val tensor = fromArray(shape, buffer)

-        val (q, r) = tensor.qr()
+        val (q, r) = qr(tensor)

         assertTrue { q.shape contentEquals shape }
         assertTrue { r.shape contentEquals shape }
@ -131,7 +131,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
         )
         val tensor = fromArray(shape, buffer)

-        val (p, l, u) = tensor.lu()
+        val (p, l, u) = lu(tensor)

         assertTrue { p.shape contentEquals shape }
         assertTrue { l.shape contentEquals shape }
@ -146,7 +146,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
         val sigma = (tensor matmul tensor.transposed()) + diagonalEmbedding(
             fromArray(ShapeND(2, 5), DoubleArray(10) { 0.1 })
         )
-        val low = sigma.cholesky()
+        val low = cholesky(sigma)
         val sigmChol = low matmul low.transposed()
         assertTrue(sigma.eq(sigmChol))
     }
@ -171,7 +171,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     @Test
     fun testBatchedSVD() = DoubleTensorAlgebra {
         val tensor = randomNormal(ShapeND(2, 5, 3), 0)
-        val (tensorU, tensorS, tensorV) = tensor.svd()
+        val (tensorU, tensorS, tensorV) = svd(tensor)
         val tensorSVD = tensorU matmul (diagonalEmbedding(tensorS) matmul tensorV.transposed())
         assertTrue(tensor.eq(tensorSVD))
     }
@ -180,7 +180,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
     fun testBatchedSymEig() = DoubleTensorAlgebra {
         val tensor = randomNormal(shape = ShapeND(2, 3, 3), 0)
         val tensorSigma = tensor + tensor.transposed()
-        val (tensorS, tensorV) = tensorSigma.symEig()
+        val (tensorS, tensorV) = symEig(tensorSigma)
         val tensorSigmaCalc = tensorV matmul (diagonalEmbedding(tensorS) matmul tensorV.transposed())
         assertTrue(tensorSigma.eq(tensorSigmaCalc))
     }
@ -190,7 +190,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {

     private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double = 1e-10) {
-        val svd = tensor.svd()
+        val svd = svd(tensor)

         val tensorSVD = svd.first
             .dot(