forked from kscience/kmath
[WIP] optimization with QOW
parent 7f32348e7a · commit 95c0b2d3f0

.gitignore (vendored)
@@ -18,3 +18,5 @@ out/
 # Generated by javac -h and runtime
 *.class
 *.log
+
+/kmath-ejml/src/main/kotlin/space/kscience/kmath/ejml/_generated.kt
@@ -14,6 +14,7 @@
 - Jupyter Notebook integration module (kmath-jupyter)
 - `@PerformancePitfall` annotation to mark possibly slow API
 - BigInt operation performance improvement and fixes by @zhelenskiy (#328)
+- Unified architecture for Integration and Optimization using features.

 ### Changed
 - Exponential operations merged with hyperbolic functions
@@ -35,6 +36,8 @@
 - Remove Any restriction on polynomials
 - Add `out` variance to type parameters of `StructureND` and its implementations where possible
 - Rename `DifferentiableMstExpression` to `KotlingradExpression`
+- `FeatureSet` now accepts only `Feature`. It is possible to override keys and use interfaces.
+- Use `Symbol` factory function instead of `StringSymbol`

 ### Deprecated
@@ -23,8 +23,8 @@ internal class DotBenchmark {
         const val dim = 1000

         //creating invertible matrix
-        val matrix1 = LinearSpace.real.buildMatrix(dim, dim) { i, j -> if (i <= j) random.nextDouble() else 0.0 }
-        val matrix2 = LinearSpace.real.buildMatrix(dim, dim) { i, j -> if (i <= j) random.nextDouble() else 0.0 }
+        val matrix1 = LinearSpace.double.buildMatrix(dim, dim) { i, j -> if (i <= j) random.nextDouble() else 0.0 }
+        val matrix2 = LinearSpace.double.buildMatrix(dim, dim) { i, j -> if (i <= j) random.nextDouble() else 0.0 }

         val cmMatrix1 = CMLinearSpace { matrix1.toCM() }
         val cmMatrix2 = CMLinearSpace { matrix2.toCM() }
@@ -63,7 +63,7 @@ internal class DotBenchmark {

    @Benchmark
    fun realDot(blackhole: Blackhole) {
-        LinearSpace.real {
+        LinearSpace.double {
            blackhole.consume(matrix1 dot matrix2)
        }
    }
@@ -25,7 +25,7 @@ internal class MatrixInverseBenchmark {
    private val random = Random(1224)
    private const val dim = 100

-    private val space = LinearSpace.real
+    private val space = LinearSpace.double

    //creating invertible matrix
    private val u = space.buildMatrix(dim, dim) { i, j -> if (i <= j) random.nextDouble() else 0.0 }
@@ -35,7 +35,7 @@ internal class MatrixInverseBenchmark {

    @Benchmark
    fun kmathLupInversion(blackhole: Blackhole) {
-        blackhole.consume(LinearSpace.real.inverseWithLup(matrix))
+        blackhole.consume(LinearSpace.double.inverseWithLup(matrix))
    }

    @Benchmark
@@ -203,7 +203,7 @@ public object EjmlLinearSpace${ops} : EjmlLinearSpace<${type}, ${kmathAlgebra},
    public override fun ${type}.times(v: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> = v * this

    @UnstableKMathAPI
-    public override fun <F : StructureFeature> getFeature(structure: Matrix<${type}>, type: KClass<out F>): F? {
+    public override fun <F : StructureFeature> computeFeature(structure: Matrix<${type}>, type: KClass<out F>): F? {
        structure.getFeature(type)?.let { return it }
        val origin = structure.toEjml().origin

@@ -9,7 +9,7 @@ import space.kscience.kmath.asm.compileToExpression
 import space.kscience.kmath.expressions.derivative
 import space.kscience.kmath.expressions.invoke
 import space.kscience.kmath.expressions.symbol
-import space.kscience.kmath.kotlingrad.toDiffExpression
+import space.kscience.kmath.kotlingrad.toKotlingradExpression
 import space.kscience.kmath.operations.DoubleField

 /**
@@ -20,7 +20,7 @@ fun main() {
    val x by symbol

    val actualDerivative = "x^2-4*x-44".parseMath()
-        .toDiffExpression(DoubleField)
+        .toKotlingradExpression(DoubleField)
        .derivative(x)

@@ -17,7 +17,7 @@ import com.github.h0tk3y.betterParse.lexer.regexToken
 import com.github.h0tk3y.betterParse.parser.ParseResult
 import com.github.h0tk3y.betterParse.parser.Parser
 import space.kscience.kmath.expressions.MST
-import space.kscience.kmath.expressions.StringSymbol
+import space.kscience.kmath.expressions.Symbol
 import space.kscience.kmath.operations.FieldOperations
 import space.kscience.kmath.operations.GroupOperations
 import space.kscience.kmath.operations.PowerOperations
@@ -43,7 +43,7 @@ public object ArithmeticsEvaluator : Grammar<MST>() {
    private val ws: Token by regexToken("\\s+".toRegex(), ignore = true)

    private val number: Parser<MST> by num use { MST.Numeric(text.toDouble()) }
-    private val singular: Parser<MST> by id use { StringSymbol(text) }
+    private val singular: Parser<MST> by id use { Symbol(text) }

    private val unaryFunction: Parser<MST> by (id and -lpar and parser(ArithmeticsEvaluator::subSumChain) and -rpar)
        .map { (id, term) -> MST.Unary(id.text, term) }

@@ -7,7 +7,6 @@
 package space.kscience.kmath.asm.internal

-import space.kscience.kmath.expressions.StringSymbol
 import space.kscience.kmath.expressions.Symbol

 /**
@@ -15,4 +14,4 @@ import space.kscience.kmath.expressions.Symbol
 *
 * @author Iaroslav Postovalov
 */
-internal fun <V> Map<Symbol, V>.getOrFail(key: String): V = getValue(StringSymbol(key))
+internal fun <V> Map<Symbol, V>.getOrFail(key: String): V = getValue(Symbol(key))
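Note: call sites now construct symbols through the `Symbol` factory function rather than the `StringSymbol` constructor. A minimal sketch of the new call shape (assuming the factory simply wraps the identifier, as elsewhere in kmath):

```kotlin
import space.kscience.kmath.expressions.Symbol

// The factory hides the concrete StringSymbol implementation behind
// the Symbol interface.
val x: Symbol = Symbol("x")
```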
@@ -103,12 +103,12 @@ public class DerivativeStructureField(
    public override operator fun DerivativeStructure.minus(b: Number): DerivativeStructure = subtract(b.toDouble())
    public override operator fun Number.plus(b: DerivativeStructure): DerivativeStructure = b + this
    public override operator fun Number.minus(b: DerivativeStructure): DerivativeStructure = b - this
 }

-public companion object :
-    AutoDiffProcessor<Double, DerivativeStructure, DerivativeStructureField, Expression<Double>> {
-    public override fun process(function: DerivativeStructureField.() -> DerivativeStructure): DifferentiableExpression<Double> =
-        DerivativeStructureExpression(function)
-}
+public object DSProcessor : AutoDiffProcessor<Double, DerivativeStructure, DerivativeStructureField> {
+    public override fun differentiate(
+        function: DerivativeStructureField.() -> DerivativeStructure,
+    ): DerivativeStructureExpression = DerivativeStructureExpression(function)
+}

 /**
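Note: the autodiff entry point moves from a companion object implementing `process` to the standalone `DSProcessor` object exposing `differentiate`. A usage sketch consistent with the updated tests later in this commit:

```kotlin
import space.kscience.kmath.commons.expressions.DSProcessor
import space.kscience.kmath.expressions.Symbol.Companion.x
import space.kscience.kmath.expressions.derivative

// Build a differentiable expression from an autodiff block, then
// request its first derivative with respect to x.
val parabola = DSProcessor.differentiate { bindSymbol(x).pow(2) }
val slope = parabola.derivative(x)
```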
@@ -16,7 +16,7 @@ public class CMGaussRuleIntegrator(
    private var type: GaussRule = GaussRule.LEGANDRE,
 ) : UnivariateIntegrator<Double> {

-    override fun integrate(integrand: UnivariateIntegrand<Double>): UnivariateIntegrand<Double> {
+    override fun process(integrand: UnivariateIntegrand<Double>): UnivariateIntegrand<Double> {
        val range = integrand.getFeature<IntegrationRange>()?.range
            ?: error("Integration range is not provided")
        val integrator: GaussIntegrator = getIntegrator(range)
@@ -76,8 +76,8 @@ public class CMGaussRuleIntegrator(
            numPoints: Int = 100,
            type: GaussRule = GaussRule.LEGANDRE,
            function: (Double) -> Double,
-        ): Double = CMGaussRuleIntegrator(numPoints, type).integrate(
+        ): Double = CMGaussRuleIntegrator(numPoints, type).process(
            UnivariateIntegrand(function, IntegrationRange(range))
-        ).valueOrNull!!
+        ).value
    }
 }
@@ -18,7 +18,7 @@ public class CMIntegrator(
    public val integratorBuilder: (Integrand) -> org.apache.commons.math3.analysis.integration.UnivariateIntegrator,
 ) : UnivariateIntegrator<Double> {

-    override fun integrate(integrand: UnivariateIntegrand<Double>): UnivariateIntegrand<Double> {
+    override fun process(integrand: UnivariateIntegrand<Double>): UnivariateIntegrand<Double> {
        val integrator = integratorBuilder(integrand)
        val maxCalls = integrand.getFeature<IntegrandMaxCalls>()?.maxCalls ?: defaultMaxCalls
        val remainingCalls = maxCalls - integrand.calls
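Note: both Commons Math integrator wrappers now override `process` instead of `integrate`. A hedged call-site sketch, mirroring the companion helper above (the range constructor shape is assumed from the visible code):

```kotlin
import kotlin.math.PI
import kotlin.math.sin

// Integrate sin over [0, PI] through the renamed entry point; the
// integrand carries its range as a feature, as in the code above.
val result = CMGaussRuleIntegrator(100)
    .process(UnivariateIntegrand(::sin, IntegrationRange(0.0..PI)))
    .value
```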
@@ -95,7 +95,7 @@ public object CMLinearSpace : LinearSpace<Double, DoubleField> {
        v * this

    @UnstableKMathAPI
-    override fun <F : StructureFeature> getFeature(structure: Matrix<Double>, type: KClass<out F>): F? {
+    override fun <F : StructureFeature> computeFeature(structure: Matrix<Double>, type: KClass<out F>): F? {
        //Return the feature if it is intrinsic to the structure
        structure.getFeature(type)?.let { return it }

@@ -109,22 +109,22 @@ public object CMLinearSpace : LinearSpace<Double, DoubleField> {
                LupDecompositionFeature<Double> {
                private val lup by lazy { LUDecomposition(origin) }
                override val determinant: Double by lazy { lup.determinant }
-                override val l: Matrix<Double> by lazy { CMMatrix(lup.l) + LFeature }
-                override val u: Matrix<Double> by lazy { CMMatrix(lup.u) + UFeature }
+                override val l: Matrix<Double> by lazy<Matrix<Double>> { CMMatrix(lup.l).withFeature(LFeature) }
+                override val u: Matrix<Double> by lazy<Matrix<Double>> { CMMatrix(lup.u).withFeature(UFeature) }
                override val p: Matrix<Double> by lazy { CMMatrix(lup.p) }
            }

            CholeskyDecompositionFeature::class -> object : CholeskyDecompositionFeature<Double> {
-                override val l: Matrix<Double> by lazy {
+                override val l: Matrix<Double> by lazy<Matrix<Double>> {
                    val cholesky = CholeskyDecomposition(origin)
-                    CMMatrix(cholesky.l) + LFeature
+                    CMMatrix(cholesky.l).withFeature(LFeature)
                }
            }

            QRDecompositionFeature::class -> object : QRDecompositionFeature<Double> {
                private val qr by lazy { QRDecomposition(origin) }
-                override val q: Matrix<Double> by lazy { CMMatrix(qr.q) + OrthogonalFeature }
-                override val r: Matrix<Double> by lazy { CMMatrix(qr.r) + UFeature }
+                override val q: Matrix<Double> by lazy<Matrix<Double>> { CMMatrix(qr.q).withFeature(OrthogonalFeature) }
+                override val r: Matrix<Double> by lazy<Matrix<Double>> { CMMatrix(qr.r).withFeature(UFeature) }
            }

            SingularValueDecompositionFeature::class -> object : SingularValueDecompositionFeature<Double> {
@@ -6,6 +6,7 @@
 package space.kscience.kmath.commons.linear

 import org.apache.commons.math3.linear.*
+import space.kscience.kmath.linear.LinearSolver
 import space.kscience.kmath.linear.Matrix
 import space.kscience.kmath.linear.Point

@@ -44,3 +45,12 @@ public fun CMLinearSpace.inverse(
    a: Matrix<Double>,
    decomposition: CMDecomposition = CMDecomposition.LUP,
 ): CMMatrix = solver(a, decomposition).inverse.wrap()
+
+
+public fun CMLinearSpace.solver(decomposition: CMDecomposition): LinearSolver<Double> = object : LinearSolver<Double> {
+    override fun solve(a: Matrix<Double>, b: Matrix<Double>): Matrix<Double> = solve(a, b, decomposition)
+
+    override fun solve(a: Matrix<Double>, b: Point<Double>): Point<Double> = solve(a, b, decomposition)
+
+    override fun inverse(matrix: Matrix<Double>): Matrix<Double> = inverse(matrix, decomposition)
+}
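Note: the new `solver` factory adapts a chosen decomposition to the shared `LinearSolver` interface. Usage sketch (`a` and `b` stand in for matrices built elsewhere):

```kotlin
// Reuse one decomposition strategy for both solving and inversion.
val solver = CMLinearSpace.solver(CMDecomposition.LUP)
val x = solver.solve(a, b)       // X in A X = B
val aInverse = solver.inverse(a) // A^-1 via the same LUP path
```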
@@ -2,7 +2,7 @@
 * Copyright 2018-2021 KMath contributors.
 * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
 */

+@file:OptIn(UnstableKMathAPI::class)
 package space.kscience.kmath.commons.optimization

 import org.apache.commons.math3.optim.*
@@ -11,6 +11,10 @@ import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer
 import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction
 import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient
 import org.apache.commons.math3.optim.nonlinear.scalar.gradient.NonLinearConjugateGradientOptimizer
+import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.NelderMeadSimplex
+import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer
+import space.kscience.kmath.expressions.Symbol
+import space.kscience.kmath.expressions.SymbolIndexer
 import space.kscience.kmath.expressions.derivative
 import space.kscience.kmath.expressions.withSymbols
 import space.kscience.kmath.misc.UnstableKMathAPI
@@ -21,29 +25,61 @@ import kotlin.reflect.KClass
 public operator fun PointValuePair.component1(): DoubleArray = point
 public operator fun PointValuePair.component2(): Double = value

-public class CMOptimizer(public val optimizerBuilder: () -> MultivariateOptimizer): OptimizationFeature{
+public class CMOptimizerEngine(public val optimizerBuilder: () -> MultivariateOptimizer) : OptimizationFeature {
    override fun toString(): String = "CMOptimizer($optimizerBuilder)"
 }

+/**
+ * Specify a Commons-maths optimization engine
+ */
+public fun FunctionOptimizationBuilder<Double>.cmEngine(optimizerBuilder: () -> MultivariateOptimizer) {
+    addFeature(CMOptimizerEngine(optimizerBuilder))
+}
+
-public class CMOptimizerData(public val data: List<OptimizationData>) : OptimizationFeature {
-    public constructor(vararg data: OptimizationData) : this(data.toList())
+public class CMOptimizerData(public val data: List<SymbolIndexer.() -> OptimizationData>) : OptimizationFeature {
+    public constructor(vararg data: (SymbolIndexer.() -> OptimizationData)) : this(data.toList())

    override fun toString(): String = "CMOptimizerData($data)"
 }

+/**
+ * Specify Commons-maths optimization data.
+ */
+public fun FunctionOptimizationBuilder<Double>.cmOptimizationData(data: SymbolIndexer.() -> OptimizationData) {
+    updateFeature<CMOptimizerData> {
+        val newData = (it?.data ?: emptyList()) + data
+        CMOptimizerData(newData)
+    }
+}
+
+public fun FunctionOptimizationBuilder<Double>.simplexSteps(vararg steps: Pair<Symbol, Double>) {
+    //TODO use convergence checker from features
+    cmEngine { SimplexOptimizer(CMOptimizer.defaultConvergenceChecker) }
+    cmOptimizationData { NelderMeadSimplex(mapOf(*steps).toDoubleArray()) }
+}
+
 @OptIn(UnstableKMathAPI::class)
-public class CMOptimization : Optimizer<FunctionOptimization<Double>> {
+public object CMOptimizer : Optimizer<Double, FunctionOptimization<Double>> {

+    public const val DEFAULT_RELATIVE_TOLERANCE: Double = 1e-4
+    public const val DEFAULT_ABSOLUTE_TOLERANCE: Double = 1e-4
+    public const val DEFAULT_MAX_ITER: Int = 1000
+
+    public val defaultConvergenceChecker: SimpleValueChecker = SimpleValueChecker(
+        DEFAULT_RELATIVE_TOLERANCE,
+        DEFAULT_ABSOLUTE_TOLERANCE,
+        DEFAULT_MAX_ITER
+    )
+

    override suspend fun optimize(
        problem: FunctionOptimization<Double>,
    ): FunctionOptimization<Double> {
-        val startPoint = problem.getFeature<OptimizationStartPoint<Double>>()?.point
-            ?: error("Starting point not defined in $problem")
+        val startPoint = problem.startPoint

        val parameters = problem.getFeature<OptimizationParameters>()?.symbols
-            ?: problem.getFeature<OptimizationStartPoint<Double>>()?.point?.keys
-            ?:startPoint.keys
+            ?: startPoint.keys


        withSymbols(parameters) {
@@ -53,7 +89,7 @@ public class CMOptimization : Optimizer<FunctionOptimization<Double>> {
                DEFAULT_MAX_ITER
            )

-            val cmOptimizer: MultivariateOptimizer = problem.getFeature<CMOptimizer>()?.optimizerBuilder?.invoke()
+            val cmOptimizer: MultivariateOptimizer = problem.getFeature<CMOptimizerEngine>()?.optimizerBuilder?.invoke()
                ?: NonLinearConjugateGradientOptimizer(
                    NonLinearConjugateGradientOptimizer.Formula.FLETCHER_REEVES,
                    convergenceChecker
@@ -68,7 +104,7 @@ public class CMOptimization : Optimizer<FunctionOptimization<Double>> {
            addOptimizationData(MaxEval.unlimited())
            addOptimizationData(InitialGuess(startPoint.toDoubleArray()))

-            fun exportOptimizationData(): List<OptimizationData> = optimizationData.values.toList()
+            //fun exportOptimizationData(): List<OptimizationData> = optimizationData.values.toList()

            val objectiveFunction = ObjectiveFunction {
                val args = startPoint + it.toMap()
@@ -88,7 +124,9 @@ public class CMOptimization : Optimizer<FunctionOptimization<Double>> {

            for (feature in problem.features) {
                when (feature) {
-                    is CMOptimizerData -> feature.data.forEach { addOptimizationData(it) }
+                    is CMOptimizerData -> feature.data.forEach { dataBuilder ->
+                        addOptimizationData(dataBuilder())
+                    }
                    is FunctionOptimizationTarget -> when (feature) {
                        FunctionOptimizationTarget.MAXIMIZE -> addOptimizationData(GoalType.MAXIMIZE)
                        FunctionOptimizationTarget.MINIMIZE -> addOptimizationData(GoalType.MINIMIZE)
@@ -101,10 +139,4 @@ public class CMOptimization : Optimizer<FunctionOptimization<Double>> {
            return problem.withFeatures(OptimizationResult(point.toMap()), OptimizationValue(value))
        }
    }
-
-    public companion object {
-        public const val DEFAULT_RELATIVE_TOLERANCE: Double = 1e-4
-        public const val DEFAULT_ABSOLUTE_TOLERANCE: Double = 1e-4
-        public const val DEFAULT_MAX_ITER: Int = 1000
-    }
-}
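Note: the stateful `CMOptimization` class becomes the feature-driven `CMOptimizer` object; engines and raw Commons Math `OptimizationData` are supplied through builder extensions. A hedged sketch of the new configuration style, mirroring `OptimizeTest` below (`expression` is a placeholder for any `DifferentiableExpression<Double>`):

```kotlin
import kotlinx.coroutines.runBlocking
import space.kscience.kmath.expressions.Symbol.Companion.x
import space.kscience.kmath.expressions.Symbol.Companion.y

// Minimize a differentiable expression starting from (1, 1); the
// simplexSteps feature selects the Nelder-Mead simplex engine.
runBlocking {
    val result = expression.optimizeWith(CMOptimizer, x to 1.0, y to 1.0) {
        simplexSteps(x to 2.0, y to 0.5)
    }
    println(result.resultPoint)
    println(result.resultValue)
}
```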
@@ -1,75 +0,0 @@
-/*
- * Copyright 2018-2021 KMath contributors.
- * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
- */
-
-package space.kscience.kmath.commons.optimization
-
-import org.apache.commons.math3.analysis.differentiation.DerivativeStructure
-import space.kscience.kmath.commons.expressions.DerivativeStructureField
-import space.kscience.kmath.expressions.DifferentiableExpression
-import space.kscience.kmath.expressions.Expression
-import space.kscience.kmath.expressions.Symbol
-import space.kscience.kmath.optimization.*
-import space.kscience.kmath.structures.Buffer
-import space.kscience.kmath.structures.asBuffer
-
-/**
- * Generate a chi squared expression from given x-y-sigma data and inline model. Provides automatic differentiation
- */
-public fun FunctionOptimization.Companion.chiSquaredExpression(
-    x: Buffer<Double>,
-    y: Buffer<Double>,
-    yErr: Buffer<Double>,
-    model: DerivativeStructureField.(x: DerivativeStructure) -> DerivativeStructure,
-): DifferentiableExpression<Double> = chiSquaredExpression(DerivativeStructureField, x, y, yErr, model)
-
-/**
- * Generate a chi squared expression from given x-y-sigma data and inline model. Provides automatic differentiation
- */
-public fun FunctionOptimization.Companion.chiSquaredExpression(
-    x: Iterable<Double>,
-    y: Iterable<Double>,
-    yErr: Iterable<Double>,
-    model: DerivativeStructureField.(x: DerivativeStructure) -> DerivativeStructure,
-): DifferentiableExpression<Double> = chiSquaredExpression(
-    DerivativeStructureField,
-    x.toList().asBuffer(),
-    y.toList().asBuffer(),
-    yErr.toList().asBuffer(),
-    model
-)
-
-/**
- * Optimize expression without derivatives
- */
-public suspend fun Expression<Double>.optimize(
-    vararg symbols: Symbol,
-    configuration: CMOptimization.() -> Unit,
-): OptimizationResult<Double> {
-    require(symbols.isNotEmpty()) { "Must provide a list of symbols for optimization" }
-    val problem = CMOptimization(symbols.toList(), configuration)
-    problem.noDerivFunction(this)
-    return problem.optimize()
-}
-
-/**
- * Optimize differentiable expression
- */
-public suspend fun DifferentiableExpression<Double>.optimize(
-    vararg symbols: Symbol,
-    configuration: CMOptimization.() -> Unit,
-): OptimizationResult<Double> = optimizeWith(CMOptimization, symbols = symbols, configuration)
-
-public suspend fun DifferentiableExpression<Double>.minimize(
-    vararg startPoint: Pair<Symbol, Double>,
-    configuration: CMOptimization.() -> Unit = {},
-): OptimizationResult<Double> {
-    val symbols = startPoint.map { it.first }.toTypedArray()
-    return optimize(*symbols){
-        maximize = false
-        initialGuess(startPoint.toMap())
-        function(this@minimize)
-        configuration()
-    }
-}
@@ -6,43 +6,38 @@
 package space.kscience.kmath.commons.optimization

 import kotlinx.coroutines.runBlocking
+import space.kscience.kmath.commons.expressions.DSProcessor
 import space.kscience.kmath.commons.expressions.DerivativeStructureExpression
 import space.kscience.kmath.distributions.NormalDistribution
+import space.kscience.kmath.expressions.Symbol.Companion.x
+import space.kscience.kmath.expressions.Symbol.Companion.y
-import space.kscience.kmath.expressions.symbol
-import space.kscience.kmath.optimization.FunctionOptimization
+import space.kscience.kmath.optimization.*
 import space.kscience.kmath.stat.RandomGenerator
 import kotlin.math.pow
 import kotlin.test.Test

 internal class OptimizeTest {
-    val x by symbol
-    val y by symbol

    val normal = DerivativeStructureExpression {
-        exp(-bindSymbol(x).pow(2) / 2) + exp(-bindSymbol(y)
-            .pow(2) / 2)
+        exp(-bindSymbol(x).pow(2) / 2) + exp(-bindSymbol(y).pow(2) / 2)
    }

    @Test
-    fun testGradientOptimization() = runBlocking{
-        val result = normal.optimize(x, y) {
-            initialGuess(x to 1.0, y to 1.0)
-            //no need to select optimizer. Gradient optimizer is used by default because gradients are provided by function
-        }
-        println(result.point)
-        println(result.value)
+    fun testGradientOptimization() = runBlocking {
+        val result = normal.optimizeWith(CMOptimizer, x to 1.0, y to 1.0)
+        println(result.resultPoint)
+        println(result.resultValue)
    }

    @Test
-    fun testSimplexOptimization() = runBlocking{
-        val result = normal.optimize(x, y) {
-            initialGuess(x to 1.0, y to 1.0)
+    fun testSimplexOptimization() = runBlocking {
+        val result = normal.optimizeWith(CMOptimizer, x to 1.0, y to 1.0) {
            simplexSteps(x to 2.0, y to 0.5)
            //this sets simplex optimizer
        }

-        println(result.point)
-        println(result.value)
+        println(result.resultPoint)
+        println(result.resultValue)
    }

    @Test
@@ -62,6 +57,11 @@ internal class OptimizeTest {

        val yErr = List(x.size) { sigma }

+        val model = DSProcessor.differentiate { x1 ->
+            val cWithDefault = bindSymbolOrNull(c) ?: one
+            bindSymbol(a) * x1.pow(2) + bindSymbol(b) * x1 + cWithDefault
+        }
+
        val chi2 = FunctionOptimization.chiSquared(x, y, yErr) { x1 ->
            val cWithDefault = bindSymbolOrNull(c) ?: one
            bindSymbol(a) * x1.pow(2) + bindSymbol(b) * x1 + cWithDefault
@@ -25,6 +25,9 @@ public interface ColumnarData<out T> {
    public operator fun get(symbol: Symbol): Buffer<T>?
 }

+@UnstableKMathAPI
+public val ColumnarData<*>.indices: IntRange get() = 0 until size
+
 /**
 * A zero-copy method to represent a [Structure2D] as two-column x-y data.
 * There could be more than two columns in the structure.
@@ -5,6 +5,8 @@
 package space.kscience.kmath.expressions

+import space.kscience.kmath.operations.Algebra
+
 /**
 * Represents an expression whose structure can be differentiated.
 *
@@ -63,7 +65,10 @@ public abstract class FirstDerivativeExpression<T> : DifferentiableExpression<T>

 /**
 * A factory that converts an expression in autodiff variables to a [DifferentiableExpression]
+ * @param T type of the constants for the expression
+ * @param I type of the actual expression state
+ * @param A type of expression algebra
 */
-public fun interface AutoDiffProcessor<T, I, A : ExpressionAlgebra<T, I>, out R : Expression<T>> {
-    public fun process(function: A.() -> I): DifferentiableExpression<T>
+public fun interface AutoDiffProcessor<T, I, A : Algebra<I>> {
+    public fun differentiate(function: A.() -> I): DifferentiableExpression<T>
 }
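Note: `AutoDiffProcessor` drops its result type parameter and only requires an `Algebra` over the expression state; its single abstract method is now `differentiate`. Since it is a `fun` interface, an instance can be a lambda; the `DSProcessor` defined earlier in this commit has exactly this shape:

```kotlin
// A conforming instance: delegate to DerivativeStructureExpression,
// as DSProcessor does in the commons-math module.
val processor: AutoDiffProcessor<Double, DerivativeStructure, DerivativeStructureField> =
    AutoDiffProcessor { function -> DerivativeStructureExpression(function) }
```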
@@ -5,8 +5,6 @@
 package space.kscience.kmath.linear

-import space.kscience.kmath.nd.as1D
-
 /**
 * A group of methods to solve for *X* in equation *X = A<sup>-1</sup> · B*, where *A* and *B* are matrices or
 * vectors.
@@ -30,20 +28,3 @@ public interface LinearSolver<T : Any> {
    public fun inverse(matrix: Matrix<T>): Matrix<T>
 }
-
-/**
- * Convert matrix to vector if it is possible.
- */
-public fun <T : Any> Matrix<T>.asVector(): Point<T> =
-    if (this.colNum == 1)
-        as1D()
-    else
-        error("Can't convert matrix with more than one column to vector")
-
-/**
- * Creates an n × 1 [VirtualMatrix], where n is the size of the given buffer.
- *
- * @param T the type of elements contained in the buffer.
- * @receiver a buffer.
- * @return the new matrix.
- */
-public fun <T : Any> Point<T>.asMatrix(): VirtualMatrix<T> = VirtualMatrix(size, 1) { i, _ -> get(i) }
@@ -164,7 +164,7 @@ public interface LinearSpace<T : Any, out A : Ring<T>> {
    public operator fun T.times(v: Point<T>): Point<T> = v * this

    /**
-     * Get a feature of the structure in this scope. Structure features take precedence other context features
+     * Compute a feature of the structure in this scope. Structure features take precedence over context features
     *
     * @param F the type of feature.
     * @param structure the structure.
@@ -172,7 +172,7 @@ public interface LinearSpace<T : Any, out A : Ring<T>> {
     * @return a feature object or `null` if it isn't present.
     */
    @UnstableKMathAPI
-    public fun <F : StructureFeature> getFeature(structure: Matrix<T>, type: KClass<out F>): F? = structure.getFeature(type)
+    public fun <F : StructureFeature> computeFeature(structure: Matrix<T>, type: KClass<out F>): F? = structure.getFeature(type)

    public companion object {

@@ -184,7 +184,7 @@ public interface LinearSpace<T : Any, out A : Ring<T>> {
            bufferFactory: BufferFactory<T> = Buffer.Companion::boxing,
        ): LinearSpace<T, A> = BufferedLinearSpace(algebra, bufferFactory)

-        public val real: LinearSpace<Double, DoubleField> = buffered(DoubleField, ::DoubleBuffer)
+        public val double: LinearSpace<Double, DoubleField> = buffered(DoubleField, ::DoubleBuffer)

        /**
         * Automatic buffered matrix, unboxed if it is possible
@@ -202,9 +202,27 @@ public interface LinearSpace<T : Any, out A : Ring<T>> {
 * @return a feature object or `null` if it isn't present.
 */
 @UnstableKMathAPI
-public inline fun <T : Any, reified F : StructureFeature> LinearSpace<T, *>.getFeature(structure: Matrix<T>): F? =
-    getFeature(structure, F::class)
+public inline fun <T : Any, reified F : StructureFeature> LinearSpace<T, *>.computeFeature(structure: Matrix<T>): F? =
+    computeFeature(structure, F::class)


-public operator fun <LS : LinearSpace<*, *>, R> LS.invoke(block: LS.() -> R): R = run(block)
+public inline operator fun <LS : LinearSpace<*, *>, R> LS.invoke(block: LS.() -> R): R = run(block)

+
+/**
+ * Convert matrix to vector if it is possible.
+ */
+public fun <T : Any> Matrix<T>.asVector(): Point<T> =
+    if (this.colNum == 1)
+        as1D()
+    else
+        error("Can't convert matrix with more than one column to vector")
+
+/**
+ * Creates an n × 1 [VirtualMatrix], where n is the size of the given buffer.
+ *
+ * @param T the type of elements contained in the buffer.
+ * @receiver a buffer.
+ * @return the new matrix.
+ */
+public fun <T : Any> Point<T>.asMatrix(): VirtualMatrix<T> = VirtualMatrix(size, 1) { i, _ -> get(i) }
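Note: on `LinearSpace` the feature lookup is renamed `computeFeature` to signal that it may do real work (e.g. run a decomposition), while `getFeature` on a structure stays a cheap lookup of already-attached features. Sketch:

```kotlin
// Ask the space to compute a determinant feature for a matrix; null
// means the space cannot provide it. DeterminantFeature is part of
// the existing feature vocabulary.
val det = LinearSpace.double
    .computeFeature<Double, DeterminantFeature<Double>>(matrix)
    ?.determinant
```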
@@ -5,6 +5,7 @@
 package space.kscience.kmath.linear

+import space.kscience.kmath.misc.PerformancePitfall
 import space.kscience.kmath.misc.UnstableKMathAPI
 import space.kscience.kmath.nd.getFeature
 import space.kscience.kmath.operations.*
@@ -34,7 +35,7 @@ public class LupDecomposition<T : Any>(
            j == i -> elementContext.one
            else -> elementContext.zero
        }
-    } + LFeature
+    }.withFeature(LFeature)


    /**
@@ -44,7 +45,7 @@ public class LupDecomposition<T : Any>(
     */
    override val u: Matrix<T> = VirtualMatrix(lu.shape[0], lu.shape[1]) { i, j ->
        if (j >= i) lu[i, j] else elementContext.zero
-    } + UFeature
+    }.withFeature(UFeature)

    /**
     * Returns the P rows permutation matrix.
@@ -82,7 +83,7 @@ public fun <T : Comparable<T>> LinearSpace<T, Field<T>>.lup(
    val m = matrix.colNum
    val pivot = IntArray(matrix.rowNum)

-    //TODO just waits for KEEP-176
+    //TODO just waits for multi-receivers
    BufferAccessor2D(matrix.rowNum, matrix.colNum, factory).run {
        elementAlgebra {
            val lu = create(matrix)
@@ -156,10 +157,13 @@ public inline fun <reified T : Comparable<T>> LinearSpace<T, Field<T>>.lup(
    noinline checkSingular: (T) -> Boolean,
 ): LupDecomposition<T> = lup(MutableBuffer.Companion::auto, matrix, checkSingular)

-public fun LinearSpace<Double, DoubleField>.lup(matrix: Matrix<Double>): LupDecomposition<Double> =
-    lup(::DoubleBuffer, matrix) { it < 1e-11 }
+public fun LinearSpace<Double, DoubleField>.lup(
+    matrix: Matrix<Double>,
+    singularityThreshold: Double = 1e-11,
+): LupDecomposition<Double> =
+    lup(::DoubleBuffer, matrix) { it < singularityThreshold }

-public fun <T : Any> LupDecomposition<T>.solveWithLup(
+internal fun <T : Any> LupDecomposition<T>.solve(
    factory: MutableBufferFactory<T>,
    matrix: Matrix<T>,
 ): Matrix<T> {
@@ -207,41 +211,24 @@ public fun <T : Any> LupDecomposition<T>.solveWithLup(
    }
 }

-public inline fun <reified T : Any> LupDecomposition<T>.solveWithLup(matrix: Matrix<T>): Matrix<T> =
-    solveWithLup(MutableBuffer.Companion::auto, matrix)
-
 /**
- * Solves a system of linear equations *ax = b* using LUP decomposition.
+ * Produce a generic solver based on LUP decomposition
 */
-@PerformancePitfall()
 @OptIn(UnstableKMathAPI::class)
-public inline fun <reified T : Comparable<T>> LinearSpace<T, Field<T>>.solveWithLup(
-    a: Matrix<T>,
-    b: Matrix<T>,
-    noinline bufferFactory: MutableBufferFactory<T> = MutableBuffer.Companion::auto,
-    noinline checkSingular: (T) -> Boolean,
-): Matrix<T> {
-    // Use existing decomposition if it is provided by matrix
-    val decomposition = a.getFeature() ?: lup(bufferFactory, a, checkSingular)
-    return decomposition.solveWithLup(bufferFactory, b)
-}
+public fun <T : Comparable<T>, F : Field<T>> LinearSpace<T, F>.lupSolver(
+    bufferFactory: MutableBufferFactory<T>,
+    singularityCheck: (T) -> Boolean,
+): LinearSolver<T> = object : LinearSolver<T> {
+    override fun solve(a: Matrix<T>, b: Matrix<T>): Matrix<T> {
+        // Use existing decomposition if it is provided by matrix
+        val decomposition = a.getFeature() ?: lup(bufferFactory, a, singularityCheck)
+        return decomposition.solve(bufferFactory, b)
+    }
+
+    override fun inverse(matrix: Matrix<T>): Matrix<T> = solve(matrix, one(matrix.rowNum, matrix.colNum))
+}

-public inline fun <reified T : Comparable<T>> LinearSpace<T, Field<T>>.inverseWithLup(
-    matrix: Matrix<T>,
-    noinline bufferFactory: MutableBufferFactory<T> = MutableBuffer.Companion::auto,
-    noinline checkSingular: (T) -> Boolean,
-): Matrix<T> = solveWithLup(matrix, one(matrix.rowNum, matrix.colNum), bufferFactory, checkSingular)
-
-@OptIn(UnstableKMathAPI::class)
-public fun LinearSpace<Double, DoubleField>.solveWithLup(a: Matrix<Double>, b: Matrix<Double>): Matrix<Double> {
-    // Use existing decomposition if it is provided by matrix
-    val bufferFactory: MutableBufferFactory<Double> = ::DoubleBuffer
-    val decomposition: LupDecomposition<Double> = a.getFeature() ?: lup(bufferFactory, a) { it < 1e-11 }
-    return decomposition.solveWithLup(bufferFactory, b)
-}
-
-/**
- * Inverses a square matrix using LUP decomposition. A non-square matrix will throw an error.
- */
-public fun LinearSpace<Double, DoubleField>.inverseWithLup(matrix: Matrix<Double>): Matrix<Double> =
-    solveWithLup(matrix, one(matrix.rowNum, matrix.colNum))
+@PerformancePitfall
+public fun LinearSpace<Double, DoubleField>.lupSolver(singularityThreshold: Double = 1e-11): LinearSolver<Double> =
+    lupSolver(::DoubleBuffer) { it < singularityThreshold }
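Note: the assorted `solveWithLup`/`inverseWithLup` helpers are folded into a `lupSolver` factory that returns a reusable `LinearSolver`. Usage sketch, matching the updated tests (`a` and `b` are placeholder matrices):

```kotlin
// Build the solver once, then solve and invert through it. The
// threshold guards against numerically singular pivots.
val solver = LinearSpace.double.lupSolver(singularityThreshold = 1e-11)
val x = solver.solve(a, b)      // solves A X = B via LUP
val inverse = solver.inverse(a) // inversion against the identity
```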
@@ -7,6 +7,8 @@ package space.kscience.kmath.linear

 import space.kscience.kmath.misc.UnstableKMathAPI
 import space.kscience.kmath.operations.Ring
+import space.kscience.kmath.structures.BufferAccessor2D
+import space.kscience.kmath.structures.MutableBuffer

 public class MatrixBuilder<T : Any, out A : Ring<T>>(
    public val linearSpace: LinearSpace<T, A>,
@@ -45,4 +47,31 @@ public inline fun <T : Any> LinearSpace<T, Ring<T>>.column(
    crossinline builder: (Int) -> T,
 ): Matrix<T> = buildMatrix(size, 1) { i, _ -> builder(i) }

 public fun <T : Any> LinearSpace<T, Ring<T>>.column(vararg values: T): Matrix<T> = column(values.size, values::get)
+
+public object SymmetricMatrixFeature : MatrixFeature
+
+/**
+ * Naive implementation of a symmetric matrix builder that adds a [SymmetricMatrixFeature] tag. The resulting matrix
+ * contains the full `size^2` set of elements but caches values during construction to save [builder] calls. [builder]
+ * is always called in the upper triangle region, meaning that `i <= j`.
+ */
+public fun <T : Any, A : Ring<T>> MatrixBuilder<T, A>.symmetric(
+    builder: (i: Int, j: Int) -> T,
+): Matrix<T> {
+    require(columns == rows) { "In order to build symmetric matrix, number of rows $rows should be equal to number of columns $columns" }
+    return with(BufferAccessor2D<T?>(rows, rows, MutableBuffer.Companion::boxing)) {
+        val cache = factory(rows * rows) { null }
+        linearSpace.buildMatrix(rows, rows) { i, j ->
+            val cached = cache[i, j]
+            if (cached == null) {
+                val value = if (i <= j) builder(i, j) else builder(j, i)
+                cache[i, j] = value
+                cache[j, i] = value
+                value
+            } else {
+                cached
+            }
+        }.withFeature(SymmetricMatrixFeature)
+    }
+}
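Note: the symmetric builder moves here from the deleted `buildSymmetricMatrix` file below and now hangs off `MatrixBuilder`. Usage sketch (`matrix(n, n)` returns the builder, as in the tests):

```kotlin
// builder is only evaluated for i <= j; the lower triangle is
// mirrored from the cached upper-triangle values.
val s = LinearSpace.double.matrix(3, 3).symmetric { i, j -> (i + j).toDouble() }
```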
@@ -5,6 +5,7 @@
 package space.kscience.kmath.linear

+import space.kscience.kmath.misc.FeatureSet
 import space.kscience.kmath.misc.UnstableKMathAPI
 import space.kscience.kmath.nd.StructureFeature
 import space.kscience.kmath.nd.getFeature
@@ -18,7 +19,7 @@ import kotlin.reflect.KClass
 */
 public class MatrixWrapper<out T : Any> internal constructor(
    public val origin: Matrix<T>,
-    public val features: Set<MatrixFeature>,
+    public val features: FeatureSet<StructureFeature>,
 ) : Matrix<T> by origin {

    /**
@@ -27,8 +28,7 @@ public class MatrixWrapper<out T : Any> internal constructor(
    @UnstableKMathAPI
    @Suppress("UNCHECKED_CAST")
    public override fun <F : StructureFeature> getFeature(type: KClass<out F>): F? =
-        features.singleOrNull(type::isInstance) as? F
-            ?: origin.getFeature(type)
+        features.getFeature(type) ?: origin.getFeature(type)

    public override fun toString(): String = "MatrixWrapper(matrix=$origin, features=$features)"
 }
@@ -44,20 +44,23 @@ public val <T : Any> Matrix<T>.origin: Matrix<T>
 /**
 * Add a single feature to a [Matrix]
 */
-public operator fun <T : Any> Matrix<T>.plus(newFeature: MatrixFeature): MatrixWrapper<T> = if (this is MatrixWrapper) {
-    MatrixWrapper(origin, features + newFeature)
+public fun <T : Any> Matrix<T>.withFeature(newFeature: MatrixFeature): MatrixWrapper<T> = if (this is MatrixWrapper) {
+    MatrixWrapper(origin, features.with(newFeature))
 } else {
-    MatrixWrapper(this, setOf(newFeature))
+    MatrixWrapper(this, FeatureSet.of(newFeature))
 }

+@Deprecated("To be replaced by withFeature")
+public operator fun <T : Any> Matrix<T>.plus(newFeature: MatrixFeature): MatrixWrapper<T> = withFeature(newFeature)
+
 /**
 * Add a collection of features to a [Matrix]
 */
-public operator fun <T : Any> Matrix<T>.plus(newFeatures: Collection<MatrixFeature>): MatrixWrapper<T> =
+public fun <T : Any> Matrix<T>.withFeatures(newFeatures: Iterable<MatrixFeature>): MatrixWrapper<T> =
    if (this is MatrixWrapper) {
-        MatrixWrapper(origin, features + newFeatures)
+        MatrixWrapper(origin, features.with(newFeatures))
    } else {
-        MatrixWrapper(this, newFeatures.toSet())
+        MatrixWrapper(this, FeatureSet.of(newFeatures))
    }

 /**
@@ -68,7 +71,7 @@ public fun <T : Any> LinearSpace<T, Ring<T>>.one(
    columns: Int,
 ): Matrix<T> = VirtualMatrix(rows, columns) { i, j ->
    if (i == j) elementAlgebra.one else elementAlgebra.zero
-} + UnitFeature
+}.withFeature(UnitFeature)


 /**
@@ -79,7 +82,7 @@ public fun <T : Any> LinearSpace<T, Ring<T>>.zero(
    columns: Int,
 ): Matrix<T> = VirtualMatrix(rows, columns) { _, _ ->
    elementAlgebra.zero
-} + ZeroFeature
+}.withFeature(ZeroFeature)

 public class TransposedFeature<out T : Any>(public val original: Matrix<T>) : MatrixFeature

@@ -90,4 +93,4 @@ public class TransposedFeature<out T : Any>(public val original: Matrix<T>) : MatrixFeature
 public fun <T : Any> Matrix<T>.transpose(): Matrix<T> = getFeature<TransposedFeature<T>>()?.original ?: VirtualMatrix(
    colNum,
    rowNum,
-) { i, j -> get(j, i) } + TransposedFeature(this)
+) { i, j -> get(j, i) }.withFeature(TransposedFeature(this))
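Note: attaching features through the `plus` operator is deprecated in favor of the explicit `withFeature`/`withFeatures`. Sketch (`matrix` is any existing `Matrix<Double>`):

```kotlin
// Wrapping is cheap: a MatrixWrapper merges new features into its
// existing FeatureSet instead of re-wrapping the origin.
val tagged = matrix.withFeature(SymmetricMatrixFeature)
val multi = matrix.withFeatures(listOf(LFeature, UFeature))
```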
@@ -20,3 +20,6 @@ public class VirtualMatrix<out T : Any>(

    override operator fun get(i: Int, j: Int): T = generator(i, j)
 }
+
+public fun <T : Any> MatrixBuilder<T, *>.virtual(generator: (i: Int, j: Int) -> T): VirtualMatrix<T> =
+    VirtualMatrix(rows, columns, generator)
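Note: the new `virtual` extension builds a lazily-evaluated matrix from the builder's dimensions. Sketch:

```kotlin
// Elements are produced by the generator on each access; nothing is
// stored eagerly.
val identity = LinearSpace.double.matrix(3, 3).virtual { i, j -> if (i == j) 1.0 else 0.0 }
```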
@@ -1,34 +0,0 @@
-/*
- * Copyright 2018-2021 KMath contributors.
- * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
- */
-
-package space.kscience.kmath.linear
-
-import space.kscience.kmath.structures.BufferAccessor2D
-import space.kscience.kmath.structures.MutableBuffer
-
-public object SymmetricMatrixFeature : MatrixFeature
-
-/**
- * Naive implementation of a symmetric matrix builder, that adds a [SymmetricMatrixFeature] tag. The resulting matrix contains
- * full `size^2` number of elements, but caches elements during calls to save [builder] calls. [builder] is always called in the
- * upper triangle region meaning that `i <= j`
- */
-public fun <T : Any, LS : LinearSpace<T, *>> LS.buildSymmetricMatrix(
-    size: Int,
-    builder: (i: Int, j: Int) -> T,
-): Matrix<T> = BufferAccessor2D<T?>(size, size, MutableBuffer.Companion::boxing).run {
-    val cache = factory(size * size) { null }
-    buildMatrix(size, size) { i, j ->
-        val cached = cache[i, j]
-        if (cached == null) {
-            val value = if (i <= j) builder(i, j) else builder(j, i)
-            cache[i, j] = value
-            cache[j, i] = value
-            value
-        } else {
-            cached
-        }
-    } + SymmetricMatrixFeature
-}
@@ -11,29 +11,44 @@ import kotlin.reflect.KClass
 * An entity that contains a set of features defined by their types
 */
 public interface Featured<F : Any> {
-    public fun <T : F> getFeature(type: KClass<out T>): T?
+    public fun <T : F> getFeature(type: FeatureKey<T>): T?
 }

+public typealias FeatureKey<T> = KClass<out T>
+
+public interface Feature<F : Feature<F>> {
+
+    /**
+     * A key used for extraction
+     */
+    @Suppress("UNCHECKED_CAST")
+    public val key: FeatureKey<F> get() = this::class as FeatureKey<F>
+}
+
 /**
 * A container for a set of features
 */
-public class FeatureSet<F : Any> private constructor(public val features: Map<KClass<out F>, F>) : Featured<F> {
+public class FeatureSet<F : Feature<F>> private constructor(public val features: Map<FeatureKey<F>, F>) : Featured<F> {
    @Suppress("UNCHECKED_CAST")
-    override fun <T : F> getFeature(type: KClass<out T>): T? = features[type] as? T
+    override fun <T : F> getFeature(type: FeatureKey<T>): T? = features[type] as? T

    public inline fun <reified T : F> getFeature(): T? = getFeature(T::class)

-    public fun <T : F> with(feature: T, type: KClass<out T> = feature::class): FeatureSet<F> =
+    public fun <T : F> with(feature: T, type: FeatureKey<F> = feature.key): FeatureSet<F> =
        FeatureSet(features + (type to feature))

    public fun with(other: FeatureSet<F>): FeatureSet<F> = FeatureSet(features + other.features)

    public fun with(vararg otherFeatures: F): FeatureSet<F> =
-        FeatureSet(features + otherFeatures.associateBy { it::class })
+        FeatureSet(features + otherFeatures.associateBy { it.key })
+
+    public fun with(otherFeatures: Iterable<F>): FeatureSet<F> =
+        FeatureSet(features + otherFeatures.associateBy { it.key })
+
+    public operator fun iterator(): Iterator<F> = features.values.iterator()

    public companion object {
-        public fun <F : Any> of(vararg features: F): FeatureSet<F> = FeatureSet(features.associateBy { it::class })
+        public fun <F : Feature<F>> of(vararg features: F): FeatureSet<F> = FeatureSet(features.associateBy { it.key })
+        public fun <F : Feature<F>> of(features: Iterable<F>): FeatureSet<F> = FeatureSet(features.associateBy { it.key })
    }
 }
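Note: features now publish their own lookup key via `Feature.key`, so an implementation can register under an interface type and be found by that interface. Hypothetical sketch (all names invented for illustration):

```kotlin
// Both objects share the interface key, so a lookup by Decoration
// finds whichever one was registered.
interface Decoration : Feature<Decoration> {
    override val key: FeatureKey<Decoration> get() = Decoration::class
}
object Bold : Decoration
object Italic : Decoration

val set = FeatureSet.of<Decoration>(Bold)
val found: Decoration? = set.getFeature(Decoration::class)
```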
@@ -12,7 +12,7 @@ package space.kscience.kmath.misc
 * in some way that may break some code.
 */
 @MustBeDocumented
-@Retention(value = AnnotationRetention.BINARY)
+@Retention(value = AnnotationRetention.SOURCE)
 @RequiresOptIn("This API is unstable and could change in future", RequiresOptIn.Level.WARNING)
 public annotation class UnstableKMathAPI

@@ -21,7 +21,7 @@ public annotation class UnstableKMathAPI
 * slow-down in some cases. Refer to the documentation and benchmark it to be sure.
 */
 @MustBeDocumented
-@Retention(value = AnnotationRetention.BINARY)
+@Retention(value = AnnotationRetention.SOURCE)
 @RequiresOptIn(
    "Refer to the documentation to use this API in performance-critical code",
    RequiresOptIn.Level.WARNING
@@ -5,6 +5,8 @@
 package space.kscience.kmath.nd

+import space.kscience.kmath.misc.Feature
+import space.kscience.kmath.misc.Featured
 import space.kscience.kmath.misc.PerformancePitfall
 import space.kscience.kmath.misc.UnstableKMathAPI
 import space.kscience.kmath.structures.Buffer
@@ -13,7 +15,7 @@ import kotlin.jvm.JvmName
 import kotlin.native.concurrent.ThreadLocal
 import kotlin.reflect.KClass

-public interface StructureFeature
+public interface StructureFeature : Feature<StructureFeature>

 /**
 * Represents n-dimensional structure, i.e. multidimensional container of items of the same type and size. The number
@@ -24,7 +26,7 @@ public interface StructureFeature
 *
 * @param T the type of items.
 */
-public interface StructureND<out T> {
+public interface StructureND<out T> : Featured<StructureFeature> {
    /**
     * The shape of structure, i.e. non-empty sequence of non-negative integers that specify sizes of dimensions of
     * this structure.
@@ -57,7 +59,7 @@ public interface StructureND<out T> {
     * If the feature is not present, null is returned.
     */
    @UnstableKMathAPI
-    public fun <F : StructureFeature> getFeature(type: KClass<out F>): F? = null
+    override fun <F : StructureFeature> getFeature(type: KClass<out F>): F? = null

    public companion object {
        /**
@@ -14,30 +14,30 @@ import space.kscience.kmath.nd.as2D
 * A context that allows operating on a [MutableBuffer] as a 2D array
 */
 internal class BufferAccessor2D<T>(
-    public val rowNum: Int,
-    public val colNum: Int,
+    val rowNum: Int,
+    val colNum: Int,
    val factory: MutableBufferFactory<T>,
 ) {
-    public operator fun Buffer<T>.get(i: Int, j: Int): T = get(i * colNum + j)
+    operator fun Buffer<T>.get(i: Int, j: Int): T = get(i * colNum + j)

-    public operator fun MutableBuffer<T>.set(i: Int, j: Int, value: T) {
+    operator fun MutableBuffer<T>.set(i: Int, j: Int, value: T) {
        set(i * colNum + j, value)
    }

-    public inline fun create(crossinline init: (i: Int, j: Int) -> T): MutableBuffer<T> =
+    inline fun create(crossinline init: (i: Int, j: Int) -> T): MutableBuffer<T> =
        factory(rowNum * colNum) { offset -> init(offset / colNum, offset % colNum) }

-    public fun create(mat: Structure2D<T>): MutableBuffer<T> = create { i, j -> mat[i, j] }
+    fun create(mat: Structure2D<T>): MutableBuffer<T> = create { i, j -> mat[i, j] }

    //TODO optimize wrapper
-    public fun MutableBuffer<T>.collect(): Structure2D<T> = StructureND.buffered(
+    fun MutableBuffer<T>.collect(): Structure2D<T> = StructureND.buffered(
        DefaultStrides(intArrayOf(rowNum, colNum)),
        factory
    ) { (i, j) ->
        get(i, j)
    }.as2D()

-    public inner class Row(public val buffer: MutableBuffer<T>, public val rowIndex: Int) : MutableBuffer<T> {
+    inner class Row(val buffer: MutableBuffer<T>, val rowIndex: Int) : MutableBuffer<T> {
        override val size: Int get() = colNum

        override operator fun get(index: Int): T = buffer[rowIndex, index]
@@ -54,5 +54,5 @@ internal class BufferAccessor2D<T>(
    /**
     * Get row
     */
-    public fun MutableBuffer<T>.row(i: Int): Row = Row(this, i)
+    fun MutableBuffer<T>.row(i: Int): Row = Row(this, i)
 }
@@ -22,14 +22,14 @@ class DoubleLUSolverTest {

    @Test
    fun testInvertOne() {
-        val matrix = LinearSpace.real.one(2, 2)
-        val inverted = LinearSpace.real.inverseWithLup(matrix)
+        val matrix = LinearSpace.double.one(2, 2)
+        val inverted = LinearSpace.double.lupSolver().inverse(matrix)
        assertMatrixEquals(matrix, inverted)
    }

    @Test
    fun testDecomposition() {
-        LinearSpace.real.run {
+        LinearSpace.double.run {
            val matrix = matrix(2, 2)(
                3.0, 1.0,
                2.0, 3.0
@@ -46,14 +46,14 @@ class DoubleLUSolverTest {

    @Test
    fun testInvert() {
-        val matrix = LinearSpace.real.matrix(2, 2)(
+        val matrix = LinearSpace.double.matrix(2, 2)(
            3.0, 1.0,
            1.0, 3.0
        )

-        val inverted = LinearSpace.real.inverseWithLup(matrix)
+        val inverted = LinearSpace.double.lupSolver().inverse(matrix)

-        val expected = LinearSpace.real.matrix(2, 2)(
+        val expected = LinearSpace.double.matrix(2, 2)(
            0.375, -0.125,
            -0.125, 0.375
        )

@@ -19,14 +19,14 @@ import kotlin.test.assertTrue
 class MatrixTest {
    @Test
    fun testTranspose() {
-        val matrix = LinearSpace.real.one(3, 3)
+        val matrix = LinearSpace.double.one(3, 3)
        val transposed = matrix.transpose()
        assertTrue { StructureND.contentEquals(matrix, transposed) }
    }

    @Test
    fun testBuilder() {
-        val matrix = LinearSpace.real.matrix(2, 3)(
+        val matrix = LinearSpace.double.matrix(2, 3)(
            1.0, 0.0, 0.0,
            0.0, 1.0, 2.0
        )
@@ -48,7 +48,7 @@ class MatrixTest {
    infix fun Matrix<Double>.pow(power: Int): Matrix<Double> {
        var res = this
        repeat(power - 1) {
-            res = LinearSpace.real.run { res dot this@pow }
+            res = LinearSpace.double.run { res dot this@pow }
        }
        return res
    }
@@ -61,7 +61,7 @@ class MatrixTest {
        val firstMatrix = StructureND.auto(2, 3) { (i, j) -> (i + j).toDouble() }.as2D()
        val secondMatrix = StructureND.auto(3, 2) { (i, j) -> (i + j).toDouble() }.as2D()

-        LinearSpace.real.run {
+        LinearSpace.double.run {
            // val firstMatrix = produce(2, 3) { i, j -> (i + j).toDouble() }
            // val secondMatrix = produce(3, 2) { i, j -> (i + j).toDouble() }
            val result = firstMatrix dot secondMatrix
@@ -40,7 +40,7 @@ class NumberNDFieldTest {
    @Test
    fun testGeneration() {

-        val array = LinearSpace.real.buildMatrix(3, 3) { i, j ->
+        val array = LinearSpace.double.buildMatrix(3, 3) { i, j ->
            (i * 10 + j).toDouble()
        }

@@ -151,7 +151,7 @@ public value class DMatrixContext<T : Any, out A : Ring<T>>(public val context:
        context.run { (this@transpose as Matrix<T>).transpose() }.coerce()

    public companion object {
-        public val real: DMatrixContext<Double, DoubleField> = DMatrixContext(LinearSpace.real)
+        public val real: DMatrixContext<Double, DoubleField> = DMatrixContext(LinearSpace.double)
    }
 }
@@ -1,995 +0,0 @@
-/*
- * Copyright 2018-2021 KMath contributors.
- * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
- */
-
-/* This file is generated with buildSrc/src/main/kotlin/space/kscience/kmath/ejml/codegen/ejmlCodegen.kt */
-
-package space.kscience.kmath.ejml
-
-import org.ejml.data.*
-import org.ejml.dense.row.CommonOps_DDRM
-import org.ejml.dense.row.CommonOps_FDRM
-import org.ejml.dense.row.factory.DecompositionFactory_DDRM
-import org.ejml.dense.row.factory.DecompositionFactory_FDRM
-import org.ejml.sparse.FillReducing
-import org.ejml.sparse.csc.CommonOps_DSCC
-import org.ejml.sparse.csc.CommonOps_FSCC
-import org.ejml.sparse.csc.factory.DecompositionFactory_DSCC
-import org.ejml.sparse.csc.factory.DecompositionFactory_FSCC
-import org.ejml.sparse.csc.factory.LinearSolverFactory_DSCC
-import org.ejml.sparse.csc.factory.LinearSolverFactory_FSCC
-import space.kscience.kmath.linear.*
-import space.kscience.kmath.linear.Matrix
-import space.kscience.kmath.misc.UnstableKMathAPI
-import space.kscience.kmath.nd.StructureFeature
-import space.kscience.kmath.operations.DoubleField
-import space.kscience.kmath.operations.FloatField
-import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.structures.DoubleBuffer
-import space.kscience.kmath.structures.FloatBuffer
-import kotlin.reflect.KClass
-import kotlin.reflect.cast
-
-/**
- * [EjmlVector] specialization for [Double].
- */
-public class EjmlDoubleVector<out M : DMatrix>(public override val origin: M) : EjmlVector<Double, M>(origin) {
-    init {
-        require(origin.numRows == 1) { "The origin matrix must have only one row to form a vector" }
-    }
-
-    public override operator fun get(index: Int): Double = origin[0, index]
-}
-
-/**
- * [EjmlVector] specialization for [Float].
- */
-public class EjmlFloatVector<out M : FMatrix>(public override val origin: M) : EjmlVector<Float, M>(origin) {
-    init {
-        require(origin.numRows == 1) { "The origin matrix must have only one row to form a vector" }
-    }
-
-    public override operator fun get(index: Int): Float = origin[0, index]
-}
-
-/**
- * [EjmlMatrix] specialization for [Double].
- */
-public class EjmlDoubleMatrix<out M : DMatrix>(public override val origin: M) : EjmlMatrix<Double, M>(origin) {
-    public override operator fun get(i: Int, j: Int): Double = origin[i, j]
-}
-
-/**
- * [EjmlMatrix] specialization for [Float].
- */
-public class EjmlFloatMatrix<out M : FMatrix>(public override val origin: M) : EjmlMatrix<Float, M>(origin) {
-    public override operator fun get(i: Int, j: Int): Float = origin[i, j]
-}
-
-/**
- * [EjmlLinearSpace] implementation based on [CommonOps_DDRM], [DecompositionFactory_DDRM] operations and
- * [DMatrixRMaj] matrices.
- */
-public object EjmlLinearSpaceDDRM : EjmlLinearSpace<Double, DoubleField, DMatrixRMaj>() {
-    /**
-     * The [DoubleField] reference.
-     */
-    public override val elementAlgebra: DoubleField get() = DoubleField
-
-    @Suppress("UNCHECKED_CAST")
-    public override fun Matrix<Double>.toEjml(): EjmlDoubleMatrix<DMatrixRMaj> = when {
-        this is EjmlDoubleMatrix<*> && origin is DMatrixRMaj -> this as EjmlDoubleMatrix<DMatrixRMaj>
-        else -> buildMatrix(rowNum, colNum) { i, j -> get(i, j) }
-    }
-
-    @Suppress("UNCHECKED_CAST")
-    public override fun Point<Double>.toEjml(): EjmlDoubleVector<DMatrixRMaj> = when {
-        this is EjmlDoubleVector<*> && origin is DMatrixRMaj -> this as EjmlDoubleVector<DMatrixRMaj>
-        else -> EjmlDoubleVector(DMatrixRMaj(size, 1).also {
-            (0 until it.numRows).forEach { row -> it[row, 0] = get(row) }
-        })
-    }
-
-    public override fun buildMatrix(
-        rows: Int,
-        columns: Int,
-        initializer: DoubleField.(i: Int, j: Int) -> Double,
-    ): EjmlDoubleMatrix<DMatrixRMaj> = DMatrixRMaj(rows, columns).also {
-        (0 until rows).forEach { row ->
-            (0 until columns).forEach { col -> it[row, col] = elementAlgebra.initializer(row, col) }
-        }
-    }.wrapMatrix()
-
-    public override fun buildVector(
-        size: Int,
-        initializer: DoubleField.(Int) -> Double,
-    ): EjmlDoubleVector<DMatrixRMaj> = EjmlDoubleVector(DMatrixRMaj(size, 1).also {
-        (0 until it.numRows).forEach { row -> it[row, 0] = elementAlgebra.initializer(row) }
-    })
-
-    private fun <T : DMatrix> T.wrapMatrix() = EjmlDoubleMatrix(this)
-    private fun <T : DMatrix> T.wrapVector() = EjmlDoubleVector(this)
-
-    public override fun Matrix<Double>.unaryMinus(): Matrix<Double> = this * elementAlgebra { -one }
-
-    public override fun Matrix<Double>.dot(other: Matrix<Double>): EjmlDoubleMatrix<DMatrixRMaj> {
-        val out = DMatrixRMaj(1, 1)
-        CommonOps_DDRM.mult(toEjml().origin, other.toEjml().origin, out)
-        return out.wrapMatrix()
-    }
-
-    public override fun Matrix<Double>.dot(vector: Point<Double>): EjmlDoubleVector<DMatrixRMaj> {
-        val out = DMatrixRMaj(1, 1)
-        CommonOps_DDRM.mult(toEjml().origin, vector.toEjml().origin, out)
-        return out.wrapVector()
-    }
-
-    public override operator fun Matrix<Double>.minus(other: Matrix<Double>): EjmlDoubleMatrix<DMatrixRMaj> {
-        val out = DMatrixRMaj(1, 1)
-
-        CommonOps_DDRM.add(
-            elementAlgebra.one,
-            toEjml().origin,
-            elementAlgebra { -one },
-            other.toEjml().origin,
-            out,
-        )
-
-        return out.wrapMatrix()
-    }
-
-    public override operator fun Matrix<Double>.times(value: Double): EjmlDoubleMatrix<DMatrixRMaj> {
-        val res = DMatrixRMaj(1, 1)
-        CommonOps_DDRM.scale(value, toEjml().origin, res)
-        return res.wrapMatrix()
-    }
-
-    public override fun Point<Double>.unaryMinus(): EjmlDoubleVector<DMatrixRMaj> {
-        val res = DMatrixRMaj(1, 1)
-        CommonOps_DDRM.changeSign(toEjml().origin, res)
-        return res.wrapVector()
-    }
-
-    public override fun Matrix<Double>.plus(other: Matrix<Double>): EjmlDoubleMatrix<DMatrixRMaj> {
-        val out = DMatrixRMaj(1, 1)
-
-        CommonOps_DDRM.add(
-            elementAlgebra.one,
-            toEjml().origin,
-            elementAlgebra.one,
-            other.toEjml().origin,
-            out,
-        )
-
-        return out.wrapMatrix()
-    }
-
-    public override fun Point<Double>.plus(other: Point<Double>): EjmlDoubleVector<DMatrixRMaj> {
-        val out = DMatrixRMaj(1, 1)
-
-        CommonOps_DDRM.add(
-            elementAlgebra.one,
-            toEjml().origin,
-            elementAlgebra.one,
-            other.toEjml().origin,
-            out,
-        )
-
-        return out.wrapVector()
-    }
-
-    public override fun Point<Double>.minus(other: Point<Double>): EjmlDoubleVector<DMatrixRMaj> {
-        val out = DMatrixRMaj(1, 1)
-
-        CommonOps_DDRM.add(
-            elementAlgebra.one,
-            toEjml().origin,
-            elementAlgebra { -one },
-            other.toEjml().origin,
-            out,
-        )
-
-        return out.wrapVector()
-    }
-
-    public override fun Double.times(m: Matrix<Double>): EjmlDoubleMatrix<DMatrixRMaj> = m * this
-
-    public override fun Point<Double>.times(value: Double): EjmlDoubleVector<DMatrixRMaj> {
-        val res = DMatrixRMaj(1, 1)
-        CommonOps_DDRM.scale(value, toEjml().origin, res)
-        return res.wrapVector()
-    }
-
-    public override fun Double.times(v: Point<Double>): EjmlDoubleVector<DMatrixRMaj> = v * this
-
-    @UnstableKMathAPI
-    public override fun <F : StructureFeature> getFeature(structure: Matrix<Double>, type: KClass<out F>): F? {
-        structure.getFeature(type)?.let { return it }
-        val origin = structure.toEjml().origin
-
-        return when (type) {
-            InverseMatrixFeature::class -> object : InverseMatrixFeature<Double> {
-                override val inverse: Matrix<Double> by lazy {
-                    val res = origin.copy()
-                    CommonOps_DDRM.invert(res)
-                    res.wrapMatrix()
-                }
-            }
-
-            DeterminantFeature::class -> object : DeterminantFeature<Double> {
-                override val determinant: Double by lazy { CommonOps_DDRM.det(origin) }
-            }
-
-            SingularValueDecompositionFeature::class -> object : SingularValueDecompositionFeature<Double> {
-                private val svd by lazy {
-                    DecompositionFactory_DDRM.svd(origin.numRows, origin.numCols, true, true, false)
-                        .apply { decompose(origin.copy()) }
-                }
-
-                override val u: Matrix<Double> by lazy { svd.getU(null, false).wrapMatrix() }
-                override val s: Matrix<Double> by lazy { svd.getW(null).wrapMatrix() }
|
||||
override val v: Matrix<Double> by lazy { svd.getV(null, false).wrapMatrix() }
|
||||
override val singularValues: Point<Double> by lazy { DoubleBuffer(svd.singularValues) }
|
||||
}
|
||||
|
||||
QRDecompositionFeature::class -> object : QRDecompositionFeature<Double> {
|
||||
private val qr by lazy {
|
||||
DecompositionFactory_DDRM.qr().apply { decompose(origin.copy()) }
|
||||
}
|
||||
|
||||
override val q: Matrix<Double> by lazy {
|
||||
qr.getQ(null, false).wrapMatrix() + OrthogonalFeature
|
||||
}
|
||||
|
||||
override val r: Matrix<Double> by lazy { qr.getR(null, false).wrapMatrix() + UFeature }
|
||||
}
|
||||
|
||||
CholeskyDecompositionFeature::class -> object : CholeskyDecompositionFeature<Double> {
|
||||
override val l: Matrix<Double> by lazy {
|
||||
val cholesky =
|
||||
DecompositionFactory_DDRM.chol(structure.rowNum, true).apply { decompose(origin.copy()) }
|
||||
|
||||
cholesky.getT(null).wrapMatrix() + LFeature
|
||||
}
|
||||
}
|
||||
|
||||
LupDecompositionFeature::class -> object : LupDecompositionFeature<Double> {
|
||||
private val lup by lazy {
|
||||
DecompositionFactory_DDRM.lu(origin.numRows, origin.numCols).apply { decompose(origin.copy()) }
|
||||
}
|
||||
|
||||
override val l: Matrix<Double> by lazy {
|
||||
lup.getLower(null).wrapMatrix() + LFeature
|
||||
}
|
||||
|
||||
override val u: Matrix<Double> by lazy {
|
||||
lup.getUpper(null).wrapMatrix() + UFeature
|
||||
}
|
||||
|
||||
override val p: Matrix<Double> by lazy { lup.getRowPivot(null).wrapMatrix() }
|
||||
}
|
||||
|
||||
else -> null
|
||||
}?.let(type::cast)
|
||||
}
|
||||
|
||||
/**
|
||||
* Solves for *x* in the following equation: *x = [a] <sup>-1</sup> · [b]*.
|
||||
*
|
||||
* @param a the base matrix.
|
||||
* @param b n by p matrix.
|
||||
* @return the solution for *x* that is n by p.
|
||||
*/
|
||||
public fun solve(a: Matrix<Double>, b: Matrix<Double>): EjmlDoubleMatrix<DMatrixRMaj> {
|
||||
val res = DMatrixRMaj(1, 1)
|
||||
CommonOps_DDRM.solve(DMatrixRMaj(a.toEjml().origin), DMatrixRMaj(b.toEjml().origin), res)
|
||||
return res.wrapMatrix()
|
||||
}
|
||||
|
||||
/**
|
||||
* Solves for *x* in the following equation: *x = [a] <sup>-1</sup> · [b]*.
|
||||
*
|
||||
* @param a the base matrix.
|
||||
* @param b n by p vector.
|
||||
* @return the solution for *x* that is n by p.
|
||||
*/
|
||||
public fun solve(a: Matrix<Double>, b: Point<Double>): EjmlDoubleVector<DMatrixRMaj> {
|
||||
val res = DMatrixRMaj(1, 1)
|
||||
CommonOps_DDRM.solve(DMatrixRMaj(a.toEjml().origin), DMatrixRMaj(b.toEjml().origin), res)
|
||||
return EjmlDoubleVector(res)
|
||||
}
|
||||
}
|
||||
|
||||
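
Editor's note: the following usage sketch is not part of the commit; it only illustrates the DDRM-backed space defined above. The import path is an assumption based on the package layout of kmath-ejml.

// A minimal sketch, assuming space.kscience.kmath.ejml is the package of the object above.
import space.kscience.kmath.ejml.EjmlLinearSpaceDDRM

fun main() {
    EjmlLinearSpaceDDRM {
        // 2x2 positive-definite matrix and a right-hand side
        val a = buildMatrix(2, 2) { i, j -> if (i == j) 2.0 else 1.0 }
        val b = buildVector(2) { i -> i + 1.0 }
        val x = solve(a, b) // x = a^-1 · b via CommonOps_DDRM.solve
        println(a dot x)    // should reproduce b up to rounding
    }
}
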
/**
 * [EjmlLinearSpace] implementation based on [CommonOps_FDRM], [DecompositionFactory_FDRM] operations and
 * [FMatrixRMaj] matrices.
 */
public object EjmlLinearSpaceFDRM : EjmlLinearSpace<Float, FloatField, FMatrixRMaj>() {
    /**
     * The [FloatField] reference.
     */
    public override val elementAlgebra: FloatField get() = FloatField

    @Suppress("UNCHECKED_CAST")
    public override fun Matrix<Float>.toEjml(): EjmlFloatMatrix<FMatrixRMaj> = when {
        this is EjmlFloatMatrix<*> && origin is FMatrixRMaj -> this as EjmlFloatMatrix<FMatrixRMaj>
        else -> buildMatrix(rowNum, colNum) { i, j -> get(i, j) }
    }

    @Suppress("UNCHECKED_CAST")
    public override fun Point<Float>.toEjml(): EjmlFloatVector<FMatrixRMaj> = when {
        this is EjmlFloatVector<*> && origin is FMatrixRMaj -> this as EjmlFloatVector<FMatrixRMaj>
        else -> EjmlFloatVector(FMatrixRMaj(size, 1).also {
            (0 until it.numRows).forEach { row -> it[row, 0] = get(row) }
        })
    }

    public override fun buildMatrix(
        rows: Int,
        columns: Int,
        initializer: FloatField.(i: Int, j: Int) -> Float,
    ): EjmlFloatMatrix<FMatrixRMaj> = FMatrixRMaj(rows, columns).also {
        (0 until rows).forEach { row ->
            (0 until columns).forEach { col -> it[row, col] = elementAlgebra.initializer(row, col) }
        }
    }.wrapMatrix()

    public override fun buildVector(
        size: Int,
        initializer: FloatField.(Int) -> Float,
    ): EjmlFloatVector<FMatrixRMaj> = EjmlFloatVector(FMatrixRMaj(size, 1).also {
        (0 until it.numRows).forEach { row -> it[row, 0] = elementAlgebra.initializer(row) }
    })

    private fun <T : FMatrix> T.wrapMatrix() = EjmlFloatMatrix(this)
    private fun <T : FMatrix> T.wrapVector() = EjmlFloatVector(this)

    public override fun Matrix<Float>.unaryMinus(): Matrix<Float> = this * elementAlgebra { -one }

    public override fun Matrix<Float>.dot(other: Matrix<Float>): EjmlFloatMatrix<FMatrixRMaj> {
        val out = FMatrixRMaj(1, 1)
        CommonOps_FDRM.mult(toEjml().origin, other.toEjml().origin, out)
        return out.wrapMatrix()
    }

    public override fun Matrix<Float>.dot(vector: Point<Float>): EjmlFloatVector<FMatrixRMaj> {
        val out = FMatrixRMaj(1, 1)
        CommonOps_FDRM.mult(toEjml().origin, vector.toEjml().origin, out)
        return out.wrapVector()
    }

    public override operator fun Matrix<Float>.minus(other: Matrix<Float>): EjmlFloatMatrix<FMatrixRMaj> {
        val out = FMatrixRMaj(1, 1)

        CommonOps_FDRM.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra { -one },
            other.toEjml().origin,
            out,
        )

        return out.wrapMatrix()
    }

    public override operator fun Matrix<Float>.times(value: Float): EjmlFloatMatrix<FMatrixRMaj> {
        val res = FMatrixRMaj(1, 1)
        CommonOps_FDRM.scale(value, toEjml().origin, res)
        return res.wrapMatrix()
    }

    public override fun Point<Float>.unaryMinus(): EjmlFloatVector<FMatrixRMaj> {
        val res = FMatrixRMaj(1, 1)
        CommonOps_FDRM.changeSign(toEjml().origin, res)
        return res.wrapVector()
    }

    public override fun Matrix<Float>.plus(other: Matrix<Float>): EjmlFloatMatrix<FMatrixRMaj> {
        val out = FMatrixRMaj(1, 1)

        CommonOps_FDRM.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra.one,
            other.toEjml().origin,
            out,
        )

        return out.wrapMatrix()
    }

    public override fun Point<Float>.plus(other: Point<Float>): EjmlFloatVector<FMatrixRMaj> {
        val out = FMatrixRMaj(1, 1)

        CommonOps_FDRM.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra.one,
            other.toEjml().origin,
            out,
        )

        return out.wrapVector()
    }

    public override fun Point<Float>.minus(other: Point<Float>): EjmlFloatVector<FMatrixRMaj> {
        val out = FMatrixRMaj(1, 1)

        CommonOps_FDRM.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra { -one },
            other.toEjml().origin,
            out,
        )

        return out.wrapVector()
    }

    public override fun Float.times(m: Matrix<Float>): EjmlFloatMatrix<FMatrixRMaj> = m * this

    public override fun Point<Float>.times(value: Float): EjmlFloatVector<FMatrixRMaj> {
        val res = FMatrixRMaj(1, 1)
        CommonOps_FDRM.scale(value, toEjml().origin, res)
        return res.wrapVector()
    }

    public override fun Float.times(v: Point<Float>): EjmlFloatVector<FMatrixRMaj> = v * this

    @UnstableKMathAPI
    public override fun <F : StructureFeature> getFeature(structure: Matrix<Float>, type: KClass<out F>): F? {
        structure.getFeature(type)?.let { return it }
        val origin = structure.toEjml().origin

        return when (type) {
            InverseMatrixFeature::class -> object : InverseMatrixFeature<Float> {
                override val inverse: Matrix<Float> by lazy {
                    val res = origin.copy()
                    CommonOps_FDRM.invert(res)
                    res.wrapMatrix()
                }
            }

            DeterminantFeature::class -> object : DeterminantFeature<Float> {
                override val determinant: Float by lazy { CommonOps_FDRM.det(origin) }
            }

            SingularValueDecompositionFeature::class -> object : SingularValueDecompositionFeature<Float> {
                private val svd by lazy {
                    DecompositionFactory_FDRM.svd(origin.numRows, origin.numCols, true, true, false)
                        .apply { decompose(origin.copy()) }
                }

                override val u: Matrix<Float> by lazy { svd.getU(null, false).wrapMatrix() }
                override val s: Matrix<Float> by lazy { svd.getW(null).wrapMatrix() }
                override val v: Matrix<Float> by lazy { svd.getV(null, false).wrapMatrix() }
                override val singularValues: Point<Float> by lazy { FloatBuffer(svd.singularValues) }
            }

            QRDecompositionFeature::class -> object : QRDecompositionFeature<Float> {
                private val qr by lazy {
                    DecompositionFactory_FDRM.qr().apply { decompose(origin.copy()) }
                }

                override val q: Matrix<Float> by lazy {
                    qr.getQ(null, false).wrapMatrix() + OrthogonalFeature
                }

                override val r: Matrix<Float> by lazy { qr.getR(null, false).wrapMatrix() + UFeature }
            }

            CholeskyDecompositionFeature::class -> object : CholeskyDecompositionFeature<Float> {
                override val l: Matrix<Float> by lazy {
                    val cholesky =
                        DecompositionFactory_FDRM.chol(structure.rowNum, true).apply { decompose(origin.copy()) }

                    cholesky.getT(null).wrapMatrix() + LFeature
                }
            }

            LupDecompositionFeature::class -> object : LupDecompositionFeature<Float> {
                private val lup by lazy {
                    DecompositionFactory_FDRM.lu(origin.numRows, origin.numCols).apply { decompose(origin.copy()) }
                }

                override val l: Matrix<Float> by lazy {
                    lup.getLower(null).wrapMatrix() + LFeature
                }

                override val u: Matrix<Float> by lazy {
                    lup.getUpper(null).wrapMatrix() + UFeature
                }

                override val p: Matrix<Float> by lazy { lup.getRowPivot(null).wrapMatrix() }
            }

            else -> null
        }?.let(type::cast)
    }

    /**
     * Solves for *x* in the following equation: *x = [a] <sup>-1</sup> · [b]*.
     *
     * @param a the base matrix.
     * @param b n by p matrix.
     * @return the solution for *x* that is n by p.
     */
    public fun solve(a: Matrix<Float>, b: Matrix<Float>): EjmlFloatMatrix<FMatrixRMaj> {
        val res = FMatrixRMaj(1, 1)
        CommonOps_FDRM.solve(FMatrixRMaj(a.toEjml().origin), FMatrixRMaj(b.toEjml().origin), res)
        return res.wrapMatrix()
    }

    /**
     * Solves for *x* in the following equation: *x = [a] <sup>-1</sup> · [b]*.
     *
     * @param a the base matrix.
     * @param b n by p vector.
     * @return the solution for *x* that is n by p.
     */
    public fun solve(a: Matrix<Float>, b: Point<Float>): EjmlFloatVector<FMatrixRMaj> {
        val res = FMatrixRMaj(1, 1)
        CommonOps_FDRM.solve(FMatrixRMaj(a.toEjml().origin), FMatrixRMaj(b.toEjml().origin), res)
        return EjmlFloatVector(res)
    }
}
/**
 * [EjmlLinearSpace] implementation based on [CommonOps_DSCC], [DecompositionFactory_DSCC] operations and
 * [DMatrixSparseCSC] matrices.
 */
public object EjmlLinearSpaceDSCC : EjmlLinearSpace<Double, DoubleField, DMatrixSparseCSC>() {
    /**
     * The [DoubleField] reference.
     */
    public override val elementAlgebra: DoubleField get() = DoubleField

    @Suppress("UNCHECKED_CAST")
    public override fun Matrix<Double>.toEjml(): EjmlDoubleMatrix<DMatrixSparseCSC> = when {
        this is EjmlDoubleMatrix<*> && origin is DMatrixSparseCSC -> this as EjmlDoubleMatrix<DMatrixSparseCSC>
        else -> buildMatrix(rowNum, colNum) { i, j -> get(i, j) }
    }

    @Suppress("UNCHECKED_CAST")
    public override fun Point<Double>.toEjml(): EjmlDoubleVector<DMatrixSparseCSC> = when {
        this is EjmlDoubleVector<*> && origin is DMatrixSparseCSC -> this as EjmlDoubleVector<DMatrixSparseCSC>
        else -> EjmlDoubleVector(DMatrixSparseCSC(size, 1).also {
            (0 until it.numRows).forEach { row -> it[row, 0] = get(row) }
        })
    }

    public override fun buildMatrix(
        rows: Int,
        columns: Int,
        initializer: DoubleField.(i: Int, j: Int) -> Double,
    ): EjmlDoubleMatrix<DMatrixSparseCSC> = DMatrixSparseCSC(rows, columns).also {
        (0 until rows).forEach { row ->
            (0 until columns).forEach { col -> it[row, col] = elementAlgebra.initializer(row, col) }
        }
    }.wrapMatrix()

    public override fun buildVector(
        size: Int,
        initializer: DoubleField.(Int) -> Double,
    ): EjmlDoubleVector<DMatrixSparseCSC> = EjmlDoubleVector(DMatrixSparseCSC(size, 1).also {
        (0 until it.numRows).forEach { row -> it[row, 0] = elementAlgebra.initializer(row) }
    })

    private fun <T : DMatrix> T.wrapMatrix() = EjmlDoubleMatrix(this)
    private fun <T : DMatrix> T.wrapVector() = EjmlDoubleVector(this)

    public override fun Matrix<Double>.unaryMinus(): Matrix<Double> = this * elementAlgebra { -one }

    public override fun Matrix<Double>.dot(other: Matrix<Double>): EjmlDoubleMatrix<DMatrixSparseCSC> {
        val out = DMatrixSparseCSC(1, 1)
        CommonOps_DSCC.mult(toEjml().origin, other.toEjml().origin, out)
        return out.wrapMatrix()
    }

    public override fun Matrix<Double>.dot(vector: Point<Double>): EjmlDoubleVector<DMatrixSparseCSC> {
        val out = DMatrixSparseCSC(1, 1)
        CommonOps_DSCC.mult(toEjml().origin, vector.toEjml().origin, out)
        return out.wrapVector()
    }

    public override operator fun Matrix<Double>.minus(other: Matrix<Double>): EjmlDoubleMatrix<DMatrixSparseCSC> {
        val out = DMatrixSparseCSC(1, 1)

        CommonOps_DSCC.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra { -one },
            other.toEjml().origin,
            out,
            null,
            null,
        )

        return out.wrapMatrix()
    }

    public override operator fun Matrix<Double>.times(value: Double): EjmlDoubleMatrix<DMatrixSparseCSC> {
        val res = DMatrixSparseCSC(1, 1)
        CommonOps_DSCC.scale(value, toEjml().origin, res)
        return res.wrapMatrix()
    }

    public override fun Point<Double>.unaryMinus(): EjmlDoubleVector<DMatrixSparseCSC> {
        val res = DMatrixSparseCSC(1, 1)
        CommonOps_DSCC.changeSign(toEjml().origin, res)
        return res.wrapVector()
    }

    public override fun Matrix<Double>.plus(other: Matrix<Double>): EjmlDoubleMatrix<DMatrixSparseCSC> {
        val out = DMatrixSparseCSC(1, 1)

        CommonOps_DSCC.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra.one,
            other.toEjml().origin,
            out,
            null,
            null,
        )

        return out.wrapMatrix()
    }

    public override fun Point<Double>.plus(other: Point<Double>): EjmlDoubleVector<DMatrixSparseCSC> {
        val out = DMatrixSparseCSC(1, 1)

        CommonOps_DSCC.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra.one,
            other.toEjml().origin,
            out,
            null,
            null,
        )

        return out.wrapVector()
    }

    public override fun Point<Double>.minus(other: Point<Double>): EjmlDoubleVector<DMatrixSparseCSC> {
        val out = DMatrixSparseCSC(1, 1)

        CommonOps_DSCC.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra { -one },
            other.toEjml().origin,
            out,
            null,
            null,
        )

        return out.wrapVector()
    }

    public override fun Double.times(m: Matrix<Double>): EjmlDoubleMatrix<DMatrixSparseCSC> = m * this

    public override fun Point<Double>.times(value: Double): EjmlDoubleVector<DMatrixSparseCSC> {
        val res = DMatrixSparseCSC(1, 1)
        CommonOps_DSCC.scale(value, toEjml().origin, res)
        return res.wrapVector()
    }

    public override fun Double.times(v: Point<Double>): EjmlDoubleVector<DMatrixSparseCSC> = v * this

    @UnstableKMathAPI
    public override fun <F : StructureFeature> getFeature(structure: Matrix<Double>, type: KClass<out F>): F? {
        structure.getFeature(type)?.let { return it }
        val origin = structure.toEjml().origin

        return when (type) {
            QRDecompositionFeature::class -> object : QRDecompositionFeature<Double> {
                private val qr by lazy {
                    DecompositionFactory_DSCC.qr(FillReducing.NONE).apply { decompose(origin.copy()) }
                }

                override val q: Matrix<Double> by lazy {
                    qr.getQ(null, false).wrapMatrix() + OrthogonalFeature
                }

                override val r: Matrix<Double> by lazy { qr.getR(null, false).wrapMatrix() + UFeature }
            }

            CholeskyDecompositionFeature::class -> object : CholeskyDecompositionFeature<Double> {
                override val l: Matrix<Double> by lazy {
                    val cholesky =
                        DecompositionFactory_DSCC.cholesky().apply { decompose(origin.copy()) }

                    (cholesky.getT(null) as DMatrix).wrapMatrix() + LFeature
                }
            }

            LUDecompositionFeature::class, DeterminantFeature::class, InverseMatrixFeature::class -> object :
                LUDecompositionFeature<Double>, DeterminantFeature<Double>, InverseMatrixFeature<Double> {
                private val lu by lazy {
                    DecompositionFactory_DSCC.lu(FillReducing.NONE).apply { decompose(origin.copy()) }
                }

                override val l: Matrix<Double> by lazy {
                    lu.getLower(null).wrapMatrix() + LFeature
                }

                override val u: Matrix<Double> by lazy {
                    lu.getUpper(null).wrapMatrix() + UFeature
                }

                override val inverse: Matrix<Double> by lazy {
                    var a = origin
                    val inverse = DMatrixRMaj(1, 1)
                    val solver = LinearSolverFactory_DSCC.lu(FillReducing.NONE)
                    if (solver.modifiesA()) a = a.copy()
                    val i = CommonOps_DDRM.identity(a.numRows)
                    solver.solve(i, inverse)
                    inverse.wrapMatrix()
                }

                override val determinant: Double by lazy { elementAlgebra.number(lu.computeDeterminant().real) }
            }

            else -> null
        }?.let(type::cast)
    }

    /**
     * Solves for *x* in the following equation: *x = [a] <sup>-1</sup> · [b]*.
     *
     * @param a the base matrix.
     * @param b n by p matrix.
     * @return the solution for *x* that is n by p.
     */
    public fun solve(a: Matrix<Double>, b: Matrix<Double>): EjmlDoubleMatrix<DMatrixSparseCSC> {
        val res = DMatrixSparseCSC(1, 1)
        CommonOps_DSCC.solve(DMatrixSparseCSC(a.toEjml().origin), DMatrixSparseCSC(b.toEjml().origin), res)
        return res.wrapMatrix()
    }

    /**
     * Solves for *x* in the following equation: *x = [a] <sup>-1</sup> · [b]*.
     *
     * @param a the base matrix.
     * @param b n by p vector.
     * @return the solution for *x* that is n by p.
     */
    public fun solve(a: Matrix<Double>, b: Point<Double>): EjmlDoubleVector<DMatrixSparseCSC> {
        val res = DMatrixSparseCSC(1, 1)
        CommonOps_DSCC.solve(DMatrixSparseCSC(a.toEjml().origin), DMatrixSparseCSC(b.toEjml().origin), res)
        return EjmlDoubleVector(res)
    }
}
/**
 * [EjmlLinearSpace] implementation based on [CommonOps_FSCC], [DecompositionFactory_FSCC] operations and
 * [FMatrixSparseCSC] matrices.
 */
public object EjmlLinearSpaceFSCC : EjmlLinearSpace<Float, FloatField, FMatrixSparseCSC>() {
    /**
     * The [FloatField] reference.
     */
    public override val elementAlgebra: FloatField get() = FloatField

    @Suppress("UNCHECKED_CAST")
    public override fun Matrix<Float>.toEjml(): EjmlFloatMatrix<FMatrixSparseCSC> = when {
        this is EjmlFloatMatrix<*> && origin is FMatrixSparseCSC -> this as EjmlFloatMatrix<FMatrixSparseCSC>
        else -> buildMatrix(rowNum, colNum) { i, j -> get(i, j) }
    }

    @Suppress("UNCHECKED_CAST")
    public override fun Point<Float>.toEjml(): EjmlFloatVector<FMatrixSparseCSC> = when {
        this is EjmlFloatVector<*> && origin is FMatrixSparseCSC -> this as EjmlFloatVector<FMatrixSparseCSC>
        else -> EjmlFloatVector(FMatrixSparseCSC(size, 1).also {
            (0 until it.numRows).forEach { row -> it[row, 0] = get(row) }
        })
    }

    public override fun buildMatrix(
        rows: Int,
        columns: Int,
        initializer: FloatField.(i: Int, j: Int) -> Float,
    ): EjmlFloatMatrix<FMatrixSparseCSC> = FMatrixSparseCSC(rows, columns).also {
        (0 until rows).forEach { row ->
            (0 until columns).forEach { col -> it[row, col] = elementAlgebra.initializer(row, col) }
        }
    }.wrapMatrix()

    public override fun buildVector(
        size: Int,
        initializer: FloatField.(Int) -> Float,
    ): EjmlFloatVector<FMatrixSparseCSC> = EjmlFloatVector(FMatrixSparseCSC(size, 1).also {
        (0 until it.numRows).forEach { row -> it[row, 0] = elementAlgebra.initializer(row) }
    })

    private fun <T : FMatrix> T.wrapMatrix() = EjmlFloatMatrix(this)
    private fun <T : FMatrix> T.wrapVector() = EjmlFloatVector(this)

    public override fun Matrix<Float>.unaryMinus(): Matrix<Float> = this * elementAlgebra { -one }

    public override fun Matrix<Float>.dot(other: Matrix<Float>): EjmlFloatMatrix<FMatrixSparseCSC> {
        val out = FMatrixSparseCSC(1, 1)
        CommonOps_FSCC.mult(toEjml().origin, other.toEjml().origin, out)
        return out.wrapMatrix()
    }

    public override fun Matrix<Float>.dot(vector: Point<Float>): EjmlFloatVector<FMatrixSparseCSC> {
        val out = FMatrixSparseCSC(1, 1)
        CommonOps_FSCC.mult(toEjml().origin, vector.toEjml().origin, out)
        return out.wrapVector()
    }

    public override operator fun Matrix<Float>.minus(other: Matrix<Float>): EjmlFloatMatrix<FMatrixSparseCSC> {
        val out = FMatrixSparseCSC(1, 1)

        CommonOps_FSCC.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra { -one },
            other.toEjml().origin,
            out,
            null,
            null,
        )

        return out.wrapMatrix()
    }

    public override operator fun Matrix<Float>.times(value: Float): EjmlFloatMatrix<FMatrixSparseCSC> {
        val res = FMatrixSparseCSC(1, 1)
        CommonOps_FSCC.scale(value, toEjml().origin, res)
        return res.wrapMatrix()
    }

    public override fun Point<Float>.unaryMinus(): EjmlFloatVector<FMatrixSparseCSC> {
        val res = FMatrixSparseCSC(1, 1)
        CommonOps_FSCC.changeSign(toEjml().origin, res)
        return res.wrapVector()
    }

    public override fun Matrix<Float>.plus(other: Matrix<Float>): EjmlFloatMatrix<FMatrixSparseCSC> {
        val out = FMatrixSparseCSC(1, 1)

        CommonOps_FSCC.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra.one,
            other.toEjml().origin,
            out,
            null,
            null,
        )

        return out.wrapMatrix()
    }

    public override fun Point<Float>.plus(other: Point<Float>): EjmlFloatVector<FMatrixSparseCSC> {
        val out = FMatrixSparseCSC(1, 1)

        CommonOps_FSCC.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra.one,
            other.toEjml().origin,
            out,
            null,
            null,
        )

        return out.wrapVector()
    }

    public override fun Point<Float>.minus(other: Point<Float>): EjmlFloatVector<FMatrixSparseCSC> {
        val out = FMatrixSparseCSC(1, 1)

        CommonOps_FSCC.add(
            elementAlgebra.one,
            toEjml().origin,
            elementAlgebra { -one },
            other.toEjml().origin,
            out,
            null,
            null,
        )

        return out.wrapVector()
    }

    public override fun Float.times(m: Matrix<Float>): EjmlFloatMatrix<FMatrixSparseCSC> = m * this

    public override fun Point<Float>.times(value: Float): EjmlFloatVector<FMatrixSparseCSC> {
        val res = FMatrixSparseCSC(1, 1)
        CommonOps_FSCC.scale(value, toEjml().origin, res)
        return res.wrapVector()
    }

    public override fun Float.times(v: Point<Float>): EjmlFloatVector<FMatrixSparseCSC> = v * this

    @UnstableKMathAPI
    public override fun <F : StructureFeature> getFeature(structure: Matrix<Float>, type: KClass<out F>): F? {
        structure.getFeature(type)?.let { return it }
        val origin = structure.toEjml().origin

        return when (type) {
            QRDecompositionFeature::class -> object : QRDecompositionFeature<Float> {
                private val qr by lazy {
                    DecompositionFactory_FSCC.qr(FillReducing.NONE).apply { decompose(origin.copy()) }
                }

                override val q: Matrix<Float> by lazy {
                    qr.getQ(null, false).wrapMatrix() + OrthogonalFeature
                }

                override val r: Matrix<Float> by lazy { qr.getR(null, false).wrapMatrix() + UFeature }
            }

            CholeskyDecompositionFeature::class -> object : CholeskyDecompositionFeature<Float> {
                override val l: Matrix<Float> by lazy {
                    val cholesky =
                        DecompositionFactory_FSCC.cholesky().apply { decompose(origin.copy()) }

                    (cholesky.getT(null) as FMatrix).wrapMatrix() + LFeature
                }
            }

            LUDecompositionFeature::class, DeterminantFeature::class, InverseMatrixFeature::class -> object :
                LUDecompositionFeature<Float>, DeterminantFeature<Float>, InverseMatrixFeature<Float> {
                private val lu by lazy {
                    DecompositionFactory_FSCC.lu(FillReducing.NONE).apply { decompose(origin.copy()) }
                }

                override val l: Matrix<Float> by lazy {
                    lu.getLower(null).wrapMatrix() + LFeature
                }

                override val u: Matrix<Float> by lazy {
                    lu.getUpper(null).wrapMatrix() + UFeature
                }

                override val inverse: Matrix<Float> by lazy {
                    var a = origin
                    val inverse = FMatrixRMaj(1, 1)
                    val solver = LinearSolverFactory_FSCC.lu(FillReducing.NONE)
                    if (solver.modifiesA()) a = a.copy()
                    val i = CommonOps_FDRM.identity(a.numRows)
                    solver.solve(i, inverse)
                    inverse.wrapMatrix()
                }

                override val determinant: Float by lazy { elementAlgebra.number(lu.computeDeterminant().real) }
            }

            else -> null
        }?.let(type::cast)
    }

    /**
     * Solves for *x* in the following equation: *x = [a] <sup>-1</sup> · [b]*.
     *
     * @param a the base matrix.
     * @param b n by p matrix.
     * @return the solution for *x* that is n by p.
     */
    public fun solve(a: Matrix<Float>, b: Matrix<Float>): EjmlFloatMatrix<FMatrixSparseCSC> {
        val res = FMatrixSparseCSC(1, 1)
        CommonOps_FSCC.solve(FMatrixSparseCSC(a.toEjml().origin), FMatrixSparseCSC(b.toEjml().origin), res)
        return res.wrapMatrix()
    }

    /**
     * Solves for *x* in the following equation: *x = [a] <sup>-1</sup> · [b]*.
     *
     * @param a the base matrix.
     * @param b n by p vector.
     * @return the solution for *x* that is n by p.
     */
    public fun solve(a: Matrix<Float>, b: Point<Float>): EjmlFloatVector<FMatrixSparseCSC> {
        val res = FMatrixSparseCSC(1, 1)
        CommonOps_FSCC.solve(FMatrixSparseCSC(a.toEjml().origin), FMatrixSparseCSC(b.toEjml().origin), res)
        return EjmlFloatVector(res)
    }
}
@ -11,7 +11,7 @@ import org.ejml.dense.row.RandomMatrices_DDRM
import org.ejml.dense.row.factory.DecompositionFactory_DDRM
import space.kscience.kmath.linear.DeterminantFeature
import space.kscience.kmath.linear.LupDecompositionFeature
import space.kscience.kmath.linear.getFeature
import space.kscience.kmath.linear.computeFeature
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.nd.StructureND
@ -59,9 +59,9 @@ internal class EjmlMatrixTest {
    fun features() {
        val m = randomMatrix
        val w = EjmlDoubleMatrix(m)
        val det: DeterminantFeature<Double> = EjmlLinearSpaceDDRM.getFeature(w) ?: fail()
        val det: DeterminantFeature<Double> = EjmlLinearSpaceDDRM.computeFeature(w) ?: fail()
        assertEquals(CommonOps_DDRM.det(m), det.determinant)
        val lup: LupDecompositionFeature<Double> = EjmlLinearSpaceDDRM.getFeature(w) ?: fail()
        val lup: LupDecompositionFeature<Double> = EjmlLinearSpaceDDRM.computeFeature(w) ?: fail()

        val ludecompositionF64 = DecompositionFactory_DDRM.lu(m.numRows, m.numCols)
            .also { it.decompose(m.copy()) }
@ -32,18 +32,18 @@ import kotlin.math.pow
public typealias RealMatrix = Matrix<Double>

public fun realMatrix(rowNum: Int, colNum: Int, initializer: DoubleField.(i: Int, j: Int) -> Double): RealMatrix =
    LinearSpace.real.buildMatrix(rowNum, colNum, initializer)
    LinearSpace.double.buildMatrix(rowNum, colNum, initializer)

@OptIn(UnstableKMathAPI::class)
public fun realMatrix(rowNum: Int, colNum: Int): MatrixBuilder<Double, DoubleField> =
    LinearSpace.real.matrix(rowNum, colNum)
    LinearSpace.double.matrix(rowNum, colNum)

public fun Array<DoubleArray>.toMatrix(): RealMatrix {
    return LinearSpace.real.buildMatrix(size, this[0].size) { row, col -> this@toMatrix[row][col] }
    return LinearSpace.double.buildMatrix(size, this[0].size) { row, col -> this@toMatrix[row][col] }
}

public fun Sequence<DoubleArray>.toMatrix(): RealMatrix = toList().let {
    LinearSpace.real.buildMatrix(it.size, it[0].size) { row, col -> it[row][col] }
    LinearSpace.double.buildMatrix(it.size, it[0].size) { row, col -> it[row][col] }
}

public fun RealMatrix.repeatStackVertical(n: Int): RealMatrix =
@ -56,37 +56,37 @@ public fun RealMatrix.repeatStackVertical(n: Int): RealMatrix =
 */

public operator fun RealMatrix.times(double: Double): RealMatrix =
    LinearSpace.real.buildMatrix(rowNum, colNum) { row, col ->
    LinearSpace.double.buildMatrix(rowNum, colNum) { row, col ->
        get(row, col) * double
    }

public operator fun RealMatrix.plus(double: Double): RealMatrix =
    LinearSpace.real.buildMatrix(rowNum, colNum) { row, col ->
    LinearSpace.double.buildMatrix(rowNum, colNum) { row, col ->
        get(row, col) + double
    }

public operator fun RealMatrix.minus(double: Double): RealMatrix =
    LinearSpace.real.buildMatrix(rowNum, colNum) { row, col ->
    LinearSpace.double.buildMatrix(rowNum, colNum) { row, col ->
        get(row, col) - double
    }

public operator fun RealMatrix.div(double: Double): RealMatrix =
    LinearSpace.real.buildMatrix(rowNum, colNum) { row, col ->
    LinearSpace.double.buildMatrix(rowNum, colNum) { row, col ->
        get(row, col) / double
    }

public operator fun Double.times(matrix: RealMatrix): RealMatrix =
    LinearSpace.real.buildMatrix(matrix.rowNum, matrix.colNum) { row, col ->
    LinearSpace.double.buildMatrix(matrix.rowNum, matrix.colNum) { row, col ->
        this@times * matrix[row, col]
    }

public operator fun Double.plus(matrix: RealMatrix): RealMatrix =
    LinearSpace.real.buildMatrix(matrix.rowNum, matrix.colNum) { row, col ->
    LinearSpace.double.buildMatrix(matrix.rowNum, matrix.colNum) { row, col ->
        this@plus + matrix[row, col]
    }

public operator fun Double.minus(matrix: RealMatrix): RealMatrix =
    LinearSpace.real.buildMatrix(matrix.rowNum, matrix.colNum) { row, col ->
    LinearSpace.double.buildMatrix(matrix.rowNum, matrix.colNum) { row, col ->
        this@minus - matrix[row, col]
    }

@ -101,20 +101,20 @@ public operator fun Double.minus(matrix: RealMatrix): RealMatrix =

@UnstableKMathAPI
public operator fun RealMatrix.times(other: RealMatrix): RealMatrix =
    LinearSpace.real.buildMatrix(rowNum, colNum) { row, col -> this@times[row, col] * other[row, col] }
    LinearSpace.double.buildMatrix(rowNum, colNum) { row, col -> this@times[row, col] * other[row, col] }

public operator fun RealMatrix.plus(other: RealMatrix): RealMatrix =
    LinearSpace.real.run { this@plus + other }
    LinearSpace.double.run { this@plus + other }

public operator fun RealMatrix.minus(other: RealMatrix): RealMatrix =
    LinearSpace.real.buildMatrix(rowNum, colNum) { row, col -> this@minus[row, col] - other[row, col] }
    LinearSpace.double.buildMatrix(rowNum, colNum) { row, col -> this@minus[row, col] - other[row, col] }

/*
 * Operations on columns
 */

public inline fun RealMatrix.appendColumn(crossinline mapper: (Buffer<Double>) -> Double): RealMatrix =
    LinearSpace.real.buildMatrix(rowNum, colNum + 1) { row, col ->
    LinearSpace.double.buildMatrix(rowNum, colNum + 1) { row, col ->
        if (col < colNum)
            get(row, col)
        else
@ -122,7 +122,7 @@ public inline fun RealMatrix.appendColumn(crossinline mapper: (Buffer<Double>) -
    }

public fun RealMatrix.extractColumns(columnRange: IntRange): RealMatrix =
    LinearSpace.real.buildMatrix(rowNum, columnRange.count()) { row, col ->
    LinearSpace.double.buildMatrix(rowNum, columnRange.count()) { row, col ->
        this@extractColumns[row, columnRange.first + col]
    }

@ -155,14 +155,14 @@ public fun RealMatrix.max(): Double? = elements().map { (_, value) -> value }.ma
public fun RealMatrix.average(): Double = elements().map { (_, value) -> value }.average()

public inline fun RealMatrix.map(crossinline transform: (Double) -> Double): RealMatrix =
    LinearSpace.real.buildMatrix(rowNum, colNum) { i, j ->
    LinearSpace.double.buildMatrix(rowNum, colNum) { i, j ->
        transform(get(i, j))
    }

/**
 * Inverts a square real matrix using LUP decomposition
 */
public fun RealMatrix.inverseWithLup(): RealMatrix = LinearSpace.real.inverseWithLup(this)
public fun RealMatrix.inverseWithLup(): RealMatrix = LinearSpace.double.lupSolver().inverse(this)
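
Editor's note: a small sketch (not in the commit) of how the rewritten helpers compose; the assumption is that they live in the kmath-for-real module under space.kscience.kmath.real.

// Hypothetical example; the wildcard import is an assumption.
import space.kscience.kmath.real.*

fun main() {
    val m = realMatrix(2, 2) { i, j -> (i + j + 1).toDouble() } // [[1, 2], [2, 3]]
    val scaled = (m * 2.0 + 1.0) / 3.0 // element-wise operators defined above
    val inverse = m.inverseWithLup()   // delegates to LinearSpace.double.lupSolver()
    println(scaled)
    println(inverse)
}
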
//extended operations

@ -12,6 +12,6 @@ import space.kscience.kmath.linear.Matrix
/**
 * Optimized dot product for real matrices
 */
public infix fun Matrix<Double>.dot(other: Matrix<Double>): Matrix<Double> = LinearSpace.real.run {
public infix fun Matrix<Double>.dot(other: Matrix<Double>): Matrix<Double> = LinearSpace.double.run {
    this@dot dot other
}
@ -65,7 +65,7 @@ internal class DoubleMatrixTest {
            4.0, 6.0, 2.0
        )
        val matrix2 = (matrix1 * 2.5 + 1.0 - 2.0) / 2.0
        val expectedResult = LinearSpace.real.matrix(2, 3)(
        val expectedResult = LinearSpace.double.matrix(2, 3)(
            0.75, -0.5, 3.25,
            4.5, 7.0, 2.0
        )
@ -160,7 +160,7 @@ internal class DoubleMatrixTest {

    @Test
    fun testAllElementOperations() {
        val matrix1 = LinearSpace.real.matrix(2, 4)(
        val matrix1 = LinearSpace.double.matrix(2, 4)(
            -1.0, 0.0, 3.0, 15.0,
            4.0, -6.0, 7.0, -11.0
        )

@ -35,7 +35,7 @@ internal class DoubleVectorTest {
        val vector2 = DoubleBuffer(5) { 5 - it.toDouble() }
        val matrix1 = vector1.asMatrix()
        val matrix2 = vector2.asMatrix().transpose()
        val product = LinearSpace.real.run { matrix1 dot matrix2 }
        val product = LinearSpace.double.run { matrix1 dot matrix2 }
        assertEquals(5.0, product[1, 0])
        assertEquals(6.0, product[2, 2])
    }
@ -5,11 +5,12 @@

package space.kscience.kmath.integration

import space.kscience.kmath.misc.Feature
import space.kscience.kmath.misc.FeatureSet
import space.kscience.kmath.misc.Featured
import kotlin.reflect.KClass

public interface IntegrandFeature {
public interface IntegrandFeature : Feature<IntegrandFeature> {
    override fun toString(): String
}

@ -18,7 +19,7 @@ public interface Integrand : Featured<IntegrandFeature> {
    override fun <T : IntegrandFeature> getFeature(type: KClass<out T>): T? = features.getFeature(type)
}

public inline fun <reified T: IntegrandFeature> Integrand.getFeature(): T? = getFeature(T::class)
public inline fun <reified T : IntegrandFeature> Integrand.getFeature(): T? = getFeature(T::class)

public class IntegrandValue<T : Any>(public val value: T) : IntegrandFeature {
    override fun toString(): String = "Value($value)"
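
Editor's note: since IntegrandFeature now extends Feature<IntegrandFeature>, user-defined attributes participate in the same FeatureSet machinery as the built-in ones. A minimal sketch, not part of the commit; the IntegrandAccuracy name is hypothetical.

// Hypothetical feature type; only the IntegrandFeature contract above is assumed.
public class IntegrandAccuracy(public val accuracy: Double) : IntegrandFeature {
    override fun toString(): String = "Accuracy($accuracy)"
}

// Retrieval goes through the reified helper defined above:
// val accuracy: IntegrandAccuracy? = integrand.getFeature()
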
@ -27,19 +27,30 @@ public class KotlingradExpression<T : Number, A : NumericAlgebra<T>>(
) : SpecialDifferentiableExpression<T, KotlingradExpression<T, A>> {
    public override fun invoke(arguments: Map<Symbol, T>): T = mst.interpret(algebra, arguments)

    public override fun derivativeOrNull(symbols: List<Symbol>): KotlingradExpression<T, A> =
        KotlingradExpression(
            algebra,
            symbols.map(Symbol::identity)
                .map(MstNumericAlgebra::bindSymbol)
                .map<Symbol, SVar<KMathNumber<T, A>>>(Symbol::toSVar)
                .fold(mst.toSFun(), SFun<KMathNumber<T, A>>::d)
                .toMst(),
        )
    public override fun derivativeOrNull(
        symbols: List<Symbol>,
    ): KotlingradExpression<T, A> = KotlingradExpression(
        algebra,
        symbols.map(Symbol::identity)
            .map(MstNumericAlgebra::bindSymbol)
            .map<Symbol, SVar<KMathNumber<T, A>>>(Symbol::toSVar)
            .fold(mst.toSFun(), SFun<KMathNumber<T, A>>::d)
            .toMst(),
    )
}

/**
 * A diff processor using [MST] to Kotlingrad converter
 */
public class KotlingradProcessor<T : Number, A : NumericAlgebra<T>>(
    public val algebra: A,
) : AutoDiffProcessor<T, MST, MstExtendedField> {
    override fun differentiate(function: MstExtendedField.() -> MST): DifferentiableExpression<T> =
        MstExtendedField.function().toKotlingradExpression(algebra)
}

/**
 * Wraps this [MST] into [KotlingradExpression].
 */
public fun <T : Number, A : NumericAlgebra<T>> MST.toDiffExpression(algebra: A): KotlingradExpression<T, A> =
public fun <T : Number, A : NumericAlgebra<T>> MST.toKotlingradExpression(algebra: A): KotlingradExpression<T, A> =
    KotlingradExpression(algebra, this)
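
Editor's note: to make the rename concrete, a sketch (not in the commit) of differentiating an MST through the Kotlin∇ bridge. The import paths and the availability of Symbol as an MST node are assumptions based on the surrounding code.

// Hypothetical example; import locations are assumed.
import space.kscience.kmath.expressions.MstExtendedField
import space.kscience.kmath.expressions.Symbol
import space.kscience.kmath.expressions.derivative
import space.kscience.kmath.operations.DoubleField

fun main() {
    val x = Symbol("x") // the Symbol factory function replacing StringSymbol
    val expression = MstExtendedField { x * x + sin(x) }.toKotlingradExpression(DoubleField)
    // d/dx (x^2 + sin x) = 2x + cos x, so the value at x = 0 is 1
    println(expression.derivative(x)(mapOf(x to 0.0)))
}
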
@ -23,7 +23,7 @@ public enum class FunctionOptimizationTarget : OptimizationFeature {
public class FunctionOptimization<T>(
    override val features: FeatureSet<OptimizationFeature>,
    public val expression: DifferentiableExpression<T>,
) : OptimizationProblem{
) : OptimizationProblem<T>{

    public companion object{
        /**
@ -56,7 +56,6 @@ public class FunctionOptimization<T>(
    }
}


public fun <T> FunctionOptimization<T>.withFeatures(
    vararg newFeature: OptimizationFeature,
): FunctionOptimization<T> = FunctionOptimization(
@ -68,7 +67,7 @@ public fun <T> FunctionOptimization<T>.withFeatures(
 * Optimize differentiable expression using specific [optimizer] from given [startingPoint]
 */
public suspend fun <T : Any> DifferentiableExpression<T>.optimizeWith(
    optimizer: Optimizer<FunctionOptimization<T>>,
    optimizer: Optimizer<T, FunctionOptimization<T>>,
    startingPoint: Map<Symbol, T>,
    vararg features: OptimizationFeature,
): FunctionOptimization<T> {
@ -76,3 +75,8 @@ public suspend fun <T : Any> DifferentiableExpression<T>.optimizeWith(
    return optimizer.optimize(problem)
}

public val <T> FunctionOptimization<T>.resultValueOrNull: T?
    get() = getFeature<OptimizationResult<T>>()?.point?.let { expression(it) }

public val <T> FunctionOptimization<T>.resultValue: T
    get() = resultValueOrNull ?: error("Result is not present in $this")
@ -0,0 +1,93 @@
/*
 * Copyright 2018-2021 KMath contributors.
 * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
 */

package space.kscience.kmath.optimization

import space.kscience.kmath.data.XYColumnarData
import space.kscience.kmath.expressions.DifferentiableExpression
import space.kscience.kmath.expressions.Symbol
import space.kscience.kmath.misc.FeatureSet

public abstract class OptimizationBuilder<T, R : OptimizationProblem<T>> {
    public val features: ArrayList<OptimizationFeature> = ArrayList()

    public fun addFeature(feature: OptimizationFeature) {
        features.add(feature)
    }

    public inline fun <reified T : OptimizationFeature> updateFeature(update: (T?) -> T) {
        val existing = features.find { it.key == T::class } as? T
        val new = update(existing)
        if (existing != null) {
            features.remove(existing)
        }
        addFeature(new)
    }

    public abstract fun build(): R
}

public fun <T> OptimizationBuilder<T, *>.startAt(startingPoint: Map<Symbol, T>) {
    addFeature(OptimizationStartPoint(startingPoint))
}

public class FunctionOptimizationBuilder<T>(
    private val expression: DifferentiableExpression<T>,
) : OptimizationBuilder<T, FunctionOptimization<T>>() {
    override fun build(): FunctionOptimization<T> = FunctionOptimization(FeatureSet.of(features), expression)
}

public fun <T> FunctionOptimization(
    expression: DifferentiableExpression<T>,
    builder: FunctionOptimizationBuilder<T>.() -> Unit,
): FunctionOptimization<T> = FunctionOptimizationBuilder(expression).apply(builder).build()

public suspend fun <T> DifferentiableExpression<T>.optimizeWith(
    optimizer: Optimizer<T, FunctionOptimization<T>>,
    startingPoint: Map<Symbol, T>,
    builder: FunctionOptimizationBuilder<T>.() -> Unit = {},
): FunctionOptimization<T> {
    val problem = FunctionOptimization<T>(this) {
        startAt(startingPoint)
        builder()
    }
    return optimizer.optimize(problem)
}

public suspend fun <T> DifferentiableExpression<T>.optimizeWith(
    optimizer: Optimizer<T, FunctionOptimization<T>>,
    vararg startingPoint: Pair<Symbol, T>,
    builder: FunctionOptimizationBuilder<T>.() -> Unit = {},
): FunctionOptimization<T> {
    val problem = FunctionOptimization<T>(this) {
        startAt(mapOf(*startingPoint))
        builder()
    }
    return optimizer.optimize(problem)
}


public class XYOptimizationBuilder(
    public val data: XYColumnarData<Double, Double, Double>,
    public val model: DifferentiableExpression<Double>,
) : OptimizationBuilder<Double, XYOptimization>() {

    public var pointToCurveDistance: PointToCurveDistance = PointToCurveDistance.byY
    public var pointWeight: PointWeight = PointWeight.byYSigma

    override fun build(): XYOptimization = XYOptimization(
        FeatureSet.of(features),
        data,
        model,
        pointToCurveDistance,
        pointWeight
    )
}

public fun XYOptimization(
    data: XYColumnarData<Double, Double, Double>,
    model: DifferentiableExpression<Double>,
    builder: XYOptimizationBuilder.() -> Unit,
): XYOptimization = XYOptimizationBuilder(data, model).apply(builder).build()
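
Editor's note: a sketch (not in the commit) of the builder-based entry points above; the expression and optimizer parameters are placeholders for real instances such as the QowOptimizer added later in this changeset.

// Hypothetical usage; expression and optimizer instances are placeholders.
import space.kscience.kmath.expressions.DifferentiableExpression
import space.kscience.kmath.expressions.Symbol

suspend fun example(
    expression: DifferentiableExpression<Double>,
    myOptimizer: Optimizer<Double, FunctionOptimization<Double>>,
) {
    val x = Symbol("x")
    // vararg overload: the starting point is installed as an OptimizationStartPoint feature
    val result = expression.optimizeWith(myOptimizer, x to 1.0) {
        // additional features may be added here via addFeature(...)
    }
    println(result.resultPointOrNull)
}
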
@ -5,37 +5,61 @@

package space.kscience.kmath.optimization

import space.kscience.kmath.expressions.DifferentiableExpression
import space.kscience.kmath.expressions.Symbol
import space.kscience.kmath.misc.FeatureSet
import space.kscience.kmath.misc.Featured
import space.kscience.kmath.misc.Loggable
import space.kscience.kmath.linear.Matrix
import space.kscience.kmath.misc.*
import kotlin.reflect.KClass

public interface OptimizationFeature {
public interface OptimizationFeature : Feature<OptimizationFeature> {
    // enforce toString override
    override fun toString(): String
}

public interface OptimizationProblem : Featured<OptimizationFeature> {
public interface OptimizationProblem<T> : Featured<OptimizationFeature> {
    public val features: FeatureSet<OptimizationFeature>
    override fun <T : OptimizationFeature> getFeature(type: KClass<out T>): T? = features.getFeature(type)
    override fun <F : OptimizationFeature> getFeature(type: KClass<out F>): F? = features.getFeature(type)
}

public inline fun <reified T : OptimizationFeature> OptimizationProblem.getFeature(): T? = getFeature(T::class)
public inline fun <reified F : OptimizationFeature> OptimizationProblem<*>.getFeature(): F? = getFeature(F::class)

public open class OptimizationStartPoint<T>(public val point: Map<Symbol, T>) : OptimizationFeature {
    override fun toString(): String = "StartPoint($point)"
}


public interface OptimizationPrior<T> : OptimizationFeature, DifferentiableExpression<T> {
    override val key: FeatureKey<OptimizationFeature> get() = OptimizationPrior::class
}

public class OptimizationCovariance<T>(public val covariance: Matrix<T>) : OptimizationFeature {
    override fun toString(): String = "Covariance($covariance)"
}

/**
 * Get the starting point for optimization. Throws error if not defined.
 */
public val <T> OptimizationProblem<T>.startPoint: Map<Symbol, T>
    get() = getFeature<OptimizationStartPoint<T>>()?.point
        ?: error("Starting point not defined in $this")

public open class OptimizationResult<T>(public val point: Map<Symbol, T>) : OptimizationFeature {
    override fun toString(): String = "Result($point)"
}

public val <T> OptimizationProblem<T>.resultPointOrNull: Map<Symbol, T>?
    get() = getFeature<OptimizationResult<T>>()?.point

public val <T> OptimizationProblem<T>.resultPoint: Map<Symbol, T>
    get() = resultPointOrNull ?: error("Result is not present in $this")

public class OptimizationLog(private val loggable: Loggable) : Loggable by loggable, OptimizationFeature {
    override fun toString(): String = "Log($loggable)"
}

public class OptimizationParameters(public val symbols: List<Symbol>): OptimizationFeature{
public class OptimizationParameters(public val symbols: List<Symbol>) : OptimizationFeature {
    public constructor(vararg symbols: Symbol) : this(listOf(*symbols))

    override fun toString(): String = "Parameters($symbols)"
}
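
Editor's note: the key override on OptimizationPrior above is the general pattern for features that should be looked up by a base interface. A sketch of a plain feature without an overridden key follows; the MaxIterations name is hypothetical and not part of the commit.

// Hypothetical feature; only the OptimizationFeature contract above is assumed.
public class MaxIterations(public val value: Int) : OptimizationFeature {
    override fun toString(): String = "MaxIterations($value)"
}

// Lookup uses the reified helper defined above:
// val maxIterations: MaxIterations? = problem.getFeature()
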
@ -5,6 +5,6 @@

package space.kscience.kmath.optimization

public interface Optimizer<P : OptimizationProblem> {
public interface Optimizer<T, P : OptimizationProblem<T>> {
    public suspend fun optimize(problem: P): P
}
@ -0,0 +1,247 @@
|
||||
/*
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.optimization
|
||||
|
||||
import space.kscience.kmath.expressions.DifferentiableExpression
|
||||
import space.kscience.kmath.expressions.Symbol
|
||||
import space.kscience.kmath.expressions.SymbolIndexer
|
||||
import space.kscience.kmath.expressions.derivative
|
||||
import space.kscience.kmath.linear.*
|
||||
import space.kscience.kmath.misc.UnstableKMathAPI
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.structures.DoubleBuffer
|
||||
import space.kscience.kmath.structures.DoubleL2Norm
|
||||
|
||||
|
||||
/**
|
||||
* An optimizer based onf Fyodor Tkachev's quasi-optimal weights method.
|
||||
* See [the article](http://arxiv.org/abs/physics/0604127).
|
||||
*/
|
||||
@UnstableKMathAPI
|
||||
public class QowOptimizer : Optimizer<Double, XYOptimization> {
|
||||
|
||||
private val linearSpace: LinearSpace<Double, DoubleField> = LinearSpace.double
|
||||
private val solver: LinearSolver<Double> = linearSpace.lupSolver()
|
||||
|
||||
@OptIn(UnstableKMathAPI::class)
|
||||
private inner class QoWeight(
|
||||
val problem: XYOptimization,
|
||||
val parameters: Map<Symbol, Double>,
|
||||
) : Map<Symbol, Double> by parameters, SymbolIndexer {
|
||||
override val symbols: List<Symbol> = parameters.keys.toList()
|
||||
|
||||
val data get() = problem.data
|
||||
|
||||
/**
|
||||
* Derivatives of the spectrum over parameters. First index in the point number, second one - index of parameter
|
||||
*/
|
||||
val derivs: Matrix<Double> by lazy {
|
||||
linearSpace.buildMatrix(problem.data.size, symbols.size) { i, k ->
|
||||
problem.distance(i).derivative(symbols[k])(parameters)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Array of dispersions in each point
|
||||
*/
|
||||
val dispersion: Point<Double> by lazy {
|
||||
DoubleBuffer(problem.data.size) { i ->
|
||||
problem.weight(i).invoke(parameters)
|
||||
}
|
||||
}
|
||||
|
||||
val prior: DifferentiableExpression<Double>? get() = problem.getFeature<OptimizationPrior<Double>>()
|
||||
}
|
||||
|
||||
/**
|
||||
* The signed distance from the model to the [i]-th point of data.
|
||||
*/
|
||||
private fun QoWeight.distance(i: Int, parameters: Map<Symbol, Double>): Double = problem.distance(i)(parameters)
|
||||
|
||||
|
||||
/**
|
||||
* The derivative of [distance]
|
||||
*/
|
||||
private fun QoWeight.distanceDerivative(symbol: Symbol, i: Int, parameters: Map<Symbol, Double>): Double =
|
||||
problem.distance(i).derivative(symbol)(parameters)
|
||||
|
||||
/**
|
||||
* Теоретическая ковариация весовых функций.
|
||||
*
|
||||
* D(\phi)=E(\phi_k(\theta_0) \phi_l(\theta_0))= disDeriv_k * disDeriv_l /sigma^2
|
||||
*/
|
||||
private fun QoWeight.covarF(): Matrix<Double> =
|
||||
linearSpace.matrix(size, size).symmetric { k, l ->
|
||||
(0 until data.size).sumOf { i -> derivs[k, i] * derivs[l, i] / dispersion[i] }
|
||||
}
|
||||
|
||||
/**
|
||||
* Экспериментальная ковариация весов. Формула (22) из
|
||||
* http://arxiv.org/abs/physics/0604127
|
||||
*/
|
||||
private fun QoWeight.covarFExp(theta: Map<Symbol, Double>): Matrix<Double> =
|
||||
with(linearSpace) {
|
||||
/*
|
||||
* Важно! Если не делать предварителього вычисления этих производных, то
|
||||
* количество вызывов функции будет dim^2 вместо dim Первый индекс -
|
||||
* номер точки, второй - номер переменной, по которой берется производная
|
||||
*/
|
||||
val eqvalues = linearSpace.buildMatrix(data.size, size) { i, l ->
|
||||
distance(i, theta) * derivs[l, i] / dispersion[i]
|
||||
}
|
||||
|
||||
buildMatrix(size, size) { k, l ->
|
||||
(0 until data.size).sumOf { i -> eqvalues[i, l] * eqvalues[i, k] }
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Equation derivatives for Newton run
|
||||
*/
|
||||
private fun QoWeight.getEqDerivValues(
|
||||
theta: Map<Symbol, Double> = parameters,
|
||||
): Matrix<Double> = with(linearSpace) {
|
||||
val fitDim = size
|
||||
//Возвращает производную k-того Eq по l-тому параметру
|
||||
//val res = Array(fitDim) { DoubleArray(fitDim) }
|
||||
val sderiv = buildMatrix(data.size, size) { i, l ->
|
||||
distanceDerivative(symbols[l], i, theta)
|
||||
}
|
||||
|
||||
buildMatrix(size, size) { k, l ->
|
||||
val base = (0 until data.size).sumOf { i ->
|
||||
require(dispersion[i] > 0)
|
||||
sderiv[i, l] * derivs[k, i] / dispersion[i]
|
||||
}
|
||||
prior?.let { prior ->
|
||||
//Check if this one is correct
|
||||
val pi = prior(theta)
|
||||
val deriv1 = prior.derivative(symbols[k])(theta)
|
||||
val deriv2 = prior.derivative(symbols[l])(theta)
|
||||
base + deriv1 * deriv2 / pi / pi
|
||||
} ?: base
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
 * Values of the quasi-optimal weight equations.
 */
private fun QoWeight.getEqValues(theta: Map<Symbol, Double> = this): Point<Double> {
    val distances = DoubleBuffer(data.size) { i -> distance(i, theta) }

    return DoubleBuffer(size) { k ->
        val base = (0 until data.size).sumOf { i -> distances[i] * derivs[i, k] / dispersion[i] }
        //Correction for the prior probability
        prior?.let { prior ->
            base - prior.derivative(symbols[k])(theta) / prior(theta)
        } ?: base
    }
}


private fun QoWeight.newtonianStep(
    theta: Map<Symbol, Double>,
    eqvalues: Point<Double>,
): QoWeight = linearSpace {
    with(this@newtonianStep) {
        val start = theta.toPoint()
        val invJacob = solver.inverse(this@newtonianStep.getEqDerivValues(theta))

        val step = invJacob.dot(eqvalues)
        return QoWeight(problem, theta + (start - step).toMap())
    }
}
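
// Standalone sketch (not from this commit) of the same Newton update in one dimension:
// theta_next = theta - f(theta) / f'(theta). In newtonianStep above, the division by f'
// is replaced by multiplication with the inverted Jacobian of the QOW equations.
fun scalarNewtonStep(theta: Double, f: (Double) -> Double, fPrime: (Double) -> Double): Double =
    theta - f(theta) / fPrime(theta)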
private fun QoWeight.newtonianRun(
    maxSteps: Int = 100,
    tolerance: Double = 0.0,
    fast: Boolean = false,
): QoWeight {

    val logger = problem.getFeature<OptimizationLog>()

    var dis: Double //discrepancy norm
    // For convenience, always work with the full set of parameters
    var par = problem.startPoint

    logger?.log { "Starting newtonian iteration from: \n\t$par" }

    var eqvalues = getEqValues(par) //values of the equations

    dis = DoubleL2Norm.norm(eqvalues) //discrepancy
    logger?.log { "Starting discrepancy is $dis" }
    var i = 0
    var flag = false
    while (!flag) {
        i++
        logger?.log { "Starting step number $i" }

        val currentSolution = if (fast) {
            //Take the matrix values at the point where the weight is computed
            newtonianStep(this, eqvalues)
        } else {
            //Take the matrix values at the point par
            newtonianStep(par, eqvalues)
        }
        //parameter bounds should be taken into account here
        logger?.log { "Parameter values after step are: \n\t$currentSolution" }

        eqvalues = getEqValues(currentSolution)
        val currentDis = DoubleL2Norm.norm(eqvalues) //discrepancy after the step

        logger?.log { "The discrepancy after step is: $currentDis." }

        if (currentDis >= dis && i > 1) {
            //additionally check that at least one step has been made
            flag = true
            logger?.log { "The discrepancy does not decrease. Stopping iteration." }
        } else {
            par = currentSolution
            dis = currentDis
        }
        if (i >= maxSteps) {
            flag = true
            logger?.log { "Maximum number of iterations reached. Stopping iteration." }
        }
        if (dis <= tolerance) {
            flag = true
            logger?.log { "Tolerance threshold is reached. Stopping iteration." }
        }
    }

    return QoWeight(problem, par)
}
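
// Standalone sketch (not from this commit): the same run loop in one dimension, with the
// same three stopping rules as newtonianRun above (non-decreasing discrepancy after the
// first step, step limit, tolerance). Useful as a reference for the control flow.
fun scalarNewtonRun(
    start: Double,
    f: (Double) -> Double,
    fPrime: (Double) -> Double,
    maxSteps: Int = 100,
    tolerance: Double = 0.0,
): Double {
    var theta = start
    var dis = kotlin.math.abs(f(theta))
    var i = 0
    while (true) {
        i++
        val candidate = theta - f(theta) / fPrime(theta)
        val currentDis = kotlin.math.abs(f(candidate))
        if (currentDis >= dis && i > 1) break //discrepancy no longer decreases
        theta = candidate
        dis = currentDis
        if (i >= maxSteps || dis <= tolerance) break
    }
    return theta
}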
private fun QoWeight.covariance(): Matrix<Double> {
    val logger = problem.getFeature<OptimizationLog>()

    logger?.log {
        """
            Starting errors estimation using the quasi-optimal weights method. The starting weight is:
            ${problem.startPoint}
        """.trimIndent()
    }

    val covar = solver.inverse(getEqDerivValues())
    //TODO fix eigenvalues check
//    val decomposition = EigenDecomposition(covar.matrix)
//    var valid = true
//    for (lambda in decomposition.realEigenvalues) {
//        if (lambda <= 0) {
//            logger?.log { "The covariance matrix is not positive definite. Error estimation is not valid." }
//            valid = false
//        }
//    }
    return covar
}
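
// Illustrative helper (not in the commit): one-sigma parameter errors are the square
// roots of the covariance diagonal. Assumes the Matrix get(i, j) operator used elsewhere
// in this file.
private fun QoWeight.errorsSketch(): DoubleArray {
    val covar = covariance()
    return DoubleArray(size) { k -> kotlin.math.sqrt(covar[k, k]) }
}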
override suspend fun optimize(problem: XYOptimization): XYOptimization {
    val initialWeight = QoWeight(problem, problem.startPoint)
    val res = initialWeight.newtonianRun()
    return res.problem.withFeature(OptimizationResult(res.parameters))
}
}
@@ -7,13 +7,21 @@
package space.kscience.kmath.optimization

import space.kscience.kmath.data.XYColumnarData
import space.kscience.kmath.data.indices
import space.kscience.kmath.expressions.DifferentiableExpression
import space.kscience.kmath.expressions.Expression
import space.kscience.kmath.expressions.Symbol
import space.kscience.kmath.expressions.derivative
import space.kscience.kmath.misc.FeatureSet
import space.kscience.kmath.misc.UnstableKMathAPI
import kotlin.math.PI
import kotlin.math.ln
import kotlin.math.pow
import kotlin.math.sqrt

/**
 * Specifies the way to compute the distance from a point to the curve as a DifferentiableExpression.
 */
public interface PointToCurveDistance : OptimizationFeature {
    public fun distance(problem: XYOptimization, index: Int): DifferentiableExpression<Double>
@@ -33,42 +41,107 @@ public interface PointToCurveDistance : OptimizationFeature {
            }

            override fun toString(): String = "PointToCurveDistanceByY"
        }
    }
}
/**
 * Compute a weight of the point. The larger the weight, the more impact the point has on the fit.
 * By default uses Dispersion^-1.
 */
public interface PointWeight : OptimizationFeature {
    public fun weight(problem: XYOptimization, index: Int): DifferentiableExpression<Double>

    public companion object {
        public fun bySigma(sigmaSymbol: Symbol): PointWeight = object : PointWeight {
            override fun weight(problem: XYOptimization, index: Int): DifferentiableExpression<Double> =
                object : DifferentiableExpression<Double> {
                    override fun invoke(arguments: Map<Symbol, Double>): Double {
                        return problem.data[sigmaSymbol]?.get(index)?.pow(-2) ?: 1.0
                    }

                    override fun derivativeOrNull(symbols: List<Symbol>): Expression<Double> = Expression { 0.0 }
                }

            override fun toString(): String = "PointWeightBySigma($sigmaSymbol)"
        }

        public val byYSigma: PointWeight = bySigma(Symbol.yError)
    }
}
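
// Sketch of a custom PointWeight (illustrative, not part of the commit): a constant
// weight for every point, for data without an error column. Mirrors the bySigma
// pattern above.
public fun constantWeightSketch(value: Double): PointWeight = object : PointWeight {
    override fun weight(problem: XYOptimization, index: Int): DifferentiableExpression<Double> =
        object : DifferentiableExpression<Double> {
            override fun invoke(arguments: Map<Symbol, Double>): Double = value

            override fun derivativeOrNull(symbols: List<Symbol>): Expression<Double> = Expression { 0.0 }
        }

    override fun toString(): String = "ConstantWeightSketch($value)"
}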

/**
 * An optimization problem for XY data.
 */
public class XYOptimization(
    override val features: FeatureSet<OptimizationFeature>,
    public val data: XYColumnarData<Double, Double, Double>,
    public val model: DifferentiableExpression<Double>,
) : OptimizationProblem
    internal val pointToCurveDistance: PointToCurveDistance = PointToCurveDistance.byY,
    internal val pointWeight: PointWeight = PointWeight.byYSigma,
) : OptimizationProblem<Double> {
    public fun distance(index: Int): DifferentiableExpression<Double> = pointToCurveDistance.distance(this, index)

    public fun weight(index: Int): DifferentiableExpression<Double> = pointWeight.weight(this, index)
}

public suspend fun Optimizer<FunctionOptimization<Double>>.maximumLogLikelihood(problem: XYOptimization): XYOptimization {
    val distanceBuilder = problem.getFeature() ?: PointToCurveDistance.byY
    val likelihood: DifferentiableExpression<Double> = object : DifferentiableExpression<Double> {
        override fun derivativeOrNull(symbols: List<Symbol>): Expression<Double>? {
            TODO("Not yet implemented")
public fun XYOptimization.withFeature(vararg features: OptimizationFeature): XYOptimization {
    return XYOptimization(this.features.with(*features), data, model, pointToCurveDistance, pointWeight)
}

private val oneOver2Pi = 1.0 / sqrt(2 * PI)

internal fun XYOptimization.likelihood(): DifferentiableExpression<Double> = object : DifferentiableExpression<Double> {
    override fun derivativeOrNull(symbols: List<Symbol>): Expression<Double> = Expression { arguments ->
        data.indices.sumOf { index ->

            val d = distance(index)(arguments)
            val weight = weight(index)(arguments)
            val weightDerivative = weight(index).derivative(symbols)(arguments)

            // -1 / (sqrt(2 PI) * sigma) + 2 (x - mu) / (2 sigma^2) * d mu / d theta - (x - mu)^2 / 2 * d w / d theta
            return@sumOf -oneOver2Pi * sqrt(weight) + //offset derivative
                    d * model.derivative(symbols)(arguments) * weight - //model derivative
                    d.pow(2) * weightDerivative / 2 //weight derivative
        }
    }

        override fun invoke(arguments: Map<Symbol, Double>): Double {
            var res = 0.0
            for (index in 0 until problem.data.size) {
                val d = distanceBuilder.distance(problem, index).invoke(arguments)
                val sigma: Double = TODO()
                res -= (d / sigma).pow(2)
            }
            return res
        }

    }
    val functionOptimization = FunctionOptimization(problem.features, likelihood)
    val result = optimize(functionOptimization)

    override fun invoke(arguments: Map<Symbol, Double>): Double {
        return data.indices.sumOf { index ->
            val d = distance(index)(arguments)
            val weight = weight(index)(arguments)
            // ln(1 / sqrt(2 PI sigma^2)) - (x - mu)^2 / (2 sigma^2)
            oneOver2Pi * ln(weight) - d.pow(2) * weight
        } / 2
    }
}
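
// Worked reference (illustrative, not part of the commit): with weight = 1/sigma^2 and
// d = x - mu, the per-point Gaussian log-likelihood is
// ln N(x | mu, sigma) = (ln(weight) - ln(2 * PI) - d * d * weight) / 2.
fun logLikelihoodPointSketch(d: Double, weight: Double): Double =
    (ln(weight) - ln(2 * PI) - d * d * weight) / 2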

/**
 * Optimize the given XY (least squares) [problem] using this function [Optimizer].
 * The problem is treated as a maximum likelihood problem and is solved by maximizing the logarithmic likelihood,
 * respecting possible weight dependency on the model and parameters.
 */
public suspend fun Optimizer<Double, FunctionOptimization<Double>>.maximumLogLikelihood(problem: XYOptimization): XYOptimization {
    val functionOptimization = FunctionOptimization(problem.features, problem.likelihood())
    val result = optimize(functionOptimization.withFeatures(FunctionOptimizationTarget.MAXIMIZE))
    return XYOptimization(result.features, problem.data, problem.model)
}

public suspend fun Optimizer<Double, FunctionOptimization<Double>>.maximumLogLikelihood(
    data: XYColumnarData<Double, Double, Double>,
    model: DifferentiableExpression<Double>,
    builder: XYOptimizationBuilder.() -> Unit,
): XYOptimization = maximumLogLikelihood(XYOptimization(data, model, builder))
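
// Hedged usage sketch (not part of the commit): fit a model to XY data by maximizing the
// log-likelihood. The optimizer instance comes from elsewhere; the empty builder block is
// assumed to keep the default distance and weight features.
suspend fun fitSketch(
    optimizer: Optimizer<Double, FunctionOptimization<Double>>,
    data: XYColumnarData<Double, Double, Double>,
    model: DifferentiableExpression<Double>,
): XYOptimization = optimizer.maximumLogLikelihood(data, model) { }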

//public suspend fun XYColumnarData<Double, Double, Double>.fitWith(
//    optimizer: XYOptimization,
//    problemBuilder: XYOptimizationBuilder.() -> Unit = {},
//
//)


//
//@UnstableKMathAPI
//public interface XYFit<T> : OptimizationProblem {
@@ -16,6 +16,7 @@
package ru.inr.mass.minuit

import space.kscience.kmath.optimization.minuit.MINUITPlugin
import space.kscience.kmath.optimization.minuit.MinimumSeed

/**
 *
@@ -16,6 +16,7 @@
package ru.inr.mass.minuit

import ru.inr.mass.minuit.*
import space.kscience.kmath.optimization.minuit.MinimumSeed

/**
 * Result of the minimization.
@@ -15,6 +15,8 @@
 */
package ru.inr.mass.minuit

import space.kscience.kmath.optimization.minuit.MinimumSeed

/**
 *
 * @version $Id$
@@ -13,7 +13,9 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package ru.inr.mass.minuit
package space.kscience.kmath.optimization.minuit

import ru.inr.mass.minuit.*

/**
 *
@@ -15,6 +15,8 @@
 */
package ru.inr.mass.minuit

import space.kscience.kmath.optimization.minuit.MinimumSeed

/**
 * base class for seed generators (starting values); the seed generator prepares
 * initial starting values from the input (MnUserParameterState) for the
@@ -17,6 +17,7 @@ package ru.inr.mass.minuit

import space.kscience.kmath.optimization.minuit.MINUITPlugin
import ru.inr.mass.minuit.*
import space.kscience.kmath.optimization.minuit.MinimumSeed

/**
 *
Some files were not shown because too many files have changed in this diff.