Refactor structure features. Basic curve fitting
This commit is contained in:
parent 248d42c4e0
commit 88d0c19a74
@@ -1,5 +1,3 @@
-import ru.mipt.npm.gradle.KSciencePublishingPlugin
-
 plugins {
     id("ru.mipt.npm.gradle.project")
 }
@@ -20,11 +18,11 @@ allprojects {
     }

     group = "space.kscience"
-    version = "0.3.0-dev-3"
+    version = "0.3.0-dev-4"
 }

 subprojects {
-    if (name.startsWith("kmath")) apply<KSciencePublishingPlugin>()
+    if (name.startsWith("kmath")) apply(plugin = "maven-publish")
 }

 readme {
@@ -8,10 +8,14 @@ import kscience.plotly.models.TraceValues
 import space.kscience.kmath.commons.optimization.chiSquared
 import space.kscience.kmath.commons.optimization.minimize
 import space.kscience.kmath.expressions.symbol
+import space.kscience.kmath.optimization.FunctionOptimization
+import space.kscience.kmath.optimization.OptimizationResult
 import space.kscience.kmath.real.DoubleVector
 import space.kscience.kmath.real.map
 import space.kscience.kmath.real.step
-import space.kscience.kmath.stat.*
+import space.kscience.kmath.stat.Distribution
+import space.kscience.kmath.stat.RandomGenerator
+import space.kscience.kmath.stat.normal
 import space.kscience.kmath.structures.asIterable
 import space.kscience.kmath.structures.toList
 import kotlin.math.pow
@@ -58,7 +62,7 @@ fun main() {
     val yErr = y.map { sqrt(it) }//RealVector.same(x.size, sigma)

     // compute differentiable chi^2 sum for given model ax^2 + bx + c
-    val chi2 = Fitting.chiSquared(x, y, yErr) { x1 ->
+    val chi2 = FunctionOptimization.chiSquared(x, y, yErr) { x1 ->
         //bind variables to autodiff context
         val a = bind(a)
         val b = bind(b)
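For orientation, here is a minimal sketch of the call pattern this example switches to. The symbols, start values, and data parameters below are illustrative stand-ins for the ones prepared earlier in the example, not part of the diff:

    import space.kscience.kmath.commons.optimization.chiSquared
    import space.kscience.kmath.commons.optimization.minimize
    import space.kscience.kmath.expressions.symbol
    import space.kscience.kmath.optimization.FunctionOptimization
    import space.kscience.kmath.optimization.OptimizationResult

    private val a by symbol
    private val b by symbol
    private val c by symbol

    // x, y and yErr stand in for the data buffers built earlier in the example.
    fun fitParabola(x: List<Double>, y: List<Double>, yErr: List<Double>): OptimizationResult<Double> {
        // Differentiable chi^2 for the model a*x^2 + b*x + c, as in the hunk above.
        val chi2 = FunctionOptimization.chiSquared(x, y, yErr) { arg ->
            bind(a) * arg.pow(2) + bind(b) * arg + bind(c)
        }
        // Gradient-based minimization from a rough starting point.
        return chi2.minimize(a to 1.5, b to 0.9, c to 1.0)
    }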
@@ -3,6 +3,7 @@ package space.kscience.kmath.commons.linear
 import org.apache.commons.math3.linear.*
 import space.kscience.kmath.linear.*
 import space.kscience.kmath.misc.UnstableKMathAPI
+import space.kscience.kmath.nd.StructureFeature
 import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.structures.DoubleBuffer
 import kotlin.reflect.KClass
@@ -89,7 +90,7 @@ public object CMLinearSpace : LinearSpace<Double, DoubleField> {
         v * this

     @UnstableKMathAPI
-    override fun <F : Any> getFeature(structure: Matrix<Double>, type: KClass<F>): F? {
+    override fun <F : StructureFeature> getFeature(structure: Matrix<Double>, type: KClass<out F>): F? {
         //Return the feature if it is intrinsic to the structure
         structure.getFeature(type)?.let { return it }

@@ -10,21 +10,25 @@ import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.AbstractSimplex
 import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.NelderMeadSimplex
 import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer
 import space.kscience.kmath.expressions.*
-import space.kscience.kmath.stat.OptimizationFeature
-import space.kscience.kmath.stat.OptimizationProblem
-import space.kscience.kmath.stat.OptimizationProblemFactory
-import space.kscience.kmath.stat.OptimizationResult
+import space.kscience.kmath.optimization.FunctionOptimization
+import space.kscience.kmath.optimization.OptimizationFeature
+import space.kscience.kmath.optimization.OptimizationProblemFactory
+import space.kscience.kmath.optimization.OptimizationResult
 import kotlin.reflect.KClass

 public operator fun PointValuePair.component1(): DoubleArray = point
 public operator fun PointValuePair.component2(): Double = value

-public class CMOptimizationProblem(override val symbols: List<Symbol>) :
-    OptimizationProblem<Double>, SymbolIndexer, OptimizationFeature {
+public class CMOptimization(
+    override val symbols: List<Symbol>,
+) : FunctionOptimization<Double>, SymbolIndexer, OptimizationFeature {
     private val optimizationData: HashMap<KClass<out OptimizationData>, OptimizationData> = HashMap()
-    private var optimizatorBuilder: (() -> MultivariateOptimizer)? = null
-    public var convergenceChecker: ConvergenceChecker<PointValuePair> = SimpleValueChecker(DEFAULT_RELATIVE_TOLERANCE,
-        DEFAULT_ABSOLUTE_TOLERANCE, DEFAULT_MAX_ITER)
+    private var optimizerBuilder: (() -> MultivariateOptimizer)? = null
+    public var convergenceChecker: ConvergenceChecker<PointValuePair> = SimpleValueChecker(
+        DEFAULT_RELATIVE_TOLERANCE,
+        DEFAULT_ABSOLUTE_TOLERANCE,
+        DEFAULT_MAX_ITER
+    )

     public fun addOptimizationData(data: OptimizationData) {
         optimizationData[data::class] = data
@@ -57,8 +61,8 @@ public class CMOptimizationProblem(override val symbols: List<Symbol>) :
             }
         }
         addOptimizationData(gradientFunction)
-        if (optimizatorBuilder == null) {
-            optimizatorBuilder = {
+        if (optimizerBuilder == null) {
+            optimizerBuilder = {
                 NonLinearConjugateGradientOptimizer(
                     NonLinearConjugateGradientOptimizer.Formula.FLETCHER_REEVES,
                     convergenceChecker
@@ -70,8 +74,8 @@ public class CMOptimizationProblem(override val symbols: List<Symbol>) :
     public fun simplex(simplex: AbstractSimplex) {
         addOptimizationData(simplex)
         //Set optimization builder to simplex if it is not present
-        if (optimizatorBuilder == null) {
-            optimizatorBuilder = { SimplexOptimizer(convergenceChecker) }
+        if (optimizerBuilder == null) {
+            optimizerBuilder = { SimplexOptimizer(convergenceChecker) }
         }
     }

@@ -84,7 +88,7 @@ public class CMOptimizationProblem(override val symbols: List<Symbol>) :
     }

     public fun optimizer(block: () -> MultivariateOptimizer) {
-        optimizatorBuilder = block
+        optimizerBuilder = block
     }

     override fun update(result: OptimizationResult<Double>) {
@@ -92,19 +96,19 @@ public class CMOptimizationProblem(override val symbols: List<Symbol>) :
     }

     override fun optimize(): OptimizationResult<Double> {
-        val optimizer = optimizatorBuilder?.invoke() ?: error("Optimizer not defined")
+        val optimizer = optimizerBuilder?.invoke() ?: error("Optimizer not defined")
         val (point, value) = optimizer.optimize(*optimizationData.values.toTypedArray())
         return OptimizationResult(point.toMap(), value, setOf(this))
     }

-    public companion object : OptimizationProblemFactory<Double, CMOptimizationProblem> {
+    public companion object : OptimizationProblemFactory<Double, CMOptimization> {
         public const val DEFAULT_RELATIVE_TOLERANCE: Double = 1e-4
         public const val DEFAULT_ABSOLUTE_TOLERANCE: Double = 1e-4
         public const val DEFAULT_MAX_ITER: Int = 1000

-        override fun build(symbols: List<Symbol>): CMOptimizationProblem = CMOptimizationProblem(symbols)
+        override fun build(symbols: List<Symbol>): CMOptimization = CMOptimization(symbols)
     }
 }

-public fun CMOptimizationProblem.initialGuess(vararg pairs: Pair<Symbol, Double>): Unit = initialGuess(pairs.toMap())
-public fun CMOptimizationProblem.simplexSteps(vararg pairs: Pair<Symbol, Double>): Unit = simplexSteps(pairs.toMap())
+public fun CMOptimization.initialGuess(vararg pairs: Pair<Symbol, Double>): Unit = initialGuess(pairs.toMap())
+public fun CMOptimization.simplexSteps(vararg pairs: Pair<Symbol, Double>): Unit = simplexSteps(pairs.toMap())
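As a quick orientation for the rename, a hedged sketch of building the problem directly through the companion factory; `chi2` and the symbols are assumed inputs, not part of the diff:

    import space.kscience.kmath.commons.optimization.CMOptimization
    import space.kscience.kmath.commons.optimization.initialGuess
    import space.kscience.kmath.expressions.DifferentiableExpression
    import space.kscience.kmath.expressions.Expression
    import space.kscience.kmath.expressions.symbol
    import space.kscience.kmath.optimization.OptimizationResult
    import space.kscience.kmath.optimization.invoke

    private val a by symbol
    private val b by symbol

    // chi2 is an assumed differentiable objective over the symbols a and b.
    fun runCM(chi2: DifferentiableExpression<Double, Expression<Double>>): OptimizationResult<Double> {
        // The companion object of CMOptimization is its OptimizationProblemFactory,
        // so the factory invoke operator builds and configures the problem in one call.
        val problem = CMOptimization(listOf(a, b)) {
            initialGuess(a to 1.0, b to 1.0) // vararg extension introduced in this commit
            diffExpression(chi2)             // registers the function and its gradient
        }
        return problem.optimize()
    }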
@@ -6,16 +6,16 @@ import space.kscience.kmath.commons.expressions.DerivativeStructureField
 import space.kscience.kmath.expressions.DifferentiableExpression
 import space.kscience.kmath.expressions.Expression
 import space.kscience.kmath.expressions.Symbol
-import space.kscience.kmath.stat.Fitting
-import space.kscience.kmath.stat.OptimizationResult
-import space.kscience.kmath.stat.optimizeWith
+import space.kscience.kmath.optimization.FunctionOptimization
+import space.kscience.kmath.optimization.OptimizationResult
+import space.kscience.kmath.optimization.optimizeWith
 import space.kscience.kmath.structures.Buffer
 import space.kscience.kmath.structures.asBuffer

 /**
  * Generate a chi squared expression from given x-y-sigma data and inline model. Provides automatic differentiation
  */
-public fun Fitting.chiSquared(
+public fun FunctionOptimization.Companion.chiSquared(
     x: Buffer<Double>,
     y: Buffer<Double>,
     yErr: Buffer<Double>,
@@ -25,7 +25,7 @@ public fun Fitting.chiSquared(
 /**
  * Generate a chi squared expression from given x-y-sigma data and inline model. Provides automatic differentiation
  */
-public fun Fitting.chiSquared(
+public fun FunctionOptimization.Companion.chiSquared(
     x: Iterable<Double>,
     y: Iterable<Double>,
     yErr: Iterable<Double>,
@@ -43,23 +43,23 @@ public fun Fitting.chiSquared(
 */
 public fun Expression<Double>.optimize(
     vararg symbols: Symbol,
-    configuration: CMOptimizationProblem.() -> Unit,
-): OptimizationResult<Double> = optimizeWith(CMOptimizationProblem, symbols = symbols, configuration)
+    configuration: CMOptimization.() -> Unit,
+): OptimizationResult<Double> = optimizeWith(CMOptimization, symbols = symbols, configuration)

 /**
  * Optimize differentiable expression
  */
 public fun DifferentiableExpression<Double, Expression<Double>>.optimize(
     vararg symbols: Symbol,
-    configuration: CMOptimizationProblem.() -> Unit,
-): OptimizationResult<Double> = optimizeWith(CMOptimizationProblem, symbols = symbols, configuration)
+    configuration: CMOptimization.() -> Unit,
+): OptimizationResult<Double> = optimizeWith(CMOptimization, symbols = symbols, configuration)

 public fun DifferentiableExpression<Double, Expression<Double>>.minimize(
     vararg startPoint: Pair<Symbol, Double>,
-    configuration: CMOptimizationProblem.() -> Unit = {},
+    configuration: CMOptimization.() -> Unit = {},
 ): OptimizationResult<Double> {
     require(startPoint.isNotEmpty()) { "Must provide a list of symbols for optimization" }
-    val problem = CMOptimizationProblem(startPoint.map { it.first }).apply(configuration)
+    val problem = CMOptimization(startPoint.map { it.first }).apply(configuration)
     problem.diffExpression(this)
     problem.initialGuess(startPoint.toMap())
     problem.goal(GoalType.MINIMIZE)
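A hedged usage sketch for the renamed receiver type in these extensions: it configures a derivative-free Nelder-Mead run on a plain Expression. The symbols and the expression are assumed, and GoalType is the Commons Math OptimizationData enum passed through the public addOptimizationData call shown above:

    import org.apache.commons.math3.optim.nonlinear.scalar.GoalType
    import space.kscience.kmath.commons.optimization.initialGuess
    import space.kscience.kmath.commons.optimization.optimize
    import space.kscience.kmath.commons.optimization.simplexSteps
    import space.kscience.kmath.expressions.Expression
    import space.kscience.kmath.expressions.symbol
    import space.kscience.kmath.optimization.OptimizationResult

    private val a by symbol
    private val b by symbol

    // target is an assumed Expression<Double> over the symbols a and b.
    fun runSimplex(target: Expression<Double>): OptimizationResult<Double> =
        target.optimize(a, b) {
            initialGuess(a to 1.0, b to 1.0)       // extension shown in this commit
            simplexSteps(a to 0.5, b to 0.5)       // selects the Nelder-Mead simplex optimizer
            addOptimizationData(GoalType.MINIMIZE) // GoalType implements OptimizationData
        }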
@@ -3,8 +3,8 @@ package space.kscience.kmath.commons.optimization
 import org.junit.jupiter.api.Test
 import space.kscience.kmath.commons.expressions.DerivativeStructureExpression
 import space.kscience.kmath.expressions.symbol
+import space.kscience.kmath.optimization.FunctionOptimization
 import space.kscience.kmath.stat.Distribution
-import space.kscience.kmath.stat.Fitting
 import space.kscience.kmath.stat.RandomGenerator
 import space.kscience.kmath.stat.normal
 import kotlin.math.pow
@@ -55,7 +55,7 @@ internal class OptimizeTest {

         val yErr = List(x.size) { sigma }

-        val chi2 = Fitting.chiSquared(x, y, yErr) { x1 ->
+        val chi2 = FunctionOptimization.chiSquared(x, y, yErr) { x1 ->
             val cWithDefault = bindSymbolOrNull(c) ?: one
             bind(a) * x1.pow(2) + bind(b) * x1 + cWithDefault
         }
@@ -575,7 +575,7 @@ public final class space/kscience/kmath/linear/MatrixBuilderKt {
     public static final fun row (Lspace/kscience/kmath/linear/LinearSpace;[Ljava/lang/Object;)Lspace/kscience/kmath/nd/Structure2D;
 }

-public abstract interface class space/kscience/kmath/linear/MatrixFeature {
+public abstract interface class space/kscience/kmath/linear/MatrixFeature : space/kscience/kmath/nd/StructureFeature {
 }

 public final class space/kscience/kmath/linear/MatrixFeaturesKt {
@@ -1060,11 +1060,15 @@ public final class space/kscience/kmath/nd/Strides$DefaultImpls {
 }

 public abstract interface class space/kscience/kmath/nd/Structure1D : space/kscience/kmath/nd/StructureND, space/kscience/kmath/structures/Buffer {
+    public static final field Companion Lspace/kscience/kmath/nd/Structure1D$Companion;
     public abstract fun get ([I)Ljava/lang/Object;
     public abstract fun getDimension ()I
     public abstract fun iterator ()Ljava/util/Iterator;
 }

+public final class space/kscience/kmath/nd/Structure1D$Companion {
+}
+
 public final class space/kscience/kmath/nd/Structure1D$DefaultImpls {
     public static fun get (Lspace/kscience/kmath/nd/Structure1D;[I)Ljava/lang/Object;
     public static fun getDimension (Lspace/kscience/kmath/nd/Structure1D;)I
@@ -1104,6 +1108,9 @@ public final class space/kscience/kmath/nd/Structure2DKt {
     public static final fun as2D (Lspace/kscience/kmath/nd/StructureND;)Lspace/kscience/kmath/nd/Structure2D;
 }

+public abstract interface class space/kscience/kmath/nd/StructureFeature {
+}
+
 public abstract interface class space/kscience/kmath/nd/StructureND {
     public static final field Companion Lspace/kscience/kmath/nd/StructureND$Companion;
     public abstract fun elements ()Lkotlin/sequences/Sequence;
@@ -164,7 +164,7 @@ public interface LinearSpace<T : Any, out A : Ring<T>> {
      * @return a feature object or `null` if it isn't present.
      */
     @UnstableKMathAPI
-    public fun <F : Any> getFeature(structure: Matrix<T>, type: KClass<F>): F? = structure.getFeature(type)
+    public fun <F : StructureFeature> getFeature(structure: Matrix<T>, type: KClass<out F>): F? = structure.getFeature(type)

     public companion object {

@@ -194,7 +194,7 @@ public interface LinearSpace<T : Any, out A : Ring<T>> {
  * @return a feature object or `null` if it isn't present.
  */
 @UnstableKMathAPI
-public inline fun <T : Any, reified F : Any> LinearSpace<T, *>.getFeature(structure: Matrix<T>): F? =
+public inline fun <T : Any, reified F : StructureFeature> LinearSpace<T, *>.getFeature(structure: Matrix<T>): F? =
     getFeature(structure, F::class)


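A hedged usage sketch of the tightened reified overload; DeterminantFeature is assumed to be one of the existing MatrixFeature implementations in kmath-core:

    import space.kscience.kmath.linear.DeterminantFeature
    import space.kscience.kmath.linear.LinearSpace
    import space.kscience.kmath.linear.Matrix
    import space.kscience.kmath.linear.getFeature
    import space.kscience.kmath.misc.UnstableKMathAPI

    // After this change the requested feature type must extend StructureFeature,
    // which every MatrixFeature (including the assumed DeterminantFeature) now does.
    @OptIn(UnstableKMathAPI::class)
    fun LinearSpace<Double, *>.determinantOrNull(matrix: Matrix<Double>): Double? {
        val feature: DeterminantFeature<Double>? = getFeature(matrix)
        return feature?.determinant
    }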
@@ -1,10 +1,12 @@
 package space.kscience.kmath.linear

+import space.kscience.kmath.nd.StructureFeature
+
 /**
  * A marker interface representing some properties of matrices or additional transformations of them. Features are used
  * to optimize matrix operations performance in some cases or retrieve the APIs.
  */
-public interface MatrixFeature
+public interface MatrixFeature: StructureFeature

 /**
  * Matrices with this feature are considered to have only diagonal non-null elements.
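Since MatrixFeature is now a StructureFeature, user-defined markers automatically satisfy the new getFeature bounds; a hypothetical illustration (SymmetricFeature is not part of kmath):

    import space.kscience.kmath.linear.MatrixFeature
    import space.kscience.kmath.misc.UnstableKMathAPI
    import space.kscience.kmath.nd.StructureND
    import space.kscience.kmath.nd.getFeature

    // Hypothetical marker: tags matrices that are known to be symmetric.
    object SymmetricFeature : MatrixFeature

    // Any StructureND can be queried for it through the StructureFeature-bounded API.
    @OptIn(UnstableKMathAPI::class)
    fun StructureND<*>.isTaggedSymmetric(): Boolean = getFeature<SymmetricFeature>() != null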
@@ -1,6 +1,7 @@
 package space.kscience.kmath.linear

 import space.kscience.kmath.misc.UnstableKMathAPI
+import space.kscience.kmath.nd.StructureFeature
 import space.kscience.kmath.nd.getFeature
 import space.kscience.kmath.operations.Ring
 import kotlin.reflect.KClass
@@ -20,7 +21,7 @@ public class MatrixWrapper<T : Any> internal constructor(
      */
     @UnstableKMathAPI
     @Suppress("UNCHECKED_CAST")
-    override fun <T : Any> getFeature(type: KClass<T>): T? = features.singleOrNull { type.isInstance(it) } as? T
+    override fun <F : StructureFeature> getFeature(type: KClass<out F>): F? = features.singleOrNull { type.isInstance(it) } as? F
         ?: origin.getFeature(type)

     override fun toString(): String {
@@ -67,7 +67,8 @@ public interface AlgebraND<T, C : Algebra<T>> {
      * @return a feature object or `null` if it isn't present.
      */
     @UnstableKMathAPI
-    public fun <F : Any> getFeature(structure: StructureND<T>, type: KClass<F>): F? = structure.getFeature(type)
+    public fun <F : StructureFeature> getFeature(structure: StructureND<T>, type: KClass<out F>): F? =
+        structure.getFeature(type)

     public companion object
 }
@@ -81,7 +82,7 @@ public interface AlgebraND<T, C : Algebra<T>> {
  * @return a feature object or `null` if it isn't present.
  */
 @UnstableKMathAPI
-public inline fun <T : Any, reified F : Any> AlgebraND<T, *>.getFeature(structure: StructureND<T>): F? =
+public inline fun <T : Any, reified F : StructureFeature> AlgebraND<T, *>.getFeature(structure: StructureND<T>): F? =
     getFeature(structure, F::class)

 /**
@@ -15,6 +15,8 @@ public interface Structure1D<T> : StructureND<T>, Buffer<T> {
     }

     public override operator fun iterator(): Iterator<T> = (0 until size).asSequence().map(::get).iterator()
+
+    public companion object
 }

 /**
@@ -69,7 +69,7 @@ private inline class Structure2DWrapper<T>(val structure: StructureND<T>) : Stru
     override operator fun get(i: Int, j: Int): T = structure[i, j]

     @UnstableKMathAPI
-    override fun <F : Any> getFeature(type: KClass<F>): F? = structure.getFeature(type)
+    override fun <F : StructureFeature> getFeature(type: KClass<out F>): F? = structure.getFeature(type)

     override fun elements(): Sequence<Pair<IntArray, T>> = structure.elements()
 }
@@ -7,6 +7,8 @@ import kotlin.jvm.JvmName
 import kotlin.native.concurrent.ThreadLocal
 import kotlin.reflect.KClass

+public interface StructureFeature
+
 /**
  * Represents n-dimensional structure, i.e. multidimensional container of items of the same type and size. The number
  * of dimensions and items in an array is defined by its shape, which is a sequence of non-negative integers that
@@ -48,7 +50,7 @@ public interface StructureND<T> {
      * If the feature is not present, null is returned.
      */
     @UnstableKMathAPI
-    public fun <F : Any> getFeature(type: KClass<F>): F? = null
+    public fun <F : StructureFeature> getFeature(type: KClass<out F>): F? = null

     public companion object {
         /**
@@ -144,7 +146,7 @@ public interface StructureND<T> {
 public operator fun <T> StructureND<T>.get(vararg index: Int): T = get(index)

 @UnstableKMathAPI
-public inline fun <reified T : Any> StructureND<*>.getFeature(): T? = getFeature(T::class)
+public inline fun <reified T : StructureFeature> StructureND<*>.getFeature(): T? = getFeature(T::class)

 /**
  * Represents mutable [StructureND].
@@ -8,5 +8,5 @@ public abstract class BlockingDoubleChain : Chain<Double> {

     override suspend fun next(): Double = nextDouble()

-    public fun nextBlock(size: Int): DoubleArray = DoubleArray(size) { nextDouble() }
+    public open fun nextBlock(size: Int): DoubleArray = DoubleArray(size) { nextDouble() }
 }
@@ -4,6 +4,7 @@ import org.ejml.dense.row.factory.DecompositionFactory_DDRM
 import org.ejml.simple.SimpleMatrix
 import space.kscience.kmath.linear.*
 import space.kscience.kmath.misc.UnstableKMathAPI
+import space.kscience.kmath.nd.StructureFeature
 import space.kscience.kmath.nd.getFeature
 import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.structures.DoubleBuffer
@@ -89,7 +90,7 @@ public object EjmlLinearSpace : LinearSpace<Double, DoubleField> {
         v.toEjml().origin.scale(this).wrapVector()

     @UnstableKMathAPI
-    override fun <F : Any> getFeature(structure: Matrix<Double>, type: KClass<F>): F? {
+    override fun <F : StructureFeature> getFeature(structure: Matrix<Double>, type: KClass<out F>): F? {
         //Return the feature if it is intrinsic to the structure
         structure.getFeature(type)?.let { return it }

@@ -1,7 +1,43 @@
 package space.kscience.kmath.real

-import space.kscience.kmath.structures.asBuffer
-import kotlin.math.abs
+import space.kscience.kmath.misc.UnstableKMathAPI
+import space.kscience.kmath.structures.Buffer
+import space.kscience.kmath.structures.DoubleBuffer
+import kotlin.math.floor
+
+public val ClosedFloatingPointRange<Double>.length: Double get() = endInclusive - start
+
+/**
+ * Create a Buffer-based grid with equally distributed [numberOfPoints] points. The range could be increasing or decreasing.
+ * If range has a zero size, then the buffer consisting of [numberOfPoints] equal values is returned.
+ */
+@UnstableKMathAPI
+public fun Buffer.Companion.fromRange(range: ClosedFloatingPointRange<Double>, numberOfPoints: Int): DoubleBuffer {
+    require(numberOfPoints >= 2) { "Number of points in grid must be more than 1" }
+    val normalizedRange = when {
+        range.endInclusive > range.start -> range
+        range.endInclusive < range.start -> range.endInclusive..range.start
+        else -> return DoubleBuffer(numberOfPoints) { range.start }
+    }
+    val step = normalizedRange.length / (numberOfPoints - 1)
+    return DoubleBuffer(numberOfPoints) { normalizedRange.start + step * it / (numberOfPoints - 1) }
+}
+
+/**
+ * Create a Buffer-based grid with equally distributed points with a fixed [step]. The range could be increasing or decreasing.
+ * If the step is larger than the range size, single point is returned.
+ */
+@UnstableKMathAPI
+public fun Buffer.Companion.fromRange(range: ClosedFloatingPointRange<Double>, step: Double): DoubleBuffer {
+    require(step > 0) { "The grid step must be positive" }
+    val normalizedRange = when {
+        range.endInclusive > range.start -> range
+        range.endInclusive < range.start -> range.endInclusive..range.start
+        else -> return DoubleBuffer(range.start)
+    }
+    val numberOfPoints = floor(normalizedRange.length / step).toInt()
+    return DoubleBuffer(numberOfPoints) { normalizedRange.start + step * it / (numberOfPoints - 1) }
+}

 /**
  * Convert double range to sequence.
@@ -11,35 +47,5 @@ import kotlin.math.abs
  *
  * If step is negative, the same goes from upper boundary downwards
  */
-public fun ClosedFloatingPointRange<Double>.toSequenceWithStep(step: Double): Sequence<Double> = when {
-    step == 0.0 -> error("Zero step in double progression")
-
-    step > 0 -> sequence {
-        var current = start
-
-        while (current <= endInclusive) {
-            yield(current)
-            current += step
-        }
-    }
-
-    else -> sequence {
-        var current = endInclusive
-
-        while (current >= start) {
-            yield(current)
-            current += step
-        }
-    }
-}
-
-public infix fun ClosedFloatingPointRange<Double>.step(step: Double): DoubleVector =
-    toSequenceWithStep(step).toList().asBuffer()
-
-/**
- * Convert double range to sequence with the fixed number of points
- */
-public fun ClosedFloatingPointRange<Double>.toSequenceWithPoints(numPoints: Int): Sequence<Double> {
-    require(numPoints > 1) { "The number of points should be more than 2" }
-    return toSequenceWithStep(abs(endInclusive - start) / (numPoints - 1))
-}
+@UnstableKMathAPI
+public infix fun ClosedFloatingPointRange<Double>.step(step: Double): DoubleBuffer = Buffer.fromRange(this, step)
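A hedged sketch of the new grid builders in use; function and parameter names are taken from the hunk above, and the opt-in annotation is required because both helpers are marked @UnstableKMathAPI:

    import space.kscience.kmath.misc.UnstableKMathAPI
    import space.kscience.kmath.real.fromRange
    import space.kscience.kmath.real.step
    import space.kscience.kmath.structures.Buffer
    import space.kscience.kmath.structures.DoubleBuffer

    @OptIn(UnstableKMathAPI::class)
    fun grids() {
        // Eleven equally spaced points on [0, 1].
        val byCount: DoubleBuffer = Buffer.fromRange(0.0..1.0, numberOfPoints = 11)
        // Fixed step; the infix form delegates to fromRange as shown above.
        val byStep: DoubleBuffer = 0.0..1.0 step 0.1
        println("${byCount.size} points by count, ${byStep.size} points by step")
    }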
@@ -3,14 +3,6 @@ plugins {
 }

 kotlin.sourceSets {
-    all {
-        languageSettings.apply {
-            useExperimentalAnnotation("kotlinx.coroutines.FlowPreview")
-            useExperimentalAnnotation("kotlinx.coroutines.ExperimentalCoroutinesApi")
-            useExperimentalAnnotation("kotlinx.coroutines.ObsoleteCoroutinesApi")
-        }
-    }
-
     commonMain {
         dependencies {
             api(project(":kmath-coroutines"))
@@ -0,0 +1,17 @@
+package space.kscience.kmath.optimization
+
+import space.kscience.kmath.expressions.DifferentiableExpression
+import space.kscience.kmath.expressions.StringSymbol
+import space.kscience.kmath.expressions.Symbol
+import space.kscience.kmath.structures.Buffer
+
+public interface DataFit<T : Any> : Optimization<T> {
+
+    public fun modelAndData(
+        x: Buffer<T>,
+        y: Buffer<T>,
+        yErr: Buffer<T>,
+        model: DifferentiableExpression<T, *>,
+        xSymbol: Symbol = StringSymbol("x"),
+    )
+}
@@ -0,0 +1,122 @@
+package space.kscience.kmath.optimization
+
+import space.kscience.kmath.expressions.*
+import space.kscience.kmath.operations.ExtendedField
+import space.kscience.kmath.structures.Buffer
+import space.kscience.kmath.structures.indices
+import kotlin.math.pow
+
+/**
+ * A likelihood function optimization problem
+ */
+public interface FunctionOptimization<T: Any>: Optimization<T>, DataFit<T> {
+    /**
+     * Define the initial guess for the optimization problem
+     */
+    public fun initialGuess(map: Map<Symbol, T>)
+
+    /**
+     * Set an objective function expression
+     */
+    public fun expression(expression: Expression<T>)
+
+    /**
+     * Set a differentiable expression as objective function as function and gradient provider
+     */
+    public fun diffExpression(expression: DifferentiableExpression<T, Expression<T>>)
+
+    override fun modelAndData(
+        x: Buffer<T>,
+        y: Buffer<T>,
+        yErr: Buffer<T>,
+        model: DifferentiableExpression<T, *>,
+        xSymbol: Symbol,
+    ) {
+        require(x.size == y.size) { "X and y buffers should be of the same size" }
+        require(y.size == yErr.size) { "Y and yErr buffer should of the same size" }
+
+    }
+
+    public companion object{
+        /**
+         * Generate a chi squared expression from given x-y-sigma data and inline model. Provides automatic differentiation
+         */
+        public fun <T : Any, I : Any, A> chiSquared(
+            autoDiff: AutoDiffProcessor<T, I, A, Expression<T>>,
+            x: Buffer<T>,
+            y: Buffer<T>,
+            yErr: Buffer<T>,
+            model: A.(I) -> I,
+        ): DifferentiableExpression<T, Expression<T>> where A : ExtendedField<I>, A : ExpressionAlgebra<T, I> {
+            require(x.size == y.size) { "X and y buffers should be of the same size" }
+            require(y.size == yErr.size) { "Y and yErr buffer should of the same size" }
+
+            return autoDiff.process {
+                var sum = zero
+
+                x.indices.forEach {
+                    val xValue = const(x[it])
+                    val yValue = const(y[it])
+                    val yErrValue = const(yErr[it])
+                    val modelValue = model(xValue)
+                    sum += ((yValue - modelValue) / yErrValue).pow(2)
+                }
+
+                sum
+            }
+        }
+
+        /**
+         * Generate a chi squared expression from given x-y-sigma model represented by an expression. Does not provide derivatives
+         */
+        public fun chiSquared(
+            x: Buffer<Double>,
+            y: Buffer<Double>,
+            yErr: Buffer<Double>,
+            model: Expression<Double>,
+            xSymbol: Symbol = StringSymbol("x"),
+        ): Expression<Double> {
+            require(x.size == y.size) { "X and y buffers should be of the same size" }
+            require(y.size == yErr.size) { "Y and yErr buffer should of the same size" }
+
+            return Expression { arguments ->
+                x.indices.sumByDouble {
+                    val xValue = x[it]
+                    val yValue = y[it]
+                    val yErrValue = yErr[it]
+                    val modifiedArgs = arguments + (xSymbol to xValue)
+                    val modelValue = model(modifiedArgs)
+                    ((yValue - modelValue) / yErrValue).pow(2)
+                }
+            }
+        }
+    }
+}
+
+/**
+ * Optimize expression without derivatives using specific [OptimizationProblemFactory]
+ */
+public fun <T : Any, F : FunctionOptimization<T>> Expression<T>.optimizeWith(
+    factory: OptimizationProblemFactory<T, F>,
+    vararg symbols: Symbol,
+    configuration: F.() -> Unit,
+): OptimizationResult<T> {
+    require(symbols.isNotEmpty()) { "Must provide a list of symbols for optimization" }
+    val problem = factory(symbols.toList(), configuration)
+    problem.expression(this)
+    return problem.optimize()
+}
+
+/**
+ * Optimize differentiable expression using specific [OptimizationProblemFactory]
+ */
+public fun <T : Any, F : FunctionOptimization<T>> DifferentiableExpression<T, Expression<T>>.optimizeWith(
+    factory: OptimizationProblemFactory<T, F>,
+    vararg symbols: Symbol,
+    configuration: F.() -> Unit,
+): OptimizationResult<T> {
+    require(symbols.isNotEmpty()) { "Must provide a list of symbols for optimization" }
+    val problem = factory(symbols.toList(), configuration)
+    problem.diffExpression(this)
+    return problem.optimize()
+}
@@ -0,0 +1,44 @@
+package space.kscience.kmath.optimization
+
+import space.kscience.kmath.expressions.Symbol
+
+public interface OptimizationFeature
+
+public class OptimizationResult<T>(
+    public val point: Map<Symbol, T>,
+    public val value: T,
+    public val features: Set<OptimizationFeature> = emptySet(),
+) {
+    override fun toString(): String {
+        return "OptimizationResult(point=$point, value=$value)"
+    }
+}
+
+public operator fun <T> OptimizationResult<T>.plus(
+    feature: OptimizationFeature,
+): OptimizationResult<T> = OptimizationResult(point, value, features + feature)
+
+/**
+ * An optimization problem builder over [T] variables
+ */
+public interface Optimization<T : Any> {
+
+    /**
+     * Update the problem from previous optimization run
+     */
+    public fun update(result: OptimizationResult<T>)
+
+    /**
+     * Make an optimization run
+     */
+    public fun optimize(): OptimizationResult<T>
+}
+
+public fun interface OptimizationProblemFactory<T : Any, out P : Optimization<T>> {
+    public fun build(symbols: List<Symbol>): P
+}
+
+public operator fun <T : Any, P : Optimization<T>> OptimizationProblemFactory<T, P>.invoke(
+    symbols: List<Symbol>,
+    block: P.() -> Unit,
+): P = build(symbols).apply(block)
@@ -1,63 +0,0 @@
-package space.kscience.kmath.stat
-
-import space.kscience.kmath.expressions.*
-import space.kscience.kmath.operations.ExtendedField
-import space.kscience.kmath.structures.Buffer
-import space.kscience.kmath.structures.indices
-import kotlin.math.pow
-
-public object Fitting {
-
-    /**
-     * Generate a chi squared expression from given x-y-sigma data and inline model. Provides automatic differentiation
-     */
-    public fun <T : Any, I : Any, A> chiSquared(
-        autoDiff: AutoDiffProcessor<T, I, A, Expression<T>>,
-        x: Buffer<T>,
-        y: Buffer<T>,
-        yErr: Buffer<T>,
-        model: A.(I) -> I,
-    ): DifferentiableExpression<T, Expression<T>> where A : ExtendedField<I>, A : ExpressionAlgebra<T, I> {
-        require(x.size == y.size) { "X and y buffers should be of the same size" }
-        require(y.size == yErr.size) { "Y and yErr buffer should of the same size" }
-
-        return autoDiff.process {
-            var sum = zero
-
-            x.indices.forEach {
-                val xValue = const(x[it])
-                val yValue = const(y[it])
-                val yErrValue = const(yErr[it])
-                val modelValue = model(xValue)
-                sum += ((yValue - modelValue) / yErrValue).pow(2)
-            }
-
-            sum
-        }
-    }
-
-    /**
-     * Generate a chi squared expression from given x-y-sigma model represented by an expression. Does not provide derivatives
-     */
-    public fun chiSquared(
-        x: Buffer<Double>,
-        y: Buffer<Double>,
-        yErr: Buffer<Double>,
-        model: Expression<Double>,
-        xSymbol: Symbol = StringSymbol("x"),
-    ): Expression<Double> {
-        require(x.size == y.size) { "X and y buffers should be of the same size" }
-        require(y.size == yErr.size) { "Y and yErr buffer should of the same size" }
-
-        return Expression { arguments ->
-            x.indices.sumByDouble {
-                val xValue = x[it]
-                val yValue = y[it]
-                val yErrValue = yErr[it]
-                val modifiedArgs = arguments + (xSymbol to xValue)
-                val modelValue = model(modifiedArgs)
-                ((yValue - modelValue) / yErrValue).pow(2)
-            }
-        }
-    }
-}
@ -1,88 +0,0 @@
|
|||||||
package space.kscience.kmath.stat
|
|
||||||
|
|
||||||
import space.kscience.kmath.expressions.DifferentiableExpression
|
|
||||||
import space.kscience.kmath.expressions.Expression
|
|
||||||
import space.kscience.kmath.expressions.Symbol
|
|
||||||
|
|
||||||
public interface OptimizationFeature
|
|
||||||
|
|
||||||
public class OptimizationResult<T>(
|
|
||||||
public val point: Map<Symbol, T>,
|
|
||||||
public val value: T,
|
|
||||||
public val features: Set<OptimizationFeature> = emptySet(),
|
|
||||||
) {
|
|
||||||
override fun toString(): String {
|
|
||||||
return "OptimizationResult(point=$point, value=$value)"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public operator fun <T> OptimizationResult<T>.plus(
|
|
||||||
feature: OptimizationFeature,
|
|
||||||
): OptimizationResult<T> = OptimizationResult(point, value, features + feature)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* A configuration builder for optimization problem
|
|
||||||
*/
|
|
||||||
public interface OptimizationProblem<T : Any> {
|
|
||||||
/**
|
|
||||||
* Define the initial guess for the optimization problem
|
|
||||||
*/
|
|
||||||
public fun initialGuess(map: Map<Symbol, T>)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Set an objective function expression
|
|
||||||
*/
|
|
||||||
public fun expression(expression: Expression<T>)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Set a differentiable expression as objective function as function and gradient provider
|
|
||||||
*/
|
|
||||||
public fun diffExpression(expression: DifferentiableExpression<T, Expression<T>>)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Update the problem from previous optimization run
|
|
||||||
*/
|
|
||||||
public fun update(result: OptimizationResult<T>)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Make an optimization run
|
|
||||||
*/
|
|
||||||
public fun optimize(): OptimizationResult<T>
|
|
||||||
}
|
|
||||||
|
|
||||||
public fun interface OptimizationProblemFactory<T : Any, out P : OptimizationProblem<T>> {
|
|
||||||
public fun build(symbols: List<Symbol>): P
|
|
||||||
}
|
|
||||||
|
|
||||||
public operator fun <T : Any, P : OptimizationProblem<T>> OptimizationProblemFactory<T, P>.invoke(
|
|
||||||
symbols: List<Symbol>,
|
|
||||||
block: P.() -> Unit,
|
|
||||||
): P = build(symbols).apply(block)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Optimize expression without derivatives using specific [OptimizationProblemFactory]
|
|
||||||
*/
|
|
||||||
public fun <T : Any, F : OptimizationProblem<T>> Expression<T>.optimizeWith(
|
|
||||||
factory: OptimizationProblemFactory<T, F>,
|
|
||||||
vararg symbols: Symbol,
|
|
||||||
configuration: F.() -> Unit,
|
|
||||||
): OptimizationResult<T> {
|
|
||||||
require(symbols.isNotEmpty()) { "Must provide a list of symbols for optimization" }
|
|
||||||
val problem = factory(symbols.toList(), configuration)
|
|
||||||
problem.expression(this)
|
|
||||||
return problem.optimize()
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Optimize differentiable expression using specific [OptimizationProblemFactory]
|
|
||||||
*/
|
|
||||||
public fun <T : Any, F : OptimizationProblem<T>> DifferentiableExpression<T, Expression<T>>.optimizeWith(
|
|
||||||
factory: OptimizationProblemFactory<T, F>,
|
|
||||||
vararg symbols: Symbol,
|
|
||||||
configuration: F.() -> Unit,
|
|
||||||
): OptimizationResult<T> {
|
|
||||||
require(symbols.isNotEmpty()) { "Must provide a list of symbols for optimization" }
|
|
||||||
val problem = factory(symbols.toList(), configuration)
|
|
||||||
problem.diffExpression(this)
|
|
||||||
return problem.optimize()
|
|
||||||
}
|
|
@@ -80,19 +80,20 @@ public fun Distribution.Companion.normal(
     override fun probability(arg: Double): Double = exp(-(arg - mean).pow(2) / 2 / sigma2) / norm
 }

-public fun Distribution.Companion.poisson(lambda: Double): DiscreteSamplerDistribution =
-    object : DiscreteSamplerDistribution() {
-        private val computedProb: MutableMap<Int, Double> = hashMapOf(0 to exp(-lambda))
+public fun Distribution.Companion.poisson(
+    lambda: Double,
+): DiscreteSamplerDistribution = object : DiscreteSamplerDistribution() {
+    private val computedProb: HashMap<Int, Double> = hashMapOf(0 to exp(-lambda))

     override fun buildSampler(generator: RandomGenerator): DiscreteSampler =
         PoissonSampler.of(generator.asUniformRandomProvider(), lambda)

     override fun probability(arg: Int): Double {
         require(arg >= 0) { "The argument must be >= 0" }

         return if (arg > 40)
             exp(-(arg - lambda).pow(2) / 2 / lambda) / sqrt(2 * PI * lambda)
         else
             computedProb.getOrPut(arg) { probability(arg - 1) * lambda / arg }
-        }
     }
+}
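A hedged usage sketch for the reflowed factory; it relies only on the names visible in the hunk above (poisson, lambda, probability):

    import space.kscience.kmath.stat.Distribution
    import space.kscience.kmath.stat.poisson

    fun poissonDemo() {
        val distribution = Distribution.poisson(lambda = 5.0)
        // probability(arg) uses the exact recursion for small arguments and the
        // normal approximation for arg > 40, as implemented above.
        println("P(X = 3)  = ${distribution.probability(3)}")
        println("P(X = 50) = ${distribution.probability(50)}")
    }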