diff --git a/CHANGELOG.md b/CHANGELOG.md
index 109168475..f28041adf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@
 - A separate `Symbol` entity, which is used for global unbound symbol.
 - A `Symbol` indexing scope.
 - Basic optimization API for Commons-math.
+- Chi squared optimization for array-like data in CM
 
 ### Changed
 - Package changed from `scientifik` to `kscience.kmath`.
diff --git a/kmath-commons/src/main/kotlin/kscience/kmath/commons/optimization/CMFit.kt b/kmath-commons/src/main/kotlin/kscience/kmath/commons/optimization/CMFit.kt
new file mode 100644
index 000000000..4ffd0559d
--- /dev/null
+++ b/kmath-commons/src/main/kotlin/kscience/kmath/commons/optimization/CMFit.kt
@@ -0,0 +1,103 @@
+package kscience.kmath.commons.optimization
+
+import kscience.kmath.commons.expressions.DerivativeStructureExpression
+import kscience.kmath.commons.expressions.DerivativeStructureField
+import kscience.kmath.expressions.DifferentiableExpression
+import kscience.kmath.expressions.Expression
+import kscience.kmath.expressions.StringSymbol
+import kscience.kmath.expressions.Symbol
+import kscience.kmath.structures.Buffer
+import kscience.kmath.structures.indices
+import org.apache.commons.math3.analysis.differentiation.DerivativeStructure
+import org.apache.commons.math3.optim.nonlinear.scalar.GoalType
+import kotlin.math.pow
+
+
+public object CMFit {
+
+    /**
+     * Generate a chi squared expression from given x-y-sigma data and a model represented by an expression. Does not provide derivatives.
+     * TODO move to core/separate module
+     */
+    public fun chiSquaredExpression(
+        x: Buffer<Double>,
+        y: Buffer<Double>,
+        yErr: Buffer<Double>,
+        model: Expression<Double>,
+        xSymbol: Symbol = StringSymbol("x"),
+    ): Expression<Double> {
+        require(x.size == y.size) { "X and y buffers should be of the same size" }
+        require(y.size == yErr.size) { "Y and yErr buffers should be of the same size" }
+        return Expression { arguments ->
+            x.indices.sumByDouble {
+                val xValue = x[it]
+                val yValue = y[it]
+                val yErrValue = yErr[it]
+                val modifiedArgs = arguments + (xSymbol to xValue)
+                val modelValue = model(modifiedArgs)
+                ((yValue - modelValue) / yErrValue).pow(2) / 2
+            }
+        }
+    }
+
+    /**
+     * Generate a chi squared expression from given x-y-sigma data and an inline model. Provides automatic differentiation.
+     */
+    public fun chiSquaredExpression(
+        x: Buffer<Double>,
+        y: Buffer<Double>,
+        yErr: Buffer<Double>,
+        model: DerivativeStructureField.(x: DerivativeStructure) -> DerivativeStructure,
+    ): DerivativeStructureExpression {
+        require(x.size == y.size) { "X and y buffers should be of the same size" }
+        require(y.size == yErr.size) { "Y and yErr buffers should be of the same size" }
+        return DerivativeStructureExpression {
+            var sum = zero
+            x.indices.forEach {
+                val xValue = x[it]
+                val yValue = y[it]
+                val yErrValue = yErr[it]
+                val modelValue = model(const(xValue))
+                sum += ((yValue - modelValue) / yErrValue).pow(2) / 2
+            }
+            sum
+        }
+    }
+}
+
+/**
+ * Optimize an expression without derivatives.
+ */
+public fun Expression<Double>.optimize(
+    vararg symbols: Symbol,
+    configuration: CMOptimizationProblem.() -> Unit,
+): OptimizationResult<Double> {
+    require(symbols.isNotEmpty()) { "Must provide a list of symbols for optimization" }
+    val problem = CMOptimizationProblem(symbols.toList()).apply(configuration)
+    problem.expression(this)
+    return problem.optimize()
+}
+
+/**
+ * Optimize a differentiable expression.
+ */
+public fun DifferentiableExpression<Double>.optimize(
+    vararg symbols: Symbol,
+    configuration: CMOptimizationProblem.() -> Unit,
+): OptimizationResult<Double> {
+    require(symbols.isNotEmpty()) { "Must provide a list of symbols for optimization" }
+    val problem = CMOptimizationProblem(symbols.toList()).apply(configuration)
+    problem.diffExpression(this)
+    return problem.optimize()
+}
+
+public fun DifferentiableExpression<Double>.minimize(
+    vararg symbols: Symbol,
+    configuration: CMOptimizationProblem.() -> Unit,
+): OptimizationResult<Double> {
+    require(symbols.isNotEmpty()) { "Must provide a list of symbols for optimization" }
+    val problem = CMOptimizationProblem(symbols.toList()).apply(configuration)
+    problem.diffExpression(this)
+    problem.goal(GoalType.MINIMIZE)
+    return problem.optimize()
+}
\ No newline at end of file
diff --git a/kmath-commons/src/main/kotlin/kscience/kmath/commons/optimization/optimize.kt b/kmath-commons/src/main/kotlin/kscience/kmath/commons/optimization/optimize.kt
deleted file mode 100644
index c4bd5704e..000000000
--- a/kmath-commons/src/main/kotlin/kscience/kmath/commons/optimization/optimize.kt
+++ /dev/null
@@ -1,32 +0,0 @@
-package kscience.kmath.commons.optimization
-
-import kscience.kmath.expressions.DifferentiableExpression
-import kscience.kmath.expressions.Expression
-import kscience.kmath.expressions.Symbol
-
-
-/**
- * Optimize expression without derivatives
- */
-public fun Expression<Double>.optimize(
-    vararg symbols: Symbol,
-    configuration: CMOptimizationProblem.() -> Unit,
-): OptimizationResult<Double> {
-    require(symbols.isNotEmpty()) { "Must provide a list of symbols for optimization" }
-    val problem = CMOptimizationProblem(symbols.toList()).apply(configuration)
-    problem.expression(this)
-    return problem.optimize()
-}
-
-/**
- * Optimize differentiable expression
- */
-public fun DifferentiableExpression<Double>.optimize(
-    vararg symbols: Symbol,
-    configuration: CMOptimizationProblem.() -> Unit,
-): OptimizationResult<Double> {
-    require(symbols.isNotEmpty()) { "Must provide a list of symbols for optimization" }
-    val problem = CMOptimizationProblem(symbols.toList()).apply(configuration)
-    problem.diffExpression(this)
-    return problem.optimize()
-}
\ No newline at end of file
diff --git a/kmath-commons/src/test/kotlin/kscience/kmath/commons/optimization/OptimizeTest.kt b/kmath-commons/src/test/kotlin/kscience/kmath/commons/optimization/OptimizeTest.kt
index bd7870573..07bda2aa4 100644
--- a/kmath-commons/src/test/kotlin/kscience/kmath/commons/optimization/OptimizeTest.kt
+++ b/kmath-commons/src/test/kotlin/kscience/kmath/commons/optimization/OptimizeTest.kt
@@ -13,7 +13,7 @@ internal class OptimizeTest {
     }
 
     @Test
-    fun testOptimization() {
+    fun testGradientOptimization() {
         val result = normal.optimize(x, y) {
             initialGuess(x to 1.0, y to 1.0)
             //no need to select optimizer. Gradient optimizer is used by default because gradients are provided by function
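
A minimal usage sketch of the derivative-free path introduced above: the model is a plain Expression, so chiSquaredExpression substitutes the x symbol point by point and the resulting chi squared is optimized without gradients. Only CMFit.chiSquaredExpression, optimize, initialGuess, and goal come from this diff; the sample data, the RealBuffer factory, and the a/b parameter symbols are illustrative assumptions, not part of the change.

import kscience.kmath.commons.optimization.CMFit
import kscience.kmath.commons.optimization.optimize
import kscience.kmath.expressions.Expression
import kscience.kmath.expressions.StringSymbol
import kscience.kmath.structures.RealBuffer
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType

fun main() {
    // Hypothetical data roughly following y = 2x with unit errors.
    // RealBuffer as a Buffer<Double> factory is assumed for this kmath version.
    val xData = RealBuffer(1.0, 2.0, 3.0, 4.0)
    val yData = RealBuffer(2.1, 3.9, 6.2, 8.1)
    val yErr = RealBuffer(1.0, 1.0, 1.0, 1.0)

    val a = StringSymbol("a")
    val b = StringSymbol("b")
    val xSymbol = StringSymbol("x")

    // Linear model y = a * x + b as a plain expression; chiSquaredExpression
    // injects the current x value via `arguments + (xSymbol to xValue)`.
    val model = Expression<Double> { args ->
        args.getValue(a) * args.getValue(xSymbol) + args.getValue(b)
    }

    val chiSquared = CMFit.chiSquaredExpression(xData, yData, yErr, model, xSymbol)

    // No derivatives are available, so a derivative-free CM optimizer applies.
    val result = chiSquared.optimize(a, b) {
        initialGuess(a to 1.0, b to 1.0)
        goal(GoalType.MINIMIZE)
    }
    println(result)
}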
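
The same fit through the autodiff path: the model lambda runs inside DerivativeStructureField, so the returned expression is differentiable and minimize can dispatch to the gradient optimizer, which is what the renamed testGradientOptimization exercises. The bind(...) lookups for the free parameters, along with the data and RealBuffer again, are assumptions for illustration and are not confirmed by this diff.

import kscience.kmath.commons.optimization.CMFit
import kscience.kmath.commons.optimization.minimize
import kscience.kmath.expressions.StringSymbol
import kscience.kmath.structures.RealBuffer

fun main() {
    val xData = RealBuffer(1.0, 2.0, 3.0, 4.0)
    val yData = RealBuffer(2.1, 3.9, 6.2, 8.1)
    val yErr = RealBuffer(1.0, 1.0, 1.0, 1.0)

    val a = StringSymbol("a")
    val b = StringSymbol("b")

    // The lambda has a DerivativeStructureField receiver, so arithmetic on `arg`
    // carries derivatives with respect to the bound parameters automatically.
    val chiSquared = CMFit.chiSquaredExpression(xData, yData, yErr) { arg ->
        // `bind` resolving parameter symbols inside the field is an assumption;
        // any equivalent symbol lookup provided by DerivativeStructureField would do.
        bind(a) * arg + bind(b)
    }

    // minimize() sets GoalType.MINIMIZE and supplies the analytic gradient to CM.
    val result = chiSquared.minimize(a, b) {
        initialGuess(a to 1.0, b to 1.0)
    }
    println(result)
}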