From 33cb317ceee1256f3d271dd6723b8d55c57b814f Mon Sep 17 00:00:00 2001 From: Margarita Lashina Date: Sun, 28 May 2023 23:07:01 +0300 Subject: [PATCH] added examples and tests --- .../StaticLm/staticDifficultTest.kt | 92 ++++++ .../StaticLm/staticEasyTest.kt | 56 ++++ .../StaticLm/staticMiddleTest.kt | 91 ++++++ .../StreamingLm/streamLm.kt} | 26 +- .../StreamingLm/streamingLmTest.kt | 25 ++ .../functionsToOptimize.kt | 39 ++- .../kmath/tensors/core/DoubleTensorAlgebra.kt | 6 +- .../kmath/tensors/core/TestLmAlgorithm.kt | 267 ++++++++++++++++++ 8 files changed, 579 insertions(+), 23 deletions(-) create mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticDifficultTest.kt create mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticEasyTest.kt create mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticMiddleTest.kt rename examples/src/main/kotlin/space/kscience/kmath/tensors/{StreamingLm/streamingLmMain.kt => LevenbergMarquardt/StreamingLm/streamLm.kt} (72%) create mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamingLmTest.kt rename examples/src/main/kotlin/space/kscience/kmath/tensors/{StreamingLm => LevenbergMarquardt}/functionsToOptimize.kt (77%) create mode 100644 kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestLmAlgorithm.kt diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticDifficultTest.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticDifficultTest.kt new file mode 100644 index 000000000..621943aea --- /dev/null +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticDifficultTest.kt @@ -0,0 +1,92 @@ +/* + * Copyright 2018-2023 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
+ */
+
+package space.kscience.kmath.tensors.LevenbergMarquardt.StaticLm
+
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.nd.component1
+import space.kscience.kmath.tensors.LevenbergMarquardt.funcDifficultForLm
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.internal.LMSettings
+import kotlin.math.roundToInt
+
+fun main() {
+    val NData = 200
+    var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
+    for (i in 0 until NData) {
+        t_example[i, 0] = t_example[i, 0] * (i + 1) - 104
+    }
+
+    val Nparams = 15
+    var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
+    for (i in 0 until Nparams) {
+        p_example[i, 0] = p_example[i, 0] + i - 25
+    }
+
+    val settings = LMSettings(0, 0, 1)
+
+    var y_hat = funcDifficultForLm(t_example, p_example, settings)
+
+    var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
+    for (i in 0 until Nparams) {
+        p_init[i, 0] = (p_example[i, 0] + 0.9)
+    }
+
+    var t = t_example
+    val y_dat = y_hat
+    val weight = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), DoubleArray(1) { 1.0 / Nparams * 1.0 - 0.085 }
+    ).as2D()
+    val dp = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
+    ).as2D()
+    var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+    p_min = p_min.div(1.0 / -50.0)
+    var p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+    p_max = p_max.div(1.0 / 50.0)
+    val consts = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
+    ).as2D()
+    val opts = doubleArrayOf(3.0, 10000.0, 1e-2, 0.015, 1e-2, 1e-2, 1e-2, 11.0, 9.0, 1.0)
+//    val opts = doubleArrayOf(3.0, 10000.0, 1e-5, 1e-5, 1e-5, 1e-5, 1e-3, 11.0, 9.0, 1.0)
+
+    val result = DoubleTensorAlgebra.lm(
+        ::funcDifficultForLm,
+        p_init.as2D(),
+        t,
+        y_dat,
+        weight,
+        dp,
+        p_min.as2D(),
+        p_max.as2D(),
+        consts,
+        opts,
+        10,
+        1
+    )
+
+    println("Parameters:")
+    for (i in 0 until result.result_parameters.shape.component1()) {
+        val x = (result.result_parameters[i, 0] * 10000).roundToInt() / 10000.0
+        print("$x ")
+    }
+    println()
+
+    println("Y true and y received:")
+    var y_hat_after = funcDifficultForLm(t_example, result.result_parameters, settings)
+    for (i in 0 until y_hat.shape.component1()) {
+        val x = (y_hat[i, 0] * 10000).roundToInt() / 10000.0
+        val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
+        println("$x $y")
+    }
+
+    println("Chi_sq:")
+    println(result.result_chi_sq)
+    println("Number of iterations:")
+    println(result.iterations)
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticEasyTest.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticEasyTest.kt
new file mode 100644
index 000000000..bae5a674f
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticEasyTest.kt
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.LevenbergMarquardt.StaticLm
+
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.nd.component1
+import space.kscience.kmath.tensors.LevenbergMarquardt.funcDifficultForLm
+import space.kscience.kmath.tensors.LevenbergMarquardt.funcEasyForLm
+import space.kscience.kmath.tensors.LevenbergMarquardt.getStartDataForFuncEasy
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.internal.LMSettings
+import kotlin.math.roundToInt
+
+fun main() {
+    val startedData = getStartDataForFuncEasy()
+
+    val result = DoubleTensorAlgebra.lm(
+        ::funcEasyForLm,
+        DoubleTensorAlgebra.ones(ShapeND(intArrayOf(4, 1))).as2D(),
+        startedData.t,
+        startedData.y_dat,
+        startedData.weight,
+        startedData.dp,
+        startedData.p_min,
+        startedData.p_max,
+        startedData.consts,
+        startedData.opts,
+        10,
+        startedData.example_number
+    )
+
+    println("Parameters:")
+    for (i in 0 until result.result_parameters.shape.component1()) {
+        val x = (result.result_parameters[i, 0] * 10000).roundToInt() / 10000.0
+        print("$x ")
+    }
+    println()
+
+    println("Y true and y received:")
+    var y_hat_after = funcEasyForLm(startedData.t, result.result_parameters, LMSettings(0, 0, startedData.example_number))
+    for (i in 0 until startedData.y_dat.shape.component1()) {
+        val x = (startedData.y_dat[i, 0] * 10000).roundToInt() / 10000.0
+        val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
+        println("$x $y")
+    }
+
+    println("Chi_sq:")
+    println(result.result_chi_sq)
+    println("Number of iterations:")
+    println(result.iterations)
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticMiddleTest.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticMiddleTest.kt
new file mode 100644
index 000000000..a39572858
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StaticLm/staticMiddleTest.kt
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.LevenbergMarquardt.StaticLm
+
+import space.kscience.kmath.nd.ShapeND
+import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.nd.component1
+import space.kscience.kmath.tensors.LevenbergMarquardt.funcMiddleForLm
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.internal.LMSettings
+import kotlin.math.roundToInt
+fun main() {
+    val NData = 100
+    var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
+    for (i in 0 until NData) {
+        t_example[i, 0] = t_example[i, 0] * (i + 1)
+    }
+
+    val Nparams = 20
+    var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
+    for (i in 0 until Nparams) {
+        p_example[i, 0] = p_example[i, 0] + i - 25
+    }
+
+    val settings = LMSettings(0, 0, 1)
+
+    var y_hat = funcMiddleForLm(t_example, p_example, settings)
+
+    var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
+    for (i in 0 until Nparams) {
+        p_init[i, 0] = (p_example[i, 0] + 0.9)
+    }
+//    val p_init = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+//    val p_init = p_example
+    var t = t_example
+    val y_dat = y_hat
+    val weight = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), DoubleArray(1) { 1.0 }
+    ).as2D()
+    val dp = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
+    ).as2D()
+    var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+    p_min = p_min.div(1.0 / -50.0)
+    var p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+    p_max = p_max.div(1.0 / 50.0)
+    val consts = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
+    ).as2D()
+    val opts = doubleArrayOf(3.0, 10000.0, 1e-5, 1e-5, 1e-5, 1e-5, 1e-5, 11.0, 9.0, 1.0)
+
+    val result = DoubleTensorAlgebra.lm(
+        ::funcMiddleForLm,
+        p_init.as2D(),
+        t,
+        y_dat,
+        weight,
+        dp,
+        p_min.as2D(),
+        p_max.as2D(),
+        consts,
+        opts,
+        10,
+        1
+    )
+
+    println("Parameters:")
+    for (i in 0 until result.result_parameters.shape.component1()) {
+        val x = (result.result_parameters[i, 0] * 10000).roundToInt() / 10000.0
+        print("$x ")
+    }
+    println()
+
+    println("Y true and y received:")
+    var y_hat_after = funcMiddleForLm(t_example, result.result_parameters, settings)
+    for (i in 0 until y_hat.shape.component1()) {
+        val x = (y_hat[i, 0] * 10000).roundToInt() / 10000.0
+        val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
+        println("$x $y")
+    }
+
+    println("Chi_sq:")
+    println(result.result_chi_sq)
+    println("Number of iterations:")
+    println(result.iterations)
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/StreamingLm/streamingLmMain.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamLm.kt
similarity index 72%
rename from examples/src/main/kotlin/space/kscience/kmath/tensors/StreamingLm/streamingLmMain.kt
rename to examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamLm.kt
index 0fa934955..5a1037618 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/StreamingLm/streamingLmMain.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamLm.kt
@@ -3,20 +3,20 @@
  * Use of this source code is governed by the Apache 2.0 license 
that can be found in the license/LICENSE.txt file. */ -package space.kscience.kmath.tensors.StreamingLm +package space.kscience.kmath.tensors.LevenbergMarquardt.StreamingLm import kotlinx.coroutines.delay import kotlinx.coroutines.flow.* import space.kscience.kmath.nd.* +import space.kscience.kmath.tensors.LevenbergMarquardt.StartDataLm import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.zeros import space.kscience.kmath.tensors.core.DoubleTensorAlgebra import space.kscience.kmath.tensors.core.internal.LMSettings -import kotlin.math.roundToInt import kotlin.random.Random import kotlin.reflect.KFunction3 fun streamLm(lm_func: KFunction3, MutableStructure2D, LMSettings, MutableStructure2D>, - startData: StartDataLm, launchFrequencyInMs: Long): Flow> = flow{ + startData: StartDataLm, launchFrequencyInMs: Long, numberOfLaunches: Int): Flow> = flow{ var example_number = startData.example_number var p_init = startData.p_init @@ -29,7 +29,10 @@ fun streamLm(lm_func: KFunction3, MutableStructure2D< val consts = startData.consts val opts = startData.opts - while (true) { + var steps = numberOfLaunches + val isEndless = (steps <= 0) + + while (isEndless || steps > 0) { val result = DoubleTensorAlgebra.lm( lm_func, p_init, @@ -48,6 +51,7 @@ fun streamLm(lm_func: KFunction3, MutableStructure2D< delay(launchFrequencyInMs) p_init = result.result_parameters y_dat = generateNewYDat(y_dat, 0.1) + if (!isEndless) steps -= 1 } } @@ -59,18 +63,4 @@ fun generateNewYDat(y_dat: MutableStructure2D, delta: Double): MutableSt y_dat_new[i, 0] = y_dat[i, 0] + randomEps } return y_dat_new -} - -suspend fun main(){ - val startData = getStartDataForFunc1() - // Создание потока: - val lmFlow = streamLm(::func1ForLm, startData, 1000) - // Запуск потока - lmFlow.collect { parameters -> - for (i in 0 until parameters.shape.component1()) { - val x = (parameters[i, 0] * 10000).roundToInt() / 10000.0 - print("$x ") - if (i == parameters.shape.component1() - 1) println() - } - } } \ No newline at end of file diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamingLmTest.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamingLmTest.kt new file mode 100644 index 000000000..f81b048e5 --- /dev/null +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/StreamingLm/streamingLmTest.kt @@ -0,0 +1,25 @@ +/* + * Copyright 2018-2023 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
+ */
+
+package space.kscience.kmath.tensors.LevenbergMarquardt.StreamingLm
+
+import space.kscience.kmath.nd.*
+import space.kscience.kmath.tensors.LevenbergMarquardt.funcEasyForLm
+import space.kscience.kmath.tensors.LevenbergMarquardt.getStartDataForFuncEasy
+import kotlin.math.roundToInt
+
+suspend fun main(){
+    val startData = getStartDataForFuncEasy()
+    // Create the flow:
+    val lmFlow = streamLm(::funcEasyForLm, startData, 1000, 10)
+    // Launch the flow
+    lmFlow.collect { parameters ->
+        for (i in 0 until parameters.shape.component1()) {
+            val x = (parameters[i, 0] * 10000).roundToInt() / 10000.0
+            print("$x ")
+            if (i == parameters.shape.component1() - 1) println()
+        }
+    }
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/StreamingLm/functionsToOptimize.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/functionsToOptimize.kt
similarity index 77%
rename from examples/src/main/kotlin/space/kscience/kmath/tensors/StreamingLm/functionsToOptimize.kt
rename to examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/functionsToOptimize.kt
index 1d0ce7deb..191dc5c67 100644
--- a/examples/src/main/kotlin/space/kscience/kmath/tensors/StreamingLm/functionsToOptimize.kt
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LevenbergMarquardt/functionsToOptimize.kt
@@ -3,7 +3,7 @@
  * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
  */
 
-package space.kscience.kmath.tensors.StreamingLm
+package space.kscience.kmath.tensors.LevenbergMarquardt
 
 import space.kscience.kmath.nd.MutableStructure2D
 import space.kscience.kmath.nd.ShapeND
@@ -15,6 +15,7 @@ import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.max
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.plus
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.pow
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.times
+import space.kscience.kmath.tensors.core.asDoubleTensor
 import space.kscience.kmath.tensors.core.internal.LMSettings
 
 public data class StartDataLm (
@@ -31,7 +32,39 @@ public data class StartDataLm (
     var opts: DoubleArray
 )
 
-fun func1ForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, settings: LMSettings): MutableStructure2D<Double> {
+fun funcDifficultForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, settings: LMSettings): MutableStructure2D<Double> {
+    val m = t.shape.component1()
+    var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1)))
+
+    val mt = t.max()
+    for(i in 0 until p.shape.component1()){
+        y_hat = y_hat.plus( (t.times(1.0 / mt)).times(p[i, 0]) )
+    }
+
+    for(i in 0 until 4){
+        y_hat = funcEasyForLm((y_hat.as2D() + t).as2D(), p, settings).asDoubleTensor()
+    }
+
+    return y_hat.as2D()
+}
+
+fun funcMiddleForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, settings: LMSettings): MutableStructure2D<Double> {
+    val m = t.shape.component1()
+    var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1)))
+
+    val mt = t.max()
+    for(i in 0 until p.shape.component1()){
+        y_hat += (t.times(1.0 / mt)).times(p[i, 0])
+    }
+
+    for(i in 0 until 5){
+        y_hat = funcEasyForLm(y_hat.as2D(), p, settings).asDoubleTensor()
+    }
+
+    return y_hat.as2D()
+}
+
+fun funcEasyForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, settings: LMSettings): MutableStructure2D<Double> {
     val m = t.shape.component1()
     var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1)))
 
@@ -55,7 +88,7 @@ fun func1ForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, set
     return y_hat.as2D()
 }
 
-fun getStartDataForFunc1(): StartDataLm {
+fun getStartDataForFuncEasy(): StartDataLm {
     val lm_matx_y_dat = doubleArrayOf(
         19.6594, 18.6096, 17.6792, 17.2747, 16.3065, 17.1458, 16.0467, 16.7023, 15.7809, 15.9807,
         14.7620, 15.1128, 16.0973, 15.1934, 15.8636, 15.4763, 15.6860, 15.1895, 15.3495, 16.6054,
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt
index 43f097564..2b8bf84c0 100644
--- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt
@@ -765,12 +765,12 @@ public open class DoubleTensorAlgebra :
         var weight = weight_input
         if (nargin < 5) {
-            weight = fromArray(ShapeND(intArrayOf(1, 1)), doubleArrayOf((y_dat.transpose().dot(y_dat)).as1D()[0])).as2D()
+            weight = fromArray(ShapeND(intArrayOf(1, 1)), doubleArrayOf(1.0)).as2D()
         }
 
         var dp = dp_input
         if (nargin < 6) {
-            dp = fromArray(ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.001)).as2D()
+            dp = fromArray(ShapeND(intArrayOf(1, 1)), doubleArrayOf(-0.001)).as2D()
        }
 
         var p_min = p_min_input
@@ -1023,6 +1023,8 @@ public open class DoubleTensorAlgebra :
 //            println(" !! Maximum Number of Iterations Reached Without Convergence !!")
             resultInfo.typeOfConvergence = LinearOpsTensorAlgebra.TypeOfConvergence.noConvergence
             resultInfo.epsilon = 0.0
+            print("noConvergence, MaxIter = ")
+            println(MaxIter)
             stop = true
         }
     } // --- End of Main Loop
diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestLmAlgorithm.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestLmAlgorithm.kt
new file mode 100644
index 000000000..29accbbfa
--- /dev/null
+++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestLmAlgorithm.kt
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2018-2023 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */ + +package space.kscience.kmath.tensors.core + +import space.kscience.kmath.nd.MutableStructure2D +import space.kscience.kmath.nd.ShapeND +import space.kscience.kmath.nd.as2D +import space.kscience.kmath.nd.component1 +import space.kscience.kmath.operations.invoke +import space.kscience.kmath.tensors.api.LinearOpsTensorAlgebra +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.max +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.plus +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.pow +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.times +import space.kscience.kmath.tensors.core.internal.LMSettings +import kotlin.math.roundToLong +import kotlin.test.Test +import kotlin.test.assertEquals + +class TestLmAlgorithm { + companion object { + fun funcEasyForLm(t: MutableStructure2D, p: MutableStructure2D, settings: LMSettings): MutableStructure2D { + val m = t.shape.component1() + var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(m, 1))) + + if (settings.example_number == 1) { + y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0]))).times(p[0, 0]) + t.times(p[2, 0]).times( + DoubleTensorAlgebra.exp((t.times(-1.0 / p[3, 0]))) + ) + } + else if (settings.example_number == 2) { + val mt = t.max() + y_hat = (t.times(1.0 / mt)).times(p[0, 0]) + + (t.times(1.0 / mt)).pow(2).times(p[1, 0]) + + (t.times(1.0 / mt)).pow(3).times(p[2, 0]) + + (t.times(1.0 / mt)).pow(4).times(p[3, 0]) + } + else if (settings.example_number == 3) { + y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0]))) + .times(p[0, 0]) + DoubleTensorAlgebra.sin((t.times(1.0 / p[3, 0]))).times(p[2, 0]) + } + + return y_hat.as2D() + } + + fun funcMiddleForLm(t: MutableStructure2D, p: MutableStructure2D, settings: LMSettings): MutableStructure2D { + val m = t.shape.component1() + var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1))) + + val mt = t.max() + for(i in 0 until p.shape.component1()){ + y_hat += (t.times(1.0 / mt)).times(p[i, 0]) + } + + for(i in 0 until 5){ + y_hat = funcEasyForLm(y_hat.as2D(), p, settings).asDoubleTensor() + } + + return y_hat.as2D() + } + + fun funcDifficultForLm(t: MutableStructure2D, p: MutableStructure2D, settings: LMSettings): MutableStructure2D { + val m = t.shape.component1() + var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1))) + + val mt = t.max() + for(i in 0 until p.shape.component1()){ + y_hat = y_hat.plus( (t.times(1.0 / mt)).times(p[i, 0]) ) + } + + for(i in 0 until 4){ + y_hat = funcEasyForLm((y_hat.as2D() + t).as2D(), p, settings).asDoubleTensor() + } + + return y_hat.as2D() + } + + } + @Test + fun testLMEasy() = DoubleTensorAlgebra { + val lm_matx_y_dat = doubleArrayOf( + 19.6594, 18.6096, 17.6792, 17.2747, 16.3065, 17.1458, 16.0467, 16.7023, 15.7809, 15.9807, + 14.7620, 15.1128, 16.0973, 15.1934, 15.8636, 15.4763, 15.6860, 15.1895, 15.3495, 16.6054, + 16.2247, 15.9854, 16.1421, 17.0960, 16.7769, 17.1997, 17.2767, 17.5882, 17.5378, 16.7894, + 17.7648, 18.2512, 18.1581, 16.7037, 17.8475, 17.9081, 18.3067, 17.9632, 18.2817, 19.1427, + 18.8130, 18.5658, 18.0056, 18.4607, 18.5918, 18.2544, 18.3731, 18.7511, 19.3181, 17.3066, + 17.9632, 19.0513, 18.7528, 18.2928, 18.5967, 17.8567, 17.7859, 18.4016, 18.9423, 18.4959, + 17.8000, 18.4251, 17.7829, 17.4645, 17.5221, 17.3517, 17.4637, 17.7563, 16.8471, 17.4558, + 17.7447, 17.1487, 17.3183, 16.8312, 17.7551, 17.0942, 15.6093, 16.4163, 15.3755, 16.6725, + 16.2332, 16.2316, 16.2236, 16.5361, 15.3721, 15.3347, 15.5815, 
15.6319, 14.4538, 14.6044, + 14.7665, 13.3718, 15.0587, 13.8320, 14.7873, 13.6824, 14.2579, 14.2154, 13.5818, 13.8157 + ) + + var example_number = 1 + val p_init = BroadcastDoubleTensorAlgebra.fromArray( + ShapeND(intArrayOf(4, 1)), doubleArrayOf(5.0, 2.0, 0.2, 10.0) + ).as2D() + + var t = ones(ShapeND(intArrayOf(100, 1))).as2D() + for (i in 0 until 100) { + t[i, 0] = t[i, 0] * (i + 1) + } + + val y_dat = BroadcastDoubleTensorAlgebra.fromArray( + ShapeND(intArrayOf(100, 1)), lm_matx_y_dat + ).as2D() + + val weight = BroadcastDoubleTensorAlgebra.fromArray( + ShapeND(intArrayOf(1, 1)), DoubleArray(1) { 4.0 } + ).as2D() + + val dp = BroadcastDoubleTensorAlgebra.fromArray( + ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 } + ).as2D() + + val p_min = BroadcastDoubleTensorAlgebra.fromArray( + ShapeND(intArrayOf(4, 1)), doubleArrayOf(-50.0, -20.0, -2.0, -100.0) + ).as2D() + + val p_max = BroadcastDoubleTensorAlgebra.fromArray( + ShapeND(intArrayOf(4, 1)), doubleArrayOf(50.0, 20.0, 2.0, 100.0) + ).as2D() + + val consts = BroadcastDoubleTensorAlgebra.fromArray( + ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0) + ).as2D() + + val opts = doubleArrayOf(3.0, 100.0, 1e-3, 1e-3, 1e-1, 1e-1, 1e-2, 11.0, 9.0, 1.0) + + val result = lm(::funcEasyForLm, p_init, t, y_dat, weight, dp, p_min, p_max, consts, opts, 10, example_number) + assertEquals(13, result.iterations) + assertEquals(31, result.func_calls) + assertEquals(1, result.example_number) + assertEquals(0.9131368192633, (result.result_chi_sq * 1e13).roundToLong() / 1e13) + assertEquals(3.7790980 * 1e-7, (result.result_lambda * 1e13).roundToLong() / 1e13) + assertEquals(result.typeOfConvergence, LinearOpsTensorAlgebra.TypeOfConvergence.inParameters) + val expectedParameters = BroadcastDoubleTensorAlgebra.fromArray( + ShapeND(intArrayOf(4, 1)), doubleArrayOf(20.527230909086, 9.833627103230, 0.997571256572, 50.174445822506) + ).as2D() + result.result_parameters = result.result_parameters.map { x -> (x * 1e12).toLong() / 1e12}.as2D() + val receivedParameters = BroadcastDoubleTensorAlgebra.fromArray( + ShapeND(intArrayOf(4, 1)), doubleArrayOf(result.result_parameters[0, 0], result.result_parameters[1, 0], + result.result_parameters[2, 0], result.result_parameters[3, 0]) + ).as2D() + assertEquals(expectedParameters[0, 0], receivedParameters[0, 0]) + assertEquals(expectedParameters[1, 0], receivedParameters[1, 0]) + assertEquals(expectedParameters[2, 0], receivedParameters[2, 0]) + assertEquals(expectedParameters[3, 0], receivedParameters[3, 0]) + } + + @Test + fun TestLMMiddle() = DoubleTensorAlgebra { + val NData = 100 + var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D() + for (i in 0 until NData) { + t_example[i, 0] = t_example[i, 0] * (i + 1) + } + + val Nparams = 20 + var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D() + for (i in 0 until Nparams) { + p_example[i, 0] = p_example[i, 0] + i - 25 + } + + val settings = LMSettings(0, 0, 1) + + var y_hat = funcMiddleForLm(t_example, p_example, settings) + + var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D() + for (i in 0 until Nparams) { + p_init[i, 0] = (p_example[i, 0] + 0.9) + } + + var t = t_example + val y_dat = y_hat + val weight = BroadcastDoubleTensorAlgebra.fromArray( + ShapeND(intArrayOf(1, 1)), DoubleArray(1) { 1.0 } + ).as2D() + val dp = BroadcastDoubleTensorAlgebra.fromArray( + ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 } + ).as2D() + var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))) + 
p_min = p_min.div(1.0 / -50.0)
+        var p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+        p_max = p_max.div(1.0 / 50.0)
+        val consts = BroadcastDoubleTensorAlgebra.fromArray(
+            ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
+        ).as2D()
+        val opts = doubleArrayOf(3.0, 7000.0, 1e-5, 1e-5, 1e-5, 1e-5, 1e-5, 11.0, 9.0, 1.0)
+
+        val result = DoubleTensorAlgebra.lm(
+            ::funcMiddleForLm,
+            p_init.as2D(),
+            t,
+            y_dat,
+            weight,
+            dp,
+            p_min.as2D(),
+            p_max.as2D(),
+            consts,
+            opts,
+            10,
+            1
+        )
+    }
+
+    @Test
+    fun TestLMDifficult() = DoubleTensorAlgebra {
+        val NData = 200
+        var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
+        for (i in 0 until NData) {
+            t_example[i, 0] = t_example[i, 0] * (i + 1) - 104
+        }
+
+        val Nparams = 15
+        var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
+        for (i in 0 until Nparams) {
+            p_example[i, 0] = p_example[i, 0] + i - 25
+        }
+
+        val settings = LMSettings(0, 0, 1)
+
+        var y_hat = funcDifficultForLm(t_example, p_example, settings)
+
+        var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
+        for (i in 0 until Nparams) {
+            p_init[i, 0] = (p_example[i, 0] + 0.9)
+        }
+
+        var t = t_example
+        val y_dat = y_hat
+        val weight = BroadcastDoubleTensorAlgebra.fromArray(
+            ShapeND(intArrayOf(1, 1)), DoubleArray(1) { 1.0 / Nparams * 1.0 - 0.085 }
+        ).as2D()
+        val dp = BroadcastDoubleTensorAlgebra.fromArray(
+            ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
+        ).as2D()
+        var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+        p_min = p_min.div(1.0 / -50.0)
+        var p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+        p_max = p_max.div(1.0 / 50.0)
+        val consts = BroadcastDoubleTensorAlgebra.fromArray(
+            ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
+        ).as2D()
+        val opts = doubleArrayOf(3.0, 7000.0, 1e-2, 1e-1, 1e-2, 1e-2, 1e-2, 11.0, 9.0, 1.0)
+
+        val result = DoubleTensorAlgebra.lm(
+            ::funcDifficultForLm,
+            p_init.as2D(),
+            t,
+            y_dat,
+            weight,
+            dp,
+            p_min.as2D(),
+            p_max.as2D(),
+            consts,
+            opts,
+            10,
+            1
+        )
+
+//        assertEquals(1.15, (result.result_chi_sq * 1e2).roundToLong() / 1e2)
+    }
+}
\ No newline at end of file