Forked from kscience/kmath

Made the settings class (LMSettings) private and removed settings as an input parameter of the custom model functions; the functions now take the example number as a plain Int instead.

This commit is contained in:
parent 0c7f5697da
commit cac5b513f3
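In short, the Levenberg-Marquardt model functions no longer receive the whole LMSettings object; they only get the example number, and LMSettings becomes a private implementation detail of the core `lm` routine. A minimal caller-side sketch of the change, assuming it is compiled against this branch's examples source set (the shapes and values here are illustrative, not taken from a specific example file):

```kotlin
import space.kscience.kmath.nd.*
import space.kscience.kmath.tensors.LevenbergMarquardt.funcEasyForLm
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main() {
    // A time grid and four model parameters, as in the examples touched by this commit.
    val t = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(100, 1))).as2D()
    val p = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(4, 1))).as2D()

    // Before this commit: val settings = LMSettings(0, 0, 1); funcEasyForLm(t, p, settings)
    // After this commit: the example number is passed directly as an Int.
    val exampleNumber = 1
    val yHat = funcEasyForLm(t, p, exampleNumber)
    println(yHat[0, 0])
}
```

The diff below applies the same rename (`func_calls` -> `funcCalls`, `result_chi_sq` -> `resultChiSq`, and so on) to the result class and updates every example and test accordingly.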
@@ -12,7 +12,6 @@ import space.kscience.kmath.tensors.LevenbergMarquardt.funcDifficultForLm
 import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
 import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.LMSettings
 import space.kscience.kmath.tensors.core.lm
 import kotlin.math.roundToInt
 
@@ -29,9 +28,9 @@ fun main() {
         p_example[i, 0] = p_example[i, 0] + i - 25
     }
 
-    val settings = LMSettings(0, 0, 1)
+    val exampleNumber = 1
 
-    var y_hat = funcDifficultForLm(t_example, p_example, settings)
+    var y_hat = funcDifficultForLm(t_example, p_example, exampleNumber)
 
     var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
     for (i in 0 until Nparams) {
@@ -72,14 +71,14 @@ fun main() {
     )
 
     println("Parameters:")
-    for (i in 0 until result.result_parameters.shape.component1()) {
-        val x = (result.result_parameters[i, 0] * 10000).roundToInt() / 10000.0
+    for (i in 0 until result.resultParameters.shape.component1()) {
+        val x = (result.resultParameters[i, 0] * 10000).roundToInt() / 10000.0
         print("$x ")
     }
     println()
 
     println("Y true and y received:")
-    var y_hat_after = funcDifficultForLm(t_example, result.result_parameters, settings)
+    var y_hat_after = funcDifficultForLm(t_example, result.resultParameters, exampleNumber)
     for (i in 0 until y_hat.shape.component1()) {
         val x = (y_hat[i, 0] * 10000).roundToInt() / 10000.0
         val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
@@ -87,7 +86,7 @@ fun main() {
     }
 
     println("Сhi_sq:")
-    println(result.result_chi_sq)
+    println(result.resultChiSq)
     println("Number of iterations:")
     println(result.iterations)
 }
@@ -12,7 +12,6 @@ import space.kscience.kmath.tensors.LevenbergMarquardt.funcDifficultForLm
 import space.kscience.kmath.tensors.LevenbergMarquardt.funcEasyForLm
 import space.kscience.kmath.tensors.LevenbergMarquardt.getStartDataForFuncEasy
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.LMSettings
 import space.kscience.kmath.tensors.core.lm
 import kotlin.math.roundToInt
 
@@ -35,14 +34,14 @@ fun main() {
     )
 
     println("Parameters:")
-    for (i in 0 until result.result_parameters.shape.component1()) {
-        val x = (result.result_parameters[i, 0] * 10000).roundToInt() / 10000.0
+    for (i in 0 until result.resultParameters.shape.component1()) {
+        val x = (result.resultParameters[i, 0] * 10000).roundToInt() / 10000.0
         print("$x ")
     }
     println()
 
     println("Y true and y received:")
-    var y_hat_after = funcDifficultForLm(startedData.t, result.result_parameters, LMSettings(0, 0, startedData.example_number))
+    var y_hat_after = funcDifficultForLm(startedData.t, result.resultParameters, startedData.example_number)
     for (i in 0 until startedData.y_dat.shape.component1()) {
         val x = (startedData.y_dat[i, 0] * 10000).roundToInt() / 10000.0
         val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
@@ -50,7 +49,7 @@ fun main() {
     }
 
     println("Сhi_sq:")
-    println(result.result_chi_sq)
+    println(result.resultChiSq)
     println("Number of iterations:")
     println(result.iterations)
 }
@@ -12,8 +12,6 @@ import space.kscience.kmath.tensors.LevenbergMarquardt.funcMiddleForLm
 import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
 import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.times
-import space.kscience.kmath.tensors.core.LMSettings
 import space.kscience.kmath.tensors.core.lm
 import kotlin.math.roundToInt
 fun main() {
@@ -29,16 +27,15 @@ fun main() {
         p_example[i, 0] = p_example[i, 0] + i - 25
     }
 
-    val settings = LMSettings(0, 0, 1)
+    val exampleNumber = 1
 
-    var y_hat = funcMiddleForLm(t_example, p_example, settings)
+    var y_hat = funcMiddleForLm(t_example, p_example, exampleNumber)
 
     var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
     for (i in 0 until Nparams) {
         p_init[i, 0] = (p_example[i, 0] + 0.9)
     }
-    // val p_init = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
-    // val p_init = p_example
     var t = t_example
     val y_dat = y_hat
     val weight = BroadcastDoubleTensorAlgebra.fromArray(
@@ -54,7 +51,7 @@ fun main() {
     val consts = BroadcastDoubleTensorAlgebra.fromArray(
         ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
     ).as2D()
-    val opts = doubleArrayOf(3.0, 10000.0, 1e-3, 1e-3, 1e-3, 1e-3, 1e-15, 11.0, 9.0, 1.0)
+    val opts = doubleArrayOf(3.0, 7000.0, 1e-5, 1e-5, 1e-5, 1e-5, 1e-5, 11.0, 9.0, 1.0)
 
     val result = DoubleTensorAlgebra.lm(
         ::funcMiddleForLm,
@@ -72,14 +69,14 @@ fun main() {
     )
 
     println("Parameters:")
-    for (i in 0 until result.result_parameters.shape.component1()) {
-        val x = (result.result_parameters[i, 0] * 10000).roundToInt() / 10000.0
+    for (i in 0 until result.resultParameters.shape.component1()) {
+        val x = (result.resultParameters[i, 0] * 10000).roundToInt() / 10000.0
         print("$x ")
     }
     println()
 
 
-    var y_hat_after = funcMiddleForLm(t_example, result.result_parameters, settings)
+    var y_hat_after = funcMiddleForLm(t_example, result.resultParameters, exampleNumber)
     for (i in 0 until y_hat.shape.component1()) {
         val x = (y_hat[i, 0] * 10000).roundToInt() / 10000.0
         val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
@@ -87,7 +84,7 @@ fun main() {
     }
 
     println("Сhi_sq:")
-    println(result.result_chi_sq)
+    println(result.resultChiSq)
     println("Number of iterations:")
     println(result.iterations)
 }
@@ -11,12 +11,11 @@ import space.kscience.kmath.nd.*
 import space.kscience.kmath.tensors.LevenbergMarquardt.StartDataLm
 import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.zeros
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
-import space.kscience.kmath.tensors.core.LMSettings
 import space.kscience.kmath.tensors.core.lm
 import kotlin.random.Random
 import kotlin.reflect.KFunction3
 
-fun streamLm(lm_func: KFunction3<MutableStructure2D<Double>, MutableStructure2D<Double>, LMSettings, MutableStructure2D<Double>>,
+fun streamLm(lm_func: KFunction3<MutableStructure2D<Double>, MutableStructure2D<Double>, Int, MutableStructure2D<Double>>,
              startData: StartDataLm, launchFrequencyInMs: Long, numberOfLaunches: Int): Flow<MutableStructure2D<Double>> = flow{
 
     var example_number = startData.example_number
@@ -48,9 +47,9 @@ fun streamLm(lm_func: KFunction3<MutableStructure2D<Double>, MutableStructure2D<
             10,
             example_number
         )
-        emit(result.result_parameters)
+        emit(result.resultParameters)
         delay(launchFrequencyInMs)
-        p_init = result.result_parameters
+        p_init = result.resultParameters
         y_dat = generateNewYDat(y_dat, 0.1)
         if (!isEndless) steps -= 1
     }
@@ -16,7 +16,6 @@ import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.max
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.plus
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.pow
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.times
-import space.kscience.kmath.tensors.core.LMSettings
 import space.kscience.kmath.tensors.core.asDoubleTensor
 
 public data class StartDataLm (
@@ -33,23 +32,31 @@ public data class StartDataLm (
     var opts: DoubleArray
 )
 
-fun funcDifficultForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, settings: LMSettings): MutableStructure2D<Double> {
+fun funcEasyForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, exampleNumber: Int): MutableStructure2D<Double> {
     val m = t.shape.component1()
-    var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1)))
+    var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(m, 1)))
 
-    val mt = t.max()
-    for(i in 0 until p.shape.component1()){
-        y_hat = y_hat.plus( (t.times(1.0 / mt)).times(p[i, 0]) )
+    if (exampleNumber == 1) {
+        y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0]))).times(p[0, 0]) + t.times(p[2, 0]).times(
+            DoubleTensorAlgebra.exp((t.times(-1.0 / p[3, 0])))
+        )
     }
-
-    for(i in 0 until 4){
-        y_hat = funcEasyForLm((y_hat.as2D() + t).as2D(), p, settings).asDoubleTensor()
+    else if (exampleNumber == 2) {
+        val mt = t.max()
+        y_hat = (t.times(1.0 / mt)).times(p[0, 0]) +
+                (t.times(1.0 / mt)).pow(2).times(p[1, 0]) +
+                (t.times(1.0 / mt)).pow(3).times(p[2, 0]) +
+                (t.times(1.0 / mt)).pow(4).times(p[3, 0])
+    }
+    else if (exampleNumber == 3) {
+        y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0])))
+            .times(p[0, 0]) + DoubleTensorAlgebra.sin((t.times(1.0 / p[3, 0]))).times(p[2, 0])
     }
 
     return y_hat.as2D()
 }
 
-fun funcMiddleForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, settings: LMSettings): MutableStructure2D<Double> {
+fun funcMiddleForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, exampleNumber: Int): MutableStructure2D<Double> {
     val m = t.shape.component1()
     var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1)))
 
@@ -59,36 +66,29 @@ fun funcMiddleForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>
     }
 
     for(i in 0 until 5){
-        y_hat = funcEasyForLm(y_hat.as2D(), p, settings).asDoubleTensor()
+        y_hat = funcEasyForLm(y_hat.as2D(), p, exampleNumber).asDoubleTensor()
     }
 
     return y_hat.as2D()
 }
 
-fun funcEasyForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, settings: LMSettings): MutableStructure2D<Double> {
+fun funcDifficultForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, exampleNumber: Int): MutableStructure2D<Double> {
     val m = t.shape.component1()
     var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1)))
 
-    if (settings.example_number == 1) {
-        y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0]))).times(p[0, 0]) + t.times(p[2, 0]).times(
-            DoubleTensorAlgebra.exp((t.times(-1.0 / p[3, 0])))
-        )
-    }
-    else if (settings.example_number == 2) {
     val mt = t.max()
-        y_hat = (t.times(1.0 / mt)).times(p[0, 0]) +
-                (t.times(1.0 / mt)).pow(2).times(p[1, 0]) +
-                (t.times(1.0 / mt)).pow(3).times(p[2, 0]) +
-                (t.times(1.0 / mt)).pow(4).times(p[3, 0])
+    for(i in 0 until p.shape.component1()){
+        y_hat = y_hat.plus( (t.times(1.0 / mt)).times(p[i, 0]) )
     }
-    else if (settings.example_number == 3) {
-        y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0])))
-            .times(p[0, 0]) + DoubleTensorAlgebra.sin((t.times(1.0 / p[3, 0]))).times(p[2, 0])
+    for(i in 0 until 4){
+        y_hat = funcEasyForLm((y_hat.as2D() + t).as2D(), p, exampleNumber).asDoubleTensor()
     }
 
     return y_hat.as2D()
 }
 
 
 fun getStartDataForFuncDifficult(): StartDataLm {
     val NData = 200
     var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
@@ -102,9 +102,9 @@ fun getStartDataForFuncDifficult(): StartDataLm {
         p_example[i, 0] = p_example[i, 0] + i - 25
     }
 
-    val settings = LMSettings(0, 0, 1)
+    val exampleNumber = 1
 
-    var y_hat = funcDifficultForLm(t_example, p_example, settings)
+    var y_hat = funcDifficultForLm(t_example, p_example, exampleNumber)
 
     var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
     for (i in 0 until Nparams) {
@@ -144,9 +144,9 @@ fun getStartDataForFuncMiddle(): StartDataLm {
         p_example[i, 0] = p_example[i, 0] + i - 25
     }
 
-    val settings = LMSettings(0, 0, 1)
+    val exampleNumber = 1
 
-    var y_hat = funcMiddleForLm(t_example, p_example, settings)
+    var y_hat = funcMiddleForLm(t_example, p_example, exampleNumber)
 
     var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
     for (i in 0 until Nparams) {
@@ -40,36 +40,39 @@ public enum class TypeOfConvergence{
     NoConvergence
 }
 
+/**
+ * Class for the data obtained as a result of the execution of the Levenberg-Marquardt algorithm
+ *
+ * iterations: number of completed iterations
+ * funcCalls: the number of evaluations of the input function during execution
+ * resultChiSq: chi squared value on final parameters
+ * resultLambda: final lambda parameter used to calculate the offset
+ * resultParameters: final parameters
+ * typeOfConvergence: type of convergence
+ */
 public data class LMResultInfo (
     var iterations:Int,
-    var func_calls: Int,
-    var example_number: Int,
-    var result_chi_sq: Double,
-    var result_lambda: Double,
-    var result_parameters: MutableStructure2D<Double>,
+    var funcCalls: Int,
+    var resultChiSq: Double,
+    var resultLambda: Double,
+    var resultParameters: MutableStructure2D<Double>,
     var typeOfConvergence: TypeOfConvergence,
-    var epsilon: Double
-)
 
-public data class LMSettings (
-    var iteration:Int,
-    var func_calls: Int,
-    var example_number:Int
 )
 
 public fun DoubleTensorAlgebra.lm(
-    func: KFunction3<MutableStructure2D<Double>, MutableStructure2D<Double>, LMSettings, MutableStructure2D<Double>>,
+    func: KFunction3<MutableStructure2D<Double>, MutableStructure2D<Double>, Int, MutableStructure2D<Double>>,
     p_input: MutableStructure2D<Double>, t_input: MutableStructure2D<Double>, y_dat_input: MutableStructure2D<Double>,
     weight_input: MutableStructure2D<Double>, dp_input: MutableStructure2D<Double>, p_min_input: MutableStructure2D<Double>, p_max_input: MutableStructure2D<Double>,
     c_input: MutableStructure2D<Double>, opts_input: DoubleArray, nargin: Int, example_number: Int): LMResultInfo {
 
-    val resultInfo = LMResultInfo(0, 0, example_number, 0.0,
-        0.0, p_input, TypeOfConvergence.NoConvergence, 0.0)
+    val resultInfo = LMResultInfo(0, 0, 0.0,
+        0.0, p_input, TypeOfConvergence.NoConvergence)
 
     val eps:Double = 2.2204e-16
 
     val settings = LMSettings(0, 0, example_number)
-    settings.func_calls = 0 // running count of function evaluations
+    settings.funcCalls = 0 // running count of function evaluations
 
     var p = p_input
     val y_dat = y_dat_input
@@ -160,7 +163,7 @@ public fun DoubleTensorAlgebra.lm(
     val idx = get_zero_indices(dp) // indices of the parameters to be fit
     val Nfit = idx?.shape?.component1() // number of parameters to fit
     var stop = false // termination flag
-    val y_init = feval(func, t, p, settings) // residual error using p_try
+    val y_init = feval(func, t, p, example_number) // residual error using p_try
 
     if (weight.shape.component1() == 1 || variance(weight) == 0.0) { // identical weights vector
         weight = ones(ShapeND(intArrayOf(Npnt, 1))).div(1 / kotlin.math.abs(weight[0, 0])).as2D()
@@ -219,7 +222,7 @@ public fun DoubleTensorAlgebra.lm(
     var p_try = (p + h).as2D() // update the [idx] elements
     p_try = smallest_element_comparison(largest_element_comparison(p_min, p_try.as2D()), p_max) // apply constraints
 
-    var delta_y = y_dat.minus(feval(func, t, p_try, settings)) // residual error using p_try
+    var delta_y = y_dat.minus(feval(func, t, p_try, example_number)) // residual error using p_try
 
     for (i in 0 until delta_y.shape.component1()) { // floating point error; break
         for (j in 0 until delta_y.shape.component2()) {
@@ -230,7 +233,7 @@ public fun DoubleTensorAlgebra.lm(
         }
     }
 
-    settings.func_calls += 1
+    settings.funcCalls += 1
 
     val tmp = delta_y.times(weight)
     var X2_try = delta_y.as2D().transpose().dot(tmp) // Chi-squared error criteria
@@ -244,8 +247,8 @@ public fun DoubleTensorAlgebra.lm(
         p_try = p.plus(h).as2D() // update only [idx] elements
         p_try = smallest_element_comparison(largest_element_comparison(p_min, p_try), p_max) // apply constraints
 
-        var delta_y = y_dat.minus(feval(func, t, p_try, settings)) // residual error using p_try
-        settings.func_calls += 1
+        var delta_y = y_dat.minus(feval(func, t, p_try, example_number)) // residual error using p_try
+        settings.funcCalls += 1
 
         val tmp = delta_y.times(weight)
         X2_try = delta_y.as2D().transpose().dot(tmp) // Chi-squared error criteria
@@ -320,10 +323,10 @@ public fun DoubleTensorAlgebra.lm(
         if (prnt > 1) {
             val chi_sq = X2 / DoF
             resultInfo.iterations = settings.iteration
-            resultInfo.func_calls = settings.func_calls
-            resultInfo.result_chi_sq = chi_sq
-            resultInfo.result_lambda = lambda
-            resultInfo.result_parameters = p
+            resultInfo.funcCalls = settings.funcCalls
+            resultInfo.resultChiSq = chi_sq
+            resultInfo.resultLambda = lambda
+            resultInfo.resultParameters = p
         }
 
         // update convergence history ... save _reduced_ Chi-square
@@ -331,30 +334,32 @@ public fun DoubleTensorAlgebra.lm(
 
         if (abs(JtWdy).max()!! < epsilon_1 && settings.iteration > 2) {
            resultInfo.typeOfConvergence = TypeOfConvergence.InGradient
-           resultInfo.epsilon = epsilon_1
           stop = true
        }
        if ((abs(h.as2D()).div(abs(p) + 1e-12)).max() < epsilon_2 && settings.iteration > 2) {
           resultInfo.typeOfConvergence = TypeOfConvergence.InParameters
-          resultInfo.epsilon = epsilon_2
          stop = true
       }
      if (X2 / DoF < epsilon_3 && settings.iteration > 2) {
         resultInfo.typeOfConvergence = TypeOfConvergence.InReducedChiSquare
-        resultInfo.epsilon = epsilon_3
        stop = true
    }
    if (settings.iteration == MaxIter) {
        resultInfo.typeOfConvergence = TypeOfConvergence.NoConvergence
-        resultInfo.epsilon = 0.0
        stop = true
    }
    }
    return resultInfo
 }
 
+private data class LMSettings (
+    var iteration:Int,
+    var funcCalls: Int,
+    var exampleNumber:Int
+)
 
 /* matrix -> column of all elemnets */
-public fun make_column(tensor: MutableStructure2D<Double>) : MutableStructure2D<Double> {
+private fun make_column(tensor: MutableStructure2D<Double>) : MutableStructure2D<Double> {
     val shape = intArrayOf(tensor.shape.component1() * tensor.shape.component2(), 1)
     val buffer = DoubleArray(tensor.shape.component1() * tensor.shape.component2())
     for (i in 0 until tensor.shape.component1()) {
@@ -367,11 +372,11 @@ public fun make_column(tensor: MutableStructure2D<Double>) : MutableStructure2D<
 }
 
 /* column length */
-public fun length(column: MutableStructure2D<Double>) : Int {
+private fun length(column: MutableStructure2D<Double>) : Int {
     return column.shape.component1()
 }
 
-public fun MutableStructure2D<Double>.abs() {
+private fun MutableStructure2D<Double>.abs() {
     for (i in 0 until this.shape.component1()) {
         for (j in 0 until this.shape.component2()) {
             this[i, j] = kotlin.math.abs(this[i, j])
@@ -379,7 +384,7 @@ public fun MutableStructure2D<Double>.abs() {
     }
 }
 
-public fun abs(input: MutableStructure2D<Double>): MutableStructure2D<Double> {
+private fun abs(input: MutableStructure2D<Double>): MutableStructure2D<Double> {
     val tensor = BroadcastDoubleTensorAlgebra.ones(
         ShapeND(
             intArrayOf(
@@ -396,7 +401,7 @@ public fun abs(input: MutableStructure2D<Double>): MutableStructure2D<Double> {
     return tensor
 }
 
-public fun diag(input: MutableStructure2D<Double>): MutableStructure2D<Double> {
+private fun diag(input: MutableStructure2D<Double>): MutableStructure2D<Double> {
     val tensor = BroadcastDoubleTensorAlgebra.ones(ShapeND(intArrayOf(input.shape.component1(), 1))).as2D()
     for (i in 0 until tensor.shape.component1()) {
         tensor[i, 0] = input[i, i]
@@ -404,7 +409,7 @@ public fun diag(input: MutableStructure2D<Double>): MutableStructure2D<Double> {
     return tensor
 }
 
-public fun make_matrx_with_diagonal(column: MutableStructure2D<Double>): MutableStructure2D<Double> {
+private fun make_matrx_with_diagonal(column: MutableStructure2D<Double>): MutableStructure2D<Double> {
     val size = column.shape.component1()
     val tensor = BroadcastDoubleTensorAlgebra.zeros(ShapeND(intArrayOf(size, size))).as2D()
     for (i in 0 until size) {
@@ -413,12 +418,12 @@ public fun make_matrx_with_diagonal(column: MutableStructure2D<Double>): Mutable
     return tensor
 }
 
-public fun lm_eye(size: Int): MutableStructure2D<Double> {
+private fun lm_eye(size: Int): MutableStructure2D<Double> {
     val column = BroadcastDoubleTensorAlgebra.ones(ShapeND(intArrayOf(size, 1))).as2D()
     return make_matrx_with_diagonal(column)
 }
 
-public fun largest_element_comparison(a: MutableStructure2D<Double>, b: MutableStructure2D<Double>): MutableStructure2D<Double> {
+private fun largest_element_comparison(a: MutableStructure2D<Double>, b: MutableStructure2D<Double>): MutableStructure2D<Double> {
     val a_sizeX = a.shape.component1()
     val a_sizeY = a.shape.component2()
     val b_sizeX = b.shape.component1()
@@ -440,7 +445,7 @@ public fun largest_element_comparison(a: MutableStructure2D<Double>, b: MutableS
     return tensor
 }
 
-public fun smallest_element_comparison(a: MutableStructure2D<Double>, b: MutableStructure2D<Double>): MutableStructure2D<Double> {
+private fun smallest_element_comparison(a: MutableStructure2D<Double>, b: MutableStructure2D<Double>): MutableStructure2D<Double> {
     val a_sizeX = a.shape.component1()
     val a_sizeY = a.shape.component2()
     val b_sizeX = b.shape.component1()
@@ -462,7 +467,7 @@ public fun smallest_element_comparison(a: MutableStructure2D<Double>, b: Mutable
     return tensor
 }
 
-public fun get_zero_indices(column: MutableStructure2D<Double>, epsilon: Double = 0.000001): MutableStructure2D<Double>? {
+private fun get_zero_indices(column: MutableStructure2D<Double>, epsilon: Double = 0.000001): MutableStructure2D<Double>? {
    var idx = emptyArray<Double>()
    for (i in 0 until column.shape.component1()) {
        if (kotlin.math.abs(column[i, 0]) > epsilon) {
@@ -475,14 +480,14 @@ public fun get_zero_indices(column: MutableStructure2D<Double>, epsilon: Double
     return null
 }
 
-public fun feval(func: (MutableStructure2D<Double>, MutableStructure2D<Double>, LMSettings) -> MutableStructure2D<Double>,
-    t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, settings: LMSettings)
+private fun feval(func: (MutableStructure2D<Double>, MutableStructure2D<Double>, Int) -> MutableStructure2D<Double>,
+    t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, exampleNumber: Int)
     : MutableStructure2D<Double>
 {
-    return func(t, p, settings)
+    return func(t, p, exampleNumber)
 }
 
-public fun lm_matx(func: (MutableStructure2D<Double>, MutableStructure2D<Double>, LMSettings) -> MutableStructure2D<Double>,
+private fun lm_matx(func: (MutableStructure2D<Double>, MutableStructure2D<Double>, Int) -> MutableStructure2D<Double>,
     t: MutableStructure2D<Double>, p_old: MutableStructure2D<Double>, y_old: MutableStructure2D<Double>,
     dX2: Int, J_input: MutableStructure2D<Double>, p: MutableStructure2D<Double>,
     y_dat: MutableStructure2D<Double>, weight: MutableStructure2D<Double>, dp:MutableStructure2D<Double>, settings:LMSettings) : Array<MutableStructure2D<Double>>
@@ -492,8 +497,8 @@ public fun lm_matx(func: (MutableStructure2D<Double>, MutableStructure2D<Double>
     val Npnt = length(y_dat) // number of data points
     val Npar = length(p) // number of parameters
 
-    val y_hat = feval(func, t, p, settings) // evaluate model using parameters 'p'
-    settings.func_calls += 1
+    val y_hat = feval(func, t, p, settings.exampleNumber) // evaluate model using parameters 'p'
+    settings.funcCalls += 1
 
     var J = J_input
 
@@ -513,7 +518,7 @@ public fun lm_matx(func: (MutableStructure2D<Double>, MutableStructure2D<Double>
     return arrayOf(JtWJ,JtWdy,Chi_sq,y_hat,J)
 }
 
-public fun lm_Broyden_J(p_old: MutableStructure2D<Double>, y_old: MutableStructure2D<Double>, J_input: MutableStructure2D<Double>,
+private fun lm_Broyden_J(p_old: MutableStructure2D<Double>, y_old: MutableStructure2D<Double>, J_input: MutableStructure2D<Double>,
     p: MutableStructure2D<Double>, y: MutableStructure2D<Double>): MutableStructure2D<Double> {
     var J = J_input.copyToTensor()
 
@@ -524,7 +529,7 @@ public fun lm_Broyden_J(p_old: MutableStructure2D<Double>, y_old: MutableStructu
     return J.as2D()
 }
 
-public fun lm_FD_J(func: (MutableStructure2D<Double>, MutableStructure2D<Double>, settings: LMSettings) -> MutableStructure2D<Double>,
+private fun lm_FD_J(func: (MutableStructure2D<Double>, MutableStructure2D<Double>, exampleNumber: Int) -> MutableStructure2D<Double>,
     t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, y: MutableStructure2D<Double>,
     dp: MutableStructure2D<Double>, settings: LMSettings): MutableStructure2D<Double> {
     // default: dp = 0.001 * ones(1,n)
@@ -543,8 +548,8 @@ public fun lm_FD_J(func: (MutableStructure2D<Double>, MutableStructure2D<Double>
 
         val epsilon = 0.0000001
         if (kotlin.math.abs(del[j, 0]) > epsilon) {
-            val y1 = feval(func, t, p, settings)
-            settings.func_calls += 1
+            val y1 = feval(func, t, p, settings.exampleNumber)
+            settings.funcCalls += 1
 
             if (dp[j, 0] < 0) { // backwards difference
                 for (i in 0 until J.shape.component1()) {
@@ -556,9 +561,9 @@ public fun lm_FD_J(func: (MutableStructure2D<Double>, MutableStructure2D<Double>
                 println("Potential mistake")
                 p[j, 0] = ps[j, 0] - del[j, 0] // central difference, additional func call
                 for (i in 0 until J.shape.component1()) {
-                    J[i, j] = (y1.as2D().minus(feval(func, t, p, settings)).as2D())[i, 0] / (2 * del[j, 0])
+                    J[i, j] = (y1.as2D().minus(feval(func, t, p, settings.exampleNumber)).as2D())[i, 0] / (2 * del[j, 0])
                 }
-                settings.func_calls += 1
+                settings.funcCalls += 1
             }
         }
 
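The documentation comment added above spells out what LMResultInfo now carries after the rename (funcCalls, resultChiSq, resultLambda, resultParameters) and the removal of example_number and epsilon. As a hedged illustration, the helper below is hypothetical (not part of this commit) and only shows how calling code reads the renamed public fields:

```kotlin
import space.kscience.kmath.tensors.core.LMResultInfo

// Hypothetical helper: prints the fields of the result returned by DoubleTensorAlgebra.lm(...).
fun printLmSummary(result: LMResultInfo) {
    println(result.iterations)        // number of completed iterations
    println(result.funcCalls)         // was: result.func_calls
    println(result.resultChiSq)       // was: result.result_chi_sq
    println(result.resultLambda)      // was: result.result_lambda
    println(result.resultParameters)  // was: result.result_parameters (final parameters)
    println(result.typeOfConvergence) // unchanged; example_number and epsilon were removed
}
```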
@@ -20,23 +20,23 @@ import kotlin.test.assertEquals
 
 class TestLmAlgorithm {
     companion object {
-        fun funcEasyForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, settings: LMSettings): MutableStructure2D<Double> {
+        fun funcEasyForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, exampleNumber: Int): MutableStructure2D<Double> {
             val m = t.shape.component1()
             var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(m, 1)))
 
-            if (settings.example_number == 1) {
+            if (exampleNumber == 1) {
                 y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0]))).times(p[0, 0]) + t.times(p[2, 0]).times(
                     DoubleTensorAlgebra.exp((t.times(-1.0 / p[3, 0])))
                 )
             }
-            else if (settings.example_number == 2) {
+            else if (exampleNumber == 2) {
                 val mt = t.max()
                 y_hat = (t.times(1.0 / mt)).times(p[0, 0]) +
                         (t.times(1.0 / mt)).pow(2).times(p[1, 0]) +
                         (t.times(1.0 / mt)).pow(3).times(p[2, 0]) +
                         (t.times(1.0 / mt)).pow(4).times(p[3, 0])
             }
-            else if (settings.example_number == 3) {
+            else if (exampleNumber == 3) {
                 y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0])))
                     .times(p[0, 0]) + DoubleTensorAlgebra.sin((t.times(1.0 / p[3, 0]))).times(p[2, 0])
             }
@@ -44,7 +44,7 @@ class TestLmAlgorithm {
             return y_hat.as2D()
         }
 
-        fun funcMiddleForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, settings: LMSettings): MutableStructure2D<Double> {
+        fun funcMiddleForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, exampleNumber: Int): MutableStructure2D<Double> {
             val m = t.shape.component1()
             var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1)))
 
@@ -54,13 +54,13 @@ class TestLmAlgorithm {
             }
 
             for(i in 0 until 5){
-                y_hat = funcEasyForLm(y_hat.as2D(), p, settings).asDoubleTensor()
+                y_hat = funcEasyForLm(y_hat.as2D(), p, exampleNumber).asDoubleTensor()
             }
 
             return y_hat.as2D()
         }
 
-        fun funcDifficultForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, settings: LMSettings): MutableStructure2D<Double> {
+        fun funcDifficultForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>, exampleNumber: Int): MutableStructure2D<Double> {
             val m = t.shape.component1()
             var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf (m, 1)))
 
@@ -70,12 +70,11 @@ class TestLmAlgorithm {
             }
 
             for(i in 0 until 4){
-                y_hat = funcEasyForLm((y_hat.as2D() + t).as2D(), p, settings).asDoubleTensor()
+                y_hat = funcEasyForLm((y_hat.as2D() + t).as2D(), p, exampleNumber).asDoubleTensor()
             }
 
             return y_hat.as2D()
         }
-
     }
     @Test
     fun testLMEasy() = DoubleTensorAlgebra {
@@ -130,18 +129,17 @@ class TestLmAlgorithm {
 
         val result = lm(::funcEasyForLm, p_init, t, y_dat, weight, dp, p_min, p_max, consts, opts, 10, example_number)
         assertEquals(13, result.iterations)
-        assertEquals(31, result.func_calls)
-        assertEquals(1, result.example_number)
-        assertEquals(0.9131368192633, (result.result_chi_sq * 1e13).roundToLong() / 1e13)
-        assertEquals(3.7790980 * 1e-7, (result.result_lambda * 1e13).roundToLong() / 1e13)
+        assertEquals(31, result.funcCalls)
+        assertEquals(0.9131368192633, (result.resultChiSq * 1e13).roundToLong() / 1e13)
+        assertEquals(3.7790980 * 1e-7, (result.resultLambda * 1e13).roundToLong() / 1e13)
         assertEquals(result.typeOfConvergence, TypeOfConvergence.InParameters)
         val expectedParameters = BroadcastDoubleTensorAlgebra.fromArray(
             ShapeND(intArrayOf(4, 1)), doubleArrayOf(20.527230909086, 9.833627103230, 0.997571256572, 50.174445822506)
         ).as2D()
-        result.result_parameters = result.result_parameters.map { x -> (x * 1e12).toLong() / 1e12}.as2D()
+        result.resultParameters = result.resultParameters.map { x -> (x * 1e12).toLong() / 1e12}.as2D()
        val receivedParameters = BroadcastDoubleTensorAlgebra.fromArray(
-            ShapeND(intArrayOf(4, 1)), doubleArrayOf(result.result_parameters[0, 0], result.result_parameters[1, 0],
-                result.result_parameters[2, 0], result.result_parameters[3, 0])
+            ShapeND(intArrayOf(4, 1)), doubleArrayOf(result.resultParameters[0, 0], result.resultParameters[1, 0],
+                result.resultParameters[2, 0], result.resultParameters[3, 0])
        ).as2D()
        assertEquals(expectedParameters[0, 0], receivedParameters[0, 0])
        assertEquals(expectedParameters[1, 0], receivedParameters[1, 0])
@@ -163,9 +161,9 @@ class TestLmAlgorithm {
             p_example[i, 0] = p_example[i, 0] + i - 25
         }
 
-        val settings = LMSettings(0, 0, 1)
+        val exampleNumber = 1
 
-        var y_hat = funcMiddleForLm(t_example, p_example, settings)
+        var y_hat = funcMiddleForLm(t_example, p_example, exampleNumber)
 
         var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
         for (i in 0 until Nparams) {
@@ -219,9 +217,9 @@ class TestLmAlgorithm {
         p_example[i, 0] = p_example[i, 0] + i - 25
         }
 
-        val settings = LMSettings(0, 0, 1)
+        val exampleNumber = 1
 
-        var y_hat = funcDifficultForLm(t_example, p_example, settings)
+        var y_hat = funcDifficultForLm(t_example, p_example, exampleNumber)
 
         var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
         for (i in 0 until Nparams) {
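Taken together, a caller-side sketch of the updated streaming API. This assumes it runs in the same example source set as streamLm (whose package is not shown in the diff), and the coroutine setup, launch frequency and number of launches are assumptions for illustration only:

```kotlin
import kotlinx.coroutines.flow.collect
import kotlinx.coroutines.runBlocking
import space.kscience.kmath.tensors.LevenbergMarquardt.funcDifficultForLm
import space.kscience.kmath.tensors.LevenbergMarquardt.getStartDataForFuncDifficult

fun main() = runBlocking {
    // streamLm now expects a KFunction3<..., Int, ...>: the Int is the example number,
    // so ::funcDifficultForLm matches the new signature directly (no LMSettings needed).
    streamLm(::funcDifficultForLm, getStartDataForFuncDifficult(), launchFrequencyInMs = 1000, numberOfLaunches = 5)
        .collect { parameters ->
            // Each emission is the resultParameters tensor of one lm run.
            println(parameters[0, 0])
        }
}
```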