forked from kscience/kmath
Merge pull request #4 from margarita0303/streaming_lm_algorithm
tests changed
Commit 2ead722620
@@ -52,8 +52,8 @@ fun main() {
     val consts = BroadcastDoubleTensorAlgebra.fromArray(
         ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
     ).as2D()
-    val opts = doubleArrayOf(3.0, 10000.0, 1e-2, 0.015, 1e-2, 1e-2, 1e-2, 11.0, 9.0, 1.0)
-    // val opts = doubleArrayOf(3.0, 10000.0, 1e-5, 1e-5, 1e-5, 1e-5, 1e-3, 11.0, 9.0, 1.0)
+    val opts = doubleArrayOf(3.0, 10000.0, 1e-6, 1e-6, 1e-6, 1e-6, 1e-2, 11.0, 9.0, 1.0)
+    // val opts = doubleArrayOf(3.0, 10000.0, 1e-6, 1e-6, 1e-6, 1e-6, 1e-3, 11.0, 9.0, 1.0)

     val result = DoubleTensorAlgebra.lm(
         ::funcDifficultForLm,
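For reference, a commented sketch of the ten-slot opts vector used throughout these examples. The slot meanings are an assumption based on the conventional lm.m options layout that this port appears to mirror; they are not stated in this commit.

// Slot meanings assumed from the classic lm.m option vector, not taken from this commit:
val opts = doubleArrayOf(
    3.0,      // print/example selector
    10000.0,  // maximum number of iterations
    1e-6,     // epsilon_1: convergence tolerance for the gradient
    1e-6,     // epsilon_2: convergence tolerance for parameter changes
    1e-6,     // epsilon_3: convergence tolerance for chi-squared
    1e-6,     // epsilon_4: threshold for accepting a step
    1e-2,     // lambda_0: initial damping factor
    11.0,     // factor by which lambda grows on a rejected step
    9.0,      // factor by which lambda shrinks on an accepted step
    1.0       // update strategy selector
)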
@@ -12,6 +12,7 @@ import space.kscience.kmath.tensors.LevenbergMarquardt.funcMiddleForLm
 import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
 import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.times
 import space.kscience.kmath.tensors.core.internal.LMSettings
 import kotlin.math.roundToInt
 fun main() {
@@ -52,7 +53,7 @@ fun main() {
     val consts = BroadcastDoubleTensorAlgebra.fromArray(
         ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
     ).as2D()
-    val opts = doubleArrayOf(3.0, 10000.0, 1e-5, 1e-5, 1e-5, 1e-5, 1e-5, 11.0, 9.0, 1.0)
+    val opts = doubleArrayOf(3.0, 10000.0, 1e-3, 1e-3, 1e-3, 1e-3, 1e-15, 11.0, 9.0, 1.0)

     val result = DoubleTensorAlgebra.lm(
         ::funcMiddleForLm,
@@ -76,7 +77,7 @@ fun main() {
     }
     println()

-    println("Y true and y received:")
     var y_hat_after = funcMiddleForLm(t_example, result.result_parameters, settings)
     for (i in 0 until y_hat.shape.component1()) {
         val x = (y_hat[i, 0] * 10000).roundToInt() / 10000.0
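The printouts in this example round values to four decimal places with an integer round-trip; a minimal self-contained sketch of the same trick:

import kotlin.math.roundToInt

fun main() {
    val value = 16.70234
    // Scale up, round to the nearest integer, scale back down: four decimal places remain.
    val rounded = (value * 10000).roundToInt() / 10000.0
    println(rounded) // prints 16.7023
}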
@@ -6,20 +6,27 @@
 package space.kscience.kmath.tensors.LevenbergMarquardt.StreamingLm

 import space.kscience.kmath.nd.*
-import space.kscience.kmath.tensors.LevenbergMarquardt.funcEasyForLm
-import space.kscience.kmath.tensors.LevenbergMarquardt.getStartDataForFuncEasy
+import space.kscience.kmath.tensors.LevenbergMarquardt.*
 import kotlin.math.roundToInt

 suspend fun main(){
-    val startData = getStartDataForFuncEasy()
+    val startData = getStartDataForFuncDifficult()
     // Creating the flow:
-    val lmFlow = streamLm(::funcEasyForLm, startData, 1000, 10)
+    val lmFlow = streamLm(::funcDifficultForLm, startData, 0, 100)
+    var initialTime = System.currentTimeMillis()
+    var lastTime: Long
+    val launches = mutableListOf<Long>()
     // Launching the flow
     lmFlow.collect { parameters ->
+        lastTime = System.currentTimeMillis()
+        launches.add(lastTime - initialTime)
+        initialTime = lastTime
         for (i in 0 until parameters.shape.component1()) {
             val x = (parameters[i, 0] * 10000).roundToInt() / 10000.0
             print("$x ")
             if (i == parameters.shape.component1() - 1) println()
         }
     }

+    println("Average without first is: ${launches.subList(1, launches.size - 1).average()}")
 }
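The lines added above time the gap between consecutive emissions of the result flow and average them. A minimal sketch of the same measurement pattern against a plain kotlinx.coroutines flow; the delayed flow below is a stand-in for streamLm, not the library call:

import kotlinx.coroutines.delay
import kotlinx.coroutines.flow.flow
import kotlinx.coroutines.runBlocking

fun main() = runBlocking {
    // Stand-in for streamLm: emits ten updates, one every 100 ms.
    val updates = flow {
        repeat(10) { i ->
            delay(100)
            emit(i)
        }
    }

    var previous = System.currentTimeMillis()
    val gaps = mutableListOf<Long>()
    updates.collect {
        val now = System.currentTimeMillis()
        gaps.add(now - previous) // time since the previous emission
        previous = now
    }
    // The first gap includes start-up cost, so it is dropped before averaging.
    println("Average without first is: ${gaps.drop(1).average()}")
}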
@@ -10,6 +10,7 @@ import space.kscience.kmath.nd.ShapeND
 import space.kscience.kmath.nd.as2D
 import space.kscience.kmath.nd.component1
 import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.max
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.plus
@@ -17,6 +18,7 @@ import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.pow
 import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.times
 import space.kscience.kmath.tensors.core.asDoubleTensor
 import space.kscience.kmath.tensors.core.internal.LMSettings
+import kotlin.math.roundToInt

 public data class StartDataLm (
     var lm_matx_y_dat: MutableStructure2D<Double>,
@@ -88,6 +90,91 @@ fun funcEasyForLm(t: MutableStructure2D<Double>, p: MutableStructure2D<Double>,
     return y_hat.as2D()
 }

+fun getStartDataForFuncDifficult(): StartDataLm {
+    val NData = 200
+    var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
+    for (i in 0 until NData) {
+        t_example[i, 0] = t_example[i, 0] * (i + 1) - 104
+    }
+
+    val Nparams = 15
+    var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
+    for (i in 0 until Nparams) {
+        p_example[i, 0] = p_example[i, 0] + i - 25
+    }
+
+    val settings = LMSettings(0, 0, 1)
+
+    var y_hat = funcDifficultForLm(t_example, p_example, settings)
+
+    var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
+    for (i in 0 until Nparams) {
+        p_init[i, 0] = (p_example[i, 0] + 0.9)
+    }
+
+    var t = t_example
+    val y_dat = y_hat
+    val weight = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), DoubleArray(1) { 1.0 / Nparams * 1.0 - 0.085 }
+    ).as2D()
+    val dp = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
+    ).as2D()
+    var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+    p_min = p_min.div(1.0 / -50.0)
+    val p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+    p_min = p_min.div(1.0 / 50.0)
+    val consts = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
+    ).as2D()
+    val opts = doubleArrayOf(3.0, 10000.0, 1e-2, 1e-3, 1e-2, 1e-2, 1e-2, 11.0, 9.0, 1.0)
+
+    return StartDataLm(y_dat, 1, p_init, t, y_dat, weight, dp, p_min.as2D(), p_max.as2D(), consts, opts)
+}
+
+fun getStartDataForFuncMiddle(): StartDataLm {
+    val NData = 100
+    var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
+    for (i in 0 until NData) {
+        t_example[i, 0] = t_example[i, 0] * (i + 1)
+    }
+
+    val Nparams = 20
+    var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
+    for (i in 0 until Nparams) {
+        p_example[i, 0] = p_example[i, 0] + i - 25
+    }
+
+    val settings = LMSettings(0, 0, 1)
+
+    var y_hat = funcMiddleForLm(t_example, p_example, settings)
+
+    var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
+    for (i in 0 until Nparams) {
+        p_init[i, 0] = (p_example[i, 0] + 10.0)
+    }
+    var t = t_example
+    val y_dat = y_hat
+    val weight = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), DoubleArray(1) { 1.0 }
+    ).as2D()
+    val dp = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
+    ).as2D()
+    var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+    p_min = p_min.div(1.0 / -50.0)
+    val p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
+    p_min = p_min.div(1.0 / 50.0)
+    val consts = BroadcastDoubleTensorAlgebra.fromArray(
+        ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
+    ).as2D()
+    val opts = doubleArrayOf(3.0, 10000.0, 1e-5, 1e-5, 1e-5, 1e-5, 1e-5, 11.0, 9.0, 1.0)
+
+    var example_number = 1
+
+    return StartDataLm(y_dat, example_number, p_init, t, y_dat, weight, dp, p_min.as2D(), p_max.as2D(), consts, opts)
+}
+
 fun getStartDataForFuncEasy(): StartDataLm {
     val lm_matx_y_dat = doubleArrayOf(
         19.6594, 18.6096, 17.6792, 17.2747, 16.3065, 17.1458, 16.0467, 16.7023, 15.7809, 15.9807,
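The two new start-data builders plug directly into the streaming entry point from the earlier example. A usage sketch, assuming the same streamLm call shape (fitting function, start data, launch delay, iteration budget) and placed in the same StreamingLm package so streamLm resolves without further imports:

package space.kscience.kmath.tensors.LevenbergMarquardt.StreamingLm

import space.kscience.kmath.tensors.LevenbergMarquardt.*

suspend fun main() {
    // Middle-difficulty fit, streamed with the same argument style as the example above.
    val startData = getStartDataForFuncMiddle()
    val lmFlow = streamLm(::funcMiddleForLm, startData, 0, 100)
    lmFlow.collect { parameters ->
        // Print the first fitted parameter of every emitted update.
        println(parameters[0, 0])
    }
}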
@@ -7,7 +7,7 @@ kscience{
     js {
         browser {
             testTask {
-                useMocha().timeout = "100000"
+                useMocha().timeout = "0"
             }
         }
     }
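Setting the Mocha timeout to "0" turns off Mocha's per-test time limit, which the long-running LM browser tests need. An annotated sketch of the same Gradle Kotlin DSL block, assuming the surrounding kscience configuration shown above:

kscience {
    js {
        browser {
            testTask {
                // "0" disables Mocha's per-test timeout so slow Levenberg-Marquardt tests are not aborted.
                useMocha().timeout = "0"
            }
        }
    }
}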
@@ -245,7 +245,7 @@ class TestLmAlgorithm {
         val consts = BroadcastDoubleTensorAlgebra.fromArray(
             ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
         ).as2D()
-        val opts = doubleArrayOf(3.0, 7000.0, 1e-2, 1e-1, 1e-2, 1e-2, 1e-2, 11.0, 9.0, 1.0)
+        val opts = doubleArrayOf(3.0, 7000.0, 1e-2, 1e-3, 1e-2, 1e-2, 1e-2, 11.0, 9.0, 1.0)

         val result = DoubleTensorAlgebra.lm(
             ::funcDifficultForLm,
@@ -261,7 +261,5 @@ class TestLmAlgorithm {
             10,
             1
         )

-        // assertEquals(1.15, (result.result_chi_sq * 1e2).roundToLong() / 1e2)
     }
 }
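The removed commented-out assertion rounded the final chi-squared to two decimal places before comparing. A minimal sketch of that rounding, with a made-up value standing in for result.result_chi_sq:

import kotlin.math.roundToLong

fun main() {
    val chiSq = 1.1518 // placeholder for result.result_chi_sq
    // Same two-decimal rounding the commented assertion applied.
    println((chiSq * 1e2).roundToLong() / 1e2) // prints 1.15
}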