Compare commits
14 Commits
dev
...
feature/te
Author | SHA1 | Date | |
---|---|---|---|
|
b1c714fa51 | ||
|
13fe078304 | ||
|
a497a5df1a | ||
|
97104ad40f | ||
|
eda477b2b5 | ||
|
37922365b6 | ||
|
86efe48217 | ||
|
2fa39fff14 | ||
|
25e60f85b8 | ||
|
41238d8837 | ||
|
ae9666b07b | ||
|
93b62c5bf6 | ||
|
0440764cd3 | ||
|
f8c55328a4 |
@ -0,0 +1,65 @@
|
||||
/*
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.tensors
|
||||
|
||||
import space.kscience.kmath.linear.transpose
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.nd.MutableStructure2D
|
||||
import space.kscience.kmath.nd.Structure2D
|
||||
import space.kscience.kmath.nd.as2D
|
||||
import space.kscience.kmath.tensors.core.*
|
||||
import space.kscience.kmath.tensors.core.tensorAlgebra
|
||||
import kotlin.math.*
|
||||
|
||||
/**
 * Prints this 2D structure row by row, with every entry rounded to two
 * decimal places, followed by a separator line.
 */
fun MutableStructure2D<Double>.print() {
    val rows = shape.component1()
    val cols = shape.component2()
    (0 until rows).forEach { row ->
        (0 until cols).forEach { col ->
            val rounded = (this[row, col] * 100).roundToInt() / 100.0
            print("$rounded ")
        }
        println()
    }
    println("______________")
}
|
||||
|
||||
/**
 * Demonstration of the Golub-Kahan SVD routine: decomposes a 5x3 matrix in
 * place into u (stored in the input tensor), the singular values w (3x1
 * column) and the right singular vectors v (3x3), then prints all three.
 *
 * Fixes over the previous revision: the unused `ans` pair is removed, and
 * the zero buffers for v and w are built with `DoubleArray(size)` instead
 * of hand-written zero literals / repeated array concatenation in a loop.
 */
@OptIn(PerformancePitfall::class)
fun main(): Unit = Double.tensorAlgebra.withBroadcast {
    val shape = intArrayOf(5, 3)
    val buffer = doubleArrayOf(
        1.0, 2.0, 3.0,
        2.0, 3.0, 4.0,
        3.0, 4.0, 5.0,
        4.0, 5.0, 6.0,
        5.0, 6.0, 7.0
    )
    val tensor = fromArray(shape, buffer).as2D()
    // v accumulates the right singular vectors; it starts as a 3x3 zero matrix.
    val v = fromArray(intArrayOf(3, 3), DoubleArray(9)).as2D()
    // w is a 3x1 column that will receive the singular values.
    val w = BroadcastDoubleTensorAlgebra.fromArray(intArrayOf(3, 1), DoubleArray(3)).as2D()
    tensor.print()
    // The decomposition overwrites `tensor` with u and fills v and w.
    tensor.svdGolabKahan(v, w)

    println("u")
    tensor.print()
    println("w")
    w.print()
    println("v")
    v.print()
}
|
325
examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt
Normal file
325
examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt
Normal file
@ -0,0 +1,325 @@
|
||||
package space.kscience.kmath.tensors
|
||||
|
||||
import space.kscience.kmath.nd.*
|
||||
import kotlin.math.abs
|
||||
import kotlin.math.max
|
||||
import kotlin.math.min
|
||||
import kotlin.math.sqrt
|
||||
|
||||
/*
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
/**
 * Computes sqrt(a^2 + b^2) without destructive overflow or underflow by
 * factoring out the larger magnitude before squaring the ratio.
 * Returns 0.0 when both arguments are zero.
 */
fun pythag(a: Double, b: Double): Double {
    val absA = abs(a)
    val absB = abs(b)
    return when {
        absA > absB -> {
            val ratio = absB / absA
            absA * sqrt(1.0 + ratio * ratio)
        }
        absB > 0.0 -> {
            val ratio = absA / absB
            absB * sqrt(1.0 + ratio * ratio)
        }
        else -> 0.0
    }
}
|
||||
|
||||
/**
 * Returns the magnitude of [a] carrying the sign of [b]: non-negative [b]
 * yields |a|, negative [b] yields -|a| (Fortran-style SIGN intrinsic).
 */
fun SIGN(a: Double, b: Double): Double = if (b >= 0.0) abs(a) else -abs(a)
|
||||
|
||||
// matrix v is not transposed at the output
|
||||
|
||||
/**
 * In-place singular value decomposition in the Golub-Kahan / Numerical
 * Recipes "svdcmp" style: the receiver (an m x n matrix) is first reduced
 * to bidiagonal form by alternating left/right Householder reflections,
 * and the bidiagonal form is then diagonalised by implicit-shift QR
 * iterations (at most 30 sweeps per singular value).
 *
 * On return the receiver is overwritten with the left singular vectors u,
 * [w] (an n x 1 column) receives the singular values, and [v] receives the
 * right singular vectors. Matrix v is not transposed at the output.
 *
 * NOTE(review): work in progress - a block below overwrites the third
 * column of u with hard-coded values for the 5x3 example matrix, so the
 * routine currently produces wrong results for any other input.
 * Presumably assumes m >= n - TODO confirm.
 */
internal fun MutableStructure2D<Double>.svdGolabKahan(v: MutableStructure2D<Double>, w: MutableStructure2D<Double>) {
    val shape = this.shape
    val m = shape.component1()
    val n = shape.component2()
    var f = 0.0
    // rv1 stores the super-diagonal of the intermediate bidiagonal matrix.
    val rv1 = DoubleArray(n)
    var s = 0.0
    var scale = 0.0
    // anorm tracks the largest (|diagonal| + |super-diagonal|) seen; it is
    // the reference magnitude for the convergence tests below.
    var anorm = 0.0
    var g = 0.0
    var l = 0
    // Householder reduction to bidiagonal form: for each index i, zero out
    // the sub-diagonal part of column i, then the part of row i beyond the
    // super-diagonal.
    for (i in 0 until n) {
        /* left-hand reduction */
        l = i + 1
        rv1[i] = scale * g
        g = 0.0
        s = 0.0
        scale = 0.0
        if (i < m) {
            // Scale by the 1-norm of the column tail to avoid overflow.
            for (k in i until m) {
                scale += abs(this[k, i]);
            }
            if (scale != 0.0) {
                for (k in i until m) {
                    this[k, i] = (this[k, i] / scale)
                    s += this[k, i] * this[k, i]
                }
                f = this[i, i]
                // g takes the sign opposite to the pivot to avoid cancellation.
                if (f >= 0) {
                    g = (-1) * abs(sqrt(s))
                } else {
                    g = abs(sqrt(s))
                }
                val h = f * g - s
                this[i, i] = f - g
                if (i != n - 1) {
                    // Apply the reflection to the remaining columns.
                    for (j in l until n) {
                        s = 0.0
                        for (k in i until m) {
                            s += this[k, i] * this[k, j]
                        }
                        f = s / h
                        for (k in i until m) {
                            this[k, j] += f * this[k, i]
                        }
                    }
                }
                // Undo the scaling of the stored Householder vector.
                for (k in i until m) {
                    this[k, i] = this[k, i] * scale
                }
            }
        }

        w[i, 0] = scale * g
        /* right-hand reduction */
        g = 0.0
        s = 0.0
        scale = 0.0
        if (i < m && i != n - 1) {
            for (k in l until n) {
                scale += abs(this[i, k])
            }
            if (scale != 0.0) {
                for (k in l until n) {
                    this[i, k] = this[i, k] / scale
                    s += this[i, k] * this[i, k]
                }
                f = this[i, l]
                if (f >= 0) {
                    g = (-1) * abs(sqrt(s))
                } else {
                    g = abs(sqrt(s))
                }
                val h = f * g - s
                this[i, l] = f - g
                for (k in l until n) {
                    rv1[k] = this[i, k] / h
                }
                if (i != m - 1) {
                    // Apply the reflection to the remaining rows.
                    for (j in l until m) {
                        s = 0.0
                        for (k in l until n) {
                            s += this[j, k] * this[i, k]
                        }
                        for (k in l until n) {
                            this[j, k] += s * rv1[k]
                        }
                    }
                }
                for (k in l until n) {
                    this[i, k] = this[i, k] * scale
                }
            }
        }
        anorm = max(anorm, (abs(w[i, 0]) + abs(rv1[i])));
    }

    // Accumulate the right-hand transformations into v, back to front.
    for (i in n - 1 downTo 0) {
        if (i < n - 1) {
            if (g != 0.0) {
                // Double division here avoids possible underflow.
                for (j in l until n) {
                    v[j, i] = (this[i, j] / this[i, l]) / g
                }
                for (j in l until n) {
                    s = 0.0
                    for (k in l until n)
                        s += this[i, k] * v[k, j]
                    for (k in l until n)
                        v[k, j] += s * v[k, i]
                }
            }
            for (j in l until n) {
                v[i, j] = 0.0
                v[j, i] = 0.0
            }
        }
        v[i, i] = 1.0
        g = rv1[i]
        l = i
    }

    // Original author's note (translated): up to this point everything is
    // computed correctly; beyond here it is not.

    // Accumulate the left-hand transformations into the receiver (u).
    for (i in min(n, m) - 1 downTo 0) {
        l = i + 1
        g = w[i, 0]
        for (j in l until n) {
            this[i, j] = 0.0
        }
        if (g != 0.0) {
            // !!!!! division by a near-zero value happens here (translated)
            g = 1.0 / g
            for (j in l until n) {
                s = 0.0
                for (k in l until m) {
                    s += this[k, i] * this[k, j]
                }
                f = (s / this[i, i]) * g
                for (k in i until m) {
                    this[k, j] += f * this[k, i]
                }
            }
            for (j in i until m) {
                this[j, i] *= g
            }
        } else {
            // Zero singular value: the corresponding column of u is zeroed.
            for (j in i until m) {
                this[j, i] = 0.0
            }
        }
        this[i, i] += 1.0
    }

    // println("matrix")
    // this.print()
    // At this point the matrix should look like this (translated):
    // 0.134840 -0.762770 0.522117
    // -0.269680 -0.476731 -0.245388
    // -0.404520 -0.190693 -0.527383
    // -0.539360 0.095346 -0.297540
    // -0.674200 0.381385 0.548193

    // NOTE(review): debugging hack - the third column of u is overwritten
    // with known-correct values for the 5x3 example so the code below can
    // be validated. It makes the routine wrong for every other input and
    // will throw for matrices smaller than 5x3; remove once the bug above
    // is fixed.
    this[0, 2] = 0.522117
    this[1, 2] = -0.245388
    this[2, 2] = -0.527383
    this[3, 2] = -0.297540
    this[4, 2] = 0.548193

    // Original author's note (translated): set the correct values in order
    // to check the code further on; from here on everything is correct.

    // Diagonalisation of the bidiagonal form: implicit-shift QR iteration,
    // processing singular values from the last to the first.
    var flag = 0
    var nm = 0
    var c = 0.0
    var h = 0.0
    var y = 0.0
    var z = 0.0
    var x = 0.0
    for (k in n - 1 downTo 0) {
        // At most 30 QR sweeps per singular value.
        for (its in 1 until 30) {
            flag = 1
            // Look for a negligible super-diagonal element to split the
            // problem; "abs(x) + anorm == anorm" tests x against machine
            // precision relative to anorm.
            for (newl in k downTo 0) {
                nm = newl - 1
                if (abs(rv1[newl]) + anorm == anorm) {
                    flag = 0
                    l = newl
                    break
                }
                if (abs(w[nm, 0]) + anorm == anorm) {
                    l = newl
                    break
                }
            }

            if (flag != 0) {
                // Cancellation of rv1[l] when w[l-1] is negligible: rotate
                // it away with Givens rotations, updating u accordingly.
                c = 0.0
                s = 1.0
                for (i in l until k) {
                    f = s * rv1[i]
                    rv1[i] = c * rv1[i]
                    if (abs(f) + anorm == anorm) {
                        break
                    }
                    h = pythag(f, g)
                    w[i, 0] = h
                    h = 1.0 / h
                    c = g * h
                    s = (-f) * h
                    for (j in 0 until m) {
                        y = this[j, nm]
                        z = this[j, i]
                        this[j, nm] = y * c + z * s
                        this[j, i] = z * c - y * s
                    }
                }
            }

            z = w[k, 0]
            if (l == k) {
                // Convergence: make the singular value non-negative,
                // flipping the corresponding column of v if needed.
                if (z < 0.0) {
                    w[k, 0] = -z
                    for (j in 0 until n)
                        v[j, k] = -v[j, k]
                }
                break
            }

            // Original author's note (translated): need to decide what to
            // do here - throw an error on non-convergence?
            // if (its == 30) {
            // return
            // }

            // Compute the implicit shift from the bottom 2x2 minor of the
            // bidiagonal matrix (Numerical Recipes svdcmp formula).
            x = w[l, 0]
            nm = k - 1
            y = w[nm, 0]
            g = rv1[nm]
            h = rv1[k]
            f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y)
            g = pythag(f, 1.0)
            f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x
            c = 1.0
            s = 1.0

            // One QR sweep: chase the bulge from l down to k with Givens
            // rotations, accumulating them into v and u.
            var i = 0
            for (j in l until nm + 1) {
                i = j + 1
                g = rv1[i]
                y = w[i, 0]
                h = s * g
                g = c * g
                z = pythag(f, h)
                rv1[j] = z
                c = f / z
                s = h / z
                f = x * c + g * s
                g = g * c - x * s
                h = y * s
                y *= c

                for (jj in 0 until n) {
                    x = v[jj, j];
                    z = v[jj, i];
                    v[jj, j] = x * c + z * s;
                    v[jj, i] = z * c - x * s;
                }
                z = pythag(f, h)
                w[j, 0] = z
                // The rotation can be arbitrary when z is zero.
                if (z != 0.0) {
                    z = 1.0 / z
                    c = f * z
                    s = h * z
                }
                f = c * g + s * y
                x = c * y - s * g
                for (jj in 0 until m) {
                    y = this[jj, j]
                    z = this[jj, i]
                    this[jj, j] = y * c + z * s
                    this[jj, i] = z * c - y * s
                }
            }
            rv1[l] = 0.0
            rv1[k] = f
            w[k, 0] = x
        }
    }
}
|
@ -810,7 +810,7 @@ public open class DoubleTensorAlgebra :
|
||||
val lTensor = zeroesLike()
|
||||
|
||||
for ((a, l) in tensor.matrixSequence().zip(lTensor.matrixSequence()))
|
||||
for (i in 0 until n) choleskyHelper(a.as2D(), l.as2D(), n)
|
||||
choleskyHelper(a.as2D(), l.as2D(), n)
|
||||
|
||||
return lTensor
|
||||
}
|
||||
|
@ -8,114 +8,255 @@ package space.kscience.kmath.tensors.core
|
||||
import space.kscience.kmath.operations.invoke
|
||||
import kotlin.math.*
|
||||
import kotlin.test.Test
|
||||
import kotlin.test.assertEquals
|
||||
import kotlin.test.assertTrue
|
||||
|
||||
internal class TestDoubleAnalyticTensorAlgebra {
|
||||
|
||||
val shape = intArrayOf(2, 1, 3, 2)
|
||||
val buffer = doubleArrayOf(
|
||||
27.1, 20.0, 19.84,
|
||||
23.123, 3.0, 2.0,
|
||||
val shapeWithNegative = intArrayOf(4)
|
||||
val bufferWithNegative = doubleArrayOf(9.3348, -7.5889, -1.2005, 1.1584)
|
||||
val tensorWithNegative = DoubleTensor(shapeWithNegative, bufferWithNegative)
|
||||
|
||||
3.23, 133.7, 25.3,
|
||||
100.3, 11.0, 12.012
|
||||
)
|
||||
val tensor = DoubleTensor(shape, buffer)
|
||||
val shape1 = intArrayOf(4)
|
||||
val buffer1 = doubleArrayOf(1.3348, 1.5889, 1.2005, 1.1584)
|
||||
val tensor1 = DoubleTensor(shape1, buffer1)
|
||||
|
||||
val shape2 = intArrayOf(2, 2)
|
||||
val buffer2 = doubleArrayOf(1.0, 9.456, 3.0, 4.0)
|
||||
val tensor2 = DoubleTensor(shape2, buffer2)
|
||||
|
||||
val shape3 = intArrayOf(2, 3, 2)
|
||||
val buffer3 = doubleArrayOf(1.0, 9.456, 7.0, 2.123, 1.0, 9.456, 30.8888, 6.0, 1.0, 9.456, 3.0, 4.99)
|
||||
val tensor3 = DoubleTensor(shape3, buffer3)
|
||||
|
||||
val shape4 = intArrayOf(2, 1, 3, 2)
|
||||
val buffer4 = doubleArrayOf(27.1, 20.0, 19.84, 23.123, 3.0, 2.0, 3.23, 133.7, 25.3, 100.3, 11.0, 12.012)
|
||||
val tensor4 = DoubleTensor(shape4, buffer4)
|
||||
|
||||
val bufferWithNegativeMod1 = bufferWithNegative.map { x -> x % 1 }.toDoubleArray()
|
||||
val tensorWithNegativeMod1 = DoubleTensor(shapeWithNegative, bufferWithNegativeMod1)
|
||||
|
||||
val buffer1Mod1 = buffer1.map { x -> x % 1 }.toDoubleArray()
|
||||
val tensor1Mod1 = DoubleTensor(shape1, buffer1Mod1)
|
||||
|
||||
val buffer2Mod1 = buffer2.map { x -> x % 1 }.toDoubleArray()
|
||||
val tensor2Mod1 = DoubleTensor(shape2, buffer2Mod1)
|
||||
|
||||
val buffer3Mod1 = buffer3.map { x -> x % 1 }.toDoubleArray()
|
||||
val tensor3Mod1 = DoubleTensor(shape3, buffer3Mod1)
|
||||
|
||||
val buffer4Mod1 = buffer4.map { x -> x % 1 }.toDoubleArray()
|
||||
val tensor4Mod1 = DoubleTensor(shape4, buffer4Mod1)
|
||||
|
||||
fun DoubleArray.fmap(transform: (Double) -> Double): DoubleArray {
|
||||
return this.map(transform).toDoubleArray()
|
||||
}
|
||||
|
||||
fun expectedTensor(transform: (Double) -> Double): DoubleTensor {
|
||||
return DoubleTensor(shape, buffer.fmap(transform))
|
||||
fun expectedTensorWithNegative(transform: (Double) -> Double): DoubleTensor {
|
||||
return DoubleTensor(shapeWithNegative, bufferWithNegative.fmap(transform))
|
||||
}
|
||||
|
||||
fun expectedTensor1(transform: (Double) -> Double): DoubleTensor {
|
||||
return DoubleTensor(shape1, buffer1.fmap(transform))
|
||||
}
|
||||
|
||||
fun expectedTensor2(transform: (Double) -> Double): DoubleTensor {
|
||||
return DoubleTensor(shape2, buffer2.fmap(transform))
|
||||
}
|
||||
|
||||
fun expectedTensor3(transform: (Double) -> Double): DoubleTensor {
|
||||
return DoubleTensor(shape3, buffer3.fmap(transform))
|
||||
}
|
||||
|
||||
fun expectedTensor4(transform: (Double) -> Double): DoubleTensor {
|
||||
return DoubleTensor(shape4, buffer4.fmap(transform))
|
||||
}
|
||||
|
||||
fun expectedTensorWithNegativeMod1(transform: (Double) -> Double): DoubleTensor {
|
||||
return DoubleTensor(shapeWithNegative, bufferWithNegativeMod1.fmap(transform))
|
||||
}
|
||||
|
||||
fun expectedTensor1Mod1(transform: (Double) -> Double): DoubleTensor {
|
||||
return DoubleTensor(shape1, buffer1Mod1.fmap(transform))
|
||||
}
|
||||
|
||||
fun expectedTensor2Mod1(transform: (Double) -> Double): DoubleTensor {
|
||||
return DoubleTensor(shape2, buffer2Mod1.fmap(transform))
|
||||
}
|
||||
|
||||
fun expectedTensor3Mod1(transform: (Double) -> Double): DoubleTensor {
|
||||
return DoubleTensor(shape3, buffer3Mod1.fmap(transform))
|
||||
}
|
||||
|
||||
fun expectedTensor4Mod1(transform: (Double) -> Double): DoubleTensor {
|
||||
return DoubleTensor(shape4, buffer4Mod1.fmap(transform))
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testExp() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.exp() eq expectedTensor(::exp) }
|
||||
assertTrue { tensorWithNegative.exp() eq expectedTensorWithNegative(::exp) }
|
||||
assertTrue { tensor1.exp() eq expectedTensor1(::exp) }
|
||||
assertTrue { tensor2.exp() eq expectedTensor2(::exp) }
|
||||
assertTrue { tensor3.exp() eq expectedTensor3(::exp) }
|
||||
assertTrue { tensor4.exp() eq expectedTensor4(::exp) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testLog() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.ln() eq expectedTensor(::ln) }
|
||||
assertTrue { tensor1.ln() eq expectedTensor1(::ln) }
|
||||
assertTrue { tensor2.ln() eq expectedTensor2(::ln) }
|
||||
assertTrue { tensor3.ln() eq expectedTensor3(::ln) }
|
||||
assertTrue { tensor4.ln() eq expectedTensor4(::ln) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testSqrt() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.sqrt() eq expectedTensor(::sqrt) }
|
||||
assertTrue { tensor1.sqrt() eq expectedTensor1(::sqrt) }
|
||||
assertTrue { tensor2.sqrt() eq expectedTensor2(::sqrt) }
|
||||
assertTrue { tensor3.sqrt() eq expectedTensor3(::sqrt) }
|
||||
assertTrue { tensor4.sqrt() eq expectedTensor4(::sqrt) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testCos() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.cos() eq expectedTensor(::cos) }
|
||||
assertTrue { tensorWithNegative.cos() eq expectedTensorWithNegative(::cos) }
|
||||
assertTrue { tensor1.cos() eq expectedTensor1(::cos) }
|
||||
assertTrue { tensor2.cos() eq expectedTensor2(::cos) }
|
||||
assertTrue { tensor3.cos() eq expectedTensor3(::cos) }
|
||||
assertTrue { tensor4.cos() eq expectedTensor4(::cos) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testAcos() = DoubleTensorAlgebra {
|
||||
assertTrue { tensorWithNegativeMod1.acos() eq expectedTensorWithNegativeMod1(::acos) }
|
||||
assertTrue { tensor1Mod1.acos() eq expectedTensor1Mod1(::acos) }
|
||||
assertTrue { tensor2Mod1.acos() eq expectedTensor2Mod1(::acos) }
|
||||
assertTrue { tensor3Mod1.acos() eq expectedTensor3Mod1(::acos) }
|
||||
assertTrue { tensor4Mod1.acos() eq expectedTensor4Mod1(::acos) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testCosh() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.cosh() eq expectedTensor(::cosh) }
|
||||
assertTrue { tensorWithNegative.cosh() eq expectedTensorWithNegative(::cosh) }
|
||||
assertTrue { tensor1.cosh() eq expectedTensor1(::cosh) }
|
||||
assertTrue { tensor2.cosh() eq expectedTensor2(::cosh) }
|
||||
assertTrue { tensor3.cosh() eq expectedTensor3(::cosh) }
|
||||
assertTrue { tensor4.cosh() eq expectedTensor4(::cosh) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testAcosh() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.acosh() eq expectedTensor(::acosh) }
|
||||
assertTrue { tensor1.acosh() eq expectedTensor1(::acosh) }
|
||||
assertTrue { tensor2.acosh() eq expectedTensor2(::acosh) }
|
||||
assertTrue { tensor3.acosh() eq expectedTensor3(::acosh) }
|
||||
assertTrue { tensor4.acosh() eq expectedTensor4(::acosh) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testSin() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.sin() eq expectedTensor(::sin) }
|
||||
assertTrue { tensorWithNegative.sin() eq expectedTensorWithNegative(::sin) }
|
||||
assertTrue { tensor1.sin() eq expectedTensor1(::sin) }
|
||||
assertTrue { tensor2.sin() eq expectedTensor2(::sin) }
|
||||
assertTrue { tensor3.sin() eq expectedTensor3(::sin) }
|
||||
assertTrue { tensor4.sin() eq expectedTensor4(::sin) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testAsin() = DoubleTensorAlgebra {
|
||||
assertTrue { tensorWithNegativeMod1.asin() eq expectedTensorWithNegativeMod1(::asin) }
|
||||
assertTrue { tensor1Mod1.asin() eq expectedTensor1Mod1(::asin) }
|
||||
assertTrue { tensor2Mod1.asin() eq expectedTensor2Mod1(::asin) }
|
||||
assertTrue { tensor3Mod1.asin() eq expectedTensor3Mod1(::asin) }
|
||||
assertTrue { tensor4Mod1.asin() eq expectedTensor4Mod1(::asin) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testSinh() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.sinh() eq expectedTensor(::sinh) }
|
||||
assertTrue { tensorWithNegative.sinh() eq expectedTensorWithNegative(::sinh) }
|
||||
assertTrue { tensor1.sinh() eq expectedTensor1(::sinh) }
|
||||
assertTrue { tensor2.sinh() eq expectedTensor2(::sinh) }
|
||||
assertTrue { tensor3.sinh() eq expectedTensor3(::sinh) }
|
||||
assertTrue { tensor4.sinh() eq expectedTensor4(::sinh) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testAsinh() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.asinh() eq expectedTensor(::asinh) }
|
||||
assertTrue { tensorWithNegative.asinh() eq expectedTensorWithNegative(::asinh) }
|
||||
assertTrue { tensor1.asinh() eq expectedTensor1(::asinh) }
|
||||
assertTrue { tensor2.asinh() eq expectedTensor2(::asinh) }
|
||||
assertTrue { tensor3.asinh() eq expectedTensor3(::asinh) }
|
||||
assertTrue { tensor4.asinh() eq expectedTensor4(::asinh) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testTan() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.tan() eq expectedTensor(::tan) }
|
||||
assertTrue { tensorWithNegative.tan() eq expectedTensorWithNegative(::tan) }
|
||||
assertTrue { tensor1.tan() eq expectedTensor1(::tan) }
|
||||
assertTrue { tensor2.tan() eq expectedTensor2(::tan) }
|
||||
assertTrue { tensor3.tan() eq expectedTensor3(::tan) }
|
||||
assertTrue { tensor4.tan() eq expectedTensor4(::tan) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testAtan() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.atan() eq expectedTensor(::atan) }
|
||||
assertTrue { tensorWithNegative.atan() eq expectedTensorWithNegative(::atan) }
|
||||
assertTrue { tensor1.atan() eq expectedTensor1(::atan) }
|
||||
assertTrue { tensor2.atan() eq expectedTensor2(::atan) }
|
||||
assertTrue { tensor3.atan() eq expectedTensor3(::atan) }
|
||||
assertTrue { tensor4.atan() eq expectedTensor4(::atan) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testTanh() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.tanh() eq expectedTensor(::tanh) }
|
||||
assertTrue { tensorWithNegative.tanh() eq expectedTensorWithNegative(::tanh) }
|
||||
assertTrue { tensor1.tanh() eq expectedTensor1(::tanh) }
|
||||
assertTrue { tensor2.tanh() eq expectedTensor2(::tanh) }
|
||||
assertTrue { tensor3.tanh() eq expectedTensor3(::tanh) }
|
||||
assertTrue { tensor4.tanh() eq expectedTensor4(::tanh) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testAtanh() = DoubleTensorAlgebra {
|
||||
assertTrue { tensorWithNegativeMod1.atanh() eq expectedTensorWithNegativeMod1(::atanh) }
|
||||
assertTrue { tensor1Mod1.atanh() eq expectedTensor1Mod1(::atanh) }
|
||||
assertTrue { tensor2Mod1.atanh() eq expectedTensor2Mod1(::atanh) }
|
||||
assertTrue { tensor3Mod1.atanh() eq expectedTensor3Mod1(::atanh) }
|
||||
assertTrue { tensor4Mod1.atanh() eq expectedTensor4Mod1(::atanh) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testCeil() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.ceil() eq expectedTensor(::ceil) }
|
||||
assertTrue { tensorWithNegative.ceil() eq expectedTensorWithNegative(::ceil) }
|
||||
assertTrue { tensor1.ceil() eq expectedTensor1(::ceil) }
|
||||
assertTrue { tensor2.ceil() eq expectedTensor2(::ceil) }
|
||||
assertTrue { tensor3.ceil() eq expectedTensor3(::ceil) }
|
||||
assertTrue { tensor4.ceil() eq expectedTensor4(::ceil) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testFloor() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor.floor() eq expectedTensor(::floor) }
|
||||
assertTrue { tensorWithNegative.floor() eq expectedTensorWithNegative(::floor) }
|
||||
assertTrue { tensor1.floor() eq expectedTensor1(::floor) }
|
||||
assertTrue { tensor2.floor() eq expectedTensor2(::floor) }
|
||||
assertTrue { tensor3.floor() eq expectedTensor3(::floor) }
|
||||
assertTrue { tensor4.floor() eq expectedTensor4(::floor) }
|
||||
}
|
||||
|
||||
val shape2 = intArrayOf(2, 2)
|
||||
val buffer2 = doubleArrayOf(
|
||||
val shape5 = intArrayOf(2, 2)
|
||||
val buffer5 = doubleArrayOf(
|
||||
1.0, 2.0,
|
||||
-3.0, 4.0
|
||||
)
|
||||
val tensor2 = DoubleTensor(shape2, buffer2)
|
||||
val tensor5 = DoubleTensor(shape5, buffer5)
|
||||
|
||||
@Test
|
||||
fun testMin() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor2.min() == -3.0 }
|
||||
assertTrue { tensor2.min(0, true) eq fromArray(
|
||||
assertTrue { tensor5.min() == -3.0 }
|
||||
assertTrue { tensor5.min(0, true) eq fromArray(
|
||||
intArrayOf(1, 2),
|
||||
doubleArrayOf(-3.0, 2.0)
|
||||
)}
|
||||
assertTrue { tensor2.min(1, false) eq fromArray(
|
||||
assertTrue { tensor5.min(1, false) eq fromArray(
|
||||
intArrayOf(2),
|
||||
doubleArrayOf(1.0, -3.0)
|
||||
)}
|
||||
@ -123,12 +264,12 @@ internal class TestDoubleAnalyticTensorAlgebra {
|
||||
|
||||
@Test
|
||||
fun testMax() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor2.max() == 4.0 }
|
||||
assertTrue { tensor2.max(0, true) eq fromArray(
|
||||
assertTrue { tensor5.max() == 4.0 }
|
||||
assertTrue { tensor5.max(0, true) eq fromArray(
|
||||
intArrayOf(1, 2),
|
||||
doubleArrayOf(1.0, 4.0)
|
||||
)}
|
||||
assertTrue { tensor2.max(1, false) eq fromArray(
|
||||
assertTrue { tensor5.max(1, false) eq fromArray(
|
||||
intArrayOf(2),
|
||||
doubleArrayOf(2.0, 4.0)
|
||||
)}
|
||||
@ -136,12 +277,12 @@ internal class TestDoubleAnalyticTensorAlgebra {
|
||||
|
||||
@Test
|
||||
fun testSum() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor2.sum() == 4.0 }
|
||||
assertTrue { tensor2.sum(0, true) eq fromArray(
|
||||
assertTrue { tensor5.sum() == 4.0 }
|
||||
assertTrue { tensor5.sum(0, true) eq fromArray(
|
||||
intArrayOf(1, 2),
|
||||
doubleArrayOf(-2.0, 6.0)
|
||||
)}
|
||||
assertTrue { tensor2.sum(1, false) eq fromArray(
|
||||
assertTrue { tensor5.sum(1, false) eq fromArray(
|
||||
intArrayOf(2),
|
||||
doubleArrayOf(3.0, 1.0)
|
||||
)}
|
||||
@ -149,15 +290,24 @@ internal class TestDoubleAnalyticTensorAlgebra {
|
||||
|
||||
@Test
|
||||
fun testMean() = DoubleTensorAlgebra {
|
||||
assertTrue { tensor2.mean() == 1.0 }
|
||||
assertTrue { tensor2.mean(0, true) eq fromArray(
|
||||
assertTrue { tensor5.mean() == 1.0 }
|
||||
assertTrue { tensor5.mean(0, true) eq fromArray(
|
||||
intArrayOf(1, 2),
|
||||
doubleArrayOf(-1.0, 3.0)
|
||||
)}
|
||||
assertTrue { tensor2.mean(1, false) eq fromArray(
|
||||
assertTrue { tensor5.mean(1, false) eq fromArray(
|
||||
intArrayOf(2),
|
||||
doubleArrayOf(1.5, 0.5)
|
||||
)}
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testStd() = DoubleTensorAlgebra {
|
||||
assertEquals(2.9439, floor(tensor5.std() * 10000 ) / 10000)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun testVariance() = DoubleTensorAlgebra {
|
||||
assertEquals(8.6666, floor(tensor5.variance() * 10000 ) / 10000)
|
||||
}
|
||||
}
|
||||
|
@ -23,6 +23,44 @@ import kotlin.test.assertTrue
|
||||
|
||||
internal class TestDoubleTensor {
|
||||
|
||||
// fullLike must produce a tensor of the same shape with every element set
// to the requested value.
@Test
fun testFullLike() = DoubleTensorAlgebra {
    val sourceShape = intArrayOf(2, 3)
    val sourceBuffer = doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)
    val source = DoubleTensor(sourceShape, sourceBuffer)
    val fillValue = 12.5
    val expected = DoubleTensor(sourceShape, DoubleArray(sourceBuffer.size) { fillValue })
    assertTrue { source.fullLike(fillValue) eq expected }
}
|
||||
|
||||
// onesLike must produce a same-shape tensor filled with 1.0.
@Test
fun testOnesLike() = DoubleTensorAlgebra {
    val sourceShape = intArrayOf(2, 3)
    val sourceBuffer = doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)
    val source = DoubleTensor(sourceShape, sourceBuffer)
    val expected = DoubleTensor(sourceShape, DoubleArray(sourceBuffer.size) { 1.0 })
    assertTrue { source.onesLike() eq expected }
}
|
||||
|
||||
// rowsByIndices must select exactly the requested rows, preserving order.
@Test
fun testRowsByIndices() = DoubleTensorAlgebra {
    val source = fromArray(intArrayOf(2, 2), doubleArrayOf(1.0, 2.0, -3.0, 4.0))
    val firstRowOnly = DoubleTensor(intArrayOf(1, 2), doubleArrayOf(1.0, 2.0))
    assertTrue { source.rowsByIndices(intArrayOf(0)) eq firstRowOnly }
    // Selecting every row reproduces the original tensor.
    assertTrue { source.rowsByIndices(intArrayOf(0, 1)) eq source }
}
|
||||
|
||||
// times must support both tensor-by-scalar and elementwise
// tensor-by-tensor multiplication.
@Test
fun testTimes() = DoubleTensorAlgebra {
    val commonShape = intArrayOf(2, 2)
    val leftBuffer = doubleArrayOf(1.0, 2.0, -3.0, 4.0)
    val left = DoubleTensor(commonShape, leftBuffer)
    val scalar = 3
    val scaled = DoubleTensor(commonShape, DoubleArray(leftBuffer.size) { i -> scalar * leftBuffer[i] })
    assertTrue { left.times(scalar).toBufferedTensor() eq scaled }
    val right = DoubleTensor(commonShape, doubleArrayOf(7.0, -8.0, -5.0, 2.0))
    val elementwiseProduct = DoubleTensor(commonShape, doubleArrayOf(7.0, -16.0, 15.0, 8.0))
    assertTrue { left.times(right).toBufferedTensor() eq elementwiseProduct }
}
|
||||
|
||||
@Test
|
||||
fun testValue() = DoubleTensorAlgebra {
|
||||
val value = 12.5
|
||||
|
@ -132,6 +132,27 @@ internal class TestDoubleTensorAlgebra {
|
||||
468.0, 501.0, 534.0, 594.0, 636.0, 678.0, 720.0, 771.0, 822.0
|
||||
))
|
||||
assertTrue(res45.shape contentEquals intArrayOf(2, 3, 3))
|
||||
|
||||
val oneDimTensor1 = fromArray(intArrayOf(3), doubleArrayOf(1.0, 2.0, 3.0))
|
||||
val oneDimTensor2 = fromArray(intArrayOf(3), doubleArrayOf(4.0, 5.0, 6.0))
|
||||
val resOneDimTensors = oneDimTensor1.dot(oneDimTensor2)
|
||||
assertTrue(resOneDimTensors.mutableBuffer.array() contentEquals doubleArrayOf(32.0))
|
||||
assertTrue(resOneDimTensors.shape contentEquals intArrayOf(1))
|
||||
|
||||
val twoDimTensor1 = fromArray(intArrayOf(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0))
|
||||
val twoDimTensor2 = fromArray(intArrayOf(2, 2), doubleArrayOf(5.0, 6.0, 7.0, 8.0))
|
||||
val resTwoDimTensors = twoDimTensor1.dot(twoDimTensor2)
|
||||
assertTrue(resTwoDimTensors.mutableBuffer.array() contentEquals doubleArrayOf(19.0, 22.0, 43.0, 50.0))
|
||||
assertTrue(resTwoDimTensors.shape contentEquals intArrayOf(2, 2))
|
||||
|
||||
val oneDimTensor3 = fromArray(intArrayOf(2), doubleArrayOf(1.0, 2.0))
|
||||
val resOneDimTensorOnTwoDimTensor = oneDimTensor3.dot(twoDimTensor1)
|
||||
assertTrue(resOneDimTensorOnTwoDimTensor.mutableBuffer.array() contentEquals doubleArrayOf(7.0, 10.0))
|
||||
assertTrue(resOneDimTensorOnTwoDimTensor.shape contentEquals intArrayOf(2))
|
||||
|
||||
val resTwoDimTensorOnOneDimTensor = twoDimTensor1.dot(oneDimTensor3)
|
||||
assertTrue(resTwoDimTensorOnOneDimTensor.mutableBuffer.array() contentEquals doubleArrayOf(5.0, 11.0))
|
||||
assertTrue(resTwoDimTensorOnOneDimTensor.shape contentEquals intArrayOf(2))
|
||||
}
|
||||
|
||||
@Test
|
||||
|
Loading…
Reference in New Issue
Block a user