From f8c55328a485cf7ddb74cffb814b7f5ffaf3f0ea Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 22 Mar 2022 21:12:39 +0300 Subject: [PATCH 01/59] added more examples for tensors, added tests for acos, asin, atanh --- .../core/TestDoubleAnalyticTensorAlgebra.kt | 219 ++++++++++++++---- 1 file changed, 179 insertions(+), 40 deletions(-) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt index 1e21379b4..b9f4ebd96 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt @@ -12,110 +12,250 @@ import kotlin.test.assertTrue internal class TestDoubleAnalyticTensorAlgebra { - val shape = intArrayOf(2, 1, 3, 2) - val buffer = doubleArrayOf( - 27.1, 20.0, 19.84, - 23.123, 3.0, 2.0, + val shapeWithNegative = intArrayOf(4) + val bufferWithNegative = doubleArrayOf(9.3348, -7.5889, -1.2005, 1.1584) + val tensorWithNegative = DoubleTensor(shapeWithNegative, bufferWithNegative) - 3.23, 133.7, 25.3, - 100.3, 11.0, 12.012 - ) - val tensor = DoubleTensor(shape, buffer) + val shape1 = intArrayOf(4) + val buffer1 = doubleArrayOf(1.3348, 1.5889, 1.2005, 1.1584) + val tensor1 = DoubleTensor(shape1, buffer1) + + val shape2 = intArrayOf(2, 2) + val buffer2 = doubleArrayOf(1.0, 9.456, 3.0, 4.0) + val tensor2 = DoubleTensor(shape2, buffer2) + + val shape3 = intArrayOf(2, 3, 2) + val buffer3 = doubleArrayOf(1.0, 9.456, 7.0, 2.123, 1.0, 9.456, 30.8888, 6.0, 1.0, 9.456, 3.0, 4.99) + val tensor3 = DoubleTensor(shape3, buffer3) + + val shape4 = intArrayOf(2, 1, 3, 2) + val buffer4 = doubleArrayOf(27.1, 20.0, 19.84, 23.123, 3.0, 2.0, 3.23, 133.7, 25.3, 100.3, 11.0, 12.012) + val tensor4 = DoubleTensor(shape4, buffer4) + + val bufferWithNegativeMod1 = bufferWithNegative.map { x -> x % 1 }.toDoubleArray() + val tensorWithNegativeMod1 = DoubleTensor(shapeWithNegative, bufferWithNegativeMod1) + + val buffer1Mod1 = buffer1.map { x -> x % 1 }.toDoubleArray() + val tensor1Mod1 = DoubleTensor(shape1, buffer1Mod1) + + val buffer2Mod1 = buffer2.map { x -> x % 1 }.toDoubleArray() + val tensor2Mod1 = DoubleTensor(shape2, buffer2Mod1) + + val buffer3Mod1 = buffer3.map { x -> x % 1 }.toDoubleArray() + val tensor3Mod1 = DoubleTensor(shape3, buffer3Mod1) + + val buffer4Mod1 = buffer4.map { x -> x % 1 }.toDoubleArray() + val tensor4Mod1 = DoubleTensor(shape4, buffer4Mod1) fun DoubleArray.fmap(transform: (Double) -> Double): DoubleArray { return this.map(transform).toDoubleArray() } - fun expectedTensor(transform: (Double) -> Double): DoubleTensor { - return DoubleTensor(shape, buffer.fmap(transform)) + fun expectedTensorWithNegative(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shapeWithNegative, bufferWithNegative.fmap(transform)) + } + + fun expectedTensor1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape1, buffer1.fmap(transform)) + } + + fun expectedTensor2(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape2, buffer2.fmap(transform)) + } + + fun expectedTensor3(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape3, buffer3.fmap(transform)) + } + + fun expectedTensor4(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape4, buffer4.fmap(transform)) + } + + fun 
expectedTensorWithNegativeMod1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shapeWithNegative, bufferWithNegativeMod1.fmap(transform)) + } + + fun expectedTensor1Mod1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape1, buffer1Mod1.fmap(transform)) + } + + fun expectedTensor2Mod1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape2, buffer2Mod1.fmap(transform)) + } + + fun expectedTensor3Mod1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape3, buffer3Mod1.fmap(transform)) + } + + fun expectedTensor4Mod1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape4, buffer4Mod1.fmap(transform)) } @Test fun testExp() = DoubleTensorAlgebra { - assertTrue { tensor.exp() eq expectedTensor(::exp) } + assertTrue { tensorWithNegative.exp() eq expectedTensorWithNegative(::exp) } + assertTrue { tensor1.exp() eq expectedTensor1(::exp) } + assertTrue { tensor2.exp() eq expectedTensor2(::exp) } + assertTrue { tensor3.exp() eq expectedTensor3(::exp) } + assertTrue { tensor4.exp() eq expectedTensor4(::exp) } } @Test fun testLog() = DoubleTensorAlgebra { - assertTrue { tensor.ln() eq expectedTensor(::ln) } + assertTrue { tensor1.ln() eq expectedTensor1(::ln) } + assertTrue { tensor2.ln() eq expectedTensor2(::ln) } + assertTrue { tensor3.ln() eq expectedTensor3(::ln) } + assertTrue { tensor4.ln() eq expectedTensor4(::ln) } } @Test fun testSqrt() = DoubleTensorAlgebra { - assertTrue { tensor.sqrt() eq expectedTensor(::sqrt) } + assertTrue { tensor1.sqrt() eq expectedTensor1(::sqrt) } + assertTrue { tensor2.sqrt() eq expectedTensor2(::sqrt) } + assertTrue { tensor3.sqrt() eq expectedTensor3(::sqrt) } + assertTrue { tensor4.sqrt() eq expectedTensor4(::sqrt) } } @Test fun testCos() = DoubleTensorAlgebra { - assertTrue { tensor.cos() eq expectedTensor(::cos) } + assertTrue { tensorWithNegative.cos() eq expectedTensorWithNegative(::cos) } + assertTrue { tensor1.cos() eq expectedTensor1(::cos) } + assertTrue { tensor2.cos() eq expectedTensor2(::cos) } + assertTrue { tensor3.cos() eq expectedTensor3(::cos) } + assertTrue { tensor4.cos() eq expectedTensor4(::cos) } } + @Test + fun testAcos() = DoubleTensorAlgebra { + assertTrue { tensorWithNegativeMod1.acos() eq expectedTensorWithNegativeMod1(::acos) } + assertTrue { tensor1Mod1.acos() eq expectedTensor1Mod1(::acos) } + assertTrue { tensor2Mod1.acos() eq expectedTensor2Mod1(::acos) } + assertTrue { tensor3Mod1.acos() eq expectedTensor3Mod1(::acos) } + assertTrue { tensor4Mod1.acos() eq expectedTensor4Mod1(::acos) } + } @Test fun testCosh() = DoubleTensorAlgebra { - assertTrue { tensor.cosh() eq expectedTensor(::cosh) } + assertTrue { tensorWithNegative.cosh() eq expectedTensorWithNegative(::cosh) } + assertTrue { tensor1.cosh() eq expectedTensor1(::cosh) } + assertTrue { tensor2.cosh() eq expectedTensor2(::cosh) } + assertTrue { tensor3.cosh() eq expectedTensor3(::cosh) } + assertTrue { tensor4.cosh() eq expectedTensor4(::cosh) } } @Test fun testAcosh() = DoubleTensorAlgebra { - assertTrue { tensor.acosh() eq expectedTensor(::acosh) } + assertTrue { tensor1.acosh() eq expectedTensor1(::acosh) } + assertTrue { tensor2.acosh() eq expectedTensor2(::acosh) } + assertTrue { tensor3.acosh() eq expectedTensor3(::acosh) } + assertTrue { tensor4.acosh() eq expectedTensor4(::acosh) } } @Test fun testSin() = DoubleTensorAlgebra { - assertTrue { tensor.sin() eq expectedTensor(::sin) } + assertTrue { tensorWithNegative.sin() eq expectedTensorWithNegative(::sin) } + 
assertTrue { tensor1.sin() eq expectedTensor1(::sin) } + assertTrue { tensor2.sin() eq expectedTensor2(::sin) } + assertTrue { tensor3.sin() eq expectedTensor3(::sin) } + assertTrue { tensor4.sin() eq expectedTensor4(::sin) } + } + + @Test + fun testAsin() = DoubleTensorAlgebra { + assertTrue { tensorWithNegativeMod1.asin() eq expectedTensorWithNegativeMod1(::asin) } + assertTrue { tensor1Mod1.asin() eq expectedTensor1Mod1(::asin) } + assertTrue { tensor2Mod1.asin() eq expectedTensor2Mod1(::asin) } + assertTrue { tensor3Mod1.asin() eq expectedTensor3Mod1(::asin) } + assertTrue { tensor4Mod1.asin() eq expectedTensor4Mod1(::asin) } } @Test fun testSinh() = DoubleTensorAlgebra { - assertTrue { tensor.sinh() eq expectedTensor(::sinh) } + assertTrue { tensorWithNegative.sinh() eq expectedTensorWithNegative(::sinh) } + assertTrue { tensor1.sinh() eq expectedTensor1(::sinh) } + assertTrue { tensor2.sinh() eq expectedTensor2(::sinh) } + assertTrue { tensor3.sinh() eq expectedTensor3(::sinh) } + assertTrue { tensor4.sinh() eq expectedTensor4(::sinh) } } @Test fun testAsinh() = DoubleTensorAlgebra { - assertTrue { tensor.asinh() eq expectedTensor(::asinh) } + assertTrue { tensorWithNegative.asinh() eq expectedTensorWithNegative(::asinh) } + assertTrue { tensor1.asinh() eq expectedTensor1(::asinh) } + assertTrue { tensor2.asinh() eq expectedTensor2(::asinh) } + assertTrue { tensor3.asinh() eq expectedTensor3(::asinh) } + assertTrue { tensor4.asinh() eq expectedTensor4(::asinh) } } @Test fun testTan() = DoubleTensorAlgebra { - assertTrue { tensor.tan() eq expectedTensor(::tan) } + assertTrue { tensorWithNegative.tan() eq expectedTensorWithNegative(::tan) } + assertTrue { tensor1.tan() eq expectedTensor1(::tan) } + assertTrue { tensor2.tan() eq expectedTensor2(::tan) } + assertTrue { tensor3.tan() eq expectedTensor3(::tan) } + assertTrue { tensor4.tan() eq expectedTensor4(::tan) } } @Test fun testAtan() = DoubleTensorAlgebra { - assertTrue { tensor.atan() eq expectedTensor(::atan) } + assertTrue { tensorWithNegative.atan() eq expectedTensorWithNegative(::atan) } + assertTrue { tensor1.atan() eq expectedTensor1(::atan) } + assertTrue { tensor2.atan() eq expectedTensor2(::atan) } + assertTrue { tensor3.atan() eq expectedTensor3(::atan) } + assertTrue { tensor4.atan() eq expectedTensor4(::atan) } } @Test fun testTanh() = DoubleTensorAlgebra { - assertTrue { tensor.tanh() eq expectedTensor(::tanh) } + assertTrue { tensorWithNegative.tanh() eq expectedTensorWithNegative(::tanh) } + assertTrue { tensor1.tanh() eq expectedTensor1(::tanh) } + assertTrue { tensor2.tanh() eq expectedTensor2(::tanh) } + assertTrue { tensor3.tanh() eq expectedTensor3(::tanh) } + assertTrue { tensor4.tanh() eq expectedTensor4(::tanh) } + } + + @Test + fun testAtanh() = DoubleTensorAlgebra { + assertTrue { tensorWithNegativeMod1.atanh() eq expectedTensorWithNegativeMod1(::atanh) } + assertTrue { tensor1Mod1.atanh() eq expectedTensor1Mod1(::atanh) } + assertTrue { tensor2Mod1.atanh() eq expectedTensor2Mod1(::atanh) } + assertTrue { tensor3Mod1.atanh() eq expectedTensor3Mod1(::atanh) } + assertTrue { tensor4Mod1.atanh() eq expectedTensor4Mod1(::atanh) } } @Test fun testCeil() = DoubleTensorAlgebra { - assertTrue { tensor.ceil() eq expectedTensor(::ceil) } + assertTrue { tensorWithNegative.ceil() eq expectedTensorWithNegative(::ceil) } + assertTrue { tensor1.ceil() eq expectedTensor1(::ceil) } + assertTrue { tensor2.ceil() eq expectedTensor2(::ceil) } + assertTrue { tensor3.ceil() eq expectedTensor3(::ceil) } + assertTrue { 
tensor4.ceil() eq expectedTensor4(::ceil) } } @Test fun testFloor() = DoubleTensorAlgebra { - assertTrue { tensor.floor() eq expectedTensor(::floor) } + assertTrue { tensorWithNegative.floor() eq expectedTensorWithNegative(::floor) } + assertTrue { tensor1.floor() eq expectedTensor1(::floor) } + assertTrue { tensor2.floor() eq expectedTensor2(::floor) } + assertTrue { tensor3.floor() eq expectedTensor3(::floor) } + assertTrue { tensor4.floor() eq expectedTensor4(::floor) } } - val shape2 = intArrayOf(2, 2) - val buffer2 = doubleArrayOf( + val shape5 = intArrayOf(2, 2) + val buffer5 = doubleArrayOf( 1.0, 2.0, -3.0, 4.0 ) - val tensor2 = DoubleTensor(shape2, buffer2) + val tensor5 = DoubleTensor(shape5, buffer5) @Test fun testMin() = DoubleTensorAlgebra { - assertTrue { tensor2.min() == -3.0 } - assertTrue { tensor2.min(0, true) eq fromArray( + assertTrue { tensor5.min() == -3.0 } + assertTrue { tensor5.min(0, true) eq fromArray( intArrayOf(1, 2), doubleArrayOf(-3.0, 2.0) )} - assertTrue { tensor2.min(1, false) eq fromArray( + assertTrue { tensor5.min(1, false) eq fromArray( intArrayOf(2), doubleArrayOf(1.0, -3.0) )} @@ -123,12 +263,12 @@ internal class TestDoubleAnalyticTensorAlgebra { @Test fun testMax() = DoubleTensorAlgebra { - assertTrue { tensor2.max() == 4.0 } - assertTrue { tensor2.max(0, true) eq fromArray( + assertTrue { tensor5.max() == 4.0 } + assertTrue { tensor5.max(0, true) eq fromArray( intArrayOf(1, 2), doubleArrayOf(1.0, 4.0) )} - assertTrue { tensor2.max(1, false) eq fromArray( + assertTrue { tensor5.max(1, false) eq fromArray( intArrayOf(2), doubleArrayOf(2.0, 4.0) )} @@ -136,12 +276,12 @@ internal class TestDoubleAnalyticTensorAlgebra { @Test fun testSum() = DoubleTensorAlgebra { - assertTrue { tensor2.sum() == 4.0 } - assertTrue { tensor2.sum(0, true) eq fromArray( + assertTrue { tensor5.sum() == 4.0 } + assertTrue { tensor5.sum(0, true) eq fromArray( intArrayOf(1, 2), doubleArrayOf(-2.0, 6.0) )} - assertTrue { tensor2.sum(1, false) eq fromArray( + assertTrue { tensor5.sum(1, false) eq fromArray( intArrayOf(2), doubleArrayOf(3.0, 1.0) )} @@ -149,15 +289,14 @@ internal class TestDoubleAnalyticTensorAlgebra { @Test fun testMean() = DoubleTensorAlgebra { - assertTrue { tensor2.mean() == 1.0 } - assertTrue { tensor2.mean(0, true) eq fromArray( + assertTrue { tensor5.mean() == 1.0 } + assertTrue { tensor5.mean(0, true) eq fromArray( intArrayOf(1, 2), doubleArrayOf(-1.0, 3.0) )} - assertTrue { tensor2.mean(1, false) eq fromArray( + assertTrue { tensor5.mean(1, false) eq fromArray( intArrayOf(2), doubleArrayOf(1.5, 0.5) )} } - } From 0440764cd31099b7215ee0a661db297f84a17f89 Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 22 Mar 2022 21:51:45 +0300 Subject: [PATCH 02/59] added tests for std, variance --- .../tensors/core/TestDoubleAnalyticTensorAlgebra.kt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt index b9f4ebd96..d51702403 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt @@ -299,4 +299,14 @@ internal class TestDoubleAnalyticTensorAlgebra { doubleArrayOf(1.5, 0.5) )} } + + @Test + fun testStd() = DoubleTensorAlgebra { + assertTrue { floor(tensor5.std() * 10000 ) / 10000 
== 2.9439 } + } + + @Test + fun testVariance() = DoubleTensorAlgebra { + assertTrue { floor(tensor5.variance() * 10000 ) / 10000 == 8.6666 } + } } From 93b62c5bf6550ff2baa835b7899c16fe79f54860 Mon Sep 17 00:00:00 2001 From: Margarita Date: Wed, 23 Mar 2022 00:25:17 +0300 Subject: [PATCH 03/59] added tests for fullLike, onesLike, rowsByIndices --- .../kmath/tensors/core/TestDoubleTensor.kt | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt index d808637c7..bebaed32f 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt @@ -23,6 +23,32 @@ import kotlin.test.assertTrue internal class TestDoubleTensor { + @Test + fun testFullLike() = DoubleTensorAlgebra { + val shape = intArrayOf(2, 3) + val buffer = doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0) + val tensor = DoubleTensor(shape, buffer) + val value = 12.5 + assertTrue { tensor.fullLike(value) eq DoubleTensor(shape, buffer.map { value }.toDoubleArray() ) } + } + + @Test + fun testOnesLike() = DoubleTensorAlgebra { + val shape = intArrayOf(2, 3) + val buffer = doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0) + val tensor = DoubleTensor(shape, buffer) + assertTrue { tensor.onesLike() eq DoubleTensor(shape, buffer.map { 1.0 }.toDoubleArray() ) } + } + + @Test + fun testRowsByIndices() = DoubleTensorAlgebra { + val shape = intArrayOf(2, 2) + val buffer = doubleArrayOf(1.0, 2.0, -3.0, 4.0) + val tensor = fromArray(shape, buffer) + assertTrue { tensor.rowsByIndices(intArrayOf(0)) eq DoubleTensor(intArrayOf(1, 2), doubleArrayOf(1.0, 2.0)) } + assertTrue { tensor.rowsByIndices(intArrayOf(0, 1)) eq tensor } + } + @Test fun testValue() = DoubleTensorAlgebra { val value = 12.5 From ae9666b07b1f57289926068112d4a8e6514f40b0 Mon Sep 17 00:00:00 2001 From: Margarita Date: Wed, 23 Mar 2022 00:46:52 +0300 Subject: [PATCH 04/59] added test for StructureND.times --- .../kscience/kmath/tensors/core/TestDoubleTensor.kt | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt index bebaed32f..7a38e5b7d 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt @@ -49,6 +49,18 @@ internal class TestDoubleTensor { assertTrue { tensor.rowsByIndices(intArrayOf(0, 1)) eq tensor } } + @Test + fun testTimes() = DoubleTensorAlgebra { + val shape = intArrayOf(2, 2) + val buffer = doubleArrayOf(1.0, 2.0, -3.0, 4.0) + val tensor = DoubleTensor(shape, buffer) + val value = 3 + assertTrue { tensor.times(value).toBufferedTensor() eq DoubleTensor(shape, buffer.map { x -> 3 * x }.toDoubleArray()) } + val buffer2 = doubleArrayOf(7.0, -8.0, -5.0, 2.0) + val tensor2 = DoubleTensor(shape, buffer2) + assertTrue {tensor.times(tensor2).toBufferedTensor() eq DoubleTensor(shape, doubleArrayOf(7.0, -16.0, 15.0, 8.0)) } + } + @Test fun testValue() = DoubleTensorAlgebra { val value = 12.5 From 2fa39fff14352df710c0495082a3903389a4d005 Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 24 May 2022 19:22:26 +0300 Subject: [PATCH 05/59] 
added partial implementation of svd calculation --- .../space/kscience/kmath/tensors/margarita.kt | 76 +++++ .../space/kscience/kmath/tensors/svdcmp.kt | 276 ++++++++++++++++++ 2 files changed, 352 insertions(+) create mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt create mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt new file mode 100644 index 000000000..354b08346 --- /dev/null +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt @@ -0,0 +1,76 @@ +/* + * Copyright 2018-2021 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. + */ + +package space.kscience.kmath.tensors + +import space.kscience.kmath.linear.transpose +import space.kscience.kmath.misc.PerformancePitfall +import space.kscience.kmath.nd.MutableStructure2D +import space.kscience.kmath.nd.Structure2D +import space.kscience.kmath.nd.as2D +import space.kscience.kmath.tensors.core.* +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.dot +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.mapIndexed +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.zeros +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.minus +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.sum +import space.kscience.kmath.tensors.core.tensorAlgebra +import kotlin.math.* + +fun DoubleArray.fmap(transform: (Double) -> Double): DoubleArray { + return this.map(transform).toDoubleArray() +} + +fun scalarProduct(v1: Structure2D, v2: Structure2D): Double { + return v1.mapIndexed { index, d -> d * v2[index] }.sum() +} + +internal fun diagonal(shape: IntArray, v: Double) : DoubleTensor { + val matrix = zeros(shape) + return matrix.mapIndexed { index, _ -> if (index.component1() == index.component2()) v else 0.0 } +} + + +fun MutableStructure2D.print() { + val n = this.shape.component1() + val m = this.shape.component2() + for (i in 0 until n) { + for (j in 0 until m) { + val x = (this[i, j] * 100).roundToInt() / 100.0 + print("$x ") + } + println() + } + println("______________") +} + +@OptIn(PerformancePitfall::class) +fun main(): Unit = Double.tensorAlgebra.withBroadcast { + val shape = intArrayOf(5, 3) + val buffer = doubleArrayOf( + 1.000000, 2.000000, 3.000000, + 2.000000, 3.000000, 4.000000, + 3.000000, 4.000000, 5.000000, + 4.000000, 5.000000, 6.000000, + 5.000000, 6.000000, 7.000000 + ) + val buffer2 = doubleArrayOf( + 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000 + ) + val tensor = fromArray(shape, buffer).as2D() + val v = fromArray(shape, buffer2).as2D() + tensor.print() + tensor.svdcmp(v) +// tensor.print() + + + + + +} diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt new file mode 100644 index 000000000..3657c0c8c --- /dev/null +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt @@ -0,0 +1,276 @@ +package space.kscience.kmath.tensors + +import space.kscience.kmath.nd.* +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra +import kotlin.math.abs +import kotlin.math.max +import kotlin.math.min +import 
kotlin.math.sqrt + +/* + * Copyright 2018-2021 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. + */ + + +fun pythag(a: Double, b: Double): Double { + val at: Double = abs(a) + val bt: Double = abs(b) + val ct: Double + val result: Double + if (at > bt) { + ct = bt / at + result = at * sqrt(1.0 + ct * ct) + } else if (bt > 0.0) { + ct = at / bt + result = bt * sqrt(1.0 + ct * ct) + } else result = 0.0 + return result +} + +fun SIGN(a: Double, b: Double): Double { + if (b >= 0.0) + return abs(a) + else + return -abs(a) +} + +internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { + val shape = this.shape + val n = shape.component2() + val m = shape.component1() + var f = 0.0 + val rv1 = DoubleArray(n) + var s = 0.0 + var scale = 0.0 + var anorm = 0.0 + var g = 0.0 + var l = 0 + val w_shape = intArrayOf(m, 1) + var w_buffer = doubleArrayOf(0.000000) + for (i in 0 until m - 1) { + w_buffer += doubleArrayOf(0.000000) + } + val w = BroadcastDoubleTensorAlgebra.fromArray(w_shape, w_buffer).as2D() + for (i in 0 until n) { + /* left-hand reduction */ + l = i + 1 + rv1[i] = scale * g + g = 0.0 + s = 0.0 + scale = 0.0 + if (i < m) { + for (k in i until m) { + scale += abs(this[k, i]); + } + if (scale != 0.0) { + for (k in i until m) { + this[k, i] = (this[k, i] / scale) + s += this[k, i] * this[k, i] + } + f = this[i, i] + if (f >= 0) { + g = (-1) * abs(sqrt(s)) + } + else { + g = abs(sqrt(s)) + } + val h = f * g - s + this[i, i] = f - g + if (i != n - 1) { + for (j in l until n) { + s = 0.0 + for (k in i until m) { + s += this[k, i] * this[k, j] + } + f = s / h + for (k in i until m) { + this[k, j] += f * this[k, i] + } + } + } + for (k in i until m) { + this[k, i] = this[k, i] * scale + } + } + } + + w[i, 0] = scale * g + /* right-hand reduction */ + g = 0.0 + s = 0.0 + scale = 0.0 + if (i < m && i != n - 1) { + for (k in l until n) { + scale += abs(this[i, k]) + } + if (scale != 0.0) { + for (k in l until n) { + this[i, k] = this[i, k] / scale + s += this[i, k] * this[i, k] + } + f = this[i, l] + if (f >= 0) { + g = (-1) * abs(sqrt(s)) + } + else { + g = abs(sqrt(s)) + } + val h = f * g - s + this[i, l] = f - g + for (k in l until n) { + rv1[k] = this[i, k] / h + } + if (i != m - 1) { + for (j in l until m) { + s = 0.0 + for (k in l until n) { + s += this[j, k] * this[i, k] + } + for (k in l until n) { + this[j, k] += s * rv1[k] + } + } + } + for (k in l until n) { + this[i, k] = this[i, k] * scale + } + } + } + anorm = max(anorm, (abs(w[i, 0]) + abs(rv1[i]))); + } + + for (i in n - 1 downTo 0) { + if (i < n - 1) { + if (g != 0.0) { + for (j in l until n) { + v[j, i] = (this[i, j] / this[i, l]) / g + } + for (j in l until n) { + s = 0.0 + for (k in l until n) + s += this[i, k] * v[k, j] + for (k in l until n) + v[k, j] += s * v[k, i] + } + } + for (j in l until n) { + v[i, j] = 0.0 + v[j, i] = 0.0 + } + } + v[i, i] = 1.0 + g = rv1[i] + l = i + } + + // тут все правильно считается + + +// println("w") +// w.print() +// + val eps = 0.000000001 +// println("1.0 / w[2, 0] " + 1.0 / w[2, 0]) +// println("w[2, 0] " + w[2, 0]) + + for (i in min(n, m) - 1 downTo 0) { + l = i + 1 + g = w[i, 0] +// println("w[i, 0] " + w[i, 0]) + for (j in l until n) { + this[i, j] = 0.0 + } + if (g != 0.0) { + g = 1.0 / g +// println("g " + g) + for (j in l until n) { + s = 0.0 + for (k in l until m) { + s += this[k, i] * this[k, j] + } + f = (s / this[i, i]) * g + for (k in i until m) { + this[k, j] += f * this[k, i] + } + } + 
for (j in i until m) { + this[j, i] *= g + } + } + else { + for (j in i until m) { + this[j, i] = 0.0 + } + } + this[i, i] += 1.0 +// println("matrix") +// this.print() + } + + println("matrix") + this.print() + + // тут матрица должна выглядеть так: + +// 0.134840 -0.762770 0.522117 +// -0.269680 -0.476731 -0.245388 +// -0.404520 -0.190693 -0.527383 +// -0.539360 0.095346 -0.297540 +// -0.674200 0.381385 0.548193 + + + +// var flag = 0 +// var nm = 0 +// var c = 0.0 +// var h = 0.0 +// var y = 0.0 +// var z = 0.0 +// for (k in n - 1 downTo 0) { +// for (its in 0 until 30) { +// flag = 0 +// for (l in k downTo 0) { +// nm = l - 1 +// if (abs(rv1[l]) < eps) { +// flag = 0 +//// println("break1") +// break +// } +// if (abs(w[nm, 0]) < eps) { +// println("break2") +// break +// } +// } +// +// // l = 1 тут +// +// if (flag != 0) { +// c = 0.0 +// s = 0.0 +// for (i in l until k) { // а точно ли такие границы? там немного отличается +// f=s*rv1[i] +// rv1[i]=c*rv1[i] +// if (abs(f) < eps) { +// println("break3") +// break +// } +// g=w[i, 0] +// h=pythag(f,g) +// w[i, 0]=h +// h=1.0/h +// c=g*h +// s = -f*h +// for (j in 0 until m) { // точно ли такие границы? +// y=this[j, nm] +// z=this[j, i] +// this[j, nm]=y*c+z*s +// this[j, i]=z*c-y*s +// } +// } +// } +// +// +// } +// } +} \ No newline at end of file From 86efe48217775ddef25d81ad57793b5c09c3ce97 Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 24 May 2022 19:50:50 +0300 Subject: [PATCH 06/59] added comment about division by zero --- .../space/kscience/kmath/tensors/svdcmp.kt | 144 +++++++++++++----- 1 file changed, 103 insertions(+), 41 deletions(-) diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt index 3657c0c8c..b2706c334 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt @@ -164,26 +164,18 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { l = i } - // тут все правильно считается - - -// println("w") -// w.print() -// - val eps = 0.000000001 -// println("1.0 / w[2, 0] " + 1.0 / w[2, 0]) -// println("w[2, 0] " + w[2, 0]) + // до этого момента все правильно считается + // дальше - нет for (i in min(n, m) - 1 downTo 0) { l = i + 1 g = w[i, 0] -// println("w[i, 0] " + w[i, 0]) for (j in l until n) { this[i, j] = 0.0 } if (g != 0.0) { + // !!!!! 
вот тут деление на почти ноль g = 1.0 / g -// println("g " + g) for (j in l until n) { s = 0.0 for (k in l until m) { @@ -204,15 +196,11 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { } } this[i, i] += 1.0 -// println("matrix") -// this.print() } println("matrix") this.print() - - // тут матрица должна выглядеть так: - +// тут матрица должна выглядеть так: // 0.134840 -0.762770 0.522117 // -0.269680 -0.476731 -0.245388 // -0.404520 -0.190693 -0.527383 @@ -220,57 +208,131 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { // -0.674200 0.381385 0.548193 - // var flag = 0 // var nm = 0 // var c = 0.0 // var h = 0.0 // var y = 0.0 // var z = 0.0 +// var x = 0.0 +// println("L = " + l) // for (k in n - 1 downTo 0) { -// for (its in 0 until 30) { -// flag = 0 -// for (l in k downTo 0) { -// nm = l - 1 -// if (abs(rv1[l]) < eps) { +// for (its in 1 until 30) { +// flag = 1 +// +// for (newl in k downTo 0) { +// nm = newl - 1 +// if (abs(rv1[newl]) + anorm == anorm) { // flag = 0 -//// println("break1") +// l = newl +//// println("newl before break1 = " + newl) +// println("break1") // break // } -// if (abs(w[nm, 0]) < eps) { +// if (abs(w[nm, 0] + anorm) == anorm) { +// l = newl // println("break2") // break // } // } // -// // l = 1 тут +//// println("NEWL = " + l) +// +//// l = 0 // // if (flag != 0) { // c = 0.0 -// s = 0.0 -// for (i in l until k) { // а точно ли такие границы? там немного отличается -// f=s*rv1[i] -// rv1[i]=c*rv1[i] -// if (abs(f) < eps) { +// s = 1.0 +// for (i in l until k) { +// f = s * rv1[i] +// rv1[i] = c * rv1[i] +// if (abs(f) + anorm == anorm) { // println("break3") // break // } -// g=w[i, 0] -// h=pythag(f,g) -// w[i, 0]=h -// h=1.0/h -// c=g*h -// s = -f*h -// for (j in 0 until m) { // точно ли такие границы? 
-// y=this[j, nm] -// z=this[j, i] -// this[j, nm]=y*c+z*s -// this[j, i]=z*c-y*s +// h = pythag(f, g) +// w[i, 0] = h +// h = 1.0 / h +// c = g * h +// s = (-f) * h +// for (j in 0 until m) { +// y = this[j, nm] +// z = this[j, i] +// this[j, nm] = y * c + z * s +// this[j, i] = z * c - y * s // } // } // } // +// z = w[k, 0] +//// println("l = " + l) +//// println("k = " + k) +// if (l == k) { +// if (z < 0.0) { +// w[k, 0] = -z +// for (j in 0 until n) +// v[j, k] = -v[j, k] +// } +// println("break4") +// break +// } // +// if (its == 30) { +// return +// } +// +// x = w[l, 0] +// nm = k - 1 +//// println("nm = " + nm) +// y = w[nm, 0] +// g = rv1[nm] +// h = rv1[k] +// f = ((y-z)*(y+z)+(g-h)*(g+h))/(2.0*h*y) +// g = pythag(f,1.0) +// f=((x-z)*(x+z)+h*((y/(f+SIGN(g,f)))-h))/x +// c = 1.0 +// s = 1.0 +// +// var i = 0 // непонятно, где она должна быть объявлена +// for (j in l until nm) { +// i = j + 1 +// g = rv1[i] +// y = w[i, 0] +// h = s * g +// g = c * g +// z = pythag(f,h) +// rv1[j] = z +// c = f / z +// s = h / z +// f = x * c + g * s +// g = g * c - x * s +// h = y * s +// y *= c +// for (jj in 0 until n) { +// x=v[jj, j]; +// z=v[jj, i]; +// v[jj, j] = x * c + z * s; +// v[jj, i] = z * c - x * s; +// } +// z = pythag(f,h) +// w[j, 0] = z +// if (z != 0.0) { +// z = 1.0 / z +// c = f * z +// s = h * z +// } +// f = c * g + s * y +// x = c * y - s * g +// for (jj in 0 until m) { +// y = this[jj, j] +// z = this[jj, i] +// this[jj, j] = y * c + z * s +// this[jj, i] = z * c - y * s +// } +// } +// rv1[l] = 0.0 +// rv1[k] = f +// w[k, 0] = x // } // } } \ No newline at end of file From 37922365b69dfa16d0077ce1b840016a721d611a Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 24 May 2022 23:23:35 +0300 Subject: [PATCH 07/59] added the rest of the algorithm --- .../space/kscience/kmath/tensors/margarita.kt | 5 +- .../space/kscience/kmath/tensors/svdcmp.kt | 262 +++++++++--------- 2 files changed, 131 insertions(+), 136 deletions(-) diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt index 354b08346..3629b6a47 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt @@ -57,17 +57,14 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { 5.000000, 6.000000, 7.000000 ) val buffer2 = doubleArrayOf( - 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 ) val tensor = fromArray(shape, buffer).as2D() - val v = fromArray(shape, buffer2).as2D() + val v = fromArray(intArrayOf(3, 3), buffer2).as2D() tensor.print() tensor.svdcmp(v) -// tensor.print() diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt index b2706c334..e7a424d74 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt @@ -12,7 +12,6 @@ import kotlin.math.sqrt * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
*/ - fun pythag(a: Double, b: Double): Double { val at: Double = abs(a) val bt: Double = abs(b) @@ -46,9 +45,9 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { var anorm = 0.0 var g = 0.0 var l = 0 - val w_shape = intArrayOf(m, 1) + val w_shape = intArrayOf(n, 1) var w_buffer = doubleArrayOf(0.000000) - for (i in 0 until m - 1) { + for (i in 0 until n - 1) { w_buffer += doubleArrayOf(0.000000) } val w = BroadcastDoubleTensorAlgebra.fromArray(w_shape, w_buffer).as2D() @@ -198,8 +197,8 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { this[i, i] += 1.0 } - println("matrix") - this.print() +// println("matrix") +// this.print() // тут матрица должна выглядеть так: // 0.134840 -0.762770 0.522117 // -0.269680 -0.476731 -0.245388 @@ -207,132 +206,131 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { // -0.539360 0.095346 -0.297540 // -0.674200 0.381385 0.548193 + this[0, 2] = 0.522117 + this[1, 2] = -0.245388 + this[2, 2] = -0.527383 + this[3, 2] = -0.297540 + this[4, 2] = 0.548193 -// var flag = 0 -// var nm = 0 -// var c = 0.0 -// var h = 0.0 -// var y = 0.0 -// var z = 0.0 -// var x = 0.0 -// println("L = " + l) -// for (k in n - 1 downTo 0) { -// for (its in 1 until 30) { -// flag = 1 -// -// for (newl in k downTo 0) { -// nm = newl - 1 -// if (abs(rv1[newl]) + anorm == anorm) { -// flag = 0 -// l = newl -//// println("newl before break1 = " + newl) -// println("break1") -// break -// } -// if (abs(w[nm, 0] + anorm) == anorm) { -// l = newl -// println("break2") -// break -// } -// } -// -//// println("NEWL = " + l) -// -//// l = 0 -// -// if (flag != 0) { -// c = 0.0 -// s = 1.0 -// for (i in l until k) { -// f = s * rv1[i] -// rv1[i] = c * rv1[i] -// if (abs(f) + anorm == anorm) { -// println("break3") -// break -// } -// h = pythag(f, g) -// w[i, 0] = h -// h = 1.0 / h -// c = g * h -// s = (-f) * h -// for (j in 0 until m) { -// y = this[j, nm] -// z = this[j, i] -// this[j, nm] = y * c + z * s -// this[j, i] = z * c - y * s -// } -// } -// } -// -// z = w[k, 0] -//// println("l = " + l) -//// println("k = " + k) -// if (l == k) { -// if (z < 0.0) { -// w[k, 0] = -z -// for (j in 0 until n) -// v[j, k] = -v[j, k] -// } -// println("break4") -// break -// } -// -// if (its == 30) { -// return -// } -// -// x = w[l, 0] -// nm = k - 1 -//// println("nm = " + nm) -// y = w[nm, 0] -// g = rv1[nm] -// h = rv1[k] -// f = ((y-z)*(y+z)+(g-h)*(g+h))/(2.0*h*y) -// g = pythag(f,1.0) -// f=((x-z)*(x+z)+h*((y/(f+SIGN(g,f)))-h))/x -// c = 1.0 -// s = 1.0 -// -// var i = 0 // непонятно, где она должна быть объявлена -// for (j in l until nm) { -// i = j + 1 -// g = rv1[i] -// y = w[i, 0] -// h = s * g -// g = c * g -// z = pythag(f,h) -// rv1[j] = z -// c = f / z -// s = h / z -// f = x * c + g * s -// g = g * c - x * s -// h = y * s -// y *= c -// for (jj in 0 until n) { -// x=v[jj, j]; -// z=v[jj, i]; -// v[jj, j] = x * c + z * s; -// v[jj, i] = z * c - x * s; -// } -// z = pythag(f,h) -// w[j, 0] = z -// if (z != 0.0) { -// z = 1.0 / z -// c = f * z -// s = h * z -// } -// f = c * g + s * y -// x = c * y - s * g -// for (jj in 0 until m) { -// y = this[jj, j] -// z = this[jj, i] -// this[jj, j] = y * c + z * s -// this[jj, i] = z * c - y * s -// } -// } -// rv1[l] = 0.0 -// rv1[k] = f -// w[k, 0] = x -// } -// } + var flag = 0 + var nm = 0 + var c = 0.0 + var h = 0.0 + var y = 0.0 + var z = 0.0 + var x = 0.0 + for (k in n - 1 downTo 0) { + for (its in 1 until 30) { + flag = 1 + for (newl in k downTo 0) { + nm = newl - 1 + if (abs(rv1[newl]) + anorm 
== anorm) { + flag = 0 + l = newl + break + } + if (abs(w[nm, 0]) + anorm == anorm) { + l = newl + break + } + } + + if (flag != 0) { + c = 0.0 + s = 1.0 + for (i in l until k) { + f = s * rv1[i] + rv1[i] = c * rv1[i] + if (abs(f) + anorm == anorm) { + break + } + h = pythag(f, g) + w[i, 0] = h + h = 1.0 / h + c = g * h + s = (-f) * h + for (j in 0 until m) { + y = this[j, nm] + z = this[j, i] + this[j, nm] = y * c + z * s + this[j, i] = z * c - y * s + } + } + } + + z = w[k, 0] + if (l == k) { + if (z < 0.0) { + w[k, 0] = -z + for (j in 0 until n) + v[j, k] = -v[j, k] + } + break + } + + if (its == 30) { + return + } + + x = w[l, 0] + nm = k - 1 + y = w[nm, 0] + g = rv1[nm] + h = rv1[k] + f = ((y-z)*(y+z)+(g-h)*(g+h))/(2.0*h*y) + g = pythag(f,1.0) + f=((x-z)*(x+z)+h*((y/(f+SIGN(g,f)))-h))/x + c = 1.0 + s = 1.0 + + var i = 0 + for (j in l until nm + 1) { + i = j + 1 + g = rv1[i] + y = w[i, 0] + h = s * g + g = c * g + z = pythag(f,h) + rv1[j] = z + c = f / z + s = h / z + f = x * c + g * s + g = g * c - x * s + h = y * s + y *= c + + for (jj in 0 until n) { + x=v[jj, j]; + z=v[jj, i]; + v[jj, j] = x * c + z * s; + v[jj, i] = z * c - x * s; + } + z = pythag(f,h) + w[j, 0] = z + if (z != 0.0) { + z = 1.0 / z + c = f * z + s = h * z + } + f = c * g + s * y + x = c * y - s * g + for (jj in 0 until m) { + y = this[jj, j] + z = this[jj, i] + this[jj, j] = y * c + z * s + this[jj, i] = z * c - y * s + } + } + rv1[l] = 0.0 + rv1[k] = f + w[k, 0] = x + } + } + + println("u") + this.print() + println("w") + w.print() + println("v") + v.print() } \ No newline at end of file From eda477b2b538a7c416bffbf51702de6565376d7e Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 24 May 2022 23:44:09 +0300 Subject: [PATCH 08/59] added some tests for method dot --- .../tensors/core/TestDoubleTensorAlgebra.kt | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt index 205ae2fee..8657a8dd9 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt @@ -132,6 +132,27 @@ internal class TestDoubleTensorAlgebra { 468.0, 501.0, 534.0, 594.0, 636.0, 678.0, 720.0, 771.0, 822.0 )) assertTrue(res45.shape contentEquals intArrayOf(2, 3, 3)) + + val oneDimTensor1 = fromArray(intArrayOf(3), doubleArrayOf(1.0, 2.0, 3.0)) + val oneDimTensor2 = fromArray(intArrayOf(3), doubleArrayOf(4.0, 5.0, 6.0)) + val resOneDimTensors = oneDimTensor1.dot(oneDimTensor2) + assertTrue(resOneDimTensors.mutableBuffer.array() contentEquals doubleArrayOf(32.0)) + assertTrue(resOneDimTensors.shape contentEquals intArrayOf(1)) + + val twoDimTensor1 = fromArray(intArrayOf(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0)) + val twoDimTensor2 = fromArray(intArrayOf(2, 2), doubleArrayOf(5.0, 6.0, 7.0, 8.0)) + val resTwoDimTensors = twoDimTensor1.dot(twoDimTensor2) + assertTrue(resTwoDimTensors.mutableBuffer.array() contentEquals doubleArrayOf(19.0, 22.0, 43.0, 50.0)) + assertTrue(resTwoDimTensors.shape contentEquals intArrayOf(2, 2)) + + val oneDimTensor3 = fromArray(intArrayOf(2), doubleArrayOf(1.0, 2.0)) + val resOneDimTensorOnTwoDimTensor = oneDimTensor3.dot(twoDimTensor1) + assertTrue(resOneDimTensorOnTwoDimTensor.mutableBuffer.array() contentEquals doubleArrayOf(7.0, 10.0)) + 
assertTrue(resOneDimTensorOnTwoDimTensor.shape contentEquals intArrayOf(2)) + + val resTwoDimTensorOnOneDimTensor = twoDimTensor1.dot(oneDimTensor3) + assertTrue(resTwoDimTensorOnOneDimTensor.mutableBuffer.array() contentEquals doubleArrayOf(5.0, 11.0)) + assertTrue(resTwoDimTensorOnOneDimTensor.shape contentEquals intArrayOf(2)) } @Test From 97104ad40f43534ecc41262669f82c41739c2ce1 Mon Sep 17 00:00:00 2001 From: Margarita Date: Wed, 25 May 2022 01:11:22 +0300 Subject: [PATCH 09/59] renamed function and changed structure --- .../space/kscience/kmath/tensors/margarita.kt | 36 ++++++++----------- .../space/kscience/kmath/tensors/svdcmp.kt | 30 ++++++---------- 2 files changed, 25 insertions(+), 41 deletions(-) diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt index 3629b6a47..c376d6d0f 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt @@ -11,28 +11,9 @@ import space.kscience.kmath.nd.MutableStructure2D import space.kscience.kmath.nd.Structure2D import space.kscience.kmath.nd.as2D import space.kscience.kmath.tensors.core.* -import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.dot -import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.mapIndexed -import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.zeros -import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.minus -import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.sum import space.kscience.kmath.tensors.core.tensorAlgebra import kotlin.math.* -fun DoubleArray.fmap(transform: (Double) -> Double): DoubleArray { - return this.map(transform).toDoubleArray() -} - -fun scalarProduct(v1: Structure2D, v2: Structure2D): Double { - return v1.mapIndexed { index, d -> d * v2[index] }.sum() -} - -internal fun diagonal(shape: IntArray, v: Double) : DoubleTensor { - val matrix = zeros(shape) - return matrix.mapIndexed { index, _ -> if (index.component1() == index.component2()) v else 0.0 } -} - - fun MutableStructure2D.print() { val n = this.shape.component1() val m = this.shape.component2() @@ -63,11 +44,22 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { ) val tensor = fromArray(shape, buffer).as2D() val v = fromArray(intArrayOf(3, 3), buffer2).as2D() + val w_shape = intArrayOf(3, 1) + var w_buffer = doubleArrayOf(0.000000) + for (i in 0 until 3 - 1) { + w_buffer += doubleArrayOf(0.000000) + } + val w = BroadcastDoubleTensorAlgebra.fromArray(w_shape, w_buffer).as2D() tensor.print() - tensor.svdcmp(v) - - + var ans = Pair(w, v) + tensor.svdGolabKahan(v, w) + println("u") + tensor.print() + println("w") + w.print() + println("v") + v.print() } diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt index e7a424d74..8572bdee9 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt @@ -1,7 +1,6 @@ package space.kscience.kmath.tensors import space.kscience.kmath.nd.* -import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra import kotlin.math.abs import kotlin.math.max import kotlin.math.min @@ -34,10 +33,12 @@ fun SIGN(a: Double, b: Double): Double { return -abs(a) } -internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { +// matrix v is not transposed at the output + 
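+// Sketch of what the routine below does, as far as the surrounding code shows (it keeps the
+// body of the svdcmp routine it replaces, i.e. the classic Golub-Kahan-Reinsch SVD): the
+// receiver is first reduced to bidiagonal form by Householder reflections (the left- and
+// right-hand reduction loops), and the bidiagonal matrix is then diagonalized by implicitly
+// shifted QR sweeps built from Givens rotations via the pythag and SIGN helpers above.
+// On exit the receiver holds the left singular vectors U, w holds the singular values and
+// v the right singular vectors, so that A = U * diag(w) * V^T up to round-off.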
+internal fun MutableStructure2D.svdGolabKahan(v: MutableStructure2D, w: MutableStructure2D) { val shape = this.shape - val n = shape.component2() val m = shape.component1() + val n = shape.component2() var f = 0.0 val rv1 = DoubleArray(n) var s = 0.0 @@ -45,12 +46,6 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { var anorm = 0.0 var g = 0.0 var l = 0 - val w_shape = intArrayOf(n, 1) - var w_buffer = doubleArrayOf(0.000000) - for (i in 0 until n - 1) { - w_buffer += doubleArrayOf(0.000000) - } - val w = BroadcastDoubleTensorAlgebra.fromArray(w_shape, w_buffer).as2D() for (i in 0 until n) { /* left-hand reduction */ l = i + 1 @@ -212,6 +207,9 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { this[3, 2] = -0.297540 this[4, 2] = 0.548193 + // задала правильные значения, чтобы проверить правильность кода дальше + // дальше - все корректно + var flag = 0 var nm = 0 var c = 0.0 @@ -268,9 +266,10 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { break } - if (its == 30) { - return - } +// надо придумать, что сделать - выкинуть ошибку? +// if (its == 30) { +// return +// } x = w[l, 0] nm = k - 1 @@ -326,11 +325,4 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { w[k, 0] = x } } - - println("u") - this.print() - println("w") - w.print() - println("v") - v.print() } \ No newline at end of file From a497a5df1a13ea844494c2effe30cc6f7504b60e Mon Sep 17 00:00:00 2001 From: Margarita Date: Wed, 25 May 2022 01:16:56 +0300 Subject: [PATCH 10/59] reformatted code --- .../space/kscience/kmath/tensors/svdcmp.kt | 33 +++++++++---------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt index 8572bdee9..ec4c8cc32 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt @@ -65,8 +65,7 @@ internal fun MutableStructure2D.svdGolabKahan(v: MutableStructure2D= 0) { g = (-1) * abs(sqrt(s)) - } - else { + } else { g = abs(sqrt(s)) } val h = f * g - s @@ -106,8 +105,7 @@ internal fun MutableStructure2D.svdGolabKahan(v: MutableStructure2D= 0) { g = (-1) * abs(sqrt(s)) - } - else { + } else { g = abs(sqrt(s)) } val h = f * g - s @@ -140,7 +138,7 @@ internal fun MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D Date: Tue, 12 Jul 2022 17:16:30 +0300 Subject: [PATCH 11/59] added svdGolabKahanHelper --- .../kmath/tensors/core/internal/linUtils.kt | 296 ++++++++++++++++++ 1 file changed, 296 insertions(+) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index aba6167ce..2c9a0cf8d 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -17,6 +17,7 @@ import space.kscience.kmath.tensors.core.DoubleTensor import space.kscience.kmath.tensors.core.DoubleTensorAlgebra import space.kscience.kmath.tensors.core.IntTensor import kotlin.math.abs +import kotlin.math.max import kotlin.math.min import kotlin.math.sqrt @@ -349,3 +350,298 
@@ internal fun DoubleTensorAlgebra.svdHelper( matrixV.mutableBuffer.array()[matrixV.bufferStart + i] = vBuffer[i] } } + +internal fun pythag(a: Double, b: Double): Double { + val at: Double = abs(a) + val bt: Double = abs(b) + val ct: Double + val result: Double + if (at > bt) { + ct = bt / at + result = at * sqrt(1.0 + ct * ct) + } else if (bt > 0.0) { + ct = at / bt + result = bt * sqrt(1.0 + ct * ct) + } else result = 0.0 + return result +} + +internal fun SIGN(a: Double, b: Double): Double { + if (b >= 0.0) + return abs(a) + else + return -abs(a) +} +internal fun MutableStructure2D.svdGolabKahanHelper(u: MutableStructure2D, w: BufferedTensor, v: MutableStructure2D) { + val shape = this.shape + val m = shape.component1() + val n = shape.component2() + var f = 0.0 + val rv1 = DoubleArray(n) + var s = 0.0 + var scale = 0.0 + var anorm = 0.0 + var g = 0.0 + var l = 0 + val epsilon = 1e-10 + for (i in 0 until n) { + /* left-hand reduction */ + l = i + 1 + rv1[i] = scale * g + g = 0.0 + s = 0.0 + scale = 0.0 + if (i < m) { + for (k in i until m) { + scale += abs(this[k, i]); + } + if (abs(scale) > epsilon) { + for (k in i until m) { + this[k, i] = (this[k, i] / scale) + s += this[k, i] * this[k, i] + } + f = this[i, i] + if (f >= 0) { + g = (-1) * abs(sqrt(s)) + } else { + g = abs(sqrt(s)) + } + val h = f * g - s + this[i, i] = f - g + if (i != n - 1) { + for (j in l until n) { + s = 0.0 + for (k in i until m) { + s += this[k, i] * this[k, j] + } + f = s / h + for (k in i until m) { + this[k, j] += f * this[k, i] + } + } + } + for (k in i until m) { + this[k, i] = this[k, i] * scale + } + } + } + + w.mutableBuffer.array()[w.bufferStart + i] = scale * g + /* right-hand reduction */ + g = 0.0 + s = 0.0 + scale = 0.0 + if (i < m && i != n - 1) { + for (k in l until n) { + scale += abs(this[i, k]) + } + if (abs(scale) > epsilon) { + for (k in l until n) { + this[i, k] = this[i, k] / scale + s += this[i, k] * this[i, k] + } + f = this[i, l] + if (f >= 0) { + g = (-1) * abs(sqrt(s)) + } else { + g = abs(sqrt(s)) + } + val h = f * g - s + this[i, l] = f - g + for (k in l until n) { + rv1[k] = this[i, k] / h + } + if (i != m - 1) { + for (j in l until m) { + s = 0.0 + for (k in l until n) { + s += this[j, k] * this[i, k] + } + for (k in l until n) { + this[j, k] += s * rv1[k] + } + } + } + for (k in l until n) { + this[i, k] = this[i, k] * scale + } + } + } + anorm = max(anorm, (abs(w.mutableBuffer.array()[w.bufferStart + i]) + abs(rv1[i]))); + } + + for (i in n - 1 downTo 0) { + if (i < n - 1) { + if (abs(g) > epsilon) { + for (j in l until n) { + v[j, i] = (this[i, j] / this[i, l]) / g + } + for (j in l until n) { + s = 0.0 + for (k in l until n) + s += this[i, k] * v[k, j] + for (k in l until n) + v[k, j] += s * v[k, i] + } + } + for (j in l until n) { + v[i, j] = 0.0 + v[j, i] = 0.0 + } + } + v[i, i] = 1.0 + g = rv1[i] + l = i + } + + for (i in min(n, m) - 1 downTo 0) { + l = i + 1 + g = w.mutableBuffer.array()[w.bufferStart + i] + for (j in l until n) { + this[i, j] = 0.0 + } + if (abs(g) > epsilon) { + g = 1.0 / g + for (j in l until n) { + s = 0.0 + for (k in l until m) { + s += this[k, i] * this[k, j] + } + f = (s / this[i, i]) * g + for (k in i until m) { + this[k, j] += f * this[k, i] + } + } + for (j in i until m) { + this[j, i] *= g + } + } else { + for (j in i until m) { + this[j, i] = 0.0 + } + } + this[i, i] += 1.0 + } + + var flag = 0 + var nm = 0 + var c = 0.0 + var h = 0.0 + var y = 0.0 + var z = 0.0 + var x = 0.0 + for (k in n - 1 downTo 0) { + for (its in 1 until 30) { + flag = 
1 + for (newl in k downTo 0) { + nm = newl - 1 + if (abs(rv1[newl]) + anorm == anorm) { + flag = 0 + l = newl + break + } + if (abs(w.mutableBuffer.array()[w.bufferStart + nm]) + anorm == anorm) { + l = newl + break + } + } + + if (flag != 0) { + c = 0.0 + s = 1.0 + for (i in l until k) { + f = s * rv1[i] + rv1[i] = c * rv1[i] + if (abs(f) + anorm == anorm) { + break + } + h = pythag(f, g) + w.mutableBuffer.array()[w.bufferStart + i] = h + h = 1.0 / h + c = g * h + s = (-f) * h + for (j in 0 until m) { + y = this[j, nm] + z = this[j, i] + this[j, nm] = y * c + z * s + this[j, i] = z * c - y * s + } + } + } + + z = w.mutableBuffer.array()[w.bufferStart + k] + if (l == k) { + if (z < 0.0) { + w.mutableBuffer.array()[w.bufferStart + k] = -z + for (j in 0 until n) + v[j, k] = -v[j, k] + } + break + } + +// надо придумать, что сделать - выкинуть ошибку? +// if (its == 30) { +// return +// } + + x = w.mutableBuffer.array()[w.bufferStart + l] + nm = k - 1 + y = w.mutableBuffer.array()[w.bufferStart + nm] + g = rv1[nm] + h = rv1[k] + f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y) + g = pythag(f, 1.0) + f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x + c = 1.0 + s = 1.0 + + var i = 0 + for (j in l until nm + 1) { + i = j + 1 + g = rv1[i] + y = w.mutableBuffer.array()[w.bufferStart + i] + h = s * g + g = c * g + z = pythag(f, h) + rv1[j] = z + c = f / z + s = h / z + f = x * c + g * s + g = g * c - x * s + h = y * s + y *= c + + for (jj in 0 until n) { + x = v[jj, j]; + z = v[jj, i]; + v[jj, j] = x * c + z * s; + v[jj, i] = z * c - x * s; + } + z = pythag(f, h) + w.mutableBuffer.array()[w.bufferStart + j] = z + if (abs(z) > epsilon) { + z = 1.0 / z + c = f * z + s = h * z + } + f = c * g + s * y + x = c * y - s * g + for (jj in 0 until m) { + y = this[jj, j] + z = this[jj, i] + this[jj, j] = y * c + z * s + this[jj, i] = z * c - y * s + } + } + rv1[l] = 0.0 + rv1[k] = f + w.mutableBuffer.array()[w.bufferStart + k] = x + } + } + + for (i in 0 until m) { + for (j in 0 until n) { + u[j, i] = this[i, j] + } + } +} From ac21838e4b7565b511068c8b994740fb39ec3479 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 12 Jul 2022 17:19:19 +0300 Subject: [PATCH 12/59] added svdGolabKahan --- .../kmath/tensors/core/DoubleTensorAlgebra.kt | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index e9dc34748..174b1dde2 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -882,6 +882,34 @@ public open class DoubleTensorAlgebra : return Triple(uTensor.transpose(), sTensor, vTensor.transpose()) } + public fun StructureND.svdGolabKahan(): Triple { + val size = tensor.dimension + val commonShape = tensor.shape.sliceArray(0 until size - 2) + val (n, m) = tensor.shape.sliceArray(size - 2 until size) + val uTensor = zeros(commonShape + intArrayOf(min(n, m), n)) + val sTensor = zeros(commonShape + intArrayOf(min(n, m))) + val vTensor = zeros(commonShape + intArrayOf(min(n, m), m)) + + val matrices = tensor.matrices + val uTensors = uTensor.matrices + val sTensorVectors = sTensor.vectors + val vTensors = vTensor.matrices + + for (index in matrices.indices) { + val matrix = matrices[index] + val matrixSize = matrix.shape.reduce { acc, i -> 
acc * i } + val curMatrix = DoubleTensor( + matrix.shape, + matrix.mutableBuffer.array() + .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) + .toDoubleArray() + ) + curMatrix.as2D().svdGolabKahanHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D()) + } + + return Triple(uTensor.transpose(), sTensor, vTensor) + } + override fun StructureND.symEig(): Pair = symEigJacobi(maxIteration = 50, epsilon = 1e-15) /** From c7e224818b17604830861d45edca221a3792dad6 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 12 Jul 2022 20:32:44 +0300 Subject: [PATCH 13/59] deleted tmp file --- .../space/kscience/kmath/tensors/svdcmp.kt | 325 ------------------ 1 file changed, 325 deletions(-) delete mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt deleted file mode 100644 index ec4c8cc32..000000000 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt +++ /dev/null @@ -1,325 +0,0 @@ -package space.kscience.kmath.tensors - -import space.kscience.kmath.nd.* -import kotlin.math.abs -import kotlin.math.max -import kotlin.math.min -import kotlin.math.sqrt - -/* - * Copyright 2018-2021 KMath contributors. - * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. - */ - -fun pythag(a: Double, b: Double): Double { - val at: Double = abs(a) - val bt: Double = abs(b) - val ct: Double - val result: Double - if (at > bt) { - ct = bt / at - result = at * sqrt(1.0 + ct * ct) - } else if (bt > 0.0) { - ct = at / bt - result = bt * sqrt(1.0 + ct * ct) - } else result = 0.0 - return result -} - -fun SIGN(a: Double, b: Double): Double { - if (b >= 0.0) - return abs(a) - else - return -abs(a) -} - -// matrix v is not transposed at the output - -internal fun MutableStructure2D.svdGolabKahan(v: MutableStructure2D, w: MutableStructure2D) { - val shape = this.shape - val m = shape.component1() - val n = shape.component2() - var f = 0.0 - val rv1 = DoubleArray(n) - var s = 0.0 - var scale = 0.0 - var anorm = 0.0 - var g = 0.0 - var l = 0 - for (i in 0 until n) { - /* left-hand reduction */ - l = i + 1 - rv1[i] = scale * g - g = 0.0 - s = 0.0 - scale = 0.0 - if (i < m) { - for (k in i until m) { - scale += abs(this[k, i]); - } - if (scale != 0.0) { - for (k in i until m) { - this[k, i] = (this[k, i] / scale) - s += this[k, i] * this[k, i] - } - f = this[i, i] - if (f >= 0) { - g = (-1) * abs(sqrt(s)) - } else { - g = abs(sqrt(s)) - } - val h = f * g - s - this[i, i] = f - g - if (i != n - 1) { - for (j in l until n) { - s = 0.0 - for (k in i until m) { - s += this[k, i] * this[k, j] - } - f = s / h - for (k in i until m) { - this[k, j] += f * this[k, i] - } - } - } - for (k in i until m) { - this[k, i] = this[k, i] * scale - } - } - } - - w[i, 0] = scale * g - /* right-hand reduction */ - g = 0.0 - s = 0.0 - scale = 0.0 - if (i < m && i != n - 1) { - for (k in l until n) { - scale += abs(this[i, k]) - } - if (scale != 0.0) { - for (k in l until n) { - this[i, k] = this[i, k] / scale - s += this[i, k] * this[i, k] - } - f = this[i, l] - if (f >= 0) { - g = (-1) * abs(sqrt(s)) - } else { - g = abs(sqrt(s)) - } - val h = f * g - s - this[i, l] = f - g - for (k in l until n) { - rv1[k] = this[i, k] / h - } - if (i != m - 1) { - for (j in l until m) { - s = 0.0 - for (k in l until n) { - s += this[j, k] * this[i, k] - } - for (k in l until n) { - this[j, 
k] += s * rv1[k] - } - } - } - for (k in l until n) { - this[i, k] = this[i, k] * scale - } - } - } - anorm = max(anorm, (abs(w[i, 0]) + abs(rv1[i]))); - } - - for (i in n - 1 downTo 0) { - if (i < n - 1) { - if (g != 0.0) { - for (j in l until n) { - v[j, i] = (this[i, j] / this[i, l]) / g - } - for (j in l until n) { - s = 0.0 - for (k in l until n) - s += this[i, k] * v[k, j] - for (k in l until n) - v[k, j] += s * v[k, i] - } - } - for (j in l until n) { - v[i, j] = 0.0 - v[j, i] = 0.0 - } - } - v[i, i] = 1.0 - g = rv1[i] - l = i - } - - // до этого момента все правильно считается - // дальше - нет - - for (i in min(n, m) - 1 downTo 0) { - l = i + 1 - g = w[i, 0] - for (j in l until n) { - this[i, j] = 0.0 - } - if (g != 0.0) { - // !!!!! вот тут деление на почти ноль - g = 1.0 / g - for (j in l until n) { - s = 0.0 - for (k in l until m) { - s += this[k, i] * this[k, j] - } - f = (s / this[i, i]) * g - for (k in i until m) { - this[k, j] += f * this[k, i] - } - } - for (j in i until m) { - this[j, i] *= g - } - } else { - for (j in i until m) { - this[j, i] = 0.0 - } - } - this[i, i] += 1.0 - } - -// println("matrix") -// this.print() -// тут матрица должна выглядеть так: -// 0.134840 -0.762770 0.522117 -// -0.269680 -0.476731 -0.245388 -// -0.404520 -0.190693 -0.527383 -// -0.539360 0.095346 -0.297540 -// -0.674200 0.381385 0.548193 - - this[0, 2] = 0.522117 - this[1, 2] = -0.245388 - this[2, 2] = -0.527383 - this[3, 2] = -0.297540 - this[4, 2] = 0.548193 - - // задала правильные значения, чтобы проверить правильность кода дальше - // дальше - все корректно - - var flag = 0 - var nm = 0 - var c = 0.0 - var h = 0.0 - var y = 0.0 - var z = 0.0 - var x = 0.0 - for (k in n - 1 downTo 0) { - for (its in 1 until 30) { - flag = 1 - for (newl in k downTo 0) { - nm = newl - 1 - if (abs(rv1[newl]) + anorm == anorm) { - flag = 0 - l = newl - break - } - if (abs(w[nm, 0]) + anorm == anorm) { - l = newl - break - } - } - - if (flag != 0) { - c = 0.0 - s = 1.0 - for (i in l until k) { - f = s * rv1[i] - rv1[i] = c * rv1[i] - if (abs(f) + anorm == anorm) { - break - } - h = pythag(f, g) - w[i, 0] = h - h = 1.0 / h - c = g * h - s = (-f) * h - for (j in 0 until m) { - y = this[j, nm] - z = this[j, i] - this[j, nm] = y * c + z * s - this[j, i] = z * c - y * s - } - } - } - - z = w[k, 0] - if (l == k) { - if (z < 0.0) { - w[k, 0] = -z - for (j in 0 until n) - v[j, k] = -v[j, k] - } - break - } - -// надо придумать, что сделать - выкинуть ошибку? 
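The question in the comment above (in Russian: roughly "need to decide what to do here — throw an error?") refers to the commented-out block just below: the classic Golub–Reinsch routine gives each singular value at most 30 implicit-QR sweeps, and this draft simply returns when that budget runs out. A minimal sketch of a louder guard, assuming the sweep counter `its` and the singular-value index `k` used throughout this routine; it is not part of any patch in this series:

    // Sketch only: fail visibly instead of returning a half-finished decomposition.
    // `its` is the sweep counter and `k` the index of the singular value being reduced,
    // both named as in the surrounding routine; 30 is the usual Golub–Reinsch sweep budget.
    fun checkSvdConverged(its: Int, k: Int) {
        if (its == 30) error("SVD failed to converge in 30 QR sweeps (singular value index $k)")
    }

Returning the current values together with a convergence flag would be the softer choice; either way the check belongs once per sweep, right after the `l == k` test and before the shift is computed.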
-// if (its == 30) { -// return -// } - - x = w[l, 0] - nm = k - 1 - y = w[nm, 0] - g = rv1[nm] - h = rv1[k] - f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y) - g = pythag(f, 1.0) - f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x - c = 1.0 - s = 1.0 - - var i = 0 - for (j in l until nm + 1) { - i = j + 1 - g = rv1[i] - y = w[i, 0] - h = s * g - g = c * g - z = pythag(f, h) - rv1[j] = z - c = f / z - s = h / z - f = x * c + g * s - g = g * c - x * s - h = y * s - y *= c - - for (jj in 0 until n) { - x = v[jj, j]; - z = v[jj, i]; - v[jj, j] = x * c + z * s; - v[jj, i] = z * c - x * s; - } - z = pythag(f, h) - w[j, 0] = z - if (z != 0.0) { - z = 1.0 / z - c = f * z - s = h * z - } - f = c * g + s * y - x = c * y - s * g - for (jj in 0 until m) { - y = this[jj, j] - z = this[jj, i] - this[jj, j] = y * c + z * s - this[jj, i] = z * c - y * s - } - } - rv1[l] = 0.0 - rv1[k] = f - w[k, 0] = x - } - } -} \ No newline at end of file From 874da80446dc12b8078cbda6b4c900d540aae027 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 12 Jul 2022 20:37:00 +0300 Subject: [PATCH 14/59] tensors dimensions changed --- .../kscience/kmath/tensors/core/DoubleTensorAlgebra.kt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index 174b1dde2..b6b039205 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -886,9 +886,9 @@ public open class DoubleTensorAlgebra : val size = tensor.dimension val commonShape = tensor.shape.sliceArray(0 until size - 2) val (n, m) = tensor.shape.sliceArray(size - 2 until size) - val uTensor = zeros(commonShape + intArrayOf(min(n, m), n)) - val sTensor = zeros(commonShape + intArrayOf(min(n, m))) - val vTensor = zeros(commonShape + intArrayOf(min(n, m), m)) + val uTensor = zeros(commonShape + intArrayOf(m, n)) + val sTensor = zeros(commonShape + intArrayOf(m)) + val vTensor = zeros(commonShape + intArrayOf(m, m)) val matrices = tensor.matrices val uTensors = uTensor.matrices From 3da47573f4732f2fa1f9e48b4d11e9ea283e4fd4 Mon Sep 17 00:00:00 2001 From: Margarita <44027333+margarita0303@users.noreply.github.com> Date: Tue, 12 Jul 2022 20:43:35 +0300 Subject: [PATCH 15/59] Delete margarita.kt deleted tmp file for testing during work --- .../space/kscience/kmath/tensors/margarita.kt | 65 ------------------- 1 file changed, 65 deletions(-) delete mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt deleted file mode 100644 index c376d6d0f..000000000 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2018-2021 KMath contributors. - * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
- */ - -package space.kscience.kmath.tensors - -import space.kscience.kmath.linear.transpose -import space.kscience.kmath.misc.PerformancePitfall -import space.kscience.kmath.nd.MutableStructure2D -import space.kscience.kmath.nd.Structure2D -import space.kscience.kmath.nd.as2D -import space.kscience.kmath.tensors.core.* -import space.kscience.kmath.tensors.core.tensorAlgebra -import kotlin.math.* - -fun MutableStructure2D.print() { - val n = this.shape.component1() - val m = this.shape.component2() - for (i in 0 until n) { - for (j in 0 until m) { - val x = (this[i, j] * 100).roundToInt() / 100.0 - print("$x ") - } - println() - } - println("______________") -} - -@OptIn(PerformancePitfall::class) -fun main(): Unit = Double.tensorAlgebra.withBroadcast { - val shape = intArrayOf(5, 3) - val buffer = doubleArrayOf( - 1.000000, 2.000000, 3.000000, - 2.000000, 3.000000, 4.000000, - 3.000000, 4.000000, 5.000000, - 4.000000, 5.000000, 6.000000, - 5.000000, 6.000000, 7.000000 - ) - val buffer2 = doubleArrayOf( - 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000 - ) - val tensor = fromArray(shape, buffer).as2D() - val v = fromArray(intArrayOf(3, 3), buffer2).as2D() - val w_shape = intArrayOf(3, 1) - var w_buffer = doubleArrayOf(0.000000) - for (i in 0 until 3 - 1) { - w_buffer += doubleArrayOf(0.000000) - } - val w = BroadcastDoubleTensorAlgebra.fromArray(w_shape, w_buffer).as2D() - tensor.print() - var ans = Pair(w, v) - tensor.svdGolabKahan(v, w) - - println("u") - tensor.print() - println("w") - w.print() - println("v") - v.print() - - -} From 07ed9980fcd683048b15db88088f859d39ad6f4c Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Wed, 13 Jul 2022 18:34:26 +0300 Subject: [PATCH 16/59] svd benchmarks added --- benchmarks/build.gradle.kts | 5 +++ .../kscience/kmath/benchmarks/SVDBenchmark.kt | 34 +++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt diff --git a/benchmarks/build.gradle.kts b/benchmarks/build.gradle.kts index f8d39b9c5..143681aec 100644 --- a/benchmarks/build.gradle.kts +++ b/benchmarks/build.gradle.kts @@ -84,6 +84,11 @@ benchmark { iterationTimeUnit = "ms" } + configurations.register("svd") { + commonConfiguration() + include("svdBenchmark") + } + configurations.register("buffer") { commonConfiguration() include("BufferBenchmark") diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt new file mode 100644 index 000000000..e6fd716dd --- /dev/null +++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt @@ -0,0 +1,34 @@ +/* + * Copyright 2018-2021 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
+ */ + +package space.kscience.kmath.benchmarks +import kotlinx.benchmark.Benchmark +import kotlinx.benchmark.Blackhole +import kotlinx.benchmark.Scope +import kotlinx.benchmark.State +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.svdGolabKahan +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.svd + +@State(Scope.Benchmark) +class SVDBenchmark { + companion object { + val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(10, 10, 10), 0) + } + + @Benchmark + fun svdPowerMethod(blackhole: Blackhole) { + blackhole.consume( + tensor.svd() + ) + } + + @Benchmark + fun svdGolabKahan(blackhole: Blackhole) { + blackhole.consume( + tensor.svdGolabKahan() + ) + } +} \ No newline at end of file From f7cb4837825e6f1e90504f32dd967af152d986c7 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Wed, 13 Jul 2022 18:52:58 +0300 Subject: [PATCH 17/59] changed name in build file --- benchmarks/build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/build.gradle.kts b/benchmarks/build.gradle.kts index 143681aec..10352a4c5 100644 --- a/benchmarks/build.gradle.kts +++ b/benchmarks/build.gradle.kts @@ -86,7 +86,7 @@ benchmark { configurations.register("svd") { commonConfiguration() - include("svdBenchmark") + include("SVDBenchmark") } configurations.register("buffer") { From 83620c77d2ef329fdda6a4ecaf53fc0a6c0686c7 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Wed, 13 Jul 2022 19:51:17 +0300 Subject: [PATCH 18/59] functions SIGN and pythag are made private --- .../space/kscience/kmath/tensors/core/internal/linUtils.kt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 2c9a0cf8d..059011c6d 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -351,7 +351,7 @@ internal fun DoubleTensorAlgebra.svdHelper( } } -internal fun pythag(a: Double, b: Double): Double { +private fun pythag(a: Double, b: Double): Double { val at: Double = abs(a) val bt: Double = abs(b) val ct: Double @@ -366,7 +366,7 @@ internal fun pythag(a: Double, b: Double): Double { return result } -internal fun SIGN(a: Double, b: Double): Double { +private fun SIGN(a: Double, b: Double): Double { if (b >= 0.0) return abs(a) else From a420f7406cafa8b88b6fda1d59e032a21d1a3433 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Wed, 13 Jul 2022 19:54:10 +0300 Subject: [PATCH 19/59] added tests for svd Golab Kahan --- .../core/TestDoubleLinearOpsAlgebra.kt | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index e025d4b71..3b305b25a 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -175,6 +175,28 @@ internal class TestDoubleLinearOpsTensorAlgebra { assertTrue(tensor.eq(tensorSVD)) } + @Test + fun testSVDGolabKahan() = DoubleTensorAlgebra{ + 
testSVDGolabKahanFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) + testSVDGolabKahanFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) + val buffer = doubleArrayOf( + 1.000000, 2.000000, 3.000000, + 2.000000, 3.000000, 4.000000, + 3.000000, 4.000000, 5.000000, + 4.000000, 5.000000, 6.000000, + 5.000000, 6.000000, 7.000000 + ) + testSVDGolabKahanFor(fromArray(intArrayOf(5, 3), buffer)) + } + + @Test + fun testBatchedSVDGolabKahan() = DoubleTensorAlgebra{ + val tensor = randomNormal(intArrayOf(2, 5, 3), 0) + val (tensorU, tensorS, tensorV) = tensor.svdGolabKahan() + val tensorSVD = tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose()) + assertTrue(tensor.eq(tensorSVD)) + } + @Test fun testBatchedSymEig() = DoubleTensorAlgebra { val tensor = randomNormal(shape = intArrayOf(2, 3, 3), 0) @@ -199,3 +221,17 @@ private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double assertTrue(tensor.eq(tensorSVD, epsilon)) } + +private fun DoubleTensorAlgebra.testSVDGolabKahanFor(tensor: DoubleTensor) { + val svd = tensor.svdGolabKahan() + + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + + assertTrue(tensor.eq(tensorSVD)) +} + + From ea8401acab3939c54b3f74125e5fabafded33d9f Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Thu, 14 Jul 2022 14:34:44 +0300 Subject: [PATCH 20/59] added new svd tests --- .../core/TestDoubleLinearOpsAlgebra.kt | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index 3b305b25a..a5e4954c7 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -167,6 +167,18 @@ internal class TestDoubleLinearOpsTensorAlgebra { testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) } +// @Test +// fun testSVDError() = DoubleTensorAlgebra{ +// val buffer = doubleArrayOf( +// 1.000000, 2.000000, 3.000000, +// 2.000000, 3.000000, 4.000000, +// 3.000000, 4.000000, 5.000000, +// 4.000000, 5.000000, 6.000000, +// 5.000000, 6.000000, 7.000000 +// ) +// testSVDFor(fromArray(intArrayOf(5, 3), buffer)) +// } + @Test fun testBatchedSVD() = DoubleTensorAlgebra { val tensor = randomNormal(intArrayOf(2, 5, 3), 0) @@ -189,6 +201,28 @@ internal class TestDoubleLinearOpsTensorAlgebra { testSVDGolabKahanFor(fromArray(intArrayOf(5, 3), buffer)) } +// @Test +// fun testSVDGolabKahanError() = DoubleTensorAlgebra{ +// val buffer = doubleArrayOf( +// 1.0, 2.0, 3.0, 2.0, 3.0, +// 4.0, 3.0, 4.0, 5.0, 4.0, +// 5.0, 6.0, 5.0, 6.0, 7.0 +// ) +// testSVDGolabKahanFor(fromArray(intArrayOf(3, 5), buffer)) +// } + + @Test + fun testSVDGolabKahanBig() = DoubleTensorAlgebra{ + val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) + testSVDGolabKahanFor(tensor) + } + + @Test + fun testSVDBig() = DoubleTensorAlgebra{ + val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) + testSVDFor(tensor) + } + @Test fun testBatchedSVDGolabKahan() = DoubleTensorAlgebra{ val tensor = randomNormal(intArrayOf(2, 5, 3), 0) From 20695e9617ff0efd675a8193c870579127da7831 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Thu, 14 Jul 2022 14:38:00 +0300 Subject: [PATCH 
21/59] commented out new tests, they are too long --- .../core/TestDoubleLinearOpsAlgebra.kt | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index a5e4954c7..c0d241a31 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -211,17 +211,17 @@ internal class TestDoubleLinearOpsTensorAlgebra { // testSVDGolabKahanFor(fromArray(intArrayOf(3, 5), buffer)) // } - @Test - fun testSVDGolabKahanBig() = DoubleTensorAlgebra{ - val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) - testSVDGolabKahanFor(tensor) - } - - @Test - fun testSVDBig() = DoubleTensorAlgebra{ - val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) - testSVDFor(tensor) - } +// @Test +// fun testSVDGolabKahanBig() = DoubleTensorAlgebra{ +// val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) +// testSVDGolabKahanFor(tensor) +// } +// +// @Test +// fun testSVDBig() = DoubleTensorAlgebra{ +// val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) +// testSVDFor(tensor) +// } @Test fun testBatchedSVDGolabKahan() = DoubleTensorAlgebra{ From 9a46dd896686e91dde37a7ad4f0d24c1d185460f Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 26 Jul 2022 18:03:01 +0300 Subject: [PATCH 22/59] functions renamed, svd (Golab Kahan) now works by default --- .../kmath/tensors/core/DoubleTensorAlgebra.kt | 63 +++++++++---------- .../kmath/tensors/core/internal/linUtils.kt | 4 +- 2 files changed, 32 insertions(+), 35 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index b6b039205..9ae7853a5 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -833,8 +833,33 @@ public open class DoubleTensorAlgebra : return qTensor to rTensor } - override fun StructureND.svd(): Triple = - svd(epsilon = 1e-10) + override fun StructureND.svd(): Triple { + val size = tensor.dimension + val commonShape = tensor.shape.sliceArray(0 until size - 2) + val (n, m) = tensor.shape.sliceArray(size - 2 until size) + val uTensor = zeros(commonShape + intArrayOf(m, n)) + val sTensor = zeros(commonShape + intArrayOf(m)) + val vTensor = zeros(commonShape + intArrayOf(m, m)) + + val matrices = tensor.matrices + val uTensors = uTensor.matrices + val sTensorVectors = sTensor.vectors + val vTensors = vTensor.matrices + + for (index in matrices.indices) { + val matrix = matrices[index] + val matrixSize = matrix.shape.reduce { acc, i -> acc * i } + val curMatrix = DoubleTensor( + matrix.shape, + matrix.mutableBuffer.array() + .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) + .toDoubleArray() + ) + curMatrix.as2D().svdHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D()) + } + + return Triple(uTensor.transpose(), sTensor, vTensor) + } /** * Singular Value Decomposition. 
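After this hunk, `svd()` dispatches straight to the Golub–Kahan helper, while the previous implementation survives as `svdPowerMethod()` (next hunk). A small sketch of how the returned triple is meant to be consumed, mirroring the round-trip check used by the tests in this series; the 2×2 input matrix is an arbitrary illustration, not taken from the patches:

    import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

    // Sketch: consume the Triple(U, S, V) returned by svd() and rebuild the input,
    // the same U · diag(S) · V^T round trip the test helpers assert against.
    fun svdRoundTrip() = DoubleTensorAlgebra {
        val a = fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))
        val (u, s, v) = a.svd()
        val rebuilt = u dot (diagonalEmbedding(s) dot v.transpose())
        println(a.eq(rebuilt))   // expected: true, up to numerical tolerance
    }

The reconstruction is the same one `testSVDFor` and `testSVDPowerMethodFor` build further down in this series, so a caller can treat both code paths identically.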
@@ -849,7 +874,7 @@ public open class DoubleTensorAlgebra : * i.e., the precision with which the cosine approaches 1 in an iterative algorithm. * @return a triple `Triple(U, S, V)`. */ - public fun StructureND.svd(epsilon: Double): Triple { + public fun StructureND.svdPowerMethod(epsilon: Double = 1e-10): Triple { val size = tensor.dimension val commonShape = tensor.shape.sliceArray(0 until size - 2) val (n, m) = tensor.shape.sliceArray(size - 2 until size) @@ -876,40 +901,12 @@ public open class DoubleTensorAlgebra : .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) .toDoubleArray() ) - svdHelper(curMatrix, usv, m, n, epsilon) + svdPowerMethodHelper(curMatrix, usv, m, n, epsilon) } return Triple(uTensor.transpose(), sTensor, vTensor.transpose()) } - public fun StructureND.svdGolabKahan(): Triple { - val size = tensor.dimension - val commonShape = tensor.shape.sliceArray(0 until size - 2) - val (n, m) = tensor.shape.sliceArray(size - 2 until size) - val uTensor = zeros(commonShape + intArrayOf(m, n)) - val sTensor = zeros(commonShape + intArrayOf(m)) - val vTensor = zeros(commonShape + intArrayOf(m, m)) - - val matrices = tensor.matrices - val uTensors = uTensor.matrices - val sTensorVectors = sTensor.vectors - val vTensors = vTensor.matrices - - for (index in matrices.indices) { - val matrix = matrices[index] - val matrixSize = matrix.shape.reduce { acc, i -> acc * i } - val curMatrix = DoubleTensor( - matrix.shape, - matrix.mutableBuffer.array() - .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) - .toDoubleArray() - ) - curMatrix.as2D().svdGolabKahanHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D()) - } - - return Triple(uTensor.transpose(), sTensor, vTensor) - } - override fun StructureND.symEig(): Pair = symEigJacobi(maxIteration = 50, epsilon = 1e-15) /** @@ -935,7 +932,7 @@ public open class DoubleTensorAlgebra : } } - val (u, s, v) = tensor.svd(epsilon) + val (u, s, v) = tensor.svd() val shp = s.shape + intArrayOf(1) val utv = u.transpose() dot v val n = s.shape.last() diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 059011c6d..26852dc0b 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -300,7 +300,7 @@ internal fun DoubleTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double = 1e-10) } } -internal fun DoubleTensorAlgebra.svdHelper( +internal fun DoubleTensorAlgebra.svdPowerMethodHelper( matrix: DoubleTensor, USV: Triple, BufferedTensor, BufferedTensor>, m: Int, n: Int, epsilon: Double, @@ -372,7 +372,7 @@ private fun SIGN(a: Double, b: Double): Double { else return -abs(a) } -internal fun MutableStructure2D.svdGolabKahanHelper(u: MutableStructure2D, w: BufferedTensor, v: MutableStructure2D) { +internal fun MutableStructure2D.svdHelper(u: MutableStructure2D, w: BufferedTensor, v: MutableStructure2D) { val shape = this.shape val m = shape.component1() val n = shape.component2() From f0e445587dfb9d3c6f8fbd01dff7be22cbd795a8 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 26 Jul 2022 21:56:50 +0300 Subject: [PATCH 23/59] fixed error --- .../kscience/kmath/tensors/core/internal/linUtils.kt | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git 
a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 26852dc0b..8c1110f33 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -549,12 +549,13 @@ internal fun MutableStructure2D.svdHelper(u: MutableStructure2D, if (flag != 0) { c = 0.0 s = 1.0 - for (i in l until k) { + for (i in l until k + 1) { f = s * rv1[i] rv1[i] = c * rv1[i] if (abs(f) + anorm == anorm) { break } + g = w.mutableBuffer.array()[w.bufferStart + i] h = pythag(f, g) w.mutableBuffer.array()[w.bufferStart + i] = h h = 1.0 / h @@ -579,11 +580,6 @@ internal fun MutableStructure2D.svdHelper(u: MutableStructure2D, break } -// надо придумать, что сделать - выкинуть ошибку? -// if (its == 30) { -// return -// } - x = w.mutableBuffer.array()[w.bufferStart + l] nm = k - 1 y = w.mutableBuffer.array()[w.bufferStart + nm] From f7ac73b748ee72ea1dbef28a3fde4c1afd64a60e Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 26 Jul 2022 22:13:48 +0300 Subject: [PATCH 24/59] added some tests for svd --- .../core/TestDoubleLinearOpsAlgebra.kt | 147 ++++++++---------- 1 file changed, 63 insertions(+), 84 deletions(-) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index c0d241a31..d3e67fbdc 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -161,76 +161,6 @@ internal class TestDoubleLinearOpsTensorAlgebra { assertTrue { abs(abs(res.mutableBuffer.array()[res.bufferStart + 1]) - 0.922) < 0.01 } } - @Test - fun testSVD() = DoubleTensorAlgebra{ - testSVDFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) - testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) - } - -// @Test -// fun testSVDError() = DoubleTensorAlgebra{ -// val buffer = doubleArrayOf( -// 1.000000, 2.000000, 3.000000, -// 2.000000, 3.000000, 4.000000, -// 3.000000, 4.000000, 5.000000, -// 4.000000, 5.000000, 6.000000, -// 5.000000, 6.000000, 7.000000 -// ) -// testSVDFor(fromArray(intArrayOf(5, 3), buffer)) -// } - - @Test - fun testBatchedSVD() = DoubleTensorAlgebra { - val tensor = randomNormal(intArrayOf(2, 5, 3), 0) - val (tensorU, tensorS, tensorV) = tensor.svd() - val tensorSVD = tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose()) - assertTrue(tensor.eq(tensorSVD)) - } - - @Test - fun testSVDGolabKahan() = DoubleTensorAlgebra{ - testSVDGolabKahanFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) - testSVDGolabKahanFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) - val buffer = doubleArrayOf( - 1.000000, 2.000000, 3.000000, - 2.000000, 3.000000, 4.000000, - 3.000000, 4.000000, 5.000000, - 4.000000, 5.000000, 6.000000, - 5.000000, 6.000000, 7.000000 - ) - testSVDGolabKahanFor(fromArray(intArrayOf(5, 3), buffer)) - } - -// @Test -// fun testSVDGolabKahanError() = DoubleTensorAlgebra{ -// val buffer = doubleArrayOf( -// 1.0, 2.0, 3.0, 2.0, 3.0, -// 4.0, 3.0, 4.0, 5.0, 4.0, -// 5.0, 6.0, 5.0, 6.0, 7.0 -// ) -// 
testSVDGolabKahanFor(fromArray(intArrayOf(3, 5), buffer)) -// } - -// @Test -// fun testSVDGolabKahanBig() = DoubleTensorAlgebra{ -// val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) -// testSVDGolabKahanFor(tensor) -// } -// -// @Test -// fun testSVDBig() = DoubleTensorAlgebra{ -// val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) -// testSVDFor(tensor) -// } - - @Test - fun testBatchedSVDGolabKahan() = DoubleTensorAlgebra{ - val tensor = randomNormal(intArrayOf(2, 5, 3), 0) - val (tensorU, tensorS, tensorV) = tensor.svdGolabKahan() - val tensorSVD = tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose()) - assertTrue(tensor.eq(tensorSVD)) - } - @Test fun testBatchedSymEig() = DoubleTensorAlgebra { val tensor = randomNormal(shape = intArrayOf(2, 3, 3), 0) @@ -240,24 +170,63 @@ internal class TestDoubleLinearOpsTensorAlgebra { assertTrue(tensorSigma.eq(tensorSigmaCalc)) } - -} - - -private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double = 1e-10) { - val svd = tensor.svd() - - val tensorSVD = svd.first - .dot( - diagonalEmbedding(svd.second) - .dot(svd.third.transpose()) + @Test + fun testSVD() = DoubleTensorAlgebra{ + testSVDFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) + testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) + val buffer1 = doubleArrayOf( + 1.000000, 2.000000, 3.000000, + 2.000000, 3.000000, 4.000000, + 3.000000, 4.000000, 5.000000, + 4.000000, 5.000000, 6.000000, + 5.000000, 6.000000, 7.000000 ) + testSVDFor(fromArray(intArrayOf(5, 3), buffer1)) + val buffer2 = doubleArrayOf( + 1.0, 2.0, 3.0, 2.0, 3.0, + 4.0, 3.0, 4.0, 5.0, 4.0, + 5.0, 6.0, 5.0, 6.0, 7.0 + ) + testSVDFor(fromArray(intArrayOf(3, 5), buffer2)) + } - assertTrue(tensor.eq(tensorSVD, epsilon)) + @Test + fun testBatchedSVD() = DoubleTensorAlgebra{ + val tensor1 = randomNormal(intArrayOf(2, 5, 3), 0) + testSVDFor(tensor1) + val tensor2 = DoubleTensorAlgebra.randomNormal(intArrayOf(30, 30, 30), 0) + testSVDFor(tensor2) + } + + @Test + fun testSVDPowerMethod() = DoubleTensorAlgebra{ + testSVDPowerMethodFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) + testSVDPowerMethodFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) + } + + @Test + fun testBatchedSVDPowerMethod() = DoubleTensorAlgebra { + val tensor1 = randomNormal(intArrayOf(2, 5, 3), 0) + testSVDPowerMethodFor(tensor1) + val tensor2 = DoubleTensorAlgebra.randomNormal(intArrayOf(30, 30, 30), 0) + testSVDPowerMethodFor(tensor2) + } + +// @Test +// fun testSVDPowerMethodError() = DoubleTensorAlgebra{ +// val buffer = doubleArrayOf( +// 1.000000, 2.000000, 3.000000, +// 2.000000, 3.000000, 4.000000, +// 3.000000, 4.000000, 5.000000, +// 4.000000, 5.000000, 6.000000, +// 5.000000, 6.000000, 7.000000 +// ) +// testSVDPowerMethodFor(fromArray(intArrayOf(5, 3), buffer)) +// } } -private fun DoubleTensorAlgebra.testSVDGolabKahanFor(tensor: DoubleTensor) { - val svd = tensor.svdGolabKahan() +private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor) { + val svd = tensor.svd() val tensorSVD = svd.first .dot( @@ -268,4 +237,14 @@ private fun DoubleTensorAlgebra.testSVDGolabKahanFor(tensor: DoubleTensor) { assertTrue(tensor.eq(tensorSVD)) } +private fun DoubleTensorAlgebra.testSVDPowerMethodFor(tensor: DoubleTensor, epsilon: Double = 1e-10) { + val svd = tensor.svdPowerMethod() + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + 
.dot(svd.third.transpose()) + ) + + assertTrue(tensor.eq(tensorSVD, epsilon)) +} \ No newline at end of file From 5d74fd8bf406060a8620c2a04250a909459dd69f Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 26 Jul 2022 22:42:36 +0300 Subject: [PATCH 25/59] benchmark changed due to new names of svd functions --- .../space/kscience/kmath/benchmarks/SVDBenchmark.kt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt index e6fd716dd..3d70ebbc3 100644 --- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt +++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt @@ -8,9 +8,9 @@ import kotlinx.benchmark.Benchmark import kotlinx.benchmark.Blackhole import kotlinx.benchmark.Scope import kotlinx.benchmark.State -import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.svdGolabKahan +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.svd import space.kscience.kmath.tensors.core.DoubleTensorAlgebra -import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.svd +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.svdPowerMethod @State(Scope.Benchmark) class SVDBenchmark { @@ -21,14 +21,14 @@ class SVDBenchmark { @Benchmark fun svdPowerMethod(blackhole: Blackhole) { blackhole.consume( - tensor.svd() + tensor.svdPowerMethod() ) } @Benchmark fun svdGolabKahan(blackhole: Blackhole) { blackhole.consume( - tensor.svdGolabKahan() + tensor.svd() ) } } \ No newline at end of file From a241d5dc9279223f552810e2e972df59275e18fc Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 22 Mar 2022 21:12:39 +0300 Subject: [PATCH 26/59] added more examples for tensors, added tests for acos, asin, atanh --- .../core/TestDoubleAnalyticTensorAlgebra.kt | 219 ++++++++++++++---- 1 file changed, 179 insertions(+), 40 deletions(-) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt index 1e21379b4..b9f4ebd96 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt @@ -12,110 +12,250 @@ import kotlin.test.assertTrue internal class TestDoubleAnalyticTensorAlgebra { - val shape = intArrayOf(2, 1, 3, 2) - val buffer = doubleArrayOf( - 27.1, 20.0, 19.84, - 23.123, 3.0, 2.0, + val shapeWithNegative = intArrayOf(4) + val bufferWithNegative = doubleArrayOf(9.3348, -7.5889, -1.2005, 1.1584) + val tensorWithNegative = DoubleTensor(shapeWithNegative, bufferWithNegative) - 3.23, 133.7, 25.3, - 100.3, 11.0, 12.012 - ) - val tensor = DoubleTensor(shape, buffer) + val shape1 = intArrayOf(4) + val buffer1 = doubleArrayOf(1.3348, 1.5889, 1.2005, 1.1584) + val tensor1 = DoubleTensor(shape1, buffer1) + + val shape2 = intArrayOf(2, 2) + val buffer2 = doubleArrayOf(1.0, 9.456, 3.0, 4.0) + val tensor2 = DoubleTensor(shape2, buffer2) + + val shape3 = intArrayOf(2, 3, 2) + val buffer3 = doubleArrayOf(1.0, 9.456, 7.0, 2.123, 1.0, 9.456, 30.8888, 6.0, 1.0, 9.456, 3.0, 4.99) + val tensor3 = DoubleTensor(shape3, buffer3) + + val shape4 = intArrayOf(2, 1, 3, 2) + val buffer4 = doubleArrayOf(27.1, 20.0, 19.84, 23.123, 3.0, 
2.0, 3.23, 133.7, 25.3, 100.3, 11.0, 12.012) + val tensor4 = DoubleTensor(shape4, buffer4) + + val bufferWithNegativeMod1 = bufferWithNegative.map { x -> x % 1 }.toDoubleArray() + val tensorWithNegativeMod1 = DoubleTensor(shapeWithNegative, bufferWithNegativeMod1) + + val buffer1Mod1 = buffer1.map { x -> x % 1 }.toDoubleArray() + val tensor1Mod1 = DoubleTensor(shape1, buffer1Mod1) + + val buffer2Mod1 = buffer2.map { x -> x % 1 }.toDoubleArray() + val tensor2Mod1 = DoubleTensor(shape2, buffer2Mod1) + + val buffer3Mod1 = buffer3.map { x -> x % 1 }.toDoubleArray() + val tensor3Mod1 = DoubleTensor(shape3, buffer3Mod1) + + val buffer4Mod1 = buffer4.map { x -> x % 1 }.toDoubleArray() + val tensor4Mod1 = DoubleTensor(shape4, buffer4Mod1) fun DoubleArray.fmap(transform: (Double) -> Double): DoubleArray { return this.map(transform).toDoubleArray() } - fun expectedTensor(transform: (Double) -> Double): DoubleTensor { - return DoubleTensor(shape, buffer.fmap(transform)) + fun expectedTensorWithNegative(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shapeWithNegative, bufferWithNegative.fmap(transform)) + } + + fun expectedTensor1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape1, buffer1.fmap(transform)) + } + + fun expectedTensor2(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape2, buffer2.fmap(transform)) + } + + fun expectedTensor3(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape3, buffer3.fmap(transform)) + } + + fun expectedTensor4(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape4, buffer4.fmap(transform)) + } + + fun expectedTensorWithNegativeMod1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shapeWithNegative, bufferWithNegativeMod1.fmap(transform)) + } + + fun expectedTensor1Mod1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape1, buffer1Mod1.fmap(transform)) + } + + fun expectedTensor2Mod1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape2, buffer2Mod1.fmap(transform)) + } + + fun expectedTensor3Mod1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape3, buffer3Mod1.fmap(transform)) + } + + fun expectedTensor4Mod1(transform: (Double) -> Double): DoubleTensor { + return DoubleTensor(shape4, buffer4Mod1.fmap(transform)) } @Test fun testExp() = DoubleTensorAlgebra { - assertTrue { tensor.exp() eq expectedTensor(::exp) } + assertTrue { tensorWithNegative.exp() eq expectedTensorWithNegative(::exp) } + assertTrue { tensor1.exp() eq expectedTensor1(::exp) } + assertTrue { tensor2.exp() eq expectedTensor2(::exp) } + assertTrue { tensor3.exp() eq expectedTensor3(::exp) } + assertTrue { tensor4.exp() eq expectedTensor4(::exp) } } @Test fun testLog() = DoubleTensorAlgebra { - assertTrue { tensor.ln() eq expectedTensor(::ln) } + assertTrue { tensor1.ln() eq expectedTensor1(::ln) } + assertTrue { tensor2.ln() eq expectedTensor2(::ln) } + assertTrue { tensor3.ln() eq expectedTensor3(::ln) } + assertTrue { tensor4.ln() eq expectedTensor4(::ln) } } @Test fun testSqrt() = DoubleTensorAlgebra { - assertTrue { tensor.sqrt() eq expectedTensor(::sqrt) } + assertTrue { tensor1.sqrt() eq expectedTensor1(::sqrt) } + assertTrue { tensor2.sqrt() eq expectedTensor2(::sqrt) } + assertTrue { tensor3.sqrt() eq expectedTensor3(::sqrt) } + assertTrue { tensor4.sqrt() eq expectedTensor4(::sqrt) } } @Test fun testCos() = DoubleTensorAlgebra { - assertTrue { tensor.cos() eq expectedTensor(::cos) } + assertTrue 
{ tensorWithNegative.cos() eq expectedTensorWithNegative(::cos) } + assertTrue { tensor1.cos() eq expectedTensor1(::cos) } + assertTrue { tensor2.cos() eq expectedTensor2(::cos) } + assertTrue { tensor3.cos() eq expectedTensor3(::cos) } + assertTrue { tensor4.cos() eq expectedTensor4(::cos) } } + @Test + fun testAcos() = DoubleTensorAlgebra { + assertTrue { tensorWithNegativeMod1.acos() eq expectedTensorWithNegativeMod1(::acos) } + assertTrue { tensor1Mod1.acos() eq expectedTensor1Mod1(::acos) } + assertTrue { tensor2Mod1.acos() eq expectedTensor2Mod1(::acos) } + assertTrue { tensor3Mod1.acos() eq expectedTensor3Mod1(::acos) } + assertTrue { tensor4Mod1.acos() eq expectedTensor4Mod1(::acos) } + } @Test fun testCosh() = DoubleTensorAlgebra { - assertTrue { tensor.cosh() eq expectedTensor(::cosh) } + assertTrue { tensorWithNegative.cosh() eq expectedTensorWithNegative(::cosh) } + assertTrue { tensor1.cosh() eq expectedTensor1(::cosh) } + assertTrue { tensor2.cosh() eq expectedTensor2(::cosh) } + assertTrue { tensor3.cosh() eq expectedTensor3(::cosh) } + assertTrue { tensor4.cosh() eq expectedTensor4(::cosh) } } @Test fun testAcosh() = DoubleTensorAlgebra { - assertTrue { tensor.acosh() eq expectedTensor(::acosh) } + assertTrue { tensor1.acosh() eq expectedTensor1(::acosh) } + assertTrue { tensor2.acosh() eq expectedTensor2(::acosh) } + assertTrue { tensor3.acosh() eq expectedTensor3(::acosh) } + assertTrue { tensor4.acosh() eq expectedTensor4(::acosh) } } @Test fun testSin() = DoubleTensorAlgebra { - assertTrue { tensor.sin() eq expectedTensor(::sin) } + assertTrue { tensorWithNegative.sin() eq expectedTensorWithNegative(::sin) } + assertTrue { tensor1.sin() eq expectedTensor1(::sin) } + assertTrue { tensor2.sin() eq expectedTensor2(::sin) } + assertTrue { tensor3.sin() eq expectedTensor3(::sin) } + assertTrue { tensor4.sin() eq expectedTensor4(::sin) } + } + + @Test + fun testAsin() = DoubleTensorAlgebra { + assertTrue { tensorWithNegativeMod1.asin() eq expectedTensorWithNegativeMod1(::asin) } + assertTrue { tensor1Mod1.asin() eq expectedTensor1Mod1(::asin) } + assertTrue { tensor2Mod1.asin() eq expectedTensor2Mod1(::asin) } + assertTrue { tensor3Mod1.asin() eq expectedTensor3Mod1(::asin) } + assertTrue { tensor4Mod1.asin() eq expectedTensor4Mod1(::asin) } } @Test fun testSinh() = DoubleTensorAlgebra { - assertTrue { tensor.sinh() eq expectedTensor(::sinh) } + assertTrue { tensorWithNegative.sinh() eq expectedTensorWithNegative(::sinh) } + assertTrue { tensor1.sinh() eq expectedTensor1(::sinh) } + assertTrue { tensor2.sinh() eq expectedTensor2(::sinh) } + assertTrue { tensor3.sinh() eq expectedTensor3(::sinh) } + assertTrue { tensor4.sinh() eq expectedTensor4(::sinh) } } @Test fun testAsinh() = DoubleTensorAlgebra { - assertTrue { tensor.asinh() eq expectedTensor(::asinh) } + assertTrue { tensorWithNegative.asinh() eq expectedTensorWithNegative(::asinh) } + assertTrue { tensor1.asinh() eq expectedTensor1(::asinh) } + assertTrue { tensor2.asinh() eq expectedTensor2(::asinh) } + assertTrue { tensor3.asinh() eq expectedTensor3(::asinh) } + assertTrue { tensor4.asinh() eq expectedTensor4(::asinh) } } @Test fun testTan() = DoubleTensorAlgebra { - assertTrue { tensor.tan() eq expectedTensor(::tan) } + assertTrue { tensorWithNegative.tan() eq expectedTensorWithNegative(::tan) } + assertTrue { tensor1.tan() eq expectedTensor1(::tan) } + assertTrue { tensor2.tan() eq expectedTensor2(::tan) } + assertTrue { tensor3.tan() eq expectedTensor3(::tan) } + assertTrue { tensor4.tan() eq 
expectedTensor4(::tan) } } @Test fun testAtan() = DoubleTensorAlgebra { - assertTrue { tensor.atan() eq expectedTensor(::atan) } + assertTrue { tensorWithNegative.atan() eq expectedTensorWithNegative(::atan) } + assertTrue { tensor1.atan() eq expectedTensor1(::atan) } + assertTrue { tensor2.atan() eq expectedTensor2(::atan) } + assertTrue { tensor3.atan() eq expectedTensor3(::atan) } + assertTrue { tensor4.atan() eq expectedTensor4(::atan) } } @Test fun testTanh() = DoubleTensorAlgebra { - assertTrue { tensor.tanh() eq expectedTensor(::tanh) } + assertTrue { tensorWithNegative.tanh() eq expectedTensorWithNegative(::tanh) } + assertTrue { tensor1.tanh() eq expectedTensor1(::tanh) } + assertTrue { tensor2.tanh() eq expectedTensor2(::tanh) } + assertTrue { tensor3.tanh() eq expectedTensor3(::tanh) } + assertTrue { tensor4.tanh() eq expectedTensor4(::tanh) } + } + + @Test + fun testAtanh() = DoubleTensorAlgebra { + assertTrue { tensorWithNegativeMod1.atanh() eq expectedTensorWithNegativeMod1(::atanh) } + assertTrue { tensor1Mod1.atanh() eq expectedTensor1Mod1(::atanh) } + assertTrue { tensor2Mod1.atanh() eq expectedTensor2Mod1(::atanh) } + assertTrue { tensor3Mod1.atanh() eq expectedTensor3Mod1(::atanh) } + assertTrue { tensor4Mod1.atanh() eq expectedTensor4Mod1(::atanh) } } @Test fun testCeil() = DoubleTensorAlgebra { - assertTrue { tensor.ceil() eq expectedTensor(::ceil) } + assertTrue { tensorWithNegative.ceil() eq expectedTensorWithNegative(::ceil) } + assertTrue { tensor1.ceil() eq expectedTensor1(::ceil) } + assertTrue { tensor2.ceil() eq expectedTensor2(::ceil) } + assertTrue { tensor3.ceil() eq expectedTensor3(::ceil) } + assertTrue { tensor4.ceil() eq expectedTensor4(::ceil) } } @Test fun testFloor() = DoubleTensorAlgebra { - assertTrue { tensor.floor() eq expectedTensor(::floor) } + assertTrue { tensorWithNegative.floor() eq expectedTensorWithNegative(::floor) } + assertTrue { tensor1.floor() eq expectedTensor1(::floor) } + assertTrue { tensor2.floor() eq expectedTensor2(::floor) } + assertTrue { tensor3.floor() eq expectedTensor3(::floor) } + assertTrue { tensor4.floor() eq expectedTensor4(::floor) } } - val shape2 = intArrayOf(2, 2) - val buffer2 = doubleArrayOf( + val shape5 = intArrayOf(2, 2) + val buffer5 = doubleArrayOf( 1.0, 2.0, -3.0, 4.0 ) - val tensor2 = DoubleTensor(shape2, buffer2) + val tensor5 = DoubleTensor(shape5, buffer5) @Test fun testMin() = DoubleTensorAlgebra { - assertTrue { tensor2.min() == -3.0 } - assertTrue { tensor2.min(0, true) eq fromArray( + assertTrue { tensor5.min() == -3.0 } + assertTrue { tensor5.min(0, true) eq fromArray( intArrayOf(1, 2), doubleArrayOf(-3.0, 2.0) )} - assertTrue { tensor2.min(1, false) eq fromArray( + assertTrue { tensor5.min(1, false) eq fromArray( intArrayOf(2), doubleArrayOf(1.0, -3.0) )} @@ -123,12 +263,12 @@ internal class TestDoubleAnalyticTensorAlgebra { @Test fun testMax() = DoubleTensorAlgebra { - assertTrue { tensor2.max() == 4.0 } - assertTrue { tensor2.max(0, true) eq fromArray( + assertTrue { tensor5.max() == 4.0 } + assertTrue { tensor5.max(0, true) eq fromArray( intArrayOf(1, 2), doubleArrayOf(1.0, 4.0) )} - assertTrue { tensor2.max(1, false) eq fromArray( + assertTrue { tensor5.max(1, false) eq fromArray( intArrayOf(2), doubleArrayOf(2.0, 4.0) )} @@ -136,12 +276,12 @@ internal class TestDoubleAnalyticTensorAlgebra { @Test fun testSum() = DoubleTensorAlgebra { - assertTrue { tensor2.sum() == 4.0 } - assertTrue { tensor2.sum(0, true) eq fromArray( + assertTrue { tensor5.sum() == 4.0 } + assertTrue { tensor5.sum(0, 
true) eq fromArray( intArrayOf(1, 2), doubleArrayOf(-2.0, 6.0) )} - assertTrue { tensor2.sum(1, false) eq fromArray( + assertTrue { tensor5.sum(1, false) eq fromArray( intArrayOf(2), doubleArrayOf(3.0, 1.0) )} @@ -149,15 +289,14 @@ internal class TestDoubleAnalyticTensorAlgebra { @Test fun testMean() = DoubleTensorAlgebra { - assertTrue { tensor2.mean() == 1.0 } - assertTrue { tensor2.mean(0, true) eq fromArray( + assertTrue { tensor5.mean() == 1.0 } + assertTrue { tensor5.mean(0, true) eq fromArray( intArrayOf(1, 2), doubleArrayOf(-1.0, 3.0) )} - assertTrue { tensor2.mean(1, false) eq fromArray( + assertTrue { tensor5.mean(1, false) eq fromArray( intArrayOf(2), doubleArrayOf(1.5, 0.5) )} } - } From 99d0cfb38403c751bfd73df0a4e5f32d2efd9452 Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 22 Mar 2022 21:51:45 +0300 Subject: [PATCH 27/59] added tests for std, variance --- .../tensors/core/TestDoubleAnalyticTensorAlgebra.kt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt index b9f4ebd96..d51702403 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt @@ -299,4 +299,14 @@ internal class TestDoubleAnalyticTensorAlgebra { doubleArrayOf(1.5, 0.5) )} } + + @Test + fun testStd() = DoubleTensorAlgebra { + assertTrue { floor(tensor5.std() * 10000 ) / 10000 == 2.9439 } + } + + @Test + fun testVariance() = DoubleTensorAlgebra { + assertTrue { floor(tensor5.variance() * 10000 ) / 10000 == 8.6666 } + } } From cc00e1dee58f5ebe7b40c11758ce84c869d4e232 Mon Sep 17 00:00:00 2001 From: Margarita Date: Wed, 23 Mar 2022 00:25:17 +0300 Subject: [PATCH 28/59] added tests for fullLike, onesLike, rowsByIndices --- .../kmath/tensors/core/TestDoubleTensor.kt | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt index d808637c7..bebaed32f 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt @@ -23,6 +23,32 @@ import kotlin.test.assertTrue internal class TestDoubleTensor { + @Test + fun testFullLike() = DoubleTensorAlgebra { + val shape = intArrayOf(2, 3) + val buffer = doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0) + val tensor = DoubleTensor(shape, buffer) + val value = 12.5 + assertTrue { tensor.fullLike(value) eq DoubleTensor(shape, buffer.map { value }.toDoubleArray() ) } + } + + @Test + fun testOnesLike() = DoubleTensorAlgebra { + val shape = intArrayOf(2, 3) + val buffer = doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0) + val tensor = DoubleTensor(shape, buffer) + assertTrue { tensor.onesLike() eq DoubleTensor(shape, buffer.map { 1.0 }.toDoubleArray() ) } + } + + @Test + fun testRowsByIndices() = DoubleTensorAlgebra { + val shape = intArrayOf(2, 2) + val buffer = doubleArrayOf(1.0, 2.0, -3.0, 4.0) + val tensor = fromArray(shape, buffer) + assertTrue { tensor.rowsByIndices(intArrayOf(0)) eq DoubleTensor(intArrayOf(1, 2), doubleArrayOf(1.0, 2.0)) } + assertTrue { 
tensor.rowsByIndices(intArrayOf(0, 1)) eq tensor } + } + @Test fun testValue() = DoubleTensorAlgebra { val value = 12.5 From 487a663d0fe36fcaa900a704b93f35a3989bc5cc Mon Sep 17 00:00:00 2001 From: Margarita Date: Wed, 23 Mar 2022 00:46:52 +0300 Subject: [PATCH 29/59] added test for StructureND.times --- .../kscience/kmath/tensors/core/TestDoubleTensor.kt | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt index bebaed32f..7a38e5b7d 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt @@ -49,6 +49,18 @@ internal class TestDoubleTensor { assertTrue { tensor.rowsByIndices(intArrayOf(0, 1)) eq tensor } } + @Test + fun testTimes() = DoubleTensorAlgebra { + val shape = intArrayOf(2, 2) + val buffer = doubleArrayOf(1.0, 2.0, -3.0, 4.0) + val tensor = DoubleTensor(shape, buffer) + val value = 3 + assertTrue { tensor.times(value).toBufferedTensor() eq DoubleTensor(shape, buffer.map { x -> 3 * x }.toDoubleArray()) } + val buffer2 = doubleArrayOf(7.0, -8.0, -5.0, 2.0) + val tensor2 = DoubleTensor(shape, buffer2) + assertTrue {tensor.times(tensor2).toBufferedTensor() eq DoubleTensor(shape, doubleArrayOf(7.0, -16.0, 15.0, 8.0)) } + } + @Test fun testValue() = DoubleTensorAlgebra { val value = 12.5 From e1d1bc903186bec3332f283e713da2db3b04579f Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 24 May 2022 19:22:26 +0300 Subject: [PATCH 30/59] added partial implementation of svd calculation --- .../space/kscience/kmath/tensors/margarita.kt | 76 +++++ .../space/kscience/kmath/tensors/svdcmp.kt | 276 ++++++++++++++++++ 2 files changed, 352 insertions(+) create mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt create mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt new file mode 100644 index 000000000..354b08346 --- /dev/null +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt @@ -0,0 +1,76 @@ +/* + * Copyright 2018-2021 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
+ */ + +package space.kscience.kmath.tensors + +import space.kscience.kmath.linear.transpose +import space.kscience.kmath.misc.PerformancePitfall +import space.kscience.kmath.nd.MutableStructure2D +import space.kscience.kmath.nd.Structure2D +import space.kscience.kmath.nd.as2D +import space.kscience.kmath.tensors.core.* +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.dot +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.mapIndexed +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.zeros +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.minus +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.sum +import space.kscience.kmath.tensors.core.tensorAlgebra +import kotlin.math.* + +fun DoubleArray.fmap(transform: (Double) -> Double): DoubleArray { + return this.map(transform).toDoubleArray() +} + +fun scalarProduct(v1: Structure2D, v2: Structure2D): Double { + return v1.mapIndexed { index, d -> d * v2[index] }.sum() +} + +internal fun diagonal(shape: IntArray, v: Double) : DoubleTensor { + val matrix = zeros(shape) + return matrix.mapIndexed { index, _ -> if (index.component1() == index.component2()) v else 0.0 } +} + + +fun MutableStructure2D.print() { + val n = this.shape.component1() + val m = this.shape.component2() + for (i in 0 until n) { + for (j in 0 until m) { + val x = (this[i, j] * 100).roundToInt() / 100.0 + print("$x ") + } + println() + } + println("______________") +} + +@OptIn(PerformancePitfall::class) +fun main(): Unit = Double.tensorAlgebra.withBroadcast { + val shape = intArrayOf(5, 3) + val buffer = doubleArrayOf( + 1.000000, 2.000000, 3.000000, + 2.000000, 3.000000, 4.000000, + 3.000000, 4.000000, 5.000000, + 4.000000, 5.000000, 6.000000, + 5.000000, 6.000000, 7.000000 + ) + val buffer2 = doubleArrayOf( + 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000 + ) + val tensor = fromArray(shape, buffer).as2D() + val v = fromArray(shape, buffer2).as2D() + tensor.print() + tensor.svdcmp(v) +// tensor.print() + + + + + +} diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt new file mode 100644 index 000000000..3657c0c8c --- /dev/null +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt @@ -0,0 +1,276 @@ +package space.kscience.kmath.tensors + +import space.kscience.kmath.nd.* +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra +import kotlin.math.abs +import kotlin.math.max +import kotlin.math.min +import kotlin.math.sqrt + +/* + * Copyright 2018-2021 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
+ */ + + +fun pythag(a: Double, b: Double): Double { + val at: Double = abs(a) + val bt: Double = abs(b) + val ct: Double + val result: Double + if (at > bt) { + ct = bt / at + result = at * sqrt(1.0 + ct * ct) + } else if (bt > 0.0) { + ct = at / bt + result = bt * sqrt(1.0 + ct * ct) + } else result = 0.0 + return result +} + +fun SIGN(a: Double, b: Double): Double { + if (b >= 0.0) + return abs(a) + else + return -abs(a) +} + +internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { + val shape = this.shape + val n = shape.component2() + val m = shape.component1() + var f = 0.0 + val rv1 = DoubleArray(n) + var s = 0.0 + var scale = 0.0 + var anorm = 0.0 + var g = 0.0 + var l = 0 + val w_shape = intArrayOf(m, 1) + var w_buffer = doubleArrayOf(0.000000) + for (i in 0 until m - 1) { + w_buffer += doubleArrayOf(0.000000) + } + val w = BroadcastDoubleTensorAlgebra.fromArray(w_shape, w_buffer).as2D() + for (i in 0 until n) { + /* left-hand reduction */ + l = i + 1 + rv1[i] = scale * g + g = 0.0 + s = 0.0 + scale = 0.0 + if (i < m) { + for (k in i until m) { + scale += abs(this[k, i]); + } + if (scale != 0.0) { + for (k in i until m) { + this[k, i] = (this[k, i] / scale) + s += this[k, i] * this[k, i] + } + f = this[i, i] + if (f >= 0) { + g = (-1) * abs(sqrt(s)) + } + else { + g = abs(sqrt(s)) + } + val h = f * g - s + this[i, i] = f - g + if (i != n - 1) { + for (j in l until n) { + s = 0.0 + for (k in i until m) { + s += this[k, i] * this[k, j] + } + f = s / h + for (k in i until m) { + this[k, j] += f * this[k, i] + } + } + } + for (k in i until m) { + this[k, i] = this[k, i] * scale + } + } + } + + w[i, 0] = scale * g + /* right-hand reduction */ + g = 0.0 + s = 0.0 + scale = 0.0 + if (i < m && i != n - 1) { + for (k in l until n) { + scale += abs(this[i, k]) + } + if (scale != 0.0) { + for (k in l until n) { + this[i, k] = this[i, k] / scale + s += this[i, k] * this[i, k] + } + f = this[i, l] + if (f >= 0) { + g = (-1) * abs(sqrt(s)) + } + else { + g = abs(sqrt(s)) + } + val h = f * g - s + this[i, l] = f - g + for (k in l until n) { + rv1[k] = this[i, k] / h + } + if (i != m - 1) { + for (j in l until m) { + s = 0.0 + for (k in l until n) { + s += this[j, k] * this[i, k] + } + for (k in l until n) { + this[j, k] += s * rv1[k] + } + } + } + for (k in l until n) { + this[i, k] = this[i, k] * scale + } + } + } + anorm = max(anorm, (abs(w[i, 0]) + abs(rv1[i]))); + } + + for (i in n - 1 downTo 0) { + if (i < n - 1) { + if (g != 0.0) { + for (j in l until n) { + v[j, i] = (this[i, j] / this[i, l]) / g + } + for (j in l until n) { + s = 0.0 + for (k in l until n) + s += this[i, k] * v[k, j] + for (k in l until n) + v[k, j] += s * v[k, i] + } + } + for (j in l until n) { + v[i, j] = 0.0 + v[j, i] = 0.0 + } + } + v[i, i] = 1.0 + g = rv1[i] + l = i + } + + // тут все правильно считается + + +// println("w") +// w.print() +// + val eps = 0.000000001 +// println("1.0 / w[2, 0] " + 1.0 / w[2, 0]) +// println("w[2, 0] " + w[2, 0]) + + for (i in min(n, m) - 1 downTo 0) { + l = i + 1 + g = w[i, 0] +// println("w[i, 0] " + w[i, 0]) + for (j in l until n) { + this[i, j] = 0.0 + } + if (g != 0.0) { + g = 1.0 / g +// println("g " + g) + for (j in l until n) { + s = 0.0 + for (k in l until m) { + s += this[k, i] * this[k, j] + } + f = (s / this[i, i]) * g + for (k in i until m) { + this[k, j] += f * this[k, i] + } + } + for (j in i until m) { + this[j, i] *= g + } + } + else { + for (j in i until m) { + this[j, i] = 0.0 + } + } + this[i, i] += 1.0 +// println("matrix") +// this.print() + } + + 
println("matrix") + this.print() + + // тут матрица должна выглядеть так: + +// 0.134840 -0.762770 0.522117 +// -0.269680 -0.476731 -0.245388 +// -0.404520 -0.190693 -0.527383 +// -0.539360 0.095346 -0.297540 +// -0.674200 0.381385 0.548193 + + + +// var flag = 0 +// var nm = 0 +// var c = 0.0 +// var h = 0.0 +// var y = 0.0 +// var z = 0.0 +// for (k in n - 1 downTo 0) { +// for (its in 0 until 30) { +// flag = 0 +// for (l in k downTo 0) { +// nm = l - 1 +// if (abs(rv1[l]) < eps) { +// flag = 0 +//// println("break1") +// break +// } +// if (abs(w[nm, 0]) < eps) { +// println("break2") +// break +// } +// } +// +// // l = 1 тут +// +// if (flag != 0) { +// c = 0.0 +// s = 0.0 +// for (i in l until k) { // а точно ли такие границы? там немного отличается +// f=s*rv1[i] +// rv1[i]=c*rv1[i] +// if (abs(f) < eps) { +// println("break3") +// break +// } +// g=w[i, 0] +// h=pythag(f,g) +// w[i, 0]=h +// h=1.0/h +// c=g*h +// s = -f*h +// for (j in 0 until m) { // точно ли такие границы? +// y=this[j, nm] +// z=this[j, i] +// this[j, nm]=y*c+z*s +// this[j, i]=z*c-y*s +// } +// } +// } +// +// +// } +// } +} \ No newline at end of file From 0c9ef905da52401b696b682395ff3321a501e57a Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 24 May 2022 19:50:50 +0300 Subject: [PATCH 31/59] added comment about division by zero --- .../space/kscience/kmath/tensors/svdcmp.kt | 144 +++++++++++++----- 1 file changed, 103 insertions(+), 41 deletions(-) diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt index 3657c0c8c..b2706c334 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt @@ -164,26 +164,18 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { l = i } - // тут все правильно считается - - -// println("w") -// w.print() -// - val eps = 0.000000001 -// println("1.0 / w[2, 0] " + 1.0 / w[2, 0]) -// println("w[2, 0] " + w[2, 0]) + // до этого момента все правильно считается + // дальше - нет for (i in min(n, m) - 1 downTo 0) { l = i + 1 g = w[i, 0] -// println("w[i, 0] " + w[i, 0]) for (j in l until n) { this[i, j] = 0.0 } if (g != 0.0) { + // !!!!! 
вот тут деление на почти ноль g = 1.0 / g -// println("g " + g) for (j in l until n) { s = 0.0 for (k in l until m) { @@ -204,15 +196,11 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { } } this[i, i] += 1.0 -// println("matrix") -// this.print() } println("matrix") this.print() - - // тут матрица должна выглядеть так: - +// тут матрица должна выглядеть так: // 0.134840 -0.762770 0.522117 // -0.269680 -0.476731 -0.245388 // -0.404520 -0.190693 -0.527383 @@ -220,57 +208,131 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { // -0.674200 0.381385 0.548193 - // var flag = 0 // var nm = 0 // var c = 0.0 // var h = 0.0 // var y = 0.0 // var z = 0.0 +// var x = 0.0 +// println("L = " + l) // for (k in n - 1 downTo 0) { -// for (its in 0 until 30) { -// flag = 0 -// for (l in k downTo 0) { -// nm = l - 1 -// if (abs(rv1[l]) < eps) { +// for (its in 1 until 30) { +// flag = 1 +// +// for (newl in k downTo 0) { +// nm = newl - 1 +// if (abs(rv1[newl]) + anorm == anorm) { // flag = 0 -//// println("break1") +// l = newl +//// println("newl before break1 = " + newl) +// println("break1") // break // } -// if (abs(w[nm, 0]) < eps) { +// if (abs(w[nm, 0] + anorm) == anorm) { +// l = newl // println("break2") // break // } // } // -// // l = 1 тут +//// println("NEWL = " + l) +// +//// l = 0 // // if (flag != 0) { // c = 0.0 -// s = 0.0 -// for (i in l until k) { // а точно ли такие границы? там немного отличается -// f=s*rv1[i] -// rv1[i]=c*rv1[i] -// if (abs(f) < eps) { +// s = 1.0 +// for (i in l until k) { +// f = s * rv1[i] +// rv1[i] = c * rv1[i] +// if (abs(f) + anorm == anorm) { // println("break3") // break // } -// g=w[i, 0] -// h=pythag(f,g) -// w[i, 0]=h -// h=1.0/h -// c=g*h -// s = -f*h -// for (j in 0 until m) { // точно ли такие границы? 
-// y=this[j, nm] -// z=this[j, i] -// this[j, nm]=y*c+z*s -// this[j, i]=z*c-y*s +// h = pythag(f, g) +// w[i, 0] = h +// h = 1.0 / h +// c = g * h +// s = (-f) * h +// for (j in 0 until m) { +// y = this[j, nm] +// z = this[j, i] +// this[j, nm] = y * c + z * s +// this[j, i] = z * c - y * s // } // } // } // +// z = w[k, 0] +//// println("l = " + l) +//// println("k = " + k) +// if (l == k) { +// if (z < 0.0) { +// w[k, 0] = -z +// for (j in 0 until n) +// v[j, k] = -v[j, k] +// } +// println("break4") +// break +// } // +// if (its == 30) { +// return +// } +// +// x = w[l, 0] +// nm = k - 1 +//// println("nm = " + nm) +// y = w[nm, 0] +// g = rv1[nm] +// h = rv1[k] +// f = ((y-z)*(y+z)+(g-h)*(g+h))/(2.0*h*y) +// g = pythag(f,1.0) +// f=((x-z)*(x+z)+h*((y/(f+SIGN(g,f)))-h))/x +// c = 1.0 +// s = 1.0 +// +// var i = 0 // непонятно, где она должна быть объявлена +// for (j in l until nm) { +// i = j + 1 +// g = rv1[i] +// y = w[i, 0] +// h = s * g +// g = c * g +// z = pythag(f,h) +// rv1[j] = z +// c = f / z +// s = h / z +// f = x * c + g * s +// g = g * c - x * s +// h = y * s +// y *= c +// for (jj in 0 until n) { +// x=v[jj, j]; +// z=v[jj, i]; +// v[jj, j] = x * c + z * s; +// v[jj, i] = z * c - x * s; +// } +// z = pythag(f,h) +// w[j, 0] = z +// if (z != 0.0) { +// z = 1.0 / z +// c = f * z +// s = h * z +// } +// f = c * g + s * y +// x = c * y - s * g +// for (jj in 0 until m) { +// y = this[jj, j] +// z = this[jj, i] +// this[jj, j] = y * c + z * s +// this[jj, i] = z * c - y * s +// } +// } +// rv1[l] = 0.0 +// rv1[k] = f +// w[k, 0] = x // } // } } \ No newline at end of file From a21baf95a0cbff7d6d53d8e074d91101a8442231 Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 24 May 2022 23:23:35 +0300 Subject: [PATCH 32/59] added the rest of the algorithm --- .../space/kscience/kmath/tensors/margarita.kt | 5 +- .../space/kscience/kmath/tensors/svdcmp.kt | 262 +++++++++--------- 2 files changed, 131 insertions(+), 136 deletions(-) diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt index 354b08346..3629b6a47 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt @@ -57,17 +57,14 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { 5.000000, 6.000000, 7.000000 ) val buffer2 = doubleArrayOf( - 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 ) val tensor = fromArray(shape, buffer).as2D() - val v = fromArray(shape, buffer2).as2D() + val v = fromArray(intArrayOf(3, 3), buffer2).as2D() tensor.print() tensor.svdcmp(v) -// tensor.print() diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt index b2706c334..e7a424d74 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt @@ -12,7 +12,6 @@ import kotlin.math.sqrt * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
*/ - fun pythag(a: Double, b: Double): Double { val at: Double = abs(a) val bt: Double = abs(b) @@ -46,9 +45,9 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { var anorm = 0.0 var g = 0.0 var l = 0 - val w_shape = intArrayOf(m, 1) + val w_shape = intArrayOf(n, 1) var w_buffer = doubleArrayOf(0.000000) - for (i in 0 until m - 1) { + for (i in 0 until n - 1) { w_buffer += doubleArrayOf(0.000000) } val w = BroadcastDoubleTensorAlgebra.fromArray(w_shape, w_buffer).as2D() @@ -198,8 +197,8 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { this[i, i] += 1.0 } - println("matrix") - this.print() +// println("matrix") +// this.print() // тут матрица должна выглядеть так: // 0.134840 -0.762770 0.522117 // -0.269680 -0.476731 -0.245388 @@ -207,132 +206,131 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { // -0.539360 0.095346 -0.297540 // -0.674200 0.381385 0.548193 + this[0, 2] = 0.522117 + this[1, 2] = -0.245388 + this[2, 2] = -0.527383 + this[3, 2] = -0.297540 + this[4, 2] = 0.548193 -// var flag = 0 -// var nm = 0 -// var c = 0.0 -// var h = 0.0 -// var y = 0.0 -// var z = 0.0 -// var x = 0.0 -// println("L = " + l) -// for (k in n - 1 downTo 0) { -// for (its in 1 until 30) { -// flag = 1 -// -// for (newl in k downTo 0) { -// nm = newl - 1 -// if (abs(rv1[newl]) + anorm == anorm) { -// flag = 0 -// l = newl -//// println("newl before break1 = " + newl) -// println("break1") -// break -// } -// if (abs(w[nm, 0] + anorm) == anorm) { -// l = newl -// println("break2") -// break -// } -// } -// -//// println("NEWL = " + l) -// -//// l = 0 -// -// if (flag != 0) { -// c = 0.0 -// s = 1.0 -// for (i in l until k) { -// f = s * rv1[i] -// rv1[i] = c * rv1[i] -// if (abs(f) + anorm == anorm) { -// println("break3") -// break -// } -// h = pythag(f, g) -// w[i, 0] = h -// h = 1.0 / h -// c = g * h -// s = (-f) * h -// for (j in 0 until m) { -// y = this[j, nm] -// z = this[j, i] -// this[j, nm] = y * c + z * s -// this[j, i] = z * c - y * s -// } -// } -// } -// -// z = w[k, 0] -//// println("l = " + l) -//// println("k = " + k) -// if (l == k) { -// if (z < 0.0) { -// w[k, 0] = -z -// for (j in 0 until n) -// v[j, k] = -v[j, k] -// } -// println("break4") -// break -// } -// -// if (its == 30) { -// return -// } -// -// x = w[l, 0] -// nm = k - 1 -//// println("nm = " + nm) -// y = w[nm, 0] -// g = rv1[nm] -// h = rv1[k] -// f = ((y-z)*(y+z)+(g-h)*(g+h))/(2.0*h*y) -// g = pythag(f,1.0) -// f=((x-z)*(x+z)+h*((y/(f+SIGN(g,f)))-h))/x -// c = 1.0 -// s = 1.0 -// -// var i = 0 // непонятно, где она должна быть объявлена -// for (j in l until nm) { -// i = j + 1 -// g = rv1[i] -// y = w[i, 0] -// h = s * g -// g = c * g -// z = pythag(f,h) -// rv1[j] = z -// c = f / z -// s = h / z -// f = x * c + g * s -// g = g * c - x * s -// h = y * s -// y *= c -// for (jj in 0 until n) { -// x=v[jj, j]; -// z=v[jj, i]; -// v[jj, j] = x * c + z * s; -// v[jj, i] = z * c - x * s; -// } -// z = pythag(f,h) -// w[j, 0] = z -// if (z != 0.0) { -// z = 1.0 / z -// c = f * z -// s = h * z -// } -// f = c * g + s * y -// x = c * y - s * g -// for (jj in 0 until m) { -// y = this[jj, j] -// z = this[jj, i] -// this[jj, j] = y * c + z * s -// this[jj, i] = z * c - y * s -// } -// } -// rv1[l] = 0.0 -// rv1[k] = f -// w[k, 0] = x -// } -// } + var flag = 0 + var nm = 0 + var c = 0.0 + var h = 0.0 + var y = 0.0 + var z = 0.0 + var x = 0.0 + for (k in n - 1 downTo 0) { + for (its in 1 until 30) { + flag = 1 + for (newl in k downTo 0) { + nm = newl - 1 + if (abs(rv1[newl]) + anorm 
== anorm) { + flag = 0 + l = newl + break + } + if (abs(w[nm, 0]) + anorm == anorm) { + l = newl + break + } + } + + if (flag != 0) { + c = 0.0 + s = 1.0 + for (i in l until k) { + f = s * rv1[i] + rv1[i] = c * rv1[i] + if (abs(f) + anorm == anorm) { + break + } + h = pythag(f, g) + w[i, 0] = h + h = 1.0 / h + c = g * h + s = (-f) * h + for (j in 0 until m) { + y = this[j, nm] + z = this[j, i] + this[j, nm] = y * c + z * s + this[j, i] = z * c - y * s + } + } + } + + z = w[k, 0] + if (l == k) { + if (z < 0.0) { + w[k, 0] = -z + for (j in 0 until n) + v[j, k] = -v[j, k] + } + break + } + + if (its == 30) { + return + } + + x = w[l, 0] + nm = k - 1 + y = w[nm, 0] + g = rv1[nm] + h = rv1[k] + f = ((y-z)*(y+z)+(g-h)*(g+h))/(2.0*h*y) + g = pythag(f,1.0) + f=((x-z)*(x+z)+h*((y/(f+SIGN(g,f)))-h))/x + c = 1.0 + s = 1.0 + + var i = 0 + for (j in l until nm + 1) { + i = j + 1 + g = rv1[i] + y = w[i, 0] + h = s * g + g = c * g + z = pythag(f,h) + rv1[j] = z + c = f / z + s = h / z + f = x * c + g * s + g = g * c - x * s + h = y * s + y *= c + + for (jj in 0 until n) { + x=v[jj, j]; + z=v[jj, i]; + v[jj, j] = x * c + z * s; + v[jj, i] = z * c - x * s; + } + z = pythag(f,h) + w[j, 0] = z + if (z != 0.0) { + z = 1.0 / z + c = f * z + s = h * z + } + f = c * g + s * y + x = c * y - s * g + for (jj in 0 until m) { + y = this[jj, j] + z = this[jj, i] + this[jj, j] = y * c + z * s + this[jj, i] = z * c - y * s + } + } + rv1[l] = 0.0 + rv1[k] = f + w[k, 0] = x + } + } + + println("u") + this.print() + println("w") + w.print() + println("v") + v.print() } \ No newline at end of file From 3ca94ec4ec235ffaabfbca874a7fd2a845fcdcec Mon Sep 17 00:00:00 2001 From: Margarita Date: Tue, 24 May 2022 23:44:09 +0300 Subject: [PATCH 33/59] added some tests for method dot --- .../tensors/core/TestDoubleTensorAlgebra.kt | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt index 205ae2fee..8657a8dd9 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt @@ -132,6 +132,27 @@ internal class TestDoubleTensorAlgebra { 468.0, 501.0, 534.0, 594.0, 636.0, 678.0, 720.0, 771.0, 822.0 )) assertTrue(res45.shape contentEquals intArrayOf(2, 3, 3)) + + val oneDimTensor1 = fromArray(intArrayOf(3), doubleArrayOf(1.0, 2.0, 3.0)) + val oneDimTensor2 = fromArray(intArrayOf(3), doubleArrayOf(4.0, 5.0, 6.0)) + val resOneDimTensors = oneDimTensor1.dot(oneDimTensor2) + assertTrue(resOneDimTensors.mutableBuffer.array() contentEquals doubleArrayOf(32.0)) + assertTrue(resOneDimTensors.shape contentEquals intArrayOf(1)) + + val twoDimTensor1 = fromArray(intArrayOf(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0)) + val twoDimTensor2 = fromArray(intArrayOf(2, 2), doubleArrayOf(5.0, 6.0, 7.0, 8.0)) + val resTwoDimTensors = twoDimTensor1.dot(twoDimTensor2) + assertTrue(resTwoDimTensors.mutableBuffer.array() contentEquals doubleArrayOf(19.0, 22.0, 43.0, 50.0)) + assertTrue(resTwoDimTensors.shape contentEquals intArrayOf(2, 2)) + + val oneDimTensor3 = fromArray(intArrayOf(2), doubleArrayOf(1.0, 2.0)) + val resOneDimTensorOnTwoDimTensor = oneDimTensor3.dot(twoDimTensor1) + assertTrue(resOneDimTensorOnTwoDimTensor.mutableBuffer.array() contentEquals doubleArrayOf(7.0, 10.0)) + 
assertTrue(resOneDimTensorOnTwoDimTensor.shape contentEquals intArrayOf(2)) + + val resTwoDimTensorOnOneDimTensor = twoDimTensor1.dot(oneDimTensor3) + assertTrue(resTwoDimTensorOnOneDimTensor.mutableBuffer.array() contentEquals doubleArrayOf(5.0, 11.0)) + assertTrue(resTwoDimTensorOnOneDimTensor.shape contentEquals intArrayOf(2)) } @Test From 8b48eaa1ede355628074f02502491cc5795e47c3 Mon Sep 17 00:00:00 2001 From: Margarita Date: Wed, 25 May 2022 01:11:22 +0300 Subject: [PATCH 34/59] renamed function and changed structure --- .../space/kscience/kmath/tensors/margarita.kt | 36 ++++++++----------- .../space/kscience/kmath/tensors/svdcmp.kt | 30 ++++++---------- 2 files changed, 25 insertions(+), 41 deletions(-) diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt index 3629b6a47..c376d6d0f 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt @@ -11,28 +11,9 @@ import space.kscience.kmath.nd.MutableStructure2D import space.kscience.kmath.nd.Structure2D import space.kscience.kmath.nd.as2D import space.kscience.kmath.tensors.core.* -import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.dot -import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.mapIndexed -import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.zeros -import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.minus -import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.sum import space.kscience.kmath.tensors.core.tensorAlgebra import kotlin.math.* -fun DoubleArray.fmap(transform: (Double) -> Double): DoubleArray { - return this.map(transform).toDoubleArray() -} - -fun scalarProduct(v1: Structure2D, v2: Structure2D): Double { - return v1.mapIndexed { index, d -> d * v2[index] }.sum() -} - -internal fun diagonal(shape: IntArray, v: Double) : DoubleTensor { - val matrix = zeros(shape) - return matrix.mapIndexed { index, _ -> if (index.component1() == index.component2()) v else 0.0 } -} - - fun MutableStructure2D.print() { val n = this.shape.component1() val m = this.shape.component2() @@ -63,11 +44,22 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { ) val tensor = fromArray(shape, buffer).as2D() val v = fromArray(intArrayOf(3, 3), buffer2).as2D() + val w_shape = intArrayOf(3, 1) + var w_buffer = doubleArrayOf(0.000000) + for (i in 0 until 3 - 1) { + w_buffer += doubleArrayOf(0.000000) + } + val w = BroadcastDoubleTensorAlgebra.fromArray(w_shape, w_buffer).as2D() tensor.print() - tensor.svdcmp(v) - - + var ans = Pair(w, v) + tensor.svdGolabKahan(v, w) + println("u") + tensor.print() + println("w") + w.print() + println("v") + v.print() } diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt index e7a424d74..8572bdee9 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt @@ -1,7 +1,6 @@ package space.kscience.kmath.tensors import space.kscience.kmath.nd.* -import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra import kotlin.math.abs import kotlin.math.max import kotlin.math.min @@ -34,10 +33,12 @@ fun SIGN(a: Double, b: Double): Double { return -abs(a) } -internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { +// matrix v is not transposed at the output + 
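+// Call contract, inferred from the margarita.kt example above (an assumption, not checked here):
+// for an (m x n) receiver, v should be an (n x n) zero matrix and w an (n x 1) zero column;
+// after the call the receiver holds U, w the singular values, and v the matrix V (not transposed).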
+internal fun MutableStructure2D.svdGolabKahan(v: MutableStructure2D, w: MutableStructure2D) { val shape = this.shape - val n = shape.component2() val m = shape.component1() + val n = shape.component2() var f = 0.0 val rv1 = DoubleArray(n) var s = 0.0 @@ -45,12 +46,6 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { var anorm = 0.0 var g = 0.0 var l = 0 - val w_shape = intArrayOf(n, 1) - var w_buffer = doubleArrayOf(0.000000) - for (i in 0 until n - 1) { - w_buffer += doubleArrayOf(0.000000) - } - val w = BroadcastDoubleTensorAlgebra.fromArray(w_shape, w_buffer).as2D() for (i in 0 until n) { /* left-hand reduction */ l = i + 1 @@ -212,6 +207,9 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { this[3, 2] = -0.297540 this[4, 2] = 0.548193 + // задала правильные значения, чтобы проверить правильность кода дальше + // дальше - все корректно + var flag = 0 var nm = 0 var c = 0.0 @@ -268,9 +266,10 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { break } - if (its == 30) { - return - } +// надо придумать, что сделать - выкинуть ошибку? +// if (its == 30) { +// return +// } x = w[l, 0] nm = k - 1 @@ -326,11 +325,4 @@ internal fun MutableStructure2D.svdcmp(v: MutableStructure2D) { w[k, 0] = x } } - - println("u") - this.print() - println("w") - w.print() - println("v") - v.print() } \ No newline at end of file From 40bab168a7abe2f91ae6be87e1e797cb6be137e0 Mon Sep 17 00:00:00 2001 From: Margarita Date: Wed, 25 May 2022 01:16:56 +0300 Subject: [PATCH 35/59] reformatted code --- .../space/kscience/kmath/tensors/svdcmp.kt | 33 +++++++++---------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt index 8572bdee9..ec4c8cc32 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt @@ -65,8 +65,7 @@ internal fun MutableStructure2D.svdGolabKahan(v: MutableStructure2D= 0) { g = (-1) * abs(sqrt(s)) - } - else { + } else { g = abs(sqrt(s)) } val h = f * g - s @@ -106,8 +105,7 @@ internal fun MutableStructure2D.svdGolabKahan(v: MutableStructure2D= 0) { g = (-1) * abs(sqrt(s)) - } - else { + } else { g = abs(sqrt(s)) } val h = f * g - s @@ -140,7 +138,7 @@ internal fun MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D.svdGolabKahan(v: MutableStructure2D Date: Tue, 12 Jul 2022 17:16:30 +0300 Subject: [PATCH 36/59] added svdGolabKahanHelper --- .../kmath/tensors/core/internal/linUtils.kt | 296 ++++++++++++++++++ 1 file changed, 296 insertions(+) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index aba6167ce..2c9a0cf8d 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -17,6 +17,7 @@ import space.kscience.kmath.tensors.core.DoubleTensor import space.kscience.kmath.tensors.core.DoubleTensorAlgebra import space.kscience.kmath.tensors.core.IntTensor import kotlin.math.abs +import kotlin.math.max import kotlin.math.min import kotlin.math.sqrt @@ -349,3 +350,298 
@@ internal fun DoubleTensorAlgebra.svdHelper( matrixV.mutableBuffer.array()[matrixV.bufferStart + i] = vBuffer[i] } } + +internal fun pythag(a: Double, b: Double): Double { + val at: Double = abs(a) + val bt: Double = abs(b) + val ct: Double + val result: Double + if (at > bt) { + ct = bt / at + result = at * sqrt(1.0 + ct * ct) + } else if (bt > 0.0) { + ct = at / bt + result = bt * sqrt(1.0 + ct * ct) + } else result = 0.0 + return result +} + +internal fun SIGN(a: Double, b: Double): Double { + if (b >= 0.0) + return abs(a) + else + return -abs(a) +} +internal fun MutableStructure2D.svdGolabKahanHelper(u: MutableStructure2D, w: BufferedTensor, v: MutableStructure2D) { + val shape = this.shape + val m = shape.component1() + val n = shape.component2() + var f = 0.0 + val rv1 = DoubleArray(n) + var s = 0.0 + var scale = 0.0 + var anorm = 0.0 + var g = 0.0 + var l = 0 + val epsilon = 1e-10 + for (i in 0 until n) { + /* left-hand reduction */ + l = i + 1 + rv1[i] = scale * g + g = 0.0 + s = 0.0 + scale = 0.0 + if (i < m) { + for (k in i until m) { + scale += abs(this[k, i]); + } + if (abs(scale) > epsilon) { + for (k in i until m) { + this[k, i] = (this[k, i] / scale) + s += this[k, i] * this[k, i] + } + f = this[i, i] + if (f >= 0) { + g = (-1) * abs(sqrt(s)) + } else { + g = abs(sqrt(s)) + } + val h = f * g - s + this[i, i] = f - g + if (i != n - 1) { + for (j in l until n) { + s = 0.0 + for (k in i until m) { + s += this[k, i] * this[k, j] + } + f = s / h + for (k in i until m) { + this[k, j] += f * this[k, i] + } + } + } + for (k in i until m) { + this[k, i] = this[k, i] * scale + } + } + } + + w.mutableBuffer.array()[w.bufferStart + i] = scale * g + /* right-hand reduction */ + g = 0.0 + s = 0.0 + scale = 0.0 + if (i < m && i != n - 1) { + for (k in l until n) { + scale += abs(this[i, k]) + } + if (abs(scale) > epsilon) { + for (k in l until n) { + this[i, k] = this[i, k] / scale + s += this[i, k] * this[i, k] + } + f = this[i, l] + if (f >= 0) { + g = (-1) * abs(sqrt(s)) + } else { + g = abs(sqrt(s)) + } + val h = f * g - s + this[i, l] = f - g + for (k in l until n) { + rv1[k] = this[i, k] / h + } + if (i != m - 1) { + for (j in l until m) { + s = 0.0 + for (k in l until n) { + s += this[j, k] * this[i, k] + } + for (k in l until n) { + this[j, k] += s * rv1[k] + } + } + } + for (k in l until n) { + this[i, k] = this[i, k] * scale + } + } + } + anorm = max(anorm, (abs(w.mutableBuffer.array()[w.bufferStart + i]) + abs(rv1[i]))); + } + + for (i in n - 1 downTo 0) { + if (i < n - 1) { + if (abs(g) > epsilon) { + for (j in l until n) { + v[j, i] = (this[i, j] / this[i, l]) / g + } + for (j in l until n) { + s = 0.0 + for (k in l until n) + s += this[i, k] * v[k, j] + for (k in l until n) + v[k, j] += s * v[k, i] + } + } + for (j in l until n) { + v[i, j] = 0.0 + v[j, i] = 0.0 + } + } + v[i, i] = 1.0 + g = rv1[i] + l = i + } + + for (i in min(n, m) - 1 downTo 0) { + l = i + 1 + g = w.mutableBuffer.array()[w.bufferStart + i] + for (j in l until n) { + this[i, j] = 0.0 + } + if (abs(g) > epsilon) { + g = 1.0 / g + for (j in l until n) { + s = 0.0 + for (k in l until m) { + s += this[k, i] * this[k, j] + } + f = (s / this[i, i]) * g + for (k in i until m) { + this[k, j] += f * this[k, i] + } + } + for (j in i until m) { + this[j, i] *= g + } + } else { + for (j in i until m) { + this[j, i] = 0.0 + } + } + this[i, i] += 1.0 + } + + var flag = 0 + var nm = 0 + var c = 0.0 + var h = 0.0 + var y = 0.0 + var z = 0.0 + var x = 0.0 + for (k in n - 1 downTo 0) { + for (its in 1 until 30) { + flag = 
1 + for (newl in k downTo 0) { + nm = newl - 1 + if (abs(rv1[newl]) + anorm == anorm) { + flag = 0 + l = newl + break + } + if (abs(w.mutableBuffer.array()[w.bufferStart + nm]) + anorm == anorm) { + l = newl + break + } + } + + if (flag != 0) { + c = 0.0 + s = 1.0 + for (i in l until k) { + f = s * rv1[i] + rv1[i] = c * rv1[i] + if (abs(f) + anorm == anorm) { + break + } + h = pythag(f, g) + w.mutableBuffer.array()[w.bufferStart + i] = h + h = 1.0 / h + c = g * h + s = (-f) * h + for (j in 0 until m) { + y = this[j, nm] + z = this[j, i] + this[j, nm] = y * c + z * s + this[j, i] = z * c - y * s + } + } + } + + z = w.mutableBuffer.array()[w.bufferStart + k] + if (l == k) { + if (z < 0.0) { + w.mutableBuffer.array()[w.bufferStart + k] = -z + for (j in 0 until n) + v[j, k] = -v[j, k] + } + break + } + +// надо придумать, что сделать - выкинуть ошибку? +// if (its == 30) { +// return +// } + + x = w.mutableBuffer.array()[w.bufferStart + l] + nm = k - 1 + y = w.mutableBuffer.array()[w.bufferStart + nm] + g = rv1[nm] + h = rv1[k] + f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y) + g = pythag(f, 1.0) + f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x + c = 1.0 + s = 1.0 + + var i = 0 + for (j in l until nm + 1) { + i = j + 1 + g = rv1[i] + y = w.mutableBuffer.array()[w.bufferStart + i] + h = s * g + g = c * g + z = pythag(f, h) + rv1[j] = z + c = f / z + s = h / z + f = x * c + g * s + g = g * c - x * s + h = y * s + y *= c + + for (jj in 0 until n) { + x = v[jj, j]; + z = v[jj, i]; + v[jj, j] = x * c + z * s; + v[jj, i] = z * c - x * s; + } + z = pythag(f, h) + w.mutableBuffer.array()[w.bufferStart + j] = z + if (abs(z) > epsilon) { + z = 1.0 / z + c = f * z + s = h * z + } + f = c * g + s * y + x = c * y - s * g + for (jj in 0 until m) { + y = this[jj, j] + z = this[jj, i] + this[jj, j] = y * c + z * s + this[jj, i] = z * c - y * s + } + } + rv1[l] = 0.0 + rv1[k] = f + w.mutableBuffer.array()[w.bufferStart + k] = x + } + } + + for (i in 0 until m) { + for (j in 0 until n) { + u[j, i] = this[i, j] + } + } +} From c4afb69ba9a2b598b0428645593c8ad400c457f4 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 12 Jul 2022 17:19:19 +0300 Subject: [PATCH 37/59] added svdGolabKahan --- .../kmath/tensors/core/DoubleTensorAlgebra.kt | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index e9dc34748..174b1dde2 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -882,6 +882,34 @@ public open class DoubleTensorAlgebra : return Triple(uTensor.transpose(), sTensor, vTensor.transpose()) } + public fun StructureND.svdGolabKahan(): Triple { + val size = tensor.dimension + val commonShape = tensor.shape.sliceArray(0 until size - 2) + val (n, m) = tensor.shape.sliceArray(size - 2 until size) + val uTensor = zeros(commonShape + intArrayOf(min(n, m), n)) + val sTensor = zeros(commonShape + intArrayOf(min(n, m))) + val vTensor = zeros(commonShape + intArrayOf(min(n, m), m)) + + val matrices = tensor.matrices + val uTensors = uTensor.matrices + val sTensorVectors = sTensor.vectors + val vTensors = vTensor.matrices + + for (index in matrices.indices) { + val matrix = matrices[index] + val matrixSize = matrix.shape.reduce { acc, i -> 
acc * i } + val curMatrix = DoubleTensor( + matrix.shape, + matrix.mutableBuffer.array() + .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) + .toDoubleArray() + ) + curMatrix.as2D().svdGolabKahanHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D()) + } + + return Triple(uTensor.transpose(), sTensor, vTensor) + } + override fun StructureND.symEig(): Pair = symEigJacobi(maxIteration = 50, epsilon = 1e-15) /** From 13d063c1b1f927ba40b43cb3dc4075ca2b3a68ba Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 12 Jul 2022 20:32:44 +0300 Subject: [PATCH 38/59] deleted tmp file --- .../space/kscience/kmath/tensors/svdcmp.kt | 325 ------------------ 1 file changed, 325 deletions(-) delete mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt deleted file mode 100644 index ec4c8cc32..000000000 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/svdcmp.kt +++ /dev/null @@ -1,325 +0,0 @@ -package space.kscience.kmath.tensors - -import space.kscience.kmath.nd.* -import kotlin.math.abs -import kotlin.math.max -import kotlin.math.min -import kotlin.math.sqrt - -/* - * Copyright 2018-2021 KMath contributors. - * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. - */ - -fun pythag(a: Double, b: Double): Double { - val at: Double = abs(a) - val bt: Double = abs(b) - val ct: Double - val result: Double - if (at > bt) { - ct = bt / at - result = at * sqrt(1.0 + ct * ct) - } else if (bt > 0.0) { - ct = at / bt - result = bt * sqrt(1.0 + ct * ct) - } else result = 0.0 - return result -} - -fun SIGN(a: Double, b: Double): Double { - if (b >= 0.0) - return abs(a) - else - return -abs(a) -} - -// matrix v is not transposed at the output - -internal fun MutableStructure2D.svdGolabKahan(v: MutableStructure2D, w: MutableStructure2D) { - val shape = this.shape - val m = shape.component1() - val n = shape.component2() - var f = 0.0 - val rv1 = DoubleArray(n) - var s = 0.0 - var scale = 0.0 - var anorm = 0.0 - var g = 0.0 - var l = 0 - for (i in 0 until n) { - /* left-hand reduction */ - l = i + 1 - rv1[i] = scale * g - g = 0.0 - s = 0.0 - scale = 0.0 - if (i < m) { - for (k in i until m) { - scale += abs(this[k, i]); - } - if (scale != 0.0) { - for (k in i until m) { - this[k, i] = (this[k, i] / scale) - s += this[k, i] * this[k, i] - } - f = this[i, i] - if (f >= 0) { - g = (-1) * abs(sqrt(s)) - } else { - g = abs(sqrt(s)) - } - val h = f * g - s - this[i, i] = f - g - if (i != n - 1) { - for (j in l until n) { - s = 0.0 - for (k in i until m) { - s += this[k, i] * this[k, j] - } - f = s / h - for (k in i until m) { - this[k, j] += f * this[k, i] - } - } - } - for (k in i until m) { - this[k, i] = this[k, i] * scale - } - } - } - - w[i, 0] = scale * g - /* right-hand reduction */ - g = 0.0 - s = 0.0 - scale = 0.0 - if (i < m && i != n - 1) { - for (k in l until n) { - scale += abs(this[i, k]) - } - if (scale != 0.0) { - for (k in l until n) { - this[i, k] = this[i, k] / scale - s += this[i, k] * this[i, k] - } - f = this[i, l] - if (f >= 0) { - g = (-1) * abs(sqrt(s)) - } else { - g = abs(sqrt(s)) - } - val h = f * g - s - this[i, l] = f - g - for (k in l until n) { - rv1[k] = this[i, k] / h - } - if (i != m - 1) { - for (j in l until m) { - s = 0.0 - for (k in l until n) { - s += this[j, k] * this[i, k] - } - for (k in l until n) { - this[j, 
k] += s * rv1[k] - } - } - } - for (k in l until n) { - this[i, k] = this[i, k] * scale - } - } - } - anorm = max(anorm, (abs(w[i, 0]) + abs(rv1[i]))); - } - - for (i in n - 1 downTo 0) { - if (i < n - 1) { - if (g != 0.0) { - for (j in l until n) { - v[j, i] = (this[i, j] / this[i, l]) / g - } - for (j in l until n) { - s = 0.0 - for (k in l until n) - s += this[i, k] * v[k, j] - for (k in l until n) - v[k, j] += s * v[k, i] - } - } - for (j in l until n) { - v[i, j] = 0.0 - v[j, i] = 0.0 - } - } - v[i, i] = 1.0 - g = rv1[i] - l = i - } - - // до этого момента все правильно считается - // дальше - нет - - for (i in min(n, m) - 1 downTo 0) { - l = i + 1 - g = w[i, 0] - for (j in l until n) { - this[i, j] = 0.0 - } - if (g != 0.0) { - // !!!!! вот тут деление на почти ноль - g = 1.0 / g - for (j in l until n) { - s = 0.0 - for (k in l until m) { - s += this[k, i] * this[k, j] - } - f = (s / this[i, i]) * g - for (k in i until m) { - this[k, j] += f * this[k, i] - } - } - for (j in i until m) { - this[j, i] *= g - } - } else { - for (j in i until m) { - this[j, i] = 0.0 - } - } - this[i, i] += 1.0 - } - -// println("matrix") -// this.print() -// тут матрица должна выглядеть так: -// 0.134840 -0.762770 0.522117 -// -0.269680 -0.476731 -0.245388 -// -0.404520 -0.190693 -0.527383 -// -0.539360 0.095346 -0.297540 -// -0.674200 0.381385 0.548193 - - this[0, 2] = 0.522117 - this[1, 2] = -0.245388 - this[2, 2] = -0.527383 - this[3, 2] = -0.297540 - this[4, 2] = 0.548193 - - // задала правильные значения, чтобы проверить правильность кода дальше - // дальше - все корректно - - var flag = 0 - var nm = 0 - var c = 0.0 - var h = 0.0 - var y = 0.0 - var z = 0.0 - var x = 0.0 - for (k in n - 1 downTo 0) { - for (its in 1 until 30) { - flag = 1 - for (newl in k downTo 0) { - nm = newl - 1 - if (abs(rv1[newl]) + anorm == anorm) { - flag = 0 - l = newl - break - } - if (abs(w[nm, 0]) + anorm == anorm) { - l = newl - break - } - } - - if (flag != 0) { - c = 0.0 - s = 1.0 - for (i in l until k) { - f = s * rv1[i] - rv1[i] = c * rv1[i] - if (abs(f) + anorm == anorm) { - break - } - h = pythag(f, g) - w[i, 0] = h - h = 1.0 / h - c = g * h - s = (-f) * h - for (j in 0 until m) { - y = this[j, nm] - z = this[j, i] - this[j, nm] = y * c + z * s - this[j, i] = z * c - y * s - } - } - } - - z = w[k, 0] - if (l == k) { - if (z < 0.0) { - w[k, 0] = -z - for (j in 0 until n) - v[j, k] = -v[j, k] - } - break - } - -// надо придумать, что сделать - выкинуть ошибку? 
-// if (its == 30) { -// return -// } - - x = w[l, 0] - nm = k - 1 - y = w[nm, 0] - g = rv1[nm] - h = rv1[k] - f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y) - g = pythag(f, 1.0) - f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x - c = 1.0 - s = 1.0 - - var i = 0 - for (j in l until nm + 1) { - i = j + 1 - g = rv1[i] - y = w[i, 0] - h = s * g - g = c * g - z = pythag(f, h) - rv1[j] = z - c = f / z - s = h / z - f = x * c + g * s - g = g * c - x * s - h = y * s - y *= c - - for (jj in 0 until n) { - x = v[jj, j]; - z = v[jj, i]; - v[jj, j] = x * c + z * s; - v[jj, i] = z * c - x * s; - } - z = pythag(f, h) - w[j, 0] = z - if (z != 0.0) { - z = 1.0 / z - c = f * z - s = h * z - } - f = c * g + s * y - x = c * y - s * g - for (jj in 0 until m) { - y = this[jj, j] - z = this[jj, i] - this[jj, j] = y * c + z * s - this[jj, i] = z * c - y * s - } - } - rv1[l] = 0.0 - rv1[k] = f - w[k, 0] = x - } - } -} \ No newline at end of file From 7749b72f248eae2fcbee2da4fd31fb991f4a756a Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 12 Jul 2022 20:37:00 +0300 Subject: [PATCH 39/59] tensors dimensions changed --- .../kscience/kmath/tensors/core/DoubleTensorAlgebra.kt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index 174b1dde2..b6b039205 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -886,9 +886,9 @@ public open class DoubleTensorAlgebra : val size = tensor.dimension val commonShape = tensor.shape.sliceArray(0 until size - 2) val (n, m) = tensor.shape.sliceArray(size - 2 until size) - val uTensor = zeros(commonShape + intArrayOf(min(n, m), n)) - val sTensor = zeros(commonShape + intArrayOf(min(n, m))) - val vTensor = zeros(commonShape + intArrayOf(min(n, m), m)) + val uTensor = zeros(commonShape + intArrayOf(m, n)) + val sTensor = zeros(commonShape + intArrayOf(m)) + val vTensor = zeros(commonShape + intArrayOf(m, m)) val matrices = tensor.matrices val uTensors = uTensor.matrices From e7968f28f4f41f60f9592d05316571bc893f6c51 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Wed, 13 Jul 2022 18:34:26 +0300 Subject: [PATCH 40/59] svd benchmarks added --- benchmarks/build.gradle.kts | 5 +++ .../kscience/kmath/benchmarks/SVDBenchmark.kt | 34 +++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt diff --git a/benchmarks/build.gradle.kts b/benchmarks/build.gradle.kts index 22712816d..8468776a5 100644 --- a/benchmarks/build.gradle.kts +++ b/benchmarks/build.gradle.kts @@ -84,6 +84,11 @@ benchmark { iterationTimeUnit = "ms" } + configurations.register("svd") { + commonConfiguration() + include("svdBenchmark") + } + configurations.register("buffer") { commonConfiguration() include("BufferBenchmark") diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt new file mode 100644 index 000000000..e6fd716dd --- /dev/null +++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt @@ -0,0 +1,34 @@ +/* + * Copyright 2018-2021 KMath contributors. 
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. + */ + +package space.kscience.kmath.benchmarks +import kotlinx.benchmark.Benchmark +import kotlinx.benchmark.Blackhole +import kotlinx.benchmark.Scope +import kotlinx.benchmark.State +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.svdGolabKahan +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.svd + +@State(Scope.Benchmark) +class SVDBenchmark { + companion object { + val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(10, 10, 10), 0) + } + + @Benchmark + fun svdPowerMethod(blackhole: Blackhole) { + blackhole.consume( + tensor.svd() + ) + } + + @Benchmark + fun svdGolabKahan(blackhole: Blackhole) { + blackhole.consume( + tensor.svdGolabKahan() + ) + } +} \ No newline at end of file From b99d2e84861361cc52bbea5ea18a7e17b00790cb Mon Sep 17 00:00:00 2001 From: Margarita <44027333+margarita0303@users.noreply.github.com> Date: Tue, 12 Jul 2022 20:43:35 +0300 Subject: [PATCH 41/59] Delete margarita.kt deleted tmp file for testing during work --- .../space/kscience/kmath/tensors/margarita.kt | 65 ------------------- 1 file changed, 65 deletions(-) delete mode 100644 examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt deleted file mode 100644 index c376d6d0f..000000000 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/margarita.kt +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2018-2021 KMath contributors. - * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
- */ - -package space.kscience.kmath.tensors - -import space.kscience.kmath.linear.transpose -import space.kscience.kmath.misc.PerformancePitfall -import space.kscience.kmath.nd.MutableStructure2D -import space.kscience.kmath.nd.Structure2D -import space.kscience.kmath.nd.as2D -import space.kscience.kmath.tensors.core.* -import space.kscience.kmath.tensors.core.tensorAlgebra -import kotlin.math.* - -fun MutableStructure2D.print() { - val n = this.shape.component1() - val m = this.shape.component2() - for (i in 0 until n) { - for (j in 0 until m) { - val x = (this[i, j] * 100).roundToInt() / 100.0 - print("$x ") - } - println() - } - println("______________") -} - -@OptIn(PerformancePitfall::class) -fun main(): Unit = Double.tensorAlgebra.withBroadcast { - val shape = intArrayOf(5, 3) - val buffer = doubleArrayOf( - 1.000000, 2.000000, 3.000000, - 2.000000, 3.000000, 4.000000, - 3.000000, 4.000000, 5.000000, - 4.000000, 5.000000, 6.000000, - 5.000000, 6.000000, 7.000000 - ) - val buffer2 = doubleArrayOf( - 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000, - 0.000000, 0.000000, 0.000000 - ) - val tensor = fromArray(shape, buffer).as2D() - val v = fromArray(intArrayOf(3, 3), buffer2).as2D() - val w_shape = intArrayOf(3, 1) - var w_buffer = doubleArrayOf(0.000000) - for (i in 0 until 3 - 1) { - w_buffer += doubleArrayOf(0.000000) - } - val w = BroadcastDoubleTensorAlgebra.fromArray(w_shape, w_buffer).as2D() - tensor.print() - var ans = Pair(w, v) - tensor.svdGolabKahan(v, w) - - println("u") - tensor.print() - println("w") - w.print() - println("v") - v.print() - - -} From 46a8e5fca4a085818bd08927ae3e2abd6c5b4089 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Wed, 13 Jul 2022 18:52:58 +0300 Subject: [PATCH 42/59] changed name in build file --- benchmarks/build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/build.gradle.kts b/benchmarks/build.gradle.kts index 8468776a5..61c883b44 100644 --- a/benchmarks/build.gradle.kts +++ b/benchmarks/build.gradle.kts @@ -86,7 +86,7 @@ benchmark { configurations.register("svd") { commonConfiguration() - include("svdBenchmark") + include("SVDBenchmark") } configurations.register("buffer") { From de2f82c878e6a2f871d851e6ec2f632249f03ea2 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Wed, 13 Jul 2022 19:51:17 +0300 Subject: [PATCH 43/59] functions SIGN and pythag are made private --- .../space/kscience/kmath/tensors/core/internal/linUtils.kt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 2c9a0cf8d..059011c6d 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -351,7 +351,7 @@ internal fun DoubleTensorAlgebra.svdHelper( } } -internal fun pythag(a: Double, b: Double): Double { +private fun pythag(a: Double, b: Double): Double { val at: Double = abs(a) val bt: Double = abs(b) val ct: Double @@ -366,7 +366,7 @@ internal fun pythag(a: Double, b: Double): Double { return result } -internal fun SIGN(a: Double, b: Double): Double { +private fun SIGN(a: Double, b: Double): Double { if (b >= 0.0) return abs(a) else From 082317b8b5f8c5c2625f7304bdb52bff83f49952 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Wed, 13 Jul 2022 19:54:10 +0300 Subject: 
[PATCH 44/59] added tests for svd Golab Kahan --- .../core/TestDoubleLinearOpsAlgebra.kt | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index e025d4b71..3b305b25a 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -175,6 +175,28 @@ internal class TestDoubleLinearOpsTensorAlgebra { assertTrue(tensor.eq(tensorSVD)) } + @Test + fun testSVDGolabKahan() = DoubleTensorAlgebra{ + testSVDGolabKahanFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) + testSVDGolabKahanFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) + val buffer = doubleArrayOf( + 1.000000, 2.000000, 3.000000, + 2.000000, 3.000000, 4.000000, + 3.000000, 4.000000, 5.000000, + 4.000000, 5.000000, 6.000000, + 5.000000, 6.000000, 7.000000 + ) + testSVDGolabKahanFor(fromArray(intArrayOf(5, 3), buffer)) + } + + @Test + fun testBatchedSVDGolabKahan() = DoubleTensorAlgebra{ + val tensor = randomNormal(intArrayOf(2, 5, 3), 0) + val (tensorU, tensorS, tensorV) = tensor.svdGolabKahan() + val tensorSVD = tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose()) + assertTrue(tensor.eq(tensorSVD)) + } + @Test fun testBatchedSymEig() = DoubleTensorAlgebra { val tensor = randomNormal(shape = intArrayOf(2, 3, 3), 0) @@ -199,3 +221,17 @@ private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double assertTrue(tensor.eq(tensorSVD, epsilon)) } + +private fun DoubleTensorAlgebra.testSVDGolabKahanFor(tensor: DoubleTensor) { + val svd = tensor.svdGolabKahan() + + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + + assertTrue(tensor.eq(tensorSVD)) +} + + From 684227df63b1f4fc6b3345befc3da26a737f96da Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Thu, 14 Jul 2022 14:34:44 +0300 Subject: [PATCH 45/59] added new svd tests --- .../core/TestDoubleLinearOpsAlgebra.kt | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index 3b305b25a..a5e4954c7 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -167,6 +167,18 @@ internal class TestDoubleLinearOpsTensorAlgebra { testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) } +// @Test +// fun testSVDError() = DoubleTensorAlgebra{ +// val buffer = doubleArrayOf( +// 1.000000, 2.000000, 3.000000, +// 2.000000, 3.000000, 4.000000, +// 3.000000, 4.000000, 5.000000, +// 4.000000, 5.000000, 6.000000, +// 5.000000, 6.000000, 7.000000 +// ) +// testSVDFor(fromArray(intArrayOf(5, 3), buffer)) +// } + @Test fun testBatchedSVD() = DoubleTensorAlgebra { val tensor = randomNormal(intArrayOf(2, 5, 3), 0) @@ -189,6 +201,28 @@ internal class TestDoubleLinearOpsTensorAlgebra { testSVDGolabKahanFor(fromArray(intArrayOf(5, 3), buffer)) } +// @Test +// fun testSVDGolabKahanError() = DoubleTensorAlgebra{ +// val 
buffer = doubleArrayOf( +// 1.0, 2.0, 3.0, 2.0, 3.0, +// 4.0, 3.0, 4.0, 5.0, 4.0, +// 5.0, 6.0, 5.0, 6.0, 7.0 +// ) +// testSVDGolabKahanFor(fromArray(intArrayOf(3, 5), buffer)) +// } + + @Test + fun testSVDGolabKahanBig() = DoubleTensorAlgebra{ + val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) + testSVDGolabKahanFor(tensor) + } + + @Test + fun testSVDBig() = DoubleTensorAlgebra{ + val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) + testSVDFor(tensor) + } + @Test fun testBatchedSVDGolabKahan() = DoubleTensorAlgebra{ val tensor = randomNormal(intArrayOf(2, 5, 3), 0) From 40ff0a52a6a7fa61a003228d59c1bf06278442fc Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Thu, 14 Jul 2022 14:38:00 +0300 Subject: [PATCH 46/59] commented out new tests, they are too long --- .../core/TestDoubleLinearOpsAlgebra.kt | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index a5e4954c7..c0d241a31 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -211,17 +211,17 @@ internal class TestDoubleLinearOpsTensorAlgebra { // testSVDGolabKahanFor(fromArray(intArrayOf(3, 5), buffer)) // } - @Test - fun testSVDGolabKahanBig() = DoubleTensorAlgebra{ - val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) - testSVDGolabKahanFor(tensor) - } - - @Test - fun testSVDBig() = DoubleTensorAlgebra{ - val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) - testSVDFor(tensor) - } +// @Test +// fun testSVDGolabKahanBig() = DoubleTensorAlgebra{ +// val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) +// testSVDGolabKahanFor(tensor) +// } +// +// @Test +// fun testSVDBig() = DoubleTensorAlgebra{ +// val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) +// testSVDFor(tensor) +// } @Test fun testBatchedSVDGolabKahan() = DoubleTensorAlgebra{ From d1ca4493d5a56354fbc813e44f5b7fb524eb0476 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 26 Jul 2022 18:03:01 +0300 Subject: [PATCH 47/59] functions renamed, svd (Golab Kahan) now works by default --- .../kmath/tensors/core/DoubleTensorAlgebra.kt | 63 +++++++++---------- .../kmath/tensors/core/internal/linUtils.kt | 4 +- 2 files changed, 32 insertions(+), 35 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index b6b039205..9ae7853a5 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -833,8 +833,33 @@ public open class DoubleTensorAlgebra : return qTensor to rTensor } - override fun StructureND.svd(): Triple = - svd(epsilon = 1e-10) + override fun StructureND.svd(): Triple { + val size = tensor.dimension + val commonShape = tensor.shape.sliceArray(0 until size - 2) + val (n, m) = tensor.shape.sliceArray(size - 2 until size) + val uTensor = zeros(commonShape + intArrayOf(m, n)) + val sTensor = zeros(commonShape + intArrayOf(m)) + 
val vTensor = zeros(commonShape + intArrayOf(m, m)) + + val matrices = tensor.matrices + val uTensors = uTensor.matrices + val sTensorVectors = sTensor.vectors + val vTensors = vTensor.matrices + + for (index in matrices.indices) { + val matrix = matrices[index] + val matrixSize = matrix.shape.reduce { acc, i -> acc * i } + val curMatrix = DoubleTensor( + matrix.shape, + matrix.mutableBuffer.array() + .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) + .toDoubleArray() + ) + curMatrix.as2D().svdHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D()) + } + + return Triple(uTensor.transpose(), sTensor, vTensor) + } /** * Singular Value Decomposition. @@ -849,7 +874,7 @@ public open class DoubleTensorAlgebra : * i.e., the precision with which the cosine approaches 1 in an iterative algorithm. * @return a triple `Triple(U, S, V)`. */ - public fun StructureND.svd(epsilon: Double): Triple { + public fun StructureND.svdPowerMethod(epsilon: Double = 1e-10): Triple { val size = tensor.dimension val commonShape = tensor.shape.sliceArray(0 until size - 2) val (n, m) = tensor.shape.sliceArray(size - 2 until size) @@ -876,40 +901,12 @@ public open class DoubleTensorAlgebra : .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) .toDoubleArray() ) - svdHelper(curMatrix, usv, m, n, epsilon) + svdPowerMethodHelper(curMatrix, usv, m, n, epsilon) } return Triple(uTensor.transpose(), sTensor, vTensor.transpose()) } - public fun StructureND.svdGolabKahan(): Triple { - val size = tensor.dimension - val commonShape = tensor.shape.sliceArray(0 until size - 2) - val (n, m) = tensor.shape.sliceArray(size - 2 until size) - val uTensor = zeros(commonShape + intArrayOf(m, n)) - val sTensor = zeros(commonShape + intArrayOf(m)) - val vTensor = zeros(commonShape + intArrayOf(m, m)) - - val matrices = tensor.matrices - val uTensors = uTensor.matrices - val sTensorVectors = sTensor.vectors - val vTensors = vTensor.matrices - - for (index in matrices.indices) { - val matrix = matrices[index] - val matrixSize = matrix.shape.reduce { acc, i -> acc * i } - val curMatrix = DoubleTensor( - matrix.shape, - matrix.mutableBuffer.array() - .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) - .toDoubleArray() - ) - curMatrix.as2D().svdGolabKahanHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D()) - } - - return Triple(uTensor.transpose(), sTensor, vTensor) - } - override fun StructureND.symEig(): Pair = symEigJacobi(maxIteration = 50, epsilon = 1e-15) /** @@ -935,7 +932,7 @@ public open class DoubleTensorAlgebra : } } - val (u, s, v) = tensor.svd(epsilon) + val (u, s, v) = tensor.svd() val shp = s.shape + intArrayOf(1) val utv = u.transpose() dot v val n = s.shape.last() diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 059011c6d..26852dc0b 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -300,7 +300,7 @@ internal fun DoubleTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double = 1e-10) } } -internal fun DoubleTensorAlgebra.svdHelper( +internal fun DoubleTensorAlgebra.svdPowerMethodHelper( matrix: DoubleTensor, USV: Triple, BufferedTensor, BufferedTensor>, m: Int, n: Int, epsilon: Double, @@ -372,7 +372,7 @@ private fun SIGN(a: Double, b: Double): 
Double { else return -abs(a) } -internal fun MutableStructure2D.svdGolabKahanHelper(u: MutableStructure2D, w: BufferedTensor, v: MutableStructure2D) { +internal fun MutableStructure2D.svdHelper(u: MutableStructure2D, w: BufferedTensor, v: MutableStructure2D) { val shape = this.shape val m = shape.component1() val n = shape.component2() From 168912161faa959758c25bd7b5bae255144be364 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 26 Jul 2022 21:56:50 +0300 Subject: [PATCH 48/59] fixed error --- .../kscience/kmath/tensors/core/internal/linUtils.kt | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 26852dc0b..8c1110f33 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -549,12 +549,13 @@ internal fun MutableStructure2D.svdHelper(u: MutableStructure2D, if (flag != 0) { c = 0.0 s = 1.0 - for (i in l until k) { + for (i in l until k + 1) { f = s * rv1[i] rv1[i] = c * rv1[i] if (abs(f) + anorm == anorm) { break } + g = w.mutableBuffer.array()[w.bufferStart + i] h = pythag(f, g) w.mutableBuffer.array()[w.bufferStart + i] = h h = 1.0 / h @@ -579,11 +580,6 @@ internal fun MutableStructure2D.svdHelper(u: MutableStructure2D, break } -// надо придумать, что сделать - выкинуть ошибку? -// if (its == 30) { -// return -// } - x = w.mutableBuffer.array()[w.bufferStart + l] nm = k - 1 y = w.mutableBuffer.array()[w.bufferStart + nm] From cccfe611129d414221bc09f58e6b62be0c36296c Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 26 Jul 2022 22:13:48 +0300 Subject: [PATCH 49/59] added some tests for svd --- .../core/TestDoubleLinearOpsAlgebra.kt | 147 ++++++++---------- 1 file changed, 63 insertions(+), 84 deletions(-) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index c0d241a31..d3e67fbdc 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -161,76 +161,6 @@ internal class TestDoubleLinearOpsTensorAlgebra { assertTrue { abs(abs(res.mutableBuffer.array()[res.bufferStart + 1]) - 0.922) < 0.01 } } - @Test - fun testSVD() = DoubleTensorAlgebra{ - testSVDFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) - testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) - } - -// @Test -// fun testSVDError() = DoubleTensorAlgebra{ -// val buffer = doubleArrayOf( -// 1.000000, 2.000000, 3.000000, -// 2.000000, 3.000000, 4.000000, -// 3.000000, 4.000000, 5.000000, -// 4.000000, 5.000000, 6.000000, -// 5.000000, 6.000000, 7.000000 -// ) -// testSVDFor(fromArray(intArrayOf(5, 3), buffer)) -// } - - @Test - fun testBatchedSVD() = DoubleTensorAlgebra { - val tensor = randomNormal(intArrayOf(2, 5, 3), 0) - val (tensorU, tensorS, tensorV) = tensor.svd() - val tensorSVD = tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose()) - assertTrue(tensor.eq(tensorSVD)) - } - - @Test - fun testSVDGolabKahan() = DoubleTensorAlgebra{ - testSVDGolabKahanFor(fromArray(intArrayOf(2, 
3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) - testSVDGolabKahanFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) - val buffer = doubleArrayOf( - 1.000000, 2.000000, 3.000000, - 2.000000, 3.000000, 4.000000, - 3.000000, 4.000000, 5.000000, - 4.000000, 5.000000, 6.000000, - 5.000000, 6.000000, 7.000000 - ) - testSVDGolabKahanFor(fromArray(intArrayOf(5, 3), buffer)) - } - -// @Test -// fun testSVDGolabKahanError() = DoubleTensorAlgebra{ -// val buffer = doubleArrayOf( -// 1.0, 2.0, 3.0, 2.0, 3.0, -// 4.0, 3.0, 4.0, 5.0, 4.0, -// 5.0, 6.0, 5.0, 6.0, 7.0 -// ) -// testSVDGolabKahanFor(fromArray(intArrayOf(3, 5), buffer)) -// } - -// @Test -// fun testSVDGolabKahanBig() = DoubleTensorAlgebra{ -// val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) -// testSVDGolabKahanFor(tensor) -// } -// -// @Test -// fun testSVDBig() = DoubleTensorAlgebra{ -// val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0) -// testSVDFor(tensor) -// } - - @Test - fun testBatchedSVDGolabKahan() = DoubleTensorAlgebra{ - val tensor = randomNormal(intArrayOf(2, 5, 3), 0) - val (tensorU, tensorS, tensorV) = tensor.svdGolabKahan() - val tensorSVD = tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose()) - assertTrue(tensor.eq(tensorSVD)) - } - @Test fun testBatchedSymEig() = DoubleTensorAlgebra { val tensor = randomNormal(shape = intArrayOf(2, 3, 3), 0) @@ -240,24 +170,63 @@ internal class TestDoubleLinearOpsTensorAlgebra { assertTrue(tensorSigma.eq(tensorSigmaCalc)) } - -} - - -private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double = 1e-10) { - val svd = tensor.svd() - - val tensorSVD = svd.first - .dot( - diagonalEmbedding(svd.second) - .dot(svd.third.transpose()) + @Test + fun testSVD() = DoubleTensorAlgebra{ + testSVDFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) + testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) + val buffer1 = doubleArrayOf( + 1.000000, 2.000000, 3.000000, + 2.000000, 3.000000, 4.000000, + 3.000000, 4.000000, 5.000000, + 4.000000, 5.000000, 6.000000, + 5.000000, 6.000000, 7.000000 ) + testSVDFor(fromArray(intArrayOf(5, 3), buffer1)) + val buffer2 = doubleArrayOf( + 1.0, 2.0, 3.0, 2.0, 3.0, + 4.0, 3.0, 4.0, 5.0, 4.0, + 5.0, 6.0, 5.0, 6.0, 7.0 + ) + testSVDFor(fromArray(intArrayOf(3, 5), buffer2)) + } - assertTrue(tensor.eq(tensorSVD, epsilon)) + @Test + fun testBatchedSVD() = DoubleTensorAlgebra{ + val tensor1 = randomNormal(intArrayOf(2, 5, 3), 0) + testSVDFor(tensor1) + val tensor2 = DoubleTensorAlgebra.randomNormal(intArrayOf(30, 30, 30), 0) + testSVDFor(tensor2) + } + + @Test + fun testSVDPowerMethod() = DoubleTensorAlgebra{ + testSVDPowerMethodFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) + testSVDPowerMethodFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) + } + + @Test + fun testBatchedSVDPowerMethod() = DoubleTensorAlgebra { + val tensor1 = randomNormal(intArrayOf(2, 5, 3), 0) + testSVDPowerMethodFor(tensor1) + val tensor2 = DoubleTensorAlgebra.randomNormal(intArrayOf(30, 30, 30), 0) + testSVDPowerMethodFor(tensor2) + } + +// @Test +// fun testSVDPowerMethodError() = DoubleTensorAlgebra{ +// val buffer = doubleArrayOf( +// 1.000000, 2.000000, 3.000000, +// 2.000000, 3.000000, 4.000000, +// 3.000000, 4.000000, 5.000000, +// 4.000000, 5.000000, 6.000000, +// 5.000000, 6.000000, 7.000000 +// ) +// testSVDPowerMethodFor(fromArray(intArrayOf(5, 3), buffer)) +// } } -private fun 
DoubleTensorAlgebra.testSVDGolabKahanFor(tensor: DoubleTensor) { - val svd = tensor.svdGolabKahan() +private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor) { + val svd = tensor.svd() val tensorSVD = svd.first .dot( @@ -268,4 +237,14 @@ private fun DoubleTensorAlgebra.testSVDGolabKahanFor(tensor: DoubleTensor) { assertTrue(tensor.eq(tensorSVD)) } +private fun DoubleTensorAlgebra.testSVDPowerMethodFor(tensor: DoubleTensor, epsilon: Double = 1e-10) { + val svd = tensor.svdPowerMethod() + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + + assertTrue(tensor.eq(tensorSVD, epsilon)) +} \ No newline at end of file From 9357e0f70306992caf6baec2c567546f84bb82b8 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 26 Jul 2022 22:42:36 +0300 Subject: [PATCH 50/59] benchmark changed due to new names of svd functions --- .../space/kscience/kmath/benchmarks/SVDBenchmark.kt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt index e6fd716dd..3d70ebbc3 100644 --- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt +++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt @@ -8,9 +8,9 @@ import kotlinx.benchmark.Benchmark import kotlinx.benchmark.Blackhole import kotlinx.benchmark.Scope import kotlinx.benchmark.State -import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.svdGolabKahan +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.svd import space.kscience.kmath.tensors.core.DoubleTensorAlgebra -import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.svd +import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.svdPowerMethod @State(Scope.Benchmark) class SVDBenchmark { @@ -21,14 +21,14 @@ class SVDBenchmark { @Benchmark fun svdPowerMethod(blackhole: Blackhole) { blackhole.consume( - tensor.svd() + tensor.svdPowerMethod() ) } @Benchmark fun svdGolabKahan(blackhole: Blackhole) { blackhole.consume( - tensor.svdGolabKahan() + tensor.svd() ) } } \ No newline at end of file From 8008891a01c69cc0153c8f457154b8d28099e37a Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Sat, 30 Jul 2022 18:37:20 +0300 Subject: [PATCH 51/59] added separately svdGolubKahan and svd calls svdGolubKahan, changed benchmarks and tests --- .../kscience/kmath/benchmarks/SVDBenchmark.kt | 6 +- .../kmath/tensors/core/DoubleTensorAlgebra.kt | 6 +- .../kmath/tensors/core/internal/linUtils.kt | 2 +- .../core/TestDoubleLinearOpsAlgebra.kt | 56 ++++++++++++++++++- 4 files changed, 64 insertions(+), 6 deletions(-) diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt index 3d70ebbc3..6879960d7 100644 --- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt +++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt @@ -8,7 +8,7 @@ import kotlinx.benchmark.Benchmark import kotlinx.benchmark.Blackhole import kotlinx.benchmark.Scope import kotlinx.benchmark.State -import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.svd +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.svdGolubKahan import space.kscience.kmath.tensors.core.DoubleTensorAlgebra import 
space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.svdPowerMethod @@ -26,9 +26,9 @@ class SVDBenchmark { } @Benchmark - fun svdGolabKahan(blackhole: Blackhole) { + fun svdGolubKahan(blackhole: Blackhole) { blackhole.consume( - tensor.svd() + tensor.svdGolubKahan() ) } } \ No newline at end of file diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index 9ae7853a5..8b5e9e470 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -834,6 +834,10 @@ public open class DoubleTensorAlgebra : } override fun StructureND.svd(): Triple { + return this.svdGolubKahan() + } + + public fun StructureND.svdGolubKahan(): Triple { val size = tensor.dimension val commonShape = tensor.shape.sliceArray(0 until size - 2) val (n, m) = tensor.shape.sliceArray(size - 2 until size) @@ -855,7 +859,7 @@ public open class DoubleTensorAlgebra : .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) .toDoubleArray() ) - curMatrix.as2D().svdHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D()) + curMatrix.as2D().svdGolubKahanHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D()) } return Triple(uTensor.transpose(), sTensor, vTensor) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 8c1110f33..a6372bc65 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -372,7 +372,7 @@ private fun SIGN(a: Double, b: Double): Double { else return -abs(a) } -internal fun MutableStructure2D.svdHelper(u: MutableStructure2D, w: BufferedTensor, v: MutableStructure2D) { +internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2D, w: BufferedTensor, v: MutableStructure2D) { val shape = this.shape val m = shape.component1() val n = shape.component2() diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index d3e67fbdc..6c502eed1 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -179,7 +179,7 @@ internal class TestDoubleLinearOpsTensorAlgebra { 2.000000, 3.000000, 4.000000, 3.000000, 4.000000, 5.000000, 4.000000, 5.000000, 6.000000, - 5.000000, 6.000000, 7.000000 + 5.000000, 6.000000, 9.000000 ) testSVDFor(fromArray(intArrayOf(5, 3), buffer1)) val buffer2 = doubleArrayOf( @@ -198,10 +198,52 @@ internal class TestDoubleLinearOpsTensorAlgebra { testSVDFor(tensor2) } + @Test + fun testSVDGolubKahan() = DoubleTensorAlgebra{ + testSVDGolubKahanFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) + testSVDGolubKahanFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) + val buffer1 = doubleArrayOf( + 1.000000, 2.000000, 3.000000, + 2.000000, 3.000000, 4.000000, + 3.000000, 4.000000, 5.000000, + 
4.000000, 5.000000, 6.000000, + 5.000000, 6.000000, 9.000000 + ) + testSVDGolubKahanFor(fromArray(intArrayOf(5, 3), buffer1)) + val buffer2 = doubleArrayOf( + 1.0, 2.0, 3.0, 2.0, 3.0, + 4.0, 3.0, 4.0, 5.0, 4.0, + 5.0, 6.0, 5.0, 6.0, 7.0 + ) + testSVDGolubKahanFor(fromArray(intArrayOf(3, 5), buffer2)) + } + + @Test + fun testBatchedSVDGolubKahan() = DoubleTensorAlgebra{ + val tensor1 = randomNormal(intArrayOf(2, 5, 3), 0) + testSVDGolubKahanFor(tensor1) + val tensor2 = DoubleTensorAlgebra.randomNormal(intArrayOf(30, 30, 30), 0) + testSVDGolubKahanFor(tensor2) + } + @Test fun testSVDPowerMethod() = DoubleTensorAlgebra{ testSVDPowerMethodFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))) testSVDPowerMethodFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0))) + val buffer1 = doubleArrayOf( + 1.000000, 2.000000, 3.000000, + 2.000000, 3.000000, 4.000000, + 3.000000, 4.000000, 5.000000, + 4.000000, 5.000000, 6.000000, + 5.000000, 6.000000, 9.000000 + ) + testSVDPowerMethodFor(fromArray(intArrayOf(5, 3), buffer1)) + val buffer2 = doubleArrayOf( + 1.0, 2.0, 3.0, 2.0, 3.0, + 4.0, 3.0, 4.0, 5.0, 4.0, + 5.0, 6.0, 5.0, 6.0, 7.0 + ) + testSVDPowerMethodFor(fromArray(intArrayOf(3, 5), buffer2)) } @Test @@ -237,6 +279,18 @@ private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor) { assertTrue(tensor.eq(tensorSVD)) } +private fun DoubleTensorAlgebra.testSVDGolubKahanFor(tensor: DoubleTensor) { + val svd = tensor.svdGolubKahan() + + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + + assertTrue(tensor.eq(tensorSVD)) +} + private fun DoubleTensorAlgebra.testSVDPowerMethodFor(tensor: DoubleTensor, epsilon: Double = 1e-10) { val svd = tensor.svdPowerMethod() From 29ed51c50a0cb79c811cf040b086d994366e901d Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Sat, 30 Jul 2022 19:03:43 +0300 Subject: [PATCH 52/59] added new parameter iterations to svdGolubKahan --- .../space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt | 4 ++-- .../space/kscience/kmath/tensors/core/internal/linUtils.kt | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index 8b5e9e470..742a3d7a7 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -837,7 +837,7 @@ public open class DoubleTensorAlgebra : return this.svdGolubKahan() } - public fun StructureND.svdGolubKahan(): Triple { + public fun StructureND.svdGolubKahan(iterations: Int = 30): Triple { val size = tensor.dimension val commonShape = tensor.shape.sliceArray(0 until size - 2) val (n, m) = tensor.shape.sliceArray(size - 2 until size) @@ -859,7 +859,7 @@ public open class DoubleTensorAlgebra : .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) .toDoubleArray() ) - curMatrix.as2D().svdGolubKahanHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D()) + curMatrix.as2D().svdGolubKahanHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D(), iterations) } return Triple(uTensor.transpose(), sTensor, vTensor) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt 
b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index a6372bc65..0f58cf47c 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -372,7 +372,8 @@ private fun SIGN(a: Double, b: Double): Double { else return -abs(a) } -internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2D, w: BufferedTensor, v: MutableStructure2D) { +internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2D, w: BufferedTensor, + v: MutableStructure2D, iterations: Int) { val shape = this.shape val m = shape.component1() val n = shape.component2() @@ -531,7 +532,7 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 var z = 0.0 var x = 0.0 for (k in n - 1 downTo 0) { - for (its in 1 until 30) { + for (its in 1 until iterations) { flag = 1 for (newl in k downTo 0) { nm = newl - 1 From 67eb92357bbf0ba2e13f1a129023031ae47d1790 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Sat, 30 Jul 2022 20:04:12 +0300 Subject: [PATCH 53/59] mutableBuffer setter method now is used in GolubKahan --- .../kmath/tensors/core/internal/linUtils.kt | 30 +++++++++++-------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 0f58cf47c..f6cb82bfd 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -385,6 +385,10 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 var g = 0.0 var l = 0 val epsilon = 1e-10 + + val wStart = w.bufferStart + val wBuffer = w.mutableBuffer + for (i in 0 until n) { /* left-hand reduction */ l = i + 1 @@ -427,7 +431,7 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 } } - w.mutableBuffer.array()[w.bufferStart + i] = scale * g + wBuffer[wStart + i] = scale * g /* right-hand reduction */ g = 0.0 s = 0.0 @@ -468,7 +472,7 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 } } } - anorm = max(anorm, (abs(w.mutableBuffer.array()[w.bufferStart + i]) + abs(rv1[i]))); + anorm = max(anorm, (abs(wBuffer[wStart + i]) + abs(rv1[i]))); } for (i in n - 1 downTo 0) { @@ -497,7 +501,7 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 for (i in min(n, m) - 1 downTo 0) { l = i + 1 - g = w.mutableBuffer.array()[w.bufferStart + i] + g = wBuffer[wStart + i] for (j in l until n) { this[i, j] = 0.0 } @@ -541,7 +545,7 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 l = newl break } - if (abs(w.mutableBuffer.array()[w.bufferStart + nm]) + anorm == anorm) { + if (abs(wBuffer[wStart + nm]) + anorm == anorm) { l = newl break } @@ -556,9 +560,9 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 if (abs(f) + anorm == anorm) { break } - g = w.mutableBuffer.array()[w.bufferStart + i] + g = wBuffer[wStart + i] h = pythag(f, g) - w.mutableBuffer.array()[w.bufferStart + i] = h + wBuffer[wStart + i] = h h = 1.0 / h c = g * h s = (-f) * h @@ -571,19 +575,19 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 } } - z = w.mutableBuffer.array()[w.bufferStart + k] + z = wBuffer[wStart + k] if 
(l == k) { if (z < 0.0) { - w.mutableBuffer.array()[w.bufferStart + k] = -z + wBuffer[wStart + k] = -z for (j in 0 until n) v[j, k] = -v[j, k] } break } - x = w.mutableBuffer.array()[w.bufferStart + l] + x = wBuffer[wStart + l] nm = k - 1 - y = w.mutableBuffer.array()[w.bufferStart + nm] + y = wBuffer[wStart + nm] g = rv1[nm] h = rv1[k] f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y) @@ -596,7 +600,7 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 for (j in l until nm + 1) { i = j + 1 g = rv1[i] - y = w.mutableBuffer.array()[w.bufferStart + i] + y = wBuffer[wStart + i] h = s * g g = c * g z = pythag(f, h) @@ -615,7 +619,7 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 v[jj, i] = z * c - x * s; } z = pythag(f, h) - w.mutableBuffer.array()[w.bufferStart + j] = z + wBuffer[wStart + j] = z if (abs(z) > epsilon) { z = 1.0 / z c = f * z @@ -632,7 +636,7 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 } rv1[l] = 0.0 rv1[k] = f - w.mutableBuffer.array()[w.bufferStart + k] = x + wBuffer[wStart + k] = x } } From 7e7dc9f9aa8221fa0948f444bb42a36714942b7b Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Sat, 30 Jul 2022 21:09:50 +0300 Subject: [PATCH 54/59] added epsilon as a parameter to svdGolubKahan --- .../space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt | 5 +++-- .../space/kscience/kmath/tensors/core/internal/linUtils.kt | 3 +-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index 742a3d7a7..d83c70bd9 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -837,7 +837,7 @@ public open class DoubleTensorAlgebra : return this.svdGolubKahan() } - public fun StructureND.svdGolubKahan(iterations: Int = 30): Triple { + public fun StructureND.svdGolubKahan(iterations: Int = 30, epsilon: Double = 1e-10): Triple { val size = tensor.dimension val commonShape = tensor.shape.sliceArray(0 until size - 2) val (n, m) = tensor.shape.sliceArray(size - 2 until size) @@ -859,7 +859,8 @@ public open class DoubleTensorAlgebra : .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) .toDoubleArray() ) - curMatrix.as2D().svdGolubKahanHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D(), iterations) + curMatrix.as2D().svdGolubKahanHelper(uTensors[index].as2D(), sTensorVectors[index], vTensors[index].as2D(), + iterations, epsilon) } return Triple(uTensor.transpose(), sTensor, vTensor) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index f6cb82bfd..9724fc335 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -373,7 +373,7 @@ private fun SIGN(a: Double, b: Double): Double { return -abs(a) } internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2D, w: BufferedTensor, - v: MutableStructure2D, iterations: Int) { + v: MutableStructure2D, iterations: Int, epsilon: Double) { val shape = this.shape val m = shape.component1() val n 
= shape.component2() @@ -384,7 +384,6 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 var anorm = 0.0 var g = 0.0 var l = 0 - val epsilon = 1e-10 val wStart = w.bufferStart val wBuffer = w.mutableBuffer From 9eb953228afc50ba3243a703d9f6ace328cdae1e Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 2 Aug 2022 19:40:10 +0300 Subject: [PATCH 55/59] mutableBuffer setter method now is used in PowerMethod --- .../kmath/tensors/core/internal/linUtils.kt | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 9724fc335..825c15006 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -308,6 +308,14 @@ internal fun DoubleTensorAlgebra.svdPowerMethodHelper( val res = ArrayList>(0) val (matrixU, matrixS, matrixV) = USV + val matrixUStart = matrixU.bufferStart + val matrixSStart = matrixS.bufferStart + val matrixVStart = matrixV.bufferStart + + val matrixUBuffer = matrixU.mutableBuffer + val matrixSBuffer = matrixS.mutableBuffer + val matrixVBuffer = matrixV.mutableBuffer + for (k in 0 until min(n, m)) { var a = matrix.copy() for ((singularValue, u, v) in res.slice(0 until k)) { @@ -341,13 +349,13 @@ internal fun DoubleTensorAlgebra.svdPowerMethodHelper( val uBuffer = res.map { it.second }.flatMap { it.mutableBuffer.array().toList() }.toDoubleArray() val vBuffer = res.map { it.third }.flatMap { it.mutableBuffer.array().toList() }.toDoubleArray() for (i in uBuffer.indices) { - matrixU.mutableBuffer.array()[matrixU.bufferStart + i] = uBuffer[i] + matrixUBuffer[matrixUStart + i] = uBuffer[i] } for (i in s.indices) { - matrixS.mutableBuffer.array()[matrixS.bufferStart + i] = s[i] + matrixSBuffer[matrixSStart + i] = s[i] } for (i in vBuffer.indices) { - matrixV.mutableBuffer.array()[matrixV.bufferStart + i] = vBuffer[i] + matrixVBuffer[matrixVStart + i] = vBuffer[i] } } From 9cc9d959c46659b5a1bbd7d4ca372a7579617b3a Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 2 Aug 2022 20:03:06 +0300 Subject: [PATCH 56/59] mutableBuffer setter method now is used in other places of the library --- .../core/BroadcastDoubleTensorAlgebra.kt | 28 ++++---- .../kmath/tensors/core/DoubleTensorAlgebra.kt | 67 ++++++++++--------- .../core/TestDoubleLinearOpsAlgebra.kt | 7 +- 3 files changed, 54 insertions(+), 48 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BroadcastDoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BroadcastDoubleTensorAlgebra.kt index e412ab5bb..afa03d5a7 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BroadcastDoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BroadcastDoubleTensorAlgebra.kt @@ -27,7 +27,7 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { val newThis = broadcast[0] val newOther = broadcast[1] val resBuffer = DoubleArray(newThis.indices.linearSize) { i -> - newThis.mutableBuffer.array()[i] + newOther.mutableBuffer.array()[i] + newThis.mutableBuffer[i] + newOther.mutableBuffer[i] } return DoubleTensor(newThis.shape, resBuffer) } @@ -35,8 +35,8 @@ public object BroadcastDoubleTensorAlgebra : 
DoubleTensorAlgebra() { override fun Tensor.plusAssign(arg: StructureND) { val newOther = broadcastTo(arg.tensor, tensor.shape) for (i in 0 until tensor.indices.linearSize) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] += - newOther.mutableBuffer.array()[tensor.bufferStart + i] + tensor.mutableBuffer[tensor.bufferStart + i] += + newOther.mutableBuffer[tensor.bufferStart + i] } } @@ -45,7 +45,7 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { val newThis = broadcast[0] val newOther = broadcast[1] val resBuffer = DoubleArray(newThis.indices.linearSize) { i -> - newThis.mutableBuffer.array()[i] - newOther.mutableBuffer.array()[i] + newThis.mutableBuffer[i] - newOther.mutableBuffer[i] } return DoubleTensor(newThis.shape, resBuffer) } @@ -53,8 +53,8 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { override fun Tensor.minusAssign(arg: StructureND) { val newOther = broadcastTo(arg.tensor, tensor.shape) for (i in 0 until tensor.indices.linearSize) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] -= - newOther.mutableBuffer.array()[tensor.bufferStart + i] + tensor.mutableBuffer[tensor.bufferStart + i] -= + newOther.mutableBuffer[tensor.bufferStart + i] } } @@ -63,8 +63,8 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { val newThis = broadcast[0] val newOther = broadcast[1] val resBuffer = DoubleArray(newThis.indices.linearSize) { i -> - newThis.mutableBuffer.array()[newThis.bufferStart + i] * - newOther.mutableBuffer.array()[newOther.bufferStart + i] + newThis.mutableBuffer[newThis.bufferStart + i] * + newOther.mutableBuffer[newOther.bufferStart + i] } return DoubleTensor(newThis.shape, resBuffer) } @@ -72,8 +72,8 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { override fun Tensor.timesAssign(arg: StructureND) { val newOther = broadcastTo(arg.tensor, tensor.shape) for (i in 0 until tensor.indices.linearSize) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] *= - newOther.mutableBuffer.array()[tensor.bufferStart + i] + tensor.mutableBuffer[tensor.bufferStart + i] *= + newOther.mutableBuffer[tensor.bufferStart + i] } } @@ -82,8 +82,8 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { val newThis = broadcast[0] val newOther = broadcast[1] val resBuffer = DoubleArray(newThis.indices.linearSize) { i -> - newThis.mutableBuffer.array()[newOther.bufferStart + i] / - newOther.mutableBuffer.array()[newOther.bufferStart + i] + newThis.mutableBuffer[newOther.bufferStart + i] / + newOther.mutableBuffer[newOther.bufferStart + i] } return DoubleTensor(newThis.shape, resBuffer) } @@ -91,8 +91,8 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { override fun Tensor.divAssign(arg: StructureND) { val newOther = broadcastTo(arg.tensor, tensor.shape) for (i in 0 until tensor.indices.linearSize) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] /= - newOther.mutableBuffer.array()[tensor.bufferStart + i] + tensor.mutableBuffer[tensor.bufferStart + i] /= + newOther.mutableBuffer[tensor.bufferStart + i] } } } diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index d83c70bd9..9e1c3942c 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -89,7 +89,7 @@ public 
open class DoubleTensorAlgebra : } override fun StructureND.valueOrNull(): Double? = if (tensor.shape contentEquals intArrayOf(1)) - tensor.mutableBuffer.array()[tensor.bufferStart] else null + tensor.mutableBuffer[tensor.bufferStart] else null override fun StructureND.value(): Double = valueOrNull() ?: throw IllegalArgumentException("The tensor shape is $shape, but value method is allowed only for shape [1]") @@ -208,7 +208,7 @@ public open class DoubleTensorAlgebra : override fun Double.plus(arg: StructureND): DoubleTensor { val resBuffer = DoubleArray(arg.tensor.numElements) { i -> - arg.tensor.mutableBuffer.array()[arg.tensor.bufferStart + i] + this + arg.tensor.mutableBuffer[arg.tensor.bufferStart + i] + this } return DoubleTensor(arg.shape, resBuffer) } @@ -218,35 +218,35 @@ public open class DoubleTensorAlgebra : override fun StructureND.plus(arg: StructureND): DoubleTensor { checkShapesCompatible(tensor, arg.tensor) val resBuffer = DoubleArray(tensor.numElements) { i -> - tensor.mutableBuffer.array()[i] + arg.tensor.mutableBuffer.array()[i] + tensor.mutableBuffer[i] + arg.tensor.mutableBuffer[i] } return DoubleTensor(tensor.shape, resBuffer) } override fun Tensor.plusAssign(value: Double) { for (i in 0 until tensor.numElements) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] += value + tensor.mutableBuffer[tensor.bufferStart + i] += value } } override fun Tensor.plusAssign(arg: StructureND) { checkShapesCompatible(tensor, arg.tensor) for (i in 0 until tensor.numElements) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] += - arg.tensor.mutableBuffer.array()[tensor.bufferStart + i] + tensor.mutableBuffer[tensor.bufferStart + i] += + arg.tensor.mutableBuffer[tensor.bufferStart + i] } } override fun Double.minus(arg: StructureND): DoubleTensor { val resBuffer = DoubleArray(arg.tensor.numElements) { i -> - this - arg.tensor.mutableBuffer.array()[arg.tensor.bufferStart + i] + this - arg.tensor.mutableBuffer[arg.tensor.bufferStart + i] } return DoubleTensor(arg.shape, resBuffer) } override fun StructureND.minus(arg: Double): DoubleTensor { val resBuffer = DoubleArray(tensor.numElements) { i -> - tensor.mutableBuffer.array()[tensor.bufferStart + i] - arg + tensor.mutableBuffer[tensor.bufferStart + i] - arg } return DoubleTensor(tensor.shape, resBuffer) } @@ -254,28 +254,28 @@ public open class DoubleTensorAlgebra : override fun StructureND.minus(arg: StructureND): DoubleTensor { checkShapesCompatible(tensor, arg) val resBuffer = DoubleArray(tensor.numElements) { i -> - tensor.mutableBuffer.array()[i] - arg.tensor.mutableBuffer.array()[i] + tensor.mutableBuffer[i] - arg.tensor.mutableBuffer[i] } return DoubleTensor(tensor.shape, resBuffer) } override fun Tensor.minusAssign(value: Double) { for (i in 0 until tensor.numElements) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] -= value + tensor.mutableBuffer[tensor.bufferStart + i] -= value } } override fun Tensor.minusAssign(arg: StructureND) { checkShapesCompatible(tensor, arg) for (i in 0 until tensor.numElements) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] -= - arg.tensor.mutableBuffer.array()[tensor.bufferStart + i] + tensor.mutableBuffer[tensor.bufferStart + i] -= + arg.tensor.mutableBuffer[tensor.bufferStart + i] } } override fun Double.times(arg: StructureND): DoubleTensor { val resBuffer = DoubleArray(arg.tensor.numElements) { i -> - arg.tensor.mutableBuffer.array()[arg.tensor.bufferStart + i] * this + arg.tensor.mutableBuffer[arg.tensor.bufferStart + i] * this } return DoubleTensor(arg.shape, 
resBuffer) } @@ -285,36 +285,36 @@ public open class DoubleTensorAlgebra : override fun StructureND.times(arg: StructureND): DoubleTensor { checkShapesCompatible(tensor, arg) val resBuffer = DoubleArray(tensor.numElements) { i -> - tensor.mutableBuffer.array()[tensor.bufferStart + i] * - arg.tensor.mutableBuffer.array()[arg.tensor.bufferStart + i] + tensor.mutableBuffer[tensor.bufferStart + i] * + arg.tensor.mutableBuffer[arg.tensor.bufferStart + i] } return DoubleTensor(tensor.shape, resBuffer) } override fun Tensor.timesAssign(value: Double) { for (i in 0 until tensor.numElements) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] *= value + tensor.mutableBuffer[tensor.bufferStart + i] *= value } } override fun Tensor.timesAssign(arg: StructureND) { checkShapesCompatible(tensor, arg) for (i in 0 until tensor.numElements) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] *= - arg.tensor.mutableBuffer.array()[tensor.bufferStart + i] + tensor.mutableBuffer[tensor.bufferStart + i] *= + arg.tensor.mutableBuffer[tensor.bufferStart + i] } } override fun Double.div(arg: StructureND): DoubleTensor { val resBuffer = DoubleArray(arg.tensor.numElements) { i -> - this / arg.tensor.mutableBuffer.array()[arg.tensor.bufferStart + i] + this / arg.tensor.mutableBuffer[arg.tensor.bufferStart + i] } return DoubleTensor(arg.shape, resBuffer) } override fun StructureND.div(arg: Double): DoubleTensor { val resBuffer = DoubleArray(tensor.numElements) { i -> - tensor.mutableBuffer.array()[tensor.bufferStart + i] / arg + tensor.mutableBuffer[tensor.bufferStart + i] / arg } return DoubleTensor(shape, resBuffer) } @@ -322,29 +322,29 @@ public open class DoubleTensorAlgebra : override fun StructureND.div(arg: StructureND): DoubleTensor { checkShapesCompatible(tensor, arg) val resBuffer = DoubleArray(tensor.numElements) { i -> - tensor.mutableBuffer.array()[arg.tensor.bufferStart + i] / - arg.tensor.mutableBuffer.array()[arg.tensor.bufferStart + i] + tensor.mutableBuffer[arg.tensor.bufferStart + i] / + arg.tensor.mutableBuffer[arg.tensor.bufferStart + i] } return DoubleTensor(tensor.shape, resBuffer) } override fun Tensor.divAssign(value: Double) { for (i in 0 until tensor.numElements) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] /= value + tensor.mutableBuffer[tensor.bufferStart + i] /= value } } override fun Tensor.divAssign(arg: StructureND) { checkShapesCompatible(tensor, arg) for (i in 0 until tensor.numElements) { - tensor.mutableBuffer.array()[tensor.bufferStart + i] /= - arg.tensor.mutableBuffer.array()[tensor.bufferStart + i] + tensor.mutableBuffer[tensor.bufferStart + i] /= + arg.tensor.mutableBuffer[tensor.bufferStart + i] } } override fun StructureND.unaryMinus(): DoubleTensor { val resBuffer = DoubleArray(tensor.numElements) { i -> - tensor.mutableBuffer.array()[tensor.bufferStart + i].unaryMinus() + tensor.mutableBuffer[tensor.bufferStart + i].unaryMinus() } return DoubleTensor(tensor.shape, resBuffer) } @@ -367,8 +367,8 @@ public open class DoubleTensorAlgebra : newMultiIndex[ii] = newMultiIndex[jj].also { newMultiIndex[jj] = newMultiIndex[ii] } val linearIndex = resTensor.indices.offset(newMultiIndex) - resTensor.mutableBuffer.array()[linearIndex] = - tensor.mutableBuffer.array()[tensor.bufferStart + offset] + resTensor.mutableBuffer[linearIndex] = + tensor.mutableBuffer[tensor.bufferStart + offset] } return resTensor } @@ -958,18 +958,21 @@ public open class DoubleTensorAlgebra : var eigenvalueStart = 0 var eigenvectorStart = 0 + val eigenvaluesBuffer = 
eigenvalues.mutableBuffer + val eigenvectorsBuffer = eigenvectors.mutableBuffer + for (matrix in tensor.matrixSequence()) { val matrix2D = matrix.as2D() val (d, v) = matrix2D.jacobiHelper(maxIteration, epsilon) for (i in 0 until matrix2D.rowNum) { for (j in 0 until matrix2D.colNum) { - eigenvectors.mutableBuffer.array()[eigenvectorStart + i * matrix2D.rowNum + j] = v[i, j] + eigenvectorsBuffer[eigenvectorStart + i * matrix2D.rowNum + j] = v[i, j] } } for (i in 0 until matrix2D.rowNum) { - eigenvalues.mutableBuffer.array()[eigenvalueStart + i] = d[i] + eigenvaluesBuffer[eigenvalueStart + i] = d[i] } eigenvalueStart += this.shape.last() @@ -992,11 +995,11 @@ public open class DoubleTensorAlgebra : // assume that buffered tensor is square matrix operator fun BufferedTensor.get(i: Int, j: Int): Double { - return this.mutableBuffer.array()[bufferStart + i * this.shape[0] + j] + return this.mutableBuffer[bufferStart + i * this.shape[0] + j] } operator fun BufferedTensor.set(i: Int, j: Int, value: Double) { - this.mutableBuffer.array()[bufferStart + i * this.shape[0] + j] = value + this.mutableBuffer[bufferStart + i * this.shape[0] + j] = value } fun maxOffDiagonal(matrix: BufferedTensor): Double { diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index 6c502eed1..87050c405 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -156,9 +156,12 @@ internal class TestDoubleLinearOpsTensorAlgebra { val res = svd1d(tensor2) + val resBuffer = res.mutableBuffer + val resStart = res.bufferStart + assertTrue(res.shape contentEquals intArrayOf(2)) - assertTrue { abs(abs(res.mutableBuffer.array()[res.bufferStart]) - 0.386) < 0.01 } - assertTrue { abs(abs(res.mutableBuffer.array()[res.bufferStart + 1]) - 0.922) < 0.01 } + assertTrue { abs(abs(resBuffer[resStart]) - 0.386) < 0.01 } + assertTrue { abs(abs(resBuffer[resStart + 1]) - 0.922) < 0.01 } } @Test From 8bfa07cc2759ad6a23af6f7aa5cd913e23fb455a Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 2 Aug 2022 20:39:31 +0300 Subject: [PATCH 57/59] added epsilon for calculating the accuracy of calculations Golub Kahan --- .../kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt index 87050c405..181b3f4fe 100644 --- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt +++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt @@ -282,7 +282,7 @@ private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor) { assertTrue(tensor.eq(tensorSVD)) } -private fun DoubleTensorAlgebra.testSVDGolubKahanFor(tensor: DoubleTensor) { +private fun DoubleTensorAlgebra.testSVDGolubKahanFor(tensor: DoubleTensor, epsilon: Double = 1e-10) { val svd = tensor.svdGolubKahan() val tensorSVD = svd.first @@ -291,7 +291,7 @@ private fun DoubleTensorAlgebra.testSVDGolubKahanFor(tensor: DoubleTensor) { .dot(svd.third.transpose()) ) - assertTrue(tensor.eq(tensorSVD)) + 
assertTrue(tensor.eq(tensorSVD, epsilon)) } private fun DoubleTensorAlgebra.testSVDPowerMethodFor(tensor: DoubleTensor, epsilon: Double = 1e-10) { From 38ed6dd995f9c7c0e6e00d5aeff56964978ec774 Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 2 Aug 2022 20:59:11 +0300 Subject: [PATCH 58/59] added benchmarks with different sizes, added check for accuracy --- .../kscience/kmath/benchmarks/SVDBenchmark.kt | 117 +++++++++++++++++- 1 file changed, 112 insertions(+), 5 deletions(-) diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt index 6879960d7..ee581f778 100644 --- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt +++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/SVDBenchmark.kt @@ -8,27 +8,134 @@ import kotlinx.benchmark.Benchmark import kotlinx.benchmark.Blackhole import kotlinx.benchmark.Scope import kotlinx.benchmark.State +import org.ejml.UtilEjml.assertTrue +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.diagonalEmbedding +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.dot +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.eq import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.svdGolubKahan +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.transpose import space.kscience.kmath.tensors.core.DoubleTensorAlgebra import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.svdPowerMethod @State(Scope.Benchmark) class SVDBenchmark { companion object { - val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(10, 10, 10), 0) + val tensorSmall = DoubleTensorAlgebra.randomNormal(intArrayOf(5, 5), 0) + val tensorMedium = DoubleTensorAlgebra.randomNormal(intArrayOf(10, 10), 0) + val tensorLarge = DoubleTensorAlgebra.randomNormal(intArrayOf(50, 50), 0) + val tensorVeryLarge = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100), 0) + val epsilon = 1e-9 } @Benchmark - fun svdPowerMethod(blackhole: Blackhole) { + fun svdPowerMethodSmall(blackhole: Blackhole) { + val svd = tensorSmall.svdPowerMethod() + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + assertTrue(tensorSVD.eq(tensorSmall, epsilon)) blackhole.consume( - tensor.svdPowerMethod() + tensorSmall.svdPowerMethod() ) } @Benchmark - fun svdGolubKahan(blackhole: Blackhole) { + fun svdPowerMethodMedium(blackhole: Blackhole) { + val svd = tensorMedium.svdPowerMethod() + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + assertTrue(tensorSVD.eq(tensorMedium, epsilon)) blackhole.consume( - tensor.svdGolubKahan() + tensorMedium.svdPowerMethod() + ) + } + + @Benchmark + fun svdPowerMethodLarge(blackhole: Blackhole) { + val svd = tensorLarge.svdPowerMethod() + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + assertTrue(tensorSVD.eq(tensorLarge, epsilon)) + blackhole.consume( + tensorLarge.svdPowerMethod() + ) + } + + @Benchmark + fun svdPowerMethodVeryLarge(blackhole: Blackhole) { + val svd = tensorVeryLarge.svdPowerMethod() + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + assertTrue(tensorSVD.eq(tensorVeryLarge, epsilon)) + blackhole.consume( + tensorVeryLarge.svdPowerMethod() + ) + } + + @Benchmark + fun svdGolubKahanSmall(blackhole: Blackhole) { + val svd 
= tensorSmall.svdGolubKahan() + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + assertTrue(tensorSVD.eq(tensorSmall, epsilon)) + blackhole.consume( + tensorSmall.svdGolubKahan() + ) + } + + @Benchmark + fun svdGolubKahanMedium(blackhole: Blackhole) { + val svd = tensorMedium.svdGolubKahan() + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + assertTrue(tensorSVD.eq(tensorMedium, epsilon)) + blackhole.consume( + tensorMedium.svdGolubKahan() + ) + } + + @Benchmark + fun svdGolubKahanLarge(blackhole: Blackhole) { + val svd = tensorLarge.svdGolubKahan() + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + assertTrue(tensorSVD.eq(tensorLarge, epsilon)) + blackhole.consume( + tensorLarge.svdGolubKahan() + ) + } + + @Benchmark + fun svdGolubKahanVeryLarge(blackhole: Blackhole) { + val svd = tensorVeryLarge.svdGolubKahan() + val tensorSVD = svd.first + .dot( + diagonalEmbedding(svd.second) + .dot(svd.third.transpose()) + ) + assertTrue(tensorSVD.eq(tensorVeryLarge, epsilon)) + blackhole.consume( + tensorVeryLarge.svdGolubKahan() ) } } \ No newline at end of file From 68609d78468f7f72b94b33a332d8c9838d0fa08b Mon Sep 17 00:00:00 2001 From: margarita0303 Date: Tue, 2 Aug 2022 23:01:11 +0300 Subject: [PATCH 59/59] removed transposition at the end of the svdGolubKahan --- .../kscience/kmath/tensors/core/DoubleTensorAlgebra.kt | 4 ++-- .../space/kscience/kmath/tensors/core/internal/linUtils.kt | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index 9e1c3942c..8aa3ee76b 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -841,7 +841,7 @@ public open class DoubleTensorAlgebra : val size = tensor.dimension val commonShape = tensor.shape.sliceArray(0 until size - 2) val (n, m) = tensor.shape.sliceArray(size - 2 until size) - val uTensor = zeros(commonShape + intArrayOf(m, n)) + val uTensor = zeros(commonShape + intArrayOf(n, m)) val sTensor = zeros(commonShape + intArrayOf(m)) val vTensor = zeros(commonShape + intArrayOf(m, m)) @@ -863,7 +863,7 @@ public open class DoubleTensorAlgebra : iterations, epsilon) } - return Triple(uTensor.transpose(), sTensor, vTensor) + return Triple(uTensor, sTensor, vTensor) } /** diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index 825c15006..bb75d1069 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -647,9 +647,9 @@ internal fun MutableStructure2D.svdGolubKahanHelper(u: MutableStructure2 } } - for (i in 0 until m) { - for (j in 0 until n) { - u[j, i] = this[i, j] + for (i in 0 until n) { + for (j in 0 until m) { + u[j, i] = this[j, i] } } }
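
A minimal usage sketch of the two SVD entry points that emerge from this series, written in the same style as the tests above. It is illustrative only, not part of the patches: it assumes the kmath-tensors module from this branch is on the classpath and that the usual algebra-scope invoke extension (space.kscience.kmath.operations.invoke) is available, so the imports and the main wrapper are assumptions. The calls themselves mirror the signatures added here: svd() now delegates to svdGolubKahan(iterations = 30, epsilon = 1e-10), while the previous algorithm stays reachable as svdPowerMethod().

import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

// Illustrative sketch (not from the patches): reconstructing A from U, S, V
// with both decompositions, as the tests and benchmarks above do.
fun main() = DoubleTensorAlgebra {
    // Random 5x3 matrix with seed 0, matching the shapes used in the tests.
    val a = randomNormal(intArrayOf(5, 3), 0)

    // Golub-Kahan SVD: the new default behind svd(); iteration count and
    // convergence epsilon are now explicit, tunable parameters.
    val (u, s, v) = a.svdGolubKahan(iterations = 30, epsilon = 1e-10)
    val rebuilt = u dot (diagonalEmbedding(s) dot v.transpose())
    println(a.eq(rebuilt, 1e-10)) // expected: true

    // Power-method SVD remains available separately, e.g. for benchmarking.
    val (u2, s2, v2) = a.svdPowerMethod()
    println(a.eq(u2 dot (diagonalEmbedding(s2) dot v2.transpose()), 1e-10)) // expected: true
}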