diff --git a/benchmarks/build.gradle.kts b/benchmarks/build.gradle.kts
index e46f4a9b4..f8d39b9c5 100644
--- a/benchmarks/build.gradle.kts
+++ b/benchmarks/build.gradle.kts
@@ -124,6 +124,11 @@ benchmark {
         include("JafamaBenchmark")
     }
 
+    configurations.register("tensorAlgebra") {
+        commonConfiguration()
+        include("TensorAlgebraBenchmark")
+    }
+
     configurations.register("viktor") {
         commonConfiguration()
         include("ViktorBenchmark")
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
index 4a5cd4aa2..16fd544a8 100644
--- a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/DotBenchmark.kt
@@ -15,10 +15,8 @@ import space.kscience.kmath.linear.invoke
 import space.kscience.kmath.linear.linearSpace
 import space.kscience.kmath.multik.multikAlgebra
 import space.kscience.kmath.operations.DoubleField
-import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.structures.Buffer
 import space.kscience.kmath.tensorflow.produceWithTF
-import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
 import space.kscience.kmath.tensors.core.tensorAlgebra
 import kotlin.random.Random
 
@@ -36,9 +34,6 @@ internal class DotBenchmark {
             random.nextDouble()
         }
 
-        val tensor1 = DoubleTensorAlgebra.randomNormal(shape = intArrayOf(dim, dim), 12224)
-        val tensor2 = DoubleTensorAlgebra.randomNormal(shape = intArrayOf(dim, dim), 12225)
-
         val cmMatrix1 = CMLinearSpace { matrix1.toCM() }
         val cmMatrix2 = CMLinearSpace { matrix2.toCM() }
 
@@ -48,10 +43,10 @@ internal class DotBenchmark {
     }
 
     @Benchmark
-    fun tfDot(blackhole: Blackhole){
+    fun tfDot(blackhole: Blackhole) {
         blackhole.consume(
             DoubleField.produceWithTF {
-                tensor1 dot tensor2
+                matrix1 dot matrix1
             }
         )
     }
@@ -95,9 +90,4 @@ internal class DotBenchmark {
     fun doubleDot(blackhole: Blackhole) = with(DoubleField.linearSpace) {
         blackhole.consume(matrix1 dot matrix2)
     }
-
-    @Benchmark
-    fun doubleTensorDot(blackhole: Blackhole) = DoubleTensorAlgebra.invoke {
-        blackhole.consume(tensor1 dot tensor2)
-    }
 }
diff --git a/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/TensorAlgebraBenchmark.kt b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/TensorAlgebraBenchmark.kt
new file mode 100644
index 000000000..38e064e53
--- /dev/null
+++ b/benchmarks/src/jvmMain/kotlin/space/kscience/kmath/benchmarks/TensorAlgebraBenchmark.kt
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.benchmarks
+
+import kotlinx.benchmark.Benchmark
+import kotlinx.benchmark.Blackhole
+import kotlinx.benchmark.Scope
+import kotlinx.benchmark.State
+import space.kscience.kmath.linear.linearSpace
+import space.kscience.kmath.linear.matrix
+import space.kscience.kmath.linear.symmetric
+import space.kscience.kmath.operations.DoubleField
+import space.kscience.kmath.tensors.core.tensorAlgebra
+import kotlin.random.Random
+
+@State(Scope.Benchmark)
+internal class TensorAlgebraBenchmark {
+    companion object {
+        private val random = Random(12224)
+        private const val dim = 30
+
+        private val matrix = DoubleField.linearSpace.matrix(dim, dim).symmetric { _, _ -> random.nextDouble() }
+    }
+
+    @Benchmark
+    fun tensorSymEigSvd(blackhole: Blackhole) = with(Double.tensorAlgebra) {
+        blackhole.consume(matrix.symEigSvd(1e-10))
+    }
+
+    @Benchmark
+    fun tensorSymEigJacobi(blackhole: Blackhole) = with(Double.tensorAlgebra) {
+        blackhole.consume(matrix.symEigJacobi(50, 1e-10))
+    }
+}
\ No newline at end of file
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt
index ebe7a10b6..e9dc34748 100644
--- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt
@@ -9,10 +9,7 @@
 package space.kscience.kmath.tensors.core
 
 import space.kscience.kmath.misc.PerformancePitfall
-import space.kscience.kmath.nd.MutableStructure2D
-import space.kscience.kmath.nd.StructureND
-import space.kscience.kmath.nd.as1D
-import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.nd.*
 import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.structures.MutableBuffer
 import space.kscience.kmath.structures.indices
@@ -885,7 +882,7 @@
         return Triple(uTensor.transpose(), sTensor, vTensor.transpose())
     }
 
-    override fun StructureND<Double>.symEig(): Pair<DoubleTensor, DoubleTensor> = symEig(epsilon = 1e-15)
+    override fun StructureND<Double>.symEig(): Pair<DoubleTensor, DoubleTensor> = symEigJacobi(maxIteration = 50, epsilon = 1e-15)
 
     /**
      * Returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices,
@@ -895,7 +892,7 @@
      * @param epsilon the permissible error when comparing tensors for equality
      *      and when the cosine approaches 1 in the SVD algorithm.
      * @return a pair `eigenvalues to eigenvectors`.
      */
-    public fun StructureND<Double>.symEig(epsilon: Double): Pair<DoubleTensor, DoubleTensor> {
+    public fun StructureND<Double>.symEigSvd(epsilon: Double): Pair<DoubleTensor, DoubleTensor> {
         checkSymmetric(tensor, epsilon)
 
         fun MutableStructure2D<Double>.cleanSym(n: Int) {
@@ -922,6 +919,151 @@ public open class DoubleTensorAlgebra :
         return eig to v
     }
 
+    public fun StructureND<Double>.symEigJacobi(maxIteration: Int, epsilon: Double): Pair<DoubleTensor, DoubleTensor> {
+        checkSymmetric(tensor, epsilon)
+
+        val size = this.dimension
+        val eigenvectors = zeros(this.shape)
+        val eigenvalues = zeros(this.shape.sliceArray(0 until size - 1))
+
+        var eigenvalueStart = 0
+        var eigenvectorStart = 0
+        for (matrix in tensor.matrixSequence()) {
+            val matrix2D = matrix.as2D()
+            val (d, v) = matrix2D.jacobiHelper(maxIteration, epsilon)
+
+            for (i in 0 until matrix2D.rowNum) {
+                for (j in 0 until matrix2D.colNum) {
+                    eigenvectors.mutableBuffer.array()[eigenvectorStart + i * matrix2D.rowNum + j] = v[i, j]
+                }
+            }
+
+            for (i in 0 until matrix2D.rowNum) {
+                eigenvalues.mutableBuffer.array()[eigenvalueStart + i] = d[i]
+            }
+
+            eigenvalueStart += this.shape.last()
+            eigenvectorStart += this.shape.last() * this.shape.last()
+        }
+
+        return eigenvalues to eigenvectors
+    }
+
+    private fun MutableStructure2D<Double>.jacobiHelper(
+        maxIteration: Int,
+        epsilon: Double
+    ): Pair<Structure1D<Double>, Structure2D<Double>> {
+        val n = this.shape[0]
+        val A_ = this.copy()
+        val V = eye(n)
+        val D = DoubleTensor(intArrayOf(n), (0 until this.rowNum).map { this[it, it] }.toDoubleArray()).as1D()
+        val B = DoubleTensor(intArrayOf(n), (0 until this.rowNum).map { this[it, it] }.toDoubleArray()).as1D()
+        val Z = zeros(intArrayOf(n)).as1D()
+
+        // assume that buffered tensor is square matrix
+        operator fun BufferedTensor<Double>.get(i: Int, j: Int): Double {
+            return this.mutableBuffer.array()[bufferStart + i * this.shape[0] + j]
+        }
+
+        operator fun BufferedTensor<Double>.set(i: Int, j: Int, value: Double) {
+            this.mutableBuffer.array()[bufferStart + i * this.shape[0] + j] = value
+        }
+
+        fun maxOffDiagonal(matrix: BufferedTensor<Double>): Double {
+            var maxOffDiagonalElement = 0.0
+            for (i in 0 until n - 1) {
+                for (j in i + 1 until n) {
+                    maxOffDiagonalElement = max(maxOffDiagonalElement, abs(matrix[i, j]))
+                }
+            }
+            return maxOffDiagonalElement
+        }
+
+        fun rotate(a: BufferedTensor<Double>, s: Double, tau: Double, i: Int, j: Int, k: Int, l: Int) {
+            val g = a[i, j]
+            val h = a[k, l]
+            a[i, j] = g - s * (h + g * tau)
+            a[k, l] = h + s * (g - h * tau)
+        }
+
+        fun jacobiIteration(
+            a: BufferedTensor<Double>,
+            v: BufferedTensor<Double>,
+            d: MutableStructure1D<Double>,
+            z: MutableStructure1D<Double>,
+        ) {
+            for (ip in 0 until n - 1) {
+                for (iq in ip + 1 until n) {
+                    val g = 100.0 * abs(a[ip, iq])
+
+                    if (g <= epsilon * abs(d[ip]) && g <= epsilon * abs(d[iq])) {
+                        a[ip, iq] = 0.0
+                        continue
+                    }
+
+                    var h = d[iq] - d[ip]
+                    val t = when {
+                        g <= epsilon * abs(h) -> (a[ip, iq]) / h
+                        else -> {
+                            val theta = 0.5 * h / (a[ip, iq])
+                            val denominator = abs(theta) + sqrt(1.0 + theta * theta)
+                            if (theta < 0.0) -1.0 / denominator else 1.0 / denominator
+                        }
+                    }
+
+                    val c = 1.0 / sqrt(1 + t * t)
+                    val s = t * c
+                    val tau = s / (1.0 + c)
+                    h = t * a[ip, iq]
+                    z[ip] -= h
+                    z[iq] += h
+                    d[ip] -= h
+                    d[iq] += h
+                    a[ip, iq] = 0.0
+
+                    for (j in 0 until ip) {
+                        rotate(a, s, tau, j, ip, j, iq)
+                    }
+                    for (j in (ip + 1) until iq) {
+                        rotate(a, s, tau, ip, j, j, iq)
+                    }
+                    for (j in (iq + 1) until n) {
+                        rotate(a, s, tau, ip, j, iq, j)
+                    }
+                    for (j in 0 until n) {
+                        rotate(v, s, tau, j, ip, j, iq)
+                    }
+                }
+            }
+        }
+
+        fun updateDiagonal(
+            d: MutableStructure1D<Double>,
+            z: MutableStructure1D<Double>,
+            b: MutableStructure1D<Double>,
+        ) {
+            for (ip in 0 until d.size) {
+                b[ip] += z[ip]
+                d[ip] = b[ip]
+                z[ip] = 0.0
+            }
+        }
+
+        var sm = maxOffDiagonal(A_)
+        for (iteration in 0 until maxIteration) {
+            if (sm < epsilon) {
+                break
+            }
+
+            jacobiIteration(A_, V, D, Z)
+            updateDiagonal(D, Z, B)
+            sm = maxOffDiagonal(A_)
+        }
+
+        // TODO sort eigenvalues
+        return D to V.as2D()
+    }
+
     /**
      * Computes the determinant of a square matrix input, or of each square matrix in a batched input
      * using LU factorization algorithm.
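
Reviewer note (not part of the patch): a minimal usage sketch of the API surface introduced above, mirroring TensorAlgebraBenchmark. The matrix construction, the tolerance 1e-10, and the iteration cap 50 are taken from the diff; the main wrapper, the smaller dimension, and the printed shapes are illustrative assumptions only.

import space.kscience.kmath.linear.linearSpace
import space.kscience.kmath.linear.matrix
import space.kscience.kmath.linear.symmetric
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.tensors.core.tensorAlgebra
import kotlin.random.Random

fun main() {
    val random = Random(12224)
    // Build a small random symmetric matrix, the same way TensorAlgebraBenchmark does.
    val matrix = DoubleField.linearSpace.matrix(10, 10).symmetric { _, _ -> random.nextDouble() }

    with(Double.tensorAlgebra) {
        // New explicit entry points: the caller chooses the algorithm and its parameters.
        val (jacobiValues, jacobiVectors) = matrix.symEigJacobi(maxIteration = 50, epsilon = 1e-10)
        val (svdValues, svdVectors) = matrix.symEigSvd(1e-10)

        // The parameterless symEig() now delegates to the Jacobi path
        // (maxIteration = 50, epsilon = 1e-15), per the override in this patch.
        val (values, vectors) = matrix.symEig()

        println(values.shape.contentToString())  // eigenvalues tensor, shape [10]
        println(vectors.shape.contentToString()) // eigenvectors tensor, shape [10, 10]
    }
}

Since the suite is registered as its own kotlinx-benchmark configuration in benchmarks/build.gradle.kts, the plugin's usual naming convention should expose a dedicated Gradle task for it (something like `tensorAlgebraBenchmark`); the exact task name is assumed from that convention, not stated in the patch.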
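
A second reviewer note on jacobiIteration: it performs the standard cyclic Jacobi rotation, written here with the same local names (theta, t, c, s, tau); the notation is mine, not taken from the source. For the pivot a_{pq} (a[ip, iq] in the code) with current diagonal estimates d_p, d_q:

\theta = \cot 2\varphi = \frac{d_q - d_p}{2\,a_{pq}}, \qquad
t = \tan \varphi = \frac{\operatorname{sgn}\theta}{|\theta| + \sqrt{\theta^{2} + 1}}, \qquad
c = \frac{1}{\sqrt{1 + t^{2}}}, \qquad s = t\,c, \qquad \tau = \tan\frac{\varphi}{2} = \frac{s}{1 + c}.

Each rotate call then applies the numerically stable update g \leftarrow g - s\,(h + g\tau), h \leftarrow h + s\,(g - h\tau) to the affected pair of entries of A_ and V, the pivot itself is zeroed directly (a[ip, iq] = 0.0), and the diagonal estimates are shifted by h = t\,a_{pq}. The branch g <= epsilon * abs(h) is the large-\theta shortcut t \approx a_{pq} / (d_q - d_p).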