Feature/tensors performance #497
@@ -161,76 +161,6 @@ internal class TestDoubleLinearOpsTensorAlgebra {
        assertTrue { abs(abs(res.mutableBuffer.array()[res.bufferStart + 1]) - 0.922) < 0.01 }
    }

    @Test
    fun testSVD() = DoubleTensorAlgebra{
        testSVDFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)))
        testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0)))
    }

//    @Test
//    fun testSVDError() = DoubleTensorAlgebra{
//        val buffer = doubleArrayOf(
//            1.000000, 2.000000, 3.000000,
//            2.000000, 3.000000, 4.000000,
//            3.000000, 4.000000, 5.000000,
//            4.000000, 5.000000, 6.000000,
//            5.000000, 6.000000, 7.000000
//        )
//        testSVDFor(fromArray(intArrayOf(5, 3), buffer))
//    }

    @Test
    fun testBatchedSVD() = DoubleTensorAlgebra {
        val tensor = randomNormal(intArrayOf(2, 5, 3), 0)
        val (tensorU, tensorS, tensorV) = tensor.svd()
        val tensorSVD = tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose())
        assertTrue(tensor.eq(tensorSVD))
    }

    @Test
    fun testSVDGolabKahan() = DoubleTensorAlgebra{
        testSVDGolabKahanFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)))
        testSVDGolabKahanFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0)))
        val buffer = doubleArrayOf(
            1.000000, 2.000000, 3.000000,
            2.000000, 3.000000, 4.000000,
            3.000000, 4.000000, 5.000000,
            4.000000, 5.000000, 6.000000,
            5.000000, 6.000000, 7.000000
        )
        testSVDGolabKahanFor(fromArray(intArrayOf(5, 3), buffer))
    }

//    @Test
//    fun testSVDGolabKahanError() = DoubleTensorAlgebra{
//        val buffer = doubleArrayOf(
//            1.0, 2.0, 3.0, 2.0, 3.0,
//            4.0, 3.0, 4.0, 5.0, 4.0,
//            5.0, 6.0, 5.0, 6.0, 7.0
//        )
//        testSVDGolabKahanFor(fromArray(intArrayOf(3, 5), buffer))
//    }

//    @Test
//    fun testSVDGolabKahanBig() = DoubleTensorAlgebra{
//        val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0)
//        testSVDGolabKahanFor(tensor)
//    }
//
//    @Test
//    fun testSVDBig() = DoubleTensorAlgebra{
//        val tensor = DoubleTensorAlgebra.randomNormal(intArrayOf(100, 100, 100), 0)
//        testSVDFor(tensor)
//    }

    @Test
    fun testBatchedSVDGolabKahan() = DoubleTensorAlgebra{
        val tensor = randomNormal(intArrayOf(2, 5, 3), 0)
        val (tensorU, tensorS, tensorV) = tensor.svdGolabKahan()
        val tensorSVD = tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose())
        assertTrue(tensor.eq(tensorSVD))
    }

    @Test
    fun testBatchedSymEig() = DoubleTensorAlgebra {
        val tensor = randomNormal(shape = intArrayOf(2, 3, 3), 0)
@@ -240,24 +170,63 @@ internal class TestDoubleLinearOpsTensorAlgebra {
        assertTrue(tensorSigma.eq(tensorSigmaCalc))
    }


}


private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double = 1e-10) {
    val svd = tensor.svd()

    val tensorSVD = svd.first
        .dot(
            diagonalEmbedding(svd.second)
                .dot(svd.third.transpose())
    @Test
    fun testSVD() = DoubleTensorAlgebra{
        testSVDFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)))
        testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0)))
        val buffer1 = doubleArrayOf(
            1.000000, 2.000000, 3.000000,
            2.000000, 3.000000, 4.000000,
            3.000000, 4.000000, 5.000000,
            4.000000, 5.000000, 6.000000,
            5.000000, 6.000000, 7.000000
        )
        testSVDFor(fromArray(intArrayOf(5, 3), buffer1))
        val buffer2 = doubleArrayOf(
            1.0, 2.0, 3.0, 2.0, 3.0,
            4.0, 3.0, 4.0, 5.0, 4.0,
            5.0, 6.0, 5.0, 6.0, 7.0
        )
        testSVDFor(fromArray(intArrayOf(3, 5), buffer2))
    }

    assertTrue(tensor.eq(tensorSVD, epsilon))
    @Test
    fun testBatchedSVD() = DoubleTensorAlgebra{
        val tensor1 = randomNormal(intArrayOf(2, 5, 3), 0)
        testSVDFor(tensor1)
        val tensor2 = DoubleTensorAlgebra.randomNormal(intArrayOf(30, 30, 30), 0)
        testSVDFor(tensor2)
    }

    @Test
    fun testSVDPowerMethod() = DoubleTensorAlgebra{
        testSVDPowerMethodFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)))
        testSVDPowerMethodFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0)))
    }

    @Test
    fun testBatchedSVDPowerMethod() = DoubleTensorAlgebra {
        val tensor1 = randomNormal(intArrayOf(2, 5, 3), 0)
        testSVDPowerMethodFor(tensor1)
        val tensor2 = DoubleTensorAlgebra.randomNormal(intArrayOf(30, 30, 30), 0)
        testSVDPowerMethodFor(tensor2)
    }

//    @Test
//    fun testSVDPowerMethodError() = DoubleTensorAlgebra{
//        val buffer = doubleArrayOf(
//            1.000000, 2.000000, 3.000000,
//            2.000000, 3.000000, 4.000000,
//            3.000000, 4.000000, 5.000000,
//            4.000000, 5.000000, 6.000000,
//            5.000000, 6.000000, 7.000000
//        )
//        testSVDPowerMethodFor(fromArray(intArrayOf(5, 3), buffer))
//    }
}

private fun DoubleTensorAlgebra.testSVDGolabKahanFor(tensor: DoubleTensor) {
    val svd = tensor.svdGolabKahan()
private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor) {
    val svd = tensor.svd()

    val tensorSVD = svd.first
        .dot(
@@ -268,4 +237,14 @@ private fun DoubleTensorAlgebra.testSVDGolabKahanFor(tensor: DoubleTensor) {
    assertTrue(tensor.eq(tensorSVD))
}

private fun DoubleTensorAlgebra.testSVDPowerMethodFor(tensor: DoubleTensor, epsilon: Double = 1e-10) {
    val svd = tensor.svdPowerMethod()

    val tensorSVD = svd.first
        .dot(
            diagonalEmbedding(svd.second)
                .dot(svd.third.transpose())
        )

    assertTrue(tensor.eq(tensorSVD, epsilon))
}
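
Every helper in this diff validates a decomposition the same way: multiply the returned factors back together and compare the reconstruction with the input tensor. Below is a minimal standalone sketch of that check, assuming the `space.kscience.kmath.tensors.core` package location and using only the `DoubleTensorAlgebra` calls that appear in the tests above (`fromArray`, `svd`, `diagonalEmbedding`, infix `dot`, `transpose`, `eq`); it is an illustration of the test idea, not part of the PR.

import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

// Sketch of the reconstruction check used by testSVDFor and friends:
// decompose A, rebuild U * diag(S) * V^T, and compare with A
// within the algebra's default tolerance. The import path above is
// an assumption about the package layout.
fun main() = DoubleTensorAlgebra {
    val a = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
    val (u, s, v) = a.svd()
    val reconstructed = u dot (diagonalEmbedding(s) dot v.transpose())
    println(a.eq(reconstructed)) // expected: true
}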