diff --git a/kmath-torch/README.md b/kmath-torch/README.md
index 4d463f5bc..3137906f3 100644
--- a/kmath-torch/README.md
+++ b/kmath-torch/README.md
@@ -15,69 +15,7 @@
 This builds `ctorch`, a C wrapper for `LibTorch` placed inside:
 
 `~/.konan/third-party/kmath-torch-0.2.0-dev-4/cpp-build`
 
-You will have to link against it in your own project. Here is an example of build script for a standalone application:
-
-```kotlin
-//build.gradle.kts
-plugins {
-    id("ru.mipt.npm.mpp")
-}
-
-repositories {
-    jcenter()
-    mavenLocal()
-}
-
-val home = System.getProperty("user.home")
-val kver = "0.2.0-dev-4"
-val cppBuildDir = "$home/.konan/third-party/kmath-torch-$kver/cpp-build"
-
-kotlin {
-    explicitApiWarning()
-
-    val nativeTarget = linuxX64("your.app")
-    nativeTarget.apply {
-        binaries {
-            executable {
-                entryPoint = "your.app.main"
-            }
-            all {
-                linkerOpts(
-                    "-L$cppBuildDir",
-                    "-Wl,-rpath=$cppBuildDir",
-                    "-lctorch"
-                )
-            }
-        }
-    }
-
-    val main by nativeTarget.compilations.getting
-
-    sourceSets {
-        val nativeMain by creating {
-            dependencies {
-                implementation("kscience.kmath:kmath-torch:$kver")
-            }
-        }
-        main.defaultSourceSet.dependsOn(nativeMain)
-    }
-}
-```
-
-```kotlin
-//settings.gradle.kts
-pluginManagement {
-    repositories {
-        gradlePluginPortal()
-        jcenter()
-        maven("https://dl.bintray.com/mipt-npm/dev")
-    }
-    plugins {
-        id("ru.mipt.npm.mpp") version "0.7.1"
-        kotlin("jvm") version "1.4.21"
-    }
-}
-```
+You will have to link against it in your own project.
 
 ## Usage
 
@@ -96,32 +34,33 @@ TorchTensorRealAlgebra {
     val gpuRealTensor: TorchTensorReal = copyFromArray(
         array = (1..8).map { it * 2.5 }.toList().toDoubleArray(),
         shape = intArrayOf(2, 2, 2),
-        device = TorchDevice.TorchCUDA(0)
+        device = Device.CUDA(0)
     )
     println(gpuRealTensor)
 }
 ```
 
-Enjoy a high performance automatic differentiation engine:
+A high-performance automatic differentiation engine is available:
 
 ```kotlin
 TorchTensorRealAlgebra {
     val dim = 10
-    val device = TorchDevice.TorchCPU //or TorchDevice.TorchCUDA(0)
-    val x = randNormal(shape = intArrayOf(dim), device = device)
+    val device = Device.CPU // or Device.CUDA(0)
+
+    val tensorX = randNormal(shape = intArrayOf(dim), device = device)
+    val randFeatures = randNormal(shape = intArrayOf(dim, dim), device = device)
+    val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
+    val tensorMu = randNormal(shape = intArrayOf(dim), device = device)
 
-    val X = randNormal(shape = intArrayOf(dim, dim), device = device)
-    val Q = X + X.transpose(0, 1)
-    val mu = randNormal(shape = intArrayOf(dim), device = device)
+    // expression to differentiate w.r.t. x, evaluated at x = tensorX
+    val expressionAtX = withGradAt(tensorX, { x ->
+        0.5 * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9
+    })
 
-    // expression to differentiate w.r.t. x
-    val f = x.withGrad {
-        0.5 * (x dot (Q dot x)) + (mu dot x) + 25.3
-    }
-    // value of the gradient at x
-    val gradf = f grad x
-    // value of the hessian at x
-    val hessf = f hess x
+    // value of the gradient at x = tensorX
+    val gradientAtX = expressionAtX.grad(tensorX, retainGraph = true)
+    // value of the hessian at x = tensorX
+    val hessianAtX = expressionAtX hess tensorX
 }
 ```
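For reference, the flags needed to link a standalone Kotlin/Native target against `ctorch` are the ones from the build script that this patch removes from the README; a minimal sketch (the `linuxX64` target and the `kver` value are illustrative, adjust them to your setup):

```kotlin
// build.gradle.kts — sketch based on the build script removed above
val home = System.getProperty("user.home")
val kver = "0.2.0-dev-4"
val cppBuildDir = "$home/.konan/third-party/kmath-torch-$kver/cpp-build"

kotlin {
    linuxX64 {
        binaries.all {
            // library search path, runtime path, and the ctorch wrapper itself
            linkerOpts(
                "-L$cppBuildDir",
                "-Wl,-rpath=$cppBuildDir",
                "-lctorch"
            )
        }
    }
}
```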
diff --git a/kmath-torch/build.gradle.kts b/kmath-torch/build.gradle.kts
index 200ffc175..999ea1fa1 100644
--- a/kmath-torch/build.gradle.kts
+++ b/kmath-torch/build.gradle.kts
@@ -127,6 +127,7 @@ val generateJNIHeader by tasks.registering {
 
 kotlin {
     explicitApiWarning()
+
     jvm {
         withJava()
     }
diff --git a/kmath-torch/src/commonMain/kotlin/kscience.kmath.torch/TorchTensor.kt b/kmath-torch/src/commonMain/kotlin/kscience.kmath.torch/TorchTensor.kt
index 8700f7c04..be01570c9 100644
--- a/kmath-torch/src/commonMain/kotlin/kscience.kmath.torch/TorchTensor.kt
+++ b/kmath-torch/src/commonMain/kotlin/kscience.kmath.torch/TorchTensor.kt
@@ -1,9 +1,9 @@
+@file:Suppress("NOTHING_TO_INLINE")
+
 package kscience.kmath.torch
 
 import kscience.kmath.structures.TensorStructure
-
-
 
 public interface TorchTensor<T> : TensorStructure<T> {
     public fun item(): T
     public val strides: IntArray
diff --git a/kmath-torch/src/commonMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt b/kmath-torch/src/commonMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt
index d78c23bc9..676a26034 100644
--- a/kmath-torch/src/commonMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt
+++ b/kmath-torch/src/commonMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt
@@ -1,3 +1,5 @@
+@file:Suppress("NOTHING_TO_INLINE")
+
 package kscience.kmath.torch
 
 import kscience.kmath.structures.*
diff --git a/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestAutograd.kt b/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestAutograd.kt
new file mode 100644
index 000000000..53651f112
--- /dev/null
+++ b/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestAutograd.kt
@@ -0,0 +1,53 @@
+@file:Suppress("NOTHING_TO_INLINE")
+
+package kscience.kmath.torch
+
+import kotlin.test.assertTrue
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingAutoGrad(device: Device = Device.CPU): Unit {
+    setSeed(SEED)
+    val dim = 3
+    val tensorX = randNormal(shape = intArrayOf(dim), device = device)
+    val randFeatures = randNormal(shape = intArrayOf(dim, dim), device = device)
+    val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
+    val tensorMu = randNormal(shape = intArrayOf(dim), device = device)
+
+    val expressionAtX = withGradAt(tensorX, { x ->
+        0.5f * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9f
+    })
+
+    val gradientAtX = expressionAtX.grad(tensorX, retainGraph = true)
+    val hessianAtX = expressionAtX hess tensorX
+    val expectedGradientAtX = (tensorSigma dot tensorX) + tensorMu
+
+    val error = (gradientAtX - expectedGradientAtX).abs().sum().value() +
+            (hessianAtX - tensorSigma).abs().sum().value()
+    assertTrue(error < TOLERANCE)
+}
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingBatchedAutoGrad(device: Device = Device.CPU): Unit {
+    setSeed(SEED)
+    val batch = intArrayOf(2)
+    val dim = 2
+    val tensorX = randNormal(shape = batch + intArrayOf(1, dim), device = device)
+    val randFeatures = randNormal(shape = batch + intArrayOf(dim, dim), device = device)
+    val tensorSigma = randFeatures + randFeatures.transpose(-2, -1)
+    val tensorMu = randNormal(shape = batch + intArrayOf(1, dim), device = device)
+
+    val expressionAtX = withGradAt(tensorX, { x ->
+        val xt = x.transpose(-1, -2)
+        0.5f * (x dot (tensorSigma dot xt)) + (tensorMu dot xt) + 58.2f
+    })
+    expressionAtX.sumAssign()
+
+    val gradientAtX = expressionAtX grad tensorX
+    val expectedGradientAtX = (tensorX dot tensorSigma) + tensorMu
+
+    val error = (gradientAtX - expectedGradientAtX).abs().sum().value()
+    assertTrue(error < TOLERANCE)
+}
+
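These tests check autograd against a closed-form oracle. `tensorSigma = randFeatures + randFeatures.transpose(0, 1)` is symmetric by construction, so for the quadratic expression above the gradient and Hessian are known exactly:

```latex
f(x) = \tfrac{1}{2}\, x^{T} \Sigma x + \mu^{T} x + c, \qquad
\nabla f(x) = \Sigma x + \mu, \qquad
\nabla^{2} f(x) = \Sigma
```

which is why `expectedGradientAtX` is computed as `(tensorSigma dot tensorX) + tensorMu` and `hessianAtX` is compared against `tensorSigma` directly.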
diff --git a/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestTorchTensor.kt b/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestTorchTensor.kt
new file mode 100644
index 000000000..b5d9c9431
--- /dev/null
+++ b/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestTorchTensor.kt
@@ -0,0 +1,53 @@
+@file:Suppress("NOTHING_TO_INLINE")
+
+package kscience.kmath.torch
+
+import kotlin.test.assertEquals
+import kotlin.test.assertTrue
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingCopying(device: Device = Device.CPU): Unit {
+    val array = (1..24).map { 10f * it * it }.toFloatArray()
+    val shape = intArrayOf(2, 3, 4)
+    val tensor = copyFromArray(array, shape = shape, device = device)
+    val copyOfTensor = tensor.copy()
+    tensor[intArrayOf(0, 0)] = 0.1f
+    assertTrue(copyOfTensor.copyToArray() contentEquals array)
+    assertEquals(0.1f, tensor[intArrayOf(0, 0)])
+    if (device != Device.CPU) {
+        val normalCpu = randNormal(intArrayOf(2, 3))
+        val normalGpu = normalCpu.copyToDevice(device)
+        assertTrue(normalCpu.copyToArray() contentEquals normalGpu.copyToArray())
+
+        val uniformGpu = randUniform(intArrayOf(3, 2), device)
+        val uniformCpu = uniformGpu.copyToDevice(Device.CPU)
+        assertTrue(uniformGpu.copyToArray() contentEquals uniformCpu.copyToArray())
+    }
+}
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingRequiresGrad(): Unit {
+    val tensor = randNormal(intArrayOf(3))
+    assertTrue(!tensor.requiresGrad)
+    tensor.requiresGrad = true
+    assertTrue(tensor.requiresGrad)
+    tensor.requiresGrad = false
+    assertTrue(!tensor.requiresGrad)
+    tensor.requiresGrad = true
+    val detachedTensor = tensor.detachFromGraph()
+    assertTrue(!detachedTensor.requiresGrad)
+}
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingViewWithNoCopy(device: Device = Device.CPU) {
+    val tensor = copyFromArray(intArrayOf(1, 2, 3, 4, 5, 6), shape = intArrayOf(6), device = device)
+    val viewTensor = tensor.view(intArrayOf(2, 3))
+    assertTrue(viewTensor.shape contentEquals intArrayOf(2, 3))
+    viewTensor[intArrayOf(0, 0)] = 10
+    assertEquals(tensor[intArrayOf(0)], 10)
+}
+
diff --git a/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestTorchTensorAlgebra.kt b/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestTorchTensorAlgebra.kt
new file mode 100644
index 000000000..26a7d1787
--- /dev/null
+++ b/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestTorchTensorAlgebra.kt
@@ -0,0 +1,133 @@
+@file:Suppress("NOTHING_TO_INLINE")
+
+package kscience.kmath.torch
+
+import kscience.kmath.linear.RealMatrixContext
+import kscience.kmath.operations.invoke
+import kscience.kmath.structures.Matrix
+import kotlin.math.*
+import kotlin.test.*
+
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingScalarProduct(device: Device = Device.CPU): Unit {
+    val lhs = randUniform(shape = intArrayOf(3), device = device)
+    val rhs = randUniform(shape = intArrayOf(3), device = device)
+    val product = lhs dot rhs
+    var expected = 0.0
+    lhs.elements().forEach {
+        expected += it.second * rhs[it.first]
+    }
+    assertTrue(abs(expected - product.value()) < TOLERANCE)
+}
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingMatrixMultiplication(device: Device = Device.CPU): Unit {
+    setSeed(SEED)
+
+    val lhsTensor = randNormal(shape = intArrayOf(3, 3), device = device)
+    val rhsTensor = randNormal(shape = intArrayOf(3, 3), device = device)
+    val product = lhsTensor dot rhsTensor
+
+    val expected: Matrix<Double> = RealMatrixContext {
+        val lhs = produce(3, 3) { i, j -> lhsTensor[intArrayOf(i, j)] }
+        val rhs = produce(3, 3) { i, j -> rhsTensor[intArrayOf(i, j)] }
+        lhs dot rhs
+    }
+
+    val lhsTensorCopy = lhsTensor.copy()
+    val rhsTensorCopy = rhsTensor.copy()
+
+    lhsTensorCopy dotAssign rhsTensor
+    lhsTensor dotRightAssign rhsTensorCopy
+
+    var error = 0.0
+    product.elements().forEach {
+        error += abs(expected[it.first] - it.second) +
+                abs(expected[it.first] - lhsTensorCopy[it.first]) +
+                abs(expected[it.first] - rhsTensorCopy[it.first])
+    }
+    assertTrue(error < TOLERANCE)
+}
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingLinearStructure(device: Device = Device.CPU): Unit {
+
+    val shape = intArrayOf(3)
+    val tensorA = full(value = -4.5, shape = shape, device = device)
+    val tensorB = full(value = 10.9, shape = shape, device = device)
+    val tensorC = full(value = 789.3, shape = shape, device = device)
+    val tensorD = full(value = -72.9, shape = shape, device = device)
+    val tensorE = full(value = 553.1, shape = shape, device = device)
+    val result = 15.8 * tensorA - 1.5 * tensorB * (-tensorD) + 0.02 * tensorC / tensorE - 39.4
+    val expected = copyFromArray(
+        array = (1..3).map {
+            15.8 * (-4.5) - 1.5 * 10.9 * 72.9 + 0.02 * 789.3 / 553.1 - 39.4
+        }
+            .toDoubleArray(),
+        shape = shape,
+        device = device
+    )
+
+    val assignResult = full(value = 0.0, shape = shape, device = device)
+    tensorA *= 15.8
+    tensorB *= 1.5
+    tensorB *= -tensorD
+    tensorC *= 0.02
+    tensorC /= tensorE
+    assignResult += tensorA
+    assignResult -= tensorB
+    assignResult += tensorC
+    assignResult += -39.4
+
+    val error = (expected - result).abs().sum().value() +
+            (expected - assignResult).abs().sum().value()
+    assertTrue(error < TOLERANCE)
+
+}
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingTensorTransformations(device: Device = Device.CPU): Unit {
+    setSeed(SEED)
+    val tensor = randNormal(shape = intArrayOf(3, 3), device = device)
+    val result = tensor.exp().log()
+    val assignResult = tensor.copy()
+    assignResult.transposeAssign(0, 1)
+    assignResult.expAssign()
+    assignResult.logAssign()
+    assignResult.transposeAssign(0, 1)
+    val error = tensor - result
+    error.absAssign()
+    error.sumAssign()
+    error += (tensor - assignResult).abs().sum()
+    assertTrue(error.value() < TOLERANCE)
+
+}
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingBatchedSVD(device: Device = Device.CPU): Unit {
+    val tensor = randNormal(shape = intArrayOf(7, 5, 3), device = device)
+    val (tensorU, tensorS, tensorV) = tensor.svd()
+    val error = tensor - (tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1)))
+    assertTrue(error.abs().sum().value() < TOLERANCE)
+}
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingBatchedSymEig(device: Device = Device.CPU): Unit {
+    val tensor = randNormal(shape = intArrayOf(5, 5), device = device)
+    val tensorSigma = tensor + tensor.transpose(-2, -1)
+    val (tensorS, tensorV) = tensorSigma.symEig()
+    val error = tensorSigma - (tensorV dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1)))
+    assertTrue(error.abs().sum().value() < TOLERANCE)
+}
+
+
+
+
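The two batched decomposition tests above use reconstruction as their oracle: the input is factored and reassembled, and the residual must vanish up to `TOLERANCE`:

```latex
A = U \operatorname{diag}(S)\, V^{T} \quad \text{(SVD)}, \qquad
A = V \operatorname{diag}(S)\, V^{T} \quad \text{(symmetric eigendecomposition, } A = A^{T})
```

`diagonalEmbedding(tensorS)` builds `diag(S)` for every matrix in the batch, and `transpose(-2, -1)` swaps the two innermost dimensions, so the same identities hold batch-wise.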
diff --git a/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestUtils.kt b/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestUtils.kt
index 9164fefbf..93bd57fd3 100644
--- a/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestUtils.kt
+++ b/kmath-torch/src/commonTest/kotlin/kscience.kmath.torch/TestUtils.kt
@@ -1,5 +1,39 @@
+@file:Suppress("NOTHING_TO_INLINE")
+
 package kscience.kmath.torch
 
+import kotlin.test.assertEquals
+
 internal val SEED = 987654
 internal val TOLERANCE = 1e-6
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.withCuda(block: TorchTensorAlgebraType.(Device) -> Unit): Unit {
+    this.block(Device.CPU)
+    if (cudaAvailable()) this.block(Device.CUDA(0))
+}
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingSetNumThreads(): Unit {
+    val numThreads = 2
+    setNumThreads(numThreads)
+    assertEquals(numThreads, getNumThreads())
+}
+
+internal inline fun <T, TorchTensorType : TorchTensor<T>,
+        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
+        TorchTensorAlgebraType.testingSetSeed(device: Device = Device.CPU): Unit {
+    setSeed(SEED)
+    val integral = randIntegral(0f, 100f, IntArray(0), device = device).value()
+    val normal = randNormal(IntArray(0), device = device).value()
+    val uniform = randUniform(IntArray(0), device = device).value()
+    setSeed(SEED)
+    val nextIntegral = randIntegral(0f, 100f, IntArray(0), device = device).value()
+    val nextNormal = randNormal(IntArray(0), device = device).value()
+    val nextUniform = randUniform(IntArray(0), device = device).value()
+    assertEquals(normal, nextNormal)
+    assertEquals(uniform, nextUniform)
+    assertEquals(integral, nextIntegral)
+}
\ No newline at end of file
diff --git a/kmath-torch/src/jvmMain/kotlin/kscience/kmath/torch/Utils.kt b/kmath-torch/src/jvmMain/kotlin/kscience/kmath/torch/Utils.kt
index ce70b8fe1..dc08360ae 100644
--- a/kmath-torch/src/jvmMain/kotlin/kscience/kmath/torch/Utils.kt
+++ b/kmath-torch/src/jvmMain/kotlin/kscience/kmath/torch/Utils.kt
@@ -9,14 +9,6 @@ public fun setNumThreads(numThreads: Int): Unit {
     JTorch.setNumThreads(numThreads)
 }
 
-public fun cudaAvailable(): Boolean {
-    TODO("Implementation not available yet")
-}
-
-public fun setSeed(seed: Int): Unit {
-    TODO("Implementation not available yet")
-}
-
 public fun runCPD(): Unit {
 
     val tensorHandle = JTorch.createTensor()
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestAutogradGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestAutogradGPU.kt
deleted file mode 100644
index 7d43653c1..000000000
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestAutogradGPU.kt
+++ /dev/null
@@ -1,13 +0,0 @@
-package kscience.kmath.torch
-
-import kotlin.test.*
-
-
-internal class TestAutogradGPU {
-    @Test
-    fun testAutoGrad() = testingAutoGrad(dim = 3, device = Device.CUDA(0))
-
-    @Test
-    fun testBatchedAutoGrad() = testingBatchedAutoGrad(
-        bath = intArrayOf(2), dim=3, device = Device.CUDA(0))
-}
\ No newline at end of file
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebraGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebraGPU.kt
deleted file mode 100644
index c0c385103..000000000
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebraGPU.kt
+++ /dev/null
@@ -1,32 +0,0 @@
-package kscience.kmath.torch
-
-import kotlin.test.*
-
-
-class TestTorchTensorAlgebraGPU {
-
-    @Test
-    fun testScalarProduct() =
-        testingScalarProduct(device = Device.CUDA(0))
-
-    @Test
-    fun testMatrixMultiplication() =
-        testingMatrixMultiplication(device = Device.CUDA(0))
-
-    @Test
-    fun testLinearStructure() =
-        testingLinearStructure(device = Device.CUDA(0))
-
-    @Test
-    fun testTensorTransformations() =
-        testingTensorTransformations(device = Device.CUDA(0))
-
-    @Test
-    fun testBatchedSVD() =
-        testingBatchedSVD(device = Device.CUDA(0))
-
-    @Test
-    fun testBatchedSymEig() =
-        testingBatchedSymEig(device = Device.CUDA(0))
-
-}
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorGPU.kt
deleted file mode 100644
index 820f2008a..000000000
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorGPU.kt
+++ /dev/null
@@ -1,22 +0,0 @@
-package kscience.kmath.torch
-
-import kotlin.test.*
-
-class TestTorchTensorGPU {
-
-    @Test
-    fun testCopyFromArray() = testingCopyFromArray(Device.CUDA(0))
-
-    @Test
-    fun testCopyToDevice() = TorchTensorRealAlgebra {
-        setSeed(SEED)
-        val normalCpu = randNormal(intArrayOf(2, 3))
-        val normalGpu = normalCpu.copyToDevice(Device.CUDA(0))
-        assertTrue(normalCpu.copyToArray() contentEquals normalGpu.copyToArray())
-
-        val uniformGpu = randUniform(intArrayOf(3,2),Device.CUDA(0))
-        val uniformCpu = uniformGpu.copyToDevice(Device.CPU)
-        assertTrue(uniformGpu.copyToArray() contentEquals uniformCpu.copyToArray())
-    }
-
-}
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestUtilsGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestUtilsGPU.kt
deleted file mode 100644
index 72c34add1..000000000
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestUtilsGPU.kt
+++ /dev/null
@@ -1,16 +0,0 @@
-package kscience.kmath.torch
-
-import kotlin.test.*
-
-
-internal class TestUtilsGPU {
-
-    @Test
-    fun testCudaAvailable() {
-        assertTrue(cudaAvailable())
-    }
-
-    @Test
-    fun testSetSeed() = testingSetSeed(Device.CUDA(0))
-
-}
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebraNative.kt b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebraNative.kt
index aa1cbc15b..aa0564f2e 100644
--- a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebraNative.kt
+++ b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebraNative.kt
@@ -133,9 +133,9 @@ public sealed class TorchTensorPartialDivisionAlgebraNative<T, TorchTensorType : TorchTensor<T>>(scope),
     TorchTensorPartialDivisionAlgebra<T, TorchTensorType> {
 
-    override operator fun TorchTensorType.div(b: TorchTensorType): TorchTensorType {
-        if (checks) checkLinearOperation(this, b)
-        return wrap(div_tensor(this.tensorHandle, b.tensorHandle)!!)
+    override operator fun TorchTensorType.div(other: TorchTensorType): TorchTensorType {
+        if (checks) checkLinearOperation(this, other)
+        return wrap(div_tensor(this.tensorHandle, other.tensorHandle)!!)
     }
 
     override operator fun TorchTensorType.divAssign(other: TorchTensorType): Unit {
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestAutograd.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestAutograd.kt
index 5884372de..6560153f7 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestAutograd.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestAutograd.kt
@@ -2,61 +2,19 @@ package kscience.kmath.torch
 
 import kotlin.test.*
 
-internal fun testingAutoGrad(dim: Int, device: Device = Device.CPU): Unit {
-    TorchTensorRealAlgebra {
-        setSeed(SEED)
-
-        val tensorX = randNormal(shape = intArrayOf(dim), device = device)
-        val randFeatures = randNormal(shape = intArrayOf(dim, dim), device = device)
-        val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
-        val tensorMu = randNormal(shape = intArrayOf(dim), device = device)
-
-        val expressionAtX = withGradAt(tensorX, { x ->
-            0.5 * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9
-        })
-
-        val gradientAtX = expressionAtX.grad(tensorX, retainGraph = true)
-        val hessianAtX = expressionAtX hess tensorX
-        val expectedGradientAtX = (tensorSigma dot tensorX) + tensorMu
-
-        val error = (gradientAtX - expectedGradientAtX).abs().sum().value() +
-                (hessianAtX - tensorSigma).abs().sum().value()
-        assertTrue(error < TOLERANCE)
-    }
-}
-
-internal fun testingBatchedAutoGrad(
-    bath: IntArray,
-    dim: Int,
-    device: Device = Device.CPU
-): Unit {
-    TorchTensorRealAlgebra {
-        setSeed(SEED)
-
-        val tensorX = randNormal(shape = bath + intArrayOf(1, dim), device = device)
-        val randFeatures = randNormal(shape = bath + intArrayOf(dim, dim), device = device)
-        val tensorSigma = randFeatures + randFeatures.transpose(-2, -1)
-        val tensorMu = randNormal(shape = bath + intArrayOf(1, dim), device = device)
-
-        val expressionAtX = withGradAt(tensorX, { x ->
-            val xt = x.transpose(-1, -2)
-            0.5 * (x dot (tensorSigma dot xt)) + (tensorMu dot xt) + 58.2
-        })
-        expressionAtX.sumAssign()
-
-        val gradientAtX = expressionAtX grad tensorX
-        val expectedGradientAtX = (tensorX dot tensorSigma) + tensorMu
-
-        val error = (gradientAtX - expectedGradientAtX).abs().sum().value()
-        assertTrue(error < TOLERANCE)
-    }
-}
-
 internal class TestAutograd {
     @Test
-    fun testAutoGrad() = testingAutoGrad(dim = 100)
+    fun testAutoGrad() = TorchTensorFloatAlgebra {
+        withCuda { device ->
+            testingAutoGrad(device)
+        }
+    }
 
     @Test
-    fun testBatchedAutoGrad() = testingBatchedAutoGrad(bath = intArrayOf(2, 10), dim = 30)
+    fun testBatchedAutoGrad() = TorchTensorFloatAlgebra {
+        withCuda { device ->
+            testingBatchedAutoGrad(device)
+        }
+    }
 }
\ No newline at end of file
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
index a0e8e5ed3..4a8edfd58 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
@@ -3,26 +3,18 @@ package kscience.kmath.torch
 import kotlinx.cinterop.*
 import kotlin.test.*
 
-internal fun testingCopyFromArray(device: Device = Device.CPU): Unit {
-    TorchTensorRealAlgebra {
-        val array = (1..24).map { 10.0 * it * it }.toDoubleArray()
-        val shape = intArrayOf(2, 3, 4)
-        val tensor = copyFromArray(array, shape = shape, device = device)
-        val copyOfTensor = tensor.copy()
-        tensor[intArrayOf(0, 0)] = 0.1
-        assertTrue(copyOfTensor.copyToArray() contentEquals array)
-        assertEquals(0.1, tensor[intArrayOf(0, 0)])
-    }
-}
-
 class TestTorchTensor {
 
     @Test
-    fun testCopyFromArray() = testingCopyFromArray()
+    fun testCopying() = TorchTensorFloatAlgebra {
+        withCuda { device ->
+            testingCopying(device)
+        }
+    }
 
     @Test
-    fun testCopyLessDataTransferOnCPU() = memScoped {
+    fun testNativeNoCopyDataTransferOnCPU() = memScoped {
         val data = allocArray<DoubleVar>(1)
         data[0] = 1.0
         TorchTensorRealAlgebra {
@@ -39,15 +31,7 @@ class TestTorchTensor {
 
     @Test
     fun testRequiresGrad() = TorchTensorRealAlgebra {
-        val tensor = randNormal(intArrayOf(3))
-        assertTrue(!tensor.requiresGrad)
-        tensor.requiresGrad = true
-        assertTrue(tensor.requiresGrad)
-        tensor.requiresGrad = false
-        assertTrue(!tensor.requiresGrad)
-        tensor.requiresGrad = true
-        val detachedTensor = tensor.detachFromGraph()
-        assertTrue(!detachedTensor.requiresGrad)
+        testingRequiresGrad()
     }
 
     @Test
@@ -63,11 +47,9 @@ class TestTorchTensor {
 
     @Test
     fun testViewWithNoCopy() = TorchTensorIntAlgebra {
-        val tensor = copyFromArray(intArrayOf(1, 2, 3, 4, 5, 6), shape = intArrayOf(6))
-        val viewTensor = tensor.view(intArrayOf(2, 3))
-        assertTrue(viewTensor.shape contentEquals intArrayOf(2, 3))
-        viewTensor[intArrayOf(0, 0)] = 10
-        assertEquals(tensor[intArrayOf(0)], 10)
+        withCuda { device ->
+            testingViewWithNoCopy(device)
+        }
     }
 }
\ No newline at end of file
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebra.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebra.kt
index 0246833ee..c761ab1ca 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebra.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebra.kt
@@ -1,146 +1,50 @@
 package kscience.kmath.torch
 
-import kscience.kmath.linear.RealMatrixContext
-import kscience.kmath.operations.invoke
-import kscience.kmath.structures.Matrix
-import kotlin.math.*
 import kotlin.test.*
 
-internal fun testingScalarProduct(device: Device = Device.CPU): Unit {
-    TorchTensorRealAlgebra {
-        val lhs = randUniform(shape = intArrayOf(3), device = device)
-        val rhs = randUniform(shape = intArrayOf(3), device = device)
-        val product = lhs dot rhs
-        var expected = 0.0
-        lhs.elements().forEach {
-            expected += it.second * rhs[it.first]
-        }
-        assertTrue(abs(expected - product.value()) < TOLERANCE)
-    }
-}
-
-internal fun testingMatrixMultiplication(device: Device = Device.CPU): Unit {
-    TorchTensorRealAlgebra {
-        setSeed(SEED)
-
-        val lhsTensor = randNormal(shape = intArrayOf(3, 3), device = device)
-        val rhsTensor = randNormal(shape = intArrayOf(3, 3), device = device)
-        val product = lhsTensor dot rhsTensor
-
-        val expected: Matrix<Double> = RealMatrixContext {
-            val lhs = produce(3, 3) { i, j -> lhsTensor[intArrayOf(i, j)] }
-            val rhs = produce(3, 3) { i, j -> rhsTensor[intArrayOf(i, j)] }
-            lhs dot rhs
-        }
-
-        val lhsTensorCopy = lhsTensor.copy()
-        val rhsTensorCopy = rhsTensor.copy()
-
-        lhsTensorCopy dotAssign rhsTensor
-        lhsTensor dotRightAssign rhsTensorCopy
-
-        var error = 0.0
-        product.elements().forEach {
-            error += abs(expected[it.first] - it.second) +
-                    abs(expected[it.first] - lhsTensorCopy[it.first]) +
-                    abs(expected[it.first] - rhsTensorCopy[it.first])
-        }
-        assertTrue(error < TOLERANCE)
-    }
-}
-
-internal fun testingLinearStructure(device: Device = Device.CPU): Unit {
-    TorchTensorRealAlgebra {
-        withChecks {
-            val shape = intArrayOf(3)
-            val tensorA = full(value = -4.5, shape = shape, device = device)
-            val tensorB = full(value = 10.9, shape = shape, device = device)
-            val tensorC = full(value = 789.3, shape = shape, device = device)
-            val tensorD = full(value = -72.9, shape = shape, device = device)
-            val tensorE = full(value = 553.1, shape = shape, device = device)
-            val result = 15.8 * tensorA - 1.5 * tensorB * (-tensorD) + 0.02 * tensorC / tensorE - 39.4
-            val expected = copyFromArray(
-                array = (1..3).map {
-                    15.8 * (-4.5) - 1.5 * 10.9 * 72.9 + 0.02 * 789.3 / 553.1 - 39.4
-                }
-                    .toDoubleArray(),
-                shape = shape,
-                device = device
-            )
-
-            val assignResult = full(value = 0.0, shape = shape, device = device)
-            tensorA *= 15.8
-            tensorB *= 1.5
-            tensorB *= -tensorD
-            tensorC *= 0.02
-            tensorC /= tensorE
-            assignResult += tensorA
-            assignResult -= tensorB
-            assignResult += tensorC
-            assignResult += -39.4
-
-            val error = (expected - result).abs().sum().value() +
-                    (expected - assignResult).abs().sum().value()
-            assertTrue(error < TOLERANCE)
-            println(expected)
-        }}
-}
-
-internal fun testingTensorTransformations(device: Device = Device.CPU): Unit {
-    TorchTensorRealAlgebra {
-        setSeed(SEED)
-        val tensor = randNormal(shape = intArrayOf(3, 3), device = device)
-        val result = tensor.exp().log()
-        val assignResult = tensor.copy()
-        assignResult.transposeAssign(0, 1)
-        assignResult.expAssign()
-        assignResult.logAssign()
-        assignResult.transposeAssign(0, 1)
-        val error = tensor - result
-        error.absAssign()
-        error.sumAssign()
-        error += (tensor - assignResult).abs().sum()
-        assertTrue(error.value() < TOLERANCE)
-    }
-}
-
-internal fun testingBatchedSVD(device: Device = Device.CPU): Unit {
-    TorchTensorRealAlgebra {
-        val tensor = randNormal(shape = intArrayOf(7, 5, 3), device = device)
-        val (tensorU, tensorS, tensorV) = tensor.svd()
-        val error = tensor - (tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2,-1)))
-        assertTrue(error.abs().sum().value() < TOLERANCE)
-    }
-}
-
-internal fun testingBatchedSymEig(device: Device = Device.CPU): Unit {
-    TorchTensorRealAlgebra {
-        val tensor = randNormal(shape = intArrayOf(5,5), device = device)
-        val tensorSigma = tensor + tensor.transpose(-2,-1)
-        val (tensorS, tensorV) = tensorSigma.symEig()
-        val error = tensorSigma - (tensorV dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2,-1)))
-        assertTrue(error.abs().sum().value() < TOLERANCE)
-    }
-}
 
 internal class TestTorchTensorAlgebra {
 
     @Test
-    fun testScalarProduct() = testingScalarProduct()
+    fun testScalarProduct() = TorchTensorRealAlgebra {
+        withCuda { device ->
+            testingScalarProduct(device)
+        }
+    }
 
     @Test
-    fun testMatrixMultiplication() = testingMatrixMultiplication()
+    fun testMatrixMultiplication() = TorchTensorRealAlgebra {
+        withCuda { device ->
+            testingMatrixMultiplication(device)
+        }
+    }
 
     @Test
-    fun testLinearStructure() = testingLinearStructure()
+    fun testLinearStructure() = TorchTensorRealAlgebra {
+        withCuda { device ->
+            testingLinearStructure(device)
+        }
+    }
 
     @Test
-    fun testTensorTransformations() = testingTensorTransformations()
+    fun testTensorTransformations() = TorchTensorRealAlgebra {
+        withCuda { device ->
+            testingTensorTransformations(device)
+        }
+    }
 
     @Test
-    fun testBatchedSVD() = testingBatchedSVD()
+    fun testBatchedSVD() = TorchTensorRealAlgebra {
+        withCuda { device ->
+            testingBatchedSVD(device)
+        }
+    }
 
     @Test
-    fun testBatchedSymEig() = testingBatchedSymEig()
+    fun testBatchedSymEig() = TorchTensorRealAlgebra {
+        withCuda { device ->
+            testingBatchedSymEig(device)
+        }
+    }
 }
\ No newline at end of file
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt
index f57894f62..02f362165 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt
@@ -3,29 +3,18 @@ package kscience.kmath.torch
 
 import kotlin.test.*
 
-internal fun testingSetSeed(device: Device = Device.CPU): Unit {
-    TorchTensorRealAlgebra {
-        setSeed(SEED)
-        val normal = randNormal(IntArray(0), device = device).value()
-        val uniform = randUniform(IntArray(0), device = device).value()
-        setSeed(SEED)
-        val nextNormal = randNormal(IntArray(0), device = device).value()
-        val nextUniform = randUniform(IntArray(0), device = device).value()
-        assertEquals(normal, nextNormal)
-        assertEquals(uniform, nextUniform)
-    }
-}
-
 internal class TestUtils {
     @Test
     fun testSetNumThreads() {
-        TorchTensorRealAlgebra {
-            val numThreads = 2
-            setNumThreads(numThreads)
-            assertEquals(numThreads, getNumThreads())
+        TorchTensorIntAlgebra {
+            testingSetNumThreads()
         }
     }
 
     @Test
-    fun testSetSeed() = testingSetSeed()
+    fun testSeedSetting() = TorchTensorFloatAlgebra {
+        withCuda { device ->
+            testingSetSeed(device)
+        }
+    }
 }
\ No newline at end of file
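Taken together, the diff settles on one pattern for device-agnostic tests: the body lives in `commonTest` as an inline extension on the algebra type, and a platform test class dispatches it through `withCuda`, which runs the block on `Device.CPU` and again on `Device.CUDA(0)` whenever `cudaAvailable()` is true. A minimal sketch of adding a new test in this style (`testingMyCheck` and `TestMyCheck` are hypothetical names, not part of this diff):

```kotlin
import kotlin.test.Test
import kotlin.test.assertTrue

// commonTest: device-agnostic body, following the signatures used above
internal inline fun <T, TorchTensorType : TorchTensor<T>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, TorchTensorType>>
        TorchTensorAlgebraType.testingMyCheck(device: Device = Device.CPU): Unit {
    setSeed(SEED)
    val tensor = randNormal(shape = intArrayOf(2, 2), device = device)
    // transposing twice must reproduce the original tensor
    val roundTrip = tensor.transpose(0, 1).transpose(0, 1)
    assertTrue((tensor - roundTrip).abs().sum().value() < TOLERANCE)
}

// nativeTest: runs once on CPU and, when available, once on CUDA
internal class TestMyCheck {
    @Test
    fun testMyCheck() = TorchTensorFloatAlgebra {
        withCuda { device -> testingMyCheck(device) }
    }
}
```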