From 0bc2c12a05271bc9926f55e73af2e5cf9c61b6db Mon Sep 17 00:00:00 2001
From: Roland Grinis
Date: Tue, 13 Jul 2021 15:12:19 +0100
Subject: [PATCH] testing autograd

---
 kmath-noa/README.md                                | 65 +++++++++++++++++
 kmath-noa/build.gradle.kts                         |  4 +-
 .../kotlin/space/kscience/kmath/noa/utils.kt       | 11 +++
 .../space/kscience/kmath/noa/TestAlgebra.kt        |  3 +-
 .../space/kscience/kmath/noa/TestAutoGrad.kt       | 69 +++++++++++++++++++
 .../space/kscience/kmath/noa/TestUtils.kt          | 13 ++++
 6 files changed, 161 insertions(+), 4 deletions(-)
 create mode 100644 kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestAutoGrad.kt

diff --git a/kmath-noa/README.md b/kmath-noa/README.md
index 707845bf2..0774a9800 100644
--- a/kmath-noa/README.md
+++ b/kmath-noa/README.md
@@ -43,3 +43,68 @@ To load the native library you will need to add to the VM options:
 -Djava.library.path=${HOME}/.konan/third-party/noa-v0.0.1/cpp-build/kmath
 ```
 
+## Usage
+
+We implement the tensor algebra interfaces
+from [kmath-tensors](../kmath-tensors):
+```kotlin
+NoaFloat {
+    val tensor =
+        randNormal(
+            shape = intArrayOf(7, 5, 3),
+            device = Device.CPU) // or Device.CUDA(0) for GPU
+
+    // Compute SVD
+    val (tensorU, tensorS, tensorV) = tensor.svd()
+
+    // Reconstruct tensor
+    val tensorReg =
+        tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1))
+}
+```
+
+The [AutoGrad](https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html)
+engine is exposed:
+
+```kotlin
+NoaFloat {
+    // Create a quadratic function
+    val dim = 3
+    val tensorX = randNormal(shape = intArrayOf(dim))
+    val randFeatures = randNormal(shape = intArrayOf(dim, dim))
+    val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
+    val tensorMu = randNormal(shape = intArrayOf(dim))
+
+    // Create a differentiable expression
+    val expressionAtX = withGradAt(tensorX) { x ->
+        0.5f * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9f
+    }
+
+    // Evaluate the gradient at tensorX,
+    // retaining the graph for the Hessian computation
+    val gradientAtX = expressionAtX.autoGradient(tensorX, retainGraph = true)
+
+    // Compute the Hessian at tensorX
+    val hessianAtX = expressionAtX.autoHessian(tensorX)
+}
+```
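+
+The engine also handles batched tensors. The following sketch mirrors
+the batched test added in this patch: the expression is reduced to a
+scalar with `sumAll()` before differentiation, and the gradient comes
+back with the batched shape of the input:
+
+```kotlin
+NoaFloat {
+    val batch = intArrayOf(2)
+    val dim = 2
+    val tensorX = randNormal(shape = batch + intArrayOf(1, dim))
+    val randFeatures = randNormal(shape = batch + intArrayOf(dim, dim))
+    val tensorSigma = randFeatures + randFeatures.transpose(-2, -1)
+    val tensorMu = randNormal(shape = batch + intArrayOf(1, dim))
+
+    // Reduce the batched expression to a scalar before differentiating
+    val expressionAtX = withGradAt(tensorX) { x ->
+        val xt = x.transpose(-1, -2)
+        (0.5f * (x dot (tensorSigma dot xt)) + (tensorMu dot xt) + 58.2f).sumAll()
+    }
+
+    // The gradient has the same batched shape as tensorX
+    val gradientAtX = expressionAtX.autoGradient(tensorX)
+}
+```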
+
+Native memory management relies on scoping
+with [NoaScope](src/main/kotlin/space/kscience/kmath/noa/memory/NoaScope.kt),
+which is readily available within an algebra context.
+Manual management is also possible:
+```kotlin
+// Create a scope
+val scope = NoaScope()
+
+val tensor = NoaFloat(scope){
+    full(5f, intArrayOf(1))
+}!! // the result might be null
+
+// If the computation fails, resources will be freed automatically.
+// Otherwise it is your responsibility to free them:
+scope.disposeAll()
+
+// Attempting to use tensor here is undefined behaviour
+```
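+
+Within an algebra context such as `NoaFloat { }` the scope is handled
+for you. A minimal sketch of that path, assuming tensors allocated in
+the block are attached to the context's implicit scope and disposed of
+on exit (return a primitive, or pass an explicit scope as above, if a
+result must outlive the block):
+
+```kotlin
+val numElements = NoaFloat {
+    // freed automatically when the block exits
+    val tensor = full(5f, intArrayOf(1))
+    tensor.numElements
+}!! // the result might be null
+```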
\ No newline at end of file
diff --git a/kmath-noa/build.gradle.kts b/kmath-noa/build.gradle.kts
index 383bd6140..ce4db4337 100644
--- a/kmath-noa/build.gradle.kts
+++ b/kmath-noa/build.gradle.kts
@@ -160,8 +160,8 @@ tasks["compileJava"].dependsOn(buildCpp)
 
 tasks {
     withType<Test>{
-        systemProperty("java.library.path", "$home/devspace/noa/cmake-build-release/kmath")
-        //"$cppBuildDir/kmath")
+        systemProperty("java.library.path", //"$home/devspace/noa/cmake-build-release/kmath")
+            "$cppBuildDir/kmath")
     }
 }
 
diff --git a/kmath-noa/src/main/kotlin/space/kscience/kmath/noa/utils.kt b/kmath-noa/src/main/kotlin/space/kscience/kmath/noa/utils.kt
index 853e5debf..5ad18c2eb 100644
--- a/kmath-noa/src/main/kotlin/space/kscience/kmath/noa/utils.kt
+++ b/kmath-noa/src/main/kotlin/space/kscience/kmath/noa/utils.kt
@@ -20,3 +20,14 @@ public fun setNumThreads(numThreads: Int): Unit {
 public fun setSeed(seed: Int): Unit {
     JNoa.setSeed(seed)
 }
+
+public inline fun <T, GradTensorT : NoaTensorOverField<T>,
+        GradAlgebraT : NoaPartialDivisionAlgebra<T, GradTensorT>>
+        GradAlgebraT.withGradAt(
+    tensor: GradTensorT,
+    block: GradAlgebraT.(GradTensorT) -> GradTensorT
+): GradTensorT {
+    tensor.requiresGrad = true
+    return this.block(tensor)
+}
diff --git a/kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestAlgebra.kt b/kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestAlgebra.kt
index f4835f986..58b9be324 100644
--- a/kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestAlgebra.kt
+++ b/kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestAlgebra.kt
@@ -81,5 +81,4 @@ class TestAlgebra {
             testingBatchedSymEig(device)
         }
     }!!
-
-}
\ No newline at end of file
+}
diff --git a/kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestAutoGrad.kt b/kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestAutoGrad.kt
new file mode 100644
index 000000000..49669cc8f
--- /dev/null
+++ b/kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestAutoGrad.kt
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.noa
+
+import kotlin.test.Test
+import kotlin.test.assertTrue
+
+internal fun NoaFloat.testingAutoGrad(device: Device = Device.CPU): Unit {
+    setSeed(SEED)
+    val dim = 3
+    val tensorX = randNormal(shape = intArrayOf(dim), device = device)
+    val randFeatures = randNormal(shape = intArrayOf(dim, dim), device = device)
+    val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
+    val tensorMu = randNormal(shape = intArrayOf(dim), device = device)
+
+    val expressionAtX = withGradAt(tensorX) { x ->
+        0.5f * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9f
+    }
+
+    val gradientAtX = expressionAtX.autoGradient(tensorX, retainGraph = true)
+    val hessianAtX = expressionAtX.autoHessian(tensorX)
+    val expectedGradientAtX = (tensorSigma dot tensorX) + tensorMu
+
+    val error = (gradientAtX - expectedGradientAtX).abs().sum() +
+            (hessianAtX - tensorSigma).abs().sum()
+    assertTrue(error < TOLERANCE)
+}
+
+internal fun NoaFloat.testingBatchedAutoGrad(device: Device = Device.CPU): Unit {
+    setSeed(SEED)
+    val batch = intArrayOf(2)
+    val dim = 2
+    val tensorX = randNormal(shape = batch + intArrayOf(1, dim), device = device)
+    val randFeatures = randNormal(shape = batch + intArrayOf(dim, dim), device = device)
+    val tensorSigma = randFeatures + randFeatures.transpose(-2, -1)
+    val tensorMu = randNormal(shape = batch + intArrayOf(1, dim), device = device)
+
+    val expressionAtX = withGradAt(tensorX) { x ->
+        val xt = x.transpose(-1, -2)
+        (0.5f * (x dot (tensorSigma dot xt)) + (tensorMu dot xt) + 58.2f).sumAll()
+    }
+
+    val gradientAtX = expressionAtX.autoGradient(tensorX)
+    val expectedGradientAtX = (tensorX dot tensorSigma) + tensorMu
+
+    val error = (gradientAtX - expectedGradientAtX).abs().sum()
+    assertTrue(error < TOLERANCE)
+}
+
+class TestAutoGrad {
+
+    @Test
+    fun testAutoGrad() = NoaFloat {
+        withCuda { device ->
+            testingAutoGrad(device)
+        }
+    }!!
+
+    @Test
+    fun testBatchedAutoGrad() = NoaFloat {
+        withCuda { device ->
+            testingBatchedAutoGrad(device)
+        }
+    }!!
+}
diff --git a/kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestUtils.kt b/kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestUtils.kt
index e26c7d1f3..61e6f0362 100644
--- a/kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestUtils.kt
+++ b/kmath-noa/src/test/kotlin/space/kscience/kmath/noa/TestUtils.kt
@@ -5,6 +5,7 @@
 
 package space.kscience.kmath.noa
 
+import space.kscience.kmath.noa.memory.NoaScope
 import kotlin.test.Test
 import kotlin.test.assertEquals
 
@@ -59,4 +60,16 @@ class TestUtils {
         }
     }!!
 
+    @Test
+    fun testScoping(): Unit {
+        val scope = NoaScope()
+        val tensor = NoaFloat(scope){
+            full(5f, intArrayOf(1))
+        }!!
+        assertEquals(tensor.numElements, 1)
+        assertEquals(scope.disposables.size, 1)
+        scope.disposeAll()
+        assertEquals(scope.disposables.size, 0)
+    }
+
 }