testing autograd

Roland Grinis 2021-07-13 15:12:19 +01:00
parent 28fac22f12
commit 0bc2c12a05
6 changed files with 161 additions and 4 deletions


@@ -43,3 +43,68 @@ To load the native library you will need to add to the VM options:
-Djava.library.path=${HOME}/.konan/third-party/noa-v0.0.1/cpp-build/kmath
```
## Usage
We implement the tensor algebra interfaces
from [kmath-tensors](../kmath-tensors):
```kotlin
NoaFloat {
    val tensor =
        randNormal(
            shape = intArrayOf(7, 5, 3),
            device = Device.CPU) // or Device.CUDA(0) for GPU
    // Compute SVD
    val (tensorU, tensorS, tensorV) = tensor.svd()
    // Reconstruct tensor
    val tensorReg =
        tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1))
}
```
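For reference, the reconstruction in the last line follows the standard SVD identity, with `diagonalEmbedding` building the diagonal matrix of singular values:

$$A = U \,\operatorname{diag}(S)\, V^{\top}$$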
The [AutoGrad](https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html)
engine is exposed:
```kotlin
NoaFloat {
    // Create a quadratic function
    val dim = 3
    val tensorX = randNormal(shape = intArrayOf(dim))
    val randFeatures = randNormal(shape = intArrayOf(dim, dim))
    val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
    val tensorMu = randNormal(shape = intArrayOf(dim))
    // Create a differentiable expression
    val expressionAtX = withGradAt(tensorX) { x ->
        0.5f * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9f
    }
    // Evaluate the gradient at tensorX
    // retaining the graph for the hessian computation
    val gradientAtX = expressionAtX.autoGradient(tensorX, retainGraph = true)
    // Compute the hessian at tensorX
    val hessianAtX = expressionAtX.autoHessian(tensorX)
}
```
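Since `tensorSigma` is symmetric by construction, the quadratic expression above has the closed forms

$$f(x) = \tfrac{1}{2}\, x^{\top} \Sigma x + \mu^{\top} x + c, \qquad \nabla f(x) = \Sigma x + \mu, \qquad \nabla^2 f(x) = \Sigma,$$

which is what `autoGradient` and `autoHessian` are checked against in the tests further down.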
Native memory management relies on scoping
with [NoaScope](src/main/kotlin/space/kscience/kmath/noa/memory/NoaScope.kt),
which is readily available within an algebra context.
Manual management is also possible:
```kotlin
// Create a scope
val scope = NoaScope()
val tensor = NoaFloat(scope){
    full(5f, intArrayOf(1))
}!! // the result might be null
// If the computation fails, resources will be freed automatically.
// Otherwise it's your responsibility:
scope.disposeAll()
// Attempting to use tensor here is undefined behaviour
```
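A minimal sketch (not part of this commit), assuming only the `NoaScope`/`NoaFloat` calls already shown: manual disposal can be guarded with `try`/`finally` so the scope is released on every exit path.
```kotlin
val scope = NoaScope()
try {
    val tensor = NoaFloat(scope) {
        full(5f, intArrayOf(1))
    } ?: error("Native computation failed")
    // ... work with tensor while the scope is alive ...
} finally {
    // Frees every native handle registered in the scope
    scope.disposeAll()
}
```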


@@ -160,8 +160,8 @@ tasks["compileJava"].dependsOn(buildCpp)
tasks {
    withType<Test>{
-        systemProperty("java.library.path", "$home/devspace/noa/cmake-build-release/kmath")
-            //"$cppBuildDir/kmath")
+        systemProperty("java.library.path", //"$home/devspace/noa/cmake-build-release/kmath")
+            "$cppBuildDir/kmath")
    }
}
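For clarity, after this change the test task configuration effectively reads as follows (reconstructed from the hunk above; `cppBuildDir` is assumed to be defined earlier in the build script):
```kotlin
tasks {
    withType<Test> {
        // Point the JVM at the locally built native libraries
        systemProperty("java.library.path", "$cppBuildDir/kmath")
    }
}
```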


@@ -20,3 +20,14 @@ public fun setNumThreads(numThreads: Int): Unit {
public fun setSeed(seed: Int): Unit {
    JNoa.setSeed(seed)
}
/**
 * Enables gradient tracking on [tensor] and evaluates [block] on it,
 * returning the resulting differentiable expression.
 */
public inline fun <T, ArrayT,
        GradTensorT : NoaTensorOverField<T>,
        GradAlgebraT : NoaPartialDivisionAlgebra<T, ArrayT, GradTensorT>>
        GradAlgebraT.withGradAt(
    tensor: GradTensorT,
    block: GradAlgebraT.(GradTensorT) -> GradTensorT
): GradTensorT {
    tensor.requiresGrad = true
    return this.block(tensor)
}


@@ -81,5 +81,4 @@ class TestAlgebra {
            testingBatchedSymEig(device)
        }
    }!!
}


@@ -0,0 +1,69 @@
/*
 * Copyright 2018-2021 KMath contributors.
 * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
 */

package space.kscience.kmath.noa

import kotlin.test.Test
import kotlin.test.assertTrue

internal fun NoaFloat.testingAutoGrad(device: Device = Device.CPU): Unit {
    setSeed(SEED)
    val dim = 3
    val tensorX = randNormal(shape = intArrayOf(dim), device = device)
    val randFeatures = randNormal(shape = intArrayOf(dim, dim), device = device)
    val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
    val tensorMu = randNormal(shape = intArrayOf(dim), device = device)
    val expressionAtX = withGradAt(tensorX) { x ->
        0.5f * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9f
    }
    val gradientAtX = expressionAtX.autoGradient(tensorX, retainGraph = true)
    val hessianAtX = expressionAtX.autoHessian(tensorX)
    val expectedGradientAtX = (tensorSigma dot tensorX) + tensorMu
    val error = (gradientAtX - expectedGradientAtX).abs().sum() +
            (hessianAtX - tensorSigma).abs().sum()
    assertTrue(error < TOLERANCE)
}

internal fun NoaFloat.testingBatchedAutoGrad(device: Device = Device.CPU): Unit {
    setSeed(SEED)
    val batch = intArrayOf(2)
    val dim = 2
    val tensorX = randNormal(shape = batch + intArrayOf(1, dim), device = device)
    val randFeatures = randNormal(shape = batch + intArrayOf(dim, dim), device = device)
    val tensorSigma = randFeatures + randFeatures.transpose(-2, -1)
    val tensorMu = randNormal(shape = batch + intArrayOf(1, dim), device = device)
    val expressionAtX = withGradAt(tensorX) { x ->
        val xt = x.transpose(-1, -2)
        (0.5f * (x dot (tensorSigma dot xt)) + (tensorMu dot xt) + 58.2f).sumAll()
    }
    val gradientAtX = expressionAtX.autoGradient(tensorX)
    val expectedGradientAtX = (tensorX dot tensorSigma) + tensorMu
    val error = (gradientAtX - expectedGradientAtX).abs().sum()
    assertTrue(error < TOLERANCE)
}

class TestAutoGrad {

    @Test
    fun testAutoGrad() = NoaFloat {
        withCuda { device ->
            testingAutoGrad(device)
        }
    }!!

    @Test
    fun testBatchedAutoGrad() = NoaFloat {
        withCuda { device ->
            testingBatchedAutoGrad(device)
        }
    }!!
}
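For reference, in the batched test each `x` is a 1×`dim` row vector and `tensorSigma` is symmetric, so the gradient of the summed expression with respect to `x` is

$$\nabla_x \left( \tfrac{1}{2}\, x \Sigma x^{\top} + \mu x^{\top} + c \right) = x \Sigma + \mu,$$

which is exactly `expectedGradientAtX` in `testingBatchedAutoGrad`.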


@@ -5,6 +5,7 @@
package space.kscience.kmath.noa

import space.kscience.kmath.noa.memory.NoaScope
import kotlin.test.Test
import kotlin.test.assertEquals
@@ -59,4 +60,16 @@ class TestUtils {
        }
    }!!

    @Test
    fun testScoping(): Unit {
        val scope = NoaScope()
        val tensor = NoaFloat(scope){
            full(5f, intArrayOf(1))
        }!!
        assertEquals(tensor.numElements, 1)
        assertEquals(scope.disposables.size, 1)
        scope.disposeAll()
        assertEquals(scope.disposables.size, 0)
    }
}