Moving test implementations to common

This commit is contained in:
Roland Grinis 2021-01-18 19:02:01 +00:00
parent b30ca920e1
commit 274d1a3105
18 changed files with 355 additions and 398 deletions

View File

@@ -15,69 +15,7 @@ This builds `ctorch`, a C wrapper for `LibTorch` placed inside:
`~/.konan/third-party/kmath-torch-0.2.0-dev-4/cpp-build`
You will have to link against it in your own project. Here is an example of a build script for a standalone application:
```kotlin
//build.gradle.kts
plugins {
    id("ru.mipt.npm.mpp")
}
repositories {
    jcenter()
    mavenLocal()
}
val home = System.getProperty("user.home")
val kver = "0.2.0-dev-4"
val cppBuildDir = "$home/.konan/third-party/kmath-torch-$kver/cpp-build"
kotlin {
    explicitApiWarning()
    val nativeTarget = linuxX64("your.app")
    nativeTarget.apply {
        binaries {
            executable {
                entryPoint = "your.app.main"
            }
            all {
                linkerOpts(
                    "-L$cppBuildDir",
                    "-Wl,-rpath=$cppBuildDir",
                    "-lctorch"
                )
            }
        }
    }
    val main by nativeTarget.compilations.getting
    sourceSets {
        val nativeMain by creating {
            dependencies {
                implementation("kscience.kmath:kmath-torch:$kver")
            }
        }
        main.defaultSourceSet.dependsOn(nativeMain)
    }
}
```
```kotlin
//settings.gradle.kts
pluginManagement {
    repositories {
        gradlePluginPortal()
        jcenter()
        maven("https://dl.bintray.com/mipt-npm/dev")
    }
    plugins {
        id("ru.mipt.npm.mpp") version "0.7.1"
        kotlin("jvm") version "1.4.21"
    }
}
```
You will have to link against it in your own project.
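At a minimum, the consuming build needs linker options along these lines (a sketch only; `cppBuildDir` is assumed to point at the `cpp-build` directory above):
```kotlin
// Minimal sketch: link a Kotlin/Native binary against the ctorch wrapper.
// cppBuildDir is assumed to resolve to the cpp-build directory shown above.
binaries.all {
    linkerOpts(
        "-L$cppBuildDir",          // where libctorch lives
        "-Wl,-rpath=$cppBuildDir", // so the runtime loader can find it
        "-lctorch"
    )
}
```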
## Usage
@@ -96,32 +34,33 @@ TorchTensorRealAlgebra {
    val gpuRealTensor: TorchTensorReal = copyFromArray(
        array = (1..8).map { it * 2.5 }.toList().toDoubleArray(),
        shape = intArrayOf(2, 2, 2),
        device = TorchDevice.TorchCUDA(0)
        device = Device.CUDA(0)
    )
    println(gpuRealTensor)
}
```
Enjoy a high-performance automatic differentiation engine:
A high-performance automatic differentiation engine is available:
```kotlin
TorchTensorRealAlgebra {
    val dim = 10
    val device = TorchDevice.TorchCPU //or TorchDevice.TorchCUDA(0)
    val x = randNormal(shape = intArrayOf(dim), device = device)
    val device = Device.CPU //or Device.CUDA(0)
    val X = randNormal(shape = intArrayOf(dim, dim), device = device)
    val Q = X + X.transpose(0, 1)
    val mu = randNormal(shape = intArrayOf(dim), device = device)
    val tensorX = randNormal(shape = intArrayOf(dim), device = device)
    val randFeatures = randNormal(shape = intArrayOf(dim, dim), device = device)
    val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
    val tensorMu = randNormal(shape = intArrayOf(dim), device = device)
    // expression to differentiate w.r.t. x
    val f = x.withGrad {
        0.5 * (x dot (Q dot x)) + (mu dot x) + 25.3
    }
    // value of the gradient at x
    val gradf = f grad x
    // value of the hessian at x
    val hessf = f hess x
    // expression to differentiate w.r.t. x evaluated at x = tensorX
    val expressionAtX = withGradAt(tensorX, { x ->
        0.5 * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9
    })
    // value of the gradient at x = tensorX
    val gradientAtX = expressionAtX.grad(tensorX, retainGraph = true)
    // value of the hessian at x = tensorX
    val hessianAtX = expressionAtX hess tensorX
}
```
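Since `tensorSigma` is symmetric by construction (`randFeatures + randFeatures.transpose(0, 1)`), the gradient and hessian computed above are the usual quadratic-form derivatives:
```latex
f(x) = \tfrac{1}{2} x^\top \Sigma x + \mu^\top x + c,
\qquad \nabla f(x) = \Sigma x + \mu,
\qquad \nabla^2 f(x) = \Sigma
```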

View File

@@ -127,6 +127,7 @@ val generateJNIHeader by tasks.registering {
kotlin {
    explicitApiWarning()
    jvm {
        withJava()
    }
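`withJava()` turns on the Java plugin for the JVM target, presumably so that the `generateJNIHeader` task referenced above can compile the Java sources it needs. A hypothetical sketch of what such a task might look like (the real body is not shown in this diff; the paths below are placeholders):
```kotlin
// Hypothetical sketch only: generate JNI headers by invoking javac -h.
// The source path and output directory are placeholders, not the project's real ones.
val generateJNIHeader by tasks.registering(Exec::class) {
    workingDir(projectDir)
    commandLine(
        "javac", "-h", "src/jvmMain/resources",
        "src/jvmMain/java/kscience/kmath/torch/JTorch.java"
    )
}
```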

View File

@@ -1,9 +1,9 @@
@file:Suppress("NOTHING_TO_INLINE")
package kscience.kmath.torch
import kscience.kmath.structures.TensorStructure
public interface TorchTensor<T> : TensorStructure<T> {
    public fun item(): T
    public val strides: IntArray

View File

@@ -1,3 +1,5 @@
@file:Suppress("NOTHING_TO_INLINE")
package kscience.kmath.torch
import kscience.kmath.structures.*

View File

@@ -0,0 +1,53 @@
@file:Suppress("NOTHING_TO_INLINE")
package kscience.kmath.torch
import kotlin.test.assertTrue
internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Float, FloatArray, TorchTensorType>>
        TorchTensorAlgebraType.testingAutoGrad(device: Device = Device.CPU): Unit {
    setSeed(SEED)
    val dim = 3
    val tensorX = randNormal(shape = intArrayOf(dim), device = device)
    val randFeatures = randNormal(shape = intArrayOf(dim, dim), device = device)
    val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
    val tensorMu = randNormal(shape = intArrayOf(dim), device = device)
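    // f(x) = 0.5 * x^T Sigma x + mu^T x + c; Sigma is symmetric, so grad f = Sigma x + mu and hess f = Sigma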
    val expressionAtX = withGradAt(tensorX, { x ->
        0.5f * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9f
    })
    val gradientAtX = expressionAtX.grad(tensorX, retainGraph = true)
    val hessianAtX = expressionAtX hess tensorX
    val expectedGradientAtX = (tensorSigma dot tensorX) + tensorMu
    val error = (gradientAtX - expectedGradientAtX).abs().sum().value() +
            (hessianAtX - tensorSigma).abs().sum().value()
    assertTrue(error < TOLERANCE)
}
internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Float, FloatArray, TorchTensorType>>
        TorchTensorAlgebraType.testingBatchedAutoGrad(device: Device = Device.CPU): Unit {
    setSeed(SEED)
    val batch = intArrayOf(2)
    val dim = 2
    val tensorX = randNormal(shape = batch + intArrayOf(1, dim), device = device)
    val randFeatures = randNormal(shape = batch + intArrayOf(dim, dim), device = device)
    val tensorSigma = randFeatures + randFeatures.transpose(-2, -1)
    val tensorMu = randNormal(shape = batch + intArrayOf(1, dim), device = device)
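    // x is a batch of row vectors here, so the expected gradient is x Sigma + mu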
    val expressionAtX = withGradAt(tensorX, { x ->
        val xt = x.transpose(-1, -2)
        0.5f * (x dot (tensorSigma dot xt)) + (tensorMu dot xt) + 58.2f
    })
    expressionAtX.sumAssign()
    val gradientAtX = expressionAtX grad tensorX
    val expectedGradientAtX = (tensorX dot tensorSigma) + tensorMu
    val error = (gradientAtX - expectedGradientAtX).abs().sum().value()
    assertTrue(error < TOLERANCE)
}

View File

@@ -0,0 +1,53 @@
@file:Suppress("NOTHING_TO_INLINE")
package kscience.kmath.torch
import kotlin.test.assertEquals
import kotlin.test.assertTrue
internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Float, FloatArray, TorchTensorType>>
        TorchTensorAlgebraType.testingCopying(device: Device = Device.CPU): Unit {
    val array = (1..24).map { 10f * it * it }.toFloatArray()
    val shape = intArrayOf(2, 3, 4)
    val tensor = copyFromArray(array, shape = shape, device = device)
    val copyOfTensor = tensor.copy()
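    // mutating the original tensor must leave its copy untouched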
    tensor[intArrayOf(0, 0)] = 0.1f
    assertTrue(copyOfTensor.copyToArray() contentEquals array)
    assertEquals(0.1f, tensor[intArrayOf(0, 0)])
    if (device != Device.CPU) {
        val normalCpu = randNormal(intArrayOf(2, 3))
        val normalGpu = normalCpu.copyToDevice(device)
        assertTrue(normalCpu.copyToArray() contentEquals normalGpu.copyToArray())
        val uniformGpu = randUniform(intArrayOf(3, 2), device)
        val uniformCpu = uniformGpu.copyToDevice(Device.CPU)
        assertTrue(uniformGpu.copyToArray() contentEquals uniformCpu.copyToArray())
    }
}
internal inline fun <T, PrimitiveArrayType, TorchTensorType : TorchTensorOverField<T>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<T, PrimitiveArrayType, TorchTensorType>>
        TorchTensorAlgebraType.testingRequiresGrad(): Unit {
    val tensor = randNormal(intArrayOf(3))
    assertTrue(!tensor.requiresGrad)
    tensor.requiresGrad = true
    assertTrue(tensor.requiresGrad)
    tensor.requiresGrad = false
    assertTrue(!tensor.requiresGrad)
    tensor.requiresGrad = true
    val detachedTensor = tensor.detachFromGraph()
    assertTrue(!detachedTensor.requiresGrad)
}
internal inline fun <TorchTensorType : TorchTensor<Int>,
        TorchTensorAlgebraType : TorchTensorAlgebra<Int, IntArray, TorchTensorType>>
        TorchTensorAlgebraType.testingViewWithNoCopy(device: Device = Device.CPU) {
    val tensor = copyFromArray(intArrayOf(1, 2, 3, 4, 5, 6), shape = intArrayOf(6), device = device)
    val viewTensor = tensor.view(intArrayOf(2, 3))
    assertTrue(viewTensor.shape contentEquals intArrayOf(2, 3))
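    // the view shares storage with the original tensor: writes through the view are visible in both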
    viewTensor[intArrayOf(0, 0)] = 10
    assertEquals(tensor[intArrayOf(0)], 10)
}

View File

@@ -0,0 +1,133 @@
@file:Suppress("NOTHING_TO_INLINE")
package kscience.kmath.torch
import kscience.kmath.linear.RealMatrixContext
import kscience.kmath.operations.invoke
import kscience.kmath.structures.Matrix
import kotlin.math.*
import kotlin.test.*
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
        TorchTensorAlgebraType.testingScalarProduct(device: Device = Device.CPU): Unit {
    val lhs = randUniform(shape = intArrayOf(3), device = device)
    val rhs = randUniform(shape = intArrayOf(3), device = device)
    val product = lhs dot rhs
    var expected = 0.0
    lhs.elements().forEach {
        expected += it.second * rhs[it.first]
    }
    assertTrue(abs(expected - product.value()) < TOLERANCE)
}
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
        TorchTensorAlgebraType.testingMatrixMultiplication(device: Device = Device.CPU): Unit {
    setSeed(SEED)
    val lhsTensor = randNormal(shape = intArrayOf(3, 3), device = device)
    val rhsTensor = randNormal(shape = intArrayOf(3, 3), device = device)
    val product = lhsTensor dot rhsTensor
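    // reference result computed element-wise with kmath's RealMatrixContext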
    val expected: Matrix<Double> = RealMatrixContext {
        val lhs = produce(3, 3) { i, j -> lhsTensor[intArrayOf(i, j)] }
        val rhs = produce(3, 3) { i, j -> rhsTensor[intArrayOf(i, j)] }
        lhs dot rhs
    }
    val lhsTensorCopy = lhsTensor.copy()
    val rhsTensorCopy = rhsTensor.copy()
    lhsTensorCopy dotAssign rhsTensor
    lhsTensor dotRightAssign rhsTensorCopy
    var error = 0.0
    product.elements().forEach {
        error += abs(expected[it.first] - it.second) +
                abs(expected[it.first] - lhsTensorCopy[it.first]) +
                abs(expected[it.first] - rhsTensorCopy[it.first])
    }
    assertTrue(error < TOLERANCE)
}
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
        TorchTensorAlgebraType.testingLinearStructure(device: Device = Device.CPU): Unit {
    val shape = intArrayOf(3)
    val tensorA = full(value = -4.5, shape = shape, device = device)
    val tensorB = full(value = 10.9, shape = shape, device = device)
    val tensorC = full(value = 789.3, shape = shape, device = device)
    val tensorD = full(value = -72.9, shape = shape, device = device)
    val tensorE = full(value = 553.1, shape = shape, device = device)
    val result = 15.8 * tensorA - 1.5 * tensorB * (-tensorD) + 0.02 * tensorC / tensorE - 39.4
    val expected = copyFromArray(
        array = (1..3).map {
            15.8 * (-4.5) - 1.5 * 10.9 * 72.9 + 0.02 * 789.3 / 553.1 - 39.4
        }.toDoubleArray(),
        shape = shape,
        device = device
    )
    val assignResult = full(value = 0.0, shape = shape, device = device)
    tensorA *= 15.8
    tensorB *= 1.5
    tensorB *= -tensorD
    tensorC *= 0.02
    tensorC /= tensorE
    assignResult += tensorA
    assignResult -= tensorB
    assignResult += tensorC
    assignResult += -39.4
    val error = (expected - result).abs().sum().value() +
            (expected - assignResult).abs().sum().value()
    assertTrue(error < TOLERANCE)
}
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
        TorchTensorAlgebraType.testingTensorTransformations(device: Device = Device.CPU): Unit {
    setSeed(SEED)
    val tensor = randNormal(shape = intArrayOf(3, 3), device = device)
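    // log(exp(x)) should recover the tensor up to numerical error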
    val result = tensor.exp().log()
    val assignResult = tensor.copy()
    assignResult.transposeAssign(0, 1)
    assignResult.expAssign()
    assignResult.logAssign()
    assignResult.transposeAssign(0, 1)
    val error = tensor - result
    error.absAssign()
    error.sumAssign()
    error += (tensor - assignResult).abs().sum()
    assertTrue(error.value() < TOLERANCE)
}
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
        TorchTensorAlgebraType.testingBatchedSVD(device: Device = Device.CPU): Unit {
    val tensor = randNormal(shape = intArrayOf(7, 5, 3), device = device)
    val (tensorU, tensorS, tensorV) = tensor.svd()
    val error = tensor - (tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1)))
    assertTrue(error.abs().sum().value() < TOLERANCE)
}
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
        TorchTensorAlgebraType.testingBatchedSymEig(device: Device = Device.CPU): Unit {
    val tensor = randNormal(shape = intArrayOf(5, 5), device = device)
    val tensorSigma = tensor + tensor.transpose(-2, -1)
    val (tensorS, tensorV) = tensorSigma.symEig()
    val error = tensorSigma - (tensorV dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1)))
    assertTrue(error.abs().sum().value() < TOLERANCE)
}
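Both decomposition tests check the standard reconstruction identities (the `symEig` input is first symmetrized as `tensor + tensor.transpose(-2, -1)`):
```latex
A = U \,\operatorname{diag}(S)\, V^\top \quad \text{(SVD)},
\qquad
A = V \,\operatorname{diag}(S)\, V^\top \quad \text{(symmetric eigendecomposition)}
```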

View File

@@ -1,5 +1,39 @@
@file:Suppress("NOTHING_TO_INLINE")
package kscience.kmath.torch
import kotlin.test.assertEquals
internal val SEED = 987654
internal val TOLERANCE = 1e-6
internal inline fun <T, PrimitiveArrayType, TorchTensorType : TorchTensor<T>,
        TorchTensorAlgebraType : TorchTensorAlgebra<T, PrimitiveArrayType, TorchTensorType>>
        TorchTensorAlgebraType.withCuda(block: TorchTensorAlgebraType.(Device) -> Unit): Unit {
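    // run the test block on the CPU and, when CUDA is available, on the first GPU as well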
    this.block(Device.CPU)
    if (cudaAvailable()) this.block(Device.CUDA(0))
}
internal inline fun <T, PrimitiveArrayType, TorchTensorType : TorchTensor<T>,
        TorchTensorAlgebraType : TorchTensorAlgebra<T, PrimitiveArrayType, TorchTensorType>>
        TorchTensorAlgebraType.testingSetNumThreads(): Unit {
    val numThreads = 2
    setNumThreads(numThreads)
    assertEquals(numThreads, getNumThreads())
}
internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
        TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Float, FloatArray, TorchTensorType>>
        TorchTensorAlgebraType.testingSetSeed(device: Device = Device.CPU): Unit {
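    // re-seeding the generator must reproduce the same integral, normal and uniform draws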
    setSeed(SEED)
    val integral = randIntegral(0f, 100f, IntArray(0), device = device).value()
    val normal = randNormal(IntArray(0), device = device).value()
    val uniform = randUniform(IntArray(0), device = device).value()
    setSeed(SEED)
    val nextIntegral = randIntegral(0f, 100f, IntArray(0), device = device).value()
    val nextNormal = randNormal(IntArray(0), device = device).value()
    val nextUniform = randUniform(IntArray(0), device = device).value()
    assertEquals(normal, nextNormal)
    assertEquals(uniform, nextUniform)
    assertEquals(integral, nextIntegral)
}

View File

@@ -9,14 +9,6 @@ public fun setNumThreads(numThreads: Int): Unit {
    JTorch.setNumThreads(numThreads)
}
public fun cudaAvailable(): Boolean {
    TODO("Implementation not available yet")
}
public fun setSeed(seed: Int): Unit {
    TODO("Implementation not available yet")
}
public fun runCPD(): Unit {
    val tensorHandle = JTorch.createTensor()

View File

@@ -1,13 +0,0 @@
package kscience.kmath.torch
import kotlin.test.*
internal class TestAutogradGPU {
    @Test
    fun testAutoGrad() = testingAutoGrad(dim = 3, device = Device.CUDA(0))
    @Test
    fun testBatchedAutoGrad() = testingBatchedAutoGrad(
        batch = intArrayOf(2), dim = 3, device = Device.CUDA(0))
}

View File

@@ -1,32 +0,0 @@
package kscience.kmath.torch
import kotlin.test.*
class TestTorchTensorAlgebraGPU {
    @Test
    fun testScalarProduct() =
        testingScalarProduct(device = Device.CUDA(0))
    @Test
    fun testMatrixMultiplication() =
        testingMatrixMultiplication(device = Device.CUDA(0))
    @Test
    fun testLinearStructure() =
        testingLinearStructure(device = Device.CUDA(0))
    @Test
    fun testTensorTransformations() =
        testingTensorTransformations(device = Device.CUDA(0))
    @Test
    fun testBatchedSVD() =
        testingBatchedSVD(device = Device.CUDA(0))
    @Test
    fun testBatchedSymEig() =
        testingBatchedSymEig(device = Device.CUDA(0))
}

View File

@@ -1,22 +0,0 @@
package kscience.kmath.torch
import kotlin.test.*
class TestTorchTensorGPU {
    @Test
    fun testCopyFromArray() = testingCopyFromArray(Device.CUDA(0))
    @Test
    fun testCopyToDevice() = TorchTensorRealAlgebra {
        setSeed(SEED)
        val normalCpu = randNormal(intArrayOf(2, 3))
        val normalGpu = normalCpu.copyToDevice(Device.CUDA(0))
        assertTrue(normalCpu.copyToArray() contentEquals normalGpu.copyToArray())
        val uniformGpu = randUniform(intArrayOf(3, 2), Device.CUDA(0))
        val uniformCpu = uniformGpu.copyToDevice(Device.CPU)
        assertTrue(uniformGpu.copyToArray() contentEquals uniformCpu.copyToArray())
    }
}

View File

@@ -1,16 +0,0 @@
package kscience.kmath.torch
import kotlin.test.*
internal class TestUtilsGPU {
    @Test
    fun testCudaAvailable() {
        assertTrue(cudaAvailable())
    }
    @Test
    fun testSetSeed() = testingSetSeed(Device.CUDA(0))
}

View File

@@ -133,9 +133,9 @@ public sealed class TorchTensorPartialDivisionAlgebraNative<T, TVar : CPrimitive
    TorchTensorAlgebraNative<T, TVar, PrimitiveArrayType, TorchTensorType>(scope),
    TorchTensorPartialDivisionAlgebra<T, PrimitiveArrayType, TorchTensorType> {
    override operator fun TorchTensorType.div(b: TorchTensorType): TorchTensorType {
        if (checks) checkLinearOperation(this, b)
        return wrap(div_tensor(this.tensorHandle, b.tensorHandle)!!)
    override operator fun TorchTensorType.div(other: TorchTensorType): TorchTensorType {
        if (checks) checkLinearOperation(this, other)
        return wrap(div_tensor(this.tensorHandle, other.tensorHandle)!!)
    }
    override operator fun TorchTensorType.divAssign(other: TorchTensorType): Unit {
View File

@@ -2,61 +2,19 @@ package kscience.kmath.torch
import kotlin.test.*
internal fun testingAutoGrad(dim: Int, device: Device = Device.CPU): Unit {
    TorchTensorRealAlgebra {
        setSeed(SEED)
        val tensorX = randNormal(shape = intArrayOf(dim), device = device)
        val randFeatures = randNormal(shape = intArrayOf(dim, dim), device = device)
        val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
        val tensorMu = randNormal(shape = intArrayOf(dim), device = device)
        val expressionAtX = withGradAt(tensorX, { x ->
            0.5 * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9
        })
        val gradientAtX = expressionAtX.grad(tensorX, retainGraph = true)
        val hessianAtX = expressionAtX hess tensorX
        val expectedGradientAtX = (tensorSigma dot tensorX) + tensorMu
        val error = (gradientAtX - expectedGradientAtX).abs().sum().value() +
                (hessianAtX - tensorSigma).abs().sum().value()
        assertTrue(error < TOLERANCE)
    }
}
internal fun testingBatchedAutoGrad(
    batch: IntArray,
    dim: Int,
    device: Device = Device.CPU
): Unit {
    TorchTensorRealAlgebra {
        setSeed(SEED)
        val tensorX = randNormal(shape = batch + intArrayOf(1, dim), device = device)
        val randFeatures = randNormal(shape = batch + intArrayOf(dim, dim), device = device)
        val tensorSigma = randFeatures + randFeatures.transpose(-2, -1)
        val tensorMu = randNormal(shape = batch + intArrayOf(1, dim), device = device)
        val expressionAtX = withGradAt(tensorX, { x ->
            val xt = x.transpose(-1, -2)
            0.5 * (x dot (tensorSigma dot xt)) + (tensorMu dot xt) + 58.2
        })
        expressionAtX.sumAssign()
        val gradientAtX = expressionAtX grad tensorX
        val expectedGradientAtX = (tensorX dot tensorSigma) + tensorMu
        val error = (gradientAtX - expectedGradientAtX).abs().sum().value()
        assertTrue(error < TOLERANCE)
    }
}
internal class TestAutograd {
    @Test
    fun testAutoGrad() = testingAutoGrad(dim = 100)
    fun testAutoGrad() = TorchTensorFloatAlgebra {
        withCuda { device ->
            testingAutoGrad(device)
        }
    }
    @Test
    fun testBatchedAutoGrad() = testingBatchedAutoGrad(batch = intArrayOf(2, 10), dim = 30)
    fun testBatchedAutoGrad() = TorchTensorFloatAlgebra {
        withCuda { device ->
            testingBatchedAutoGrad(device)
        }
    }
}

View File

@@ -3,26 +3,18 @@ package kscience.kmath.torch
import kotlinx.cinterop.*
import kotlin.test.*
internal fun testingCopyFromArray(device: Device = Device.CPU): Unit {
    TorchTensorRealAlgebra {
        val array = (1..24).map { 10.0 * it * it }.toDoubleArray()
        val shape = intArrayOf(2, 3, 4)
        val tensor = copyFromArray(array, shape = shape, device = device)
        val copyOfTensor = tensor.copy()
        tensor[intArrayOf(0, 0)] = 0.1
        assertTrue(copyOfTensor.copyToArray() contentEquals array)
        assertEquals(0.1, tensor[intArrayOf(0, 0)])
    }
}
class TestTorchTensor {
    @Test
    fun testCopyFromArray() = testingCopyFromArray()
    fun testCopying() = TorchTensorFloatAlgebra {
        withCuda { device ->
            testingCopying(device)
        }
    }
    @Test
    fun testCopyLessDataTransferOnCPU() = memScoped {
    fun testNativeNoCopyDataTransferOnCPU() = memScoped {
        val data = allocArray<DoubleVar>(1)
        data[0] = 1.0
        TorchTensorRealAlgebra {
@@ -39,15 +31,7 @@ class TestTorchTensor {
    @Test
    fun testRequiresGrad() = TorchTensorRealAlgebra {
        val tensor = randNormal(intArrayOf(3))
        assertTrue(!tensor.requiresGrad)
        tensor.requiresGrad = true
        assertTrue(tensor.requiresGrad)
        tensor.requiresGrad = false
        assertTrue(!tensor.requiresGrad)
        tensor.requiresGrad = true
        val detachedTensor = tensor.detachFromGraph()
        assertTrue(!detachedTensor.requiresGrad)
        testingRequiresGrad()
    }
    @Test
@@ -63,11 +47,9 @@ class TestTorchTensor {
    @Test
    fun testViewWithNoCopy() = TorchTensorIntAlgebra {
        val tensor = copyFromArray(intArrayOf(1, 2, 3, 4, 5, 6), shape = intArrayOf(6))
        val viewTensor = tensor.view(intArrayOf(2, 3))
        assertTrue(viewTensor.shape contentEquals intArrayOf(2, 3))
        viewTensor[intArrayOf(0, 0)] = 10
        assertEquals(tensor[intArrayOf(0)], 10)
        withCuda { device ->
            testingViewWithNoCopy(device)
        }
    }
}

View File

@@ -1,146 +1,50 @@
package kscience.kmath.torch
import kscience.kmath.linear.RealMatrixContext
import kscience.kmath.operations.invoke
import kscience.kmath.structures.Matrix
import kotlin.math.*
import kotlin.test.*
internal fun testingScalarProduct(device: Device = Device.CPU): Unit {
    TorchTensorRealAlgebra {
        val lhs = randUniform(shape = intArrayOf(3), device = device)
        val rhs = randUniform(shape = intArrayOf(3), device = device)
        val product = lhs dot rhs
        var expected = 0.0
        lhs.elements().forEach {
            expected += it.second * rhs[it.first]
        }
        assertTrue(abs(expected - product.value()) < TOLERANCE)
    }
}
internal fun testingMatrixMultiplication(device: Device = Device.CPU): Unit {
    TorchTensorRealAlgebra {
        setSeed(SEED)
        val lhsTensor = randNormal(shape = intArrayOf(3, 3), device = device)
        val rhsTensor = randNormal(shape = intArrayOf(3, 3), device = device)
        val product = lhsTensor dot rhsTensor
        val expected: Matrix<Double> = RealMatrixContext {
            val lhs = produce(3, 3) { i, j -> lhsTensor[intArrayOf(i, j)] }
            val rhs = produce(3, 3) { i, j -> rhsTensor[intArrayOf(i, j)] }
            lhs dot rhs
        }
        val lhsTensorCopy = lhsTensor.copy()
        val rhsTensorCopy = rhsTensor.copy()
        lhsTensorCopy dotAssign rhsTensor
        lhsTensor dotRightAssign rhsTensorCopy
        var error = 0.0
        product.elements().forEach {
            error += abs(expected[it.first] - it.second) +
                    abs(expected[it.first] - lhsTensorCopy[it.first]) +
                    abs(expected[it.first] - rhsTensorCopy[it.first])
        }
        assertTrue(error < TOLERANCE)
    }
}
internal fun testingLinearStructure(device: Device = Device.CPU): Unit {
    TorchTensorRealAlgebra {
        withChecks {
            val shape = intArrayOf(3)
            val tensorA = full(value = -4.5, shape = shape, device = device)
            val tensorB = full(value = 10.9, shape = shape, device = device)
            val tensorC = full(value = 789.3, shape = shape, device = device)
            val tensorD = full(value = -72.9, shape = shape, device = device)
            val tensorE = full(value = 553.1, shape = shape, device = device)
            val result = 15.8 * tensorA - 1.5 * tensorB * (-tensorD) + 0.02 * tensorC / tensorE - 39.4
            val expected = copyFromArray(
                array = (1..3).map {
                    15.8 * (-4.5) - 1.5 * 10.9 * 72.9 + 0.02 * 789.3 / 553.1 - 39.4
                }.toDoubleArray(),
                shape = shape,
                device = device
            )
            val assignResult = full(value = 0.0, shape = shape, device = device)
            tensorA *= 15.8
            tensorB *= 1.5
            tensorB *= -tensorD
            tensorC *= 0.02
            tensorC /= tensorE
            assignResult += tensorA
            assignResult -= tensorB
            assignResult += tensorC
            assignResult += -39.4
            val error = (expected - result).abs().sum().value() +
                    (expected - assignResult).abs().sum().value()
            assertTrue(error < TOLERANCE)
            println(expected)
        }
    }
}
internal fun testingTensorTransformations(device: Device = Device.CPU): Unit {
    TorchTensorRealAlgebra {
        setSeed(SEED)
        val tensor = randNormal(shape = intArrayOf(3, 3), device = device)
        val result = tensor.exp().log()
        val assignResult = tensor.copy()
        assignResult.transposeAssign(0, 1)
        assignResult.expAssign()
        assignResult.logAssign()
        assignResult.transposeAssign(0, 1)
        val error = tensor - result
        error.absAssign()
        error.sumAssign()
        error += (tensor - assignResult).abs().sum()
        assertTrue(error.value() < TOLERANCE)
    }
}
internal fun testingBatchedSVD(device: Device = Device.CPU): Unit {
    TorchTensorRealAlgebra {
        val tensor = randNormal(shape = intArrayOf(7, 5, 3), device = device)
        val (tensorU, tensorS, tensorV) = tensor.svd()
        val error = tensor - (tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1)))
        assertTrue(error.abs().sum().value() < TOLERANCE)
    }
}
internal fun testingBatchedSymEig(device: Device = Device.CPU): Unit {
    TorchTensorRealAlgebra {
        val tensor = randNormal(shape = intArrayOf(5, 5), device = device)
        val tensorSigma = tensor + tensor.transpose(-2, -1)
        val (tensorS, tensorV) = tensorSigma.symEig()
        val error = tensorSigma - (tensorV dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1)))
        assertTrue(error.abs().sum().value() < TOLERANCE)
    }
}
internal class TestTorchTensorAlgebra {
    @Test
    fun testScalarProduct() = testingScalarProduct()
    fun testScalarProduct() = TorchTensorRealAlgebra {
        withCuda { device ->
            testingScalarProduct(device)
        }
    }
    @Test
    fun testMatrixMultiplication() = testingMatrixMultiplication()
    fun testMatrixMultiplication() = TorchTensorRealAlgebra {
        withCuda { device ->
            testingMatrixMultiplication(device)
        }
    }
    @Test
    fun testLinearStructure() = testingLinearStructure()
    fun testLinearStructure() = TorchTensorRealAlgebra {
        withCuda { device ->
            testingLinearStructure(device)
        }
    }
    @Test
    fun testTensorTransformations() = testingTensorTransformations()
    fun testTensorTransformations() = TorchTensorRealAlgebra {
        withCuda { device ->
            testingTensorTransformations(device)
        }
    }
    @Test
    fun testBatchedSVD() = testingBatchedSVD()
    fun testBatchedSVD() = TorchTensorRealAlgebra {
        withCuda { device ->
            testingBatchedSVD(device)
        }
    }
    @Test
    fun testBatchedSymEig() = testingBatchedSymEig()
    fun testBatchedSymEig() = TorchTensorRealAlgebra {
        withCuda { device ->
            testingBatchedSymEig(device)
        }
    }
}

View File

@@ -3,29 +3,18 @@ package kscience.kmath.torch
import kotlin.test.*
internal fun testingSetSeed(device: Device = Device.CPU): Unit {
    TorchTensorRealAlgebra {
        setSeed(SEED)
        val normal = randNormal(IntArray(0), device = device).value()
        val uniform = randUniform(IntArray(0), device = device).value()
        setSeed(SEED)
        val nextNormal = randNormal(IntArray(0), device = device).value()
        val nextUniform = randUniform(IntArray(0), device = device).value()
        assertEquals(normal, nextNormal)
        assertEquals(uniform, nextUniform)
    }
}
internal class TestUtils {
    @Test
    fun testSetNumThreads() {
        TorchTensorRealAlgebra {
            val numThreads = 2
            setNumThreads(numThreads)
            assertEquals(numThreads, getNumThreads())
        TorchTensorIntAlgebra {
            testingSetNumThreads()
        }
    }
    @Test
    fun testSetSeed() = testingSetSeed()
    fun testSeedSetting() = TorchTensorFloatAlgebra {
        withCuda { device ->
            testingSetSeed(device)
        }
    }
}