From 4f4fcba55987faea2cdefc76bd7534ed37e43316 Mon Sep 17 00:00:00 2001
From: rgrit91
Date: Sat, 16 Jan 2021 15:52:05 +0000
Subject: [PATCH] Adding systematic checks

---
 kmath-torch/ctorch/include/ctorch.h           |   2 +
 kmath-torch/ctorch/include/utils.hh           |   2 +-
 kmath-torch/ctorch/src/ctorch.cc              |   8 +
 .../kmath/torch/BenchmarkMatMultGPU.kt        |   6 +-
 .../torch/BenchmarkRandomGeneratorsGPU.kt     |  24 +-
 .../kscience/kmath/torch/TestAutogradGPU.kt   |   4 +-
 .../kmath/torch/TestTorchTensorAlgebraGPU.kt  |  12 +-
 .../kmath/torch/TestTorchTensorGPU.kt         |   8 +-
 .../kscience/kmath/torch/TestUtilsGPU.kt      |   2 +-
 .../kotlin/kscience.kmath.torch/Device.kt     |  22 ++
 .../kscience.kmath.torch/TensorAlgebra.kt     |  50 +++
 .../kscience.kmath.torch/TensorStructure.kt   |  13 +
 .../kscience.kmath.torch/TorchDevice.kt       |  22 --
 .../kscience.kmath.torch/TorchTensor.kt       |  16 +-
 .../TorchTensorAlgebra.kt                     | 354 +++++++++++++-----
 .../kscience/kmath/torch/BenchmarkMatMult.kt  |  12 +-
 .../kmath/torch/BenchmarkRandomGenerators.kt  |   6 +-
 .../kscience/kmath/torch/TestAutograd.kt      |   4 +-
 .../kscience/kmath/torch/TestTorchTensor.kt   |  20 +-
 .../kmath/torch/TestTorchTensorAlgebra.kt     |  16 +-
 .../kotlin/kscience/kmath/torch/TestUtils.kt  |   2 +-
 21 files changed, 413 insertions(+), 192 deletions(-)
 create mode 100644 kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/Device.kt
 create mode 100644 kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TensorAlgebra.kt
 create mode 100644 kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TensorStructure.kt
 delete mode 100644 kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchDevice.kt

diff --git a/kmath-torch/ctorch/include/ctorch.h b/kmath-torch/ctorch/include/ctorch.h
index b77a50d5a..0f60d7356 100644
--- a/kmath-torch/ctorch/include/ctorch.h
+++ b/kmath-torch/ctorch/include/ctorch.h
@@ -67,6 +67,8 @@ extern "C"
     TorchTensorHandle rand_float(int *shape, int shape_size, int device);
     TorchTensorHandle randn_float(int *shape, int shape_size, int device);
 
+    TorchTensorHandle randint_double(long low, long high, int *shape, int shape_size, int device);
+    TorchTensorHandle randint_float(long low, long high, int *shape, int shape_size, int device);
     TorchTensorHandle randint_long(long low, long high, int *shape, int shape_size, int device);
     TorchTensorHandle randint_int(int low, int high, int *shape, int shape_size, int device);
 
diff --git a/kmath-torch/ctorch/include/utils.hh b/kmath-torch/ctorch/include/utils.hh
index 975e8f034..aabaa9c6c 100644
--- a/kmath-torch/ctorch/include/utils.hh
+++ b/kmath-torch/ctorch/include/utils.hh
@@ -103,7 +103,7 @@ namespace ctorch
     }
 
     template <typename Dtype>
-    inline torch::Tensor randint(Dtype low, Dtype high, std::vector<int> shape, torch::Device device)
+    inline torch::Tensor randint(long low, long high, std::vector<int> shape, torch::Device device)
     {
         return torch::randint(low, high, shape, torch::TensorOptions().dtype(dtype<Dtype>()).layout(torch::kStrided).device(device));
     }
diff --git a/kmath-torch/ctorch/src/ctorch.cc b/kmath-torch/ctorch/src/ctorch.cc
index eb145081d..df9635fd0 100644
--- a/kmath-torch/ctorch/src/ctorch.cc
+++ b/kmath-torch/ctorch/src/ctorch.cc
@@ -197,6 +197,14 @@ TorchTensorHandle randn_float(int *shape, int shape_size, int device)
     return new torch::Tensor(ctorch::randn<float>(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
 }
 
+TorchTensorHandle randint_double(long low, long high, int *shape, int shape_size, int device)
+{
+    return new torch::Tensor(ctorch::randint<double>(low, high, ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
+}
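All four randint_* entry points forward to the same ctorch::randint template, so the element type is fixed by the template argument while the bounds always cross the C boundary as long. On the Kotlin side these surface as the randIntegral members introduced further down; a minimal sketch of the intended use (SEED as defined in the test utilities, CPU device assumed):

    // Draws integer-valued samples in [0, 100); dispatches to randint_double.
    TorchTensorRealAlgebra {
        setSeed(SEED)
        val samples = randIntegral(0.0, 100.0, shape = intArrayOf(2, 3))
        println(samples)
    }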
+TorchTensorHandle randint_float(long low, long high, int *shape, int shape_size, int device)
+{
+    return new torch::Tensor(ctorch::randint<float>(low, high, ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
+}
 TorchTensorHandle randint_long(long low, long high, int *shape, int shape_size, int device)
 {
     return new torch::Tensor(ctorch::randint<long>(low, high, ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkMatMultGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkMatMultGPU.kt
index 10798af14..62424a59d 100644
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkMatMultGPU.kt
+++ b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkMatMultGPU.kt
@@ -6,15 +6,15 @@ class BenchmarkMatMultGPU {
     @Test
     fun benchmarkMatMultFloat20() =
         benchmarkingMatMultFloat(20, 10, 100000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkMatMultFloat200() =
         benchmarkingMatMultFloat(200, 10, 10000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkMatMultFloat2000() =
         benchmarkingMatMultFloat(2000, 10, 1000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 }
\ No newline at end of file
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkRandomGeneratorsGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkRandomGeneratorsGPU.kt
index 67de2efd7..c9593a825 100644
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkRandomGeneratorsGPU.kt
+++ b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkRandomGeneratorsGPU.kt
@@ -6,59 +6,59 @@ class BenchmarkRandomGeneratorsGPU {
     @Test
     fun benchmarkRandNormal1() =
         benchmarkingRandNormal(10, 10, 100000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkRandUniform1() =
         benchmarkingRandUniform(10, 10, 100000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkRandIntegral1() =
         benchmarkingRandIntegral(10, 10, 100000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkRandNormal3() =
         benchmarkingRandNormal(1000, 10, 100000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkRandUniform3() =
         benchmarkingRandUniform(1000, 10, 100000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkRandIntegral3() =
         benchmarkingRandIntegral(1000, 10, 100000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkRandNormal5() =
         benchmarkingRandNormal(100000, 10, 100000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkRandUniform5() =
         benchmarkingRandUniform(100000, 10, 100000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkRandIntegral5() =
         benchmarkingRandIntegral(100000, 10, 100000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkRandNormal7() =
         benchmarkingRandNormal(10000000, 10, 10000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkRandUniform7() =
         benchmarkingRandUniform(10000000, 10, 10000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 
     @Test
     fun benchmarkRandIntegral7() =
         benchmarkingRandIntegral(10000000, 10, 10000,
-            device = TorchDevice.TorchCUDA(0))
+            device = Device.CUDA(0))
 }
\ No newline at end of file
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestAutogradGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestAutogradGPU.kt
index 7c974fced..7d43653c1 100644
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestAutogradGPU.kt
+++ b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestAutogradGPU.kt
@@ -5,9 +5,9 @@ import kotlin.test.*
 internal class TestAutogradGPU {
 
     @Test
-    fun testAutoGrad() = testingAutoGrad(dim = 3, device = TorchDevice.TorchCUDA(0))
+    fun testAutoGrad() = testingAutoGrad(dim = 3, device = Device.CUDA(0))
 
     @Test
     fun testBatchedAutoGrad() = testingBatchedAutoGrad(
-        bath = intArrayOf(2), dim=3, device = TorchDevice.TorchCUDA(0))
+        bath = intArrayOf(2), dim=3, device = Device.CUDA(0))
 }
\ No newline at end of file
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebraGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebraGPU.kt
index a1b3ba3f8..c0c385103 100644
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebraGPU.kt
+++ b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebraGPU.kt
@@ -7,26 +7,26 @@ class TestTorchTensorAlgebraGPU {
 
     @Test
     fun testScalarProduct() =
-        testingScalarProduct(device = TorchDevice.TorchCUDA(0))
+        testingScalarProduct(device = Device.CUDA(0))
 
     @Test
     fun testMatrixMultiplication() =
-        testingMatrixMultiplication(device = TorchDevice.TorchCUDA(0))
+        testingMatrixMultiplication(device = Device.CUDA(0))
 
     @Test
     fun testLinearStructure() =
-        testingLinearStructure(device = TorchDevice.TorchCUDA(0))
+        testingLinearStructure(device = Device.CUDA(0))
 
     @Test
     fun testTensorTransformations() =
-        testingTensorTransformations(device = TorchDevice.TorchCUDA(0))
+        testingTensorTransformations(device = Device.CUDA(0))
 
     @Test
     fun testBatchedSVD() =
-        testingBatchedSVD(device = TorchDevice.TorchCUDA(0))
+        testingBatchedSVD(device = Device.CUDA(0))
 
     @Test
     fun testBatchedSymEig() =
-        testingBatchedSymEig(device = TorchDevice.TorchCUDA(0))
+        testingBatchedSymEig(device = Device.CUDA(0))
 
 }
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorGPU.kt
index d984993a1..820f2008a 100644
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorGPU.kt
+++ b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorGPU.kt
@@ -5,17 +5,17 @@ import kotlin.test.*
 class TestTorchTensorGPU {
 
     @Test
-    fun testCopyFromArray() = testingCopyFromArray(TorchDevice.TorchCUDA(0))
+    fun testCopyFromArray() = testingCopyFromArray(Device.CUDA(0))
 
     @Test
     fun testCopyToDevice() = TorchTensorRealAlgebra {
         setSeed(SEED)
         val normalCpu = randNormal(intArrayOf(2, 3))
-        val normalGpu = normalCpu.copyToDevice(TorchDevice.TorchCUDA(0))
+        val normalGpu = normalCpu.copyToDevice(Device.CUDA(0))
         assertTrue(normalCpu.copyToArray() contentEquals normalGpu.copyToArray())
 
-        val uniformGpu = randUniform(intArrayOf(3,2),TorchDevice.TorchCUDA(0))
-        val uniformCpu = uniformGpu.copyToDevice(TorchDevice.TorchCPU)
+        val uniformGpu = randUniform(intArrayOf(3,2),Device.CUDA(0))
+        val uniformCpu = uniformGpu.copyToDevice(Device.CPU)
         assertTrue(uniformGpu.copyToArray() contentEquals uniformCpu.copyToArray())
     }
 
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestUtilsGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestUtilsGPU.kt
index 3cf1d1c2a..72c34add1 100644
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestUtilsGPU.kt
+++ b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestUtilsGPU.kt
@@ -11,6 +11,6 @@ internal class TestUtilsGPU {
     }
 
     @Test
-    fun testSetSeed() = testingSetSeed(TorchDevice.TorchCUDA(0))
+    fun testSetSeed() = testingSetSeed(Device.CUDA(0))
 
 }
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/Device.kt b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/Device.kt
new file mode 100644
index 000000000..50e6a5b45
--- /dev/null
+++ b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/Device.kt
@@ -0,0 +1,22 @@
+package kscience.kmath.torch
+
+
+public sealed class Device {
+    public object CPU: Device() {
+        override fun toString(): String {
+            return "CPU"
+        }
+    }
+    public data class CUDA(val index: Int): Device()
+    public fun toInt(): Int {
+        when(this) {
+            is CPU -> return 0
+            is CUDA -> return this.index + 1
+        }
+    }
+    public companion object {
+        public fun fromInt(deviceInt: Int): Device {
+            return if (deviceInt == 0) CPU else CUDA(deviceInt-1)
+        }
+    }
+}
\ No newline at end of file
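The integer encoding keeps 0 for the CPU and shifts CUDA ordinals up by one, so fromInt(toInt()) round-trips. A self-contained check (plain Kotlin, no libtorch needed):

    fun main() {
        val devices = listOf(Device.CPU, Device.CUDA(0), Device.CUDA(1))
        for (device in devices) {
            val encoded = device.toInt() // CPU -> 0, CUDA(i) -> i + 1
            check(Device.fromInt(encoded) == device) { "Round trip failed for $device" }
        }
    }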
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TensorAlgebra.kt b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TensorAlgebra.kt
new file mode 100644
index 000000000..96653f305
--- /dev/null
+++ b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TensorAlgebra.kt
@@ -0,0 +1,50 @@
+package kscience.kmath.torch
+
+import kscience.kmath.operations.Field
+import kscience.kmath.operations.Ring
+
+
+public interface TensorAlgebra<T, TorchTensorType : TensorStructure<T>> : Ring<TorchTensorType> {
+
+    public operator fun T.plus(other: TorchTensorType): TorchTensorType
+    public operator fun TorchTensorType.plus(value: T): TorchTensorType
+    public operator fun TorchTensorType.plusAssign(value: T): Unit
+    public operator fun TorchTensorType.plusAssign(b: TorchTensorType): Unit
+
+    public operator fun T.minus(other: TorchTensorType): TorchTensorType
+    public operator fun TorchTensorType.minus(value: T): TorchTensorType
+    public operator fun TorchTensorType.minusAssign(value: T): Unit
+    public operator fun TorchTensorType.minusAssign(b: TorchTensorType): Unit
+
+    public operator fun T.times(other: TorchTensorType): TorchTensorType
+    public operator fun TorchTensorType.times(value: T): TorchTensorType
+    public operator fun TorchTensorType.timesAssign(value: T): Unit
+    public operator fun TorchTensorType.timesAssign(b: TorchTensorType): Unit
+
+    public infix fun TorchTensorType.dot(b: TorchTensorType): TorchTensorType
+
+    public fun diagonalEmbedding(
+        diagonalEntries: TorchTensorType,
+        offset: Int = 0, dim1: Int = -2, dim2: Int = -1
+    ): TorchTensorType
+
+    public fun TorchTensorType.transpose(i: Int, j: Int): TorchTensorType
+    public fun TorchTensorType.view(shape: IntArray): TorchTensorType
+
+    public fun TorchTensorType.abs(): TorchTensorType
+    public fun TorchTensorType.sum(): TorchTensorType
+
+}
+
+public interface TensorFieldAlgebra<T, TorchTensorType : TensorStructure<T>> :
+    TensorAlgebra<T, TorchTensorType>, Field<TorchTensorType> {
+
+    public operator fun TorchTensorType.divAssign(b: TorchTensorType)
+
+    public fun TorchTensorType.exp(): TorchTensorType
+    public fun TorchTensorType.log(): TorchTensorType
+
+    public fun TorchTensorType.svd(): Triple<TorchTensorType, TorchTensorType, TorchTensorType>
+    public fun TorchTensorType.symEig(eigenvectors: Boolean = true): Pair<TorchTensorType, TorchTensorType>
+
+}
\ No newline at end of file
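With the operations lifted into TensorAlgebra, generic code can now be written once against the interface and reused by every concrete backend. A hedged sketch (symmetrizedDot is illustrative, not part of the patch):

    // A . B + B . A for any algebra; relies only on TensorAlgebra members
    // plus the `+` operator inherited from Ring.
    fun <T, TensorType : TensorStructure<T>> TensorAlgebra<T, TensorType>.symmetrizedDot(
        a: TensorType, b: TensorType
    ): TensorType = (a dot b) + (b dot a)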
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TensorStructure.kt b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TensorStructure.kt
new file mode 100644
index 000000000..53539045a
--- /dev/null
+++ b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TensorStructure.kt
@@ -0,0 +1,13 @@
+package kscience.kmath.torch
+
+import kscience.kmath.structures.MutableNDStructure
+
+public abstract class TensorStructure<T>: MutableNDStructure<T> {
+
+    // A tensor can have empty shape, in which case it represents just a value
+    public abstract fun value(): T
+
+    // Tensors might hold shared resources
+    override fun equals(other: Any?): Boolean = false
+    override fun hashCode(): Int = 0
+}
\ No newline at end of file
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchDevice.kt b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchDevice.kt
deleted file mode 100644
index 75c7a921c..000000000
--- a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchDevice.kt
+++ /dev/null
@@ -1,22 +0,0 @@
-package kscience.kmath.torch
-
-
-public sealed class TorchDevice {
-    public object TorchCPU: TorchDevice() {
-        override fun toString(): String {
-            return "TorchCPU"
-        }
-    }
-    public data class TorchCUDA(val index: Int): TorchDevice()
-    public fun toInt(): Int {
-        when(this) {
-            is TorchCPU -> return 0
-            is TorchCUDA -> return this.index + 1
-        }
-    }
-    public companion object {
-        public fun fromInt(deviceInt: Int): TorchDevice {
-            return if (deviceInt == 0) TorchCPU else TorchCUDA(deviceInt-1)
-        }
-    }
-}
\ No newline at end of file
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensor.kt b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensor.kt
index fb7c70abe..4302bc94c 100644
--- a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensor.kt
+++ b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensor.kt
@@ -1,6 +1,5 @@
 package kscience.kmath.torch
 
-import kscience.kmath.structures.MutableNDStructure
 import kotlinx.cinterop.*
 import kscience.kmath.ctorch.*
 
@@ -9,7 +8,7 @@ import kscience.kmath.ctorch.*
 public sealed class TorchTensor<T> constructor(
     internal val scope: DeferScope,
     internal val tensorHandle: COpaquePointer
-) : MutableNDStructure<T> {
+) : TensorStructure<T>() {
     init {
         scope.defer(::close)
     }
@@ -23,10 +22,8 @@ public sealed class TorchTensor<T> constructor(
     public val strides: IntArray
         get() = (1..dimension).map{get_stride_at(tensorHandle, it-1)}.toIntArray()
     public val size: Int get() = get_numel(tensorHandle)
-    public val device: TorchDevice get() = TorchDevice.fromInt(get_device(tensorHandle))
+    public val device: Device get() = Device.fromInt(get_device(tensorHandle))
 
-    override fun equals(other: Any?): Boolean = false
-    override fun hashCode(): Int = 0
     override fun toString(): String {
         val nativeStringRepresentation: CPointer<ByteVar> = tensor_to_string(tensorHandle)!!
         val stringRepresentation = nativeStringRepresentation.toKString()
@@ -42,12 +39,13 @@ public sealed class TorchTensor<T> constructor(
         return indices.map { it to get(it) }
     }
 
-    internal inline fun isValue() = check(dimension == 0) {
+    public inline fun isValue(): Boolean = dimension == 0
+    public inline fun isNotValue(): Boolean = !isValue()
+    internal inline fun checkIsValue() = check(isValue()) {
         "This tensor has shape ${shape.toList()}"
     }
-
-    public fun value(): T {
-        isValue()
+    override fun value(): T {
+        checkIsValue()
         return item()
     }
 
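value() is now an override of TensorStructure.value() guarded by checkIsValue(), while isValue()/isNotValue() expose the zero-dimension precondition without throwing. A short illustration (assuming full accepts an empty shape for a scalar, as torch::full does):

    TorchTensorRealAlgebra {
        val scalar = full(1.5, shape = IntArray(0), device = Device.CPU)
        check(scalar.isValue())      // dimension == 0
        println(scalar.value())      // fine

        val matrix = full(1.5, shape = intArrayOf(2, 2), device = Device.CPU)
        check(matrix.isNotValue())   // matrix.value() would fail checkIsValue()
    }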
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt
index 9887d8486..db644962d 100644
--- a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt
+++ b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt
@@ -9,13 +9,15 @@ public sealed class TorchTensorAlgebra<
     T,
     TVar : CPrimitiveVar,
     PrimitiveArrayType,
     TorchTensorType : TorchTensor<T>> constructor(
     internal val scope: DeferScope
-) {
+) :
+    TensorAlgebra<T, TorchTensorType> {
+
     internal abstract fun wrap(tensorHandle: COpaquePointer): TorchTensorType
 
     public abstract fun copyFromArray(
         array: PrimitiveArrayType,
         shape: IntArray,
-        device: TorchDevice = TorchDevice.TorchCPU
+        device: Device = Device.CPU
     ): TorchTensorType
     public abstract fun TorchTensorType.copyToArray(): PrimitiveArrayType
 
@@ -23,96 +25,219 @@ public sealed class TorchTensorAlgebra<
     public abstract fun fromBlob(arrayBlob: CPointer<TVar>, shape: IntArray): TorchTensorType
     public abstract fun TorchTensorType.getData(): CPointer<TVar>
 
-    public abstract fun full(value: T, shape: IntArray, device: TorchDevice): TorchTensorType
+    public abstract fun full(value: T, shape: IntArray, device: Device): TorchTensorType
 
-    public abstract operator fun T.plus(other: TorchTensorType): TorchTensorType
-    public abstract operator fun TorchTensorType.plus(value: T): TorchTensorType
-    public abstract operator fun TorchTensorType.plusAssign(value: T): Unit
-    public abstract operator fun T.minus(other: TorchTensorType): TorchTensorType
-    public abstract operator fun TorchTensorType.minus(value: T): TorchTensorType
-    public abstract operator fun TorchTensorType.minusAssign(value: T): Unit
-    public abstract operator fun T.times(other: TorchTensorType): TorchTensorType
-    public abstract operator fun TorchTensorType.times(value: T): TorchTensorType
-    public abstract operator fun TorchTensorType.timesAssign(value: T): Unit
+    public abstract fun randIntegral(
+        low: T, high: T, shape: IntArray,
+        device: Device = Device.CPU
+    ): TorchTensorType
 
-    public operator fun TorchTensorType.times(other: TorchTensorType): TorchTensorType =
-        wrap(times_tensor(this.tensorHandle, other.tensorHandle)!!)
+    public abstract fun TorchTensorType.randIntegral(low: T, high: T): TorchTensorType
+    public abstract fun TorchTensorType.randIntegralAssign(low: T, high: T): Unit
 
-    public operator fun TorchTensorType.timesAssign(other: TorchTensorType): Unit {
-        times_tensor_assign(this.tensorHandle, other.tensorHandle)
+    override val zero: TorchTensorType
+        get() = number(0)
+    override val one: TorchTensorType
+        get() = number(1)
+
+    protected inline fun checkDeviceCompatible(a: TorchTensorType, b: TorchTensorType) =
+        check(a.device == b.device) {
+            "Tensors must be on the same device"
+        }
+
+    protected inline fun checkShapeCompatible(a: TorchTensorType, b: TorchTensorType) =
+        check(a.shape contentEquals b.shape) {
+            "Tensors must be of identical shape"
+        }
+
+    protected inline fun checkLinearOperation(a: TorchTensorType, b: TorchTensorType) {
+        if (a.isNotValue() and b.isNotValue()) {
+            checkDeviceCompatible(a, b)
+            checkShapeCompatible(a, b)
+        }
     }
 
-    public infix fun TorchTensorType.dot(other: TorchTensorType): TorchTensorType =
-        wrap(matmul(this.tensorHandle, other.tensorHandle)!!)
+    override operator fun TorchTensorType.times(b: TorchTensorType): TorchTensorType =
+        this.times(b, safe = true)
 
-    public infix fun TorchTensorType.dotAssign(other: TorchTensorType): Unit {
-        matmul_assign(this.tensorHandle, other.tensorHandle)
+    public fun TorchTensorType.times(b: TorchTensorType, safe: Boolean): TorchTensorType {
+        if (safe) checkLinearOperation(this, b)
+        return wrap(times_tensor(this.tensorHandle, b.tensorHandle)!!)
     }
 
-    public infix fun TorchTensorType.dotRightAssign(other: TorchTensorType): Unit {
-        matmul_right_assign(this.tensorHandle, other.tensorHandle)
+    override operator fun TorchTensorType.timesAssign(b: TorchTensorType): Unit =
+        this.timesAssign(b, safe = true)
+
+    public fun TorchTensorType.timesAssign(b: TorchTensorType, safe: Boolean): Unit {
+        if (safe) checkLinearOperation(this, b)
+        times_tensor_assign(this.tensorHandle, b.tensorHandle)
     }
 
-    public operator fun TorchTensorType.plus(other: TorchTensorType): TorchTensorType =
-        wrap(plus_tensor(this.tensorHandle, other.tensorHandle)!!)
+    override fun multiply(a: TorchTensorType, b: TorchTensorType): TorchTensorType = a * b
 
-    public operator fun TorchTensorType.plusAssign(other: TorchTensorType): Unit {
-        plus_tensor_assign(this.tensorHandle, other.tensorHandle)
+    override operator fun TorchTensorType.plus(b: TorchTensorType): TorchTensorType =
+        this.plus(b, safe = true)
+
+    public fun TorchTensorType.plus(b: TorchTensorType, safe: Boolean): TorchTensorType {
+        if (safe) checkLinearOperation(this, b)
+        return wrap(plus_tensor(this.tensorHandle, b.tensorHandle)!!)
     }
 
-    public operator fun TorchTensorType.minus(other: TorchTensorType): TorchTensorType =
-        wrap(minus_tensor(this.tensorHandle, other.tensorHandle)!!)
+    override operator fun TorchTensorType.plusAssign(b: TorchTensorType): Unit =
+        this.plusAssign(b, safe = true)
 
-    public operator fun TorchTensorType.minusAssign(other: TorchTensorType): Unit {
-        minus_tensor_assign(this.tensorHandle, other.tensorHandle)
+    public fun TorchTensorType.plusAssign(b: TorchTensorType, safe: Boolean): Unit {
+        if (safe) checkLinearOperation(this, b)
+        plus_tensor_assign(this.tensorHandle, b.tensorHandle)
    }
 
-    public operator fun TorchTensorType.unaryMinus(): TorchTensorType =
+    override fun add(a: TorchTensorType, b: TorchTensorType): TorchTensorType = a + b
+
+    override operator fun TorchTensorType.minus(b: TorchTensorType): TorchTensorType =
+        this.minus(b, safe = true)
+
+    public fun TorchTensorType.minus(b: TorchTensorType, safe: Boolean): TorchTensorType {
+        if (safe) checkLinearOperation(this, b)
+        return wrap(minus_tensor(this.tensorHandle, b.tensorHandle)!!)
+    }
+
+    override operator fun TorchTensorType.minusAssign(b: TorchTensorType): Unit =
+        this.minusAssign(b, safe = true)
+
+    public fun TorchTensorType.minusAssign(b: TorchTensorType, safe: Boolean): Unit {
+        if (safe) checkLinearOperation(this, b)
+        minus_tensor_assign(this.tensorHandle, b.tensorHandle)
+    }
+
+    override operator fun TorchTensorType.unaryMinus(): TorchTensorType =
         wrap(unary_minus(this.tensorHandle)!!)
 
-    public fun TorchTensorType.abs(): TorchTensorType = wrap(abs_tensor(tensorHandle)!!)
+    private inline fun checkDotOperation(a: TorchTensorType, b: TorchTensorType): Unit {
+        checkDeviceCompatible(a, b)
+        val sa = a.shape
+        val sb = b.shape
+        val na = sa.size
+        val nb = sb.size
+        var status: Boolean
+        if (nb == 1) {
+            status = sa.last() == sb[0]
+        } else {
+            status = sa.last() == sb[nb - 2]
+            if ((na > 2) and (nb > 2)) {
+                status = status and
+                        (sa.take(nb - 2).toIntArray() contentEquals sb.take(nb - 2).toIntArray())
+            }
+        }
+        check(status) { "Incompatible shapes $sa and $sb for dot product" }
+    }
+
+    override infix fun TorchTensorType.dot(b: TorchTensorType): TorchTensorType =
+        this.dot(b, safe = true)
+
+    public fun TorchTensorType.dot(b: TorchTensorType, safe: Boolean): TorchTensorType {
+        if (safe) checkDotOperation(this, b)
+        return wrap(matmul(this.tensorHandle, b.tensorHandle)!!)
+    }
+
+    public infix fun TorchTensorType.dotAssign(b: TorchTensorType): Unit =
+        this.dotAssign(b, safe = true)
+
+    public fun TorchTensorType.dotAssign(b: TorchTensorType, safe: Boolean): Unit {
+        if (safe) checkDotOperation(this, b)
+        matmul_assign(this.tensorHandle, b.tensorHandle)
+    }
+
+    public infix fun TorchTensorType.dotRightAssign(b: TorchTensorType): Unit =
+        this.dotRightAssign(b, safe = true)
+
+    public fun TorchTensorType.dotRightAssign(b: TorchTensorType, safe: Boolean): Unit {
+        if (safe) checkDotOperation(this, b)
+        matmul_right_assign(this.tensorHandle, b.tensorHandle)
+    }
+
+    override fun diagonalEmbedding(
+        diagonalEntries: TorchTensorType, offset: Int, dim1: Int, dim2: Int
+    ): TorchTensorType =
+        wrap(diag_embed(diagonalEntries.tensorHandle, offset, dim1, dim2)!!)
+
+    private inline fun checkTranspose(dim: Int, i: Int, j: Int): Unit =
+        check((i < dim) and (j < dim)) {
+            "Cannot transpose $i to $j for a tensor of dim $dim"
+        }
+
+    override fun TorchTensorType.transpose(i: Int, j: Int): TorchTensorType =
+        this.transpose(i, j, safe = true)
+
+    public fun TorchTensorType.transpose(i: Int, j: Int, safe: Boolean): TorchTensorType {
+        if (safe) checkTranspose(this.dimension, i, j)
+        return wrap(transpose_tensor(tensorHandle, i, j)!!)
+    }
+
+    public fun TorchTensorType.transposeAssign(i: Int, j: Int): Unit =
+        this.transposeAssign(i, j, safe = true)
+
+    public fun TorchTensorType.transposeAssign(i: Int, j: Int, safe: Boolean): Unit {
+        if (safe) checkTranspose(this.dimension, i, j)
+        transpose_tensor_assign(tensorHandle, i, j)
+    }
+
+    private inline fun checkView(a: TorchTensorType, shape: IntArray): Unit =
+        check(a.shape.reduce(Int::times) == shape.reduce(Int::times))
+
+    override fun TorchTensorType.view(shape: IntArray): TorchTensorType =
+        this.view(shape, safe = true)
+
+    public fun TorchTensorType.view(shape: IntArray, safe: Boolean): TorchTensorType {
+        if (safe) checkView(this, shape)
+        return wrap(view_tensor(this.tensorHandle, shape.toCValues(), shape.size)!!)
+    }
+
+    override fun TorchTensorType.abs(): TorchTensorType = wrap(abs_tensor(tensorHandle)!!)
     public fun TorchTensorType.absAssign(): Unit {
         abs_tensor_assign(tensorHandle)
     }
 
-    public fun TorchTensorType.transpose(i: Int, j: Int): TorchTensorType =
-        wrap(transpose_tensor(tensorHandle, i, j)!!)
-
-    public fun TorchTensorType.transposeAssign(i: Int, j: Int): Unit {
-        transpose_tensor_assign(tensorHandle, i, j)
-    }
-
-    public fun TorchTensorType.sum(): TorchTensorType = wrap(sum_tensor(tensorHandle)!!)
+    override fun TorchTensorType.sum(): TorchTensorType = wrap(sum_tensor(tensorHandle)!!)
     public fun TorchTensorType.sumAssign(): Unit {
         sum_tensor_assign(tensorHandle)
     }
 
-    public fun diagEmbed(
-        diagonalEntries: TorchTensorType,
-        offset: Int = 0, dim1: Int = -2, dim2: Int = -1
-    ): TorchTensorType =
-        wrap(diag_embed(diagonalEntries.tensorHandle, offset, dim1, dim2)!!)
-
     public fun TorchTensorType.copy(): TorchTensorType =
         wrap(copy_tensor(this.tensorHandle)!!)
 
-    public fun TorchTensorType.copyToDevice(device: TorchDevice): TorchTensorType =
+    public fun TorchTensorType.copyToDevice(device: Device): TorchTensorType =
         wrap(copy_to_device(this.tensorHandle, device.toInt())!!)
 
     public infix fun TorchTensorType.swap(otherTensor: TorchTensorType): Unit {
         swap_tensors(this.tensorHandle, otherTensor.tensorHandle)
     }
-
-    public fun TorchTensorType.view(shape: IntArray): TorchTensorType =
-        wrap(view_tensor(this.tensorHandle, shape.toCValues(), shape.size)!!)
 }
 
 public sealed class TorchTensorFieldAlgebra<T, TVar : CPrimitiveVar,
     PrimitiveArrayType, TorchTensorType : TorchTensor<T>>(scope: DeferScope) :
-    TorchTensorAlgebra<T, TVar, PrimitiveArrayType, TorchTensorType>(scope) {
+    TorchTensorAlgebra<T, TVar, PrimitiveArrayType, TorchTensorType>(scope),
+    TensorFieldAlgebra<T, TorchTensorType> {
 
-    public abstract fun randUniform(shape: IntArray, device: TorchDevice = TorchDevice.TorchCPU): TorchTensorType
-    public abstract fun randNormal(shape: IntArray, device: TorchDevice = TorchDevice.TorchCPU): TorchTensorType
+    override operator fun TorchTensorType.div(b: TorchTensorType): TorchTensorType =
+        this.div(b, safe = true)
+
+    public fun TorchTensorType.div(b: TorchTensorType, safe: Boolean): TorchTensorType {
+        if (safe) checkLinearOperation(this, b)
+        return wrap(div_tensor(this.tensorHandle, b.tensorHandle)!!)
+    }
+
+    override operator fun TorchTensorType.divAssign(b: TorchTensorType): Unit =
+        this.divAssign(b, safe = true)
+
+    public fun TorchTensorType.divAssign(b: TorchTensorType, safe: Boolean): Unit {
+        if (safe) checkLinearOperation(this, b)
+        div_tensor_assign(this.tensorHandle, b.tensorHandle)
+    }
+
+    override fun divide(a: TorchTensorType, b: TorchTensorType): TorchTensorType = a / b
+
+    public abstract fun randUniform(shape: IntArray, device: Device = Device.CPU): TorchTensorType
+    public abstract fun randNormal(shape: IntArray, device: Device = Device.CPU): TorchTensorType
 
     public fun TorchTensorType.randUniform(): TorchTensorType =
         wrap(rand_like(this.tensorHandle)!!)
@@ -128,24 +253,17 @@ public sealed class TorchTensorFieldAlgebra<
-    public fun TorchTensorType.svd(): Triple<TorchTensorType, TorchTensorType, TorchTensorType> {
+    override fun TorchTensorType.svd(): Triple<TorchTensorType, TorchTensorType, TorchTensorType> {
         val U = empty_tensor()!!
         val V = empty_tensor()!!
         val S = empty_tensor()!!
@@ -153,21 +271,23 @@ public sealed class TorchTensorFieldAlgebra<
-    public fun TorchTensorType.symEig(eigenvectors: Boolean = true): Pair<TorchTensorType, TorchTensorType> {
+    override fun TorchTensorType.symEig(eigenvectors: Boolean): Pair<TorchTensorType, TorchTensorType> {
         val V = empty_tensor()!!
         val S = empty_tensor()!!
         symeig_tensor(this.tensorHandle, S, V, eigenvectors)
         return Pair(wrap(S), wrap(V))
     }
 
-    public fun TorchTensorType.grad(variable: TorchTensorType, retainGraph: Boolean=false): TorchTensorType {
-        this.isValue()
+    public fun TorchTensorType.grad(variable: TorchTensorType, retainGraph: Boolean = false): TorchTensorType {
+        this.checkIsValue()
         return wrap(autograd_tensor(this.tensorHandle, variable.tensorHandle, retainGraph)!!)
     }
 
+    public infix fun TorchTensorType.grad(variable: TorchTensorType): TorchTensorType = this.grad(variable, false)
+
     public infix fun TorchTensorType.hess(variable: TorchTensorType): TorchTensorType {
-        this.isValue()
+        this.checkIsValue()
         return wrap(autohess_tensor(this.tensorHandle, variable.tensorHandle)!!)
     }
 
@@ -175,42 +295,34 @@ public sealed class TorchTensorFieldAlgebra<
 }
 
-public sealed class TorchTensorRingAlgebra<T, TVar : CPrimitiveVar,
-    PrimitiveArrayType, TorchTensorType : TorchTensor<T>>(scope: DeferScope) :
-    TorchTensorAlgebra<T, TVar, PrimitiveArrayType, TorchTensorType>(scope) {
-    public abstract fun randIntegral(
-        low: T, high: T, shape: IntArray,
-        device: TorchDevice = TorchDevice.TorchCPU
-    ): TorchTensorType
-    public abstract fun TorchTensorType.randIntegral(low: T, high: T): TorchTensorType
-    public abstract fun TorchTensorType.randIntegralAssign(low: T, high: T): Unit
-}
-
 public class TorchTensorRealAlgebra(scope: DeferScope) :
     TorchTensorFieldAlgebra<Double, DoubleVar, DoubleArray, TorchTensorReal>(scope) {
     override fun wrap(tensorHandle: COpaquePointer): TorchTensorReal =
         TorchTensorReal(scope = scope, tensorHandle = tensorHandle)
 
+    override fun number(value: Number): TorchTensorReal =
+        full(value.toDouble(), intArrayOf(1), Device.CPU).sum()
+
     override fun TorchTensorReal.copyToArray(): DoubleArray =
         this.elements().map { it.second }.toList().toDoubleArray()
 
-    override fun copyFromArray(array: DoubleArray, shape: IntArray, device: TorchDevice): TorchTensorReal =
+    override fun copyFromArray(array: DoubleArray, shape: IntArray, device: Device): TorchTensorReal =
         wrap(from_blob_double(array.toCValues(), shape.toCValues(), shape.size, device.toInt(), true)!!)
 
     override fun fromBlob(arrayBlob: CPointer<DoubleVar>, shape: IntArray): TorchTensorReal =
-        wrap(from_blob_double(arrayBlob, shape.toCValues(), shape.size, TorchDevice.TorchCPU.toInt(), false)!!)
+        wrap(from_blob_double(arrayBlob, shape.toCValues(), shape.size, Device.CPU.toInt(), false)!!)
 
     override fun TorchTensorReal.getData(): CPointer<DoubleVar> {
-        require(this.device is TorchDevice.TorchCPU) {
+        require(this.device is Device.CPU) {
             "This tensor is not available on CPU"
         }
         return get_data_double(this.tensorHandle)!!
     }
 
-    override fun randNormal(shape: IntArray, device: TorchDevice): TorchTensorReal =
+    override fun randNormal(shape: IntArray, device: Device): TorchTensorReal =
         wrap(randn_double(shape.toCValues(), shape.size, device.toInt())!!)
 
-    override fun randUniform(shape: IntArray, device: TorchDevice): TorchTensorReal =
+    override fun randUniform(shape: IntArray, device: Device): TorchTensorReal =
         wrap(rand_double(shape.toCValues(), shape.size, device.toInt())!!)
 
     override operator fun Double.plus(other: TorchTensorReal): TorchTensorReal =
@@ -243,8 +355,20 @@ public class TorchTensorRealAlgebra(scope: DeferScope) :
         times_double_assign(value, this.tensorHandle)
     }
 
-    override fun full(value: Double, shape: IntArray, device: TorchDevice): TorchTensorReal =
+    override fun multiply(a: TorchTensorReal, k: Number): TorchTensorReal = a * k.toDouble()
+
+    override fun full(value: Double, shape: IntArray, device: Device): TorchTensorReal =
         wrap(full_double(value, shape.toCValues(), shape.size, device.toInt())!!)
+
+    override fun randIntegral(low: Double, high: Double, shape: IntArray, device: Device): TorchTensorReal =
+        wrap(randint_double(low.toLong(), high.toLong(), shape.toCValues(), shape.size, device.toInt())!!)
+
+    override fun TorchTensorReal.randIntegral(low: Double, high: Double): TorchTensorReal =
+        wrap(randint_long_like(this.tensorHandle, low.toLong(), high.toLong())!!)
+
+    override fun TorchTensorReal.randIntegralAssign(low: Double, high: Double): Unit {
+        randint_long_like_assign(this.tensorHandle, low.toLong(), high.toLong())
+    }
 }
 
 public class TorchTensorFloatAlgebra(scope: DeferScope) :
@@ -252,26 +376,29 @@ public class TorchTensorFloatAlgebra(scope: DeferScope) :
     override fun wrap(tensorHandle: COpaquePointer): TorchTensorFloat =
         TorchTensorFloat(scope = scope, tensorHandle = tensorHandle)
 
+    override fun number(value: Number): TorchTensorFloat =
+        full(value.toFloat(), intArrayOf(1), Device.CPU).sum()
+
     override fun TorchTensorFloat.copyToArray(): FloatArray =
         this.elements().map { it.second }.toList().toFloatArray()
 
-    override fun copyFromArray(array: FloatArray, shape: IntArray, device: TorchDevice): TorchTensorFloat =
+    override fun copyFromArray(array: FloatArray, shape: IntArray, device: Device): TorchTensorFloat =
         wrap(from_blob_float(array.toCValues(), shape.toCValues(), shape.size, device.toInt(), true)!!)
 
     override fun fromBlob(arrayBlob: CPointer<FloatVar>, shape: IntArray): TorchTensorFloat =
-        wrap(from_blob_float(arrayBlob, shape.toCValues(), shape.size, TorchDevice.TorchCPU.toInt(), false)!!)
+        wrap(from_blob_float(arrayBlob, shape.toCValues(), shape.size, Device.CPU.toInt(), false)!!)
 
     override fun TorchTensorFloat.getData(): CPointer<FloatVar> {
-        require(this.device is TorchDevice.TorchCPU) {
+        require(this.device is Device.CPU) {
             "This tensor is not available on CPU"
         }
         return get_data_float(this.tensorHandle)!!
     }
 
-    override fun randNormal(shape: IntArray, device: TorchDevice): TorchTensorFloat =
+    override fun randNormal(shape: IntArray, device: Device): TorchTensorFloat =
         wrap(randn_float(shape.toCValues(), shape.size, device.toInt())!!)
 
-    override fun randUniform(shape: IntArray, device: TorchDevice): TorchTensorFloat =
+    override fun randUniform(shape: IntArray, device: Device): TorchTensorFloat =
         wrap(rand_float(shape.toCValues(), shape.size, device.toInt())!!)
 
     override operator fun Float.plus(other: TorchTensorFloat): TorchTensorFloat =
@@ -304,36 +431,52 @@ public class TorchTensorFloatAlgebra(scope: DeferScope) :
         times_float_assign(value, this.tensorHandle)
     }
 
-    override fun full(value: Float, shape: IntArray, device: TorchDevice): TorchTensorFloat =
+    override fun multiply(a: TorchTensorFloat, k: Number): TorchTensorFloat = a * k.toFloat()
+
+    override fun full(value: Float, shape: IntArray, device: Device): TorchTensorFloat =
         wrap(full_float(value, shape.toCValues(), shape.size, device.toInt())!!)
+
+    override fun randIntegral(low: Float, high: Float, shape: IntArray, device: Device): TorchTensorFloat =
+        wrap(randint_float(low.toLong(), high.toLong(), shape.toCValues(), shape.size, device.toInt())!!)
+
+    override fun TorchTensorFloat.randIntegral(low: Float, high: Float): TorchTensorFloat =
+        wrap(randint_long_like(this.tensorHandle, low.toLong(), high.toLong())!!)
+
+    override fun TorchTensorFloat.randIntegralAssign(low: Float, high: Float): Unit {
+        randint_long_like_assign(this.tensorHandle, low.toLong(), high.toLong())
+    }
 }
 
 public class TorchTensorLongAlgebra(scope: DeferScope) :
-    TorchTensorRingAlgebra<Long, LongVar, LongArray, TorchTensorLong>(scope) {
+    TorchTensorAlgebra<Long, LongVar, LongArray, TorchTensorLong>(scope) {
     override fun wrap(tensorHandle: COpaquePointer): TorchTensorLong =
         TorchTensorLong(scope = scope, tensorHandle = tensorHandle)
 
+    override fun number(value: Number): TorchTensorLong =
+        full(value.toLong(), intArrayOf(1), Device.CPU).sum()
+
     override fun TorchTensorLong.copyToArray(): LongArray =
         this.elements().map { it.second }.toList().toLongArray()
 
-    override fun copyFromArray(array: LongArray, shape: IntArray, device: TorchDevice): TorchTensorLong =
+    override fun copyFromArray(array: LongArray, shape: IntArray, device: Device): TorchTensorLong =
         wrap(from_blob_long(array.toCValues(), shape.toCValues(), shape.size, device.toInt(), true)!!)
 
     override fun fromBlob(arrayBlob: CPointer<LongVar>, shape: IntArray): TorchTensorLong =
-        wrap(from_blob_long(arrayBlob, shape.toCValues(), shape.size, TorchDevice.TorchCPU.toInt(), false)!!)
+        wrap(from_blob_long(arrayBlob, shape.toCValues(), shape.size, Device.CPU.toInt(), false)!!)
 
     override fun TorchTensorLong.getData(): CPointer<LongVar> {
-        check(this.device is TorchDevice.TorchCPU) {
+        check(this.device is Device.CPU) {
             "This tensor is not available on CPU"
         }
         return get_data_long(this.tensorHandle)!!
     }
 
-    override fun randIntegral(low: Long, high: Long, shape: IntArray, device: TorchDevice): TorchTensorLong =
+    override fun randIntegral(low: Long, high: Long, shape: IntArray, device: Device): TorchTensorLong =
         wrap(randint_long(low, high, shape.toCValues(), shape.size, device.toInt())!!)
 
     override fun TorchTensorLong.randIntegral(low: Long, high: Long): TorchTensorLong =
         wrap(randint_long_like(this.tensorHandle, low, high)!!)
+
     override fun TorchTensorLong.randIntegralAssign(low: Long, high: Long): Unit {
         randint_long_like_assign(this.tensorHandle, low, high)
     }
@@ -368,32 +511,37 @@ public class TorchTensorLongAlgebra(scope: DeferScope) :
         times_long_assign(value, this.tensorHandle)
     }
 
-    override fun full(value: Long, shape: IntArray, device: TorchDevice): TorchTensorLong =
+    override fun multiply(a: TorchTensorLong, k: Number): TorchTensorLong = a * k.toLong()
+
+    override fun full(value: Long, shape: IntArray, device: Device): TorchTensorLong =
         wrap(full_long(value, shape.toCValues(), shape.size, device.toInt())!!)
 }
 
 public class TorchTensorIntAlgebra(scope: DeferScope) :
-    TorchTensorRingAlgebra<Int, IntVar, IntArray, TorchTensorInt>(scope) {
+    TorchTensorAlgebra<Int, IntVar, IntArray, TorchTensorInt>(scope) {
     override fun wrap(tensorHandle: COpaquePointer): TorchTensorInt =
         TorchTensorInt(scope = scope, tensorHandle = tensorHandle)
 
+    override fun number(value: Number): TorchTensorInt =
+        full(value.toInt(), intArrayOf(1), Device.CPU).sum()
+
     override fun TorchTensorInt.copyToArray(): IntArray =
         this.elements().map { it.second }.toList().toIntArray()
 
-    override fun copyFromArray(array: IntArray, shape: IntArray, device: TorchDevice): TorchTensorInt =
+    override fun copyFromArray(array: IntArray, shape: IntArray, device: Device): TorchTensorInt =
         wrap(from_blob_int(array.toCValues(), shape.toCValues(), shape.size, device.toInt(), true)!!)
 
     override fun fromBlob(arrayBlob: CPointer<IntVar>, shape: IntArray): TorchTensorInt =
-        wrap(from_blob_int(arrayBlob, shape.toCValues(), shape.size, TorchDevice.TorchCPU.toInt(), false)!!)
+        wrap(from_blob_int(arrayBlob, shape.toCValues(), shape.size, Device.CPU.toInt(), false)!!)
 
     override fun TorchTensorInt.getData(): CPointer<IntVar> {
-        require(this.device is TorchDevice.TorchCPU) {
+        require(this.device is Device.CPU) {
             "This tensor is not available on CPU"
         }
         return get_data_int(this.tensorHandle)!!
     }
 
-    override fun randIntegral(low: Int, high: Int, shape: IntArray, device: TorchDevice): TorchTensorInt =
+    override fun randIntegral(low: Int, high: Int, shape: IntArray, device: Device): TorchTensorInt =
         wrap(randint_int(low, high, shape.toCValues(), shape.size, device.toInt())!!)
 
     override fun TorchTensorInt.randIntegral(low: Int, high: Int): TorchTensorInt =
@@ -433,7 +581,9 @@ public class TorchTensorIntAlgebra(scope: DeferScope) :
         times_int_assign(value, this.tensorHandle)
     }
 
-    override fun full(value: Int, shape: IntArray, device: TorchDevice): TorchTensorInt =
+    override fun multiply(a: TorchTensorInt, k: Number): TorchTensorInt = a * k.toInt()
+
+    override fun full(value: Int, shape: IntArray, device: Device): TorchTensorInt =
         wrap(full_int(value, shape.toCValues(), shape.size, device.toInt())!!)
 }
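Every structural operation now has a checked default (the operator or infix form, equivalent to safe = true) and an explicit opt-out for hot paths. A sketch of both modes on CPU tensors:

    TorchTensorRealAlgebra {
        val a = randNormal(shape = intArrayOf(2, 3))
        val b = randNormal(shape = intArrayOf(3, 4))

        val product = a dot b            // checkDotOperation: 3 == 3
        val doubled = product + product  // checkLinearOperation: same device and shape

        // Skip validation once compatibility is established, e.g. in a loop:
        val unchecked = a.dot(b, safe = false)

        // A mismatch now fails fast with a readable message instead of
        // aborting inside libtorch:
        //   randNormal(intArrayOf(2, 2)) + randNormal(intArrayOf(3, 3))
        //   -> IllegalStateException: Tensors must be of identical shape
        println(unchecked.shape.toList())
    }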
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/BenchmarkMatMult.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/BenchmarkMatMult.kt
index fdf44534a..400f5a9a0 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/BenchmarkMatMult.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/BenchmarkMatMult.kt
@@ -7,15 +7,15 @@ internal fun benchmarkingMatMultDouble(
     scale: Int,
     numWarmUp: Int,
     numIter: Int,
-    device: TorchDevice = TorchDevice.TorchCPU
+    device: Device = Device.CPU
 ): Unit {
     TorchTensorRealAlgebra {
         println("Benchmarking $scale x $scale matrices over Double's on $device: ")
         setSeed(SEED)
         val lhs = randNormal(shape = intArrayOf(scale, scale), device = device)
         val rhs = randNormal(shape = intArrayOf(scale, scale), device = device)
-        repeat(numWarmUp) { lhs dotAssign rhs }
-        val measuredTime = measureTime { repeat(numIter) { lhs dotAssign rhs } }
+        repeat(numWarmUp) { lhs.dotAssign(rhs, false) }
+        val measuredTime = measureTime { repeat(numIter) { lhs.dotAssign(rhs, false) } }
         println("    ${measuredTime / numIter} p.o. with $numIter iterations")
     }
 }
@@ -24,15 +24,15 @@ internal fun benchmarkingMatMultFloat(
     scale: Int,
     numWarmUp: Int,
     numIter: Int,
-    device: TorchDevice = TorchDevice.TorchCPU
+    device: Device = Device.CPU
 ): Unit {
     TorchTensorFloatAlgebra {
         println("Benchmarking $scale x $scale matrices over Float's on $device: ")
         setSeed(SEED)
         val lhs = randNormal(shape = intArrayOf(scale, scale), device = device)
         val rhs = randNormal(shape = intArrayOf(scale, scale), device = device)
-        repeat(numWarmUp) { lhs dotAssign rhs }
-        val measuredTime = measureTime { repeat(numIter) { lhs dotAssign rhs } }
+        repeat(numWarmUp) { lhs.dotAssign(rhs, false) }
+        val measuredTime = measureTime { repeat(numIter) { lhs.dotAssign(rhs, false) } }
         println("    ${measuredTime / numIter} p.o. with $numIter iterations")
     }
 }
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/BenchmarkRandomGenerators.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/BenchmarkRandomGenerators.kt
index 2ce2a591e..bc4d1ad12 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/BenchmarkRandomGenerators.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/BenchmarkRandomGenerators.kt
@@ -7,7 +7,7 @@ internal fun benchmarkingRandNormal(
     samples: Int,
     numWarmUp: Int,
     numIter: Int,
-    device: TorchDevice = TorchDevice.TorchCPU): Unit
+    device: Device = Device.CPU): Unit
 {
     TorchTensorFloatAlgebra{
         println("Benchmarking generation of $samples Normal samples on $device: ")
@@ -23,7 +23,7 @@ internal fun benchmarkingRandUniform(
     samples: Int,
     numWarmUp: Int,
     numIter: Int,
-    device: TorchDevice = TorchDevice.TorchCPU): Unit
+    device: Device = Device.CPU): Unit
 {
     TorchTensorFloatAlgebra{
         println("Benchmarking generation of $samples Uniform samples on $device: ")
@@ -40,7 +40,7 @@ internal fun benchmarkingRandIntegral(
     samples: Int,
     numWarmUp: Int,
     numIter: Int,
-    device: TorchDevice = TorchDevice.TorchCPU): Unit
+    device: Device = Device.CPU): Unit
 {
     TorchTensorIntAlgebra {
         println("Benchmarking generation of $samples integer [0,100] samples on $device: ")
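The benchmarks switch from the infix dotAssign, which now validates on every call, to the explicit unsafe overload, so the timing loop measures libtorch's matmul rather than the shape checks. The same pattern applies to any loop whose operands were validated once up front; a condensed sketch:

    TorchTensorRealAlgebra {
        val lhs = randNormal(shape = intArrayOf(100, 100))
        val rhs = randNormal(shape = intArrayOf(100, 100))
        lhs dotAssign rhs                           // checked once
        repeat(1000) { lhs.dotAssign(rhs, false) }  // unchecked in the loop
    }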
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestAutograd.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestAutograd.kt
index 076188fd1..a8e27511e 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestAutograd.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestAutograd.kt
@@ -2,7 +2,7 @@ package kscience.kmath.torch
 
 import kotlin.test.*
 
-internal fun testingAutoGrad(dim: Int, device: TorchDevice = TorchDevice.TorchCPU): Unit {
+internal fun testingAutoGrad(dim: Int, device: Device = Device.CPU): Unit {
     TorchTensorRealAlgebra {
         setSeed(SEED)
         val tensorX = randNormal(shape = intArrayOf(dim), device = device)
@@ -26,7 +26,7 @@ internal fun testingAutoGrad(dim: Int, device: TorchDevice = TorchDevice.TorchCP
 
 internal fun testingBatchedAutoGrad(bath: IntArray,
                                     dim: Int,
-                                    device: TorchDevice = TorchDevice.TorchCPU): Unit {
+                                    device: Device = Device.CPU): Unit {
     TorchTensorRealAlgebra {
         setSeed(SEED)
 
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
index cd248acb5..e26b0aa9c 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
@@ -3,7 +3,7 @@ package kscience.kmath.torch
 import kotlinx.cinterop.*
 import kotlin.test.*
 
-internal fun testingCopyFromArray(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+internal fun testingCopyFromArray(device: Device = Device.CPU): Unit {
     TorchTensorRealAlgebra {
         val array = (1..24).map { 10.0 * it * it }.toDoubleArray()
         val shape = intArrayOf(2, 3, 4)
@@ -52,21 +52,21 @@ class TestTorchTensor {
 
     @Test
     fun testTypeMoving() = TorchTensorFloatAlgebra {
-        val tensorInt = copyFromArray(floatArrayOf(1f,2f,3f), intArrayOf(3)).copyToInt()
+        val tensorInt = copyFromArray(floatArrayOf(1f, 2f, 3f), intArrayOf(3)).copyToInt()
         TorchTensorIntAlgebra {
-            val temporalTensor = copyFromArray(intArrayOf(4,5,6),intArrayOf(3))
+            val temporalTensor = copyFromArray(intArrayOf(4, 5, 6), intArrayOf(3))
             tensorInt swap temporalTensor
-            assertTrue(temporalTensor.copyToArray() contentEquals intArrayOf(1,2,3))
+            assertTrue(temporalTensor.copyToArray() contentEquals intArrayOf(1, 2, 3))
         }
-        assertTrue(tensorInt.copyToFloat().copyToArray() contentEquals floatArrayOf(4f,5f,6f))
+        assertTrue(tensorInt.copyToFloat().copyToArray() contentEquals floatArrayOf(4f, 5f, 6f))
     }
 
     @Test
-    fun testViewWithNoCopy() = TorchTensorIntAlgebra{
-        val tensor = copyFromArray(intArrayOf(1,2,3,4,5,6), shape = intArrayOf(6))
-        val viewTensor = tensor.view(intArrayOf(2,3))
-        assertTrue(viewTensor.shape contentEquals intArrayOf(2,3))
-        viewTensor[intArrayOf(0,0)] = 10
+    fun testViewWithNoCopy() = TorchTensorIntAlgebra {
+        val tensor = copyFromArray(intArrayOf(1, 2, 3, 4, 5, 6), shape = intArrayOf(6))
+        val viewTensor = tensor.view(intArrayOf(2, 3))
+        assertTrue(viewTensor.shape contentEquals intArrayOf(2, 3))
+        viewTensor[intArrayOf(0, 0)] = 10
         assertEquals(tensor[intArrayOf(0)], 10)
     }
 }
\ No newline at end of file
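testViewWithNoCopy pins down that view is zero-copy: a write through the view is visible through the original handle, and the checked variant guards the element count first. In sketch form:

    TorchTensorIntAlgebra {
        val tensor = copyFromArray(intArrayOf(1, 2, 3, 4, 5, 6), shape = intArrayOf(6))
        val asMatrix = tensor.view(intArrayOf(2, 3)) // 6 == 2 * 3, checkView passes
        asMatrix[intArrayOf(0, 0)] = 10
        println(tensor[intArrayOf(0)])               // 10: same storage
        // tensor.view(intArrayOf(4, 2)) would fail: 6 != 8
    }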
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebra.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebra.kt
index fa6246c87..afa952ee7 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebra.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebra.kt
@@ -6,7 +6,7 @@ import kscience.kmath.structures.Matrix
 import kotlin.math.*
 import kotlin.test.*
 
-internal fun testingScalarProduct(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+internal fun testingScalarProduct(device: Device = Device.CPU): Unit {
     TorchTensorRealAlgebra {
         val lhs = randUniform(shape = intArrayOf(3), device = device)
         val rhs = randUniform(shape = intArrayOf(3), device = device)
@@ -19,7 +19,7 @@ internal fun testingScalarProduct(device: TorchDevice = TorchDevice.TorchCPU): U
     }
 }
 
-internal fun testingMatrixMultiplication(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+internal fun testingMatrixMultiplication(device: Device = Device.CPU): Unit {
     TorchTensorRealAlgebra {
         setSeed(SEED)
 
@@ -49,7 +49,7 @@ internal fun testingMatrixMultiplication(device: TorchDevice = TorchDevice.Torch
     }
 }
 
-internal fun testingLinearStructure(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+internal fun testingLinearStructure(device: Device = Device.CPU): Unit {
     TorchTensorRealAlgebra {
         val shape = intArrayOf(3)
         val tensorA = full(value = -4.5, shape = shape, device = device)
@@ -84,7 +84,7 @@ internal fun testingLinearStructure(device: TorchDevice = TorchDevice.TorchCPU):
     }
 }
 
-internal fun testingTensorTransformations(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+internal fun testingTensorTransformations(device: Device = Device.CPU): Unit {
     TorchTensorRealAlgebra {
         setSeed(SEED)
         val tensor = randNormal(shape = intArrayOf(3, 3), device = device)
@@ -102,21 +102,21 @@ internal fun testingTensorTransformations(device: TorchDevice = TorchDevice.Torc
     }
 }
 
-internal fun testingBatchedSVD(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+internal fun testingBatchedSVD(device: Device = Device.CPU): Unit {
     TorchTensorRealAlgebra {
         val tensor = randNormal(shape = intArrayOf(7, 5, 3), device = device)
         val (tensorU, tensorS, tensorV) = tensor.svd()
-        val error = tensor - (tensorU dot (diagEmbed(tensorS) dot tensorV.transpose(-2,-1)))
+        val error = tensor - (tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2,-1)))
         assertTrue(error.abs().sum().value() < TOLERANCE)
     }
 }
 
-internal fun testingBatchedSymEig(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+internal fun testingBatchedSymEig(device: Device = Device.CPU): Unit {
     TorchTensorRealAlgebra {
         val tensor = randNormal(shape = intArrayOf(5,5), device = device)
         val tensorSigma = tensor + tensor.transpose(-2,-1)
         val (tensorS, tensorV) = tensorSigma.symEig()
-        val error = tensorSigma - (tensorV dot (diagEmbed(tensorS) dot tensorV.transpose(-2,-1)))
+        val error = tensorSigma - (tensorV dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2,-1)))
         assertTrue(error.abs().sum().value() < TOLERANCE)
     }
 }
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt
index c56c9bb28..8333b4e9a 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt
@@ -6,7 +6,7 @@ import kotlin.test.*
 internal val SEED = 987654
 internal val TOLERANCE = 1e-6
 
-internal fun testingSetSeed(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+internal fun testingSetSeed(device: Device = Device.CPU): Unit {
     TorchTensorRealAlgebra {
         setSeed(SEED)
         val normal = randNormal(IntArray(0), device = device).value()
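Taken together, the renamed diagonalEmbedding and the interface-level svd/symEig let the reconstruction identity exercised by the tests above be written directly against the algebra; a closing sketch (SEED and TOLERANCE as defined in TestUtils.kt):

    TorchTensorRealAlgebra {
        setSeed(SEED)
        val tensor = randNormal(shape = intArrayOf(5, 3))
        val (u, s, v) = tensor.svd()
        // A is approximately U . diag(S) . V^T
        val rebuilt = u dot (diagonalEmbedding(s) dot v.transpose(-2, -1))
        check((tensor - rebuilt).abs().sum().value() < TOLERANCE)
    }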