diff --git a/kmath-torch/ctorch/include/ctorch.h b/kmath-torch/ctorch/include/ctorch.h
index 80a53e8ed..793c2115e 100644
--- a/kmath-torch/ctorch/include/ctorch.h
+++ b/kmath-torch/ctorch/include/ctorch.h
@@ -58,6 +58,10 @@ extern "C"
     TorchTensorHandle copy_to_cpu(TorchTensorHandle tensor_handle);
     TorchTensorHandle copy_to_gpu(TorchTensorHandle tensor_handle, int device);
 
+    TorchTensorHandle randn_float(int* shape, int shape_size);
+
+    TorchTensorHandle matmul(TorchTensorHandle lhs, TorchTensorHandle rhs);
+
 #ifdef __cplusplus
 }
 #endif
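Note: randn_float and matmul above are the only additions to the exported C surface; the rest of the patch wires them through to Kotlin. A minimal sketch of what they look like from Kotlin/Native through the raw ctorch cinterop bindings (handle names are illustrative; dispose_tensor is the existing deallocation entry point used by TorchMemoryHolder further down):

    import kotlinx.cinterop.*
    import ctorch.*

    fun rawBindingsSketch() {
        val shape = intArrayOf(2, 2)
        // Both calls return opaque handles to heap-allocated torch::Tensor objects.
        val a = randn_float(shape.toCValues(), shape.size)!!
        val b = randn_float(shape.toCValues(), shape.size)!!
        val c = matmul(a, b)!!
        // Raw handles are not scope-managed, so each one must be disposed explicitly.
        dispose_tensor(c)
        dispose_tensor(b)
        dispose_tensor(a)
    }
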
diff --git a/kmath-torch/ctorch/include/utils.hh b/kmath-torch/ctorch/include/utils.hh
index 86331339f..9950499a5 100644
--- a/kmath-torch/ctorch/include/utils.hh
+++ b/kmath-torch/ctorch/include/utils.hh
@@ -33,13 +33,17 @@ namespace ctorch
         return *static_cast<torch::Tensor *>(tensor_handle);
     }
 
-    template <typename Dtype>
-    inline torch::Tensor copy_from_blob(Dtype *data, int *shape, int dim, torch::Device device)
+    inline std::vector<int64_t> to_vec_int(int *arr, int arr_size)
     {
-        auto shape_vec = std::vector<int64_t>(dim);
-        shape_vec.assign(shape, shape + dim);
-        return torch::from_blob(data, shape_vec, dtype<Dtype>()).to(
-            torch::TensorOptions().layout(torch::kStrided).device(device), false, true);
+        auto vec = std::vector<int64_t>(arr_size);
+        vec.assign(arr, arr + arr_size);
+        return vec;
+    }
+
+    template <typename Dtype>
+    inline torch::Tensor copy_from_blob(Dtype *data, std::vector<int64_t> shape, torch::Device device)
+    {
+        return torch::from_blob(data, shape, dtype<Dtype>()).to(torch::TensorOptions().layout(torch::kStrided).device(device), false, true);
     }
 
     inline int *to_dynamic_ints(const c10::IntArrayRef &arr)
@@ -78,4 +82,10 @@ namespace ctorch
         ten.index(offset_to_index(offset, ten.strides())) = value;
     }
 
+    template <typename Dtype>
+    inline torch::Tensor randn(std::vector<int64_t> shape, torch::Device device)
+    {
+        return torch::randn(shape, torch::TensorOptions().dtype(dtype<Dtype>()).layout(torch::kStrided).device(device));
+    }
+
 } // namespace ctorch
diff --git a/kmath-torch/ctorch/src/ctorch.cc b/kmath-torch/ctorch/src/ctorch.cc
index 747fd4703..752784703 100644
--- a/kmath-torch/ctorch/src/ctorch.cc
+++ b/kmath-torch/ctorch/src/ctorch.cc
@@ -27,36 +27,36 @@ void set_seed(int seed)
 
 TorchTensorHandle copy_from_blob_double(double *data, int *shape, int dim)
 {
-    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim, torch::kCPU));
+    return new torch::Tensor(ctorch::copy_from_blob(data, ctorch::to_vec_int(shape, dim), torch::kCPU));
 }
 
 TorchTensorHandle copy_from_blob_float(float *data, int *shape, int dim)
 {
-    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim, torch::kCPU));
+    return new torch::Tensor(ctorch::copy_from_blob(data, ctorch::to_vec_int(shape, dim), torch::kCPU));
 }
 
 TorchTensorHandle copy_from_blob_long(long *data, int *shape, int dim)
 {
-    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim, torch::kCPU));
+    return new torch::Tensor(ctorch::copy_from_blob(data, ctorch::to_vec_int(shape, dim), torch::kCPU));
 }
 
 TorchTensorHandle copy_from_blob_int(int *data, int *shape, int dim)
 {
-    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim, torch::kCPU));
+    return new torch::Tensor(ctorch::copy_from_blob(data, ctorch::to_vec_int(shape, dim), torch::kCPU));
 }
 
 TorchTensorHandle copy_from_blob_to_gpu_double(double *data, int *shape, int dim, int device)
 {
-    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim, torch::Device(torch::kCUDA, device)));
+    return new torch::Tensor(ctorch::copy_from_blob(data, ctorch::to_vec_int(shape, dim), torch::Device(torch::kCUDA, device)));
 }
 
 TorchTensorHandle copy_from_blob_to_gpu_float(float *data, int *shape, int dim, int device)
 {
-    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim, torch::Device(torch::kCUDA, device)));
+    return new torch::Tensor(ctorch::copy_from_blob(data, ctorch::to_vec_int(shape, dim), torch::Device(torch::kCUDA, device)));
 }
 
 TorchTensorHandle copy_from_blob_to_gpu_long(long *data, int *shape, int dim, int device)
 {
-    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim, torch::Device(torch::kCUDA, device)));
+    return new torch::Tensor(ctorch::copy_from_blob(data, ctorch::to_vec_int(shape, dim), torch::Device(torch::kCUDA, device)));
 }
 
 TorchTensorHandle copy_from_blob_to_gpu_int(int *data, int *shape, int dim, int device)
 {
-    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim, torch::Device(torch::kCUDA, device)));
+    return new torch::Tensor(ctorch::copy_from_blob(data, ctorch::to_vec_int(shape, dim), torch::Device(torch::kCUDA, device)));
 }
 
 TorchTensorHandle copy_tensor(TorchTensorHandle tensor_handle)
@@ -167,3 +167,11 @@ TorchTensorHandle copy_to_gpu(TorchTensorHandle tensor_handle, int device)
 {
     return new torch::Tensor(ctorch::cast(tensor_handle).to(torch::Device(torch::kCUDA, device), false, true));
 }
+
+TorchTensorHandle randn_float(int* shape, int shape_size){
+    return new torch::Tensor(ctorch::randn<float>(ctorch::to_vec_int(shape, shape_size), torch::kCPU));
+}
+
+TorchTensorHandle matmul(TorchTensorHandle lhs, TorchTensorHandle rhs){
+    return new torch::Tensor(torch::matmul(ctorch::cast(lhs), ctorch::cast(rhs)));
+}
\ No newline at end of file
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorGPU.kt
index e5e448459..20a7b9439 100644
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorGPU.kt
+++ b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/TestTorchTensorGPU.kt
@@ -20,6 +20,6 @@ class TestTorchTensorGPU {
         tensor.elements().forEach {
             assertEquals(tensor[it.first], it.second)
         }
-        assertTrue(tensor.buffer.contentEquals(array.asBuffer()))
+        assertTrue(tensor.asBuffer().contentEquals(array.asBuffer()))
     }
 }
\ No newline at end of file
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchMemoryHolder.kt b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchMemoryHolder.kt
new file mode 100644
index 000000000..140315f5e
--- /dev/null
+++ b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchMemoryHolder.kt
@@ -0,0 +1,18 @@
+package kscience.kmath.torch
+
+import kotlinx.cinterop.*
+import ctorch.*
+
+public abstract class TorchMemoryHolder internal constructor(
+    internal val scope: DeferScope,
+    internal var tensorHandle: COpaquePointer?
+){
+    init {
+        scope.defer(::close)
+    }
+
+    protected fun close() {
+        dispose_tensor(tensorHandle)
+        tensorHandle = null
+    }
+}
\ No newline at end of file
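Note: TorchMemoryHolder ties the native handle's lifetime to a kotlinx.cinterop DeferScope — the init block registers close() via scope.defer, so dispose_tensor runs automatically when the owning scope unwinds. A sketch of the intended pattern, assuming the factory methods introduced in TorchTensor.kt below:

    import kotlinx.cinterop.memScoped

    fun scopedTensorSketch() = memScoped {
        // The tensor registers its close() with this DeferScope on construction ...
        val tensor = TorchTensor.copyFromFloatArray(this, floatArrayOf(1f, 2f), intArrayOf(2))
        println(tensor.asBuffer().contentEquals(tensor.copy().asBuffer()))
    } // ... and dispose_tensor runs here, when memScoped unwinds.
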
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensor.kt b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensor.kt
index 6749d4319..fc94e62c4 100644
--- a/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensor.kt
+++ b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensor.kt
@@ -5,22 +5,32 @@ import kscience.kmath.structures.*
 import kotlinx.cinterop.*
 import ctorch.*
 
-public abstract class TorchTensor<T, TorchTensorBufferImpl : TorchTensorBuffer<T>> :
+public sealed class TorchTensor<T, TorchTensorBufferImpl : TorchTensorBuffer<T>> :
     MutableNDBufferTrait<T, TorchTensorBufferImpl, TorchTensorStrides>() {
 
+    public fun asBuffer(): MutableBuffer<T> = buffer
+
     public companion object {
         public fun copyFromFloatArray(scope: DeferScope, array: FloatArray, shape: IntArray): TorchTensorFloat {
             val tensorHandle: COpaquePointer = copy_from_blob_float(
                 array.toCValues(), shape.toCValues(), shape.size
             )!!
-            return TorchTensorFloat(populateStridesFromNative(tensorHandle, rawShape = shape), scope, tensorHandle)
+            return TorchTensorFloat(
+                scope = scope,
+                tensorHandle = tensorHandle,
+                strides = populateStridesFromNative(tensorHandle, rawShape = shape)
+            )
         }
 
         public fun copyFromIntArray(scope: DeferScope, array: IntArray, shape: IntArray): TorchTensorInt {
             val tensorHandle: COpaquePointer = copy_from_blob_int(
                 array.toCValues(), shape.toCValues(), shape.size
             )!!
-            return TorchTensorInt(populateStridesFromNative(tensorHandle, rawShape = shape), scope, tensorHandle)
+            return TorchTensorInt(
+                scope = scope,
+                tensorHandle = tensorHandle,
+                strides = populateStridesFromNative(tensorHandle, rawShape = shape)
+            )
         }
 
         public fun copyFromFloatArrayToGPU(
@@ -32,7 +42,11 @@ public abstract class TorchTensor<T, TorchTensorBufferImpl : TorchTensorBuffer<
 
-    internal abstract fun wrap(outStrides: TorchTensorStrides, outScope: DeferScope, outTensorHandle: COpaquePointer): TorchTensor<T, TorchTensorBufferImpl>
+    internal abstract fun wrap(
+        outScope: DeferScope,
+        outTensorHandle: COpaquePointer,
+        outStrides: TorchTensorStrides
+    ): TorchTensor<T, TorchTensorBufferImpl>
 
     public fun copy(): TorchTensor<T, TorchTensorBufferImpl> = wrap(
-        outStrides = strides, outScope = buffer.scope,
-        outTensorHandle = copy_tensor(buffer.tensorHandle!!)!!
+        outScope = buffer.scope,
+        outTensorHandle = copy_tensor(buffer.tensorHandle!!)!!,
+        outStrides = strides
     )
 }
 
 public class TorchTensorFloat internal constructor(
-    override val strides: TorchTensorStrides,
     scope: DeferScope,
-    tensorHandle: COpaquePointer
+    tensorHandle: COpaquePointer,
+    override val strides: TorchTensorStrides
 ) : TorchTensor<Float, TorchTensorBufferFloat>() {
 
     override val buffer: TorchTensorBufferFloat = TorchTensorBufferFloat(scope, tensorHandle)
 
-    override fun wrap(outStrides: TorchTensorStrides, outScope: DeferScope, outTensorHandle: COpaquePointer) =
-        TorchTensorFloat(
-            strides = outStrides, scope = outScope, tensorHandle = outTensorHandle
-        )
+    override fun wrap(
+        outScope: DeferScope,
+        outTensorHandle: COpaquePointer,
+        outStrides: TorchTensorStrides
+    ): TorchTensorFloat = TorchTensorFloat(
+        scope = outScope, tensorHandle = outTensorHandle, strides = outStrides
+    )
 }
 
 public class TorchTensorInt internal constructor(
-    override val strides: TorchTensorStrides,
     scope: DeferScope,
-    tensorHandle: COpaquePointer
+    tensorHandle: COpaquePointer,
+    override val strides: TorchTensorStrides
 ) : TorchTensor<Int, TorchTensorBufferInt>() {
 
     override val buffer: TorchTensorBufferInt = TorchTensorBufferInt(scope, tensorHandle)
 
-    override fun wrap(outStrides: TorchTensorStrides, outScope: DeferScope, outTensorHandle: COpaquePointer) =
-        TorchTensorInt(
-            strides = outStrides, scope = outScope, tensorHandle = outTensorHandle
-        )
+    override fun wrap(
+        outScope: DeferScope,
+        outTensorHandle: COpaquePointer,
+        outStrides: TorchTensorStrides
+    ): TorchTensorInt = TorchTensorInt(
+        scope = outScope, tensorHandle = outTensorHandle, strides = outStrides
+    )
 }
 
 public class TorchTensorFloatGPU internal constructor(
-    override val strides: TorchTensorStrides,
     scope: DeferScope,
-    tensorHandle: COpaquePointer
+    tensorHandle: COpaquePointer,
+    override val strides: TorchTensorStrides
 ) : TorchTensor<Float, TorchTensorBufferFloatGPU>() {
 
     override val buffer: TorchTensorBufferFloatGPU = TorchTensorBufferFloatGPU(scope, tensorHandle)
 
-    override fun wrap(outStrides: TorchTensorStrides, outScope: DeferScope, outTensorHandle: COpaquePointer) =
+    override fun wrap(
+        outScope: DeferScope,
+        outTensorHandle: COpaquePointer,
+        outStrides: TorchTensorStrides
+    ): TorchTensorFloatGPU =
         TorchTensorFloatGPU(
-            strides = outStrides, scope = outScope, tensorHandle = outTensorHandle
+            scope = outScope, tensorHandle = outTensorHandle, strides = outStrides
         )
 }
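Note: the constructor and wrap() parameters are reordered to the uniform (scope, tensorHandle, strides) shape, and every call site now uses named arguments, so the three concrete classes read identically. A short sketch of the resulting surface (the element write assumes the usual set(IntArray, value) operator inherited from MutableNDBufferTrait):

    import kotlinx.cinterop.memScoped

    fun tensorSurfaceSketch() = memScoped {
        val tensor = TorchTensor.copyFromFloatArray(this, floatArrayOf(1f, 2f, 3f, 4f), intArrayOf(2, 2))
        val snapshot = tensor.copy()   // deep copy via copy_tensor + wrap()
        tensor[intArrayOf(0, 1)] = 10f // mutates only the original
        println(snapshot.asBuffer().contentEquals(tensor.asBuffer())) // false after the write
    }
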
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorAlgebra.kt b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorAlgebra.kt
new file mode 100644
index 000000000..27d18c275
--- /dev/null
+++ b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorAlgebra.kt
@@ -0,0 +1,65 @@
+package kscience.kmath.torch
+
+import kotlinx.cinterop.*
+import ctorch.*
+
+
+public sealed class TorchTensorAlgebra<
+        T,
+        TorchTensorBufferImpl : TorchTensorBuffer<T>,
+        PrimitiveArrayType>
+constructor(
+    internal val scope: DeferScope
+) {
+
+    protected abstract fun wrap(
+        outTensorHandle: COpaquePointer,
+        outStrides: TorchTensorStrides
+    ): TorchTensor<T, TorchTensorBufferImpl>
+
+    public infix fun TorchTensor<T, TorchTensorBufferImpl>.swap(other: TorchTensor<T, TorchTensorBufferImpl>): Unit {
+        check(this.shape contentEquals other.shape) {
+            "Attempt to swap tensors with different shapes"
+        }
+        this.buffer.tensorHandle = other.buffer.tensorHandle.also {
+            other.buffer.tensorHandle = this.buffer.tensorHandle
+        }
+    }
+
+    public abstract fun copyFromArray(array: PrimitiveArrayType, shape: IntArray): TorchTensor<T, TorchTensorBufferImpl>
+
+    public infix fun TorchTensor<T, TorchTensorBufferImpl>.dot(other: TorchTensor<T, TorchTensorBufferImpl>):
+            TorchTensor<T, TorchTensorBufferImpl> {
+        val resultHandle = matmul(this.buffer.tensorHandle, other.buffer.tensorHandle)!!
+        val strides = populateStridesFromNative(tensorHandle = resultHandle)
+        return wrap(resultHandle, strides)
+    }
+}
+
+
+public sealed class TorchTensorField<T, TorchTensorBufferImpl : TorchTensorBuffer<T>, PrimitiveArrayType>
+constructor(scope: DeferScope) : TorchTensorAlgebra<T, TorchTensorBufferImpl, PrimitiveArrayType>(scope) {
+    public abstract fun randn(shape: IntArray): TorchTensor<T, TorchTensorBufferImpl>
+}
+
+
+public class TorchTensorFloatAlgebra(scope: DeferScope) :
+    TorchTensorField<Float, TorchTensorBufferFloat, FloatArray>(scope) {
+    override fun wrap(
+        outTensorHandle: COpaquePointer,
+        outStrides: TorchTensorStrides
+    ): TorchTensorFloat = TorchTensorFloat(scope = scope, tensorHandle = outTensorHandle, strides = outStrides)
+
+    override fun randn(shape: IntArray): TorchTensor<Float, TorchTensorBufferFloat> {
+        val tensorHandle = randn_float(shape.toCValues(), shape.size)!!
+        val strides = populateStridesFromNative(tensorHandle = tensorHandle, rawShape = shape)
+        return wrap(tensorHandle, strides)
+    }
+
+    override fun copyFromArray(array: FloatArray, shape: IntArray): TorchTensorFloat =
+        TorchTensor.copyFromFloatArray(scope, array, shape)
+}
+
+
+public fun <R> TorchTensorFloatAlgebra(block: TorchTensorFloatAlgebra.() -> R): R =
+    memScoped { TorchTensorFloatAlgebra(this).block() }
\ No newline at end of file
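Note: the TorchTensorFloatAlgebra(block) builder runs the block inside memScoped, so every tensor created in it — including intermediates produced by dot — lives exactly as long as the block. A minimal usage sketch of the new algebra:

    fun algebraSketch(): IntArray = TorchTensorFloatAlgebra {
        val a = copyFromArray(floatArrayOf(1f, 2f, 3f, 4f), intArrayOf(2, 2))
        val b = randn(intArrayOf(2, 2))
        val c = a dot b // torch::matmul through the C bridge
        a swap c        // exchanges the underlying native handles in place
        a.shape         // plain Kotlin values may escape the scope; tensors may not
    }
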
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorBuffer.kt b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorBuffer.kt
index 62873482e..44d6b8dd6 100644
--- a/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorBuffer.kt
+++ b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorBuffer.kt
@@ -5,25 +5,16 @@ import kscience.kmath.structures.MutableBuffer
 import kotlinx.cinterop.*
 import ctorch.*
 
-public abstract class TorchTensorBuffer<T> internal constructor(
-    internal val scope: DeferScope,
-    internal var tensorHandle: COpaquePointer?
-) : MutableBuffer<T> {
+public sealed class TorchTensorBuffer<T> constructor(
+    scope: DeferScope,
+    tensorHandle: COpaquePointer?
+) : MutableBuffer<T>, TorchMemoryHolder(scope, tensorHandle) {
 
     override val size: Int
         get(){
             return get_numel(tensorHandle!!)
         }
 
-    init {
-        scope.defer(::close)
-    }
-
-    protected fun close() {
-        dispose_tensor(tensorHandle)
-        tensorHandle = null
-    }
-
     internal abstract fun wrap(outScope: DeferScope, outTensorHandle: COpaquePointer): TorchTensorBuffer<T>
 
     override fun copy(): TorchTensorBuffer<T> = wrap(
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
index 86cd05b3a..7636ea99a 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
@@ -16,7 +16,7 @@ internal class TestTorchTensor {
         tensor.elements().forEach {
             assertEquals(tensor[it.first], it.second)
         }
-        assertTrue(tensor.buffer.contentEquals(array.asBuffer()))
+        assertTrue(tensor.asBuffer().contentEquals(array.asBuffer()))
     }
 
     @Test
@@ -27,7 +27,7 @@ internal class TestTorchTensor {
         tensor.elements().forEach {
             assertEquals(tensor[it.first], it.second)
         }
-        assertTrue(tensor.buffer.contentEquals(array.asBuffer()))
+        assertTrue(tensor.asBuffer().contentEquals(array.asBuffer()))
     }
 
     @Test
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebra.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebra.kt
new file mode 100644
index 000000000..346739d63
--- /dev/null
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensorAlgebra.kt
@@ -0,0 +1,36 @@
+package kscience.kmath.torch
+
+
+import kotlin.test.*
+import kotlin.time.measureTime
+
+
+class TestTorchTensorAlgebra {
+
+    @Test
+    fun swappingTensors() = TorchTensorFloatAlgebra {
+        val tensorA = copyFromArray(floatArrayOf(1f, 2f, 3f), intArrayOf(3))
+        val tensorB = tensorA.copy()
+        val tensorC = copyFromArray(floatArrayOf(4f, 5f, 6f), intArrayOf(3))
+        tensorA swap tensorC
+        assertTrue(tensorB.asBuffer().contentEquals(tensorC.asBuffer()))
+    }
+
+    @Test
+    fun dotOperation() = TorchTensorFloatAlgebra {
+        setSeed(987654)
+        var tensorA = randn(intArrayOf(1000, 1000))
+        val tensorB = randn(intArrayOf(1000, 1000))
+        measureTime {
+            repeat(100) {
+                TorchTensorFloatAlgebra {
+                    tensorA swap (tensorA dot tensorB)
+                }
+            }
+        }.also(::println)
+        assertTrue(tensorA.shape contentEquals tensorB.shape)
+    }
+
+
+}
+
diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt
index 53759c5a0..e15c8dcec 100644
--- a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt
@@ -1,7 +1,6 @@
 package kscience.kmath.torch
 
-import kotlin.test.Test
-import kotlin.test.assertEquals
+import kotlin.test.*
 
 internal class TestUtils {
 
@@ -11,4 +10,12 @@ internal class TestUtils {
         setNumThreads(numThreads)
         assertEquals(numThreads, getNumThreads())
     }
+    @Test
+    fun seedSetting() = TorchTensorFloatAlgebra {
+        setSeed(987654)
+        val tensorA = randn(intArrayOf(2,3))
+        setSeed(987654)
+        val tensorB = randn(intArrayOf(2,3))
+        assertTrue(tensorA.asBuffer().contentEquals(tensorB.asBuffer()))
+    }
 }
\ No newline at end of file
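
Note: in dotOperation above, tensorA swap (tensorA dot tensorB) exchanges handles with an unnamed intermediate, so the product survives in tensorA while the discarded handle stays with the intermediate and is disposed when the enclosing scope unwinds. A condensed sketch of that handle-exchange semantics:

    fun swapSemanticsSketch() = TorchTensorFloatAlgebra {
        val a = copyFromArray(floatArrayOf(1f, 2f), intArrayOf(2))
        val b = copyFromArray(floatArrayOf(3f, 4f), intArrayOf(2))
        a swap b
        // Only the native handles moved: a now reads 3f, 4f and b reads 1f, 2f.
        println(a.asBuffer().contentEquals(b.asBuffer())) // false
    }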