diff --git a/kmath-torch/ctorch/include/ctorch.h b/kmath-torch/ctorch/include/ctorch.h
index 396950e9d..3d6ffc779 100644
--- a/kmath-torch/ctorch/include/ctorch.h
+++ b/kmath-torch/ctorch/include/ctorch.h
@@ -61,14 +61,23 @@ extern "C"
     void set_long(TorchTensorHandle tensor_handle, int *index, long value);
     void set_int(TorchTensorHandle tensor_handle, int *index, int value);
 
-    TorchTensorHandle randn_double(int *shape, int shape_size, int device);
     TorchTensorHandle rand_double(int *shape, int shape_size, int device);
-    TorchTensorHandle randn_float(int *shape, int shape_size, int device);
+    TorchTensorHandle randn_double(int *shape, int shape_size, int device);
     TorchTensorHandle rand_float(int *shape, int shape_size, int device);
+    TorchTensorHandle randn_float(int *shape, int shape_size, int device);
 
     TorchTensorHandle randint_long(long low, long high, int *shape, int shape_size, int device);
     TorchTensorHandle randint_int(int low, int high, int *shape, int shape_size, int device);
 
+    TorchTensorHandle rand_like(TorchTensorHandle tensor_handle);
+    void rand_like_assign(TorchTensorHandle tensor_handle);
+    TorchTensorHandle randn_like(TorchTensorHandle tensor_handle);
+    void randn_like_assign(TorchTensorHandle tensor_handle);
+    TorchTensorHandle randint_long_like(TorchTensorHandle tensor_handle, long low, long high);
+    void randint_long_like_assign(TorchTensorHandle tensor_handle, long low, long high);
+    TorchTensorHandle randint_int_like(TorchTensorHandle tensor_handle, int low, int high);
+    void randint_int_like_assign(TorchTensorHandle tensor_handle, int low, int high);
+
     TorchTensorHandle full_double(double value, int *shape, int shape_size, int device);
     TorchTensorHandle full_float(float value, int *shape, int shape_size, int device);
     TorchTensorHandle full_long(long value, int *shape, int shape_size, int device);
diff --git a/kmath-torch/ctorch/src/ctorch.cc b/kmath-torch/ctorch/src/ctorch.cc
index f02b79b55..e5e1c55ef 100644
--- a/kmath-torch/ctorch/src/ctorch.cc
+++ b/kmath-torch/ctorch/src/ctorch.cc
@@ -181,22 +181,22 @@ void set_int(TorchTensorHandle tensor_handle, int *index, int value)
     ctorch::set(tensor_handle, index, value);
 }
 
-TorchTensorHandle randn_double(int *shape, int shape_size, int device)
-{
-    return new torch::Tensor(ctorch::randn(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
-}
 TorchTensorHandle rand_double(int *shape, int shape_size, int device)
 {
     return new torch::Tensor(ctorch::rand(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
 }
-TorchTensorHandle randn_float(int *shape, int shape_size, int device)
+TorchTensorHandle randn_double(int *shape, int shape_size, int device)
 {
-    return new torch::Tensor(ctorch::randn(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
+    return new torch::Tensor(ctorch::randn(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
 }
 TorchTensorHandle rand_float(int *shape, int shape_size, int device)
 {
     return new torch::Tensor(ctorch::rand(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
 }
+TorchTensorHandle randn_float(int *shape, int shape_size, int device)
+{
+    return new torch::Tensor(ctorch::randn(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
+}
 
 TorchTensorHandle randint_long(long low, long high, int *shape, int shape_size, int device)
 {
@@ -207,6 +207,39 @@ TorchTensorHandle randint_int(int low, int high, int *shape, int shape_size, int
     return new torch::Tensor(ctorch::randint(low, high,
                                              ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
 }
+TorchTensorHandle rand_like(TorchTensorHandle tensor_handle)
+{
+    return new torch::Tensor(torch::rand_like(ctorch::cast(tensor_handle)));
+}
+void rand_like_assign(TorchTensorHandle tensor_handle)
+{
+    ctorch::cast(tensor_handle) = torch::rand_like(ctorch::cast(tensor_handle));
+}
+TorchTensorHandle randn_like(TorchTensorHandle tensor_handle)
+{
+    return new torch::Tensor(torch::randn_like(ctorch::cast(tensor_handle)));
+}
+void randn_like_assign(TorchTensorHandle tensor_handle)
+{
+    ctorch::cast(tensor_handle) = torch::randn_like(ctorch::cast(tensor_handle));
+}
+TorchTensorHandle randint_long_like(TorchTensorHandle tensor_handle, long low, long high)
+{
+    return new torch::Tensor(torch::randint_like(ctorch::cast(tensor_handle), low, high));
+}
+void randint_long_like_assign(TorchTensorHandle tensor_handle, long low, long high)
+{
+    ctorch::cast(tensor_handle) = torch::randint_like(ctorch::cast(tensor_handle), low, high);
+}
+TorchTensorHandle randint_int_like(TorchTensorHandle tensor_handle, int low, int high)
+{
+    return new torch::Tensor(torch::randint_like(ctorch::cast(tensor_handle), low, high));
+}
+void randint_int_like_assign(TorchTensorHandle tensor_handle, int low, int high)
+{
+    ctorch::cast(tensor_handle) = torch::randint_like(ctorch::cast(tensor_handle), low, high);
+}
+
 TorchTensorHandle full_double(double value, int *shape, int shape_size, int device)
 {
     return new torch::Tensor(ctorch::full(value, ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkMatMultFloatGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkMatMultGPU.kt
similarity index 72%
rename from kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkMatMultFloatGPU.kt
rename to kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkMatMultGPU.kt
index 6857fef0e..10798af14 100644
--- a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkMatMultFloatGPU.kt
+++ b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkMatMultGPU.kt
@@ -2,19 +2,19 @@ package kscience.kmath.torch
 
 import kotlin.test.Test
 
-class BenchmarkMatMultFloatGPU {
+class BenchmarkMatMultGPU {
     @Test
-    fun benchmarkMatMult20() =
+    fun benchmarkMatMultFloat20() =
         benchmarkingMatMultFloat(20, 10, 100000,
             device = TorchDevice.TorchCUDA(0))
 
     @Test
-    fun benchmarkMatMult200() =
+    fun benchmarkMatMultFloat200() =
         benchmarkingMatMultFloat(200, 10, 10000,
             device = TorchDevice.TorchCUDA(0))
 
     @Test
-    fun benchmarkMatMult2000() =
+    fun benchmarkMatMultFloat2000() =
         benchmarkingMatMultFloat(2000, 10, 1000,
             device = TorchDevice.TorchCUDA(0))
 }
\ No newline at end of file
diff --git a/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkRandomGeneratorsGPU.kt b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkRandomGeneratorsGPU.kt
new file mode 100644
index 000000000..67de2efd7
--- /dev/null
+++ b/kmath-torch/src/nativeGPUTest/kotlin/kscience/kmath/torch/BenchmarkRandomGeneratorsGPU.kt
@@ -0,0 +1,64 @@
+package kscience.kmath.torch
+
+import kotlin.test.Test
+
+class BenchmarkRandomGeneratorsGPU {
+    @Test
+    fun benchmarkRandNormal1() =
+        benchmarkingRandNormal(10, 10, 100000,
+            device = TorchDevice.TorchCUDA(0))
+    @Test
+    fun benchmarkRandUniform1() =
+        benchmarkingRandUniform(10, 10, 100000,
+            device = TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun benchmarkRandIntegral1() =
+        benchmarkingRandIntegral(10, 10, 100000,
+            device = TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun benchmarkRandNormal3() =
+        benchmarkingRandNormal(1000, 10, 100000,
+            device = TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun benchmarkRandUniform3() =
+        benchmarkingRandUniform(1000, 10, 100000,
+            device = TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun benchmarkRandIntegral3() =
+        benchmarkingRandIntegral(1000, 10, 100000,
+            device = TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun benchmarkRandNormal5() =
+        benchmarkingRandNormal(100000, 10, 100000,
+            device = TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun benchmarkRandUniform5() =
+        benchmarkingRandUniform(100000, 10, 100000,
+            device = TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun benchmarkRandIntegral5() =
+        benchmarkingRandIntegral(100000, 10, 100000,
+            device = TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun benchmarkRandNormal7() =
+        benchmarkingRandNormal(10000000, 10, 10000,
+            device = TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun benchmarkRandUniform7() =
+        benchmarkingRandUniform(10000000, 10, 10000,
+            device = TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun benchmarkRandIntegral7() =
+        benchmarkingRandIntegral(10000000, 10, 10000,
+            device = TorchDevice.TorchCUDA(0))
+}
\ No newline at end of file
diff --git a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt
index 185264c0f..fb07db744 100644
--- a/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt
+++ b/kmath-torch/src/nativeMain/kotlin/kscience.kmath.torch/TorchTensorAlgebra.kt
@@ -108,8 +108,22 @@ public sealed class TorchTensorFieldAlgebra
 >(scope: DeferScope) :
     TorchTensorAlgebra(scope) {
 
-    public abstract fun randNormal(shape: IntArray, device: TorchDevice = TorchDevice.TorchCPU): TorchTensorType
     public abstract fun randUniform(shape: IntArray, device: TorchDevice = TorchDevice.TorchCPU): TorchTensorType
+    public abstract fun randNormal(shape: IntArray, device: TorchDevice = TorchDevice.TorchCPU): TorchTensorType
+
+    public fun TorchTensorType.randUniform(): TorchTensorType =
+        wrap(rand_like(this.tensorHandle)!!)
+
+    public fun TorchTensorType.randUniformAssign(): Unit {
+        rand_like_assign(this.tensorHandle)
+    }
+
+    public fun TorchTensorType.randNormal(): TorchTensorType =
+        wrap(randn_like(this.tensorHandle)!!)
+
+    public fun TorchTensorType.randNormalAssign(): Unit {
+        randn_like_assign(this.tensorHandle)
+    }
 
     public operator fun TorchTensorType.div(other: TorchTensorType): TorchTensorType =
         wrap(div_tensor(this.tensorHandle, other.tensorHandle)!!)
@@ -157,6 +171,8 @@ public sealed class TorchTensorRingAlgebra
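
Usage sketch (illustration only, not part of the patch): the Kotlin extensions added in TorchTensorAlgebra.kt delegate to the new *_like bindings, so an existing tensor can be refilled with random samples without reallocating it. The harness below assumes a scoped entry point named TorchTensorFloatAlgebra { ... } from this module; only the randUniform/randNormal calls and their *Assign variants come from this change, everything else is hypothetical.

// Hypothetical harness: `TorchTensorFloatAlgebra { ... }` is assumed to be this
// module's scoped algebra builder; only the rand* calls are the API added here.
fun main() {
    TorchTensorFloatAlgebra {
        // Allocate a fresh tensor of uniform samples on the CPU (default device).
        val tensor = randUniform(intArrayOf(2, 3), TorchDevice.TorchCPU)

        // `randNormal()` on a tensor maps to `randn_like`: a new tensor with the
        // same shape, dtype and device, filled with standard normal samples.
        val gaussian = tensor.randNormal()

        // The *Assign variants map to the `_like_assign` bindings and overwrite
        // the receiver in place, avoiding a new allocation.
        tensor.randUniformAssign()
        gaussian.randNormalAssign()

        println(tensor)
    }
}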