forked from kscience/kmath
Benchmarking random generators
parent ef570254e6
commit 7d25aa2834
@@ -61,14 +61,23 @@ extern "C"
void set_long(TorchTensorHandle tensor_handle, int *index, long value);
void set_int(TorchTensorHandle tensor_handle, int *index, int value);

TorchTensorHandle randn_double(int *shape, int shape_size, int device);
TorchTensorHandle rand_double(int *shape, int shape_size, int device);
TorchTensorHandle randn_float(int *shape, int shape_size, int device);
TorchTensorHandle rand_float(int *shape, int shape_size, int device);

TorchTensorHandle randint_long(long low, long high, int *shape, int shape_size, int device);
TorchTensorHandle randint_int(int low, int high, int *shape, int shape_size, int device);

TorchTensorHandle rand_like(TorchTensorHandle tensor_handle);
void rand_like_assign(TorchTensorHandle tensor_handle);
TorchTensorHandle randn_like(TorchTensorHandle tensor_handle);
void randn_like_assign(TorchTensorHandle tensor_handle);
TorchTensorHandle randint_long_like(TorchTensorHandle tensor_handle, long low, long high);
void randint_long_like_assign(TorchTensorHandle tensor_handle, long low, long high);
TorchTensorHandle randint_int_like(TorchTensorHandle tensor_handle, int low, int high);
void randint_int_like_assign(TorchTensorHandle tensor_handle, int low, int high);

TorchTensorHandle full_double(double value, int *shape, int shape_size, int device);
TorchTensorHandle full_float(float value, int *shape, int shape_size, int device);
TorchTensorHandle full_long(long value, int *shape, int shape_size, int device);
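On the Kotlin/Native side these entry points are called through cinterop and wrapped into the algebra's tensor types, as the Kotlin hunks later in this diff show for randint_long and randint_int. A hypothetical binding sketch, not part of the commit (the enclosing float algebra and the TorchTensorFloat return type are assumptions; toCValues(), toInt() and wrap() are the project's own helpers used below):

    // Hypothetical: how randn_float could be bound in a float-typed algebra,
    // mirroring the randint_long / randint_int bindings added in this commit.
    override fun randNormal(shape: IntArray, device: TorchDevice): TorchTensorFloat =
        wrap(randn_float(shape.toCValues(), shape.size, device.toInt())!!)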
@@ -181,22 +181,22 @@ void set_int(TorchTensorHandle tensor_handle, int *index, int value)
    ctorch::set<int>(tensor_handle, index, value);
}

TorchTensorHandle randn_double(int *shape, int shape_size, int device)
{
    return new torch::Tensor(ctorch::randn<double>(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
}
TorchTensorHandle rand_double(int *shape, int shape_size, int device)
{
    return new torch::Tensor(ctorch::rand<double>(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
}
TorchTensorHandle randn_float(int *shape, int shape_size, int device)
{
    return new torch::Tensor(ctorch::randn<float>(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
}
TorchTensorHandle rand_float(int *shape, int shape_size, int device)
{
    return new torch::Tensor(ctorch::rand<float>(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
}

TorchTensorHandle randint_long(long low, long high, int *shape, int shape_size, int device)
{
@@ -207,6 +207,39 @@ TorchTensorHandle randint_int(int low, int high, int *shape, int shape_size, int device)
    return new torch::Tensor(ctorch::randint<int>(low, high, ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
}

TorchTensorHandle rand_like(TorchTensorHandle tensor_handle)
{
    return new torch::Tensor(torch::rand_like(ctorch::cast(tensor_handle)));
}
void rand_like_assign(TorchTensorHandle tensor_handle)
{
    ctorch::cast(tensor_handle) = torch::rand_like(ctorch::cast(tensor_handle));
}
TorchTensorHandle randn_like(TorchTensorHandle tensor_handle)
{
    return new torch::Tensor(torch::randn_like(ctorch::cast(tensor_handle)));
}
void randn_like_assign(TorchTensorHandle tensor_handle)
{
    ctorch::cast(tensor_handle) = torch::randn_like(ctorch::cast(tensor_handle));
}
TorchTensorHandle randint_long_like(TorchTensorHandle tensor_handle, long low, long high)
{
    return new torch::Tensor(torch::randint_like(ctorch::cast(tensor_handle), low, high));
}
void randint_long_like_assign(TorchTensorHandle tensor_handle, long low, long high)
{
    ctorch::cast(tensor_handle) = torch::randint_like(ctorch::cast(tensor_handle), low, high);
}
TorchTensorHandle randint_int_like(TorchTensorHandle tensor_handle, int low, int high)
{
    return new torch::Tensor(torch::randint_like(ctorch::cast(tensor_handle), low, high));
}
void randint_int_like_assign(TorchTensorHandle tensor_handle, int low, int high)
{
    ctorch::cast(tensor_handle) = torch::randint_like(ctorch::cast(tensor_handle), low, high);
}

TorchTensorHandle full_double(double value, int *shape, int shape_size, int device)
{
    return new torch::Tensor(ctorch::full<double>(value, ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
@@ -2,19 +2,19 @@ package kscience.kmath.torch

import kotlin.test.Test

class BenchmarkMatMultGPU {
    @Test
    fun benchmarkMatMultFloat20() =
        benchmarkingMatMultFloat(20, 10, 100000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkMatMultFloat200() =
        benchmarkingMatMultFloat(200, 10, 10000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkMatMultFloat2000() =
        benchmarkingMatMultFloat(2000, 10, 1000,
            device = TorchDevice.TorchCUDA(0))
}
@@ -0,0 +1,64 @@
package kscience.kmath.torch

import kotlin.test.Test

class BenchmarkRandomGeneratorsGPU {
    @Test
    fun benchmarkRandNormal1() =
        benchmarkingRandNormal(10, 10, 100000,
            device = TorchDevice.TorchCUDA(0))
    @Test
    fun benchmarkRandUniform1() =
        benchmarkingRandUniform(10, 10, 100000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkRandIntegral1() =
        benchmarkingRandIntegral(10, 10, 100000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkRandNormal3() =
        benchmarkingRandNormal(1000, 10, 100000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkRandUniform3() =
        benchmarkingRandUniform(1000, 10, 100000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkRandIntegral3() =
        benchmarkingRandIntegral(1000, 10, 100000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkRandNormal5() =
        benchmarkingRandNormal(100000, 10, 100000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkRandUniform5() =
        benchmarkingRandUniform(100000, 10, 100000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkRandIntegral5() =
        benchmarkingRandIntegral(100000, 10, 100000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkRandNormal7() =
        benchmarkingRandNormal(10000000, 10, 10000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkRandUniform7() =
        benchmarkingRandUniform(10000000, 10, 10000,
            device = TorchDevice.TorchCUDA(0))

    @Test
    fun benchmarkRandIntegral7() =
        benchmarkingRandIntegral(10000000, 10, 10000,
            device = TorchDevice.TorchCUDA(0))
}
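Note (illustrative, not part of the commit): these GPU tests differ from the CPU benchmark class added later in this diff only in the device argument; the same helpers run on either backend, for example:

    benchmarkingRandNormal(1000, 10, 100000, device = TorchDevice.TorchCUDA(0))  // explicit GPU 0
    benchmarkingRandNormal(1000, 10, 100000)                                     // default TorchDevice.TorchCPU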
@@ -108,8 +108,22 @@ public sealed class TorchTensorFieldAlgebra<T, TVar : CPrimitiveVar,
    PrimitiveArrayType, TorchTensorType : TorchTensor<T>>(scope: DeferScope) :
    TorchTensorAlgebra<T, TVar, PrimitiveArrayType, TorchTensorType>(scope) {

    public abstract fun randNormal(shape: IntArray, device: TorchDevice = TorchDevice.TorchCPU): TorchTensorType
    public abstract fun randUniform(shape: IntArray, device: TorchDevice = TorchDevice.TorchCPU): TorchTensorType

    public fun TorchTensorType.randUniform(): TorchTensorType =
        wrap(rand_like(this.tensorHandle)!!)

    public fun TorchTensorType.randUniformAssign(): Unit {
        rand_like_assign(this.tensorHandle)
    }

    public fun TorchTensorType.randNormal(): TorchTensorType =
        wrap(randn_like(this.tensorHandle)!!)

    public fun TorchTensorType.randNormalAssign(): Unit {
        randn_like_assign(this.tensorHandle)
    }

    public operator fun TorchTensorType.div(other: TorchTensorType): TorchTensorType =
        wrap(div_tensor(this.tensorHandle, other.tensorHandle)!!)
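The new field-algebra API separates allocating generators from refills: randUniform()/randNormal() return a fresh tensor via rand_like/randn_like, while the *Assign variants regenerate the receiver through its existing handle. A minimal usage sketch (illustrative, not part of the commit), assuming the TorchTensorFloatAlgebra scope used by the benchmarks in this diff:

    TorchTensorFloatAlgebra {
        val x = randNormal(shape = intArrayOf(3, 3))   // fresh N(0, 1) tensor, CPU by default
        val u = x.randUniform()                        // new tensor with x's shape and device
        x.randNormalAssign()                           // regenerate x behind the same handle
    }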
@@ -157,6 +171,8 @@ public sealed class TorchTensorRingAlgebra<T, TVar : CPrimitiveVar,
        low: T, high: T, shape: IntArray,
        device: TorchDevice = TorchDevice.TorchCPU
    ): TorchTensorType
    public abstract fun TorchTensorType.randIntegral(low: T, high: T): TorchTensorType
    public abstract fun TorchTensorType.randIntegralAssign(low: T, high: T): Unit
}

public class TorchTensorRealAlgebra(scope: DeferScope) :
@@ -305,6 +321,12 @@ public class TorchTensorLongAlgebra(scope: DeferScope) :
    override fun randIntegral(low: Long, high: Long, shape: IntArray, device: TorchDevice): TorchTensorLong =
        wrap(randint_long(low, high, shape.toCValues(), shape.size, device.toInt())!!)

    override fun TorchTensorLong.randIntegral(low: Long, high: Long): TorchTensorLong =
        wrap(randint_long_like(this.tensorHandle, low, high)!!)
    override fun TorchTensorLong.randIntegralAssign(low: Long, high: Long): Unit {
        randint_long_like_assign(this.tensorHandle, low, high)
    }

    override operator fun Long.plus(other: TorchTensorLong): TorchTensorLong =
        wrap(plus_long(this, other.tensorHandle)!!)
@@ -363,6 +385,13 @@ public class TorchTensorIntAlgebra(scope: DeferScope) :
    override fun randIntegral(low: Int, high: Int, shape: IntArray, device: TorchDevice): TorchTensorInt =
        wrap(randint_int(low, high, shape.toCValues(), shape.size, device.toInt())!!)

    override fun TorchTensorInt.randIntegral(low: Int, high: Int): TorchTensorInt =
        wrap(randint_int_like(this.tensorHandle, low, high)!!)

    override fun TorchTensorInt.randIntegralAssign(low: Int, high: Int): Unit {
        randint_int_like_assign(this.tensorHandle, low, high)
    }

    override operator fun Int.plus(other: TorchTensorInt): TorchTensorInt =
        wrap(plus_int(this, other.tensorHandle)!!)
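The ring algebras get the integral counterpart in three forms: a shape-based generator and the two tensor-receiver variants bound to randint_*_like. A usage sketch in the Int algebra (illustrative, not part of the commit):

    TorchTensorIntAlgebra {
        val t = randIntegral(0, 100, shape = intArrayOf(5))  // random Int tensor drawn between 0 and 100
        val s = t.randIntegral(0, 10)                        // fresh tensor with t's shape and device
        t.randIntegralAssign(0, 10)                          // regenerate t behind the same handle
    }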
@@ -0,0 +1,66 @@
package kscience.kmath.torch

import kotlin.test.Test
import kotlin.time.measureTime

internal fun benchmarkingMatMultDouble(
    scale: Int,
    numWarmUp: Int,
    numIter: Int,
    device: TorchDevice = TorchDevice.TorchCPU
): Unit {
    TorchTensorRealAlgebra {
        println("Benchmarking $scale x $scale matrices over Double's on $device: ")
        setSeed(SEED)
        val lhs = randNormal(shape = intArrayOf(scale, scale), device = device)
        val rhs = randNormal(shape = intArrayOf(scale, scale), device = device)
        repeat(numWarmUp) { lhs dotAssign rhs }
        val measuredTime = measureTime { repeat(numIter) { lhs dotAssign rhs } }
        println(" ${measuredTime / numIter} p.o. with $numIter iterations")
    }
}

internal fun benchmarkingMatMultFloat(
    scale: Int,
    numWarmUp: Int,
    numIter: Int,
    device: TorchDevice = TorchDevice.TorchCPU
): Unit {
    TorchTensorFloatAlgebra {
        println("Benchmarking $scale x $scale matrices over Float's on $device: ")
        setSeed(SEED)
        val lhs = randNormal(shape = intArrayOf(scale, scale), device = device)
        val rhs = randNormal(shape = intArrayOf(scale, scale), device = device)
        repeat(numWarmUp) { lhs dotAssign rhs }
        val measuredTime = measureTime { repeat(numIter) { lhs dotAssign rhs } }
        println(" ${measuredTime / numIter} p.o. with $numIter iterations")
    }
}

internal class BenchmarkMatMult {

    @Test
    fun benchmarkMatMultDouble20() =
        benchmarkingMatMultDouble(20, 10, 100000)

    @Test
    fun benchmarkMatMultFloat20() =
        benchmarkingMatMultFloat(20, 10, 100000)

    @Test
    fun benchmarkMatMultDouble200() =
        benchmarkingMatMultDouble(200, 10, 10000)

    @Test
    fun benchmarkMatMultFloat200() =
        benchmarkingMatMultFloat(200, 10, 10000)

    @Test
    fun benchmarkMatMultDouble2000() =
        benchmarkingMatMultDouble(2000, 3, 20)

    @Test
    fun benchmarkMatMultFloat2000() =
        benchmarkingMatMultFloat(2000, 3, 20)

}
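These helpers are plain internal functions, so they can also be driven outside the test runner; a hypothetical scratch entry point (not part of the commit) would look like:

    fun main() {
        benchmarkingMatMultFloat(scale = 200, numWarmUp = 10, numIter = 1000)   // CPU (default device)
        benchmarkingMatMultFloat(scale = 200, numWarmUp = 10, numIter = 1000,
            device = TorchDevice.TorchCUDA(0))                                  // same benchmark on GPU 0
    }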
@@ -1,37 +0,0 @@
package kscience.kmath.torch

import kotlin.test.Test
import kotlin.time.measureTime

internal fun benchmarkingMatMultDouble(
    scale: Int,
    numWarmUp: Int,
    numIter: Int,
    device: TorchDevice = TorchDevice.TorchCPU
): Unit {
    TorchTensorRealAlgebra {
        println("Benchmarking $scale x $scale matrices over Double's on $device: ")
        setSeed(SEED)
        val lhs = randNormal(shape = intArrayOf(scale, scale), device = device)
        val rhs = randNormal(shape = intArrayOf(scale, scale), device = device)
        repeat(numWarmUp) { lhs dotAssign rhs }
        val measuredTime = measureTime { repeat(numIter) { lhs dotAssign rhs } }
        println(" ${measuredTime / numIter} p.o. with $numIter iterations")
    }
}

internal class BenchmarkMatMultDouble {

    @Test
    fun benchmarkMatMult20() =
        benchmarkingMatMultDouble(20, 10, 100000)

    @Test
    fun benchmarkMatMult200() =
        benchmarkingMatMultDouble(200, 10, 10000)

    @Test
    fun benchmarkMatMult2000() =
        benchmarkingMatMultDouble(2000, 3, 20)

}
@@ -1,36 +0,0 @@
package kscience.kmath.torch

import kotlin.test.Test
import kotlin.time.measureTime

internal fun benchmarkingMatMultFloat(
    scale: Int,
    numWarmUp: Int,
    numIter: Int,
    device: TorchDevice = TorchDevice.TorchCPU
): Unit {
    TorchTensorFloatAlgebra {
        println("Benchmarking $scale x $scale matrices over Float's on $device: ")
        setSeed(SEED)
        val lhs = randNormal(shape = intArrayOf(scale, scale), device = device)
        val rhs = randNormal(shape = intArrayOf(scale, scale), device = device)
        repeat(numWarmUp) { lhs dotAssign rhs }
        val measuredTime = measureTime { repeat(numIter) { lhs dotAssign rhs } }
        println(" ${measuredTime / numIter} p.o. with $numIter iterations")
    }
}

internal class BenchmarkMatMultFloat {

    @Test
    fun benchmarkMatMult20() =
        benchmarkingMatMultFloat(20, 10, 100000)

    @Test
    fun benchmarkMatMult200() =
        benchmarkingMatMultFloat(200, 10, 10000)

    @Test
    fun benchmarkMatMult2000() =
        benchmarkingMatMultFloat(2000, 3, 20)
}
@@ -0,0 +1,107 @@
package kscience.kmath.torch

import kotlin.test.Test
import kotlin.time.measureTime

internal fun benchmarkingRandNormal(
    samples: Int,
    numWarmUp: Int,
    numIter: Int,
    device: TorchDevice = TorchDevice.TorchCPU): Unit
{
    TorchTensorFloatAlgebra{
        println("Benchmarking generation of $samples Normal samples on $device: ")
        setSeed(SEED)
        val shape = intArrayOf(samples)
        val tensor = randNormal(shape = shape, device = device)
        repeat(numWarmUp) { tensor.randNormalAssign() }
        val measuredTime = measureTime { repeat(numIter) { tensor.randNormalAssign() } }
        println(" ${measuredTime / numIter} p.o. with $numIter iterations")
    }
}
internal fun benchmarkingRandUniform(
    samples: Int,
    numWarmUp: Int,
    numIter: Int,
    device: TorchDevice = TorchDevice.TorchCPU): Unit
{
    TorchTensorFloatAlgebra{
        println("Benchmarking generation of $samples Uniform samples on $device: ")
        setSeed(SEED)
        val shape = intArrayOf(samples)
        val tensor = randUniform(shape = shape, device = device)
        repeat(numWarmUp) { tensor.randUniformAssign() }
        val measuredTime = measureTime { repeat(numIter) { tensor.randUniformAssign() } }
        println(" ${measuredTime / numIter} p.o. with $numIter iterations")
    }
}

internal fun benchmarkingRandIntegral(
    samples: Int,
    numWarmUp: Int,
    numIter: Int,
    device: TorchDevice = TorchDevice.TorchCPU): Unit
{
    TorchTensorIntAlgebra {
        println("Benchmarking generation of $samples integer [0,100] samples on $device: ")
        setSeed(SEED)
        val shape = intArrayOf(samples)
        val tensor = randIntegral(0,100, shape = shape, device = device)
        repeat(numWarmUp) { tensor.randIntegralAssign(0,100) }
        val measuredTime = measureTime { repeat(numIter) { tensor.randIntegralAssign(0,100) } }
        println(" ${measuredTime / numIter} p.o. with $numIter iterations")
    }
}
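All three helpers above follow the same warm-up then measure pattern; the repeated body could be factored into a small generic helper like the sketch below (illustrative only, not part of the commit):

    // Sketch of the shared warm-up/measurement loop; measureTime comes from kotlin.time.
    internal inline fun benchmarkAssign(numWarmUp: Int, numIter: Int, action: () -> Unit) {
        repeat(numWarmUp) { action() }                                   // warm-up passes, not timed
        val measuredTime = measureTime { repeat(numIter) { action() } }  // timed passes
        println(" ${measuredTime / numIter} p.o. with $numIter iterations")
    }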
internal class BenchmarkRandomGenerators {

    @Test
    fun benchmarkRandNormal1() =
        benchmarkingRandNormal(10, 10, 100000)

    @Test
    fun benchmarkRandUniform1() =
        benchmarkingRandUniform(10, 10, 100000)

    @Test
    fun benchmarkRandIntegral1() =
        benchmarkingRandIntegral(10, 10, 100000)

    @Test
    fun benchmarkRandNormal3() =
        benchmarkingRandNormal(1000, 10, 10000)

    @Test
    fun benchmarkRandUniform3() =
        benchmarkingRandUniform(1000, 10, 10000)

    @Test
    fun benchmarkRandIntegral3() =
        benchmarkingRandIntegral(1000, 10, 10000)

    @Test
    fun benchmarkRandNormal5() =
        benchmarkingRandNormal(100000, 5, 100)

    @Test
    fun benchmarkRandUniform5() =
        benchmarkingRandUniform(100000, 5, 100)

    @Test
    fun benchmarkRandIntegral5() =
        benchmarkingRandIntegral(100000, 5, 100)

    @Test
    fun benchmarkRandNormal7() =
        benchmarkingRandNormal(10000000, 3, 20)

    @Test
    fun benchmarkRandUniform7() =
        benchmarkingRandUniform(10000000, 3, 20)

    @Test
    fun benchmarkRandIntegral7() =
        benchmarkingRandIntegral(10000000, 3, 20)

}
@@ -32,7 +32,7 @@ class TestTorchTensor {
            assertEquals(tensor[intArrayOf(0)], 2.0)
            val tensorData = tensor.getData()
            tensorData[0] = 3.0
            assertEquals(tensor[intArrayOf(0)], 3.0)
        }
        assertEquals(data[0], 3.0)
    }
@@ -19,7 +19,6 @@ internal fun testingSetSeed(device: TorchDevice = TorchDevice.TorchCPU): Unit {
    }
}

internal class TestUtils {
    @Test
    fun testSetNumThreads() {