Syncing with dev

This commit is contained in:
Roland Grinis 2021-03-01 17:04:13 +00:00
commit 9b3258b06b
20 changed files with 219 additions and 217 deletions

.gitignore vendored
View File

@@ -13,4 +13,3 @@ out/
# Generated by javac -h and runtime
*.class
*.log

View File

@@ -175,6 +175,7 @@ public interface SpaceOperations<T> : Algebra<T> {
* @param k the divisor.
* @return the quotient.
*/
@Deprecated("Dividing not allowed in a Ring")
public operator fun T.div(k: Number): T = multiply(this, 1.0 / k.toDouble())
/**

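For context, a minimal sketch of what this deprecation flags (hypothetical usage, assuming kmath's `RealField` from `space.kscience.kmath.operations`; not part of this commit). Division by a scalar is only well defined in a `Field`, so in a `Ring` context one should multiply by the inverse instead:

import space.kscience.kmath.operations.RealField

fun main(): Unit = with(RealField) {
    val x = 4.0
    val quotient = x.div(2)          // still compiles, but now emits a deprecation warning
    val preferred = multiply(x, 0.5) // equivalent result via the Space operation
    println(quotient == preferred)   // true
}
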
View File

@@ -96,6 +96,7 @@ public interface Nd4jArraySpace<T, S : Space<T>> : NDSpace<T, S>, Nd4jArrayAlgeb
return a.ndArray.mul(k).wrap()
}
@Deprecated("Avoid using this method, underlying array get casted to Doubles")
public override operator fun NDStructure<T>.div(k: Number): Nd4jArrayStructure<T> {
return ndArray.div(k).wrap()
}

View File

@@ -13,7 +13,7 @@ To install the library, you have to build & publish locally `kmath-core`, `kmath
This builds `ctorch`, a C wrapper, and `jtorch`, a JNI wrapper for `LibTorch`, placed inside:
`~/.konan/third-party/kmath-torch-0.2.0-dev-4/cpp-build`
`~/.konan/third-party/kmath-torch-0.2.0/cpp-build`
You will have to link against it in your own project.
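
A hypothetical `build.gradle.kts` sketch of that linking step for a Kotlin/Native consumer; the target and linker options are assumptions based on the build layout above, not part of this repository:

kotlin {
    linuxX64 {
        binaries.executable {
            // point the linker at the prebuilt ctorch wrapper under ~/.konan/third-party
            linkerOpts(
                "-L${System.getProperty("user.home")}/.konan/third-party/kmath-torch-0.2.0/cpp-build",
                "-lctorch"
            )
        }
    }
}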

View File

@@ -2,21 +2,21 @@ package space.kscience.kmath.torch
public sealed class Device {
public object CPU: space.kscience.kmath.torch.Device() {
public object CPU: Device() {
override fun toString(): String {
return "CPU"
}
}
public data class CUDA(val index: Int): space.kscience.kmath.torch.Device()
public data class CUDA(val index: Int): Device()
public fun toInt(): Int {
when(this) {
is space.kscience.kmath.torch.Device.CPU -> return 0
is space.kscience.kmath.torch.Device.CUDA -> return this.index + 1
is Device.CPU -> return 0
is Device.CUDA -> return this.index + 1
}
}
public companion object {
public fun fromInt(deviceInt: Int): space.kscience.kmath.torch.Device {
return if (deviceInt == 0) space.kscience.kmath.torch.Device.CPU else space.kscience.kmath.torch.Device.CUDA(
public fun fromInt(deviceInt: Int): Device {
return if (deviceInt == 0) Device.CPU else Device.CUDA(
deviceInt - 1
)
}

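A minimal usage sketch of the `Device` API with the shortened names this change introduces (hypothetical, not part of the commit); note that CUDA indices are encoded with an offset of one:

import space.kscience.kmath.torch.Device

fun main() {
    val gpu = Device.CUDA(0)
    println(gpu.toInt())        // 1: CUDA devices map to index + 1
    println(Device.fromInt(0))  // CPU
    println(Device.fromInt(1))  // CUDA(index=0)
}
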
View File

@@ -8,7 +8,7 @@ public interface TorchTensor<T> : TensorStructure<T> {
public fun item(): T
public val strides: IntArray
public val size: Int
public val device: space.kscience.kmath.torch.Device
public val device: Device
override fun value(): T {
checkIsValue()
return item()

View File

@@ -11,7 +11,7 @@ internal inline fun <T, PrimitiveArrayType, TorchTensorType : TorchTensorOverFie
numWarmUp: Int,
numIter: Int,
fieldName: String,
device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU
device: Device = Device.CPU
): Unit {
println("Benchmarking $scale x $scale $fieldName matrices on $device: ")
setSeed(SEED)

View File

@@ -10,9 +10,9 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
samples: Int,
numWarmUp: Int,
numIter: Int,
device: space.kscience.kmath.torch.Device,
device: Device,
distName: String,
initBlock: TorchTensorAlgebraType.(IntArray, space.kscience.kmath.torch.Device) -> TorchTensorType,
initBlock: TorchTensorAlgebraType.(IntArray, Device) -> TorchTensorType,
runBlock: TorchTensorAlgebraType.(TorchTensorType) -> Unit
): Unit{
println("Benchmarking generation of $samples $distName samples on $device: ")
@@ -30,7 +30,7 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
samples: Int,
numWarmUp: Int,
numIter: Int,
device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit{
device: Device = Device.CPU): Unit{
benchmarkRand(
samples,
numWarmUp,
@@ -48,7 +48,7 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
samples: Int,
numWarmUp: Int,
numIter: Int,
device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit{
device: Device = Device.CPU): Unit{
benchmarkRand(
samples,
numWarmUp,
@@ -66,7 +66,7 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
samples: Int,
numWarmUp: Int,
numIter: Int,
device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit{
device: Device = Device.CPU): Unit{
benchmarkRand(
samples,
numWarmUp,
@@ -85,9 +85,9 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
benchmarkRandUniform(10, 10, 100000)
benchmarkRandIntegral(10, 10, 100000)
if(cudaAvailable()) {
benchmarkRandNormal(10, 10, 100000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandUniform(10, 10, 100000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandIntegral(10, 10, 100000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandNormal(10, 10, 100000, device = Device.CUDA(0))
benchmarkRandUniform(10, 10, 100000, device = Device.CUDA(0))
benchmarkRandIntegral(10, 10, 100000, device = Device.CUDA(0))
}
}
@@ -99,9 +99,9 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
benchmarkRandUniform(1000, 10, 10000)
benchmarkRandIntegral(1000, 10, 10000)
if(cudaAvailable()) {
benchmarkRandNormal(1000, 10, 100000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandUniform(1000, 10, 100000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandIntegral(1000, 10, 100000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandNormal(1000, 10, 100000, device = Device.CUDA(0))
benchmarkRandUniform(1000, 10, 100000, device = Device.CUDA(0))
benchmarkRandIntegral(1000, 10, 100000, device = Device.CUDA(0))
}
}
@@ -112,9 +112,9 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
benchmarkRandUniform(100000, 5, 100)
benchmarkRandIntegral(100000, 5, 100)
if(cudaAvailable()){
benchmarkRandNormal(100000, 10, 100000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandUniform(100000, 10, 100000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandIntegral(100000, 10, 100000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandNormal(100000, 10, 100000, device = Device.CUDA(0))
benchmarkRandUniform(100000, 10, 100000, device = Device.CUDA(0))
benchmarkRandIntegral(100000, 10, 100000, device = Device.CUDA(0))
}
}
@@ -125,8 +125,8 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
benchmarkRandUniform(10000000, 3, 20)
benchmarkRandIntegral(10000000, 3, 20)
if(cudaAvailable()){
benchmarkRandNormal(10000000, 10, 10000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandUniform(10000000, 10, 10000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandIntegral(10000000, 10, 10000, device = space.kscience.kmath.torch.Device.CUDA(0))
benchmarkRandNormal(10000000, 10, 10000, device = Device.CUDA(0))
benchmarkRandUniform(10000000, 10, 10000, device = Device.CUDA(0))
benchmarkRandIntegral(10000000, 10, 10000, device = Device.CUDA(0))
}
}

View File

@@ -6,7 +6,7 @@ import kotlin.test.assertTrue
internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Float, FloatArray, TorchTensorType>>
TorchTensorAlgebraType.testingAutoGrad(device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit {
TorchTensorAlgebraType.testingAutoGrad(device: Device = Device.CPU): Unit {
setSeed(SEED)
val dim = 3
val tensorX = randNormal(shape = intArrayOf(dim), device = device)
@@ -29,7 +29,7 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Float, FloatArray, TorchTensorType>>
TorchTensorAlgebraType.testingBatchedAutoGrad(device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit {
TorchTensorAlgebraType.testingBatchedAutoGrad(device: Device = Device.CPU): Unit {
setSeed(SEED)
val batch = intArrayOf(2)
val dim = 2

View File

@@ -7,7 +7,7 @@ import kotlin.test.assertTrue
internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Float, FloatArray, TorchTensorType>>
TorchTensorAlgebraType.testingCopying(device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit {
TorchTensorAlgebraType.testingCopying(device: Device = Device.CPU): Unit {
val array = (1..24).map { 10f * it * it }.toFloatArray()
val shape = intArrayOf(2, 3, 4)
val tensor = copyFromArray(array, shape = shape, device = device)
@@ -15,13 +15,13 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
tensor[intArrayOf(1, 2, 3)] = 0.1f
assertTrue(copyOfTensor.copyToArray() contentEquals array)
assertEquals(0.1f, tensor[intArrayOf(1, 2, 3)])
if(device != space.kscience.kmath.torch.Device.CPU){
if(device != Device.CPU){
val normalCpu = randNormal(intArrayOf(2, 3))
val normalGpu = normalCpu.copyToDevice(device)
assertTrue(normalCpu.copyToArray() contentEquals normalGpu.copyToArray())
val uniformGpu = randUniform(intArrayOf(3,2),device)
val uniformCpu = uniformGpu.copyToDevice(space.kscience.kmath.torch.Device.CPU)
val uniformCpu = uniformGpu.copyToDevice(Device.CPU)
assertTrue(uniformGpu.copyToArray() contentEquals uniformCpu.copyToArray())
}
}
@@ -42,7 +42,7 @@ internal inline fun <T, PrimitiveArrayType, TorchTensorType : TorchTensorOverFie
internal inline fun <TorchTensorType : TorchTensor<Int>,
TorchTensorAlgebraType : TorchTensorAlgebra<Int, IntArray, TorchTensorType>>
TorchTensorAlgebraType.testingViewWithNoCopy(device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU) {
TorchTensorAlgebraType.testingViewWithNoCopy(device: Device = Device.CPU) {
val tensor = copyFromArray(intArrayOf(1, 2, 3, 4, 5, 6), shape = intArrayOf(6), device)
val viewTensor = tensor.view(intArrayOf(2, 3))
assertTrue(viewTensor.shape contentEquals intArrayOf(2, 3))

View File

@@ -11,7 +11,7 @@ import kotlin.test.*
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
TorchTensorAlgebraType.testingScalarProduct(device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit {
TorchTensorAlgebraType.testingScalarProduct(device: Device = Device.CPU): Unit {
val lhs = randUniform(shape = intArrayOf(3), device = device)
val rhs = randUniform(shape = intArrayOf(3), device = device)
val product = lhs dot rhs
@@ -24,7 +24,7 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
TorchTensorAlgebraType.testingMatrixMultiplication(device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit {
TorchTensorAlgebraType.testingMatrixMultiplication(device: Device = Device.CPU): Unit {
setSeed(SEED)
val lhsTensor = randNormal(shape = intArrayOf(3, 3), device = device)
@@ -54,7 +54,7 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
TorchTensorAlgebraType.testingLinearStructure(device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit {
TorchTensorAlgebraType.testingLinearStructure(device: Device = Device.CPU): Unit {
val shape = intArrayOf(3)
val tensorA = full(value = -4.5, shape = shape, device = device)
@@ -91,7 +91,7 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
TorchTensorAlgebraType.testingTensorTransformations(device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit {
TorchTensorAlgebraType.testingTensorTransformations(device: Device = Device.CPU): Unit {
setSeed(SEED)
val tensor = randNormal(shape = intArrayOf(3, 3), device = device)
val result = tensor.exp().log()
@@ -110,7 +110,7 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
TorchTensorAlgebraType.testingBatchedSVD(device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit {
TorchTensorAlgebraType.testingBatchedSVD(device: Device = Device.CPU): Unit {
val tensor = randNormal(shape = intArrayOf(7, 5, 3), device = device)
val (tensorU, tensorS, tensorV) = tensor.svd()
val error = tensor - (tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1)))
@@ -119,7 +119,7 @@ internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
internal inline fun <TorchTensorType : TorchTensorOverField<Double>,
TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Double, DoubleArray, TorchTensorType>>
TorchTensorAlgebraType.testingBatchedSymEig(device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit {
TorchTensorAlgebraType.testingBatchedSymEig(device: Device = Device.CPU): Unit {
val tensor = randNormal(shape = intArrayOf(5, 5), device = device)
val tensorSigma = tensor + tensor.transpose(-2, -1)
val (tensorS, tensorV) = tensorSigma.symEig()

View File

@@ -9,9 +9,9 @@ internal val TOLERANCE = 1e-6
internal inline fun <T, PrimitiveArrayType, TorchTensorType : TorchTensor<T>,
TorchTensorAlgebraType : TorchTensorAlgebra<T, PrimitiveArrayType, TorchTensorType>>
TorchTensorAlgebraType.withCuda(block: TorchTensorAlgebraType.(space.kscience.kmath.torch.Device) -> Unit): Unit {
this.block(space.kscience.kmath.torch.Device.CPU)
if (cudaAvailable()) this.block(space.kscience.kmath.torch.Device.CUDA(0))
TorchTensorAlgebraType.withCuda(block: TorchTensorAlgebraType.(Device) -> Unit): Unit {
this.block(Device.CPU)
if (cudaAvailable()) this.block(Device.CUDA(0))
}
internal inline fun <T, PrimitiveArrayType, TorchTensorType : TorchTensor<T>,
@@ -24,7 +24,7 @@ internal inline fun <T, PrimitiveArrayType, TorchTensorType : TorchTensor<T>,
internal inline fun <TorchTensorType : TorchTensorOverField<Float>,
TorchTensorAlgebraType : TorchTensorPartialDivisionAlgebra<Float, FloatArray, TorchTensorType>>
TorchTensorAlgebraType.testingSetSeed(device: space.kscience.kmath.torch.Device = space.kscience.kmath.torch.Device.CPU): Unit {
TorchTensorAlgebraType.testingSetSeed(device: Device = Device.CPU): Unit {
setSeed(SEED)
val integral = randIntegral(0, 100, IntArray(0), device = device).value()
val normal = randNormal(IntArray(0), device = device).value()

View File

@@ -151,6 +151,7 @@ extern "C"
TorchTensorHandle detach_from_graph(TorchTensorHandle tensor_handle);
TorchTensorHandle autograd_tensor(TorchTensorHandle value, TorchTensorHandle variable, bool retain_graph);
TorchTensorHandle autohess_tensor(TorchTensorHandle value, TorchTensorHandle variable);
TorchTensorHandle autohess_tensor_given_grad(TorchTensorHandle value, TorchTensorHandle variable, TorchTensorHandle gradient);
#ifdef __cplusplus
}

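These declarations back the `grad`/`hess` operations on the JVM side shown further down in this commit. A hedged sketch of how they surface in Kotlin, assuming the `TorchTensorRealAlgebra { }` scope function from this module:

TorchTensorRealAlgebra {
    val x = randNormal(shape = intArrayOf(3), device = Device.CPU)
    x.requiresGrad = true
    val f = (x dot x).sum()                 // scalar-valued expression of x
    val df = f.grad(x, retainGraph = true)  // gradient, via autograd_tensor
    val hf = f hess x                       // Hessian, via autohess_tensor
    println(df)
    println(hf)
}
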
View File

@@ -560,7 +560,7 @@ JNIEXPORT jlong JNICALL Java_space_kscience_kmath_torch_JTorch_detachFromGraph(J
return (long)new torch::Tensor(ctorch::cast(tensor_handle).detach());
}
JNIEXPORT jlong JNICALL
Java_space_kscience_kmath_torch_JTorch_autogradTensor(JNIEnv *, jclass, jlong value, jlong variable, jboolean retain_graph)
{
return (long)new torch::Tensor(torch::autograd::grad({ctorch::cast(value)}, {ctorch::cast(variable)}, {}, retain_graph)[0]);

View File

@@ -10,19 +10,19 @@ public sealed class TorchTensorAlgebraJVM<
internal val scope: DeferScope
) : TorchTensorAlgebra<T, PrimitiveArrayType, TorchTensorType> {
override fun getNumThreads(): Int {
return space.kscience.kmath.torch.JTorch.getNumThreads()
return JTorch.getNumThreads()
}
override fun setNumThreads(numThreads: Int): Unit {
space.kscience.kmath.torch.JTorch.setNumThreads(numThreads)
JTorch.setNumThreads(numThreads)
}
override fun cudaAvailable(): Boolean {
return space.kscience.kmath.torch.JTorch.cudaIsAvailable()
return JTorch.cudaIsAvailable()
}
override fun setSeed(seed: Int): Unit {
space.kscience.kmath.torch.JTorch.setSeed(seed)
JTorch.setSeed(seed)
}
override var checks: Boolean = false
@@ -31,92 +31,92 @@ public sealed class TorchTensorAlgebraJVM<
override operator fun TorchTensorType.times(other: TorchTensorType): TorchTensorType {
if (checks) checkLinearOperation(this, other)
return wrap(space.kscience.kmath.torch.JTorch.timesTensor(this.tensorHandle, other.tensorHandle))
return wrap(JTorch.timesTensor(this.tensorHandle, other.tensorHandle))
}
override operator fun TorchTensorType.timesAssign(other: TorchTensorType): Unit {
if (checks) checkLinearOperation(this, other)
space.kscience.kmath.torch.JTorch.timesTensorAssign(this.tensorHandle, other.tensorHandle)
JTorch.timesTensorAssign(this.tensorHandle, other.tensorHandle)
}
override operator fun TorchTensorType.plus(other: TorchTensorType): TorchTensorType {
if (checks) checkLinearOperation(this, other)
return wrap(space.kscience.kmath.torch.JTorch.plusTensor(this.tensorHandle, other.tensorHandle))
return wrap(JTorch.plusTensor(this.tensorHandle, other.tensorHandle))
}
override operator fun TorchTensorType.plusAssign(other: TorchTensorType): Unit {
if (checks) checkLinearOperation(this, other)
space.kscience.kmath.torch.JTorch.plusTensorAssign(this.tensorHandle, other.tensorHandle)
JTorch.plusTensorAssign(this.tensorHandle, other.tensorHandle)
}
override operator fun TorchTensorType.minus(other: TorchTensorType): TorchTensorType {
if (checks) checkLinearOperation(this, other)
return wrap(space.kscience.kmath.torch.JTorch.minusTensor(this.tensorHandle, other.tensorHandle))
return wrap(JTorch.minusTensor(this.tensorHandle, other.tensorHandle))
}
override operator fun TorchTensorType.minusAssign(other: TorchTensorType): Unit {
if (checks) checkLinearOperation(this, other)
space.kscience.kmath.torch.JTorch.minusTensorAssign(this.tensorHandle, other.tensorHandle)
JTorch.minusTensorAssign(this.tensorHandle, other.tensorHandle)
}
override operator fun TorchTensorType.unaryMinus(): TorchTensorType =
wrap(space.kscience.kmath.torch.JTorch.unaryMinus(this.tensorHandle))
wrap(JTorch.unaryMinus(this.tensorHandle))
override infix fun TorchTensorType.dot(other: TorchTensorType): TorchTensorType {
if (checks) checkDotOperation(this, other)
return wrap(space.kscience.kmath.torch.JTorch.matmul(this.tensorHandle, other.tensorHandle))
return wrap(JTorch.matmul(this.tensorHandle, other.tensorHandle))
}
override infix fun TorchTensorType.dotAssign(other: TorchTensorType): Unit {
if (checks) checkDotOperation(this, other)
space.kscience.kmath.torch.JTorch.matmulAssign(this.tensorHandle, other.tensorHandle)
JTorch.matmulAssign(this.tensorHandle, other.tensorHandle)
}
override infix fun TorchTensorType.dotRightAssign(other: TorchTensorType): Unit {
if (checks) checkDotOperation(this, other)
space.kscience.kmath.torch.JTorch.matmulRightAssign(this.tensorHandle, other.tensorHandle)
JTorch.matmulRightAssign(this.tensorHandle, other.tensorHandle)
}
override fun diagonalEmbedding(
diagonalEntries: TorchTensorType, offset: Int, dim1: Int, dim2: Int
): TorchTensorType =
wrap(space.kscience.kmath.torch.JTorch.diagEmbed(diagonalEntries.tensorHandle, offset, dim1, dim2))
wrap(JTorch.diagEmbed(diagonalEntries.tensorHandle, offset, dim1, dim2))
override fun TorchTensorType.transpose(i: Int, j: Int): TorchTensorType {
if (checks) checkTranspose(this.dimension, i, j)
return wrap(space.kscience.kmath.torch.JTorch.transposeTensor(tensorHandle, i, j))
return wrap(JTorch.transposeTensor(tensorHandle, i, j))
}
override fun TorchTensorType.transposeAssign(i: Int, j: Int): Unit {
if (checks) checkTranspose(this.dimension, i, j)
space.kscience.kmath.torch.JTorch.transposeTensorAssign(tensorHandle, i, j)
JTorch.transposeTensorAssign(tensorHandle, i, j)
}
override fun TorchTensorType.view(shape: IntArray): TorchTensorType {
if (checks) checkView(this, shape)
return wrap(space.kscience.kmath.torch.JTorch.viewTensor(this.tensorHandle, shape))
return wrap(JTorch.viewTensor(this.tensorHandle, shape))
}
override fun TorchTensorType.abs(): TorchTensorType = wrap(space.kscience.kmath.torch.JTorch.absTensor(tensorHandle))
override fun TorchTensorType.absAssign(): Unit = space.kscience.kmath.torch.JTorch.absTensorAssign(tensorHandle)
override fun TorchTensorType.abs(): TorchTensorType = wrap(JTorch.absTensor(tensorHandle))
override fun TorchTensorType.absAssign(): Unit = JTorch.absTensorAssign(tensorHandle)
override fun TorchTensorType.sum(): TorchTensorType = wrap(space.kscience.kmath.torch.JTorch.sumTensor(tensorHandle))
override fun TorchTensorType.sumAssign(): Unit = space.kscience.kmath.torch.JTorch.sumTensorAssign(tensorHandle)
override fun TorchTensorType.sum(): TorchTensorType = wrap(JTorch.sumTensor(tensorHandle))
override fun TorchTensorType.sumAssign(): Unit = JTorch.sumTensorAssign(tensorHandle)
override fun TorchTensorType.randIntegral(low: Long, high: Long): TorchTensorType =
wrap(space.kscience.kmath.torch.JTorch.randintLike(this.tensorHandle, low, high))
wrap(JTorch.randintLike(this.tensorHandle, low, high))
override fun TorchTensorType.randIntegralAssign(low: Long, high: Long): Unit =
space.kscience.kmath.torch.JTorch.randintLikeAssign(this.tensorHandle, low, high)
JTorch.randintLikeAssign(this.tensorHandle, low, high)
override fun TorchTensorType.copy(): TorchTensorType =
wrap(space.kscience.kmath.torch.JTorch.copyTensor(this.tensorHandle))
wrap(JTorch.copyTensor(this.tensorHandle))
override fun TorchTensorType.copyToDevice(device: space.kscience.kmath.torch.Device): TorchTensorType =
wrap(space.kscience.kmath.torch.JTorch.copyToDevice(this.tensorHandle, device.toInt()))
override fun TorchTensorType.copyToDevice(device: Device): TorchTensorType =
wrap(JTorch.copyToDevice(this.tensorHandle, device.toInt()))
override infix fun TorchTensorType.swap(other: TorchTensorType): Unit =
space.kscience.kmath.torch.JTorch.swapTensors(this.tensorHandle, other.tensorHandle)
JTorch.swapTensors(this.tensorHandle, other.tensorHandle)
}
public sealed class TorchTensorPartialDivisionAlgebraJVM<T, PrimitiveArrayType,
@@ -126,58 +126,58 @@ public sealed class TorchTensorPartialDivisionAlgebraJVM<T, PrimitiveArrayType,
override operator fun TorchTensorType.div(other: TorchTensorType): TorchTensorType {
if (checks) checkLinearOperation(this, other)
return wrap(space.kscience.kmath.torch.JTorch.divTensor(this.tensorHandle, other.tensorHandle))
return wrap(JTorch.divTensor(this.tensorHandle, other.tensorHandle))
}
override operator fun TorchTensorType.divAssign(other: TorchTensorType): Unit {
if (checks) checkLinearOperation(this, other)
space.kscience.kmath.torch.JTorch.divTensorAssign(this.tensorHandle, other.tensorHandle)
JTorch.divTensorAssign(this.tensorHandle, other.tensorHandle)
}
override fun TorchTensorType.randUniform(): TorchTensorType =
wrap(space.kscience.kmath.torch.JTorch.randLike(this.tensorHandle))
wrap(JTorch.randLike(this.tensorHandle))
override fun TorchTensorType.randUniformAssign(): Unit =
space.kscience.kmath.torch.JTorch.randLikeAssign(this.tensorHandle)
JTorch.randLikeAssign(this.tensorHandle)
override fun TorchTensorType.randNormal(): TorchTensorType =
wrap(space.kscience.kmath.torch.JTorch.randnLike(this.tensorHandle))
wrap(JTorch.randnLike(this.tensorHandle))
override fun TorchTensorType.randNormalAssign(): Unit =
space.kscience.kmath.torch.JTorch.randnLikeAssign(this.tensorHandle)
JTorch.randnLikeAssign(this.tensorHandle)
override fun TorchTensorType.exp(): TorchTensorType = wrap(space.kscience.kmath.torch.JTorch.expTensor(tensorHandle))
override fun TorchTensorType.expAssign(): Unit = space.kscience.kmath.torch.JTorch.expTensorAssign(tensorHandle)
override fun TorchTensorType.log(): TorchTensorType = wrap(space.kscience.kmath.torch.JTorch.logTensor(tensorHandle))
override fun TorchTensorType.logAssign(): Unit = space.kscience.kmath.torch.JTorch.logTensorAssign(tensorHandle)
override fun TorchTensorType.exp(): TorchTensorType = wrap(JTorch.expTensor(tensorHandle))
override fun TorchTensorType.expAssign(): Unit = JTorch.expTensorAssign(tensorHandle)
override fun TorchTensorType.log(): TorchTensorType = wrap(JTorch.logTensor(tensorHandle))
override fun TorchTensorType.logAssign(): Unit = JTorch.logTensorAssign(tensorHandle)
override fun TorchTensorType.svd(): Triple<TorchTensorType, TorchTensorType, TorchTensorType> {
val U = space.kscience.kmath.torch.JTorch.emptyTensor()
val V = space.kscience.kmath.torch.JTorch.emptyTensor()
val S = space.kscience.kmath.torch.JTorch.emptyTensor()
space.kscience.kmath.torch.JTorch.svdTensor(this.tensorHandle, U, S, V)
val U = JTorch.emptyTensor()
val V = JTorch.emptyTensor()
val S = JTorch.emptyTensor()
JTorch.svdTensor(this.tensorHandle, U, S, V)
return Triple(wrap(U), wrap(S), wrap(V))
}
override fun TorchTensorType.symEig(eigenvectors: Boolean): Pair<TorchTensorType, TorchTensorType> {
val V = space.kscience.kmath.torch.JTorch.emptyTensor()
val S = space.kscience.kmath.torch.JTorch.emptyTensor()
space.kscience.kmath.torch.JTorch.symeigTensor(this.tensorHandle, S, V, eigenvectors)
val V = JTorch.emptyTensor()
val S = JTorch.emptyTensor()
JTorch.symeigTensor(this.tensorHandle, S, V, eigenvectors)
return Pair(wrap(S), wrap(V))
}
override fun TorchTensorType.grad(variable: TorchTensorType, retainGraph: Boolean): TorchTensorType {
if (checks) this.checkIsValue()
return wrap(space.kscience.kmath.torch.JTorch.autogradTensor(this.tensorHandle, variable.tensorHandle, retainGraph))
return wrap(JTorch.autogradTensor(this.tensorHandle, variable.tensorHandle, retainGraph))
}
override infix fun TorchTensorType.hess(variable: TorchTensorType): TorchTensorType {
if (checks) this.checkIsValue()
return wrap(space.kscience.kmath.torch.JTorch.autohessTensor(this.tensorHandle, variable.tensorHandle))
return wrap(JTorch.autohessTensor(this.tensorHandle, variable.tensorHandle))
}
override fun TorchTensorType.detachFromGraph(): TorchTensorType =
wrap(space.kscience.kmath.torch.JTorch.detachFromGraph(this.tensorHandle))
wrap(JTorch.detachFromGraph(this.tensorHandle))
}
@@ -189,47 +189,47 @@ public class TorchTensorRealAlgebra(scope: DeferScope) :
override fun TorchTensorReal.copyToArray(): DoubleArray =
this.elements().map { it.second }.toList().toDoubleArray()
override fun copyFromArray(array: DoubleArray, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorReal =
wrap(space.kscience.kmath.torch.JTorch.fromBlobDouble(array, shape, device.toInt()))
override fun copyFromArray(array: DoubleArray, shape: IntArray, device: Device): TorchTensorReal =
wrap(JTorch.fromBlobDouble(array, shape, device.toInt()))
override fun randNormal(shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorReal =
wrap(space.kscience.kmath.torch.JTorch.randnDouble(shape, device.toInt()))
override fun randNormal(shape: IntArray, device: Device): TorchTensorReal =
wrap(JTorch.randnDouble(shape, device.toInt()))
override fun randUniform(shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorReal =
wrap(space.kscience.kmath.torch.JTorch.randDouble(shape, device.toInt()))
override fun randUniform(shape: IntArray, device: Device): TorchTensorReal =
wrap(JTorch.randDouble(shape, device.toInt()))
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorReal =
wrap(space.kscience.kmath.torch.JTorch.randintDouble(low, high, shape, device.toInt()))
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: Device): TorchTensorReal =
wrap(JTorch.randintDouble(low, high, shape, device.toInt()))
override operator fun Double.plus(other: TorchTensorReal): TorchTensorReal =
wrap(space.kscience.kmath.torch.JTorch.plusDouble(this, other.tensorHandle))
wrap(JTorch.plusDouble(this, other.tensorHandle))
override fun TorchTensorReal.plus(value: Double): TorchTensorReal =
wrap(space.kscience.kmath.torch.JTorch.plusDouble(value, this.tensorHandle))
wrap(JTorch.plusDouble(value, this.tensorHandle))
override fun TorchTensorReal.plusAssign(value: Double): Unit =
space.kscience.kmath.torch.JTorch.plusDoubleAssign(value, this.tensorHandle)
JTorch.plusDoubleAssign(value, this.tensorHandle)
override operator fun Double.minus(other: TorchTensorReal): TorchTensorReal =
wrap(space.kscience.kmath.torch.JTorch.plusDouble(-this, other.tensorHandle))
wrap(JTorch.plusDouble(-this, other.tensorHandle))
override fun TorchTensorReal.minus(value: Double): TorchTensorReal =
wrap(space.kscience.kmath.torch.JTorch.plusDouble(-value, this.tensorHandle))
wrap(JTorch.plusDouble(-value, this.tensorHandle))
override fun TorchTensorReal.minusAssign(value: Double): Unit =
space.kscience.kmath.torch.JTorch.plusDoubleAssign(-value, this.tensorHandle)
JTorch.plusDoubleAssign(-value, this.tensorHandle)
override operator fun Double.times(other: TorchTensorReal): TorchTensorReal =
wrap(space.kscience.kmath.torch.JTorch.timesDouble(this, other.tensorHandle))
wrap(JTorch.timesDouble(this, other.tensorHandle))
override fun TorchTensorReal.times(value: Double): TorchTensorReal =
wrap(space.kscience.kmath.torch.JTorch.timesDouble(value, this.tensorHandle))
wrap(JTorch.timesDouble(value, this.tensorHandle))
override fun TorchTensorReal.timesAssign(value: Double): Unit =
space.kscience.kmath.torch.JTorch.timesDoubleAssign(value, this.tensorHandle)
JTorch.timesDoubleAssign(value, this.tensorHandle)
override fun full(value: Double, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorReal =
wrap(space.kscience.kmath.torch.JTorch.fullDouble(value, shape, device.toInt()))
override fun full(value: Double, shape: IntArray, device: Device): TorchTensorReal =
wrap(JTorch.fullDouble(value, shape, device.toInt()))
}
public class TorchTensorFloatAlgebra(scope: DeferScope) :
@@ -240,47 +240,47 @@ public class TorchTensorFloatAlgebra(scope: DeferScope) :
override fun TorchTensorFloat.copyToArray(): FloatArray =
this.elements().map { it.second }.toList().toFloatArray()
override fun copyFromArray(array: FloatArray, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorFloat =
wrap(space.kscience.kmath.torch.JTorch.fromBlobFloat(array, shape, device.toInt()))
override fun copyFromArray(array: FloatArray, shape: IntArray, device: Device): TorchTensorFloat =
wrap(JTorch.fromBlobFloat(array, shape, device.toInt()))
override fun randNormal(shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorFloat =
wrap(space.kscience.kmath.torch.JTorch.randnFloat(shape, device.toInt()))
override fun randNormal(shape: IntArray, device: Device): TorchTensorFloat =
wrap(JTorch.randnFloat(shape, device.toInt()))
override fun randUniform(shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorFloat =
wrap(space.kscience.kmath.torch.JTorch.randFloat(shape, device.toInt()))
override fun randUniform(shape: IntArray, device: Device): TorchTensorFloat =
wrap(JTorch.randFloat(shape, device.toInt()))
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorFloat =
wrap(space.kscience.kmath.torch.JTorch.randintFloat(low, high, shape, device.toInt()))
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: Device): TorchTensorFloat =
wrap(JTorch.randintFloat(low, high, shape, device.toInt()))
override operator fun Float.plus(other: TorchTensorFloat): TorchTensorFloat =
wrap(space.kscience.kmath.torch.JTorch.plusFloat(this, other.tensorHandle))
wrap(JTorch.plusFloat(this, other.tensorHandle))
override fun TorchTensorFloat.plus(value: Float): TorchTensorFloat =
wrap(space.kscience.kmath.torch.JTorch.plusFloat(value, this.tensorHandle))
wrap(JTorch.plusFloat(value, this.tensorHandle))
override fun TorchTensorFloat.plusAssign(value: Float): Unit =
space.kscience.kmath.torch.JTorch.plusFloatAssign(value, this.tensorHandle)
JTorch.plusFloatAssign(value, this.tensorHandle)
override operator fun Float.minus(other: TorchTensorFloat): TorchTensorFloat =
wrap(space.kscience.kmath.torch.JTorch.plusFloat(-this, other.tensorHandle))
wrap(JTorch.plusFloat(-this, other.tensorHandle))
override fun TorchTensorFloat.minus(value: Float): TorchTensorFloat =
wrap(space.kscience.kmath.torch.JTorch.plusFloat(-value, this.tensorHandle))
wrap(JTorch.plusFloat(-value, this.tensorHandle))
override fun TorchTensorFloat.minusAssign(value: Float): Unit =
space.kscience.kmath.torch.JTorch.plusFloatAssign(-value, this.tensorHandle)
JTorch.plusFloatAssign(-value, this.tensorHandle)
override operator fun Float.times(other: TorchTensorFloat): TorchTensorFloat =
wrap(space.kscience.kmath.torch.JTorch.timesFloat(this, other.tensorHandle))
wrap(JTorch.timesFloat(this, other.tensorHandle))
override fun TorchTensorFloat.times(value: Float): TorchTensorFloat =
wrap(space.kscience.kmath.torch.JTorch.timesFloat(value, this.tensorHandle))
wrap(JTorch.timesFloat(value, this.tensorHandle))
override fun TorchTensorFloat.timesAssign(value: Float): Unit =
space.kscience.kmath.torch.JTorch.timesFloatAssign(value, this.tensorHandle)
JTorch.timesFloatAssign(value, this.tensorHandle)
override fun full(value: Float, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorFloat =
wrap(space.kscience.kmath.torch.JTorch.fullFloat(value, shape, device.toInt()))
override fun full(value: Float, shape: IntArray, device: Device): TorchTensorFloat =
wrap(JTorch.fullFloat(value, shape, device.toInt()))
}
public class TorchTensorLongAlgebra(scope: DeferScope) :
@@ -291,41 +291,41 @@ public class TorchTensorLongAlgebra(scope: DeferScope) :
override fun TorchTensorLong.copyToArray(): LongArray =
this.elements().map { it.second }.toList().toLongArray()
override fun copyFromArray(array: LongArray, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorLong =
wrap(space.kscience.kmath.torch.JTorch.fromBlobLong(array, shape, device.toInt()))
override fun copyFromArray(array: LongArray, shape: IntArray, device: Device): TorchTensorLong =
wrap(JTorch.fromBlobLong(array, shape, device.toInt()))
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorLong =
wrap(space.kscience.kmath.torch.JTorch.randintLong(low, high, shape, device.toInt()))
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: Device): TorchTensorLong =
wrap(JTorch.randintLong(low, high, shape, device.toInt()))
override operator fun Long.plus(other: TorchTensorLong): TorchTensorLong =
wrap(space.kscience.kmath.torch.JTorch.plusLong(this, other.tensorHandle))
wrap(JTorch.plusLong(this, other.tensorHandle))
override fun TorchTensorLong.plus(value: Long): TorchTensorLong =
wrap(space.kscience.kmath.torch.JTorch.plusLong(value, this.tensorHandle))
wrap(JTorch.plusLong(value, this.tensorHandle))
override fun TorchTensorLong.plusAssign(value: Long): Unit =
space.kscience.kmath.torch.JTorch.plusLongAssign(value, this.tensorHandle)
JTorch.plusLongAssign(value, this.tensorHandle)
override operator fun Long.minus(other: TorchTensorLong): TorchTensorLong =
wrap(space.kscience.kmath.torch.JTorch.plusLong(-this, other.tensorHandle))
wrap(JTorch.plusLong(-this, other.tensorHandle))
override fun TorchTensorLong.minus(value: Long): TorchTensorLong =
wrap(space.kscience.kmath.torch.JTorch.plusLong(-value, this.tensorHandle))
wrap(JTorch.plusLong(-value, this.tensorHandle))
override fun TorchTensorLong.minusAssign(value: Long): Unit =
space.kscience.kmath.torch.JTorch.plusLongAssign(-value, this.tensorHandle)
JTorch.plusLongAssign(-value, this.tensorHandle)
override operator fun Long.times(other: TorchTensorLong): TorchTensorLong =
wrap(space.kscience.kmath.torch.JTorch.timesLong(this, other.tensorHandle))
wrap(JTorch.timesLong(this, other.tensorHandle))
override fun TorchTensorLong.times(value: Long): TorchTensorLong =
wrap(space.kscience.kmath.torch.JTorch.timesLong(value, this.tensorHandle))
wrap(JTorch.timesLong(value, this.tensorHandle))
override fun TorchTensorLong.timesAssign(value: Long): Unit =
space.kscience.kmath.torch.JTorch.timesLongAssign(value, this.tensorHandle)
JTorch.timesLongAssign(value, this.tensorHandle)
override fun full(value: Long, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorLong =
wrap(space.kscience.kmath.torch.JTorch.fullLong(value, shape, device.toInt()))
override fun full(value: Long, shape: IntArray, device: Device): TorchTensorLong =
wrap(JTorch.fullLong(value, shape, device.toInt()))
}
public class TorchTensorIntAlgebra(scope: DeferScope) :
@@ -336,41 +336,41 @@ public class TorchTensorIntAlgebra(scope: DeferScope) :
override fun TorchTensorInt.copyToArray(): IntArray =
this.elements().map { it.second }.toList().toIntArray()
override fun copyFromArray(array: IntArray, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorInt =
wrap(space.kscience.kmath.torch.JTorch.fromBlobInt(array, shape, device.toInt()))
override fun copyFromArray(array: IntArray, shape: IntArray, device: Device): TorchTensorInt =
wrap(JTorch.fromBlobInt(array, shape, device.toInt()))
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorInt =
wrap(space.kscience.kmath.torch.JTorch.randintInt(low, high, shape, device.toInt()))
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: Device): TorchTensorInt =
wrap(JTorch.randintInt(low, high, shape, device.toInt()))
override operator fun Int.plus(other: TorchTensorInt): TorchTensorInt =
wrap(space.kscience.kmath.torch.JTorch.plusInt(this, other.tensorHandle))
wrap(JTorch.plusInt(this, other.tensorHandle))
override fun TorchTensorInt.plus(value: Int): TorchTensorInt =
wrap(space.kscience.kmath.torch.JTorch.plusInt(value, this.tensorHandle))
wrap(JTorch.plusInt(value, this.tensorHandle))
override fun TorchTensorInt.plusAssign(value: Int): Unit =
space.kscience.kmath.torch.JTorch.plusIntAssign(value, this.tensorHandle)
JTorch.plusIntAssign(value, this.tensorHandle)
override operator fun Int.minus(other: TorchTensorInt): TorchTensorInt =
wrap(space.kscience.kmath.torch.JTorch.plusInt(-this, other.tensorHandle))
wrap(JTorch.plusInt(-this, other.tensorHandle))
override fun TorchTensorInt.minus(value: Int): TorchTensorInt =
wrap(space.kscience.kmath.torch.JTorch.plusInt(-value, this.tensorHandle))
wrap(JTorch.plusInt(-value, this.tensorHandle))
override fun TorchTensorInt.minusAssign(value: Int): Unit =
space.kscience.kmath.torch.JTorch.plusIntAssign(-value, this.tensorHandle)
JTorch.plusIntAssign(-value, this.tensorHandle)
override operator fun Int.times(other: TorchTensorInt): TorchTensorInt =
wrap(space.kscience.kmath.torch.JTorch.timesInt(this, other.tensorHandle))
wrap(JTorch.timesInt(this, other.tensorHandle))
override fun TorchTensorInt.times(value: Int): TorchTensorInt =
wrap(space.kscience.kmath.torch.JTorch.timesInt(value, this.tensorHandle))
wrap(JTorch.timesInt(value, this.tensorHandle))
override fun TorchTensorInt.timesAssign(value: Int): Unit =
space.kscience.kmath.torch.JTorch.timesIntAssign(value, this.tensorHandle)
JTorch.timesIntAssign(value, this.tensorHandle)
override fun full(value: Int, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorInt =
wrap(space.kscience.kmath.torch.JTorch.fullInt(value, shape, device.toInt()))
override fun full(value: Int, shape: IntArray, device: Device): TorchTensorInt =
wrap(JTorch.fullInt(value, shape, device.toInt()))
}
public inline fun <R> TorchTensorRealAlgebra(block: TorchTensorRealAlgebra.() -> R): R =

View File

@@ -7,36 +7,36 @@ public sealed class TorchTensorJVM<T> constructor(
internal val tensorHandle: Long
) : TorchTensor<T>, TorchTensorMemoryHolder(scope)
{
override fun close(): Unit = space.kscience.kmath.torch.JTorch.disposeTensor(tensorHandle)
override fun close(): Unit = JTorch.disposeTensor(tensorHandle)
override val dimension: Int get() = space.kscience.kmath.torch.JTorch.getDim(tensorHandle)
override val dimension: Int get() = JTorch.getDim(tensorHandle)
override val shape: IntArray
get() = (1..dimension).map { space.kscience.kmath.torch.JTorch.getShapeAt(tensorHandle, it - 1) }.toIntArray()
get() = (1..dimension).map { JTorch.getShapeAt(tensorHandle, it - 1) }.toIntArray()
override val strides: IntArray
get() = (1..dimension).map { space.kscience.kmath.torch.JTorch.getStrideAt(tensorHandle, it - 1) }.toIntArray()
override val size: Int get() = space.kscience.kmath.torch.JTorch.getNumel(tensorHandle)
override val device: space.kscience.kmath.torch.Device get() = space.kscience.kmath.torch.Device.fromInt(space.kscience.kmath.torch.JTorch.getDevice(tensorHandle))
get() = (1..dimension).map { JTorch.getStrideAt(tensorHandle, it - 1) }.toIntArray()
override val size: Int get() = JTorch.getNumel(tensorHandle)
override val device: Device get() = Device.fromInt(JTorch.getDevice(tensorHandle))
override fun toString(): String = space.kscience.kmath.torch.JTorch.tensorToString(tensorHandle)
override fun toString(): String = JTorch.tensorToString(tensorHandle)
public fun copyToDouble(): TorchTensorReal = TorchTensorReal(
scope = scope,
tensorHandle = space.kscience.kmath.torch.JTorch.copyToDouble(this.tensorHandle)
tensorHandle = JTorch.copyToDouble(this.tensorHandle)
)
public fun copyToFloat(): TorchTensorFloat = TorchTensorFloat(
scope = scope,
tensorHandle = space.kscience.kmath.torch.JTorch.copyToFloat(this.tensorHandle)
tensorHandle = JTorch.copyToFloat(this.tensorHandle)
)
public fun copyToLong(): TorchTensorLong = TorchTensorLong(
scope = scope,
tensorHandle = space.kscience.kmath.torch.JTorch.copyToLong(this.tensorHandle)
tensorHandle = JTorch.copyToLong(this.tensorHandle)
)
public fun copyToInt(): TorchTensorInt = TorchTensorInt(
scope = scope,
tensorHandle = space.kscience.kmath.torch.JTorch.copyToInt(this.tensorHandle)
tensorHandle = JTorch.copyToInt(this.tensorHandle)
)
}
@@ -45,18 +45,18 @@ public sealed class TorchTensorOverFieldJVM<T> constructor(
tensorHandle: Long
) : TorchTensorJVM<T>(scope, tensorHandle), TorchTensorOverField<T> {
override var requiresGrad: Boolean
get() = space.kscience.kmath.torch.JTorch.requiresGrad(tensorHandle)
set(value) = space.kscience.kmath.torch.JTorch.setRequiresGrad(tensorHandle, value)
get() = JTorch.requiresGrad(tensorHandle)
set(value) = JTorch.setRequiresGrad(tensorHandle, value)
}
public class TorchTensorReal internal constructor(
scope: DeferScope,
tensorHandle: Long
) : TorchTensorOverFieldJVM<Double>(scope, tensorHandle) {
override fun item(): Double = space.kscience.kmath.torch.JTorch.getItemDouble(tensorHandle)
override fun get(index: IntArray): Double = space.kscience.kmath.torch.JTorch.getDouble(tensorHandle, index)
override fun item(): Double = JTorch.getItemDouble(tensorHandle)
override fun get(index: IntArray): Double = JTorch.getDouble(tensorHandle, index)
override fun set(index: IntArray, value: Double) {
space.kscience.kmath.torch.JTorch.setDouble(tensorHandle, index, value)
JTorch.setDouble(tensorHandle, index, value)
}
}
@@ -64,10 +64,10 @@ public class TorchTensorFloat internal constructor(
scope: DeferScope,
tensorHandle: Long
) : TorchTensorOverFieldJVM<Float>(scope, tensorHandle) {
override fun item(): Float = space.kscience.kmath.torch.JTorch.getItemFloat(tensorHandle)
override fun get(index: IntArray): Float = space.kscience.kmath.torch.JTorch.getFloat(tensorHandle, index)
override fun item(): Float = JTorch.getItemFloat(tensorHandle)
override fun get(index: IntArray): Float = JTorch.getFloat(tensorHandle, index)
override fun set(index: IntArray, value: Float) {
space.kscience.kmath.torch.JTorch.setFloat(tensorHandle, index, value)
JTorch.setFloat(tensorHandle, index, value)
}
}
@@ -75,10 +75,10 @@ public class TorchTensorLong internal constructor(
scope: DeferScope,
tensorHandle: Long
) : TorchTensorOverFieldJVM<Long>(scope, tensorHandle) {
override fun item(): Long = space.kscience.kmath.torch.JTorch.getItemLong(tensorHandle)
override fun get(index: IntArray): Long = space.kscience.kmath.torch.JTorch.getLong(tensorHandle, index)
override fun item(): Long = JTorch.getItemLong(tensorHandle)
override fun get(index: IntArray): Long = JTorch.getLong(tensorHandle, index)
override fun set(index: IntArray, value: Long) {
space.kscience.kmath.torch.JTorch.setLong(tensorHandle, index, value)
JTorch.setLong(tensorHandle, index, value)
}
}
@@ -86,9 +86,9 @@ public class TorchTensorInt internal constructor(
scope: DeferScope,
tensorHandle: Long
) : TorchTensorOverFieldJVM<Int>(scope, tensorHandle) {
override fun item(): Int = space.kscience.kmath.torch.JTorch.getItemInt(tensorHandle)
override fun get(index: IntArray): Int = space.kscience.kmath.torch.JTorch.getInt(tensorHandle, index)
override fun item(): Int = JTorch.getItemInt(tensorHandle)
override fun get(index: IntArray): Int = JTorch.getInt(tensorHandle, index)
override fun set(index: IntArray, value: Int) {
space.kscience.kmath.torch.JTorch.setInt(tensorHandle, index, value)
JTorch.setInt(tensorHandle, index, value)
}
}

View File

@@ -18,9 +18,9 @@ class BenchmarkMatMul {
benchmarkMatMul(200, 10, 10000, "Float")
benchmarkMatMul(2000, 3, 20, "Float")
if (cudaAvailable()) {
benchmarkMatMul(20, 10, 100000, "Float", space.kscience.kmath.torch.Device.CUDA(0))
benchmarkMatMul(200, 10, 10000, "Float", space.kscience.kmath.torch.Device.CUDA(0))
benchmarkMatMul(2000, 10, 1000, "Float", space.kscience.kmath.torch.Device.CUDA(0))
benchmarkMatMul(20, 10, 100000, "Float", Device.CUDA(0))
benchmarkMatMul(200, 10, 10000, "Float", Device.CUDA(0))
benchmarkMatMul(2000, 10, 1000, "Float", Device.CUDA(0))
}
}
}

View File

@@ -121,7 +121,7 @@ public sealed class TorchTensorAlgebraNative<
override fun TorchTensorType.copy(): TorchTensorType =
wrap(copy_tensor(this.tensorHandle)!!)
override fun TorchTensorType.copyToDevice(device: space.kscience.kmath.torch.Device): TorchTensorType =
override fun TorchTensorType.copyToDevice(device: Device): TorchTensorType =
wrap(copy_to_device(this.tensorHandle, device.toInt())!!)
override infix fun TorchTensorType.swap(other: TorchTensorType): Unit =
@@ -200,26 +200,26 @@ public class TorchTensorRealAlgebra(scope: DeferScope) :
override fun TorchTensorReal.copyToArray(): DoubleArray =
this.elements().map { it.second }.toList().toDoubleArray()
override fun copyFromArray(array: DoubleArray, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorReal =
override fun copyFromArray(array: DoubleArray, shape: IntArray, device: Device): TorchTensorReal =
wrap(from_blob_double(array.toCValues(), shape.toCValues(), shape.size, device.toInt(), true)!!)
override fun fromBlob(arrayBlob: CPointer<DoubleVar>, shape: IntArray): TorchTensorReal =
wrap(from_blob_double(arrayBlob, shape.toCValues(), shape.size, space.kscience.kmath.torch.Device.CPU.toInt(), false)!!)
wrap(from_blob_double(arrayBlob, shape.toCValues(), shape.size, Device.CPU.toInt(), false)!!)
override fun TorchTensorReal.getData(): CPointer<DoubleVar> {
require(this.device is space.kscience.kmath.torch.Device.CPU) {
require(this.device is Device.CPU) {
"This tensor is not on available on CPU"
}
return get_data_double(this.tensorHandle)!!
}
override fun randNormal(shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorReal =
override fun randNormal(shape: IntArray, device: Device): TorchTensorReal =
wrap(randn_double(shape.toCValues(), shape.size, device.toInt())!!)
override fun randUniform(shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorReal =
override fun randUniform(shape: IntArray, device: Device): TorchTensorReal =
wrap(rand_double(shape.toCValues(), shape.size, device.toInt())!!)
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorReal =
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: Device): TorchTensorReal =
wrap(randint_double(low, high, shape.toCValues(), shape.size, device.toInt())!!)
override operator fun Double.plus(other: TorchTensorReal): TorchTensorReal =
@@ -252,7 +252,7 @@ public class TorchTensorRealAlgebra(scope: DeferScope) :
times_double_assign(value, this.tensorHandle)
}
override fun full(value: Double, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorReal =
override fun full(value: Double, shape: IntArray, device: Device): TorchTensorReal =
wrap(full_double(value, shape.toCValues(), shape.size, device.toInt())!!)
}
@@ -265,26 +265,26 @@ public class TorchTensorFloatAlgebra(scope: DeferScope) :
override fun TorchTensorFloat.copyToArray(): FloatArray =
this.elements().map { it.second }.toList().toFloatArray()
override fun copyFromArray(array: FloatArray, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorFloat =
override fun copyFromArray(array: FloatArray, shape: IntArray, device: Device): TorchTensorFloat =
wrap(from_blob_float(array.toCValues(), shape.toCValues(), shape.size, device.toInt(), true)!!)
override fun fromBlob(arrayBlob: CPointer<FloatVar>, shape: IntArray): TorchTensorFloat =
wrap(from_blob_float(arrayBlob, shape.toCValues(), shape.size, space.kscience.kmath.torch.Device.CPU.toInt(), false)!!)
wrap(from_blob_float(arrayBlob, shape.toCValues(), shape.size, Device.CPU.toInt(), false)!!)
override fun TorchTensorFloat.getData(): CPointer<FloatVar> {
require(this.device is space.kscience.kmath.torch.Device.CPU) {
require(this.device is Device.CPU) {
"This tensor is not on available on CPU"
}
return get_data_float(this.tensorHandle)!!
}
override fun randNormal(shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorFloat =
override fun randNormal(shape: IntArray, device: Device): TorchTensorFloat =
wrap(randn_float(shape.toCValues(), shape.size, device.toInt())!!)
override fun randUniform(shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorFloat =
override fun randUniform(shape: IntArray, device: Device): TorchTensorFloat =
wrap(rand_float(shape.toCValues(), shape.size, device.toInt())!!)
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorFloat =
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: Device): TorchTensorFloat =
wrap(randint_float(low, high, shape.toCValues(), shape.size, device.toInt())!!)
override operator fun Float.plus(other: TorchTensorFloat): TorchTensorFloat =
@@ -314,7 +314,7 @@ public class TorchTensorFloatAlgebra(scope: DeferScope) :
override fun TorchTensorFloat.timesAssign(value: Float): Unit =
times_float_assign(value, this.tensorHandle)
override fun full(value: Float, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorFloat =
override fun full(value: Float, shape: IntArray, device: Device): TorchTensorFloat =
wrap(full_float(value, shape.toCValues(), shape.size, device.toInt())!!)
}
@@ -327,20 +327,20 @@ public class TorchTensorLongAlgebra(scope: DeferScope) :
override fun TorchTensorLong.copyToArray(): LongArray =
this.elements().map { it.second }.toList().toLongArray()
override fun copyFromArray(array: LongArray, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorLong =
override fun copyFromArray(array: LongArray, shape: IntArray, device: Device): TorchTensorLong =
wrap(from_blob_long(array.toCValues(), shape.toCValues(), shape.size, device.toInt(), true)!!)
override fun fromBlob(arrayBlob: CPointer<LongVar>, shape: IntArray): TorchTensorLong =
wrap(from_blob_long(arrayBlob, shape.toCValues(), shape.size, space.kscience.kmath.torch.Device.CPU.toInt(), false)!!)
wrap(from_blob_long(arrayBlob, shape.toCValues(), shape.size, Device.CPU.toInt(), false)!!)
override fun TorchTensorLong.getData(): CPointer<LongVar> {
check(this.device is space.kscience.kmath.torch.Device.CPU) {
check(this.device is Device.CPU) {
"This tensor is not on available on CPU"
}
return get_data_long(this.tensorHandle)!!
}
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorLong =
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: Device): TorchTensorLong =
wrap(randint_long(low, high, shape.toCValues(), shape.size, device.toInt())!!)
override operator fun Long.plus(other: TorchTensorLong): TorchTensorLong =
@@ -370,7 +370,7 @@ public class TorchTensorLongAlgebra(scope: DeferScope) :
override fun TorchTensorLong.timesAssign(value: Long): Unit =
times_long_assign(value, this.tensorHandle)
override fun full(value: Long, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorLong =
override fun full(value: Long, shape: IntArray, device: Device): TorchTensorLong =
wrap(full_long(value, shape.toCValues(), shape.size, device.toInt())!!)
}
@@ -382,20 +382,20 @@ public class TorchTensorIntAlgebra(scope: DeferScope) :
override fun TorchTensorInt.copyToArray(): IntArray =
this.elements().map { it.second }.toList().toIntArray()
override fun copyFromArray(array: IntArray, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorInt =
override fun copyFromArray(array: IntArray, shape: IntArray, device: Device): TorchTensorInt =
wrap(from_blob_int(array.toCValues(), shape.toCValues(), shape.size, device.toInt(), true)!!)
override fun fromBlob(arrayBlob: CPointer<IntVar>, shape: IntArray): TorchTensorInt =
wrap(from_blob_int(arrayBlob, shape.toCValues(), shape.size, space.kscience.kmath.torch.Device.CPU.toInt(), false)!!)
wrap(from_blob_int(arrayBlob, shape.toCValues(), shape.size, Device.CPU.toInt(), false)!!)
override fun TorchTensorInt.getData(): CPointer<IntVar> {
require(this.device is space.kscience.kmath.torch.Device.CPU) {
require(this.device is Device.CPU) {
"This tensor is not on available on CPU"
}
return get_data_int(this.tensorHandle)!!
}
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorInt =
override fun randIntegral(low: Long, high: Long, shape: IntArray, device: Device): TorchTensorInt =
wrap(randint_int(low, high, shape.toCValues(), shape.size, device.toInt())!!)
override operator fun Int.plus(other: TorchTensorInt): TorchTensorInt =
@@ -425,7 +425,7 @@ public class TorchTensorIntAlgebra(scope: DeferScope) :
override fun TorchTensorInt.timesAssign(value: Int): Unit =
times_int_assign(value, this.tensorHandle)
override fun full(value: Int, shape: IntArray, device: space.kscience.kmath.torch.Device): TorchTensorInt =
override fun full(value: Int, shape: IntArray, device: Device): TorchTensorInt =
wrap(full_int(value, shape.toCValues(), shape.size, device.toInt())!!)
}

View File

@@ -19,7 +19,7 @@ public sealed class TorchTensorNative<T> constructor(
override val strides: IntArray
get() = (1..dimension).map { get_stride_at(tensorHandle, it - 1) }.toIntArray()
override val size: Int get() = get_numel(tensorHandle)
override val device: space.kscience.kmath.torch.Device get() = space.kscience.kmath.torch.Device.fromInt(get_device(tensorHandle))
override val device: Device get() = Device.fromInt(get_device(tensorHandle))
override fun toString(): String {
val nativeStringRepresentation: CPointer<ByteVar> = tensor_to_string(tensorHandle)!!

View File

@@ -18,9 +18,9 @@ internal class BenchmarkMatMul {
benchmarkMatMul(200, 10, 10000, "Float")
benchmarkMatMul(2000, 3, 20, "Float")
if (cudaAvailable()) {
benchmarkMatMul(20, 10, 100000, "Float", space.kscience.kmath.torch.Device.CUDA(0))
benchmarkMatMul(200, 10, 10000, "Float", space.kscience.kmath.torch.Device.CUDA(0))
benchmarkMatMul(2000, 10, 1000, "Float", space.kscience.kmath.torch.Device.CUDA(0))
benchmarkMatMul(20, 10, 100000, "Float", Device.CUDA(0))
benchmarkMatMul(200, 10, 10000, "Float", Device.CUDA(0))
benchmarkMatMul(2000, 10, 1000, "Float", Device.CUDA(0))
}
}
}