Dropping support for buffered NDStructures

commit 0cb2c3f0da
parent 9b1a958491
@@ -148,28 +148,23 @@ public interface Strides {
     /**
      * Array strides
      */
-    public val strides: IntArray
+    public val strides: List<Int>
 
-    /**
-     * The size of linear buffer to accommodate all elements of ND-structure corresponding to strides
-     */
-    public val linearSize: Int
-
     /**
      * Get linear index from multidimensional index
      */
-    public fun offset(index: IntArray): Int = index.mapIndexed { i, value ->
-        if (value < 0 || value >= this.shape[i])
-            throw IndexOutOfBoundsException("Index $value out of shape bounds: (0,${this.shape[i]})")
-
-        value * strides[i]
-    }.sum()
+    public fun offset(index: IntArray): Int
 
     /**
      * Get multidimensional from linear
      */
     public fun index(offset: Int): IntArray
 
+    /**
+     * The size of linear buffer to accommodate all elements of ND-structure corresponding to strides
+     */
+    public val linearSize: Int
+
     // TODO introduce a fast way to calculate index of the next element?
 
     /**
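For orientation, a minimal sketch of the contract this hunk pins down: `offset` and `index` are inverses over `0 until linearSize`. The values below assume the first-index-fastest layout that `DefaultStrides` builds in the next hunks, and the `DefaultStrides(shape)` companion factory that is not shown in this diff:

```kotlin
import kscience.kmath.structures.DefaultStrides

fun main() {
    val strides = DefaultStrides(intArrayOf(2, 3)) // companion invoke, assumed available
    // strides.strides == [1, 2, 6]: the first index varies fastest
    val offset = strides.offset(intArrayOf(1, 2)) // 1 * 1 + 2 * 2 = 5
    println(offset)                               // 5
    println(strides.index(5).toList())            // [1, 2] — index() inverts offset()
    println(strides.linearSize)                   // 6, the required backing buffer size
}
```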
@@ -188,7 +183,7 @@ public class DefaultStrides private constructor(override val shape: IntArray) :
     /**
      * Strides for memory access
      */
-    override val strides: IntArray by lazy {
+    override val strides: List<Int> by lazy {
         sequence {
             var current = 1
             yield(1)
@@ -197,9 +192,16 @@ public class DefaultStrides private constructor(override val shape: IntArray) :
                 current *= it
                 yield(current)
             }
-        }.toList().toIntArray()
+        }.toList()
     }
 
+    override fun offset(index: IntArray): Int = index.mapIndexed { i, value ->
+        if (value < 0 || value >= this.shape[i])
+            throw IndexOutOfBoundsException("Index $value out of shape bounds: (0,${this.shape[i]})")
+
+        value * strides[i]
+    }.sum()
+
     override fun index(offset: Int): IntArray {
         val res = IntArray(shape.size)
         var current = offset
@@ -239,19 +241,17 @@ public class DefaultStrides private constructor(override val shape: IntArray) :
  * Trait for [NDStructure] over [Buffer].
  *
  * @param T the type of items
- * @param BufferImpl implementation of [Buffer].
  */
-public abstract class NDBufferTrait<T, out BufferImpl : Buffer<T>, out StridesImpl: Strides> :
-    NDStructure<T> {
+public abstract class NDBuffer<T> : NDStructure<T> {
     /**
      * The underlying buffer.
      */
-    public abstract val buffer: BufferImpl
+    public abstract val buffer: Buffer<T>
 
     /**
      * The strides to access elements of [Buffer] by linear indices.
     */
-    public abstract val strides: StridesImpl
+    public abstract val strides: Strides
 
     override operator fun get(index: IntArray): T = buffer[strides.offset(index)]
 
@@ -259,10 +259,6 @@ public abstract class NDBufferTrait<T, out BufferImpl : Buffer<T>, out StridesIm
 
     override fun elements(): Sequence<Pair<IntArray, T>> = strides.indices().map { it to this[it] }
 
-    public fun checkStridesBufferCompatibility(): Unit = require(strides.linearSize == buffer.size) {
-        "Expected buffer side of ${strides.linearSize}, but found ${buffer.size}"
-    }
-
     override fun hashCode(): Int {
         var result = strides.hashCode()
         result = 31 * result + buffer.hashCode()
@@ -286,36 +282,10 @@ public abstract class NDBufferTrait<T, out BufferImpl : Buffer<T>, out StridesIm
         }
         return "NDBuffer(shape=${shape.contentToString()}, buffer=$bufferRepr)"
     }
+
+
 }
 
-/**
- * Trait for [MutableNDStructure] over [MutableBuffer].
- *
- * @param T the type of items
- * @param MutableBufferImpl implementation of [MutableBuffer].
- */
-public abstract class MutableNDBufferTrait<T, out MutableBufferImpl : MutableBuffer<T>, out StridesImpl: Strides> :
-    NDBufferTrait<T, MutableBufferImpl, StridesImpl>(), MutableNDStructure<T> {
-    override fun hashCode(): Int = 0
-    override fun equals(other: Any?): Boolean = false
-    override operator fun set(index: IntArray, value: T): Unit =
-        buffer.set(strides.offset(index), value)
-}
-
-/**
- * Default representation of [NDStructure] over [Buffer].
- *
- * @param T the type of items.
- */
-public abstract class NDBuffer<T> : NDBufferTrait<T, Buffer<T>, Strides>()
-
-/**
- * Default representation of [MutableNDStructure] over [MutableBuffer].
- *
- * @param T the type of items.
- */
-public abstract class MutableNDBuffer<T> : MutableNDBufferTrait<T, MutableBuffer<T>, Strides>()
-
 /**
  * Boxing generic [NDStructure]
  */
@@ -324,7 +294,9 @@ public class BufferNDStructure<T>(
     override val buffer: Buffer<T>,
 ) : NDBuffer<T>() {
     init {
-        checkStridesBufferCompatibility()
+        if (strides.linearSize != buffer.size) {
+            error("Expected buffer side of ${strides.linearSize}, but found ${buffer.size}")
+        }
     }
 }
 
@@ -344,17 +316,22 @@ public inline fun <T, reified R : Any> NDStructure<T>.mapToBuffer(
 }
 
 /**
- * Boxing generic [MutableNDStructure].
+ * Mutable ND buffer based on linear [MutableBuffer].
  */
 public class MutableBufferNDStructure<T>(
     override val strides: Strides,
     override val buffer: MutableBuffer<T>,
-) : MutableNDBuffer<T>() {
+) : NDBuffer<T>(), MutableNDStructure<T> {
+
     init {
-        checkStridesBufferCompatibility()
+        require(strides.linearSize == buffer.size) {
+            "Expected buffer side of ${strides.linearSize}, but found ${buffer.size}"
+        }
     }
+
+    override operator fun set(index: IntArray, value: T): Unit = buffer.set(strides.offset(index), value)
 }
 
 public inline fun <reified T : Any> NDStructure<T>.combine(
     struct: NDStructure<T>,
     crossinline block: (T, T) -> T,
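A quick sketch of how the reworked mutable class is meant to be used (illustrative only; `MutableListBuffer` is assumed to be the existing `MutableBuffer` wrapper from the same structures package):

```kotlin
import kscience.kmath.structures.*

fun main() {
    val strides = DefaultStrides(intArrayOf(2, 3))
    // The init block now requires buffer.size == strides.linearSize.
    val nd = MutableBufferNDStructure(
        strides,
        MutableListBuffer(MutableList(strides.linearSize) { 0.0 })
    )
    nd[intArrayOf(1, 2)] = 42.0   // routed through strides.offset(...) into the buffer
    println(nd[intArrayOf(1, 2)]) // 42.0
}
```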
@@ -78,28 +78,20 @@ pluginManagement {
 
 Tensors implement the buffer protocol over `MutableNDStructure`. They can only be instantiated through provided factory methods and require scoping:
 ```kotlin
-memScoped {
-    val intTensor: TorchTensorInt = TorchTensor.copyFromIntArray(
-        scope = this,
-        array = (1..24).toList().toIntArray(),
-        shape = intArrayOf(3, 2, 4)
-    )
-    println(intTensor)
-
-    val floatTensor: TorchTensorFloat = TorchTensor.copyFromFloatArray(
-        scope = this,
-        array = (1..10).map { it + 50f }.toList().toFloatArray(),
-        shape = intArrayOf(10)
+TorchTensorRealAlgebra {
+    val realTensor: TorchTensorReal = copyFromArray(
+        array = (1..10).map { it + 50.0 }.toList().toDoubleArray(),
+        shape = intArrayOf(2,5)
     )
-    println(floatTensor)
+    println(realTensor)
 
-    val gpuFloatTensor: TorchTensorFloatGPU = TorchTensor.copyFromFloatArrayToGPU(
-        scope = this,
-        array = (1..8).map { it * 2f }.toList().toFloatArray(),
+    val gpuRealTensor: TorchTensorReal = copyFromArray(
+        array = (1..8).map { it * 2.5 }.toList().toDoubleArray(),
         shape = intArrayOf(2, 2, 2),
-        device = 0
+        device = TorchDevice.TorchCUDA(0)
     )
-    println(gpuFloatTensor)
+    println(gpuRealTensor)
 }
 ```
 
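The scoping requirement in this snippet is about native memory: as the new `TorchTensor` implementation later in this commit shows, every tensor registers its `dispose_tensor` call on the surrounding `DeferScope`, so all native handles allocated inside the block are freed when the block exits. An illustrative consequence (a sketch, not part of the commit):

```kotlin
// Do not let tensors escape the algebra block: native memory is freed on exit.
val escaped = TorchTensorRealAlgebra {
    copyFromArray(doubleArrayOf(1.0, 2.0), shape = intArrayOf(2))
}
// `escaped` now wraps an already-disposed native handle; touching it is invalid.
```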
@@ -145,6 +145,7 @@ kotlin {
         }
         val nativeGPUTest by creating {
             dependsOn(nativeMain)
+            dependsOn(nativeTest)
         }
 
 
@@ -18,49 +18,46 @@ extern "C"
 
     void set_seed(int seed);
 
-    TorchTensorHandle copy_from_blob_double(double *data, int *shape, int dim);
-    TorchTensorHandle copy_from_blob_float(float *data, int *shape, int dim);
-    TorchTensorHandle copy_from_blob_long(long *data, int *shape, int dim);
-    TorchTensorHandle copy_from_blob_int(int *data, int *shape, int dim);
-    TorchTensorHandle copy_from_blob_to_gpu_double(double *data, int *shape, int dim, int device);
-    TorchTensorHandle copy_from_blob_to_gpu_float(float *data, int *shape, int dim, int device);
-    TorchTensorHandle copy_from_blob_to_gpu_long(long *data, int *shape, int dim, int device);
-    TorchTensorHandle copy_from_blob_to_gpu_int(int *data, int *shape, int dim, int device);
-
+    TorchTensorHandle copy_from_blob_double(double *data, int *shape, int dim, int device);
+    TorchTensorHandle copy_from_blob_float(float *data, int *shape, int dim, int device);
+    TorchTensorHandle copy_from_blob_long(long *data, int *shape, int dim, int device);
+    TorchTensorHandle copy_from_blob_int(int *data, int *shape, int dim, int device);
     TorchTensorHandle copy_tensor(TorchTensorHandle tensor_handle);
+    TorchTensorHandle copy_to_device(TorchTensorHandle tensor_handle, int device);
 
-    double *get_data_double(TorchTensorHandle tensor_handle);
-    float *get_data_float(TorchTensorHandle tensor_handle);
-    long *get_data_long(TorchTensorHandle tensor_handle);
-    int *get_data_int(TorchTensorHandle tensor_handle);
+    double get_item_double(TorchTensorHandle tensor_handle);
+    float get_item_float(TorchTensorHandle tensor_handle);
+    long get_item_long(TorchTensorHandle tensor_handle);
+    int get_item_int(TorchTensorHandle tensor_handle);
 
-    int get_numel(TorchTensorHandle tensor_handle);
     int get_dim(TorchTensorHandle tensor_handle);
-    int *get_shape(TorchTensorHandle tensor_handle);
-    int *get_strides(TorchTensorHandle tensor_handle);
+    int get_numel(TorchTensorHandle tensor_handle);
+    int get_shape_at(TorchTensorHandle tensor_handle, int d);
+    int get_stride_at(TorchTensorHandle tensor_handle, int d);
+    int get_device(TorchTensorHandle tensor_handle);
 
     char *tensor_to_string(TorchTensorHandle tensor_handle);
 
-    void dispose_int_array(int *ptr);
     void dispose_char(char *ptr);
     void dispose_tensor(TorchTensorHandle tensor_handle);
 
-    // Workaround for GPU tensors
-    double get_at_offset_double(TorchTensorHandle tensor_handle, int offset);
-    float get_at_offset_float(TorchTensorHandle tensor_handle, int offset);
-    long get_at_offset_long(TorchTensorHandle tensor_handle, int offset);
-    int get_at_offset_int(TorchTensorHandle tensor_handle, int offset);
-    void set_at_offset_double(TorchTensorHandle tensor_handle, int offset, double value);
-    void set_at_offset_float(TorchTensorHandle tensor_handle, int offset, float value);
-    void set_at_offset_long(TorchTensorHandle tensor_handle, int offset, long value);
-    void set_at_offset_int(TorchTensorHandle tensor_handle, int offset, int value);
-
-    TorchTensorHandle copy_to_cpu(TorchTensorHandle tensor_handle);
-    TorchTensorHandle copy_to_gpu(TorchTensorHandle tensor_handle, int device);
+    double get_double(TorchTensorHandle tensor_handle, int* index);
+    float get_float(TorchTensorHandle tensor_handle, int* index);
+    long get_long(TorchTensorHandle tensor_handle, int* index);
+    int get_int(TorchTensorHandle tensor_handle, int* index);
+    void set_double(TorchTensorHandle tensor_handle, int* index, double value);
+    void set_float(TorchTensorHandle tensor_handle, int* index, float value);
+    void set_long(TorchTensorHandle tensor_handle, int* index, long value);
+    void set_int(TorchTensorHandle tensor_handle, int* index, int value);
 
-    TorchTensorHandle randn_float(int* shape, int shape_size);
+    TorchTensorHandle randn_double(int* shape, int shape_size, int device);
+    TorchTensorHandle rand_double(int* shape, int shape_size, int device);
+    TorchTensorHandle randn_float(int* shape, int shape_size, int device);
+    TorchTensorHandle rand_float(int* shape, int shape_size, int device);
 
     TorchTensorHandle matmul(TorchTensorHandle lhs, TorchTensorHandle rhs);
+    void matmul_assign(TorchTensorHandle lhs, TorchTensorHandle rhs);
 
 #ifdef __cplusplus
 }
@@ -33,6 +33,16 @@ namespace ctorch
         return *static_cast<torch::Tensor *>(tensor_handle);
     }
 
+    inline int device_to_int(const torch::Tensor &tensor)
+    {
+        return (tensor.device().type() == torch::kCPU) ? 0 : 1 + tensor.device().index();
+    }
+
+    inline torch::Device int_to_device(int device_int)
+    {
+        return (device_int == 0) ? torch::kCPU : torch::Device(torch::kCUDA, device_int - 1);
+    }
+
     inline std::vector<int64_t> to_vec_int(int *arr, int arr_size)
     {
         auto vec = std::vector<int64_t>(arr_size);
@@ -40,46 +50,34 @@ namespace ctorch
         return vec;
     }
 
+    inline std::vector<at::indexing::TensorIndex> to_index(int *arr, int arr_size)
+    {
+        std::vector<at::indexing::TensorIndex> index;
+        for (int i = 0; i < arr_size; i++)
+        {
+            index.emplace_back(arr[i]);
+        }
+        return index;
+    }
+
     template <typename Dtype>
     inline torch::Tensor copy_from_blob(Dtype *data, std::vector<int64_t> shape, torch::Device device)
     {
         return torch::from_blob(data, shape, dtype<Dtype>()).to(torch::TensorOptions().layout(torch::kStrided).device(device), false, true);
     }
 
-    inline int *to_dynamic_ints(const c10::IntArrayRef &arr)
-    {
-        size_t n = arr.size();
-        int *res = (int *)malloc(sizeof(int) * n);
-        for (size_t i = 0; i < n; i++)
-        {
-            res[i] = arr[i];
-        }
-        return res;
-    }
-
-    inline std::vector<at::indexing::TensorIndex> offset_to_index(int offset, const c10::IntArrayRef &strides)
-    {
-        std::vector<at::indexing::TensorIndex> index;
-        for (const auto &stride : strides)
-        {
-            index.emplace_back(offset / stride);
-            offset %= stride;
-        }
-        return index;
-    }
-
     template <typename NumType>
-    inline NumType get_at_offset(const TorchTensorHandle &tensor_handle, int offset)
+    inline NumType get(const TorchTensorHandle &tensor_handle, int *index)
     {
         auto ten = ctorch::cast(tensor_handle);
-        return ten.index(ctorch::offset_to_index(offset, ten.strides())).item<NumType>();
+        return ten.index(to_index(index, ten.dim())).item<NumType>();
     }
 
     template <typename NumType>
-    inline void set_at_offset(TorchTensorHandle &tensor_handle, int offset, NumType value)
+    inline void set(TorchTensorHandle &tensor_handle, int *index, NumType value)
     {
         auto ten = ctorch::cast(tensor_handle);
-        ten.index(offset_to_index(offset, ten.strides())) = value;
+        ten.index(to_index(index, ten.dim())) = value;
     }
 
     template <typename Dtype>
@@ -88,4 +86,10 @@ namespace ctorch
         return torch::randn(shape, torch::TensorOptions().dtype(dtype<Dtype>()).layout(torch::kStrided).device(device));
     }
 
+    template <typename Dtype>
+    inline torch::Tensor rand(std::vector<int64_t> shape, torch::Device device)
+    {
+        return torch::rand(shape, torch::TensorOptions().dtype(dtype<Dtype>()).layout(torch::kStrided).device(device));
+    }
+
 } // namespace ctorch
@@ -25,80 +25,50 @@ void set_seed(int seed)
    torch::manual_seed(seed);
 }
 
-TorchTensorHandle copy_from_blob_double(double *data, int *shape, int dim)
-{
-    return new torch::Tensor(ctorch::copy_from_blob<double>(data, ctorch::to_vec_int(shape, dim), torch::kCPU));
-}
-TorchTensorHandle copy_from_blob_float(float *data, int *shape, int dim)
-{
-    return new torch::Tensor(ctorch::copy_from_blob<float>(data, ctorch::to_vec_int(shape, dim), torch::kCPU));
-}
-TorchTensorHandle copy_from_blob_long(long *data, int *shape, int dim)
-{
-    return new torch::Tensor(ctorch::copy_from_blob<long>(data, ctorch::to_vec_int(shape, dim), torch::kCPU));
-}
-TorchTensorHandle copy_from_blob_int(int *data, int *shape, int dim)
-{
-    return new torch::Tensor(ctorch::copy_from_blob<int>(data, ctorch::to_vec_int(shape, dim), torch::kCPU));
-}
-
-TorchTensorHandle copy_from_blob_to_gpu_double(double *data, int *shape, int dim, int device)
-{
-    return new torch::Tensor(ctorch::copy_from_blob<double>(data, ctorch::to_vec_int(shape, dim), torch::Device(torch::kCUDA, device)));
-}
-TorchTensorHandle copy_from_blob_to_gpu_float(float *data, int *shape, int dim, int device)
-{
-    return new torch::Tensor(ctorch::copy_from_blob<float>(data, ctorch::to_vec_int(shape, dim), torch::Device(torch::kCUDA, device)));
-}
-TorchTensorHandle copy_from_blob_to_gpu_long(long *data, int *shape, int dim, int device)
-{
-    return new torch::Tensor(ctorch::copy_from_blob<long>(data, ctorch::to_vec_int(shape, dim), torch::Device(torch::kCUDA, device)));
-}
-TorchTensorHandle copy_from_blob_to_gpu_int(int *data, int *shape, int dim, int device)
-{
-    return new torch::Tensor(ctorch::copy_from_blob<int>(data, ctorch::to_vec_int(shape, dim), torch::Device(torch::kCUDA, device)));
-}
-
-TorchTensorHandle copy_tensor(TorchTensorHandle tensor_handle)
-{
-    return new torch::Tensor(ctorch::cast(tensor_handle).clone());
-}
-
-double *get_data_double(TorchTensorHandle tensor_handle)
-{
-    return ctorch::cast(tensor_handle).data_ptr<double>();
-}
-float *get_data_float(TorchTensorHandle tensor_handle)
-{
-    return ctorch::cast(tensor_handle).data_ptr<float>();
-}
-long *get_data_long(TorchTensorHandle tensor_handle)
-{
-    return ctorch::cast(tensor_handle).data_ptr<long>();
-}
-int *get_data_int(TorchTensorHandle tensor_handle)
-{
-    return ctorch::cast(tensor_handle).data_ptr<int>();
-}
-
-int get_numel(TorchTensorHandle tensor_handle)
-{
-    return ctorch::cast(tensor_handle).numel();
-}
-
 int get_dim(TorchTensorHandle tensor_handle)
 {
     return ctorch::cast(tensor_handle).dim();
 }
-
-int *get_shape(TorchTensorHandle tensor_handle)
+int get_numel(TorchTensorHandle tensor_handle)
 {
-    return ctorch::to_dynamic_ints(ctorch::cast(tensor_handle).sizes());
+    return ctorch::cast(tensor_handle).numel();
+}
+int get_shape_at(TorchTensorHandle tensor_handle, int d)
+{
+    return ctorch::cast(tensor_handle).size(d);
+}
+int get_stride_at(TorchTensorHandle tensor_handle, int d)
+{
+    return ctorch::cast(tensor_handle).stride(d);
+}
+int get_device(TorchTensorHandle tensor_handle)
+{
+    return ctorch::device_to_int(ctorch::cast(tensor_handle));
 }
 
-int *get_strides(TorchTensorHandle tensor_handle)
+TorchTensorHandle copy_from_blob_double(double *data, int *shape, int dim, int device)
 {
-    return ctorch::to_dynamic_ints(ctorch::cast(tensor_handle).strides());
+    return new torch::Tensor(ctorch::copy_from_blob<double>(data, ctorch::to_vec_int(shape, dim), ctorch::int_to_device(device)));
+}
+TorchTensorHandle copy_from_blob_float(float *data, int *shape, int dim, int device)
+{
+    return new torch::Tensor(ctorch::copy_from_blob<float>(data, ctorch::to_vec_int(shape, dim), ctorch::int_to_device(device)));
+}
+TorchTensorHandle copy_from_blob_long(long *data, int *shape, int dim, int device)
+{
+    return new torch::Tensor(ctorch::copy_from_blob<long>(data, ctorch::to_vec_int(shape, dim), ctorch::int_to_device(device)));
+}
+TorchTensorHandle copy_from_blob_int(int *data, int *shape, int dim, int device)
+{
+    return new torch::Tensor(ctorch::copy_from_blob<int>(data, ctorch::to_vec_int(shape, dim), ctorch::int_to_device(device)));
+}
+TorchTensorHandle copy_tensor(TorchTensorHandle tensor_handle)
+{
+    return new torch::Tensor(ctorch::cast(tensor_handle).clone());
+}
+TorchTensorHandle copy_to_device(TorchTensorHandle tensor_handle, int device)
+{
+    return new torch::Tensor(ctorch::cast(tensor_handle).to(ctorch::int_to_device(device), false, true));
 }
 
 char *tensor_to_string(TorchTensorHandle tensor_handle)
@@ -110,68 +80,89 @@ char *tensor_to_string(TorchTensorHandle tensor_handle)
     std::strcpy(crep, rep.c_str());
     return crep;
 }
 
-void dispose_int_array(int *ptr)
-{
-    free(ptr);
-}
-
 void dispose_char(char *ptr)
 {
     free(ptr);
 }
 
 void dispose_tensor(TorchTensorHandle tensor_handle)
 {
     delete static_cast<torch::Tensor *>(tensor_handle);
 }
 
-double get_at_offset_double(TorchTensorHandle tensor_handle, int offset)
+double get_double(TorchTensorHandle tensor_handle, int *index)
 {
-    return ctorch::get_at_offset<double>(tensor_handle, offset);
+    return ctorch::get<double>(tensor_handle, index);
 }
-float get_at_offset_float(TorchTensorHandle tensor_handle, int offset)
+float get_float(TorchTensorHandle tensor_handle, int *index)
 {
-    return ctorch::get_at_offset<float>(tensor_handle, offset);
+    return ctorch::get<float>(tensor_handle, index);
 }
-long get_at_offset_long(TorchTensorHandle tensor_handle, int offset)
+long get_long(TorchTensorHandle tensor_handle, int *index)
 {
-    return ctorch::get_at_offset<long>(tensor_handle, offset);
+    return ctorch::get<long>(tensor_handle, index);
 }
-int get_at_offset_int(TorchTensorHandle tensor_handle, int offset)
+int get_int(TorchTensorHandle tensor_handle, int *index)
 {
-    return ctorch::get_at_offset<int>(tensor_handle, offset);
+    return ctorch::get<int>(tensor_handle, index);
 }
-void set_at_offset_double(TorchTensorHandle tensor_handle, int offset, double value)
+void set_double(TorchTensorHandle tensor_handle, int *index, double value)
 {
-    ctorch::set_at_offset<double>(tensor_handle, offset, value);
+    ctorch::set<double>(tensor_handle, index, value);
 }
-void set_at_offset_float(TorchTensorHandle tensor_handle, int offset, float value)
+void set_float(TorchTensorHandle tensor_handle, int *index, float value)
 {
-    ctorch::set_at_offset<float>(tensor_handle, offset, value);
+    ctorch::set<float>(tensor_handle, index, value);
 }
-void set_at_offset_long(TorchTensorHandle tensor_handle, int offset, long value)
+void set_long(TorchTensorHandle tensor_handle, int *index, long value)
 {
-    ctorch::set_at_offset<long>(tensor_handle, offset, value);
+    ctorch::set<long>(tensor_handle, index, value);
 }
-void set_at_offset_int(TorchTensorHandle tensor_handle, int offset, int value)
+void set_int(TorchTensorHandle tensor_handle, int *index, int value)
 {
-    ctorch::set_at_offset<int>(tensor_handle, offset, value);
+    ctorch::set<int>(tensor_handle, index, value);
 }
 
-TorchTensorHandle copy_to_cpu(TorchTensorHandle tensor_handle)
+double get_item_double(TorchTensorHandle tensor_handle)
 {
-    return new torch::Tensor(ctorch::cast(tensor_handle).to(torch::kCPU,false, true));
+    return ctorch::cast(tensor_handle).item<double>();
 }
-TorchTensorHandle copy_to_gpu(TorchTensorHandle tensor_handle, int device)
+float get_item_float(TorchTensorHandle tensor_handle)
 {
-    return new torch::Tensor(ctorch::cast(tensor_handle).to(torch::Device(torch::kCUDA, device),false, true));
+    return ctorch::cast(tensor_handle).item<float>();
+}
+long get_item_long(TorchTensorHandle tensor_handle)
+{
+    return ctorch::cast(tensor_handle).item<long>();
+}
+int get_item_int(TorchTensorHandle tensor_handle)
+{
+    return ctorch::cast(tensor_handle).item<int>();
 }
 
-TorchTensorHandle randn_float(int* shape, int shape_size){
-    return new torch::Tensor(ctorch::randn<float>(ctorch::to_vec_int(shape, shape_size), torch::kCPU));
+TorchTensorHandle randn_double(int *shape, int shape_size, int device)
+{
+    return new torch::Tensor(ctorch::randn<double>(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
+}
+TorchTensorHandle rand_double(int *shape, int shape_size, int device)
+{
+    return new torch::Tensor(ctorch::rand<double>(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
+}
+TorchTensorHandle randn_float(int *shape, int shape_size, int device)
+{
+    return new torch::Tensor(ctorch::randn<float>(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
+}
+TorchTensorHandle rand_float(int *shape, int shape_size, int device)
+{
+    return new torch::Tensor(ctorch::rand<float>(ctorch::to_vec_int(shape, shape_size), ctorch::int_to_device(device)));
 }
 
-TorchTensorHandle matmul(TorchTensorHandle lhs, TorchTensorHandle rhs){
+TorchTensorHandle matmul(TorchTensorHandle lhs, TorchTensorHandle rhs)
+{
     return new torch::Tensor(torch::matmul(ctorch::cast(lhs), ctorch::cast(rhs)));
 }
+
+void matmul_assign(TorchTensorHandle lhs, TorchTensorHandle rhs)
+{
+    auto lhs_tensor = ctorch::cast(lhs);
+    lhs_tensor = lhs_tensor.matmul(ctorch::cast(rhs));
+}
@@ -0,0 +1,13 @@
+package kscience.kmath.torch
+
+import kotlin.test.*
+
+
+class TestTorchTensorAlgebraGPU {
+
+    @Test
+    fun testScalarProduct() = testingScalarProduct(device = TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun testMatrixMultiplication() = testingMatrixMultiplication(device = TorchDevice.TorchCUDA(0))
+}
@@ -1,25 +1,22 @@
 package kscience.kmath.torch
 
-import kscience.kmath.structures.asBuffer
-
-import kotlinx.cinterop.memScoped
 import kotlin.test.*
 
 class TestTorchTensorGPU {
 
     @Test
-    fun cudaAvailability() {
-        assertTrue(cudaAvailable())
-    }
+    fun testCopyFromArray() = testingCopyFromArray(TorchDevice.TorchCUDA(0))
 
     @Test
-    fun floatGPUTensorLayout() = memScoped {
-        val array = (1..8).map { it * 2f }.toList().toFloatArray()
-        val shape = intArrayOf(2, 2, 2)
-        val tensor = TorchTensor.copyFromFloatArrayToGPU(this, array, shape, 0)
-        tensor.elements().forEach {
-            assertEquals(tensor[it.first], it.second)
-        }
-        assertTrue(tensor.asBuffer().contentEquals(array.asBuffer()))
+    fun testCopyToDevice() = TorchTensorRealAlgebra {
+        setSeed(SEED)
+        val normalCpu = randNormal(intArrayOf(2, 3))
+        val normalGpu = normalCpu.copyToDevice(TorchDevice.TorchCUDA(0))
+        assertTrue(normalCpu.copyToArray() contentEquals normalGpu.copyToArray())
+
+        val uniformGpu = randUniform(intArrayOf(3,2),TorchDevice.TorchCUDA(0))
+        val uniformCpu = uniformGpu.copyToDevice(TorchDevice.TorchCPU)
+        assertTrue(uniformGpu.copyToArray() contentEquals uniformCpu.copyToArray())
     }
 
 }
@@ -0,0 +1,33 @@
+package kscience.kmath.torch
+
+import kotlin.test.*
+
+
+internal class TestUtilsGPU {
+
+    @Test
+    fun testCudaAvailable() {
+        assertTrue(cudaAvailable())
+    }
+
+    @Test
+    fun testSetSeed() = testingSetSeed(TorchDevice.TorchCUDA(0))
+
+    @Test
+    fun testReadmeFactory() = TorchTensorRealAlgebra {
+
+        val realTensor: TorchTensorReal = copyFromArray(
+            array = (1..10).map { it + 50.0 }.toList().toDoubleArray(),
+            shape = intArrayOf(2,5)
+        )
+        println(realTensor)
+
+        val gpuRealTensor: TorchTensorReal = copyFromArray(
+            array = (1..8).map { it * 2.5 }.toList().toDoubleArray(),
+            shape = intArrayOf(2, 2, 2),
+            device = TorchDevice.TorchCUDA(0)
+        )
+        println(gpuRealTensor)
+    }
+
+}
@@ -1,2 +1,2 @@
-package=ctorch
+package=kscience.kmath.ctorch
 headers=ctorch.h
@@ -0,0 +1,18 @@
+package kscience.kmath.torch
+
+
+public sealed class TorchDevice {
+    public object TorchCPU: TorchDevice()
+    public data class TorchCUDA(val index: Int): TorchDevice()
+    public fun toInt(): Int {
+        when(this) {
+            is TorchCPU -> return 0
+            is TorchCUDA -> return this.index + 1
+        }
+    }
+    public companion object {
+        public fun fromInt(deviceInt: Int): TorchDevice {
+            return if (deviceInt == 0) TorchCPU else TorchCUDA(deviceInt-1)
+        }
+    }
+}
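The integer encoding mirrors the C++ helpers `device_to_int`/`int_to_device` introduced earlier in this commit: 0 denotes the CPU, and CUDA device `n` is stored as `n + 1`. A small round-trip check (illustrative, not part of the commit):

```kotlin
fun main() {
    println(TorchDevice.TorchCPU.toInt())     // 0
    println(TorchDevice.TorchCUDA(0).toInt()) // 1
    println(TorchDevice.fromInt(3))           // TorchCUDA(index=2)
    // fromInt is the inverse of toInt for both variants
    check(TorchDevice.fromInt(TorchDevice.TorchCUDA(1).toInt()) == TorchDevice.TorchCUDA(1))
}
```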
@@ -0,0 +1,93 @@
+package kscience.kmath.torch
+
+import kscience.kmath.structures.MutableNDStructure
+
+import kotlinx.cinterop.*
+import kscience.kmath.ctorch.*
+
+
+public sealed class TorchTensor<T> constructor(
+    internal val scope: DeferScope,
+    internal val tensorHandle: COpaquePointer
+) : MutableNDStructure<T> {
+    init {
+        scope.defer(::close)
+    }
+    private fun close(): Unit = dispose_tensor(tensorHandle)
+
+    protected abstract fun item(): T
+    internal abstract fun wrap(outScope: DeferScope, outTensorHandle: COpaquePointer): TorchTensor<T>
+
+    override val dimension: Int get() = get_dim(tensorHandle)
+    override val shape: IntArray
+        get() = (1..dimension).map{get_shape_at(tensorHandle, it-1)}.toIntArray()
+    public val strides: IntArray
+        get() = (1..dimension).map{get_stride_at(tensorHandle, it-1)}.toIntArray()
+    public val size: Int get() = get_numel(tensorHandle)
+    public val device: TorchDevice get() = TorchDevice.fromInt(get_device(tensorHandle))
+
+    override fun equals(other: Any?): Boolean = false
+    override fun hashCode(): Int = 0
+    override fun toString(): String {
+        val nativeStringRepresentation: CPointer<ByteVar> = tensor_to_string(tensorHandle)!!
+        val stringRepresentation = nativeStringRepresentation.toKString()
+        dispose_char(nativeStringRepresentation)
+        return stringRepresentation
+    }
+
+    override fun elements(): Sequence<Pair<IntArray, T>> {
+        if (dimension == 0) {
+            return emptySequence()
+        }
+        val indices = (1..size).asSequence().map { indexFromOffset(it - 1, strides, dimension) }
+        return indices.map { it to get(it) }
+    }
+
+    public fun value(): T {
+        check(dimension == 0) {
+            "This tensor has shape ${shape.toList()}"
+        }
+        return item()
+    }
+
+    public fun copy(): TorchTensor<T> =
+        wrap(
+            outScope = scope,
+            outTensorHandle = copy_tensor(tensorHandle)!!
+        )
+
+    public fun copyToDevice(device: TorchDevice): TorchTensor<T> =
+        wrap(
+            outScope = scope,
+            outTensorHandle = copy_to_device(tensorHandle, device.toInt())!!
+        )
+
+}
+
+public class TorchTensorReal internal constructor(
+    scope: DeferScope,
+    tensorHandle: COpaquePointer
+) : TorchTensor<Double>(scope, tensorHandle) {
+    override fun item(): Double = get_item_double(tensorHandle)
+    override fun wrap(outScope: DeferScope, outTensorHandle: COpaquePointer
+    ): TorchTensorReal = TorchTensorReal(scope = outScope, tensorHandle = outTensorHandle)
+
+    override fun get(index: IntArray): Double = get_double(tensorHandle, index.toCValues())
+    override fun set(index: IntArray, value: Double) {
+        set_double(tensorHandle, index.toCValues(), value)
+    }
+}
+
+
+private inline fun indexFromOffset(offset: Int, strides: IntArray, nDim: Int): IntArray {
+    val res = IntArray(nDim)
+    var current = offset
+    var strideIndex = 0
+
+    while (strideIndex < nDim) {
+        res[strideIndex] = (current / strides[strideIndex])
+        current %= strides[strideIndex]
+        strideIndex++
+    }
+    return res
+}
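`indexFromOffset` inverts the usual linearization `offset = Σ index[d] * strides[d]` for the row-major, contiguous strides LibTorch reports; this is how `elements()` enumerates multi-indices. A worked example (illustrative only — the helper is private to this file, so the call is hypothetical):

```kotlin
fun main() {
    // A contiguous tensor of shape [2, 3, 4] has strides [12, 4, 1].
    val strides = intArrayOf(12, 4, 1)
    // 17 = 1*12 + 1*4 + 1*1, so the recovered multi-index is [1, 1, 1].
    println(indexFromOffset(17, strides, 3).toList()) // [1, 1, 1]
}
```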
@@ -0,0 +1,66 @@
+package kscience.kmath.torch
+
+import kotlinx.cinterop.*
+import kscience.kmath.ctorch.*
+
+public sealed class TorchTensorAlgebra<T, PrimitiveArrayType> constructor(
+    internal val scope: DeferScope
+) {
+    internal abstract fun wrap(tensorHandle: COpaquePointer): TorchTensor<T>
+    public abstract fun copyFromArray(
+        array: PrimitiveArrayType,
+        shape: IntArray,
+        device: TorchDevice = TorchDevice.TorchCPU
+    ): TorchTensor<T>
+
+    public abstract fun TorchTensor<T>.copyToArray(): PrimitiveArrayType
+
+    public infix fun TorchTensor<T>.dot(other: TorchTensor<T>): TorchTensor<T> =
+        wrap(matmul(this.tensorHandle, other.tensorHandle)!!)
+
+    public infix fun TorchTensor<T>.dotAssign(other: TorchTensor<T>): Unit {
+        matmul_assign(this.tensorHandle, other.tensorHandle)
+    }
+}
+
+public sealed class TorchTensorFieldAlgebra<T, PrimitiveArrayType>(scope: DeferScope) :
+    TorchTensorAlgebra<T, PrimitiveArrayType>(scope) {
+    public abstract fun randNormal(shape: IntArray, device: TorchDevice = TorchDevice.TorchCPU): TorchTensor<T>
+    public abstract fun randUniform(shape: IntArray, device: TorchDevice = TorchDevice.TorchCPU): TorchTensor<T>
+}
+
+public class TorchTensorRealAlgebra(scope: DeferScope) : TorchTensorFieldAlgebra<Double, DoubleArray>(scope) {
+    override fun wrap(tensorHandle: COpaquePointer): TorchTensorReal =
+        TorchTensorReal(scope = scope, tensorHandle = tensorHandle)
+
+    override fun TorchTensor<Double>.copyToArray(): DoubleArray =
+        this.elements().map { it.second }.toList().toDoubleArray()
+
+    override fun copyFromArray(
+        array: DoubleArray,
+        shape: IntArray,
+        device: TorchDevice
+    ): TorchTensorReal =
+        TorchTensorReal(
+            scope = scope,
+            tensorHandle = copy_from_blob_double(
+                array.toCValues(),
+                shape.toCValues(),
+                shape.size,
+                device.toInt()
+            )!!
+        )
+
+    override fun randNormal(shape: IntArray, device: TorchDevice): TorchTensorReal = TorchTensorReal(
+        scope = scope,
+        tensorHandle = randn_double(shape.toCValues(), shape.size, device.toInt())!!
+    )
+
+    override fun randUniform(shape: IntArray, device: TorchDevice): TorchTensorReal = TorchTensorReal(
+        scope = scope,
+        tensorHandle = rand_double(shape.toCValues(), shape.size, device.toInt())!!
+    )
+}
+
+public fun <R> TorchTensorRealAlgebra(block: TorchTensorRealAlgebra.() -> R): R =
+    memScoped { TorchTensorRealAlgebra(this).block() }
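Putting the pieces together, a typical session with the scoped algebra looks roughly like this (a sketch; `setSeed` is assumed to be the top-level wrapper around the native `set_seed` from Utils.kt):

```kotlin
TorchTensorRealAlgebra {
    setSeed(987654)
    val lhs = randNormal(shape = intArrayOf(3, 3))
    val rhs = randUniform(shape = intArrayOf(3, 3)) // CPU unless a device is passed
    val product = lhs dot rhs                       // new tensor in the same scope
    println(product)
    println(product.device)                         // the CPU device object
    lhs dotAssign rhs                               // in-place variant via matmul_assign
}
```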
@@ -1,7 +1,7 @@
 package kscience.kmath.torch
 
 import kotlinx.cinterop.*
-import ctorch.*
+import kscience.kmath.ctorch.*
 
 public fun getNumThreads(): Int {
     return get_num_threads()
@@ -1,18 +0,0 @@
-package kscience.kmath.torch
-
-import kotlinx.cinterop.*
-import ctorch.*
-
-public abstract class TorchMemoryHolder internal constructor(
-    internal val scope: DeferScope,
-    internal var tensorHandle: COpaquePointer?
-){
-    init {
-        scope.defer(::close)
-    }
-
-    protected fun close() {
-        dispose_tensor(tensorHandle)
-        tensorHandle = null
-    }
-}
@@ -1,119 +0,0 @@
-package kscience.kmath.torch
-
-import kscience.kmath.structures.*
-
-import kotlinx.cinterop.*
-import ctorch.*
-
-public sealed class TorchTensor<T, out TorchTensorBufferImpl : TorchTensorBuffer<T>> :
-    MutableNDBufferTrait<T, TorchTensorBufferImpl, TorchTensorStrides>() {
-
-    public fun asBuffer(): MutableBuffer<T> = buffer
-
-    public companion object {
-        public fun copyFromFloatArray(scope: DeferScope, array: FloatArray, shape: IntArray): TorchTensorFloat {
-            val tensorHandle: COpaquePointer = copy_from_blob_float(
-                array.toCValues(), shape.toCValues(), shape.size
-            )!!
-            return TorchTensorFloat(
-                scope = scope,
-                tensorHandle = tensorHandle,
-                strides = populateStridesFromNative(tensorHandle, rawShape = shape)
-            )
-        }
-
-        public fun copyFromIntArray(scope: DeferScope, array: IntArray, shape: IntArray): TorchTensorInt {
-            val tensorHandle: COpaquePointer = copy_from_blob_int(
-                array.toCValues(), shape.toCValues(), shape.size
-            )!!
-            return TorchTensorInt(
-                scope = scope,
-                tensorHandle = tensorHandle,
-                strides = populateStridesFromNative(tensorHandle, rawShape = shape)
-            )
-        }
-
-        public fun copyFromFloatArrayToGPU(
-            scope: DeferScope,
-            array: FloatArray,
-            shape: IntArray,
-            device: Int
-        ): TorchTensorFloatGPU {
-            val tensorHandle: COpaquePointer = copy_from_blob_to_gpu_float(
-                array.toCValues(), shape.toCValues(), shape.size, device
-            )!!
-            return TorchTensorFloatGPU(
-                scope = scope,
-                tensorHandle = tensorHandle,
-                strides = populateStridesFromNative(tensorHandle, rawShape = shape)
-            )
-        }
-    }
-
-    override fun toString(): String {
-        val nativeStringRepresentation: CPointer<ByteVar> = tensor_to_string(buffer.tensorHandle!!)!!
-        val stringRepresentation = nativeStringRepresentation.toKString()
-        dispose_char(nativeStringRepresentation)
-        return stringRepresentation
-    }
-
-    protected abstract fun wrap(
-        outScope: DeferScope,
-        outTensorHandle: COpaquePointer,
-        outStrides: TorchTensorStrides
-    ): TorchTensor<T, TorchTensorBufferImpl>
-
-    public fun copy(): TorchTensor<T, TorchTensorBufferImpl> = wrap(
-        outScope = buffer.scope,
-        outTensorHandle = copy_tensor(buffer.tensorHandle!!)!!,
-        outStrides = strides
-    )
-
-}
-
-public class TorchTensorFloat internal constructor(
-    scope: DeferScope,
-    tensorHandle: COpaquePointer,
-    override val strides: TorchTensorStrides
-) : TorchTensor<Float, TorchTensorBufferFloat>() {
-    override val buffer: TorchTensorBufferFloat = TorchTensorBufferFloat(scope, tensorHandle)
-    override fun wrap(
-        outScope: DeferScope,
-        outTensorHandle: COpaquePointer,
-        outStrides: TorchTensorStrides
-    ): TorchTensorFloat = TorchTensorFloat(
-        scope = outScope, tensorHandle = outTensorHandle, strides = outStrides
-    )
-}
-
-public class TorchTensorInt internal constructor(
-    scope: DeferScope,
-    tensorHandle: COpaquePointer,
-    override val strides: TorchTensorStrides
-) : TorchTensor<Int, TorchTensorBufferInt>() {
-    override val buffer: TorchTensorBufferInt = TorchTensorBufferInt(scope, tensorHandle)
-    override fun wrap(
-        outScope: DeferScope,
-        outTensorHandle: COpaquePointer,
-        outStrides: TorchTensorStrides
-    ): TorchTensorInt = TorchTensorInt(
-        scope = outScope, tensorHandle = outTensorHandle, strides = outStrides
-    )
-}
-
-public class TorchTensorFloatGPU internal constructor(
-    scope: DeferScope,
-    tensorHandle: COpaquePointer,
-    override val strides: TorchTensorStrides
-) : TorchTensor<Float, TorchTensorBufferFloatGPU>() {
-    override val buffer: TorchTensorBufferFloatGPU = TorchTensorBufferFloatGPU(scope, tensorHandle)
-    override fun wrap(
-        outScope: DeferScope,
-        outTensorHandle: COpaquePointer,
-        outStrides: TorchTensorStrides
-    ): TorchTensorFloatGPU =
-        TorchTensorFloatGPU(
-            scope = outScope, tensorHandle = outTensorHandle, strides = outStrides
-        )
-}
@@ -1,65 +0,0 @@
-package kscience.kmath.torch
-
-import kotlinx.cinterop.*
-import ctorch.*
-
-
-public sealed class TorchTensorAlgebra<
-    T,
-    TorchTensorBufferImpl : TorchTensorBuffer<T>,
-    PrimitiveArrayType>
-constructor(
-    internal val scope: DeferScope
-) {
-
-    protected abstract fun wrap(
-        outTensorHandle: COpaquePointer,
-        outStrides: TorchTensorStrides
-    ): TorchTensor<T, TorchTensorBufferImpl>
-
-    public infix fun TorchTensor<T, TorchTensorBufferImpl>.swap(other: TorchTensor<T, TorchTensorBufferImpl>): Unit {
-        check(this.shape contentEquals other.shape) {
-            "Attempt to swap tensors with different shapes"
-        }
-        this.buffer.tensorHandle = other.buffer.tensorHandle.also {
-            other.buffer.tensorHandle = this.buffer.tensorHandle
-        }
-    }
-
-    public abstract fun copyFromArray(array: PrimitiveArrayType, shape: IntArray): TorchTensor<T, TorchTensorBufferImpl>
-
-    public infix fun TorchTensor<T, TorchTensorBufferImpl>.dot(other: TorchTensor<T, TorchTensorBufferImpl>):
-            TorchTensor<T, TorchTensorBufferImpl> {
-        val resultHandle = matmul(this.buffer.tensorHandle, other.buffer.tensorHandle)!!
-        val strides = populateStridesFromNative(tensorHandle = resultHandle)
-        return wrap(resultHandle, strides)
-    }
-}
-
-
-public sealed class TorchTensorField<T, TorchTensorBufferImpl : TorchTensorBuffer<T>, PrimitiveArrayType>
-constructor(scope: DeferScope) : TorchTensorAlgebra<T, TorchTensorBufferImpl, PrimitiveArrayType>(scope) {
-    public abstract fun randn(shape: IntArray): TorchTensor<T, TorchTensorBufferImpl>
-}
-
-
-public class TorchTensorFloatAlgebra(scope: DeferScope) :
-    TorchTensorField<Float, TorchTensorBufferFloat, FloatArray>(scope) {
-    override fun wrap(
-        outTensorHandle: COpaquePointer,
-        outStrides: TorchTensorStrides
-    ): TorchTensorFloat = TorchTensorFloat(scope = scope, tensorHandle = outTensorHandle, strides = outStrides)
-
-    override fun randn(shape: IntArray): TorchTensor<Float, TorchTensorBufferFloat> {
-        val tensorHandle = randn_float(shape.toCValues(), shape.size)!!
-        val strides = populateStridesFromNative(tensorHandle = tensorHandle, rawShape = shape)
-        return wrap(tensorHandle, strides)
-    }
-
-    override fun copyFromArray(array: FloatArray, shape: IntArray): TorchTensorFloat =
-        TorchTensor.copyFromFloatArray(scope, array, shape)
-}
-
-
-public fun <R> TorchTensorFloatAlgebra(block: TorchTensorFloatAlgebra.() -> R): R =
-    memScoped { TorchTensorFloatAlgebra(this).block() }
@@ -1,98 +0,0 @@
-package kscience.kmath.torch
-
-import kscience.kmath.structures.MutableBuffer
-
-import kotlinx.cinterop.*
-import ctorch.*
-
-public sealed class TorchTensorBuffer<T> constructor(
-    scope: DeferScope,
-    tensorHandle: COpaquePointer?
-) : MutableBuffer<T>, TorchMemoryHolder(scope, tensorHandle) {
-
-    override val size: Int
-        get(){
-            return get_numel(tensorHandle!!)
-        }
-
-    internal abstract fun wrap(outScope: DeferScope, outTensorHandle: COpaquePointer): TorchTensorBuffer<T>
-
-    override fun copy(): TorchTensorBuffer<T> = wrap(
-        outScope = scope,
-        outTensorHandle = copy_tensor(tensorHandle!!)!!
-    )
-}
-
-public class TorchTensorBufferFloat internal constructor(
-    scope: DeferScope,
-    tensorHandle: COpaquePointer
-) : TorchTensorBuffer<Float>(scope, tensorHandle) {
-
-    private val tensorData: CPointer<FloatVar>
-        get(){
-            return get_data_float(tensorHandle!!)!!
-        }
-
-    override operator fun get(index: Int): Float = tensorData[index]
-
-    override operator fun set(index: Int, value: Float) {
-        tensorData[index] = value
-    }
-
-    override operator fun iterator(): Iterator<Float> = (1..size).map { tensorData[it - 1] }.iterator()
-
-    override fun wrap(outScope: DeferScope, outTensorHandle: COpaquePointer) = TorchTensorBufferFloat(
-        scope = outScope,
-        tensorHandle = outTensorHandle
-    )
-}
-
-
-public class TorchTensorBufferInt internal constructor(
-    scope: DeferScope,
-    tensorHandle: COpaquePointer
-) : TorchTensorBuffer<Int>(scope, tensorHandle) {
-
-    private val tensorData: CPointer<IntVar>
-        get(){
-            return get_data_int(tensorHandle!!)!!
-        }
-
-    override operator fun get(index: Int): Int = tensorData[index]
-
-    override operator fun set(index: Int, value: Int) {
-        tensorData[index] = value
-    }
-
-    override operator fun iterator(): Iterator<Int> = (1..size).map { tensorData[it - 1] }.iterator()
-
-    override fun wrap(outScope: DeferScope, outTensorHandle: COpaquePointer) = TorchTensorBufferInt(
-        scope = outScope,
-        tensorHandle = outTensorHandle
-    )
-}
-
-public class TorchTensorBufferFloatGPU internal constructor(
-    scope: DeferScope,
-    tensorHandle: COpaquePointer
-) : TorchTensorBuffer<Float>(scope, tensorHandle) {
-
-    override operator fun get(index: Int): Float = get_at_offset_float(tensorHandle!!, index)
-
-    override operator fun set(index: Int, value: Float) {
-        set_at_offset_float(tensorHandle!!, index, value)
-    }
-
-    override operator fun iterator(): Iterator<Float> {
-        val cpuCopy = copy_to_cpu(tensorHandle!!)!!
-        val tensorCpuData = get_data_float(cpuCopy)!!
-        val iteratorResult = (1..size).map { tensorCpuData[it - 1] }.iterator()
-        dispose_tensor(cpuCopy)
-        return iteratorResult
-    }
-
-    override fun wrap(outScope: DeferScope, outTensorHandle: COpaquePointer) = TorchTensorBufferFloatGPU(
-        scope = outScope,
-        tensorHandle = outTensorHandle
-    )
-}
@@ -1,55 +0,0 @@
-package kscience.kmath.torch
-
-import kscience.kmath.structures.Strides
-
-import kotlinx.cinterop.*
-import ctorch.*
-
-public class TorchTensorStrides internal constructor(
-    override val shape: IntArray,
-    override val strides: IntArray,
-    override val linearSize: Int
-) : Strides {
-    override fun index(offset: Int): IntArray {
-        val nDim = shape.size
-        val res = IntArray(nDim)
-        var current = offset
-        var strideIndex = 0
-
-        while (strideIndex < nDim) {
-            res[strideIndex] = (current / strides[strideIndex])
-            current %= strides[strideIndex]
-            strideIndex++
-        }
-        return res
-    }
-}
-
-
-private inline fun intPointerToArrayAndClean(ptr: CPointer<IntVar>, nDim: Int): IntArray {
-    val res: IntArray = (1 .. nDim).map{ptr[it-1]}.toIntArray()
-    dispose_int_array(ptr)
-    return res
-}
-
-private inline fun getShapeFromNative(tensorHandle: COpaquePointer, nDim: Int): IntArray{
-    return intPointerToArrayAndClean(get_shape(tensorHandle)!!, nDim)
-}
-
-private inline fun getStridesFromNative(tensorHandle: COpaquePointer, nDim: Int): IntArray{
-    return intPointerToArrayAndClean(get_strides(tensorHandle)!!, nDim)
-}
-
-internal inline fun populateStridesFromNative(
-    tensorHandle: COpaquePointer,
-    rawShape: IntArray? = null,
-    rawStrides: IntArray? = null,
-    rawLinearSize: Int? = null
-): TorchTensorStrides {
-    val nDim = rawShape?.size?: rawStrides?.size?: get_dim(tensorHandle)
-    return TorchTensorStrides(
-        shape = rawShape?: getShapeFromNative(tensorHandle, nDim),
-        strides = rawStrides?: getStridesFromNative(tensorHandle, nDim),
-        linearSize = rawLinearSize?: get_numel(tensorHandle)
-    )
-}
@@ -1,45 +1,22 @@
 package kscience.kmath.torch
 
-import kscience.kmath.structures.asBuffer
-
-import kotlinx.cinterop.memScoped
 import kotlin.test.*
 
+internal fun testingCopyFromArray(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+    TorchTensorRealAlgebra {
+        val array = (1..24).map { 10.0 * it * it }.toDoubleArray()
+        val shape = intArrayOf(2, 3, 4)
+        val tensor = copyFromArray(array, shape = shape, device = device)
+        val copyOfTensor = tensor.copy()
+        tensor[intArrayOf(0, 0)] = 0.1
+        assertTrue(copyOfTensor.copyToArray() contentEquals array)
+        assertEquals(0.1, tensor[intArrayOf(0, 0)])
+    }
+}
+
-internal class TestTorchTensor {
+class TestTorchTensor {
 
     @Test
-    fun intTensorLayout() = memScoped {
-        val array = (1..24).toList().toIntArray()
-        val shape = intArrayOf(4, 6)
-        val tensor = TorchTensor.copyFromIntArray(scope = this, array = array, shape = shape)
-        tensor.elements().forEach {
-            assertEquals(tensor[it.first], it.second)
-        }
-        assertTrue(tensor.asBuffer().contentEquals(array.asBuffer()))
-    }
-
-    @Test
-    fun floatTensorLayout() = memScoped {
-        val array = (1..10).map { it + 50f }.toList().toFloatArray()
-        val shape = intArrayOf(10)
-        val tensor = TorchTensor.copyFromFloatArray(this, array, shape)
-        tensor.elements().forEach {
-            assertEquals(tensor[it.first], it.second)
-        }
-        assertTrue(tensor.asBuffer().contentEquals(array.asBuffer()))
-    }
-
-    @Test
-    fun mutableStructure() = memScoped {
-        val array = (1..10).map { 1f * it }.toList().toFloatArray()
-        val shape = intArrayOf(10)
-        val tensor = TorchTensor.copyFromFloatArray(this, array, shape)
-        val tensorCopy = tensor.copy()
-
-        tensor[intArrayOf(0)] = 99f
-        assertEquals(99f, tensor[intArrayOf(0)])
-        assertEquals(1f, tensorCopy[intArrayOf(0)])
-    }
-
+    fun testCopyFromArray() = testingCopyFromArray()
 }
@@ -1,36 +1,52 @@
 package kscience.kmath.torch
 
+import kscience.kmath.linear.RealMatrixContext
+import kscience.kmath.operations.invoke
+import kscience.kmath.structures.Matrix
+import kotlin.math.abs
 import kotlin.test.*
-import kotlin.time.measureTime
 
+internal fun testingScalarProduct(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+    TorchTensorRealAlgebra {
+        val lhs = randUniform(shape = intArrayOf(10), device = device)
+        val rhs = randUniform(shape = intArrayOf(10), device = device)
+        val product = lhs dot rhs
+        var expected = 0.0
+        lhs.elements().forEach {
+            expected += it.second * rhs[it.first]
+        }
+        assertTrue(abs(expected - product.value()) < TOLERANCE)
+    }
+}
 
-class TestTorchTensorAlgebra {
+internal fun testingMatrixMultiplication(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+    TorchTensorRealAlgebra {
+        setSeed(SEED)
+
+        val lhsTensor = randNormal(shape = intArrayOf(20, 20), device = device)
+        val rhsTensor = randNormal(shape = intArrayOf(20, 20), device = device)
+        val product = lhsTensor dot rhsTensor
+
+        val expected: Matrix<Double> = RealMatrixContext {
+            val lhs = produce(20, 20) { i, j -> lhsTensor[intArrayOf(i, j)] }
+            val rhs = produce(20, 20) { i, j -> rhsTensor[intArrayOf(i, j)] }
+            lhs dot rhs
+        }
+
+        var error: Double = 0.0
+        product.elements().forEach {
+            error += abs(expected[it.first] - it.second)
+        }
+        assertTrue(error < TOLERANCE)
+    }
+}
+
+internal class TestTorchTensorAlgebra {
 
     @Test
-    fun swappingTensors() = TorchTensorFloatAlgebra {
-        val tensorA = copyFromArray(floatArrayOf(1f, 2f, 3f), intArrayOf(3))
-        val tensorB = tensorA.copy()
-        val tensorC = copyFromArray(floatArrayOf(4f, 5f, 6f), intArrayOf(3))
-        tensorA swap tensorC
-        assertTrue(tensorB.asBuffer().contentEquals(tensorC.asBuffer()))
-    }
+    fun testScalarProduct() = testingScalarProduct()
 
     @Test
-    fun dotOperation() = TorchTensorFloatAlgebra {
-        setSeed(987654)
-        var tensorA = randn(intArrayOf(1000, 1000))
-        val tensorB = randn(intArrayOf(1000, 1000))
-        measureTime {
-            repeat(100) {
-                TorchTensorFloatAlgebra {
-                    tensorA swap (tensorA dot tensorB)
-                }
-            }
-        }.also(::println)
-        assertTrue(tensorA.shape contentEquals tensorB.shape)
-    }
-
+    fun testMatrixMultiplication() = testingMatrixMultiplication()
 }
@@ -3,19 +3,31 @@ package kscience.kmath.torch
 import kotlin.test.*
 
+internal val SEED = 987654
+internal val TOLERANCE = 1e-6
+
+internal fun testingSetSeed(device: TorchDevice = TorchDevice.TorchCPU): Unit {
+    TorchTensorRealAlgebra {
+        setSeed(SEED)
+        val normal = randNormal(IntArray(0), device = device).value()
+        val uniform = randUniform(IntArray(0), device = device).value()
+        setSeed(SEED)
+        val nextNormal = randNormal(IntArray(0), device = device).value()
+        val nextUniform = randUniform(IntArray(0), device = device).value()
+        assertEquals(normal, nextNormal)
+        assertEquals(uniform, nextUniform)
+    }
+}
+
 internal class TestUtils {
     @Test
-    fun settingTorchThreadsCount() {
+    fun testSetNumThreads() {
         val numThreads = 2
         setNumThreads(numThreads)
         assertEquals(numThreads, getNumThreads())
     }
 
     @Test
-    fun seedSetting() = TorchTensorFloatAlgebra {
-        setSeed(987654)
-        val tensorA = randn(intArrayOf(2,3))
-        setSeed(987654)
-        val tensorB = randn(intArrayOf(2,3))
-        assertTrue(tensorA.asBuffer().contentEquals(tensorB.asBuffer()))
-    }
+    fun testSetSeed() = testingSetSeed()
 }