From a229aaa6a480bab73df78d16aaddf24103867c94 Mon Sep 17 00:00:00 2001
From: rgrit91
Date: Tue, 29 Dec 2020 22:42:33 +0000
Subject: [PATCH] Buffer protocol for torch tensors

---
 .gitignore                                         |   1 +
 .../kscience/kmath/structures/NDStructure.kt       | 103 +++++++-----
 kmath-torch/README.md                              |  95 ++++++++++++
 kmath-torch/build.gradle.kts                       | 146 ++++++++++++++++++
 kmath-torch/ctorch/CMakeLists.txt                  |  25 +++
 kmath-torch/ctorch/include/ctorch.h                |  48 ++++++
 kmath-torch/ctorch/include/utils.hh                |  56 +++++++
 kmath-torch/ctorch/src/ctorch.cc                   | 110 ++++++++++++
 .../src/nativeInterop/cinterop/libctorch.def       |   2 +
 .../kscience/kmath/torch/TorchTensor.kt            |  52 ++++++
 .../kscience/kmath/torch/TorchTensorBuffer.kt      |  67 ++++++++
 .../kmath/torch/TorchTensorStrides.kt              |  55 +++++++
 .../kotlin/kscience/kmath/torch/Utils.kt           |  20 +++
 .../kscience/kmath/torch/TestTorchTensor.kt        |  33 ++++
 .../kotlin/kscience/kmath/torch/TestUtils.kt       |  19 +++
 settings.gradle.kts                                |   4 +
 16 files changed, 796 insertions(+), 40 deletions(-)
 create mode 100644 kmath-torch/README.md
 create mode 100644 kmath-torch/build.gradle.kts
 create mode 100644 kmath-torch/ctorch/CMakeLists.txt
 create mode 100644 kmath-torch/ctorch/include/ctorch.h
 create mode 100644 kmath-torch/ctorch/include/utils.hh
 create mode 100644 kmath-torch/ctorch/src/ctorch.cc
 create mode 100644 kmath-torch/src/nativeInterop/cinterop/libctorch.def
 create mode 100644 kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensor.kt
 create mode 100644 kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorBuffer.kt
 create mode 100644 kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorStrides.kt
 create mode 100644 kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/Utils.kt
 create mode 100644 kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
 create mode 100644 kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt

diff --git a/.gitignore b/.gitignore
index bade7f08c..ea8e65fb4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
 build/
 out/
 .idea/
+.vscode/
 
 # Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored)
 !gradle-wrapper.jar

diff --git a/kmath-core/src/commonMain/kotlin/kscience/kmath/structures/NDStructure.kt b/kmath-core/src/commonMain/kotlin/kscience/kmath/structures/NDStructure.kt
index 08160adf4..fe580b031 100644
--- a/kmath-core/src/commonMain/kotlin/kscience/kmath/structures/NDStructure.kt
+++ b/kmath-core/src/commonMain/kotlin/kscience/kmath/structures/NDStructure.kt
@@ -148,23 +148,28 @@ public interface Strides {
     /**
      * Array strides
     */
-    public val strides: List<Int>
-
-    /**
-     * Get linear index from multidimensional index
-     */
-    public fun offset(index: IntArray): Int
-
-    /**
-     * Get multidimensional from linear
-     */
-    public fun index(offset: Int): IntArray
+    public val strides: IntArray
 
     /**
      * The size of linear buffer to accommodate all elements of ND-structure corresponding to strides
     */
     public val linearSize: Int
 
+    /**
+     * Get a linear index from a multidimensional index
+     */
+    public fun offset(index: IntArray): Int = index.mapIndexed { i, value ->
+        if (value < 0 || value >= this.shape[i])
+            throw IndexOutOfBoundsException("Index $value out of shape bounds: (0,${this.shape[i]})")
+
+        value * strides[i]
+    }.sum()
+
+    /**
+     * Get a multidimensional index from a linear offset
+     */
+    public fun index(offset: Int): IntArray
+
+    // TODO introduce a fast way to calculate index of the next element?
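+    // Example: for shape [3, 2], DefaultStrides below yields strides [1, 3]
+    // (first index varies fastest), so offset([2, 1]) = 2 * 1 + 1 * 3 = 5,
+    // and index(5) recovers [2, 1].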
 /**

@@ -183,7 +188,7 @@ public class DefaultStrides private constructor(override val shape: IntArray) :
     /**
      * Strides for memory access
     */
-    override val strides: List<Int> by lazy {
+    override val strides: IntArray by lazy {
         sequence {
             var current = 1
             yield(1)
@@ -192,16 +197,9 @@ public class DefaultStrides private constructor(override val shape: IntArray) :
                 current *= it
                 yield(current)
             }
-        }.toList()
+        }.toList().toIntArray()
     }
 
-    override fun offset(index: IntArray): Int = index.mapIndexed { i, value ->
-        if (value < 0 || value >= this.shape[i])
-            throw IndexOutOfBoundsException("Index $value out of shape bounds: (0,${this.shape[i]})")
-
-        value * strides[i]
-    }.sum()
-
     override fun index(offset: Int): IntArray {
         val res = IntArray(shape.size)
         var current = offset
@@ -238,20 +236,22 @@ public class DefaultStrides private constructor(override val shape: IntArray) :
 }
 
 /**
- * Represents [NDStructure] over [Buffer].
+ * Trait for [NDStructure] over [Buffer].
  *
- * @param T the type of items.
+ * @param T the type of items
+ * @param BufferImpl implementation of [Buffer].
  */
-public abstract class NDBuffer<T> : NDStructure<T> {
+public abstract class NDBufferTrait<T, out BufferImpl : Buffer<T>, out StridesImpl : Strides> :
+    NDStructure<T> {
     /**
      * The underlying buffer.
      */
-    public abstract val buffer: Buffer<T>
+    public abstract val buffer: BufferImpl
 
     /**
      * The strides to access elements of [Buffer] by linear indices.
     */
-    public abstract val strides: Strides
+    public abstract val strides: StridesImpl
 
     override operator fun get(index: IntArray): T = buffer[strides.offset(index)]
 
@@ -259,8 +259,8 @@ public abstract class NDBuffer<T> : NDStructure<T> {
     override fun elements(): Sequence<Pair<IntArray, T>> = strides.indices().map { it to this[it] }
 
-    override fun equals(other: Any?): Boolean {
-        return NDStructure.equals(this, other as? NDStructure<*> ?: return false)
+    public fun checkStridesBufferCompatibility(): Unit = require(strides.linearSize == buffer.size) {
+        "Expected buffer size of ${strides.linearSize}, but found ${buffer.size}"
     }
 
     override fun hashCode(): Int {
@@ -269,6 +269,10 @@ public abstract class NDBuffer<T> : NDStructure<T> {
         return result
     }
 
+    override fun equals(other: Any?): Boolean {
+        return NDStructure.equals(this, other as? NDStructure<*> ?: return false)
+    }
+
     override fun toString(): String {
         val bufferRepr: String = when (shape.size) {
             1 -> buffer.asSequence().joinToString(prefix = "[", postfix = "]", separator = ", ")
@@ -282,10 +286,36 @@ public abstract class NDBuffer<T> : NDStructure<T> {
         }
         return "NDBuffer(shape=${shape.contentToString()}, buffer=$bufferRepr)"
     }
-
-
 }
 
+/**
+ * Trait for [MutableNDStructure] over [MutableBuffer].
+ *
+ * @param T the type of items
+ * @param MutableBufferImpl implementation of [MutableBuffer].
+ */
+public abstract class MutableNDBufferTrait<T, MutableBufferImpl : MutableBuffer<T>, out StridesImpl : Strides> :
+    NDBufferTrait<T, MutableBufferImpl, StridesImpl>(), MutableNDStructure<T> {
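+    // Mutable ND buffers opt out of structural equality and hashing, since
+    // their contents can change at any time.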
+    override fun hashCode(): Int = 0
+    override fun equals(other: Any?): Boolean = false
+    override operator fun set(index: IntArray, value: T): Unit =
+        buffer.set(strides.offset(index), value)
+}
+
+/**
+ * Default representation of [NDStructure] over [Buffer].
+ *
+ * @param T the type of items.
+ */
+public abstract class NDBuffer<T> : NDBufferTrait<T, Buffer<T>, Strides>()
+
+/**
+ * Default representation of [MutableNDStructure] over [MutableBuffer].
+ *
+ * @param T the type of items.
+ */
+public abstract class MutableNDBuffer<T> : MutableNDBufferTrait<T, MutableBuffer<T>, Strides>()
+
 /**
  * Boxing generic [NDStructure]
  */
@@ -294,9 +324,7 @@ public class BufferNDStructure<T>(
     override val buffer: Buffer<T>,
 ) : NDBuffer<T>() {
     init {
-        if (strides.linearSize != buffer.size) {
-            error("Expected buffer side of ${strides.linearSize}, but found ${buffer.size}")
-        }
+        checkStridesBufferCompatibility()
     }
 }
 
@@ -316,20 +344,15 @@ public inline fun <T, reified R : Any> NDStructure<T>.mapToBuffer(
 }
 
 /**
- * Mutable ND buffer based on linear [MutableBuffer].
+ * Boxing generic [MutableNDStructure].
  */
 public class MutableBufferNDStructure<T>(
     override val strides: Strides,
     override val buffer: MutableBuffer<T>,
-) : NDBuffer<T>(), MutableNDStructure<T> {
-
+) : MutableNDBuffer<T>() {
     init {
-        require(strides.linearSize == buffer.size) {
-            "Expected buffer side of ${strides.linearSize}, but found ${buffer.size}"
-        }
+        checkStridesBufferCompatibility()
     }
-
-    override operator fun set(index: IntArray, value: T): Unit = buffer.set(strides.offset(index), value)
 }
 
 public inline fun <T : Any> NDStructure<T>.combine(

diff --git a/kmath-torch/README.md b/kmath-torch/README.md
new file mode 100644
index 000000000..6e0368036
--- /dev/null
+++ b/kmath-torch/README.md
@@ -0,0 +1,95 @@
+# LibTorch extension (`kmath-torch`)
+
+This is a `Kotlin/Native` module, with only `linuxX64` supported so far. This library wraps some of the [PyTorch C++ API](https://pytorch.org/cppdocs), focusing on integrating `Aten` & `Autograd` with `KMath`.
+
+## Installation
+To install the library, you have to build & publish `kmath-core`, `kmath-memory` and `kmath-torch` locally:
+```
+./gradlew -q :kmath-core:publishToMavenLocal :kmath-memory:publishToMavenLocal :kmath-torch:publishToMavenLocal
+```
+
+This builds `ctorch`, a C wrapper for `LibTorch`, placed inside:
+
+`~/.konan/third-party/kmath-torch-0.2.0-dev-4/cpp-build`
+
+You will have to link against it in your own project. Here is an example of a build script for a standalone application:
+```kotlin
+//build.gradle.kts
+plugins {
+    id("ru.mipt.npm.mpp")
+}
+
+repositories {
+    jcenter()
+    mavenLocal()
+}
+
+val home = System.getProperty("user.home")
+val kver = "0.2.0-dev-4"
+val cppBuildDir = "$home/.konan/third-party/kmath-torch-$kver/cpp-build"
+
+kotlin {
+    explicitApiWarning()
+
+    val nativeTarget = linuxX64("your.app")
+    nativeTarget.apply {
+        binaries {
+            executable {
+                entryPoint = "your.app.main"
+            }
+            all {
+                linkerOpts(
+                    "-L$cppBuildDir",
+                    "-Wl,-rpath=$cppBuildDir",
+                    "-lctorch"
+                )
+            }
+        }
+    }
+
+    val main by nativeTarget.compilations.getting
+
+    sourceSets {
+        val nativeMain by creating {
+            dependencies {
+                implementation("kscience.kmath:kmath-torch:$kver")
+            }
+        }
+        main.defaultSourceSet.dependsOn(nativeMain)
+    }
+}
+```
+```kotlin
+//settings.gradle.kts
+pluginManagement {
+    repositories {
+        gradlePluginPortal()
+        jcenter()
+        maven("https://dl.bintray.com/mipt-npm/dev")
+    }
+    plugins {
+        id("ru.mipt.npm.mpp") version "0.7.1"
+        kotlin("jvm") version "1.4.21"
+    }
+}
+```
+
+## Usage
+
+Tensors implement the buffer protocol over `MutableNDStructure`.
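+Since a tensor is a `MutableNDStructure`, individual elements can be read and
+written through multidimensional indices (a minimal sketch, relying on the
+factory methods introduced below and on LibTorch's row-major layout):
+```kotlin
+memScoped {
+    val tensor = TorchTensor.copyFromIntArray(
+        scope = this,
+        array = intArrayOf(1, 2, 3, 4),
+        shape = intArrayOf(2, 2))
+    println(tensor[intArrayOf(0, 1)]) // prints 2
+    tensor[intArrayOf(0, 1)] = 7      // writes through to the native tensor
+    println(tensor[intArrayOf(0, 1)]) // prints 7
+}
+```
+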
+Tensors can only be instantiated through provided factory methods and require scoping:
+```kotlin
+memScoped {
+    val intTensor: TorchTensorInt = TorchTensor.copyFromIntArray(
+        scope = this,
+        array = intArrayOf(7,8,9,2,6,5),
+        shape = intArrayOf(3,2))
+    println(intTensor)
+
+    val floatTensor: TorchTensorFloat = TorchTensor.copyFromFloatArray(
+        scope = this,
+        array = floatArrayOf(7f,8.9f,2.6f,5.6f),
+        shape = intArrayOf(4))
+    println(floatTensor)
+}
+```
+

diff --git a/kmath-torch/build.gradle.kts b/kmath-torch/build.gradle.kts
new file mode 100644
index 000000000..08f74fd88
--- /dev/null
+++ b/kmath-torch/build.gradle.kts
@@ -0,0 +1,146 @@
+import de.undercouch.gradle.tasks.download.Download
+import org.jetbrains.kotlin.gradle.plugin.mpp.KotlinNativeTarget
+
+
+plugins {
+    id("ru.mipt.npm.mpp")
+    id("de.undercouch.download")
+}
+
+
+val home = System.getProperty("user.home")
+val thirdPartyDir = "$home/.konan/third-party/kmath-torch-${project.property("version")}"
+val cppBuildDir = "$thirdPartyDir/cpp-build"
+
+val cmakeArchive = "cmake-3.19.2-Linux-x86_64"
+val torchArchive = "libtorch"
+
+val cmakeCmd = "$thirdPartyDir/$cmakeArchive/bin/cmake"
+val ninjaCmd = "$thirdPartyDir/ninja"
+
+val downloadCMake by tasks.registering(Download::class) {
+    val tarFile = "$cmakeArchive.tar.gz"
+    src("https://github.com/Kitware/CMake/releases/download/v3.19.2/$tarFile")
+    dest(File(thirdPartyDir, tarFile))
+    overwrite(false)
+}
+
+val downloadNinja by tasks.registering(Download::class) {
+    src("https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip")
+    dest(File(thirdPartyDir, "ninja-linux.zip"))
+    overwrite(false)
+}
+
+val downloadTorch by tasks.registering(Download::class) {
+    val zipFile = "$torchArchive-cxx11-abi-shared-with-deps-1.7.1%2Bcu110.zip"
+    src("https://download.pytorch.org/libtorch/cu110/$zipFile")
+    dest(File(thirdPartyDir, "$torchArchive.zip"))
+    overwrite(false)
+}
+
+val extractCMake by tasks.registering(Copy::class) {
+    dependsOn(downloadCMake)
+    from(tarTree(resources.gzip(downloadCMake.get().dest)))
+    into(thirdPartyDir)
+}
+
+val extractTorch by tasks.registering(Copy::class) {
+    dependsOn(downloadTorch)
+    from(zipTree(downloadTorch.get().dest))
+    into(thirdPartyDir)
+}
+
+val extractNinja by tasks.registering(Copy::class) {
+    dependsOn(downloadNinja)
+    from(zipTree(downloadNinja.get().dest))
+    into(thirdPartyDir)
+}
+
+val configureCpp by tasks.registering {
+    dependsOn(extractCMake)
+    dependsOn(extractNinja)
+    dependsOn(extractTorch)
+    onlyIf { !file(cppBuildDir).exists() }
+    doLast {
+        exec {
+            workingDir(thirdPartyDir)
+            commandLine("mkdir", "-p", cppBuildDir)
+        }
+        exec {
+            workingDir(cppBuildDir)
+            commandLine(
+                cmakeCmd,
+                projectDir.resolve("ctorch"),
+                "-GNinja",
+                "-DCMAKE_MAKE_PROGRAM=$ninjaCmd",
+                "-DCMAKE_PREFIX_PATH=$thirdPartyDir/$torchArchive",
+                "-DCMAKE_BUILD_TYPE=Release"
+            )
+        }
+    }
+}
+
+val cleanCppBuild by tasks.registering {
+    onlyIf { file(cppBuildDir).exists() }
+    doLast {
+        exec {
+            workingDir(thirdPartyDir)
+            commandLine("rm", "-rf", cppBuildDir)
+        }
+    }
+}
+
+val buildCpp by tasks.registering {
+    dependsOn(configureCpp)
+    doLast {
+        exec {
+            workingDir(cppBuildDir)
+            commandLine(cmakeCmd, "--build", ".", "--config", "Release")
+        }
+    }
+}
+
+kotlin {
+    explicitApiWarning()
+
+    val nativeTarget = linuxX64("torch")
+    nativeTarget.apply {
+        binaries {
+            all {
+                linkerOpts(
+                    "-L$cppBuildDir",
+                    "-Wl,-rpath=$cppBuildDir",
+                    "-lctorch"
+                )
+            }
+        }
+    }
+
+    val main by nativeTarget.compilations.getting {
+        cinterops {
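+            // Kotlin/Native bindings for the ctorch C wrapper; the cinterop
+            // stubs are generated from the headers in ctorch/include.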
+            val libctorch by creating {
+                includeDirs(projectDir.resolve("ctorch/include"))
+            }
+        }
+    }
+
+    val test by nativeTarget.compilations.getting
+
+    sourceSets {
+        val nativeMain by creating {
+            dependencies {
+                api(project(":kmath-core"))
+            }
+        }
+        val nativeTest by creating {
+            dependsOn(nativeMain)
+        }
+
+        main.defaultSourceSet.dependsOn(nativeMain)
+        test.defaultSourceSet.dependsOn(nativeTest)
+    }
+}
+
+val torch: KotlinNativeTarget by kotlin.targets
+tasks[torch.compilations["main"].cinterops["libctorch"].interopProcessingTaskName]
+    .dependsOn(buildCpp)
\ No newline at end of file

diff --git a/kmath-torch/ctorch/CMakeLists.txt b/kmath-torch/ctorch/CMakeLists.txt
new file mode 100644
index 000000000..8617ff418
--- /dev/null
+++ b/kmath-torch/ctorch/CMakeLists.txt
@@ -0,0 +1,25 @@
+cmake_minimum_required(VERSION 3.12)
+
+project(CTorch LANGUAGES C CXX)
+
+# Require C++17
+set(CMAKE_CXX_STANDARD 17)
+
+# Build configuration
+if(NOT CMAKE_BUILD_TYPE)
+    set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
+endif()
+message(STATUS "Build type: ${CMAKE_BUILD_TYPE}")
+
+find_package(Torch REQUIRED)
+
+add_library(ctorch SHARED src/ctorch.cc)
+target_include_directories(ctorch PRIVATE include)
+target_link_libraries(ctorch PRIVATE torch)
+target_compile_options(ctorch PRIVATE -Wall -Wextra -Wpedantic -O3 -fPIC)
+
+include(GNUInstallDirs)
+set_target_properties(ctorch PROPERTIES PUBLIC_HEADER include/ctorch.h)
+install(TARGETS ctorch
+    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+    PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
\ No newline at end of file

diff --git a/kmath-torch/ctorch/include/ctorch.h b/kmath-torch/ctorch/include/ctorch.h
new file mode 100644
index 000000000..36ec82d10
--- /dev/null
+++ b/kmath-torch/ctorch/include/ctorch.h
@@ -0,0 +1,48 @@
+#ifndef CTORCH
+#define CTORCH
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+    typedef void *TorchTensorHandle;
+
+    int get_num_threads();
+
+    void set_num_threads(int num_threads);
+
+    bool cuda_is_available();
+
+    void set_seed(int seed);
+
+    TorchTensorHandle copy_from_blob_double(double *data, int *shape, int dim);
+    TorchTensorHandle copy_from_blob_float(float *data, int *shape, int dim);
+    TorchTensorHandle copy_from_blob_long(long *data, int *shape, int dim);
+    TorchTensorHandle copy_from_blob_int(int *data, int *shape, int dim);
+
+    TorchTensorHandle copy_tensor(TorchTensorHandle tensor_handle);
+
+    double *get_data_double(TorchTensorHandle tensor_handle);
+    float *get_data_float(TorchTensorHandle tensor_handle);
+    long *get_data_long(TorchTensorHandle tensor_handle);
+    int *get_data_int(TorchTensorHandle tensor_handle);
+
+    int get_numel(TorchTensorHandle tensor_handle);
+    int get_dim(TorchTensorHandle tensor_handle);
+    int *get_shape(TorchTensorHandle tensor_handle);
+    int *get_strides(TorchTensorHandle tensor_handle);
+
+    char *tensor_to_string(TorchTensorHandle tensor_handle);
+
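+    // Arrays and strings returned by get_shape, get_strides and
+    // tensor_to_string are malloc'ed; the caller owns them and must release
+    // them with the matching dispose_* helpers below.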
+    void dispose_int_array(int *ptr);
+    void dispose_char(char *ptr);
+    void dispose_tensor(TorchTensorHandle tensor_handle);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //CTORCH
\ No newline at end of file

diff --git a/kmath-torch/ctorch/include/utils.hh b/kmath-torch/ctorch/include/utils.hh
new file mode 100644
index 000000000..e58981960
--- /dev/null
+++ b/kmath-torch/ctorch/include/utils.hh
@@ -0,0 +1,56 @@
+#include <torch/torch.h>
+
+#include "ctorch.h"
+
+namespace ctorch
+{
+    template <typename Dtype>
+    inline c10::ScalarType dtype()
+    {
+        return torch::kFloat64;
+    }
+
+    template <>
+    inline c10::ScalarType dtype<float>()
+    {
+        return torch::kFloat32;
+    }
+
+    template <>
+    inline c10::ScalarType dtype<long>()
+    {
+        return torch::kInt64;
+    }
+
+    template <>
+    inline c10::ScalarType dtype<int>()
+    {
+        return torch::kInt32;
+    }
+
+    inline torch::Tensor &cast(TorchTensorHandle tensor_handle)
+    {
+        return *static_cast<torch::Tensor *>(tensor_handle);
+    }
+
+    template <typename Dtype>
+    inline torch::Tensor copy_from_blob(Dtype *data, int *shape, int dim)
+    {
+        auto shape_vec = std::vector<int64_t>(dim);
+        shape_vec.assign(shape, shape + dim);
+        return torch::from_blob(data, shape_vec, dtype<Dtype>()).clone();
+    }
+
+    template <typename IntArray>
+    inline int *to_dynamic_ints(IntArray arr)
+    {
+        size_t n = arr.size();
+        int *res = (int *)malloc(sizeof(int) * n);
+        for (size_t i = 0; i < n; i++)
+        {
+            res[i] = arr[i];
+        }
+        return res;
+    }
+
+} // namespace ctorch

diff --git a/kmath-torch/ctorch/src/ctorch.cc b/kmath-torch/ctorch/src/ctorch.cc
new file mode 100644
index 000000000..457c2d8c3
--- /dev/null
+++ b/kmath-torch/ctorch/src/ctorch.cc
@@ -0,0 +1,110 @@
+#include <cstdlib>
+#include <cstring>
+#include <sstream>
+
+#include "ctorch.h"
+#include "utils.hh"
+
+int get_num_threads()
+{
+    return torch::get_num_threads();
+}
+
+void set_num_threads(int num_threads)
+{
+    torch::set_num_threads(num_threads);
+}
+
+bool cuda_is_available()
+{
+    return torch::cuda::is_available();
+}
+
+void set_seed(int seed)
+{
+    torch::manual_seed(seed);
+}
+
+TorchTensorHandle copy_from_blob_double(double *data, int *shape, int dim)
+{
+    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim));
+}
+TorchTensorHandle copy_from_blob_float(float *data, int *shape, int dim)
+{
+    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim));
+}
+TorchTensorHandle copy_from_blob_long(long *data, int *shape, int dim)
+{
+    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim));
+}
+TorchTensorHandle copy_from_blob_int(int *data, int *shape, int dim)
+{
+    return new torch::Tensor(ctorch::copy_from_blob(data, shape, dim));
+}
+
+TorchTensorHandle copy_tensor(TorchTensorHandle tensor_handle)
+{
+    return new torch::Tensor(ctorch::cast(tensor_handle).clone());
+}
+
+double *get_data_double(TorchTensorHandle tensor_handle)
+{
+    return ctorch::cast(tensor_handle).data_ptr<double>();
+}
+float *get_data_float(TorchTensorHandle tensor_handle)
+{
+    return ctorch::cast(tensor_handle).data_ptr<float>();
+}
+long *get_data_long(TorchTensorHandle tensor_handle)
+{
+    return ctorch::cast(tensor_handle).data_ptr<long>();
+}
+int *get_data_int(TorchTensorHandle tensor_handle)
+{
+    return ctorch::cast(tensor_handle).data_ptr<int>();
+}
+
+int get_numel(TorchTensorHandle tensor_handle)
+{
+    return ctorch::cast(tensor_handle).numel();
+}
+
+int get_dim(TorchTensorHandle tensor_handle)
+{
+    return ctorch::cast(tensor_handle).dim();
+}
+
+int *get_shape(TorchTensorHandle tensor_handle)
+{
+    return ctorch::to_dynamic_ints(ctorch::cast(tensor_handle).sizes());
+}
+
+int *get_strides(TorchTensorHandle tensor_handle)
+{
+    return ctorch::to_dynamic_ints(ctorch::cast(tensor_handle).strides());
+}
+
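+// Renders the tensor through torch's stream operator into a malloc'ed C
+// string; the Kotlin side converts it and frees it via dispose_char.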
+char *tensor_to_string(TorchTensorHandle tensor_handle)
+{
+    std::stringstream bufrep;
+    bufrep << ctorch::cast(tensor_handle);
+    auto rep = bufrep.str();
+    char *crep = (char *)malloc(rep.length() + 1);
+    std::strcpy(crep, rep.c_str());
+    return crep;
+}
+
+void dispose_int_array(int *ptr)
+{
+    free(ptr);
+}
+
+void dispose_char(char *ptr)
+{
+    free(ptr);
+}
+
+void dispose_tensor(TorchTensorHandle tensor_handle)
+{
+    delete static_cast<torch::Tensor *>(tensor_handle);
+}
\ No newline at end of file

diff --git a/kmath-torch/src/nativeInterop/cinterop/libctorch.def b/kmath-torch/src/nativeInterop/cinterop/libctorch.def
new file mode 100644
index 000000000..638a361fc
--- /dev/null
+++ b/kmath-torch/src/nativeInterop/cinterop/libctorch.def
@@ -0,0 +1,2 @@
+package=ctorch
+headers=ctorch.h
\ No newline at end of file

diff --git a/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensor.kt b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensor.kt
new file mode 100644
index 000000000..71570a184
--- /dev/null
+++ b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensor.kt
@@ -0,0 +1,52 @@
+package kscience.kmath.torch
+
+import kscience.kmath.structures.*
+
+import kotlinx.cinterop.*
+import ctorch.*
+
+public abstract class TorchTensor<T, TorchTensorBufferImpl : TorchTensorBuffer<T>> :
+    MutableNDBufferTrait<T, TorchTensorBufferImpl, TorchTensorStrides>() {
+
+    public companion object {
+        public fun copyFromFloatArray(scope: DeferScope, array: FloatArray, shape: IntArray): TorchTensorFloat {
+            val tensorHandle: COpaquePointer = copy_from_blob_float(
+                array.toCValues(), shape.toCValues(), shape.size
+            )!!
+            return TorchTensorFloat(populateStridesFromNative(tensorHandle, rawShape = shape), scope, tensorHandle)
+        }
+        public fun copyFromIntArray(scope: DeferScope, array: IntArray, shape: IntArray): TorchTensorInt {
+            val tensorHandle: COpaquePointer = copy_from_blob_int(
+                array.toCValues(), shape.toCValues(), shape.size
+            )!!
+            return TorchTensorInt(populateStridesFromNative(tensorHandle, rawShape = shape), scope, tensorHandle)
+        }
+    }
+
+    override fun toString(): String {
+        val nativeStringRepresentation: CPointer<ByteVar> = tensor_to_string(buffer.tensorHandle)!!
+        val stringRepresentation = nativeStringRepresentation.toKString()
+        dispose_char(nativeStringRepresentation)
+        return stringRepresentation
+    }
+
+}
+
+public class TorchTensorFloat internal constructor(
+    override val strides: TorchTensorStrides,
+    scope: DeferScope,
+    tensorHandle: COpaquePointer
+): TorchTensor<Float, TorchTensorBufferFloat>() {
+    override val buffer: TorchTensorBufferFloat = TorchTensorBufferFloat(scope, tensorHandle)
+}
+
+public class TorchTensorInt internal constructor(
+    override val strides: TorchTensorStrides,
+    scope: DeferScope,
+    tensorHandle: COpaquePointer
+): TorchTensor<Int, TorchTensorBufferInt>() {
+    override val buffer: TorchTensorBufferInt = TorchTensorBufferInt(scope, tensorHandle)
+}
+

diff --git a/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorBuffer.kt b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorBuffer.kt
new file mode 100644
index 000000000..c162ef451
--- /dev/null
+++ b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorBuffer.kt
@@ -0,0 +1,67 @@
+package kscience.kmath.torch
+
+import kscience.kmath.structures.MutableBuffer
+
+import kotlinx.cinterop.*
+import ctorch.*
+
+public abstract class TorchTensorBuffer<T> internal constructor(
+    internal val scope: DeferScope,
+    internal val tensorHandle: COpaquePointer
+) : MutableBuffer<T> {
+    init {
+        scope.defer(::close)
+    }
+
+    internal fun close() {
+        dispose_tensor(tensorHandle)
+    }
+
+    protected abstract val tensorData: CPointer<out CPointed>
+
+    override val size: Int
+        get() = get_numel(tensorHandle)
+
+}
+
+
+public class TorchTensorBufferFloat internal constructor(
+    scope: DeferScope,
+    tensorHandle: COpaquePointer
+) : TorchTensorBuffer<Float>(scope, tensorHandle) {
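+    // Raw pointer into the tensor's contiguous storage, fetched once; it
+    // remains valid while the native tensor is alive (disposal is deferred
+    // to the enclosing scope).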
+    override val tensorData: CPointer<FloatVar> = get_data_float(tensorHandle)!!
+
+    override operator fun get(index: Int): Float = tensorData[index]
+
+    override operator fun set(index: Int, value: Float) {
+        tensorData[index] = value
+    }
+
+    override operator fun iterator(): Iterator<Float> = (1..size).map { tensorData[it - 1] }.iterator()
+
+    override fun copy(): TorchTensorBufferFloat = TorchTensorBufferFloat(
+        scope = scope,
+        tensorHandle = copy_tensor(tensorHandle)!!
+    )
+}
+
+public class TorchTensorBufferInt internal constructor(
+    scope: DeferScope,
+    tensorHandle: COpaquePointer
+) : TorchTensorBuffer<Int>(scope, tensorHandle) {
+    override val tensorData: CPointer<IntVar> = get_data_int(tensorHandle)!!
+
+    override operator fun get(index: Int): Int = tensorData[index]
+
+    override operator fun set(index: Int, value: Int) {
+        tensorData[index] = value
+    }
+
+    override operator fun iterator(): Iterator<Int> = (1..size).map { tensorData[it - 1] }.iterator()
+
+    override fun copy(): TorchTensorBufferInt = TorchTensorBufferInt(
+        scope = scope,
+        tensorHandle = copy_tensor(tensorHandle)!!
+    )
+}
+

diff --git a/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorStrides.kt b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorStrides.kt
new file mode 100644
index 000000000..f9602073b
--- /dev/null
+++ b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/TorchTensorStrides.kt
@@ -0,0 +1,55 @@
+package kscience.kmath.torch
+
+import kscience.kmath.structures.Strides
+
+import kotlinx.cinterop.*
+import ctorch.*
+
+public class TorchTensorStrides internal constructor(
+    override val shape: IntArray,
+    override val strides: IntArray,
+    override val linearSize: Int
+) : Strides {
+    override fun index(offset: Int): IntArray {
+        val nDim = shape.size
+        val res = IntArray(nDim)
+        var current = offset
+        var strideIndex = 0
+
+        while (strideIndex < nDim) {
+            res[strideIndex] = (current / strides[strideIndex])
+            current %= strides[strideIndex]
+            strideIndex++
+        }
+        return res
+    }
+}
+
+
+private inline fun intPointerToArrayAndClean(ptr: CPointer<IntVar>, nDim: Int): IntArray {
+    val res: IntArray = (1..nDim).map { ptr[it - 1] }.toIntArray()
+    dispose_int_array(ptr)
+    return res
+}
+
+private inline fun getShapeFromNative(tensorHandle: COpaquePointer, nDim: Int): IntArray {
+    return intPointerToArrayAndClean(get_shape(tensorHandle)!!, nDim)
+}
+
+private inline fun getStridesFromNative(tensorHandle: COpaquePointer, nDim: Int): IntArray {
+    return intPointerToArrayAndClean(get_strides(tensorHandle)!!, nDim)
+}
+
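+// Shape, strides and element count are read back from the native tensor,
+// since LibTorch owns the memory layout; already-known values can be passed
+// in to skip the extra native calls.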
+internal inline fun populateStridesFromNative(
+    tensorHandle: COpaquePointer,
+    rawShape: IntArray? = null,
+    rawStrides: IntArray? = null,
+    rawLinearSize: Int? = null
+): TorchTensorStrides {
+    val nDim = rawShape?.size ?: rawStrides?.size ?: get_dim(tensorHandle)
+    return TorchTensorStrides(
+        shape = rawShape ?: getShapeFromNative(tensorHandle, nDim),
+        strides = rawStrides ?: getStridesFromNative(tensorHandle, nDim),
+        linearSize = rawLinearSize ?: get_numel(tensorHandle)
+    )
+}
\ No newline at end of file

diff --git a/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/Utils.kt b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/Utils.kt
new file mode 100644
index 000000000..086231687
--- /dev/null
+++ b/kmath-torch/src/nativeMain/kotlin/kscience/kmath/torch/Utils.kt
@@ -0,0 +1,20 @@
+package kscience.kmath.torch
+
+import kotlinx.cinterop.*
+import ctorch.*
+
+public fun getNumThreads(): Int {
+    return get_num_threads()
+}
+
+public fun setNumThreads(numThreads: Int): Unit {
+    set_num_threads(numThreads)
+}
+
+public fun cudaAvailable(): Boolean {
+    return cuda_is_available()
+}
+
+public fun setSeed(seed: Int): Unit {
+    set_seed(seed)
+}
\ No newline at end of file

diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
new file mode 100644
index 000000000..06a2f6d4c
--- /dev/null
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestTorchTensor.kt
@@ -0,0 +1,33 @@
+package kscience.kmath.torch
+
+import kscience.kmath.structures.asBuffer
+
+import kotlinx.cinterop.memScoped
+import kotlin.test.*
+
+
+internal class TestTorchTensor {
+
+    @Test
+    fun intTensorLayout() = memScoped {
+        val array = intArrayOf(7,8,9,2,6,5)
+        val shape = intArrayOf(3,2)
+        val tensor = TorchTensor.copyFromIntArray(scope=this, array=array, shape=shape)
+        tensor.elements().forEach {
+            assertEquals(tensor[it.first], it.second)
+        }
+        assertTrue(tensor.buffer.contentEquals(array.asBuffer()))
+    }
+
+    @Test
+    fun floatTensorLayout() = memScoped {
+        val array = floatArrayOf(7.5f,8.2f,9f,2.58f,6.5f,5f)
+        val shape = intArrayOf(2,3)
+        val tensor = TorchTensor.copyFromFloatArray(this, array, shape)
+        tensor.elements().forEach {
+            assertEquals(tensor[it.first], it.second)
+        }
+        assertTrue(tensor.buffer.contentEquals(array.asBuffer()))
+    }
+
+}
\ No newline at end of file

diff --git a/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt
new file mode 100644
index 000000000..3a174b186
--- /dev/null
+++ b/kmath-torch/src/nativeTest/kotlin/kscience/kmath/torch/TestUtils.kt
@@ -0,0 +1,19 @@
+package kscience.kmath.torch
+
+import kotlin.test.Test
+import kotlin.test.assertEquals
+import kotlin.test.assertTrue
+
+
+internal class TestUtils {
+    @Test
+    fun settingTorchThreadsCount() {
+        val numThreads = 2
+        setNumThreads(numThreads)
+        assertEquals(numThreads, getNumThreads())
+    }
+    @Test
+    fun cudaAvailability() {
+        assertTrue(cudaAvailable())
+    }
+}
\ No newline at end of file

diff --git a/settings.gradle.kts b/settings.gradle.kts
index da33fea59..a197ca42f 100644
--- a/settings.gradle.kts
+++ b/settings.gradle.kts
@@ -42,3 +42,7 @@ include(
     ":kmath-kotlingrad",
     ":examples"
 )
+
+if (System.getProperty("os.name") == "Linux") {
+    include(":kmath-torch")
+}
\ No newline at end of file
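
For reference, a minimal standalone entry point wired to the README's example build script could look like this (the package name `your.app` is the placeholder used there; everything else is the API introduced by this patch):

```kotlin
package your.app

import kotlinx.cinterop.memScoped
import kscience.kmath.torch.*

fun main() {
    // Global LibTorch configuration exposed through Utils.kt.
    setNumThreads(2)
    println("Torch threads: ${getNumThreads()}")
    println("CUDA available: ${cudaAvailable()}")

    // Tensors are scoped: the native memory is released when memScoped exits.
    memScoped {
        val tensor = TorchTensor.copyFromFloatArray(
            scope = this,
            array = floatArrayOf(1f, 2f, 3f, 4f),
            shape = intArrayOf(2, 2))
        println(tensor)
    }
}
```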