Compare commits

...

86 Commits

Author | SHA1 | Message | Date
Roland Grinis | 0817efbc4b | latest dev | 2022-05-21 21:06:10 +01:00
Roland Grinis | a35bfad44a | DL optimisers | 2022-05-21 21:00:45 +01:00
Anastasia Golovina | 3c92bfda59 | add options for optimizers | 2022-05-09 18:22:05 +03:00
Anastasia Golovina | b288c4ce59 | add adamw, rms, adagrad, sgd optimizers | 2022-04-25 22:17:07 +03:00
Roland Grinis | c53bdd38f8 | comments | 2022-02-21 11:52:10 +00:00
Roland Grinis | c2c8d80b40 | remove performance pitfall annotation | 2022-02-20 14:50:36 +00:00
Roland Grinis | f3a411f0e2 | Merge branch 'dev' into feature/noa | 2022-02-20 14:33:30 +00:00
Roland Grinis | 08f7af6d41 | sync with dev | 2022-02-07 09:27:35 +00:00
Roland Grinis | 4beabb3a5d | Merge branch 'dev' into feature/noa | 2022-02-07 09:15:50 +00:00
Roland Grinis | 9ca896b608 | moved to torch 1.10.2 | 2022-02-07 09:15:03 +00:00
Roland Grinis | 705b27aa4b | Merge branch 'dev' into feature/noa | 2021-11-01 18:50:05 +00:00
Roland Grinis | 1e7ee53c82 | more updates | 2021-11-01 17:59:21 +00:00
Roland Grinis | 4216a43470 | latest dev | 2021-11-01 17:36:24 +00:00
Roland Grinis | a9da25875b | fixed CPU only installation | 2021-11-01 16:12:50 +00:00
Roland Grinis | 40b9066dd0 | minor update in docs | 2021-08-25 09:09:55 +01:00
Roland Grinis | 3b6e80f5b6 | default to Clang for building bindings | 2021-08-10 06:51:46 +01:00
Roland Grinis | 84570549e2 | fix build script | 2021-08-03 09:38:48 +01:00
Roland Grinis | 79de6700e7 | update publish local instructions | 2021-08-02 10:10:15 +01:00
Roland Grinis | 3660c7f217 | clarifying parameters | 2021-08-02 09:34:44 +01:00
Roland Grinis | 927916a01f | clang support | 2021-08-01 22:04:08 +01:00
Roland Grinis | 633eb7ad4e | fix error in usage docs | 2021-08-01 17:44:59 +01:00
Roland Grinis | 371674c9d3 | slicing API changed | 2021-08-01 14:57:02 +01:00
Roland Grinis | bc43afe93b | update build | 2021-08-01 09:19:00 +01:00
Roland Grinis | 3d1a3e3b69 | advanced slicing | 2021-07-31 21:23:15 +01:00
Roland Grinis | c6acedf9e0 | new functionality for primitive arrays | 2021-07-31 18:37:42 +01:00
Roland Grinis | 36bc127260 | tensors and modules serialisation | 2021-07-30 22:33:43 +01:00
Roland Grinis | 686baa6517 | typo in readme | 2021-07-14 08:26:00 +01:00
Roland Grinis | 3b0032cb2f | more typos | 2021-07-13 20:00:36 +01:00
Roland Grinis | cc4da94646 | typo corrected | 2021-07-13 19:58:22 +01:00
Roland Grinis | 046d26b17a | reference to NOA docs | 2021-07-13 19:55:36 +01:00
Roland Grinis | b948ca57cd | add description | 2021-07-13 19:50:57 +01:00
Roland Grinis | cccff29378 | deep learning example | 2021-07-13 19:48:21 +01:00
Roland Grinis | 0bc2c12a05 | testing autograd | 2021-07-13 15:12:19 +01:00
Roland Grinis | 28fac22f12 | testing algebra | 2021-07-13 14:20:13 +01:00
Roland Grinis | ea6cd01b89 | tensors testing | 2021-07-13 13:56:34 +01:00
Roland Grinis | face60824d | testing copying | 2021-07-13 13:08:00 +01:00
Roland Grinis | d303c912d6 | testing seed setting | 2021-07-13 12:45:07 +01:00
Roland Grinis | a33af9ec94 | fixed initial tests | 2021-07-13 12:19:03 +01:00
Roland Grinis | 93768ed2a7 | failing tests | 2021-07-13 11:54:33 +01:00
Roland Grinis | c09da54cc9 | clarification | 2021-07-13 09:42:40 +01:00
Roland Grinis | 183d34e01e | update installation instructions | 2021-07-13 09:37:06 +01:00
Roland Grinis | 6b3b8aa6ae | dependencies fixed | 2021-07-12 21:12:02 +01:00
Roland Grinis | 3dd915e2fb | optimiser | 2021-07-12 20:39:29 +01:00
Roland Grinis | 1029871047 | data for jit modules | 2021-07-12 17:18:02 +01:00
Roland Grinis | 09923a6c22 | update JNI header | 2021-07-12 15:48:46 +01:00
Roland Grinis | e4300d0530 | jit modules | 2021-07-12 15:48:07 +01:00
Roland Grinis | 1ad20cb143 | readme updates | 2021-07-11 19:01:50 +01:00
Roland Grinis | 06bc8fecf6 | LU decomp doc in NOA | 2021-07-10 22:21:20 +01:00
Roland Grinis | 2a29e66daa | update LU docs | 2021-07-10 22:15:33 +01:00
Roland Grinis | c7de0bc4ee | flatten refactor | 2021-07-10 17:53:41 +01:00
Roland Grinis | a0b72f519b | bug in div | 2021-07-10 16:47:10 +01:00
Roland Grinis | 6d5e4a5776 | inheritance is back | 2021-07-10 15:22:15 +01:00
Roland Grinis | e6e117f694 | remove get index tensor | 2021-07-10 14:26:13 +01:00
Roland Grinis | b8ff5938ff | public casting | 2021-07-09 10:12:30 +01:00
Roland Grinis | 68d0e9958f | renaming | 2021-07-09 09:36:39 +01:00
Roland Grinis | 00a04a1931 | Noa scopes runners | 2021-07-09 09:25:19 +01:00
Roland Grinis | 7744880ce7 | safe scopes | 2021-07-09 08:46:57 +01:00
Roland Grinis | 6384182593 | informative error message | 2021-07-09 08:25:51 +01:00
Roland Grinis | 1af6dbbb78 | minor corrections | 2021-07-09 08:16:33 +01:00
Roland Grinis | d8c4b84ddc | relying to Java library path to load JNoa | 2021-07-09 08:14:27 +01:00
Roland Grinis | 80879d3736 | tensor casting | 2021-07-09 07:55:15 +01:00
Roland Grinis | bea6ed4d65 | fix folding for DoubleTensor | 2021-07-09 07:36:18 +01:00
Roland Grinis | 280c4e97e2 | algebras | 2021-07-08 23:20:17 +01:00
Roland Grinis | 62b3ccd111 | NoaDoubleAlgebra | 2021-07-08 23:10:59 +01:00
Roland Grinis | 5da017ec2e | linear algebra | 2021-07-08 22:54:27 +01:00
Roland Grinis | f328c7f266 | analytic algebra | 2021-07-08 22:42:59 +01:00
Roland Grinis | 0088be99f5 | basics for div algebra | 2021-07-08 22:09:53 +01:00
Roland Grinis | 803a88ac2c | NoaTensorAlgebra | 2021-07-08 21:37:13 +01:00
Roland Grinis | 773ff10dd1 | Fix argmax for tensors | 2021-07-08 21:08:20 +01:00
Roland Grinis | b2b063196d | argmax fixed | 2021-07-08 12:30:27 +01:00
Roland Grinis | e80e1dcf62 | argMax to core | 2021-07-08 12:13:21 +01:00
Roland Grinis | 623c96e4bb | basic algebra ops | 2021-07-08 11:03:20 +01:00
Roland Grinis | 2a97aca9b6 | value method for algebra | 2021-07-08 10:57:08 +01:00
Roland Grinis | 8f32397f50 | copying utilities for tensors | 2021-07-08 09:43:55 +01:00
Roland Grinis | 40d8a05bb0 | tensors implementation | 2021-07-08 09:37:16 +01:00
Roland Grinis | 78b1cd41da | disposable pattern | 2021-07-07 11:58:48 +01:00
Roland Grinis | cadcb9916f | threads setting | 2021-07-06 19:10:13 +01:00
Roland Grinis | e17fe32ae2 | testing in progress | 2021-07-06 12:50:52 +01:00
Roland Grinis | cd362c749e | testing native exceptions | 2021-07-05 20:20:44 +01:00
Roland Grinis | 8e4f7ffce6 | Fetching JNoa | 2021-07-05 19:47:51 +01:00
Roland Grinis | 675ad089fa | clang and exceptions | 2021-07-05 10:34:01 +01:00
Roland Grinis | f4f5d65bd8 | update installation procedure | 2021-06-28 11:39:19 +01:00
Roland Grinis | 8872263f45 | moving to kmath-tensors to implementation | 2021-06-27 23:04:38 +01:00
Roland Grinis | 95d2b5b8d9 | initial build | 2021-06-27 22:12:02 +01:00
Roland Grinis | 84937ecaca | update instructions | 2021-06-27 16:28:28 +01:00
Roland Grinis | adfb541fff | initial commit for noa module | 2021-06-25 22:36:01 +01:00
27 changed files with 6205 additions and 3 deletions

kmath-noa/README.md (new file)

@@ -0,0 +1,264 @@
# Module kmath-noa
A general-purpose differentiable programming library over
[NOA](https://github.com/grinisrit/noa.git)
together with relevant functionality from
[LibTorch](https://pytorch.org/cppdocs).
Our aim is to cover a wide set of applications
from Bayesian computation and deep learning to particle physics
simulations. In fact, we support any
differentiable program written on top of
`AutoGrad` & `ATen`.
## Installation from source
Currently, we support only the Linux platform for the native artifacts.
For `GPU` kernels, we require a compatible
[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html)
installation. If you are on Windows, we recommend setting up
everything on [WSL](https://docs.nvidia.com/cuda/wsl-user-guide/index.html).
To install the library, you can simply publish `KMath` to the local
Maven repository:
```
$ ./gradlew -Dorg.gradle.java.home=/path/to/local/jdk -q publishToMavenLocal
```
This will fetch and build the `JNI` wrapper `jnoa`.
The library has been tested with
[graalvm-ce-java11-linux-amd64-22.0.0.2](https://github.com/graalvm/graalvm-ce-builds/releases/tag/vm-22.0.0.2)
In your own application, add the local dependency:
```kotlin
repositories {
    mavenCentral()
    mavenLocal()
}

dependencies {
    implementation("space.kscience:kmath-noa:0.3.0-dev-17")
}
```
To load the native library, you will need to add the following to the VM options:
```
-Djava.library.path=${HOME}/.kmath/third-party/noa-v0.0.1/cpp-build/jnoa
```
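If you launch your application through Gradle, the same option can be supplied from the build script. The following is a minimal sketch using the standard Gradle `application` plugin; the main class name is a placeholder and the path assumes the default installation directory used above:
```kotlin
plugins {
    kotlin("jvm")
    application
}

application {
    // Placeholder entry point: replace with your own main class
    mainClass.set("com.example.MainKt")
    // Point the JVM at the JNoa shared library built by kmath-noa
    applicationDefaultJvmArgs = listOf(
        "-Djava.library.path=${System.getProperty("user.home")}/.kmath/third-party/noa-v0.0.1/cpp-build/jnoa"
    )
}
```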
## Usage
The library is under active development. Many more features
will be available soon.
### Tensors and Linear Algebra
We implement the tensor algebra interfaces
from [kmath-tensors](../kmath-tensors):
```kotlin
NoaFloat {
    val tensor =
        randNormal(
            shape = intArrayOf(7, 5, 3),
            device = Device.CPU) // or Device.CUDA(0) for GPU

    // Compute SVD
    val (tensorU, tensorS, tensorV) = tensor.svd()

    // Reconstruct tensor
    val tensorReg =
        tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1))

    // Serialise tensor for later
    tensorReg.save("tensorReg.pt")
}
```
The saved tensor can be loaded in `C++` or in `python`:
```python
import torch
tensor_reg = list(torch.jit.load('tensorReg.pt').parameters())[0]
```
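The tensor can also be read back on the JVM side via `loadTensor`, which is available on the algebra contexts; a minimal sketch reusing the file saved above:
```kotlin
NoaFloat {
    // Load the serialised tensor back into the current scope
    val restored = loadTensor("tensorReg.pt", Device.CPU)
    // Copy its contents into a JVM primitive array
    val data = restored.copyToArray()
    println(data.size)
}
```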
The most efficient way of passing data between the `JVM` and the native backend
is to rely on primitive arrays:
```kotlin
val array = (1..8).map { 100f * it }.toFloatArray()
val updateArray = floatArrayOf(15f, 20f)
val resArray = NoaFloat {
    val tensor = copyFromArray(array, intArrayOf(2, 2, 2))
    NoaFloat {
        // The call `tensor[0]` creates a native tensor instance pointing to a slice of `tensor`
        // The second call `[1]` is a setter call and does not create any new instances
        tensor[0][1] = updateArray
        // The instance `tensor[0]` is destroyed as we move out of the scope
    }!! // if the computation fails the result will be null
    tensor.copyToArray()
    // the instance `tensor` is destroyed here
}!!
```
### Automatic Differentiation
The [AutoGrad](https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html)
engine is exposed:
```kotlin
NoaFloat {
    // Create a quadratic function
    val dim = 3
    val tensorX = randNormal(shape = intArrayOf(dim))
    val randFeatures = randNormal(shape = intArrayOf(dim, dim))
    val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
    val tensorMu = randNormal(shape = intArrayOf(dim))

    // Create a differentiable expression
    val expressionAtX = withGradAt(tensorX) { x ->
        0.5f * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9f
    }

    // Evaluate the gradient at tensorX,
    // retaining the graph for the hessian computation
    val gradientAtX = expressionAtX.autoGradient(tensorX, retainGraph = true)

    // Compute the hessian at tensorX
    val hessianAtX = expressionAtX.autoHessian(tensorX)
}
```
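For this particular quadratic expression the derivatives are known in closed form: with a symmetric `tensorSigma`, the gradient is `tensorSigma dot x + tensorMu` and the hessian is `tensorSigma` itself, so the autograd output can be sanity-checked directly. A minimal sketch of such a check, repeating the setup above:
```kotlin
NoaFloat {
    val dim = 3
    val tensorX = randNormal(shape = intArrayOf(dim))
    val randFeatures = randNormal(shape = intArrayOf(dim, dim))
    val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
    val tensorMu = randNormal(shape = intArrayOf(dim))

    val expressionAtX = withGradAt(tensorX) { x ->
        0.5f * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9f
    }
    val gradientAtX = expressionAtX.autoGradient(tensorX, retainGraph = true)

    // Analytic gradient of 0.5 * x^T S x + mu^T x + c for symmetric S
    val expectedGradient = (tensorSigma dot tensorX) + tensorMu

    // Should print a value close to zero
    println((gradientAtX - expectedGradient).abs().sum())
}
```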
### Deep Learning
You can train any [TorchScript](https://pytorch.org/docs/stable/jit.html) model.
For example, you can build in `python` the following neural network
and prepare the training data:
```python
import torch
n_tr = 7
n_val = 300
x_val = torch.linspace(-5, 5, n_val).view(-1, 1)
y_val = torch.sin(x_val)
x_train = torch.linspace(-3.14, 3.14, n_tr).view(-1, 1)
y_train = torch.sin(x_train) + torch.randn_like(x_train) * 0.1
class Data(torch.nn.Module):
    def __init__(self):
        super(Data, self).__init__()
        self.register_buffer('x_val', x_val)
        self.register_buffer('y_val', y_val)
        self.register_buffer('x_train', x_train)
        self.register_buffer('y_train', y_train)

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = torch.nn.Linear(1, 10, bias = True)
        self.l2 = torch.nn.Linear(10, 10, bias = True)
        self.l3 = torch.nn.Linear(10, 1, bias = True)

    def forward(self, x):
        x = self.l1(x)
        x = torch.relu(x)
        x = self.l2(x)
        x = torch.relu(x)
        x = self.l3(x)
        return x

class Loss(torch.nn.Module):
    def __init__(self, target):
        super(Loss, self).__init__()
        self.register_buffer('target', target)
        self.loss = torch.nn.MSELoss()

    def forward(self, x):
        return self.loss(x, self.target)
# Generate TorchScript modules and serialise them
torch.jit.script(Data()).save('data.pt')
torch.jit.script(Net()).save('net.pt')
torch.jit.script(Loss(y_train)).save('loss.pt')
```
You can then load the modules into `kotlin` and train them:
```kotlin
NoaFloat {
    // Load the serialised JIT modules
    // The training data
    val dataModule = loadJitModule("data.pt")
    // The DL model
    val netModule = loadJitModule("net.pt")
    // The loss function
    val lossModule = loadJitModule("loss.pt")

    // Get the tensors from the module
    val xTrain = dataModule.getBuffer("x_train")
    val yTrain = dataModule.getBuffer("y_train")
    val xVal = dataModule.getBuffer("x_val")
    val yVal = dataModule.getBuffer("y_val")

    // Set the model in training mode
    netModule.train(true)
    // Loss function for training
    lossModule.setBuffer("target", yTrain)

    // Compute the predictions
    val yPred = netModule.forward(xTrain)
    // Compute the training loss
    val loss = lossModule.forward(yPred)
    println(loss)

    // Set up the Adam optimiser with learning rate 0.005
    val optimiser = netModule.adamOptimiser(0.005)

    // Train for 250 epochs
    repeat(250) {
        // Clean gradients
        optimiser.zeroGrad()
        // Use forwardAssign for better memory management
        netModule.forwardAssign(xTrain, yPred)
        lossModule.forwardAssign(yPred, loss)
        // Backward pass
        loss.backward()
        // Update model parameters
        optimiser.step()
        if (it % 50 == 0)
            println("Training loss: $loss")
    }

    // Finally, validate the model
    // Compute the predictions for the validation features
    netModule.forwardAssign(xVal, yPred)
    // Set the loss for validation
    lossModule.setBuffer("target", yVal)
    // Compute the loss on the validation dataset
    lossModule.forwardAssign(yPred, loss)
    println("Validation loss: $loss")

    // The model can be serialised in its current state
    netModule.save("trained_net.pt")
}
```
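The other optimisers added in this change set (SGD, RMSprop, AdamW, Adagrad) are created the same way from a JIT module and follow the same pattern as the Adam optimiser above. A sketch for plain stochastic gradient descent with illustrative, untuned hyperparameters, assuming `SgdOptimiser` exposes the same `zeroGrad`/`step` calls as `AdamOptimiser`:
```kotlin
NoaFloat {
    val netModule = loadJitModule("net.pt")
    netModule.train(true)

    // Illustrative hyperparameters, not tuned for this problem
    val optimiser = netModule.sgdOptimiser(
        learningRate = 0.01,
        momentum = 0.9,
        dampening = 0.0,
        weightDecay = 0.0,
        nesterov = true
    )

    // Same loop structure as with the Adam optimiser above
    optimiser.zeroGrad()
    // ... forward pass, loss.backward() ...
    optimiser.step()
}
```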
### Custom memory management
Native memory management relies on scoping
with [NoaScope](src/main/kotlin/space/kscience/kmath/noa/memory/NoaScope.kt)
which is readily available within an algebra context.
Manual management is also possible:
```kotlin
// Create a scope
val scope = NoaScope()

val tensor = NoaFloat(scope) {
    full(5f, intArrayOf(1))
}!! // the result might be null

// If the computation fails, resources will be freed automatically.
// Otherwise it's your responsibility:
scope.disposeAll()

// Attempting to use `tensor` here is undefined behaviour
```
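Since the scope is an explicit object, it can also be shared across several algebra invocations and released once at the end; a minimal sketch along the lines of the manual example above:
```kotlin
val scope = NoaScope()

// Both computations register their native tensors in the same scope
val ones = NoaFloat(scope) { full(1f, intArrayOf(3)) }!!
val twos = NoaFloat(scope) { ones + ones }!!

// Release every native tensor created above in one go
scope.disposeAll()
```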
For more examples, have a look at the
[NOA](https://github.com/grinisrit/noa) docs.
Contributed by [Roland Grinis](https://github.com/grinisrit)

kmath-noa/build.gradle.kts (new file)

@@ -0,0 +1,200 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
import de.undercouch.gradle.tasks.download.Download
plugins {
kotlin("jvm")
id("ru.mipt.npm.gradle.common")
id("de.undercouch.download")
}
description = "Wrapper for the Differentiable Computation library NOA on top of LibTorch"
dependencies {
implementation(project(":kmath-tensors"))
}
val home: String = System.getProperty("user.home")
val javaHome: String = System.getProperty("java.home")
val thirdPartyDir = "$home/.kmath/third-party/noa-v0.0.1"
val cppBuildDir = "$thirdPartyDir/cpp-build"
val jNoaDir = "$thirdPartyDir/jnoa/noa-kmath" //"$home/devspace/noa"
val cudaHome: String? = System.getenv("CUDA_HOME")
val cudaDefault = file("/usr/local/cuda").exists()
val cudaFound = (cudaHome?.isNotEmpty() ?: false) or cudaDefault //false
val cmakeArchive = "cmake-3.20.5-linux-x86_64"
val torchArchive = "libtorch"
val clangArchive = "clang+llvm-12.0.1-x86_64-linux-gnu-ubuntu-16.04"
val cmakeCmd = "$thirdPartyDir/cmake/$cmakeArchive/bin/cmake"
val ninjaCmd = "$thirdPartyDir/ninja/ninja"
val clangRootDir = "$thirdPartyDir/clang/$clangArchive"
val clangCmd = "$clangRootDir/bin/clang"
val clangxxCmd = "$clangRootDir/bin/clang++"
val generateJNIHeader by tasks.registering {
doLast {
exec {
workingDir(projectDir.resolve("src/main/java/space/kscience/kmath/noa"))
commandLine(
"$javaHome/bin/javac", "-h",
projectDir.resolve("src/main/resources"), "JNoa.java"
)
}
}
}
val downloadCMake by tasks.registering(Download::class) {
val tarFile = "$cmakeArchive.tar.gz"
src("https://github.com/Kitware/CMake/releases/download/v3.20.5/$tarFile")
dest(File("$thirdPartyDir/cmake", tarFile))
overwrite(false)
}
val downloadNinja by tasks.registering(Download::class) {
src("https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip")
dest(File("$thirdPartyDir/ninja", "ninja-linux.zip"))
overwrite(false)
}
val downloadClang by tasks.registering(Download::class) {
val tarFile = "$clangArchive.tar.xz"
src("https://github.com/llvm/llvm-project/releases/download/llvmorg-12.0.1/$tarFile")
dest(File("$thirdPartyDir/clang", tarFile))
overwrite(false)
}
val downloadTorch by tasks.registering(Download::class) {
val torchVersion = "$torchArchive-shared-with-deps-1.10.2%2B"
val cudaUrl = "https://download.pytorch.org/libtorch/cu113/${torchVersion}cu113.zip"
val cpuUrl = "https://download.pytorch.org/libtorch/cpu/${torchVersion}cpu.zip"
val url = if (cudaFound) cudaUrl else cpuUrl
src(url)
dest(File("$thirdPartyDir/torch", "$torchArchive.zip"))
overwrite(false)
}
fun downloadJNoaHelper(update: Boolean) = tasks.registering(Download::class) {
src("https://github.com/grinisrit/noa/archive/refs/heads/kmath.zip")
dest(File("$thirdPartyDir/jnoa", "kmath.zip"))
overwrite(update)
}
val downloadJNoa by downloadJNoaHelper(false)
val reDownloadJNoa by downloadJNoaHelper(true)
val extractCMake by tasks.registering(Copy::class) {
dependsOn(downloadCMake)
from(tarTree(resources.gzip(downloadCMake.get().dest)))
into("$thirdPartyDir/cmake")
}
val extractNinja by tasks.registering(Copy::class) {
dependsOn(downloadNinja)
from(zipTree(downloadNinja.get().dest))
into("$thirdPartyDir/ninja")
}
val extractClang by tasks.registering {
dependsOn(downloadClang)
onlyIf { !file(clangRootDir).exists() }
doLast {
exec {
workingDir("$thirdPartyDir/clang")
commandLine("mkdir", clangArchive)
}
exec {
workingDir("$thirdPartyDir/clang")
commandLine("tar", "-xf", "$clangArchive.tar.xz",
"-C", clangArchive, "--strip-components", "1")
}
}
}
val extractTorch by tasks.registering(Copy::class) {
dependsOn(downloadTorch)
from(zipTree(downloadTorch.get().dest))
into("$thirdPartyDir/torch")
}
val extractJNoa by tasks.registering(Copy::class) {
dependsOn(downloadJNoa)
from(zipTree(downloadJNoa.get().dest))
into("$thirdPartyDir/jnoa")
}
val configureCpp by tasks.registering {
dependsOn(extractCMake)
dependsOn(extractNinja)
dependsOn(extractClang)
dependsOn(extractTorch)
dependsOn(extractJNoa)
onlyIf { !file(cppBuildDir).exists() }
doLast {
exec {
workingDir(thirdPartyDir)
commandLine("mkdir", "-p", cppBuildDir)
}
exec {
workingDir(cppBuildDir)
commandLine(
cmakeCmd,
jNoaDir,
"-GNinja",
"-DCMAKE_MAKE_PROGRAM=$ninjaCmd",
"-DCMAKE_C_COMPILER=$clangCmd",
"-DCMAKE_CXX_COMPILER=$clangxxCmd",
"-DCMAKE_PREFIX_PATH=$thirdPartyDir/torch/$torchArchive",
"-DJAVA_HOME=$javaHome",
"-DBUILD_JNOA=ON",
"-DCMAKE_BUILD_TYPE=Release",
"-DBUILD_NOA_TESTS=OFF",
"-DBUILD_NOA_BENCHMARKS=OFF",
"-DINSTALL_NOA=OFF"
)
}
}
}
val cleanCppBuild by tasks.registering {
onlyIf { file(cppBuildDir).exists() }
doLast {
exec {
workingDir(thirdPartyDir)
commandLine("rm", "-rf", cppBuildDir)
}
}
}
val buildCpp by tasks.registering {
dependsOn(configureCpp)
doLast {
exec {
workingDir(cppBuildDir)
commandLine(cmakeCmd, "--build", ".", "--config", "Release", "--target", "jnoa")
}
}
}
tasks["compileJava"].dependsOn(buildCpp)
tasks {
withType<Test>{
systemProperty("java.library.path", "$cppBuildDir/jnoa")
//systemProperty("java.library.path",
// "${System.getProperty("user.home")}/devspace/noa/cmake-build-release/jnoa")
}
}
readme {
maturity = ru.mipt.npm.gradle.Maturity.PROTOTYPE
}

kmath-noa/src/main/java/space/kscience/kmath/noa/JNoa.java (new file)

@@ -0,0 +1,407 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa;
class JNoa {
static {
try {
System.loadLibrary("jnoa");
} catch (UnsatisfiedLinkError e) {
System.err.println(
"Failed to load the native library NOA:\n" +
" - Follow the installation instructions from\n" +
" https://github.com/grinisrit/noa \n" +
" - Set java.library.path to the location of libjnoa.so");
System.exit(1);
}
}
public static native int testException(int seed);
public static native boolean cudaIsAvailable();
public static native int getNumThreads();
public static native void setNumThreads(int numThreads);
public static native void setSeed(int seed);
public static native void disposeTensor(long tensorHandle);
public static native long emptyTensor();
public static native long fromBlobDouble(double[] data, int[] shape, int device);
public static native long fromBlobFloat(float[] data, int[] shape, int device);
public static native long fromBlobLong(long[] data, int[] shape, int device);
public static native long fromBlobInt(int[] data, int[] shape, int device);
public static native long copyTensor(long tensorHandle);
public static native long copyToDevice(long tensorHandle, int device);
public static native long copyToDouble(long tensorHandle);
public static native long copyToFloat(long tensorHandle);
public static native long copyToLong(long tensorHandle);
public static native long copyToInt(long tensorHandle);
public static native long viewTensor(long tensorHandle, int[] shape);
public static native long viewAsTensor(long tensorHandle, long asTensorHandle);
public static native String tensorToString(long tensorHandle);
public static native int getDim(long tensorHandle);
public static native int getNumel(long tensorHandle);
public static native int getShapeAt(long tensorHandle, int d);
public static native int getStrideAt(long tensorHandle, int d);
public static native int getDevice(long tensorHandle);
public static native double getItemDouble(long tensorHandle);
public static native float getItemFloat(long tensorHandle);
public static native long getItemLong(long tensorHandle);
public static native int getItemInt(long tensorHandle);
public static native long getIndex(long tensorHandle, int index);
public static native double getDouble(long tensorHandle, int[] index);
public static native float getFloat(long tensorHandle, int[] index);
public static native long getLong(long tensorHandle, int[] index);
public static native int getInt(long tensorHandle, int[] index);
public static native void setDouble(long tensorHandle, int[] index, double value);
public static native void setFloat(long tensorHandle, int[] index, float value);
public static native void setLong(long tensorHandle, int[] index, long value);
public static native void setInt(long tensorHandle, int[] index, int value);
public static native long randDouble(int[] shape, int device);
public static native long randnDouble(int[] shape, int device);
public static native long randFloat(int[] shape, int device);
public static native long randnFloat(int[] shape, int device);
public static native long randintDouble(long low, long high, int[] shape, int device);
public static native long randintFloat(long low, long high, int[] shape, int device);
public static native long randintLong(long low, long high, int[] shape, int device);
public static native long randintInt(long low, long high, int[] shape, int device);
public static native long randLike(long tensorHandle);
public static native void randLikeAssign(long tensorHandle);
public static native long randnLike(long tensorHandle);
public static native void randnLikeAssign(long tensorHandle);
public static native long randintLike(long tensorHandle, long low, long high);
public static native void randintLikeAssign(long tensorHandle, long low, long high);
public static native long fullDouble(double value, int[] shape, int device);
public static native long fullFloat(float value, int[] shape, int device);
public static native long fullLong(long value, int[] shape, int device);
public static native long fullInt(int value, int[] shape, int device);
public static native long timesDouble(double value, long other);
public static native long timesFloat(float value, long other);
public static native long timesLong(long value, long other);
public static native long timesInt(int value, long other);
public static native void timesDoubleAssign(double value, long other);
public static native void timesFloatAssign(float value, long other);
public static native void timesLongAssign(long value, long other);
public static native void timesIntAssign(int value, long other);
public static native long plusDouble(double value, long other);
public static native long plusFloat(float value, long other);
public static native long plusLong(long value, long other);
public static native long plusInt(int value, long other);
public static native void plusDoubleAssign(double value, long other);
public static native void plusFloatAssign(float value, long other);
public static native void plusLongAssign(long value, long other);
public static native void plusIntAssign(int value, long other);
public static native long timesTensor(long lhs, long rhs);
public static native void timesTensorAssign(long lhs, long rhs);
public static native long divTensor(long lhs, long rhs);
public static native void divTensorAssign(long lhs, long rhs);
public static native long plusTensor(long lhs, long rhs);
public static native void plusTensorAssign(long lhs, long rhs);
public static native long minusTensor(long lhs, long rhs);
public static native void minusTensorAssign(long lhs, long rhs);
public static native long unaryMinus(long tensorHandle);
public static native long transposeTensor(long tensorHandle, int i, int j);
public static native long absTensor(long tensorHandle);
public static native long expTensor(long tensorHandle);
public static native long lnTensor(long tensorHandle);
public static native long sqrtTensor(long tensorHandle);
public static native long cosTensor(long tensorHandle);
public static native long acosTensor(long tensorHandle);
public static native long coshTensor(long tensorHandle);
public static native long acoshTensor(long tensorHandle);
public static native long sinTensor(long tensorHandle);
public static native long asinTensor(long tensorHandle);
public static native long sinhTensor(long tensorHandle);
public static native long asinhTensor(long tensorHandle);
public static native long tanTensor(long tensorHandle);
public static native long atanTensor(long tensorHandle);
public static native long tanhTensor(long tensorHandle);
public static native long atanhTensor(long tensorHandle);
public static native long ceilTensor(long tensorHandle);
public static native long floorTensor(long tensorHandle);
public static native long sumTensor(long tensorHandle);
public static native long sumDimTensor(long tensorHandle, int dim, boolean keepDim);
public static native long minTensor(long tensorHandle);
public static native long minDimTensor(long tensorHandle, int dim, boolean keepDim);
public static native long maxTensor(long tensorHandle);
public static native long maxDimTensor(long tensorHandle, int dim, boolean keepDim);
public static native long meanTensor(long tensorHandle);
public static native long meanDimTensor(long tensorHandle, int dim, boolean keepDim);
public static native long stdTensor(long tensorHandle);
public static native long stdDimTensor(long tensorHandle, int dim, boolean keepDim);
public static native long varTensor(long tensorHandle);
public static native long varDimTensor(long tensorHandle, int dim, boolean keepDim);
public static native long argMaxTensor(long tensorHandle, int dim, boolean keepDim);
public static native long flattenTensor(long tensorHandle, int startDim, int endDim);
public static native long matmul(long lhs, long rhs);
public static native void matmulAssign(long lhs, long rhs);
public static native void matmulRightAssign(long lhs, long rhs);
public static native long diagEmbed(long diagsHandle, int offset, int dim1, int dim2);
public static native long detTensor(long tensorHandle);
public static native long invTensor(long tensorHandle);
public static native long choleskyTensor(long tensorHandle);
public static native void qrTensor(long tensorHandle, long Qhandle, long Rhandle);
public static native void luTensor(long tensorHandle, long Phandle, long Lhandle, long Uhandle);
public static native void svdTensor(long tensorHandle, long Uhandle, long Shandle, long Vhandle);
public static native void symEigTensor(long tensorHandle, long Shandle, long Vhandle);
public static native boolean requiresGrad(long tensorHandle);
public static native void setRequiresGrad(long tensorHandle, boolean status);
public static native long detachFromGraph(long tensorHandle);
public static native long autoGradTensor(long value, long variable, boolean retainGraph);
public static native long autoHessTensor(long value, long variable);
public static native void backwardPass(long tensorHandle);
public static native long tensorGrad(long tensorHandle);
public static native void disposeJitModule(long jitModuleHandle);
public static native void trainMode(long jitModuleHandle, boolean status);
public static native long loadJitModuleDouble(String path, int device);
public static native long loadJitModuleFloat(String path, int device);
public static native long loadJitModuleLong(String path, int device);
public static native long loadJitModuleInt(String path, int device);
public static native long forwardPass(long jitModuleHandle, long tensorHandle);
public static native void forwardPassAssign(long jitModuleHandle, long featuresHandle, long predsHandle);
public static native long getModuleParameter(long jitModuleHandle, String name);
public static native void setModuleParameter(long jitModuleHandle, String name, long tensorHandle);
public static native long getModuleBuffer(long jitModuleHandle, String name);
public static native void setModuleBuffer(long jitModuleHandle, String name, long tensorHandle);
public static native long adamOptim(long jitModuleHandle, double learningRate);
public static native void disposeAdamOptim(long adamOptHandle);
public static native void stepAdamOptim(long adamOptHandle);
public static native void zeroGradAdamOptim(long adamOptHandle);
public static native long rmsOptim(long jitModuleHandle, double learningRate, double alpha,
double eps, double weight_decay, double momentum, boolean centered);
public static native void disposeRmsOptim(long rmsOptHandle);
public static native void stepRmsOptim(long rmsOptHandle);
public static native void zeroGradRmsOptim(long rmsOptHandle);
public static native long adamWOptim(long jitModuleHandle, double learningRate, double beta1,
double beta2, double eps, double weight_decay, boolean amsgrad);
public static native void disposeAdamWOptim(long adamWOptHandle);
public static native void stepAdamWOptim(long adamWOptHandle);
public static native void zeroGradAdamWOptim(long adamWOptHandle);
public static native long adagradOptim(long jitModuleHandle, double learningRate, double weight_decay,
double lr_decay, double initial_accumulator_value, double eps);
public static native void disposeAdagradOptim(long adagradOptHandle);
public static native void stepAdagradOptim(long adagradOptHandle);
public static native void zeroGradAdagradOptim(long adagradOptHandle);
public static native long sgdOptim(long jitModuleHandle, double learningRate, double momentum,
double dampening, double weight_decay, boolean nesterov);
public static native void disposeSgdOptim(long sgdOptHandle);
public static native void stepSgdOptim(long sgdOptHandle);
public static native void zeroGradSgdOptim(long sgdOptHandle);
public static native void swapTensors(long lhsHandle, long rhsHandle);
public static native long loadTensorDouble(String path, int device);
public static native long loadTensorFloat(String path, int device);
public static native long loadTensorLong(String path, int device);
public static native long loadTensorInt(String path, int device);
public static native void saveTensor(long tensorHandle, String path);
public static native void saveJitModule(long jitModuleHandle, String path);
public static native void assignBlobDouble(long tensorHandle, double[] data);
public static native void assignBlobFloat(long tensorHandle, float[] data);
public static native void assignBlobLong(long tensorHandle, long[] data);
public static native void assignBlobInt(long tensorHandle, int[] data);
public static native void setBlobDouble(long tensorHandle, int i, double[] data);
public static native void setBlobFloat(long tensorHandle, int i, float[] data);
public static native void setBlobLong(long tensorHandle, int i, long[] data);
public static native void setBlobInt(long tensorHandle, int i, int[] data);
public static native void getBlobDouble(long tensorHandle, double[] data);
public static native void getBlobFloat(long tensorHandle, float[] data);
public static native void getBlobLong(long tensorHandle, long[] data);
public static native void getBlobInt(long tensorHandle, int[] data);
public static native void setTensor(long tensorHandle, int i, long tensorValue);
public static native long getSliceTensor(long tensorHandle, int dim, int start, int end);
public static native void setSliceTensor(long tensorHandle, int dim, int start, int end, long tensorValue);
public static native void setSliceBlobDouble(long tensorHandle, int dim, int start, int end, double[] data);
public static native void setSliceBlobFloat(long tensorHandle, int dim, int start, int end, float[] data);
public static native void setSliceBlobLong(long tensorHandle, int dim, int start, int end, long[] data);
public static native void setSliceBlobInt(long tensorHandle, int dim, int start, int end, int[] data);
}

kmath-noa/src/main/java/space/kscience/kmath/noa/NoaException.java (new file)

@@ -0,0 +1,12 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa;
public class NoaException extends Exception {
public NoaException(String errorMessage) {
super(errorMessage);
}
}

@@ -0,0 +1,778 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.noa.memory.NoaScope
import space.kscience.kmath.operations.*
import space.kscience.kmath.tensors.api.AnalyticTensorAlgebra
import space.kscience.kmath.tensors.api.LinearOpsTensorAlgebra
import space.kscience.kmath.tensors.api.Tensor
import space.kscience.kmath.tensors.api.TensorAlgebra
import space.kscience.kmath.tensors.core.TensorLinearStructure
internal typealias Slice = Pair<Int, Int>
public sealed class NoaAlgebra<T, A : Ring<T>, PrimitiveArray, TensorType : NoaTensor<T>>
protected constructor(protected val scope: NoaScope) :
TensorAlgebra<T, A> {
protected abstract val StructureND<T>.tensor: TensorType
protected abstract fun wrap(tensorHandle: TensorHandle): TensorType
@PerformancePitfall
public fun Tensor<T>.cast(): TensorType = tensor
/**
* A scalar tensor must have empty shape
*/
override fun StructureND<T>.valueOrNull(): T? =
try {
tensor.item()
} catch (e: NoaException) {
null
}
override fun StructureND<T>.value(): T = tensor.item()
public abstract fun randDiscrete(low: Long, high: Long, shape: IntArray, device: Device = Device.CPU): TensorType
public abstract fun TensorType.copyToArray(): PrimitiveArray
public abstract fun copyFromArray(array: PrimitiveArray, shape: IntArray, device: Device = Device.CPU): TensorType
public abstract fun full(value: T, shape: IntArray, device: Device = Device.CPU): TensorType
override operator fun StructureND<T>.times(arg: StructureND<T>): TensorType {
return wrap(JNoa.timesTensor(tensor.tensorHandle, arg.tensor.tensorHandle))
}
override operator fun Tensor<T>.timesAssign(arg: StructureND<T>): Unit {
JNoa.timesTensorAssign(tensor.tensorHandle, arg.tensor.tensorHandle)
}
override operator fun StructureND<T>.plus(arg: StructureND<T>): TensorType {
return wrap(JNoa.plusTensor(tensor.tensorHandle, arg.tensor.tensorHandle))
}
override operator fun Tensor<T>.plusAssign(arg: StructureND<T>): Unit {
JNoa.plusTensorAssign(tensor.tensorHandle, arg.tensor.tensorHandle)
}
override operator fun StructureND<T>.minus(arg: StructureND<T>): TensorType {
return wrap(JNoa.minusTensor(tensor.tensorHandle, arg.tensor.tensorHandle))
}
override operator fun Tensor<T>.minusAssign(arg: StructureND<T>): Unit {
JNoa.minusTensorAssign(tensor.tensorHandle, arg.tensor.tensorHandle)
}
override operator fun StructureND<T>.unaryMinus(): TensorType =
wrap(JNoa.unaryMinus(tensor.tensorHandle))
override infix fun StructureND<T>.dot(other: StructureND<T>): TensorType {
return wrap(JNoa.matmul(tensor.tensorHandle, other.tensor.tensorHandle))
}
public infix fun Tensor<T>.dotAssign(arg: StructureND<T>): Unit {
JNoa.matmulAssign(tensor.tensorHandle, arg.tensor.tensorHandle)
}
public infix fun StructureND<T>.dotRightAssign(arg: Tensor<T>): Unit {
JNoa.matmulRightAssign(tensor.tensorHandle, arg.tensor.tensorHandle)
}
override operator fun Tensor<T>.get(i: Int): TensorType =
wrap(JNoa.getIndex(tensor.tensorHandle, i))
public operator fun TensorType.set(i: Int, value: Tensor<T>): Unit =
JNoa.setTensor(tensorHandle, i, value.tensor.tensorHandle)
public abstract operator fun TensorType.set(i: Int, array: PrimitiveArray): Unit
public operator fun Tensor<T>.get(dim: Int, slice: Slice): TensorType =
wrap(JNoa.getSliceTensor(tensor.tensorHandle, dim, slice.first, slice.second))
public operator fun TensorType.set(dim: Int, slice: Slice, value: Tensor<T>): Unit =
JNoa.setSliceTensor(tensorHandle, dim, slice.first, slice.second, value.tensor.tensorHandle)
public abstract operator fun TensorType.set(dim: Int, slice: Slice, array: PrimitiveArray): Unit
override fun diagonalEmbedding(
diagonalEntries: Tensor<T>, offset: Int, dim1: Int, dim2: Int
): TensorType =
wrap(JNoa.diagEmbed(diagonalEntries.tensor.tensorHandle, offset, dim1, dim2))
override fun Tensor<T>.transpose(i: Int, j: Int): TensorType {
return wrap(JNoa.transposeTensor(tensor.tensorHandle, i, j))
}
override fun Tensor<T>.view(shape: IntArray): TensorType {
return wrap(JNoa.viewTensor(tensor.tensorHandle, shape))
}
override fun Tensor<T>.viewAs(other: StructureND<T>): TensorType {
return wrap(JNoa.viewAsTensor(tensor.tensorHandle, other.tensor.tensorHandle))
}
public fun StructureND<T>.abs(): TensorType = wrap(JNoa.absTensor(tensor.tensorHandle))
public fun StructureND<T>.sumAll(): TensorType = wrap(JNoa.sumTensor(tensor.tensorHandle))
override fun StructureND<T>.sum(): T = sumAll().item()
override fun StructureND<T>.sum(dim: Int, keepDim: Boolean): TensorType =
wrap(JNoa.sumDimTensor(tensor.tensorHandle, dim, keepDim))
public fun StructureND<T>.minAll(): TensorType = wrap(JNoa.minTensor(tensor.tensorHandle))
override fun StructureND<T>.min(): T = minAll().item()
override fun StructureND<T>.min(dim: Int, keepDim: Boolean): TensorType =
wrap(JNoa.minDimTensor(tensor.tensorHandle, dim, keepDim))
public fun StructureND<T>.maxAll(): TensorType = wrap(JNoa.maxTensor(tensor.tensorHandle))
override fun StructureND<T>.max(): T = maxAll().item()
override fun StructureND<T>.max(dim: Int, keepDim: Boolean): TensorType =
wrap(JNoa.maxDimTensor(tensor.tensorHandle, dim, keepDim))
override fun StructureND<T>.argMax(dim: Int, keepDim: Boolean): NoaIntTensor =
NoaIntTensor(scope, JNoa.argMaxTensor(tensor.tensorHandle, dim, keepDim))
public fun Tensor<T>.flatten(startDim: Int, endDim: Int): TensorType =
wrap(JNoa.flattenTensor(tensor.tensorHandle, startDim, endDim))
public fun Tensor<T>.randDiscrete(low: Long, high: Long): TensorType =
wrap(JNoa.randintLike(tensor.tensorHandle, low, high))
public fun Tensor<T>.randDiscreteAssign(low: Long, high: Long): Unit =
JNoa.randintLikeAssign(tensor.tensorHandle, low, high)
public fun Tensor<T>.copy(): TensorType =
wrap(JNoa.copyTensor(tensor.tensorHandle))
public fun Tensor<T>.copyToDevice(device: Device = Device.CPU): TensorType =
wrap(JNoa.copyToDevice(tensor.tensorHandle, device.toInt()))
public abstract fun loadJitModule(path: String, device: Device = Device.CPU): NoaJitModule
public abstract fun loadTensor(path: String, device: Device = Device.CPU): TensorType
public fun NoaJitModule.forward(features: Tensor<T>): TensorType =
wrap(JNoa.forwardPass(jitModuleHandle, features.tensor.tensorHandle))
public fun NoaJitModule.forwardAssign(features: TensorType, predictions: TensorType): Unit =
JNoa.forwardPassAssign(jitModuleHandle, features.tensorHandle, predictions.tensorHandle)
public fun NoaJitModule.getParameter(name: String): TensorType =
wrap(JNoa.getModuleParameter(jitModuleHandle, name))
public fun NoaJitModule.setParameter(name: String, parameter: Tensor<T>): Unit =
JNoa.setModuleParameter(jitModuleHandle, name, parameter.tensor.tensorHandle)
public fun NoaJitModule.getBuffer(name: String): TensorType =
wrap(JNoa.getModuleBuffer(jitModuleHandle, name))
public fun NoaJitModule.setBuffer(name: String, buffer: Tensor<T>): Unit =
JNoa.setModuleBuffer(jitModuleHandle, name, buffer.tensor.tensorHandle)
public infix fun TensorType.swap(arg: TensorType): Unit =
JNoa.swapTensors(tensorHandle, arg.tensorHandle)
public abstract fun TensorType.assignFromArray(array: PrimitiveArray): Unit
}
public sealed class NoaPartialDivisionAlgebra<T, A : Field<T>, PrimitiveArray, TensorType : NoaTensor<T>>
protected constructor(scope: NoaScope) :
NoaAlgebra<T, A, PrimitiveArray, TensorType>(scope),
LinearOpsTensorAlgebra<T, A>,
AnalyticTensorAlgebra<T, A> {
override operator fun StructureND<T>.div(arg: StructureND<T>): TensorType {
return wrap(JNoa.divTensor(tensor.tensorHandle, arg.tensor.tensorHandle))
}
override operator fun Tensor<T>.divAssign(arg: StructureND<T>): Unit {
JNoa.divTensorAssign(tensor.tensorHandle, arg.tensor.tensorHandle)
}
public fun StructureND<T>.meanAll(): TensorType = wrap(JNoa.meanTensor(tensor.tensorHandle))
override fun StructureND<T>.mean(): T = meanAll().item()
override fun StructureND<T>.mean(dim: Int, keepDim: Boolean): TensorType =
wrap(JNoa.meanDimTensor(tensor.tensorHandle, dim, keepDim))
public fun StructureND<T>.stdAll(): TensorType = wrap(JNoa.stdTensor(tensor.tensorHandle))
override fun StructureND<T>.std(): T = stdAll().item()
override fun StructureND<T>.std(dim: Int, keepDim: Boolean): TensorType =
wrap(JNoa.stdDimTensor(tensor.tensorHandle, dim, keepDim))
public fun StructureND<T>.varAll(): TensorType = wrap(JNoa.varTensor(tensor.tensorHandle))
override fun StructureND<T>.variance(): T = varAll().item()
override fun StructureND<T>.variance(dim: Int, keepDim: Boolean): TensorType =
wrap(JNoa.varDimTensor(tensor.tensorHandle, dim, keepDim))
public abstract fun randNormal(shape: IntArray, device: Device = Device.CPU): TensorType
public abstract fun randUniform(shape: IntArray, device: Device = Device.CPU): TensorType
public fun StructureND<T>.randUniform(): TensorType =
wrap(JNoa.randLike(tensor.tensorHandle))
public fun StructureND<T>.randUniformAssign(): Unit =
JNoa.randLikeAssign(tensor.tensorHandle)
public fun StructureND<T>.randNormal(): TensorType =
wrap(JNoa.randnLike(tensor.tensorHandle))
public fun StructureND<T>.randNormalAssign(): Unit =
JNoa.randnLikeAssign(tensor.tensorHandle)
override fun StructureND<T>.exp(): TensorType =
wrap(JNoa.expTensor(tensor.tensorHandle))
override fun StructureND<T>.ln(): TensorType =
wrap(JNoa.lnTensor(tensor.tensorHandle))
override fun StructureND<T>.sqrt(): TensorType =
wrap(JNoa.sqrtTensor(tensor.tensorHandle))
override fun StructureND<T>.cos(): TensorType =
wrap(JNoa.cosTensor(tensor.tensorHandle))
override fun StructureND<T>.acos(): TensorType =
wrap(JNoa.acosTensor(tensor.tensorHandle))
override fun StructureND<T>.cosh(): TensorType =
wrap(JNoa.coshTensor(tensor.tensorHandle))
override fun StructureND<T>.acosh(): TensorType =
wrap(JNoa.acoshTensor(tensor.tensorHandle))
override fun StructureND<T>.sin(): TensorType =
wrap(JNoa.sinTensor(tensor.tensorHandle))
override fun StructureND<T>.asin(): TensorType =
wrap(JNoa.asinTensor(tensor.tensorHandle))
override fun StructureND<T>.sinh(): TensorType =
wrap(JNoa.sinhTensor(tensor.tensorHandle))
override fun StructureND<T>.asinh(): TensorType =
wrap(JNoa.asinhTensor(tensor.tensorHandle))
override fun StructureND<T>.tan(): TensorType =
wrap(JNoa.tanTensor(tensor.tensorHandle))
override fun StructureND<T>.atan(): TensorType =
wrap(JNoa.atanTensor(tensor.tensorHandle))
override fun StructureND<T>.tanh(): TensorType =
wrap(JNoa.tanhTensor(tensor.tensorHandle))
override fun StructureND<T>.atanh(): TensorType =
wrap(JNoa.atanhTensor(tensor.tensorHandle))
override fun StructureND<T>.ceil(): TensorType =
wrap(JNoa.ceilTensor(tensor.tensorHandle))
override fun StructureND<T>.floor(): TensorType =
wrap(JNoa.floorTensor(tensor.tensorHandle))
override fun StructureND<T>.det(): Tensor<T> =
wrap(JNoa.detTensor(tensor.tensorHandle))
override fun StructureND<T>.inv(): Tensor<T> =
wrap(JNoa.invTensor(tensor.tensorHandle))
override fun StructureND<T>.cholesky(): Tensor<T> =
wrap(JNoa.choleskyTensor(tensor.tensorHandle))
override fun StructureND<T>.qr(): Pair<TensorType, TensorType> {
val Q = JNoa.emptyTensor()
val R = JNoa.emptyTensor()
JNoa.qrTensor(tensor.tensorHandle, Q, R)
return Pair(wrap(Q), wrap(R))
}
/**
* this implementation satisfies `tensor = P dot L dot U`
*/
override fun StructureND<T>.lu(): Triple<TensorType, TensorType, TensorType> {
val P = JNoa.emptyTensor()
val L = JNoa.emptyTensor()
val U = JNoa.emptyTensor()
JNoa.luTensor(tensor.tensorHandle, P, L, U)
return Triple(wrap(P), wrap(L), wrap(U))
}
override fun StructureND<T>.svd(): Triple<TensorType, TensorType, TensorType> {
val U = JNoa.emptyTensor()
val V = JNoa.emptyTensor()
val S = JNoa.emptyTensor()
JNoa.svdTensor(tensor.tensorHandle, U, S, V)
return Triple(wrap(U), wrap(S), wrap(V))
}
override fun StructureND<T>.symEig(): Pair<TensorType, TensorType> {
val V = JNoa.emptyTensor()
val S = JNoa.emptyTensor()
JNoa.symEigTensor(tensor.tensorHandle, S, V)
return Pair(wrap(S), wrap(V))
}
public fun TensorType.autoGradient(variable: TensorType, retainGraph: Boolean = false): TensorType =
wrap(JNoa.autoGradTensor(tensorHandle, variable.tensorHandle, retainGraph))
public fun TensorType.autoHessian(variable: TensorType): TensorType =
wrap(JNoa.autoHessTensor(tensorHandle, variable.tensorHandle))
public fun TensorType.detachFromGraph(): TensorType =
wrap(JNoa.detachFromGraph(tensorHandle))
public fun TensorType.backward(): Unit =
JNoa.backwardPass(tensorHandle)
public fun TensorType.grad(): TensorType =
wrap(JNoa.tensorGrad(tensorHandle))
public fun NoaJitModule.train(status: Boolean): Unit =
JNoa.trainMode(jitModuleHandle, status)
public fun NoaJitModule.adamOptimiser(learningRate: Double): AdamOptimiser =
AdamOptimiser(scope, JNoa.adamOptim(jitModuleHandle, learningRate))
/**
* Implements the RMSprop algorithm. Receives `learning rate`, `alpha` (smoothing constant),
* `eps` (term added to the denominator to improve numerical stability), `weight_decay`,
* `momentum` factor, `centered` (if True, compute the centered RMSProp).
* For more information: https://pytorch.org/docs/stable/generated/torch.optim.RMSprop.html
*
* @receiver the `learning rate`, `alpha`, `eps`, `weight_decay`, `momentum`, `centered`.
* @return RMSpropOptimiser.
*/
public fun NoaJitModule.rmsOptimiser(learningRate: Double, alpha: Double,
eps: Double, weightDecay: Double, momentum: Double, centered: Boolean): RMSpropOptimiser =
RMSpropOptimiser(scope, JNoa.rmsOptim(jitModuleHandle, learningRate, alpha,
eps, weightDecay, momentum, centered))
/**
* Implements the AdamW algorithm. Receives `learning rate`, `beta1` and `beta2` (coefficients used
* for computing running averages of gradient and its square), `eps` (term added to the denominator
* to improve numerical stability), `weight_decay`, `amsgrad`.
* For more information: https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html
*
* @receiver the `learning rate`, `beta1`, `beta2`, `eps`, `weight_decay`, `amsgrad`.
* @return AdamWOptimiser.
*/
public fun NoaJitModule.adamWOptimiser(learningRate: Double, beta1: Double,
beta2: Double, eps: Double, weightDecay: Double, amsgrad: Boolean): AdamWOptimiser =
AdamWOptimiser(scope, JNoa.adamWOptim(jitModuleHandle, learningRate, beta1,
beta2, eps, weightDecay, amsgrad))
/**
* Implements the Adagrad algorithm. Receives `learning rate`, `weight_decay`,
* `learning rate decay`, `initial accumulator value`, `eps`.
* For more information: https://pytorch.org/docs/stable/generated/torch.optim.Adagrad.html
*
* @receiver the `learning rate`, `weight_decay`, `learning rate decay`, `initial accumulator value`, `eps`.
* @return AdagradOptimiser.
*/
public fun NoaJitModule.adagradOptimiser(learningRate: Double, weightDecay: Double,
lrDecay: Double, initialAccumulatorValue: Double, eps: Double): AdagradOptimiser =
AdagradOptimiser(scope, JNoa.adagradOptim(jitModuleHandle, learningRate, weightDecay,
lrDecay, initialAccumulatorValue, eps))
/**
* Implements stochastic gradient descent. Receives `learning rate`, `momentum` factor,
* `dampening` for momentum, `weight_decay`, `nesterov` (enables Nesterov momentum).
* For more information: https://pytorch.org/docs/stable/generated/torch.optim.SGD.html
*
* @receiver the `learning rate`, `momentum`, `dampening`, `weight_decay`, `nesterov`.
* @return SgdOptimiser.
*/
public fun NoaJitModule.sgdOptimiser(learningRate: Double, momentum: Double,
dampening: Double, weightDecay: Double, nesterov: Boolean): SgdOptimiser =
SgdOptimiser(scope, JNoa.sgdOptim(jitModuleHandle, learningRate, momentum,
dampening, weightDecay, nesterov))
}
public sealed class NoaDoubleAlgebra
protected constructor(scope: NoaScope) :
NoaPartialDivisionAlgebra<Double, DoubleField, DoubleArray, NoaDoubleTensor>(scope) {
override val elementAlgebra: DoubleField
get() = DoubleField
override fun structureND(shape: IntArray, initializer: DoubleField.(IntArray) -> Double): NoaDoubleTensor =
copyFromArray(
TensorLinearStructure(shape).asSequence().map { DoubleField.initializer(it) }.toMutableList()
.toDoubleArray(),
shape, Device.CPU
)
private fun StructureND<Double>.castHelper(): NoaDoubleTensor =
copyFromArray(
TensorLinearStructure(this.shape).asSequence().map(this::get).toMutableList().toDoubleArray(),
this.shape, Device.CPU
)
override val StructureND<Double>.tensor: NoaDoubleTensor
get() = when (this) {
is NoaDoubleTensor -> this
else -> castHelper()
}
override fun wrap(tensorHandle: TensorHandle): NoaDoubleTensor =
NoaDoubleTensor(scope = scope, tensorHandle = tensorHandle)
override fun NoaDoubleTensor.copyToArray(): DoubleArray {
val array = DoubleArray(numElements)
JNoa.getBlobDouble(tensorHandle, array)
return array
}
override fun copyFromArray(array: DoubleArray, shape: IntArray, device: Device): NoaDoubleTensor =
wrap(JNoa.fromBlobDouble(array, shape, device.toInt()))
override fun randNormal(shape: IntArray, device: Device): NoaDoubleTensor =
wrap(JNoa.randnDouble(shape, device.toInt()))
override fun randUniform(shape: IntArray, device: Device): NoaDoubleTensor =
wrap(JNoa.randDouble(shape, device.toInt()))
override fun randDiscrete(low: Long, high: Long, shape: IntArray, device: Device): NoaDoubleTensor =
wrap(JNoa.randintDouble(low, high, shape, device.toInt()))
override operator fun Double.plus(arg: StructureND<Double>): NoaDoubleTensor =
wrap(JNoa.plusDouble(this, arg.tensor.tensorHandle))
override fun StructureND<Double>.plus(value: Double): NoaDoubleTensor =
wrap(JNoa.plusDouble(value, tensor.tensorHandle))
override fun Tensor<Double>.plusAssign(value: Double): Unit =
JNoa.plusDoubleAssign(value, tensor.tensorHandle)
override operator fun Double.minus(arg: StructureND<Double>): NoaDoubleTensor =
wrap(JNoa.plusDouble(-this, arg.tensor.tensorHandle))
override fun StructureND<Double>.minus(value: Double): NoaDoubleTensor =
wrap(JNoa.plusDouble(-value, tensor.tensorHandle))
override fun Tensor<Double>.minusAssign(value: Double): Unit =
JNoa.plusDoubleAssign(-value, tensor.tensorHandle)
override operator fun Double.times(arg: StructureND<Double>): NoaDoubleTensor =
wrap(JNoa.timesDouble(this, arg.tensor.tensorHandle))
override fun StructureND<Double>.times(value: Double): NoaDoubleTensor =
wrap(JNoa.timesDouble(value, tensor.tensorHandle))
override fun Tensor<Double>.timesAssign(value: Double): Unit =
JNoa.timesDoubleAssign(value, tensor.tensorHandle)
override fun Double.div(arg: StructureND<Double>): NoaDoubleTensor =
arg.tensor * (1 / this)
override fun StructureND<Double>.div(value: Double): NoaDoubleTensor =
tensor * (1 / value)
override fun Tensor<Double>.divAssign(value: Double): Unit =
tensor.timesAssign(1 / value)
override fun full(value: Double, shape: IntArray, device: Device): NoaDoubleTensor =
wrap(JNoa.fullDouble(value, shape, device.toInt()))
override fun loadJitModule(path: String, device: Device): NoaJitModule =
NoaJitModule(scope, JNoa.loadJitModuleDouble(path, device.toInt()))
override fun loadTensor(path: String, device: Device): NoaDoubleTensor =
wrap(JNoa.loadTensorDouble(path, device.toInt()))
override fun NoaDoubleTensor.assignFromArray(array: DoubleArray): Unit =
JNoa.assignBlobDouble(tensorHandle, array)
override fun NoaDoubleTensor.set(i: Int, array: DoubleArray): Unit =
JNoa.setBlobDouble(tensorHandle, i, array)
override fun NoaDoubleTensor.set(dim: Int, slice: Slice, array: DoubleArray): Unit =
JNoa.setSliceBlobDouble(tensorHandle, dim, slice.first, slice.second, array)
}
public sealed class NoaFloatAlgebra
protected constructor(scope: NoaScope) :
NoaPartialDivisionAlgebra<Float, FloatField, FloatArray, NoaFloatTensor>(scope) {
override val elementAlgebra: FloatField
get() = FloatField
override fun structureND(shape: IntArray, initializer: FloatField.(IntArray) -> Float): NoaFloatTensor =
copyFromArray(
TensorLinearStructure(shape).asSequence().map { FloatField.initializer(it) }.toMutableList()
.toFloatArray(),
shape, Device.CPU
)
private fun StructureND<Float>.castHelper(): NoaFloatTensor =
copyFromArray(
TensorLinearStructure(this.shape).asSequence().map(this::get).toMutableList().toFloatArray(),
this.shape, Device.CPU
)
override val StructureND<Float>.tensor: NoaFloatTensor
get() = when (this) {
is NoaFloatTensor -> this
else -> castHelper()
}
override fun wrap(tensorHandle: TensorHandle): NoaFloatTensor =
NoaFloatTensor(scope = scope, tensorHandle = tensorHandle)
override fun NoaFloatTensor.copyToArray(): FloatArray {
val res = FloatArray(numElements)
JNoa.getBlobFloat(tensorHandle, res)
return res
}
override fun copyFromArray(array: FloatArray, shape: IntArray, device: Device): NoaFloatTensor =
wrap(JNoa.fromBlobFloat(array, shape, device.toInt()))
override fun randNormal(shape: IntArray, device: Device): NoaFloatTensor =
wrap(JNoa.randnFloat(shape, device.toInt()))
override fun randUniform(shape: IntArray, device: Device): NoaFloatTensor =
wrap(JNoa.randFloat(shape, device.toInt()))
override fun randDiscrete(low: Long, high: Long, shape: IntArray, device: Device): NoaFloatTensor =
wrap(JNoa.randintFloat(low, high, shape, device.toInt()))
override operator fun Float.plus(arg: StructureND<Float>): NoaFloatTensor =
wrap(JNoa.plusFloat(this, arg.tensor.tensorHandle))
override fun StructureND<Float>.plus(value: Float): NoaFloatTensor =
wrap(JNoa.plusFloat(value, tensor.tensorHandle))
override fun Tensor<Float>.plusAssign(value: Float): Unit =
JNoa.plusFloatAssign(value, tensor.tensorHandle)
override operator fun Float.minus(arg: StructureND<Float>): NoaFloatTensor =
wrap(JNoa.plusFloat(-this, arg.tensor.tensorHandle))
override fun StructureND<Float>.minus(value: Float): NoaFloatTensor =
wrap(JNoa.plusFloat(-value, tensor.tensorHandle))
override fun Tensor<Float>.minusAssign(value: Float): Unit =
JNoa.plusFloatAssign(-value, tensor.tensorHandle)
override operator fun Float.times(arg: StructureND<Float>): NoaFloatTensor =
wrap(JNoa.timesFloat(this, arg.tensor.tensorHandle))
override fun StructureND<Float>.times(value: Float): NoaFloatTensor =
wrap(JNoa.timesFloat(value, tensor.tensorHandle))
override fun Tensor<Float>.timesAssign(value: Float): Unit =
JNoa.timesFloatAssign(value, tensor.tensorHandle)
override fun Float.div(arg: StructureND<Float>): NoaFloatTensor =
arg.tensor * (1 / this)
override fun StructureND<Float>.div(value: Float): NoaFloatTensor =
tensor * (1 / value)
override fun Tensor<Float>.divAssign(value: Float): Unit =
tensor.timesAssign(1 / value)
override fun full(value: Float, shape: IntArray, device: Device): NoaFloatTensor =
wrap(JNoa.fullFloat(value, shape, device.toInt()))
override fun loadJitModule(path: String, device: Device): NoaJitModule =
NoaJitModule(scope, JNoa.loadJitModuleFloat(path, device.toInt()))
override fun loadTensor(path: String, device: Device): NoaFloatTensor =
wrap(JNoa.loadTensorFloat(path, device.toInt()))
override fun NoaFloatTensor.assignFromArray(array: FloatArray): Unit =
JNoa.assignBlobFloat(tensorHandle, array)
override fun NoaFloatTensor.set(i: Int, array: FloatArray): Unit =
JNoa.setBlobFloat(tensorHandle, i, array)
override fun NoaFloatTensor.set(dim: Int, slice: Slice, array: FloatArray): Unit =
JNoa.setSliceBlobFloat(tensorHandle, dim, slice.first, slice.second, array)
}
public sealed class NoaLongAlgebra
protected constructor(scope: NoaScope) :
NoaAlgebra<Long, LongRing, LongArray, NoaLongTensor>(scope) {
override val elementAlgebra: LongRing
get() = LongRing
override fun structureND(shape: IntArray, initializer: LongRing.(IntArray) -> Long): NoaLongTensor =
copyFromArray(
TensorLinearStructure(shape).asSequence().map { LongRing.initializer(it) }.toMutableList()
.toLongArray(),
shape, Device.CPU
)
private fun StructureND<Long>.castHelper(): NoaLongTensor =
copyFromArray(
TensorLinearStructure(this.shape).asSequence().map(this::get).toMutableList().toLongArray(),
this.shape, Device.CPU
)
override val StructureND<Long>.tensor: NoaLongTensor
get() = when (this) {
is NoaLongTensor -> this
else -> castHelper()
}
override fun wrap(tensorHandle: TensorHandle): NoaLongTensor =
NoaLongTensor(scope = scope, tensorHandle = tensorHandle)
override fun NoaLongTensor.copyToArray(): LongArray {
val array = LongArray(numElements)
JNoa.getBlobLong(tensorHandle, array)
return array
}
override fun copyFromArray(array: LongArray, shape: IntArray, device: Device): NoaLongTensor =
wrap(JNoa.fromBlobLong(array, shape, device.toInt()))
override fun randDiscrete(low: Long, high: Long, shape: IntArray, device: Device): NoaLongTensor =
wrap(JNoa.randintLong(low, high, shape, device.toInt()))
override operator fun Long.plus(arg: StructureND<Long>): NoaLongTensor =
wrap(JNoa.plusLong(this, arg.tensor.tensorHandle))
override fun StructureND<Long>.plus(value: Long): NoaLongTensor =
wrap(JNoa.plusLong(value, tensor.tensorHandle))
override fun Tensor<Long>.plusAssign(value: Long): Unit =
JNoa.plusLongAssign(value, tensor.tensorHandle)
override operator fun Long.minus(arg: StructureND<Long>): NoaLongTensor =
wrap(JNoa.plusLong(-this, arg.tensor.tensorHandle))
override fun StructureND<Long>.minus(value: Long): NoaLongTensor =
wrap(JNoa.plusLong(-value, tensor.tensorHandle))
override fun Tensor<Long>.minusAssign(value: Long): Unit =
JNoa.plusLongAssign(-value, tensor.tensorHandle)
override operator fun Long.times(arg: StructureND<Long>): NoaLongTensor =
wrap(JNoa.timesLong(this, arg.tensor.tensorHandle))
override fun StructureND<Long>.times(value: Long): NoaLongTensor =
wrap(JNoa.timesLong(value, tensor.tensorHandle))
override fun Tensor<Long>.timesAssign(value: Long): Unit =
JNoa.timesLongAssign(value, tensor.tensorHandle)
override fun full(value: Long, shape: IntArray, device: Device): NoaLongTensor =
wrap(JNoa.fullLong(value, shape, device.toInt()))
override fun loadJitModule(path: String, device: Device): NoaJitModule =
NoaJitModule(scope, JNoa.loadJitModuleLong(path, device.toInt()))
override fun loadTensor(path: String, device: Device): NoaLongTensor =
wrap(JNoa.loadTensorLong(path, device.toInt()))
override fun NoaLongTensor.assignFromArray(array: LongArray): Unit =
JNoa.assignBlobLong(tensorHandle, array)
override fun NoaLongTensor.set(i: Int, array: LongArray): Unit =
JNoa.setBlobLong(tensorHandle, i, array)
override fun NoaLongTensor.set(dim: Int, slice: Slice, array: LongArray): Unit =
JNoa.setSliceBlobLong(tensorHandle, dim, slice.first, slice.second, array)
}
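A hedged example of the integral algebra: randDiscrete is the only random factory Long tensors get here, and the bounds are assumed to follow torch.randint conventions (inclusive low, exclusive high).

    val draws: LongArray? = NoaLong {
        val dice = randDiscrete(low = 1L, high = 7L, shape = intArrayOf(10), device = Device.CPU)
        dice.copyToArray()
    }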
public sealed class NoaIntAlgebra
protected constructor(scope: NoaScope) :
NoaAlgebra<Int, IntRing, IntArray, NoaIntTensor>(scope) {
override val elementAlgebra: IntRing
get() = IntRing
override fun structureND(shape: IntArray, initializer: IntRing.(IntArray) -> Int): NoaIntTensor =
copyFromArray(
TensorLinearStructure(shape).asSequence().map { IntRing.initializer(it) }.toMutableList()
.toIntArray(),
shape, Device.CPU
)
private fun StructureND<Int>.castHelper(): NoaIntTensor =
copyFromArray(
TensorLinearStructure(this.shape).asSequence().map(this::get).toMutableList().toIntArray(),
this.shape, Device.CPU
)
override val StructureND<Int>.tensor: NoaIntTensor
get() = when (this) {
is NoaIntTensor -> this
else -> castHelper()
}
override fun wrap(tensorHandle: TensorHandle): NoaIntTensor =
NoaIntTensor(scope = scope, tensorHandle = tensorHandle)
override fun NoaIntTensor.copyToArray(): IntArray {
val array = IntArray(numElements)
JNoa.getBlobInt(tensorHandle, array)
return array
}
override fun copyFromArray(array: IntArray, shape: IntArray, device: Device): NoaIntTensor =
wrap(JNoa.fromBlobInt(array, shape, device.toInt()))
override fun randDiscrete(low: Long, high: Long, shape: IntArray, device: Device): NoaIntTensor =
wrap(JNoa.randintInt(low, high, shape, device.toInt()))
override operator fun Int.plus(arg: StructureND<Int>): NoaIntTensor =
wrap(JNoa.plusInt(this, arg.tensor.tensorHandle))
override fun StructureND<Int>.plus(value: Int): NoaIntTensor =
wrap(JNoa.plusInt(value, tensor.tensorHandle))
override fun Tensor<Int>.plusAssign(value: Int): Unit =
JNoa.plusIntAssign(value, tensor.tensorHandle)
override operator fun Int.minus(arg: StructureND<Int>): NoaIntTensor =
wrap(JNoa.plusInt(-this, arg.tensor.tensorHandle))
override fun StructureND<Int>.minus(value: Int): NoaIntTensor =
wrap(JNoa.plusInt(-value, tensor.tensorHandle))
override fun Tensor<Int>.minusAssign(value: Int): Unit =
JNoa.plusIntAssign(-value, tensor.tensorHandle)
override operator fun Int.times(arg: StructureND<Int>): NoaIntTensor =
wrap(JNoa.timesInt(this, arg.tensor.tensorHandle))
override fun StructureND<Int>.times(value: Int): NoaIntTensor =
wrap(JNoa.timesInt(value, tensor.tensorHandle))
override fun Tensor<Int>.timesAssign(value: Int): Unit =
JNoa.timesIntAssign(value, tensor.tensorHandle)
override fun full(value: Int, shape: IntArray, device: Device): NoaIntTensor =
wrap(JNoa.fullInt(value, shape, device.toInt()))
override fun loadJitModule(path: String, device: Device): NoaJitModule =
NoaJitModule(scope, JNoa.loadJitModuleInt(path, device.toInt()))
override fun loadTensor(path: String, device: Device): NoaIntTensor =
wrap(JNoa.loadTensorInt(path, device.toInt()))
override fun NoaIntTensor.assignFromArray(array: IntArray): Unit =
JNoa.assignBlobInt(tensorHandle, array)
override fun NoaIntTensor.set(i: Int, array: IntArray): Unit =
JNoa.setBlobInt(tensorHandle, i, array)
override fun NoaIntTensor.set(dim: Int, slice: Slice, array: IntArray): Unit =
JNoa.setSliceBlobInt(tensorHandle, dim, slice.first, slice.second, array)
}
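Sketch of structureND for Int tensors; the initializer receives each multi-index, which is assumed to be traversed in the row-major order of TensorLinearStructure.

    val grid: IntArray? = NoaInt {
        val tensor = structureND(intArrayOf(3, 3)) { index -> index[0] * 3 + index[1] }
        tensor.copyToArray()   // expected 0..8 under the row-major assumption
    }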


@@ -0,0 +1,49 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa
import space.kscience.kmath.noa.memory.NoaScope
import space.kscience.kmath.noa.memory.withNoaScope
public class NoaDouble
internal constructor(scope: NoaScope) :
NoaDoubleAlgebra(scope)
public fun <R> NoaDouble(block: NoaDouble.() -> R): R? =
withNoaScope { NoaDouble(this).block() }
public fun <R> NoaDouble(scope: NoaScope, block: NoaDouble.() -> R): R? =
withNoaScope(scope) { NoaDouble(this).block() }
public class NoaFloat
internal constructor(scope: NoaScope) :
NoaFloatAlgebra(scope)
public fun <R> NoaFloat(block: NoaFloat.() -> R): R? =
withNoaScope { NoaFloat(this).block() }
public fun <R> NoaFloat(scope: NoaScope, block: NoaFloat.() -> R): R? =
withNoaScope(scope) { NoaFloat(this).block() }
public class NoaLong
internal constructor(scope: NoaScope) :
NoaLongAlgebra(scope)
public fun <R> NoaLong(block: NoaLong.() -> R): R? =
withNoaScope { NoaLong(this).block() }
public fun <R> NoaLong(scope: NoaScope, block: NoaLong.() -> R): R? =
withNoaScope(scope) { NoaLong(this).block() }
public class NoaInt
internal constructor(scope: NoaScope) :
NoaIntAlgebra(scope)
public fun <R> NoaInt(block: NoaInt.() -> R): R? =
withNoaScope { NoaInt(this).block() }
public fun <R> NoaInt(scope: NoaScope, block: NoaInt.() -> R): R? =
withNoaScope(scope) { NoaInt(this).block() }
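These builders return null when the block throws (withNoaScope swallows the exception), which is why the tests later in this diff end with `!!`; a small sketch:

    val sum: Float? = NoaFloat {
        full(2.0f, intArrayOf(3), Device.CPU).copyToArray().sum()
    }
    val unwrapped: Float = sum ?: error("native call failed inside the NoaFloat block")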


@@ -0,0 +1,19 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa
import space.kscience.kmath.noa.memory.NoaResource
import space.kscience.kmath.noa.memory.NoaScope
internal typealias JitModuleHandle = Long
public class NoaJitModule
internal constructor(scope: NoaScope, internal val jitModuleHandle: JitModuleHandle)
: NoaResource(scope){
override fun dispose(): Unit = JNoa.disposeJitModule(jitModuleHandle)
public fun save(path: String): Unit = JNoa.saveJitModule(jitModuleHandle, path)
}
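A minimal round trip for a TorchScript module, using the loadJitModule factory from the algebras above; the file names are illustrative only.

    NoaFloat {
        val module = loadJitModule("net.pt", Device.CPU)   // illustrative path
        module.save("net_copy.pt")                         // delegates to JNoa.saveJitModule
    }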


@@ -0,0 +1,15 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa.memory
public abstract class NoaResource
internal constructor(internal val scope: NoaScope) {
init {
scope.add(::dispose)
}
protected abstract fun dispose(): Unit
}


@@ -0,0 +1,49 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa.memory
private typealias Disposable = () -> Unit
public class NoaScope {
internal val disposables: ArrayDeque<Disposable> = ArrayDeque(0)
public fun disposeAll() {
disposables.forEach(Disposable::invoke)
disposables.clear()
}
internal inline fun add(crossinline disposable: Disposable) {
disposables += {
try {
disposable()
} catch (e: Throwable) {
// failures while disposing are deliberately swallowed so one bad handle cannot block the rest
}
}
}
internal fun addAll(scope: NoaScope) {
disposables.addAll(scope.disposables)
}
}
internal inline fun <R> withNoaScope(block: NoaScope.() -> R): R? {
val noaScope = NoaScope()
val result = try { noaScope.block() } catch (e: Throwable) { null }
noaScope.disposeAll()
return result
}
internal inline fun <R> withNoaScope(scope: NoaScope, block: NoaScope.() -> R): R? {
val noaScope = NoaScope()
val result = try { noaScope.block() } catch (e: Throwable) { null }
if (result == null){
noaScope.disposeAll()
} else {
scope.addAll(noaScope)
}
return result
}
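Handover semantics in a sketch: when a block given an outer scope succeeds, its disposables are transferred to that scope instead of being freed, so the caller controls the lifetime.

    val outer = NoaScope()
    val kept = NoaFloat(outer) {
        full(0.0f, intArrayOf(3), Device.CPU)
    }                      // success: the tensor's disposer now lives in `outer`
    // ... use kept ...
    outer.disposeAll()     // frees the underlying native handle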


@@ -0,0 +1,58 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa
import space.kscience.kmath.noa.memory.NoaResource
import space.kscience.kmath.noa.memory.NoaScope
internal typealias OptimiserHandle = Long
public abstract class NoaOptimiser
internal constructor(scope: NoaScope) : NoaResource(scope) {
public abstract fun step(): Unit
public abstract fun zeroGrad(): Unit
}
public class AdamOptimiser
internal constructor(scope: NoaScope, internal val optimiserHandle: OptimiserHandle)
: NoaOptimiser(scope) {
override fun dispose(): Unit = JNoa.disposeAdamOptim(optimiserHandle)
override fun step(): Unit = JNoa.stepAdamOptim(optimiserHandle)
override fun zeroGrad(): Unit = JNoa.zeroGradAdamOptim(optimiserHandle)
}
public class RMSpropOptimiser
internal constructor(scope: NoaScope, internal val optimiserHandle: OptimiserHandle)
: NoaOptimiser(scope) {
override fun dispose(): Unit = JNoa.disposeRmsOptim(optimiserHandle)
override fun step(): Unit = JNoa.stepRmsOptim(optimiserHandle)
override fun zeroGrad(): Unit = JNoa.zeroGradRmsOptim(optimiserHandle)
}
public class AdamWOptimiser
internal constructor(scope: NoaScope, internal val optimiserHandle: OptimiserHandle)
: NoaOptimiser(scope) {
override fun dispose(): Unit = JNoa.disposeAdamWOptim(optimiserHandle)
override fun step(): Unit = JNoa.stepAdamWOptim(optimiserHandle)
override fun zeroGrad(): Unit = JNoa.zeroGradAdamWOptim(optimiserHandle)
}
public class AdagradOptimiser
internal constructor(scope: NoaScope, internal val optimiserHandle: OptimiserHandle)
: NoaOptimiser(scope) {
override fun dispose(): Unit = JNoa.disposeAdagradOptim(optimiserHandle)
override fun step(): Unit = JNoa.stepAdagradOptim(optimiserHandle)
override fun zeroGrad(): Unit = JNoa.zeroGradAdagradOptim(optimiserHandle)
}
public class SgdOptimiser
internal constructor(scope: NoaScope, internal val optimiserHandle: OptimiserHandle)
: NoaOptimiser(scope) {
override fun dispose(): Unit = JNoa.disposeSgdOptim(optimiserHandle)
override fun step(): Unit = JNoa.stepSgdOptim(optimiserHandle)
override fun zeroGrad(): Unit = JNoa.zeroGradSgdOptim(optimiserHandle)
}
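Not in the diff: a sketch of the optimiser life cycle wired to the jit-module factories exercised by the tests below (adamOptimiser and friends); the .pt paths are placeholders.

    NoaFloat {
        val data = loadJitModule("data.pt", Device.CPU)
        val net = loadJitModule("net.pt", Device.CPU)
        val mse = loadJitModule("loss.pt", Device.CPU)      // target buffer baked in at export time
        val x = data.getBuffer("x_train")
        val optimiser = net.adamOptimiser(0.005)
        repeat(100) {
            optimiser.zeroGrad()                            // drop gradients from the previous step
            val loss = mse.forward(net.forward(x))
            loss.backward()                                 // accumulate fresh gradients
            optimiser.step()                                // update the module parameters
        }
    }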


@@ -0,0 +1,159 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.noa.memory.NoaResource
import space.kscience.kmath.noa.memory.NoaScope
import space.kscience.kmath.tensors.api.Tensor
import space.kscience.kmath.tensors.core.TensorLinearStructure
internal typealias TensorHandle = Long
public sealed class NoaTensor<T>
protected constructor(scope: NoaScope, internal val tensorHandle: TensorHandle) :
NoaResource(scope), Tensor<T> {
override fun dispose(): Unit = JNoa.disposeTensor(tensorHandle)
internal abstract fun item(): T
override val dimension: Int get() = JNoa.getDim(tensorHandle)
override val shape: IntArray
get() = (1..dimension).map { JNoa.getShapeAt(tensorHandle, it - 1) }.toIntArray()
public val strides: IntArray
get() = (1..dimension).map { JNoa.getStrideAt(tensorHandle, it - 1) }.toIntArray()
public val numElements: Int get() = JNoa.getNumel(tensorHandle)
public val device: Device get() = Device.fromInt(JNoa.getDevice(tensorHandle))
public fun save(path: String): Unit = JNoa.saveTensor(tensorHandle, path)
override fun toString(): String = JNoa.tensorToString(tensorHandle)
@PerformancePitfall
override fun elements(): Sequence<Pair<IntArray, T>> {
if (dimension == 0) {
return emptySequence()
}
val indices = (1..numElements).asSequence().map {
TensorLinearStructure.indexFromOffset(it - 1, strides, dimension)
}
return indices.map { it to get(it) }
}
public fun asDouble(): NoaDoubleTensor = NoaDoubleTensor(
scope = scope,
tensorHandle = JNoa.copyToDouble(this.tensorHandle)
)
public fun asFloat(): NoaFloatTensor = NoaFloatTensor(
scope = scope,
tensorHandle = JNoa.copyToFloat(this.tensorHandle)
)
public fun asLong(): NoaLongTensor = NoaLongTensor(
scope = scope,
tensorHandle = JNoa.copyToLong(this.tensorHandle)
)
public fun asInt(): NoaIntTensor = NoaIntTensor(
scope = scope,
tensorHandle = JNoa.copyToInt(this.tensorHandle)
)
}
public sealed class NoaTensorOverField<T>
protected constructor(scope: NoaScope, tensorHandle: Long) :
NoaTensor<T>(scope, tensorHandle) {
public var requiresGrad: Boolean
get() = JNoa.requiresGrad(tensorHandle)
set(value) = JNoa.setRequiresGrad(tensorHandle, value)
}
public class NoaDoubleTensor
internal constructor(scope: NoaScope, tensorHandle: TensorHandle) :
NoaTensorOverField<Double>(scope, tensorHandle) {
override fun item(): Double = JNoa.getItemDouble(tensorHandle)
override fun get(index: IntArray): Double = JNoa.getDouble(tensorHandle, index)
override fun set(index: IntArray, value: Double) {
JNoa.setDouble(tensorHandle, index, value)
}
}
public class NoaFloatTensor
internal constructor(scope: NoaScope, tensorHandle: TensorHandle) :
NoaTensorOverField<Float>(scope, tensorHandle) {
override fun item(): Float = JNoa.getItemFloat(tensorHandle)
override fun get(index: IntArray): Float = JNoa.getFloat(tensorHandle, index)
override fun set(index: IntArray, value: Float) {
JNoa.setFloat(tensorHandle, index, value)
}
}
public class NoaLongTensor
internal constructor(scope: NoaScope, tensorHandle: TensorHandle) :
NoaTensor<Long>(scope, tensorHandle) {
override fun item(): Long = JNoa.getItemLong(tensorHandle)
override fun get(index: IntArray): Long = JNoa.getLong(tensorHandle, index)
override fun set(index: IntArray, value: Long) {
JNoa.setLong(tensorHandle, index, value)
}
}
public class NoaIntTensor
internal constructor(scope: NoaScope, tensorHandle: TensorHandle) :
NoaTensor<Int>(scope, tensorHandle) {
override fun item(): Int = JNoa.getItemInt(tensorHandle)
override fun get(index: IntArray): Int = JNoa.getInt(tensorHandle, index)
override fun set(index: IntArray, value: Int) {
JNoa.setInt(tensorHandle, index, value)
}
}
public sealed class Device {
public object CPU : Device() {
override fun toString(): String {
return "CPU"
}
}
public data class CUDA(val index: Int) : Device()
public fun toInt(): Int = when (this) {
is CPU -> 0
is CUDA -> this.index + 1
}
public companion object {
public fun fromInt(deviceInt: Int): Device {
return if (deviceInt == 0) CPU else CUDA(
deviceInt - 1
)
}
}
}
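The integer encoding above maps the CPU to 0 and shifts CUDA ordinals by one; a quick round-trip check:

    check(Device.CPU.toInt() == 0)
    check(Device.CUDA(0).toInt() == 1)
    check(Device.fromInt(2) == Device.CUDA(1))   // data class equality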


@@ -0,0 +1,35 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa
import space.kscience.kmath.operations.Field
public fun cudaAvailable(): Boolean {
return JNoa.cudaIsAvailable()
}
public fun getNumThreads(): Int {
return JNoa.getNumThreads()
}
public fun setNumThreads(numThreads: Int): Unit {
JNoa.setNumThreads(numThreads)
}
public fun setSeed(seed: Int): Unit {
JNoa.setSeed(seed)
}
public inline fun <T, A : Field<T>, ArrayT,
GradTensorT : NoaTensorOverField<T>,
GradAlgebraT : NoaPartialDivisionAlgebra<T, A, ArrayT, GradTensorT>>
GradAlgebraT.withGradAt(
tensor: GradTensorT,
block: GradAlgebraT.(GradTensorT) -> GradTensorT
): GradTensorT {
tensor.requiresGrad = true
return this.block(tensor)
}
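A sketch of withGradAt on a quadratic form; autoGradient is the extension the autograd tests below rely on, and for 0.5f * (x dot x) the gradient should reproduce x itself.

    NoaFloat {
        val x = randNormal(intArrayOf(3), Device.CPU)
        val y = withGradAt(x) { t -> 0.5f * (t dot t) }
        val dx = y.autoGradient(x)
        val residual = (dx - x).abs().sum()   // expected to be ~0
    }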

File diff suppressed because it is too large.


@@ -0,0 +1,84 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa
import kotlin.test.Test
import kotlin.test.assertTrue
internal fun NoaDouble.testingLinearStructure(device: Device = Device.CPU): Unit {
val shape = intArrayOf(3)
val tensorA = full(value = -4.5, shape = shape, device = device)
val tensorB = full(value = 10.9, shape = shape, device = device)
val tensorC = full(value = 789.3, shape = shape, device = device)
val tensorD = full(value = -72.9, shape = shape, device = device)
val tensorE = full(value = 553.1, shape = shape, device = device)
val result = 15.8 * tensorA - 1.5 * tensorB * (-tensorD) + 0.02 * tensorC / tensorE - 39.4
val expected = copyFromArray(
array = (1..3).map {
15.8 * (-4.5) - 1.5 * 10.9 * 72.9 + 0.02 * 789.3 / 553.1 - 39.4
}.toDoubleArray(),
shape = shape,
device = device
)
val assignResult = full(value = 0.0, shape = shape, device = device)
tensorA *= 15.8
tensorB *= 1.5
tensorB *= -tensorD
tensorC *= 0.02
tensorC /= tensorE
assignResult += tensorA
assignResult -= tensorB
assignResult += tensorC
assignResult += -39.4
val error = (expected - result).abs().sum() +
(expected - assignResult).abs().sum()
assertTrue(error < TOLERANCE)
}
internal fun NoaDouble.testingBatchedSVD(device: Device = Device.CPU): Unit {
val tensor = randNormal(shape = intArrayOf(7, 5, 3), device = device)
val (tensorU, tensorS, tensorV) = tensor.svd()
val error = tensor - (tensorU dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1)))
assertTrue(error.abs().sum() < TOLERANCE)
}
internal fun NoaDouble.testingBatchedSymEig(device: Device = Device.CPU): Unit {
val tensor = randNormal(shape = intArrayOf(5, 5), device = device)
val tensorSigma = tensor + tensor.transpose(-2, -1)
val (tensorS, tensorV) = tensorSigma.symEig()
val error = tensorSigma - (tensorV dot (diagonalEmbedding(tensorS) dot tensorV.transpose(-2, -1)))
assertTrue(error.abs().sum() < TOLERANCE)
}
class TestAlgebra {
@Test
fun testLinearStructure() = NoaDouble {
withCuda { device ->
testingLinearStructure(device)
}
}!!
@Test
fun testBatchedSVD() = NoaDouble {
withCuda { device ->
testingBatchedSVD(device)
}
}!!
@Test
fun testBatchedSymEig() = NoaDouble {
withCuda { device ->
testingBatchedSymEig(device)
}
}!!
}


@@ -0,0 +1,69 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa
import kotlin.test.Test
import kotlin.test.assertTrue
internal fun NoaFloat.testingAutoGrad(device: Device = Device.CPU): Unit {
setSeed(SEED)
val dim = 3
val tensorX = randNormal(shape = intArrayOf(dim), device = device)
val randFeatures = randNormal(shape = intArrayOf(dim, dim), device = device)
val tensorSigma = randFeatures + randFeatures.transpose(0, 1)
val tensorMu = randNormal(shape = intArrayOf(dim), device = device)
val expressionAtX = withGradAt(tensorX) { x ->
0.5f * (x dot (tensorSigma dot x)) + (tensorMu dot x) + 25.9f
}
val gradientAtX = expressionAtX.autoGradient(tensorX, retainGraph = true)
val hessianAtX = expressionAtX.autoHessian(tensorX)
val expectedGradientAtX = (tensorSigma dot tensorX) + tensorMu
val error = (gradientAtX - expectedGradientAtX).abs().sum() +
(hessianAtX - tensorSigma).abs().sum()
assertTrue(error < TOLERANCE)
}
internal fun NoaFloat.testingBatchedAutoGrad(device: Device = Device.CPU): Unit {
setSeed(SEED)
val batch = intArrayOf(2)
val dim = 2
val tensorX = randNormal(shape = batch + intArrayOf(1, dim), device = device)
val randFeatures = randNormal(shape = batch + intArrayOf(dim, dim), device = device)
val tensorSigma = randFeatures + randFeatures.transpose(-2, -1)
val tensorMu = randNormal(shape = batch + intArrayOf(1, dim), device = device)
val expressionAtX = withGradAt(tensorX) { x ->
val xt = x.transpose(-1, -2)
(0.5f * (x dot (tensorSigma dot xt)) + (tensorMu dot xt) + 58.2f).sumAll()
}
val gradientAtX = expressionAtX.autoGradient(tensorX)
val expectedGradientAtX = (tensorX dot tensorSigma) + tensorMu
val error = (gradientAtX - expectedGradientAtX).abs().sum()
assertTrue(error < TOLERANCE)
}
class TestAutoGrad {
@Test
fun testAutoGrad() = NoaFloat {
withCuda { device ->
testingAutoGrad(device)
}
}!!
@Test
fun testBatchedAutoGrad() = NoaFloat {
withCuda { device ->
testingBatchedAutoGrad(device)
}
}!!
}


@@ -0,0 +1,199 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa
import java.io.File
import kotlin.test.Ignore
import kotlin.test.Test
import kotlin.test.assertTrue
class TestJitModules {
private val resources = File("").resolve("src/test/resources")
private val dataPath = resources.resolve("data.pt").absolutePath
private val netPath = resources.resolve("net.pt").absolutePath
private val lossPath = resources.resolve("loss.pt").absolutePath
@Test
fun testOptimisationAdam() = NoaFloat {
setSeed(SEED)
val dataModule = loadJitModule(dataPath)
val netModule = loadJitModule(netPath)
val lossModule = loadJitModule(lossPath)
val xTrain = dataModule.getBuffer("x_train")
val yTrain = dataModule.getBuffer("y_train")
val xVal = dataModule.getBuffer("x_val")
val yVal = dataModule.getBuffer("y_val")
netModule.train(true)
lossModule.setBuffer("target", yTrain)
val yPred = netModule.forward(xTrain)
val loss = lossModule.forward(yPred)
val optimiser = netModule.adamOptimiser(0.005)
repeat(250){
optimiser.zeroGrad()
netModule.forwardAssign(xTrain, yPred)
lossModule.forwardAssign(yPred, loss)
loss.backward()
optimiser.step()
}
netModule.forwardAssign(xVal, yPred)
lossModule.setBuffer("target", yVal)
lossModule.forwardAssign(yPred, loss)
assertTrue(loss.value() < 0.1)
}!!
@Test
fun testOptimisationRms() = NoaFloat {
setSeed(SEED)
val dataModule = loadJitModule(dataPath)
val netModule = loadJitModule(netPath)
val lossModule = loadJitModule(lossPath)
val xTrain = dataModule.getBuffer("x_train")
val yTrain = dataModule.getBuffer("y_train")
val xVal = dataModule.getBuffer("x_val")
val yVal = dataModule.getBuffer("y_val")
netModule.train(true)
lossModule.setBuffer("target", yTrain)
val yPred = netModule.forward(xTrain)
val loss = lossModule.forward(yPred)
val optimiser = netModule.rmsOptimiser(0.005, 0.99, 1e-08, 0.0, 0.0, false)
repeat(250){
optimiser.zeroGrad()
netModule.forwardAssign(xTrain, yPred)
lossModule.forwardAssign(yPred, loss)
loss.backward()
optimiser.step()
}
netModule.forwardAssign(xVal, yPred)
lossModule.setBuffer("target", yVal)
lossModule.forwardAssign(yPred, loss)
assertTrue(loss.value() < 0.1)
}!!
@Test
fun testOptimisationAdamW() = NoaFloat {
setSeed(SEED)
val dataModule = loadJitModule(dataPath)
val netModule = loadJitModule(netPath)
val lossModule = loadJitModule(lossPath)
val xTrain = dataModule.getBuffer("x_train")
val yTrain = dataModule.getBuffer("y_train")
val xVal = dataModule.getBuffer("x_val")
val yVal = dataModule.getBuffer("y_val")
netModule.train(true)
lossModule.setBuffer("target", yTrain)
val yPred = netModule.forward(xTrain)
val loss = lossModule.forward(yPred)
val optimiser = netModule.adamWOptimiser(0.005, 0.9, 0.999, 1e-08, 0.01, false)
repeat(250){
optimiser.zeroGrad()
netModule.forwardAssign(xTrain, yPred)
lossModule.forwardAssign(yPred, loss)
loss.backward()
optimiser.step()
}
netModule.forwardAssign(xVal, yPred)
lossModule.setBuffer("target", yVal)
lossModule.forwardAssign(yPred, loss)
assertTrue(loss.value() < 0.1)
}!!
@Test
fun testOptimisationAdagrad() = NoaFloat {
setSeed(SEED)
val dataModule = loadJitModule(dataPath)
val netModule = loadJitModule(netPath)
val lossModule = loadJitModule(lossPath)
val xTrain = dataModule.getBuffer("x_train")
val yTrain = dataModule.getBuffer("y_train")
val xVal = dataModule.getBuffer("x_val")
val yVal = dataModule.getBuffer("y_val")
netModule.train(true)
lossModule.setBuffer("target", yTrain)
val yPred = netModule.forward(xTrain)
val loss = lossModule.forward(yPred)
val optimiser = netModule.adagradOptimiser(0.05, 0.0, 0.0, 0.0, 1e-10)
repeat(250){
optimiser.zeroGrad()
netModule.forwardAssign(xTrain, yPred)
lossModule.forwardAssign(yPred, loss)
loss.backward()
optimiser.step()
}
netModule.forwardAssign(xVal, yPred)
lossModule.setBuffer("target", yVal)
lossModule.forwardAssign(yPred, loss)
assertTrue(loss.value() < 0.1)
}!!
@Test
fun testOptimisationSgd() = NoaFloat {
setSeed(SEED)
val dataModule = loadJitModule(dataPath)
val netModule = loadJitModule(netPath)
val lossModule = loadJitModule(lossPath)
val xTrain = dataModule.getBuffer("x_train")
val yTrain = dataModule.getBuffer("y_train")
val xVal = dataModule.getBuffer("x_val")
val yVal = dataModule.getBuffer("y_val")
netModule.train(true)
lossModule.setBuffer("target", yTrain)
val yPred = netModule.forward(xTrain)
val loss = lossModule.forward(yPred)
val optimiser = netModule.sgdOptimiser(0.01, 0.9, 0.0, 0.0, false)
repeat(400){
optimiser.zeroGrad()
netModule.forwardAssign(xTrain, yPred)
lossModule.forwardAssign(yPred, loss)
loss.backward()
optimiser.step()
}
netModule.forwardAssign(xVal, yPred)
lossModule.setBuffer("target", yVal)
lossModule.forwardAssign(yPred, loss)
assertTrue(loss.value() < 0.1)
}!!
}
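The optimiser factories above take positional options only; the parameter names in the sketch below are guesses mapped from the corresponding torch.optim defaults and are not confirmed by this changeset.

    NoaFloat {
        val net = loadJitModule("net.pt", Device.CPU)   // placeholder path
        net.sgdOptimiser(
            /* learningRate = */ 0.01,
            /* momentum     = */ 0.9,
            /* dampening    = */ 0.0,
            /* weightDecay  = */ 0.0,
            /* nesterov     = */ false
        )
    }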


@@ -0,0 +1,132 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa
import java.io.File
import kotlin.test.Test
import kotlin.test.assertEquals
import kotlin.test.assertTrue
internal fun NoaFloat.testingCopying(device: Device = Device.CPU): Unit {
val array = (1..24).map { 10f * it * it }.toFloatArray()
val shape = intArrayOf(2, 3, 4)
val tensor = copyFromArray(array, shape = shape, device = device)
val copyOfTensor = tensor.copy()
tensor[intArrayOf(1, 2, 3)] = 0.1f
assertTrue(copyOfTensor.copyToArray() contentEquals array)
assertEquals(0.1f, tensor[intArrayOf(1, 2, 3)])
if (device != Device.CPU) {
val normalCpu = randNormal(intArrayOf(2, 3))
val normalGpu = normalCpu.copyToDevice(device)
assertTrue(normalCpu.copyToArray() contentEquals normalGpu.copyToArray())
val uniformGpu = randUniform(intArrayOf(3, 2), device)
val uniformCpu = uniformGpu.copyToDevice(Device.CPU)
assertTrue(uniformGpu.copyToArray() contentEquals uniformCpu.copyToArray())
}
}
internal fun NoaInt.testingViewWithNoCopy(device: Device = Device.CPU) {
val tensor = copyFromArray(intArrayOf(1, 2, 3, 4, 5, 6), shape = intArrayOf(6), device)
val viewTensor = tensor.view(intArrayOf(2, 3))
assertTrue(viewTensor.shape contentEquals intArrayOf(2, 3))
viewTensor[intArrayOf(0, 0)] = 10
assertEquals(tensor[intArrayOf(0)], 10)
}
internal fun NoaFloat.testingSerialisation(tensorPath: String, device: Device = Device.CPU) {
val tensor = copyFromArray(floatArrayOf(45.5f, 98.6f), intArrayOf(2), device)
tensor.save(tensorPath)
val loadedTensor = loadTensor(tensorPath, device)
assertTrue(tensor.copyToArray() contentEquals loadedTensor.copyToArray())
}
internal fun NoaFloat.testingBatchedGetterSetter(device: Device = Device.CPU) {
val array = (1..8).map { 100f * it }.toFloatArray()
val tensor = full(0.0f, intArrayOf(2, 2, 2), device)
tensor.assignFromArray(array)
assertTrue(tensor.copyToArray() contentEquals array)
val updateArray = floatArrayOf(15f, 20f)
val updateTensor = full(5.0f, intArrayOf(4), device)
updateTensor[0, Slice(1, 3)] = updateArray
NoaFloat {
tensor[0][1] = updateArray
tensor[1] = updateTensor.view(intArrayOf(2, 2))
updateTensor[0, Slice(2, 4)] = updateTensor[0, Slice(0, 2)]
}!!
assertTrue(
tensor.copyToArray() contentEquals
floatArrayOf(100f, 200f, 15f, 20f, 5f, 15f, 20f, 5f)
)
assertTrue(
updateTensor.copyToArray() contentEquals
floatArrayOf(5f, 15f, 5f, 15f)
)
}
class TestTensor {
private val resources = File("").resolve("src/test/resources")
private val tensorPath = resources.resolve("tensor.pt").absolutePath
@Test
fun testCopying() = NoaFloat {
withCuda { device ->
testingCopying(device)
}
}!!
@Test
fun testRequiresGrad() = NoaFloat {
val tensor = randNormal(intArrayOf(3))
assertTrue(!tensor.requiresGrad)
tensor.requiresGrad = true
assertTrue(tensor.requiresGrad)
tensor.requiresGrad = false
assertTrue(!tensor.requiresGrad)
tensor.requiresGrad = true
val detachedTensor = tensor.detachFromGraph()
assertTrue(!detachedTensor.requiresGrad)
}!!
@Test
fun testTypeMoving() = NoaFloat {
val tensorInt = copyFromArray(floatArrayOf(1f, 2f, 3f), intArrayOf(3)).asInt()
NoaInt {
val temporalTensor = copyFromArray(intArrayOf(4, 5, 6), intArrayOf(3))
tensorInt swap temporalTensor
assertTrue(temporalTensor.copyToArray() contentEquals intArrayOf(1, 2, 3))
}
assertTrue(tensorInt.asFloat().copyToArray() contentEquals floatArrayOf(4f, 5f, 6f))
}!!
@Test
fun testViewWithNoCopy() = NoaInt {
withCuda { device ->
testingViewWithNoCopy(device)
}
}!!
@Test
fun testSerialisation() = NoaFloat {
withCuda { device ->
testingSerialisation(tensorPath, device)
}
}!!
@Test
fun testBatchedGetterSetter() = NoaFloat {
withCuda { device ->
testingBatchedGetterSetter(device)
}
}!!
}


@@ -0,0 +1,76 @@
/*
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.noa
import space.kscience.kmath.noa.memory.NoaScope
import space.kscience.kmath.operations.Ring
import kotlin.test.Test
import kotlin.test.assertEquals
internal val SEED = 987654
internal val TOLERANCE = 1e-6
internal fun <T, A : Ring<T>, ArrayT, TensorT : NoaTensor<T>, AlgebraT : NoaAlgebra<T, A, ArrayT, TensorT>>
AlgebraT.withCuda(block: AlgebraT.(Device) -> Unit): Unit {
this.block(Device.CPU)
if (cudaAvailable()) this.block(Device.CUDA(0))
}
internal fun NoaFloat.testingSetSeed(device: Device = Device.CPU): Unit {
setSeed(SEED)
val integral = randDiscrete(0, 100, IntArray(0), device = device).value()
val normal = randNormal(IntArray(0), device = device).value()
val uniform = randUniform(IntArray(0), device = device).value()
setSeed(SEED)
val nextIntegral = randDiscrete(0, 100, IntArray(0), device = device).value()
val nextNormal = randNormal(IntArray(0), device = device).value()
val nextUniform = randUniform(IntArray(0), device = device).value()
assertEquals(normal, nextNormal)
assertEquals(uniform, nextUniform)
assertEquals(integral, nextIntegral)
}
class TestUtils {
@Test
fun testException() {
val i = try {
JNoa.testException(5)
} catch (e: NoaException) {
10
}
assertEquals(i, 10)
}
@Test
fun testSetNumThreads() {
val numThreads = 2
setNumThreads(numThreads)
assertEquals(numThreads, getNumThreads())
}
@Test
fun testSetSeed() = NoaFloat {
withCuda { device ->
testingSetSeed(device)
}
}!!
@Test
fun testScoping(): Unit {
val scope = NoaScope()
val tensor = NoaFloat(scope){
full(5f, intArrayOf(1))
}!!
assertEquals(tensor.numElements, 1)
assertEquals(scope.disposables.size, 1)
scope.disposeAll()
assertEquals(scope.disposables.size, 0)
}
}

Binary file not shown.


@@ -0,0 +1,52 @@
import torch
torch.manual_seed(987654)
n_tr = 7
n_val = 300
x_val = torch.linspace(-5, 5, n_val).view(-1, 1)
y_val = torch.sin(x_val)
x_train = torch.linspace(-3.14, 3.14, n_tr).view(-1, 1)
y_train = torch.sin(x_train) + torch.randn_like(x_train) * 0.1
class Data(torch.nn.Module):
def __init__(self):
super(Data, self).__init__()
self.register_buffer('x_val', x_val)
self.register_buffer('y_val', y_val)
self.register_buffer('x_train', x_train)
self.register_buffer('y_train', y_train)
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l1 = torch.nn.Linear(1, 10, bias = True)
self.l2 = torch.nn.Linear(10, 10, bias = True)
self.l3 = torch.nn.Linear(10, 1, bias = True)
def forward(self, x):
x = self.l1(x)
x = torch.relu(x)
x = self.l2(x)
x = torch.relu(x)
x = self.l3(x)
return x
class Loss(torch.nn.Module):
def __init__(self, target):
super(Loss, self).__init__()
self.register_buffer('target', target)
self.loss = torch.nn.MSELoss()
def forward(self, x):
return self.loss(x, self.target)
torch.jit.script(Data()).save('data.pt')
torch.jit.script(Net()).save('net.pt')
torch.jit.script(Loss(y_train)).save('loss.pt')

Binary file not shown.

Binary file not shown.

Binary file not shown.


@@ -16,7 +16,7 @@ kotlin.sourceSets {
commonMain {
dependencies {
api(project(":kmath-core"))
api(project(":kmath-stat"))
implementation(project(":kmath-stat"))
}
}
}


@@ -67,8 +67,10 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
* LUP decomposition
*
* Computes the LUP decomposition of a matrix or a batch of matrices.
* Given a tensor `input`, return tensors (P, L, U) satisfying `P dot input = L dot U`,
* with `P` being a permutation matrix or batch of matrices,
* Given a tensor `input`, return tensors (P, L, U) satisfying either
* `P dot input = L dot U` or `input = P dot L dot U`,
* depending on the implementation, with:
* `P` being a permutation matrix or batch of matrices,
* `L` being a lower triangular matrix or batch of matrices,
* `U` being an upper triangular matrix or batch of matrices.
*
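A hedged check of which convention a backend follows, assuming the `lu()` member that yields the (P, L, U) triple elsewhere in kmath's tensor API (not shown in this diff).

    NoaDouble {
        val matrix = randNormal(intArrayOf(4, 4), Device.CPU)
        val (p, l, u) = matrix.lu()
        val permutedInput = (p dot matrix) - (l dot u)   // P dot input = L dot U
        val reassembled = matrix - (p dot (l dot u))     // input = P dot L dot U
        // whichever residual is ~0 tells you the convention in use
        permutedInput.abs().sum() to reassembled.abs().sum()
    }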

kotlin-js-store/yarn.lock — normal file, 2005 lines

File diff suppressed because it is too large.


@@ -30,3 +30,8 @@ include(
":examples",
":benchmarks",
)
if (System.getProperty("os.name") == "Linux") {
include(":kmath-noa")
}