diff --git a/README.md b/README.md
index 97ce164e1..8796d7aac 100644
--- a/README.md
+++ b/README.md
@@ -236,6 +236,18 @@ One can still use generic algebras though.
> **Maturity**: EXPERIMENTAL
+* ### [kmath-tensors](kmath-tensors)
+>
+>
+> **Maturity**: PROTOTYPE
+>
+> **Features:**
+> - [tensor algebra](kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt) : Basic linear algebra operations on tensors (plus, dot, etc.)
+> - [tensor algebra with broadcasting](kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/BroadcastDoubleTensorAlgebra.kt) : Basic linear algebra operations implemented with broadcasting.
+> - [linear algebra operations](kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt) : Advanced linear algebra operations like LU decomposition, SVD, etc.
+
+
+
* ### [kmath-viktor](kmath-viktor)
>
>
diff --git a/examples/build.gradle.kts b/examples/build.gradle.kts
index 571949b7b..1c7caf1b9 100644
--- a/examples/build.gradle.kts
+++ b/examples/build.gradle.kts
@@ -25,6 +25,7 @@ dependencies {
implementation(project(":kmath-dimensions"))
implementation(project(":kmath-ejml"))
implementation(project(":kmath-nd4j"))
+ implementation(project(":kmath-tensors"))
implementation(project(":kmath-for-real"))
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt
new file mode 100644
index 000000000..6fbf16a91
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/DataSetNormalization.kt
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+
+
+// Dataset normalization
+
+fun main() {
+
+ // work in context with broadcast methods
+ BroadcastDoubleTensorAlgebra {
+ // take dataset of 5-element vectors from normal distribution
+ val dataset = randomNormal(intArrayOf(100, 5)) * 1.5 // all elements from N(0, 1.5)
+
+ dataset += fromArray(
+ intArrayOf(5),
+ doubleArrayOf(0.0, 1.0, 1.5, 3.0, 5.0) // column means
+ )
+
+
+ // find out mean and standard deviation of each column
+ val mean = dataset.mean(0, false)
+ val std = dataset.std(0, false)
+
+ println("Mean:\n$mean")
+ println("Standard deviation:\n$std")
+
+ // we can also compute other statistics, such as the minimum and maximum of each column
+ println("Minimum:\n${dataset.min(0, false)}")
+ println("Maximum:\n${dataset.max(0, false)}")
+
+ // now we can standardize the dataset by subtracting the mean and dividing by the standard deviation
+ val datasetScaled = (dataset - mean) / std
+
+ // find out mean and std of scaled dataset
+
+ println("Mean of scaled:\n${datasetScaled.mean(0, false)}")
+ println("Standard deviation of scaled:\n${datasetScaled.std(0, false)}")
+ }
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt
new file mode 100644
index 000000000..78370b517
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/LinearSystemSolvingWithLUP.kt
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.DoubleTensor
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+
+// solving linear system with LUP decomposition
+
+fun main() {
+
+ // work in context with linear operations
+ BroadcastDoubleTensorAlgebra {
+
+ // set true value of x
+ val trueX = fromArray(
+ intArrayOf(4),
+ doubleArrayOf(-2.0, 1.5, 6.8, -2.4)
+ )
+
+ // and A matrix
+ val a = fromArray(
+ intArrayOf(4, 4),
+ doubleArrayOf(
+ 0.5, 10.5, 4.5, 1.0,
+ 8.5, 0.9, 12.8, 0.1,
+ 5.56, 9.19, 7.62, 5.45,
+ 1.0, 2.0, -3.0, -2.5
+ )
+ )
+
+ // calculate the right-hand side b
+ val b = a dot trueX
+
+ // check out A and b
+ println("A:\n$a")
+ println("b:\n$b")
+
+ // solve `Ax = b` system using LUP decomposition
+
+ // get P, L, U such that PA = LU
+ val (p, l, u) = a.lu()
+
+ // check that P is permutation matrix
+ println("P:\n$p")
+ // L is lower triangular matrix and U is upper triangular matrix
+ println("L:\n$l")
+ println("U:\n$u")
+ // and PA = LU
+ println("PA:\n${p dot a}")
+ println("LU:\n${l dot u}")
+
+ /* Ax = b;
+ PAx = Pb;
+ LUx = Pb;
+ let y = Ux, then
+ Ly = Pb -- this system can be easily solved, since the matrix L is lower triangular;
+ Ux = y can be solved the same way, since the matrix U is upper triangular
+ */
+
+
+
+ // this function returns the solution x of the system lx = b, where l must be lower triangular
+ fun solveLT(l: DoubleTensor, b: DoubleTensor): DoubleTensor {
+ val n = l.shape[0]
+ val x = zeros(intArrayOf(n))
+ for (i in 0 until n){
+ x[intArrayOf(i)] = (b[intArrayOf(i)] - l[i].dot(x).value()) / l[intArrayOf(i, i)]
+ }
+ return x
+ }
+
+ val y = solveLT(l, p dot b)
+
+ // the solveLT(l, b) function can be adapted to an upper triangular matrix with the help of the permutation matrix revMat
+ // create it by placing ones on the anti-diagonal
+ val revMat = u.zeroesLike()
+ val n = revMat.shape[0]
+ for (i in 0 until n) {
+ revMat[intArrayOf(i, n - 1 - i)] = 1.0
+ }
+
+ // solution of system ux = b, u should be upper triangular
+ fun solveUT(u: DoubleTensor, b: DoubleTensor): DoubleTensor = revMat dot solveLT(
+ revMat dot u dot revMat, revMat dot b
+ )
+
+ val x = solveUT(u, y)
+
+ println("True x:\n$trueX")
+ println("x found with the LUP method:\n$x")
+ }
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt
new file mode 100644
index 000000000..874ac8034
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/NeuralNetwork.kt
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.DoubleTensor
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.toDoubleArray
+import kotlin.math.sqrt
+
+const val seed = 100500L
+
+// Simple feedforward neural network with backpropagation training
+
+// interface of network layer
+interface Layer {
+ fun forward(input: DoubleTensor): DoubleTensor
+ fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor
+}
+
+// activation layer
+open class Activation(
+ val activation: (DoubleTensor) -> DoubleTensor,
+ val activationDer: (DoubleTensor) -> DoubleTensor
+) : Layer {
+ override fun forward(input: DoubleTensor): DoubleTensor {
+ return activation(input)
+ }
+
+ override fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor {
+ return DoubleTensorAlgebra { outputError * activationDer(input) }
+ }
+}
+
+fun relu(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ x.map { if (it > 0) it else 0.0 }
+}
+
+fun reluDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ x.map { if (it > 0) 1.0 else 0.0 }
+}
+
+// activation layer with relu activator
+class ReLU : Activation(::relu, ::reluDer)
+
+fun sigmoid(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ 1.0 / (1.0 + (-x).exp())
+}
+
+fun sigmoidDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ sigmoid(x) * (1.0 - sigmoid(x))
+}
+
+// activation layer with sigmoid activator
+class Sigmoid : Activation(::sigmoid, ::sigmoidDer)
+
+// dense layer
+class Dense(
+ private val inputUnits: Int,
+ private val outputUnits: Int,
+ private val learningRate: Double = 0.1
+) : Layer {
+
+ private val weights: DoubleTensor = DoubleTensorAlgebra {
+ randomNormal(
+ intArrayOf(inputUnits, outputUnits),
+ seed
+ ) * sqrt(2.0 / (inputUnits + outputUnits))
+ }
+
+ private val bias: DoubleTensor = DoubleTensorAlgebra { zeros(intArrayOf(outputUnits)) }
+
+ override fun forward(input: DoubleTensor): DoubleTensor {
+ return BroadcastDoubleTensorAlgebra { (input dot weights) + bias }
+ }
+
+ override fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
+ val gradInput = outputError dot weights.transpose()
+
+ val gradW = input.transpose() dot outputError
+ val gradBias = outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble()
+
+ weights -= learningRate * gradW
+ bias -= learningRate * gradBias
+
+ gradInput
+ }
+
+}
+
+// simple accuracy equal to the proportion of correct answers
+fun accuracy(yPred: DoubleTensor, yTrue: DoubleTensor): Double {
+ check(yPred.shape contentEquals yTrue.shape)
+ val n = yPred.shape[0]
+ var correctCnt = 0
+ for (i in 0 until n) {
+ if (yPred[intArrayOf(i, 0)] == yTrue[intArrayOf(i, 0)]) {
+ correctCnt += 1
+ }
+ }
+ return correctCnt.toDouble() / n.toDouble()
+}
+
+// neural network class
+class NeuralNetwork(private val layers: List<Layer>) {
+ private fun softMaxLoss(yPred: DoubleTensor, yTrue: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra {
+
+ val onesForAnswers = yPred.zeroesLike()
+ yTrue.toDoubleArray().forEachIndexed { index, labelDouble ->
+ val label = labelDouble.toInt()
+ onesForAnswers[intArrayOf(index, label)] = 1.0
+ }
+
+ val softmaxValue = yPred.exp() / yPred.exp().sum(dim = 1, keepDim = true)
+
+ (-onesForAnswers + softmaxValue) / (yPred.shape[0].toDouble())
+ }
+
+ @OptIn(ExperimentalStdlibApi::class)
+ private fun forward(x: DoubleTensor): List<DoubleTensor> {
+ var input = x
+
+ return buildList {
+ layers.forEach { layer ->
+ val output = layer.forward(input)
+ add(output)
+ input = output
+ }
+ }
+ }
+
+ @OptIn(ExperimentalStdlibApi::class)
+ private fun train(xTrain: DoubleTensor, yTrain: DoubleTensor) {
+ val layerInputs = buildList {
+ add(xTrain)
+ addAll(forward(xTrain))
+ }
+
+ var lossGrad = softMaxLoss(layerInputs.last(), yTrain)
+
+ layers.zip(layerInputs).reversed().forEach { (layer, input) ->
+ lossGrad = layer.backward(input, lossGrad)
+ }
+ }
+
+ fun fit(xTrain: DoubleTensor, yTrain: DoubleTensor, batchSize: Int, epochs: Int) = DoubleTensorAlgebra {
+ fun iterBatch(x: DoubleTensor, y: DoubleTensor): Sequence<Pair<DoubleTensor, DoubleTensor>> = sequence {
+ val n = x.shape[0]
+ val shuffledIndices = (0 until n).shuffled()
+ for (i in 0 until n step batchSize) {
+ val excerptIndices = shuffledIndices.drop(i).take(batchSize).toIntArray()
+ val batch = x.rowsByIndices(excerptIndices) to y.rowsByIndices(excerptIndices)
+ yield(batch)
+ }
+ }
+
+ for (epoch in 0 until epochs) {
+ println("Epoch ${epoch + 1}/$epochs")
+ for ((xBatch, yBatch) in iterBatch(xTrain, yTrain)) {
+ train(xBatch, yBatch)
+ }
+ println("Accuracy:${accuracy(yTrain, predict(xTrain).argMax(1, true))}")
+ }
+ }
+
+ fun predict(x: DoubleTensor): DoubleTensor {
+ return forward(x).last()
+ }
+
+}
+
+
+@OptIn(ExperimentalStdlibApi::class)
+fun main() {
+ BroadcastDoubleTensorAlgebra {
+ val features = 5
+ val sampleSize = 250
+ val trainSize = 180
+ val testSize = sampleSize - trainSize
+
+ // take sample of features from normal distribution
+ val x = randomNormal(intArrayOf(sampleSize, features), seed) * 2.5
+
+ x += fromArray(
+ intArrayOf(5),
+ doubleArrayOf(0.0, -1.0, -2.5, -3.0, 5.5) // column means
+ )
+
+
+ // define the class label as '1' if the sum of features is greater than 0 and '0' otherwise
+ val y = fromArray(
+ intArrayOf(sampleSize, 1),
+ DoubleArray(sampleSize) { i ->
+ if (x[i].sum() > 0.0) {
+ 1.0
+ } else {
+ 0.0
+ }
+ }
+ )
+
+ // split into train and test parts
+ val trainIndices = (0 until trainSize).toList().toIntArray()
+ val testIndices = (trainSize until sampleSize).toList().toIntArray()
+
+ val xTrain = x.rowsByIndices(trainIndices)
+ val yTrain = y.rowsByIndices(trainIndices)
+
+ val xTest = x.rowsByIndices(testIndices)
+ val yTest = y.rowsByIndices(testIndices)
+
+ // build model
+ val layers = buildList {
+ add(Dense(features, 64))
+ add(ReLU())
+ add(Dense(64, 16))
+ add(ReLU())
+ add(Dense(16, 2))
+ add(Sigmoid())
+ }
+ val model = NeuralNetwork(layers)
+
+ // fit it with train data
+ model.fit(xTrain, yTrain, batchSize = 20, epochs = 10)
+
+ // make prediction
+ val prediction = model.predict(xTest)
+
+ // process raw prediction via argMax
+ val predictionLabels = prediction.argMax(1, true)
+
+ // find out accuracy
+ val acc = accuracy(yTest, predictionLabels)
+ println("Test accuracy:$acc")
+
+ }
+}
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
new file mode 100644
index 000000000..42a0a4ba1
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.DoubleTensor
+import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
+
+import kotlin.math.abs
+
+// OLS estimator using SVD
+
+fun main() {
+ // seed for the random generator
+ val randSeed = 100500L
+
+ // work in context with linear operations
+ DoubleTensorAlgebra {
+ // take coefficient vector from normal distribution
+ val alpha = randomNormal(
+ intArrayOf(5),
+ randSeed
+ ) + fromArray(
+ intArrayOf(5),
+ doubleArrayOf(1.0, 2.5, 3.4, 5.0, 10.1)
+ )
+
+ println("Real alpha:\n$alpha")
+
+ // also take sample of size 20 from normal distribution for x
+ val x = randomNormal(
+ intArrayOf(20, 5),
+ randSeed
+ )
+
+ // calculate y and add gaussian noise (N(0, 0.05))
+ val y = x dot alpha
+ y += y.randomNormalLike(randSeed) * 0.05
+
+ // now restore the coefficient vector with the OLS estimator using SVD
+ val (u, singValues, v) = x.svd()
+
+ // we have to make sure the singular values of the matrix are not close to zero
+ println("Singular values:\n$singValues")
+
+
+ // the inverse Sigma matrix can be restored from the singular values with the diagonalEmbedding function
+ val sigma = diagonalEmbedding(singValues.map{ x -> if (abs(x) < 1e-3) 0.0 else 1.0/x })
+
+ val alphaOLS = v dot sigma dot u.transpose() dot y
+ println("Estimated alpha:\n" +
+ "$alphaOLS")
+
+ // figure out the mean squared error of the approximation
+ fun mse(yTrue: DoubleTensor, yPred: DoubleTensor): Double {
+ require(yTrue.shape.size == 1)
+ require(yTrue.shape contentEquals yPred.shape)
+
+ val diff = yTrue - yPred
+ // mean of squared residuals
+ return diff.dot(diff).value() / yTrue.shape[0]
+ }
+
+ println("MSE: ${mse(alpha, alphaOLS)}")
+ }
+}
\ No newline at end of file
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
new file mode 100644
index 000000000..f8ac13d3f
--- /dev/null
+++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+
+
+// simple PCA
+
+fun main() {
+ val seed = 100500L
+
+ // work in context with broadcast methods
+ BroadcastDoubleTensorAlgebra {
+
+ // take x as the range from 0 until 10
+ val x = fromArray(
+ intArrayOf(10),
+ (0 until 10).toList().map { it.toDouble() }.toDoubleArray()
+ )
+
+ // take y dependent on x with noise
+ val y = 2.0 * x + (3.0 + x.randomNormalLike(seed) * 1.5)
+
+ println("x:\n$x")
+ println("y:\n$y")
+
+ // stack them into single dataset
+ val dataset = stack(listOf(x, y)).transpose()
+
+ // normalize both x and y
+ val xMean = x.mean()
+ val yMean = y.mean()
+
+ val xStd = x.std()
+ val yStd = y.std()
+
+ val xScaled = (x - xMean) / xStd
+ val yScaled = (y - yMean) / yStd
+
+ // save the means and standard deviations for later recovery
+ val mean = fromArray(
+ intArrayOf(2),
+ doubleArrayOf(xMean, yMean)
+ )
+ println("Means:\n$mean")
+
+ val std = fromArray(
+ intArrayOf(2),
+ doubleArrayOf(xStd, yStd)
+ )
+ println("Standard deviations:\n$std")
+
+ // calculate the covariance matrix of scaled x and y
+ val covMatrix = cov(listOf(xScaled, yScaled))
+ println("Covariance matrix:\n$covMatrix")
+
+ // and find out its eigenvectors
+ val (_, evecs) = covMatrix.symEig()
+ val v = evecs[0]
+ println("Eigenvector:\n$v")
+
+ // reduce dimension of dataset
+ val datasetReduced = v dot stack(listOf(xScaled, yScaled))
+ println("Reduced data:\n$datasetReduced")
+
+ // we can restore original data from reduced data.
+ // for example, find 7th element of dataset
+ val n = 7
+ val restored = (datasetReduced[n] dot v.view(intArrayOf(1, 2))) * std + mean
+ println("Original value:\n${dataset[n]}")
+ println("Restored value:\n$restored")
+ }
+}
diff --git a/kmath-core/api/kmath-core.api b/kmath-core/api/kmath-core.api
index fd7a13dcf..a782b8009 100644
--- a/kmath-core/api/kmath-core.api
+++ b/kmath-core/api/kmath-core.api
@@ -750,7 +750,7 @@ public final class space/kscience/kmath/nd/BufferAlgebraNDKt {
public static final fun ring (Lspace/kscience/kmath/nd/AlgebraND$Companion;Lspace/kscience/kmath/operations/Ring;Lkotlin/jvm/functions/Function2;[I)Lspace/kscience/kmath/nd/BufferedRingND;
}
-public final class space/kscience/kmath/nd/BufferND : space/kscience/kmath/nd/StructureND {
+public class space/kscience/kmath/nd/BufferND : space/kscience/kmath/nd/StructureND {
public fun (Lspace/kscience/kmath/nd/Strides;Lspace/kscience/kmath/structures/Buffer;)V
public fun elements ()Lkotlin/sequences/Sequence;
public fun get ([I)Ljava/lang/Object;
@@ -791,10 +791,9 @@ public final class space/kscience/kmath/nd/DefaultStrides : space/kscience/kmath
public fun equals (Ljava/lang/Object;)Z
public fun getLinearSize ()I
public fun getShape ()[I
- public fun getStrides ()Ljava/util/List;
+ public fun getStrides ()[I
public fun hashCode ()I
public fun index (I)[I
- public fun offset ([I)I
}
public final class space/kscience/kmath/nd/DefaultStrides$Companion {
@@ -878,6 +877,22 @@ public abstract interface class space/kscience/kmath/nd/GroupND : space/kscience
public final class space/kscience/kmath/nd/GroupND$Companion {
}
+public final class space/kscience/kmath/nd/MutableBufferND : space/kscience/kmath/nd/BufferND, space/kscience/kmath/nd/MutableStructureND {
+ public fun (Lspace/kscience/kmath/nd/Strides;Lspace/kscience/kmath/structures/MutableBuffer;)V
+ public final fun getMutableBuffer ()Lspace/kscience/kmath/structures/MutableBuffer;
+ public fun set ([ILjava/lang/Object;)V
+}
+
+public abstract interface class space/kscience/kmath/nd/MutableStructure1D : space/kscience/kmath/nd/MutableStructureND, space/kscience/kmath/nd/Structure1D, space/kscience/kmath/structures/MutableBuffer {
+ public fun set ([ILjava/lang/Object;)V
+}
+
+public abstract interface class space/kscience/kmath/nd/MutableStructure2D : space/kscience/kmath/nd/MutableStructureND, space/kscience/kmath/nd/Structure2D {
+ public fun getColumns ()Ljava/util/List;
+ public fun getRows ()Ljava/util/List;
+ public abstract fun set (IILjava/lang/Object;)V
+}
+
public abstract interface class space/kscience/kmath/nd/MutableStructureND : space/kscience/kmath/nd/StructureND {
public abstract fun set ([ILjava/lang/Object;)V
}
@@ -917,10 +932,10 @@ public final class space/kscience/kmath/nd/ShortRingNDKt {
public abstract interface class space/kscience/kmath/nd/Strides {
public abstract fun getLinearSize ()I
public abstract fun getShape ()[I
- public abstract fun getStrides ()Ljava/util/List;
+ public abstract fun getStrides ()[I
public abstract fun index (I)[I
public fun indices ()Lkotlin/sequences/Sequence;
- public abstract fun offset ([I)I
+ public fun offset ([I)I
}
public abstract interface class space/kscience/kmath/nd/Structure1D : space/kscience/kmath/nd/StructureND, space/kscience/kmath/structures/Buffer {
@@ -934,6 +949,7 @@ public final class space/kscience/kmath/nd/Structure1D$Companion {
}
public final class space/kscience/kmath/nd/Structure1DKt {
+ public static final fun as1D (Lspace/kscience/kmath/nd/MutableStructureND;)Lspace/kscience/kmath/nd/MutableStructure1D;
public static final fun as1D (Lspace/kscience/kmath/nd/StructureND;)Lspace/kscience/kmath/nd/Structure1D;
public static final fun asND (Lspace/kscience/kmath/structures/Buffer;)Lspace/kscience/kmath/nd/Structure1D;
}
@@ -954,6 +970,7 @@ public final class space/kscience/kmath/nd/Structure2D$Companion {
}
public final class space/kscience/kmath/nd/Structure2DKt {
+ public static final fun as2D (Lspace/kscience/kmath/nd/MutableStructureND;)Lspace/kscience/kmath/nd/MutableStructure2D;
public static final fun as2D (Lspace/kscience/kmath/nd/StructureND;)Lspace/kscience/kmath/nd/Structure2D;
}
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/linear/LinearSpace.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/linear/LinearSpace.kt
index 0798e8763..ec073ac48 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/linear/LinearSpace.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/linear/LinearSpace.kt
@@ -19,6 +19,7 @@ import kotlin.reflect.KClass
* @param T the type of items.
*/
public typealias Matrix<T> = Structure2D<T>
+public typealias MutableMatrix<T> = MutableStructure2D<T>
/**
* Alias for using [Buffer] as a point/vector in a many-dimensional space.
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/BufferND.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/BufferND.kt
index 23d961a7e..1f608f478 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/BufferND.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/BufferND.kt
@@ -7,6 +7,8 @@ package space.kscience.kmath.nd
import space.kscience.kmath.structures.Buffer
import space.kscience.kmath.structures.BufferFactory
+import space.kscience.kmath.structures.MutableBuffer
+import space.kscience.kmath.structures.MutableBufferFactory
/**
* Represents [StructureND] over [Buffer].
@@ -15,7 +17,7 @@ import space.kscience.kmath.structures.BufferFactory
* @param strides The strides to access elements of [Buffer] by linear indices.
* @param buffer The underlying buffer.
*/
-public class BufferND<T>(
+public open class BufferND<T>(
public val strides: Strides,
public val buffer: Buffer<T>,
) : StructureND<T> {
@@ -50,4 +52,35 @@ public inline fun StructureND.mapToBuffer(
val strides = DefaultStrides(shape)
BufferND(strides, factory.invoke(strides.linearSize) { transform(get(strides.index(it))) })
}
+}
+
+/**
+ * Represents [MutableStructureND] over [MutableBuffer].
+ *
+ * @param T the type of items.
+ * @param strides The strides to access elements of [MutableBuffer] by linear indices.
+ * @param mutableBuffer The underlying buffer.
+ */
+public class MutableBufferND<T>(
+ strides: Strides,
+ public val mutableBuffer: MutableBuffer<T>,
+) : MutableStructureND<T>, BufferND<T>(strides, mutableBuffer) {
+ override fun set(index: IntArray, value: T) {
+ mutableBuffer[strides.offset(index)] = value
+ }
+}
+
+/**
+ * Transforms a structure into a new structure using the provided [MutableBufferFactory], optimizing when the argument is a [MutableBufferND].
+ */
+public inline fun <T, reified R : Any> MutableStructureND<T>.mapToMutableBuffer(
+ factory: MutableBufferFactory<R> = MutableBuffer.Companion::auto,
+ crossinline transform: (T) -> R,
+): MutableBufferND<R> {
+ return if (this is MutableBufferND<T>)
+ MutableBufferND(this.strides, factory.invoke(strides.linearSize) { transform(mutableBuffer[it]) })
+ else {
+ val strides = DefaultStrides(shape)
+ MutableBufferND(strides, factory.invoke(strides.linearSize) { transform(get(strides.index(it))) })
+ }
\ No newline at end of file
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/Structure1D.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/Structure1D.kt
index 0f0588fda..8ea6d0f02 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/Structure1D.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/Structure1D.kt
@@ -6,6 +6,8 @@
package space.kscience.kmath.nd
import space.kscience.kmath.structures.Buffer
+import space.kscience.kmath.structures.MutableBuffer
+import space.kscience.kmath.structures.asMutableBuffer
import space.kscience.kmath.structures.asSequence
import kotlin.jvm.JvmInline
@@ -25,6 +27,16 @@ public interface Structure1D : StructureND, Buffer {
public companion object
}
+/**
+ * A mutable structure that is guaranteed to be one-dimensional
+ */
+public interface MutableStructure1D<T> : Structure1D<T>, MutableStructureND<T>, MutableBuffer<T> {
+ public override operator fun set(index: IntArray, value: T) {
+ require(index.size == 1) { "Index dimension mismatch. Expected 1 but found ${index.size}" }
+ set(index[0], value)
+ }
+}
+
/**
* A 1D wrapper for nd-structure
*/
@@ -37,6 +49,23 @@ private value class Structure1DWrapper(val structure: StructureND) : Struc
override fun elements(): Sequence<Pair<IntArray, T>> = structure.elements()
}
+/**
+ * A 1D wrapper for a mutable nd-structure
+ */
+private class MutableStructure1DWrapper<T>(val structure: MutableStructureND<T>) : MutableStructure1D<T> {
+ override val shape: IntArray get() = structure.shape
+ override val size: Int get() = structure.shape[0]
+ override fun elements(): Sequence<Pair<IntArray, T>> = structure.elements()
+
+ override fun get(index: Int): T = structure[index]
+ override fun set(index: Int, value: T) {
+ structure[intArrayOf(index)] = value
+ }
+
+ override fun copy(): MutableBuffer<T> =
+ structure.elements().map { it.second }.toMutableList().asMutableBuffer()
+}
+
/**
* A structure wrapper for buffer
@@ -52,6 +81,21 @@ private value class Buffer1DWrapper(val buffer: Buffer) : Structure1D {
override operator fun get(index: Int): T = buffer[index]
}
+internal class MutableBuffer1DWrapper<T>(val buffer: MutableBuffer<T>) : MutableStructure1D<T> {
+ override val shape: IntArray get() = intArrayOf(buffer.size)
+ override val size: Int get() = buffer.size
+
+ override fun elements(): Sequence<Pair<IntArray, T>> =
+ buffer.asSequence().mapIndexed { index, value -> intArrayOf(index) to value }
+
+ override operator fun get(index: Int): T = buffer[index]
+ override fun set(index: Int, value: T) {
+ buffer[index] = value
+ }
+
+ override fun copy(): MutableBuffer<T> = buffer.copy()
+}
+
/**
* Represent a [StructureND] as [Structure1D]. Throw error in case of dimension mismatch
*/
@@ -62,6 +106,11 @@ public fun StructureND.as1D(): Structure1D = this as? Structure1D ?
}
} else error("Can't create 1d-structure from ${shape.size}d-structure")
+public fun <T> MutableStructureND<T>.as1D(): MutableStructure1D<T> =
+ this as? MutableStructure1D<T> ?: if (shape.size == 1) {
+ MutableStructure1DWrapper(this)
+ } else error("Can't create 1d-structure from ${shape.size}d-structure")
+
/**
* Represent this buffer as 1D structure
*/
@@ -75,3 +124,4 @@ internal fun Structure1D.unwrap(): Buffer = when {
this is Structure1DWrapper && structure is BufferND -> structure.buffer
else -> this
}
+
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/Structure2D.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/Structure2D.kt
index 3eee41832..28ae07a3c 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/Structure2D.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/Structure2D.kt
@@ -8,6 +8,7 @@ package space.kscience.kmath.nd
import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.structures.Buffer
import space.kscience.kmath.structures.VirtualBuffer
+import space.kscience.kmath.structures.MutableListBuffer
import kotlin.jvm.JvmInline
import kotlin.reflect.KClass
@@ -63,6 +64,32 @@ public interface Structure2D : StructureND {
public companion object
}
+/**
+ * Represents mutable [Structure2D].
+ */
+public interface MutableStructure2D<T> : Structure2D<T>, MutableStructureND<T> {
+ /**
+ * Inserts an item at the specified indices.
+ *
+ * @param i the first index.
+ * @param j the second index.
+ * @param value the value.
+ */
+ public operator fun set(i: Int, j: Int, value: T)
+
+ /**
+ * The buffer of rows of this structure. It gets elements from the structure dynamically.
+ */
+ override val rows: List<MutableStructure1D<T>>
+ get() = List(rowNum) { i -> MutableBuffer1DWrapper(MutableListBuffer(colNum) { j -> get(i, j) }) }
+
+ /**
+ * The buffer of columns of this structure. It gets elements from the structure dynamically.
+ */
+ override val columns: List<MutableStructure1D<T>>
+ get() = List(colNum) { j -> MutableBuffer1DWrapper(MutableListBuffer(rowNum) { i -> get(i, j) }) }
+}
+
/**
* A 2D wrapper for nd-structure
*/
@@ -81,6 +108,33 @@ private value class Structure2DWrapper(val structure: StructureND) : Struc
override fun elements(): Sequence<Pair<IntArray, T>> = structure.elements()
}
+/**
+ * A 2D wrapper for a mutable nd-structure
+ */
+private class MutableStructure2DWrapper<T>(val structure: MutableStructureND<T>) : MutableStructure2D<T> {
+ override val shape: IntArray get() = structure.shape
+
+ override val rowNum: Int get() = shape[0]
+ override val colNum: Int get() = shape[1]
+
+ override operator fun get(i: Int, j: Int): T = structure[i, j]
+
+ override fun set(index: IntArray, value: T) {
+ structure[index] = value
+ }
+
+ override operator fun set(i: Int, j: Int, value: T) {
+ structure[intArrayOf(i, j)] = value
+ }
+
+ override fun elements(): Sequence<Pair<IntArray, T>> = structure.elements()
+
+ override fun equals(other: Any?): Boolean = false
+
+ override fun hashCode(): Int = 0
+}
+
/**
* Represent a [StructureND] as [Structure1D]. Throw error in case of dimension mismatch
*/
@@ -89,9 +143,18 @@ public fun StructureND.as2D(): Structure2D = this as? Structure2D ?
else -> error("Can't create 2d-structure from ${shape.size}d-structure")
}
+public fun <T> MutableStructureND<T>.as2D(): MutableStructure2D<T> = this as? MutableStructure2D<T> ?: when (shape.size) {
+ 2 -> MutableStructure2DWrapper(this)
+ else -> error("Can't create 2d-structure from ${shape.size}d-structure")
+}
+
/**
* Expose inner [StructureND] if possible
*/
internal fun <T> Structure2D<T>.unwrap(): StructureND<T> =
if (this is Structure2DWrapper) structure
- else this
\ No newline at end of file
+ else this
+
+internal fun <T> MutableStructure2D<T>.unwrap(): MutableStructureND<T> =
+ if (this is MutableStructure2DWrapper) structure else this
+
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/StructureND.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/StructureND.kt
index 0656b1f7f..a3331d71a 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/StructureND.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/StructureND.kt
@@ -184,12 +184,15 @@ public interface Strides {
/**
* Array strides
*/
- public val strides: List<Int>
+ public val strides: IntArray
/**
* Get linear index from multidimensional index
*/
- public fun offset(index: IntArray): Int
+ public fun offset(index: IntArray): Int = index.mapIndexed { i, value ->
+ if (value < 0 || value >= shape[i]) throw IndexOutOfBoundsException("Index $value out of shape bounds: (0,${this.shape[i]})")
+ value * strides[i]
+ }.sum()
/**
* Get multidimensional from linear
@@ -221,7 +224,7 @@ public class DefaultStrides private constructor(override val shape: IntArray) :
/**
* Strides for memory access
*/
- override val strides: List<Int> by lazy {
+ override val strides: IntArray by lazy {
sequence {
var current = 1
yield(1)
@@ -230,14 +233,9 @@ public class DefaultStrides private constructor(override val shape: IntArray) :
current *= it
yield(current)
}
- }.toList()
+ }.toList().toIntArray()
}
- override fun offset(index: IntArray): Int = index.mapIndexed { i, value ->
- if (value < 0 || value >= shape[i]) throw IndexOutOfBoundsException("Index $value out of shape bounds: (0,${this.shape[i]})")
- value * strides[i]
- }.sum()
-
override fun index(offset: Int): IntArray {
val res = IntArray(shape.size)
var current = offset
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/Buffer.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/Buffer.kt
index d187beab1..be5dfb359 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/Buffer.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/Buffer.kt
@@ -232,7 +232,7 @@ public value class MutableListBuffer(public val list: MutableList) : Mutab
}
/**
- * Returns an [ListBuffer] that wraps the original list.
+ * Returns a [MutableListBuffer] that wraps the original list.
*/
public fun <T> MutableList<T>.asMutableBuffer(): MutableListBuffer<T> = MutableListBuffer(this)
diff --git a/kmath-tensors/README.md b/kmath-tensors/README.md
new file mode 100644
index 000000000..a81b7277c
--- /dev/null
+++ b/kmath-tensors/README.md
@@ -0,0 +1,40 @@
+# Module kmath-tensors
+
+Common operations on tensors. The API consists of:
+
+ - [TensorAlgebra](src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt) : Basic algebra operations on tensors (plus, dot, etc.)
+ - [TensorPartialDivisionAlgebra](src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorPartialDivisionAlgebra.kt) : Emulates an algebra over a field
+ - [LinearOpsTensorAlgebra](src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt) : Linear algebra operations including LU, QR, Cholesky LL and SVD decompositions
+ - [AnalyticTensorAlgebra](src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt) : Element-wise analytic operations
+
+The library offers a multiplatform implementation of this API over `Double`s (see the short usage sketch below the list). As a highlight, the user can find:
+ - [BroadcastDoubleTensorAlgebra](src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/BroadcastDoubleTensorAlgebra.kt) : Basic algebra operations implemented with broadcasting.
+ - [DoubleLinearOpsTensorAlgebra](src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/DoubleLinearOpsTensorAlgebra.kt) : Contains the power method for SVD and the spectrum of symmetric matrices.
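+
+A minimal usage sketch (the shapes and values are illustrative, assuming the artifact below is on the classpath):
+
+```kotlin
+import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+
+fun main() = BroadcastDoubleTensorAlgebra {
+    // a 2x3 tensor drawn from the standard normal distribution
+    val a = randomNormal(intArrayOf(2, 3))
+    // a 3-element vector, broadcast over the rows of `a`
+    val b = fromArray(intArrayOf(3), doubleArrayOf(1.0, 2.0, 3.0))
+    println(a + b) // element-wise sum with broadcasting
+}
+```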
+## Artifact:
+
+The Maven coordinates of this project are `space.kscience:kmath-tensors:0.3.0-dev-7`.
+
+**Gradle:**
+```gradle
+repositories {
+ maven { url 'https://repo.kotlin.link' }
+ maven { url 'https://dl.bintray.com/hotkeytlt/maven' }
+ maven { url "https://dl.bintray.com/kotlin/kotlin-eap" } // include for builds based on kotlin-eap
+}
+
+dependencies {
+ implementation 'space.kscience:kmath-tensors:0.3.0-dev-7'
+}
+```
+**Gradle Kotlin DSL:**
+```kotlin
+repositories {
+ maven("https://repo.kotlin.link")
+ maven("https://dl.bintray.com/kotlin/kotlin-eap") // include for builds based on kotlin-eap
+ maven("https://dl.bintray.com/hotkeytlt/maven") // required for a
+}
+
+dependencies {
+ implementation("space.kscience:kmath-tensors:0.3.0-dev-7")
+}
+```
diff --git a/kmath-tensors/build.gradle.kts b/kmath-tensors/build.gradle.kts
new file mode 100644
index 000000000..b7f24dc6a
--- /dev/null
+++ b/kmath-tensors/build.gradle.kts
@@ -0,0 +1,43 @@
+plugins {
+ id("ru.mipt.npm.gradle.mpp")
+}
+
+kotlin.sourceSets {
+ all {
+ languageSettings.useExperimentalAnnotation("space.kscience.kmath.misc.UnstableKMathAPI")
+ }
+ commonMain {
+ dependencies {
+ api(project(":kmath-core"))
+ api(project(":kmath-stat"))
+ }
+ }
+}
+
+tasks.dokkaHtml {
+ dependsOn(tasks.build)
+}
+
+readme {
+ maturity = ru.mipt.npm.gradle.Maturity.PROTOTYPE
+ propertyByTemplate("artifact", rootProject.file("docs/templates/ARTIFACT-TEMPLATE.md"))
+
+ feature(
+ id = "tensor algebra",
+ description = "Basic linear algebra operations on tensors (plus, dot, etc.)",
+ ref = "src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt"
+ )
+
+ feature(
+ id = "tensor algebra with broadcasting",
+ description = "Basic linear algebra operations implemented with broadcasting.",
+ ref = "src/commonMain/kotlin/space/kscience/kmath/tensors/core/algebras/BroadcastDoubleTensorAlgebra.kt"
+ )
+
+ feature(
+ id = "linear algebra operations",
+ description = "Advanced linear algebra operations like LU decomposition, SVD, etc.",
+ ref = "src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt"
+ )
+
+}
\ No newline at end of file
diff --git a/kmath-tensors/docs/README-TEMPLATE.md b/kmath-tensors/docs/README-TEMPLATE.md
new file mode 100644
index 000000000..5fd968afd
--- /dev/null
+++ b/kmath-tensors/docs/README-TEMPLATE.md
@@ -0,0 +1,7 @@
+# Module kmath-tensors
+
+Common linear algebra operations on tensors.
+
+${features}
+
+${artifact}
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt
new file mode 100644
index 000000000..1db986e77
--- /dev/null
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/AnalyticTensorAlgebra.kt
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.api
+
+
+/**
+ * Analytic operations on [Tensor].
+ *
+ * @param T the type of items closed under analytic functions in the tensors.
+ */
+public interface AnalyticTensorAlgebra<T> : TensorPartialDivisionAlgebra<T> {
+
+ /**
+ * @return the mean of all elements in the input tensor.
+ */
+ public fun Tensor<T>.mean(): T
+
+ /**
+ * Returns the mean of each row of the input tensor in the given dimension [dim].
+ *
+ * If [keepDim] is true, the output tensor is of the same size as
+ * input except in the dimension [dim] where it is of size 1.
+ * Otherwise, [dim] is squeezed, resulting in the output tensor having 1 fewer dimension.
+ *
+ * @param dim the dimension to reduce.
+ * @param keepDim whether the output tensor has [dim] retained or not.
+ * @return the mean of each row of the input tensor in the given dimension [dim].
+ */
+ public fun Tensor<T>.mean(dim: Int, keepDim: Boolean): Tensor<T>
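+
+ // A hypothetical illustration of [keepDim] (not part of the interface): for a tensor `t` of shape [100, 5],
+ // t.mean(0, keepDim = true) has shape [1, 5], while t.mean(0, keepDim = false) has shape [5].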
+
+ /**
+ * @return the standard deviation of all elements in the input tensor.
+ */
+ public fun Tensor<T>.std(): T
+
+ /**
+ * Returns the standard deviation of each row of the input tensor in the given dimension [dim].
+ *
+ * If [keepDim] is true, the output tensor is of the same size as
+ * input except in the dimension [dim] where it is of size 1.
+ * Otherwise, [dim] is squeezed, resulting in the output tensor having 1 fewer dimension.
+ *
+ * @param dim the dimension to reduce.
+ * @param keepDim whether the output tensor has [dim] retained or not.
+ * @return the standard deviation of each row of the input tensor in the given dimension [dim].
+ */
+ public fun Tensor<T>.std(dim: Int, keepDim: Boolean): Tensor<T>
+
+ /**
+ * @return the variance of all elements in the input tensor.
+ */
+ public fun Tensor<T>.variance(): T
+
+ /**
+ * Returns the variance of each row of the input tensor in the given dimension [dim].
+ *
+ * If [keepDim] is true, the output tensor is of the same size as
+ * input except in the dimension [dim] where it is of size 1.
+ * Otherwise, [dim] is squeezed, resulting in the output tensor having 1 fewer dimension.
+ *
+ * @param dim the dimension to reduce.
+ * @param keepDim whether the output tensor has [dim] retained or not.
+ * @return the variance of each row of the input tensor in the given dimension [dim].
+ */
+ public fun Tensor<T>.variance(dim: Int, keepDim: Boolean): Tensor<T>
+
+ /**
+ * Returns the covariance matrix M of given vectors.
+ *
+ * M[i, j] contains covariance of i-th and j-th given vectors
+ *
+ * @param tensors the [List] of 1-dimensional tensors with the same shape
+ * @return the covariance matrix
+ */
+ public fun cov(tensors: List<Tensor<T>>): Tensor<T>
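+
+ // A hypothetical sketch (not part of the interface): for 1-dimensional tensors x and y of equal length,
+ // cov(listOf(x, y)) returns a 2x2 matrix with the variances on the diagonal
+ // and the covariance of x and y in the off-diagonal entries.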
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.exp.html
+ public fun Tensor<T>.exp(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.log.html
+ public fun Tensor<T>.ln(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.sqrt.html
+ public fun Tensor<T>.sqrt(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.cos.html#torch.cos
+ public fun Tensor<T>.cos(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.acos.html#torch.acos
+ public fun Tensor<T>.acos(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.cosh.html#torch.cosh
+ public fun Tensor<T>.cosh(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.acosh.html#torch.acosh
+ public fun Tensor<T>.acosh(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.sin.html#torch.sin
+ public fun Tensor<T>.sin(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.asin
+ public fun Tensor<T>.asin(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.sinh.html#torch.sinh
+ public fun Tensor<T>.sinh(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.asinh.html#torch.asinh
+ public fun Tensor<T>.asinh(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.tan.html#torch.tan
+ public fun Tensor<T>.tan(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.atan.html#torch.atan
+ public fun Tensor<T>.atan(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.tanh.html#torch.tanh
+ public fun Tensor<T>.tanh(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.atanh.html#torch.atanh
+ public fun Tensor<T>.atanh(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.ceil.html#torch.ceil
+ public fun Tensor<T>.ceil(): Tensor<T>
+
+ //For information: https://pytorch.org/docs/stable/generated/torch.floor.html#torch.floor
+ public fun Tensor<T>.floor(): Tensor<T>
+
+}
\ No newline at end of file
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt
new file mode 100644
index 000000000..6bdecfa85
--- /dev/null
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/LinearOpsTensorAlgebra.kt
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.api
+
+/**
+ * Common linear algebra operations. Operates on [Tensor].
+ *
+ * @param T the type of items closed under division in the tensors.
+ */
+public interface LinearOpsTensorAlgebra<T> : TensorPartialDivisionAlgebra<T> {
+
+ /**
+ * Computes the determinant of a square matrix input, or of each square matrix in a batched input.
+ * For more information: https://pytorch.org/docs/stable/linalg.html#torch.linalg.det
+ *
+ * @return the determinant.
+ */
+ public fun Tensor<T>.det(): Tensor<T>
+
+ /**
+ * Computes the multiplicative inverse matrix of a square matrix input, or of each square matrix in a batched input.
+ * Given a square matrix `A`, return the matrix `AInv` satisfying
+ * `A dot AInv = AInv dot A = eye(a.shape[0])`.
+ * For more information: https://pytorch.org/docs/stable/linalg.html#torch.linalg.inv
+ *
+ * @return the multiplicative inverse of a matrix.
+ */
+ public fun Tensor<T>.inv(): Tensor<T>
+
+ /**
+ * Cholesky decomposition.
+ *
+ * Computes the Cholesky decomposition of a Hermitian (or symmetric for real-valued matrices)
+ * positive-definite matrix or the Cholesky decompositions for a batch of such matrices.
+ * Each decomposition has the form:
+ * Given a tensor `input`, return the tensor `L` satisfying `input = L dot L.H`,
+ * where L is a lower-triangular matrix and L.H is the conjugate transpose of L,
+ * which is just a transpose for the case of real-valued input matrices.
+ * For more information: https://pytorch.org/docs/stable/linalg.html#torch.linalg.cholesky
+ *
+ * @return the batch of L matrices.
+ */
+ public fun Tensor<T>.cholesky(): Tensor<T>
+
+ /**
+ * QR decomposition.
+ *
+ * Computes the QR decomposition of a matrix or a batch of matrices, and returns a pair `(Q, R)` of tensors.
+ * Given a tensor `input`, return tensors (Q, R) satisfying ``input = Q dot R``,
+ * with `Q` being an orthogonal matrix or batch of orthogonal matrices
+ * and `R` being an upper triangular matrix or batch of upper triangular matrices.
+ * For more information: https://pytorch.org/docs/stable/linalg.html#torch.linalg.qr
+ *
+ * @return pair of Q and R tensors.
+ */
+ public fun Tensor<T>.qr(): Pair<Tensor<T>, Tensor<T>>
+
+ /**
+ * LUP decomposition
+ *
+ * Computes the LUP decomposition of a matrix or a batch of matrices.
+ * Given a tensor `input`, return tensors (P, L, U) satisfying `P dot input = L dot U`,
+ * with `P` being a permutation matrix or batch of matrices,
+ * `L` being a lower triangular matrix or batch of matrices,
+ * `U` being an upper triangular matrix or batch of matrices.
+ *
+ * @return triple of P, L and U tensors.
+ */
+ public fun Tensor<T>.lu(): Triple<Tensor<T>, Tensor<T>, Tensor<T>>
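+
+ // A hypothetical contract check (not part of the interface), assuming a Double implementation:
+ // val (p, l, u) = a.lu()
+ // (p dot a) should equal (l dot u) up to rounding error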
+
+ /**
+ * Singular Value Decomposition.
+ *
+ * Computes the singular value decomposition of either a matrix or batch of matrices `input`.
+ * The singular value decomposition is represented as a triple `(U, S, V)`,
+ * such that `input = U dot diagonalEmbedding(S) dot V.H`,
+ * where V.H is the conjugate transpose of V.
+ * If input is a batch of tensors, then U, S, and V are also batched with the same batch dimensions as input.
+ * For more information: https://pytorch.org/docs/stable/linalg.html#torch.linalg.svd
+ *
+ * @return triple `(U, S, V)`.
+ */
+ public fun Tensor<T>.svd(): Triple<Tensor<T>, Tensor<T>, Tensor<T>>
+
+ /**
+ * Returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices,
+ * represented by a pair (eigenvalues, eigenvectors).
+ * For more information: https://pytorch.org/docs/stable/generated/torch.symeig.html
+ *
+ * @return a pair (eigenvalues, eigenvectors)
+ */
+ public fun Tensor<T>.symEig(): Pair<Tensor<T>, Tensor<T>>
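+
+ // A hypothetical sketch (not part of the interface): for a symmetric [n, n] input,
+ // val (eigenvalues, eigenvectors) = matrix.symEig()
+ // yields eigenvalues of shape [n] and eigenvectors of shape [n, n].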
+
+}
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/Tensor.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/Tensor.kt
new file mode 100644
index 000000000..179787684
--- /dev/null
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/Tensor.kt
@@ -0,0 +1,5 @@
+package space.kscience.kmath.tensors.api
+
+import space.kscience.kmath.nd.MutableStructureND
+
+public typealias Tensor<T> = MutableStructureND<T>
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt
new file mode 100644
index 000000000..2eb18ada6
--- /dev/null
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.api
+
+import space.kscience.kmath.operations.Algebra
+
+/**
+ * Algebra over a ring on [Tensor].
+ * For more information: https://proofwiki.org/wiki/Definition:Algebra_over_Ring
+ *
+ * @param T the type of items in the tensors.
+ */
+public interface TensorAlgebra<T> : Algebra<Tensor<T>> {
+
+ /**
+ * Returns a single tensor value of unit dimension if the tensor shape equals [1], and null otherwise.
+ *
+ * @return a nullable value of a potentially scalar tensor.
+ */
+ public fun Tensor<T>.valueOrNull(): T?
+
+ /**
+ * Returns a single tensor value of unit dimension. The tensor shape must be equal to [1].
+ *
+ * @return the value of a scalar tensor.
+ */
+ public fun Tensor<T>.value(): T
+
+ /**
+ * Each element of the tensor [other] is added to this value.
+ * The resulting tensor is returned.
+ *
+ * @param other tensor to be added.
+ * @return the sum of this value and tensor [other].
+ */
+ public operator fun T.plus(other: Tensor<T>): Tensor<T>
+
+ /**
+ * Adds the scalar [value] to each element of this tensor and returns a new resulting tensor.
+ *
+ * @param value the number to be added to each element of this tensor.
+ * @return the sum of this tensor and [value].
+ */
+ public operator fun Tensor<T>.plus(value: T): Tensor<T>
+
+ /**
+ * Each element of the tensor [other] is added to each element of this tensor.
+ * The resulting tensor is returned.
+ *
+ * @param other tensor to be added.
+ * @return the sum of this tensor and [other].
+ */
+ public operator fun Tensor<T>.plus(other: Tensor<T>): Tensor<T>
+
+ /**
+ * Adds the scalar [value] to each element of this tensor.
+ *
+ * @param value the number to be added to each element of this tensor.
+ */
+ public operator fun Tensor<T>.plusAssign(value: T): Unit
+
+ /**
+ * Each element of the tensor [other] is added to each element of this tensor.
+ *
+ * @param other tensor to be added.
+ */
+ public operator fun Tensor<T>.plusAssign(other: Tensor<T>): Unit
+
+
+ /**
+ * Each element of the tensor [other] is subtracted from this value.
+ * The resulting tensor is returned.
+ *
+ * @param other tensor to be subtracted.
+ * @return the difference between this value and tensor [other].
+ */
+ public operator fun T.minus(other: Tensor<T>): Tensor<T>
+
+ /**
+ * Subtracts the scalar [value] from each element of this tensor and returns a new resulting tensor.
+ *
+ * @param value the number to be subtracted from each element of this tensor.
+ * @return the difference between this tensor and [value].
+ */
+ public operator fun Tensor<T>.minus(value: T): Tensor<T>
+
+ /**
+ * Each element of the tensor [other] is subtracted from each element of this tensor.
+ * The resulting tensor is returned.
+ *
+ * @param other tensor to be subtracted.
+ * @return the difference between this tensor and [other].
+ */
+ public operator fun Tensor<T>.minus(other: Tensor<T>): Tensor<T>
+
+ /**
+ * Subtracts the scalar [value] from each element of this tensor.
+ *
+ * @param value the number to be subtracted from each element of this tensor.
+ */
+ public operator fun Tensor<T>.minusAssign(value: T): Unit
+
+ /**
+ * Each element of the tensor [other] is subtracted from each element of this tensor.
+ *
+ * @param other tensor to be subtracted.
+ */
+ public operator fun Tensor<T>.minusAssign(other: Tensor<T>): Unit
+
+
+ /**
+ * Each element of the tensor [other] is multiplied by this value.
+ * The resulting tensor is returned.
+ *
+ * @param other tensor to be multiplied.
+ * @return the product of this value and tensor [other].
+ */
+ public operator fun T.times(other: Tensor<T>): Tensor<T>
+
+ /**
+ * Multiplies the scalar [value] by each element of this tensor and returns a new resulting tensor.
+ *
+ * @param value the number to be multiplied by each element of this tensor.
+ * @return the product of this tensor and [value].
+ */
+ public operator fun Tensor<T>.times(value: T): Tensor<T>
+
+ /**
+ * Each element of the tensor [other] is multiplied by each element of this tensor.
+ * The resulting tensor is returned.
+ *
+ * @param other tensor to be multiplied.
+ * @return the product of this tensor and [other].
+ */
+ public operator fun Tensor<T>.times(other: Tensor<T>): Tensor<T>
+
+ /**
+ * Multiplies the scalar [value] by each element of this tensor.
+ *
+ * @param value the number to be multiplied by each element of this tensor.
+ */
+ public operator fun Tensor<T>.timesAssign(value: T): Unit
+
+ /**
+ * Each element of the tensor [other] is multiplied by each element of this tensor.
+ *
+ * @param other tensor to be multiplied.
+ */
+ public operator fun Tensor<T>.timesAssign(other: Tensor<T>): Unit
+
+ /**
+ * Numerical negative, element-wise.
+ *
+ * @return tensor negation of the original tensor.
+ */
+ public operator fun Tensor<T>.unaryMinus(): Tensor<T>
+
+ /**
+ * Returns the sub-tensor at index [i].
+ * For more information: https://pytorch.org/cppdocs/notes/tensor_indexing.html
+ *
+ * @param i index of the extractable tensor
+ * @return subtensor of the original tensor with index [i]
+ */
+ public operator fun Tensor<T>.get(i: Int): Tensor<T>
+
+ /**
+ * Returns a tensor that is a transposed version of this tensor. The given dimensions [i] and [j] are swapped.
+ * For more information: https://pytorch.org/docs/stable/generated/torch.transpose.html
+ *
+ * @param i the first dimension to be transposed
+ * @param j the second dimension to be transposed
+ * @return transposed tensor
+ */
+ public fun Tensor<T>.transpose(i: Int = -2, j: Int = -1): Tensor<T>
+
+ /**
+ * Returns a new tensor with the same data as the self tensor but of a different shape.
+ * The returned tensor shares the same data and must have the same number of elements, but may have a different size
+ * For more information: https://pytorch.org/docs/stable/tensor_view.html
+ *
+ * @param shape the desired size
+ * @return tensor with new shape
+ */
+ public fun Tensor<T>.view(shape: IntArray): Tensor<T>
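+
+ // A hypothetical sketch (not part of the interface): a tensor of shape [10]
+ // can be viewed as t.view(intArrayOf(2, 5)), sharing the same 10 elements under shape [2, 5].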
+
+ /**
+ * View this tensor as the same size as [other].
+ * ``this.viewAs(other) is equivalent to this.view(other.shape)``.
+ * For more information: https://pytorch.org/cppdocs/notes/tensor_indexing.html
+ *
+ * @param other the result tensor has the same size as other.
+ * @return the result tensor with the same size as other.
+ */
+ public fun Tensor<T>.viewAs(other: Tensor<T>): Tensor<T>
+
+ /**
+ * Matrix product of two tensors.
+ *
+ * The behavior depends on the dimensionality of the tensors as follows:
+ * 1. If both tensors are 1-dimensional, the dot product (scalar) is returned.
+ *
+ * 2. If both arguments are 2-dimensional, the matrix-matrix product is returned.
+ *
+ * 3. If the first argument is 1-dimensional and the second argument is 2-dimensional,
+ * a 1 is prepended to its dimension for the purpose of the matrix multiply.
+ * After the matrix multiply, the prepended dimension is removed.
+ *
+ * 4. If the first argument is 2-dimensional and the second argument is 1-dimensional,
+ * the matrix-vector product is returned.
+ *
+ * 5. If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2),
+ * then a batched matrix multiply is returned. If the first argument is 1-dimensional,
+ * a 1 is prepended to its dimension for the purpose of the batched matrix multiply and removed after.
+ * If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix
+ * multiply and removed after.
+ * The non-matrix (i.e. batch) dimensions are broadcasted (and thus must be broadcastable).
+ * For example, if `input` is a (j × 1 × n × n) tensor and `other` is a
+ * (k × n × n) tensor, out will be a (j × k × n × n) tensor.
+ *
+ * For more information: https://pytorch.org/docs/stable/generated/torch.matmul.html
+ *
+ * @param other tensor to be multiplied
+ * @return mathematical product of two tensors
+ */
+ public infix fun Tensor<T>.dot(other: Tensor<T>): Tensor<T>
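+
+ // A hypothetical shape sketch (not part of the interface), assuming a Double implementation:
+ // val a = randomNormal(intArrayOf(2, 3, 4), 0L) // batch of two 3x4 matrices
+ // val b = randomNormal(intArrayOf(4, 5), 0L) // a single 4x5 matrix
+ // a dot b // broadcasted batch product of shape [2, 3, 5]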
+
+ /**
+ * Creates a tensor whose diagonals of certain 2D planes (specified by [dim1] and [dim2])
+ * are filled by [diagonalEntries].
+ * To facilitate creating batched diagonal matrices,
+ * the 2D planes formed by the last two dimensions of the returned tensor are chosen by default.
+ *
+ * The argument [offset] controls which diagonal to consider:
+ * 1. If [offset] = 0, it is the main diagonal.
+ * 1. If [offset] > 0, it is above the main diagonal.
+ * 1. If [offset] < 0, it is below the main diagonal.
+ *
+ * The size of the new matrix will be calculated
+ * to make the specified diagonal of the size of the last input dimension.
+ * For more information: https://pytorch.org/docs/stable/generated/torch.diag_embed.html
+ *
+ * @param diagonalEntries the input tensor. Must be at least 1-dimensional.
+ * @param offset which diagonal to consider. Default: 0 (main diagonal).
+ * @param dim1 first dimension with respect to which to take diagonal. Default: -2.
+ * @param dim2 second dimension with respect to which to take diagonal. Default: -1.
+ *
+ * @return tensor whose diagonals of certain 2D planes (specified by [dim1] and [dim2])
+ * are filled by [diagonalEntries]
+ */
+ public fun diagonalEmbedding(
+ diagonalEntries: Tensor<T>,
+ offset: Int = 0,
+ dim1: Int = -2,
+ dim2: Int = -1
+ ): Tensor<T>
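+
+ // A hypothetical sketch (not part of the interface), assuming a Double implementation:
+ // diagonalEmbedding(fromArray(intArrayOf(2), doubleArrayOf(1.0, 2.0)))
+ // produces the 2x2 matrix [[1.0, 0.0], [0.0, 2.0]].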
+
+ /**
+ * @return the sum of all elements in the input tensor.
+ */
+ public fun Tensor<T>.sum(): T
+
+ /**
+ * Returns the sum of each row of the input tensor in the given dimension [dim].
+ *
+ * If [keepDim] is true, the output tensor is of the same size as
+ * input except in the dimension [dim] where it is of size 1.
+ * Otherwise, [dim] is squeezed, resulting in the output tensor having 1 fewer dimension.
+ *
+ * @param dim the dimension to reduce.
+ * @param keepDim whether the output tensor has [dim] retained or not.
+ * @return the sum of each row of the input tensor in the given dimension [dim].
+ */
+ public fun Tensor<T>.sum(dim: Int, keepDim: Boolean): Tensor<T>
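+
+ // keepDim sketch (illustrative, assuming DoubleTensorAlgebra's fromArray builder in scope):
+ //
+ // val m = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
+ // m.sum(0, keepDim = false) // shape [3]: 5.0, 7.0, 9.0
+ // m.sum(0, keepDim = true) // shape [1, 3]: same values, dim 0 retained with size 1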
+
+ /**
+ * @return the minimum value of all elements in the input tensor.
+ */
+ public fun Tensor<T>.min(): T
+
+ /**
+ * Returns the minimum value of each row of the input tensor in the given dimension [dim].
+ *
+ * If [keepDim] is true, the output tensor is of the same size as
+ * input except in the dimension [dim] where it is of size 1.
+ * Otherwise, [dim] is squeezed, resulting in the output tensor having 1 fewer dimension.
+ *
+ * @param dim the dimension to reduce.
+ * @param keepDim whether the output tensor has [dim] retained or not.
+ * @return the minimum value of each row of the input tensor in the given dimension [dim].
+ */
+ public fun Tensor<T>.min(dim: Int, keepDim: Boolean): Tensor<T>
+
+ /**
+ * @return the maximum value of all elements in the input tensor.
+ */
+ public fun Tensor<T>.max(): T
+
+ /**
+ * Returns the maximum value of each row of the input tensor in the given dimension [dim].
+ *
+ * If [keepDim] is true, the output tensor is of the same size as
+ * input except in the dimension [dim] where it is of size 1.
+ * Otherwise, [dim] is squeezed, resulting in the output tensor having 1 fewer dimension.
+ *
+ * @param dim the dimension to reduce.
+ * @param keepDim whether the output tensor has [dim] retained or not.
+ * @return the maximum value of each row of the input tensor in the given dimension [dim].
+ */
+ public fun Tensor<T>.max(dim: Int, keepDim: Boolean): Tensor<T>
+
+ /**
+ * Returns the index of the maximum value of each row of the input tensor in the given dimension [dim].
+ *
+ * If [keepDim] is true, the output tensor is of the same size as
+ * input except in the dimension [dim] where it is of size 1.
+ * Otherwise, [dim] is squeezed, resulting in the output tensor having 1 fewer dimension.
+ *
+ * @param dim the dimension to reduce.
+ * @param keepDim whether the output tensor has [dim] retained or not.
+ * @return the index of the maximum value of each row of the input tensor in the given dimension [dim].
+ */
+ public fun Tensor<T>.argMax(dim: Int, keepDim: Boolean): Tensor<T>
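+
+ // Reduction sketch (illustrative, assuming DoubleTensorAlgebra's fromArray builder): for
+ // val m = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
+ // m.max(1, keepDim = false) // shape [2]: 3.0, 6.0 (row maxima)
+ // m.argMax(1, keepDim = false) // shape [2]: 2, 2 (index of each row's maximum)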
+}
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorPartialDivisionAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorPartialDivisionAlgebra.kt
new file mode 100644
index 000000000..02bf5415d
--- /dev/null
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorPartialDivisionAlgebra.kt
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.api
+
+/**
+ * Algebra over a field with partial division on [Tensor].
+ * For more information: https://proofwiki.org/wiki/Definition:Division_Algebra
+ *
+ * @param T the type of items closed under division in the tensors.
+ */
+public interface TensorPartialDivisionAlgebra<T> : TensorAlgebra<T> {
+
+ /**
+ * This value is divided by each element of the tensor [other].
+ * The resulting tensor is returned.
+ *
+ * @param other tensor to divide this value by.
+ * @return the division of this value by the tensor [other].
+ */
+ public operator fun T.div(other: Tensor<T>): Tensor<T>
+
+ /**
+ * Divides each element of this tensor by the scalar [value] and returns the resulting tensor.
+ *
+ * @param value the number to divide by each element of this tensor.
+ * @return the division of this tensor by the [value].
+ */
+ public operator fun Tensor<T>.div(value: T): Tensor<T>
+
+ /**
+ * Each element of this tensor is divided by each element of the tensor [other].
+ * The resulting tensor is returned.
+ *
+ * @param other tensor to divide by.
+ * @return the division of this tensor by [other].
+ */
+ public operator fun Tensor<T>.div(other: Tensor<T>): Tensor<T>
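+
+ // Division sketch (illustrative, assuming a Double-based implementation such as
+ // DoubleTensorAlgebra with its fromArray builder in scope):
+ //
+ // val t = fromArray(intArrayOf(2), doubleArrayOf(2.0, 4.0))
+ // t / 2.0 // 1.0, 2.0
+ // 10.0 / t // 5.0, 2.5
+ // t / t // 1.0, 1.0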
+
+ /**
+ * Divides each element of this tensor by the scalar [value] in place.
+ *
+ * @param value the number to divide by each element of this tensor.
+ */
+ public operator fun Tensor<T>.divAssign(value: T)
+
+ /**
+ * Each element of this tensor is divided by each element of the [other] tensor.
+ *
+ * @param other tensor to divide by.
+ */
+ public operator fun Tensor<T>.divAssign(other: Tensor<T>)
+}
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BroadcastDoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BroadcastDoubleTensorAlgebra.kt
new file mode 100644
index 000000000..b8530f637
--- /dev/null
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BroadcastDoubleTensorAlgebra.kt
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.core
+
+import space.kscience.kmath.tensors.api.Tensor
+import space.kscience.kmath.tensors.core.internal.array
+import space.kscience.kmath.tensors.core.internal.broadcastTensors
+import space.kscience.kmath.tensors.core.internal.broadcastTo
+import space.kscience.kmath.tensors.core.internal.tensor
+
+/**
+ * Basic linear algebra operations implemented with broadcasting.
+ * For more information: https://pytorch.org/docs/stable/notes/broadcasting.html
+ */
+public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() {
+
+ override fun Tensor<Double>.plus(other: Tensor<Double>): DoubleTensor {
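+ // broadcastTensors aligns both operands to a common shape; the operation is then applied element-wise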
+ val broadcast = broadcastTensors(tensor, other.tensor)
+ val newThis = broadcast[0]
+ val newOther = broadcast[1]
+ val resBuffer = DoubleArray(newThis.linearStructure.linearSize) { i ->
+ newThis.mutableBuffer.array()[i] + newOther.mutableBuffer.array()[i]
+ }
+ return DoubleTensor(newThis.shape, resBuffer)
+ }
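+
+ // Broadcasting sketch (illustrative): shapes [2, 3] and [3] broadcast to [2, 3],
+ // so the vector is added to every row of the matrix:
+ //
+ // val m = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
+ // val v = fromArray(intArrayOf(3), doubleArrayOf(10.0, 20.0, 30.0))
+ // m + v // rows: [11.0, 22.0, 33.0] and [14.0, 25.0, 36.0]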
+
+ override fun Tensor<Double>.plusAssign(other: Tensor<Double>) {
+ val newOther = broadcastTo(other.tensor, tensor.shape)
+ for (i in 0 until tensor.linearStructure.linearSize) {
+ tensor.mutableBuffer.array()[tensor.bufferStart + i] +=
+ newOther.mutableBuffer.array()[newOther.bufferStart + i] // index the broadcast copy from its own buffer offset
+ }
+ }
+
+ override fun Tensor<Double>.minus(other: Tensor<Double>): DoubleTensor {
+ val broadcast = broadcastTensors(tensor, other.tensor)
+ val newThis = broadcast[0]
+ val newOther = broadcast[1]
+ val resBuffer = DoubleArray(newThis.linearStructure.linearSize) { i ->
+ newThis.mutableBuffer.array()[i] - newOther.mutableBuffer.array()[i]
+ }
+ return DoubleTensor(newThis.shape, resBuffer)
+ }
+
+ override fun Tensor<Double>.minusAssign(other: Tensor<Double>) {
+ val newOther = broadcastTo(other.tensor, tensor.shape)
+ for (i in 0 until tensor.linearStructure.linearSize) {
+ tensor.mutableBuffer.array()[tensor.bufferStart + i] -=
+ newOther.mutableBuffer.array()[newOther.bufferStart + i]
+ }
+ }
+
+ override fun Tensor<Double>.times(other: Tensor<Double>): DoubleTensor {
+ val broadcast = broadcastTensors(tensor, other.tensor)
+ val newThis = broadcast[0]
+ val newOther = broadcast[1]
+ val resBuffer = DoubleArray(newThis.linearStructure.linearSize) { i ->
+ newThis.mutableBuffer.array()[newThis.bufferStart + i] *
+ newOther.mutableBuffer.array()[newOther.bufferStart + i]
+ }
+ return DoubleTensor(newThis.shape, resBuffer)
+ }
+
+ override fun Tensor<Double>.timesAssign(other: Tensor<Double>) {
+ val newOther = broadcastTo(other.tensor, tensor.shape)
+ for (i in 0 until tensor.linearStructure.linearSize) {
+ tensor.mutableBuffer.array()[tensor.bufferStart + i] *=
+ newOther.mutableBuffer.array()[newOther.bufferStart + i]
+ }
+ }
+
+ override fun Tensor<Double>.div(other: Tensor<Double>): DoubleTensor {
+ val broadcast = broadcastTensors(tensor, other.tensor)
+ val newThis = broadcast[0]
+ val newOther = broadcast[1]
+ val resBuffer = DoubleArray(newThis.linearStructure.linearSize) { i ->
+ newThis.mutableBuffer.array()[newThis.bufferStart + i] /
+ newOther.mutableBuffer.array()[newOther.bufferStart + i]
+ }
+ return DoubleTensor(newThis.shape, resBuffer)
+ }
+
+ override fun Tensor<Double>.divAssign(other: Tensor<Double>