v0.3.0-dev-18 #459

Merged
altavir merged 64 commits from dev into master 2022-02-13 17:50:34 +03:00
12 changed files with 323 additions and 347 deletions
Showing only changes of commit 29a90efca5


@@ -45,6 +45,7 @@
- Buffer algebra does not require size anymore
- Operations -> Ops
- Default Buffer and ND algebras are now Ops and lack neutral elements (0, 1) as well as algebra-level shapes.
- Tensor algebra takes read-only structures as input and inherits AlgebraND
### Deprecated
- Specialized `DoubleBufferAlgebra`
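
As a quick illustration of the Ops-style API described in these notes (a minimal sketch; the entry points are taken from the diffs below, and helper signatures may differ):

import space.kscience.kmath.nd.ndAlgebra
import space.kscience.kmath.operations.DoubleField

fun opsStyleDemo() = with(DoubleField.ndAlgebra) {
    // The shape is supplied per structure; the Ops algebra itself carries no shape
    // and no neutral elements, so values are built explicitly.
    val a = structureND(intArrayOf(2, 2)) { index -> (index[0] + index[1]).toDouble() }
    a + a // element-wise operations still come from the algebra scope
}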


@@ -13,8 +13,7 @@ import org.jetbrains.kotlinx.multik.api.Multik
import org.jetbrains.kotlinx.multik.api.ones
import org.jetbrains.kotlinx.multik.ndarray.data.DN
import org.jetbrains.kotlinx.multik.ndarray.data.DataType
-import space.kscience.kmath.multik.multikND
-import space.kscience.kmath.multik.multikTensorAlgebra
+import space.kscience.kmath.multik.multikAlgebra
import space.kscience.kmath.nd.BufferedFieldOpsND
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.nd.ndAlgebra
@@ -79,7 +78,7 @@ internal class NDFieldBenchmark {
}
@Benchmark
-fun multikInPlaceAdd(blackhole: Blackhole) = with(DoubleField.multikTensorAlgebra) {
+fun multikInPlaceAdd(blackhole: Blackhole) = with(DoubleField.multikAlgebra) {
val res = Multik.ones<Double, DN>(shape, DataType.DoubleDataType).wrap()
repeat(n) { res += 1.0 }
blackhole.consume(res)
@@ -100,7 +99,7 @@ internal class NDFieldBenchmark {
private val specializedField = DoubleField.ndAlgebra
private val genericField = BufferedFieldOpsND(DoubleField, Buffer.Companion::boxing)
private val nd4jField = DoubleField.nd4j
-private val multikField = DoubleField.multikND
+private val multikField = DoubleField.multikAlgebra
private val viktorField = DoubleField.viktorAlgebra
}
}


@@ -7,12 +7,12 @@ package space.kscience.kmath.tensors
import org.jetbrains.kotlinx.multik.api.Multik
import org.jetbrains.kotlinx.multik.api.ndarray
-import space.kscience.kmath.multik.multikND
+import space.kscience.kmath.multik.multikAlgebra
import space.kscience.kmath.nd.one
import space.kscience.kmath.operations.DoubleField
-fun main(): Unit = with(DoubleField.multikND) {
+fun main(): Unit = with(DoubleField.multikAlgebra) {
val a = Multik.ndarray(intArrayOf(1, 2, 3)).asType<Double>().wrap()
val b = Multik.ndarray(doubleArrayOf(1.0, 2.0, 3.0)).wrap()
-one(a.shape) - a + b * 3
+one(a.shape) - a + b * 3.0
}


@@ -1,137 +0,0 @@
package space.kscience.kmath.multik
import org.jetbrains.kotlinx.multik.api.math.cos
import org.jetbrains.kotlinx.multik.api.math.sin
import org.jetbrains.kotlinx.multik.api.mk
import org.jetbrains.kotlinx.multik.api.zeros
import org.jetbrains.kotlinx.multik.ndarray.data.*
import org.jetbrains.kotlinx.multik.ndarray.operations.*
import space.kscience.kmath.nd.FieldOpsND
import space.kscience.kmath.nd.RingOpsND
import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.operations.*
/**
* A ring algebra for Multik operations
*/
public open class MultikRingOpsND<T, A : Ring<T>> internal constructor(
public val type: DataType,
override val elementAlgebra: A
) : RingOpsND<T, A> {
public fun MutableMultiArray<T, *>.wrap(): MultikTensor<T> = MultikTensor(this.asDNArray())
override fun structureND(shape: Shape, initializer: A.(IntArray) -> T): MultikTensor<T> {
val res = mk.zeros<T, DN>(shape, type).asDNArray()
for (index in res.multiIndices) {
res[index] = elementAlgebra.initializer(index)
}
return res.wrap()
}
public fun StructureND<T>.asMultik(): MultikTensor<T> = if (this is MultikTensor) {
this
} else {
structureND(shape) { get(it) }
}
override fun StructureND<T>.map(transform: A.(T) -> T): MultikTensor<T> {
//taken directly from Multik sources
val array = asMultik().array
val data = initMemoryView<T>(array.size, type)
var count = 0
for (el in array) data[count++] = elementAlgebra.transform(el)
return NDArray(data, shape = array.shape, dim = array.dim).wrap()
}
override fun StructureND<T>.mapIndexed(transform: A.(index: IntArray, T) -> T): MultikTensor<T> {
//taken directly from Multik sources
val array = asMultik().array
val data = initMemoryView<T>(array.size, type)
val indexIter = array.multiIndices.iterator()
var index = 0
for (item in array) {
if (indexIter.hasNext()) {
data[index++] = elementAlgebra.transform(indexIter.next(), item)
} else {
throw ArithmeticException("Index overflow has happened.")
}
}
return NDArray(data, shape = array.shape, dim = array.dim).wrap()
}
override fun zip(left: StructureND<T>, right: StructureND<T>, transform: A.(T, T) -> T): MultikTensor<T> {
require(left.shape.contentEquals(right.shape)) { "ND array shape mismatch" } //TODO replace by ShapeMismatchException
val leftArray = left.asMultik().array
val rightArray = right.asMultik().array
val data = initMemoryView<T>(leftArray.size, type)
var counter = 0
val leftIterator = leftArray.iterator()
val rightIterator = rightArray.iterator()
//iterating them together
while (leftIterator.hasNext()) {
data[counter++] = elementAlgebra.transform(leftIterator.next(), rightIterator.next())
}
return NDArray(data, shape = leftArray.shape, dim = leftArray.dim).wrap()
}
override fun StructureND<T>.unaryMinus(): MultikTensor<T> = asMultik().array.unaryMinus().wrap()
override fun add(left: StructureND<T>, right: StructureND<T>): MultikTensor<T> =
(left.asMultik().array + right.asMultik().array).wrap()
override fun StructureND<T>.plus(arg: T): MultikTensor<T> =
asMultik().array.plus(arg).wrap()
override fun StructureND<T>.minus(arg: T): MultikTensor<T> = asMultik().array.minus(arg).wrap()
override fun T.plus(arg: StructureND<T>): MultikTensor<T> = arg + this
override fun T.minus(arg: StructureND<T>): MultikTensor<T> = arg.map { this@minus - it }
override fun multiply(left: StructureND<T>, right: StructureND<T>): MultikTensor<T> =
left.asMultik().array.times(right.asMultik().array).wrap()
override fun StructureND<T>.times(arg: T): MultikTensor<T> =
asMultik().array.times(arg).wrap()
override fun T.times(arg: StructureND<T>): MultikTensor<T> = arg * this
override fun StructureND<T>.unaryPlus(): MultikTensor<T> = asMultik()
override fun StructureND<T>.plus(other: StructureND<T>): MultikTensor<T> =
asMultik().array.plus(other.asMultik().array).wrap()
override fun StructureND<T>.minus(other: StructureND<T>): MultikTensor<T> =
asMultik().array.minus(other.asMultik().array).wrap()
override fun StructureND<T>.times(other: StructureND<T>): MultikTensor<T> =
asMultik().array.times(other.asMultik().array).wrap()
}
/**
* A field algebra for multik operations
*/
public class MultikFieldOpsND<T, A : Field<T>> internal constructor(
type: DataType,
elementAlgebra: A
) : MultikRingOpsND<T, A>(type, elementAlgebra), FieldOpsND<T, A> {
override fun StructureND<T>.div(other: StructureND<T>): StructureND<T> =
asMultik().array.div(other.asMultik().array).wrap()
}
public val DoubleField.multikND: MultikFieldOpsND<Double, DoubleField>
get() = MultikFieldOpsND(DataType.DoubleDataType, DoubleField)
public val FloatField.multikND: MultikFieldOpsND<Float, FloatField>
get() = MultikFieldOpsND(DataType.FloatDataType, FloatField)
public val ShortRing.multikND: MultikRingOpsND<Short, ShortRing>
get() = MultikRingOpsND(DataType.ShortDataType, ShortRing)
public val IntRing.multikND: MultikRingOpsND<Int, IntRing>
get() = MultikRingOpsND(DataType.IntDataType, IntRing)
public val LongRing.multikND: MultikRingOpsND<Long, LongRing>
get() = MultikRingOpsND(DataType.LongDataType, LongRing)
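
For anyone tracking the removal above: the multikND accessors are superseded by the multikAlgebra accessors introduced in the next file. A minimal migration sketch, using only calls that appear elsewhere in this diff:

import space.kscience.kmath.multik.multikAlgebra
import space.kscience.kmath.nd.one
import space.kscience.kmath.operations.DoubleField

fun migrated() = with(DoubleField.multikAlgebra) {
    // before this commit: with(DoubleField.multikND) { ... }
    one(2, 2) + 1.0
}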


@@ -15,12 +15,14 @@ import org.jetbrains.kotlinx.multik.api.zeros
import org.jetbrains.kotlinx.multik.ndarray.data.*
import org.jetbrains.kotlinx.multik.ndarray.operations.*
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.nd.DefaultStrides
import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.nd.mapInPlace
import space.kscience.kmath.operations.*
import space.kscience.kmath.tensors.api.Tensor
import space.kscience.kmath.tensors.api.TensorAlgebra
import space.kscience.kmath.tensors.api.TensorPartialDivisionAlgebra
@JvmInline
public value class MultikTensor<T>(public val array: MutableMultiArray<T, DN>) : Tensor<T> {
@@ -50,18 +52,70 @@ private fun <T, D : Dimension> MultiArray<T, D>.asD2Array(): D2Array<T> {
else throw ClassCastException("Cannot cast MultiArray to NDArray.")
}
-public class MultikTensorAlgebra<T : Number, A: Ring<T>> internal constructor(
-public val type: DataType,
-override val elementAlgebra: A,
-public val comparator: Comparator<T>
-) : TensorAlgebra<T, A> {
+public abstract class MultikTensorAlgebra<T, A : Ring<T>> : TensorAlgebra<T, A> where T : Number, T : Comparable<T> {
+public abstract val type: DataType
override fun structureND(shape: Shape, initializer: A.(IntArray) -> T): MultikTensor<T> {
val strides = DefaultStrides(shape)
val memoryView = initMemoryView<T>(strides.linearSize, type)
strides.indices().forEachIndexed { linearIndex, tensorIndex ->
memoryView[linearIndex] = elementAlgebra.initializer(tensorIndex)
}
return MultikTensor(NDArray(memoryView, shape = shape, dim = DN(shape.size)))
}
override fun StructureND<T>.map(transform: A.(T) -> T): MultikTensor<T> = if (this is MultikTensor) {
val data = initMemoryView<T>(array.size, type)
var count = 0
for (el in array) data[count++] = elementAlgebra.transform(el)
NDArray(data, shape = shape, dim = array.dim).wrap()
} else {
structureND(shape) { index ->
transform(get(index))
}
}
override fun StructureND<T>.mapIndexed(transform: A.(index: IntArray, T) -> T): MultikTensor<T> =
if (this is MultikTensor) {
val array = asMultik().array
val data = initMemoryView<T>(array.size, type)
val indexIter = array.multiIndices.iterator()
var index = 0
for (item in array) {
if (indexIter.hasNext()) {
data[index++] = elementAlgebra.transform(indexIter.next(), item)
} else {
throw ArithmeticException("Index overflow has happened.")
}
}
NDArray(data, shape = array.shape, dim = array.dim).wrap()
} else {
structureND(shape) { index ->
transform(index, get(index))
}
}
override fun zip(left: StructureND<T>, right: StructureND<T>, transform: A.(T, T) -> T): MultikTensor<T> {
require(left.shape.contentEquals(right.shape)) { "ND array shape mismatch" } //TODO replace by ShapeMismatchException
val leftArray = left.asMultik().array
val rightArray = right.asMultik().array
val data = initMemoryView<T>(leftArray.size, type)
var counter = 0
val leftIterator = leftArray.iterator()
val rightIterator = rightArray.iterator()
//iterating them together
while (leftIterator.hasNext()) {
data[counter++] = elementAlgebra.transform(leftIterator.next(), rightIterator.next())
}
return NDArray(data, shape = leftArray.shape, dim = leftArray.dim).wrap()
}
/**
* Convert a tensor to [MultikTensor] if necessary. If tensor is converted, changes on the resulting tensor
* are not reflected back onto the source
*/
-public fun StructureND<T>.asMultik(): MultikTensor<T> {
-return if (this is MultikTensor) {
+public fun StructureND<T>.asMultik(): MultikTensor<T> = if (this is MultikTensor) {
this
} else {
val res = mk.zeros<T, DN>(shape, type).asDNArray()
@@ -70,9 +124,8 @@ public class MultikTensorAlgebra<T : Number, A: Ring<T>> internal constructor(
}
res.wrap()
}
-}
-public fun MutableMultiArray<T, DN>.wrap(): MultikTensor<T> = MultikTensor(this)
+public fun MutableMultiArray<T, *>.wrap(): MultikTensor<T> = MultikTensor(this.asDNArray())
override fun StructureND<T>.valueOrNull(): T? = if (shape contentEquals intArrayOf(1)) {
get(intArrayOf(0))
@@ -81,8 +134,8 @@ public class MultikTensorAlgebra<T : Number, A: Ring<T>> internal constructor(
override fun T.plus(other: StructureND<T>): MultikTensor<T> =
other.plus(this)
-override fun StructureND<T>.plus(value: T): MultikTensor<T> =
-asMultik().array.deepCopy().apply { plusAssign(value) }.wrap()
+override fun StructureND<T>.plus(arg: T): MultikTensor<T> =
+asMultik().array.deepCopy().apply { plusAssign(arg) }.wrap()
override fun StructureND<T>.plus(other: StructureND<T>): MultikTensor<T> =
asMultik().array.plus(other.asMultik().array).wrap()
@@ -155,11 +208,11 @@ public class MultikTensorAlgebra<T : Number, A: Ring<T>> internal constructor(
override fun StructureND<T>.unaryMinus(): MultikTensor<T> =
asMultik().array.unaryMinus().wrap()
-override fun Tensor<T>.get(i: Int): MultikTensor<T> = asMultik().array.mutableView(i).wrap()
+override fun StructureND<T>.get(i: Int): MultikTensor<T> = asMultik().array.mutableView(i).wrap()
-override fun Tensor<T>.transpose(i: Int, j: Int): MultikTensor<T> = asMultik().array.transpose(i, j).wrap()
+override fun StructureND<T>.transpose(i: Int, j: Int): MultikTensor<T> = asMultik().array.transpose(i, j).wrap()
-override fun Tensor<T>.view(shape: IntArray): MultikTensor<T> {
+override fun StructureND<T>.view(shape: IntArray): MultikTensor<T> {
require(shape.all { it > 0 })
require(shape.fold(1, Int::times) == this.shape.size) {
"Cannot reshape array of size ${this.shape.size} into a new shape ${
@@ -178,9 +231,9 @@ public class MultikTensorAlgebra<T : Number, A: Ring<T>> internal constructor(
}.wrap()
}
-override fun Tensor<T>.viewAs(other: Tensor<T>): MultikTensor<T> = view(other.shape)
+override fun StructureND<T>.viewAs(other: StructureND<T>): MultikTensor<T> = view(other.shape)
-override fun Tensor<T>.dot(other: Tensor<T>): MultikTensor<T> =
+override fun StructureND<T>.dot(other: StructureND<T>): MultikTensor<T> =
if (this.shape.size == 1 && other.shape.size == 1) {
Multik.ndarrayOf(
asMultik().array.asD1Array() dot other.asMultik().array.asD1Array()
@@ -197,45 +250,95 @@ public class MultikTensorAlgebra<T : Number, A: Ring<T>> internal constructor(
TODO("Diagonal embedding not implemented")
}
-override fun Tensor<T>.sum(): T = asMultik().array.reduceMultiIndexed { _: IntArray, acc: T, t: T ->
+override fun StructureND<T>.sum(): T = asMultik().array.reduceMultiIndexed { _: IntArray, acc: T, t: T ->
elementAlgebra.add(acc, t)
}
-override fun Tensor<T>.sum(dim: Int, keepDim: Boolean): MultikTensor<T> {
+override fun StructureND<T>.sum(dim: Int, keepDim: Boolean): MultikTensor<T> {
TODO("Not yet implemented")
}
-override fun Tensor<T>.min(): T =
-asMultik().array.minWith(comparator) ?: error("No elements in tensor")
+override fun StructureND<T>.min(): T? = asMultik().array.min()
-override fun Tensor<T>.min(dim: Int, keepDim: Boolean): MultikTensor<T> {
+override fun StructureND<T>.min(dim: Int, keepDim: Boolean): Tensor<T> {
TODO("Not yet implemented")
}
-override fun Tensor<T>.max(): T =
-asMultik().array.maxWith(comparator) ?: error("No elements in tensor")
+override fun StructureND<T>.max(): T? = asMultik().array.max()
-override fun Tensor<T>.max(dim: Int, keepDim: Boolean): MultikTensor<T> {
+override fun StructureND<T>.max(dim: Int, keepDim: Boolean): Tensor<T> {
TODO("Not yet implemented")
}
-override fun Tensor<T>.argMax(dim: Int, keepDim: Boolean): MultikTensor<T> {
+override fun StructureND<T>.argMax(dim: Int, keepDim: Boolean): Tensor<T> {
TODO("Not yet implemented")
}
}
-public val DoubleField.multikTensorAlgebra: MultikTensorAlgebra<Double, DoubleField>
-get() = MultikTensorAlgebra(DataType.DoubleDataType, DoubleField) { o1, o2 -> o1.compareTo(o2) }
+public abstract class MultikDivisionTensorAlgebra<T, A : Field<T>>
+: MultikTensorAlgebra<T, A>(), TensorPartialDivisionAlgebra<T, A> where T : Number, T : Comparable<T> {
-public val FloatField.multikTensorAlgebra: MultikTensorAlgebra<Float, FloatField>
-get() = MultikTensorAlgebra(DataType.FloatDataType, FloatField) { o1, o2 -> o1.compareTo(o2) }
+override fun T.div(arg: StructureND<T>): MultikTensor<T> = arg.map { elementAlgebra.divide(this@div, it) }
-public val ShortRing.multikTensorAlgebra: MultikTensorAlgebra<Short, ShortRing>
-get() = MultikTensorAlgebra(DataType.ShortDataType, ShortRing) { o1, o2 -> o1.compareTo(o2) }
+override fun StructureND<T>.div(arg: T): MultikTensor<T> =
+asMultik().array.deepCopy().apply { divAssign(arg) }.wrap()
-public val IntRing.multikTensorAlgebra: MultikTensorAlgebra<Int, IntRing>
-get() = MultikTensorAlgebra(DataType.IntDataType, IntRing) { o1, o2 -> o1.compareTo(o2) }
+override fun StructureND<T>.div(other: StructureND<T>): MultikTensor<T> =
+asMultik().array.div(other.asMultik().array).wrap()
-public val LongRing.multikTensorAlgebra: MultikTensorAlgebra<Long, LongRing>
-get() = MultikTensorAlgebra(DataType.LongDataType, LongRing) { o1, o2 -> o1.compareTo(o2) }
+override fun Tensor<T>.divAssign(value: T) {
+if (this is MultikTensor) {
+array.divAssign(value)
+} else {
+mapInPlace { _, t -> elementAlgebra.divide(t, value) }
+}
+}
+override fun Tensor<T>.divAssign(other: StructureND<T>) {
+if (this is MultikTensor) {
+array.divAssign(other.asMultik().array)
+} else {
+mapInPlace { index, t -> elementAlgebra.divide(t, other[index]) }
+}
+}
+}
public object MultikDoubleAlgebra : MultikDivisionTensorAlgebra<Double, DoubleField>() {
override val elementAlgebra: DoubleField get() = DoubleField
override val type: DataType get() = DataType.DoubleDataType
}
public val Double.Companion.multikAlgebra: MultikTensorAlgebra<Double, DoubleField> get() = MultikDoubleAlgebra
public val DoubleField.multikAlgebra: MultikTensorAlgebra<Double, DoubleField> get() = MultikDoubleAlgebra
public object MultikFloatAlgebra : MultikDivisionTensorAlgebra<Float, FloatField>() {
override val elementAlgebra: FloatField get() = FloatField
override val type: DataType get() = DataType.FloatDataType
}
public val Float.Companion.multikAlgebra: MultikTensorAlgebra<Float, FloatField> get() = MultikFloatAlgebra
public val FloatField.multikAlgebra: MultikTensorAlgebra<Float, FloatField> get() = MultikFloatAlgebra
public object MultikShortAlgebra : MultikTensorAlgebra<Short, ShortRing>() {
override val elementAlgebra: ShortRing get() = ShortRing
override val type: DataType get() = DataType.ShortDataType
}
public val Short.Companion.multikAlgebra: MultikTensorAlgebra<Short, ShortRing> get() = MultikShortAlgebra
public val ShortRing.multikAlgebra: MultikTensorAlgebra<Short, ShortRing> get() = MultikShortAlgebra
public object MultikIntAlgebra : MultikTensorAlgebra<Int, IntRing>() {
override val elementAlgebra: IntRing get() = IntRing
override val type: DataType get() = DataType.IntDataType
}
public val Int.Companion.multikAlgebra: MultikTensorAlgebra<Int, IntRing> get() = MultikIntAlgebra
public val IntRing.multikAlgebra: MultikTensorAlgebra<Int, IntRing> get() = MultikIntAlgebra
public object MultikLongAlgebra : MultikTensorAlgebra<Long, LongRing>() {
override val elementAlgebra: LongRing get() = LongRing
override val type: DataType get() = DataType.LongDataType
}
public val Long.Companion.multikAlgebra: MultikTensorAlgebra<Long, LongRing> get() = MultikLongAlgebra
public val LongRing.multikAlgebra: MultikTensorAlgebra<Long, LongRing> get() = MultikLongAlgebra


@@ -7,7 +7,7 @@ import space.kscience.kmath.operations.invoke
internal class MultikNDTest {
@Test
-fun basicAlgebra(): Unit = DoubleField.multikND{
+fun basicAlgebra(): Unit = DoubleField.multikAlgebra{
one(2,2) + 1.0
}
}


@@ -13,6 +13,7 @@ import org.nd4j.linalg.factory.Nd4j
import org.nd4j.linalg.factory.ops.NDBase
import org.nd4j.linalg.ops.transforms.Transforms
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.nd.DefaultStrides
import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.operations.DoubleField
@@ -27,22 +28,6 @@ import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
*/
public sealed interface Nd4jTensorAlgebra<T : Number, A : Field<T>> : AnalyticTensorAlgebra<T, A> {
override fun structureND(shape: Shape, initializer: A.(IntArray) -> T): Nd4jArrayStructure<T> {
val array =
}
override fun StructureND<T>.map(transform: A.(T) -> T): Nd4jArrayStructure<T> {
TODO("Not yet implemented")
}
override fun StructureND<T>.mapIndexed(transform: A.(index: IntArray, T) -> T): Nd4jArrayStructure<T> {
TODO("Not yet implemented")
}
override fun zip(left: StructureND<T>, right: StructureND<T>, transform: A.(T, T) -> T): Nd4jArrayStructure<T> {
TODO("Not yet implemented")
}
/**
* Wraps [INDArray] to [Nd4jArrayStructure].
*/
@@ -53,8 +38,21 @@ public sealed interface Nd4jTensorAlgebra<T : Number, A : Field<T>> : AnalyticTe
*/
public val StructureND<T>.ndArray: INDArray
override fun structureND(shape: Shape, initializer: A.(IntArray) -> T): Nd4jArrayStructure<T>
override fun StructureND<T>.map(transform: A.(T) -> T): Nd4jArrayStructure<T> =
structureND(shape) { index -> elementAlgebra.transform(get(index)) }
override fun StructureND<T>.mapIndexed(transform: A.(index: IntArray, T) -> T): Nd4jArrayStructure<T> =
structureND(shape) { index -> elementAlgebra.transform(index, get(index)) }
override fun zip(left: StructureND<T>, right: StructureND<T>, transform: A.(T, T) -> T): Nd4jArrayStructure<T> {
require(left.shape.contentEquals(right.shape))
return structureND(left.shape) { index -> elementAlgebra.transform(left[index], right[index]) }
}
override fun T.plus(other: StructureND<T>): Nd4jArrayStructure<T> = other.ndArray.add(this).wrap()
-override fun StructureND<T>.plus(value: T): Nd4jArrayStructure<T> = ndArray.add(value).wrap()
+override fun StructureND<T>.plus(arg: T): Nd4jArrayStructure<T> = ndArray.add(arg).wrap()
override fun StructureND<T>.plus(other: StructureND<T>): Nd4jArrayStructure<T> = ndArray.add(other.ndArray).wrap()
@@ -94,51 +92,52 @@ public sealed interface Nd4jTensorAlgebra<T : Number, A : Field<T>> : AnalyticTe
}
override fun StructureND<T>.unaryMinus(): Nd4jArrayStructure<T> = ndArray.neg().wrap()
-override fun Tensor<T>.get(i: Int): Nd4jArrayStructure<T> = ndArray.slice(i.toLong()).wrap()
-override fun Tensor<T>.transpose(i: Int, j: Int): Nd4jArrayStructure<T> = ndArray.swapAxes(i, j).wrap()
-override fun Tensor<T>.dot(other: Tensor<T>): Nd4jArrayStructure<T> = ndArray.mmul(other.ndArray).wrap()
+override fun StructureND<T>.get(i: Int): Nd4jArrayStructure<T> = ndArray.slice(i.toLong()).wrap()
+override fun StructureND<T>.transpose(i: Int, j: Int): Nd4jArrayStructure<T> = ndArray.swapAxes(i, j).wrap()
+override fun StructureND<T>.dot(other: StructureND<T>): Nd4jArrayStructure<T> = ndArray.mmul(other.ndArray).wrap()
-override fun Tensor<T>.min(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
+override fun StructureND<T>.min(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
ndArray.min(keepDim, dim).wrap()
-override fun Tensor<T>.sum(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
+override fun StructureND<T>.sum(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
ndArray.sum(keepDim, dim).wrap()
-override fun Tensor<T>.max(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
+override fun StructureND<T>.max(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
ndArray.max(keepDim, dim).wrap()
-override fun Tensor<T>.view(shape: IntArray): Nd4jArrayStructure<T> = ndArray.reshape(shape).wrap()
-override fun Tensor<T>.viewAs(other: Tensor<T>): Nd4jArrayStructure<T> = view(other.shape)
+override fun StructureND<T>.view(shape: IntArray): Nd4jArrayStructure<T> = ndArray.reshape(shape).wrap()
+override fun StructureND<T>.viewAs(other: StructureND<T>): Nd4jArrayStructure<T> = view(other.shape)
-override fun Tensor<T>.argMax(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
+override fun StructureND<T>.argMax(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
ndBase.get().argmax(ndArray, keepDim, dim).wrap()
-override fun Tensor<T>.mean(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> = ndArray.mean(keepDim, dim).wrap()
+override fun StructureND<T>.mean(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
+ndArray.mean(keepDim, dim).wrap()
-override fun Tensor<T>.exp(): Nd4jArrayStructure<T> = Transforms.exp(ndArray).wrap()
-override fun Tensor<T>.ln(): Nd4jArrayStructure<T> = Transforms.log(ndArray).wrap()
-override fun Tensor<T>.sqrt(): Nd4jArrayStructure<T> = Transforms.sqrt(ndArray).wrap()
-override fun Tensor<T>.cos(): Nd4jArrayStructure<T> = Transforms.cos(ndArray).wrap()
-override fun Tensor<T>.acos(): Nd4jArrayStructure<T> = Transforms.acos(ndArray).wrap()
-override fun Tensor<T>.cosh(): Nd4jArrayStructure<T> = Transforms.cosh(ndArray).wrap()
+override fun StructureND<T>.exp(): Nd4jArrayStructure<T> = Transforms.exp(ndArray).wrap()
+override fun StructureND<T>.ln(): Nd4jArrayStructure<T> = Transforms.log(ndArray).wrap()
+override fun StructureND<T>.sqrt(): Nd4jArrayStructure<T> = Transforms.sqrt(ndArray).wrap()
+override fun StructureND<T>.cos(): Nd4jArrayStructure<T> = Transforms.cos(ndArray).wrap()
+override fun StructureND<T>.acos(): Nd4jArrayStructure<T> = Transforms.acos(ndArray).wrap()
+override fun StructureND<T>.cosh(): Nd4jArrayStructure<T> = Transforms.cosh(ndArray).wrap()
-override fun Tensor<T>.acosh(): Nd4jArrayStructure<T> =
+override fun StructureND<T>.acosh(): Nd4jArrayStructure<T> =
Nd4j.getExecutioner().exec(ACosh(ndArray, ndArray.ulike())).wrap()
-override fun Tensor<T>.sin(): Nd4jArrayStructure<T> = Transforms.sin(ndArray).wrap()
-override fun Tensor<T>.asin(): Nd4jArrayStructure<T> = Transforms.asin(ndArray).wrap()
-override fun Tensor<T>.sinh(): Tensor<T> = Transforms.sinh(ndArray).wrap()
+override fun StructureND<T>.sin(): Nd4jArrayStructure<T> = Transforms.sin(ndArray).wrap()
+override fun StructureND<T>.asin(): Nd4jArrayStructure<T> = Transforms.asin(ndArray).wrap()
+override fun StructureND<T>.sinh(): Tensor<T> = Transforms.sinh(ndArray).wrap()
-override fun Tensor<T>.asinh(): Nd4jArrayStructure<T> =
+override fun StructureND<T>.asinh(): Nd4jArrayStructure<T> =
Nd4j.getExecutioner().exec(ASinh(ndArray, ndArray.ulike())).wrap()
-override fun Tensor<T>.tan(): Nd4jArrayStructure<T> = Transforms.tan(ndArray).wrap()
-override fun Tensor<T>.atan(): Nd4jArrayStructure<T> = Transforms.atan(ndArray).wrap()
-override fun Tensor<T>.tanh(): Nd4jArrayStructure<T> = Transforms.tanh(ndArray).wrap()
-override fun Tensor<T>.atanh(): Nd4jArrayStructure<T> = Transforms.atanh(ndArray).wrap()
-override fun Tensor<T>.ceil(): Nd4jArrayStructure<T> = Transforms.ceil(ndArray).wrap()
-override fun Tensor<T>.floor(): Nd4jArrayStructure<T> = Transforms.floor(ndArray).wrap()
-override fun Tensor<T>.std(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
+override fun StructureND<T>.tan(): Nd4jArrayStructure<T> = Transforms.tan(ndArray).wrap()
+override fun StructureND<T>.atan(): Nd4jArrayStructure<T> = Transforms.atan(ndArray).wrap()
+override fun StructureND<T>.tanh(): Nd4jArrayStructure<T> = Transforms.tanh(ndArray).wrap()
+override fun StructureND<T>.atanh(): Nd4jArrayStructure<T> = Transforms.atanh(ndArray).wrap()
+override fun StructureND<T>.ceil(): Nd4jArrayStructure<T> = Transforms.ceil(ndArray).wrap()
+override fun StructureND<T>.floor(): Nd4jArrayStructure<T> = Transforms.floor(ndArray).wrap()
+override fun StructureND<T>.std(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
ndArray.std(true, keepDim, dim).wrap()
override fun T.div(arg: StructureND<T>): Nd4jArrayStructure<T> = arg.ndArray.rdiv(this).wrap()
@@ -153,7 +152,7 @@ public sealed interface Nd4jTensorAlgebra<T : Number, A : Field<T>> : AnalyticTe
ndArray.divi(other.ndArray)
}
-override fun Tensor<T>.variance(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
+override fun StructureND<T>.variance(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
Nd4j.getExecutioner().exec(Variance(ndArray, true, true, dim)).wrap()
private companion object {
@@ -170,6 +169,16 @@ public object DoubleNd4jTensorAlgebra : Nd4jTensorAlgebra<Double, DoubleField> {
override fun INDArray.wrap(): Nd4jArrayStructure<Double> = asDoubleStructure()
override fun structureND(shape: Shape, initializer: DoubleField.(IntArray) -> Double): Nd4jArrayStructure<Double> {
val array: INDArray = Nd4j.zeros(*shape)
val indices = DefaultStrides(shape)
indices.indices().forEach { index ->
array.putScalar(index, elementAlgebra.initializer(index))
}
return array.wrap()
}
@OptIn(PerformancePitfall::class)
override val StructureND<Double>.ndArray: INDArray
get() = when (this) {
@@ -190,10 +199,10 @@ public object DoubleNd4jTensorAlgebra : Nd4jTensorAlgebra<Double, DoubleField> {
dim2: Int,
): Tensor<Double> = DoubleTensorAlgebra.diagonalEmbedding(diagonalEntries, offset, dim1, dim2)
-override fun Tensor<Double>.sum(): Double = ndArray.sumNumber().toDouble()
-override fun Tensor<Double>.min(): Double = ndArray.minNumber().toDouble()
-override fun Tensor<Double>.max(): Double = ndArray.maxNumber().toDouble()
-override fun Tensor<Double>.mean(): Double = ndArray.meanNumber().toDouble()
-override fun Tensor<Double>.std(): Double = ndArray.stdNumber().toDouble()
-override fun Tensor<Double>.variance(): Double = ndArray.varNumber().toDouble()
+override fun StructureND<Double>.sum(): Double = ndArray.sumNumber().toDouble()
+override fun StructureND<Double>.min(): Double = ndArray.minNumber().toDouble()
+override fun StructureND<Double>.max(): Double = ndArray.maxNumber().toDouble()
+override fun StructureND<Double>.mean(): Double = ndArray.meanNumber().toDouble()
+override fun StructureND<Double>.std(): Double = ndArray.stdNumber().toDouble()
+override fun StructureND<Double>.variance(): Double = ndArray.varNumber().toDouble()
}
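
A hedged sketch of the reworked Nd4j algebra; the package name is assumed from the module, everything else appears in the diff above:

import space.kscience.kmath.nd4j.DoubleNd4jTensorAlgebra // package name assumed

fun nd4jDemo() = with(DoubleNd4jTensorAlgebra) {
    // structureND is now implemented directly for the Double specialization (see above)
    val t = structureND(intArrayOf(2, 2)) { index -> (index[0] + index[1]).toDouble() }
    t.exp().sum() // analytic op followed by a scalar reduction
}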


@@ -5,6 +5,7 @@
package space.kscience.kmath.tensors.api
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.operations.Field
@@ -18,7 +19,7 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> : TensorPartialDivisionA
/**
* @return the mean of all elements in the input tensor.
*/
-public fun Tensor<T>.mean(): T
+public fun StructureND<T>.mean(): T
/**
* Returns the mean of each row of the input tensor in the given dimension [dim].
@@ -31,12 +32,12 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> : TensorPartialDivisionA
* @param keepDim whether the output tensor has [dim] retained or not.
* @return the mean of each row of the input tensor in the given dimension [dim].
*/
-public fun Tensor<T>.mean(dim: Int, keepDim: Boolean): Tensor<T>
+public fun StructureND<T>.mean(dim: Int, keepDim: Boolean): Tensor<T>
/**
* @return the standard deviation of all elements in the input tensor.
*/
-public fun Tensor<T>.std(): T
+public fun StructureND<T>.std(): T
/**
* Returns the standard deviation of each row of the input tensor in the given dimension [dim].
@@ -49,12 +50,12 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> : TensorPartialDivisionA
* @param keepDim whether the output tensor has [dim] retained or not.
* @return the standard deviation of each row of the input tensor in the given dimension [dim].
*/
-public fun Tensor<T>.std(dim: Int, keepDim: Boolean): Tensor<T>
+public fun StructureND<T>.std(dim: Int, keepDim: Boolean): Tensor<T>
/**
* @return the variance of all elements in the input tensor.
*/
-public fun Tensor<T>.variance(): T
+public fun StructureND<T>.variance(): T
/**
* Returns the variance of each row of the input tensor in the given dimension [dim].
@@ -67,57 +68,57 @@ public interface AnalyticTensorAlgebra<T, A : Field<T>> : TensorPartialDivisionA
* @param keepDim whether the output tensor has [dim] retained or not.
* @return the variance of each row of the input tensor in the given dimension [dim].
*/
-public fun Tensor<T>.variance(dim: Int, keepDim: Boolean): Tensor<T>
+public fun StructureND<T>.variance(dim: Int, keepDim: Boolean): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.exp.html
-public fun Tensor<T>.exp(): Tensor<T>
+public fun StructureND<T>.exp(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.log.html
-public fun Tensor<T>.ln(): Tensor<T>
+public fun StructureND<T>.ln(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.sqrt.html
-public fun Tensor<T>.sqrt(): Tensor<T>
+public fun StructureND<T>.sqrt(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.acos.html#torch.cos
-public fun Tensor<T>.cos(): Tensor<T>
+public fun StructureND<T>.cos(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.acos.html#torch.acos
-public fun Tensor<T>.acos(): Tensor<T>
+public fun StructureND<T>.acos(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.acosh.html#torch.cosh
-public fun Tensor<T>.cosh(): Tensor<T>
+public fun StructureND<T>.cosh(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.acosh.html#torch.acosh
-public fun Tensor<T>.acosh(): Tensor<T>
+public fun StructureND<T>.acosh(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.sin
-public fun Tensor<T>.sin(): Tensor<T>
+public fun StructureND<T>.sin(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.asin
-public fun Tensor<T>.asin(): Tensor<T>
+public fun StructureND<T>.asin(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.sinh
-public fun Tensor<T>.sinh(): Tensor<T>
+public fun StructureND<T>.sinh(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.asin.html#torch.asinh
-public fun Tensor<T>.asinh(): Tensor<T>
+public fun StructureND<T>.asinh(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.atan.html#torch.tan
-public fun Tensor<T>.tan(): Tensor<T>
+public fun StructureND<T>.tan(): Tensor<T>
//https://pytorch.org/docs/stable/generated/torch.atan.html#torch.atan
-public fun Tensor<T>.atan(): Tensor<T>
+public fun StructureND<T>.atan(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.atanh.html#torch.tanh
-public fun Tensor<T>.tanh(): Tensor<T>
+public fun StructureND<T>.tanh(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.atanh.html#torch.atanh
-public fun Tensor<T>.atanh(): Tensor<T>
+public fun StructureND<T>.atanh(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.ceil.html#torch.ceil
-public fun Tensor<T>.ceil(): Tensor<T>
+public fun StructureND<T>.ceil(): Tensor<T>
//For information: https://pytorch.org/docs/stable/generated/torch.floor.html#torch.floor
-public fun Tensor<T>.floor(): Tensor<T>
+public fun StructureND<T>.floor(): Tensor<T>
}
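
Since every receiver above moved from Tensor<T> to StructureND<T>, any read-only ND structure can now be fed to the analytic operations. A small sketch against the double tensor algebra from the end of this diff (the tensorAlgebra accessor is defined in the last file below; its import location is assumed):

import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.tensors.core.tensorAlgebra // accessor location assumed

fun analyticDemo(x: StructureND<Double>) = with(Double.tensorAlgebra) {
    x.sin().cos() // accepted even though x is not a Tensor
}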


@@ -5,6 +5,7 @@
package space.kscience.kmath.tensors.api
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.operations.Field
/**
@@ -20,7 +21,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
*
* @return the determinant.
*/
-public fun Tensor<T>.det(): Tensor<T>
+public fun StructureND<T>.det(): Tensor<T>
/**
* Computes the multiplicative inverse matrix of a square matrix input, or of each square matrix in a batched input.
@@ -30,7 +31,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
*
* @return the multiplicative inverse of a matrix.
*/
-public fun Tensor<T>.inv(): Tensor<T>
+public fun StructureND<T>.inv(): Tensor<T>
/**
* Cholesky decomposition.
@@ -46,7 +47,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
* @receiver the `input`.
* @return the batch of `L` matrices.
*/
-public fun Tensor<T>.cholesky(): Tensor<T>
+public fun StructureND<T>.cholesky(): Tensor<T>
/**
* QR decomposition.
@@ -60,7 +61,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
* @receiver the `input`.
* @return pair of `Q` and `R` tensors.
*/
-public fun Tensor<T>.qr(): Pair<Tensor<T>, Tensor<T>>
+public fun StructureND<T>.qr(): Pair<Tensor<T>, Tensor<T>>
/**
* LUP decomposition
@@ -74,7 +75,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
* @receiver the `input`.
* @return triple of P, L and U tensors
*/
-public fun Tensor<T>.lu(): Triple<Tensor<T>, Tensor<T>, Tensor<T>>
+public fun StructureND<T>.lu(): Triple<Tensor<T>, Tensor<T>, Tensor<T>>
/**
* Singular Value Decomposition.
@@ -90,7 +91,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
* @receiver the `input`.
* @return triple `Triple(U, S, V)`.
*/
-public fun Tensor<T>.svd(): Triple<Tensor<T>, Tensor<T>, Tensor<T>>
+public fun StructureND<T>.svd(): Triple<Tensor<T>, Tensor<T>, Tensor<T>>
/**
* Returns eigenvalues and eigenvectors of a real symmetric matrix `input` or a batch of real symmetric matrices,
@@ -100,6 +101,6 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
* @receiver the `input`.
* @return a pair `eigenvalues to eigenvectors`
*/
-public fun Tensor<T>.symEig(): Pair<Tensor<T>, Tensor<T>>
+public fun StructureND<T>.symEig(): Pair<Tensor<T>, Tensor<T>>
}
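
The same receiver relaxation applies to the linear-algebra entry points. A hedged sketch (assumes a square matrix input; decomposition return types as declared above):

import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.tensors.core.tensorAlgebra // accessor location assumed

fun linearDemo(m: StructureND<Double>) = with(Double.tensorAlgebra) {
    val (q, r) = m.qr()     // Pair destructuring, as declared above
    val (u, s, v) = m.svd() // Triple destructuring
    q dot r                 // reconstructs m up to numeric error
}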


@@ -41,12 +41,12 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
override operator fun T.plus(other: StructureND<T>): Tensor<T>
/**
-* Adds the scalar [value] to each element of this tensor and returns a new resulting tensor.
+* Adds the scalar [arg] to each element of this tensor and returns a new resulting tensor.
*
-* @param value the number to be added to each element of this tensor.
-* @return the sum of this tensor and [value].
+* @param arg the number to be added to each element of this tensor.
+* @return the sum of this tensor and [arg].
*/
-override operator fun StructureND<T>.plus(value: T): Tensor<T>
+override operator fun StructureND<T>.plus(arg: T): Tensor<T>
/**
* Each element of the tensor [other] is added to each element of this tensor.
@@ -166,7 +166,7 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* @param i index of the extractable tensor
* @return subtensor of the original tensor with index [i]
*/
-public operator fun Tensor<T>.get(i: Int): Tensor<T>
+public operator fun StructureND<T>.get(i: Int): Tensor<T>
/**
* Returns a tensor that is a transposed version of this tensor. The given dimensions [i] and [j] are swapped.
@@ -176,7 +176,7 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* @param j the second dimension to be transposed
* @return transposed tensor
*/
-public fun Tensor<T>.transpose(i: Int = -2, j: Int = -1): Tensor<T>
+public fun StructureND<T>.transpose(i: Int = -2, j: Int = -1): Tensor<T>
/**
* Returns a new tensor with the same data as the self tensor but of a different shape.
@@ -186,7 +186,7 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* @param shape the desired size
* @return tensor with new shape
*/
-public fun Tensor<T>.view(shape: IntArray): Tensor<T>
+public fun StructureND<T>.view(shape: IntArray): Tensor<T>
/**
* View this tensor as the same size as [other].
@@ -196,7 +196,7 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* @param other the result tensor has the same size as other.
* @return the result tensor with the same size as other.
*/
-public fun Tensor<T>.viewAs(other: Tensor<T>): Tensor<T>
+public fun StructureND<T>.viewAs(other: StructureND<T>): Tensor<T>
/**
* Matrix product of two tensors.
@@ -227,7 +227,7 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* @param other tensor to be multiplied.
* @return a mathematical product of two tensors.
*/
-public infix fun Tensor<T>.dot(other: Tensor<T>): Tensor<T>
+public infix fun StructureND<T>.dot(other: StructureND<T>): Tensor<T>
/**
* Creates a tensor whose diagonals of certain 2D planes (specified by [dim1] and [dim2])
@@ -262,7 +262,7 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
/**
* @return the sum of all elements in the input tensor.
*/
-public fun Tensor<T>.sum(): T
+public fun StructureND<T>.sum(): T
/**
* Returns the sum of each row of the input tensor in the given dimension [dim].
@@ -275,12 +275,12 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* @param keepDim whether the output tensor has [dim] retained or not.
* @return the sum of each row of the input tensor in the given dimension [dim].
*/
-public fun Tensor<T>.sum(dim: Int, keepDim: Boolean): Tensor<T>
+public fun StructureND<T>.sum(dim: Int, keepDim: Boolean): Tensor<T>
/**
-* @return the minimum value of all elements in the input tensor.
+* @return the minimum value of all elements in the input tensor or null if there are no values
*/
-public fun Tensor<T>.min(): T
+public fun StructureND<T>.min(): T?
/**
* Returns the minimum value of each row of the input tensor in the given dimension [dim].
@@ -293,12 +293,12 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* @param keepDim whether the output tensor has [dim] retained or not.
* @return the minimum value of each row of the input tensor in the given dimension [dim].
*/
-public fun Tensor<T>.min(dim: Int, keepDim: Boolean): Tensor<T>
+public fun StructureND<T>.min(dim: Int, keepDim: Boolean): Tensor<T>
/**
-* Returns the maximum value of all elements in the input tensor.
+* Returns the maximum value of all elements in the input tensor or null if there are no values
*/
-public fun Tensor<T>.max(): T
+public fun StructureND<T>.max(): T?
/**
* Returns the maximum value of each row of the input tensor in the given dimension [dim].
@@ -311,7 +311,7 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* @param keepDim whether the output tensor has [dim] retained or not.
* @return the maximum value of each row of the input tensor in the given dimension [dim].
*/
-public fun Tensor<T>.max(dim: Int, keepDim: Boolean): Tensor<T>
+public fun StructureND<T>.max(dim: Int, keepDim: Boolean): Tensor<T>
/**
* Returns the index of maximum value of each row of the input tensor in the given dimension [dim].
@@ -324,7 +324,7 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* @param keepDim whether the output tensor has [dim] retained or not.
* @return the index of maximum value of each row of the input tensor in the given dimension [dim].
*/
-public fun Tensor<T>.argMax(dim: Int, keepDim: Boolean): Tensor<T>
+public fun StructureND<T>.argMax(dim: Int, keepDim: Boolean): Tensor<T>
override fun add(left: StructureND<T>, right: StructureND<T>): Tensor<T> = left + right
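
A short sketch of the relaxed TensorAlgebra receivers in use (hedged; assumes a 2-D input and the tensorAlgebra accessor from the last file of this diff):

import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.tensors.core.tensorAlgebra // accessor location assumed

fun tensorOpsDemo(a: StructureND<Double>) = with(Double.tensorAlgebra) {
    val t = a.transpose() // default dims -2 and -1
    val product = a dot t // (m x n) dot (n x m)
    product.sum()
}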


@@ -85,7 +85,7 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() {
return DoubleTensor(newThis.shape, resBuffer)
}
-override fun Tensor<Double>.divAssign(other: Tensor<Double>) {
+override fun Tensor<Double>.divAssign(other: StructureND<Double>) {
val newOther = broadcastTo(other.tensor, tensor.shape)
for (i in 0 until tensor.indices.linearSize) {
tensor.mutableBuffer.array()[tensor.bufferStart + i] /=
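
A hedged sketch of the broadcasting division shown above (BroadcastDoubleTensorAlgebra is assumed to live next to DoubleTensorAlgebra in space.kscience.kmath.tensors.core, and divAssign is assumed to back the /= operator):

import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra // location assumed

fun broadcastDemo() = with(BroadcastDoubleTensorAlgebra) {
    val a = structureND(intArrayOf(2, 3)) { 6.0 }
    val row = structureND(intArrayOf(1, 3)) { 2.0 }
    a /= row // row is broadcast over the first dimension before the in-place division
}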


@@ -115,7 +115,7 @@ public open class DoubleTensorAlgebra :
TensorLinearStructure(shape).indices().map { DoubleField.initializer(it) }.toMutableList().toDoubleArray()
)
-override operator fun Tensor<Double>.get(i: Int): DoubleTensor {
+override operator fun StructureND<Double>.get(i: Int): DoubleTensor {
val lastShape = tensor.shape.drop(1).toIntArray()
val newShape = if (lastShape.isNotEmpty()) lastShape else intArrayOf(1)
val newStart = newShape.reduce(Int::times) * i + tensor.bufferStart
@@ -160,7 +160,7 @@ public open class DoubleTensorAlgebra :
*
* @return tensor filled with the scalar value `0.0`, with the same shape as `input` tensor.
*/
-public fun Tensor<Double>.zeroesLike(): DoubleTensor = tensor.fullLike(0.0)
+public fun StructureND<Double>.zeroesLike(): DoubleTensor = tensor.fullLike(0.0)
/**
* Returns a tensor filled with the scalar value `1.0`, with the shape defined by the variable argument [shape].
@@ -198,9 +198,8 @@ public open class DoubleTensorAlgebra :
*
* @return a copy of the `input` tensor with a copied buffer.
*/
-public fun Tensor<Double>.copy(): DoubleTensor {
-return DoubleTensor(tensor.shape, tensor.mutableBuffer.array().copyOf(), tensor.bufferStart)
-}
+public fun StructureND<Double>.copy(): DoubleTensor =
+DoubleTensor(tensor.shape, tensor.mutableBuffer.array().copyOf(), tensor.bufferStart)
override fun Double.plus(other: StructureND<Double>): DoubleTensor {
val resBuffer = DoubleArray(other.tensor.numElements) { i ->
@@ -209,7 +208,7 @@ public open class DoubleTensorAlgebra :
return DoubleTensor(other.shape, resBuffer)
}
-override fun StructureND<Double>.plus(value: Double): DoubleTensor = value + tensor
+override fun StructureND<Double>.plus(arg: Double): DoubleTensor = arg + tensor
override fun StructureND<Double>.plus(other: StructureND<Double>): DoubleTensor {
checkShapesCompatible(tensor, other.tensor)
@@ -345,7 +344,7 @@ public open class DoubleTensorAlgebra :
return DoubleTensor(tensor.shape, resBuffer)
}
-override fun Tensor<Double>.transpose(i: Int, j: Int): DoubleTensor {
+override fun StructureND<Double>.transpose(i: Int, j: Int): DoubleTensor {
val ii = tensor.minusIndex(i)
val jj = tensor.minusIndex(j)
checkTranspose(tensor.dimension, ii, jj)
@@ -369,15 +368,15 @@ public open class DoubleTensorAlgebra :
return resTensor
}
-override fun Tensor<Double>.view(shape: IntArray): DoubleTensor {
+override fun StructureND<Double>.view(shape: IntArray): DoubleTensor {
checkView(tensor, shape)
return DoubleTensor(shape, tensor.mutableBuffer.array(), tensor.bufferStart)
}
-override fun Tensor<Double>.viewAs(other: Tensor<Double>): DoubleTensor =
+override fun StructureND<Double>.viewAs(other: StructureND<Double>): DoubleTensor =
tensor.view(other.shape)
-override infix fun Tensor<Double>.dot(other: Tensor<Double>): DoubleTensor {
+override infix fun StructureND<Double>.dot(other: StructureND<Double>): DoubleTensor {
if (tensor.shape.size == 1 && other.shape.size == 1) {
return DoubleTensor(intArrayOf(1), doubleArrayOf(tensor.times(other).tensor.mutableBuffer.array().sum()))
}
@@ -569,10 +568,10 @@ public open class DoubleTensorAlgebra :
*/
public fun Tensor<Double>.rowsByIndices(indices: IntArray): DoubleTensor = stack(indices.map { this[it] })
-internal inline fun Tensor<Double>.fold(foldFunction: (DoubleArray) -> Double): Double =
+internal inline fun StructureND<Double>.fold(foldFunction: (DoubleArray) -> Double): Double =
foldFunction(tensor.copyArray())
-internal inline fun Tensor<Double>.foldDim(
+internal inline fun StructureND<Double>.foldDim(
foldFunction: (DoubleArray) -> Double,
dim: Int,
keepDim: Boolean,
@@ -596,30 +595,30 @@ public open class DoubleTensorAlgebra :
return resTensor
}
-override fun Tensor<Double>.sum(): Double = tensor.fold { it.sum() }
+override fun StructureND<Double>.sum(): Double = tensor.fold { it.sum() }
-override fun Tensor<Double>.sum(dim: Int, keepDim: Boolean): DoubleTensor =
+override fun StructureND<Double>.sum(dim: Int, keepDim: Boolean): DoubleTensor =
foldDim({ x -> x.sum() }, dim, keepDim)
-override fun Tensor<Double>.min(): Double = this.fold { it.minOrNull()!! }
+override fun StructureND<Double>.min(): Double = this.fold { it.minOrNull()!! }
-override fun Tensor<Double>.min(dim: Int, keepDim: Boolean): DoubleTensor =
+override fun StructureND<Double>.min(dim: Int, keepDim: Boolean): DoubleTensor =
foldDim({ x -> x.minOrNull()!! }, dim, keepDim)
-override fun Tensor<Double>.max(): Double = this.fold { it.maxOrNull()!! }
+override fun StructureND<Double>.max(): Double = this.fold { it.maxOrNull()!! }
-override fun Tensor<Double>.max(dim: Int, keepDim: Boolean): DoubleTensor =
+override fun StructureND<Double>.max(dim: Int, keepDim: Boolean): DoubleTensor =
foldDim({ x -> x.maxOrNull()!! }, dim, keepDim)
-override fun Tensor<Double>.argMax(dim: Int, keepDim: Boolean): DoubleTensor =
+override fun StructureND<Double>.argMax(dim: Int, keepDim: Boolean): DoubleTensor =
foldDim({ x ->
x.withIndex().maxByOrNull { it.value }?.index!!.toDouble()
}, dim, keepDim)
-override fun Tensor<Double>.mean(): Double = this.fold { it.sum() / tensor.numElements }
+override fun StructureND<Double>.mean(): Double = this.fold { it.sum() / tensor.numElements }
-override fun Tensor<Double>.mean(dim: Int, keepDim: Boolean): DoubleTensor =
+override fun StructureND<Double>.mean(dim: Int, keepDim: Boolean): DoubleTensor =
foldDim(
{ arr ->
check(dim < dimension) { "Dimension $dim out of range $dimension" }
@@ -629,12 +628,12 @@ public open class DoubleTensorAlgebra :
keepDim
)
-override fun Tensor<Double>.std(): Double = this.fold { arr ->
+override fun StructureND<Double>.std(): Double = this.fold { arr ->
val mean = arr.sum() / tensor.numElements
sqrt(arr.sumOf { (it - mean) * (it - mean) } / (tensor.numElements - 1))
}
-override fun Tensor<Double>.std(dim: Int, keepDim: Boolean): DoubleTensor = foldDim(
+override fun StructureND<Double>.std(dim: Int, keepDim: Boolean): DoubleTensor = foldDim(
{ arr ->
check(dim < dimension) { "Dimension $dim out of range $dimension" }
val mean = arr.sum() / shape[dim]
@@ -644,12 +643,12 @@ public open class DoubleTensorAlgebra :
keepDim
)
-override fun Tensor<Double>.variance(): Double = this.fold { arr ->
+override fun StructureND<Double>.variance(): Double = this.fold { arr ->
val mean = arr.sum() / tensor.numElements
arr.sumOf { (it - mean) * (it - mean) } / (tensor.numElements - 1)
}
-override fun Tensor<Double>.variance(dim: Int, keepDim: Boolean): DoubleTensor = foldDim(
+override fun StructureND<Double>.variance(dim: Int, keepDim: Boolean): DoubleTensor = foldDim(
{ arr ->
check(dim < dimension) { "Dimension $dim out of range $dimension" }
val mean = arr.sum() / shape[dim]
@@ -672,7 +671,7 @@ public open class DoubleTensorAlgebra :
* @param tensors the [List] of 1-dimensional tensors with same shape
* @return `M`.
*/
-public fun cov(tensors: List<Tensor<Double>>): DoubleTensor {
+public fun cov(tensors: List<StructureND<Double>>): DoubleTensor {
check(tensors.isNotEmpty()) { "List must have at least 1 element" }
val n = tensors.size
val m = tensors[0].shape[0]
@@ -689,43 +688,43 @@ public open class DoubleTensorAlgebra :
return resTensor
}
-override fun Tensor<Double>.exp(): DoubleTensor = tensor.map { exp(it) }
+override fun StructureND<Double>.exp(): DoubleTensor = tensor.map { exp(it) }
-override fun Tensor<Double>.ln(): DoubleTensor = tensor.map { ln(it) }
+override fun StructureND<Double>.ln(): DoubleTensor = tensor.map { ln(it) }
-override fun Tensor<Double>.sqrt(): DoubleTensor = tensor.map { sqrt(it) }
+override fun StructureND<Double>.sqrt(): DoubleTensor = tensor.map { sqrt(it) }
-override fun Tensor<Double>.cos(): DoubleTensor = tensor.map { cos(it) }
+override fun StructureND<Double>.cos(): DoubleTensor = tensor.map { cos(it) }
-override fun Tensor<Double>.acos(): DoubleTensor = tensor.map { acos(it) }
+override fun StructureND<Double>.acos(): DoubleTensor = tensor.map { acos(it) }
-override fun Tensor<Double>.cosh(): DoubleTensor = tensor.map { cosh(it) }
+override fun StructureND<Double>.cosh(): DoubleTensor = tensor.map { cosh(it) }
-override fun Tensor<Double>.acosh(): DoubleTensor = tensor.map { acosh(it) }
+override fun StructureND<Double>.acosh(): DoubleTensor = tensor.map { acosh(it) }
-override fun Tensor<Double>.sin(): DoubleTensor = tensor.map { sin(it) }
+override fun StructureND<Double>.sin(): DoubleTensor = tensor.map { sin(it) }
-override fun Tensor<Double>.asin(): DoubleTensor = tensor.map { asin(it) }
+override fun StructureND<Double>.asin(): DoubleTensor = tensor.map { asin(it) }
-override fun Tensor<Double>.sinh(): DoubleTensor = tensor.map { sinh(it) }
+override fun StructureND<Double>.sinh(): DoubleTensor = tensor.map { sinh(it) }
-override fun Tensor<Double>.asinh(): DoubleTensor = tensor.map { asinh(it) }
+override fun StructureND<Double>.asinh(): DoubleTensor = tensor.map { asinh(it) }
-override fun Tensor<Double>.tan(): DoubleTensor = tensor.map { tan(it) }
+override fun StructureND<Double>.tan(): DoubleTensor = tensor.map { tan(it) }
-override fun Tensor<Double>.atan(): DoubleTensor = tensor.map { atan(it) }
+override fun StructureND<Double>.atan(): DoubleTensor = tensor.map { atan(it) }
-override fun Tensor<Double>.tanh(): DoubleTensor = tensor.map { tanh(it) }
+override fun StructureND<Double>.tanh(): DoubleTensor = tensor.map { tanh(it) }
-override fun Tensor<Double>.atanh(): DoubleTensor = tensor.map { atanh(it) }
+override fun StructureND<Double>.atanh(): DoubleTensor = tensor.map { atanh(it) }
-override fun Tensor<Double>.ceil(): DoubleTensor = tensor.map { ceil(it) }
+override fun StructureND<Double>.ceil(): DoubleTensor = tensor.map { ceil(it) }
-override fun Tensor<Double>.floor(): DoubleTensor = tensor.map { floor(it) }
+override fun StructureND<Double>.floor(): DoubleTensor = tensor.map { floor(it) }
-override fun Tensor<Double>.inv(): DoubleTensor = invLU(1e-9)
+override fun StructureND<Double>.inv(): DoubleTensor = invLU(1e-9)
-override fun Tensor<Double>.det(): DoubleTensor = detLU(1e-9)
+override fun StructureND<Double>.det(): DoubleTensor = detLU(1e-9)
/**
* Computes the LU factorization of a matrix or batches of matrices `input`.
@@ -736,7 +735,7 @@ public open class DoubleTensorAlgebra :
* The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor.
* The `pivots` has the shape ``(, min(m, n))``. `pivots` stores all the intermediate transpositions of rows.
*/
-public fun Tensor<Double>.luFactor(epsilon: Double): Pair<DoubleTensor, IntTensor> =
+public fun StructureND<Double>.luFactor(epsilon: Double): Pair<DoubleTensor, IntTensor> =
computeLU(tensor, epsilon)
?: throw IllegalArgumentException("Tensor contains matrices which are singular at precision $epsilon")
@@ -749,7 +748,7 @@ public open class DoubleTensorAlgebra :
* The `factorization` has the shape ``(*, m, n)``, where``(*, m, n)`` is the shape of the `input` tensor.
* The `pivots` has the shape ``(, min(m, n))``. `pivots` stores all the intermediate transpositions of rows.
*/
-public fun Tensor<Double>.luFactor(): Pair<DoubleTensor, IntTensor> = luFactor(1e-9)
+public fun StructureND<Double>.luFactor(): Pair<DoubleTensor, IntTensor> = luFactor(1e-9)
/**
* Unpacks the data and pivots from a LU factorization of a tensor.
@@ -763,7 +762,7 @@ public open class DoubleTensorAlgebra :
* @return triple of `P`, `L` and `U` tensors
*/
public fun luPivot(
-luTensor: Tensor<Double>,
+luTensor: StructureND<Double>,
pivotsTensor: Tensor<Int>,
): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
checkSquareMatrix(luTensor.shape)
@@ -806,7 +805,7 @@ public open class DoubleTensorAlgebra :
* Used when checking the positive definiteness of the input matrix or matrices.
* @return a pair of `Q` and `R` tensors.
*/
-public fun Tensor<Double>.cholesky(epsilon: Double): DoubleTensor {
+public fun StructureND<Double>.cholesky(epsilon: Double): DoubleTensor {
checkSquareMatrix(shape)
checkPositiveDefinite(tensor, epsilon)
@@ -819,9 +818,9 @@ public open class DoubleTensorAlgebra :
return lTensor
}
-override fun Tensor<Double>.cholesky(): DoubleTensor = cholesky(1e-6)
+override fun StructureND<Double>.cholesky(): DoubleTensor = cholesky(1e-6)
-override fun Tensor<Double>.qr(): Pair<DoubleTensor, DoubleTensor> {
+override fun StructureND<Double>.qr(): Pair<DoubleTensor, DoubleTensor> {
checkSquareMatrix(shape)
val qTensor = zeroesLike()
val rTensor = zeroesLike()
@@ -837,7 +836,7 @@ public open class DoubleTensorAlgebra :
return qTensor to rTensor
}
-override fun Tensor<Double>.svd(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> =
+override fun StructureND<Double>.svd(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> =
svd(epsilon = 1e-10)
/**
@@ -853,7 +852,7 @@ public open class DoubleTensorAlgebra :
* i.e., the precision with which the cosine approaches 1 in an iterative algorithm.
* @return a triple `Triple(U, S, V)`.
*/
-public fun Tensor<Double>.svd(epsilon: Double): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
+public fun StructureND<Double>.svd(epsilon: Double): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
val size = tensor.dimension
val commonShape = tensor.shape.sliceArray(0 until size - 2)
val (n, m) = tensor.shape.sliceArray(size - 2 until size)
@@ -886,7 +885,7 @@ public open class DoubleTensorAlgebra :
return Triple(uTensor.transpose(), sTensor, vTensor.transpose())
}
-override fun Tensor<Double>.symEig(): Pair<DoubleTensor, DoubleTensor> = symEig(epsilon = 1e-15)
+override fun StructureND<Double>.symEig(): Pair<DoubleTensor, DoubleTensor> = symEig(epsilon = 1e-15)
/**
* Returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices,
@@ -896,7 +895,7 @@ public open class DoubleTensorAlgebra :
* and when the cosine approaches 1 in the SVD algorithm.
* @return a pair `eigenvalues to eigenvectors`.
*/
-public fun Tensor<Double>.symEig(epsilon: Double): Pair<DoubleTensor, DoubleTensor> {
+public fun StructureND<Double>.symEig(epsilon: Double): Pair<DoubleTensor, DoubleTensor> {
checkSymmetric(tensor, epsilon)
fun MutableStructure2D<Double>.cleanSym(n: Int) {
@@ -931,7 +930,7 @@ public open class DoubleTensorAlgebra :
* with zero.
* @return the determinant.
*/
-public fun Tensor<Double>.detLU(epsilon: Double = 1e-9): DoubleTensor {
+public fun StructureND<Double>.detLU(epsilon: Double = 1e-9): DoubleTensor {
checkSquareMatrix(tensor.shape)
val luTensor = tensor.copy()
val pivotsTensor = tensor.setUpPivots()
@@ -964,7 +963,7 @@ public open class DoubleTensorAlgebra :
* @param epsilon error in the LU algorithm&mdash;permissible error when comparing the determinant of a matrix with zero
* @return the multiplicative inverse of a matrix.
*/
-public fun Tensor<Double>.invLU(epsilon: Double = 1e-9): DoubleTensor {
+public fun StructureND<Double>.invLU(epsilon: Double = 1e-9): DoubleTensor {
val (luTensor, pivotsTensor) = luFactor(epsilon)
val invTensor = luTensor.zeroesLike()
@@ -989,12 +988,12 @@ public open class DoubleTensorAlgebra :
* @param epsilon permissible error when comparing the determinant of a matrix with zero.
* @return triple of `P`, `L` and `U` tensors.
*/
-public fun Tensor<Double>.lu(epsilon: Double = 1e-9): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
+public fun StructureND<Double>.lu(epsilon: Double = 1e-9): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
val (lu, pivots) = tensor.luFactor(epsilon)
return luPivot(lu, pivots)
}
-override fun Tensor<Double>.lu(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> = lu(1e-9)
+override fun StructureND<Double>.lu(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> = lu(1e-9)
}
public val Double.Companion.tensorAlgebra: DoubleTensorAlgebra.Companion get() = DoubleTensorAlgebra
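
Finally, a hedged end-to-end sketch of the LU utilities through the new Double.tensorAlgebra accessor (square input assumed; default epsilons as declared above):

import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.tensors.core.tensorAlgebra

fun luDemo(m: StructureND<Double>) = with(Double.tensorAlgebra) {
    val d = m.detLU()      // LU-based determinant, epsilon defaults to 1e-9
    val inv = m.invLU()    // LU-based inverse with the same default
    val (p, l, u) = m.lu() // P, L, U factors
    p dot (l dot u)        // reconstructs m up to the tolerance
}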