Safe shapes

parent c653052d8c
commit b0abcf2d0c
@@ -7,6 +7,7 @@
- Algebra now has an obligatory `bufferFactory` (#477).

### Changed

+- Shape is read-only
- Major refactor of tensors (only minor API changes)
- Kotlin 1.7.20
- `LazyStructure` `deffered` -> `async` to comply with coroutines code style
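A minimal sketch of what the read-only `Shape` entry means at call sites, based on the `Shape` value class introduced later in this commit (the dimensions below are illustrative):

val shape = Shape(3, 4)               // replaces a raw intArrayOf(3, 4)
val volume = shape.linearSize         // 12, the product of all dimensions
val copy: IntArray = shape.toArray()  // protective copy; the backing array stays hidden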
@@ -13,10 +13,8 @@ import org.jetbrains.kotlinx.multik.api.Multik
import org.jetbrains.kotlinx.multik.api.ones
import org.jetbrains.kotlinx.multik.ndarray.data.DN
import org.jetbrains.kotlinx.multik.ndarray.data.DataType
-import space.kscience.kmath.nd.BufferedFieldOpsND
-import space.kscience.kmath.nd.StructureND
-import space.kscience.kmath.nd.ndAlgebra
-import space.kscience.kmath.nd.one
+import space.kscience.kmath.misc.UnsafeKMathAPI
+import space.kscience.kmath.nd.*
import space.kscience.kmath.nd4j.nd4j
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.tensors.core.DoubleTensor
@@ -69,9 +67,10 @@ internal class NDFieldBenchmark {
        blackhole.consume(res)
    }

+   @OptIn(UnsafeKMathAPI::class)
    @Benchmark
    fun multikInPlaceAdd(blackhole: Blackhole) = with(multikAlgebra) {
-       val res = Multik.ones<Double, DN>(shape, DataType.DoubleDataType).wrap()
+       val res = Multik.ones<Double, DN>(shape.asArray(), DataType.DoubleDataType).wrap()
        repeat(n) { res += 1.0 }
        blackhole.consume(res)
    }
@@ -86,7 +85,7 @@ internal class NDFieldBenchmark {
    private companion object {
        private const val dim = 1000
        private const val n = 100
-       private val shape = intArrayOf(dim, dim)
+       private val shape = Shape(dim, dim)
        private val specializedField = DoubleField.ndAlgebra
        private val genericField = BufferedFieldOpsND(DoubleField)
        private val nd4jField = DoubleField.nd4j
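The benchmark's new opt-in is needed because Multik expects a plain IntArray while the read-only Shape only exposes its backing array through the @UnsafeKMathAPI accessor. A hedged sketch of the two interop options (helper names are illustrative, not part of the commit):

@OptIn(UnsafeKMathAPI::class)
fun shapeForMultik(shape: Shape): IntArray = shape.asArray() // no copy, hence the opt-in

fun shapeCopy(shape: Shape): IntArray = shape.toArray()      // protective copy, no opt-in needed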
@@ -1,4 +1,3 @@
-import space.kscience.gradle.isInDevelopment
import space.kscience.gradle.useApache2Licence
import space.kscience.gradle.useSPCTeam

@@ -15,7 +14,7 @@ allprojects {
    }

    group = "space.kscience"
-   version = "0.3.1-dev-4"
+   version = "0.3.1-dev-5"
}

subprojects {
@@ -78,11 +77,12 @@ ksciencePublish {
    }
    github("kmath", "SciProgCentre")
    space(
-       if (isInDevelopment) {
-           "https://maven.pkg.jetbrains.space/mipt-npm/p/sci/dev"
-       } else {
-           "https://maven.pkg.jetbrains.space/mipt-npm/p/sci/release"
-       }
+       "https://maven.pkg.jetbrains.space/spc/p/sci/maven"
+//        if (isInDevelopment) {
+//            "https://maven.pkg.jetbrains.space/spc/p/sci/dev"
+//        } else {
+//            "https://maven.pkg.jetbrains.space/spc/p/sci/release"
+//        }
    )
    sonatype()
}
@@ -17,7 +17,7 @@ import java.util.stream.IntStream
 * A demonstration implementation of NDField over Real using Java [java.util.stream.DoubleStream] for parallel
 * execution.
 */
-class StreamDoubleFieldND(override val shape: IntArray) : FieldND<Double, DoubleField>,
+class StreamDoubleFieldND(override val shape: Shape) : FieldND<Double, DoubleField>,
    NumbersAddOps<StructureND<Double>>,
    ExtendedField<StructureND<Double>> {

@@ -31,6 +31,7 @@ class StreamDoubleFieldND(override val shape: IntArray) : FieldND<Double, DoubleField>,
        return structureND(shape) { d }
    }

+   @OptIn(PerformancePitfall::class)
    private val StructureND<Double>.buffer: DoubleBuffer
        get() = when {
            !shape.contentEquals(this@StreamDoubleFieldND.shape) -> throw ShapeMismatchException(
@@ -110,4 +111,4 @@ class StreamDoubleFieldND(override val shape: IntArray) : FieldND<Double, DoubleField>,
    override fun atanh(arg: StructureND<Double>): BufferND<Double> = arg.map { atanh(it) }
}

-fun DoubleField.ndStreaming(vararg shape: Int): StreamDoubleFieldND = StreamDoubleFieldND(shape)
+fun DoubleField.ndStreaming(vararg shape: Int): StreamDoubleFieldND = StreamDoubleFieldND(Shape(shape))
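A short usage sketch of the updated demo field, assuming the signatures shown above (the sizes and the identity initializer are illustrative):

val field = DoubleField.ndStreaming(100, 100)  // vararg ints are wrapped into a Shape
val identity = field.structureND(field.shape) { (i, j) -> if (i == j) 1.0 else 0.0 }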
@@ -5,16 +5,19 @@

package space.kscience.kmath.structures

+import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.nd.BufferND
import space.kscience.kmath.nd.ColumnStrides
+import space.kscience.kmath.nd.Shape
import kotlin.system.measureTimeMillis

@Suppress("ASSIGNED_BUT_NEVER_ACCESSED_VARIABLE")
+@OptIn(PerformancePitfall::class)
fun main() {
    val n = 6000
    val array = DoubleArray(n * n) { 1.0 }
    val buffer = DoubleBuffer(array)
-   val strides = ColumnStrides(intArrayOf(n, n))
+   val strides = ColumnStrides(Shape(n, n))
    val structure = BufferND(strides, buffer)

    measureTimeMillis {
@@ -5,16 +5,20 @@

package space.kscience.kmath.structures

+import space.kscience.kmath.nd.BufferND
+import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.StructureND
-import space.kscience.kmath.nd.mapToBuffer
+import space.kscience.kmath.operations.map
import kotlin.system.measureTimeMillis

+private inline fun <T, reified R: Any> BufferND<T>.map(block: (T) -> R): BufferND<R> = BufferND(indices, buffer.map(block))

@Suppress("UNUSED_VARIABLE")
fun main() {
    val n = 6000
-   val structure = StructureND.buffered(intArrayOf(n, n), Buffer.Companion::auto) { 1.0 }
-   structure.mapToBuffer { it + 1 } // warm-up
-   val time1 = measureTimeMillis { val res = structure.mapToBuffer { it + 1 } }
+   val structure = StructureND.buffered(Shape(n, n), Buffer.Companion::auto) { 1.0 }
+   structure.map { it + 1 } // warm-up
+   val time1 = measureTimeMillis { val res = structure.map { it + 1 } }
    println("Structure mapping finished in $time1 millis")
    val array = DoubleArray(n * n) { 1.0 }
@@ -6,6 +6,8 @@
package space.kscience.kmath.tensors

import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.contentEquals
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensor
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
@@ -23,10 +25,10 @@ fun main() {
    DoubleTensorAlgebra {
        // take coefficient vector from normal distribution
        val alpha = randomNormal(
-           intArrayOf(5),
+           Shape(5),
            randSeed
        ) + fromArray(
-           intArrayOf(5),
+           Shape(5),
            doubleArrayOf(1.0, 2.5, 3.4, 5.0, 10.1)
        )

@@ -34,7 +36,7 @@ fun main() {

        // also take sample of size 20 from normal distribution for x
        val x = randomNormal(
-           intArrayOf(20, 5),
+           Shape(20, 5),
            randSeed
        )

@@ -50,11 +52,13 @@ fun main() {


        // inverse Sigma matrix can be restored from singular values with diagonalEmbedding function
-       val sigma = diagonalEmbedding(singValues.map{ if (abs(it) < 1e-3) 0.0 else 1.0/it })
+       val sigma = diagonalEmbedding(singValues.map { if (abs(it) < 1e-3) 0.0 else 1.0 / it })

        val alphaOLS = v dot sigma dot u.transposed() dot y
-       println("Estimated alpha:\n" +
-               "$alphaOLS")
+       println(
+           "Estimated alpha:\n" +
+                   "$alphaOLS"
+       )

        // figure out MSE of approximation
        fun mse(yTrue: DoubleTensor, yPred: DoubleTensor): Double {
@@ -5,6 +5,7 @@

package space.kscience.kmath.tensors

+import space.kscience.kmath.nd.Shape
import space.kscience.kmath.tensors.core.tensorAlgebra
import space.kscience.kmath.tensors.core.withBroadcast

@@ -16,7 +17,7 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with

    // assume x is range from 0 until 10
    val x = fromArray(
-       intArrayOf(10),
+       Shape(10),
        DoubleArray(10) { it.toDouble() }
    )

@@ -41,13 +42,13 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with

    // save means ans standard deviations for further recovery
    val mean = fromArray(
-       intArrayOf(2),
+       Shape(2),
        doubleArrayOf(xMean, yMean)
    )
    println("Means:\n$mean")

    val std = fromArray(
-       intArrayOf(2),
+       Shape(2),
        doubleArrayOf(xStd, yStd)
    )
    println("Standard deviations:\n$std")
@@ -68,7 +69,7 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with
    // we can restore original data from reduced data;
    // for example, find 7th element of dataset.
    val n = 7
-   val restored = (datasetReduced.getTensor(n) dot v.view(intArrayOf(1, 2))) * std + mean
+   val restored = (datasetReduced.getTensor(n) dot v.view(Shape(1, 2))) * std + mean
    println("Original value:\n${dataset.getTensor(n)}")
    println("Restored value:\n$restored")
}
@@ -5,6 +5,7 @@

package space.kscience.kmath.tensors

+import space.kscience.kmath.nd.Shape
import space.kscience.kmath.tensors.core.tensorAlgebra
import space.kscience.kmath.tensors.core.withBroadcast

@@ -13,10 +14,10 @@ import space.kscience.kmath.tensors.core.withBroadcast

fun main() = Double.tensorAlgebra.withBroadcast { // work in context with broadcast methods
    // take dataset of 5-element vectors from normal distribution
-   val dataset = randomNormal(intArrayOf(100, 5)) * 1.5 // all elements from N(0, 1.5)
+   val dataset = randomNormal(Shape(100, 5)) * 1.5 // all elements from N(0, 1.5)

    dataset += fromArray(
-       intArrayOf(5),
+       Shape(5),
        doubleArrayOf(0.0, 1.0, 1.5, 3.0, 5.0) // row means
    )

@@ -5,6 +5,7 @@

package space.kscience.kmath.tensors

+import space.kscience.kmath.nd.Shape
import space.kscience.kmath.tensors.core.DoubleTensor
import space.kscience.kmath.tensors.core.tensorAlgebra
import space.kscience.kmath.tensors.core.withBroadcast

@@ -15,13 +16,13 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear

    // set true value of x
    val trueX = fromArray(
-       intArrayOf(4),
+       Shape(4),
        doubleArrayOf(-2.0, 1.5, 6.8, -2.4)
    )

    // and A matrix
    val a = fromArray(
-       intArrayOf(4, 4),
+       Shape(4, 4),
        doubleArrayOf(
            0.5, 10.5, 4.5, 1.0,
            8.5, 0.9, 12.8, 0.1,
@@ -64,7 +65,7 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear
    // this function returns solution x of a system lx = b, l should be lower triangular
    fun solveLT(l: DoubleTensor, b: DoubleTensor): DoubleTensor {
        val n = l.shape[0]
-       val x = zeros(intArrayOf(n))
+       val x = zeros(Shape(n))
        for (i in 0 until n) {
            x[intArrayOf(i)] = (b[intArrayOf(i)] - l.getTensor(i).dot(x).value()) / l[intArrayOf(i, i)]
        }
@@ -5,6 +5,8 @@

package space.kscience.kmath.tensors

+import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.contentEquals
import space.kscience.kmath.operations.asIterable
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
@@ -68,12 +70,12 @@ class Dense(

    private val weights: DoubleTensor = DoubleTensorAlgebra {
        randomNormal(
-           intArrayOf(inputUnits, outputUnits),
+           Shape(inputUnits, outputUnits),
            seed
        ) * sqrt(2.0 / (inputUnits + outputUnits))
    }

-   private val bias: DoubleTensor = DoubleTensorAlgebra { zeros(intArrayOf(outputUnits)) }
+   private val bias: DoubleTensor = DoubleTensorAlgebra { zeros(Shape(outputUnits)) }

    override fun forward(input: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra {
        (input dot weights) + bias
@@ -182,17 +184,17 @@ fun main() = BroadcastDoubleTensorAlgebra {
    //val testSize = sampleSize - trainSize

    // take sample of features from normal distribution
-   val x = randomNormal(intArrayOf(sampleSize, features), seed) * 2.5
+   val x = randomNormal(Shape(sampleSize, features), seed) * 2.5

    x += fromArray(
-       intArrayOf(5),
+       Shape(5),
        doubleArrayOf(0.0, -1.0, -2.5, -3.0, 5.5) // row means
    )


    // define class like '1' if the sum of features > 0 and '0' otherwise
    val y = fromArray(
-       intArrayOf(sampleSize, 1),
+       Shape(sampleSize, 1),
        DoubleArray(sampleSize) { i ->
            if (x.getTensor(i).sum() > 0.0) {
                1.0
@@ -3,13 +3,11 @@
# Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
#
kotlin.code.style=official
kotlin.jupyter.add.scanner=false
kotlin.mpp.stability.nowarn=true
kotlin.native.ignoreDisabledTargets=true
kotlin.incremental.js.ir=true

org.gradle.configureondemand=true
org.gradle.parallel=true
org.gradle.jvmargs=-Xmx4096m

-toolsVersion=0.13.0-kotlin-1.7.20-Beta
+toolsVersion=0.13.1-kotlin-1.7.20
@@ -5,6 +5,7 @@

package space.kscience.kmath.complex

+import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.nd.*
import space.kscience.kmath.operations.*
@@ -20,6 +21,7 @@ import kotlin.contracts.contract
public sealed class ComplexFieldOpsND : BufferedFieldOpsND<Complex, ComplexField>(ComplexField.bufferAlgebra),
    ScaleOperations<StructureND<Complex>>, ExtendedFieldOps<StructureND<Complex>>, PowerOperations<StructureND<Complex>> {

+   @OptIn(PerformancePitfall::class)
    override fun StructureND<Complex>.toBufferND(): BufferND<Complex> = when (this) {
        is BufferND -> this
        else -> {
@@ -69,12 +71,12 @@ public class ComplexFieldND(override val shape: Shape) :

public val ComplexField.ndAlgebra: ComplexFieldOpsND get() = ComplexFieldOpsND

-public fun ComplexField.ndAlgebra(vararg shape: Int): ComplexFieldND = ComplexFieldND(shape)
+public fun ComplexField.ndAlgebra(vararg shape: Int): ComplexFieldND = ComplexFieldND(Shape(shape))

/**
 * Produce a context for n-dimensional operations inside this real field
 */
public inline fun <R> ComplexField.withNdAlgebra(vararg shape: Int, action: ComplexFieldND.() -> R): R {
    contract { callsInPlace(action, InvocationKind.EXACTLY_ONCE) }
-   return ComplexFieldND(shape).action()
+   return ComplexFieldND(Shape(shape)).action()
}
@@ -6,9 +6,7 @@
package space.kscience.kmath.linear

import space.kscience.kmath.misc.PerformancePitfall
-import space.kscience.kmath.nd.BufferedRingOpsND
-import space.kscience.kmath.nd.as2D
-import space.kscience.kmath.nd.asND
+import space.kscience.kmath.nd.*
import space.kscience.kmath.operations.*
import space.kscience.kmath.structures.Buffer
import space.kscience.kmath.structures.VirtualBuffer
@@ -23,7 +21,7 @@ public class BufferedLinearSpace<T, out A : Ring<T>>(
    private val ndAlgebra = BufferedRingOpsND(bufferAlgebra)

    override fun buildMatrix(rows: Int, columns: Int, initializer: A.(i: Int, j: Int) -> T): Matrix<T> =
-       ndAlgebra.structureND(intArrayOf(rows, columns)) { (i, j) -> elementAlgebra.initializer(i, j) }.as2D()
+       ndAlgebra.structureND(Shape(rows, columns)) { (i, j) -> elementAlgebra.initializer(i, j) }.as2D()

    override fun buildVector(size: Int, initializer: A.(Int) -> T): Point<T> =
        bufferAlgebra.buffer(size) { elementAlgebra.initializer(it) }
@@ -6,9 +6,7 @@
package space.kscience.kmath.linear

import space.kscience.kmath.misc.PerformancePitfall
-import space.kscience.kmath.nd.DoubleFieldOpsND
-import space.kscience.kmath.nd.as2D
-import space.kscience.kmath.nd.asND
+import space.kscience.kmath.nd.*
import space.kscience.kmath.operations.DoubleBufferOps
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.invoke
@@ -23,7 +21,7 @@ public object DoubleLinearSpace : LinearSpace<Double, DoubleField> {
        rows: Int,
        columns: Int,
        initializer: DoubleField.(i: Int, j: Int) -> Double
-   ): Matrix<Double> = DoubleFieldOpsND.structureND(intArrayOf(rows, columns)) { (i, j) ->
+   ): Matrix<Double> = DoubleFieldOpsND.structureND(Shape(rows, columns)) { (i, j) ->
        DoubleField.initializer(i, j)
    }.as2D()

@@ -5,6 +5,8 @@

package space.kscience.kmath.linear

+import space.kscience.kmath.nd.Shape
+
/**
 * The matrix where each element is evaluated each time when is being accessed.
 *
@@ -16,7 +18,7 @@ public class VirtualMatrix<out T : Any>(
    public val generator: (i: Int, j: Int) -> T,
) : Matrix<T> {

-   override val shape: IntArray get() = intArrayOf(rowNum, colNum)
+   override val shape: Shape get() = Shape(rowNum, colNum)

    override operator fun get(i: Int, j: Int): T = generator(i, j)
}
@@ -29,3 +29,16 @@ public annotation class UnstableKMathAPI
public annotation class PerformancePitfall(
    val message: String = "Potential performance problem",
)
+
+/**
+ * Marks API that is public, but should not be used without clear understanding what it does.
+ */
+@MustBeDocumented
+@Retention(value = AnnotationRetention.BINARY)
+@RequiresOptIn(
+    "This API is unsafe and should be used carefully",
+    RequiresOptIn.Level.ERROR,
+)
+public annotation class UnsafeKMathAPI(
+    val message: String = "Unsafe API",
+)
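A sketch of how downstream code opts in to the new marker; `asArray()` is the zero-copy accessor guarded by it later in this diff, while the safe alternative needs no opt-in (the helper names are illustrative):

@OptIn(UnsafeKMathAPI::class)
fun volumeUnsafe(shape: Shape): Int = shape.asArray().fold(1) { acc, dim -> acc * dim }

fun volumeSafe(shape: Shape): Int = shape.linearSize // same result without the opt-in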
@@ -12,7 +12,7 @@ import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.operations.*

public interface BufferAlgebraND<T, out A : Algebra<T>> : AlgebraND<T, A> {
-   public val indexerBuilder: (IntArray) -> ShapeIndexer
+   public val indexerBuilder: (Shape) -> ShapeIndexer
    public val bufferAlgebra: BufferAlgebra<T, A>
    override val elementAlgebra: A get() = bufferAlgebra.elementAlgebra

@@ -26,6 +26,7 @@ public interface BufferAlgebraND<T, out A : Algebra<T>> : AlgebraND<T, A> {
        )
    }

+   @OptIn(PerformancePitfall::class)
    public fun StructureND<T>.toBufferND(): BufferND<T> = when (this) {
        is BufferND -> this
        else -> {
@@ -46,7 +47,7 @@ public interface BufferAlgebraND<T, out A : Algebra<T>> : AlgebraND<T, A> {
        zipInline(left.toBufferND(), right.toBufferND(), transform)

    public companion object {
-       public val defaultIndexerBuilder: (IntArray) -> ShapeIndexer = ::Strides
+       public val defaultIndexerBuilder: (Shape) -> ShapeIndexer = ::Strides
    }
}

@@ -98,24 +99,24 @@ internal inline fun <T, A : Algebra<T>> BufferAlgebraND<T, A>.zipInline(
@OptIn(PerformancePitfall::class)
public open class BufferedGroupNDOps<T, out A : Group<T>>(
    override val bufferAlgebra: BufferAlgebra<T, A>,
-   override val indexerBuilder: (IntArray) -> ShapeIndexer = BufferAlgebraND.defaultIndexerBuilder,
+   override val indexerBuilder: (Shape) -> ShapeIndexer = BufferAlgebraND.defaultIndexerBuilder,
) : GroupOpsND<T, A>, BufferAlgebraND<T, A> {
    override fun StructureND<T>.unaryMinus(): StructureND<T> = map { -it }
}

public open class BufferedRingOpsND<T, out A : Ring<T>>(
    bufferAlgebra: BufferAlgebra<T, A>,
-   indexerBuilder: (IntArray) -> ShapeIndexer = BufferAlgebraND.defaultIndexerBuilder,
+   indexerBuilder: (Shape) -> ShapeIndexer = BufferAlgebraND.defaultIndexerBuilder,
) : BufferedGroupNDOps<T, A>(bufferAlgebra, indexerBuilder), RingOpsND<T, A>

public open class BufferedFieldOpsND<T, out A : Field<T>>(
    bufferAlgebra: BufferAlgebra<T, A>,
-   indexerBuilder: (IntArray) -> ShapeIndexer = BufferAlgebraND.defaultIndexerBuilder,
+   indexerBuilder: (Shape) -> ShapeIndexer = BufferAlgebraND.defaultIndexerBuilder,
) : BufferedRingOpsND<T, A>(bufferAlgebra, indexerBuilder), FieldOpsND<T, A> {

    public constructor(
        elementAlgebra: A,
-       indexerBuilder: (IntArray) -> ShapeIndexer = BufferAlgebraND.defaultIndexerBuilder,
+       indexerBuilder: (Shape) -> ShapeIndexer = BufferAlgebraND.defaultIndexerBuilder,
    ) : this(BufferFieldOps(elementAlgebra), indexerBuilder)

    @OptIn(PerformancePitfall::class)
@@ -130,7 +131,7 @@ public val <T, A : Field<T>> BufferAlgebra<T, A>.nd: BufferedFieldOpsND<T, A> ge
public fun <T, A : Algebra<T>> BufferAlgebraND<T, A>.structureND(
    vararg shape: Int,
    initializer: A.(IntArray) -> T,
-): BufferND<T> = structureND(shape, initializer)
+): BufferND<T> = structureND(Shape(shape), initializer)

public fun <T, EA : Algebra<T>, A> A.structureND(
    initializer: EA.(IntArray) -> T,
@@ -5,10 +5,9 @@

package space.kscience.kmath.nd

import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.structures.Buffer
import space.kscience.kmath.structures.BufferFactory
import space.kscience.kmath.structures.MutableBuffer
import space.kscience.kmath.structures.MutableBufferFactory

/**
 * Represents [StructureND] over [Buffer].
@@ -22,32 +21,33 @@ public open class BufferND<out T>(
    public open val buffer: Buffer<T>,
) : StructureND<T> {

+   @PerformancePitfall
    override operator fun get(index: IntArray): T = buffer[indices.offset(index)]

-   override val shape: IntArray get() = indices.shape
+   override val shape: Shape get() = indices.shape

    override fun toString(): String = StructureND.toString(this)
}

-/**
- * Transform structure to a new structure using provided [BufferFactory] and optimizing if argument is [BufferND]
- */
-public inline fun <T, R : Any> StructureND<T>.mapToBuffer(
-    factory: BufferFactory<R>,
-    crossinline transform: (T) -> R,
-): BufferND<R> = if (this is BufferND<T>)
-    BufferND(this.indices, factory.invoke(indices.linearSize) { transform(buffer[it]) })
-else {
-    val strides = ColumnStrides(shape)
-    BufferND(strides, factory.invoke(strides.linearSize) { transform(get(strides.index(it))) })
-}
-
-/**
- * Transform structure to a new structure using inferred [BufferFactory]
- */
-public inline fun <T, reified R : Any> StructureND<T>.mapToBuffer(
-    crossinline transform: (T) -> R,
-): BufferND<R> = mapToBuffer(Buffer.Companion::auto, transform)
+///**
+// * Transform structure to a new structure using provided [BufferFactory] and optimizing if argument is [BufferND]
+// */
+//public inline fun <T, R : Any> StructureND<T>.mapToBuffer(
+//    factory: BufferFactory<R>,
+//    crossinline transform: (T) -> R,
+//): BufferND<R> = if (this is BufferND<T>)
+//    BufferND(this.indices, factory.invoke(indices.linearSize) { transform(buffer[it]) })
+//else {
+//    val strides = ColumnStrides(shape)
+//    BufferND(strides, factory.invoke(strides.linearSize) { transform(get(strides.index(it))) })
+//}
+//
+///**
+// * Transform structure to a new structure using inferred [BufferFactory]
+// */
+//public inline fun <T, reified R : Any> StructureND<T>.mapToBuffer(
+//    crossinline transform: (T) -> R,
+//): BufferND<R> = mapToBuffer(Buffer.Companion::auto, transform)

/**
 * Represents [MutableStructureND] over [MutableBuffer].
@@ -60,22 +60,24 @@ public open class MutableBufferND<T>(
    strides: ShapeIndexer,
    override val buffer: MutableBuffer<T>,
) : MutableStructureND<T>, BufferND<T>(strides, buffer) {

+   @PerformancePitfall
    override fun set(index: IntArray, value: T) {
        buffer[indices.offset(index)] = value
    }
}

-/**
- * Transform structure to a new structure using provided [MutableBufferFactory] and optimizing if argument is [MutableBufferND]
- */
-public inline fun <T, reified R : Any> MutableStructureND<T>.mapToMutableBuffer(
-    factory: MutableBufferFactory<R> = MutableBufferFactory(MutableBuffer.Companion::auto),
-    crossinline transform: (T) -> R,
-): MutableBufferND<R> {
-    return if (this is MutableBufferND<T>)
-        MutableBufferND(this.indices, factory.invoke(indices.linearSize) { transform(buffer[it]) })
-    else {
-        val strides = ColumnStrides(shape)
-        MutableBufferND(strides, factory.invoke(strides.linearSize) { transform(get(strides.index(it))) })
-    }
-}
+///**
+// * Transform structure to a new structure using provided [MutableBufferFactory] and optimizing if argument is [MutableBufferND]
+// */
+//public inline fun <T, reified R : Any> MutableStructureND<T>.mapToMutableBuffer(
+//    factory: MutableBufferFactory<R> = MutableBufferFactory(MutableBuffer.Companion::auto),
+//    crossinline transform: (T) -> R,
+//): MutableBufferND<R> {
+//    return if (this is MutableBufferND<T>)
+//        MutableBufferND(this.indices, factory.invoke(indices.linearSize) { transform(buffer[it]) })
+//    else {
+//        val strides = ColumnStrides(shape)
+//        MutableBufferND(strides, factory.invoke(strides.linearSize) { transform(get(strides.index(it))) })
+//    }
+//}
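With the `mapToBuffer`/`mapToMutableBuffer` helpers commented out above, other call sites in this commit map the backing buffer directly instead. A minimal sketch of that pattern (the sample values are illustrative, not part of the commit):

val doubles = BufferND(ColumnStrides(Shape(2, 2)), DoubleBuffer(doubleArrayOf(1.0, 2.0, 3.0, 4.0)))
val strings: BufferND<String> = BufferND(doubles.indices, doubles.buffer.map { it.toString() })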
@@ -14,15 +14,25 @@ import kotlin.contracts.contract
import kotlin.math.pow
import kotlin.math.pow as kpow

/**
 * A simple mutable [StructureND] of doubles
 */
public class DoubleBufferND(
    indexes: ShapeIndexer,
    override val buffer: DoubleBuffer,
-) : MutableBufferND<Double>(indexes, buffer)
+) : MutableBufferND<Double>(indexes, buffer), MutableStructureNDOfDouble{
+    override fun getDouble(index: IntArray): Double = buffer[indices.offset(index)]
+
+    override fun setDouble(index: IntArray, value: Double) {
+        buffer[indices.offset(index)] = value
+    }
+}


public sealed class DoubleFieldOpsND : BufferedFieldOpsND<Double, DoubleField>(DoubleField.bufferAlgebra),
    ScaleOperations<StructureND<Double>>, ExtendedFieldOps<StructureND<Double>> {

    @OptIn(PerformancePitfall::class)
    override fun StructureND<Double>.toBufferND(): DoubleBufferND = when (this) {
        is DoubleBufferND -> this
        else -> {
@@ -221,7 +231,8 @@ public class DoubleFieldND(override val shape: Shape) :

public val DoubleField.ndAlgebra: DoubleFieldOpsND get() = DoubleFieldOpsND

-public fun DoubleField.ndAlgebra(vararg shape: Int): DoubleFieldND = DoubleFieldND(shape)
+public fun DoubleField.ndAlgebra(vararg shape: Int): DoubleFieldND = DoubleFieldND(Shape(shape))
+public fun DoubleField.ndAlgebra(shape: Shape): DoubleFieldND = DoubleFieldND(shape)

/**
 * Produce a context for n-dimensional operations inside this real field
@@ -229,5 +240,5 @@ public fun DoubleField.ndAlgebra(vararg shape: Int): DoubleFieldND = DoubleField
@UnstableKMathAPI
public inline fun <R> DoubleField.withNdAlgebra(vararg shape: Int, action: DoubleFieldND.() -> R): R {
    contract { callsInPlace(action, InvocationKind.EXACTLY_ONCE) }
-   return DoubleFieldND(shape).run(action)
+   return DoubleFieldND(Shape(shape)).run(action)
}
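Both entry points above now resolve to the same Shape-based constructor; a one-line usage sketch (the dimensions are illustrative):

val fromVararg = DoubleField.ndAlgebra(3, 4)        // vararg ints are wrapped into Shape(3, 4)
val fromShape = DoubleField.ndAlgebra(Shape(3, 4))  // new overload added in this commit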
@@ -46,5 +46,5 @@ public class IntRingND(

public inline fun <R> IntRing.withNdAlgebra(vararg shape: Int, action: IntRingND.() -> R): R {
    contract { callsInPlace(action, InvocationKind.EXACTLY_ONCE) }
-   return IntRingND(shape).run(action)
+   return IntRingND(Shape(shape)).run(action)
}
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2018-2022 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.nd
+
+import space.kscience.kmath.misc.PerformancePitfall
+
+
+public class PermutedStructureND<T>(
+    public val origin: StructureND<T>,
+    public val permutation: (IntArray) -> IntArray,
+) : StructureND<T> {
+
+    override val shape: Shape
+        get() = origin.shape
+
+    @OptIn(PerformancePitfall::class)
+    override fun get(index: IntArray): T {
+        return origin[permutation(index)]
+    }
+}
+
+public fun <T> StructureND<T>.permute(
+    permutation: (IntArray) -> IntArray,
+): PermutedStructureND<T> = PermutedStructureND(this, permutation)
+
+public class PermutedMutableStructureND<T>(
+    public val origin: MutableStructureND<T>,
+    override val shape: Shape = origin.shape,
+    public val permutation: (IntArray) -> IntArray,
+) : MutableStructureND<T> {
+
+
+    @OptIn(PerformancePitfall::class)
+    override fun set(index: IntArray, value: T) {
+        origin[permutation(index)] = value
+    }
+
+    @OptIn(PerformancePitfall::class)
+    override fun get(index: IntArray): T {
+        return origin[permutation(index)]
+    }
+}
+
+public fun <T> MutableStructureND<T>.permute(
+    newShape: Shape = shape,
+    permutation: (IntArray) -> IntArray,
+): PermutedMutableStructureND<T> = PermutedMutableStructureND(this, newShape, permutation)
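A small sketch of the new permutation wrappers: a transpose of a square 2D structure expressed as an index permutation (for non-square shapes the mutable overload accepts the permuted Shape explicitly; the sample structure is illustrative):

val matrix = StructureND.buffered(Shape(3, 3)) { (i, j) -> (i * 3 + j).toDouble() }
val transposed = matrix.permute { index -> intArrayOf(index[1], index[0]) }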
@@ -5,21 +5,88 @@

package space.kscience.kmath.nd

+import space.kscience.kmath.misc.UnsafeKMathAPI
+import kotlin.jvm.JvmInline
+
+/**
+ * A read-only ND shape
+ */
+@JvmInline
+public value class Shape(@PublishedApi internal val array: IntArray) {
+    public val size: Int get() = array.size
+    public operator fun get(index: Int): Int = array[index]
+    override fun toString(): String = array.contentToString()
+}
+
+public inline fun Shape.forEach(block: (value: Int) -> Unit): Unit = array.forEach(block)
+
+public inline fun Shape.forEachIndexed(block: (index: Int, value: Int) -> Unit): Unit = array.forEachIndexed(block)
+
+public infix fun Shape.contentEquals(other: Shape): Boolean = array.contentEquals(other.array)
+
+public fun Shape.contentHashCode(): Int = array.contentHashCode()
+
+public val Shape.indices: IntRange get() = array.indices
+public val Shape.linearSize: Int get() = array.reduce(Int::times)
+
+public fun Shape.slice(range: IntRange): Shape = Shape(array.sliceArray(range))
+
+public fun Shape.last(): Int = array.last()
+
+/**
+ * A shape including last [n] dimensions of this shape
+ */
+public fun Shape.last(n: Int): Shape = Shape(array.copyOfRange(size - n, size))
+
+public fun Shape.first(): Int = array.first()
+
+/**
+ * A shape including first [n] dimensions of this shape
+ */
+public fun Shape.first(n: Int): Shape = Shape(array.copyOfRange(0, n))
+
+public operator fun Shape.plus(add: IntArray): Shape = Shape(array + add)
+
+public operator fun Shape.plus(add: Shape): Shape = Shape(array + add.array)
+
+public fun Shape.isEmpty(): Boolean = size == 0
+public fun Shape.isNotEmpty(): Boolean = size > 0
+
+public fun Shape.transposed(i: Int, j: Int): Shape = Shape(array.copyOf().apply {
+    val ith = get(i)
+    val jth = get(j)
+    set(i, jth)
+    set(j, ith)
+})
+
+public operator fun Shape.component1(): Int = get(0)
+public operator fun Shape.component2(): Int = get(1)
+public operator fun Shape.component3(): Int = get(2)
+
+/**
+ * Convert to array with protective copy
+ */
+public fun Shape.toArray(): IntArray = array.copyOf()
+
+@UnsafeKMathAPI
+public fun Shape.asArray(): IntArray = array
+
+public fun Shape.asList(): List<Int> = array.asList()
+
+
/**
 * An exception is thrown when the expected and actual shape of NDArray differ.
 *
 * @property expected the expected shape.
 * @property actual the actual shape.
 */
-public class ShapeMismatchException(public val expected: IntArray, public val actual: IntArray) :
-    RuntimeException("Shape ${actual.contentToString()} doesn't fit in expected shape ${expected.contentToString()}.")
+public class ShapeMismatchException(public val expected: Shape, public val actual: Shape) :
+    RuntimeException("Shape $actual doesn't fit in expected shape ${expected}.")

public class IndexOutOfShapeException(public val shape: Shape, public val index: IntArray) :
-    RuntimeException("Index ${index.contentToString()} is out of shape ${shape.contentToString()}")
+    RuntimeException("Index ${index.contentToString()} is out of shape ${shape}")

-public typealias Shape = IntArray

-public fun Shape(shapeFirst: Int, vararg shapeRest: Int): Shape = intArrayOf(shapeFirst, *shapeRest)
+public fun Shape(shapeFirst: Int, vararg shapeRest: Int): Shape = Shape(intArrayOf(shapeFirst, *shapeRest))

public interface WithShape {
    public val shape: Shape
@@ -28,8 +95,8 @@ public interface WithShape {
}

internal fun requireIndexInShape(index: IntArray, shape: Shape) {
-   if (index.size != shape.size) throw IndexOutOfShapeException(index, shape)
+   if (index.size != shape.size) throw IndexOutOfShapeException(shape, index)
    shape.forEachIndexed { axis, axisShape ->
-       if (index[axis] !in 0 until axisShape) throw IndexOutOfShapeException(index, shape)
+       if (index[axis] !in 0 until axisShape) throw IndexOutOfShapeException(shape, index)
    }
}
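A minimal sketch exercising the read-only Shape API defined above (the values are illustrative):

val shape = Shape(2, 3, 4)
require(shape.linearSize == 24)            // product of dimensions
val (rows, cols) = shape.first(2)          // component1/component2 destructuring
val tail: Shape = shape.last(2)            // Shape(3, 4)
val swapped = shape.transposed(0, 2)       // swaps the first and last dimensions
require(swapped contentEquals Shape(4, 3, 2))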
@@ -49,10 +49,10 @@ public abstract class Strides : ShapeIndexer {
    /**
     * Array strides
     */
-   public abstract val strides: IntArray
+   internal abstract val strides: IntArray

    public override fun offset(index: IntArray): Int = index.mapIndexed { i, value ->
-       if (value < 0 || value >= shape[i]) throw IndexOutOfBoundsException("Index $value out of shape bounds: (0,${this.shape[i]})")
+       if (value !in 0 until shape[i]) throw IndexOutOfBoundsException("Index $value out of shape bounds: (0, ${this.shape[i]})")
        value * strides[i]
    }.sum()

@@ -63,15 +63,12 @@ public abstract class Strides : ShapeIndexer {
     */
    public override fun asSequence(): Sequence<IntArray> = (0 until linearSize).asSequence().map(::index)

-   public companion object{
-       public fun linearSizeOf(shape: IntArray): Int = shape.reduce(Int::times)
-   }
}

/**
 * Column-first [Strides]. Columns are represented as continuous arrays
 */
-public class ColumnStrides(override val shape: IntArray) : Strides() {
+public class ColumnStrides(override val shape: Shape) : Strides() {
    override val linearSize: Int get() = strides[shape.size]

    /**
@@ -121,7 +118,7 @@ public class ColumnStrides(override val shape: IntArray) : Strides() {
 *
 * @param shape the shape of the tensor.
 */
-public class RowStrides(override val shape: IntArray) : Strides() {
+public class RowStrides(override val shape: Shape) : Strides() {

    override val strides: IntArray by lazy {
        val nDim = shape.size
@@ -151,7 +148,7 @@ public class RowStrides(override val shape: IntArray) : Strides() {
        return res
    }

-   override val linearSize: Int get() = linearSizeOf(shape)
+   override val linearSize: Int get() = shape.linearSize

    override fun equals(other: Any?): Boolean {
        if (this === other) return true
@@ -166,9 +163,9 @@ public class RowStrides(override val shape: IntArray) : Strides() {
}

@ThreadLocal
-private val defaultStridesCache = HashMap<IntArray, Strides>()
+private val defaultStridesCache = HashMap<Shape, Strides>()

/**
 * Cached builder for default strides
 */
-public fun Strides(shape: IntArray): Strides = defaultStridesCache.getOrPut(shape) { RowStrides(shape) }
+public fun Strides(shape: Shape): Strides = defaultStridesCache.getOrPut(shape) { RowStrides(shape) }
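The two Strides implementations above order offsets differently; a short sketch of the difference (the shape and index are illustrative):

val shape = Shape(3, 4)
val rowMajor = RowStrides(shape)     // last index varies fastest
val colMajor = ColumnStrides(shape)  // first index varies fastest
require(rowMajor.offset(intArrayOf(1, 2)) == 1 * 4 + 2)  // strides [4, 1]
require(colMajor.offset(intArrayOf(1, 2)) == 1 + 2 * 3)  // strides [1, 3]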
@@ -30,5 +30,5 @@ public class ShortRingND(

public inline fun <R> ShortRing.withNdAlgebra(vararg shape: Int, action: ShortRingND.() -> R): R {
    contract { callsInPlace(action, InvocationKind.EXACTLY_ONCE) }
-   return ShortRingND(shape).run(action)
+   return ShortRingND(Shape(shape)).run(action)
}
@@ -18,6 +18,7 @@ import kotlin.jvm.JvmInline
public interface Structure1D<out T> : StructureND<T>, Buffer<T> {
    override val dimension: Int get() = 1

+   @PerformancePitfall
    override operator fun get(index: IntArray): T {
        require(index.size == 1) { "Index dimension mismatch. Expected 1 but found ${index.size}" }
        return get(index[0])
@@ -32,6 +33,8 @@ public interface Structure1D<out T> : StructureND<T>, Buffer<T> {
 * A mutable structure that is guaranteed to be one-dimensional
 */
public interface MutableStructure1D<T> : Structure1D<T>, MutableStructureND<T>, MutableBuffer<T> {
+
+   @PerformancePitfall
    override operator fun set(index: IntArray, value: T) {
        require(index.size == 1) { "Index dimension mismatch. Expected 1 but found ${index.size}" }
        set(index[0], value)
@@ -43,9 +46,10 @@ public interface MutableStructure1D<T> : Structure1D<T>, MutableStructureND<T>, MutableBuffer<T> {
 */
@JvmInline
private value class Structure1DWrapper<out T>(val structure: StructureND<T>) : Structure1D<T> {
-   override val shape: IntArray get() = structure.shape
+   override val shape: Shape get() = structure.shape
    override val size: Int get() = structure.shape[0]

+   @PerformancePitfall
    override operator fun get(index: Int): T = structure[index]

    @PerformancePitfall
@@ -56,13 +60,16 @@ private value class Structure1DWrapper<out T>(val structure: StructureND<T>) : S
 * A 1D wrapper for a mutable nd-structure
 */
private class MutableStructure1DWrapper<T>(val structure: MutableStructureND<T>) : MutableStructure1D<T> {
-   override val shape: IntArray get() = structure.shape
+   override val shape: Shape get() = structure.shape
    override val size: Int get() = structure.shape[0]

    @PerformancePitfall
    override fun elements(): Sequence<Pair<IntArray, T>> = structure.elements()

+   @PerformancePitfall
    override fun get(index: Int): T = structure[index]

+   @PerformancePitfall
    override fun set(index: Int, value: T) {
        structure[intArrayOf(index)] = value
    }
@@ -83,7 +90,7 @@ private class MutableStructure1DWrapper<T>(val structure: MutableStructureND<T>)
 */
@JvmInline
private value class Buffer1DWrapper<out T>(val buffer: Buffer<T>) : Structure1D<T> {
-   override val shape: IntArray get() = intArrayOf(buffer.size)
+   override val shape: Shape get() = Shape(buffer.size)
    override val size: Int get() = buffer.size

    @PerformancePitfall
@@ -95,7 +102,7 @@ private value class Buffer1DWrapper<out T>(val buffer: Buffer<T>) : Structure1D<
}

internal class MutableBuffer1DWrapper<T>(val buffer: MutableBuffer<T>) : MutableStructure1D<T> {
-   override val shape: IntArray get() = intArrayOf(buffer.size)
+   override val shape: Shape get() = Shape(buffer.size)
    override val size: Int get() = buffer.size

    @PerformancePitfall
@@ -29,7 +29,7 @@ public interface Structure2D<out T> : StructureND<T> {
     */
    public val colNum: Int

-   override val shape: IntArray get() = intArrayOf(rowNum, colNum)
+   override val shape: Shape get() = Shape(rowNum, colNum)

    /**
     * The buffer of rows of this structure. It gets elements from the structure dynamically.
@@ -54,6 +54,7 @@ public interface Structure2D<out T> : StructureND<T> {
     */
    public operator fun get(i: Int, j: Int): T

+   @PerformancePitfall
    override operator fun get(index: IntArray): T {
        require(index.size == 2) { "Index dimension mismatch. Expected 2 but found ${index.size}" }
        return get(index[0], index[1])
@@ -106,6 +107,7 @@ private value class Structure2DWrapper<out T>(val structure: StructureND<T>) : S
    override val rowNum: Int get() = shape[0]
    override val colNum: Int get() = shape[1]

+   @PerformancePitfall
    override operator fun get(i: Int, j: Int): T = structure[i, j]

    override fun <F : StructureFeature> getFeature(type: KClass<out F>): F? = structure.getFeature(type)
@@ -123,12 +125,15 @@ private class MutableStructure2DWrapper<T>(val structure: MutableStructureND<T>)
    override val rowNum: Int get() = shape[0]
    override val colNum: Int get() = shape[1]

+   @PerformancePitfall
    override operator fun get(i: Int, j: Int): T = structure[i, j]

+   @PerformancePitfall
    override fun set(index: IntArray, value: T) {
        structure[index] = value
    }

+   @PerformancePitfall
    override operator fun set(i: Int, j: Int, value: T) {
        structure[intArrayOf(i, j)] = value
    }
@@ -46,6 +46,7 @@ public interface StructureND<out T> : Featured<StructureFeature>, WithShape {
     * @param index the indices.
     * @return the value.
     */
+   @PerformancePitfall
    public operator fun get(index: IntArray): T

    /**
@@ -97,6 +98,7 @@ public interface StructureND<out T> : Featured<StructureFeature>, WithShape {
    /**
     * Debug output to string
     */
+   @OptIn(PerformancePitfall::class)
    public fun toString(structure: StructureND<*>): String {
        val bufferRepr: String = when (structure.shape.size) {
            1 -> (0 until structure.shape[0]).map { structure[it] }
@@ -116,7 +118,7 @@ public interface StructureND<out T> : Featured<StructureFeature>, WithShape {
        }
        val className = structure::class.simpleName ?: "StructureND"

-       return "$className(shape=${structure.shape.contentToString()}, buffer=$bufferRepr)"
+       return "$className(shape=${structure.shape}, buffer=$bufferRepr)"
    }

    /**
@@ -145,13 +147,13 @@ public interface StructureND<out T> : Featured<StructureFeature>, WithShape {
    ): BufferND<T> = BufferND(strides, Buffer.auto(type, strides.linearSize) { i -> initializer(strides.index(i)) })

    public fun <T> buffered(
-       shape: IntArray,
+       shape: Shape,
        bufferFactory: BufferFactory<T> = BufferFactory.boxing(),
        initializer: (IntArray) -> T,
    ): BufferND<T> = buffered(ColumnStrides(shape), bufferFactory, initializer)

    public inline fun <reified T : Any> auto(
-       shape: IntArray,
+       shape: Shape,
        crossinline initializer: (IntArray) -> T,
    ): BufferND<T> = auto(ColumnStrides(shape), initializer)

@@ -160,13 +162,13 @@ public interface StructureND<out T> : Featured<StructureFeature>, WithShape {
        vararg shape: Int,
        crossinline initializer: (IntArray) -> T,
    ): BufferND<T> =
-       auto(ColumnStrides(shape), initializer)
+       auto(ColumnStrides(Shape(shape)), initializer)

    public inline fun <T : Any> auto(
        type: KClass<T>,
        vararg shape: Int,
        crossinline initializer: (IntArray) -> T,
-   ): BufferND<T> = auto(type, ColumnStrides(shape), initializer)
+   ): BufferND<T> = auto(type, ColumnStrides(Shape(shape)), initializer)
    }
}

@@ -214,8 +216,13 @@ public fun <T : Comparable<T>> LinearSpace<T, Ring<T>>.contentEquals(
 * @param index the indices.
 * @return the value.
 */
+@PerformancePitfall
public operator fun <T> StructureND<T>.get(vararg index: Int): T = get(index)

+public operator fun StructureND<Double>.get(vararg index: Int): Double = getDouble(index)
+
+public operator fun StructureND<Int>.get(vararg index: Int): Int = getInt(index)
+
//@UnstableKMathAPI
//public inline fun <reified T : StructureFeature> StructureND<*>.getFeature(): T? = getFeature(T::class)

@@ -229,12 +236,14 @@ public interface MutableStructureND<T> : StructureND<T> {
     * @param index the indices.
     * @param value the value.
     */
+   @PerformancePitfall
    public operator fun set(index: IntArray, value: T)
}

/**
 * Set value at specified indices
 */
+@PerformancePitfall
public operator fun <T> MutableStructureND<T>.set(vararg index: Int, value: T) {
    set(index, value)
}
@@ -5,12 +5,15 @@

package space.kscience.kmath.nd

+import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.misc.UnstableKMathAPI

public open class VirtualStructureND<T>(
    override val shape: Shape,
    public val producer: (IntArray) -> T,
) : StructureND<T> {

+   @PerformancePitfall
    override fun get(index: IntArray): T {
        requireIndexInShape(index, shape)
        return producer(index)
@@ -5,6 +5,9 @@

package space.kscience.kmath.nd

+import space.kscience.kmath.misc.PerformancePitfall
+
+@OptIn(PerformancePitfall::class)
public fun <T> StructureND<T>.roll(axis: Int, step: Int = 1): StructureND<T> {
    require(axis in shape.indices) { "Axis $axis is outside of shape dimensions: [0, ${shape.size})" }
    return VirtualStructureND(shape) { index ->
@@ -19,6 +22,7 @@ public fun <T> StructureND<T>.roll(axis: Int, step: Int = 1): StructureND<T> {
    }
}

+@OptIn(PerformancePitfall::class)
public fun <T> StructureND<T>.roll(pair: Pair<Int, Int>, vararg others: Pair<Int, Int>): StructureND<T> {
    val axisMap: Map<Int, Int> = mapOf(pair, *others)
    require(axisMap.keys.all { it in shape.indices }) { "Some of axes ${axisMap.keys} is outside of shape dimensions: [0, ${shape.size})" }
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018-2022 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.nd
+
+import space.kscience.kmath.misc.PerformancePitfall
+
+public interface StructureNDOfDouble : StructureND<Double> {
+    /**
+     * Guaranteed non-blocking access to content
+     */
+    public fun getDouble(index: IntArray): Double
+}
+
+/**
+ * Optimized method to access primitive without boxing if possible
+ */
+@OptIn(PerformancePitfall::class)
+public fun StructureND<Double>.getDouble(index: IntArray): Double =
+    if (this is StructureNDOfDouble) getDouble(index) else get(index)
+
+public interface MutableStructureNDOfDouble : StructureNDOfDouble, MutableStructureND<Double> {
+    /**
+     * Guaranteed non-blocking access to content
+     */
+    public fun setDouble(index: IntArray, value: Double)
+}
+
+@OptIn(PerformancePitfall::class)
+public fun MutableStructureND<Double>.getDouble(index: IntArray): Double =
+    if (this is StructureNDOfDouble) getDouble(index) else get(index)
+
+
+public interface StructureNDOfInt : StructureND<Int> {
+    /**
+     * Guaranteed non-blocking access to content
+     */
+    public fun getInt(index: IntArray): Int
+}
+
+@OptIn(PerformancePitfall::class)
+public fun StructureND<Int>.getInt(index: IntArray): Int =
+    if (this is StructureNDOfInt) getInt(index) else get(index)
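A sketch of the specialized access path defined above, using the DoubleBufferND implementation from this commit (the sample values are illustrative): when the receiver implements StructureNDOfDouble the call avoids boxing, otherwise it falls back to the generic get.

val nd = DoubleBufferND(ColumnStrides(Shape(2, 2)), DoubleBuffer(doubleArrayOf(1.0, 2.0, 3.0, 4.0)))
nd.setDouble(intArrayOf(0, 1), 5.0)
val x: Double = nd.getDouble(intArrayOf(0, 1)) // 5.0, without boxing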
@@ -5,10 +5,7 @@

package space.kscience.kmath.structures

-import space.kscience.kmath.nd.ColumnStrides
-import space.kscience.kmath.nd.Structure2D
-import space.kscience.kmath.nd.StructureND
-import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.nd.*

/**
 * A context that allows to operate on a [MutableBuffer] as on 2d array
@@ -31,7 +28,7 @@ internal class BufferAccessor2D<T>(

    //TODO optimize wrapper
    fun MutableBuffer<T>.collect(): Structure2D<T> = StructureND.buffered(
-       ColumnStrides(intArrayOf(rowNum, colNum)),
+       ColumnStrides(Shape(rowNum, colNum)),
        factory
    ) { (i, j) ->
        get(i, j)
@@ -10,7 +10,7 @@ import kotlin.test.Test
class StridesTest {
    @Test
    fun checkRowBasedStrides() {
-       val strides = RowStrides(intArrayOf(3, 3))
+       val strides = RowStrides(Shape(3, 3))
        var counter = 0
        for(i in 0..2){
            for(j in 0..2){
@@ -24,7 +24,7 @@ class StridesTest {

    @Test
    fun checkColumnBasedStrides() {
-       val strides = ColumnStrides(intArrayOf(3, 3))
+       val strides = ColumnStrides(Shape(3, 3))
        var counter = 0
        for(i in 0..2){
            for(j in 0..2){
@@ -88,7 +88,7 @@ class NumberNDFieldTest {
    @Test
    fun testInternalContext() {
        algebra {
-           (DoubleField.ndAlgebra(*array1.shape)) { with(L2Norm) { 1 + norm(array1) + exp(array2) } }
+           (DoubleField.ndAlgebra(array1.shape)) { with(L2Norm) { 1 + norm(array1) + exp(array2) } }
        }
    }
}
@@ -9,11 +9,12 @@ import kotlinx.coroutines.*
import space.kscience.kmath.coroutines.Math
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.nd.ColumnStrides
+import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.StructureND

public class LazyStructureND<out T>(
    public val scope: CoroutineScope,
-   override val shape: IntArray,
+   override val shape: Shape,
    public val function: suspend (IntArray) -> T,
) : StructureND<T> {
    private val cache: MutableMap<IntArray, Deferred<T>> = HashMap()
@@ -23,6 +24,7 @@ public class LazyStructureND<out T>(
    }

    public suspend fun await(index: IntArray): T = async(index).await()
+   @PerformancePitfall
    override operator fun get(index: IntArray): T = runBlocking { async(index).await() }

    @OptIn(PerformancePitfall::class)
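A sketch of the renamed lazy API (the changelog's `deffered` -> `async`), assuming a coroutine context is available (the shape and producer below are illustrative):

suspend fun demo(scope: CoroutineScope) {
    val lazyND = LazyStructureND(scope, Shape(2, 2)) { index -> index.sum() }
    val value = lazyND.await(intArrayOf(1, 1)) // computed on demand and cached
}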
@@ -6,6 +6,7 @@
package space.kscience.kmath.dimensions

import space.kscience.kmath.linear.*
+import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.Structure2D
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.Ring
@@ -47,7 +48,7 @@ public interface DMatrix<out T, R : Dimension, C : Dimension> : Structure2D<T> {
public value class DMatrixWrapper<out T, R : Dimension, C : Dimension>(
    private val structure: Structure2D<T>,
) : DMatrix<T, R, C> {
-   override val shape: IntArray get() = structure.shape
+   override val shape: Shape get() = structure.shape
    override val rowNum: Int get() = shape[0]
    override val colNum: Int get() = shape[1]
    override operator fun get(i: Int, j: Int): T = structure[i, j]
@@ -15,6 +15,7 @@ import space.kscience.kmath.linear.*
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.nd.StructureND
+import space.kscience.kmath.nd.toArray
import space.kscience.kmath.operations.algebra
import kotlin.random.Random
import kotlin.random.asJavaRandom
@@ -52,7 +53,7 @@ internal class EjmlMatrixTest {
    fun shape() {
        val m = randomMatrix
        val w = EjmlDoubleMatrix(m)
-       assertContentEquals(intArrayOf(m.numRows, m.numCols), w.shape)
+       assertContentEquals(intArrayOf(m.numRows, m.numCols), w.shape.toArray())
    }

    @OptIn(UnstableKMathAPI::class)
@@ -7,6 +7,7 @@ package space.kscience.kmath.histogram

import space.kscience.kmath.domains.Domain
import space.kscience.kmath.linear.Point
+import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.nd.ColumnStrides
import space.kscience.kmath.nd.FieldOpsND
import space.kscience.kmath.nd.Shape
@@ -24,6 +25,7 @@ public class HistogramND<T : Comparable<T>, D : Domain<T>, V : Any>(
    internal val values: StructureND<V>,
) : Histogram<T, V, DomainBin<T, D, V>> {

+   @OptIn(PerformancePitfall::class)
    override fun get(point: Point<T>): DomainBin<T, D, V>? {
        val index = group.getIndexOrNull(point) ?: return null
        return group.produceBin(index, values[index])
@@ -31,6 +33,7 @@ public class HistogramND<T : Comparable<T>, D : Domain<T>, V : Any>(

    override val dimension: Int get() = group.shape.size

+   @OptIn(PerformancePitfall::class)
    override val bins: Iterable<DomainBin<T, D, V>>
        get() = ColumnStrides(group.shape).asSequence().map {
            group.produceBin(it, values[it])
@ -9,11 +9,10 @@ package space.kscience.kmath.histogram

import space.kscience.kmath.domains.HyperSquareDomain
import space.kscience.kmath.linear.Point
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.nd.*
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.Field
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.operations.*
import space.kscience.kmath.structures.*
import kotlin.math.floor

@ -40,7 +39,7 @@ public class UniformHistogramGroupND<V : Any, A : Field<V>>(

public val dimension: Int get() = lower.size

override val shape: IntArray = IntArray(binNums.size) { binNums[it] + 2 }
override val shape: Shape = Shape(IntArray(binNums.size) { binNums[it] + 2 })

private val binSize = DoubleBuffer(dimension) { (upper[it] - lower[it]) / binNums[it] }

@ -83,8 +82,12 @@ public class UniformHistogramGroupND<V : Any, A : Field<V>>(
}

override fun produce(builder: HistogramBuilder<Double, V>.() -> Unit): HistogramND<Double, HyperSquareDomain, V> {
val ndCounter = StructureND.buffered(shape) { Counter.of(valueAlgebraND.elementAlgebra) }
@OptIn(PerformancePitfall::class)
override fun produce(
builder: HistogramBuilder<Double, V>.() -> Unit,
): HistogramND<Double, HyperSquareDomain, V> {
val ndCounter: BufferND<ObjectCounter<V>> =
StructureND.buffered(shape) { Counter.of(valueAlgebraND.elementAlgebra) }
val hBuilder = object : HistogramBuilder<Double, V> {
override val defaultValue: V get() = valueAlgebraND.elementAlgebra.one

@ -94,7 +97,8 @@ public class UniformHistogramGroupND<V : Any, A : Field<V>>(
}
}
hBuilder.apply(builder)
val values: BufferND<V> = ndCounter.mapToBuffer(valueBufferFactory) { it.value }
val values: BufferND<V> = BufferND(ndCounter.indices, ndCounter.buffer.map(valueBufferFactory) { it.value })

return HistogramND(this, values)
}
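Not from the commit: both `produce` above and `bins` in HistogramND walk a `Shape` through `ColumnStrides`. A minimal sketch of that index enumeration, using only calls that appear in this diff (`ColumnStrides(shape)`, `linearSize`, `asSequence()`):

import space.kscience.kmath.nd.ColumnStrides
import space.kscience.kmath.nd.Shape

fun stridesDemo() {
    val strides = ColumnStrides(Shape(2, 3))
    println(strides.linearSize)                // 6 flat offsets for a 2x3 shape
    strides.asSequence().forEach { index ->    // every multi-index as an IntArray
        println(index.joinToString(prefix = "[", postfix = "]"))
    }
}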
@ -7,6 +7,7 @@

package space.kscience.kmath.histogram

import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.nd.ColumnStrides
import space.kscience.kmath.operations.invoke
@ -50,6 +51,7 @@ internal class MultivariateHistogramTest {
assertEquals(n, histogram.bins.sumOf { it.binValue.toInt() })
}

@OptIn(PerformancePitfall::class)
@Test
fun testHistogramAlgebra() {
Histogram.uniformDoubleNDFromRanges(
@ -13,14 +13,16 @@ import kotlin.jvm.JvmInline

@JvmInline
public value class MultikTensor<T>(public val array: MutableMultiArray<T, DN>) : Tensor<T> {
override val shape: Shape get() = array.shape
override val shape: Shape get() = Shape(array.shape)

@PerformancePitfall
override fun get(index: IntArray): T = array[index]

@PerformancePitfall
override fun elements(): Sequence<Pair<IntArray, T>> =
array.multiIndices.iterator().asSequence().map { it to get(it) }

@PerformancePitfall
override fun set(index: IntArray, value: T) {
array[index] = value
}
@ -14,6 +14,7 @@ import org.jetbrains.kotlinx.multik.api.stat.Statistics
|
||||
import org.jetbrains.kotlinx.multik.ndarray.data.*
|
||||
import org.jetbrains.kotlinx.multik.ndarray.operations.*
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.misc.UnsafeKMathAPI
|
||||
import space.kscience.kmath.nd.*
|
||||
import space.kscience.kmath.operations.*
|
||||
import space.kscience.kmath.tensors.api.Tensor
|
||||
@ -30,21 +31,22 @@ public abstract class MultikTensorAlgebra<T, A : Ring<T>>(
|
||||
protected val multikLinAl: LinAlg = multikEngine.getLinAlg()
|
||||
protected val multikStat: Statistics = multikEngine.getStatistics()
|
||||
|
||||
@OptIn(UnsafeKMathAPI::class)
|
||||
override fun structureND(shape: Shape, initializer: A.(IntArray) -> T): MultikTensor<T> {
|
||||
val strides = ColumnStrides(shape)
|
||||
val memoryView = initMemoryView<T>(strides.linearSize, type)
|
||||
strides.asSequence().forEachIndexed { linearIndex, tensorIndex ->
|
||||
memoryView[linearIndex] = elementAlgebra.initializer(tensorIndex)
|
||||
}
|
||||
return MultikTensor(NDArray(memoryView, shape = shape, dim = DN(shape.size)))
|
||||
return MultikTensor(NDArray(memoryView, shape = shape.asArray(), dim = DN(shape.size)))
|
||||
}
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
@OptIn(PerformancePitfall::class, UnsafeKMathAPI::class)
|
||||
override fun StructureND<T>.map(transform: A.(T) -> T): MultikTensor<T> = if (this is MultikTensor) {
|
||||
val data = initMemoryView<T>(array.size, type)
|
||||
var count = 0
|
||||
for (el in array) data[count++] = elementAlgebra.transform(el)
|
||||
NDArray(data, shape = shape, dim = array.dim).wrap()
|
||||
NDArray(data, shape = shape.asArray(), dim = array.dim).wrap()
|
||||
} else {
|
||||
structureND(shape) { index ->
|
||||
transform(get(index))
|
||||
@ -75,6 +77,7 @@ public abstract class MultikTensorAlgebra<T, A : Ring<T>>(
|
||||
/**
|
||||
* Transform a structure element-by element in place.
|
||||
*/
|
||||
@OptIn(PerformancePitfall::class)
|
||||
public inline fun <T> MutableStructureND<T>.mapIndexedInPlace(operation: (index: IntArray, t: T) -> T): Unit {
|
||||
if (this is MultikTensor) {
|
||||
array.multiIndices.iterator().forEach {
|
||||
@ -106,10 +109,11 @@ public abstract class MultikTensorAlgebra<T, A : Ring<T>>(
|
||||
* Convert a tensor to [MultikTensor] if necessary. If tensor is converted, changes on the resulting tensor
|
||||
* are not reflected back onto the source
|
||||
*/
|
||||
@OptIn(UnsafeKMathAPI::class, PerformancePitfall::class)
|
||||
public fun StructureND<T>.asMultik(): MultikTensor<T> = if (this is MultikTensor) {
|
||||
this
|
||||
} else {
|
||||
val res = mk.zeros<T, DN>(shape, type).asDNArray()
|
||||
val res = mk.zeros<T, DN>(shape.asArray(), type).asDNArray()
|
||||
for (index in res.multiIndices) {
|
||||
res[index] = this[index]
|
||||
}
|
||||
@ -118,7 +122,8 @@ public abstract class MultikTensorAlgebra<T, A : Ring<T>>(
|
||||
|
||||
public fun MutableMultiArray<T, *>.wrap(): MultikTensor<T> = MultikTensor(this.asDNArray())
|
||||
|
||||
override fun StructureND<T>.valueOrNull(): T? = if (shape contentEquals intArrayOf(1)) {
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun StructureND<T>.valueOrNull(): T? = if (shape contentEquals Shape(1)) {
|
||||
get(intArrayOf(0))
|
||||
} else null
|
||||
|
||||
@ -139,6 +144,7 @@ public abstract class MultikTensorAlgebra<T, A : Ring<T>>(
|
||||
}
|
||||
}
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun Tensor<T>.plusAssign(arg: StructureND<T>) {
|
||||
if (this is MultikTensor) {
|
||||
array.plusAssign(arg.asMultik().array)
|
||||
@ -163,6 +169,7 @@ public abstract class MultikTensorAlgebra<T, A : Ring<T>>(
|
||||
}
|
||||
}
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun Tensor<T>.minusAssign(arg: StructureND<T>) {
|
||||
if (this is MultikTensor) {
|
||||
array.minusAssign(arg.asMultik().array)
|
||||
@ -188,6 +195,7 @@ public abstract class MultikTensorAlgebra<T, A : Ring<T>>(
|
||||
}
|
||||
}
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun Tensor<T>.timesAssign(arg: StructureND<T>) {
|
||||
if (this is MultikTensor) {
|
||||
array.timesAssign(arg.asMultik().array)
|
||||
@ -201,13 +209,13 @@ public abstract class MultikTensorAlgebra<T, A : Ring<T>>(
|
||||
|
||||
override fun Tensor<T>.getTensor(i: Int): MultikTensor<T> = asMultik().array.mutableView(i).wrap()
|
||||
|
||||
override fun Tensor<T>.transposed(i: Int, j: Int): MultikTensor<T> = asMultik().array.transpose(i, j).wrap()
|
||||
override fun StructureND<T>.transposed(i: Int, j: Int): MultikTensor<T> = asMultik().array.transpose(i, j).wrap()
|
||||
|
||||
override fun Tensor<T>.view(shape: IntArray): MultikTensor<T> {
|
||||
require(shape.all { it > 0 })
|
||||
require(shape.fold(1, Int::times) == this.shape.size) {
|
||||
override fun Tensor<T>.view(shape: Shape): MultikTensor<T> {
|
||||
require(shape.asList().all { it > 0 })
|
||||
require(shape.linearSize == this.shape.size) {
|
||||
"Cannot reshape array of size ${this.shape.size} into a new shape ${
|
||||
shape.joinToString(
|
||||
shape.asList().joinToString(
|
||||
prefix = "(",
|
||||
postfix = ")"
|
||||
)
|
||||
@ -215,10 +223,11 @@ public abstract class MultikTensorAlgebra<T, A : Ring<T>>(
|
||||
}
|
||||
|
||||
val mt = asMultik().array
|
||||
return if (mt.shape.contentEquals(shape)) {
|
||||
return if (Shape(mt.shape).contentEquals(shape)) {
|
||||
mt
|
||||
} else {
|
||||
NDArray(mt.data, mt.offset, shape, dim = DN(shape.size), base = mt.base ?: mt)
|
||||
@OptIn(UnsafeKMathAPI::class)
|
||||
NDArray(mt.data, mt.offset, shape.asArray(), dim = DN(shape.size), base = mt.base ?: mt)
|
||||
}.wrap()
|
||||
}
|
||||
|
||||
@ -241,7 +250,7 @@ public abstract class MultikTensorAlgebra<T, A : Ring<T>>(
|
||||
TODO("Not implemented for broadcasting")
|
||||
}
|
||||
|
||||
override fun diagonalEmbedding(diagonalEntries: Tensor<T>, offset: Int, dim1: Int, dim2: Int): MultikTensor<T> {
|
||||
override fun diagonalEmbedding(diagonalEntries: StructureND<T>, offset: Int, dim1: Int, dim2: Int): MultikTensor<T> {
|
||||
|
||||
TODO("Diagonal embedding not implemented")
|
||||
}
|
||||
@ -284,8 +293,9 @@ public abstract class MultikDivisionTensorAlgebra<T, A : Field<T>>(
|
||||
multikEngine: Engine,
|
||||
) : MultikTensorAlgebra<T, A>(multikEngine), TensorPartialDivisionAlgebra<T, A> where T : Number, T : Comparable<T> {
|
||||
|
||||
@OptIn(UnsafeKMathAPI::class)
|
||||
override fun T.div(arg: StructureND<T>): MultikTensor<T> =
|
||||
Multik.ones<T, DN>(arg.shape, type).apply { divAssign(arg.asMultik().array) }.wrap()
|
||||
Multik.ones<T, DN>(arg.shape.asArray(), type).apply { divAssign(arg.asMultik().array) }.wrap()
|
||||
|
||||
override fun StructureND<T>.div(arg: T): MultikTensor<T> =
|
||||
asMultik().array.div(arg).wrap()
|
||||
@ -301,6 +311,7 @@ public abstract class MultikDivisionTensorAlgebra<T, A : Field<T>>(
|
||||
}
|
||||
}
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun Tensor<T>.divAssign(arg: StructureND<T>) {
|
||||
if (this is MultikTensor) {
|
||||
array.divAssign(arg.asMultik().array)
|
||||
|
@ -7,6 +7,7 @@ package space.kscience.kmath.multik
|
||||
|
||||
import org.jetbrains.kotlinx.multik.default.DefaultEngine
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.nd.Shape
|
||||
import space.kscience.kmath.nd.StructureND
|
||||
import space.kscience.kmath.nd.one
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
@ -28,8 +29,8 @@ internal class MultikNDTest {
|
||||
fun dotResult() {
|
||||
val dim = 100
|
||||
|
||||
val tensor1 = DoubleTensorAlgebra.randomNormal(shape = intArrayOf(dim, dim), 12224)
|
||||
val tensor2 = DoubleTensorAlgebra.randomNormal(shape = intArrayOf(dim, dim), 12225)
|
||||
val tensor1 = DoubleTensorAlgebra.randomNormal(shape = Shape(dim, dim), 12224)
|
||||
val tensor2 = DoubleTensorAlgebra.randomNormal(shape = Shape(dim, dim), 12225)
|
||||
|
||||
val multikResult = with(multikAlgebra) {
|
||||
tensor1 dot tensor2
|
||||
|
@ -11,6 +11,7 @@ import org.nd4j.linalg.api.ops.impl.transforms.strict.ASinh
|
||||
import org.nd4j.linalg.factory.Nd4j
|
||||
import org.nd4j.linalg.ops.transforms.Transforms
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.misc.UnsafeKMathAPI
|
||||
import space.kscience.kmath.misc.UnstableKMathAPI
|
||||
import space.kscience.kmath.nd.*
|
||||
import space.kscience.kmath.operations.*
|
||||
@ -33,7 +34,8 @@ public sealed interface Nd4jArrayAlgebra<T, out C : Algebra<T>> : AlgebraND<T, C
|
||||
public val StructureND<T>.ndArray: INDArray
|
||||
|
||||
override fun structureND(shape: Shape, initializer: C.(IntArray) -> T): Nd4jArrayStructure<T> {
|
||||
val struct = Nd4j.create(*shape)!!.wrap()
|
||||
@OptIn(UnsafeKMathAPI::class)
|
||||
val struct: Nd4jArrayStructure<T> = Nd4j.create(*shape.asArray())!!.wrap()
|
||||
struct.indicesIterator().forEach { struct[it] = elementAlgebra.initializer(it) }
|
||||
return struct
|
||||
}
|
||||
@ -45,23 +47,23 @@ public sealed interface Nd4jArrayAlgebra<T, out C : Algebra<T>> : AlgebraND<T, C
|
||||
return newStruct
|
||||
}
|
||||
|
||||
@OptIn(PerformancePitfall::class)
@OptIn(PerformancePitfall::class, UnsafeKMathAPI::class)
override fun StructureND<T>.mapIndexed(
transform: C.(index: IntArray, T) -> T,
): Nd4jArrayStructure<T> {
val new = Nd4j.create(*shape).wrap()
val new = Nd4j.create(*shape.asArray()).wrap()
new.indicesIterator().forEach { idx -> new[idx] = elementAlgebra.transform(idx, this[idx]) }
return new
}

@OptIn(PerformancePitfall::class)
@OptIn(PerformancePitfall::class, UnsafeKMathAPI::class)
override fun zip(
left: StructureND<T>,
right: StructureND<T>,
transform: C.(T, T) -> T,
): Nd4jArrayStructure<T> {
require(left.shape.contentEquals(right.shape)) { "Can't zip two structures of shape ${left.shape} and ${right.shape}" }
val new = Nd4j.create(*left.shape).wrap()
val new = Nd4j.create(*left.shape.asArray()).wrap()
new.indicesIterator().forEach { idx -> new[idx] = elementAlgebra.transform(left[idx], right[idx]) }
return new
}
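A usage sketch, not from the commit, of the Shape-based `structureND` shown in this file, through the `DoubleField.nd4j` binding declared further down; only entry points visible in this diff are assumed:

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd4j.nd4j
import space.kscience.kmath.operations.DoubleField

fun nd4jDemo() {
    val algebra = DoubleField.nd4j
    // structureND now takes a Shape; the initializer still receives a plain IntArray index.
    val structure = algebra.structureND(Shape(2, 2)) { index -> (index[0] + index[1]).toDouble() }
    println(structure.shape)
}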
@ -192,11 +194,11 @@ public open class DoubleNd4jArrayFieldOps : Nd4jArrayExtendedFieldOps<Double, Do
|
||||
|
||||
override fun INDArray.wrap(): Nd4jArrayStructure<Double> = asDoubleStructure()
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
@OptIn(PerformancePitfall::class, UnsafeKMathAPI::class)
|
||||
override val StructureND<Double>.ndArray: INDArray
|
||||
get() = when (this) {
|
||||
is Nd4jArrayStructure<Double> -> ndArray
|
||||
else -> Nd4j.zeros(*shape).also {
|
||||
else -> Nd4j.zeros(*shape.asArray()).also {
|
||||
elements().forEach { (idx, value) -> it.putScalar(idx, value) }
|
||||
}
|
||||
}
|
||||
@ -225,7 +227,7 @@ public val DoubleField.nd4j: DoubleNd4jArrayFieldOps get() = DoubleNd4jArrayFiel
|
||||
public class DoubleNd4jArrayField(override val shape: Shape) : DoubleNd4jArrayFieldOps(), FieldND<Double, DoubleField>
|
||||
|
||||
public fun DoubleField.nd4j(shapeFirst: Int, vararg shapeRest: Int): DoubleNd4jArrayField =
|
||||
DoubleNd4jArrayField(intArrayOf(shapeFirst, * shapeRest))
|
||||
DoubleNd4jArrayField(Shape(shapeFirst, * shapeRest))
|
||||
|
||||
|
||||
/**
|
||||
@ -236,11 +238,11 @@ public open class FloatNd4jArrayFieldOps : Nd4jArrayExtendedFieldOps<Float, Floa
|
||||
|
||||
override fun INDArray.wrap(): Nd4jArrayStructure<Float> = asFloatStructure()
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
@OptIn(PerformancePitfall::class, UnsafeKMathAPI::class)
|
||||
override val StructureND<Float>.ndArray: INDArray
|
||||
get() = when (this) {
|
||||
is Nd4jArrayStructure<Float> -> ndArray
|
||||
else -> Nd4j.zeros(*shape).also {
|
||||
else -> Nd4j.zeros(*shape.asArray()).also {
|
||||
elements().forEach { (idx, value) -> it.putScalar(idx, value) }
|
||||
}
|
||||
}
|
||||
@ -274,7 +276,7 @@ public class FloatNd4jArrayField(override val shape: Shape) : FloatNd4jArrayFiel
|
||||
public val FloatField.nd4j: FloatNd4jArrayFieldOps get() = FloatNd4jArrayFieldOps
|
||||
|
||||
public fun FloatField.nd4j(shapeFirst: Int, vararg shapeRest: Int): FloatNd4jArrayField =
|
||||
FloatNd4jArrayField(intArrayOf(shapeFirst, * shapeRest))
|
||||
FloatNd4jArrayField(Shape(shapeFirst, * shapeRest))
|
||||
|
||||
/**
|
||||
* Represents [RingND] over [Nd4jArrayIntStructure].
|
||||
@ -284,11 +286,11 @@ public open class IntNd4jArrayRingOps : Nd4jArrayRingOps<Int, IntRing> {
|
||||
|
||||
override fun INDArray.wrap(): Nd4jArrayStructure<Int> = asIntStructure()
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
@OptIn(PerformancePitfall::class, UnsafeKMathAPI::class)
|
||||
override val StructureND<Int>.ndArray: INDArray
|
||||
get() = when (this) {
|
||||
is Nd4jArrayStructure<Int> -> ndArray
|
||||
else -> Nd4j.zeros(*shape).also {
|
||||
else -> Nd4j.zeros(*shape.asArray()).also {
|
||||
elements().forEach { (idx, value) -> it.putScalar(idx, value) }
|
||||
}
|
||||
}
|
||||
@ -313,4 +315,4 @@ public val IntRing.nd4j: IntNd4jArrayRingOps get() = IntNd4jArrayRingOps
|
||||
public class IntNd4jArrayRing(override val shape: Shape) : IntNd4jArrayRingOps(), RingND<Int, IntRing>
|
||||
|
||||
public fun IntRing.nd4j(shapeFirst: Int, vararg shapeRest: Int): IntNd4jArrayRing =
|
||||
IntNd4jArrayRing(intArrayOf(shapeFirst, * shapeRest))
|
||||
IntNd4jArrayRing(Shape(shapeFirst, * shapeRest))
|
@ -7,8 +7,7 @@ package space.kscience.kmath.nd4j
|
||||
|
||||
import org.nd4j.linalg.api.ndarray.INDArray
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.nd.MutableStructureND
|
||||
import space.kscience.kmath.nd.StructureND
|
||||
import space.kscience.kmath.nd.*
|
||||
|
||||
/**
|
||||
* Represents a [StructureND] wrapping an [INDArray] object.
|
||||
@ -22,7 +21,7 @@ public sealed class Nd4jArrayStructure<T> : MutableStructureND<T> {
|
||||
*/
|
||||
public abstract val ndArray: INDArray
|
||||
|
||||
override val shape: IntArray get() = ndArray.shape().toIntArray()
|
||||
override val shape: Shape get() = Shape(ndArray.shape().toIntArray())
|
||||
|
||||
internal abstract fun elementsIterator(): Iterator<Pair<IntArray, T>>
|
||||
internal fun indicesIterator(): Iterator<IntArray> = ndArray.indicesIterator()
|
||||
@ -31,20 +30,31 @@ public sealed class Nd4jArrayStructure<T> : MutableStructureND<T> {
|
||||
override fun elements(): Sequence<Pair<IntArray, T>> = Sequence(::elementsIterator)
|
||||
}
|
||||
|
||||
private data class Nd4jArrayIntStructure(override val ndArray: INDArray) : Nd4jArrayStructure<Int>() {
|
||||
public data class Nd4jArrayIntStructure(override val ndArray: INDArray) : Nd4jArrayStructure<Int>(), StructureNDOfInt {
|
||||
override fun elementsIterator(): Iterator<Pair<IntArray, Int>> = ndArray.intIterator()
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun get(index: IntArray): Int = ndArray.getInt(*index)
|
||||
|
||||
override fun getInt(index: IntArray): Int = ndArray.getInt(*index)
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun set(index: IntArray, value: Int): Unit = run { ndArray.putScalar(index, value) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Wraps this [INDArray] to [Nd4jArrayStructure].
|
||||
*/
|
||||
public fun INDArray.asIntStructure(): Nd4jArrayStructure<Int> = Nd4jArrayIntStructure(this)
|
||||
public fun INDArray.asIntStructure(): Nd4jArrayIntStructure = Nd4jArrayIntStructure(this)
|
||||
|
||||
private data class Nd4jArrayDoubleStructure(override val ndArray: INDArray) : Nd4jArrayStructure<Double>() {
|
||||
public data class Nd4jArrayDoubleStructure(override val ndArray: INDArray) : Nd4jArrayStructure<Double>(), StructureNDOfDouble {
|
||||
override fun elementsIterator(): Iterator<Pair<IntArray, Double>> = ndArray.realIterator()
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun get(index: IntArray): Double = ndArray.getDouble(*index)
|
||||
|
||||
override fun getDouble(index: IntArray): Double = ndArray.getDouble(*index)
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun set(index: IntArray, value: Double): Unit = run { ndArray.putScalar(index, value) }
|
||||
}
|
||||
|
||||
@ -53,9 +63,12 @@ private data class Nd4jArrayDoubleStructure(override val ndArray: INDArray) : Nd
|
||||
*/
|
||||
public fun INDArray.asDoubleStructure(): Nd4jArrayStructure<Double> = Nd4jArrayDoubleStructure(this)
|
||||
|
||||
private data class Nd4jArrayFloatStructure(override val ndArray: INDArray) : Nd4jArrayStructure<Float>() {
|
||||
public data class Nd4jArrayFloatStructure(override val ndArray: INDArray) : Nd4jArrayStructure<Float>() {
|
||||
override fun elementsIterator(): Iterator<Pair<IntArray, Float>> = ndArray.floatIterator()
|
||||
@PerformancePitfall
|
||||
override fun get(index: IntArray): Float = ndArray.getFloat(*index)
|
||||
|
||||
@PerformancePitfall
|
||||
override fun set(index: IntArray, value: Float): Unit = run { ndArray.putScalar(index, value) }
|
||||
}
|
||||
|
||||
|
@ -13,9 +13,8 @@ import org.nd4j.linalg.factory.Nd4j
|
||||
import org.nd4j.linalg.factory.ops.NDBase
|
||||
import org.nd4j.linalg.ops.transforms.Transforms
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.nd.ColumnStrides
|
||||
import space.kscience.kmath.nd.Shape
|
||||
import space.kscience.kmath.nd.StructureND
|
||||
import space.kscience.kmath.misc.UnsafeKMathAPI
|
||||
import space.kscience.kmath.nd.*
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.operations.Field
|
||||
import space.kscience.kmath.tensors.api.AnalyticTensorAlgebra
|
||||
@ -96,7 +95,7 @@ public sealed interface Nd4jTensorAlgebra<T : Number, A : Field<T>> : AnalyticTe
|
||||
|
||||
override fun StructureND<T>.unaryMinus(): Nd4jArrayStructure<T> = ndArray.neg().wrap()
|
||||
override fun Tensor<T>.getTensor(i: Int): Nd4jArrayStructure<T> = ndArray.slice(i.toLong()).wrap()
|
||||
override fun Tensor<T>.transposed(i: Int, j: Int): Nd4jArrayStructure<T> = ndArray.swapAxes(i, j).wrap()
|
||||
override fun StructureND<T>.transposed(i: Int, j: Int): Nd4jArrayStructure<T> = ndArray.swapAxes(i, j).wrap()
|
||||
override fun StructureND<T>.dot(other: StructureND<T>): Nd4jArrayStructure<T> = ndArray.mmul(other.ndArray).wrap()
|
||||
|
||||
override fun StructureND<T>.min(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
|
||||
@ -108,7 +107,9 @@ public sealed interface Nd4jTensorAlgebra<T : Number, A : Field<T>> : AnalyticTe
|
||||
override fun StructureND<T>.max(dim: Int, keepDim: Boolean): Nd4jArrayStructure<T> =
|
||||
ndArray.max(keepDim, dim).wrap()
|
||||
|
||||
override fun Tensor<T>.view(shape: IntArray): Nd4jArrayStructure<T> = ndArray.reshape(shape).wrap()
|
||||
@OptIn(UnsafeKMathAPI::class)
|
||||
override fun Tensor<T>.view(shape: Shape): Nd4jArrayStructure<T> = ndArray.reshape(shape.asArray()).wrap()
|
||||
|
||||
override fun Tensor<T>.viewAs(other: StructureND<T>): Nd4jArrayStructure<T> = view(other.shape)
|
||||
|
||||
override fun StructureND<T>.argMin(dim: Int, keepDim: Boolean): Tensor<Int> =
|
||||
@ -176,8 +177,9 @@ public object DoubleNd4jTensorAlgebra : Nd4jTensorAlgebra<Double, DoubleField> {
|
||||
|
||||
override fun INDArray.wrap(): Nd4jArrayStructure<Double> = asDoubleStructure()
|
||||
|
||||
@OptIn(UnsafeKMathAPI::class)
|
||||
override fun structureND(shape: Shape, initializer: DoubleField.(IntArray) -> Double): Nd4jArrayStructure<Double> {
|
||||
val array: INDArray = Nd4j.zeros(*shape)
|
||||
val array: INDArray = Nd4j.zeros(*shape.asArray())
|
||||
val indices = ColumnStrides(shape)
|
||||
indices.asSequence().forEach { index ->
|
||||
array.putScalar(index, elementAlgebra.initializer(index))
|
||||
@ -186,21 +188,21 @@ public object DoubleNd4jTensorAlgebra : Nd4jTensorAlgebra<Double, DoubleField> {
|
||||
}
|
||||
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
@OptIn(PerformancePitfall::class, UnsafeKMathAPI::class)
|
||||
override val StructureND<Double>.ndArray: INDArray
|
||||
get() = when (this) {
|
||||
is Nd4jArrayStructure<Double> -> ndArray
|
||||
else -> Nd4j.zeros(*shape).also {
|
||||
else -> Nd4j.zeros(*shape.asArray()).also {
|
||||
elements().forEach { (idx, value) -> it.putScalar(idx, value) }
|
||||
}
|
||||
}
|
||||
|
||||
override fun StructureND<Double>.valueOrNull(): Double? =
|
||||
if (shape contentEquals intArrayOf(1)) ndArray.getDouble(0) else null
|
||||
if (shape contentEquals Shape(1)) ndArray.getDouble(0) else null
|
||||
|
||||
// TODO rewrite
|
||||
override fun diagonalEmbedding(
|
||||
diagonalEntries: Tensor<Double>,
|
||||
diagonalEntries: StructureND<Double>,
|
||||
offset: Int,
|
||||
dim1: Int,
|
||||
dim2: Int,
|
||||
|
@ -7,6 +7,7 @@ package space.kscience.kmath.nd4j
|
||||
|
||||
import org.nd4j.linalg.factory.Nd4j
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.nd.asList
|
||||
import space.kscience.kmath.nd.get
|
||||
import kotlin.test.Test
|
||||
import kotlin.test.assertEquals
|
||||
@ -27,7 +28,7 @@ internal class Nd4jArrayStructureTest {
|
||||
fun testShape() {
|
||||
val nd = Nd4j.rand(10, 2, 3, 6) ?: fail()
|
||||
val struct = nd.asDoubleStructure()
|
||||
assertEquals(intArrayOf(10, 2, 3, 6).toList(), struct.shape.toList())
|
||||
assertEquals(intArrayOf(10, 2, 3, 6).toList(), struct.shape.asList())
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -28,6 +28,8 @@ public class DoubleTensorFlowOutput(
|
||||
|
||||
}
|
||||
|
||||
internal fun Shape.toLongArray(): LongArray = LongArray(size) { get(it).toLong() }
|
||||
|
||||
public class DoubleTensorFlowAlgebra internal constructor(
|
||||
graph: Graph,
|
||||
) : TensorFlowAlgebra<Double, TFloat64, DoubleField>(graph), PowerOperations<StructureND<Double>> {
|
||||
|
@ -18,9 +18,12 @@ import org.tensorflow.types.TInt32
|
||||
import org.tensorflow.types.family.TNumber
|
||||
import org.tensorflow.types.family.TType
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.misc.UnsafeKMathAPI
|
||||
import space.kscience.kmath.misc.UnstableKMathAPI
|
||||
import space.kscience.kmath.nd.Shape
|
||||
import space.kscience.kmath.nd.StructureND
|
||||
import space.kscience.kmath.nd.asArray
|
||||
import space.kscience.kmath.nd.contentEquals
|
||||
import space.kscience.kmath.operations.Ring
|
||||
import space.kscience.kmath.tensors.api.Tensor
|
||||
import space.kscience.kmath.tensors.api.TensorAlgebra
|
||||
@ -38,7 +41,7 @@ public sealed interface TensorFlowTensor<T> : Tensor<T>
|
||||
*/
|
||||
@JvmInline
|
||||
public value class TensorFlowArray<T>(public val tensor: NdArray<T>) : Tensor<T> {
|
||||
override val shape: Shape get() = tensor.shape().asArray().toIntArray()
|
||||
override val shape: Shape get() = Shape(tensor.shape().asArray().toIntArray())
|
||||
|
||||
override fun get(index: IntArray): T = tensor.getObject(*index.toLongArray())
|
||||
|
||||
@ -62,7 +65,7 @@ public abstract class TensorFlowOutput<T, TT : TType>(
|
||||
public var output: Output<TT> = output
|
||||
internal set
|
||||
|
||||
override val shape: Shape get() = output.shape().asArray().toIntArray()
|
||||
override val shape: Shape get() = Shape(output.shape().asArray().toIntArray())
|
||||
|
||||
protected abstract fun org.tensorflow.Tensor.actualizeTensor(): NdArray<T>
|
||||
|
||||
@ -96,8 +99,8 @@ public abstract class TensorFlowAlgebra<T, TT : TNumber, A : Ring<T>> internal c
|
||||
|
||||
protected abstract fun const(value: T): Constant<TT>
|
||||
|
||||
override fun StructureND<T>.valueOrNull(): T? = if (shape contentEquals intArrayOf(1))
|
||||
get(Shape(0)) else null
|
||||
override fun StructureND<T>.valueOrNull(): T? = if (shape contentEquals Shape(1))
|
||||
get(intArrayOf(0)) else null
|
||||
|
||||
/**
|
||||
* Perform binary lazy operation on tensor. Both arguments are implicitly converted
|
||||
@ -188,12 +191,13 @@ public abstract class TensorFlowAlgebra<T, TT : TNumber, A : Ring<T>> internal c
|
||||
StridedSliceHelper.stridedSlice(ops.scope(), it, Indices.at(i.toLong()))
|
||||
}
|
||||
|
||||
override fun Tensor<T>.transposed(i: Int, j: Int): Tensor<T> = operate {
|
||||
override fun StructureND<T>.transposed(i: Int, j: Int): Tensor<T> = operate {
|
||||
ops.linalg.transpose(it, ops.constant(intArrayOf(i, j)))
|
||||
}
|
||||
|
||||
override fun Tensor<T>.view(shape: IntArray): Tensor<T> = operate {
|
||||
ops.reshape(it, ops.constant(shape))
|
||||
override fun Tensor<T>.view(shape: Shape): Tensor<T> = operate {
|
||||
@OptIn(UnsafeKMathAPI::class)
|
||||
ops.reshape(it, ops.constant(shape.asArray()))
|
||||
}
|
||||
|
||||
override fun Tensor<T>.viewAs(other: StructureND<T>): Tensor<T> = operate(other) { l, r ->
|
||||
@ -208,7 +212,7 @@ public abstract class TensorFlowAlgebra<T, TT : TNumber, A : Ring<T>> internal c
|
||||
}
|
||||
|
||||
override fun diagonalEmbedding(
|
||||
diagonalEntries: Tensor<T>,
|
||||
diagonalEntries: StructureND<T>,
|
||||
offset: Int,
|
||||
dim1: Int,
|
||||
dim2: Int,
|
||||
|
@ -7,6 +7,7 @@ package space.kscience.kmath.tensorflow
|
||||
|
||||
import org.junit.jupiter.api.Test
|
||||
import space.kscience.kmath.misc.UnstableKMathAPI
|
||||
import space.kscience.kmath.nd.Shape
|
||||
import space.kscience.kmath.nd.get
|
||||
import space.kscience.kmath.nd.structureND
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
@ -31,8 +32,8 @@ class DoubleTensorFlowOps {
|
||||
fun dot(){
|
||||
val dim = 1000
|
||||
|
||||
val tensor1 = DoubleTensorAlgebra.randomNormal(shape = intArrayOf(dim, dim), 12224)
|
||||
val tensor2 = DoubleTensorAlgebra.randomNormal(shape = intArrayOf(dim, dim), 12225)
|
||||
val tensor1 = DoubleTensorAlgebra.randomNormal(shape = Shape(dim, dim), 12224)
|
||||
val tensor2 = DoubleTensorAlgebra.randomNormal(shape = Shape(dim, dim), 12225)
|
||||
|
||||
DoubleField.produceWithTF {
|
||||
tensor1 dot tensor2
|
||||
|
@ -21,7 +21,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
*
* @return the determinant.
*/
public fun StructureND<T>.det(): Tensor<T>
public fun StructureND<T>.det(): StructureND<T>

/**
* Computes the multiplicative inverse matrix of a square matrix input, or of each square matrix in a batched input.
@ -31,7 +31,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
*
* @return the multiplicative inverse of a matrix.
*/
public fun StructureND<T>.inv(): Tensor<T>
public fun StructureND<T>.inv(): StructureND<T>

/**
* Cholesky decomposition.
@ -47,7 +47,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
* @receiver the `input`.
* @return the batch of `L` matrices.
*/
public fun StructureND<T>.cholesky(): Tensor<T>
public fun StructureND<T>.cholesky(): StructureND<T>

/**
* QR decomposition.
@ -61,7 +61,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
* @receiver the `input`.
* @return pair of `Q` and `R` tensors.
*/
public fun StructureND<T>.qr(): Pair<Tensor<T>, Tensor<T>>
public fun StructureND<T>.qr(): Pair<StructureND<T>, StructureND<T>>

/**
* LUP decomposition
@ -75,7 +75,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
* @receiver the `input`.
* @return triple of P, L and U tensors
*/
public fun StructureND<T>.lu(): Triple<Tensor<T>, Tensor<T>, Tensor<T>>
public fun StructureND<T>.lu(): Triple<StructureND<T>, StructureND<T>, StructureND<T>>

/**
* Singular Value Decomposition.
@ -91,7 +91,7 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
* @receiver the `input`.
* @return triple `Triple(U, S, V)`.
*/
public fun StructureND<T>.svd(): Triple<Tensor<T>, Tensor<T>, Tensor<T>>
public fun StructureND<T>.svd(): Triple<StructureND<T>, StructureND<T>, StructureND<T>>

/**
* Returns eigenvalues and eigenvectors of a real symmetric matrix `input` or a batch of real symmetric matrices,
@ -101,6 +101,6 @@ public interface LinearOpsTensorAlgebra<T, A : Field<T>> : TensorPartialDivision
* @receiver the `input`.
* @return a pair `eigenvalues to eigenvectors`
*/
public fun StructureND<T>.symEig(): Pair<Tensor<T>, Tensor<T>>
public fun StructureND<T>.symEig(): Pair<StructureND<T>, StructureND<T>>

}
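The return-type relaxation above (Tensor to StructureND) does not change call sites. An illustrative sketch, not part of the commit, using DoubleTensorAlgebra, which implements this interface (its Shape-based `fromArray` appears later in this diff); the concrete input values are made up for the example:

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun linearOpsDemo() = with(DoubleTensorAlgebra) {
    val matrix = fromArray(Shape(2, 2), doubleArrayOf(4.0, 1.0, 1.0, 3.0))
    val (q, r) = matrix.qr()        // Pair<StructureND<Double>, StructureND<Double>> after this change
    val (u, s, v) = matrix.svd()    // Triple of StructureND<Double>
    val lower = matrix.cholesky()   // the input here is symmetric positive-definite
    println(lower.shape)
}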
@ -6,6 +6,7 @@
package space.kscience.kmath.tensors.api

import space.kscience.kmath.nd.RingOpsND
import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.operations.Ring

@ -176,11 +177,13 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* Returns a tensor that is a transposed version of this tensor. The given dimensions [i] and [j] are swapped.
* For more information: https://pytorch.org/docs/stable/generated/torch.transpose.html
*
* If axis indices are negative, they are counted from shape end.
*
* @param i the first dimension to be transposed
* @param j the second dimension to be transposed
* @return transposed tensor
*/
public fun Tensor<T>.transposed(i: Int = -2, j: Int = -1): Tensor<T>
public fun StructureND<T>.transposed(i: Int = shape.size - 2, j: Int = shape.size - 1): Tensor<T>

/**
* Returns a new tensor with the same data as the self tensor but of a different shape.
@ -190,7 +193,7 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* @param shape the desired size
* @return tensor with new shape
*/
public fun Tensor<T>.view(shape: IntArray): Tensor<T>
public fun Tensor<T>.view(shape: Shape): Tensor<T>

/**
* View this tensor as the same size as [other].
@ -248,7 +251,7 @@ public interface TensorAlgebra<T, A : Ring<T>> : RingOpsND<T, A> {
* are filled by [diagonalEntries]
*/
public fun diagonalEmbedding(
diagonalEntries: Tensor<T>,
diagonalEntries: StructureND<T>,
offset: Int = 0,
dim1: Int = -2,
dim2: Int = -1,
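A short sketch, not part of the commit, of the two signature changes above: default transpose axes now computed from the shape, and `view` taking a `Shape`. It goes through DoubleTensorAlgebra, which implements this interface later in the diff:

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun reshapeDemo() = with(DoubleTensorAlgebra) {
    val t = fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
    val swapped = t.transposed()     // defaults swap the last two dimensions: a 3x2 result
    val flat = t.view(Shape(6))      // same data viewed through a different Shape
    println(swapped.shape)
    println(flat.shape)
}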
@ -7,6 +7,7 @@ package space.kscience.kmath.tensors.core
|
||||
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.nd.RowStrides
|
||||
import space.kscience.kmath.nd.Shape
|
||||
import space.kscience.kmath.nd.Strides
|
||||
import space.kscience.kmath.structures.MutableBuffer
|
||||
import space.kscience.kmath.tensors.api.Tensor
|
||||
@ -15,7 +16,7 @@ import space.kscience.kmath.tensors.api.Tensor
|
||||
* Represents [Tensor] over a [MutableBuffer] intended to be used through [DoubleTensor] and [IntTensor]
|
||||
*/
|
||||
public abstract class BufferedTensor<T>(
|
||||
override val shape: IntArray,
|
||||
override val shape: Shape,
|
||||
) : Tensor<T> {
|
||||
|
||||
public abstract val source: MutableBuffer<T>
|
||||
|
@ -7,10 +7,7 @@ package space.kscience.kmath.tensors.core
|
||||
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.misc.UnstableKMathAPI
|
||||
import space.kscience.kmath.nd.MutableStructure2D
|
||||
import space.kscience.kmath.nd.MutableStructureND
|
||||
import space.kscience.kmath.nd.Shape
|
||||
import space.kscience.kmath.nd.Strides
|
||||
import space.kscience.kmath.nd.*
|
||||
import space.kscience.kmath.structures.*
|
||||
import space.kscience.kmath.tensors.core.internal.toPrettyString
|
||||
import kotlin.jvm.JvmInline
|
||||
@ -87,22 +84,30 @@ public inline fun OffsetDoubleBuffer.mapInPlace(operation: (Double) -> Double) {
|
||||
* [DoubleTensor] always uses row-based strides
|
||||
*/
|
||||
public class DoubleTensor(
|
||||
shape: IntArray,
|
||||
shape: Shape,
|
||||
override val source: OffsetDoubleBuffer,
|
||||
) : BufferedTensor<Double>(shape) {
|
||||
) : BufferedTensor<Double>(shape), MutableStructureNDOfDouble {
|
||||
|
||||
init {
|
||||
require(linearSize == source.size) { "Source buffer size must be equal tensor size" }
|
||||
}
|
||||
|
||||
public constructor(shape: IntArray, buffer: DoubleBuffer) : this(shape, OffsetDoubleBuffer(buffer, 0, buffer.size))
|
||||
public constructor(shape: Shape, buffer: DoubleBuffer) : this(shape, OffsetDoubleBuffer(buffer, 0, buffer.size))
|
||||
|
||||
override fun get(index: IntArray): Double = this.source[indices.offset(index)]
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun get(index: IntArray): Double = source[indices.offset(index)]
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun set(index: IntArray, value: Double) {
|
||||
source[indices.offset(index)] = value
|
||||
}
|
||||
|
||||
override fun getDouble(index: IntArray): Double = get(index)
|
||||
|
||||
override fun setDouble(index: IntArray, value: Double) {
|
||||
set(index, value)
|
||||
}
|
||||
|
||||
override fun toString(): String = toPrettyString()
|
||||
}
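A construction sketch, not from the diff, for the Shape-based secondary constructor and the MutableStructureNDOfDouble accessors shown above:

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.structures.DoubleBuffer
import space.kscience.kmath.tensors.core.DoubleTensor

fun doubleTensorDemo() {
    val tensor = DoubleTensor(Shape(2, 2), DoubleBuffer(4) { it.toDouble() })
    tensor.setDouble(intArrayOf(0, 1), 42.0)      // non-boxing accessor from MutableStructureNDOfDouble
    println(tensor.getDouble(intArrayOf(0, 1)))   // 42.0
}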
|
||||
@ -140,25 +145,26 @@ public value class DoubleTensor2D(public val tensor: DoubleTensor) : MutableStru
|
||||
|
||||
@PerformancePitfall
|
||||
override fun elements(): Sequence<Pair<IntArray, Double>> = tensor.elements()
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun get(index: IntArray): Double = tensor[index]
|
||||
override val shape: Shape get() = tensor.shape
|
||||
}
|
||||
|
||||
public fun DoubleTensor.asDoubleTensor2D(): DoubleTensor2D = DoubleTensor2D(this)
|
||||
|
||||
public fun DoubleTensor.asDoubleBuffer(): OffsetDoubleBuffer = if(shape.size == 1){
|
||||
public fun DoubleTensor.asDoubleBuffer(): OffsetDoubleBuffer = if (shape.size == 1) {
|
||||
source
|
||||
} else {
|
||||
error("Only 1D tensors could be cast to 1D" )
|
||||
error("Only 1D tensors could be cast to 1D")
|
||||
}
|
||||
|
||||
public inline fun DoubleTensor.forEachMatrix(block: (index: IntArray, matrix: DoubleTensor2D) -> Unit) {
|
||||
val n = shape.size
|
||||
check(n >= 2) { "Expected tensor with 2 or more dimensions, got size $n" }
|
||||
val matrixOffset = shape[n - 1] * shape[n - 2]
|
||||
val matrixShape = intArrayOf(shape[n - 2], shape[n - 1])
|
||||
val matrixShape = Shape(shape[n - 2], shape[n - 1])
|
||||
|
||||
val size = Strides.linearSizeOf(matrixShape)
|
||||
val size = matrixShape.linearSize
|
||||
for (i in 0 until linearSize / matrixOffset) {
|
||||
val offset = i * matrixOffset
|
||||
val index = indices.index(offset).sliceArray(0 until (shape.size - 2))
|
||||
|
@ -11,7 +11,6 @@ package space.kscience.kmath.tensors.core
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.misc.UnstableKMathAPI
|
||||
import space.kscience.kmath.nd.*
|
||||
import space.kscience.kmath.nd.Strides.Companion.linearSizeOf
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.structures.*
|
||||
import space.kscience.kmath.tensors.api.AnalyticTensorAlgebra
|
||||
@ -93,7 +92,7 @@ public open class DoubleTensorAlgebra :
|
||||
|
||||
override fun StructureND<Double>.valueOrNull(): Double? {
|
||||
val dt = asDoubleTensor()
|
||||
return if (dt.shape contentEquals intArrayOf(1)) dt.source[0] else null
|
||||
return if (dt.shape contentEquals Shape(1)) dt.source[0] else null
|
||||
}
|
||||
|
||||
override fun StructureND<Double>.value(): Double = valueOrNull()
|
||||
@ -106,7 +105,7 @@ public open class DoubleTensorAlgebra :
|
||||
* @param array one-dimensional data array.
|
||||
* @return tensor with the [shape] shape and [array] data.
|
||||
*/
|
||||
public fun fromArray(shape: IntArray, array: DoubleArray): DoubleTensor {
|
||||
public fun fromArray(shape: Shape, array: DoubleArray): DoubleTensor {
|
||||
checkNotEmptyShape(shape)
|
||||
checkEmptyDoubleBuffer(array)
|
||||
checkBufferShapeConsistency(shape, array)
|
||||
@ -120,18 +119,18 @@ public open class DoubleTensorAlgebra :
|
||||
* @param initializer mapping tensor indices to values.
|
||||
* @return tensor with the [shape] shape and data generated by the [initializer].
|
||||
*/
|
||||
override fun structureND(shape: IntArray, initializer: DoubleField.(IntArray) -> Double): DoubleTensor = fromArray(
|
||||
override fun structureND(shape: Shape, initializer: DoubleField.(IntArray) -> Double): DoubleTensor = fromArray(
|
||||
shape,
|
||||
RowStrides(shape).asSequence().map { DoubleField.initializer(it) }.toMutableList().toDoubleArray()
|
||||
)
|
||||
|
||||
override fun Tensor<Double>.getTensor(i: Int): DoubleTensor {
|
||||
val dt = asDoubleTensor()
|
||||
val lastShape = shape.drop(1).toIntArray()
|
||||
val newShape = if (lastShape.isNotEmpty()) lastShape else intArrayOf(1)
|
||||
val lastShape = shape.last(shape.size - 1)
|
||||
val newShape: Shape = if (lastShape.isNotEmpty()) lastShape else Shape(1)
|
||||
return DoubleTensor(
|
||||
newShape,
|
||||
dt.source.view(newShape.reduce(Int::times) * i, linearSizeOf(newShape))
|
||||
dt.source.view(newShape.linearSize * i, newShape.linearSize)
|
||||
)
|
||||
}
|
||||
|
||||
@ -142,9 +141,9 @@ public open class DoubleTensorAlgebra :
|
||||
* @param shape array of integers defining the shape of the output tensor.
|
||||
* @return tensor with the [shape] shape and filled with [value].
|
||||
*/
|
||||
public fun full(value: Double, shape: IntArray): DoubleTensor {
|
||||
public fun full(value: Double, shape: Shape): DoubleTensor {
|
||||
checkNotEmptyShape(shape)
|
||||
val buffer = DoubleBuffer(shape.reduce(Int::times)) { value }
|
||||
val buffer = DoubleBuffer(shape.linearSize) { value }
|
||||
return DoubleTensor(shape, buffer)
|
||||
}
|
||||
|
||||
@ -166,7 +165,7 @@ public open class DoubleTensorAlgebra :
|
||||
* @param shape array of integers defining the shape of the output tensor.
|
||||
* @return tensor filled with the scalar value `0.0`, with the [shape] shape.
|
||||
*/
|
||||
public fun zeros(shape: IntArray): DoubleTensor = full(0.0, shape)
|
||||
public fun zeros(shape: Shape): DoubleTensor = full(0.0, shape)
|
||||
|
||||
/**
|
||||
* Returns a tensor filled with the scalar value `0.0`, with the same shape as a given array.
|
||||
@ -181,7 +180,7 @@ public open class DoubleTensorAlgebra :
|
||||
* @param shape array of integers defining the shape of the output tensor.
|
||||
* @return tensor filled with the scalar value `1.0`, with the [shape] shape.
|
||||
*/
|
||||
public fun ones(shape: IntArray): DoubleTensor = full(1.0, shape)
|
||||
public fun ones(shape: Shape): DoubleTensor = full(1.0, shape)
|
||||
|
||||
/**
|
||||
* Returns a tensor filled with the scalar value `1.0`, with the same shape as a given array.
|
||||
@ -197,7 +196,7 @@ public open class DoubleTensorAlgebra :
|
||||
* @return a 2-D tensor with ones on the diagonal and zeros elsewhere.
|
||||
*/
|
||||
public fun eye(n: Int): DoubleTensor {
|
||||
val shape = intArrayOf(n, n)
|
||||
val shape = Shape(n, n)
|
||||
val buffer = DoubleBuffer(n * n) { 0.0 }
|
||||
val res = DoubleTensor(shape, buffer)
|
||||
for (i in 0 until n) {
|
||||
@ -235,7 +234,7 @@ public open class DoubleTensorAlgebra :
|
||||
|
||||
override fun Tensor<Double>.minusAssign(arg: StructureND<Double>) {
|
||||
checkShapesCompatible(this, arg)
|
||||
mapIndexedInPlace { index, value -> value - arg[index] }
|
||||
mapIndexedInPlace { index, value -> value - arg.getDouble(index) }
|
||||
}
|
||||
|
||||
override fun Double.times(arg: StructureND<Double>): DoubleTensor = arg.map { this@times * it }
|
||||
@ -270,32 +269,44 @@ public open class DoubleTensorAlgebra :
|
||||
|
||||
override fun StructureND<Double>.unaryMinus(): DoubleTensor = map { -it }
|
||||
|
||||
override fun Tensor<Double>.transposed(i: Int, j: Int): DoubleTensor {
|
||||
// TODO change strides instead of changing content
|
||||
val dt = asDoubleTensor()
|
||||
val ii = dt.minusIndex(i)
|
||||
val jj = dt.minusIndex(j)
|
||||
checkTranspose(dt.dimension, ii, jj)
|
||||
val n = dt.linearSize
|
||||
val resBuffer = DoubleArray(n)
|
||||
|
||||
val resShape = dt.shape.copyOf()
|
||||
resShape[ii] = resShape[jj].also { resShape[jj] = resShape[ii] }
|
||||
|
||||
val resTensor = DoubleTensor(resShape, resBuffer.asBuffer())
|
||||
|
||||
for (offset in 0 until n) {
|
||||
val oldMultiIndex = dt.indices.index(offset)
|
||||
val newMultiIndex = oldMultiIndex.copyOf()
|
||||
newMultiIndex[ii] = newMultiIndex[jj].also { newMultiIndex[jj] = newMultiIndex[ii] }
|
||||
|
||||
val linearIndex = resTensor.indices.offset(newMultiIndex)
|
||||
resTensor.source[linearIndex] = dt.source[offset]
|
||||
override fun StructureND<Double>.transposed(i: Int, j: Int): Tensor<Double> {
|
||||
val actualI = if (i >= 0) i else shape.size + i
|
||||
val actualJ = if(j>=0) j else shape.size + j
|
||||
return asDoubleTensor().permute(
|
||||
shape.transposed(actualI, actualJ)
|
||||
) { originIndex ->
|
||||
originIndex.copyOf().apply {
|
||||
val ith = get(actualI)
|
||||
val jth = get(actualJ)
|
||||
set(actualI, jth)
|
||||
set(actualJ, ith)
|
||||
}
|
||||
}
|
||||
return resTensor
|
||||
// // TODO change strides instead of changing content
|
||||
// val dt = asDoubleTensor()
|
||||
// val ii = dt.minusIndex(i)
|
||||
// val jj = dt.minusIndex(j)
|
||||
// checkTranspose(dt.dimension, ii, jj)
|
||||
// val n = dt.linearSize
|
||||
// val resBuffer = DoubleArray(n)
|
||||
//
|
||||
// val resShape = dt.shape.copyOf()
|
||||
// resShape[ii] = resShape[jj].also { resShape[jj] = resShape[ii] }
|
||||
//
|
||||
// val resTensor = DoubleTensor(resShape, resBuffer.asBuffer())
|
||||
//
|
||||
// for (offset in 0 until n) {
|
||||
// val oldMultiIndex = dt.indices.index(offset)
|
||||
// val newMultiIndex = oldMultiIndex.copyOf()
|
||||
// newMultiIndex[ii] = newMultiIndex[jj].also { newMultiIndex[jj] = newMultiIndex[ii] }
|
||||
//
|
||||
// val linearIndex = resTensor.indices.offset(newMultiIndex)
|
||||
// resTensor.source[linearIndex] = dt.source[offset]
|
||||
// }
|
||||
// return resTensor
|
||||
}
|
||||
|
||||
override fun Tensor<Double>.view(shape: IntArray): DoubleTensor {
|
||||
override fun Tensor<Double>.view(shape: Shape): DoubleTensor {
|
||||
checkView(asDoubleTensor(), shape)
|
||||
return DoubleTensor(shape, asDoubleTensor().source)
|
||||
}
|
||||
@ -335,7 +346,7 @@ public open class DoubleTensorAlgebra :
|
||||
@UnstableKMathAPI
|
||||
public infix fun StructureND<Double>.matmul(other: StructureND<Double>): DoubleTensor {
|
||||
if (shape.size == 1 && other.shape.size == 1) {
|
||||
return DoubleTensor(intArrayOf(1), DoubleBuffer(times(other).sum()))
|
||||
return DoubleTensor(Shape(1), DoubleBuffer(times(other).sum()))
|
||||
}
|
||||
|
||||
var penultimateDim = false
|
||||
@ -347,7 +358,7 @@ public open class DoubleTensorAlgebra :
|
||||
|
||||
if (shape.size == 1) {
|
||||
penultimateDim = true
|
||||
newThis = newThis.view(intArrayOf(1) + shape)
|
||||
newThis = newThis.view(Shape(1) + shape)
|
||||
}
|
||||
|
||||
if (other.shape.size == 1) {
|
||||
@ -367,8 +378,8 @@ public open class DoubleTensorAlgebra :
|
||||
"Tensors dot operation dimension mismatch: ($l, $m1) x ($m2, $n)"
|
||||
}
|
||||
|
||||
val resShape = newThis.shape.sliceArray(0..(newThis.shape.size - 2)) + intArrayOf(newOther.shape.last())
|
||||
val resSize = resShape.reduce { acc, i -> acc * i }
|
||||
val resShape = newThis.shape.slice(0..(newThis.shape.size - 2)) + intArrayOf(newOther.shape.last())
|
||||
val resSize = resShape.linearSize
|
||||
val resTensor = DoubleTensor(resShape, DoubleArray(resSize).asBuffer())
|
||||
|
||||
val resMatrices = resTensor.matrices
|
||||
@ -385,9 +396,9 @@ public open class DoubleTensorAlgebra :
|
||||
// }
|
||||
|
||||
return if (penultimateDim) {
|
||||
resTensor.view(resTensor.shape.dropLast(2).toIntArray() + intArrayOf(resTensor.shape.last()))
|
||||
resTensor.view(resTensor.shape.first(resTensor.shape.size - 2) + Shape(resTensor.shape.last()))
|
||||
} else if (lastDim) {
|
||||
resTensor.view(resTensor.shape.dropLast(1).toIntArray())
|
||||
resTensor.view(resTensor.shape.first(resTensor.shape.size - 1))
|
||||
} else {
|
||||
resTensor
|
||||
}
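An illustration, not in the commit, of the 1D-by-1D branch of `matmul` above, which now yields a Shape(1) tensor holding the dot product; `matmul` is marked @UnstableKMathAPI, hence the opt-in:

import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.nd.Shape
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

@OptIn(UnstableKMathAPI::class)
fun dotDemo() = with(DoubleTensorAlgebra) {
    val a = fromArray(Shape(3), doubleArrayOf(1.0, 2.0, 3.0))
    val b = fromArray(Shape(3), doubleArrayOf(4.0, 5.0, 6.0))
    println((a matmul b).value())   // 32.0, read back from the Shape(1) result
}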
|
||||
@ -399,7 +410,7 @@ public open class DoubleTensorAlgebra :
|
||||
}
|
||||
|
||||
override fun diagonalEmbedding(
|
||||
diagonalEntries: Tensor<Double>,
|
||||
diagonalEntries: StructureND<Double>,
|
||||
offset: Int,
|
||||
dim1: Int,
|
||||
dim2: Int,
|
||||
@ -423,11 +434,11 @@ public open class DoubleTensorAlgebra :
|
||||
lessDim = greaterDim.also { greaterDim = lessDim }
|
||||
}
|
||||
|
||||
val resShape = diagonalEntries.shape.slice(0 until lessDim).toIntArray() +
|
||||
val resShape = diagonalEntries.shape.slice(0 until lessDim) +
|
||||
intArrayOf(diagonalEntries.shape[n - 1] + abs(realOffset)) +
|
||||
diagonalEntries.shape.slice(lessDim until greaterDim - 1).toIntArray() +
|
||||
diagonalEntries.shape.slice(lessDim until greaterDim - 1) +
|
||||
intArrayOf(diagonalEntries.shape[n - 1] + abs(realOffset)) +
|
||||
diagonalEntries.shape.slice(greaterDim - 1 until n - 1).toIntArray()
|
||||
diagonalEntries.shape.slice(greaterDim - 1 until n - 1)
|
||||
val resTensor: DoubleTensor = zeros(resShape)
|
||||
|
||||
for (i in 0 until diagonalEntries.indices.linearSize) {
|
||||
@ -495,8 +506,8 @@ public open class DoubleTensorAlgebra :
|
||||
* @return tensor of a given shape filled with numbers from the normal distribution
|
||||
* with `0.0` mean and `1.0` standard deviation.
|
||||
*/
|
||||
public fun randomNormal(shape: IntArray, seed: Long = 0): DoubleTensor =
|
||||
DoubleTensor(shape, DoubleBuffer.randomNormals(shape.reduce(Int::times), seed))
|
||||
public fun randomNormal(shape: Shape, seed: Long = 0): DoubleTensor =
|
||||
DoubleTensor(shape, DoubleBuffer.randomNormals(shape.linearSize, seed))
|
||||
|
||||
/**
|
||||
* Returns a tensor with the same shape as `input` of random numbers drawn from normal distributions
|
||||
@ -508,7 +519,7 @@ public open class DoubleTensorAlgebra :
|
||||
* with `0.0` mean and `1.0` standard deviation.
|
||||
*/
|
||||
public fun Tensor<Double>.randomNormalLike(seed: Long = 0): DoubleTensor =
|
||||
DoubleTensor(shape, DoubleBuffer.randomNormals(shape.reduce(Int::times), seed))
|
||||
DoubleTensor(shape, DoubleBuffer.randomNormals(shape.linearSize, seed))
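A usage sketch, not part of the diff, for the Shape-based random constructors above:

import space.kscience.kmath.nd.*
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun randomDemo() = with(DoubleTensorAlgebra) {
    val noise = randomNormal(Shape(3, 3), seed = 42L)      // standard normal entries
    val more = noise.randomNormalLike(seed = 43L)          // same Shape(3, 3), fresh draw
    println(noise.shape contentEquals more.shape)          // true
}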
|
||||
|
||||
/**
|
||||
* Concatenates a sequence of tensors with equal shapes along the first dimension.
|
||||
@ -520,7 +531,7 @@ public open class DoubleTensorAlgebra :
|
||||
check(tensors.isNotEmpty()) { "List must have at least 1 element" }
|
||||
val shape = tensors[0].shape
|
||||
check(tensors.all { it.shape contentEquals shape }) { "Tensors must have same shapes" }
|
||||
val resShape = intArrayOf(tensors.size) + shape
|
||||
val resShape = Shape(tensors.size) + shape
|
||||
// val resBuffer: List<Double> = tensors.flatMap {
|
||||
// it.asDoubleTensor().source.array.drop(it.asDoubleTensor().bufferStart)
|
||||
// .take(it.asDoubleTensor().linearSize)
|
||||
@ -545,11 +556,11 @@ public open class DoubleTensorAlgebra :
|
||||
): DoubleTensor {
|
||||
check(dim < dimension) { "Dimension $dim out of range $dimension" }
|
||||
val resShape = if (keepDim) {
|
||||
shape.take(dim).toIntArray() + intArrayOf(1) + shape.takeLast(dimension - dim - 1).toIntArray()
|
||||
shape.first(dim) + intArrayOf(1) + shape.last(dimension - dim - 1)
|
||||
} else {
|
||||
shape.take(dim).toIntArray() + shape.takeLast(dimension - dim - 1).toIntArray()
|
||||
shape.first(dim) + shape.last(dimension - dim - 1)
|
||||
}
|
||||
val resNumElements = resShape.reduce(Int::times)
|
||||
val resNumElements = resShape.linearSize
|
||||
val init = foldFunction(DoubleArray(1) { 0.0 })
|
||||
val resTensor = DoubleTensor(
|
||||
resShape,
|
||||
@ -573,11 +584,11 @@ public open class DoubleTensorAlgebra :
|
||||
): IntTensor {
|
||||
check(dim < dimension) { "Dimension $dim out of range $dimension" }
|
||||
val resShape = if (keepDim) {
|
||||
shape.take(dim).toIntArray() + intArrayOf(1) + shape.takeLast(dimension - dim - 1).toIntArray()
|
||||
shape.first(dim) + intArrayOf(1) + shape.last(dimension - dim - 1)
|
||||
} else {
|
||||
shape.take(dim).toIntArray() + shape.takeLast(dimension - dim - 1).toIntArray()
|
||||
shape.first(dim) + shape.last(dimension - dim - 1)
|
||||
}
|
||||
val resNumElements = resShape.reduce(Int::times)
|
||||
val resNumElements = resShape.linearSize
|
||||
val init = foldFunction(DoubleArray(1) { 0.0 })
|
||||
val resTensor = IntTensor(
|
||||
resShape,
|
||||
@ -674,9 +685,9 @@ public open class DoubleTensorAlgebra :
|
||||
check(tensors.isNotEmpty()) { "List must have at least 1 element" }
|
||||
val n = tensors.size
|
||||
val m = tensors[0].shape[0]
|
||||
check(tensors.all { it.shape contentEquals intArrayOf(m) }) { "Tensors must have same shapes" }
|
||||
check(tensors.all { it.shape contentEquals Shape(m) }) { "Tensors must have same shapes" }
|
||||
val resTensor = DoubleTensor(
|
||||
intArrayOf(n, n),
|
||||
Shape(n, n),
|
||||
DoubleBuffer(n * n) { 0.0 }
|
||||
)
|
||||
for (i in 0 until n) {
|
||||
@ -772,7 +783,7 @@ public open class DoubleTensorAlgebra :
|
||||
): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
|
||||
checkSquareMatrix(luTensor.shape)
|
||||
check(
|
||||
luTensor.shape.dropLast(2).toIntArray() contentEquals pivotsTensor.shape.dropLast(1).toIntArray() ||
|
||||
luTensor.shape.first(luTensor.shape.size - 2) contentEquals pivotsTensor.shape.first(pivotsTensor.shape.size - 1) ||
|
||||
luTensor.shape.last() == pivotsTensor.shape.last() - 1
|
||||
) { "Inappropriate shapes of input tensors" }
|
||||
|
||||
@ -843,9 +854,10 @@ public open class DoubleTensorAlgebra :
|
||||
return qTensor to rTensor
|
||||
}
|
||||
|
||||
override fun StructureND<Double>.svd(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> =
|
||||
override fun StructureND<Double>.svd(): Triple<StructureND<Double>, StructureND<Double>, StructureND<Double>> =
|
||||
svd(epsilon = 1e-10)
|
||||
|
||||
|
||||
/**
|
||||
* Singular Value Decomposition.
|
||||
*
|
||||
@ -859,13 +871,13 @@ public open class DoubleTensorAlgebra :
|
||||
* i.e., the precision with which the cosine approaches 1 in an iterative algorithm.
|
||||
* @return a triple `Triple(U, S, V)`.
|
||||
*/
|
||||
public fun StructureND<Double>.svd(epsilon: Double): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
|
||||
public fun StructureND<Double>.svd(epsilon: Double): Triple<StructureND<Double>, StructureND<Double>, StructureND<Double>> {
|
||||
val size = dimension
|
||||
val commonShape = shape.sliceArray(0 until size - 2)
|
||||
val (n, m) = shape.sliceArray(size - 2 until size)
|
||||
val uTensor = zeros(commonShape + intArrayOf(min(n, m), n))
|
||||
val sTensor = zeros(commonShape + intArrayOf(min(n, m)))
|
||||
val vTensor = zeros(commonShape + intArrayOf(min(n, m), m))
|
||||
val commonShape = shape.slice(0 until size - 2)
|
||||
val (n, m) = shape.slice(size - 2 until size)
|
||||
val uTensor = zeros(commonShape + Shape(min(n, m), n))
|
||||
val sTensor = zeros(commonShape + Shape(min(n, m)))
|
||||
val vTensor = zeros(commonShape + Shape(min(n, m), m))
|
||||
|
||||
val matrices = asDoubleTensor().matrices
|
||||
val uTensors = uTensor.matrices
|
||||
@ -879,7 +891,7 @@ public open class DoubleTensorAlgebra :
|
||||
sTensorVectors[index],
|
||||
vTensors[index]
|
||||
)
|
||||
val matrixSize = matrix.shape.reduce { acc, i -> acc * i }
|
||||
val matrixSize = matrix.shape.linearSize
|
||||
val curMatrix = DoubleTensor(
|
||||
matrix.shape,
|
||||
matrix.source.view(0, matrixSize)
|
||||
@ -901,7 +913,7 @@ public open class DoubleTensorAlgebra :
|
||||
* and when the cosine approaches 1 in the SVD algorithm.
|
||||
* @return a pair `eigenvalues to eigenvectors`.
|
||||
*/
|
||||
public fun StructureND<Double>.symEigSvd(epsilon: Double): Pair<DoubleTensor, DoubleTensor> {
|
||||
public fun StructureND<Double>.symEigSvd(epsilon: Double): Pair<DoubleTensor, StructureND<Double>> {
|
||||
//TODO optimize conversion
|
||||
checkSymmetric(asDoubleTensor(), epsilon)
|
||||
|
||||
@ -925,7 +937,7 @@ public open class DoubleTensorAlgebra :
|
||||
matrix.asDoubleTensor2D().cleanSym(n)
|
||||
}
|
||||
|
||||
val eig = (utv dot s.view(shp)).view(s.shape)
|
||||
val eig = (utv dot s.asDoubleTensor().view(shp)).view(s.shape)
|
||||
return eig to v
|
||||
}
|
||||
|
||||
@ -934,8 +946,8 @@ public open class DoubleTensorAlgebra :
|
||||
checkSymmetric(asDoubleTensor(), epsilon)
|
||||
|
||||
val size = this.dimension
|
||||
val eigenvectors = zeros(this.shape)
|
||||
val eigenvalues = zeros(this.shape.sliceArray(0 until size - 1))
|
||||
val eigenvectors = zeros(shape)
|
||||
val eigenvalues = zeros(shape.slice(0 until size - 1))
|
||||
|
||||
var eigenvalueStart = 0
|
||||
var eigenvectorStart = 0
|
||||
@ -976,9 +988,11 @@ public open class DoubleTensorAlgebra :
|
||||
|
||||
val n = shape.size
|
||||
|
||||
val detTensorShape = IntArray(n - 1) { i -> shape[i] }
|
||||
detTensorShape[n - 2] = 1
|
||||
val resBuffer = DoubleBuffer(detTensorShape.reduce(Int::times)) { 0.0 }
|
||||
val detTensorShape = Shape(IntArray(n - 1) { i -> shape[i] }.apply {
|
||||
set(n - 2, 1)
|
||||
})
|
||||
|
||||
val resBuffer = DoubleBuffer(detTensorShape.linearSize) { 0.0 }
|
||||
|
||||
val detTensor = DoubleTensor(
|
||||
detTensorShape,
|
||||
|
@ -5,6 +5,8 @@
|
||||
|
||||
package space.kscience.kmath.tensors.core
|
||||
|
||||
import space.kscience.kmath.misc.PerformancePitfall
|
||||
import space.kscience.kmath.nd.Shape
|
||||
import space.kscience.kmath.structures.*
|
||||
|
||||
/**
|
||||
@ -73,7 +75,7 @@ public inline fun OffsetIntBuffer.mapInPlace(operation: (Int) -> Int) {
|
||||
* Default [BufferedTensor] implementation for [Int] values
|
||||
*/
|
||||
public class IntTensor(
|
||||
shape: IntArray,
|
||||
shape: Shape,
|
||||
override val source: OffsetIntBuffer,
|
||||
) : BufferedTensor<Int>(shape) {
|
||||
|
||||
@ -81,10 +83,12 @@ public class IntTensor(
|
||||
require(linearSize == source.size) { "Source buffer size must be equal tensor size" }
|
||||
}
|
||||
|
||||
public constructor(shape: IntArray, buffer: IntBuffer) : this(shape, OffsetIntBuffer(buffer, 0, buffer.size))
|
||||
public constructor(shape: Shape, buffer: IntBuffer) : this(shape, OffsetIntBuffer(buffer, 0, buffer.size))
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun get(index: IntArray): Int = this.source[indices.offset(index)]
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun set(index: IntArray, value: Int) {
|
||||
source[indices.offset(index)] = value
|
||||
}
|
||||
|
@ -23,6 +23,7 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {

public companion object : IntTensorAlgebra()

override val elementAlgebra: IntRing get() = IntRing

@ -88,7 +89,7 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {

override fun StructureND<Int>.valueOrNull(): Int? {
val dt = asIntTensor()
return if (dt.shape contentEquals intArrayOf(1)) dt.source[0] else null
return if (dt.shape contentEquals Shape(1)) dt.source[0] else null
}

override fun StructureND<Int>.value(): Int = valueOrNull()
@ -101,11 +102,11 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {
* @param array one-dimensional data array.
* @return tensor with the [shape] shape and [array] data.
*/
public fun fromArray(shape: IntArray, array: IntArray): IntTensor {
public fun fromArray(shape: Shape, array: IntArray): IntTensor {
checkNotEmptyShape(shape)
check(array.isNotEmpty()) { "Illegal empty buffer provided" }
check(array.size == shape.reduce(Int::times)) {
"Inconsistent shape ${shape.toList()} for buffer of size ${array.size} provided"
check(array.size == shape.linearSize) {
"Inconsistent shape ${shape} for buffer of size ${array.size} provided"
}
return IntTensor(shape, array.asBuffer())
}
@ -117,16 +118,16 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {
* @param initializer mapping tensor indices to values.
* @return tensor with the [shape] shape and data generated by the [initializer].
*/
override fun structureND(shape: IntArray, initializer: IntRing.(IntArray) -> Int): IntTensor = fromArray(
override fun structureND(shape: Shape, initializer: IntRing.(IntArray) -> Int): IntTensor = fromArray(
shape,
RowStrides(shape).asSequence().map { IntRing.initializer(it) }.toMutableList().toIntArray()
)

override fun Tensor<Int>.getTensor(i: Int): IntTensor {
val dt = asIntTensor()
val lastShape = shape.drop(1).toIntArray()
val newShape = if (lastShape.isNotEmpty()) lastShape else intArrayOf(1)
return IntTensor(newShape, dt.source.view(newShape.reduce(Int::times) * i))
val lastShape = shape.last(shape.size - 1)
val newShape = if (lastShape.isNotEmpty()) lastShape else Shape(1)
return IntTensor(newShape, dt.source.view(newShape.linearSize * i))
}

/**
@ -136,9 +137,9 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {
* @param shape array of integers defining the shape of the output tensor.
* @return tensor with the [shape] shape and filled with [value].
*/
public fun full(value: Int, shape: IntArray): IntTensor {
public fun full(value: Int, shape: Shape): IntTensor {
checkNotEmptyShape(shape)
val buffer = IntBuffer(shape.reduce(Int::times)) { value }
val buffer = IntBuffer(shape.linearSize) { value }
return IntTensor(shape, buffer)
}

@ -160,7 +161,7 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {
* @param shape array of integers defining the shape of the output tensor.
* @return tensor filled with the scalar value `0`, with the [shape] shape.
*/
public fun zeros(shape: IntArray): IntTensor = full(0, shape)
public fun zeros(shape: Shape): IntTensor = full(0, shape)

/**
* Returns a tensor filled with the scalar value `0`, with the same shape as a given array.
@ -175,7 +176,7 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {
* @param shape array of integers defining the shape of the output tensor.
* @return tensor filled with the scalar value `1`, with the [shape] shape.
*/
public fun ones(shape: IntArray): IntTensor = full(1, shape)
public fun ones(shape: Shape): IntTensor = full(1, shape)

/**
* Returns a tensor filled with the scalar value `1`, with the same shape as a given array.
@ -191,7 +192,7 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {
* @return a 2-D tensor with ones on the diagonal and zeros elsewhere.
*/
public fun eye(n: Int): IntTensor {
val shape = intArrayOf(n, n)
val shape = Shape(n, n)
val buffer = IntBuffer(n * n) { 0 }
val res = IntTensor(shape, buffer)
for (i in 0 until n) {
@ -249,32 +250,44 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {

override fun StructureND<Int>.unaryMinus(): IntTensor = map { -it }

override fun Tensor<Int>.transposed(i: Int, j: Int): IntTensor {
// TODO change strides instead of changing content
val dt = asIntTensor()
val ii = dt.minusIndex(i)
val jj = dt.minusIndex(j)
checkTranspose(dt.dimension, ii, jj)
val n = dt.linearSize
val resBuffer = IntArray(n)

val resShape = dt.shape.copyOf()
resShape[ii] = resShape[jj].also { resShape[jj] = resShape[ii] }

val resTensor = IntTensor(resShape, resBuffer.asBuffer())

for (offset in 0 until n) {
val oldMultiIndex = dt.indices.index(offset)
val newMultiIndex = oldMultiIndex.copyOf()
newMultiIndex[ii] = newMultiIndex[jj].also { newMultiIndex[jj] = newMultiIndex[ii] }

val linearIndex = resTensor.indices.offset(newMultiIndex)
resTensor.source[linearIndex] = dt.source[offset]
override fun StructureND<Int>.transposed(i: Int, j: Int): Tensor<Int> {
val actualI = if (i >= 0) i else shape.size + i
val actualJ = if(j>=0) j else shape.size + j
return asIntTensor().permute(
shape.transposed(actualI, actualJ)
) { originIndex ->
originIndex.copyOf().apply {
val ith = get(actualI)
val jth = get(actualJ)
set(actualI, jth)
set(actualJ, ith)
}
}
return resTensor
// // TODO change strides instead of changing content
// val dt = asIntTensor()
// val ii = dt.minusIndex(i)
// val jj = dt.minusIndex(j)
// checkTranspose(dt.dimension, ii, jj)
// val n = dt.linearSize
// val resBuffer = IntArray(n)
//
// val resShape = dt.shape.toArray()
// resShape[ii] = resShape[jj].also { resShape[jj] = resShape[ii] }
//
// val resTensor = IntTensor(Shape(resShape), resBuffer.asBuffer())
//
// for (offset in 0 until n) {
// val oldMultiIndex = dt.indices.index(offset)
// val newMultiIndex = oldMultiIndex.copyOf()
// newMultiIndex[ii] = newMultiIndex[jj].also { newMultiIndex[jj] = newMultiIndex[ii] }
//
// val linearIndex = resTensor.indices.offset(newMultiIndex)
// resTensor.source[linearIndex] = dt.source[offset]
// }
// return resTensor
}

override fun Tensor<Int>.view(shape: IntArray): IntTensor {
override fun Tensor<Int>.view(shape: Shape): IntTensor {
checkView(asIntTensor(), shape)
return IntTensor(shape, asIntTensor().source)
}
@ -287,7 +300,7 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {
}

override fun diagonalEmbedding(
diagonalEntries: Tensor<Int>,
diagonalEntries: StructureND<Int>,
offset: Int,
dim1: Int,
dim2: Int,
@ -311,11 +324,11 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {
lessDim = greaterDim.also { greaterDim = lessDim }
}

val resShape = diagonalEntries.shape.slice(0 until lessDim).toIntArray() +
val resShape = diagonalEntries.shape.slice(0 until lessDim) +
intArrayOf(diagonalEntries.shape[n - 1] + abs(realOffset)) +
diagonalEntries.shape.slice(lessDim until greaterDim - 1).toIntArray() +
diagonalEntries.shape.slice(lessDim until greaterDim - 1) +
intArrayOf(diagonalEntries.shape[n - 1] + abs(realOffset)) +
diagonalEntries.shape.slice(greaterDim - 1 until n - 1).toIntArray()
diagonalEntries.shape.slice(greaterDim - 1 until n - 1)
val resTensor = zeros(resShape)

for (i in 0 until diagonalEntries.asIntTensor().linearSize) {
@ -375,7 +388,7 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {
check(tensors.isNotEmpty()) { "List must have at least 1 element" }
val shape = tensors[0].shape
check(tensors.all { it.shape contentEquals shape }) { "Tensors must have same shapes" }
val resShape = intArrayOf(tensors.size) + shape
val resShape = Shape(tensors.size) + shape
// val resBuffer: List<Int> = tensors.flatMap {
// it.asIntTensor().source.array.drop(it.asIntTensor().bufferStart)
// .take(it.asIntTensor().linearSize)
@ -399,11 +412,11 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {
): IntTensor {
check(dim < dimension) { "Dimension $dim out of range $dimension" }
val resShape = if (keepDim) {
shape.take(dim).toIntArray() + intArrayOf(1) + shape.takeLast(dimension - dim - 1).toIntArray()
shape.first(dim) + intArrayOf(1) + shape.last(dimension - dim - 1)
} else {
shape.take(dim).toIntArray() + shape.takeLast(dimension - dim - 1).toIntArray()
shape.first(dim) + shape.last(dimension - dim - 1)
}
val resNumElements = resShape.reduce(Int::times)
val resNumElements = resShape.linearSize
val init = foldFunction(IntArray(1) { 0 })
val resTensor = IntTensor(
resShape,
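A small sketch of the Shape-based factory functions from IntTensorAlgebra shown above (fromArray, full, eye); the block is illustrative only and assumes the algebra invoke extension from space.kscience.kmath.operations.

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.IntTensorAlgebra

fun intAlgebraSketch() = IntTensorAlgebra {
    // fromArray checks that array.size == shape.linearSize (2 * 2 = 4 here).
    val a = fromArray(Shape(2, 2), intArrayOf(1, 2, 3, 4))
    val identity = eye(2)              // 2x2 tensor with ones on the diagonal
    val sevens = full(7, Shape(2, 2))  // constant-filled tensor
    a + identity + sevens              // element-wise sum of equally shaped tensors
}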
@ -5,6 +5,8 @@

package space.kscience.kmath.tensors.core.internal

import space.kscience.kmath.misc.UnsafeKMathAPI
import space.kscience.kmath.nd.*
import space.kscience.kmath.structures.asBuffer
import space.kscience.kmath.tensors.core.DoubleTensor
import kotlin.math.max
@ -12,7 +14,7 @@ import kotlin.math.max
internal fun multiIndexBroadCasting(tensor: DoubleTensor, resTensor: DoubleTensor, linearSize: Int) {
for (linearIndex in 0 until linearSize) {
val totalMultiIndex = resTensor.indices.index(linearIndex)
val curMultiIndex = tensor.shape.copyOf()
val curMultiIndex = tensor.shape.toArray()

val offset = totalMultiIndex.size - curMultiIndex.size

@ -30,7 +32,7 @@ internal fun multiIndexBroadCasting(tensor: DoubleTensor, resTensor: DoubleTenso
}
}

internal fun broadcastShapes(vararg shapes: IntArray): IntArray {
internal fun broadcastShapes(shapes: List<Shape>): Shape {
var totalDim = 0
for (shape in shapes) {
totalDim = max(totalDim, shape.size)
@ -55,15 +57,15 @@ internal fun broadcastShapes(vararg shapes: IntArray): IntArray {
}
}

return totalShape
return Shape(totalShape)
}

internal fun broadcastTo(tensor: DoubleTensor, newShape: IntArray): DoubleTensor {
internal fun broadcastTo(tensor: DoubleTensor, newShape: Shape): DoubleTensor {
require(tensor.shape.size <= newShape.size) {
"Tensor is not compatible with the new shape"
}

val n = newShape.reduce { acc, i -> acc * i }
val n = newShape.linearSize
val resTensor = DoubleTensor(newShape, DoubleArray(n).asBuffer())

for (i in tensor.shape.indices) {
@ -79,8 +81,8 @@ internal fun broadcastTo(tensor: DoubleTensor, newShape: IntArray): DoubleTensor
}

internal fun broadcastTensors(vararg tensors: DoubleTensor): List<DoubleTensor> {
val totalShape = broadcastShapes(*(tensors.map { it.shape }).toTypedArray())
val n = totalShape.reduce { acc, i -> acc * i }
val totalShape = broadcastShapes(tensors.map { it.shape })
val n = totalShape.linearSize

return tensors.map { tensor ->
val resTensor = DoubleTensor(totalShape, DoubleArray(n).asBuffer())
@ -100,12 +102,12 @@ internal fun broadcastOuterTensors(vararg tensors: DoubleTensor): List<DoubleTen
return tensors.asList()
}

val totalShape = broadcastShapes(*(tensors.map { it.shape.sliceArray(0..it.shape.size - 3) }).toTypedArray())
val n = totalShape.reduce { acc, i -> acc * i }
val totalShape = broadcastShapes(tensors.map { it.shape.slice(0..it.shape.size - 3) })
val n = totalShape.linearSize

return buildList {
for (tensor in tensors) {
val matrixShape = tensor.shape.sliceArray(tensor.shape.size - 2 until tensor.shape.size).copyOf()
val matrixShape = tensor.shape.slice(tensor.shape.size - 2 until tensor.shape.size)
val matrixSize = matrixShape[0] * matrixShape[1]
val matrix = DoubleTensor(matrixShape, DoubleArray(matrixSize).asBuffer())

@ -114,10 +116,11 @@ internal fun broadcastOuterTensors(vararg tensors: DoubleTensor): List<DoubleTen

for (linearIndex in 0 until n) {
val totalMultiIndex = outerTensor.indices.index(linearIndex)
var curMultiIndex = tensor.shape.sliceArray(0..tensor.shape.size - 3).copyOf()
@OptIn(UnsafeKMathAPI::class)
var curMultiIndex = tensor.shape.slice(0..tensor.shape.size - 3).asArray()
curMultiIndex = IntArray(totalMultiIndex.size - curMultiIndex.size) { 1 } + curMultiIndex

val newTensor = DoubleTensor(curMultiIndex + matrixShape, tensor.source)
val newTensor = DoubleTensor(Shape(curMultiIndex) + matrixShape, tensor.source)

for (i in curMultiIndex.indices) {
if (curMultiIndex[i] != 1) {
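For orientation, the intended semantics of the new list-based broadcastShapes signature, copied from the TestBroadcasting cases later in this commit; the function is internal to tensors-core, so the lines below are illustration only, not public API.

// broadcastShapes(listOf(Shape(2, 3), Shape(1, 3), Shape(1, 1, 1))) contentEquals Shape(1, 2, 3)
// broadcastShapes(listOf(Shape(6, 7), Shape(5, 6, 1), Shape(7), Shape(5, 1, 7))) contentEquals Shape(5, 6, 7)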
@ -5,15 +5,18 @@

package space.kscience.kmath.tensors.core.internal

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.nd.contentEquals
import space.kscience.kmath.nd.linearSize
import space.kscience.kmath.tensors.api.Tensor
import space.kscience.kmath.tensors.core.DoubleTensor
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
import space.kscience.kmath.tensors.core.asDoubleTensor

internal fun checkNotEmptyShape(shape: IntArray) =
check(shape.isNotEmpty()) {
internal fun checkNotEmptyShape(shape: Shape) =
check(shape.size > 0) {
"Illegal empty shape provided"
}

@ -21,15 +24,15 @@ internal fun checkEmptyDoubleBuffer(buffer: DoubleArray) = check(buffer.isNotEmp
"Illegal empty buffer provided"
}

internal fun checkBufferShapeConsistency(shape: IntArray, buffer: DoubleArray) =
check(buffer.size == shape.reduce(Int::times)) {
"Inconsistent shape ${shape.toList()} for buffer of size ${buffer.size} provided"
internal fun checkBufferShapeConsistency(shape: Shape, buffer: DoubleArray) =
check(buffer.size == shape.linearSize) {
"Inconsistent shape ${shape} for buffer of size ${buffer.size} provided"
}

@PublishedApi
internal fun <T> checkShapesCompatible(a: StructureND<T>, b: StructureND<T>): Unit =
check(a.shape contentEquals b.shape) {
"Incompatible shapes ${a.shape.toList()} and ${b.shape.toList()} "
"Incompatible shapes ${a.shape} and ${b.shape} "
}

internal fun checkTranspose(dim: Int, i: Int, j: Int) =
@ -37,10 +40,10 @@ internal fun checkTranspose(dim: Int, i: Int, j: Int) =
"Cannot transpose $i to $j for a tensor of dim $dim"
}

internal fun <T> checkView(a: Tensor<T>, shape: IntArray) =
check(a.shape.reduce(Int::times) == shape.reduce(Int::times))
internal fun <T> checkView(a: Tensor<T>, shape: Shape) =
check(a.shape.linearSize == shape.linearSize)

internal fun checkSquareMatrix(shape: IntArray) {
internal fun checkSquareMatrix(shape: Shape) {
val n = shape.size
check(n >= 2) {
"Expected tensor with 2 or more dimensions, got size $n instead"
@ -6,7 +6,6 @@
package space.kscience.kmath.tensors.core.internal

import space.kscience.kmath.nd.*
import space.kscience.kmath.nd.Strides.Companion.linearSizeOf
import space.kscience.kmath.structures.DoubleBuffer
import space.kscience.kmath.structures.asBuffer
import space.kscience.kmath.structures.indices
@ -40,7 +39,7 @@ internal fun MutableStructure2D<Double>.jacobiHelper(
source[i * shape[0] + j] = value
}

fun maxOffDiagonal(matrix: BufferedTensor<Double>): Double {
fun maxOffDiagonal(matrix: DoubleTensor): Double {
var maxOffDiagonalElement = 0.0
for (i in 0 until n - 1) {
for (j in i + 1 until n) {
@ -50,7 +49,7 @@ internal fun MutableStructure2D<Double>.jacobiHelper(
return maxOffDiagonalElement
}

fun rotate(a: BufferedTensor<Double>, s: Double, tau: Double, i: Int, j: Int, k: Int, l: Int) {
fun rotate(a: DoubleTensor, s: Double, tau: Double, i: Int, j: Int, k: Int, l: Int) {
val g = a[i, j]
val h = a[k, l]
a[i, j] = g - s * (h + g * tau)
@ -58,8 +57,8 @@ internal fun MutableStructure2D<Double>.jacobiHelper(
}

fun jacobiIteration(
a: BufferedTensor<Double>,
v: BufferedTensor<Double>,
a: DoubleTensor,
v: DoubleTensor,
d: DoubleBuffer,
z: DoubleBuffer,
) {
@ -157,7 +156,7 @@ internal val DoubleTensor.vectors: List<DoubleTensor>
get() {
val n = shape.size
val vectorOffset = shape[n - 1]
val vectorShape = intArrayOf(shape.last())
val vectorShape = Shape(shape.last())

return List(linearSize / vectorOffset) { index ->
val offset = index * vectorOffset
@ -174,9 +173,9 @@ internal val DoubleTensor.matrices: List<DoubleTensor>
val n = shape.size
check(n >= 2) { "Expected tensor with 2 or more dimensions, got size $n" }
val matrixOffset = shape[n - 1] * shape[n - 2]
val matrixShape = intArrayOf(shape[n - 2], shape[n - 1])
val matrixShape = Shape(shape[n - 2], shape[n - 1])

val size = linearSizeOf(matrixShape)
val size = matrixShape.linearSize

return List(linearSize / matrixOffset) { index ->
val offset = index * matrixOffset
@ -5,6 +5,9 @@

package space.kscience.kmath.tensors.core.internal

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.first
import space.kscience.kmath.nd.last
import space.kscience.kmath.operations.asSequence
import space.kscience.kmath.structures.IntBuffer
import space.kscience.kmath.structures.VirtualBuffer
@ -35,7 +38,7 @@ internal fun List<OffsetIntBuffer>.concat(): IntBuffer {
internal fun IntTensor.vectors(): VirtualBuffer<IntTensor> {
val n = shape.size
val vectorOffset = shape[n - 1]
val vectorShape = intArrayOf(shape.last())
val vectorShape = shape.last(1)

return VirtualBuffer(linearSize / vectorOffset) { index ->
val offset = index * vectorOffset
@ -52,7 +55,7 @@ internal val IntTensor.matrices: VirtualBuffer<IntTensor>
val n = shape.size
check(n >= 2) { "Expected tensor with 2 or more dimensions, got size $n" }
val matrixOffset = shape[n - 1] * shape[n - 2]
val matrixShape = intArrayOf(shape[n - 2], shape[n - 1])
val matrixShape = Shape(shape[n - 2], shape[n - 1])

return VirtualBuffer(linearSize / matrixOffset) { index ->
val offset = index * matrixOffset
@ -5,10 +5,7 @@

package space.kscience.kmath.tensors.core.internal

import space.kscience.kmath.nd.MutableStructure1D
import space.kscience.kmath.nd.MutableStructure2D
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.nd.as1D
import space.kscience.kmath.nd.*
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.structures.DoubleBuffer
import space.kscience.kmath.structures.IntBuffer
@ -98,7 +95,7 @@ internal fun <T> StructureND<T>.setUpPivots(): IntTensor {
pivotsShape[n - 2] = m + 1

return IntTensor(
pivotsShape,
Shape(pivotsShape),
IntBuffer(pivotsShape.reduce(Int::times)) { 0 }
)
}
@ -243,10 +240,10 @@ internal fun DoubleTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double = 1e-10)
val b: DoubleTensor
if (n > m) {
b = a.transposed(0, 1).dot(a)
v = DoubleTensor(intArrayOf(m), DoubleBuffer.randomUnitVector(m, 0))
v = DoubleTensor(Shape(m), DoubleBuffer.randomUnitVector(m, 0))
} else {
b = a.dot(a.transposed(0, 1))
v = DoubleTensor(intArrayOf(n), DoubleBuffer.randomUnitVector(n, 0))
v = DoubleTensor(Shape(n), DoubleBuffer.randomUnitVector(n, 0))
}

var lastV: DoubleTensor
@ -278,7 +275,7 @@ internal fun DoubleTensorAlgebra.svdHelper(
outerProduct[i * v.shape[0] + j] = u.getTensor(i).value() * v.getTensor(j).value()
}
}
a = a - singularValue.times(DoubleTensor(intArrayOf(u.shape[0], v.shape[0]), outerProduct.asBuffer()))
a = a - singularValue.times(DoubleTensor(Shape(u.shape[0], v.shape[0]), outerProduct.asBuffer()))
}
var v: DoubleTensor
var u: DoubleTensor
@ -6,6 +6,8 @@
package space.kscience.kmath.tensors.core.internal

import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.nd.asList
import space.kscience.kmath.nd.last
import space.kscience.kmath.operations.DoubleBufferOps.Companion.map
import space.kscience.kmath.random.RandomGenerator
import space.kscience.kmath.samplers.GaussianSampler
@ -92,7 +94,7 @@ public fun DoubleTensor.toPrettyString(): String = buildString {
append(']')
charOffset -= 1

index.reversed().zip(shape.reversed()).drop(1).forEach { (ind, maxInd) ->
index.reversed().zip(shape.asList().reversed()).drop(1).forEach { (ind, maxInd) ->
if (ind != maxInd - 1) {
return@forEach
}
@ -12,11 +12,11 @@ import space.kscience.kmath.nd.Shape
import kotlin.jvm.JvmName

@JvmName("varArgOne")
public fun DoubleTensorAlgebra.one(vararg shape: Int): DoubleTensor = ones(intArrayOf(*shape))
public fun DoubleTensorAlgebra.one(vararg shape: Int): DoubleTensor = ones(Shape(shape))

public fun DoubleTensorAlgebra.one(shape: Shape): DoubleTensor = ones(shape)

@JvmName("varArgZero")
public fun DoubleTensorAlgebra.zero(vararg shape: Int): DoubleTensor = zeros(intArrayOf(*shape))
public fun DoubleTensorAlgebra.zero(vararg shape: Int): DoubleTensor = zeros(Shape(shape))

public fun DoubleTensorAlgebra.zero(shape: Shape): DoubleTensor = zeros(shape)
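A quick sketch of the two factory flavours above: the vararg overloads wrap plain ints into a Shape, while the Shape overloads take it directly; illustrative only.

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
import space.kscience.kmath.tensors.core.one
import space.kscience.kmath.tensors.core.zero

fun factorySketch() = DoubleTensorAlgebra {
    val a = one(2, 3)           // vararg overload, builds Shape(2, 3) internally
    val b = zero(Shape(2, 3))   // Shape overload
    a + b                       // same shape, element-wise sum
}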
@ -5,9 +5,7 @@

package space.kscience.kmath.tensors.core

import space.kscience.kmath.nd.DoubleBufferND
import space.kscience.kmath.nd.RowStrides
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.nd.*
import space.kscience.kmath.structures.DoubleBuffer
import space.kscience.kmath.structures.asBuffer
import space.kscience.kmath.tensors.api.Tensor
@ -23,7 +21,7 @@ public fun StructureND<Double>.copyToTensor(): DoubleTensor = if (this is Double
} else {
DoubleTensor(
shape,
RowStrides(this.shape).map(this::get).toDoubleArray().asBuffer(),
RowStrides(this.shape).map(this::getDouble).toDoubleArray().asBuffer(),
)
}

@ -36,7 +34,7 @@ public fun StructureND<Int>.toDoubleTensor(): DoubleTensor {
} else {
val tensor = DoubleTensorAlgebra.zeroesLike(this)
indices.forEach {
tensor[it] = get(it).toDouble()
tensor[it] = getInt(it).toDouble()
}
return tensor
}
@ -59,7 +57,7 @@ public fun StructureND<Double>.asDoubleTensor(): DoubleTensor = if (this is Doub
public fun StructureND<Int>.asIntTensor(): IntTensor = when (this) {
is IntTensor -> this
else -> IntTensor(
this.shape,
RowStrides(this.shape).map(this::get).toIntArray().asBuffer()
shape,
RowStrides(shape).map(this::getInt).toIntArray().asBuffer()
)
}
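A hedged sketch of the conversion helpers above: toDoubleTensor copies an integer structure element by element, while asDoubleTensor returns the receiver unchanged when it already is a DoubleTensor; the surrounding names are illustrative.

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.IntTensorAlgebra
import space.kscience.kmath.tensors.core.asDoubleTensor
import space.kscience.kmath.tensors.core.toDoubleTensor

fun castSketch() = IntTensorAlgebra {
    val ints = fromArray(Shape(2, 2), intArrayOf(1, 2, 3, 4))
    val doubles = ints.toDoubleTensor()    // copies, converting each element to Double
    doubles.asDoubleTensor()               // already a DoubleTensor, so no copy is made
}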
@ -5,6 +5,8 @@

package space.kscience.kmath.tensors.core

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.contentEquals
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.internal.broadcastOuterTensors
import space.kscience.kmath.tensors.core.internal.broadcastShapes
@ -19,38 +21,38 @@ internal class TestBroadcasting {
fun testBroadcastShapes() = DoubleTensorAlgebra {
assertTrue(
broadcastShapes(
intArrayOf(2, 3), intArrayOf(1, 3), intArrayOf(1, 1, 1)
) contentEquals intArrayOf(1, 2, 3)
listOf(Shape(2, 3), Shape(1, 3), Shape(1, 1, 1))
) contentEquals Shape(1, 2, 3)
)

assertTrue(
broadcastShapes(
intArrayOf(6, 7), intArrayOf(5, 6, 1), intArrayOf(7), intArrayOf(5, 1, 7)
) contentEquals intArrayOf(5, 6, 7)
listOf(Shape(6, 7), Shape(5, 6, 1), Shape(7), Shape(5, 1, 7))
) contentEquals Shape(5, 6, 7)
)
}

@Test
fun testBroadcastTo() = DoubleTensorAlgebra {
val tensor1 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(intArrayOf(1, 3), doubleArrayOf(10.0, 20.0, 30.0))
val tensor1 = fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(Shape(1, 3), doubleArrayOf(10.0, 20.0, 30.0))

val res = broadcastTo(tensor2, tensor1.shape)
assertTrue(res.shape contentEquals intArrayOf(2, 3))
assertTrue(res.shape contentEquals Shape(2, 3))
assertTrue(res.source contentEquals doubleArrayOf(10.0, 20.0, 30.0, 10.0, 20.0, 30.0))
}

@Test
fun testBroadcastTensors() = DoubleTensorAlgebra {
val tensor1 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(intArrayOf(1, 3), doubleArrayOf(10.0, 20.0, 30.0))
val tensor3 = fromArray(intArrayOf(1, 1, 1), doubleArrayOf(500.0))
val tensor1 = fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(Shape(1, 3), doubleArrayOf(10.0, 20.0, 30.0))
val tensor3 = fromArray(Shape(1, 1, 1), doubleArrayOf(500.0))

val res = broadcastTensors(tensor1, tensor2, tensor3)

assertTrue(res[0].shape contentEquals intArrayOf(1, 2, 3))
assertTrue(res[1].shape contentEquals intArrayOf(1, 2, 3))
assertTrue(res[2].shape contentEquals intArrayOf(1, 2, 3))
assertTrue(res[0].shape contentEquals Shape(1, 2, 3))
assertTrue(res[1].shape contentEquals Shape(1, 2, 3))
assertTrue(res[2].shape contentEquals Shape(1, 2, 3))

assertTrue(res[0].source contentEquals doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
assertTrue(res[1].source contentEquals doubleArrayOf(10.0, 20.0, 30.0, 10.0, 20.0, 30.0))
@ -59,15 +61,15 @@ internal class TestBroadcasting {

@Test
fun testBroadcastOuterTensors() = DoubleTensorAlgebra {
val tensor1 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(intArrayOf(1, 3), doubleArrayOf(10.0, 20.0, 30.0))
val tensor3 = fromArray(intArrayOf(1, 1, 1), doubleArrayOf(500.0))
val tensor1 = fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(Shape(1, 3), doubleArrayOf(10.0, 20.0, 30.0))
val tensor3 = fromArray(Shape(1, 1, 1), doubleArrayOf(500.0))

val res = broadcastOuterTensors(tensor1, tensor2, tensor3)

assertTrue(res[0].shape contentEquals intArrayOf(1, 2, 3))
assertTrue(res[1].shape contentEquals intArrayOf(1, 1, 3))
assertTrue(res[2].shape contentEquals intArrayOf(1, 1, 1))
assertTrue(res[0].shape contentEquals Shape(1, 2, 3))
assertTrue(res[1].shape contentEquals Shape(1, 1, 3))
assertTrue(res[2].shape contentEquals Shape(1, 1, 1))

assertTrue(res[0].source contentEquals doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
assertTrue(res[1].source contentEquals doubleArrayOf(10.0, 20.0, 30.0))
@ -76,37 +78,37 @@ internal class TestBroadcasting {

@Test
fun testBroadcastOuterTensorsShapes() = DoubleTensorAlgebra {
val tensor1 = fromArray(intArrayOf(2, 1, 3, 2, 3), DoubleArray(2 * 1 * 3 * 2 * 3) { 0.0 })
val tensor2 = fromArray(intArrayOf(4, 2, 5, 1, 3, 3), DoubleArray(4 * 2 * 5 * 1 * 3 * 3) { 0.0 })
val tensor3 = fromArray(intArrayOf(1, 1), doubleArrayOf(500.0))
val tensor1 = fromArray(Shape(2, 1, 3, 2, 3), DoubleArray(2 * 1 * 3 * 2 * 3) { 0.0 })
val tensor2 = fromArray(Shape(4, 2, 5, 1, 3, 3), DoubleArray(4 * 2 * 5 * 1 * 3 * 3) { 0.0 })
val tensor3 = fromArray(Shape(1, 1), doubleArrayOf(500.0))

val res = broadcastOuterTensors(tensor1, tensor2, tensor3)

assertTrue(res[0].shape contentEquals intArrayOf(4, 2, 5, 3, 2, 3))
assertTrue(res[1].shape contentEquals intArrayOf(4, 2, 5, 3, 3, 3))
assertTrue(res[2].shape contentEquals intArrayOf(4, 2, 5, 3, 1, 1))
assertTrue(res[0].shape contentEquals Shape(4, 2, 5, 3, 2, 3))
assertTrue(res[1].shape contentEquals Shape(4, 2, 5, 3, 3, 3))
assertTrue(res[2].shape contentEquals Shape(4, 2, 5, 3, 1, 1))
}

@Test
fun testMinusTensor() = BroadcastDoubleTensorAlgebra.invoke {
val tensor1 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(intArrayOf(1, 3), doubleArrayOf(10.0, 20.0, 30.0))
val tensor3 = fromArray(intArrayOf(1, 1, 1), doubleArrayOf(500.0))
val tensor1 = fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(Shape(1, 3), doubleArrayOf(10.0, 20.0, 30.0))
val tensor3 = fromArray(Shape(1, 1, 1), doubleArrayOf(500.0))

val tensor21 = tensor2 - tensor1
val tensor31 = tensor3 - tensor1
val tensor32 = tensor3 - tensor2

assertTrue(tensor21.shape contentEquals intArrayOf(2, 3))
assertTrue(tensor21.shape contentEquals Shape(2, 3))
assertTrue(tensor21.source contentEquals doubleArrayOf(9.0, 18.0, 27.0, 6.0, 15.0, 24.0))

assertTrue(tensor31.shape contentEquals intArrayOf(1, 2, 3))
assertTrue(tensor31.shape contentEquals Shape(1, 2, 3))
assertTrue(
tensor31.source
contentEquals doubleArrayOf(499.0, 498.0, 497.0, 496.0, 495.0, 494.0)
)

assertTrue(tensor32.shape contentEquals intArrayOf(1, 1, 3))
assertTrue(tensor32.shape contentEquals Shape(1, 1, 3))
assertTrue(tensor32.source contentEquals doubleArrayOf(490.0, 480.0, 470.0))
}
@ -5,6 +5,7 @@

package space.kscience.kmath.tensors.core

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.structures.asBuffer
import kotlin.math.*
@ -13,7 +14,7 @@ import kotlin.test.assertTrue

internal class TestDoubleAnalyticTensorAlgebra {

val shape = intArrayOf(2, 1, 3, 2)
val shape = Shape(2, 1, 3, 2)
val buffer = doubleArrayOf(
27.1, 20.0, 19.84,
23.123, 3.0, 2.0,
@ -102,7 +103,7 @@ internal class TestDoubleAnalyticTensorAlgebra {
assertTrue { tensor.floor() eq expectedTensor(::floor) }
}

val shape2 = intArrayOf(2, 2)
val shape2 = Shape(2, 2)
val buffer2 = doubleArrayOf(
1.0, 2.0,
-3.0, 4.0
@ -114,13 +115,13 @@ internal class TestDoubleAnalyticTensorAlgebra {
assertTrue { tensor2.min() == -3.0 }
assertTrue {
tensor2.min(0, true) eq fromArray(
intArrayOf(1, 2),
Shape(1, 2),
doubleArrayOf(-3.0, 2.0)
)
}
assertTrue {
tensor2.min(1, false) eq fromArray(
intArrayOf(2),
Shape(2),
doubleArrayOf(1.0, -3.0)
)
}
@ -131,13 +132,13 @@ internal class TestDoubleAnalyticTensorAlgebra {
assertTrue { tensor2.max() == 4.0 }
assertTrue {
tensor2.max(0, true) eq fromArray(
intArrayOf(1, 2),
Shape(1, 2),
doubleArrayOf(1.0, 4.0)
)
}
assertTrue {
tensor2.max(1, false) eq fromArray(
intArrayOf(2),
Shape(2),
doubleArrayOf(2.0, 4.0)
)
}
@ -148,13 +149,13 @@ internal class TestDoubleAnalyticTensorAlgebra {
assertTrue { tensor2.sum() == 4.0 }
assertTrue {
tensor2.sum(0, true) eq fromArray(
intArrayOf(1, 2),
Shape(1, 2),
doubleArrayOf(-2.0, 6.0)
)
}
assertTrue {
tensor2.sum(1, false) eq fromArray(
intArrayOf(2),
Shape(2),
doubleArrayOf(3.0, 1.0)
)
}
@ -165,13 +166,13 @@ internal class TestDoubleAnalyticTensorAlgebra {
assertTrue { tensor2.mean() == 1.0 }
assertTrue {
tensor2.mean(0, true) eq fromArray(
intArrayOf(1, 2),
Shape(1, 2),
doubleArrayOf(-1.0, 3.0)
)
}
assertTrue {
tensor2.mean(1, false) eq fromArray(
intArrayOf(2),
Shape(2),
doubleArrayOf(1.5, 0.5)
)
}
@ -5,6 +5,8 @@

package space.kscience.kmath.tensors.core

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.contentEquals
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.internal.svd1d
import kotlin.math.abs
@ -17,7 +19,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
@Test
fun testDetLU() = DoubleTensorAlgebra {
val tensor = fromArray(
intArrayOf(2, 2, 2),
Shape(2, 2, 2),
doubleArrayOf(
1.0, 3.0,
1.0, 2.0,
@ -27,7 +29,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
)

val expectedTensor = fromArray(
intArrayOf(2, 1),
Shape(2, 1),
doubleArrayOf(
-1.0,
-7.0
@ -43,7 +45,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
fun testDet() = DoubleTensorAlgebra {
val expectedValue = 0.019827417
val m = fromArray(
intArrayOf(3, 3), doubleArrayOf(
Shape(3, 3), doubleArrayOf(
2.1843, 1.4391, -0.4845,
1.4391, 1.7772, 0.4055,
-0.4845, 0.4055, 0.7519
@ -57,7 +59,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
fun testDetSingle() = DoubleTensorAlgebra {
val expectedValue = 48.151623
val m = fromArray(
intArrayOf(1, 1), doubleArrayOf(
Shape(1, 1), doubleArrayOf(
expectedValue
)
)
@ -68,7 +70,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
@Test
fun testInvLU() = DoubleTensorAlgebra {
val tensor = fromArray(
intArrayOf(2, 2, 2),
Shape(2, 2, 2),
doubleArrayOf(
1.0, 0.0,
0.0, 2.0,
@ -78,7 +80,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {
)

val expectedTensor = fromArray(
intArrayOf(2, 2, 2), doubleArrayOf(
Shape(2, 2, 2), doubleArrayOf(
1.0, 0.0,
0.0, 0.5,
0.0, 1.0,
@ -92,14 +94,14 @@ internal class TestDoubleLinearOpsTensorAlgebra {

@Test
fun testScalarProduct() = DoubleTensorAlgebra {
val a = fromArray(intArrayOf(3), doubleArrayOf(1.8, 2.5, 6.8))
val b = fromArray(intArrayOf(3), doubleArrayOf(5.5, 2.6, 6.4))
val a = fromArray(Shape(3), doubleArrayOf(1.8, 2.5, 6.8))
val b = fromArray(Shape(3), doubleArrayOf(5.5, 2.6, 6.4))
assertEquals(a.dot(b).value(), 59.92)
}

@Test
fun testQR() = DoubleTensorAlgebra {
val shape = intArrayOf(2, 2, 2)
val shape = Shape(2, 2, 2)
val buffer = doubleArrayOf(
1.0, 3.0,
1.0, 2.0,
@ -120,7 +122,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {

@Test
fun testLU() = DoubleTensorAlgebra {
val shape = intArrayOf(2, 2, 2)
val shape = Shape(2, 2, 2)
val buffer = doubleArrayOf(
1.0, 3.0,
1.0, 2.0,
@ -140,9 +142,9 @@ internal class TestDoubleLinearOpsTensorAlgebra {

@Test
fun testCholesky() = DoubleTensorAlgebra {
val tensor = randomNormal(intArrayOf(2, 5, 5), 0)
val tensor = randomNormal(Shape(2, 5, 5), 0)
val sigma = (tensor matmul tensor.transposed()) + diagonalEmbedding(
fromArray(intArrayOf(2, 5), DoubleArray(10) { 0.1 })
fromArray(Shape(2, 5), DoubleArray(10) { 0.1 })
)
val low = sigma.cholesky()
val sigmChol = low matmul low.transposed()
@ -151,24 +153,24 @@ internal class TestDoubleLinearOpsTensorAlgebra {

@Test
fun testSVD1D() = DoubleTensorAlgebra {
val tensor2 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))

val res = svd1d(tensor2)

assertTrue(res.shape contentEquals intArrayOf(2))
assertTrue(res.shape contentEquals Shape(2))
assertTrue { abs(abs(res.source[0]) - 0.386) < 0.01 }
assertTrue { abs(abs(res.source[1]) - 0.922) < 0.01 }
}

@Test
fun testSVD() = DoubleTensorAlgebra {
testSVDFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)))
testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0)))
testSVDFor(fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)))
testSVDFor(fromArray(Shape(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0)))
}

@Test
fun testBatchedSVD() = DoubleTensorAlgebra {
val tensor = randomNormal(intArrayOf(2, 5, 3), 0)
val tensor = randomNormal(Shape(2, 5, 3), 0)
val (tensorU, tensorS, tensorV) = tensor.svd()
val tensorSVD = tensorU matmul (diagonalEmbedding(tensorS) matmul tensorV.transposed())
assertTrue(tensor.eq(tensorSVD))
@ -176,7 +178,7 @@ internal class TestDoubleLinearOpsTensorAlgebra {

@Test
fun testBatchedSymEig() = DoubleTensorAlgebra {
val tensor = randomNormal(shape = intArrayOf(2, 3, 3), 0)
val tensor = randomNormal(shape = Shape(2, 3, 3), 0)
val tensorSigma = tensor + tensor.transposed()
val (tensorS, tensorV) = tensorSigma.symEig()
val tensorSigmaCalc = tensorV matmul (diagonalEmbedding(tensorS) matmul tensorV.transposed())
@ -21,14 +21,14 @@ internal class TestDoubleTensor {
@Test
fun testValue() = DoubleTensorAlgebra {
val value = 12.5
val tensor = fromArray(intArrayOf(1), doubleArrayOf(value))
val tensor = fromArray(Shape(1), doubleArrayOf(value))
assertEquals(tensor.value(), value)
}

@OptIn(PerformancePitfall::class)
@Test
fun testStrides() = DoubleTensorAlgebra {
val tensor = fromArray(intArrayOf(2, 2), doubleArrayOf(3.5, 5.8, 58.4, 2.4))
val tensor = fromArray(Shape(2, 2), doubleArrayOf(3.5, 5.8, 58.4, 2.4))
assertEquals(tensor[intArrayOf(0, 1)], 5.8)
assertTrue(
tensor.elements().map { it.second }.toList()
@ -38,7 +38,7 @@ internal class TestDoubleTensor {

@Test
fun testGet() = DoubleTensorAlgebra {
val tensor = fromArray(intArrayOf(1, 2, 2), doubleArrayOf(3.5, 5.8, 58.4, 2.4))
val tensor = fromArray(Shape(1, 2, 2), doubleArrayOf(3.5, 5.8, 58.4, 2.4))
val matrix = tensor.getTensor(0).asDoubleTensor2D()
assertEquals(matrix[0, 1], 5.8)

@ -67,7 +67,7 @@ internal class TestDoubleTensor {
val doubleArray = DoubleBuffer(1.0, 2.0, 3.0)

// create ND buffers, no data is copied
val ndArray: MutableBufferND<Double> = DoubleBufferND(ColumnStrides(intArrayOf(3)), doubleArray)
val ndArray: MutableBufferND<Double> = DoubleBufferND(ColumnStrides(Shape(3)), doubleArray)

// map to tensors
val tensorArray = ndArray.asDoubleTensor() // Data is copied because of strides change.
@ -91,7 +91,7 @@ internal class TestDoubleTensor {

@Test
fun test2D() = with(DoubleTensorAlgebra) {
val tensor: DoubleTensor = structureND(intArrayOf(3, 3)) { (i, j) -> (i - j).toDouble() }
val tensor: DoubleTensor = structureND(Shape(3, 3)) { (i, j) -> (i - j).toDouble() }
//println(tensor.toPrettyString())
val tensor2d = tensor.asDoubleTensor2D()
assertBufferEquals(DoubleBuffer(1.0, 0.0, -1.0), tensor2d.rows[1])
@ -100,7 +100,7 @@ internal class TestDoubleTensor {

@Test
fun testMatrixIteration() = with(DoubleTensorAlgebra) {
val tensor = structureND(intArrayOf(3, 3, 3, 3)) { index -> index.sum().toDouble() }
val tensor = structureND(Shape(3, 3, 3, 3)) { index -> index.sum().toDouble() }
tensor.forEachMatrix { index, matrix ->
println(index.joinToString { it.toString() })
println(matrix)
@ -6,6 +6,8 @@
package space.kscience.kmath.tensors.core

import space.kscience.kmath.nd.Shape
import space.kscience.kmath.nd.contentEquals
import space.kscience.kmath.nd.get
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.testutils.assertBufferEquals
@ -18,62 +20,62 @@ internal class TestDoubleTensorAlgebra {

@Test
fun testDoublePlus() = DoubleTensorAlgebra {
val tensor = fromArray(intArrayOf(2), doubleArrayOf(1.0, 2.0))
val tensor = fromArray(Shape(2), doubleArrayOf(1.0, 2.0))
val res = 10.0 + tensor
assertTrue(res.source contentEquals doubleArrayOf(11.0, 12.0))
}

@Test
fun testDoubleDiv() = DoubleTensorAlgebra {
val tensor = fromArray(intArrayOf(2), doubleArrayOf(2.0, 4.0))
val tensor = fromArray(Shape(2), doubleArrayOf(2.0, 4.0))
val res = 2.0 / tensor
assertTrue(res.source contentEquals doubleArrayOf(1.0, 0.5))
}

@Test
fun testDivDouble() = DoubleTensorAlgebra {
val tensor = fromArray(intArrayOf(2), doubleArrayOf(10.0, 5.0))
val tensor = fromArray(Shape(2), doubleArrayOf(10.0, 5.0))
val res = tensor / 2.5
assertTrue(res.source contentEquals doubleArrayOf(4.0, 2.0))
}

@Test
fun testTranspose1x1() = DoubleTensorAlgebra {
val tensor = fromArray(intArrayOf(1), doubleArrayOf(0.0))
val tensor = fromArray(Shape(1), doubleArrayOf(0.0))
val res = tensor.transposed(0, 0)

assertTrue(res.source contentEquals doubleArrayOf(0.0))
assertTrue(res.shape contentEquals intArrayOf(1))
assertTrue(res.asDoubleTensor().source contentEquals doubleArrayOf(0.0))
assertTrue(res.shape contentEquals Shape(1))
}

@Test
fun testTranspose3x2() = DoubleTensorAlgebra {
val tensor = fromArray(intArrayOf(3, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor = fromArray(Shape(3, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val res = tensor.transposed(1, 0)

assertTrue(res.source contentEquals doubleArrayOf(1.0, 3.0, 5.0, 2.0, 4.0, 6.0))
assertTrue(res.shape contentEquals intArrayOf(2, 3))
assertTrue(res.asDoubleTensor().source contentEquals doubleArrayOf(1.0, 3.0, 5.0, 2.0, 4.0, 6.0))
assertTrue(res.shape contentEquals Shape(2, 3))
}

@Test
fun testTranspose1x2x3() = DoubleTensorAlgebra {
val tensor = fromArray(intArrayOf(1, 2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor = fromArray(Shape(1, 2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val res01 = tensor.transposed(0, 1)
val res02 = tensor.transposed(-3, 2)
val res12 = tensor.transposed()

assertTrue(res01.shape contentEquals intArrayOf(2, 1, 3))
assertTrue(res02.shape contentEquals intArrayOf(3, 2, 1))
assertTrue(res12.shape contentEquals intArrayOf(1, 3, 2))
assertTrue(res01.shape contentEquals Shape(2, 1, 3))
assertTrue(res02.shape contentEquals Shape(3, 2, 1))
assertTrue(res12.shape contentEquals Shape(1, 3, 2))

assertTrue(res01.source contentEquals doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
assertTrue(res02.source contentEquals doubleArrayOf(1.0, 4.0, 2.0, 5.0, 3.0, 6.0))
assertTrue(res12.source contentEquals doubleArrayOf(1.0, 4.0, 2.0, 5.0, 3.0, 6.0))
assertTrue(res01.asDoubleTensor().source contentEquals doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
assertTrue(res02.asDoubleTensor().source contentEquals doubleArrayOf(1.0, 4.0, 2.0, 5.0, 3.0, 6.0))
assertTrue(res12.asDoubleTensor().source contentEquals doubleArrayOf(1.0, 4.0, 2.0, 5.0, 3.0, 6.0))
}

@Test
fun testLinearStructure() = DoubleTensorAlgebra {
val shape = intArrayOf(3)
val shape = Shape(3)
val tensorA = full(value = -4.5, shape = shape)
val tensorB = full(value = 10.9, shape = shape)
val tensorC = full(value = 789.3, shape = shape)
@ -105,28 +107,28 @@ internal class TestDoubleTensorAlgebra {

@Test
fun testDot() = DoubleTensorAlgebra {
val tensor1 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor11 = fromArray(intArrayOf(3, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(intArrayOf(3), doubleArrayOf(10.0, 20.0, 30.0))
val tensor3 = fromArray(intArrayOf(1, 1, 3), doubleArrayOf(-1.0, -2.0, -3.0))
val tensor4 = fromArray(intArrayOf(2, 3, 3), (1..18).map { it.toDouble() }.toDoubleArray())
val tensor5 = fromArray(intArrayOf(2, 3, 3), (1..18).map { 1 + it.toDouble() }.toDoubleArray())
val tensor1 = fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor11 = fromArray(Shape(3, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(Shape(3), doubleArrayOf(10.0, 20.0, 30.0))
val tensor3 = fromArray(Shape(1, 1, 3), doubleArrayOf(-1.0, -2.0, -3.0))
val tensor4 = fromArray(Shape(2, 3, 3), (1..18).map { it.toDouble() }.toDoubleArray())
val tensor5 = fromArray(Shape(2, 3, 3), (1..18).map { 1 + it.toDouble() }.toDoubleArray())

val res12 = tensor1.dot(tensor2)
assertTrue(res12.source contentEquals doubleArrayOf(140.0, 320.0))
assertTrue(res12.shape contentEquals intArrayOf(2))
assertTrue(res12.shape contentEquals Shape(2))

val res32 = tensor3.matmul(tensor2)
assertTrue(res32.source contentEquals doubleArrayOf(-140.0))
assertTrue(res32.shape contentEquals intArrayOf(1, 1))
assertTrue(res32.shape contentEquals Shape(1, 1))

val res22 = tensor2.dot(tensor2)
assertTrue(res22.source contentEquals doubleArrayOf(1400.0))
assertTrue(res22.shape contentEquals intArrayOf(1))
assertTrue(res22.shape contentEquals Shape(1))

val res11 = tensor1.dot(tensor11)
assertTrue(res11.source contentEquals doubleArrayOf(22.0, 28.0, 49.0, 64.0))
assertTrue(res11.shape contentEquals intArrayOf(2, 2))
assertTrue(res11.shape contentEquals Shape(2, 2))

val res45 = tensor4.matmul(tensor5)
assertTrue(
@ -135,44 +137,44 @@ internal class TestDoubleTensorAlgebra {
468.0, 501.0, 534.0, 594.0, 636.0, 678.0, 720.0, 771.0, 822.0
)
)
assertTrue(res45.shape contentEquals intArrayOf(2, 3, 3))
assertTrue(res45.shape contentEquals Shape(2, 3, 3))
}

@Test
fun testDiagonalEmbedding() = DoubleTensorAlgebra {
val tensor1 = fromArray(intArrayOf(3), doubleArrayOf(10.0, 20.0, 30.0))
val tensor2 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor3 = zeros(intArrayOf(2, 3, 4, 5))
val tensor1 = fromArray(Shape(3), doubleArrayOf(10.0, 20.0, 30.0))
val tensor2 = fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor3 = zeros(Shape(2, 3, 4, 5))

assertTrue(
diagonalEmbedding(tensor3, 0, 3, 4).shape contentEquals
intArrayOf(2, 3, 4, 5, 5)
Shape(2, 3, 4, 5, 5)
)
assertTrue(
diagonalEmbedding(tensor3, 1, 3, 4).shape contentEquals
intArrayOf(2, 3, 4, 6, 6)
Shape(2, 3, 4, 6, 6)
)
assertTrue(
diagonalEmbedding(tensor3, 2, 0, 3).shape contentEquals
intArrayOf(7, 2, 3, 7, 4)
Shape(7, 2, 3, 7, 4)
)

val diagonal1 = diagonalEmbedding(tensor1, 0, 1, 0)
assertTrue(diagonal1.shape contentEquals intArrayOf(3, 3))
assertTrue(diagonal1.shape contentEquals Shape(3, 3))
assertTrue(
diagonal1.source contentEquals
doubleArrayOf(10.0, 0.0, 0.0, 0.0, 20.0, 0.0, 0.0, 0.0, 30.0)
)

val diagonal1Offset = diagonalEmbedding(tensor1, 1, 1, 0)
assertTrue(diagonal1Offset.shape contentEquals intArrayOf(4, 4))
assertTrue(diagonal1Offset.shape contentEquals Shape(4, 4))
assertTrue(
diagonal1Offset.source contentEquals
doubleArrayOf(0.0, 0.0, 0.0, 0.0, 10.0, 0.0, 0.0, 0.0, 0.0, 20.0, 0.0, 0.0, 0.0, 0.0, 30.0, 0.0)
)

val diagonal2 = diagonalEmbedding(tensor2, 1, 0, 2)
assertTrue(diagonal2.shape contentEquals intArrayOf(4, 2, 4))
assertTrue(diagonal2.shape contentEquals Shape(4, 2, 4))
assertTrue(
diagonal2.source contentEquals
doubleArrayOf(
@ -186,9 +188,9 @@ internal class TestDoubleTensorAlgebra {

@Test
fun testEq() = DoubleTensorAlgebra {
val tensor1 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor3 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 5.0))
val tensor1 = fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor2 = fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
val tensor3 = fromArray(Shape(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 5.0))

assertTrue(tensor1 eq tensor1)
assertTrue(tensor1 eq tensor2)
@ -202,7 +204,7 @@ internal class TestDoubleTensorAlgebra {
val l = tensor.getTensor(0).map { it + 1.0 }
val r = tensor.getTensor(1).map { it - 1.0 }
val res = l + r
assertTrue { intArrayOf(5, 5) contentEquals res.shape }
assertTrue { Shape(5, 5) contentEquals res.shape }
assertEquals(2.0, res[4, 4])
}
}
@ -9,6 +9,7 @@ package space.kscience.kmath.viktor

import org.jetbrains.bio.viktor.F64Array
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.misc.UnsafeKMathAPI
import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.nd.*
import space.kscience.kmath.operations.DoubleField
@ -31,8 +32,9 @@ public open class ViktorFieldOpsND :

override val elementAlgebra: DoubleField get() = DoubleField

override fun structureND(shape: IntArray, initializer: DoubleField.(IntArray) -> Double): ViktorStructureND =
F64Array(*shape).apply {
@OptIn(UnsafeKMathAPI::class)
override fun structureND(shape: Shape, initializer: DoubleField.(IntArray) -> Double): ViktorStructureND =
F64Array(*shape.asArray()).apply {
ColumnStrides(shape).asSequence().forEach { index ->
set(value = DoubleField.initializer(index), indices = index)
}
@ -40,23 +42,26 @@ public open class ViktorFieldOpsND :

override fun StructureND<Double>.unaryMinus(): StructureND<Double> = -1 * this

@OptIn(UnsafeKMathAPI::class)
@PerformancePitfall
override fun StructureND<Double>.map(transform: DoubleField.(Double) -> Double): ViktorStructureND =
F64Array(*shape).apply {
ColumnStrides(shape).asSequence().forEach { index ->
F64Array(*shape.asArray()).apply {
ColumnStrides(Shape(shape)).asSequence().forEach { index ->
set(value = DoubleField.transform(this@map[index]), indices = index)
}
}.asStructure()

@OptIn(UnsafeKMathAPI::class)
@PerformancePitfall
override fun StructureND<Double>.mapIndexed(
transform: DoubleField.(index: IntArray, Double) -> Double,
): ViktorStructureND = F64Array(*shape).apply {
ColumnStrides(shape).asSequence().forEach { index ->
): ViktorStructureND = F64Array(*shape.asArray()).apply {
ColumnStrides(Shape(shape)).asSequence().forEach { index ->
set(value = DoubleField.transform(index, this@mapIndexed[index]), indices = index)
}
}.asStructure()

@OptIn(UnsafeKMathAPI::class)
@PerformancePitfall
override fun zip(
left: StructureND<Double>,
@ -64,7 +69,7 @@ public open class ViktorFieldOpsND :
transform: DoubleField.(Double, Double) -> Double,
): ViktorStructureND {
require(left.shape.contentEquals(right.shape))
return F64Array(*left.shape).apply {
return F64Array(*left.shape.asArray()).apply {
ColumnStrides(left.shape).asSequence().forEach { index ->
set(value = DoubleField.transform(left[index], right[index]), indices = index)
}
@ -119,13 +124,17 @@ public val DoubleField.viktorAlgebra: ViktorFieldOpsND get() = ViktorFieldOpsND

@OptIn(UnstableKMathAPI::class)
public open class ViktorFieldND(
override val shape: Shape,
private val shapeAsArray: IntArray,
) : ViktorFieldOpsND(), FieldND<Double, DoubleField>, NumbersAddOps<StructureND<Double>> {
override val zero: ViktorStructureND by lazy { F64Array.full(init = 0.0, shape = shape).asStructure() }
override val one: ViktorStructureND by lazy { F64Array.full(init = 1.0, shape = shape).asStructure() }

override val shape: Shape = Shape(shapeAsArray)

override val zero: ViktorStructureND by lazy { F64Array.full(init = 0.0, shape = shapeAsArray).asStructure() }
override val one: ViktorStructureND by lazy { F64Array.full(init = 1.0, shape = shapeAsArray).asStructure() }

override fun number(value: Number): ViktorStructureND =
F64Array.full(init = value.toDouble(), shape = shape).asStructure()
F64Array.full(init = value.toDouble(), shape = shapeAsArray).asStructure()
}

public fun DoubleField.viktorAlgebra(vararg shape: Int): ViktorFieldND = ViktorFieldND(shape)
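A hedged sketch of the Viktor-backed field after this change: the vararg factory still accepts plain ints and builds the Shape internally, while structureND now takes a Shape. The function name is illustrative, and it assumes the viktorAlgebra factory lives in space.kscience.kmath.viktor as shown above.

import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.viktor.viktorAlgebra

fun viktorSketch() {
    val field = DoubleField.viktorAlgebra(2, 2)
    // The initializer receives the multi-index as an IntArray.
    val matrix = field.structureND(field.shape) { (i, j) -> (i + j).toDouble() }
    println(matrix)
}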
@ -9,13 +9,16 @@ import org.jetbrains.bio.viktor.F64Array
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.nd.ColumnStrides
import space.kscience.kmath.nd.MutableStructureND
import space.kscience.kmath.nd.Shape

@Suppress("OVERRIDE_BY_INLINE", "NOTHING_TO_INLINE")
public class ViktorStructureND(public val f64Buffer: F64Array) : MutableStructureND<Double> {
override val shape: IntArray get() = f64Buffer.shape
override val shape: Shape get() = Shape(f64Buffer.shape)

@OptIn(PerformancePitfall::class)
override inline fun get(index: IntArray): Double = f64Buffer.get(*index)

@OptIn(PerformancePitfall::class)
override inline fun set(index: IntArray, value: Double) {
f64Buffer.set(*index, value = value)
}