diff --git a/CHANGELOG.md b/CHANGELOG.md index acec30836..feb925436 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - Algebra now has an obligatory `bufferFactory` (#477). ### Changed +- Major refactor of tensors (only minor API changes) - Kotlin 1.7.20 - `LazyStructure` `deffered` -> `async` to comply with coroutines code style - Default `dot` operation in tensor algebra no longer support broadcasting. Instead `matmul` operation is added to `DoubleTensorAlgebra`. diff --git a/build.gradle.kts b/build.gradle.kts index 890824d65..120b0f35d 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -1,9 +1,10 @@ +import space.kscience.gradle.isInDevelopment import space.kscience.gradle.useApache2Licence import space.kscience.gradle.useSPCTeam plugins { id("space.kscience.gradle.project") - id("org.jetbrains.kotlinx.kover") version "0.5.0" + id("org.jetbrains.kotlinx.kover") version "0.6.0" } allprojects { @@ -14,7 +15,7 @@ allprojects { } group = "space.kscience" - version = "0.3.1-dev-3" + version = "0.3.1-dev-4" } subprojects { @@ -75,8 +76,14 @@ ksciencePublish { useApache2Licence() useSPCTeam() } - github("kmath", "SciProgCentre", addToRelease = false) - space("https://maven.pkg.jetbrains.space/mipt-npm/p/sci/dev") + github("kmath", "SciProgCentre") + space( + if (isInDevelopment) { + "https://maven.pkg.jetbrains.space/mipt-npm/p/sci/dev" + } else { + "https://maven.pkg.jetbrains.space/mipt-npm/p/sci/release" + } + ) sonatype() } diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt index 8082ed8e2..d0b24de70 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/OLSWithSVD.kt @@ -52,7 +52,7 @@ fun main() { // inverse Sigma matrix can be restored from singular values with diagonalEmbedding function val sigma = diagonalEmbedding(singValues.map{ if (abs(it) < 1e-3) 0.0 else 1.0/it }) - val alphaOLS = v dot sigma dot u.transpose() dot y + val alphaOLS = v dot sigma dot u.transposed() dot y println("Estimated alpha:\n" + "$alphaOLS") diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt index e5d1ef9f8..1768be283 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/PCA.kt @@ -27,7 +27,7 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with println("y:\n$y") // stack them into single dataset - val dataset = stack(listOf(x, y)).transpose() + val dataset = stack(listOf(x, y)).transposed() // normalize both x and y val xMean = x.mean() diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt index bafdf9b21..64cc138d7 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/linearSystemSolvingWithLUP.kt @@ -75,7 +75,7 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear // solveLT(l, b) function can be easily adapted for upper triangular matrix by the permutation matrix revMat // create it by placing ones on side diagonal - val revMat = u.zeroesLike() + val revMat = zeroesLike(u) val n = revMat.shape[0] for (i in 0 until n) { revMat[intArrayOf(i, n - 1 - i)] = 1.0 
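The example hunks above are mechanical renames (`transpose` → `transposed`, `u.zeroesLike()` → `zeroesLike(u)`), but the OLSWithSVD change is a good place to restate the whole pseudo-inverse step with the new names. A minimal sketch, assuming the parameterless `svd()` overload from `LinearOpsTensorAlgebra` and the `fromArray` factory shown later in this diff:

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
import kotlin.math.abs

fun main() = DoubleTensorAlgebra {
    // hypothetical design matrix (3 samples, 2 features) and targets
    val x = fromArray(intArrayOf(3, 2), doubleArrayOf(1.0, 1.0, 1.0, 2.0, 1.0, 3.0))
    val y = fromArray(intArrayOf(3, 1), doubleArrayOf(1.0, 2.0, 3.0))

    val (u, singValues, v) = x.svd()

    // invert only the non-negligible singular values, as in the example above
    val sigmaInv = diagonalEmbedding(singValues.map { if (abs(it) < 1e-3) 0.0 else 1.0 / it })

    // note the renamed operation: transposed() instead of transpose()
    val alphaOLS = v dot sigmaInv dot u.transposed() dot y
    println("Estimated alpha:\n$alphaOLS")
}
```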
diff --git a/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt b/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt index 7289d86c4..84d6dcd22 100644 --- a/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt +++ b/examples/src/main/kotlin/space/kscience/kmath/tensors/neuralNetwork.kt @@ -3,16 +3,14 @@ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. */ -@file:OptIn(PerformancePitfall::class) - package space.kscience.kmath.tensors -import space.kscience.kmath.misc.PerformancePitfall +import space.kscience.kmath.operations.asIterable import space.kscience.kmath.operations.invoke import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra import space.kscience.kmath.tensors.core.DoubleTensor import space.kscience.kmath.tensors.core.DoubleTensorAlgebra -import space.kscience.kmath.tensors.core.copyArray +import space.kscience.kmath.tensors.core.toDoubleTensor import kotlin.math.sqrt const val seed = 100500L @@ -82,9 +80,9 @@ class Dense( } override fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor = DoubleTensorAlgebra { - val gradInput = outputError dot weights.transpose() + val gradInput = outputError dot weights.transposed() - val gradW = input.transpose() dot outputError + val gradW = input.transposed() dot outputError val gradBias = outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble() weights -= learningRate * gradW @@ -109,12 +107,11 @@ fun accuracy(yPred: DoubleTensor, yTrue: DoubleTensor): Double { } // neural network class -@OptIn(ExperimentalStdlibApi::class) class NeuralNetwork(private val layers: List) { private fun softMaxLoss(yPred: DoubleTensor, yTrue: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra { - val onesForAnswers = yPred.zeroesLike() - yTrue.copyArray().forEachIndexed { index, labelDouble -> + val onesForAnswers = zeroesLike(yPred) + yTrue.source.asIterable().forEachIndexed { index, labelDouble -> val label = labelDouble.toInt() onesForAnswers[intArrayOf(index, label)] = 1.0 } @@ -166,7 +163,7 @@ class NeuralNetwork(private val layers: List) { for ((xBatch, yBatch) in iterBatch(xTrain, yTrain)) { train(xBatch, yBatch) } - println("Accuracy:${accuracy(yTrain, predict(xTrain).argMax(1, true).asDouble())}") + println("Accuracy:${accuracy(yTrain, predict(xTrain).argMax(1, true).toDoubleTensor())}") } } @@ -233,7 +230,7 @@ fun main() = BroadcastDoubleTensorAlgebra { val prediction = model.predict(xTest) // process raw prediction via argMax - val predictionLabels = prediction.argMax(1, true).asDouble() + val predictionLabels = prediction.argMax(1, true).toDoubleTensor() // find out accuracy val acc = accuracy(yTest, predictionLabels) diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/StructureND.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/StructureND.kt index aece10c2e..b2da083bb 100644 --- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/StructureND.kt +++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/nd/StructureND.kt @@ -237,19 +237,4 @@ public interface MutableStructureND : StructureND { */ public operator fun MutableStructureND.set(vararg index: Int, value: T) { set(index, value) -} - -/** - * Transform a structure element-by element in place. 
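In the neuralNetwork.kt hunk above, the removed `copyArray()` helper is replaced by iterating the tensor's `source` buffer directly when building the one-hot target matrix. A self-contained sketch of that step (hypothetical `oneHot` helper; labels are assumed to be class indices stored as doubles, one row per sample):

```kotlin
import space.kscience.kmath.operations.asIterable
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
import space.kscience.kmath.tensors.core.DoubleTensor

fun oneHot(labels: DoubleTensor, classes: Int): DoubleTensor = BroadcastDoubleTensorAlgebra {
    val result = zeros(intArrayOf(labels.shape[0], classes))
    // each label value selects the column to switch on in its own row
    labels.source.asIterable().forEachIndexed { row, label ->
        result[intArrayOf(row, label.toInt())] = 1.0
    }
    result
}
```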
- */ -@OptIn(PerformancePitfall::class) -public inline fun MutableStructureND.mapInPlace(action: (index: IntArray, t: T) -> T): Unit = - elements().forEach { (index, oldValue) -> this[index] = action(index, oldValue) } - -public inline fun StructureND.zip( - struct: StructureND, - crossinline block: (T, T) -> T, -): StructureND { - require(shape.contentEquals(struct.shape)) { "Shape mismatch in structure combination" } - return StructureND.auto(shape) { block(this[it], struct[it]) } -} +} \ No newline at end of file diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/Buffer.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/Buffer.kt index 2f9f8a8e0..5cfafe5a5 100644 --- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/Buffer.kt +++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/Buffer.kt @@ -114,7 +114,6 @@ public interface Buffer { * * The [size] is specified, and each element is calculated by calling the specified [initializer] function. */ - @Suppress("UNCHECKED_CAST") public inline fun auto(size: Int, initializer: (Int) -> T): Buffer = auto(T::class, size, initializer) } diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/DoubleBuffer.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/DoubleBuffer.kt index e39c22158..4dd9003f3 100644 --- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/DoubleBuffer.kt +++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/DoubleBuffer.kt @@ -47,11 +47,6 @@ public inline fun DoubleBuffer(size: Int, init: (Int) -> Double): DoubleBuffer = */ public fun DoubleBuffer(vararg doubles: Double): DoubleBuffer = DoubleBuffer(doubles) -/** - * Simplified [DoubleBuffer] to array comparison - */ -public fun DoubleBuffer.contentEquals(vararg doubles: Double): Boolean = array.contentEquals(doubles) - /** * Returns a new [DoubleArray] containing all the elements of this [Buffer]. 
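With `StructureND.zip` removed here, element-wise combination is now requested through an algebra receiver; the NumberNDFieldTest change a little further below shows the migrated call. A sketch of the same pattern in the tensor algebra, whose `zip(left, right, transform)` appears later in this diff:

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main() = DoubleTensorAlgebra {
    val a = fromArray(intArrayOf(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0))
    val b = full(2.0, intArrayOf(2, 2))
    // zip is resolved on the algebra receiver, not on the structure itself
    val division = zip(a, b) { l, r -> l / r }
    println(division)
}
```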
*/ diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/IntBuffer.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/IntBuffer.kt index 8477fc13b..f80ad0ff7 100644 --- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/IntBuffer.kt +++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/structures/IntBuffer.kt @@ -24,8 +24,7 @@ public value class IntBuffer(public val array: IntArray) : MutableBuffer { override operator fun iterator(): IntIterator = array.iterator() - override fun copy(): MutableBuffer = - IntBuffer(array.copyOf()) + override fun copy(): IntBuffer = IntBuffer(array.copyOf()) } /** diff --git a/kmath-core/src/commonTest/kotlin/space/kscience/kmath/structures/NumberNDFieldTest.kt b/kmath-core/src/commonTest/kotlin/space/kscience/kmath/structures/NumberNDFieldTest.kt index 62c4811e8..a54af571e 100644 --- a/kmath-core/src/commonTest/kotlin/space/kscience/kmath/structures/NumberNDFieldTest.kt +++ b/kmath-core/src/commonTest/kotlin/space/kscience/kmath/structures/NumberNDFieldTest.kt @@ -74,7 +74,9 @@ class NumberNDFieldTest { @Test fun combineTest() { - val division = array1.zip(array2, Double::div) + algebra { + val division = zip(array1, array2) { l, r -> l / r } + } } object L2Norm : Norm, Double> { diff --git a/kmath-for-real/build.gradle.kts b/kmath-for-real/build.gradle.kts index 308b8d732..1e964d99c 100644 --- a/kmath-for-real/build.gradle.kts +++ b/kmath-for-real/build.gradle.kts @@ -2,13 +2,13 @@ plugins { id("space.kscience.gradle.mpp") } -kscience{ +kscience { native() -} - -kotlin.sourceSets.commonMain { dependencies { - api(project(":kmath-core")) + api(projects.kmathCore) + } + dependencies("commonTest") { + implementation(projects.testUtils) } } @@ -24,21 +24,21 @@ readme { feature( id = "DoubleVector", ref = "src/commonMain/kotlin/space/kscience/kmath/real/DoubleVector.kt" - ){ + ) { "Numpy-like operations for Buffers/Points" } feature( id = "DoubleMatrix", ref = "src/commonMain/kotlin/space/kscience/kmath/real/DoubleMatrix.kt" - ){ + ) { "Numpy-like operations for 2d real structures" } feature( id = "grids", ref = "src/commonMain/kotlin/space/kscience/kmath/structures/grids.kt" - ){ + ) { "Uniform grid generators" } } diff --git a/kmath-for-real/src/commonTest/kotlin/space/kscience/kmath/real/DoubleMatrixTest.kt b/kmath-for-real/src/commonTest/kotlin/space/kscience/kmath/real/DoubleMatrixTest.kt index 89e55c6d4..0d016116d 100644 --- a/kmath-for-real/src/commonTest/kotlin/space/kscience/kmath/real/DoubleMatrixTest.kt +++ b/kmath-for-real/src/commonTest/kotlin/space/kscience/kmath/real/DoubleMatrixTest.kt @@ -11,7 +11,7 @@ import space.kscience.kmath.misc.PerformancePitfall import space.kscience.kmath.misc.UnstableKMathAPI import space.kscience.kmath.nd.StructureND import space.kscience.kmath.operations.algebra -import space.kscience.kmath.structures.contentEquals +import space.kscience.kmath.testutils.contentEquals import kotlin.test.Test import kotlin.test.assertEquals import kotlin.test.assertTrue diff --git a/kmath-multik/src/commonMain/kotlin/space/kscience/kmath/multik/MultikTensorAlgebra.kt b/kmath-multik/src/commonMain/kotlin/space/kscience/kmath/multik/MultikTensorAlgebra.kt index e32221dbf..194c5fcee 100644 --- a/kmath-multik/src/commonMain/kotlin/space/kscience/kmath/multik/MultikTensorAlgebra.kt +++ b/kmath-multik/src/commonMain/kotlin/space/kscience/kmath/multik/MultikTensorAlgebra.kt @@ -72,6 +72,20 @@ public abstract class MultikTensorAlgebra>( } } + /** + * Transform a structure 
element-by-element in place. + */ + public inline fun MutableStructureND.mapIndexedInPlace(operation: (index: IntArray, t: T) -> T): Unit { + if (this is MultikTensor) { + array.multiIndices.iterator().forEach { + set(it, operation(it, get(it))) + } + } else { + indices.forEach { set(it, operation(it, get(it))) } + } + } + + @OptIn(PerformancePitfall::class) override fun zip(left: StructureND, right: StructureND, transform: A.(T, T) -> T): MultikTensor { require(left.shape.contentEquals(right.shape)) { "ND array shape mismatch" } //TODO replace by ShapeMismatchException @@ -121,7 +135,7 @@ public abstract class MultikTensorAlgebra>( if (this is MultikTensor) { array.plusAssign(value) } else { - mapInPlace { _, t -> elementAlgebra.add(t, value) } + mapIndexedInPlace { _, t -> elementAlgebra.add(t, value) } } } @@ -129,7 +143,7 @@ public abstract class MultikTensorAlgebra>( if (this is MultikTensor) { array.plusAssign(arg.asMultik().array) } else { - mapInPlace { index, t -> elementAlgebra.add(t, arg[index]) } + mapIndexedInPlace { index, t -> elementAlgebra.add(t, arg[index]) } } } @@ -145,7 +159,7 @@ public abstract class MultikTensorAlgebra>( if (this is MultikTensor) { array.minusAssign(value) } else { - mapInPlace { _, t -> elementAlgebra.run { t - value } } + mapIndexedInPlace { _, t -> elementAlgebra.run { t - value } } } } @@ -153,7 +167,7 @@ public abstract class MultikTensorAlgebra>( if (this is MultikTensor) { array.minusAssign(arg.asMultik().array) } else { - mapInPlace { index, t -> elementAlgebra.run { t - arg[index] } } + mapIndexedInPlace { index, t -> elementAlgebra.run { t - arg[index] } } } } @@ -170,7 +184,7 @@ public abstract class MultikTensorAlgebra>( if (this is MultikTensor) { array.timesAssign(value) } else { - mapInPlace { _, t -> elementAlgebra.multiply(t, value) } + mapIndexedInPlace { _, t -> elementAlgebra.multiply(t, value) } } } @@ -178,7 +192,7 @@ public abstract class MultikTensorAlgebra>( if (this is MultikTensor) { array.timesAssign(arg.asMultik().array) } else { - mapInPlace { index, t -> elementAlgebra.multiply(t, arg[index]) } + mapIndexedInPlace { index, t -> elementAlgebra.multiply(t, arg[index]) } } } @@ -187,7 +201,7 @@ public abstract class MultikTensorAlgebra>( override fun Tensor.getTensor(i: Int): MultikTensor = asMultik().array.mutableView(i).wrap() - override fun Tensor.transpose(i: Int, j: Int): MultikTensor = asMultik().array.transpose(i, j).wrap() + override fun Tensor.transposed(i: Int, j: Int): MultikTensor = asMultik().array.transpose(i, j).wrap() override fun Tensor.view(shape: IntArray): MultikTensor { require(shape.all { it > 0 }) @@ -283,7 +297,7 @@ public abstract class MultikDivisionTensorAlgebra>( if (this is MultikTensor) { array.divAssign(value) } else { - mapInPlace { _, t -> elementAlgebra.divide(t, value) } + mapIndexedInPlace { _, t -> elementAlgebra.divide(t, value) } } } @@ -291,7 +305,7 @@ public abstract class MultikDivisionTensorAlgebra>( if (this is MultikTensor) { array.divAssign(arg.asMultik().array) } else { - mapInPlace { index, t -> elementAlgebra.divide(t, arg[index]) } + mapIndexedInPlace { index, t -> elementAlgebra.divide(t, arg[index]) } } } } \ No newline at end of file diff --git a/kmath-nd4j/src/main/kotlin/space/kscience/kmath/nd4j/Nd4jTensorAlgebra.kt b/kmath-nd4j/src/main/kotlin/space/kscience/kmath/nd4j/Nd4jTensorAlgebra.kt index efd3ded70..c1772683f 100644 --- a/kmath-nd4j/src/main/kotlin/space/kscience/kmath/nd4j/Nd4jTensorAlgebra.kt +++
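The `mapIndexedInPlace` introduced above keeps a fast path for Multik-backed tensors (iterating `multiIndices` of the underlying array) and falls back to generic index iteration otherwise. The `StructureND.mapInPlace` removed from kmath-core earlier in this diff did the same job via `elements()`; a standalone sketch of that generic fallback, under a hypothetical helper name:

```kotlin
import space.kscience.kmath.misc.PerformancePitfall
import space.kscience.kmath.nd.MutableStructureND

/**
 * In-place indexed update through the universal (and slow) element path.
 * A sketch of the fallback branch, usable for any mutable ND structure.
 */
@OptIn(PerformancePitfall::class)
fun <T> MutableStructureND<T>.updateEach(operation: (index: IntArray, value: T) -> T) {
    elements().forEach { (index, value) -> set(index, operation(index, value)) }
}
```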
b/kmath-nd4j/src/main/kotlin/space/kscience/kmath/nd4j/Nd4jTensorAlgebra.kt @@ -96,7 +96,7 @@ public sealed interface Nd4jTensorAlgebra> : AnalyticTe override fun StructureND.unaryMinus(): Nd4jArrayStructure = ndArray.neg().wrap() override fun Tensor.getTensor(i: Int): Nd4jArrayStructure = ndArray.slice(i.toLong()).wrap() - override fun Tensor.transpose(i: Int, j: Int): Nd4jArrayStructure = ndArray.swapAxes(i, j).wrap() + override fun Tensor.transposed(i: Int, j: Int): Nd4jArrayStructure = ndArray.swapAxes(i, j).wrap() override fun StructureND.dot(other: StructureND): Nd4jArrayStructure = ndArray.mmul(other.ndArray).wrap() override fun StructureND.min(dim: Int, keepDim: Boolean): Nd4jArrayStructure = diff --git a/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/TensorFlowAlgebra.kt b/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/TensorFlowAlgebra.kt index 346750f88..74fcf2d7d 100644 --- a/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/TensorFlowAlgebra.kt +++ b/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/TensorFlowAlgebra.kt @@ -188,7 +188,7 @@ public abstract class TensorFlowAlgebra> internal c StridedSliceHelper.stridedSlice(ops.scope(), it, Indices.at(i.toLong())) } - override fun Tensor.transpose(i: Int, j: Int): Tensor = operate { + override fun Tensor.transposed(i: Int, j: Int): Tensor = operate { ops.linalg.transpose(it, ops.constant(intArrayOf(i, j))) } diff --git a/kmath-tensors/build.gradle.kts b/kmath-tensors/build.gradle.kts index 78ea4ba42..1060b37b4 100644 --- a/kmath-tensors/build.gradle.kts +++ b/kmath-tensors/build.gradle.kts @@ -25,6 +25,12 @@ kotlin.sourceSets { api(project(":kmath-stat")) } } + + commonTest{ + dependencies{ + implementation(projects.testUtils) + } + } } readme { diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt index 7615af2ea..70b985a54 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/api/TensorAlgebra.kt @@ -180,7 +180,7 @@ public interface TensorAlgebra> : RingOpsND { * @param j the second dimension to be transposed * @return transposed tensor */ - public fun Tensor.transpose(i: Int = -2, j: Int = -1): Tensor + public fun Tensor.transposed(i: Int = -2, j: Int = -1): Tensor /** * Returns a new tensor with the same data as the self tensor but of a different shape. 
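The rename is applied uniformly: `TensorAlgebra` now declares `transposed(i = -2, j = -1)`, and the Multik, Nd4j, and TensorFlow backends above override it by delegating to their native axis swaps. A quick usage sketch in the double tensor algebra:

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main() = DoubleTensorAlgebra {
    val m = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
    // the defaults i = -2, j = -1 swap the two trailing dimensions
    val mt = m.transposed()
    println(mt.shape.contentToString()) // [3, 2]
}
```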
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BroadcastDoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BroadcastDoubleTensorAlgebra.kt index 31e5b6e69..f337a2175 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BroadcastDoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BroadcastDoubleTensorAlgebra.kt @@ -7,8 +7,8 @@ package space.kscience.kmath.tensors.core import space.kscience.kmath.misc.UnstableKMathAPI import space.kscience.kmath.nd.StructureND +import space.kscience.kmath.structures.DoubleBuffer import space.kscience.kmath.tensors.api.Tensor -import space.kscience.kmath.tensors.core.internal.array import space.kscience.kmath.tensors.core.internal.broadcastTensors import space.kscience.kmath.tensors.core.internal.broadcastTo @@ -22,8 +22,8 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { val broadcast = broadcastTensors(asDoubleTensor(), arg.asDoubleTensor()) val newThis = broadcast[0] val newOther = broadcast[1] - val resBuffer = DoubleArray(newThis.indices.linearSize) { i -> - newThis.mutableBuffer.array()[i] + newOther.mutableBuffer.array()[i] + val resBuffer = DoubleBuffer(newThis.indices.linearSize) { i -> + newThis.source[i] + newOther.source[i] } return DoubleTensor(newThis.shape, resBuffer) } @@ -31,8 +31,7 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { override fun Tensor.plusAssign(arg: StructureND) { val newOther = broadcastTo(arg.asDoubleTensor(), asDoubleTensor().shape) for (i in 0 until asDoubleTensor().indices.linearSize) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] += - newOther.mutableBuffer.array()[asDoubleTensor().bufferStart + i] + asDoubleTensor().source[i] += newOther.source[i] } } @@ -40,17 +39,16 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { val broadcast = broadcastTensors(asDoubleTensor(), arg.asDoubleTensor()) val newThis = broadcast[0] val newOther = broadcast[1] - val resBuffer = DoubleArray(newThis.indices.linearSize) { i -> - newThis.mutableBuffer.array()[i] - newOther.mutableBuffer.array()[i] + val resBuffer = DoubleBuffer(newThis.indices.linearSize) { i -> + newThis.source[i] - newOther.source[i] } return DoubleTensor(newThis.shape, resBuffer) } override fun Tensor.minusAssign(arg: StructureND) { val newOther = broadcastTo(arg.asDoubleTensor(), asDoubleTensor().shape) - for (i in 0 until asDoubleTensor().indices.linearSize) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] -= - newOther.mutableBuffer.array()[asDoubleTensor().bufferStart + i] + for (i in 0 until indices.linearSize) { + asDoubleTensor().source[i] -= newOther.source[i] } } @@ -58,18 +56,16 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { val broadcast = broadcastTensors(asDoubleTensor(), arg.asDoubleTensor()) val newThis = broadcast[0] val newOther = broadcast[1] - val resBuffer = DoubleArray(newThis.indices.linearSize) { i -> - newThis.mutableBuffer.array()[newThis.bufferStart + i] * - newOther.mutableBuffer.array()[newOther.bufferStart + i] + val resBuffer = DoubleBuffer(newThis.indices.linearSize) { i -> + newThis.source[i] * newOther.source[i] } return DoubleTensor(newThis.shape, resBuffer) } override fun Tensor.timesAssign(arg: StructureND) { val newOther = broadcastTo(arg.asDoubleTensor(), asDoubleTensor().shape) - for (i in 0 until 
asDoubleTensor().indices.linearSize) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] *= - newOther.mutableBuffer.array()[asDoubleTensor().bufferStart + i] + for (i in 0 until indices.linearSize) { + asDoubleTensor().source[+i] *= newOther.source[i] } } @@ -77,18 +73,16 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() { val broadcast = broadcastTensors(asDoubleTensor(), arg.asDoubleTensor()) val newThis = broadcast[0] val newOther = broadcast[1] - val resBuffer = DoubleArray(newThis.indices.linearSize) { i -> - newThis.mutableBuffer.array()[newOther.bufferStart + i] / - newOther.mutableBuffer.array()[newOther.bufferStart + i] + val resBuffer = DoubleBuffer(newThis.indices.linearSize) { i -> + newThis.source[i] / newOther.source[i] } return DoubleTensor(newThis.shape, resBuffer) } override fun Tensor.divAssign(arg: StructureND) { val newOther = broadcastTo(arg.asDoubleTensor(), asDoubleTensor().shape) - for (i in 0 until asDoubleTensor().indices.linearSize) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] /= - newOther.mutableBuffer.array()[asDoubleTensor().bufferStart + i] + for (i in 0 until indices.linearSize) { + asDoubleTensor().source[i] /= newOther.source[i] } } } diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BufferedTensor.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BufferedTensor.kt index 98e6ab430..0b043db56 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BufferedTensor.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/BufferedTensor.kt @@ -13,12 +13,12 @@ import space.kscience.kmath.tensors.api.Tensor /** * Represents [Tensor] over a [MutableBuffer] intended to be used through [DoubleTensor] and [IntTensor] */ -public open class BufferedTensor internal constructor( +public abstract class BufferedTensor( override val shape: IntArray, - @PublishedApi internal val mutableBuffer: MutableBuffer, - @PublishedApi internal val bufferStart: Int, ) : Tensor { + public abstract val source: MutableBuffer + /** * Buffer strides based on [TensorLinearStructure] implementation */ @@ -27,14 +27,8 @@ public open class BufferedTensor internal constructor( /** * Number of elements in tensor */ - public val numElements: Int - get() = indices.linearSize + public val linearSize: Int get() = indices.linearSize - override fun get(index: IntArray): T = mutableBuffer[bufferStart + indices.offset(index)] - - override fun set(index: IntArray, value: T) { - mutableBuffer[bufferStart + indices.offset(index)] = value - } @PerformancePitfall override fun elements(): Sequence> = indices.asSequence().map { diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt index e8239caf4..e9589eb0a 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensor.kt @@ -5,16 +5,88 @@ package space.kscience.kmath.tensors.core -import space.kscience.kmath.structures.DoubleBuffer +import space.kscience.kmath.structures.* import space.kscience.kmath.tensors.core.internal.toPrettyString +public class OffsetDoubleBuffer( + private val source: DoubleBuffer, + private val offset: Int, + override val size: Int, +) : MutableBuffer { + + init { + require(offset >= 0) { "Offset must 
be non-negative" } + require(size >= 0) { "Size must be non-negative" } + require(offset + size <= source.size) { "Maximum index must be inside source dimension" } + } + + override fun set(index: Int, value: Double) { + require(index in 0 until size) { "Index must be in [0, size)" } + source[index + offset] = value + } + + override fun get(index: Int): Double = source[index + offset] + + /** + * Copy only a part of buffer that belongs to this tensor + */ + override fun copy(): DoubleBuffer = source.array.copyOfRange(offset, offset + size).asBuffer() + + override fun iterator(): Iterator = iterator { + for (i in indices) { + yield(get(i)) + } + } + + override fun toString(): String = Buffer.toString(this) + + public fun view(addOffset: Int, newSize: Int = size - addOffset): OffsetDoubleBuffer = + OffsetDoubleBuffer(source, offset + addOffset, newSize) +} + +public fun OffsetDoubleBuffer.slice(range: IntRange): OffsetDoubleBuffer = view(range.first, range.last - range.first) + +/** + * Map only operable content of the offset buffer + */ +public inline fun OffsetDoubleBuffer.map(operation: (Double) -> Double): DoubleBuffer = + DoubleBuffer(size) { operation(get(it)) } + +public inline fun OffsetDoubleBuffer.zip( + other: OffsetDoubleBuffer, + operation: (l: Double, r: Double) -> Double, +): DoubleBuffer { + require(size == other.size) { "The sizes of zipped buffers must be the same" } + return DoubleBuffer(size) { operation(get(it), other[it]) } +} + +/** + * map in place + */ +public inline fun OffsetDoubleBuffer.mapInPlace(operation: (Double) -> Double) { + indices.forEach { set(it, operation(get(it))) } +} + /** * Default [BufferedTensor] implementation for [Double] values */ -public class DoubleTensor @PublishedApi internal constructor( +public class DoubleTensor( shape: IntArray, - buffer: DoubleArray, - offset: Int = 0 -) : BufferedTensor(shape, DoubleBuffer(buffer), offset) { + override val source: OffsetDoubleBuffer, +) : BufferedTensor(shape) { + + init { + require(linearSize == source.size) { "Source buffer size must be equal tensor size" } + } + + public constructor(shape: IntArray, buffer: DoubleBuffer) : this(shape, OffsetDoubleBuffer(buffer, 0, buffer.size)) + + override fun get(index: IntArray): Double = this.source[indices.offset(index)] + + override fun set(index: IntArray, value: Double) { + source[indices.offset(index)] = value + } + + override fun toString(): String = toPrettyString() } diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt index 8acb15251..d758c49a1 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/DoubleTensorAlgebra.kt @@ -12,8 +12,7 @@ import space.kscience.kmath.misc.PerformancePitfall import space.kscience.kmath.misc.UnstableKMathAPI import space.kscience.kmath.nd.* import space.kscience.kmath.operations.DoubleField -import space.kscience.kmath.structures.MutableBuffer -import space.kscience.kmath.structures.indices +import space.kscience.kmath.structures.* import space.kscience.kmath.tensors.api.AnalyticTensorAlgebra import space.kscience.kmath.tensors.api.LinearOpsTensorAlgebra import space.kscience.kmath.tensors.api.Tensor @@ -41,56 +40,64 @@ public open class DoubleTensorAlgebra : * @param transform the function to be applied to each element of the tensor. 
* @return the resulting tensor after applying the function. */ - @PerformancePitfall @Suppress("OVERRIDE_BY_INLINE") final override inline fun StructureND.map(transform: DoubleField.(Double) -> Double): DoubleTensor { - val tensor = this.asDoubleTensor() + val tensor = asDoubleTensor() //TODO remove additional copy - val sourceArray = tensor.copyArray() - val array = DoubleArray(tensor.numElements) { DoubleField.transform(sourceArray[it]) } + val array = DoubleBuffer(tensor.source.size) { DoubleField.transform(tensor.source[it]) } return DoubleTensor( tensor.shape, array, - tensor.bufferStart ) } - @PerformancePitfall + public inline fun Tensor.mapInPlace(operation: (Double) -> Double) { + if (this is DoubleTensor) { + source.mapInPlace(operation) + } else { + indices.forEach { set(it, operation(get(it))) } + } + } + + public inline fun Tensor.mapIndexedInPlace(operation: (IntArray, Double) -> Double) { + indices.forEach { set(it, operation(it, get(it))) } + } + @Suppress("OVERRIDE_BY_INLINE") final override inline fun StructureND.mapIndexed(transform: DoubleField.(index: IntArray, Double) -> Double): DoubleTensor { val tensor = this.asDoubleTensor() //TODO remove additional copy - val sourceArray = tensor.copyArray() - val array = DoubleArray(tensor.numElements) { DoubleField.transform(tensor.indices.index(it), sourceArray[it]) } - return DoubleTensor( - tensor.shape, - array, - tensor.bufferStart - ) + val buffer = DoubleBuffer(tensor.source.size) { + DoubleField.transform(tensor.indices.index(it), tensor.source[it]) + } + return DoubleTensor(tensor.shape, buffer) } - @PerformancePitfall - override fun zip( + @Suppress("OVERRIDE_BY_INLINE") + final override inline fun zip( left: StructureND, right: StructureND, transform: DoubleField.(Double, Double) -> Double, ): DoubleTensor { - require(left.shape.contentEquals(right.shape)) { - "The shapes in zip are not equal: left - ${left.shape}, right - ${right.shape}" - } + checkShapesCompatible(left, right) + val leftTensor = left.asDoubleTensor() - val leftArray = leftTensor.copyArray() val rightTensor = right.asDoubleTensor() - val rightArray = rightTensor.copyArray() - val array = DoubleArray(leftTensor.numElements) { DoubleField.transform(leftArray[it], rightArray[it]) } - return DoubleTensor( - leftTensor.shape, - array - ) + val buffer = DoubleBuffer(leftTensor.source.size) { + DoubleField.transform(leftTensor.source[it], rightTensor.source[it]) + } + return DoubleTensor(leftTensor.shape, buffer) } - override fun StructureND.valueOrNull(): Double? = if (asDoubleTensor().shape contentEquals intArrayOf(1)) - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart] else null + + public inline fun StructureND.reduceElements(transform: (DoubleBuffer) -> Double): Double = + transform(asDoubleTensor().source.copy()) + //TODO do we need protective copy? + + override fun StructureND.valueOrNull(): Double? { + val dt = asDoubleTensor() + return if (dt.shape contentEquals intArrayOf(1)) dt.source[0] else null + } override fun StructureND.value(): Double = valueOrNull() ?: throw IllegalArgumentException("The tensor shape is $shape, but value method is allowed only for shape [1]") @@ -99,14 +106,14 @@ public open class DoubleTensorAlgebra : * Constructs a tensor with the specified shape and data. * * @param shape the desired shape for the tensor. - * @param buffer one-dimensional data array. - * @return tensor with the [shape] shape and [buffer] data. + * @param array one-dimensional data array. 
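After this hunk, `map`, `mapIndexed`, and `zip` read straight from the `source` buffer and allocate one fresh `DoubleBuffer`, while the new `mapInPlace` mutates the source directly. A sketch contrasting the allocating and in-place paths (also exercising the operators that the rest of this file defines in terms of them):

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main() = DoubleTensorAlgebra {
    val t = fromArray(intArrayOf(3), doubleArrayOf(1.0, 2.0, 3.0))

    val doubled = t.map { it * 2.0 } // allocates a new tensor
    t.mapInPlace { it + 1.0 }        // mutates t's source buffer

    println(doubled)     // [2.0, 4.0, 6.0]
    println(t)           // [2.0, 3.0, 4.0]
    println(doubled - t) // the operators below delegate to zip
}
```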
+ * @return tensor with the [shape] shape and [array] data. */ - public fun fromArray(shape: IntArray, buffer: DoubleArray): DoubleTensor { - checkEmptyShape(shape) - checkEmptyDoubleBuffer(buffer) - checkBufferShapeConsistency(shape, buffer) - return DoubleTensor(shape, buffer, 0) + public fun fromArray(shape: IntArray, array: DoubleArray): DoubleTensor { + checkNotEmptyShape(shape) + checkEmptyDoubleBuffer(array) + checkBufferShapeConsistency(shape, array) + return DoubleTensor(shape, array.asBuffer()) } /** @@ -122,10 +129,10 @@ public open class DoubleTensorAlgebra : ) override fun Tensor.getTensor(i: Int): DoubleTensor { - val lastShape = asDoubleTensor().shape.drop(1).toIntArray() + val dt = asDoubleTensor() + val lastShape = shape.drop(1).toIntArray() val newShape = if (lastShape.isNotEmpty()) lastShape else intArrayOf(1) - val newStart = newShape.reduce(Int::times) * i + asDoubleTensor().bufferStart - return DoubleTensor(newShape, asDoubleTensor().mutableBuffer.array(), newStart) + return DoubleTensor(newShape, dt.source.view(newShape.reduce(Int::times) * i)) } /** @@ -136,8 +143,8 @@ public open class DoubleTensorAlgebra : * @return tensor with the [shape] shape and filled with [value]. */ public fun full(value: Double, shape: IntArray): DoubleTensor { - checkEmptyShape(shape) - val buffer = DoubleArray(shape.reduce(Int::times)) { value } + checkNotEmptyShape(shape) + val buffer = DoubleBuffer(shape.reduce(Int::times)) { value } return DoubleTensor(shape, buffer) } @@ -147,9 +154,9 @@ public open class DoubleTensorAlgebra : * @param value the value to fill the output tensor with. * @return tensor with the `input` tensor shape and filled with [value]. */ - public fun Tensor.fullLike(value: Double): DoubleTensor { - val shape = asDoubleTensor().shape - val buffer = DoubleArray(asDoubleTensor().numElements) { value } + public fun fullLike(structureND: StructureND<*>, value: Double): DoubleTensor { + val shape = structureND.shape + val buffer = DoubleBuffer(structureND.indices.linearSize) { value } return DoubleTensor(shape, buffer) } @@ -166,7 +173,7 @@ public open class DoubleTensorAlgebra : * * @return tensor filled with the scalar value `0.0`, with the same shape as `input` tensor. */ - public fun StructureND.zeroesLike(): DoubleTensor = asDoubleTensor().fullLike(0.0) + public fun zeroesLike(structureND: StructureND<*>): DoubleTensor = fullLike(structureND, 0.0) /** * Returns a tensor filled with the scalar value `1.0`, with the shape defined by the variable argument [shape]. @@ -181,7 +188,7 @@ public open class DoubleTensorAlgebra : * * @return tensor filled with the scalar value `1.0`, with the same shape as `input` tensor. */ - public fun Tensor.onesLike(): DoubleTensor = asDoubleTensor().fullLike(1.0) + public fun onesLike(structureND: StructureND<*>): DoubleTensor = fullLike(structureND, 1.0) /** * Returns a 2D tensor with shape ([n], [n]), with ones on the diagonal and zeros elsewhere. @@ -191,7 +198,7 @@ public open class DoubleTensorAlgebra : */ public fun eye(n: Int): DoubleTensor { val shape = intArrayOf(n, n) - val buffer = DoubleArray(n * n) { 0.0 } + val buffer = DoubleBuffer(n * n) { 0.0 } val res = DoubleTensor(shape, buffer) for (i in 0 until n) { res[intArrayOf(i, i)] = 1.0 @@ -199,192 +206,102 @@ public open class DoubleTensorAlgebra : return res } - /** - * Return a copy of the tensor. - * - * @return a copy of the `input` tensor with a copied buffer. 
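`fullLike`, `zeroesLike`, and `onesLike` stop being extensions on the prototype tensor; the prototype structure is now passed explicitly, which is exactly what the `zeroesLike(u)` and `zeroesLike(yPred)` call sites earlier in this diff migrate to. A before/after usage sketch:

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main() = DoubleTensorAlgebra {
    val prototype = full(1.0, intArrayOf(2, 2))
    // old: prototype.zeroesLike() / prototype.fullLike(2.0)
    val zeroed = zeroesLike(prototype)
    val twos = fullLike(prototype, 2.0)
    println(zeroed)
    println(twos)
}
```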
*/ - public fun StructureND.copy(): DoubleTensor = - DoubleTensor( - asDoubleTensor().shape, - asDoubleTensor().mutableBuffer.array().copyOf(), - asDoubleTensor().bufferStart - ) + override fun Double.plus(arg: StructureND): DoubleTensor = arg.map { this@plus + it } - override fun Double.plus(arg: StructureND): DoubleTensor { - val resBuffer = DoubleArray(arg.asDoubleTensor().numElements) { i -> - arg.asDoubleTensor().mutableBuffer.array()[arg.asDoubleTensor().bufferStart + i] + this - } - return DoubleTensor(arg.shape, resBuffer) - } + override fun StructureND.plus(arg: Double): DoubleTensor = map { it + arg } - override fun StructureND.plus(arg: Double): DoubleTensor = arg + asDoubleTensor() - - override fun StructureND.plus(arg: StructureND): DoubleTensor { - checkShapesCompatible(asDoubleTensor(), arg.asDoubleTensor()) - val resBuffer = DoubleArray(asDoubleTensor().numElements) { i -> - asDoubleTensor().mutableBuffer.array()[i] + arg.asDoubleTensor().mutableBuffer.array()[i] - } - return DoubleTensor(asDoubleTensor().shape, resBuffer) - } + override fun StructureND.plus(arg: StructureND): DoubleTensor = zip(this, arg) { l, r -> l + r } override fun Tensor.plusAssign(value: Double) { - for (i in 0 until asDoubleTensor().numElements) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] += value - } + mapInPlace { it + value } } override fun Tensor.plusAssign(arg: StructureND) { checkShapesCompatible(asDoubleTensor(), arg.asDoubleTensor()) - for (i in 0 until asDoubleTensor().numElements) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] += - arg.asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] + mapIndexedInPlace { index, value -> + value + arg[index] } } - override fun Double.minus(arg: StructureND): DoubleTensor { - val resBuffer = DoubleArray(arg.asDoubleTensor().numElements) { i -> - this - arg.asDoubleTensor().mutableBuffer.array()[arg.asDoubleTensor().bufferStart + i] - } - return DoubleTensor(arg.shape, resBuffer) - } + override fun Double.minus(arg: StructureND): DoubleTensor = arg.map { this@minus - it } - override fun StructureND.minus(arg: Double): DoubleTensor { - val resBuffer = DoubleArray(asDoubleTensor().numElements) { i -> - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] - arg - } - return DoubleTensor(asDoubleTensor().shape, resBuffer) - } + override fun StructureND.minus(arg: Double): DoubleTensor = map { it - arg } - override fun StructureND.minus(arg: StructureND): DoubleTensor { - checkShapesCompatible(asDoubleTensor(), arg) - val resBuffer = DoubleArray(asDoubleTensor().numElements) { i -> - asDoubleTensor().mutableBuffer.array()[i] - arg.asDoubleTensor().mutableBuffer.array()[i] - } - return DoubleTensor(asDoubleTensor().shape, resBuffer) - } + override fun StructureND.minus(arg: StructureND): DoubleTensor = zip(this, arg) { l, r -> l - r } override fun Tensor.minusAssign(value: Double) { - for (i in 0 until asDoubleTensor().numElements) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] -= value - } + mapInPlace { it - value } } override fun Tensor.minusAssign(arg: StructureND) { - checkShapesCompatible(asDoubleTensor(), arg) - for (i in 0 until asDoubleTensor().numElements) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] -= - arg.asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] - } + checkShapesCompatible(this, arg) + mapIndexedInPlace { index, value -> value - arg[index] } } -
override fun Double.times(arg: StructureND): DoubleTensor { - val resBuffer = DoubleArray(arg.asDoubleTensor().numElements) { i -> - arg.asDoubleTensor().mutableBuffer.array()[arg.asDoubleTensor().bufferStart + i] * this - } - return DoubleTensor(arg.shape, resBuffer) - } + override fun Double.times(arg: StructureND): DoubleTensor = arg.map { this@times * it } override fun StructureND.times(arg: Double): DoubleTensor = arg * asDoubleTensor() - override fun StructureND.times(arg: StructureND): DoubleTensor { - checkShapesCompatible(asDoubleTensor(), arg) - val resBuffer = DoubleArray(asDoubleTensor().numElements) { i -> - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] * - arg.asDoubleTensor().mutableBuffer.array()[arg.asDoubleTensor().bufferStart + i] - } - return DoubleTensor(asDoubleTensor().shape, resBuffer) - } + override fun StructureND.times(arg: StructureND): DoubleTensor = zip(this, arg) { l, r -> l * r } override fun Tensor.timesAssign(value: Double) { - for (i in 0 until asDoubleTensor().numElements) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] *= value - } + mapInPlace { it * value } } override fun Tensor.timesAssign(arg: StructureND) { - checkShapesCompatible(asDoubleTensor(), arg) - for (i in 0 until asDoubleTensor().numElements) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] *= - arg.asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] - } + checkShapesCompatible(this, arg) + mapIndexedInPlace { index, value -> value * arg[index] } } - override fun Double.div(arg: StructureND): DoubleTensor { - val resBuffer = DoubleArray(arg.asDoubleTensor().numElements) { i -> - this / arg.asDoubleTensor().mutableBuffer.array()[arg.asDoubleTensor().bufferStart + i] - } - return DoubleTensor(arg.shape, resBuffer) - } + override fun Double.div(arg: StructureND): DoubleTensor = arg.map { this@div / it } - override fun StructureND.div(arg: Double): DoubleTensor { - val resBuffer = DoubleArray(asDoubleTensor().numElements) { i -> - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] / arg - } - return DoubleTensor(shape, resBuffer) - } + override fun StructureND.div(arg: Double): DoubleTensor = map { it / arg } - override fun StructureND.div(arg: StructureND): DoubleTensor { - checkShapesCompatible(asDoubleTensor(), arg) - val resBuffer = DoubleArray(asDoubleTensor().numElements) { i -> - asDoubleTensor().mutableBuffer.array()[arg.asDoubleTensor().bufferStart + i] / - arg.asDoubleTensor().mutableBuffer.array()[arg.asDoubleTensor().bufferStart + i] - } - return DoubleTensor(asDoubleTensor().shape, resBuffer) - } + override fun StructureND.div(arg: StructureND): DoubleTensor = zip(this, arg) { l, r -> l / r } override fun Tensor.divAssign(value: Double) { - for (i in 0 until asDoubleTensor().numElements) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] /= value - } + mapInPlace { it / value } } override fun Tensor.divAssign(arg: StructureND) { checkShapesCompatible(asDoubleTensor(), arg) - for (i in 0 until asDoubleTensor().numElements) { - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] /= - arg.asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i] - } + mapIndexedInPlace { index, value -> value / arg[index] } } - override fun StructureND.unaryMinus(): DoubleTensor { - val resBuffer = DoubleArray(asDoubleTensor().numElements) { i -> - 
asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + i].unaryMinus() - } - return DoubleTensor(asDoubleTensor().shape, resBuffer) - } + override fun StructureND.unaryMinus(): DoubleTensor = map { -it } - override fun Tensor.transpose(i: Int, j: Int): DoubleTensor { - val ii = asDoubleTensor().minusIndex(i) - val jj = asDoubleTensor().minusIndex(j) - checkTranspose(asDoubleTensor().dimension, ii, jj) - val n = asDoubleTensor().numElements + override fun Tensor.transposed(i: Int, j: Int): DoubleTensor { + // TODO change strides instead of changing content + val dt = asDoubleTensor() + val ii = dt.minusIndex(i) + val jj = dt.minusIndex(j) + checkTranspose(dt.dimension, ii, jj) + val n = dt.linearSize val resBuffer = DoubleArray(n) - val resShape = asDoubleTensor().shape.copyOf() + val resShape = dt.shape.copyOf() resShape[ii] = resShape[jj].also { resShape[jj] = resShape[ii] } - val resTensor = DoubleTensor(resShape, resBuffer) + val resTensor = DoubleTensor(resShape, resBuffer.asBuffer()) for (offset in 0 until n) { - val oldMultiIndex = asDoubleTensor().indices.index(offset) + val oldMultiIndex = dt.indices.index(offset) val newMultiIndex = oldMultiIndex.copyOf() newMultiIndex[ii] = newMultiIndex[jj].also { newMultiIndex[jj] = newMultiIndex[ii] } val linearIndex = resTensor.indices.offset(newMultiIndex) - resTensor.mutableBuffer.array()[linearIndex] = - asDoubleTensor().mutableBuffer.array()[asDoubleTensor().bufferStart + offset] + resTensor.source[linearIndex] = dt.source[offset] } return resTensor } override fun Tensor.view(shape: IntArray): DoubleTensor { checkView(asDoubleTensor(), shape) - return DoubleTensor(shape, asDoubleTensor().mutableBuffer.array(), asDoubleTensor().bufferStart) + return DoubleTensor(shape, asDoubleTensor().source) } override fun Tensor.viewAs(other: StructureND): DoubleTensor = - asDoubleTensor().view(other.shape) + view(other.shape) /** * Broadcasting Matrix product of two tensors. @@ -417,28 +334,28 @@ public open class DoubleTensorAlgebra : */ @UnstableKMathAPI public infix fun StructureND.matmul(other: StructureND): DoubleTensor { - if (asDoubleTensor().shape.size == 1 && other.shape.size == 1) { - return DoubleTensor( - intArrayOf(1), - doubleArrayOf(asDoubleTensor().times(other).asDoubleTensor().mutableBuffer.array().sum()) - ) + if (shape.size == 1 && other.shape.size == 1) { + return DoubleTensor(intArrayOf(1), DoubleBuffer(times(other).sum())) } - var newThis = asDoubleTensor().copy() - var newOther = other.copy() - var penultimateDim = false var lastDim = false - if (asDoubleTensor().shape.size == 1) { + + //TODO do we need protective copy here? 
+ var newThis: DoubleTensor = copyToTensor() + var newOther: DoubleTensor = other.copyToTensor() + + if (shape.size == 1) { penultimateDim = true - newThis = asDoubleTensor().view(intArrayOf(1) + asDoubleTensor().shape) - } - if (other.shape.size == 1) { - lastDim = true - newOther = other.asDoubleTensor().view(other.shape + intArrayOf(1)) + newThis = newThis.view(intArrayOf(1) + shape) } - val broadcastTensors = broadcastOuterTensors(newThis.asDoubleTensor(), newOther.asDoubleTensor()) + if (other.shape.size == 1) { + lastDim = true + newOther = newOther.view(other.shape + intArrayOf(1)) + } + + val broadcastTensors = broadcastOuterTensors(newThis, newOther) newThis = broadcastTensors[0] newOther = broadcastTensors[1] @@ -452,12 +369,20 @@ public open class DoubleTensorAlgebra : val resShape = newThis.shape.sliceArray(0..(newThis.shape.size - 2)) + intArrayOf(newOther.shape.last()) val resSize = resShape.reduce { acc, i -> acc * i } - val resTensor = DoubleTensor(resShape, DoubleArray(resSize)) + val resTensor = DoubleTensor(resShape, DoubleArray(resSize).asBuffer()) - for ((res, ab) in resTensor.matrixSequence().zip(newThis.matrixSequence().zip(newOther.matrixSequence()))) { - val (a, b) = ab - dotTo(a, b, res, l, m1, n) + val resMatrices = resTensor.matrices + val newThisMatrices = newThis.matrices + val newOtherMatrices = newOther.matrices + + for (i in resMatrices.indices) { + dotTo(newThisMatrices[i], newOtherMatrices[i], resMatrices[i], l, m1, n) } +// +// for ((res, ab) in resTensor.matrixSequence().zip(newThis.matrixSequence().zip(newOther.matrixSequence()))) { +// val (a, b) = ab +// dotTo(a, b, res, l, m1, n) +// } return if (penultimateDim) { resTensor.view(resTensor.shape.dropLast(2).toIntArray() + intArrayOf(resTensor.shape.last())) @@ -503,10 +428,10 @@ public open class DoubleTensorAlgebra : diagonalEntries.shape.slice(lessDim until greaterDim - 1).toIntArray() + intArrayOf(diagonalEntries.shape[n - 1] + abs(realOffset)) + diagonalEntries.shape.slice(greaterDim - 1 until n - 1).toIntArray() - val resTensor = zeros(resShape) + val resTensor: DoubleTensor = zeros(resShape) - for (i in 0 until diagonalEntries.asDoubleTensor().numElements) { - val multiIndex = diagonalEntries.asDoubleTensor().indices.index(i) + for (i in 0 until diagonalEntries.indices.linearSize) { + val multiIndex = diagonalEntries.indices.index(i) var offset1 = 0 var offset2 = abs(realOffset) @@ -522,7 +447,7 @@ public open class DoubleTensorAlgebra : resTensor[diagonalMultiIndex] = diagonalEntries[multiIndex] } - return resTensor.asDoubleTensor() + return resTensor } /** @@ -542,23 +467,20 @@ public open class DoubleTensorAlgebra : * @param other the tensor to compare with `input` tensor. * @return true if two tensors have the same shape and elements, false otherwise. 
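`matmul` pads rank-1 operands to matrices, broadcasts the outer dimensions, multiplies matrix by matrix via `dotTo`, then strips the padding again. A usage sketch of the new infix operation (it is marked `@UnstableKMathAPI`, hence the opt-in):

```kotlin
import space.kscience.kmath.misc.UnstableKMathAPI
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

@OptIn(UnstableKMathAPI::class)
fun main() = DoubleTensorAlgebra {
    val a = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
    val b = fromArray(intArrayOf(3, 2), doubleArrayOf(1.0, 0.0, 0.0, 1.0, 1.0, 1.0))
    val c = a matmul b // (2 x 3) * (3 x 2) -> (2 x 2)
    println(c.shape.contentToString())
}
```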
*/ - public infix fun Tensor.eq(other: Tensor): Boolean = asDoubleTensor().eq(other, 1e-5) + public infix fun Tensor.eq(other: Tensor): Boolean = eq(other, 1e-5) private fun Tensor.eq( other: Tensor, eqFunction: (Double, Double) -> Boolean, ): Boolean { + //TODO optimize tensor conversion checkShapesCompatible(asDoubleTensor(), other) - val n = asDoubleTensor().numElements - if (n != other.asDoubleTensor().numElements) { + val n = asDoubleTensor().linearSize + if (n != other.asDoubleTensor().linearSize) { return false } for (i in 0 until n) { - if (!eqFunction( - asDoubleTensor().mutableBuffer[asDoubleTensor().bufferStart + i], - other.asDoubleTensor().mutableBuffer[other.asDoubleTensor().bufferStart + i] - ) - ) { + if (!eqFunction(asDoubleTensor().source[i], other.asDoubleTensor().source[i])) { return false } } @@ -586,7 +508,7 @@ public open class DoubleTensorAlgebra : * with `0.0` mean and `1.0` standard deviation. */ public fun Tensor.randomNormalLike(seed: Long = 0): DoubleTensor = - DoubleTensor(asDoubleTensor().shape, getRandomNormals(asDoubleTensor().shape.reduce(Int::times), seed)) + DoubleTensor(shape, getRandomNormals(shape.reduce(Int::times), seed)) /** * Concatenates a sequence of tensors with equal shapes along the first dimension. @@ -599,11 +521,12 @@ public open class DoubleTensorAlgebra : val shape = tensors[0].shape check(tensors.all { it.shape contentEquals shape }) { "Tensors must have same shapes" } val resShape = intArrayOf(tensors.size) + shape - val resBuffer = tensors.flatMap { - it.asDoubleTensor().mutableBuffer.array().drop(it.asDoubleTensor().bufferStart) - .take(it.asDoubleTensor().numElements) - }.toDoubleArray() - return DoubleTensor(resShape, resBuffer, 0) +// val resBuffer: List = tensors.flatMap { +// it.asDoubleTensor().source.array.drop(it.asDoubleTensor().bufferStart) +// .take(it.asDoubleTensor().linearSize) +// } + val resBuffer = tensors.map { it.asDoubleTensor().source }.concat() + return DoubleTensor(resShape, resBuffer) } /** @@ -614,14 +537,12 @@ public open class DoubleTensorAlgebra : */ public fun Tensor.rowsByIndices(indices: IntArray): DoubleTensor = stack(indices.map { getTensor(it) }) - private inline fun StructureND.fold(foldFunction: (DoubleArray) -> Double): Double = - foldFunction(asDoubleTensor().copyArray()) - private inline fun StructureND.foldDim( + private inline fun StructureND.foldDimToDouble( dim: Int, keepDim: Boolean, - foldFunction: (DoubleArray) -> R, - ): BufferedTensor { + foldFunction: (DoubleArray) -> Double, + ): DoubleTensor { check(dim < dimension) { "Dimension $dim out of range $dimension" } val resShape = if (keepDim) { shape.take(dim).toIntArray() + intArrayOf(1) + shape.takeLast(dimension - dim - 1).toIntArray() @@ -630,9 +551,37 @@ public open class DoubleTensorAlgebra : } val resNumElements = resShape.reduce(Int::times) val init = foldFunction(DoubleArray(1) { 0.0 }) - val resTensor = BufferedTensor( + val resTensor = DoubleTensor( resShape, - MutableBuffer.auto(resNumElements) { init }, 0 + DoubleBuffer(resNumElements) { init } + ) + val dt = asDoubleTensor() + for (index in resTensor.indices) { + val prefix = index.take(dim).toIntArray() + val suffix = index.takeLast(dimension - dim - 1).toIntArray() + resTensor[index] = foldFunction(DoubleArray(shape[dim]) { i -> + dt[prefix + intArrayOf(i) + suffix] + }) + } + return resTensor + } + + private inline fun StructureND.foldDimToInt( + dim: Int, + keepDim: Boolean, + foldFunction: (DoubleArray) -> Int, + ): IntTensor { + check(dim < dimension) { "Dimension $dim 
out of range $dimension" } + val resShape = if (keepDim) { + shape.take(dim).toIntArray() + intArrayOf(1) + shape.takeLast(dimension - dim - 1).toIntArray() + } else { + shape.take(dim).toIntArray() + shape.takeLast(dimension - dim - 1).toIntArray() + } + val resNumElements = resShape.reduce(Int::times) + val init = foldFunction(DoubleArray(1) { 0.0 }) + val resTensor = IntTensor( + resShape, + IntBuffer(resNumElements) { init } ) for (index in resTensor.indices) { val prefix = index.take(dim).toIntArray() @@ -644,68 +593,71 @@ public open class DoubleTensorAlgebra : return resTensor } - override fun StructureND.sum(): Double = asDoubleTensor().fold { it.sum() } + + override fun StructureND.sum(): Double = reduceElements { it.array.sum() } override fun StructureND.sum(dim: Int, keepDim: Boolean): DoubleTensor = - foldDim(dim, keepDim) { x -> x.sum() }.asDoubleTensor() + foldDimToDouble(dim, keepDim) { x -> x.sum() } - override fun StructureND.min(): Double = this.fold { it.minOrNull()!! } + override fun StructureND.min(): Double = reduceElements { it.array.min() } override fun StructureND.min(dim: Int, keepDim: Boolean): DoubleTensor = - foldDim(dim, keepDim) { x -> x.minOrNull()!! }.asDoubleTensor() + foldDimToDouble(dim, keepDim) { x -> x.minOrNull()!! } - override fun StructureND.argMin(dim: Int, keepDim: Boolean): Tensor = foldDim(dim, keepDim) { x -> - x.withIndex().minByOrNull { it.value }?.index!! - }.asIntTensor() + override fun StructureND.argMin(dim: Int, keepDim: Boolean): Tensor = foldDimToInt(dim, keepDim) { x -> + x.withIndex().minBy { it.value }.index + } - override fun StructureND.max(): Double = this.fold { it.maxOrNull()!! } + override fun StructureND.max(): Double = reduceElements { it.array.max() } override fun StructureND.max(dim: Int, keepDim: Boolean): DoubleTensor = - foldDim(dim, keepDim) { x -> x.maxOrNull()!! }.asDoubleTensor() + foldDimToDouble(dim, keepDim) { x -> x.maxOrNull()!! } override fun StructureND.argMax(dim: Int, keepDim: Boolean): IntTensor = - foldDim(dim, keepDim) { x -> - x.withIndex().maxByOrNull { it.value }?.index!! 
- }.asIntTensor() + foldDimToInt(dim, keepDim) { x -> + x.withIndex().maxBy { it.value }.index + } - override fun StructureND.mean(): Double = this.fold { it.sum() / asDoubleTensor().numElements } + override fun StructureND.mean(): Double = sum() / indices.linearSize - override fun StructureND.mean(dim: Int, keepDim: Boolean): DoubleTensor = foldDim(dim, keepDim) { arr -> - check(dim < dimension) { "Dimension $dim out of range $dimension" } - arr.sum() / shape[dim] - }.asDoubleTensor() + override fun StructureND.mean(dim: Int, keepDim: Boolean): DoubleTensor = + foldDimToDouble(dim, keepDim) { arr -> + check(dim < dimension) { "Dimension $dim out of range $dimension" } + arr.sum() / shape[dim] + } - override fun StructureND.std(): Double = fold { arr -> - val mean = arr.sum() / asDoubleTensor().numElements - sqrt(arr.sumOf { (it - mean) * (it - mean) } / (asDoubleTensor().numElements - 1)) + override fun StructureND.std(): Double = reduceElements { arr -> + val mean = arr.array.sum() / indices.linearSize + sqrt(arr.array.sumOf { (it - mean) * (it - mean) } / (indices.linearSize - 1)) } - override fun StructureND.std(dim: Int, keepDim: Boolean): DoubleTensor = foldDim( + override fun StructureND.std(dim: Int, keepDim: Boolean): DoubleTensor = foldDimToDouble( dim, keepDim ) { arr -> check(dim < dimension) { "Dimension $dim out of range $dimension" } val mean = arr.sum() / shape[dim] sqrt(arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1)) - }.asDoubleTensor() - - override fun StructureND.variance(): Double = fold { arr -> - val mean = arr.sum() / asDoubleTensor().numElements - arr.sumOf { (it - mean) * (it - mean) } / (asDoubleTensor().numElements - 1) } - override fun StructureND.variance(dim: Int, keepDim: Boolean): DoubleTensor = foldDim( + override fun StructureND.variance(): Double = reduceElements { arr -> + val linearSize = indices.linearSize + val mean = arr.array.sum() / linearSize + arr.array.sumOf { (it - mean) * (it - mean) } / (linearSize - 1) + } + + override fun StructureND.variance(dim: Int, keepDim: Boolean): DoubleTensor = foldDimToDouble( dim, keepDim ) { arr -> check(dim < dimension) { "Dimension $dim out of range $dimension" } val mean = arr.sum() / shape[dim] arr.sumOf { (it - mean) * (it - mean) } / (shape[dim] - 1) - }.asDoubleTensor() + } - private fun cov(x: DoubleTensor, y: DoubleTensor): Double { + private fun cov(x: StructureND, y: StructureND): Double { val n = x.shape[0] return ((x - x.mean()) * (y - y.mean())).mean() * n / (n - 1) } @@ -725,45 +677,45 @@ public open class DoubleTensorAlgebra : check(tensors.all { it.shape contentEquals intArrayOf(m) }) { "Tensors must have same shapes" } val resTensor = DoubleTensor( intArrayOf(n, n), - DoubleArray(n * n) { 0.0 } + DoubleBuffer(n * n) { 0.0 } ) for (i in 0 until n) { for (j in 0 until n) { - resTensor[intArrayOf(i, j)] = cov(tensors[i].asDoubleTensor(), tensors[j].asDoubleTensor()) + resTensor[intArrayOf(i, j)] = cov(tensors[i], tensors[j]) } } return resTensor } - override fun StructureND.exp(): DoubleTensor = asDoubleTensor().map { exp(it) } + override fun StructureND.exp(): DoubleTensor = map { exp(it) } - override fun StructureND.ln(): DoubleTensor = asDoubleTensor().map { ln(it) } + override fun StructureND.ln(): DoubleTensor = map { ln(it) } - override fun StructureND.sqrt(): DoubleTensor = asDoubleTensor().map { sqrt(it) } + override fun StructureND.sqrt(): DoubleTensor = map { sqrt(it) } - override fun StructureND.cos(): DoubleTensor = asDoubleTensor().map { cos(it) } + override fun 
StructureND.cos(): DoubleTensor = map { cos(it) } - override fun StructureND.acos(): DoubleTensor = asDoubleTensor().map { acos(it) } + override fun StructureND.acos(): DoubleTensor = map { acos(it) } - override fun StructureND.cosh(): DoubleTensor = asDoubleTensor().map { cosh(it) } + override fun StructureND.cosh(): DoubleTensor = map { cosh(it) } - override fun StructureND.acosh(): DoubleTensor = asDoubleTensor().map { acosh(it) } + override fun StructureND.acosh(): DoubleTensor = map { acosh(it) } - override fun StructureND.sin(): DoubleTensor = asDoubleTensor().map { sin(it) } + override fun StructureND.sin(): DoubleTensor = map { sin(it) } - override fun StructureND.asin(): DoubleTensor = asDoubleTensor().map { asin(it) } + override fun StructureND.asin(): DoubleTensor = map { asin(it) } - override fun StructureND.sinh(): DoubleTensor = asDoubleTensor().map { sinh(it) } + override fun StructureND.sinh(): DoubleTensor = map { sinh(it) } - override fun StructureND.asinh(): DoubleTensor = asDoubleTensor().map { asinh(it) } + override fun StructureND.asinh(): DoubleTensor = map { asinh(it) } - override fun StructureND.tan(): DoubleTensor = asDoubleTensor().map { tan(it) } + override fun StructureND.tan(): DoubleTensor = map { tan(it) } - override fun StructureND.atan(): DoubleTensor = asDoubleTensor().map { atan(it) } + override fun StructureND.atan(): DoubleTensor = map { atan(it) } - override fun StructureND.tanh(): DoubleTensor = asDoubleTensor().map { tanh(it) } + override fun StructureND.tanh(): DoubleTensor = map { tanh(it) } - override fun StructureND.atanh(): DoubleTensor = asDoubleTensor().map { atanh(it) } + override fun StructureND.atanh(): DoubleTensor = map { atanh(it) } override fun power(arg: StructureND, pow: Number): StructureND = if (pow is Int) { arg.map { it.pow(pow) } @@ -771,9 +723,9 @@ public open class DoubleTensorAlgebra : arg.map { it.pow(pow.toDouble()) } } - override fun StructureND.ceil(): DoubleTensor = asDoubleTensor().map { ceil(it) } + override fun StructureND.ceil(): DoubleTensor = map { ceil(it) } - override fun StructureND.floor(): DoubleTensor = asDoubleTensor().map { floor(it) } + override fun StructureND.floor(): DoubleTensor = map { floor(it) } override fun StructureND.inv(): DoubleTensor = invLU(1e-9) @@ -789,7 +741,7 @@ public open class DoubleTensorAlgebra : * The `pivots` has the shape ``(∗, min(m, n))``. `pivots` stores all the intermediate transpositions of rows. 
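 *
 * Editor's sketch of intended use (hedged; `fromArray` and `luPivot` appear elsewhere in this diff, the values are illustrative):
 * ```
 * val matrix = fromArray(intArrayOf(2, 2), doubleArrayOf(4.0, 3.0, 6.0, 3.0))
 * val (lu, pivots) = matrix.luFactor(1e-9) // packed LU factors plus row transpositions
 * val (p, l, u) = luPivot(lu, pivots)      // unpacked into explicit P, L and U
 * ```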
*/ public fun StructureND.luFactor(epsilon: Double): Pair = - computeLU(asDoubleTensor(), epsilon) + computeLU(this, epsilon) ?: throw IllegalArgumentException("Tensor contains matrices which are singular at precision $epsilon") /** @@ -825,14 +777,14 @@ public open class DoubleTensorAlgebra : ) { "Inappropriate shapes of input tensors" } val n = luTensor.shape.last() - val pTensor = luTensor.zeroesLike() + val pTensor = zeroesLike(luTensor) pTensor .matrixSequence() .zip(pivotsTensor.asIntTensor().vectorSequence()) .forEach { (p, pivot) -> pivInit(p.as2D(), pivot.as1D(), n) } - val lTensor = luTensor.zeroesLike() - val uTensor = luTensor.zeroesLike() + val lTensor = zeroesLike(luTensor) + val uTensor = zeroesLike(luTensor) lTensor.matrixSequence() .zip(uTensor.matrixSequence()) @@ -863,7 +815,7 @@ public open class DoubleTensorAlgebra : checkPositiveDefinite(asDoubleTensor(), epsilon) val n = shape.last() - val lTensor = zeroesLike() + val lTensor = zeroesLike(this) for ((a, l) in asDoubleTensor().matrixSequence().zip(lTensor.matrixSequence())) for (i in 0 until n) choleskyHelper(a.as2D(), l.as2D(), n) @@ -875,15 +827,17 @@ public open class DoubleTensorAlgebra : override fun StructureND.qr(): Pair { checkSquareMatrix(shape) - val qTensor = zeroesLike() - val rTensor = zeroesLike() + val qTensor = zeroesLike(this) + val rTensor = zeroesLike(this) + + //TODO replace with cycle asDoubleTensor().matrixSequence() .zip( (qTensor.matrixSequence() .zip(rTensor.matrixSequence())) ).forEach { (matrix, qr) -> val (q, r) = qr - qrHelper(matrix.toTensor(), q.toTensor(), r.as2D()) + qrHelper(matrix, q, r.as2D()) } return qTensor to rTensor @@ -906,14 +860,14 @@ public open class DoubleTensorAlgebra : * @return a triple `Triple(U, S, V)`. */ public fun StructureND.svd(epsilon: Double): Triple { - val size = asDoubleTensor().dimension - val commonShape = asDoubleTensor().shape.sliceArray(0 until size - 2) - val (n, m) = asDoubleTensor().shape.sliceArray(size - 2 until size) + val size = dimension + val commonShape = shape.sliceArray(0 until size - 2) + val (n, m) = shape.sliceArray(size - 2 until size) val uTensor = zeros(commonShape + intArrayOf(min(n, m), n)) val sTensor = zeros(commonShape + intArrayOf(min(n, m))) val vTensor = zeros(commonShape + intArrayOf(min(n, m), m)) - val matrices = asDoubleTensor().matrices + val matrices: VirtualBuffer = asDoubleTensor().matrices val uTensors = uTensor.matrices val sTensorVectors = sTensor.vectors val vTensors = vTensor.matrices @@ -928,14 +882,12 @@ public open class DoubleTensorAlgebra : val matrixSize = matrix.shape.reduce { acc, i -> acc * i } val curMatrix = DoubleTensor( matrix.shape, - matrix.mutableBuffer.array() - .slice(matrix.bufferStart until matrix.bufferStart + matrixSize) - .toDoubleArray() + matrix.source.view(0, matrixSize) ) svdHelper(curMatrix, usv, m, n, epsilon) } - return Triple(uTensor.transpose(), sTensor, vTensor.transpose()) + return Triple(uTensor.transposed(), sTensor, vTensor.transposed()) } override fun StructureND.symEig(): Pair = @@ -950,6 +902,7 @@ public open class DoubleTensorAlgebra : * @return a pair `eigenvalues to eigenvectors`. 
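 *
 * Editor's sketch (hedged; `tensor` stands for any symmetric matrix, the tolerance is illustrative):
 * ```
 * val (eigenvalues, eigenvectors) = tensor.symEigSvd(epsilon = 1e-10)
 * ```
 * `symEigJacobi` below provides the same contract via Jacobi rotations instead of SVD.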
*/ public fun StructureND.symEigSvd(epsilon: Double): Pair { + //TODO optimize conversion checkSymmetric(asDoubleTensor(), epsilon) fun MutableStructure2D.cleanSym(n: Int) { @@ -964,9 +917,9 @@ public open class DoubleTensorAlgebra : } } - val (u, s, v) = asDoubleTensor().svd(epsilon) + val (u, s, v) = svd(epsilon) val shp = s.shape + intArrayOf(1) - val utv = u.transpose() matmul v + val utv = u.transposed() matmul v val n = s.shape.last() for (matrix in utv.matrixSequence()) { matrix.as2D().cleanSym(n) @@ -977,6 +930,7 @@ public open class DoubleTensorAlgebra : } public fun StructureND.symEigJacobi(maxIteration: Int, epsilon: Double): Pair { + //TODO optimize conversion checkSymmetric(asDoubleTensor(), epsilon) val size = this.dimension @@ -991,12 +945,12 @@ public open class DoubleTensorAlgebra : for (i in 0 until matrix2D.rowNum) { for (j in 0 until matrix2D.colNum) { - eigenvectors.mutableBuffer.array()[eigenvectorStart + i * matrix2D.rowNum + j] = v[i, j] + eigenvectors.source[eigenvectorStart + i * matrix2D.rowNum + j] = v[i, j] } } for (i in 0 until matrix2D.rowNum) { - eigenvalues.mutableBuffer.array()[eigenvalueStart + i] = d[i] + eigenvalues.source[eigenvalueStart + i] = d[i] } eigenvalueStart += this.shape.last() @@ -1006,121 +960,6 @@ public open class DoubleTensorAlgebra : return eigenvalues to eigenvectors } - private fun MutableStructure2D.jacobiHelper( - maxIteration: Int, - epsilon: Double, - ): Pair, Structure2D> { - val n = this.shape[0] - val A_ = this.copy() - val V = eye(n) - val D = DoubleTensor(intArrayOf(n), (0 until this.rowNum).map { this[it, it] }.toDoubleArray()).as1D() - val B = DoubleTensor(intArrayOf(n), (0 until this.rowNum).map { this[it, it] }.toDoubleArray()).as1D() - val Z = zeros(intArrayOf(n)).as1D() - - // assume that buffered tensor is square matrix - operator fun BufferedTensor.get(i: Int, j: Int): Double { - return this.mutableBuffer.array()[bufferStart + i * this.shape[0] + j] - } - - operator fun BufferedTensor.set(i: Int, j: Int, value: Double) { - this.mutableBuffer.array()[bufferStart + i * this.shape[0] + j] = value - } - - fun maxOffDiagonal(matrix: BufferedTensor): Double { - var maxOffDiagonalElement = 0.0 - for (i in 0 until n - 1) { - for (j in i + 1 until n) { - maxOffDiagonalElement = max(maxOffDiagonalElement, abs(matrix[i, j])) - } - } - return maxOffDiagonalElement - } - - fun rotate(a: BufferedTensor, s: Double, tau: Double, i: Int, j: Int, k: Int, l: Int) { - val g = a[i, j] - val h = a[k, l] - a[i, j] = g - s * (h + g * tau) - a[k, l] = h + s * (g - h * tau) - } - - fun jacobiIteration( - a: BufferedTensor, - v: BufferedTensor, - d: MutableStructure1D, - z: MutableStructure1D, - ) { - for (ip in 0 until n - 1) { - for (iq in ip + 1 until n) { - val g = 100.0 * abs(a[ip, iq]) - - if (g <= epsilon * abs(d[ip]) && g <= epsilon * abs(d[iq])) { - a[ip, iq] = 0.0 - continue - } - - var h = d[iq] - d[ip] - val t = when { - g <= epsilon * abs(h) -> (a[ip, iq]) / h - else -> { - val theta = 0.5 * h / (a[ip, iq]) - val denominator = abs(theta) + sqrt(1.0 + theta * theta) - if (theta < 0.0) -1.0 / denominator else 1.0 / denominator - } - } - - val c = 1.0 / sqrt(1 + t * t) - val s = t * c - val tau = s / (1.0 + c) - h = t * a[ip, iq] - z[ip] -= h - z[iq] += h - d[ip] -= h - d[iq] += h - a[ip, iq] = 0.0 - - for (j in 0 until ip) { - rotate(a, s, tau, j, ip, j, iq) - } - for (j in (ip + 1) until iq) { - rotate(a, s, tau, ip, j, j, iq) - } - for (j in (iq + 1) until n) { - rotate(a, s, tau, ip, j, iq, j) - } - for (j in 0 until n) { - 
rotate(v, s, tau, j, ip, j, iq) - } - } - } - } - - fun updateDiagonal( - d: MutableStructure1D, - z: MutableStructure1D, - b: MutableStructure1D, - ) { - for (ip in 0 until d.size) { - b[ip] += z[ip] - d[ip] = b[ip] - z[ip] = 0.0 - } - } - - var sm = maxOffDiagonal(A_) - for (iteration in 0 until maxIteration) { - if (sm < epsilon) { - break - } - - jacobiIteration(A_, V, D, Z) - updateDiagonal(D, Z, B) - sm = maxOffDiagonal(A_) - } - - // TODO sort eigenvalues - return D to V.as2D() - } - /** * Computes the determinant of a square matrix input, or of each square matrix in a batched input * using LU factorization algorithm. @@ -1130,15 +969,16 @@ public open class DoubleTensorAlgebra : * @return the determinant. */ public fun StructureND.detLU(epsilon: Double = 1e-9): DoubleTensor { - checkSquareMatrix(asDoubleTensor().shape) - val luTensor = asDoubleTensor().copy() - val pivotsTensor = asDoubleTensor().setUpPivots() + checkSquareMatrix(shape) + //TODO check for unnecessary copies + val luTensor = copyToTensor() + val pivotsTensor = setUpPivots() val n = shape.size val detTensorShape = IntArray(n - 1) { i -> shape[i] } detTensorShape[n - 2] = 1 - val resBuffer = DoubleArray(detTensorShape.reduce(Int::times)) { 0.0 } + val resBuffer = DoubleBuffer(detTensorShape.reduce(Int::times)) { 0.0 } val detTensor = DoubleTensor( detTensorShape, @@ -1164,8 +1004,9 @@ public open class DoubleTensorAlgebra : */ public fun StructureND.invLU(epsilon: Double = 1e-9): DoubleTensor { val (luTensor, pivotsTensor) = luFactor(epsilon) - val invTensor = luTensor.zeroesLike() + val invTensor = zeroesLike(luTensor) + //TODO replace sequence with a cycle val seq = luTensor.matrixSequence().zip(pivotsTensor.vectorSequence()).zip(invTensor.matrixSequence()) for ((luP, invMatrix) in seq) { val (lu, pivots) = luP @@ -1188,7 +1029,7 @@ public open class DoubleTensorAlgebra : * @return triple of `P`, `L` and `U` tensors. 
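 *
 * Editor's sketch (hedged; the relation holds up to numerical error at the chosen [epsilon]):
 * ```
 * val (p, l, u) = tensor.lu()
 * // for every square matrix in the batch: p dot tensor ≈ l dot u
 * ```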
 */
    public fun StructureND<Double>.lu(epsilon: Double = 1e-9): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
-        val (lu, pivots) = asDoubleTensor().luFactor(epsilon)
+        val (lu, pivots) = luFactor(epsilon)
         return luPivot(lu, pivots)
     }
diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/IntTensor.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/IntTensor.kt
index a2b942bf0..ed96b6c8f 100644
--- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/IntTensor.kt
+++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/IntTensor.kt
@@ -5,17 +5,87 @@
 package space.kscience.kmath.tensors.core
 
-import space.kscience.kmath.structures.IntBuffer
-import space.kscience.kmath.tensors.core.internal.array
+import space.kscience.kmath.structures.*
 
-/**
- * Default [BufferedTensor] implementation for [Int] values
- */
-public class IntTensor @PublishedApi internal constructor(
-    shape: IntArray,
-    buffer: IntArray,
-    offset: Int = 0,
-) : BufferedTensor<Int>(shape, IntBuffer(buffer), offset) {
-    public fun asDouble(): DoubleTensor =
-        DoubleTensor(shape, mutableBuffer.array().map { it.toDouble() }.toDoubleArray(), bufferStart)
+/**
+ * A [MutableBuffer] view over a contiguous region of an [IntBuffer]
+ */
+public class OffsetIntBuffer(
+    private val source: IntBuffer,
+    private val offset: Int,
+    override val size: Int,
+) : MutableBuffer<Int> {
+
+    init {
+        require(offset >= 0) { "Offset must be non-negative" }
+        require(size >= 0) { "Size must be non-negative" }
+        require(offset + size <= source.size) { "Maximum index must be inside source dimension" }
+    }
+
+    override fun set(index: Int, value: Int) {
+        require(index in 0 until size) { "Index must be in [0, size)" }
+        source[index + offset] = value
+    }
+
+    override fun get(index: Int): Int = source[index + offset]
+
+    /**
+     * Copy only a part of buffer that belongs to this tensor
+     */
+    override fun copy(): IntBuffer = source.array.copyOfRange(offset, offset + size).asBuffer()
+
+    override fun iterator(): Iterator<Int> = iterator {
+        for (i in indices) {
+            yield(get(i))
+        }
+    }
+
+    override fun toString(): String = Buffer.toString(this)
+
+    public fun view(addOffset: Int, newSize: Int = size - addOffset): OffsetIntBuffer =
+        OffsetIntBuffer(source, offset + addOffset, newSize)
+}
+
+// IntRange is inclusive, so the size of the view is range.last - range.first + 1
+public fun OffsetIntBuffer.slice(range: IntRange): OffsetIntBuffer = view(range.first, range.last - range.first + 1)
+
+/**
+ * Map only operable content of the offset buffer
+ */
+public inline fun OffsetIntBuffer.map(operation: (Int) -> Int): IntBuffer =
+    IntBuffer(size) { operation(get(it)) }
+
+public inline fun OffsetIntBuffer.zip(
+    other: OffsetIntBuffer,
+    operation: (l: Int, r: Int) -> Int,
+): IntBuffer {
+    require(size == other.size) { "The sizes of zipped buffers must be the same" }
+    return IntBuffer(size) { operation(get(it), other[it]) }
+}
+
+/**
+ * map in place
+ */
+public inline fun OffsetIntBuffer.mapInPlace(operation: (Int) -> Int) {
+    indices.forEach { set(it, operation(get(it))) }
+}
+
+/**
+ * Default [BufferedTensor] implementation for [Int] values
+ */
+public class IntTensor(
+    shape: IntArray,
+    override val source: OffsetIntBuffer,
+) : BufferedTensor<Int>(shape) {
+
+    init {
+        require(linearSize == source.size) { "Source buffer size must be equal to the tensor size" }
+    }
+
+    public constructor(shape: IntArray, buffer: IntBuffer) : this(shape, OffsetIntBuffer(buffer, 0, buffer.size))
+
+    override fun get(index: IntArray): Int = this.source[indices.offset(index)]
+
+    override fun set(index: IntArray, value: Int) {
+        source[indices.offset(index)] = value
+    }
 }
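Editor's note: a brief, hedged sketch of the buffer-view semantics introduced in the file above (values illustrative; only constructors and factories visible in this diff are used, and the inclusive-range fix to `slice` is assumed):

    val base = IntBuffer(10) { it }            // holds 0..9
    val window = OffsetIntBuffer(base, 2, 5)   // a no-copy view over 2, 3, 4, 5, 6
    val tail = window.view(1)                  // narrows the view to 3, 4, 5, 6
    val middle = window.slice(1..3)            // inclusive range: 3, 4, 5
    val snapshot = middle.copy()               // materializes an independent IntBuffer

diff --git 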
a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/IntTensorAlgebra.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/IntTensorAlgebra.kt index 3ddbd3301..7c18fe533 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/IntTensorAlgebra.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/IntTensorAlgebra.kt @@ -11,7 +11,7 @@ package space.kscience.kmath.tensors.core import space.kscience.kmath.misc.PerformancePitfall import space.kscience.kmath.nd.* import space.kscience.kmath.operations.IntRing -import space.kscience.kmath.structures.MutableBuffer +import space.kscience.kmath.structures.* import space.kscience.kmath.tensors.api.* import space.kscience.kmath.tensors.core.internal.* import kotlin.math.* @@ -23,10 +23,6 @@ public open class IntTensorAlgebra : TensorAlgebra { public companion object : IntTensorAlgebra() - override fun StructureND.dot(other: StructureND): Tensor { - TODO("Not yet implemented") - } - override val elementAlgebra: IntRing get() = IntRing @@ -36,56 +32,64 @@ public open class IntTensorAlgebra : TensorAlgebra { * @param transform the function to be applied to each element of the tensor. * @return the resulting tensor after applying the function. */ - @PerformancePitfall @Suppress("OVERRIDE_BY_INLINE") final override inline fun StructureND.map(transform: IntRing.(Int) -> Int): IntTensor { val tensor = this.asIntTensor() //TODO remove additional copy - val sourceArray = tensor.copyArray() - val array = IntArray(tensor.numElements) { IntRing.transform(sourceArray[it]) } + val array = IntBuffer(tensor.source.size) { IntRing.transform(tensor.source[it]) } return IntTensor( tensor.shape, array, - tensor.bufferStart ) } - @PerformancePitfall + public inline fun Tensor.mapInPlace(operation: (Int) -> Int) { + if (this is IntTensor) { + source.mapInPlace(operation) + } else { + indices.forEach { set(it, operation(get(it))) } + } + } + + public inline fun Tensor.mapIndexedInPlace(operation: (IntArray, Int) -> Int) { + indices.forEach { set(it, operation(it, get(it))) } + } + @Suppress("OVERRIDE_BY_INLINE") final override inline fun StructureND.mapIndexed(transform: IntRing.(index: IntArray, Int) -> Int): IntTensor { val tensor = this.asIntTensor() //TODO remove additional copy - val sourceArray = tensor.copyArray() - val array = IntArray(tensor.numElements) { IntRing.transform(tensor.indices.index(it), sourceArray[it]) } - return IntTensor( - tensor.shape, - array, - tensor.bufferStart - ) + val buffer = IntBuffer(tensor.source.size) { + IntRing.transform(tensor.indices.index(it), tensor.source[it]) + } + return IntTensor(tensor.shape, buffer) } - @PerformancePitfall - override fun zip( + @Suppress("OVERRIDE_BY_INLINE") + final override inline fun zip( left: StructureND, right: StructureND, transform: IntRing.(Int, Int) -> Int, ): IntTensor { - require(left.shape.contentEquals(right.shape)) { - "The shapes in zip are not equal: left - ${left.shape}, right - ${right.shape}" - } + checkShapesCompatible(left, right) + val leftTensor = left.asIntTensor() - val leftArray = leftTensor.copyArray() val rightTensor = right.asIntTensor() - val rightArray = rightTensor.copyArray() - val array = IntArray(leftTensor.numElements) { IntRing.transform(leftArray[it], rightArray[it]) } - return IntTensor( - leftTensor.shape, - array - ) + val buffer = IntBuffer(leftTensor.source.size) { + IntRing.transform(leftTensor.source[it], rightTensor.source[it]) + } + return IntTensor(leftTensor.shape, 
buffer) } - override fun StructureND.valueOrNull(): Int? = if (asIntTensor().shape contentEquals intArrayOf(1)) - asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart] else null + + public inline fun StructureND.reduceElements(transform: (IntBuffer) -> Int): Int = + transform(asIntTensor().source.copy()) + //TODO do we need protective copy? + + override fun StructureND.valueOrNull(): Int? { + val dt = asIntTensor() + return if (dt.shape contentEquals intArrayOf(1)) dt.source[0] else null + } override fun StructureND.value(): Int = valueOrNull() ?: throw IllegalArgumentException("The tensor shape is $shape, but value method is allowed only for shape [1]") @@ -94,16 +98,16 @@ public open class IntTensorAlgebra : TensorAlgebra { * Constructs a tensor with the specified shape and data. * * @param shape the desired shape for the tensor. - * @param buffer one-dimensional data array. - * @return tensor with the [shape] shape and [buffer] data. + * @param array one-dimensional data array. + * @return tensor with the [shape] shape and [array] data. */ - public fun fromArray(shape: IntArray, buffer: IntArray): IntTensor { - checkEmptyShape(shape) - check(buffer.isNotEmpty()) { "Illegal empty buffer provided" } - check(buffer.size == shape.reduce(Int::times)) { - "Inconsistent shape ${shape.toList()} for buffer of size ${buffer.size} provided" + public fun fromArray(shape: IntArray, array: IntArray): IntTensor { + checkNotEmptyShape(shape) + check(array.isNotEmpty()) { "Illegal empty buffer provided" } + check(array.size == shape.reduce(Int::times)) { + "Inconsistent shape ${shape.toList()} for buffer of size ${array.size} provided" } - return IntTensor(shape, buffer, 0) + return IntTensor(shape, array.asBuffer()) } /** @@ -119,10 +123,10 @@ public open class IntTensorAlgebra : TensorAlgebra { ) override fun Tensor.getTensor(i: Int): IntTensor { - val lastShape = asIntTensor().shape.drop(1).toIntArray() + val dt = asIntTensor() + val lastShape = shape.drop(1).toIntArray() val newShape = if (lastShape.isNotEmpty()) lastShape else intArrayOf(1) - val newStart = newShape.reduce(Int::times) * i + asIntTensor().bufferStart - return IntTensor(newShape, asIntTensor().mutableBuffer.array(), newStart) + return IntTensor(newShape, dt.source.view(newShape.reduce(Int::times) * i)) } /** @@ -133,8 +137,8 @@ public open class IntTensorAlgebra : TensorAlgebra { * @return tensor with the [shape] shape and filled with [value]. */ public fun full(value: Int, shape: IntArray): IntTensor { - checkEmptyShape(shape) - val buffer = IntArray(shape.reduce(Int::times)) { value } + checkNotEmptyShape(shape) + val buffer = IntBuffer(shape.reduce(Int::times)) { value } return IntTensor(shape, buffer) } @@ -144,9 +148,9 @@ public open class IntTensorAlgebra : TensorAlgebra { * @param value the value to fill the output tensor with. * @return tensor with the `input` tensor shape and filled with [value]. */ - public fun Tensor.fullLike(value: Int): IntTensor { - val shape = asIntTensor().shape - val buffer = IntArray(asIntTensor().numElements) { value } + public fun fullLike(structureND: StructureND<*>, value: Int): IntTensor { + val shape = structureND.shape + val buffer = IntBuffer(structureND.indices.linearSize) { value } return IntTensor(shape, buffer) } @@ -163,7 +167,7 @@ public open class IntTensorAlgebra : TensorAlgebra { * * @return tensor filled with the scalar value `0`, with the same shape as `input` tensor. 
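 *
 * Editor's note: this refactor turns `fullLike`/`zeroesLike`/`onesLike` from extensions into
 * plain functions, so `zeroesLike(tensor)` replaces the old `tensor.zeroesLike()` call style.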
*/ - public fun StructureND.zeroesLike(): IntTensor = asIntTensor().fullLike(0) + public fun zeroesLike(structureND: StructureND): IntTensor = fullLike(structureND.asIntTensor(), 0) /** * Returns a tensor filled with the scalar value `1`, with the shape defined by the variable argument [shape]. @@ -178,7 +182,7 @@ public open class IntTensorAlgebra : TensorAlgebra { * * @return tensor filled with the scalar value `1`, with the same shape as `input` tensor. */ - public fun Tensor.onesLike(): IntTensor = asIntTensor().fullLike(1) + public fun onesLike(structureND: Tensor<*>): IntTensor = fullLike(structureND, 1) /** * Returns a 2D tensor with shape ([n], [n]), with ones on the diagonal and zeros elsewhere. @@ -188,7 +192,7 @@ public open class IntTensorAlgebra : TensorAlgebra { */ public fun eye(n: Int): IntTensor { val shape = intArrayOf(n, n) - val buffer = IntArray(n * n) { 0 } + val buffer = IntBuffer(n * n) { 0 } val res = IntTensor(shape, buffer) for (i in 0 until n) { res[intArrayOf(i, i)] = 1 @@ -196,151 +200,92 @@ public open class IntTensorAlgebra : TensorAlgebra { return res } - /** - * Return a copy of the tensor. - * - * @return a copy of the `input` tensor with a copied buffer. - */ - public fun StructureND.copy(): IntTensor = - IntTensor(asIntTensor().shape, asIntTensor().mutableBuffer.array().copyOf(), asIntTensor().bufferStart) + override fun Int.plus(arg: StructureND): IntTensor = arg.map { this@plus + it } - override fun Int.plus(arg: StructureND): IntTensor { - val resBuffer = IntArray(arg.asIntTensor().numElements) { i -> - arg.asIntTensor().mutableBuffer.array()[arg.asIntTensor().bufferStart + i] + this - } - return IntTensor(arg.shape, resBuffer) - } + override fun StructureND.plus(arg: Int): IntTensor = map { it + arg } - override fun StructureND.plus(arg: Int): IntTensor = arg + asIntTensor() - - override fun StructureND.plus(arg: StructureND): IntTensor { - checkShapesCompatible(asIntTensor(), arg.asIntTensor()) - val resBuffer = IntArray(asIntTensor().numElements) { i -> - asIntTensor().mutableBuffer.array()[i] + arg.asIntTensor().mutableBuffer.array()[i] - } - return IntTensor(asIntTensor().shape, resBuffer) - } + override fun StructureND.plus(arg: StructureND): IntTensor = zip(this, arg) { l, r -> l + r } override fun Tensor.plusAssign(value: Int) { - for (i in 0 until asIntTensor().numElements) { - asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i] += value - } + mapInPlace { it + value } } override fun Tensor.plusAssign(arg: StructureND) { checkShapesCompatible(asIntTensor(), arg.asIntTensor()) - for (i in 0 until asIntTensor().numElements) { - asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i] += - arg.asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i] + mapIndexedInPlace { index, value -> + value + arg[index] } } - override fun Int.minus(arg: StructureND): IntTensor { - val resBuffer = IntArray(arg.asIntTensor().numElements) { i -> - this - arg.asIntTensor().mutableBuffer.array()[arg.asIntTensor().bufferStart + i] - } - return IntTensor(arg.shape, resBuffer) - } + override fun Int.minus(arg: StructureND): IntTensor = arg.map { this@minus - it } - override fun StructureND.minus(arg: Int): IntTensor { - val resBuffer = IntArray(asIntTensor().numElements) { i -> - asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i] - arg - } - return IntTensor(asIntTensor().shape, resBuffer) - } + override fun StructureND.minus(arg: Int): IntTensor = map { it - arg } - override fun StructureND.minus(arg: 
StructureND<Int>): IntTensor {
-        checkShapesCompatible(asIntTensor(), arg)
-        val resBuffer = IntArray(asIntTensor().numElements) { i ->
-            asIntTensor().mutableBuffer.array()[i] - arg.asIntTensor().mutableBuffer.array()[i]
-        }
-        return IntTensor(asIntTensor().shape, resBuffer)
-    }
+    override fun StructureND<Int>.minus(arg: StructureND<Int>): IntTensor = zip(this, arg) { l, r -> l - r }
 
     override fun Tensor<Int>.minusAssign(value: Int) {
-        for (i in 0 until asIntTensor().numElements) {
-            asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i] -= value
-        }
+        mapInPlace { it - value }
     }
 
     override fun Tensor<Int>.minusAssign(arg: StructureND<Int>) {
-        checkShapesCompatible(asIntTensor(), arg)
-        for (i in 0 until asIntTensor().numElements) {
-            asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i] -=
-                arg.asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i]
-        }
+        checkShapesCompatible(this, arg)
+        mapIndexedInPlace { index, value -> value - arg[index] }
     }
 
-    override fun Int.times(arg: StructureND<Int>): IntTensor {
-        val resBuffer = IntArray(arg.asIntTensor().numElements) { i ->
-            arg.asIntTensor().mutableBuffer.array()[arg.asIntTensor().bufferStart + i] * this
-        }
-        return IntTensor(arg.shape, resBuffer)
-    }
+    override fun Int.times(arg: StructureND<Int>): IntTensor = arg.map { this@times * it }
 
     override fun StructureND<Int>.times(arg: Int): IntTensor = arg * asIntTensor()
 
-    override fun StructureND<Int>.times(arg: StructureND<Int>): IntTensor {
-        checkShapesCompatible(asIntTensor(), arg)
-        val resBuffer = IntArray(asIntTensor().numElements) { i ->
-            asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i] *
-                arg.asIntTensor().mutableBuffer.array()[arg.asIntTensor().bufferStart + i]
-        }
-        return IntTensor(asIntTensor().shape, resBuffer)
-    }
+    override fun StructureND<Int>.times(arg: StructureND<Int>): IntTensor = zip(this, arg) { l, r -> l * r }
 
     override fun Tensor<Int>.timesAssign(value: Int) {
-        for (i in 0 until asIntTensor().numElements) {
-            asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i] *= value
-        }
+        mapInPlace { it * value }
     }
 
     override fun Tensor<Int>.timesAssign(arg: StructureND<Int>) {
-        checkShapesCompatible(asIntTensor(), arg)
-        for (i in 0 until asIntTensor().numElements) {
-            asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i] *=
-                arg.asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i]
-        }
+        checkShapesCompatible(this, arg)
+        mapIndexedInPlace { index, value -> value * arg[index] }
     }
 
-    override fun StructureND<Int>.unaryMinus(): IntTensor {
-        val resBuffer = IntArray(asIntTensor().numElements) { i ->
-            asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + i].unaryMinus()
-        }
-        return IntTensor(asIntTensor().shape, resBuffer)
-    }
+    override fun StructureND<Int>.unaryMinus(): IntTensor = map { -it }
 
-    override fun Tensor<Int>.transpose(i: Int, j: Int): IntTensor {
-        val ii = asIntTensor().minusIndex(i)
-        val jj = asIntTensor().minusIndex(j)
-        checkTranspose(asIntTensor().dimension, ii, jj)
-        val n = asIntTensor().numElements
+    override fun Tensor<Int>.transposed(i: Int, j: Int): IntTensor {
+        // TODO change strides instead of changing content
+        val dt = asIntTensor()
+        val ii = dt.minusIndex(i)
+        val jj = dt.minusIndex(j)
+        checkTranspose(dt.dimension, ii, jj)
+        val n = dt.linearSize
         val resBuffer = IntArray(n)
 
-        val resShape = asIntTensor().shape.copyOf()
+        val resShape = dt.shape.copyOf()
         resShape[ii] = resShape[jj].also { resShape[jj] = resShape[ii] }
 
-        val resTensor = IntTensor(resShape, resBuffer)
+        val resTensor = IntTensor(resShape, resBuffer.asBuffer())
 
         for (offset in 0 until n) {
-            val oldMultiIndex = asIntTensor().indices.index(offset)
+            val oldMultiIndex = dt.indices.index(offset)
             val newMultiIndex = oldMultiIndex.copyOf()
             newMultiIndex[ii] = newMultiIndex[jj].also { newMultiIndex[jj] = newMultiIndex[ii] }
 
             val linearIndex = resTensor.indices.offset(newMultiIndex)
-            resTensor.mutableBuffer.array()[linearIndex] =
-                asIntTensor().mutableBuffer.array()[asIntTensor().bufferStart + offset]
+            resTensor.source[linearIndex] = dt.source[offset]
         }
         return resTensor
     }
 
     override fun Tensor<Int>.view(shape: IntArray): IntTensor {
         checkView(asIntTensor(), shape)
-        return IntTensor(shape, asIntTensor().mutableBuffer.array(), asIntTensor().bufferStart)
+        return IntTensor(shape, asIntTensor().source)
     }
 
     override fun Tensor<Int>.viewAs(other: StructureND<Int>): IntTensor =
-        asIntTensor().view(other.shape)
+        view(other.shape)
+
+    override fun StructureND<Int>.dot(other: StructureND<Int>): IntTensor {
+        return if (dimension in 0..2 && other.dimension in 0..2) TODO("not implemented")
+        else error("Only vectors and matrices are allowed in non-broadcasting dot operation")
+    }
 
     override fun diagonalEmbedding(
         diagonalEntries: Tensor<Int>,
@@ -374,7 +319,7 @@ public open class IntTensorAlgebra : TensorAlgebra<Int, IntRing> {
             diagonalEntries.shape.slice(greaterDim - 1 until n - 1).toIntArray()
 
         val resTensor = zeros(resShape)
 
-        for (i in 0 until diagonalEntries.asIntTensor().numElements) {
+        for (i in 0 until diagonalEntries.asIntTensor().linearSize) {
             val multiIndex = diagonalEntries.asIntTensor().indices.index(i)
 
             var offset1 = 0
@@ -394,16 +339,27 @@
         return resTensor.asIntTensor()
     }
 
-    private infix fun Tensor<Int>.eq(
+    /**
+     * Compares two int tensors element-wise.
+     *
+     * @param other the tensor to compare with the receiver tensor.
+     * @return true if two tensors have the same shape and elements, false otherwise.
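+     *
+     * Editor's illustration (hedged; values arbitrary, `fromArray` as defined earlier in this file):
+     * ```
+     * fromArray(intArrayOf(2), intArrayOf(1, 2)).eq(fromArray(intArrayOf(2), intArrayOf(1, 2))) // true
+     * ```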
+ */ + public fun Tensor.eq(other: Tensor): Boolean = + asIntTensor().eq(other) { x, y -> x == y } + + private fun Tensor.eq( other: Tensor, + eqFunction: (Int, Int) -> Boolean, ): Boolean { checkShapesCompatible(asIntTensor(), other) - val n = asIntTensor().numElements - if (n != other.asIntTensor().numElements) { + val n = asIntTensor().linearSize + if (n != other.asIntTensor().linearSize) { return false } for (i in 0 until n) { - if (asIntTensor().mutableBuffer[asIntTensor().bufferStart + i] != other.asIntTensor().mutableBuffer[other.asIntTensor().bufferStart + i]) { + if (!eqFunction(asIntTensor().source[i], other.asIntTensor().source[i])) { return false } } @@ -421,10 +377,12 @@ public open class IntTensorAlgebra : TensorAlgebra { val shape = tensors[0].shape check(tensors.all { it.shape contentEquals shape }) { "Tensors must have same shapes" } val resShape = intArrayOf(tensors.size) + shape - val resBuffer = tensors.flatMap { - it.asIntTensor().mutableBuffer.array().drop(it.asIntTensor().bufferStart).take(it.asIntTensor().numElements) - }.toIntArray() - return IntTensor(resShape, resBuffer, 0) +// val resBuffer: List = tensors.flatMap { +// it.asIntTensor().source.array.drop(it.asIntTensor().bufferStart) +// .take(it.asIntTensor().linearSize) +// } + val resBuffer = tensors.map { it.asIntTensor().source }.concat() + return IntTensor(resShape, resBuffer) } /** @@ -435,14 +393,11 @@ public open class IntTensorAlgebra : TensorAlgebra { */ public fun Tensor.rowsByIndices(indices: IntArray): IntTensor = stack(indices.map { getTensor(it) }) - private inline fun StructureND.fold(foldFunction: (IntArray) -> Int): Int = - foldFunction(asIntTensor().copyArray()) - - private inline fun StructureND.foldDim( + private inline fun StructureND.foldDimToInt( dim: Int, keepDim: Boolean, - foldFunction: (IntArray) -> R, - ): BufferedTensor { + foldFunction: (IntArray) -> Int, + ): IntTensor { check(dim < dimension) { "Dimension $dim out of range $dimension" } val resShape = if (keepDim) { shape.take(dim).toIntArray() + intArrayOf(1) + shape.takeLast(dimension - dim - 1).toIntArray() @@ -451,9 +406,9 @@ public open class IntTensorAlgebra : TensorAlgebra { } val resNumElements = resShape.reduce(Int::times) val init = foldFunction(IntArray(1) { 0 }) - val resTensor = BufferedTensor( + val resTensor = IntTensor( resShape, - MutableBuffer.auto(resNumElements) { init }, 0 + IntBuffer(resNumElements) { init } ) for (index in resTensor.indices) { val prefix = index.take(dim).toIntArray() @@ -465,31 +420,33 @@ public open class IntTensorAlgebra : TensorAlgebra { return resTensor } - override fun StructureND.sum(): Int = asIntTensor().fold { it.sum() } + + override fun StructureND.sum(): Int = reduceElements { it.array.sum() } override fun StructureND.sum(dim: Int, keepDim: Boolean): IntTensor = - foldDim(dim, keepDim) { x -> x.sum() }.asIntTensor() + foldDimToInt(dim, keepDim) { x -> x.sum() } - override fun StructureND.min(): Int = this.fold { it.minOrNull()!! } + override fun StructureND.min(): Int = reduceElements { it.array.min() } override fun StructureND.min(dim: Int, keepDim: Boolean): IntTensor = - foldDim(dim, keepDim) { x -> x.minOrNull()!! }.asIntTensor() + foldDimToInt(dim, keepDim) { x -> x.minOrNull()!! } - override fun StructureND.argMin(dim: Int, keepDim: Boolean): IntTensor = - foldDim(dim, keepDim) { x -> - x.withIndex().minByOrNull { it.value }?.index!! 
- }.asIntTensor() + override fun StructureND.argMin(dim: Int, keepDim: Boolean): Tensor = foldDimToInt(dim, keepDim) { x -> + x.withIndex().minBy { it.value }.index + } - override fun StructureND.max(): Int = this.fold { it.maxOrNull()!! } + override fun StructureND.max(): Int = reduceElements { it.array.max() } override fun StructureND.max(dim: Int, keepDim: Boolean): IntTensor = - foldDim(dim, keepDim) { x -> x.maxOrNull()!! }.asIntTensor() + foldDimToInt(dim, keepDim) { x -> x.max() } override fun StructureND.argMax(dim: Int, keepDim: Boolean): IntTensor = - foldDim(dim, keepDim) { x -> - x.withIndex().maxByOrNull { it.value }?.index!! - }.asIntTensor() + foldDimToInt(dim, keepDim) { x -> + x.withIndex().maxBy { it.value }.index + } + + public fun StructureND.mean(): Double = sum().toDouble() / indices.linearSize } public val Int.Companion.tensorAlgebra: IntTensorAlgebra get() = IntTensorAlgebra diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/broadcastUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/broadcastUtils.kt index ab97903e4..fee62c79c 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/broadcastUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/broadcastUtils.kt @@ -5,6 +5,7 @@ package space.kscience.kmath.tensors.core.internal +import space.kscience.kmath.structures.asBuffer import space.kscience.kmath.tensors.core.DoubleTensor import kotlin.math.max @@ -24,8 +25,8 @@ internal fun multiIndexBroadCasting(tensor: DoubleTensor, resTensor: DoubleTenso } val curLinearIndex = tensor.indices.offset(curMultiIndex) - resTensor.mutableBuffer.array()[linearIndex] = - tensor.mutableBuffer.array()[tensor.bufferStart + curLinearIndex] + resTensor.source[linearIndex] = + tensor.source[curLinearIndex] } } @@ -63,7 +64,7 @@ internal fun broadcastTo(tensor: DoubleTensor, newShape: IntArray): DoubleTensor } val n = newShape.reduce { acc, i -> acc * i } - val resTensor = DoubleTensor(newShape, DoubleArray(n)) + val resTensor = DoubleTensor(newShape, DoubleArray(n).asBuffer()) for (i in tensor.shape.indices) { val curDim = tensor.shape[i] @@ -82,7 +83,7 @@ internal fun broadcastTensors(vararg tensors: DoubleTensor): List val n = totalShape.reduce { acc, i -> acc * i } return tensors.map { tensor -> - val resTensor = DoubleTensor(totalShape, DoubleArray(n)) + val resTensor = DoubleTensor(totalShape, DoubleArray(n).asBuffer()) multiIndexBroadCasting(tensor, resTensor, n) resTensor } @@ -106,17 +107,17 @@ internal fun broadcastOuterTensors(vararg tensors: DoubleTensor): List checkShapesCompatible(a: StructureND, b: StructureND) = +@PublishedApi +internal fun checkShapesCompatible(a: StructureND, b: StructureND): Unit = check(a.shape contentEquals b.shape) { "Incompatible shapes ${a.shape.toList()} and ${b.shape.toList()} " } @@ -50,15 +52,14 @@ internal fun checkSquareMatrix(shape: IntArray) { internal fun DoubleTensorAlgebra.checkSymmetric( tensor: Tensor, epsilon: Double = 1e-6, -) = - check(tensor.eq(tensor.transpose(), epsilon)) { - "Tensor is not symmetric about the last 2 dimensions at precision $epsilon" - } +) = check(tensor.eq(tensor.transposed(), epsilon)) { + "Tensor is not symmetric about the last 2 dimensions at precision $epsilon" +} internal fun DoubleTensorAlgebra.checkPositiveDefinite(tensor: DoubleTensor, epsilon: Double = 1e-6) { checkSymmetric(tensor, epsilon) for (mat in tensor.matrixSequence()) - 
check(mat.toTensor().detLU().value() > 0.0) { - "Tensor contains matrices which are not positive definite ${mat.toTensor().detLU().value()}" + check(mat.asDoubleTensor().detLU().value() > 0.0) { + "Tensor contains matrices which are not positive definite ${mat.asDoubleTensor().detLU().value()}" } } \ No newline at end of file diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/doubleTensorHelpers.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/doubleTensorHelpers.kt new file mode 100644 index 000000000..9c6f54d61 --- /dev/null +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/doubleTensorHelpers.kt @@ -0,0 +1,189 @@ +/* + * Copyright 2018-2022 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. + */ + +package space.kscience.kmath.tensors.core.internal + +import space.kscience.kmath.nd.MutableStructure2D +import space.kscience.kmath.nd.Structure2D +import space.kscience.kmath.nd.as2D +import space.kscience.kmath.nd.get +import space.kscience.kmath.operations.asSequence +import space.kscience.kmath.structures.DoubleBuffer +import space.kscience.kmath.structures.VirtualBuffer +import space.kscience.kmath.structures.asBuffer +import space.kscience.kmath.structures.indices +import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.eye +import space.kscience.kmath.tensors.core.BufferedTensor +import space.kscience.kmath.tensors.core.DoubleTensor +import space.kscience.kmath.tensors.core.OffsetDoubleBuffer +import space.kscience.kmath.tensors.core.copyToTensor +import kotlin.math.abs +import kotlin.math.max +import kotlin.math.sqrt + + +internal fun MutableStructure2D.jacobiHelper( + maxIteration: Int, + epsilon: Double, +): Pair> { + val n = rowNum + val A_ = copyToTensor() + val V = eye(n) + val D = DoubleBuffer(n) { get(it, it) } + val B = DoubleBuffer(n) { get(it, it) } + val Z = DoubleBuffer(n) { 0.0 } + + // assume that buffered tensor is square matrix + operator fun DoubleTensor.get(i: Int, j: Int): Double { + return source[i * shape[0] + j] + } + + operator fun BufferedTensor.set(i: Int, j: Int, value: Double) { + source[i * shape[0] + j] = value + } + + fun maxOffDiagonal(matrix: BufferedTensor): Double { + var maxOffDiagonalElement = 0.0 + for (i in 0 until n - 1) { + for (j in i + 1 until n) { + maxOffDiagonalElement = max(maxOffDiagonalElement, abs(matrix[i, j])) + } + } + return maxOffDiagonalElement + } + + fun rotate(a: BufferedTensor, s: Double, tau: Double, i: Int, j: Int, k: Int, l: Int) { + val g = a[i, j] + val h = a[k, l] + a[i, j] = g - s * (h + g * tau) + a[k, l] = h + s * (g - h * tau) + } + + fun jacobiIteration( + a: BufferedTensor, + v: BufferedTensor, + d: DoubleBuffer, + z: DoubleBuffer, + ) { + for (ip in 0 until n - 1) { + for (iq in ip + 1 until n) { + val g = 100.0 * abs(a[ip, iq]) + + if (g <= epsilon * abs(d[ip]) && g <= epsilon * abs(d[iq])) { + a[ip, iq] = 0.0 + continue + } + + var h = d[iq] - d[ip] + val t = when { + g <= epsilon * abs(h) -> (a[ip, iq]) / h + else -> { + val theta = 0.5 * h / (a[ip, iq]) + val denominator = abs(theta) + sqrt(1.0 + theta * theta) + if (theta < 0.0) -1.0 / denominator else 1.0 / denominator + } + } + + val c = 1.0 / sqrt(1 + t * t) + val s = t * c + val tau = s / (1.0 + c) + h = t * a[ip, iq] + z[ip] -= h + z[iq] += h + d[ip] -= h + d[iq] += h + a[ip, iq] = 0.0 + + for (j in 0 until ip) { + rotate(a, s, tau, j, ip, j, iq) 
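+                    // editor's annotation: this loop and the three that follow apply the same
+                    // Givens rotation to every off-diagonal entry sharing row or column ip or iq,
+                    // and the final loop accumulates the rotation into the eigenvector matrix v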
+ } + for (j in (ip + 1) until iq) { + rotate(a, s, tau, ip, j, j, iq) + } + for (j in (iq + 1) until n) { + rotate(a, s, tau, ip, j, iq, j) + } + for (j in 0 until n) { + rotate(v, s, tau, j, ip, j, iq) + } + } + } + } + + fun updateDiagonal( + d: DoubleBuffer, + z: DoubleBuffer, + b: DoubleBuffer, + ) { + for (ip in 0 until d.size) { + b[ip] += z[ip] + d[ip] = b[ip] + z[ip] = 0.0 + } + } + + var sm = maxOffDiagonal(A_) + for (iteration in 0 until maxIteration) { + if (sm < epsilon) { + break + } + + jacobiIteration(A_, V, D, Z) + updateDiagonal(D, Z, B) + sm = maxOffDiagonal(A_) + } + + // TODO sort eigenvalues + return D to V.as2D() +} + +/** + * Concatenate a list of arrays + */ +internal fun List.concat(): DoubleBuffer { + val array = DoubleArray(sumOf { it.size }) + var pointer = 0 + while (pointer < array.size) { + for (bufferIndex in indices) { + val buffer = get(bufferIndex) + for (innerIndex in buffer.indices) { + array[pointer] = buffer[innerIndex] + pointer++ + } + } + } + return array.asBuffer() +} + +internal val DoubleTensor.vectors: VirtualBuffer + get() { + val n = shape.size + val vectorOffset = shape[n - 1] + val vectorShape = intArrayOf(shape.last()) + + return VirtualBuffer(linearSize / vectorOffset) { index -> + val offset = index * vectorOffset + DoubleTensor(vectorShape, source.view(offset)) + } + } + + +internal fun DoubleTensor.vectorSequence(): Sequence = vectors.asSequence() + + +internal val DoubleTensor.matrices: VirtualBuffer + get(){ + val n = shape.size + check(n >= 2) { "Expected tensor with 2 or more dimensions, got size $n" } + val matrixOffset = shape[n - 1] * shape[n - 2] + val matrixShape = intArrayOf(shape[n - 2], shape[n - 1]) + + return VirtualBuffer(linearSize / matrixOffset) { index -> + val offset = index * matrixOffset + DoubleTensor(matrixShape, source.view(offset)) + } +} + +internal fun DoubleTensor.matrixSequence(): Sequence = matrices.asSequence() \ No newline at end of file diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/intTensorHelpers.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/intTensorHelpers.kt new file mode 100644 index 000000000..93229c2d4 --- /dev/null +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/intTensorHelpers.kt @@ -0,0 +1,64 @@ +/* + * Copyright 2018-2022 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
+ */ + +package space.kscience.kmath.tensors.core.internal + +import space.kscience.kmath.operations.asSequence +import space.kscience.kmath.structures.IntBuffer +import space.kscience.kmath.structures.VirtualBuffer +import space.kscience.kmath.structures.asBuffer +import space.kscience.kmath.structures.indices +import space.kscience.kmath.tensors.core.IntTensor +import space.kscience.kmath.tensors.core.OffsetIntBuffer + +/** + * Concatenate a list of arrays + */ +internal fun List.concat(): IntBuffer { + val array = IntArray(sumOf { it.size }) + var pointer = 0 + while (pointer < array.size) { + for (bufferIndex in indices) { + val buffer = get(bufferIndex) + for (innerIndex in buffer.indices) { + array[pointer] = buffer[innerIndex] + pointer++ + } + } + } + return array.asBuffer() +} + + +internal val IntTensor.vectors: VirtualBuffer + get() { + val n = shape.size + val vectorOffset = shape[n - 1] + val vectorShape = intArrayOf(shape.last()) + + return VirtualBuffer(linearSize / vectorOffset) { index -> + val offset = index * vectorOffset + IntTensor(vectorShape, source.view(offset)) + } + } + + +internal fun IntTensor.vectorSequence(): Sequence = vectors.asSequence() + + +internal val IntTensor.matrices: VirtualBuffer + get(){ + val n = shape.size + check(n >= 2) { "Expected tensor with 2 or more dimensions, got size $n" } + val matrixOffset = shape[n - 1] * shape[n - 2] + val matrixShape = intArrayOf(shape[n - 2], shape[n - 1]) + + return VirtualBuffer(linearSize / matrixOffset) { index -> + val offset = index * matrixOffset + IntTensor(matrixShape, source.view(offset)) + } + } + +internal fun IntTensor.matrixSequence(): Sequence = matrices.asSequence() \ No newline at end of file diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt index ec529590f..9047ba29e 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/linUtils.kt @@ -5,75 +5,33 @@ package space.kscience.kmath.tensors.core.internal -import space.kscience.kmath.nd.MutableStructure1D -import space.kscience.kmath.nd.MutableStructure2D -import space.kscience.kmath.nd.as1D -import space.kscience.kmath.nd.as2D -import space.kscience.kmath.operations.asSequence +import space.kscience.kmath.nd.* import space.kscience.kmath.operations.invoke -import space.kscience.kmath.structures.VirtualBuffer -import space.kscience.kmath.tensors.core.BufferedTensor -import space.kscience.kmath.tensors.core.DoubleTensor -import space.kscience.kmath.tensors.core.DoubleTensorAlgebra -import space.kscience.kmath.tensors.core.IntTensor +import space.kscience.kmath.structures.IntBuffer +import space.kscience.kmath.structures.asBuffer +import space.kscience.kmath.structures.indices +import space.kscience.kmath.tensors.core.* import kotlin.math.abs import kotlin.math.min import kotlin.math.sqrt -internal val BufferedTensor.vectors: VirtualBuffer> - get() { - val n = shape.size - val vectorOffset = shape[n - 1] - val vectorShape = intArrayOf(shape.last()) - - return VirtualBuffer(numElements / vectorOffset) { index -> - val offset = index * vectorOffset - BufferedTensor(vectorShape, mutableBuffer, bufferStart + offset) - } - } - - -internal fun BufferedTensor.vectorSequence(): Sequence> = vectors.asSequence() - -/** - * A random access alternative to [matrixSequence] - */ -internal 
val BufferedTensor.matrices: VirtualBuffer> - get() { - val n = shape.size - check(n >= 2) { "Expected tensor with 2 or more dimensions, got size $n" } - val matrixOffset = shape[n - 1] * shape[n - 2] - val matrixShape = intArrayOf(shape[n - 2], shape[n - 1]) - - return VirtualBuffer(numElements / matrixOffset) { index -> - val offset = index * matrixOffset - BufferedTensor(matrixShape, mutableBuffer, bufferStart + offset) - } - } - -internal fun BufferedTensor.matrixSequence(): Sequence> = matrices.asSequence() - internal fun dotTo( a: BufferedTensor, b: BufferedTensor, res: BufferedTensor, l: Int, m: Int, n: Int, ) { - val aStart = a.bufferStart - val bStart = b.bufferStart - val resStart = res.bufferStart - - val aBuffer = a.mutableBuffer - val bBuffer = b.mutableBuffer - val resBuffer = res.mutableBuffer + val aBuffer = a.source + val bBuffer = b.source + val resBuffer = res.source for (i in 0 until l) { for (j in 0 until n) { var curr = 0.0 for (k in 0 until m) { - curr += aBuffer[aStart + i * m + k] * bBuffer[bStart + k * n + j] + curr += aBuffer[i * m + k] * bBuffer[k * n + j] } - resBuffer[resStart + i * n + j] = curr + resBuffer[i * n + j] = curr } } } @@ -129,7 +87,7 @@ internal fun luHelper( return false } -internal fun BufferedTensor.setUpPivots(): IntTensor { +internal fun StructureND.setUpPivots(): IntTensor { val n = this.shape.size val m = this.shape.last() val pivotsShape = IntArray(n - 1) { i -> this.shape[i] } @@ -137,17 +95,17 @@ internal fun BufferedTensor.setUpPivots(): IntTensor { return IntTensor( pivotsShape, - IntArray(pivotsShape.reduce(Int::times)) { 0 } + IntBuffer(pivotsShape.reduce(Int::times)) { 0 } ) } internal fun DoubleTensorAlgebra.computeLU( - tensor: DoubleTensor, + tensor: StructureND, epsilon: Double, ): Pair? 
{ checkSquareMatrix(tensor.shape) - val luTensor = tensor.copy() + val luTensor = tensor.copyToTensor() val pivotsTensor = tensor.setUpPivots() for ((lu, pivots) in luTensor.matrixSequence().zip(pivotsTensor.vectorSequence())) @@ -253,8 +211,8 @@ internal fun DoubleTensorAlgebra.qrHelper( checkSquareMatrix(matrix.shape) val n = matrix.shape[0] val qM = q.as2D() - val matrixT = matrix.transpose(0, 1) - val qT = q.transpose(0, 1) + val matrixT = matrix.transposed(0, 1) + val qT = q.transposed(0, 1) for (j in 0 until n) { val v = matrixT.getTensor(j) @@ -280,10 +238,10 @@ internal fun DoubleTensorAlgebra.svd1d(a: DoubleTensor, epsilon: Double = 1e-10) var v: DoubleTensor val b: DoubleTensor if (n > m) { - b = a.transpose(0, 1).dot(a) + b = a.transposed(0, 1).dot(a) v = DoubleTensor(intArrayOf(m), getRandomUnitVector(m, 0)) } else { - b = a.dot(a.transpose(0, 1)) + b = a.dot(a.transposed(0, 1)) v = DoubleTensor(intArrayOf(n), getRandomUnitVector(n, 0)) } @@ -308,7 +266,7 @@ internal fun DoubleTensorAlgebra.svdHelper( val (matrixU, matrixS, matrixV) = USV for (k in 0 until min(n, m)) { - var a = matrix.copy() + var a = matrix.copyToTensor() for ((singularValue, u, v) in res.slice(0 until k)) { val outerProduct = DoubleArray(u.shape[0] * v.shape[0]) for (i in 0 until u.shape[0]) { @@ -316,7 +274,7 @@ internal fun DoubleTensorAlgebra.svdHelper( outerProduct[i * v.shape[0] + j] = u.getTensor(i).value() * v.getTensor(j).value() } } - a = a - singularValue.times(DoubleTensor(intArrayOf(u.shape[0], v.shape[0]), outerProduct)) + a = a - singularValue.times(DoubleTensor(intArrayOf(u.shape[0], v.shape[0]), outerProduct.asBuffer())) } var v: DoubleTensor var u: DoubleTensor @@ -328,7 +286,7 @@ internal fun DoubleTensorAlgebra.svdHelper( u = u.times(1.0 / norm) } else { u = svd1d(a, epsilon) - v = matrix.transpose(0, 1).dot(u) + v = matrix.transposed(0, 1).dot(u) norm = DoubleTensorAlgebra { (v dot v).sqrt().value() } v = v.times(1.0 / norm) } @@ -337,15 +295,15 @@ internal fun DoubleTensorAlgebra.svdHelper( } val s = res.map { it.first }.toDoubleArray() - val uBuffer = res.map { it.second }.flatMap { it.mutableBuffer.array().toList() }.toDoubleArray() - val vBuffer = res.map { it.third }.flatMap { it.mutableBuffer.array().toList() }.toDoubleArray() + val uBuffer = res.map { it.second.source }.concat() + val vBuffer = res.map { it.third.source }.concat() for (i in uBuffer.indices) { - matrixU.mutableBuffer.array()[matrixU.bufferStart + i] = uBuffer[i] + matrixU.source[i] = uBuffer[i] } for (i in s.indices) { - matrixS.mutableBuffer.array()[matrixS.bufferStart + i] = s[i] + matrixS.source[i] = s[i] } for (i in vBuffer.indices) { - matrixV.mutableBuffer.array()[matrixV.bufferStart + i] = vBuffer[i] + matrixV.source[i] = vBuffer[i] } } diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/tensorCastsUtils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/tensorCastsUtils.kt deleted file mode 100644 index e7704b257..000000000 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/tensorCastsUtils.kt +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2018-2022 KMath contributors. - * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
- */ - -package space.kscience.kmath.tensors.core.internal - -import space.kscience.kmath.nd.MutableBufferND -import space.kscience.kmath.nd.StructureND -import space.kscience.kmath.structures.asMutableBuffer -import space.kscience.kmath.tensors.core.BufferedTensor -import space.kscience.kmath.tensors.core.DoubleTensor -import space.kscience.kmath.tensors.core.IntTensor -import space.kscience.kmath.tensors.core.TensorLinearStructure - -internal fun BufferedTensor.toTensor(): IntTensor = - IntTensor(this.shape, this.mutableBuffer.array(), this.bufferStart) - -internal fun BufferedTensor.toTensor(): DoubleTensor = - DoubleTensor(this.shape, this.mutableBuffer.array(), this.bufferStart) - -internal fun StructureND.copyToBufferedTensor(): BufferedTensor = - BufferedTensor( - this.shape, - TensorLinearStructure(this.shape).asSequence().map(this::get).toMutableList().asMutableBuffer(), - 0 - ) - -internal fun StructureND.toBufferedTensor(): BufferedTensor = when (this) { - is BufferedTensor -> this - is MutableBufferND -> if (this.indices == TensorLinearStructure(this.shape)) { - BufferedTensor(this.shape, this.buffer, 0) - } else { - this.copyToBufferedTensor() - } - else -> this.copyToBufferedTensor() -} diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/utils.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/utils.kt index 8a96d516f..daafdaa58 100644 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/utils.kt +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/internal/utils.kt @@ -6,41 +6,25 @@ package space.kscience.kmath.tensors.core.internal import space.kscience.kmath.nd.as1D +import space.kscience.kmath.operations.DoubleBufferOps.Companion.map import space.kscience.kmath.operations.toMutableList import space.kscience.kmath.samplers.GaussianSampler import space.kscience.kmath.stat.RandomGenerator -import space.kscience.kmath.structures.* +import space.kscience.kmath.structures.DoubleBuffer import space.kscience.kmath.tensors.core.BufferedTensor import space.kscience.kmath.tensors.core.DoubleTensor import kotlin.math.* -/** - * Returns a reference to [IntArray] containing all the elements of this [Buffer] or copy the data. - */ -internal fun Buffer.array(): IntArray = when (this) { - is IntBuffer -> array - else -> this.toIntArray() -} - -/** - * Returns a reference to [DoubleArray] containing all the elements of this [Buffer] or copy the data. 
- */ -@PublishedApi -internal fun Buffer.array(): DoubleArray = when (this) { - is DoubleBuffer -> array - else -> this.toDoubleArray() -} - -internal fun getRandomNormals(n: Int, seed: Long): DoubleArray { +internal fun getRandomNormals(n: Int, seed: Long): DoubleBuffer { val distribution = GaussianSampler(0.0, 1.0) val generator = RandomGenerator.default(seed) - return distribution.sample(generator).nextBufferBlocking(n).toDoubleArray() + return distribution.sample(generator).nextBufferBlocking(n) } -internal fun getRandomUnitVector(n: Int, seed: Long): DoubleArray { - val unnorm = getRandomNormals(n, seed) - val norm = sqrt(unnorm.sumOf { it * it }) - return unnorm.map { it / norm }.toDoubleArray() +internal fun getRandomUnitVector(n: Int, seed: Long): DoubleBuffer { + val unnorm: DoubleBuffer = getRandomNormals(n, seed) + val norm = sqrt(unnorm.array.sumOf { it * it }) + return unnorm.map { it / norm } } internal fun minusIndexFrom(n: Int, i: Int): Int = if (i >= 0) i else { @@ -71,6 +55,7 @@ internal fun format(value: Double, digits: Int = 4): String = buildString { append("e+") append(order) } + else -> { append('e') append(order) @@ -116,7 +101,7 @@ internal fun DoubleTensor.toPrettyString(): String = buildString { } offset += vectorSize - if (this@toPrettyString.numElements == offset) { + if (this@toPrettyString.linearSize == offset) { break } diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorCasts.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorCasts.kt deleted file mode 100644 index 1c914d474..000000000 --- a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorCasts.kt +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2018-2022 KMath contributors. - * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. - */ - -package space.kscience.kmath.tensors.core - -import space.kscience.kmath.nd.StructureND -import space.kscience.kmath.tensors.api.Tensor -import space.kscience.kmath.tensors.core.internal.toBufferedTensor -import space.kscience.kmath.tensors.core.internal.toTensor - -/** - * Casts [Tensor] of [Double] to [DoubleTensor] - */ -public fun StructureND.asDoubleTensor(): DoubleTensor = when (this) { - is DoubleTensor -> this - else -> this.toBufferedTensor().toTensor() -} - -/** - * Casts [Tensor] of [Int] to [IntTensor] - */ -public fun StructureND.asIntTensor(): IntTensor = when (this) { - is IntTensor -> this - else -> this.toBufferedTensor().toTensor() -} - -/** - * Returns a copy-protected [DoubleArray] of tensor elements - */ -public fun DoubleTensor.copyArray(): DoubleArray { - //TODO use ArrayCopy - return DoubleArray(numElements) { i -> - mutableBuffer[bufferStart + i] - } -} - -/** - * Returns a copy-protected [IntArray] of tensor elements - */ -public fun IntTensor.copyArray(): IntArray { - return IntArray(numElements) { i -> - mutableBuffer[bufferStart + i] - } -} diff --git a/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorTransform.kt b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorTransform.kt new file mode 100644 index 000000000..30a828d6a --- /dev/null +++ b/kmath-tensors/src/commonMain/kotlin/space/kscience/kmath/tensors/core/tensorTransform.kt @@ -0,0 +1,55 @@ +/* + * Copyright 2018-2022 KMath contributors. + * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. 
diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestBroadcasting.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestBroadcasting.kt
index 67d5f9a96..6a99b9ba8 100644
--- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestBroadcasting.kt
+++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestBroadcasting.kt
@@ -6,7 +6,10 @@
 package space.kscience.kmath.tensors.core
 
 import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.internal.*
+import space.kscience.kmath.tensors.core.internal.broadcastOuterTensors
+import space.kscience.kmath.tensors.core.internal.broadcastShapes
+import space.kscience.kmath.tensors.core.internal.broadcastTensors
+import space.kscience.kmath.tensors.core.internal.broadcastTo
 import kotlin.test.Test
 import kotlin.test.assertTrue
 
@@ -34,7 +37,7 @@ internal class TestBroadcasting {
         val res = broadcastTo(tensor2, tensor1.shape)
         assertTrue(res.shape contentEquals intArrayOf(2, 3))
-        assertTrue(res.mutableBuffer.array() contentEquals doubleArrayOf(10.0, 20.0, 30.0, 10.0, 20.0, 30.0))
+        assertTrue(res.source contentEquals doubleArrayOf(10.0, 20.0, 30.0, 10.0, 20.0, 30.0))
     }
 
@@ -49,9 +52,9 @@
         assertTrue(res[1].shape contentEquals intArrayOf(1, 2, 3))
         assertTrue(res[2].shape contentEquals intArrayOf(1, 2, 3))
 
-        assertTrue(res[0].mutableBuffer.array() contentEquals doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
-        assertTrue(res[1].mutableBuffer.array() contentEquals doubleArrayOf(10.0, 20.0, 30.0, 10.0, 20.0, 30.0))
-        assertTrue(res[2].mutableBuffer.array() contentEquals doubleArrayOf(500.0, 500.0, 500.0, 500.0, 500.0, 500.0))
+        assertTrue(res[0].source contentEquals doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
+        assertTrue(res[1].source contentEquals doubleArrayOf(10.0, 20.0, 30.0, 10.0, 20.0, 30.0))
+        assertTrue(res[2].source contentEquals doubleArrayOf(500.0, 500.0, 500.0, 500.0, 500.0, 500.0))
     }
 
@@ -66,15 +69,15 @@
         assertTrue(res[1].shape contentEquals intArrayOf(1, 1, 3))
         assertTrue(res[2].shape contentEquals intArrayOf(1, 1, 1))
 
-        assertTrue(res[0].mutableBuffer.array() contentEquals doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
-        assertTrue(res[1].mutableBuffer.array() contentEquals doubleArrayOf(10.0, 20.0, 30.0))
-        assertTrue(res[2].mutableBuffer.array() contentEquals doubleArrayOf(500.0))
+        assertTrue(res[0].source contentEquals doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
+        assertTrue(res[1].source contentEquals doubleArrayOf(10.0, 20.0, 30.0))
+        assertTrue(res[2].source contentEquals doubleArrayOf(500.0))
     }
 
     @Test
     fun testBroadcastOuterTensorsShapes() = DoubleTensorAlgebra {
-        val tensor1 = fromArray(intArrayOf(2, 1, 3, 2, 3), DoubleArray(2 * 1 * 3 * 2 * 3) {0.0})
-        val tensor2 = fromArray(intArrayOf(4, 2, 5, 1, 3, 3), DoubleArray(4 * 2 * 5 * 1 * 3 * 3) {0.0})
+        val tensor1 = fromArray(intArrayOf(2, 1, 3, 2, 3), DoubleArray(2 * 1 * 3 * 2 * 3) { 0.0 })
+        val tensor2 = fromArray(intArrayOf(4, 2, 5, 1, 3, 3), DoubleArray(4 * 2 * 5 * 1 * 3 * 3) { 0.0 })
         val tensor3 = fromArray(intArrayOf(1, 1), doubleArrayOf(500.0))
 
         val res = broadcastOuterTensors(tensor1, tensor2, tensor3)
@@ -95,16 +98,16 @@
         val tensor32 = tensor3 - tensor2
 
         assertTrue(tensor21.shape contentEquals intArrayOf(2, 3))
-        assertTrue(tensor21.mutableBuffer.array() contentEquals doubleArrayOf(9.0, 18.0, 27.0, 6.0, 15.0, 24.0))
+        assertTrue(tensor21.source contentEquals doubleArrayOf(9.0, 18.0, 27.0, 6.0, 15.0, 24.0))
 
         assertTrue(tensor31.shape contentEquals intArrayOf(1, 2, 3))
         assertTrue(
-            tensor31.mutableBuffer.array()
+            tensor31.source
                     contentEquals doubleArrayOf(499.0, 498.0, 497.0, 496.0, 495.0, 494.0)
         )
 
         assertTrue(tensor32.shape contentEquals intArrayOf(1, 1, 3))
-        assertTrue(tensor32.mutableBuffer.array() contentEquals doubleArrayOf(490.0, 480.0, 470.0))
+        assertTrue(tensor32.source contentEquals doubleArrayOf(490.0, 480.0, 470.0))
     }
 }
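These tests pin down the right-aligned broadcasting convention: trailing dimensions are compared pairwise and must either match or be 1, and missing leading dimensions count as 1. A standalone sketch of the rule on plain `IntArray` shapes (not the library's internal `broadcastShapes`, just the same convention):

```kotlin
// Computes the broadcast shape of two shapes under the right-aligned rule.
fun broadcastShape(a: IntArray, b: IntArray): IntArray {
    val rank = maxOf(a.size, b.size)
    return IntArray(rank) { i ->
        val da = a.getOrElse(a.size - rank + i) { 1 } // missing leading dims count as 1
        val db = b.getOrElse(b.size - rank + i) { 1 }
        require(da == db || da == 1 || db == 1) { "Shapes are not broadcastable" }
        maxOf(da, db)
    }
}

fun main() {
    println(broadcastShape(intArrayOf(2, 3), intArrayOf(1, 3)).contentToString()) // [2, 3]
    println(broadcastShape(intArrayOf(2, 1, 3), intArrayOf(3)).contentToString()) // [2, 1, 3]
}
```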
diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt
index c777273f3..4bc2e3bdb 100644
--- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt
+++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleAnalyticTensorAlgebra.kt
@@ -6,6 +6,7 @@
 package space.kscience.kmath.tensors.core
 
 import space.kscience.kmath.operations.invoke
+import space.kscience.kmath.structures.asBuffer
 import kotlin.math.*
 import kotlin.test.Test
 import kotlin.test.assertTrue
@@ -20,14 +21,14 @@ internal class TestDoubleAnalyticTensorAlgebra {
         3.23, 133.7, 25.3, 100.3, 11.0, 12.012
     )
-    val tensor = DoubleTensor(shape, buffer)
+    val tensor = DoubleTensor(shape, buffer.asBuffer())
 
     fun DoubleArray.fmap(transform: (Double) -> Double): DoubleArray {
         return this.map(transform).toDoubleArray()
     }
 
     fun expectedTensor(transform: (Double) -> Double): DoubleTensor {
-        return DoubleTensor(shape, buffer.fmap(transform))
+        return DoubleTensor(shape, buffer.fmap(transform).asBuffer())
     }
 
     @Test
@@ -106,58 +107,74 @@
         1.0, 2.0, -3.0, 4.0
     )
-    val tensor2 = DoubleTensor(shape2, buffer2)
+    val tensor2 = DoubleTensor(shape2, buffer2.asBuffer())
 
     @Test
     fun testMin() = DoubleTensorAlgebra {
         assertTrue { tensor2.min() == -3.0 }
-        assertTrue { tensor2.min(0, true) eq fromArray(
-            intArrayOf(1, 2),
-            doubleArrayOf(-3.0, 2.0)
-        )}
-        assertTrue { tensor2.min(1, false) eq fromArray(
-            intArrayOf(2),
-            doubleArrayOf(1.0, -3.0)
-        )}
+        assertTrue {
+            tensor2.min(0, true) eq fromArray(
+                intArrayOf(1, 2),
+                doubleArrayOf(-3.0, 2.0)
+            )
+        }
+        assertTrue {
+            tensor2.min(1, false) eq fromArray(
+                intArrayOf(2),
+                doubleArrayOf(1.0, -3.0)
+            )
+        }
     }
 
     @Test
     fun testMax() = DoubleTensorAlgebra {
         assertTrue { tensor2.max() == 4.0 }
-        assertTrue { tensor2.max(0, true) eq fromArray(
-            intArrayOf(1, 2),
-            doubleArrayOf(1.0, 4.0)
-        )}
-        assertTrue { tensor2.max(1, false) eq fromArray(
-            intArrayOf(2),
-            doubleArrayOf(2.0, 4.0)
-        )}
+        assertTrue {
+            tensor2.max(0, true) eq fromArray(
+                intArrayOf(1, 2),
+                doubleArrayOf(1.0, 4.0)
+            )
+        }
+        assertTrue {
+            tensor2.max(1, false) eq fromArray(
+                intArrayOf(2),
+                doubleArrayOf(2.0, 4.0)
+            )
+        }
     }
 
     @Test
     fun testSum() = DoubleTensorAlgebra {
         assertTrue { tensor2.sum() == 4.0 }
-        assertTrue { tensor2.sum(0, true) eq fromArray(
-            intArrayOf(1, 2),
-            doubleArrayOf(-2.0, 6.0)
-        )}
-        assertTrue { tensor2.sum(1, false) eq fromArray(
-            intArrayOf(2),
-            doubleArrayOf(3.0, 1.0)
-        )}
+        assertTrue {
+            tensor2.sum(0, true) eq fromArray(
+                intArrayOf(1, 2),
+                doubleArrayOf(-2.0, 6.0)
+            )
+        }
+        assertTrue {
+            tensor2.sum(1, false) eq fromArray(
+                intArrayOf(2),
+                doubleArrayOf(3.0, 1.0)
+            )
+        }
     }
 
     @Test
     fun testMean() = DoubleTensorAlgebra {
         assertTrue { tensor2.mean() == 1.0 }
-        assertTrue { tensor2.mean(0, true) eq fromArray(
-            intArrayOf(1, 2),
-            doubleArrayOf(-1.0, 3.0)
-        )}
-        assertTrue { tensor2.mean(1, false) eq fromArray(
-            intArrayOf(2),
-            doubleArrayOf(1.5, 0.5)
-        )}
+        assertTrue {
+            tensor2.mean(0, true) eq fromArray(
+                intArrayOf(1, 2),
+                doubleArrayOf(-1.0, 3.0)
+            )
+        }
+        assertTrue {
+            tensor2.mean(1, false) eq fromArray(
+                intArrayOf(2),
+                doubleArrayOf(1.5, 0.5)
+            )
+        }
     }
 }
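A worked reading of the `dim`/`keepDim` arguments these reduction tests use, for the row-major 2x2 matrix [[1, 2], [-3, 4]]: reducing over `dim = 0` collapses rows and leaves one value per column, and `keepDim = true` merely keeps the collapsed axis as size 1 in the reported shape ([1, 2] instead of [2]). A plain-array sketch of the column-wise case:

```kotlin
// Column-wise minimum of a row-major rows x cols matrix (the dim = 0 reduction).
fun minAlongDim0(values: DoubleArray, rows: Int, cols: Int): DoubleArray =
    DoubleArray(cols) { j -> (0 until rows).minOf { i -> values[i * cols + j] } }

fun main() {
    val values = doubleArrayOf(1.0, 2.0, -3.0, 4.0) // [[1, 2], [-3, 4]]
    println(minAlongDim0(values, rows = 2, cols = 2).contentToString()) // [-3.0, 2.0]
}
```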
diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt
index 5b226bd5d..1c23cff18 100644
--- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt
+++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleLinearOpsAlgebra.kt
@@ -6,7 +6,6 @@
 package space.kscience.kmath.tensors.core
 
 import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.internal.array
 import space.kscience.kmath.tensors.core.internal.svd1d
 import kotlin.math.abs
 import kotlin.test.Test
@@ -142,11 +141,11 @@
     @Test
     fun testCholesky() = DoubleTensorAlgebra {
         val tensor = randomNormal(intArrayOf(2, 5, 5), 0)
-        val sigma = (tensor matmul tensor.transpose()) + diagonalEmbedding(
+        val sigma = (tensor matmul tensor.transposed()) + diagonalEmbedding(
             fromArray(intArrayOf(2, 5), DoubleArray(10) { 0.1 })
         )
         val low = sigma.cholesky()
-        val sigmChol = low matmul low.transpose()
+        val sigmChol = low matmul low.transposed()
         assertTrue(sigma.eq(sigmChol))
     }
 
@@ -157,12 +156,12 @@
         val res = svd1d(tensor2)
 
         assertTrue(res.shape contentEquals intArrayOf(2))
-        assertTrue { abs(abs(res.mutableBuffer.array()[res.bufferStart]) - 0.386) < 0.01 }
-        assertTrue { abs(abs(res.mutableBuffer.array()[res.bufferStart + 1]) - 0.922) < 0.01 }
+        assertTrue { abs(abs(res.source[0]) - 0.386) < 0.01 }
+        assertTrue { abs(abs(res.source[1]) - 0.922) < 0.01 }
     }
 
     @Test
-    fun testSVD() = DoubleTensorAlgebra{
+    fun testSVD() = DoubleTensorAlgebra {
         testSVDFor(fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)))
         testSVDFor(fromArray(intArrayOf(2, 2), doubleArrayOf(-1.0, 0.0, 239.0, 238.0)))
     }
 
@@ -171,16 +170,16 @@
     fun testBatchedSVD() = DoubleTensorAlgebra {
         val tensor = randomNormal(intArrayOf(2, 5, 3), 0)
         val (tensorU, tensorS, tensorV) = tensor.svd()
-        val tensorSVD = tensorU matmul (diagonalEmbedding(tensorS) matmul tensorV.transpose())
+        val tensorSVD = tensorU matmul (diagonalEmbedding(tensorS) matmul tensorV.transposed())
         assertTrue(tensor.eq(tensorSVD))
     }
 
     @Test
     fun testBatchedSymEig() = DoubleTensorAlgebra {
         val tensor = randomNormal(shape = intArrayOf(2, 3, 3), 0)
-        val tensorSigma = tensor + tensor.transpose()
+        val tensorSigma = tensor + tensor.transposed()
         val (tensorS, tensorV) = tensorSigma.symEig()
-        val tensorSigmaCalc = tensorV matmul (diagonalEmbedding(tensorS) matmul tensorV.transpose())
+        val tensorSigmaCalc = tensorV matmul (diagonalEmbedding(tensorS) matmul tensorV.transposed())
         assertTrue(tensorSigma.eq(tensorSigmaCalc))
     }
 
@@ -194,7 +193,7 @@ private fun DoubleTensorAlgebra.testSVDFor(tensor: DoubleTensor, epsilon: Double
     val tensorSVD = svd.first
         .dot(
             diagonalEmbedding(svd.second)
-                .dot(svd.third.transpose())
+                .dot(svd.third.transposed())
         )
 
     assertTrue(tensor.eq(tensorSVD, epsilon))
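The linear-algebra tests above all verify reconstruction identities after the `transpose()` to `transposed()` rename; for SVD the identity is A = U diag(S) V^T. A usage sketch in the renamed API (the shape, values, and tolerance are illustrative assumptions):

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main(): Unit = DoubleTensorAlgebra {
    val a = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
    val (u, s, v) = a.svd()
    // Rebuild A from its factors, using the renamed transposed().
    val reconstructed = u dot (diagonalEmbedding(s) dot v.transposed())
    println(a.eq(reconstructed, 1e-6)) // expected: true, up to numerical error
}
```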
diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt
index efdf6ba81..a36f32ac9 100644
--- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt
+++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensor.kt
@@ -13,10 +13,7 @@
 import space.kscience.kmath.nd.as2D
 import space.kscience.kmath.operations.invoke
 import space.kscience.kmath.structures.DoubleBuffer
 import space.kscience.kmath.structures.toDoubleArray
-import space.kscience.kmath.tensors.core.internal.array
 import space.kscience.kmath.tensors.core.internal.matrixSequence
-import space.kscience.kmath.tensors.core.internal.toBufferedTensor
-import space.kscience.kmath.tensors.core.internal.toTensor
 import kotlin.test.Test
 import kotlin.test.assertEquals
 import kotlin.test.assertTrue
@@ -37,7 +34,7 @@ internal class TestDoubleTensor {
         assertEquals(tensor[intArrayOf(0, 1)], 5.8)
         assertTrue(
             tensor.elements().map { it.second }.toList()
-                .toDoubleArray() contentEquals tensor.mutableBuffer.toDoubleArray()
+                .toDoubleArray() contentEquals tensor.source.toDoubleArray()
         )
     }
 
@@ -57,9 +54,9 @@
         assertEquals(tensor[intArrayOf(0, 1, 0)], 109.56)
 
         tensor.matrixSequence().forEach {
-            val a = it.toTensor()
+            val a = it.asDoubleTensor()
             val secondRow = a.getTensor(1).as1D()
-            val secondColumn = a.transpose(0, 1).getTensor(1).as1D()
+            val secondColumn = a.transposed(0, 1).getTensor(1).as1D()
             assertEquals(secondColumn[0], 77.89)
             assertEquals(secondRow[1], secondColumn[1])
         }
     }
 
@@ -72,16 +69,16 @@
         val doubleArray = DoubleBuffer(doubleArrayOf(1.0, 2.0, 3.0))
 
         // create ND buffers, no data is copied
-        val ndArray = MutableBufferND(DefaultStrides(intArrayOf(3)), doubleArray)
+        val ndArray: MutableBufferND<Double> = MutableBufferND(DefaultStrides(intArrayOf(3)), doubleArray)
 
         // map to tensors
-        val bufferedTensorArray = ndArray.toBufferedTensor() // strides are flipped so data copied
-        val tensorArray = bufferedTensorArray.toTensor() // data not contiguous so copied again
+        val bufferedTensorArray = ndArray.asDoubleTensor() // strides are flipped, so data is copied
+        val tensorArray = bufferedTensorArray.asDoubleTensor() // already a DoubleTensor, so no copy is made here
 
-        val tensorArrayPublic = ndArray.asDoubleTensor() // public API, data copied twice
+        val tensorArrayPublic = ndArray.asDoubleTensor() // public API, data is copied once
         val sharedTensorArray = tensorArrayPublic.asDoubleTensor() // no data copied by matching type
 
-        assertTrue(tensorArray.mutableBuffer.array() contentEquals sharedTensorArray.mutableBuffer.array())
+        assertTrue(tensorArray.source contentEquals sharedTensorArray.source)
 
         tensorArray[intArrayOf(0)] = 55.9
         assertEquals(tensorArrayPublic[intArrayOf(0)], 1.0)
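The buffer-protocol test above fixes the copy semantics of the new cast: converting a structure that is not already a `DoubleTensor` copies its data, so later mutations of the tensor do not leak back into the source. A condensed sketch of the same scenario, mirroring the test rather than adding new API:

```kotlin
import space.kscience.kmath.nd.DefaultStrides
import space.kscience.kmath.nd.MutableBufferND
import space.kscience.kmath.structures.DoubleBuffer
import space.kscience.kmath.tensors.core.asDoubleTensor

fun main() {
    val buffer = DoubleBuffer(doubleArrayOf(1.0, 2.0, 3.0))
    val ndArray: MutableBufferND<Double> = MutableBufferND(DefaultStrides(intArrayOf(3)), buffer)

    val tensor = ndArray.asDoubleTensor() // not a DoubleTensor, so the data is copied

    tensor[intArrayOf(0)] = 55.9
    println(ndArray[intArrayOf(0)]) // still 1.0: the copy isolates the original buffer
}
```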
diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt
index 28c327973..79b199471 100644
--- a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt
+++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/TestDoubleTensorAlgebra.kt
@@ -6,9 +6,10 @@
 package space.kscience.kmath.tensors.core
 
+import space.kscience.kmath.nd.get
 import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.internal.array
 import kotlin.test.Test
+import kotlin.test.assertEquals
 import kotlin.test.assertFalse
 import kotlin.test.assertTrue
 
@@ -18,55 +19,55 @@ internal class TestDoubleTensorAlgebra {
     fun testDoublePlus() = DoubleTensorAlgebra {
         val tensor = fromArray(intArrayOf(2), doubleArrayOf(1.0, 2.0))
         val res = 10.0 + tensor
-        assertTrue(res.mutableBuffer.array() contentEquals doubleArrayOf(11.0, 12.0))
+        assertTrue(res.source contentEquals doubleArrayOf(11.0, 12.0))
     }
 
     @Test
     fun testDoubleDiv() = DoubleTensorAlgebra {
         val tensor = fromArray(intArrayOf(2), doubleArrayOf(2.0, 4.0))
-        val res = 2.0/tensor
-        assertTrue(res.mutableBuffer.array() contentEquals doubleArrayOf(1.0, 0.5))
+        val res = 2.0 / tensor
+        assertTrue(res.source contentEquals doubleArrayOf(1.0, 0.5))
     }
 
     @Test
     fun testDivDouble() = DoubleTensorAlgebra {
         val tensor = fromArray(intArrayOf(2), doubleArrayOf(10.0, 5.0))
         val res = tensor / 2.5
-        assertTrue(res.mutableBuffer.array() contentEquals doubleArrayOf(4.0, 2.0))
+        assertTrue(res.source contentEquals doubleArrayOf(4.0, 2.0))
     }
 
     @Test
     fun testTranspose1x1() = DoubleTensorAlgebra {
         val tensor = fromArray(intArrayOf(1), doubleArrayOf(0.0))
-        val res = tensor.transpose(0, 0)
+        val res = tensor.transposed(0, 0)
 
-        assertTrue(res.mutableBuffer.array() contentEquals doubleArrayOf(0.0))
+        assertTrue(res.source contentEquals doubleArrayOf(0.0))
         assertTrue(res.shape contentEquals intArrayOf(1))
     }
 
     @Test
     fun testTranspose3x2() = DoubleTensorAlgebra {
         val tensor = fromArray(intArrayOf(3, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
-        val res = tensor.transpose(1, 0)
+        val res = tensor.transposed(1, 0)
 
-        assertTrue(res.mutableBuffer.array() contentEquals doubleArrayOf(1.0, 3.0, 5.0, 2.0, 4.0, 6.0))
+        assertTrue(res.source contentEquals doubleArrayOf(1.0, 3.0, 5.0, 2.0, 4.0, 6.0))
         assertTrue(res.shape contentEquals intArrayOf(2, 3))
     }
 
     @Test
     fun testTranspose1x2x3() = DoubleTensorAlgebra {
         val tensor = fromArray(intArrayOf(1, 2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
-        val res01 = tensor.transpose(0, 1)
-        val res02 = tensor.transpose(-3, 2)
-        val res12 = tensor.transpose()
+        val res01 = tensor.transposed(0, 1)
+        val res02 = tensor.transposed(-3, 2)
+        val res12 = tensor.transposed()
 
         assertTrue(res01.shape contentEquals intArrayOf(2, 1, 3))
         assertTrue(res02.shape contentEquals intArrayOf(3, 2, 1))
         assertTrue(res12.shape contentEquals intArrayOf(1, 3, 2))
 
-        assertTrue(res01.mutableBuffer.array() contentEquals doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
-        assertTrue(res02.mutableBuffer.array() contentEquals doubleArrayOf(1.0, 4.0, 2.0, 5.0, 3.0, 6.0))
-        assertTrue(res12.mutableBuffer.array() contentEquals doubleArrayOf(1.0, 4.0, 2.0, 5.0, 3.0, 6.0))
+        assertTrue(res01.source contentEquals doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
+        assertTrue(res02.source contentEquals doubleArrayOf(1.0, 4.0, 2.0, 5.0, 3.0, 6.0))
+        assertTrue(res12.source contentEquals doubleArrayOf(1.0, 4.0, 2.0, 5.0, 3.0, 6.0))
     }
 
     @Test
@@ -97,8 +98,8 @@
         assignResult += tensorC
         assignResult += -39.4
 
-        assertTrue(expected.mutableBuffer.array() contentEquals result.mutableBuffer.array())
-        assertTrue(expected.mutableBuffer.array() contentEquals assignResult.mutableBuffer.array())
+        assertTrue(expected.source contentEquals result.source)
+        assertTrue(expected.source contentEquals assignResult.source)
     }
 
@@ -111,26 +112,28 @@
         val tensor5 = fromArray(intArrayOf(2, 3, 3), (1..18).map { 1 + it.toDouble() }.toDoubleArray())
 
         val res12 = tensor1.dot(tensor2)
-        assertTrue(res12.mutableBuffer.array() contentEquals doubleArrayOf(140.0, 320.0))
+        assertTrue(res12.source contentEquals doubleArrayOf(140.0, 320.0))
         assertTrue(res12.shape contentEquals intArrayOf(2))
 
         val res32 = tensor3.matmul(tensor2)
-        assertTrue(res32.mutableBuffer.array() contentEquals doubleArrayOf(-140.0))
+        assertTrue(res32.source contentEquals doubleArrayOf(-140.0))
         assertTrue(res32.shape contentEquals intArrayOf(1, 1))
 
         val res22 = tensor2.dot(tensor2)
-        assertTrue(res22.mutableBuffer.array() contentEquals doubleArrayOf(1400.0))
+        assertTrue(res22.source contentEquals doubleArrayOf(1400.0))
         assertTrue(res22.shape contentEquals intArrayOf(1))
 
         val res11 = tensor1.dot(tensor11)
-        assertTrue(res11.mutableBuffer.array() contentEquals doubleArrayOf(22.0, 28.0, 49.0, 64.0))
+        assertTrue(res11.source contentEquals doubleArrayOf(22.0, 28.0, 49.0, 64.0))
         assertTrue(res11.shape contentEquals intArrayOf(2, 2))
 
         val res45 = tensor4.matmul(tensor5)
-        assertTrue(res45.mutableBuffer.array() contentEquals doubleArrayOf(
-            36.0, 42.0, 48.0, 81.0, 96.0, 111.0, 126.0, 150.0, 174.0,
-            468.0, 501.0, 534.0, 594.0, 636.0, 678.0, 720.0, 771.0, 822.0
-        ))
+        assertTrue(
+            res45.source contentEquals doubleArrayOf(
+                36.0, 42.0, 48.0, 81.0, 96.0, 111.0, 126.0, 150.0, 174.0,
+                468.0, 501.0, 534.0, 594.0, 636.0, 678.0, 720.0, 771.0, 822.0
+            )
+        )
         assertTrue(res45.shape contentEquals intArrayOf(2, 3, 3))
     }
 
@@ -140,31 +143,44 @@
         val tensor2 = fromArray(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
         val tensor3 = zeros(intArrayOf(2, 3, 4, 5))
 
-        assertTrue(diagonalEmbedding(tensor3, 0, 3, 4).shape contentEquals
-                intArrayOf(2, 3, 4, 5, 5))
-        assertTrue(diagonalEmbedding(tensor3, 1, 3, 4).shape contentEquals
-                intArrayOf(2, 3, 4, 6, 6))
-        assertTrue(diagonalEmbedding(tensor3, 2, 0, 3).shape contentEquals
-                intArrayOf(7, 2, 3, 7, 4))
+        assertTrue(
+            diagonalEmbedding(tensor3, 0, 3, 4).shape contentEquals
+                    intArrayOf(2, 3, 4, 5, 5)
+        )
+        assertTrue(
+            diagonalEmbedding(tensor3, 1, 3, 4).shape contentEquals
+                    intArrayOf(2, 3, 4, 6, 6)
+        )
+        assertTrue(
+            diagonalEmbedding(tensor3, 2, 0, 3).shape contentEquals
+                    intArrayOf(7, 2, 3, 7, 4)
+        )
 
         val diagonal1 = diagonalEmbedding(tensor1, 0, 1, 0)
         assertTrue(diagonal1.shape contentEquals intArrayOf(3, 3))
-        assertTrue(diagonal1.mutableBuffer.array() contentEquals
-                doubleArrayOf(10.0, 0.0, 0.0, 0.0, 20.0, 0.0, 0.0, 0.0, 30.0))
+        assertTrue(
+            diagonal1.source contentEquals
+                    doubleArrayOf(10.0, 0.0, 0.0, 0.0, 20.0, 0.0, 0.0, 0.0, 30.0)
+        )
 
         val diagonal1Offset = diagonalEmbedding(tensor1, 1, 1, 0)
         assertTrue(diagonal1Offset.shape contentEquals intArrayOf(4, 4))
-        assertTrue(diagonal1Offset.mutableBuffer.array() contentEquals
-                doubleArrayOf(0.0, 0.0, 0.0, 0.0, 10.0, 0.0, 0.0, 0.0, 0.0, 20.0, 0.0, 0.0, 0.0, 0.0, 30.0, 0.0))
+        assertTrue(
+            diagonal1Offset.source contentEquals
+                    doubleArrayOf(0.0, 0.0, 0.0, 0.0, 10.0, 0.0, 0.0, 0.0, 0.0, 20.0, 0.0, 0.0, 0.0, 0.0, 30.0, 0.0)
+        )
 
         val diagonal2 = diagonalEmbedding(tensor2, 1, 0, 2)
         assertTrue(diagonal2.shape contentEquals intArrayOf(4, 2, 4))
-        assertTrue(diagonal2.mutableBuffer.array() contentEquals
-                doubleArrayOf(
-                    0.0, 1.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0,
-                    0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 5.0, 0.0,
-                    0.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 6.0,
-                    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
+        assertTrue(
+            diagonal2.source contentEquals
+                    doubleArrayOf(
+                        0.0, 1.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0,
+                        0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 5.0, 0.0,
+                        0.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 6.0,
+                        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
+                    )
+        )
     }
 
     @Test
@@ -178,4 +194,14 @@
         assertFalse(tensor1.eq(tensor3))
     }
+
+    @Test
+    fun testMap() = DoubleTensorAlgebra {
+        val tensor = one(5, 5, 5)
+        val l = tensor.getTensor(0).map { it + 1.0 }
+        val r = tensor.getTensor(1).map { it - 1.0 }
+        val res = l + r
+        assertTrue { intArrayOf(5, 5) contentEquals res.shape }
+        assertEquals(2.0, res[4, 4])
+    }
 }
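Note: the expected value in `testMap` is corrected to 2.0 here, since for a tensor of ones the slices map to 2.0 and 0.0 respectively and their sum is 2.0 everywhere. The test establishes that `map` applies its function element-wise and preserves shape, so slices taken with `getTensor` can be transformed and recombined. A small usage sketch of the same pattern (values are illustrative):

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun main(): Unit = DoubleTensorAlgebra {
    val tensor = fromArray(intArrayOf(2, 2), doubleArrayOf(1.0, 2.0, 3.0, 4.0))
    val doubled = tensor.map { it * 2.0 } // element-wise, same shape
    val shifted = doubled.map { it - 1.0 }
    println(shifted[intArrayOf(1, 1)]) // 7.0 = 4.0 * 2.0 - 1.0
}
```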
diff --git a/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/offsetBufferEquality.kt b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/offsetBufferEquality.kt
new file mode 100644
index 000000000..e9fc7fb9c
--- /dev/null
+++ b/kmath-tensors/src/commonTest/kotlin/space/kscience/kmath/tensors/core/offsetBufferEquality.kt
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2018-2022 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensors.core
+
+import space.kscience.kmath.structures.Buffer
+import space.kscience.kmath.structures.DoubleBuffer
+import space.kscience.kmath.structures.indices
+import kotlin.jvm.JvmName
+
+
+/**
+ * Simplified [OffsetDoubleBuffer] to array comparison
+ */
+public fun OffsetDoubleBuffer.contentEquals(vararg doubles: Double): Boolean = indices.all { get(it) == doubles[it] }
+
+@JvmName("contentEqualsArray")
+public infix fun OffsetDoubleBuffer.contentEquals(otherArray: DoubleArray): Boolean = contentEquals(*otherArray)
+
+@JvmName("contentEqualsBuffer")
+public infix fun OffsetDoubleBuffer.contentEquals(otherBuffer: Buffer<Double>): Boolean =
+    indices.all { get(it) == otherBuffer[it] }
\ No newline at end of file
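`OffsetDoubleBuffer` is a window into a larger backing buffer, so equality has to go through `indices`/`get` rather than comparing the raw backing array, which would drag in elements outside the window. A standalone analogue of the helper (not the library type, just the same pattern; note this sketch also checks sizes, which the helper above leaves to the caller):

```kotlin
// A windowed view over a DoubleArray that does not start at offset 0.
class OffsetView(private val data: DoubleArray, private val offset: Int, val size: Int) {
    operator fun get(i: Int): Double = data[offset + i]
}

infix fun OffsetView.contentEquals(other: DoubleArray): Boolean =
    size == other.size && (0 until size).all { this[it] == other[it] }

fun main() {
    val backing = doubleArrayOf(9.0, 1.0, 2.0, 3.0, 9.0)
    val view = OffsetView(backing, offset = 1, size = 3)
    println(view contentEquals doubleArrayOf(1.0, 2.0, 3.0)) // true, despite the 9.0 padding
}
```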
diff --git a/test-utils/src/commonMain/kotlin/bufferEquality.kt b/test-utils/src/commonMain/kotlin/bufferEquality.kt
new file mode 100644
index 000000000..9e4d9ec22
--- /dev/null
+++ b/test-utils/src/commonMain/kotlin/bufferEquality.kt
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2018-2022 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.testutils
+
+import space.kscience.kmath.structures.DoubleBuffer
+import kotlin.jvm.JvmName
+
+/**
+ * Simplified [DoubleBuffer] to array comparison
+ */
+public fun DoubleBuffer.contentEquals(vararg doubles: Double): Boolean = array.contentEquals(doubles)
+
+@JvmName("contentEqualsArray")
+public infix fun DoubleBuffer.contentEquals(otherArray: DoubleArray): Boolean = array.contentEquals(otherArray)
+
+@JvmName("contentEqualsBuffer")
+public infix fun DoubleBuffer.contentEquals(otherBuffer: DoubleBuffer): Boolean = array.contentEquals(otherBuffer.array)
\ No newline at end of file
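Read as a whole, the tensor refactor in this diff is mechanical for downstream code: `transpose()` becomes `transposed()`, receiver-style `t.zeroesLike()` becomes the algebra-level `zeroesLike(t)`, and raw element access moves from `mutableBuffer.array()` to the `source` view. A compilable summary sketch (the function itself is illustrative, not library code):

```kotlin
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.DoubleTensor
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra

fun migrate(a: DoubleTensor) = DoubleTensorAlgebra {
    val transposed = a.transposed() // was: a.transpose()
    val zeros = zeroesLike(a)       // was: a.zeroesLike()
    val raw = a.source              // was: a.mutableBuffer.array()
    Triple(transposed, zeros, raw)
}
```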