forked from kscience/kmath
API cleanup
This commit is contained in:
parent 64781a6785
commit 01bbb4bb13
@@ -19,7 +19,7 @@ allprojects {
     }

     group = "space.kscience"
-    version = "0.3.0-dev-14"
+    version = "0.3.0-dev-15"
 }

 subprojects {
@@ -5,19 +5,19 @@

 package space.kscience.kmath.tensors

-import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.tensorAlgebra
+import space.kscience.kmath.tensors.core.withBroadcast


 // simple PCA

-fun main(): Unit = BroadcastDoubleTensorAlgebra { // work in context with broadcast methods
+fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with broadcast methods
     val seed = 100500L

     // assume x is range from 0 until 10
     val x = fromArray(
         intArrayOf(10),
-        (0 until 10).toList().map { it.toDouble() }.toDoubleArray()
+        DoubleArray(10) { it.toDouble() }
     )

     // take y dependent on x with noise
@@ -62,7 +62,7 @@ fun main(): Unit = BroadcastDoubleTensorAlgebra { // work in context with broad
     println("Eigenvector:\n$v")

     // reduce dimension of dataset
     val datasetReduced = v dot stack(listOf(xScaled, yScaled))
     println("Reduced data:\n$datasetReduced")

     // we can restore original data from reduced data;
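Note on the example change above: the entry point switches from invoking the BroadcastDoubleTensorAlgebra object directly to the Double.tensorAlgebra.withBroadcast scope function introduced later in this commit. A minimal sketch of the new call style, using only calls visible in this diff; withBroadcast is annotated @UnstableKMathAPI below, so the opt-in is included as a precaution:

    import space.kscience.kmath.misc.UnstableKMathAPI
    import space.kscience.kmath.tensors.core.tensorAlgebra
    import space.kscience.kmath.tensors.core.withBroadcast

    @OptIn(UnstableKMathAPI::class)
    fun main(): Unit = Double.tensorAlgebra.withBroadcast {
        // same 1D tensor of 0.0..9.0 as in the PCA example above
        val x = fromArray(intArrayOf(10), DoubleArray(10) { it.toDouble() })
        println(x)
    }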
@@ -5,13 +5,13 @@

 package space.kscience.kmath.tensors

-import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
+import space.kscience.kmath.tensors.core.tensorAlgebra
+import space.kscience.kmath.tensors.core.withBroadcast


 // Dataset normalization

-fun main() = BroadcastDoubleTensorAlgebra { // work in context with broadcast methods
+fun main() = Double.tensorAlgebra.withBroadcast { // work in context with broadcast methods
     // take dataset of 5-element vectors from normal distribution
     val dataset = randomNormal(intArrayOf(100, 5)) * 1.5 // all elements from N(0, 1.5)
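A compact sketch of the normalization example's new scope, assuming only the calls shown above (randomNormal and the scalar times operator on tensors):

    import space.kscience.kmath.misc.UnstableKMathAPI
    import space.kscience.kmath.tensors.core.tensorAlgebra
    import space.kscience.kmath.tensors.core.withBroadcast

    @OptIn(UnstableKMathAPI::class)
    fun main(): Unit = Double.tensorAlgebra.withBroadcast {
        // 100 samples of 5-element vectors, scaled as in the example above
        val dataset = randomNormal(intArrayOf(100, 5)) * 1.5
        println(dataset.shape.contentToString())
    }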
@@ -5,13 +5,13 @@

 package space.kscience.kmath.tensors

-import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
 import space.kscience.kmath.tensors.core.DoubleTensor
+import space.kscience.kmath.tensors.core.tensorAlgebra
+import space.kscience.kmath.tensors.core.withBroadcast

 // solving linear system with LUP decomposition

-fun main() = BroadcastDoubleTensorAlgebra {// work in context with linear operations
+fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear operations

     // set true value of x
     val trueX = fromArray(
@@ -8,12 +8,14 @@ package space.kscience.kmath.linear

 import space.kscience.kmath.misc.PerformancePitfall
-import space.kscience.kmath.nd.BufferedRingND
 import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.nd.asND
 import space.kscience.kmath.nd.ndAlgebra
-import space.kscience.kmath.nd.unwrap
 import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.operations.Ring
 import space.kscience.kmath.operations.invoke
-import space.kscience.kmath.structures.*
+import space.kscience.kmath.structures.Buffer
+import space.kscience.kmath.structures.BufferFactory
+import space.kscience.kmath.structures.VirtualBuffer
+import space.kscience.kmath.structures.indices


 public class BufferedLinearSpace<T, out A : Ring<T>>(
@@ -33,17 +35,17 @@ public class BufferedLinearSpace<T, out A : Ring<T>>(
         bufferFactory(size) { elementAlgebra.initializer(it) }

     override fun Matrix<T>.unaryMinus(): Matrix<T> = ndRing(rowNum, colNum).run {
-        unwrap().map { -it }.as2D()
+        asND().map { -it }.as2D()
     }

     override fun Matrix<T>.plus(other: Matrix<T>): Matrix<T> = ndRing(rowNum, colNum).run {
         require(shape.contentEquals(other.shape)) { "Shape mismatch on Matrix::plus. Expected $shape but found ${other.shape}" }
-        unwrap().plus(other.unwrap()).as2D()
+        asND().plus(other.asND()).as2D()
     }

     override fun Matrix<T>.minus(other: Matrix<T>): Matrix<T> = ndRing(rowNum, colNum).run {
         require(shape.contentEquals(other.shape)) { "Shape mismatch on Matrix::minus. Expected $shape but found ${other.shape}" }
-        unwrap().minus(other.unwrap()).as2D()
+        asND().minus(other.asND()).as2D()
     }

     private fun Buffer<T>.linearize() = if (this is VirtualBuffer) {
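The unwrap() helper is renamed to asND() throughout (see the Structure1D/Structure2D hunks further down). The element-wise operations all follow one pattern: view the Matrix as an N-dimensional structure, map inside the matching ND ring, and view the result back as 2D. A rough sketch of the same round trip against the public ND API, with names taken from this diff (DoubleFieldND, produce, as2D); treat it as illustrative rather than the library's documented recipe:

    import space.kscience.kmath.nd.DoubleFieldND
    import space.kscience.kmath.nd.as2D

    fun main() {
        val nd = DoubleFieldND(intArrayOf(2, 2))
        val negated = nd.run {
            // build a 2x2 structure and negate it element-wise inside the ND field
            produce { index -> (index[0] + index[1]).toDouble() }.map { -it }
        }
        println(negated.as2D()[1, 1]) // -2.0
    }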
@@ -87,13 +89,10 @@ public class BufferedLinearSpace<T, out A : Ring<T>>(
     }

     override fun Matrix<T>.times(value: T): Matrix<T> = ndRing(rowNum, colNum).run {
-        unwrap().map { it * value }.as2D()
+        asND().map { it * value }.as2D()
     }
 }


 public fun <T, A : Ring<T>> A.linearSpace(bufferFactory: BufferFactory<T>): BufferedLinearSpace<T, A> =
     BufferedLinearSpace(this, bufferFactory)
-
-public val DoubleField.linearSpace: BufferedLinearSpace<Double, DoubleField>
-    get() = BufferedLinearSpace(this, ::DoubleBuffer)
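With the DoubleField.linearSpace property moving to the new DoubleLinearSpace file below, the generic factory above stays available for any ring. A small sketch of using it directly, assuming the constructor reference shown in the removed line (::DoubleBuffer) as the buffer factory; the new DoubleLinearSpace below is the dedicated Double implementation:

    import space.kscience.kmath.linear.linearSpace
    import space.kscience.kmath.operations.DoubleField
    import space.kscience.kmath.structures.DoubleBuffer

    fun main() {
        // generic buffered linear space over DoubleField
        val space = DoubleField.linearSpace(::DoubleBuffer)
        val identity = space.buildMatrix(2, 2) { i, j -> if (i == j) 1.0 else 0.0 }
        println(identity[0, 0]) // 1.0
    }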
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
+ */
+
+package space.kscience.kmath.linear
+
+import space.kscience.kmath.misc.PerformancePitfall
+import space.kscience.kmath.nd.DoubleFieldND
+import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.nd.asND
+import space.kscience.kmath.operations.DoubleField
+import space.kscience.kmath.structures.Buffer
+import space.kscience.kmath.structures.DoubleBuffer
+import space.kscience.kmath.structures.indices
+
+public object DoubleLinearSpace : LinearSpace<Double, DoubleField> {
+
+    override val elementAlgebra: DoubleField get() = DoubleField
+
+    private fun ndRing(
+        rows: Int,
+        cols: Int,
+    ): DoubleFieldND = DoubleFieldND(intArrayOf(rows, cols))
+
+    override fun buildMatrix(
+        rows: Int,
+        columns: Int,
+        initializer: DoubleField.(i: Int, j: Int) -> Double
+    ): Matrix<Double> = ndRing(rows, columns).produce { (i, j) -> DoubleField.initializer(i, j) }.as2D()
+
+
+    override fun buildVector(size: Int, initializer: DoubleField.(Int) -> Double): DoubleBuffer =
+        DoubleBuffer(size) { DoubleField.initializer(it) }
+
+    override fun Matrix<Double>.unaryMinus(): Matrix<Double> = ndRing(rowNum, colNum).run {
+        asND().map { -it }.as2D()
+    }
+
+    override fun Matrix<Double>.plus(other: Matrix<Double>): Matrix<Double> = ndRing(rowNum, colNum).run {
+        require(shape.contentEquals(other.shape)) { "Shape mismatch on Matrix::plus. Expected $shape but found ${other.shape}" }
+        asND().plus(other.asND()).as2D()
+    }
+
+    override fun Matrix<Double>.minus(other: Matrix<Double>): Matrix<Double> = ndRing(rowNum, colNum).run {
+        require(shape.contentEquals(other.shape)) { "Shape mismatch on Matrix::minus. Expected $shape but found ${other.shape}" }
+        asND().minus(other.asND()).as2D()
+    }
+
+    // Create a continuous in-memory representation of this vector for better memory layout handling
+    private fun Buffer<Double>.linearize() = if (this is DoubleBuffer) {
+        this
+    } else {
+        DoubleBuffer(size) { get(it) }
+    }
+
+    @OptIn(PerformancePitfall::class)
+    override fun Matrix<Double>.dot(other: Matrix<Double>): Matrix<Double> {
+        require(colNum == other.rowNum) { "Matrix dot operation dimension mismatch: ($rowNum, $colNum) x (${other.rowNum}, ${other.colNum})" }
+        val rows = this@dot.rows.map { it.linearize() }
+        val columns = other.columns.map { it.linearize() }
+        return buildMatrix(rowNum, other.colNum) { i, j ->
+            val r = rows[i]
+            val c = columns[j]
+            var res = 0.0
+            for (l in r.indices) {
+                res += r[l] * c[l]
+            }
+            res
+        }
+    }
+
+    @OptIn(PerformancePitfall::class)
+    override fun Matrix<Double>.dot(vector: Point<Double>): DoubleBuffer {
+        require(colNum == vector.size) { "Matrix dot vector operation dimension mismatch: ($rowNum, $colNum) x (${vector.size})" }
+        val rows = this@dot.rows.map { it.linearize() }
+        return DoubleBuffer(rowNum) { i ->
+            val r = rows[i]
+            var res = 0.0
+            for (j in r.indices) {
+                res += r[j] * vector[j]
+            }
+            res
+        }
+
+    }
+
+    override fun Matrix<Double>.times(value: Double): Matrix<Double> = ndRing(rowNum, colNum).run {
+        asND().map { it * value }.as2D()
+    }
+
+    public override fun Point<Double>.plus(other: Point<Double>): DoubleBuffer = DoubleBuffer(size) {
+        get(it) + other[it]
+    }
+
+    public override fun Point<Double>.minus(other: Point<Double>): DoubleBuffer = DoubleBuffer(size) {
+        get(it) - other[it]
+    }
+
+    public override fun Point<Double>.times(value: Double): DoubleBuffer = DoubleBuffer(size) { i -> get(i) * value }
+
+    public operator fun Point<Double>.div(value: Double): DoubleBuffer = DoubleBuffer(size) { i -> get(i) / value }
+
+    public override fun Double.times(v: Point<Double>): DoubleBuffer = v * this
+
+
+}
+
+public val DoubleField.linearSpace: DoubleLinearSpace get() = DoubleLinearSpace
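A short usage sketch of the new specialized space through the DoubleField.linearSpace accessor defined at the end of the file; Double.algebra comes from space.kscience.kmath.operations.algebra, as the updated tests below show, and the opt-in is included in case the row/column access behind dot requires it:

    import space.kscience.kmath.linear.linearSpace
    import space.kscience.kmath.misc.PerformancePitfall
    import space.kscience.kmath.operations.algebra

    @OptIn(PerformancePitfall::class)
    fun main(): Unit = Double.algebra.linearSpace.run {
        val a = buildMatrix(2, 3) { i, j -> (i + j).toDouble() }
        val b = buildMatrix(3, 2) { i, j -> (i + j).toDouble() }
        val c = a dot b
        println(c[1, 1]) // 14.0 for these inputs, matching MatrixTest.test2DDot below
    }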
@@ -136,7 +136,7 @@ public fun <T> Buffer<T>.asND(): Structure1D<T> = Buffer1DWrapper(this)
 /**
  * Expose inner buffer of this [Structure1D] if possible
  */
-internal fun <T : Any> Structure1D<T>.unwrap(): Buffer<T> = when {
+internal fun <T : Any> Structure1D<T>.asND(): Buffer<T> = when {
     this is Buffer1DWrapper<T> -> buffer
     this is Structure1DWrapper && structure is BufferND<T> -> structure.buffer
     else -> this
@@ -160,10 +160,10 @@ public fun <T> MutableStructureND<T>.as2D(): MutableStructure2D<T> = this as? Mu
 /**
  * Expose inner [StructureND] if possible
  */
-internal fun <T> Structure2D<T>.unwrap(): StructureND<T> =
+internal fun <T> Structure2D<T>.asND(): StructureND<T> =
     if (this is Structure2DWrapper) structure
     else this

-internal fun <T> MutableStructure2D<T>.unwrap(): MutableStructureND<T> =
+internal fun <T> MutableStructure2D<T>.asND(): MutableStructureND<T> =
     if (this is MutableStructure2DWrapper) structure else this

@@ -532,7 +532,7 @@ public val BigInt.algebra: BigIntField get() = BigIntField
 public inline fun Buffer.Companion.bigInt(size: Int, initializer: (Int) -> BigInt): Buffer<BigInt> =
     boxing(size, initializer)

-public inline fun BigInt.buffer(size: Int, initializer: (Int) -> BigInt): Buffer<BigInt> =
+public inline fun BigInt.Companion.buffer(size: Int, initializer: (Int) -> BigInt): Buffer<BigInt> =
     Buffer.boxing(size, initializer)

 @Deprecated("Use BigInt::mutableBuffer")
@@ -543,4 +543,4 @@ public inline fun BigInt.mutableBuffer(size: Int, initializer: (Int) -> BigInt):
     Buffer.boxing(size, initializer)

 public fun BigIntField.nd(vararg shape: Int): BufferedRingND<BigInt, BigIntField> =
-    BufferedRingND(shape, BigIntField, Buffer.Companion::bigInt)
+    BufferedRingND(shape, BigIntField, BigInt::buffer)
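After this change the buffer factory hangs off the BigInt companion, so it can be passed as the callable reference BigInt::buffer shown above, or called directly. A sketch of the direct call; the space.kscience.kmath.operations package and the Int.toBigInt() conversion are assumed from kmath-core rather than shown in this diff:

    import space.kscience.kmath.operations.BigInt
    import space.kscience.kmath.operations.buffer
    import space.kscience.kmath.operations.toBigInt

    fun main() {
        // four-element boxed buffer holding BigInt values 0, 1, 2, 3;
        // toBigInt() is assumed from kmath-core, swap in your own conversion if it differs
        val b = BigInt.buffer(4) { it.toBigInt() }
        println(b)
    }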
@@ -25,6 +25,12 @@ public value class DoubleBuffer(public val array: DoubleArray) : MutableBuffer<D
     override operator fun iterator(): DoubleIterator = array.iterator()

     override fun copy(): DoubleBuffer = DoubleBuffer(array.copyOf())
+
+    override fun toString(): String = Buffer.toString(this)
+
+    public companion object{
+        public fun zero(size: Int): DoubleBuffer = DoubleArray(size).asBuffer()
+    }
 }

 /**
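The new companion gives DoubleBuffer a named zero-filled factory. A trivial usage sketch:

    import space.kscience.kmath.structures.DoubleBuffer

    fun main() {
        val zeros = DoubleBuffer.zero(5)   // five elements, all 0.0
        println(zeros)                     // readable output via the new toString override
    }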
@@ -8,6 +8,7 @@ package space.kscience.kmath.linear

 import space.kscience.kmath.misc.PerformancePitfall
 import space.kscience.kmath.misc.UnstableKMathAPI
 import space.kscience.kmath.nd.StructureND
+import space.kscience.kmath.operations.algebra
 import kotlin.test.Test
 import kotlin.test.assertEquals
 import kotlin.test.assertTrue
@@ -21,39 +22,37 @@ fun <T : Any> assertMatrixEquals(expected: StructureND<T>, actual: StructureND<T
 class DoubleLUSolverTest {

     @Test
-    fun testInvertOne() {
-        val matrix = LinearSpace.double.one(2, 2)
-        val inverted = LinearSpace.double.lupSolver().inverse(matrix)
+    fun testInvertOne() = Double.algebra.linearSpace.run{
+        val matrix = one(2, 2)
+        val inverted = lupSolver().inverse(matrix)
         assertMatrixEquals(matrix, inverted)
     }

     @Test
-    fun testDecomposition() {
-        LinearSpace.double.run {
-            val matrix = matrix(2, 2)(
-                3.0, 1.0,
-                2.0, 3.0
-            )
+    fun testDecomposition() = Double.algebra.linearSpace.run {
+        val matrix = matrix(2, 2)(
+            3.0, 1.0,
+            2.0, 3.0
+        )

-            val lup = lup(matrix)
+        val lup = lup(matrix)

-            //Check determinant
-            assertEquals(7.0, lup.determinant)
+        //Check determinant
+        assertEquals(7.0, lup.determinant)

-            assertMatrixEquals(lup.p dot matrix, lup.l dot lup.u)
-        }
+        assertMatrixEquals(lup.p dot matrix, lup.l dot lup.u)
     }

     @Test
-    fun testInvert() {
-        val matrix = LinearSpace.double.matrix(2, 2)(
+    fun testInvert() = Double.algebra.linearSpace.run{
+        val matrix = matrix(2, 2)(
             3.0, 1.0,
             1.0, 3.0
         )

-        val inverted = LinearSpace.double.lupSolver().inverse(matrix)
+        val inverted = lupSolver().inverse(matrix)

-        val expected = LinearSpace.double.matrix(2, 2)(
+        val expected = matrix(2, 2)(
             0.375, -0.125,
             -0.125, 0.375
         )
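The tests now obtain the space via Double.algebra.linearSpace instead of the removed LinearSpace.double shortcut. A standalone sketch of the same pattern; it is assumed to sit in the space.kscience.kmath.linear package so that one, matrix, and lupSolver resolve exactly as in the test above:

    package space.kscience.kmath.linear

    import space.kscience.kmath.operations.algebra

    fun main(): Unit = Double.algebra.linearSpace.run {
        val matrix = one(2, 2)                     // identity matrix, as in testInvertOne
        val inverted = lupSolver().inverse(matrix) // LUP-based inverse
        println(inverted[0, 0])
    }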
@@ -9,6 +9,7 @@ import space.kscience.kmath.misc.PerformancePitfall
 import space.kscience.kmath.misc.UnstableKMathAPI
 import space.kscience.kmath.nd.StructureND
 import space.kscience.kmath.nd.as2D
+import space.kscience.kmath.operations.algebra
 import kotlin.test.Test
 import kotlin.test.assertEquals
 import kotlin.test.assertTrue
@@ -19,15 +20,15 @@ import kotlin.test.assertTrue
 class MatrixTest {

     @Test
-    fun testTranspose() {
-        val matrix = LinearSpace.double.one(3, 3)
+    fun testTranspose() = Double.algebra.linearSpace.run {
+        val matrix = one(3, 3)
         val transposed = matrix.transpose()
         assertTrue { StructureND.contentEquals(matrix, transposed) }
     }

     @Test
-    fun testBuilder() {
-        val matrix = LinearSpace.double.matrix(2, 3)(
+    fun testBuilder() = Double.algebra.linearSpace.run {
+        val matrix = matrix(2, 3)(
             1.0, 0.0, 0.0,
             0.0, 1.0, 2.0
         )
@@ -36,7 +37,7 @@ class MatrixTest {
     }

     @Test
-    fun testMatrixExtension() {
+    fun testMatrixExtension() = Double.algebra.linearSpace.run {
         val transitionMatrix: Matrix<Double> = VirtualMatrix(6, 6) { row, col ->
             when {
                 col == 0 -> .50
@@ -49,7 +50,7 @@ class MatrixTest {
         infix fun Matrix<Double>.pow(power: Int): Matrix<Double> {
             var res = this
             repeat(power - 1) {
-                res = LinearSpace.double.run { res dot this@pow }
+                res = res dot this@pow
             }
             return res
         }
@@ -58,19 +59,18 @@ class MatrixTest {
     }

     @Test
-    fun test2DDot() {
+    fun test2DDot() = Double.algebra.linearSpace.run {
         val firstMatrix = StructureND.auto(2, 3) { (i, j) -> (i + j).toDouble() }.as2D()
         val secondMatrix = StructureND.auto(3, 2) { (i, j) -> (i + j).toDouble() }.as2D()

-        LinearSpace.double.run {
         // val firstMatrix = produce(2, 3) { i, j -> (i + j).toDouble() }
         // val secondMatrix = produce(3, 2) { i, j -> (i + j).toDouble() }
-            val result = firstMatrix dot secondMatrix
-            assertEquals(2, result.rowNum)
-            assertEquals(2, result.colNum)
-            assertEquals(8.0, result[0, 1])
-            assertEquals(8.0, result[1, 0])
-            assertEquals(14.0, result[1, 1])
-        }
+        val result = firstMatrix dot secondMatrix
+        assertEquals(2, result.rowNum)
+        assertEquals(2, result.colNum)
+        assertEquals(8.0, result[0, 1])
+        assertEquals(8.0, result[1, 0])
+        assertEquals(14.0, result[1, 1])
+
     }
 }
@@ -5,7 +5,7 @@

 package space.kscience.kmath.structures

-import space.kscience.kmath.linear.LinearSpace
+import space.kscience.kmath.linear.linearSpace
 import space.kscience.kmath.misc.PerformancePitfall
 import space.kscience.kmath.nd.StructureND
 import space.kscience.kmath.nd.combine
@@ -13,6 +13,7 @@ import space.kscience.kmath.nd.get
 import space.kscience.kmath.nd.ndAlgebra
 import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.operations.Norm
+import space.kscience.kmath.operations.algebra
 import space.kscience.kmath.operations.invoke
 import kotlin.math.abs
 import kotlin.math.pow
@@ -42,17 +43,18 @@ class NumberNDFieldTest {
     }

     @Test
-    fun testGeneration() {
+    fun testGeneration() = Double.algebra.linearSpace.run {

-        val array = LinearSpace.double.buildMatrix(3, 3) { i, j ->
+        val array = buildMatrix(3, 3) { i, j ->
             (i * 10 + j).toDouble()
         }

-        for (i in 0..2)
+        for (i in 0..2) {
             for (j in 0..2) {
                 val expected = (i * 10 + j).toDouble()
                 assertEquals(expected, array[i, j], "Error at index [$i, $j]")
             }
+        }
     }

     @Test
@@ -5,13 +5,11 @@

 package space.kscience.kmath.dimensions

-import space.kscience.kmath.linear.LinearSpace
-import space.kscience.kmath.linear.Matrix
-import space.kscience.kmath.linear.Point
-import space.kscience.kmath.linear.transpose
+import space.kscience.kmath.linear.*
 import space.kscience.kmath.nd.Structure2D
 import space.kscience.kmath.operations.DoubleField
 import space.kscience.kmath.operations.Ring
+import space.kscience.kmath.operations.algebra
 import kotlin.jvm.JvmInline

 /**
@@ -151,7 +149,7 @@ public value class DMatrixContext<T : Any, out A : Ring<T>>(public val context:
         context.run { (this@transpose as Matrix<T>).transpose() }.coerce()

     public companion object {
-        public val real: DMatrixContext<Double, DoubleField> = DMatrixContext(LinearSpace.double)
+        public val real: DMatrixContext<Double, DoubleField> = DMatrixContext(Double.algebra.linearSpace)
     }
 }

@@ -5,12 +5,12 @@

 package space.kscience.kmath.real

-import space.kscience.kmath.linear.LinearSpace
+import space.kscience.kmath.linear.linearSpace
 import space.kscience.kmath.linear.matrix
 import space.kscience.kmath.misc.PerformancePitfall
 import space.kscience.kmath.misc.UnstableKMathAPI
 import space.kscience.kmath.nd.StructureND
-import space.kscience.kmath.real.*
+import space.kscience.kmath.operations.algebra
 import space.kscience.kmath.structures.contentEquals
 import kotlin.test.Test
 import kotlin.test.assertEquals
@@ -59,13 +59,13 @@ internal class DoubleMatrixTest {
     }

     @Test
-    fun testMatrixAndDouble() {
+    fun testMatrixAndDouble() = Double.algebra.linearSpace.run {
         val matrix1 = realMatrix(2, 3)(
             1.0, 0.0, 3.0,
             4.0, 6.0, 2.0
         )
         val matrix2 = (matrix1 * 2.5 + 1.0 - 2.0) / 2.0
-        val expectedResult = LinearSpace.double.matrix(2, 3)(
+        val expectedResult = matrix(2, 3)(
             0.75, -0.5, 3.25,
             4.5, 7.0, 2.0
         )
@@ -159,8 +159,8 @@ internal class DoubleMatrixTest {
     }

     @Test
-    fun testAllElementOperations() {
-        val matrix1 = LinearSpace.double.matrix(2, 4)(
+    fun testAllElementOperations() = Double.algebra.linearSpace.run {
+        val matrix1 = matrix(2, 4)(
             -1.0, 0.0, 3.0, 15.0,
             4.0, -6.0, 7.0, -11.0
         )
@@ -5,10 +5,10 @@

 package space.kscience.kmath.real

-import space.kscience.kmath.linear.LinearSpace
 import space.kscience.kmath.linear.asMatrix
+import space.kscience.kmath.linear.linearSpace
 import space.kscience.kmath.linear.transpose
-import space.kscience.kmath.real.plus
+import space.kscience.kmath.operations.algebra
 import space.kscience.kmath.structures.DoubleBuffer
 import kotlin.test.Test
 import kotlin.test.assertEquals
@@ -30,12 +30,12 @@ internal class DoubleVectorTest {
     }

     @Test
-    fun testDot() {
+    fun testDot() = Double.algebra.linearSpace.run {
         val vector1 = DoubleBuffer(5) { it.toDouble() }
         val vector2 = DoubleBuffer(5) { 5 - it.toDouble() }
         val matrix1 = vector1.asMatrix()
         val matrix2 = vector2.asMatrix().transpose()
-        val product = LinearSpace.double.run { matrix1 dot matrix2 }
+        val product = matrix1 dot matrix2
         assertEquals(5.0, product[1, 0])
         assertEquals(6.0, product[2, 2])
     }
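A standalone sketch of the same vector-to-matrix pattern, using only the imports shown in this test file; the two column/row matrices multiply into an outer product, so the assertions above carry over:

    import space.kscience.kmath.linear.asMatrix
    import space.kscience.kmath.linear.linearSpace
    import space.kscience.kmath.linear.transpose
    import space.kscience.kmath.operations.algebra
    import space.kscience.kmath.structures.DoubleBuffer

    fun main(): Unit = Double.algebra.linearSpace.run {
        val vector1 = DoubleBuffer(5) { it.toDouble() }      // [0, 1, 2, 3, 4]
        val vector2 = DoubleBuffer(5) { 5 - it.toDouble() }  // [5, 4, 3, 2, 1]
        val product = vector1.asMatrix() dot vector2.asMatrix().transpose()
        println(product[1, 0]) // 5.0, as asserted in the test above
    }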
@@ -13,6 +13,7 @@ import space.kscience.kmath.linear.*
 import space.kscience.kmath.misc.UnstableKMathAPI
 import space.kscience.kmath.misc.log
 import space.kscience.kmath.operations.DoubleField
+import space.kscience.kmath.operations.algebra
 import space.kscience.kmath.structures.DoubleBuffer
 import space.kscience.kmath.structures.DoubleL2Norm

@@ -32,7 +33,7 @@ public class QowRuns(public val runs: Int) : OptimizationFeature {
 @UnstableKMathAPI
 public object QowOptimizer : Optimizer<Double, XYFit> {

-    private val linearSpace: LinearSpace<Double, DoubleField> = LinearSpace.double
+    private val linearSpace: LinearSpace<Double, DoubleField> = Double.algebra.linearSpace
     private val solver: LinearSolver<Double> = linearSpace.lupSolver()

     @OptIn(UnstableKMathAPI::class)
@@ -5,6 +5,7 @@

 package space.kscience.kmath.tensors.core

+import space.kscience.kmath.misc.UnstableKMathAPI
 import space.kscience.kmath.tensors.api.Tensor
 import space.kscience.kmath.tensors.core.internal.array
 import space.kscience.kmath.tensors.core.internal.broadcastTensors
@@ -90,4 +91,12 @@ public object BroadcastDoubleTensorAlgebra : DoubleTensorAlgebra() {
                 newOther.mutableBuffer.array()[tensor.bufferStart + i]
             }
         }
     }
 }
+
+/**
+ * Compute a value using broadcast double tensor algebra
+ */
+@UnstableKMathAPI
+public fun <R> DoubleTensorAlgebra.withBroadcast(block: BroadcastDoubleTensorAlgebra.() -> R): R =
+    BroadcastDoubleTensorAlgebra.block()
@@ -949,4 +949,6 @@ public open class DoubleTensorAlgebra :
     override fun Tensor<Double>.lu(): Triple<DoubleTensor, DoubleTensor, DoubleTensor> = lu(1e-9)
 }

+public val Double.Companion.tensorAlgebra: DoubleTensorAlgebra.Companion get() = DoubleTensorAlgebra
+
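The new Double.Companion.tensorAlgebra accessor returns the plain (non-broadcast) DoubleTensorAlgebra companion, so it can either be chained with withBroadcast as in the examples above or invoked directly via the operations.invoke extension that the old examples already imported. A minimal sketch of the direct form:

    import space.kscience.kmath.operations.invoke
    import space.kscience.kmath.tensors.core.tensorAlgebra

    fun main(): Unit = Double.tensorAlgebra {
        // 2x2 tensor filled with ones, built with fromArray as in the examples above
        val x = fromArray(intArrayOf(2, 2), DoubleArray(4) { 1.0 })
        println(x)
    }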
@@ -5,11 +5,11 @@ pluginManagement {
         gradlePluginPortal()
     }

-    val kotlinVersion = "1.5.30"
+    val kotlinVersion = "1.6.0-M1"

     plugins {
         id("org.jetbrains.kotlinx.benchmark") version "0.3.1"
-        id("ru.mipt.npm.gradle.project") version "0.10.3"
+        id("ru.mipt.npm.gradle.project") version "0.10.4"
         kotlin("multiplatform") version kotlinVersion
         kotlin("plugin.allopen") version kotlinVersion
     }
@@ -17,6 +17,9 @@ pluginManagement {

 rootProject.name = "kmath"

+enableFeaturePreview("TYPESAFE_PROJECT_ACCESSORS")
+enableFeaturePreview("VERSION_CATALOGS")
+
 include(
     ":kmath-memory",
     ":kmath-complex",