Performance fixes

Alexander Nozik 2024-02-18 12:27:46 +03:00
parent f8e91c2402
commit 10739e0d04
18 changed files with 262 additions and 496 deletions

View File

@@ -11,12 +11,11 @@ import kotlinx.benchmark.Scope
import kotlinx.benchmark.State
import space.kscience.kmath.commons.linear.CMLinearSpace
import space.kscience.kmath.ejml.EjmlLinearSpaceDDRM
import space.kscience.kmath.linear.Float64ParallelLinearSpace
import space.kscience.kmath.linear.invoke
import space.kscience.kmath.linear.linearSpace
import space.kscience.kmath.operations.Float64Field
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensorflow.produceWithTF
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
import space.kscience.kmath.tensors.core.tensorAlgebra
import kotlin.random.Random
@@ -72,12 +71,12 @@ internal class DotBenchmark {
}
@Benchmark
fun tensorDot(blackhole: Blackhole) = with(Float64Field.tensorAlgebra) {
fun multikDot(blackhole: Blackhole) = with(multikAlgebra) {
blackhole.consume(matrix1 dot matrix2)
}
@Benchmark
fun multikDot(blackhole: Blackhole) = with(multikAlgebra) {
fun tensorDot(blackhole: Blackhole) = with(Float64Field.tensorAlgebra) {
blackhole.consume(matrix1 dot matrix2)
}
@@ -87,12 +86,8 @@ internal class DotBenchmark {
}
@Benchmark
fun doubleDot(blackhole: Blackhole) = with(Float64Field.linearSpace) {
fun parallelDot(blackhole: Blackhole) = with(Float64ParallelLinearSpace) {
blackhole.consume(matrix1 dot matrix2)
}
@Benchmark
fun doubleTensorDot(blackhole: Blackhole) = DoubleTensorAlgebra.invoke {
blackhole.consume(matrix1 dot matrix2)
}
}
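Since parallelDot replaces the former doubleDot and doubleTensorDot benchmarks, a quick external sanity check is to compare the sequential and parallel products directly. The following sketch is not part of the commit; it only reuses names visible in the diff above, with square matrices as in the benchmark:

import space.kscience.kmath.linear.Float64ParallelLinearSpace
import space.kscience.kmath.linear.linearSpace
import space.kscience.kmath.operations.Float64Field
import kotlin.random.Random

fun main() {
    val random = Random(12224)
    val dim = 100
    val space = Float64Field.linearSpace
    val a = space.buildMatrix(dim, dim) { _, _ -> random.nextDouble() }
    val b = space.buildMatrix(dim, dim) { _, _ -> random.nextDouble() }
    val sequential = with(space) { a dot b }
    val parallel = with(Float64ParallelLinearSpace) { a dot b }
    // Both spaces sum each inner product in the same order, so the results
    // should agree bitwise, not merely within floating-point tolerance.
    check((0 until dim).all { i -> (0 until dim).all { j -> sequential[i, j] == parallel[i, j] } })
}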

View File

@@ -15,6 +15,7 @@ import space.kscience.kmath.ejml.EjmlLinearSpaceDDRM
import space.kscience.kmath.linear.invoke
import space.kscience.kmath.linear.linearSpace
import space.kscience.kmath.linear.lupSolver
import space.kscience.kmath.linear.parallel
import space.kscience.kmath.operations.algebra
import kotlin.random.Random
@@ -38,16 +39,19 @@ internal class MatrixInverseBenchmark {
}
@Benchmark
fun cmLUPInversion(blackhole: Blackhole) {
CMLinearSpace {
blackhole.consume(lupSolver().inverse(matrix))
}
fun kmathParallelLupInversion(blackhole: Blackhole) {
blackhole.consume(Double.algebra.linearSpace.parallel.lupSolver().inverse(matrix))
}
@Benchmark
fun ejmlInverse(blackhole: Blackhole) {
EjmlLinearSpaceDDRM {
fun cmLUPInversion(blackhole: Blackhole) = CMLinearSpace {
blackhole.consume(lupSolver().inverse(matrix))
}
@Benchmark
fun ejmlInverse(blackhole: Blackhole) = EjmlLinearSpaceDDRM {
blackhole.consume(matrix.toEjml().inverted())
}
}
}

View File

@@ -17,7 +17,7 @@ val benchmarksVersion = spclibs.versions.kotlinx.benchmark.get()
dependencies {
api("space.kscience:gradle-tools:$toolsVersion")
//plugins for benchmarks
api("org.jetbrains.kotlinx:kotlinx-benchmark-plugin:0.4.9")
api("org.jetbrains.kotlinx:kotlinx-benchmark-plugin:$benchmarksVersion")
//api("org.jetbrains.kotlin:kotlin-allopen:$kotlinVersion")
//to be used inside build-script only
//implementation(spclibs.kotlinx.serialization.json)

View File

@@ -1,443 +0,0 @@
/*
* Copyright 2018-2024 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
@file:Suppress("KDocUnresolvedReference")
package space.kscience.kmath.ejml.codegen
import org.intellij.lang.annotations.Language
import java.io.File
private fun Appendable.appendEjmlVector(type: String, ejmlMatrixType: String) {
@Language("kotlin") val text = """/**
* [EjmlVector] specialization for [$type].
*/
public class Ejml${type}Vector<out M : $ejmlMatrixType>(override val origin: M) : EjmlVector<$type, M>(origin) {
init {
require(origin.numCols == 1) { "The origin matrix must have only one column to form a vector" }
}
override val type: SafeType<${type}> get() = safeTypeOf()
override operator fun get(index: Int): $type = origin[index, 0]
}"""
appendLine(text)
appendLine()
}
private fun Appendable.appendEjmlMatrix(type: String, ejmlMatrixType: String) {
val text = """/**
* [EjmlMatrix] specialization for [$type].
*/
public class Ejml${type}Matrix<out M : $ejmlMatrixType>(override val origin: M) : EjmlMatrix<$type, M>(origin) {
override val type: SafeType<${type}> get() = safeTypeOf()
override operator fun get(i: Int, j: Int): $type = origin[i, j]
}"""
appendLine(text)
appendLine()
}
private fun Appendable.appendEjmlLinearSpace(
type: String,
kmathAlgebra: String,
ejmlMatrixParentTypeMatrix: String,
ejmlMatrixType: String,
ejmlMatrixDenseType: String,
ops: String,
denseOps: String,
isDense: Boolean,
) {
@Language("kotlin") val text = """
/**
* [EjmlLinearSpace] implementation based on [CommonOps_$ops], [DecompositionFactory_${ops}] operations and
* [${ejmlMatrixType}] matrices.
*/
public object EjmlLinearSpace${ops} : EjmlLinearSpace<${type}, ${kmathAlgebra}, $ejmlMatrixType>() {
/**
* The [${kmathAlgebra}] reference.
*/
override val elementAlgebra: $kmathAlgebra get() = $kmathAlgebra
override val type: SafeType<${type}> get() = safeTypeOf()
@Suppress("UNCHECKED_CAST")
override fun Matrix<${type}>.toEjml(): Ejml${type}Matrix<${ejmlMatrixType}> = when {
this is Ejml${type}Matrix<*> && origin is $ejmlMatrixType -> this as Ejml${type}Matrix<${ejmlMatrixType}>
else -> buildMatrix(rowNum, colNum) { i, j -> get(i, j) }
}
@Suppress("UNCHECKED_CAST")
override fun Point<${type}>.toEjml(): Ejml${type}Vector<${ejmlMatrixType}> = when {
this is Ejml${type}Vector<*> && origin is $ejmlMatrixType -> this as Ejml${type}Vector<${ejmlMatrixType}>
else -> Ejml${type}Vector(${ejmlMatrixType}(size, 1).also {
(0 until it.numRows).forEach { row -> it[row, 0] = get(row) }
})
}
override fun buildMatrix(
rows: Int,
columns: Int,
initializer: ${kmathAlgebra}.(i: Int, j: Int) -> ${type},
): Ejml${type}Matrix<${ejmlMatrixType}> = ${ejmlMatrixType}(rows, columns).also {
(0 until rows).forEach { row ->
(0 until columns).forEach { col -> it[row, col] = elementAlgebra.initializer(row, col) }
}
}.wrapMatrix()
override fun buildVector(
size: Int,
initializer: ${kmathAlgebra}.(Int) -> ${type},
): Ejml${type}Vector<${ejmlMatrixType}> = Ejml${type}Vector(${ejmlMatrixType}(size, 1).also {
(0 until it.numRows).forEach { row -> it[row, 0] = elementAlgebra.initializer(row) }
})
private fun <T : ${ejmlMatrixParentTypeMatrix}> T.wrapMatrix() = Ejml${type}Matrix(this)
private fun <T : ${ejmlMatrixParentTypeMatrix}> T.wrapVector() = Ejml${type}Vector(this)
override fun Matrix<${type}>.unaryMinus(): Matrix<${type}> = this * elementAlgebra { -one }
override fun Matrix<${type}>.dot(other: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
val out = ${ejmlMatrixType}(1, 1)
CommonOps_${ops}.mult(toEjml().origin, other.toEjml().origin, out)
return out.wrapMatrix()
}
override fun Matrix<${type}>.dot(vector: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
val out = ${ejmlMatrixType}(1, 1)
CommonOps_${ops}.mult(toEjml().origin, vector.toEjml().origin, out)
return out.wrapVector()
}
override operator fun Matrix<${type}>.minus(other: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
val out = ${ejmlMatrixType}(1, 1)
CommonOps_${ops}.add(
elementAlgebra.one,
toEjml().origin,
elementAlgebra { -one },
other.toEjml().origin,
out,${
if (isDense) "" else
"""
null,
null,"""
}
)
return out.wrapMatrix()
}
override operator fun Matrix<${type}>.times(value: ${type}): Ejml${type}Matrix<${ejmlMatrixType}> {
val res = ${ejmlMatrixType}(1, 1)
CommonOps_${ops}.scale(value, toEjml().origin, res)
return res.wrapMatrix()
}
override fun Point<${type}>.unaryMinus(): Ejml${type}Vector<${ejmlMatrixType}> {
val res = ${ejmlMatrixType}(1, 1)
CommonOps_${ops}.changeSign(toEjml().origin, res)
return res.wrapVector()
}
override fun Matrix<${type}>.plus(other: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
val out = ${ejmlMatrixType}(1, 1)
CommonOps_${ops}.add(
elementAlgebra.one,
toEjml().origin,
elementAlgebra.one,
other.toEjml().origin,
out,${
if (isDense) "" else
"""
null,
null,"""
}
)
return out.wrapMatrix()
}
override fun Point<${type}>.plus(other: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
val out = ${ejmlMatrixType}(1, 1)
CommonOps_${ops}.add(
elementAlgebra.one,
toEjml().origin,
elementAlgebra.one,
other.toEjml().origin,
out,${
if (isDense) "" else
"""
null,
null,"""
}
)
return out.wrapVector()
}
override fun Point<${type}>.minus(other: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
val out = ${ejmlMatrixType}(1, 1)
CommonOps_${ops}.add(
elementAlgebra.one,
toEjml().origin,
elementAlgebra { -one },
other.toEjml().origin,
out,${
if (isDense) "" else
"""
null,
null,"""
}
)
return out.wrapVector()
}
override fun ${type}.times(m: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> = m * this
override fun Point<${type}>.times(value: ${type}): Ejml${type}Vector<${ejmlMatrixType}> {
val res = ${ejmlMatrixType}(1, 1)
CommonOps_${ops}.scale(value, toEjml().origin, res)
return res.wrapVector()
}
override fun ${type}.times(v: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> = v * this
@UnstableKMathAPI
override fun <F : StructureFeature> computeFeature(structure: Matrix<${type}>, type: KClass<out F>): F? {
structure.getFeature(type)?.let { return it }
val origin = structure.toEjml().origin
return when (type) {
${
if (isDense)
""" InverseMatrixFeature::class -> object : InverseMatrixFeature<${type}> {
override val inverse: Matrix<${type}> by lazy {
val res = origin.copy()
CommonOps_${ops}.invert(res)
res.wrapMatrix()
}
}
DeterminantFeature::class -> object : DeterminantFeature<${type}> {
override val determinant: $type by lazy { CommonOps_${ops}.det(origin) }
}
SingularValueDecompositionFeature::class -> object : SingularValueDecompositionFeature<${type}> {
private val svd by lazy {
DecompositionFactory_${ops}.svd(origin.numRows, origin.numCols, true, true, false)
.apply { decompose(origin.copy()) }
}
override val u: Matrix<${type}> by lazy { svd.getU(null, false).wrapMatrix() }
override val s: Matrix<${type}> by lazy { svd.getW(null).wrapMatrix() }
override val v: Matrix<${type}> by lazy { svd.getV(null, false).wrapMatrix() }
override val singularValues: Point<${type}> by lazy { ${type}Buffer(svd.singularValues) }
}
QRDecompositionFeature::class -> object : QRDecompositionFeature<${type}> {
private val qr by lazy {
DecompositionFactory_${ops}.qr().apply { decompose(origin.copy()) }
}
override val q: Matrix<${type}> by lazy {
qr.getQ(null, false).wrapMatrix().withFeature(OrthogonalFeature)
}
override val r: Matrix<${type}> by lazy { qr.getR(null, false).wrapMatrix().withFeature(UFeature) }
}
CholeskyDecompositionFeature::class -> object : CholeskyDecompositionFeature<${type}> {
override val l: Matrix<${type}> by lazy {
val cholesky =
DecompositionFactory_${ops}.chol(structure.rowNum, true).apply { decompose(origin.copy()) }
cholesky.getT(null).wrapMatrix().withFeature(LFeature)
}
}
LupDecompositionFeature::class -> object : LupDecompositionFeature<${type}> {
private val lup by lazy {
DecompositionFactory_${ops}.lu(origin.numRows, origin.numCols).apply { decompose(origin.copy()) }
}
override val l: Matrix<${type}> by lazy {
lup.getLower(null).wrapMatrix().withFeature(LFeature)
}
override val u: Matrix<${type}> by lazy {
lup.getUpper(null).wrapMatrix().withFeature(UFeature)
}
override val p: Matrix<${type}> by lazy { lup.getRowPivot(null).wrapMatrix() }
}""" else """ QRDecompositionFeature::class -> object : QRDecompositionFeature<$type> {
private val qr by lazy {
DecompositionFactory_${ops}.qr(FillReducing.NONE).apply { decompose(origin.copy()) }
}
override val q: Matrix<${type}> by lazy {
qr.getQ(null, false).wrapMatrix().withFeature(OrthogonalFeature)
}
override val r: Matrix<${type}> by lazy { qr.getR(null, false).wrapMatrix().withFeature(UFeature) }
}
CholeskyDecompositionFeature::class -> object : CholeskyDecompositionFeature<${type}> {
override val l: Matrix<${type}> by lazy {
val cholesky =
DecompositionFactory_${ops}.cholesky().apply { decompose(origin.copy()) }
(cholesky.getT(null) as ${ejmlMatrixParentTypeMatrix}).wrapMatrix().withFeature(LFeature)
}
}
LUDecompositionFeature::class, DeterminantFeature::class, InverseMatrixFeature::class -> object :
LUDecompositionFeature<${type}>, DeterminantFeature<${type}>, InverseMatrixFeature<${type}> {
private val lu by lazy {
DecompositionFactory_${ops}.lu(FillReducing.NONE).apply { decompose(origin.copy()) }
}
override val l: Matrix<${type}> by lazy {
lu.getLower(null).wrapMatrix().withFeature(LFeature)
}
override val u: Matrix<${type}> by lazy {
lu.getUpper(null).wrapMatrix().withFeature(UFeature)
}
override val inverse: Matrix<${type}> by lazy {
var a = origin
val inverse = ${ejmlMatrixDenseType}(1, 1)
val solver = LinearSolverFactory_${ops}.lu(FillReducing.NONE)
if (solver.modifiesA()) a = a.copy()
val i = CommonOps_${denseOps}.identity(a.numRows)
solver.solve(i, inverse)
inverse.wrapMatrix()
}
override val determinant: $type by lazy { elementAlgebra.number(lu.computeDeterminant().real) }
}"""
}
else -> null
}?.let{
type.cast(it)
}
}
/**
* Solves for *x* in the following equation: *x = [a] <sup>-1</sup> &middot; [b]*.
*
* @param a the base matrix.
* @param b n by p matrix.
* @return the solution for *x* that is n by p.
*/
public fun solve(a: Matrix<${type}>, b: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
val res = ${ejmlMatrixType}(1, 1)
CommonOps_${ops}.solve(${ejmlMatrixType}(a.toEjml().origin), ${ejmlMatrixType}(b.toEjml().origin), res)
return res.wrapMatrix()
}
/**
* Solves for *x* in the following equation: *x = [a] <sup>-1</sup> &middot; [b]*.
*
* @param a the base matrix.
* @param b the n-component vector.
* @return the solution for *x* that is an n-component vector.
*/
public fun solve(a: Matrix<${type}>, b: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
val res = ${ejmlMatrixType}(1, 1)
CommonOps_${ops}.solve(${ejmlMatrixType}(a.toEjml().origin), ${ejmlMatrixType}(b.toEjml().origin), res)
return Ejml${type}Vector(res)
}
}"""
appendLine(text)
appendLine()
}
/**
* Generates routine EJML classes.
*/
fun ejmlCodegen(outputFile: String): Unit = File(outputFile).run {
parentFile.mkdirs()
writer().use {
it.appendLine("/*")
it.appendLine(" * Copyright 2018-2024 KMath contributors.")
it.appendLine(" * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.")
it.appendLine(" */")
it.appendLine()
it.appendLine("/* This file is generated with buildSrc/src/main/kotlin/space/kscience/kmath/ejml/codegen/ejmlCodegen.kt */")
it.appendLine()
it.appendLine("package space.kscience.kmath.ejml")
it.appendLine()
it.appendLine("""import org.ejml.data.*
import org.ejml.dense.row.CommonOps_DDRM
import org.ejml.dense.row.CommonOps_FDRM
import org.ejml.dense.row.factory.DecompositionFactory_DDRM
import org.ejml.dense.row.factory.DecompositionFactory_FDRM
import org.ejml.sparse.FillReducing
import org.ejml.sparse.csc.CommonOps_DSCC
import org.ejml.sparse.csc.CommonOps_FSCC
import org.ejml.sparse.csc.factory.DecompositionFactory_DSCC
import org.ejml.sparse.csc.factory.DecompositionFactory_FSCC
import org.ejml.sparse.csc.factory.LinearSolverFactory_DSCC
import org.ejml.sparse.csc.factory.LinearSolverFactory_FSCC
import space.kscience.attributes.SafeType
import space.kscience.attributes.safeTypeOf
import space.kscience.kmath.linear.*
import space.kscience.kmath.linear.Matrix
import space.kscience.kmath.UnstableKMathAPI
import space.kscience.kmath.nd.StructureFeature
import space.kscience.kmath.structures.Float64
import space.kscience.kmath.structures.Float32
import space.kscience.kmath.operations.Float64Field
import space.kscience.kmath.operations.Float32Field
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.FloatField
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.structures.Float64Buffer
import space.kscience.kmath.structures.Float32Buffer
import space.kscience.kmath.structures.DoubleBuffer
import space.kscience.kmath.structures.FloatBuffer
import kotlin.reflect.KClass
import kotlin.reflect.cast""")
it.appendLine()
it.appendEjmlVector("Double", "DMatrix")
it.appendEjmlVector("Float", "FMatrix")
it.appendEjmlMatrix("Double", "DMatrix")
it.appendEjmlMatrix("Float", "FMatrix")
it.appendEjmlLinearSpace("Double", "Float64Field", "DMatrix", "DMatrixRMaj", "DMatrixRMaj", "DDRM", "DDRM", true)
it.appendEjmlLinearSpace("Float", "Float32Field", "FMatrix", "FMatrixRMaj", "FMatrixRMaj", "FDRM", "FDRM", true)
it.appendEjmlLinearSpace(
type = "Double",
kmathAlgebra = "Float64Field",
ejmlMatrixParentTypeMatrix = "DMatrix",
ejmlMatrixType = "DMatrixSparseCSC",
ejmlMatrixDenseType = "DMatrixRMaj",
ops = "DSCC",
denseOps = "DDRM",
isDense = false,
)
it.appendEjmlLinearSpace(
type = "Float",
kmathAlgebra = "Float32Field",
ejmlMatrixParentTypeMatrix = "FMatrix",
ejmlMatrixType = "FMatrixSparseCSC",
ejmlMatrixDenseType = "FMatrixRMaj",
ops = "FSCC",
denseOps = "FDRM",
isDense = false,
)
}
}

View File

@@ -5,31 +5,26 @@
package space.kscience.kmath.linear
import space.kscience.kmath.operations.algebra
import kotlin.random.Random
import kotlin.time.ExperimentalTime
import kotlin.time.measureTime
@OptIn(ExperimentalTime::class)
fun main() {
fun main() = with(Float64ParallelLinearSpace) {
val random = Random(12224)
val dim = 1000
//creating invertible matrix
val matrix1 = Double.algebra.linearSpace.buildMatrix(dim, dim) { i, j ->
val matrix1 = buildMatrix(dim, dim) { i, j ->
if (i <= j) random.nextDouble() else 0.0
}
val matrix2 = Double.algebra.linearSpace.buildMatrix(dim, dim) { i, j ->
val matrix2 = buildMatrix(dim, dim) { i, j ->
if (i <= j) random.nextDouble() else 0.0
}
val time = measureTime {
with(Double.algebra.linearSpace) {
repeat(10) {
repeat(30) {
matrix1 dot matrix2
}
}
}
println(time)

View File

@@ -0,0 +1,27 @@
/*
* Copyright 2018-2024 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.linear
import kotlin.random.Random
import kotlin.time.measureTime
fun main(): Unit = with(Float64LinearSpace) {
val random = Random(1224)
val dim = 500
//creating invertible matrix
val u = buildMatrix(dim, dim) { i, j -> if (i <= j) random.nextDouble() else 0.0 }
val l = buildMatrix(dim, dim) { i, j -> if (i >= j) random.nextDouble() else 0.0 }
val matrix = l dot u
val time = measureTime {
repeat(20) {
lupSolver().inverse(matrix)
}
}
println(time)
}

View File

@@ -7,7 +7,7 @@ package space.kscience.kmath.series
import space.kscience.kmath.structures.Buffer
import space.kscience.kmath.structures.DoubleBuffer
import space.kscience.kmath.structures.Float64Buffer
import space.kscience.kmath.structures.asBuffer
import space.kscience.kmath.structures.toDoubleArray
import space.kscience.plotly.*
@@ -21,7 +21,7 @@ fun main(): Unit = with(Double.seriesAlgebra()) {
val arrayOfRandoms = DoubleArray(20) { random.nextDouble() }
val series1: DoubleBuffer = arrayOfRandoms.asBuffer()
val series1: Float64Buffer = arrayOfRandoms.asBuffer()
val series2: Series<Double> = series1.moveBy(3)
val res = series2 - series1

View File

@@ -1,5 +1,5 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-bin.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-bin.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists

View File

@@ -54,11 +54,12 @@ public object Float64LinearSpace : LinearSpace<Double, Float64Field> {
require(colNum == other.rowNum) { "Matrix dot operation dimension mismatch: ($rowNum, $colNum) x (${other.rowNum}, ${other.colNum})" }
val rows = this@dot.rows.map { it.linearize() }
val columns = other.columns.map { it.linearize() }
val indices = 0 until this.colNum // shared dimension: colNum == other.rowNum (checked above)
return buildMatrix(rowNum, other.colNum) { i, j ->
val r = rows[i]
val c = columns[j]
var res = 0.0
for (l in r.indices) {
for (l in indices) {
res += r[l] * c[l]
}
res
@@ -69,10 +70,11 @@ public object Float64LinearSpace : LinearSpace<Double, Float64Field> {
override fun Matrix<Double>.dot(vector: Point<Double>): Float64Buffer {
require(colNum == vector.size) { "Matrix dot vector operation dimension mismatch: ($rowNum, $colNum) x (${vector.size})" }
val rows = this@dot.rows.map { it.linearize() }
val indices = 0 until this.colNum // shared dimension: colNum == vector.size (checked above)
return Float64Buffer(rowNum) { i ->
val r = rows[i]
var res = 0.0
for (j in r.indices) {
for (j in indices) {
res += r[j] * vector[j]
}
res

View File

@@ -11,6 +11,7 @@ import space.kscience.attributes.Attributes
import space.kscience.attributes.PolymorphicAttribute
import space.kscience.attributes.safeTypeOf
import space.kscience.kmath.UnstableKMathAPI
import space.kscience.kmath.nd.*
import space.kscience.kmath.operations.*
import space.kscience.kmath.structures.*
@@ -150,7 +151,14 @@ public fun <T : Comparable<T>> LinearSpace<T, Field<T>>.lup(
for (row in col + 1 until m) lu[row, col] /= luDiag
}
return GenericLupDecomposition(this@lup, lu.toStructure2D(), pivot.asBuffer(), even)
val shape = ShapeND(rowNum, colNum)
val structure2D = BufferND(RowStrides(shape), lu).as2D()
return GenericLupDecomposition(this@lup, structure2D, pivot.asBuffer(), even)
}
}
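The new return path wraps the existing LU buffer in a BufferND over RowStrides and views it as 2D, instead of copying it element by element through the removed toStructure2D helper (see the BufferAccessor2D change below). A standalone sketch of that wrapping pattern, illustrative only and using the ND API names that appear in this diff:

import space.kscience.kmath.nd.BufferND
import space.kscience.kmath.nd.RowStrides
import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.nd.as2D
import space.kscience.kmath.structures.asBuffer

fun main() {
    // Wrap a flat row-major buffer as a 2x3 structure without copying elements.
    val flat = doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0).asBuffer()
    val matrix = BufferND(RowStrides(ShapeND(2, 3)), flat).as2D()
    println(matrix[1, 2]) // prints 6.0
}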
@@ -166,7 +174,7 @@ internal fun <T> LinearSpace<T, Field<T>>.solve(
): Matrix<T> {
require(matrix.rowNum == lup.l.rowNum) { "Matrix dimension mismatch. Expected ${lup.l.rowNum}, but got ${matrix.rowNum}" }
BufferAccessor2D(matrix.rowNum, matrix.colNum, elementAlgebra.bufferFactory).run {
with(BufferAccessor2D(matrix.rowNum, matrix.colNum, elementAlgebra.bufferFactory)) {
elementAlgebra {
// Apply permutations to b
val bp = create { _, _ -> zero }

View File

@@ -77,13 +77,11 @@ private inline fun <T, A : Algebra<T>> BufferAlgebra<T, A>.zipInline(
return elementBufferFactory(l.size) { elementAlgebra.block(l[it], r[it]) }
}
public fun <T> BufferAlgebra<T, *>.buffer(size: Int, initializer: (Int) -> T): MutableBuffer<T> {
return elementBufferFactory(size, initializer)
}
public fun <T> BufferAlgebra<T, *>.buffer(size: Int, initializer: (Int) -> T): MutableBuffer<T> =
elementBufferFactory(size, initializer)
public fun <T, A> A.buffer(initializer: (Int) -> T): MutableBuffer<T> where A : BufferAlgebra<T, *>, A : WithSize {
return elementBufferFactory(size, initializer)
}
public fun <T, A> A.buffer(initializer: (Int) -> T): MutableBuffer<T> where A : BufferAlgebra<T, *>, A : WithSize =
elementBufferFactory(size, initializer)
public fun <T, A : TrigonometricOperations<T>> BufferAlgebra<T, A>.sin(arg: Buffer<T>): Buffer<T> =
mapInline(arg) { sin(it) }

View File

@@ -42,6 +42,8 @@ public inline fun <reified T> BufferFactory(): BufferFactory<T> = BufferFactory(
*/
public interface MutableBufferFactory<T> : BufferFactory<T> {
override fun invoke(size: Int, builder: (Int) -> T): MutableBuffer<T>
public companion object
}
/**

View File

@@ -6,12 +6,7 @@
package space.kscience.kmath.structures
import space.kscience.attributes.SafeType
import space.kscience.kmath.nd.BufferND
import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.nd.Structure2D
import space.kscience.kmath.nd.as2D
import kotlin.collections.component1
import kotlin.collections.component2
/**
* A context that allows to operate on a [MutableBuffer] as on 2d array
@@ -32,10 +27,10 @@ internal class BufferAccessor2D<T>(
fun create(mat: Structure2D<T>): MutableBuffer<T> = create { i, j -> mat[i, j] }
//TODO optimize wrapper
fun MutableBuffer<T>.toStructure2D(): Structure2D<T> = BufferND(
type, ShapeND(rowNum, colNum)
) { (i, j) -> get(i, j) }.as2D()
// //TODO optimize wrapper
// fun MutableBuffer<T>.toStructure2D(): Structure2D<T> = BufferND(
// type, ShapeND(rowNum, colNum)
// ) { (i, j) -> get(i, j) }.as2D()
inner class Row(val buffer: MutableBuffer<T>, val rowIndex: Int) : MutableBuffer<T> {
override val type: SafeType<T> get() = buffer.type

View File

@@ -39,6 +39,7 @@ public value class Float64Buffer(public val array: DoubleArray) : PrimitiveBuffe
}
}
@Deprecated("Use Float64Buffer instead", ReplaceWith("Float64Buffer"))
public typealias DoubleBuffer = Float64Buffer
/**

View File

@@ -77,7 +77,6 @@ public inline fun <T> MutableBuffer(
size: Int,
initializer: (Int) -> T,
): MutableBuffer<T> = when (type.kType) {
typeOf<Boolean>() -> TODO()
typeOf<Int8>() -> Int8Buffer(size) { initializer(it) as Int8 } as MutableBuffer<T>
typeOf<Int16>() -> MutableBuffer.short(size) { initializer(it) as Int16 } as MutableBuffer<T>
typeOf<Int32>() -> MutableBuffer.int(size) { initializer(it) as Int32 } as MutableBuffer<T>

View File

@@ -0,0 +1,125 @@
/*
* Copyright 2018-2024 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.linear
import space.kscience.kmath.PerformancePitfall
import space.kscience.kmath.nd.*
import space.kscience.kmath.operations.Float64BufferOps
import space.kscience.kmath.operations.Float64Field
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.structures.Buffer
import space.kscience.kmath.structures.Float64Buffer
import space.kscience.kmath.structures.asBuffer
import java.util.stream.IntStream
public object Float64ParallelLinearSpace : LinearSpace<Double, Float64Field> {
override val elementAlgebra: Float64Field = Float64Field
override fun buildMatrix(
rows: Int,
columns: Int,
initializer: Float64Field.(i: Int, j: Int) -> Double,
): Matrix<Double> {
val shape = ShapeND(rows, columns)
val indexer = BufferAlgebraND.defaultIndexerBuilder(shape)
val buffer = IntStream.range(0, indexer.linearSize).parallel().mapToDouble { offset ->
val (i, j) = indexer.index(offset)
elementAlgebra.initializer(i, j)
}.toArray().asBuffer()
return MutableBufferND(
indexer,
buffer
).as2D()
}
override fun buildVector(size: Int, initializer: Float64Field.(Int) -> Double): Float64Buffer =
IntStream.range(0, size).parallel().mapToDouble { Float64Field.initializer(it) }.toArray().asBuffer()
override fun Matrix<Double>.unaryMinus(): Matrix<Double> = Floa64FieldOpsND {
asND().map { -it }.as2D()
}
override fun Matrix<Double>.plus(other: Matrix<Double>): Matrix<Double> = Floa64FieldOpsND {
require(shape.contentEquals(other.shape)) { "Shape mismatch on Matrix::plus. Expected $shape but found ${other.shape}" }
asND().plus(other.asND()).as2D()
}
override fun Matrix<Double>.minus(other: Matrix<Double>): Matrix<Double> = Floa64FieldOpsND {
require(shape.contentEquals(other.shape)) { "Shape mismatch on Matrix::minus. Expected $shape but found ${other.shape}" }
asND().minus(other.asND()).as2D()
}
// Create a continuous in-memory representation of this vector for better memory layout handling
private fun Buffer<Double>.linearize() = if (this is Float64Buffer) {
this.array
} else {
DoubleArray(size) { get(it) }
}
@OptIn(PerformancePitfall::class)
override fun Matrix<Double>.dot(other: Matrix<Double>): Matrix<Double> {
require(colNum == other.rowNum) { "Matrix dot operation dimension mismatch: ($rowNum, $colNum) x (${other.rowNum}, ${other.colNum})" }
val rows = this@dot.rows.map { it.linearize() }
val columns = other.columns.map { it.linearize() }
val indices = 0 until this.colNum // shared dimension: colNum == other.rowNum (checked above)
return buildMatrix(rowNum, other.colNum) { i, j ->
val r = rows[i]
val c = columns[j]
var res = 0.0
for (l in indices) {
res += r[l] * c[l]
}
res
}
}
@OptIn(PerformancePitfall::class)
override fun Matrix<Double>.dot(vector: Point<Double>): Float64Buffer {
require(colNum == vector.size) { "Matrix dot vector operation dimension mismatch: ($rowNum, $colNum) x (${vector.size})" }
val rows = this@dot.rows.map { it.linearize() }
val indices = 0 until this.colNum // shared dimension: colNum == vector.size (checked above)
return Float64Buffer(rowNum) { i ->
val r = rows[i]
var res = 0.0
for (j in indices) {
res += r[j] * vector[j]
}
res
}
}
override fun Matrix<Double>.times(value: Double): Matrix<Double> = Floa64FieldOpsND {
asND().map { it * value }.as2D()
}
public override fun Point<Double>.plus(other: Point<Double>): Float64Buffer = Float64BufferOps.run {
this@plus + other
}
public override fun Point<Double>.minus(other: Point<Double>): Float64Buffer = Float64BufferOps.run {
this@minus - other
}
public override fun Point<Double>.times(value: Double): Float64Buffer = Float64BufferOps.run {
scale(this@times, value)
}
public operator fun Point<Double>.div(value: Double): Float64Buffer = Float64BufferOps.run {
scale(this@div, 1.0 / value)
}
public override fun Double.times(v: Point<Double>): Float64Buffer = v * this
}
public val Float64LinearSpace.parallel: Float64ParallelLinearSpace get() = Float64ParallelLinearSpace
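For reference, a minimal usage sketch of the new space (not part of the commit; it mirrors the dot-timing example above). Both matrix construction and multiplication go through the parallel buildMatrix, so element computation is spread across a parallel IntStream:

import space.kscience.kmath.linear.Float64ParallelLinearSpace
import kotlin.random.Random

fun main() = with(Float64ParallelLinearSpace) {
    val random = Random(1224)
    val dim = 1000
    val a = buildMatrix(dim, dim) { _, _ -> random.nextDouble() }
    val b = buildMatrix(dim, dim) { _, _ -> random.nextDouble() }
    // dot delegates to buildMatrix, so each output element is computed in parallel
    val c = a dot b
    println(c[0, 0])
}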

View File

@@ -0,0 +1,58 @@
/*
* Copyright 2018-2024 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package space.kscience.kmath.structures
import space.kscience.attributes.SafeType
import java.util.stream.Collectors
import java.util.stream.IntStream
import kotlin.reflect.typeOf
/**
* A Java stream-based parallel version of [MutableBuffer].
* There is no parallelization for [Int8], [Int16] and [Float32] types.
* They are processed sequentially.
*/
@Suppress("UNCHECKED_CAST")
public fun <T> MutableBuffer.Companion.parallel(
type: SafeType<T>,
size: Int,
initializer: (Int) -> T,
): MutableBuffer<T> = when (type.kType) {
typeOf<Int8>() -> Int8Buffer(size) { initializer(it) as Int8 } as MutableBuffer<T>
typeOf<Int16>() -> Int16Buffer(size) { initializer(it) as Int16 } as MutableBuffer<T>
typeOf<Int32>() -> IntStream.range(0, size).parallel().map { initializer(it) as Int32 }.toArray()
.asBuffer() as MutableBuffer<T>
typeOf<Int64>() -> IntStream.range(0, size).parallel().mapToLong { initializer(it) as Int64 }.toArray()
.asBuffer() as MutableBuffer<T>
typeOf<Float>() -> Float32Buffer(size) { initializer(it) as Float } as MutableBuffer<T>
typeOf<Double>() -> IntStream.range(0, size).parallel().mapToDouble { initializer(it) as Float64 }.toArray()
.asBuffer() as MutableBuffer<T>
//TODO add unsigned types
else -> IntStream.range(0, size).parallel().mapToObj { initializer(it) }.collect(Collectors.toList<T>()).asMutableBuffer(type)
}
public class ParallelBufferFactory<T>(override val type: SafeType<T>) : MutableBufferFactory<T> {
override fun invoke(size: Int, builder: (Int) -> T): MutableBuffer<T> {
TODO("Not yet implemented")
}
}
/**
* A Java stream-based parallel alternative to [MutableBufferFactory].
* See [MutableBuffer.Companion.parallel] for details.
*/
public fun <T> MutableBufferFactory.Companion.parallel(
type: SafeType<T>,
): MutableBufferFactory<T> = object : MutableBufferFactory<T> {
override val type: SafeType<T> = type
override fun invoke(size: Int, builder: (Int) -> T): MutableBuffer<T> = MutableBuffer.parallel(type, size, builder)
}
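A minimal usage sketch for the parallel buffer (hypothetical, not part of the commit; the parallel extension lives in the space.kscience.kmath.structures package shown above):

import space.kscience.attributes.safeTypeOf
import space.kscience.kmath.structures.MutableBuffer
import space.kscience.kmath.structures.parallel

fun main() {
    // Double hits the parallel mapToDouble branch of the when block above.
    val buffer = MutableBuffer.parallel(safeTypeOf<Double>(), 1_000_000) { it * 0.5 }
    println(buffer[42]) // prints 21.0
}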

View File

@@ -59,7 +59,7 @@ public object ValueAndErrorField : Field<ValueAndError> {
ValueAndError(a.value * value, a.dispersion * value.pow(2))
private class ValueAndErrorBuffer(val values: DoubleBuffer, val ds: DoubleBuffer) : MutableBuffer<ValueAndError> {
private class ValueAndErrorBuffer(val values: Float64Buffer, val ds: Float64Buffer) : MutableBuffer<ValueAndError> {
init {
require(values.size == ds.size)
}