KMP library for tensors #300
@@ -14,7 +14,7 @@ import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra
 fun main() {
 
     // work in context with linear operations
-    DoubleLinearOpsTensorAlgebra.invoke {
+    DoubleLinearOpsTensorAlgebra {
 
         // set true value of x
         val trueX = fromArray(
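The `.invoke { ... }` call can be shortened because Kotlin resolves a direct call on an object to its `invoke` operator. A minimal sketch of the convention, using a hypothetical `DemoAlgebra` object rather than the real kmath type:

```kotlin
// Hypothetical stand-in for an algebra context object; not part of kmath.
object DemoAlgebra {
    // Declaring `invoke` as an operator lets callers write `DemoAlgebra { ... }`
    // instead of `DemoAlgebra.invoke { ... }`.
    operator fun <R> invoke(block: DemoAlgebra.() -> R): R = this.block()

    fun greet(): String = "inside the algebra context"
}

fun main() {
    // The two calls below are equivalent; the diff above switches to the second form.
    println(DemoAlgebra.invoke { greet() })
    println(DemoAlgebra { greet() })
}
```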
@@ -19,7 +19,7 @@ fun main() {
     val randSeed = 100500L

     // work in context with linear operations
-    DoubleLinearOpsTensorAlgebra.invoke {
+    DoubleLinearOpsTensorAlgebra {
         // take coefficient vector from normal distribution
         val alpha = randomNormal(
             intArrayOf(5),
@@ -3,6 +3,9 @@ plugins {
 }

 kotlin.sourceSets {
+    all {
+        languageSettings.useExperimentalAnnotation("space.kscience.kmath.misc.UnstableKMathAPI")
+    }
     commonMain {
         dependencies {
             api(project(":kmath-core"))
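The added `all { ... }` block opts every source set of the module into the `UnstableKMathAPI` annotation, so individual files no longer need a use-site opt-in. A rough sketch of the mechanism with a hypothetical annotation standing in for the kmath one:

```kotlin
// Hypothetical opt-in marker, standing in for space.kscience.kmath.misc.UnstableKMathAPI.
@RequiresOptIn(message = "This API is unstable and may change.")
annotation class DemoUnstableApi

@DemoUnstableApi
fun unstableFeature(): Int = 42

// Without a module-wide opt-in (the languageSettings line above), every caller
// would need an explicit annotation like this:
@OptIn(DemoUnstableApi::class)
fun caller(): Int = unstableFeature()
```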
@@ -109,9 +109,13 @@ public fun Array<IntArray>.toIntTensor(): IntTensor {
 }

 public fun DoubleTensor.toDoubleArray(): DoubleArray {
-    return tensor.mutableBuffer.array().drop(bufferStart).take(numElements).toDoubleArray()
+    return DoubleArray(numElements) { i ->
+        mutableBuffer[bufferStart + i]
+    }
 }

 public fun IntTensor.toIntArray(): IntArray {
-    return tensor.mutableBuffer.array().drop(bufferStart).take(numElements).toIntArray()
+    return IntArray(numElements) { i ->
+        mutableBuffer[bufferStart + i]
+    }
 }
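The rewritten conversions fill a primitive array directly instead of going through `drop(...).take(...)`, which materialises an intermediate boxed list before copying it back into an array. A small self-contained sketch of the same copy pattern over a plain `DoubleArray` (the real code reads from the tensor's mutable buffer; `bufferStart` and `numElements` mirror the names in the diff):

```kotlin
// Copy a window of a backing array into a fresh primitive array, no boxing involved.
fun copyWindow(backing: DoubleArray, bufferStart: Int, numElements: Int): DoubleArray =
    DoubleArray(numElements) { i -> backing[bufferStart + i] }

fun main() {
    val backing = doubleArrayOf(0.0, 1.0, 2.0, 3.0, 4.0, 5.0)
    // Copies three elements starting at offset 2 with no intermediate collections.
    println(copyWindow(backing, bufferStart = 2, numElements = 3).toList()) // [2.0, 3.0, 4.0]
}
```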
@@ -5,38 +5,35 @@ import space.kscience.kmath.tensors.core.algebras.DoubleLinearOpsTensorAlgebra
 import space.kscience.kmath.tensors.core.algebras.DoubleTensorAlgebra


-internal fun checkEmptyShape(shape: IntArray): Unit =
+internal fun checkEmptyShape(shape: IntArray) =
     check(shape.isNotEmpty()) {
         "Illegal empty shape provided"
     }

-internal fun checkEmptyDoubleBuffer(buffer: DoubleArray): Unit =
+internal fun checkEmptyDoubleBuffer(buffer: DoubleArray) =
     check(buffer.isNotEmpty()) {
         "Illegal empty buffer provided"
     }

-internal fun checkBufferShapeConsistency(shape: IntArray, buffer: DoubleArray): Unit =
+internal fun checkBufferShapeConsistency(shape: IntArray, buffer: DoubleArray) =
     check(buffer.size == shape.reduce(Int::times)) {
         "Inconsistent shape ${shape.toList()} for buffer of size ${buffer.size} provided"
     }

-internal fun <T> checkShapesCompatible(a: TensorStructure<T>, b: TensorStructure<T>): Unit =
+internal fun <T> checkShapesCompatible(a: TensorStructure<T>, b: TensorStructure<T>) =
     check(a.shape contentEquals b.shape) {
         "Incompatible shapes ${a.shape.toList()} and ${b.shape.toList()} "
     }

-internal fun checkTranspose(dim: Int, i: Int, j: Int): Unit =
+internal fun checkTranspose(dim: Int, i: Int, j: Int) =
     check((i < dim) and (j < dim)) {
         "Cannot transpose $i to $j for a tensor of dim $dim"
     }

-internal fun <T> checkView(a: TensorStructure<T>, shape: IntArray): Unit =
+internal fun <T> checkView(a: TensorStructure<T>, shape: IntArray) =
     check(a.shape.reduce(Int::times) == shape.reduce(Int::times))

-internal fun checkSquareMatrix(shape: IntArray): Unit {
+internal fun checkSquareMatrix(shape: IntArray) {
     val n = shape.size
     check(n >= 2) {
         "Expected tensor with 2 or more dimensions, got size $n instead"
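The only change across these check helpers is dropping the explicit `: Unit` return type, which the compiler infers for expression-bodied functions anyway; behaviour is unchanged. For example:

```kotlin
// The two declarations below have the same signature; Unit is inferred for the second.
fun requirePositiveExplicit(x: Int): Unit = require(x > 0) { "x must be positive" }
fun requirePositiveInferred(x: Int) = require(x > 0) { "x must be positive" }
```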
@@ -48,14 +45,12 @@ internal fun checkSquareMatrix(shape: IntArray): Unit {

 internal fun DoubleTensorAlgebra.checkSymmetric(
     tensor: TensorStructure<Double>, epsilon: Double = 1e-6
-): Unit =
+) =
     check(tensor.eq(tensor.transpose(), epsilon)) {
         "Tensor is not symmetric about the last 2 dimensions at precision $epsilon"
     }

-internal fun DoubleLinearOpsTensorAlgebra.checkPositiveDefinite(
-    tensor: DoubleTensor, epsilon: Double = 1e-6
-): Unit {
+internal fun DoubleLinearOpsTensorAlgebra.checkPositiveDefinite(tensor: DoubleTensor, epsilon: Double = 1e-6) {
     checkSymmetric(tensor, epsilon)
     for (mat in tensor.matrixSequence())
         check(mat.asTensor().detLU().value() > 0.0) {
@@ -34,18 +34,6 @@ internal fun <T> BufferedTensor<T>.matrixSequence(): Sequence<BufferedTensor<T>>
     }
 }

-internal inline fun <T> BufferedTensor<T>.forEachVector(vectorAction: (BufferedTensor<T>) -> Unit) {
-    for (vector in vectorSequence()) {
-        vectorAction(vector)
-    }
-}
-
-internal inline fun <T> BufferedTensor<T>.forEachMatrix(matrixAction: (BufferedTensor<T>) -> Unit) {
-    for (matrix in matrixSequence()) {
-        matrixAction(matrix)
-    }
-}
-
 internal fun dotHelper(
     a: MutableStructure2D<Double>,
     b: MutableStructure2D<Double>,
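Dropping the `forEachVector`/`forEachMatrix` wrappers loses nothing, since the underlying `vectorSequence()`/`matrixSequence()` can be iterated directly. A tiny sketch of the call pattern with a stand-in class (not the kmath `BufferedTensor`):

```kotlin
// Hypothetical tensor-like class exposing a sequence of its matrix slices.
class DemoTensor(private val matrices: List<String>) {
    fun matrixSequence(): Sequence<String> = matrices.asSequence()
}

fun main() {
    val tensor = DemoTensor(listOf("m0", "m1"))
    // Instead of tensor.forEachMatrix { println(it) }:
    for (matrix in tensor.matrixSequence()) println(matrix)
    // or, equivalently:
    tensor.matrixSequence().forEach(::println)
}
```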
@@ -14,7 +14,6 @@ internal fun Buffer<Int>.array(): IntArray = when (this) {
     else -> this.toIntArray()
 }

-
 /**
  * Returns a reference to [DoubleArray] containing all of the elements of this [Buffer] or copy the data.
  */
@@ -31,7 +30,7 @@ internal fun getRandomNormals(n: Int, seed: Long): DoubleArray {

 internal fun getRandomUnitVector(n: Int, seed: Long): DoubleArray {
     val unnorm = getRandomNormals(n, seed)
-    val norm = sqrt(unnorm.map { it * it }.sum())
+    val norm = sqrt(unnorm.sumOf { it * it })
     return unnorm.map { it / norm }.toDoubleArray()
 }

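`sumOf` folds the squared values directly, whereas `map { it * it }.sum()` first builds a boxed intermediate list. A minimal sketch of the same norm computation on a plain `DoubleArray`:

```kotlin
import kotlin.math.sqrt

// Euclidean norm without intermediate collections; sumOf accumulates the squares in place.
fun euclideanNorm(values: DoubleArray): Double = sqrt(values.sumOf { it * it })

fun main() {
    println(euclideanNorm(doubleArrayOf(3.0, 4.0))) // 5.0
}
```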
@@ -45,23 +44,33 @@ internal fun minusIndexFrom(n: Int, i: Int): Int = if (i >= 0) i else {

 internal fun <T> BufferedTensor<T>.minusIndex(i: Int): Int = minusIndexFrom(this.dimension, i)

-internal fun format(value: Double, digits: Int = 4): String {
-    val ten = 10.0
-    val approxOrder = if (value == 0.0) 0 else ceil(log10(abs(value))).toInt()
-    val order = if (
-        ((value % ten) == 0.0) or
-        (value == 1.0) or
-        ((1 / value) % ten == 0.0)
-    ) approxOrder else approxOrder - 1
-    val lead = value / ten.pow(order)
-    val leadDisplay = round(lead * ten.pow(digits)) / ten.pow(digits)
-    val orderDisplay = if (order == 0) "" else if (order > 0) "E+$order" else "E$order"
-    val valueDisplay = "$leadDisplay$orderDisplay"
-    val res = if (value < 0.0) valueDisplay else " $valueDisplay"
-    val fLength = digits + 6
-    val endSpace = " ".repeat(fLength - res.length)
-    return "$res$endSpace"
-}
+internal fun format(value: Double, digits: Int = 4): String = buildString {
+    val res = buildString {
+        val ten = 10.0
+        val approxOrder = if (value == 0.0) 0 else ceil(log10(abs(value))).toInt()
+        val order = if (
+            ((value % ten) == 0.0) ||
+            (value == 1.0) ||
+            ((1 / value) % ten == 0.0)
+        ) approxOrder else approxOrder - 1
+        val lead = value / ten.pow(order)
+        if (value >= 0.0) append(' ')
+        append(round(lead * ten.pow(digits)) / ten.pow(digits))
+        when {
+            order == 0 -> Unit
+            order > 0 -> {
+                append("e+")
+                append(order)
+            }
+            else -> {
+                append('e')
+                append(order)
+            }
+        }
+    }
+    val fLength = digits + 6
+    append(res)
+    repeat(fLength - res.length) { append(' ') }
+}

 internal fun DoubleTensor.toPrettyString(): String = buildString {
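The rewritten `format` assembles the mantissa and exponent text with `buildString` and then right-pads the result to a fixed width of `digits + 6` characters, instead of concatenating several intermediate strings. A simplified standalone sketch of the padding pattern only (not the exact kmath lead/exponent logic):

```kotlin
// Append a value with a sign column, then right-pad to a fixed field width.
fun padded(value: Double, digits: Int = 4): String = buildString {
    if (value >= 0.0) append(' ')      // reserve a column for the minus sign
    append(value)
    val fieldLength = digits + 6       // fixed output width, as in the diff above
    repeat(fieldLength - length) { append(' ') }
}

fun main() {
    println("[" + padded(3.14159) + "]")
    println("[" + padded(-2.5) + "]")
}
```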