Complete LU and matrix mapping
parent efb23591a9
commit 1fa0da2810
@@ -1,5 +1,7 @@
package space.kscience.kmath.tensors

import space.kscience.kmath.linear.Matrix
import space.kscience.kmath.nd.*
import space.kscience.kmath.structures.*

@@ -29,59 +31,50 @@ public open class BufferedTensor<T>(

    override fun hashCode(): Int = 0

    // todo rename to vector
    public inline fun forEachVector(vectorAction: (MutableStructure1D<T>) -> Unit): Unit {
        check(shape.size >= 1) { "todo" }
        val vectorOffset = strides.strides[0]
        val vectorShape = intArrayOf(shape.last())
        for (offset in 0 until numel step vectorOffset) {
            val vector = BufferedTensor<T>(vectorShape, buffer, offset).as1D()
            vectorAction(vector)
        }
    }

    public inline fun forEachMatrix(matrixAction: (MutableStructure2D<T>) -> Unit): Unit {
        check(shape.size >= 2) { "todo" }
        val matrixOffset = strides.strides[1]
        val matrixShape = intArrayOf(shape[shape.size - 2], shape.last()) //todo better way?
        for (offset in 0 until numel step matrixOffset) {
            val matrix = BufferedTensor<T>(matrixShape, buffer, offset).as2D()
            matrixAction(matrix)
        }
    }
    // todo remove code copy-pasting

    public fun vectorSequence(): Sequence<MutableStructure1D<T>> = sequence {
        check(shape.size >= 1) { "todo" }
        val vectorOffset = strides.strides[0]
        val vectorShape = intArrayOf(shape.last())
        for (offset in 0 until numel step vectorOffset) {
            val vector = BufferedTensor<T>(vectorShape, buffer, offset).as1D()
            yield(vector)
        }
    }

    public fun matrixSequence(): Sequence<MutableStructure2D<T>> = sequence {
        check(shape.size >= 2) { "todo" }
        val matrixOffset = strides.strides[1]
        val matrixShape = intArrayOf(shape[shape.size - 2], shape.last()) //todo better way?
        for (offset in 0 until numel step matrixOffset) {
            val matrix = BufferedTensor<T>(matrixShape, buffer, offset).as2D()
            yield(matrix)
        }
    }

}

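For orientation, a minimal self-contained sketch of what matrixSequence() is meant to do for a row-major buffer, using plain Kotlin arrays instead of kmath's BufferedTensor (the helper name sliceMatrices is hypothetical): walk the flat storage in blocks of the last two dimensions and yield each block's offset and extent.

// Hypothetical stand-in for matrixSequence(): yields (offset, rows, cols) for every
// trailing rows x cols block of a row-major buffer with the given shape.
fun sliceMatrices(shape: IntArray, bufferSize: Int): Sequence<Triple<Int, Int, Int>> = sequence {
    require(shape.size >= 2) { "at least two dimensions are required" }
    val cols = shape.last()
    val rows = shape[shape.size - 2]
    val blockSize = rows * cols
    for (offset in 0 until bufferSize step blockSize) {
        yield(Triple(offset, rows, cols))
    }
}

// For example, shape [2, 3, 4] over a 24-element buffer yields two 3 x 4 blocks, at offsets 0 and 12.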
/*
//todo make generator mb nextMatrixIndex?
public class InnerMatrix<T>(private val tensor: BufferedTensor<T>){
    private var offset: Int = 0
    private val n : Int = tensor.shape.size
    //stride?
    private val step = tensor.shape[n - 1] * tensor.shape[n - 2]

    public operator fun get(i: Int, j: Int): T {
        val index = tensor.strides.index(offset)
        index[n - 2] = i
        index[n - 1] = j
        return tensor[index]
    }

    public operator fun set(i: Int, j: Int, value: T): Unit {
        val index = tensor.strides.index(offset)
        index[n - 2] = i
        index[n - 1] = j
        tensor[index] = value
    }

    public fun makeStep(){
        offset += step
    }
}

public class InnerVector<T>(private val tensor: BufferedTensor<T>){
    private var offset: Int = 0
    private val n : Int = tensor.shape.size
    //stride?
    private val step = tensor.shape[n - 1]

    public operator fun get(i: Int): T {
        val index = tensor.strides.index(offset)
        index[n - 1] = i
        return tensor[index]
    }

    public operator fun set(i: Int, value: T): Unit {
        val index = tensor.strides.index(offset)
        index[n - 1] = i
        tensor[index] = value
    }

    public fun makeStep(){
        offset += step
    }
}
//todo default buffer = arrayOf(0)???
*/

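The commented-out cursor classes above (InnerMatrix/InnerVector with explicit makeStep() calls) are superseded by matrixSequence()/vectorSequence(): iterating over a batch becomes a plain for-loop, as in the rewritten lu() further below. A sketch of the new usage, assuming DoubleTensor, like IntTensor below, has a (shape, flat buffer) constructor:

val batch = DoubleTensor(intArrayOf(2, 3, 3), DoubleArray(18) { it.toDouble() })
for (matrix in batch.matrixSequence()) {
    // matrix is a MutableStructure2D<Double> over one 3 x 3 slice; no manual offset stepping
    matrix[0, 0] = 1.0
}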
public class IntTensor(
    shape: IntArray,

@@ -9,27 +9,21 @@ public class DoubleLinearOpsTensorAlgebra :
    }

    override fun DoubleTensor.lu(): Pair<DoubleTensor, IntTensor> {
        /*

        // todo checks

        val luTensor = this.copy()
        val lu = InnerMatrix(luTensor)
        //stride TODO!!! move to generator?
        var matCnt = 1
        for (i in 0 until this.shape.size - 2) {
            matCnt *= this.shape[i]
        }

        val n = this.shape.size
        val m = this.shape[n - 1]
        val pivotsShape = IntArray(n - 1) { i ->
            this.shape[i]
        }
        val m = this.shape.last()
        val pivotsShape = IntArray(n - 1) { i -> this.shape[i] }
        val pivotsTensor = IntTensor(
            pivotsShape,
            IntArray(matCnt * m) { 0 }
            IntArray(pivotsShape.reduce(Int::times)) { 0 } //todo default???
        )
        val pivot = InnerVector(pivotsTensor)
        for (i in 0 until matCnt) {
            for (row in 0 until m) pivot[row] = row

        for ((lu, pivots) in luTensor.matrixSequence().zip(pivotsTensor.vectorSequence())){
            for (row in 0 until m) pivots[row] = row

            for (i in 0 until m) {
                var maxA = -1.0
@@ -47,9 +41,9 @@ public class DoubleLinearOpsTensorAlgebra :

                if (iMax != i) {

                    val j = pivot[i]
                    pivot[i] = pivot[iMax]
                    pivot[iMax] = j
                    val j = pivots[i]
                    pivots[i] = pivots[iMax]
                    pivots[iMax] = j

                    for (k in 0 until m) {
                        val tmp = lu[i, k]
@@ -66,26 +60,28 @@ public class DoubleLinearOpsTensorAlgebra :
                    }
                }
            }
            lu.makeStep()
            pivot.makeStep()
        }

        return Pair(luTensor, pivotsTensor)*/

        TODO("Andrei, use view, get, as2D, as1D")
        return Pair(luTensor, pivotsTensor)

    }

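The hunks above elide the pivot search and the elimination step. For reference, a self-contained sketch of the partial-pivoting LU factorization that lu() applies to each square matrix slice (plain Kotlin arrays, no kmath types, and no zero-pivot/singularity handling):

// In-place LU with partial pivoting on an m x m matrix `a`; row swaps are recorded in `pivots`.
// Afterwards the strict lower triangle of `a` holds L's multipliers and the upper triangle holds U.
fun luInPlace(a: Array<DoubleArray>, pivots: IntArray) {
    val m = a.size
    for (row in 0 until m) pivots[row] = row
    for (i in 0 until m) {
        // find the row with the largest absolute value in column i
        var maxA = -1.0
        var iMax = i
        for (k in i until m) {
            val absA = kotlin.math.abs(a[k][i])
            if (absA > maxA) {
                maxA = absA
                iMax = k
            }
        }
        if (iMax != i) {
            // swap the pivot bookkeeping and the rows themselves
            val j = pivots[i]; pivots[i] = pivots[iMax]; pivots[iMax] = j
            val tmp = a[i]; a[i] = a[iMax]; a[iMax] = tmp
        }
        // eliminate below the pivot, storing the multipliers in place
        for (j in i + 1 until m) {
            a[j][i] /= a[i][i]
            for (k in i + 1 until m) {
                a[j][k] -= a[j][i] * a[i][k]
            }
        }
    }
}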
    override fun luPivot(lu: DoubleTensor, pivots: IntTensor): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
        /*
        // todo checks
        val n = lu.shape[0]
        val p = lu.zeroesLike()
        pivots.buffer.unsafeToIntArray().forEachIndexed { i, pivot ->
            p[i, pivot] = 1.0
    override fun luPivot(luTensor: DoubleTensor, pivotsTensor: IntTensor): Triple<DoubleTensor, DoubleTensor, DoubleTensor> {
        //todo checks
        val n = luTensor.shape.last()
        val pTensor = luTensor.zeroesLike()
        for ((p, pivot) in pTensor.matrixSequence().zip(pivotsTensor.vectorSequence())){
            for (i in 0 until n){
                p[i, pivot[i]] = 1.0
            }
        }
        val l = lu.zeroesLike()
        val u = lu.zeroesLike()

        val lTensor = luTensor.zeroesLike()
        val uTensor = luTensor.zeroesLike()

        for ((pairLU, lu) in lTensor.matrixSequence().zip(uTensor.matrixSequence()).zip(luTensor.matrixSequence())){
            val (l, u) = pairLU
            for (i in 0 until n) {
                for (j in 0 until n) {
                    if (i == j) {
@@ -99,9 +95,10 @@ public class DoubleLinearOpsTensorAlgebra :
                    }
                }
            }
        }

        return Triple(pTensor, lTensor, uTensor)

        return Triple(p, l, u)*/
        TODO("Andrei, first we need implement get(Int)")
    }

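The elided branches above split each packed LU slice into a unit lower triangle L and an upper triangle U, while the pivot vector is expanded into a permutation matrix P. A self-contained sketch of that per-slice unpacking (plain Kotlin arrays, no kmath types):

// Unpack a packed n x n LU matrix and its pivot vector into explicit P, L, U.
fun unpackLU(lu: Array<DoubleArray>, pivots: IntArray): Triple<Array<DoubleArray>, Array<DoubleArray>, Array<DoubleArray>> {
    val n = lu.size
    val p = Array(n) { DoubleArray(n) }
    val l = Array(n) { DoubleArray(n) }
    val u = Array(n) { DoubleArray(n) }
    for (i in 0 until n) p[i][pivots[i]] = 1.0 // permutation matrix built from the pivot vector
    for (i in 0 until n) {
        for (j in 0 until n) {
            when {
                i == j -> { l[i][j] = 1.0; u[i][j] = lu[i][j] } // unit diagonal on L, diagonal of U from LU
                i < j -> u[i][j] = lu[i][j]                     // strictly upper part belongs to U
                else -> l[i][j] = lu[i][j]                      // strictly lower part holds L's multipliers
            }
        }
    }
    return Triple(p, l, u)
}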
    override fun DoubleTensor.cholesky(): DoubleTensor {

@@ -5,7 +5,7 @@ public interface TensorAlgebra<T, TensorType : TensorStructure<T>> {


    public fun zeros(shape: IntArray): TensorType
    public fun TensorType.zeroesLike(): TensorType
    public fun TensorType.zeroesLike(): TensorType // mb it shouldn't be tensor but algebra method (like in numpy/torch) ?
    public fun ones(shape: IntArray): TensorType
    public fun TensorType.onesLike(): TensorType

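The inline comment above weighs two API shapes for zero-filled allocation. Roughly, and only as an illustration (the interface name below is hypothetical and assumes the surrounding TensorStructure type; neither declaration is part of this commit):

public interface TensorAlgebraSketch<T, TensorType : TensorStructure<T>> {
    // extension style, as declared above: usage reads tensor.zeroesLike()
    public fun TensorType.zeroesLike(): TensorType

    // algebra-level factory style, as in numpy.zeros_like(x) / torch.zeros_like(x): usage reads zeroesLike(tensor)
    public fun zeroesLike(other: TensorType): TensorType
}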