diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/AnalyticTensorAlgebra.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/AnalyticTensorAlgebra.kt
new file mode 100644
index 000000000..17a25b6b3
--- /dev/null
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/AnalyticTensorAlgebra.kt
@@ -0,0 +1,112 @@
+package space.kscience.kmath.tensors
+
+
+public interface AnalyticTensorAlgebra<T, TensorType : TensorStructure<T>> :
+    TensorPartialDivisionAlgebra<T, TensorType> {
+
+    //https://pytorch.org/docs/stable/generated/torch.exp.html
+    public fun TensorType.exp(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.log.html
+    public fun TensorType.log(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.sqrt.html
+    public fun TensorType.sqrt(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.square.html
+    public fun TensorType.square(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.cos.html#torch.cos
+    public fun TensorType.cos(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.acos.html#torch.acos
+    public fun TensorType.acos(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.cosh.html#torch.cosh
+    public fun TensorType.cosh(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.acosh.html#torch.acosh
+    public fun TensorType.acosh(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.sin.html#torch.sin
+    public fun TensorType.sin(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.asin.html#torch.asin
+    public fun TensorType.asin(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.sinh.html#torch.sinh
+    public fun TensorType.sinh(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.asinh.html#torch.asinh
+    public fun TensorType.asinh(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.tan.html#torch.tan
+    public fun TensorType.tan(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.atan.html#torch.atan
+    public fun TensorType.atan(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.tanh.html#torch.tanh
+    public fun TensorType.tanh(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.atanh.html#torch.atanh
+    public fun TensorType.atanh(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.ceil.html#torch.ceil
+    public fun TensorType.ceil(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.floor.html#torch.floor
+    public fun TensorType.floor(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.clamp.html#torch.clamp
+    public fun TensorType.clamp(min: T, max: T): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.erf.html#torch.erf
+    public fun TensorType.erf(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.erfinv.html#torch.erfinv
+    public fun TensorType.erfinv(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.erfc.html#torch.erfc
+    public fun TensorType.erfc(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.lerp.html#torch.lerp
+    public fun TensorType.lerp(end: TensorType, weight: TensorType): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.lgamma.html#torch.lgamma
+    public fun TensorType.lgamma(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.logit.html#torch.logit
+    public fun TensorType.logit(eps: T): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.igamma.html#torch.igamma
+    public fun TensorType.igamma(other: TensorType): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.igammac.html#torch.igammac
+    public fun TensorType.igammac(other: TensorType): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.mvlgamma.html#torch.mvlgamma
+    public fun TensorType.mvlgamma(dimensions: Int): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.polygamma.html#torch.polygamma
+    public fun TensorType.polygamma(order: Int): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.pow.html#torch.pow
+    public fun TensorType.pow(exponent: T): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.round.html#torch.round
+    public fun TensorType.round(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.sigmoid.html#torch.sigmoid
+    public fun TensorType.sigmoid(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.sinc.html#torch.sinc
+    public fun TensorType.sinc(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.heaviside.html#torch.heaviside
+    public fun TensorType.heaviside(values: TensorType): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.trapz.html#torch.trapz
+    public fun TensorType.trapz(xValues: TensorType, dim: Int): TensorType
+
+}
\ No newline at end of file
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/LinearOpsTensorAlgebra.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/LinearOpsTensorAlgebra.kt
new file mode 100644
index 000000000..bd9cbfd45
--- /dev/null
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/LinearOpsTensorAlgebra.kt
@@ -0,0 +1,43 @@
+package space.kscience.kmath.tensors
+
+
+public interface LinearOpsTensorAlgebra<T, TensorType : TensorStructure<T>> :
+    TensorPartialDivisionAlgebra<T, TensorType> {
+
+    //https://pytorch.org/docs/stable/generated/torch.eye.html
+    public fun eye(n: Int): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.matmul.html
+    public infix fun TensorType.dot(other: TensorType): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.diag_embed.html
+    public fun diagonalEmbedding(
+        diagonalEntries: TensorType,
+        offset: Int = 0, dim1: Int = -2, dim2: Int = -1
+    ): TensorType
+
+    //https://pytorch.org/docs/stable/linalg.html#torch.linalg.det
+    public fun TensorType.det(): TensorType
+
+    //https://pytorch.org/docs/stable/linalg.html#torch.linalg.inv
+    public fun TensorType.inv(): TensorType
+
+    //https://pytorch.org/docs/stable/linalg.html#torch.linalg.cholesky
+    public fun TensorType.cholesky(): TensorType
+
+    //https://pytorch.org/docs/stable/linalg.html#torch.linalg.qr
+    public fun TensorType.qr(): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.lu.html
+    public fun TensorType.lu(): Pair<TensorType, IntTensor>
+
+    //https://pytorch.org/docs/stable/generated/torch.lu_unpack.html
+    public fun luPivot(aLU: TensorType, pivots: IntTensor): Triple<TensorType, TensorType, TensorType>
+
+    //https://pytorch.org/docs/stable/linalg.html#torch.linalg.svd
+    public fun TensorType.svd(): Triple<TensorType, TensorType, TensorType>
+
+    //https://pytorch.org/docs/stable/generated/torch.symeig.html
+    public fun TensorType.symEig(eigenvectors: Boolean = true): Pair<TensorType, TensorType>
+
+}
\ No newline at end of file
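Taken together, `lu` and `luPivot` are meant to compose into a full factorization: `lu` returns the packed factors plus pivot indices, and `luPivot` expands them into explicit P, L, U matrices. A minimal usage sketch, assuming the `RealLinearOpsTensorAlgebra` implementation and the `RealTensor(shape, buffer)` constructor introduced later in this patch; the numbers are illustrative only:

```kotlin
import space.kscience.kmath.tensors.*

fun luExample() = with(RealLinearOpsTensorAlgebra()) {
    // 2x2 matrix in row-major order
    val a = RealTensor(intArrayOf(2, 2), doubleArrayOf(4.0, 3.0, 6.0, 3.0))
    val (packed, pivots) = a.lu()           // packed L/U factors + pivot indices
    val (p, l, u) = luPivot(packed, pivots) // explicit P, L, U matrices
    // Expected invariant once dot is implemented: p dot a == l dot u
}
```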
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/RealAnalyticTensorAlgebra.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/RealAnalyticTensorAlgebra.kt
new file mode 100644
index 000000000..cfecac0f4
--- /dev/null
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/RealAnalyticTensorAlgebra.kt
@@ -0,0 +1,148 @@
+package space.kscience.kmath.tensors
+
+public class RealAnalyticTensorAlgebra :
+    AnalyticTensorAlgebra<Double, RealTensor>,
+    RealTensorAlgebra()
+{
+    override fun RealTensor.exp(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.log(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.sqrt(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.square(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.cos(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.acos(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.cosh(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.acosh(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.sin(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.asin(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.sinh(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.asinh(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.tan(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.atan(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.tanh(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.atanh(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.ceil(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.floor(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.clamp(min: Double, max: Double): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.erf(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.erfinv(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.erfc(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.lerp(end: RealTensor, weight: RealTensor): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.lgamma(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.logit(eps: Double): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.igamma(other: RealTensor): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.igammac(other: RealTensor): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.mvlgamma(dimensions: Int): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.polygamma(order: Int): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.pow(exponent: Double): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.round(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.sigmoid(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.sinc(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.heaviside(values: RealTensor): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.trapz(xValues: RealTensor, dim: Int): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+
+}
\ No newline at end of file
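Every method in `RealAnalyticTensorAlgebra` is still a TODO. Most of them reduce to an elementwise map over the underlying buffer; here is a minimal sketch of a possible body for the `exp()` TODO, assuming `RealTensor` exposes `shape` and a `buffer.array: DoubleArray` view the way the LU code in this patch uses `buffer.array` on `IntTensor`:

```kotlin
// Inside RealAnalyticTensorAlgebra — sketch only, not part of this patch.
override fun RealTensor.exp(): RealTensor {
    val arr = this.buffer.array                              // assumed DoubleArray view
    val res = DoubleArray(arr.size) { i -> kotlin.math.exp(arr[i]) } // elementwise exp
    return RealTensor(this.shape, res)                       // same shape, new buffer
}
```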
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/RealLinearOpsTensorAlgebra.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/RealLinearOpsTensorAlgebra.kt
new file mode 100644
index 000000000..18c2050c0
--- /dev/null
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/RealLinearOpsTensorAlgebra.kt
@@ -0,0 +1,133 @@
+package space.kscience.kmath.tensors
+
+import space.kscience.kmath.structures.array
+
+public class RealLinearOpsTensorAlgebra :
+    LinearOpsTensorAlgebra<Double, RealTensor>,
+    RealTensorAlgebra()
+{
+    override fun eye(n: Int): RealTensor {
+        val shape = intArrayOf(n, n)
+        val buffer = DoubleArray(n * n) { 0.0 }
+        val res = RealTensor(shape, buffer)
+        for (i in 0 until n) {
+            res[intArrayOf(i, i)] = 1.0
+        }
+        return res
+    }
+
+
+    override fun RealTensor.dot(other: RealTensor): RealTensor {
+        TODO("Alya")
+    }
+
+    override fun diagonalEmbedding(diagonalEntries: RealTensor, offset: Int, dim1: Int, dim2: Int): RealTensor {
+        TODO("Alya")
+    }
+
+
+    override fun RealTensor.lu(): Pair<RealTensor, IntTensor> {
+        // todo checks
+        val lu = this.copy()
+        val m = this.shape[0]
+        val pivot = IntArray(m)
+
+
+        // Initialize permutation array and parity
+        for (row in 0 until m) pivot[row] = row
+        var even = true
+
+        for (i in 0 until m) {
+            var maxA = -1.0
+            var iMax = i
+
+            for (k in i until m) {
+                val absA = kotlin.math.abs(lu[k, i])
+                if (absA > maxA) {
+                    maxA = absA
+                    iMax = k
+                }
+            }
+
+            //todo check singularity
+
+            if (iMax != i) {
+
+                val j = pivot[i]
+                pivot[i] = pivot[iMax]
+                pivot[iMax] = j
+                even = !even // flip permutation parity on each row swap
+
+                for (k in 0 until m) {
+                    val tmp = lu[i, k]
+                    lu[i, k] = lu[iMax, k]
+                    lu[iMax, k] = tmp
+                }
+
+            }
+
+            for (j in i + 1 until m) {
+                lu[j, i] /= lu[i, i]
+                for (k in i + 1 until m) {
+                    lu[j, k] -= lu[j, i] * lu[i, k]
+                }
+            }
+        }
+        return Pair(lu, IntTensor(intArrayOf(m), pivot))
+    }
+
+    override fun luPivot(lu: RealTensor, pivots: IntTensor): Triple<RealTensor, RealTensor, RealTensor> {
+        // todo checks
+        val n = lu.shape[0]
+        val p = lu.zeroesLike()
+        pivots.buffer.array.forEachIndexed { i, pivot ->
+            p[i, pivot] = 1.0
+        }
+        val l = lu.zeroesLike()
+        val u = lu.zeroesLike()
+
+        for (i in 0 until n) {
+            for (j in 0 until n) {
+                if (i == j) {
+                    l[i, j] = 1.0
+                }
+                if (j < i) {
+                    l[i, j] = lu[i, j]
+                }
+                if (j >= i) {
+                    u[i, j] = lu[i, j]
+                }
+            }
+        }
+        return Triple(p, l, u)
+    }
+
+    override fun RealTensor.det(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.inv(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.cholesky(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.qr(): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+
+    override fun RealTensor.svd(): Triple<RealTensor, RealTensor, RealTensor> {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.symEig(eigenvectors: Boolean): Pair<RealTensor, RealTensor> {
+        TODO("Not yet implemented")
+    }
+
+}
+
+public inline fun <R> RealLinearOpsTensorAlgebra(block: RealLinearOpsTensorAlgebra.() -> R): R =
+    RealLinearOpsTensorAlgebra().block()
\ No newline at end of file
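`lu()` above is Doolittle-style LU factorization with partial pivoting: for each column it selects the largest-magnitude pivot, swaps rows (flipping the permutation parity), then eliminates below the diagonal. One direct payoff is the still-TODO `det()`: the determinant is the product of U's diagonal scaled by the sign of the pivot permutation. A sketch under this file's own conventions (square input, `buffer.array` access, scalar results stored as shape-`[1]` tensors); it assumes the pivot array encodes the row permutation as produced by `lu()`:

```kotlin
// Sketch only, not part of this patch: det() built on top of lu().
override fun RealTensor.det(): RealTensor {
    val (packed, pivots) = this.lu()
    val n = this.shape[0]
    var det = 1.0
    for (i in 0 until n) det *= packed[i, i] // product of U's diagonal
    // Recover the permutation sign by counting transpositions in the pivot array.
    var swaps = 0
    val perm = pivots.buffer.array.copyOf()
    for (i in 0 until n) {
        while (perm[i] != i) {
            val j = perm[i]
            perm[i] = perm[j]
            perm[j] = j
            swaps++
        }
    }
    return RealTensor(intArrayOf(1), doubleArrayOf(if (swaps % 2 == 0) det else -det))
}
```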
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/RealTensorAlgebra.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/RealTensorAlgebra.kt
index b3e54f077..05e2b57d2 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/RealTensorAlgebra.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/RealTensorAlgebra.kt
@@ -1,12 +1,9 @@
 package space.kscience.kmath.tensors
 
-import space.kscience.kmath.structures.RealBuffer
 import space.kscience.kmath.structures.array
-import kotlin.math.abs
-import kotlin.math.max
 
-public class RealTensorAlgebra : TensorPartialDivisionAlgebra<Double, RealTensor> {
+public open class RealTensorAlgebra : TensorPartialDivisionAlgebra<Double, RealTensor> {
 
     override fun RealTensor.value(): Double {
         check(this.shape contentEquals intArrayOf(1)) {
@@ -15,24 +12,13 @@ public class RealTensorAlgebra : TensorPartialDivisionAlgebra<Double, RealTensor> {
+    override fun cat(tensors: List<RealTensor>, dim: Int): RealTensor {
+        TODO("Not yet implemented")
+    }
@@ -213,6 +246,10 @@ public class RealTensorAlgebra : TensorPartialDivisionAlgebra<Double, RealTensor> {
-    override fun RealTensor.lu(): Pair<RealTensor, IntTensor> {
-        // todo checks
-        val lu = this.copy()
-        val m = this.shape[0]
-        val pivot = IntArray(m)
-
-
-        // Initialize permutation array and parity
-        for (row in 0 until m) pivot[row] = row
-        var even = true
-
-        for (i in 0 until m) {
-            var maxA = -1.0
-            var iMax = i
-
-            for (k in i until m) {
-                val absA = abs(lu[k, i])
-                if (absA > maxA) {
-                    maxA = absA
-                    iMax = k
-                }
-            }
-
-            //todo check singularity
-
-            if (iMax != i) {
-
-                val j = pivot[i]
-                pivot[i] = pivot[iMax]
-                pivot[iMax] = j
-                even != even
-
-                for (k in 0 until m) {
-                    val tmp = lu[i, k]
-                    lu[i, k] = lu[iMax, k]
-                    lu[iMax, k] = tmp
-                }
-
-            }
-
-            for (j in i + 1 until m) {
-                lu[j, i] /= lu[i, i]
-                for (k in i + 1 until m) {
-                    lu[j, k] -= lu[j, i] * lu[i, k]
-                }
-            }
-        }
-        return Pair(lu, IntTensor(intArrayOf(m), pivot))
-    }
-
-    override fun luUnpack(lu: RealTensor, pivots: IntTensor): Triple<RealTensor, RealTensor, RealTensor> {
-        // todo checks
-        val n = lu.shape[0]
-        val p = zeroesLike(lu)
-        pivots.buffer.array.forEachIndexed { i, pivot ->
-            p[i, pivot] = 1.0
-        }
-        val l = zeroesLike(lu)
-        val u = zeroesLike(lu)
-
-        for (i in 0 until n){
-            for (j in 0 until n){
-                if (i == j) {
-                    l[i, j] = 1.0
-                }
-                if (j < i) {
-                    l[i, j] = lu[i, j]
-                }
-                if (j >= i) {
-                    u[i, j] = lu[i, j]
-                }
-            }
-        }
-        return Triple(p, l, u)
-    }
-
-    override fun RealTensor.svd(): Triple<RealTensor, RealTensor, RealTensor> {
-        /**
-         * Main first task for @AlyaNovikova
-         */
+    override fun RealTensor.std(dim: Int, unbiased: Boolean, keepDim: Boolean): RealTensor {
         TODO("Not yet implemented")
     }
 
-    override fun RealTensor.symEig(eigenvectors: Boolean): Pair<RealTensor, RealTensor> {
+    override fun RealTensor.variance(dim: Int, unbiased: Boolean, keepDim: Boolean): RealTensor {
+        TODO("Not yet implemented")
+    }
+
+    override fun RealTensor.histc(bins: Int, min: Double, max: Double): RealTensor {
         TODO("Not yet implemented")
     }
 
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/TensorAlgebra.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/TensorAlgebra.kt
index 280acf8ea..a53054e21 100644
--- a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/TensorAlgebra.kt
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/TensorAlgebra.kt
@@ -5,11 +5,17 @@ public interface TensorAlgebra<T, TensorType : TensorStructure<T>> {
 
     public fun TensorType.value(): T
 
-    public fun eye(n: Int): TensorType
     public fun zeros(shape: IntArray): TensorType
-    public fun zeroesLike(other: TensorType): TensorType
+    public fun TensorType.zeroesLike(): TensorType
     public fun ones(shape: IntArray): TensorType
-    public fun onesLike(shape: IntArray): TensorType
+    public fun TensorType.onesLike(): TensorType
+
+
+    //https://pytorch.org/docs/stable/generated/torch.full.html
+    public fun full(shape: IntArray, value: T): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.full_like.html#torch.full_like
+    public fun TensorType.fullLike(value: T): TensorType
 
     public fun TensorType.copy(): TensorType
 
@@ -33,15 +39,6 @@ public interface TensorAlgebra<T, TensorType : TensorStructure<T>> {
 
     public operator fun TensorType.unaryMinus(): TensorType
 
-    //https://pytorch.org/docs/stable/generated/torch.matmul.html
-    public infix fun TensorType.dot(other: TensorType): TensorType
-
-    //https://pytorch.org/docs/stable/generated/torch.diag_embed.html
-    public fun diagonalEmbedding(
-        diagonalEntries: TensorType,
-        offset: Int = 0, dim1: Int = -2, dim2: Int = -1
-    ): TensorType
-
     //https://pytorch.org/docs/stable/generated/torch.transpose.html
     public fun TensorType.transpose(i: Int, j: Int): TensorType
 
@@ -53,35 +50,45 @@ public interface TensorAlgebra<T, TensorType : TensorStructure<T>> {
     public fun TensorType.abs(): TensorType
 
     //https://pytorch.org/docs/stable/generated/torch.sum.html
-    public fun TensorType.sum(): TensorType
+    public fun TensorType.sum(dim: Int, keepDim: Boolean): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.cumsum.html#torch.cumsum
+    public fun TensorType.cumsum(dim: Int): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.prod.html#torch.prod
+    public fun TensorType.prod(dim: Int, keepDim: Boolean): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.cumprod.html#torch.cumprod
+    public fun TensorType.cumprod(dim: Int): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.max.html#torch.max
+    public fun TensorType.max(dim: Int, keepDim: Boolean): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.cummax.html#torch.cummax
+    public fun TensorType.cummax(dim: Int): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.min.html#torch.min
+    public fun TensorType.min(dim: Int, keepDim: Boolean): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.cummin.html#torch.cummin
+    public fun TensorType.cummin(dim: Int): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.median.html#torch.median
+    public fun TensorType.median(dim: Int, keepDim: Boolean): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.maximum.html#torch.maximum
+    public fun maximum(lhs: TensorType, rhs: TensorType): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.minimum.html#torch.minimum
+    public fun minimum(lhs: TensorType, rhs: TensorType): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.sort.html#torch.sort
+    public fun TensorType.sort(dim: Int, keepDim: Boolean, descending: Boolean): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.cat.html#torch.cat
+    public fun cat(tensors: List<TensorType>, dim: Int): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.flatten.html#torch.flatten
+    public fun TensorType.flatten(startDim: Int, endDim: Int): TensorType
+
 }
-
-// https://proofwiki.org/wiki/Definition:Division_Algebra
-public interface TensorPartialDivisionAlgebra<T, TensorType : TensorStructure<T>> :
-    TensorAlgebra<T, TensorType> {
-
-    public operator fun TensorType.div(value: T): TensorType
-    public operator fun TensorType.div(other: TensorType): TensorType
-    public operator fun TensorType.divAssign(value: T)
-    public operator fun TensorType.divAssign(other: TensorType)
-
-    //https://pytorch.org/docs/stable/generated/torch.exp.html
-    public fun TensorType.exp(): TensorType
-
-    //https://pytorch.org/docs/stable/generated/torch.log.html
-    public fun TensorType.log(): TensorType
-
-    // todo change type of pivots
-    //https://pytorch.org/docs/stable/generated/torch.lu.html
-    public fun TensorType.lu(): Pair<TensorType, IntTensor>
-
-    //https://pytorch.org/docs/stable/generated/torch.lu_unpack.html
-    public fun luUnpack(A_LU: TensorType, pivots: IntTensor): Triple<TensorType, TensorType, TensorType>
-
-    //https://pytorch.org/docs/stable/generated/torch.svd.html
-    public fun TensorType.svd(): Triple<TensorType, TensorType, TensorType>
-
-    //https://pytorch.org/docs/stable/generated/torch.symeig.html
-    public fun TensorType.symEig(eigenvectors: Boolean = true): Pair<TensorType, TensorType>
-
-}
\ No newline at end of file
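The reductions in `TensorAlgebra` changed shape in this patch: the old whole-tensor `sum()` became the dimensional `sum(dim, keepDim)`, following PyTorch's convention that the reduced axis is dropped unless `keepDim` is true, while the `cum*` variants keep the input shape. A sketch of the intended shape semantics; the `RealTensorAlgebra` overrides are still TODO here, so the comments state the expected contract rather than verified output:

```kotlin
with(RealTensorAlgebra()) {
    val t = RealTensor(intArrayOf(2, 3), doubleArrayOf(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
    t.sum(dim = 0, keepDim = false) // expected shape [3]:    [5.0, 7.0, 9.0]
    t.sum(dim = 0, keepDim = true)  // expected shape [1, 3]: [[5.0, 7.0, 9.0]]
    t.cumsum(dim = 1)               // same shape [2, 3]:     [[1, 3, 6], [4, 9, 15]]
}
```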
diff --git a/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/TensorPartialDivisionAlgebra.kt b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/TensorPartialDivisionAlgebra.kt
new file mode 100644
index 000000000..2d448fa8c
--- /dev/null
+++ b/kmath-core/src/commonMain/kotlin/space/kscience/kmath/tensors/TensorPartialDivisionAlgebra.kt
@@ -0,0 +1,27 @@
+package space.kscience.kmath.tensors
+
+// https://proofwiki.org/wiki/Definition:Division_Algebra
+public interface TensorPartialDivisionAlgebra<T, TensorType : TensorStructure<T>> :
+    TensorAlgebra<T, TensorType> {
+
+    public operator fun TensorType.div(value: T): TensorType
+    public operator fun TensorType.div(other: TensorType): TensorType
+    public operator fun TensorType.divAssign(value: T)
+    public operator fun TensorType.divAssign(other: TensorType)
+
+    //https://pytorch.org/docs/stable/generated/torch.mean.html#torch.mean
+    public fun TensorType.mean(dim: Int, keepDim: Boolean): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.quantile.html#torch.quantile
+    public fun TensorType.quantile(q: T, dim: Int, keepDim: Boolean): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.std.html#torch.std
+    public fun TensorType.std(dim: Int, unbiased: Boolean, keepDim: Boolean): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.var.html#torch.var
+    public fun TensorType.variance(dim: Int, unbiased: Boolean, keepDim: Boolean): TensorType
+
+    //https://pytorch.org/docs/stable/generated/torch.histc.html#torch.histc
+    public fun TensorType.histc(bins: Int, min: T, max: T): TensorType
+
+}
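`variance` and `std` carry an `unbiased` flag; as in the PyTorch docs linked above, it selects Bessel's correction (divide by n − 1 instead of n), and `std` is the square root of the corresponding variance. A plain-`Double` illustration of the two estimators the flag switches between:

```kotlin
val xs = doubleArrayOf(1.0, 2.0, 3.0, 4.0)
val mean = xs.sum() / xs.size                                         // 2.5
val biased = xs.sumOf { (it - mean) * (it - mean) } / xs.size         // /n      -> 1.25
val unbiased = xs.sumOf { (it - mean) * (it - mean) } / (xs.size - 1) // /(n-1)  -> ~1.667
// A tensor implementation of variance(dim, unbiased, keepDim) applies the same
// formula along `dim`; std(...) is then its elementwise square root.
```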