diff --git a/CHANGELOG.md b/CHANGELOG.md
index 857ed060b..a19b1f467 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,7 @@
 - Integration between `MST` and Symja `IExpr`
 - Complex power
 - Separate methods for UInt, Int and Number powers. NaN safety.
+- Tensorflow prototype
 
 ### Changed
 - Exponential operations merged with hyperbolic functions
diff --git a/README.md b/README.md
index 8604873ae..92260716e 100644
--- a/README.md
+++ b/README.md
@@ -50,35 +50,6 @@ module definitions below. The module stability could have the following levels:
   with [binary-compatibility-validator](https://github.com/Kotlin/binary-compatibility-validator) tool.
 * **STABLE**. The API stabilized. Breaking changes are allowed only in major releases.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
 
 ## Modules
diff --git a/docs/templates/README-TEMPLATE.md b/docs/templates/README-TEMPLATE.md
index e75d4c5ed..b0c418697 100644
--- a/docs/templates/README-TEMPLATE.md
+++ b/docs/templates/README-TEMPLATE.md
@@ -50,35 +50,6 @@ module definitions below. The module stability could have the following levels:
   with [binary-compatibility-validator](https://github.com/Kotlin/binary-compatibility-validator) tool.
 * **STABLE**. The API stabilized. Breaking changes are allowed only in major releases.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
 
 ## Modules
 
 $modules
diff --git a/gradle.properties b/gradle.properties
index 130d294a2..7dd9e6d61 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -12,4 +12,4 @@ org.gradle.configureondemand=true
 org.gradle.parallel=true
 org.gradle.jvmargs=-XX:MaxMetaspaceSize=1G
 
-toolsVersion=0.10.9-kotlin-1.6.10
+toolsVersion=0.11.1-kotlin-1.6.10
diff --git a/kmath-tensorflow/build.gradle.kts b/kmath-tensorflow/build.gradle.kts
index c8307f01f..9380a7308 100644
--- a/kmath-tensorflow/build.gradle.kts
+++ b/kmath-tensorflow/build.gradle.kts
@@ -6,8 +6,8 @@ description = "Google tensorflow connector"
 
 dependencies {
     api(project(":kmath-tensors"))
-    api("org.tensorflow:tensorflow-core-api:0.3.3")
-    testImplementation("org.tensorflow:tensorflow-core-platform:0.3.3")
+    api("org.tensorflow:tensorflow-core-api:0.4.0")
+    testImplementation("org.tensorflow:tensorflow-core-platform:0.4.0")
 }
 
 readme {
diff --git a/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowAlgebra.kt b/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowAlgebra.kt
index eb8245944..ecfd8d098 100644
--- a/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowAlgebra.kt
+++ b/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowAlgebra.kt
@@ -11,6 +11,7 @@ import space.kscience.kmath.nd.DefaultStrides
 import space.kscience.kmath.nd.Shape
 import space.kscience.kmath.nd.StructureND
 import space.kscience.kmath.operations.DoubleField
+import space.kscience.kmath.operations.PowerOperations
 
 public class DoubleTensorFlowOutput(
     graph: Graph,
@@ -23,7 +24,7 @@ public class DoubleTensorFlowOutput(
 
 public class DoubleTensorFlowAlgebra internal constructor(
     graph: Graph,
-) : TensorFlowAlgebra<Double, TFloat64, DoubleField>(graph) {
+) : TensorFlowAlgebra<Double, TFloat64, DoubleField>(graph), PowerOperations<StructureND<Double>> {
 
     override val elementAlgebra: DoubleField get() = DoubleField
 
@@ -57,9 +58,22 @@ public class DoubleTensorFlowAlgebra internal constructor(
 
     override fun const(value: Double): Constant<TFloat64> = ops.constant(value)
 
+    override fun divide(
+        left: StructureND<Double>,
+        right: StructureND<Double>,
+    ): TensorFlowOutput<Double, TFloat64> = left.operate(right) { l, r ->
+        ops.math.div(l, r)
+    }
+
+    override fun power(arg: StructureND<Double>, pow: Number): TensorFlowOutput<Double, TFloat64> =
+        arg.operate { ops.math.pow(it, const(pow.toDouble())) }
 }
 
+/**
+ * Compute a tensor with TensorFlow in a single run.
+ *
+ * The resulting tensor is available outside of scope
+ */
 public fun DoubleField.produceWithTF(
     block: DoubleTensorFlowAlgebra.() -> StructureND<Double>,
 ): StructureND<Double> = Graph().use { graph ->
@@ -67,6 +81,11 @@ public fun DoubleField.produceWithTF(
     scope.export(scope.block())
 }
 
+/**
+ * Compute several outputs with TensorFlow in a single run.
+ *
+ * The resulting tensors are available outside of scope
+ */
 public fun DoubleField.produceMapWithTF(
     block: DoubleTensorFlowAlgebra.() -> Map<String, StructureND<Double>>,
 ): Map<String, StructureND<Double>> = Graph().use { graph ->
diff --git a/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/TensorFlowAlgebra.kt b/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/TensorFlowAlgebra.kt
index 7ad91c267..e2541a73e 100644
--- a/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/TensorFlowAlgebra.kt
+++ b/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/TensorFlowAlgebra.kt
@@ -12,6 +12,7 @@ import org.tensorflow.op.core.Max
 import org.tensorflow.op.core.Min
 import org.tensorflow.op.core.Sum
 import org.tensorflow.types.TInt32
+import org.tensorflow.types.family.TNumber
 import org.tensorflow.types.family.TType
 import space.kscience.kmath.misc.PerformancePitfall
 import space.kscience.kmath.misc.UnstableKMathAPI
@@ -29,6 +30,9 @@ internal val NdArray.scalar: T get() = getObject()
 
 public sealed interface TensorFlowTensor<T> : Tensor<T>
 
+/**
+ * Static (eager) in-memory TensorFlow tensor
+ */
 @JvmInline
 public value class TensorFlowArray<T>(public val tensor: NdArray<T>) : Tensor<T> {
     override val shape: Shape get() = tensor.shape().asArray().toIntArray()
@@ -42,6 +46,11 @@ public value class TensorFlowArray(public val tensor: NdArray) : Tensor
     }
 }
 
+/**
+ * Lazy graph-based TensorFlow tensor. The tensor is actualized on call.
+ *
+ * If the tensor is used for intermediate operations, actualizing it could impact performance.
+ */
 public abstract class TensorFlowOutput<T, TT : TType>(
     protected val graph: Graph,
     output: Output<TT>,
@@ -72,11 +81,11 @@ public abstract class TensorFlowOutput(
 }
 
 
-public abstract class TensorFlowAlgebra<T, TT : TType, A : Ring<T>> internal constructor(
+public abstract class TensorFlowAlgebra<T, TT : TNumber, A : Ring<T>> internal constructor(
     protected val graph: Graph,
 ) : TensorAlgebra<T, A> {
 
-    protected val ops: Ops by lazy { Ops.create(graph) }
+    public val ops: Ops by lazy { Ops.create(graph) }
 
     protected abstract fun StructureND<T>.asTensorFlow(): TensorFlowOutput<T, TT>
 
@@ -87,7 +96,10 @@ public abstract class TensorFlowAlgebra> internal con
     override fun StructureND<T>.valueOrNull(): T? = if (shape contentEquals intArrayOf(1))
         get(Shape(0)) else null
 
-    private inline fun StructureND<T>.biOp(
+    /**
+     * Perform binary lazy operation on tensor. Both arguments are implicitly converted
+     */
+    public fun StructureND<T>.operate(
         other: StructureND<T>,
         operation: (left: Operand<TT>, right: Operand<TT>) -> Operand<TT>,
     ): TensorFlowOutput<T, TT> {
@@ -96,7 +108,7 @@ public abstract class TensorFlowAlgebra> internal con
         return operation(left, right).asOutput().wrap()
     }
 
-    private inline fun T.biOp(
+    public fun T.operate(
         other: StructureND<T>,
         operation: (left: Operand<TT>, right: Operand<TT>) -> Operand<TT>,
     ): TensorFlowOutput<T, TT> {
@@ -105,7 +117,7 @@ public abstract class TensorFlowAlgebra> internal con
         return operation(left, right).asOutput().wrap()
     }
 
-    private inline fun StructureND<T>.biOp(
+    public fun StructureND<T>.operate(
         value: T,
         operation: (left: Operand<TT>, right: Operand<TT>) -> Operand<TT>,
     ): TensorFlowOutput<T, TT> {
@@ -114,7 +126,7 @@ public abstract class TensorFlowAlgebra> internal con
         return operation(left, right).asOutput().wrap()
     }
 
-    private inline fun Tensor<T>.inPlaceOp(
+    public fun Tensor<T>.operateInPlace(
         other: StructureND<T>,
         operation: (left: Operand<TT>, right: Operand<TT>) -> Operand<TT>,
     ): Unit {
@@ -124,7 +136,7 @@ public abstract class TensorFlowAlgebra> internal con
         origin.output = operation(left, right).asOutput()
     }
 
-    private inline fun Tensor<T>.inPlaceOp(
+    public fun Tensor<T>.operateInPlace(
         value: T,
         operation: (left: Operand<TT>, right: Operand<TT>) -> Operand<TT>,
     ): Unit {
@@ -134,61 +146,61 @@ public abstract class TensorFlowAlgebra> internal con
         origin.output = operation(left, right).asOutput()
     }
 
-    private inline fun StructureND<T>.unOp(operation: (Operand<TT>) -> Operand<TT>): TensorFlowOutput<T, TT> =
+    public fun StructureND<T>.operate(operation: (Operand<TT>) -> Operand<TT>): TensorFlowOutput<T, TT> =
         operation(asTensorFlow().output).asOutput().wrap()
 
-    override fun T.plus(arg: StructureND<T>): TensorFlowOutput<T, TT> = biOp(arg, ops.math::add)
+    override fun T.plus(arg: StructureND<T>): TensorFlowOutput<T, TT> = operate(arg, ops.math::add)
 
-    override fun StructureND<T>.plus(arg: T): TensorFlowOutput<T, TT> = biOp(arg, ops.math::add)
+    override fun StructureND<T>.plus(arg: T): TensorFlowOutput<T, TT> = operate(arg, ops.math::add)
 
-    override fun StructureND<T>.plus(arg: StructureND<T>): TensorFlowOutput<T, TT> = biOp(arg, ops.math::add)
+    override fun StructureND<T>.plus(arg: StructureND<T>): TensorFlowOutput<T, TT> = operate(arg, ops.math::add)
 
-    override fun Tensor<T>.plusAssign(value: T): Unit = inPlaceOp(value, ops.math::add)
+    override fun Tensor<T>.plusAssign(value: T): Unit = operateInPlace(value, ops.math::add)
 
-    override fun Tensor<T>.plusAssign(arg: StructureND<T>): Unit = inPlaceOp(arg, ops.math::add)
+    override fun Tensor<T>.plusAssign(arg: StructureND<T>): Unit = operateInPlace(arg, ops.math::add)
 
-    override fun StructureND<T>.minus(arg: T): TensorFlowOutput<T, TT> = biOp(arg, ops.math::sub)
+    override fun StructureND<T>.minus(arg: T): TensorFlowOutput<T, TT> = operate(arg, ops.math::sub)
 
-    override fun StructureND<T>.minus(arg: StructureND<T>): TensorFlowOutput<T, TT> = biOp(arg, ops.math::sub)
+    override fun StructureND<T>.minus(arg: StructureND<T>): TensorFlowOutput<T, TT> = operate(arg, ops.math::sub)
 
-    override fun T.minus(arg: StructureND<T>): Tensor<T> = biOp(arg, ops.math::sub)
+    override fun T.minus(arg: StructureND<T>): Tensor<T> = operate(arg, ops.math::sub)
 
-    override fun Tensor<T>.minusAssign(value: T): Unit = inPlaceOp(value, ops.math::sub)
+    override fun Tensor<T>.minusAssign(value: T): Unit = operateInPlace(value, ops.math::sub)
 
-    override fun Tensor<T>.minusAssign(arg: StructureND<T>): Unit = inPlaceOp(arg, ops.math::sub)
+    override fun Tensor<T>.minusAssign(arg: StructureND<T>): Unit = operateInPlace(arg, ops.math::sub)
 
-    override fun T.times(arg: StructureND<T>): TensorFlowOutput<T, TT> = biOp(arg, ops.math::mul)
+    override fun T.times(arg: StructureND<T>): TensorFlowOutput<T, TT> = operate(arg, ops.math::mul)
 
-    override fun StructureND<T>.times(arg: T): TensorFlowOutput<T, TT> = biOp(arg, ops.math::mul)
+    override fun StructureND<T>.times(arg: T): TensorFlowOutput<T, TT> = operate(arg, ops.math::mul)
 
-    override fun StructureND<T>.times(arg: StructureND<T>): TensorFlowOutput<T, TT> = biOp(arg, ops.math::mul)
+    override fun StructureND<T>.times(arg: StructureND<T>): TensorFlowOutput<T, TT> = operate(arg, ops.math::mul)
 
-    override fun Tensor<T>.timesAssign(value: T): Unit = inPlaceOp(value, ops.math::mul)
+    override fun Tensor<T>.timesAssign(value: T): Unit = operateInPlace(value, ops.math::mul)
 
-    override fun Tensor<T>.timesAssign(arg: StructureND<T>): Unit = inPlaceOp(arg, ops.math::mul)
+    override fun Tensor<T>.timesAssign(arg: StructureND<T>): Unit = operateInPlace(arg, ops.math::mul)
 
-    override fun StructureND<T>.unaryMinus(): TensorFlowOutput<T, TT> = unOp(ops.math::neg)
+    override fun StructureND<T>.unaryMinus(): TensorFlowOutput<T, TT> = operate(ops.math::neg)
 
-    override fun Tensor<T>.get(i: Int): Tensor<T> = unOp {
+    override fun Tensor<T>.get(i: Int): Tensor<T> = operate {
         TODO("Not yet implemented")
     }
 
-    override fun Tensor<T>.transpose(i: Int, j: Int): Tensor<T> = unOp {
+    override fun Tensor<T>.transpose(i: Int, j: Int): Tensor<T> = operate {
         ops.linalg.transpose(it, ops.constant(intArrayOf(i, j)))
     }
 
-    override fun Tensor<T>.view(shape: IntArray): Tensor<T> = unOp {
+    override fun Tensor<T>.view(shape: IntArray): Tensor<T> = operate {
         ops.reshape(it, ops.constant(shape))
     }
 
-    override fun Tensor<T>.viewAs(other: StructureND<T>): Tensor<T> = biOp(other) { l, r ->
+    override fun Tensor<T>.viewAs(other: StructureND<T>): Tensor<T> = operate(other) { l, r ->
         ops.reshape(l, ops.shape(r))
     }
 
-    override fun StructureND<T>.dot(other: StructureND<T>): TensorFlowOutput<T, TT> = biOp(other) { l, r ->
+    override fun StructureND<T>.dot(other: StructureND<T>): TensorFlowOutput<T, TT> = operate(other) { l, r ->
         ops.linalg.matMul(
-            if (l.asTensor().shape().numDimensions() == 1) ops.expandDims(l,ops.constant(0)) else l,
-            if (r.asTensor().shape().numDimensions() == 1) ops.expandDims(r,ops.constant(-1)) else r)
+            if (l.asTensor().shape().numDimensions() == 1) ops.expandDims(l, ops.constant(0)) else l,
+            if (r.asTensor().shape().numDimensions() == 1) ops.expandDims(r, ops.constant(-1)) else r)
     }
 
     override fun diagonalEmbedding(
@@ -196,31 +208,31 @@ public abstract class TensorFlowAlgebra> internal con
         offset: Int,
         dim1: Int,
         dim2: Int,
-    ): TensorFlowOutput<T, TT> = diagonalEntries.unOp {
-        TODO()
+    ): TensorFlowOutput<T, TT> = diagonalEntries.operate {
+        TODO("Not yet implemented")
     }
 
-    override fun StructureND<T>.sum(): T = unOp {
+    override fun StructureND<T>.sum(): T = operate {
         ops.sum(it, ops.constant(intArrayOf()))
     }.value()
 
-    override fun StructureND<T>.sum(dim: Int, keepDim: Boolean): TensorFlowOutput<T, TT> = unOp {
+    override fun StructureND<T>.sum(dim: Int, keepDim: Boolean): TensorFlowOutput<T, TT> = operate {
         ops.sum(it, ops.constant(dim), Sum.keepDims(keepDim))
     }
 
-    override fun StructureND<T>.min(): T = unOp {
+    override fun StructureND<T>.min(): T = operate {
         ops.min(it, ops.constant(intArrayOf()))
     }.value()
 
-    override fun StructureND<T>.min(dim: Int, keepDim: Boolean): Tensor<T> = unOp {
+    override fun StructureND<T>.min(dim: Int, keepDim: Boolean): Tensor<T> = operate {
         ops.min(it, ops.constant(dim), Min.keepDims(keepDim))
     }
 
-    override fun StructureND<T>.max(): T = unOp {
+    override fun StructureND<T>.max(): T = operate {
         ops.max(it, ops.constant(intArrayOf()))
     }.value()
 
-    override fun StructureND<T>.max(dim: Int, keepDim: Boolean): Tensor<T> = unOp {
+    override fun StructureND<T>.max(dim: Int, keepDim: Boolean): Tensor<T> = operate {
         ops.max(it, ops.constant(dim), Max.keepDims(keepDim))
     }
diff --git a/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/tfOperations.kt b/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/tfOperations.kt
new file mode 100644
index 000000000..257d4d6ea
--- /dev/null
+++ b/kmath-tensorflow/src/main/kotlin/space/kscience/kmath/tensorflow/tfOperations.kt
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2018-2021 KMath contributors.
+ * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
+ */
+
+package space.kscience.kmath.tensorflow
+
+import org.tensorflow.types.family.TNumber
+import space.kscience.kmath.nd.StructureND
+import space.kscience.kmath.operations.Ring
+import space.kscience.kmath.operations.TrigonometricOperations
+
+//
+
+// TODO add other operations
+
+public fun <T, TT : TNumber, A> TensorFlowAlgebra<T, TT, A>.sin(
+    arg: StructureND<T>,
+): TensorFlowOutput<T, TT> where A : TrigonometricOperations<T>, A : Ring<T> = arg.operate { ops.math.sin(it) }
+
+public fun <T, TT : TNumber, A> TensorFlowAlgebra<T, TT, A>.cos(
+    arg: StructureND<T>,
+): TensorFlowOutput<T, TT> where A : TrigonometricOperations<T>, A : Ring<T> = arg.operate { ops.math.cos(it) }
diff --git a/kmath-tensorflow/src/test/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowOps.kt b/kmath-tensorflow/src/test/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowOps.kt
index b7a4b94b4..805ad7c66 100644
--- a/kmath-tensorflow/src/test/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowOps.kt
+++ b/kmath-tensorflow/src/test/kotlin/space/kscience/kmath/tensorflow/DoubleTensorFlowOps.kt
@@ -1,9 +1,10 @@
 package space.kscience.kmath.tensorflow
 
 import org.junit.jupiter.api.Test
-import space.kscience.kmath.nd.StructureND
+import space.kscience.kmath.nd.get
 import space.kscience.kmath.nd.structureND
 import space.kscience.kmath.operations.DoubleField
+import kotlin.test.assertEquals
 
 class DoubleTensorFlowOps {
     @Test
@@ -13,7 +14,20 @@ class DoubleTensorFlowOps {
             initial + (initial * 2.0)
         }
 
-        println(StructureND.toString(res))
+        //println(StructureND.toString(res))
+        assertEquals(3.0, res[0, 0])
     }
 
+    @Test
+    fun extensionOps(){
+        val res = DoubleField.produceWithTF {
+            val i = structureND(2, 2) { 0.5 }
+
+            sin(i).pow(2) + cos(i).pow(2)
+        }
+
+        assertEquals(1.0, res[0,0],0.01)
+    }
+
+
 }
\ No newline at end of file
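
The tests above double as the reference usage pattern for the new `produceWithTF` entry point: the block assembles a lazy TensorFlow graph inside `DoubleTensorFlowAlgebra`, and only the exported result is materialized outside of it. A minimal standalone sketch of the same flow, assuming the `kmath-tensorflow` module and a TensorFlow runtime such as `org.tensorflow:tensorflow-core-platform` (see `build.gradle.kts` above) are on the classpath; the `main` wrapper is illustrative only:

```kotlin
import space.kscience.kmath.nd.get
import space.kscience.kmath.nd.structureND
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.tensorflow.produceWithTF

fun main() {
    // Operations inside the block build graph nodes lazily; the whole graph
    // is executed in a single TensorFlow run when the result is exported.
    val res = DoubleField.produceWithTF {
        val initial = structureND(2, 2) { 1.0 } // 2x2 tensor filled with 1.0
        initial + (initial * 2.0)               // lazy element-wise graph ops
    }
    println(res[0, 0]) // 3.0
}
```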