Compare commits

1 commit: dev ... commandert

Author | SHA1 | Date
---|---|---
 | b93291adab | 
.github/CODEOWNERS (vendored), 3 changed lines

@@ -1,3 +0,0 @@
@altavir

/kmath-trajectory @ESchouten
.github/workflows/build.yml (vendored), 28 changed lines

@@ -7,18 +7,26 @@ on:

jobs:
build:
runs-on: windows-latest
timeout-minutes: 20
strategy:
matrix:
os: [ macOS-latest, windows-latest ]
runs-on: ${{matrix.os}}
timeout-minutes: 40
steps:
- uses: actions/checkout@v3
- uses: actions/setup-java@v3.5.1
- uses: actions/checkout@v3.0.0
- uses: actions/setup-java@v3.0.0
with:
java-version: '11'
distribution: 'liberica'
cache: 'gradle'
java-version: 11
distribution: liberica
- name: Cache konan
uses: actions/cache@v3.0.1
with:
path: ~/.konan
key: ${{ runner.os }}-gradle-${{ hashFiles('*.gradle.kts') }}
restore-keys: |
${{ runner.os }}-gradle-
- name: Gradle Wrapper Validation
uses: gradle/wrapper-validation-action@v1.0.4
- name: Gradle Build
uses: gradle/gradle-build-action@v2.4.2
- uses: gradle/gradle-build-action@v2.1.5
with:
arguments: test jvmTest
arguments: build
.github/workflows/pages.yml (vendored), 9 changed lines

@@ -1,9 +1,8 @@
name: Dokka publication

on:
workflow_dispatch:
release:
types: [ created ]
push:
branches: [ master ]

jobs:
build:

@@ -22,10 +21,10 @@ jobs:
key: ${{ runner.os }}-gradle-${{ hashFiles('*.gradle.kts') }}
restore-keys: |
${{ runner.os }}-gradle-
- uses: gradle/gradle-build-action@v2.4.2
- uses: gradle/gradle-build-action@v2.1.5
with:
arguments: dokkaHtmlMultiModule --no-parallel
- uses: JamesIves/github-pages-deploy-action@v4.3.0
- uses: JamesIves/github-pages-deploy-action@4.2.5
with:
branch: gh-pages
folder: build/dokka/htmlMultiModule
.github/workflows/publish.yml (vendored), 21 changed lines

@@ -15,7 +15,7 @@ jobs:
runs-on: ${{matrix.os}}
steps:
- uses: actions/checkout@v3.0.0
- uses: actions/setup-java@v3.10.0
- uses: actions/setup-java@v3.0.0
with:
java-version: 11
distribution: liberica

@@ -26,25 +26,24 @@ jobs:
key: ${{ runner.os }}-gradle-${{ hashFiles('*.gradle.kts') }}
restore-keys: |
${{ runner.os }}-gradle-
- uses: gradle/wrapper-validation-action@v1.0.4
- name: Publish Windows Artifacts
if: matrix.os == 'windows-latest'
uses: gradle/gradle-build-action@v2.4.2
uses: gradle/gradle-build-action@v2.1.5
with:
arguments: |
publishAllPublicationsToSpaceRepository
-Ppublishing.targets=all
releaseAll
-Ppublishing.enabled=true
-Ppublishing.space.user=${{ secrets.SPACE_APP_ID }}
-Ppublishing.space.token=${{ secrets.SPACE_APP_SECRET }}
- name: Publish Mac Artifacts
if: matrix.os == 'macOS-latest'
uses: gradle/gradle-build-action@v2.4.2
uses: gradle/gradle-build-action@v2.1.5
with:
arguments: |
publishMacosX64PublicationToSpaceRepository
publishMacosArm64PublicationToSpaceRepository
publishIosX64PublicationToSpaceRepository
publishIosArm64PublicationToSpaceRepository
publishIosSimulatorArm64PublicationToSpaceRepository
-Ppublishing.targets=all
releaseMacosX64
releaseIosArm64
releaseIosX64
-Ppublishing.enabled=true
-Ppublishing.space.user=${{ secrets.SPACE_APP_ID }}
-Ppublishing.space.token=${{ secrets.SPACE_APP_SECRET }}
.gitignore (vendored), 8 changed lines

@@ -3,11 +3,10 @@ build/
out/

.idea/
.vscode/
.fleet/
.kotlin/


.vscode/

# Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored)
!gradle-wrapper.jar

@@ -20,5 +19,4 @@ out/

!/.idea/copyright/
!/.idea/scopes/
/gradle/yarn.lock

/kotlin-js-store/yarn.lock
@@ -1,7 +1,6 @@
<component name="CopyrightManager">
<copyright>
<option name="allowReplaceRegexp" value="Copyright \d{4}-\d{4} KMath" />
<option name="notice" value="Copyright 2018-&#36;today.year KMath contributors. Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file." />
<option name="myName" value="kmath" />
</copyright>
</component>
<copyright>
<option name="notice" value="Copyright 2018-2021 KMath contributors. Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file." />
<option name="myName" value="kmath" />
</copyright>
</component>
@@ -1,5 +1,5 @@
<component name="CopyrightManager">
<settings>
<settings default="kmath">
<module2copyright>
<element module="Apply copyright" copyright="kmath" />
</module2copyright>
.space.kts, 47 changed lines

@@ -1,48 +1,3 @@
import kotlin.io.path.readText

val projectName = "kmath"

job("Build") {
    //Perform only jvm tests
    gradlew("spc.registry.jetbrains.space/p/sci/containers/kotlin-ci:1.0.3", "test", "jvmTest")
    gradlew("openjdk:11", "build")
}

job("Publish") {
    startOn {
        gitPush { enabled = false }
    }
    container("spc.registry.jetbrains.space/p/sci/containers/kotlin-ci:1.0.3") {
        env["SPACE_USER"] = "{{ project:space_user }}"
        env["SPACE_TOKEN"] = "{{ project:space_token }}"
        kotlinScript { api ->

            val spaceUser = System.getenv("SPACE_USER")
            val spaceToken = System.getenv("SPACE_TOKEN")

            // write the version to the build directory
            api.gradlew("version")

            //read the version from build file
            val version = java.nio.file.Path.of("build/project-version.txt").readText()

            val revisionSuffix = if (version.endsWith("SNAPSHOT")) {
                "-" + api.gitRevision().take(7)
            } else {
                ""
            }

            api.space().projects.automation.deployments.start(
                project = api.projectIdentifier(),
                targetIdentifier = TargetIdentifier.Key(projectName),
                version = version + revisionSuffix,
                // automatically update deployment status based on the status of a job
                syncWithAutomationJob = true
            )
            api.gradlew(
                "publishAllPublicationsToSpaceRepository",
                "-Ppublishing.space.user=\"$spaceUser\"",
                "-Ppublishing.space.token=\"$spaceToken\"",
            )
        }
    }
}
CHANGELOG.md, 128 changed lines

@@ -1,95 +1,7 @@
# KMath

## Unreleased

## [Unreleased]
### Added

### Changed

### Deprecated

### Removed

### Fixed

### Security

## 0.4.0-dev-3 - 2024-02-18

### Added

- Reification. Explicit `SafeType` for algebras.
- Integer division algebras.
- Float32 geometries.
- New Attributes-kt module that could be used as stand-alone. It declares. type-safe attributes containers.
- Explicit `mutableStructureND` builders for mutable structures.
- `Buffer.asList()` zero-copy transformation.
- Wasm support.
- Parallel implementation of `LinearSpace` for Float64
- Parallel buffer factories

### Changed

- Buffer copy removed from API (added as an extension).
- Default naming for algebra and buffers now uses IntXX/FloatXX notation instead of Java types.
- Remove unnecessary inlines in basic algebras.
- QuaternionField -> QuaternionAlgebra and does not implement `Field` anymore since it is non-commutative
- kmath-geometry is split into `euclidean2d` and `euclidean3d`
- Features replaced with Attributes.
- Transposed refactored.
- Kmath-memory is moved on top of core.

### Deprecated

- ND4J engine

### Removed

- `asPolynomial` function due to scope pollution
- Codegend for ejml (450 lines of codegen for 1000 lines of code is too much)

### Fixed

- Median statistics
- Complex power of negative real numbers
- Add proper mutability for MutableBufferND rows and columns
- Generic Float32 and Float64 vectors are used in geometry algebras.

## 0.3.1 - 2023-04-09

### Added

- Wasm support for `memory`, `core`, `complex` and `functions` modules.
- Generic builders for `BufferND` and `MutableBufferND`
- `NamedMatrix` - matrix with symbol-based indexing
- `Expression` with default arguments
- Type-aliases for numbers like `Float64`
- Autodiff for generic algebra elements in core!
- Algebra now has an obligatory `bufferFactory` (#477).

### Changed

- Removed marker `Vector` type for geometry
- Geometry uses type-safe angles
- Tensor operations switched to prefix notation
- Row-wise and column-wise ND shapes in the core
- Shape is read-only
- Major refactor of tensors (only minor API changes)
- Kotlin 1.8.20
- `LazyStructure` `deffered` -> `async` to comply with coroutines code style
- Default `dot` operation in tensor algebra no longer support broadcasting. Instead `matmul` operation is added
  to `DoubleTensorAlgebra`.
- Multik went MPP

### Removed

- Trajectory moved to https://github.com/SciProgCentre/maps-kt
- Polynomials moved to https://github.com/SciProgCentre/kmath-polynomial

## 0.3.0

### Added

- `ScaleOperations` interface
- `Field` extends `ScaleOperations`
- Basic integration API

@@ -107,15 +19,8 @@
- Complex power
- Separate methods for UInt, Int and Number powers. NaN safety.
- Tensorflow prototype
- `ValueAndErrorField`
- MST compilation to WASM: #286
- Jafama integration: #176
- `contentEquals` with tolerance: #364
- Compilation to TeX for MST: #254

### Changed

- Annotations moved to `space.kscience.kmath`
- Exponential operations merged with hyperbolic functions
- Space is replaced by Group. Space is reserved for vector spaces.
- VectorSpace is now a vector space

@@ -143,16 +48,11 @@
- Operations -> Ops
- Default Buffer and ND algebras are now Ops and lack neutral elements (0, 1) as well as algebra-level shapes.
- Tensor algebra takes read-only structures as input and inherits AlgebraND
- `UnivariateDistribution` renamed to `Distribution1D`
- Rework of histograms.
- `UnivariateFunction` -> `Function1D`, `MultivariateFunction` -> `FunctionND`

### Deprecated

- Specialized `DoubleBufferAlgebra`

### Removed

- Nearest in Domain. To be implemented in geometry package.
- Number multiplication and division in main Algebra chain
- `contentEquals` from Buffer. It moved to the companion.

@@ -163,14 +63,13 @@
- Algebra elements are completely removed. Use algebra contexts instead.

### Fixed

- Ring inherits RingOperations, not GroupOperations
- Univariate histogram filling

## 0.2.0
### Security

## [0.2.0]
### Added

- `fun` annotation for SAM interfaces in library
- Explicit `public` visibility for all public APIs
- Better trigonometric and hyperbolic functions for `AutoDiffField` (https://github.com/mipt-npm/kmath/pull/140)

@@ -190,7 +89,6 @@
- Basic Quaternion vector support in `kmath-complex`.

### Changed

- Package changed from `scientifik` to `space.kscience`
- Gradle version: 6.6 -> 6.8.2
- Minor exceptions refactor (throwing `IllegalArgumentException` by argument checks instead of `IllegalStateException`)

@@ -214,8 +112,9 @@
- `symbol` method in `Algebra` renamed to `bindSymbol` to avoid ambiguity
- Add `out` projection to `Buffer` generic

### Removed
### Deprecated

### Removed
- `kmath-koma` module because it doesn't support Kotlin 1.4.
- Support of `legacy` JS backend (we will support only IR)
- `toGrid` method.

@@ -224,24 +123,22 @@
- StructureND identity and equals

### Fixed

- `symbol` method in `MstExtendedField` (https://github.com/mipt-npm/kmath/pull/140)

## 0.1.4
### Security

## [0.1.4]

### Added

- Functional Expressions API
- Mathematical Syntax Tree, its interpreter and API
- String to MST parser (https://github.com/mipt-npm/kmath/pull/120)
- MST to JVM bytecode translator (https://github.com/mipt-npm/kmath/pull/94)
- FloatBuffer (specialized MutableBuffer over FloatArray)
- FlaggedBuffer to associate primitive numbers buffer with flags (to mark values infinite or missing, etc.)
- Specialized builder functions for all primitive buffers
  like `IntBuffer(25) { it + 1 }` (https://github.com/mipt-npm/kmath/pull/125)
- Specialized builder functions for all primitive buffers like `IntBuffer(25) { it + 1 }` (https://github.com/mipt-npm/kmath/pull/125)
- Interface `NumericAlgebra` where `number` operation is available to convert numbers to algebraic elements
- Inverse trigonometric functions support in
  ExtendedField (`asin`, `acos`, `atan`) (https://github.com/mipt-npm/kmath/pull/114)
- Inverse trigonometric functions support in ExtendedField (`asin`, `acos`, `atan`) (https://github.com/mipt-npm/kmath/pull/114)
- New space extensions: `average` and `averageWith`
- Local coding conventions
- Geometric Domains API in `kmath-core`

@@ -250,12 +147,10 @@
- Norm support for `Complex`

### Changed

- `readAsMemory` now has `throws IOException` in JVM signature.
- Several functions taking functional types were made `inline`.
- Several functions taking functional types now have `callsInPlace` contracts.
- BigInteger and BigDecimal algebra: JBigDecimalField has companion object with default math context; minor
  optimizations
- BigInteger and BigDecimal algebra: JBigDecimalField has companion object with default math context; minor optimizations
- `power(T, Int)` extension function has preconditions and supports `Field<T>`
- Memory objects have more preconditions (overflow checking)
- `tg` function is renamed to `tan` (https://github.com/mipt-npm/kmath/pull/114)

@@ -263,7 +158,6 @@
- Moved probability distributions to commons-rng and to `kmath-prob`

### Fixed

- Missing copy method in Memory implementation on JS (https://github.com/mipt-npm/kmath/pull/106)
- D3.dim value in `kmath-dimensions`
- Multiplication in integer rings in `kmath-core` (https://github.com/mipt-npm/kmath/pull/101)
README.md, 85 changed lines

@@ -1,6 +1,6 @@
[![JetBrains Research](https://jb.gg/badges/research.svg)](https://confluence.jetbrains.com/display/ALL/JetBrains+on+GitHub)
[![DOI](https://zenodo.org/badge/129486382.svg)](https://zenodo.org/badge/latestdoi/129486382)
![Gradle build](https://github.com/SciProgCentre/kmath/workflows/Gradle%20build/badge.svg)
![Gradle build](https://github.com/mipt-npm/kmath/workflows/Gradle%20build/badge.svg)
[![Maven Central](https://img.shields.io/maven-central/v/space.kscience/kmath-core.svg?label=Maven%20Central)](https://search.maven.org/search?q=g:%22space.kscience%22)
[![Space](https://img.shields.io/badge/dynamic/xml?color=orange&label=Space&query=//metadata/versioning/latest&url=https%3A%2F%2Fmaven.pkg.jetbrains.space%2Fmipt-npm%2Fp%2Fsci%2Fmaven%2Fspace%2Fkscience%2Fkmath-core%2Fmaven-metadata.xml)](https://maven.pkg.jetbrains.space/mipt-npm/p/sci/maven/space/kscience/)

@@ -11,22 +11,18 @@ analog to Python's NumPy library. Later we found that kotlin is much more flexib
architecture designs. In contrast to `numpy` and `scipy` it is modular and has a lightweight core. The `numpy`-like
experience could be achieved with [kmath-for-real](/kmath-for-real) extension module.

[Documentation site](https://SciProgCentre.github.io/kmath/)
[Documentation site (**WIP**)](https://mipt-npm.github.io/kmath/)

## Publications and talks

* [A conceptual article about context-oriented design](https://proandroiddev.com/an-introduction-context-oriented-programming-in-kotlin-2e79d316b0a2)
* [Another article about context-oriented design](https://proandroiddev.com/diving-deeper-into-context-oriented-programming-in-kotlin-3ecb4ec38814)
* [ACAT 2019 conference paper](https://aip.scitation.org/doi/abs/10.1063/1.5130103)
* [A talk at KotlinConf 2019 about using kotlin for science](https://youtu.be/LI_5TZ7tnOE?si=4LknX41gl_YeUbIe)
* [A talk on architecture at Joker-2021 (in Russian)](https://youtu.be/1bZ2doHiRRM?si=9w953ro9yu98X_KJ)
* [The same talk in English](https://youtu.be/yP5DIc2fVwQ?si=louZzQ1dcXV6gP10)
* [A seminar on tensor API](https://youtu.be/0H99wUs0xTM?si=6c__04jrByFQtVpo)

# Goal

* Provide a flexible and powerful API to work with mathematics abstractions in Kotlin-multiplatform (JVM, JS, Native and
  Wasm).
* Provide a flexible and powerful API to work with mathematics abstractions in Kotlin-multiplatform (JVM, JS and Native)
  .
* Provide basic multiplatform implementations for those abstractions (without significant performance optimization).
* Provide bindings and wrappers with those abstractions for popular optimized platform libraries.

@@ -48,7 +44,7 @@ module definitions below. The module stability could have the following levels:
* **PROTOTYPE**. On this level there are no compatibility guarantees. All methods and classes form those modules could
  break any moment. You can still use it, but be sure to fix the specific version.
* **EXPERIMENTAL**. The general API is decided, but some changes could be made. Volatile API is marked
  with `@UnstableKMathAPI` or other stability warning annotations.
  with `@UnstableKmathAPI` or other stability warning annotations.
* **DEVELOPMENT**. API breaking generally follows semantic versioning ideology. There could be changes in minor
  versions, but not in patch versions. API is protected
  with [binary-compatibility-validator](https://github.com/Kotlin/binary-compatibility-validator) tool.
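As a hedged illustration (not part of the README diff): opting in to API carrying such annotations uses Kotlin's standard opt-in mechanism. The annotation's fully qualified name, `space.kscience.kmath.UnstableKMathAPI`, is taken from the build scripts and imports that appear later in this comparison; the surrounding function is invented for the example.

```kotlin
import space.kscience.kmath.UnstableKMathAPI

// Opt in at a single call site; useVolatileApi is purely illustrative.
@OptIn(UnstableKMathAPI::class)
fun useVolatileApi() {
    // calls into API annotated with @UnstableKMathAPI go here
}
```

A module-wide alternative is the `optIn("space.kscience.kmath.UnstableKMathAPI")` language setting that the benchmarks build script in this comparison already uses.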
@@ -57,20 +53,18 @@ module definitions below. The module stability could have the following levels:
## Modules

### [attributes-kt](attributes-kt)
> An API and basic implementation for arranging objects in a continuous memory block.
>
> **Maturity**: DEVELOPMENT

### [benchmarks](benchmarks)
>
>
> **Maturity**: EXPERIMENTAL

### [examples](examples)
>
>
> **Maturity**: EXPERIMENTAL

### [kmath-ast](kmath-ast)
>
>
> **Maturity**: EXPERIMENTAL
>

@@ -82,7 +76,7 @@ module definitions below. The module stability could have the following levels:

### [kmath-commons](kmath-commons)
> Commons math binding for kmath
>
>
> **Maturity**: EXPERIMENTAL

@@ -92,8 +86,8 @@ module definitions below. The module stability could have the following levels:
> **Maturity**: PROTOTYPE
>
> **Features:**
> - [complex](kmath-complex/src/commonMain/kotlin/space/kscience/kmath/complex/Complex.kt) : Complex numbers operations
> - [quaternion](kmath-complex/src/commonMain/kotlin/space/kscience/kmath/complex/Quaternion.kt) : Quaternions and their composition
> - [complex](kmath-complex/src/commonMain/kotlin/space/kscience/kmath/complex/Complex.kt) : Complex Numbers
> - [quaternion](kmath-complex/src/commonMain/kotlin/space/kscience/kmath/complex/Quaternion.kt) : Quaternions

### [kmath-core](kmath-core)

@@ -111,19 +105,20 @@ objects to the expression by providing a context. Expressions can be used for a
performance calculations to code generation.
> - [domains](kmath-core/src/commonMain/kotlin/space/kscience/kmath/domains) : Domains
> - [autodiff](kmath-core/src/commonMain/kotlin/space/kscience/kmath/expressions/SimpleAutoDiff.kt) : Automatic differentiation
> - [Parallel linear algebra](kmath-core/#) : Parallel implementation for `LinearAlgebra`
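A hedged sketch of the expression API mentioned in the kmath-core feature list, adapted from the ExpressionsInterpretersBenchmark that appears later in this comparison; the final invocation with a `Symbol`-to-value pair is assumed from kmath-core's `Expression` API rather than shown in the diff.

```kotlin
import space.kscience.kmath.expressions.*
import space.kscience.kmath.operations.Float64Field
import space.kscience.kmath.operations.bindSymbol
import space.kscience.kmath.operations.invoke

// Build a reusable expression 2*x + 2/x over Float64Field, then evaluate it at x = 3.0.
val f = Float64Field.expression {
    val x = bindSymbol(Symbol.x)
    x * number(2.0) + 2.0 / x
}

// Invocation syntax assumed; not part of the benchmark excerpt above.
val y: Double = f(Symbol.x to 3.0)
```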
### [kmath-coroutines](kmath-coroutines)
>
>
> **Maturity**: EXPERIMENTAL

### [kmath-dimensions](kmath-dimensions)
> A proof of concept module for adding type-safe dimensions to structures
>
>
> **Maturity**: PROTOTYPE

### [kmath-ejml](kmath-ejml)
>
>
> **Maturity**: PROTOTYPE
>

@@ -147,7 +142,7 @@ One can still use generic algebras though.

### [kmath-functions](kmath-functions)
> Functions, integration and interpolation
>
>
> **Maturity**: EXPERIMENTAL
>

@@ -160,28 +155,31 @@ One can still use generic algebras though.

### [kmath-geometry](kmath-geometry)
>
>
> **Maturity**: PROTOTYPE

### [kmath-histograms](kmath-histograms)
>
>
> **Maturity**: PROTOTYPE

### [kmath-jafama](kmath-jafama)
> Jafama integration module
>
>
> **Maturity**: DEPRECATED
> **Maturity**: PROTOTYPE
>
> **Features:**
> - [jafama-double](kmath-jafama/src/main/kotlin/space/kscience/kmath/jafama/) : Double ExtendedField implementations based on Jafama

### [kmath-jupyter](kmath-jupyter)
>
>
> **Maturity**: PROTOTYPE

### [kmath-kotlingrad](kmath-kotlingrad)
> Kotlin∇ integration module
>
>
> **Maturity**: EXPERIMENTAL
>

@@ -196,14 +194,14 @@ One can still use generic algebras though.
> **Maturity**: DEVELOPMENT

### [kmath-multik](kmath-multik)
> JetBrains Multik connector
>
>
> **Maturity**: PROTOTYPE

### [kmath-nd4j](kmath-nd4j)
> ND4J NDStructure implementation and according NDAlgebra classes
>
>
> **Maturity**: DEPRECATED
> **Maturity**: EXPERIMENTAL
>
> **Features:**
> - [nd4jarraystructure](kmath-nd4j/#) : NDStructure wrapper for INDArray

@@ -212,24 +210,27 @@ One can still use generic algebras though.

### [kmath-optimization](kmath-optimization)
>
>
> **Maturity**: EXPERIMENTAL

### [kmath-stat](kmath-stat)
>
>
> **Maturity**: EXPERIMENTAL

### [kmath-symja](kmath-symja)
> Symja integration module
>
>
> **Maturity**: PROTOTYPE

### [kmath-tensorflow](kmath-tensorflow)
> Google tensorflow connector
>
>
> **Maturity**: PROTOTYPE

### [kmath-tensors](kmath-tensors)
>
>
> **Maturity**: PROTOTYPE
>

@@ -240,13 +241,9 @@ One can still use generic algebras though.

### [kmath-viktor](kmath-viktor)
> Binding for https://github.com/JetBrains-Research/viktor
>
>
> **Maturity**: DEPRECATED

### [test-utils](test-utils)
>
> **Maturity**: EXPERIMENTAL
> **Maturity**: DEVELOPMENT

## Multi-platform support

KMath is developed as a multi-platform library, which means that most of the interfaces are declared in the
[common source sets](/kmath-core/src/commonMain) and implemented there wherever it is possible. In some cases, features
are delegated to platform-specific implementations even if they could be provided in the common module for performance
reasons. Currently, Kotlin/JVM is the primary platform, however, Kotlin/Native and Kotlin/JS contributions and
reasons. Currently, the Kotlin/JVM is the primary platform, however Kotlin/Native and Kotlin/JS contributions and
feedback are also welcome.

## Performance

Calculation of performance is one of the major goals of KMath in the future, but in some cases it is impossible to
achieve both
Calculation performance is one of major goals of KMath in the future, but in some cases it is impossible to achieve both
performance and flexibility.

We expect to focus on creating a convenient universal API first and then work on increasing performance for specific
We expect to focus on creating convenient universal API first and then work on increasing performance for specific
cases. We expect the worst KMath benchmarks will perform better than native Python, but worse than optimized
native/SciPy (mostly due to boxing operations on primitive numbers). The best performance of optimized parts could be
better than SciPy.

## Requirements

KMath currently relies on JDK 11 for compilation and execution of Kotlin-JVM part. We recommend using GraalVM-CE or
Oracle GraalVM for execution to get better performance.
KMath currently relies on JDK 11 for compilation and execution of Kotlin-JVM part. We recommend to use GraalVM-CE 11 for
execution to get better performance.

### Repositories

@@ -291,10 +287,11 @@ dependencies {
}
```

Gradle `6.0+` is required for multiplatform artifacts.
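For reference, a minimal sketch of the dependency declaration that the truncated hunk above belongs to; the repository URL and artifact coordinates come from elsewhere in this comparison, while the version is a placeholder, not taken from the diff.

```kotlin
// build.gradle.kts (sketch; replace <version> with an actual kmath release)
repositories {
    mavenCentral()
    maven("https://repo.kotlin.link")
}

dependencies {
    implementation("space.kscience:kmath-core:<version>")
}
```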
## Contributing

The project requires a lot of additional work. The most important thing we need is feedback about what features are
The project requires a lot of additional work. The most important thing we need is a feedback about what features are
required the most. Feel free to create feature requests. We are also welcome to code contributions, especially in issues
marked
with [good first issue](hhttps://github.com/SciProgCentre/kmath/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
label.
marked with
[waiting for a hero](https://github.com/mipt-npm/kmath/labels/waiting%20for%20a%20hero) label.
@@ -1,21 +0,0 @@
# Module attributes-kt


## Usage

## Artifact:

The Maven coordinates of this project are `space.kscience:attributes-kt:0.1.0`.

**Gradle Kotlin DSL:**
```kotlin
repositories {
    maven("https://repo.kotlin.link")
    mavenCentral()
}

dependencies {
    implementation("space.kscience:attributes-kt:0.1.0")
}
```
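The Usage section of the module README above is empty in the captured text. As a hedged illustration only: the attribute keys (`Determinant`, `Symmetric`) and the `Matrix` owner type below are invented for the example, while the builder function, the operator `invoke`, the typed `get`, and `hasFlag` come from the attributes-kt sources shown further down in this comparison.

```kotlin
import space.kscience.attributes.*

// Hypothetical attribute keys and owner type, invented for this example.
object Determinant : Attribute<Double>
object Symmetric : FlagAttribute
class Matrix

fun main() {
    val attrs: Attributes = Attributes<Matrix> {
        Determinant(10.0) // AttributesBuilder's operator invoke stores a typed value for a key
        Symmetric(Unit)   // flag attributes carry Unit values
    }
    println(attrs[Determinant])         // typed read, null when the key is absent
    println(attrs.hasFlag<Symmetric>()) // true when a flag of that type is present
}
```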
@ -1,104 +0,0 @@
|
||||
public abstract interface class space/kscience/attributes/Attribute {
|
||||
}
|
||||
|
||||
public abstract interface class space/kscience/attributes/AttributeContainer {
|
||||
public abstract fun getAttributes ()Lspace/kscience/attributes/Attributes;
|
||||
}
|
||||
|
||||
public abstract interface class space/kscience/attributes/AttributeScope {
|
||||
}
|
||||
|
||||
public abstract interface class space/kscience/attributes/AttributeWithDefault : space/kscience/attributes/Attribute {
|
||||
public abstract fun getDefault ()Ljava/lang/Object;
|
||||
}
|
||||
|
||||
public abstract interface class space/kscience/attributes/Attributes {
|
||||
public static final field Companion Lspace/kscience/attributes/Attributes$Companion;
|
||||
public abstract fun equals (Ljava/lang/Object;)Z
|
||||
public fun get (Lspace/kscience/attributes/Attribute;)Ljava/lang/Object;
|
||||
public abstract fun getContent ()Ljava/util/Map;
|
||||
public fun getKeys ()Ljava/util/Set;
|
||||
public abstract fun hashCode ()I
|
||||
public abstract fun toString ()Ljava/lang/String;
|
||||
}
|
||||
|
||||
public final class space/kscience/attributes/Attributes$Companion {
|
||||
public final fun equals (Lspace/kscience/attributes/Attributes;Lspace/kscience/attributes/Attributes;)Z
|
||||
public final fun getEMPTY ()Lspace/kscience/attributes/Attributes;
|
||||
}
|
||||
|
||||
public final class space/kscience/attributes/AttributesBuilder : space/kscience/attributes/Attributes {
|
||||
public final fun add (Lspace/kscience/attributes/SetAttribute;Ljava/lang/Object;)V
|
||||
public final fun build ()Lspace/kscience/attributes/Attributes;
|
||||
public fun equals (Ljava/lang/Object;)Z
|
||||
public fun getContent ()Ljava/util/Map;
|
||||
public fun hashCode ()I
|
||||
public final fun invoke (Lspace/kscience/attributes/Attribute;Ljava/lang/Object;)V
|
||||
public final fun put (Lspace/kscience/attributes/Attribute;Ljava/lang/Object;)V
|
||||
public final fun putAll (Lspace/kscience/attributes/Attributes;)V
|
||||
public final fun remove (Lspace/kscience/attributes/SetAttribute;Ljava/lang/Object;)V
|
||||
public final fun set (Lspace/kscience/attributes/Attribute;Ljava/lang/Object;)V
|
||||
public fun toString ()Ljava/lang/String;
|
||||
}
|
||||
|
||||
public final class space/kscience/attributes/AttributesBuilderKt {
|
||||
public static final fun Attributes (Lkotlin/jvm/functions/Function1;)Lspace/kscience/attributes/Attributes;
|
||||
}
|
||||
|
||||
public final class space/kscience/attributes/AttributesKt {
|
||||
public static final fun Attributes (Lspace/kscience/attributes/Attribute;)Lspace/kscience/attributes/Attributes;
|
||||
public static final fun Attributes (Lspace/kscience/attributes/Attribute;Ljava/lang/Object;)Lspace/kscience/attributes/Attributes;
|
||||
public static final fun getOrDefault (Lspace/kscience/attributes/Attributes;Lspace/kscience/attributes/AttributeWithDefault;)Ljava/lang/Object;
|
||||
public static final fun isEmpty (Lspace/kscience/attributes/Attributes;)Z
|
||||
public static final fun modified (Lspace/kscience/attributes/Attributes;Lkotlin/jvm/functions/Function1;)Lspace/kscience/attributes/Attributes;
|
||||
public static final fun plus (Lspace/kscience/attributes/Attributes;Lspace/kscience/attributes/Attributes;)Lspace/kscience/attributes/Attributes;
|
||||
public static final fun withAttribute (Lspace/kscience/attributes/Attributes;Lspace/kscience/attributes/Attribute;)Lspace/kscience/attributes/Attributes;
|
||||
public static final fun withAttribute (Lspace/kscience/attributes/Attributes;Lspace/kscience/attributes/Attribute;Ljava/lang/Object;)Lspace/kscience/attributes/Attributes;
|
||||
public static final fun withAttributeElement (Lspace/kscience/attributes/Attributes;Lspace/kscience/attributes/SetAttribute;Ljava/lang/Object;)Lspace/kscience/attributes/Attributes;
|
||||
public static final fun withoutAttribute (Lspace/kscience/attributes/Attributes;Lspace/kscience/attributes/Attribute;)Lspace/kscience/attributes/Attributes;
|
||||
public static final fun withoutAttributeElement (Lspace/kscience/attributes/Attributes;Lspace/kscience/attributes/SetAttribute;Ljava/lang/Object;)Lspace/kscience/attributes/Attributes;
|
||||
}
|
||||
|
||||
public abstract interface class space/kscience/attributes/FlagAttribute : space/kscience/attributes/Attribute {
|
||||
}
|
||||
|
||||
public abstract class space/kscience/attributes/PolymorphicAttribute : space/kscience/attributes/Attribute {
|
||||
public synthetic fun <init> (Lkotlin/reflect/KType;Lkotlin/jvm/internal/DefaultConstructorMarker;)V
|
||||
public fun equals (Ljava/lang/Object;)Z
|
||||
public final fun getType-V0oMfBY ()Lkotlin/reflect/KType;
|
||||
public fun hashCode ()I
|
||||
}
|
||||
|
||||
public final class space/kscience/attributes/PolymorphicAttributeKt {
|
||||
public static final fun get (Lspace/kscience/attributes/Attributes;Lkotlin/jvm/functions/Function0;)Ljava/lang/Object;
|
||||
public static final fun set (Lspace/kscience/attributes/AttributesBuilder;Lkotlin/jvm/functions/Function0;Ljava/lang/Object;)V
|
||||
}
|
||||
|
||||
public final class space/kscience/attributes/SafeType {
|
||||
public static final synthetic fun box-impl (Lkotlin/reflect/KType;)Lspace/kscience/attributes/SafeType;
|
||||
public static fun constructor-impl (Lkotlin/reflect/KType;)Lkotlin/reflect/KType;
|
||||
public fun equals (Ljava/lang/Object;)Z
|
||||
public static fun equals-impl (Lkotlin/reflect/KType;Ljava/lang/Object;)Z
|
||||
public static final fun equals-impl0 (Lkotlin/reflect/KType;Lkotlin/reflect/KType;)Z
|
||||
public final fun getKType ()Lkotlin/reflect/KType;
|
||||
public fun hashCode ()I
|
||||
public static fun hashCode-impl (Lkotlin/reflect/KType;)I
|
||||
public fun toString ()Ljava/lang/String;
|
||||
public static fun toString-impl (Lkotlin/reflect/KType;)Ljava/lang/String;
|
||||
public final synthetic fun unbox-impl ()Lkotlin/reflect/KType;
|
||||
}
|
||||
|
||||
public final class space/kscience/attributes/SafeTypeKt {
|
||||
public static final fun getKClass-X0YbwmU (Lkotlin/reflect/KType;)Lkotlin/reflect/KClass;
|
||||
}
|
||||
|
||||
public abstract interface class space/kscience/attributes/SetAttribute : space/kscience/attributes/Attribute {
|
||||
}
|
||||
|
||||
public abstract interface annotation class space/kscience/attributes/UnstableAttributesAPI : java/lang/annotation/Annotation {
|
||||
}
|
||||
|
||||
public abstract interface class space/kscience/attributes/WithType {
|
||||
public abstract fun getType-V0oMfBY ()Lkotlin/reflect/KType;
|
||||
}
|
||||
|
@ -1,20 +0,0 @@
|
||||
plugins {
|
||||
id("space.kscience.gradle.mpp")
|
||||
`maven-publish`
|
||||
}
|
||||
|
||||
version = rootProject.extra.get("attributesVersion").toString()
|
||||
|
||||
kscience {
|
||||
jvm()
|
||||
js()
|
||||
native()
|
||||
wasm()
|
||||
}
|
||||
|
||||
readme {
|
||||
maturity = space.kscience.gradle.Maturity.DEVELOPMENT
|
||||
description = """
|
||||
An API and basic implementation for arranging objects in a continuous memory block.
|
||||
""".trimIndent()
|
||||
}
|
@ -1,29 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2023 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.attributes
|
||||
|
||||
/**
|
||||
* A marker interface for an attribute. Attributes are used as keys to access contents of type [T] in the container.
|
||||
*/
|
||||
public interface Attribute<T>
|
||||
|
||||
/**
|
||||
* An attribute that could be either present or absent
|
||||
*/
|
||||
public interface FlagAttribute : Attribute<Unit>
|
||||
|
||||
/**
|
||||
* An attribute with a default value
|
||||
*/
|
||||
public interface AttributeWithDefault<T> : Attribute<T> {
|
||||
public val default: T
|
||||
}
|
||||
|
||||
/**
|
||||
* Attribute containing a set of values
|
||||
*/
|
||||
public interface SetAttribute<V> : Attribute<Set<V>>
|
||||
|
@ -1,20 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2023 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.attributes
|
||||
|
||||
/**
|
||||
* A container for [Attributes]
|
||||
*/
|
||||
public interface AttributeContainer {
|
||||
public val attributes: Attributes
|
||||
}
|
||||
|
||||
/**
|
||||
* A scope, where attribute keys could be resolved.
|
||||
* [O] is used only to resolve types in compile-time.
|
||||
*/
|
||||
public interface AttributeScope<O>
|
||||
|
@ -1,143 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2023 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.attributes
|
||||
|
||||
/**
|
||||
* A set of attributes. The implementation must guarantee that [content] keys correspond to their value types.
|
||||
*/
|
||||
public interface Attributes {
|
||||
/**
|
||||
* Raw content for this [Attributes]
|
||||
*/
|
||||
public val content: Map<out Attribute<*>, Any?>
|
||||
|
||||
/**
|
||||
* Attribute keys contained in this [Attributes]
|
||||
*/
|
||||
public val keys: Set<Attribute<*>> get() = content.keys
|
||||
|
||||
/**
|
||||
* Provide an attribute value. Return null if attribute is not present or if its value is null.
|
||||
*/
|
||||
@Suppress("UNCHECKED_CAST")
|
||||
public operator fun <T> get(attribute: Attribute<T>): T? = content[attribute] as? T
|
||||
|
||||
override fun toString(): String
|
||||
override fun equals(other: Any?): Boolean
|
||||
override fun hashCode(): Int
|
||||
|
||||
public companion object {
|
||||
public val EMPTY: Attributes = object : Attributes {
|
||||
override val content: Map<out Attribute<*>, Any?> get() = emptyMap()
|
||||
|
||||
override fun toString(): String = "Attributes.EMPTY"
|
||||
|
||||
override fun equals(other: Any?): Boolean = (other as? Attributes)?.isEmpty() ?: false
|
||||
|
||||
override fun hashCode(): Int = Unit.hashCode()
|
||||
}
|
||||
|
||||
public fun equals(a1: Attributes, a2: Attributes): Boolean =
|
||||
a1.keys == a2.keys && a1.keys.all { a1[it] == a2[it] }
|
||||
}
|
||||
}
|
||||
|
||||
internal class MapAttributes(override val content: Map<out Attribute<*>, Any?>) : Attributes {
|
||||
override fun toString(): String = "Attributes(value=${content.entries})"
|
||||
override fun equals(other: Any?): Boolean = other is Attributes && Attributes.equals(this, other)
|
||||
override fun hashCode(): Int = content.hashCode()
|
||||
}
|
||||
|
||||
public fun Attributes.isEmpty(): Boolean = keys.isEmpty()
|
||||
|
||||
/**
|
||||
* Get attribute value or default
|
||||
*/
|
||||
public fun <T> Attributes.getOrDefault(attribute: AttributeWithDefault<T>): T = get(attribute) ?: attribute.default
|
||||
|
||||
/**
|
||||
* Check if there is an attribute that matches given key by type and adheres to [predicate].
|
||||
*/
|
||||
@Suppress("UNCHECKED_CAST")
|
||||
public inline fun <T, reified A : Attribute<T>> Attributes.hasAny(predicate: (value: T) -> Boolean): Boolean =
|
||||
content.any { (mapKey, mapValue) -> mapKey is A && predicate(mapValue as T) }
|
||||
|
||||
/**
|
||||
* Check if there is an attribute of given type (subtypes included)
|
||||
*/
|
||||
public inline fun <reified A : Attribute<*>> Attributes.hasAny(): Boolean =
|
||||
content.any { (mapKey, _) -> mapKey is A }
|
||||
|
||||
/**
|
||||
* Check if [Attributes] contains a flag. Multiple keys that are instances of a flag could be present
|
||||
*/
|
||||
public inline fun <reified A : FlagAttribute> Attributes.hasFlag(): Boolean =
|
||||
content.keys.any { it is A }
|
||||
|
||||
/**
|
||||
* Create [Attributes] with an added or replaced attribute key.
|
||||
*/
|
||||
public fun <T, A : Attribute<T>> Attributes.withAttribute(
|
||||
attribute: A,
|
||||
attrValue: T,
|
||||
): Attributes = MapAttributes(content + (attribute to attrValue))
|
||||
|
||||
public fun <A : Attribute<Unit>> Attributes.withAttribute(attribute: A): Attributes =
|
||||
withAttribute(attribute, Unit)
|
||||
|
||||
/**
|
||||
* Create a new [Attributes] by modifying the current one
|
||||
*/
|
||||
public fun <O> Attributes.modified(block: AttributesBuilder<O>.() -> Unit): Attributes = Attributes<O> {
|
||||
putAll(this@modified)
|
||||
block()
|
||||
}
|
||||
|
||||
/**
|
||||
* Create new [Attributes] by removing [attribute] key
|
||||
*/
|
||||
public fun Attributes.withoutAttribute(attribute: Attribute<*>): Attributes = MapAttributes(content.minus(attribute))
|
||||
|
||||
/**
|
||||
* Add an element to a [SetAttribute]
|
||||
*/
|
||||
public fun <T, A : SetAttribute<T>> Attributes.withAttributeElement(
|
||||
attribute: A,
|
||||
attrValue: T,
|
||||
): Attributes {
|
||||
val currentSet: Set<T> = get(attribute) ?: emptySet()
|
||||
return MapAttributes(
|
||||
content + (attribute to (currentSet + attrValue))
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove an element from [SetAttribute]
|
||||
*/
|
||||
public fun <T, A : SetAttribute<T>> Attributes.withoutAttributeElement(
|
||||
attribute: A,
|
||||
attrValue: T,
|
||||
): Attributes {
|
||||
val currentSet: Set<T> = get(attribute) ?: emptySet()
|
||||
return MapAttributes(content + (attribute to (currentSet - attrValue)))
|
||||
}
|
||||
|
||||
/**
|
||||
* Create [Attributes] with a single key
|
||||
*/
|
||||
public fun <T, A : Attribute<T>> Attributes(
|
||||
attribute: A,
|
||||
attrValue: T,
|
||||
): Attributes = MapAttributes(mapOf(attribute to attrValue))
|
||||
|
||||
/**
|
||||
* Create Attributes with a single [Unit] valued attribute
|
||||
*/
|
||||
public fun <A : Attribute<Unit>> Attributes(
|
||||
attribute: A,
|
||||
): Attributes = MapAttributes(mapOf(attribute to Unit))
|
||||
|
||||
public operator fun Attributes.plus(other: Attributes): Attributes = MapAttributes(content + other.content)
|
@ -1,68 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2023 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.attributes
|
||||
|
||||
/**
|
||||
* A builder for [Attributes].
|
||||
* The builder is not thread safe
|
||||
*
|
||||
* @param O type marker of an owner object, for which these attributes are made
|
||||
*/
|
||||
public class AttributesBuilder<out O> internal constructor() : Attributes {
|
||||
|
||||
private val map = mutableMapOf<Attribute<*>, Any?>()
|
||||
|
||||
override fun toString(): String = "Attributes(value=${map.entries})"
|
||||
override fun equals(other: Any?): Boolean = other is Attributes && Attributes.equals(this, other)
|
||||
override fun hashCode(): Int = map.hashCode()
|
||||
|
||||
override val content: Map<out Attribute<*>, Any?> get() = map
|
||||
|
||||
public operator fun <T> set(attribute: Attribute<T>, value: T?) {
|
||||
if (value == null) {
|
||||
map.remove(attribute)
|
||||
} else {
|
||||
map[attribute] = value
|
||||
}
|
||||
}
|
||||
|
||||
public operator fun <V> Attribute<V>.invoke(value: V?) {
|
||||
set(this, value)
|
||||
}
|
||||
|
||||
public infix fun <V> Attribute<V>.put(value: V?) {
|
||||
set(this, value)
|
||||
}
|
||||
|
||||
/**
|
||||
* Put all attributes for given [attributes]
|
||||
*/
|
||||
public fun putAll(attributes: Attributes) {
|
||||
map.putAll(attributes.content)
|
||||
}
|
||||
|
||||
public infix fun <V> SetAttribute<V>.add(attrValue: V) {
|
||||
val currentSet: Set<V> = get(this) ?: emptySet()
|
||||
map[this] = currentSet + attrValue
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove an element from [SetAttribute]
|
||||
*/
|
||||
public infix fun <V> SetAttribute<V>.remove(attrValue: V) {
|
||||
val currentSet: Set<V> = get(this) ?: emptySet()
|
||||
map[this] = currentSet - attrValue
|
||||
}
|
||||
|
||||
public fun build(): Attributes = MapAttributes(map)
|
||||
}
|
||||
|
||||
/**
|
||||
* Create [Attributes] with a given [builder]
|
||||
* @param O the type for which attributes are built. The type is used only during compilation phase for static extension dispatch
|
||||
*/
|
||||
public fun <O> Attributes(builder: AttributesBuilder<O>.() -> Unit): Attributes =
|
||||
AttributesBuilder<O>().apply(builder).build()
|
@ -1,34 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2023 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.attributes
|
||||
|
||||
/**
|
||||
* An attribute that has a type parameter for value
|
||||
* @param type parameter-type
|
||||
*/
|
||||
public abstract class PolymorphicAttribute<T>(public val type: SafeType<T>) : Attribute<T> {
|
||||
override fun equals(other: Any?): Boolean = other != null &&
|
||||
(this::class == other::class) &&
|
||||
(other as? PolymorphicAttribute<*>)?.type == this.type
|
||||
|
||||
override fun hashCode(): Int = this::class.hashCode() + type.hashCode()
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get a polymorphic attribute using attribute factory
|
||||
*/
|
||||
@UnstableAttributesAPI
|
||||
public operator fun <T> Attributes.get(attributeKeyBuilder: () -> PolymorphicAttribute<T>): T? =
|
||||
get(attributeKeyBuilder())
|
||||
|
||||
/**
|
||||
* Set a polymorphic attribute using its factory
|
||||
*/
|
||||
@UnstableAttributesAPI
|
||||
public operator fun <O, T> AttributesBuilder<O>.set(attributeKeyBuilder: () -> PolymorphicAttribute<T>, value: T) {
|
||||
set(attributeKeyBuilder(), value)
|
||||
}
|
@ -1,35 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2023 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.attributes
|
||||
|
||||
import kotlin.jvm.JvmInline
|
||||
import kotlin.reflect.KClass
|
||||
import kotlin.reflect.KType
|
||||
import kotlin.reflect.typeOf
|
||||
|
||||
/**
|
||||
* Safe variant ok Kotlin [KType] that ensures that the type parameter is of the same type as [kType]
|
||||
*
|
||||
* @param kType raw [KType]
|
||||
*/
|
||||
@JvmInline
|
||||
public value class SafeType<out T> @PublishedApi internal constructor(public val kType: KType)
|
||||
|
||||
public inline fun <reified T> safeTypeOf(): SafeType<T> = SafeType(typeOf<T>())
|
||||
|
||||
/**
|
||||
* Derive Kotlin [KClass] from this type and fail if the type is not a class (should not happen)
|
||||
*/
|
||||
@Suppress("UNCHECKED_CAST")
|
||||
@UnstableAttributesAPI
|
||||
public val <T> SafeType<T>.kClass: KClass<T & Any> get() = kType.classifier as KClass<T & Any>
|
||||
|
||||
/**
|
||||
* An interface containing [type] for dynamic type checking.
|
||||
*/
|
||||
public interface WithType<out T> {
|
||||
public val type: SafeType<T>
|
||||
}
|
@ -1,17 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2023 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.attributes
|
||||
|
||||
/**
|
||||
* Marks declarations that are still experimental in the Attributes-kt APIs, which means that the design of the corresponding
|
||||
* declarations has open issues that may (or may not) lead to their changes in the future. Roughly speaking, there is
|
||||
* a chance of those declarations will be deprecated in the future or the semantics of their behavior may change
|
||||
* in some way that may break some code.
|
||||
*/
|
||||
@MustBeDocumented
|
||||
@Retention(value = AnnotationRetention.BINARY)
|
||||
@RequiresOptIn("This API is unstable and could change in future", RequiresOptIn.Level.WARNING)
|
||||
public annotation class UnstableAttributesAPI
|
@ -1,11 +1,10 @@
|
||||
@file:Suppress("UNUSED_VARIABLE")
|
||||
|
||||
import org.jetbrains.kotlin.gradle.tasks.KotlinJvmCompile
|
||||
import space.kscience.kmath.benchmarks.addBenchmarkProperties
|
||||
|
||||
plugins {
|
||||
kotlin("multiplatform")
|
||||
alias(spclibs.plugins.kotlin.plugin.allopen)
|
||||
kotlin("plugin.allopen")
|
||||
id("org.jetbrains.kotlinx.benchmark")
|
||||
}
|
||||
|
||||
@ -16,8 +15,6 @@ repositories {
|
||||
mavenCentral()
|
||||
}
|
||||
|
||||
val multikVersion: String by rootProject.extra
|
||||
|
||||
kotlin {
|
||||
jvm()
|
||||
|
||||
@ -29,9 +26,6 @@ kotlin {
|
||||
all {
|
||||
languageSettings {
|
||||
progressiveMode = true
|
||||
optIn("kotlin.contracts.ExperimentalContracts")
|
||||
optIn("kotlin.ExperimentalUnsignedTypes")
|
||||
optIn("space.kscience.kmath.UnstableKMathAPI")
|
||||
}
|
||||
}
|
||||
|
||||
@ -45,9 +39,7 @@ kotlin {
|
||||
implementation(project(":kmath-dimensions"))
|
||||
implementation(project(":kmath-for-real"))
|
||||
implementation(project(":kmath-tensors"))
|
||||
implementation(project(":kmath-multik"))
|
||||
implementation("org.jetbrains.kotlinx:multik-default:$multikVersion")
|
||||
implementation(spclibs.kotlinx.benchmark.runtime)
|
||||
implementation("org.jetbrains.kotlinx:kotlinx-benchmark-runtime:0.4.2")
|
||||
}
|
||||
}
|
||||
|
||||
@ -59,6 +51,7 @@ kotlin {
|
||||
implementation(project(":kmath-kotlingrad"))
|
||||
implementation(project(":kmath-viktor"))
|
||||
implementation(project(":kmath-jafama"))
|
||||
implementation(project(":kmath-multik"))
|
||||
implementation(projects.kmath.kmathTensorflow)
|
||||
implementation("org.tensorflow:tensorflow-core-platform:0.4.0")
|
||||
implementation("org.nd4j:nd4j-native:1.0.0-M1")
|
||||
@ -145,10 +138,12 @@ benchmark {
|
||||
commonConfiguration()
|
||||
include("ViktorLogBenchmark")
|
||||
}
|
||||
}
|
||||
|
||||
configurations.register("integration") {
|
||||
commonConfiguration()
|
||||
include("IntegrationBenchmark")
|
||||
// Fix kotlinx-benchmarks bug
|
||||
afterEvaluate {
|
||||
val jvmBenchmarkJar by tasks.getting(org.gradle.jvm.tasks.Jar::class) {
|
||||
duplicatesStrategy = DuplicatesStrategy.EXCLUDE
|
||||
}
|
||||
}
|
||||
|
||||
@ -156,11 +151,11 @@ kotlin.sourceSets.all {
|
||||
with(languageSettings) {
|
||||
optIn("kotlin.contracts.ExperimentalContracts")
|
||||
optIn("kotlin.ExperimentalUnsignedTypes")
|
||||
optIn("space.kscience.kmath.UnstableKMathAPI")
|
||||
optIn("space.kscience.kmath.misc.UnstableKMathAPI")
|
||||
}
|
||||
}
|
||||
|
||||
tasks.withType<KotlinJvmCompile> {
|
||||
tasks.withType<org.jetbrains.kotlin.gradle.dsl.KotlinJvmCompile> {
|
||||
kotlinOptions {
|
||||
jvmTarget = "11"
|
||||
freeCompilerArgs = freeCompilerArgs + "-Xjvm-default=all" + "-Xlambdas=indy"
|
||||
@ -168,7 +163,7 @@ tasks.withType<KotlinJvmCompile> {
|
||||
}
|
||||
|
||||
readme {
|
||||
maturity = space.kscience.gradle.Maturity.EXPERIMENTAL
|
||||
maturity = ru.mipt.npm.gradle.Maturity.EXPERIMENTAL
|
||||
}
|
||||
|
||||
addBenchmarkProperties()
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -9,9 +9,9 @@ import kotlinx.benchmark.Benchmark
|
||||
import kotlinx.benchmark.Blackhole
|
||||
import kotlinx.benchmark.Scope
|
||||
import kotlinx.benchmark.State
|
||||
import space.kscience.kmath.UnstableKMathAPI
|
||||
import space.kscience.kmath.expressions.*
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.Algebra
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.operations.bindSymbol
|
||||
import space.kscience.kmath.operations.invoke
|
||||
import kotlin.math.sin
|
||||
@ -84,7 +84,7 @@ class ExpressionsInterpretersBenchmark {
|
||||
private val x by symbol
|
||||
private const val times = 1_000_000
|
||||
|
||||
private val functional = Float64Field.expression {
|
||||
private val functional = DoubleField.expression {
|
||||
val x = bindSymbol(Symbol.x)
|
||||
x * number(2.0) + 2.0 / x - 16.0 / sin(x)
|
||||
}
|
||||
@ -93,14 +93,12 @@ class ExpressionsInterpretersBenchmark {
|
||||
x * 2.0 + number(2.0) / x - number(16.0) / sin(x)
|
||||
}
|
||||
|
||||
private val mst = node.toExpression(Float64Field)
|
||||
|
||||
@OptIn(UnstableKMathAPI::class)
|
||||
private val wasm = node.wasmCompileToExpression(Float64Field)
|
||||
private val estree = node.estreeCompileToExpression(Float64Field)
|
||||
private val mst = node.toExpression(DoubleField)
|
||||
private val wasm = node.wasmCompileToExpression(DoubleField)
|
||||
private val estree = node.estreeCompileToExpression(DoubleField)
|
||||
|
||||
private val raw = Expression<Double> { args ->
|
||||
val x = args.getValue(x)
|
||||
val x = args[x]!!
|
||||
x * 2.0 + 2.0 / x - 16.0 / sin(x)
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -10,7 +10,7 @@ import kotlinx.benchmark.Blackhole
|
||||
import org.openjdk.jmh.annotations.Benchmark
|
||||
import org.openjdk.jmh.annotations.Scope
|
||||
import org.openjdk.jmh.annotations.State
|
||||
import space.kscience.kmath.UnstableKMathAPI
|
||||
import space.kscience.kmath.misc.UnstableKMathAPI
|
||||
import space.kscience.kmath.operations.BigIntField
|
||||
import space.kscience.kmath.operations.JBigIntegerField
|
||||
import space.kscience.kmath.operations.invoke
|
||||
@ -67,7 +67,7 @@ internal class BigIntBenchmark {
|
||||
|
||||
@Benchmark
|
||||
fun kmMultiplyLarge(blackhole: Blackhole) = BigIntField {
|
||||
blackhole.consume(kmLargeNumber * kmLargeNumber)
|
||||
blackhole.consume(kmLargeNumber*kmLargeNumber)
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
@ -77,7 +77,7 @@ internal class BigIntBenchmark {
|
||||
|
||||
@Benchmark
|
||||
fun jvmMultiplyLarge(blackhole: Blackhole) = JBigIntegerField {
|
||||
blackhole.consume(jvmLargeNumber * jvmLargeNumber)
|
||||
blackhole.consume(jvmLargeNumber*jvmLargeNumber)
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
|
@ -1,80 +1,39 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.benchmarks
|
||||
|
||||
import kotlinx.benchmark.Benchmark
|
||||
import kotlinx.benchmark.Blackhole
|
||||
import kotlinx.benchmark.Scope
|
||||
import kotlinx.benchmark.State
|
||||
import space.kscience.kmath.complex.Complex
|
||||
import space.kscience.kmath.complex.ComplexField
|
||||
import space.kscience.kmath.complex.complex
|
||||
import space.kscience.kmath.operations.invoke
|
||||
import space.kscience.kmath.structures.Buffer
|
||||
import space.kscience.kmath.structures.Float64Buffer
|
||||
import space.kscience.kmath.structures.getDouble
|
||||
import space.kscience.kmath.structures.permute
|
||||
import space.kscience.kmath.structures.DoubleBuffer
|
||||
import space.kscience.kmath.structures.MutableBuffer
|
||||
|
||||
@State(Scope.Benchmark)
|
||||
internal class BufferBenchmark {
|
||||
|
||||
@Benchmark
|
||||
fun doubleArrayReadWrite(blackhole: Blackhole) {
|
||||
val buffer = DoubleArray(size) { it.toDouble() }
|
||||
var res = 0.0
|
||||
fun genericDoubleBufferReadWrite() {
|
||||
val buffer = DoubleBuffer(size) { it.toDouble() }
|
||||
|
||||
(0 until size).forEach {
|
||||
res += buffer[it]
|
||||
buffer[it]
|
||||
}
|
||||
blackhole.consume(res)
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun doubleBufferReadWrite(blackhole: Blackhole) {
|
||||
val buffer = Float64Buffer(size) { it.toDouble() }
|
||||
var res = 0.0
|
||||
(0 until size).forEach {
|
||||
res += buffer[it]
|
||||
}
|
||||
blackhole.consume(res)
|
||||
}
|
||||
fun complexBufferReadWrite() {
|
||||
val buffer = MutableBuffer.complex(size / 2) { Complex(it.toDouble(), -it.toDouble()) }
|
||||
|
||||
@Benchmark
|
||||
fun bufferViewReadWrite(blackhole: Blackhole) {
|
||||
val buffer = Float64Buffer(size) { it.toDouble() }.permute(reversedIndices)
|
||||
var res = 0.0
|
||||
(0 until size).forEach {
|
||||
res += buffer[it]
|
||||
}
|
||||
blackhole.consume(res)
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun bufferViewReadWriteSpecialized(blackhole: Blackhole) {
|
||||
val buffer = Float64Buffer(size) { it.toDouble() }.permute(reversedIndices)
|
||||
var res = 0.0
|
||||
(0 until size).forEach {
|
||||
res += buffer.getDouble(it)
|
||||
}
|
||||
blackhole.consume(res)
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun complexBufferReadWrite(blackhole: Blackhole) = ComplexField {
|
||||
val buffer = Buffer.complex(size / 2) { Complex(it.toDouble(), -it.toDouble()) }
|
||||
|
||||
var res = zero
|
||||
(0 until size / 2).forEach {
|
||||
res += buffer[it]
|
||||
buffer[it]
|
||||
}
|
||||
|
||||
blackhole.consume(res)
|
||||
}
|
||||
|
||||
private companion object {
|
||||
private const val size = 100
|
||||
private val reversedIndices = IntArray(size) { it }.apply { reverse() }
|
||||
}
|
||||
}
|
||||
|
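The `bufferViewReadWrite*` benchmarks above compare generic and specialized reads through a permuted buffer view. A minimal sketch of that access pattern, reusing the `Float64Buffer`, `permute`, and `getDouble` helpers imported by the benchmark (on the older side of this compare the buffer type is named `DoubleBuffer`); the size and seed values are illustrative only:

```kotlin
import space.kscience.kmath.structures.Float64Buffer
import space.kscience.kmath.structures.getDouble
import space.kscience.kmath.structures.permute

fun main() {
    val size = 10
    val reversedIndices = IntArray(size) { it }.apply { reverse() }

    // A permuted view over the original buffer.
    val view = Float64Buffer(size) { it.toDouble() }.permute(reversedIndices)

    var generic = 0.0
    var specialized = 0.0
    (0 until size).forEach {
        generic += view[it]               // access through the generic Buffer<Double> API
        specialized += view.getDouble(it) // specialized primitive accessor
    }
    println("$generic $specialized")
}
```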
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -11,11 +11,14 @@ import kotlinx.benchmark.Scope
|
||||
import kotlinx.benchmark.State
|
||||
import space.kscience.kmath.commons.linear.CMLinearSpace
|
||||
import space.kscience.kmath.ejml.EjmlLinearSpaceDDRM
|
||||
import space.kscience.kmath.linear.Float64ParallelLinearSpace
|
||||
import space.kscience.kmath.linear.invoke
|
||||
import space.kscience.kmath.linear.linearSpace
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.multik.multikAlgebra
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.operations.invoke
|
||||
import space.kscience.kmath.structures.Buffer
|
||||
import space.kscience.kmath.tensorflow.produceWithTF
|
||||
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.tensorAlgebra
|
||||
import kotlin.random.Random
|
||||
|
||||
@ -26,10 +29,10 @@ internal class DotBenchmark {
|
||||
const val dim = 1000
|
||||
|
||||
//creating invertible matrix
|
||||
val matrix1 = Float64Field.linearSpace.buildMatrix(dim, dim) { _, _ ->
|
||||
val matrix1 = DoubleField.linearSpace.buildMatrix(dim, dim) { _, _ ->
|
||||
random.nextDouble()
|
||||
}
|
||||
val matrix2 = Float64Field.linearSpace.buildMatrix(dim, dim) { _, _ ->
|
||||
val matrix2 = DoubleField.linearSpace.buildMatrix(dim, dim) { _, _ ->
|
||||
random.nextDouble()
|
||||
}
|
||||
|
||||
@ -44,7 +47,7 @@ internal class DotBenchmark {
|
||||
@Benchmark
|
||||
fun tfDot(blackhole: Blackhole) {
|
||||
blackhole.consume(
|
||||
Float64Field.produceWithTF {
|
||||
DoubleField.produceWithTF {
|
||||
matrix1 dot matrix1
|
||||
}
|
||||
)
|
||||
@ -71,23 +74,27 @@ internal class DotBenchmark {
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun multikDot(blackhole: Blackhole) = with(multikAlgebra) {
|
||||
fun tensorDot(blackhole: Blackhole) = with(DoubleField.tensorAlgebra) {
|
||||
blackhole.consume(matrix1 dot matrix2)
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun tensorDot(blackhole: Blackhole) = with(Float64Field.tensorAlgebra) {
|
||||
fun multikDot(blackhole: Blackhole) = with(DoubleField.multikAlgebra) {
|
||||
blackhole.consume(matrix1 dot matrix2)
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun bufferedDot(blackhole: Blackhole) = with(Float64Field.linearSpace) {
|
||||
fun bufferedDot(blackhole: Blackhole) = with(DoubleField.linearSpace(Buffer.Companion::auto)) {
|
||||
blackhole.consume(matrix1 dot matrix2)
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun parallelDot(blackhole: Blackhole) = with(Float64ParallelLinearSpace) {
|
||||
fun doubleDot(blackhole: Blackhole) = with(DoubleField.linearSpace) {
|
||||
blackhole.consume(matrix1 dot matrix2)
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun doubleTensorDot(blackhole: Blackhole) = DoubleTensorAlgebra.invoke {
|
||||
blackhole.consume(matrix1 dot matrix2)
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -12,7 +12,7 @@ import kotlinx.benchmark.State
|
||||
import space.kscience.kmath.asm.compileToExpression
|
||||
import space.kscience.kmath.expressions.*
|
||||
import space.kscience.kmath.operations.Algebra
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.operations.bindSymbol
|
||||
import space.kscience.kmath.operations.invoke
|
||||
import kotlin.math.sin
|
||||
@ -100,7 +100,7 @@ internal class ExpressionsInterpretersBenchmark {
|
||||
private val x by symbol
|
||||
private const val times = 1_000_000
|
||||
|
||||
private val functional = Float64Field.expression {
|
||||
private val functional = DoubleField.expression {
|
||||
val x = bindSymbol(Symbol.x)
|
||||
x * number(2.0) + 2.0 / x - 16.0 / sin(x)
|
||||
}
|
||||
@ -109,12 +109,12 @@ internal class ExpressionsInterpretersBenchmark {
|
||||
x * 2.0 + number(2.0) / x - number(16.0) / sin(x)
|
||||
}
|
||||
|
||||
private val mst = node.toExpression(Float64Field)
|
||||
private val mst = node.toExpression(DoubleField)
|
||||
|
||||
private val asmPrimitive = node.compileToExpression(Float64Field)
|
||||
private val asmPrimitive = node.compileToExpression(DoubleField)
|
||||
private val xIdx = asmPrimitive.indexer.indexOf(x)
|
||||
|
||||
private val asmGeneric = node.compileToExpression(Float64Field as Algebra<Double>)
|
||||
private val asmGeneric = node.compileToExpression(DoubleField as Algebra<Double>)
|
||||
|
||||
private val raw = Expression<Double> { args ->
|
||||
val x = args[x]!!
|
||||
|
@ -1,40 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.benchmarks
|
||||
|
||||
import org.openjdk.jmh.annotations.Benchmark
|
||||
import org.openjdk.jmh.annotations.Scope
|
||||
import org.openjdk.jmh.annotations.State
|
||||
import org.openjdk.jmh.infra.Blackhole
|
||||
import space.kscience.kmath.complex.Complex
|
||||
import space.kscience.kmath.complex.algebra
|
||||
import space.kscience.kmath.integration.gaussIntegrator
|
||||
import space.kscience.kmath.integration.integrate
|
||||
import space.kscience.kmath.integration.value
|
||||
import space.kscience.kmath.operations.algebra
|
||||
|
||||
|
||||
@State(Scope.Benchmark)
|
||||
internal class IntegrationBenchmark {
|
||||
|
||||
@Benchmark
|
||||
fun doubleIntegration(blackhole: Blackhole) {
|
||||
val res = Double.algebra.gaussIntegrator.integrate(0.0..1.0, intervals = 1000) { x: Double ->
|
||||
//sin(1 / x)
|
||||
1 / x
|
||||
}.value
|
||||
blackhole.consume(res)
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun complexIntegration(blackhole: Blackhole) = with(Complex.algebra) {
|
||||
val res = gaussIntegrator.integrate(0.0..1.0, intervals = 1000) { x: Double ->
|
||||
// sin(1 / x) + i * cos(1 / x)
|
||||
1 / x - i / x
|
||||
}.value
|
||||
blackhole.consume(res)
|
||||
}
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -11,8 +11,10 @@ import org.openjdk.jmh.annotations.Scope
|
||||
import org.openjdk.jmh.annotations.State
|
||||
import space.kscience.kmath.jafama.JafamaDoubleField
|
||||
import space.kscience.kmath.jafama.StrictJafamaDoubleField
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.operations.invoke
|
||||
import kotlin.contracts.InvocationKind
|
||||
import kotlin.contracts.contract
|
||||
import kotlin.random.Random
|
||||
|
||||
@State(Scope.Benchmark)
|
||||
@ -24,7 +26,7 @@ internal class JafamaBenchmark {
|
||||
|
||||
@Benchmark
|
||||
fun core(blackhole: Blackhole) = invokeBenchmarks(blackhole) { x ->
|
||||
Float64Field { x * power(x, 4) * exp(x) / cos(x) + sin(x) }
|
||||
DoubleField { x * power(x, 4) * exp(x) / cos(x) + sin(x) }
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
@ -34,6 +36,7 @@ internal class JafamaBenchmark {
|
||||
}
|
||||
|
||||
private inline fun invokeBenchmarks(blackhole: Blackhole, expr: (Double) -> Double) {
|
||||
contract { callsInPlace(expr, InvocationKind.AT_LEAST_ONCE) }
|
||||
val rng = Random(0)
|
||||
repeat(1000000) { blackhole.consume(expr(rng.nextDouble())) }
|
||||
}
|
||||
|
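The `DoubleField { ... }` blocks above use the algebra `invoke` extension, which brings the field's operations (`power`, `exp`, `cos`, `sin`) into scope for the lambda. A minimal sketch of the same pattern outside a benchmark, with an arbitrary argument value:

```kotlin
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.invoke

fun main() {
    val x = 0.5
    // Everything inside the block resolves against DoubleField's operations.
    val y = DoubleField { x * power(x, 4) * exp(x) / cos(x) + sin(x) }
    println(y)
}
```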
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -15,7 +15,6 @@ import space.kscience.kmath.ejml.EjmlLinearSpaceDDRM
|
||||
import space.kscience.kmath.linear.invoke
|
||||
import space.kscience.kmath.linear.linearSpace
|
||||
import space.kscience.kmath.linear.lupSolver
|
||||
import space.kscience.kmath.linear.parallel
|
||||
import space.kscience.kmath.operations.algebra
|
||||
import kotlin.random.Random
|
||||
|
||||
@ -39,19 +38,16 @@ internal class MatrixInverseBenchmark {
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun kmathParallelLupInversion(blackhole: Blackhole) {
|
||||
blackhole.consume(Double.algebra.linearSpace.parallel.lupSolver().inverse(matrix))
|
||||
fun cmLUPInversion(blackhole: Blackhole) {
|
||||
CMLinearSpace {
|
||||
blackhole.consume(lupSolver().inverse(matrix))
|
||||
}
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun cmLUPInversion(blackhole: Blackhole) = CMLinearSpace {
|
||||
blackhole.consume(lupSolver().inverse(matrix))
|
||||
fun ejmlInverse(blackhole: Blackhole) {
|
||||
EjmlLinearSpaceDDRM {
|
||||
blackhole.consume(matrix.toEjml().inverse())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Benchmark
|
||||
fun ejmlInverse(blackhole: Blackhole) = EjmlLinearSpaceDDRM {
|
||||
blackhole.consume(matrix.toEjml().inverted())
|
||||
}
|
||||
|
||||
}
|
||||
|
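For orientation, a minimal sketch of the LUP-based inversion these benchmarks exercise, assuming the kmath-core `lupSolver` extension used above also resolves on the plain `Double.algebra.linearSpace` context; the matrix size and random seed are illustrative only:

```kotlin
import space.kscience.kmath.linear.linearSpace
import space.kscience.kmath.linear.lupSolver
import space.kscience.kmath.operations.algebra
import kotlin.random.Random

fun main() {
    val random = Random(1224)
    val dim = 100 // much smaller than the benchmark matrices

    with(Double.algebra.linearSpace) {
        val matrix = buildMatrix(dim, dim) { _, _ -> random.nextDouble() }
        // LUP decomposition based solver, same call as in the benchmark above.
        val inverse = lupSolver().inverse(matrix)
        println(inverse[0, 0])
    }
}
```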
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -13,10 +13,14 @@ import org.jetbrains.kotlinx.multik.api.Multik
|
||||
import org.jetbrains.kotlinx.multik.api.ones
|
||||
import org.jetbrains.kotlinx.multik.ndarray.data.DN
|
||||
import org.jetbrains.kotlinx.multik.ndarray.data.DataType
|
||||
import space.kscience.kmath.UnsafeKMathAPI
|
||||
import space.kscience.kmath.nd.*
|
||||
import space.kscience.kmath.multik.multikAlgebra
|
||||
import space.kscience.kmath.nd.BufferedFieldOpsND
|
||||
import space.kscience.kmath.nd.StructureND
|
||||
import space.kscience.kmath.nd.ndAlgebra
|
||||
import space.kscience.kmath.nd.one
|
||||
import space.kscience.kmath.nd4j.nd4j
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.structures.Buffer
|
||||
import space.kscience.kmath.tensors.core.DoubleTensor
|
||||
import space.kscience.kmath.tensors.core.one
|
||||
import space.kscience.kmath.tensors.core.tensorAlgebra
|
||||
@ -24,15 +28,11 @@ import space.kscience.kmath.viktor.viktorAlgebra
|
||||
|
||||
@State(Scope.Benchmark)
|
||||
internal class NDFieldBenchmark {
|
||||
|
||||
private companion object {
|
||||
private const val dim = 1000
|
||||
private const val n = 100
|
||||
private val shape = ShapeND(dim, dim)
|
||||
private val specializedField = Float64Field.ndAlgebra
|
||||
private val genericField = BufferedFieldOpsND(Float64Field)
|
||||
private val nd4jField = Float64Field.nd4j
|
||||
private val viktorField = Float64Field.viktorAlgebra
|
||||
@Benchmark
|
||||
fun autoFieldAdd(blackhole: Blackhole) = with(autoField) {
|
||||
var res: StructureND<Double> = one(shape)
|
||||
repeat(n) { res += 1.0 }
|
||||
blackhole.consume(res)
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
@ -50,7 +50,7 @@ internal class NDFieldBenchmark {
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun multikAdd(blackhole: Blackhole) = with(multikAlgebra) {
|
||||
fun multikAdd(blackhole: Blackhole) = with(multikField) {
|
||||
var res: StructureND<Double> = one(shape)
|
||||
repeat(n) { res += 1.0 }
|
||||
blackhole.consume(res)
|
||||
@ -77,10 +77,9 @@ internal class NDFieldBenchmark {
|
||||
blackhole.consume(res)
|
||||
}
|
||||
|
||||
@OptIn(UnsafeKMathAPI::class)
|
||||
@Benchmark
|
||||
fun multikInPlaceAdd(blackhole: Blackhole) = with(multikAlgebra) {
|
||||
val res = Multik.ones<Double, DN>(shape.asArray(), DataType.DoubleDataType).wrap()
|
||||
fun multikInPlaceAdd(blackhole: Blackhole) = with(DoubleField.multikAlgebra) {
|
||||
val res = Multik.ones<Double, DN>(shape, DataType.DoubleDataType).wrap()
|
||||
repeat(n) { res += 1.0 }
|
||||
blackhole.consume(res)
|
||||
}
|
||||
@ -92,5 +91,15 @@ internal class NDFieldBenchmark {
|
||||
// blackhole.consume(res)
|
||||
// }
|
||||
|
||||
|
||||
private companion object {
|
||||
private const val dim = 1000
|
||||
private const val n = 100
|
||||
private val shape = intArrayOf(dim, dim)
|
||||
private val autoField = BufferedFieldOpsND(DoubleField, Buffer.Companion::auto)
|
||||
private val specializedField = DoubleField.ndAlgebra
|
||||
private val genericField = BufferedFieldOpsND(DoubleField, Buffer.Companion::boxing)
|
||||
private val nd4jField = DoubleField.nd4j
|
||||
private val multikField = DoubleField.multikAlgebra
|
||||
private val viktorField = DoubleField.viktorAlgebra
|
||||
}
|
||||
}
|
||||
|
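The ND benchmarks above all repeat the same pattern: take a unit structure from an ND algebra and add a scalar to it `n` times. A minimal sketch of that pattern against the specialized `Float64Field.ndAlgebra` (the older side of this compare names the same field `DoubleField`); shape and repeat count are illustrative:

```kotlin
import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.nd.StructureND
import space.kscience.kmath.nd.ndAlgebra
import space.kscience.kmath.nd.one
import space.kscience.kmath.operations.Float64Field

fun main() = with(Float64Field.ndAlgebra) {
    val shape = ShapeND(10, 10)
    var res: StructureND<Double> = one(shape)
    repeat(100) { res += 1.0 } // each step adds 1.0 to every element
    // every element is now 101.0
    println(res)
}
```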
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -12,9 +12,7 @@ import kotlinx.benchmark.State
|
||||
import space.kscience.kmath.linear.linearSpace
|
||||
import space.kscience.kmath.linear.matrix
|
||||
import space.kscience.kmath.linear.symmetric
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.tensors.core.symEigJacobi
|
||||
import space.kscience.kmath.tensors.core.symEigSvd
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.tensors.core.tensorAlgebra
|
||||
import kotlin.random.Random
|
||||
|
||||
@ -24,16 +22,16 @@ internal class TensorAlgebraBenchmark {
|
||||
private val random = Random(12224)
|
||||
private const val dim = 30
|
||||
|
||||
private val matrix = Float64Field.linearSpace.matrix(dim, dim).symmetric { _, _ -> random.nextDouble() }
|
||||
private val matrix = DoubleField.linearSpace.matrix(dim, dim).symmetric { _, _ -> random.nextDouble() }
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun tensorSymEigSvd(blackhole: Blackhole) = with(Double.tensorAlgebra) {
|
||||
blackhole.consume(symEigSvd(matrix, 1e-10))
|
||||
blackhole.consume(matrix.symEigSvd(1e-10))
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun tensorSymEigJacobi(blackhole: Blackhole) = with(Double.tensorAlgebra) {
|
||||
blackhole.consume(symEigJacobi(matrix, 50, 1e-10))
|
||||
blackhole.consume(matrix.symEigJacobi(50, 1e-10))
|
||||
}
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -10,19 +10,25 @@ import kotlinx.benchmark.Blackhole
|
||||
import kotlinx.benchmark.Scope
|
||||
import kotlinx.benchmark.State
|
||||
import org.jetbrains.bio.viktor.F64Array
|
||||
import space.kscience.kmath.nd.ShapeND
|
||||
import space.kscience.kmath.nd.StructureND
|
||||
import space.kscience.kmath.nd.ndAlgebra
|
||||
import space.kscience.kmath.nd.one
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.nd.*
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.structures.Buffer
|
||||
import space.kscience.kmath.viktor.ViktorFieldND
|
||||
|
||||
@State(Scope.Benchmark)
|
||||
internal class ViktorBenchmark {
|
||||
@Benchmark
|
||||
fun automaticFieldAddition(blackhole: Blackhole) {
|
||||
with(autoField) {
|
||||
var res: StructureND<Double> = one(shape)
|
||||
repeat(n) { res += 1.0 }
|
||||
blackhole.consume(res)
|
||||
}
|
||||
}
|
||||
|
||||
@Benchmark
|
||||
fun doubleFieldAddition(blackhole: Blackhole) {
|
||||
with(doubleField) {
|
||||
fun realFieldAddition(blackhole: Blackhole) {
|
||||
with(realField) {
|
||||
var res: StructureND<Double> = one(shape)
|
||||
repeat(n) { res += 1.0 }
|
||||
blackhole.consume(res)
|
||||
@ -49,10 +55,11 @@ internal class ViktorBenchmark {
|
||||
private companion object {
|
||||
private const val dim = 1000
|
||||
private const val n = 100
|
||||
private val shape = ShapeND(dim, dim)
|
||||
private val shape = Shape(dim, dim)
|
||||
|
||||
// automatically build context most suited for given type.
|
||||
private val doubleField = Float64Field.ndAlgebra
|
||||
private val autoField = BufferedFieldOpsND(DoubleField, Buffer.Companion::auto)
|
||||
private val realField = DoubleField.ndAlgebra
|
||||
private val viktorField = ViktorFieldND(dim, dim)
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -10,17 +10,19 @@ import kotlinx.benchmark.Blackhole
|
||||
import kotlinx.benchmark.Scope
|
||||
import kotlinx.benchmark.State
|
||||
import org.jetbrains.bio.viktor.F64Array
|
||||
import space.kscience.kmath.nd.ShapeND
|
||||
import space.kscience.kmath.nd.BufferedFieldOpsND
|
||||
import space.kscience.kmath.nd.Shape
|
||||
import space.kscience.kmath.nd.ndAlgebra
|
||||
import space.kscience.kmath.nd.one
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.structures.Buffer
|
||||
import space.kscience.kmath.viktor.ViktorFieldND
|
||||
|
||||
@State(Scope.Benchmark)
|
||||
internal class ViktorLogBenchmark {
|
||||
@Benchmark
|
||||
fun realFieldLog(blackhole: Blackhole) {
|
||||
with(doubleField) {
|
||||
with(realField) {
|
||||
val fortyTwo = structureND(shape) { 42.0 }
|
||||
var res = one(shape)
|
||||
repeat(n) { res = ln(fortyTwo) }
|
||||
@ -49,10 +51,11 @@ internal class ViktorLogBenchmark {
|
||||
private companion object {
|
||||
private const val dim = 1000
|
||||
private const val n = 100
|
||||
private val shape = ShapeND(dim, dim)
|
||||
private val shape = Shape(dim, dim)
|
||||
|
||||
// automatically build context most suited for given type.
|
||||
private val doubleField = Float64Field.ndAlgebra
|
||||
private val autoField = BufferedFieldOpsND(DoubleField, Buffer.Companion::auto)
|
||||
private val realField = DoubleField.ndAlgebra
|
||||
private val viktorField = ViktorFieldND(dim, dim)
|
||||
}
|
||||
}
|
||||
|
@ -1,11 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.benchmarks
|
||||
|
||||
import org.jetbrains.kotlinx.multik.default.DefaultEngine
|
||||
import space.kscience.kmath.multik.MultikDoubleAlgebra
|
||||
|
||||
val multikAlgebra = MultikDoubleAlgebra(DefaultEngine())
|
@ -1,13 +1,8 @@
|
||||
import space.kscience.gradle.useApache2Licence
|
||||
import space.kscience.gradle.useSPCTeam
|
||||
|
||||
plugins {
|
||||
id("space.kscience.gradle.project")
|
||||
id("org.jetbrains.kotlinx.kover") version "0.7.6"
|
||||
id("ru.mipt.npm.gradle.project")
|
||||
id("org.jetbrains.kotlinx.kover") version "0.5.0"
|
||||
}
|
||||
|
||||
val attributesVersion by extra("0.2.0")
|
||||
|
||||
allprojects {
|
||||
repositories {
|
||||
maven("https://repo.kotlin.link")
|
||||
@ -16,13 +11,13 @@ allprojects {
|
||||
}
|
||||
|
||||
group = "space.kscience"
|
||||
version = "0.4.0"
|
||||
version = "0.3.0-dev-20"
|
||||
}
|
||||
|
||||
subprojects {
|
||||
if (name.startsWith("kmath")) apply<MavenPublishPlugin>()
|
||||
|
||||
plugins.withId("org.jetbrains.dokka") {
|
||||
plugins.withId("org.jetbrains.dokka"){
|
||||
tasks.withType<org.jetbrains.dokka.gradle.DokkaTaskPartial> {
|
||||
dependsOn(tasks["assemble"])
|
||||
|
||||
@ -36,7 +31,7 @@ subprojects {
|
||||
localDirectory.set(kotlinDir)
|
||||
|
||||
remoteUrl.set(
|
||||
uri("https://github.com/SciProgCentre/kmath/tree/master/${this@subprojects.name}/$kotlinDirPath").toURL()
|
||||
java.net.URL("https://github.com/mipt-npm/kmath/tree/master/${this@subprojects.name}/$kotlinDirPath")
|
||||
)
|
||||
}
|
||||
|
||||
@ -61,14 +56,9 @@ subprojects {
|
||||
readme.readmeTemplate = file("docs/templates/README-TEMPLATE.md")
|
||||
|
||||
ksciencePublish {
|
||||
pom("https://github.com/SciProgCentre/kmath") {
|
||||
useApache2Licence()
|
||||
useSPCTeam()
|
||||
}
|
||||
repository("spc", "https://maven.sciprog.center/kscience")
|
||||
sonatype("https://oss.sonatype.org")
|
||||
github("kmath", addToRelease = false)
|
||||
space()
|
||||
sonatype()
|
||||
}
|
||||
|
||||
apiValidation.nonPublicMarkers.add("space.kscience.kmath.UnstableKMathAPI")
|
||||
|
||||
val multikVersion by extra("0.2.3")
|
||||
apiValidation.nonPublicMarkers.add("space.kscience.kmath.misc.UnstableKMathAPI")
|
||||
|
@ -1,8 +1,11 @@
|
||||
plugins {
|
||||
`kotlin-dsl`
|
||||
`version-catalog`
|
||||
alias(miptNpmLibs.plugins.kotlin.plugin.serialization)
|
||||
}
|
||||
|
||||
java.targetCompatibility = JavaVersion.VERSION_11
|
||||
|
||||
repositories {
|
||||
mavenLocal()
|
||||
maven("https://repo.kotlin.link")
|
||||
@ -10,25 +13,19 @@ repositories {
|
||||
gradlePluginPortal()
|
||||
}
|
||||
|
||||
val toolsVersion = spclibs.versions.tools.get()
|
||||
val kotlinVersion = spclibs.versions.kotlin.asProvider().get()
|
||||
val benchmarksVersion = spclibs.versions.kotlinx.benchmark.get()
|
||||
val toolsVersion: String by extra
|
||||
val kotlinVersion = miptNpmLibs.versions.kotlin.asProvider().get()
|
||||
val benchmarksVersion = miptNpmLibs.versions.kotlinx.benchmark.get()
|
||||
|
||||
dependencies {
|
||||
api("space.kscience:gradle-tools:$toolsVersion")
|
||||
api("ru.mipt.npm:gradle-tools:$toolsVersion")
|
||||
//plugins form benchmarks
|
||||
api("org.jetbrains.kotlinx:kotlinx-benchmark-plugin:$benchmarksVersion")
|
||||
//api("org.jetbrains.kotlin:kotlin-allopen:$kotlinVersion")
|
||||
api("org.jetbrains.kotlin:kotlin-allopen:$kotlinVersion")
|
||||
//to be used inside build-script only
|
||||
//implementation(spclibs.kotlinx.serialization.json)
|
||||
implementation("com.fasterxml.jackson.module:jackson-module-kotlin:2.14.+")
|
||||
implementation(miptNpmLibs.kotlinx.serialization.json)
|
||||
}
|
||||
|
||||
kotlin {
|
||||
jvmToolchain {
|
||||
languageVersion.set(JavaLanguageVersion.of(11))
|
||||
}
|
||||
sourceSets.all {
|
||||
languageSettings.optIn("kotlin.OptIn")
|
||||
}
|
||||
kotlin.sourceSets.all {
|
||||
languageSettings.optIn("kotlin.OptIn")
|
||||
}
|
||||
|
7
buildSrc/gradle.properties
Normal file
@ -0,0 +1,7 @@
|
||||
#
|
||||
# Copyright 2018-2021 KMath contributors.
|
||||
# Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
#
|
||||
|
||||
kotlin.code.style=official
|
||||
toolsVersion=0.11.2-kotlin-1.6.10
|
@ -5,24 +5,9 @@
|
||||
|
||||
enableFeaturePreview("TYPESAFE_PROJECT_ACCESSORS")
|
||||
|
||||
plugins {
|
||||
id("org.gradle.toolchains.foojay-resolver-convention") version "0.8.0"
|
||||
}
|
||||
|
||||
dependencyResolutionManagement {
|
||||
val projectProperties = java.util.Properties()
|
||||
file("../gradle.properties").inputStream().use {
|
||||
projectProperties.load(it)
|
||||
}
|
||||
val toolsVersion: String by extra
|
||||
|
||||
projectProperties.forEach { key, value ->
|
||||
extra.set(key.toString(), value)
|
||||
}
|
||||
|
||||
|
||||
val toolsVersion: String = projectProperties["toolsVersion"].toString()
|
||||
|
||||
@Suppress("UnstableApiUsage")
|
||||
repositories {
|
||||
mavenLocal()
|
||||
maven("https://repo.kotlin.link")
|
||||
@ -31,8 +16,8 @@ dependencyResolutionManagement {
|
||||
}
|
||||
|
||||
versionCatalogs {
|
||||
create("spclibs") {
|
||||
from("space.kscience:version-catalog:$toolsVersion")
|
||||
create("miptNpmLibs") {
|
||||
from("ru.mipt.npm:version-catalog:$toolsVersion")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,10 +1,13 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.benchmarks
|
||||
|
||||
import kotlinx.serialization.Serializable
|
||||
|
||||
@Serializable
|
||||
data class JmhReport(
|
||||
val jmhVersion: String,
|
||||
val benchmark: String,
|
||||
@ -34,6 +37,7 @@ data class JmhReport(
|
||||
val scoreUnit: String
|
||||
}
|
||||
|
||||
@Serializable
|
||||
data class PrimaryMetric(
|
||||
override val score: Double,
|
||||
override val scoreError: Double,
|
||||
@ -44,6 +48,7 @@ data class JmhReport(
|
||||
val rawData: List<List<Double>>? = null,
|
||||
) : Metric
|
||||
|
||||
@Serializable
|
||||
data class SecondaryMetric(
|
||||
override val score: Double,
|
||||
override val scoreError: Double,
|
||||
|
@ -1,22 +1,21 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.benchmarks
|
||||
|
||||
import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
|
||||
import com.fasterxml.jackson.module.kotlin.readValue
|
||||
import kotlinx.benchmark.gradle.BenchmarksExtension
|
||||
import kotlinx.serialization.decodeFromString
|
||||
import kotlinx.serialization.json.Json
|
||||
import org.gradle.api.Project
|
||||
import space.kscience.gradle.KScienceReadmeExtension
|
||||
import ru.mipt.npm.gradle.KScienceReadmeExtension
|
||||
import java.time.LocalDateTime
|
||||
import java.time.ZoneId
|
||||
import java.time.format.DateTimeFormatter
|
||||
import java.time.format.DateTimeFormatterBuilder
|
||||
import java.time.format.SignStyle
|
||||
import java.time.temporal.ChronoField.*
|
||||
import java.util.*
|
||||
|
||||
private val ISO_DATE_TIME: DateTimeFormatter = DateTimeFormatterBuilder().run {
|
||||
parseCaseInsensitive()
|
||||
@ -46,25 +45,23 @@ private val ISO_DATE_TIME: DateTimeFormatter = DateTimeFormatterBuilder().run {
|
||||
|
||||
private fun noun(number: Number, singular: String, plural: String) = if (number.toLong() == 1L) singular else plural
|
||||
|
||||
private val jsonMapper = jacksonObjectMapper()
|
||||
|
||||
fun Project.addBenchmarkProperties() {
|
||||
val benchmarksProject = this
|
||||
rootProject.subprojects.forEach { p ->
|
||||
p.extensions.findByType(KScienceReadmeExtension::class.java)?.run {
|
||||
benchmarksProject.extensions.findByType(BenchmarksExtension::class.java)?.configurations?.forEach { cfg ->
|
||||
property("benchmark${cfg.name.replaceFirstChar { if (it.isLowerCase()) it.titlecase(Locale.getDefault()) else it.toString() }}") {
|
||||
val launches = benchmarksProject.layout.buildDirectory.dir("reports/benchmarks/${cfg.name}").get()
|
||||
property("benchmark${cfg.name.capitalize()}") {
|
||||
val launches = benchmarksProject.buildDir.resolve("reports/benchmarks/${cfg.name}")
|
||||
|
||||
val resDirectory = launches.files().maxByOrNull {
|
||||
val resDirectory = launches.listFiles()?.maxByOrNull {
|
||||
LocalDateTime.parse(it.name, ISO_DATE_TIME).atZone(ZoneId.systemDefault()).toInstant()
|
||||
}
|
||||
|
||||
if (resDirectory == null || !(resDirectory.resolve("jvm.json")).exists()) {
|
||||
"> **Can't find appropriate benchmark data. Try generating readme files after running benchmarks**."
|
||||
} else {
|
||||
val reports: List<JmhReport> =
|
||||
jsonMapper.readValue<List<JmhReport>>(resDirectory.resolve("jvm.json"))
|
||||
val reports =
|
||||
Json.decodeFromString<List<JmhReport>>(resDirectory.resolve("jvm.json").readText())
|
||||
|
||||
buildString {
|
||||
appendLine("<details>")
|
||||
@ -77,20 +74,16 @@ fun Project.addBenchmarkProperties() {
|
||||
appendLine("* Run on ${first.vmName} (build ${first.vmVersion}) with Java process:")
|
||||
appendLine()
|
||||
appendLine("```")
|
||||
appendLine(
|
||||
"${first.jvm} ${
|
||||
first.jvmArgs.joinToString(" ")
|
||||
}"
|
||||
)
|
||||
appendLine("${first.jvm} ${
|
||||
first.jvmArgs.joinToString(" ")
|
||||
}")
|
||||
appendLine("```")
|
||||
|
||||
appendLine(
|
||||
"* JMH ${first.jmhVersion} was used in `${first.mode}` mode with ${first.warmupIterations} warmup ${
|
||||
noun(first.warmupIterations, "iteration", "iterations")
|
||||
} by ${first.warmupTime} and ${first.measurementIterations} measurement ${
|
||||
noun(first.measurementIterations, "iteration", "iterations")
|
||||
} by ${first.measurementTime}."
|
||||
)
|
||||
appendLine("* JMH ${first.jmhVersion} was used in `${first.mode}` mode with ${first.warmupIterations} warmup ${
|
||||
noun(first.warmupIterations, "iteration", "iterations")
|
||||
} by ${first.warmupTime} and ${first.measurementIterations} measurement ${
|
||||
noun(first.measurementIterations, "iteration", "iterations")
|
||||
} by ${first.measurementTime}.")
|
||||
|
||||
appendLine()
|
||||
appendLine("| Benchmark | Score |")
|
||||
|
@ -0,0 +1,425 @@
|
||||
/*
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@file:Suppress("KDocUnresolvedReference")
|
||||
|
||||
package space.kscience.kmath.ejml.codegen
|
||||
|
||||
import org.intellij.lang.annotations.Language
|
||||
import java.io.File
|
||||
|
||||
private fun Appendable.appendEjmlVector(type: String, ejmlMatrixType: String) {
|
||||
@Language("kotlin") val text = """/**
|
||||
* [EjmlVector] specialization for [$type].
|
||||
*/
|
||||
public class Ejml${type}Vector<out M : $ejmlMatrixType>(override val origin: M) : EjmlVector<$type, M>(origin) {
|
||||
init {
|
||||
require(origin.numRows == 1) { "The origin matrix must have only one row to form a vector" }
|
||||
}
|
||||
|
||||
override operator fun get(index: Int): $type = origin[0, index]
|
||||
}"""
|
||||
appendLine(text)
|
||||
appendLine()
|
||||
}
|
||||
|
||||
private fun Appendable.appendEjmlMatrix(type: String, ejmlMatrixType: String) {
|
||||
val text = """/**
|
||||
* [EjmlMatrix] specialization for [$type].
|
||||
*/
|
||||
public class Ejml${type}Matrix<out M : $ejmlMatrixType>(override val origin: M) : EjmlMatrix<$type, M>(origin) {
|
||||
override operator fun get(i: Int, j: Int): $type = origin[i, j]
|
||||
}"""
|
||||
appendLine(text)
|
||||
appendLine()
|
||||
}
|
||||
|
||||
private fun Appendable.appendEjmlLinearSpace(
|
||||
type: String,
|
||||
kmathAlgebra: String,
|
||||
ejmlMatrixParentTypeMatrix: String,
|
||||
ejmlMatrixType: String,
|
||||
ejmlMatrixDenseType: String,
|
||||
ops: String,
|
||||
denseOps: String,
|
||||
isDense: Boolean,
|
||||
) {
|
||||
@Language("kotlin") val text = """/**
|
||||
* [EjmlLinearSpace] implementation based on [CommonOps_$ops], [DecompositionFactory_${ops}] operations and
|
||||
* [${ejmlMatrixType}] matrices.
|
||||
*/
|
||||
public object EjmlLinearSpace${ops} : EjmlLinearSpace<${type}, ${kmathAlgebra}, $ejmlMatrixType>() {
|
||||
/**
|
||||
* The [${kmathAlgebra}] reference.
|
||||
*/
|
||||
override val elementAlgebra: $kmathAlgebra get() = $kmathAlgebra
|
||||
|
||||
@Suppress("UNCHECKED_CAST")
|
||||
override fun Matrix<${type}>.toEjml(): Ejml${type}Matrix<${ejmlMatrixType}> = when {
|
||||
this is Ejml${type}Matrix<*> && origin is $ejmlMatrixType -> this as Ejml${type}Matrix<${ejmlMatrixType}>
|
||||
else -> buildMatrix(rowNum, colNum) { i, j -> get(i, j) }
|
||||
}
|
||||
|
||||
@Suppress("UNCHECKED_CAST")
|
||||
override fun Point<${type}>.toEjml(): Ejml${type}Vector<${ejmlMatrixType}> = when {
|
||||
this is Ejml${type}Vector<*> && origin is $ejmlMatrixType -> this as Ejml${type}Vector<${ejmlMatrixType}>
|
||||
else -> Ejml${type}Vector(${ejmlMatrixType}(size, 1).also {
|
||||
(0 until it.numRows).forEach { row -> it[row, 0] = get(row) }
|
||||
})
|
||||
}
|
||||
|
||||
override fun buildMatrix(
|
||||
rows: Int,
|
||||
columns: Int,
|
||||
initializer: ${kmathAlgebra}.(i: Int, j: Int) -> ${type},
|
||||
): Ejml${type}Matrix<${ejmlMatrixType}> = ${ejmlMatrixType}(rows, columns).also {
|
||||
(0 until rows).forEach { row ->
|
||||
(0 until columns).forEach { col -> it[row, col] = elementAlgebra.initializer(row, col) }
|
||||
}
|
||||
}.wrapMatrix()
|
||||
|
||||
override fun buildVector(
|
||||
size: Int,
|
||||
initializer: ${kmathAlgebra}.(Int) -> ${type},
|
||||
): Ejml${type}Vector<${ejmlMatrixType}> = Ejml${type}Vector(${ejmlMatrixType}(size, 1).also {
|
||||
(0 until it.numRows).forEach { row -> it[row, 0] = elementAlgebra.initializer(row) }
|
||||
})
|
||||
|
||||
private fun <T : ${ejmlMatrixParentTypeMatrix}> T.wrapMatrix() = Ejml${type}Matrix(this)
|
||||
private fun <T : ${ejmlMatrixParentTypeMatrix}> T.wrapVector() = Ejml${type}Vector(this)
|
||||
|
||||
override fun Matrix<${type}>.unaryMinus(): Matrix<${type}> = this * elementAlgebra { -one }
|
||||
|
||||
override fun Matrix<${type}>.dot(other: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
|
||||
val out = ${ejmlMatrixType}(1, 1)
|
||||
CommonOps_${ops}.mult(toEjml().origin, other.toEjml().origin, out)
|
||||
return out.wrapMatrix()
|
||||
}
|
||||
|
||||
override fun Matrix<${type}>.dot(vector: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
|
||||
val out = ${ejmlMatrixType}(1, 1)
|
||||
CommonOps_${ops}.mult(toEjml().origin, vector.toEjml().origin, out)
|
||||
return out.wrapVector()
|
||||
}
|
||||
|
||||
override operator fun Matrix<${type}>.minus(other: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
|
||||
val out = ${ejmlMatrixType}(1, 1)
|
||||
|
||||
CommonOps_${ops}.add(
|
||||
elementAlgebra.one,
|
||||
toEjml().origin,
|
||||
elementAlgebra { -one },
|
||||
other.toEjml().origin,
|
||||
out,${
|
||||
if (isDense) "" else
|
||||
"""
|
||||
null,
|
||||
null,"""
|
||||
}
|
||||
)
|
||||
|
||||
return out.wrapMatrix()
|
||||
}
|
||||
|
||||
override operator fun Matrix<${type}>.times(value: ${type}): Ejml${type}Matrix<${ejmlMatrixType}> {
|
||||
val res = ${ejmlMatrixType}(1, 1)
|
||||
CommonOps_${ops}.scale(value, toEjml().origin, res)
|
||||
return res.wrapMatrix()
|
||||
}
|
||||
|
||||
override fun Point<${type}>.unaryMinus(): Ejml${type}Vector<${ejmlMatrixType}> {
|
||||
val res = ${ejmlMatrixType}(1, 1)
|
||||
CommonOps_${ops}.changeSign(toEjml().origin, res)
|
||||
return res.wrapVector()
|
||||
}
|
||||
|
||||
override fun Matrix<${type}>.plus(other: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
|
||||
val out = ${ejmlMatrixType}(1, 1)
|
||||
|
||||
CommonOps_${ops}.add(
|
||||
elementAlgebra.one,
|
||||
toEjml().origin,
|
||||
elementAlgebra.one,
|
||||
other.toEjml().origin,
|
||||
out,${
|
||||
if (isDense) "" else
|
||||
"""
|
||||
null,
|
||||
null,"""
|
||||
}
|
||||
)
|
||||
|
||||
return out.wrapMatrix()
|
||||
}
|
||||
|
||||
override fun Point<${type}>.plus(other: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
|
||||
val out = ${ejmlMatrixType}(1, 1)
|
||||
|
||||
CommonOps_${ops}.add(
|
||||
elementAlgebra.one,
|
||||
toEjml().origin,
|
||||
elementAlgebra.one,
|
||||
other.toEjml().origin,
|
||||
out,${
|
||||
if (isDense) "" else
|
||||
"""
|
||||
null,
|
||||
null,"""
|
||||
}
|
||||
)
|
||||
|
||||
return out.wrapVector()
|
||||
}
|
||||
|
||||
override fun Point<${type}>.minus(other: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
|
||||
val out = ${ejmlMatrixType}(1, 1)
|
||||
|
||||
CommonOps_${ops}.add(
|
||||
elementAlgebra.one,
|
||||
toEjml().origin,
|
||||
elementAlgebra { -one },
|
||||
other.toEjml().origin,
|
||||
out,${
|
||||
if (isDense) "" else
|
||||
"""
|
||||
null,
|
||||
null,"""
|
||||
}
|
||||
)
|
||||
|
||||
return out.wrapVector()
|
||||
}
|
||||
|
||||
override fun ${type}.times(m: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> = m * this
|
||||
|
||||
override fun Point<${type}>.times(value: ${type}): Ejml${type}Vector<${ejmlMatrixType}> {
|
||||
val res = ${ejmlMatrixType}(1, 1)
|
||||
CommonOps_${ops}.scale(value, toEjml().origin, res)
|
||||
return res.wrapVector()
|
||||
}
|
||||
|
||||
override fun ${type}.times(v: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> = v * this
|
||||
|
||||
@UnstableKMathAPI
|
||||
override fun <F : StructureFeature> computeFeature(structure: Matrix<${type}>, type: KClass<out F>): F? {
|
||||
structure.getFeature(type)?.let { return it }
|
||||
val origin = structure.toEjml().origin
|
||||
|
||||
return when (type) {
|
||||
${
|
||||
if (isDense)
|
||||
""" InverseMatrixFeature::class -> object : InverseMatrixFeature<${type}> {
|
||||
override val inverse: Matrix<${type}> by lazy {
|
||||
val res = origin.copy()
|
||||
CommonOps_${ops}.invert(res)
|
||||
res.wrapMatrix()
|
||||
}
|
||||
}
|
||||
|
||||
DeterminantFeature::class -> object : DeterminantFeature<${type}> {
|
||||
override val determinant: $type by lazy { CommonOps_${ops}.det(origin) }
|
||||
}
|
||||
|
||||
SingularValueDecompositionFeature::class -> object : SingularValueDecompositionFeature<${type}> {
|
||||
private val svd by lazy {
|
||||
DecompositionFactory_${ops}.svd(origin.numRows, origin.numCols, true, true, false)
|
||||
.apply { decompose(origin.copy()) }
|
||||
}
|
||||
|
||||
override val u: Matrix<${type}> by lazy { svd.getU(null, false).wrapMatrix() }
|
||||
override val s: Matrix<${type}> by lazy { svd.getW(null).wrapMatrix() }
|
||||
override val v: Matrix<${type}> by lazy { svd.getV(null, false).wrapMatrix() }
|
||||
override val singularValues: Point<${type}> by lazy { ${type}Buffer(svd.singularValues) }
|
||||
}
|
||||
|
||||
QRDecompositionFeature::class -> object : QRDecompositionFeature<${type}> {
|
||||
private val qr by lazy {
|
||||
DecompositionFactory_${ops}.qr().apply { decompose(origin.copy()) }
|
||||
}
|
||||
|
||||
override val q: Matrix<${type}> by lazy {
|
||||
qr.getQ(null, false).wrapMatrix().withFeature(OrthogonalFeature)
|
||||
}
|
||||
|
||||
override val r: Matrix<${type}> by lazy { qr.getR(null, false).wrapMatrix().withFeature(UFeature) }
|
||||
}
|
||||
|
||||
CholeskyDecompositionFeature::class -> object : CholeskyDecompositionFeature<${type}> {
|
||||
override val l: Matrix<${type}> by lazy {
|
||||
val cholesky =
|
||||
DecompositionFactory_${ops}.chol(structure.rowNum, true).apply { decompose(origin.copy()) }
|
||||
|
||||
cholesky.getT(null).wrapMatrix().withFeature(LFeature)
|
||||
}
|
||||
}
|
||||
|
||||
LupDecompositionFeature::class -> object : LupDecompositionFeature<${type}> {
|
||||
private val lup by lazy {
|
||||
DecompositionFactory_${ops}.lu(origin.numRows, origin.numCols).apply { decompose(origin.copy()) }
|
||||
}
|
||||
|
||||
override val l: Matrix<${type}> by lazy {
|
||||
lup.getLower(null).wrapMatrix().withFeature(LFeature)
|
||||
}
|
||||
|
||||
override val u: Matrix<${type}> by lazy {
|
||||
lup.getUpper(null).wrapMatrix().withFeature(UFeature)
|
||||
}
|
||||
|
||||
override val p: Matrix<${type}> by lazy { lup.getRowPivot(null).wrapMatrix() }
|
||||
}""" else """ QRDecompositionFeature::class -> object : QRDecompositionFeature<$type> {
|
||||
private val qr by lazy {
|
||||
DecompositionFactory_${ops}.qr(FillReducing.NONE).apply { decompose(origin.copy()) }
|
||||
}
|
||||
|
||||
override val q: Matrix<${type}> by lazy {
|
||||
qr.getQ(null, false).wrapMatrix().withFeature(OrthogonalFeature)
|
||||
}
|
||||
|
||||
override val r: Matrix<${type}> by lazy { qr.getR(null, false).wrapMatrix().withFeature(UFeature) }
|
||||
}
|
||||
|
||||
CholeskyDecompositionFeature::class -> object : CholeskyDecompositionFeature<${type}> {
|
||||
override val l: Matrix<${type}> by lazy {
|
||||
val cholesky =
|
||||
DecompositionFactory_${ops}.cholesky().apply { decompose(origin.copy()) }
|
||||
|
||||
(cholesky.getT(null) as ${ejmlMatrixParentTypeMatrix}).wrapMatrix().withFeature(LFeature)
|
||||
}
|
||||
}
|
||||
|
||||
LUDecompositionFeature::class, DeterminantFeature::class, InverseMatrixFeature::class -> object :
|
||||
LUDecompositionFeature<${type}>, DeterminantFeature<${type}>, InverseMatrixFeature<${type}> {
|
||||
private val lu by lazy {
|
||||
DecompositionFactory_${ops}.lu(FillReducing.NONE).apply { decompose(origin.copy()) }
|
||||
}
|
||||
|
||||
override val l: Matrix<${type}> by lazy {
|
||||
lu.getLower(null).wrapMatrix().withFeature(LFeature)
|
||||
}
|
||||
|
||||
override val u: Matrix<${type}> by lazy {
|
||||
lu.getUpper(null).wrapMatrix().withFeature(UFeature)
|
||||
}
|
||||
|
||||
override val inverse: Matrix<${type}> by lazy {
|
||||
var a = origin
|
||||
val inverse = ${ejmlMatrixDenseType}(1, 1)
|
||||
val solver = LinearSolverFactory_${ops}.lu(FillReducing.NONE)
|
||||
if (solver.modifiesA()) a = a.copy()
|
||||
val i = CommonOps_${denseOps}.identity(a.numRows)
|
||||
solver.solve(i, inverse)
|
||||
inverse.wrapMatrix()
|
||||
}
|
||||
|
||||
override val determinant: $type by lazy { elementAlgebra.number(lu.computeDeterminant().real) }
|
||||
}"""
|
||||
}
|
||||
|
||||
else -> null
|
||||
}?.let(type::cast)
|
||||
}
|
||||
|
||||
/**
|
||||
* Solves for *x* in the following equation: *x = [a] <sup>-1</sup> · [b]*.
|
||||
*
|
||||
* @param a the base matrix.
|
||||
* @param b n by p matrix.
|
||||
* @return the solution for *x* that is n by p.
|
||||
*/
|
||||
public fun solve(a: Matrix<${type}>, b: Matrix<${type}>): Ejml${type}Matrix<${ejmlMatrixType}> {
|
||||
val res = ${ejmlMatrixType}(1, 1)
|
||||
CommonOps_${ops}.solve(${ejmlMatrixType}(a.toEjml().origin), ${ejmlMatrixType}(b.toEjml().origin), res)
|
||||
return res.wrapMatrix()
|
||||
}
|
||||
|
||||
/**
|
||||
* Solves for *x* in the following equation: *x = [a] <sup>-1</sup> · [b]*.
|
||||
*
|
||||
* @param a the base matrix.
|
||||
* @param b n by p vector.
|
||||
* @return the solution for *x* that is n by p.
|
||||
*/
|
||||
public fun solve(a: Matrix<${type}>, b: Point<${type}>): Ejml${type}Vector<${ejmlMatrixType}> {
|
||||
val res = ${ejmlMatrixType}(1, 1)
|
||||
CommonOps_${ops}.solve(${ejmlMatrixType}(a.toEjml().origin), ${ejmlMatrixType}(b.toEjml().origin), res)
|
||||
return Ejml${type}Vector(res)
|
||||
}
|
||||
}"""
|
||||
appendLine(text)
|
||||
appendLine()
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Generates routine EJML classes.
|
||||
*/
|
||||
fun ejmlCodegen(outputFile: String): Unit = File(outputFile).run {
|
||||
parentFile.mkdirs()
|
||||
|
||||
writer().use {
|
||||
it.appendLine("/*")
|
||||
it.appendLine(" * Copyright 2018-2021 KMath contributors.")
|
||||
it.appendLine(" * Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.")
|
||||
it.appendLine(" */")
|
||||
it.appendLine()
|
||||
it.appendLine("/* This file is generated with buildSrc/src/main/kotlin/space/kscience/kmath/ejml/codegen/ejmlCodegen.kt */")
|
||||
it.appendLine()
|
||||
it.appendLine("package space.kscience.kmath.ejml")
|
||||
it.appendLine()
|
||||
it.appendLine("""import org.ejml.data.*
|
||||
import org.ejml.dense.row.CommonOps_DDRM
|
||||
import org.ejml.dense.row.CommonOps_FDRM
|
||||
import org.ejml.dense.row.factory.DecompositionFactory_DDRM
|
||||
import org.ejml.dense.row.factory.DecompositionFactory_FDRM
|
||||
import org.ejml.sparse.FillReducing
|
||||
import org.ejml.sparse.csc.CommonOps_DSCC
|
||||
import org.ejml.sparse.csc.CommonOps_FSCC
|
||||
import org.ejml.sparse.csc.factory.DecompositionFactory_DSCC
|
||||
import org.ejml.sparse.csc.factory.DecompositionFactory_FSCC
|
||||
import org.ejml.sparse.csc.factory.LinearSolverFactory_DSCC
|
||||
import org.ejml.sparse.csc.factory.LinearSolverFactory_FSCC
|
||||
import space.kscience.kmath.linear.*
|
||||
import space.kscience.kmath.linear.Matrix
|
||||
import space.kscience.kmath.misc.UnstableKMathAPI
|
||||
import space.kscience.kmath.nd.StructureFeature
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.operations.FloatField
|
||||
import space.kscience.kmath.operations.invoke
|
||||
import space.kscience.kmath.structures.DoubleBuffer
|
||||
import space.kscience.kmath.structures.FloatBuffer
|
||||
import kotlin.reflect.KClass
|
||||
import kotlin.reflect.cast""")
|
||||
it.appendLine()
|
||||
it.appendEjmlVector("Double", "DMatrix")
|
||||
it.appendEjmlVector("Float", "FMatrix")
|
||||
it.appendEjmlMatrix("Double", "DMatrix")
|
||||
it.appendEjmlMatrix("Float", "FMatrix")
|
||||
it.appendEjmlLinearSpace("Double", "DoubleField", "DMatrix", "DMatrixRMaj", "DMatrixRMaj", "DDRM", "DDRM", true)
|
||||
it.appendEjmlLinearSpace("Float", "FloatField", "FMatrix", "FMatrixRMaj", "FMatrixRMaj", "FDRM", "FDRM", true)
|
||||
|
||||
it.appendEjmlLinearSpace(
|
||||
type = "Double",
|
||||
kmathAlgebra = "DoubleField",
|
||||
ejmlMatrixParentTypeMatrix = "DMatrix",
|
||||
ejmlMatrixType = "DMatrixSparseCSC",
|
||||
ejmlMatrixDenseType = "DMatrixRMaj",
|
||||
ops = "DSCC",
|
||||
denseOps = "DDRM",
|
||||
isDense = false,
|
||||
)
|
||||
|
||||
it.appendEjmlLinearSpace(
|
||||
type = "Float",
|
||||
kmathAlgebra = "FloatField",
|
||||
ejmlMatrixParentTypeMatrix = "FMatrix",
|
||||
ejmlMatrixType = "FMatrixSparseCSC",
|
||||
ejmlMatrixDenseType = "FMatrixRMaj",
|
||||
ops = "FSCC",
|
||||
denseOps = "FDRM",
|
||||
isDense = false,
|
||||
)
|
||||
}
|
||||
}
|
@ -17,4 +17,4 @@ own `MemoryBuffer.create()` factory).

## Buffer performance

One should avoid using default boxing buffer wherever it is possible. Try to use primitive buffers or memory buffers
instead.
instead .

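To make the advice concrete, a minimal sketch contrasting a specialized primitive buffer with the generic factory; `DoubleBuffer` (named `Float64Buffer` on the newer side of this compare) and the `Buffer.auto` factory are taken from the benchmark sources earlier in this diff, and the exact factory signatures may differ between the two versions:

```kotlin
import space.kscience.kmath.structures.Buffer
import space.kscience.kmath.structures.DoubleBuffer

fun main() {
    val size = 1000

    // Specialized, unboxed storage backed by a DoubleArray - preferred for primitives.
    val primitive: DoubleBuffer = DoubleBuffer(size) { it.toDouble() }

    // Buffer.auto lets the factory pick a suitable implementation for the element type;
    // plain boxing buffers should be reserved for reference types.
    val auto: Buffer<Double> = Buffer.auto(size) { it.toDouble() }

    println(primitive[size - 1] + auto[0])
}
```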
@ -1,35 +1,27 @@
# Coding Conventions

Generally, KMath code follows
general [Kotlin coding conventions](https://kotlinlang.org/docs/reference/coding-conventions.html), but with a number of
small changes and clarifications.
Generally, KMath code follows general [Kotlin coding conventions](https://kotlinlang.org/docs/reference/coding-conventions.html), but with a number of small changes and clarifications.

## Utility Class Naming

Filename should coincide with a name of one of the classes contained in the file or start with small letter and describe
its contents.
Filename should coincide with a name of one of the classes contained in the file or start with small letter and describe its contents.

The code convention [here](https://kotlinlang.org/docs/reference/coding-conventions.html#source-file-names) says that
file names should start with a capital letter even if file does not contain classes. Yet starting utility classes and
aggregators with a small letter seems to be a good way to visually separate those files.
The code convention [here](https://kotlinlang.org/docs/reference/coding-conventions.html#source-file-names) says that file names should start with a capital letter even if file does not contain classes. Yet starting utility classes and aggregators with a small letter seems to be a good way to visually separate those files.

This convention could be changed in future in a non-breaking way.

## Private Variable Naming

Private variables' names may start with underscore `_` if the private mutable variable is shadowed by the public
read-only value with the same meaning.
Private variables' names may start with underscore `_` if the private mutable variable is shadowed by the public read-only value with the same meaning.

This rule does not permit underscores in names, but it is sometimes useful to "underscore" the fact that public and
private versions draw up the same entity. It is allowed only for private variables.
This rule does not permit underscores in names, but it is sometimes useful to "underscore" the fact that public and private versions draw up the same entity. It is allowed only for private variables.

This convention could be changed in future in a non-breaking way.

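A short illustrative snippet of this pattern (the names are made up for the example):

```kotlin
// Mutable state is kept private and prefixed with an underscore...
private val _points = mutableListOf<Double>()

// ...while the public, read-only view of the same entity drops the prefix.
val points: List<Double> get() = _points
```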
## Functions and Properties One-liners

Use one-liners when they occupy single code window line both for functions and properties with getters like
`val b: String get() = "fff"`. The same should be performed with multiline expressions when they could be
Use one-liners when they occupy single code window line both for functions and properties with getters like
`val b: String get() = "fff"`. The same should be performed with multiline expressions when they could be
cleanly separated.

There is no universal consensus whether to use `fun a() = ...` or `fun a() { return ... }`. Yet from a reader's outlook
one-liners seem to better show that the property or function is easily calculated.
There is no universal consensus whether to use `fun a() = ...` or `fun a() { return ... }`. Yet from a reader's outlook one-liners seem to better show that the property or function is easily calculated.

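For illustration, the two equivalent forms discussed above (hypothetical function names):

```kotlin
// Preferred one-liner: the expression body signals a simple computed value.
fun squareOf(x: Double): Double = x * x

// Equivalent block form; use it when the body no longer fits a single code window line.
fun squareOfVerbose(x: Double): Double {
    return x * x
}
```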
@ -1,24 +1,21 @@
# Expressions

Expressions is a feature, which allows constructing lazily or immediately calculated parametric mathematical
expressions.
Expressions is a feature, which allows constructing lazily or immediately calculated parametric mathematical expressions.

The potential use-cases for it (so far) are following:

* lazy evaluation (in general simple lambda is better, but there are some border cases);
* automatic differentiation in single-dimension and in multiple dimensions;
* generation of mathematical syntax trees with subsequent code generation for other languages;
* symbolic computations, especially differentiation (and some other actions with `kmath-symja` integration with
  Symja's `IExpr`—integration, simplification, and more);
* symbolic computations, especially differentiation (and some other actions with `kmath-symja` integration with Symja's `IExpr`—integration, simplification, and more);
* visualization with `kmath-jupyter`.

The workhorse of this API is `Expression` interface, which exposes
single `operator fun invoke(arguments: Map<Symbol, T>): T`
The workhorse of this API is `Expression` interface, which exposes single `operator fun invoke(arguments: Map<Symbol, T>): T`
method. `ExpressionAlgebra` is used to generate expressions and introduce variables.

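As a sketch of that contract, the following reuses the `DoubleField.expression { ... }` builder that appears in the `ExpressionsInterpretersBenchmark` diff earlier in this compare; the symbol and the argument value are illustrative only:

```kotlin
import space.kscience.kmath.expressions.*
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.bindSymbol

// An expression built with the functional builder over DoubleField.
private val expr = DoubleField.expression {
    val x = bindSymbol(Symbol.x)
    x * number(2.0) + 2.0 / x - 16.0 / sin(x)
}

fun main() {
    // Expression exposes `operator fun invoke(arguments: Map<Symbol, T>): T`.
    println(expr(mapOf(Symbol.x to 4.0)))
}
```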
Currently there are two implementations:

* Generic `ExpressionField` in `kmath-core` which allows construction of custom lazy expressions

* Auto-differentiation expression in `kmath-commons` module allows using full power of `DerivativeStructure`
  from commons-math. **TODO: add example**
* Auto-differentiation expression in `kmath-commons` module allows using full power of `DerivativeStructure`
  from commons-math. **TODO: add example**

@ -1,6 +1,6 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!--
|
||||
- Copyright 2018-2024 KMath contributors.
|
||||
- Copyright 2018-2021 KMath contributors.
|
||||
- Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
-->
|
||||
|
||||
|
Before Width: | Height: | Size: 249 KiB After Width: | Height: | Size: 249 KiB |
@ -1,6 +1,6 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!--
|
||||
- Copyright 2018-2024 KMath contributors.
|
||||
- Copyright 2018-2021 KMath contributors.
|
||||
- Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
-->
|
||||
|
||||
|
Before Width: | Height: | Size: 19 KiB After Width: | Height: | Size: 19 KiB |
@ -1,6 +1,6 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!--
|
||||
- Copyright 2018-2024 KMath contributors.
|
||||
- Copyright 2018-2021 KMath contributors.
|
||||
- Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
-->
|
||||
|
||||
|
Before Width: | Height: | Size: 278 KiB After Width: | Height: | Size: 278 KiB |
@ -1,6 +1,6 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!--
|
||||
- Copyright 2018-2024 KMath contributors.
|
||||
- Copyright 2018-2021 KMath contributors.
|
||||
- Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
-->
|
||||
|
||||
|
Before Width: | Height: | Size: 118 KiB After Width: | Height: | Size: 118 KiB |
@ -1,12 +1,8 @@
## Basic linear algebra layout

KMath support for linear algebra is organized in a context-oriented way, which means that operations are in most cases
declared in context classes, and are not the members of classes that store data. This allows a more flexible approach to
maintaining multiple back-ends. New operations are added as extensions to contexts instead of being member functions of
data structures.
KMath support for linear algebra is organized in a context-oriented way, which means that operations are in most cases declared in context classes, and are not the members of classes that store data. This allows a more flexible approach to maintaining multiple back-ends. New operations are added as extensions to contexts instead of being member functions of data structures.

The main context for linear algebra over matrices and vectors is `LinearSpace`, which defines addition and dot products
of matrices and vectors:
The main context for linear algebra over matrices and vectors is `LinearSpace`, which defines addition and dot products of matrices and vectors:

```kotlin
import space.kscience.kmath.linear.*
@ -32,5 +28,4 @@ LinearSpace.Companion.real {

## Backends overview

### EJML

### Commons Math

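To complement the truncated snippet above, a minimal sketch of working inside a `LinearSpace` context; it reuses the `buildMatrix` and `dot` calls from the `DotBenchmark` diff earlier in this compare (the field object is `DoubleField` on one side of the compare and `Float64Field` on the other), with an illustrative size and seed:

```kotlin
import space.kscience.kmath.linear.linearSpace
import space.kscience.kmath.operations.DoubleField
import kotlin.random.Random

fun main() {
    val random = Random(12224)
    val dim = 3

    with(DoubleField.linearSpace) {
        val a = buildMatrix(dim, dim) { _, _ -> random.nextDouble() }
        val b = buildMatrix(dim, dim) { _, _ -> random.nextDouble() }

        // Matrix-matrix product defined by the LinearSpace context.
        val c = a dot b
        println(c[0, 0])
    }
}
```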
@ -8,7 +8,6 @@ One of the most sought after features of mathematical libraries is the high-perf
|
||||
structures. In `kmath` performance depends on which particular context was used for operation.
|
||||
|
||||
Let us consider following contexts:
|
||||
|
||||
```kotlin
|
||||
// automatically build context most suited for given type.
|
||||
val autoField = NDField.auto(DoubleField, dim, dim)
|
||||
@ -17,7 +16,6 @@ Let us consider following contexts:
|
||||
//A generic boxing field. It should be used for objects, not primitives.
|
||||
val genericField = NDField.buffered(DoubleField, dim, dim)
|
||||
```
|
||||
|
||||
Now let us perform several tests and see, which implementation is best suited for each case:
|
||||
|
||||
## Test case
|
||||
@ -26,9 +24,7 @@ To test performance we will take 2d-structures with `dim = 1000` and add a struc
|
||||
to it `n = 1000` times.
|
||||
|
||||
## Specialized
|
||||
|
||||
The code to run this looks like:
|
||||
|
||||
```kotlin
|
||||
specializedField.run {
|
||||
var res: NDBuffer<Double> = one
|
||||
@ -37,16 +33,13 @@ The code to run this looks like:
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The performance of this code is the best of all tests since it inlines all operations and is specialized for operation
|
||||
with doubles. We will measure everything else relative to this one, so time for this test will be `1x` (real time
|
||||
on my computer is about 4.5 seconds). The only problem with this approach is that it requires specifying type
|
||||
from the beginning. Everyone does so anyway, so it is the recommended approach.
|
||||
|
||||
## Automatic
|
||||
|
||||
Let's do the same with automatic field inference:
|
||||
|
||||
```kotlin
|
||||
autoField.run {
|
||||
var res = one
|
||||
@ -55,16 +48,13 @@ Let's do the same with automatic field inference:
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The speed of this operation is approximately the same as for the specialized case since `NDField.auto` just
returns the same `RealNDField` in this case. Of course, it is usually better to use the specialized method to be sure.

## Lazy

A lazy field does not produce a structure when asked; instead, it generates an empty structure and fills it on demand, using coroutines to parallelize computations. When one calls

```kotlin
lazyField.run {
    var res = one
@ -73,14 +63,12 @@ When one calls
    }
}
```

the result will be calculated almost immediately, but it will be empty. To get the full result structure, one needs to request all its elements; in that case the computation overhead will be huge. So this field should never be used if one expects to use the full result structure. Though if one wants only a small fraction of it, it could save a lot of time.

This field can still be used with reasonable performance if the calling code is changed:

```kotlin
lazyField.run {
    val res = one.map {
@ -94,13 +82,10 @@ This field still could be used with reasonable performance if call code is chang
    res.elements().forEach { it.second }
}
```

In this case it completes in about `4x-5x` time due to boxing.

## Boxing

The boxing field produced by

```kotlin
genericField.run {
    var res: NDBuffer<Double> = one
@ -109,22 +94,18 @@ The boxing field produced by
    }
}
```

is the slowest one, because it requires boxing and unboxing the `double` on each operation. It takes about `15x` time (**TODO: there seems to be a problem here, it should be slow, but not that slow**). This field should never be used for primitives.

## Element operation

Let us also check the speed of direct operations on elements:

```kotlin
var res = genericField.one
repeat(n) {
    res += 1.0
}
```

One would expect this to be at least as slow as the field operation, but in fact it takes only `2x` time to complete. This happens because in this particular case it does not use the actual `NDField`; the result is instead calculated directly via an extension function.
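
A hedged sketch of how such a comparison can be timed, using `kotlin.time.measureTime` (which also appears in the benchmark sources later in this compare); `specializedField`, `genericField` and `n` are the names introduced earlier in this document:

```kotlin
import kotlin.time.measureTime

// Time the specialized field loop against the direct element-wise loop from above.
val specializedTime = measureTime {
    specializedField.run {
        var res: NDBuffer<Double> = one
        repeat(n) { res += 1.0 }
    }
}
val elementTime = measureTime {
    var res = genericField.one
    repeat(n) { res += 1.0 }
}
println("specialized: $specializedTime, element-wise: $elementTime")
```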

@ -133,7 +114,6 @@ via extension function.

Usually it is a bad idea to compare direct numerical operation performance in different languages, but it is hard to work completely without a frame of reference. In this case, simple numpy code:

```python
import numpy as np

res = np.ones((1000,1000))
for i in range(1000):
    res = res + 1.0
```

gives a completion time of about `1.1x`, which means that the specialized Kotlin code is in fact working faster (I think it is because of better memory management). Of course, if one writes `res += 1.0`, the performance will be different, but that would be a different case, because numpy overrides `+=` with in-place operations. In-place operations are available in `kmath` with `MutableNDStructure`, but there is no field for it (one can still work with mapping

@ -1,223 +0,0 @@

# Polynomials and Rational Functions

KMath provides a way to work with uni- and multivariate polynomials and rational functions. It includes full support of arithmetic operations on integers, **constants** (elements of the ring the polynomials are built over), variables (for certain multivariate implementations), polynomials, and rational functions, encapsulated in so-called **polynomial spaces** and **rational function spaces**, as well as some other utilities such as algebraic differentiation and substitution.

## Concrete realizations

There are 3 approaches to representing polynomials:

1. For univariate polynomials one can represent and store a polynomial as a list of coefficients for each power of the variable. I.e. the polynomial $a_0 + \dots + a_n x^n$ can be represented as a finite sequence $(a_0; \dots; a_n)$. (Compare to the sequential definition of polynomials.)
2. For multivariate polynomials one can represent and store a polynomial as a matching (in programming it is called a "map" or "dictionary"; in math it is called a [functional relation](https://en.wikipedia.org/wiki/Binary_relation#Special_types_of_binary_relations)) of each "**term signature**" (which describes what variables, and in what powers, appear in the term) with the corresponding coefficient of the term. There are 2 possible approaches to term signature representation:
    1. One can number all the variables, so a term signature can be represented as a sequence describing the powers of the variables. I.e. the signature of the term $c \; x_0^{d_0} \dots x_n^{d_n}$ (for natural or zero $d_i$) can be represented as a finite sequence $(d_0; \dots; d_n)$.
    2. One can represent variables as objects ("**labels**"), so a term signature can also be represented as a matching of each variable that appears with its power in the term. I.e. the signature of the term $c \; x_0^{d_0} \dots x_n^{d_n}$ (for natural non-zero $d_i$) can be represented as a finite matching $(x_0 \to d_0; \dots; x_n \to d_n)$.

All three approaches are implemented by the "list", "numbered", and "labeled" versions of polynomials and polynomial spaces respectively. Rational functions are represented as fractions with a corresponding polynomial numerator and denominator, and rational function spaces are implemented the same way the usual field of rational numbers (or, more precisely, any field of fractions over an integral domain) would be implemented.

Here are a few details. Let `C` be the type of constants. Then:

1. `ListPolynomial`, `ListPolynomialSpace`, `ListRationalFunction` and `ListRationalFunctionSpace` implement the first scenario. `ListPolynomial` stores the polynomial $a_0 + \dots + a_n x^n$ as the coefficient list `listOf(a_0, ..., a_n)` (of type `List<C>`).

   There is also a variation, `ScalableListPolynomialSpace`, that replaces the former space and additionally implements `ScaleOperations`.
2. `NumberedPolynomial`, `NumberedPolynomialSpace`, `NumberedRationalFunction` and `NumberedRationalFunctionSpace` implement the second scenario. `NumberedPolynomial` stores polynomials as structures of type `Map<List<UInt>, C>`. Signatures are stored as `List<UInt>`. To prevent ambiguity, signatures should not end with zeros.
3. `LabeledPolynomial`, `LabeledPolynomialSpace`, `LabeledRationalFunction` and `LabeledRationalFunctionSpace` implement the third scenario using the common `Symbol` as the variable type. `LabeledPolynomial` stores polynomials as structures of type `Map<Map<Symbol, UInt>, C>`. Signatures are stored as `Map<Symbol, UInt>`. To prevent ambiguity, a signature should not map any variable to zero.
|
||||
|
||||
### Example: `ListPolynomial`
|
||||
|
||||
For example, polynomial $2 - 3x + x^2 $ (with `Int` coefficients) is represented
|
||||
|
||||
```kotlin
|
||||
val polynomial: ListPolynomial<Int> = ListPolynomial(listOf(2, -3, 1))
|
||||
// or
|
||||
val polynomial: ListPolynomial<Int> = ListPolynomial(2, -3, 1)
|
||||
```
|
||||
|
||||
All algebraic operations can be used in corresponding space:
|
||||
|
||||
```kotlin
|
||||
val computationResult = Int.algebra.listPolynomialSpace {
|
||||
ListPolynomial(2, -3, 1) + ListPolynomial(0, 6) == ListPolynomial(2, 3, 1)
|
||||
}
|
||||
|
||||
println(computationResult) // true
|
||||
```
|
||||
|
||||
For more see [examples](../examples/src/main/kotlin/space/kscience/kmath/functions/polynomials.kt).
|
||||
|
||||
### Example: `NumberedPolynomial`
|
||||
|
||||
For example, polynomial $3 + 5 x_1 - 7 x_0^2 x_2 $ (with `Int` coefficients) is represented
|
||||
|
||||
```kotlin
|
||||
val polynomial: NumberedPolynomial<Int> = NumberedPolynomial(
|
||||
mapOf(
|
||||
listOf<UInt>() to 3,
|
||||
listOf(0u, 1u) to 5,
|
||||
listOf(2u, 0u, 1u) to -7,
|
||||
)
|
||||
)
|
||||
// or
|
||||
val polynomial: NumberedPolynomial<Int> = NumberedPolynomial(
|
||||
listOf<UInt>() to 3,
|
||||
listOf(0u, 1u) to 5,
|
||||
listOf(2u, 0u, 1u) to -7,
|
||||
)
|
||||
```
|
||||
|
||||
All algebraic operations can be used in corresponding space:
|
||||
|
||||
```kotlin
|
||||
val computationResult = Int.algebra.numberedPolynomialSpace {
|
||||
NumberedPolynomial(
|
||||
listOf<UInt>() to 3,
|
||||
listOf(0u, 1u) to 5,
|
||||
listOf(2u, 0u, 1u) to -7,
|
||||
) + NumberedPolynomial(
|
||||
listOf(0u, 1u) to -5,
|
||||
listOf(0u, 0u, 0u, 4u) to 4,
|
||||
) == NumberedPolynomial(
|
||||
listOf<UInt>() to 3,
|
||||
listOf(0u, 1u) to 0,
|
||||
listOf(2u, 0u, 1u) to -7,
|
||||
listOf(0u, 0u, 0u, 4u) to 4,
|
||||
)
|
||||
}
|
||||
|
||||
println(computationResult) // true
|
||||
```
|
||||
|
||||
For more see [examples](../examples/src/main/kotlin/space/kscience/kmath/functions/polynomials.kt).
|
||||
|
||||
### Example: `LabeledPolynomial`
|
||||
|
||||
For example, polynomial $3 + 5 y - 7 x^2 z $ (with `Int` coefficients) is represented
|
||||
|
||||
```kotlin
|
||||
val polynomial: LabeledPolynomial<Int> = LabeledPolynomial(
|
||||
mapOf(
|
||||
mapOf<Symbol, UInt>() to 3,
|
||||
mapOf(y to 1u) to 5,
|
||||
mapOf(x to 2u, z to 1u) to -7,
|
||||
)
|
||||
)
|
||||
// or
|
||||
val polynomial: LabeledPolynomial<Int> = LabeledPolynomial(
|
||||
mapOf<Symbol, UInt>() to 3,
|
||||
mapOf(y to 1u) to 5,
|
||||
mapOf(x to 2u, z to 1u) to -7,
|
||||
)
|
||||
```
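
Here `x`, `y` and `z` are `Symbol` instances. Following the convention used elsewhere in this compare (`val x by symbol` in the autodiff example), they can be declared like this (a sketch; the import paths are the ones used in those examples):

```kotlin
import space.kscience.kmath.expressions.Symbol
import space.kscience.kmath.expressions.symbol

// Declare the variables used in the polynomial examples.
val x: Symbol by symbol
val y: Symbol by symbol
val z: Symbol by symbol
```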

All algebraic operations can be used in the corresponding space:

```kotlin
val computationResult = Int.algebra.labeledPolynomialSpace {
    LabeledPolynomial(
        mapOf<Symbol, UInt>() to 3,
        mapOf(y to 1u) to 5,
        mapOf(x to 2u, z to 1u) to -7,
    ) + LabeledPolynomial(
        mapOf(y to 1u) to -5,
        mapOf(x to 4u) to 4,
    ) == LabeledPolynomial(
        mapOf<Symbol, UInt>() to 3,
        mapOf(y to 1u) to 0,
        mapOf(x to 2u, z to 1u) to -7,
        mapOf(x to 4u) to 4,
    )
}

println(computationResult) // true
```
|
||||
|
||||
For more see [examples](../examples/src/main/kotlin/space/kscience/kmath/functions/polynomials.kt).
|
||||
|
||||
## Abstract entities (interfaces and abstract classes)
|
||||
|
||||
```mermaid
|
||||
classDiagram
|
||||
Polynomial <|-- ListPolynomial
|
||||
Polynomial <|-- NumberedPolynomial
|
||||
Polynomial <|-- LabeledPolynomial
|
||||
|
||||
RationalFunction <|-- ListRationalFunction
|
||||
RationalFunction <|-- NumberedRationalFunction
|
||||
RationalFunction <|-- LabeledRationalFunction
|
||||
|
||||
Ring <|-- PolynomialSpace
|
||||
PolynomialSpace <|-- MultivariatePolynomialSpace
|
||||
PolynomialSpace <|-- PolynomialSpaceOverRing
|
||||
|
||||
Ring <|-- RationalFunctionSpace
|
||||
RationalFunctionSpace <|-- MultivariateRationalFunctionSpace
|
||||
RationalFunctionSpace <|-- RationalFunctionSpaceOverRing
|
||||
RationalFunctionSpace <|-- RationalFunctionSpaceOverPolynomialSpace
|
||||
RationalFunctionSpace <|-- PolynomialSpaceOfFractions
|
||||
RationalFunctionSpaceOverPolynomialSpace <|-- MultivariateRationalFunctionSpaceOverMultivariatePolynomialSpace
|
||||
MultivariateRationalFunctionSpace <|-- MultivariateRationalFunctionSpaceOverMultivariatePolynomialSpace
|
||||
MultivariateRationalFunctionSpace <|-- MultivariatePolynomialSpaceOfFractions
|
||||
PolynomialSpaceOfFractions <|-- MultivariatePolynomialSpaceOfFractions
|
||||
```

The `Polynomial` and `RationalFunction` interfaces are implemented as abstractions of polynomials and rational functions respectively (although there is not a lot of logic in them), and `PolynomialSpace` and `RationalFunctionSpace` (which implement the `Ring` interface) as abstractions of polynomial and rational function spaces respectively. More precisely, this means they allow declaring the common logic of interaction with such objects and spaces:

- `Polynomial` does not provide any logic. It is a marker interface.
- `RationalFunction` provides the numerator and denominator of a rational function and a destructuring declaration for them.
- `PolynomialSpace` provides all possible arithmetic interactions of integers, constants (of type `C`), and polynomials (of type `P`), such as addition, subtraction and multiplication, as well as common properties such as the degree of a polynomial (see the sketch after this list).
- `RationalFunctionSpace` provides the same as `PolynomialSpace`, but also for rational functions: all possible arithmetic interactions of integers, constants (of type `C`), polynomials (of type `P`), and rational functions (of type `R`), such as addition, subtraction, multiplication and (in some cases) division, as well as common properties such as the degree of a polynomial.
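
A hedged sketch of the mixed arithmetic described above, in the same style as the examples earlier in this document (the integer-by-polynomial product and the polynomial-plus-constant sum are assumed from the description, not shown elsewhere in this fragment):

```kotlin
val mixed = Int.algebra.listPolynomialSpace {
    val p = ListPolynomial(1, 2, 3) // 1 + 2x + 3x^2
    // Mix integers, constants and polynomials inside the space (assumed operations).
    3 * p + 5
}
println(mixed)
```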

Then, to add an abstraction for similar behaviour with variables (in the multivariate case), `MultivariatePolynomialSpace` and `MultivariateRationalFunctionSpace` are implemented. They simply include variables (of type `V`) in the interactions of the entities.

Also, to remove boilerplate, the following helper subinterfaces and abstract subclasses are provided:

- `PolynomialSpaceOverRing` allows replacing the implementation of interactions of integers and constants with implementations from a provided ring over the constants (of type `A: Ring<C>`).
- `RationalFunctionSpaceOverRing` — the same, but for `RationalFunctionSpace`.
- `RationalFunctionSpaceOverPolynomialSpace` — the same, but "the inheritance" also includes interactions with polynomials from a provided `PolynomialSpace`.
- `PolynomialSpaceOfFractions` is an abstract subclass of `RationalFunctionSpace` that implements all the fraction boilerplate given a (`protected`) constructor of rational functions from a polynomial numerator and denominator.
- `MultivariateRationalFunctionSpaceOverMultivariatePolynomialSpace` and `MultivariatePolynomialSpaceOfFractions` — the same stories of operator inheritance and fraction boilerplate respectively, but in the multivariate case.

## Utilities

For all kinds of polynomials the following common utilities are provided (implementation details depend on the kind of polynomial; see the sketch after this list):

1. differentiation and anti-differentiation,
2. substitution, invocation and functional representation.
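
A hedged sketch of how these utilities might be used with a `ListPolynomial`; the extension names (`derivative`, `substitute`) and their receivers are assumptions based on the list above, not confirmed by this fragment:

```kotlin
val p = ListPolynomial(2, -3, 1)           // 2 - 3x + x^2
val dp = p.derivative(Int.algebra)         // assumed name: differentiation, expected -3 + 2x
val atThree = p.substitute(Int.algebra, 3) // assumed name: substitution, expected 2 - 9 + 9 = 2
println("$dp, $atThree")
```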

10 docs/templates/ARTIFACT-TEMPLATE.md vendored
@ -3,7 +3,17 @@

The Maven coordinates of this project are `${group}:${name}:${version}`.

**Gradle:**
```gradle
repositories {
    maven { url 'https://repo.kotlin.link' }
    mavenCentral()
}

dependencies {
    implementation '${group}:${name}:${version}'
}
```

**Gradle Kotlin DSL:**
```kotlin
repositories {
    maven("https://repo.kotlin.link")
```
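
The Kotlin DSL block above is cut off by this diff; a hedged completion that simply mirrors the Groovy block would be:

```kotlin
repositories {
    maven("https://repo.kotlin.link")
    mavenCentral()
}

dependencies {
    implementation("${group}:${name}:${version}")
}
```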
34 docs/templates/README-TEMPLATE.md vendored
@ -1,6 +1,6 @@
|
||||
[![JetBrains Research](https://jb.gg/badges/research.svg)](https://confluence.jetbrains.com/display/ALL/JetBrains+on+GitHub)
|
||||
[![DOI](https://zenodo.org/badge/129486382.svg)](https://zenodo.org/badge/latestdoi/129486382)
|
||||
![Gradle build](https://github.com/SciProgCentre/kmath/workflows/Gradle%20build/badge.svg)
|
||||
![Gradle build](https://github.com/mipt-npm/kmath/workflows/Gradle%20build/badge.svg)
|
||||
[![Maven Central](https://img.shields.io/maven-central/v/space.kscience/kmath-core.svg?label=Maven%20Central)](https://search.maven.org/search?q=g:%22space.kscience%22)
|
||||
[![Space](https://img.shields.io/badge/dynamic/xml?color=orange&label=Space&query=//metadata/versioning/latest&url=https%3A%2F%2Fmaven.pkg.jetbrains.space%2Fmipt-npm%2Fp%2Fsci%2Fmaven%2Fspace%2Fkscience%2Fkmath-core%2Fmaven-metadata.xml)](https://maven.pkg.jetbrains.space/mipt-npm/p/sci/maven/space/kscience/)
|
||||
|
||||
@ -11,22 +11,18 @@ analog to Python's NumPy library. Later we found that kotlin is much more flexib
|
||||
architecture designs. In contrast to `numpy` and `scipy` it is modular and has a lightweight core. The `numpy`-like
|
||||
experience could be achieved with [kmath-for-real](/kmath-for-real) extension module.
|
||||
|
||||
[Documentation site](https://SciProgCentre.github.io/kmath/)
|
||||
[Documentation site (**WIP**)](https://mipt-npm.github.io/kmath/)
|
||||
|
||||
## Publications and talks
|
||||
|
||||
* [A conceptual article about context-oriented design](https://proandroiddev.com/an-introduction-context-oriented-programming-in-kotlin-2e79d316b0a2)
|
||||
* [Another article about context-oriented design](https://proandroiddev.com/diving-deeper-into-context-oriented-programming-in-kotlin-3ecb4ec38814)
|
||||
* [ACAT 2019 conference paper](https://aip.scitation.org/doi/abs/10.1063/1.5130103)
|
||||
* [A talk at KotlinConf 2019 about using kotlin for science](https://youtu.be/LI_5TZ7tnOE?si=4LknX41gl_YeUbIe)
|
||||
* [A talk on architecture at Joker-2021 (in Russian)](https://youtu.be/1bZ2doHiRRM?si=9w953ro9yu98X_KJ)
|
||||
* [The same talk in English](https://youtu.be/yP5DIc2fVwQ?si=louZzQ1dcXV6gP10)
|
||||
* [A seminar on tensor API](https://youtu.be/0H99wUs0xTM?si=6c__04jrByFQtVpo)
|
||||
|
||||
# Goal
|
||||
|
||||
* Provide a flexible and powerful API to work with mathematics abstractions in Kotlin-multiplatform (JVM, JS, Native and
|
||||
Wasm).
|
||||
* Provide a flexible and powerful API to work with mathematics abstractions in Kotlin-multiplatform (JVM, JS and Native)
|
||||
.
|
||||
* Provide basic multiplatform implementations for those abstractions (without significant performance optimization).
|
||||
* Provide bindings and wrappers with those abstractions for popular optimized platform libraries.
|
||||
|
||||
@ -48,7 +44,7 @@ module definitions below. The module stability could have the following levels:
|
||||
* **PROTOTYPE**. On this level there are no compatibility guarantees. All methods and classes form those modules could
|
||||
break any moment. You can still use it, but be sure to fix the specific version.
|
||||
* **EXPERIMENTAL**. The general API is decided, but some changes could be made. Volatile API is marked
|
||||
with `@UnstableKMathAPI` or other stability warning annotations.
|
||||
with `@UnstableKmathAPI` or other stability warning annotations.
|
||||
* **DEVELOPMENT**. API breaking generally follows semantic versioning ideology. There could be changes in minor
|
||||
versions, but not in patch versions. API is protected
|
||||
with [binary-compatibility-validator](https://github.com/Kotlin/binary-compatibility-validator) tool.
|
||||
@ -63,24 +59,23 @@ ${modules}
|
||||
KMath is developed as a multi-platform library, which means that most of the interfaces are declared in the
|
||||
[common source sets](/kmath-core/src/commonMain) and implemented there wherever it is possible. In some cases, features
|
||||
are delegated to platform-specific implementations even if they could be provided in the common module for performance
|
||||
reasons. Currently, Kotlin/JVM is the primary platform, however, Kotlin/Native and Kotlin/JS contributions and
|
||||
reasons. Currently, the Kotlin/JVM is the primary platform, however Kotlin/Native and Kotlin/JS contributions and
|
||||
feedback are also welcome.
|
||||
|
||||
## Performance
|
||||
|
||||
Calculation of performance is one of the major goals of KMath in the future, but in some cases it is impossible to
|
||||
achieve both
|
||||
Calculation performance is one of major goals of KMath in the future, but in some cases it is impossible to achieve both
|
||||
performance and flexibility.
|
||||
|
||||
We expect to focus on creating a convenient universal API first and then work on increasing performance for specific
|
||||
We expect to focus on creating convenient universal API first and then work on increasing performance for specific
|
||||
cases. We expect the worst KMath benchmarks will perform better than native Python, but worse than optimized
|
||||
native/SciPy (mostly due to boxing operations on primitive numbers). The best performance of optimized parts could be
|
||||
better than SciPy.
|
||||
|
||||
## Requirements

KMath currently relies on JDK 11 for compilation and execution of the Kotlin-JVM part. We recommend using GraalVM-CE or Oracle GraalVM for execution to get better performance.

### Repositories

@ -100,10 +95,11 @@ dependencies {
}

Gradle `6.0+` is required for multiplatform artifacts.
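
For a concrete artifact, the dependency block sketched above might look as follows (using `kmath-core` from the badges in this template; the version is a placeholder, not taken from this document):

```kotlin
repositories {
    maven("https://repo.kotlin.link")
    mavenCentral()
}

dependencies {
    // Replace <version> with the latest release shown on the Maven Central badge.
    implementation("space.kscience:kmath-core:<version>")
}
```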

## Contributing

The project requires a lot of additional work. The most important thing we need is feedback about which features are required the most. Feel free to create feature requests. We also welcome code contributions, especially on issues marked with the [good first issue](https://github.com/SciProgCentre/kmath/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) label.
|
||||
|
@ -1,5 +1,3 @@
|
||||
import org.jetbrains.kotlin.gradle.tasks.KotlinJvmCompile
|
||||
|
||||
plugins {
|
||||
kotlin("jvm")
|
||||
}
|
||||
@ -10,8 +8,6 @@ repositories {
|
||||
maven("https://maven.pkg.jetbrains.space/kotlin/p/kotlin/kotlin-js-wrappers")
|
||||
}
|
||||
|
||||
val multikVersion: String by rootProject.extra
|
||||
|
||||
dependencies {
|
||||
implementation(project(":kmath-ast"))
|
||||
implementation(project(":kmath-kotlingrad"))
|
||||
@ -19,7 +15,6 @@ dependencies {
|
||||
implementation(project(":kmath-coroutines"))
|
||||
implementation(project(":kmath-commons"))
|
||||
implementation(project(":kmath-complex"))
|
||||
implementation(project(":kmath-functions"))
|
||||
implementation(project(":kmath-optimization"))
|
||||
implementation(project(":kmath-stat"))
|
||||
implementation(project(":kmath-viktor"))
|
||||
@ -28,15 +23,13 @@ dependencies {
|
||||
implementation(project(":kmath-nd4j"))
|
||||
implementation(project(":kmath-tensors"))
|
||||
implementation(project(":kmath-symja"))
|
||||
implementation(project(":kmath-units"))
|
||||
implementation(project(":kmath-for-real"))
|
||||
//jafama
|
||||
implementation(project(":kmath-jafama"))
|
||||
//multik
|
||||
implementation(project(":kmath-multik"))
|
||||
implementation("org.jetbrains.kotlinx:multik-default:$multikVersion")
|
||||
|
||||
//datetime
|
||||
implementation("org.jetbrains.kotlinx:kotlinx-datetime:0.4.0")
|
||||
|
||||
implementation("org.nd4j:nd4j-native:1.0.0-beta7")
|
||||
|
||||
@ -50,28 +43,30 @@ dependencies {
|
||||
// } else
|
||||
implementation("org.nd4j:nd4j-native-platform:1.0.0-beta7")
|
||||
|
||||
// multik implementation
|
||||
implementation("org.jetbrains.kotlinx:multik-default:0.1.0")
|
||||
|
||||
implementation("org.slf4j:slf4j-simple:1.7.32")
|
||||
// plotting
|
||||
implementation("space.kscience:plotlykt-server:0.7.0")
|
||||
implementation("space.kscience:plotlykt-server:0.5.0")
|
||||
}
|
||||
|
||||
kotlin {
|
||||
jvmToolchain(11)
|
||||
sourceSets.all {
|
||||
languageSettings {
|
||||
optIn("kotlin.contracts.ExperimentalContracts")
|
||||
optIn("kotlin.ExperimentalUnsignedTypes")
|
||||
optIn("space.kscience.kmath.UnstableKMathAPI")
|
||||
}
|
||||
kotlin.sourceSets.all {
|
||||
with(languageSettings) {
|
||||
optIn("kotlin.contracts.ExperimentalContracts")
|
||||
optIn("kotlin.ExperimentalUnsignedTypes")
|
||||
optIn("kotlin.time.ExperimentalTime")
|
||||
optIn("space.kscience.kmath.misc.UnstableKMathAPI")
|
||||
}
|
||||
}
|
||||
|
||||
tasks.withType<KotlinJvmCompile> {
|
||||
compilerOptions {
|
||||
freeCompilerArgs.addAll("-Xjvm-default=all", "-Xopt-in=kotlin.RequiresOptIn", "-Xlambdas=indy")
|
||||
tasks.withType<org.jetbrains.kotlin.gradle.dsl.KotlinJvmCompile> {
|
||||
kotlinOptions {
|
||||
jvmTarget = "11"
|
||||
freeCompilerArgs = freeCompilerArgs + "-Xjvm-default=all" + "-Xopt-in=kotlin.RequiresOptIn" + "-Xlambdas=indy"
|
||||
}
|
||||
}
|
||||
|
||||
readme {
|
||||
maturity = space.kscience.gradle.Maturity.EXPERIMENTAL
|
||||
maturity = ru.mipt.npm.gradle.Maturity.EXPERIMENTAL
|
||||
}
|
||||
|
@ -1,418 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"%use kmath(0.3.1-dev-5)\n",
|
||||
"%use plotly(0.5.0)\n",
|
||||
"@file:DependsOn(\"space.kscience:kmath-commons:0.3.1-dev-5\")"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "lQbSB87rNAn9lV6poArVWW",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"//Uncomment to work in Jupyter classic or DataLore\n",
|
||||
"//Plotly.jupyter.notebook()"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "0UP158hfccGgjQtHz0wAi6",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# The model\n",
|
||||
"\n",
|
||||
"Defining the input data format, the statistic abstraction and the statistic implementation based on a weighted sum of elements."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"class XYValues(val xValues: DoubleArray, val yValues: DoubleArray) {\n",
|
||||
" init {\n",
|
||||
" require(xValues.size == yValues.size)\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"fun interface XYStatistic {\n",
|
||||
" operator fun invoke(values: XYValues): Double\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"class ConvolutionalXYStatistic(val weights: DoubleArray) : XYStatistic {\n",
|
||||
" override fun invoke(values: XYValues): Double {\n",
|
||||
" require(weights.size == values.yValues.size)\n",
|
||||
" val norm = values.yValues.sum()\n",
|
||||
" return values.yValues.zip(weights) { value, weight -> value * weight }.sum()/norm\n",
|
||||
" }\n",
|
||||
"}"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "Zhgz1Ui91PWz0meJiQpHol",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Generator\n",
|
||||
"Generate sample data for parabolas and hyperbolas"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fun generateParabolas(xValues: DoubleArray, a: Double, b: Double, c: Double): XYValues {\n",
|
||||
" val yValues = xValues.map { x -> a * x * x + b * x + c }.toDoubleArray()\n",
|
||||
" return XYValues(xValues, yValues)\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"fun generateHyperbols(xValues: DoubleArray, gamma: Double, x0: Double, y0: Double): XYValues {\n",
|
||||
" val yValues = xValues.map { x -> y0 + gamma / (x - x0) }.toDoubleArray()\n",
|
||||
" return XYValues(xValues, yValues)\n",
|
||||
"}"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"val xValues = (1.0..10.0).step(1.0).toDoubleArray()\n",
|
||||
"\n",
|
||||
"val xy = generateHyperbols(xValues, 1.0, 0.0, 0.0)\n",
|
||||
"\n",
|
||||
"Plotly.plot {\n",
|
||||
" scatter {\n",
|
||||
" this.x.doubles = xValues\n",
|
||||
" this.y.doubles = xy.yValues\n",
|
||||
" }\n",
|
||||
"}"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "ZE2atNvFzQsCvpAF8KK4ch",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Create a default statistic with uniform weights"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"val statistic = ConvolutionalXYStatistic(DoubleArray(xValues.size){1.0})\n",
|
||||
"statistic(xy)"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "EA5HaydTddRKYrtAUwd29h",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import kotlin.random.Random\n",
|
||||
"\n",
|
||||
"val random = Random(1288)\n",
|
||||
"\n",
|
||||
"val parabolas = buildList{\n",
|
||||
" repeat(500){\n",
|
||||
" add(\n",
|
||||
" generateParabolas(\n",
|
||||
" xValues, \n",
|
||||
" random.nextDouble(), \n",
|
||||
" random.nextDouble(), \n",
|
||||
" random.nextDouble()\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"val hyperbolas: List<XYValues> = buildList{\n",
|
||||
" repeat(500){\n",
|
||||
" add(\n",
|
||||
" generateHyperbols(\n",
|
||||
" xValues, \n",
|
||||
" random.nextDouble()*10, \n",
|
||||
" random.nextDouble(), \n",
|
||||
" random.nextDouble()\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" }\n",
|
||||
"}"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "t5t6IYmD7Q1ykeo9uijFfQ",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"Plotly.plot { \n",
|
||||
" scatter { \n",
|
||||
" x.doubles = xValues\n",
|
||||
" y.doubles = parabolas[257].yValues\n",
|
||||
" }\n",
|
||||
" scatter { \n",
|
||||
" x.doubles = xValues\n",
|
||||
" y.doubles = hyperbolas[252].yValues\n",
|
||||
" }\n",
|
||||
" }"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "oXB8lmju7YVYjMRXITKnhO",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"Plotly.plot { \n",
|
||||
" histogram { \n",
|
||||
" name = \"parabolae\"\n",
|
||||
" x.numbers = parabolas.map { statistic(it) }\n",
|
||||
" }\n",
|
||||
" histogram { \n",
|
||||
" name = \"hyperbolae\"\n",
|
||||
" x.numbers = hyperbolas.map { statistic(it) }\n",
|
||||
" }\n",
|
||||
"}"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "8EIIecUZrt2NNrOkhxG5P0",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"val lossFunction: (XYStatistic) -> Double = { statistic ->\n",
|
||||
" - abs(parabolas.sumOf { statistic(it) } - hyperbolas.sumOf { statistic(it) })\n",
|
||||
"}"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "h7UmglJW5zXkAfKHK40oIL",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Using commons-math optimizer to optimize weights"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import org.apache.commons.math3.optim.*\n",
|
||||
"import org.apache.commons.math3.optim.nonlinear.scalar.*\n",
|
||||
"import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.*\n",
|
||||
"\n",
|
||||
"val optimizer = SimplexOptimizer(1e-1, Double.MAX_VALUE)\n",
|
||||
"\n",
|
||||
"val result = optimizer.optimize(\n",
|
||||
" ObjectiveFunction { point ->\n",
|
||||
" lossFunction(ConvolutionalXYStatistic(point))\n",
|
||||
" },\n",
|
||||
" NelderMeadSimplex(xValues.size),\n",
|
||||
" InitialGuess(DoubleArray(xValues.size){ 1.0 }),\n",
|
||||
" GoalType.MINIMIZE,\n",
|
||||
" MaxEval(100000)\n",
|
||||
")"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "0EG3K4aCUciMlgGQKPvJ57",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Print resulting weights of optimization"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"result.point"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "LelUlY0ZSlJEO9yC6SLk5B",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"Plotly.plot { \n",
|
||||
" scatter { \n",
|
||||
" y.doubles = result.point\n",
|
||||
" }\n",
|
||||
"}"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "AuFOq5t9KpOIkGrOLsVXNf",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# The resulting statistic distribution"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"val resultStatistic = ConvolutionalXYStatistic(result.point)\n",
|
||||
"Plotly.plot { \n",
|
||||
" histogram { \n",
|
||||
" name = \"parabolae\"\n",
|
||||
" x.numbers = parabolas.map { resultStatistic(it) }\n",
|
||||
" }\n",
|
||||
" histogram { \n",
|
||||
" name = \"hyperbolae\"\n",
|
||||
" x.numbers = hyperbolas.map { resultStatistic(it) }\n",
|
||||
" }\n",
|
||||
"}"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"metadata": {
|
||||
"datalore": {
|
||||
"node_id": "zvmq42DRdM5mZ3SpzviHwI",
|
||||
"type": "CODE",
|
||||
"hide_input_from_viewers": false,
|
||||
"hide_output_from_viewers": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Kotlin",
|
||||
"language": "kotlin",
|
||||
"name": "kotlin"
|
||||
},
|
||||
"datalore": {
|
||||
"version": 1,
|
||||
"computation_mode": "JUPYTER",
|
||||
"package_manager": "pip",
|
||||
"base_environment": "default",
|
||||
"packages": []
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -8,13 +8,13 @@ package space.kscience.kmath.ast
|
||||
import space.kscience.kmath.asm.compileToExpression
|
||||
import space.kscience.kmath.expressions.MstExtendedField
|
||||
import space.kscience.kmath.expressions.Symbol.Companion.x
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.operations.invoke
|
||||
|
||||
fun main() {
|
||||
val expr = MstExtendedField {
|
||||
x * 2.0 + number(2.0) / x - number(16.0) + asinh(x) / sin(x)
|
||||
}.compileToExpression(Float64Field)
|
||||
}.compileToExpression(DoubleField)
|
||||
|
||||
val m = DoubleArray(expr.indexer.symbols.size)
|
||||
val xIdx = expr.indexer.indexOf(x)
|
||||
|
@ -1,16 +1,16 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.ast
|
||||
|
||||
import space.kscience.kmath.expressions.Symbol.Companion.x
|
||||
import space.kscience.kmath.expressions.derivative
|
||||
import space.kscience.kmath.expressions.invoke
|
||||
import space.kscience.kmath.expressions.Symbol.Companion.x
|
||||
import space.kscience.kmath.expressions.toExpression
|
||||
import space.kscience.kmath.kotlingrad.toKotlingradExpression
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
|
||||
/**
|
||||
* In this example, *x<sup>2</sup> − 4 x − 44* function is differentiated with Kotlin∇, and the
|
||||
@ -19,9 +19,9 @@ import space.kscience.kmath.operations.Float64Field
|
||||
fun main() {
|
||||
val actualDerivative = "x^2-4*x-44"
|
||||
.parseMath()
|
||||
.toKotlingradExpression(Float64Field)
|
||||
.toKotlingradExpression(DoubleField)
|
||||
.derivative(x)
|
||||
|
||||
val expectedDerivative = "2*x-4".parseMath().toExpression(Float64Field)
|
||||
val expectedDerivative = "2*x-4".parseMath().toExpression(DoubleField)
|
||||
check(actualDerivative(x to 123.0) == expectedDerivative(x to 123.0))
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -9,7 +9,7 @@ import space.kscience.kmath.expressions.Symbol.Companion.x
|
||||
import space.kscience.kmath.expressions.derivative
|
||||
import space.kscience.kmath.expressions.invoke
|
||||
import space.kscience.kmath.expressions.toExpression
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.symja.toSymjaExpression
|
||||
|
||||
/**
|
||||
@ -19,9 +19,9 @@ import space.kscience.kmath.symja.toSymjaExpression
|
||||
fun main() {
|
||||
val actualDerivative = "x^2-4*x-44"
|
||||
.parseMath()
|
||||
.toSymjaExpression(Float64Field)
|
||||
.toSymjaExpression(DoubleField)
|
||||
.derivative(x)
|
||||
|
||||
val expectedDerivative = "2*x-4".parseMath().toExpression(Float64Field)
|
||||
val expectedDerivative = "2*x-4".parseMath().toExpression(DoubleField)
|
||||
check(actualDerivative(x to 123.0) == expectedDerivative(x to 123.0))
|
||||
}
|
||||
|
@ -1,92 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.expressions
|
||||
|
||||
import space.kscience.kmath.UnstableKMathAPI
|
||||
|
||||
// Only kmath-core is needed.
|
||||
|
||||
// Let's declare some variables
|
||||
val x by symbol
|
||||
val y by symbol
|
||||
val z by symbol
|
||||
|
||||
@OptIn(UnstableKMathAPI::class)
|
||||
fun main() {
|
||||
// Let's define some random expression.
|
||||
val someExpression = Double.autodiff.differentiate {
|
||||
// We bind variables `x` and `y` to the builder scope,
|
||||
val x = bindSymbol(x)
|
||||
val y = bindSymbol(y)
|
||||
|
||||
// Then we use the bindings to define expression `xy + x + y - 1`
|
||||
x * y + x + y - 1
|
||||
}
|
||||
|
||||
// Then we can evaluate it at any point ((-1, -1) in the case):
|
||||
println(someExpression(x to -1.0, y to -1.0))
|
||||
// >>> -2.0
|
||||
|
||||
// We can also construct its partial derivatives:
|
||||
val dxExpression = someExpression.derivative(x) // ∂/∂x. Must be `y+1`
|
||||
val dyExpression = someExpression.derivative(y) // ∂/∂y. Must be `x+1`
|
||||
val dxdxExpression = someExpression.derivative(x, x) // ∂^2/∂x^2. Must be `0`
|
||||
|
||||
// We can evaluate them as well
|
||||
println(dxExpression(x to 57.0, y to 6.0))
|
||||
// >>> 7.0
|
||||
println(dyExpression(x to -1.0, y to 179.0))
|
||||
// >>> 0.0
|
||||
println(dxdxExpression(x to 239.0, y to 30.0))
|
||||
// >>> 0.0
|
||||
|
||||
// You can also provide extra arguments that obviously won't affect the result:
|
||||
println(dxExpression(x to 57.0, y to 6.0, z to 42.0))
|
||||
// >>> 7.0
|
||||
println(dyExpression(x to -1.0, y to 179.0, z to 0.0))
|
||||
// >>> 0.0
|
||||
println(dxdxExpression(x to 239.0, y to 30.0, z to 100_000.0))
|
||||
// >>> 0.0
|
||||
|
||||
// But in case you forgot to specify bound symbol's value, exception is thrown:
|
||||
println(runCatching { someExpression(z to 4.0) })
|
||||
// >>> Failure(java.lang.IllegalStateException: Symbol 'x' is not supported in ...)
|
||||
|
||||
// The reason is that the expression is evaluated lazily,
|
||||
// and each `bindSymbol` operation actually substitutes the provided symbol with the corresponding value.
|
||||
|
||||
// For example, let there be an expression
|
||||
val simpleExpression = Double.autodiff.differentiate {
|
||||
val x = bindSymbol(x)
|
||||
x pow 2
|
||||
}
|
||||
// When you evaluate it via
|
||||
simpleExpression(x to 1.0, y to 57.0, z to 179.0)
|
||||
// lambda above has the context of map `{x: 1.0, y: 57.0, z: 179.0}`.
|
||||
// When x is bound, you can think of it as substitution `x -> 1.0`.
|
||||
// Other values are unused which does not make any problem to us.
|
||||
// But in the case the corresponding value is not provided,
|
||||
// we cannot bind the variable. Thus, exception is thrown.
|
||||
|
||||
// There is also a function `bindSymbolOrNull` that fixes the problem:
|
||||
val fixedExpression = Double.autodiff.differentiate {
|
||||
val x = bindSymbolOrNull(x) ?: const(8.0)
|
||||
x pow -2
|
||||
}
|
||||
println(fixedExpression())
|
||||
// >>> 0.015625
|
||||
// It works!
|
||||
|
||||
// The expression provides a bunch of operations:
|
||||
// 1. Constant bindings (via `const` and `number`).
|
||||
// 2. Variable bindings (via `bindVariable`, `bindVariableOrNull`).
|
||||
// 3. Arithmetic operations (via `+`, `-`, `*`, and `-`).
|
||||
// 4. Exponentiation (via `pow` or `power`).
|
||||
// 5. `exp` and `ln`.
|
||||
// 6. Trigonometrical functions (`sin`, `cos`, `tan`, `cot`).
|
||||
// 7. Inverse trigonometrical functions (`asin`, `acos`, `atan`, `acot`).
|
||||
// 8. Hyperbolic functions and inverse hyperbolic functions.
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -7,18 +7,21 @@ package space.kscience.kmath.fit
|
||||
|
||||
import kotlinx.html.br
|
||||
import kotlinx.html.h3
|
||||
import space.kscience.kmath.commons.expressions.DSProcessor
|
||||
import space.kscience.kmath.commons.optimization.CMOptimizer
|
||||
import space.kscience.kmath.distributions.NormalDistribution
|
||||
import space.kscience.kmath.expressions.autodiff
|
||||
import space.kscience.kmath.expressions.chiSquaredExpression
|
||||
import space.kscience.kmath.expressions.symbol
|
||||
import space.kscience.kmath.operations.asIterable
|
||||
import space.kscience.kmath.operations.toList
|
||||
import space.kscience.kmath.optimization.*
|
||||
import space.kscience.kmath.random.RandomGenerator
|
||||
import space.kscience.kmath.optimization.FunctionOptimizationTarget
|
||||
import space.kscience.kmath.optimization.optimizeWith
|
||||
import space.kscience.kmath.optimization.resultPoint
|
||||
import space.kscience.kmath.optimization.resultValue
|
||||
import space.kscience.kmath.real.DoubleVector
|
||||
import space.kscience.kmath.real.map
|
||||
import space.kscience.kmath.real.step
|
||||
import space.kscience.kmath.stat.chiSquaredExpression
|
||||
import space.kscience.kmath.stat.RandomGenerator
|
||||
import space.kscience.plotly.*
|
||||
import space.kscience.plotly.models.ScatterMode
|
||||
import space.kscience.plotly.models.TraceValues
|
||||
@ -64,7 +67,7 @@ suspend fun main() {
|
||||
val yErr = y.map { sqrt(it) }//RealVector.same(x.size, sigma)
|
||||
|
||||
// compute differentiable chi^2 sum for given model ax^2 + bx + c
|
||||
val chi2 = Double.autodiff.chiSquaredExpression(x, y, yErr) { arg ->
|
||||
val chi2 = DSProcessor.chiSquaredExpression(x, y, yErr) { arg ->
|
||||
//bind variables to autodiff context
|
||||
val a = bindSymbol(a)
|
||||
val b = bindSymbol(b)
|
||||
@ -77,9 +80,8 @@ suspend fun main() {
|
||||
val result = chi2.optimizeWith(
|
||||
CMOptimizer,
|
||||
mapOf(a to 1.5, b to 0.9, c to 1.0),
|
||||
) {
|
||||
FunctionOptimizationTarget(OptimizationDirection.MINIMIZE)
|
||||
}
|
||||
FunctionOptimizationTarget.MINIMIZE
|
||||
)
|
||||
|
||||
//display a page with plot and numerical results
|
||||
val page = Plotly.page {
|
||||
@ -96,7 +98,7 @@ suspend fun main() {
|
||||
scatter {
|
||||
mode = ScatterMode.lines
|
||||
x(x)
|
||||
y(x.map { result.result[a]!! * it.pow(2) + result.result[b]!! * it + 1 })
|
||||
y(x.map { result.resultPoint[a]!! * it.pow(2) + result.resultPoint[b]!! * it + 1 })
|
||||
name = "fit"
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -7,19 +7,21 @@ package space.kscience.kmath.fit
|
||||
|
||||
import kotlinx.html.br
|
||||
import kotlinx.html.h3
|
||||
import space.kscience.attributes.Attributes
|
||||
import space.kscience.kmath.commons.expressions.DSProcessor
|
||||
import space.kscience.kmath.data.XYErrorColumnarData
|
||||
import space.kscience.kmath.distributions.NormalDistribution
|
||||
import space.kscience.kmath.expressions.Symbol
|
||||
import space.kscience.kmath.expressions.autodiff
|
||||
import space.kscience.kmath.expressions.binding
|
||||
import space.kscience.kmath.expressions.symbol
|
||||
import space.kscience.kmath.operations.asIterable
|
||||
import space.kscience.kmath.operations.toList
|
||||
import space.kscience.kmath.optimization.*
|
||||
import space.kscience.kmath.random.RandomGenerator
|
||||
import space.kscience.kmath.optimization.QowOptimizer
|
||||
import space.kscience.kmath.optimization.chiSquaredOrNull
|
||||
import space.kscience.kmath.optimization.fitWith
|
||||
import space.kscience.kmath.optimization.resultPoint
|
||||
import space.kscience.kmath.real.map
|
||||
import space.kscience.kmath.real.step
|
||||
import space.kscience.kmath.stat.RandomGenerator
|
||||
import space.kscience.plotly.*
|
||||
import space.kscience.plotly.models.ScatterMode
|
||||
import kotlin.math.abs
|
||||
@ -30,8 +32,6 @@ import kotlin.math.sqrt
|
||||
private val a by symbol
|
||||
private val b by symbol
|
||||
private val c by symbol
|
||||
private val d by symbol
|
||||
private val e by symbol
|
||||
|
||||
|
||||
/**
|
||||
@ -63,23 +63,17 @@ suspend fun main() {
|
||||
|
||||
val result = XYErrorColumnarData.of(x, y, yErr).fitWith(
|
||||
QowOptimizer,
|
||||
Double.autodiff,
|
||||
mapOf(a to 0.9, b to 1.2, c to 2.0, e to 1.0, d to 1.0, e to 0.0),
|
||||
attributes = Attributes(OptimizationParameters, listOf(a, b, c, d))
|
||||
DSProcessor,
|
||||
mapOf(a to 0.9, b to 1.2, c to 2.0)
|
||||
) { arg ->
|
||||
//bind variables to autodiff context
|
||||
val a by binding
|
||||
val b by binding
|
||||
//Include default value for c if it is not provided as a parameter
|
||||
val c = bindSymbolOrNull(c) ?: one
|
||||
val d by binding
|
||||
val e by binding
|
||||
|
||||
a * arg.pow(2) + b * arg + c + d * arg.pow(3) + e / arg
|
||||
a * arg.pow(2) + b * arg + c
|
||||
}
|
||||
|
||||
println("Resulting chi2/dof: ${result.chiSquaredOrNull}/${result.dof}")
|
||||
|
||||
//display a page with plot and numerical results
|
||||
val page = Plotly.page {
|
||||
plot {
|
||||
@ -95,16 +89,16 @@ suspend fun main() {
|
||||
scatter {
|
||||
mode = ScatterMode.lines
|
||||
x(x)
|
||||
y(x.map { result.model(result.startPoint + result.result + (Symbol.x to it)) })
|
||||
y(x.map { result.model(result.resultPoint + (Symbol.x to it)) })
|
||||
name = "fit"
|
||||
}
|
||||
}
|
||||
br()
|
||||
h3 {
|
||||
+"Fit result: ${result.result}"
|
||||
+"Fit result: ${result.resultPoint}"
|
||||
}
|
||||
h3 {
|
||||
+"Chi2/dof = ${result.chiSquaredOrNull!! / result.dof}"
|
||||
+"Chi2/dof = ${result.chiSquaredOrNull!! / (x.size - 3)}"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,36 +1,23 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.functions
|
||||
|
||||
import space.kscience.kmath.complex.Complex
|
||||
import space.kscience.kmath.complex.ComplexField
|
||||
import space.kscience.kmath.complex.ComplexField.div
|
||||
import space.kscience.kmath.complex.ComplexField.minus
|
||||
import space.kscience.kmath.complex.algebra
|
||||
import space.kscience.kmath.integration.gaussIntegrator
|
||||
import space.kscience.kmath.integration.integrate
|
||||
import space.kscience.kmath.integration.value
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import kotlin.math.pow
|
||||
|
||||
fun main() {
|
||||
//Define a function
|
||||
val function: Function1D<Double> = { x -> 3 * x.pow(2) + 2 * x + 1 }
|
||||
val function: UnivariateFunction<Double> = { x -> 3 * x.pow(2) + 2 * x + 1 }
|
||||
|
||||
//get the result of the integration
|
||||
val result = Float64Field.gaussIntegrator.integrate(0.0..10.0, function = function)
|
||||
val result = DoubleField.gaussIntegrator.integrate(0.0..10.0, function = function)
|
||||
|
||||
//the value is nullable because in some cases the integration could not succeed
|
||||
println(result.value)
|
||||
|
||||
|
||||
repeat(100000) {
|
||||
Complex.algebra.gaussIntegrator.integrate(0.0..1.0, intervals = 1000) { x: Double ->
|
||||
// sin(1 / x) + i * cos(1 / x)
|
||||
1 / x - ComplexField.i / x
|
||||
}.value
|
||||
}
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -7,7 +7,8 @@ package space.kscience.kmath.functions
|
||||
|
||||
import space.kscience.kmath.interpolation.SplineInterpolator
|
||||
import space.kscience.kmath.interpolation.interpolatePolynomials
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.structures.DoubleBuffer
|
||||
import space.kscience.plotly.Plotly
|
||||
import space.kscience.plotly.UnstablePlotlyAPI
|
||||
import space.kscience.plotly.makeFile
|
||||
@ -23,9 +24,11 @@ fun main() {
|
||||
x to sin(x)
|
||||
}
|
||||
|
||||
val polynomial: PiecewisePolynomial<Double> = SplineInterpolator(Float64Field).interpolatePolynomials(data)
|
||||
val polynomial: PiecewisePolynomial<Double> = SplineInterpolator(
|
||||
DoubleField, ::DoubleBuffer
|
||||
).interpolatePolynomials(data)
|
||||
|
||||
val function = polynomial.asFunction(Float64Field, 0.0)
|
||||
val function = polynomial.asFunction(DoubleField, 0.0)
|
||||
|
||||
val cmInterpolate = org.apache.commons.math3.analysis.interpolation.SplineInterpolator().interpolate(
|
||||
data.map { it.first }.toDoubleArray(),
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -7,7 +7,7 @@ package space.kscience.kmath.functions
|
||||
|
||||
import space.kscience.kmath.interpolation.interpolatePolynomials
|
||||
import space.kscience.kmath.interpolation.splineInterpolator
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.real.map
|
||||
import space.kscience.kmath.real.step
|
||||
import space.kscience.plotly.Plotly
|
||||
@ -18,7 +18,7 @@ import space.kscience.plotly.scatter
|
||||
|
||||
@OptIn(UnstablePlotlyAPI::class)
|
||||
fun main() {
|
||||
val function: Function1D<Double> = { x ->
|
||||
val function: UnivariateFunction<Double> = { x ->
|
||||
if (x in 30.0..50.0) {
|
||||
1.0
|
||||
} else {
|
||||
@ -28,9 +28,9 @@ fun main() {
|
||||
val xs = 0.0..100.0 step 0.5
|
||||
val ys = xs.map(function)
|
||||
|
||||
val polynomial: PiecewisePolynomial<Double> = Float64Field.splineInterpolator.interpolatePolynomials(xs, ys)
|
||||
val polynomial: PiecewisePolynomial<Double> = DoubleField.splineInterpolator.interpolatePolynomials(xs, ys)
|
||||
|
||||
val polyFunction = polynomial.asFunction(Float64Field, 0.0)
|
||||
val polyFunction = polynomial.asFunction(DoubleField, 0.0)
|
||||
|
||||
Plotly.plot {
|
||||
scatter {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -12,21 +12,23 @@ import space.kscience.kmath.nd.StructureND
|
||||
import space.kscience.kmath.nd.structureND
|
||||
import space.kscience.kmath.nd.withNdAlgebra
|
||||
import space.kscience.kmath.operations.algebra
|
||||
import kotlin.math.pow
|
||||
import space.kscience.kmath.operations.invoke
|
||||
|
||||
fun main(): Unit = Double.algebra.withNdAlgebra(2, 2) {
|
||||
fun main(): Unit = Double.algebra {
|
||||
withNdAlgebra(2, 2) {
|
||||
|
||||
//Produce a diagonal StructureND
|
||||
fun diagonal(v: Double) = structureND { (i, j) ->
|
||||
if (i == j) v else 0.0
|
||||
//Produce a diagonal StructureND
|
||||
fun diagonal(v: Double) = structureND { (i, j) ->
|
||||
if (i == j) v else 0.0
|
||||
}
|
||||
|
||||
//Define a function in a nd space
|
||||
val function: (Double) -> StructureND<Double> = { x: Double -> 3 * x.pow(2) + 2 * diagonal(x) + 1 }
|
||||
|
||||
//get the result of the integration
|
||||
val result = gaussIntegrator.integrate(0.0..10.0, function = function)
|
||||
|
||||
//the value is nullable because in some cases the integration could not succeed
|
||||
println(result.value)
|
||||
}
|
||||
|
||||
//Define a function in a nd space
|
||||
val function: (Double) -> StructureND<Double> = { x: Double -> 3 * x.pow(2) + 2 * diagonal(x) + 1 }
|
||||
|
||||
//get the result of the integration
|
||||
val result = gaussIntegrator.integrate(0.0..10.0, function = function)
|
||||
|
||||
//the value is nullable because in some cases the integration could not succeed
|
||||
println(result.value)
|
||||
}
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
|
@ -1,28 +1,31 @@
/*
 * Copyright 2018-2024 KMath contributors.
 * Copyright 2018-2021 KMath contributors.
 * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
 */

package space.kscience.kmath.linear

import space.kscience.kmath.operations.algebra
import kotlin.random.Random
import kotlin.time.measureTime
import kotlin.system.measureTimeMillis

fun main() = with(Float64ParallelLinearSpace) {
fun main() {
    val random = Random(12224)
    val dim = 1000

    //creating invertible matrix
    val matrix1 = buildMatrix(dim, dim) { i, j ->
    val matrix1 = Double.algebra.linearSpace.buildMatrix(dim, dim) { i, j ->
        if (i <= j) random.nextDouble() else 0.0
    }
    val matrix2 = buildMatrix(dim, dim) { i, j ->
    val matrix2 = Double.algebra.linearSpace.buildMatrix(dim, dim) { i, j ->
        if (i <= j) random.nextDouble() else 0.0
    }

    val time = measureTime {
        repeat(30) {
            matrix1 dot matrix2
    val time = measureTimeMillis {
        with(Double.algebra.linearSpace) {
            repeat(10) {
                matrix1 dot matrix2
            }
        }
    }

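The timing change above swaps kotlin.system.measureTimeMillis for kotlin.time.measureTime, which returns a kotlin.time.Duration and prints with units; a minimal sketch of the newer style:

    import kotlin.time.measureTime

    val elapsed = measureTime { repeat(30) { matrix1 dot matrix2 } }
    println("30 multiplications took $elapsed") // prints e.g. "12.5s" instead of a bare millisecond count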
@ -1,12 +1,12 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.linear
|
||||
|
||||
import space.kscience.kmath.real.*
|
||||
import space.kscience.kmath.structures.Float64Buffer
|
||||
import space.kscience.kmath.structures.DoubleBuffer
|
||||
|
||||
fun main() {
|
||||
val x0 = DoubleVector(0.0, 0.0, 0.0)
|
||||
@ -19,9 +19,9 @@ fun main() {
|
||||
|
||||
fun ((Point<Double>) -> Double).grad(x: Point<Double>): Point<Double> {
|
||||
require(x.size == x0.size)
|
||||
return Float64Buffer(x.size) { i ->
|
||||
return DoubleBuffer(x.size) { i ->
|
||||
val h = sigma[i] / 5
|
||||
val dVector = Float64Buffer(x.size) { if (it == i) h else 0.0 }
|
||||
val dVector = DoubleBuffer(x.size) { if (it == i) h else 0.0 }
|
||||
val f1 = this(x + dVector / 2)
|
||||
val f0 = this(x - dVector / 2)
|
||||
(f1 - f0) / h
|
||||
|
@ -1,27 +0,0 @@
/*
 * Copyright 2018-2024 KMath contributors.
 * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
 */

package space.kscience.kmath.linear

import kotlin.random.Random
import kotlin.time.measureTime

fun main(): Unit = with(Float64LinearSpace) {
    val random = Random(1224)
    val dim = 500

    //creating invertible matrix
    val u = buildMatrix(dim, dim) { i, j -> if (i <= j) random.nextDouble() else 0.0 }
    val l = buildMatrix(dim, dim) { i, j -> if (i >= j) random.nextDouble() else 0.0 }
    val matrix = l dot u

    val time = measureTime {
        repeat(20) {
            lupSolver().inverse(matrix)
        }
    }

    println(time)
}
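The deleted benchmark above only inverts the matrix; the same LUP machinery can also solve a right-hand side directly. A sketch, under the assumption that the lupSolver() used here exposes the usual LinearSolver solve method:

    with(Float64LinearSpace) {
        val b = buildMatrix(dim, 1) { i, _ -> random.nextDouble() }
        val x = lupSolver().solve(matrix, b) // x such that matrix dot x is approximately b
    }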
@ -1,5 +1,5 @@
/*
 * Copyright 2018-2024 KMath contributors.
 * Copyright 2018-2021 KMath contributors.
 * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
 */

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -7,6 +7,7 @@ package space.kscience.kmath.operations
|
||||
|
||||
import space.kscience.kmath.complex.Complex
|
||||
import space.kscience.kmath.complex.algebra
|
||||
import space.kscience.kmath.complex.bufferAlgebra
|
||||
import space.kscience.kmath.complex.ndAlgebra
|
||||
import space.kscience.kmath.nd.BufferND
|
||||
import space.kscience.kmath.nd.StructureND
|
||||
@ -17,7 +18,7 @@ fun main() = Complex.algebra {
|
||||
println(complex * 8 - 5 * i)
|
||||
|
||||
//flat buffer
|
||||
val buffer = with(bufferAlgebra) {
|
||||
val buffer = with(bufferAlgebra){
|
||||
buffer(8) { Complex(it, -it) }.map { Complex(it.im, it.re) }
|
||||
}
|
||||
println(buffer)
|
||||
@ -29,7 +30,7 @@ fun main() = Complex.algebra {
|
||||
println(element)
|
||||
|
||||
// 1d element operation
|
||||
val result: StructureND<Complex> = ndAlgebra {
|
||||
val result: StructureND<Complex> = ndAlgebra{
|
||||
val a = structureND(8) { (it) -> i * it - it.toDouble() }
|
||||
val b = 3
|
||||
val c = Complex(1.0, 1.0)
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -7,22 +7,21 @@ package space.kscience.kmath.operations
|
||||
|
||||
import space.kscience.kmath.commons.linear.CMLinearSpace
|
||||
import space.kscience.kmath.linear.matrix
|
||||
import space.kscience.kmath.nd.Float64BufferND
|
||||
import space.kscience.kmath.nd.DoubleBufferND
|
||||
import space.kscience.kmath.nd.Shape
|
||||
import space.kscience.kmath.nd.Structure2D
|
||||
import space.kscience.kmath.nd.mutableStructureND
|
||||
import space.kscience.kmath.nd.ndAlgebra
|
||||
import space.kscience.kmath.viktor.ViktorStructureND
|
||||
import space.kscience.kmath.viktor.viktorAlgebra
|
||||
import kotlin.collections.component1
|
||||
import kotlin.collections.component2
|
||||
|
||||
fun main() {
|
||||
val viktorStructure = Float64Field.viktorAlgebra.mutableStructureND(2, 2) { (i, j) ->
|
||||
val viktorStructure: ViktorStructureND = DoubleField.viktorAlgebra.structureND(Shape(2, 2)) { (i, j) ->
|
||||
if (i == j) 2.0 else 0.0
|
||||
}
|
||||
|
||||
val cmMatrix: Structure2D<Double> = CMLinearSpace.matrix(2, 2)(0.0, 1.0, 0.0, 3.0)
|
||||
|
||||
val res: Float64BufferND = Float64Field.ndAlgebra {
|
||||
val res: DoubleBufferND = DoubleField.ndAlgebra {
|
||||
exp(viktorStructure) + 2.0 * cmMatrix
|
||||
}
|
||||
|
||||
|
@ -1,17 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.series
|
||||
|
||||
import kotlinx.datetime.Instant
|
||||
import space.kscience.kmath.operations.algebra
|
||||
import space.kscience.kmath.operations.bufferAlgebra
|
||||
import kotlin.time.Duration
|
||||
|
||||
fun SeriesAlgebra.Companion.time(zero: Instant, step: Duration) = MonotonicSeriesAlgebra(
|
||||
bufferAlgebra = Double.algebra.bufferAlgebra,
|
||||
offsetToLabel = { zero + step * it },
|
||||
labelToOffset = { (it - zero) / step }
|
||||
)
|
@ -1,64 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.series
|
||||
|
||||
|
||||
import kotlinx.html.h1
|
||||
import kotlinx.html.p
|
||||
import space.kscience.kmath.operations.algebra
|
||||
import space.kscience.kmath.operations.bufferAlgebra
|
||||
import space.kscience.kmath.operations.toList
|
||||
import space.kscience.kmath.stat.KMComparisonResult
|
||||
import space.kscience.kmath.stat.ksComparisonStatistic
|
||||
import space.kscience.kmath.structures.Buffer
|
||||
import space.kscience.kmath.structures.slice
|
||||
import space.kscience.plotly.*
|
||||
import kotlin.math.PI
|
||||
|
||||
fun Double.Companion.seriesAlgebra() = Double.algebra.bufferAlgebra.seriesAlgebra()
|
||||
|
||||
|
||||
fun main() = with(Double.seriesAlgebra()) {
|
||||
|
||||
|
||||
fun Plot.plotSeries(name: String, buffer: Buffer<Double>) {
|
||||
scatter {
|
||||
this.name = name
|
||||
x.numbers = buffer.labels
|
||||
y.numbers = buffer.toList()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
val s1 = series(100) { sin(2 * PI * it / 100) + 1.0 }
|
||||
|
||||
val s2 = s1.slice(20..50).moveTo(40)
|
||||
|
||||
val s3: Buffer<Double> = s1.zip(s2) { l, r -> l + r } //s1 + s2
|
||||
val s4 = s3.map { ln(it) }
|
||||
|
||||
val kmTest: KMComparisonResult<Double> = ksComparisonStatistic(s1, s2)
|
||||
|
||||
Plotly.page {
|
||||
h1 { +"This is my plot" }
|
||||
p {
|
||||
+"Kolmogorov-smirnov test for s1 and s2: ${kmTest.value}"
|
||||
}
|
||||
plot {
|
||||
plotSeries("s1", s1)
|
||||
plotSeries("s2", s2)
|
||||
plotSeries("s3", s3)
|
||||
plotSeries("s4", s4)
|
||||
layout {
|
||||
xaxis {
|
||||
range(0.0..100.0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}.makeFile()
|
||||
|
||||
}
|
@ -1,50 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2023 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.series
|
||||
|
||||
|
||||
import space.kscience.kmath.structures.Buffer
|
||||
import space.kscience.kmath.structures.Float64Buffer
|
||||
import space.kscience.kmath.structures.asBuffer
|
||||
import space.kscience.kmath.structures.toDoubleArray
|
||||
import space.kscience.plotly.*
|
||||
import space.kscience.plotly.models.Scatter
|
||||
import space.kscience.plotly.models.ScatterMode
|
||||
import kotlin.random.Random
|
||||
|
||||
fun main(): Unit = with(Double.seriesAlgebra()) {
|
||||
|
||||
val random = Random(1234)
|
||||
|
||||
val arrayOfRandoms = DoubleArray(20) { random.nextDouble() }
|
||||
|
||||
val series1: Float64Buffer = arrayOfRandoms.asBuffer()
|
||||
val series2: Series<Double> = series1.moveBy(3)
|
||||
|
||||
val res = series2 - series1
|
||||
|
||||
println(res.size)
|
||||
|
||||
println(res)
|
||||
|
||||
fun Plot.series(name: String, buffer: Buffer<Double>, block: Scatter.() -> Unit = {}) {
|
||||
scatter {
|
||||
this.name = name
|
||||
x.numbers = buffer.offsetIndices
|
||||
y.doubles = buffer.toDoubleArray()
|
||||
block()
|
||||
}
|
||||
}
|
||||
|
||||
Plotly.plot {
|
||||
series("series1", series1)
|
||||
series("series2", series2)
|
||||
series("dif", res) {
|
||||
mode = ScatterMode.lines
|
||||
line.color("magenta")
|
||||
}
|
||||
}.makeFile(resourceLocation = ResourceLocation.REMOTE)
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -10,7 +10,6 @@ import kotlinx.coroutines.async
|
||||
import kotlinx.coroutines.runBlocking
|
||||
import org.apache.commons.rng.sampling.distribution.BoxMullerNormalizedGaussianSampler
|
||||
import org.apache.commons.rng.simple.RandomSource
|
||||
import space.kscience.kmath.random.RandomGenerator
|
||||
import space.kscience.kmath.samplers.GaussianSampler
|
||||
import java.time.Duration
|
||||
import java.time.Instant
|
||||
@ -36,7 +35,7 @@ private suspend fun runKMathChained(): Duration {
|
||||
return Duration.between(startTime, Instant.now())
|
||||
}
|
||||
|
||||
private fun runCMDirect(): Duration {
|
||||
private fun runApacheDirect(): Duration {
|
||||
val rng = RandomSource.create(RandomSource.MT, 123L)
|
||||
|
||||
val sampler = CMGaussianSampler.of(
|
||||
@ -65,7 +64,7 @@ private fun runCMDirect(): Duration {
|
||||
* Comparing chain sampling performance with direct sampling performance
|
||||
*/
|
||||
fun main(): Unit = runBlocking(Dispatchers.Default) {
|
||||
val directJob = async { runCMDirect() }
|
||||
val directJob = async { runApacheDirect() }
|
||||
val chainJob = async { runKMathChained() }
|
||||
println("KMath Chained: ${chainJob.await()}")
|
||||
println("Apache Direct: ${directJob.await()}")
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -9,7 +9,6 @@ import kotlinx.coroutines.runBlocking
|
||||
import space.kscience.kmath.chains.Chain
|
||||
import space.kscience.kmath.chains.combineWithState
|
||||
import space.kscience.kmath.distributions.NormalDistribution
|
||||
import space.kscience.kmath.random.RandomGenerator
|
||||
|
||||
private data class AveragingChainState(var num: Int = 0, var value: Double = 0.0)
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -8,12 +8,12 @@
|
||||
package space.kscience.kmath.structures
|
||||
|
||||
import space.kscience.kmath.complex.*
|
||||
import space.kscience.kmath.linear.transposed
|
||||
import space.kscience.kmath.linear.transpose
|
||||
import space.kscience.kmath.nd.StructureND
|
||||
import space.kscience.kmath.nd.as2D
|
||||
import space.kscience.kmath.nd.ndAlgebra
|
||||
import space.kscience.kmath.nd.structureND
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.operations.invoke
|
||||
import kotlin.system.measureTimeMillis
|
||||
|
||||
@ -21,7 +21,7 @@ fun main() {
|
||||
val dim = 1000
|
||||
val n = 1000
|
||||
|
||||
val realField = Float64Field.ndAlgebra(dim, dim)
|
||||
val realField = DoubleField.ndAlgebra(dim, dim)
|
||||
val complexField: ComplexFieldND = ComplexField.ndAlgebra(dim, dim)
|
||||
|
||||
val realTime = measureTimeMillis {
|
||||
@ -60,7 +60,7 @@ fun complexExample() {
|
||||
val sum = matrix + x + 1.0
|
||||
|
||||
//Represent the sum as 2d-structure and transpose
|
||||
sum.as2D().transposed()
|
||||
sum.as2D().transpose()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
@ -10,7 +10,7 @@ import kotlinx.coroutines.GlobalScope
|
||||
import org.nd4j.linalg.factory.Nd4j
|
||||
import space.kscience.kmath.nd.*
|
||||
import space.kscience.kmath.nd4j.nd4j
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.operations.invoke
|
||||
import space.kscience.kmath.viktor.ViktorFieldND
|
||||
import kotlin.contracts.InvocationKind
|
||||
@ -29,29 +29,31 @@ fun main() {
|
||||
Nd4j.zeros(0)
|
||||
val dim = 1000
|
||||
val n = 1000
|
||||
val shape = ShapeND(dim, dim)
|
||||
val shape = Shape(dim, dim)
|
||||
|
||||
|
||||
// automatically build context most suited for given type.
|
||||
val autoField = BufferedFieldOpsND(DoubleField, Buffer.Companion::auto)
|
||||
// specialized nd-field for Double. It works as generic Double field as well.
|
||||
val doubleField = Float64Field.ndAlgebra
|
||||
//A generic field. It should be used for objects, not primitives.
|
||||
val genericField = BufferedFieldOpsND(Float64Field)
|
||||
val realField = DoubleField.ndAlgebra
|
||||
//A generic boxing field. It should be used for objects, not primitives.
|
||||
val boxingField = BufferedFieldOpsND(DoubleField, Buffer.Companion::boxing)
|
||||
// Nd4j specialized field.
|
||||
val nd4jField = Float64Field.nd4j
|
||||
val nd4jField = DoubleField.nd4j
|
||||
//viktor field
|
||||
val viktorField = ViktorFieldND(dim, dim)
|
||||
//parallel processing based on Java Streams
|
||||
val parallelField = Float64Field.ndStreaming(dim, dim)
|
||||
val parallelField = DoubleField.ndStreaming(dim, dim)
|
||||
|
||||
measureAndPrint("Boxing addition") {
|
||||
genericField {
|
||||
boxingField {
|
||||
var res: StructureND<Double> = one(shape)
|
||||
repeat(n) { res += 1.0 }
|
||||
}
|
||||
}
|
||||
|
||||
measureAndPrint("Specialized addition") {
|
||||
doubleField {
|
||||
realField {
|
||||
var res: StructureND<Double> = one(shape)
|
||||
repeat(n) { res += 1.0 }
|
||||
}
|
||||
@ -78,8 +80,15 @@ fun main() {
|
||||
}
|
||||
}
|
||||
|
||||
measureAndPrint("Automatic field addition") {
|
||||
autoField {
|
||||
var res: StructureND<Double> = one(shape)
|
||||
repeat(n) { res += 1.0 }
|
||||
}
|
||||
}
|
||||
|
||||
measureAndPrint("Lazy addition") {
|
||||
val res = doubleField.one(shape).mapAsync(GlobalScope) {
|
||||
val res = realField.one(shape).mapAsync(GlobalScope) {
|
||||
var c = 0.0
|
||||
repeat(n) {
|
||||
c += 1.0
|
||||
|
@ -1,15 +1,13 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.structures
|
||||
|
||||
import space.kscience.kmath.PerformancePitfall
|
||||
import space.kscience.kmath.nd.*
|
||||
import space.kscience.kmath.operations.DoubleField
|
||||
import space.kscience.kmath.operations.ExtendedField
|
||||
import space.kscience.kmath.operations.Float64Field
|
||||
import space.kscience.kmath.operations.NumbersAddOps
|
||||
import java.util.*
|
||||
import java.util.stream.IntStream
|
||||
@ -18,12 +16,12 @@ import java.util.stream.IntStream
|
||||
* A demonstration implementation of NDField over Real using Java [java.util.stream.DoubleStream] for parallel
|
||||
* execution.
|
||||
*/
|
||||
class StreamDoubleFieldND(override val shape: ShapeND) : FieldND<Double, Float64Field>,
|
||||
class StreamDoubleFieldND(override val shape: IntArray) : FieldND<Double, DoubleField>,
|
||||
NumbersAddOps<StructureND<Double>>,
|
||||
ExtendedField<StructureND<Double>> {
|
||||
|
||||
private val strides = ColumnStrides(shape)
|
||||
override val elementAlgebra: Float64Field get() = Float64Field
|
||||
private val strides = DefaultStrides(shape)
|
||||
override val elementAlgebra: DoubleField get() = DoubleField
|
||||
override val zero: BufferND<Double> by lazy { structureND(shape) { zero } }
|
||||
override val one: BufferND<Double> by lazy { structureND(shape) { one } }
|
||||
|
||||
@ -32,53 +30,37 @@ class StreamDoubleFieldND(override val shape: ShapeND) : FieldND<Double, Float64
|
||||
return structureND(shape) { d }
|
||||
}
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
private val StructureND<Double>.buffer: Float64Buffer
|
||||
private val StructureND<Double>.buffer: DoubleBuffer
|
||||
get() = when {
|
||||
shape != this@StreamDoubleFieldND.shape -> throw ShapeMismatchException(
|
||||
!shape.contentEquals(this@StreamDoubleFieldND.shape) -> throw ShapeMismatchException(
|
||||
this@StreamDoubleFieldND.shape,
|
||||
shape
|
||||
)
|
||||
|
||||
this is BufferND && indices == this@StreamDoubleFieldND.strides -> this.buffer as Float64Buffer
|
||||
else -> Float64Buffer(strides.linearSize) { offset -> get(strides.index(offset)) }
|
||||
this is BufferND && this.indices == this@StreamDoubleFieldND.strides -> this.buffer as DoubleBuffer
|
||||
else -> DoubleBuffer(strides.linearSize) { offset -> get(strides.index(offset)) }
|
||||
}
|
||||
|
||||
override fun structureND(shape: ShapeND, initializer: Float64Field.(IntArray) -> Double): BufferND<Double> {
|
||||
val array = IntStream.range(0, strides.linearSize).parallel().mapToDouble { offset ->
|
||||
val index = strides.index(offset)
|
||||
Float64Field.initializer(index)
|
||||
}.toArray()
|
||||
|
||||
return BufferND(strides, array.asBuffer())
|
||||
}
|
||||
|
||||
override fun mutableStructureND(
|
||||
shape: ShapeND,
|
||||
initializer: DoubleField.(IntArray) -> Double,
|
||||
): MutableBufferND<Double> {
|
||||
override fun structureND(shape: Shape, initializer: DoubleField.(IntArray) -> Double): BufferND<Double> {
|
||||
val array = IntStream.range(0, strides.linearSize).parallel().mapToDouble { offset ->
|
||||
val index = strides.index(offset)
|
||||
DoubleField.initializer(index)
|
||||
}.toArray()
|
||||
|
||||
return MutableBufferND(strides, array.asBuffer())
|
||||
}
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun StructureND<Double>.map(
|
||||
transform: Float64Field.(Double) -> Double,
|
||||
): BufferND<Double> {
|
||||
val array = Arrays.stream(buffer.array).parallel().map { Float64Field.transform(it) }.toArray()
|
||||
return BufferND(strides, array.asBuffer())
|
||||
}
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun StructureND<Double>.map(
|
||||
transform: DoubleField.(Double) -> Double,
|
||||
): BufferND<Double> {
|
||||
val array = Arrays.stream(buffer.array).parallel().map { DoubleField.transform(it) }.toArray()
|
||||
return BufferND(strides, array.asBuffer())
|
||||
}
|
||||
|
||||
override fun StructureND<Double>.mapIndexed(
|
||||
transform: Float64Field.(index: IntArray, Double) -> Double,
|
||||
transform: DoubleField.(index: IntArray, Double) -> Double,
|
||||
): BufferND<Double> {
|
||||
val array = IntStream.range(0, strides.linearSize).parallel().mapToDouble { offset ->
|
||||
Float64Field.transform(
|
||||
DoubleField.transform(
|
||||
strides.index(offset),
|
||||
buffer.array[offset]
|
||||
)
|
||||
@ -87,14 +69,13 @@ class StreamDoubleFieldND(override val shape: ShapeND) : FieldND<Double, Float64
|
||||
return BufferND(strides, array.asBuffer())
|
||||
}
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
override fun zip(
|
||||
left: StructureND<Double>,
|
||||
right: StructureND<Double>,
|
||||
transform: Float64Field.(Double, Double) -> Double,
|
||||
transform: DoubleField.(Double, Double) -> Double,
|
||||
): BufferND<Double> {
|
||||
val array = IntStream.range(0, strides.linearSize).parallel().mapToDouble { offset ->
|
||||
Float64Field.transform(left.buffer.array[offset], right.buffer.array[offset])
|
||||
DoubleField.transform(left.buffer.array[offset], right.buffer.array[offset])
|
||||
}.toArray()
|
||||
return BufferND(strides, array.asBuffer())
|
||||
}
|
||||
@ -124,4 +105,4 @@ class StreamDoubleFieldND(override val shape: ShapeND) : FieldND<Double, Float64
|
||||
override fun atanh(arg: StructureND<Double>): BufferND<Double> = arg.map { atanh(it) }
|
||||
}
|
||||
|
||||
fun Float64Field.ndStreaming(vararg shape: Int): StreamDoubleFieldND = StreamDoubleFieldND(ShapeND(shape))
|
||||
fun DoubleField.ndStreaming(vararg shape: Int): StreamDoubleFieldND = StreamDoubleFieldND(shape)
|
||||
|
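Stripped of the kmath types, the essential trick in StreamDoubleFieldND is parallel element-wise mapping over a flat DoubleArray via Java streams; a minimal standalone sketch:

    import java.util.stream.IntStream

    fun parallelMap(data: DoubleArray, transform: (Double) -> Double): DoubleArray =
        IntStream.range(0, data.size).parallel()
            .mapToDouble { i -> transform(data[i]) } // each element is transformed on the common fork-join pool
            .toArray()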
@ -1,23 +1,20 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.structures
|
||||
|
||||
import space.kscience.kmath.PerformancePitfall
|
||||
import space.kscience.kmath.nd.BufferND
|
||||
import space.kscience.kmath.nd.ColumnStrides
|
||||
import space.kscience.kmath.nd.ShapeND
|
||||
import space.kscience.kmath.nd.DefaultStrides
|
||||
import kotlin.system.measureTimeMillis
|
||||
|
||||
@Suppress("ASSIGNED_BUT_NEVER_ACCESSED_VARIABLE")
|
||||
@OptIn(PerformancePitfall::class)
|
||||
fun main() {
|
||||
val n = 6000
|
||||
val array = DoubleArray(n * n) { 1.0 }
|
||||
val buffer = Float64Buffer(array)
|
||||
val strides = ColumnStrides(ShapeND(n, n))
|
||||
val buffer = DoubleBuffer(array)
|
||||
val strides = DefaultStrides(intArrayOf(n, n))
|
||||
val structure = BufferND(strides, buffer)
|
||||
|
||||
measureTimeMillis {
|
||||
|
@ -1,25 +1,20 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.structures
|
||||
|
||||
import space.kscience.kmath.nd.BufferND
|
||||
import space.kscience.kmath.operations.mapToBuffer
|
||||
import space.kscience.kmath.nd.StructureND
|
||||
import space.kscience.kmath.nd.mapToBuffer
|
||||
import kotlin.system.measureTimeMillis
|
||||
|
||||
private inline fun <T, reified R : Any> BufferND<T>.mapToBufferND(
|
||||
bufferFactory: BufferFactory<R> = BufferFactory(),
|
||||
crossinline block: (T) -> R,
|
||||
): BufferND<R> = BufferND(indices, buffer.mapToBuffer(bufferFactory, block))
|
||||
|
||||
@Suppress("UNUSED_VARIABLE")
|
||||
fun main() {
|
||||
val n = 6000
|
||||
val structure = BufferND(n, n) { 1.0 }
|
||||
structure.mapToBufferND { it + 1 } // warm-up
|
||||
val time1 = measureTimeMillis { val res = structure.mapToBufferND { it + 1 } }
|
||||
val structure = StructureND.buffered(intArrayOf(n, n), Buffer.Companion::auto) { 1.0 }
|
||||
structure.mapToBuffer { it + 1 } // warm-up
|
||||
val time1 = measureTimeMillis { val res = structure.mapToBuffer { it + 1 } }
|
||||
println("Structure mapping finished in $time1 millis")
|
||||
val array = DoubleArray(n * n) { 1.0 }
|
||||
|
||||
@ -30,10 +25,10 @@ fun main() {
|
||||
|
||||
println("Array mapping finished in $time2 millis")
|
||||
|
||||
val buffer = Float64Buffer(DoubleArray(n * n) { 1.0 })
|
||||
val buffer = DoubleBuffer(DoubleArray(n * n) { 1.0 })
|
||||
|
||||
val time3 = measureTimeMillis {
|
||||
val target = Float64Buffer(DoubleArray(n * n))
|
||||
val target = DoubleBuffer(DoubleArray(n * n))
|
||||
val res = array.forEachIndexed { index, value ->
|
||||
target[index] = value + 1
|
||||
}
|
||||
|
@ -1,23 +1,23 @@
/*
 * Copyright 2018-2024 KMath contributors.
 * Copyright 2018-2021 KMath contributors.
 * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
 */

package space.kscience.kmath.structures

import space.kscience.kmath.operations.Float64Field
import space.kscience.kmath.operations.DoubleField
import space.kscience.kmath.operations.buffer
import space.kscience.kmath.operations.bufferAlgebra
import space.kscience.kmath.operations.withSize

inline fun <reified R : Any> MutableBuffer.Companion.same(
    n: Int,
    value: R,
): MutableBuffer<R> = MutableBuffer(n) { value }
    value: R
): MutableBuffer<R> = auto(n) { value }


fun main() {
    with(Float64Field.bufferAlgebra.withSize(5)) {
    with(DoubleField.bufferAlgebra.withSize(5)) {
        println(number(2.0) + buffer(1, 2, 3, 4, 5))
    }
}
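The sized buffer algebra used above lifts ordinary field operations to whole buffers element by element; a small sketch reusing the same scope, assuming multiplication is lifted the same way as the addition shown:

    with(Float64Field.bufferAlgebra.withSize(5)) {
        val a = buffer(1, 2, 3, 4, 5)
        println(a * number(2.0)) // element-wise scaling: [2.0, 4.0, 6.0, 8.0, 10.0]
    }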
@ -1,26 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2023 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.structures
|
||||
|
||||
import space.kscience.kmath.PerformancePitfall
|
||||
import space.kscience.kmath.nd.*
|
||||
import space.kscience.kmath.operations.algebra
|
||||
|
||||
@OptIn(PerformancePitfall::class)
|
||||
fun main(): Unit = with(Double.algebra.ndAlgebra) {
|
||||
val structure: MutableStructure2D<Double> = mutableStructureND(ShapeND(2, 2)) { (i, j) ->
|
||||
i.toDouble() + j.toDouble()
|
||||
}.as2D()
|
||||
|
||||
structure[0, 1] = -2.0
|
||||
|
||||
val structure2 = mutableStructureND(2, 2) { (i, j) -> i.toDouble() + j.toDouble() }.as2D()
|
||||
|
||||
structure2[0, 1] = 2.0
|
||||
|
||||
|
||||
println(structure + structure2)
|
||||
}
|
@ -1,5 +1,5 @@
/*
 * Copyright 2018-2024 KMath contributors.
 * Copyright 2018-2021 KMath contributors.
 * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
 */

@ -15,7 +15,7 @@ private fun DMatrixContext<Double, *>.simple() {
    val m2 = produce<D3, D2> { i, j -> (i + j).toDouble() }

    //Dimension-safe addition
    m1.transposed() + m2
    m1.transpose() + m2
}

private object D5 : Dimension {
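The addition in this hunk type-checks only because the dimensions line up: presumably m1 is produced as a D2×D3 matrix (its construction is not shown in the hunk), so its transpose has the same D3×D2 shape as m2. A sketch of that assumption:

    val m1 = produce<D2, D3> { i, j -> (i * j).toDouble() } // hypothetical, mirrors how m2 is built above
    m1.transposed() + m2 // both sides are D3×D2, so a shape mismatch would be a compile-time error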
@ -1,92 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.tensors.LevenbergMarquardt.StaticLm
|
||||
|
||||
import space.kscience.kmath.nd.ShapeND
|
||||
import space.kscience.kmath.nd.as2D
|
||||
import space.kscience.kmath.nd.component1
|
||||
import space.kscience.kmath.tensors.LevenbergMarquardt.funcDifficultForLm
|
||||
import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
|
||||
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.LMInput
|
||||
import space.kscience.kmath.tensors.core.levenbergMarquardt
|
||||
import kotlin.math.roundToInt
|
||||
|
||||
fun main() {
|
||||
val NData = 200
|
||||
var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
|
||||
for (i in 0 until NData) {
|
||||
t_example[i, 0] = t_example[i, 0] * (i + 1) - 104
|
||||
}
|
||||
|
||||
val Nparams = 15
|
||||
var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
|
||||
for (i in 0 until Nparams) {
|
||||
p_example[i, 0] = p_example[i, 0] + i - 25
|
||||
}
|
||||
|
||||
val exampleNumber = 1
|
||||
|
||||
var y_hat = funcDifficultForLm(t_example, p_example, exampleNumber)
|
||||
|
||||
var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
|
||||
for (i in 0 until Nparams) {
|
||||
p_init[i, 0] = (p_example[i, 0] + 0.9)
|
||||
}
|
||||
|
||||
var t = t_example
|
||||
val y_dat = y_hat
|
||||
val weight = 1.0 / Nparams * 1.0 - 0.085
|
||||
val dp = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
|
||||
).as2D()
|
||||
var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
|
||||
p_min = p_min.div(1.0 / -50.0)
|
||||
val p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
|
||||
p_min = p_min.div(1.0 / 50.0)
|
||||
val opts = doubleArrayOf(3.0, 10000.0, 1e-6, 1e-6, 1e-6, 1e-6, 1e-2, 11.0, 9.0, 1.0)
|
||||
// val opts = doubleArrayOf(3.0, 10000.0, 1e-6, 1e-6, 1e-6, 1e-6, 1e-3, 11.0, 9.0, 1.0)
|
||||
|
||||
val inputData = LMInput(
|
||||
::funcDifficultForLm,
|
||||
p_init.as2D(),
|
||||
t,
|
||||
y_dat,
|
||||
weight,
|
||||
dp,
|
||||
p_min.as2D(),
|
||||
p_max.as2D(),
|
||||
opts[1].toInt(),
|
||||
doubleArrayOf(opts[2], opts[3], opts[4], opts[5]),
|
||||
doubleArrayOf(opts[6], opts[7], opts[8]),
|
||||
opts[9].toInt(),
|
||||
10,
|
||||
1
|
||||
)
|
||||
|
||||
val result = DoubleTensorAlgebra.levenbergMarquardt(inputData)
|
||||
|
||||
println("Parameters:")
|
||||
for (i in 0 until result.resultParameters.shape.component1()) {
|
||||
val x = (result.resultParameters[i, 0] * 10000).roundToInt() / 10000.0
|
||||
print("$x ")
|
||||
}
|
||||
println()
|
||||
|
||||
println("Y true and y received:")
|
||||
var y_hat_after = funcDifficultForLm(t_example, result.resultParameters, exampleNumber)
|
||||
for (i in 0 until y_hat.shape.component1()) {
|
||||
val x = (y_hat[i, 0] * 10000).roundToInt() / 10000.0
|
||||
val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
|
||||
println("$x $y")
|
||||
}
|
||||
|
||||
println("Сhi_sq:")
|
||||
println(result.resultChiSq)
|
||||
println("Number of iterations:")
|
||||
println(result.iterations)
|
||||
}
|
@ -1,59 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.tensors.LevenbergMarquardt.StaticLm
|
||||
|
||||
import space.kscience.kmath.nd.ShapeND
|
||||
import space.kscience.kmath.nd.as2D
|
||||
import space.kscience.kmath.nd.component1
|
||||
import space.kscience.kmath.tensors.LevenbergMarquardt.funcDifficultForLm
|
||||
import space.kscience.kmath.tensors.LevenbergMarquardt.funcEasyForLm
|
||||
import space.kscience.kmath.tensors.LevenbergMarquardt.getStartDataForFuncEasy
|
||||
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.LMInput
|
||||
import space.kscience.kmath.tensors.core.levenbergMarquardt
|
||||
import kotlin.math.roundToInt
|
||||
|
||||
fun main() {
|
||||
val startedData = getStartDataForFuncEasy()
|
||||
val inputData = LMInput(
|
||||
::funcEasyForLm,
|
||||
DoubleTensorAlgebra.ones(ShapeND(intArrayOf(4, 1))).as2D(),
|
||||
startedData.t,
|
||||
startedData.y_dat,
|
||||
startedData.weight,
|
||||
startedData.dp,
|
||||
startedData.p_min,
|
||||
startedData.p_max,
|
||||
startedData.opts[1].toInt(),
|
||||
doubleArrayOf(startedData.opts[2], startedData.opts[3], startedData.opts[4], startedData.opts[5]),
|
||||
doubleArrayOf(startedData.opts[6], startedData.opts[7], startedData.opts[8]),
|
||||
startedData.opts[9].toInt(),
|
||||
10,
|
||||
startedData.example_number
|
||||
)
|
||||
|
||||
val result = DoubleTensorAlgebra.levenbergMarquardt(inputData)
|
||||
|
||||
println("Parameters:")
|
||||
for (i in 0 until result.resultParameters.shape.component1()) {
|
||||
val x = (result.resultParameters[i, 0] * 10000).roundToInt() / 10000.0
|
||||
print("$x ")
|
||||
}
|
||||
println()
|
||||
|
||||
println("Y true and y received:")
|
||||
var y_hat_after = funcDifficultForLm(startedData.t, result.resultParameters, startedData.example_number)
|
||||
for (i in 0 until startedData.y_dat.shape.component1()) {
|
||||
val x = (startedData.y_dat[i, 0] * 10000).roundToInt() / 10000.0
|
||||
val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
|
||||
println("$x $y")
|
||||
}
|
||||
|
||||
println("Сhi_sq:")
|
||||
println(result.resultChiSq)
|
||||
println("Number of iterations:")
|
||||
println(result.iterations)
|
||||
}
|
@ -1,91 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.tensors.LevenbergMarquardt.StaticLm
|
||||
|
||||
import space.kscience.kmath.nd.ShapeND
|
||||
import space.kscience.kmath.nd.as2D
|
||||
import space.kscience.kmath.nd.component1
|
||||
import space.kscience.kmath.tensors.LevenbergMarquardt.funcMiddleForLm
|
||||
import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
|
||||
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.LMInput
|
||||
import space.kscience.kmath.tensors.core.levenbergMarquardt
|
||||
import kotlin.math.roundToInt
|
||||
|
||||
fun main() {
|
||||
val NData = 100
|
||||
var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
|
||||
for (i in 0 until NData) {
|
||||
t_example[i, 0] = t_example[i, 0] * (i + 1)
|
||||
}
|
||||
|
||||
val Nparams = 20
|
||||
var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
|
||||
for (i in 0 until Nparams) {
|
||||
p_example[i, 0] = p_example[i, 0] + i - 25
|
||||
}
|
||||
|
||||
val exampleNumber = 1
|
||||
|
||||
var y_hat = funcMiddleForLm(t_example, p_example, exampleNumber)
|
||||
|
||||
var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
|
||||
for (i in 0 until Nparams) {
|
||||
p_init[i, 0] = (p_example[i, 0] + 0.9)
|
||||
}
|
||||
|
||||
var t = t_example
|
||||
val y_dat = y_hat
|
||||
val weight = 1.0
|
||||
val dp = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
|
||||
).as2D()
|
||||
var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
|
||||
p_min = p_min.div(1.0 / -50.0)
|
||||
val p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
|
||||
p_min = p_min.div(1.0 / 50.0)
|
||||
val opts = doubleArrayOf(3.0, 7000.0, 1e-5, 1e-5, 1e-5, 1e-5, 1e-5, 11.0, 9.0, 1.0)
|
||||
|
||||
val inputData = LMInput(
|
||||
::funcMiddleForLm,
|
||||
p_init.as2D(),
|
||||
t,
|
||||
y_dat,
|
||||
weight,
|
||||
dp,
|
||||
p_min.as2D(),
|
||||
p_max.as2D(),
|
||||
opts[1].toInt(),
|
||||
doubleArrayOf(opts[2], opts[3], opts[4], opts[5]),
|
||||
doubleArrayOf(opts[6], opts[7], opts[8]),
|
||||
opts[9].toInt(),
|
||||
10,
|
||||
1
|
||||
)
|
||||
|
||||
val result = DoubleTensorAlgebra.levenbergMarquardt(inputData)
|
||||
|
||||
println("Parameters:")
|
||||
for (i in 0 until result.resultParameters.shape.component1()) {
|
||||
val x = (result.resultParameters[i, 0] * 10000).roundToInt() / 10000.0
|
||||
print("$x ")
|
||||
}
|
||||
println()
|
||||
|
||||
|
||||
var y_hat_after = funcMiddleForLm(t_example, result.resultParameters, exampleNumber)
|
||||
for (i in 0 until y_hat.shape.component1()) {
|
||||
val x = (y_hat[i, 0] * 10000).roundToInt() / 10000.0
|
||||
val y = (y_hat_after[i, 0] * 10000).roundToInt() / 10000.0
|
||||
println("$x $y")
|
||||
}
|
||||
|
||||
println("Сhi_sq:")
|
||||
println(result.resultChiSq)
|
||||
println("Number of iterations:")
|
||||
println(result.iterations)
|
||||
}
|
@ -1,75 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.tensors.LevenbergMarquardt.StreamingLm
|
||||
|
||||
import kotlinx.coroutines.delay
|
||||
import kotlinx.coroutines.flow.Flow
|
||||
import kotlinx.coroutines.flow.flow
|
||||
import space.kscience.kmath.nd.MutableStructure2D
|
||||
import space.kscience.kmath.nd.ShapeND
|
||||
import space.kscience.kmath.nd.as2D
|
||||
import space.kscience.kmath.nd.component1
|
||||
import space.kscience.kmath.tensors.LevenbergMarquardt.StartDataLm
|
||||
import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.zeros
|
||||
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.LMInput
|
||||
import space.kscience.kmath.tensors.core.levenbergMarquardt
|
||||
import kotlin.random.Random
|
||||
|
||||
fun streamLm(
|
||||
lm_func: (MutableStructure2D<Double>, MutableStructure2D<Double>, Int) -> (MutableStructure2D<Double>),
|
||||
startData: StartDataLm, launchFrequencyInMs: Long, numberOfLaunches: Int,
|
||||
): Flow<MutableStructure2D<Double>> = flow {
|
||||
|
||||
var example_number = startData.example_number
|
||||
var p_init = startData.p_init
|
||||
var t = startData.t
|
||||
var y_dat = startData.y_dat
|
||||
val weight = startData.weight
|
||||
val dp = startData.dp
|
||||
val p_min = startData.p_min
|
||||
val p_max = startData.p_max
|
||||
val opts = startData.opts
|
||||
|
||||
var steps = numberOfLaunches
|
||||
val isEndless = (steps <= 0)
|
||||
|
||||
val inputData = LMInput(
|
||||
lm_func,
|
||||
p_init,
|
||||
t,
|
||||
y_dat,
|
||||
weight,
|
||||
dp,
|
||||
p_min,
|
||||
p_max,
|
||||
opts[1].toInt(),
|
||||
doubleArrayOf(opts[2], opts[3], opts[4], opts[5]),
|
||||
doubleArrayOf(opts[6], opts[7], opts[8]),
|
||||
opts[9].toInt(),
|
||||
10,
|
||||
example_number
|
||||
)
|
||||
|
||||
while (isEndless || steps > 0) {
|
||||
val result = DoubleTensorAlgebra.levenbergMarquardt(inputData)
|
||||
emit(result.resultParameters)
|
||||
delay(launchFrequencyInMs)
|
||||
inputData.realValues = generateNewYDat(y_dat, 0.1)
|
||||
inputData.startParameters = result.resultParameters
|
||||
if (!isEndless) steps -= 1
|
||||
}
|
||||
}
|
||||
|
||||
fun generateNewYDat(y_dat: MutableStructure2D<Double>, delta: Double): MutableStructure2D<Double> {
|
||||
val n = y_dat.shape.component1()
|
||||
val y_dat_new = zeros(ShapeND(intArrayOf(n, 1))).as2D()
|
||||
for (i in 0 until n) {
|
||||
val randomEps = Random.nextDouble(delta + delta) - delta
|
||||
y_dat_new[i, 0] = y_dat[i, 0] + randomEps
|
||||
}
|
||||
return y_dat_new
|
||||
}
|
@ -1,33 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.tensors.LevenbergMarquardt.StreamingLm
|
||||
|
||||
import space.kscience.kmath.nd.component1
|
||||
import space.kscience.kmath.tensors.LevenbergMarquardt.funcDifficultForLm
|
||||
import space.kscience.kmath.tensors.LevenbergMarquardt.getStartDataForFuncDifficult
|
||||
import kotlin.math.roundToInt
|
||||
|
||||
suspend fun main() {
|
||||
val startData = getStartDataForFuncDifficult()
|
||||
// Create the flow:
|
||||
val lmFlow = streamLm(::funcDifficultForLm, startData, 0, 100)
|
||||
var initialTime = System.currentTimeMillis()
|
||||
var lastTime: Long
|
||||
val launches = mutableListOf<Long>()
|
||||
// Collect the flow
|
||||
lmFlow.collect { parameters ->
|
||||
lastTime = System.currentTimeMillis()
|
||||
launches.add(lastTime - initialTime)
|
||||
initialTime = lastTime
|
||||
for (i in 0 until parameters.shape.component1()) {
|
||||
val x = (parameters[i, 0] * 10000).roundToInt() / 10000.0
|
||||
print("$x ")
|
||||
if (i == parameters.shape.component1() - 1) println()
|
||||
}
|
||||
}
|
||||
|
||||
println("Average without first is: ${launches.subList(1, launches.size - 1).average()}")
|
||||
}
|
@ -1,232 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.tensors.LevenbergMarquardt
|
||||
|
||||
import space.kscience.kmath.nd.MutableStructure2D
|
||||
import space.kscience.kmath.nd.ShapeND
|
||||
import space.kscience.kmath.nd.as2D
|
||||
import space.kscience.kmath.nd.component1
|
||||
import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra.div
|
||||
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.max
|
||||
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.plus
|
||||
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.pow
|
||||
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra.Companion.times
|
||||
import space.kscience.kmath.tensors.core.asDoubleTensor
|
||||
|
||||
public data class StartDataLm(
|
||||
var lm_matx_y_dat: MutableStructure2D<Double>,
|
||||
var example_number: Int,
|
||||
var p_init: MutableStructure2D<Double>,
|
||||
var t: MutableStructure2D<Double>,
|
||||
var y_dat: MutableStructure2D<Double>,
|
||||
var weight: Double,
|
||||
var dp: MutableStructure2D<Double>,
|
||||
var p_min: MutableStructure2D<Double>,
|
||||
var p_max: MutableStructure2D<Double>,
|
||||
var consts: MutableStructure2D<Double>,
|
||||
var opts: DoubleArray,
|
||||
)
|
||||
|
||||
fun funcEasyForLm(
|
||||
t: MutableStructure2D<Double>,
|
||||
p: MutableStructure2D<Double>,
|
||||
exampleNumber: Int,
|
||||
): MutableStructure2D<Double> {
|
||||
val m = t.shape.component1()
|
||||
var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(m, 1)))
|
||||
|
||||
if (exampleNumber == 1) {
|
||||
y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0]))).times(p[0, 0]) + t.times(p[2, 0]).times(
|
||||
DoubleTensorAlgebra.exp((t.times(-1.0 / p[3, 0])))
|
||||
)
|
||||
} else if (exampleNumber == 2) {
|
||||
val mt = t.max()
|
||||
y_hat = (t.times(1.0 / mt)).times(p[0, 0]) +
|
||||
(t.times(1.0 / mt)).pow(2).times(p[1, 0]) +
|
||||
(t.times(1.0 / mt)).pow(3).times(p[2, 0]) +
|
||||
(t.times(1.0 / mt)).pow(4).times(p[3, 0])
|
||||
} else if (exampleNumber == 3) {
|
||||
y_hat = DoubleTensorAlgebra.exp((t.times(-1.0 / p[1, 0])))
|
||||
.times(p[0, 0]) + DoubleTensorAlgebra.sin((t.times(1.0 / p[3, 0]))).times(p[2, 0])
|
||||
}
|
||||
|
||||
return y_hat.as2D()
|
||||
}
|
||||
|
||||
fun funcMiddleForLm(
|
||||
t: MutableStructure2D<Double>,
|
||||
p: MutableStructure2D<Double>,
|
||||
exampleNumber: Int,
|
||||
): MutableStructure2D<Double> {
|
||||
val m = t.shape.component1()
|
||||
var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(m, 1)))
|
||||
|
||||
val mt = t.max()
|
||||
for (i in 0 until p.shape.component1()) {
|
||||
y_hat += (t.times(1.0 / mt)).times(p[i, 0])
|
||||
}
|
||||
|
||||
for (i in 0 until 5) {
|
||||
y_hat = funcEasyForLm(y_hat.as2D(), p, exampleNumber).asDoubleTensor()
|
||||
}
|
||||
|
||||
return y_hat.as2D()
|
||||
}
|
||||
|
||||
fun funcDifficultForLm(
|
||||
t: MutableStructure2D<Double>,
|
||||
p: MutableStructure2D<Double>,
|
||||
exampleNumber: Int,
|
||||
): MutableStructure2D<Double> {
|
||||
val m = t.shape.component1()
|
||||
var y_hat = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(m, 1)))
|
||||
|
||||
val mt = t.max()
|
||||
for (i in 0 until p.shape.component1()) {
|
||||
y_hat = y_hat.plus((t.times(1.0 / mt)).times(p[i, 0]))
|
||||
}
|
||||
|
||||
for (i in 0 until 4) {
|
||||
y_hat = funcEasyForLm((y_hat.as2D() + t).as2D(), p, exampleNumber).asDoubleTensor()
|
||||
}
|
||||
|
||||
return y_hat.as2D()
|
||||
}
|
||||
|
||||
|
||||
fun getStartDataForFuncDifficult(): StartDataLm {
|
||||
val NData = 200
|
||||
var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
|
||||
for (i in 0 until NData) {
|
||||
t_example[i, 0] = t_example[i, 0] * (i + 1) - 104
|
||||
}
|
||||
|
||||
val Nparams = 15
|
||||
var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
|
||||
for (i in 0 until Nparams) {
|
||||
p_example[i, 0] = p_example[i, 0] + i - 25
|
||||
}
|
||||
|
||||
val exampleNumber = 1
|
||||
|
||||
var y_hat = funcDifficultForLm(t_example, p_example, exampleNumber)
|
||||
|
||||
var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
|
||||
for (i in 0 until Nparams) {
|
||||
p_init[i, 0] = (p_example[i, 0] + 0.9)
|
||||
}
|
||||
|
||||
var t = t_example
|
||||
val y_dat = y_hat
|
||||
val weight = 1.0 / Nparams * 1.0 - 0.085
|
||||
val dp = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
|
||||
).as2D()
|
||||
var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
|
||||
p_min = p_min.div(1.0 / -50.0)
|
||||
val p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
|
||||
p_min = p_min.div(1.0 / 50.0)
|
||||
val consts = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
|
||||
).as2D()
|
||||
val opts = doubleArrayOf(3.0, 10000.0, 1e-2, 1e-3, 1e-2, 1e-2, 1e-2, 11.0, 9.0, 1.0)
|
||||
|
||||
return StartDataLm(y_dat, 1, p_init, t, y_dat, weight, dp, p_min.as2D(), p_max.as2D(), consts, opts)
|
||||
}
|
||||
|
||||
fun getStartDataForFuncMiddle(): StartDataLm {
|
||||
val NData = 100
|
||||
var t_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(NData, 1))).as2D()
|
||||
for (i in 0 until NData) {
|
||||
t_example[i, 0] = t_example[i, 0] * (i + 1)
|
||||
}
|
||||
|
||||
val Nparams = 20
|
||||
var p_example = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1))).as2D()
|
||||
for (i in 0 until Nparams) {
|
||||
p_example[i, 0] = p_example[i, 0] + i - 25
|
||||
}
|
||||
|
||||
val exampleNumber = 1
|
||||
|
||||
var y_hat = funcMiddleForLm(t_example, p_example, exampleNumber)
|
||||
|
||||
var p_init = DoubleTensorAlgebra.zeros(ShapeND(intArrayOf(Nparams, 1))).as2D()
|
||||
for (i in 0 until Nparams) {
|
||||
p_init[i, 0] = (p_example[i, 0] + 10.0)
|
||||
}
|
||||
var t = t_example
|
||||
val y_dat = y_hat
|
||||
val weight = 1.0
|
||||
val dp = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
|
||||
).as2D()
|
||||
var p_min = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
|
||||
p_min = p_min.div(1.0 / -50.0)
|
||||
val p_max = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(Nparams, 1)))
|
||||
p_min = p_min.div(1.0 / 50.0)
|
||||
val consts = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
|
||||
).as2D()
|
||||
val opts = doubleArrayOf(3.0, 10000.0, 1e-5, 1e-5, 1e-5, 1e-5, 1e-5, 11.0, 9.0, 1.0)
|
||||
|
||||
var example_number = 1
|
||||
|
||||
return StartDataLm(y_dat, example_number, p_init, t, y_dat, weight, dp, p_min.as2D(), p_max.as2D(), consts, opts)
|
||||
}
|
||||
|
||||
fun getStartDataForFuncEasy(): StartDataLm {
|
||||
val lm_matx_y_dat = doubleArrayOf(
|
||||
19.6594, 18.6096, 17.6792, 17.2747, 16.3065, 17.1458, 16.0467, 16.7023, 15.7809, 15.9807,
|
||||
14.7620, 15.1128, 16.0973, 15.1934, 15.8636, 15.4763, 15.6860, 15.1895, 15.3495, 16.6054,
|
||||
16.2247, 15.9854, 16.1421, 17.0960, 16.7769, 17.1997, 17.2767, 17.5882, 17.5378, 16.7894,
|
||||
17.7648, 18.2512, 18.1581, 16.7037, 17.8475, 17.9081, 18.3067, 17.9632, 18.2817, 19.1427,
|
||||
18.8130, 18.5658, 18.0056, 18.4607, 18.5918, 18.2544, 18.3731, 18.7511, 19.3181, 17.3066,
|
||||
17.9632, 19.0513, 18.7528, 18.2928, 18.5967, 17.8567, 17.7859, 18.4016, 18.9423, 18.4959,
|
||||
17.8000, 18.4251, 17.7829, 17.4645, 17.5221, 17.3517, 17.4637, 17.7563, 16.8471, 17.4558,
|
||||
17.7447, 17.1487, 17.3183, 16.8312, 17.7551, 17.0942, 15.6093, 16.4163, 15.3755, 16.6725,
|
||||
16.2332, 16.2316, 16.2236, 16.5361, 15.3721, 15.3347, 15.5815, 15.6319, 14.4538, 14.6044,
|
||||
14.7665, 13.3718, 15.0587, 13.8320, 14.7873, 13.6824, 14.2579, 14.2154, 13.5818, 13.8157
|
||||
)
|
||||
|
||||
var example_number = 1
|
||||
val p_init = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(4, 1)), doubleArrayOf(5.0, 2.0, 0.2, 10.0)
|
||||
).as2D()
|
||||
|
||||
var t = DoubleTensorAlgebra.ones(ShapeND(intArrayOf(100, 1))).as2D()
|
||||
for (i in 0 until 100) {
|
||||
t[i, 0] = t[i, 0] * (i + 1)
|
||||
}
|
||||
|
||||
val y_dat = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(100, 1)), lm_matx_y_dat
|
||||
).as2D()
|
||||
|
||||
val weight = 4.0
|
||||
|
||||
val dp = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(1, 1)), DoubleArray(1) { -0.01 }
|
||||
).as2D()
|
||||
|
||||
val p_min = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(4, 1)), doubleArrayOf(-50.0, -20.0, -2.0, -100.0)
|
||||
).as2D()
|
||||
|
||||
val p_max = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(4, 1)), doubleArrayOf(50.0, 20.0, 2.0, 100.0)
|
||||
).as2D()
|
||||
|
||||
val consts = BroadcastDoubleTensorAlgebra.fromArray(
|
||||
ShapeND(intArrayOf(1, 1)), doubleArrayOf(0.0)
|
||||
).as2D()
|
||||
|
||||
val opts = doubleArrayOf(3.0, 100.0, 1e-3, 1e-3, 1e-1, 1e-1, 1e-2, 11.0, 9.0, 1.0)
|
||||
|
||||
return StartDataLm(y_dat, example_number, p_init, t, y_dat, weight, dp, p_min, p_max, consts, opts)
|
||||
}
|
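For exampleNumber = 1, the model these Levenberg-Marquardt examples fit is, as read from funcEasyForLm above,

    y_hat(t) = p1 * exp(-t / p2) + p3 * t * exp(-t / p4)

where p1..p4 are the four entries of the parameter column p.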
@ -1,16 +1,14 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.tensors
|
||||
|
||||
import space.kscience.kmath.nd.ShapeND
|
||||
import space.kscience.kmath.operations.invoke
|
||||
import space.kscience.kmath.tensors.core.DoubleTensor
|
||||
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.randomNormal
|
||||
import space.kscience.kmath.tensors.core.randomNormalLike
|
||||
|
||||
import kotlin.math.abs
|
||||
|
||||
// OLS estimator using SVD
|
||||
@ -23,10 +21,10 @@ fun main() {
|
||||
DoubleTensorAlgebra {
|
||||
// take coefficient vector from normal distribution
|
||||
val alpha = randomNormal(
|
||||
ShapeND(5),
|
||||
intArrayOf(5),
|
||||
randSeed
|
||||
) + fromArray(
|
||||
ShapeND(5),
|
||||
intArrayOf(5),
|
||||
doubleArrayOf(1.0, 2.5, 3.4, 5.0, 10.1)
|
||||
)
|
||||
|
||||
@ -34,37 +32,35 @@ fun main() {
|
||||
|
||||
// also take sample of size 20 from normal distribution for x
|
||||
val x = randomNormal(
|
||||
ShapeND(20, 5),
|
||||
intArrayOf(20, 5),
|
||||
randSeed
|
||||
)
|
||||
|
||||
// calculate y and add gaussian noise (N(0, 0.05))
|
||||
val y = x dot alpha
|
||||
y += randomNormalLike(y, randSeed) * 0.05
|
||||
y += y.randomNormalLike(randSeed) * 0.05
|
||||
|
||||
// now restore the coefficient vector with OSL estimator with SVD
|
||||
val (u, singValues, v) = svd(x)
|
||||
val (u, singValues, v) = x.svd()
|
||||
|
||||
// we have to make sure the singular values of the matrix are not close to zero
|
||||
println("Singular values:\n$singValues")
|
||||
|
||||
|
||||
// inverse Sigma matrix can be restored from singular values with diagonalEmbedding function
|
||||
val sigma = diagonalEmbedding(singValues.map { if (abs(it) < 1e-3) 0.0 else 1.0 / it })
|
||||
val sigma = diagonalEmbedding(singValues.map{ if (abs(it) < 1e-3) 0.0 else 1.0/it })
|
||||
|
||||
val alphaOLS = v dot sigma dot u.transposed() dot y
|
||||
println(
|
||||
"Estimated alpha:\n" +
|
||||
"$alphaOLS"
|
||||
)
|
||||
val alphaOLS = v dot sigma dot u.transpose() dot y
|
||||
println("Estimated alpha:\n" +
|
||||
"$alphaOLS")
|
||||
|
||||
// figure out MSE of approximation
|
||||
fun mse(yTrue: DoubleTensor, yPred: DoubleTensor): Double {
|
||||
require(yTrue.shape.size == 1)
|
||||
require(yTrue.shape == yPred.shape)
|
||||
require(yTrue.shape contentEquals yPred.shape)
|
||||
|
||||
val diff = yTrue - yPred
|
||||
return sqrt(diff.dot(diff)).value()
|
||||
return diff.dot(diff).sqrt().value()
|
||||
}
|
||||
|
||||
println("MSE: ${mse(alpha, alphaOLS)}")
|
||||
|
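In matrix form, the estimator computed in this example is the SVD pseudo-inverse solution: writing the design matrix as X,

    X = U * Sigma * V^T,   alpha_OLS = V * Sigma^+ * U^T * y

where Sigma^+ inverts every singular value except those close to zero, exactly as the diagonalEmbedding line above does.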
@ -1,12 +1,12 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.tensors
|
||||
|
||||
import space.kscience.kmath.nd.ShapeND
|
||||
import space.kscience.kmath.tensors.core.*
|
||||
import space.kscience.kmath.tensors.core.tensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.withBroadcast
|
||||
|
||||
|
||||
// simple PCA
|
||||
@ -16,49 +16,49 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with
|
||||
|
||||
// assume x is range from 0 until 10
|
||||
val x = fromArray(
|
||||
ShapeND(10),
|
||||
intArrayOf(10),
|
||||
DoubleArray(10) { it.toDouble() }
|
||||
)
|
||||
|
||||
// take y dependent on x with noise
|
||||
val y = 2.0 * x + (3.0 + randomNormalLike(x, seed) * 1.5)
|
||||
val y = 2.0 * x + (3.0 + x.randomNormalLike(seed) * 1.5)
|
||||
|
||||
println("x:\n$x")
|
||||
println("y:\n$y")
|
||||
|
||||
// stack them into single dataset
|
||||
val dataset = stack(listOf(x, y)).transposed()
|
||||
val dataset = stack(listOf(x, y)).transpose()
|
||||
|
||||
// normalize both x and y
|
||||
val xMean = mean(x)
|
||||
val yMean = mean(y)
|
||||
val xMean = x.mean()
|
||||
val yMean = y.mean()
|
||||
|
||||
val xStd = std(x)
|
||||
val yStd = std(y)
|
||||
val xStd = x.std()
|
||||
val yStd = y.std()
|
||||
|
||||
val xScaled: DoubleTensor = (x - xMean) / xStd
|
||||
val yScaled: DoubleTensor = (y - yMean) / yStd
|
||||
val xScaled = (x - xMean) / xStd
|
||||
val yScaled = (y - yMean) / yStd
|
||||
|
||||
// save means and standard deviations for further recovery
|
||||
val mean = fromArray(
|
||||
ShapeND(2),
|
||||
intArrayOf(2),
|
||||
doubleArrayOf(xMean, yMean)
|
||||
)
|
||||
println("Means:\n$mean")
|
||||
|
||||
val std = fromArray(
|
||||
ShapeND(2),
|
||||
intArrayOf(2),
|
||||
doubleArrayOf(xStd, yStd)
|
||||
)
|
||||
println("Standard deviations:\n$std")
|
||||
|
||||
// calculate the covariance matrix of scaled x and y
|
||||
val covMatrix = covariance(listOf(xScaled.asDoubleTensor1D(), yScaled.asDoubleTensor1D()))
|
||||
val covMatrix = cov(listOf(xScaled, yScaled))
|
||||
println("Covariance matrix:\n$covMatrix")
|
||||
|
||||
// and find out eigenvector of it
|
||||
val (_, evecs) = symEig(covMatrix)
|
||||
val v = evecs.getTensor(0)
|
||||
val (_, evecs) = covMatrix.symEig()
|
||||
val v = evecs[0]
|
||||
println("Eigenvector:\n$v")
|
||||
|
||||
// reduce dimension of dataset
|
||||
@ -68,7 +68,7 @@ fun main(): Unit = Double.tensorAlgebra.withBroadcast { // work in context with
|
||||
// we can restore original data from reduced data;
|
||||
// for example, find 7th element of dataset.
|
||||
val n = 7
|
||||
val restored = (datasetReduced.getTensor(n) dot v.view(ShapeND(1, 2))) * std + mean
|
||||
println("Original value:\n${dataset.getTensor(n)}")
|
||||
val restored = (datasetReduced[n] dot v.view(intArrayOf(1, 2))) * std + mean
|
||||
println("Original value:\n${dataset[n]}")
|
||||
println("Restored value:\n$restored")
|
||||
}
|
||||
|
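The dimension-reduction step in this example is an ordinary PCA projection: with the standardized data X_scaled, the code builds the covariance matrix C, takes an eigenvector v of C (C v = lambda v), and then, presumably,

    datasetReduced = X_scaled * v

so each sample is replaced by its coordinate along v; the "restored value" lines above undo that projection and then the standardization.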
@ -1,12 +1,10 @@
|
||||
/*
|
||||
* Copyright 2018-2024 KMath contributors.
|
||||
* Copyright 2018-2021 KMath contributors.
|
||||
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
|
||||
*/
|
||||
|
||||
package space.kscience.kmath.tensors
|
||||
|
||||
import space.kscience.kmath.nd.ShapeND
|
||||
import space.kscience.kmath.tensors.core.randomNormal
|
||||
import space.kscience.kmath.tensors.core.tensorAlgebra
|
||||
import space.kscience.kmath.tensors.core.withBroadcast
|
||||
|
||||
@ -15,17 +13,17 @@ import space.kscience.kmath.tensors.core.withBroadcast
|
||||
|
||||
fun main() = Double.tensorAlgebra.withBroadcast { // work in context with broadcast methods
// take dataset of 5-element vectors from normal distribution
val dataset = randomNormal(ShapeND(100, 5)) * 1.5 // all elements from N(0, 1.5)
val dataset = randomNormal(intArrayOf(100, 5)) * 1.5 // all elements from N(0, 1.5)

dataset += fromArray(
ShapeND(5),
intArrayOf(5),
doubleArrayOf(0.0, 1.0, 1.5, 3.0, 5.0) // row means
)

// find out mean and standard deviation of each column
val mean = mean(dataset, 0, false)
val std = std(dataset, 0, false)
val mean = dataset.mean(0, false)
val std = dataset.std(0, false)

println("Mean:\n$mean")
println("Standard deviation:\n$std")

@ -37,8 +35,8 @@ fun main() = Double.tensorAlgebra.withBroadcast { // work in context with broad

// now we can scale dataset with mean normalization
val datasetScaled = (dataset - mean) / std

// find out mean and standardDiviation of scaled dataset
// find out mean and std of scaled dataset

println("Mean of scaled:\n${mean(datasetScaled, 0, false)}")
|
||||
println("Mean of scaled:\n${std(datasetScaled, 0, false)}")
|
||||
println("Mean of scaled:\n${datasetScaled.mean(0, false)}")
|
||||
println("Mean of scaled:\n${datasetScaled.std(0, false)}")
|
||||
}
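For reference, the scaling used in this example is plain column-wise standardization: with per-column mean $\mu_j$ and standard deviation $\sigma_j$ computed along dimension 0,

$$\hat d_{ij} = \frac{d_{ij} - \mu_j}{\sigma_j},$$

so every column of `datasetScaled` should come out with mean close to 0 and standard deviation close to 1, which the two printouts above are meant to verify.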

@ -1,11 +1,10 @@
/*
* Copyright 2018-2024 KMath contributors.
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/

package space.kscience.kmath.tensors

import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.tensors.core.DoubleTensor
import space.kscience.kmath.tensors.core.tensorAlgebra
import space.kscience.kmath.tensors.core.withBroadcast

@ -16,13 +15,13 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear

// set true value of x
val trueX = fromArray(
ShapeND(4),
intArrayOf(4),
doubleArrayOf(-2.0, 1.5, 6.8, -2.4)
)

// and A matrix
val a = fromArray(
ShapeND(4, 4),
intArrayOf(4, 4),
doubleArrayOf(
0.5, 10.5, 4.5, 1.0,
8.5, 0.9, 12.8, 0.1,

@ -41,7 +40,7 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear

// solve `Ax = b` system using LUP decomposition

// get P, L, U such that PA = LU
val (p, l, u) = lu(a)
val (p, l, u) = a.lu()

// check P is permutation matrix
println("P:\n$p")

@ -65,9 +64,9 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear

// this function returns solution x of a system lx = b, l should be lower triangular
fun solveLT(l: DoubleTensor, b: DoubleTensor): DoubleTensor {
val n = l.shape[0]
val x = zeros(ShapeND(n))
val x = zeros(intArrayOf(n))
for (i in 0 until n) {
x[intArrayOf(i)] = (b[intArrayOf(i)] - l.getTensor(i).dot(x).value()) / l[intArrayOf(i, i)]
x[intArrayOf(i)] = (b[intArrayOf(i)] - l[i].dot(x).value()) / l[intArrayOf(i, i)]
}
return x
}
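A short sketch of why this works (my reading of the example): with the decomposition $PA = LU$, the system $Ax = b$ becomes $L(Ux) = Pb$. Writing $y = Ux$, `solveLT` performs forward substitution

$$y_i = \frac{1}{l_{ii}}\Bigl(b_i - \sum_{j<i} l_{ij}\, y_j\Bigr),$$

where dotting the full $i$-th row of `l` against the partially filled `x` is equivalent to the truncated sum because the entries with $j \ge i$ are still zero. The remaining upper-triangular system $Ux = y$ is then handled by reusing the same routine via the `revMat` permutation constructed below.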

@ -76,7 +75,7 @@ fun main() = Double.tensorAlgebra.withBroadcast {// work in context with linear

// solveLT(l, b) function can be easily adapted for upper triangular matrix by the permutation matrix revMat
// create it by placing ones on side diagonal
val revMat = zeroesLike(u)
val revMat = u.zeroesLike()
val n = revMat.shape[0]
for (i in 0 until n) {
revMat[intArrayOf(i, n - 1 - i)] = 1.0

@ -1,5 +1,5 @@
/*
* Copyright 2018-2024 KMath contributors.
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/

@ -7,14 +7,11 @@ package space.kscience.kmath.tensors

import org.jetbrains.kotlinx.multik.api.Multik
import org.jetbrains.kotlinx.multik.api.ndarray
import org.jetbrains.kotlinx.multik.default.DefaultEngine
import space.kscience.kmath.multik.MultikDoubleAlgebra
import space.kscience.kmath.multik.multikAlgebra
import space.kscience.kmath.nd.one
import space.kscience.kmath.operations.DoubleField


val multikAlgebra = MultikDoubleAlgebra(DefaultEngine())

fun main(): Unit = with(multikAlgebra) {
fun main(): Unit = with(DoubleField.multikAlgebra) {
val a = Multik.ndarray(intArrayOf(1, 2, 3)).asType<Double>().wrap()
val b = Multik.ndarray(doubleArrayOf(1.0, 2.0, 3.0)).wrap()
one(a.shape) - a + b * 3.0

@ -1,14 +1,15 @@
/*
* Copyright 2018-2024 KMath contributors.
* Copyright 2018-2021 KMath contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/

package space.kscience.kmath.tensors

import space.kscience.kmath.nd.ShapeND
import space.kscience.kmath.operations.asIterable
import space.kscience.kmath.operations.invoke
import space.kscience.kmath.tensors.core.*
import space.kscience.kmath.tensors.core.BroadcastDoubleTensorAlgebra
import space.kscience.kmath.tensors.core.DoubleTensor
import space.kscience.kmath.tensors.core.DoubleTensorAlgebra
import space.kscience.kmath.tensors.core.copyArray
import kotlin.math.sqrt

const val seed = 100500L

@ -47,7 +48,7 @@ fun reluDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {

class ReLU : Activation(::relu, ::reluDer)

fun sigmoid(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
1.0 / (1.0 + exp((-x)))
1.0 / (1.0 + (-x).exp())
}

fun sigmoidDer(x: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {

@ -66,22 +67,22 @@ class Dense(

private val weights: DoubleTensor = DoubleTensorAlgebra {
randomNormal(
ShapeND(inputUnits, outputUnits),
intArrayOf(inputUnits, outputUnits),
seed
) * sqrt(2.0 / (inputUnits + outputUnits))
}
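The scale factor here looks like Glorot/Xavier-style initialization (my identification, the example itself does not name it): the weights are drawn from a normal distribution and multiplied by

$$\sqrt{\frac{2}{n_\mathrm{in} + n_\mathrm{out}}},$$

which keeps the variance of activations roughly constant across layers of size `inputUnits` and `outputUnits`.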

private val bias: DoubleTensor = DoubleTensorAlgebra { zeros(ShapeND(outputUnits)) }
private val bias: DoubleTensor = DoubleTensorAlgebra { zeros(intArrayOf(outputUnits)) }

override fun forward(input: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra {
(input dot weights) + bias
}

override fun backward(input: DoubleTensor, outputError: DoubleTensor): DoubleTensor = DoubleTensorAlgebra {
val gradInput = outputError dot weights.transposed()
val gradInput = outputError dot weights.transpose()

val gradW = input.transposed() dot outputError
val gradBias = mean(structureND = outputError, dim = 0, keepDim = false) * input.shape[0].toDouble()
val gradW = input.transpose() dot outputError
val gradBias = outputError.mean(dim = 0, keepDim = false) * input.shape[0].toDouble()

weights -= learningRate * gradW
bias -= learningRate * gradBias
@ -93,7 +94,7 @@ class Dense(

// simple accuracy equal to the proportion of correct answers
fun accuracy(yPred: DoubleTensor, yTrue: DoubleTensor): Double {
check(yPred.shape == yTrue.shape)
check(yPred.shape contentEquals yTrue.shape)
val n = yPred.shape[0]
var correctCnt = 0
for (i in 0 until n) {

@ -105,16 +106,17 @@ fun accuracy(yPred: DoubleTensor, yTrue: DoubleTensor): Double {
}

// neural network class
@OptIn(ExperimentalStdlibApi::class)
class NeuralNetwork(private val layers: List<Layer>) {
private fun softMaxLoss(yPred: DoubleTensor, yTrue: DoubleTensor): DoubleTensor = BroadcastDoubleTensorAlgebra {

val onesForAnswers = zeroesLike(yPred)
yTrue.source.asIterable().forEachIndexed { index, labelDouble ->
val onesForAnswers = yPred.zeroesLike()
yTrue.copyArray().forEachIndexed { index, labelDouble ->
val label = labelDouble.toInt()
onesForAnswers[intArrayOf(index, label)] = 1.0
}

val softmaxValue = exp(yPred) / exp(yPred).sum(dim = 1, keepDim = true)
val softmaxValue = yPred.exp() / yPred.exp().sum(dim = 1, keepDim = true)

(-onesForAnswers + softmaxValue) / (yPred.shape[0].toDouble())
}
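What this block returns is the standard softmax cross-entropy gradient with respect to the logits (a summary under that assumption, since the loss value itself is never materialized here): with softmax probabilities $p = \operatorname{softmax}(\hat y)$ and one-hot targets $t$ built in `onesForAnswers`,

$$\frac{\partial \mathcal{L}}{\partial \hat y} = \frac{p - t}{N},$$

where $N$ is the batch size `yPred.shape[0]`; this is exactly `(-onesForAnswers + softmaxValue) / N`, which the backward pass then propagates through the layers.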

@ -161,7 +163,7 @@ class NeuralNetwork(private val layers: List<Layer>) {
for ((xBatch, yBatch) in iterBatch(xTrain, yTrain)) {
train(xBatch, yBatch)
}
println("Accuracy:${accuracy(yTrain, predict(xTrain).argMax(1, true).toDoubleTensor())}")
println("Accuracy:${accuracy(yTrain, predict(xTrain).argMax(1, true).asDouble())}")
}
}

@ -172,6 +174,7 @@ class NeuralNetwork(private val layers: List<Layer>) {
}


@OptIn(ExperimentalStdlibApi::class)
fun main() = BroadcastDoubleTensorAlgebra {
val features = 5
val sampleSize = 250

@ -179,19 +182,19 @@ fun main() = BroadcastDoubleTensorAlgebra {
//val testSize = sampleSize - trainSize

// take sample of features from normal distribution
val x = randomNormal(ShapeND(sampleSize, features), seed) * 2.5
val x = randomNormal(intArrayOf(sampleSize, features), seed) * 2.5

x += fromArray(
ShapeND(5),
intArrayOf(5),
doubleArrayOf(0.0, -1.0, -2.5, -3.0, 5.5) // row means
)

// define class like '1' if the sum of features > 0 and '0' otherwise
val y = fromArray(
ShapeND(sampleSize, 1),
intArrayOf(sampleSize, 1),
DoubleArray(sampleSize) { i ->
if (x.getTensor(i).sum() > 0.0) {
if (x[i].sum() > 0.0) {
1.0
} else {
0.0

@ -227,7 +230,7 @@ fun main() = BroadcastDoubleTensorAlgebra {
val prediction = model.predict(xTest)

// process raw prediction via argMax
val predictionLabels = prediction.argMax(1, true).toDoubleTensor()
val predictionLabels = prediction.argMax(1, true).asDouble()

// find out accuracy
val acc = accuracy(yTest, predictionLabels)