diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..e37c4bd4 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,7 @@ +# ----------------------------------------------------------------------------- +# LFS Settings +# ----------------------------------------------------------------------------- + +# LFS images +*.png filter=lfs diff=lfs merge=lfs -text +*.ico filter=lfs diff=lfs merge=lfs -text diff --git a/.github/workflows/CompatHelper.yml b/.github/workflows/CompatHelper.yml index 2876f03f..bcdb51ab 100644 --- a/.github/workflows/CompatHelper.yml +++ b/.github/workflows/CompatHelper.yml @@ -13,4 +13,4 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} COMPATHELPER_PRIV: ${{ secrets.COMPATHELPER_PRIV }} # optional - run: julia -e 'using CompatHelper; CompatHelper.main()' \ No newline at end of file + run: julia -e 'using CompatHelper; CompatHelper.main()' diff --git a/.github/workflows/Documentation.yml b/.github/workflows/Documentation.yml index 2aa1b278..95cd9121 100644 --- a/.github/workflows/Documentation.yml +++ b/.github/workflows/Documentation.yml @@ -20,9 +20,11 @@ jobs: access_token: ${{ github.token }} - uses: actions/checkout@v2 + with: + lfs: 'true' - uses: julia-actions/setup-julia@latest with: - version: '1.6' + version: '1.8' - name: Install dependencies run: julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' - name: Build and deploy @@ -30,4 +32,4 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token DATADEPS_ALWAYS_ACCEPT: true # DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} # For authentication with SSH deploy key - run: julia --project=docs/ docs/make.jl \ No newline at end of file + run: julia --project=docs/ docs/make.jl diff --git a/.gitignore b/.gitignore index a821d872..d7e55fab 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,13 @@ +# IDE ignores +.vscode/ + +# Project ignores +_dev/ + +# DemoCards intermediate compilation directories +docs/src/democards/ +docs/src/examples/ + # Julia package development ignores *.jl.*.cov *.jl.cov @@ -8,10 +18,3 @@ /docs/Manifest.toml /docs/build/ /docs/site/ - -# Project ignores -data/mnist/ -!data/.gitkeep - -# IDE ignores -.vscode/ \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..0fe92977 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# Contributing to the Project + +The `AdaptiveResonance.jl` project is open sourced and open to contribution! +If you would like to contribute to the project, you should know: + +1. The [README](README.md) contains a quick summary of the contributing guidelines. +2. The [code of conduct](CODE_OF_CONDUCT.md) declares the behavior guidelines for contributors, mainly concerning respect and decorum. +3. The [contributing guide](https://ap6yc.github.io/AdaptiveResonance.jl/dev/man/contributing/) in the official documentation provides an in-depth guide to contributing, such as how to do GitFlow, how to develop in Julia, and how the internals of the package work. + +In summary, the main point of contact is the [GitHub issues](https://github.com/AP6YC/AdaptiveResonance.jl/issues), and code development follows the GitFlow paradigm (i.e., create a `feature/...` branch off of the latest `develop` and submit a pull request back into `develop`). 
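For contributors who want to sanity-check changes locally before opening a pull request against `develop`, a minimal sketch of the development loop summarized above (assuming the repository is already cloned and Julia is started from its root) is:

```julia-repl
julia> ]
(@v1.8) pkg> activate .
(AdaptiveResonance) pkg> instantiate
(AdaptiveResonance) pkg> test
```

The full set of steps, including the GitFlow branching conventions, is spelled out in the linked contributing guide.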
diff --git a/Project.toml b/Project.toml index 5acf3d59..ebb7a7e6 100644 --- a/Project.toml +++ b/Project.toml @@ -2,21 +2,22 @@ name = "AdaptiveResonance" uuid = "3d72adc0-63d3-4141-bf9b-84450dd0395b" authors = ["Sasha Petrenko"] description = "A Julia package for Adaptive Resonance Theory (ART) algorithms." -version = "0.5.1" +version = "0.6.0" [deps] Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" +NumericalTypeAliases = "be9b823e-291e-41a1-b8ce-806204e78f92" Parameters = "d96e819e-fc66-5662-9728-84c9c7592b0a" -Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" ProgressBars = "49802e3a-d2f1-5c88-81d8-b72133a6f568" SharedArrays = "1a1011a3-84de-559e-8e89-a11a2f7dc383" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" [compat] DocStringExtensions = "0.8, 0.9" +NumericalTypeAliases = "0.1, 0.2" Parameters = "0.12" ProgressBars = "0.7, 0.8, 1" julia = "1" diff --git a/README.md b/README.md index bcc0b88f..e171b794 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,6 @@ -# AdaptiveResonance +[![adaptiveresonance-header](docs/src/assets/header.png)][docs-dev-url] A Julia package for Adaptive Resonance Theory (ART) algorithms. -d | **Documentation** | **Testing Status** | **Coverage** | **Reference** | |:------------------:|:----------------:|:------------:|:-------------:| @@ -53,20 +52,19 @@ Please read the [documentation](https://ap6yc.github.io/AdaptiveResonance.jl/dev ## Contents -- [AdaptiveResonance](#adaptiveresonance) - - [Contents](#contents) - - [Overview](#overview) - - [Contributing](#contributing) - - [Installation](#installation) - - [Quickstart](#quickstart) - - [Implemented Modules](#implemented-modules) - - [Structure](#structure) - - [History](#history) - - [Acknowledgements](#acknowledgements) - - [Authors](#authors) - - [Software](#software) - - [Datasets](#datasets) - - [License](#license) +- [Contents](#contents) +- [Overview](#overview) +- [Contributing](#contributing) +- [Installation](#installation) +- [Quickstart](#quickstart) +- [Implemented Modules](#implemented-modules) +- [Structure](#structure) +- [History](#history) +- [Acknowledgements](#acknowledgements) + - [Authors](#authors) + - [Software](#software) + - [Datasets](#datasets) +- [License](#license) ## Overview @@ -96,21 +94,23 @@ Patch versions are for bug fixes, minor versions are for backward-compatible cha This project is distributed as a Julia package, available on [JuliaHub](https://juliahub.com/). Its usage follows the usual Julia package installation procedure, interactively: -```julia -] add AdaptiveResonance +```julia-repl +julia> ] +(@v1.8) pkg> add AdaptiveResonance ``` or programmatically: -```julia -using Pkg -Pkg.add("AdaptiveResonance") +```julia-repl +julia> using Pkg +julia> Pkg.add("AdaptiveResonance") ``` You may also add the package directly from GitHub to get the latest changes between releases: -```julia -] add https://github.com/AP6YC/AdaptiveResonance.jl +```julia-repl +julia> ] +(@v1.8) pkg> add https://github.com/AP6YC/AdaptiveResonance.jl ``` ## Quickstart @@ -209,16 +209,19 @@ The following file tree summarizes the project structure: ```console AdaptiveResonance ├── .github/workflows // GitHub: workflows for testing and documentation. -├── data // Data: CI data location. ├── docs // Docs: documentation for the module. -│ ├─── examples // DemoCards documentation examples. -│ └─── src // Documentation source files. 
+│ ├─── examples // DemoCards documentation examples. +│ └─── src // Documentation source files. +├── paper // JOSS: journal paper and citations. ├── src // Source: majority of source code. -│ ├─── ART // ART-based unsupervised modules. -│ └─── ARTMAP // ARTMAP-based supervised modules. +│ ├─── ART // ART-based unsupervised modules. +│ └─── ARTMAP // ARTMAP-based supervised modules. ├── test // Test: Unit, integration, and environment tests. ├── .appveyor // Appveyor: Windows-specific coverage. +├── .gitattributes // Git: LFS settings, languages, etc. ├── .gitignore // Git: .gitignore for the whole project. +├── CODE_OF_CONDUCT.md // Doc: the code of conduct for contributors. +├── CONTRIBUTING.md // Doc: contributing guide (points to this page). ├── LICENSE // Doc: the license to the project. ├── Project.toml // Julia: the Pkg.jl dependencies of the project. └── README.md // Doc: this document. diff --git a/data/.gitkeep b/data/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/Project.toml b/docs/Project.toml index b9c7dc28..27b68025 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,5 +1,6 @@ [deps] AdaptiveResonance = "3d72adc0-63d3-4141-bf9b-84450dd0395b" +DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" DemoCards = "311a05b2-6137-4a5a-b473-18580a3d38b5" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" @@ -10,4 +11,4 @@ MultivariateStats = "6f286f6a-111f-5878-ab1e-185364afe411" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" [compat] -MLDatasets = "0.6" +MLDatasets = "0.7" diff --git a/docs/combo.jl b/docs/combo.jl deleted file mode 100644 index 0df0e931..00000000 --- a/docs/combo.jl +++ /dev/null @@ -1,12 +0,0 @@ -""" - combo.jl - -This is a convenience script for docs development that makes and live serves the docs locally. -""" - - -# Make the documentation -include("make.jl") - -# Host the documentation locally -include("serve.jl") diff --git a/docs/examples/adaptive_resonance/data_config.jl b/docs/examples/adaptive_resonance/data_config.jl index ccada8fc..b281a505 100644 --- a/docs/examples/adaptive_resonance/data_config.jl +++ b/docs/examples/adaptive_resonance/data_config.jl @@ -4,7 +4,7 @@ # cover: ../assets/art.png # date: 2021-12-2 # author: "[Sasha Petrenko](https://github.com/AP6YC)" -# julia: 1.6 +# julia: 1.8 # description: This demo illustrates how the data configuration object works for data preprocessing in ART modules that require it. # --- @@ -46,16 +46,17 @@ fieldnames(AdaptiveResonance.DataConfig) ## Load data using MLDatasets # Iris dataset +using DataFrames # DataFrames, necessary for MLDatasets.Iris() using MLDataUtils # Shuffling and splitting -## We will download the Iris dataset for its small size and benchmark use for clustering algorithms. -## Get the iris dataset as a DataFrame -iris = Iris() +# We will download the Iris dataset for its small size and benchmark use for clustering algorithms. 
+## Get the iris dataset +iris = Iris(as_df=false) ## Manipulate the features and labels into a matrix of features and a vector of labels -features, labels = Matrix(iris.features)', vec(Matrix{String}(iris.targets)) +features, labels = iris.features, iris.targets # Because the MLDatasets package gives us Iris labels as strings, we will use the `MLDataUtils.convertlabel` method with the `MLLabelUtils.LabelEnc.Indices` type to get a list of integers representing each class: -labels = convertlabel(LabelEnc.Indices{Int}, labels) +labels = convertlabel(LabelEnc.Indices{Int}, vec(labels)) unique(labels) # !!! note diff --git a/docs/examples/adaptive_resonance/incremental-batch.jl b/docs/examples/adaptive_resonance/incremental-batch.jl index 364ff4e2..e84dba66 100644 --- a/docs/examples/adaptive_resonance/incremental-batch.jl +++ b/docs/examples/adaptive_resonance/incremental-batch.jl @@ -4,7 +4,7 @@ # cover: assets/incremental-batch-cover.png # date: 2021-12-1 # author: "[Sasha Petrenko](https://github.com/AP6YC)" -# julia: 1.6 +# julia: 1.8 # description: This demo illustrates how to use incremental training methods vs. batch training for all ART modules. # --- @@ -23,17 +23,18 @@ # We begin with importing AdaptiveResonance for the ART modules and MLDatasets for some data utilities. using AdaptiveResonance # ART using MLDatasets # Iris dataset +using DataFrames # DataFrames, necessary for MLDatasets.Iris() using MLDataUtils # Shuffling and splitting using Printf # Formatted number printing # We will download the Iris dataset for its small size and benchmark use for clustering algorithms. -## Get the iris dataset as a DataFrame -iris = Iris() +## Get the iris dataset +iris = Iris(as_df=false) ## Manipulate the features and labels into a matrix of features and a vector of labels -features, labels = Matrix(iris.features)', vec(Matrix{String}(iris.targets)) +features, labels = iris.features, iris.targets # Because the MLDatasets package gives us Iris labels as strings, we will use the `MLDataUtils.convertlabel` method with the `MLLabelUtils.LabelEnc.Indices` type to get a list of integers representing each class: -labels = convertlabel(LabelEnc.Indices{Int}, labels) +labels = convertlabel(LabelEnc.Indices{Int}, vec(labels)) unique(labels) # Next, we will create a train/test split with the `MLDataUtils.stratifiedobs` utility: @@ -73,7 +74,7 @@ n_train = length(y_train) ## Create a container for the training output labels y_hat_incremental_train = zeros(Int, n_train) ## Iterate over all training samples -for ix = 1:length(y_train) +for ix in eachindex(y_train) sample = X_train[:, ix] label = y_train[ix] y_hat_incremental_train[ix] = train!(art_incremental, sample, y=label) @@ -128,12 +129,13 @@ perf_test_incremental = performance(y_hat_incremental, y_test) using Printf # Formatted number printing using MultivariateStats # Principal component analysis (PCA) using Plots # Plotting frontend +gr() # Use the default GR backend explicitly ## Train a PCA model M = fit(PCA, features; maxoutdim=2) ## Apply the PCA model to the testing set -X_test_pca = transform(M, X_test) +X_test_pca = MultivariateStats.transform(M, X_test) # Now that we have the test points cast into a 2-D set of points, we can create a scatter plot that shows how each point is categorized by the modules. 
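The hunks above, and the matching edits in the other example scripts below, all make the same substitution: Iris is now loaded as raw arrays via `Iris(as_df=false)` instead of as a `DataFrame`, and `transform` is qualified as `MultivariateStats.transform` because `DataFrames` (which also exports a `transform`) is now loaded. A minimal sketch of the data-loading pattern the updated examples assume (MLDatasets >= 0.7; the Iris download prompt must be accepted) is:

```julia
using MLDatasets     # Iris dataset
using DataFrames     # required by MLDatasets.Iris()
using MLDataUtils    # convertlabel, LabelEnc, stratifiedobs

# With as_df=false, features arrive as a (features x samples) matrix
# and targets as a matrix of class-name strings.
iris = Iris(as_df=false)
features, labels = iris.features, iris.targets

# Convert the string targets into integer class indices.
labels = convertlabel(LabelEnc.Indices{Int}, vec(labels))
```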
diff --git a/docs/examples/adaptive_resonance/options.jl b/docs/examples/adaptive_resonance/options.jl index 3083cc20..8b4f98c5 100644 --- a/docs/examples/adaptive_resonance/options.jl +++ b/docs/examples/adaptive_resonance/options.jl @@ -4,7 +4,7 @@ # cover: assets/options-cover.png # date: 2021-12-2 # author: "[Sasha Petrenko](https://github.com/AP6YC)" -# julia: 1.6 +# julia: 1.8 # description: This demo illustrates how to use options and modify the options for all ART and ARTMAP modules. # --- @@ -88,19 +88,21 @@ my_fuzzyart.opts.rho=0.6 # We begin with importing AdaptiveResonance for the ART modules and MLDatasets for some data utilities. using MLDatasets # Iris dataset +using DataFrames # DataFrames, necessary for MLDatasets.Iris() using MLDataUtils # Shuffling and splitting using Printf # Formatted number printing using MultivariateStats # Principal component analysis (PCA) using Plots # Plotting frontend +gr() # Use the default GR backend explicitly # We will download the Iris dataset for its small size and benchmark use for clustering algorithms. -## Get the iris dataset as a DataFrame -iris = Iris() +## Get the iris dataset +iris = Iris(as_df=false) ## Manipulate the features and labels into a matrix of features and a vector of labels -features, labels = Matrix(iris.features)', vec(Matrix{String}(iris.targets)) +features, labels = iris.features, iris.targets # Because the MLDatasets package gives us Iris labels as strings, we will use the `MLDataUtils.convertlabel` method with the `MLLabelUtils.LabelEnc.Indices` type to get a list of integers representing each class: -labels = convertlabel(LabelEnc.Indices{Int}, labels) +labels = convertlabel(LabelEnc.Indices{Int}, vec(labels)) unique(labels) # Next, we will create a train/test split with the `MLDataUtils.stratifiedobs` utility: @@ -152,7 +154,7 @@ perf_test_2 = performance(y_hat_2, y_test) M = fit(PCA, features; maxoutdim=2) ## Apply the PCA model to the testing set -X_test_pca = transform(M, X_test) +X_test_pca = MultivariateStats.transform(M, X_test) # We can now plot the PCA'ed test set and label them according to the two FuzzyART's # We will do so by creating a function for the subplots first as they will share the same format, and we dare not duplicate code. diff --git a/docs/examples/art/ddvfa_supervised.jl b/docs/examples/art/ddvfa_supervised.jl index c26e11c6..eb2473fb 100644 --- a/docs/examples/art/ddvfa_supervised.jl +++ b/docs/examples/art/ddvfa_supervised.jl @@ -4,7 +4,7 @@ # cover: ../assets/ddvfa.png # date: 2021-11-30 # author: "[Sasha Petrenko](https://github.com/AP6YC)" -# julia: 1.6 +# julia: 1.8 # description: This demo shows how to use DDVFA for simple supervised learning by clustering Iris samples and mapping the modules internal categories to the true labels. # --- @@ -14,17 +14,18 @@ # We begin with importing AdaptiveResonance for the ART modules and MLDatasets for some data utilities. using AdaptiveResonance # ART using MLDatasets # Iris dataset +using DataFrames # DataFrames, necessary for MLDatasets.Iris() using MLDataUtils # Shuffling and splitting using Printf # Formatted number printing # We will download the Iris dataset for its small size and benchmark use for clustering algorithms. 
-## Get the iris dataset as a DataFrame -iris = Iris() +## Get the iris dataset +iris = Iris(as_df=false) ## Manipulate the features and labels into a matrix of features and a vector of labels -features, labels = Matrix(iris.features)', vec(Matrix{String}(iris.targets)) +features, labels = iris.features, iris.targets # Because the MLDatasets package gives us Iris labels as strings, we will use the `MLDataUtils.convertlabel` method with the `MLLabelUtils.LabelEnc.Indices` type to get a list of integers representing each class: -labels = convertlabel(LabelEnc.Indices{Int}, labels) +labels = convertlabel(LabelEnc.Indices{Int}, vec(labels)) unique(labels) # Next, we will create a train/test split with the `MLDataUtils.stratifiedobs` utility: diff --git a/docs/examples/art/ddvfa_unsupervised.jl b/docs/examples/art/ddvfa_unsupervised.jl index 1f6b203a..ba43d773 100644 --- a/docs/examples/art/ddvfa_unsupervised.jl +++ b/docs/examples/art/ddvfa_unsupervised.jl @@ -4,7 +4,7 @@ # cover: ../assets/ddvfa.png # date: 2021-11-30 # author: "[Sasha Petrenko](https://github.com/AP6YC)" -# julia: 1.6 +# julia: 1.8 # description: This demo shows how to use DDVFA for unsupervised learning by clustering Iris samples. # --- @@ -13,13 +13,14 @@ # We begin with importing AdaptiveResonance for the ART modules and MLDatasets for loading some data. using AdaptiveResonance # ART using MLDatasets # Iris dataset +using DataFrames # DataFrames, necessary for MLDatasets.Iris() using MLDataUtils # Shuffling and splitting # We will download the Iris dataset for its small size and benchmark use for clustering algorithms. -## Get the iris dataset as a DataFrame -iris = Iris() -## Manipulate the features and labels into a matrix of features and a vector of labels -features, labels = Matrix(iris.features)', vec(Matrix{String}(iris.targets)) +## Get the iris dataset +iris = Iris(as_df=false) +## Extract the features into a local variable +features = iris.features # Next, we will instantiate a DDVFA module. # We could create an options struct for reuse with `opts=opts_DDVFA(...)`, but for now we will use the direct keyword arguments approach. diff --git a/docs/examples/artmap/sfam_iris.jl b/docs/examples/artmap/sfam_iris.jl index 6a3a066d..41ef9478 100644 --- a/docs/examples/artmap/sfam_iris.jl +++ b/docs/examples/artmap/sfam_iris.jl @@ -4,7 +4,7 @@ # cover: ../assets/artmap.png # date: 2021-11-30 # author: "[Sasha Petrenko](https://github.com/AP6YC)" -# julia: 1.6 +# julia: 1.8 # description: This demo shows how to use a Simplified FuzzyARTMAP (SFAM) module to conduct supervised learning on the Iris dataset. # --- diff --git a/docs/examples/assets/art.png b/docs/examples/assets/art.png index 47728937..fb8f4ec6 100644 Binary files a/docs/examples/assets/art.png and b/docs/examples/assets/art.png differ diff --git a/docs/examples/assets/artmap.png b/docs/examples/assets/artmap.png index ef2c26a5..71051c03 100644 Binary files a/docs/examples/assets/artmap.png and b/docs/examples/assets/artmap.png differ diff --git a/docs/examples/assets/ddvfa.png b/docs/examples/assets/ddvfa.png index f4f1b010..d4db818d 100644 Binary files a/docs/examples/assets/ddvfa.png and b/docs/examples/assets/ddvfa.png differ diff --git a/docs/make.jl b/docs/make.jl index f37b95b4..ba2221ca 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -6,10 +6,28 @@ using Documenter.jl and other tools. 
""" using Documenter -using AdaptiveResonance using DemoCards -# using JSON +# Get the current workind directory's base name +current_dir = basename(pwd()) +@info "Current directory is $(current_dir)" + +# If using the CI method `julia --project=docs/ docs/make.jl` +# or `julia --startup-file=no --project=docs/ docs/make.jl` +if occursin("AdaptiveResonance", current_dir) + push!(LOAD_PATH, "../src/") +# Otherwise, we are already in the docs project and need to dev the above package +elseif occursin("docs", current_dir) + Pkg.develop(path="..") +# Otherwise, building docs from the wrong path +else + error("Unrecognized docs setup path") +end + +# Inlude the local package +using AdaptiveResonance + +# using JSON if haskey(ENV, "DOCSARGS") for arg in split(ENV["DOCSARGS"]) (arg in ARGS) || push!(ARGS, arg) @@ -48,6 +66,7 @@ makedocs( "Modules" => "man/modules.md", "Contributing" => "man/contributing.md", "Index" => "man/full-index.md", + "Internals" => "man/dev-index.md", ], ], repo="https://github.com/AP6YC/AdaptiveResonance.jl/blob/{commit}{path}#L{line}", diff --git a/docs/serve.jl b/docs/serve.jl index 6e893a0c..f7680767 100644 --- a/docs/serve.jl +++ b/docs/serve.jl @@ -6,5 +6,8 @@ Convenience script that serves the locally built documentation. using LiveServer +# Make the documentation +include("make.jl") + # Serve the documentation for development serve(dir="build") diff --git a/docs/src/assets/favicon.ico b/docs/src/assets/favicon.ico index c20690b4..c66f7fd4 100644 Binary files a/docs/src/assets/favicon.ico and b/docs/src/assets/favicon.ico differ diff --git a/docs/src/assets/figures/art.png b/docs/src/assets/figures/art.png index 47728937..fb8f4ec6 100644 Binary files a/docs/src/assets/figures/art.png and b/docs/src/assets/figures/art.png differ diff --git a/docs/src/assets/header.png b/docs/src/assets/header.png index 7ea7792c..e87c127c 100644 Binary files a/docs/src/assets/header.png and b/docs/src/assets/header.png differ diff --git a/docs/src/assets/logo.png b/docs/src/assets/logo.png index 24efd77f..2424c0d7 100644 Binary files a/docs/src/assets/logo.png and b/docs/src/assets/logo.png differ diff --git a/docs/src/index.md b/docs/src/index.md index 3baf5955..6567678d 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -24,11 +24,13 @@ Pages = [ "man/modules.md", "man/contributing.md", "man/full-index.md", + "man/dev-index.md", ] Depth = 1 ``` The [Package Guide](@ref) provides a tutorial to the full usage of the package, while [Examples](@ref examples) gives sample workflows using a variety of ART modules. -A list of the implemented ART modules is included in [Modules](@ref), where different options are also listed for creating variants of these modules that exist in the literature. +A list of the implemented ART modules is included in [Modules](@ref modules-page), where different options are also listed for creating variants of these modules that exist in the literature. Instructions on how to contribute to the package are found in [Contributing](@ref), and docstrings for every element of the package is listed in the [Index](@ref main-index). +Names internal to the package are also listed under the [Developer Index](@ref dev-main-index). diff --git a/docs/src/man/contributing.md b/docs/src/man/contributing.md index 26b0198e..f8768610 100644 --- a/docs/src/man/contributing.md +++ b/docs/src/man/contributing.md @@ -35,16 +35,16 @@ julia --project=. 
The above immediately activates the project when starting up Julia, but you may also separately startup the julia and activate the package with the interactive package manager via the `]` syntax:

-    ```julia
+    ```julia-repl
     julia
     julia> ]
-    (@v1.6) pkg> activate .
+    (@v1.8) pkg> activate .
     (AdaptiveResonance) pkg>
     ```

 You may run the package's unit tests after the above setup in Julia with

-```julia
+```julia-repl
 julia> using Pkg
 julia> Pkg.instantiate()
 julia> Pkg.test()
@@ -52,7 +52,7 @@ julia> Pkg.test()

 or interactively though the Julia package manager with

-```julia
+```julia-repl
 julia> ]
 (AdaptiveResonance) pkg> instantiate
 (AdaptiveResonance) pkg> test
@@ -94,7 +94,10 @@ AdaptiveResonance
 │   └───ARTMAP          // ARTMAP-based supervised modules.
 ├── test                // Test: Unit, integration, and environment tests.
 ├── .appveyor           // Appveyor: Windows-specific coverage.
+├── .gitattributes      // Git: LFS settings, languages, etc.
 ├── .gitignore          // Git: .gitignore for the whole project.
+├── CODE_OF_CONDUCT.md  // Doc: the code of conduct for contributors.
+├── CONTRIBUTING.md     // Doc: contributing guide (points to this page).
 ├── LICENSE             // Doc: the license to the project.
 ├── Project.toml        // Julia: the Pkg.jl dependencies of the project.
 └── README.md           // Doc: this document.
diff --git a/docs/src/man/dev-index.md b/docs/src/man/dev-index.md
new file mode 100644
index 00000000..570e57f3
--- /dev/null
+++ b/docs/src/man/dev-index.md
@@ -0,0 +1,43 @@
+# [Developer Index](@id dev-main-index)
+
+This page lists the types and functions that are internal to the `AdaptiveResonance.jl` package.
+Because they are not part of the public API, these names might change relatively frequently between versions and so should not be relied upon.
+
+All internal names are listed in the [Index](@ref dev-index-methods), and each of these entries links to the docstrings in the [Docs](@ref dev-index-docs) section.
+
+## Index
+
+This section contains a list of internal names that link to their corresponding [Documentation](@ref dev-index-docs).
+
+### [Methods](@id dev-index-methods)
+
+```@index
+Pages = ["dev-index.md"]
+Modules = [AdaptiveResonance]
+Order = [:function]
+```
+
+### [Types](@id dev-index-types)
+
+```@index
+Pages = ["dev-index.md"]
+Modules = [AdaptiveResonance]
+Order = [:type]
+```
+
+### [Constants](@id dev-index-constants)
+
+```@index
+Pages = ["dev-index.md"]
+Modules = [AdaptiveResonance]
+Order = [:constant]
+```
+
+### [Docs](@id dev-index-docs)
+
+Documentation for all internal names is listed below.
+
+```@autodocs
+Modules = [AdaptiveResonance]
+Public = false
+```
diff --git a/docs/src/man/full-index.md b/docs/src/man/full-index.md
index c4a6a43f..6f001bea 100644
--- a/docs/src/man/full-index.md
+++ b/docs/src/man/full-index.md
@@ -1,55 +1,80 @@
 # [Index](@id main-index)

 This page lists the core methods and types of the `AdaptiveResonance.jl` package.
+The [Modules](@ref index-modules) section lists the modules exported by the package, including the `AdaptiveResonance` module itself.
 The [Methods](@ref index-methods) section lists the public methods for the package that use the modules in [Types](@ref index-types).
 Each of these entries link to the docstrings in the [Docs](@ref index-docs) section.

 ART modules document their internal working parameters and references, while their hyperparameters/options are documented under their corresponding option structs `opts_...`.
-## [Methods](@id index-methods) +## Index + +This section enumerates the names exported by the package, each of which links to its corresponding [Documentation](@ref index-docs). + +### [Modules](@id index-modules) + +```@index +Pages = ["full-index.md"] +Modules = [AdaptiveResonance] +Order = [:module] +``` + +### [Methods](@id index-methods) ```@index +Pages = ["full-index.md"] Modules = [AdaptiveResonance] Order = [:function] -Public = true ``` -## [Types](@id index-types) +### [Types](@id index-types) ```@index +Pages = ["full-index.md"] Modules = [AdaptiveResonance] Order = [:type] -Public = true +``` + +### [Constants](@id index-constants) + +```@index +Pages = ["full-index.md"] +Modules = [AdaptiveResonance] +Order = [:constant] ``` ## [Docs](@id index-docs) -```@docs -AdaptiveResonance -train! -AdaptiveResonance.classify -data_setup! -performance -complement_code -get_data_characteristics -linear_normalization -get_data_shape -get_n_samples -DDVFA -DVFA -FuzzyART -DAM -FAM -SFAM -opts_DDVFA -opts_DVFA -opts_FuzzyART -opts_DAM -opts_FAM -opts_SFAM -DataConfig -ARTModule -ART -ARTMAP -ARTOpts +This section lists the documentation for every exported name of the `AdaptiveResonance.jl` package. + +### [Modules](@id index-modules-docs) + +```@autodocs +Modules = [AdaptiveResonance] +Private = false +Order = [:module] +``` + +### [Functions](@id index-functions-docs) + +```@autodocs +Modules = [AdaptiveResonance] +Private = false +Order = [:function] +``` + +### [Types](@id index-types-docs) + +```@autodocs +Modules = [AdaptiveResonance] +Private = false +Order = [:type] +``` + +### [Constants](@id index-constants-docs) + +```@autodocs +Modules = [AdaptiveResonance] +Private = false +Order = [:constant] ``` diff --git a/docs/src/man/modules.md b/docs/src/man/modules.md index a6349d47..b1dca73b 100644 --- a/docs/src/man/modules.md +++ b/docs/src/man/modules.md @@ -1,4 +1,4 @@ -# Modules +# [Modules](@id modules-page) This project implements a number of ART-based models with options that modulate their behavior (see the [options section of the Guide](@ref art_options)) diff --git a/src/ART/ART.jl b/src/ART/ART.jl index 6d0b37f6..4bdfb803 100644 --- a/src/ART/ART.jl +++ b/src/ART/ART.jl @@ -9,3 +9,12 @@ include("common.jl") # train!, classify include("FuzzyART.jl") # FuzzyART include("DDVFA.jl") # DDVFA include("DVFA.jl") # DVFA + +""" +A list of (default) unsupervised ART modules that are available in the `AdaptiveResonance.jl` package. +""" +const ART_MODULES = [ + FuzzyART, + DVFA, + DDVFA, +] \ No newline at end of file diff --git a/src/ART/DDVFA.jl b/src/ART/DDVFA.jl index 2f7634ae..322fc718 100644 --- a/src/ART/DDVFA.jl +++ b/src/ART/DDVFA.jl @@ -1,10 +1,10 @@ """ DDVFA.jl -Description: - Includes all of the structures and logic for running a Distributed Dual-Vigilance Fuzzy ART (DDVFA) module. +# Description: +Includes all of the structures and logic for running a Distributed Dual-Vigilance Fuzzy ART (DDVFA) module. -References +# References [1] L. E. Brito da Silva, I. Elnabarawy, and D. C. Wunsch, “Distributed dual vigilance fuzzy adaptive resonance theory learns online, retrieves arbitrarily-shaped clusters, and mitigates order dependence,” Neural Networks, vol. 121, pp. 208-228, 2020, doi: 10.1016/j.neunet.2019.08.033. [2] G. Carpenter, S. Grossberg, and D. Rosen, "Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system," Neural Networks, vol. 4, no. 6, pp. 759-771, 1991. 
""" @@ -14,101 +14,139 @@ References # --------------------------------------------------------------------------- # """ - opts_DDVFA(;kwargs) - Distributed Dual Vigilance Fuzzy ART options struct. -# Keyword Arguments -- `rho_lb::Float`: lower-bound vigilance value, [0, 1], default 0.7. -- `rho_ub::Float`: upper-bound vigilance value, [0, 1], default 0.85. -- `alpha::Float`: choice parameter, alpha > 0, default 1e-3. -- `beta::Float`: learning parameter, (0, 1], default 1.0. -- `gamma::Float`: "pseudo" kernel width, gamma >= 1, default 3.0. -- `gamma_ref::Float`: "reference" kernel width, 0 <= gamma_ref < gamma, default 1.0. -- `method::String`: similarity method (activation and match): -`single`, `average`, `complete`, `median`, `weighted`, or `centroid`, default `single`. -- `display::Bool`: display flag, default true. -- `max_epoch::Int`: maximum number of epochs during training, default 1. -- `gamma_normalization::Bool`: normalize the threshold by the feature dimension, default true. +$(opts_docstring) """ @with_kw mutable struct opts_DDVFA <: ARTOpts @deftype Float - # Lower-bound vigilance parameter: [0, 1] + """ + Lower-bound vigilance parameter: rho_lb ∈ [0, 1]. + """ rho_lb = 0.7; @assert rho_lb >= 0.0 && rho_lb <= 1.0 - # Upper bound vigilance parameter: [0, 1] + + """ + Upper bound vigilance parameter: rho_ub ∈ [0, 1]. + """ rho_ub = 0.85; @assert rho_ub >= 0.0 && rho_ub <= 1.0 - # Choice parameter: alpha > 0 + + """ + Choice parameter: alpha > 0. + """ alpha = 1e-3; @assert alpha > 0.0 - # Learning parameter: (0, 1] + + """ + Learning parameter: beta ∈ (0, 1]. + """ beta = 1.0; @assert beta > 0.0 && beta <= 1.0 - # "Pseudo" kernel width: gamma >= 1 + + """ + Pseudo kernel width: gamma >= 1. + """ gamma = 3.0; @assert gamma >= 1.0 - # "Reference" gamma for normalization: 0 <= gamma_ref < gamma + + """ + Reference gamma for normalization: 0 <= gamma_ref < gamma. + """ gamma_ref = 1.0; @assert 0.0 <= gamma_ref && gamma_ref < gamma - # Similarity method (activation and match): - # 'single', 'average', 'complete', 'median', 'weighted', or 'centroid' + + """ + Similarity method (activation and match): method ∈ ["single", "average", "complete", "median", "weighted", "centroid"]. + """ method::String = "single" - # Display flag - display::Bool = true - # Maximum number of epochs during training + + """ + Maximum number of epochs during training: max_epochs ∈ (1, Inf). + """ max_epoch::Int = 1 - # Normalize the threshold by the feature dimension + + """ + Display flag. + """ + display::Bool = true + + """ + Flag to normalize the threshold by the feature dimension. + """ gamma_normalization::Bool = true -end # opts_DDVFA +end # --------------------------------------------------------------------------- # # STRUCTS # --------------------------------------------------------------------------- # """ - DDVFA <: ART - Distributed Dual Vigilance Fuzzy ARTMAP module struct. For module options, see [`AdaptiveResonance.opts_DDVFA`](@ref). -# Option Parameters -- `opts::opts_DDVFA`: DDVFA options struct. -- `subopts::opts_FuzzyART`: FuzzyART options struct used for all F2 nodes. -- `config::DataConfig`: data configuration struct. - -# Working Parameters -- `threshold::Float`: operating module threshold value, a function of the vigilance parameter. -- `F2::Vector{FuzzyART}`: list of F2 nodes (themselves FuzzyART modules). -- `labels::IntegerVector`: incremental list of labels corresponding to each F2 node, self-prescribed or supervised. -- `n_categories::Int`: number of total categories. 
-- `epoch::Int`: current training epoch. -- `T::Float`: winning activation value from most recent sample. -- `M::Float`: winning match value from most recent sample. - # References 1. L. E. Brito da Silva, I. Elnabarawy, and D. C. Wunsch, “Distributed dual vigilance fuzzy adaptive resonance theory learns online, retrieves arbitrarily-shaped clusters, and mitigates order dependence,” Neural Networks, vol. 121, pp. 208-228, 2020, doi: 10.1016/j.neunet.2019.08.033. 2. G. Carpenter, S. Grossberg, and D. Rosen, "Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system," Neural Networks, vol. 4, no. 6, pp. 759-771, 1991. """ mutable struct DDVFA <: ART - # Get parameters + # Option Parameters + """ + DDVFA options struct. + """ opts::opts_DDVFA + + """ + FuzzyART options struct used for all F2 nodes. + """ subopts::opts_FuzzyART + + """ + Data configuration struct. + """ config::DataConfig # Working variables + """ + Operating module threshold value, a function of the vigilance parameter. + """ threshold::Float + + """ + List of F2 nodes (themselves FuzzyART modules). + """ F2::Vector{FuzzyART} - labels::IntegerVector + + """ + Incremental list of labels corresponding to each F2 node, self-prescribed or supervised. + """ + labels::Vector{Int} + + """ + Number of total categories. + """ n_categories::Int + + """ + Current training epoch. + """ epoch::Int + + """ + Winning activation value from most recent sample. + """ T::Float + + """ + Winning match value from most recent sample. + """ M::Float -end # DDVFA <: ART +end # --------------------------------------------------------------------------- # # CONSTRUCTORS # --------------------------------------------------------------------------- # """ - DDVFA(;kwargs...) - Implements a DDVFA learner with optional keyword arguments. +# Arguments +- `kwargs`: keyword arguments to pass to the DDVFA options struct (see [`AdaptiveResonance.opts_DDVFA`](@ref).) + # Examples By default: ```julia-repl @@ -131,13 +169,14 @@ DDVFA function DDVFA(;kwargs...) opts = opts_DDVFA(;kwargs...) DDVFA(opts) -end # DDVFA(;kwargs...) +end """ - DDVFA(opts::opts_DDVFA) - Implements a DDVFA learner with specified options. +# Arguments +- `opts::opts_DDVFA`: the DDVFA options (see [`AdaptiveResonance.opts_DDVFA`](@ref)). + # Examples ```julia-repl julia> my_opts = opts_DDVFA() @@ -170,29 +209,25 @@ function DDVFA(opts::opts_DDVFA) 0.0, 0.0 ) -end # DDVFA(opts::opts_DDVFA) +end # --------------------------------------------------------------------------- # -# ALGORITHMIC METHODS +# COMMON FUNCTIONS # --------------------------------------------------------------------------- # -""" - set_threshold!(art::DDVFA) - -Sets the vigilance threshold of the DDVFA module as a function of several flags and hyperparameters. 
-""" +# COMMON DOC: Set threshold function function set_threshold!(art::DDVFA) # Gamma match normalization if art.opts.gamma_normalization # Set the learning threshold as a function of the data dimension - art.threshold = art.opts.rho_lb*(art.config.dim^art.opts.gamma_ref) + art.threshold = art.opts.rho_lb * (art.config.dim ^ art.opts.gamma_ref) else # Set the learning threshold as simply the vigilance parameter art.threshold = art.opts.rho_lb end -end # set_threshold!(art::DDVFA) +end -# DDVFA incremental training method +# COMMON DOC: DDVFA incremental training method function train!(art::DDVFA, x::RealVector ; y::Integer=0, preprocessed::Bool=false) # Flag for if training in supervised mode supervised = !iszero(y) @@ -218,14 +253,14 @@ function train!(art::DDVFA, x::RealVector ; y::Integer=0, preprocessed::Bool=fal T = zeros(art.n_categories) for jx = 1:art.n_categories activation_match!(art.F2[jx], sample) - T[jx] = similarity(art.opts.method, art.F2[jx], "T", sample, art.opts.gamma_ref) + T[jx] = similarity(art.opts.method, art.F2[jx], "T", sample) end # Compute the match for each category in the order of greatest activation index = sortperm(T, rev=true) for jx = 1:art.n_categories bmu = index[jx] - M = similarity(art.opts.method, art.F2[bmu], "M", sample, art.opts.gamma_ref) + M = similarity(art.opts.method, art.F2[bmu], "M", sample) # If we got a match, then learn (update the category) if M >= art.threshold # Update the stored match and activation values @@ -249,7 +284,7 @@ function train!(art::DDVFA, x::RealVector ; y::Integer=0, preprocessed::Bool=fal if mismatch_flag # Update the stored match and activation values bmu = index[1] - art.M = similarity(art.opts.method, art.F2[bmu], "M", sample, art.opts.gamma_ref) + art.M = similarity(art.opts.method, art.F2[bmu], "M", sample) art.T = T[bmu] # Get the correct label y_hat = supervised ? y : art.n_categories + 1 @@ -257,12 +292,69 @@ function train!(art::DDVFA, x::RealVector ; y::Integer=0, preprocessed::Bool=fal end return y_hat -end # train!(art::DDVFA, x::RealVector ; y::Integer=0, preprocessed::Bool=false) +end -""" - create_category(art::DDVFA, sample::RealVector, label::Integer) +# COMMON DOC: DDVFA incremental classification method +function classify(art::DDVFA, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false) + # Preprocess the data + sample = init_classify!(x, art, preprocessed) + + # Calculate all global activations + T = zeros(art.n_categories) + for jx = 1:art.n_categories + activation_match!(art.F2[jx], sample) + T[jx] = similarity(art.opts.method, art.F2[jx], "T", sample) + end + + # Sort by highest activation + index = sortperm(T, rev=true) + + # Default to mismatch + mismatch_flag = true + + # Iterate over the list of activations + for jx = 1:art.n_categories + # Get the best-matching unit + bmu = index[jx] + # Get the match value of this activation + M = similarity(art.opts.method, art.F2[bmu], "M", sample) + # If the match satisfies the threshold criterion, then report that label + if M >= art.threshold + # Update the stored match and activation values + art.M = M + art.T = T[bmu] + # Current winner + y_hat = art.labels[bmu] + mismatch_flag = false + break + end + end + # If we did not find a resonant category + if mismatch_flag + @debug "Mismatch" + # Update the stored match and activation values of the best matching unit + bmu = index[1] + art.M = similarity(art.opts.method, art.F2[bmu], "M", sample) + art.T = T[bmu] + # Report either the best matching unit or the mismatch label -1 + y_hat = get_bmu ? 
art.labels[bmu] : -1
+    end
+
+    return y_hat
+end
+
+# --------------------------------------------------------------------------- #
+# INTERNAL FUNCTIONS
+# --------------------------------------------------------------------------- #
+
+"""
 Create a new category by appending and initializing a new FuzzyART node to F2.
+
+# Arguments
+- `art::DDVFA`: the DDVFA module to create a new FuzzyART category in.
+- `sample::RealVector`: the sample to use for instantiating the new category.
+- `label::Integer`: the new label to use for the new category.
 """
 function create_category(art::DDVFA, sample::RealVector, label::Integer)
     # Global Fuzzy ART
@@ -270,27 +362,31 @@ function create_category(art::DDVFA, sample::RealVector, label::Integer)
     push!(art.labels, label)
     # Local Gamma-Normalized Fuzzy ART
     push!(art.F2, FuzzyART(art.subopts, sample, preprocessed=true))
-end # function create_category(art::DDVFA, sample::RealVector, label::Integer)
+end

 """
-    stopping_conditions(art::DDVFA)
-
 Stopping conditions for Distributed Dual Vigilance Fuzzy ARTMAP.
 Returns true if there is no change in weights during the epoch or the maxmimum epochs has been reached.
+
+# Arguments
+- `art::DDVFA`: the DDVFA module for checking stopping conditions.
 """
 function stopping_conditions(art::DDVFA)
     # Compute the stopping condition, return a bool
     return art.epoch >= art.opts.max_epoch
-end # stopping_conditions(DDVFA)
+end

 """
-    similarity(method::String, F2::FuzzyART, field_name::String, sample::RealVector, gamma_ref::RealFP)
+Compute the similarity metric depending on method with explicit comparisons for the field name.

-Compute the similarity metric depending on method with explicit comparisons
-for the field name.
+
+# Arguments
+- `method::AbstractString`: the selected DDVFA linkage method, method ∈ DDVFA_METHODS.
+- `F2::FuzzyART`: the FuzzyART module to compute the linkage method within.
+- `field_name::AbstractString`: the activation or match value to compute, field_name ∈ ["T", "M"].
+- `sample::RealVector`: the sample to use for computing the linkage to the F2 module.
""" -function similarity(method::String, F2::FuzzyART, field_name::String, sample::RealVector, gamma_ref::RealFP) +function similarity(method::AbstractString, F2::FuzzyART, field_name::AbstractString, sample::RealVector) @debug "Computing similarity" if field_name != "T" && field_name != "M" @@ -339,63 +435,13 @@ function similarity(method::String, F2::FuzzyART, field_name::String, sample::Re if field_name == "T" value = T elseif field_name == "M" - value = (norm(Wc, 1)^gamma_ref)*T + value = (norm(Wc, 1)^F2.opts.gamma_ref) * T end else error("Invalid/unimplemented similarity method") end return value -end # similarity(method::String, F2::FuzzyART, field_name::String, sample::RealVector, gamma_ref::RealFP) - -# DDVFA incremental classification method -function classify(art::DDVFA, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false) - # Preprocess the data - sample = init_classify!(x, art, preprocessed) - - # Calculate all global activations - T = zeros(art.n_categories) - for jx = 1:art.n_categories - activation_match!(art.F2[jx], sample) - T[jx] = similarity(art.opts.method, art.F2[jx], "T", sample, art.opts.gamma_ref) - end - - # Sort by highest activation - index = sortperm(T, rev=true) - - # Default to mismatch - mismatch_flag = true - - # Iterate over the list of activations - for jx = 1:art.n_categories - # Get the best-matching unit - bmu = index[jx] - # Get the match value of this activation - M = similarity(art.opts.method, art.F2[bmu], "M", sample, art.opts.gamma_ref) - # If the match satisfies the threshold criterion, then report that label - if M >= art.threshold - # Update the stored match and activation values - art.M = M - art.T = T[bmu] - # Current winner - y_hat = art.labels[bmu] - mismatch_flag = false - break - end - end - - # If we did not find a resonant category - if mismatch_flag - @debug "Mismatch" - # Update the stored match and activation values of the best matching unit - bmu = index[1] - art.M = similarity(art.opts.method, art.F2[bmu], "M", sample, art.opts.gamma_ref) - art.T = T[bmu] - # Report either the best matching unit or the mismatch label -1 - y_hat = get_bmu ? art.labels[bmu] : -1 - end - - return y_hat end # --------------------------------------------------------------------------- # @@ -403,30 +449,30 @@ end # --------------------------------------------------------------------------- # """ - get_W(art::DDVFA) +Convenience function; return a concatenated array of all DDVFA weights. -Convenience functio; return a concatenated array of all DDVFA weights. +# Arguments +- `art::DDVFA`: the DDVFA module to get all of the weights from as a list. """ function get_W(art::DDVFA) # Return a concatenated array of the weights return [art.F2[kx].W for kx = 1:art.n_categories] -end # get_W(art::DDVFA) +end """ - get_n_weights_vec(art::DDVFA) - Convenience function; return the number of weights in each category as a vector. + +# Arguments +- `art::DDVFA`: the DDVFA module to get all of the weights from as a list. """ function get_n_weights_vec(art::DDVFA) return [art.F2[i].n_categories for i = 1:art.n_categories] -end # get_n_weights_vec(art::DDVFA) +end """ - get_n_weights(art::DDVFA) - Convenience function; return the sum total number of weights in the DDVFA module. 
""" function get_n_weights(art::DDVFA) # Return the number of weights across all categories return sum(get_n_weights_vec(art)) -end # get_n_weights(art::DDVFA) +end diff --git a/src/ART/DVFA.jl b/src/ART/DVFA.jl index 17a6050f..1cca7f65 100644 --- a/src/ART/DVFA.jl +++ b/src/ART/DVFA.jl @@ -17,91 +17,126 @@ resonance system," Neural Networks, vol. 4, no. 6, pp. 759-771, 1991. """ """ - opts_DVFA(;kwargs) - Dual Vigilance Fuzzy ART options struct. -# Keyword Arguments -- `rho_lb::Float`: lower-bound vigilance value, [0, 1], default 0.55. -- `rho_ub::Float`: upper-bound vigilance value, [0, 1], default 0.75. -- `alpha::Float`: choice parameter, alpha > 0, default 1e-3. -- `beta::Float`: learning parameter, (0, 1], default 1.0. -- `display::Bool`: display flag, default true. -- `max_epoch::Int`: maximum number of epochs during training, default 1. +$(opts_docstring) """ @with_kw mutable struct opts_DVFA <: ARTOpts @deftype Float - # Lower-bound vigilance parameter: [0, 1] + """ + Lower-bound vigilance parameter: rho_lb ∈ [0, 1]. + """ rho_lb = 0.55; @assert rho_lb >= 0.0 && rho_lb <= 1.0 - # Upper bound vigilance parameter: [0, 1] + + """ + Upper bound vigilance parameter: rho_ub ∈ [0, 1]. + """ rho_ub = 0.75; @assert rho_ub >= 0.0 && rho_ub <= 1.0 - # Choice parameter: alpha > 0 + + """ + Choice parameter: alpha > 0. + """ alpha = 1e-3; @assert alpha > 0.0 - # Learning parameter: (0, 1] + + """ + Learning parameter: beta ∈ (0, 1]. + """ beta = 1.0; @assert beta > 0.0 && beta <= 1.0 - # Display flag - display::Bool = true - # Maximum number of epochs during training + + """ + Maximum number of epochs during training. + """ max_epochs::Int = 1 -end # opts_DVFA -""" - DVFA <: ART + """ + Display flag. + """ + display::Bool = true +end +""" Dual Vigilance Fuzzy ARTMAP module struct. For module options, see [`AdaptiveResonance.opts_DVFA`](@ref). -# Option Parameters -- `opts::opts_DVFA`: DVFA options struct. -- `config::DataConfig`: data configuration struct. - -# Working Parameters -- `threshold_ub::Float`: operating upper bound module threshold value, a function of the upper bound vigilance parameter. -- `threshold_lb::Float`: operating lower bound module threshold value, a function of the lower bound vigilance parameter. -- `labels::IntegerVector`: incremental list of labels corresponding to each F2 node, self-prescribed or supervised. -- `W::RealMatrix`: category weight matrix. -- `T::RealVector`: activation values for every weight for a given sample. -- `M::RealVector`: match values for every weight for a given sample. -- `n_categories::Int`: number of category weights (F2 nodes). -- `n_clusters::Int`: number of labeled clusters, may be lower than `n_categories` -- `epoch::Int`: current training epoch. - # References: 1. L. E. Brito da Silva, I. Elnabarawy and D. C. Wunsch II, "Dual Vigilance Fuzzy ART," Neural Networks Letters. To appear. 2. G. Carpenter, S. Grossberg, and D. Rosen, "Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system," Neural Networks, vol. 4, no. 6, pp. 759-771, 1991. """ mutable struct DVFA <: ART # Get parameters + """ + DVFA options struct. + """ opts::opts_DVFA + + """ + Data configuration struct. + """ config::DataConfig # Working variables + """ + Operating upper bound module threshold value, a function of the upper bound vigilance parameter. + """ threshold_ub::Float + + """ + Operating lower bound module threshold value, a function of the lower bound vigilance parameter. 
+ """ threshold_lb::Float - labels::IntegerVector - W::RealMatrix - T::RealVector - M::RealVector + + """ + Incremental list of labels corresponding to each F2 node, self-prescribed or supervised. + """ + labels::Vector{Int} + + """ + Category weight matrix. + """ + W::Matrix{Float} + + """ + Activation values for every weight for a given sample. + """ + T::Vector{Float} + + """ + Match values for every weight for a given sample. + """ + M::Vector{Float} + + """ + Number of category weights (F2 nodes). + """ n_categories::Int + + """ + Number of labeled clusters, may be lower than `n_categories` + """ n_clusters::Int + + """ + Current training epoch. + """ epoch::Int -end # DVFA +end # --------------------------------------------------------------------------- # # CONSTRUCTORS # --------------------------------------------------------------------------- # """ - DVFA(;kwargs...) - Implements a DVFA learner with optional keyword arguments. +# Arguments +- `kwargs`: keyword arguments to pass to the DVFA options struct (see [`AdaptiveResonance.opts_DVFA`](@ref).) + # Examples By default: ```julia-repl julia> DVFA() DVFA - opts: opts_DDVFA + opts: opts_DVFA ... ``` @@ -109,20 +144,21 @@ or with keyword arguments: ```julia-repl julia> DVFA(rho=0.7) DVFA - opts: opts_DDVFA + opts: opts_DVFA ... ``` """ function DVFA(;kwargs...) opts = opts_DVFA(;kwargs...) DVFA(opts) -end # DVFA(;kwargs...) +end """ - DVFA(opts::opts_DVFA) - Implements a DVFA learner with specified options. +# Arguments +- `opts::opts_DVFA`: the DVFA options (see [`AdaptiveResonance.opts_DVFA`](@ref)). + # Examples ```julia-repl julia> my_opts = opts_DVFA() @@ -146,20 +182,20 @@ function DVFA(opts::opts_DVFA) 0, # n_clusters 0 # epoch ) -end # DDVFA(opts::opts_DDVFA) +end -""" - set_threshold!(art::DVFA) +# --------------------------------------------------------------------------- # +# FUNCTIONS +# --------------------------------------------------------------------------- # -Configure the threshold values of the DVFA module. -""" +# COMMON DOC: Set threshold function function set_threshold!(art::DVFA) # DVFA thresholds art.threshold_ub = art.opts.rho_ub * art.config.dim art.threshold_lb = art.opts.rho_lb * art.config.dim -end # set_threshold!(art::DVFA) +end -# Incremental DVFA training method +# COMMON DOC: Incremental DVFA training method function train!(art::DVFA, x::RealVector ; y::Integer=0, preprocessed::Bool=false) # Flag for if training in supervised mode supervised = !iszero(y) @@ -249,9 +285,9 @@ function train!(art::DVFA, x::RealVector ; y::Integer=0, preprocessed::Bool=fals end return y_hat -end # train!(art::DVFA, x::RealVector ; y::Integer=0, preprocessed::Bool=false) +end -# Incremental DVFA classify method +# COMMON DOC: Incremental DVFA classify method function classify(art::DVFA, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false) # Preprocess the data sample = init_classify!(x, art, preprocessed) @@ -281,11 +317,9 @@ function classify(art::DVFA, x::RealVector ; preprocessed::Bool=false, get_bmu:: end return y_hat -end # classify(art::DVFA, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false) +end """ - activation_match!(art::DVFA, x::RealVector) - Compute and store the activation and match values for the DVFA module. 
""" function activation_match!(art::DVFA, x::RealVector) @@ -296,33 +330,27 @@ function activation_match!(art::DVFA, x::RealVector) art.T[jx] = numerator/(art.opts.alpha + norm(art.W[:, jx], 1)) art.M[jx] = numerator end -end # activation_match!(art::DVFA, x::RealVector) +end """ - learn(art::DVFA, x::RealVector, W::RealVector) - Return the modified weight of the DVFA module conditioned by sample x. """ function learn(art::DVFA, x::RealVector, W::RealVector) # Update W return art.opts.beta .* element_min(x, W) .+ W .* (1 - art.opts.beta) -end # learn(art::DVFA, x::RealVector, W::RealVector) +end """ - learn!(art::DVFA, x::RealVector, index::Integer) - In place learning function. """ function learn!(art::DVFA, x::RealVector, index::Integer) # Update W art.W[:, index] = learn(art, x, art.W[:, index]) -end # learn!(art::DVFA, x::RealVector, index::Integer) +end """ - stopping_conditions(art::DVFA) - Stopping conditions for a DVFA module. """ function stopping_conditions(art::DVFA) return art.epoch >= art.opts.max_epochs -end # stopping_conditions(art::DVFA) +end diff --git a/src/ART/FuzzyART.jl b/src/ART/FuzzyART.jl index 6b1863ff..3f314ef4 100644 --- a/src/ART/FuzzyART.jl +++ b/src/ART/FuzzyART.jl @@ -13,94 +13,130 @@ References: # --------------------------------------------------------------------------- # """ - opts_FuzzyART(;kwargs) - Gamma-Normalized Fuzzy ART options struct. -# Keyword Arguments -- `rho::Float`: vigilance value, [0, 1], default 0.6. -- `alpha::Float`: choice parameter, alpha > 0, default 1e-3. -- `beta::Float`: learning parameter, (0, 1], default 1.0. -- `gamma::Float`: "pseudo" kernel width, gamma >= 1, default 3.0. -- `gamma_ref::Float`: "reference" kernel width, 0 <= gamma_ref < gamma, default 1.0. -- `display::Bool`: display flag, default true. -- `max_epoch::Int`: maximum number of epochs during training, default 1. -- `gamma_normalization::Bool`: normalize the threshold by the feature dimension, default false. +$(opts_docstring) """ @with_kw mutable struct opts_FuzzyART <: ARTOpts @deftype Float - # Vigilance parameter: [0, 1] + """ + Vigilance parameter: rho ∈ [0, 1]. + """ rho = 0.6; @assert rho >= 0.0 && rho <= 1.0 - # Choice parameter: alpha > 0 + + """ + Choice parameter: alpha > 0. + """ alpha = 1e-3; @assert alpha > 0.0 - # Learning parameter: (0, 1] + + """ + Learning parameter: beta ∈ (0, 1]. + """ beta = 1.0; @assert beta > 0.0 && beta <= 1.0 - # "Pseudo" kernel width: gamma >= 1 + + """ + Pseudo kernel width: gamma >= 1. + """ gamma = 3.0; @assert gamma >= 1.0 - # "Reference" gamma for normalization: 0 <= gamma_ref < gamma + + """ + Reference gamma for normalization: 0 <= gamma_ref < gamma. + """ gamma_ref = 1.0; @assert 0.0 <= gamma_ref && gamma_ref <= gamma - # Display flag - display::Bool = true - # Maximum number of epochs during training + + """ + Maximum number of epochs during training: max_epochs ∈ (1, Inf). + """ max_epochs::Int = 1 - # Normalize the threshold by the feature dimension + + """ + Display flag. + """ + display::Bool = true + + """ + Flag to normalize the threshold by the feature dimension. + """ gamma_normalization::Bool = false -end # opts_FuzzyART +end # --------------------------------------------------------------------------- # # STRUCTS # --------------------------------------------------------------------------- # """ - FuzzyART <: ART - Gamma-Normalized Fuzzy ART learner struct For module options, see [`AdaptiveResonance.opts_FuzzyART`](@ref). -# Option Parameters -- `opts::opts_FuzzyART`: FuzzyART options struct. 
-- `config::DataConfig`: data configuration struct. - -# Working Parameters -- `threshold::Float`: operating module threshold value, a function of the vigilance parameter. -- `labels::IntegerVector`: incremental list of labels corresponding to each F2 node, self-prescribed or supervised. -- `T::RealVector`: activation values for every weight for a given sample. -- `M::RealVector`: match values for every weight for a given sample. -- `W::RealMatrix`: category weight matrix. -- `n_instance::IntegerVector`: number of weights associated with each category. -- `n_categories::Int`: number of category weights (F2 nodes). -- `epoch::Int`: current training epoch. - # References 1. G. Carpenter, S. Grossberg, and D. Rosen, "Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system," Neural Networks, vol. 4, no. 6, pp. 759-771, 1991. """ mutable struct FuzzyART <: ART + # Option Parameters # Assign numerical parameters from options + """ + FuzzyART options struct. + """ opts::opts_FuzzyART + + """ + Data configuration struct. + """ config::DataConfig # Working variables + """ + Operating module threshold value, a function of the vigilance parameter. + """ threshold::Float - labels::IntegerVector - T::RealVector - M::RealVector + + """ + Incremental list of labels corresponding to each F2 node, self-prescribed or supervised. + """ + labels::Vector{Int} + + """ + Activation values for every weight for a given sample. + """ + T::Vector{Float} + + """ + Match values for every weight for a given sample. + """ + M::Vector{Float} # "Private" working variables - W::RealMatrix - n_instance::IntegerVector + """ + Category weight matrix. + """ + W::Matrix{Float} + + """ + Number of weights associated with each category. + """ + n_instance::Vector{Int} + + """ + number of category weights (F2 nodes). + """ n_categories::Int + + """ + Current training epoch. + """ epoch::Int -end # FuzzyART <: ART +end # --------------------------------------------------------------------------- # # CONSTRUCTORS # --------------------------------------------------------------------------- # """ - FuzzyART(;kwargs...) - Implements a Gamma-Normalized Fuzzy ART learner with optional keyword arguments. +# Arguments +- `kwargs`: keyword arguments of valid FuzzyART options. + # Examples By default: ```julia-repl @@ -121,13 +157,14 @@ FuzzyART function FuzzyART(;kwargs...) opts = opts_FuzzyART(;kwargs...) FuzzyART(opts) -end # FuzzyART(;kwargs...) +end """ - FuzzyART(opts::opts_FuzzyART) - Implements a Gamma-Normalized Fuzzy ART learner with specified options. +# Arguments +- `opts::opts_FuzzyART`: the FuzzyART options struct with specified options. + # Examples ```julia-repl julia> FuzzyART(opts) @@ -137,7 +174,8 @@ FuzzyART ``` """ function FuzzyART(opts::opts_FuzzyART) - FuzzyART(opts, # opts + FuzzyART( + opts, # opts DataConfig(), # config 0.0, # threshold Array{Int}(undef,0), # labels @@ -148,37 +186,48 @@ function FuzzyART(opts::opts_FuzzyART) 0, # n_categories 0 # epoch ) -end # FuzzyART(opts::opts_FuzzyART) +end """ - FuzzyART(opts::opts_FuzzyART, sample::RealVector) - Create and initialize a FuzzyART with a single sample in one step. + +Principally used as a method for initialization within DDVFA. + +# Arguments +- `opts::opts_FuzzyART`: the FuzzyART options contains. +- `sample::RealVector`: the sample to use as a basis for setting up the FuzzyART. +- `preprocessed::Bool=false`: flag for if the sample is already complement coded and normalized. 
""" function FuzzyART(opts::opts_FuzzyART, sample::RealVector ; preprocessed::Bool=false) art = FuzzyART(opts) init_train!(sample, art, preprocessed) initialize!(art, sample) return art -end # FuzzyART(opts::opts_FuzzyART, sample::RealVector) +end # --------------------------------------------------------------------------- # # ALGORITHMIC METHODS # --------------------------------------------------------------------------- # +# COMMON DOC: Set threshold function function set_threshold!(art::FuzzyART) if art.opts.gamma_normalization - art.threshold = art.opts.rho*(art.config.dim^art.opts.gamma_ref) + art.threshold = art.opts.rho * (art.config.dim ^ art.opts.gamma_ref) else art.threshold = art.opts.rho end -end # set_threshold!(art::FuzzyART) +end """ - initialize!(art::FuzzyART, x::Vector{T} ; y::Integer=0) where {T<:RealFP} - Initializes a FuzzyART learner with an intial sample 'x'. +This function is used during the first training iteration when the FuzzyART module is empty. + +# Arguments +- `art::FuzzyART`: the FuzzyART module to initialize. +- `x::RealVector`: the sample to use for initialization. +- `y::Integer=0`: the optional new label for the first weight of the FuzzyART module. If not specified, defaults the new label to 1. + # Examples ```julia-repl julia> my_FuzzyART = FuzzyART() @@ -188,7 +237,7 @@ FuzzyART julia> initialize!(my_FuzzyART, [1 2 3 4]) ``` """ -function initialize!(art::FuzzyART, x::Vector{T} ; y::Integer=0) where {T<:RealFP} +function initialize!(art::FuzzyART, x::RealVector ; y::Integer=0) # Initialize the instance and categories counters art.n_instance = [1] art.n_categories = 1 @@ -197,7 +246,7 @@ function initialize!(art::FuzzyART, x::Vector{T} ; y::Integer=0) where {T<:RealF set_threshold!(art) # Fast commit the weight - art.W = Array{T}(undef, art.config.dim_comp, 1) + art.W = Matrix{Float}(undef, art.config.dim_comp, 1) # Assign the contents, valid this way for 1-D or 2-D arrays art.W[:, 1] = x @@ -207,9 +256,9 @@ function initialize!(art::FuzzyART, x::Vector{T} ; y::Integer=0) where {T<:RealF # Add the label to the label list push!(art.labels, label) -end # initialize!(art::FuzzyART, x::Vector{T} ; y::Integer=0) where {T<:RealFP} +end -# FuzzyART incremental training method +# COMMON DOC: FuzzyART incremental training method function train!(art::FuzzyART, x::RealVector ; y::Integer=0, preprocessed::Bool=false) # Flag for if training in supervised mode supervised = !iszero(y) @@ -268,20 +317,6 @@ function train!(art::FuzzyART, x::RealVector ; y::Integer=0, preprocessed::Bool= return y_hat end # train!(art::FuzzyART, x::RealVector ; y::Integer=0, preprocessed::Bool=false) -""" - create_category(art::FuzzyART, x::RealVector, y::Integer) -""" -function create_category(art::FuzzyART, x::RealVector, y::Integer) - # Increment the number of categories - art.n_categories += 1 - # Fast commit - art.W = hcat(art.W, x) - # Increment number of samples associated with new category - push!(art.n_instance, 1) - # Add the label for the ategory - push!(art.labels, y) -end # create_category(art::FuzzyART, x::RealVector, y::Integer) - # FuzzyART incremental classification method function classify(art::FuzzyART, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false) # Preprocess the data @@ -312,13 +347,34 @@ function classify(art::FuzzyART, x::RealVector ; preprocessed::Bool=false, get_b y_hat = get_bmu ? 
art.labels[index[1]] : -1 end return y_hat -end # classify(art::FuzzyART, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false) +end """ - activation_match!(art::FuzzyART, x::RealVector) +Creates a category for the FuzzyART module, expanding the weights and incrementing the category labels. +# Arguments +- `art::FuzzyART`: the FuzzyART module to add a category to. +- `x::RealVector`: the sample to use for adding a category. +- `y::Integer`: the new label for the new category. +""" +function create_category(art::FuzzyART, x::RealVector, y::Integer) + # Increment the number of categories + art.n_categories += 1 + # Fast commit + art.W = hcat(art.W, x) + # Increment number of samples associated with new category + push!(art.n_instance, 1) + # Add the label for the ategory + push!(art.labels, y) +end + +""" Computes the activation and match functions of the art module against sample x. +# Arguments +- `art::FuzzyART`: the FuzzyART module to compute the activation and match values for all weights. +- `x::RealVector`: the sample to compute the activation and match functions against. + # Examples ```julia-repl julia> my_FuzzyART = FuzzyART() @@ -337,41 +393,48 @@ function activation_match!(art::FuzzyART, x::RealVector) for i = 1:art.n_categories W_norm = norm(art.W[:, i], 1) numerator = norm(element_min(x, art.W[:, i]), 1) - art.T[i] = (numerator/(art.opts.alpha + W_norm))^art.opts.gamma + art.T[i] = (numerator / (art.opts.alpha + W_norm))^art.opts.gamma if art.opts.gamma_normalization - art.M[i] = (W_norm^art.opts.gamma_ref)*art.T[i] + art.M[i] = (W_norm^art.opts.gamma_ref) * art.T[i] else - art.M[i] = numerator/norm(x, 1) + art.M[i] = numerator / norm(x, 1) end end -end # activation_match!(art::FuzzyART, x::RealVector) +end """ - learn(art::FuzzyART, x::RealVector, W::RealVector) - Return the modified weight of the art module conditioned by sample x. + +# Arguments +- `art::FuzzyART`: the FuzzyART module containing learning options. +- `x::RealVector`: the sample to learn from. +- `W::RealVector`: the weight vector to update against the sample. """ function learn(art::FuzzyART, x::RealVector, W::RealVector) # Update W return art.opts.beta .* element_min(x, W) .+ W .* (1 - art.opts.beta) -end # learn(art::FuzzyART, x::RealVector, W::RealVector) +end """ - learn!(art::FuzzyART, x::RealVector, index::Integer) - In place learning function with instance counting. + +# Arguments +- `art::FuzzyART`: the FuzzyART module to update. +- `x::RealVector`: the sample to learn from. +- `index::Integer`: the index of the FuzzyART weight to update. """ function learn!(art::FuzzyART, x::RealVector, index::Integer) # Update W art.W[:, index] = learn(art, x, art.W[:, index]) art.n_instance[index] += 1 -end # learn!(art::FuzzyART, x::RealVector, index::Integer) +end """ - stopping_conditions(art::FuzzyART) - Stopping conditions for a FuzzyART module. + +# Arguments +- `art::FuzzyART`: the FuzzyART module to check stopping conditions for. """ function stopping_conditions(art::FuzzyART) return art.epoch >= art.opts.max_epochs -end # stopping_conditions(art::FuzzyART) +end diff --git a/src/ART/common.jl b/src/ART/common.jl index 6669074a..7941cc40 100644 --- a/src/ART/common.jl +++ b/src/ART/common.jl @@ -1,17 +1,15 @@ """ common.jl -Description: - Includes all of the unsupervised ART modules common code. +# Description: +Includes all of the unsupervised ART modules common code. 
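To make the weight update used by the `learn` methods above concrete, here is a small standalone sketch of the rule `beta .* min(x, W) .+ W .* (1 - beta)`; the sample and weight values are invented for illustration:

```julia
# Fast learning (beta = 1.0) reduces the update to the element-wise minimum
x = [0.2, 0.8, 0.8, 0.2]    # complement-coded sample
W = [0.4, 0.9, 0.6, 0.1]    # existing category weight
beta = 1.0

W_new = beta .* min.(x, W) .+ W .* (1 - beta)
# W_new == [0.2, 0.8, 0.6, 0.1]
```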
""" -# ------------------------------------------- -# Methods -# ------------------------------------------- +# --------------------------------------------------------------------------- # +# FUNCTIONS +# --------------------------------------------------------------------------- # """ - train!(art::ART, x::RealMatrix ; y::IntegerVector=Vector{Int}(), preprocessed::Bool=false) - Train the ART model on a batch of data 'x' with optional supervisory labels 'y.' # Arguments @@ -35,18 +33,18 @@ function train!(art::ART, x::RealMatrix ; y::IntegerVector = Vector{Int}(), prep # Initialize the output vector y_hat = zeros(Int, n_samples) + # Learn until the stopping conditions art.epoch = 0 while true # Increment the epoch and get the iterator art.epoch += 1 - iter = get_iterator(art.opts, x) + iter = get_iterator(art.opts, n_samples) for i = iter # Update the iterator if necessary update_iter(art, iter, i) # Grab the sample slice - # sample = get_sample(x, i) - sample = x[:, i] + sample = get_sample(x, i) # Select the label to pass to the incremental method local_y = supervised ? y[i] : 0 # Train upon the sample and label @@ -59,15 +57,13 @@ function train!(art::ART, x::RealMatrix ; y::IntegerVector = Vector{Int}(), prep end end return y_hat -end # train!(art::ART, x::RealMatrix ; y::IntegerVector = Vector{Int}(), preprocessed::Bool=false) +end -# ------------------------------------------- -# Common Documentation -# ------------------------------------------- +# --------------------------------------------------------------------------- # +# COMMON DOCUMENTATION +# --------------------------------------------------------------------------- # @doc raw""" - train!(art::ART, x::RealVector ; y::Integer=0, preprocessed::Bool=false) - Train the ART model on a single sample of features 'x' with an optional supervisory label. # Arguments diff --git a/src/ARTMAP/ARTMAP.jl b/src/ARTMAP/ARTMAP.jl index 7dbfa275..38b6730d 100644 --- a/src/ARTMAP/ARTMAP.jl +++ b/src/ARTMAP/ARTMAP.jl @@ -10,3 +10,12 @@ include("DAM.jl") # Default ARTMAP include("FAM.jl") # Fuzzy ARTMAP include("SFAM.jl") # Simplified Fuzzy ARTMAP include("ARTSCENE.jl") # ARTSCENE filters + +""" +A list of supervised ARTMAP modules that are available in the `AdaptiveResonance.jl` package. +""" +const ARTMAP_MODULES = [ + DAM, + # FAM, + SFAM, +] \ No newline at end of file diff --git a/src/ARTMAP/ARTSCENE.jl b/src/ARTMAP/ARTSCENE.jl index 7ea7eb01..7944c26f 100644 --- a/src/ARTMAP/ARTSCENE.jl +++ b/src/ARTMAP/ARTSCENE.jl @@ -5,32 +5,34 @@ Description: All of the visual filter functions for the ARTSCENE algorithm. """ +# --------------------------------------------------------------------------- # +# DEPENDENCIES +# --------------------------------------------------------------------------- # + using Distributed using SharedArrays -""" - color_to_gray(image::RealArray) +# --------------------------------------------------------------------------- # +# FUNCTIONS +# --------------------------------------------------------------------------- # +""" ARTSCENE Stage 1: Color-to-gray image transformation. 
""" -function color_to_gray(image::RealArray) +function color_to_gray(image::Array{T, 3}) where T <: RealFP # Treat the image as a column-major array, cast to grayscale - dim, n_row, n_column = size(image) - return [sum(image[:,i,j])/3 for i=1:n_row, j=1:n_column] -end # color_to_gray(image::RealArray) + _, n_row, n_column = size(image) + return [sum(image[:, i, j])/3 for i=1:n_row, j=1:n_column] +end """ - surround_kernel(i::Integer, j::Integer, p::Integer, q::Integer, scale::Integer) - -Surround kernel S function for ARTSCENE Stage 2 +Surround kernel S function for ARTSCENE Stage 2. """ function surround_kernel(i::Integer, j::Integer, p::Integer, q::Integer, scale::Integer) return 1/(2*pi*scale^2)*MathConstants.e^(-((i-p)^2 + (j-q)^2)/(2*scale^2)) -end # surround_kernel(i::Integer, j::Integer, p::Integer, q::Integer, scale::Integer) +end """ - ddt_x(x::RealArray, image::RealArray, sigma_s::RealArray, distributed::Bool) - Time rate of change of LGN network (ARTSCENE Stage 2). """ function ddt_x(x::RealArray, image::RealArray, sigma_s::RealArray, distributed::Bool) @@ -57,11 +59,9 @@ function ddt_x(x::RealArray, image::RealArray, sigma_s::RealArray, distributed:: end end return dx -end # ddt_x(x::RealArray, image::RealArray, sigma_s::RealArray, distributed::Bool) +end """ - contrast_normalization(image::RealArray ; distributed::Bool=true) - ARTSCENE Stage 2: Constrast normalization. """ function contrast_normalization(image::RealArray ; distributed::Bool=true) @@ -84,14 +84,12 @@ function contrast_normalization(image::RealArray ; distributed::Bool=true) end return x -end # contrast_normalization(image::RealArray ; distributed::Bool=true) +end """ - oriented_kernel(i::Integer, j::Integer, p::Integer, q::Integer, k::Integer, sigma_h::Real, sigma_v::Real ; sign::String="plus") - Oriented, elongated, spatially offset kernel G for ARTSCENE Stage 3. """ -function oriented_kernel(i::Integer, j::Integer, p::Integer, q::Integer, k::Integer, sigma_h::Real, sigma_v::Real ; sign::String="plus") +function oriented_kernel(i::Integer, j::Integer, p::Integer, q::Integer, k::Integer, sigma_h::Real, sigma_v::Real ; sign::AbstractString="plus") m = sin(pi*k/4) n = cos(pi*k/4) @@ -108,11 +106,9 @@ function oriented_kernel(i::Integer, j::Integer, p::Integer, q::Integer, k::Inte end return G -end # oriented_kernel(i::Integer, j::Integer, p::Integer, q::Integer, k::Integer, sigma_h::Real, sigma_v::Real ; sign::String="plus") +end """ - ddt_y(y::RealArray, X_plus::RealArray, X_minus::RealArray, alpha::Real, distributed::Bool) - Shunting equation for ARTSCENE Stage 3. """ function ddt_y(y::RealArray, X_plus::RealArray, X_minus::RealArray, alpha::Real, distributed::Bool) @@ -150,11 +146,9 @@ function ddt_y(y::RealArray, X_plus::RealArray, X_minus::RealArray, alpha::Real, end end return dy -end # ddt_y(y::RealArray, X_plus::RealArray, X_minus::RealArray, alpha::Real, distributed::Bool) +end """ - contrast_sensitive_oriented_filtering(image::RealArray, x::RealArray ; distributed::Bool=true) - ARTSCENE Stage 3: Contrast-sensitive oriented filtering. """ function contrast_sensitive_oriented_filtering(image::RealArray, x::RealArray ; distributed::Bool=true) @@ -183,11 +177,9 @@ function contrast_sensitive_oriented_filtering(image::RealArray, x::RealArray ; end return y -end # contrast_sensitive_oriented_filtering(image::RealArray, x::RealArray ; distributed::Bool=true) +end """ - contrast_insensitive_oriented_filtering(y::RealArray) - ARTSCENE Stage 4: Contrast-insensitive oriented filtering. 
""" function contrast_insensitive_oriented_filtering(y::RealArray) @@ -198,14 +190,12 @@ function contrast_insensitive_oriented_filtering(y::RealArray) Y_minus = [max(0, -y[i,j,g,k]) for i=1:n_row, j=1:n_column, g=1:n_g, k=1:n_k] return Y_plus + Y_minus -end # contrast_insensitive_oriented_filtering(y::RealArray) +end """ - competition_kernel(l::Integer, k::Integer ; sign::String="plus") - Competition kernel for ARTSCENE: Stage 5. """ -function competition_kernel(l::Integer, k::Integer ; sign::String="plus") +function competition_kernel(l::Integer, k::Integer ; sign::AbstractString="plus") if sign == "plus" g = ( 1/(0.5*sqrt(2*pi))*MathConstants.e^(-0.5*((l-k)/0.5)^2) ) @@ -216,11 +206,9 @@ function competition_kernel(l::Integer, k::Integer ; sign::String="plus") end return g -end # competition_kernel(l::Integer, k::Integer ; sign::String="plus") +end """ - ddt_z(z::RealArray ; distributed=true) - Time rate of change for ARTSCENE: Stage 5. """ function ddt_z(z::RealArray ; distributed::Bool=true) @@ -247,11 +235,9 @@ function ddt_z(z::RealArray ; distributed::Bool=true) end return dz -end # ddt_z(z::RealArray ; distributed=true) +end """ - orientation_competition(z::RealArray) - ARTSCENE Stage 5: Orientation competition at the same position. """ function orientation_competition(z::RealArray) @@ -274,8 +260,6 @@ function orientation_competition(z::RealArray) end """ - patch_orientation_color(z::RealArray, image::RealArray) - ARTSCENE Stage 6: Create patch feature vectors. """ function patch_orientation_color(z::RealArray, image::RealArray) @@ -301,20 +285,22 @@ function patch_orientation_color(z::RealArray, image::RealArray) # Compute the orientation averages for k = 1:4 for g = 1:4 - O[p_i,p_j,k,g] = 1/size_patch * sum(z[i_range, j_range, k, g]) + O[p_i, p_j, k, g] = 1/size_patch * sum(z[i_range, j_range, k, g]) end end end end return O, C -end # patch_orientation_color(z::RealArray, image::RealArray) +end """ - artscene_filter(raw_image::Array{T, 3} ; distributed::Bool=true) where {T<:Real} - Process the full artscene filter toolchain on an image. + +# Arguments +- `raw_image::Array{Real, 3}`: the rwa RGB image to process with the ARTSCENE filter. +- `distributed::Bool=true`: flag to process the filter with parallel processing. """ -function artscene_filter(raw_image::Array{T, 3} ; distributed::Bool=true) where {T<:Real} +function artscene_filter(raw_image::Array{T, 3} ; distributed::Bool=true) where T <: RealFP # Get the number of workers n_processes = nprocs() @@ -333,7 +319,7 @@ function artscene_filter(raw_image::Array{T, 3} ; distributed::Bool=true) where @debug "Stage 1 Complete: Grayscale: Size = $image_size, Type = $image_type" # Stage 2: Contrast normalization - x = contrast_normalization(image, distributed=true) + x = contrast_normalization(image, distributed=distributed) image_size = size(x) image_type = typeof(x) @debug "Stage 2 Complete: Contrast: Size = $image_size, Type = $image_type" @@ -361,4 +347,4 @@ function artscene_filter(raw_image::Array{T, 3} ; distributed::Bool=true) where @debug "Stage 6 Complete" return O, C -end # artscene_filter(raw_image::Array{T, 3} ; distributed::Bool=true) where {T<:Real} +end diff --git a/src/ARTMAP/DAM.jl b/src/ARTMAP/DAM.jl index f511eaf7..58c84b87 100644 --- a/src/ARTMAP/DAM.jl +++ b/src/ARTMAP/DAM.jl @@ -1,10 +1,10 @@ """ DAM.jl -Description: - Options, structures, and logic for the Default ARTMAP (DAM) module. +# Description: +Options, structures, and logic for the Default ARTMAP (DAM) module. -References: +# References: [1] G. P. 
Amis and G. A. Carpenter, “Default ARTMAP 2,” IEEE Int. Conf. Neural Networks - Conf. Proc., vol. 2, no. September 2007, pp. 777-782, Mar. 2007, doi: 10.1109/IJCNN.2007.4371056. """ @@ -13,76 +13,96 @@ References: # --------------------------------------------------------------------------- # """ - opts_DAM(;kwargs) - Implements a Default ARTMAP learner's options. -# Keyword Arguments -- `rho::Float`: vigilance value, [0, 1], default 0.75. -- `alpha::Float`: choice parameter, alpha > 0, default 1e-7. -- `epsilon::Float`: match tracking parameter, (0, 1), default 1e-3 -- `beta::Float`: learning parameter, (0, 1], default 1.0. -- `uncommitted::Bool`: uncommitted node flag, default true. -- `display::Bool`: display flag, default true. -- `max_epoch::Int`: maximum number of epochs during training, default 1. +$(opts_docstring) """ @with_kw mutable struct opts_DAM <: ARTOpts @deftype Float - # Vigilance parameter: [0, 1] + """ + Vigilance parameter: rho ∈ [0, 1]. + """ rho = 0.75; @assert rho >= 0.0 && rho <= 1.0 - # Choice parameter: alpha > 0 + + """ + Choice parameter: alpha > 0. + """ alpha = 1e-7; @assert alpha > 0.0 - # Match tracking parameter: (0, 1) + + """ + Match tracking parameter: epsilon ∈ (0, 1). + """ epsilon = 1e-3; @assert epsilon > 0.0 && epsilon < 1.0 - # Learning parameter: (0, 1] + + """ + Learning parameter: beta ∈ (0, 1]. + """ beta = 1.0; @assert beta > 0.0 && beta <= 1.0 - # Uncommitted node flag + + """ + Maximum number of epochs during training: max_epochs ∈ [1, Inf). + """ + max_epochs::Int = 1 + + """ + Uncommitted node flag. + """ uncommitted::Bool = true - # Display flag + + """ + Display flag. + """ display::Bool = true - # Maximum number of epochs during training - max_epochs::Int = 1 -end # opts_DAM() +end # --------------------------------------------------------------------------- # # STRUCTS # --------------------------------------------------------------------------- # """ - DAM <: ARTMAP - Default ARTMAP struct. For module options, see [`AdaptiveResonance.opts_DAM`](@ref). -# Option Parameters -- `opts::opts_DAM`: Default ARTMAP options struct. -- `config::DataConfig`: data configuration struct. - -# Working Parameters -- `W::RealMatrix`: category weight matrix. -- `labels::IntegerVector`: incremental list of labels corresponding to each F2 node, self-prescribed or supervised. -- `n_categories::Int`: number of category weights (F2 nodes). -- `epoch::Int`: current training epoch. - # References 1. G. P. Amis and G. A. Carpenter, “Default ARTMAP 2,” IEEE Int. Conf. Neural Networks - Conf. Proc., vol. 2, no. September 2007, pp. 777-782, Mar. 2007, doi: 10.1109/IJCNN.2007.4371056. """ mutable struct DAM <: ARTMAP + """ + Default ARTMAP options struct. + """ opts::opts_DAM + + """ + Data configuration struct. + """ config::DataConfig - W::RealMatrix - labels::IntegerVector + + """ + Category weight matrix. + """ + W::Matrix{Float} + + """ + Incremental list of labels corresponding to each F2 node, self-prescribed or supervised. + """ + labels::Vector{Int} + + """ + Number of category weights (F2 nodes). + """ n_categories::Int + + """ + Current training epoch. + """ epoch::Int -end # DAM <: ARTMAP +end # --------------------------------------------------------------------------- # # CONSTRUCTORS # --------------------------------------------------------------------------- # """ - DAM(;kwargs...) - Implements a Default ARTMAP learner with optional keyword arguments. # Examples @@ -105,11 +125,9 @@ DAM function DAM(;kwargs...) opts = opts_DAM(;kwargs...)
DAM(opts) -end # DAM(;kwargs...) +end """ - DAM(opts) - Implements a Default ARTMAP learner with specified options. # Examples @@ -130,7 +148,7 @@ function DAM(opts::opts_DAM) 0, # n_categories 0 # epoch ) -end # DAM(opts::opts_DAM) +end # --------------------------------------------------------------------------- # # ALGORITHMIC METHODS @@ -234,44 +252,36 @@ function classify(art::DAM, x::RealVector ; preprocessed::Bool=false, get_bmu::B end """ - stopping_conditions(art::DAM) - Stopping conditions for Default ARTMAP, checked at the end of every epoch. """ function stopping_conditions(art::DAM) # Compute the stopping condition, return a bool return art.epoch >= art.opts.max_epochs -end # stopping_conditions(art::DAM) +end """ - activation(art::DAM, x::RealVector, W::RealVector) - Default ARTMAP's choice-by-difference activation function. """ function activation(art::DAM, x::RealVector, W::RealVector) # Compute T and return - return norm(element_min(x, W), 1) + - (1-art.opts.alpha)*(art.config.dim - norm(W, 1)) -end # activation(art::DAM, x::RealVector, W::RealVector) + return ( + norm(element_min(x, W), 1) + + (1 - art.opts.alpha) * (art.config.dim - norm(W, 1)) + ) +end """ - learn(art::DAM, x::RealVector, W::RealVector) - -Returns a single updated weight for the Default ARTMAP module for weight -vector W and sample x. +Returns a single updated weight for the Default ARTMAP module for weight vector W and sample x. """ function learn(art::DAM, x::RealVector, W::RealVector) # Update W return art.opts.beta .* element_min(x, W) .+ W .* (1 - art.opts.beta) -end # learn(art::DAM, x::RealVector, W::RealVector) +end """ - art_match(art::DAM, x::RealVector, W::RealVector) - -Returns the match function for the Default ARTMAP module with weight W and -sample x. +Returns the match function for the Default ARTMAP module with weight W and sample x. """ function art_match(art::DAM, x::RealVector, W::RealVector) # Compute M and return return norm(element_min(x, W), 1) / art.config.dim -end # art_match(art::DAM, x::RealVector, W::RealVector) +end diff --git a/src/ARTMAP/FAM.jl b/src/ARTMAP/FAM.jl index 4513af01..2981a158 100644 --- a/src/ARTMAP/FAM.jl +++ b/src/ARTMAP/FAM.jl @@ -1,10 +1,10 @@ """ FAM.jl -Description: - Options, structures, and logic for the Fuzzy ARTMAP (FAM) module. +# Description: +Options, structures, and logic for the Fuzzy ARTMAP (FAM) module. -References: +# References: [1] G. A. Carpenter, S. Grossberg, N. Markuzon, J. H. Reynolds, and D. B. Rosen, “Fuzzy ARTMAP: A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps,” IEEE Trans. Neural Networks, vol. 3, no. 5, pp. 698-713, 1992, doi: 10.1109/72.159059. """ @@ -13,34 +13,45 @@ References: # --------------------------------------------------------------------------- # """ - opts_FAM(;kwargs) - Implements a Fuzzy ARTMAP learner's options. -# Keyword Arguments -- `rho::Float`: vigilance value, [0, 1], default 0.6. -- `alpha::Float`: choice parameter, alpha > 0, default 1e-7. -- `epsilon::Float`: match tracking parameter, (0, 1), default 1e-3 -- `beta::Float`: learning parameter, (0, 1], default 1.0. -- `uncommitted::Bool`: uncommitted node flag, default true. -- `display::Bool`: display flag, default true. -- `max_epoch::Int`: maximum number of epochs during training, default 1. +$(opts_docstring) """ @with_kw mutable struct opts_FAM <: ARTOpts @deftype Float - # Vigilance parameter: [0, 1] + """ + Vigilance parameter: rho ∈ [0, 1]. 
+ """ rho = 0.6; @assert rho >= 0.0 && rho <= 1.0 - # Choice parameter: alpha > 0 + + """ + Choice parameter: alpha > 0. + """ alpha = 1e-7; @assert alpha > 0.0 - # Match tracking parameter: (0, 1) + + """ + Match tracking parameter: epsilon ∈ (0, 1). + """ epsilon = 1e-3; @assert epsilon > 0.0 && epsilon < 1.0 - # Learning parameter: (0, 1] + + """ + Learning parameter: beta ∈ (0, 1]. + """ beta = 1.0; @assert beta > 0.0 && beta <= 1.0 - # Uncommitted node flag + + """ + Maximum number of epochs during training: max_epochs ∈ [1, Inf) + """ + max_epochs::Int = 1 + + """ + Uncommitted node flag. + """ uncommitted::Bool = true - # Display flag + + """ + Display flag. + """ display::Bool = true - # Maximum number of epochs during training - max_epochs::Int = 1 end # opts_FAM() # --------------------------------------------------------------------------- # @@ -48,31 +59,42 @@ end # opts_FAM() # --------------------------------------------------------------------------- # """ - FAM <: ARTMAP - Fuzzy ARTMAP struct. For module options, see [`AdaptiveResonance.opts_FAM`](@ref). -# Option Parameters -- `opts::opts_FAM`: Fuzzy ARTMAP options struct. -- `config::DataConfig`: data configuration struct. - -# Working Parameters -- `W::RealMatrix`: category weight matrix. -- `labels::IntegerVector`: incremental list of labels corresponding to each F2 node, self-prescribed or supervised. -- `n_categories::Int`: number of category weights (F2 nodes). -- `epoch::Int`: current training epoch. - # References 1. G. A. Carpenter, S. Grossberg, N. Markuzon, J. H. Reynolds, and D. B. Rosen, “Fuzzy ARTMAP: A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps,” IEEE Trans. Neural Networks, vol. 3, no. 5, pp. 698-713, 1992, doi: 10.1109/72.159059. """ mutable struct FAM <: ARTMAP + """ + Fuzzy ARTMAP options struct. + """ opts::opts_FAM + + """ + Data configuration struct. + """ config::DataConfig - W::RealMatrix - labels::IntegerVector + + """ + Category weight matrix. + """ + W::Matrix{Float} + + """ + Incremental list of labels corresponding to each F2 node, self-prescribed or supervised. + """ + labels::Vector{Int} + + """ + Number of category weights (F2 nodes). + """ n_categories::Int + + """ + Current training epoch. + """ epoch::Int end # FAM <: ARTMAP @@ -81,8 +103,6 @@ end # FAM <: ARTMAP # --------------------------------------------------------------------------- # """ - FAM(;kwargs...) - Implements a Fuzzy ARTMAP learner with optional keyword arguments. # Examples @@ -108,8 +128,6 @@ function FAM(;kwargs...) end # FAM(;kwargs...) """ - FAM(opts) - Implements a Fuzzy ARTMAP learner with specified options. # Examples @@ -122,9 +140,10 @@ FAM ``` """ function FAM(opts::opts_FAM) - FAM(opts, # opts_FAM + FAM( + opts, # opts_FAM DataConfig(), # config - Array{Float}(undef, 0,0), # W + Array{Float}(undef, 0, 0), # W Array{Int}(undef, 0), # labels 0, # n_categories 0 # epoch diff --git a/src/ARTMAP/SFAM.jl b/src/ARTMAP/SFAM.jl index d52cc42b..a4e5b3bf 100644 --- a/src/ARTMAP/SFAM.jl +++ b/src/ARTMAP/SFAM.jl @@ -1,11 +1,11 @@ """ SFAM.jl -Description: - Options, structures, and logic for the Simplified Fuzzy ARTMAP (SFAM) module. +# Description: +Options, structures, and logic for the Simplified Fuzzy ARTMAP (SFAM) module. -References: - [1] G. A. Carpenter, S. Grossberg, N. Markuzon, J. H. Reynolds, and D. B. Rosen, “Fuzzy ARTMAP: A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps,” IEEE Trans. Neural Networks, vol. 
3, no. 5, pp. 698-713, 1992, doi: 10.1109/72.159059. +# References: +[1] G. A. Carpenter, S. Grossberg, N. Markuzon, J. H. Reynolds, and D. B. Rosen, “Fuzzy ARTMAP: A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps,” IEEE Trans. Neural Networks, vol. 3, no. 5, pp. 698-713, 1992, doi: 10.1109/72.159059. """ # --------------------------------------------------------------------------- # @@ -13,78 +13,101 @@ References: # --------------------------------------------------------------------------- # """ - opts_SFAM(;kwargs) - Implements a Simple Fuzzy ARTMAP learner's options. -# Keyword Arguments -- `rho::Float`: vigilance value, [0, 1], default 0.75. -- `alpha::Float`: choice parameter, alpha > 0, default 1e-7. -- `epsilon::Float`: match tracking parameter, (0, 1), default 1e-3 -- `beta::Float`: learning parameter, (0, 1], default 1.0. -- `uncommitted::Bool`: uncommitted node flag, default true. -- `display::Bool`: display flag, default true. -- `max_epoch::Int`: maximum number of epochs during training, default 1. +$(opts_docstring) """ @with_kw mutable struct opts_SFAM <: ARTOpts @deftype Float - # Vigilance parameter: [0, 1] + """ + Vigilance parameter: rho ∈ [0, 1]. + """ rho = 0.75; @assert rho >= 0.0 && rho <= 1.0 - # Choice parameter: alpha > 0 + + """ + Choice parameter: alpha > 0. + """ alpha = 1e-7; @assert alpha > 0.0 - # Match tracking parameter + + """ + Match tracking parameter: epsilon ∈ (0, 1). + """ epsilon = 1e-3; @assert epsilon > 0.0 && epsilon < 1.0 - # Learning parameter: (0, 1] + + """ + Learning parameter: beta ∈ (0, 1]. + """ beta = 1.0; @assert beta > 0.0 && beta <= 1.0 - # Uncommitted node flag + + """ + Maximum number of epochs during training: max_epochs ∈ [1, Inf). + """ + max_epochs::Int = 1 + + """ + Uncommitted node flag. + """ uncommitted::Bool = true - # Display flag + + """ + Display flag. + """ display::Bool = true - # Maximum number of epochs during training - max_epochs::Int = 1 -end # opts_SFAM() +end # --------------------------------------------------------------------------- # # STRUCTS # --------------------------------------------------------------------------- # """ - SFAM <: ARTMAP - Simple Fuzzy ARTMAP struct. For module options, see [`AdaptiveResonance.opts_SFAM`](@ref). -# Option Parameters -- `opts::opts_SFAM`: Simplified Fuzzy ARTMAP options struct. -- `config::DataConfig`: data configuration struct. - -# Working Parameters -- `W::RealMatrix`: category weight matrix. -- `labels::IntegerVector`: incremental list of labels corresponding to each F2 node, self-prescribed or supervised. -- `n_categories::Int`: number of category weights (F2 nodes). -- `epoch::Int`: current training epoch. - # References 1. G. A. Carpenter, S. Grossberg, N. Markuzon, J. H. Reynolds, and D. B. Rosen, “Fuzzy ARTMAP: A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps,” IEEE Trans. Neural Networks, vol. 3, no. 5, pp. 698-713, 1992, doi: 10.1109/72.159059. """ mutable struct SFAM <: ARTMAP + """ + Simplified Fuzzy ARTMAP options struct. + """ opts::opts_SFAM + + """ + Data configuration struct. + """ config::DataConfig - W::RealMatrix - labels::IntegerVector + + """ + Category weight matrix. + """ + W::Matrix{Float} + + """ + Incremental list of labels corresponding to each F2 node, self-prescribed or supervised. + """ + labels::Vector{Int} + + """ + Number of category weights (F2 nodes). + """ n_categories::Int + + """ + Current training epoch. 
+ """ epoch::Int -end # SFAM <: ARTMAP +end # --------------------------------------------------------------------------- # # CONSTRUCTORS # --------------------------------------------------------------------------- # """ - SFAM(;kwargs...) - Implements a Simple Fuzzy ARTMAP learner with optional keyword arguments. +# Arguments +- `kwargs`: keyword arguments to pass to the Simple Fuzzy ARTMAP options struct (see [`AdaptiveResonance.opts_SFAM`](@ref)). + # Examples By default: ```julia-repl @@ -96,7 +119,7 @@ SFAM or with keyword arguments: ```julia-repl -julia> SFAM() +julia> SFAM(rho=0.6) SFAM opts: opts_SFAM ... @@ -105,13 +128,14 @@ SFAM function SFAM(;kwargs...) opts = opts_SFAM(;kwargs...) SFAM(opts) -end # SFAM(;kwargs...) +end """ - SFAM(opts) - Implements a Simple Fuzzy ARTMAP learner with specified options. +# Arguments +- `opts::opts_SFAM`: the Simple Fuzzy ARTMAP options (see [`AdaptiveResonance.opts_SFAM`](@ref)). + # Examples ```julia-repl julia> opts = opts_SFAM() @@ -130,7 +154,7 @@ function SFAM(opts::opts_SFAM) 0, # n_categories 0 # epoch ) -end # SFAM(opts::opts_SFAM) +end # --------------------------------------------------------------------------- # # ALGORITHMIC METHODS # --------------------------------------------------------------------------- # @@ -234,44 +258,36 @@ function classify(art::SFAM, x::RealVector ; preprocessed::Bool=false, get_bmu:: end """ - stopping_conditions(art::SFAM) - Stopping conditions for Simple Fuzzy ARTMAP, checked at the end of every epoch. """ function stopping_conditions(art::SFAM) # Compute the stopping condition, return a bool return art.epoch >= art.opts.max_epochs -end # stopping_conditions(art::SFAM) +end """ - learn(art::SFAM, x::RealVector, W::RealVector) - Returns a single updated weight for the Simple Fuzzy ARTMAP module for weight vector W and sample x. """ function learn(art::SFAM, x::RealVector, W::RealVector) # Update W return art.opts.beta .* element_min(x, W) .+ W .* (1 - art.opts.beta) -end # learn(art::SFAM, x::RealVector, W::RealVector) +end """ - activation(art::SFAM, x::RealVector, W::RealVector) - Returns the activation value of the Simple Fuzzy ARTMAP module with weight W and sample x. """ function activation(art::SFAM, x::RealVector, W::RealVector) # Compute T and return return norm(element_min(x, W), 1) / (art.opts.alpha + norm(W, 1)) -end # activation(art::SFAM, x::RealVector, W::RealVector) +end """ - art_match(art::SFAM, x::RealVector, W::RealVector) - Returns the match function for the Simple Fuzzy ARTMAP module with weight W and sample x. """ function art_match(art::SFAM, x::RealVector, W::RealVector) # Compute M and return return norm(element_min(x, W), 1) / art.config.dim -end # art_match(art::SFAM, x::RealVector, W::RealVector) +end diff --git a/src/ARTMAP/common.jl b/src/ARTMAP/common.jl index 78d9f6a8..06616e30 100644 --- a/src/ARTMAP/common.jl +++ b/src/ARTMAP/common.jl @@ -1,13 +1,13 @@ """ common.jl -Description: - Includes all of the unsupervised ARTMAP modules common code. +# Description: +Includes all of the supervised ARTMAP modules common code.
""" -# ------------------------------------------- -# Methods -# ------------------------------------------- +# --------------------------------------------------------------------------- # +# FUNCTIONS +# --------------------------------------------------------------------------- # """ train!(art::ARTMAP, x::RealMatrix, y::IntegerVector, preprocessed::Bool=false) @@ -37,13 +37,12 @@ function train!(art::ARTMAP, x::RealMatrix, y::IntegerVector, preprocessed::Bool while true # Increment the epoch and get the iterator art.epoch += 1 - iter = get_iterator(art.opts, x) + iter = get_iterator(art.opts, n_samples) for i = iter # Update the iterator if necessary update_iter(art, iter, i) # Grab the sample slice - # sample = get_sample(x, i) - sample = x[:, i] + sample = get_sample(x, i) label = y[i] # Train upon the sample and label y_hat[i] = train!(art, sample, label, preprocessed=true) @@ -54,12 +53,13 @@ function train!(art::ARTMAP, x::RealMatrix, y::IntegerVector, preprocessed::Bool break end end + return y_hat -end # train!(art::ARTMAP, x::RealMatrix, y::IntegerVector, preprocessed::Bool=false) +end -# ------------------------------------------- -# Common Documentation -# ------------------------------------------- +# --------------------------------------------------------------------------- # +# COMMON DOCUMENTATION +# --------------------------------------------------------------------------- # @doc raw""" train!(art::ARTMAP, x::RealVector, y::Integer ; preprocessed::Bool=false) diff --git a/src/AdaptiveResonance.jl b/src/AdaptiveResonance.jl index 2e4b0618..335fa365 100644 --- a/src/AdaptiveResonance.jl +++ b/src/AdaptiveResonance.jl @@ -2,58 +2,104 @@ Main module for `AdaptiveResonance.jl`, a Julia package of adaptive resonance theory algorithms. This module exports all of the ART modules, options, and utilities used by the `AdaptiveResonance.jl` package. +For full usage, see the official guide at https://ap6yc.github.io/AdaptiveResonance.jl/dev/man/guide/. -# Exports +# Basic Usage -$(EXPORTS) +Install and import the package in a script with -""" -module AdaptiveResonance +```julia +using Pkg +Pkg.add("AdaptiveResonance") +using AdaptiveResonance +``` -# --------------------------------------------------------------------------- # -# USINGS -# --------------------------------------------------------------------------- # +then create an ART module with default options -# Usings/imports for the whole package declared once -using DocStringExtensions # Docstring utilities -using Parameters # ARTopts are parameters (@with_kw) -using Logging # Logging utils used as main method of terminal reporting -using ProgressBars # Provides progress bars for training and inference -using Printf # Used for printing formatted progress bars -using LinearAlgebra: norm # Trace and norms -using Statistics: median, mean # Medians and mean for linkage methods +```julia +my_art = DDVFA() +``` -# --------------------------------------------------------------------------- # -# ABSTRACT TYPES -# --------------------------------------------------------------------------- # +or custom options via keyword arguments -""" - ARTOpts +```julia +my_art = DDVFA(rho_lb=0.45, rho_ub=0.7) +``` -Abstract supertype for all ART module options. -""" -abstract type ARTOpts end # ART module options +Train all models with `train!` and conduct inference with `classify`. +In batch, samples are interpreted in the Julia column-major fashion with dimensions `(n_dim, n_samples)` (i.e., columns are samples).
-""" - ARTModule +Train unsupervised ART modules incrementally or in batch with optional labels as a keyword argument `y` -Abstract supertype for both ART (unsupervised) and ARTMAP (supervised) modules. -""" -abstract type ARTModule end # ART modules +```julia +# Load your data somehow +samples, labels = load_some_data() -""" - ART <: ARTModule +# Unsupervised batch +train!(my_art, samples) -Abstract supertype for all default unsupervised ART modules. -""" -abstract type ART <: ARTModule end # ART (unsupervised) +# Supervised batch +train!(my_art, samples, y=labels) -""" - ARTMAP <: ARTModule +# Unsupervised incremental +for ix in eachindex(labels) + train!(my_art, samples[:, ix]) +end + +# Supervised incremental +for ix in eachindex(labels) + train!(my_art, samples[:, ix], y=labels[ix]) +end +``` + +Train supervised ARTMAP with positional arguments + +```julia +my_artmap = SFAM() +train!(my_artmap, samples, labels) +``` + +With either module, conduct inference with `classify(art, samples)` + +```julia +# Batch inference +y_hat = classify(my_art, test_samples) -Abstract supertype for all supervised ARTMAP modules. +# Incremental inference +for ix in eachindex(test_labels) + y_hat[ix] = classify(my_artmap, test_samples[:, ix]) +end +``` + +# Imports + +The following names are imported by the package as dependencies: +$(IMPORTS) + +# Exports + +The following names are exported and available when `using` the package: +$(EXPORTS) """ -abstract type ARTMAP <: ARTModule end # ARTMAP (supervised) +module AdaptiveResonance + +# --------------------------------------------------------------------------- # +# DEPENDENCIES +# --------------------------------------------------------------------------- # + +# Usings/imports for the whole package declared once + +# Full usings (which supports comma-separated import notation) +using + DocStringExtensions, # Docstring utilities + Logging, # Logging utils used as main method of terminal reporting + NumericalTypeAliases, # Abstract type aliases + Parameters, # ARTopts are parameters (@with_kw) + ProgressBars # Provides progress bars for training and inference + +# Partial usings (which does not yet support comma-separated import notation) +using LinearAlgebra: norm # Trace and norms +using Statistics: median, mean # Medians and mean for linkage methods # --------------------------------------------------------------------------- # # INCLUDES @@ -65,6 +111,18 @@ include("constants.jl") # Global constants and references for convenience include("ARTMAP/ARTMAP.jl") # Supervised ART modules include("ART/ART.jl") # Unsupervised ART modules +# --------------------------------------------------------------------------- # +# DERIVATIVE TYPES AND CONSTANTS +# --------------------------------------------------------------------------- # + +""" +A combined list of all unsupervised ART and supervised ARTMAP modules from the `AdaptiveResonance.jl` package. 
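A small sketch of how the combined module list described above might be consumed; this mirrors the keyword-arguments test later in this changeset and assumes the constant is exported along with the module constructors:

```julia
using AdaptiveResonance

# Construct one instance of every registered ART/ARTMAP module with default options
arts = [art() for art in ADAPTIVE_RESONANCE_MODULES]
```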
+""" +const ADAPTIVE_RESONANCE_MODULES = [ + ART_MODULES; + ARTMAP_MODULES; +] + # --------------------------------------------------------------------------- # # EXPORTS # --------------------------------------------------------------------------- # @@ -104,6 +162,12 @@ export DAM, opts_DAM, SFAM, opts_SFAM, + # Useful constants + ART_MODULES, # List of (default) unsupervised ART modules + ARTMAP_MODULES, # List of supervised ARTMAP modules + ADAPTIVE_RESONANCE_MODULES, # Combined list of ART and ARTMAP modules + DDVFA_METHODS, # DDVFA linkage methods + # ARTSCENE filter functions color_to_gray, contrast_normalization, diff --git a/src/common.jl b/src/common.jl index 522a503e..9ac30a49 100644 --- a/src/common.jl +++ b/src/common.jl @@ -1,64 +1,118 @@ -# ------------------------------------------- -# Document: common.jl -# Author: Sasha Petrenko -# Description: -# Types and functions that are used throughout AdaptiveResonance.jl. -# ------------------------------------------- - -# ------------------------------------------- -# Aliases -# ------------------------------------------- -# **Taken from StatsBase.jl** -# -# These types signficantly reduces the need of using -# type parameters in functions (which are often just -# for the purpose of restricting the arrays to real) -# -# These could be removed when the Base supports -# covariant type notation, i.e. AbstractVector{<:Real} - -# Real-numbered aliases -const RealArray{T<:Real, N} = AbstractArray{T, N} -const RealVector{T<:Real} = AbstractArray{T, 1} -const RealMatrix{T<:Real} = AbstractArray{T, 2} - -# Integered aliases -const IntegerArray{T<:Integer, N} = AbstractArray{T, N} -const IntegerVector{T<:Integer} = AbstractArray{T, 1} -const IntegerMatrix{T<:Integer} = AbstractArray{T, 2} - -# Specifically floating-point aliases -const RealFP = Union{Float32, Float64} - -# System's largest native floating point variable -const Float = (Sys.WORD_SIZE == 64 ? Float64 : Float32) - -# Acceptable iterators for ART module training and inference -const ARTIterator = Union{UnitRange, ProgressBar} +""" + common.jl + +# Description +Types and functions that are used throughout AdaptiveResonance.jl. + +# Authors +- Sasha Petrenko +""" + +# --------------------------------------------------------------------------- # +# DOCSTRING TEMPLATES +# --------------------------------------------------------------------------- # + +# Constants template +@template CONSTANTS = +""" +$(FUNCTIONNAME) + +# Description +$(DOCSTRING) +""" + +# Types template +@template TYPES = +""" +$(TYPEDEF) + +# Summary +$(DOCSTRING) + +# Fields +$(TYPEDFIELDS) +""" + +# Template for functions, macros, and methods (i.e., constructors) +@template (FUNCTIONS, METHODS, MACROS) = +""" +$(TYPEDSIGNATURES) + +# Summary +$(DOCSTRING) + +# Method List / Definition Locations +$(METHODLIST) +""" + +# --------------------------------------------------------------------------- # +# ABSTRACT TYPES +# --------------------------------------------------------------------------- # + +""" +Abstract supertype for all ART module options. +""" +abstract type ARTOpts end # ART module options + +""" +Abstract supertype for both ART (unsupervised) and ARTMAP (supervised) modules. +""" +abstract type ARTModule end # ART modules + +""" +Abstract supertype for all default unsupervised ART modules. +""" +abstract type ART <: ARTModule end # ART (unsupervised) + +""" +Abstract supertype for all supervised ARTMAP modules. 
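The abstract types in this hierarchy mainly exist for dispatch; a brief sketch of how they relate in practice, assuming the abstract types are exported like the concrete modules (otherwise qualify them with `AdaptiveResonance.`):

```julia
using AdaptiveResonance

art = DDVFA()      # an unsupervised module
artmap = SFAM()    # a supervised module

art isa ART                                  # true
artmap isa ARTMAP                            # true
art isa ARTModule && artmap isa ARTModule    # true: both share the common supertype
```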
+""" +abstract type ARTMAP <: ARTModule end # ARTMAP (supervised) + +""" + const ARTIterator = Union{UnitRange, ProgressBar} +Acceptable iterators for ART module training and inference """ - DataConfig +const ARTIterator = Union{UnitRange, ProgressBar} + +# --------------------------------------------------------------------------- # +# COMPOSITE TYPES +# --------------------------------------------------------------------------- # +""" Container to standardize training/testing data configuration. -# Parameters -- `setup::Bool`: flag if data has been setup yet or not. -- `mins::RealVector`: list of minimum values for each feature. -- `maxs::RealVector`: list of maximum values for each feature. -- `dim::Int`: dimensionality of the feature vectors (i.e., number of features). -- `dim_comp::Int` complement coded feature dimensionality, twice the size of `dim`. +This container declares if a data configuration has been setup, what the original and complement coded dimensions are, and what the minimums and maximums of the values along each feature dimension are. """ mutable struct DataConfig + """ + Flag if data has been setup yet or not. + """ setup::Bool - mins::RealVector - maxs::RealVector + + """ + List of minimum values for each feature. + """ + mins::Vector{Float} + + """ + List of maximum values for each feature. + """ + maxs::Vector{Float} + + """ + Dimensionality of the feature vectors (i.e., number of features). + """ dim::Int + + """ + Complement coded feature dimensionality, twice the size of `dim`. + """ dim_comp::Int -end # DataConfig +end """ - DataConfig() - Default constructor for a data configuration, not set up. """ function DataConfig() @@ -69,14 +123,16 @@ function DataConfig() 0, # dim 0 # dim_comp ) -end # DataConfig() +end """ - DataConfig(mins::RealVector, maxs::RealVector) - Convenience constructor for DataConfig, requiring only mins and maxs of the features. This constructor is used when the mins and maxs differ across features. The dimension is inferred by the length of the mins and maxs. + +# Arguments +- `mins::RealVector`: a vector of minimum values for each feature dimension. +- `maxs::RealVector`: a vector of maximum values for each feature dimension. """ function DataConfig(mins::RealVector, maxs::RealVector) # Verify that the mins and maxs are the same length @@ -89,31 +145,35 @@ function DataConfig(mins::RealVector, maxs::RealVector) mins, # min maxs, # max dim, # dim - dim*2 # dim_comp + dim * 2 # dim_comp ) -end # DataConfig(mins::RealVector, maxs::RealVector) +end """ - DataConfig(min::Real, max::Real, dim::Int) - Convenience constructor for DataConfig, requiring only a global min, max, and dim. This constructor is used in the case that the feature mins and maxs are all the same respectively. + +# Arguments +- `min::Real`: the minimum value across all features. +- `max::Real`: the maximum value across all features. +- `dim::Integer`: the dimension of the features, which must be provided because it cannot be inferred from just the minimum or maximum values. """ -function DataConfig(min::Real, max::Real, dim::Int) +function DataConfig(min::Real, max::Real, dim::Integer) DataConfig( true, # setup repeat([min], dim), # min repeat([max], dim), # max dim, # dim - dim*2 # dim_comp + dim * 2 # dim_comp ) -end # DataConfig(min::Real, max::Real, dim::Int) +end """ - DataConfig(data::RealMatrix) - Convenience constructor for DataConfig, requiring only the data matrix. 
+ +# Arguments +- `data::RealMatrix`: the 2-D batch of data to be used for inferring the data configuration. """ function DataConfig(data::RealMatrix) # Create an empty dataconfig @@ -124,22 +184,30 @@ function DataConfig(data::RealMatrix) # Return the constructed DataConfig return config -end # DataConfig(min::Real, max::Real, dim::Int) +end -""" - element_min(x::RealVector, W::RealVector) +# --------------------------------------------------------------------------- # +# FUNCTIONS +# --------------------------------------------------------------------------- # +""" Returns the element-wise minimum between sample x and weight W. + +# Arguments +- `x::RealVector`: the input sample. +- `W::RealVector`: the weight vector to compare the sample against. """ function element_min(x::RealVector, W::RealVector) # Compute the element-wise minimum of two vectors return minimum([x W], dims = 2) -end # element_min(x::RealVector, W::RealVector) +end """ - performance(y_hat::IntegerVector, y::IntegerVector) +Convenience function to get the categorization performance of y_hat against y. -Returns the categorization performance of y_hat against y. +# Arguments +- `y_hat::IntegerVector`: the estimated labels. +- `y::IntegerVector`: the true labels. """ function performance(y_hat::IntegerVector, y::IntegerVector) # Get the number of labels @@ -159,12 +227,13 @@ function performance(y_hat::IntegerVector, y::IntegerVector) end return n_correct/n_y -end # performance(y_hat::IntegerVector, y::IntegerVector) +end """ - get_data_shape(data::RealArray) - Returns the correct feature dimension and number of samples. + +# Arguments +- `data::RealArray`: the 1-D or 2-D data to get the dimension and number of samples from. 1-D data is interpreted as a single sample. """ function get_data_shape(data::RealArray) # Get the correct dimensionality and number of samples @@ -178,12 +247,13 @@ function get_data_shape(data::RealArray) end return dim, n_samples -end # get_data_shape(data::RealArray) +end """ - get_n_samples(data::RealArray) - Returns the number of samples, accounting for 1-D and 2-D arrays. + +# Arguments +- `data::RealArray`: the 1-D or 2-D data to infer the number of samples from. """ function get_n_samples(data::RealArray) # Get the correct dimensionality and number of samples @@ -195,12 +265,16 @@ function get_n_samples(data::RealArray) end return n_samples -end # get_n_samples(data::RealArray) +end """ - data_setup!(config::DataConfig, data::RealMatrix) - Sets up the data config for the ART module before training. + +This function crucially gets the original and complement-coded dimensions of the data, and it infers the bounds of the data (minimums and maximums) by the largest and smallest values along each feature dimension. + +# Arguments +- `config::DataConfig`: the ART/ARTMAP module's data configuration object. +- `data::RealMatrix`: the 2-D batch of data to use for creating the data configuration. 
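For context on the preprocessing flow that `data_setup!` and `complement_code` implement, here is a hypothetical end-to-end sketch; the data values and bounds are invented, and the exported names are assumed to follow the rest of the package:

```julia
using AdaptiveResonance

# Hypothetical batch: 2 features (rows) by 4 samples (columns)
data = [0.0 0.25 0.5 1.0; 1.0 0.75 0.5 0.0]

# Infer dimensions and feature bounds ahead of incremental training
art = DDVFA()
data_setup!(art, data)

# Complement coding doubles the feature dimension: each column becomes [x; 1 .- x]
x_cc = complement_code(data, config=art.config)
size(x_cc)    # (4, 4)
```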
""" function data_setup!(config::DataConfig, data::RealMatrix) if config.setup @@ -211,17 +285,19 @@ function data_setup!(config::DataConfig, data::RealMatrix) # Get the correct dimensionality and number of samples config.dim, _ = get_data_shape(data) - config.dim_comp = 2*config.dim + config.dim_comp = 2 * config.dim # Compute the ranges of each feature config.mins = [minimum(data[i, :]) for i in 1:config.dim] config.maxs = [maximum(data[i, :]) for i in 1:config.dim] -end # data_setup!(config::DataConfig, data::RealMatrix) +end """ - data_setup!(art::ARTModule, data::RealMatrix) - Convenience method for setting up the DataConfig of an ART module in advance. + +# Arguments +- `art::ARTModule`: the ART/ARTMAP module to manually configure the data config for. +- `data::RealArray`: the 2-D batch of data used to create the data config. """ function data_setup!(art::ARTModule, data::RealMatrix) # Modify the DataConfig of the ART module directly @@ -229,13 +305,16 @@ function data_setup!(art::ARTModule, data::RealMatrix) end # data_setup!(art::ART, data::RealMatrix) """ - get_data_characteristics(data::RealArray ; config::DataConfig=DataConfig()) - Get the characteristics of the data, taking account if a data config is passed. -If no DataConfig is passed, then the data characteristics come from the array itself. Otherwise, use the config for the statistics of the data and the data array for the number of samples. +If no DataConfig is passed, then the data characteristics come from the array itself. +Otherwise, use the config for the statistics of the data and the data array for the number of samples. + +# Arguments +- `data::RealMatrix`: the 2-D data to be complement coded. +- `config::DataConfig=DataConfig()`: the data configuration for the ART/ARTMAP module. """ -function get_data_characteristics(data::RealArray ; config::DataConfig=DataConfig()) +function get_data_characteristics(data::RealMatrix ; config::DataConfig=DataConfig()) # If the data is setup, use the config if config.setup n_samples = get_n_samples(data) @@ -250,12 +329,14 @@ function get_data_characteristics(data::RealArray ; config::DataConfig=DataConfi maxs = [maximum(data[i, :]) for i in 1:dim] end return dim, n_samples, mins, maxs -end # get_data_characteristics(data::RealArray ; config::DataConfig=DataConfig()) +end """ - linear_normalization(data::RealVector ; config::DataConfig=DataConfig()) - Normalize the data to the range [0, 1] along each feature. + +# Arguments +- `data::RealVector`: the 1-D sample of data to normalize. +- `config::DataConfig=DataConfig()`: the data configuration from the ART/ARTMAP module. """ function linear_normalization(data::RealVector ; config::DataConfig=DataConfig()) # Vector normalization requires a setup DataConfig @@ -278,12 +359,14 @@ function linear_normalization(data::RealVector ; config::DataConfig=DataConfig() end end return x_raw -end # linear_normalization(data::RealArray ; config::DataConfig=DataConfig()) +end """ - linear_normalization(data::RealMatrix ; config::DataConfig=DataConfig()) - Normalize the data to the range [0, 1] along each feature. + +# Arguments +- `data::RealMatrix`: the 2-D batch of data to normalize. +- `config::DataConfig=DataConfig()`: the data configuration from the ART/ARTMAP module. 
""" function linear_normalization(data::RealMatrix ; config::DataConfig=DataConfig()) # Get the data characteristics @@ -309,12 +392,14 @@ function linear_normalization(data::RealMatrix ; config::DataConfig=DataConfig() end end return x_raw -end # linear_normalization(data::RealMatrix ; config::DataConfig=DataConfig()) +end """ - complement_code(data::RealArray ; config::DataConfig=DataConfig()) +Normalizes the data x to [0, 1] and returns the augmented vector [x, 1 - x]. -Normalize the data x to [0, 1] and returns the augmented vector [x, 1 - x]. +# Arguments +- `data::RealArray`: the 1-D or 2-D data to be complement coded. +- `config::DataConfig=DataConfig()`: the data configuration for the ART/ARTMAP module. """ function complement_code(data::RealArray ; config::DataConfig=DataConfig()) # Normalize the data @@ -322,65 +407,65 @@ function complement_code(data::RealArray ; config::DataConfig=DataConfig()) # Complement code the data and return a concatenated matrix return vcat(x_raw, 1 .- x_raw) -end # complement_code(data::RealArray ; config::DataConfig=DataConfig()) +end """ - get_iterator(opts::ARTOpts, x::Array) -""" -function get_iterator(opts::ARTOpts, x::RealArray) - # Show a progbar only if the data is 2-D and the option is on - dim, n_samples = get_data_shape(x) - single_sample = n_samples == 1 - - # Decide if using a progress bar or not - # Don't use one if either there is a single sample or the display option is off - prog_bar = single_sample ? false : opts.display +Creates an iterator object according to the ART/ARTMAP modules display settings for batch iteration. +# Arguments +- `opts::ARTOpts`: the ART/ARTMAP module's options containing display settings. +- `n_samples::Integer`: the number of iterations to create the iterator for. +""" +function get_iterator(opts::ARTOpts, n_samples::Integer) # Construct the iterator iter_raw = 1:n_samples - iter = prog_bar ? ProgressBar(iter_raw) : iter_raw + + # If we want a progress bar, construct one. Otherwise, return the raw iterator + iter = opts.display ? ProgressBar(iter_raw) : iter_raw return iter -end # get_iterator(opts::ARTOpts, x::RealArray) +end """ - update_iter(art::ARTModule, iter::ARTIterator, i::Int) +Updates the iteration of the ART/ARTMAP module, training or inference, according to its display settings. + +# Arguments +- `art::ARTModule`: the ART/ARTMAP module being iterated upon. +- `iter::ARTIterator`: the iterator object used in the training/inference loop. +- `i::Integer`: the iteration during training/inference that the iterator should be updated to. 
""" -function update_iter(art::ARTModule, iter::ARTIterator, i::Int) +function update_iter(art::ARTModule, iter::ARTIterator, i::Integer) # Check explicitly for each, as the function definition restricts the types if iter isa ProgressBar - set_description(iter, string(@sprintf("Ep: %i, ID: %i, Cat: %i", art.epoch, i, art.n_categories))) + set_description(iter, "Ep: $(art.epoch), ID: $(i), Cat: $(art.n_categories)") elseif iter isa UnitRange return end -end # update_iter(art::ARTModule, iter::Union{UnitRange, ProgressBar}, i::Int) - -""" - get_sample(x::RealArray, i::Int) - -Returns a sample from data array x safely, accounting for 1-D and -""" -function get_sample(x::RealArray, i::Int) - # Get the shape of the data, irrespective of data type - dim, n_samples = get_data_shape(x) - # Get the type shape of the array - x_dim = ndims(x) - # Initialize the sample 1-D array with the original dim - sample = zeros(dim) - # Short-circuit error if asking for index out of bounds - i > n_samples && error("Index of data array out of bounds.") - # Copy the contents of the input if we got a 1-D array - if x_dim == 1 - sample = x - # Otherwise, take the correct slice of the 2-D array - else - sample = x[:, i] - end - return sample -end # get_sample(x::RealArray, i::Int) +end + +""" +Returns a sample from data array `x` at sample location `i`. +This function implements the convention that columns are samples while rows are features within samples. +# Arguments +- `x::RealMatrix`: the batch of data to grab a sample from. +- `i::Integer`: the index to get the sample from. """ - init_train!(x::RealVector, art::ARTModule, preprocessed::Bool) +function get_sample(x::RealMatrix, i::Integer) + # Return the sample at location + return x[:, i] +end + +""" +Initializes the module for training in a single iteration. + +The purpose of this function is mainly to handle the conditions of complement coding. +Fails if the module was incorrectly set up or if the module was not setup and the data was not preprocessed. + +# Arguments +- `x::RealVector`: the sample used for initialization. +- `art::ARTModule`: the ART/ARTMAP module that will be trained on the sample. +- `preprocessed::Bool`: a required flag for if the sample has already been complement coded and normalized. """ function init_train!(x::RealVector, art::ARTModule, preprocessed::Bool) # If the data is not preprocessed @@ -396,7 +481,7 @@ function init_train!(x::RealVector, art::ARTModule, preprocessed::Bool) dim_comp = length(x) # If the complemented dimension is not even, error if !iseven(dim_comp) - error("Declare that the vector is preprocessed, but it is not even") + error("Declared that the vector is preprocessed, but it is not even") end # Half the complemented dimension and setup the DataConfig with that dim = Int(dim_comp/2) @@ -406,7 +491,12 @@ function init_train!(x::RealVector, art::ARTModule, preprocessed::Bool) end # init_train!(x::RealVector, art::ARTModule, preprocessed::Bool) """ - init_train!(x::RealMatrix, art::ARTModule, preprocessed::Bool) +Initializes the training loop for batch learning. + +# Arguments +- `x::RealMatrix`: the data that is used for training. +- `art::ARTModule`: the ART/ARTMAP that will be trained. +- `preprocessed::Bool`: required flag for if the data has already been complement coded and normalized. 
""" function init_train!(x::RealMatrix, art::ARTModule, preprocessed::Bool) # If the data is not preprocessed, then complement code it @@ -416,10 +506,15 @@ function init_train!(x::RealMatrix, art::ARTModule, preprocessed::Bool) x = complement_code(x, config=art.config) end return x -end # init_train!(x::RealMatrix, art::ART, preprocessed::Bool) +end """ - init_classify!(x::RealArray, art::ARTModule, preprocessed::Bool) +Initializes the classification loop for batch inference. + +# Arguments +- `x::RealArray`: the data that is used for inference. +- `art::ARTModule`: the ART/ARTMAP module that will be used for inference. +- `preprocessed::Bool`: required flag for if the data has already been complement coded and normalized. """ function init_classify!(x::RealArray, art::ARTModule, preprocessed::Bool) # If the data is not preprocessed @@ -432,12 +527,9 @@ function init_classify!(x::RealArray, art::ARTModule, preprocessed::Bool) x = complement_code(x, config=art.config) end return x -end # init_classify!(x::RealArray, art::ART, preprocessed::Bool) - +end """ - classify(art::ARTModule, x::RealMatrix ; preprocessed::Bool=false, get_bmu::Bool=false) - Predict categories of 'x' using the ART model. Returns predicted categories 'y_hat.' @@ -473,7 +565,7 @@ function classify(art::ARTModule, x::RealMatrix ; preprocessed::Bool=false, get_ y_hat = zeros(Int, n_samples) # Get the iterator based on the module options and data shape - iter = get_iterator(art.opts, x) + iter = get_iterator(art.opts, n_samples) for ix = iter # Update the iterator if necessary update_iter(art, iter, ix) @@ -486,15 +578,13 @@ function classify(art::ARTModule, x::RealMatrix ; preprocessed::Bool=false, get_ end return y_hat -end # classify(art::ARTModule, x::RealMatrix ; preprocessed::Bool=false, get_bmu::Bool=false) +end -# ------------------------------------------- -# Common Documentation -# ------------------------------------------- +# --------------------------------------------------------------------------- # +# COMMON DOCUMENTATION +# --------------------------------------------------------------------------- # @doc raw""" - classify(art::ARTModule, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false) - Predict categories of a single sample of features 'x' using the ART model. Returns predicted category 'y_hat.' @@ -505,4 +595,20 @@ Returns predicted category 'y_hat.' - `preprocessed::Bool=false`: optional, flag if the data has already been complement coded or not. - `get_bmu::Bool=false`: optional, flag if the model should return the best-matching-unit label in the case of total mismatch. """ -classify(art::ARTModule, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false) \ No newline at end of file +classify(art::ARTModule, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false) + +@doc raw""" +Sets the match threshold of the ART/ARTMAP module as a function of the vigilance parameter. + +Depending on selected ART/ARTMAP module and its options, this may be a function of other parameters as well. + +# Arguments +- `art::ARTModule`: the ART/ARTMAP module for setting a new threshold. +""" +set_threshold!(art::ARTModule) + +# Shared options docstring, inserted at the end of `opts_<...>` structs. +opts_docstring = """ +These options are a [`Parameters.jl`](https://github.com/mauro3/Parameters.jl) struct, taking custom options keyword arguments. +Each field has a default value listed below. 
+""" diff --git a/src/constants.jl b/src/constants.jl index 049632a4..1501d242 100644 --- a/src/constants.jl +++ b/src/constants.jl @@ -1,11 +1,13 @@ """ constants.jl -Description: - All constant values associated with the package. +# Description: +All constant values associated with the package. """ -# DDVFA linkage methods +""" +A list of all DDVFA linkage methods as strings. +""" const DDVFA_METHODS = [ "single", "average", diff --git a/test/adaptiveresonance/adaptiveresonance_tests.jl b/test/adaptiveresonance/adaptiveresonance_tests.jl new file mode 100644 index 00000000..e0580161 --- /dev/null +++ b/test/adaptiveresonance/adaptiveresonance_tests.jl @@ -0,0 +1,22 @@ +""" + adaptiveresonance_tests.jl + +# Description +Includes all of the AdaptiveResonance module tests. +""" + +@testset "Exceptions" begin + include("exceptions.jl") +end + +@testset "Initialization" begin + include("initialization.jl") +end + +@testset "Common" begin + include("common.jl") +end + +@testset "Performance" begin + include("performance.jl") +end diff --git a/test/adaptiveresonance/common.jl b/test/adaptiveresonance/common.jl new file mode 100644 index 00000000..c5fa6cc7 --- /dev/null +++ b/test/adaptiveresonance/common.jl @@ -0,0 +1,58 @@ +""" + common.jl + +# Description +Tests of common code for the `AdaptiveResonance.jl` package. +""" + +@testset "common" begin + @info "------- Common Code Tests -------" + # Example arrays + three_by_two = [1 2; 3 4; 5 6] + + # Test DataConfig constructors + @info "Testing DataConfig..." + dc1 = DataConfig() # Default constructor + dc2 = DataConfig(0, 1, 2) # When min and max are same across all features + dc3 = DataConfig([0, 1], [2, 3]) # When min and max differ across features + dc4 = DataConfig(three_by_two) # When a data matrix is provided + + # Test get_n_samples + @info "Testing get_n_samples..." + @test get_n_samples([1,2,3]) == 1 # 1-D array case + @test get_n_samples(three_by_two) == 2 # 2-D array case + + # Test data_setup! + @info "Testing data_setup!..." + data_setup!(DDVFA(), three_by_two) + data_setup!(DDVFA().config, three_by_two) + + # Test breaking situations + @info "Testing common code error handling..." + @test_throws ErrorException performance([1,2],[1,2,3]) + @test_logs (:warn,) AdaptiveResonance.data_setup!(dc3, three_by_two) + bad_config = DataConfig(1, 0, 3) + @test_throws ErrorException linear_normalization(three_by_two, config=bad_config) +end # @testset "common.jl" + +@testset "constants.jl" begin + @info "------- Constants Tests -------" + ddvfa_methods = [ + "single", + "average", + "complete", + "median", + "weighted", + "centroid", + ] + @test AdaptiveResonance.DDVFA_METHODS == ddvfa_methods +end + +@testset "kwargs" begin + @info "------- Kwargs test -------" + + # Iterate over all modules + for art in ADAPTIVE_RESONANCE_MODULES + art_module = art(alpha=1e-3, display=false) + end +end diff --git a/test/adaptiveresonance/exceptions.jl b/test/adaptiveresonance/exceptions.jl new file mode 100644 index 00000000..8271c690 --- /dev/null +++ b/test/adaptiveresonance/exceptions.jl @@ -0,0 +1,38 @@ +""" + exceptions.jl + +# Description +Tests the edge cases and exceptions of the entire `AdaptiveResonance.jl` package. +""" + +# Mismatch testset +# Enough ART modules do not encounter mismatch during the normal traing routines that these can be tested together. 
+@testset "Mismatch" begin + @info "------- Mismatch test -------" + + # ART + arts = [art(display=false) for art in ART_MODULES] + artmaps = [artmap(display=false) for artmap in ARTMAP_MODULES] + + # Train on disparate data + local_data = [ + 0.0 1.0; + 0.0 1.0; + ] + local_labels= [1, 1] + + # Test on data that is still within range but equally far from other points + test_data = [0.5, 0.5] + + # Get mismatch in unsupervised ART modules + for art in arts + train!(art, local_data) + classify(art, test_data) + end + + # Get mismatch in supervised ARTMAP modules + for artmap in artmaps + train!(artmap, local_data, local_labels) + classify(artmap, test_data) + end +end \ No newline at end of file diff --git a/test/modules.jl b/test/adaptiveresonance/initialization.jl similarity index 70% rename from test/modules.jl rename to test/adaptiveresonance/initialization.jl index 9e4bb213..61371078 100644 --- a/test/modules.jl +++ b/test/adaptiveresonance/initialization.jl @@ -1,4 +1,11 @@ -@testset "Modules" begin +""" + initialization.jl + +# Description +Contains tests for module initialization. +""" + +@testset "Initialization" begin # Default constructors fam = FAM() dam = DAM() @@ -12,4 +19,4 @@ sfam_2 = SFAM(opts_SFAM()) dvfa_2 = DVFA(opts_DVFA()) ddvfa_2 = DDVFA(opts_DDVFA()) -end # @testset "Modules" +end diff --git a/test/adaptiveresonance/performance.jl b/test/adaptiveresonance/performance.jl new file mode 100644 index 00000000..b24d3903 --- /dev/null +++ b/test/adaptiveresonance/performance.jl @@ -0,0 +1,66 @@ +""" + performance.jl + +# Description +A test of the performance of every ART and ARTMAP module. +""" + +@testset "Training Test" begin + + @info "------- Training test -------" + + # All ART modules + arts = ADAPTIVE_RESONANCE_MODULES + n_arts = length(arts) + + # All common ART options + art_opts = [ + (display = true,), + # (display = false,), + ] + + # Specific ART options + art_specifics = Dict( + DDVFA => [ + (gamma_normalization=true,), + (gamma_normalization=false,), + ], + FuzzyART => [ + (gamma_normalization=true,), + (gamma_normalization=false,), + ], + ) + + # All test option permutations + test_opts = [ + (get_bmu = true,), + (get_bmu = false,) + ] + n_test_opts = length(test_opts) + + # Performance baseline for all algorithms + perf_baseline = 0.7 + + # Iterate over all ART modules + for ix = 1:n_arts + # Iterate over all test options + for jx = 1:n_test_opts + # If we are testing a module with different options, merge + if haskey(art_specifics, arts[ix]) + local_art_opts = vcat(art_opts, art_specifics[arts[ix]]) + else + local_art_opts = art_opts + end + # Iterate over all options + for kx in eachindex(local_art_opts) + # Only do the unsupervised method if we have an ART module (not ARTMAP) + if arts[ix] isa ART + # Unsupervised + train_test_art(arts[ix](;local_art_opts[kx]...), data; test_opts=test_opts[jx]) + end + # Supervised + @test train_test_art(arts[ix](;local_art_opts[kx]...), data; supervised=true, test_opts=test_opts[jx]) >= perf_baseline + end + end + end +end diff --git a/test/art/art_tests.jl b/test/art/art_tests.jl new file mode 100644 index 00000000..0ca04662 --- /dev/null +++ b/test/art/art_tests.jl @@ -0,0 +1,14 @@ +""" + art_tests.jl + +# Description +Includes all of the ART module tests. 
+""" + +@testset "DDVFA" begin + include("ddvfa.jl") +end + +@testset "FuzzyART" begin + include("fuzzyart.jl") +end diff --git a/test/art/ddvfa.jl b/test/art/ddvfa.jl new file mode 100644 index 00000000..874ee90a --- /dev/null +++ b/test/art/ddvfa.jl @@ -0,0 +1,25 @@ +""" + ddvfa.jl + +# Description +DDVFA test sets. +""" + +@testset "Convenience functions" begin + my_art = DDVFA() + train!(my_art, data.train_x) + + # Convenience functions + W = AdaptiveResonance.get_W(my_art) + n_vec = AdaptiveResonance.get_n_weights_vec(my_art) + n_weights = AdaptiveResonance.get_n_weights(my_art) + + n_F2 = length(my_art.F2) + + # Test these values + @test ndims(W) == 1 # W is a list + @test length(W) == n_F2 # W has n_F2 weights + @test n_vec isa Vector # n_vec is a vector + @test length(n_vec) == n_F2 # n_vec describes n_F2 nodes + @test n_weights isa Int # n_weights is one number +end diff --git a/test/art/fuzzyart.jl b/test/art/fuzzyart.jl new file mode 100644 index 00000000..fed53d3c --- /dev/null +++ b/test/art/fuzzyart.jl @@ -0,0 +1,39 @@ +""" + fuzzyart.jl + +# Description +FuzzyART test sets. +""" + +@testset "FuzzyART" begin + @info "------- FuzzyART Test -------" + + # FuzzyART initialization and training + my_FuzzyART = FuzzyART() + train!(my_FuzzyART, data.train_x) + + # Compute a local sample for FuzzyART similarity method testing + local_sample = complement_code(data.train_x[:, 1], config=my_FuzzyART.config) + + # Compute the local activation and match + AdaptiveResonance.activation_match!(my_FuzzyART, local_sample) + + # Both field names + field_names = ["T", "M"] + + # Test that every method and field name computes + for method in DDVFA_METHODS + results = Dict() + for field_name in field_names + results[field_name] = AdaptiveResonance.similarity(method, my_FuzzyART, field_name, local_sample) + # @test isapprox(truth[method][field_name], results[field_name]) + end + @info "Method: $method" results + end + + # Check the error handling of the similarity function + # Access the wrong similarity metric keyword ("asdf") + @test_throws ErrorException AdaptiveResonance.similarity("asdf", my_FuzzyART, "T", local_sample) + # Access the wrong output function ("A") + @test_throws ErrorException AdaptiveResonance.similarity("centroid", my_FuzzyART, "A", local_sample) +end # @testset "FuzzyART" \ No newline at end of file diff --git a/test/artmap/artmap_tests.jl b/test/artmap/artmap_tests.jl new file mode 100644 index 00000000..88bfaa2d --- /dev/null +++ b/test/artmap/artmap_tests.jl @@ -0,0 +1,10 @@ +""" + adaptiveresonance_tests.jl + +# Description +Includes all of the ARTMAP tests. +""" + +@testset "ARTSCENE" begin + include("artscene.jl") +end diff --git a/test/artmap/artscene.jl b/test/artmap/artscene.jl new file mode 100644 index 00000000..59aac0cb --- /dev/null +++ b/test/artmap/artscene.jl @@ -0,0 +1,45 @@ +""" + test_artscene.jl + +A container for just ARTSCENE-specific unit tests. +""" + +using Distributed +using Logging + +""" + artscene_filter_porcelain() + +Runs the artscene user-level functions on a random image. +""" +# function artscene_filter_porcelain() +@testset "ARTSCENE Filter Porcelain" begin + @info "------- ARTSCENE test -------" + + # Add four workers and give them all function definitions + addprocs(3) + @everywhere using AdaptiveResonance + + # Show the parallel workers + n_processes = nprocs() + n_workers = nworkers() + @info "Started parallel workers. 
Processes: $n_processes, Workers: $n_workers" + + # Set the logging level to Debug within the test + LogLevel(Logging.Debug) + + # Random image + raw_image = rand(3, 5, 5) + + # Process the image through the filters + O, C = artscene_filter(raw_image) + + # Set the logging level back to Info + LogLevel(Logging.Info) + + # Close the workers after testing + rmprocs(workers()) + n_processes = nprocs() + n_workers = nworkers() + @info "Closed parallel workers. Processes: $n_processes, Workers: $n_workers" +end diff --git a/data/Iris.csv b/test/data/Iris.csv similarity index 100% rename from data/Iris.csv rename to test/data/Iris.csv diff --git a/test/runtests.jl b/test/runtests.jl index 9f83b0b8..f31d194d 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,5 +1,11 @@ +""" + runtests.jl + +The entry point to unit tests for the AdaptiveResonance.jl package. +""" + using SafeTestsets @safetestset "All Test Sets" begin include("test_sets.jl") -end # @safetestset "All Test Sets" \ No newline at end of file +end # @safetestset "All Test Sets" diff --git a/test/test_artscene.jl b/test/test_artscene.jl deleted file mode 100644 index c8dd1ed6..00000000 --- a/test/test_artscene.jl +++ /dev/null @@ -1,32 +0,0 @@ -using Distributed -using Logging - -# Add four workers and give them all function definitions -addprocs(4) -@everywhere using AdaptiveResonance - -""" - artscene_filter_porcelain() - -Runs the artscene user-level functions on a random image. -""" -# function artscene_filter_porcelain() -@testset "ARTSCENE Filter Porcelain" begin - # Set the logging level to Debug - LogLevel(Logging.Debug) - n_processes = nprocs() - n_workers = nworkers() - @info "Processes: $n_processes, Workers: $n_workers" - - # Random image - raw_image = rand(3, 5, 5) - - # Process the image through the filters - O, C = artscene_filter(raw_image) - - # Set the logging level back to Info - LogLevel(Logging.Info) -end # @testset "ARTSCENE Filter Porcelain" - -# Close the workers after testing -rmprocs(workers()) diff --git a/test/test_sets.jl b/test/test_sets.jl index 7302a937..21d30efd 100644 --- a/test/test_sets.jl +++ b/test/test_sets.jl @@ -1,3 +1,11 @@ +""" + test_sets.jl + +# Description +The main collection of tests for the AdaptiveResonance.jl package. +This file loads common utilities and aggregates all other unit tests files. +""" + using AdaptiveResonance using Test using Logging @@ -10,218 +18,25 @@ LogLevel(Logging.Info) include("test_utils.jl") # Load the data and test across all supervised modules -data = load_iris("../data/Iris.csv") - -@testset "common.jl" begin - @info "------- Common Code Tests -------" - # Example arrays - three_by_two = [1 2; 3 4; 5 6] - - # Test DataConfig constructors - @info "Testing DataConfig..." - dc1 = DataConfig() # Default constructor - dc2 = DataConfig(0, 1, 2) # When min and max are same across all features - dc3 = DataConfig([0, 1], [2, 3]) # When min and max differ across features - dc4 = DataConfig(three_by_two) # When a data matrix is provided - - # Test get_n_samples - @info "Testing get_n_samples..." - @test get_n_samples([1,2,3]) == 1 # 1-D array case - @test get_n_samples(three_by_two) == 2 # 2-D array case - - # Test data_setup! - @info "Testing data_setup!..." - data_setup!(DDVFA(), three_by_two) - data_setup!(DDVFA().config, three_by_two) - - # Test breaking situations - @info "Testing common code error handling..." 
- @test_throws ErrorException performance([1,2],[1,2,3]) - @test_logs (:warn,) AdaptiveResonance.data_setup!(dc3, three_by_two) - bad_config = DataConfig(1, 0, 3) - @test_throws ErrorException linear_normalization(three_by_two, config=bad_config) -end # @testset "common.jl" - -@testset "constants.jl" begin - @info "------- Constants Tests -------" - ddvfa_methods = [ - "single", - "average", - "complete", - "median", - "weighted", - "centroid" - ] - @test AdaptiveResonance.DDVFA_METHODS == ddvfa_methods -end # @testset "constants.jl" - -@testset "AdaptiveResonance.jl" begin - # Module loading - include("modules.jl") -end # @testset "AdaptiveResonance.jl" - -@testset "Train Test" begin - # All ART modules - arts = [ - FuzzyART, - DVFA, - DDVFA, - SFAM, - DAM, - ] - n_arts = length(arts) - - # All common ART options - art_opts = [ - (display = true,), - # (display = false,), - ] - - # Specific ART options - art_specifics = Dict( - DDVFA => [ - (gamma_normalization=true,), - (gamma_normalization=false,), - ], - FuzzyART => [ - (gamma_normalization=true,), - (gamma_normalization=false,), - ], - ) - - # All test option permutations - test_opts = [ - (get_bmu = true,), - (get_bmu = false,) - ] - n_test_opts = length(test_opts) - - @info "-------------- BEGIN TRAIN TEST --------------" - # Performance baseline for all algorithms - perf_baseline = 0.7 - - # Iterate over all ART modules - for ix = 1:n_arts - # Iterate over all test options - for jx = 1:n_test_opts - # If we are testing a module with different options, merge - if haskey(art_specifics, arts[ix]) - local_art_opts = vcat(art_opts, art_specifics[arts[ix]]) - else - local_art_opts = art_opts - end - # Iterate over all options - for kx = 1:length(local_art_opts) - # Only do the unsupervised method if we have an ART module (not ARTMAP) - if arts[ix] isa ART - # Unsupervised - train_test_art(arts[ix](;local_art_opts[kx]...), data; test_opts=test_opts[jx]) - end - # Supervised - @test train_test_art(arts[ix](;local_art_opts[kx]...), data; supervised=true, test_opts=test_opts[jx]) >= perf_baseline - end - end - end - - @info "-------------- END TRAIN TEST --------------" -end # @testset "Train Test" - -@testset "kwargs" begin - @info "--------- KWARGS TEST ---------" - - arts = [ - FuzzyART, - DVFA, - DDVFA, - SFAM, - DAM - ] - - for art in arts - art_module = art(alpha=1e-3, display=false) - end - - @info "--------- END KWARGS TEST ---------" -end # @testset "kwargs" - -@testset "FuzzyART" begin - @info "------- FuzzyART Testing -------" - - # FuzzyART train and test - my_FuzzyART = FuzzyART() - # local_complement_code = AdaptiveResonance.complement_code(data.train_x) - # train!(my_FuzzyART, local_complement_code, preprocessed=true) - train!(my_FuzzyART, data.train_x) - - # Similarity methods - methods = [ - "single", - "average", - "complete", - "median", - "weighted", - "centroid" - ] - - # Both field names - field_names = ["T", "M"] - - # Compute a local sample for FuzzyART similarity method testing - # local_sample = local_complement_code[:, 1] - # local_complement_code = AdaptiveResonance.complement_code(data.train_x) - # local_sample = data.train_x[:, 1] - local_sample = AdaptiveResonance.complement_code(data.train_x[:, 1], config=my_FuzzyART.config) - - # Compute the local activation and match - # AdaptiveResonance.activation_match!(my_FuzzyART, local_sample) +data = load_iris("data/Iris.csv") - # # Declare the true activation and match magnitudes - # truth = Dict( - # "single" => Dict( - # "T" => 0.9988714513100155, - # "M" => 
2.6532834139109758 - # ), - # "average" => Dict( - # "T" => 0.33761483787933894, - # "M" => 1.1148764060015297 - # ), - # "complete" => Dict( - # "T" => 0.018234409874338647, - # "M" => 0.07293763949735459 - # ), - # "median" => Dict( - # "T" => 0.2089217851518073, - # "M" => 0.835687140607229 - # ), - # "weighted" => Dict( - # "T" => 0.5374562506748786, - # "M" => 1.4396083090159748 - # ), - # "centroid" => Dict( - # "T" => 0.0, - # "M" => 0.0 - # ) - # ) - # # Test every method and field name - # for method in methods - # results = Dict() - # for field_name in field_names - # results[field_name] = AdaptiveResonance.similarity(method, my_FuzzyART, field_name, local_sample, my_FuzzyART.opts.gamma_ref) - # @test isapprox(truth[method][field_name], results[field_name]) - # end - # @info "Method: $method" results - # end +# @testset "AdaptiveResonance.jl" begin +# # Module loading +# include("modules.jl") +# end # @testset "AdaptiveResonance.jl" - # Check the error handling of the similarity function - # Access the wrong similarity metric keyword ("asdf") - @test_throws ErrorException AdaptiveResonance.similarity("asdf", my_FuzzyART, "T", local_sample, my_FuzzyART.opts.gamma_ref) - # Access the wrong output function ("A") - @test_throws ErrorException AdaptiveResonance.similarity("centroid", my_FuzzyART, "A", local_sample, my_FuzzyART.opts.gamma_ref) +@testset "AdaptiveResonance" begin + @info "------- ADAPTIVERESONANCE TESTS -------" + include("adaptiveresonance/adaptiveresonance_tests.jl") +end -end # @testset "FuzzyART" +@testset "ART" begin + @info "------- ART TESTS -------" + include("art/art_tests.jl") +end -@testset "ARTSCENE.jl" begin - # ARTSCENE training and testing - include("test_artscene.jl") -end # @testset "ARTSCENE.jl" +@testset "ARTMAP" begin + @info "------- ARTMAP TESTS -------" + include("artmap/artmap_tests.jl") +end diff --git a/test/test_utils.jl b/test/test_utils.jl index c013adee..f67ea415 100644 --- a/test/test_utils.jl +++ b/test/test_utils.jl @@ -1,41 +1,94 @@ -using DelimitedFiles +""" + test_utils.jl +A set of common struct and function utilities for AdaptiveResonance.jl unit tests. """ - DataSplit +# --------------------------------------------------------------------------- # +# IMPORTS +# --------------------------------------------------------------------------- # + +using DelimitedFiles +using NumericalTypeAliases + +# --------------------------------------------------------------------------- # +# STRUCTS +# --------------------------------------------------------------------------- # + +""" A basic struct for encapsulating the four components of supervised training. """ struct DataSplit - train_x::AdaptiveResonance.RealMatrix - test_x::AdaptiveResonance.RealMatrix - train_y::AdaptiveResonance.IntegerVector - test_y::AdaptiveResonance.IntegerVector + """ + The training feature samples. + Dimensions are `(feature-dim, sample-index)`. + """ + train_x::Matrix{Float} + + """ + The testing feature samples. + Dimensions are `(feature-dim, sample-index)`. + """ + test_x::Matrix{Float} + + """ + A vector of training labels. + """ + train_y::Vector{Int} + + """ + A vector of testing labels. 
+    """
+    test_y::Vector{Int}
+
     DataSplit(train_x, test_x, train_y, test_y) = new(train_x, test_x, train_y, test_y)
 end # DataSplit
 
-"""
-    DataSplit(data_x::Array, data_y::Array, ratio::Float)
+# --------------------------------------------------------------------------- #
+# METHODS
+# --------------------------------------------------------------------------- #
 
+"""
 Return a DataSplit struct that is split by the ratio (e.g. 0.8).
+
+# Arguments
+- `data_x::RealMatrix`: a 2-D matrix of samples with convention (features, samples).
+- `data_y::RealVector`: a 1-D vector of integer labels.
+- `ratio::Real`: the ratio for the train/test split ∈ (0, 1).
 """
-function DataSplit(data_x::Array, data_y::Array, ratio::Real)
-    dim, n_data = size(data_x)
+function DataSplit(data_x::RealMatrix, data_y::RealVector, ratio::Real)
+    _, n_data = size(data_x)
     split_ind = Integer(floor(n_data*ratio))
 
     train_x = data_x[:, 1:split_ind]
-    test_x = data_x[:, split_ind+1:end]
+    test_x = data_x[:, split_ind + 1:end]
     train_y = data_y[1:split_ind]
-    test_y = data_y[split_ind+1:end]
+    test_y = data_y[split_ind + 1:end]
 
     return DataSplit(train_x, test_x, train_y, test_y)
-end # DataSplit(data_x::Array, data_y::Array, ratio::Real)
+end # DataSplit(data_x::RealMatrix, data_y::RealVector, ratio::Real)
 
-"""
-    train_test_art(art::ARTModule, data::DataSplit; supervised::Bool=false, art_opts...)
+# --------------------------------------------------------------------------- #
+# FUNCTIONS
+# --------------------------------------------------------------------------- #
 
+"""
 Train and test an ART module.
+
+# Arguments
+- `art::ARTModule`: the ART or ARTMAP module to train and test.
+- `data::DataSplit`: the struct containing a train/test split.
+- `supervised::Bool=false`: flag for using supervised learning for ART modules (i.e., ARTMAP modules are always supervised).
+- `train_opts::NamedTuple=NamedTuple()`: keyword options to pass to the `train!` function.
+- `test_opts::NamedTuple=NamedTuple()`: keyword options to pass to the `classify` function.
 """
-function train_test_art(art::ARTModule, data::DataSplit; supervised::Bool=false, train_opts::NamedTuple=NamedTuple(), test_opts::NamedTuple=NamedTuple())
+function train_test_art(
+    art::ARTModule,
+    data::DataSplit ;
+    supervised::Bool=false,
+    train_opts::NamedTuple=NamedTuple(),
+    test_opts::NamedTuple=NamedTuple()
+)
     # Default performance to undefined
     perf = NaN
 
     # If the module is unsupervised by default
@@ -73,39 +126,25 @@ function train_test_art(art::ARTModule, data::DataSplit; supervised::Bool=false,
 end
 
 """
-    showtypetree(T, level=0)
-
-Show the tree of subtypes for a type.
-```julia
-showtypetree(Number)
-```
-"""
-function showtypetree(T, level=0)
-    println("\t" ^ level, T)
-    for t in subtypes(T)
-        showtypetree(t, level+1)
-    end
-end # showtypetree(T, level=0)
-
-"""
-    load_iris(data_path::String ; split_ratio::Real = 0.8)
-
 Loads the iris dataset for testing and examples.
+
+# Arguments
+- `data_path::AbstractString`: path containing the Iris dataset.
+- `split_ratio::Real = 0.8`: train/test split ratio ∈ (0, 1).
""" -function load_iris(data_path::String ; split_ratio::Real = 0.8) - raw_data = readdlm(data_path,',') +function load_iris(data_path::AbstractString ; split_ratio::Real = 0.8) + raw_data = readdlm(data_path, ',') labels = ["Iris-setosa", "Iris-versicolor", "Iris-virginica"] - raw_x = Array{AdaptiveResonance.RealFP}(raw_data[2:end, 2:5]) + raw_x = Matrix{Real}(raw_data[2:end, 2:5]) raw_y_labels = raw_data[2:end, 6] - raw_y = Array{Integer}(undef, 0) - for ix = 1:length(raw_y_labels) - for jx = 1:length(labels) + raw_y = Vector{Int}(undef, 0) + for ix in eachindex(raw_y_labels) + for jx in eachindex(labels) if raw_y_labels[ix] == labels[jx] push!(raw_y, jx) end end end - n_samples, n_features = size(raw_x) # Julia is column-major, so use columns for features raw_x = permutedims(raw_x) @@ -114,4 +153,4 @@ function load_iris(data_path::String ; split_ratio::Real = 0.8) data = DataSplit(raw_x, raw_y, split_ratio) return data -end # load_iris(data_path::String ; split_ratio::Real = 0.8) +end # load_iris(data_path::AbstractString ; split_ratio::Real = 0.8)