version 0.0.9

This commit is contained in:
ton
2023-09-07 08:50:19 +07:00
parent 52f5694727
commit b2097b2512
17 changed files with 6397 additions and 99 deletions

View File

@@ -0,0 +1,946 @@
# This file is machine-generated - editing it directly is not advised
julia_version = "1.9.3"
manifest_format = "2.0"
project_hash = "844808a02b2a30acdc69d975773e029da0ec81b8"
[[deps.AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "8bc0aaec0ca548eb6cf5f0d7d16351650c1ee956"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.3.2"
weakdeps = ["ChainRulesCore"]
[deps.AbstractFFTs.extensions]
AbstractFFTsChainRulesCoreExt = "ChainRulesCore"
[[deps.Adapt]]
deps = ["LinearAlgebra", "Requires"]
git-tree-sha1 = "76289dc51920fdc6e0013c872ba9551d54961c24"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.6.2"
weakdeps = ["StaticArrays"]
[deps.Adapt.extensions]
AdaptStaticArraysExt = "StaticArrays"
[[deps.ArgCheck]]
git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4"
uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197"
version = "2.3.0"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Atomix]]
deps = ["UnsafeAtomics"]
git-tree-sha1 = "c06a868224ecba914baa6942988e2f2aade419be"
uuid = "a9b6321e-bd34-4604-b9c9-b65b8de01458"
version = "0.1.0"
[[deps.BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "dbf84058d0a8cbbadee18d25cf606934b22d7c66"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.4.2"
[[deps.BangBang]]
deps = ["Compat", "ConstructionBase", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables"]
git-tree-sha1 = "e28912ce94077686443433c2800104b061a827ed"
uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66"
version = "0.3.39"
[deps.BangBang.extensions]
BangBangChainRulesCoreExt = "ChainRulesCore"
BangBangDataFramesExt = "DataFrames"
BangBangStaticArraysExt = "StaticArrays"
BangBangStructArraysExt = "StructArrays"
BangBangTypedTablesExt = "TypedTables"
[deps.BangBang.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
TypedTables = "9d95f2ec-7b3d-5a63-8d20-e2491e220bb9"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.Baselet]]
git-tree-sha1 = "aebf55e6d7795e02ca500a689d326ac979aaf89e"
uuid = "9718e550-a3fa-408a-8086-8db961cd8217"
version = "0.1.1"
[[deps.CEnum]]
git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.2"
[[deps.CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CUDA_Driver_jll", "CUDA_Runtime_Discovery", "CUDA_Runtime_jll", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "KernelAbstractions", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Preferences", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "UnsafeAtomicsLLVM"]
git-tree-sha1 = "442d989978ed3ff4e174c928ee879dc09d1ef693"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "4.3.2"
[[deps.CUDA_Driver_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"]
git-tree-sha1 = "498f45593f6ddc0adff64a9310bb6710e851781b"
uuid = "4ee394cb-3365-5eb0-8335-949819d2adfc"
version = "0.5.0+1"
[[deps.CUDA_Runtime_Discovery]]
deps = ["Libdl"]
git-tree-sha1 = "bcc4a23cbbd99c8535a5318455dcf0f2546ec536"
uuid = "1af6417a-86b4-443c-805f-a4643ffb695f"
version = "0.2.2"
[[deps.CUDA_Runtime_jll]]
deps = ["Artifacts", "CUDA_Driver_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "5248d9c45712e51e27ba9b30eebec65658c6ce29"
uuid = "76a88914-d11a-5bdc-97e0-2f5a05c973a2"
version = "0.6.0+0"
[[deps.CUDNN_jll]]
deps = ["Artifacts", "CUDA_Runtime_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "2918fbffb50e3b7a0b9127617587afa76d4276e8"
uuid = "62b44479-cb7b-5706-934f-f13b2eb2e645"
version = "8.8.1+0"
[[deps.Calculus]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f641eb0a4f00c343bbc32346e1217b86f3ce9dad"
uuid = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9"
version = "0.5.1"
[[deps.ChainRules]]
deps = ["Adapt", "ChainRulesCore", "Compat", "Distributed", "GPUArraysCore", "IrrationalConstants", "LinearAlgebra", "Random", "RealDot", "SparseArrays", "Statistics", "StructArrays"]
git-tree-sha1 = "1cdf290d4feec68824bfb84f4bfc9f3aba185647"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "1.51.1"
[[deps.ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "e30f2f4e20f7f186dc36529910beaedc60cfa644"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.16.0"
[[deps.CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[deps.Compat]]
deps = ["UUIDs"]
git-tree-sha1 = "4e88377ae7ebeaf29a047aa1ee40826e0b708a5d"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "4.7.0"
weakdeps = ["Dates", "LinearAlgebra"]
[deps.Compat.extensions]
CompatLinearAlgebraExt = "LinearAlgebra"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "1.0.5+0"
[[deps.CompositionsBase]]
git-tree-sha1 = "802bb88cd69dfd1509f6670416bd4434015693ad"
uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b"
version = "0.1.2"
[deps.CompositionsBase.extensions]
CompositionsBaseInverseFunctionsExt = "InverseFunctions"
[deps.CompositionsBase.weakdeps]
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.CondaPkg]]
deps = ["JSON3", "Markdown", "MicroMamba", "Pidfile", "Pkg", "TOML"]
git-tree-sha1 = "741146cf2ced5859faae76a84b541aa9af1a78bb"
uuid = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
version = "0.2.18"
[[deps.ConstructionBase]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "738fec4d684a9a6ee9598a8bfee305b26831f28c"
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
version = "1.5.2"
[deps.ConstructionBase.extensions]
ConstructionBaseIntervalSetsExt = "IntervalSets"
ConstructionBaseStaticArraysExt = "StaticArrays"
[deps.ConstructionBase.weakdeps]
IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
[[deps.ContextVariablesX]]
deps = ["Compat", "Logging", "UUIDs"]
git-tree-sha1 = "25cc3803f1030ab855e383129dcd3dc294e322cc"
uuid = "6add18c4-b38d-439d-96f6-d6bc489c04c5"
version = "0.1.3"
[[deps.DataAPI]]
git-tree-sha1 = "8da84edb865b0b5b0100c0666a9bc9a0b71c553c"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.15.0"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "cf25ccb972fec4e4817764d01c82386ae94f77b4"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.14"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DefineSingletons]]
git-tree-sha1 = "0fba8b706d0178b4dc7fd44a96a92382c9065c2c"
uuid = "244e2a9f-e319-4986-a169-4d1fe445cd52"
version = "0.1.2"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
git-tree-sha1 = "9e2f36d3c96a820c678f2f1f1782582fcf685bae"
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
version = "1.9.1"
[[deps.DiffResults]]
deps = ["StaticArraysCore"]
git-tree-sha1 = "782dd5f4561f5d267313f23853baaaa4c52ea621"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.1.0"
[[deps.DiffRules]]
deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "23163d55f885173722d1e4cf0f6110cdbaf7e272"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.15.1"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.Distributions]]
deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "Test"]
git-tree-sha1 = "938fe2981db009f531b6332e31c58e9584a2f9bd"
uuid = "31c24e10-a181-5473-b8eb-7969acd0382f"
version = "0.25.100"
[deps.Distributions.extensions]
DistributionsChainRulesCoreExt = "ChainRulesCore"
DistributionsDensityInterfaceExt = "DensityInterface"
[deps.Distributions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DensityInterface = "b429d917-457f-4dbc-8f4c-0cc954292b1d"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.9.3"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
version = "1.6.0"
[[deps.DualNumbers]]
deps = ["Calculus", "NaNMath", "SpecialFunctions"]
git-tree-sha1 = "5837a837389fccf076445fce071c8ddaea35a566"
uuid = "fa6b7ba4-c1ee-5f82-b5fc-ecf0adba8f74"
version = "0.6.8"
[[deps.ExprTools]]
git-tree-sha1 = "c1d06d129da9f55715c6c212866f5b1bddc5fa00"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.9"
[[deps.FLoops]]
deps = ["BangBang", "Compat", "FLoopsBase", "InitialValues", "JuliaVariables", "MLStyle", "Serialization", "Setfield", "Transducers"]
git-tree-sha1 = "ffb97765602e3cbe59a0589d237bf07f245a8576"
uuid = "cc61a311-1640-44b5-9fba-1b764f453329"
version = "0.2.1"
[[deps.FLoopsBase]]
deps = ["ContextVariablesX"]
git-tree-sha1 = "656f7a6859be8673bf1f35da5670246b923964f7"
uuid = "b9860ae5-e623-471e-878b-f6a53c775ea6"
version = "0.1.1"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[deps.FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "0b3b52afd0f87b0a3f5ada0466352d125c9db458"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "1.2.1"
[[deps.Flux]]
deps = ["Adapt", "CUDA", "ChainRulesCore", "Functors", "LinearAlgebra", "MLUtils", "MacroTools", "NNlib", "NNlibCUDA", "OneHotArrays", "Optimisers", "Preferences", "ProgressLogging", "Random", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "Zygote", "cuDNN"]
git-tree-sha1 = "3e2c3704c2173ab4b1935362384ca878b53d4c34"
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.13.17"
[deps.Flux.extensions]
AMDGPUExt = "AMDGPU"
FluxMetalExt = "Metal"
[deps.Flux.weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
[[deps.ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"]
git-tree-sha1 = "00e252f4d706b3d55a8863432e742bf5717b498d"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.35"
weakdeps = ["StaticArrays"]
[deps.ForwardDiff.extensions]
ForwardDiffStaticArraysExt = "StaticArrays"
[[deps.Functors]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "478f8c3145bb91d82c2cf20433e8c1b30df454cc"
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
version = "0.4.4"
[[deps.Future]]
deps = ["Random"]
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
[[deps.GPUArrays]]
deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"]
git-tree-sha1 = "2e57b4a4f9cc15e85a24d603256fe08e527f48d1"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "8.8.1"
[[deps.GPUArraysCore]]
deps = ["Adapt"]
git-tree-sha1 = "2d6ca471a6c7b536127afccfa7564b5b39227fe0"
uuid = "46192b85-c4d5-4398-a991-12ede77f4527"
version = "0.1.5"
[[deps.GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Scratch", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "cb090aea21c6ca78d59672a7e7d13bd56d09de64"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.20.3"
[[deps.GeneralUtils]]
deps = ["CUDA", "DataStructures", "Distributions", "Flux", "JSON3", "Random"]
path = "C:\\Users\\pitak\\.julia\\dev\\GeneralUtils"
uuid = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
version = "0.1.0"
[[deps.HypergeometricFunctions]]
deps = ["DualNumbers", "LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"]
git-tree-sha1 = "f218fe3736ddf977e0e772bc9a586b2383da2685"
uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a"
version = "0.3.23"
[[deps.IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "eac00994ce3229a464c2847e956d77a2c64ad3a5"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.4.10"
[[deps.InitialValues]]
git-tree-sha1 = "4da0f88e9a39111c2fa3add390ab15f3a44f3ca3"
uuid = "22cec73e-a1b8-11e9-2c92-598750a2cf9c"
version = "0.3.1"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.IrrationalConstants]]
git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.2.2"
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[deps.JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.4.1"
[[deps.JSON3]]
deps = ["Dates", "Mmap", "Parsers", "PrecompileTools", "StructTypes", "UUIDs"]
git-tree-sha1 = "5b62d93f2582b09e469b3099d839c2d2ebf5066d"
uuid = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
version = "1.13.1"
[[deps.JuliaVariables]]
deps = ["MLStyle", "NameResolution"]
git-tree-sha1 = "49fb3cb53362ddadb4415e9b73926d6b40709e70"
uuid = "b14d175d-62b4-44ba-8fb7-3064adc8c3ec"
version = "0.2.4"
[[deps.KernelAbstractions]]
deps = ["Adapt", "Atomix", "InteractiveUtils", "LinearAlgebra", "MacroTools", "PrecompileTools", "SparseArrays", "StaticArrays", "UUIDs", "UnsafeAtomics", "UnsafeAtomicsLLVM"]
git-tree-sha1 = "b48617c5d764908b5fac493cd907cf33cc11eec1"
uuid = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
version = "0.9.6"
[[deps.LLVM]]
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "5007c1421563108110bbd57f63d8ad4565808818"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "5.2.0"
[[deps.LLVMExtra_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "1222116d7313cdefecf3d45a2bc1a89c4e7c9217"
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
version = "0.0.22+0"
[[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.84.0+0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.LinearAlgebra]]
deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "c3ce8e7420b3a6e071e0fe4745f5d4300e37b13f"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.24"
[deps.LogExpFunctions.extensions]
LogExpFunctionsChainRulesCoreExt = "ChainRulesCore"
LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables"
LogExpFunctionsInverseFunctionsExt = "InverseFunctions"
[deps.LogExpFunctions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.MLStyle]]
git-tree-sha1 = "bc38dff0548128765760c79eb7388a4b37fae2c8"
uuid = "d8e11817-5142-5d16-987a-aa16d5891078"
version = "0.4.17"
[[deps.MLUtils]]
deps = ["ChainRulesCore", "Compat", "DataAPI", "DelimitedFiles", "FLoops", "NNlib", "Random", "ShowCases", "SimpleTraits", "Statistics", "StatsBase", "Tables", "Transducers"]
git-tree-sha1 = "3504cdb8c2bc05bde4d4b09a81b01df88fcbbba0"
uuid = "f1d291b0-491e-4a28-83b9-f70985020b54"
version = "0.4.3"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.10"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.2+0"
[[deps.MicroCollections]]
deps = ["BangBang", "InitialValues", "Setfield"]
git-tree-sha1 = "629afd7d10dbc6935ec59b32daeb33bc4460a42e"
uuid = "128add7d-3638-4c79-886c-908ea0c25c34"
version = "0.1.4"
[[deps.MicroMamba]]
deps = ["Pkg", "Scratch", "micromamba_jll"]
git-tree-sha1 = "011cab361eae7bcd7d278f0a7a00ff9c69000c51"
uuid = "0b3b1443-0f03-428d-bdfb-f27f9c1191ea"
version = "0.1.14"
[[deps.Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "f66bdc5de519e8f8ae43bdc598782d35a25b1272"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.1.0"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.10.11"
[[deps.NNlib]]
deps = ["Adapt", "Atomix", "ChainRulesCore", "GPUArraysCore", "KernelAbstractions", "LinearAlgebra", "Pkg", "Random", "Requires", "Statistics"]
git-tree-sha1 = "72240e3f5ca031937bd536182cb2c031da5f46dd"
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
version = "0.8.21"
[deps.NNlib.extensions]
NNlibAMDGPUExt = "AMDGPU"
[deps.NNlib.weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
[[deps.NNlibCUDA]]
deps = ["Adapt", "CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics", "cuDNN"]
git-tree-sha1 = "f94a9684394ff0d325cc12b06da7032d8be01aaf"
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
version = "0.2.7"
[[deps.NaNMath]]
deps = ["OpenLibm_jll"]
git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "1.0.2"
[[deps.NameResolution]]
deps = ["PrettyPrint"]
git-tree-sha1 = "1a0fa0e9613f46c9b8c11eee38ebb4f590013c5e"
uuid = "71a1bf82-56d0-4bbc-8a3c-48b961074391"
version = "0.1.5"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
version = "1.2.0"
[[deps.OneHotArrays]]
deps = ["Adapt", "ChainRulesCore", "Compat", "GPUArraysCore", "LinearAlgebra", "NNlib"]
git-tree-sha1 = "5e4029759e8699ec12ebdf8721e51a659443403c"
uuid = "0b1bfda6-eb8a-41d2-88d8-f5af5cad476f"
version = "0.2.4"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.21+4"
[[deps.OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
version = "0.8.1+0"
[[deps.OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[deps.Optimisers]]
deps = ["ChainRulesCore", "Functors", "LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "6a01f65dd8583dee82eecc2a19b0ff21521aa749"
uuid = "3bd65402-5787-11e9-1adc-39752487f4e2"
version = "0.2.18"
[[deps.OrderedCollections]]
git-tree-sha1 = "d321bf2de576bf25ec4d3e4360faca399afca282"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.6.0"
[[deps.PDMats]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "67eae2738d63117a196f497d7db789821bce61d1"
uuid = "90014a1f-27ba-587c-ab20-58faa44d9150"
version = "0.11.17"
[[deps.Parsers]]
deps = ["Dates", "PrecompileTools", "UUIDs"]
git-tree-sha1 = "4b2e829ee66d4218e0cef22c0a64ee37cf258c29"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.7.1"
[[deps.Pidfile]]
deps = ["FileWatching", "Test"]
git-tree-sha1 = "2d8aaf8ee10df53d0dfb9b8ee44ae7c04ced2b03"
uuid = "fa939f87-e72e-5be4-a000-7fc836dbe307"
version = "1.3.0"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.9.2"
[[deps.PrecompileTools]]
deps = ["Preferences"]
git-tree-sha1 = "9673d39decc5feece56ef3940e5dafba15ba0f81"
uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
version = "1.1.2"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "7eb1686b4f04b82f96ed7a4ea5890a4f0c7a09f1"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.4.0"
[[deps.PrettyPrint]]
git-tree-sha1 = "632eb4abab3449ab30c5e1afaa874f0b98b586e4"
uuid = "8162dcfd-2161-5ef2-ae6c-7681170c5f98"
version = "0.2.0"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.ProgressLogging]]
deps = ["Logging", "SHA", "UUIDs"]
git-tree-sha1 = "80d919dee55b9c50e8d9e2da5eeafff3fe58b539"
uuid = "33c8b6b6-d38a-422a-b730-caa89a2f386c"
version = "0.1.4"
[[deps.PythonCall]]
deps = ["CondaPkg", "Dates", "Libdl", "MacroTools", "Markdown", "Pkg", "REPL", "Requires", "Serialization", "Tables", "UnsafePointers"]
git-tree-sha1 = "70af6bdbde63d7d0a4ea99f3e890ebdb55e9d464"
uuid = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
version = "0.9.14"
[[deps.QuadGK]]
deps = ["DataStructures", "LinearAlgebra"]
git-tree-sha1 = "6ec7ac8412e83d57e313393220879ede1740f9ee"
uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
version = "2.8.2"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.Random123]]
deps = ["Random", "RandomNumbers"]
git-tree-sha1 = "552f30e847641591ba3f39fd1bed559b9deb0ef3"
uuid = "74087812-796a-5b5d-8853-05524746bad3"
version = "1.6.1"
[[deps.RandomNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
version = "1.5.3"
[[deps.RealDot]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
version = "0.1.0"
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.3.0"
[[deps.Rmath]]
deps = ["Random", "Rmath_jll"]
git-tree-sha1 = "f65dcb5fa46aee0cf9ed6274ccbd597adc49aa7b"
uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa"
version = "0.7.1"
[[deps.Rmath_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6ed52fdd3382cf21947b15e8870ac0ddbff736da"
uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f"
version = "0.4.0+0"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.Scratch]]
deps = ["Dates"]
git-tree-sha1 = "30449ee12237627992a99d5e30ae63e4d78cd24a"
uuid = "6c6a2e73-6563-6170-7368-637461726353"
version = "1.2.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.Setfield]]
deps = ["ConstructionBase", "Future", "MacroTools", "StaticArraysCore"]
git-tree-sha1 = "e2cc6d8c88613c05e1defb55170bf5ff211fbeac"
uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46"
version = "1.1.1"
[[deps.ShowCases]]
git-tree-sha1 = "7f534ad62ab2bd48591bdeac81994ea8c445e4a5"
uuid = "605ecd9f-84a6-4c9e-81e2-4798472b76a3"
version = "0.1.0"
[[deps.SimpleTraits]]
deps = ["InteractiveUtils", "MacroTools"]
git-tree-sha1 = "5d7e3f4e11935503d3ecaf7186eac40602e7d231"
uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d"
version = "0.9.4"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "c60ec5c62180f27efea3ba2908480f8055e17cee"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.1.1"
[[deps.SparseArrays]]
deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.SpecialFunctions]]
deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "7beb031cf8145577fbccacd94b8a8f4ce78428d3"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "2.3.0"
weakdeps = ["ChainRulesCore"]
[deps.SpecialFunctions.extensions]
SpecialFunctionsChainRulesCoreExt = "ChainRulesCore"
[[deps.SplittablesBase]]
deps = ["Setfield", "Test"]
git-tree-sha1 = "e08a62abc517eb79667d0a29dc08a3b589516bb5"
uuid = "171d559e-b47b-412a-8079-5efa626c420e"
version = "0.1.15"
[[deps.StaticArrays]]
deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"]
git-tree-sha1 = "832afbae2a45b4ae7e831f86965469a24d1d8a83"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.5.26"
[[deps.StaticArraysCore]]
git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a"
uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c"
version = "1.4.0"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
version = "1.9.0"
[[deps.StatsAPI]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "45a7769a04a3cf80da1c1c7c60caf932e6f4c9f7"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.6.0"
[[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "75ebe04c5bed70b91614d684259b661c9e6274a4"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.34.0"
[[deps.StatsFuns]]
deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"]
git-tree-sha1 = "f625d686d5a88bcd2b15cd81f18f98186fdc0c9a"
uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
version = "1.3.0"
[deps.StatsFuns.extensions]
StatsFunsChainRulesCoreExt = "ChainRulesCore"
StatsFunsInverseFunctionsExt = "InverseFunctions"
[deps.StatsFuns.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.StructArrays]]
deps = ["Adapt", "DataAPI", "GPUArraysCore", "StaticArraysCore", "Tables"]
git-tree-sha1 = "521a0e828e98bb69042fec1809c1b5a680eb7389"
uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
version = "0.6.15"
[[deps.StructTypes]]
deps = ["Dates", "UUIDs"]
git-tree-sha1 = "ca4bccb03acf9faaf4137a9abc1881ed1841aa70"
uuid = "856f2bd8-1eba-4b0a-8007-ebc267875bd4"
version = "1.10.0"
[[deps.SuiteSparse]]
deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
[[deps.SuiteSparse_jll]]
deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"]
uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c"
version = "5.10.1+6"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.3"
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[deps.Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits", "Test"]
git-tree-sha1 = "1544b926975372da01227b382066ab70e574a3ec"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.10.1"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.0"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.TimerOutputs]]
deps = ["ExprTools", "Printf"]
git-tree-sha1 = "f548a9e9c490030e545f72074a41edfd0e5bcdd7"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.23"
[[deps.Transducers]]
deps = ["Adapt", "ArgCheck", "BangBang", "Baselet", "CompositionsBase", "DefineSingletons", "Distributed", "InitialValues", "Logging", "Markdown", "MicroCollections", "Requires", "Setfield", "SplittablesBase", "Tables"]
git-tree-sha1 = "a66fb81baec325cf6ccafa243af573b031e87b00"
uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999"
version = "0.4.77"
[deps.Transducers.extensions]
TransducersBlockArraysExt = "BlockArrays"
TransducersDataFramesExt = "DataFrames"
TransducersLazyArraysExt = "LazyArrays"
TransducersOnlineStatsBaseExt = "OnlineStatsBase"
TransducersReferenceablesExt = "Referenceables"
[deps.Transducers.weakdeps]
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02"
OnlineStatsBase = "925886fa-5bf2-5e8e-b522-a9147a512338"
Referenceables = "42d2dcc6-99eb-4e98-b66c-637b7d73030e"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.UnsafeAtomics]]
git-tree-sha1 = "6331ac3440856ea1988316b46045303bef658278"
uuid = "013be700-e6cd-48c3-b4a1-df204f14c38f"
version = "0.2.1"
[[deps.UnsafeAtomicsLLVM]]
deps = ["LLVM", "UnsafeAtomics"]
git-tree-sha1 = "ea37e6066bf194ab78f4e747f5245261f17a7175"
uuid = "d80eeb9a-aca5-4d75-85e5-170c8b632249"
version = "0.1.2"
[[deps.UnsafePointers]]
git-tree-sha1 = "c81331b3b2e60a982be57c046ec91f599ede674a"
uuid = "e17b2a0c-0bdf-430a-bd0c-3a23cae4ff39"
version = "1.0.0"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.13+0"
[[deps.Zygote]]
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "GPUArrays", "GPUArraysCore", "IRTools", "InteractiveUtils", "LinearAlgebra", "LogExpFunctions", "MacroTools", "NaNMath", "PrecompileTools", "Random", "Requires", "SparseArrays", "SpecialFunctions", "Statistics", "ZygoteRules"]
git-tree-sha1 = "5be3ddb88fc992a7d8ea96c3f10a49a7e98ebc7b"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.6.62"
[deps.Zygote.extensions]
ZygoteColorsExt = "Colors"
ZygoteDistancesExt = "Distances"
ZygoteTrackerExt = "Tracker"
[deps.Zygote.weakdeps]
Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
[[deps.ZygoteRules]]
deps = ["ChainRulesCore", "MacroTools"]
git-tree-sha1 = "977aed5d006b840e2e40c0b48984f7463109046d"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.3"
[[deps.cuDNN]]
deps = ["CEnum", "CUDA", "CUDNN_jll"]
git-tree-sha1 = "f65490d187861d6222cb38bcbbff3fd949a7ec3e"
uuid = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"
version = "1.0.4"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.8.0+0"
[[deps.micromamba_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl"]
git-tree-sha1 = "66d07957bcf7e4930d933195aed484078dd8cbb5"
uuid = "f8abcde7-e9b7-5caa-b8af-a437887ae8e4"
version = "1.4.9+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0"

View File

@@ -0,0 +1,16 @@
name = "IronpenGPU"
uuid = "3d5396ea-818e-43fc-a9d3-164248e840cd"
authors = ["ton <narawat@gmail.com>"]
version = "0.1.0"
[deps]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"

View File

@@ -0,0 +1,91 @@
module IronpenGPU # this is a parent module
# export
""" Order by dependencies of each file. The 1st included file must not depend on any other
files and each file can only depend on the file included before it.
"""
# Submodules are include()d in strict dependency order, then pulled into this
# namespace with `using .<name>` so their exported names become reachable here.
include("type.jl")
using .type # bring type into parent module namespace
include("snnUtil.jl")
using .snnUtil
include("forward.jl")
using .forward
include("learn.jl")
using .learn
include("interface.jl")
using .interface
#------------------------------------------------------------------------------------------------100
""" version 0.0.9
Todo:
[1] +W 90% of most active conn
[2] -W 10% of less active conn
[-] add temporal summation in addition to already used spatial summation.
CANCELLED, spatial summation every second until membrane potential reach a threshold
is in itself a temporal summation.
[4] implement dormant connection and pruning machanism. the longer the training the longer
0 weight stay 0.
[] using RL to control learning signal
[] consider using Dates.now() instead of timestamp because time_stamp may overflow
[] Liquid time constant. training should include adjusting α, neuron membrane potential decay factor
which defined by neuron.tau_m formula in type.jl
Change from version: 0.0.6
-
All features
- excitatory/inhabitory matrix
- neuroplasticity
- voltage regulator
"""
end # module IronpenGPU

View File

@@ -0,0 +1,912 @@
module forward
# export
using Flux, CUDA
using GeneralUtils
using ..type, ..snnUtil
#------------------------------------------------------------------------------------------------100
""" kfn forward
input (row, col, batch)
"""
function (kfn::kfn_1)(input::AbstractArray)
kfn.timeStep .+= 1
# what to do at the start of learning round
if view(kfn.learningStage, 1)[1] == 1
# reset learning params
kfn.zitCumulative .= 0
kfn.lif_vt .= 0
kfn.lif_wRecChange .= 0
kfn.lif_epsilonRec .= 0
kfn.lif_firingCounter .= 0
kfn.lif_refractoryCounter .= 0
kfn.lif_zt .= 0
kfn.lif_synapticActivityCounter .= 0
kfn.alif_vt .= 0
kfn.alif_a .= 0
kfn.alif_epsilonRec .= 0
kfn.alif_epsilonRecA .= 0
kfn.alif_wRecChange .= 0
kfn.alif_firingCounter .= 0
kfn.alif_refractoryCounter .= 0
kfn.alif_zt .= 0
kfn.alif_synapticActivityCounter .= 0
kfn.on_vt .= 0
kfn.on_epsilonRec .= 0
kfn.on_wOutChange .= 0
kfn.on_refractoryCounter .= 0
kfn.learningStage = [2]
end
# update activation matrix with "lif_zt1" and "alif_zt1" by concatenating
# (input, lif_zt1, alif_zt1) to form activation matrix
_zit = cat(reshape(input, (size(input, 1), size(input, 2), 1, size(input, 3))),
reshape(kfn.lif_zt, (size(input, 1), :, 1, size(input, 3))),
reshape(kfn.alif_zt, (size(input, 1), :, 1, size(input, 3))), dims=2)
kfn.zit .= reshape(_zit, (size(input, 1), :, size(input, 3)))
@sync begin
@async begin
# project 3D kfn zit into 4D lif zit
i1, i2, i3, i4 = size(kfn.lif_zit)
kfn.lif_zit .= reshape(kfn.zit, (i1, i2, 1, i4)) .* kfn.lif_arrayProjection4d
kfn.lif_exInType .= kfn.exInType .* kfn.lif_arrayProjection4d
lifForward( kfn.lif_zit,
kfn.lif_wRec,
kfn.lif_vt,
kfn.lif_vth,
kfn.lif_vRest,
kfn.lif_zt4d,
kfn.lif_alpha,
kfn.lif_phi,
kfn.lif_epsilonRec,
kfn.lif_refractoryCounter,
kfn.lif_refractoryDuration,
kfn.lif_gammaPd,
kfn.lif_firingCounter,
kfn.lif_recSignal,
kfn.lif_exInType,
kfn.lif_wRecChange,
kfn.lif_neuronInactivityCounter,
kfn.lif_synapticActivityCounter,
)
end
@async begin
# project 3D kfn zit into 4D alif zit
i1, i2, i3, i4 = size(kfn.alif_zit)
kfn.alif_zit .= reshape(kfn.zit, (i1, i2, 1, i4)) .* kfn.alif_arrayProjection4d
kfn.alif_exInType .= kfn.exInType .* kfn.alif_arrayProjection4d
alifForward(kfn.alif_zit,
kfn.alif_wRec,
kfn.alif_vt,
kfn.alif_vth,
kfn.alif_vRest,
kfn.alif_zt4d,
kfn.alif_alpha,
kfn.alif_phi,
kfn.alif_epsilonRec,
kfn.alif_refractoryCounter,
kfn.alif_refractoryDuration,
kfn.alif_gammaPd,
kfn.alif_firingCounter,
kfn.alif_recSignal,
kfn.alif_exInType,
kfn.alif_wRecChange,
kfn.alif_neuronInactivityCounter,
kfn.alif_synapticActivityCounter,
kfn.alif_epsilonRecA,
kfn.alif_a,
kfn.alif_avth,
kfn.alif_beta,
kfn.alif_rho,
)
end
end
# reduce lif_zt4d and alif_zt4d into lif_zt, alif_zt (4d -> 1d)
kfn.lif_zt .= reduce(max, kfn.lif_zt4d, dims=(1,2))
kfn.alif_zt .= reduce(max, kfn.alif_zt4d, dims=(1,2))
# update activation matrix with "lif_zt1" and "alif_zt1" by concatenating
# (input, lif_zt1, alif_zt1) to form activation matrix
_zit = cat(reshape(input, (size(input, 1), size(input, 2), 1, size(input, 3))),
reshape(kfn.lif_zt, (size(input, 1), :, 1, size(input, 3))),
reshape(kfn.alif_zt, (size(input, 1), :, 1, size(input, 3))), dims=2)
kfn.zit .= reshape(_zit, (size(input, 1), :, size(input, 3)))
kfn.zitCumulative .+= kfn.zit
# project 3D kfn zit into 4D on zit
i1, i2, i3, i4 = size(kfn.on_zit)
kfn.on_zit .= reshape(kfn.zit, (i1, i2, 1, i4)) .* kfn.on_arrayProjection4d
# read out
onForward( kfn.on_zit,
kfn.on_wOut,
kfn.on_vt,
kfn.on_vth,
kfn.on_vRest,
kfn.on_zt4d,
kfn.on_alpha,
kfn.on_phi,
kfn.on_epsilonRec,
kfn.on_refractoryCounter,
kfn.on_refractoryDuration,
kfn.on_gammaPd,
kfn.on_firingCounter,
kfn.on_recSignal,
)
# get on_zt4d to on_zt
kfn.on_zt .= reduce(max, kfn.on_zt4d, dims=(1,2))
logit = reshape(kfn.on_zt, (size(input, 1), :))
return logit,
kfn.zit
end
# gpu launcher
# Compiles the lifForward kernel without launching (`launch=false`), asks CUDA
# for an occupancy-based launch configuration, then launches one thread per
# element of `wRec` and waits for completion (CUDA.@sync).
function lifForward( zit::CuArray,
                     wRec::CuArray,
                     vt::CuArray,
                     vth::CuArray,
                     vRest::CuArray,
                     zt::CuArray,
                     alpha::CuArray,
                     phi::CuArray,
                     epsilonRec::CuArray,
                     refractoryCounter::CuArray,
                     refractoryDuration::CuArray,
                     gammaPd::CuArray,
                     firingCounter::CuArray,
                     recSignal::CuArray,
                     exInType::CuArray,
                     wRecChange::CuArray,
                     neuronInactivityCounter::CuArray,
                     synapticActivityCounter::CuArray,
                     )
    kernel = @cuda launch=false lifForward( zit,
                                            wRec,
                                            vt,
                                            vth,
                                            vRest,
                                            zt,
                                            alpha,
                                            phi,
                                            epsilonRec,
                                            refractoryCounter,
                                            refractoryDuration,
                                            gammaPd,
                                            firingCounter,
                                            recSignal,
                                            exInType,
                                            wRecChange,
                                            neuronInactivityCounter,
                                            synapticActivityCounter,
                                            GeneralUtils.linear_to_cartesian,
                                            )
    config = launch_configuration(kernel.fun)
    # threads to be launched. Since one can't launch exact thread number the kernel needs,
    # one just launch threads more than this kernel needs then use a guard inside the kernel
    # to prevent unused threads to access memory.
    threads = min(1024, config.threads) # depend on gpu. Most NVIDIA gpu has 1024 threads per block
    # total desired threads to launch to gpu. Usually 1 thread per 1 matrix element
    totalThreads = length(wRec)
    blocks = cld(totalThreads, threads)
    # println("launching gpu kernel")
    CUDA.@sync begin
        kernel( zit,
                wRec,
                vt,
                vth,
                vRest,
                zt,
                alpha,
                phi,
                epsilonRec,
                refractoryCounter,
                refractoryDuration,
                gammaPd,
                firingCounter,
                recSignal,
                exInType,
                wRecChange,
                neuronInactivityCounter,
                synapticActivityCounter,
                GeneralUtils.linear_to_cartesian; threads, blocks)
    end
end
# gpu kernel
# One thread per synapse element of `wRec`; updates the element's membrane
# potential, spike, traces, and counters in place for one time step.
#
# NOTE(review): `sum(@view(recSignal[:,:,i3,i4]))` reads the whole synapse
# slice that sibling threads of the same neuron (same i3,i4) are concurrently
# writing, with no synchronization between write and read — the summed input
# may mix old and new values. Confirm this is acceptable/intended.
function lifForward( zit,
                     wRec,
                     vt,
                     vth,
                     vRest,
                     zt,
                     alpha,
                     phi,
                     epsilonRec,
                     refractoryCounter,
                     refractoryDuration,
                     gammaPd,
                     firingCounter,
                     recSignal,
                     exInType,
                     wRecChange,
                     neuronInactivityCounter,
                     synapticActivityCounter,
                     linear_to_cartesian,
                     )
    i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index
    if i <= length(wRec) # guard: over-provisioned threads do nothing
        # cartesian index
        i1, i2, i3, i4 = linear_to_cartesian(i, size(wRec))
        # @cuprintln("gpu thread $i $i1 $i2 $i3 $i4")
        if refractoryCounter[i1,i2,i3,i4] > 0 # refractory period is active
            refractoryCounter[i1,i2,i3,i4] -= 1
            recSignal[i1,i2,i3,i4] = 0
            zt[i1,i2,i3,i4] = 0
            # membrane potential only decays while refractory
            vt[i1,i2,i3,i4] = alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]
            phi[i1,i2,i3,i4] = 0
            # compute epsilonRec (trace decays, no new input)
            epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])
        else # refractory period is inactive
            # weighted presynaptic spike, signed by excitatory/inhibitory type
            recSignal[i1,i2,i3,i4] = wRec[i1,i2,i3,i4] * zit[i1,i2,i3,i4] *
                exInType[i1,i2,i3,i4]
            # leaky integration: decayed potential + summed input of this neuron
            vt[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]) +
                sum(@view(recSignal[:,:,i3,i4]))
            # fires if membrane potential exceed threshold
            if vt[i1,i2,i3,i4] > vth[i1,i2,i3,i4]
                zt[i1,i2,i3,i4] = 1
                refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
                firingCounter[i1,i2,i3,i4] += 1
                vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
                # reset counter if neuron fires
                neuronInactivityCounter[i1,i2,i3,i4] = 0
            else
                zt[i1,i2,i3,i4] = 0
                neuronInactivityCounter[i1,i2,i3,i4] -= 1
            end
            # compute phi (pseudo-derivative), there is a difference from lif formula
            phi[i1,i2,i3,i4] = (gammaPd[i1,i2,i3,i4] / vth[i1,i2,i3,i4]) *
                max(0, 1 - ((vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) / vth[i1,i2,i3,i4]))
            # compute epsilonRec; !iszero indicates synaptic subscription
            epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]) +
                (zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4]))
            # count synaptic activity: only subscribed synapses (nonzero weight)
            # carrying a presynaptic spike this step are counted.
            # (the former inactive-branch `+= 0` was a no-op and was removed)
            if !iszero(wRec[i1,i2,i3,i4]) && !iszero(zit[i1,i2,i3,i4])
                synapticActivityCounter[i1,i2,i3,i4] += 1
            end
            # voltage regulator: nudge weights toward keeping vt near threshold
            wRecChange[i1,i2,i3,i4] = -0.01*0.0001 * (vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) *
                zit[i1,i2,i3,i4]
        end
    end
    return nothing
end
# gpu launcher
# Same launch scheme as the lifForward launcher: compile without launching,
# pick an occupancy-based thread count, launch one thread per `wRec` element,
# and block until the kernel finishes.
function alifForward( zit::CuArray,
                      wRec::CuArray,
                      vt::CuArray,
                      vth::CuArray,
                      vRest::CuArray,
                      zt::CuArray,
                      alpha::CuArray,
                      phi::CuArray,
                      epsilonRec::CuArray,
                      refractoryCounter::CuArray,
                      refractoryDuration::CuArray,
                      gammaPd::CuArray,
                      firingCounter::CuArray,
                      recSignal::CuArray,
                      exInType::CuArray,
                      wRecChange::CuArray,
                      neuronInactivityCounter::CuArray,
                      synapticActivityCounter::CuArray,
                      epsilonRecA::CuArray,
                      a::CuArray,
                      avth::CuArray,
                      beta::CuArray,
                      rho::CuArray,
                      )
    kernel = @cuda launch=false alifForward( zit,
                                             wRec,
                                             vt,
                                             vth,
                                             vRest,
                                             zt,
                                             alpha,
                                             phi,
                                             epsilonRec,
                                             refractoryCounter,
                                             refractoryDuration,
                                             gammaPd,
                                             firingCounter,
                                             recSignal,
                                             exInType,
                                             wRecChange,
                                             neuronInactivityCounter,
                                             synapticActivityCounter,
                                             epsilonRecA,
                                             a,
                                             avth,
                                             beta,
                                             rho,
                                             GeneralUtils.linear_to_cartesian,
                                             )
    config = launch_configuration(kernel.fun)
    # threads to be launched. Since one can't launch exact thread number the kernel needs,
    # one just launch threads more than this kernel needs then use a guard inside the kernel
    # to prevent unused threads to access memory.
    threads = min(1024, config.threads) # depend on gpu. Most NVIDIA gpu has 1024 threads per block
    # total desired threads to launch to gpu. Usually 1 thread per 1 matrix element
    totalThreads = length(wRec)
    blocks = cld(totalThreads, threads)
    # println("launching gpu kernel")
    CUDA.@sync begin
        kernel( zit,
                wRec,
                vt,
                vth,
                vRest,
                zt,
                alpha,
                phi,
                epsilonRec,
                refractoryCounter,
                refractoryDuration,
                gammaPd,
                firingCounter,
                recSignal,
                exInType,
                wRecChange,
                neuronInactivityCounter,
                synapticActivityCounter,
                epsilonRecA,
                a,
                avth,
                beta,
                rho,
                GeneralUtils.linear_to_cartesian; threads, blocks)
    end
end
# gpu kernel
# Adaptive-LIF step: like lifForward but with an adaptation variable `a` that
# raises the effective threshold `avth = vth + beta*a`, plus a second trace
# `epsilonRecA` for the adaptation component of the eligibility trace.
#
# NOTE(review): in the refractory branch, phi is set to 0 *before* epsilonRecA
# is updated, so epsilonRecA collapses to 0 there; and unlike the archived CPU
# implementation of eq. 26 (which keeps a (rho - phi*beta)*epsilonRecA
# recursion), this version drops the recursive epsilonRecA term — confirm the
# update matches the intended e-prop equations.
# NOTE(review): `sum(@view(recSignal[:,:,i3,i4]))` is read while sibling
# threads of the same neuron write it, without synchronization — see lifForward.
function alifForward( zit,
                      wRec,
                      vt,
                      vth,
                      vRest,
                      zt,
                      alpha,
                      phi,
                      epsilonRec,
                      refractoryCounter,
                      refractoryDuration,
                      gammaPd,
                      firingCounter,
                      recSignal,
                      exInType,
                      wRecChange,
                      neuronInactivityCounter,
                      synapticActivityCounter,
                      epsilonRecA,
                      a,
                      avth,
                      beta,
                      rho,
                      linear_to_cartesian,
                      )
    i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index
    if i <= length(wRec) # guard: over-provisioned threads do nothing
        # cartesian index
        i1, i2, i3, i4 = linear_to_cartesian(i, size(wRec))
        # @cuprintln("gpu thread $i $i1 $i2 $i3 $i4")
        if refractoryCounter[i1,i2,i3,i4] > 0 # refractory period is active
            refractoryCounter[i1,i2,i3,i4] -= 1
            recSignal[i1,i2,i3,i4] = 0
            zt[i1,i2,i3,i4] = 0
            vt[i1,i2,i3,i4] = alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]
            phi[i1,i2,i3,i4] = 0
            # adaptation decays
            a[i1,i2,i3,i4] = rho[i1,i2,i3,i4] * a[i1,i2,i3,i4]
            # compute epsilonRec
            epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])
            # compute epsilonRecA use eq.26 (phi is 0 here, see NOTE above)
            epsilonRecA[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] *
                (phi[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]))
            # compute avth
            avth[i1,i2,i3,i4] = vth[i1,i2,i3,i4] + (beta[i1,i2,i3,i4] * a[i1,i2,i3,i4])
        else # refractory period is inactive
            recSignal[i1,i2,i3,i4] = wRec[i1,i2,i3,i4] * zit[i1,i2,i3,i4] *
                exInType[i1,i2,i3,i4]
            vt[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]) +
                sum(@view(recSignal[:,:,i3,i4]))
            # compute avth (adaptive threshold)
            avth[i1,i2,i3,i4] = vth[i1,i2,i3,i4] + (beta[i1,i2,i3,i4] * a[i1,i2,i3,i4])
            # fires if membrane potential exceed threshold
            if vt[i1,i2,i3,i4] > avth[i1,i2,i3,i4]
                zt[i1,i2,i3,i4] = 1
                refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
                firingCounter[i1,i2,i3,i4] += 1
                vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
                # adaptation decays then jumps by 1 on a spike
                a[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] * a[i1,i2,i3,i4]) + 1
                neuronInactivityCounter[i1,i2,i3,i4] = 0
            else
                zt[i1,i2,i3,i4] = 0
                a[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] * a[i1,i2,i3,i4])
                neuronInactivityCounter[i1,i2,i3,i4] -= 1
            end
            # compute phi (pseudo-derivative), there is a difference from alif formula
            phi[i1,i2,i3,i4] = (gammaPd[i1,i2,i3,i4] / vth[i1,i2,i3,i4]) *
                max(0, 1 - ((vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) / vth[i1,i2,i3,i4]))
            # compute epsilonRec
            epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]) +
                (zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4]))
            # compute epsilonRecA use eq.26 (see NOTE about the dropped recursion)
            epsilonRecA[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] *
                (phi[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])) +
                (zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4]))
            # count synaptic activity: only subscribed synapses (nonzero weight)
            # carrying a presynaptic spike this step are counted.
            # (the former inactive-branch `+= 0` was a no-op and was removed)
            if !iszero(wRec[i1,i2,i3,i4]) && !iszero(zit[i1,i2,i3,i4])
                synapticActivityCounter[i1,i2,i3,i4] += 1
            end
            # voltage regulator (uses adaptive threshold avth, unlike lifForward)
            wRecChange[i1,i2,i3,i4] = -0.01*0.0001 * (vt[i1,i2,i3,i4] - avth[i1,i2,i3,i4]) *
                zit[i1,i2,i3,i4]
        end
    end
    return nothing
end
# gpu launcher
# Output-layer launcher: same occupancy-based launch scheme as lifForward,
# sized by `wOut` instead of `wRec`.
function onForward( zit::CuArray,
                    wOut::CuArray,
                    vt::CuArray,
                    vth::CuArray,
                    vRest::CuArray,
                    zt::CuArray,
                    alpha::CuArray,
                    phi::CuArray,
                    epsilonRec::CuArray,
                    refractoryCounter::CuArray,
                    refractoryDuration::CuArray,
                    gammaPd::CuArray,
                    firingCounter::CuArray,
                    recSignal::CuArray,
                    )
    kernel = @cuda launch=false onForward( zit,
                                           wOut,
                                           vt,
                                           vth,
                                           vRest,
                                           zt,
                                           alpha,
                                           phi,
                                           epsilonRec,
                                           refractoryCounter,
                                           refractoryDuration,
                                           gammaPd,
                                           firingCounter,
                                           recSignal,
                                           GeneralUtils.linear_to_cartesian,
                                           )
    config = launch_configuration(kernel.fun)
    # threads to be launched. Since one can't launch exact thread number the kernel needs,
    # one just launch threads more than this kernel needs then use a guard inside the kernel
    # to prevent unused threads to access memory.
    threads = min(1024, config.threads) # depend on gpu. Most NVIDIA gpu has 1024 threads per block
    # total desired threads to launch to gpu. Usually 1 thread per 1 matrix element
    totalThreads = length(wOut)
    blocks = cld(totalThreads, threads)
    # println("launching gpu kernel")
    CUDA.@sync begin
        kernel( zit,
                wOut,
                vt,
                vth,
                vRest,
                zt,
                alpha,
                phi,
                epsilonRec,
                refractoryCounter,
                refractoryDuration,
                gammaPd,
                firingCounter,
                recSignal,
                GeneralUtils.linear_to_cartesian; threads, blocks)
    end
end
# gpu kernel
# Output ("on") neuron step: like lifForward but without excitatory/inhibitory
# signing, inactivity/activity counters, or the voltage regulator.
# NOTE(review): the slice-sum over recSignal has the same unsynchronized
# write/read pattern as the other kernels — confirm it is intended.
function onForward( zit,
                    wOut,
                    vt,
                    vth,
                    vRest,
                    zt,
                    alpha,
                    phi,
                    epsilonRec,
                    refractoryCounter,
                    refractoryDuration,
                    gammaPd,
                    firingCounter,
                    recSignal,
                    linear_to_cartesian,
                    )
    i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index
    if i <= length(wOut) # guard: over-provisioned threads do nothing
        # cartesian index
        i1, i2, i3, i4 = linear_to_cartesian(i, size(wOut))
        # @cuprintln("gpu thread $i $i1 $i2 $i3 $i4")
        if refractoryCounter[i1,i2,i3,i4] > 0 # refractory period is active
            refractoryCounter[i1,i2,i3,i4] -= 1
            recSignal[i1,i2,i3,i4] = 0
            zt[i1,i2,i3,i4] = 0
            vt[i1,i2,i3,i4] = alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]
            phi[i1,i2,i3,i4] = 0
            # compute epsilonRec (trace decays, no new input)
            epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])
        else # refractory period is inactive
            recSignal[i1,i2,i3,i4] = zit[i1,i2,i3,i4] * wOut[i1,i2,i3,i4]
            vt[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]) + sum(@view(recSignal[:,:,i3,i4]))
            # fires if membrane potential exceed threshold
            if vt[i1,i2,i3,i4] > vth[i1,i2,i3,i4]
                zt[i1,i2,i3,i4] = 1
                refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
                firingCounter[i1,i2,i3,i4] += 1
                vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
            else
                zt[i1,i2,i3,i4] = 0
            end
            # compute phi (pseudo-derivative), there is a difference from on formula
            phi[i1,i2,i3,i4] = (gammaPd[i1,i2,i3,i4] / vth[i1,i2,i3,i4]) *
                max(0, 1 - ((vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) / vth[i1,i2,i3,i4]))
            # compute epsilonRec; !iszero indicates synaptic subscription
            epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]) +
                (zit[i1,i2,i3,i4] * !iszero(wOut[i1,i2,i3,i4]))
        end
    end
    return nothing
end
# function lifForward(kfn_zit::Array{T},
# zit::Array{T},
# wRec::Array{T},
# vt0::Array{T},
# vt1::Array{T},
# vth::Array{T},
# vRest::Array{T},
# zt1::Array{T},
# alpha::Array{T},
# phi::Array{T},
# epsilonRec::Array{T},
# refractoryCounter::Array{T},
# refractoryDuration::Array{T},
# gammaPd::Array{T},
# firingCounter::Array{T},
# arrayProjection4d::Array{T},
# recSignal::Array{T},
# decayed_vt0::Array{T},
# decayed_epsilonRec::Array{T},
# vt1_diff_vth::Array{T},
# vt1_diff_vth_div_vth::Array{T},
# gammaPd_div_vth::Array{T},
# phiActivation::Array{T},
# ) where T<:Number
# # project 3D kfn zit into 4D lif zit
# i1, i2, i3, i4 = size(alif_wRec)
# lif_zit .= reshape(kfn_zit, (i1, i2, 1, i4)) .* lif_arrayProjection4d
# for j in 1:size(wRec, 4), i in 1:size(wRec, 3) # compute along neurons axis of every batch
# if sum(@view(refractoryCounter[:,:,i,j])) > 0 # refractory period is active
# @. @views refractoryCounter[:,:,i,j] -= 1
# @. @views zt1[:,:,i,j] = 0
# @. @views vt1[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @. @views phi[:,:,i,j] = 0
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j]
# else # refractory period is inactive
# @. @views recSignal[:,:,i,j] = zit[:,:,i,j] * wRec[:,:,i,j]
# @. @views decayed_vt0[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @view(vt1[:,:,i,j]) .= @view(decayed_vt0[:,:,i,j]) .+ sum(@view(recSignal[:,:,i,j]))
# if sum(@view(vt1[:,:,i,j])) > sum(@view(vth[:,:,i,j]))
# @. @views zt1[:,:,i,j] = 1
# @. @views refractoryCounter[:,:,i,j] = refractoryDuration[:,:,i,j]
# @. @views firingCounter[:,:,i,j] += 1
# @. @views vt1[:,:,i,j] = vRest[:,:,i,j]
# else
# @. @views zt1[:,:,i,j] = 0
# end
# # compute phi, there is a difference from alif formula
# @. @views gammaPd_div_vth[:,:,i,j] = gammaPd[:,:,i,j] / vth[:,:,i,j]
# @. @views vt1_diff_vth[:,:,i,j] = vt1[:,:,i,j] - vth[:,:,i,j]
# @. @views vt1_diff_vth_div_vth[:,:,i,j] = vt1_diff_vth[:,:,i,j] / vth[:,:,i,j]
# @view(phiActivation[:,:,i,j]) .= max(0, 1 - sum(@view(vt1_diff_vth_div_vth[:,:,i,j])))
# @. @views phi[:,:,i,j] = gammaPd_div_vth[:,:,i,j] * phiActivation[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j] + zit[:,:,i,j]
# end
# end
# end
# function alifForward(zit::Array{T},
# wRec::Array{T},
# vt0::Array{T},
# vt1::Array{T},
# vth::Array{T},
# vRest::Array{T},
# zt1::Array{T},
# alpha::Array{T},
# phi::Array{T},
# epsilonRec::Array{T},
# refractoryCounter::Array{T},
# refractoryDuration::Array{T},
# gammaPd::Array{T},
# firingCounter::Array{T},
# recSignal::Array{T},
# decayed_vt0::Array{T},
# decayed_epsilonRec::Array{T},
# vt1_diff_vth::Array{T},
# vt1_diff_vth_div_vth::Array{T},
# gammaPd_div_vth::Array{T},
# phiActivation::Array{T},
# epsilonRecA::Array{T},
# avth::Array{T},
# a::Array{T},
# beta::Array{T},
# rho::Array{T},
# phi_x_epsilonRec::Array{T},
# phi_x_beta::Array{T},
# rho_diff_phi_x_beta::Array{T},
# rho_div_phi_x_beta_x_epsilonRecA::Array{T},
# beta_x_a::Array{T},
# ) where T<:Number
# for j in 1:size(wRec, 4), i in 1:size(wRec, 3) # compute along neurons axis of every batch
# if sum(@view(refractoryCounter[:,:,i,j])) > 0 # refractory period is active
# @. @views refractoryCounter[:,:,i,j] -= 1
# @. @views zt1[:,:,i,j] = 0
# @. @views vt1[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @. @views phi[:,:,i,j] = 0
# @. @views a[:,:,i,j] = rho[:,:,i,j] * a[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j]
# # compute epsilonRecA
# @. @views phi_x_epsilonRec[:,:,i,j] = phi[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views phi_x_beta[:,:,i,j] = phi[:,:,i,j] * beta[:,:,i,j]
# @. @views rho_diff_phi_x_beta[:,:,i,j] = rho[:,:,i,j] - phi_x_beta[:,:,i,j]
# @. @views rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j] = rho_diff_phi_x_beta[:,:,i,j] * epsilonRecA[:,:,i,j]
# @. @views epsilonRecA[:,:,i,j] = phi_x_epsilonRec[:,:,i,j] + rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j]
# # compute avth
# @. @views beta_x_a[:,:,i,j] = beta[:,:,i,j] * a[:,:,i,j]
# @. @views avth[:,:,i,j] = vth[:,:,i,j] + beta_x_a[:,:,i,j]
# else # refractory period is inactive
# @. @views recSignal[:,:,i,j] = zit[:,:,i,j] * wRec[:,:,i,j]
# @. @views decayed_vt0[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @view(vt1[:,:,i,j]) .= @view(decayed_vt0[:,:,i,j]) .+ sum(@view(recSignal[:,:,i,j]))
# # compute avth
# @. @views beta_x_a[:,:,i,j] = beta[:,:,i,j] * a[:,:,i,j]
# @. @views avth[:,:,i,j] = vth[:,:,i,j] + beta_x_a[:,:,i,j]
# if sum(@view(vt1[:,:,i,j])) > sum(@view(avth[:,:,i,j]))
# @. @views zt1[:,:,i,j] = 1
# @. @views refractoryCounter[:,:,i,j] = refractoryDuration[:,:,i,j]
# @. @views firingCounter[:,:,i,j] += 1
# @. @views vt1[:,:,i,j] = vRest[:,:,i,j]
# @. @views a[:,:,i,j] = rho[:,:,i,j] * a[:,:,i,j]
# @. @views a[:,:,i,j] = a[:,:,i,j] += 1
# else
# @. @views zt1[:,:,i,j] = 0
# @. @views a[:,:,i,j] = rho[:,:,i,j] * a[:,:,i,j]
# end
# # compute phi, there is a difference from alif formula
# @. @views gammaPd_div_vth[:,:,i,j] = gammaPd[:,:,i,j] / vth[:,:,i,j]
# @. @views vt1_diff_vth[:,:,i,j] = vt1[:,:,i,j] - vth[:,:,i,j]
# @. @views vt1_diff_vth_div_vth[:,:,i,j] = vt1_diff_vth[:,:,i,j] / vth[:,:,i,j]
# @view(phiActivation[:,:,i,j]) .= max(0, 1 - sum(@view(vt1_diff_vth_div_vth[:,:,i,j])))
# @. @views phi[:,:,i,j] = gammaPd_div_vth[:,:,i,j] * phiActivation[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j] + zit[:,:,i,j]
# # compute epsilonRecA
# @. @views phi_x_epsilonRec[:,:,i,j] = phi[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views phi_x_beta[:,:,i,j] = phi[:,:,i,j] * beta[:,:,i,j]
# @. @views rho_diff_phi_x_beta[:,:,i,j] = rho[:,:,i,j] - phi_x_beta[:,:,i,j]
# @. @views rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j] = rho_diff_phi_x_beta[:,:,i,j] * epsilonRecA[:,:,i,j]
# @. @views epsilonRecA[:,:,i,j] = phi_x_epsilonRec[:,:,i,j] + rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j]
# end
# end
# end
# function onForward(kfn_zit::Array{T},
# zit::Array{T},
# wOut::Array{T},
# vt0::Array{T},
# vt1::Array{T},
# vth::Array{T},
# vRest::Array{T},
# zt1::Array{T},
# alpha::Array{T},
# phi::Array{T},
# epsilonRec::Array{T},
# refractoryCounter::Array{T},
# refractoryDuration::Array{T},
# gammaPd::Array{T},
# firingCounter::Array{T},
# arrayProjection4d::Array{T},
# recSignal::Array{T},
# decayed_vt0::Array{T},
# decayed_epsilonRec::Array{T},
# vt1_diff_vth::Array{T},
# vt1_diff_vth_div_vth::Array{T},
# gammaPd_div_vth::Array{T},
# phiActivation::Array{T},
# ) where T<:Number
# # project 3D kfn zit into 4D lif zit
# zit .= reshape(kfn_zit,
# (size(wOut, 1), size(wOut, 2), 1, size(wOut, 4))) .* arrayProjection4d
# for j in 1:size(wOut, 4), i in 1:size(wOut, 3) # compute along neurons axis of every batch
# if sum(@view(refractoryCounter[:,:,i,j])) > 0 # refractory period is active
# @. @views refractoryCounter[:,:,i,j] -= 1
# @. @views zt1[:,:,i,j] = 0
# @. @views vt1[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @. @views phi[:,:,i,j] = 0
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j]
# else # refractory period is inactive
# @. @views recSignal[:,:,i,j] = zit[:,:,i,j] * wOut[:,:,i,j]
# @. @views decayed_vt0[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @view(vt1[:,:,i,j]) .= @view(decayed_vt0[:,:,i,j]) .+ sum(@view(recSignal[:,:,i,j]))
# if sum(@view(vt1[:,:,i,j])) > sum(@view(vth[:,:,i,j]))
# @. @views zt1[:,:,i,j] = 1
# @. @views refractoryCounter[:,:,i,j] = refractoryDuration[:,:,i,j]
# @. @views firingCounter[:,:,i,j] += 1
# @. @views vt1[:,:,i,j] = vRest[:,:,i,j]
# else
# @. @views zt1[:,:,i,j] = 0
# end
# # compute phi, there is a difference from alif formula
# @. @views gammaPd_div_vth[:,:,i,j] = gammaPd[:,:,i,j] / vth[:,:,i,j]
# @. @views vt1_diff_vth[:,:,i,j] = vt1[:,:,i,j] - vth[:,:,i,j]
# @. @views vt1_diff_vth_div_vth[:,:,i,j] = vt1_diff_vth[:,:,i,j] / vth[:,:,i,j]
# @view(phiActivation[:,:,i,j]) .= max(0, 1 - sum(@view(vt1_diff_vth_div_vth[:,:,i,j])))
# @. @views phi[:,:,i,j] = gammaPd_div_vth[:,:,i,j] * phiActivation[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j] + zit[:,:,i,j]
# end
# end
# end
end # module

View File

@@ -0,0 +1,87 @@
module interface
# export
# using Flux, CUDA
#------------------------------------------------------------------------------------------------100
# Placeholder for the user-facing API of IronpenGPU; intentionally empty so far.
end # module

View File

@@ -0,0 +1,561 @@
module learn
export learn!, compute_paramsChange!
using Statistics, Random, LinearAlgebra, JSON3, Flux, CUDA, Dates
using GeneralUtils
using ..type, ..snnUtil
#------------------------------------------------------------------------------------------------100
# Accumulate the pending weight changes for all three populations (LIF, ALIF,
# output) from the current model/output error. Does not apply the changes —
# that happens in learn!().
#   modelError  — summed network error broadcast to the recurrent populations
#   outputError — per-output-neuron error for the read-out weights
# NOTE(review): the LIF variant selects its wOut slice from `inputSize`
# onward while the ALIF variant takes the trailing slice (`end-n+1:end`) —
# confirm the two index ranges partition wOut as intended.
function compute_paramsChange!(kfn::kfn_1, modelError, outputError)
    lifComputeParamsChange!(kfn.timeStep,
                            kfn.lif_phi,
                            kfn.lif_epsilonRec,
                            kfn.lif_eta,
                            kfn.lif_eRec,
                            kfn.lif_wRec,
                            kfn.lif_exInType,
                            kfn.lif_wRecChange,
                            kfn.on_wOut,
                            kfn.lif_firingCounter,
                            kfn.lif_firingTargetFrequency,
                            kfn.lif_arrayProjection4d,
                            kfn.lif_error,
                            modelError,
                            kfn.inputSize,
                            )
    alifComputeParamsChange!(kfn.timeStep,
                             kfn.alif_phi,
                             kfn.alif_epsilonRec,
                             kfn.alif_eta,
                             kfn.alif_eRec,
                             kfn.alif_wRec,
                             kfn.alif_exInType,
                             kfn.alif_wRecChange,
                             kfn.on_wOut,
                             kfn.alif_firingCounter,
                             kfn.alif_firingTargetFrequency,
                             kfn.alif_arrayProjection4d,
                             kfn.alif_error,
                             modelError,
                             kfn.alif_epsilonRecA,
                             kfn.alif_beta,
                             )
    onComputeParamsChange!(kfn.on_phi,
                           kfn.on_epsilonRec,
                           kfn.on_eta,
                           kfn.on_eRec,
                           kfn.on_wOutChange,
                           kfn.on_arrayProjection4d,
                           kfn.on_error,
                           outputError,
                           )
    # error("DEBUG -> kfn compute_paramsChange! $(Dates.now())")
end
# GPU (CuArray) version of the LIF weight-change computation.
# Accumulates -eta * eligibility * learning-signal into wRecChange, plus a
# firing-frequency regularization term, then consumes (zeroes) epsilonRec.
function lifComputeParamsChange!( timeStep::CuArray,
                                  phi::CuArray,
                                  epsilonRec::CuArray,
                                  eta::CuArray,
                                  eRec::CuArray,
                                  wRec::CuArray,
                                  exInType::CuArray,
                                  wRecChange::CuArray,
                                  wOut::CuArray,
                                  firingCounter::CuArray,
                                  firingTargetFrequency::CuArray,
                                  arrayProjection4d::CuArray,
                                  nError::CuArray,
                                  modelError::CuArray,
                                  inputSize::CuArray,
                                  )
    # Bₖⱼ in paper, sum() to get each neuron's total wOut weight,
    # use absolute because only magnitude is needed
    wOutSum_all = reshape( abs.(sum(wOut, dims=3)), (1,1,:, size(wOut, 4)) ) # (1,1,allNeuron,batch)
    # get only each lif neuron's wOut, leaving out other neuron's wOut;
    # the LIF population occupies the slots right after the input block
    startIndex = prod(inputSize) +1
    stopIndex = startIndex + size(wRec, 3) -1
    wOutSum = @view(wOutSum_all[1,1, startIndex:stopIndex, :])
    wOutSum = reshape(wOutSum, (1, 1, size(wOutSum, 1), size(wOutSum, 2))) # (1,1,n,batch)
    # nError a.k.a. learning signal use dopamine concept,
    # this neuron receive summed error signal (modelError)
    nError .= (modelError .* wOutSum) .* arrayProjection4d
    # eligibility trace
    eRec .= phi .* epsilonRec
    wRecChange .+= (-eta .* nError .* eRec)
    # frequency regulator: pull firing rate toward firingTargetFrequency
    wRecChange .+= 0.001 .* ((firingTargetFrequency - (firingCounter./timeStep)) ./ timeStep) .*
        eta .* eRec
    # if sum(timeStep) == 785
    # epsilonRec_cpu = epsilonRec |> cpu
    # println("modelError ", modelError)
    # println("")
    # wchange = (-eta .* nError .* eRec) |> cpu
    # println("wchange 5 1 ", wchange[:,:,5,1])
    # println("")
    # println("wchange 5 2 ", wchange[:,:,5,2])
    # println("")
    # println("epsilonRec 5 1 ", epsilonRec_cpu[:,:,5,1])
    # println("")
    # println("epsilonRec 5 2 ", epsilonRec_cpu[:,:,5,2])
    # println("")
    # error("DEBUG lifComputeParamsChange!")
    # end
    # reset epsilonRec
    epsilonRec .= 0
end
# GPU (CuArray) version of the ALIF weight-change computation. Like the LIF
# variant but the eligibility trace includes the adaptation component
# (eq. 25: epsilonRec - beta*epsilonRecA), and both traces are consumed.
# NOTE(review): the wOut slice is taken from the *end* of wOutSum_all,
# whereas the LIF variant starts at prod(inputSize)+1 — confirm the ranges
# line up with the neuron layout in wOut.
function alifComputeParamsChange!( timeStep::CuArray,
                                   phi::CuArray,
                                   epsilonRec::CuArray,
                                   eta::CuArray,
                                   eRec::CuArray,
                                   wRec::CuArray,
                                   exInType::CuArray,
                                   wRecChange::CuArray,
                                   wOut::CuArray,
                                   firingCounter::CuArray,
                                   firingTargetFrequency::CuArray,
                                   arrayProjection4d::CuArray,
                                   nError::CuArray,
                                   modelError::CuArray,
                                   epsilonRecA::CuArray,
                                   beta::CuArray
                                   )
    # Bₖⱼ in paper, sum() to get each neuron's total wOut weight,
    # use absolute because only magnitude is needed
    wOutSum_all = reshape( abs.(sum(wOut, dims=3)), (1,1,:, size(wOut, 4)) ) # (1,1,allNeuron,batch)
    # get only each lif neuron's wOut, leaving out other neuron's wOut
    wOutSum = @view(wOutSum_all[1,1, end-size(wRec, 3)+1:end, :])
    wOutSum = reshape(wOutSum, (1, 1, size(wOutSum, 1), size(wOutSum, 2))) # (1,1,n,batch)
    # nError a.k.a. learning signal use dopamine concept,
    # this neuron receive summed error signal (modelError)
    nError .= (modelError .* wOutSum) .* arrayProjection4d
    eRec .= phi .* (epsilonRec .- (beta .* epsilonRecA)) # use eq. 25
    wRecChange .+= (-eta .* nError .* eRec)
    # frequency regulator: pull firing rate toward firingTargetFrequency
    wRecChange .+= 0.001 .* ((firingTargetFrequency - (firingCounter./timeStep)) ./ timeStep) .*
        eta .* eRec
    # reset epsilonRec
    epsilonRec .= 0
    epsilonRecA .= 0
    # error("DEBUG -> alifComputeParamsChange! $(Dates.now())")
end
# GPU (CuArray) version of the output-layer weight-change computation.
# Each output neuron's own error, masked by the projection matrix, is the
# learning signal; the change is accumulated and epsilonRec is consumed.
function onComputeParamsChange!(phi::CuArray,
                                epsilonRec::CuArray,
                                eta::CuArray,
                                eRec::CuArray,
                                wOutChange::CuArray,
                                arrayProjection4d::CuArray,
                                nError::CuArray,
                                outputError::CuArray # outputError is output neuron's error
                                )
    # broadcast each neuron's error over its synapse grid
    nError .= reshape(outputError, (1, 1, :, size(outputError, 2))) .* arrayProjection4d
    # eligibility trace for the read-out weights
    @. eRec = phi * epsilonRec
    # accumulate gradient-like update; eta is the learning rate
    @. wOutChange += -eta * nError * eRec
    # consume the trace for the next learning round
    fill!(epsilonRec, 0)
    # error("DEBUG -> onComputeParamsChange! $(Dates.now())")
end
# CPU (AbstractArray) fallback of the LIF weight-change computation.
# Accumulates -eta * eligibility * learning-signal into wRecChange, looping
# over every neuron of every batch. `wRec` is accepted for signature parity
# but not read.
function lifComputeParamsChange!( phi::AbstractArray,
                                  epsilonRec::AbstractArray,
                                  eta::AbstractArray,
                                  wRec::AbstractArray,
                                  wRecChange::AbstractArray,
                                  wOut::AbstractArray,
                                  modelError::AbstractArray)
    nRows, _, nNeurons, nBatch = size(epsilonRec)
    # Bₖⱼ in the paper: each neuron's summed outgoing read-out weight
    wOutSum = reshape(sum(wOut, dims=3), (nRows, :, nBatch))
    for batch in 1:nBatch, neuron in 1:nNeurons
        # dopamine-style learning signal: summed model error scaled by this
        # neuron's total contribution to the read-out layer
        learningSignal = modelError[1, batch] * view(wOutSum, :, :, batch)[neuron]
        # eligibility trace of every synapse of this neuron
        eligibility = phi[1, 1, neuron, batch] .* view(epsilonRec, :, :, neuron, batch)
        view(wRecChange, :, :, neuron, batch) .+=
            (-1 * eta[1, 1, neuron, batch]) .* (eligibility .* learningSignal)
    end
end
# CPU (AbstractArray) fallback of the ALIF weight-change computation.
# Same as the LIF fallback but the eligibility trace adds the adaptation
# component phi*beta*epsilonRecA. `wRec` is accepted for signature parity
# but not read.
function alifComputeParamsChange!( phi::AbstractArray,
                                   epsilonRec::AbstractArray,
                                   epsilonRecA::AbstractArray,
                                   eta::AbstractArray,
                                   wRec::AbstractArray,
                                   wRecChange::AbstractArray,
                                   beta::AbstractArray,
                                   wOut::AbstractArray,
                                   modelError::AbstractArray)
    nRows, _, nNeurons, nBatch = size(epsilonRec)
    # Bₖⱼ in the paper: each neuron's summed outgoing read-out weight
    wOutSum = reshape(sum(wOut, dims=3), (nRows, :, nBatch))
    for batch in 1:nBatch, neuron in 1:nNeurons
        phiScalar  = phi[1, 1, neuron, batch]
        betaScalar = beta[1, 1, neuron, batch]
        # eligibility = voltage component + adaptation component
        eligibility = (phiScalar .* view(epsilonRec, :, :, neuron, batch)) .+
                      ((phiScalar * betaScalar) .* view(epsilonRecA, :, :, neuron, batch))
        # dopamine-style learning signal (summed model error x read-out weight)
        learningSignal = modelError[1, batch] * view(wOutSum, :, :, batch)[neuron]
        view(wRecChange, :, :, neuron, batch) .+=
            ((-1 * eta[1, 1, neuron, batch]) .* eligibility) .* learningSignal
    end
end
# CPU (AbstractArray) fallback of the output-layer weight-change computation.
# Each output neuron receives its own error (its answer minus the correct
# answer) directly as the learning signal.
function onComputeParamsChange!(phi::AbstractArray,
                                epsilonRec::AbstractArray,
                                eta::AbstractArray,
                                wOutChange::AbstractArray,
                                outputError::AbstractArray)
    _, _, nNeurons, nBatch = size(epsilonRec)
    for batch in 1:nBatch, neuron in 1:nNeurons
        errorSignal = outputError[neuron, batch]
        # eligibility trace of every synapse of this output neuron
        eligibility = phi[1, 1, neuron, batch] .* view(epsilonRec, :, :, neuron, batch)
        view(wOutChange, :, :, neuron, batch) .+=
            (-1 * eta[1, 1, neuron, batch]) .* (eligibility .* errorSignal)
    end
end
# Run one full learning step over all three populations (LIF, ALIF, output
# neurons), then close the learning session if it was in its final stage (3 → 0).
# lifLearn/alifLearn return new device arrays that replace the kfn fields;
# onLearn! mutates kfn.on_wOut in place.
# NOTE(review): learningStage is a 1-element device array, hence the `== [3]`
# array comparison.
function learn!(kfn::kfn_1, device=cpu)
    # lif learn
    kfn.lif_wRec, kfn.lif_neuronInactivityCounter, kfn.lif_synapticActivityCounter =
        lifLearn(kfn.lif_wRec,
                 kfn.lif_exInType,
                 kfn.lif_wRecChange,
                 kfn.lif_arrayProjection4d,
                 kfn.lif_neuronInactivityCounter,
                 kfn.lif_synapticActivityCounter,
                 kfn.lif_synapseConnectionNumber,
                 kfn.lif_synapticWChangeCounter,
                 kfn.lif_eta,
                 kfn.zitCumulative,
                 device)
    # alif learn (NOTE: alifLearn takes no eta argument, unlike lifLearn)
    kfn.alif_wRec, kfn.alif_neuronInactivityCounter, kfn.alif_synapticActivityCounter =
        alifLearn(kfn.alif_wRec,
                  kfn.alif_exInType,
                  kfn.alif_wRecChange,
                  kfn.alif_arrayProjection4d,
                  kfn.alif_neuronInactivityCounter,
                  kfn.alif_synapticActivityCounter,
                  kfn.alif_synapseConnectionNumber,
                  kfn.alif_synapticWChangeCounter,
                  kfn.zitCumulative,
                  device)
    # on learn
    onLearn!(kfn.on_wOut,
             kfn.on_wOutChange,
             kfn.on_arrayProjection4d)
    # wrap up learning session
    if kfn.learningStage == [3]
        kfn.learningStage = [0]
    end
    # error("DEBUG -> kfn learn! $(Dates.now())")
end
# Apply one learning step to the LIF population:
#   1. fold the batch-averaged weight deltas into wRec (on device),
#   2. move per-neuron state to CPU, apply activity-based weight scaling,
#   3. mark weak synapses and run structural plasticity (neuroplasticity),
#   4. move results back to `device`.
# Returns the updated (wRec, neuronInactivityCounter, synapticActivityCounter).
# `synapticWChangeCounter` is accepted but not used yet (see #TODO).
function lifLearn(wRec,
                  exInType,
                  wRecChange,
                  arrayProjection4d,
                  neuronInactivityCounter,
                  synapticActivityCounter,
                  synapseConnectionNumber,
                  synapticWChangeCounter, #TODO
                  eta,
                  zitCumulative,
                  device)
    # merge learning weight with average learning weight of all batch
    wch = sum(wRecChange, dims=4) ./ (size(wRec, 4)) .* arrayProjection4d
    wRec .= (exInType .* wRec) .+ wch
    arrayProjection4d_cpu = arrayProjection4d |> cpu
    wRec_cpu = wRec |> cpu
    wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n)
    eta_cpu = eta |> cpu
    eta_cpu = eta_cpu[:,:,:,1]
    neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
    neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n)
    synapticActivityCounter_cpu = synapticActivityCounter |> cpu
    synapticActivityCounter_cpu = synapticActivityCounter_cpu[:,:,:,1]
    zitCumulative_cpu = zitCumulative |> cpu
    zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col)
    # -W if less than 10% of repeat avg, +W otherwise
    _, _, i3 = size(wRec_cpu)
    for i in 1:i3
        # FIX: use the CPU slice prepared above (as alifLearn does); the original
        # indexed the device-resident 4-D `synapticActivityCounter` here, which
        # is slower on GPU and inconsistent with the rest of this CPU-side pass.
        # (Values are identical: batch slice 1 of the same counter.)
        x = 0.1 * (sum(synapticActivityCounter_cpu[:,:,i]) / length(synapticActivityCounter_cpu[:,:,i]))
        mask = GeneralUtils.replaceLessThan.(wRec_cpu[:,:,i], x, -1, 1)
        wRec_cpu[:,:,i] .+= mask .* eta_cpu[:,:,i] .* wRec_cpu[:,:,i]
    end
    # weak / negative synaptic connection will get randomed in neuroplasticity()
    wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0
    # # synaptic connection that has no activity will get randomed in neuroplasticity()
    # mask = isless.(synapticActivityCounter_cpu, -100000)
    # GeneralUtils.replaceElements!(mask, 1, wRec_cpu, -1.0)
    # # reset lif_inactivity elements to base value
    # GeneralUtils.replaceElements!(mask, 1, synapticActivityCounter_cpu, 0.0)
    # neuroplasticity, work on CPU side
    wRec_cpu = neuroplasticity(synapseConnectionNumber,
                               zitCumulative_cpu,
                               wRec_cpu,
                               neuronInactivityCounter_cpu,
                               synapticActivityCounter_cpu)
    wRec_cpu = wRec_cpu .* arrayProjection4d_cpu
    wRec = wRec_cpu |> device
    neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu
    neuronInactivityCounter = neuronInactivityCounter_cpu |> device
    synapticActivityCounter_cpu = synapticActivityCounter_cpu .* arrayProjection4d_cpu
    synapticActivityCounter = synapticActivityCounter_cpu |> device
    return wRec, neuronInactivityCounter, synapticActivityCounter
end
# Apply one learning step to the ALIF population: fold the batch-averaged weight
# deltas into wRec on device, then move per-neuron state to CPU, prune weak and
# inactive synapses, run structural plasticity (neuroplasticity), and move the
# results back to `device`.
# Returns the updated (wRec, neuronInactivityCounter, synapticActivityCounter).
# NOTE(review): unlike lifLearn this takes no `eta` and skips the activity-based
# weight-scaling loop, while the "inactive synapse" pruning that lifLearn has
# commented out is active here — confirm this asymmetry is intended.
# `synapticWChangeCounter` is accepted but not used yet (see #TODO).
function alifLearn(wRec,
                   exInType,
                   wRecChange,
                   arrayProjection4d,
                   neuronInactivityCounter,
                   synapticActivityCounter,
                   synapseConnectionNumber,
                   synapticWChangeCounter, #TODO
                   zitCumulative,
                   device)
    # merge learning weight with average learning weight of all batch
    wch = sum(wRecChange, dims=4) ./ (size(wRec, 4)) .* arrayProjection4d
    wRec .= (exInType .* wRec) .+ wch
    arrayProjection4d_cpu = arrayProjection4d |> cpu
    wRec_cpu = wRec |> cpu
    wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n)
    neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
    neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n)
    synapticActivityCounter_cpu = synapticActivityCounter |> cpu
    synapticActivityCounter_cpu = synapticActivityCounter_cpu[:,:,:,1]
    zitCumulative_cpu = zitCumulative |> cpu
    zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col)
    # weak / negative synaptic connection will get randomed in neuroplasticity()
    wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0
    # synaptic connection that has no activity will get randomed in neuroplasticity()
    mask = isless.(synapticActivityCounter_cpu, -100000)
    GeneralUtils.replaceElements!(mask, 1, wRec_cpu, -1.0)
    # reset alif_inactivity elements to base value
    GeneralUtils.replaceElements!(mask, 1, synapticActivityCounter_cpu, 0.0)
    # neuroplasticity, work on CPU side
    wRec_cpu = neuroplasticity(synapseConnectionNumber,
                               zitCumulative_cpu,
                               wRec_cpu,
                               neuronInactivityCounter_cpu,
                               synapticActivityCounter_cpu)
    wRec_cpu = wRec_cpu .* arrayProjection4d_cpu
    wRec = wRec_cpu |> device
    neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu
    neuronInactivityCounter = neuronInactivityCounter_cpu |> device
    synapticActivityCounter_cpu = synapticActivityCounter_cpu .* arrayProjection4d_cpu
    synapticActivityCounter = synapticActivityCounter_cpu |> device
    # error("DEBUG -> alifLearn! $(Dates.now())")
    return wRec, neuronInactivityCounter, synapticActivityCounter
end
# Output-neuron learning step: fold the batch-averaged wOut deltas into wOut,
# then apply a small multiplicative decay (0.001) to aid convergence.
# Mutates `wOut` in place.
function onLearn!(wOut,
                  wOutChange,
                  arrayProjection4d)
    batchCount = size(wOut, 4)
    meanChange = sum(wOutChange, dims=4) ./ batchCount
    wOut .+= meanChange .* arrayProjection4d
    # weight decay (c_decay) keeps wOut from growing without bound
    wOut .-= 0.001 .* wOut
    #TODO synaptic strength
    #TODO neuroplasticity
end
# Structural-plasticity pass (CPU side). Synapses previously marked -1.0 in
# `wRec` are recycled: roughly 70% of each neuron's connection budget should
# subscribe to recently-firing presynaptic neurons, the remainder to non-firing
# ones. A neuron whose inactivity counter fell below -10000 "dies" and gets a
# completely fresh random weight slice.
# Shapes: zitCumulative (row, col); wRec and the counters (row, col, n).
# Returns the updated wRec (counters are mutated in place).
function neuroplasticity(synapseConnectionNumber,
                         zitCumulative, # (row, col)
                         wRec, # (row, col, n)
                         neuronInactivityCounter,
                         synapticActivityCounter) # (row, col, n)
    i1, i2, i3 = size(wRec)
    # for each neuron, find total number of synaptic conn that should draw
    # new connection to firing and non-firing neurons pool
    subToFireNeuron_toBe = Int(floor(0.7 * synapseConnectionNumber))
    # for each neuron, count how many synap already subscribed to firing-neurons
    # NOTE(review): isBetween(zw, 0.0, 100.0) may also count zero (unsubscribed)
    # entries depending on GeneralUtils.isBetween's inclusivity — confirm.
    zw = zitCumulative .* wRec
    subToFireNeuron_current = sum(GeneralUtils.isBetween.(zw, 0.0, 100.0), dims=(1,2)) # (1, 1, n)
    zitMask = (!iszero).(zitCumulative) # zitMask of firing neurons = 1, non-firing = 0
    projection = ones(i1, i2, i3)
    zitMask = zitMask .* projection # broadcast the 2-D mask to (row, col, n)
    totalNewConn = sum(isequal.(wRec, -1.0), dims=(1,2)) # count new conn mark (-1.0), (1, 1, n)
    println("neuroplasticity, from $(synapseConnectionNumber*size(totalNewConn, 3)) conn, $(sum(totalNewConn)) are replaced")
    # clear -1.0 marker: reset matching activity counters, then zero the weights
    GeneralUtils.replaceElements!(wRec, -1.0, synapticActivityCounter, -0.99)
    GeneralUtils.replaceElements!(wRec, -1.0, 0.0) # -1.0 marker is no longer required
    for i in 1:i3
        # FIX: the original tested `neuronInactivityCounter[1:1:i][1]`, which is
        # linear indexing and always reads the very first element regardless of
        # `i`; read the current neuron's value instead, using the same [1,1,i]
        # per-neuron scalar convention used elsewhere in this file.
        if neuronInactivityCounter[1, 1, i] < -10000 # neuron die i.e. reset all weight
            println("neuron die")
            neuronInactivityCounter[:,:,i] .= 0 # reset
            w = random_wRec(i1, i2, 1, synapseConnectionNumber)
            wRec[:,:,i] .= w
            a = similar(w) .= -0.99 # synapseConnectionNumber of this neuron
            mask = (!iszero).(w)
            GeneralUtils.replaceElements!(mask, 1, a, 0)
            synapticActivityCounter[:,:,i] = a
        else
            remaining = 0
            if subToFireNeuron_current[1,1,i] < subToFireNeuron_toBe
                toAddConn = subToFireNeuron_toBe - subToFireNeuron_current[1,1,i]
                totalNewConn[1,1,i] = totalNewConn[1,1,i] - toAddConn
                # add new conn to firing neurons pool
                remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
                                                @view(wRec[:,:,i]),
                                                @view(synapticActivityCounter[:,:,i]),
                                                toAddConn)
                totalNewConn[1,1,i] += remaining
            end
            # add new conn to non-firing neurons pool
            remaining = addNewSynapticConn!(zitMask[:,:,i], 0,
                                            @view(wRec[:,:,i]),
                                            @view(synapticActivityCounter[:,:,i]),
                                            totalNewConn[1,1,i])
            if remaining > 0 # final get-all round if somehow non-firing pool has not enough slot
                remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
                                                @view(wRec[:,:,i]),
                                                @view(synapticActivityCounter[:,:,i]),
                                                remaining)
            end
        end
    end
    # error("DEBUG -> neuroplasticity $(Dates.now())")
    return wRec
end
# learningLiquidity(x) = -0.0001x + 1 # -10000 to +10000; f(x) = -5e-05x+0.5
# Map a counter value x to a learning-rate multiplier in [0, 1]:
# linear ramp y = -5e-5·x + 0.5 on (-10000, 10000), clamped to 1.0 below
# and 0.0 above that range.
function learningLiquidity(x)
    x > 10000 && return 0.0
    x < -10000 && return 1.0
    return -5e-05 * x + 0.5 # range -10000 to +10000
end
end # module

View File

@@ -0,0 +1,118 @@
module snnUtil
export refractoryStatus!, addNewSynapticConn!
using Random
#------------------------------------------------------------------------------------------------100
# Update per-neuron refractory flags from the refractory countdown.
# A neuron with counter > 0 is still refractory: active flag ← 0, inactive ← 1;
# otherwise it may fire: active ← 1, inactive ← 0.
# Flags are written through 0-dim views so the arrays are mutated in place.
function refractoryStatus!(refractoryCounter, refractoryActive, refractoryInactive)
    dims = size(refractoryCounter)
    for batch in 1:dims[4], neuron in 1:dims[3]
        firingAllowed = !(refractoryCounter[1, 1, neuron, batch] > 0)
        view(refractoryActive, 1, 1, neuron, batch) .= firingAllowed ? 1 : 0
        view(refractoryInactive, 1, 1, neuron, batch) .= firingAllowed ? 0 : 1
    end
end
# function frobenius_distance(A, B)
# # Check if the matrices have the same size
# if size(A) != size(B)
# error("The matrices must have the same size")
# end
# # Initialize the distance to zero
# distance = 0.0
# # Loop over the elements of the matrices and add the squared differences
# for i in 1:size(A, 1)
# for j in 1:size(A, 2)
# distance += (A[i, j] - B[i, j])^2
# end
# end
# # Return the square root of the distance
# return sqrt(distance)
# end
# Draw up to `n` new synaptic connections inside the pool of positions where
# `mask` equals `x` (1 → firing-neuron pool, 0 → non-firing pool), skipping
# positions already subscribed (wRec ≠ 0). Each new connection gets a small
# random weight in 0.01:0.01:0.1 and its activity counter is reset to 0.
# Returns `remaining`: how many of the requested `n` could not be placed
# (negative when the pool was larger than the request after the n==0 expansion).
# NOTE(review): `n == 0` expands the request to the whole available pool —
# confirm that "0 means take all" is the intended contract for callers.
function addNewSynapticConn!(mask::AbstractArray{<:Any}, x::Number, wRec::AbstractArray{<:Any},
                             counter::AbstractArray{<:Any}, n=0;
                             rng::AbstractRNG=MersenneTwister(1234))
    # check if mask and wRec have the same size
    if size(mask) != size(wRec)
        error("mask and wRec must have the same size")
    end
    # pool: indices whose mask value equals x.
    # FIX: the original lambda `x -> x == x` shadowed the `x` argument and was
    # always true, so both pools contained every position.
    indices = findall(m -> m == x, mask)
    alreadySub = findall(w -> w != 0, wRec) # get already-subscribed positions
    setdiff!(indices, alreadySub) # remove already sub conn from pool
    remaining = 0
    if n == 0 || n > length(indices)
        remaining = n - length(indices)
        n = length(indices)
    end
    # shuffle the pool, then take the first n positions
    shuffle!(rng, indices)
    selected = indices[1:n]
    for i in selected
        # FIX: draw the weight from `rng` so the kwarg actually controls all
        # randomness (the original used the global RNG here).
        wRec[i] = rand(rng, 0.01:0.01:0.1)
        counter[i] = 0 # counting start from 0
    end
    return remaining
end
end # module

View File

@@ -0,0 +1,447 @@
module type
export
# struct
kfn_1,
# function
random_wRec
using Random, GeneralUtils
#------------------------------------------------------------------------------------------------100
rng = MersenneTwister(1234)
abstract type Ironpen end
abstract type knowledgeFn <: Ironpen end
#------------------------------------------------------------------------------------------------100
# kfn_1 — state container for knowledge-function #1.
#
# Holds every array the spiking network needs: the shared activation matrix
# (`zit`), per-population weights and state for LIF, ALIF and output (readout)
# neurons, plus pre-allocated scratch arrays. All fields default to `nothing`
# so the outer constructor below can fill them in step by step and move each
# array to the compute device.
Base.@kwdef mutable struct kfn_1 <: knowledgeFn
    params::Union{Dict, Nothing} = nothing # store params of knowledgeFn itself for later use
    timeStep::Union{AbstractArray, Nothing} = nothing # 1-element array so it can live on device
    learningStage::Union{AbstractArray, Nothing} = nothing # 0 inference, 1 start, 2 during, 3 end learning
    inputSize::Union{AbstractArray, Nothing} = nothing # [row, signal_col]
    zit::Union{AbstractArray, Nothing} = nothing # 3D activation matrix
    zitCumulative::Union{AbstractArray, Nothing} = nothing # accumulated spikes, same shape as zit
    exInType::Union{AbstractArray, Nothing} = nothing # +1 excitatory / -1 inhibitory column mask
    modelError::Union{AbstractArray, Nothing} = nothing # store RSNN error
    outputError::Union{AbstractArray, Nothing} = nothing # store output neurons error
    # ---------------------------------------------------------------------------- #
    #                                  LIF Neurons                                  #
    # ---------------------------------------------------------------------------- #
    # a projection of kfn.zit into lif dimension for broadcasting later
    lif_zit::Union{AbstractArray, Nothing} = nothing
    # main variables according to papers
    lif_wRec::Union{AbstractArray, Nothing} = nothing
    lif_vt::Union{AbstractArray, Nothing} = nothing
    lif_vth::Union{AbstractArray, Nothing} = nothing
    lif_vRest::Union{AbstractArray, Nothing} = nothing
    lif_zt::Union{AbstractArray, Nothing} = nothing
    lif_zt4d::Union{AbstractArray, Nothing} = nothing
    lif_refractoryCounter::Union{AbstractArray, Nothing} = nothing
    lif_refractoryDuration::Union{AbstractArray, Nothing} = nothing
    lif_alpha::Union{AbstractArray, Nothing} = nothing
    lif_delta::Union{AbstractFloat, Nothing} = nothing
    lif_tau_m::Union{AbstractFloat, Nothing} = nothing
    lif_phi::Union{AbstractArray, Nothing} = nothing
    lif_epsilonRec::Union{AbstractArray, Nothing} = nothing
    lif_eRec::Union{AbstractArray, Nothing} = nothing
    lif_eta::Union{AbstractArray, Nothing} = nothing
    lif_gammaPd::Union{AbstractArray, Nothing} = nothing
    lif_wRecChange::Union{AbstractArray, Nothing} = nothing
    lif_error::Union{AbstractArray, Nothing} = nothing
    lif_firingCounter::Union{AbstractArray, Nothing} = nothing
    lif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing
    lif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing
    lif_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing
    lif_synapseConnectionNumber::Union{Int, Nothing} = nothing
    lif_synapticWChangeCounter::Union{AbstractArray, Nothing} = nothing
    # pre-allocation array
    lif_arrayProjection4d::Union{AbstractArray, Nothing} = nothing # use to project 3d array to 4d
    lif_recSignal::Union{AbstractArray, Nothing} = nothing
    lif_exInType::Union{AbstractArray, Nothing} = nothing
    # lif_decayed_epsilonRec::Union{AbstractArray, Nothing} = nothing
    # lif_vt_diff_vth::Union{AbstractArray, Nothing} = nothing
    # lif_vt_diff_vth_div_vth::Union{AbstractArray, Nothing} = nothing
    # lif_gammaPd_div_vth::Union{AbstractArray, Nothing} = nothing
    # lif_phiActivation::Union{AbstractArray, Nothing} = nothing
    # ---------------------------------------------------------------------------- #
    #                                  ALIF Neurons                                 #
    # ---------------------------------------------------------------------------- #
    alif_zit::Union{AbstractArray, Nothing} = nothing
    alif_wRec::Union{AbstractArray, Nothing} = nothing
    alif_vt::Union{AbstractArray, Nothing} = nothing
    alif_vth::Union{AbstractArray, Nothing} = nothing
    alif_vRest::Union{AbstractArray, Nothing} = nothing
    alif_zt::Union{AbstractArray, Nothing} = nothing
    alif_zt4d::Union{AbstractArray, Nothing} = nothing
    alif_refractoryCounter::Union{AbstractArray, Nothing} = nothing
    alif_refractoryDuration::Union{AbstractArray, Nothing} = nothing
    alif_alpha::Union{AbstractArray, Nothing} = nothing
    alif_delta::Union{AbstractFloat, Nothing} = nothing
    alif_tau_m::Union{AbstractFloat, Nothing} = nothing
    alif_phi::Union{AbstractArray, Nothing} = nothing
    alif_epsilonRec::Union{AbstractArray, Nothing} = nothing
    alif_eRec::Union{AbstractArray, Nothing} = nothing
    alif_eta::Union{AbstractArray, Nothing} = nothing
    alif_gammaPd::Union{AbstractArray, Nothing} = nothing
    alif_wRecChange::Union{AbstractArray, Nothing} = nothing
    alif_error::Union{AbstractArray, Nothing} = nothing
    alif_firingCounter::Union{AbstractArray, Nothing} = nothing
    alif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing
    alif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing
    alif_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing
    alif_synapseConnectionNumber::Union{Int, Nothing} = nothing
    alif_synapticWChangeCounter::Union{AbstractArray, Nothing} = nothing
    # pre-allocation array
    alif_arrayProjection4d::Union{AbstractArray, Nothing} = nothing # use to project 3d array to 4d
    alif_recSignal::Union{AbstractArray, Nothing} = nothing
    alif_exInType::Union{AbstractArray, Nothing} = nothing
    # alif_decayed_epsilonRec::Union{AbstractArray, Nothing} = nothing
    # alif_vt_diff_vth::Union{AbstractArray, Nothing} = nothing
    # alif_vt_diff_vth_div_vth::Union{AbstractArray, Nothing} = nothing
    # alif_gammaPd_div_vth::Union{AbstractArray, Nothing} = nothing
    # alif_phiActivation::Union{AbstractArray, Nothing} = nothing
    # alif specific variables
    alif_epsilonRecA::Union{AbstractArray, Nothing} = nothing
    alif_avth::Union{AbstractArray, Nothing} = nothing
    alif_a::Union{AbstractArray, Nothing} = nothing # threshold adaptation
    alif_beta::Union{AbstractArray, Nothing} = nothing # β, constant, value from paper
    alif_rho::Union{AbstractArray, Nothing} = nothing # ρ, threshold adaptation decay factor
    alif_tau_a::Union{AbstractFloat, Nothing} = nothing # τ_a, adaption time constant in millisecond
    # alif specific pre-allocation array
    # alif_phi_x_epsilonRec::Union{AbstractArray, Nothing} = nothing
    # alif_phi_x_beta::Union{AbstractArray, Nothing} = nothing
    # alif_rho_diff_phi_x_beta::Union{AbstractArray, Nothing} = nothing
    # alif_rho_div_phi_x_beta_x_epsilonRecA::Union{AbstractArray, Nothing} = nothing
    # alif_beta_x_a::Union{AbstractArray, Nothing} = nothing
    # ---------------------------------------------------------------------------- #
    #                                 Output Neurons                                #
    # ---------------------------------------------------------------------------- #
    # output neuron is based on LIF
    on_zit::Union{AbstractArray, Nothing} = nothing
    # main variables according to papers
    on_wOut::Union{AbstractArray, Nothing} = nothing # wOut is wRec, just use the name from paper
    on_vt::Union{AbstractArray, Nothing} = nothing
    on_vth::Union{AbstractArray, Nothing} = nothing
    on_vRest::Union{AbstractArray, Nothing} = nothing
    on_zt::Union{AbstractArray, Nothing} = nothing
    on_zt4d::Union{AbstractArray, Nothing} = nothing
    on_refractoryCounter::Union{AbstractArray, Nothing} = nothing
    on_refractoryDuration::Union{AbstractArray, Nothing} = nothing
    on_alpha::Union{AbstractArray, Nothing} = nothing
    on_delta::Union{AbstractFloat, Nothing} = nothing
    on_tau_m::Union{AbstractFloat, Nothing} = nothing
    on_phi::Union{AbstractArray, Nothing} = nothing
    on_epsilonRec::Union{AbstractArray, Nothing} = nothing
    on_eRec::Union{AbstractArray, Nothing} = nothing
    on_eta::Union{AbstractArray, Nothing} = nothing
    on_gammaPd::Union{AbstractArray, Nothing} = nothing
    on_wOutChange::Union{AbstractArray, Nothing} = nothing
    on_error::Union{AbstractArray, Nothing} = nothing
    on_subscription::Union{AbstractArray, Nothing} = nothing
    on_firingCounter::Union{AbstractArray, Nothing} = nothing
    # pre-allocation array
    on_arrayProjection4d::Union{AbstractArray, Nothing} = nothing # use to project 3d array to 4d
    on_recSignal::Union{AbstractArray, Nothing} = nothing
    # on_decayed_epsilonRec::Union{AbstractArray, Nothing} = nothing
    # on_vt_diff_vth::Union{AbstractArray, Nothing} = nothing
    # on_vt_diff_vth_div_vth::Union{AbstractArray, Nothing} = nothing
    # on_gammaPd_div_vth::Union{AbstractArray, Nothing} = nothing
    # on_phiActivation::Union{AbstractArray, Nothing} = nothing
end
# outer constructor
# Outer constructor: builds a fully-initialized kfn_1 from the `params` Dict,
# allocating and seeding every array (activation matrix, LIF/ALIF recurrent
# weights, output weights, counters, scratch arrays) and moving them to
# `device` (Flux's cpu/gpu).
function kfn_1(params::Dict; device=cpu)
    kfn = kfn_1()
    kfn.params = params
    kfn.timeStep = [0] |> device
    kfn.learningStage = [0] |> device
    # ---------------------------------------------------------------------------- #
    #                          initialize activation matrix                         #
    # ---------------------------------------------------------------------------- #
    # row*col is a 2D matrix represent all RSNN activation
    row, signal_col, batch = kfn.params[:inputPort][:signal][:numbers] # z-axis represent signal batch number
    kfn.inputSize = [row, signal_col] |> device
    lif_col = kfn.params[:computeNeuron][:lif][:numbers][2]
    alif_col = kfn.params[:computeNeuron][:alif][:numbers][2]
    # columns: input signal block, then LIF block, then ALIF block
    col = signal_col + lif_col + alif_col
    # activation matrix
    kfn.zit = zeros(row, col, batch) |> device
    kfn.zitCumulative = (similar(kfn.zit) .= 0)
    kfn.modelError = zeros(1) |> device
    # ---------------------------------------------------------------------------- #
    #                                   LIF config                                  #
    # ---------------------------------------------------------------------------- #
    # In 3D LIF matrix, z-axis represent each neuron while each 2D slice represent that neuron's
    # synaptic subscription to other neurons (via activation matrix)
    lif_n = kfn.params[:computeNeuron][:lif][:numbers][1] * kfn.params[:computeNeuron][:lif][:numbers][2]
    # subscription
    synapticConnectionPercent = kfn.params[:computeNeuron][:lif][:params][:synapticConnectionPercent]
    kfn.lif_synapseConnectionNumber = Int(floor(row*col * synapticConnectionPercent/100))
    w = random_wRec(row, col, lif_n, kfn.lif_synapseConnectionNumber)
    # project 3D w into 4D kfn.lif_wRec (row, col, n, batch)
    kfn.lif_wRec = reshape(w, (row, col, lif_n, 1)) .* ones(row, col, lif_n, batch) |> device
    kfn.lif_zit = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_vt = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_vth = (similar(kfn.lif_wRec) .= 1)
    kfn.lif_vRest = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_zt = zeros(1, 1, lif_n, batch) |> device
    kfn.lif_zt4d = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_refractoryCounter = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_refractoryDuration = (similar(kfn.lif_wRec) .= 3)
    kfn.lif_delta = 1.0
    kfn.lif_tau_m = 20.0
    # membrane decay factor α = exp(-δ/τ_m)
    kfn.lif_alpha = (similar(kfn.lif_wRec) .= (exp(-kfn.lif_delta / kfn.lif_tau_m)))
    kfn.lif_phi = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_epsilonRec = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_eRec = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_eta = (similar(kfn.lif_wRec) .= 0.01)
    kfn.lif_gammaPd = (similar(kfn.lif_wRec) .= 0.3)
    kfn.lif_wRecChange = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_error = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_firingCounter = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_firingTargetFrequency = (similar(kfn.lif_wRec) .= 0.1)
    kfn.lif_neuronInactivityCounter = (similar(kfn.lif_wRec) .= 0)
    # count subscribed synapse activity, just like epsilonRec but without decay.
    # use to adjust weight based on how often neural pathway is used
    kfn.lif_synapticActivityCounter = Array(similar(kfn.lif_wRec) .= -0.99) # -0.99 for non-sub conn
    mask = Array((!iszero).(kfn.lif_wRec))
    # initial value subscribed conn, synapticActivityCounter range -10000 to +10000
    GeneralUtils.replaceElements!(mask, 1, kfn.lif_synapticActivityCounter, 0)
    kfn.lif_synapticActivityCounter = kfn.lif_synapticActivityCounter |> device
    kfn.lif_synapticWChangeCounter = Array(similar(kfn.lif_wRec) .= -0.99) # -0.99 for non-sub conn
    mask = Array((!iszero).(kfn.lif_wRec))
    # initial value subscribed conn, synapticActivityCounter range -10000 to +10000
    GeneralUtils.replaceElements!(mask, 1, kfn.lif_synapticWChangeCounter, 1.0)
    kfn.lif_synapticWChangeCounter = kfn.lif_synapticWChangeCounter |> device
    kfn.lif_arrayProjection4d = (similar(kfn.lif_wRec) .= 1)
    kfn.lif_recSignal = (similar(kfn.lif_wRec) .= 0)
    kfn.lif_exInType = (similar(kfn.lif_wRec) .= 0)
    # kfn.lif_decayed_epsilonRec = (similar(kfn.lif_wRec) .= 0)
    # kfn.lif_vt_diff_vth = (similar(kfn.lif_wRec) .= 0)
    # kfn.lif_vt_diff_vth_div_vth = (similar(kfn.lif_wRec) .= 0)
    # kfn.lif_gammaPd_div_vth = (similar(kfn.lif_wRec) .= 0)
    # kfn.lif_phiActivation = (similar(kfn.lif_wRec) .= 0)
    # ---------------------------------------------------------------------------- #
    #                                  ALIF config                                  #
    # ---------------------------------------------------------------------------- #
    alif_n = kfn.params[:computeNeuron][:alif][:numbers][1] * kfn.params[:computeNeuron][:alif][:numbers][2]
    # subscription
    synapticConnectionPercent = kfn.params[:computeNeuron][:alif][:params][:synapticConnectionPercent]
    kfn.alif_synapseConnectionNumber = Int(floor(row*col * synapticConnectionPercent/100))
    w = random_wRec(row, col, alif_n, kfn.alif_synapseConnectionNumber)
    # project 3D w into 4D kfn.alif_wRec
    kfn.alif_wRec = reshape(w, (row, col, alif_n, 1)) .* ones(row, col, alif_n, batch) |> device
    kfn.alif_zit = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_vt = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_vth = (similar(kfn.alif_wRec) .= 1)
    kfn.alif_vRest = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_zt = zeros(1, 1, alif_n, batch) |> device
    kfn.alif_zt4d = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_refractoryCounter = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_refractoryDuration = (similar(kfn.alif_wRec) .= 3)
    kfn.alif_delta = 1.0
    kfn.alif_tau_m = 20.0
    kfn.alif_alpha = (similar(kfn.alif_wRec) .= (exp(-kfn.alif_delta / kfn.alif_tau_m)))
    kfn.alif_phi = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_epsilonRec = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_eRec = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_eta = (similar(kfn.alif_wRec) .= 0.01)
    kfn.alif_gammaPd = (similar(kfn.alif_wRec) .= 0.3)
    kfn.alif_wRecChange = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_error = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_firingCounter = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_firingTargetFrequency = (similar(kfn.alif_wRec) .= 0.1)
    kfn.alif_neuronInactivityCounter = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_synapticActivityCounter = Array(similar(kfn.alif_wRec) .= -0.99) # -0.99 for non-sub conn
    mask = Array((!iszero).(kfn.alif_wRec))
    # initial value subscribed conn, synapticActivityCounter range -10000 to +10000
    GeneralUtils.replaceElements!(mask, 1, kfn.alif_synapticActivityCounter, 0)
    kfn.alif_synapticActivityCounter = kfn.alif_synapticActivityCounter |> device
    kfn.alif_synapticWChangeCounter = Array(similar(kfn.alif_wRec) .= -0.99) # -0.99 for non-sub conn
    mask = Array((!iszero).(kfn.alif_wRec))
    # initial value subscribed conn, synapticActivityCounter range -10000 to +10000
    GeneralUtils.replaceElements!(mask, 1, kfn.alif_synapticWChangeCounter, 1.0)
    kfn.alif_synapticWChangeCounter = kfn.alif_synapticWChangeCounter |> device
    kfn.alif_arrayProjection4d = (similar(kfn.alif_wRec) .= 1)
    kfn.alif_recSignal = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_exInType = (similar(kfn.alif_wRec) .= 0)
    # kfn.alif_decayed_epsilonRec = (similar(kfn.alif_wRec) .= 0)
    # kfn.alif_vt_diff_vth = (similar(kfn.alif_wRec) .= 0)
    # kfn.alif_vt_diff_vth_div_vth = (similar(kfn.alif_wRec) .= 0)
    # kfn.alif_gammaPd_div_vth = (similar(kfn.alif_wRec) .= 0)
    # kfn.alif_phiActivation = (similar(kfn.alif_wRec) .= 0)
    # alif specific variables
    kfn.alif_epsilonRecA = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_avth = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_a = (similar(kfn.alif_wRec) .= 0)
    kfn.alif_beta = (similar(kfn.alif_wRec) .= 0.07)
    kfn.alif_tau_a = 800.0
    # adaptation decay factor ρ = exp(-δ/τ_a)
    kfn.alif_rho = (similar(kfn.alif_wRec) .= (exp(-kfn.alif_delta / kfn.alif_tau_a))) |> device
    # kfn.alif_phi_x_epsilonRec = (similar(kfn.alif_wRec) .= 0)
    # kfn.alif_phi_x_beta = (similar(kfn.alif_wRec) .= 0)
    # kfn.alif_rho_diff_phi_x_beta = (similar(kfn.alif_wRec) .= 0)
    # kfn.alif_rho_div_phi_x_beta_x_epsilonRecA = (similar(kfn.alif_wRec) .= 0)
    # kfn.alif_beta_x_a = (similar(kfn.alif_wRec) .= 0)
    # ---------------------------------------------------------------------------- #
    #                                 output config                                 #
    # ---------------------------------------------------------------------------- #
    n = kfn.params[:outputPort][:numbers][1] * kfn.params[:outputPort][:numbers][2]
    # subscription
    w = zeros(row, col, n)
    synapticConnectionPercent = kfn.params[:outputPort][:params][:synapticConnectionPercent]
    subable = size(kfn.lif_wRec, 3) + size(kfn.alif_wRec, 3) # sub to lif, alif only
    synapticConnection = Int(floor(subable * synapticConnectionPercent/100))
    for slice in eachslice(w, dims=3) # each slice is a neuron
        startInd = row*col - subable + 1 # e.g. 100(row*col) - 50(subable) = 50 -> startInd = 51
        # pool must contain only lif, alif neurons
        pool = shuffle!([startInd:row*col...])[1:synapticConnection]
        for i in pool
            # NOTE(review): comment originally said "/10 to start small" but
            # rand() is used unscaled here; the rescale below normalizes the
            # overall weight — confirm intent.
            slice[i] = rand() # assign weight to synaptic connection,
                              # otherwise RSNN's vt usually stay negative (-)
        end
    end
    # 10% of neuron connection should be enough to start to make neuron fires
    should_be_avg_weight = 1 / (0.1 * n)
    w = w .* (should_be_avg_weight / maximum(w)) # adjust overall weight
    # project 3D w into 4D kfn.lif_wOut (row, col, n, batch)
    kfn.on_wOut = reshape(w, (row, col, n, 1)) .* ones(row, col, n, batch) |> device
    kfn.on_zit = (similar(kfn.on_wOut) .= 0)
    kfn.on_vt = (similar(kfn.on_wOut) .= 0)
    kfn.on_vth = (similar(kfn.on_wOut) .= 1)
    kfn.on_vRest = (similar(kfn.on_wOut) .= 0)
    kfn.on_zt = zeros(1, 1, n, batch) |> device
    kfn.on_zt4d = (similar(kfn.on_wOut) .= 0)
    kfn.on_refractoryCounter = (similar(kfn.on_wOut) .= 0)
    kfn.on_refractoryDuration = (similar(kfn.on_wOut) .= 0)
    kfn.on_delta = 1.0
    kfn.on_tau_m = 20.0
    kfn.on_alpha = (similar(kfn.on_wOut) .= (exp(-kfn.on_delta / kfn.on_tau_m)))
    kfn.on_phi = (similar(kfn.on_wOut) .= 0)
    kfn.on_epsilonRec = (similar(kfn.on_wOut) .= 0)
    kfn.on_eRec = (similar(kfn.on_wOut) .= 0)
    kfn.on_eta = (similar(kfn.on_wOut) .= 0.01)
    kfn.on_gammaPd = (similar(kfn.on_wOut) .= 0.3)
    kfn.on_wOutChange = (similar(kfn.on_wOut) .= 0)
    kfn.on_error = (similar(kfn.on_wOut) .= 0)
    kfn.on_subscription = (GeneralUtils.isNotEqual.(kfn.on_wOut, 0)) |> device
    kfn.on_firingCounter = (similar(kfn.on_wOut) .= 0)
    kfn.on_arrayProjection4d = (similar(kfn.on_wOut) .= 1)
    kfn.on_recSignal = (similar(kfn.on_wOut) .= 0)
    kfn.outputError = zeros(n, batch) |> device
    # excitatory/inhibitory typing: inputs all excitatory; 30% of compute
    # neurons flipped to inhibitory (-1) at random (fixed seed)
    totalComputeNeurons = lif_n + alif_n
    inhabitoryNeurons = Int(floor(totalComputeNeurons * 30/100))
    mask1 = ones(row, signal_col)
    mask2 = GeneralUtils.multiply_random_elements(ones(row, lif_col + alif_col),
                                                  -1, inhabitoryNeurons, MersenneTwister(1234))
    kfn.exInType = cat(mask1, mask2, dims=2) |> device
    return kfn
end
# Build an (row, col, n) recurrent-weight tensor: each neuron slice gets
# `synapseConnectionNumber` random weights (from 0.01:0.01:0.1) at random
# positions, then the whole tensor is rescaled so the mean weight is 0.01 —
# enough initial activity to make some neurons fire without saturating.
# NOTE(review): assumes at least one nonzero weight (synapseConnectionNumber
# ≥ 1 and n ≥ 1); otherwise the rescale divides by zero.
function random_wRec(row, col, n, synapseConnectionNumber)
    # subscription
    w = zeros(row, col, n)
    for slice in eachslice(w, dims=3)
        positions = shuffle!(collect(1:row*col))[1:synapseConnectionNumber]
        for p in positions
            slice[p] = rand(0.01:0.01:0.1) # small starting weight, otherwise
                                           # RSNN's vt usually stay negative (-)
        end
    end
    # adjust weight so that RSNN fires small amount of neurons at the beginning
    # to avoid overwhelming all-fire; also better than not-fire-at-all
    meanWeight = sum(w) / length(w)
    return w .* (0.01 / meanWeight) # (row, col, n)
end
end # module

View File

@@ -0,0 +1 @@
.CondaPkg

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,24 @@
[deps]
BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
Cthulhu = "f68482b8-f384-11e8-15f7-abe071a5a75f"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
GLMakie = "e9467ef8-e4e7-5192-8a1a-b1aee30e663a"
GPUArrays = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
IronpenGPU = "3d5396ea-818e-43fc-a9d3-164248e840cd"
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
MethodAnalysis = "85b6ec6f-f7df-4429-9514-a64bcd9ee824"
OneHotArrays = "0b1bfda6-eb8a-41d2-88d8-f5af5cad476f"
ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
Serialization = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
SliceMap = "82cb661a-3f19-5665-9e27-df437c7e54c8"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"

View File

@@ -0,0 +1,887 @@
# ---------------------------------------------------------------------------- #
# if one need to reinstall all python packages #
# ---------------------------------------------------------------------------- #
# 1. delete .CondaPkg folder in working folder
# 2. delete CondaPkg.toml file in working folder
# using Pkg; Pkg.activate(".");
# pythonPkg = ["CondaPkg", "PythonCall"]
# for i in pythonPkg try Pkg.rm(i) catch end end
# for i in pythonPkg Pkg.add(i) end
# using CondaPkg, PythonCall
# channels = ["anaconda", "conda-forge", "pytorch"]
# for i in channels CondaPkg.add_channel(i) end
# condapackage = ["numpy", "pytorch", "snntorch"]
# for i in condapackage CondaPkg.add(i) end
# NOTE: was `Pkg.resolve(), Pkg.instantiate()` — the comma built and discarded
# a tuple; both calls still ran, but a semicolon is what was intended.
using Pkg; Pkg.activate("."); Pkg.resolve(); Pkg.instantiate()
# ---------------------------------------------------------------------------- #
# for debugging purpose #
# ---------------------------------------------------------------------------- #
# https://discourse.julialang.org/t/debugging-extremely-slow/53801/3
# using MethodAnalysis
# visit(Base) do item
# isa(item, Module) && push!(JuliaInterpreter.compiled_modules, item)
# true
# end
using Revise
using BenchmarkTools, Cthulhu
using Flux, CUDA
using BSON, JSON3
using MLDatasets: MNIST
using MLUtils, ProgressMeter, Dates, Random,
    Serialization, OneHotArrays, GLMakie
using CondaPkg, PythonCall
# Python-side helpers pulled in through PythonCall; used by the spike encoders below.
np = pyimport("numpy")
torch = pyimport("torch")
spikegen = pyimport("snntorch.spikegen") # https://github.com/jeshraghian/snntorch
using IronpenGPU
using GeneralUtils
sep = Sys.iswindows() ? "\\" : "/"
rootDir = pwd()
# select compute device
# device = Flux.CUDA.functional() ? gpu : cpu # Flux provide "cpu" and "gpu" keywork
device = gpu
if device == gpu CUDA.device!(0) end #CHANGE
# CUDA.allowscalar(false) # turn off scalar indexing in CPU to make it easier when moving to GPU
#------------------------------------------------------------------------------------------------100
"""
Todo:
- []
Change from version:
-
All features
-
"""
# communication config --------------------------------------------------------------------------100
database_ip = "localhost"
# database_ip = "192.168.0.8"
#------------------------------------------------------------------------------------------------100
modelname = "runOn_gpu_0" #CHANGE
imageBatch = 1
"""
    generate_snn(filename::String, location::String)

Build and return an `IronpenGPU.kfn_1` spiking-network model from the
port/neuron configuration assembled below. `filename`/`location` name the
intended serialization target, but the `serialize` call is currently
commented out, so they are unused at runtime.
"""
function generate_snn(filename::String, location::String)
    signalInput_portnumbers = (10, 20, imageBatch) # 2nd dim needs to match
    # input signal + copied input signal + noise.
    # 3rd dim is input batch size
    noise_portnumbers = (signalInput_portnumbers[1], 1)
    output_portnumbers = (10, 1)
    # 5000 neurons are maximum for 64GB memory i.e. 300 LIF : 200 ALIF
    lif_neuron_number = (signalInput_portnumbers[1], 3) # CHANGE
    alif_neuron_number = (signalInput_portnumbers[1], 2) # CHANGE from Allen Institute, ALIF is 20-40% of LIF
    # totalNeurons = computeNeuronNumber + noise_portnumbers + signalInput_portnumbers
    # totalInputPort = noise_portnumbers + signalInput_portnumbers
    # kfn and neuron config
    passthrough_neuron_params = Dict(
        :type => "passthroughNeuron"
    )
    lif_neuron_params = Dict{Symbol, Any}(
        :type => "lifNeuron",
        :v_t_default => 0.0,
        :v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
        :tau_m => 50.0, # membrane time constant in millisecond.
        :eta => 1e-6,
        # Good starting value is 1/10th of tau_a
        # This is problem specific parameter. It controls how leaky the neuron is.
        # Too high (less leaky) makes the learning algo harder to move the model in a direction that reduces error,
        # resulting in the model's error exploding exponentially, likely because the learning algo will try to
        # exert more force (larger w_out_change) to move the neuron in a direction that reduces error.
        # For example, model error from 7 to 2e6.
        :synapticConnectionPercent => 20, # % coverage of total neurons in kfn
    )
    alif_neuron_params = Dict{Symbol, Any}(
        :type => "alifNeuron",
        :v_t_default => 0.0,
        :v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
        :tau_m => 50.0, # membrane time constant in millisecond.
        :eta => 1e-6,
        # Good starting value is 1/10th of tau_a
        # This is problem specific parameter. It controls how leaky the neuron is.
        # Too high (less leaky) makes the learning algo harder to move the model in a direction that reduces error,
        # resulting in the model's error exploding exponentially, likely because the learning algo will try to
        # exert more force (larger w_out_change) to move the neuron in a direction that reduces error.
        # For example, model error from 7 to 2e6.
        :tau_a => 800.0, # adaptation time constant in millisecond. it defines neuron memory length.
        # This is problem specific parameter
        # Good starting value is 0.5 to 2 times of info STORE-RECALL length i.e. total time SNN takes to
        # perform a task, for example, equals to episode length.
        # From "Spike frequency adaptation supports network computations on temporally dispersed
        # information"
        :synapticConnectionPercent => 20, # % coverage of total neurons in kfn
    )
    linear_neuron_params = Dict{Symbol, Any}(
        :type => "linearNeuron",
        :v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
        :tau_out => 100.0, # output time constant in millisecond.
        :synapticConnectionPercent => 20, # % coverage of total neurons in kfn
        # Good starting value is 1/50th of tau_a
        # This is problem specific parameter.
        # It controls how leaky the neuron is.
        # Too high (less leaky) makes the learning algo harder to move the model in a direction that reduces error,
        # resulting in the model's error exploding exponentially. For example, model error from 7 to 2e6.
        # One can imagine training an output neuron is like a Tetris Game.
    )
    integrate_neuron_params = Dict{Symbol, Any}(
        :type => "integrateNeuron",
        :synapticConnectionPercent => 100, # % coverage of total neurons in kfn
        :eta => 1e-6,
        :tau_out => 100.0,
        # Good starting value is 1/50th of tau_a
        # This is problem specific parameter.
        # It controls how leaky the neuron is.
        # Too high (less leaky) makes the learning algo harder to move the model in a direction that reduces error,
        # resulting in the model's error exploding exponentially. For example, model error from 7 to 2e6.
        # One can imagine training an output neuron is like a Tetris Game.
    )
    # Top-level knowledge-function config: groups input ports (noise + signal),
    # output port, and compute neurons (LIF + ALIF) with their parameter dicts.
    I_kfnparams = Dict{Symbol, Any}(
        :knowledgeFnName=> "I",
        :neuronFiringRateTarget=> 20.0, # Hz
        # group relavent info
        :inputPort=> Dict(
            :noise=> Dict(
                :numbers=> noise_portnumbers,
                :params=> passthrough_neuron_params,
            ),
            :signal=> Dict(
                :numbers=> signalInput_portnumbers, # in case of GloVe word encoding, it is 300
                :params=> passthrough_neuron_params,
            ),
        ),
        :outputPort=> Dict(
            :numbers=> output_portnumbers, # output neuron, this is also the output length
            :params=> linear_neuron_params,
        ),
        :computeNeuron=> Dict(
            :lif=> Dict(
                :numbers=> lif_neuron_number, # number in (row, col) tuple format
                :params=> lif_neuron_params,
            ),
            :alif=> Dict(
                :numbers=> alif_neuron_number, # number in (row, col) tuple format
                :params=> alif_neuron_params,
            ),
        ),
    )
    #------------------------------------------------------------------------------------------------100
    model = IronpenGPU.kfn_1(I_kfnparams, device=device);
    # serialize(location * sep * filename, model)
    println("SNN generated")
    return model
end
"""
    data_loader()

Return `(trainData, validateData, labelDict)` where both loaders iterate a
small MNIST subset (the full train split has 60000 samples) in shuffled,
collated batches of `imageBatch`, and `labelDict` is the digit labels `0:9`.
NOTE: validation currently reuses the same training subset; switch to
`MNIST(:test)` for a real held-out set.
"""
function data_loader()
    # Both loaders share identical settings; build them through one helper.
    make_loader(dataset) = MLUtils.DataLoader(
        dataset;
        batchsize=imageBatch,
        collate=true,
        shuffle=true,
        buffer=true,
        partial=false, # better for gpu memory if batchsize is fixed
        # parallel=true, #BUG ?? causing dataloader into forever loop
    )
    trainData = make_loader(MNIST(:train)[1:3])
    # validateData = make_loader(MNIST(:test))
    validateData = make_loader(MNIST(:train)[1:3])
    labelDict = collect(0:9)
    # dummy data used to debug
    # trainData = [(rand(10, 10), [5]), (rand(10, 10), [2])]
    # trainData = [(rand(10, 10), [5]),]
    return trainData, validateData, labelDict
end
"""
    train_snn(model, trainData, validateData, labelDict::Vector)

Train the spiking network `model` online, one timestep at a time, over
`trainData` (each sample repeated up to 10 times per epoch). Images are
spike-encoded via `dualTrackSpikeGen`, fed pixel-sequence style with a
trailing "thinking period", and logits accumulated during that period form
the prediction. Weight changes are committed (`IronpenGPU.learn!`) only when
the model answers incorrectly or ambiguously. After epoch 200, accuracy on
`validateData` is reported each epoch. Mutates `model` in place.

FIX: the wrong-answer branch previously assigned the predicted label to
`finalAnswer` (clobbering the GPU logit accumulator with an Int) while the
log line printed `finalAnswer_cpu`; it now assigns `finalAnswer_cpu`,
consistent with the CORRECT branch.
"""
function train_snn(model, trainData, validateData, labelDict::Vector)
    # random seed
    # rng = MersenneTwister(1234)
    logitLog = zeros(10, 2)
    firedNeurons_t1 = zeros(1)
    var1 = zeros(10, 2)
    var2 = zeros(10, 2)
    var3 = zeros(10, 2)
    var4 = zeros(10, 2)
    # ----------------------------------- plot ----------------------------------- #
    # Observables backing the live GLMakie plots; the update calls further down
    # are currently commented out, so these figures stay static.
    plot10 = Observable(firedNeurons_t1)
    plot20 = Observable(logitLog[1 , :])
    plot21 = Observable(logitLog[2 , :])
    plot22 = Observable(logitLog[3 , :])
    plot23 = Observable(logitLog[4 , :])
    plot24 = Observable(logitLog[5 , :])
    plot25 = Observable(logitLog[6 , :])
    plot26 = Observable(logitLog[7 , :])
    plot27 = Observable(logitLog[8 , :])
    plot28 = Observable(logitLog[9 , :])
    plot29 = Observable(logitLog[10, :])
    plot30 = Observable(var1[1 , :])
    plot31 = Observable(var1[2 , :])
    plot32 = Observable(var1[3 , :])
    plot33 = Observable(var1[4 , :])
    plot34 = Observable(var1[5 , :])
    plot35 = Observable(var1[6 , :])
    plot36 = Observable(var1[7 , :])
    plot37 = Observable(var1[8 , :])
    plot38 = Observable(var1[9 , :])
    plot39 = Observable(var1[10, :])
    plot40 = Observable(var2[1 , :])
    plot41 = Observable(var2[2 , :])
    plot42 = Observable(var2[3 , :])
    plot43 = Observable(var2[4 , :])
    plot44 = Observable(var2[5 , :])
    plot45 = Observable(var2[6 , :])
    plot46 = Observable(var2[7 , :])
    plot47 = Observable(var2[8 , :])
    plot48 = Observable(var2[9 , :])
    plot49 = Observable(var2[10, :])
    plot50 = Observable(var3[1 , :])
    plot51 = Observable(var3[2 , :])
    plot52 = Observable(var3[3 , :])
    plot53 = Observable(var3[4 , :])
    plot54 = Observable(var3[5 , :])
    plot55 = Observable(var3[6 , :])
    plot56 = Observable(var3[7 , :])
    plot57 = Observable(var3[8 , :])
    plot58 = Observable(var3[9 , :])
    plot59 = Observable(var3[10, :])
    plot60 = Observable(var4[1 , :])
    plot61 = Observable(var4[2 , :])
    plot62 = Observable(var4[3 , :])
    plot63 = Observable(var4[4 , :])
    plot64 = Observable(var4[5 , :])
    plot65 = Observable(var4[6 , :])
    plot66 = Observable(var4[7 , :])
    plot67 = Observable(var4[8 , :])
    plot68 = Observable(var4[9 , :])
    plot69 = Observable(var4[10, :])
    # main figure
    fig1 = Figure()
    subfig1 = GLMakie.Axis(fig1[1, 1], # define position of this subfigure inside a figure
        title = "RSNN firedNeurons_t1",
        xlabel = "time",
        ylabel = "data"
    )
    lines!(subfig1, plot10, label = "firedNeurons_t1")
    # axislegend(subfig1, position = :lb)
    subfig2 = GLMakie.Axis(fig1[2, 1], # define position of this subfigure inside a figure
        title = "output neurons logit",
        xlabel = "time",
        ylabel = "data"
    )
    lines!(subfig2, plot20, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig2, plot21, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig2, plot22, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig2, plot23, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig2, plot24, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig2, plot25, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig2, plot26, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig2, plot27, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig2, plot28, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig2, plot29, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
    # axislegend(subfig2, position = :lb)
    subfig3 = GLMakie.Axis(fig1[3, 1], # define position of this subfigure inside a figure
        title = "last RSNN wRec",
        xlabel = "time",
        ylabel = "data"
    )
    lines!(subfig3, plot30, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig3, plot31, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig3, plot32, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig3, plot33, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig3, plot34, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig3, plot35, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig3, plot36, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig3, plot37, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig3, plot38, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig3, plot39, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
    # axislegend(subfig3, position = :lb)
    subfig4 = GLMakie.Axis(fig1[4, 1], # define position of this subfigure inside a figure
        title = "RSNN v_t1",
        xlabel = "time",
        ylabel = "data"
    )
    lines!(subfig4, plot40, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig4, plot41, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig4, plot42, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig4, plot43, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig4, plot44, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig4, plot45, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig4, plot46, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig4, plot47, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig4, plot48, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig4, plot49, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
    # axislegend(subfig4, position = :lb)
    subfig5 = GLMakie.Axis(fig1[5, 1], # define position of this subfigure inside a figure
        title = "output neuron epsilonRec",
        xlabel = "time",
        ylabel = "data"
    )
    lines!(subfig5, plot50, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig5, plot51, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig5, plot52, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig5, plot53, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig5, plot54, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig5, plot55, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig5, plot56, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig5, plot57, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig5, plot58, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig5, plot59, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
    # axislegend(subfig5, position = :lb)
    subfig6 = GLMakie.Axis(fig1[6, 1], # define position of this subfigure inside a figure
        title = "output neuron wRecChange",
        xlabel = "time",
        ylabel = "data"
    )
    lines!(subfig6, plot60, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig6, plot61, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig6, plot62, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig6, plot63, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig6, plot64, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig6, plot65, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig6, plot66, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig6, plot67, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig6, plot68, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
    lines!(subfig6, plot69, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
    # axislegend(subfig6, position = :lb)
    # wait(display(fig1))
    # display(fig1)
    # --------------------------------- end plot --------------------------------- #
    # model learning
    thinkingPeriod = 16 # 1000-784 = 216
    bestAccuracy = 0.0
    finalAnswer = [0] |> device # store model prediction in (logit of choices, batch)
    stop = 0
    for epoch = 1:1000
        stop == 3 ? break : false
        println("epoch $epoch")
        n = length(trainData)
        println("n $n")
        p = Progress(n, dt=1.0) # minimum update interval: 1 second
        for (imgBatch, labels) in trainData # imgBatch (28, 28, 4) i.e. (row, col, batch)
            for rep in 1:10
                stop == 3 ? break : false
                #WORKING prepare image into input signal (10, 2, 784, 4) i.e. (row, col, timestep, batch)
                signal = dualTrackSpikeGen(imgBatch, [0.05, 0.1, 0.2, 0.3, 0.5], noise=(true, 1, 0.1), copies=18)
                if length(size(signal)) == 3
                    row, col, sequence = size(signal)
                    batch = 1
                else
                    row, col, sequence, batch = size(signal)
                end
                # encode labels
                correctAnswer = onehotbatch(labels, labelDict) # (choices, batch)
                # insert data into model sequencially
                for timestep in 1:(sequence + thinkingPeriod) # sMNIST has 784 timestep(pixel) + thinking period = 1000 timestep
                    if timestep <= sequence
                        current_pixel = view(signal, :, :, timestep, :) |> device
                    else
                        current_pixel = zeros(row, col, batch) |> device # dummy input in "thinking" period
                    end
                    if timestep == 1 # tell a model to start learning. 1-time only
                        model.learningStage = [1]
                        finalAnswer = [0] |> device
                    elseif timestep == (sequence+thinkingPeriod)
                        model.learningStage = [3]
                    else
                    end
                    # predict
                    logit, _firedNeurons_t1 = model(current_pixel)
                    # # log answer of all timestep
                    # logitLog = [logitLog;; logit]
                    # firedNeurons_t1 = push!(firedNeurons_t1, _firedNeurons_t1)
                    # var1 = [var1;; _var1]
                    # var2 = [var2;; _var2]
                    # var3 = [var3;; _var3]
                    # var4 = [var4;; _var4]
                    if timestep < sequence # online learning, 1-by-1 timestep
                        # no error calculation
                    elseif timestep == sequence # online learning, 1-by-1 timestep
                        # no error calculation
                    elseif timestep > sequence && timestep < sequence+thinkingPeriod # collect answer
                        finalAnswer = length(finalAnswer) == 1 ? logit : finalAnswer .+ logit # (logit, batch)
                        predict_cpu = logit |> cpu
                        modelError = (predict_cpu .- correctAnswer)
                        modelError = reshape(modelError, (1,1,:, size(modelError, 2)))
                        modelError = sum(modelError, dims=3) |> device
                        outputError = (predict_cpu .- correctAnswer) |> device
                        lif_epsilonRec_cpu = model.lif_epsilonRec |> cpu
                        on_zt_cpu = model.on_zt |> cpu
                        IronpenGPU.compute_paramsChange!(model, modelError, outputError)
                        lif_wRecChange_cpu = model.lif_wRecChange |> cpu
                        # if sum(lif_wRecChange_cpu) != 0
                        # println("")
                        # lif_vt_cpu = model.lif_vt |> cpu
                        # lif_zt_cpu = model.lif_zt |> cpu
                        # lif_recSignal = model.lif_recSignal |> cpu
                        # on_vt_cpu = model.on_vt |> cpu
                        # on_vt_cpu = on_vt_cpu[1,1,:,1]
                        # on_zt_cpu = on_zt_cpu[1,1,:,1]
                        # on_wOutChange_cpu = model.on_wOutChange |> cpu
                        # on_wOutChange_cpu = sum(on_wOutChange_cpu, dims=(1,2))
                        # println("lif vt $(lif_vt_cpu[1,1,5,1]) lif zt $(lif_zt_cpu[1,1,5,1]) on_vt $on_vt_cpu on_zt $on_zt_cpu on_wOutChange_cpu $on_wOutChange_cpu")
                        # println("lif_recSignal ", lif_recSignal)
                        # println("")
                        # println("lif_epsilonRec_cpu ", lif_epsilonRec_cpu)
                        # println("")
                        # println("lif_wRecChange ", lif_wRecChange_cpu)
                        # println("")
                        # zit_cumulative = model.zit_cumulative |> cpu
                        # println("zit_cumulative ", zit_cumulative)
                        # # error("DEBUG -> main $(Dates.now())")
                        # end
                    elseif timestep == sequence+thinkingPeriod
                        finalAnswer = length(finalAnswer) == 1 ? logit : finalAnswer .+ logit # (logit, batch)
                        predict_cpu = logit |> cpu
                        modelError = (predict_cpu .- correctAnswer)
                        modelError = reshape(modelError, (1,1,:, size(modelError, 2)))
                        modelError = sum(modelError, dims=3) |> device
                        outputError = (predict_cpu .- correctAnswer) |> device
                        lif_epsilonRec_cpu = model.lif_epsilonRec |> cpu
                        on_zt_cpu = model.on_zt |> cpu
                        IronpenGPU.compute_paramsChange!(model, modelError, outputError)
                        lif_wRecChange_cpu = model.lif_wRecChange |> cpu
                        println("")
                        # debug snapshot of internal model state (neuron 5, batch 1)
                        lif_recSignal_cpu = model.lif_recSignal |> cpu
                        lif_recSignal_cpu = sum(lif_recSignal_cpu[:,:,5,1])
                        lif_vt_cpu = model.lif_vt |> cpu
                        lif_vt_cpu = lif_vt_cpu[1,1,5,1]
                        lif_zt_cpu = model.lif_zt |> cpu
                        lif_zt_cpu = lif_zt_cpu[1,1,5,1]
                        lif_epsilonRec_cpu = model.lif_epsilonRec |> cpu
                        lif_epsilonRec_cpu = sum(lif_epsilonRec_cpu[:,:,5,1])
                        lif_wRecChange_cpu = sum(lif_wRecChange_cpu[:,:,5,1])
                        on_vt_cpu = model.on_vt |> cpu
                        on_vt_cpu = on_vt_cpu[1,1,:,1]
                        on_zt_cpu = on_zt_cpu[1,1,:,1]
                        on_wOutChange_cpu = model.on_wOutChange |> cpu
                        on_wOutChange_cpu = sum(on_wOutChange_cpu, dims=(1,2))
                        println("lif recSignal $lif_recSignal_cpu lif vt $lif_vt_cpu lif zt $lif_zt_cpu lif_epsilonRec_cpu $lif_epsilonRec_cpu lif_wRecChange_cpu $lif_wRecChange_cpu on_vt $on_vt_cpu on_zt $on_zt_cpu on_wOutChange_cpu $on_wOutChange_cpu")
                        # println("lif_recSignal ", lif_recSignal)
                        # println("")
                        # println("lif_epsilonRec_cpu ", lif_epsilonRec_cpu)
                        # println("")
                        # println("lif_wRecChange ", lif_wRecChange_cpu)
                        # println("")
                        # zit_cumulative = model.zit_cumulative |> cpu
                        # println("zit_cumulative ", zit_cumulative)
                        # error("DEBUG -> main $(Dates.now())")
                        # commit learned weight only if the model answer incorrectly
                        finalAnswer_cpu = finalAnswer |> cpu
                        # println("label $(labels[1]) finalAnswer $finalAnswer_cpu")
                        # maxmask: boolean mask of the argmax logit(s). Renamed from
                        # `max`, which shadowed Base.max.
                        maxmask = isequal.(finalAnswer_cpu[:,1], maximum(finalAnswer_cpu[:,1]))
                        if sum(finalAnswer_cpu) == 0
                            println("modelname $modelname epoch $epoch label $(labels[1]) finalAnswer ZERO answer LEARNING")
                            IronpenGPU.learn!(model, device)
                        elseif sum(maxmask) == 1 && findall(maxmask)[1] -1 == labels[1]
                            finalAnswer_cpu = findall(maxmask)[1] - 1
                            println("modelname $modelname epoch $epoch label $(labels[1]) finalAnswer $finalAnswer_cpu CORRECT")
                        elseif sum(maxmask) == 1 && findall(maxmask)[1] -1 != labels[1]
                            # FIX: was `finalAnswer = findall(max)[1] - 1`, which clobbered
                            # the GPU accumulator while the log printed finalAnswer_cpu.
                            finalAnswer_cpu = findall(maxmask)[1] - 1
                            println("modelname $modelname epoch $epoch label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
                            IronpenGPU.learn!(model, device)
                        else
                            println("modelname $modelname epoch $epoch label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
                            IronpenGPU.learn!(model, device)
                        end
                        # error("DEBUG -> main $(Dates.now())")
                    else
                        error("undefined condition line $(@__LINE__)")
                        # error("DEBUG -> main $(Dates.now())")
                    end
                    # update plot
                    # plot10[] = firedNeurons_t1
                    # plot20[] = view(logitLog, 1 , :)
                    # plot21[] = view(logitLog, 2 , :)
                    # plot22[] = view(logitLog, 3 , :)
                    # plot23[] = view(logitLog, 4 , :)
                    # plot24[] = view(logitLog, 5 , :)
                    # plot25[] = view(logitLog, 6 , :)
                    # plot26[] = view(logitLog, 7 , :)
                    # plot27[] = view(logitLog, 8 , :)
                    # plot28[] = view(logitLog, 9 , :)
                    # plot29[] = view(logitLog, 10, :)
                    # plot30[] = view(var1, 1 , :)
                    # plot31[] = view(var1, 2 , :)
                    # plot32[] = view(var1, 3 , :)
                    # plot33[] = view(var1, 4 , :)
                    # plot34[] = view(var1, 5 , :)
                    # plot35[] = view(var1, 6 , :)
                    # plot36[] = view(var1, 7 , :)
                    # plot37[] = view(var1, 8 , :)
                    # plot38[] = view(var1, 9 , :)
                    # plot39[] = view(var1, 10, :)
                    # plot40[] = view(var2, 1 , :)
                    # plot41[] = view(var2, 2 , :)
                    # plot42[] = view(var2, 3 , :)
                    # plot43[] = view(var2, 4 , :)
                    # plot44[] = view(var2, 5 , :)
                    # plot45[] = view(var2, 6 , :)
                    # plot46[] = view(var2, 7 , :)
                    # plot47[] = view(var2, 8 , :)
                    # plot48[] = view(var2, 9 , :)
                    # plot49[] = view(var2, 10, :)
                    # plot50[] = view(var3, 1 , :)
                    # plot51[] = view(var3, 2 , :)
                    # plot52[] = view(var3, 3 , :)
                    # plot53[] = view(var3, 4 , :)
                    # plot54[] = view(var3, 5 , :)
                    # plot55[] = view(var3, 6 , :)
                    # plot56[] = view(var3, 7 , :)
                    # plot57[] = view(var3, 8 , :)
                    # plot58[] = view(var3, 9 , :)
                    # plot59[] = view(var3, 10, :)
                    # plot60[] = view(var4, 1 , :)
                    # plot61[] = view(var4, 2 , :)
                    # plot62[] = view(var4, 3 , :)
                    # plot63[] = view(var4, 4 , :)
                    # plot64[] = view(var4, 5 , :)
                    # plot65[] = view(var4, 6 , :)
                    # plot66[] = view(var4, 7 , :)
                    # plot67[] = view(var4, 8 , :)
                    # plot68[] = view(var4, 9 , :)
                    # plot69[] = view(var4, 10, :)
                end
                # end-thinkingPeriod+2; +2 because initialize logitLog = zeros(10, 2)
                # _modelRespond = logitLog[:, end-thinkingPeriod+2:end] # answer count during thinking period
                # _modelRespond = [sum(i) for i in eachrow(_modelRespond)]
                # modelRespond = isequal.(isequal.(_modelRespond, 0), 0)
                # display(fig1)
                # sleep(1)
                # if k % 3 == 0
                # firedNeurons_t1 = zeros(1)
                # logitLog = zeros(10, 2)
                # var1 = zeros(10, 2)
                # var2 = zeros(10, 2)
                # var3 = zeros(10, 2)
                # var4 = zeros(10, 2)
                # end
                # # if predict == OneHotArrays.onehot(label, labelDict)
                # # println("model train $label successfully, $k tries")
                # # # wait(display(fig1))
                # # firedNeurons_t1 = zeros(1)
                # # logitLog = zeros(10, 2)
                # # var1 = zeros(10, 2)
                # # var2 = zeros(10, 2)
                # # var3 = zeros(10, 2)
                # # var4 = zeros(10, 2)
                # # break
                # # end
                # if k == maxRepeatRound
                # # println("model train $label unsuccessfully, $maxRepeatRound tries, skip training")
                # # display(fig1)
                # firedNeurons_t1 = zeros(1)
                # logitLog = zeros(10, 2)
                # var1 = zeros(10, 2)
                # var2 = zeros(10, 2)
                # var3 = zeros(10, 2)
                # var4 = zeros(10, 2)
                # break
                # end
            end
            next!(p)
        end
        if epoch > 200
            # check accuracy
            println("validating model")
            percentCorrect = validate(model, validateData, labelDict)
            bestAccuracy = percentCorrect > bestAccuracy ? percentCorrect : bestAccuracy
            println("$modelname model accuracy is $percentCorrect %, best accuracy is $bestAccuracy")
        end
    end
end
"""
    validate(model, dataset, labelDict)

Run `model` over `dataset` in inference mode (no weight commits), accumulate
output logits over the post-sequence "thinking period", and return the
percentage of batches whose argmax prediction matches the label, as a
`Float64`. A column with more than one argmax is counted as wrong (label -1).
"""
function validate(model, dataset, labelDict)
    totalAnswerCorrectly = 0 # score
    totalSignal = 0
    thinkingPeriod = 16 # 1000-784 = 216
    predict = [0] |> device
    n = length(dataset)
    println("n $n")
    p = Progress(n, dt=1.0) # minimum update interval: 1 second
    for (imgBatch, labels) in dataset
        # NOTE(review): validation encodes with spikeGenerator (single track,
        # noise prob 0.5) while training uses dualTrackSpikeGen with prob 0.1
        # — confirm this asymmetry is intended.
        signal = spikeGenerator(imgBatch, [0.05, 0.1, 0.2, 0.3, 0.5], noise=(true, 1, 0.5), copies=18)
        if length(size(signal)) == 3
            row, col, sequence = size(signal)
            batch = 1
        else
            row, col, sequence, batch = size(signal)
        end
        # encode labels
        correctAnswer = onehotbatch(labels, labelDict) # (choices, batch)
        # insert data into model sequencially
        for timestep in 1:(sequence + thinkingPeriod) # sMNIST has 784 timestep(pixel) + thinking period = 1000 timestep
            if timestep <= sequence
                current_pixel = view(signal, :, :, timestep, :) |> device
            else
                current_pixel = zeros(row, col, batch) |> device # dummy input in "thinking" period
            end
            if timestep == 1 # tell a model to start learning. 1-time only
                predict = [0] |> device
            elseif timestep == (sequence+thinkingPeriod)
            else
            end
            # predict
            logit, _ = model(current_pixel)
            if timestep < sequence # online learning, 1-by-1 timestep
                # no error calculation
            elseif timestep == sequence # online learning, 1-by-1 timestep
                # no error calculation
            elseif timestep > sequence && timestep < sequence+thinkingPeriod # collect answer
                predict = length(predict) == 1 ? logit : predict .+ logit # (logit, batch)
            elseif timestep == sequence+thinkingPeriod
                predict = length(predict) == 1 ? logit : predict .+ logit # (logit, batch)
            else
                error("undefined condition line $(@__LINE__)")
            end
        end
        predict_cpu = predict |> cpu
        _predict_label = mapslices(GeneralUtils.vectorMax, predict_cpu; dims=1)
        s = sum(_predict_label, dims=1)
        # NOTE(review): the operator between `0` and `s` on the next line appears
        # to have been lost in transit (likely a Unicode `∉` or similar) — confirm
        # against the original source before trusting this guard.
        if 0 s
            predict_label = []
            for i in eachcol(_predict_label)
                _label = findall(i) .- 1
                if length(_label) == 1
                    append!(predict_label, _label)
                else
                    push!(predict_label, -1) # predict more than 1 label. add non-count label.
                end
            end
            answerCorrectly = sum([x == y for (x,y) in zip(predict_label, labels)])
            totalAnswerCorrectly += answerCorrectly
            totalSignal += batch
        end
        next!(p)
    end
    percentCorrect = totalAnswerCorrectly * 100.0 / totalSignal
    return percentCorrect::Float64
end
"""
    dualTrackSpikeGen(inputsignals, thresholds=[1.0]; noise=(false, 1, 0.5), copies=0)

Spike-encode each image in `inputsignals` (row, col, batch) twice — once
flattened row-by-row and once column-by-column — and concatenate the two
encoded tracks along dim 2.
NOTE: the `copies` keyword is accepted but not forwarded; both tracks are
encoded with `copies=8` (preserved behavior).
"""
function dualTrackSpikeGen(inputsignals, thresholds=[1.0]; noise=(false, 1, 0.5), copies=0)
    rowTrack = nothing
    colTrack = nothing
    for slice in eachslice(inputsignals, dims=3)
        # Flatten this image in both scan orders.
        flatRows = reduce(vcat, eachrow(slice))
        flatCols = reduce(vcat, eachcol(slice))
        rowTrack = rowTrack === nothing ? flatRows : cat(rowTrack, flatRows, dims=3)
        colTrack = colTrack === nothing ? flatCols : cat(colTrack, flatCols, dims=3)
    end
    nbatch = size(inputsignals, 3)
    # Force the (pixels, 1, batch) layout the encoder expects.
    rowTrack = reshape(rowTrack, (size(rowTrack, 1), 1, nbatch))
    colTrack = reshape(colTrack, (size(colTrack, 1), 1, nbatch))
    rowTrack = spikeGenerator(rowTrack, thresholds, noise=noise, copies=8)
    colTrack = spikeGenerator(colTrack, thresholds, noise=noise, copies=8)
    return cat(rowTrack, colTrack, dims=2)
end
""" inputsignals is normal column-major julia matrix in (row, col, batch) dimension
- each threshold scan return 2 vectors. 1 for +, 1 for -
- noise = (true/false, row, col, probability)
"""
function spikeGenerator(inputsignals, thresholds=[1.0]; noise=(false, 1, 0.5), copies=0)
s = length(size(inputsignals))
ar = [] # holding all signals that are scanned
for slice in eachslice(inputsignals, dims=s)
signal_jl = reshape(slice, (:, 1)) # python array is row-major
signal_pytensor = torch.from_numpy( np.asarray(signal_jl) )
arr = [] # holding signal that is scanned by several thresholds
for threshold in thresholds
spike_py = spikegen.delta(signal_pytensor, threshold=threshold, off_spike=true)
_spike_jl = pyconvert(Array, spike_py.data.numpy())
spike_jl = reshape(_spike_jl, (1, :)) # reshape back to julia's column-major
spike_jl1 = isequal.(spike_jl, 1)
spike_jl2 = isequal.(spike_jl, -1)
arr = length(arr) == 0 ? [spike_jl1; spike_jl2] : [arr; spike_jl1; spike_jl2]
end
arrSize = [size(arr)...]
arr = reshape(arr, (arrSize[1], 1, arrSize[2])) # reshape into (row, 1, timestep)
# multiply col
if copies > 0
a = deepcopy(arr)
for i in 1:copies
arr = cat(arr, a, dims=2)
end
end
if noise[1] == true
arrSize = [size(arr)...]
n = noiseGenerator(arrSize[1], noise[2], arrSize[3], prob=noise[3])
arr = cat(arr, n, dims=2) # concatenate into (row, signal:noise, timestep)
end
# concatenate into (row, signal:noise, timestep, batch)
ar = length(ar) == 0 ? arr : [ar;;;;arr]
end
return ar
end
"""
    noiseGenerator(row, col, z; prob=0.5)

Generate a `row × col × z` boolean spike-noise array using snntorch rate
coding: each entry fires (`true`) independently with probability scaled by
`prob` (a uniform random rate in `[0, prob)` per entry).
"""
function noiseGenerator(row, col, z; prob=0.5)
    firing_prob = torch.rand(row, col, z) * prob
    spikes_py = spikegen.rate_conv(firing_prob)
    return isequal.(pyconvert(Array, spikes_py.data.numpy()), 1)
end
# function arrayMax(x)
# if sum(GeneralUtils.isNotEqual.(x, 0)) == 0 # guard against all-zeros array
# return GeneralUtils.isNotEqual.(x, 0)
# else
# return isequal.(x, maximum(x))
# end
# end
# arraySliceMax(x) = mapslices(arrayMax, x; dims=1)
"""
    main()

Script entry point: build the SNN, load the MNIST subsets, run training,
and print start/finish timestamps.
"""
function main()
    outdir = string(@__DIR__)
    outfile = "$modelname.jl163"
    started = Dates.now()
    println("$modelname program started $started")
    snn = generate_snn(outfile, outdir)
    trainset, validateset, labels = data_loader()
    train_snn(snn, trainset, validateset, labels)
    finished = Dates.now()
    println("training done, $started ==> $finished ")
    println(" ///////////////////////////////////////////////////////////////////////")
end
# Run main() only when Julia isn't started interactively (i.e. executed as a script).
# https://discourse.julialang.org/t/scripting-like-a-julian/50707
!isinteractive() && main()
#------------------------------------------------------------------------------------------------100

View File

@@ -25,11 +25,11 @@ using .interface
#------------------------------------------------------------------------------------------------100 #------------------------------------------------------------------------------------------------100
""" version 0.0.7 """ version 0.0.9
Todo: Todo:
[] add voltage regulator [1] +W 90% of most active conn
[] synaptic liquidity range 0 to 100,000 -> 1.0 to 0.99 [2] -W 10% of less active conn
[] add weight liquidity
[-] add temporal summation in addition to already used spatial summation. [-] add temporal summation in addition to already used spatial summation.
CANCELLED, spatial summation every second until membrane potential reach a threshold CANCELLED, spatial summation every second until membrane potential reach a threshold
is in itself a temporal summation. is in itself a temporal summation.
@@ -46,6 +46,7 @@ using .interface
All features All features
- excitatory/inhabitory matrix - excitatory/inhabitory matrix
- neuroplasticity - neuroplasticity
- voltage regulator
""" """

View File

@@ -26,6 +26,7 @@ function (kfn::kfn_1)(input::AbstractArray)
kfn.lif_firingCounter .= 0 kfn.lif_firingCounter .= 0
kfn.lif_refractoryCounter .= 0 kfn.lif_refractoryCounter .= 0
kfn.lif_zt .= 0 kfn.lif_zt .= 0
kfn.lif_synapticActivityCounter .= 0
kfn.alif_vt .= 0 kfn.alif_vt .= 0
kfn.alif_a .= 0 kfn.alif_a .= 0
@@ -35,6 +36,7 @@ function (kfn::kfn_1)(input::AbstractArray)
kfn.alif_firingCounter .= 0 kfn.alif_firingCounter .= 0
kfn.alif_refractoryCounter .= 0 kfn.alif_refractoryCounter .= 0
kfn.alif_zt .= 0 kfn.alif_zt .= 0
kfn.alif_synapticActivityCounter .= 0
kfn.on_vt .= 0 kfn.on_vt .= 0
kfn.on_epsilonRec .= 0 kfn.on_epsilonRec .= 0
@@ -75,7 +77,7 @@ function (kfn::kfn_1)(input::AbstractArray)
kfn.lif_exInType, kfn.lif_exInType,
kfn.lif_wRecChange, kfn.lif_wRecChange,
kfn.lif_neuronInactivityCounter, kfn.lif_neuronInactivityCounter,
kfn.lif_synapticInactivityCounter, kfn.lif_synapticActivityCounter,
) )
end end
@async begin @async begin
@@ -101,7 +103,7 @@ function (kfn::kfn_1)(input::AbstractArray)
kfn.alif_exInType, kfn.alif_exInType,
kfn.alif_wRecChange, kfn.alif_wRecChange,
kfn.alif_neuronInactivityCounter, kfn.alif_neuronInactivityCounter,
kfn.alif_synapticInactivityCounter, kfn.alif_synapticActivityCounter,
kfn.alif_epsilonRecA, kfn.alif_epsilonRecA,
kfn.alif_a, kfn.alif_a,
kfn.alif_avth, kfn.alif_avth,
@@ -169,7 +171,7 @@ function lifForward( zit::CuArray,
exInType::CuArray, exInType::CuArray,
wRecChange::CuArray, wRecChange::CuArray,
neuronInactivityCounter::CuArray, neuronInactivityCounter::CuArray,
synapticInactivityCounter::CuArray, synapticActivityCounter::CuArray,
) )
kernel = @cuda launch=false lifForward( zit, kernel = @cuda launch=false lifForward( zit,
@@ -189,7 +191,7 @@ function lifForward( zit::CuArray,
exInType, exInType,
wRecChange, wRecChange,
neuronInactivityCounter, neuronInactivityCounter,
synapticInactivityCounter, synapticActivityCounter,
GeneralUtils.linear_to_cartesian, GeneralUtils.linear_to_cartesian,
) )
config = launch_configuration(kernel.fun) config = launch_configuration(kernel.fun)
@@ -223,7 +225,7 @@ function lifForward( zit::CuArray,
exInType, exInType,
wRecChange, wRecChange,
neuronInactivityCounter, neuronInactivityCounter,
synapticInactivityCounter, synapticActivityCounter,
GeneralUtils.linear_to_cartesian; threads, blocks) GeneralUtils.linear_to_cartesian; threads, blocks)
end end
end end
@@ -246,7 +248,7 @@ function lifForward( zit,
exInType, exInType,
wRecChange, wRecChange,
neuronInactivityCounter, neuronInactivityCounter,
synapticInactivityCounter, synapticActivityCounter,
linear_to_cartesian, linear_to_cartesian,
) )
i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index
@@ -297,12 +299,10 @@ function lifForward( zit,
# count synaptic inactivity # count synaptic inactivity
if !iszero(wRec[i1,i2,i3,i4]) # check if this is wRec subscription if !iszero(wRec[i1,i2,i3,i4]) # check if this is wRec subscription
if !iszero(zit[i1,i2,i3,i4]) # synapse is active, reset counter if !iszero(zit[i1,i2,i3,i4]) # synapse is active
#WORKING should be function based. range +1.0 to +0.1 synapticActivityCounter[i1,i2,i3,i4] += 1
synapticInactivityCounter[i1,i2,i3,i4] += 1 else # synapse is inactive
else # synapse is inactive, counting synapticActivityCounter[i1,i2,i3,i4] += 0
#WORKING should be function based. range +1.0 to +0.01
synapticInactivityCounter[i1,i2,i3,i4] -= 1
end end
end end
# voltage regulator # voltage regulator
@@ -331,7 +331,7 @@ function alifForward( zit::CuArray,
exInType::CuArray, exInType::CuArray,
wRecChange::CuArray, wRecChange::CuArray,
neuronInactivityCounter::CuArray, neuronInactivityCounter::CuArray,
synapticInactivityCounter::CuArray, synapticActivityCounter::CuArray,
epsilonRecA::CuArray, epsilonRecA::CuArray,
a::CuArray, a::CuArray,
avth::CuArray, avth::CuArray,
@@ -356,7 +356,7 @@ function alifForward( zit::CuArray,
exInType, exInType,
wRecChange, wRecChange,
neuronInactivityCounter, neuronInactivityCounter,
synapticInactivityCounter, synapticActivityCounter,
epsilonRecA, epsilonRecA,
a, a,
avth, avth,
@@ -394,7 +394,7 @@ function alifForward( zit::CuArray,
exInType, exInType,
wRecChange, wRecChange,
neuronInactivityCounter, neuronInactivityCounter,
synapticInactivityCounter, synapticActivityCounter,
epsilonRecA, epsilonRecA,
a, a,
avth, avth,
@@ -422,7 +422,7 @@ function alifForward( zit,
exInType, exInType,
wRecChange, wRecChange,
neuronInactivityCounter, neuronInactivityCounter,
synapticInactivityCounter, synapticActivityCounter,
epsilonRecA, epsilonRecA,
a, a,
avth, avth,
@@ -492,10 +492,10 @@ function alifForward( zit,
# count synaptic inactivity # count synaptic inactivity
if !iszero(wRec[i1,i2,i3,i4]) # check if this is wRec subscription if !iszero(wRec[i1,i2,i3,i4]) # check if this is wRec subscription
if !iszero(zit[i1,i2,i3,i4]) # synapse is active, reset counter if !iszero(zit[i1,i2,i3,i4]) # synapse is active
synapticInactivityCounter[i1,i2,i3,i4] += 1 synapticActivityCounter[i1,i2,i3,i4] += 1
else # synapse is inactive, counting else # synapse is inactive
synapticInactivityCounter[i1,i2,i3,i4] -= 1 synapticActivityCounter[i1,i2,i3,i4] += 0
end end
end end
# voltage regulator # voltage regulator

View File

@@ -267,27 +267,28 @@ end
function learn!(kfn::kfn_1, device=cpu) function learn!(kfn::kfn_1, device=cpu)
# lif learn # lif learn
kfn.lif_wRec, kfn.lif_neuronInactivityCounter, kfn.lif_synapticInactivityCounter = kfn.lif_wRec, kfn.lif_neuronInactivityCounter, kfn.lif_synapticActivityCounter =
lifLearn(kfn.lif_wRec, lifLearn(kfn.lif_wRec,
kfn.lif_exInType, kfn.lif_exInType,
kfn.lif_wRecChange, kfn.lif_wRecChange,
kfn.lif_arrayProjection4d, kfn.lif_arrayProjection4d,
kfn.lif_neuronInactivityCounter, kfn.lif_neuronInactivityCounter,
kfn.lif_synapticInactivityCounter, kfn.lif_synapticActivityCounter,
kfn.lif_synapticConnectionNumber, kfn.lif_synapseConnectionNumber,
kfn.lif_synapticWChangeCounter, kfn.lif_synapticWChangeCounter,
kfn.lif_eta,
kfn.zitCumulative, kfn.zitCumulative,
device) device)
# alif learn # alif learn
kfn.alif_wRec, kfn.alif_neuronInactivityCounter, kfn.alif_synapticInactivityCounter = kfn.alif_wRec, kfn.alif_neuronInactivityCounter, kfn.alif_synapticActivityCounter =
alifLearn(kfn.alif_wRec, alifLearn(kfn.alif_wRec,
kfn.alif_exInType, kfn.alif_exInType,
kfn.alif_wRecChange, kfn.alif_wRecChange,
kfn.alif_arrayProjection4d, kfn.alif_arrayProjection4d,
kfn.alif_neuronInactivityCounter, kfn.alif_neuronInactivityCounter,
kfn.alif_synapticInactivityCounter, kfn.alif_synapticActivityCounter,
kfn.alif_synapticConnectionNumber, kfn.alif_synapseConnectionNumber,
kfn.alif_synapticWChangeCounter, kfn.alif_synapticWChangeCounter,
kfn.zitCumulative, kfn.zitCumulative,
device) device)
@@ -309,9 +310,10 @@ function lifLearn(wRec,
wRecChange, wRecChange,
arrayProjection4d, arrayProjection4d,
neuronInactivityCounter, neuronInactivityCounter,
synapticInactivityCounter, synapticActivityCounter,
synapticConnectionNumber, synapseConnectionNumber,
synapticWChangeCounter, #WORKING synapticWChangeCounter, #TODO
eta,
zitCumulative, zitCumulative,
device) device)
@@ -322,28 +324,39 @@ function lifLearn(wRec,
arrayProjection4d_cpu = arrayProjection4d |> cpu arrayProjection4d_cpu = arrayProjection4d |> cpu
wRec_cpu = wRec |> cpu wRec_cpu = wRec |> cpu
wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n) wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n)
eta_cpu = eta |> cpu
eta_cpu = eta_cpu[:,:,:,1]
neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n) neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n)
synapticInactivityCounter_cpu = synapticInactivityCounter |> cpu synapticActivityCounter_cpu = synapticActivityCounter |> cpu
synapticInactivityCounter_cpu = synapticInactivityCounter_cpu[:,:,:,1] synapticActivityCounter_cpu = synapticActivityCounter_cpu[:,:,:,1]
zitCumulative_cpu = zitCumulative |> cpu zitCumulative_cpu = zitCumulative |> cpu
zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col) zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col)
# -W if less than 10% of repeat avg, +W otherwise
println("wRec_cpu 1 ", wRec_cpu)
_, _, i3 = size(wRec_cpu)
for i in 1:i3
x = 0.1 * (sum(synapticActivityCounter[:,:,i]) / length(synapticActivityCounter[:,:,i]))
mask = GeneralUtils.replaceLessThan.(wRec_cpu[:,:,i], x, -1, 1)
wRec_cpu[:,:,i] .+= mask .* eta_cpu[:,:,i] .* wRec_cpu[:,:,i]
end
# weak / negative synaptic connection will get randomed in neuroplasticity() # weak / negative synaptic connection will get randomed in neuroplasticity()
wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0 wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0
# synaptic connection that has no activity will get randomed in neuroplasticity() # # synaptic connection that has no activity will get randomed in neuroplasticity()
mask = isless.(synapticInactivityCounter_cpu, -100000) # mask = isless.(synapticActivityCounter_cpu, -100000)
GeneralUtils.replace_elements!(mask, 1, wRec_cpu, -1.0) # GeneralUtils.replaceElements!(mask, 1, wRec_cpu, -1.0)
# reset lif_inactivity elements to base value # # reset lif_inactivity elements to base value
GeneralUtils.replace_elements!(mask, 1, synapticInactivityCounter_cpu, 0.0) # GeneralUtils.replaceElements!(mask, 1, synapticActivityCounter_cpu, 0.0)
# neuroplasticity, work on CPU side # neuroplasticity, work on CPU side
wRec_cpu = neuroplasticity(synapticConnectionNumber, wRec_cpu = neuroplasticity(synapseConnectionNumber,
zitCumulative_cpu, zitCumulative_cpu,
wRec_cpu, wRec_cpu,
neuronInactivityCounter_cpu, neuronInactivityCounter_cpu,
synapticInactivityCounter_cpu) synapticActivityCounter_cpu)
wRec_cpu = wRec_cpu .* arrayProjection4d_cpu wRec_cpu = wRec_cpu .* arrayProjection4d_cpu
wRec = wRec_cpu |> device wRec = wRec_cpu |> device
@@ -351,11 +364,11 @@ function lifLearn(wRec,
neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu
neuronInactivityCounter = neuronInactivityCounter_cpu |> device neuronInactivityCounter = neuronInactivityCounter_cpu |> device
synapticInactivityCounter_cpu = synapticInactivityCounter_cpu .* arrayProjection4d_cpu synapticActivityCounter_cpu = synapticActivityCounter_cpu .* arrayProjection4d_cpu
synapticInactivityCounter = synapticInactivityCounter_cpu |> device synapticActivityCounter = synapticActivityCounter_cpu |> device
# error("DEBUG -> lifLearn! $(Dates.now())")
return wRec, neuronInactivityCounter, synapticInactivityCounter return wRec, neuronInactivityCounter, synapticActivityCounter
end end
function alifLearn(wRec, function alifLearn(wRec,
@@ -363,9 +376,9 @@ function alifLearn(wRec,
wRecChange, wRecChange,
arrayProjection4d, arrayProjection4d,
neuronInactivityCounter, neuronInactivityCounter,
synapticInactivityCounter, synapticActivityCounter,
synapticConnectionNumber, synapseConnectionNumber,
synapticWChangeCounter, synapticWChangeCounter, #TODO
zitCumulative, zitCumulative,
device) device)
@@ -378,8 +391,8 @@ function alifLearn(wRec,
wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n) wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n)
neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n) neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n)
synapticInactivityCounter_cpu = synapticInactivityCounter |> cpu synapticActivityCounter_cpu = synapticActivityCounter |> cpu
synapticInactivityCounter_cpu = synapticInactivityCounter_cpu[:,:,:,1] synapticActivityCounter_cpu = synapticActivityCounter_cpu[:,:,:,1]
zitCumulative_cpu = zitCumulative |> cpu zitCumulative_cpu = zitCumulative |> cpu
zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col) zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col)
@@ -387,17 +400,17 @@ function alifLearn(wRec,
wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0 wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0
# synaptic connection that has no activity will get randomed in neuroplasticity() # synaptic connection that has no activity will get randomed in neuroplasticity()
mask = isless.(synapticInactivityCounter_cpu, -100000) mask = isless.(synapticActivityCounter_cpu, -100000)
GeneralUtils.replace_elements!(mask, 1, wRec_cpu, -1.0) GeneralUtils.replaceElements!(mask, 1, wRec_cpu, -1.0)
# reset alif_inactivity elements to base value # reset alif_inactivity elements to base value
GeneralUtils.replace_elements!(mask, 1, synapticInactivityCounter_cpu, 0.0) GeneralUtils.replaceElements!(mask, 1, synapticActivityCounter_cpu, 0.0)
# neuroplasticity, work on CPU side # neuroplasticity, work on CPU side
wRec_cpu = neuroplasticity(synapticConnectionNumber, wRec_cpu = neuroplasticity(synapseConnectionNumber,
zitCumulative_cpu, zitCumulative_cpu,
wRec_cpu, wRec_cpu,
neuronInactivityCounter_cpu, neuronInactivityCounter_cpu,
synapticInactivityCounter_cpu) synapticActivityCounter_cpu)
wRec_cpu = wRec_cpu .* arrayProjection4d_cpu wRec_cpu = wRec_cpu .* arrayProjection4d_cpu
wRec = wRec_cpu |> device wRec = wRec_cpu |> device
@@ -405,11 +418,11 @@ function alifLearn(wRec,
neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu
neuronInactivityCounter = neuronInactivityCounter_cpu |> device neuronInactivityCounter = neuronInactivityCounter_cpu |> device
synapticInactivityCounter_cpu = synapticInactivityCounter_cpu .* arrayProjection4d_cpu synapticActivityCounter_cpu = synapticActivityCounter_cpu .* arrayProjection4d_cpu
synapticInactivityCounter = synapticInactivityCounter_cpu |> device synapticActivityCounter = synapticActivityCounter_cpu |> device
# error("DEBUG -> alifLearn! $(Dates.now())") # error("DEBUG -> alifLearn! $(Dates.now())")
return wRec, neuronInactivityCounter, synapticInactivityCounter return wRec, neuronInactivityCounter, synapticActivityCounter
end end
function onLearn!(wOut, function onLearn!(wOut,
@@ -427,17 +440,17 @@ function onLearn!(wOut,
end end
function neuroplasticity(synapticConnectionNumber, function neuroplasticity(synapseConnectionNumber,
zitCumulative, # (row, col) zitCumulative, # (row, col)
wRec, # (row, col, n) wRec, # (row, col, n)
neuronInactivityCounter, neuronInactivityCounter,
synapticInactivityCounter) # (row, col, n) synapticActivityCounter) # (row, col, n)
i1,i2,i3 = size(wRec) i1,i2,i3 = size(wRec)
# for each neuron, find total number of synaptic conn that should draw # for each neuron, find total number of synaptic conn that should draw
# new connection to firing and non-firing neurons pool # new connection to firing and non-firing neurons pool
subToFireNeuron_toBe = Int(floor(0.7 * synapticConnectionNumber)) subToFireNeuron_toBe = Int(floor(0.7 * synapseConnectionNumber))
# for each neuron, count how many synap already subscribed to firing-neurons # for each neuron, count how many synap already subscribed to firing-neurons
zw = zitCumulative .* wRec zw = zitCumulative .* wRec
@@ -446,23 +459,23 @@ function neuroplasticity(synapticConnectionNumber,
projection = ones(i1,i2,i3) projection = ones(i1,i2,i3)
zitMask = zitMask .* projection # (row, col, n) zitMask = zitMask .* projection # (row, col, n)
totalNewConn = sum(isequal.(wRec, -1.0), dims=(1,2)) # count new conn mark (-1.0), (1, 1, n) totalNewConn = sum(isequal.(wRec, -1.0), dims=(1,2)) # count new conn mark (-1.0), (1, 1, n)
println("neuroplasticity, from $(synapticConnectionNumber*size(totalNewConn, 3)) conn, $(sum(totalNewConn)) are replaced") println("neuroplasticity, from $(synapseConnectionNumber*size(totalNewConn, 3)) conn, $(sum(totalNewConn)) are replaced")
# clear -1.0 marker # clear -1.0 marker
GeneralUtils.replace_elements!(wRec, -1.0, synapticInactivityCounter, -0.99) GeneralUtils.replaceElements!(wRec, -1.0, synapticActivityCounter, -0.99)
GeneralUtils.replace_elements!(wRec, -1.0, 0.0) # -1.0 marker is no longer required GeneralUtils.replaceElements!(wRec, -1.0, 0.0) # -1.0 marker is no longer required
for i in 1:i3 for i in 1:i3
if neuronInactivityCounter[1:1:i][1] < -10000 # neuron die i.e. reset all weight if neuronInactivityCounter[1:1:i][1] < -10000 # neuron die i.e. reset all weight
println("neuron die") println("neuron die")
neuronInactivityCounter[:,:,i] .= 0 # reset neuronInactivityCounter[:,:,i] .= 0 # reset
w = random_wRec(i1,i2,1,synapticConnectionNumber) w = random_wRec(i1,i2,1,synapseConnectionNumber)
wRec[:,:,i] .= w wRec[:,:,i] .= w
a = similar(w) .= -0.99 # synapticConnectionNumber of this neuron a = similar(w) .= -0.99 # synapseConnectionNumber of this neuron
mask = (!iszero).(w) mask = (!iszero).(w)
GeneralUtils.replace_elements!(mask, 1, a, 0) GeneralUtils.replaceElements!(mask, 1, a, 0)
synapticInactivityCounter[:,:,i] = a synapticActivityCounter[:,:,i] = a
else else
remaining = 0 remaining = 0
if subToFireNeuron_current[1,1,i] < subToFireNeuron_toBe if subToFireNeuron_current[1,1,i] < subToFireNeuron_toBe
@@ -471,7 +484,7 @@ function neuroplasticity(synapticConnectionNumber,
# add new conn to firing neurons pool # add new conn to firing neurons pool
remaining = addNewSynapticConn!(zitMask[:,:,i], 1, remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
@view(wRec[:,:,i]), @view(wRec[:,:,i]),
@view(synapticInactivityCounter[:,:,i]), @view(synapticActivityCounter[:,:,i]),
toAddConn) toAddConn)
totalNewConn[1,1,i] += remaining totalNewConn[1,1,i] += remaining
end end
@@ -479,12 +492,12 @@ function neuroplasticity(synapticConnectionNumber,
# add new conn to non-firing neurons pool # add new conn to non-firing neurons pool
remaining = addNewSynapticConn!(zitMask[:,:,i], 0, remaining = addNewSynapticConn!(zitMask[:,:,i], 0,
@view(wRec[:,:,i]), @view(wRec[:,:,i]),
@view(synapticInactivityCounter[:,:,i]), @view(synapticActivityCounter[:,:,i]),
totalNewConn[1,1,i]) totalNewConn[1,1,i])
if remaining > 0 # final get-all round if somehow non-firing pool has not enough slot if remaining > 0 # final get-all round if somehow non-firing pool has not enough slot
remaining = addNewSynapticConn!(zitMask[:,:,i], 1, remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
@view(wRec[:,:,i]), @view(wRec[:,:,i]),
@view(synapticInactivityCounter[:,:,i]), @view(synapticActivityCounter[:,:,i]),
remaining) remaining)
end end
end end
@@ -543,7 +556,6 @@ end
end # module end # module

View File

@@ -75,10 +75,8 @@ function addNewSynapticConn!(mask::AbstractArray{<:Any}, x::Number, wRec::Abstra
# replace the elements in wRec at the selected positions with a # replace the elements in wRec at the selected positions with a
for i in selected for i in selected
wRec[i] = rand(0.01:0.01:0.1) wRec[i] = rand(0.01:0.01:0.1)
if counter !== nothing
counter[i] = 0 # counting start from 0 counter[i] = 0 # counting start from 0
end end
end
# error("DEBUG addNewSynapticConn!") # error("DEBUG addNewSynapticConn!")
return remaining return remaining
end end

View File

@@ -58,8 +58,8 @@ Base.@kwdef mutable struct kfn_1 <: knowledgeFn
lif_firingCounter::Union{AbstractArray, Nothing} = nothing lif_firingCounter::Union{AbstractArray, Nothing} = nothing
lif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing lif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing
lif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing lif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing
lif_synapticInactivityCounter::Union{AbstractArray, Nothing} = nothing lif_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing
lif_synapticConnectionNumber::Union{Int, Nothing} = nothing lif_synapseConnectionNumber::Union{Int, Nothing} = nothing
lif_synapticWChangeCounter::Union{AbstractArray, Nothing} = nothing lif_synapticWChangeCounter::Union{AbstractArray, Nothing} = nothing
# pre-allocation array # pre-allocation array
@@ -99,8 +99,8 @@ Base.@kwdef mutable struct kfn_1 <: knowledgeFn
alif_firingCounter::Union{AbstractArray, Nothing} = nothing alif_firingCounter::Union{AbstractArray, Nothing} = nothing
alif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing alif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing
alif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing alif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing
alif_synapticInactivityCounter::Union{AbstractArray, Nothing} = nothing alif_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing
alif_synapticConnectionNumber::Union{Int, Nothing} = nothing alif_synapseConnectionNumber::Union{Int, Nothing} = nothing
alif_synapticWChangeCounter::Union{AbstractArray, Nothing} = nothing alif_synapticWChangeCounter::Union{AbstractArray, Nothing} = nothing
# pre-allocation array # pre-allocation array
@@ -200,8 +200,8 @@ function kfn_1(params::Dict; device=cpu)
# subscription # subscription
synapticConnectionPercent = kfn.params[:computeNeuron][:lif][:params][:synapticConnectionPercent] synapticConnectionPercent = kfn.params[:computeNeuron][:lif][:params][:synapticConnectionPercent]
kfn.lif_synapticConnectionNumber = Int(floor(row*col * synapticConnectionPercent/100)) kfn.lif_synapseConnectionNumber = Int(floor(row*col * synapticConnectionPercent/100))
w = random_wRec(row, col, lif_n, kfn.lif_synapticConnectionNumber) w = random_wRec(row, col, lif_n, kfn.lif_synapseConnectionNumber)
# project 3D w into 4D kfn.lif_wRec (row, col, n, batch) # project 3D w into 4D kfn.lif_wRec (row, col, n, batch)
kfn.lif_wRec = reshape(w, (row, col, lif_n, 1)) .* ones(row, col, lif_n, batch) |> device kfn.lif_wRec = reshape(w, (row, col, lif_n, 1)) .* ones(row, col, lif_n, batch) |> device
@@ -227,15 +227,19 @@ function kfn_1(params::Dict; device=cpu)
kfn.lif_firingCounter = (similar(kfn.lif_wRec) .= 0) kfn.lif_firingCounter = (similar(kfn.lif_wRec) .= 0)
kfn.lif_firingTargetFrequency = (similar(kfn.lif_wRec) .= 0.1) kfn.lif_firingTargetFrequency = (similar(kfn.lif_wRec) .= 0.1)
kfn.lif_neuronInactivityCounter = (similar(kfn.lif_wRec) .= 0) kfn.lif_neuronInactivityCounter = (similar(kfn.lif_wRec) .= 0)
kfn.lif_synapticInactivityCounter = Array(similar(kfn.lif_wRec) .= -0.99) # -9 for non-sub conn
# count subscribed synapse activity, just like epsilonRec but without decay.
# use to adjust weight based on how often neural pathway is used
kfn.lif_synapticActivityCounter = Array(similar(kfn.lif_wRec) .= -0.99) # -0.99 for non-sub conn
mask = Array((!iszero).(kfn.lif_wRec)) mask = Array((!iszero).(kfn.lif_wRec))
# initial value subscribed conn, synapticInactivityCounter range -10000 to +10000 # initial value subscribed conn, synapticActivityCounter range -10000 to +10000
GeneralUtils.replace_elements!(mask, 1, kfn.lif_synapticInactivityCounter, 0) GeneralUtils.replaceElements!(mask, 1, kfn.lif_synapticActivityCounter, 0)
kfn.lif_synapticInactivityCounter = kfn.lif_synapticInactivityCounter |> device kfn.lif_synapticActivityCounter = kfn.lif_synapticActivityCounter |> device
kfn.lif_synapticWChangeCounter = Array(similar(kfn.lif_wRec) .= -0.99) # -9 for non-sub conn
kfn.lif_synapticWChangeCounter = Array(similar(kfn.lif_wRec) .= -0.99) # -0.99 for non-sub conn
mask = Array((!iszero).(kfn.lif_wRec)) mask = Array((!iszero).(kfn.lif_wRec))
# initial value subscribed conn, synapticInactivityCounter range -10000 to +10000 # initial value subscribed conn, synapticActivityCounter range -10000 to +10000
GeneralUtils.replace_elements!(mask, 1, kfn.lif_synapticWChangeCounter, 1.0) GeneralUtils.replaceElements!(mask, 1, kfn.lif_synapticWChangeCounter, 1.0)
kfn.lif_synapticWChangeCounter = kfn.lif_synapticWChangeCounter |> device kfn.lif_synapticWChangeCounter = kfn.lif_synapticWChangeCounter |> device
kfn.lif_arrayProjection4d = (similar(kfn.lif_wRec) .= 1) kfn.lif_arrayProjection4d = (similar(kfn.lif_wRec) .= 1)
@@ -254,8 +258,8 @@ function kfn_1(params::Dict; device=cpu)
# subscription # subscription
synapticConnectionPercent = kfn.params[:computeNeuron][:alif][:params][:synapticConnectionPercent] synapticConnectionPercent = kfn.params[:computeNeuron][:alif][:params][:synapticConnectionPercent]
kfn.alif_synapticConnectionNumber = Int(floor(row*col * synapticConnectionPercent/100)) kfn.alif_synapseConnectionNumber = Int(floor(row*col * synapticConnectionPercent/100))
w = random_wRec(row, col, alif_n, kfn.alif_synapticConnectionNumber) w = random_wRec(row, col, alif_n, kfn.alif_synapseConnectionNumber)
# project 3D w into 4D kfn.alif_wRec # project 3D w into 4D kfn.alif_wRec
kfn.alif_wRec = reshape(w, (row, col, alif_n, 1)) .* ones(row, col, alif_n, batch) |> device kfn.alif_wRec = reshape(w, (row, col, alif_n, 1)) .* ones(row, col, alif_n, batch) |> device
@@ -281,15 +285,15 @@ function kfn_1(params::Dict; device=cpu)
kfn.alif_firingCounter = (similar(kfn.alif_wRec) .= 0) kfn.alif_firingCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_firingTargetFrequency = (similar(kfn.alif_wRec) .= 0.1) kfn.alif_firingTargetFrequency = (similar(kfn.alif_wRec) .= 0.1)
kfn.alif_neuronInactivityCounter = (similar(kfn.alif_wRec) .= 0) kfn.alif_neuronInactivityCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_synapticInactivityCounter = Array(similar(kfn.alif_wRec) .= -0.99) # -9 for non-sub conn kfn.alif_synapticActivityCounter = Array(similar(kfn.alif_wRec) .= -0.99) # -9 for non-sub conn
mask = Array((!iszero).(kfn.alif_wRec)) mask = Array((!iszero).(kfn.alif_wRec))
# initial value subscribed conn, synapticInactivityCounter range -10000 to +10000 # initial value subscribed conn, synapticActivityCounter range -10000 to +10000
GeneralUtils.replace_elements!(mask, 1, kfn.alif_synapticInactivityCounter, 0) GeneralUtils.replaceElements!(mask, 1, kfn.alif_synapticActivityCounter, 0)
kfn.alif_synapticInactivityCounter = kfn.alif_synapticInactivityCounter |> device kfn.alif_synapticActivityCounter = kfn.alif_synapticActivityCounter |> device
kfn.alif_synapticWChangeCounter = Array(similar(kfn.alif_wRec) .= -0.99) # -9 for non-sub conn kfn.alif_synapticWChangeCounter = Array(similar(kfn.alif_wRec) .= -0.99) # -9 for non-sub conn
mask = Array((!iszero).(kfn.alif_wRec)) mask = Array((!iszero).(kfn.alif_wRec))
# initial value subscribed conn, synapticInactivityCounter range -10000 to +10000 # initial value subscribed conn, synapticActivityCounter range -10000 to +10000
GeneralUtils.replace_elements!(mask, 1, kfn.alif_synapticWChangeCounter, 1.0) GeneralUtils.replaceElements!(mask, 1, kfn.alif_synapticWChangeCounter, 1.0)
kfn.alif_synapticWChangeCounter = kfn.alif_synapticWChangeCounter |> device kfn.alif_synapticWChangeCounter = kfn.alif_synapticWChangeCounter |> device
kfn.alif_arrayProjection4d = (similar(kfn.alif_wRec) .= 1) kfn.alif_arrayProjection4d = (similar(kfn.alif_wRec) .= 1)
@@ -376,12 +380,12 @@ function kfn_1(params::Dict; device=cpu)
return kfn return kfn
end end
function random_wRec(row, col, n, synapticConnectionNumber) function random_wRec(row, col, n, synapseConnectionNumber)
# subscription # subscription
w = zeros(row, col, n) w = zeros(row, col, n)
for slice in eachslice(w, dims=3) for slice in eachslice(w, dims=3)
pool = shuffle!([1:row*col...])[1:synapticConnectionNumber] pool = shuffle!([1:row*col...])[1:synapseConnectionNumber]
for i in pool for i in pool
slice[i] = rand(0.01:0.01:0.1) # assign weight to synaptic connection. /10 to start small, slice[i] = rand(0.01:0.01:0.1) # assign weight to synaptic connection. /10 to start small,
# otherwise RSNN's vt Usually stay negative (-) # otherwise RSNN's vt Usually stay negative (-)