minor fix
This commit is contained in:
931
previousVersion/0.0.5_25percentAccuracy/Manifest.toml
Normal file
931
previousVersion/0.0.5_25percentAccuracy/Manifest.toml
Normal file
@@ -0,0 +1,931 @@
|
||||
# This file is machine-generated - editing it directly is not advised
|
||||
|
||||
julia_version = "1.9.0"
|
||||
manifest_format = "2.0"
|
||||
project_hash = "b9e7ae4b78dc59a5adb629a04e856c4fedc6fb60"
|
||||
|
||||
[[deps.AbstractFFTs]]
|
||||
deps = ["LinearAlgebra"]
|
||||
git-tree-sha1 = "16b6dbc4cf7caee4e1e75c49485ec67b667098a0"
|
||||
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
|
||||
version = "1.3.1"
|
||||
weakdeps = ["ChainRulesCore"]
|
||||
|
||||
[deps.AbstractFFTs.extensions]
|
||||
AbstractFFTsChainRulesCoreExt = "ChainRulesCore"
|
||||
|
||||
[[deps.Accessors]]
|
||||
deps = ["Compat", "CompositionsBase", "ConstructionBase", "Dates", "InverseFunctions", "LinearAlgebra", "MacroTools", "Requires", "Test"]
|
||||
git-tree-sha1 = "2b301c2388067d655fe5e4ca6d4aa53b61f895b4"
|
||||
uuid = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
|
||||
version = "0.1.31"
|
||||
|
||||
[deps.Accessors.extensions]
|
||||
AccessorsAxisKeysExt = "AxisKeys"
|
||||
AccessorsIntervalSetsExt = "IntervalSets"
|
||||
AccessorsStaticArraysExt = "StaticArrays"
|
||||
AccessorsStructArraysExt = "StructArrays"
|
||||
|
||||
[deps.Accessors.weakdeps]
|
||||
AxisKeys = "94b1ba4f-4ee9-5380-92f1-94cde586c3c5"
|
||||
IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953"
|
||||
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
|
||||
StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
|
||||
|
||||
[[deps.Adapt]]
|
||||
deps = ["LinearAlgebra", "Requires"]
|
||||
git-tree-sha1 = "cc37d689f599e8df4f464b2fa3870ff7db7492ef"
|
||||
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
|
||||
version = "3.6.1"
|
||||
weakdeps = ["StaticArrays"]
|
||||
|
||||
[deps.Adapt.extensions]
|
||||
AdaptStaticArraysExt = "StaticArrays"
|
||||
|
||||
[[deps.ArgCheck]]
|
||||
git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4"
|
||||
uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197"
|
||||
version = "2.3.0"
|
||||
|
||||
[[deps.ArgTools]]
|
||||
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
|
||||
version = "1.1.1"
|
||||
|
||||
[[deps.Artifacts]]
|
||||
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
|
||||
|
||||
[[deps.Atomix]]
|
||||
deps = ["UnsafeAtomics"]
|
||||
git-tree-sha1 = "c06a868224ecba914baa6942988e2f2aade419be"
|
||||
uuid = "a9b6321e-bd34-4604-b9c9-b65b8de01458"
|
||||
version = "0.1.0"
|
||||
|
||||
[[deps.BFloat16s]]
|
||||
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
|
||||
git-tree-sha1 = "dbf84058d0a8cbbadee18d25cf606934b22d7c66"
|
||||
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
|
||||
version = "0.4.2"
|
||||
|
||||
[[deps.BangBang]]
|
||||
deps = ["Compat", "ConstructionBase", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables"]
|
||||
git-tree-sha1 = "54b00d1b93791f8e19e31584bd30f2cb6004614b"
|
||||
uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66"
|
||||
version = "0.3.38"
|
||||
|
||||
[deps.BangBang.extensions]
|
||||
BangBangChainRulesCoreExt = "ChainRulesCore"
|
||||
BangBangDataFramesExt = "DataFrames"
|
||||
BangBangStaticArraysExt = "StaticArrays"
|
||||
BangBangStructArraysExt = "StructArrays"
|
||||
BangBangTypedTablesExt = "TypedTables"
|
||||
|
||||
[deps.BangBang.weakdeps]
|
||||
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
|
||||
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
|
||||
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
|
||||
StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
|
||||
TypedTables = "9d95f2ec-7b3d-5a63-8d20-e2491e220bb9"
|
||||
|
||||
[[deps.Base64]]
|
||||
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
|
||||
|
||||
[[deps.Baselet]]
|
||||
git-tree-sha1 = "aebf55e6d7795e02ca500a689d326ac979aaf89e"
|
||||
uuid = "9718e550-a3fa-408a-8086-8db961cd8217"
|
||||
version = "0.1.1"
|
||||
|
||||
[[deps.CEnum]]
|
||||
git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90"
|
||||
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
|
||||
version = "0.4.2"
|
||||
|
||||
[[deps.CUDA]]
|
||||
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CUDA_Driver_jll", "CUDA_Runtime_Discovery", "CUDA_Runtime_jll", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "KernelAbstractions", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Preferences", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "UnsafeAtomicsLLVM"]
|
||||
git-tree-sha1 = "280893f920654ebfaaaa1999fbd975689051f890"
|
||||
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
|
||||
version = "4.2.0"
|
||||
|
||||
[[deps.CUDA_Driver_jll]]
|
||||
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"]
|
||||
git-tree-sha1 = "498f45593f6ddc0adff64a9310bb6710e851781b"
|
||||
uuid = "4ee394cb-3365-5eb0-8335-949819d2adfc"
|
||||
version = "0.5.0+1"
|
||||
|
||||
[[deps.CUDA_Runtime_Discovery]]
|
||||
deps = ["Libdl"]
|
||||
git-tree-sha1 = "bcc4a23cbbd99c8535a5318455dcf0f2546ec536"
|
||||
uuid = "1af6417a-86b4-443c-805f-a4643ffb695f"
|
||||
version = "0.2.2"
|
||||
|
||||
[[deps.CUDA_Runtime_jll]]
|
||||
deps = ["Artifacts", "CUDA_Driver_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
|
||||
git-tree-sha1 = "5248d9c45712e51e27ba9b30eebec65658c6ce29"
|
||||
uuid = "76a88914-d11a-5bdc-97e0-2f5a05c973a2"
|
||||
version = "0.6.0+0"
|
||||
|
||||
[[deps.CUDNN_jll]]
|
||||
deps = ["Artifacts", "CUDA_Runtime_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
|
||||
git-tree-sha1 = "2918fbffb50e3b7a0b9127617587afa76d4276e8"
|
||||
uuid = "62b44479-cb7b-5706-934f-f13b2eb2e645"
|
||||
version = "8.8.1+0"
|
||||
|
||||
[[deps.Calculus]]
|
||||
deps = ["LinearAlgebra"]
|
||||
git-tree-sha1 = "f641eb0a4f00c343bbc32346e1217b86f3ce9dad"
|
||||
uuid = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9"
|
||||
version = "0.5.1"
|
||||
|
||||
[[deps.ChainRules]]
|
||||
deps = ["Adapt", "ChainRulesCore", "Compat", "Distributed", "GPUArraysCore", "IrrationalConstants", "LinearAlgebra", "Random", "RealDot", "SparseArrays", "Statistics", "StructArrays"]
|
||||
git-tree-sha1 = "8bae903893aeeb429cf732cf1888490b93ecf265"
|
||||
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
|
||||
version = "1.49.0"
|
||||
|
||||
[[deps.ChainRulesCore]]
|
||||
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
|
||||
git-tree-sha1 = "e30f2f4e20f7f186dc36529910beaedc60cfa644"
|
||||
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
|
||||
version = "1.16.0"
|
||||
|
||||
[[deps.CommonSubexpressions]]
|
||||
deps = ["MacroTools", "Test"]
|
||||
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
|
||||
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
|
||||
version = "0.3.0"
|
||||
|
||||
[[deps.Compat]]
|
||||
deps = ["UUIDs"]
|
||||
git-tree-sha1 = "7a60c856b9fa189eb34f5f8a6f6b5529b7942957"
|
||||
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
|
||||
version = "4.6.1"
|
||||
weakdeps = ["Dates", "LinearAlgebra"]
|
||||
|
||||
[deps.Compat.extensions]
|
||||
CompatLinearAlgebraExt = "LinearAlgebra"
|
||||
|
||||
[[deps.CompilerSupportLibraries_jll]]
|
||||
deps = ["Artifacts", "Libdl"]
|
||||
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
|
||||
version = "1.0.2+0"
|
||||
|
||||
[[deps.CompositionsBase]]
|
||||
git-tree-sha1 = "802bb88cd69dfd1509f6670416bd4434015693ad"
|
||||
uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b"
|
||||
version = "0.1.2"
|
||||
weakdeps = ["InverseFunctions"]
|
||||
|
||||
[deps.CompositionsBase.extensions]
|
||||
CompositionsBaseInverseFunctionsExt = "InverseFunctions"
|
||||
|
||||
[[deps.ConstructionBase]]
|
||||
deps = ["LinearAlgebra"]
|
||||
git-tree-sha1 = "738fec4d684a9a6ee9598a8bfee305b26831f28c"
|
||||
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
|
||||
version = "1.5.2"
|
||||
|
||||
[deps.ConstructionBase.extensions]
|
||||
ConstructionBaseIntervalSetsExt = "IntervalSets"
|
||||
ConstructionBaseStaticArraysExt = "StaticArrays"
|
||||
|
||||
[deps.ConstructionBase.weakdeps]
|
||||
IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953"
|
||||
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
|
||||
|
||||
[[deps.ContextVariablesX]]
|
||||
deps = ["Compat", "Logging", "UUIDs"]
|
||||
git-tree-sha1 = "25cc3803f1030ab855e383129dcd3dc294e322cc"
|
||||
uuid = "6add18c4-b38d-439d-96f6-d6bc489c04c5"
|
||||
version = "0.1.3"
|
||||
|
||||
[[deps.DataAPI]]
|
||||
git-tree-sha1 = "e8119c1a33d267e16108be441a287a6981ba1630"
|
||||
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
|
||||
version = "1.14.0"
|
||||
|
||||
[[deps.DataStructures]]
|
||||
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
|
||||
git-tree-sha1 = "d1fff3a548102f48987a52a2e0d114fa97d730f0"
|
||||
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
|
||||
version = "0.18.13"
|
||||
|
||||
[[deps.DataValueInterfaces]]
|
||||
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
|
||||
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
|
||||
version = "1.0.0"
|
||||
|
||||
[[deps.Dates]]
|
||||
deps = ["Printf"]
|
||||
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
|
||||
|
||||
[[deps.DefineSingletons]]
|
||||
git-tree-sha1 = "0fba8b706d0178b4dc7fd44a96a92382c9065c2c"
|
||||
uuid = "244e2a9f-e319-4986-a169-4d1fe445cd52"
|
||||
version = "0.1.2"
|
||||
|
||||
[[deps.DelimitedFiles]]
|
||||
deps = ["Mmap"]
|
||||
git-tree-sha1 = "9e2f36d3c96a820c678f2f1f1782582fcf685bae"
|
||||
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
|
||||
version = "1.9.1"
|
||||
|
||||
[[deps.DiffResults]]
|
||||
deps = ["StaticArraysCore"]
|
||||
git-tree-sha1 = "782dd5f4561f5d267313f23853baaaa4c52ea621"
|
||||
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
|
||||
version = "1.1.0"
|
||||
|
||||
[[deps.DiffRules]]
|
||||
deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
|
||||
git-tree-sha1 = "a4ad7ef19d2cdc2eff57abbbe68032b1cd0bd8f8"
|
||||
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
|
||||
version = "1.13.0"
|
||||
|
||||
[[deps.Distributed]]
|
||||
deps = ["Random", "Serialization", "Sockets"]
|
||||
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
|
||||
|
||||
[[deps.Distributions]]
|
||||
deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "Test"]
|
||||
git-tree-sha1 = "eead66061583b6807652281c0fbf291d7a9dc497"
|
||||
uuid = "31c24e10-a181-5473-b8eb-7969acd0382f"
|
||||
version = "0.25.90"
|
||||
|
||||
[deps.Distributions.extensions]
|
||||
DistributionsChainRulesCoreExt = "ChainRulesCore"
|
||||
DistributionsDensityInterfaceExt = "DensityInterface"
|
||||
|
||||
[deps.Distributions.weakdeps]
|
||||
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
|
||||
DensityInterface = "b429d917-457f-4dbc-8f4c-0cc954292b1d"
|
||||
|
||||
[[deps.DocStringExtensions]]
|
||||
deps = ["LibGit2"]
|
||||
git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d"
|
||||
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
|
||||
version = "0.9.3"
|
||||
|
||||
[[deps.Downloads]]
|
||||
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
|
||||
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
|
||||
version = "1.6.0"
|
||||
|
||||
[[deps.DualNumbers]]
|
||||
deps = ["Calculus", "NaNMath", "SpecialFunctions"]
|
||||
git-tree-sha1 = "5837a837389fccf076445fce071c8ddaea35a566"
|
||||
uuid = "fa6b7ba4-c1ee-5f82-b5fc-ecf0adba8f74"
|
||||
version = "0.6.8"
|
||||
|
||||
[[deps.ExprTools]]
|
||||
git-tree-sha1 = "c1d06d129da9f55715c6c212866f5b1bddc5fa00"
|
||||
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
|
||||
version = "0.1.9"
|
||||
|
||||
[[deps.FLoops]]
|
||||
deps = ["BangBang", "Compat", "FLoopsBase", "InitialValues", "JuliaVariables", "MLStyle", "Serialization", "Setfield", "Transducers"]
|
||||
git-tree-sha1 = "ffb97765602e3cbe59a0589d237bf07f245a8576"
|
||||
uuid = "cc61a311-1640-44b5-9fba-1b764f453329"
|
||||
version = "0.2.1"
|
||||
|
||||
[[deps.FLoopsBase]]
|
||||
deps = ["ContextVariablesX"]
|
||||
git-tree-sha1 = "656f7a6859be8673bf1f35da5670246b923964f7"
|
||||
uuid = "b9860ae5-e623-471e-878b-f6a53c775ea6"
|
||||
version = "0.1.1"
|
||||
|
||||
[[deps.FileWatching]]
|
||||
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
|
||||
|
||||
[[deps.FillArrays]]
|
||||
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
|
||||
git-tree-sha1 = "fc86b4fd3eff76c3ce4f5e96e2fdfa6282722885"
|
||||
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
|
||||
version = "1.0.0"
|
||||
|
||||
[[deps.Flux]]
|
||||
deps = ["Adapt", "CUDA", "ChainRulesCore", "Functors", "LinearAlgebra", "MLUtils", "MacroTools", "NNlib", "NNlibCUDA", "OneHotArrays", "Optimisers", "Preferences", "ProgressLogging", "Random", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "Zygote", "cuDNN"]
|
||||
git-tree-sha1 = "64005071944bae14fc145661f617eb68b339189c"
|
||||
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
|
||||
version = "0.13.16"
|
||||
|
||||
[deps.Flux.extensions]
|
||||
AMDGPUExt = "AMDGPU"
|
||||
|
||||
[deps.Flux.weakdeps]
|
||||
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
|
||||
|
||||
[[deps.FoldsThreads]]
|
||||
deps = ["Accessors", "FunctionWrappers", "InitialValues", "SplittablesBase", "Transducers"]
|
||||
git-tree-sha1 = "eb8e1989b9028f7e0985b4268dabe94682249025"
|
||||
uuid = "9c68100b-dfe1-47cf-94c8-95104e173443"
|
||||
version = "0.1.1"
|
||||
|
||||
[[deps.ForwardDiff]]
|
||||
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"]
|
||||
git-tree-sha1 = "00e252f4d706b3d55a8863432e742bf5717b498d"
|
||||
uuid = "f6369f11-7733-5829-9624-2563aa707210"
|
||||
version = "0.10.35"
|
||||
weakdeps = ["StaticArrays"]
|
||||
|
||||
[deps.ForwardDiff.extensions]
|
||||
ForwardDiffStaticArraysExt = "StaticArrays"
|
||||
|
||||
[[deps.FunctionWrappers]]
|
||||
git-tree-sha1 = "d62485945ce5ae9c0c48f124a84998d755bae00e"
|
||||
uuid = "069b7b12-0de2-55c6-9aab-29f3d0a68a2e"
|
||||
version = "1.1.3"
|
||||
|
||||
[[deps.Functors]]
|
||||
deps = ["LinearAlgebra"]
|
||||
git-tree-sha1 = "478f8c3145bb91d82c2cf20433e8c1b30df454cc"
|
||||
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
|
||||
version = "0.4.4"
|
||||
|
||||
[[deps.Future]]
|
||||
deps = ["Random"]
|
||||
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
|
||||
|
||||
[[deps.GPUArrays]]
|
||||
deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"]
|
||||
git-tree-sha1 = "9ade6983c3dbbd492cf5729f865fe030d1541463"
|
||||
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
|
||||
version = "8.6.6"
|
||||
|
||||
[[deps.GPUArraysCore]]
|
||||
deps = ["Adapt"]
|
||||
git-tree-sha1 = "1cd7f0af1aa58abc02ea1d872953a97359cb87fa"
|
||||
uuid = "46192b85-c4d5-4398-a991-12ede77f4527"
|
||||
version = "0.1.4"
|
||||
|
||||
[[deps.GPUCompiler]]
|
||||
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Scratch", "TimerOutputs", "UUIDs"]
|
||||
git-tree-sha1 = "5737dc242dadd392d934ee330c69ceff47f0259c"
|
||||
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
|
||||
version = "0.19.4"
|
||||
|
||||
[[deps.GeneralUtils]]
|
||||
deps = ["DataStructures", "Distributions", "JSON3"]
|
||||
path = "C:\\Users\\naraw\\.julia\\dev\\GeneralUtils"
|
||||
uuid = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
|
||||
version = "0.1.0"
|
||||
|
||||
[[deps.HypergeometricFunctions]]
|
||||
deps = ["DualNumbers", "LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"]
|
||||
git-tree-sha1 = "84204eae2dd237500835990bcade263e27674a93"
|
||||
uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a"
|
||||
version = "0.3.16"
|
||||
|
||||
[[deps.IRTools]]
|
||||
deps = ["InteractiveUtils", "MacroTools", "Test"]
|
||||
git-tree-sha1 = "eac00994ce3229a464c2847e956d77a2c64ad3a5"
|
||||
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
|
||||
version = "0.4.10"
|
||||
|
||||
[[deps.InitialValues]]
|
||||
git-tree-sha1 = "4da0f88e9a39111c2fa3add390ab15f3a44f3ca3"
|
||||
uuid = "22cec73e-a1b8-11e9-2c92-598750a2cf9c"
|
||||
version = "0.3.1"
|
||||
|
||||
[[deps.InteractiveUtils]]
|
||||
deps = ["Markdown"]
|
||||
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
|
||||
|
||||
[[deps.InverseFunctions]]
|
||||
deps = ["Test"]
|
||||
git-tree-sha1 = "6667aadd1cdee2c6cd068128b3d226ebc4fb0c67"
|
||||
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
|
||||
version = "0.1.9"
|
||||
|
||||
[[deps.IrrationalConstants]]
|
||||
git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2"
|
||||
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
|
||||
version = "0.2.2"
|
||||
|
||||
[[deps.IteratorInterfaceExtensions]]
|
||||
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
|
||||
uuid = "82899510-4779-5014-852e-03e436cf321d"
|
||||
version = "1.0.0"
|
||||
|
||||
[[deps.JLLWrappers]]
|
||||
deps = ["Preferences"]
|
||||
git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1"
|
||||
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
|
||||
version = "1.4.1"
|
||||
|
||||
[[deps.JSON3]]
|
||||
deps = ["Dates", "Mmap", "Parsers", "SnoopPrecompile", "StructTypes", "UUIDs"]
|
||||
git-tree-sha1 = "84b10656a41ef564c39d2d477d7236966d2b5683"
|
||||
uuid = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
|
||||
version = "1.12.0"
|
||||
|
||||
[[deps.JuliaVariables]]
|
||||
deps = ["MLStyle", "NameResolution"]
|
||||
git-tree-sha1 = "49fb3cb53362ddadb4415e9b73926d6b40709e70"
|
||||
uuid = "b14d175d-62b4-44ba-8fb7-3064adc8c3ec"
|
||||
version = "0.2.4"
|
||||
|
||||
[[deps.KernelAbstractions]]
|
||||
deps = ["Adapt", "Atomix", "InteractiveUtils", "LinearAlgebra", "MacroTools", "PrecompileTools", "SparseArrays", "StaticArrays", "UUIDs", "UnsafeAtomics", "UnsafeAtomicsLLVM"]
|
||||
git-tree-sha1 = "47be64f040a7ece575c2b5f53ca6da7b548d69f4"
|
||||
uuid = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
|
||||
version = "0.9.4"
|
||||
|
||||
[[deps.LLVM]]
|
||||
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
|
||||
git-tree-sha1 = "a8960cae30b42b66dd41808beb76490519f6f9e2"
|
||||
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
|
||||
version = "5.0.0"
|
||||
|
||||
[[deps.LLVMExtra_jll]]
|
||||
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
|
||||
git-tree-sha1 = "09b7505cc0b1cee87e5d4a26eea61d2e1b0dcd35"
|
||||
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
|
||||
version = "0.0.21+0"
|
||||
|
||||
[[deps.LazyArtifacts]]
|
||||
deps = ["Artifacts", "Pkg"]
|
||||
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
|
||||
|
||||
[[deps.LibCURL]]
|
||||
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
|
||||
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
|
||||
version = "0.6.3"
|
||||
|
||||
[[deps.LibCURL_jll]]
|
||||
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
|
||||
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
|
||||
version = "7.84.0+0"
|
||||
|
||||
[[deps.LibGit2]]
|
||||
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
|
||||
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
|
||||
|
||||
[[deps.LibSSH2_jll]]
|
||||
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
|
||||
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
|
||||
version = "1.10.2+0"
|
||||
|
||||
[[deps.Libdl]]
|
||||
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
|
||||
|
||||
[[deps.LinearAlgebra]]
|
||||
deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"]
|
||||
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
|
||||
|
||||
[[deps.LogExpFunctions]]
|
||||
deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"]
|
||||
git-tree-sha1 = "0a1b7c2863e44523180fdb3146534e265a91870b"
|
||||
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
|
||||
version = "0.3.23"
|
||||
|
||||
[deps.LogExpFunctions.extensions]
|
||||
LogExpFunctionsChainRulesCoreExt = "ChainRulesCore"
|
||||
LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables"
|
||||
LogExpFunctionsInverseFunctionsExt = "InverseFunctions"
|
||||
|
||||
[deps.LogExpFunctions.weakdeps]
|
||||
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
|
||||
ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
|
||||
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
|
||||
|
||||
[[deps.Logging]]
|
||||
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
|
||||
|
||||
[[deps.MLStyle]]
|
||||
git-tree-sha1 = "bc38dff0548128765760c79eb7388a4b37fae2c8"
|
||||
uuid = "d8e11817-5142-5d16-987a-aa16d5891078"
|
||||
version = "0.4.17"
|
||||
|
||||
[[deps.MLUtils]]
|
||||
deps = ["ChainRulesCore", "Compat", "DataAPI", "DelimitedFiles", "FLoops", "FoldsThreads", "NNlib", "Random", "ShowCases", "SimpleTraits", "Statistics", "StatsBase", "Tables", "Transducers"]
|
||||
git-tree-sha1 = "ca31739905ddb08c59758726e22b9e25d0d1521b"
|
||||
uuid = "f1d291b0-491e-4a28-83b9-f70985020b54"
|
||||
version = "0.4.2"
|
||||
|
||||
[[deps.MacroTools]]
|
||||
deps = ["Markdown", "Random"]
|
||||
git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2"
|
||||
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
|
||||
version = "0.5.10"
|
||||
|
||||
[[deps.Markdown]]
|
||||
deps = ["Base64"]
|
||||
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
|
||||
|
||||
[[deps.MbedTLS_jll]]
|
||||
deps = ["Artifacts", "Libdl"]
|
||||
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
|
||||
version = "2.28.2+0"
|
||||
|
||||
[[deps.MicroCollections]]
|
||||
deps = ["BangBang", "InitialValues", "Setfield"]
|
||||
git-tree-sha1 = "629afd7d10dbc6935ec59b32daeb33bc4460a42e"
|
||||
uuid = "128add7d-3638-4c79-886c-908ea0c25c34"
|
||||
version = "0.1.4"
|
||||
|
||||
[[deps.Missings]]
|
||||
deps = ["DataAPI"]
|
||||
git-tree-sha1 = "f66bdc5de519e8f8ae43bdc598782d35a25b1272"
|
||||
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
|
||||
version = "1.1.0"
|
||||
|
||||
[[deps.Mmap]]
|
||||
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
|
||||
|
||||
[[deps.MozillaCACerts_jll]]
|
||||
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
|
||||
version = "2022.10.11"
|
||||
|
||||
[[deps.NNlib]]
|
||||
deps = ["Adapt", "Atomix", "ChainRulesCore", "GPUArraysCore", "KernelAbstractions", "LinearAlgebra", "Pkg", "Random", "Requires", "Statistics"]
|
||||
git-tree-sha1 = "99e6dbb50d8a96702dc60954569e9fe7291cc55d"
|
||||
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
|
||||
version = "0.8.20"
|
||||
|
||||
[deps.NNlib.extensions]
|
||||
NNlibAMDGPUExt = "AMDGPU"
|
||||
|
||||
[deps.NNlib.weakdeps]
|
||||
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
|
||||
|
||||
[[deps.NNlibCUDA]]
|
||||
deps = ["Adapt", "CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics", "cuDNN"]
|
||||
git-tree-sha1 = "f94a9684394ff0d325cc12b06da7032d8be01aaf"
|
||||
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
|
||||
version = "0.2.7"
|
||||
|
||||
[[deps.NaNMath]]
|
||||
deps = ["OpenLibm_jll"]
|
||||
git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4"
|
||||
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
|
||||
version = "1.0.2"
|
||||
|
||||
[[deps.NameResolution]]
|
||||
deps = ["PrettyPrint"]
|
||||
git-tree-sha1 = "1a0fa0e9613f46c9b8c11eee38ebb4f590013c5e"
|
||||
uuid = "71a1bf82-56d0-4bbc-8a3c-48b961074391"
|
||||
version = "0.1.5"
|
||||
|
||||
[[deps.NetworkOptions]]
|
||||
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
|
||||
version = "1.2.0"
|
||||
|
||||
[[deps.OneHotArrays]]
|
||||
deps = ["Adapt", "ChainRulesCore", "Compat", "GPUArraysCore", "LinearAlgebra", "NNlib"]
|
||||
git-tree-sha1 = "f511fca956ed9e70b80cd3417bb8c2dde4b68644"
|
||||
uuid = "0b1bfda6-eb8a-41d2-88d8-f5af5cad476f"
|
||||
version = "0.2.3"
|
||||
|
||||
[[deps.OpenBLAS_jll]]
|
||||
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
|
||||
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
|
||||
version = "0.3.21+4"
|
||||
|
||||
[[deps.OpenLibm_jll]]
|
||||
deps = ["Artifacts", "Libdl"]
|
||||
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
|
||||
version = "0.8.1+0"
|
||||
|
||||
[[deps.OpenSpecFun_jll]]
|
||||
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
|
||||
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
|
||||
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
|
||||
version = "0.5.5+0"
|
||||
|
||||
[[deps.Optimisers]]
|
||||
deps = ["ChainRulesCore", "Functors", "LinearAlgebra", "Random", "Statistics"]
|
||||
git-tree-sha1 = "6a01f65dd8583dee82eecc2a19b0ff21521aa749"
|
||||
uuid = "3bd65402-5787-11e9-1adc-39752487f4e2"
|
||||
version = "0.2.18"
|
||||
|
||||
[[deps.OrderedCollections]]
|
||||
git-tree-sha1 = "d321bf2de576bf25ec4d3e4360faca399afca282"
|
||||
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
|
||||
version = "1.6.0"
|
||||
|
||||
[[deps.PDMats]]
|
||||
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
|
||||
git-tree-sha1 = "67eae2738d63117a196f497d7db789821bce61d1"
|
||||
uuid = "90014a1f-27ba-587c-ab20-58faa44d9150"
|
||||
version = "0.11.17"
|
||||
|
||||
[[deps.Parsers]]
|
||||
deps = ["Dates", "SnoopPrecompile"]
|
||||
git-tree-sha1 = "478ac6c952fddd4399e71d4779797c538d0ff2bf"
|
||||
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
|
||||
version = "2.5.8"
|
||||
|
||||
[[deps.Pkg]]
|
||||
deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
|
||||
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
|
||||
version = "1.9.0"
|
||||
|
||||
[[deps.PrecompileTools]]
|
||||
deps = ["Preferences"]
|
||||
git-tree-sha1 = "259e206946c293698122f63e2b513a7c99a244e8"
|
||||
uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
|
||||
version = "1.1.1"
|
||||
|
||||
[[deps.Preferences]]
|
||||
deps = ["TOML"]
|
||||
git-tree-sha1 = "7eb1686b4f04b82f96ed7a4ea5890a4f0c7a09f1"
|
||||
uuid = "21216c6a-2e73-6563-6e65-726566657250"
|
||||
version = "1.4.0"
|
||||
|
||||
[[deps.PrettyPrint]]
|
||||
git-tree-sha1 = "632eb4abab3449ab30c5e1afaa874f0b98b586e4"
|
||||
uuid = "8162dcfd-2161-5ef2-ae6c-7681170c5f98"
|
||||
version = "0.2.0"
|
||||
|
||||
[[deps.Printf]]
|
||||
deps = ["Unicode"]
|
||||
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
|
||||
|
||||
[[deps.ProgressLogging]]
|
||||
deps = ["Logging", "SHA", "UUIDs"]
|
||||
git-tree-sha1 = "80d919dee55b9c50e8d9e2da5eeafff3fe58b539"
|
||||
uuid = "33c8b6b6-d38a-422a-b730-caa89a2f386c"
|
||||
version = "0.1.4"
|
||||
|
||||
[[deps.QuadGK]]
|
||||
deps = ["DataStructures", "LinearAlgebra"]
|
||||
git-tree-sha1 = "6ec7ac8412e83d57e313393220879ede1740f9ee"
|
||||
uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
|
||||
version = "2.8.2"
|
||||
|
||||
[[deps.REPL]]
|
||||
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
|
||||
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
|
||||
|
||||
[[deps.Random]]
|
||||
deps = ["SHA", "Serialization"]
|
||||
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
|
||||
|
||||
[[deps.Random123]]
|
||||
deps = ["Random", "RandomNumbers"]
|
||||
git-tree-sha1 = "552f30e847641591ba3f39fd1bed559b9deb0ef3"
|
||||
uuid = "74087812-796a-5b5d-8853-05524746bad3"
|
||||
version = "1.6.1"
|
||||
|
||||
[[deps.RandomNumbers]]
|
||||
deps = ["Random", "Requires"]
|
||||
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
|
||||
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
|
||||
version = "1.5.3"
|
||||
|
||||
[[deps.RealDot]]
|
||||
deps = ["LinearAlgebra"]
|
||||
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
|
||||
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
|
||||
version = "0.1.0"
|
||||
|
||||
[[deps.Reexport]]
|
||||
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
|
||||
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
|
||||
version = "1.2.2"
|
||||
|
||||
[[deps.Requires]]
|
||||
deps = ["UUIDs"]
|
||||
git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7"
|
||||
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
|
||||
version = "1.3.0"
|
||||
|
||||
[[deps.Rmath]]
|
||||
deps = ["Random", "Rmath_jll"]
|
||||
git-tree-sha1 = "f65dcb5fa46aee0cf9ed6274ccbd597adc49aa7b"
|
||||
uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa"
|
||||
version = "0.7.1"
|
||||
|
||||
[[deps.Rmath_jll]]
|
||||
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
|
||||
git-tree-sha1 = "6ed52fdd3382cf21947b15e8870ac0ddbff736da"
|
||||
uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f"
|
||||
version = "0.4.0+0"
|
||||
|
||||
[[deps.SHA]]
|
||||
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
|
||||
version = "0.7.0"
|
||||
|
||||
[[deps.Scratch]]
|
||||
deps = ["Dates"]
|
||||
git-tree-sha1 = "30449ee12237627992a99d5e30ae63e4d78cd24a"
|
||||
uuid = "6c6a2e73-6563-6170-7368-637461726353"
|
||||
version = "1.2.0"
|
||||
|
||||
[[deps.Serialization]]
|
||||
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
|
||||
|
||||
[[deps.Setfield]]
|
||||
deps = ["ConstructionBase", "Future", "MacroTools", "StaticArraysCore"]
|
||||
git-tree-sha1 = "e2cc6d8c88613c05e1defb55170bf5ff211fbeac"
|
||||
uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46"
|
||||
version = "1.1.1"
|
||||
|
||||
[[deps.ShowCases]]
|
||||
git-tree-sha1 = "7f534ad62ab2bd48591bdeac81994ea8c445e4a5"
|
||||
uuid = "605ecd9f-84a6-4c9e-81e2-4798472b76a3"
|
||||
version = "0.1.0"
|
||||
|
||||
[[deps.SimpleTraits]]
|
||||
deps = ["InteractiveUtils", "MacroTools"]
|
||||
git-tree-sha1 = "5d7e3f4e11935503d3ecaf7186eac40602e7d231"
|
||||
uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d"
|
||||
version = "0.9.4"
|
||||
|
||||
[[deps.SnoopPrecompile]]
|
||||
deps = ["Preferences"]
|
||||
git-tree-sha1 = "e760a70afdcd461cf01a575947738d359234665c"
|
||||
uuid = "66db9d55-30c0-4569-8b51-7e840670fc0c"
|
||||
version = "1.0.3"
|
||||
|
||||
[[deps.Sockets]]
|
||||
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
|
||||
|
||||
[[deps.SortingAlgorithms]]
|
||||
deps = ["DataStructures"]
|
||||
git-tree-sha1 = "a4ada03f999bd01b3a25dcaa30b2d929fe537e00"
|
||||
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
|
||||
version = "1.1.0"
|
||||
|
||||
[[deps.SparseArrays]]
|
||||
deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"]
|
||||
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
|
||||
|
||||
[[deps.SpecialFunctions]]
|
||||
deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
|
||||
git-tree-sha1 = "ef28127915f4229c971eb43f3fc075dd3fe91880"
|
||||
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
|
||||
version = "2.2.0"
|
||||
weakdeps = ["ChainRulesCore"]
|
||||
|
||||
[deps.SpecialFunctions.extensions]
|
||||
SpecialFunctionsChainRulesCoreExt = "ChainRulesCore"
|
||||
|
||||
[[deps.SplittablesBase]]
|
||||
deps = ["Setfield", "Test"]
|
||||
git-tree-sha1 = "e08a62abc517eb79667d0a29dc08a3b589516bb5"
|
||||
uuid = "171d559e-b47b-412a-8079-5efa626c420e"
|
||||
version = "0.1.15"
|
||||
|
||||
[[deps.StaticArrays]]
|
||||
deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"]
|
||||
git-tree-sha1 = "8982b3607a212b070a5e46eea83eb62b4744ae12"
|
||||
uuid = "90137ffa-7385-5640-81b9-e52037218182"
|
||||
version = "1.5.25"
|
||||
|
||||
[[deps.StaticArraysCore]]
|
||||
git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a"
|
||||
uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c"
|
||||
version = "1.4.0"
|
||||
|
||||
[[deps.Statistics]]
|
||||
deps = ["LinearAlgebra", "SparseArrays"]
|
||||
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
|
||||
version = "1.9.0"
|
||||
|
||||
[[deps.StatsAPI]]
|
||||
deps = ["LinearAlgebra"]
|
||||
git-tree-sha1 = "45a7769a04a3cf80da1c1c7c60caf932e6f4c9f7"
|
||||
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
|
||||
version = "1.6.0"
|
||||
|
||||
[[deps.StatsBase]]
|
||||
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
|
||||
git-tree-sha1 = "75ebe04c5bed70b91614d684259b661c9e6274a4"
|
||||
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
|
||||
version = "0.34.0"
|
||||
|
||||
[[deps.StatsFuns]]
|
||||
deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"]
|
||||
git-tree-sha1 = "f625d686d5a88bcd2b15cd81f18f98186fdc0c9a"
|
||||
uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
|
||||
version = "1.3.0"
|
||||
weakdeps = ["ChainRulesCore", "InverseFunctions"]
|
||||
|
||||
[deps.StatsFuns.extensions]
|
||||
StatsFunsChainRulesCoreExt = "ChainRulesCore"
|
||||
StatsFunsInverseFunctionsExt = "InverseFunctions"
|
||||
|
||||
[[deps.StructArrays]]
|
||||
deps = ["Adapt", "DataAPI", "GPUArraysCore", "StaticArraysCore", "Tables"]
|
||||
git-tree-sha1 = "521a0e828e98bb69042fec1809c1b5a680eb7389"
|
||||
uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
|
||||
version = "0.6.15"
|
||||
|
||||
[[deps.StructTypes]]
|
||||
deps = ["Dates", "UUIDs"]
|
||||
git-tree-sha1 = "ca4bccb03acf9faaf4137a9abc1881ed1841aa70"
|
||||
uuid = "856f2bd8-1eba-4b0a-8007-ebc267875bd4"
|
||||
version = "1.10.0"
|
||||
|
||||
[[deps.SuiteSparse]]
|
||||
deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
|
||||
uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
|
||||
|
||||
[[deps.SuiteSparse_jll]]
|
||||
deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"]
|
||||
uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c"
|
||||
version = "5.10.1+6"
|
||||
|
||||
[[deps.TOML]]
|
||||
deps = ["Dates"]
|
||||
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
|
||||
version = "1.0.3"
|
||||
|
||||
[[deps.TableTraits]]
|
||||
deps = ["IteratorInterfaceExtensions"]
|
||||
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
|
||||
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
|
||||
version = "1.0.1"
|
||||
|
||||
[[deps.Tables]]
|
||||
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits", "Test"]
|
||||
git-tree-sha1 = "1544b926975372da01227b382066ab70e574a3ec"
|
||||
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
|
||||
version = "1.10.1"
|
||||
|
||||
[[deps.Tar]]
|
||||
deps = ["ArgTools", "SHA"]
|
||||
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
|
||||
version = "1.10.0"
|
||||
|
||||
[[deps.Test]]
|
||||
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
|
||||
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
|
||||
|
||||
[[deps.TimerOutputs]]
|
||||
deps = ["ExprTools", "Printf"]
|
||||
git-tree-sha1 = "f548a9e9c490030e545f72074a41edfd0e5bcdd7"
|
||||
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
|
||||
version = "0.5.23"
|
||||
|
||||
[[deps.Transducers]]
|
||||
deps = ["Adapt", "ArgCheck", "BangBang", "Baselet", "CompositionsBase", "DefineSingletons", "Distributed", "InitialValues", "Logging", "Markdown", "MicroCollections", "Requires", "Setfield", "SplittablesBase", "Tables"]
|
||||
git-tree-sha1 = "25358a5f2384c490e98abd565ed321ffae2cbb37"
|
||||
uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999"
|
||||
version = "0.4.76"
|
||||
|
||||
[[deps.UUIDs]]
|
||||
deps = ["Random", "SHA"]
|
||||
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
|
||||
|
||||
[[deps.Unicode]]
|
||||
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
|
||||
|
||||
[[deps.UnsafeAtomics]]
|
||||
git-tree-sha1 = "6331ac3440856ea1988316b46045303bef658278"
|
||||
uuid = "013be700-e6cd-48c3-b4a1-df204f14c38f"
|
||||
version = "0.2.1"
|
||||
|
||||
[[deps.UnsafeAtomicsLLVM]]
|
||||
deps = ["LLVM", "UnsafeAtomics"]
|
||||
git-tree-sha1 = "ea37e6066bf194ab78f4e747f5245261f17a7175"
|
||||
uuid = "d80eeb9a-aca5-4d75-85e5-170c8b632249"
|
||||
version = "0.1.2"
|
||||
|
||||
[[deps.Zlib_jll]]
|
||||
deps = ["Libdl"]
|
||||
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
|
||||
version = "1.2.13+0"
|
||||
|
||||
[[deps.Zygote]]
|
||||
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "GPUArrays", "GPUArraysCore", "IRTools", "InteractiveUtils", "LinearAlgebra", "LogExpFunctions", "MacroTools", "NaNMath", "Random", "Requires", "SnoopPrecompile", "SparseArrays", "SpecialFunctions", "Statistics", "ZygoteRules"]
|
||||
git-tree-sha1 = "987ae5554ca90e837594a0f30325eeb5e7303d1e"
|
||||
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
|
||||
version = "0.6.60"
|
||||
|
||||
[deps.Zygote.extensions]
|
||||
ZygoteColorsExt = "Colors"
|
||||
ZygoteDistancesExt = "Distances"
|
||||
ZygoteTrackerExt = "Tracker"
|
||||
|
||||
[deps.Zygote.weakdeps]
|
||||
Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
|
||||
Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
|
||||
Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
|
||||
|
||||
[[deps.ZygoteRules]]
|
||||
deps = ["ChainRulesCore", "MacroTools"]
|
||||
git-tree-sha1 = "977aed5d006b840e2e40c0b48984f7463109046d"
|
||||
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
|
||||
version = "0.2.3"
|
||||
|
||||
[[deps.cuDNN]]
|
||||
deps = ["CEnum", "CUDA", "CUDNN_jll"]
|
||||
git-tree-sha1 = "ec954b59f6b0324543f2e3ed8118309ac60cb75b"
|
||||
uuid = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"
|
||||
version = "1.0.3"
|
||||
|
||||
[[deps.libblastrampoline_jll]]
|
||||
deps = ["Artifacts", "Libdl"]
|
||||
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
|
||||
version = "5.7.0+0"
|
||||
|
||||
[[deps.nghttp2_jll]]
|
||||
deps = ["Artifacts", "Libdl"]
|
||||
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
|
||||
version = "1.48.0+0"
|
||||
|
||||
[[deps.p7zip_jll]]
|
||||
deps = ["Artifacts", "Libdl"]
|
||||
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
|
||||
version = "17.4.0+0"
|
||||
14
previousVersion/0.0.5_25percentAccuracy/Project.toml
Normal file
14
previousVersion/0.0.5_25percentAccuracy/Project.toml
Normal file
@@ -0,0 +1,14 @@
|
||||
name = "Ironpen"
|
||||
uuid = "29a645ab-0d6f-4ef8-acfd-1b192480382c"
|
||||
authors = ["tonaerospace <tonaerospace.etc@gmail.com>"]
|
||||
version = "0.1.0"
|
||||
|
||||
[deps]
|
||||
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
|
||||
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
|
||||
GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
|
||||
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
|
||||
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
|
||||
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
|
||||
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
|
||||
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
|
||||
826
previousVersion/0.0.5_25percentAccuracy/example_main.jl
Normal file
826
previousVersion/0.0.5_25percentAccuracy/example_main.jl
Normal file
@@ -0,0 +1,826 @@
|
||||
using Pkg; Pkg.activate("."); Pkg.resolve(), Pkg.instantiate()
|
||||
using Revise
|
||||
using Flux #, CUDA
|
||||
using BSON, JSON3
|
||||
using MLDatasets: MNIST
|
||||
using MLUtils, Images, ProgressMeter, Dates, DataFrames, Random, Statistics, LinearAlgebra,
|
||||
BenchmarkTools, Serialization, OneHotArrays , GLMakie # ClickHouse
|
||||
|
||||
|
||||
# if one need to reinstall all python packages
|
||||
# try Pkg.rm("PythonCall") catch end # should be removed before using CondaPkg to install packages
|
||||
# condapackage = ["numpy", "pytorch", "snntorch"]
|
||||
# using CondaPkg # in CondaPkg.toml file, channels = ["anaconda", "conda-forge", "pytorch"]
|
||||
# for i in condapackage
|
||||
# try CondaPkg.rm(i) catch end
|
||||
# end
|
||||
# for i in condapackage
|
||||
# CondaPkg.add(i)
|
||||
# end
|
||||
# Pkg.add("PythonCall");
|
||||
|
||||
using PythonCall;
|
||||
np = pyimport("numpy")
|
||||
torch = pyimport("torch")
|
||||
spikegen = pyimport("snntorch.spikegen") # https://github.com/jeshraghian/snntorch
|
||||
|
||||
using Ironpen
|
||||
using GeneralUtils
|
||||
|
||||
sep = Sys.iswindows() ? "\\" : "/"
|
||||
rootDir = pwd()
|
||||
|
||||
# select compute device
|
||||
# device = Flux.CUDA.functional() ? gpu : cpu
|
||||
# if device == gpu
|
||||
# CUDA.device!(3)
|
||||
# end
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Todo:
|
||||
- []
|
||||
|
||||
Change from version:
|
||||
-
|
||||
|
||||
All features
|
||||
-
|
||||
"""
|
||||
|
||||
|
||||
# communication config --------------------------------------------------------------------------100
|
||||
|
||||
database_ip = "localhost"
|
||||
# database_ip = "192.168.0.8"
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
function generate_snn(filename::String, location::String)
|
||||
expect_compute_neuron_numbers = 1024 #FIXME change to 512
|
||||
signalInput_portnumbers = 50
|
||||
noise_portnumbers = signalInput_portnumbers
|
||||
output_portnumbers = 10
|
||||
|
||||
lif_neuron_number = Int(floor(expect_compute_neuron_numbers * 0.4))
|
||||
alif_neuron_number = expect_compute_neuron_numbers - lif_neuron_number # from Allen Institute, ALIF is 20-40% of LIF
|
||||
computeNeuronNumber = lif_neuron_number + alif_neuron_number
|
||||
|
||||
totalNeurons = computeNeuronNumber + noise_portnumbers + signalInput_portnumbers
|
||||
totalInputPort = noise_portnumbers + signalInput_portnumbers
|
||||
|
||||
# kfn and neuron config
|
||||
passthrough_neuron_params = Dict(
|
||||
:type => "passthroughNeuron"
|
||||
)
|
||||
|
||||
lif_neuron_params = Dict{Symbol, Any}(
|
||||
:type => "lifNeuron",
|
||||
:v_t_default => 0.0,
|
||||
:v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
|
||||
:tau_m => 200.0, # membrane time constant in millisecond.
|
||||
:eta => 1e-2,
|
||||
# Good starting value is 1/10th of tau_a
|
||||
# This is problem specific parameter. It controls how leaky the neuron is.
|
||||
# Too high(less leaky) makes learning algo harder to move model into direction that reduce error
|
||||
# resulting in model's error to explode exponantially likely because learning algo will try to
|
||||
# exert more force (larger w_out_change) to move neuron into direction that reduce error
|
||||
# For example, model error from 7 to 2e6.
|
||||
|
||||
:synapticConnectionPercent => 50, # % coverage of total neurons in kfn
|
||||
:w_rec_generation_pattern => "random",
|
||||
)
|
||||
|
||||
alif_neuron_params = Dict{Symbol, Any}(
|
||||
:type => "alifNeuron",
|
||||
:v_t_default => 0.0,
|
||||
:v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
|
||||
:tau_m => 200.0, # membrane time constant in millisecond.
|
||||
:eta => 1e-2,
|
||||
# Good starting value is 1/10th of tau_a
|
||||
# This is problem specific parameter. It controls how leaky the neuron is.
|
||||
# Too high(less leaky) makes learning algo harder to move model into direction that reduce error
|
||||
# resulting in model's error to explode exponantially likely because learning algo will try to
|
||||
# exert more force (larger w_out_change) to move neuron into direction that reduce error
|
||||
# For example, model error from 7 to 2e6.
|
||||
|
||||
:tau_a => 500.0, # adaptation time constant in millisecond. it defines neuron memory length.
|
||||
# This is problem specific parameter
|
||||
# Good starting value is 0.5 to 2 times of info STORE-RECALL length i.e. total time SNN takes to
|
||||
# perform a task, for example, equals to episode length.
|
||||
# From "Spike frequency adaptation supports network computations on temporally dispersed
|
||||
# information"
|
||||
|
||||
:synapticConnectionPercent => 50, # % coverage of total neurons in kfn
|
||||
:w_rec_generation_pattern => "random",
|
||||
)
|
||||
|
||||
# linear_neuron_params = Dict{Symbol, Any}(
|
||||
# :type => "linearNeuron",
|
||||
# :v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
|
||||
# :tau_out => 50.0, # output time constant in millisecond.
|
||||
# :synapticConnectionPercent => 100, # % coverage of total neurons in kfn
|
||||
# # Good starting value is 1/50th of tau_a
|
||||
# # This is problem specific parameter.
|
||||
# # It controls how leaky the neuron is.
|
||||
# # Too high(less leaky) makes learning algo harder to move model into direction that reduce error
|
||||
# # resulting in model's error to explode exponantially. For example, model error from 7 to 2e6
|
||||
# # One can image training output neuron is like Tetris Game.
|
||||
# )
|
||||
|
||||
integrate_neuron_params = Dict{Symbol, Any}(
|
||||
:type => "integrateNeuron",
|
||||
:synapticConnectionPercent => 100, # % coverage of total neurons in kfn
|
||||
:eta => 1e-2,
|
||||
:tau_out => 100.0,
|
||||
# Good starting value is 1/50th of tau_a
|
||||
# This is problem specific parameter.
|
||||
# It controls how leaky the neuron is.
|
||||
# Too high(less leaky) makes learning algo harder to move model into direction that reduce error
|
||||
# resulting in model's error to explode exponantially. For example, model error from 7 to 2e6
|
||||
# One can image training output neuron is like Tetris Game.
|
||||
)
|
||||
|
||||
I_kfnparams = Dict{Symbol, Any}(
|
||||
:knowledgeFnName=> "I",
|
||||
:computeNeuronNumber=> computeNeuronNumber,
|
||||
:neuronFiringRateTarget=> 10.0, # Hz
|
||||
:Bn=> "random", # error projection coefficient for EACH neuron
|
||||
:totalNeurons=> totalNeurons,
|
||||
:totalInputPort=> totalInputPort,
|
||||
:totalComputeNeuron=> computeNeuronNumber,
|
||||
|
||||
# group relavent info
|
||||
:inputPort=> Dict(
|
||||
:noise=> Dict(
|
||||
:numbers=> noise_portnumbers,
|
||||
:params=> passthrough_neuron_params,
|
||||
),
|
||||
:signal=> Dict(
|
||||
:numbers=> signalInput_portnumbers, # in case of GloVe word encoding, it is 300
|
||||
:params=> passthrough_neuron_params,
|
||||
),
|
||||
),
|
||||
:outputPort=> Dict(
|
||||
:numbers=> output_portnumbers, # output neuron, this is also the output length
|
||||
:params=> integrate_neuron_params,
|
||||
),
|
||||
:computeNeuron=> Dict(
|
||||
:1=> Dict(
|
||||
:numbers=> lif_neuron_number,
|
||||
:params=> lif_neuron_params,
|
||||
),
|
||||
:2=> Dict(
|
||||
:numbers=> alif_neuron_number,
|
||||
:params=> alif_neuron_params,
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
I_kfn = Ironpen.kfn_1(I_kfnparams)
|
||||
|
||||
model_params_1 = Dict(:knowledgeFn => Dict(
|
||||
:I => I_kfn),
|
||||
)
|
||||
|
||||
model = Ironpen.model(model_params_1)
|
||||
|
||||
serialize(location * sep * filename, model)
|
||||
println("SNN generated")
|
||||
end
|
||||
|
||||
function data_loader()
|
||||
# test problem
|
||||
fullTrainDataset = MNIST(:train)
|
||||
prototypeDataset = fullTrainDataset[1:10] # use reshape(test_dataset[1], (:, 1)) to flaten matrix
|
||||
trainDataset = fullTrainDataset # total 60000
|
||||
validateDataset = fullTrainDataset[1:100]
|
||||
labelDict = [0:9...]
|
||||
|
||||
trainData = MLUtils.DataLoader(
|
||||
trainDataset; # fullTrainDataset or trainDataset
|
||||
batchsize=100,
|
||||
collate=true,
|
||||
shuffle=true,
|
||||
buffer=true,
|
||||
partial=false, # better for gpu memory if batchsize is fixed
|
||||
# parallel=true, #BUG ?? causing dataloader into forever loop
|
||||
)
|
||||
|
||||
validateData = MLUtils.DataLoader(
|
||||
validateDataset;
|
||||
batchsize=1,
|
||||
collate=true,
|
||||
shuffle=true,
|
||||
buffer=true,
|
||||
partial=false, # better for gpu memory if batchsize is fixed
|
||||
# parallel=true, #BUG ?? causing dataloader into forever loop
|
||||
)
|
||||
|
||||
#CHANGE dummy data used to debug
|
||||
# trainData = [(rand(10, 10), [5]), (rand(10, 10), [2])]
|
||||
# trainData = [(rand(10, 10), [5]),]
|
||||
|
||||
return trainData, validateData, labelDict
|
||||
end
|
||||
|
||||
function train_snn(model_name::String, filename::String, location::String,
|
||||
trainData, validateData, labelDict::Vector)
|
||||
println("loading SNN model")
|
||||
|
||||
model = deserialize(location * sep * filename)
|
||||
println("model loading completed")
|
||||
|
||||
# random seed
|
||||
# rng = MersenneTwister(1234)
|
||||
|
||||
logitLog = zeros(10, 2)
|
||||
firedNeurons_t1 = zeros(1)
|
||||
var1 = zeros(10, 2)
|
||||
var2 = zeros(10, 2)
|
||||
var3 = zeros(10, 2)
|
||||
var4 = zeros(10, 2)
|
||||
|
||||
# ----------------------------------- plot ----------------------------------- #
|
||||
plot10 = Observable(firedNeurons_t1)
|
||||
|
||||
plot20 = Observable(logitLog[1 , :])
|
||||
plot21 = Observable(logitLog[2 , :])
|
||||
plot22 = Observable(logitLog[3 , :])
|
||||
plot23 = Observable(logitLog[4 , :])
|
||||
plot24 = Observable(logitLog[5 , :])
|
||||
plot25 = Observable(logitLog[6 , :])
|
||||
plot26 = Observable(logitLog[7 , :])
|
||||
plot27 = Observable(logitLog[8 , :])
|
||||
plot28 = Observable(logitLog[9 , :])
|
||||
plot29 = Observable(logitLog[10, :])
|
||||
|
||||
plot30 = Observable(var1[1 , :])
|
||||
plot31 = Observable(var1[2 , :])
|
||||
plot32 = Observable(var1[3 , :])
|
||||
plot33 = Observable(var1[4 , :])
|
||||
plot34 = Observable(var1[5 , :])
|
||||
plot35 = Observable(var1[6 , :])
|
||||
plot36 = Observable(var1[7 , :])
|
||||
plot37 = Observable(var1[8 , :])
|
||||
plot38 = Observable(var1[9 , :])
|
||||
plot39 = Observable(var1[10, :])
|
||||
|
||||
plot40 = Observable(var2[1 , :])
|
||||
plot41 = Observable(var2[2 , :])
|
||||
plot42 = Observable(var2[3 , :])
|
||||
plot43 = Observable(var2[4 , :])
|
||||
plot44 = Observable(var2[5 , :])
|
||||
plot45 = Observable(var2[6 , :])
|
||||
plot46 = Observable(var2[7 , :])
|
||||
plot47 = Observable(var2[8 , :])
|
||||
plot48 = Observable(var2[9 , :])
|
||||
plot49 = Observable(var2[10, :])
|
||||
|
||||
plot50 = Observable(var3[1 , :])
|
||||
plot51 = Observable(var3[2 , :])
|
||||
plot52 = Observable(var3[3 , :])
|
||||
plot53 = Observable(var3[4 , :])
|
||||
plot54 = Observable(var3[5 , :])
|
||||
plot55 = Observable(var3[6 , :])
|
||||
plot56 = Observable(var3[7 , :])
|
||||
plot57 = Observable(var3[8 , :])
|
||||
plot58 = Observable(var3[9 , :])
|
||||
plot59 = Observable(var3[10, :])
|
||||
|
||||
plot60 = Observable(var4[1 , :])
|
||||
plot61 = Observable(var4[2 , :])
|
||||
plot62 = Observable(var4[3 , :])
|
||||
plot63 = Observable(var4[4 , :])
|
||||
plot64 = Observable(var4[5 , :])
|
||||
plot65 = Observable(var4[6 , :])
|
||||
plot66 = Observable(var4[7 , :])
|
||||
plot67 = Observable(var4[8 , :])
|
||||
plot68 = Observable(var4[9 , :])
|
||||
plot69 = Observable(var4[10, :])
|
||||
|
||||
# main figure
|
||||
fig1 = Figure()
|
||||
|
||||
subfig1 = GLMakie.Axis(fig1[1, 1], # define position of this subfigure inside a figure
|
||||
title = "RSNN firedNeurons_t1",
|
||||
xlabel = "time",
|
||||
ylabel = "data"
|
||||
)
|
||||
lines!(subfig1, plot10, label = "firedNeurons_t1")
|
||||
axislegend(subfig1, position = :lb)
|
||||
|
||||
subfig2 = GLMakie.Axis(fig1[2, 1], # define position of this subfigure inside a figure
|
||||
title = "output neurons activation",
|
||||
xlabel = "time",
|
||||
ylabel = "data"
|
||||
)
|
||||
|
||||
lines!(subfig2, plot20, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig2, plot21, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig2, plot22, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig2, plot23, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig2, plot24, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig2, plot25, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig2, plot26, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig2, plot27, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig2, plot28, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig2, plot29, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
|
||||
axislegend(subfig2, position = :lb)
|
||||
|
||||
|
||||
subfig3 = GLMakie.Axis(fig1[3, 1], # define position of this subfigure inside a figure
|
||||
title = "output neurons membrane potential v_t1",
|
||||
xlabel = "time",
|
||||
ylabel = "data"
|
||||
)
|
||||
lines!(subfig3, plot30, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig3, plot31, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig3, plot32, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig3, plot33, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig3, plot34, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig3, plot35, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig3, plot36, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig3, plot37, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig3, plot38, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig3, plot39, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
|
||||
axislegend(subfig3, position = :lb)
|
||||
|
||||
subfig4 = GLMakie.Axis(fig1[4, 1], # define position of this subfigure inside a figure
|
||||
title = "output neuron wRec",
|
||||
xlabel = "time",
|
||||
ylabel = "data"
|
||||
)
|
||||
lines!(subfig4, plot40, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig4, plot41, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig4, plot42, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig4, plot43, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig4, plot44, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig4, plot45, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig4, plot46, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig4, plot47, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig4, plot48, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig4, plot49, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
|
||||
axislegend(subfig4, position = :lb)
|
||||
|
||||
subfig5 = GLMakie.Axis(fig1[5, 1], # define position of this subfigure inside a figure
|
||||
title = "output neuron epsilonRec",
|
||||
xlabel = "time",
|
||||
ylabel = "data"
|
||||
)
|
||||
lines!(subfig5, plot50, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig5, plot51, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig5, plot52, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig5, plot53, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig5, plot54, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig5, plot55, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig5, plot56, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig5, plot57, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig5, plot58, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig5, plot59, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
|
||||
axislegend(subfig5, position = :lb)
|
||||
|
||||
subfig6 = GLMakie.Axis(fig1[6, 1], # define position of this subfigure inside a figure
|
||||
title = "output neuron wRecChange",
|
||||
xlabel = "time",
|
||||
ylabel = "data"
|
||||
)
|
||||
lines!(subfig6, plot60, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig6, plot61, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig6, plot62, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig6, plot63, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig6, plot64, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig6, plot65, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig6, plot66, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig6, plot67, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig6, plot68, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
|
||||
lines!(subfig6, plot69, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
|
||||
axislegend(subfig6, position = :lb)
|
||||
|
||||
# wait(display(fig1))
|
||||
# display(fig1)
|
||||
# --------------------------------- end plot --------------------------------- #
|
||||
|
||||
# model learning
|
||||
maxRepeatRound = 1 # repeat each image
|
||||
thinkingPeriod = 16 # 1000-784 = 216
|
||||
for epoch = 1:1000
|
||||
println("epoch $epoch")
|
||||
for (imgBatch, labelBatch) in trainData
|
||||
@showprogress for i in eachindex(labelBatch)
|
||||
_img = (imgBatch[:, :, i])
|
||||
img = reshape(_img, (:, 1))
|
||||
row, col = size(img)
|
||||
label = labelBatch[i]
|
||||
println("epoch $epoch training label $label")
|
||||
|
||||
img_tensor = torch.from_numpy( np.asarray(img) )
|
||||
|
||||
# create more data for RSNN
|
||||
spike = spikegen.delta(img_tensor, threshold=0.1, off_spike=true)
|
||||
spike1 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike2 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike3 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike4 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike5 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike6 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike7 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike8 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike9 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike10 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
|
||||
spike = spikegen.delta(img_tensor, threshold=0.2, off_spike=true)
|
||||
spike11 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike12 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike13 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike14 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike15 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike16 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike17 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike18 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike19 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike20 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
|
||||
spike = spikegen.delta(img_tensor, threshold=0.3, off_spike=true)
|
||||
spike21 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike22 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike23 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike24 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike25 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike26 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike27 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike28 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike29 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike30 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
|
||||
spike = spikegen.delta(img_tensor, threshold=0.4, off_spike=true)
|
||||
spike31 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike32 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike33 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike34 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike35 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike36 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike37 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike38 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike39 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike40 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
|
||||
spike = spikegen.delta(img_tensor, threshold=0.5, off_spike=true)
|
||||
spike41 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike42 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike43 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike44 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike45 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
|
||||
spike46 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike47 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike48 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike49 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
spike50 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
|
||||
|
||||
input = [spike1;; spike2;; spike3;; spike4;; spike5;; spike6;; spike7;; spike8;; spike9;; spike10;;
|
||||
spike11;; spike12;; spike13;; spike14;; spike15;; spike16;; spike17;; spike18;; spike19;; spike20;;
|
||||
spike21;; spike22;; spike23;; spike24;; spike25;; spike26;; spike27;; spike28;; spike29;; spike30;;
|
||||
spike31;; spike32;; spike33;; spike34;; spike35;; spike36;; spike37;; spike38;; spike39;; spike40;;
|
||||
spike41;; spike42;; spike43;; spike44;; spike45;; spike46;; spike47;; spike48;; spike49;; spike50
|
||||
]' # ' to flip 784x10 to 10x784
|
||||
|
||||
predict = 0
|
||||
|
||||
for k in 1:maxRepeatRound
|
||||
|
||||
# insert data into model sequencially
|
||||
for i in 1:(row + thinkingPeriod) # sMNIST ihas 784 timestep(pixel) + thinking period = 1000 timestep
|
||||
tick = i
|
||||
if i <= row
|
||||
current_pixel = input[:, i]
|
||||
else
|
||||
current_pixel = zeros(size(input)[1]) # dummy input in "thinking" period
|
||||
end
|
||||
|
||||
if tick == 1 # tell a model to start learning. 1-time only
|
||||
model.learningStage = "start_learning"
|
||||
|
||||
elseif tick == (row+thinkingPeriod)
|
||||
model.learningStage = "end_learning"
|
||||
else
|
||||
end
|
||||
|
||||
_firedNeurons_t1, logit, _var1, _var2, _var3, _var4 = model(current_pixel)
|
||||
# log answer of all timestep
|
||||
logitLog = [logitLog;; logit]
|
||||
firedNeurons_t1 = push!(firedNeurons_t1, _firedNeurons_t1)
|
||||
var1 = [var1;; _var1]
|
||||
var2 = [var2;; _var2]
|
||||
var3 = [var3;; _var3]
|
||||
var4 = [var4;; _var4]
|
||||
|
||||
# if tick <= row # online learning, 1-by-1 timestep
|
||||
# correctAnswer = zeros(length(logit))
|
||||
# modelError = (logit - correctAnswer) * 1.0
|
||||
# Ironpen.compute_wRecChange!(model, modelError, correctAnswer)
|
||||
# elseif tick == row+1
|
||||
# correctAnswer = OneHotArrays.onehot(label, labelDict)
|
||||
# modelError = (logit - correctAnswer) * 1.0
|
||||
# Ironpen.compute_wRecChange!(model, modelError, correctAnswer)
|
||||
# elseif tick > row+1 && tick < row+thinkingPeriod
|
||||
# correctAnswer = OneHotArrays.onehot(label, labelDict)
|
||||
# modelError = (logit - correctAnswer) * 1.0
|
||||
# Ironpen.compute_wRecChange!(model, modelError, correctAnswer)
|
||||
# elseif tick == row+thinkingPeriod
|
||||
# _predict = logitLog[:, end-thinkingPeriod+1:end] # answer count during thinking period
|
||||
# _predict = Int.([sum(row) for row in eachrow(_predict)])
|
||||
# # predict = [x > 0 for x in _predict]
|
||||
# correctAnswer = OneHotArrays.onehot(label, labelDict)
|
||||
# modelError = (logit - correctAnswer) * 1.0
|
||||
# Ironpen.compute_wRecChange!(model, modelError, correctAnswer)
|
||||
# Ironpen.learn!(model)
|
||||
# println("label $label predict $(_predict) model error $(Int.(modelError))")
|
||||
# else
|
||||
# error("undefined condition line $(@__LINE__)")
|
||||
# end
|
||||
|
||||
if tick <= row # online learning, 1-by-1 timestep
|
||||
# no error calculation
|
||||
elseif tick > row && tick < row+thinkingPeriod
|
||||
# correctAnswer = OneHotArrays.onehot(label, labelDict)
|
||||
# modelError = (logit - correctAnswer) * 1.0
|
||||
# Ironpen.compute_wRecChange!(model, modelError, correctAnswer)
|
||||
|
||||
elseif tick == row+thinkingPeriod
|
||||
correctAnswer = OneHotArrays.onehot(label, labelDict)
|
||||
modelError = Flux.logitcrossentropy(logit, correctAnswer) * 1.0
|
||||
outputError = (logit - correctAnswer) * 1.0
|
||||
Ironpen.compute_wRecChange!(model, modelError, outputError)
|
||||
Ironpen.learn!(model)
|
||||
_logit = round.(logit; digits=2)
|
||||
predict = findall(isequal.(logit, maximum(logit)))[1] - 1
|
||||
y = round.(modelError; digits=2)
|
||||
println("")
|
||||
println("label $label predict $predict logit $_logit model error $y")
|
||||
else
|
||||
error("undefined condition line $(@__LINE__)")
|
||||
end
|
||||
|
||||
# update plot
|
||||
plot10[] = firedNeurons_t1
|
||||
|
||||
plot20[] = view(logitLog, 1 , :)
|
||||
plot21[] = view(logitLog, 2 , :)
|
||||
plot22[] = view(logitLog, 3 , :)
|
||||
plot23[] = view(logitLog, 4 , :)
|
||||
plot24[] = view(logitLog, 5 , :)
|
||||
plot25[] = view(logitLog, 6 , :)
|
||||
plot26[] = view(logitLog, 7 , :)
|
||||
plot27[] = view(logitLog, 8 , :)
|
||||
plot28[] = view(logitLog, 9 , :)
|
||||
plot29[] = view(logitLog, 10, :)
|
||||
|
||||
plot30[] = view(var1, 1 , :)
|
||||
plot31[] = view(var1, 2 , :)
|
||||
plot32[] = view(var1, 3 , :)
|
||||
plot33[] = view(var1, 4 , :)
|
||||
plot34[] = view(var1, 5 , :)
|
||||
plot35[] = view(var1, 6 , :)
|
||||
plot36[] = view(var1, 7 , :)
|
||||
plot37[] = view(var1, 8 , :)
|
||||
plot38[] = view(var1, 9 , :)
|
||||
plot39[] = view(var1, 10, :)
|
||||
|
||||
plot40[] = view(var2, 1 , :)
|
||||
plot41[] = view(var2, 2 , :)
|
||||
plot42[] = view(var2, 3 , :)
|
||||
plot43[] = view(var2, 4 , :)
|
||||
plot44[] = view(var2, 5 , :)
|
||||
plot45[] = view(var2, 6 , :)
|
||||
plot46[] = view(var2, 7 , :)
|
||||
plot47[] = view(var2, 8 , :)
|
||||
plot48[] = view(var2, 9 , :)
|
||||
plot49[] = view(var2, 10, :)
|
||||
|
||||
plot50[] = view(var3, 1 , :)
|
||||
plot51[] = view(var3, 2 , :)
|
||||
plot52[] = view(var3, 3 , :)
|
||||
plot53[] = view(var3, 4 , :)
|
||||
plot54[] = view(var3, 5 , :)
|
||||
plot55[] = view(var3, 6 , :)
|
||||
plot56[] = view(var3, 7 , :)
|
||||
plot57[] = view(var3, 8 , :)
|
||||
plot58[] = view(var3, 9 , :)
|
||||
plot59[] = view(var3, 10, :)
|
||||
|
||||
plot60[] = view(var4, 1 , :)
|
||||
plot61[] = view(var4, 2 , :)
|
||||
plot62[] = view(var4, 3 , :)
|
||||
plot63[] = view(var4, 4 , :)
|
||||
plot64[] = view(var4, 5 , :)
|
||||
plot65[] = view(var4, 6 , :)
|
||||
plot66[] = view(var4, 7 , :)
|
||||
plot67[] = view(var4, 8 , :)
|
||||
plot68[] = view(var4, 9 , :)
|
||||
plot69[] = view(var4, 10, :)
|
||||
end
|
||||
# end-thinkingPeriod+2; +2 because initialize logitLog = zeros(10, 2)
|
||||
# _modelRespond = logitLog[:, end-thinkingPeriod+2:end] # answer count during thinking period
|
||||
# _modelRespond = [sum(i) for i in eachrow(_modelRespond)]
|
||||
# modelRespond = isequal.(isequal.(_modelRespond, 0), 0)
|
||||
|
||||
display(fig1)
|
||||
# sleep(1)
|
||||
if k % 3 == 0
|
||||
firedNeurons_t1 = zeros(1)
|
||||
logitLog = zeros(10, 2)
|
||||
var1 = zeros(10, 2)
|
||||
var2 = zeros(10, 2)
|
||||
var3 = zeros(10, 2)
|
||||
var4 = zeros(10, 2)
|
||||
end
|
||||
|
||||
# if predict == OneHotArrays.onehot(label, labelDict)
|
||||
# println("model train $label successfully, $k tries")
|
||||
# # wait(display(fig1))
|
||||
|
||||
# firedNeurons_t1 = zeros(1)
|
||||
# logitLog = zeros(10, 2)
|
||||
# var1 = zeros(10, 2)
|
||||
# var2 = zeros(10, 2)
|
||||
# var3 = zeros(10, 2)
|
||||
# var4 = zeros(10, 2)
|
||||
# break
|
||||
# end
|
||||
|
||||
if k == maxRepeatRound
|
||||
# println("model train $label unsuccessfully, $maxRepeatRound tries, skip training")
|
||||
# display(fig1)
|
||||
firedNeurons_t1 = zeros(1)
|
||||
logitLog = zeros(10, 2)
|
||||
var1 = zeros(10, 2)
|
||||
var2 = zeros(10, 2)
|
||||
var3 = zeros(10, 2)
|
||||
var4 = zeros(10, 2)
|
||||
break
|
||||
end
|
||||
|
||||
GC.gc()
|
||||
end
|
||||
end
|
||||
# check accuracy
|
||||
println("validating model")
|
||||
answerCorrectly = validate(model, validateData, labelDict)
|
||||
println("model accuracy is $answerCorrectly %")
|
||||
end
|
||||
|
||||
# # check mean error and accuracy
|
||||
# mean_error = round(mean(model_error_list), sigdigits = 3)
|
||||
# accuracy = round(model_accuracy / batch_size * 100, sigdigits = 3)
|
||||
# println("------------")
|
||||
# println(model_name)
|
||||
# println("mean error $mean_error accuracy $accuracy")
|
||||
end
|
||||
end
|
||||
|
||||
""" Run `model` over every (image, label) pair in `dataset` and return the
classification accuracy as a percentage.

Uses the same delta-modulation population encoding as the training loop:
5 thresholds × (5 on-spike + 5 off-spike) identical copies = 50 input
channels per pixel, followed by a "thinking" period of zero input.

NOTE(review): assumes `model(current_pixel)` returns a 6-tuple whose 2nd
element is the logit vector — confirm against the model definition.
NOTE(review): `labelDict` is accepted but not read in this function.
"""
function validate(model, dataset, labelDict)
    answerCorrectly = 0.0 # running count of correct answers (converted to % at the end)
    thinkingPeriod = 16 # extra zero-input timesteps appended after the pixels
    @showprogress for (image, label) in dataset
        img = reshape(image, (:, 1)) # flatten image into a single column vector
        row, col = size(img) # row = number of pixels/timesteps; `col` is unused
        label = label[1]

        # hand the Julia array to Python (snnTorch) for delta-modulation encoding
        img_tensor = torch.from_numpy( np.asarray(img) )

        # create more data for RSNN: population encoding.
        # For each threshold: spikes 1-5 of the group are identical on-spike
        # channels (value == 1), spikes 6-10 identical off-spike channels (== -1).
        spike = spikegen.delta(img_tensor, threshold=0.1, off_spike=true)
        spike1 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike2 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike3 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike4 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike5 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike6 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike7 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike8 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike9 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike10 = isequal.(pyconvert(Array, spike.data.numpy()), -1)

        spike = spikegen.delta(img_tensor, threshold=0.2, off_spike=true)
        spike11 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike12 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike13 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike14 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike15 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike16 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike17 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike18 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike19 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike20 = isequal.(pyconvert(Array, spike.data.numpy()), -1)

        spike = spikegen.delta(img_tensor, threshold=0.3, off_spike=true)
        spike21 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike22 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike23 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike24 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike25 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike26 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike27 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike28 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike29 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike30 = isequal.(pyconvert(Array, spike.data.numpy()), -1)

        spike = spikegen.delta(img_tensor, threshold=0.4, off_spike=true)
        spike31 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike32 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike33 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike34 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike35 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike36 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike37 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike38 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike39 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike40 = isequal.(pyconvert(Array, spike.data.numpy()), -1)

        spike = spikegen.delta(img_tensor, threshold=0.5, off_spike=true)
        spike41 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike42 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike43 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike44 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike45 = isequal.(pyconvert(Array, spike.data.numpy()), 1)
        spike46 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike47 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike48 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike49 = isequal.(pyconvert(Array, spike.data.numpy()), -1)
        spike50 = isequal.(pyconvert(Array, spike.data.numpy()), -1)

        # ;; concatenates the 50 channel columns horizontally
        input = [spike1;; spike2;; spike3;; spike4;; spike5;; spike6;; spike7;; spike8;; spike9;; spike10;;
        spike11;; spike12;; spike13;; spike14;; spike15;; spike16;; spike17;; spike18;; spike19;; spike20;;
        spike21;; spike22;; spike23;; spike24;; spike25;; spike26;; spike27;; spike28;; spike29;; spike30;;
        spike31;; spike32;; spike33;; spike34;; spike35;; spike36;; spike37;; spike38;; spike39;; spike40;;
        spike41;; spike42;; spike43;; spike44;; spike45;; spike46;; spike47;; spike48;; spike49;; spike50
        ]' # adjoint: flip (pixels x 50 channels) to (50 channels x pixels)

        # insert data into model sequentially, one timestep per pixel
        logit = Float64[] # overwritten by the model output on every loop iteration
        for i in 1:(row + thinkingPeriod) # sMNIST has 784 timesteps (pixels) + thinking period
            if i <= row
                current_pixel = input[:, i]
            else
                current_pixel = zeros(size(input)[1]) # dummy input in "thinking" period
            end

            _firedNeurons_t1, logit, _var1, _var2, _var3, _var4 = model(current_pixel)
        end

        # argmax of the final logits, shifted to the 0-9 digit range
        predict = findall(isequal.(logit, maximum(logit)))[1] - 1
        if predict == label
            answerCorrectly += 1
            # println("model answer $label correctly")
        else
            # println("img $label, model answer $predict")
        end
        # force collection each sample; Python-interop objects accumulate quickly
        GC.gc()
    end

    correctPercent = answerCorrectly * 100.0 / length(dataset)

    return correctPercent::Float64
end
|
||||
|
||||
|
||||
""" Program entry point: generate the SNN source, load the MNIST-style
datasets, and run training. Prints start/finish wall-clock timestamps.

Calls the file-level helpers `generate_snn`, `data_loader` and `train_snn`.
"""
function main()
    training_start_time = Dates.now()
    println("program started ", training_start_time)

    filelocation = string(@__DIR__)

    # Previously `modelname`/`filename` were assigned twice — once inside a
    # dead single-iteration `for i = 1:1` loop and once after it. Define them
    # once; behavior is unchanged.
    modelname = "v06_36"
    filename = "$modelname.jl163" # NOTE(review): ".jl163" extension looks unusual — confirm intended
    # filename = "v06_31c.jl163"

    # generate SNN
    generate_snn(filename, filelocation)

    trainDataset, validateDataset, labelDict = data_loader()

    train_snn(modelname, filename, filelocation, trainDataset, validateDataset, labelDict)

    finish_training_time = Dates.now()
    println("training done, $training_start_time ==> $finish_training_time ")
    println(" ///////////////////////////////////////////////////////////////////////")
end
|
||||
|
||||
# only runs main() if julia isn’t started interactively
|
||||
# https://discourse.julialang.org/t/scripting-like-a-julian/50707
|
||||
!isinteractive() && main()
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
1
previousVersion/0.0.5_25percentAccuracy/src/.vscode/settings.json
vendored
Normal file
1
previousVersion/0.0.5_25percentAccuracy/src/.vscode/settings.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{}
|
||||
153
previousVersion/0.0.5_25percentAccuracy/src/DB_services.jl
Normal file
153
previousVersion/0.0.5_25percentAccuracy/src/DB_services.jl
Normal file
@@ -0,0 +1,153 @@
|
||||
module DB_services

""" version 0.2

Flatten model state and publish it to `raw_data_db_service` over Redis.
"""

using DataStructures: count
export send_to_DB, data_prep_for_DB

using DataStructures
using JSON3
using Redis
using Random
using UUIDs

include("Utils.jl")
using .Utils

"""
Dummy iron_pen_ai for raw_data_db_service testing
"""

#------------------------------------------------------------------------------------------------100

""" Prepare model data for sending to raw_data_db_service by flattening all hierarchy
data structure inside model_data into 1-depth JSON3.
This function output is flattened JSON3 data.
*** all parameter names that go to Cassandra must not contain a capital letter ***

NOTE(review): `time_stamp` is accepted but never read in this function body.
Returns one payload OrderedDict per knowledgeFn found in
`model_data[:m][:knowledgeFn]`.
"""
function data_prep_for_DB(model_name::String, experiment_number::Int, episode_number::Int,
time_stamp::Int, model_data::OrderedDict)::Array{OrderedDict, 1}

    # Common header fields copied into every payload before its neuron list is filled.
    payload_template = OrderedDict{Any, Any}(
    :model_name => model_name,
    :knowledgeFn_name => "none",
    :experiment_number => experiment_number,
    :episode_number => episode_number,
    )
    payloads = []
    for (k, v) in model_data[:m][:knowledgeFn] # loop over each knowledgeFn
        payload = deepcopy(payload_template)
        payload[:knowledgeFn_name] = v[:knowledgefn_name]
        payload[:neurons_list] = []
        for (k1, v1) in v
            # only the two neuron containers are flattened; other keys are skipped
            if k1 == :neurons_array || k1 == :output_neurons_array
                for (k2, v2) in v1 # loop over each neuron
                    if k2 != :type # add the following additional data into neuron's ODict data (already have its parameters in there)
                        neuron = OrderedDict(v2) # v2 is still in JSON3 format but
                        # to be able to add new values to
                        # it, it needs to be in
                        # OrderedDict format

                        # # add corresponding knowledgeFn to neuron OrderedDict
                        # neuron[:knowledgefn_name] = v[:knowledgefn_name]

                        # add corresponding experiment_number to neuron OrderedDict
                        neuron[:experiment_number] = experiment_number

                        # add corresponding episode_number to neuron OrderedDict
                        neuron[:episode_number] = episode_number

                        # # add corresponding tick_number to neuron OrderedDict
                        # neuron[:tick_number] = tick_number

                        """ add neuron name of itself to neuron OrderedDict
                        since neurons in neurons_array and output_neurons_array have the
                        same name (because a name is derived from the neuron's position in the
                        array it lives in). In order to store them in the same
                        OrderedDict, I need to change their name, so I prefix each name
                        with its array name.
                        """
                        neuron[:neuron_name] = Symbol(string(k1) * "_" * string(k2))

                        neuron[:model_error] = model_data[:m][:model_error]

                        neuron[:knowledgefn_error] = model_data[:m][:knowledgeFn][k][:knowledgeFn_error]

                        neuron[:model_name] = model_name

                        # use as identifier during debug
                        # neuron[:random] = Random.rand(1:100)

                        push!(payload[:neurons_list], neuron)
                    end
                end
            end
        end
        push!(payloads, payload)
    end
    return payloads
end

""" Publish flattened model state to raw_data_db_service via Redis pub/sub.

For each payload from `data_prep_for_DB`: first sends a "whois" handshake,
then — if the service answers with the matching `topic_id` — publishes an
"insert" message carrying the payload. Raises an error if the handshake
response does not match.
"""
function send_to_DB(model_name::String, experiment_number::Int, episode_number::Int,
tick_number::Int, model_json_string::String, redis_server_ip::String,
pub_channel::String, sub_channel::String)
    model_ordereddict = OrderedDict(JSON3.read(model_json_string))
    payloads = data_prep_for_DB(model_name, experiment_number, episode_number, tick_number,
    model_ordereddict)

    for payload in payloads
        # ask raw data service whether it is ready
        # println("checking raw_data_db_service")
        ask = Dict(:sender => "ironpen_ai",
        :topic => "whois", # [uuid1(), "whois"] to get name of the receiver
        :topic_id => uuid1(),
        :responding_to => nothing, # receiver fills in the message uuid it is responding to
        :communication_channel => sub_channel, # a channel that sender wants receiver to send message to or "none" to get message at receiver's default respond channel
        :instruction => nothing,
        :payload => nothing,
        :isreturn => true)
        incoming_message = Utils.service_query(redis_server_ip, pub_channel, sub_channel, ask)
        # println("raw_data_db_service ok")
        if UUID(incoming_message[:responding_to]) == ask[:topic_id]
            message = Dict(:sender => "ironpen_ai",
            :topic => "process", # [uuid1(), "whois"] to get name of the receiver
            :topic_id => uuid1(),
            :responding_to => nothing, # receiver fills in the message uuid it is responding to
            :communication_channel => sub_channel, # a channel that sender wants receiver to send message to or "none" to get message at receiver's default respond channel
            :instruction => "insert",
            :payload => payload,
            :isreturn => false)

            result = Utils.service_query(redis_server_ip, pub_channel, sub_channel, message)
            # println("published")
        else
            error("raw_data_db_service not respond")
        end
    end

end

end # module end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
148
previousVersion/0.0.5_25percentAccuracy/src/Ironpen.jl
Normal file
148
previousVersion/0.0.5_25percentAccuracy/src/Ironpen.jl
Normal file
@@ -0,0 +1,148 @@
|
||||
module Ironpen

export kfn_1, synapticConnStrength!

""" Ordered by the dependencies of each file. The 1st included file must not depend on any other
files and each file can only depend on the files included before it.
"""

include("types.jl")
using .types # bring model into this module namespace (this module is a parent module)

include("snn_utils.jl")
using .snn_utils

# include("Save_and_load.jl")
# using .Save_and_load

# include("DB_services.jl")
# using .DB_services

include("forward.jl")
using .forward

include("learn.jl")
using .learn

# include("readout.jl")
# using .readout

# include("interface.jl")
# using .interface
#------------------------------------------------------------------------------------------------100

""" version 0.0.5 first working RSNN
Todo:
[4] implement dormant connection
[] use RL to control learning signal
[] consider using Dates.now() instead of timestamp because time_stamp may overflow
[5] training should include adjusting α, the neuron membrane potential decay factor,
which is defined by the neuron.tau_m formula in type.jl


Change from version: 0.0.4
- compute model error in main loop so one could decide when to calculate error in
the training sequence and how to calculate it
- fix ALIF adaptation formula, now n.a is computed every time step
- add higher input signal-to-noise ratio
- no noise generated
- increase input signal by adding more input neuron populations
- add /100 to every wRec and b
- add integrateNeuron
- ΔwRecChange during input signal ingestion will be merged at the end of learning
- use Flux.logitcrossentropy for overall error
- move timestep_forward!() in kfn's forward to the beginning so that v_t and z_t are reset
- fix n.a formula in forward() and calculate both non-firing and firing states
- RSNN uses overall modelError to update while the integrate neuron uses error with respect to
itself (yk - yk*) to update
- all RSNN neurons connect to integrateNeuron
- integrateNeuron does NOT respect RSNN excitatory and inhibitory sign
- weaker connections should be harder to increase in strength. It requires a lot of
repeated activation to get one stronger, while a strong connection requires a lot of
inactivation to get it weaker. The concept is that a strong connection will lock the
correct neural pathway through repeated use of the right connection, i.e. keep training
on the correct answer -> strengthen the right neural pathway (connections) ->
this correct neural pathway resists change.
Unused connections should disappear (forgetting).


All features
- synapticStrength applied at the end of learning
- collect ΔwRecChange during online learning (0-784th) and merge with wRec at
the end of learning (800th).
- multidispatch + for loop as main compute method
- allow -w_rec yes
- voltage drop when neuron fires; voltage drop equals vRest
- v_t decays during refractory
- input data population encoding, each pixel data =>
population encoding, relative between pixel data
- compute neuron weight init rand()
- output neuron weight init randn()

- compute pseudo derivative (n.phi) every time step
- add excitatory, inhibitory to neuron
- implement "start learning", reset learning and "learning", "end_learning" and
"inference"
- synaptic connection strength concept. use sigmoid, turn connection offline
- neuroplasticity() i.e. change connections
- add multiple threads


Removed features
- normalize output yes
<logitcrossentropy does not need normalization>
- compute model error at the end of learning. Model error times a constant of 5 for
higher learning impact than the error during online learning
<there should be no difference between error in each timestep because errors
have equal importance>
- output neuron connects to random multiple compute neurons and overall has
the same structure as lif
- time-based learning method based on new error formula
(use output vt compared to vth instead of late time)
if output neuron does not activate when it should, use output neuron's
(vth - vt)*100/vth as error
if output neuron activates when it should NOT, use output neuron's
(vt*100)/vth as error
<use logitcrossentropy>
- use LinearAlgebra.normalize!(vector, 1) to adjust weight after weight merge
<it reduces the instant response of a neuron. Sometimes a postsynaptic neuron needs to
respond quickly on a different neural pathway. If wRec is normalized, weights that
need to be high to allow instant neuron response get reduced.>
- reset_epsilonRec after ΔwRecChange is calculated
<training example does not require intermediate response from RSNN>
- add maximum weight cap of each connection
<capping weight limits a neuron's ability to adjust its response>
- wRec should not be normalized as a whole. It should be locally 5-conn normalized.
<it makes small weights bigger>


Ideas to try
- Δweight * connection strength
- reset_epsilonRec after ΔwRecChange is calculated
- ΔwRecChange that applies immediately during online learning
"""

end # module end
|
||||
200
previousVersion/0.0.5_25percentAccuracy/src/WPembeddings.jl
Normal file
200
previousVersion/0.0.5_25percentAccuracy/src/WPembeddings.jl
Normal file
@@ -0,0 +1,200 @@
|
||||
"
|
||||
version 0.4
|
||||
Word and Positional embedding module
|
||||
"
|
||||
module WPembeddings
|
||||
|
||||
using Embeddings
|
||||
using JSON3
|
||||
using Redis
|
||||
|
||||
include("Utils.jl")
|
||||
|
||||
export get_word_embedding, get_positional_embedding, wp_embedding
|
||||
|
||||
|
||||
#----------------------------------------------------------------------------------------------
|
||||
# user setting for word embedding
|
||||
GloVe_embedding_filepath = "C:\\myWork\\my_projects\\AI\\NLP\\my_NLP\\glove.840B.300d.txt"
|
||||
max_GloVe_vocab_size = 0 # size 10000+ or "all"
|
||||
#----------------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# load GloVe word embedding. URL of the embedding file: https://nlp.stanford.edu/projects/glove/
|
||||
# Load the GloVe word-embedding table at module load time.
# Embedding file URL: https://nlp.stanford.edu/projects/glove/
# max_GloVe_vocab_size == 0    -> skip loading entirely (embtable/get_word_index stay undefined;
#                                 NOTE(review): get_word_embedding will then fail — confirm intended)
# max_GloVe_vocab_size != "all" -> load only the first N vocabulary entries
# otherwise ("all")             -> load the full vocabulary
if max_GloVe_vocab_size == 0
    # don't load vocab
elseif max_GloVe_vocab_size != "all"
    @time const embtable = Embeddings.load_embeddings(GloVe{:en}, GloVe_embedding_filepath,
    max_vocab_size=max_GloVe_vocab_size) # size 10000 or something
    # word -> column index into embtable.embeddings
    const get_word_index = Dict(word=>ii for (ii,word) in enumerate(embtable.vocab))
else
    @time const embtable = Embeddings.load_embeddings(GloVe{:en}, GloVe_embedding_filepath)
    # word -> column index into embtable.embeddings
    const get_word_index = Dict(word=>ii for (ii,word) in enumerate(embtable.vocab))
end
|
||||
|
||||
|
||||
# if max_GloVe_vocab_size != "all"
|
||||
# @time const embtable = Embeddings.load_embeddings(GloVe{:en}, GloVe_embedding_filepath,
|
||||
# max_vocab_size=max_GloVe_vocab_size) # size 10000 or something
|
||||
# const get_word_index = Dict(word=>ii for (ii,word) in enumerate(embtable.vocab))
|
||||
# elseif max_GloVe_vocab_size == 0
|
||||
# else
|
||||
# @time const embtable = Embeddings.load_embeddings(GloVe{:en}, GloVe_embedding_filepath)
|
||||
# const get_word_index = Dict(word=>ii for (ii,word) in enumerate(embtable.vocab))
|
||||
# end
|
||||
|
||||
|
||||
"""
    get_word_embedding(word::String)

Return the embedding vector of `word`. Its dimension depends on the GloVe file
that was loaded (300 for glove.840B.300d). Throws `KeyError` when `word` is not
in the loaded vocabulary.

# Example

    we_matrix = get_word_embedding("blue")
"""
function get_word_embedding(word::String)
    column = get_word_index[word]
    return embtable.embeddings[:, column]
end
|
||||
|
||||
|
||||
"""
    get_positional_embedding(total_word_position::Integer, word_embedding_dimension::Integer=300)

Return a positional-embedding matrix of size
`word_embedding_dimension × total_word_position`. Even rows use `cos`, odd rows
use `sin`.

NOTE(review): the scale base is `10`, not the `10000` of the standard
transformer formula (Vaswani et al. 2017) — kept as-is to preserve behavior;
confirm whether this is intentional.

# Example

    pe_matrix = get_positional_embedding(length(content), 300)
"""
function get_positional_embedding(total_word_position::Integer, word_embedding_dimension::Integer=300)
    d = word_embedding_dimension
    p = total_word_position
    # the original comprehension bound a throwaway variable `x = ...`; that dead
    # store is removed here — values produced are identical
    pe = [i % 2 == 0 ? cos(j / (10^(2i / d))) : sin(j / (10^(2i / d))) for i = 1:d, j = 1:p]
    return pe
end
|
||||
|
||||
|
||||
"""
    wp_embedding(tokenized_word::Array{String}, positional_embedding::Bool=false)

Word embedding, optionally summed with a positional embedding.

`tokenized_word` is a BERT-style "sentence" of tokens (one BERT sentence may
span 20+ English sentences). Returns a `300 × length(tokenized_word)` matrix.
When `tokenized_word` is empty and `positional_embedding == false`, returns the
original placeholder `0` (preserved for backward compatibility).
"""
function wp_embedding(tokenized_word::Array{String}, positional_embedding::Bool=false)
    # preserve original behavior for an empty token list
    isempty(tokenized_word) && !positional_embedding && return 0

    # single reduction instead of repeated hcat inside a loop
    # (the loop re-copied the growing matrix every iteration: O(n^2))
    we_matrix = reduce(hcat, (get_word_embedding(w) for w in tokenized_word))

    if positional_embedding
        pe_matrix = get_positional_embedding(length(tokenized_word), 300)
        return we_matrix + pe_matrix
    else
        return we_matrix
    end
end
|
||||
|
||||
|
||||
"""
    wp_query_send(tokenized_word::Array{String}, positional_embedding::Bool=false)

Serialize `tokenized_word` and the `positional_embedding` flag into a JSON
string to be sent to the GloVe docker server.

(The previous docstring named this function `wp_query`; fixed to match the
actual name.)
"""
function wp_query_send(tokenized_word::Array{String}, positional_embedding::Bool=false)
    d = Dict("tokenized_word" => tokenized_word, "positional_embedding" => positional_embedding)
    json3_str = JSON3.write(d)
    return json3_str
end
|
||||
|
||||
|
||||
"""
    wp_query_receive(json3_str::String)

Used inside word_embedding_server to receive a word-embedding job: parse a JSON
string back into `(tokenized_word, positional_embedding)`.

(The previous docstring named this function `wp_query`; fixed to match the
actual name.)
"""
function wp_query_receive(json3_str::String)
    d = JSON3.read(json3_str)
    tokenized_word = Array(d.tokenized_word)
    positional_embedding = d.positional_embedding

    return tokenized_word, positional_embedding
end
|
||||
|
||||
|
||||
"""
    query_wp_server(query; host="0.0.0.0", port=6379,
                    publish_channel="word_embedding_server/input",
                    positional_encoding=true)

Send `query` (tokenized words) to word_embedding_server over Redis pub/sub and
block until the embedded result comes back.

# Example

    WPembeddings.query_wp_server(tokenized_word)
"""
function query_wp_server(query;
                         host="0.0.0.0",
                         port=6379,
                         publish_channel="word_embedding_server/input",
                         positional_encoding=true)

    # channel used to hand the server's JSON reply back to this task
    reply_channel = Channel(10)
    function on_reply(msg)
        put!(reply_channel, Utils.JSON3_str_to_Array(msg))
    end

    # subscribe to the server's output channel BEFORE publishing the request,
    # so the reply cannot be missed
    conn = Redis.RedisConnection(host=host, port=port)
    sub = Redis.open_subscription(conn)
    Redis.subscribe(sub, "word_embedding_server/output", on_reply)

    # serialize the request (positional_encoding=true enables positional
    # encoding) and ask word_embedding_server for the word embedding
    query = WPembeddings.wp_query_send(query, positional_encoding)
    Redis.publish(conn, publish_channel, query);

    wait(reply_channel)  # block until word_embedding_server responds
    embedded_word = take!(reply_channel)

    disconnect(conn)
    return embedded_word
end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end
|
||||
323
previousVersion/0.0.5_25percentAccuracy/src/forward.jl
Normal file
323
previousVersion/0.0.5_25percentAccuracy/src/forward.jl
Normal file
@@ -0,0 +1,323 @@
|
||||
module forward
|
||||
|
||||
using Statistics, Random, LinearAlgebra, JSON3, Flux
|
||||
using GeneralUtils
|
||||
using ..types, ..snn_utils
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" Model forward()

Advance the model one timestep and delegate to its knowledge function.
"""
function (m::model)(input_data::AbstractVector)
    m.timeStep += 1

    # Only one KFN (:I) exists in the model right now, so its return value is
    # forwarded directly. Note: this means the KFN error stands in for the
    # model error, which strictly speaking it should not.
    return m.knowledgeFn[:I](m, input_data)
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" knowledgeFn forward()

One simulation timestep of the recurrent network: latch neuron state, handle
the start-of-learning reset, inject (noise + input) into the input neurons,
run compute neurons then output neurons, and return the firing/voltage
statistics.
"""
function (kfn::kfn_1)(m::model, input_data::AbstractVector)
    kfn.timeStep = m.timeStep

    # latch every neuron's t+1 state into its t state
    for neuron in kfn.neuronsArray
        timestep_forward!(neuron)
    end
    for neuron in kfn.outputNeuronsArray
        timestep_forward!(neuron)
    end

    kfn.learningStage = m.learningStage

    if kfn.learningStage == "start_learning"
        # Reset learning params here rather than at end_learning, so neuron
        # parameter data survives long enough to be logged for visualization.
        # epsilonRec in particular counts per-synapse firings and drives how
        # much each synaptic weight is adjusted, so it must start from zero.
        for neuron in kfn.neuronsArray
            resetLearningParams!(neuron)
        end
        for neuron in kfn.outputNeuronsArray
            resetLearningParams!(neuron)
        end

        # clear the firing bookkeeping
        kfn.firedNeurons = Int64[]
        kfn.firedNeurons_t0 = Bool[]
        kfn.firedNeurons_t1 = Bool[]

        kfn.learningStage = "learning"
        m.learningStage = kfn.learningStage
    end

    # Generate noise spikes. NOTE(review): with probabilities [0.0, 1.0] this
    # currently always yields `false` — confirm the intended noise level.
    noise = [GeneralUtils.randomChoiceWithProb([true, false], [0.0, 1.0])
             for _ in 1:length(input_data)]
    # noise = [rand(rng, Distributions.Binomial(1, 0.5)) for i in 1:10] # another option

    input_data = [noise; input_data]  # noise must start from neuron id 1

    # drive the input neurons: one data point per input neuron, starting at id 1
    for (i, data) in enumerate(input_data)
        kfn.neuronsArray[i].z_t1 = data
    end

    kfn.firedNeurons_t0 = [neuron.z_t for neuron in kfn.neuronsArray]

    # advance the recurrent population
    Threads.@threads for neuron in kfn.neuronsArray
        neuron(kfn)
    end

    kfn.firedNeurons_t1 = [neuron.z_t1 for neuron in kfn.neuronsArray]
    append!(kfn.firedNeurons, findall(kfn.firedNeurons_t1))  # ids of neurons that fired
    kfn.firedNeurons |> unique!  # used for randomly forming new connections

    # advance the readout layer
    Threads.@threads for neuron in kfn.outputNeuronsArray
        neuron(kfn)
    end

    logit = [neuron.v_t1 for neuron in kfn.outputNeuronsArray]

    # _predict = Flux.softmax(logit)
    # predict = findall(isequal.(_predict, maximum(_predict)))[1]

    # (spike count excluding input ports, output voltages, and per-output
    # diagnostics: summed weights, eligibility traces, pending weight changes)
    return sum(kfn.firedNeurons_t1[kfn.kfnParams[:totalInputPort]+1:end])::Int,
           logit::Array{Float64},
           [neuron.v_t1 for neuron in kfn.outputNeuronsArray],
           [sum(i.wRec) for i in kfn.outputNeuronsArray],
           [sum(i.epsilonRec) for i in kfn.outputNeuronsArray],
           [sum(i.wRecChange) for i in kfn.outputNeuronsArray]
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" passthroughNeuron forward()

Input neurons only track the timestep; their `z_t1` is driven externally by
the KFN forward pass.
"""
function (n::passthroughNeuron)(kfn::knowledgeFn)
    n.timeStep = kfn.timeStep
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" lifNeuron forward()

Leaky-integrate-and-fire update for one timestep: pull presynaptic spikes from
`firedNeurons_t0`, integrate the membrane voltage, spike on threshold crossing,
and maintain the eligibility trace `epsilonRec`.
"""
function (n::lifNeuron)(kfn::knowledgeFn)
    n.timeStep = kfn.timeStep

    # firing status (at time t) of every neuron this one subscribes to
    n.z_i_t = getindex(kfn.firedNeurons_t0, n.subscriptionList)
    n.z_i_t_commulative += n.z_i_t

    if n.refractoryCounter == 0
        # --- normal integration ---
        n.recSignal = sum(n.wRec .* n.z_i_t)  # weighted input from subscribed neurons
        n.alpha_v_t = n.alpha * n.v_t
        n.v_t1 = n.alpha_v_t + n.recSignal
        # n.v_t1 = no_negative!(n.v_t1)

        if n.v_t1 > n.v_th
            # spike: reset voltage and enter the refractory period
            n.z_t1 = true
            n.refractoryCounter = n.refractoryDuration
            n.firingCounter += 1
            n.v_t1 = n.vRest
        else
            n.z_t1 = false
        end

        # pseudo-derivative and eligibility trace (differs from the alif formula)
        n.phi = (n.gammaPd / n.v_th) * max(0, 1 - (n.v_t1 - n.v_th) / n.v_th)
        n.decayedEpsilonRec = n.alpha * n.epsilonRec
        n.epsilonRec = n.decayedEpsilonRec
        n.epsilonRec = n.decayedEpsilonRec + n.z_i_t
    else
        # --- refractory: count down and skip the membrane update ---
        n.refractoryCounter -= 1

        # a spike lasts a single timestep followed by the refractory period,
        # so z_t1 is forced low here (read by timestep_forward!() in the kfn)
        n.z_t1 = false
        n.recSignal = n.recSignal * 0.0

        # the voltage still decays while refractory
        n.v_t1 = n.alpha * n.v_t

        n.phi = 0.0
        n.decayedEpsilonRec = n.alpha * n.epsilonRec
        n.epsilonRec = n.decayedEpsilonRec
    end
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" alifNeuron forward()

Adaptive LIF update for one timestep. Same as the lif neuron but with an
adaptation variable `a` that raises the effective threshold `av_th` after each
spike, plus the matching adaptation eligibility trace `epsilonRecA`.
"""
function (n::alifNeuron)(kfn::knowledgeFn)
    n.timeStep = kfn.timeStep

    # firing status (at time t) of every neuron this one subscribes to
    n.z_i_t = getindex(kfn.firedNeurons_t0, n.subscriptionList)
    n.z_i_t_commulative += n.z_i_t

    if n.refractoryCounter == 0
        # --- normal integration ---
        n.av_th = n.v_th + (n.beta * n.a)  # adaptive threshold
        n.recSignal = sum(n.wRec .* n.z_i_t)  # weighted input from subscribed neurons
        n.alpha_v_t = n.alpha * n.v_t
        n.v_t1 = n.alpha_v_t + n.recSignal
        # n.v_t1 = no_negative!(n.v_t1)

        if n.v_t1 > n.av_th
            # spike: reset voltage, enter refractory, bump adaptation
            n.z_t1 = true
            n.refractoryCounter = n.refractoryDuration
            n.firingCounter += 1
            n.v_t1 = n.vRest
            n.a = (n.rho * n.a) + 1.0
        else
            n.z_t1 = false
            n.a = (n.rho * n.a)
        end

        # pseudo-derivative and eligibility traces (differs from the lif formula)
        n.phi = (n.gammaPd / n.v_th) * max(0, 1 - (n.v_t1 - n.av_th) / n.v_th)
        n.decayedEpsilonRec = n.alpha * n.epsilonRec
        n.epsilonRec = n.decayedEpsilonRec + n.z_i_t
        n.epsilonRecA = (n.phi * n.epsilonRec) +
                        ((n.rho - (n.phi * n.beta)) * n.epsilonRecA)
    else
        # --- refractory: count down and skip the membrane update ---
        n.refractoryCounter -= 1

        # a spike lasts a single timestep followed by the refractory period,
        # so z_t1 is forced low here (read by timestep_forward!() in the kfn)
        n.z_t1 = false
        n.a = (n.rho * n.a)
        n.recSignal = n.recSignal * 0.0

        # the voltage still decays while refractory
        n.v_t1 = n.alpha * n.v_t

        n.phi = 0.0
        n.decayedEpsilonRec = n.alpha * n.epsilonRec
        n.epsilonRec = n.decayedEpsilonRec
    end
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" linearNeuron forward()

Output-neuron update. In this implementation each output neuron is fully
connected to every lif and alif neuron, reads their spikes from
`firedNeurons_t1` (same-timestep), and stores `vError` for later error
computation.
"""
function (n::linearNeuron)(kfn::T) where T<:knowledgeFn
    n.timeStep = kfn.timeStep

    # firing status of every subscribed neuron — note: t+1, not t
    n.z_i_t = getindex(kfn.firedNeurons_t1, n.subscriptionList)
    n.z_i_t_commulative += n.z_i_t

    if n.refractoryCounter == 0
        # --- normal integration ---
        recSignal = n.wRec .* n.z_i_t
        n.recSignal = sum(recSignal)  # weighted input from subscribed neurons
        n.alpha_v_t = n.alpha * n.v_t
        n.v_t1 = n.alpha_v_t + n.recSignal
        # n.v_t1 = no_negative!(n.v_t1)
        n.vError = n.v_t1  # voltage kept for the later error calculation

        if n.v_t1 > n.v_th
            n.z_t1 = true
            n.refractoryCounter = n.refractoryDuration
            n.firingCounter += 1
            n.v_t1 = n.vRest
        else
            n.z_t1 = false
        end

        # pseudo-derivative and eligibility trace (differs from the alif formula)
        n.phi = (n.gammaPd / n.v_th) * max(0, 1 - (n.v_t1 - n.v_th) / n.v_th)
        n.decayedEpsilonRec = n.alpha * n.epsilonRec
        n.epsilonRec = n.decayedEpsilonRec + n.z_i_t
    else
        # --- refractory: count down and skip the membrane update ---
        n.refractoryCounter -= 1

        # a spike lasts a single timestep followed by the refractory period
        n.z_t1 = false
        n.recSignal = n.recSignal * 0.0

        # the voltage still decays while refractory
        n.v_t1 = n.alpha * n.v_t
        n.vError = n.v_t1  # voltage kept for the later error calculation

        n.phi = 0.0
        n.decayedEpsilonRec = n.alpha * n.epsilonRec
        n.epsilonRec = n.decayedEpsilonRec
    end
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" integrateNeuron forward()

Non-spiking integrator: leaks the voltage and, only when the incoming signal is
positive, adds it together with the bias `b`. No threshold/refractory logic.
"""
function (n::integrateNeuron)(kfn::knowledgeFn)
    n.timeStep = kfn.timeStep

    # firing status (at time t) of every neuron this one subscribes to
    n.z_i_t = getindex(kfn.firedNeurons_t0, n.subscriptionList)
    n.z_i_t_commulative += n.z_i_t

    n.recSignal = sum(n.wRec .* n.z_i_t)  # weighted input from subscribed neurons
    n.alpha_v_t = n.alpha * n.v_t
    # only positive drive (plus bias) is integrated; otherwise pure leak
    n.v_t1 = n.recSignal <= 0 ? n.alpha_v_t : n.alpha_v_t + n.recSignal + n.b

    # eligibility trace (differs from the alif formula)
    n.decayedEpsilonRec = n.alpha * n.epsilonRec
    n.epsilonRec = n.decayedEpsilonRec + n.z_i_t
end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # end module
|
||||
79
previousVersion/0.0.5_25percentAccuracy/src/interface.jl
Normal file
79
previousVersion/0.0.5_25percentAccuracy/src/interface.jl
Normal file
@@ -0,0 +1,79 @@
|
||||
module interface

# Placeholder module: no exports and no functionality implemented yet.

# export

# using

#------------------------------------------------------------------------------------------------100

end
|
||||
254
previousVersion/0.0.5_25percentAccuracy/src/learn.jl
Normal file
254
previousVersion/0.0.5_25percentAccuracy/src/learn.jl
Normal file
@@ -0,0 +1,254 @@
|
||||
module learn
|
||||
|
||||
using Statistics, Random, LinearAlgebra, JSON3, Flux
|
||||
using GeneralUtils
|
||||
using ..types, ..snn_utils
|
||||
|
||||
export learn!, compute_wRecChange!, computeModelError
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
# Entry point: fan the model-level error out to the knowledge function.
function compute_wRecChange!(m::model, modelError::Float64, outputError::Vector{Float64})
    # normalize!(modelError)
    # only a single KFN (:I) exists for now, so delegate directly
    compute_wRecChange!(m.knowledgeFn[:I], modelError, outputError)
end
|
||||
|
||||
# function compute_wRecChange!(kfn::kfn_1, errors::Vector{Float64}, correctAnswer::AbstractVector)
|
||||
# for (i, error) in enumerate(errors)
|
||||
# if error == 0 # output is correct
|
||||
# # Threads.@threads for n in kfn.neuronsArray # multithread is not atomic and causing error
|
||||
# # # for n in kfn.neuronsArray
|
||||
# # synapticConnStrength!(n, true)
|
||||
# # end
|
||||
# # synapticConnStrength!(kfn.outputNeuronsArray[i], true)
|
||||
# else # output is wrong, error occurs
|
||||
# if correctAnswer[i] == 1 # high priority answer
|
||||
# error = error *
|
||||
# abs(kfn.outputNeuronsArray[i].v_th - kfn.outputNeuronsArray[i].vError)
|
||||
# else # low priority answer
|
||||
# error = error *
|
||||
# abs(kfn.outputNeuronsArray[i].v_th - kfn.outputNeuronsArray[i].vError)
|
||||
# end
|
||||
|
||||
# Threads.@threads for n in kfn.neuronsArray # multithread is not atomic and causing error
|
||||
# # for n in kfn.neuronsArray
|
||||
# compute_wRecChange!(n, error)
|
||||
# # synapticConnStrength!(n, false)
|
||||
# end
|
||||
# compute_wRecChange!(kfn.outputNeuronsArray[i], error)
|
||||
# # synapticConnStrength!(kfn.outputNeuronsArray[i], false)
|
||||
# end
|
||||
# end
|
||||
|
||||
|
||||
# Accumulate the pending weight change for every neuron in the KFN.
function compute_wRecChange!(kfn::kfn_1, modelError::Float64, outputError::Vector{Float64})
    # compute neurons: error is the model error projected through the absolute
    # output weight each output neuron assigns to this neuron
    Threads.@threads for n in kfn.neuronsArray
        if n isa computeNeuron
            # |w| of each output neuron's synapse subscribing to neuron n
            wOut = abs.([oN.wRec[findall(isequal.(oN.subscriptionList, n.id))[1]]
                         for oN in kfn.outputNeuronsArray])
            compute_wRecChange!(n, wOut, modelError)
        end
    end

    # output neurons: each learns from its own per-output error
    for oN in kfn.outputNeuronsArray
        compute_wRecChange!(oN, outputError[oN.id])
    end
end
|
||||
|
||||
# Input (passthrough) neurons carry no learnable weights: nothing to do.
# NOTE: the last parameter was previously annotated `modelError::Vector{Float64}`,
# but the call site in this module passes a `Float64` model error, so the method
# could never be selected; the annotation is dropped so it dispatches for both.
function compute_wRecChange!(n::passthroughNeuron, wOut::AbstractVector, modelError)
    # skip
end
|
||||
|
||||
# e-prop-style weight-change accumulation for a LIF neuron.
function compute_wRecChange!(n::lifNeuron, wOut::AbstractVector, modelError::Float64)
    # how much this neuron's spiking contributed to the output error,
    # projected through the (absolute) output weights
    neuronError = sum(wOut * modelError)

    # eligibility trace scaled by the pseudo-derivative
    n.eRec = n.phi * n.epsilonRec
    ΔwRecChange = -n.eta * neuronError * n.eRec
    # if sum(n.wRec) < 0 # prevent -sum(wRec) that causes the neuron to NOT fire at all
    #     ΔwRecChange .+= (0.2*(abs(sum(n.wRec)) / length(n.wRec)))
    # end
    n.wRecChange .+= ΔwRecChange
    # reset_epsilonRec!(n)
end
|
||||
|
||||
# e-prop-style weight-change accumulation for an adaptive LIF neuron.
function compute_wRecChange!(n::alifNeuron, wOut::AbstractVector, modelError::Float64)
    # how much this neuron's spiking contributed to the output error
    # (projected through the absolute output weights)
    neuronError = sum(wOut * modelError)

    # combined voltage + adaptation eligibility trace
    n.eRec_v = n.phi * n.epsilonRec
    n.eRec_a = n.phi * n.beta * n.epsilonRecA
    n.eRec = n.eRec_v + n.eRec_a
    ΔwRecChange = -n.eta * neuronError * n.eRec
    # if sum(n.wRec) < 0 # prevent -sum(wRec) that causes the neuron to NOT fire at all
    #     ΔwRecChange .+= (0.2*(abs(sum(n.wRec)) / length(n.wRec)))
    # end
    n.wRecChange .+= ΔwRecChange

    # reset_epsilonRec!(n)
    # reset_epsilonRecA!(n)
    # n.alphaChange += compute_alphaChange(n.eta, neuronError)
end
|
||||
|
||||
# Gradient-style weight and bias change accumulation for an integrate neuron.
function compute_wRecChange!(n::integrateNeuron, error::Float64)
    ΔwRecChange = -n.eta * error * n.epsilonRec
    ΔbChange = -n.eta * error
    # if sum(n.wRec) < 0 # prevent -sum(wRec) that causes the neuron to NOT fire at all
    #     ΔwRecChange .+= (abs(sum(n.wRec)) / length(n.wRec))
    # end
    n.wRecChange .+= ΔwRecChange
    n.bChange += ΔbChange
    # reset_epsilonRec!(n)
    # n.alphaChange += compute_alphaChange(n.eta, error)
end
|
||||
|
||||
# function compute_wRecChange!(n::linearNeuron, error::Float64)
|
||||
# n.eRec = n.phi * n.epsilonRec
|
||||
# ΔwRecChange = -n.eta * error * n.eRec
|
||||
# # if sum(n.wRec) < 0 # prevent -sum(wRec) that causing neuron NOT fire at all
|
||||
# # ΔwRecChange .+= (abs(sum(n.wRec)) / length(n.wRec))
|
||||
# # end
|
||||
# n.wRecChange .+= ΔwRecChange
|
||||
# # reset_epsilonRec!(n)
|
||||
# end
|
||||
|
||||
# Learning-rate-scaled (negative) update for the membrane decay alpha.
# `total_wRecChange` may be a scalar or an array; the result has the same shape.
function compute_alphaChange(learningRate::Float64, total_wRecChange)
    return -learningRate * total_wRecChange
end
|
||||
|
||||
# Model-level learn: delegate to the single KFN (:I) that exists for now.
function learn!(m::model)
    learn!(m.knowledgeFn[:I])
end
|
||||
|
||||
""" knowledgeFn learn()

Commit the accumulated weight changes of every neuron, then close the learning
session if it was flagged to end.
"""
function learn!(kfn::kfn_1)
    # commit weight changes for each recurrent neuron
    Threads.@threads for n in kfn.neuronsArray  # multithread is not atomic and causing error
        learn!(n, kfn.firedNeurons, kfn.nExInType)
    end
    # ...and for each output neuron
    for n in kfn.outputNeuronsArray
        learn!(n, kfn.firedNeurons, kfn.nExInType, kfn.kfnParams[:totalInputPort])
    end

    # wrap up the learning session
    if kfn.learningStage == "end_learning"
        kfn.learningStage = "inference"
    end
end
|
||||
|
||||
# Input neurons have no learnable parameters: nothing to do.
function learn!(n::T, firedNeurons, nExInType) where T<:inputNeuron
    # skip
end
|
||||
|
||||
# Apply the accumulated weight change to a compute neuron, zero out any weight
# whose sign flipped (freeing the slot for a random new connection), then run
# the structural-plasticity bookkeeping.
function learn!(n::T, firedNeurons, nExInType) where T<:computeNeuron
    wSign_0 = sign.(n.wRec)  # remember each weight's sign before the update
    # n.wRecChange .*= (connStrengthAdjust.(n.synapticStrength))

    # update-magnitude capping is currently disabled (coefficient fixed at 1.0)
    wRecChange_reduceCoeff = 1.0
    # wRecChange_max = 0.2 * abs(sum(n.wRec)) # max change 20%
    # y = abs(sum(n.wRecChange))
    # if y > wRecChange_max # capping weight update
    #     wRecChange_reduceCoeff = wRecChange_max / y
    # end
    n.wRec += (wRecChange_reduceCoeff * n.wRecChange)
    # n.alpha += n.alphaChange

    # detect sign flips: 1 means the sign was preserved, 0 means it flipped
    wSign_1 = sign.(n.wRec)
    nonFlipedSign = isequal.(wSign_0, wSign_1)
    # set flipped-sign weights to 0 so they can be re-used as new random connections
    n.wRec .*= nonFlipedSign
    # capMaxWeight!(n.wRec) # cap maximum weight
    synapticConnStrength!(n, "updown")
    neuroplasticity!(n, firedNeurons, nExInType)
end
|
||||
|
||||
# Apply the accumulated weight and bias change to an integrate neuron.
function learn!(n::integrateNeuron, firedNeurons, nExInType, totalInputPort)
    # update-magnitude capping is currently disabled (coefficient fixed at 1.0)
    wRecChange_reduceCoeff = 1.0
    # wRecChange_max = 0.2 * abs(sum(n.wRec)) # max change 20%
    # y = abs(sum(n.wRecChange))
    # if y > wRecChange_max # capping weight update
    #     wRecChange_reduceCoeff = wRecChange_max / y
    # end
    n.wRec += (wRecChange_reduceCoeff * n.wRecChange)
    n.b += (wRecChange_reduceCoeff * n.bChange)
    # n.alpha += n.alphaChange
end
|
||||
|
||||
# function learn!(n::linearNeuron, firedNeurons, nExInType, totalInputPort)
|
||||
# wSign_0 = sign.(n.wRec) # original sign
|
||||
# # n.wRecChange .*= (connStrengthAdjust.(n.synapticStrength))
|
||||
# wRecChange_max = 0.1 * abs(sum(n.wRec)) # max change 20%
|
||||
# y = abs(sum(n.wRecChange))
|
||||
# wRecChange_reduceCoeff = 1.0
|
||||
# # if y > wRecChange_max # capping weight update
|
||||
# # wRecChange_reduceCoeff = wRecChange_max / y
|
||||
# # end
|
||||
# n.wRec += (wRecChange_reduceCoeff * n.wRecChange)
|
||||
# n.alpha += n.alphaChange
|
||||
|
||||
# wSign_1 = sign.(n.wRec) # check for fliped sign, 1 indicates non-fliped sign
|
||||
# nonFlipedSign = isequal.(wSign_0, wSign_1) # 1 not fliped, 0 fliped
|
||||
# # normalize wRec peak to prevent input signal overwhelming neuron
|
||||
# if sum(n.wRecChange) != 0
|
||||
# # normalizePeak!(n.wRec, n.wRecChange, 2)
|
||||
# end
|
||||
# # set weight that fliped sign to 0 for random new connection
|
||||
# # n.wRec .*= nonFlipedSign
|
||||
# # capMaxWeight!(n.wRec) # cap maximum weight
|
||||
# # synapticConnStrength!(n, "updown")
|
||||
# # neuroplasticity!(n,firedNeurons, nExInType, totalInputPort)
|
||||
# end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # module end
|
||||
83
previousVersion/0.0.5_25percentAccuracy/src/readout.jl
Normal file
83
previousVersion/0.0.5_25percentAccuracy/src/readout.jl
Normal file
@@ -0,0 +1,83 @@
|
||||
module readout
|
||||
|
||||
using Flux.Optimise: apply!
|
||||
|
||||
using Statistics, Flux, Random, LinearAlgebra
|
||||
using GeneralUtils
|
||||
using ..types, ..readout, ..learn, ..forward
|
||||
|
||||
export readout!
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
# Drive the RSNN with a fixed readout signal for `on_tauOut` ticks, tally the
# per-tick softmax responses, and return the majority-vote readout. When
# `correctAnswer` is given, also compute the logit-crossentropy error and learn
# after every tick; `correctAnswer=nothing` means pure inference.
function readout!(kfn::knowledgeFn; correctAnswer=nothing)
    # clear output to start reading
    # kfn.on_out_t0 *= 0.0 #FIXME should I clear it before RSNN readout?
    respondCount = zeros(length(kfn.on_out_t0))

    # readout signal: only the first and last passthrough channels are driven
    readoutSignal = zeros(length(kfn.passthrough_zt0))
    readoutSignal[1] = 1
    readoutSignal[end] = 1

    lastKfnTimeStamp = kfn.timeStamp[1]
    for t in 1:kfn.on_tauOut[1]
        # println("t $t")
        tick = lastKfnTimeStamp + t
        if t == kfn.on_tauOut[1]
            println("")  # NOTE(review): looks like leftover debug output — confirm
        end
        if kfn.learningStage[1] == 0
            # RSNN is in inference mode: do not change the marker.
            # NOTE(review): `marker` is never assigned on this path — confirm
            # this branch cannot run before the else-branch has assigned it.
        else
            # learning mode: commit wChange (marker 4) on the last readout tick
            marker = t == kfn.on_tauOut[1] ? 4 : kfn.learningStage[1]
        end

        # RSNN forward ----------
        singleTimeReadout, on_out_t0, softmaxRespond = kfn(readoutSignal, tick, marker,
                                                           correctAnswer=correctAnswer)
        _, _, respondPosition = Utils.findMax(softmaxRespond)
        respondCount += respondPosition

        if correctAnswer !== nothing
            kfn.kfnError = [Flux.logitcrossentropy(on_out_t0, correctAnswer)]
            learn!(kfn)
        end
    end

    # majority vote over the readout window
    _, readout, _ = Utils.findMax(respondCount / kfn.on_tauOut[1])

    return readout, kfn.on_out_t0
end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # module
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
439
previousVersion/0.0.5_25percentAccuracy/src/snn_utils.jl
Normal file
439
previousVersion/0.0.5_25percentAccuracy/src/snn_utils.jl
Normal file
@@ -0,0 +1,439 @@
|
||||
module snn_utils
|
||||
|
||||
export calculate_α, calculate_ρ, calculate_k, timestep_forward!, init_neuron, no_negative!,
|
||||
precision, calculate_w_change!, store_knowledgefn_error!, interneurons_adjustment!,
|
||||
reset_z_t!, resetLearningParams!, reset_learning_history_params!, reset_epsilonRec!,
|
||||
reset_epsilonRecA!, synapticConnStrength!, normalizePeak!, reset_wRecChange!,
|
||||
firing_rate_error!, firing_rate_regulator!, update_Bn!, cal_firing_reg!,
|
||||
neuroplasticity!, shakeup!, reset_learning_no_wchange!, adjust_internal_learning_rate!,
|
||||
gradient_withloss, capMaxWeight!, connStrengthAdjust
|
||||
|
||||
using Statistics, Random, LinearAlgebra, Distributions, Zygote, Flux
|
||||
using GeneralUtils
|
||||
using ..types
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
rng = MersenneTwister(1234)
|
||||
|
||||
# Advance a passthrough (input) neuron one timestep: latch the spike state.
function timestep_forward!(x::passthroughNeuron)
    x.z_t = x.z_t1
end
|
||||
|
||||
# Advance a compute/output neuron one timestep: latch spike state and voltage.
function timestep_forward!(x::Union{computeNeuron, outputNeuron})
    x.z_t = x.z_t1
    x.v_t = x.v_t1
end
|
||||
|
||||
# clamp negatives to zero (despite the `!` the input is not mutated)
no_negative!(x) = x < 0.0 ? 0.0 : x
# coefficient of variation (percent) of the per-array means
precision(x::Array{<:Array}) = ( std(mean.(x)) / mean(mean.(x)) ) * 100

# --- reset helpers for LIF/ALIF neurons ---
reset_last_firing_time!(n::computeNeuron) = n.lastFiringTime = 0.0
reset_refractory_state_active!(n::computeNeuron) = n.refractory_state_active = false
reset_v_t!(n::neuron) = n.v_t = n.vRest
reset_v_t1!(n::neuron) = n.v_t1 = n.vRest
reset_z_t!(n::computeNeuron) = n.z_t = false
reset_epsilonRec!(n::computeNeuron) = n.epsilonRec = n.epsilonRec * 0.0
reset_epsilonRec!(n::outputNeuron) = n.epsilonRec = n.epsilonRec * 0.0
reset_epsilonRecA!(n::alifNeuron) = n.epsilonRecA = n.epsilonRecA * 0.0
reset_epsilon_in!(n::computeNeuron) = n.epsilon_in = isnothing(n.epsilon_in) ? nothing : n.epsilon_in * 0.0
reset_error!(n::Union{computeNeuron, outputNeuron}) = n.error = nothing
reset_w_in_change!(n::computeNeuron) = n.w_in_change = isnothing(n.w_in_change) ? nothing : n.w_in_change * 0.0
reset_wRecChange!(n::Union{computeNeuron, outputNeuron}) = n.wRecChange = n.wRecChange * 0.0
reset_a!(n::alifNeuron) = n.a = n.a * 0.0
reset_reg_voltage_a!(n::computeNeuron) = n.reg_voltage_a = n.reg_voltage_a * 0.0
reset_reg_voltage_b!(n::computeNeuron) = n.reg_voltage_b = n.reg_voltage_b * 0.0
reset_reg_voltage_error!(n::computeNeuron) = n.reg_voltage_error = n.reg_voltage_error * 0.0
reset_firing_counter!(n::Union{computeNeuron, outputNeuron}) = n.firingCounter = n.firingCounter * 0.0
reset_firing_diff!(n::Union{computeNeuron, outputNeuron}) = n.firingDiff = n.firingDiff * 0.0
reset_refractoryCounter!(n::Union{computeNeuron, outputNeuron}) = n.refractoryCounter = n.refractoryCounter * 0.0
reset_z_i_t_commulative!(n::Union{computeNeuron, outputNeuron}) = n.z_i_t_commulative = n.z_i_t_commulative * 0.0
reset_alphaChange!(n::Union{computeNeuron, outputNeuron}) = n.alphaChange = n.alphaChange * 0.0

# --- reset helpers for output neurons ---
reset_epsilon_j!(n::linearNeuron) = n.epsilon_j = n.epsilon_j * 0.0
reset_out_t!(n::linearNeuron) = n.out_t = n.out_t * 0.0
reset_bChange!(n::integrateNeuron) = n.bChange = n.bChange * 0.0
|
||||
|
||||
""" Reset all learning-related params at the END of a learning session.

epsilonRec counts per-synapse firings (used to size the weight adjustments),
so it must start each session from zero.
"""
function resetLearningParams!(n::lifNeuron)
    reset_epsilonRec!(n)
    reset_wRecChange!(n)
    reset_v_t!(n)
    reset_z_t!(n)
    reset_firing_counter!(n)
    reset_firing_diff!(n)
    reset_alphaChange!(n)

    # The refractory counter is deliberately NOT reset here: it is reset at the
    # start/end of an episode instead, otherwise a neuron entering refractory
    # state would stay there forever.
    # reset_refractoryCounter!(n)
    reset_z_i_t_commulative!(n)
end
|
||||
"""
    resetLearningParams!(n::alifNeuron)

Reset all learning-related state of an adaptive LIF neuron at the END of a
learning session — same as the LIF variant plus the adaptation trace
(`epsilonRecA`) and the adaptation variable `a`.
"""
function resetLearningParams!(n::alifNeuron)
    foreach(reset! -> reset!(n), (
        reset_epsilonRec!,
        reset_epsilonRecA!,
        reset_wRecChange!,
        reset_v_t!,
        reset_z_t!,
        reset_a!,
        reset_firing_counter!,
        reset_firing_diff!,
        reset_alphaChange!,
    ))

    # The refractory counter is deliberately NOT cleared here; it is reset at
    # the start/end of an episode instead. Otherwise, once a neuron entered
    # the refractory state it would stay refractory forever.
    # reset_refractoryCounter!(n)
    reset_z_i_t_commulative!(n)
end
|
||||
# function reset_learning_no_wchange!(n::passthroughNeuron)
|
||||
# end
|
||||
|
||||
# Passthrough (input) neurons carry no learning state, so there is nothing to
# reset; the method exists so callers can treat every neuron type uniformly.
resetLearningParams!(n::passthroughNeuron) = nothing
# function resetLearningParams!(n::linearNeuron)
|
||||
# reset_epsilonRec!(n)
|
||||
# reset_wRecChange!(n)
|
||||
# # reset_v_t!(n)
|
||||
# reset_firing_counter!(n)
|
||||
|
||||
# # reset refractory state at the start/end of episode. Otherwise once neuron goes into
|
||||
# # refractory state, it will stay in refractory state forever
|
||||
# # reset_refractoryCounter!(n)
|
||||
# reset_z_i_t_commulative!(n)
|
||||
# end
|
||||
|
||||
"""
    resetLearningParams!(n::integrateNeuron)

Reset all learning-related state of an integrate (output) neuron at the END
of a learning session, including the accumulated bias change `bChange`.
"""
function resetLearningParams!(n::integrateNeuron)
    foreach(reset! -> reset!(n), (
        reset_epsilonRec!,
        reset_wRecChange!,
        reset_bChange!,
        reset_v_t!,
        reset_firing_counter!,
    ))
    reset_alphaChange!(n)
end
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" Append the current `kfn.knowledgeFn_error` into the rolling history
`kfn.recent_knowledgeFn_error` (a vector of per-session error vectors),
keeping at most the 3 most recent sessions. Which sub-vector receives the
value depends on `kfn.learningStage`.
"""
function store_knowledgefn_error!(kfn::knowledgeFn)
    # condition to adjust neuron in KFN plane in addition to weight adjustment inside each neuron
    if kfn.learningStage == "start_learning"
        # "start_learning": open a new per-session error vector (seeding the
        # history first if it does not exist yet).
        if kfn.recent_knowledgeFn_error === nothing && kfn.knowledgeFn_error === nothing
            kfn.recent_knowledgeFn_error = [[]]
        elseif kfn.recent_knowledgeFn_error === nothing
            kfn.recent_knowledgeFn_error = [[kfn.knowledgeFn_error]]
        elseif kfn.recent_knowledgeFn_error !== nothing && kfn.knowledgeFn_error === nothing
            push!(kfn.recent_knowledgeFn_error, [])
        else
            push!(kfn.recent_knowledgeFn_error, [kfn.knowledgeFn_error])
        end
    elseif kfn.learningStage == "during_learning"
        # "during_learning": append to the current (last) session vector,
        # skipping when no error value is available.
        if kfn.knowledgeFn_error === nothing
            #skip
        else
            push!(kfn.recent_knowledgeFn_error[end], kfn.knowledgeFn_error)
        end
    elseif kfn.learningStage == "end_learning"
        # "end_learning": append the session's final error.
        # NOTE(review): unlike "during_learning" this branch guards on
        # `recent_knowledgeFn_error` rather than on `knowledgeFn_error`, so a
        # `nothing` error value can be pushed here — confirm the asymmetry is
        # intended.
        if kfn.recent_knowledgeFn_error === nothing
            #skip
        else
            push!(kfn.recent_knowledgeFn_error[end], kfn.knowledgeFn_error)
        end
    else
        error("case does not defined yet")
    end

    # Keep only the 3 most recent sessions.
    # NOTE(review): assumes `recent_knowledgeFn_error` is non-`nothing` here;
    # a first call made in "during_learning"/"end_learning" stage would throw
    # on `length(nothing)` — confirm "start_learning" always runs first.
    if length(kfn.recent_knowledgeFn_error) > 3
        deleteat!(kfn.recent_knowledgeFn_error, 1)
    end
end
|
||||
""" Update each compute neuron's error-projection coefficient `Bn` from the
accumulated output-weight changes of the output neurons, then apply weight
decay to both the output weights `w_out` and to `Bn` itself.
"""
function update_Bn!(kfn::knowledgeFn)
    # Accumulate w_out changes across all output neurons.
    # NOTE(review): if `outputNeuronsArray` is empty, Δw stays `nothing` and
    # the `Δw[i]` below throws — confirm the array is always non-empty here.
    Δw = nothing
    for n in kfn.outputNeuronsArray
        Δw = Δw === nothing ? n.w_out_change : Δw + n.w_out_change
        n.w_out = n.w_out - (n.Bn_wout_decay * n.w_out) # w_out decay
    end
    # Δw = Δw / kfn.kfnParams[:linear_neuron_number] # average

    # Compute neurons sit after the input neurons in neuronsArray, so offset
    # the index by the input-neuron count.
    input_neuron_number = kfn.kfnParams[:input_neuron_number] # skip input neuron
    for i = 1:kfn.kfnParams[:compute_neuron_number]
        n = kfn.neuronsArray[input_neuron_number+i]
        n.Bn = n.Bn + Δw[i]
        n.Bn = n.Bn - (n.Bn_wout_decay * n.Bn) # w_out decay
    end
end
|
||||
""" Regulates membrane potential to stay under v_th, output is weight change
"""
function cal_v_reg!(n::lifNeuron)
    # rectified linear penalty: quadratic once |v| exceeds v_th, zero
    # otherwise (positive and negative excursions handled separately)
    component_a1 = n.v_t1 - n.v_th < 0 ? 0 : (n.v_t1 - n.v_th)^2
    component_a2 = -n.v_t1 - n.v_th < 0 ? 0 : (-n.v_t1 - n.v_th)^2
    n.reg_voltage_a = n.reg_voltage_a + component_a1 + component_a2

    # gradient-like term: linear overshoot scaled by the eligibility vector
    component_b = n.v_t1 - n.v_th < 0 ? 0 : n.v_t1 - n.v_th
    #FIXME: not sure the following line is correct
    n.reg_voltage_b = n.reg_voltage_b + (component_b * n.epsilonRec)
end
|
||||
""" Regulates an adaptive LIF neuron's membrane potential to stay under its
adaptive threshold `av_th`; accumulates the regularization terms in place.
"""
function cal_v_reg!(n::alifNeuron)
    # rectified linear penalty against the ADAPTIVE threshold av_th:
    # quadratic once |v| exceeds av_th, zero otherwise
    component_a1 = n.v_t1 - n.av_th < 0 ? 0 : (n.v_t1 - n.av_th)^2
    component_a2 = -n.v_t1 - n.av_th < 0 ? 0 : (-n.v_t1 - n.av_th)^2
    n.reg_voltage_a = n.reg_voltage_a + component_a1 + component_a2

    # gradient-like term: linear overshoot scaled by the combined eligibility
    # (spike trace minus adaptation trace)
    component_b = n.v_t1 - n.av_th < 0 ? 0 : n.v_t1 - n.av_th
    #FIXME: not sure the following line is correct
    n.reg_voltage_b = n.reg_voltage_b + (component_b * (n.epsilonRec - n.epsilonRecA))
end
|
||||
"""
    voltage_error!(n::computeNeuron)

Store and return the neuron's voltage-regularization error, defined as half
of the accumulated quadratic overshoot `reg_voltage_a`.
"""
function voltage_error!(n::computeNeuron)
    halved = 0.5 * n.reg_voltage_a
    n.reg_voltage_error = halved
    return halved
end
|
||||
"""
    voltage_regulator!(n::computeNeuron) -> Δw

Weight-change contribution of the voltage regularizer: learning rate ×
regularization coefficient `c_reg_v` × accumulated `reg_voltage_b`.
"""
function voltage_regulator!(n::computeNeuron) # running average
    return n.optimiser.eta * n.c_reg_v * n.reg_voltage_b
end
|
||||
"""
    firingRateError(kfn::knowledgeFn)

Total firing-rate regularization error: half the sum of squared firing-rate
deviations over all non-input neurons.
"""
function firingRateError(kfn::knowledgeFn)
    first_compute = kfn.kfnParams[:input_neuron_number] + 1
    squared_diffs = [n.firingDiff^2 for n in kfn.neuronsArray[first_compute:end]]
    return 0.5 * sum(squared_diffs)
end
|
||||
"""
    firing_rate_regulator!(n::computeNeuron) -> Δw

Weight change pushing the neuron's firing rate down toward its target.
Only firing ABOVE the target is penalized; at or below target the result is
zeroed (multiplied by 0.0 so the shape/type of the trace is preserved).
"""
function firing_rate_regulator!(n::computeNeuron)
    # n.firingRate is NOT a running average (it is averaged over the learning batch)
    Δw = n.optimiser.eta * n.c_reg * (n.firingRate - n.firingRateTarget) * n.eRec
    if n.firingRate > n.firingRateTarget
        return Δw
    else
        return Δw * 0.0
    end
end
|
||||
# Spike count converted to a firing rate in Hz (timeStep is in ms, hence the ×1000).
function firing_rate!(n::computeNeuron)
    n.firingRate = (n.firingCounter / n.timeStep) * 1000
end

# Signed deviation of the current firing rate from its target rate.
function firing_diff!(n::computeNeuron)
    n.firingDiff = n.firingRate - n.firingRateTarget
end
|
||||
"""
    adjust_internal_learning_rate!(n::computeNeuron)

Shrink the internal learning rate slightly (×0.99) while the most recent
error difference is negative (improving), and grow it slightly (×1.005)
otherwise.
"""
function adjust_internal_learning_rate!(n::computeNeuron)
    if n.error_diff[end] < 0.0
        n.internal_learning_rate *= 0.99
    else
        n.internal_learning_rate *= 1.005
    end
end
|
||||
"""
    connStrengthAdjust(currentStrength::Float64) -> Float64

Size of one synaptic-strength adjustment step. The sigmoid makes steps large
for weak (very negative) strengths and vanishingly small for strong (very
positive) ones.
"""
function connStrengthAdjust(currentStrength::Float64)
    return (1.0 - sigmoid(currentStrength))::Float64
end
|
||||
""" Compute synaptic connection strength. A bias can shift `currentStrength`
into the sigmoid's operating range, which is centred at 0 and spans roughly
-37 to 37.

# Example
If the synaptic-strength range is 0 to 10, one may use bias = -5 to transform
it into -5 to 5; the return value is shifted back to the original scale.

# Concept
Weaker connections should be harder to strengthen: they need many repeated
activations to grow, while strong connections need a lot of inactivity to
weaken. Repeatedly exercising a correct neural pathway therefore locks it in —
training on the correct answer strengthens the right connections, which then
resist change. Connections that are never used fade away (forgetting).
"""
function synapticConnStrength(currentStrength::Float64, updown::String)
    Δstrength = connStrengthAdjust(currentStrength)
    strong = currentStrength > 4  # "strong connection" threshold

    if updown == "up"
        # strong connections are reinforced faster than weak ones
        updatedStrength = currentStrength + (strong ? Δstrength * 0.2 : Δstrength * 0.1)
    elseif updown == "down"
        # strong connections decay more slowly than weak ones
        updatedStrength = currentStrength - (strong ? Δstrength * 0.1 : Δstrength * 0.2)
    else
        error("undefined condition line $(@__LINE__)")
    end
    return updatedStrength::Float64
end
|
||||
""" Compute all synaptic connection strengths of a neuron. When an updated
strength hits the lower limit, the corresponding `n.wRec[i]` is zeroed so the
slot can be rewired with a new random synaptic connection.

`mode` is `"updown"` (direction follows per-synapse usage recorded in
`z_i_t_commulative`) or `"down"` (every connection is weakened); any other
mode raises an error before any state is mutated.

The two modes previously duplicated the whole loop body; they now share one
loop and differ only in how the adjustment direction is chosen.
"""
function synapticConnStrength!(n::Union{computeNeuron, outputNeuron}, mode::String)
    # Validate up front so an unknown mode fails before any mutation (the
    # original also errored before touching state).
    mode in ("updown", "down") || error("undefined condition line $(@__LINE__)")

    for (i, connStrength) in enumerate(n.synapticStrength)
        # In "updown" mode the direction follows synapse usage. We use
        # n.z_i_t_commulative instead of the better choice, epsilonRec,
        # because the ΔwRecChange calculation in learn!() resets epsilonRec to
        # a zero vector whenever an output neuron fires just before this call.
        # z_i_t_commulative still indicates whether a synaptic connection was
        # used, and it spans a whole training sample without being reset.
        updown = if mode == "down"
            "down"
        else
            n.z_i_t_commulative[i] == 0 ? "down" : "up"
        end

        updatedConnStrength = synapticConnStrength(connStrength, updown)
        updatedConnStrength = GeneralUtils.limitvalue(updatedConnStrength,
            n.synapticStrengthLimit.lowerlimit, n.synapticStrengthLimit.upperlimit)

        # At the lower limit, mark wRec at this position as 0 so that
        # neuroplasticity can create a new random synaptic connection here.
        if updatedConnStrength == n.synapticStrengthLimit.lowerlimit[1]
            n.wRec[i] = 0.0
        end
        n.synapticStrength[i] = updatedConnStrength
    end
end
|
||||
# Input neurons have no incoming synapses to adjust; this no-op method exists
# so every neuron type can be handled uniformly by the caller.
function synapticConnStrength!(n::inputNeuron, correctness::Bool)
    return nothing
end
|
||||
""" Normalize in place (L1) the part of `v1` centred on the index of `v2`'s
largest-magnitude element, extending `radius` positions to each side. The
window is clamped to the bounds of `v1`, so it spans at most `2*radius + 1`
elements. Ties pick the FIRST maximal index.

NOTE(review): the old docstring said "radius must be odd", but the default is
2 and nothing enforces oddness — the window is simply `peak ± radius`.
Assumes `v1` and `v2` have the same length — TODO confirm at call sites.
"""
function normalizePeak!(v1::Vector, v2::Vector, radius::Integer=2)
    # argmax returns the first index of the maximum, matching the previous
    # findall(isequal.(...))[1] behaviour without two extra temporary arrays.
    peak = argmax(abs.(v2))
    upindex = max(peak - radius, 1)
    downindex = min(peak + radius, length(v1))
    subvector = view(v1, upindex:downindex)
    normalize!(subvector, 1)
end
|
||||
""" Rewire the neuron's synaptic connections that have 0 weight, WITHOUT any
constraint on the excitatory/inhibitory ratio of the replacements. New
partners are drawn preferentially from `firedNeurons`, falling back to
non-fired neurons once the fired pool runs dry; the neuron itself and its
existing subscriptions are excluded from both pools.
"""
function neuroplasticity!(n::computeNeuron, firedNeurons::Vector,
    nExInTypeList::Vector)
    # if there is 0-weight then replace it with new connection
    zeroWeightConnIndex = findall(iszero.(n.wRec)) # connection that has 0 weight
    if length(zeroWeightConnIndex) != 0
        # new synaptic connection must sample from neurons that fired
        nFiredPool = filter(x -> x ∉ [n.id], firedNeurons) # exclude this neuron id from the id list
        filter!(x -> x ∉ n.subscriptionList, nFiredPool) # exclude this neuron's subscriptionList from the list

        # everything not in the fired pool becomes the fallback pool
        nNonFiredPool = setdiff!([1:length(nExInTypeList)...], nFiredPool)

        filter!(x -> x ∉ [n.id], nNonFiredPool) # exclude this neuron id from the id list
        filter!(x -> x ∉ n.subscriptionList, nNonFiredPool) # exclude this neuron's subscriptionList from the list

        # small random initial weights and a weak initial synaptic strength
        w = randn(length(zeroWeightConnIndex)) / 100
        synapticStrength = rand(-4.5:0.1:-3.5, length(zeroWeightConnIndex))

        # randomize the pick order within each pool
        shuffle!(nFiredPool)
        shuffle!(nNonFiredPool)

        # add new synaptic connection to neuron
        for (i, connIndex) in enumerate(zeroWeightConnIndex)
            """ conn that is being replaced has to go into nNonFiredPool so
            nNonFiredPool isn't empty """
            push!(nNonFiredPool, n.subscriptionList[connIndex])

            # prefer a fired partner; otherwise take a non-fired one
            if length(nFiredPool) != 0
                newConn = popfirst!(nFiredPool)
            else
                newConn = popfirst!(nNonFiredPool)
            end
            n.subscriptionList[connIndex] = newConn
            # weight magnitude is random; its sign follows the partner's
            # excitatory(+1)/inhibitory(-1) type from nExInTypeList
            n.wRec[connIndex] = abs(w[i]) * nExInTypeList[newConn]
            n.synapticStrength[connIndex] = synapticStrength[i]
        end
    end
end
|
||||
# function neuroplasticity!(n::outputNeuron, firedNeurons::Vector,
|
||||
# nExInTypeList::Vector, totalInputNeuron::Integer)
|
||||
# # if there is 0-weight then replace it with new connection
|
||||
# zeroWeightConnIndex = findall(iszero.(n.wRec)) # connection that has 0 weight
|
||||
# if length(zeroWeightConnIndex) != 0
|
||||
# # new synaptic connection must sample fron neuron that fires
|
||||
# nFiredPool = filter(x -> x ∉ [n.id], firedNeurons) # exclude this neuron id from the id list
|
||||
# filter!(x -> x ∉ n.subscriptionList, nFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
# filter!(x -> x ∉ [1:totalInputNeuron...], nFiredPool) # exclude input neuron
|
||||
|
||||
# nNonFiredPool = setdiff!([1:length(nExInTypeList)...], nFiredPool)
|
||||
# unique!(append!(nNonFiredPool, zeroWeightConnIndex))
|
||||
# filter!(x -> x ∉ [n.id], nNonFiredPool) # exclude this neuron id from the id list
|
||||
# filter!(x -> x ∉ n.subscriptionList, nNonFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
# filter!(x -> x ∉ [1:totalInputNeuron...], nNonFiredPool) # exclude input neuron
|
||||
|
||||
# w = randn(length(zeroWeightConnIndex)) / 100
|
||||
# synapticStrength = rand(-4.5:0.1:-3.5, length(zeroWeightConnIndex))
|
||||
|
||||
# shuffle!(nFiredPool)
|
||||
# shuffle!(nNonFiredPool)
|
||||
|
||||
# # add new synaptic connection to neuron
|
||||
# for (i, connIndex) in enumerate(zeroWeightConnIndex)
|
||||
# """ conn that is being replaced has to go into nNonFiredPool so
|
||||
# nNonFiredPool isn't empty """
|
||||
# push!(nNonFiredPool, n.subscriptionList[connIndex])
|
||||
|
||||
# if length(nFiredPool) != 0
|
||||
# newConn = popfirst!(nFiredPool)
|
||||
# else
|
||||
# newConn = popfirst!(nNonFiredPool)
|
||||
# end
|
||||
# n.subscriptionList[connIndex] = newConn
|
||||
# n.wRec[connIndex] = w[i] * nExInTypeList[newConn]
|
||||
# n.synapticStrength[connIndex] = synapticStrength[i]
|
||||
# end
|
||||
# end
|
||||
# end
|
||||
|
||||
""" Cap the magnitude of each weight in `v` at `max`, preserving sign,
IN PLACE. Returns the capped vector.

Fix: the previous body rebound the local variable (`v = ...`) instead of
mutating the argument, so despite the `!` in the name the caller's vector was
never changed. `v .= ...` writes the capped values back into the caller's
array.
"""
function capMaxWeight!(v::Vector{Float64}, max=1.0)
    originalSign = sign.(v)
    # broadcast-assign so the mutation is visible to the caller
    v .= originalSign .* GeneralUtils.replaceMoreThan.(abs.(v), max)
    return v
end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # end module
|
||||
885
previousVersion/0.0.5_25percentAccuracy/src/types.jl
Normal file
885
previousVersion/0.0.5_25percentAccuracy/src/types.jl
Normal file
@@ -0,0 +1,885 @@
|
||||
module types
|
||||
|
||||
export
|
||||
# struct
|
||||
IronpenStruct, model, knowledgeFn, lifNeuron, alifNeuron, linearNeuron,
|
||||
kfn_1, inputNeuron, computeNeuron, neuron, outputNeuron, passthroughNeuron,
|
||||
integrateNeuron,
|
||||
|
||||
# function
|
||||
instantiate_custom_types, init_neuron, populate_neuron,
|
||||
add_neuron!
|
||||
|
||||
using Random, LinearAlgebra
|
||||
|
||||
#------------------------------------------------------------------------------------------------100

# Type hierarchy: every concrete neuron type derives from `neuron`, which —
# like `knowledgeFn` — sits under the package-wide abstract root `Ironpen`.
abstract type Ironpen end
abstract type knowledgeFn <: Ironpen end
abstract type neuron <: Ironpen end
abstract type inputNeuron <: neuron end
abstract type outputNeuron <: neuron end
abstract type computeNeuron <: neuron end

#------------------------------------------------------------------------------------------------100

# Module-level RNG with a fixed seed, intended for reproducible initialisation.
# NOTE(review): the struct field defaults below call the global `rand`/`randn`,
# not this `rng` — confirm whether they were meant to use it.
rng = MersenneTwister(1234)
||||
""" Model struct

Top-level container: holds the knowledge functions, the params Dict the model
was built from, and model-wide error/learning state.
"""
Base.@kwdef mutable struct model <: Ironpen
    knowledgeFn::Union{Dict,Nothing} = nothing   # knowledgeFn instances owned by this model
    modelParams::Union{Dict,Nothing} = nothing   # params Dict this model was constructed from
    error::Float64 = 0.0                         # current overall model error
    outputError::Array{Float64} = Float64[]      # per-output error values

    """ "inference" = no learning params will be collected.
        "learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
            correct answer is available then merge Δw_rec_change into wRecChange then
            reset epsilon_j.
        "reflect" = neuron will merge wRecChange into wRec then reset wRecChange. """
    learningStage::String = "inference"
    timeStep::Number = 0.0                       # current simulation time
end
||||
""" Model outer constructor.

Builds a `model` from a params Dict: the Dict itself is stored in
`modelParams`, and every key whose name matches a `model` field overrides
that field's default value.

# Example
    modelParams_1 = Dict(:knowledgeFn => Dict(:I => I_kfn,
                                              :run => run_kfn),
                         :learningStage => "doing_inference",)

    model_1 = Ironpen_ai_gpu.model(modelParams_1)
"""
function model(params::Dict)
    m = model()
    m.modelParams = params

    # Override each struct field for which the params Dict supplies a value.
    for field in fieldnames(typeof(m))
        haskey(params, field) && setproperty!(m, field, params[field])
    end

    return m
end
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" knowledgeFn struct

Concrete knowledge-function container: the neuron population, the output
neurons, per-session learning state and excitatory/inhibitory bookkeeping.

NOTE(review): `store_knowledgefn_error!` reads `recent_knowledgeFn_error` /
`knowledgeFn_error`, which are not declared here — confirm which knowledgeFn
subtype those functions target.
"""
Base.@kwdef mutable struct kfn_1 <: knowledgeFn
    knowledgeFnName::String = "not defined"
    kfnParams::Union{Dict,Nothing} = nothing # store params of knowledgeFn itself for later use
    timeStep::Number = 0.0

    # Bn contain error coefficient for both neurons and output neurons in one place
    Bn::Vector{Float64} = Float64[] # error projection coefficient from kfn output's error to each neurons's error
    neuronsArray::Array{neuron} = neuron[] # put neurons here

    """ put output neuron here. I seperate output neuron because
    1. its calculation is difference than other neuron types
    2. other neuron type will not induced to connnect to output neuron
    3. output neuron does not induced to connect to its own type """
    outputNeuronsArray::Array{outputNeuron} = outputNeuron[]

    """ "inference" = no learning params will be collected.
        "learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
            correct answer is available then merge Δw_rec_change into wRecChange then
            reset epsilon_j.
        "reflect" = neuron will merge wRecChange into wRec then reset wRecChange. """
    learningStage::String = "inference"

    error::Float64 = 0.0

    firedNeurons::Array{Int64} = Int64[] # store unique id of firing neurons to be used when random neuron connection
    firedNeurons_t0::Union{Vector{Bool},Nothing} = nothing # store firing state of all neurons at t0
    firedNeurons_t1::Union{Vector{Bool},Nothing} = nothing # store firing state of all neurons at t1

    avgNeuronsFiringRate::Union{Float64,Nothing} = 0.0 # for displaying average firing rate over all neurons
    avgNeurons_v_t1::Union{Float64,Nothing} = 0.0 # for displaying average v_t1 over all neurons
    nExcitatory::Array{Int64} = Int64[] # list of excitatory neuron id
    nInhabitory::Array{Int64} = Int64[] # list of inhabitory neuron id
    nExInType::Array{Int64} = Int64[] # list all neuron EX or IN
    excitatoryPercent::Int64 = 60 # percentage of excitatory neuron, inhabitory percent will be 100-ExcitatoryPercent
end
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
"""
    kfn_1(kfnParams::Dict) -> kfn_1

Knowledge-function outer constructor — auto-generates the whole neuron
population from a params Dict.

Keys actually read by this constructor:
- `:knowledgeFnName`        — name stored on the instance
- `:inputPort`              — Dict of input-port types; each entry has `:numbers` and `:params`
- `:computeNeuron`          — Dict of compute-neuron types; each entry has `:numbers` and `:params`
- `:outputPort`             — Dict with `:numbers` and `:params` for the output neurons
- `:computeNeuronNumber`    — total number of compute neurons
- `:totalInputPort`         — total number of input neurons
- `:neuronFiringRateTarget` — target firing rate copied onto every compute neuron

NOTE(review): example configs elsewhere in this package use keys such as
`:lif_neuron_number` / `:linear_neuron_number`, which this constructor does
NOT read — confirm which params schema is current.
"""
function kfn_1(kfnParams::Dict)

    kfn = kfn_1()
    kfn.kfnParams = kfnParams
    kfn.knowledgeFnName = kfn.kfnParams[:knowledgeFnName]

    # sanity check: at least one compute neuron per input port is required
    # NOTE(review): `throw(error(...))` is redundant — `error` already throws.
    if kfn.kfnParams[:computeNeuronNumber] < kfn.kfnParams[:totalInputPort]
        throw(error("number of compute neuron must be greater than input neuron"))
    end

    # # Bn
    # if kfn.kfnParams[:Bn] == "random"
    #     kfn.Bn = [Random.rand(0:0.001:1) for i in 1:kfn.kfnParams[:computeNeuronNumber]]
    # else # in case I want to specify manually
    #     kfn.Bn = [kfn.kfnParams[:Bn] for i in 1:kfn.kfnParams[:computeNeuronNumber]]
    # end

    # assign neurons ID by their position in kfn.neurons array because I think it is
    # straight forward way

    # add input port, it must be added before any other neuron types
    for (k, v) in kfn.kfnParams[:inputPort]
        current_type = kfn.kfnParams[:inputPort][k]
        for i = 1:current_type[:numbers]
            n_id = length(kfn.neuronsArray) + 1 # ID = position in neuronsArray
            neuron = init_neuron(n_id, current_type[:params], kfn.kfnParams)
            push!(kfn.neuronsArray, neuron)
        end
    end

    # add compute neurons (IDs continue after the input neurons)
    for (k, v) in kfn.kfnParams[:computeNeuron]
        current_type = kfn.kfnParams[:computeNeuron][k]
        for i = 1:current_type[:numbers]
            n_id = length(kfn.neuronsArray) + 1
            neuron = init_neuron(n_id, current_type[:params], kfn.kfnParams)
            push!(kfn.neuronsArray, neuron)
        end
    end

    # output neurons live in their own array; their IDs restart from 1
    for i = 1:kfn.kfnParams[:outputPort][:numbers]
        neuron = init_neuron(i, kfn.kfnParams[:outputPort][:params],
            kfn.kfnParams)
        push!(kfn.outputNeuronsArray, neuron)
    end

    # propagate the shared firing-rate target to every compute neuron
    for n in kfn.neuronsArray
        if typeof(n) <: computeNeuron
            n.firingRateTarget = kfn.kfnParams[:neuronFiringRateTarget]
        end
    end

    # excitatory neuron to inhabitory neuron = 60:40 % of computeNeuron
    ex_number = Int(floor((kfn.excitatoryPercent/100.0) * kfn.kfnParams[:computeNeuronNumber]))
    ex_n = [1 for i in 1:ex_number]
    in_number = kfn.kfnParams[:computeNeuronNumber] - ex_number
    in_n = [-1 for i in 1:in_number]
    ex_in = shuffle!([ex_n; in_n])

    # input neurons are always excitatory, compute_neurons are random between excitatory
    # and inhabitory
    # NOTE(review): this loop iterates over ALL neurons, and input neurons also
    # carry an `ExInType` field, so the pool — sized for compute neurons only —
    # may be partially drained by input neurons first; the bare `catch end`
    # then silently leaves later compute neurons at their default. Confirm
    # input neurons are meant to draw from this pool.
    for n in kfn.neuronsArray
        try n.ExInType = pop!(ex_in) catch end
    end

    # add ExInType into each computeNeuron subExInType
    for n in kfn.neuronsArray
        try # input neuron doest have n.subscriptionList
            for (i, sub_id) in enumerate(n.subscriptionList)
                n_ExInType = kfn.neuronsArray[sub_id].ExInType
                n.wRec[i] *= n_ExInType # the sign of wRec encodes excitatory/inhibitory
                # add id exin type to kfn
                if n_ExInType < 0
                    push!(kfn.nInhabitory, sub_id)
                else
                    push!(kfn.nExcitatory, sub_id)
                end
            end
        catch
        end
    end

    # # add ExInType into each output neuron subExInType
    # for n in kfn.outputNeuronsArray
    #     try # input neuron doest have n.subscriptionList
    #         for (i, sub_id) in enumerate(n.subscriptionList)
    #             n_ExInType = kfn.neuronsArray[sub_id].ExInType
    #             n.wRec[i] *= n_ExInType
    #         end
    #     catch
    #     end
    # end

    # flat list of every neuron's EX/IN type, index-aligned with neuronsArray
    for n in kfn.neuronsArray
        push!(kfn.nExInType, n.ExInType)
    end

    return kfn
end
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" passthroughNeuron struct

Input neuron that forwards its input as a firing state; it carries no weights
or learning state of its own.
"""
Base.@kwdef mutable struct passthroughNeuron <: inputNeuron
    id::Int64 = 0 # ID of this neuron which is it position in knowledgeFn array
    type::String = "passthroughNeuron"
    knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
    z_t::Bool = false # firing state at time t
    z_t1::Bool = false # firing state at time t+1
    timeStep::Int64 = 0 # current time
    ExInType::Int64 = 1 # 1 excitatory, -1 inhabitory. input neuron is always excitatory
end
|
||||
""" passthroughNeuron outer constructor: populate fields from a params Dict,
keeping defaults for keys that are absent. The `:optimiser` key, when present
and matching a field, is given as a dotted string (e.g. "Flux.ADAM") and is
resolved through `load_optimiser`.
"""
function passthroughNeuron(params::Dict)
    n = passthroughNeuron()
    for field in fieldnames(typeof(n))
        haskey(params, field) || continue
        if field == :optimiser
            # extract the optimiser type name from its dotted string form
            opt_type = string(split(params[field], ".")[end])
            setproperty!(n, field, load_optimiser(opt_type))
        else
            setproperty!(n, field, params[field])
        end
    end
    return n
end
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" lifNeuron struct

Leaky integrate-and-fire compute neuron: membrane/spike state, synaptic
weights, eligibility traces, and learning bookkeeping for the e-prop style
learning rule.
"""
Base.@kwdef mutable struct lifNeuron <: computeNeuron
    id::Int64 = 0 # this neuron ID i.e. position of this neuron in knowledgeFn
    type::String = "lifNeuron"
    ExInType::Int64 = 1 # 1 excitatory, -1 inhabitory
    knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
    subscriptionList::Array{Int64} = Int64[] # list of other neuron that this neuron synapse subscribed to
    timeStep::Int64 = 0 # current time
    wRec::Array{Float64} = Float64[] # synaptic weight (for receiving signal from other neuron)
    v_t::Float64 = 0.0 # vᵗ, postsynaptic neuron membrane potential of previous timestep
    v_t1::Float64 = rand() # vᵗ⁺¹, postsynaptic neuron membrane potential at current timestep
    v_th::Float64 = 1.0 # vᵗʰ, neuron firing threshold
    vRest::Float64 = 0.0 # resting potential after neuron fired
    z_t::Bool = false # zᵗ, neuron postsynaptic firing of previous timestep
    # zᵗ⁺¹, neuron firing status at time = t+1. I need this because the way I calculate all
    # neurons forward function at each timestep-by-timestep is to do every neuron
    # forward calculation. Each neuron requires access to other neuron's firing status
    # during v_t1 calculation hence I need a variable to hold z_t1 so that I'm not replacing z_t
    z_t1::Bool = false # neuron postsynaptic firing at current timestep (after neuron's calculation)
    z_i_t::Array{Bool} = Bool[] # neuron presynaptic firing at current timestep (which is other neuron postsynaptic firing of previous timestep)
    z_i_t_commulative::Array{Int64} = Int64[] # used to compute connection strength
    synapticStrength::Array{Float64} = Float64[]
    # NOTE(review): the limits are stored as Pairs (e.g. -5 => -5); consuming
    # code reads `lowerlimit[1]`, i.e. the Pair's first element — confirm a
    # plain number wasn't intended here.
    synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>-5), upperlimit=(5=>5))

    gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from paper
    alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
    alphaChange::Float64 = 0.0 # accumulated change to alpha
    phi::Float64 = 0.0 # ϕ, pseudo derivative
    epsilonRec::Array{Float64} = Float64[] # ϵ_rec, eligibility vector for neuron spike
    decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
    eRec::Array{Float64} = Float64[] # eligibility trace for neuron spike
    delta::Float64 = 1.0 # δ, discrete timestep size in millisecond
    refractoryDuration::Int64 = 3 # neuron's refractory period in millisecond
    refractoryCounter::Int64 = 0 # remaining refractory time
    tau_m::Float64 = 100.0 # τ_m, membrane time constant in millisecond
    eta::Float64 = 1e-3 # η, learning rate
    wRecChange::Array{Float64} = Float64[] # Δw_rec, cumulated wRec change
    recSignal::Float64 = 0.0 # incoming recurrent signal
    alpha_v_t::Float64 = 0.0 # alpha * v_t
    error::Float64 = 0.0 # local neuron error
    # optimiser::Union{Any,Nothing} = load_optimiser("AdaBelief") # Flux optimizer

    firingCounter::Int64 = 0 # store how many times neuron fires
    firingRateTarget::Float64 = 20.0 # neuron's target firing rate in Hz
    firingDiff::Float64 = 0.0 # e-prop supplement paper equation 5
    firingRateError::Float64 = 0.0 # local neuron error w.r.t. firing regularization
    firingRate::Float64 = 0.0 # running average of firing rate in Hz

    """ "inference" = no learning params will be collected.
        "learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
            correct answer is available then merge Δw_rec_change into wRecChange then
            reset epsilon_j.
        "reflect" = neuron will merge wRecChange into wRec then reset wRecChange. """
    learningStage::String = "inference"
end
|
||||
""" lif neuron outer constructor: populate fields from a params Dict, keeping
defaults for keys that are absent. The `:optimiser` key, when present and
matching a field, is given as a dotted string (e.g. "Flux.ADAM") and is
resolved through `load_optimiser`.

# Example
    lif_neuron_params = Dict(
        :type => "lifNeuron",
        :v_th => 1.2,               # firing threshold (maximum bound when auto-generated)
        :z_t => false,              # firing status at time = t
        :gammaPd => 0.3,            # discount factor (value from the paper)
        :refractoryDuration => 2.0, # refractory period in ticks
        :delta => 1.0,
        :tau_m => 5.0,              # membrane time constant in ms (≈ time for 1 sequence)
    )

    neuron1 = lifNeuron(lif_neuron_params)
"""
function lifNeuron(params::Dict)
    n = lifNeuron()
    for field in fieldnames(typeof(n))
        haskey(params, field) || continue
        if field == :optimiser
            # extract the optimiser type name from its dotted string form
            opt_type = string(split(params[field], ".")[end])
            setproperty!(n, field, load_optimiser(opt_type))
        else
            setproperty!(n, field, params[field])
        end
    end
    return n
end
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" alifNeuron struct
|
||||
"""
|
||||
Base.@kwdef mutable struct alifNeuron <: computeNeuron
|
||||
id::Int64 = 0 # this neuron ID i.e. position of this neuron in knowledgeFn
|
||||
type::String = "alifNeuron"
|
||||
ExInType::Int64 = -1 # 1 excitatory, -1 inhabitory
|
||||
knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
|
||||
subscriptionList::Array{Int64} = Int64[] # list of other neuron that this neuron synapse subscribed to
|
||||
timeStep::Int64 = 0 # current time
|
||||
wRec::Array{Float64} = Float64[] # synaptic weight (for receiving signal from other neuron)
|
||||
v_t::Float64 = 0.0 # vᵗ, postsynaptic neuron membrane potential of previous timestep
|
||||
v_t1::Float64 = rand() # vᵗ⁺¹, postsynaptic neuron membrane potential at current timestep
|
||||
v_th::Float64 = 1.0 # vᵗʰ, neuron firing threshold
|
||||
vRest::Float64 = 0.0 # resting potential after neuron fired
|
||||
z_t::Bool = false # zᵗ, neuron postsynaptic firing of previous timestep
|
||||
# zᵗ⁺¹, neuron firing status at time = t+1. I need this because the way I calculate all
|
||||
# neurons forward function at each timestep-by-timestep is to do every neuron
|
||||
# forward calculation. Each neuron requires access to other neuron's firing status
|
||||
# during v_t1 calculation hence I need a variable to hold z_t1 so that I'm not replacing z_t
|
||||
z_t1::Bool = false # neuron postsynaptic firing at current timestep (after neuron's calculation)
|
||||
z_i_t::Array{Bool} = Bool[] # neuron presynaptic firing at current timestep (which is other neuron postsynaptic firing of previous timestep)
|
||||
z_i_t_commulative::Array{Int64} = Int64[] # used to compute connection strength
|
||||
synapticStrength::Array{Float64} = Float64[]
|
||||
synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>0), upperlimit=(5=>5))
|
||||
|
||||
alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
|
||||
alphaChange::Float64 = 0.0
|
||||
delta::Float64 = 1.0 # δ, discreate timestep size in millisecond
|
||||
epsilonRec::Array{Float64} = Float64[] # ϵ_rec(v), eligibility vector for neuron i spike
|
||||
epsilonRecA::Array{Float64} = Float64[] # ϵ_rec(a)
|
||||
decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
|
||||
eRec_v::Array{Float64} = Float64[] # a component of neuron's eligibility trace resulted from v_t
|
||||
eRec_a::Array{Float64} = Float64[] # a component of neuron's eligibility trace resulted from av_th
|
||||
eRec::Array{Float64} = Float64[] # neuron's eligibility trace
|
||||
eta::Float64 = 1e-3 # eta, learning rate
|
||||
gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from paper
|
||||
phi::Float64 = 0.0 # ϕ, psuedo derivative
|
||||
refractoryDuration::Int64 = 3 # neuron's refractory period in millisecond
|
||||
refractoryCounter::Int64 = 0
|
||||
tau_m::Float64 = 100.0 # τ_m, membrane time constant in millisecond
|
||||
wRecChange::Array{Float64} = Float64[] # Δw_rec, cumulated wRec change
|
||||
recSignal::Float64 = 0.0 # incoming recurrent signal
|
||||
alpha_v_t::Float64 = 0.0 # alpha * v_t
|
||||
error::Float64 = 0.0 # local neuron error
|
||||
# optimiser::Union{Any,Nothing} = load_optimiser("AdaBelief") # Flux optimizer
|
||||
|
||||
firingCounter::Int64 = 0 # store how many times neuron fires
|
||||
firingRateTarget::Float64 = 20.0 # neuron's target firing rate in Hz
|
||||
firingDiff::Float64 = 0.0 # e-prop supplement paper equation 5
|
||||
firingRateError::Float64 = 0.0 # local neuron error w.r.t. firing regularization
|
||||
firingRate::Float64 = 0.0 # running average of firing rate, Hz
|
||||
|
||||
tau_a::Float64 = 100.0 # τ_a, adaption time constant in millisecond
|
||||
beta::Float64 = 0.15 # β, constant, value from paper
|
||||
rho::Float64 = 0.0 # ρ, threshold adaptation decay factor
|
||||
a::Float64 = 0.0 # threshold adaptation
|
||||
av_th::Float64 = 0.0 # adjusted neuron firing threshold
|
||||
|
||||
""" "inference" = no learning params will be collected.
|
||||
"learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
|
||||
correct answer is available then merge Δw_rec_change into wRecChange then
|
||||
reset epsilon_j.
|
||||
"reflect" = neuron will merge wRecChange into wRec then reset wRecChange. """
|
||||
learningStage::String = "inference"
|
||||
end
|
||||
""" alif neuron outer constructor
|
||||
|
||||
# Example
|
||||
|
||||
alif_neuron_params = Dict(
|
||||
:type => "alifNeuron",
|
||||
:v_th => 1.2, # neuron firing threshold (this value is treated as maximum bound if I
|
||||
use auto generate)
|
||||
:z_t => false, # neuron firing status at time = t
|
||||
:gammaPd => 0.3, # discount factor. The value is from the paper
|
||||
:refractoryDuration => 2.0, # neuron refractory period in millisecond
|
||||
:delta => 1.0,
|
||||
:tau_m => 5.0, # membrane time constant in millisecond. It should equals to time use
|
||||
for 1 sequence
|
||||
|
||||
# adaptation time constant in millisecond. It should equals to total time SNN takes to
|
||||
# perform a task i.e. equals to episode length
|
||||
:tau_a => 10.0,
|
||||
:beta => 0.15, # constant.
|
||||
:a => 0.0,
|
||||
)
|
||||
|
||||
neuron1 = alifNeuron(alif_neuron_params)
|
||||
"""
|
||||
function alifNeuron(params::Dict)
|
||||
n = alifNeuron()
|
||||
field_names = fieldnames(typeof(n))
|
||||
for i in field_names
|
||||
if i in keys(params)
|
||||
if i == :optimiser
|
||||
opt_type = string(split(params[i], ".")[end])
|
||||
n.:($i) = load_optimiser(opt_type)
|
||||
else
|
||||
n.:($i) = params[i] # assign params to n struct fields
|
||||
end
|
||||
end
|
||||
end
|
||||
return n
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
""" linearNeuron struct
|
||||
"""
|
||||
Base.@kwdef mutable struct linearNeuron <: outputNeuron
|
||||
id::Int64 = 0 # ID of this neuron which is it position in knowledgeFn array
|
||||
type::String = "linearNeuron"
|
||||
knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
|
||||
subscriptionList::Array{Int64} = Int64[] # list of other neuron that this neuron synapse subscribed to
|
||||
timeStep::Int64 = 0 # current time
|
||||
wRec::Array{Float64} = Float64[] # synaptic weight (for receiving signal from other neuron)
|
||||
v_t::Float64 = 0.0 # vᵗ, postsynaptic neuron membrane potential of previous timestep
|
||||
v_t1::Float64 = rand() # vᵗ⁺¹, postsynaptic neuron membrane potential at current timestep
|
||||
v_th::Float64 = 1.0 # vᵗʰ, neuron firing threshold
|
||||
vRest::Float64 = 0.0 # resting potential after neuron fired
|
||||
vError::Float64 = 0.0 # used to compute model error
|
||||
z_t::Bool = false # zᵗ, neuron postsynaptic firing of previous timestep
|
||||
# zᵗ⁺¹, neuron firing status at time = t+1. I need this because the way I calculate all
|
||||
# neurons forward function at each timestep-by-timestep is to do every neuron
|
||||
# forward calculation. Each neuron requires access to other neuron's firing status
|
||||
# during v_t1 calculation hence I need a variable to hold z_t1 so that I'm not replacing z_t
|
||||
z_t1::Bool = false # neuron postsynaptic firing at current timestep (after neuron's calculation)
|
||||
|
||||
# neuron presynaptic firing at current timestep (which is other neuron postsynaptic firing of
|
||||
# previous timestep)
|
||||
z_i_t::Array{Bool} = Bool[]
|
||||
z_i_t_commulative::Array{Int64} = Int64[] # used to compute connection strength
|
||||
synapticStrength::Array{Float64} = Float64[]
|
||||
synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>-5), upperlimit=(5=>5))
|
||||
|
||||
gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from paper
|
||||
alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
|
||||
phi::Float64 = 0.0 # ϕ, psuedo derivative
|
||||
epsilonRec::Array{Float64} = Float64[] # ϵ_rec, eligibility vector for neuron spike
|
||||
decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
|
||||
eRec::Array{Float64} = Float64[] # eligibility trace for neuron spike
|
||||
delta::Float64 = 1.0 # δ, discreate timestep size in millisecond
|
||||
refractoryDuration::Int64 = 3 # neuron's refratory period in millisecond
|
||||
refractoryCounter::Int64 = 0
|
||||
tau_out::Float64 = 50.0 # τ_out, membrane time constant in millisecond
|
||||
eta::Float64 = 1e-3 # η, learning rate
|
||||
wRecChange::Array{Float64} = Float64[] # Δw_rec, cumulated wRec change
|
||||
recSignal::Float64 = 0.0 # incoming recurrent signal
|
||||
alpha_v_t::Float64 = 0.0 # alpha * v_t
|
||||
|
||||
firingCounter::Int64 = 0 # store how many times neuron fires
|
||||
ExInSignalSum::Float64 = 0.0
|
||||
end
|
||||
|
||||
""" linear neuron outer constructor
|
||||
|
||||
# Example
|
||||
|
||||
linear_neuron_params = Dict(
|
||||
:type => "linearNeuron",
|
||||
:k => 0.9, # output leakink coefficient
|
||||
:tau_out => 5.0, # output time constant in millisecond. It should equals to time use for 1 sequence
|
||||
:out => 0.0, # neuron's output value store here
|
||||
)
|
||||
|
||||
neuron1 = linearNeuron(linear_neuron_params)
|
||||
"""
|
||||
function linearNeuron(params::Dict)
|
||||
n = linearNeuron()
|
||||
field_names = fieldnames(typeof(n))
|
||||
for i in field_names
|
||||
if i in keys(params)
|
||||
if i == :optimiser
|
||||
opt_type = string(split(params[i], ".")[end])
|
||||
n.:($i) = load_optimiser(opt_type)
|
||||
else
|
||||
n.:($i) = params[i] # assign params to n struct fields
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return n
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
""" integrateNeuron struct
|
||||
"""
|
||||
Base.@kwdef mutable struct integrateNeuron <: outputNeuron
|
||||
id::Int64 = 0 # ID of this neuron which is it position in knowledgeFn array
|
||||
type::String = "integrateNeuron"
|
||||
knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
|
||||
subscriptionList::Array{Int64} = Int64[] # list of other neuron that this neuron synapse subscribed to
|
||||
timeStep::Int64 = 0 # current time
|
||||
wRec::Array{Float64} = Float64[] # synaptic weight (for receiving signal from other neuron)
|
||||
v_t::Float64 = randn() # vᵗ, postsynaptic neuron membrane potential of previous timestep
|
||||
v_t1::Float64 = 0.0 # vᵗ⁺¹, postsynaptic neuron membrane potential at current timestep
|
||||
v_th::Float64 = 1.0 # vᵗʰ, neuron firing threshold
|
||||
vRest::Float64 = 0.0 # resting potential after neuron fired
|
||||
vError::Float64 = 0.0 # used to compute model error
|
||||
z_t::Bool = false # zᵗ, neuron postsynaptic firing of previous timestep
|
||||
# zᵗ⁺¹, neuron firing status at time = t+1. I need this because the way I calculate all
|
||||
# neurons forward function at each timestep-by-timestep is to do every neuron
|
||||
# forward calculation. Each neuron requires access to other neuron's firing status
|
||||
# during v_t1 calculation hence I need a variable to hold z_t1 so that I'm not replacing z_t
|
||||
z_t1::Bool = false # neuron postsynaptic firing at current timestep (after neuron's calculation)
|
||||
b::Float64 = 0.0
|
||||
bChange::Float64 = 0.0
|
||||
|
||||
# neuron presynaptic firing at current timestep (which is other neuron postsynaptic firing of
|
||||
# previous timestep)
|
||||
z_i_t::Array{Bool} = Bool[]
|
||||
z_i_t_commulative::Array{Int64} = Int64[] # used to compute connection strength
|
||||
synapticStrength::Array{Float64} = Float64[]
|
||||
synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>-5), upperlimit=(5=>5))
|
||||
|
||||
gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from paper
|
||||
alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
|
||||
alphaChange::Float64 = 0.0
|
||||
phi::Float64 = 0.0 # ϕ, psuedo derivative
|
||||
epsilonRec::Array{Float64} = Float64[] # ϵ_rec, eligibility vector for neuron spike
|
||||
decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
|
||||
eRec::Array{Float64} = Float64[] # eligibility trace for neuron spike
|
||||
delta::Float64 = 1.0 # δ, discreate timestep size in millisecond
|
||||
refractoryDuration::Int64 = 3 # neuron's refratory period in millisecond
|
||||
refractoryCounter::Int64 = 0
|
||||
tau_out::Float64 = 50.0 # τ_out, membrane time constant in millisecond
|
||||
eta::Float64 = 1e-3 # η, learning rate
|
||||
wRecChange::Array{Float64} = Float64[] # Δw_rec, cumulated wRec change
|
||||
recSignal::Float64 = 0.0 # incoming recurrent signal
|
||||
alpha_v_t::Float64 = 0.0 # alpha * v_t
|
||||
|
||||
firingCounter::Int64 = 0 # store how many times neuron fires
|
||||
ExInSignalSum::Float64 = 0.0
|
||||
end
|
||||
|
||||
""" linear neuron outer constructor
|
||||
|
||||
# Example
|
||||
|
||||
linear_neuron_params = Dict(
|
||||
:type => "linearNeuron",
|
||||
:k => 0.9, # output leakink coefficient
|
||||
:tau_out => 5.0, # output time constant in millisecond. It should equals to time use for 1 sequence
|
||||
:out => 0.0, # neuron's output value store here
|
||||
)
|
||||
|
||||
neuron1 = linearNeuron(linear_neuron_params)
|
||||
"""
|
||||
function integrateNeuron(params::Dict)
|
||||
n = integrateNeuron()
|
||||
field_names = fieldnames(typeof(n))
|
||||
for i in field_names
|
||||
if i in keys(params)
|
||||
if i == :optimiser
|
||||
opt_type = string(split(params[i], ".")[end])
|
||||
n.:($i) = load_optimiser(opt_type)
|
||||
else
|
||||
n.:($i) = params[i] # assign params to n struct fields
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return n
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
# function load_optimiser(optimiser_name::String; params::Union{Dict,Nothing} = nothing)
|
||||
# if optimiser_name == "AdaBelief"
|
||||
# params = (0.01, (0.9, 0.8))
|
||||
# return Flux.Optimise.AdaBelief(params...)
|
||||
# elseif optimiser_name == "AdaBelief2"
|
||||
# # output neuron requires slower change pace so η is lower than compute neuron at 0.007
|
||||
# # because if w_out change too fast, compute neuron will not able to
|
||||
# # grapse output neuron moving direction i.e. both compute neuron's direction and
|
||||
# # output neuron direction are out of sync.
|
||||
# params = (0.007, (0.9, 0.8))
|
||||
# return Flux.Optimise.AdaBelief(params...)
|
||||
# else
|
||||
# error("optimiser is not defined yet in load_optimiser()")
|
||||
# end
|
||||
# end
|
||||
|
||||
""" Initialise a passthrough neuron in place.

A passthrough neuron carries no weights or learning state, so only the
identity fields are set. `n_params` is accepted purely for interface
uniformity with the other `init_neuron!` methods and is unused here.
"""
function init_neuron!(id::Int64, n::passthroughNeuron, n_params::Dict, kfnParams::Dict)
    setproperty!(n, :id, id)
    setproperty!(n, :knowledgeFnName, kfnParams[:knowledgeFnName])
end
|
||||
|
||||
# function init_neuron!(id::Int64, n::lifNeuron, kfnParams::Dict)
|
||||
# n.id = id
|
||||
# n.knowledgeFnName = kfnParams[:knowledgeFnName]
|
||||
# subscription_options = shuffle!([1:(kfnParams[:input_neuron_number]+kfnParams[:computeNeuronNumber])...])
|
||||
# if typeof(kfnParams[:synapticConnectionPercent]) == String
|
||||
# percent = parse(Int, kfnParams[:synapticConnectionPercent][1:end-1]) / 100
|
||||
# synapticConnectionPercent = floor(length(subscription_options) * percent)
|
||||
# n.subscriptionList = [pop!(subscription_options) for i = 1:synapticConnectionPercent]
|
||||
# end
|
||||
# filter!(x -> x != n.id, n.subscriptionList)
|
||||
# n.epsilonRec = zeros(length(n.subscriptionList))
|
||||
# n.wRec = Random.rand(length(n.subscriptionList))
|
||||
# n.wRecChange = zeros(length(n.subscriptionList))
|
||||
# n.reg_voltage_b = zeros(length(n.subscriptionList))
|
||||
# n.alpha = calculate_α(n)
|
||||
# end
|
||||
|
||||
""" Initialise a freshly constructed `lifNeuron` in place: pick a random
subscription list and allocate all per-synapse state to match its length.

`n_params[:synapticConnectionPercent]` is the percentage (0–100) of
`kfnParams[:totalNeurons]` that this neuron subscribes to.
"""
function init_neuron!(id::Int64, n::lifNeuron, n_params::Dict, kfnParams::Dict)
    n.id = id
    n.knowledgeFnName = kfnParams[:knowledgeFnName]
    # candidate pool: every neuron id, in random order
    subscription_options = shuffle!([1:kfnParams[:totalNeurons]...])
    subscription_numbers = Int(floor((n_params[:synapticConnectionPercent] / 100.0) *
                                     kfnParams[:totalNeurons]))
    n.subscriptionList = [pop!(subscription_options) for i = 1:subscription_numbers]

    # prevent subscription to itself by removing this neuron id
    filter!(x -> x != n.id, n.subscriptionList)
    # initial connection strengths drawn from a small negative range
    n.synapticStrength = rand(-4.5:0.01:-4, length(n.subscriptionList))

    n.epsilonRec = zeros(length(n.subscriptionList))
    # n.wRec = randn(length(n.subscriptionList))
    # small initial weights; `rng` is presumably a module-level RNG — TODO confirm
    n.wRec = randn(rng, length(n.subscriptionList)) / 100
    n.wRecChange = zeros(length(n.subscriptionList))
    n.alpha = calculate_α(n) # membrane decay factor exp(-δ/τ_m)
    n.z_i_t_commulative = zeros(length(n.subscriptionList))
end
|
||||
|
||||
""" Initialise a freshly constructed `alifNeuron` in place: pick a random
subscription list and allocate all per-synapse state (including the
adaptation eligibility vector `epsilonRecA`) to match its length.

`n_params[:synapticConnectionPercent]` is the percentage (0–100) of
`kfnParams[:totalNeurons]` that this neuron subscribes to.
"""
function init_neuron!(id::Int64, n::alifNeuron, n_params::Dict,
    kfnParams::Dict)
    n.id = id
    n.knowledgeFnName = kfnParams[:knowledgeFnName]
    # candidate pool: every neuron id, in random order
    subscription_options = shuffle!([1:kfnParams[:totalNeurons]...])
    subscription_numbers = Int(floor((n_params[:synapticConnectionPercent] / 100.0) *
                                     kfnParams[:totalNeurons]))
    n.subscriptionList = [pop!(subscription_options) for i = 1:subscription_numbers]

    # prevent subscription to itself by removing this neuron id
    filter!(x -> x != n.id, n.subscriptionList)
    # initial connection strengths drawn from a small negative range
    n.synapticStrength = rand(-4.5:0.01:-4, length(n.subscriptionList))

    n.epsilonRec = zeros(length(n.subscriptionList))
    # small initial weights; `rng` is presumably a module-level RNG — TODO confirm
    n.wRec = randn(rng, length(n.subscriptionList)) / 100 # TODO use abs()
    n.wRecChange = zeros(length(n.subscriptionList))

    # the more time has passed from the last time neuron was activated, the more
    # neuron membrane potential is reduced
    n.alpha = calculate_α(n) # membrane decay factor exp(-δ/τ_m)
    n.rho = calculate_ρ(n)   # threshold-adaptation decay factor exp(-δ/τ_a)
    n.epsilonRecA = zeros(length(n.subscriptionList))
    n.z_i_t_commulative = zeros(length(n.subscriptionList))
end
|
||||
|
||||
""" Initialise a freshly constructed `integrateNeuron` in place.

Output/integrate neurons subscribe only to non-input neurons, so the
candidate pool is `totalInputPort+1 : totalNeurons`. The subscription count
is `synapticConnectionPercent` percent of that pool size.

BUGFIX: the original computed
`(pct / 100.0) * totalNeurons - kfnParams[:totalInputPort]` — the subtraction
applied *after* the percentage because of operator precedence. That count
could go negative or exceed the pool, crashing the `pop!` comprehension.
The percentage now applies to the actual pool size and is clamped to it.

NOTE(review): unlike the lif/alif initialisers there is no
`filter!(x -> x != n.id, ...)` here — confirm whether output-neuron ids share
the compute-neuron id space and need the same self-subscription guard.
"""
function init_neuron!(id::Int64, n::integrateNeuron, n_params::Dict, kfnParams::Dict)
    n.id = id
    n.knowledgeFnName = kfnParams[:knowledgeFnName]

    # candidate pool: all non-input neuron ids, in random order
    subscription_options = shuffle!([kfnParams[:totalInputPort]+1 : kfnParams[:totalNeurons]...])
    pool_size = kfnParams[:totalNeurons] - kfnParams[:totalInputPort]
    subscription_numbers = min(
        Int(floor((n_params[:synapticConnectionPercent] / 100.0) * pool_size)),
        length(subscription_options))
    n.subscriptionList = [pop!(subscription_options) for i = 1:subscription_numbers]
    # initial connection strengths drawn from a small negative range
    n.synapticStrength = rand(-4.5:0.01:-4, length(n.subscriptionList))

    n.epsilonRec = zeros(length(n.subscriptionList))
    # small initial weights; `rng` is presumably a module-level RNG — TODO confirm
    n.wRec = randn(rng, length(n.subscriptionList)) / 100
    n.wRecChange = zeros(length(n.subscriptionList))
    n.alpha = calculate_k(n) # output decay factor exp(-δ/τ_out)
    n.z_i_t_commulative = zeros(length(n.subscriptionList))
    n.b = randn(rng) / 100 # small random initial bias
end
|
||||
|
||||
# function init_neuron!(id::Int64, n::linearNeuron, n_params::Dict, kfnParams::Dict)
|
||||
# n.id = id
|
||||
# n.knowledgeFnName = kfnParams[:knowledgeFnName]
|
||||
|
||||
# subscription_options = shuffle!([kfnParams[:totalInputPort]+1 : kfnParams[:totalNeurons]...])
|
||||
# subscription_numbers = Int(floor((n_params[:synapticConnectionPercent] / 100.0) *
|
||||
# kfnParams[:totalNeurons] - kfnParams[:totalInputPort]))
|
||||
# n.subscriptionList = [pop!(subscription_options) for i = 1:subscription_numbers]
|
||||
# n.synapticStrength = rand(-4.5:0.01:-4, length(n.subscriptionList))
|
||||
|
||||
# n.epsilonRec = zeros(length(n.subscriptionList))
|
||||
# n.wRec = randn(rng, length(n.subscriptionList)) / 100
|
||||
# n.wRecChange = zeros(length(n.subscriptionList))
|
||||
# n.alpha = calculate_k(n)
|
||||
# n.z_i_t_commulative = zeros(length(n.subscriptionList))
|
||||
# end
|
||||
|
||||
""" Make a neuron intended for use with knowledgeFn
|
||||
"""
|
||||
function init_neuron(id::Int64, n_params::Dict, kfnParams::Dict)
|
||||
n = instantiate_custom_types(n_params)
|
||||
init_neuron!(id, n, n_params, kfnParams)
|
||||
|
||||
return n
|
||||
end
|
||||
|
||||
""" This function instantiate Ironpen type.
|
||||
|
||||
# Example
|
||||
|
||||
new_model = instantiate_custom_types("model")
|
||||
"""
|
||||
function instantiate_custom_types(params::Union{Dict,Nothing} = nothing)
|
||||
type = string(split(params[:type], ".")[end])
|
||||
|
||||
if type == "model"
|
||||
return model()
|
||||
elseif type == "knowledgeFn"
|
||||
return knowledgeFn()
|
||||
elseif type == "passthroughNeuron"
|
||||
return passthroughNeuron(params)
|
||||
elseif type == "lifNeuron"
|
||||
return lifNeuron(params)
|
||||
elseif type == "alifNeuron"
|
||||
return alifNeuron(params)
|
||||
elseif type == "linearNeuron"
|
||||
return linearNeuron(params)
|
||||
elseif type == "integrateNeuron"
|
||||
return integrateNeuron(params)
|
||||
else
|
||||
return nothing
|
||||
end
|
||||
end
|
||||
|
||||
""" Add a new neuron into a knowledgeFn
|
||||
|
||||
# Example
|
||||
add_neuron!(kfn.kfnParams[:lif_neuron_params], kfn)
|
||||
"""
|
||||
# function add_neuron!(neuron_Dict::Dict, kfn::knowledgeFn)
|
||||
# id = length(kfn.neuronsArray) + 1
|
||||
# neuron = init_neuron(id, neuron_Dict, kfn.kfnParams,
|
||||
# totalNeurons = (length(kfn.neuronsArray) + 1))
|
||||
# push!(kfn.neuronsArray, neuron)
|
||||
|
||||
# # Randomly select an output neuron to add a new neuron to
|
||||
# add_n_output_n!(Random.rand(kfn.outputNeuronsArray), id)
|
||||
# end
|
||||
|
||||
# Per-timestep decay factors, all of the form exp(-δ/τ): the larger the time
# constant τ relative to the timestep size δ, the closer the factor is to 1
# (i.e. the slower the decay).
calculate_α(neuron::lifNeuron) = exp(-neuron.delta / neuron.tau_m)   # membrane decay
calculate_α(neuron::alifNeuron) = exp(-neuron.delta / neuron.tau_m)  # membrane decay
calculate_ρ(neuron::alifNeuron) = exp(-neuron.delta / neuron.tau_a)  # threshold-adaptation decay
calculate_k(neuron::linearNeuron) = exp(-neuron.delta / neuron.tau_out)    # output decay
calculate_k(neuron::integrateNeuron) = exp(-neuron.delta / neuron.tau_out) # output decay
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # module end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
15
previousVersion/0.0.5_25percentAccuracy/test/etc3.jl
Normal file
15
previousVersion/0.0.5_25percentAccuracy/test/etc3.jl
Normal file
@@ -0,0 +1,15 @@
|
||||
src_folder = "C:\\myWork\\my_projects\\AI\\NLP\\my_NLP\\Ironpen_ai\\src"
|
||||
|
||||
include("$src_folder/Utils.jl")
|
||||
using .Utils
|
||||
|
||||
|
||||
pub = "ch1"
|
||||
sub = "ch2"
|
||||
|
||||
function p(x)
|
||||
println("function called")
|
||||
return x + 1
|
||||
end
|
||||
|
||||
service_server("192.168.0.10", pub, sub, "testserver", p)
|
||||
@@ -0,0 +1,27 @@
|
||||
using Revise
|
||||
using Ironpen_ai
|
||||
using DataStructures
|
||||
using JSON3
|
||||
using Redis
|
||||
|
||||
|
||||
|
||||
# file_location = "C:\\myWork\\my_projects\\AI\\NLP\\my_NLP\\Ironpen_ai\\"
|
||||
# filename = "tonModel_2.json"
|
||||
# jsonString = read(file_location * filename, String)
|
||||
# jsonObject = JSON3.read(jsonString)
|
||||
# model_data = OrderedDict(jsonObject)
|
||||
|
||||
# Ironpen_ai.data_prep_for_db(1, 1, 1, model_data)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -34,8 +34,10 @@ using .learn
|
||||
|
||||
""" version 0.0.6
|
||||
Todo:
|
||||
[*1] if neuron not fire for a long time, reduce it conn strength
|
||||
[DONE] use abs(wRec) during neuron init
|
||||
[] use partial error update for computeNeuron
|
||||
[] use integrate_neuron_params synapticConnectionPercent = 20%
|
||||
[] add liquid time constant
|
||||
[DONE] if neuron not fire for a long time, reduce it conn strength
|
||||
[2] implement dormant connection and pruning machanism. the longer the training the longer
|
||||
0 weight stay 0.
|
||||
[] using RL to control learning signal
|
||||
|
||||
@@ -60,9 +60,11 @@ function (kfn::kfn_1)(m::model, input_data::AbstractVector)
|
||||
end
|
||||
|
||||
# generate noise
|
||||
noise = [GeneralUtils.randomChoiceWithProb([true, false],[0.2, 0.8])
|
||||
noise = [GeneralUtils.randomChoiceWithProb([true, false],[0.01, 0.99])
|
||||
for i in 1:length(input_data)]
|
||||
# noise = [rand(rng, Distributions.Binomial(1, 0.5)) for i in 1:10] # another option
|
||||
# noise = [kfn.timeStep % 50 == 0
|
||||
# for i in 1:length(input_data)]
|
||||
|
||||
input_data = [noise; input_data] # noise must start from neuron id 1
|
||||
|
||||
@@ -95,8 +97,8 @@ function (kfn::kfn_1)(m::model, input_data::AbstractVector)
|
||||
|
||||
return sum(kfn.firedNeurons_t1[kfn.kfnParams[:totalInputPort]+1:end])::Int,
|
||||
logit::Array{Float64},
|
||||
[i for i in kfn.neuronsArray[end].wRec[1:10]],
|
||||
[sum(i.wRec) for i in kfn.outputNeuronsArray],
|
||||
[i for i in kfn.neuronsArray[101].wRec[1:10]],
|
||||
[i.v_t1 for i in kfn.neuronsArray[101:110]],
|
||||
[sum(i.epsilonRec) for i in kfn.outputNeuronsArray],
|
||||
[sum(i.wRecChange) for i in kfn.outputNeuronsArray]
|
||||
end
|
||||
@@ -136,6 +138,8 @@ function (n::lifNeuron)(kfn::knowledgeFn)
|
||||
n.epsilonRec = n.decayedEpsilonRec
|
||||
else
|
||||
n.recSignal = sum(n.wRec .* n.z_i_t) # signal from other neuron that this neuron subscribed
|
||||
|
||||
# computeAlpha!(n)
|
||||
n.alpha_v_t = n.alpha * n.v_t
|
||||
n.v_t1 = n.alpha_v_t + n.recSignal
|
||||
# n.v_t1 = no_negative!(n.v_t1)
|
||||
@@ -183,6 +187,7 @@ function (n::alifNeuron)(kfn::knowledgeFn)
|
||||
else
|
||||
n.av_th = n.v_th + (n.beta * n.a)
|
||||
n.recSignal = sum(n.wRec .* n.z_i_t) # signal from other neuron that this neuron subscribed
|
||||
# computeAlpha!(n)
|
||||
n.alpha_v_t = n.alpha * n.v_t
|
||||
n.v_t1 = n.alpha_v_t + n.recSignal
|
||||
# n.v_t1 = no_negative!(n.v_t1)
|
||||
@@ -215,7 +220,7 @@ function (n::linearNeuron)(kfn::T) where T<:knowledgeFn
|
||||
n.timeStep = kfn.timeStep
|
||||
|
||||
# pulling other neuron's firing status at time t
|
||||
n.z_i_t = getindex(kfn.firedNeurons_t1, n.subscriptionList)
|
||||
n.z_i_t = getindex(kfn.firedNeurons_t0, n.subscriptionList)
|
||||
n.z_i_t_commulative += n.z_i_t
|
||||
|
||||
if n.refractoryCounter != 0
|
||||
@@ -228,18 +233,18 @@ function (n::linearNeuron)(kfn::T) where T<:knowledgeFn
|
||||
|
||||
# decay of v_t1
|
||||
n.v_t1 = n.alpha * n.v_t
|
||||
n.vError = n.v_t1 # store voltage that will be used to calculate error later
|
||||
|
||||
n.phi = 0.0
|
||||
n.decayedEpsilonRec = n.alpha * n.epsilonRec
|
||||
n.epsilonRec = n.decayedEpsilonRec
|
||||
else
|
||||
recSignal = n.wRec .* n.z_i_t
|
||||
n.recSignal = sum(recSignal) # signal from other neuron that this neuron subscribed
|
||||
n.recSignal = sum(n.wRec .* n.z_i_t) # signal from other neuron that this neuron subscribed
|
||||
|
||||
# computeAlpha!(n)
|
||||
n.alpha_v_t = n.alpha * n.v_t
|
||||
n.v_t1 = n.alpha_v_t + n.recSignal
|
||||
# n.v_t1 = no_negative!(n.v_t1)
|
||||
n.vError = n.v_t1 # store voltage that will be used to calculate error later
|
||||
|
||||
if n.v_t1 > n.v_th
|
||||
n.z_t1 = true
|
||||
n.refractoryCounter = n.refractoryDuration
|
||||
@@ -267,7 +272,8 @@ function (n::integrateNeuron)(kfn::knowledgeFn)
|
||||
n.z_i_t = getindex(kfn.firedNeurons_t0, n.subscriptionList)
|
||||
n.z_i_t_commulative += n.z_i_t
|
||||
|
||||
n.recSignal = sum(n.wRec .* n.z_i_t) # signal from other neuron that this neuron subscribed
|
||||
n.recSignal = sum(n.wRec .* n.z_i_t) # signal from other neuron that this neuron
|
||||
# computeAlpha!(n)
|
||||
n.alpha_v_t = n.alpha * n.v_t
|
||||
if n.recSignal <= 0
|
||||
n.v_t1 = n.alpha_v_t
|
||||
|
||||
202
src/learn.jl
202
src/learn.jl
@@ -4,13 +4,13 @@ using Statistics, Random, LinearAlgebra, JSON3, Flux
|
||||
using GeneralUtils
|
||||
using ..types, ..snn_utils
|
||||
|
||||
export learn!, compute_wRecChange!, computeModelError
|
||||
export learn!, compute_paramsChange!, computeModelError
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
function compute_wRecChange!(m::model, modelError::Float64, outputError::Vector{Float64})
|
||||
function compute_paramsChange!(m::model, modelError::Float64, outputError::Vector{Float64})
|
||||
# normalize!(modelError)
|
||||
compute_wRecChange!(m.knowledgeFn[:I], modelError, outputError)
|
||||
compute_paramsChange!(m.knowledgeFn[:I], modelError, outputError)
|
||||
end
|
||||
|
||||
# function compute_wRecChange!(kfn::kfn_1, errors::Vector{Float64}, correctAnswer::AbstractVector)
|
||||
@@ -40,20 +40,54 @@ end
|
||||
# end
|
||||
# end
|
||||
|
||||
# function compute_paramsChange!(kfn::kfn_1, modelError::Float64, outputError::Vector{Float64})
|
||||
|
||||
# Threads.@threads for n in kfn.neuronsArray
|
||||
# # for n in kfn.neuronsArray
|
||||
# if typeof(n) <: computeNeuron
|
||||
# # wIndex = findall(isequal.(oN.subscriptionList, n.id)) # use for error projection
|
||||
# wOut = [oN.wRec[findall(isequal.(oN.subscriptionList, n.id))[1]]
|
||||
# for oN in kfn.outputNeuronsArray]
|
||||
|
||||
# compute_wRecChange!(n, wOut, modelError)
|
||||
# # compute_alphaChange!(n, modelError)
|
||||
# compute_firingRateError!(n, kfn.kfnParams[:neuronFiringRateTarget],
|
||||
# kfn.kfnParams[:totalComputeNeuron])
|
||||
# end
|
||||
# end
|
||||
|
||||
# for oN in kfn.outputNeuronsArray
|
||||
# compute_wRecChange!(oN, outputError[oN.id])
|
||||
# # compute_alphaChaZnge!(oN, outputError[oN.id])
|
||||
# end
|
||||
# end
|
||||
|
||||
function compute_paramsChange!(kfn::kfn_1, modelError::Float64, outputError::Vector{Float64})
|
||||
|
||||
function compute_wRecChange!(kfn::kfn_1, modelError::Float64, outputError::Vector{Float64})
|
||||
Threads.@threads for n in kfn.neuronsArray
|
||||
# for n in kfn.neuronsArray
|
||||
if typeof(n) <: computeNeuron
|
||||
# wIndex = findall(isequal.(oN.subscriptionList, n.id))
|
||||
wOut = abs.([oN.wRec[findall(isequal.(oN.subscriptionList, n.id))[1]]
|
||||
for oN in kfn.outputNeuronsArray])
|
||||
#WORKING
|
||||
wOut = Int64[]
|
||||
for oN in kfn.outputNeuronsArray
|
||||
wIndex = findall(isequal.(oN.subscriptionList, n.id))
|
||||
if length(wIndex) != 0
|
||||
push!(wOut, wIndex[1])
|
||||
end
|
||||
end
|
||||
|
||||
if length(wOut) != 0
|
||||
compute_wRecChange!(n, wOut, modelError)
|
||||
# compute_alphaChange!(n, modelError)
|
||||
compute_firingRateError!(n, kfn.kfnParams[:neuronFiringRateTarget],
|
||||
kfn.kfnParams[:totalComputeNeuron])
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
for oN in kfn.outputNeuronsArray
|
||||
compute_wRecChange!(oN, outputError[oN.id])
|
||||
# compute_alphaChaZnge!(oN, outputError[oN.id])
|
||||
end
|
||||
end
|
||||
|
||||
@@ -61,7 +95,7 @@ function compute_wRecChange!(n::passthroughNeuron, wOut::AbstractVector, modelEr
|
||||
# skip
|
||||
end
|
||||
|
||||
function compute_wRecChange!(n::lifNeuron, wOut::AbstractVector, modelError::Float64)
|
||||
function compute_wRecChange!(n::lifNeuron, wOut::AbstractVector, modelError::Float64, )
|
||||
# how much error of this neuron 1-spike causing each output neuron's error
|
||||
nError = sum(wOut * modelError)
|
||||
|
||||
@@ -71,7 +105,8 @@ function compute_wRecChange!(n::lifNeuron, wOut::AbstractVector, modelError::Flo
|
||||
# ΔwRecChange .+= (0.2*(abs(sum(n.wRec)) / length(n.wRec)))
|
||||
# end
|
||||
n.wRecChange .+= ΔwRecChange
|
||||
reset_epsilonRec!(n)
|
||||
|
||||
# n.alphaChange += compute_alphaChange(n.eta, nError)
|
||||
end
|
||||
|
||||
function compute_wRecChange!(n::alifNeuron, wOut::AbstractVector, modelError::Float64)
|
||||
@@ -88,11 +123,18 @@ function compute_wRecChange!(n::alifNeuron, wOut::AbstractVector, modelError::Fl
|
||||
# end
|
||||
n.wRecChange .+= ΔwRecChange
|
||||
|
||||
reset_epsilonRec!(n)
|
||||
reset_epsilonRecA!(n)
|
||||
# n.alphaChange += compute_alphaChange(n.eta, nError)
|
||||
end
|
||||
|
||||
function compute_wRecChange!(n::linearNeuron, error::Float64)
|
||||
n.eRec = n.phi * n.epsilonRec
|
||||
ΔwRecChange = -n.eta * error * n.eRec
|
||||
# if sum(n.wRec) < 0 # prevent -sum(wRec) that causing neuron NOT fire at all
|
||||
# ΔwRecChange .+= (abs(sum(n.wRec)) / length(n.wRec))
|
||||
# end
|
||||
n.wRecChange .+= ΔwRecChange
|
||||
end
|
||||
|
||||
function compute_wRecChange!(n::integrateNeuron, error::Float64)
|
||||
ΔwRecChange = -n.eta * error * n.epsilonRec
|
||||
ΔbChange = -n.eta * error
|
||||
@@ -101,22 +143,19 @@ function compute_wRecChange!(n::integrateNeuron, error::Float64)
|
||||
# end
|
||||
n.wRecChange .+= ΔwRecChange
|
||||
n.bChange += ΔbChange
|
||||
reset_epsilonRec!(n)
|
||||
|
||||
# n.alphaChange += compute_alphaChange(n.eta, error)
|
||||
end
|
||||
|
||||
# function compute_wRecChange!(n::linearNeuron, error::Float64)
|
||||
# n.eRec = n.phi * n.epsilonRec
|
||||
# ΔwRecChange = -n.eta * error * n.eRec
|
||||
# # if sum(n.wRec) < 0 # prevent -sum(wRec) that causing neuron NOT fire at all
|
||||
# # ΔwRecChange .+= (abs(sum(n.wRec)) / length(n.wRec))
|
||||
# # end
|
||||
# n.wRecChange .+= ΔwRecChange
|
||||
# # reset_epsilonRec!(n)
|
||||
# end
|
||||
|
||||
# add compute_alphaChange
|
||||
compute_alphaChange(learningRate::Float64, total_wRecChange) = -learningRate * total_wRecChange
|
||||
function compute_firingRateError!(n::computeNeuron, firingRateTarget, totalComputeNeuron)
|
||||
# compute frequency error --> 1-timeStep of kfn runs fires X neurons
|
||||
# (frequency from kfn perspective)
|
||||
n.firingRateTarget = n.timeStep * firingRateTarget / totalComputeNeuron
|
||||
n.firingRate = n.firingCounter / n.timeStep
|
||||
error = n.firingRate - n.firingRateTarget
|
||||
ΔwRecChange = -n.eta * 0.1 * sign(error) * error^2
|
||||
n.wRecChange .+= ΔwRecChange
|
||||
end
|
||||
|
||||
function learn!(m::model)
|
||||
learn!(m.knowledgeFn[:I])
|
||||
@@ -125,11 +164,14 @@ end
|
||||
""" knowledgeFn learn()
|
||||
"""
|
||||
function learn!(kfn::kfn_1)
|
||||
|
||||
# compute kfn error for each neuron
|
||||
Threads.@threads for n in kfn.neuronsArray # multithread is not atomic and causing error
|
||||
# for n in kfn.neuronsArray
|
||||
# for n in kfn.neuronsArray totalNeuronFired
|
||||
if typeof(n) <: computeNeuron
|
||||
learn!(n, kfn.firedNeurons, kfn.nExInType)
|
||||
end
|
||||
end
|
||||
for n in kfn.outputNeuronsArray
|
||||
learn!(n, kfn.firedNeurons, kfn.nExInType, kfn.kfnParams[:totalInputPort])
|
||||
end
|
||||
@@ -145,15 +187,11 @@ function learn!(n::T, firedNeurons, nExInType) where T<:inputNeuron
|
||||
end
|
||||
|
||||
function learn!(n::T, firedNeurons, nExInType) where T<:computeNeuron
|
||||
|
||||
wSign_0 = sign.(n.wRec) # original sign
|
||||
# n.wRecChange .*= (connStrengthAdjust.(n.synapticStrength))
|
||||
|
||||
wRecChange_reduceCoeff = 1.0
|
||||
# wRecChange_max = 0.2 * abs(sum(n.wRec)) # max change 20%
|
||||
# y = abs(sum(n.wRecChange))
|
||||
# if y > wRecChange_max # capping weight update
|
||||
# wRecChange_reduceCoeff = wRecChange_max / y
|
||||
# end
|
||||
n.wRec += (wRecChange_reduceCoeff * n.wRecChange)
|
||||
# n.alpha += n.alphaChange
|
||||
|
||||
@@ -163,63 +201,113 @@ function learn!(n::T, firedNeurons, nExInType) where T<:computeNeuron
|
||||
# if sum(n.wRecChange) != 0
|
||||
# normalizePeak!(n.wRec, n.wRecChange, 2)
|
||||
# end
|
||||
|
||||
# set weight that fliped sign to 0 for random new connection
|
||||
n.wRec .*= nonFlipedSign
|
||||
# n.wRec = wRecMaxWeight!(n, max=1.0) # cap maximum weight
|
||||
n.wRec = wRecMaxWeight!(n, max=1.0) # cap maximum weight
|
||||
|
||||
# learn alpha
|
||||
# n.alpha_wSignal += n.alpha_wSignalChange
|
||||
# n.alpha_wPotential += n.alpha_wPotentialChange
|
||||
# n.alpha_b += n.alpha_bChange
|
||||
# n.alpha_wSignalChange *= 0.0
|
||||
# n.alpha_wPotentialChange *= 0.0
|
||||
# n.alpha_bChange *= 0.0
|
||||
# computeAlpha!(n)
|
||||
|
||||
# check for non firing. if neuron not fire for too long, reduce all connection strength
|
||||
if n.id ∈ firedNeurons
|
||||
n.notFireCounter = n.notFireTimeOut
|
||||
synapticConnStrength!(n, "updown")
|
||||
n.notFireTimeOut = 0
|
||||
elseif n.id ∉ firedNeurons && n.notFireCounter != n.notFireTimeOut
|
||||
n.notFireTimeOut += 1
|
||||
synapticConnStrength!(n, "updown")
|
||||
elseif n.id ∉ firedNeurons && n.notFireCounter == n.notFireCounter
|
||||
elseif n.id ∉ firedNeurons && n.notFireCounter == n.notFireTimeOut
|
||||
synapticConnStrength!(n, "down")
|
||||
else
|
||||
error("undefined condition line $(@__LINE__)")
|
||||
end
|
||||
|
||||
synapticConnStrength!(n, "updown")
|
||||
neuroplasticity!(n, firedNeurons, nExInType)
|
||||
end
|
||||
|
||||
function learn!(n::integrateNeuron, firedNeurons, nExInType, totalInputPort)
|
||||
function learn!(n::linearNeuron, firedNeurons, nExInType, totalInputPort)
|
||||
wSign_0 = sign.(n.wRec) # original sign
|
||||
# n.wRecChange .*= (connStrengthAdjust.(n.synapticStrength))
|
||||
wRecChange_max = 0.1 * abs(sum(n.wRec)) # max change 20%
|
||||
y = abs(sum(n.wRecChange))
|
||||
wRecChange_reduceCoeff = 1.0
|
||||
# wRecChange_max = 0.2 * abs(sum(n.wRec)) # max change 20%
|
||||
# y = abs(sum(n.wRecChange))
|
||||
# if y > wRecChange_max # capping weight update
|
||||
# wRecChange_reduceCoeff = wRecChange_max / y
|
||||
# end
|
||||
n.wRec += (wRecChange_reduceCoeff * n.wRecChange)
|
||||
n.b += (wRecChange_reduceCoeff * n.bChange)
|
||||
# n.alpha += n.alphaChange
|
||||
end
|
||||
n.alpha += n.alphaChange
|
||||
|
||||
# function learn!(n::linearNeuron, firedNeurons, nExInType, totalInputPort)
|
||||
# wSign_0 = sign.(n.wRec) # original sign
|
||||
# # n.wRecChange .*= (connStrengthAdjust.(n.synapticStrength))
|
||||
# wRecChange_max = 0.1 * abs(sum(n.wRec)) # max change 20%
|
||||
# y = abs(sum(n.wRecChange))
|
||||
# wRecChange_reduceCoeff = 1.0
|
||||
# # if y > wRecChange_max # capping weight update
|
||||
# # wRecChange_reduceCoeff = wRecChange_max / y
|
||||
# # end
|
||||
# n.wRec += (wRecChange_reduceCoeff * n.wRecChange)
|
||||
# n.alpha += n.alphaChange
|
||||
|
||||
# wSign_1 = sign.(n.wRec) # check for fliped sign, 1 indicates non-fliped sign
|
||||
# nonFlipedSign = isequal.(wSign_0, wSign_1) # 1 not fliped, 0 fliped
|
||||
wSign_1 = sign.(n.wRec) # check for fliped sign, 1 indicates non-fliped sign
|
||||
nonFlipedSign = isequal.(wSign_0, wSign_1) # 1 not fliped, 0 fliped
|
||||
# # normalize wRec peak to prevent input signal overwhelming neuron
|
||||
# if sum(n.wRecChange) != 0
|
||||
# # normalizePeak!(n.wRec, n.wRecChange, 2)
|
||||
# end
|
||||
# # set weight that fliped sign to 0 for random new connection
|
||||
# # n.wRec .*= nonFlipedSign
|
||||
# # capMaxWeight!(n.wRec) # cap maximum weight
|
||||
# # synapticConnStrength!(n, "updown")
|
||||
# # neuroplasticity!(n,firedNeurons, nExInType, totalInputPort)
|
||||
|
||||
# set weight that fliped sign to 0 for random new connection
|
||||
# n.wRec .*= nonFlipedSign
|
||||
|
||||
# check for non firing. if neuron not fire for too long, reduce all connection strength
|
||||
if n.id ∈ firedNeurons
|
||||
n.notFireCounter = n.notFireTimeOut
|
||||
synapticConnStrength!(n, "updown")
|
||||
n.notFireTimeOut = 0
|
||||
elseif n.id ∉ firedNeurons && n.notFireCounter != n.notFireTimeOut
|
||||
n.notFireTimeOut += 1
|
||||
synapticConnStrength!(n, "updown")
|
||||
elseif n.id ∉ firedNeurons && n.notFireCounter == n.notFireTimeOut
|
||||
synapticConnStrength!(n, "down")
|
||||
else
|
||||
error("undefined condition line $(@__LINE__)")
|
||||
end
|
||||
|
||||
synapticConnStrength!(n, "updown")
|
||||
neuroplasticity!(n,firedNeurons, nExInType, totalInputPort)
|
||||
end
|
||||
|
||||
function learn!(n::integrateNeuron, firedNeurons, nExInType, totalInputPort)
|
||||
wRecChange_reduceCoeff = 1.0
|
||||
n.wRec += (wRecChange_reduceCoeff * n.wRecChange)
|
||||
n.b += (wRecChange_reduceCoeff * n.bChange)
|
||||
# n.alpha += n.alphaChange
|
||||
|
||||
# learn alpha
|
||||
# n.alpha_wSignal += n.alpha_wSignalChange
|
||||
# n.alpha_wPotential += n.alpha_wPotentialChange
|
||||
# n.alpha_b += n.alpha_bChange
|
||||
# n.alpha_wSignalChange *= 0.0
|
||||
# n.alpha_wPotentialChange *= 0.0
|
||||
# n.alpha_bChange *= 0.0
|
||||
# computeAlpha!(n)
|
||||
|
||||
# # check for non firing. if neuron not fire for too long, reduce all connection strength
|
||||
# if n.id ∈ firedNeurons
|
||||
# n.notFireCounter = n.notFireTimeOut
|
||||
# synapticConnStrength!(n, "updown")
|
||||
# n.notFireTimeOut = 0
|
||||
# elseif n.id ∉ firedNeurons && n.notFireCounter != n.notFireTimeOut
|
||||
# n.notFireTimeOut += 1
|
||||
# synapticConnStrength!(n, "updown")
|
||||
# elseif n.id ∉ firedNeurons && n.notFireCounter == n.notFireTimeOut
|
||||
# synapticConnStrength!(n, "down")
|
||||
# else
|
||||
# error("undefined condition line $(@__LINE__)")
|
||||
# end
|
||||
|
||||
# synapticConnStrength!(n, "updown")
|
||||
# neuroplasticity!(n,firedNeurons, nExInType, totalInputPort)
|
||||
end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
143
src/snn_utils.jl
143
src/snn_utils.jl
@@ -6,7 +6,8 @@ export calculate_α, calculate_ρ, calculate_k, timestep_forward!, init_neuron,
|
||||
reset_epsilonRecA!, synapticConnStrength!, normalizePeak!, reset_wRecChange!,
|
||||
firing_rate_error!, firing_rate_regulator!, update_Bn!, cal_firing_reg!,
|
||||
neuroplasticity!, shakeup!, reset_learning_no_wchange!, adjust_internal_learning_rate!,
|
||||
gradient_withloss, capMaxWeight, connStrengthAdjust, wRecMaxWeight!
|
||||
gradient_withloss, capMaxWeight, connStrengthAdjust, wRecMaxWeight!,
|
||||
computeAlpha!, compute_alphaChange!
|
||||
|
||||
using Statistics, Random, LinearAlgebra, Distributions, Zygote, Flux
|
||||
using GeneralUtils
|
||||
@@ -48,7 +49,6 @@ reset_firing_counter!(n::Union{computeNeuron, outputNeuron}) = n.firingCounter =
|
||||
reset_firing_diff!(n::Union{computeNeuron, outputNeuron}) = n.firingDiff = n.firingDiff * 0.0
|
||||
reset_refractoryCounter!(n::Union{computeNeuron, outputNeuron}) = n.refractoryCounter = n.refractoryCounter * 0.0
|
||||
reset_z_i_t_commulative!(n::Union{computeNeuron, outputNeuron}) = n.z_i_t_commulative = n.z_i_t_commulative * 0.0
|
||||
reset_alphaChange!(n::Union{computeNeuron, outputNeuron}) = n.alphaChange = n.alphaChange * 0.0
|
||||
|
||||
# reset function for output neuron
|
||||
reset_epsilon_j!(n::linearNeuron) = n.epsilon_j = n.epsilon_j * 0.0
|
||||
@@ -63,8 +63,7 @@ function resetLearningParams!(n::lifNeuron)
|
||||
reset_v_t!(n)
|
||||
reset_z_t!(n)
|
||||
reset_firing_counter!(n)
|
||||
reset_firing_diff!(n)
|
||||
reset_alphaChange!(n)
|
||||
|
||||
|
||||
# reset refractory state at the start/end of episode. Otherwise once neuron goes into
|
||||
# refractory state, it will stay in refractory state forever
|
||||
@@ -79,8 +78,7 @@ function resetLearningParams!(n::alifNeuron)
|
||||
reset_z_t!(n)
|
||||
reset_a!(n)
|
||||
reset_firing_counter!(n)
|
||||
reset_firing_diff!(n)
|
||||
reset_alphaChange!(n)
|
||||
|
||||
|
||||
# reset refractory state at the start/end of episode. Otherwise once neuron goes into
|
||||
# refractory state, it will stay in refractory state forever
|
||||
@@ -113,7 +111,7 @@ function resetLearningParams!(n::integrateNeuron)
|
||||
reset_bChange!(n)
|
||||
reset_v_t!(n)
|
||||
reset_firing_counter!(n)
|
||||
reset_alphaChange!(n)
|
||||
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
@@ -334,11 +332,10 @@ function neuroplasticity!(n::computeNeuron, firedNeurons::Vector,
|
||||
filter!(x -> x ∉ n.subscriptionList, nFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
|
||||
nNonFiredPool = setdiff!([1:length(nExInTypeList)...], nFiredPool)
|
||||
|
||||
filter!(x -> x ∉ [n.id], nNonFiredPool) # exclude this neuron id from the id list
|
||||
filter!(x -> x ∉ n.subscriptionList, nNonFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
|
||||
w = randn(length(zeroWeightConnIndex)) / 100
|
||||
w = randn(length(zeroWeightConnIndex)) / 10
|
||||
synapticStrength = rand(-4.5:0.1:-3.5, length(zeroWeightConnIndex))
|
||||
|
||||
shuffle!(nFiredPool)
|
||||
@@ -356,51 +353,90 @@ function neuroplasticity!(n::computeNeuron, firedNeurons::Vector,
|
||||
newConn = popfirst!(nNonFiredPool)
|
||||
end
|
||||
n.subscriptionList[connIndex] = newConn
|
||||
n.wRec[connIndex] = abs(w[i]) * nExInTypeList[newConn]
|
||||
n.wRec[connIndex] = w[i] #* nExInTypeList[newConn]
|
||||
n.synapticStrength[connIndex] = synapticStrength[i]
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# function neuroplasticity!(n::outputNeuron, firedNeurons::Vector,
|
||||
# nExInTypeList::Vector, totalInputNeuron::Integer)
|
||||
# # if there is 0-weight then replace it with new connection
|
||||
# zeroWeightConnIndex = findall(iszero.(n.wRec)) # connection that has 0 weight
|
||||
# if length(zeroWeightConnIndex) != 0
|
||||
# # new synaptic connection must sample fron neuron that fires
|
||||
# nFiredPool = filter(x -> x ∉ [n.id], firedNeurons) # exclude this neuron id from the id list
|
||||
# filter!(x -> x ∉ n.subscriptionList, nFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
# filter!(x -> x ∉ [1:totalInputNeuron...], nFiredPool) # exclude input neuron
|
||||
function neuroplasticity!(n::linearNeuron, firedNeurons::Vector,
|
||||
nExInTypeList::Vector, totalInputNeuron::Integer)
|
||||
# if there is 0-weight then replace it with new connection
|
||||
zeroWeightConnIndex = findall(iszero.(n.wRec)) # connection that has 0 weight
|
||||
if length(zeroWeightConnIndex) != 0
|
||||
# new synaptic connection must sample fron neuron that fires
|
||||
nFiredPool = filter(x -> x ∉ [n.id], firedNeurons) # exclude this neuron id from the id list
|
||||
filter!(x -> x ∉ n.subscriptionList, nFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
filter!(x -> x ∉ [1:totalInputNeuron...], nFiredPool) # exclude input neuron
|
||||
|
||||
# nNonFiredPool = setdiff!([1:length(nExInTypeList)...], nFiredPool)
|
||||
nNonFiredPool = setdiff!([1:length(nExInTypeList)...], nFiredPool)
|
||||
filter!(x -> x ∉ [n.id], nNonFiredPool) # exclude this neuron id from the id list
|
||||
filter!(x -> x ∉ n.subscriptionList, nNonFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
filter!(x -> x ∉ [1:totalInputNeuron...], nNonFiredPool) # exclude input neuron
|
||||
|
||||
w = randn(length(zeroWeightConnIndex)) / 10
|
||||
synapticStrength = rand(-4.5:0.1:-3.5, length(zeroWeightConnIndex))
|
||||
|
||||
shuffle!(nFiredPool)
|
||||
shuffle!(nNonFiredPool)
|
||||
|
||||
# add new synaptic connection to neuron
|
||||
for (i, connIndex) in enumerate(zeroWeightConnIndex)
|
||||
""" conn that is being replaced has to go into nNonFiredPool so
|
||||
nNonFiredPool isn't empty """
|
||||
push!(nNonFiredPool, n.subscriptionList[connIndex])
|
||||
|
||||
if length(nFiredPool) != 0
|
||||
newConn = popfirst!(nFiredPool)
|
||||
else
|
||||
newConn = popfirst!(nNonFiredPool)
|
||||
end
|
||||
n.subscriptionList[connIndex] = newConn
|
||||
n.wRec[connIndex] = w[i] * nExInTypeList[newConn]
|
||||
n.synapticStrength[connIndex] = synapticStrength[i]
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
function neuroplasticity!(n::integrateNeuron, firedNeurons::Vector,
|
||||
nExInTypeList::Vector, totalInputNeuron::Integer)
|
||||
# if there is 0-weight then replace it with new connection
|
||||
zeroWeightConnIndex = findall(iszero.(n.wRec)) # connection that has 0 weight
|
||||
if length(zeroWeightConnIndex) != 0
|
||||
# new synaptic connection must sample fron neuron that fires
|
||||
nFiredPool = filter(x -> x ∉ [n.id], firedNeurons) # exclude this neuron id from the id list
|
||||
filter!(x -> x ∉ n.subscriptionList, nFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
filter!(x -> x ∉ [1:totalInputNeuron...], nFiredPool) # exclude input neuron
|
||||
|
||||
nNonFiredPool = setdiff!([1:length(nExInTypeList)...], nFiredPool)
|
||||
# unique!(append!(nNonFiredPool, zeroWeightConnIndex))
|
||||
# filter!(x -> x ∉ [n.id], nNonFiredPool) # exclude this neuron id from the id list
|
||||
# filter!(x -> x ∉ n.subscriptionList, nNonFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
# filter!(x -> x ∉ [1:totalInputNeuron...], nNonFiredPool) # exclude input neuron
|
||||
filter!(x -> x ∉ [n.id], nNonFiredPool) # exclude this neuron id from the id list
|
||||
filter!(x -> x ∉ n.subscriptionList, nNonFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
filter!(x -> x ∉ [1:totalInputNeuron...], nNonFiredPool) # exclude input neuron
|
||||
|
||||
# w = randn(length(zeroWeightConnIndex)) / 100
|
||||
# synapticStrength = rand(-4.5:0.1:-3.5, length(zeroWeightConnIndex))
|
||||
w = randn(length(zeroWeightConnIndex)) / 10
|
||||
synapticStrength = rand(-4.5:0.1:-3.5, length(zeroWeightConnIndex))
|
||||
|
||||
# shuffle!(nFiredPool)
|
||||
# shuffle!(nNonFiredPool)
|
||||
shuffle!(nFiredPool)
|
||||
shuffle!(nNonFiredPool)
|
||||
|
||||
# # add new synaptic connection to neuron
|
||||
# for (i, connIndex) in enumerate(zeroWeightConnIndex)
|
||||
# """ conn that is being replaced has to go into nNonFiredPool so
|
||||
# nNonFiredPool isn't empty """
|
||||
# push!(nNonFiredPool, n.subscriptionList[connIndex])
|
||||
# add new synaptic connection to neuron
|
||||
for (i, connIndex) in enumerate(zeroWeightConnIndex)
|
||||
""" conn that is being replaced has to go into nNonFiredPool so
|
||||
nNonFiredPool isn't empty """
|
||||
push!(nNonFiredPool, n.subscriptionList[connIndex])
|
||||
|
||||
# if length(nFiredPool) != 0
|
||||
# newConn = popfirst!(nFiredPool)
|
||||
# else
|
||||
# newConn = popfirst!(nNonFiredPool)
|
||||
# end
|
||||
# n.subscriptionList[connIndex] = newConn
|
||||
# n.wRec[connIndex] = w[i] * nExInTypeList[newConn]
|
||||
# n.synapticStrength[connIndex] = synapticStrength[i]
|
||||
# end
|
||||
# end
|
||||
# end
|
||||
if length(nFiredPool) != 0
|
||||
newConn = popfirst!(nFiredPool)
|
||||
else
|
||||
newConn = popfirst!(nNonFiredPool)
|
||||
end
|
||||
n.subscriptionList[connIndex] = newConn
|
||||
n.wRec[connIndex] = w[i] * nExInTypeList[newConn]
|
||||
n.synapticStrength[connIndex] = synapticStrength[i]
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
""" Cap maximum weight of each neuron connection
|
||||
"""
|
||||
@@ -415,6 +451,27 @@ function wRecMaxWeight!(n::computeNeuron; max=1.0)
|
||||
end
|
||||
|
||||
|
||||
function compute_alphaChange!(n::passthroughNeuron, error::Float64) end
|
||||
|
||||
function compute_alphaChange!(n::Union{computeNeuron, outputNeuron}, error::Float64)
|
||||
if error != 0
|
||||
n.alpha_wSignalChange += -n.eta * sum(n.epsilonRec) * error
|
||||
n.alpha_wPotentialChange += -n.eta * error
|
||||
n.alpha_bChange += -n.eta * error
|
||||
else
|
||||
n.alpha_wSignalChange += n.eta
|
||||
n.alpha_wPotentialChange += n.eta
|
||||
n.alpha_bChange += n.eta
|
||||
end
|
||||
end
|
||||
|
||||
function computeAlpha!(n::Union{computeNeuron, outputNeuron})
|
||||
if sum(n.recSignal) != 0
|
||||
alphaSignal = n.alpha_wSignal * sum(n.recSignal)
|
||||
alphaV = n.alpha_wPotential * n.v_t
|
||||
n.alpha = Flux.sigmoid(alphaSignal + alphaV + n.alpha_b)
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
|
||||
|
||||
191
src/types.jl
191
src/types.jl
@@ -10,7 +10,7 @@ export
|
||||
instantiate_custom_types, init_neuron, populate_neuron,
|
||||
add_neuron!
|
||||
|
||||
using Random, LinearAlgebra
|
||||
using Random, LinearAlgebra, Flux
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
@@ -117,7 +117,7 @@ Base.@kwdef mutable struct kfn_1 <: knowledgeFn
|
||||
nExcitatory::Array{Int64} =Int64[] # list of excitatory neuron id
|
||||
nInhabitory::Array{Int64} = Int64[] # list of inhabitory neuron id
|
||||
nExInType::Array{Int64} = Int64[] # list all neuron EX or IN
|
||||
excitatoryPercent::Int64 = 60 # percentage of excitatory neuron, inhabitory percent will be 100-ExcitatoryPercent
|
||||
excitatoryPercent::Int64 = 70 # percentage of excitatory neuron, inhabitory percent will be 100-ExcitatoryPercent
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
@@ -193,13 +193,6 @@ function kfn_1(kfnParams::Dict)
|
||||
throw(error("number of compute neuron must be greater than input neuron"))
|
||||
end
|
||||
|
||||
# # Bn
|
||||
# if kfn.kfnParams[:Bn] == "random"
|
||||
# kfn.Bn = [Random.rand(0:0.001:1) for i in 1:kfn.kfnParams[:computeNeuronNumber]]
|
||||
# else # in case I want to specify manually
|
||||
# kfn.Bn = [kfn.kfnParams[:Bn] for i in 1:kfn.kfnParams[:computeNeuronNumber]]
|
||||
# end
|
||||
|
||||
# assign neurons ID by their position in kfn.neurons array because I think it is
|
||||
# straight forward way
|
||||
|
||||
@@ -229,12 +222,6 @@ function kfn_1(kfnParams::Dict)
|
||||
push!(kfn.outputNeuronsArray, neuron)
|
||||
end
|
||||
|
||||
for n in kfn.neuronsArray
|
||||
if typeof(n) <: computeNeuron
|
||||
n.firingRateTarget = kfn.kfnParams[:neuronFiringRateTarget]
|
||||
end
|
||||
end
|
||||
|
||||
# excitatory neuron to inhabitory neuron = 60:40 % of computeNeuron
|
||||
ex_number = Int(floor((kfn.excitatoryPercent/100.0) * kfn.kfnParams[:computeNeuronNumber]))
|
||||
ex_n = [1 for i in 1:ex_number]
|
||||
@@ -265,21 +252,23 @@ function kfn_1(kfnParams::Dict)
|
||||
end
|
||||
end
|
||||
|
||||
# # add ExInType into each output neuron subExInType
|
||||
# for n in kfn.outputNeuronsArray
|
||||
# try # input neuron doest have n.subscriptionList
|
||||
# for (i, sub_id) in enumerate(n.subscriptionList)
|
||||
# n_ExInType = kfn.neuronsArray[sub_id].ExInType
|
||||
# n.wRec[i] *= n_ExInType
|
||||
# end
|
||||
# catch
|
||||
# end
|
||||
# end
|
||||
# add ExInType into each output neuron subExInType
|
||||
for n in kfn.outputNeuronsArray
|
||||
try # input neuron doest have n.subscriptionList
|
||||
for (i, sub_id) in enumerate(n.subscriptionList)
|
||||
n_ExInType = kfn.neuronsArray[sub_id].ExInType
|
||||
n.wRec[i] *= n_ExInType
|
||||
end
|
||||
catch
|
||||
end
|
||||
end
|
||||
|
||||
for n in kfn.neuronsArray
|
||||
push!(kfn.nExInType, n.ExInType)
|
||||
end
|
||||
|
||||
|
||||
|
||||
return kfn
|
||||
end
|
||||
|
||||
@@ -341,8 +330,15 @@ Base.@kwdef mutable struct lifNeuron <: computeNeuron
|
||||
synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>-5), upperlimit=(5=>5))
|
||||
|
||||
gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from paper
|
||||
alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
|
||||
alphaChange::Float64 = 0.0
|
||||
|
||||
alpha::Float64 = 0.99
|
||||
alpha_wSignal::Float64 = 2.0
|
||||
alpha_wPotential::Float64 = 2.0
|
||||
alpha_b::Float64 = 2.0
|
||||
alpha_wSignalChange::Float64 = 0.0
|
||||
alpha_wPotentialChange::Float64 = 0.0
|
||||
alpha_bChange::Float64 = 0.0
|
||||
|
||||
phi::Float64 = 0.0 # ϕ, psuedo derivative
|
||||
epsilonRec::Array{Float64} = Float64[] # ϵ_rec, eligibility vector for neuron spike
|
||||
decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
|
||||
@@ -364,7 +360,7 @@ Base.@kwdef mutable struct lifNeuron <: computeNeuron
|
||||
firingRateError::Float64 = 0.0 # local neuron error w.r.t. firing regularization
|
||||
firingRate::Float64 = 0.0 # running average of firing rate in Hz
|
||||
|
||||
notFireTimeOut::Int64 = 100 # consecutive count of not firing. Should be the same as batch size
|
||||
notFireTimeOut::Int64 = 10 # consecutive count of not firing. Should be the same as batch size
|
||||
notFireCounter::Int64 = 0
|
||||
|
||||
""" "inference" = no learning params will be collected.
|
||||
@@ -434,8 +430,22 @@ Base.@kwdef mutable struct alifNeuron <: computeNeuron
|
||||
synapticStrength::Array{Float64} = Float64[]
|
||||
synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>0), upperlimit=(5=>5))
|
||||
|
||||
alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
|
||||
alphaChange::Float64 = 0.0
|
||||
alpha::Float64 = 0.99
|
||||
alpha_wSignal::Float64 = 2.0
|
||||
alpha_wPotential::Float64 = 2.0
|
||||
alpha_b::Float64 = 2.0
|
||||
alpha_wSignalChange::Float64 = 0.0
|
||||
alpha_wPotentialChange::Float64 = 0.0
|
||||
alpha_bChange::Float64 = 0.0
|
||||
|
||||
# alpha::Vector{Float64} = Float64[]
|
||||
# alpha_wSignal::Vector{Float64} = Float64[]
|
||||
# alpha_wPotential::Float64 = randn() / 100
|
||||
# alpha_b::Vector{Float64} = Float64[]
|
||||
# alpha_wSignalChange::Vector{Float64} = Float64[]
|
||||
# alpha_wPotentialChange::Float64 = 0.0
|
||||
# alpha_bChange::Vector{Float64} = Float64[]
|
||||
|
||||
delta::Float64 = 1.0 # δ, discreate timestep size in millisecond
|
||||
epsilonRec::Array{Float64} = Float64[] # ϵ_rec(v), eligibility vector for neuron i spike
|
||||
epsilonRecA::Array{Float64} = Float64[] # ϵ_rec(a)
|
||||
@@ -461,7 +471,7 @@ Base.@kwdef mutable struct alifNeuron <: computeNeuron
|
||||
firingRateError::Float64 = 0.0 # local neuron error w.r.t. firing regularization
|
||||
firingRate::Float64 = 0.0 # running average of firing rate, Hz
|
||||
|
||||
notFireTimeOut::Int64 = 100 # consecutive count of not firing. Should be the same as batch size
|
||||
notFireTimeOut::Int64 = 10 # consecutive count of not firing. Should be the same as batch size
|
||||
notFireCounter::Int64 = 0
|
||||
|
||||
tau_a::Float64 = 100.0 # τ_a, adaption time constant in millisecond
|
||||
@@ -546,8 +556,16 @@ Base.@kwdef mutable struct linearNeuron <: outputNeuron
|
||||
synapticStrength::Array{Float64} = Float64[]
|
||||
synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>-5), upperlimit=(5=>5))
|
||||
|
||||
gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from paper
|
||||
alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
|
||||
gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from
|
||||
|
||||
alpha::Float64 = 0.99
|
||||
alpha_wSignal::Float64 = 2.0
|
||||
alpha_wPotential::Float64 = 2.0
|
||||
alpha_b::Float64 = 2.0
|
||||
alpha_wSignalChange::Float64 = 0.0
|
||||
alpha_wPotentialChange::Float64 = 0.0
|
||||
alpha_bChange::Float64 = 0.0
|
||||
|
||||
phi::Float64 = 0.0 # ϕ, psuedo derivative
|
||||
epsilonRec::Array{Float64} = Float64[] # ϵ_rec, eligibility vector for neuron spike
|
||||
decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
|
||||
@@ -562,6 +580,14 @@ Base.@kwdef mutable struct linearNeuron <: outputNeuron
|
||||
alpha_v_t::Float64 = 0.0 # alpha * v_t
|
||||
|
||||
firingCounter::Int64 = 0 # store how many times neuron fires
|
||||
firingRateTarget::Float64 = 20.0 # neuron's target firing rate in Hz
|
||||
firingDiff::Float64 = 0.0 # e-prop supplement paper equation 5
|
||||
firingRateError::Float64 = 0.0 # local neuron error w.r.t. firing regularization
|
||||
firingRate::Float64 = 0.0 # running average of firing rate in Hz
|
||||
|
||||
notFireTimeOut::Int64 = 10 # consecutive count of not firing. Should be the same as batch size
|
||||
notFireCounter::Int64 = 0
|
||||
|
||||
ExInSignalSum::Float64 = 0.0
|
||||
end
|
||||
|
||||
@@ -627,8 +653,15 @@ Base.@kwdef mutable struct integrateNeuron <: outputNeuron
|
||||
synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>-5), upperlimit=(5=>5))
|
||||
|
||||
gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from paper
|
||||
alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
|
||||
alphaChange::Float64 = 0.0
|
||||
|
||||
alpha::Float64 = 0.99
|
||||
alpha_wSignal::Float64 = 2.0
|
||||
alpha_wPotential::Float64 = 2.0
|
||||
alpha_b::Float64 = 2.0
|
||||
alpha_wSignalChange::Float64 = 0.0
|
||||
alpha_wPotentialChange::Float64 = 0.0
|
||||
alpha_bChange::Float64 = 0.0
|
||||
|
||||
phi::Float64 = 0.0 # ϕ, psuedo derivative
|
||||
epsilonRec::Array{Float64} = Float64[] # ϵ_rec, eligibility vector for neuron spike
|
||||
decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
|
||||
@@ -643,6 +676,14 @@ Base.@kwdef mutable struct integrateNeuron <: outputNeuron
|
||||
alpha_v_t::Float64 = 0.0 # alpha * v_t
|
||||
|
||||
firingCounter::Int64 = 0 # store how many times neuron fires
|
||||
firingRateTarget::Float64 = 20.0 # neuron's target firing rate in Hz
|
||||
firingDiff::Float64 = 0.0 # e-prop supplement paper equation 5
|
||||
firingRateError::Float64 = 0.0 # local neuron error w.r.t. firing regularization
|
||||
firingRate::Float64 = 0.0 # running average of firing rate in Hz
|
||||
|
||||
notFireTimeOut::Int64 = 10 # consecutive count of not firing. Should be the same as batch size
|
||||
notFireCounter::Int64 = 0
|
||||
|
||||
ExInSignalSum::Float64 = 0.0
|
||||
end
|
||||
|
||||
@@ -699,23 +740,6 @@ function init_neuron!(id::Int64, n::passthroughNeuron, n_params::Dict, kfnParams
|
||||
n.knowledgeFnName = kfnParams[:knowledgeFnName]
|
||||
end
|
||||
|
||||
# function init_neuron!(id::Int64, n::lifNeuron, kfnParams::Dict)
|
||||
# n.id = id
|
||||
# n.knowledgeFnName = kfnParams[:knowledgeFnName]
|
||||
# subscription_options = shuffle!([1:(kfnParams[:input_neuron_number]+kfnParams[:computeNeuronNumber])...])
|
||||
# if typeof(kfnParams[:synapticConnectionPercent]) == String
|
||||
# percent = parse(Int, kfnParams[:synapticConnectionPercent][1:end-1]) / 100
|
||||
# synapticConnectionPercent = floor(length(subscription_options) * percent)
|
||||
# n.subscriptionList = [pop!(subscription_options) for i = 1:synapticConnectionPercent]
|
||||
# end
|
||||
# filter!(x -> x != n.id, n.subscriptionList)
|
||||
# n.epsilonRec = zeros(length(n.subscriptionList))
|
||||
# n.wRec = Random.rand(length(n.subscriptionList))
|
||||
# n.wRecChange = zeros(length(n.subscriptionList))
|
||||
# n.reg_voltage_b = zeros(length(n.subscriptionList))
|
||||
# n.alpha = calculate_α(n)
|
||||
# end
|
||||
|
||||
function init_neuron!(id::Int64, n::lifNeuron, n_params::Dict, kfnParams::Dict)
|
||||
n.id = id
|
||||
n.knowledgeFnName = kfnParams[:knowledgeFnName]
|
||||
@@ -728,11 +752,13 @@ function init_neuron!(id::Int64, n::lifNeuron, n_params::Dict, kfnParams::Dict)
|
||||
filter!(x -> x != n.id, n.subscriptionList)
|
||||
n.synapticStrength = rand(-4.5:0.01:-4, length(n.subscriptionList))
|
||||
|
||||
n.epsilonRec = zeros(length(n.subscriptionList))
|
||||
# n.wRec = randn(length(n.subscriptionList))
|
||||
n.wRec = randn(rng, length(n.subscriptionList)) / 100
|
||||
n.wRecChange = zeros(length(n.subscriptionList))
|
||||
n.alpha = calculate_α(n)
|
||||
|
||||
n.epsilonRec = zeros(length(n.subscriptionList))
|
||||
# start w/ small weight Otherwise neuron's weight will be explode in the long run
|
||||
n.wRec = randn(rng, length(n.subscriptionList)) / 10
|
||||
|
||||
n.wRecChange = zeros(length(n.subscriptionList))
|
||||
n.z_i_t_commulative = zeros(length(n.subscriptionList))
|
||||
end
|
||||
|
||||
@@ -750,7 +776,9 @@ function init_neuron!(id::Int64, n::alifNeuron, n_params::Dict,
|
||||
n.synapticStrength = rand(-4.5:0.01:-4, length(n.subscriptionList))
|
||||
|
||||
n.epsilonRec = zeros(length(n.subscriptionList))
|
||||
n.wRec = randn(rng, length(n.subscriptionList)) / 100
|
||||
# start w/ small weight Otherwise neuron's weight will be explode in the long run
|
||||
n.wRec = randn(rng, length(n.subscriptionList)) / 10
|
||||
|
||||
n.wRecChange = zeros(length(n.subscriptionList))
|
||||
|
||||
# the more time has passed from the last time neuron was activated, the more
|
||||
@@ -761,6 +789,23 @@ function init_neuron!(id::Int64, n::alifNeuron, n_params::Dict,
|
||||
n.z_i_t_commulative = zeros(length(n.subscriptionList))
|
||||
end
|
||||
|
||||
function init_neuron!(id::Int64, n::linearNeuron, n_params::Dict, kfnParams::Dict)
|
||||
n.id = id
|
||||
n.knowledgeFnName = kfnParams[:knowledgeFnName]
|
||||
|
||||
subscription_options = shuffle!([kfnParams[:totalInputPort]+1 : kfnParams[:totalNeurons]...])
|
||||
subscription_numbers = Int(floor((n_params[:synapticConnectionPercent] / 100.0) *
|
||||
kfnParams[:totalNeurons] - kfnParams[:totalInputPort]))
|
||||
n.subscriptionList = [pop!(subscription_options) for i = 1:subscription_numbers]
|
||||
n.synapticStrength = rand(-4.5:0.01:-4, length(n.subscriptionList))
|
||||
|
||||
n.epsilonRec = zeros(length(n.subscriptionList))
|
||||
n.wRec = randn(rng, length(n.subscriptionList)) / 10
|
||||
n.wRecChange = zeros(length(n.subscriptionList))
|
||||
n.alpha = calculate_k(n)
|
||||
n.z_i_t_commulative = zeros(length(n.subscriptionList))
|
||||
end
|
||||
|
||||
function init_neuron!(id::Int64, n::integrateNeuron, n_params::Dict, kfnParams::Dict)
|
||||
n.id = id
|
||||
n.knowledgeFnName = kfnParams[:knowledgeFnName]
|
||||
@@ -771,31 +816,17 @@ function init_neuron!(id::Int64, n::integrateNeuron, n_params::Dict, kfnParams::
|
||||
n.subscriptionList = [pop!(subscription_options) for i = 1:subscription_numbers]
|
||||
n.synapticStrength = rand(-4.5:0.01:-4, length(n.subscriptionList))
|
||||
|
||||
n.epsilonRec = zeros(length(n.subscriptionList))
|
||||
n.wRec = randn(rng, length(n.subscriptionList)) / 100
|
||||
n.wRecChange = zeros(length(n.subscriptionList))
|
||||
n.alpha = calculate_k(n)
|
||||
|
||||
n.epsilonRec = zeros(length(n.subscriptionList))
|
||||
# start w/ small weight Otherwise neuron's weight will be explode in the long run
|
||||
n.wRec = randn(rng, length(n.subscriptionList)) / 10
|
||||
n.wRecChange = zeros(length(n.subscriptionList))
|
||||
n.z_i_t_commulative = zeros(length(n.subscriptionList))
|
||||
n.b = randn(rng) / 100
|
||||
# start w/ small weight Otherwise neuron's weight will be explode in the long run
|
||||
n.b = randn(rng) / 10
|
||||
end
|
||||
|
||||
# function init_neuron!(id::Int64, n::linearNeuron, n_params::Dict, kfnParams::Dict)
|
||||
# n.id = id
|
||||
# n.knowledgeFnName = kfnParams[:knowledgeFnName]
|
||||
|
||||
# subscription_options = shuffle!([kfnParams[:totalInputPort]+1 : kfnParams[:totalNeurons]...])
|
||||
# subscription_numbers = Int(floor((n_params[:synapticConnectionPercent] / 100.0) *
|
||||
# kfnParams[:totalNeurons] - kfnParams[:totalInputPort]))
|
||||
# n.subscriptionList = [pop!(subscription_options) for i = 1:subscription_numbers]
|
||||
# n.synapticStrength = rand(-4.5:0.01:-4, length(n.subscriptionList))
|
||||
|
||||
# n.epsilonRec = zeros(length(n.subscriptionList))
|
||||
# n.wRec = randn(rng, length(n.subscriptionList)) / 100
|
||||
# n.wRecChange = zeros(length(n.subscriptionList))
|
||||
# n.alpha = calculate_k(n)
|
||||
# n.z_i_t_commulative = zeros(length(n.subscriptionList))
|
||||
# end
|
||||
|
||||
""" Make a neuron intended for use with knowledgeFn
|
||||
"""
|
||||
function init_neuron(id::Int64, n_params::Dict, kfnParams::Dict)
|
||||
@@ -854,6 +885,10 @@ calculate_ρ(neuron::alifNeuron) = exp(-neuron.delta / neuron.tau_a)
|
||||
calculate_k(neuron::linearNeuron) = exp(-neuron.delta / neuron.tau_out)
|
||||
calculate_k(neuron::integrateNeuron) = exp(-neuron.delta / neuron.tau_out)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user