Compare commits

...

10 Commits

Author  SHA1        Message         Date
ton     364cfb4ea8  dev             2023-09-29 08:27:26 +07:00
ton     2b15510669  dev             2023-09-24 15:25:12 +07:00
ton     c165afaa11  dev             2023-09-23 09:57:40 +07:00
ton     0112e6e1be  dev             2023-09-21 15:08:51 +07:00
ton     1c5baea34a  version 0.0.11  2023-09-21 10:10:58 +07:00
ton     8e6d8a83d1  dev             2023-09-21 06:36:28 +07:00
ton     415e7fc2f3  dev             2023-09-20 22:20:41 +07:00
ton     edd26c180d  dev             2023-09-20 07:41:28 +07:00
ton     909e27a208  dev             2023-09-19 21:43:00 +07:00
ton     821f12c86b  dev             2023-09-19 20:46:40 +07:00
35 changed files with 15117 additions and 513 deletions

View File

@@ -27,8 +27,10 @@ using Pkg; Pkg.activate("."); Pkg.resolve(), Pkg.instantiate()
 # end
 using Revise
-using BenchmarkTools, Cthulhu
+using BenchmarkTools, Cthulhu, REPL.TerminalMenus
 using Flux, CUDA
 using BSON, JSON3
 using MLDatasets: MNIST
@@ -50,7 +52,7 @@ rootDir = pwd()
 # device = Flux.CUDA.functional() ? gpu : cpu # Flux provide "cpu" and "gpu" keywork
 device = gpu
 if device == gpu CUDA.device!(0) end #CHANGE
-# CUDA.allowscalar(false) # turn off scalar indexing in CPU to make it easier when moving to GPU
+CUDA.allowscalar(false) # turn off scalar indexing in CPU to make it easier when moving to GPU
 #------------------------------------------------------------------------------------------------100
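Note: CUDA.allowscalar(false) is CUDA.jl's switch that turns accidental element-by-element indexing of GPU arrays into an error instead of a silent, slow fallback, which is why this commit enables it before training. A minimal sketch of the effect (array name is illustrative):

using CUDA
CUDA.allowscalar(false)
x = CUDA.rand(4)
# x[1]                   # would now throw a scalar-indexing error
sum(x)                   # whole-array operations still run on the GPU
CUDA.@allowscalar x[1]   # opt back in locally when scalar access is intended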
@@ -66,6 +68,9 @@ if device == gpu CUDA.device!(0) end #CHANGE
 -
 """
+# ----------------------------- REPL menu options ---------------------------- #
+options = ["yes", "no"]
+menu = RadioMenu(options)
 # communication config --------------------------------------------------------------------------100
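Note: RadioMenu comes from the REPL.TerminalMenus stdlib added to the imports above. request(prompt, menu) blocks for an arrow-key selection and returns the 1-based index of the chosen option (-1 on cancel), not the option string. A minimal sketch:

using REPL.TerminalMenus

options = ["yes", "no"]
menu = RadioMenu(options)
choice = request("continue?", menu)         # Int index into options
if choice != -1 && options[choice] == "yes"
    println("continuing")
end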
@@ -78,7 +83,7 @@ imageBatch = 1
 function generate_snn(filename::String, location::String)
-signalInput_portnumbers = (10, 20, imageBatch) # 2nd dim needs to match
+signalInput_portnumbers = (10, 10, imageBatch) # 2nd dim needs to match
 # input signal + copied input signal + noise.
 # 3rd dim is input batch size
 noise_portnumbers = (signalInput_portnumbers[1], 1)
@@ -139,7 +144,7 @@ function generate_snn(filename::String, location::String)
:type => "linearNeuron", :type => "linearNeuron",
:v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate) :v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
:tau_out => 20.0, # output time constant in millisecond. :tau_out => 20.0, # output time constant in millisecond.
:synapticConnectionPercent => 20, # % coverage of total neurons in kfn :synapticConnectionPercent => 100, # % coverage of total neurons in kfn
# Good starting value is 1/50th of tau_a # Good starting value is 1/50th of tau_a
# This is problem specific parameter. # This is problem specific parameter.
# It controls how leaky the neuron is. # It controls how leaky the neuron is.
@@ -150,7 +155,7 @@ function generate_snn(filename::String, location::String)
 integrate_neuron_params = Dict{Symbol, Any}(
 :type => "integrateNeuron",
-:synapticConnectionPercent => 20, # % coverage of total neurons in kfn
+:synapticConnectionPercent => 100, # % coverage of total neurons in kfn
 :eta => 1e-6,
 :tau_out => 100.0,
 # Good starting value is 1/50th of tau_a
@@ -205,9 +210,9 @@ end
 function data_loader()
 # test problem
-trainDataset = MNIST(:train)[1:10] # total 60000
+trainDataset = MNIST(:train)[1:3] # total 60000
 # validateDataset = MNIST(:test)
-validateDataset = MNIST(:train)[1:10]
+validateDataset = MNIST(:train)[1:3]
 labelDict = [0:9...]
 trainData = MLUtils.DataLoader(
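Note: this hunk shrinks the debug set from the first 10 MNIST samples to the first 3. In MLDatasets, indexing MNIST(:train) returns a named tuple of features and targets that MLUtils.DataLoader can batch; a minimal sketch of the pattern (batchsize is illustrative):

using MLDatasets: MNIST
using MLUtils

train = MNIST(:train)[1:3]    # (features = 28×28×3 array, targets = 3-element vector)
loader = MLUtils.DataLoader((train.features, train.targets), batchsize = 1)
for (img, label) in loader
    println(size(img), " -> ", label)
end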
@@ -244,8 +249,8 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 logitLog = zeros(10, 2)
 firedNeurons_t1 = zeros(1)
-var1 = zeros(10, 2)
-var2 = zeros(10, 2)
+var1 = zeros(3, 1)
+var2 = zeros(3, 1)
 var3 = zeros(10, 2)
 var4 = zeros(10, 2)
@@ -266,46 +271,46 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 plot30 = Observable(var1[1 , :])
 plot31 = Observable(var1[2 , :])
 plot32 = Observable(var1[3 , :])
-plot33 = Observable(var1[4 , :])
-plot34 = Observable(var1[5 , :])
-plot35 = Observable(var1[6 , :])
-plot36 = Observable(var1[7 , :])
-plot37 = Observable(var1[8 , :])
-plot38 = Observable(var1[9 , :])
-plot39 = Observable(var1[10, :])
+# plot33 = Observable(var1[4 , :])
+# plot34 = Observable(var1[5 , :])
+# plot35 = Observable(var1[6 , :])
+# plot36 = Observable(var1[7 , :])
+# plot37 = Observable(var1[8 , :])
+# plot38 = Observable(var1[9 , :])
+# plot39 = Observable(var1[10, :])
 plot40 = Observable(var2[1 , :])
 plot41 = Observable(var2[2 , :])
 plot42 = Observable(var2[3 , :])
-plot43 = Observable(var2[4 , :])
-plot44 = Observable(var2[5 , :])
-plot45 = Observable(var2[6 , :])
-plot46 = Observable(var2[7 , :])
-plot47 = Observable(var2[8 , :])
-plot48 = Observable(var2[9 , :])
-plot49 = Observable(var2[10, :])
-plot50 = Observable(var3[1 , :])
-plot51 = Observable(var3[2 , :])
-plot52 = Observable(var3[3 , :])
-plot53 = Observable(var3[4 , :])
-plot54 = Observable(var3[5 , :])
-plot55 = Observable(var3[6 , :])
-plot56 = Observable(var3[7 , :])
-plot57 = Observable(var3[8 , :])
-plot58 = Observable(var3[9 , :])
-plot59 = Observable(var3[10, :])
-plot60 = Observable(var4[1 , :])
-plot61 = Observable(var4[2 , :])
-plot62 = Observable(var4[3 , :])
-plot63 = Observable(var4[4 , :])
-plot64 = Observable(var4[5 , :])
-plot65 = Observable(var4[6 , :])
-plot66 = Observable(var4[7 , :])
-plot67 = Observable(var4[8 , :])
-plot68 = Observable(var4[9 , :])
-plot69 = Observable(var4[10, :])
+# plot43 = Observable(var2[4 , :])
+# plot44 = Observable(var2[5 , :])
+# plot45 = Observable(var2[6 , :])
+# plot46 = Observable(var2[7 , :])
+# plot47 = Observable(var2[8 , :])
+# plot48 = Observable(var2[9 , :])
+# plot49 = Observable(var2[10, :])
+# plot50 = Observable(var3[1 , :])
+# plot51 = Observable(var3[2 , :])
+# plot52 = Observable(var3[3 , :])
+# plot53 = Observable(var3[4 , :])
+# plot54 = Observable(var3[5 , :])
+# plot55 = Observable(var3[6 , :])
+# plot56 = Observable(var3[7 , :])
+# plot57 = Observable(var3[8 , :])
+# plot58 = Observable(var3[9 , :])
+# plot59 = Observable(var3[10, :])
+# plot60 = Observable(var4[1 , :])
+# plot61 = Observable(var4[2 , :])
+# plot62 = Observable(var4[3 , :])
+# plot63 = Observable(var4[4 , :])
+# plot64 = Observable(var4[5 , :])
+# plot65 = Observable(var4[6 , :])
+# plot66 = Observable(var4[7 , :])
+# plot67 = Observable(var4[8 , :])
+# plot68 = Observable(var4[9 , :])
+# plot69 = Observable(var4[10, :])
 # main figure
 fig1 = Figure()
@@ -338,75 +343,75 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 subfig3 = GLMakie.Axis(fig1[3, 1], # define position of this subfigure inside a figure
-title = "last RSNN wRec",
+title = "1st lif epsilonRec",
 xlabel = "time",
 ylabel = "data"
 )
 lines!(subfig3, plot30, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
 lines!(subfig3, plot31, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
 lines!(subfig3, plot32, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig3, plot33, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig3, plot34, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig3, plot35, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig3, plot36, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig3, plot37, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig3, plot38, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig3, plot39, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
+# lines!(subfig3, plot33, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig3, plot34, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig3, plot35, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig3, plot36, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig3, plot37, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig3, plot38, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig3, plot39, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
 # axislegend(subfig3, position = :lb)
 subfig4 = GLMakie.Axis(fig1[4, 1], # define position of this subfigure inside a figure
-title = "RSNN v_t1",
+title = "RSNN v_t",
 xlabel = "time",
 ylabel = "data"
 )
 lines!(subfig4, plot40, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
 lines!(subfig4, plot41, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
 lines!(subfig4, plot42, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig4, plot43, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig4, plot44, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig4, plot45, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig4, plot46, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig4, plot47, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig4, plot48, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig4, plot49, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
+# lines!(subfig4, plot43, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig4, plot44, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig4, plot45, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig4, plot46, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig4, plot47, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig4, plot48, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig4, plot49, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
 # axislegend(subfig4, position = :lb)
-subfig5 = GLMakie.Axis(fig1[5, 1], # define position of this subfigure inside a figure
-title = "output neuron epsilonRec",
-xlabel = "time",
-ylabel = "data"
-)
-lines!(subfig5, plot50, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig5, plot51, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig5, plot52, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig5, plot53, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig5, plot54, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig5, plot55, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig5, plot56, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig5, plot57, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig5, plot58, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig5, plot59, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
-# axislegend(subfig5, position = :lb)
-subfig6 = GLMakie.Axis(fig1[6, 1], # define position of this subfigure inside a figure
-title = "output neuron wRecChange",
-xlabel = "time",
-ylabel = "data"
-)
-lines!(subfig6, plot60, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig6, plot61, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig6, plot62, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig6, plot63, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig6, plot64, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig6, plot65, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig6, plot66, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig6, plot67, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig6, plot68, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
-lines!(subfig6, plot69, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
+# subfig5 = GLMakie.Axis(fig1[5, 1], # define position of this subfigure inside a figure
+# title = "output neuron epsilonRec",
+# xlabel = "time",
+# ylabel = "data"
+# )
+# lines!(subfig5, plot50, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig5, plot51, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig5, plot52, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig5, plot53, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig5, plot54, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig5, plot55, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig5, plot56, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig5, plot57, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig5, plot58, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig5, plot59, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
+# # axislegend(subfig5, position = :lb)
+# subfig6 = GLMakie.Axis(fig1[6, 1], # define position of this subfigure inside a figure
+# title = "output neuron wRecChange",
+# xlabel = "time",
+# ylabel = "data"
+# )
+# lines!(subfig6, plot60, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig6, plot61, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig6, plot62, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig6, plot63, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig6, plot64, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig6, plot65, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig6, plot66, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig6, plot67, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig6, plot68, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
+# lines!(subfig6, plot69, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
 # axislegend(subfig6, position = :lb)
 # wait(display(fig1))
-# display(fig1)
+display(fig1)
 # --------------------------------- end plot --------------------------------- #
 # model learning
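Note: the plotting code uses GLMakie's Observable pattern: each lines! call is bound to an Observable, and assigning obs[] = newdata later in the training loop redraws that curve without rebuilding the figure. A minimal self-contained sketch:

using GLMakie

fig = Figure()
ax = GLMakie.Axis(fig[1, 1], title = "live curve", xlabel = "time", ylabel = "data")
curve = Observable(zeros(2))     # placeholder data
lines!(ax, curve)
display(fig)
for step in 1:10
    curve[] = rand(step + 1)     # assignment triggers a redraw
    sleep(0.2)
end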
@@ -416,17 +421,22 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 stop = 0
 vt0 = 0.0 # store vt to compute learning progress
 for epoch = 1:1000
-stop == 3 ? break : false
+stop == 1 ? break : false
 println("epoch $epoch")
 n = length(trainData)
 println("n $n")
 p = Progress(n, dt=1.0) # minimum update interval: 1 second
 for (imgBatch, labels) in trainData # imgBatch(28, 28, 4) i.e. (row, col, batch), labels(label, batch)
-for rep in 1:1
-stop == 3 ? break : false
+stop == 1 ? break : false
+consecutiveCorrect = 0
+rep = 0
+# for rep in 1:20
+while consecutiveCorrect < 10
+rep += 1
+stop == 1 ? break : false
 # prepare image into input signal (10, 2, 784, 4) i.e. (row, col, timestep, batch)
-signal = dualTrackSpikeGen(imgBatch, [0.05, 0.1, 0.2, 0.3, 0.5], noise=(true, 1, 0.5), copies=18)
+signal = dualTrackSpikeGen(imgBatch, [0.05, 0.1, 0.2, 0.3, 0.5], noise=(true, 1, 0.1), copies=8)
 if length(size(signal)) == 3
 row, col, sequence = size(signal)
 batch = 1
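Note: this hunk replaces a fixed repetition count with a repeat-until-stable criterion: each image is re-presented until the model has answered correctly 10 times in a row (consecutiveCorrect is reset on any wrong answer further down). A minimal sketch of the control flow, with predict_and_learn! as a hypothetical stand-in for the model call plus IronpenGPU.learn!:

consecutiveCorrect = 0
rep = 0
while consecutiveCorrect < 10
    rep += 1
    correct = predict_and_learn!(model, sample)              # hypothetical helper
    consecutiveCorrect = correct ? consecutiveCorrect + 1 : 0
end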
@@ -457,32 +467,31 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 end
 # predict
-logit, _firedNeurons_t1 = model(current_pixel)
-# # log answer of all timestep
-# logitLog = [logitLog;; logit]
-# firedNeurons_t1 = push!(firedNeurons_t1, _firedNeurons_t1)
-# var1 = [var1;; _var1]
-# var2 = [var2;; _var2]
-# var3 = [var3;; _var3]
-# var4 = [var4;; _var4]
+logit, _ = model(current_pixel)
+# log answer of all timestep
+logitLog = [logitLog;; cpu(logit)]
+var1 = [var1;; reshape(sum(cpu(model.lif_epsilonRec)[:,:,1:3,1], dims=(1,2)), (:, 1))]
+var2 = [var2;; reshape(cpu(model.lif_vt)[1,1,1:3,1], (:, 1))]
+# var3 = [var3;; 0]
+# var4 = [var4;; 0]
 if timestep < sequence # online learning, 1-by-1 timestep
 # no error calculation
 elseif timestep == sequence # online learning, 1-by-1 timestep
 # no error calculation
-#WORKING answer time windows, collect logit to get finalAnswer
+# answer time windows, collect logit to get finalAnswer
 elseif timestep > sequence && timestep < sequence+thinkingPeriod
 logit_cpu = logit |> cpu
-logit_cpu = logit_cpu[:,1]
+# logit_cpu = logit_cpu[:,1]
 finalAnswer = length(finalAnswer) == 1 ? logit : finalAnswer .+ logit # (logit, batch)
 finalAnswer_cpu = finalAnswer |> cpu
 on_vt_cpu = model.on_vt |> cpu
 on_vt_cpu = on_vt_cpu[1,1,:,1]
 modelError, outputError, vt0, progress =
-loss(vt0, on_vt_cpu, logit_cpu, correctAnswer_array, correctAnswer_number)
+loss(vt0, on_vt_cpu, logit_cpu, finalAnswer_cpu, correctAnswer_array, correctAnswer_number)
 modelError_gpu = [modelError] |> device
 outputError_gpu = outputError |> device
 IronpenGPU.compute_paramsChange!(model, modelError_gpu, outputError_gpu, label_gpu)
@@ -517,14 +526,14 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 elseif timestep == sequence+thinkingPeriod #TODO update code
 logit_cpu = logit |> cpu
-logit_cpu = logit_cpu[:,1]
+# logit_cpu = logit_cpu[:,1]
 finalAnswer = length(finalAnswer) == 1 ? logit : finalAnswer .+ logit # (logit, batch)
 finalAnswer_cpu = finalAnswer |> cpu
 on_vt_cpu = model.on_vt |> cpu
 on_vt_cpu = on_vt_cpu[1,1,:,1]
 modelError, outputError, vt0, progress =
-loss(vt0, on_vt_cpu, logit_cpu, correctAnswer_array, correctAnswer_number)
+loss(vt0, on_vt_cpu, logit_cpu, finalAnswer_cpu, correctAnswer_array, correctAnswer_number)
 modelError_gpu = [modelError] |> device
 outputError_gpu = outputError |> device
@@ -565,17 +574,21 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
# println("label $(labels[1]) finalAnswer $finalAnswer_cpu") # println("label $(labels[1]) finalAnswer $finalAnswer_cpu")
max = isequal.(finalAnswer_cpu[:,1], maximum(finalAnswer_cpu[:,1])) max = isequal.(finalAnswer_cpu[:,1], maximum(finalAnswer_cpu[:,1]))
if sum(finalAnswer_cpu) == 0 if sum(finalAnswer_cpu) == 0
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer __ LEARNING") println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer __ LEARNING")
IronpenGPU.learn!(model, progress, device)
elseif sum(max) == 1 && findall(max)[1] -1 == labels[1] elseif sum(max) == 1 && findall(max)[1] -1 == labels[1]
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect += 1
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu CORRECT") println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu CORRECT")
IronpenGPU.learn!(model, progress, device)
elseif sum(max) == 1 && findall(max)[1] -1 != labels[1] elseif sum(max) == 1 && findall(max)[1] -1 != labels[1]
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
IronpenGPU.learn!(model, progress, device) IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
else else
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
IronpenGPU.learn!(model, progress, device) IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
end end
# error("DEBUG -> main $(Dates.now())") # error("DEBUG -> main $(Dates.now())")
@@ -585,22 +598,22 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 end
 # update plot
-# plot10[] = firedNeurons_t1
-# plot20[] = view(logitLog, 1 , :)
-# plot21[] = view(logitLog, 2 , :)
-# plot22[] = view(logitLog, 3 , :)
-# plot23[] = view(logitLog, 4 , :)
-# plot24[] = view(logitLog, 5 , :)
-# plot25[] = view(logitLog, 6 , :)
-# plot26[] = view(logitLog, 7 , :)
-# plot27[] = view(logitLog, 8 , :)
-# plot28[] = view(logitLog, 9 , :)
-# plot29[] = view(logitLog, 10, :)
-# plot30[] = view(var1, 1 , :)
-# plot31[] = view(var1, 2 , :)
-# plot32[] = view(var1, 3 , :)
+plot10[] = firedNeurons_t1
+plot20[] = view(logitLog, 1 , :)
+plot21[] = view(logitLog, 2 , :)
+plot22[] = view(logitLog, 3 , :)
+plot23[] = view(logitLog, 4 , :)
+plot24[] = view(logitLog, 5 , :)
+plot25[] = view(logitLog, 6 , :)
+plot26[] = view(logitLog, 7 , :)
+plot27[] = view(logitLog, 8 , :)
+plot28[] = view(logitLog, 9 , :)
+plot29[] = view(logitLog, 10, :)
+plot30[] = view(var1, 1 , :)
+plot31[] = view(var1, 2 , :)
+plot32[] = view(var1, 3 , :)
 # plot33[] = view(var1, 4 , :)
 # plot34[] = view(var1, 5 , :)
 # plot35[] = view(var1, 6 , :)
@@ -609,9 +622,9 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 # plot38[] = view(var1, 9 , :)
 # plot39[] = view(var1, 10, :)
-# plot40[] = view(var2, 1 , :)
-# plot41[] = view(var2, 2 , :)
-# plot42[] = view(var2, 3 , :)
+plot40[] = view(var2, 1 , :)
+plot41[] = view(var2, 2 , :)
+plot42[] = view(var2, 3 , :)
 # plot43[] = view(var2, 4 , :)
 # plot44[] = view(var2, 5 , :)
 # plot45[] = view(var2, 6 , :)
@@ -647,43 +660,22 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 # _modelRespond = [sum(i) for i in eachrow(_modelRespond)]
 # modelRespond = isequal.(isequal.(_modelRespond, 0), 0)
-# display(fig1)
-# sleep(1)
-# if k % 3 == 0
-# firedNeurons_t1 = zeros(1)
-# logitLog = zeros(10, 2)
-# var1 = zeros(10, 2)
-# var2 = zeros(10, 2)
+display(fig1)
+sleep(1)
+if rep % 3 == 0
+firedNeurons_t1 = zeros(1)
+logitLog = zeros(10, 2)
+var1 = zeros(3, 1)
+var2 = zeros(3, 1)
 # var3 = zeros(10, 2)
 # var4 = zeros(10, 2)
-# end
-# # if predict == OneHotArrays.onehot(label, labelDict)
-# # println("model train $label successfully, $k tries")
-# # # wait(display(fig1))
-# # firedNeurons_t1 = zeros(1)
-# # logitLog = zeros(10, 2)
-# # var1 = zeros(10, 2)
-# # var2 = zeros(10, 2)
-# # var3 = zeros(10, 2)
-# # var4 = zeros(10, 2)
-# # break
-# # end
-# if k == maxRepeatRound
-# # println("model train $label unsuccessfully, $maxRepeatRound tries, skip training")
-# # display(fig1)
-# firedNeurons_t1 = zeros(1)
-# logitLog = zeros(10, 2)
-# var1 = zeros(10, 2)
-# var2 = zeros(10, 2)
-# var3 = zeros(10, 2)
-# var4 = zeros(10, 2)
-# break
-# end
+end
 end
 next!(p)
 end
@@ -794,8 +786,8 @@ function dualTrackSpikeGen(inputsignals, thresholds=[1.0]; noise=(false, 1, 0.5)
 end
 rowInputSignal = reshape(rowInputSignal, (size(rowInputSignal, 1), 1, size(inputsignals, 3)))
 colInputSignal = reshape(colInputSignal, (size(colInputSignal, 1), 1, size(inputsignals, 3)))
-rowInputSignal = spikeGenerator(rowInputSignal, thresholds, noise=noise, copies=8)
-colInputSignal = spikeGenerator(colInputSignal, thresholds, noise=noise, copies=8)
+rowInputSignal = spikeGenerator(rowInputSignal, thresholds, noise=noise, copies=3)
+colInputSignal = spikeGenerator(colInputSignal, thresholds, noise=noise, copies=3)
 signal = cat(rowInputSignal, colInputSignal, dims=2)
 return signal
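Note: only the call sites appear in this diff, so the spikeGenerator internals are not shown; the signature suggests the threshold list turns an analog trace into one binary spike channel per threshold, duplicated copies times. A hedged sketch of that kind of threshold encoding (this function is an illustration, not the repository's implementation):

# one spike channel per threshold crossing (illustrative, not the repo's code)
function threshold_spikes(signal::AbstractVector, thresholds::AbstractVector)
    spikes = zeros(Float32, length(thresholds), length(signal))
    for (i, th) in enumerate(thresholds), t in eachindex(signal)
        spikes[i, t] = signal[t] >= th ? 1f0 : 0f0
    end
    return spikes
end

threshold_spikes(rand(784), [0.05, 0.1, 0.2, 0.3, 0.5])    # 5×784 spike raster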
@@ -853,7 +845,7 @@ function noiseGenerator(row, col, z; prob=0.5)
 end
 function loss(vt0::AbstractFloat, vt1::AbstractArray, logit::AbstractArray,
-correctAnswer_array, correctAnswer_number)
+finalAnswer, correctAnswer_array, correctAnswer_number)
 labelPosition = correctAnswer_number +1
@@ -861,14 +853,17 @@ function loss(vt0::AbstractFloat, vt1::AbstractArray, logit::AbstractArray,
 vt1 = vt1[labelPosition]
 # get zt of correct neuron
-zt = logit[labelPosition]
+zt = finalAnswer[labelPosition]
 rsnnError = nothing
 progress = nothing
-if zt == 1
+outputError = correctAnswer_array .- finalAnswer
+if zt > 0
 rsnnError = 0.0 # already correct, no weight update
 progress = 2
+outputError[labelPosition] = 0
 elseif vt1 > vt0 # progress increase
 rsnnError = 1.0 - vt1
 progress = 1
@@ -879,14 +874,15 @@ function loss(vt0::AbstractFloat, vt1::AbstractArray, logit::AbstractArray,
 rsnnError = vt0 - vt1
 progress = -1
 else
-error("undefined condition line $(@__LINE__)")
+error("undefined condition zt $zt, vt1 $vt1 vt0 $vt0")
 end
-outputError = correctAnswer_array .- logit
 return rsnnError, outputError, vt1, progress
 end
 # function arrayMax(x)
 # if sum(GeneralUtils.isNotEqual.(x, 0)) == 0 # guard against all-zeros array
 # return GeneralUtils.isNotEqual.(x, 0)
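Note: the revised loss judges correctness on the accumulated answer window (zt > 0 on finalAnswer) instead of requiring a single-step logit of exactly 1, and it now zeroes the output error at the label position once the answer is already correct. A standalone sketch of that error shaping with illustrative values:

correctAnswer_array = [0.0, 1.0, 0.0]    # one-hot target for label 1
finalAnswer = [0.2, 0.7, 0.1]            # logits summed over the answer window
labelPosition = 2
outputError = correctAnswer_array .- finalAnswer
if finalAnswer[labelPosition] > 0
    outputError[labelPosition] = 0       # correct neuron: no further push
end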

View File

@@ -27,8 +27,10 @@ using Pkg; Pkg.activate("."); Pkg.resolve(), Pkg.instantiate()
 # end
 using Revise
-using BenchmarkTools, Cthulhu
+using BenchmarkTools, Cthulhu, REPL.TerminalMenus
 using Flux, CUDA
 using BSON, JSON3
 using MLDatasets: MNIST
@@ -66,6 +68,9 @@ if device == gpu CUDA.device!(0) end #CHANGE
 -
 """
+# ----------------------------- REPL menu options ---------------------------- #
+options = ["yes", "no"]
+menu = RadioMenu(options)
 # communication config --------------------------------------------------------------------------100
@@ -416,17 +421,22 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 stop = 0
 vt0 = 0.0 # store vt to compute learning progress
 for epoch = 1:1000
-stop == 3 ? break : false
+stop == 1 ? break : false
 println("epoch $epoch")
 n = length(trainData)
 println("n $n")
 p = Progress(n, dt=1.0) # minimum update interval: 1 second
 for (imgBatch, labels) in trainData # imgBatch(28, 28, 4) i.e. (row, col, batch), labels(label, batch)
-for rep in 1:10
-stop == 3 ? break : false
+stop == 1 ? break : false
+consecutiveCorrect = 0
+rep = 0
+# for rep in 1:20
+while consecutiveCorrect < 10
+rep += 1
+stop == 1 ? break : false
 # prepare image into input signal (10, 2, 784, 4) i.e. (row, col, timestep, batch)
-signal = dualTrackSpikeGen(imgBatch, [0.05, 0.1, 0.2, 0.3, 0.5], noise=(true, 1, 0.5), copies=18)
+signal = dualTrackSpikeGen(imgBatch, [0.05, 0.1, 0.2, 0.3, 0.5], noise=(true, 1, 1.0), copies=18)
 if length(size(signal)) == 3
 row, col, sequence = size(signal)
 batch = 1
@@ -472,7 +482,7 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 elseif timestep == sequence # online learning, 1-by-1 timestep
 # no error calculation
-#WORKING answer time windows, collect logit to get finalAnswer
+# answer time windows, collect logit to get finalAnswer
 elseif timestep > sequence && timestep < sequence+thinkingPeriod
 logit_cpu = logit |> cpu
 logit_cpu = logit_cpu[:,1]
@@ -565,17 +575,21 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
# println("label $(labels[1]) finalAnswer $finalAnswer_cpu") # println("label $(labels[1]) finalAnswer $finalAnswer_cpu")
max = isequal.(finalAnswer_cpu[:,1], maximum(finalAnswer_cpu[:,1])) max = isequal.(finalAnswer_cpu[:,1], maximum(finalAnswer_cpu[:,1]))
if sum(finalAnswer_cpu) == 0 if sum(finalAnswer_cpu) == 0
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer __ LEARNING") println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer __ LEARNING")
IronpenGPU.learn!(model, progress, device)
elseif sum(max) == 1 && findall(max)[1] -1 == labels[1] elseif sum(max) == 1 && findall(max)[1] -1 == labels[1]
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect += 1
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu CORRECT") println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu CORRECT")
IronpenGPU.learn!(model, progress, device)
elseif sum(max) == 1 && findall(max)[1] -1 != labels[1] elseif sum(max) == 1 && findall(max)[1] -1 != labels[1]
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
IronpenGPU.learn!(model, progress, device) IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
else else
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
IronpenGPU.learn!(model, progress, device) IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
end end
# error("DEBUG -> main $(Dates.now())") # error("DEBUG -> main $(Dates.now())")
@@ -683,6 +697,14 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
 # break
 # end
 end
+#WORKING add menu
+# choice = request("continue?", menu)
+# if choice == "yes"
+# continue
+# else
+# stop = 1
+# end
 next!(p)
 end
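Note: the menu hook above is left commented out ("#WORKING add menu"). One caveat if it is enabled later: request returns the selected option's integer index, so comparing the result against the string "yes" will never match; the check needs to go through the options vector. A minimal sketch of the corrected hook (placement inside the batch loop is hypothetical):

choice = request("continue?", menu)       # Int index into options, -1 on cancel
if choice == -1 || options[choice] == "no"
    stop = 1                              # signal the outer loops to break
end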

View File

@@ -0,0 +1,946 @@
# This file is machine-generated - editing it directly is not advised
julia_version = "1.9.3"
manifest_format = "2.0"
project_hash = "844808a02b2a30acdc69d975773e029da0ec81b8"
[[deps.AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "8bc0aaec0ca548eb6cf5f0d7d16351650c1ee956"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.3.2"
weakdeps = ["ChainRulesCore"]
[deps.AbstractFFTs.extensions]
AbstractFFTsChainRulesCoreExt = "ChainRulesCore"
[[deps.Adapt]]
deps = ["LinearAlgebra", "Requires"]
git-tree-sha1 = "76289dc51920fdc6e0013c872ba9551d54961c24"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.6.2"
weakdeps = ["StaticArrays"]
[deps.Adapt.extensions]
AdaptStaticArraysExt = "StaticArrays"
[[deps.ArgCheck]]
git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4"
uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197"
version = "2.3.0"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Atomix]]
deps = ["UnsafeAtomics"]
git-tree-sha1 = "c06a868224ecba914baa6942988e2f2aade419be"
uuid = "a9b6321e-bd34-4604-b9c9-b65b8de01458"
version = "0.1.0"
[[deps.BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "dbf84058d0a8cbbadee18d25cf606934b22d7c66"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.4.2"
[[deps.BangBang]]
deps = ["Compat", "ConstructionBase", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables"]
git-tree-sha1 = "e28912ce94077686443433c2800104b061a827ed"
uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66"
version = "0.3.39"
[deps.BangBang.extensions]
BangBangChainRulesCoreExt = "ChainRulesCore"
BangBangDataFramesExt = "DataFrames"
BangBangStaticArraysExt = "StaticArrays"
BangBangStructArraysExt = "StructArrays"
BangBangTypedTablesExt = "TypedTables"
[deps.BangBang.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
TypedTables = "9d95f2ec-7b3d-5a63-8d20-e2491e220bb9"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.Baselet]]
git-tree-sha1 = "aebf55e6d7795e02ca500a689d326ac979aaf89e"
uuid = "9718e550-a3fa-408a-8086-8db961cd8217"
version = "0.1.1"
[[deps.CEnum]]
git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.2"
[[deps.CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CUDA_Driver_jll", "CUDA_Runtime_Discovery", "CUDA_Runtime_jll", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "KernelAbstractions", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Preferences", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "UnsafeAtomicsLLVM"]
git-tree-sha1 = "442d989978ed3ff4e174c928ee879dc09d1ef693"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "4.3.2"
[[deps.CUDA_Driver_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"]
git-tree-sha1 = "498f45593f6ddc0adff64a9310bb6710e851781b"
uuid = "4ee394cb-3365-5eb0-8335-949819d2adfc"
version = "0.5.0+1"
[[deps.CUDA_Runtime_Discovery]]
deps = ["Libdl"]
git-tree-sha1 = "bcc4a23cbbd99c8535a5318455dcf0f2546ec536"
uuid = "1af6417a-86b4-443c-805f-a4643ffb695f"
version = "0.2.2"
[[deps.CUDA_Runtime_jll]]
deps = ["Artifacts", "CUDA_Driver_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "5248d9c45712e51e27ba9b30eebec65658c6ce29"
uuid = "76a88914-d11a-5bdc-97e0-2f5a05c973a2"
version = "0.6.0+0"
[[deps.CUDNN_jll]]
deps = ["Artifacts", "CUDA_Runtime_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "2918fbffb50e3b7a0b9127617587afa76d4276e8"
uuid = "62b44479-cb7b-5706-934f-f13b2eb2e645"
version = "8.8.1+0"
[[deps.Calculus]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f641eb0a4f00c343bbc32346e1217b86f3ce9dad"
uuid = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9"
version = "0.5.1"
[[deps.ChainRules]]
deps = ["Adapt", "ChainRulesCore", "Compat", "Distributed", "GPUArraysCore", "IrrationalConstants", "LinearAlgebra", "Random", "RealDot", "SparseArrays", "Statistics", "StructArrays"]
git-tree-sha1 = "1cdf290d4feec68824bfb84f4bfc9f3aba185647"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "1.51.1"
[[deps.ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "e30f2f4e20f7f186dc36529910beaedc60cfa644"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.16.0"
[[deps.CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[deps.Compat]]
deps = ["UUIDs"]
git-tree-sha1 = "4e88377ae7ebeaf29a047aa1ee40826e0b708a5d"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "4.7.0"
weakdeps = ["Dates", "LinearAlgebra"]
[deps.Compat.extensions]
CompatLinearAlgebraExt = "LinearAlgebra"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "1.0.5+0"
[[deps.CompositionsBase]]
git-tree-sha1 = "802bb88cd69dfd1509f6670416bd4434015693ad"
uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b"
version = "0.1.2"
[deps.CompositionsBase.extensions]
CompositionsBaseInverseFunctionsExt = "InverseFunctions"
[deps.CompositionsBase.weakdeps]
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.CondaPkg]]
deps = ["JSON3", "Markdown", "MicroMamba", "Pidfile", "Pkg", "TOML"]
git-tree-sha1 = "741146cf2ced5859faae76a84b541aa9af1a78bb"
uuid = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
version = "0.2.18"
[[deps.ConstructionBase]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "738fec4d684a9a6ee9598a8bfee305b26831f28c"
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
version = "1.5.2"
[deps.ConstructionBase.extensions]
ConstructionBaseIntervalSetsExt = "IntervalSets"
ConstructionBaseStaticArraysExt = "StaticArrays"
[deps.ConstructionBase.weakdeps]
IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
[[deps.ContextVariablesX]]
deps = ["Compat", "Logging", "UUIDs"]
git-tree-sha1 = "25cc3803f1030ab855e383129dcd3dc294e322cc"
uuid = "6add18c4-b38d-439d-96f6-d6bc489c04c5"
version = "0.1.3"
[[deps.DataAPI]]
git-tree-sha1 = "8da84edb865b0b5b0100c0666a9bc9a0b71c553c"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.15.0"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "cf25ccb972fec4e4817764d01c82386ae94f77b4"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.14"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DefineSingletons]]
git-tree-sha1 = "0fba8b706d0178b4dc7fd44a96a92382c9065c2c"
uuid = "244e2a9f-e319-4986-a169-4d1fe445cd52"
version = "0.1.2"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
git-tree-sha1 = "9e2f36d3c96a820c678f2f1f1782582fcf685bae"
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
version = "1.9.1"
[[deps.DiffResults]]
deps = ["StaticArraysCore"]
git-tree-sha1 = "782dd5f4561f5d267313f23853baaaa4c52ea621"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.1.0"
[[deps.DiffRules]]
deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "23163d55f885173722d1e4cf0f6110cdbaf7e272"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.15.1"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.Distributions]]
deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "Test"]
git-tree-sha1 = "938fe2981db009f531b6332e31c58e9584a2f9bd"
uuid = "31c24e10-a181-5473-b8eb-7969acd0382f"
version = "0.25.100"
[deps.Distributions.extensions]
DistributionsChainRulesCoreExt = "ChainRulesCore"
DistributionsDensityInterfaceExt = "DensityInterface"
[deps.Distributions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DensityInterface = "b429d917-457f-4dbc-8f4c-0cc954292b1d"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.9.3"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
version = "1.6.0"
[[deps.DualNumbers]]
deps = ["Calculus", "NaNMath", "SpecialFunctions"]
git-tree-sha1 = "5837a837389fccf076445fce071c8ddaea35a566"
uuid = "fa6b7ba4-c1ee-5f82-b5fc-ecf0adba8f74"
version = "0.6.8"
[[deps.ExprTools]]
git-tree-sha1 = "c1d06d129da9f55715c6c212866f5b1bddc5fa00"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.9"
[[deps.FLoops]]
deps = ["BangBang", "Compat", "FLoopsBase", "InitialValues", "JuliaVariables", "MLStyle", "Serialization", "Setfield", "Transducers"]
git-tree-sha1 = "ffb97765602e3cbe59a0589d237bf07f245a8576"
uuid = "cc61a311-1640-44b5-9fba-1b764f453329"
version = "0.2.1"
[[deps.FLoopsBase]]
deps = ["ContextVariablesX"]
git-tree-sha1 = "656f7a6859be8673bf1f35da5670246b923964f7"
uuid = "b9860ae5-e623-471e-878b-f6a53c775ea6"
version = "0.1.1"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[deps.FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "0b3b52afd0f87b0a3f5ada0466352d125c9db458"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "1.2.1"
[[deps.Flux]]
deps = ["Adapt", "CUDA", "ChainRulesCore", "Functors", "LinearAlgebra", "MLUtils", "MacroTools", "NNlib", "NNlibCUDA", "OneHotArrays", "Optimisers", "Preferences", "ProgressLogging", "Random", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "Zygote", "cuDNN"]
git-tree-sha1 = "3e2c3704c2173ab4b1935362384ca878b53d4c34"
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.13.17"
[deps.Flux.extensions]
AMDGPUExt = "AMDGPU"
FluxMetalExt = "Metal"
[deps.Flux.weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
[[deps.ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"]
git-tree-sha1 = "00e252f4d706b3d55a8863432e742bf5717b498d"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.35"
weakdeps = ["StaticArrays"]
[deps.ForwardDiff.extensions]
ForwardDiffStaticArraysExt = "StaticArrays"
[[deps.Functors]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "478f8c3145bb91d82c2cf20433e8c1b30df454cc"
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
version = "0.4.4"
[[deps.Future]]
deps = ["Random"]
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
[[deps.GPUArrays]]
deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"]
git-tree-sha1 = "2e57b4a4f9cc15e85a24d603256fe08e527f48d1"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "8.8.1"
[[deps.GPUArraysCore]]
deps = ["Adapt"]
git-tree-sha1 = "2d6ca471a6c7b536127afccfa7564b5b39227fe0"
uuid = "46192b85-c4d5-4398-a991-12ede77f4527"
version = "0.1.5"
[[deps.GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Scratch", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "cb090aea21c6ca78d59672a7e7d13bd56d09de64"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.20.3"
[[deps.GeneralUtils]]
deps = ["CUDA", "DataStructures", "Distributions", "Flux", "JSON3", "Random"]
path = "C:\\Users\\pitak\\.julia\\dev\\GeneralUtils"
uuid = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
version = "0.1.0"
[[deps.HypergeometricFunctions]]
deps = ["DualNumbers", "LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"]
git-tree-sha1 = "f218fe3736ddf977e0e772bc9a586b2383da2685"
uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a"
version = "0.3.23"
[[deps.IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "eac00994ce3229a464c2847e956d77a2c64ad3a5"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.4.10"
[[deps.InitialValues]]
git-tree-sha1 = "4da0f88e9a39111c2fa3add390ab15f3a44f3ca3"
uuid = "22cec73e-a1b8-11e9-2c92-598750a2cf9c"
version = "0.3.1"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.IrrationalConstants]]
git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.2.2"
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[deps.JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.4.1"
[[deps.JSON3]]
deps = ["Dates", "Mmap", "Parsers", "PrecompileTools", "StructTypes", "UUIDs"]
git-tree-sha1 = "5b62d93f2582b09e469b3099d839c2d2ebf5066d"
uuid = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
version = "1.13.1"
[[deps.JuliaVariables]]
deps = ["MLStyle", "NameResolution"]
git-tree-sha1 = "49fb3cb53362ddadb4415e9b73926d6b40709e70"
uuid = "b14d175d-62b4-44ba-8fb7-3064adc8c3ec"
version = "0.2.4"
[[deps.KernelAbstractions]]
deps = ["Adapt", "Atomix", "InteractiveUtils", "LinearAlgebra", "MacroTools", "PrecompileTools", "SparseArrays", "StaticArrays", "UUIDs", "UnsafeAtomics", "UnsafeAtomicsLLVM"]
git-tree-sha1 = "b48617c5d764908b5fac493cd907cf33cc11eec1"
uuid = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
version = "0.9.6"
[[deps.LLVM]]
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "5007c1421563108110bbd57f63d8ad4565808818"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "5.2.0"
[[deps.LLVMExtra_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "1222116d7313cdefecf3d45a2bc1a89c4e7c9217"
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
version = "0.0.22+0"
[[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.84.0+0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.LinearAlgebra]]
deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "c3ce8e7420b3a6e071e0fe4745f5d4300e37b13f"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.24"
[deps.LogExpFunctions.extensions]
LogExpFunctionsChainRulesCoreExt = "ChainRulesCore"
LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables"
LogExpFunctionsInverseFunctionsExt = "InverseFunctions"
[deps.LogExpFunctions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.MLStyle]]
git-tree-sha1 = "bc38dff0548128765760c79eb7388a4b37fae2c8"
uuid = "d8e11817-5142-5d16-987a-aa16d5891078"
version = "0.4.17"
[[deps.MLUtils]]
deps = ["ChainRulesCore", "Compat", "DataAPI", "DelimitedFiles", "FLoops", "NNlib", "Random", "ShowCases", "SimpleTraits", "Statistics", "StatsBase", "Tables", "Transducers"]
git-tree-sha1 = "3504cdb8c2bc05bde4d4b09a81b01df88fcbbba0"
uuid = "f1d291b0-491e-4a28-83b9-f70985020b54"
version = "0.4.3"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.10"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.2+0"
[[deps.MicroCollections]]
deps = ["BangBang", "InitialValues", "Setfield"]
git-tree-sha1 = "629afd7d10dbc6935ec59b32daeb33bc4460a42e"
uuid = "128add7d-3638-4c79-886c-908ea0c25c34"
version = "0.1.4"
[[deps.MicroMamba]]
deps = ["Pkg", "Scratch", "micromamba_jll"]
git-tree-sha1 = "011cab361eae7bcd7d278f0a7a00ff9c69000c51"
uuid = "0b3b1443-0f03-428d-bdfb-f27f9c1191ea"
version = "0.1.14"
[[deps.Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "f66bdc5de519e8f8ae43bdc598782d35a25b1272"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.1.0"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.10.11"
[[deps.NNlib]]
deps = ["Adapt", "Atomix", "ChainRulesCore", "GPUArraysCore", "KernelAbstractions", "LinearAlgebra", "Pkg", "Random", "Requires", "Statistics"]
git-tree-sha1 = "72240e3f5ca031937bd536182cb2c031da5f46dd"
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
version = "0.8.21"
[deps.NNlib.extensions]
NNlibAMDGPUExt = "AMDGPU"
[deps.NNlib.weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
[[deps.NNlibCUDA]]
deps = ["Adapt", "CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics", "cuDNN"]
git-tree-sha1 = "f94a9684394ff0d325cc12b06da7032d8be01aaf"
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
version = "0.2.7"
[[deps.NaNMath]]
deps = ["OpenLibm_jll"]
git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "1.0.2"
[[deps.NameResolution]]
deps = ["PrettyPrint"]
git-tree-sha1 = "1a0fa0e9613f46c9b8c11eee38ebb4f590013c5e"
uuid = "71a1bf82-56d0-4bbc-8a3c-48b961074391"
version = "0.1.5"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
version = "1.2.0"
[[deps.OneHotArrays]]
deps = ["Adapt", "ChainRulesCore", "Compat", "GPUArraysCore", "LinearAlgebra", "NNlib"]
git-tree-sha1 = "5e4029759e8699ec12ebdf8721e51a659443403c"
uuid = "0b1bfda6-eb8a-41d2-88d8-f5af5cad476f"
version = "0.2.4"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.21+4"
[[deps.OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
version = "0.8.1+0"
[[deps.OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[deps.Optimisers]]
deps = ["ChainRulesCore", "Functors", "LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "6a01f65dd8583dee82eecc2a19b0ff21521aa749"
uuid = "3bd65402-5787-11e9-1adc-39752487f4e2"
version = "0.2.18"
[[deps.OrderedCollections]]
git-tree-sha1 = "d321bf2de576bf25ec4d3e4360faca399afca282"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.6.0"
[[deps.PDMats]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "67eae2738d63117a196f497d7db789821bce61d1"
uuid = "90014a1f-27ba-587c-ab20-58faa44d9150"
version = "0.11.17"
[[deps.Parsers]]
deps = ["Dates", "PrecompileTools", "UUIDs"]
git-tree-sha1 = "4b2e829ee66d4218e0cef22c0a64ee37cf258c29"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.7.1"
[[deps.Pidfile]]
deps = ["FileWatching", "Test"]
git-tree-sha1 = "2d8aaf8ee10df53d0dfb9b8ee44ae7c04ced2b03"
uuid = "fa939f87-e72e-5be4-a000-7fc836dbe307"
version = "1.3.0"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.9.2"
[[deps.PrecompileTools]]
deps = ["Preferences"]
git-tree-sha1 = "9673d39decc5feece56ef3940e5dafba15ba0f81"
uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
version = "1.1.2"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "7eb1686b4f04b82f96ed7a4ea5890a4f0c7a09f1"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.4.0"
[[deps.PrettyPrint]]
git-tree-sha1 = "632eb4abab3449ab30c5e1afaa874f0b98b586e4"
uuid = "8162dcfd-2161-5ef2-ae6c-7681170c5f98"
version = "0.2.0"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.ProgressLogging]]
deps = ["Logging", "SHA", "UUIDs"]
git-tree-sha1 = "80d919dee55b9c50e8d9e2da5eeafff3fe58b539"
uuid = "33c8b6b6-d38a-422a-b730-caa89a2f386c"
version = "0.1.4"
[[deps.PythonCall]]
deps = ["CondaPkg", "Dates", "Libdl", "MacroTools", "Markdown", "Pkg", "REPL", "Requires", "Serialization", "Tables", "UnsafePointers"]
git-tree-sha1 = "70af6bdbde63d7d0a4ea99f3e890ebdb55e9d464"
uuid = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
version = "0.9.14"
[[deps.QuadGK]]
deps = ["DataStructures", "LinearAlgebra"]
git-tree-sha1 = "6ec7ac8412e83d57e313393220879ede1740f9ee"
uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
version = "2.8.2"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.Random123]]
deps = ["Random", "RandomNumbers"]
git-tree-sha1 = "552f30e847641591ba3f39fd1bed559b9deb0ef3"
uuid = "74087812-796a-5b5d-8853-05524746bad3"
version = "1.6.1"
[[deps.RandomNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
version = "1.5.3"
[[deps.RealDot]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
version = "0.1.0"
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.3.0"
[[deps.Rmath]]
deps = ["Random", "Rmath_jll"]
git-tree-sha1 = "f65dcb5fa46aee0cf9ed6274ccbd597adc49aa7b"
uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa"
version = "0.7.1"
[[deps.Rmath_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6ed52fdd3382cf21947b15e8870ac0ddbff736da"
uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f"
version = "0.4.0+0"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.Scratch]]
deps = ["Dates"]
git-tree-sha1 = "30449ee12237627992a99d5e30ae63e4d78cd24a"
uuid = "6c6a2e73-6563-6170-7368-637461726353"
version = "1.2.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.Setfield]]
deps = ["ConstructionBase", "Future", "MacroTools", "StaticArraysCore"]
git-tree-sha1 = "e2cc6d8c88613c05e1defb55170bf5ff211fbeac"
uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46"
version = "1.1.1"
[[deps.ShowCases]]
git-tree-sha1 = "7f534ad62ab2bd48591bdeac81994ea8c445e4a5"
uuid = "605ecd9f-84a6-4c9e-81e2-4798472b76a3"
version = "0.1.0"
[[deps.SimpleTraits]]
deps = ["InteractiveUtils", "MacroTools"]
git-tree-sha1 = "5d7e3f4e11935503d3ecaf7186eac40602e7d231"
uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d"
version = "0.9.4"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "c60ec5c62180f27efea3ba2908480f8055e17cee"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.1.1"
[[deps.SparseArrays]]
deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.SpecialFunctions]]
deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "7beb031cf8145577fbccacd94b8a8f4ce78428d3"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "2.3.0"
weakdeps = ["ChainRulesCore"]
[deps.SpecialFunctions.extensions]
SpecialFunctionsChainRulesCoreExt = "ChainRulesCore"
[[deps.SplittablesBase]]
deps = ["Setfield", "Test"]
git-tree-sha1 = "e08a62abc517eb79667d0a29dc08a3b589516bb5"
uuid = "171d559e-b47b-412a-8079-5efa626c420e"
version = "0.1.15"
[[deps.StaticArrays]]
deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"]
git-tree-sha1 = "832afbae2a45b4ae7e831f86965469a24d1d8a83"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.5.26"
[[deps.StaticArraysCore]]
git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a"
uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c"
version = "1.4.0"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
version = "1.9.0"
[[deps.StatsAPI]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "45a7769a04a3cf80da1c1c7c60caf932e6f4c9f7"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.6.0"
[[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "75ebe04c5bed70b91614d684259b661c9e6274a4"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.34.0"
[[deps.StatsFuns]]
deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"]
git-tree-sha1 = "f625d686d5a88bcd2b15cd81f18f98186fdc0c9a"
uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
version = "1.3.0"
[deps.StatsFuns.extensions]
StatsFunsChainRulesCoreExt = "ChainRulesCore"
StatsFunsInverseFunctionsExt = "InverseFunctions"
[deps.StatsFuns.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.StructArrays]]
deps = ["Adapt", "DataAPI", "GPUArraysCore", "StaticArraysCore", "Tables"]
git-tree-sha1 = "521a0e828e98bb69042fec1809c1b5a680eb7389"
uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
version = "0.6.15"
[[deps.StructTypes]]
deps = ["Dates", "UUIDs"]
git-tree-sha1 = "ca4bccb03acf9faaf4137a9abc1881ed1841aa70"
uuid = "856f2bd8-1eba-4b0a-8007-ebc267875bd4"
version = "1.10.0"
[[deps.SuiteSparse]]
deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
[[deps.SuiteSparse_jll]]
deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"]
uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c"
version = "5.10.1+6"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.3"
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[deps.Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits", "Test"]
git-tree-sha1 = "1544b926975372da01227b382066ab70e574a3ec"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.10.1"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.0"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.TimerOutputs]]
deps = ["ExprTools", "Printf"]
git-tree-sha1 = "f548a9e9c490030e545f72074a41edfd0e5bcdd7"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.23"
[[deps.Transducers]]
deps = ["Adapt", "ArgCheck", "BangBang", "Baselet", "CompositionsBase", "DefineSingletons", "Distributed", "InitialValues", "Logging", "Markdown", "MicroCollections", "Requires", "Setfield", "SplittablesBase", "Tables"]
git-tree-sha1 = "a66fb81baec325cf6ccafa243af573b031e87b00"
uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999"
version = "0.4.77"
[deps.Transducers.extensions]
TransducersBlockArraysExt = "BlockArrays"
TransducersDataFramesExt = "DataFrames"
TransducersLazyArraysExt = "LazyArrays"
TransducersOnlineStatsBaseExt = "OnlineStatsBase"
TransducersReferenceablesExt = "Referenceables"
[deps.Transducers.weakdeps]
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02"
OnlineStatsBase = "925886fa-5bf2-5e8e-b522-a9147a512338"
Referenceables = "42d2dcc6-99eb-4e98-b66c-637b7d73030e"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.UnsafeAtomics]]
git-tree-sha1 = "6331ac3440856ea1988316b46045303bef658278"
uuid = "013be700-e6cd-48c3-b4a1-df204f14c38f"
version = "0.2.1"
[[deps.UnsafeAtomicsLLVM]]
deps = ["LLVM", "UnsafeAtomics"]
git-tree-sha1 = "ea37e6066bf194ab78f4e747f5245261f17a7175"
uuid = "d80eeb9a-aca5-4d75-85e5-170c8b632249"
version = "0.1.2"
[[deps.UnsafePointers]]
git-tree-sha1 = "c81331b3b2e60a982be57c046ec91f599ede674a"
uuid = "e17b2a0c-0bdf-430a-bd0c-3a23cae4ff39"
version = "1.0.0"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.13+0"
[[deps.Zygote]]
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "GPUArrays", "GPUArraysCore", "IRTools", "InteractiveUtils", "LinearAlgebra", "LogExpFunctions", "MacroTools", "NaNMath", "PrecompileTools", "Random", "Requires", "SparseArrays", "SpecialFunctions", "Statistics", "ZygoteRules"]
git-tree-sha1 = "5be3ddb88fc992a7d8ea96c3f10a49a7e98ebc7b"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.6.62"
[deps.Zygote.extensions]
ZygoteColorsExt = "Colors"
ZygoteDistancesExt = "Distances"
ZygoteTrackerExt = "Tracker"
[deps.Zygote.weakdeps]
Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
[[deps.ZygoteRules]]
deps = ["ChainRulesCore", "MacroTools"]
git-tree-sha1 = "977aed5d006b840e2e40c0b48984f7463109046d"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.3"
[[deps.cuDNN]]
deps = ["CEnum", "CUDA", "CUDNN_jll"]
git-tree-sha1 = "f65490d187861d6222cb38bcbbff3fd949a7ec3e"
uuid = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"
version = "1.0.4"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.8.0+0"
[[deps.micromamba_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl"]
git-tree-sha1 = "66d07957bcf7e4930d933195aed484078dd8cbb5"
uuid = "f8abcde7-e9b7-5caa-b8af-a437887ae8e4"
version = "1.4.9+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0"

View File

@@ -0,0 +1,16 @@
name = "IronpenGPU"
uuid = "3d5396ea-818e-43fc-a9d3-164248e840cd"
authors = ["ton <narawat@gmail.com>"]
version = "0.1.0"
[deps]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"

View File

@@ -0,0 +1,85 @@
module IronpenGPU # this is a parent module
# export
""" Order by dependencies of each file. The 1st included file must not depend on any other
files and each file can only depend on the file included before it.
"""
include("type.jl")
using .type # bring type into parent module namespace
include("snnUtil.jl")
using .snnUtil
include("forward.jl")
using .forward
include("learn.jl")
using .learn
include("interface.jl")
using .interface
#------------------------------------------------------------------------------------------------100
""" version 0.0.11
Todo:
[] make output neuron draw connection randomly
[4] implement variable dormant connection and pruning machanism. the longer the training the longer
0 weight stay 0.
[] using RL to control learning signal
[] consider using Dates.now() instead of timestamp because time_stamp may overflow
[] Liquid time constant. training should include adjusting α, neuron membrane potential decay factor
which defined by neuron.tau_m formula in type.jl
Change from version: 0.0.10
- growRepeatedPath!(), instead of synapse with 60% less activity count gets -w, may be I
should rank synapse based on activity count from highest perforimg synapse to lowest
and the last 60% of the rank get -w
- 10% instead of 20% synapticConnectionPercent
"""
end # module IronpenGPU

View File

@@ -0,0 +1,956 @@
module forward
# export
using Flux, CUDA
using GeneralUtils
using ..type, ..snnUtil
#------------------------------------------------------------------------------------------------100
""" kfn forward
input (row, col, batch)
"""
function (kfn::kfn_1)(input::AbstractArray)
kfn.timeStep .+= 1
# what to do at the start of a learning round
if view(kfn.learningStage, 1)[1] == 1
kfn.timeStep .= 1
# reset learning params
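# (zitCumulative is collapsed here to a single zeroed slice; the forward pass re-grows it
# through the sum() == 0 branch further below)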
kfn.zitCumulative = (kfn.zitCumulative[:,:,1] .= 0)
kfn.lif_vt .= 0
kfn.lif_wRecChange .= 0
kfn.lif_epsilonRec .= 0
kfn.lif_firingCounter .= 0
kfn.lif_refractoryCounter .= 0
kfn.lif_zt .= 0
kfn.lif_synapticActivityCounter .= 0
kfn.alif_vt .= 0
kfn.alif_a .= 0
kfn.alif_epsilonRec .= 0
kfn.alif_epsilonRecA .= 0
kfn.alif_wRecChange .= 0
kfn.alif_firingCounter .= 0
kfn.alif_refractoryCounter .= 0
kfn.alif_zt .= 0
kfn.alif_synapticActivityCounter .= 0
kfn.on_vt .= 0
kfn.on_epsilonRec .= 0
kfn.on_wOutChange .= 0
kfn.on_refractoryCounter .= 0
kfn.on_synapticActivityCounter .= 0
kfn.learningStage = [2]
end
# update the activation matrix with lif_zt and alif_zt by concatenating
# (input, lif_zt, alif_zt) along dim 2 to form the activation matrix
_zit = cat(reshape(input, (size(input, 1), size(input, 2), 1, size(input, 3))),
reshape(kfn.lif_zt, (size(input, 1), :, 1, size(input, 3))),
reshape(kfn.alif_zt, (size(input, 1), :, 1, size(input, 3))), dims=2)
kfn.zit .= reshape(_zit, (size(input, 1), :, size(input, 3)))
@sync begin
@async begin
# project 3D kfn zit into 4D lif zit
i1, i2, i3, i4 = size(kfn.lif_zit)
kfn.lif_zit .= reshape(kfn.zit, (i1, i2, 1, i4)) .* kfn.lif_arrayProjection4d
kfn.lif_exInType .= kfn.exInType .* kfn.lif_arrayProjection4d
lifForward( kfn.lif_zit,
kfn.lif_wRec,
kfn.lif_vt,
kfn.lif_vth,
kfn.lif_vRest,
kfn.lif_zt4d,
kfn.lif_alpha,
kfn.lif_phi,
kfn.lif_epsilonRec,
kfn.lif_refractoryCounter,
kfn.lif_refractoryDuration,
kfn.lif_gammaPd,
kfn.lif_firingCounter,
kfn.lif_recSignal,
kfn.lif_exInType,
kfn.lif_wRecChange,
kfn.lif_neuronInactivityCounter,
kfn.lif_synapseReconnectDelay,
kfn.lif_synapticActivityCounter,
kfn.timeStep,
)
end
@async begin
# project 3D kfn zit into 4D alif zit
i1, i2, i3, i4 = size(kfn.alif_zit)
kfn.alif_zit .= reshape(kfn.zit, (i1, i2, 1, i4)) .* kfn.alif_arrayProjection4d
kfn.alif_exInType .= kfn.exInType .* kfn.alif_arrayProjection4d
alifForward(kfn.alif_zit,
kfn.alif_wRec,
kfn.alif_vt,
kfn.alif_vth,
kfn.alif_vRest,
kfn.alif_zt4d,
kfn.alif_alpha,
kfn.alif_phi,
kfn.alif_epsilonRec,
kfn.alif_refractoryCounter,
kfn.alif_refractoryDuration,
kfn.alif_gammaPd,
kfn.alif_firingCounter,
kfn.alif_recSignal,
kfn.alif_exInType,
kfn.alif_wRecChange,
kfn.alif_neuronInactivityCounter,
kfn.alif_synapseReconnectDelay,
kfn.alif_synapticActivityCounter,
kfn.timeStep,
kfn.alif_epsilonRecA,
kfn.alif_a,
kfn.alif_avth,
kfn.alif_beta,
kfn.alif_rho,
)
end
end
# reduce lif_zt4d and alif_zt4d into lif_zt, alif_zt (collapse dims 1-2 to one spike value per neuron per batch)
kfn.lif_zt .= reduce(max, kfn.lif_zt4d, dims=(1,2))
kfn.alif_zt .= reduce(max, kfn.alif_zt4d, dims=(1,2))
# update the activation matrix with lif_zt and alif_zt by concatenating
# (input, lif_zt, alif_zt) along dim 2 to form the activation matrix
_zit = cat(reshape(input, (size(input, 1), size(input, 2), 1, size(input, 3))),
reshape(kfn.lif_zt, (size(input, 1), :, 1, size(input, 3))),
reshape(kfn.alif_zt, (size(input, 1), :, 1, size(input, 3))), dims=2)
kfn.zit .= reshape(_zit, (size(input, 1), :, size(input, 3)))
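# accumulate spike history: the zeroed sentinel slice left by the learning-round reset
# (sum == 0) is replaced outright; otherwise the new timestep slice is concatenated along dim 3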
kfn.zitCumulative = sum(kfn.zitCumulative) == 0 ? kfn.zit : cat(kfn.zitCumulative, kfn.zit, dims=3)
# kfn.zitCumulative = cat(kfn.zitCumulative, kfn.zit, dims=3)
# kfn.zitCumulative .+= kfn.zit
# project 3D kfn zit into 4D on zit
i1, i2, i3, i4 = size(kfn.on_zit)
kfn.on_zit .= reshape(kfn.zit, (i1, i2, 1, i4)) .* kfn.on_arrayProjection4d
# read out
onForward( kfn.on_zit,
kfn.on_wOut,
kfn.on_vt,
kfn.on_vth,
kfn.on_vRest,
kfn.on_zt4d,
kfn.on_alpha,
kfn.on_phi,
kfn.on_epsilonRec,
kfn.on_refractoryCounter,
kfn.on_refractoryDuration,
kfn.on_gammaPd,
kfn.on_firingCounter,
kfn.on_recSignal,
kfn.on_synapticActivityCounter,
)
# get on_zt4d to on_zt
kfn.on_zt .= reduce(max, kfn.on_zt4d, dims=(1,2))
logit = reshape(kfn.on_zt, (size(input, 1), :)) # (outputNeurons, batch)
return logit,
kfn.zit
end
# gpu launcher
function lifForward( zit::CuArray,
wRec::CuArray,
vt::CuArray,
vth::CuArray,
vRest::CuArray,
zt::CuArray,
alpha::CuArray,
phi::CuArray,
epsilonRec::CuArray,
refractoryCounter::CuArray,
refractoryDuration::CuArray,
gammaPd::CuArray,
firingCounter::CuArray,
recSignal::CuArray,
exInType::CuArray,
wRecChange::CuArray,
neuronInactivityCounter::CuArray,
synapseReconnectDelay::CuArray,
synapticActivityCounter::CuArray,
timeStep::CuArray,
)
kernel = @cuda launch=false lifForward( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
GeneralUtils.linear_to_cartesian,
)
config = launch_configuration(kernel.fun)
# threads to be launched. Since one can't launch the exact number of threads the kernel needs,
# one launches more threads than the kernel needs, then uses a guard inside the kernel
# to prevent unused threads from accessing memory.
threads = min(1024, config.threads) # depends on the GPU. Most NVIDIA GPUs allow 1024 threads per block
# total desired threads to launch on the GPU. Usually 1 thread per matrix element
totalThreads = length(wRec)
blocks = cld(totalThreads, threads)
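# e.g. with hypothetical sizes: length(wRec) = 2500 and threads = 1024 give blocks = cld(2500, 1024) = 3,
# i.e. 3072 launched threads, of which the trailing 572 are masked off by the kernel's bounds guard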
# println("launching gpu kernel")
CUDA.@sync begin
kernel( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
GeneralUtils.linear_to_cartesian; threads, blocks)
end
end
# gpu kernel
function lifForward( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
linear_to_cartesian,
)
i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index
if i <= length(wRec)
# cartesian index
i1, i2, i3, i4 = linear_to_cartesian(i, size(wRec))
# @cuprintln("gpu thread $i $i1 $i2 $i3 $i4")
if refractoryCounter[i1,i2,i3,i4] > 0 # refractory period is active
refractoryCounter[i1,i2,i3,i4] -= 1
recSignal[i1,i2,i3,i4] = 0
zt[i1,i2,i3,i4] = 0
vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
phi[i1,i2,i3,i4] = 0
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])
else # refractory period is inactive
recSignal[i1,i2,i3,i4] = wRec[i1,i2,i3,i4] * zit[i1,i2,i3,i4] *
exInType[i1,i2,i3,i4]
vt[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]) +
sum(@view(recSignal[:,:,i3,i4]))
# fires if membrane potential exceeds the threshold
if vt[i1,i2,i3,i4] > vth[i1,i2,i3,i4]
zt[i1,i2,i3,i4] = 1
refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
firingCounter[i1,i2,i3,i4] += 1
# vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
# reset counter if neuron fires
neuronInactivityCounter[i1,i2,i3,i4] = 0
else
zt[i1,i2,i3,i4] = 0
neuronInactivityCounter[i1,i2,i3,i4] -= 1
end
# compute phi (the pseudo-derivative); note it differs from the standard lif formula
phi[i1,i2,i3,i4] = (gammaPd[i1,i2,i3,i4] / vth[i1,i2,i3,i4]) *
max(0, 1 - ((vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) / vth[i1,i2,i3,i4]))
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]) +
(zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4]))
# !iszero indicates synaptic subscription
synapticActivityCounter[i1,i2,i3,i4] += zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4])
# voltage regulator
wRecChange[i1,i2,i3,i4] = -0.001 * (vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) *
zit[i1,i2,i3,i4]
# negative value means counting mode; a strict `< -0.1` comparison against the -0.1 sentinel is unreliable on the GPU, so -0.2 is used
if synapseReconnectDelay[i1,i2,i3,i4] < -0.2
synapseReconnectDelay[i1,i2,i3,i4] += 1
if synapseReconnectDelay[i1,i2,i3,i4] == 0
# mark timestep
synapseReconnectDelay[i1,i2,i3,i4] = sum(timeStep)
end
end
end
end
return nothing
end
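# NOTE: `GeneralUtils.linear_to_cartesian` is passed into the kernels because device code cannot
# resolve arbitrary host functions at run time. A minimal sketch of what such a helper could look
# like (an assumption for illustration; the actual GeneralUtils implementation may differ):
# @inline function linear_to_cartesian(i::Integer, dims::NTuple{4, Int})
#     s1, s2, s3 = dims[1], dims[2], dims[3]
#     i0 = i - 1                           # zero-based linear index
#     i1 = i0 % s1 + 1                     # fastest-varying dimension (column-major)
#     i2 = (i0 ÷ s1) % s2 + 1
#     i3 = (i0 ÷ (s1 * s2)) % s3 + 1
#     i4 = i0 ÷ (s1 * s2 * s3) + 1         # slowest-varying dimension
#     return i1, i2, i3, i4
# end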
# gpu launcher
function alifForward( zit::CuArray,
wRec::CuArray,
vt::CuArray,
vth::CuArray,
vRest::CuArray,
zt::CuArray,
alpha::CuArray,
phi::CuArray,
epsilonRec::CuArray,
refractoryCounter::CuArray,
refractoryDuration::CuArray,
gammaPd::CuArray,
firingCounter::CuArray,
recSignal::CuArray,
exInType::CuArray,
wRecChange::CuArray,
neuronInactivityCounter::CuArray,
synapseReconnectDelay::CuArray,
synapticActivityCounter::CuArray,
timeStep::CuArray,
epsilonRecA::CuArray,
a::CuArray,
avth::CuArray,
beta::CuArray,
rho::CuArray,
)
kernel = @cuda launch=false alifForward( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
epsilonRecA,
a,
avth,
beta,
rho,
GeneralUtils.linear_to_cartesian,
)
config = launch_configuration(kernel.fun)
# threads to be launched. Since one can't launch the exact number of threads the kernel needs,
# one launches more threads than the kernel needs, then uses a guard inside the kernel
# to prevent unused threads from accessing memory.
threads = min(1024, config.threads) # depends on the GPU. Most NVIDIA GPUs allow 1024 threads per block
# total desired threads to launch on the GPU. Usually 1 thread per matrix element
totalThreads = length(wRec)
blocks = cld(totalThreads, threads)
# println("launching gpu kernel")
CUDA.@sync begin
kernel( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
epsilonRecA,
a,
avth,
beta,
rho,
GeneralUtils.linear_to_cartesian; threads, blocks)
end
end
# gpu kernel
function alifForward( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
epsilonRecA,
a,
avth,
beta,
rho,
linear_to_cartesian,
)
i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index
if i <= length(wRec)
# cartesian index
i1, i2, i3, i4 = linear_to_cartesian(i, size(wRec))
# @cuprintln("gpu thread $i $i1 $i2 $i3 $i4")
if refractoryCounter[i1,i2,i3,i4] > 0 # refractory period is active
refractoryCounter[i1,i2,i3,i4] -= 1
recSignal[i1,i2,i3,i4] = 0
zt[i1,i2,i3,i4] = 0
vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
phi[i1,i2,i3,i4] = 0
a[i1,i2,i3,i4] = rho[i1,i2,i3,i4] * a[i1,i2,i3,i4]
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])
# compute epsilonRecA use eq.26
epsilonRecA[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] *
(phi[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]))
# compute the adaptive threshold avth = vth + beta * a
avth[i1,i2,i3,i4] = vth[i1,i2,i3,i4] + (beta[i1,i2,i3,i4] * a[i1,i2,i3,i4])
else # refractory period is inactive
recSignal[i1,i2,i3,i4] = wRec[i1,i2,i3,i4] * zit[i1,i2,i3,i4] *
exInType[i1,i2,i3,i4]
vt[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]) +
sum(@view(recSignal[:,:,i3,i4]))
# compute the adaptive threshold avth = vth + beta * a
avth[i1,i2,i3,i4] = vth[i1,i2,i3,i4] + (beta[i1,i2,i3,i4] * a[i1,i2,i3,i4])
# fires if membrane potential exceeds the adaptive threshold
if vt[i1,i2,i3,i4] > avth[i1,i2,i3,i4]
zt[i1,i2,i3,i4] = 1
refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
firingCounter[i1,i2,i3,i4] += 1
# vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
a[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] * a[i1,i2,i3,i4]) + 1
neuronInactivityCounter[i1,i2,i3,i4] = 0
else
zt[i1,i2,i3,i4] = 0
a[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] * a[i1,i2,i3,i4])
neuronInactivityCounter[i1,i2,i3,i4] -= 1
end
# compute phi (the pseudo-derivative); note it differs from the standard alif formula
phi[i1,i2,i3,i4] = (gammaPd[i1,i2,i3,i4] / vth[i1,i2,i3,i4]) *
max(0, 1 - ((vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) / vth[i1,i2,i3,i4]))
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]) +
(zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4]))
# compute epsilonRecA use eq.26
epsilonRecA[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] *
(phi[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])) +
(zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4]))
synapticActivityCounter[i1,i2,i3,i4] += zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4])
# voltage regulator
wRecChange[i1,i2,i3,i4] = -0.001 * (vt[i1,i2,i3,i4] - avth[i1,i2,i3,i4]) *
zit[i1,i2,i3,i4]
# negative value means counting mode; a strict `< -0.1` comparison against the -0.1 sentinel is unreliable on the GPU, so -0.2 is used
if synapseReconnectDelay[i1,i2,i3,i4] < -0.2
synapseReconnectDelay[i1,i2,i3,i4] += 1
if synapseReconnectDelay[i1,i2,i3,i4] == 0
# mark timestep
synapseReconnectDelay[i1,i2,i3,i4] = sum(timeStep)
end
end
end
end
return nothing
end
# gpu launcher
function onForward( zit::CuArray,
wOut::CuArray,
vt::CuArray,
vth::CuArray,
vRest::CuArray,
zt::CuArray,
alpha::CuArray,
phi::CuArray,
epsilonRec::CuArray,
refractoryCounter::CuArray,
refractoryDuration::CuArray,
gammaPd::CuArray,
firingCounter::CuArray,
recSignal::CuArray,
synapticActivityCounter::CuArray,
)
kernel = @cuda launch=false onForward( zit,
wOut,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
synapticActivityCounter,
GeneralUtils.linear_to_cartesian,
)
config = launch_configuration(kernel.fun)
# threads to be launched. Since one can't launch the exact number of threads the kernel needs,
# one launches more threads than the kernel needs, then uses a guard inside the kernel
# to prevent unused threads from accessing memory.
threads = min(1024, config.threads) # depends on the GPU. Most NVIDIA GPUs allow 1024 threads per block
# total desired threads to launch on the GPU. Usually 1 thread per matrix element
totalThreads = length(wOut)
blocks = cld(totalThreads, threads)
# println("launching gpu kernel")
CUDA.@sync begin
kernel( zit,
wOut,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
synapticActivityCounter,
GeneralUtils.linear_to_cartesian; threads, blocks)
end
end
# gpu kernel
function onForward( zit,
wOut,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
synapticActivityCounter,
linear_to_cartesian,
)
i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index
if i <= length(wOut)
# cartesian index
i1, i2, i3, i4 = linear_to_cartesian(i, size(wOut))
# @cuprintln("gpu thread $i $i1 $i2 $i3 $i4")
if refractoryCounter[i1,i2,i3,i4] > 0 # refractory period is active
refractoryCounter[i1,i2,i3,i4] -= 1
recSignal[i1,i2,i3,i4] = 0
zt[i1,i2,i3,i4] = 0
vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
phi[i1,i2,i3,i4] = 0
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])
else # refractory period is inactive
recSignal[i1,i2,i3,i4] = zit[i1,i2,i3,i4] * wOut[i1,i2,i3,i4]
vt[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]) + sum(@view(recSignal[:,:,i3,i4]))
# fires if membrane potential exceeds the threshold
if vt[i1,i2,i3,i4] > vth[i1,i2,i3,i4]
zt[i1,i2,i3,i4] = 1
refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
firingCounter[i1,i2,i3,i4] += 1
vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
else
zt[i1,i2,i3,i4] = 0
end
# compute phi (the pseudo-derivative); note it differs from the standard output-neuron (on) formula
phi[i1,i2,i3,i4] = (gammaPd[i1,i2,i3,i4] / vth[i1,i2,i3,i4]) *
max(0, 1 - ((vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) / vth[i1,i2,i3,i4]))
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]) +
(zit[i1,i2,i3,i4] * !iszero(wOut[i1,i2,i3,i4]))
synapticActivityCounter[i1,i2,i3,i4] += zit[i1,i2,i3,i4] * !iszero(wOut[i1,i2,i3,i4])
end
end
return nothing
end
# function lifForward(kfn_zit::Array{T},
# zit::Array{T},
# wRec::Array{T},
# vt0::Array{T},
# vt1::Array{T},
# vth::Array{T},
# vRest::Array{T},
# zt1::Array{T},
# alpha::Array{T},
# phi::Array{T},
# epsilonRec::Array{T},
# refractoryCounter::Array{T},
# refractoryDuration::Array{T},
# gammaPd::Array{T},
# firingCounter::Array{T},
# arrayProjection4d::Array{T},
# recSignal::Array{T},
# decayed_vt0::Array{T},
# decayed_epsilonRec::Array{T},
# vt1_diff_vth::Array{T},
# vt1_diff_vth_div_vth::Array{T},
# gammaPd_div_vth::Array{T},
# phiActivation::Array{T},
# ) where T<:Number
# # project 3D kfn zit into 4D lif zit
# i1, i2, i3, i4 = size(alif_wRec)
# lif_zit .= reshape(kfn_zit, (i1, i2, 1, i4)) .* lif_arrayProjection4d
# for j in 1:size(wRec, 4), i in 1:size(wRec, 3) # compute along neurons axis of every batch
# if sum(@view(refractoryCounter[:,:,i,j])) > 0 # refractory period is active
# @. @views refractoryCounter[:,:,i,j] -= 1
# @. @views zt1[:,:,i,j] = 0
# @. @views vt1[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @. @views phi[:,:,i,j] = 0
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j]
# else # refractory period is inactive
# @. @views recSignal[:,:,i,j] = zit[:,:,i,j] * wRec[:,:,i,j]
# @. @views decayed_vt0[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @view(vt1[:,:,i,j]) .= @view(decayed_vt0[:,:,i,j]) .+ sum(@view(recSignal[:,:,i,j]))
# if sum(@view(vt1[:,:,i,j])) > sum(@view(vth[:,:,i,j]))
# @. @views zt1[:,:,i,j] = 1
# @. @views refractoryCounter[:,:,i,j] = refractoryDuration[:,:,i,j]
# @. @views firingCounter[:,:,i,j] += 1
# @. @views vt1[:,:,i,j] = vRest[:,:,i,j]
# else
# @. @views zt1[:,:,i,j] = 0
# end
# # compute phi, there is a difference from alif formula
# @. @views gammaPd_div_vth[:,:,i,j] = gammaPd[:,:,i,j] / vth[:,:,i,j]
# @. @views vt1_diff_vth[:,:,i,j] = vt1[:,:,i,j] - vth[:,:,i,j]
# @. @views vt1_diff_vth_div_vth[:,:,i,j] = vt1_diff_vth[:,:,i,j] / vth[:,:,i,j]
# @view(phiActivation[:,:,i,j]) .= max(0, 1 - sum(@view(vt1_diff_vth_div_vth[:,:,i,j])))
# @. @views phi[:,:,i,j] = gammaPd_div_vth[:,:,i,j] * phiActivation[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j] + zit[:,:,i,j]
# end
# end
# end
# function alifForward(zit::Array{T},
# wRec::Array{T},
# vt0::Array{T},
# vt1::Array{T},
# vth::Array{T},
# vRest::Array{T},
# zt1::Array{T},
# alpha::Array{T},
# phi::Array{T},
# epsilonRec::Array{T},
# refractoryCounter::Array{T},
# refractoryDuration::Array{T},
# gammaPd::Array{T},
# firingCounter::Array{T},
# recSignal::Array{T},
# decayed_vt0::Array{T},
# decayed_epsilonRec::Array{T},
# vt1_diff_vth::Array{T},
# vt1_diff_vth_div_vth::Array{T},
# gammaPd_div_vth::Array{T},
# phiActivation::Array{T},
# epsilonRecA::Array{T},
# avth::Array{T},
# a::Array{T},
# beta::Array{T},
# rho::Array{T},
# phi_x_epsilonRec::Array{T},
# phi_x_beta::Array{T},
# rho_diff_phi_x_beta::Array{T},
# rho_div_phi_x_beta_x_epsilonRecA::Array{T},
# beta_x_a::Array{T},
# ) where T<:Number
# for j in 1:size(wRec, 4), i in 1:size(wRec, 3) # compute along neurons axis of every batch
# if sum(@view(refractoryCounter[:,:,i,j])) > 0 # refractory period is active
# @. @views refractoryCounter[:,:,i,j] -= 1
# @. @views zt1[:,:,i,j] = 0
# @. @views vt1[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @. @views phi[:,:,i,j] = 0
# @. @views a[:,:,i,j] = rho[:,:,i,j] * a[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j]
# # compute epsilonRecA
# @. @views phi_x_epsilonRec[:,:,i,j] = phi[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views phi_x_beta[:,:,i,j] = phi[:,:,i,j] * beta[:,:,i,j]
# @. @views rho_diff_phi_x_beta[:,:,i,j] = rho[:,:,i,j] - phi_x_beta[:,:,i,j]
# @. @views rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j] = rho_diff_phi_x_beta[:,:,i,j] * epsilonRecA[:,:,i,j]
# @. @views epsilonRecA[:,:,i,j] = phi_x_epsilonRec[:,:,i,j] + rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j]
# # compute avth
# @. @views beta_x_a[:,:,i,j] = beta[:,:,i,j] * a[:,:,i,j]
# @. @views avth[:,:,i,j] = vth[:,:,i,j] + beta_x_a[:,:,i,j]
# else # refractory period is inactive
# @. @views recSignal[:,:,i,j] = zit[:,:,i,j] * wRec[:,:,i,j]
# @. @views decayed_vt0[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @view(vt1[:,:,i,j]) .= @view(decayed_vt0[:,:,i,j]) .+ sum(@view(recSignal[:,:,i,j]))
# # compute avth
# @. @views beta_x_a[:,:,i,j] = beta[:,:,i,j] * a[:,:,i,j]
# @. @views avth[:,:,i,j] = vth[:,:,i,j] + beta_x_a[:,:,i,j]
# if sum(@view(vt1[:,:,i,j])) > sum(@view(avth[:,:,i,j]))
# @. @views zt1[:,:,i,j] = 1
# @. @views refractoryCounter[:,:,i,j] = refractoryDuration[:,:,i,j]
# @. @views firingCounter[:,:,i,j] += 1
# @. @views vt1[:,:,i,j] = vRest[:,:,i,j]
# @. @views a[:,:,i,j] = rho[:,:,i,j] * a[:,:,i,j]
# @. @views a[:,:,i,j] = a[:,:,i,j] += 1
# else
# @. @views zt1[:,:,i,j] = 0
# @. @views a[:,:,i,j] = rho[:,:,i,j] * a[:,:,i,j]
# end
# # compute phi, there is a difference from alif formula
# @. @views gammaPd_div_vth[:,:,i,j] = gammaPd[:,:,i,j] / vth[:,:,i,j]
# @. @views vt1_diff_vth[:,:,i,j] = vt1[:,:,i,j] - vth[:,:,i,j]
# @. @views vt1_diff_vth_div_vth[:,:,i,j] = vt1_diff_vth[:,:,i,j] / vth[:,:,i,j]
# @view(phiActivation[:,:,i,j]) .= max(0, 1 - sum(@view(vt1_diff_vth_div_vth[:,:,i,j])))
# @. @views phi[:,:,i,j] = gammaPd_div_vth[:,:,i,j] * phiActivation[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j] + zit[:,:,i,j]
# # compute epsilonRecA
# @. @views phi_x_epsilonRec[:,:,i,j] = phi[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views phi_x_beta[:,:,i,j] = phi[:,:,i,j] * beta[:,:,i,j]
# @. @views rho_diff_phi_x_beta[:,:,i,j] = rho[:,:,i,j] - phi_x_beta[:,:,i,j]
# @. @views rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j] = rho_diff_phi_x_beta[:,:,i,j] * epsilonRecA[:,:,i,j]
# @. @views epsilonRecA[:,:,i,j] = phi_x_epsilonRec[:,:,i,j] + rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j]
# end
# end
# end
# function onForward(kfn_zit::Array{T},
# zit::Array{T},
# wOut::Array{T},
# vt0::Array{T},
# vt1::Array{T},
# vth::Array{T},
# vRest::Array{T},
# zt1::Array{T},
# alpha::Array{T},
# phi::Array{T},
# epsilonRec::Array{T},
# refractoryCounter::Array{T},
# refractoryDuration::Array{T},
# gammaPd::Array{T},
# firingCounter::Array{T},
# arrayProjection4d::Array{T},
# recSignal::Array{T},
# decayed_vt0::Array{T},
# decayed_epsilonRec::Array{T},
# vt1_diff_vth::Array{T},
# vt1_diff_vth_div_vth::Array{T},
# gammaPd_div_vth::Array{T},
# phiActivation::Array{T},
# ) where T<:Number
# # project 3D kfn zit into 4D lif zit
# zit .= reshape(kfn_zit,
# (size(wOut, 1), size(wOut, 2), 1, size(wOut, 4))) .* arrayProjection4d
# for j in 1:size(wOut, 4), i in 1:size(wOut, 3) # compute along neurons axis of every batch
# if sum(@view(refractoryCounter[:,:,i,j])) > 0 # refractory period is active
# @. @views refractoryCounter[:,:,i,j] -= 1
# @. @views zt1[:,:,i,j] = 0
# @. @views vt1[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @. @views phi[:,:,i,j] = 0
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j]
# else # refractory period is inactive
# @. @views recSignal[:,:,i,j] = zit[:,:,i,j] * wOut[:,:,i,j]
# @. @views decayed_vt0[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @view(vt1[:,:,i,j]) .= @view(decayed_vt0[:,:,i,j]) .+ sum(@view(recSignal[:,:,i,j]))
# if sum(@view(vt1[:,:,i,j])) > sum(@view(vth[:,:,i,j]))
# @. @views zt1[:,:,i,j] = 1
# @. @views refractoryCounter[:,:,i,j] = refractoryDuration[:,:,i,j]
# @. @views firingCounter[:,:,i,j] += 1
# @. @views vt1[:,:,i,j] = vRest[:,:,i,j]
# else
# @. @views zt1[:,:,i,j] = 0
# end
# # compute phi, there is a difference from alif formula
# @. @views gammaPd_div_vth[:,:,i,j] = gammaPd[:,:,i,j] / vth[:,:,i,j]
# @. @views vt1_diff_vth[:,:,i,j] = vt1[:,:,i,j] - vth[:,:,i,j]
# @. @views vt1_diff_vth_div_vth[:,:,i,j] = vt1_diff_vth[:,:,i,j] / vth[:,:,i,j]
# @view(phiActivation[:,:,i,j]) .= max(0, 1 - sum(@view(vt1_diff_vth_div_vth[:,:,i,j])))
# @. @views phi[:,:,i,j] = gammaPd_div_vth[:,:,i,j] * phiActivation[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j] + zit[:,:,i,j]
# end
# end
# end
end # module

View File

@@ -0,0 +1,87 @@
module interface
# export
# using Flux, CUDA
#------------------------------------------------------------------------------------------------100
end # module

View File

@@ -0,0 +1,567 @@
module learn
export learn!, compute_paramsChange!
using Statistics, Random, LinearAlgebra, JSON3, Flux, CUDA, Dates
using GeneralUtils
using ..type, ..snnUtil
#------------------------------------------------------------------------------------------------100
function compute_paramsChange!(kfn::kfn_1, modelError::CuArray, outputError::CuArray, label)
lifComputeParamsChange!(kfn.timeStep,
kfn.lif_phi,
kfn.lif_epsilonRec,
kfn.lif_eta,
kfn.lif_eRec,
kfn.lif_wRec,
kfn.lif_exInType,
kfn.lif_wRecChange,
kfn.on_wOut,
kfn.lif_firingCounter,
kfn.lif_firingTargetFrequency,
kfn.lif_arrayProjection4d,
kfn.lif_error,
modelError,
outputError,
kfn.inputSize,
kfn.bk,
label,
)
alifComputeParamsChange!(kfn.timeStep,
kfn.alif_phi,
kfn.alif_epsilonRec,
kfn.alif_eta,
kfn.alif_eRec,
kfn.alif_wRec,
kfn.alif_exInType,
kfn.alif_wRecChange,
kfn.on_wOut,
kfn.alif_firingCounter,
kfn.alif_firingTargetFrequency,
kfn.alif_arrayProjection4d,
kfn.alif_error,
modelError,
outputError,
kfn.inputSize,
kfn.bk,
label,
kfn.alif_epsilonRecA,
kfn.alif_beta,
)
onComputeParamsChange!(kfn.on_phi,
kfn.on_epsilonRec,
kfn.on_eta,
kfn.on_eRec,
kfn.on_wOutChange,
kfn.on_arrayProjection4d,
kfn.on_error,
outputError,
)
# error("DEBUG -> kfn compute_paramsChange! $(Dates.now())")
end
function lifComputeParamsChange!( timeStep::CuArray,
phi::CuArray,
epsilonRec::CuArray,
eta::CuArray,
eRec::CuArray,
wRec::CuArray,
exInType::CuArray,
wRecChange::CuArray,
wOut::CuArray,
firingCounter::CuArray,
firingTargetFrequency::CuArray,
arrayProjection4d::CuArray,
nError::CuArray,
modelError::CuArray,
outputError::CuArray,
inputSize::CuArray,
bk::CuArray,
label,
)
eRec .= phi .* epsilonRec
# the 2D wRec matrix contains input, lif, and alif neurons; only the lif neurons are needed here
startIndex = prod(inputSize) +1
stopIndex = startIndex + size(wRec, 3) -1
startCol = CartesianIndices(wRec)[startIndex][2]
stopCol = CartesianIndices(wRec)[stopIndex][2]
# RSNN neurons with a direct connection to an output neuron get their Bjk
# from the output neuron that represents the correct answer; the rest of the RSNN gets a random Bjk
onW = @view(wOut[:, startCol:stopCol, sum(label+1), 1]) # label+1 because julia is 1-based index
_bk = @view(bk[:, startCol:stopCol, 1])
mask = iszero.(onW)
bk_ = mask .* _bk
bkComposed = onW .+ bk_
nError = bkComposed .* modelError
nError = reshape(nError, (1,1,:,1))
# compute wRecChange of all neurons w.r.t. the iᵗʰ output neuron
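# (an e-prop-style update: deltaW ~ eta * L * e, where the learning signal L is
# bkComposed .* modelError and the eligibility trace e is eRec = phi .* epsilonRec from above)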
wRecChange .+= (eta .* nError .* eRec)
# frequency regulator
targetFiringCount = firingTargetFrequency .* timeStep
freqError = (firingCounter .- targetFiringCount) ./ timeStep
freqWRecChange = -1 .* freqError .* eta .* eRec
wRecChange .+= freqWRecChange
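# i.e. neurons firing above their target rate are pushed down and neurons firing below it are
# pushed up, scaled by the learning rate eta and the eligibility trace eRec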
# reset epsilonRec
epsilonRec .= 0
end
function alifComputeParamsChange!( timeStep::CuArray,
phi::CuArray,
epsilonRec::CuArray,
eta::CuArray,
eRec::CuArray,
wRec::CuArray,
exInType::CuArray,
wRecChange::CuArray,
wOut::CuArray,
firingCounter::CuArray,
firingTargetFrequency::CuArray,
arrayProjection4d::CuArray,
nError::CuArray,
modelError::CuArray,
outputError::CuArray,
inputSize::CuArray,
bk::CuArray,
label,
epsilonRecA::CuArray,
beta::CuArray,
)
eRec .= phi .* (epsilonRec .- (beta .* epsilonRecA)) # use eq. 25
# the 2D wRec matrix contains input, lif, and alif neurons; only the alif neurons are needed here
startIndex = prod(inputSize) +1
stopIndex = startIndex + size(wRec, 3) -1
startCol = CartesianIndices(wRec)[startIndex][2]
stopCol = CartesianIndices(wRec)[stopIndex][2]
# RSNN neurons with a direct connection to an output neuron get their Bjk
# from the output neuron that represents the correct answer; the rest of the RSNN gets a random Bjk
onW = @view(wOut[:, startCol:stopCol, sum(label+1), 1]) # label+1 because julia is 1-based index
_bk = @view(bk[:, startCol:stopCol, 1])
mask = iszero.(onW)
bk_ = mask .* _bk
bkComposed = onW .+ bk_
nError = bkComposed .* modelError
nError = reshape(nError, (1,1,:,1))
wRecChange .+= (eta .* nError .* eRec)
# frequency regulator
targetFiringCount = firingTargetFrequency .* timeStep
freqError = (firingCounter .- targetFiringCount) ./ timeStep
freqWRecChange = -1 .* freqError .* eta .* eRec
wRecChange .+= freqWRecChange
# wRecChange .+= 0.01 .* ((firingTargetFrequency - (firingCounter./timeStep)) ./ timeStep) .*
# eta .* eRec
# reset epsilonRec
epsilonRec .= 0
epsilonRecA .= 0
# error("DEBUG -> alifComputeParamsChange! $(Dates.now())")
end
function onComputeParamsChange!(phi::CuArray,
epsilonRec::CuArray,
eta::CuArray,
eRec::CuArray,
wOutChange::CuArray,
arrayProjection4d::CuArray,
nError::CuArray,
outputError::CuArray # outputError is output neuron's error
)
eRec .= phi .* epsilonRec
nError .= reshape(outputError, (1, 1, :, size(outputError, 2))) .* arrayProjection4d
wOutChange .+= (eta .* nError .* eRec)
# reset epsilonRec
epsilonRec .= 0
# error("DEBUG -> onComputeParamsChange! $(Dates.now())")
end
function lifComputeParamsChange!( phi::AbstractArray,
epsilonRec::AbstractArray,
eta::AbstractArray,
wRec::AbstractArray,
wRecChange::AbstractArray,
wOut::AbstractArray,
modelError::AbstractArray)
d1, d2, d3, d4 = size(epsilonRec)
# Bₖⱼ in paper, sum() to get each neuron's total wOut weight
wOutSum = reshape(sum(wOut, dims=3), (d1, :, d4))
for j in 1:d4, i in 1:d3 # compute along neurons axis of every batch
# how much one spike of this neuron contributes to each output neuron's error
view(wRecChange, :, :, i, j) .+= (-1 * view(eta, :, :, i, j)[1]) .*
# eRec
(
(view(phi, :, :, i, j)[1] .* view(epsilonRec, :, :, i, j)) .*
# nError a.k.a. learning signal
(
view(modelError, :, j)[1] * # dopamine concept: this neuron receives the summed error signal
# RSNN neuron's total wOut weight (neuron synaptic subscription .* wOutSum)
view(wOutSum, :, :, j)[i]
)
)
end
end
function alifComputeParamsChange!( phi::AbstractArray,
epsilonRec::AbstractArray,
epsilonRecA::AbstractArray,
eta::AbstractArray,
wRec::AbstractArray,
wRecChange::AbstractArray,
beta::AbstractArray,
wOut::AbstractArray,
modelError::AbstractArray)
d1, d2, d3, d4 = size(epsilonRec)
# Bₖⱼ in paper, sum() to get each neuron's total wOut weight
wOutSum = reshape(sum(wOut, dims=3), (d1, :, d4))
for j in 1:d4, i in 1:d3 # compute along neurons axis of every batch
# how much one spike of this neuron contributes to each output neuron's error
view(wRecChange, :, :, i, j) .+= (-1 * view(eta, :, :, i, j)[1]) .*
# eRec
(
# eRec_v
(view(phi, :, :, i, j)[1] .* view(epsilonRec, :, :, i, j)) .+
# eRec_a
((view(phi, :, :, i, j)[1] * view(beta, :, :, i, j)[1]) .*
view(epsilonRecA, :, :, i, j))
) .*
# nError a.k.a. learning signal
(
view(modelError, :, j)[1] *
# RSNN neuron's total wOut weight (neuron synaptic subscription .* wOutSum)
view(wOutSum, :, :, j)[i]
# sum(GeneralUtils.isNotEqual.(view(wRec, :, :, i, j), 0) .*
# view(wOutSum, :, :, j))
)
end
end
function onComputeParamsChange!(phi::AbstractArray,
epsilonRec::AbstractArray,
eta::AbstractArray,
wOutChange::AbstractArray,
outputError::AbstractArray)
d1, d2, d3, d4 = size(epsilonRec)
for j in 1:d4, i in 1:d3 # compute along neurons axis of every batch
# how much one spike of this neuron contributes to each output neuron's error
view(wOutChange, :, :, i, j) .+= (-1 * view(eta, :, :, i, j)[1]) .*
# eRec
(
(view(phi, :, :, i, j)[1] .* view(epsilonRec, :, :, i, j)) .*
# nError a.k.a. learning signal; the output neuron receives the error of its own answer minus the correct answer.
view(outputError, :, j)[i]
)
end
end
function learn!(kfn::kfn_1, progress, device=cpu)
if sum(kfn.timeStep) == 800
println("zitCumulative ", sum(kfn.zitCumulative[:,:,784:size(kfn.zitCumulative, 3)], dims=3))
# println("on_synapticActivityCounter ", kfn.on_synapticActivityCounter[:,:,1,:])
end
#WORKING compare output neuron 0 synapse activity when input are label 0 and 5, (!isequal).(wOut)
# lif learn
kfn.lif_wRec, kfn.lif_neuronInactivityCounter, kfn.lif_synapticActivityCounter, kfn.lif_synapseReconnectDelay =
lifLearn(kfn.lif_wRec,
kfn.lif_wRecChange,
kfn.lif_exInType,
kfn.lif_arrayProjection4d,
kfn.lif_neuronInactivityCounter,
kfn.lif_synapseReconnectDelay,
kfn.lif_synapseConnectionNumber,
kfn.lif_synapticActivityCounter,
kfn.lif_eta,
kfn.lif_vt,
kfn.zitCumulative,
progress,
device)
# alif learn
kfn.alif_wRec, kfn.alif_neuronInactivityCounter, kfn.alif_synapticActivityCounter, kfn.alif_synapseReconnectDelay =
alifLearn(kfn.alif_wRec,
kfn.alif_wRecChange,
kfn.alif_exInType,
kfn.alif_arrayProjection4d,
kfn.alif_neuronInactivityCounter,
kfn.alif_synapseReconnectDelay,
kfn.alif_synapseConnectionNumber,
kfn.alif_synapticActivityCounter,
kfn.alif_eta,
kfn.alif_vt,
kfn.zitCumulative,
progress,
device)
# on learn
onLearn!(kfn.on_wOut,
kfn.on_wOutChange,
kfn.on_eta,
kfn.on_arrayProjection4d,
progress,)
# wrap up learning session
if kfn.learningStage == [3]
kfn.learningStage = [0]
end
# error("DEBUG -> kfn learn! $(Dates.now())")
end
function lifLearn(wRec,
wRecChange,
exInType,
arrayProjection4d,
neuronInactivityCounter,
synapseReconnectDelay,
synapseConnectionNumber,
synapticActivityCounter,
eta,
vt,
zitCumulative,
progress,
device)
# transfer data to cpu
arrayProjection4d_cpu = arrayProjection4d |> cpu
wRec_cpu = wRec |> cpu
wRecChange_cpu = wRecChange |> cpu
eta_cpu = eta |> cpu
exInType_cpu = exInType |> cpu
neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
synapseReconnectDelay_cpu = synapseReconnectDelay |> cpu
synapticActivityCounter_cpu = synapticActivityCounter |> cpu
zitCumulative_cpu = zitCumulative |> cpu
# neuroplasticity, work on CPU side
wRec_cpu, neuronInactivityCounter_cpu, synapticActivityCounter_cpu, synapseReconnectDelay_cpu =
neuroplasticity(synapseConnectionNumber,
zitCumulative_cpu,
wRec_cpu,
exInType_cpu,
wRecChange_cpu,
vt,
eta_cpu,
neuronInactivityCounter_cpu,
synapseReconnectDelay_cpu,
synapticActivityCounter_cpu,
progress,)
# transfer data back to gpu
wRec = wRec_cpu |> device
neuronInactivityCounter = neuronInactivityCounter_cpu |> device
synapticActivityCounter = synapticActivityCounter_cpu |> device
synapseReconnectDelay = synapseReconnectDelay_cpu |> device
# error("DEBUG -> lifLearn! $(Dates.now())")
return wRec, neuronInactivityCounter, synapticActivityCounter, synapseReconnectDelay
end
function alifLearn(wRec,
wRecChange,
exInType,
arrayProjection4d,
neuronInactivityCounter,
synapseReconnectDelay,
synapseConnectionNumber,
synapticActivityCounter,
eta,
vt,
zitCumulative,
progress,
device)
# transfer data to cpu
arrayProjection4d_cpu = arrayProjection4d |> cpu
wRec_cpu = wRec |> cpu
wRecChange_cpu = wRecChange |> cpu
eta_cpu = eta |> cpu
exInType_cpu = exInType |> cpu
neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
synapseReconnectDelay_cpu = synapseReconnectDelay |> cpu
synapticActivityCounter_cpu = synapticActivityCounter |> cpu
zitCumulative_cpu = zitCumulative |> cpu
# neuroplasticity, work on CPU side
wRec_cpu, neuronInactivityCounter_cpu, synapticActivityCounter_cpu, synapseReconnectDelay_cpu =
neuroplasticity(synapseConnectionNumber,
zitCumulative_cpu,
wRec_cpu,
exInType_cpu,
wRecChange_cpu,
vt,
eta_cpu,
neuronInactivityCounter_cpu,
synapseReconnectDelay_cpu,
synapticActivityCounter_cpu,
progress,)
# transfer data back to gpu
wRec = wRec_cpu |> device
neuronInactivityCounter = neuronInactivityCounter_cpu |> device
synapticActivityCounter = synapticActivityCounter_cpu |> device
synapseReconnectDelay = synapseReconnectDelay_cpu |> device
# error("DEBUG -> alifLearn! $(Dates.now())")
return wRec, neuronInactivityCounter, synapticActivityCounter, synapseReconnectDelay
end
# function onLearn!(wOut,
# wOutChange,
# arrayProjection4d)
# # merge learning weight with average learning weight
# wOut .+= (sum(wOutChange, dims=4) ./ (size(wOut, 4))) .* arrayProjection4d
# # adaptive wOut to help convergence using c_decay
# wOut .-= 0.001 .* wOut
# end
function onLearn!(wOut,
wOutChange,
eta,
arrayProjection4d,
progress,)
if progress != 0
# merge learning weight with average learning weight
wOut .+= (sum(wOutChange, dims=4) ./ (size(wOut, 4))) .* arrayProjection4d
# adaptive wOut to help convergence using c_decay
wOut .-= 0.1 .* eta .* wOut # wOut .-= 0.001 .* wOut
else
#TESTING skip
wOutChange .= 0
end
end
function neuroplasticity(synapseConnectionNumber,
zitCumulative, # (row, col)
wRec, # (row, col, n)
exInType,
wRecChange,
vt,
eta,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
progress,) # (row, col, n)
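# progress semantics as used in the branches below: 2 -> model already correct, skip plasticity;
# any other non-zero value -> full weight merge plus rewiring; 0 -> no weight merge, only
# weaken/prune/rewire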
if progress == 2 # no need to learn
# skip neuroplasticity
#TODO I may need to do something with neuronInactivityCounter and other variables
wRecChange .= 0
# error("DEBUG -> neuroplasticity")
elseif progress != 0 # progress increase
# synapses waiting to reconnect must not receive wRecChange
mask = (!isequal).(wRec, 0)
wRecChange .*= mask
# merge learning weights; any synapse whose weight flips sign gets pruned
mergeLearnWeight!(wRec, exInType, wRecChange, synapticActivityCounter, synapseReconnectDelay)
# adjust wRec based on repetition (top 40% +w, bottom 60% -w; see growRepeatedPath!)
growRepeatedPath!(wRec, synapticActivityCounter, eta)
# -w all non-firing connections except mature ones
weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta)
# prune weak synapse
pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
# rewire synapse connection
rewireSynapse!(wRec, neuronInactivityCounter, synapticActivityCounter,
synapseReconnectDelay, synapseConnectionNumber, zitCumulative)
# error("DEBUG -> neuroplasticity 1")
elseif progress == 0 # no progress, no weight update, only rewire
# -w all non-firing connections except mature ones
weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta)
# prune weak synapse
pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
# rewire synapse connection
rewireSynapse!(wRec, neuronInactivityCounter, synapticActivityCounter,
synapseReconnectDelay, synapseConnectionNumber, zitCumulative)
# error("DEBUG -> neuroplasticity")
else
error("undefined condition line $(@__LINE__)")
end
# error("DEBUG -> neuroplasticity $(Dates.now())")
return wRec, neuronInactivityCounter,
synapticActivityCounter, synapseReconnectDelay
end
# learningLiquidity(x) = -0.0001x + 1 # -10000 to +10000; f(x) = -5e-05x+0.5
function learningLiquidity(x)
if x > 10000
y = 0.0
elseif x < -10000
y = 1.0
else
y = -5e-05x+0.5 # range -10000 to +10000
end
return y
end
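# e.g. learningLiquidity(-10000) == 1.0, learningLiquidity(0) == 0.5, learningLiquidity(10000) == 0.0;
# inputs beyond ±10000 are clamped to the [0.0, 1.0] range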
end # module

View File

@@ -0,0 +1,267 @@
module snnUtil
export refractoryStatus!, addNewSynapticConn!, mergeLearnWeight!, growRepeatedPath!,
weakenNotMatureSynapse!, pruneSynapse!, rewireSynapse!
using Random, GeneralUtils
using ..type
#------------------------------------------------------------------------------------------------100
function refractoryStatus!(refractoryCounter, refractoryActive, refractoryInactive)
d1, d2, d3, d4 = size(refractoryCounter)
for j in 1:d4
for i in 1:d3
if refractoryCounter[1, 1, i, j] > 0 # in refractory period (neuron inactive)
view(refractoryActive, 1, 1, i, j) .= 0
view(refractoryInactive, 1, 1, i, j) .= 1
else # out of refractory period (neuron active)
view(refractoryActive, 1, 1, i, j) .= 1
view(refractoryInactive, 1, 1, i, j) .= 0
end
end
end
end
# function addNewSynapticConn!(mask::AbstractArray{<:Any}, markValue::Number, wRec::AbstractArray{<:Any},
# counter::AbstractArray{<:Any}, n=0;
# rng::AbstractRNG=MersenneTwister(1234))
# # check if mask and wRec have the same size
# if size(mask) != size(wRec)
# error("mask and wRec must have the same size")
# end
# # get the indices of elements in mask that equal markValue
# indices = findall(x -> x == markValue, mask)
# alreadySub = findall(x -> x != 0, wRec) # get already subscribe
# setdiff!(indices, alreadySub) # remove already sub conn from pool
# remaining = 0
# if n == 0 || n > length(indices)
# remaining = n - length(indices)
# n = length(indices)
# end
# # shuffle the indices using the rng function
# shuffle!(rng, indices)
# # select the first n indices
# n > length(indices) ? println(">>> ", total_x_tobeReplced) : nothing
# selected = indices[1:n]
# # replace the elements in wRec at the selected positions with a
# for i in selected
# wRec[i] = rand(0.01:0.01:0.1)
# counter[i] = 0 # counting start from 0
# end
# # error("DEBUG addNewSynapticConn!")
# return remaining
# end
function mergeLearnWeight!(wRec::AbstractArray, exInType, wRecChange::AbstractArray,
synapticActivityCounter::AbstractArray,
synapseReconnectDelay::AbstractArray)
wRecSigned = exInType .* wRec
# -0.0 == 0.0, but isequal() treats -0.0 and 0.0 as distinct, so -0.0 has to be removed manually
GeneralUtils.replaceElements!(wRecSigned, -0, 0)
# println("wRec 2 $(size(wRecSigned)) ", wRecSigned[:,:,1,1])
# println("wRecChange ", wRecChange[:,:,1,1])
originalsign = sign.(wRecSigned)
# println("originalsign ", originalsign[:,:,1,1])
wRecSigned .= wRecSigned .+ wRecChange
# println("wRec 3 $(size(wRecSigned)) ", wRecSigned[:,:,1,1])
newsign = sign.(wRecSigned) # look for flipped signs; those synapses need to get pruned
# println("newsign ", newsign[:,:,1,1])
flipsign = (!isequal).(originalsign, newsign)
# println("flipsign ", flipsign[:,:,1,1])
nonflipsign = isequal.(originalsign, newsign)
wRec .= abs.(wRecSigned) # wRec store magnitude only, sign is at exInType
# println("wRec 4 $(size(wRec)) ", wRec[:,:,1,1])
GeneralUtils.replaceElements!(flipsign, 1, wRec, 0.0) # sign-flipped synapses get pruned
# println("wRec 5 $(size(wRec)) ", wRec[:,:,1,1])
GeneralUtils.replaceElements!(flipsign, 1, synapticActivityCounter, 0)
# set pruned synapse to random wait time
waittime = rand((1:1000), size(wRec)) .* flipsign # synapse's random wait time to reconnect
# synapseReconnectDelay counting mode when value is negative hence .* -1
synapseReconnectDelay .= (synapseReconnectDelay .* nonflipsign) .+ (waittime .* -1)
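# worked example (hypothetical values): wRecSigned = 0.05 with wRecChange = -0.08 gives -0.03,
# a sign flip, so that synapse is pruned (wRec = 0.0), its activity counter is reset, and a
# random negative wait time is written into synapseReconnectDelay above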
# println("synapseReconnectDelay ", synapseReconnectDelay[:,:,1,1])
# error("DEBUG -> mergeLearnWeight!")
end
function growRepeatedPath!(wRec, synapticActivityCounter, eta)
# separate active synapses from inactive ones for this signal
mask_activeSynapse = (!isequal).(synapticActivityCounter, 0)
# adjust weight based on vt progress and repetition (top 40% +w, bottom 60% -w), ranked by synaptic activity
mask_more, mask_less, _ = rankMatrix(synapticActivityCounter, 0.6) # sort synapse from highest to lowest activity
# +w: synapses ranked in the top share of activity get their weight increased by eta
# mask_more = (!isless).(synapticActivityCounter, lowerlimit)
mask_2 = GeneralUtils.allTrue.(mask_activeSynapse, mask_more)
mask_3 = mask_2 .* (1 .+ eta) # high-activity synapse weights are increased by eta
GeneralUtils.replaceElements!(mask_3, 0, 1) # replace 0 with 1 so mask * Wrec will not get 0 weight
wRec .*= mask_3
# -w: synapses ranked in the bottom share of activity get their weight reduced by eta
# mask_less = GeneralUtils.isBetween.(synapticActivityCounter, 0, lowerlimit) # 1st criteria
mask_3 = GeneralUtils.allTrue.(mask_activeSynapse, mask_less)
mask_4 = mask_3 .* (1 .- eta) # low-activity synapse weights are reduced by eta
# replace 0 with 1 so mask * wRec will not get 0 weight i.e. non-effected weight remain the same
GeneralUtils.replaceElements!(mask_4, 0, 1)
wRec .*= mask_4
# error("DEBUG -> growRepeatedPath!")
end
function weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta) # TODO not fully tested: there is no case YET where synapse activity is 0 but wRec is non-zero (subscribed)
mask_inactiveSynapse = isequal.(synapticActivityCounter, 0)
mask_notmature = GeneralUtils.isBetween.(wRec, 0.0, 0.1) # 2nd criterion: a not-yet-mature synapse has weight < 0.1
mask_1 = GeneralUtils.allTrue.(mask_inactiveSynapse, mask_notmature)
mask_2 = mask_1 .* (1 .- eta)
GeneralUtils.replaceElements!(mask_2, 0, 1) # replace 0 with 1 so mask * Wrec will not get 0 weight
wRec .*= mask_2
end
function pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
mask_weak = GeneralUtils.isBetween.(wRec, 0.0, 0.01)
mask_notweak = (!GeneralUtils.isBetween).(wRec, 0.0, 0.01)
wRec .*= mask_notweak # all synapses marked weak get weight 0.0, i.e. pruned
# activity counters of all weak synapses are reset
GeneralUtils.replaceElements!(mask_weak, 1, synapticActivityCounter, 0)
# set pruned synapse to random wait time
waittime = rand((1:1000), size(wRec)) .* mask_weak # synapse's random wait time to reconnect
# synapseReconnectDelay counting mode when value is negative hence .* -1
synapseReconnectDelay .= (synapseReconnectDelay .* mask_notweak) .+ (waittime .* -1)
# error("DEBUG -> pruneSynapse!")
end
function rewireSynapse!(wRec::AbstractArray, neuronInactivityCounter::AbstractArray,
synapticActivityCounter::AbstractArray,
synapseReconnectDelay::AbstractArray,
synapseConnectionNumber::Integer,
zitCumulative::AbstractArray)
i1,i2,i3,i4 = size(wRec)
for n in 1:i3 # neuron-by-neuron
if neuronInactivityCounter[1,1,n,i4][1] < -100000 # neuron dies, i.e. reset all of its weights
println("neuron $n died")
neuronInactivityCounter[:,:,n,i4] .= 0 # reset
w = random_wRec(i1,i2,1,synapseConnectionNumber)
wRec[:,:,n,i4] .= w
a = similar(w) .= -0.1 # temp matrix used to put -0.1 into synapseReconnectDelay
mask = (!iszero).(w)
GeneralUtils.replaceElements!(mask, 1, a, 0)
synapseReconnectDelay[:,:,n,i4] = a
else
for ind in eachindex(synapseReconnectDelay[:,:,n,i4])
timemark = synapseReconnectDelay[:,:,n,i4][ind]
if timemark > 0 #TODO not fully tested. a marked timeStep is available
timemark = Int(timemark)
# get the neuron pool from 10 time steps earlier
earlier = size(zitCumulative, 3) - 10 > 0 ? size(zitCumulative, 3) - 10 : size(zitCumulative, 3)
current = size(zitCumulative, 3)
pool = sum(zitCumulative[:,:,earlier:current], dims=3)
if sum(pool) != 0
indices = findall(x -> x != 0, pool)
pick = rand(indices) # Cartesian index
wRec[pick] = rand(0.01:0.01:0.05)
synapticActivityCounter[pick] = 0
synapseReconnectDelay[pick] = -0.1
# error("DEBUG -> rewireSynapse!")
else # if no neurons fired at all, try again next time
synapticActivityCounter[:,:,n,i4][ind] = 0
synapseReconnectDelay[:,:,n,i4][ind] = rand(1:1000) * -1
# error("DEBUG -> rewireSynapse!")
end
end
end
end
end
end
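# Encoding of synapseReconnectDelay as used above (summarized from the comments
# in pruneSynapse! and rewireSynapse!): -0.1 marks an idle/non-subscribed
# connection, more-negative values are a countdown while a pruned synapse waits
# to reconnect, and a positive value marks the timeStep at which the synapse
# becomes available for rewireSynapse! to rewire.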
""" Rank input matrix elements value from high to low (not including 0 in ranking)
and return 2 resulting bitmatrix. 1st matrix contain high rank, 2nd
matrix contain low rank. high and low rank are devided by percent threshold
"""
function rankMatrix(X, percent::Float64)
"""prompt
write a function in julia that satisfy the following requirements.
1. the function operate on column-major 3D matrix
2. the function input are matrix X and percent value from 0.0 to 1.0
3. the function rank the matrix's elements value from high to low ignoring 0
4. return first bitmatrix according to percent, true for 1-percent and false otherwise
5. return second bitmatrix according to percent, true for percent and false otherwise
6. the first and second bitmatrix must be in the same shape as X
"""
if percent < 0.0 || percent > 1.0
error("percent must be 0.0 <= percent <= 1.0")
end
percent = 1 - percent
if percent == 1.0
first_bitmatrix = falses(size(X))
second_bitmatrix = trues(size(X))
threshold = 0.0
elseif percent == 0.0
first_bitmatrix = trues(size(X))
second_bitmatrix = falses(size(X))
threshold = 1.0
else
# Rank the nonzero values from high to low (zeros are excluded, per the docstring)
ranked_values = sort(filter(!iszero, vec(X)), rev=true)
isempty(ranked_values) && return falses(size(X)), trues(size(X)), 0.0 # guard: all-zero input
# Calculate the threshold value based on the given percent
threshold = ranked_values[ceil(Int, percent * length(ranked_values))]
# Create the first bitmatrix according to the threshold
first_bitmatrix = X .> threshold
# Create the second bitmatrix according to the threshold (zeros fall into this mask)
second_bitmatrix = X .<= threshold
end
end
return first_bitmatrix, second_bitmatrix, threshold
end
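# Usage sketch for rankMatrix (illustrative values, not in the original):
# the nonzero values of X sorted descending are [5, 3, 1], so with percent = 0.5
# the threshold lands on 3; `hi` marks only the 5, `lo` marks everything <= 3
# (zeros included).
function _demo_rankMatrix()
    hi, lo, th = rankMatrix([5.0 0.0; 3.0 1.0], 0.5)
    return hi, lo, th # (Bool[1 0; 0 0], Bool[0 1; 1 1], 3.0)
end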
end # module

View File

@@ -0,0 +1,430 @@
module type
export
# struct
kfn_1,
# function
random_wRec
using Random, GeneralUtils
#------------------------------------------------------------------------------------------------100
rng = MersenneTwister(1234)
abstract type Ironpen end
abstract type knowledgeFn <: Ironpen end
#------------------------------------------------------------------------------------------------100
Base.@kwdef mutable struct kfn_1 <: knowledgeFn
params::Union{Dict, Nothing} = nothing # store params of knowledgeFn itself for later use
timeStep::Union{AbstractArray, Nothing} = nothing
learningStage::Union{AbstractArray, Nothing} = nothing # 0 inference, 1 start, 2 during, 3 end learning
inputSize::Union{AbstractArray, Nothing} = nothing
zit::Union{AbstractArray, Nothing} = nothing # RSNN 3D activation matrix (row, col, batch)
zitCumulative::Union{AbstractArray, Nothing} = nothing
exInType::Union{AbstractArray, Nothing} = nothing
modelError::Union{AbstractArray, Nothing} = nothing # store RSNN error
outputError::Union{AbstractArray, Nothing} = nothing # store output neurons error
bk::Union{AbstractArray, Nothing} = nothing # Bⱼₖ
# ---------------------------------------------------------------------------- #
# LIF Neurons #
# ---------------------------------------------------------------------------- #
# a projection of kfn.zit into the lif dimension (for broadcasting later)
lif_zit::Union{AbstractArray, Nothing} = nothing
# main variables according to papers
lif_wRec::Union{AbstractArray, Nothing} = nothing
lif_vt::Union{AbstractArray, Nothing} = nothing
lif_vth::Union{AbstractArray, Nothing} = nothing
lif_vRest::Union{AbstractArray, Nothing} = nothing
lif_zt::Union{AbstractArray, Nothing} = nothing
lif_zt4d::Union{AbstractArray, Nothing} = nothing
lif_refractoryCounter::Union{AbstractArray, Nothing} = nothing
lif_refractoryDuration::Union{AbstractArray, Nothing} = nothing
lif_alpha::Union{AbstractArray, Nothing} = nothing
lif_delta::Union{AbstractFloat, Nothing} = nothing
lif_tau_m::Union{AbstractFloat, Nothing} = nothing
lif_phi::Union{AbstractArray, Nothing} = nothing
lif_epsilonRec::Union{AbstractArray, Nothing} = nothing
lif_eRec::Union{AbstractArray, Nothing} = nothing
lif_eta::Union{AbstractArray, Nothing} = nothing
lif_gammaPd::Union{AbstractArray, Nothing} = nothing
lif_wRecChange::Union{AbstractArray, Nothing} = nothing
lif_error::Union{AbstractArray, Nothing} = nothing
lif_firingCounter::Union{AbstractArray, Nothing} = nothing
lif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing
lif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing
lif_synapseReconnectDelay::Union{AbstractArray, Nothing} = nothing
lif_synapseConnectionNumber::Union{Int, Nothing} = nothing
lif_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing # work
# pre-allocation array
lif_arrayProjection4d::Union{AbstractArray, Nothing} = nothing # use to project 3d array to 4d
lif_recSignal::Union{AbstractArray, Nothing} = nothing
lif_exInType::Union{AbstractArray, Nothing} = nothing
# lif_decayed_epsilonRec::Union{AbstractArray, Nothing} = nothing
# lif_vt_diff_vth::Union{AbstractArray, Nothing} = nothing
# lif_vt_diff_vth_div_vth::Union{AbstractArray, Nothing} = nothing
# lif_gammaPd_div_vth::Union{AbstractArray, Nothing} = nothing
# lif_phiActivation::Union{AbstractArray, Nothing} = nothing
# ---------------------------------------------------------------------------- #
# ALIF Neurons #
# ---------------------------------------------------------------------------- #
alif_zit::Union{AbstractArray, Nothing} = nothing
alif_wRec::Union{AbstractArray, Nothing} = nothing
alif_vt::Union{AbstractArray, Nothing} = nothing
alif_vth::Union{AbstractArray, Nothing} = nothing
alif_vRest::Union{AbstractArray, Nothing} = nothing
alif_zt::Union{AbstractArray, Nothing} = nothing
alif_zt4d::Union{AbstractArray, Nothing} = nothing
alif_refractoryCounter::Union{AbstractArray, Nothing} = nothing
alif_refractoryDuration::Union{AbstractArray, Nothing} = nothing
alif_alpha::Union{AbstractArray, Nothing} = nothing
alif_delta::Union{AbstractFloat, Nothing} = nothing
alif_tau_m::Union{AbstractFloat, Nothing} = nothing
alif_phi::Union{AbstractArray, Nothing} = nothing
alif_epsilonRec::Union{AbstractArray, Nothing} = nothing
alif_eRec::Union{AbstractArray, Nothing} = nothing
alif_eta::Union{AbstractArray, Nothing} = nothing
alif_gammaPd::Union{AbstractArray, Nothing} = nothing
alif_wRecChange::Union{AbstractArray, Nothing} = nothing
alif_error::Union{AbstractArray, Nothing} = nothing
alif_firingCounter::Union{AbstractArray, Nothing} = nothing
alif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing
alif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing
alif_synapseReconnectDelay::Union{AbstractArray, Nothing} = nothing
alif_synapseConnectionNumber::Union{Int, Nothing} = nothing
alif_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing
# pre-allocation array
alif_arrayProjection4d::Union{AbstractArray, Nothing} = nothing # use to project 3d array to 4d
alif_recSignal::Union{AbstractArray, Nothing} = nothing
alif_exInType::Union{AbstractArray, Nothing} = nothing
# alif_decayed_epsilonRec::Union{AbstractArray, Nothing} = nothing
# alif_vt_diff_vth::Union{AbstractArray, Nothing} = nothing
# alif_vt_diff_vth_div_vth::Union{AbstractArray, Nothing} = nothing
# alif_gammaPd_div_vth::Union{AbstractArray, Nothing} = nothing
# alif_phiActivation::Union{AbstractArray, Nothing} = nothing
# alif specific variables
alif_epsilonRecA::Union{AbstractArray, Nothing} = nothing
alif_avth::Union{AbstractArray, Nothing} = nothing
alif_a::Union{AbstractArray, Nothing} = nothing # threshold adaptation
alif_beta::Union{AbstractArray, Nothing} = nothing # β, constant, value from paper
alif_rho::Union{AbstractArray, Nothing} = nothing # ρ, threshold adaptation decay factor
alif_tau_a::Union{AbstractFloat, Nothing} = nothing # τ_a, adaption time constant in millisecond
# alif specific pre-allocation array
# alif_phi_x_epsilonRec::Union{AbstractArray, Nothing} = nothing
# alif_phi_x_beta::Union{AbstractArray, Nothing} = nothing
# alif_rho_diff_phi_x_beta::Union{AbstractArray, Nothing} = nothing
# alif_rho_div_phi_x_beta_x_epsilonRecA::Union{AbstractArray, Nothing} = nothing
# alif_beta_x_a::Union{AbstractArray, Nothing} = nothing
# ---------------------------------------------------------------------------- #
# Output Neurons #
# ---------------------------------------------------------------------------- #
# output neuron is based on LIF
on_zit::Union{AbstractArray, Nothing} = nothing
# main variables according to papers
on_wOut::Union{AbstractArray, Nothing} = nothing # wOut is wRec, just use the name from paper
on_vt::Union{AbstractArray, Nothing} = nothing
on_vth::Union{AbstractArray, Nothing} = nothing
on_vRest::Union{AbstractArray, Nothing} = nothing
on_zt::Union{AbstractArray, Nothing} = nothing
on_zt4d::Union{AbstractArray, Nothing} = nothing
on_refractoryCounter::Union{AbstractArray, Nothing} = nothing
on_refractoryDuration::Union{AbstractArray, Nothing} = nothing
on_alpha::Union{AbstractArray, Nothing} = nothing
on_delta::Union{AbstractFloat, Nothing} = nothing
on_tau_m::Union{AbstractFloat, Nothing} = nothing
on_phi::Union{AbstractArray, Nothing} = nothing
on_epsilonRec::Union{AbstractArray, Nothing} = nothing
on_eRec::Union{AbstractArray, Nothing} = nothing
on_eta::Union{AbstractArray, Nothing} = nothing
on_gammaPd::Union{AbstractArray, Nothing} = nothing
on_wOutChange::Union{AbstractArray, Nothing} = nothing
on_error::Union{AbstractArray, Nothing} = nothing
on_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing
on_firingCounter::Union{AbstractArray, Nothing} = nothing
# pre-allocation array
on_arrayProjection4d::Union{AbstractArray, Nothing} = nothing # use to project 3d array to 4d
on_recSignal::Union{AbstractArray, Nothing} = nothing
# on_decayed_epsilonRec::Union{AbstractArray, Nothing} = nothing
# on_vt_diff_vth::Union{AbstractArray, Nothing} = nothing
# on_vt_diff_vth_div_vth::Union{AbstractArray, Nothing} = nothing
# on_gammaPd_div_vth::Union{AbstractArray, Nothing} = nothing
# on_phiActivation::Union{AbstractArray, Nothing} = nothing
end
# outer constructor
function kfn_1(params::Dict; device=cpu)
kfn = kfn_1()
kfn.params = params
kfn.timeStep = [0] |> device
kfn.learningStage = [0] |> device
# ---------------------------------------------------------------------------- #
# initialize activation matrix #
# ---------------------------------------------------------------------------- #
# row*col is a 2D matrix representing all RSNN activation
row, signal_col, batch = kfn.params[:inputPort][:signal][:numbers] # z-axis represent signal batch number
kfn.inputSize = [row, signal_col] |> device
lif_col = kfn.params[:computeNeuron][:lif][:numbers][2]
alif_col = kfn.params[:computeNeuron][:alif][:numbers][2]
col = signal_col + lif_col + alif_col
# activation matrix
kfn.zit = zeros(row, col, batch) |> device
kfn.zitCumulative = zeros(row, col, 1, batch) |> device
kfn.modelError = zeros(1) |> device
kfn.bk = rand(size(kfn.zit)...) |> device
# ---------------------------------------------------------------------------- #
# LIF config #
# ---------------------------------------------------------------------------- #
# In 3D LIF matrix, z-axis represent each neuron while each 2D slice represent that neuron's
# synaptic subscription to other neurons (via activation matrix)
lif_n = kfn.params[:computeNeuron][:lif][:numbers][1] * kfn.params[:computeNeuron][:lif][:numbers][2]
# subscription
synapticConnectionPercent = kfn.params[:computeNeuron][:lif][:params][:synapticConnectionPercent]
kfn.lif_synapseConnectionNumber = Int(floor(row*col * synapticConnectionPercent/100))
w = random_wRec(row, col, lif_n, kfn.lif_synapseConnectionNumber)
# project 3D w into 4D kfn.lif_wRec (row, col, n, batch)
kfn.lif_wRec = reshape(w, (row, col, lif_n, 1)) .* ones(row, col, lif_n, batch) |> device
kfn.lif_zit = (similar(kfn.lif_wRec) .= 0)
kfn.lif_vt = (similar(kfn.lif_wRec) .= 0)
kfn.lif_vth = (similar(kfn.lif_wRec) .= 1)
kfn.lif_vRest = (similar(kfn.lif_wRec) .= 0)
kfn.lif_zt = zeros(1, 1, lif_n, batch) |> device
kfn.lif_zt4d = (similar(kfn.lif_wRec) .= 0)
kfn.lif_refractoryCounter = (similar(kfn.lif_wRec) .= 0)
kfn.lif_refractoryDuration = (similar(kfn.lif_wRec) .= 3)
kfn.lif_delta = 1.0
kfn.lif_tau_m = 100.0
kfn.lif_alpha = (similar(kfn.lif_wRec) .= (exp(-kfn.lif_delta / kfn.lif_tau_m)))
kfn.lif_phi = (similar(kfn.lif_wRec) .= 0)
kfn.lif_epsilonRec = (similar(kfn.lif_wRec) .= 0)
kfn.lif_eRec = (similar(kfn.lif_wRec) .= 0)
kfn.lif_eta = (similar(kfn.lif_wRec) .= 0.01)
kfn.lif_gammaPd = (similar(kfn.lif_wRec) .= 0.3)
kfn.lif_wRecChange = (similar(kfn.lif_wRec) .= 0)
kfn.lif_error = (similar(kfn.lif_wRec) .= 0)
kfn.lif_firingCounter = (similar(kfn.lif_wRec) .= 0)
kfn.lif_firingTargetFrequency = (similar(kfn.lif_wRec) .= 0.1)
kfn.lif_neuronInactivityCounter = (similar(kfn.lif_wRec) .= 0)
kfn.lif_synapseReconnectDelay = (similar(kfn.lif_wRec) .= -0.1) # -0.1 for non-sub conn
# count subscribed synapse activity, just like epsilonRec but without decay;
# used to adjust weights based on how often a neural pathway is used
kfn.lif_synapticActivityCounter = (similar(kfn.lif_wRec) .= -0.1) # -0.1 for non-sub conn
kfn.lif_arrayProjection4d = (similar(kfn.lif_wRec) .= 1)
kfn.lif_recSignal = (similar(kfn.lif_wRec) .= 0)
kfn.lif_exInType = (similar(kfn.lif_wRec) .= 0)
# kfn.lif_decayed_epsilonRec = (similar(kfn.lif_wRec) .= 0)
# kfn.lif_vt_diff_vth = (similar(kfn.lif_wRec) .= 0)
# kfn.lif_vt_diff_vth_div_vth = (similar(kfn.lif_wRec) .= 0)
# kfn.lif_gammaPd_div_vth = (similar(kfn.lif_wRec) .= 0)
# kfn.lif_phiActivation = (similar(kfn.lif_wRec) .= 0)
# ---------------------------------------------------------------------------- #
# ALIF config #
# ---------------------------------------------------------------------------- #
alif_n = kfn.params[:computeNeuron][:alif][:numbers][1] * kfn.params[:computeNeuron][:alif][:numbers][2]
# subscription
synapticConnectionPercent = kfn.params[:computeNeuron][:alif][:params][:synapticConnectionPercent]
kfn.alif_synapseConnectionNumber = Int(floor(row*col * synapticConnectionPercent/100))
w = random_wRec(row, col, alif_n, kfn.alif_synapseConnectionNumber)
# project 3D w into 4D kfn.alif_wRec
kfn.alif_wRec = reshape(w, (row, col, alif_n, 1)) .* ones(row, col, alif_n, batch) |> device
kfn.alif_zit = (similar(kfn.alif_wRec) .= 0)
kfn.alif_vt = (similar(kfn.alif_wRec) .= 0)
kfn.alif_vth = (similar(kfn.alif_wRec) .= 1)
kfn.alif_vRest = (similar(kfn.alif_wRec) .= 0)
kfn.alif_zt = zeros(1, 1, alif_n, batch) |> device
kfn.alif_zt4d = (similar(kfn.alif_wRec) .= 0)
kfn.alif_refractoryCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_refractoryDuration = (similar(kfn.alif_wRec) .= 3)
kfn.alif_delta = 1.0
kfn.alif_tau_m = 100.0
kfn.alif_alpha = (similar(kfn.alif_wRec) .= (exp(-kfn.alif_delta / kfn.alif_tau_m)))
kfn.alif_phi = (similar(kfn.alif_wRec) .= 0)
kfn.alif_epsilonRec = (similar(kfn.alif_wRec) .= 0)
kfn.alif_eRec = (similar(kfn.alif_wRec) .= 0)
kfn.alif_eta = (similar(kfn.alif_wRec) .= 0.01)
kfn.alif_gammaPd = (similar(kfn.alif_wRec) .= 0.3)
kfn.alif_wRecChange = (similar(kfn.alif_wRec) .= 0)
kfn.alif_error = (similar(kfn.alif_wRec) .= 0)
kfn.alif_firingCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_firingTargetFrequency = (similar(kfn.alif_wRec) .= 0.1)
kfn.alif_neuronInactivityCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_synapseReconnectDelay = (similar(kfn.alif_wRec) .= -0.1) # -0.1 for non-sub conn
kfn.alif_synapticActivityCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_arrayProjection4d = (similar(kfn.alif_wRec) .= 1)
kfn.alif_recSignal = (similar(kfn.alif_wRec) .= 0)
kfn.alif_exInType = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_decayed_epsilonRec = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_vt_diff_vth = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_vt_diff_vth_div_vth = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_gammaPd_div_vth = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_phiActivation = (similar(kfn.alif_wRec) .= 0)
# alif specific variables
kfn.alif_epsilonRecA = (similar(kfn.alif_wRec) .= 0)
kfn.alif_avth = (similar(kfn.alif_wRec) .= 0)
kfn.alif_a = (similar(kfn.alif_wRec) .= 0)
kfn.alif_beta = (similar(kfn.alif_wRec) .= 0.07)
kfn.alif_tau_a = 800.0
kfn.alif_rho = (similar(kfn.alif_wRec) .= (exp(-kfn.alif_delta / kfn.alif_tau_a))) |> device
# kfn.alif_phi_x_epsilonRec = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_phi_x_beta = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_rho_diff_phi_x_beta = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_rho_div_phi_x_beta_x_epsilonRecA = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_beta_x_a = (similar(kfn.alif_wRec) .= 0)
# ---------------------------------------------------------------------------- #
# output config #
# ---------------------------------------------------------------------------- #
n = kfn.params[:outputPort][:numbers][1] * kfn.params[:outputPort][:numbers][2]
# subscription
w = zeros(row, col, n)
synapticConnectionPercent = kfn.params[:outputPort][:params][:synapticConnectionPercent]
subable = size(kfn.lif_wRec, 3) + size(kfn.alif_wRec, 3) # sub to lif, alif only
synapticConnection = Int(floor(subable * synapticConnectionPercent/100))
for slice in eachslice(w, dims=3) # each slice is a neuron
startInd = row*col - subable + 1 # e.g. 100(row*col) - 50(subable) = 50 -> startInd = 51
# pool must contain only lif, alif neurons
pool = shuffle!([startInd:row*col...])[1:synapticConnection]
for i in pool
slice[i] = rand() # assign a weight to the synaptic connection;
# the overall scale is adjusted right below
end
end
# 10% of the connected neurons firing should be enough to make an output neuron fire
should_be_avg_weight = 1 / (0.1 * n)
w = w .* (should_be_avg_weight / maximum(w)) # adjust overall weight
# project 3D w into 4D kfn.on_wOut (row, col, n, batch)
kfn.on_wOut = reshape(w, (row, col, n, 1)) .* ones(row, col, n, batch) |> device
kfn.on_zit = (similar(kfn.on_wOut) .= 0)
kfn.on_vt = (similar(kfn.on_wOut) .= 0)
kfn.on_vth = (similar(kfn.on_wOut) .= 1)
kfn.on_vRest = (similar(kfn.on_wOut) .= 0)
kfn.on_zt = zeros(1, 1, n, batch) |> device
kfn.on_zt4d = (similar(kfn.on_wOut) .= 0)
kfn.on_refractoryCounter = (similar(kfn.on_wOut) .= 0)
kfn.on_refractoryDuration = (similar(kfn.on_wOut) .= 0)
kfn.on_delta = 1.0
kfn.on_tau_m = 100.0
kfn.on_alpha = (similar(kfn.on_wOut) .= (exp(-kfn.on_delta / kfn.on_tau_m)))
kfn.on_phi = (similar(kfn.on_wOut) .= 0)
kfn.on_epsilonRec = (similar(kfn.on_wOut) .= 0)
kfn.on_eRec = (similar(kfn.on_wOut) .= 0)
kfn.on_eta = (similar(kfn.on_wOut) .= 0.01)
kfn.on_gammaPd = (similar(kfn.on_wOut) .= 0.3)
kfn.on_wOutChange = (similar(kfn.on_wOut) .= 0)
kfn.on_error = (similar(kfn.on_wOut) .= 0)
kfn.on_synapticActivityCounter = (similar(kfn.on_wOut) .= 0)
kfn.on_firingCounter = (similar(kfn.on_wOut) .= 0)
kfn.on_arrayProjection4d = (similar(kfn.on_wOut) .= 1)
kfn.on_recSignal = (similar(kfn.on_wOut) .= 0)
kfn.outputError = zeros(n, batch) |> device
totalComputeNeurons = lif_n + alif_n
inhibitoryNeurons = Int(floor(totalComputeNeurons * 30/100)) # ~30% of compute neurons are inhibitory
mask1 = ones(row, signal_col)
mask2 = GeneralUtils.multiply_random_elements(ones(row, lif_col + alif_col),
-1, inhibitoryNeurons, MersenneTwister(1234))
kfn.exInType = cat(mask1, mask2, dims=2) |> device
return kfn
end
function random_wRec(row, col, n, synapseConnectionNumber)
# subscription
w = zeros(row, col, n)
for slice in eachslice(w, dims=3)
pool = shuffle!([1:row*col...])[1:synapseConnectionNumber]
for i in pool
slice[i] = rand(0.01:0.01:0.05) # assign a small starting weight to the synaptic connection;
# otherwise RSNN's vt usually stays negative (-)
end
end
# # adjust weight so that RSNN fires small amount of neurons at the beginning to avoid overwhelming
# # all-fire situation. it also better than not-fire-at-all situation.
# avgWeight = sum(w)/length(w)
# w = w .* (0.01 / avgWeight) # adjust overall weight
return w #(row, col, n)
end
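# Sanity sketch for random_wRec (illustrative, not in the original): every neuron
# slice gets exactly `synapseConnectionNumber` nonzero weights, each drawn from
# 0.01:0.01:0.05.
function _demo_random_wRec()
    w = random_wRec(4, 5, 3, 6) # (row, col, n, connections per neuron)
    @assert all(count(!iszero, slice) == 6 for slice in eachslice(w, dims=3))
    return w
end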
end # module

View File

@@ -0,0 +1 @@
.CondaPkg

File diff suppressed because it is too large

View File

@@ -0,0 +1,25 @@
[deps]
BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
Cthulhu = "f68482b8-f384-11e8-15f7-abe071a5a75f"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
GLMakie = "e9467ef8-e4e7-5192-8a1a-b1aee30e663a"
GPUArrays = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
IronpenGPU = "3d5396ea-818e-43fc-a9d3-164248e840cd"
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
MethodAnalysis = "85b6ec6f-f7df-4429-9514-a64bcd9ee824"
OneHotArrays = "0b1bfda6-eb8a-41d2-88d8-f5af5cad476f"
ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
REPL = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
Serialization = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
SliceMap = "82cb661a-3f19-5665-9e27-df437c7e54c8"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"

View File

@@ -0,0 +1,932 @@
# ---------------------------------------------------------------------------- #
# if one need to reinstall all python packages #
# ---------------------------------------------------------------------------- #
# 1. delete .CondaPkg folder in working folder
# 2. delete CondaPkg.toml file in working folder
# using Pkg; Pkg.activate(".");
# pythonPkg = ["CondaPkg", "PythonCall"]
# for i in pythonPkg try Pkg.rm(i) catch end end
# for i in pythonPkg Pkg.add(i) end
# using CondaPkg, PythonCall
# channels = ["anaconda", "conda-forge", "pytorch"]
# for i in channels CondaPkg.add_channel(i) end
# condapackage = ["numpy", "pytorch", "snntorch"]
# for i in condapackage CondaPkg.add(i) end
using Pkg; Pkg.activate("."); Pkg.resolve(); Pkg.instantiate()
# ---------------------------------------------------------------------------- #
# for debugging purpose #
# ---------------------------------------------------------------------------- #
# https://discourse.julialang.org/t/debugging-extremely-slow/53801/3
# using MethodAnalysis
# visit(Base) do item
# isa(item, Module) && push!(JuliaInterpreter.compiled_modules, item)
# true
# end
using Revise
using BenchmarkTools, Cthulhu, REPL.TerminalMenus
using Flux, CUDA
using BSON, JSON3
using MLDatasets: MNIST
using MLUtils, ProgressMeter, Dates, Random,
Serialization, OneHotArrays , GLMakie
using CondaPkg, PythonCall
np = pyimport("numpy")
torch = pyimport("torch")
spikegen = pyimport("snntorch.spikegen") # https://github.com/jeshraghian/snntorch
using IronpenGPU
using GeneralUtils
sep = Sys.iswindows() ? "\\" : "/"
rootDir = pwd()
# select compute device
# device = Flux.CUDA.functional() ? gpu : cpu # Flux provides the "cpu" and "gpu" helpers
device = gpu
if device == gpu CUDA.device!(0) end #CHANGE
CUDA.allowscalar(false) # disallow scalar indexing of GPU arrays to catch slow fallback code paths early
#------------------------------------------------------------------------------------------------100
"""
Todo:
- []
Change from version:
-
All features
-
"""
# ----------------------------- REPL menu options ---------------------------- #
options = ["yes", "no"]
menu = RadioMenu(options)
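# The menu can later be shown with e.g. `choice = request("continue?", menu)`,
# which blocks and returns the index of the selected option (1 for "yes").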
# communication config --------------------------------------------------------------------------100
database_ip = "localhost"
# database_ip = "192.168.0.8"
#------------------------------------------------------------------------------------------------100
modelname = "runOn_gpu_0" #CHANGE
imageBatch = 1
function generate_snn(filename::String, location::String)
signalInput_portnumbers = (10, 10, imageBatch) # 2nd dim needs to match
# input signal + copied input signal + noise.
# 3rd dim is input batch size
noise_portnumbers = (signalInput_portnumbers[1], 1)
output_portnumbers = (10, 1)
# ~5000 neurons is the maximum for 64GB memory; currently 300 LIF : 200 ALIF
lif_neuron_number = (signalInput_portnumbers[1], 30) # CHANGE
alif_neuron_number = (signalInput_portnumbers[1], 20) # CHANGE; per the Allen Institute, ALIF is 20-40% of LIF
# totalNeurons = computeNeuronNumber + noise_portnumbers + signalInput_portnumbers
# totalInputPort = noise_portnumbers + signalInput_portnumbers
# kfn and neuron config
passthrough_neuron_params = Dict(
:type => "passthroughNeuron"
)
lif_neuron_params = Dict{Symbol, Any}(
:type => "lifNeuron",
:v_t_default => 0.0,
:v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
:tau_m => 50.0, # membrane time constant in millisecond.
:eta => 1e-6,
# Good starting value is 1/10th of tau_a
# This is a problem-specific parameter. It controls how leaky the neuron is.
# Too high (less leaky) makes it harder for the learning algo to move the model in a
# direction that reduces error, so the model's error can explode exponentially, likely
# because the learning algo exerts more force (larger w_out_change) to move the
# neuron toward an error-reducing direction. For example, model error going from 7 to 2e6.
:synapticConnectionPercent => 10, # % coverage of total neurons in kfn
)
alif_neuron_params = Dict{Symbol, Any}(
:type => "alifNeuron",
:v_t_default => 0.0,
:v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
:tau_m => 50.0, # membrane time constant in millisecond.
:eta => 1e-6,
# Good starting value is 1/10th of tau_a
# This is a problem-specific parameter. It controls how leaky the neuron is.
# Too high (less leaky) makes it harder for the learning algo to move the model in a
# direction that reduces error, so the model's error can explode exponentially, likely
# because the learning algo exerts more force (larger w_out_change) to move the
# neuron toward an error-reducing direction. For example, model error going from 7 to 2e6.
:tau_a => 800.0, # adaptation time constant in milliseconds. It defines the neuron's memory length.
# This is a problem-specific parameter.
# A good starting value is 0.5 to 2 times the info STORE-RECALL length, i.e. the total time
# the SNN takes to perform a task (for example, equal to the episode length).
# From "Spike frequency adaptation supports network computations on temporally dispersed
# information"
:synapticConnectionPercent => 10, # % coverage of total neurons in kfn
)
linear_neuron_params = Dict{Symbol, Any}(
:type => "linearNeuron",
:v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
:tau_out => 20.0, # output time constant in millisecond.
:synapticConnectionPercent => 10, # % coverage of total neurons in kfn
# Good starting value is 1/50th of tau_a
# This is a problem-specific parameter.
# It controls how leaky the neuron is.
# Too high (less leaky) makes it harder for the learning algo to move the model in a
# direction that reduces error, so the model's error can explode exponentially. For example, model error going from 7 to 2e6.
# One can imagine that training the output neuron is like a game of Tetris.
)
# integrate_neuron_params = Dict{Symbol, Any}(
# :type => "integrateNeuron",
# :synapticConnectionPercent => 10, # % coverage of total neurons in kfn
# :eta => 1e-6,
# :tau_out => 100.0,
# # Good starting value is 1/50th of tau_a
# # This is problem specific parameter.
# # It controls how leaky the neuron is.
# # Too high (less leaky) makes it harder for the learning algo to move the model in a
# # direction that reduces error, so the model's error can explode exponentially. For example, model error going from 7 to 2e6
# # One can imagine that training the output neuron is like a game of Tetris.
# )
I_kfnparams = Dict{Symbol, Any}(
:knowledgeFnName=> "I",
:neuronFiringRateTarget=> 20.0, # Hz
# group relevant info
:inputPort=> Dict(
:noise=> Dict(
:numbers=> noise_portnumbers,
:params=> passthrough_neuron_params,
),
:signal=> Dict(
:numbers=> signalInput_portnumbers, # in case of GloVe word encoding, it is 300
:params=> passthrough_neuron_params,
),
),
:outputPort=> Dict(
:numbers=> output_portnumbers, # output neuron, this is also the output length
:params=> linear_neuron_params,
),
:computeNeuron=> Dict(
:lif=> Dict(
:numbers=> lif_neuron_number, # number in (row, col) tuple format
:params=> lif_neuron_params,
),
:alif=> Dict(
:numbers=> alif_neuron_number, # number in (row, col) tuple format
:params=> alif_neuron_params,
),
),
)
#------------------------------------------------------------------------------------------------100
model = IronpenGPU.kfn_1(I_kfnparams, device=device);
# serialize(location * sep * filename, model)
println("SNN generated")
return model
end
function data_loader()
# test problem
trainDataset = MNIST(:train)[1:3] # total 60000
# validateDataset = MNIST(:test)
validateDataset = MNIST(:train)[1:3]
labelDict = [0:9...]
trainData = MLUtils.DataLoader(
trainDataset; # fullTrainDataset or trainDataset
batchsize=imageBatch,
collate=true,
shuffle=true,
buffer=true,
partial=false, # better for gpu memory if batchsize is fixed
# parallel=true, #BUG? causes the dataloader to loop forever
)
validateData = MLUtils.DataLoader(
validateDataset;
batchsize=imageBatch,
collate=true,
shuffle=true,
buffer=true,
partial=false, # better for gpu memory if batchsize is fixed
# parallel=true, #BUG? causes the dataloader to loop forever
)
# dummy data used to debug
# trainData = [(rand(10, 10), [5]), (rand(10, 10), [2])]
# trainData = [(rand(10, 10), [5]),]
return trainData, validateData, labelDict
end
function train_snn(model, trainData, validateData, labelDict::Vector)
# random seed
# rng = MersenneTwister(1234)
logitLog = zeros(10, 2)
firedNeurons_t1 = zeros(1)
var1 = zeros(3, 1)
var2 = zeros(3, 1)
var3 = zeros(10, 2)
var4 = zeros(10, 2)
# ----------------------------------- plot ----------------------------------- #
plot10 = Observable(firedNeurons_t1)
plot20 = Observable(logitLog[1 , :])
plot21 = Observable(logitLog[2 , :])
plot22 = Observable(logitLog[3 , :])
plot23 = Observable(logitLog[4 , :])
plot24 = Observable(logitLog[5 , :])
plot25 = Observable(logitLog[6 , :])
plot26 = Observable(logitLog[7 , :])
plot27 = Observable(logitLog[8 , :])
plot28 = Observable(logitLog[9 , :])
plot29 = Observable(logitLog[10, :])
plot30 = Observable(var1[1 , :])
plot31 = Observable(var1[2 , :])
plot32 = Observable(var1[3 , :])
# plot33 = Observable(var1[4 , :])
# plot34 = Observable(var1[5 , :])
# plot35 = Observable(var1[6 , :])
# plot36 = Observable(var1[7 , :])
# plot37 = Observable(var1[8 , :])
# plot38 = Observable(var1[9 , :])
# plot39 = Observable(var1[10, :])
plot40 = Observable(var2[1 , :])
plot41 = Observable(var2[2 , :])
plot42 = Observable(var2[3 , :])
# plot43 = Observable(var2[4 , :])
# plot44 = Observable(var2[5 , :])
# plot45 = Observable(var2[6 , :])
# plot46 = Observable(var2[7 , :])
# plot47 = Observable(var2[8 , :])
# plot48 = Observable(var2[9 , :])
# plot49 = Observable(var2[10, :])
# plot50 = Observable(var3[1 , :])
# plot51 = Observable(var3[2 , :])
# plot52 = Observable(var3[3 , :])
# plot53 = Observable(var3[4 , :])
# plot54 = Observable(var3[5 , :])
# plot55 = Observable(var3[6 , :])
# plot56 = Observable(var3[7 , :])
# plot57 = Observable(var3[8 , :])
# plot58 = Observable(var3[9 , :])
# plot59 = Observable(var3[10, :])
# plot60 = Observable(var4[1 , :])
# plot61 = Observable(var4[2 , :])
# plot62 = Observable(var4[3 , :])
# plot63 = Observable(var4[4 , :])
# plot64 = Observable(var4[5 , :])
# plot65 = Observable(var4[6 , :])
# plot66 = Observable(var4[7 , :])
# plot67 = Observable(var4[8 , :])
# plot68 = Observable(var4[9 , :])
# plot69 = Observable(var4[10, :])
# main figure
fig1 = Figure()
subfig1 = GLMakie.Axis(fig1[1, 1], # define position of this subfigure inside a figure
title = "RSNN firedNeurons_t1",
xlabel = "time",
ylabel = "data"
)
lines!(subfig1, plot10, label = "firedNeurons_t1")
# axislegend(subfig1, position = :lb)
subfig2 = GLMakie.Axis(fig1[2, 1], # define position of this subfigure inside a figure
title = "output neurons logit",
xlabel = "time",
ylabel = "data"
)
lines!(subfig2, plot20, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot21, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot22, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot23, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot24, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot25, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot26, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot27, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot28, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot29, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
# axislegend(subfig2, position = :lb)
subfig3 = GLMakie.Axis(fig1[3, 1], # define position of this subfigure inside a figure
title = "1st lif epsilonRec",
xlabel = "time",
ylabel = "data"
)
lines!(subfig3, plot30, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig3, plot31, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig3, plot32, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot33, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot34, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot35, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot36, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot37, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot38, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot39, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
# axislegend(subfig3, position = :lb)
subfig4 = GLMakie.Axis(fig1[4, 1], # define position of this subfigure inside a figure
title = "RSNN v_t",
xlabel = "time",
ylabel = "data"
)
lines!(subfig4, plot40, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig4, plot41, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig4, plot42, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot43, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot44, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot45, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot46, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot47, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot48, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot49, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
# axislegend(subfig4, position = :lb)
# subfig5 = GLMakie.Axis(fig1[5, 1], # define position of this subfigure inside a figure
# title = "output neuron epsilonRec",
# xlabel = "time",
# ylabel = "data"
# )
# lines!(subfig5, plot50, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot51, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot52, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot53, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot54, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot55, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot56, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot57, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot58, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot59, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
# # axislegend(subfig5, position = :lb)
# subfig6 = GLMakie.Axis(fig1[6, 1], # define position of this subfigure inside a figure
# title = "output neuron wRecChange",
# xlabel = "time",
# ylabel = "data"
# )
# lines!(subfig6, plot60, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot61, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot62, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot63, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot64, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot65, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot66, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot67, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot68, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot69, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
# axislegend(subfig6, position = :lb)
# wait(display(fig1))
display(fig1)
# --------------------------------- end plot --------------------------------- #
# model learning
thinkingPeriod = 16 # note: with the full sMNIST sequence this would be 1000 - 784 = 216
bestAccuracy = 0.0
finalAnswer = [0] |> device # store model prediction in (logit of choices, batch)
stop = 0
vt0 = 0.0 # store vt to compute learning progress
for epoch = 1:1000
stop == 1 ? break : false
println("epoch $epoch")
n = length(trainData)
println("n $n")
p = Progress(n, dt=1.0) # minimum update interval: 1 second
for (imgBatch, labels) in trainData # imgBatch(28, 28, 4) i.e. (row, col, batch), labels(label, batch)
stop == 1 ? break : false
consecutiveCorrect = 0
rep = 0
# for rep in 1:20
while consecutiveCorrect < 10
rep += 1
stop == 1 ? break : false
# prepare image into input signal (10, 2, 784, 4) i.e. (row, col, timestep, batch)
signal = dualTrackSpikeGen(imgBatch, [0.05, 0.1, 0.2, 0.3, 0.5], noise=(true, 1, 0.1), copies=8)
if length(size(signal)) == 3
row, col, sequence = size(signal)
batch = 1
else
row, col, sequence, batch = size(signal)
end
# encode labels
correctAnswer_array = onehotbatch(labels, labelDict) # (correctAnswer, batch)
correctAnswer_number = labels[1]
label_gpu = labels[1] |> device
# insert data into the model sequentially
for timestep in 1:(sequence + thinkingPeriod) # sMNIST has 784 timestep(pixel) + thinking period = 1000 timestep
if timestep <= sequence
current_pixel = view(signal, :, :, timestep, :) |> device
else
current_pixel = zeros(row, col, batch) |> device # dummy input in "thinking" period
end
if timestep == 1 # tell a model to start learning. 1-time only
model.learningStage = [1]
finalAnswer = [0] |> device
vt0 = 0.0
elseif timestep == (sequence+thinkingPeriod)
model.learningStage = [3]
else
end
# predict
logit, _ = model(current_pixel)
# log answer of all timestep
logitLog = [logitLog;; cpu(logit)]
var1 = [var1;; reshape(sum(cpu(model.lif_epsilonRec)[:,:,1:3,1], dims=(1,2)), (:, 1))]
var2 = [var2;; reshape(cpu(model.lif_vt)[1,1,1:3,1], (:, 1))]
# var3 = [var3;; 0]
# var4 = [var4;; 0]
if timestep < sequence # online learning, 1-by-1 timestep
# no error calculation
elseif timestep == sequence # online learning, 1-by-1 timestep
# no error calculation
# answer time windows, collect logit to get finalAnswer
elseif timestep > sequence && timestep < sequence+thinkingPeriod
logit_cpu = logit |> cpu
# logit_cpu = logit_cpu[:,1]
finalAnswer = length(finalAnswer) == 1 ? logit : finalAnswer .+ logit # (logit, batch)
finalAnswer_cpu = finalAnswer |> cpu
on_vt_cpu = model.on_vt |> cpu
on_vt_cpu = on_vt_cpu[1,1,:,1]
modelError, outputError, vt0, progress =
loss(vt0, on_vt_cpu, logit_cpu, finalAnswer_cpu, correctAnswer_array, correctAnswer_number)
modelError_gpu = [modelError] |> device
outputError_gpu = outputError |> device
IronpenGPU.compute_paramsChange!(model, modelError_gpu, outputError_gpu, label_gpu)
# lif_wRecChange_cpu = model.lif_wRecChange |> cpu
# if sum(lif_wRecChange_cpu) != 0
# println("")
# lif_vt_cpu = model.lif_vt |> cpu
# lif_zt_cpu = model.lif_zt |> cpu
# lif_recSignal = model.lif_recSignal |> cpu
# on_vt_cpu = model.on_vt |> cpu
# on_vt_cpu = on_vt_cpu[1,1,:,1]
# on_zt_cpu = on_zt_cpu[1,1,:,1]
# on_wOutChange_cpu = model.on_wOutChange |> cpu
# on_wOutChange_cpu = sum(on_wOutChange_cpu, dims=(1,2))
# println("lif vt $(lif_vt_cpu[1,1,5,1]) lif zt $(lif_zt_cpu[1,1,5,1]) on_vt $on_vt_cpu on_zt $on_zt_cpu on_wOutChange_cpu $on_wOutChange_cpu")
# println("lif_recSignal ", lif_recSignal)
# println("")
# println("lif_epsilonRec_cpu ", lif_epsilonRec_cpu)
# println("")
# println("lif_wRecChange ", lif_wRecChange_cpu)
# println("")
# zit_cumulative = model.zit_cumulative |> cpu
# println("zit_cumulative ", zit_cumulative)
# # error("DEBUG -> main $(Dates.now())")
# end
elseif timestep == sequence+thinkingPeriod #TODO update code
logit_cpu = logit |> cpu
# logit_cpu = logit_cpu[:,1]
finalAnswer = length(finalAnswer) == 1 ? logit : finalAnswer .+ logit # (logit, batch)
finalAnswer_cpu = finalAnswer |> cpu
on_vt_cpu = model.on_vt |> cpu
on_vt_cpu = on_vt_cpu[1,1,:,1]
modelError, outputError, vt0, progress =
loss(vt0, on_vt_cpu, logit_cpu, finalAnswer_cpu, correctAnswer_array, correctAnswer_number)
modelError_gpu = [modelError] |> device
outputError_gpu = outputError |> device
lif_epsilonRec_cpu = model.lif_epsilonRec |> cpu
on_zt_cpu = model.on_zt |> cpu
IronpenGPU.compute_paramsChange!(model, modelError_gpu, outputError_gpu, label_gpu)
lif_wRecChange_cpu = model.lif_wRecChange |> cpu
println("")
lif_recSignal_cpu = model.lif_recSignal |> cpu
lif_recSignal_cpu = sum(lif_recSignal_cpu[:,:,5,1])
lif_vt_cpu = model.lif_vt |> cpu
lif_vt_cpu = lif_vt_cpu[1,1,5,1]
lif_zt_cpu = model.lif_zt |> cpu
lif_zt_cpu = lif_zt_cpu[1,1,5,1]
lif_epsilonRec_cpu = model.lif_epsilonRec |> cpu
lif_epsilonRec_cpu = sum(lif_epsilonRec_cpu[:,:,5,1])
lif_wRecChange_cpu = sum(lif_wRecChange_cpu[:,:,5,1])
on_vt_cpu = model.on_vt |> cpu
on_vt_cpu = on_vt_cpu[1,1,:,1]
on_zt_cpu = on_zt_cpu[1,1,:,1]
on_wOutChange_cpu = model.on_wOutChange |> cpu
on_wOutChange_cpu = sum(on_wOutChange_cpu, dims=(1,2))
println("lif recSignal $lif_recSignal_cpu lif vt $lif_vt_cpu lif zt $lif_zt_cpu lif_epsilonRec_cpu $lif_epsilonRec_cpu lif_wRecChange_cpu $lif_wRecChange_cpu on_vt $on_vt_cpu on_zt $on_zt_cpu on_wOutChange_cpu $on_wOutChange_cpu")
# println("lif_recSignal ", lif_recSignal)
# println("")
# println("lif_epsilonRec_cpu ", lif_epsilonRec_cpu)
# println("")
# println("lif_wRecChange ", lif_wRecChange_cpu)
# println("")
# zit_cumulative = model.zit_cumulative |> cpu
# println("zit_cumulative ", zit_cumulative)
# error("DEBUG -> main $(Dates.now())")
# commit learned weights (currently learn! is applied in every branch, correct or not)
finalAnswer_cpu = finalAnswer |> cpu
# println("label $(labels[1]) finalAnswer $finalAnswer_cpu")
maxMask = isequal.(finalAnswer_cpu[:,1], maximum(finalAnswer_cpu[:,1])) # renamed to avoid shadowing Base.max
if sum(finalAnswer_cpu) == 0
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer __ LEARNING")
elseif sum(maxMask) == 1 && findall(maxMask)[1] - 1 == labels[1]
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect += 1
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu CORRECT")
elseif sum(maxMask) == 1 && findall(maxMask)[1] - 1 != labels[1]
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
else
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu LEARNING")
end
# error("DEBUG -> main $(Dates.now())")
else
error("undefined condition line $(@__LINE__)")
# error("DEBUG -> main $(Dates.now())")
end
# update plot
plot10[] = firedNeurons_t1
plot20[] = view(logitLog, 1 , :)
plot21[] = view(logitLog, 2 , :)
plot22[] = view(logitLog, 3 , :)
plot23[] = view(logitLog, 4 , :)
plot24[] = view(logitLog, 5 , :)
plot25[] = view(logitLog, 6 , :)
plot26[] = view(logitLog, 7 , :)
plot27[] = view(logitLog, 8 , :)
plot28[] = view(logitLog, 9 , :)
plot29[] = view(logitLog, 10, :)
plot30[] = view(var1, 1 , :)
plot31[] = view(var1, 2 , :)
plot32[] = view(var1, 3 , :)
# plot33[] = view(var1, 4 , :)
# plot34[] = view(var1, 5 , :)
# plot35[] = view(var1, 6 , :)
# plot36[] = view(var1, 7 , :)
# plot37[] = view(var1, 8 , :)
# plot38[] = view(var1, 9 , :)
# plot39[] = view(var1, 10, :)
plot40[] = view(var2, 1 , :)
plot41[] = view(var2, 2 , :)
plot42[] = view(var2, 3 , :)
# plot43[] = view(var2, 4 , :)
# plot44[] = view(var2, 5 , :)
# plot45[] = view(var2, 6 , :)
# plot46[] = view(var2, 7 , :)
# plot47[] = view(var2, 8 , :)
# plot48[] = view(var2, 9 , :)
# plot49[] = view(var2, 10, :)
# plot50[] = view(var3, 1 , :)
# plot51[] = view(var3, 2 , :)
# plot52[] = view(var3, 3 , :)
# plot53[] = view(var3, 4 , :)
# plot54[] = view(var3, 5 , :)
# plot55[] = view(var3, 6 , :)
# plot56[] = view(var3, 7 , :)
# plot57[] = view(var3, 8 , :)
# plot58[] = view(var3, 9 , :)
# plot59[] = view(var3, 10, :)
# plot60[] = view(var4, 1 , :)
# plot61[] = view(var4, 2 , :)
# plot62[] = view(var4, 3 , :)
# plot63[] = view(var4, 4 , :)
# plot64[] = view(var4, 5 , :)
# plot65[] = view(var4, 6 , :)
# plot66[] = view(var4, 7 , :)
# plot67[] = view(var4, 8 , :)
# plot68[] = view(var4, 9 , :)
# plot69[] = view(var4, 10, :)
end
# end-thinkingPeriod+2; +2 because initialize logitLog = zeros(10, 2)
# _modelRespond = logitLog[:, end-thinkingPeriod+2:end] # answer count during thinking period
# _modelRespond = [sum(i) for i in eachrow(_modelRespond)]
# modelRespond = isequal.(isequal.(_modelRespond, 0), 0)
display(fig1)
sleep(1)
if rep % 3 == 0
firedNeurons_t1 = zeros(1)
logitLog = zeros(10, 2)
var1 = zeros(3, 1)
var2 = zeros(3, 1)
# var3 = zeros(10, 2)
# var4 = zeros(10, 2)
end
end
next!(p)
end
if epoch > 200
# check accuracy
println("validating model")
percentCorrect = validate(model, validateData, labelDict)
bestAccuracy = percentCorrect > bestAccuracy ? percentCorrect : bestAccuracy
println("$modelname model accuracy is $percentCorrect %, best accuracy is $bestAccuracy")
end
end
end
function validate(model, dataset, labelDict)
totalAnswerCorrectly = 0 # score
totalSignal = 0
thinkingPeriod = 16 # note: with the full sMNIST sequence this would be 1000 - 784 = 216
predict = [0] |> device
n = length(dataset)
println("n $n")
p = Progress(n, dt=1.0) # minimum update interval: 1 second
for (imgBatch, labels) in dataset
signal = spikeGenerator(imgBatch, [0.05, 0.1, 0.2, 0.3, 0.5], noise=(true, 1, 0.5), copies=18)
if length(size(signal)) == 3
row, col, sequence = size(signal)
batch = 1
else
row, col, sequence, batch = size(signal)
end
# encode labels
correctAnswer = onehotbatch(labels, labelDict) # (choices, batch)
# insert data into the model sequentially
for timestep in 1:(sequence + thinkingPeriod) # sMNIST has 784 timestep(pixel) + thinking period = 1000 timestep
if timestep <= sequence
current_pixel = view(signal, :, :, timestep, :) |> device
else
current_pixel = zeros(row, col, batch) |> device # dummy input in "thinking" period
end
if timestep == 1 # tell a model to start learning. 1-time only
predict = [0] |> device
elseif timestep == (sequence+thinkingPeriod)
else
end
# predict
logit, _ = model(current_pixel)
if timestep < sequence # online learning, 1-by-1 timestep
# no error calculation
elseif timestep == sequence # online learning, 1-by-1 timestep
# no error calculation
elseif timestep > sequence && timestep < sequence+thinkingPeriod # collect answer
predict = length(predict) == 1 ? logit : predict .+ logit # (logit, batch)
elseif timestep == sequence+thinkingPeriod
predict = length(predict) == 1 ? logit : predict .+ logit # (logit, batch)
else
error("undefined condition line $(@__LINE__)")
end
end
predict_cpu = predict |> cpu
_predict_label = mapslices(GeneralUtils.vectorMax, predict_cpu; dims=1)
s = sum(_predict_label, dims=1)
if 0 ∉ s # only score batches where every column produced at least one predicted max
predict_label = []
for i in eachcol(_predict_label)
_label = findall(i) .- 1
if length(_label) == 1
append!(predict_label, _label)
else
push!(predict_label, -1) # predicted zero or more than 1 label; add a sentinel label that never matches
end
end
answerCorrectly = sum([x == y for (x,y) in zip(predict_label, labels)])
totalAnswerCorrectly += answerCorrectly
totalSignal += batch
end
next!(p)
end
percentCorrect = totalAnswerCorrectly * 100.0 / totalSignal
return percentCorrect::Float64
end
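# dualTrackSpikeGen (below) flattens each image twice, once along rows and once
# along columns, spike-encodes both flattenings via spikeGenerator, and
# concatenates the two tracks along dim 2, so the network sees the same image
# scanned in two directions.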
function dualTrackSpikeGen(inputsignals, thresholds=[1.0]; noise=(false, 1, 0.5), copies=0)
rowInputSignal = nothing
colInputSignal = nothing
for slice in eachslice(inputsignals, dims=3)
srow = nothing
scol = nothing
for row in eachrow(slice)
srow = srow === nothing ? row : cat(srow, row, dims=1)
end
for col in eachcol(slice)
scol = scol === nothing ? col : cat(scol, col, dims=1)
end
rowInputSignal = rowInputSignal === nothing ? srow : cat(rowInputSignal, srow, dims=3)
colInputSignal = colInputSignal === nothing ? scol : cat(colInputSignal, scol, dims=3)
end
rowInputSignal = reshape(rowInputSignal, (size(rowInputSignal, 1), 1, size(inputsignals, 3)))
colInputSignal = reshape(colInputSignal, (size(colInputSignal, 1), 1, size(inputsignals, 3)))
rowInputSignal = spikeGenerator(rowInputSignal, thresholds, noise=noise, copies=3)
colInputSignal = spikeGenerator(colInputSignal, thresholds, noise=noise, copies=3)
signal = cat(rowInputSignal, colInputSignal, dims=2)
return signal
end
""" inputsignals is normal column-major julia matrix in (row, col, batch) dimension
- each threshold scan return 2 vectors. 1 for +, 1 for -
- noise = (true/false, row, col, probability)
"""
function spikeGenerator(inputsignals, thresholds=[1.0]; noise=(false, 1, 0.5), copies=0)
s = length(size(inputsignals))
ar = [] # holding all signals that are scanned
for slice in eachslice(inputsignals, dims=s)
signal_jl = reshape(slice, (:, 1)) # python array is row-major
signal_pytensor = torch.from_numpy( np.asarray(signal_jl) )
arr = [] # holding signal that is scanned by several thresholds
for threshold in thresholds
spike_py = spikegen.delta(signal_pytensor, threshold=threshold, off_spike=true)
_spike_jl = pyconvert(Array, spike_py.data.numpy())
spike_jl = reshape(_spike_jl, (1, :)) # reshape back to julia's column-major
spike_jl1 = isequal.(spike_jl, 1)
spike_jl2 = isequal.(spike_jl, -1)
arr = length(arr) == 0 ? [spike_jl1; spike_jl2] : [arr; spike_jl1; spike_jl2]
end
arrSize = [size(arr)...]
arr = reshape(arr, (arrSize[1], 1, arrSize[2])) # reshape into (row, 1, timestep)
# multiply col
if copies > 0
a = deepcopy(arr)
for i in 1:copies
arr = cat(arr, a, dims=2)
end
end
if noise[1] == true
arrSize = [size(arr)...]
n = noiseGenerator(arrSize[1], noise[2], arrSize[3], prob=noise[3])
arr = cat(arr, n, dims=2) # concatenate into (row, signal:noise, timestep)
end
# concatenate into (row, signal:noise, timestep, batch)
ar = length(ar) == 0 ? arr : [ar;;;;arr]
end
return ar
end
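# Plain-Julia sketch of what a single threshold scan does (an approximation of
# snntorch.spikegen.delta with off_spike=true; the exact snntorch semantics may
# differ): emit +1 when the signal rises by more than `threshold` from the
# previous sample and -1 when it falls by more than `threshold`.
function delta_scan(signal::AbstractVector, threshold)
    spikes = zeros(Int, length(signal))
    for t in 2:length(signal)
        d = signal[t] - signal[t-1]
        if d > threshold
            spikes[t] = 1
        elseif d < -threshold
            spikes[t] = -1
        end
    end
    return spikes
end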
function noiseGenerator(row, col, z; prob=0.5)
spike_prob = torch.rand(row, col, z) * prob
spike_rand = spikegen.rate_conv(spike_prob)
noise = isequal.(pyconvert(Array, spike_rand.data.numpy()), 1)
return noise
end
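# Equivalent plain-Julia sketch: rate coding here is just an independent
# Bernoulli draw per bin, i.e. noise ≈ rand(row, col, z) .< prob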
function loss(vt0::AbstractFloat, vt1::AbstractArray, logit::AbstractArray,
finalAnswer, correctAnswer_array, correctAnswer_number)
labelPosition = correctAnswer_number + 1
# get vt of correct neuron
vt1 = vt1[labelPosition]
# get zt of correct neuron
zt = finalAnswer[labelPosition]
rsnnError = nothing
outputError = nothing
progress = nothing
""" the idea is if the correct output neuron fires, -w other output neurons should be
enough.
However if correct output neuron doesn't fire, +w along RSNN neural pathway
and
"""
if zt > 0
progress = 2
rsnnError = 0 # already correct, no weight update
outputError = correctAnswer_array .- finalAnswer
outputError[labelPosition] = 0 # already correct, no weight update
elseif vt1 > vt0 # progress increase
progress = 1
rsnnError = 1 - vt1
outputError = correctAnswer_array .- finalAnswer
elseif vt1 == vt0 # no progress, let RSNN try new pathway
rsnnError = 0
progress = 0
outputError = (finalAnswer .= 0) # note: zeroes finalAnswer in place and reuses it as the error
elseif vt1 < vt0 # setback,
rsnnError = vt0 - vt1
progress = -1
outputError = correctAnswer_array .- finalAnswer
else
error("undefined condition zt $zt, vt1 $vt1 vt0 $vt0")
end
return rsnnError, outputError, vt1, progress
end
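# Toy walk-through of the loss branches (illustrative, hypothetical values):
# label = 1, so labelPosition = 2; the correct neuron has not fired (zt = 0.0)
# but its membrane potential rose from 0.0 to 0.4, so progress = 1,
# rsnnError = 1 - 0.4 = 0.6, and outputError = correctAnswer .- finalAnswer.
function _demo_loss()
    correct = Float64.([0, 1, 0]) # one-hot target for label 1
    final   = [0.2, 0.0, 0.1]     # accumulated logits; correct neuron silent
    vt      = [0.1, 0.4, 0.0]     # output membrane potentials
    return loss(0.0, vt, final, final, correct, 1) # (0.6, [-0.2, 1.0, -0.1], 0.4, 1)
end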
# function arrayMax(x)
# if sum(GeneralUtils.isNotEqual.(x, 0)) == 0 # guard against all-zeros array
# return GeneralUtils.isNotEqual.(x, 0)
# else
# return isequal.(x, maximum(x))
# end
# end
# arraySliceMax(x) = mapslices(arrayMax, x; dims=1)
function main()
filelocation = string(@__DIR__)
filename = "$modelname.jl163"
training_start_time = Dates.now()
println("$modelname program started $training_start_time")
model = generate_snn(filename, filelocation)
trainDataset, validateDataset, labelDict = data_loader()
train_snn(model, trainDataset, validateDataset, labelDict)
finish_training_time = Dates.now()
println("training done, $training_start_time ==> $finish_training_time ")
println(" ///////////////////////////////////////////////////////////////////////")
end
# only run main() if Julia isn't started interactively
# https://discourse.julialang.org/t/scripting-like-a-julian/50707
!isinteractive() && main()
#------------------------------------------------------------------------------------------------100

View File

@@ -0,0 +1,946 @@
# This file is machine-generated - editing it directly is not advised
julia_version = "1.9.3"
manifest_format = "2.0"
project_hash = "844808a02b2a30acdc69d975773e029da0ec81b8"
[[deps.AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "8bc0aaec0ca548eb6cf5f0d7d16351650c1ee956"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.3.2"
weakdeps = ["ChainRulesCore"]
[deps.AbstractFFTs.extensions]
AbstractFFTsChainRulesCoreExt = "ChainRulesCore"
[[deps.Adapt]]
deps = ["LinearAlgebra", "Requires"]
git-tree-sha1 = "76289dc51920fdc6e0013c872ba9551d54961c24"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.6.2"
weakdeps = ["StaticArrays"]
[deps.Adapt.extensions]
AdaptStaticArraysExt = "StaticArrays"
[[deps.ArgCheck]]
git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4"
uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197"
version = "2.3.0"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Atomix]]
deps = ["UnsafeAtomics"]
git-tree-sha1 = "c06a868224ecba914baa6942988e2f2aade419be"
uuid = "a9b6321e-bd34-4604-b9c9-b65b8de01458"
version = "0.1.0"
[[deps.BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "dbf84058d0a8cbbadee18d25cf606934b22d7c66"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.4.2"
[[deps.BangBang]]
deps = ["Compat", "ConstructionBase", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables"]
git-tree-sha1 = "e28912ce94077686443433c2800104b061a827ed"
uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66"
version = "0.3.39"
[deps.BangBang.extensions]
BangBangChainRulesCoreExt = "ChainRulesCore"
BangBangDataFramesExt = "DataFrames"
BangBangStaticArraysExt = "StaticArrays"
BangBangStructArraysExt = "StructArrays"
BangBangTypedTablesExt = "TypedTables"
[deps.BangBang.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
TypedTables = "9d95f2ec-7b3d-5a63-8d20-e2491e220bb9"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.Baselet]]
git-tree-sha1 = "aebf55e6d7795e02ca500a689d326ac979aaf89e"
uuid = "9718e550-a3fa-408a-8086-8db961cd8217"
version = "0.1.1"
[[deps.CEnum]]
git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.2"
[[deps.CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CUDA_Driver_jll", "CUDA_Runtime_Discovery", "CUDA_Runtime_jll", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "KernelAbstractions", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Preferences", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "UnsafeAtomicsLLVM"]
git-tree-sha1 = "442d989978ed3ff4e174c928ee879dc09d1ef693"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "4.3.2"
[[deps.CUDA_Driver_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"]
git-tree-sha1 = "498f45593f6ddc0adff64a9310bb6710e851781b"
uuid = "4ee394cb-3365-5eb0-8335-949819d2adfc"
version = "0.5.0+1"
[[deps.CUDA_Runtime_Discovery]]
deps = ["Libdl"]
git-tree-sha1 = "bcc4a23cbbd99c8535a5318455dcf0f2546ec536"
uuid = "1af6417a-86b4-443c-805f-a4643ffb695f"
version = "0.2.2"
[[deps.CUDA_Runtime_jll]]
deps = ["Artifacts", "CUDA_Driver_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "5248d9c45712e51e27ba9b30eebec65658c6ce29"
uuid = "76a88914-d11a-5bdc-97e0-2f5a05c973a2"
version = "0.6.0+0"
[[deps.CUDNN_jll]]
deps = ["Artifacts", "CUDA_Runtime_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "2918fbffb50e3b7a0b9127617587afa76d4276e8"
uuid = "62b44479-cb7b-5706-934f-f13b2eb2e645"
version = "8.8.1+0"
[[deps.Calculus]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f641eb0a4f00c343bbc32346e1217b86f3ce9dad"
uuid = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9"
version = "0.5.1"
[[deps.ChainRules]]
deps = ["Adapt", "ChainRulesCore", "Compat", "Distributed", "GPUArraysCore", "IrrationalConstants", "LinearAlgebra", "Random", "RealDot", "SparseArrays", "Statistics", "StructArrays"]
git-tree-sha1 = "1cdf290d4feec68824bfb84f4bfc9f3aba185647"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "1.51.1"
[[deps.ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "e30f2f4e20f7f186dc36529910beaedc60cfa644"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.16.0"
[[deps.CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[deps.Compat]]
deps = ["UUIDs"]
git-tree-sha1 = "4e88377ae7ebeaf29a047aa1ee40826e0b708a5d"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "4.7.0"
weakdeps = ["Dates", "LinearAlgebra"]
[deps.Compat.extensions]
CompatLinearAlgebraExt = "LinearAlgebra"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "1.0.5+0"
[[deps.CompositionsBase]]
git-tree-sha1 = "802bb88cd69dfd1509f6670416bd4434015693ad"
uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b"
version = "0.1.2"
[deps.CompositionsBase.extensions]
CompositionsBaseInverseFunctionsExt = "InverseFunctions"
[deps.CompositionsBase.weakdeps]
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.CondaPkg]]
deps = ["JSON3", "Markdown", "MicroMamba", "Pidfile", "Pkg", "TOML"]
git-tree-sha1 = "741146cf2ced5859faae76a84b541aa9af1a78bb"
uuid = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
version = "0.2.18"
[[deps.ConstructionBase]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "738fec4d684a9a6ee9598a8bfee305b26831f28c"
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
version = "1.5.2"
[deps.ConstructionBase.extensions]
ConstructionBaseIntervalSetsExt = "IntervalSets"
ConstructionBaseStaticArraysExt = "StaticArrays"
[deps.ConstructionBase.weakdeps]
IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
[[deps.ContextVariablesX]]
deps = ["Compat", "Logging", "UUIDs"]
git-tree-sha1 = "25cc3803f1030ab855e383129dcd3dc294e322cc"
uuid = "6add18c4-b38d-439d-96f6-d6bc489c04c5"
version = "0.1.3"
[[deps.DataAPI]]
git-tree-sha1 = "8da84edb865b0b5b0100c0666a9bc9a0b71c553c"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.15.0"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "cf25ccb972fec4e4817764d01c82386ae94f77b4"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.14"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DefineSingletons]]
git-tree-sha1 = "0fba8b706d0178b4dc7fd44a96a92382c9065c2c"
uuid = "244e2a9f-e319-4986-a169-4d1fe445cd52"
version = "0.1.2"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
git-tree-sha1 = "9e2f36d3c96a820c678f2f1f1782582fcf685bae"
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
version = "1.9.1"
[[deps.DiffResults]]
deps = ["StaticArraysCore"]
git-tree-sha1 = "782dd5f4561f5d267313f23853baaaa4c52ea621"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.1.0"
[[deps.DiffRules]]
deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "23163d55f885173722d1e4cf0f6110cdbaf7e272"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.15.1"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.Distributions]]
deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "Test"]
git-tree-sha1 = "938fe2981db009f531b6332e31c58e9584a2f9bd"
uuid = "31c24e10-a181-5473-b8eb-7969acd0382f"
version = "0.25.100"
[deps.Distributions.extensions]
DistributionsChainRulesCoreExt = "ChainRulesCore"
DistributionsDensityInterfaceExt = "DensityInterface"
[deps.Distributions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DensityInterface = "b429d917-457f-4dbc-8f4c-0cc954292b1d"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.9.3"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
version = "1.6.0"
[[deps.DualNumbers]]
deps = ["Calculus", "NaNMath", "SpecialFunctions"]
git-tree-sha1 = "5837a837389fccf076445fce071c8ddaea35a566"
uuid = "fa6b7ba4-c1ee-5f82-b5fc-ecf0adba8f74"
version = "0.6.8"
[[deps.ExprTools]]
git-tree-sha1 = "c1d06d129da9f55715c6c212866f5b1bddc5fa00"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.9"
[[deps.FLoops]]
deps = ["BangBang", "Compat", "FLoopsBase", "InitialValues", "JuliaVariables", "MLStyle", "Serialization", "Setfield", "Transducers"]
git-tree-sha1 = "ffb97765602e3cbe59a0589d237bf07f245a8576"
uuid = "cc61a311-1640-44b5-9fba-1b764f453329"
version = "0.2.1"
[[deps.FLoopsBase]]
deps = ["ContextVariablesX"]
git-tree-sha1 = "656f7a6859be8673bf1f35da5670246b923964f7"
uuid = "b9860ae5-e623-471e-878b-f6a53c775ea6"
version = "0.1.1"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[deps.FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "0b3b52afd0f87b0a3f5ada0466352d125c9db458"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "1.2.1"
[[deps.Flux]]
deps = ["Adapt", "CUDA", "ChainRulesCore", "Functors", "LinearAlgebra", "MLUtils", "MacroTools", "NNlib", "NNlibCUDA", "OneHotArrays", "Optimisers", "Preferences", "ProgressLogging", "Random", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "Zygote", "cuDNN"]
git-tree-sha1 = "3e2c3704c2173ab4b1935362384ca878b53d4c34"
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.13.17"
[deps.Flux.extensions]
AMDGPUExt = "AMDGPU"
FluxMetalExt = "Metal"
[deps.Flux.weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
[[deps.ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"]
git-tree-sha1 = "00e252f4d706b3d55a8863432e742bf5717b498d"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.35"
weakdeps = ["StaticArrays"]
[deps.ForwardDiff.extensions]
ForwardDiffStaticArraysExt = "StaticArrays"
[[deps.Functors]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "478f8c3145bb91d82c2cf20433e8c1b30df454cc"
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
version = "0.4.4"
[[deps.Future]]
deps = ["Random"]
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
[[deps.GPUArrays]]
deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"]
git-tree-sha1 = "2e57b4a4f9cc15e85a24d603256fe08e527f48d1"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "8.8.1"
[[deps.GPUArraysCore]]
deps = ["Adapt"]
git-tree-sha1 = "2d6ca471a6c7b536127afccfa7564b5b39227fe0"
uuid = "46192b85-c4d5-4398-a991-12ede77f4527"
version = "0.1.5"
[[deps.GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Scratch", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "cb090aea21c6ca78d59672a7e7d13bd56d09de64"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.20.3"
[[deps.GeneralUtils]]
deps = ["CUDA", "DataStructures", "Distributions", "Flux", "JSON3", "Random"]
path = "C:\\Users\\pitak\\.julia\\dev\\GeneralUtils"
uuid = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
version = "0.1.0"
[[deps.HypergeometricFunctions]]
deps = ["DualNumbers", "LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"]
git-tree-sha1 = "f218fe3736ddf977e0e772bc9a586b2383da2685"
uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a"
version = "0.3.23"
[[deps.IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "eac00994ce3229a464c2847e956d77a2c64ad3a5"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.4.10"
[[deps.InitialValues]]
git-tree-sha1 = "4da0f88e9a39111c2fa3add390ab15f3a44f3ca3"
uuid = "22cec73e-a1b8-11e9-2c92-598750a2cf9c"
version = "0.3.1"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.IrrationalConstants]]
git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.2.2"
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[deps.JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.4.1"
[[deps.JSON3]]
deps = ["Dates", "Mmap", "Parsers", "PrecompileTools", "StructTypes", "UUIDs"]
git-tree-sha1 = "5b62d93f2582b09e469b3099d839c2d2ebf5066d"
uuid = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
version = "1.13.1"
[[deps.JuliaVariables]]
deps = ["MLStyle", "NameResolution"]
git-tree-sha1 = "49fb3cb53362ddadb4415e9b73926d6b40709e70"
uuid = "b14d175d-62b4-44ba-8fb7-3064adc8c3ec"
version = "0.2.4"
[[deps.KernelAbstractions]]
deps = ["Adapt", "Atomix", "InteractiveUtils", "LinearAlgebra", "MacroTools", "PrecompileTools", "SparseArrays", "StaticArrays", "UUIDs", "UnsafeAtomics", "UnsafeAtomicsLLVM"]
git-tree-sha1 = "b48617c5d764908b5fac493cd907cf33cc11eec1"
uuid = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
version = "0.9.6"
[[deps.LLVM]]
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "5007c1421563108110bbd57f63d8ad4565808818"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "5.2.0"
[[deps.LLVMExtra_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "1222116d7313cdefecf3d45a2bc1a89c4e7c9217"
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
version = "0.0.22+0"
[[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.84.0+0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.LinearAlgebra]]
deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "c3ce8e7420b3a6e071e0fe4745f5d4300e37b13f"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.24"
[deps.LogExpFunctions.extensions]
LogExpFunctionsChainRulesCoreExt = "ChainRulesCore"
LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables"
LogExpFunctionsInverseFunctionsExt = "InverseFunctions"
[deps.LogExpFunctions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.MLStyle]]
git-tree-sha1 = "bc38dff0548128765760c79eb7388a4b37fae2c8"
uuid = "d8e11817-5142-5d16-987a-aa16d5891078"
version = "0.4.17"
[[deps.MLUtils]]
deps = ["ChainRulesCore", "Compat", "DataAPI", "DelimitedFiles", "FLoops", "NNlib", "Random", "ShowCases", "SimpleTraits", "Statistics", "StatsBase", "Tables", "Transducers"]
git-tree-sha1 = "3504cdb8c2bc05bde4d4b09a81b01df88fcbbba0"
uuid = "f1d291b0-491e-4a28-83b9-f70985020b54"
version = "0.4.3"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.10"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.2+0"
[[deps.MicroCollections]]
deps = ["BangBang", "InitialValues", "Setfield"]
git-tree-sha1 = "629afd7d10dbc6935ec59b32daeb33bc4460a42e"
uuid = "128add7d-3638-4c79-886c-908ea0c25c34"
version = "0.1.4"
[[deps.MicroMamba]]
deps = ["Pkg", "Scratch", "micromamba_jll"]
git-tree-sha1 = "011cab361eae7bcd7d278f0a7a00ff9c69000c51"
uuid = "0b3b1443-0f03-428d-bdfb-f27f9c1191ea"
version = "0.1.14"
[[deps.Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "f66bdc5de519e8f8ae43bdc598782d35a25b1272"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.1.0"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.10.11"
[[deps.NNlib]]
deps = ["Adapt", "Atomix", "ChainRulesCore", "GPUArraysCore", "KernelAbstractions", "LinearAlgebra", "Pkg", "Random", "Requires", "Statistics"]
git-tree-sha1 = "72240e3f5ca031937bd536182cb2c031da5f46dd"
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
version = "0.8.21"
[deps.NNlib.extensions]
NNlibAMDGPUExt = "AMDGPU"
[deps.NNlib.weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
[[deps.NNlibCUDA]]
deps = ["Adapt", "CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics", "cuDNN"]
git-tree-sha1 = "f94a9684394ff0d325cc12b06da7032d8be01aaf"
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
version = "0.2.7"
[[deps.NaNMath]]
deps = ["OpenLibm_jll"]
git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "1.0.2"
[[deps.NameResolution]]
deps = ["PrettyPrint"]
git-tree-sha1 = "1a0fa0e9613f46c9b8c11eee38ebb4f590013c5e"
uuid = "71a1bf82-56d0-4bbc-8a3c-48b961074391"
version = "0.1.5"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
version = "1.2.0"
[[deps.OneHotArrays]]
deps = ["Adapt", "ChainRulesCore", "Compat", "GPUArraysCore", "LinearAlgebra", "NNlib"]
git-tree-sha1 = "5e4029759e8699ec12ebdf8721e51a659443403c"
uuid = "0b1bfda6-eb8a-41d2-88d8-f5af5cad476f"
version = "0.2.4"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.21+4"
[[deps.OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
version = "0.8.1+0"
[[deps.OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[deps.Optimisers]]
deps = ["ChainRulesCore", "Functors", "LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "6a01f65dd8583dee82eecc2a19b0ff21521aa749"
uuid = "3bd65402-5787-11e9-1adc-39752487f4e2"
version = "0.2.18"
[[deps.OrderedCollections]]
git-tree-sha1 = "d321bf2de576bf25ec4d3e4360faca399afca282"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.6.0"
[[deps.PDMats]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "67eae2738d63117a196f497d7db789821bce61d1"
uuid = "90014a1f-27ba-587c-ab20-58faa44d9150"
version = "0.11.17"
[[deps.Parsers]]
deps = ["Dates", "PrecompileTools", "UUIDs"]
git-tree-sha1 = "4b2e829ee66d4218e0cef22c0a64ee37cf258c29"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.7.1"
[[deps.Pidfile]]
deps = ["FileWatching", "Test"]
git-tree-sha1 = "2d8aaf8ee10df53d0dfb9b8ee44ae7c04ced2b03"
uuid = "fa939f87-e72e-5be4-a000-7fc836dbe307"
version = "1.3.0"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.9.2"
[[deps.PrecompileTools]]
deps = ["Preferences"]
git-tree-sha1 = "9673d39decc5feece56ef3940e5dafba15ba0f81"
uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
version = "1.1.2"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "7eb1686b4f04b82f96ed7a4ea5890a4f0c7a09f1"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.4.0"
[[deps.PrettyPrint]]
git-tree-sha1 = "632eb4abab3449ab30c5e1afaa874f0b98b586e4"
uuid = "8162dcfd-2161-5ef2-ae6c-7681170c5f98"
version = "0.2.0"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.ProgressLogging]]
deps = ["Logging", "SHA", "UUIDs"]
git-tree-sha1 = "80d919dee55b9c50e8d9e2da5eeafff3fe58b539"
uuid = "33c8b6b6-d38a-422a-b730-caa89a2f386c"
version = "0.1.4"
[[deps.PythonCall]]
deps = ["CondaPkg", "Dates", "Libdl", "MacroTools", "Markdown", "Pkg", "REPL", "Requires", "Serialization", "Tables", "UnsafePointers"]
git-tree-sha1 = "70af6bdbde63d7d0a4ea99f3e890ebdb55e9d464"
uuid = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
version = "0.9.14"
[[deps.QuadGK]]
deps = ["DataStructures", "LinearAlgebra"]
git-tree-sha1 = "6ec7ac8412e83d57e313393220879ede1740f9ee"
uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
version = "2.8.2"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.Random123]]
deps = ["Random", "RandomNumbers"]
git-tree-sha1 = "552f30e847641591ba3f39fd1bed559b9deb0ef3"
uuid = "74087812-796a-5b5d-8853-05524746bad3"
version = "1.6.1"
[[deps.RandomNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
version = "1.5.3"
[[deps.RealDot]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
version = "0.1.0"
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.3.0"
[[deps.Rmath]]
deps = ["Random", "Rmath_jll"]
git-tree-sha1 = "f65dcb5fa46aee0cf9ed6274ccbd597adc49aa7b"
uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa"
version = "0.7.1"
[[deps.Rmath_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6ed52fdd3382cf21947b15e8870ac0ddbff736da"
uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f"
version = "0.4.0+0"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.Scratch]]
deps = ["Dates"]
git-tree-sha1 = "30449ee12237627992a99d5e30ae63e4d78cd24a"
uuid = "6c6a2e73-6563-6170-7368-637461726353"
version = "1.2.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.Setfield]]
deps = ["ConstructionBase", "Future", "MacroTools", "StaticArraysCore"]
git-tree-sha1 = "e2cc6d8c88613c05e1defb55170bf5ff211fbeac"
uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46"
version = "1.1.1"
[[deps.ShowCases]]
git-tree-sha1 = "7f534ad62ab2bd48591bdeac81994ea8c445e4a5"
uuid = "605ecd9f-84a6-4c9e-81e2-4798472b76a3"
version = "0.1.0"
[[deps.SimpleTraits]]
deps = ["InteractiveUtils", "MacroTools"]
git-tree-sha1 = "5d7e3f4e11935503d3ecaf7186eac40602e7d231"
uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d"
version = "0.9.4"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "c60ec5c62180f27efea3ba2908480f8055e17cee"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.1.1"
[[deps.SparseArrays]]
deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.SpecialFunctions]]
deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "7beb031cf8145577fbccacd94b8a8f4ce78428d3"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "2.3.0"
weakdeps = ["ChainRulesCore"]
[deps.SpecialFunctions.extensions]
SpecialFunctionsChainRulesCoreExt = "ChainRulesCore"
[[deps.SplittablesBase]]
deps = ["Setfield", "Test"]
git-tree-sha1 = "e08a62abc517eb79667d0a29dc08a3b589516bb5"
uuid = "171d559e-b47b-412a-8079-5efa626c420e"
version = "0.1.15"
[[deps.StaticArrays]]
deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"]
git-tree-sha1 = "832afbae2a45b4ae7e831f86965469a24d1d8a83"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.5.26"
[[deps.StaticArraysCore]]
git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a"
uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c"
version = "1.4.0"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
version = "1.9.0"
[[deps.StatsAPI]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "45a7769a04a3cf80da1c1c7c60caf932e6f4c9f7"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.6.0"
[[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "75ebe04c5bed70b91614d684259b661c9e6274a4"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.34.0"
[[deps.StatsFuns]]
deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"]
git-tree-sha1 = "f625d686d5a88bcd2b15cd81f18f98186fdc0c9a"
uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
version = "1.3.0"
[deps.StatsFuns.extensions]
StatsFunsChainRulesCoreExt = "ChainRulesCore"
StatsFunsInverseFunctionsExt = "InverseFunctions"
[deps.StatsFuns.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.StructArrays]]
deps = ["Adapt", "DataAPI", "GPUArraysCore", "StaticArraysCore", "Tables"]
git-tree-sha1 = "521a0e828e98bb69042fec1809c1b5a680eb7389"
uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
version = "0.6.15"
[[deps.StructTypes]]
deps = ["Dates", "UUIDs"]
git-tree-sha1 = "ca4bccb03acf9faaf4137a9abc1881ed1841aa70"
uuid = "856f2bd8-1eba-4b0a-8007-ebc267875bd4"
version = "1.10.0"
[[deps.SuiteSparse]]
deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
[[deps.SuiteSparse_jll]]
deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"]
uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c"
version = "5.10.1+6"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.3"
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[deps.Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits", "Test"]
git-tree-sha1 = "1544b926975372da01227b382066ab70e574a3ec"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.10.1"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.0"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.TimerOutputs]]
deps = ["ExprTools", "Printf"]
git-tree-sha1 = "f548a9e9c490030e545f72074a41edfd0e5bcdd7"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.23"
[[deps.Transducers]]
deps = ["Adapt", "ArgCheck", "BangBang", "Baselet", "CompositionsBase", "DefineSingletons", "Distributed", "InitialValues", "Logging", "Markdown", "MicroCollections", "Requires", "Setfield", "SplittablesBase", "Tables"]
git-tree-sha1 = "a66fb81baec325cf6ccafa243af573b031e87b00"
uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999"
version = "0.4.77"
[deps.Transducers.extensions]
TransducersBlockArraysExt = "BlockArrays"
TransducersDataFramesExt = "DataFrames"
TransducersLazyArraysExt = "LazyArrays"
TransducersOnlineStatsBaseExt = "OnlineStatsBase"
TransducersReferenceablesExt = "Referenceables"
[deps.Transducers.weakdeps]
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02"
OnlineStatsBase = "925886fa-5bf2-5e8e-b522-a9147a512338"
Referenceables = "42d2dcc6-99eb-4e98-b66c-637b7d73030e"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.UnsafeAtomics]]
git-tree-sha1 = "6331ac3440856ea1988316b46045303bef658278"
uuid = "013be700-e6cd-48c3-b4a1-df204f14c38f"
version = "0.2.1"
[[deps.UnsafeAtomicsLLVM]]
deps = ["LLVM", "UnsafeAtomics"]
git-tree-sha1 = "ea37e6066bf194ab78f4e747f5245261f17a7175"
uuid = "d80eeb9a-aca5-4d75-85e5-170c8b632249"
version = "0.1.2"
[[deps.UnsafePointers]]
git-tree-sha1 = "c81331b3b2e60a982be57c046ec91f599ede674a"
uuid = "e17b2a0c-0bdf-430a-bd0c-3a23cae4ff39"
version = "1.0.0"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.13+0"
[[deps.Zygote]]
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "GPUArrays", "GPUArraysCore", "IRTools", "InteractiveUtils", "LinearAlgebra", "LogExpFunctions", "MacroTools", "NaNMath", "PrecompileTools", "Random", "Requires", "SparseArrays", "SpecialFunctions", "Statistics", "ZygoteRules"]
git-tree-sha1 = "5be3ddb88fc992a7d8ea96c3f10a49a7e98ebc7b"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.6.62"
[deps.Zygote.extensions]
ZygoteColorsExt = "Colors"
ZygoteDistancesExt = "Distances"
ZygoteTrackerExt = "Tracker"
[deps.Zygote.weakdeps]
Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
[[deps.ZygoteRules]]
deps = ["ChainRulesCore", "MacroTools"]
git-tree-sha1 = "977aed5d006b840e2e40c0b48984f7463109046d"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.3"
[[deps.cuDNN]]
deps = ["CEnum", "CUDA", "CUDNN_jll"]
git-tree-sha1 = "f65490d187861d6222cb38bcbbff3fd949a7ec3e"
uuid = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"
version = "1.0.4"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.8.0+0"
[[deps.micromamba_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl"]
git-tree-sha1 = "66d07957bcf7e4930d933195aed484078dd8cbb5"
uuid = "f8abcde7-e9b7-5caa-b8af-a437887ae8e4"
version = "1.4.9+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0"

View File

@@ -0,0 +1,16 @@
name = "IronpenGPU"
uuid = "3d5396ea-818e-43fc-a9d3-164248e840cd"
authors = ["ton <narawat@gmail.com>"]
version = "0.1.0"
[deps]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"

View File

@@ -0,0 +1,85 @@
module IronpenGPU # this is a parent module
# export
""" Order by dependencies of each file. The 1st included file must not depend on any other
files and each file can only depend on the file included before it.
"""
include("type.jl")
using .type # bring type into parent module namespace
include("snnUtil.jl")
using .snnUtil
include("forward.jl")
using .forward
include("learn.jl")
using .learn
include("interface.jl")
using .interface
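# A minimal sketch of the pattern above (type.jl's real contents differ; Neuron is a
# hypothetical name for illustration only):
#   # type.jl
#   module type
#   export Neuron            # names exported here become visible to the parent module
#   struct Neuron
#       v::Float64
#   end
#   end # module type
# include("type.jl") evaluates the file into IronpenGPU, and `using .type` then brings
# the exported names into the parent namespace for the files included after it.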
#------------------------------------------------------------------------------------------------100
""" version 0.0.11
Todo:
[] make output neuron draw connection randomly
[4] implement variable dormant connection and pruning machanism. the longer the training the longer
0 weight stay 0.
[] using RL to control learning signal
[] consider using Dates.now() instead of timestamp because time_stamp may overflow
[] Liquid time constant. training should include adjusting α, neuron membrane potential decay factor
which defined by neuron.tau_m formula in type.jl
Change from version: 0.0.10
- growRepeatedPath!(), instead of synapse with 60% less activity count gets -w, may be I
should rank synapse based on activity count from highest perforimg synapse to lowest
and the last 60% of the rank get -w
- 10% instead of 20% synapticConnectionPercent
"""
end # module IronpenGPU

uuid = "f8abcde7-e9b7-5caa-b8af-a437887ae8e4"
version = "1.4.9+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0"

View File

@@ -0,0 +1,16 @@
name = "IronpenGPU"
uuid = "3d5396ea-818e-43fc-a9d3-164248e840cd"
authors = ["ton <narawat@gmail.com>"]
version = "0.1.0"
[deps]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"

View File

@@ -0,0 +1,722 @@
module forward
# export
using Flux, CUDA
using GeneralUtils
using ..type, ..snnUtil
#------------------------------------------------------------------------------------------------100
""" kfn forward
input (row, col, batch)
"""
function (kfn::kfn_1)(input::AbstractArray)
kfn.timeStep .+= 1
# what to do at the start of learning round
if view(kfn.learningStage, 1)[1] == 1
kfn.timeStep .= 1
# reset learning params
kfn.zitCumulative = (kfn.zitCumulative[:,:,1] .= 0) # reset the accumulator to its zeroed first time slice
kfn.lif_vt .= 0
kfn.lif_wRecChange .= 0
kfn.lif_epsilonRec .= 0
kfn.lif_firingCounter .= 0
kfn.lif_refractoryCounter .= 0
kfn.lif_zt .= 0
kfn.lif_synapticActivityCounter .= 0
kfn.alif_vt .= 0
kfn.alif_a .= 0
kfn.alif_epsilonRec .= 0
kfn.alif_epsilonRecA .= 0
kfn.alif_wRecChange .= 0
kfn.alif_firingCounter .= 0
kfn.alif_refractoryCounter .= 0
kfn.alif_zt .= 0
kfn.alif_synapticActivityCounter .= 0
kfn.on_vt .= 0
kfn.on_epsilonRec .= 0
kfn.on_wOutChange .= 0
kfn.on_refractoryCounter .= 0
kfn.on_firingCounter .= 0
kfn.on_synapticActivityCounter .= 0
kfn.learningStage = [2]
end
# update activation matrix with "lif_zt1" and "alif_zt1" by concatenating
# (input, lif_zt1, alif_zt1) to form activation matrix
_zit = cat(reshape(input, (size(input, 1), size(input, 2), 1, size(input, 3))),
reshape(kfn.lif_zt, (size(input, 1), :, 1, size(input, 3))),
reshape(kfn.alif_zt, (size(input, 1), :, 1, size(input, 3))), dims=2)
kfn.zit .= reshape(_zit, (size(input, 1), :, size(input, 3)))
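# shape sketch of the concatenation above (assuming an input of size (row, signal_col, batch)):
#   input   -> (row, signal_col, 1, batch)
#   lif_zt  -> (row, lif_col,  1, batch)   # stored as (1, 1, lif_n, batch), reshaped
#   alif_zt -> (row, alif_col, 1, batch)
#   cat along dim 2, drop the singleton -> zit :: (row, signal_col + lif_col + alif_col, batch)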
@sync begin
@async begin
# project 3D kfn zit into 4D lif zit
i1, i2, i3, i4 = size(kfn.lif_zit)
kfn.lif_zit .= reshape(kfn.zit, (i1, i2, 1, i4)) .* kfn.lif_arrayProjection4d
kfn.lif_exInType .= kfn.exInType .* kfn.lif_arrayProjection4d
lifForward( kfn.lif_zit,
kfn.lif_wRec,
kfn.lif_vt,
kfn.lif_vth,
kfn.lif_vRest,
kfn.lif_zt4d,
kfn.lif_alpha,
kfn.lif_phi,
kfn.lif_epsilonRec,
kfn.lif_refractoryCounter,
kfn.lif_refractoryDuration,
kfn.lif_gammaPd,
kfn.lif_firingCounter,
kfn.lif_recSignal,
kfn.lif_exInType,
kfn.lif_wRecChange,
kfn.lif_neuronInactivityCounter,
kfn.lif_synapseReconnectDelay,
kfn.lif_synapticActivityCounter,
kfn.timeStep,
)
end
@async begin
# project 3D kfn zit into 4D alif zit
i1, i2, i3, i4 = size(kfn.alif_zit)
kfn.alif_zit .= reshape(kfn.zit, (i1, i2, 1, i4)) .* kfn.alif_arrayProjection4d
kfn.alif_exInType .= kfn.exInType .* kfn.alif_arrayProjection4d
alifForward(kfn.alif_zit,
kfn.alif_wRec,
kfn.alif_vt,
kfn.alif_vth,
kfn.alif_vRest,
kfn.alif_zt4d,
kfn.alif_alpha,
kfn.alif_phi,
kfn.alif_epsilonRec,
kfn.alif_refractoryCounter,
kfn.alif_refractoryDuration,
kfn.alif_gammaPd,
kfn.alif_firingCounter,
kfn.alif_recSignal,
kfn.alif_exInType,
kfn.alif_wRecChange,
kfn.alif_neuronInactivityCounter,
kfn.alif_synapseReconnectDelay,
kfn.alif_synapticActivityCounter,
kfn.timeStep,
kfn.alif_epsilonRecA,
kfn.alif_a,
kfn.alif_avth,
kfn.alif_beta,
kfn.alif_rho,
)
end
end
# reduce lif_zt4d and alif_zt4d into lif_zt, alif_zt (4d -> 1d)
kfn.lif_zt .= reduce(max, kfn.lif_zt4d, dims=(1,2))
kfn.alif_zt .= reduce(max, kfn.alif_zt4d, dims=(1,2))
# update activation matrix with "lif_zt1" and "alif_zt1" by concatenating
# (input, lif_zt1, alif_zt1) to form activation matrix
_zit = cat(reshape(input, (size(input, 1), size(input, 2), 1, size(input, 3))),
reshape(kfn.lif_zt, (size(input, 1), :, 1, size(input, 3))),
reshape(kfn.alif_zt, (size(input, 1), :, 1, size(input, 3))), dims=2)
kfn.zit .= reshape(_zit, (size(input, 1), :, size(input, 3)))
kfn.zitCumulative = sum(kfn.zitCumulative) == 0 ? kfn.zit : cat(kfn.zitCumulative, kfn.zit, dims=3)
# kfn.zitCumulative = cat(kfn.zitCumulative, kfn.zit, dims=3)
# kfn.zitCumulative .+= kfn.zit
# project 3D kfn zit into 4D on zit
i1, i2, i3, i4 = size(kfn.on_zit)
kfn.on_zit .= reshape(kfn.zit, (i1, i2, 1, i4)) .* kfn.on_arrayProjection4d
# read out
onForward( kfn.on_zit,
kfn.on_wOut,
kfn.on_vt,
kfn.on_vth,
kfn.on_vRest,
kfn.on_zt4d,
kfn.on_alpha,
kfn.on_phi,
kfn.on_epsilonRec,
kfn.on_refractoryCounter,
kfn.on_refractoryDuration,
kfn.on_gammaPd,
kfn.on_firingCounter,
kfn.on_recSignal,
kfn.on_synapticActivityCounter,
)
# get on_zt4d to on_zt
kfn.on_zt .= reduce(max, kfn.on_zt4d, dims=(1,2))
logit = reshape(kfn.on_zt, (size(input, 1), :)) # (outputNeurons, batch)
return logit,
kfn.zit
end
# gpu launcher
function lifForward( zit::CuArray,
wRec::CuArray,
vt::CuArray,
vth::CuArray,
vRest::CuArray,
zt::CuArray,
alpha::CuArray,
phi::CuArray,
epsilonRec::CuArray,
refractoryCounter::CuArray,
refractoryDuration::CuArray,
gammaPd::CuArray,
firingCounter::CuArray,
recSignal::CuArray,
exInType::CuArray,
wRecChange::CuArray,
neuronInactivityCounter::CuArray,
synapseReconnectDelay::CuArray,
synapticActivityCounter::CuArray,
timeStep::CuArray,
)
kernel = @cuda launch=false lifForward( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
GeneralUtils.linear_to_cartesian,
)
config = launch_configuration(kernel.fun)
# threads to launch. Since the exact thread count the kernel needs usually can't be
# launched, launch at least that many and use a guard inside the kernel to keep
# surplus threads from accessing memory.
threads = min(1024, config.threads) # GPU dependent; most NVIDIA GPUs allow 1024 threads per block
# total desired threads to launch, usually one thread per matrix element
totalThreads = length(wRec)
blocks = cld(totalThreads, threads)
# println("launching gpu kernel")
CUDA.@sync begin
kernel( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
GeneralUtils.linear_to_cartesian; threads, blocks)
end
end
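# A minimal, self-contained sketch of the occupancy-based launch pattern used by the
# launchers in this module, shown on a toy elementwise kernel (the names below are
# illustrative only, not part of the model):
function _sketch_scale_kernel!(y, x, c)
i = (blockIdx().x - 1) * blockDim().x + threadIdx().x
if i <= length(y) # guard: surplus threads do nothing
@inbounds y[i] = c * x[i]
end
return nothing
end
function sketch_scale!(y::CuArray, x::CuArray, c)
kernel = @cuda launch=false _sketch_scale_kernel!(y, x, c)
config = launch_configuration(kernel.fun) # occupancy-aware thread/block suggestion
threads = min(length(y), config.threads)
blocks = cld(length(y), threads)
kernel(y, x, c; threads, blocks)
return y
end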
# gpu kernel
function lifForward( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
linear_to_cartesian,
)
i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index
if i <= length(wRec)
# cartesian index
i1, i2, i3, i4 = linear_to_cartesian(i, size(wRec))
# @cuprintln("gpu thread $i $i1 $i2 $i3 $i4")
if refractoryCounter[i1,i2,i3,i4] > 0 # refractory period is active
refractoryCounter[i1,i2,i3,i4] -= 1
recSignal[i1,i2,i3,i4] = 0
zt[i1,i2,i3,i4] = 0
vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
phi[i1,i2,i3,i4] = 0
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])
else # refractory period is inactive
recSignal[i1,i2,i3,i4] = wRec[i1,i2,i3,i4] * zit[i1,i2,i3,i4] *
exInType[i1,i2,i3,i4]
vt[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]) +
sum(@view(recSignal[:,:,i3,i4]))
# fires if the membrane potential exceeds the threshold
if vt[i1,i2,i3,i4] > vth[i1,i2,i3,i4]
zt[i1,i2,i3,i4] = 1
refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
firingCounter[i1,i2,i3,i4] += 1
# vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
# reset counter if neuron fires
neuronInactivityCounter[i1,i2,i3,i4] = 0
else
zt[i1,i2,i3,i4] = 0
neuronInactivityCounter[i1,i2,i3,i4] -= 1
end
# compute phi (pseudo-derivative); note this variant differs from the standard LIF formula
phi[i1,i2,i3,i4] = (gammaPd[i1,i2,i3,i4] / vth[i1,i2,i3,i4]) *
max(0, 1 - ((vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) / vth[i1,i2,i3,i4]))
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]) +
(zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4]))
# !iszero indicates synaptic subscription
synapticActivityCounter[i1,i2,i3,i4] += zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4])
# voltage regulator
wRecChange[i1,i2,i3,i4] = -0.001 * (vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) *
zit[i1,i2,i3,i4]
# a negative value means counting mode; compare against -0.2 because an exact float
# comparison such as -0.1 < -0.1 is unreliable on the GPU
if synapseReconnectDelay[i1,i2,i3,i4] < -0.2
synapseReconnectDelay[i1,i2,i3,i4] += 1
if synapseReconnectDelay[i1,i2,i3,i4] == 0
# mark timestep
synapseReconnectDelay[i1,i2,i3,i4] = sum(timeStep)
end
end
end
end
return nothing
end
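# Summary of the per-element update above (symbols follow the code; this is a
# reading aid reconstructed from the kernel, not the paper's exact notation):
#   v_t   = alpha * v_{t-1} + sum_j wRec_j * zit_j * exInType_j    (membrane potential)
#   z_t   = (v_t > v_th) ? 1 : 0                                   (spike)
#   phi_t = (gammaPd / v_th) * max(0, 1 - (v_t - v_th) / v_th)     (pseudo-derivative)
#   eps_t = alpha * eps_{t-1} + zit_t * (wRec != 0)                (eligibility trace, subscribed synapses only)
# GeneralUtils.linear_to_cartesian is assumed to map a linear thread index to the
# column-major Cartesian index of a 4D array without allocating (the real
# implementation may differ); a hypothetical equivalent:
#   function linear_to_cartesian(i, dims::NTuple{4, Int})
#       d1, d2, d3 = dims[1], dims[2], dims[3]
#       i0 = i - 1
#       return (i0 % d1 + 1, (i0 ÷ d1) % d2 + 1, (i0 ÷ (d1 * d2)) % d3 + 1, i0 ÷ (d1 * d2 * d3) + 1)
#   end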
# gpu launcher
function alifForward( zit::CuArray,
wRec::CuArray,
vt::CuArray,
vth::CuArray,
vRest::CuArray,
zt::CuArray,
alpha::CuArray,
phi::CuArray,
epsilonRec::CuArray,
refractoryCounter::CuArray,
refractoryDuration::CuArray,
gammaPd::CuArray,
firingCounter::CuArray,
recSignal::CuArray,
exInType::CuArray,
wRecChange::CuArray,
neuronInactivityCounter::CuArray,
synapseReconnectDelay::CuArray,
synapticActivityCounter::CuArray,
timeStep::CuArray,
epsilonRecA::CuArray,
a::CuArray,
avth::CuArray,
beta::CuArray,
rho::CuArray,
)
kernel = @cuda launch=false alifForward( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
epsilonRecA,
a,
avth,
beta,
rho,
GeneralUtils.linear_to_cartesian,
)
config = launch_configuration(kernel.fun)
# threads to launch. Since the exact thread count the kernel needs usually can't be
# launched, launch at least that many and use a guard inside the kernel to keep
# surplus threads from accessing memory.
threads = min(1024, config.threads) # GPU dependent; most NVIDIA GPUs allow 1024 threads per block
# total desired threads to launch, usually one thread per matrix element
totalThreads = length(wRec)
blocks = cld(totalThreads, threads)
# println("launching gpu kernel")
CUDA.@sync begin
kernel( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
epsilonRecA,
a,
avth,
beta,
rho,
GeneralUtils.linear_to_cartesian; threads, blocks)
end
end
# gpu kernel
function alifForward( zit,
wRec,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
exInType,
wRecChange,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
timeStep,
epsilonRecA,
a,
avth,
beta,
rho,
linear_to_cartesian,
)
i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index
if i <= length(wRec)
# cartesian index
i1, i2, i3, i4 = linear_to_cartesian(i, size(wRec))
# @cuprintln("gpu thread $i $i1 $i2 $i3 $i4")
if refractoryCounter[i1,i2,i3,i4] > 0 # refractory period is active
refractoryCounter[i1,i2,i3,i4] -= 1
recSignal[i1,i2,i3,i4] = 0
zt[i1,i2,i3,i4] = 0
vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
phi[i1,i2,i3,i4] = 0
a[i1,i2,i3,i4] = rho[i1,i2,i3,i4] * a[i1,i2,i3,i4]
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])
# compute epsilonRecA using eq. 26
epsilonRecA[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] *
(phi[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]))
# compute avth
avth[i1,i2,i3,i4] = vth[i1,i2,i3,i4] + (beta[i1,i2,i3,i4] * a[i1,i2,i3,i4])
else # refractory period is inactive
recSignal[i1,i2,i3,i4] = wRec[i1,i2,i3,i4] * zit[i1,i2,i3,i4] *
exInType[i1,i2,i3,i4]
vt[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]) +
sum(@view(recSignal[:,:,i3,i4]))
# compute avth
avth[i1,i2,i3,i4] = vth[i1,i2,i3,i4] + (beta[i1,i2,i3,i4] * a[i1,i2,i3,i4])
# fires if the membrane potential exceeds the adaptive threshold
if vt[i1,i2,i3,i4] > avth[i1,i2,i3,i4]
zt[i1,i2,i3,i4] = 1
refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
firingCounter[i1,i2,i3,i4] += 1
# vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
a[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] * a[i1,i2,i3,i4]) + 1
neuronInactivityCounter[i1,i2,i3,i4] = 0
else
zt[i1,i2,i3,i4] = 0
a[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] * a[i1,i2,i3,i4])
neuronInactivityCounter[i1,i2,i3,i4] -= 1
end
# compute phi (pseudo-derivative); note this variant differs from the standard ALIF formula
phi[i1,i2,i3,i4] = (gammaPd[i1,i2,i3,i4] / vth[i1,i2,i3,i4]) *
max(0, 1 - ((vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) / vth[i1,i2,i3,i4]))
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]) +
(zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4]))
# compute epsilonRecA using eq. 26
epsilonRecA[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] *
(phi[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])) +
(zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4]))
synapticActivityCounter[i1,i2,i3,i4] += zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4])
# voltage regulator
wRecChange[i1,i2,i3,i4] = -0.001 * (vt[i1,i2,i3,i4] - avth[i1,i2,i3,i4]) *
zit[i1,i2,i3,i4]
# a negative value means counting mode; compare against -0.2 because an exact float
# comparison such as -0.1 < -0.1 is unreliable on the GPU
if synapseReconnectDelay[i1,i2,i3,i4] < -0.2
synapseReconnectDelay[i1,i2,i3,i4] += 1
if synapseReconnectDelay[i1,i2,i3,i4] == 0
# mark timestep
synapseReconnectDelay[i1,i2,i3,i4] = sum(timeStep)
end
end
end
end
return nothing
end
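# ALIF extends the LIF update with an adaptive threshold (symbols follow the code;
# the "eq. 25/26" comments refer to the source paper's eligibility-trace equations):
#   a_t    = rho * a_{t-1} + z_t                           (threshold adaptation state)
#   A_t    = v_th + beta * a_t                             (adaptive threshold, `avth`)
#   z_t    = (v_t > A_t) ? 1 : 0
#   epsA_t = rho * (phi_t * eps_t) + zit_t * (wRec != 0)   (adaptation trace, eq. 26)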
# gpu launcher
function onForward( zit::CuArray,
wOut::CuArray,
vt::CuArray,
vth::CuArray,
vRest::CuArray,
zt::CuArray,
alpha::CuArray,
phi::CuArray,
epsilonRec::CuArray,
refractoryCounter::CuArray,
refractoryDuration::CuArray,
gammaPd::CuArray,
firingCounter::CuArray,
recSignal::CuArray,
synapticActivityCounter::CuArray,
)
kernel = @cuda launch=false onForward( zit,
wOut,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
synapticActivityCounter,
GeneralUtils.linear_to_cartesian,
)
config = launch_configuration(kernel.fun)
# threads to launch. Since the exact thread count the kernel needs usually can't be
# launched, launch at least that many and use a guard inside the kernel to keep
# surplus threads from accessing memory.
threads = min(1024, config.threads) # GPU dependent; most NVIDIA GPUs allow 1024 threads per block
# total desired threads to launch, usually one thread per matrix element
totalThreads = length(wOut)
blocks = cld(totalThreads, threads)
# println("launching gpu kernel")
CUDA.@sync begin
kernel( zit,
wOut,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
synapticActivityCounter,
GeneralUtils.linear_to_cartesian; threads, blocks)
end
end
# gpu kernel
function onForward( zit,
wOut,
vt,
vth,
vRest,
zt,
alpha,
phi,
epsilonRec,
refractoryCounter,
refractoryDuration,
gammaPd,
firingCounter,
recSignal,
synapticActivityCounter,
linear_to_cartesian,
)
i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index
if i <= length(wOut)
# cartesian index
i1, i2, i3, i4 = linear_to_cartesian(i, size(wOut))
# @cuprintln("gpu thread $i $i1 $i2 $i3 $i4")
if refractoryCounter[i1,i2,i3,i4] > 0 # refractory period is active
refractoryCounter[i1,i2,i3,i4] -= 1
recSignal[i1,i2,i3,i4] = 0
zt[i1,i2,i3,i4] = 0
vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
phi[i1,i2,i3,i4] = 0
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4])
else # refractory period is inactive
recSignal[i1,i2,i3,i4] = zit[i1,i2,i3,i4] * wOut[i1,i2,i3,i4]
vt[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]) + sum(@view(recSignal[:,:,i3,i4]))
# fires if the membrane potential exceeds the threshold
if vt[i1,i2,i3,i4] > vth[i1,i2,i3,i4]
zt[i1,i2,i3,i4] = 1
refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
firingCounter[i1,i2,i3,i4] += 1
vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
else
zt[i1,i2,i3,i4] = 0
end
# compute phi (pseudo-derivative); note this variant differs from the standard output-neuron formula
phi[i1,i2,i3,i4] = (gammaPd[i1,i2,i3,i4] / vth[i1,i2,i3,i4]) *
max(0, 1 - ((vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) / vth[i1,i2,i3,i4]))
# compute epsilonRec
epsilonRec[i1,i2,i3,i4] = (alpha[i1,i2,i3,i4] * epsilonRec[i1,i2,i3,i4]) +
(zit[i1,i2,i3,i4] * !iszero(wOut[i1,i2,i3,i4]))
synapticActivityCounter[i1,i2,i3,i4] += zit[i1,i2,i3,i4] * !iszero(wOut[i1,i2,i3,i4])
end
end
return nothing
end
end # module

View File

@@ -0,0 +1,87 @@
module interface
# export
# using Flux, CUDA
#------------------------------------------------------------------------------------------------100
end # module

View File

@@ -0,0 +1,605 @@
module learn
export learn!, compute_paramsChange!
using Statistics, Random, LinearAlgebra, JSON3, Flux, CUDA, Dates
using GeneralUtils
using ..type, ..snnUtil
#------------------------------------------------------------------------------------------------100
function compute_paramsChange!(kfn::kfn_1, modelError::CuArray, outputError::CuArray, label)
lifComputeParamsChange!(kfn.timeStep,
kfn.lif_phi,
kfn.lif_epsilonRec,
kfn.lif_eta,
kfn.lif_eRec,
kfn.lif_wRec,
kfn.lif_exInType,
kfn.lif_wRecChange,
kfn.on_wOut,
kfn.lif_firingCounter,
kfn.lif_firingTargetFrequency,
kfn.lif_arrayProjection4d,
kfn.lif_error,
modelError,
outputError,
kfn.inputSize,
kfn.bk,
label,
)
alifComputeParamsChange!(kfn.timeStep,
kfn.alif_phi,
kfn.alif_epsilonRec,
kfn.alif_eta,
kfn.alif_eRec,
kfn.alif_wRec,
kfn.alif_exInType,
kfn.alif_wRecChange,
kfn.on_wOut,
kfn.alif_firingCounter,
kfn.alif_firingTargetFrequency,
kfn.alif_arrayProjection4d,
kfn.alif_error,
modelError,
outputError,
kfn.inputSize,
kfn.bk,
label,
kfn.alif_epsilonRecA,
kfn.alif_beta,
)
onComputeParamsChange!(kfn.on_phi,
kfn.on_epsilonRec,
kfn.on_eta,
kfn.on_eRec,
kfn.on_wOutChange,
kfn.on_arrayProjection4d,
kfn.on_error,
kfn.on_synapticActivityCounter,
outputError,
)
# error("DEBUG -> kfn compute_paramsChange! $(Dates.now())")
end
function lifComputeParamsChange!( timeStep::CuArray,
phi::CuArray,
epsilonRec::CuArray,
eta::CuArray,
eRec::CuArray,
wRec::CuArray,
exInType::CuArray,
wRecChange::CuArray,
wOut::CuArray,
firingCounter::CuArray,
firingTargetFrequency::CuArray,
arrayProjection4d::CuArray,
nError::CuArray,
modelError::CuArray,
outputError::CuArray,
inputSize::CuArray,
bk::CuArray,
label,
)
eRec .= phi .* epsilonRec
# the 2D wRec matrix contains input, lif, and alif neurons; only the lif neurons are needed here
startIndex = prod(inputSize) +1
stopIndex = startIndex + size(wRec, 3) -1
startCol = CartesianIndices(wRec)[startIndex][2]
stopCol = CartesianIndices(wRec)[stopIndex][2]
# RSNN neurons with a direct connection to an output neuron get their Bjk from the
# output neuron representing the correct answer; the rest of the RSNN gets a random Bjk
onW = @view(wOut[:, startCol:stopCol, sum(label+1), 1]) # label+1 because Julia uses 1-based indexing
_bk = @view(bk[:, startCol:stopCol, 1])
mask = iszero.(onW)
bk_ = mask .* _bk
bkComposed = onW .+ bk_
nError = bkComposed .* modelError
nError = reshape(nError, (1,1,:,1))
# compute wRecChange of all neurons w.r.t. the iᵗʰ output neuron
wRecChange .+= (eta .* nError .* eRec)
# frequency regulator
targetFiringCount = firingTargetFrequency .* timeStep
freqError = (firingCounter .- targetFiringCount) ./ timeStep
freqWRecChange = -1 .* freqError .* eta .* eRec
wRecChange .+= freqWRecChange
# reset epsilonRec
epsilonRec .= 0
end
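# Tiny CPU illustration (hypothetical values) of the Bjk composition above: neurons
# wired directly to the correct output neuron use that output weight as feedback,
# while the rest fall back to a fixed random Bjk (a broadcast-alignment-style scheme,
# as reconstructed from the code):
#   onW        = [0.5 0.0 0.2]                 # output weights for the correct label
#   _bk        = [0.9 0.4 0.7]                 # fixed random feedback weights
#   bkComposed = onW .+ iszero.(onW) .* _bk    # == [0.5 0.4 0.2]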
function alifComputeParamsChange!( timeStep::CuArray,
phi::CuArray,
epsilonRec::CuArray,
eta::CuArray,
eRec::CuArray,
wRec::CuArray,
exInType::CuArray,
wRecChange::CuArray,
wOut::CuArray,
firingCounter::CuArray,
firingTargetFrequency::CuArray,
arrayProjection4d::CuArray,
nError::CuArray,
modelError::CuArray,
outputError::CuArray,
inputSize::CuArray,
bk::CuArray,
label,
epsilonRecA::CuArray,
beta::CuArray,
)
eRec .= phi .* (epsilonRec .- (beta .* epsilonRecA)) # use eq. 25
# the 2D wRec matrix contains input, lif, and alif neurons; only the alif neurons are needed here
startIndex = prod(inputSize) +1
stopIndex = startIndex + size(wRec, 3) -1
startCol = CartesianIndices(wRec)[startIndex][2]
stopCol = CartesianIndices(wRec)[stopIndex][2]
# RSNN neurons with a direct connection to an output neuron get their Bjk from the
# output neuron representing the correct answer; the rest of the RSNN gets a random Bjk
onW = @view(wOut[:, startCol:stopCol, sum(label+1), 1]) # label+1 because Julia uses 1-based indexing
_bk = @view(bk[:, startCol:stopCol, 1])
mask = iszero.(onW)
bk_ = mask .* _bk
bkComposed = onW .+ bk_
nError = bkComposed .* modelError
nError = reshape(nError, (1,1,:,1))
wRecChange .+= (eta .* nError .* eRec)
# frequency regulator
targetFiringCount = firingTargetFrequency .* timeStep
freqError = (firingCounter .- targetFiringCount) ./ timeStep
freqWRecChange = -1 .* freqError .* eta .* eRec
wRecChange .+= freqWRecChange
# wRecChange .+= 0.01 .* ((firingTargetFrequency - (firingCounter./timeStep)) ./ timeStep) .*
# eta .* eRec
# reset epsilonRec
epsilonRec .= 0
epsilonRecA .= 0
# error("DEBUG -> alifComputeParamsChange! $(Dates.now())")
end
function onComputeParamsChange!(phi::CuArray,
epsilonRec::CuArray,
eta::CuArray,
eRec::CuArray,
wOutChange::CuArray,
arrayProjection4d::CuArray,
nError::CuArray,
synapticActivityCounter,
outputError::CuArray # outputError is output neuron's error
)
eRec .= phi .* epsilonRec
nError .= reshape(outputError, (1, 1, :, size(outputError, 2))) .* arrayProjection4d
wOutChange .+= (eta .* nError .* eRec)
# reset epsilonRec
epsilonRec .= 0
# error("DEBUG -> onComputeParamsChange! $(Dates.now())")
end
function lifComputeParamsChange!( phi::AbstractArray,
epsilonRec::AbstractArray,
eta::AbstractArray,
wRec::AbstractArray,
wRecChange::AbstractArray,
wOut::AbstractArray,
modelError::AbstractArray)
d1, d2, d3, d4 = size(epsilonRec)
# Bₖⱼ in the paper; sum() to get each neuron's total wOut weight
wOutSum = reshape(sum(wOut, dims=3), (d1, :, d4))
for j in 1:d4, i in 1:d3 # compute along the neuron axis for every batch
# how much one spike of this neuron contributes to each output neuron's error
view(wRecChange, :, :, i, j) .+= (-1 * view(eta, :, :, i, j)[1]) .*
# eRec
(
(view(phi, :, :, i, j)[1] .* view(epsilonRec, :, :, i, j)) .*
# nError a.k.a. learning signal
(
view(modelError, :, j)[1] * # dopamine-like: this neuron receives the summed error signal
# RSNN neuron's total wOut weight (neuron synaptic subscription .* wOutSum)
view(wOutSum, :, :, j)[i]
)
)
end
end
function alifComputeParamsChange!( phi::AbstractArray,
epsilonRec::AbstractArray,
epsilonRecA::AbstractArray,
eta::AbstractArray,
wRec::AbstractArray,
wRecChange::AbstractArray,
beta::AbstractArray,
wOut::AbstractArray,
modelError::AbstractArray)
d1, d2, d3, d4 = size(epsilonRec)
# Bₖⱼ in the paper; sum() to get each neuron's total wOut weight
wOutSum = reshape(sum(wOut, dims=3), (d1, :, d4))
for j in 1:d4, i in 1:d3 # compute along the neuron axis for every batch
# how much one spike of this neuron contributes to each output neuron's error
view(wRecChange, :, :, i, j) .+= (-1 * view(eta, :, :, i, j)[1]) .*
# eRec
(
# eRec_v
(view(phi, :, :, i, j)[1] .* view(epsilonRec, :, :, i, j)) .+
# eRec_a
((view(phi, :, :, i, j)[1] * view(beta, :, :, i, j)[1]) .*
view(epsilonRecA, :, :, i, j))
) .*
# nError a.k.a. learning signal
(
view(modelError, :, j)[1] *
# RSNN neuron's total wOut weight (neuron synaptic subscription .* wOutSum)
view(wOutSum, :, :, j)[i]
# sum(GeneralUtils.isNotEqual.(view(wRec, :, :, i, j), 0) .*
# view(wOutSum, :, :, j))
)
end
end
function onComputeParamsChange!(phi::AbstractArray,
epsilonRec::AbstractArray,
eta::AbstractArray,
wOutChange::AbstractArray,
outputError::AbstractArray)
d1, d2, d3, d4 = size(epsilonRec)
for j in 1:d4, i in 1:d3 # compute along the neuron axis for every batch
# how much one spike of this neuron contributes to each output neuron's error
view(wOutChange, :, :, i, j) .+= (-1 * view(eta, :, :, i, j)[1]) .*
# eRec
(
(view(phi, :, :, i, j)[1] .* view(epsilonRec, :, :, i, j)) .*
# nError a.k.a. learning signal; each output neuron receives the error of its own answer minus the correct answer.
view(outputError, :, j)[i]
)
end
end
function learn!(kfn::kfn_1, progress, device=cpu)
if sum(kfn.timeStep) == 800
# println("zitCumulative ", sum(kfn.zitCumulative[:,:,784:size(kfn.zitCumulative, 3)], dims=3))
println("synapse lif $(sum((!isequal).(kfn.lif_wRec, 0))) alif $(sum((!isequal).(kfn.alif_wRec, 0)))")
println("on_synapticActivityCounter 0 ", kfn.on_synapticActivityCounter[:,:,1])
println("on_synapticActivityCounter 5 ", kfn.on_synapticActivityCounter[:,:,6])
println("wOut 0 $(sum(kfn.on_wOut[:,:,1,1], dims=3)) total $(sum(sum(kfn.on_wOut[:,:,1,1], dims=3)))")
println("wOut 5 $(sum(kfn.on_wOut[:,:,6,1], dims=3)) total $(sum(sum(kfn.on_wOut[:,:,6,1], dims=3)))")
end
# lif learn
kfn.lif_wRec, kfn.lif_neuronInactivityCounter, kfn.lif_synapticActivityCounter, kfn.lif_synapseReconnectDelay =
lifLearn(kfn.lif_wRec,
kfn.lif_wRecChange,
kfn.lif_exInType,
kfn.lif_arrayProjection4d,
kfn.lif_neuronInactivityCounter,
kfn.lif_synapseReconnectDelay,
kfn.lif_synapseConnectionNumber,
kfn.lif_synapticActivityCounter,
kfn.lif_eta,
kfn.lif_vt,
kfn.zitCumulative,
progress,
device)
# alif learn
kfn.alif_wRec, kfn.alif_neuronInactivityCounter, kfn.alif_synapticActivityCounter, kfn.alif_synapseReconnectDelay =
alifLearn(kfn.alif_wRec,
kfn.alif_wRecChange,
kfn.alif_exInType,
kfn.alif_arrayProjection4d,
kfn.alif_neuronInactivityCounter,
kfn.alif_synapseReconnectDelay,
kfn.alif_synapseConnectionNumber,
kfn.alif_synapticActivityCounter,
kfn.alif_eta,
kfn.alif_vt,
kfn.zitCumulative,
progress,
device)
# on learn
onLearn!(kfn.on_wOut,
kfn.on_wOutChange,
kfn.on_eta,
kfn.on_arrayProjection4d,
progress,)
# wrap up learning session
if kfn.learningStage == [3]
kfn.learningStage = [0]
end
# error("DEBUG -> kfn learn! $(Dates.now())")
end
function lifLearn(wRec,
wRecChange,
exInType,
arrayProjection4d,
neuronInactivityCounter,
synapseReconnectDelay,
synapseConnectionNumber,
synapticActivityCounter,
eta,
vt,
zitCumulative,
progress,
device)
# transfer data to cpu
arrayProjection4d_cpu = arrayProjection4d |> cpu
wRec_cpu = wRec |> cpu
wRecChange_cpu = wRecChange |> cpu
eta_cpu = eta |> cpu
exInType_cpu = exInType |> cpu
neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
synapseReconnectDelay_cpu = synapseReconnectDelay |> cpu
synapticActivityCounter_cpu = synapticActivityCounter |> cpu
zitCumulative_cpu = zitCumulative |> cpu
# neuroplasticity, work on CPU side
wRec_cpu, neuronInactivityCounter_cpu, synapticActivityCounter_cpu, synapseReconnectDelay_cpu =
neuroplasticity(synapseConnectionNumber,
zitCumulative_cpu,
wRec_cpu,
exInType_cpu,
wRecChange_cpu,
vt,
eta_cpu,
neuronInactivityCounter_cpu,
synapseReconnectDelay_cpu,
synapticActivityCounter_cpu,
progress,)
# transfer data back to the device
wRec = wRec_cpu |> device
neuronInactivityCounter = neuronInactivityCounter_cpu |> device
synapticActivityCounter = synapticActivityCounter_cpu |> device
synapseReconnectDelay = synapseReconnectDelay_cpu |> device
# error("DEBUG -> lifLearn! $(Dates.now())")
return wRec, neuronInactivityCounter, synapticActivityCounter, synapseReconnectDelay
end
function alifLearn(wRec,
wRecChange,
exInType,
arrayProjection4d,
neuronInactivityCounter,
synapseReconnectDelay,
synapseConnectionNumber,
synapticActivityCounter,
eta,
vt,
zitCumulative,
progress,
device)
# transfer data to cpu
arrayProjection4d_cpu = arrayProjection4d |> cpu
wRec_cpu = wRec |> cpu
wRecChange_cpu = wRecChange |> cpu
eta_cpu = eta |> cpu
exInType_cpu = exInType |> cpu
neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
synapseReconnectDelay_cpu = synapseReconnectDelay |> cpu
synapticActivityCounter_cpu = synapticActivityCounter |> cpu
zitCumulative_cpu = zitCumulative |> cpu
# neuroplasticity, work on CPU side
wRec_cpu, neuronInactivityCounter_cpu, synapticActivityCounter_cpu, synapseReconnectDelay_cpu =
neuroplasticity(synapseConnectionNumber,
zitCumulative_cpu,
wRec_cpu,
exInType_cpu,
wRecChange_cpu,
vt,
eta_cpu,
neuronInactivityCounter_cpu,
synapseReconnectDelay_cpu,
synapticActivityCounter_cpu,
progress,)
# transfer data back to the device
wRec = wRec_cpu |> device
neuronInactivityCounter = neuronInactivityCounter_cpu |> device
synapticActivityCounter = synapticActivityCounter_cpu |> device
synapseReconnectDelay = synapseReconnectDelay_cpu |> device
# error("DEBUG -> alifLearn! $(Dates.now())")
return wRec, neuronInactivityCounter, synapticActivityCounter, synapseReconnectDelay
end
# function onLearn!(wOut,
# wOutChange,
# arrayProjection4d)
# # merge learning weight with average learning weight
# wOut .+= (sum(wOutChange, dims=4) ./ (size(wOut, 4))) .* arrayProjection4d
# # adaptive wOut to help convergence using c_decay
# wOut .-= 0.001 .* wOut
# end
function onLearn!(wOut,
wOutChange,
eta,
arrayProjection4d,
progress,)
if progress != 0
# adaptive wOut to help convergence using c_decay
wOut .-= 0.1 .* eta .* wOut # wOut .-= 0.001 .* wOut
# merge learning weight with average learning weight
wOut .+= (sum(wOutChange, dims=4) ./ (size(wOut, 4))) .* arrayProjection4d
else
#TESTING skip
wOutChange .= 0
end
end
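# In effect, when progress != 0 the update above is (reconstructed from the code):
#   wOut <- wOut .* (1 .- 0.1 .* eta)                                # decay toward zero
#   wOut <- wOut .+ mean(wOutChange over the batch dim) .* arrayProjection4d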
function neuroplasticity(synapseConnectionNumber,
zitCumulative, # (row, col)
wRec, # (row, col, n)
exInType,
wRecChange,
vt,
eta,
neuronInactivityCounter,
synapseReconnectDelay,
synapticActivityCounter,
progress,) # (row, col, n)
if progress == 2 # no need to learn for current neural pathway
# skip neuroplasticity
#TODO I may need to do something with neuronInactivityCounter and other variables
wRecChange .= 0
# # -w all non-fire connection except mature connection
# weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta)
# # prune weak synapse
# pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
# error("DEBUG -> neuroplasticity")
elseif progress == 1 # some progress, whether up or down
# synapses waiting to reconnect must not receive wRecChange
mask = (!isequal).(wRec, 0)
wRecChange .*= mask
weakenAllActiveSynapse!(wRec, synapticActivityCounter, eta)
# merge learning weight, all resulting negative wRec will get pruned
mergeLearnWeight!(wRec, exInType, wRecChange, synapticActivityCounter, synapseReconnectDelay)
# # adjust wRec based on repetition (90% +w, 10% -w)
# growRepeatedPath!(wRec, synapticActivityCounter, eta)
# # -w all non-fire connection except mature connection
# weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta)
# # prune weak synapse
# pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
# rewire synapse connection
rewireSynapse!(wRec, neuronInactivityCounter, synapticActivityCounter,
synapseReconnectDelay, synapseConnectionNumber, zitCumulative)
# error("DEBUG -> neuroplasticity 1")
elseif progress == 0 # no progress, no weight update, only rewire
wRecChange .= 0
weakenAllActiveSynapse!(wRec, synapticActivityCounter, eta)
# # prune weak synapse
# pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
# rewire synapse connection
rewireSynapse!(wRec, neuronInactivityCounter, synapticActivityCounter,
synapseReconnectDelay, synapseConnectionNumber, zitCumulative)
# error("DEBUG -> neuroplasticity")
elseif progress == -1 # negative progress; currently handled the same way as progress == 1
# synapses waiting to reconnect must not receive wRecChange
mask = (!isequal).(wRec, 0)
wRecChange .*= mask
weakenAllActiveSynapse!(wRec, synapticActivityCounter, eta)
# merge learning weight, all resulting negative wRec will get pruned
mergeLearnWeight!(wRec, exInType, wRecChange, synapticActivityCounter, synapseReconnectDelay)
# # adjust wRec based on repetition (90% +w, 10% -w)
# growRepeatedPath!(wRec, synapticActivityCounter, eta)
# # -w all non-fire connection except mature connection
# weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta)
# # prune weak synapse
# pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
# rewire synapse connection
rewireSynapse!(wRec, neuronInactivityCounter, synapticActivityCounter,
synapseReconnectDelay, synapseConnectionNumber, zitCumulative)
# error("DEBUG -> neuroplasticity 1")
else
error("undefined condition line $(@__LINE__)")
end
# error("DEBUG -> neuroplasticity $(Dates.now())")
return wRec, neuronInactivityCounter,
synapticActivityCounter, synapseReconnectDelay
end
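# Summary of the progress branches above (reconstructed from the code):
#   progress ==  2 : pathway already good -> discard wRecChange, no rewiring
#   progress ==  1 : apply weight changes, weaken active synapses, rewire
#   progress ==  0 : discard wRecChange, weaken active synapses, rewire
#   progress == -1 : currently handled the same way as progress == 1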
# learningLiquidity(x) = -0.0001x + 1 # -10000 to +10000; f(x) = -5e-05x+0.5
function learningLiquidity(x)
if x > 10000
y = 0.0
elseif x < -10000
y = 1.0
else
y = -5e-05x+0.5 # range -10000 to +10000
end
return y
end
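# Worked values: learningLiquidity(-10000) == 1.0, learningLiquidity(0) == 0.5,
# learningLiquidity(10000) == 0.0; inputs beyond +/-10000 saturate at 1.0 and 0.0.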
end # module

View File

@@ -0,0 +1,274 @@
module snnUtil
export refractoryStatus!, addNewSynapticConn!, mergeLearnWeight!, growRepeatedPath!,
weakenNotMatureSynapse!, pruneSynapse!, rewireSynapse!, weakenAllActiveSynapse!
using Random, GeneralUtils
using ..type
#------------------------------------------------------------------------------------------------100
const synapseMaxWaittime = 100 # upper bound on a pruned synapse's random reconnect delay
function refractoryStatus!(refractoryCounter, refractoryActive, refractoryInactive)
d1, d2, d3, d4 = size(refractoryCounter)
for j in 1:d4
for i in 1:d3
if refractoryCounter[1, 1, i, j] > 0 # in refractory period (neuron inactive)
view(refractoryActive, 1, 1, i, j) .= 0
view(refractoryInactive, 1, 1, i, j) .= 1
else # neuron active
view(refractoryActive, 1, 1, i, j) .= 1
view(refractoryInactive, 1, 1, i, j) .= 0
end
end
end
end
# function addNewSynapticConn!(mask::AbstractArray{<:Any}, markValue::Number, wRec::AbstractArray{<:Any},
# counter::AbstractArray{<:Any}, n=0;
# rng::AbstractRNG=MersenneTwister(1234))
# # check if mask and wRec have the same size
# if size(mask) != size(wRec)
# error("mask and wRec must have the same size")
# end
# # get the indices of elements in mask that equal markValue
# indices = findall(x -> x == markValue, mask)
# alreadySub = findall(x -> x != 0, wRec) # get already subscribe
# setdiff!(indices, alreadySub) # remove already sub conn from pool
# remaining = 0
# if n == 0 || n > length(indices)
# remaining = n - length(indices)
# n = length(indices)
# end
# # shuffle the indices using the rng function
# shuffle!(rng, indices)
# # select the first n indices
# n > length(indices) ? println(">>> ", total_x_tobeReplced) : nothing
# selected = indices[1:n]
# # replace the elements in wRec at the selected positions with a
# for i in selected
# wRec[i] = rand(0.01:0.01:0.1)
# counter[i] = 0 # counting start from 0
# end
# # error("DEBUG addNewSynapticConn!")
# return remaining
# end
function mergeLearnWeight!(wRec::AbstractArray, exInType, wRecChange::AbstractArray,
synapticActivityCounter::AbstractArray,
synapseReconnectDelay::AbstractArray)
wRecSigned = exInType .* wRec
# -0.0 == 0.0, but isequal(-0.0, 0.0) is false, so -0.0 must be removed manually
GeneralUtils.replaceElements!(wRecSigned, -0, 0)
# println("wRec 2 $(size(wRecSigned)) ", wRecSigned[:,:,1,1])
# println("wRecChange ", wRecChange[:,:,1,1])
originalsign = sign.(wRecSigned)
# println("originalsign ", originalsign[:,:,1,1])
wRecSigned .= wRecSigned .+ wRecChange
# println("wRec 3 $(size(wRecSigned)) ", wRecSigned[:,:,1,1])
newsign = sign.(wRecSigned) # look for flipped sign, it needs to get pruned
# println("newsign ", newsign[:,:,1,1])
flipsign = (!isequal).(originalsign, newsign)
# println("flipsign ", flipsign[:,:,1,1])
nonflipsign = isequal.(originalsign, newsign)
wRec .= abs.(wRecSigned) # wRec store magnitude only, sign is at exInType
# println("wRec 4 $(size(wRec)) ", wRec[:,:,1,1])
GeneralUtils.replaceElements!(flipsign, 1, wRec, 0.0) # negative synapse get pruned
# println("wRec 5 $(size(wRec)) ", wRec[:,:,1,1])
GeneralUtils.replaceElements!(flipsign, 1, synapticActivityCounter, 0)
# set pruned synapse to random wait time
waittime = rand((1:synapseMaxWaittime), size(wRec)) .* flipsign # synapse's random wait time to reconnect
# synapseReconnectDelay counting mode when value is negative hence .* -1
synapseReconnectDelay .= (synapseReconnectDelay .* nonflipsign) .+ (waittime .* -1)
# println("synapseReconnectDelay ", synapseReconnectDelay[:,:,1,1])
# error("DEBUG -> mergeLearnWeight!")
end
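# Tiny illustration of the sign-flip pruning above (hypothetical values):
#   wRecSigned = [0.05, -0.03]; wRecChange = [-0.08, 0.01]
#   after the update wRecSigned == [-0.03, -0.02]: the first synapse flipped sign,
#   so its magnitude is zeroed (pruned) and it receives a random reconnect delay,
#   while the second keeps its sign and its new magnitude 0.02.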
function growRepeatedPath!(wRec, synapticActivityCounter, eta)
# separate active synapses from inactive ones in this signal
mask_activeSynapse = (!isequal).(synapticActivityCounter, 0)
# adjust weight based on vt progress and repetition (40% +w, 60% -w) depending on epsilonRec
mask_more, mask_less, _ = rankMatrix(synapticActivityCounter, 0.6) # sort synapse from highest to lowest activity
# +w, synapses with more than 10% of avg activity get their weight increased by eta
# mask_more = (!isless).(synapticActivityCounter, lowerlimit)
mask_2 = GeneralUtils.allTrue.(mask_activeSynapse, mask_more)
mask_3 = mask_2 .* (1 .+ eta) # high-activity synapse weights are increased by eta
GeneralUtils.replaceElements!(mask_3, 0, 1) # replace 0 with 1 so mask .* wRec does not zero unaffected weights
wRec .*= mask_3
# -w, synapses with less than 10% of avg activity get their weight reduced by eta
# mask_less = GeneralUtils.isBetween.(synapticActivityCounter, 0, lowerlimit) # 1st criteria
mask_3 = GeneralUtils.allTrue.(mask_activeSynapse, mask_less)
mask_4 = mask_3 .* (1 .- eta) # minor activity synapse weight will be reduced by eta
# replace 0 with 1 so mask .* wRec does not zero weights, i.e. unaffected weights remain the same
GeneralUtils.replaceElements!(mask_4, 0, 1)
wRec .*= mask_4
# error("DEBUG -> growRepeatedPath!")
end
function weakenAllActiveSynapse!(wRec, synapticActivityCounter, eta) # TODO not fully tested: no connection has yet had 0 synaptic activity while wRec != 0 (subscribed)
mask_activeSynapse = (!isequal).(synapticActivityCounter, 0)
mask_1 = mask_activeSynapse .* (1 .- (0.1 .* eta))
GeneralUtils.replaceElements!(mask_1, 0, 1) # replace 0 with 1 so mask * Wrec will not get 0 weight
wRec .*= mask_1
end
function weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta) # TODO not fully tested: no connection has yet had 0 synaptic activity while wRec != 0 (subscribed)
mask_inactiveSynapse = isequal.(synapticActivityCounter, 0)
mask_notmature = GeneralUtils.isBetween.(wRec, 0.0, 0.1) # 2nd criterion: an immature synapse has weight < 0.1
mask_1 = GeneralUtils.allTrue.(mask_inactiveSynapse, mask_notmature)
mask_2 = mask_1 .* (1 .- eta)
GeneralUtils.replaceElements!(mask_2, 0, 1) # replace 0 with 1 so mask * Wrec will not get 0 weight
wRec .*= mask_2
end
function pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
mask_weak = GeneralUtils.isBetween.(wRec, 0.0, 0.01)
mask_notweak = (!GeneralUtils.isBetween).(wRec, 0.0, 0.01)
wRec .*= mask_notweak # all marked weak synapse weight need to be 0.0 i.e. pruned
# all weak synapse activity are reset
GeneralUtils.replaceElements!(mask_weak, 1, synapticActivityCounter, 0)
# set pruned synapse to random wait time
waittime = rand((1:synapseMaxWaittime), size(wRec)) .* mask_weak # synapse's random wait time to reconnect
# synapseReconnectDelay counting mode when value is negative hence .* -1
synapseReconnectDelay .= (synapseReconnectDelay .* mask_notweak) .+ (waittime .* -1)
# error("DEBUG -> pruneSynapse!")
end
function rewireSynapse!(wRec::AbstractArray, neuronInactivityCounter::AbstractArray,
synapticActivityCounter::AbstractArray,
synapseReconnectDelay::AbstractArray,
synapseConnectionNumber::Integer,
zitCumulative::AbstractArray)
i1,i2,i3,i4 = size(wRec)
for n in 1:i3 # neuron-by-neuron
if neuronInactivityCounter[1,1,n,i4][1] < -100000 # neuron dies, i.e. reset all its weights
println("neuron $n died")
neuronInactivityCounter[:,:,n,i4] .= 0 # reset
w = random_wRec(i1,i2,1,synapseConnectionNumber)
wRec[:,:,n,i4] .= w
a = similar(w) .= -0.1 # temp matrix used to put -0.1 into synapseReconnectDelay
mask = (!iszero).(w)
GeneralUtils.replaceElements!(mask, 1, a, 0)
synapseReconnectDelay[:,:,n,i4] = a
else
# use views so in-place updates hit the parent arrays; indexing a slice like
# A[:,:,n,i4][ind] = 0 would only mutate a temporary copy
sac = @view synapticActivityCounter[:, :, n, i4]
srd = @view synapseReconnectDelay[:, :, n, i4]
for ind in eachindex(srd)
timemark = srd[ind]
if timemark > 0 #TODO not fully tested; a marked timeStep is available
timemark = Int(timemark)
# pool of neurons that fired within the last 100 timeSteps
earlier = size(zitCumulative, 3) - 100 > 0 ? size(zitCumulative, 3) - 100 : size(zitCumulative, 3)
current = size(zitCumulative, 3)
pool = sum(zitCumulative[:,:,earlier:current], dims=3)
if sum(pool) != 0
indices = findall(x -> x != 0, pool)
pick = rand(indices) # Cartesian index into the pool slice
wRec[pick] = rand(0.001:0.001:0.02)
synapticActivityCounter[pick] = 0
synapseReconnectDelay[pick] = -0.1
# error("DEBUG -> rewireSynapse!")
else # if no neuron fired at all, try again next time
sac[ind] = 0
srd[ind] = rand(1:synapseMaxWaittime) * -1 # wait time
# error("DEBUG -> rewireSynapse!")
end
end
end
end
end
end
""" Rank input matrix elements value from high to low (not including 0 in ranking)
and return 2 resulting bitmatrix. 1st matrix contain high rank, 2nd
matrix contain low rank. high and low rank are devided by percent threshold
"""
function rankMatrix(X, percent::Float64)
"""prompt
write a function in julia that satisfy the following requirements.
1. the function operate on column-major 3D matrix
2. the function input are matrix X and percent value from 0.0 to 1.0
3. the function rank the matrix's elements value from high to low ignoring 0
4. return first bitmatrix according to percent, true for 1-percent and false otherwise
5. return second bitmatrix according to percent, true for percent and false otherwise
6. the first and second bitmatrix must be in the same shape as X
"""
if percent < 0.0 || percent > 1.0
error("percent must be 0.0 <= percent <= 1.0")
end
percent = 1 - percent
if percent == 1.0
first_bitmatrix = falses(size(X))
second_bitmatrix = trues(size(X))
threshold = 0.0
elseif percent == 0.0
first_bitmatrix = trues(size(X))
second_bitmatrix = falses(size(X))
threshold = 1.0
else
# rank only the nonzero values, high to low
ranked_values = sort(filter(!iszero, vec(X)), rev=true)
if isempty(ranked_values) # nothing to rank
return falses(size(X)), falses(size(X)), 0.0
end
# threshold value corresponding to the given percent
threshold = ranked_values[ceil(Int, percent * length(ranked_values))]
# first bitmatrix: elements above the threshold
first_bitmatrix = X .> threshold
# second bitmatrix: elements at or below the threshold
second_bitmatrix = X .<= threshold
end
return first_bitmatrix, second_bitmatrix, threshold
end
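# Example (hypothetical values): for X = [4.0 3.0; 2.0 1.0] and percent = 0.5, the
# internal percent becomes 0.5, the threshold is the 2nd-ranked value (3.0), so the
# first mask marks only 4.0 and the second mask marks 3.0, 2.0, and 1.0.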
end # module

View File

@@ -0,0 +1,430 @@
module type
export
# struct
kfn_1,
# function
random_wRec
using Random, GeneralUtils, LinearAlgebra
#------------------------------------------------------------------------------------------------100
rng = MersenneTwister(1234)
abstract type Ironpen end
abstract type knowledgeFn <: Ironpen end
#------------------------------------------------------------------------------------------------100
Base.@kwdef mutable struct kfn_1 <: knowledgeFn
params::Union{Dict, Nothing} = nothing # store params of knowledgeFn itself for later use
timeStep::Union{AbstractArray, Nothing} = nothing
learningStage::Union{AbstractArray, Nothing} = nothing # 0 inference, 1 start, 2 during, 3 end learning
inputSize::Union{AbstractArray, Nothing} = nothing
zit::Union{AbstractArray, Nothing} = nothing # RSNN 3D activation matrix (row, col, batch)
zitCumulative::Union{AbstractArray, Nothing} = nothing
exInType::Union{AbstractArray, Nothing} = nothing
modelError::Union{AbstractArray, Nothing} = nothing # store RSNN error
outputError::Union{AbstractArray, Nothing} = nothing # store output neurons error
bk::Union{AbstractArray, Nothing} = nothing # Bⱼₖ
# ---------------------------------------------------------------------------- #
# LIF Neurons #
# ---------------------------------------------------------------------------- #
# a projection of kfn.zit into the lif dimensions (for broadcasting later)
lif_zit::Union{AbstractArray, Nothing} = nothing
# main variables according to papers
lif_wRec::Union{AbstractArray, Nothing} = nothing
lif_vt::Union{AbstractArray, Nothing} = nothing
lif_vth::Union{AbstractArray, Nothing} = nothing
lif_vRest::Union{AbstractArray, Nothing} = nothing
lif_zt::Union{AbstractArray, Nothing} = nothing
lif_zt4d::Union{AbstractArray, Nothing} = nothing
lif_refractoryCounter::Union{AbstractArray, Nothing} = nothing
lif_refractoryDuration::Union{AbstractArray, Nothing} = nothing
lif_alpha::Union{AbstractArray, Nothing} = nothing
lif_delta::Union{AbstractFloat, Nothing} = nothing
lif_tau_m::Union{AbstractFloat, Nothing} = nothing
lif_phi::Union{AbstractArray, Nothing} = nothing
lif_epsilonRec::Union{AbstractArray, Nothing} = nothing
lif_eRec::Union{AbstractArray, Nothing} = nothing
lif_eta::Union{AbstractArray, Nothing} = nothing
lif_gammaPd::Union{AbstractArray, Nothing} = nothing
lif_wRecChange::Union{AbstractArray, Nothing} = nothing
lif_error::Union{AbstractArray, Nothing} = nothing
lif_firingCounter::Union{AbstractArray, Nothing} = nothing
lif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing
lif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing
lif_synapseReconnectDelay::Union{AbstractArray, Nothing} = nothing
lif_synapseConnectionNumber::Union{Int, Nothing} = nothing
lif_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing # work
# pre-allocation array
lif_arrayProjection4d::Union{AbstractArray, Nothing} = nothing # use to project 3d array to 4d
lif_recSignal::Union{AbstractArray, Nothing} = nothing
lif_exInType::Union{AbstractArray, Nothing} = nothing
# lif_decayed_epsilonRec::Union{AbstractArray, Nothing} = nothing
# lif_vt_diff_vth::Union{AbstractArray, Nothing} = nothing
# lif_vt_diff_vth_div_vth::Union{AbstractArray, Nothing} = nothing
# lif_gammaPd_div_vth::Union{AbstractArray, Nothing} = nothing
# lif_phiActivation::Union{AbstractArray, Nothing} = nothing
# ---------------------------------------------------------------------------- #
# ALIF Neurons #
# ---------------------------------------------------------------------------- #
alif_zit::Union{AbstractArray, Nothing} = nothing
alif_wRec::Union{AbstractArray, Nothing} = nothing
alif_vt::Union{AbstractArray, Nothing} = nothing
alif_vth::Union{AbstractArray, Nothing} = nothing
alif_vRest::Union{AbstractArray, Nothing} = nothing
alif_zt::Union{AbstractArray, Nothing} = nothing
alif_zt4d::Union{AbstractArray, Nothing} = nothing
alif_refractoryCounter::Union{AbstractArray, Nothing} = nothing
alif_refractoryDuration::Union{AbstractArray, Nothing} = nothing
alif_alpha::Union{AbstractArray, Nothing} = nothing
alif_delta::Union{AbstractFloat, Nothing} = nothing
alif_tau_m::Union{AbstractFloat, Nothing} = nothing
alif_phi::Union{AbstractArray, Nothing} = nothing
alif_epsilonRec::Union{AbstractArray, Nothing} = nothing
alif_eRec::Union{AbstractArray, Nothing} = nothing
alif_eta::Union{AbstractArray, Nothing} = nothing
alif_gammaPd::Union{AbstractArray, Nothing} = nothing
alif_wRecChange::Union{AbstractArray, Nothing} = nothing
alif_error::Union{AbstractArray, Nothing} = nothing
alif_firingCounter::Union{AbstractArray, Nothing} = nothing
alif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing
alif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing
alif_synapseReconnectDelay::Union{AbstractArray, Nothing} = nothing
alif_synapseConnectionNumber::Union{Int, Nothing} = nothing
alif_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing
# pre-allocation array
alif_arrayProjection4d::Union{AbstractArray, Nothing} = nothing # use to project 3d array to 4d
alif_recSignal::Union{AbstractArray, Nothing} = nothing
alif_exInType::Union{AbstractArray, Nothing} = nothing
# alif_decayed_epsilonRec::Union{AbstractArray, Nothing} = nothing
# alif_vt_diff_vth::Union{AbstractArray, Nothing} = nothing
# alif_vt_diff_vth_div_vth::Union{AbstractArray, Nothing} = nothing
# alif_gammaPd_div_vth::Union{AbstractArray, Nothing} = nothing
# alif_phiActivation::Union{AbstractArray, Nothing} = nothing
# alif specific variables
alif_epsilonRecA::Union{AbstractArray, Nothing} = nothing
alif_avth::Union{AbstractArray, Nothing} = nothing
alif_a::Union{AbstractArray, Nothing} = nothing # threshold adaptation
alif_beta::Union{AbstractArray, Nothing} = nothing # β, constant, value from paper
alif_rho::Union{AbstractArray, Nothing} = nothing # ρ, threshold adaptation decay factor
alif_tau_a::Union{AbstractFloat, Nothing} = nothing # τ_a, adaption time constant in millisecond
# alif specific pre-allocation array
# alif_phi_x_epsilonRec::Union{AbstractArray, Nothing} = nothing
# alif_phi_x_beta::Union{AbstractArray, Nothing} = nothing
# alif_rho_diff_phi_x_beta::Union{AbstractArray, Nothing} = nothing
# alif_rho_div_phi_x_beta_x_epsilonRecA::Union{AbstractArray, Nothing} = nothing
# alif_beta_x_a::Union{AbstractArray, Nothing} = nothing
# ---------------------------------------------------------------------------- #
# Output Neurons #
# ---------------------------------------------------------------------------- #
# output neuron is based on LIF
on_zit::Union{AbstractArray, Nothing} = nothing
# main variables according to papers
on_wOut::Union{AbstractArray, Nothing} = nothing # wOut plays the role of wRec; the name follows the paper
on_vt::Union{AbstractArray, Nothing} = nothing
on_vth::Union{AbstractArray, Nothing} = nothing
on_vRest::Union{AbstractArray, Nothing} = nothing
on_zt::Union{AbstractArray, Nothing} = nothing
on_zt4d::Union{AbstractArray, Nothing} = nothing
on_refractoryCounter::Union{AbstractArray, Nothing} = nothing
on_refractoryDuration::Union{AbstractArray, Nothing} = nothing
on_alpha::Union{AbstractArray, Nothing} = nothing
on_delta::Union{AbstractFloat, Nothing} = nothing
on_tau_m::Union{AbstractFloat, Nothing} = nothing
on_phi::Union{AbstractArray, Nothing} = nothing
on_epsilonRec::Union{AbstractArray, Nothing} = nothing
on_eRec::Union{AbstractArray, Nothing} = nothing
on_eta::Union{AbstractArray, Nothing} = nothing
on_gammaPd::Union{AbstractArray, Nothing} = nothing
on_wOutChange::Union{AbstractArray, Nothing} = nothing
on_error::Union{AbstractArray, Nothing} = nothing
on_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing
on_firingCounter::Union{AbstractArray, Nothing} = nothing
# pre-allocation array
on_arrayProjection4d::Union{AbstractArray, Nothing} = nothing # use to project 3d array to 4d
on_recSignal::Union{AbstractArray, Nothing} = nothing
# on_decayed_epsilonRec::Union{AbstractArray, Nothing} = nothing
# on_vt_diff_vth::Union{AbstractArray, Nothing} = nothing
# on_vt_diff_vth_div_vth::Union{AbstractArray, Nothing} = nothing
# on_gammaPd_div_vth::Union{AbstractArray, Nothing} = nothing
# on_phiActivation::Union{AbstractArray, Nothing} = nothing
end
# outer constructor
function kfn_1(params::Dict; device=cpu)
kfn = kfn_1()
kfn.params = params
kfn.timeStep = [0] |> device
kfn.learningStage = [0] |> device
# ---------------------------------------------------------------------------- #
# initialize activation matrix #
# ---------------------------------------------------------------------------- #
# row*col is a 2D matrix representing all RSNN activations
row, signal_col, batch = kfn.params[:inputPort][:signal][:numbers] # z-axis represents the signal batch number
kfn.inputSize = [row, signal_col] |> device
lif_col = kfn.params[:computeNeuron][:lif][:numbers][2]
alif_col = kfn.params[:computeNeuron][:alif][:numbers][2]
col = signal_col + lif_col + alif_col
# activation matrix
kfn.zit = zeros(row, col, batch) |> device
kfn.zitCumulative = zeros(row, col, 1, batch) |> device
kfn.modelError = zeros(1) |> device
kfn.bk = rand(size(kfn.zit)...) |> device
# ---------------------------------------------------------------------------- #
# LIF config #
# ---------------------------------------------------------------------------- #
# In the 3D LIF matrix, the z-axis indexes neurons while each 2D slice holds that neuron's
# synaptic subscriptions to other neurons (via the activation matrix)
lif_n = kfn.params[:computeNeuron][:lif][:numbers][1] * kfn.params[:computeNeuron][:lif][:numbers][2]
# subscription
synapticConnectionPercent = kfn.params[:computeNeuron][:lif][:params][:synapticConnectionPercent]
kfn.lif_synapseConnectionNumber = Int(floor(row*col * synapticConnectionPercent/100))
w = random_wRec(row, col, lif_n, kfn.lif_synapseConnectionNumber)
# project 3D w into 4D kfn.lif_wRec (row, col, n, batch)
kfn.lif_wRec = reshape(w, (row, col, lif_n, 1)) .* ones(row, col, lif_n, batch) |> device
kfn.lif_zit = (similar(kfn.lif_wRec) .= 0)
kfn.lif_vt = (similar(kfn.lif_wRec) .= 0)
kfn.lif_vth = (similar(kfn.lif_wRec) .= 1)
kfn.lif_vRest = (similar(kfn.lif_wRec) .= 0)
kfn.lif_zt = zeros(1, 1, lif_n, batch) |> device
kfn.lif_zt4d = (similar(kfn.lif_wRec) .= 0)
kfn.lif_refractoryCounter = (similar(kfn.lif_wRec) .= 0)
kfn.lif_refractoryDuration = (similar(kfn.lif_wRec) .= 3)
kfn.lif_delta = 1.0
kfn.lif_tau_m = 100.0
kfn.lif_alpha = (similar(kfn.lif_wRec) .= (exp(-kfn.lif_delta / kfn.lif_tau_m)))
kfn.lif_phi = (similar(kfn.lif_wRec) .= 0)
kfn.lif_epsilonRec = (similar(kfn.lif_wRec) .= 0)
kfn.lif_eRec = (similar(kfn.lif_wRec) .= 0)
kfn.lif_eta = (similar(kfn.lif_wRec) .= 0.01)
kfn.lif_gammaPd = (similar(kfn.lif_wRec) .= 0.3)
kfn.lif_wRecChange = (similar(kfn.lif_wRec) .= 0)
kfn.lif_error = (similar(kfn.lif_wRec) .= 0)
kfn.lif_firingCounter = (similar(kfn.lif_wRec) .= 0)
kfn.lif_firingTargetFrequency = (similar(kfn.lif_wRec) .= 0.1)
kfn.lif_neuronInactivityCounter = (similar(kfn.lif_wRec) .= 0)
# counts subscribed synapse activity, just like epsilonRec but without decay.
# used to adjust weights based on how often a neural pathway is used
kfn.lif_synapseReconnectDelay = (similar(kfn.lif_wRec) .= -0.1) # -0.1 for non-sub conn
kfn.lif_synapticActivityCounter = (similar(kfn.lif_wRec) .= -0.1) # -0.1 for non-sub conn
kfn.lif_arrayProjection4d = (similar(kfn.lif_wRec) .= 1)
kfn.lif_recSignal = (similar(kfn.lif_wRec) .= 0)
kfn.lif_exInType = (similar(kfn.lif_wRec) .= 0)
# kfn.lif_decayed_epsilonRec = (similar(kfn.lif_wRec) .= 0)
# kfn.lif_vt_diff_vth = (similar(kfn.lif_wRec) .= 0)
# kfn.lif_vt_diff_vth_div_vth = (similar(kfn.lif_wRec) .= 0)
# kfn.lif_gammaPd_div_vth = (similar(kfn.lif_wRec) .= 0)
# kfn.lif_phiActivation = (similar(kfn.lif_wRec) .= 0)
# ---------------------------------------------------------------------------- #
# ALIF config #
# ---------------------------------------------------------------------------- #
alif_n = kfn.params[:computeNeuron][:alif][:numbers][1] * kfn.params[:computeNeuron][:alif][:numbers][2]
# subscription
synapticConnectionPercent = kfn.params[:computeNeuron][:alif][:params][:synapticConnectionPercent]
kfn.alif_synapseConnectionNumber = Int(floor(row*col * synapticConnectionPercent/100))
w = random_wRec(row, col, alif_n, kfn.alif_synapseConnectionNumber)
# project 3D w into 4D kfn.alif_wRec
kfn.alif_wRec = reshape(w, (row, col, alif_n, 1)) .* ones(row, col, alif_n, batch) |> device
kfn.alif_zit = (similar(kfn.alif_wRec) .= 0)
kfn.alif_vt = (similar(kfn.alif_wRec) .= 0)
kfn.alif_vth = (similar(kfn.alif_wRec) .= 1)
kfn.alif_vRest = (similar(kfn.alif_wRec) .= 0)
kfn.alif_zt = zeros(1, 1, alif_n, batch) |> device
kfn.alif_zt4d = (similar(kfn.alif_wRec) .= 0)
kfn.alif_refractoryCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_refractoryDuration = (similar(kfn.alif_wRec) .= 3)
kfn.alif_delta = 1.0
kfn.alif_tau_m = 100.0
kfn.alif_alpha = (similar(kfn.alif_wRec) .= (exp(-kfn.alif_delta / kfn.alif_tau_m)))
kfn.alif_phi = (similar(kfn.alif_wRec) .= 0)
kfn.alif_epsilonRec = (similar(kfn.alif_wRec) .= 0)
kfn.alif_eRec = (similar(kfn.alif_wRec) .= 0)
kfn.alif_eta = (similar(kfn.alif_wRec) .= 0.01)
kfn.alif_gammaPd = (similar(kfn.alif_wRec) .= 0.3)
kfn.alif_wRecChange = (similar(kfn.alif_wRec) .= 0)
kfn.alif_error = (similar(kfn.alif_wRec) .= 0)
kfn.alif_firingCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_firingTargetFrequency = (similar(kfn.alif_wRec) .= 0.1)
kfn.alif_neuronInactivityCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_synapseReconnectDelay = (similar(kfn.alif_wRec) .= -0.1) # -0.1 for non-sub conn
kfn.alif_synapticActivityCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_arrayProjection4d = (similar(kfn.alif_wRec) .= 1)
kfn.alif_recSignal = (similar(kfn.alif_wRec) .= 0)
kfn.alif_exInType = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_decayed_epsilonRec = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_vt_diff_vth = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_vt_diff_vth_div_vth = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_gammaPd_div_vth = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_phiActivation = (similar(kfn.alif_wRec) .= 0)
# alif specific variables
kfn.alif_epsilonRecA = (similar(kfn.alif_wRec) .= 0)
kfn.alif_avth = (similar(kfn.alif_wRec) .= 0)
kfn.alif_a = (similar(kfn.alif_wRec) .= 0)
kfn.alif_beta = (similar(kfn.alif_wRec) .= 0.07)
kfn.alif_tau_a = 800.0
kfn.alif_rho = (similar(kfn.alif_wRec) .= (exp(-kfn.alif_delta / kfn.alif_tau_a))) |> device
# kfn.alif_phi_x_epsilonRec = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_phi_x_beta = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_rho_diff_phi_x_beta = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_rho_div_phi_x_beta_x_epsilonRecA = (similar(kfn.alif_wRec) .= 0)
# kfn.alif_beta_x_a = (similar(kfn.alif_wRec) .= 0)
# ---------------------------------------------------------------------------- #
# output config #
# ---------------------------------------------------------------------------- #
n = kfn.params[:outputPort][:numbers][1] * kfn.params[:outputPort][:numbers][2]
# subscription
w = zeros(row, col, n)
synapticConnectionPercent = kfn.params[:outputPort][:params][:synapticConnectionPercent]
subable = size(kfn.lif_wRec, 3) + size(kfn.alif_wRec, 3) # sub to lif, alif only
synapticConnection = Int(floor(subable * synapticConnectionPercent/100))
for slice in eachslice(w, dims=3) # each slice is a neuron
startInd = row*col - subable + 1 # e.g. 100(row*col) - 50(subable) = 50 -> startInd = 51
# pool must contain only lif, alif neurons
pool = shuffle!([startInd:row*col...])[1:synapticConnection]
for i in pool
slice[i] = rand() # assign weight to synaptic connection. dividing by 10 would start small;
# otherwise RSNN's vt usually stays negative (-)
end
end
# 10% of neuron connections should be enough to start making neurons fire
should_be_avg_weight = 1 / (0.1 * n)
w = w .* (should_be_avg_weight / maximum(w)) # adjust overall weight
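# added worked example: with n = 10 output neurons, should_be_avg_weight =
# 1/(0.1*10) = 1.0, so the rescale above maps the largest initial synapse to 1.0
# and shrinks the rest proportionally.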
# project 3D w into 4D kfn.on_wOut (row, col, n, batch)
kfn.on_wOut = reshape(w, (row, col, n, 1)) .* ones(row, col, n, batch) |> device
kfn.on_zit = (similar(kfn.on_wOut) .= 0)
kfn.on_vt = (similar(kfn.on_wOut) .= 0)
kfn.on_vth = (similar(kfn.on_wOut) .= 1)
kfn.on_vRest = (similar(kfn.on_wOut) .= 0)
kfn.on_zt = zeros(1, 1, n, batch) |> device
kfn.on_zt4d = (similar(kfn.on_wOut) .= 0)
kfn.on_refractoryCounter = (similar(kfn.on_wOut) .= 0)
kfn.on_refractoryDuration = (similar(kfn.on_wOut) .= 1)
kfn.on_delta = 1.0
kfn.on_tau_m = 100.0
kfn.on_alpha = (similar(kfn.on_wOut) .= (exp(-kfn.on_delta / kfn.on_tau_m)))
kfn.on_phi = (similar(kfn.on_wOut) .= 0)
kfn.on_epsilonRec = (similar(kfn.on_wOut) .= 0)
kfn.on_eRec = (similar(kfn.on_wOut) .= 0)
kfn.on_eta = (similar(kfn.on_wOut) .= 0.01)
kfn.on_gammaPd = (similar(kfn.on_wOut) .= 0.3)
kfn.on_wOutChange = (similar(kfn.on_wOut) .= 0)
kfn.on_error = (similar(kfn.on_wOut) .= 0)
kfn.on_synapticActivityCounter = (similar(kfn.on_wOut) .= 0)
kfn.on_firingCounter = (similar(kfn.on_wOut) .= 0)
kfn.on_arrayProjection4d = (similar(kfn.on_wOut) .= 1)
kfn.on_recSignal = (similar(kfn.on_wOut) .= 0)
kfn.outputError = zeros(n, batch) |> device
totalComputeNeurons = lif_n + alif_n
inhibitoryNeurons = Int(floor(totalComputeNeurons * 30/100))
mask1 = ones(row, signal_col)
mask2 = GeneralUtils.multiply_random_elements(ones(row, lif_col + alif_col),
-1, inhibitoryNeurons, MersenneTwister(1234))
kfn.exInType = cat(mask1, mask2, dims=2) |> device
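# added note: exInType is a sign mask over the activation matrix columns — all
# input-signal columns stay +1 (excitatory) while, assuming
# GeneralUtils.multiply_random_elements flips the sign of `inhibitoryNeurons`
# randomly chosen entries, ~30% of the compute-neuron entries become -1
# (inhibitory); the fixed MersenneTwister seed keeps the mask reproducible.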
return kfn
end
function random_wRec(row, col, n, synapseConnectionNumber)
# subscription
w = zeros(row, col, n)
for slice in eachslice(w, dims=3)
pool = shuffle!([1:row*col...])[1:synapseConnectionNumber]
for i in pool
slice[i] = rand() # assign weight to synaptic connection. dividing by 10 would start small;
# otherwise RSNN's vt usually stays negative (-)
end
end
# # adjust weight so that RSNN fires a small number of neurons at the beginning to avoid an overwhelming
# # all-fire situation. it is also better than a not-fire-at-all situation.
# avgWeight = sum(w)/length(w)
# w = w .* (0.01 / avgWeight) # adjust overall weight
return normalize!(w) #(row, col, n)
end
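# added usage sketch for random_wRec above (toy sizes):
#   w = random_wRec(10, 60, 5, 120)          # 5 neurons over a 10x60 activation matrix
#   count(!iszero, view(w, :, :, 1))         # 120 random synapses per neuron slice (almost surely)
#   # normalize! then rescales the whole tensor to unit norm without zeroing entries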
end # module


@@ -0,0 +1 @@
.CondaPkg

File diff suppressed because it is too large


@@ -0,0 +1,25 @@
[deps]
BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
Cthulhu = "f68482b8-f384-11e8-15f7-abe071a5a75f"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
GLMakie = "e9467ef8-e4e7-5192-8a1a-b1aee30e663a"
GPUArrays = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
IronpenGPU = "3d5396ea-818e-43fc-a9d3-164248e840cd"
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
MethodAnalysis = "85b6ec6f-f7df-4429-9514-a64bcd9ee824"
OneHotArrays = "0b1bfda6-eb8a-41d2-88d8-f5af5cad476f"
ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
REPL = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
Serialization = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
SliceMap = "82cb661a-3f19-5665-9e27-df437c7e54c8"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"


@@ -0,0 +1,966 @@
# ---------------------------------------------------------------------------- #
# if one need to reinstall all python packages #
# ---------------------------------------------------------------------------- #
# 1. delete .CondaPkg folder in working folder
# 2. delete CondaPkg.toml file in working folder
# using Pkg; Pkg.activate(".");
# pythonPkg = ["CondaPkg", "PythonCall"]
# for i in pythonPkg try Pkg.rm(i) catch end end
# for i in pythonPkg Pkg.add(i) end
# using CondaPkg, PythonCall
# channels = ["anaconda", "conda-forge", "pytorch"]
# for i in channels CondaPkg.add_channel(i) end
# condapackage = ["numpy", "pytorch", "snntorch"]
# for i in condapackage CondaPkg.add(i) end
using Pkg; Pkg.activate("."); Pkg.resolve(), Pkg.instantiate()
# ---------------------------------------------------------------------------- #
# for debugging purpose #
# ---------------------------------------------------------------------------- #
# https://discourse.julialang.org/t/debugging-extremely-slow/53801/3
# using MethodAnalysis
# visit(Base) do item
# isa(item, Module) && push!(JuliaInterpreter.compiled_modules, item)
# true
# end
using Revise
using BenchmarkTools, Cthulhu, REPL.TerminalMenus
using Flux, CUDA
using BSON, JSON3
using MLDatasets: MNIST
using MLUtils, ProgressMeter, Dates, Random, LinearAlgebra,
Serialization, OneHotArrays , GLMakie
using CondaPkg, PythonCall
np = pyimport("numpy")
torch = pyimport("torch")
spikegen = pyimport("snntorch.spikegen") # https://github.com/jeshraghian/snntorch
using IronpenGPU
using GeneralUtils
sep = Sys.iswindows() ? "\\" : "/"
rootDir = pwd()
# select compute device
# device = Flux.CUDA.functional() ? gpu : cpu # Flux provides the "cpu" and "gpu" keywords
device = gpu
if device == gpu CUDA.device!(0) end #CHANGE
CUDA.allowscalar(false) # disallow scalar indexing so code that silently works on CPU fails fast when moved to the GPU
#------------------------------------------------------------------------------------------------100
"""
Todo:
- []
Change from version:
-
All features
-
"""
# ----------------------------- REPL menu options ---------------------------- #
options = ["yes", "no"]
menu = RadioMenu(options)
# communication config --------------------------------------------------------------------------100
database_ip = "localhost"
# database_ip = "192.168.0.8"
#------------------------------------------------------------------------------------------------100
modelname = "runOn_gpu_0" #CHANGE
imageBatch = 1
function generate_snn(filename::String, location::String)
signalInput_portnumbers = (10, 10, imageBatch) # 2nd dim needs to match
# input signal + copied input signal + noise.
# 3rd dim is input batch size
noise_portnumbers = (signalInput_portnumbers[1], 1)
output_portnumbers = (10, 1)
# 5000 neurons are maximum for 64GB memory i.e. 300 LIF : 200 ALIF
lif_neuron_number = (signalInput_portnumbers[1], 30) # CHANGE
alif_neuron_number = (signalInput_portnumbers[1], 20) # CHANGE from Allen Institute, ALIF is 20-40% of LIF
# totalNeurons = computeNeuronNumber + noise_portnumbers + signalInput_portnumbers
# totalInputPort = noise_portnumbers + signalInput_portnumbers
# kfn and neuron config
passthrough_neuron_params = Dict(
:type => "passthroughNeuron"
)
lif_neuron_params = Dict{Symbol, Any}(
:type => "lifNeuron",
:v_t_default => 0.0,
:v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
:tau_m => 50.0, # membrane time constant in millisecond.
:eta => 1e-6,
# Good starting value is 1/10th of tau_a
# This is a problem-specific parameter. It controls how leaky the neuron is.
# Too high (less leaky) makes it harder for the learning algo to move the model in a direction that reduces error,
# causing the model's error to explode exponentially, likely because the learning algo will try to
# exert more force (larger w_out_change) to move the neuron in a direction that reduces error.
# For example, model error going from 7 to 2e6.
:synapticConnectionPercent => 20, # % coverage of total neurons in kfn
)
alif_neuron_params = Dict{Symbol, Any}(
:type => "alifNeuron",
:v_t_default => 0.0,
:v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
:tau_m => 50.0, # membrane time constant in millisecond.
:eta => 1e-6,
# Good starting value is 1/10th of tau_a
# This is a problem-specific parameter. It controls how leaky the neuron is.
# Too high (less leaky) makes it harder for the learning algo to move the model in a direction that reduces error,
# causing the model's error to explode exponentially, likely because the learning algo will try to
# exert more force (larger w_out_change) to move the neuron in a direction that reduces error.
# For example, model error going from 7 to 2e6.
:tau_a => 800.0, # adaptation time constant in milliseconds. it defines the neuron's memory length.
# This is a problem-specific parameter
# Good starting value is 0.5 to 2 times the info STORE-RECALL length, i.e. the total time the SNN takes to
# perform a task (for example, equal to the episode length).
# From "Spike frequency adaptation supports network computations on temporally dispersed
# information"
:synapticConnectionPercent => 20, # % coverage of total neurons in kfn
)
linear_neuron_params = Dict{Symbol, Any}(
:type => "linearNeuron",
:v_th => 1.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
:tau_out => 20.0, # output time constant in millisecond.
:synapticConnectionPercent => 20, # % coverage of total neurons in kfn
# Good starting value is 1/50th of tau_a
# This is a problem-specific parameter.
# It controls how leaky the neuron is.
# Too high (less leaky) makes it harder for the learning algo to move the model in a direction that reduces error,
# causing the model's error to explode exponentially. For example, model error going from 7 to 2e6.
# One can imagine training the output neuron is like a Tetris game.
)
# integrate_neuron_params = Dict{Symbol, Any}(
# :type => "integrateNeuron",
# :synapticConnectionPercent => 10, # % coverage of total neurons in kfn
# :eta => 1e-6,
# :tau_out => 100.0,
# # Good starting value is 1/50th of tau_a
# # This is problem specific parameter.
# # It controls how leaky the neuron is.
# # Too high (less leaky) makes it harder for the learning algo to move the model in a direction that reduces error,
# # causing the model's error to explode exponentially. For example, model error going from 7 to 2e6.
# # One can imagine training the output neuron is like a Tetris game.
# )
I_kfnparams = Dict{Symbol, Any}(
:knowledgeFnName=> "I",
:neuronFiringRateTarget=> 20.0, # Hz
# group relevant info
:inputPort=> Dict(
:noise=> Dict(
:numbers=> noise_portnumbers,
:params=> passthrough_neuron_params,
),
:signal=> Dict(
:numbers=> signalInput_portnumbers, # in case of GloVe word encoding, it is 300
:params=> passthrough_neuron_params,
),
),
:outputPort=> Dict(
:numbers=> output_portnumbers, # output neuron, this is also the output length
:params=> linear_neuron_params,
),
:computeNeuron=> Dict(
:lif=> Dict(
:numbers=> lif_neuron_number, # number in (row, col) tuple format
:params=> lif_neuron_params,
),
:alif=> Dict(
:numbers=> alif_neuron_number, # number in (row, col) tuple format
:params=> alif_neuron_params,
),
),
)
#------------------------------------------------------------------------------------------------100
model = IronpenGPU.kfn_1(I_kfnparams, device=device);
# serialize(location * sep * filename, model)
println("SNN generated")
return model
end
function data_loader()
# test problem
trainDataset = MNIST(:train)[1:2] # total 60000
# validateDataset = MNIST(:test)
validateDataset = MNIST(:train)[1:2]
labelDict = [0:9...]
trainData = MLUtils.DataLoader(
trainDataset; # fullTrainDataset or trainDataset
batchsize=imageBatch,
collate=true,
shuffle=true,
buffer=true,
partial=false, # better for gpu memory if batchsize is fixed
# parallel=true, #BUG ?? causes the dataloader to loop forever
)
validateData = MLUtils.DataLoader(
validateDataset;
batchsize=imageBatch,
collate=true,
shuffle=true,
buffer=true,
partial=false, # better for gpu memory if batchsize is fixed
# parallel=true, #BUG ?? causes the dataloader to loop forever
)
# dummy data used to debug
# trainData = [(rand(10, 10), [5]), (rand(10, 10), [2])]
# trainData = [(rand(10, 10), [5]),]
return trainData, validateData, labelDict
end
function train_snn(model, trainData, validateData, labelDict::Vector)
# random seed
# rng = MersenneTwister(1234)
logitLog = zeros(10, 2)
firedNeurons_t1 = zeros(1)
var1 = zeros(3, 1)
var2 = zeros(3, 1)
var3 = zeros(6, 1)
var4 = zeros(10, 2)
# ----------------------------------- plot ----------------------------------- #
plot10 = Observable(firedNeurons_t1)
plot20 = Observable(logitLog[1 , :])
plot21 = Observable(logitLog[2 , :])
plot22 = Observable(logitLog[3 , :])
plot23 = Observable(logitLog[4 , :])
plot24 = Observable(logitLog[5 , :])
plot25 = Observable(logitLog[6 , :])
plot26 = Observable(logitLog[7 , :])
plot27 = Observable(logitLog[8 , :])
plot28 = Observable(logitLog[9 , :])
plot29 = Observable(logitLog[10, :])
plot30 = Observable(var1[1 , :])
plot31 = Observable(var1[2 , :])
plot32 = Observable(var1[3 , :])
# plot33 = Observable(var1[4 , :])
# plot34 = Observable(var1[5 , :])
# plot35 = Observable(var1[6 , :])
# plot36 = Observable(var1[7 , :])
# plot37 = Observable(var1[8 , :])
# plot38 = Observable(var1[9 , :])
# plot39 = Observable(var1[10, :])
plot40 = Observable(var2[1 , :])
plot41 = Observable(var2[2 , :])
plot42 = Observable(var2[3 , :])
# plot43 = Observable(var2[4 , :])
# plot44 = Observable(var2[5 , :])
# plot45 = Observable(var2[6 , :])
# plot46 = Observable(var2[7 , :])
# plot47 = Observable(var2[8 , :])
# plot48 = Observable(var2[9 , :])
# plot49 = Observable(var2[10, :])
plot50 = Observable(var3[1 , :])
plot51 = Observable(var3[2 , :])
plot52 = Observable(var3[3 , :])
plot53 = Observable(var3[4 , :])
plot54 = Observable(var3[5 , :])
plot55 = Observable(var3[6 , :])
# plot56 = Observable(var3[7 , :])
# plot57 = Observable(var3[8 , :])
# plot58 = Observable(var3[9 , :])
# plot59 = Observable(var3[10, :])
# plot60 = Observable(var4[1 , :])
# plot61 = Observable(var4[2 , :])
# plot62 = Observable(var4[3 , :])
# plot63 = Observable(var4[4 , :])
# plot64 = Observable(var4[5 , :])
# plot65 = Observable(var4[6 , :])
# plot66 = Observable(var4[7 , :])
# plot67 = Observable(var4[8 , :])
# plot68 = Observable(var4[9 , :])
# plot69 = Observable(var4[10, :])
# main figure
fig1 = Figure()
subfig1 = GLMakie.Axis(fig1[1, 1], # define position of this subfigure inside a figure
title = "RSNN firedNeurons_t1",
xlabel = "time",
ylabel = "data"
)
lines!(subfig1, plot10, label = "firedNeurons_t1")
# axislegend(subfig1, position = :lb)
subfig2 = GLMakie.Axis(fig1[2, 1], # define position of this subfigure inside a figure
title = "output neurons logit",
xlabel = "time",
ylabel = "data"
)
lines!(subfig2, plot20, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot21, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot22, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot23, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot24, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot25, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot26, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot27, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot28, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig2, plot29, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
# axislegend(subfig2, position = :lb)
subfig3 = GLMakie.Axis(fig1[3, 1], # define position of this subfigure inside a figure
title = "1st lif epsilonRec",
xlabel = "time",
ylabel = "data"
)
lines!(subfig3, plot30, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig3, plot31, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig3, plot32, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot33, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot34, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot35, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot36, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot37, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot38, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig3, plot39, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
# axislegend(subfig3, position = :lb)
subfig4 = GLMakie.Axis(fig1[4, 1], # define position of this subfigure inside a figure
title = "lif v_t",
xlabel = "time",
ylabel = "data"
)
lines!(subfig4, plot40, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig4, plot41, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig4, plot42, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot43, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot44, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot45, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot46, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot47, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot48, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig4, plot49, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
# axislegend(subfig4, position = :lb)
subfig5 = GLMakie.Axis(fig1[5, 1], # define position of this subfigure inside a figure
title = "output neuron vt",
xlabel = "time",
ylabel = "data"
)
lines!(subfig5, plot50, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig5, plot51, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig5, plot52, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig5, plot53, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig5, plot54, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
lines!(subfig5, plot55, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot56, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot57, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot58, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig5, plot59, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
# # axislegend(subfig5, position = :lb)
# subfig6 = GLMakie.Axis(fig1[6, 1], # define position of this subfigure inside a figure
# title = "output neuron wRecChange",
# xlabel = "time",
# ylabel = "data"
# )
# lines!(subfig6, plot60, label = "0", color = 1, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot61, label = "1", color = 2, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot62, label = "2", color = 3, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot63, label = "3", color = 4, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot64, label = "4", color = 5, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot65, label = "5", color = 6, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot66, label = "6", color = 7, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot67, label = "7", color = 8, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot68, label = "8", color = 9, colormap = :tab10, colorrange = (1, 10) )
# lines!(subfig6, plot69, label = "9", color = 10, colormap = :tab10, colorrange = (1, 10))
# axislegend(subfig6, position = :lb)
# wait(display(fig1))
display(fig1)
# --------------------------------- end plot --------------------------------- #
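# added note: the plotting above uses GLMakie's standard Observable pattern —
# lines!(ax, obs) binds a curve to an Observable, and assigning obs[] = newdata
# later redraws it in place, e.g.
#   o = Observable(zeros(2)); lines!(subfig1, o); o[] = rand(100)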
# model learning
thinkingPeriod = 16 # answer window after the input sequence (full sMNIST budget would be 1000-784 = 216; 16 used here)
bestAccuracy = 0.0
finalAnswer = nothing # store model prediction in (logit of choices, batch)
stop = 0
for epoch = 1:1000
stop == 1 ? break : false
println("epoch $epoch")
n = length(trainData)
println("n $n")
p = Progress(n, dt=1.0) # minimum update interval: 1 second
for (imgBatch, labels) in trainData # imgBatch(28, 28, 4) i.e. (row, col, batch), labels(label, batch)
stop == 1 ? break : false
consecutiveCorrect = 0
rep = 0
vt0 = 0.0 # store vt to compute learning progress
# for rep in 1:20
while consecutiveCorrect < 10
rep += 1
stop == 1 ? break : false
# prepare image into input signal, e.g. (10, 10, 784, 1) i.e. (row, col, timestep, batch)
signal = dualTrackSpikeGen(imgBatch, [0.05, 0.1, 0.2, 0.3, 0.5], noise=(true, 1, 0.2), copies=8)
if length(size(signal)) == 3
row, col, sequence = size(signal)
batch = 1
else
row, col, sequence, batch = size(signal)
end
# encode labels
correctAnswer_array = onehotbatch(labels, labelDict) # (correctAnswer, batch)
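# added example: onehotbatch([5], 0:9) returns a 10x1 OneHotMatrix with a
# single 1 in row 6 (labels are 0-based, rows 1-based)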
correctAnswer_number = labels[1]
label_gpu = labels[1] |> device
# insert data into the model sequentially
for timestep in 1:(sequence + thinkingPeriod) # sMNIST has 784 timestep(pixel) + thinking period = 1000 timestep
if timestep <= sequence
current_pixel = view(signal, :, :, timestep, :) |> device
else
current_pixel = zeros(row, col, batch) |> device # dummy input in "thinking" period
end
if timestep == 1 # tell the model to start learning, one time only
model.learningStage = [1]
finalAnswer = nothing
elseif timestep == (sequence+thinkingPeriod)
model.learningStage = [3]
else
end
# predict
logit, _ = model(current_pixel)
# log answer of all timestep
logitLog = [logitLog;; cpu(logit)]
var1 = [var1;; reshape(sum(cpu(model.lif_epsilonRec)[:,:,1:3,1], dims=(1,2)), (:, 1))]
var2 = [var2;; reshape(cpu(model.lif_vt)[1,1,1:3,1], (:, 1))]
var3 = [var3;; reshape(cpu(model.on_vt)[1,1,1:6,1], (:, 1))]
# var4 = [var4;; 0]
if timestep < sequence # online learning, 1-by-1 timestep
# correctAnswer_array0 = (similar(correctAnswer_array) .= 0)
# logit_cpu = logit |> cpu
# finalAnswer = finalAnswer === nothing ? logit : finalAnswer .+ logit # (logit, batch)
# finalAnswer_cpu = finalAnswer |> cpu
# modelError, outputError, vt1, progress =
# loss(vt0, logit_cpu, finalAnswer_cpu, correctAnswer_array0, correctAnswer_number)
# modelError_gpu = ([modelError] .= 0) |> device
# outputError_gpu = outputError |> device
# IronpenGPU.compute_paramsChange!(model, modelError_gpu, outputError_gpu, label_gpu)
elseif timestep == sequence # online learning, 1-by-1 timestep
# correctAnswer_array0 = (similar(correctAnswer_array) .= 0)
# logit_cpu = logit |> cpu
# finalAnswer = finalAnswer === nothing ? logit : finalAnswer .+ logit # (logit, batch)
# finalAnswer_cpu = finalAnswer |> cpu
# modelError, outputError, vt1, progress =
# loss(vt0, logit_cpu, finalAnswer_cpu, correctAnswer_array0, correctAnswer_number)
# modelError_gpu = ([modelError] .= 0) |> device
# outputError_gpu = outputError |> device
# IronpenGPU.compute_paramsChange!(model, modelError_gpu, outputError_gpu, label_gpu)
# answer time windows, collect logit to get finalAnswer
elseif timestep > sequence && timestep < sequence+thinkingPeriod
logit_cpu = logit |> cpu
finalAnswer = finalAnswer === nothing ? logit : finalAnswer .+ logit # (logit, batch)
finalAnswer_cpu = finalAnswer |> cpu
modelError, outputError, vt1, progress =
loss(vt0, logit_cpu, finalAnswer_cpu, correctAnswer_array, correctAnswer_number)
modelError_gpu = [modelError] |> device
outputError_gpu = outputError |> device
IronpenGPU.compute_paramsChange!(model, modelError_gpu, outputError_gpu, label_gpu)
elseif timestep == sequence+thinkingPeriod #TODO update code
logit_cpu = logit |> cpu
finalAnswer = finalAnswer === nothing ? logit : finalAnswer .+ logit # (logit, batch)
finalAnswer_cpu = finalAnswer |> cpu
modelError, outputError, vt1, progress =
loss(vt0, logit_cpu, finalAnswer_cpu, correctAnswer_array, correctAnswer_number)
vt0 = vt1
modelError_gpu = [modelError] |> device
outputError_gpu = outputError |> device
IronpenGPU.compute_paramsChange!(model, modelError_gpu, outputError_gpu, label_gpu)
lif_wRecChange_cpu = model.lif_wRecChange |> cpu
lif_recSignal_cpu = model.lif_recSignal |> cpu
lif_recSignal_cpu = sum(lif_recSignal_cpu[:,:,5,1])
lif_vt_cpu = model.lif_vt |> cpu
lif_vt_cpu = lif_vt_cpu[1,1,5,1]
lif_zt_cpu = model.lif_zt |> cpu
lif_zt_cpu = lif_zt_cpu[1,1,5,1]
lif_epsilonRec_cpu = model.lif_epsilonRec |> cpu
lif_epsilonRec_cpu = sum(lif_epsilonRec_cpu[:,:,5,1])
lif_wRecChange_cpu = sum(lif_wRecChange_cpu[:,:,5,1])
on_vt_cpu = model.on_vt |> cpu
on_vt_cpu = on_vt_cpu[1,1,:,1]
on_zt_cpu = model.on_zt |> cpu
on_zt_cpu = on_zt_cpu[1,1,:,1]
on_wOutChange_cpu = model.on_wOutChange |> cpu
on_wOutChange_cpu = sum(on_wOutChange_cpu, dims=(1,2))
println("")
println("lif recSignal $lif_recSignal_cpu lif vt $lif_vt_cpu lif zt $lif_zt_cpu lif_epsilonRec_cpu $lif_epsilonRec_cpu lif_wRecChange_cpu $lif_wRecChange_cpu on_vt $on_vt_cpu on_zt $on_zt_cpu on_wOutChange_cpu $on_wOutChange_cpu vt0 $vt0")
# commit learned weights; note that learn! is currently applied in every branch below, not only on incorrect answers
finalAnswer_cpu = finalAnswer |> cpu
# println("label $(labels[1]) finalAnswer $finalAnswer_cpu")
max = isequal.(finalAnswer_cpu[:,1], maximum(finalAnswer_cpu[:,1]))
if sum(finalAnswer_cpu) == 0
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer __ progress $progress LEARNING")
elseif sum(max) == 1 && findall(max)[1] -1 == labels[1]
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect += 1
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu progress $progress CORRECT")
elseif sum(max) == 1 && findall(max)[1] -1 != labels[1]
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu progress $progress LEARNING")
else
IronpenGPU.learn!(model, progress, device)
consecutiveCorrect = 0
println("modelname $modelname epoch $epoch rep $rep label $(labels[1]) finalAnswer $finalAnswer_cpu progress $progress LEARNING")
end
# error("DEBUG -> main $(Dates.now())")
else
error("undefined condition line $(@__LINE__)")
# error("DEBUG -> main $(Dates.now())")
end
# update plot
plot10[] = firedNeurons_t1
plot20[] = view(logitLog, 1 , :)
plot21[] = view(logitLog, 2 , :)
plot22[] = view(logitLog, 3 , :)
plot23[] = view(logitLog, 4 , :)
plot24[] = view(logitLog, 5 , :)
plot25[] = view(logitLog, 6 , :)
plot26[] = view(logitLog, 7 , :)
plot27[] = view(logitLog, 8 , :)
plot28[] = view(logitLog, 9 , :)
plot29[] = view(logitLog, 10, :)
plot30[] = view(var1, 1 , :)
plot31[] = view(var1, 2 , :)
plot32[] = view(var1, 3 , :)
# plot33[] = view(var1, 4 , :)
# plot34[] = view(var1, 5 , :)
# plot35[] = view(var1, 6 , :)
# plot36[] = view(var1, 7 , :)
# plot37[] = view(var1, 8 , :)
# plot38[] = view(var1, 9 , :)
# plot39[] = view(var1, 10, :)
plot40[] = view(var2, 1 , :)
plot41[] = view(var2, 2 , :)
plot42[] = view(var2, 3 , :)
# plot43[] = view(var2, 4 , :)
# plot44[] = view(var2, 5 , :)
# plot45[] = view(var2, 6 , :)
# plot46[] = view(var2, 7 , :)
# plot47[] = view(var2, 8 , :)
# plot48[] = view(var2, 9 , :)
# plot49[] = view(var2, 10, :)
plot50[] = view(var3, 1 , :)
plot51[] = view(var3, 2 , :)
plot52[] = view(var3, 3 , :)
plot53[] = view(var3, 4 , :)
plot54[] = view(var3, 5 , :)
plot55[] = view(var3, 6 , :)
# plot56[] = view(var3, 7 , :)
# plot57[] = view(var3, 8 , :)
# plot58[] = view(var3, 9 , :)
# plot59[] = view(var3, 10, :)
# plot60[] = view(var4, 1 , :)
# plot61[] = view(var4, 2 , :)
# plot62[] = view(var4, 3 , :)
# plot63[] = view(var4, 4 , :)
# plot64[] = view(var4, 5 , :)
# plot65[] = view(var4, 6 , :)
# plot66[] = view(var4, 7 , :)
# plot67[] = view(var4, 8 , :)
# plot68[] = view(var4, 9 , :)
# plot69[] = view(var4, 10, :)
end
# end-thinkingPeriod+2; +2 because logitLog was initialized as zeros(10, 2)
# _modelRespond = logitLog[:, end-thinkingPeriod+2:end] # answer count during thinking period
# _modelRespond = [sum(i) for i in eachrow(_modelRespond)]
# modelRespond = isequal.(isequal.(_modelRespond, 0), 0)
display(fig1)
sleep(1)
if rep % 3 == 0
firedNeurons_t1 = zeros(1)
logitLog = zeros(10, 2)
var1 = zeros(3, 1)
var2 = zeros(3, 1)
var3 = zeros(6, 1)
# var4 = zeros(10, 2)
end
end
next!(p)
end
if epoch > 200
# check accuracy
println("validating model")
percentCorrect = validate(model, validateData, labelDict)
bestAccuracy = percentCorrect > bestAccuracy ? percentCorrect : bestAccuracy
println("$modelname model accuracy is $percentCorrect %, best accuracy is $bestAccuracy")
end
end
end
function validate(model, dataset, labelDict)
totalAnswerCorrectly = 0 # score
totalSignal = 0
thinkingPeriod = 16 # answer window after the input sequence (full sMNIST budget would be 1000-784 = 216; 16 used here)
predict = [0] |> device
n = length(dataset)
println("n $n")
p = Progress(n, dt=1.0) # minimum update interval: 1 second
for (imgBatch, labels) in dataset
signal = spikeGenerator(imgBatch, [0.05, 0.1, 0.2, 0.3, 0.5], noise=(true, 1, 0.5), copies=18)
if length(size(signal)) == 3
row, col, sequence = size(signal)
batch = 1
else
row, col, sequence, batch = size(signal)
end
# encode labels
correctAnswer = onehotbatch(labels, labelDict) # (choices, batch)
# insert data into the model sequentially
for timestep in 1:(sequence + thinkingPeriod) # sMNIST has 784 timestep(pixel) + thinking period = 1000 timestep
if timestep <= sequence
current_pixel = view(signal, :, :, timestep, :) |> device
else
current_pixel = zeros(row, col, batch) |> device # dummy input in "thinking" period
end
if timestep == 1 # reset the prediction accumulator, one time only
predict = [0] |> device
elseif timestep == (sequence+thinkingPeriod)
else
end
# predict
logit, _ = model(current_pixel)
if timestep < sequence # online learning, 1-by-1 timestep
# no error calculation
elseif timestep == sequence # online learning, 1-by-1 timestep
# no error calculation
elseif timestep > sequence && timestep < sequence+thinkingPeriod # collect answer
predict = length(predict) == 1 ? logit : predict .+ logit # (logit, batch)
elseif timestep == sequence+thinkingPeriod
predict = length(predict) == 1 ? logit : predict .+ logit # (logit, batch)
else
error("undefined condition line $(@__LINE__)")
end
end
predict_cpu = predict |> cpu
_predict_label = mapslices(GeneralUtils.vectorMax, predict_cpu; dims=1)
s = sum(_predict_label, dims=1)
if 0 ∉ s # guard: skip batches where a column produced no predicted max
predict_label = []
for i in eachcol(_predict_label)
_label = findall(i) .- 1
if length(_label) == 1
append!(predict_label, _label)
else
push!(predict_label, -1) # predict more than 1 label. add non-count label.
end
end
answerCorrectly = sum([x == y for (x,y) in zip(predict_label, labels)])
totalAnswerCorrectly += answerCorrectly
totalSignal += batch
end
next!(p)
end
percentCorrect = totalAnswerCorrectly * 100.0 / totalSignal
return percentCorrect::Float64
end
function dualTrackSpikeGen(inputsignals, thresholds=[1.0]; noise=(false, 1, 0.5), copies=0)
rowInputSignal = nothing
colInputSignal = nothing
for slice in eachslice(inputsignals, dims=3)
srow = nothing
scol = nothing
for row in eachrow(slice)
srow = srow === nothing ? row : cat(srow, row, dims=1)
end
for col in eachcol(slice)
scol = scol === nothing ? col : cat(scol, col, dims=1)
end
rowInputSignal = rowInputSignal === nothing ? srow : cat(rowInputSignal, srow, dims=3)
colInputSignal = colInputSignal === nothing ? scol : cat(colInputSignal, scol, dims=3)
end
rowInputSignal = reshape(rowInputSignal, (size(rowInputSignal, 1), 1, size(inputsignals, 3)))
colInputSignal = reshape(colInputSignal, (size(colInputSignal, 1), 1, size(inputsignals, 3)))
rowInputSignal = spikeGenerator(rowInputSignal, thresholds, noise=noise, copies=3)
colInputSignal = spikeGenerator(colInputSignal, thresholds, noise=noise, copies=3)
signal = cat(rowInputSignal, colInputSignal, dims=2)
return signal
end
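# added dimension walk-through for dualTrackSpikeGen: with the 5-threshold call
# used in train_snn, each track gives 5 thresholds x 2 polarities = 10 rows;
# columns are 1 signal + 3 copies (the internal spikeGenerator calls hardcode
# copies=3, so the caller's copies keyword is effectively ignored) + 1 noise = 5,
# and the two tracks concatenated along dim 2 yield (10, 10, timesteps, batch),
# matching signalInput_portnumbers = (10, 10, imageBatch).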
""" inputsignals is normal column-major julia matrix in (row, col, batch) dimension
- each threshold scan return 2 vectors. 1 for +, 1 for -
- noise = (true/false, row, col, probability)
"""
function spikeGenerator(inputsignals, thresholds=[1.0]; noise=(false, 1, 0.5), copies=0)
s = length(size(inputsignals))
ar = [] # holding all signals that are scanned
for slice in eachslice(inputsignals, dims=s)
signal_jl = reshape(slice, (:, 1)) # python array is row-major
signal_pytensor = torch.from_numpy( np.asarray(signal_jl) )
arr = [] # holding signal that is scanned by several thresholds
for threshold in thresholds
spike_py = spikegen.delta(signal_pytensor, threshold=threshold, off_spike=true)
_spike_jl = pyconvert(Array, spike_py.data.numpy())
spike_jl = reshape(_spike_jl, (1, :)) # reshape back to julia's column-major
spike_jl1 = isequal.(spike_jl, 1)
spike_jl2 = isequal.(spike_jl, -1)
arr = length(arr) == 0 ? [spike_jl1; spike_jl2] : [arr; spike_jl1; spike_jl2]
end
arrSize = [size(arr)...]
arr = reshape(arr, (arrSize[1], 1, arrSize[2])) # reshape into (row, 1, timestep)
# multiply col
if copies > 0
a = deepcopy(arr)
for i in 1:copies
arr = cat(arr, a, dims=2)
end
end
if noise[1] == true
arrSize = [size(arr)...]
n = noiseGenerator(arrSize[1], noise[2], arrSize[3], prob=noise[3])
arr = cat(arr, n, dims=2) # concatenate into (row, signal:noise, timestep)
end
# concatenate into (row, signal:noise, timestep, batch)
ar = length(ar) == 0 ? arr : [ar;;;;arr]
end
return ar
end
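# added sketch (assumption about snntorch semantics: spikegen.delta with
# off_spike=true emits +1 when the signal rises by >= threshold between
# consecutive samples and -1 when it falls by >= threshold). A rough
# pure-Julia equivalent of one threshold scan, for reference only:
function delta_spikes_sketch(x::AbstractVector, th)
    d = diff(x)                 # sample-to-sample change
    pos = [false; d .>= th]     # on-spikes (+1 channel)
    neg = [false; d .<= -th]    # off-spikes (-1 channel)
    return pos, neg
end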
function noiseGenerator(row, col, z; prob=0.5)
spike_prob = torch.rand(row, col, z) * prob
spike_rand = spikegen.rate_conv(spike_prob)
noise = isequal.(pyconvert(Array, spike_rand.data.numpy()), 1)
return noise
end
# function loss(vt0::AbstractFloat, vt1::AbstractArray, logit::AbstractArray,
# finalAnswer, correctAnswer_array, correctAnswer_number)
# labelPosition = correctAnswer_number +1
# # get vt of correct neuron
# vt1 = vt1[labelPosition]
# # get zt of correct neuron
# zt = finalAnswer[labelPosition]
# rsnnError = nothing
# outputError = nothing
# progress = nothing
# """ the idea is if the correct output neuron fires, -w other output neurons should be
# enough.
# However if correct output neuron doesn't fire, +w along RSNN neural pathway
# and
# """
# if zt > 0
# progress = 2
# rsnnError = 0 # already correct, no weight update
# outputError = correctAnswer_array .- finalAnswer
# outputError[labelPosition] = 0 # already correct, no weight update
# elseif vt1 > vt0 # progress increase
# progress = 1
# rsnnError = 1 - vt1
# outputError = correctAnswer_array .- finalAnswer
# elseif vt1 == vt0 # no progress, let RSNN try new pathway
# rsnnError = 0
# progress = 0
# outputError = (finalAnswer .= 0)
# elseif vt1 < vt0 # setback,
# rsnnError = vt0 - vt1
# progress = -1
# outputError = correctAnswer_array .- finalAnswer
# else
# error("undefined condition zt $zt, vt1 $vt1 vt0 $vt0")
# end
# return rsnnError, outputError, vt1, progress
# end
function loss(vt0::AbstractFloat, logit::AbstractArray,
finalAnswer, correctAnswer_array, correctAnswer_number)
labelPosition = correctAnswer_number +1
# get vt of correct neuron
vt1 = softmax(finalAnswer)[labelPosition]
l = reverse(sort(softmax(finalAnswer), dims=1)) # sort from high to low value
rsnnError = nothing
outputError = nothing
progress = nothing
""" the idea is if the correct output neuron fires, -w other output neurons should be
enough.
However if correct output neuron doesn't fire, +w along RSNN neural pathway
and
"""
if vt1 == l[1] && vt1 - l[2] >= 0.3
progress = 2
rsnnError = 0 # already correct, no weight update
outputError = correctAnswer_array .- softmax(finalAnswer)
elseif vt1 == l[1] && vt1 - l[2] < 0.3 && vt1 - vt0 > 0 # progress increase
progress = 1
rsnnError = 1 - vt1
outputError = correctAnswer_array .- softmax(finalAnswer)
elseif vt1 == l[1] && vt1 - l[2] < 0.3 && vt1 - vt0 < 0
progress = -1
rsnnError = vt0 - vt1
# rsnnError = correctAnswer_array[labelPosition] - logit[labelPosition] #TESTING may be logit[labelPosition] - correctAnswer_array[labelPosition]
outputError = correctAnswer_array .- softmax(finalAnswer)
elseif vt1 == l[1] && vt1 - vt0 == 0
progress = 1
rsnnError = 1 - vt1
outputError = correctAnswer_array .- softmax(finalAnswer)
elseif vt1 != l[1] && vt1 - vt0 > 0
progress = 1
rsnnError = 1 - vt1
outputError = correctAnswer_array .- softmax(finalAnswer)
elseif vt1 != l[1] && vt1 - vt0 < 0
progress = -1
rsnnError = vt0 - vt1
outputError = correctAnswer_array .- softmax(finalAnswer)
elseif vt1 != l[1] && vt1 - vt0 == 0
progress = 0
rsnnError = 0
outputError = (finalAnswer .= 0)
else
error("undefined condition finalAnswer $finalAnswer labelPosition $labelPosition vt1 $vt1 vt0 $vt0")
end
return rsnnError, outputError, vt1, progress
end
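# added usage sketch for loss() above (single-sample toy, hypothetical values):
#   ca = onehotbatch([3], 0:9)                       # correct answer, label 3
#   fa = rand(10, 1)                                 # accumulated logits so far
#   rsnnError, outputError, vt1, progress = loss(0.0, fa, copy(fa), ca, 3)
#   # progress in (-1, 0, 1, 2) then gates IronpenGPU.learn!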
# function outputloss()
# end
# function arrayMax(x)
# if sum(GeneralUtils.isNotEqual.(x, 0)) == 0 # guard against all-zeros array
# return GeneralUtils.isNotEqual.(x, 0)
# else
# return isequal.(x, maximum(x))
# end
# end
# arraySliceMax(x) = mapslices(arrayMax, x; dims=1)
function main()
filelocation = string(@__DIR__)
filename = "$modelname.jl163"
training_start_time = Dates.now()
println("$modelname program started $training_start_time")
model = generate_snn(filename, filelocation)
trainDataset, validateDataset, labelDict = data_loader()
train_snn(model, trainDataset, validateDataset, labelDict)
finish_training_time = Dates.now()
println("training done, $training_start_time ==> $finish_training_time ")
println(" ///////////////////////////////////////////////////////////////////////")
end
# only run main() if Julia isn't started interactively
# https://discourse.julialang.org/t/scripting-like-a-julian/50707
!isinteractive() && main() # include("main_gpu_0.jl"); main() at julia prompt
#------------------------------------------------------------------------------------------------100


@@ -27,12 +27,7 @@ using .interface
 """ version 0.0.11
 Todo:
-[] growRepeatedPath!(): instead of the synapse with 20% less activity count getting -w, maybe I
-   should rank synapses by activity count from the highest performing synapse to the lowest
-   and give the last 20% of the rank -w
-[-] add temporal summation in addition to the already used spatial summation.
-    CANCELLED, spatial summation every second until the membrane potential reaches a threshold
-    is in itself a temporal summation.
+[] make output neurons draw connections randomly
 [4] implement variable dormant connection and pruning mechanism. the longer the training, the longer
     a 0 weight stays 0.
 [] using RL to control learning signal
@@ -41,7 +36,10 @@ using .interface
     which is defined by the neuron.tau_m formula in type.jl
 Change from version: 0.0.10
--
+- growRepeatedPath!(): instead of the synapse with 60% less activity count getting -w, maybe I
+  should rank synapses by activity count from the highest performing synapse to the lowest
+  and give the last 60% of the rank -w
+- 10% instead of 20% synapticConnectionPercent
 """

src/Manifest.toml Normal file (946 lines)

@@ -0,0 +1,946 @@
# This file is machine-generated - editing it directly is not advised
julia_version = "1.9.3"
manifest_format = "2.0"
project_hash = "844808a02b2a30acdc69d975773e029da0ec81b8"
[[deps.AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "8bc0aaec0ca548eb6cf5f0d7d16351650c1ee956"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.3.2"
weakdeps = ["ChainRulesCore"]
[deps.AbstractFFTs.extensions]
AbstractFFTsChainRulesCoreExt = "ChainRulesCore"
[[deps.Adapt]]
deps = ["LinearAlgebra", "Requires"]
git-tree-sha1 = "76289dc51920fdc6e0013c872ba9551d54961c24"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.6.2"
weakdeps = ["StaticArrays"]
[deps.Adapt.extensions]
AdaptStaticArraysExt = "StaticArrays"
[[deps.ArgCheck]]
git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4"
uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197"
version = "2.3.0"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Atomix]]
deps = ["UnsafeAtomics"]
git-tree-sha1 = "c06a868224ecba914baa6942988e2f2aade419be"
uuid = "a9b6321e-bd34-4604-b9c9-b65b8de01458"
version = "0.1.0"
[[deps.BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "dbf84058d0a8cbbadee18d25cf606934b22d7c66"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.4.2"
[[deps.BangBang]]
deps = ["Compat", "ConstructionBase", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables"]
git-tree-sha1 = "e28912ce94077686443433c2800104b061a827ed"
uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66"
version = "0.3.39"
[deps.BangBang.extensions]
BangBangChainRulesCoreExt = "ChainRulesCore"
BangBangDataFramesExt = "DataFrames"
BangBangStaticArraysExt = "StaticArrays"
BangBangStructArraysExt = "StructArrays"
BangBangTypedTablesExt = "TypedTables"
[deps.BangBang.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
TypedTables = "9d95f2ec-7b3d-5a63-8d20-e2491e220bb9"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.Baselet]]
git-tree-sha1 = "aebf55e6d7795e02ca500a689d326ac979aaf89e"
uuid = "9718e550-a3fa-408a-8086-8db961cd8217"
version = "0.1.1"
[[deps.CEnum]]
git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.2"
[[deps.CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CUDA_Driver_jll", "CUDA_Runtime_Discovery", "CUDA_Runtime_jll", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "KernelAbstractions", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Preferences", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "UnsafeAtomicsLLVM"]
git-tree-sha1 = "442d989978ed3ff4e174c928ee879dc09d1ef693"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "4.3.2"
[[deps.CUDA_Driver_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"]
git-tree-sha1 = "498f45593f6ddc0adff64a9310bb6710e851781b"
uuid = "4ee394cb-3365-5eb0-8335-949819d2adfc"
version = "0.5.0+1"
[[deps.CUDA_Runtime_Discovery]]
deps = ["Libdl"]
git-tree-sha1 = "bcc4a23cbbd99c8535a5318455dcf0f2546ec536"
uuid = "1af6417a-86b4-443c-805f-a4643ffb695f"
version = "0.2.2"
[[deps.CUDA_Runtime_jll]]
deps = ["Artifacts", "CUDA_Driver_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "5248d9c45712e51e27ba9b30eebec65658c6ce29"
uuid = "76a88914-d11a-5bdc-97e0-2f5a05c973a2"
version = "0.6.0+0"
[[deps.CUDNN_jll]]
deps = ["Artifacts", "CUDA_Runtime_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "2918fbffb50e3b7a0b9127617587afa76d4276e8"
uuid = "62b44479-cb7b-5706-934f-f13b2eb2e645"
version = "8.8.1+0"
[[deps.Calculus]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f641eb0a4f00c343bbc32346e1217b86f3ce9dad"
uuid = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9"
version = "0.5.1"
[[deps.ChainRules]]
deps = ["Adapt", "ChainRulesCore", "Compat", "Distributed", "GPUArraysCore", "IrrationalConstants", "LinearAlgebra", "Random", "RealDot", "SparseArrays", "Statistics", "StructArrays"]
git-tree-sha1 = "1cdf290d4feec68824bfb84f4bfc9f3aba185647"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "1.51.1"
[[deps.ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "e30f2f4e20f7f186dc36529910beaedc60cfa644"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.16.0"
[[deps.CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[deps.Compat]]
deps = ["UUIDs"]
git-tree-sha1 = "4e88377ae7ebeaf29a047aa1ee40826e0b708a5d"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "4.7.0"
weakdeps = ["Dates", "LinearAlgebra"]
[deps.Compat.extensions]
CompatLinearAlgebraExt = "LinearAlgebra"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "1.0.5+0"
[[deps.CompositionsBase]]
git-tree-sha1 = "802bb88cd69dfd1509f6670416bd4434015693ad"
uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b"
version = "0.1.2"
[deps.CompositionsBase.extensions]
CompositionsBaseInverseFunctionsExt = "InverseFunctions"
[deps.CompositionsBase.weakdeps]
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.CondaPkg]]
deps = ["JSON3", "Markdown", "MicroMamba", "Pidfile", "Pkg", "TOML"]
git-tree-sha1 = "741146cf2ced5859faae76a84b541aa9af1a78bb"
uuid = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
version = "0.2.18"
[[deps.ConstructionBase]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "738fec4d684a9a6ee9598a8bfee305b26831f28c"
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
version = "1.5.2"
[deps.ConstructionBase.extensions]
ConstructionBaseIntervalSetsExt = "IntervalSets"
ConstructionBaseStaticArraysExt = "StaticArrays"
[deps.ConstructionBase.weakdeps]
IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
[[deps.ContextVariablesX]]
deps = ["Compat", "Logging", "UUIDs"]
git-tree-sha1 = "25cc3803f1030ab855e383129dcd3dc294e322cc"
uuid = "6add18c4-b38d-439d-96f6-d6bc489c04c5"
version = "0.1.3"
[[deps.DataAPI]]
git-tree-sha1 = "8da84edb865b0b5b0100c0666a9bc9a0b71c553c"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.15.0"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "cf25ccb972fec4e4817764d01c82386ae94f77b4"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.14"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DefineSingletons]]
git-tree-sha1 = "0fba8b706d0178b4dc7fd44a96a92382c9065c2c"
uuid = "244e2a9f-e319-4986-a169-4d1fe445cd52"
version = "0.1.2"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
git-tree-sha1 = "9e2f36d3c96a820c678f2f1f1782582fcf685bae"
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
version = "1.9.1"
[[deps.DiffResults]]
deps = ["StaticArraysCore"]
git-tree-sha1 = "782dd5f4561f5d267313f23853baaaa4c52ea621"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.1.0"
[[deps.DiffRules]]
deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "23163d55f885173722d1e4cf0f6110cdbaf7e272"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.15.1"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.Distributions]]
deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "Test"]
git-tree-sha1 = "938fe2981db009f531b6332e31c58e9584a2f9bd"
uuid = "31c24e10-a181-5473-b8eb-7969acd0382f"
version = "0.25.100"
[deps.Distributions.extensions]
DistributionsChainRulesCoreExt = "ChainRulesCore"
DistributionsDensityInterfaceExt = "DensityInterface"
[deps.Distributions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DensityInterface = "b429d917-457f-4dbc-8f4c-0cc954292b1d"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.9.3"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
version = "1.6.0"
[[deps.DualNumbers]]
deps = ["Calculus", "NaNMath", "SpecialFunctions"]
git-tree-sha1 = "5837a837389fccf076445fce071c8ddaea35a566"
uuid = "fa6b7ba4-c1ee-5f82-b5fc-ecf0adba8f74"
version = "0.6.8"
[[deps.ExprTools]]
git-tree-sha1 = "c1d06d129da9f55715c6c212866f5b1bddc5fa00"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.9"
[[deps.FLoops]]
deps = ["BangBang", "Compat", "FLoopsBase", "InitialValues", "JuliaVariables", "MLStyle", "Serialization", "Setfield", "Transducers"]
git-tree-sha1 = "ffb97765602e3cbe59a0589d237bf07f245a8576"
uuid = "cc61a311-1640-44b5-9fba-1b764f453329"
version = "0.2.1"
[[deps.FLoopsBase]]
deps = ["ContextVariablesX"]
git-tree-sha1 = "656f7a6859be8673bf1f35da5670246b923964f7"
uuid = "b9860ae5-e623-471e-878b-f6a53c775ea6"
version = "0.1.1"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[deps.FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "0b3b52afd0f87b0a3f5ada0466352d125c9db458"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "1.2.1"
[[deps.Flux]]
deps = ["Adapt", "CUDA", "ChainRulesCore", "Functors", "LinearAlgebra", "MLUtils", "MacroTools", "NNlib", "NNlibCUDA", "OneHotArrays", "Optimisers", "Preferences", "ProgressLogging", "Random", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "Zygote", "cuDNN"]
git-tree-sha1 = "3e2c3704c2173ab4b1935362384ca878b53d4c34"
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.13.17"
[deps.Flux.extensions]
AMDGPUExt = "AMDGPU"
FluxMetalExt = "Metal"
[deps.Flux.weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
[[deps.ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"]
git-tree-sha1 = "00e252f4d706b3d55a8863432e742bf5717b498d"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.35"
weakdeps = ["StaticArrays"]
[deps.ForwardDiff.extensions]
ForwardDiffStaticArraysExt = "StaticArrays"
[[deps.Functors]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "478f8c3145bb91d82c2cf20433e8c1b30df454cc"
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
version = "0.4.4"
[[deps.Future]]
deps = ["Random"]
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
[[deps.GPUArrays]]
deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"]
git-tree-sha1 = "2e57b4a4f9cc15e85a24d603256fe08e527f48d1"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "8.8.1"
[[deps.GPUArraysCore]]
deps = ["Adapt"]
git-tree-sha1 = "2d6ca471a6c7b536127afccfa7564b5b39227fe0"
uuid = "46192b85-c4d5-4398-a991-12ede77f4527"
version = "0.1.5"
[[deps.GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Scratch", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "cb090aea21c6ca78d59672a7e7d13bd56d09de64"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.20.3"
[[deps.GeneralUtils]]
deps = ["CUDA", "DataStructures", "Distributions", "Flux", "JSON3", "Random"]
path = "C:\\Users\\pitak\\.julia\\dev\\GeneralUtils"
uuid = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
version = "0.1.0"
[[deps.HypergeometricFunctions]]
deps = ["DualNumbers", "LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"]
git-tree-sha1 = "f218fe3736ddf977e0e772bc9a586b2383da2685"
uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a"
version = "0.3.23"
[[deps.IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "eac00994ce3229a464c2847e956d77a2c64ad3a5"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.4.10"
[[deps.InitialValues]]
git-tree-sha1 = "4da0f88e9a39111c2fa3add390ab15f3a44f3ca3"
uuid = "22cec73e-a1b8-11e9-2c92-598750a2cf9c"
version = "0.3.1"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.IrrationalConstants]]
git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.2.2"
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[deps.JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.4.1"
[[deps.JSON3]]
deps = ["Dates", "Mmap", "Parsers", "PrecompileTools", "StructTypes", "UUIDs"]
git-tree-sha1 = "5b62d93f2582b09e469b3099d839c2d2ebf5066d"
uuid = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
version = "1.13.1"
[[deps.JuliaVariables]]
deps = ["MLStyle", "NameResolution"]
git-tree-sha1 = "49fb3cb53362ddadb4415e9b73926d6b40709e70"
uuid = "b14d175d-62b4-44ba-8fb7-3064adc8c3ec"
version = "0.2.4"
[[deps.KernelAbstractions]]
deps = ["Adapt", "Atomix", "InteractiveUtils", "LinearAlgebra", "MacroTools", "PrecompileTools", "SparseArrays", "StaticArrays", "UUIDs", "UnsafeAtomics", "UnsafeAtomicsLLVM"]
git-tree-sha1 = "b48617c5d764908b5fac493cd907cf33cc11eec1"
uuid = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
version = "0.9.6"
[[deps.LLVM]]
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "5007c1421563108110bbd57f63d8ad4565808818"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "5.2.0"
[[deps.LLVMExtra_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"]
git-tree-sha1 = "1222116d7313cdefecf3d45a2bc1a89c4e7c9217"
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
version = "0.0.22+0"
[[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.84.0+0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.LinearAlgebra]]
deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "c3ce8e7420b3a6e071e0fe4745f5d4300e37b13f"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.24"
[deps.LogExpFunctions.extensions]
LogExpFunctionsChainRulesCoreExt = "ChainRulesCore"
LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables"
LogExpFunctionsInverseFunctionsExt = "InverseFunctions"
[deps.LogExpFunctions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.MLStyle]]
git-tree-sha1 = "bc38dff0548128765760c79eb7388a4b37fae2c8"
uuid = "d8e11817-5142-5d16-987a-aa16d5891078"
version = "0.4.17"
[[deps.MLUtils]]
deps = ["ChainRulesCore", "Compat", "DataAPI", "DelimitedFiles", "FLoops", "NNlib", "Random", "ShowCases", "SimpleTraits", "Statistics", "StatsBase", "Tables", "Transducers"]
git-tree-sha1 = "3504cdb8c2bc05bde4d4b09a81b01df88fcbbba0"
uuid = "f1d291b0-491e-4a28-83b9-f70985020b54"
version = "0.4.3"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.10"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.2+0"
[[deps.MicroCollections]]
deps = ["BangBang", "InitialValues", "Setfield"]
git-tree-sha1 = "629afd7d10dbc6935ec59b32daeb33bc4460a42e"
uuid = "128add7d-3638-4c79-886c-908ea0c25c34"
version = "0.1.4"
[[deps.MicroMamba]]
deps = ["Pkg", "Scratch", "micromamba_jll"]
git-tree-sha1 = "011cab361eae7bcd7d278f0a7a00ff9c69000c51"
uuid = "0b3b1443-0f03-428d-bdfb-f27f9c1191ea"
version = "0.1.14"
[[deps.Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "f66bdc5de519e8f8ae43bdc598782d35a25b1272"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.1.0"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.10.11"
[[deps.NNlib]]
deps = ["Adapt", "Atomix", "ChainRulesCore", "GPUArraysCore", "KernelAbstractions", "LinearAlgebra", "Pkg", "Random", "Requires", "Statistics"]
git-tree-sha1 = "72240e3f5ca031937bd536182cb2c031da5f46dd"
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
version = "0.8.21"
[deps.NNlib.extensions]
NNlibAMDGPUExt = "AMDGPU"
[deps.NNlib.weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
[[deps.NNlibCUDA]]
deps = ["Adapt", "CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics", "cuDNN"]
git-tree-sha1 = "f94a9684394ff0d325cc12b06da7032d8be01aaf"
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
version = "0.2.7"
[[deps.NaNMath]]
deps = ["OpenLibm_jll"]
git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "1.0.2"
[[deps.NameResolution]]
deps = ["PrettyPrint"]
git-tree-sha1 = "1a0fa0e9613f46c9b8c11eee38ebb4f590013c5e"
uuid = "71a1bf82-56d0-4bbc-8a3c-48b961074391"
version = "0.1.5"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
version = "1.2.0"
[[deps.OneHotArrays]]
deps = ["Adapt", "ChainRulesCore", "Compat", "GPUArraysCore", "LinearAlgebra", "NNlib"]
git-tree-sha1 = "5e4029759e8699ec12ebdf8721e51a659443403c"
uuid = "0b1bfda6-eb8a-41d2-88d8-f5af5cad476f"
version = "0.2.4"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.21+4"
[[deps.OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
version = "0.8.1+0"
[[deps.OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[deps.Optimisers]]
deps = ["ChainRulesCore", "Functors", "LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "6a01f65dd8583dee82eecc2a19b0ff21521aa749"
uuid = "3bd65402-5787-11e9-1adc-39752487f4e2"
version = "0.2.18"
[[deps.OrderedCollections]]
git-tree-sha1 = "d321bf2de576bf25ec4d3e4360faca399afca282"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.6.0"
[[deps.PDMats]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "67eae2738d63117a196f497d7db789821bce61d1"
uuid = "90014a1f-27ba-587c-ab20-58faa44d9150"
version = "0.11.17"
[[deps.Parsers]]
deps = ["Dates", "PrecompileTools", "UUIDs"]
git-tree-sha1 = "4b2e829ee66d4218e0cef22c0a64ee37cf258c29"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.7.1"
[[deps.Pidfile]]
deps = ["FileWatching", "Test"]
git-tree-sha1 = "2d8aaf8ee10df53d0dfb9b8ee44ae7c04ced2b03"
uuid = "fa939f87-e72e-5be4-a000-7fc836dbe307"
version = "1.3.0"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.9.2"
[[deps.PrecompileTools]]
deps = ["Preferences"]
git-tree-sha1 = "9673d39decc5feece56ef3940e5dafba15ba0f81"
uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
version = "1.1.2"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "7eb1686b4f04b82f96ed7a4ea5890a4f0c7a09f1"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.4.0"
[[deps.PrettyPrint]]
git-tree-sha1 = "632eb4abab3449ab30c5e1afaa874f0b98b586e4"
uuid = "8162dcfd-2161-5ef2-ae6c-7681170c5f98"
version = "0.2.0"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.ProgressLogging]]
deps = ["Logging", "SHA", "UUIDs"]
git-tree-sha1 = "80d919dee55b9c50e8d9e2da5eeafff3fe58b539"
uuid = "33c8b6b6-d38a-422a-b730-caa89a2f386c"
version = "0.1.4"
[[deps.PythonCall]]
deps = ["CondaPkg", "Dates", "Libdl", "MacroTools", "Markdown", "Pkg", "REPL", "Requires", "Serialization", "Tables", "UnsafePointers"]
git-tree-sha1 = "70af6bdbde63d7d0a4ea99f3e890ebdb55e9d464"
uuid = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
version = "0.9.14"
[[deps.QuadGK]]
deps = ["DataStructures", "LinearAlgebra"]
git-tree-sha1 = "6ec7ac8412e83d57e313393220879ede1740f9ee"
uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
version = "2.8.2"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.Random123]]
deps = ["Random", "RandomNumbers"]
git-tree-sha1 = "552f30e847641591ba3f39fd1bed559b9deb0ef3"
uuid = "74087812-796a-5b5d-8853-05524746bad3"
version = "1.6.1"
[[deps.RandomNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
version = "1.5.3"
[[deps.RealDot]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
version = "0.1.0"
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.3.0"
[[deps.Rmath]]
deps = ["Random", "Rmath_jll"]
git-tree-sha1 = "f65dcb5fa46aee0cf9ed6274ccbd597adc49aa7b"
uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa"
version = "0.7.1"
[[deps.Rmath_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6ed52fdd3382cf21947b15e8870ac0ddbff736da"
uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f"
version = "0.4.0+0"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.Scratch]]
deps = ["Dates"]
git-tree-sha1 = "30449ee12237627992a99d5e30ae63e4d78cd24a"
uuid = "6c6a2e73-6563-6170-7368-637461726353"
version = "1.2.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.Setfield]]
deps = ["ConstructionBase", "Future", "MacroTools", "StaticArraysCore"]
git-tree-sha1 = "e2cc6d8c88613c05e1defb55170bf5ff211fbeac"
uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46"
version = "1.1.1"
[[deps.ShowCases]]
git-tree-sha1 = "7f534ad62ab2bd48591bdeac81994ea8c445e4a5"
uuid = "605ecd9f-84a6-4c9e-81e2-4798472b76a3"
version = "0.1.0"
[[deps.SimpleTraits]]
deps = ["InteractiveUtils", "MacroTools"]
git-tree-sha1 = "5d7e3f4e11935503d3ecaf7186eac40602e7d231"
uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d"
version = "0.9.4"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "c60ec5c62180f27efea3ba2908480f8055e17cee"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.1.1"
[[deps.SparseArrays]]
deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.SpecialFunctions]]
deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "7beb031cf8145577fbccacd94b8a8f4ce78428d3"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "2.3.0"
weakdeps = ["ChainRulesCore"]
[deps.SpecialFunctions.extensions]
SpecialFunctionsChainRulesCoreExt = "ChainRulesCore"
[[deps.SplittablesBase]]
deps = ["Setfield", "Test"]
git-tree-sha1 = "e08a62abc517eb79667d0a29dc08a3b589516bb5"
uuid = "171d559e-b47b-412a-8079-5efa626c420e"
version = "0.1.15"
[[deps.StaticArrays]]
deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"]
git-tree-sha1 = "832afbae2a45b4ae7e831f86965469a24d1d8a83"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.5.26"
[[deps.StaticArraysCore]]
git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a"
uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c"
version = "1.4.0"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
version = "1.9.0"
[[deps.StatsAPI]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "45a7769a04a3cf80da1c1c7c60caf932e6f4c9f7"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.6.0"
[[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "75ebe04c5bed70b91614d684259b661c9e6274a4"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.34.0"
[[deps.StatsFuns]]
deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"]
git-tree-sha1 = "f625d686d5a88bcd2b15cd81f18f98186fdc0c9a"
uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
version = "1.3.0"
[deps.StatsFuns.extensions]
StatsFunsChainRulesCoreExt = "ChainRulesCore"
StatsFunsInverseFunctionsExt = "InverseFunctions"
[deps.StatsFuns.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.StructArrays]]
deps = ["Adapt", "DataAPI", "GPUArraysCore", "StaticArraysCore", "Tables"]
git-tree-sha1 = "521a0e828e98bb69042fec1809c1b5a680eb7389"
uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
version = "0.6.15"
[[deps.StructTypes]]
deps = ["Dates", "UUIDs"]
git-tree-sha1 = "ca4bccb03acf9faaf4137a9abc1881ed1841aa70"
uuid = "856f2bd8-1eba-4b0a-8007-ebc267875bd4"
version = "1.10.0"
[[deps.SuiteSparse]]
deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
[[deps.SuiteSparse_jll]]
deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"]
uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c"
version = "5.10.1+6"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.3"
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[deps.Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits", "Test"]
git-tree-sha1 = "1544b926975372da01227b382066ab70e574a3ec"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.10.1"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.0"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.TimerOutputs]]
deps = ["ExprTools", "Printf"]
git-tree-sha1 = "f548a9e9c490030e545f72074a41edfd0e5bcdd7"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.23"
[[deps.Transducers]]
deps = ["Adapt", "ArgCheck", "BangBang", "Baselet", "CompositionsBase", "DefineSingletons", "Distributed", "InitialValues", "Logging", "Markdown", "MicroCollections", "Requires", "Setfield", "SplittablesBase", "Tables"]
git-tree-sha1 = "a66fb81baec325cf6ccafa243af573b031e87b00"
uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999"
version = "0.4.77"
[deps.Transducers.extensions]
TransducersBlockArraysExt = "BlockArrays"
TransducersDataFramesExt = "DataFrames"
TransducersLazyArraysExt = "LazyArrays"
TransducersOnlineStatsBaseExt = "OnlineStatsBase"
TransducersReferenceablesExt = "Referenceables"
[deps.Transducers.weakdeps]
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02"
OnlineStatsBase = "925886fa-5bf2-5e8e-b522-a9147a512338"
Referenceables = "42d2dcc6-99eb-4e98-b66c-637b7d73030e"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.UnsafeAtomics]]
git-tree-sha1 = "6331ac3440856ea1988316b46045303bef658278"
uuid = "013be700-e6cd-48c3-b4a1-df204f14c38f"
version = "0.2.1"
[[deps.UnsafeAtomicsLLVM]]
deps = ["LLVM", "UnsafeAtomics"]
git-tree-sha1 = "ea37e6066bf194ab78f4e747f5245261f17a7175"
uuid = "d80eeb9a-aca5-4d75-85e5-170c8b632249"
version = "0.1.2"
[[deps.UnsafePointers]]
git-tree-sha1 = "c81331b3b2e60a982be57c046ec91f599ede674a"
uuid = "e17b2a0c-0bdf-430a-bd0c-3a23cae4ff39"
version = "1.0.0"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.13+0"
[[deps.Zygote]]
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "GPUArrays", "GPUArraysCore", "IRTools", "InteractiveUtils", "LinearAlgebra", "LogExpFunctions", "MacroTools", "NaNMath", "PrecompileTools", "Random", "Requires", "SparseArrays", "SpecialFunctions", "Statistics", "ZygoteRules"]
git-tree-sha1 = "5be3ddb88fc992a7d8ea96c3f10a49a7e98ebc7b"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.6.62"
[deps.Zygote.extensions]
ZygoteColorsExt = "Colors"
ZygoteDistancesExt = "Distances"
ZygoteTrackerExt = "Tracker"
[deps.Zygote.weakdeps]
Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
[[deps.ZygoteRules]]
deps = ["ChainRulesCore", "MacroTools"]
git-tree-sha1 = "977aed5d006b840e2e40c0b48984f7463109046d"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.3"
[[deps.cuDNN]]
deps = ["CEnum", "CUDA", "CUDNN_jll"]
git-tree-sha1 = "f65490d187861d6222cb38bcbbff3fd949a7ec3e"
uuid = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"
version = "1.0.4"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.8.0+0"
[[deps.micromamba_jll]]
deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl"]
git-tree-sha1 = "66d07957bcf7e4930d933195aed484078dd8cbb5"
uuid = "f8abcde7-e9b7-5caa-b8af-a437887ae8e4"
version = "1.4.9+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0"

src/Project.toml Normal file
View File

@@ -0,0 +1,16 @@
name = "IronpenGPU"
uuid = "3d5396ea-818e-43fc-a9d3-164248e840cd"
authors = ["ton <narawat@gmail.com>"]
version = "0.1.0"
[deps]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"

View File

@@ -44,6 +44,7 @@ function (kfn::kfn_1)(input::AbstractArray)
     kfn.on_epsilonRec .= 0
     kfn.on_wOutChange .= 0
     kfn.on_refractoryCounter .= 0
+    kfn.on_firingCounter .= 0
     kfn.on_synapticActivityCounter .= 0
     kfn.learningStage = [2]
@@ -140,7 +141,7 @@ function (kfn::kfn_1)(input::AbstractArray)
     kfn.on_zit .= reshape(kfn.zit, (i1, i2, 1, i4)) .* kfn.on_arrayProjection4d
     # read out
     onForward( kfn.on_zit,
                kfn.on_wOut,
                kfn.on_vt,
                kfn.on_vth,
@@ -282,7 +283,7 @@ function lifForward( zit,
     refractoryCounter[i1,i2,i3,i4] -= 1
     recSignal[i1,i2,i3,i4] = 0
     zt[i1,i2,i3,i4] = 0
-    vt[i1,i2,i3,i4] = alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]
+    vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
     phi[i1,i2,i3,i4] = 0
     # compute epsilonRec
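For orientation, an illustrative sketch (not part of the diff): with the defaults this changeset sets elsewhere (delta = 1.0, tau_m = 100.0), the old and new refractory decay rules behave very differently.

delta = 1.0; tau_m = 100.0
alpha = exp(-delta / tau_m)   # ≈ 0.990
vt = 1.0
alpha * vt                    # old rule: ≈ 0.990, slow exponential leak
(1 - alpha) * vt              # new rule: ≈ 0.010, voltage collapses in one step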
@@ -299,7 +300,7 @@ function lifForward( zit,
     zt[i1,i2,i3,i4] = 1
     refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
     firingCounter[i1,i2,i3,i4] += 1
-    vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
+    # vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
     # reset counter if neuron fires
     neuronInactivityCounter[i1,i2,i3,i4] = 0
@@ -320,7 +321,7 @@ function lifForward( zit,
     synapticActivityCounter[i1,i2,i3,i4] += zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4])
     # voltage regulator
-    wRecChange[i1,i2,i3,i4] = -0.01*0.0001 * (vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) *
+    wRecChange[i1,i2,i3,i4] = -0.001 * (vt[i1,i2,i3,i4] - vth[i1,i2,i3,i4]) *
                               zit[i1,i2,i3,i4]
     # negative value is counting mode, -0.1 < -0.1 won't work on GPU
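A quick arithmetic check (illustrative only) on the regulator gain change above:

old_gain = -0.01 * 0.0001   # = -1.0e-6
new_gain = -0.001           # = -1.0e-3
new_gain / old_gain         # = 1000.0, so the push toward threshold is 1000x stronger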
@@ -476,7 +477,7 @@ function alifForward( zit,
     refractoryCounter[i1,i2,i3,i4] -= 1
     recSignal[i1,i2,i3,i4] = 0
     zt[i1,i2,i3,i4] = 0
-    vt[i1,i2,i3,i4] = alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]
+    vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
     phi[i1,i2,i3,i4] = 0
     a[i1,i2,i3,i4] = rho[i1,i2,i3,i4] * a[i1,i2,i3,i4]
@@ -504,7 +505,7 @@ function alifForward( zit,
     zt[i1,i2,i3,i4] = 1
     refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
     firingCounter[i1,i2,i3,i4] += 1
-    vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
+    # vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
     a[i1,i2,i3,i4] = (rho[i1,i2,i3,i4] * a[i1,i2,i3,i4]) + 1
     neuronInactivityCounter[i1,i2,i3,i4] = 0
 else
@@ -528,7 +529,7 @@ function alifForward( zit,
     synapticActivityCounter[i1,i2,i3,i4] += zit[i1,i2,i3,i4] * !iszero(wRec[i1,i2,i3,i4])
     # voltage regulator
-    wRecChange[i1,i2,i3,i4] = -0.01*0.0001 * (vt[i1,i2,i3,i4] - avth[i1,i2,i3,i4]) *
+    wRecChange[i1,i2,i3,i4] = -0.001 * (vt[i1,i2,i3,i4] - avth[i1,i2,i3,i4]) *
                               zit[i1,i2,i3,i4]
     # negative value is counting mode, -0.1 < -0.1 won't work on GPU
@@ -640,7 +641,7 @@ function onForward( zit,
     refractoryCounter[i1,i2,i3,i4] -= 1
     recSignal[i1,i2,i3,i4] = 0
     zt[i1,i2,i3,i4] = 0
-    vt[i1,i2,i3,i4] = alpha[i1,i2,i3,i4] * vt[i1,i2,i3,i4]
+    vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
     phi[i1,i2,i3,i4] = 0
     # compute epsilonRec
@@ -655,7 +656,7 @@ function onForward( zit,
     zt[i1,i2,i3,i4] = 1
     refractoryCounter[i1,i2,i3,i4] = refractoryDuration[i1,i2,i3,i4]
     firingCounter[i1,i2,i3,i4] += 1
-    vt[i1,i2,i3,i4] = vRest[i1,i2,i3,i4]
+    vt[i1,i2,i3,i4] = (1 - alpha[i1,i2,i3,i4]) * vt[i1,i2,i3,i4]
 else
     zt[i1,i2,i3,i4] = 0
 end
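Note that the output-neuron (on) kernel now applies a soft decay after a spike instead of snapping to vRest, while the lif/alif kernels above merely comment the reset line out. A minimal contrast, assuming scalar values for readability (an illustrative sketch, not repo code):

vt, vRest, alpha = 1.2, 0.0, exp(-1.0 / 100.0)
vt_hard_reset = vRest              # previous behaviour: jump to the resting potential
vt_soft_reset = (1 - alpha) * vt   # new behaviour: scale the spiking voltage down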
@@ -674,241 +675,6 @@ function onForward( zit,
     return nothing
 end
# function lifForward(kfn_zit::Array{T},
# zit::Array{T},
# wRec::Array{T},
# vt0::Array{T},
# vt1::Array{T},
# vth::Array{T},
# vRest::Array{T},
# zt1::Array{T},
# alpha::Array{T},
# phi::Array{T},
# epsilonRec::Array{T},
# refractoryCounter::Array{T},
# refractoryDuration::Array{T},
# gammaPd::Array{T},
# firingCounter::Array{T},
# arrayProjection4d::Array{T},
# recSignal::Array{T},
# decayed_vt0::Array{T},
# decayed_epsilonRec::Array{T},
# vt1_diff_vth::Array{T},
# vt1_diff_vth_div_vth::Array{T},
# gammaPd_div_vth::Array{T},
# phiActivation::Array{T},
# ) where T<:Number
# # project 3D kfn zit into 4D lif zit
# i1, i2, i3, i4 = size(alif_wRec)
# lif_zit .= reshape(kfn_zit, (i1, i2, 1, i4)) .* lif_arrayProjection4d
# for j in 1:size(wRec, 4), i in 1:size(wRec, 3) # compute along neurons axis of every batch
# if sum(@view(refractoryCounter[:,:,i,j])) > 0 # refractory period is active
# @. @views refractoryCounter[:,:,i,j] -= 1
# @. @views zt1[:,:,i,j] = 0
# @. @views vt1[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @. @views phi[:,:,i,j] = 0
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j]
# else # refractory period is inactive
# @. @views recSignal[:,:,i,j] = zit[:,:,i,j] * wRec[:,:,i,j]
# @. @views decayed_vt0[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @view(vt1[:,:,i,j]) .= @view(decayed_vt0[:,:,i,j]) .+ sum(@view(recSignal[:,:,i,j]))
# if sum(@view(vt1[:,:,i,j])) > sum(@view(vth[:,:,i,j]))
# @. @views zt1[:,:,i,j] = 1
# @. @views refractoryCounter[:,:,i,j] = refractoryDuration[:,:,i,j]
# @. @views firingCounter[:,:,i,j] += 1
# @. @views vt1[:,:,i,j] = vRest[:,:,i,j]
# else
# @. @views zt1[:,:,i,j] = 0
# end
# # compute phi, there is a difference from alif formula
# @. @views gammaPd_div_vth[:,:,i,j] = gammaPd[:,:,i,j] / vth[:,:,i,j]
# @. @views vt1_diff_vth[:,:,i,j] = vt1[:,:,i,j] - vth[:,:,i,j]
# @. @views vt1_diff_vth_div_vth[:,:,i,j] = vt1_diff_vth[:,:,i,j] / vth[:,:,i,j]
# @view(phiActivation[:,:,i,j]) .= max(0, 1 - sum(@view(vt1_diff_vth_div_vth[:,:,i,j])))
# @. @views phi[:,:,i,j] = gammaPd_div_vth[:,:,i,j] * phiActivation[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j] + zit[:,:,i,j]
# end
# end
# end
# function alifForward(zit::Array{T},
# wRec::Array{T},
# vt0::Array{T},
# vt1::Array{T},
# vth::Array{T},
# vRest::Array{T},
# zt1::Array{T},
# alpha::Array{T},
# phi::Array{T},
# epsilonRec::Array{T},
# refractoryCounter::Array{T},
# refractoryDuration::Array{T},
# gammaPd::Array{T},
# firingCounter::Array{T},
# recSignal::Array{T},
# decayed_vt0::Array{T},
# decayed_epsilonRec::Array{T},
# vt1_diff_vth::Array{T},
# vt1_diff_vth_div_vth::Array{T},
# gammaPd_div_vth::Array{T},
# phiActivation::Array{T},
# epsilonRecA::Array{T},
# avth::Array{T},
# a::Array{T},
# beta::Array{T},
# rho::Array{T},
# phi_x_epsilonRec::Array{T},
# phi_x_beta::Array{T},
# rho_diff_phi_x_beta::Array{T},
# rho_div_phi_x_beta_x_epsilonRecA::Array{T},
# beta_x_a::Array{T},
# ) where T<:Number
# for j in 1:size(wRec, 4), i in 1:size(wRec, 3) # compute along neurons axis of every batch
# if sum(@view(refractoryCounter[:,:,i,j])) > 0 # refractory period is active
# @. @views refractoryCounter[:,:,i,j] -= 1
# @. @views zt1[:,:,i,j] = 0
# @. @views vt1[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @. @views phi[:,:,i,j] = 0
# @. @views a[:,:,i,j] = rho[:,:,i,j] * a[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j]
# # compute epsilonRecA
# @. @views phi_x_epsilonRec[:,:,i,j] = phi[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views phi_x_beta[:,:,i,j] = phi[:,:,i,j] * beta[:,:,i,j]
# @. @views rho_diff_phi_x_beta[:,:,i,j] = rho[:,:,i,j] - phi_x_beta[:,:,i,j]
# @. @views rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j] = rho_diff_phi_x_beta[:,:,i,j] * epsilonRecA[:,:,i,j]
# @. @views epsilonRecA[:,:,i,j] = phi_x_epsilonRec[:,:,i,j] + rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j]
# # compute avth
# @. @views beta_x_a[:,:,i,j] = beta[:,:,i,j] * a[:,:,i,j]
# @. @views avth[:,:,i,j] = vth[:,:,i,j] + beta_x_a[:,:,i,j]
# else # refractory period is inactive
# @. @views recSignal[:,:,i,j] = zit[:,:,i,j] * wRec[:,:,i,j]
# @. @views decayed_vt0[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @view(vt1[:,:,i,j]) .= @view(decayed_vt0[:,:,i,j]) .+ sum(@view(recSignal[:,:,i,j]))
# # compute avth
# @. @views beta_x_a[:,:,i,j] = beta[:,:,i,j] * a[:,:,i,j]
# @. @views avth[:,:,i,j] = vth[:,:,i,j] + beta_x_a[:,:,i,j]
# if sum(@view(vt1[:,:,i,j])) > sum(@view(avth[:,:,i,j]))
# @. @views zt1[:,:,i,j] = 1
# @. @views refractoryCounter[:,:,i,j] = refractoryDuration[:,:,i,j]
# @. @views firingCounter[:,:,i,j] += 1
# @. @views vt1[:,:,i,j] = vRest[:,:,i,j]
# @. @views a[:,:,i,j] = rho[:,:,i,j] * a[:,:,i,j]
# @. @views a[:,:,i,j] = a[:,:,i,j] += 1
# else
# @. @views zt1[:,:,i,j] = 0
# @. @views a[:,:,i,j] = rho[:,:,i,j] * a[:,:,i,j]
# end
# # compute phi, there is a difference from alif formula
# @. @views gammaPd_div_vth[:,:,i,j] = gammaPd[:,:,i,j] / vth[:,:,i,j]
# @. @views vt1_diff_vth[:,:,i,j] = vt1[:,:,i,j] - vth[:,:,i,j]
# @. @views vt1_diff_vth_div_vth[:,:,i,j] = vt1_diff_vth[:,:,i,j] / vth[:,:,i,j]
# @view(phiActivation[:,:,i,j]) .= max(0, 1 - sum(@view(vt1_diff_vth_div_vth[:,:,i,j])))
# @. @views phi[:,:,i,j] = gammaPd_div_vth[:,:,i,j] * phiActivation[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j] + zit[:,:,i,j]
# # compute epsilonRecA
# @. @views phi_x_epsilonRec[:,:,i,j] = phi[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views phi_x_beta[:,:,i,j] = phi[:,:,i,j] * beta[:,:,i,j]
# @. @views rho_diff_phi_x_beta[:,:,i,j] = rho[:,:,i,j] - phi_x_beta[:,:,i,j]
# @. @views rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j] = rho_diff_phi_x_beta[:,:,i,j] * epsilonRecA[:,:,i,j]
# @. @views epsilonRecA[:,:,i,j] = phi_x_epsilonRec[:,:,i,j] + rho_div_phi_x_beta_x_epsilonRecA[:,:,i,j]
# end
# end
# end
# function onForward(kfn_zit::Array{T},
# zit::Array{T},
# wOut::Array{T},
# vt0::Array{T},
# vt1::Array{T},
# vth::Array{T},
# vRest::Array{T},
# zt1::Array{T},
# alpha::Array{T},
# phi::Array{T},
# epsilonRec::Array{T},
# refractoryCounter::Array{T},
# refractoryDuration::Array{T},
# gammaPd::Array{T},
# firingCounter::Array{T},
# arrayProjection4d::Array{T},
# recSignal::Array{T},
# decayed_vt0::Array{T},
# decayed_epsilonRec::Array{T},
# vt1_diff_vth::Array{T},
# vt1_diff_vth_div_vth::Array{T},
# gammaPd_div_vth::Array{T},
# phiActivation::Array{T},
# ) where T<:Number
# # project 3D kfn zit into 4D lif zit
# zit .= reshape(kfn_zit,
# (size(wOut, 1), size(wOut, 2), 1, size(wOut, 4))) .* arrayProjection4d
# for j in 1:size(wOut, 4), i in 1:size(wOut, 3) # compute along neurons axis of every batch
# if sum(@view(refractoryCounter[:,:,i,j])) > 0 # refractory period is active
# @. @views refractoryCounter[:,:,i,j] -= 1
# @. @views zt1[:,:,i,j] = 0
# @. @views vt1[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @. @views phi[:,:,i,j] = 0
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j]
# else # refractory period is inactive
# @. @views recSignal[:,:,i,j] = zit[:,:,i,j] * wOut[:,:,i,j]
# @. @views decayed_vt0[:,:,i,j] = alpha[:,:,i,j] * vt0[:,:,i,j]
# @view(vt1[:,:,i,j]) .= @view(decayed_vt0[:,:,i,j]) .+ sum(@view(recSignal[:,:,i,j]))
# if sum(@view(vt1[:,:,i,j])) > sum(@view(vth[:,:,i,j]))
# @. @views zt1[:,:,i,j] = 1
# @. @views refractoryCounter[:,:,i,j] = refractoryDuration[:,:,i,j]
# @. @views firingCounter[:,:,i,j] += 1
# @. @views vt1[:,:,i,j] = vRest[:,:,i,j]
# else
# @. @views zt1[:,:,i,j] = 0
# end
# # compute phi, there is a difference from alif formula
# @. @views gammaPd_div_vth[:,:,i,j] = gammaPd[:,:,i,j] / vth[:,:,i,j]
# @. @views vt1_diff_vth[:,:,i,j] = vt1[:,:,i,j] - vth[:,:,i,j]
# @. @views vt1_diff_vth_div_vth[:,:,i,j] = vt1_diff_vth[:,:,i,j] / vth[:,:,i,j]
# @view(phiActivation[:,:,i,j]) .= max(0, 1 - sum(@view(vt1_diff_vth_div_vth[:,:,i,j])))
# @. @views phi[:,:,i,j] = gammaPd_div_vth[:,:,i,j] * phiActivation[:,:,i,j]
# # compute epsilonRec
# @. @views decayed_epsilonRec[:,:,i,j] = alpha[:,:,i,j] * epsilonRec[:,:,i,j]
# @. @views epsilonRec[:,:,i,j] = decayed_epsilonRec[:,:,i,j] + zit[:,:,i,j]
# end
# end
# end

View File

@@ -60,6 +60,7 @@ function compute_paramsChange!(kfn::kfn_1, modelError::CuArray, outputError::CuA
                             kfn.on_wOutChange,
                             kfn.on_arrayProjection4d,
                             kfn.on_error,
+                            kfn.on_synapticActivityCounter,
                             outputError,
                             )
     # error("DEBUG -> kfn compute_paramsChange! $(Dates.now())")
@@ -106,8 +107,9 @@ function lifComputeParamsChange!( timeStep::CuArray,
     wRecChange .+= (eta .* nError .* eRec)
     # frequency regulator
-    freqError = (firingTargetFrequency - (firingCounter./timeStep)) ./ timeStep
-    freqWRecChange = -0.1 .* freqError .* eta .* eRec
+    targetFiringCount = firingTargetFrequency .* timeStep
+    freqError = (firingCounter .- targetFiringCount) ./ timeStep
+    freqWRecChange = -1 .* freqError .* eta .* eRec
     wRecChange .+= freqWRecChange
     # reset epsilonRec
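The regulator now works in spike counts rather than in frequencies. A worked example with assumed numbers (an 800-step window and the 0.1 spikes-per-step target this changeset sets elsewhere):

timeStep = 800.0
firingTargetFrequency = 0.1
firingCounter = 95.0                                         # spikes actually emitted
targetFiringCount = firingTargetFrequency * timeStep         # 80.0 expected spikes
freqError = (firingCounter - targetFiringCount) / timeStep   # ≈ 0.019, neuron too active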
@@ -158,8 +160,9 @@ function alifComputeParamsChange!( timeStep::CuArray,
     wRecChange .+= (eta .* nError .* eRec)
     # frequency regulator
-    freqError = (firingTargetFrequency - (firingCounter./timeStep)) ./ timeStep
-    freqWRecChange = -0.1 .* freqError .* eta .* eRec
+    targetFiringCount = firingTargetFrequency .* timeStep
+    freqError = (firingCounter .- targetFiringCount) ./ timeStep
+    freqWRecChange = -1 .* freqError .* eta .* eRec
     wRecChange .+= freqWRecChange
     # wRecChange .+= 0.01 .* ((firingTargetFrequency - (firingCounter./timeStep)) ./ timeStep) .*
     #               eta .* eRec
@@ -179,6 +182,7 @@ function onComputeParamsChange!(phi::CuArray,
                                wOutChange::CuArray,
                                arrayProjection4d::CuArray,
                                nError::CuArray,
+                               synapticActivityCounter,
                                outputError::CuArray # outputError is output neuron's error
                                )
@@ -279,13 +283,15 @@ end
 function learn!(kfn::kfn_1, progress, device=cpu)
     if sum(kfn.timeStep) == 800
-        println("zitCumulative ", sum(kfn.zitCumulative[:,:,784:size(kfn.zitCumulative, 3)], dims=3))
-        # println("on_synapticActivityCounter ", kfn.on_synapticActivityCounter[:,:,1,:])
+        # println("zitCumulative ", sum(kfn.zitCumulative[:,:,784:size(kfn.zitCumulative, 3)], dims=3))
+        println("synapse lif $(sum((!isequal).(kfn.lif_wRec, 0))) alif $(sum((!isequal).(kfn.alif_wRec, 0)))")
+        println("on_synapticActivityCounter 0 ", kfn.on_synapticActivityCounter[:,:,1])
+        println("on_synapticActivityCounter 5 ", kfn.on_synapticActivityCounter[:,:,6])
+        println("wOut 0 $(sum(kfn.on_wOut[:,:,1,1], dims=3)) total $(sum(sum(kfn.on_wOut[:,:,1,1], dims=3)))")
+        println("wOut 5 $(sum(kfn.on_wOut[:,:,6,1], dims=3)) total $(sum(sum(kfn.on_wOut[:,:,6,1], dims=3)))")
     end
+    #WORKING compare output neuron 0 synapse activity when input are label 0 and 5, (!isequal).(wOut)
     # lif learn
     kfn.lif_wRec, kfn.lif_neuronInactivityCounter, kfn.lif_synapticActivityCounter, kfn.lif_synapseReconnectDelay =
         lifLearn(kfn.lif_wRec,
@@ -321,7 +327,9 @@ function learn!(kfn::kfn_1, progress, device=cpu)
     # on learn
     onLearn!(kfn.on_wOut,
              kfn.on_wOutChange,
-             kfn.on_arrayProjection4d)
+             kfn.on_eta,
+             kfn.on_arrayProjection4d,
+             progress,)
     # wrap up learning session
     if kfn.learningStage == [3]
@@ -424,14 +432,34 @@ function alifLearn(wRec,
     return wRec, neuronInactivityCounter, synapticActivityCounter, synapseReconnectDelay
 end
+# function onLearn!(wOut,
+#                   wOutChange,
+#                   arrayProjection4d)
+#     # merge learning weight with average learning weight
+#     wOut .+= (sum(wOutChange, dims=4) ./ (size(wOut, 4))) .* arrayProjection4d
+#     # adaptive wOut to help convergence using c_decay
+#     wOut .-= 0.001 .* wOut
+# end
 function onLearn!(wOut,
                   wOutChange,
-                  arrayProjection4d)
-    # merge learning weight with average learning weight
-    wOut .+= (sum(wOutChange, dims=4) ./ (size(wOut, 4))) .* arrayProjection4d
-    # adaptive wOut to help convergence using c_decay
-    wOut .-= 0.001 .* wOut
+                  eta,
+                  arrayProjection4d,
+                  progress,)
+    if progress != 0
+        # adaptive wOut to help convergence using c_decay
+        wOut .-= 0.1 .* eta .* wOut # wOut .-= 0.001 .* wOut
+        # merge learning weight with average learning weight
+        wOut .+= (sum(wOutChange, dims=4) ./ (size(wOut, 4))) .* arrayProjection4d
+    else
+        #TESTING skip
+        wOutChange .= 0
+    end
 end
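With the learning rate folded in, the output-weight decay becomes eta-dependent and is applied before the update merge. An illustrative comparison, assuming eta = 0.05:

eta = 0.05
w = 1.0
w_old = w - 0.001 * w       # fixed decay: keeps 99.9% of the weight each call
w_new = w - 0.1 * eta * w   # eta-scaled decay: keeps 99.5% when eta = 0.05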
 function neuroplasticity(synapseConnectionNumber,
@@ -446,44 +474,77 @@ function neuroplasticity(synapseConnectionNumber,
                          synapticActivityCounter,
                          progress,) # (row, col, n)
-    if progress == 2 # no need to learn
+    if progress == 2 # no need to learn for current neural pathway
         # skip neuroplasticity
         #TODO I may need to do something with neuronInactivityCounter and other variables
         wRecChange .= 0
+        # # -w all non-fire connection except mature connection
+        # weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta)
+        # # prune weak synapse
+        # pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
         # error("DEBUG -> neuroplasticity")
-    elseif progress != 0 # progress increase
+    elseif progress == 1 # some progress whether up or down
         # ready to reconnect synapse must not have wRecChange
         mask = (!isequal).(wRec, 0)
         wRecChange .*= mask
+        # weakenAllActiveSynapse!(wRec, synapticActivityCounter, eta)
         # merge learning weight, all resulting negative wRec will get pruned
         mergeLearnWeight!(wRec, exInType, wRecChange, synapticActivityCounter, synapseReconnectDelay)
-        # adjust wRec based on repeatition (90% +w, 10% -w)
-        growRepeatedPath!(wRec, synapticActivityCounter, eta)
-        # -w all non-fire connection except mature connection
-        weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta)
-        # prune weak synapse
-        pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
+        # # adjust wRec based on repeatition (90% +w, 10% -w)
+        # growRepeatedPath!(wRec, synapticActivityCounter, eta)
+        # # -w all non-fire connection except mature connection
+        # weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta)
+        # # prune weak synapse
+        # pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
         # rewire synapse connection
         rewireSynapse!(wRec, neuronInactivityCounter, synapticActivityCounter,
-                       synapseReconnectDelay, zitCumulative)
+                       synapseReconnectDelay, synapseConnectionNumber, zitCumulative)
         # error("DEBUG -> neuroplasticity 1")
     elseif progress == 0 # no progress, no weight update, only rewire
-        # -w all non-fire connection except mature connection
-        weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta)
-        # prune weak synapse
-        pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
+        wRecChange .= 0
+        # weakenAllActiveSynapse!(wRec, synapticActivityCounter, eta)
+        # # prune weak synapse
+        # pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
         # rewire synapse connection
         rewireSynapse!(wRec, neuronInactivityCounter, synapticActivityCounter,
-                       synapseReconnectDelay, zitCumulative)
+                       synapseReconnectDelay, synapseConnectionNumber, zitCumulative)
         # error("DEBUG -> neuroplasticity")
+    elseif progress == -1 # some progress whether up or down
+        # ready to reconnect synapse must not have wRecChange
+        mask = (!isequal).(wRec, 0)
+        wRecChange .*= mask
+        # weakenAllActiveSynapse!(wRec, synapticActivityCounter, eta)
+        # merge learning weight, all resulting negative wRec will get pruned
+        mergeLearnWeight!(wRec, exInType, wRecChange, synapticActivityCounter, synapseReconnectDelay)
+        # # adjust wRec based on repeatition (90% +w, 10% -w)
+        # growRepeatedPath!(wRec, synapticActivityCounter, eta)
+        # # -w all non-fire connection except mature connection
+        # weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta)
+        # # prune weak synapse
+        # pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
+        # rewire synapse connection
+        rewireSynapse!(wRec, neuronInactivityCounter, synapticActivityCounter,
+                       synapseReconnectDelay, synapseConnectionNumber, zitCumulative)
+        # error("DEBUG -> neuroplasticity 1")
     else
         error("undefined condition line $(@__LINE__)")
     end
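A compact summary of the branching above, as a sketch only (the helper name is hypothetical; the repo dispatches inline):

function plasticityAction(progress::Integer)
    progress == 2  && return :skip           # pathway converged: zero wRecChange, no plasticity
    progress == 1  && return :merge_rewire   # masked weight merge, then rewire
    progress == 0  && return :rewire_only    # zero wRecChange, rewire only
    progress == -1 && return :merge_rewire   # regression handled like progress == 1
    error("undefined condition")
end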

View File

@@ -1,11 +1,13 @@
 module snnUtil
 export refractoryStatus!, addNewSynapticConn!, mergeLearnWeight!, growRepeatedPath!,
-       weakenNotMatureSynapse!, pruneSynapse!, rewireSynapse!
+       weakenNotMatureSynapse!, pruneSynapse!, rewireSynapse!, weakenAllActiveSynapse!
 using Random, GeneralUtils
+using ..type
 #------------------------------------------------------------------------------------------------100
+synapseMaxWaittime = 100
 function refractoryStatus!(refractoryCounter, refractoryActive, refractoryInactive)
     d1, d2, d3, d4 = size(refractoryCounter)
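The new module-level constant shortens reconnect delays roughly tenfold everywhere it replaces the hard-coded range. Illustrative draw, assuming the value of 100 set above:

synapseMaxWaittime = 100
rand(1:synapseMaxWaittime)   # a pruned synapse now waits at most 100 steps,
                             # where the old rand(1:1000) allowed up to 1000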
@@ -81,45 +83,19 @@ function mergeLearnWeight!(wRec::AbstractArray, exInType, wRecChange::AbstractAr
# println("wRec 5 $(size(wRec)) ", wRec[:,:,1,1]) # println("wRec 5 $(size(wRec)) ", wRec[:,:,1,1])
GeneralUtils.replaceElements!(flipsign, 1, synapticActivityCounter, 0) GeneralUtils.replaceElements!(flipsign, 1, synapticActivityCounter, 0)
# set pruned synapse to random wait time # set pruned synapse to random wait time
waittime = rand((1:1000), size(wRec)) .* flipsign # synapse's random wait time to reconnect waittime = rand((1:synapseMaxWaittime), size(wRec)) .* flipsign # synapse's random wait time to reconnect
# synapseReconnectDelay counting mode when value is negative hence .* -1 # synapseReconnectDelay counting mode when value is negative hence .* -1
synapseReconnectDelay .= (synapseReconnectDelay .* nonflipsign) .+ (waittime .* -1) synapseReconnectDelay .= (synapseReconnectDelay .* nonflipsign) .+ (waittime .* -1)
# println("synapseReconnectDelay ", synapseReconnectDelay[:,:,1,1]) # println("synapseReconnectDelay ", synapseReconnectDelay[:,:,1,1])
# error("DEBUG -> mergeLearnWeight!") # error("DEBUG -> mergeLearnWeight!")
end end
# function growRepeatedPath!(wRec, synapticActivityCounter, eta)
# # seperate active synapse out of inactive in this signal
# mask_activeSynapse = (!isequal).(synapticActivityCounter, 0)
# # adjust weight based on vt progress and repeatition (80% +w, 20% -w) depend on epsilonRec
# avgActivity = sum(synapticActivityCounter) / sum(mask_activeSynapse)
# lowerlimit = 0.2 * avgActivity # boundary at 20%
# # +w, synapse with more than 10% of avg activity get increase weight by eta
# mask_more = (!isless).(synapticActivityCounter, lowerlimit)
# mask_2 = GeneralUtils.allTrue.(mask_activeSynapse, mask_more)
# mask_3 = mask_2 .* (1 .+ eta) # minor activity synapse weight will be reduced by eta
# GeneralUtils.replaceElements!(mask_3, 0, 1) # replace 0 with 1 so mask * Wrec will not get 0 weight
# wRec .*= mask_3
# # -w, synapse with less than 10% of avg activity get reduced weight by eta
# mask_less = GeneralUtils.isBetween.(synapticActivityCounter, 0, lowerlimit) # 1st criteria
# mask_3 = GeneralUtils.allTrue.(mask_activeSynapse, mask_less)
# mask_4 = mask_3 .* (1 .- eta) # minor activity synapse weight will be reduced by eta
# # replace 0 with 1 so mask * wRec will not get 0 weight i.e. non-effected weight remain the same
# GeneralUtils.replaceElements!(mask_4, 0, 1)
# wRec .*= mask_4
# # error("DEBUG -> growRepeatedPath!")
# end
 function growRepeatedPath!(wRec, synapticActivityCounter, eta)
     # seperate active synapse out of inactive in this signal
     mask_activeSynapse = (!isequal).(synapticActivityCounter, 0)
-    # adjust weight based on vt progress and repeatition (80% +w, 20% -w) depend on epsilonRec
-    mask_more, mask_less, _ = rankMatrix(synapticActivityCounter, 0.2) # sort synapse from highest to lowest activity
+    # adjust weight based on vt progress and repeatition (40% +w, 60% -w) depend on epsilonRec
+    mask_more, mask_less, _ = rankMatrix(synapticActivityCounter, 0.6) # sort synapse from highest to lowest activity
     # +w, synapse with more than 10% of avg activity get increase weight by eta
     # mask_more = (!isless).(synapticActivityCounter, lowerlimit)
@@ -138,6 +114,12 @@ function growRepeatedPath!(wRec, synapticActivityCounter, eta)
# error("DEBUG -> growRepeatedPath!") # error("DEBUG -> growRepeatedPath!")
end end
function weakenAllActiveSynapse!(wRec, synapticActivityCounter, eta) # TODO not fully tested, there is no connection YET where there is 0 synapse activity but wRec is not 0 (subscribed)
mask_activeSynapse = (!isequal).(synapticActivityCounter, 0)
mask_1 = mask_activeSynapse .* (1 .- (0.1 .* eta))
GeneralUtils.replaceElements!(mask_1, 0, 1) # replace 0 with 1 so mask * Wrec will not get 0 weight
wRec .*= mask_1
end
function weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta) # TODO not fully tested, there is no connection YET where there is 0 synapse activity but wRec is not 0 (subscribed) function weakenNotMatureSynapse!(wRec, synapticActivityCounter, eta) # TODO not fully tested, there is no connection YET where there is 0 synapse activity but wRec is not 0 (subscribed)
mask_inactiveSynapse = isequal.(synapticActivityCounter, 0) mask_inactiveSynapse = isequal.(synapticActivityCounter, 0)
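Worked effect of the new weakenAllActiveSynapse!, with an assumed eta of 0.05: every synapse that carried spikes in the window is scaled by 1 - 0.1*eta, and silent synapses are untouched because their mask entry is replaced by 1.

eta = 0.05
factor = 1 - 0.1 * eta   # = 0.995
0.2 * factor             # an active synapse at weight 0.2 drops to 0.199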
@@ -155,7 +137,7 @@ function pruneSynapse!(wRec, synapticActivityCounter, synapseReconnectDelay)
     # all weak synapse activity are reset
     GeneralUtils.replaceElements!(mask_weak, 1, synapticActivityCounter, 0)
     # set pruned synapse to random wait time
-    waittime = rand((1:1000), size(wRec)) .* mask_weak # synapse's random wait time to reconnect
+    waittime = rand((1:synapseMaxWaittime), size(wRec)) .* mask_weak # synapse's random wait time to reconnect
     # synapseReconnectDelay counting mode when value is negative hence .* -1
     synapseReconnectDelay .= (synapseReconnectDelay .* mask_notweak) .+ (waittime .* -1)
     # error("DEBUG -> pruneSynapse!")
@@ -164,10 +146,11 @@ end
 function rewireSynapse!(wRec::AbstractArray, neuronInactivityCounter::AbstractArray,
                         synapticActivityCounter::AbstractArray,
                         synapseReconnectDelay::AbstractArray,
+                        synapseConnectionNumber::Integer,
                         zitCumulative::AbstractArray)
-    _,_,i3,i4 = size(wRec)
+    i1,i2,i3,i4 = size(wRec)
     for n in 1:i3 # neuron-by-neuron
-        if neuronInactivityCounter[1,1,n,i4][1] < -10000 # neuron die i.e. reset all weight
+        if neuronInactivityCounter[1,1,n,i4][1] < -100000 # neuron die i.e. reset all weight
             println("neuron $n die")
             neuronInactivityCounter[:,:,n,i4] .= 0 # reset
             w = random_wRec(i1,i2,1,synapseConnectionNumber)
@@ -183,23 +166,21 @@ function rewireSynapse!(wRec::AbstractArray, neuronInactivityCounter::AbstractAr
     if timemark > 0 #TODO not fully tested. mark timeStep available
         timemark = Int(timemark)
-        # get neuron pool at 10 timeStep earlier
-        earlier = size(zitCumulative, 3) - 10 > 0 ? size(zitCumulative, 3) - 10 : size(zitCumulative, 3)
+        # get neuron pool within 100 timeStep earlier
+        earlier = size(zitCumulative, 3) - 100 > 0 ? size(zitCumulative, 3) - 100 : size(zitCumulative, 3)
         current = size(zitCumulative, 3)
         pool = sum(zitCumulative[:,:,earlier:current], dims=3)
-        # earlier = timemark - 10 > 0 ? timemark - 10 : timemark
-        # timemark = timemark == 800 ? 799 : timemark
-        # pool = sum(zitCumulative[:,:,earlier:timemark], dims=3) #BUG BoundsError: attempt to access 10×25×801 Array{Float32, 3} at index [1:10, 1:25, 1340.0f0:1.0f0:1350.0f0]
         if sum(pool) != 0
             indices = findall(x -> x != 0, pool)
             pick = rand(indices) # cartesian indice
-            wRec[pick] = rand(0.01:0.01:0.05)
+            wRec[pick] = rand(0.001:0.001:0.02)
             synapticActivityCounter[pick] = 0
             synapseReconnectDelay[pick] = -0.1
             # error("DEBUG -> rewireSynapse!")
         else # if neurons not firing at all, try again next time
             synapticActivityCounter[:,:,n,i4][ind] = 0
-            synapseReconnectDelay[:,:,n,i4][ind] = rand(1:1000) * -1
+            synapseReconnectDelay[:,:,n,i4][ind] = rand(1:synapseMaxWaittime) * -1 # wait time
             # error("DEBUG -> rewireSynapse!")
         end
     end
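For context, a sketch of the candidate pool (assumed shapes, not repo code): with an 800-step cumulative spike trace, reconnection targets are now drawn from neurons active in the last 100 steps instead of the last 10.

zitCumulative = rand(0:1, 10, 25, 800)   # assumed spike-history shape
current = size(zitCumulative, 3)
earlier = current - 100 > 0 ? current - 100 : current
pool = sum(zitCumulative[:, :, earlier:current], dims=3)   # recent activity per synapse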
@@ -209,10 +190,11 @@ function rewireSynapse!(wRec::AbstractArray, neuronInactivityCounter::AbstractAr
 end
""" Rank input matrix elements value from high to low (not including 0 in ranking)
and return 2 resulting bitmatrix. 1st matrix contain high rank, 2nd
matrix contain low rank. high and low rank are devided by percent threshold
function rankMatrix(X, percent) """
function rankMatrix(X, percent::Float64)
"""prompt """prompt
write a function in julia that satisfy the following requirements. write a function in julia that satisfy the following requirements.
1. the function operate on column-major 3D matrix 1. the function operate on column-major 3D matrix
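The rankMatrix body itself is not shown in this hunk; the following is only a sketch of the documented contract (the helper name and the third return value are assumptions), splitting nonzero entries into a high-rank share of 1 - percent and the remainder:

function rankMatrix_sketch(X::AbstractArray, percent::Float64)
    nz = sort(filter(!iszero, vec(X)); rev=true)   # nonzero values, high to low
    isempty(nz) && return falses(size(X)), falses(size(X)), zero(eltype(X))
    cut = nz[clamp(ceil(Int, length(nz) * (1 - percent)), 1, length(nz))]
    mask_high = X .>= cut                          # top (1 - percent) share
    mask_low = (.!iszero.(X)) .& .!mask_high       # remaining nonzero entries
    return mask_high, mask_low, cut                # third return value assumed
end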

View File

@@ -7,7 +7,7 @@ export
 # function
 random_wRec
-using Random, GeneralUtils
+using Random, GeneralUtils, LinearAlgebra
 #------------------------------------------------------------------------------------------------100
 rng = MersenneTwister(1234)
@@ -216,8 +216,8 @@ function kfn_1(params::Dict; device=cpu)
     kfn.lif_refractoryCounter = (similar(kfn.lif_wRec) .= 0)
     kfn.lif_refractoryDuration = (similar(kfn.lif_wRec) .= 3)
     kfn.lif_delta = 1.0
-    kfn.lif_tau_m = 20.0
+    kfn.lif_tau_m = 100.0
     kfn.lif_alpha = (similar(kfn.lif_wRec) .= (exp(-kfn.lif_delta / kfn.lif_tau_m)))
     kfn.lif_phi = (similar(kfn.lif_wRec) .= 0)
     kfn.lif_epsilonRec = (similar(kfn.lif_wRec) .= 0)
     kfn.lif_eRec = (similar(kfn.lif_wRec) .= 0)
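Since alpha = exp(-delta / tau_m), raising tau_m from 20 to 100 slows the membrane leak. The per-step retention factors, plain arithmetic for reference:

exp(-1.0 / 20.0)    # ≈ 0.951, old retention per step
exp(-1.0 / 100.0)   # ≈ 0.990, new retention per step, a much slower leak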
@@ -227,7 +227,7 @@ function kfn_1(params::Dict; device=cpu)
     kfn.lif_error = (similar(kfn.lif_wRec) .= 0)
     kfn.lif_firingCounter = (similar(kfn.lif_wRec) .= 0)
-    kfn.lif_firingTargetFrequency = (similar(kfn.lif_wRec) .= 10)
+    kfn.lif_firingTargetFrequency = (similar(kfn.lif_wRec) .= 0.1)
     kfn.lif_neuronInactivityCounter = (similar(kfn.lif_wRec) .= 0)
     # count subscribed synapse activity, just like epsilonRec but without decay.
@@ -265,7 +265,7 @@ function kfn_1(params::Dict; device=cpu)
     kfn.alif_refractoryCounter = (similar(kfn.alif_wRec) .= 0)
     kfn.alif_refractoryDuration = (similar(kfn.alif_wRec) .= 3)
     kfn.alif_delta = 1.0
-    kfn.alif_tau_m = 20.0
+    kfn.alif_tau_m = 100.0
     kfn.alif_alpha = (similar(kfn.alif_wRec) .= (exp(-kfn.alif_delta / kfn.alif_tau_m)))
     kfn.alif_phi = (similar(kfn.alif_wRec) .= 0)
     kfn.alif_epsilonRec = (similar(kfn.alif_wRec) .= 0)
@@ -276,7 +276,7 @@ function kfn_1(params::Dict; device=cpu)
     kfn.alif_error = (similar(kfn.alif_wRec) .= 0)
     kfn.alif_firingCounter = (similar(kfn.alif_wRec) .= 0)
-    kfn.alif_firingTargetFrequency = (similar(kfn.alif_wRec) .= 10)
+    kfn.alif_firingTargetFrequency = (similar(kfn.alif_wRec) .= 0.1)
     kfn.alif_neuronInactivityCounter = (similar(kfn.alif_wRec) .= 0)
     kfn.alif_synapseReconnectDelay = (similar(kfn.alif_wRec) .= -0.1) # -0.1 for non-sub conn
     kfn.alif_synapticActivityCounter = (similar(kfn.alif_wRec) .= 0)
@@ -336,9 +336,9 @@ function kfn_1(params::Dict; device=cpu)
     kfn.on_zt = zeros(1, 1, n, batch) |> device
     kfn.on_zt4d = (similar(kfn.on_wOut) .= 0)
     kfn.on_refractoryCounter = (similar(kfn.on_wOut) .= 0)
-    kfn.on_refractoryDuration = (similar(kfn.on_wOut) .= 0)
+    kfn.on_refractoryDuration = (similar(kfn.on_wOut) .= 1)
     kfn.on_delta = 1.0
-    kfn.on_tau_m = 20.0
+    kfn.on_tau_m = 100.0
     kfn.on_alpha = (similar(kfn.on_wOut) .= (exp(-kfn.on_delta / kfn.on_tau_m)))
     kfn.on_phi = (similar(kfn.on_wOut) .= 0)
     kfn.on_epsilonRec = (similar(kfn.on_wOut) .= 0)
@@ -372,7 +372,7 @@ function random_wRec(row, col, n, synapseConnectionNumber)
     for slice in eachslice(w, dims=3)
         pool = shuffle!([1:row*col...])[1:synapseConnectionNumber]
         for i in pool
-            slice[i] = rand(0.01:0.01:0.05) # assign weight to synaptic connection. /10 to start small,
+            slice[i] = rand() # assign weight to synaptic connection. /10 to start small,
                               # otherwise RSNN's vt Usually stay negative (-)
         end
     end
@@ -382,7 +382,7 @@ function random_wRec(row, col, n, synapseConnectionNumber)
     # avgWeight = sum(w)/length(w)
     # w = w .* (0.01 / avgWeight) # adjust overall weight
-    return w #(row, col, n)
+    return normalize!(w) #(row, col, n)
 end
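An illustration of the initializer's new behaviour (shapes assumed, not repo code): weights are drawn uniformly from [0, 1) and the whole array is rescaled in place to unit 2-norm, so the overall magnitude no longer depends on how many synapses are connected.

using LinearAlgebra, Random
w = zeros(10, 25, 1)            # assumed (row, col, n) shape
for i in shuffle(1:250)[1:20]   # e.g. 20 random synaptic connections
    w[i] = rand()
end
normalize!(w)                   # in place; norm(w) ≈ 1 afterwards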