From 45bd332b6f574fbf83e9c560a8dc9f93a4291bed Mon Sep 17 00:00:00 2001 From: tonaerospace Date: Mon, 4 Dec 2023 04:57:08 +0000 Subject: [PATCH] update --- previousVersion/0.0.4/CondaPkg.toml | 7 + previousVersion/0.0.4/Manifest.toml | 1157 ++++++++++++++++++++ previousVersion/0.0.4/Project.toml | 15 + previousVersion/0.0.4/src/ChatAgent.jl | 101 ++ previousVersion/0.0.4/src/interface.jl | 1229 ++++++++++++++++++++++ previousVersion/0.0.4/src/llmfunction.jl | 146 +++ previousVersion/0.0.4/src/type.jl | 376 +++++++ previousVersion/0.0.4/src/utils.jl | 666 ++++++++++++ 8 files changed, 3697 insertions(+) create mode 100755 previousVersion/0.0.4/CondaPkg.toml create mode 100755 previousVersion/0.0.4/Manifest.toml create mode 100755 previousVersion/0.0.4/Project.toml create mode 100755 previousVersion/0.0.4/src/ChatAgent.jl create mode 100755 previousVersion/0.0.4/src/interface.jl create mode 100644 previousVersion/0.0.4/src/llmfunction.jl create mode 100644 previousVersion/0.0.4/src/type.jl create mode 100644 previousVersion/0.0.4/src/utils.jl diff --git a/previousVersion/0.0.4/CondaPkg.toml b/previousVersion/0.0.4/CondaPkg.toml new file mode 100755 index 0000000..f61a833 --- /dev/null +++ b/previousVersion/0.0.4/CondaPkg.toml @@ -0,0 +1,7 @@ +channels = ["anaconda", "conda-forge", "pytorch"] + +[deps] +python = ">=3.8,<3.11" + +[pip.deps] +langchain = "" diff --git a/previousVersion/0.0.4/Manifest.toml b/previousVersion/0.0.4/Manifest.toml new file mode 100755 index 0000000..95a427f --- /dev/null +++ b/previousVersion/0.0.4/Manifest.toml @@ -0,0 +1,1157 @@ +# This file is machine-generated - editing it directly is not advised + +julia_version = "1.9.4" +manifest_format = "2.0" +project_hash = "b23a59551d194bc84b0faf94c230717f9d7b23c8" + +[[deps.AbstractFFTs]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "d92ad398961a3ed262d8bf04a1a2b8340f915fef" +uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" +version = "1.5.0" +weakdeps = ["ChainRulesCore", "Test"] + + 
[deps.AbstractFFTs.extensions] + AbstractFFTsChainRulesCoreExt = "ChainRulesCore" + AbstractFFTsTestExt = "Test" + +[[deps.Adapt]] +deps = ["LinearAlgebra", "Requires"] +git-tree-sha1 = "02f731463748db57cc2ebfbd9fbc9ce8280d3433" +uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +version = "3.7.1" +weakdeps = ["StaticArrays"] + + [deps.Adapt.extensions] + AdaptStaticArraysExt = "StaticArrays" + +[[deps.ArgCheck]] +git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4" +uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197" +version = "2.3.0" + +[[deps.ArgTools]] +uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" +version = "1.1.1" + +[[deps.Artifacts]] +uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" + +[[deps.Atomix]] +deps = ["UnsafeAtomics"] +git-tree-sha1 = "c06a868224ecba914baa6942988e2f2aade419be" +uuid = "a9b6321e-bd34-4604-b9c9-b65b8de01458" +version = "0.1.0" + +[[deps.BFloat16s]] +deps = ["LinearAlgebra", "Printf", "Random", "Test"] +git-tree-sha1 = "dbf84058d0a8cbbadee18d25cf606934b22d7c66" +uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b" +version = "0.4.2" + +[[deps.BangBang]] +deps = ["Compat", "ConstructionBase", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables"] +git-tree-sha1 = "e28912ce94077686443433c2800104b061a827ed" +uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66" +version = "0.3.39" + + [deps.BangBang.extensions] + BangBangChainRulesCoreExt = "ChainRulesCore" + BangBangDataFramesExt = "DataFrames" + BangBangStaticArraysExt = "StaticArrays" + BangBangStructArraysExt = "StructArrays" + BangBangTypedTablesExt = "TypedTables" + + [deps.BangBang.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" + StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" + StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" + TypedTables = "9d95f2ec-7b3d-5a63-8d20-e2491e220bb9" + +[[deps.Base64]] +uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" + +[[deps.Baselet]] +git-tree-sha1 = 
"aebf55e6d7795e02ca500a689d326ac979aaf89e" +uuid = "9718e550-a3fa-408a-8086-8db961cd8217" +version = "0.1.1" + +[[deps.BitFlags]] +git-tree-sha1 = "2dc09997850d68179b69dafb58ae806167a32b1b" +uuid = "d1d4a3ce-64b1-5f1a-9ba4-7e7e69966f35" +version = "0.1.8" + +[[deps.CEnum]] +git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90" +uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82" +version = "0.4.2" + +[[deps.CUDA]] +deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CUDA_Driver_jll", "CUDA_Runtime_Discovery", "CUDA_Runtime_jll", "Crayons", "DataFrames", "ExprTools", "GPUArrays", "GPUCompiler", "KernelAbstractions", "LLVM", "LLVMLoopInfo", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "NVTX", "Preferences", "PrettyTables", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "Statistics", "UnsafeAtomicsLLVM"] +git-tree-sha1 = "64461b0e9df3069248979113ce8ab6d11bd371cf" +uuid = "052768ef-5323-5732-b1bb-66c8b64840ba" +version = "5.1.0" +weakdeps = ["ChainRulesCore", "SpecialFunctions"] + + [deps.CUDA.extensions] + ChainRulesCoreExt = "ChainRulesCore" + SpecialFunctionsExt = "SpecialFunctions" + +[[deps.CUDA_Driver_jll]] +deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"] +git-tree-sha1 = "1e42ef1bdb45487ff28de16182c0df4920181dc3" +uuid = "4ee394cb-3365-5eb0-8335-949819d2adfc" +version = "0.7.0+0" + +[[deps.CUDA_Runtime_Discovery]] +deps = ["Libdl"] +git-tree-sha1 = "bcc4a23cbbd99c8535a5318455dcf0f2546ec536" +uuid = "1af6417a-86b4-443c-805f-a4643ffb695f" +version = "0.2.2" + +[[deps.CUDA_Runtime_jll]] +deps = ["Artifacts", "CUDA_Driver_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] +git-tree-sha1 = "92394521ec4582c11d089a3b15b76ef2cb850994" +uuid = "76a88914-d11a-5bdc-97e0-2f5a05c973a2" +version = "0.10.0+1" + +[[deps.Calculus]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "f641eb0a4f00c343bbc32346e1217b86f3ce9dad" +uuid = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9" +version = "0.5.1" + 
+[[deps.ChainRules]] +deps = ["Adapt", "ChainRulesCore", "Compat", "Distributed", "GPUArraysCore", "IrrationalConstants", "LinearAlgebra", "Random", "RealDot", "SparseArrays", "SparseInverseSubset", "Statistics", "StructArrays", "SuiteSparse"] +git-tree-sha1 = "006cc7170be3e0fa02ccac6d4164a1eee1fc8c27" +uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2" +version = "1.58.0" + +[[deps.ChainRulesCore]] +deps = ["Compat", "LinearAlgebra"] +git-tree-sha1 = "e0af648f0692ec1691b5d094b8724ba1346281cf" +uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" +version = "1.18.0" +weakdeps = ["SparseArrays"] + + [deps.ChainRulesCore.extensions] + ChainRulesCoreSparseArraysExt = "SparseArrays" + +[[deps.CodecZlib]] +deps = ["TranscodingStreams", "Zlib_jll"] +git-tree-sha1 = "cd67fc487743b2f0fd4380d4cbd3a24660d0eec8" +uuid = "944b1d66-785c-5afd-91f1-9de20f533193" +version = "0.7.3" + +[[deps.ColorTypes]] +deps = ["FixedPointNumbers", "Random"] +git-tree-sha1 = "eb7f0f8307f71fac7c606984ea5fb2817275d6e4" +uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f" +version = "0.11.4" + +[[deps.Colors]] +deps = ["ColorTypes", "FixedPointNumbers", "Reexport"] +git-tree-sha1 = "fc08e5930ee9a4e03f84bfb5211cb54e7769758a" +uuid = "5ae59095-9a9b-59fe-a467-6f913c188581" +version = "0.12.10" + +[[deps.CommUtils]] +deps = ["Dates", "JSON3", "Mosquitto", "Redis", "UUIDs"] +path = "/privatejuliapkg/CommUtils" +uuid = "646cbe82-3d4a-47b2-9440-2e80a472ca20" +version = "0.1.0" + +[[deps.CommonSubexpressions]] +deps = ["MacroTools", "Test"] +git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7" +uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" +version = "0.3.0" + +[[deps.Compat]] +deps = ["UUIDs"] +git-tree-sha1 = "8a62af3e248a8c4bad6b32cbbe663ae02275e32c" +uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" +version = "4.10.0" +weakdeps = ["Dates", "LinearAlgebra"] + + [deps.Compat.extensions] + CompatLinearAlgebraExt = "LinearAlgebra" + +[[deps.CompilerSupportLibraries_jll]] +deps = ["Artifacts", "Libdl"] +uuid = 
"e66e0078-7015-5450-92f7-15fbd957f2ae" +version = "1.0.5+0" + +[[deps.CompositionsBase]] +git-tree-sha1 = "802bb88cd69dfd1509f6670416bd4434015693ad" +uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b" +version = "0.1.2" + + [deps.CompositionsBase.extensions] + CompositionsBaseInverseFunctionsExt = "InverseFunctions" + + [deps.CompositionsBase.weakdeps] + InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112" + +[[deps.ConcurrentUtilities]] +deps = ["Serialization", "Sockets"] +git-tree-sha1 = "8cfa272e8bdedfa88b6aefbbca7c19f1befac519" +uuid = "f0e56b4a-5159-44fe-b623-3e5288b988bb" +version = "2.3.0" + +[[deps.CondaPkg]] +deps = ["JSON3", "Markdown", "MicroMamba", "Pidfile", "Pkg", "Preferences", "TOML"] +git-tree-sha1 = "bbd0c518cb11acc6707190199025dbc34b6c7ca7" +uuid = "992eb4ea-22a4-4c89-a5bb-47a3300528ab" +version = "0.2.21" + +[[deps.ConstructionBase]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "c53fc348ca4d40d7b371e71fd52251839080cbc9" +uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9" +version = "1.5.4" + + [deps.ConstructionBase.extensions] + ConstructionBaseIntervalSetsExt = "IntervalSets" + ConstructionBaseStaticArraysExt = "StaticArrays" + + [deps.ConstructionBase.weakdeps] + IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953" + StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" + +[[deps.ContextVariablesX]] +deps = ["Compat", "Logging", "UUIDs"] +git-tree-sha1 = "25cc3803f1030ab855e383129dcd3dc294e322cc" +uuid = "6add18c4-b38d-439d-96f6-d6bc489c04c5" +version = "0.1.3" + +[[deps.Crayons]] +git-tree-sha1 = "249fe38abf76d48563e2f4556bebd215aa317e15" +uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f" +version = "4.1.1" + +[[deps.DataAPI]] +git-tree-sha1 = "8da84edb865b0b5b0100c0666a9bc9a0b71c553c" +uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" +version = "1.15.0" + +[[deps.DataFrames]] +deps = ["Compat", "DataAPI", "DataStructures", "Future", "InlineStrings", "InvertedIndices", "IteratorInterfaceExtensions", "LinearAlgebra", "Markdown", "Missings", 
"PooledArrays", "PrecompileTools", "PrettyTables", "Printf", "REPL", "Random", "Reexport", "SentinelArrays", "SortingAlgorithms", "Statistics", "TableTraits", "Tables", "Unicode"] +git-tree-sha1 = "04c738083f29f86e62c8afc341f0967d8717bdb8" +uuid = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" +version = "1.6.1" + +[[deps.DataStructures]] +deps = ["Compat", "InteractiveUtils", "OrderedCollections"] +git-tree-sha1 = "3dbd312d370723b6bb43ba9d02fc36abade4518d" +uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" +version = "0.18.15" + +[[deps.DataValueInterfaces]] +git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" +uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464" +version = "1.0.0" + +[[deps.Dates]] +deps = ["Printf"] +uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" + +[[deps.DefineSingletons]] +git-tree-sha1 = "0fba8b706d0178b4dc7fd44a96a92382c9065c2c" +uuid = "244e2a9f-e319-4986-a169-4d1fe445cd52" +version = "0.1.2" + +[[deps.DelimitedFiles]] +deps = ["Mmap"] +git-tree-sha1 = "9e2f36d3c96a820c678f2f1f1782582fcf685bae" +uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" +version = "1.9.1" + +[[deps.DiffResults]] +deps = ["StaticArraysCore"] +git-tree-sha1 = "782dd5f4561f5d267313f23853baaaa4c52ea621" +uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5" +version = "1.1.0" + +[[deps.DiffRules]] +deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] +git-tree-sha1 = "23163d55f885173722d1e4cf0f6110cdbaf7e272" +uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" +version = "1.15.1" + +[[deps.Distributed]] +deps = ["Random", "Serialization", "Sockets"] +uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" + +[[deps.Distributions]] +deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns"] +git-tree-sha1 = "a6c00f894f24460379cb7136633cef54ac9f6f4a" +uuid = "31c24e10-a181-5473-b8eb-7969acd0382f" +version = "0.25.103" + + [deps.Distributions.extensions] + 
DistributionsChainRulesCoreExt = "ChainRulesCore" + DistributionsDensityInterfaceExt = "DensityInterface" + DistributionsTestExt = "Test" + + [deps.Distributions.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + DensityInterface = "b429d917-457f-4dbc-8f4c-0cc954292b1d" + Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[[deps.DocStringExtensions]] +deps = ["LibGit2"] +git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d" +uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" +version = "0.9.3" + +[[deps.Downloads]] +deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] +uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" +version = "1.6.0" + +[[deps.DualNumbers]] +deps = ["Calculus", "NaNMath", "SpecialFunctions"] +git-tree-sha1 = "5837a837389fccf076445fce071c8ddaea35a566" +uuid = "fa6b7ba4-c1ee-5f82-b5fc-ecf0adba8f74" +version = "0.6.8" + +[[deps.ExceptionUnwrapping]] +deps = ["Test"] +git-tree-sha1 = "e90caa41f5a86296e014e148ee061bd6c3edec96" +uuid = "460bff9d-24e4-43bc-9d9f-a8973cb893f4" +version = "0.1.9" + +[[deps.ExprTools]] +git-tree-sha1 = "27415f162e6028e81c72b82ef756bf321213b6ec" +uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04" +version = "0.1.10" + +[[deps.FLoops]] +deps = ["BangBang", "Compat", "FLoopsBase", "InitialValues", "JuliaVariables", "MLStyle", "Serialization", "Setfield", "Transducers"] +git-tree-sha1 = "ffb97765602e3cbe59a0589d237bf07f245a8576" +uuid = "cc61a311-1640-44b5-9fba-1b764f453329" +version = "0.2.1" + +[[deps.FLoopsBase]] +deps = ["ContextVariablesX"] +git-tree-sha1 = "656f7a6859be8673bf1f35da5670246b923964f7" +uuid = "b9860ae5-e623-471e-878b-f6a53c775ea6" +version = "0.1.1" + +[[deps.FileWatching]] +uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" + +[[deps.FillArrays]] +deps = ["LinearAlgebra", "Random"] +git-tree-sha1 = "35f0c0f345bff2c6d636f95fdb136323b5a796ef" +uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" +version = "1.7.0" +weakdeps = ["SparseArrays", "Statistics"] + + [deps.FillArrays.extensions] + 
FillArraysSparseArraysExt = "SparseArrays" + FillArraysStatisticsExt = "Statistics" + +[[deps.FixedPointNumbers]] +deps = ["Statistics"] +git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc" +uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93" +version = "0.8.4" + +[[deps.Flux]] +deps = ["Adapt", "ChainRulesCore", "Functors", "LinearAlgebra", "MLUtils", "MacroTools", "NNlib", "OneHotArrays", "Optimisers", "Preferences", "ProgressLogging", "Random", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "Zygote"] +git-tree-sha1 = "b97c3fc4f3628b8835d83789b09382961a254da4" +uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c" +version = "0.14.6" + + [deps.Flux.extensions] + FluxAMDGPUExt = "AMDGPU" + FluxCUDAExt = "CUDA" + FluxCUDAcuDNNExt = ["CUDA", "cuDNN"] + FluxMetalExt = "Metal" + + [deps.Flux.weakdeps] + AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" + CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" + Metal = "dde4c033-4e86-420c-a63e-0dd931031962" + cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd" + +[[deps.ForwardDiff]] +deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"] +git-tree-sha1 = "cf0fe81336da9fb90944683b8c41984b08793dad" +uuid = "f6369f11-7733-5829-9624-2563aa707210" +version = "0.10.36" +weakdeps = ["StaticArrays"] + + [deps.ForwardDiff.extensions] + ForwardDiffStaticArraysExt = "StaticArrays" + +[[deps.Functors]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "9a68d75d466ccc1218d0552a8e1631151c569545" +uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196" +version = "0.4.5" + +[[deps.Future]] +deps = ["Random"] +uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" + +[[deps.GPUArrays]] +deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"] +git-tree-sha1 = "85d7fb51afb3def5dcb85ad31c3707795c8bccc1" +uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7" +version = "9.1.0" + +[[deps.GPUArraysCore]] +deps 
= ["Adapt"] +git-tree-sha1 = "2d6ca471a6c7b536127afccfa7564b5b39227fe0" +uuid = "46192b85-c4d5-4398-a991-12ede77f4527" +version = "0.1.5" + +[[deps.GPUCompiler]] +deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Scratch", "TimerOutputs", "UUIDs"] +git-tree-sha1 = "a846f297ce9d09ccba02ead0cae70690e072a119" +uuid = "61eb1bfa-7361-4325-ad38-22787b887f55" +version = "0.25.0" + +[[deps.GeneralUtils]] +deps = ["CUDA", "DataStructures", "Distributions", "Flux", "JSON3", "Random"] +path = "/privatejuliapkg/GeneralUtils/" +uuid = "c6c72f09-b708-4ac8-ac7c-2084d70108fe" +version = "0.1.0" + +[[deps.HTTP]] +deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"] +git-tree-sha1 = "5eab648309e2e060198b45820af1a37182de3cce" +uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" +version = "1.10.0" + +[[deps.HypergeometricFunctions]] +deps = ["DualNumbers", "LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"] +git-tree-sha1 = "f218fe3736ddf977e0e772bc9a586b2383da2685" +uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a" +version = "0.3.23" + +[[deps.IRTools]] +deps = ["InteractiveUtils", "MacroTools", "Test"] +git-tree-sha1 = "8aa91235360659ca7560db43a7d57541120aa31d" +uuid = "7869d1d1-7146-5819-86e3-90919afe41df" +version = "0.4.11" + +[[deps.InitialValues]] +git-tree-sha1 = "4da0f88e9a39111c2fa3add390ab15f3a44f3ca3" +uuid = "22cec73e-a1b8-11e9-2c92-598750a2cf9c" +version = "0.3.1" + +[[deps.InlineStrings]] +deps = ["Parsers"] +git-tree-sha1 = "9cc2baf75c6d09f9da536ddf58eb2f29dedaf461" +uuid = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48" +version = "1.4.0" + +[[deps.InteractiveUtils]] +deps = ["Markdown"] +uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" + +[[deps.InvertedIndices]] +git-tree-sha1 = "0dc7b50b8d436461be01300fd8cd45aa0274b038" +uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f" +version = "1.3.0" + 
+[[deps.IrrationalConstants]] +git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2" +uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" +version = "0.2.2" + +[[deps.IteratorInterfaceExtensions]] +git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856" +uuid = "82899510-4779-5014-852e-03e436cf321d" +version = "1.0.0" + +[[deps.JLLWrappers]] +deps = ["Artifacts", "Preferences"] +git-tree-sha1 = "7e5d6779a1e09a36db2a7b6cff50942a0a7d0fca" +uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" +version = "1.5.0" + +[[deps.JSON3]] +deps = ["Dates", "Mmap", "Parsers", "PrecompileTools", "StructTypes", "UUIDs"] +git-tree-sha1 = "95220473901735a0f4df9d1ca5b171b568b2daa3" +uuid = "0f8b85d8-7281-11e9-16c2-39a750bddbf1" +version = "1.13.2" + +[[deps.JuliaNVTXCallbacks_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "af433a10f3942e882d3c671aacb203e006a5808f" +uuid = "9c1d0b0a-7046-5b2e-a33f-ea22f176ac7e" +version = "0.2.1+0" + +[[deps.JuliaVariables]] +deps = ["MLStyle", "NameResolution"] +git-tree-sha1 = "49fb3cb53362ddadb4415e9b73926d6b40709e70" +uuid = "b14d175d-62b4-44ba-8fb7-3064adc8c3ec" +version = "0.2.4" + +[[deps.KernelAbstractions]] +deps = ["Adapt", "Atomix", "InteractiveUtils", "LinearAlgebra", "MacroTools", "PrecompileTools", "Requires", "SparseArrays", "StaticArrays", "UUIDs", "UnsafeAtomics", "UnsafeAtomicsLLVM"] +git-tree-sha1 = "b0737cbbe1c8da6f1139d1c23e35e7cea129c0af" +uuid = "63c18a36-062a-441e-b654-da1e3ab1ce7c" +version = "0.9.13" + + [deps.KernelAbstractions.extensions] + EnzymeExt = "EnzymeCore" + + [deps.KernelAbstractions.weakdeps] + EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869" + +[[deps.LLVM]] +deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Preferences", "Printf", "Requires", "Unicode"] +git-tree-sha1 = "c879e47398a7ab671c782e02b51a4456794a7fa3" +uuid = "929cbde3-209d-540e-8aea-75f648917ca0" +version = "6.4.0" +weakdeps = ["BFloat16s"] + + [deps.LLVM.extensions] + BFloat16sExt = "BFloat16s" + +[[deps.LLVMExtra_jll]] +deps = 
["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] +git-tree-sha1 = "98eaee04d96d973e79c25d49167668c5c8fb50e2" +uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab" +version = "0.0.27+1" + +[[deps.LLVMLoopInfo]] +git-tree-sha1 = "2e5c102cfc41f48ae4740c7eca7743cc7e7b75ea" +uuid = "8b046642-f1f6-4319-8d3c-209ddc03c586" +version = "1.0.0" + +[[deps.LaTeXStrings]] +git-tree-sha1 = "50901ebc375ed41dbf8058da26f9de442febbbec" +uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" +version = "1.3.1" + +[[deps.LazyArtifacts]] +deps = ["Artifacts", "Pkg"] +uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" + +[[deps.LibCURL]] +deps = ["LibCURL_jll", "MozillaCACerts_jll"] +uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" +version = "0.6.4" + +[[deps.LibCURL_jll]] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] +uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" +version = "8.4.0+0" + +[[deps.LibGit2]] +deps = ["Base64", "NetworkOptions", "Printf", "SHA"] +uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" + +[[deps.LibSSH2_jll]] +deps = ["Artifacts", "Libdl", "MbedTLS_jll"] +uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" +version = "1.11.0+1" + +[[deps.Libdl]] +uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" + +[[deps.LinearAlgebra]] +deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"] +uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" + +[[deps.LogExpFunctions]] +deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"] +git-tree-sha1 = "7d6dd4e9212aebaeed356de34ccf262a3cd415aa" +uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" +version = "0.3.26" + + [deps.LogExpFunctions.extensions] + LogExpFunctionsChainRulesCoreExt = "ChainRulesCore" + LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables" + LogExpFunctionsInverseFunctionsExt = "InverseFunctions" + + [deps.LogExpFunctions.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" + InverseFunctions = 
"3587e190-3f89-42d0-90ee-14403ec27112" + +[[deps.Logging]] +uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" + +[[deps.LoggingExtras]] +deps = ["Dates", "Logging"] +git-tree-sha1 = "c1dd6d7978c12545b4179fb6153b9250c96b0075" +uuid = "e6f89c97-d47a-5376-807f-9c37f3926c36" +version = "1.0.3" + +[[deps.MLStyle]] +git-tree-sha1 = "bc38dff0548128765760c79eb7388a4b37fae2c8" +uuid = "d8e11817-5142-5d16-987a-aa16d5891078" +version = "0.4.17" + +[[deps.MLUtils]] +deps = ["ChainRulesCore", "Compat", "DataAPI", "DelimitedFiles", "FLoops", "NNlib", "Random", "ShowCases", "SimpleTraits", "Statistics", "StatsBase", "Tables", "Transducers"] +git-tree-sha1 = "3504cdb8c2bc05bde4d4b09a81b01df88fcbbba0" +uuid = "f1d291b0-491e-4a28-83b9-f70985020b54" +version = "0.4.3" + +[[deps.MacroTools]] +deps = ["Markdown", "Random"] +git-tree-sha1 = "9ee1618cbf5240e6d4e0371d6f24065083f60c48" +uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" +version = "0.5.11" + +[[deps.Markdown]] +deps = ["Base64"] +uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" + +[[deps.MbedTLS]] +deps = ["Dates", "MbedTLS_jll", "MozillaCACerts_jll", "NetworkOptions", "Random", "Sockets"] +git-tree-sha1 = "f512dc13e64e96f703fd92ce617755ee6b5adf0f" +uuid = "739be429-bea8-5141-9913-cc70e7f3736d" +version = "1.1.8" + +[[deps.MbedTLS_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" +version = "2.28.2+0" + +[[deps.MicroCollections]] +deps = ["BangBang", "InitialValues", "Setfield"] +git-tree-sha1 = "629afd7d10dbc6935ec59b32daeb33bc4460a42e" +uuid = "128add7d-3638-4c79-886c-908ea0c25c34" +version = "0.1.4" + +[[deps.MicroMamba]] +deps = ["Pkg", "Scratch", "micromamba_jll"] +git-tree-sha1 = "011cab361eae7bcd7d278f0a7a00ff9c69000c51" +uuid = "0b3b1443-0f03-428d-bdfb-f27f9c1191ea" +version = "0.1.14" + +[[deps.Missings]] +deps = ["DataAPI"] +git-tree-sha1 = "f66bdc5de519e8f8ae43bdc598782d35a25b1272" +uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" +version = "1.1.0" + +[[deps.Mmap]] +uuid = 
"a63ad114-7e13-5084-954f-fe012c677804" + +[[deps.Mosquitto]] +deps = ["Libdl", "Random", "Test"] +path = "../Mosquitto" +uuid = "db317de6-444b-4dfa-9d0e-fbf3d8dd78ea" +version = "0.4.1" + +[[deps.MozillaCACerts_jll]] +uuid = "14a3606d-f60d-562e-9121-12d972cd8159" +version = "2022.10.11" + +[[deps.NNlib]] +deps = ["Adapt", "Atomix", "ChainRulesCore", "GPUArraysCore", "KernelAbstractions", "LinearAlgebra", "Pkg", "Random", "Requires", "Statistics"] +git-tree-sha1 = "3bc568de99214f72a76c7773ade218819afcc36e" +uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" +version = "0.9.7" + + [deps.NNlib.extensions] + NNlibAMDGPUExt = "AMDGPU" + NNlibCUDACUDNNExt = ["CUDA", "cuDNN"] + NNlibCUDAExt = "CUDA" + NNlibEnzymeCoreExt = "EnzymeCore" + + [deps.NNlib.weakdeps] + AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" + CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" + EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869" + cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd" + +[[deps.NVTX]] +deps = ["Colors", "JuliaNVTXCallbacks_jll", "Libdl", "NVTX_jll"] +git-tree-sha1 = "8bc9ce4233be3c63f8dcd78ccaf1b63a9c0baa34" +uuid = "5da4648a-3479-48b8-97b9-01cb529c0a1f" +version = "0.3.3" + +[[deps.NVTX_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "ce3269ed42816bf18d500c9f63418d4b0d9f5a3b" +uuid = "e98f9f5b-d649-5603-91fd-7774390e6439" +version = "3.1.0+2" + +[[deps.NaNMath]] +deps = ["OpenLibm_jll"] +git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4" +uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" +version = "1.0.2" + +[[deps.NameResolution]] +deps = ["PrettyPrint"] +git-tree-sha1 = "1a0fa0e9613f46c9b8c11eee38ebb4f590013c5e" +uuid = "71a1bf82-56d0-4bbc-8a3c-48b961074391" +version = "0.1.5" + +[[deps.NetworkOptions]] +uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" +version = "1.2.0" + +[[deps.OneHotArrays]] +deps = ["Adapt", "ChainRulesCore", "Compat", "GPUArraysCore", "LinearAlgebra", "NNlib"] +git-tree-sha1 = "5e4029759e8699ec12ebdf8721e51a659443403c" +uuid = 
"0b1bfda6-eb8a-41d2-88d8-f5af5cad476f" +version = "0.2.4" + +[[deps.OpenBLAS_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] +uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" +version = "0.3.21+4" + +[[deps.OpenLibm_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "05823500-19ac-5b8b-9628-191a04bc5112" +version = "0.8.1+0" + +[[deps.OpenSSL]] +deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "OpenSSL_jll", "Sockets"] +git-tree-sha1 = "51901a49222b09e3743c65b8847687ae5fc78eb2" +uuid = "4d8831e6-92b7-49fb-bdf8-b643e874388c" +version = "1.4.1" + +[[deps.OpenSSL_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "cc6e1927ac521b659af340e0ca45828a3ffc748f" +uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" +version = "3.0.12+0" + +[[deps.OpenSpecFun_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1" +uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" +version = "0.5.5+0" + +[[deps.Optimisers]] +deps = ["ChainRulesCore", "Functors", "LinearAlgebra", "Random", "Statistics"] +git-tree-sha1 = "34205b1204cc83c43cd9cfe53ffbd3b310f6e8c5" +uuid = "3bd65402-5787-11e9-1adc-39752487f4e2" +version = "0.3.1" + +[[deps.OrderedCollections]] +git-tree-sha1 = "2e73fe17cac3c62ad1aebe70d44c963c3cfdc3e3" +uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" +version = "1.6.2" + +[[deps.PDMats]] +deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] +git-tree-sha1 = "f6f85a2edb9c356b829934ad3caed2ad0ebbfc99" +uuid = "90014a1f-27ba-587c-ab20-58faa44d9150" +version = "0.11.29" + +[[deps.Parsers]] +deps = ["Dates", "PrecompileTools", "UUIDs"] +git-tree-sha1 = "a935806434c9d4c506ba941871b327b96d41f2bf" +uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" +version = "2.8.0" + +[[deps.Pidfile]] +deps = ["FileWatching", "Test"] +git-tree-sha1 = "2d8aaf8ee10df53d0dfb9b8ee44ae7c04ced2b03" +uuid = "fa939f87-e72e-5be4-a000-7fc836dbe307" +version = "1.3.0" + +[[deps.Pkg]] +deps = 
["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] +uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" +version = "1.9.2" + +[[deps.PooledArrays]] +deps = ["DataAPI", "Future"] +git-tree-sha1 = "36d8b4b899628fb92c2749eb488d884a926614d3" +uuid = "2dfb63ee-cc39-5dd5-95bd-886bf059d720" +version = "1.4.3" + +[[deps.PrecompileTools]] +deps = ["Preferences"] +git-tree-sha1 = "03b4c25b43cb84cee5c90aa9b5ea0a78fd848d2f" +uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a" +version = "1.2.0" + +[[deps.Preferences]] +deps = ["TOML"] +git-tree-sha1 = "00805cd429dcb4870060ff49ef443486c262e38e" +uuid = "21216c6a-2e73-6563-6e65-726566657250" +version = "1.4.1" + +[[deps.PrettyPrint]] +git-tree-sha1 = "632eb4abab3449ab30c5e1afaa874f0b98b586e4" +uuid = "8162dcfd-2161-5ef2-ae6c-7681170c5f98" +version = "0.2.0" + +[[deps.PrettyTables]] +deps = ["Crayons", "LaTeXStrings", "Markdown", "PrecompileTools", "Printf", "Reexport", "StringManipulation", "Tables"] +git-tree-sha1 = "3f43c2aae6aa4a2503b05587ab74f4f6aeff9fd0" +uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" +version = "2.3.0" + +[[deps.Printf]] +deps = ["Unicode"] +uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" + +[[deps.ProgressLogging]] +deps = ["Logging", "SHA", "UUIDs"] +git-tree-sha1 = "80d919dee55b9c50e8d9e2da5eeafff3fe58b539" +uuid = "33c8b6b6-d38a-422a-b730-caa89a2f386c" +version = "0.1.4" + +[[deps.PythonCall]] +deps = ["CondaPkg", "Dates", "Libdl", "MacroTools", "Markdown", "Pkg", "REPL", "Requires", "Serialization", "Tables", "UnsafePointers"] +git-tree-sha1 = "70af6bdbde63d7d0a4ea99f3e890ebdb55e9d464" +uuid = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d" +version = "0.9.14" + +[[deps.QuadGK]] +deps = ["DataStructures", "LinearAlgebra"] +git-tree-sha1 = "9ebcd48c498668c7fa0e97a9cae873fbee7bfee1" +uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" +version = "2.9.1" + +[[deps.REPL]] +deps = ["InteractiveUtils", 
"Markdown", "Sockets", "Unicode"] +uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" + +[[deps.Random]] +deps = ["SHA", "Serialization"] +uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + +[[deps.Random123]] +deps = ["Random", "RandomNumbers"] +git-tree-sha1 = "552f30e847641591ba3f39fd1bed559b9deb0ef3" +uuid = "74087812-796a-5b5d-8853-05524746bad3" +version = "1.6.1" + +[[deps.RandomNumbers]] +deps = ["Random", "Requires"] +git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111" +uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143" +version = "1.5.3" + +[[deps.RealDot]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9" +uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9" +version = "0.1.0" + +[[deps.Redis]] +deps = ["DataStructures", "Dates", "Sockets"] +git-tree-sha1 = "6b3c136222b08ae0c71657f2501c6741782a1ad1" +uuid = "0cf705f9-a9e2-50d1-a699-2b372a39b750" +version = "1.0.0" + +[[deps.Reexport]] +git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" +uuid = "189a3867-3050-52da-a836-e630ba90ab69" +version = "1.2.2" + +[[deps.Requires]] +deps = ["UUIDs"] +git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7" +uuid = "ae029012-a4dd-5104-9daa-d747884805df" +version = "1.3.0" + +[[deps.Rmath]] +deps = ["Random", "Rmath_jll"] +git-tree-sha1 = "f65dcb5fa46aee0cf9ed6274ccbd597adc49aa7b" +uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa" +version = "0.7.1" + +[[deps.Rmath_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "6ed52fdd3382cf21947b15e8870ac0ddbff736da" +uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f" +version = "0.4.0+0" + +[[deps.SHA]] +uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" +version = "0.7.0" + +[[deps.Scratch]] +deps = ["Dates"] +git-tree-sha1 = "3bac05bc7e74a75fd9cba4295cde4045d9fe2386" +uuid = "6c6a2e73-6563-6170-7368-637461726353" +version = "1.2.1" + +[[deps.SentinelArrays]] +deps = ["Dates", "Random"] +git-tree-sha1 = "0e7508ff27ba32f26cd459474ca2ede1bc10991f" +uuid = 
"91c51154-3ec4-41a3-a24f-3f23e20d615c" +version = "1.4.1" + +[[deps.Serialization]] +uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" + +[[deps.Setfield]] +deps = ["ConstructionBase", "Future", "MacroTools", "StaticArraysCore"] +git-tree-sha1 = "e2cc6d8c88613c05e1defb55170bf5ff211fbeac" +uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46" +version = "1.1.1" + +[[deps.ShowCases]] +git-tree-sha1 = "7f534ad62ab2bd48591bdeac81994ea8c445e4a5" +uuid = "605ecd9f-84a6-4c9e-81e2-4798472b76a3" +version = "0.1.0" + +[[deps.SimpleBufferStream]] +git-tree-sha1 = "874e8867b33a00e784c8a7e4b60afe9e037b74e1" +uuid = "777ac1f9-54b0-4bf8-805c-2214025038e7" +version = "1.1.0" + +[[deps.SimpleTraits]] +deps = ["InteractiveUtils", "MacroTools"] +git-tree-sha1 = "5d7e3f4e11935503d3ecaf7186eac40602e7d231" +uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d" +version = "0.9.4" + +[[deps.Sockets]] +uuid = "6462fe0b-24de-5631-8697-dd941f90decc" + +[[deps.SortingAlgorithms]] +deps = ["DataStructures"] +git-tree-sha1 = "5165dfb9fd131cf0c6957a3a7605dede376e7b63" +uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" +version = "1.2.0" + +[[deps.SparseArrays]] +deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"] +uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + +[[deps.SparseInverseSubset]] +deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] +git-tree-sha1 = "91402087fd5d13b2d97e3ef29bbdf9d7859e678a" +uuid = "dc90abb0-5640-4711-901d-7e5b23a2fada" +version = "0.1.1" + +[[deps.SpecialFunctions]] +deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] +git-tree-sha1 = "e2cfc4012a19088254b3950b85c3c1d8882d864d" +uuid = "276daf66-3868-5448-9aa4-cd146d93841b" +version = "2.3.1" +weakdeps = ["ChainRulesCore"] + + [deps.SpecialFunctions.extensions] + SpecialFunctionsChainRulesCoreExt = "ChainRulesCore" + +[[deps.SplittablesBase]] +deps = ["Setfield", "Test"] +git-tree-sha1 = "e08a62abc517eb79667d0a29dc08a3b589516bb5" +uuid = "171d559e-b47b-412a-8079-5efa626c420e" 
+version = "0.1.15" + +[[deps.StaticArrays]] +deps = ["LinearAlgebra", "PrecompileTools", "Random", "StaticArraysCore"] +git-tree-sha1 = "5ef59aea6f18c25168842bded46b16662141ab87" +uuid = "90137ffa-7385-5640-81b9-e52037218182" +version = "1.7.0" +weakdeps = ["Statistics"] + + [deps.StaticArrays.extensions] + StaticArraysStatisticsExt = "Statistics" + +[[deps.StaticArraysCore]] +git-tree-sha1 = "36b3d696ce6366023a0ea192b4cd442268995a0d" +uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" +version = "1.4.2" + +[[deps.Statistics]] +deps = ["LinearAlgebra", "SparseArrays"] +uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" +version = "1.9.0" + +[[deps.StatsAPI]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "1ff449ad350c9c4cbc756624d6f8a8c3ef56d3ed" +uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0" +version = "1.7.0" + +[[deps.StatsBase]] +deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] +git-tree-sha1 = "1d77abd07f617c4868c33d4f5b9e1dbb2643c9cf" +uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" +version = "0.34.2" + +[[deps.StatsFuns]] +deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"] +git-tree-sha1 = "f625d686d5a88bcd2b15cd81f18f98186fdc0c9a" +uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c" +version = "1.3.0" + + [deps.StatsFuns.extensions] + StatsFunsChainRulesCoreExt = "ChainRulesCore" + StatsFunsInverseFunctionsExt = "InverseFunctions" + + [deps.StatsFuns.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112" + +[[deps.StringManipulation]] +deps = ["PrecompileTools"] +git-tree-sha1 = "a04cabe79c5f01f4d723cc6704070ada0b9d46d5" +uuid = "892a3eda-7b42-436c-8928-eab12a02cf0e" +version = "0.3.4" + +[[deps.StructArrays]] +deps = ["Adapt", "ConstructionBase", "DataAPI", "GPUArraysCore", "StaticArraysCore", "Tables"] +git-tree-sha1 = 
"0a3db38e4cce3c54fe7a71f831cd7b6194a54213" +uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" +version = "0.6.16" + +[[deps.StructTypes]] +deps = ["Dates", "UUIDs"] +git-tree-sha1 = "ca4bccb03acf9faaf4137a9abc1881ed1841aa70" +uuid = "856f2bd8-1eba-4b0a-8007-ebc267875bd4" +version = "1.10.0" + +[[deps.SuiteSparse]] +deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] +uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" + +[[deps.SuiteSparse_jll]] +deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"] +uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" +version = "5.10.1+6" + +[[deps.TOML]] +deps = ["Dates"] +uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" +version = "1.0.3" + +[[deps.TableTraits]] +deps = ["IteratorInterfaceExtensions"] +git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39" +uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c" +version = "1.0.1" + +[[deps.Tables]] +deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits"] +git-tree-sha1 = "cb76cf677714c095e535e3501ac7954732aeea2d" +uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" +version = "1.11.1" + +[[deps.Tar]] +deps = ["ArgTools", "SHA"] +uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" +version = "1.10.0" + +[[deps.Test]] +deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] +uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[[deps.TimerOutputs]] +deps = ["ExprTools", "Printf"] +git-tree-sha1 = "f548a9e9c490030e545f72074a41edfd0e5bcdd7" +uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" +version = "0.5.23" + +[[deps.TranscodingStreams]] +git-tree-sha1 = "1fbeaaca45801b4ba17c251dd8603ef24801dd84" +uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" +version = "0.10.2" +weakdeps = ["Random", "Test"] + + [deps.TranscodingStreams.extensions] + TestExt = ["Test", "Random"] + +[[deps.Transducers]] +deps = ["Adapt", "ArgCheck", "BangBang", "Baselet", "CompositionsBase", "ConstructionBase", "DefineSingletons", "Distributed", 
"InitialValues", "Logging", "Markdown", "MicroCollections", "Requires", "Setfield", "SplittablesBase", "Tables"] +git-tree-sha1 = "e579d3c991938fecbb225699e8f611fa3fbf2141" +uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999" +version = "0.4.79" + + [deps.Transducers.extensions] + TransducersBlockArraysExt = "BlockArrays" + TransducersDataFramesExt = "DataFrames" + TransducersLazyArraysExt = "LazyArrays" + TransducersOnlineStatsBaseExt = "OnlineStatsBase" + TransducersReferenceablesExt = "Referenceables" + + [deps.Transducers.weakdeps] + BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" + DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" + LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02" + OnlineStatsBase = "925886fa-5bf2-5e8e-b522-a9147a512338" + Referenceables = "42d2dcc6-99eb-4e98-b66c-637b7d73030e" + +[[deps.URIs]] +git-tree-sha1 = "67db6cc7b3821e19ebe75791a9dd19c9b1188f2b" +uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4" +version = "1.5.1" + +[[deps.UUIDs]] +deps = ["Random", "SHA"] +uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" + +[[deps.Unicode]] +uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" + +[[deps.UnsafeAtomics]] +git-tree-sha1 = "6331ac3440856ea1988316b46045303bef658278" +uuid = "013be700-e6cd-48c3-b4a1-df204f14c38f" +version = "0.2.1" + +[[deps.UnsafeAtomicsLLVM]] +deps = ["LLVM", "UnsafeAtomics"] +git-tree-sha1 = "323e3d0acf5e78a56dfae7bd8928c989b4f3083e" +uuid = "d80eeb9a-aca5-4d75-85e5-170c8b632249" +version = "0.1.3" + +[[deps.UnsafePointers]] +git-tree-sha1 = "c81331b3b2e60a982be57c046ec91f599ede674a" +uuid = "e17b2a0c-0bdf-430a-bd0c-3a23cae4ff39" +version = "1.0.0" + +[[deps.Zlib_jll]] +deps = ["Libdl"] +uuid = "83775a58-1f1d-513f-b197-d71354ab007a" +version = "1.2.13+0" + +[[deps.Zygote]] +deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "GPUArrays", "GPUArraysCore", "IRTools", "InteractiveUtils", "LinearAlgebra", "LogExpFunctions", "MacroTools", "NaNMath", "PrecompileTools", 
"Random", "Requires", "SparseArrays", "SpecialFunctions", "Statistics", "ZygoteRules"] +git-tree-sha1 = "5ded212acd815612df112bb895ef3910c5a03f57" +uuid = "e88e6eb3-aa80-5325-afca-941959d7151f" +version = "0.6.67" + + [deps.Zygote.extensions] + ZygoteColorsExt = "Colors" + ZygoteDistancesExt = "Distances" + ZygoteTrackerExt = "Tracker" + + [deps.Zygote.weakdeps] + Colors = "5ae59095-9a9b-59fe-a467-6f913c188581" + Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" + Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" + +[[deps.ZygoteRules]] +deps = ["ChainRulesCore", "MacroTools"] +git-tree-sha1 = "9d749cd449fb448aeca4feee9a2f4186dbb5d184" +uuid = "700de1a5-db45-46bc-99cf-38207098b444" +version = "0.2.4" + +[[deps.libblastrampoline_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" +version = "5.8.0+0" + +[[deps.micromamba_jll]] +deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl"] +git-tree-sha1 = "66d07957bcf7e4930d933195aed484078dd8cbb5" +uuid = "f8abcde7-e9b7-5caa-b8af-a437887ae8e4" +version = "1.4.9+0" + +[[deps.nghttp2_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" +version = "1.52.0+1" + +[[deps.p7zip_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" +version = "17.4.0+0" diff --git a/previousVersion/0.0.4/Project.toml b/previousVersion/0.0.4/Project.toml new file mode 100755 index 0000000..7e70563 --- /dev/null +++ b/previousVersion/0.0.4/Project.toml @@ -0,0 +1,15 @@ +name = "ChatAgent" +uuid = "cff63402-b71f-455f-804d-24489fc61e5e" +authors = ["narawat "] +version = "0.1.0" + +[deps] +CommUtils = "646cbe82-3d4a-47b2-9440-2e80a472ca20" +CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab" +DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" +Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" +GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe" +HTTP = "cd3eb016-35fb-5094-929b-558a96fad6f3" +JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1" +PythonCall = 
"6099a3de-0909-46bc-b1f4-468b9a2dfc0d" +UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" diff --git a/previousVersion/0.0.4/src/ChatAgent.jl b/previousVersion/0.0.4/src/ChatAgent.jl new file mode 100755 index 0000000..6c4cea2 --- /dev/null +++ b/previousVersion/0.0.4/src/ChatAgent.jl @@ -0,0 +1,101 @@ +module ChatAgent + + # export agent, addNewMessage, clearMessage + + + """ Order by dependencies of each file. The 1st included file must not depend on any other + files and each file can only depend on the file included before it. + """ + + include("type.jl") + using .type + + include("utils.jl") + using .utils + + include("llmfunction.jl") + using .llmfunction + + include("interface.jl") + using .interface + + +#------------------------------------------------------------------------------------------------100 + + """ version 0.0.1 + Todo: + [] add chat mechanism + + Change from version: 0.0.0 + - + + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +end # module ChatAgent diff --git a/previousVersion/0.0.4/src/interface.jl b/previousVersion/0.0.4/src/interface.jl new file mode 100755 index 0000000..dd8009c --- /dev/null +++ b/previousVersion/0.0.4/src/interface.jl @@ -0,0 +1,1229 @@ +module interface + + +export agentReact, agentReflex, + addNewMessage, clearMessage, removeLatestMsg, conversation, writeEvaluationGuideline, + grading, analyze, selfReflext, actor_mistral_openorca2 + +using JSON3, DataStructures, Dates, UUIDs, HTTP +using CommUtils, GeneralUtils +using ..type, ..utils + +# ---------------------------------------------------------------------------- # +# pythoncall setting # +# ---------------------------------------------------------------------------- # +# Ref: https://github.com/JuliaPy/PythonCall.jl/issues/252 +# by setting the following variables, PythonCall will use system python or conda python and +# packages installed by system or conda +# if these 
setting are not set (comment out), PythonCall will use its own python and package that +# installed by CondaPkg (from env_preparation.jl) +# ENV["JULIA_CONDAPKG_BACKEND"] = "Null" +# systemPython = split(read(`which python`, String), "\n")[1] +# ENV["JULIA_PYTHONCALL_EXE"] = systemPython # find python location with $> which python ex. raw"/root/conda/bin/python" + +# using PythonCall +# const py_agents = PythonCall.pynew() +# const py_llms = PythonCall.pynew() +# function __init__() +# # PythonCall.pycopy!(py_cv2, pyimport("cv2")) + +# # equivalent to from urllib.request import urlopen in python +# PythonCall.pycopy!(py_agents, pyimport("langchain.agents")) +# PythonCall.pycopy!(py_llms, pyimport("langchain.llms")) +# end + +#------------------------------------------------------------------------------------------------100 + + +""" Add new message to agent. + + Args: + + Return: + +```jldoctest +julia> addNewMessage(agent1, "user", "Where should I go to buy snacks") +```` +""" +function addNewMessage(a::T1, role::String, content::T2) where {T1<:agent, T2<:AbstractString} + if role ∉ a.availableRole # guard against typo + error("role is not in agent.availableRole $(@__LINE__)") + end + + # check whether user messages exceed limit + userMsg = 0 + for i in a.messages + if i[:role] == "user" + userMsg += 1 + end + end + messageleft = 0 + + if userMsg > a.maxUserMsg # delete all conversation + clearMessage(a) + messageleft = a.maxUserMsg + else + userMsg += 1 + d = Dict(:role=> role, :content=> content, :timestamp=> Dates.now()) + push!(a.messages, d) + messageleft = a.maxUserMsg - userMsg + end + + return messageleft +end + +function clearMessage(a::T) where {T<:agent} + for i in eachindex(a.messages) + if length(a.messages) > 1 # system instruction will NOT be deleted + pop!(a.messages) + else + break + end + end + a.thought = "nothing" +end + +function removeLatestMsg(a::T) where {T<:agent} + if length(a.messages) > 1 + pop!(a.messages) + end +end + +# function 
generatePrompt_mistral_openorca(a::T, usermsg::String, role::Symbol) where {T<:agent} +# prompt = +# """ +# <|im_start|>system +# {systemMsg} +# <|im_end|> +# Here are the context for the question: +# {context} +# """ +# prompt = replace(prompt, "{systemMsg}" => a.roles[role]) + +# toolnames = "" +# toollines = "" +# for (toolname, v) in a.tools +# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n" +# toollines *= toolline +# toolnames *= "$toolname," +# end +# prompt = replace(prompt, "{toolnames}" => toolnames) +# prompt = replace(prompt, "{tools}" => toollines) + +# prompt = replace(prompt, "{context}" => a.context) + +# prompt *= "<|im_start|>user\n" * usermsg * "\n<|im_end|>\n" +# prompt *= "<|im_start|>assistant\n" + +# return prompt +# end + +# function generatePrompt_mistral_openorca(a::T, usermsg::String, +# thinkingMode::Symbol=:nothinking) where {T<:agent} + +# prompt = +# """ +# <|im_start|>system +# {systemMsg} +# You have access to the following tools: +# {tools} +# {thinkingMode} +# <|im_end|> +# Here are the context for the question: +# {context} +# """ +# prompt = replace(prompt, "{systemMsg}" => a.roles[a.role]) +# prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode]) +# toolnames = "" +# toollines = "" +# for (toolname, v) in a.tools +# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n" +# toollines *= toolline +# toolnames *= "$toolname," +# end +# prompt = replace(prompt, "{toolnames}" => toolnames) +# prompt = replace(prompt, "{tools}" => toollines) + +# prompt = replace(prompt, "{context}" => a.context) + +# prompt *= "<|im_start|>user\nQuestion: " * usermsg * "\n<|im_end|>\n" +# prompt *= "<|im_start|>assistant\n" + +# return prompt +# end + + +function generatePrompt_mistral_openorca(a::T, usermsg::String, + thinkingMode::Symbol=:nothinking) where {T<:agent} + + prompt = + """ + <|im_start|>system + {systemMsg} + {tools} + {thinkingMode} + <|im_end|> + Here are the context for 
the stimulus: + {context} + """ + prompt = replace(prompt, "{systemMsg}" => a.roles[a.role]) + prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode]) + toolnames = "" + toollines = "" + for (toolname, v) in a.tools + toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n" + toollines *= toolline + toolnames *= "$toolname," + end + prompt = replace(prompt, "{toolnames}" => toolnames) + + prompt = replace(prompt, "{context}" => a.context) + + prompt *= "<|im_start|>user\nStimulus: " * usermsg * "\n<|im_end|>\n" + prompt *= "<|im_start|>assistant\n" + + return prompt +end + +function chat_mistral_openorca(a::agentReflex, usermsg::String) + """ + general prompt format: + + " + <|im_start|>system + {role} + {tools} + {thinkingFormat} + <|im_end|> + {context} + <|im_start|>user + {usermsg} + <|im_end|> + <|im_start|>assistant + + " + + Note: + {context} = + " + {earlierConversation} + {env state} + {shortterm memory} + {longterm memory} + " + """ + + prompt = + """ + <|im_start|>system + {role} + {thinkingFormat} + <|im_end|> + {context} + <|im_start|>user + {usermsg} + <|im_end|> + <|im_start|>assistant + + """ + prompt = replace(prompt, "{role}" => a.roles[a.role]) + prompt = replace(prompt, "{thinkingFormat}" => "") + + context = + """ + {earlierConversation} + {env state} + {longterm memory} + """ + context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)") + context = replace(context, "{env state}" => "") + context = replace(context, "{longterm memory}" => "") + + prompt = replace(prompt, "{context}" => context) + + prompt = replace(prompt, "{usermsg}" => "Stimulus: $usermsg") + + return prompt +end + +#WORKING mark only attemp, step +function planner_mistral_openorca(a::agentReflex) + """ + general prompt format: + + " + <|im_start|>system + {role} + {tools} + {thinkingFormat} + <|im_end|> + {context} + <|im_start|>user + {usermsg} + <|im_end|> + <|im_start|>assistant + + " + 
+ Note: + {context} = + " + {earlierConversation} + {env state} + {shortterm memory} + {longterm memory} + " + """ + + prompt = + """ + <|im_start|>system + {role} + {roleSpecificKnowledge} + {tools} + {thinkingFormat} + {context} + <|im_end|> + <|im_start|>user + {usermsg} + <|im_end|> + <|im_start|>assistant + Plan: + """ + prompt = replace(prompt, "{role}" => a.roles[a.role]) + prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:planner]) + roleSpecificKnowledge = + """ + Info you need from the user to be able to help them selecting their best wine: + - type of food + - occasion + - user's personal taste of wine + - wine price range + - temperature at the serving location + - wines we have in stock + You job is to provide a personalized recommendation of up to two wines based on the user's info above, and you describe the benefits of each wine in detail. + """ + prompt = replace(prompt, "{roleSpecificKnowledge}" => roleSpecificKnowledge) + toolnames = "" + toollines = "" + for (toolname, v) in a.tools + toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n" + toollines *= toolline + toolnames *= "$toolname," + end + + prompt = replace(prompt, "{toolnames}" => toolnames) + prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines") + + context = + """ + {earlierConversation} + {env state} + {longterm memory} + """ + context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)") + context = replace(context, "{env state}" => "") + context = replace(context, "{longterm memory}" => "") + + prompt = replace(prompt, "{context}" => context) + + # initialize short term memory + prompt = replace(prompt, "{usermsg}" => "Stimulus: $(a.memory[:shortterm]["user:"])") + + return prompt +end + + +function actor_mistral_openorca(a::agentReflex) + """ + general prompt format: + + " + <|im_start|>system + {role} + {tools} + {thinkingFormat} + <|im_end|> + {context} + 
<|im_start|>user + {usermsg} + <|im_end|> + <|im_start|>assistant + + " + + Note: + {context} = + " + {earlierConversation} + {env state} + {shortterm memory} + {longterm memory} + " + """ + + mark = "$(a.step)" + + prompt = + """ + <|im_start|>system + {role} + {tools} + {thinkingFormat} + {context} + <|im_end|> + {shorttermMemory} + Thought $(a.step): + """ + + prompt = replace(prompt, "{role}" => a.roles[a.role]) + prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:actor]) + prompt = replace(prompt, "{step}" => a.step) + + + s = "" + for (k, v) in a.memory[:shortterm] + if k ∉ ["user:", "Plan 1:"] + s1 = "$k $v" + s *= s1 + if s[end] != "\n" + s *= "\n" + end + end + end + prompt = replace(prompt, "{shorttermMemory}" => s) + + toolnames = "" + toollines = "" + for (toolname, v) in a.tools + toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n" + toollines *= toolline + toolnames *= "$toolname, " + end + prompt = replace(prompt, "{toolnames}" => toolnames) + prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines") + + context = + """ + {env state} + {longterm memory} + {plan} + """ + context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)") + context = replace(context, "{env state}" => "") + context = replace(context, "{longterm memory}" => "") + mark = "$(a.attempt)" + context = replace(context, "{plan}" => "My plan:\n$(a.memory[:shortterm]["Plan $mark:"])") + prompt = replace(prompt, "{context}" => context) + + return prompt +end + +""" + Chat with llm. 
+ + ```jldoctest + julia> using JSON3, UUIDs, Dates, FileIO, CommUtils, ChatAgent + julia> mqttClientSpec = ( + clientName= "someclient", # name of this client + clientID= "$(uuid4())", + broker= "mqtt.yiem.ai", + pubtopic= (imgAI="img/api/v0.0.1/gpu/request", + txtAI="txt/api/v0.1.0/gpu/request"), + subtopic= (imgAI="agent/api/v0.1.0/img/respond", + txtAI="agent/api/v0.1.0/txt/respond"), + keepalive= 30, + ) + julia> msgMeta = Dict( + :msgPurpose=> "updateStatus", + :from=> "agent", + :to=> "llmAI", + :requestrespond=> "request", + :sendto=> "", # destination topic + :replyTo=> "agent/api/v0.1.0/txt/respond", # requester ask responder to send reply to this topic + :repondToMsgId=> "", # responder is responding to this msg id + :taskstatus=> "", # "complete", "fail", "waiting" or other status + :timestamp=> Dates.now(), + :msgId=> "$(uuid4())", + ) + julia> newAgent = ChatAgent.agentReact( + "Jene", + mqttClientSpec, + role=:assistant_react, + msgMeta=msgMeta + ) + julia> respond = ChatAgent.conversation(newAgent, "Hi! 
how are you?") + ``` +""" +function conversation(a::T, usermsg::String) where {T<:agent} + respond = nothing + + if a.thought != "nothing" # continue thought + _ = addNewMessage(a, "user", usermsg) + a.thought *= "Obs $(a.thinkinground): $usermsg\n" + prompt = a.thought + respond = work(a, prompt) + else # new thought + thinkingmode = chooseThinkingMode(a, usermsg) + @show thinkingmode + if thinkingmode == :no_thinking + a.context = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details + _ = addNewMessage(a, "user", usermsg) + prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode) + @show prompt + respond = sendReceivePrompt(a, prompt) + respond = split(respond, "<|im_end|>")[1] + respond = replace(respond, "\n" => "") + _ = addNewMessage(a, "assistant", respond) + @show respond + elseif thinkingmode == :thinking + a.context = conversationSummary(a) + _ = addNewMessage(a, "user", usermsg) + prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode) + respond = work(a, prompt) + else + error("undefined condition thinkingmode = $thinkingmode $(@__LINE__)") + end + end + + return respond +end + +""" + Continuously run llm functions except when llm is getting Answer: or chatbox. + There are many work() depend on thinking mode. +""" +function work(a::T, prompt::String, maxround::Int=3) where {T<:agent} + respond = nothing + while true + a.thinkinground += 1 + @show a.thinkinground + toolname = nothing + toolinput = nothing + + if a.thinkinground > a.thinkingroundlimit + a.thought *= "Thought $(a.thinkinground): I think I know the answer." 
+ prompt = a.thought + end + + @show prompt + respond = sendReceivePrompt(a, prompt) + + headerToDetect = nothing + if a.thinkinground == 1 + try + respond = split(respond, "Obs:")[1] + headerToDetect = ["Question:", "Plan:", "Thought:", "Act:", "ActInput:", "Obs:", "...", "Answer:", + "Conclusion:", "Summary:"] + catch + end + else + try + respond = split(respond, "Obs $(a.thinkinground):")[1] + headerToDetect = ["Question $(a.thinkinground):", "Plan $(a.thinkinground):", + "Thought $(a.thinkinground):", "Act $(a.thinkinground):", + "ActInput $(a.thinkinground):", "Obs $(a.thinkinground):", + "...", "Answer:", + "Conclusion:", "Summary:"] + catch + end + end + @show respond + headers = detectCharacters(respond, headerToDetect) + chunkedtext = chunktext(respond, headers) + + Answer = findDetectedCharacter(headers, "Answer:") + AnswerInd = length(Answer) != 0 ? Answer[1] : nothing + Act = findDetectedCharacter(headers, "Act $(a.thinkinground):") + if length(Answer) == 1 && length(Act) == 0 + a.thought = "nothing" # assignment finished, no more thought + a.context = "nothing" + a.thinkinground = 0 + respond = chunkedtext[AnswerInd][:body] + respond = replace(respond, "<|im_end|>"=>"") + _ = addNewMessage(a, "assistant", respond) + break + else + + # check for tool being called + ActHeader = a.thinkinground == 1 ? "Act:" : "Act $(a.thinkinground):" + if length(findDetectedCharacter(headers, ActHeader)) != 0 # check whether there is Act: in a respond + ActInd = findDetectedCharacter(headers, ActHeader)[1] + toolname = toolNameBeingCalled(chunkedtext[ActInd][:body], a.tools) + end + ActInputHeader = a.thinkinground == 1 ? 
"ActInput:" : "ActInput $(a.thinkinground):" + if length(findDetectedCharacter(headers, ActInputHeader)) != 0 # check whether there is ActInput: in a respond + ActInputInd = findDetectedCharacter(headers, ActInputHeader)[1] + toolinput = chunkedtext[ActInputInd][:body] + end + + # clean up + if occursin(" \"", toolinput) + toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " \"", "\"\n") + else + toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " ", "\n") + end + @show toolname + @show toolinput + if toolname === nothing || toolinput === nothing + println("toolname $toolname toolinput $toolinput retry thinking") + a.thinkinground -= 1 + continue + end + + if a.thought == "nothing" + thought = "" + for i in chunkedtext + header = i[:header] + header = replace(header, ":"=>" $(a.thinkinground):") # add number so that llm not confused + body = i[:body] + thought *= "$header $body" + end + a.thought = prompt * thought + else + a.thought *= respond + end + + + if toolname == "chatbox" # chat with user + a.thought *= toolinput + respond = toolinput + _ = addNewMessage(a, "assistant", respond) + break + else # function call + f = a.tools[Symbol(toolname)][:func] + _result = f(toolinput) + if _result != "No info available." #TODO for use with wikisearch(). 
Not good for other tools + _result = makeSummary(a, _result) + end + result = "Obs $(a.thinkinground): $_result\n" + a.thought *= result + prompt = a.thought + end + end + end + @show respond + return respond +end + +function conversation(a::agentReflex, usermsg::String; attemptlimit::Int=3) + a.attemptlimit = attemptlimit + respond = nothing + + # determine thinking mode + a.thinkingmode = chooseThinkingMode(a, usermsg) + @show a.thinkingmode + + if a.thinkingmode == :no_thinking + a.earlierConversation = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details + _ = addNewMessage(a, "user", usermsg) + prompt = chat_mistral_openorca(a, usermsg) + println("") + @show prompt + respond = sendReceivePrompt(a, prompt) + respond = split(respond, "<|im_end|>")[1] + respond = replace(respond, "\n" => "") + _ = addNewMessage(a, "assistant", respond) + println("") + @show respond + else + respond = work(a, usermsg) + end + + return respond +end + + +function work(a::agentReflex, usermsg::String) + respond = nothing + mark_plan = "$(a.attempt)" + mark_actor = "$(a.step)" + + if a.thinkingmode == :new_thinking + a.earlierConversation = conversationSummary(a) + _ = addNewMessage(a, "user", usermsg) + a.memory[:shortterm]["user:"] = usermsg + a.memory[:log]["user:"] = usermsg + a.newplan = true + a.attempt = 1 + elseif a.thinkingmode == :continue_thinking #TODO + println("continue_thinking!!") + _ = addNewMessage(a, "user", usermsg) + a.memory[:shortterm]["Obs $mark_actor:"] = usermsg + a.step += 1 + a.memory[:log]["Obs $mark_actor:"] = usermsg + else + error("undefined condition thinkingmode = $thinkingmode $(@__LINE__)") + end + + while true # Work loop + # plan + @show a.attempt + @show usermsg + + if a.attempt <= a.attemptlimit + toolname = nothing + toolinput = nothing + if a.newplan == true + prompt_plan = planner_mistral_openorca(a) + println("") + @show prompt_plan + respond = sendReceivePrompt(a, prompt_plan, 
max_tokens=1024) + println("") + plan_raw = respond + @show plan_raw + # sometimes LLM add not-need word I don't want + plan = splittext(respond, ["<|im_end|>", "Response", "Execution", + "Result", "Recommendation", "My response"]) + # plan = replace(plan, "Plan:"=>"") + println("") + @show plan + + a.memory[:shortterm]["Plan $mark_plan:"] = plan + a.memory[:log]["Plan $mark_plan:"] = plan + a.step = 1 + a.newplan = false + end + + actorstate, msgToUser = actor(a) + + if actorstate == "chatbox" + respond = msgToUser + break + elseif actorstate == "all steps done" #WORKING add canceled during plan + println("all steps done") + + respond = formulateUserRespond(a) + error("10") + + a.memory[:shortterm]["Respond $mark_plan:"] = respond + a.memory[:log]["Respond $mark:"] = respond + + # evaluate. if score < 8/10 try again. + guideline = writeEvaluationGuideline(a, a.memory[:shortterm]["user:"]) + @show guideline + score = grading(a, guideline, respond) + @show score + if score >= 8 # good enough answer + @show a.memory[:shortterm] + a.memory[:shortterm] = OrderedDict{String, Any}() + a.memory[:log] = OrderedDict{String, Any}() + break + else # self evaluate and reflect then try again + analysis = analyze(a, a.memory[:shortterm]) + @show analysis + error(12) + lessonwithcontext = selfReflext(a, analysis) + @show lessonwithcontext + a.memory[:shortterm] = OrderedDict{String, Any}() + #TODO add lesson and context into longterm memory + headerToDetect = ["Lesson:", "Context:", ] + headers = detectCharacters(lessonwithcontext, headerToDetect) + chunkedtext = chunktext(lessonwithcontext, headers) + @show chunkedtext + a.memory[:longterm][chunkedtext["Context:"] => chunkedtext["Lesson:"]] + a.newplan = true + error("22222222") + + end + else + error("undefied condition, actorstate $actorstate $(@__LINE__)") + break + end + else #TODO attempt limit reached, force AI to answer + error("attempt limit reach") + break + end + a.attempt += 1 + end + + # good enough answer + + + # 
communicates with user + _ = addNewMessage(a, "assistant", respond) + return respond +end + + +function evaluate() + +end + + + +""" + Actor function. + + Args: + a, one of ChatAgent's agent. + plan, a step by step plan to respond + + Return: + case 1) if actor complete the plan successfully. + actorState = "all steps done" inidicates that all step in plan were done. + msgToUser = nothing. + case 2) if actor needs to talk to user for more context + actorState = "chatbox" + msgToUser = "message from assistant to user" + +""" +function actor(a::agentReflex) + actorState = nothing + msgToUser = nothing + + totalsteps = checkTotalStepInPlan(a) + + while true # Actor loop + + @show a.step + if a.step <= totalsteps + + prompt_actor = actor_mistral_openorca(a) + println("") + @show prompt_actor + respond = sendReceivePrompt(a, prompt_actor) + respond = splittext(respond, ["Obs", "<|im_end|>"]) + respond_actor_raw = respond + println("") + @show respond_actor_raw + if !occursin("Thought", respond) + respond = "Thought: " * respond + end + + headerToDetect = ["Question:", "Plan:", "Thought:", + "Act:", "ActInput:", "Obs:", "...", + "Answer:", "Conclusion:", "Summary:"] + + # replace headers with headers with correct attempt and step number + respond = replaceHeaders(respond, headerToDetect, a.step) + + headers = detectCharacters(respond, headerToDetect) + + respond_actor = respond + println("") + @show respond_actor + + mark_plan = "$(a.attempt)" + mark_actor = "$(a.step)" + headerToDetect = ["Plan $mark_plan:", + "Thought $mark_actor:", + "Act $mark_actor:", + "ActInput $mark_actor:", + "Obs $mark_actor:", + "Check $mark_actor:",] + headers = detectCharacters(respond, headerToDetect) + chunkedtext = chunktext(respond, headers) + @show chunkedtext + + # add to memory + a.memory[:shortterm] = addShortMem!(a.memory[:shortterm], chunkedtext) + + toolname = toolNameBeingCalled(chunkedtext["Act $mark_actor:"], a.tools) + toolinput = chunkedtext["ActInput $mark_actor:"] + @show 
toolname + @show toolinput + + if toolname == "chatbox" # chat with user + #TODO donot use chatbox to respond to user + respond = toolinput + msgToUser = respond + actorState = toolname + break + else # function call + f = a.tools[Symbol(toolname)][:func] + toolresult = f(a, toolinput) + @show toolresult + a.memory[:shortterm]["Obs $mark_actor:"] = toolresult + a.step += 1 + + go, reason = goNogo(a) + @show go + a.memory[:shortterm]["Check $mark_actor:"] = reason + if go == "No" # in case there is a cancel, go straight to evaluation + a.step -= 1 + error(113) + end + end + else #TODO finish all steps + actorState = "all steps done" + msgToUser = nothing + break + end + end + + return actorState, msgToUser +end + + + +""" Write evaluation guideline. + +Args: + a, one of ChatAgent's agent. + usermsg, stimulus e.g. question, task and etc. + +Return: + An evaluation guideline used to guage AI's work. + +# Example + +```jldoctest +julia> using ChatAgent, CommUtils +julia> agent = ChatAgent.agentReflex("Jene") +julia> usermsg = "What's AMD latest product?" +" +julia> evaluationGuideLine = writeEvaluationGuideline(agent, usermsg) +``` +""" +function writeEvaluationGuideline(a::agentReflex, usermsg::T) where {T<:AbstractString} + prompt = + """ + <|im_start|>system + You have access to the following tools: + chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer. + wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question. + + Your work: + $usermsg + + Your job are: + 1. Write an evaluation guideline for your work in order to be able to evaluate your respond. + 2. An example of what the respond should be. + <|im_end|> + """ + + respond = sendReceivePrompt(a, prompt) + return respond +end + + + +""" Determine a score out of 10 according to evaluation guideline. + +Args: + a, one of ChatAgent's agent. + guidelines, an evaluation guideline. 
+ shorttermMemory, a short term memory that logs what happened. + +Return: + A score out of 10 based on guideline. + +# Example + +```jldoctest +julia> using ChatAgent, CommUtils +julia> agent = ChatAgent.agentReflex("Jene") +julia> shorttermMemory = OrderedDict{String, Any}( + "user" => "What's the latest AMD GPU?", + "Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n", + "Act 1:" => " wikisearch\n", + "ActInput 1:" => " amd gpu latest\n", + "Obs 1:" => "No info available for your search query.", + "Act 2:" => " wikisearch\n", + "ActInput 2:" => " amd graphics card latest\n", + "Obs 2:" => "No info available for your search query.") +julia> guideline = "\nEvaluation Guideline:\n1. Check if the user's question has been understood correctly.\n2. Evaluate the steps taken to provide the information requested by the user.\n3. Assess whether the correct tools were used for the task.\n4. Determine if the user's request was successfully fulfilled.\n5. Identify any potential improvements or alternative approaches that could be used in the future.\n\nThe respond should include:\n1. A clear understanding of the user's question.\n2. The steps taken to provide the information requested by the user.\n3. An evaluation of whether the correct tools were used for the task.\n4. A confirmation or explanation if the user's request was successfully fulfilled.\n5. Any potential improvements or alternative approaches that could be used in the future." +julia> score = grading(agent, guideline, shorttermMemory) +2 +``` +""" +function grading(a, guideline::T, text::T) where {T<:AbstractString} + prompt = + """ + <|im_start|>system + You have access to the following tools: + chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer. + wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question. 
+ + $guideline + + Your respond: $text + + You job are: + 1. Evaluate your respond using the evaluation guideline and an example respond. + 2. Give yourself a score out of 10 for your respond. + + Use the following format to answer: + {Evaluation} Score {}/10. + <|im_end|> + """ + println("prompt 11 ", prompt) + respond = sendReceivePrompt(a, prompt) + println("grading respond 11 = $respond") + _score = split(respond[end-5:end], "/")[1] + _score = split(_score, " ")[end] + score = parse(Int, _score) + return score +end + + + +""" Analize work. + +Args: + a, one of ChatAgent's agent. + +Return: + A report of analized work. + +# Example + +```jldoctest +julia> using ChatAgent, CommUtils +julia> agent = ChatAgent.agentReflex("Jene") +julia> shorttermMemory = OrderedDict{String, Any}( + "user:" => "What's the latest AMD GPU?", + "Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n", + "Act 1:" => " wikisearch\n", + "ActInput 1:" => " amd gpu latest\n", + "Obs 1:" => "No info available for your search query.", + "Act 2:" => " wikisearch\n", + "ActInput 2:" => " amd graphics card latest\n", + "Obs 2:" => "No info available for your search query.") +julia> report = analyze(agent, shorttermMemory) +``` +""" +function analyze(a) + + + prompt = + """ + <|im_start|>system + You have access to the following tools: + chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer. + wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question. + + Your work: + $shorttermMemory + + Do each of the following steps in detail to analize your work. + 1. What happened? + 2. List all relationships, each with cause and effect . + 3. Look at each relationship, figure out why it behaved that way. + 4. What could you do to improve the respond? 
+ <|im_end|> + """ + + respond = sendReceivePrompt(a, prompt, max_tokens=2048) + return respond +end + + +""" Write a lesson drawn from evaluation. + +Args: + a, one of ChatAgent's agent. + report, a report resulted from analyzing shorttermMemory + +Return: + A lesson. + +# Example + +```jldoctest +julia> using ChatAgent, CommUtils +julia> agent = ChatAgent.agentReflex("Jene") +julia> report = + "What happened: I tried to search for AMD's latest product using the wikisearch tool, + but no information was available in the search results. + Cause and effect relationships: + 1. Searching \"AMD latest product\" -> No info available. + 2. Searching \"most recent product release\" -> No info available. + 3. Searching \"latest product\" -> No info available. + Analysis of each relationship: + 1. The search for \"AMD latest product\" did not provide any information because the wikisearch tool could not find relevant results for that query. + 2. The search for \"most recent product release\" also did not yield any results, indicating that there might be no recent product releases available or that the information is not accessible through the wikisearch tool. + 3. The search for \"latest product\" similarly resulted in no information being found, suggesting that either the latest product is not listed on the encyclopedia or it is not easily identifiable using the wikisearch tool. + Improvements: To improve the response, I could try searching for AMD's products on a different + source or search engine to find the most recent product release. Additionally, I could ask + the user for more context or clarify their question to better understand what they are + looking for." +julia> lesson = selfReflext(agent, report) +``` +""" +function selfReflext(a, analysis::T) where {T<:AbstractString} + prompt = + """ + <|im_start|>system + You have access to the following tools: + chatbox: Useful for when you need to ask a customer for more context. 
Input should be a conversation to customer.
        wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.

        Your report:
        $analysis

        Your job are:
        1. Lesson: what lesson could you learn from your report?.
        2. Context: what is the context this lesson could apply to?
        <|im_end|>
        """
    # NOTE: the prompt interpolates the `analysis` parameter; it previously
    # referenced an undefined `report` variable, which raised UndefVarError.

    respond = sendReceivePrompt(a, prompt, max_tokens=2048)
    return respond
end


""" Formulate a respond from work for user's stimulus.

Args:
    a, one of ChatAgent's agent.  The user's stimulus and the logged work
    are read from `a.memory[:shortterm]`.

Return:
    A respond for user's stimulus.

# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> shorttermMemory = OrderedDict{String, Any}(
           "user:" => "What's the latest AMD GPU?",
           "Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n",
           "Act 1:" => " wikisearch\n",
           "ActInput 1:" => " amd gpu latest\n",
           "Obs 1:" => "No info available for your search query.",
           "Act 2:" => " wikisearch\n",
           "ActInput 2:" => " amd graphics card latest\n",
           "Obs 2:" => "No info available for your search query.")

julia> respond = formulateUserRespond(agent)
```
"""
function formulateUserRespond(a)
    # The user's stimulus is stored under the "user:" key of short-term memory.
    stimulus = a.memory[:shortterm]["user:"]

    # Concatenate every memory entry except the stimulus itself into a
    # "header, body" transcript for the prompt.
    work = ""
    for (k, v) in a.memory[:shortterm]
        if k ∉ ["user:",]
            work *= "$k, $v\n"
        end
    end

    prompt =
        """
        <|im_start|>system
        You have access to the following tools:
        chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
        wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.
+ + Symbol: + Stimulus: the input user gives to you and you must respond + Plan: a plan + Thought: your thought + Act: the action you took + ActInput: the input to the action + Obs: the result of the action + + Stimulus: + $stimulus + + Your work: + $work + + From your work, formulate a respond for user's stimulus. + <|im_end|> + Respond: + """ + respond = sendReceivePrompt(a, prompt) + return respond +end + + + +""" Determine whether LLM should go to next step. + +Args: + a, one of ChatAgent's agent. + +Return: + "Yes" or "no" decision to go next step. + +# Example +```jldoctest +julia> using ChatAgent, CommUtils +julia> agent = ChatAgent.agentReflex("Jene") +julia> shorttermMemory = OrderedDict{String, Any}( + "user:" => "What's the latest AMD GPU?", + "Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n", + "Act 1:" => " wikisearch\n", + "ActInput 1:" => " amd gpu latest\n", + "Obs 1:" => "No info available for your search query.", + "Act 2:" => " wikisearch\n", + "ActInput 2:" => " amd graphics card latest\n", + "Obs 2:" => "No info available for your search query.") + +julia> decision = goNogo(agent) +"Yes" +``` +""" +function goNogo(a) + stimulus = a.memory[:shortterm]["user:"] + work = "" + for (k, v) in a.memory[:shortterm] + if k ∉ ["user:"] + work *= "$k $v" + end + end + + + prompt = + """ + <|im_start|>system + You have access to the following tools: + chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer. + wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question. 
+ + Symbol: + Stimulus: the input user gives to you and you must respond + Plan: a plan + Thought: your thought + Act: the action you took + ActInput: the input to the action + Obs: the result of the action + + Stimulus: + $stimulus + + Your work: + $work + + From your work, you job is to decide whether you're ready to do the next step in the plan by choosing one of the following choices: + If you are ready say, "{Yes}". And what is the rationale behind the decision? + If you are not ready say, "{No}". And what is the rationale behind the decision? + <|im_end|> + """ + + respond = sendReceivePrompt(a, prompt) + + decision = GeneralUtils.getStringBetweenCharacters(respond, "{", "}") + start = findfirst("}", respond)[end] +1 + reason = respond[start:end] + + return decision, reason +end + + + + + + + + + + + + + + + + + + + + + + + + + +end # module \ No newline at end of file diff --git a/previousVersion/0.0.4/src/llmfunction.jl b/previousVersion/0.0.4/src/llmfunction.jl new file mode 100644 index 0000000..4706f82 --- /dev/null +++ b/previousVersion/0.0.4/src/llmfunction.jl @@ -0,0 +1,146 @@ +module llmfunction + +export wikisearch, winestock + +using HTTP, JSON3 +using GeneralUtils +using ..type, ..utils +#------------------------------------------------------------------------------------------------100 + +""" + Search wikipedia. + + Args: + query (string): The query to search for + + Returns: + string: The search result text from wikipedia + ```jldoctest + julia> using HTTP, JSON3 + julia> result = wikisearch("AMD") + "Advanced Micro Devices, Inc., commonly abbreviated as AMD, is an ..." + ``` +""" +function wikisearch(a::agentReflex, phrase::T) where {T<:AbstractString} + phrase = phrase[1] == " " ? 
phrase[2:end] : phrase + # prepare input phrase + if occursin("\"", phrase) + phrase = GeneralUtils.getStringBetweenCharacters(phrase, "\"", "\"") + end + phrase = replace(phrase, "\n"=>"") + + url = "https://en.wikipedia.org/w/api.php?action=query&format=json&prop=extracts&titles=$(replace(phrase, " " => "%20"))&exintro=1&explaintext=1" + @show url + response = HTTP.get(url) + json_data = JSON3.read(String(response.body)) + page_id = first(keys(json_data["query"]["pages"])) + if page_id == "-1" + return "Sorry, I couldn't find any Wikipedia page for the given phrase." + end + + result = nothing + try + result = json_data["query"]["pages"][page_id]["extract"] + wiki = result + @show wiki + catch + result = "No info available for your search query." + end + + # if result == "" + # result = "No info available for your search query." + # else + # result = makeSummary(a, result) + # end + + return result +end + + + + +function winestock(a::agentReflex, phrase::T) where {T<:AbstractString} + # result = [ + # Dict( + # "name" => "Louis Latou - Corton-Charlamagne - Chardonnay", + # "description" => "Corton-Charlemagne 2018 is a powerful, complex wine. Its nose is intense, with notes of white stone fruits such as white peach, fresh hazelnut, vanilla, and almond paste. The wine is full-bodied for the palate, and the vanilla is complemented by aromas of fresh almond and lime blossom. The experience ends with a very fine aromatic aftertaste that has subtle saline notes.", + # "price" => "49", + # "ID" => "ws-114" + # ), + # Dict( + # "name" => "Louis Latou - Corton-Charlamagne - Chardonnay", + # "description" => "Corton-Charlemagne 2018 is a powerful, complex wine. Its nose is intense, with notes of white stone fruits such as white peach, fresh hazelnut, vanilla, and almond paste. The wine is full-bodied for the palate, and the vanilla is complemented by aromas of fresh almond and lime blossom. 
The experience ends with a very fine aromatic aftertaste that has subtle saline notes.", + # "price" => "49", + # "ID" => "ws-114" + # ) + # ] + + result = + """ + 1. Name: Louis Latou - Corton-Charlamagne - Chardonnay, + Description: Corton-Charlemagne 2018 is a powerful, complex wine. Its nose is intense, with notes of white stone fruits such as white peach, fresh hazelnut, vanilla, and almond paste. The wine is full-bodied for the palate, and the vanilla is complemented by aromas of fresh almond and lime blossom. The experience ends with a very fine aromatic aftertaste that has subtle saline notes., + Price: 49 dollars, + ID: ws-114 + 2. Name: Chateau de Beaucastel Hommage Jacques Perrin Chateauneuf-du-Pape, + Year: 2019, + Description: The quintessence of Château de Beaucastel, Hommage à Jacques Perrin delights us every year, and the 2019 vintage is no exception. To the eye it offers a splendid deep red color, verging on black. Full of power and supremely elegant, the nose is of magnificent aromatic complexity with notes of black fruit and spices that offer all the characteristic expression of Mourvèdre. Perfectly balanced by an incredible freshness, the mouth is eminently elegant with intense and complex aromas of great subtlety, a full, refined texture, subtle tannins of great finesse, and infinite length. A great classic Hommage à Jacques Perrin., + Price: 42, + ID: ed-23 + 3. Name: M. Chapoutier Ermitage l'Ermite Blanc, + Year: 2017 + Description: Brilliant pale yellow. Complex aromas of vanilla, almonds, dried fruits and linden-tree. The mineraliaty is marked (typical of soil). Very round and rich wine. An elegant balance, of very ripe white fruit aromas (peach and apricot) and light notes of minerality. 
Beautiful length and complexity., + Price: 13, + ID: wwr-259 + """ + return result +end + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +end # end module \ No newline at end of file diff --git a/previousVersion/0.0.4/src/type.jl b/previousVersion/0.0.4/src/type.jl new file mode 100644 index 0000000..ffd1691 --- /dev/null +++ b/previousVersion/0.0.4/src/type.jl @@ -0,0 +1,376 @@ +module type + +export agent, agentReflex + +using Dates, UUIDs, DataStructures +using CommUtils + +#------------------------------------------------------------------------------------------------100 + + +abstract type agent end + + +@kwdef mutable struct agentReflex <: agent + availableRole::AbstractVector = ["system", "user", "assistant"] + agentName::String = "Jene" # ex. Jene + maxUserMsg::Int = 10 + earlierConversation::String = "" # summary of earlier conversation + mqttClient::Union{mqttClient, Nothing} = nothing + msgMeta::Union{Dict, Nothing} = nothing + + """ Dict(Role=> Content) ; Role can be system, user, assistant + Example: + messages=[ + Dict(:role=>"system", :content=> "You are a helpful assistant."), + Dict(:role=>"assistant", :content=> "How may I help you"), + Dict(:role=>"user", :content=> "Hello, how are you"), + ] + """ + role::Symbol = :assistant + roles::Dict = Dict(:assistant => "You are a helpful assistant.",) + + # Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3 + # messages= [Dict(:role=>"system", :content=> "", :timestamp=> Dates.now()),] + messages = Vector{Dict{Symbol, Any}}() + tools::Union{Dict, Nothing} = nothing + newplan::Bool = false # if true, new plan will be generated + attemptlimit::Int = 5 # thinking round limit + attempt::Int = 1 # attempted number + step::Int = 1 # step number + thinkingmode::Symbol = :no_thinking + thinkingFormat::Union{Dict, Nothing} = nothing + memory::Dict = Dict( + :shortterm=> OrderedDict{String, Any}(), + :longterm=> 
OrderedDict{String, Any}(), + :log=> OrderedDict{String, Any}(), # span from user stimulus -> multiples attempts -> final respond + ) +end + +function agentReflex( + agentName::String; + mqttClientSpec::NamedTuple=( + clientName= "someclient", # name of this client + clientID= "$(uuid4())", + broker= "mqtt.yiem.ai", + pubtopic= (imgAI="img/api/v0.0.1/gpu/request", + txtAI="txt/api/v0.1.0/gpu/request"), + subtopic= (imgAI="agent/api/v0.1.0/img/respond", + txtAI="agent/api/v0.1.0/txt/respond"), + keepalive= 30,), + role::Symbol=:assistant, + roles::Dict=Dict( + :assistant => + """ + You are a helpful assistant who respond to a stimulus as best you can. + """, + :sommelier => + """ + You are a sommelier at an online wine reseller who always help users choosing their wine from your inventory. + You don't know other people personal info previously. + """, + # :sommelier => + # """ + # You are a sommelier at an online wine reseller who always ask user for wine relevant info before you could help them choosing wine. + # You provide a personalized recommendation of up to two wines based on the user's preference, and you describe the benefits of each wine in detail. + # You don't know other people personal info previously. + + # Info used to select wine: + # - type of food + # - occasion + # - user's personal taste of wine + # - wine price range + # - temperature at the serving location + # - wine we have in stock + # """, + ), + thinkingFormat::Dict=Dict( + :react=> + """ + Use the following format: + Question: the input question your user is asking and you must answer + Plan: first you should always think about the question and the info you have thoroughly then extract and devise a complete plan to find the answer (pay attention to variables and their corresponding numerals). + Thought: ask yourself do you have all the info you need? And what to do according to the plan (pay attention to correct numeral calculation and commonsense). 
+ Act: the tool that match your thought, should be one of {toolnames} + ActInput: the input to the action (pay attention to the tool's input) + Obs: the result of the action + ... (this Plan/Thought/Act/ActInput/Obs can repeat N times until you know the answer.) + Thought: I think I know the answer + Answer: Answer of the original question + + Begin!""", + :planner=> + """ + Use the following format: + Stimulus: the input user gives to you and you must respond + Plan: first you should always think about the stimulus, the info you need and the info you have thoroughly then extract and devise a step by step plan (pay attention to correct numeral calculation and commonsense). + p.s.1 each step should be a single question/action. + p.s.2 Do not respond yet. + """, + :actor=> + """ + Use the following format: + Thought: ask yourself do you have what you need and what to do according to step {step} of the plan (pay attention to correct numeral calculation and commonsense). + Act: the action to take that match your thought, should be one of [{toolnames}] + ActInput: the input to the action (pay attention to the tool's input) + Obs: the result of the action + Check: check whether you are ready for the next step of the plan + """, + ), + tools::Dict=Dict( + :chatbox=>Dict( + :name => "chatbox", + :description => "Useful for when you need to communicate with the user.", + :input => "Input should be a conversation to the user.", + :output => "" , + :func => nothing, + ), + # :wikisearch=>Dict( + # :name => "wikisearch", + # :description => "Useful for when you need to search an encyclopedia", + # :input => "Input is keywords and not a question.", + # :output => "", + # :func => wikisearch, # put function here + # ), + # :wineStock=>Dict( + # :name => "wineStock", + # :description => "useful for when you need to search for wine by your description, price, name or ID.", + # :input => "Input should be a search query with as much details as possible.", + # :output => "" , + # 
:func => nothing, + # ), + # :NTHING=>Dict( + # :name => "NTHING", + # :description => "useful for when you don't need to use tools or actions", + # :input => "No input is needed", + # :output => "" , + # :func => nothing, + # ), + ), + msgMeta::Dict=Dict( + :msgPurpose=> "updateStatus", + :from=> "chatbothub", + :to=> "llmAI", + :requestrespond=> "request", + :sendto=> "", # destination topic + :replyTo=> "agent/api/v0.1.0/txt/respond", # requester ask responder to send reply to this topic + :repondToMsgId=> "", # responder is responding to this msg id + :taskstatus=> "", # "complete", "fail", "waiting" or other status + :timestamp=> Dates.now(), + :msgId=> "$(uuid4())", + ), + availableRole::AbstractArray=["system", "user", "assistant"], + maxUserMsg::Int=10,) + + newAgent = agentReflex() + newAgent.availableRole = availableRole + newAgent.maxUserMsg = maxUserMsg + newAgent.mqttClient = CommUtils.mqttClient(mqttClientSpec) + newAgent.msgMeta = msgMeta + newAgent.tools = tools + newAgent.role = role + newAgent.roles = roles + newAgent.thinkingFormat = thinkingFormat + newAgent.agentName = agentName + + return newAgent +end + + + + + + +@kwdef mutable struct agentReact <: agent + availableRole::AbstractVector = ["system", "user", "assistant"] + agentName::String = "assistant" + maxUserMsg::Int = 10 + earlierConversation::String = "" # summary of earlier conversation + mqttClient::Union{mqttClient, Nothing} = nothing + msgMeta::Union{Dict, Nothing} = nothing + + """ Dict(Role=> Content) ; Role can be system, user, assistant + Example: + messages=[ + Dict(:role=>"system", :content=> "You are a helpful assistant."), + Dict(:role=>"assistant", :content=> "How may I help you"), + Dict(:role=>"user", :content=> "Hello, how are you"), + ] + """ + role::Symbol = :assistant + roles::Dict = Dict(:assistant => "You are a helpful assistant.",) + + # Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3 + # messages= [Dict(:role=>"system", 
:content=> "", :timestamp=> Dates.now()),] + messages = Vector{Dict{Symbol, Any}}() + context::String = "nothing" # internal thinking area + tools::Union{Dict, Nothing} = nothing + thought::String = "nothing" # contain unfinished thoughts for ReAct agent only + thinkinground::Int = 0 # no. of thinking round + thinkingroundlimit::Int = 5 # thinking round limit + thinkingMode::Union{Dict, Nothing} = nothing +end + +function agentReact( + agentName::String, + mqttClientSpec::NamedTuple; + role::Symbol=:assistant, + roles::Dict=Dict( + :assistant => + """ + You are a helpful assistant who answer the user's questions as best you can. + """, + :sommelier => + """ + You are a helpful sommelier at an online wine reseller who always ask user for wine relevant info before you could help them choosing wine. + You provide a personalized recommendation of up to two wines based on the user's preference, and you describe the benefits of each wine in detail. + You don't know other people personal info previously. + + Info used to select wine: + - type of food + - occasion + - user's personal taste of wine + - wine price range + - temperature at the serving location + - wine we have in stock + """, + ), + thinkingMode::Dict=Dict( + :no_thinking=> "", + :thinking=> + """Use the following format: + Question: the input question your user is asking and you must answer + Plan: first you should always think about the question and the info you have thoroughly then extract and devise a complete plan to find the answer (pay attention to variables and their corresponding numerals). + Thought: ask yourself do you have all the info you need? And what to do according to the plan (pay attention to correct numeral calculation and commonsense). + Act: the tool that match your thought, should be one of {toolnames} + ActInput: the input to the action (pay attention to the tool's input) + Obs: the result of the action + ... 
(this Plan/Thought/Act/ActInput/Obs can repeat N times until you know the answer.) + Thought: I think I know the answer + Answer: Answer of the original question + + Begin!""", + ), + tools::Dict=Dict( + :wikisearch=>Dict( + :name => "wikisearch", + :description => "Useful for when you need to search an encyclopedia", + :input => "Input is keywords and not a question.", + :output => "", + :func => wikisearch, # put function here + ), + :chatbox=>Dict( + :name => "chatbox", + :description => "Useful for when you need to ask a customer for more context.", + :input => "Input should be a conversation to customer.", + :output => "" , + :func => nothing, + ), + # :wineStock=>Dict( + # :name => "wineStock", + # :description => "useful for when you need to search for wine by your description, price, name or ID.", + # :input => "Input should be a search query with as much details as possible.", + # :output => "" , + # :func => nothing, + # ), + # :NTHING=>Dict( + # :name => "NTHING", + # :description => "useful for when you don't need to use tools or actions", + # :input => "No input is needed", + # :output => "" , + # :func => nothing, + # ), + ), + msgMeta::Dict=Dict( + :msgPurpose=> "updateStatus", + :from=> "chatbothub", + :to=> "llmAI", + :requestrespond=> "request", + :sendto=> "", # destination topic + :replyTo=> "chatbothub/llm/respond", # requester ask responder to send reply to this topic + :repondToMsgId=> "", # responder is responding to this msg id + :taskstatus=> "", # "complete", "fail", "waiting" or other status + :timestamp=> Dates.now(), + :msgId=> "$(uuid4())", + ), + availableRole::AbstractArray=["system", "user", "assistant"], + maxUserMsg::Int=10,) + + newAgent = agentReact() + newAgent.availableRole = availableRole + newAgent.maxUserMsg = maxUserMsg + newAgent.mqttClient = CommUtils.mqttClient(mqttClientSpec) + newAgent.msgMeta = msgMeta + newAgent.tools = tools + newAgent.role = role + newAgent.roles = roles + newAgent.thinkingMode = thinkingMode + + 
return newAgent +end + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +end # end module + + + diff --git a/previousVersion/0.0.4/src/utils.jl b/previousVersion/0.0.4/src/utils.jl new file mode 100644 index 0000000..a399315 --- /dev/null +++ b/previousVersion/0.0.4/src/utils.jl @@ -0,0 +1,666 @@ +module utils + +export makeSummary, sendReceivePrompt, chunktext, extractStepFromPlan, checkTotalStepInPlan, + detectCharacters, findDetectedCharacter, extract_number, toolNameBeingCalled, + chooseThinkingMode, conversationSummary, checkReasonableness, replaceHeaders, + addShortMem!, splittext + +using UUIDs, Dates, DataStructures +using CommUtils, GeneralUtils +using ..type + +#------------------------------------------------------------------------------------------------100 + +function makeSummary(a::T1, input::T2) where {T1<:agent, T2<:AbstractString} + summary = "Nothing." + prompt = + """ + <|im_start|>system + Input text: + $input + + Your job is to determine now whether you can make a summary of the input text by choosing one of following choices: + If you cannot make a summary say, "{No}". + If you can make a summary say, "{Yes}". + <|im_end|> + """ + prompt = replace(prompt, "{input}" => input) + result = sendReceivePrompt(a, prompt) + result = GeneralUtils.getStringBetweenCharacters(result, "{", "}") + if result == "Yes" # seperate summary part + prompt = + """ + <|im_start|>system + Input text: + $input + Your job is to make a concise summary of the input text. + <|im_end|> + """ + result = sendReceivePrompt(a, prompt) + if result[1:1] == "\n" + summary = result[2:end] + else + summary = result + end + end + input_summary = input + @show input_summary + @show summary + + return summary +end + + + + + +""" + Send a msg to registered mqtt topic within mqttClient. 
+ + ```jldoctest + julia> using JSON3, UUIDs, Dates, FileIO, CommUtils, ChatAgent + julia> mqttClientSpec = ( + clientName= "someclient", # name of this client + clientID= "$(uuid4())", + broker= "mqtt.yiem.ai", + pubtopic= (imgAI="img/api/v0.0.1/gpu/request", + txtAI="txt/api/v0.1.0/gpu/request"), + subtopic= (imgAI="agent/api/v0.1.0/img/respond", + txtAI="agent/api/v0.1.0/txt/respond"), + keepalive= 30, + ) + julia> msgMeta = Dict( + :msgPurpose=> "updateStatus", + :from=> "agent", + :to=> "llmAI", + :requestrespond=> "request", + :sendto=> "", # destination topic + :replyTo=> "agent/api/v0.1.0/txt/respond", # requester ask responder to send reply to this topic + :repondToMsgId=> "", # responder is responding to this msg id + :taskstatus=> "", # "complete", "fail", "waiting" or other status + :timestamp=> Dates.now(), + :msgId=> "$(uuid4())", + ) + julia> newAgent = ChatAgent.agentReact( + "Jene", + mqttClientSpec, + role=:assistant_react, + msgMeta=msgMeta + ) + ``` +""" +function sendReceivePrompt(a::T, prompt::String; max_tokens=256, timeout::Int=120) where {T<:agent} + a.msgMeta[:msgId] = "$(uuid4())" # new msg id for each msg + msg = Dict( + :msgMeta=> a.msgMeta, + :txt=> prompt, + :max_tokens=>max_tokens + ) + payloadChannel = Channel(1) + + # send prompt + CommUtils.request(a.mqttClient, msg) + starttime = Dates.now() + result = nothing + + while true + timepass = (Dates.now() - starttime).value / 1000.0 + CommUtils.mqttRun(a.mqttClient, payloadChannel) + if isready(payloadChannel) + topic, payload = take!(payloadChannel) + if payload[:msgMeta][:repondToMsgId] == msg[:msgMeta][:msgId] + result = haskey(payload, :txt) ? payload[:txt] : nothing + break + end + elseif timepass <= timeout + # skip, within waiting period + elseif timepass > timeout + println("sendReceivePrompt timeout $timepass/$timeout") + result = nothing + break + else + error("undefined condition. 
timepass=$timepass timeout=$timeout $(@__LINE__)")
        end
    end

    return result
end



"""
    Detect given characters. Output is a list of named tuple of detected char.

    Indices in the result are byte indices, matching Julia's string indexing.

    ```jldoctest
    julia> text = "I like to eat apples and use utensils."
    julia> characters = ["eat", "use", "i"]
    julia> result = detectCharacters(text, characters)
    4-element Vector{Any}:
     (char = "i", start = 4, stop = 4)
     (char = "eat", start = 11, stop = 13)
     (char = "use", start = 26, stop = 28)
     (char = "i", start = 35, stop = 35)
    ```
"""
function detectCharacters(text::T1, characters::Vector{T2}) where {T1<:AbstractString, T2<:AbstractString}
    result = []
    for i in eachindex(text)            # eachindex yields only valid character boundaries
        tail = SubString(text, i)       # zero-copy view of the text from position i
        for char in characters
            # `startswith` performs the bounds check itself.  The old code
            # compared against `length(text)` — a character count used as a
            # byte index — which truncated the scan on non-ASCII text and
            # leaned on a try/catch around StringIndexError.
            if startswith(tail, char)
                # Byte index of the last character of the match; for ASCII
                # needles this equals i + length(char) - 1, as before.
                stop = prevind(text, i + ncodeunits(char))
                push!(result, (char=char, start=i, stop=stop))
            end
        end
    end

    return result
end



"""
    Chunk a text into smaller pieces by header.
    ```jldoctest
    julia> using ChatAgent
    julia> text = "Plan: First, we need to find out what kind of wine the user wants."
    julia> headers = ChatAgent.detectCharacters(text, ["Nope", "sick", "First", "user", "Then", ])
    3-element Vector{Any}:
     (char = "First", start = 7, stop = 11)
     (char = "user", start = 56, stop = 59)
     (char = "Then", start = 102, stop = 105)
    julia> chunkedtext = ChatAgent.chunktext(text, headers)
    OrderedDict{String, String} with 3 entries:
      "Act 1:" => " wikisearch"
      "ActInput 1:" => " latest AMD GPU"
      "Thought 1:" => " I should always think about..."
+ ``` +""" +function chunktext(text::T1, headers::T2) where {T1<:AbstractString, T2<:AbstractVector} + result = OrderedDict{String, Any}() + + for (i, v) in enumerate(headers) + if i < length(headers) + nextheader = headers[i+1] + body = text[v[:stop]+1: nextheader[:start]-1] + # push!(result, (header=v[:char], body=body)) + result[v[:char]] = body + else + body = text[v[:stop]+1: end] + # push!(result, (header=v[:char], body=body)) + result[v[:char]] = body + end + end + + return result +end + + +function extractStepFromPlan(a::agent, plan::T, step::Int) where {T<:AbstractString} + prompt = + """ + <|im_start|>system + You are a helpful assistant. + Your job is to extract step $step in the user plan. + + Use the following format only: + {copy the step and put it here} + + <|im_end|> + + <|im_start|>user + $plan + <|im_end|> + <|im_start|>assistant + + """ + + respond = sendReceivePrompt(a, prompt) + + return respond +end + +function checkTotalStepInPlan(a::agent) + headers = [] + + for (k, v) in a.memory[:shortterm] + push!(headers, k) + end + + # Plan will have number e.g. Plan 3: so I need a way to detect latest Plan + header = nothing + for i in reverse(headers) + if occursin("Plan", i) + header = i + break + end + end + + p = a.memory[:shortterm][header] + plan = "Plan: $p" + prompt = + """ + <|im_start|>system + You are a helpful assistant. + Your job is to determine how many steps in a user plan. + + Use the following format to answer: + Total step number is {} + <|im_end|> + + <|im_start|>user + $plan + <|im_end|> + <|im_start|>assistant + + """ + respond = sendReceivePrompt(a, prompt) + result = extract_number(respond) + + return result +end + + +""" + Find a given character from a vector of named tuple. 
+ Output is character location index inside detectedCharacters + + ```jldoctest + julia a = [ (char = "i", start = 4, stop = 4) + (char = "eat", start = 11, stop = 13) + (char = "use", start = 26, stop = 28) + (char = "i", start = 35, stop = 35) ] + julia> findDetectedCharacter(a, "i") + [1, 4] + ``` +""" +function findDetectedCharacter(detectedCharacters, character) + allchar = [i[1] for i in detectedCharacters] + return findall(isequal.(allchar, character)) +end + + + + + + +function extract_number(text::T) where {T<:AbstractString} + regex = r"\d+" # regular expression to match one or more digits + match = Base.match(regex, text) # find the first match in the text + if match !== nothing + number = parse(Int, match.match) + return number + else + error("No number found in the text $(@__LINE__)") + end +end + + + +""" + Extract toolname from text. + ```jldoctest + julia> text = " internetsearch\n" + julia> tools = Dict( + :internetsearch=>Dict( + :name => "internetsearch", + :description => "Useful for when you need to search the Internet", + :input => "Input should be a search query.", + :output => "", + # :func => internetsearch # function + ), + :chatbox=>Dict( + :name => "chatbox", + :description => "Useful for when you need to ask a customer what you need to know or to talk with them.", + :input => "Input should be a conversation to customer.", + :output => "" , + ), + ) + julia> toolname = toolNameBeingCalled(text, tools) + ``` +""" +function toolNameBeingCalled(text::T, tools::Dict) where {T<:AbstractString} + toolNameBeingCalled = nothing + for (k, v) in tools + toolname = String(k) + if contains(text, toolname) + toolNameBeingCalled = toolname + break + end + end + return toolNameBeingCalled +end + + + +function chooseThinkingMode(a::agentReflex, usermsg::String) + thinkingmode = nothing + if length(a.memory[:log]) != 0 + thinkingmode = :continue_thinking + else + prompt = + """ + <|im_start|>system + {systemMsg} + You always use tools if there is a 
chance to impove your respond. + You have access to the following tools: + {tools} + Your job is to determine whether you will use tools or actions to respond. + + Choose one of the following choices: + If you don't need to use tools or actions to respond to the stimulus say, "{no}". + If you need tools or actions to respond to the stimulus say, "{yes}". + If the user want to get wine say, "{yes}". + <|im_end|> + + <|im_start|>user + {input} + <|im_end|> + <|im_start|>assistant + + """ + toollines = "" + for (toolname, v) in a.tools + if toolname ∉ ["chatbox", "nothing"] + toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n" + toollines *= toolline + end + end + prompt = replace(prompt, "{systemMsg}" => a.roles[a.role]) + prompt = replace(prompt, "{tools}" => toollines) + prompt = replace(prompt, "{input}" => usermsg) + result = sendReceivePrompt(a, prompt) + willusetools = GeneralUtils.getStringBetweenCharacters(result, "{", "}") + thinkingmode = willusetools == "yes" ? :new_thinking : :no_thinking + end + + return thinkingmode +end + + + + + +""" + make a conversation summary. 
+ ```jldoctest + julia> conversation = [ + Dict(:role=> "user", :content=> "I would like to get a bottle of wine", :timestamp=> Dates.now()), + Dict(:role=> "assistant", :content=> "What kind of Thai dishes are you having?", :timestamp=> Dates.now()), + Dict(:role=> "user", :content=> "It a pad thai.", :timestamp=> Dates.now()), + Dict(:role=> "assistant", :content=> "Is there any special occasion for this event?", :timestamp=> Dates.now()), + Dict(:role=> "user", :content=> "We'll hold a wedding party at the beach.", :timestamp=> Dates.now()), + Dict(:role=> "assistant", :content=> "What is your preferred type of wine?", :timestamp=> Dates.now()), + Dict(:role=> "user", :content=> "I like dry white wine with medium tanins.", :timestamp=> Dates.now()), + Dict(:role=> "assistant", :content=> "What is your preferred price range for this bottle of wine?", :timestamp=> Dates.now()), + Dict(:role=> "user", :content=> "lower than 50 dollars.", :timestamp=> Dates.now()), + Dict(:role=> "assistant", :content=> "Based on your preferences and our stock, I recommend the following two wines for you: + 1. Pierre Girardin \"Murgers des Dents de Chien\" - Saint-Aubin 1er Cru (17 USD) + 2. Etienne Sauzet'Les Perrieres' - Puligny Montrachet Premier Cru (22 USD) + The first wine, Pierre Girardin \"Murgers des Dents de Chien\" - Saint-Aubin 1er Cru, is a great choice for its affordable price and refreshing taste. + It pairs well with Thai dishes and will be perfect for your beach wedding party. + The second wine, Etienne Sauzet'Les Perrieres' - Puligny Montrachet Premier Cru, offers a more complex flavor profile and slightly higher price point, but still remains within your budget. + Both wines are suitable for serving at 22 C temperature.", :timestamp=> Dates.now()), + ] + + julia> summary = conversationSummary(conversation) + ``` +""" +function conversationSummary(a::T) where {T<:agent} + prompt = + """ + <|im_start|>system + You talked with a user earlier. 
+ Now you make a detailed bullet summary of the conversation from your perspective. + You must refers to yourself by "I" in the summary. + + Here are the conversation: + {conversation} + <|im_end|> + + """ + conversation = "" + summary = "nothing" + if length(a.messages)!= 0 + for msg in a.messages + role = msg[:role] + content = msg[:content] + + if role == "user" + conversation *= "$role: $content\n" + elseif role == "assistant" + conversation *= "I: $content\n" + else + error("undefied condition role = $role $(@__LINE__)") + end + end + + prompt = replace(prompt, "{conversation}" => conversation) + result = sendReceivePrompt(a, prompt) + summary = result === nothing ? "nothing" : result + summary = split(summary, "<|im_end|>")[1] + if summary[1:1] == "\n" + summary = summary[2:end] + end + end + @show summary + return summary +end + + + +#TODO +function checkReasonableness(userMsg::String, context::String, tools) + # Ref: https://www.youtube.com/watch?v=XV4IBaZqbps + + prompt = + """ + <|im_start|>system + You are a helpful assistant. Your job is to check the reasonableness of user assignments. + If the user assignment can be answered given the tools available say, "This is a reasonable assignment". + If the user assignment cannot be answered then provide some feedback to the user that may improve + their assignment. + + Here is the context for the assignment: + {context} + + <|im_end|> + + <|im_start|>user + {assignment} + <|im_end|> + <|im_start|>assistant + + """ + + context = "You have access to the following tools: + WineStock: useful for when you need to find info about wine by matching your description, price, name or ID. Input should be a search query with as much details as possible." 
+ prompt = replace(prompt, "{assignment}" => userMsg) + prompt = replace(prompt, "{context}" => context) + + output_py = llm( + prompt, + max_tokens=512, + temperature=0.1, + # top_p=top_p, + echo=false, + stop=["", "<>", ], + ) + _output_jl = pyconvert(Dict, output_py); + output = pyconvert(Dict, _output_jl["choices"][1]); + output["text"] + +end + + +""" Add chunked text to a short term memory of a chat agent + +Args: + shortMem = short memory of a chat agent, + chunkedtext = a dict contains text + +Return: no return + +# Example +```jldoctest +julia> chunkedtext = OrderedDict{String, String}( + "Thought 1:" => " I should always think about...", + "Act 1:" => " wikisearch", + "ActInput 1:" => " latest AMD GPU",) +julia> shortMem = OrderedDict{String, Any}() +julia> addShortMem!(shortMem, chunkedtext) +OrderedDict{String, Any} with 3 entries: +"Thought 1:" => " I should always think about..." +"Act 1:" => " wikisearch" +"ActInput 1:" => " latest AMD GPU" +``` +""" +function addShortMem!(shortMem::OrderedDict{String, Any}, chunkedtext::T) where {T<:AbstractDict} + for (k, v) in chunkedtext + shortMem[k] = v + end + return shortMem +end + + + + +""" Split text using all keywords in a list. Start spliting from rightmost of the text. + +Args: + text = a text you want to split + list = a list of keywords you want to split + +Return: + a leftmost text after split + +# Example +```jldoctest +julia> text = "Consider the type of food, occasion and temperature at the serving location." 
+julia> list = ["at", "and"] +"Consider the type of food, occasion " +``` +""" +function splittext(text, list) + newtext = text + for i in list + newtext = split(newtext, i)[1] + end + return newtext +end + + + +""" + Add step number to header in a text +""" +function addStepNumber(text::T, headers, step::Int) where {T<:AbstractString} + newtext = text + for i in headers + if occursin(i[:char], newtext) + new = replace(i[:char], ":"=> " $step:") + newtext = replace(newtext, i[:char]=>new ) + end + end + return newtext +end +function addStepNumber(text::T, headers, step::Int, substep::Int) where {T<:AbstractString} + newtext = text + for i in headers + if occursin(i[:char], newtext) + new = replace(i[:char], ":"=> " $step-$substep:") + newtext = replace(newtext, i[:char]=>new ) + end + end + return newtext +end + + +""" Add step number to header in a text + +Args: + text = a text you want to split + headers = a list of keywords you want to add step and substep to + +Return: + a leftmost text after split + +# Example +```jldoctest +julia> text = "Consider the type of food, occasion and temperature at the serving location." +julia> headers = ["Thought", "Act"] + +``` +""" +function replaceHeaders(text::T, headers, step::Int) where {T<:AbstractString} + newtext = text + for i in headers + header = i[1:end-1] # not include ":" + if occursin(header, newtext) + startind = findfirst(header, newtext)[1] + stopind = findnext(":", newtext, startind+1)[end] + word = newtext[startind: stopind] + newword = "$header $step:" + newtext = replace(newtext, word=> newword) + + end + end + + return newtext +end + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +end # end module \ No newline at end of file