diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 0000000..7f264b6
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,17 @@
+{
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "type": "julia",
+ "request": "launch",
+ "name": "Run active Julia file",
+ "program": "${file}",
+ "stopOnEntry": false,
+ "cwd": "${workspaceFolder}",
+ "juliaEnv": "${command:activeJuliaEnvironment}"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/Manifest.toml b/Manifest.toml
index 4954cb4..66f1e59 100644
--- a/Manifest.toml
+++ b/Manifest.toml
@@ -1,8 +1,8 @@
# This file is machine-generated - editing it directly is not advised
-julia_version = "1.10.3"
+julia_version = "1.10.4"
manifest_format = "2.0"
-project_hash = "d5182042dab089bafa4f01ef385efd46c01a0396"
+project_hash = "42fe76ec8191cf95e51733bee474db0f4870d573"
[[deps.AliasTables]]
deps = ["PtrArrays", "Random"]
@@ -21,9 +21,20 @@ uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.BitFlags]]
-git-tree-sha1 = "2dc09997850d68179b69dafb58ae806167a32b1b"
+git-tree-sha1 = "0691e34b3bb8be9307330f88d1a3c3f25466c24d"
uuid = "d1d4a3ce-64b1-5f1a-9ba4-7e7e69966f35"
-version = "0.1.8"
+version = "0.1.9"
+
+[[deps.CEnum]]
+git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90"
+uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
+version = "0.4.2"
+
+[[deps.CSV]]
+deps = ["CodecZlib", "Dates", "FilePathsBase", "InlineStrings", "Mmap", "Parsers", "PooledArrays", "PrecompileTools", "SentinelArrays", "Tables", "Unicode", "WeakRefStrings", "WorkerUtilities"]
+git-tree-sha1 = "6c834533dc1fabd820c1db03c839bf97e45a3fab"
+uuid = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
+version = "0.10.14"
[[deps.Calculus]]
deps = ["LinearAlgebra"]
@@ -39,9 +50,9 @@ version = "1.3.5"
[[deps.CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
-git-tree-sha1 = "59939d8a997469ee05c4b4944560a820f9ba0d73"
+git-tree-sha1 = "b8fe8546d52ca154ac556809e10c75e6e7430ac8"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
-version = "0.7.4"
+version = "0.7.5"
[[deps.Compat]]
deps = ["TOML", "UUIDs"]
@@ -60,9 +71,9 @@ version = "1.1.1+0"
[[deps.ConcurrentUtilities]]
deps = ["Serialization", "Sockets"]
-git-tree-sha1 = "6cbbd4d241d7e6579ab354737f4dd95ca43946e1"
+git-tree-sha1 = "ea32b83ca4fefa1768dc84e504cc0a94fb1ab8d1"
uuid = "f0e56b4a-5159-44fe-b623-3e5288b988bb"
-version = "2.4.1"
+version = "2.4.2"
[[deps.CondaPkg]]
deps = ["JSON3", "Markdown", "MicroMamba", "Pidfile", "Pkg", "Preferences", "TOML"]
@@ -70,11 +81,27 @@ git-tree-sha1 = "e81c4263c7ef4eca4d645ef612814d72e9255b41"
uuid = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
version = "0.2.22"
+[[deps.Crayons]]
+git-tree-sha1 = "249fe38abf76d48563e2f4556bebd215aa317e15"
+uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f"
+version = "4.1.1"
+
+[[deps.DBInterface]]
+git-tree-sha1 = "a444404b3f94deaa43ca2a58e18153a82695282b"
+uuid = "a10d1c49-ce27-4219-8d33-6db1a4562965"
+version = "2.6.1"
+
[[deps.DataAPI]]
git-tree-sha1 = "abe83f3a2f1b857aac70ef8b269080af17764bbe"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.16.0"
+[[deps.DataFrames]]
+deps = ["Compat", "DataAPI", "DataStructures", "Future", "InlineStrings", "InvertedIndices", "IteratorInterfaceExtensions", "LinearAlgebra", "Markdown", "Missings", "PooledArrays", "PrecompileTools", "PrettyTables", "Printf", "REPL", "Random", "Reexport", "SentinelArrays", "SortingAlgorithms", "Statistics", "TableTraits", "Tables", "Unicode"]
+git-tree-sha1 = "04c738083f29f86e62c8afc341f0967d8717bdb8"
+uuid = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
+version = "1.6.1"
+
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "1d0a14036acb104d9e89698bd408f63ab58cdc82"
@@ -90,6 +117,23 @@ version = "1.0.0"
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
+[[deps.Decimals]]
+git-tree-sha1 = "e98abef36d02a0ec385d68cd7dadbce9b28cbd88"
+uuid = "abce61dc-4473-55a0-ba07-351d65e31d42"
+version = "0.4.1"
+
+[[deps.DispatchDoctor]]
+deps = ["MacroTools", "Preferences"]
+git-tree-sha1 = "32d236e685d028f5bc808aae0634b58aac5128f0"
+uuid = "8d63f2c5-f18a-4cf2-ba9d-b3f60fc568c8"
+version = "0.4.10"
+
+ [deps.DispatchDoctor.extensions]
+ DispatchDoctorChainRulesCoreExt = "ChainRulesCore"
+
+ [deps.DispatchDoctor.weakdeps]
+ ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
+
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
@@ -133,6 +177,23 @@ git-tree-sha1 = "dcb08a0d93ec0b1cdc4af184b26b591e9695423a"
uuid = "460bff9d-24e4-43bc-9d9f-a8973cb893f4"
version = "0.1.10"
+[[deps.ExprTools]]
+git-tree-sha1 = "27415f162e6028e81c72b82ef756bf321213b6ec"
+uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
+version = "0.1.10"
+
+[[deps.FileIO]]
+deps = ["Pkg", "Requires", "UUIDs"]
+git-tree-sha1 = "82d8afa92ecf4b52d78d869f038ebfb881267322"
+uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549"
+version = "1.16.3"
+
+[[deps.FilePathsBase]]
+deps = ["Compat", "Dates", "Mmap", "Printf", "Test", "UUIDs"]
+git-tree-sha1 = "9f00e42f8d99fdde64d40c8ea5d14269a2e2c1aa"
+uuid = "48062228-2e41-5def-b9a4-89aafe57970f"
+version = "0.9.21"
+
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
@@ -148,8 +209,18 @@ weakdeps = ["PDMats", "SparseArrays", "Statistics"]
FillArraysSparseArraysExt = "SparseArrays"
FillArraysStatisticsExt = "Statistics"
+[[deps.FormatCorrector]]
+deps = ["CSV", "CondaPkg", "DataFrames", "DataStructures", "Dates", "DispatchDoctor", "FileIO", "GeneralUtils", "HTTP", "JSON3", "LLMMCTS", "LibPQ", "MQTTClient", "PrettyPrinting", "PythonCall", "Random", "Revise", "URIs", "UUIDs"]
+path = "../FormatCorrector"
+uuid = "4aa99331-0491-47cc-864d-b8dfff346b60"
+version = "0.1.0"
+
+[[deps.Future]]
+deps = ["Random"]
+uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
+
[[deps.GeneralUtils]]
-deps = ["DataStructures", "Dates", "Distributions", "JSON3", "MQTTClient", "Random", "Revise", "UUIDs"]
+deps = ["CSV", "DataFrames", "DataStructures", "Dates", "Distributions", "JSON3", "MQTTClient", "Random", "Revise", "UUIDs"]
path = "/appfolder/app/privatejuliapkg/GeneralUtils"
uuid = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
version = "0.1.0"
@@ -166,15 +237,50 @@ git-tree-sha1 = "f218fe3736ddf977e0e772bc9a586b2383da2685"
uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a"
version = "0.3.23"
+[[deps.Infinity]]
+deps = ["Dates", "Random", "Requires"]
+git-tree-sha1 = "cf8234411cbeb98676c173f930951ea29dca3b23"
+uuid = "a303e19e-6eb4-11e9-3b09-cd9505f79100"
+version = "0.2.4"
+
+[[deps.InlineStrings]]
+git-tree-sha1 = "45521d31238e87ee9f9732561bfee12d4eebd52d"
+uuid = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48"
+version = "1.4.2"
+
+ [deps.InlineStrings.extensions]
+ ArrowTypesExt = "ArrowTypes"
+ ParsersExt = "Parsers"
+
+ [deps.InlineStrings.weakdeps]
+ ArrowTypes = "31f734f8-188a-4ce0-8406-c8a06bd891cd"
+ Parsers = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
+
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
+[[deps.Intervals]]
+deps = ["Dates", "Printf", "RecipesBase", "Serialization", "TimeZones"]
+git-tree-sha1 = "ac0aaa807ed5eaf13f67afe188ebc07e828ff640"
+uuid = "d8418881-c3e1-53bb-8760-2df7ec849ed5"
+version = "1.10.0"
+
+[[deps.InvertedIndices]]
+git-tree-sha1 = "0dc7b50b8d436461be01300fd8cd45aa0274b038"
+uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f"
+version = "1.3.0"
+
[[deps.IrrationalConstants]]
git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.2.2"
+[[deps.IterTools]]
+git-tree-sha1 = "42d5f897009e7ff2cf88db414a389e5ed1bdd023"
+uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e"
+version = "1.10.0"
+
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
@@ -200,16 +306,32 @@ version = "1.14.0"
[[deps.JuliaInterpreter]]
deps = ["CodeTracking", "InteractiveUtils", "Random", "UUIDs"]
-git-tree-sha1 = "e9648d90370e2d0317f9518c9c6e0841db54a90b"
+git-tree-sha1 = "a6adc2dcfe4187c40dc7c2c9d2128e326360e90a"
uuid = "aa1ae85d-cabe-5617-a682-6adf51b2e16a"
-version = "0.9.31"
+version = "0.9.32"
+
+[[deps.Kerberos_krb5_jll]]
+deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
+git-tree-sha1 = "60274b4ab38e8d1248216fe6b6ace75ae09b0502"
+uuid = "b39eb1a6-c29a-53d7-8c32-632cd16f18da"
+version = "1.19.3+0"
[[deps.LLMMCTS]]
-deps = ["JSON3"]
+deps = ["GeneralUtils", "JSON3"]
path = "/appfolder/app/privatejuliapkg/LLMMCTS"
uuid = "d76c5a4d-449e-4835-8cc4-dd86ec44f241"
version = "0.1.0"
+[[deps.LaTeXStrings]]
+git-tree-sha1 = "50901ebc375ed41dbf8058da26f9de442febbbec"
+uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
+version = "1.3.1"
+
+[[deps.LayerDicts]]
+git-tree-sha1 = "6087ad3521d6278ebe5c27ae55e7bbb15ca312cb"
+uuid = "6f188dcb-512c-564b-bc01-e0f76e72f166"
+version = "1.0.0"
+
[[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
@@ -233,6 +355,18 @@ deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll"]
uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5"
version = "1.6.4+0"
+[[deps.LibPQ]]
+deps = ["CEnum", "DBInterface", "Dates", "Decimals", "DocStringExtensions", "FileWatching", "Infinity", "Intervals", "IterTools", "LayerDicts", "LibPQ_jll", "Libdl", "Memento", "OffsetArrays", "SQLStrings", "Tables", "TimeZones", "UTCDateTimes"]
+git-tree-sha1 = "74feb1a63ebbcdcf1730016d2a4dfad0a655404f"
+uuid = "194296ae-ab2e-5f79-8cd4-7183a0a5a0d1"
+version = "1.17.1"
+
+[[deps.LibPQ_jll]]
+deps = ["Artifacts", "JLLWrappers", "Kerberos_krb5_jll", "Libdl", "OpenSSL_jll", "Pkg"]
+git-tree-sha1 = "a299629703a93d8efcefccfc16b18ad9a073d131"
+uuid = "08be9ffa-1c94-5ee5-a977-46a84ec9b350"
+version = "14.3.0+1"
+
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
@@ -247,9 +381,9 @@ uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"]
-git-tree-sha1 = "18144f3e9cbe9b15b070288eef858f71b291ce37"
+git-tree-sha1 = "a2d09619db4e765091ee5c6ffe8872849de0feea"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
-version = "0.3.27"
+version = "0.3.28"
[deps.LogExpFunctions.extensions]
LogExpFunctionsChainRulesCoreExt = "ChainRulesCore"
@@ -272,9 +406,9 @@ version = "1.0.3"
[[deps.LoweredCodeUtils]]
deps = ["JuliaInterpreter"]
-git-tree-sha1 = "c6a36b22d2cca0e1a903f00f600991f97bf5f426"
+git-tree-sha1 = "eeaedcf337f33c039f9f3a209a8db992deefd7e9"
uuid = "6f1432cf-f94c-5a45-995e-cdbf5db27b0b"
-version = "2.4.6"
+version = "2.4.8"
[[deps.MQTTClient]]
deps = ["Distributed", "Random", "Sockets"]
@@ -307,6 +441,12 @@ deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.2+1"
+[[deps.Memento]]
+deps = ["Dates", "Distributed", "Requires", "Serialization", "Sockets", "Test", "UUIDs"]
+git-tree-sha1 = "bb2e8f4d9f400f6e90d57b34860f6abdc51398e5"
+uuid = "f28f55f0-a522-5efc-85c2-fe41dfb9b2d9"
+version = "1.4.1"
+
[[deps.MicroMamba]]
deps = ["Pkg", "Scratch", "micromamba_jll"]
git-tree-sha1 = "011cab361eae7bcd7d278f0a7a00ff9c69000c51"
@@ -322,6 +462,12 @@ version = "1.2.0"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
+[[deps.Mocking]]
+deps = ["Compat", "ExprTools"]
+git-tree-sha1 = "bf17d9cb4f0d2882351dfad030598f64286e5936"
+uuid = "78c3b35d-d492-501b-9361-3d52fe80e533"
+version = "0.7.8"
+
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2023.1.10"
@@ -336,6 +482,17 @@ version = "1.0.2"
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
version = "1.2.0"
+[[deps.OffsetArrays]]
+git-tree-sha1 = "1a27764e945a152f7ca7efa04de513d473e9542e"
+uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881"
+version = "1.14.1"
+
+ [deps.OffsetArrays.extensions]
+ OffsetArraysAdaptExt = "Adapt"
+
+ [deps.OffsetArrays.weakdeps]
+ Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
+
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
@@ -354,9 +511,9 @@ version = "1.4.3"
[[deps.OpenSSL_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
-git-tree-sha1 = "3da7367955dcc5c54c1ba4d402ccdc09a1a3e046"
+git-tree-sha1 = "a12e56c72edee3ce6b96667745e6cbbe5498f200"
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
-version = "3.0.13+1"
+version = "1.1.23+0"
[[deps.OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
@@ -392,6 +549,12 @@ deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.10.0"
+[[deps.PooledArrays]]
+deps = ["DataAPI", "Future"]
+git-tree-sha1 = "36d8b4b899628fb92c2749eb488d884a926614d3"
+uuid = "2dfb63ee-cc39-5dd5-95bd-886bf059d720"
+version = "1.4.3"
+
[[deps.PrecompileTools]]
deps = ["Preferences"]
git-tree-sha1 = "5aa36f7049a63a1528fe8f7c3f2113413ffd4e1f"
@@ -409,6 +572,12 @@ git-tree-sha1 = "142ee93724a9c5d04d78df7006670a93ed1b244e"
uuid = "54e16d92-306c-5ea0-a30b-337be88ac337"
version = "0.4.2"
+[[deps.PrettyTables]]
+deps = ["Crayons", "LaTeXStrings", "Markdown", "PrecompileTools", "Printf", "Reexport", "StringManipulation", "Tables"]
+git-tree-sha1 = "66b20dd35966a748321d3b2537c4584cf40387c7"
+uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d"
+version = "2.3.2"
+
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
@@ -438,6 +607,12 @@ uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
deps = ["SHA"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+[[deps.RecipesBase]]
+deps = ["PrecompileTools"]
+git-tree-sha1 = "5c3d09cc4f31f5fc6af001c250bf1278733100ff"
+uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
+version = "1.3.4"
+
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
@@ -451,9 +626,9 @@ version = "1.3.0"
[[deps.Revise]]
deps = ["CodeTracking", "Distributed", "FileWatching", "JuliaInterpreter", "LibGit2", "LoweredCodeUtils", "OrderedCollections", "Pkg", "REPL", "Requires", "UUIDs", "Unicode"]
-git-tree-sha1 = "12aa2d7593df490c407a3bbd8b86b8b515017f3e"
+git-tree-sha1 = "85ddd93ea15dcd8493400600e09104a9e94bb18d"
uuid = "295af30f-e4ad-537b-8983-00126c2a3abe"
-version = "3.5.14"
+version = "3.5.15"
[[deps.Rmath]]
deps = ["Random", "Rmath_jll"]
@@ -471,12 +646,29 @@ version = "0.4.2+0"
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
+[[deps.SQLLLM]]
+deps = ["CSV", "CondaPkg", "DataFrames", "DataStructures", "Dates", "DispatchDoctor", "FileIO", "FormatCorrector", "GeneralUtils", "HTTP", "JSON3", "LLMMCTS", "LibPQ", "MQTTClient", "PrettyPrinting", "PythonCall", "Random", "Revise", "Tables", "URIs", "UUIDs"]
+path = "/appfolder/app/privatejuliapkg/SQLLLM"
+uuid = "2ebc79c7-cc10-4a3a-9665-d2e1d61e63d3"
+version = "0.1.0"
+
+[[deps.SQLStrings]]
+git-tree-sha1 = "55de0530689832b1d3d43491ee6b67bd54d3323c"
+uuid = "af517c2e-c243-48fa-aab8-efac3db270f5"
+version = "0.1.0"
+
[[deps.Scratch]]
deps = ["Dates"]
git-tree-sha1 = "3bac05bc7e74a75fd9cba4295cde4045d9fe2386"
uuid = "6c6a2e73-6563-6170-7368-637461726353"
version = "1.2.1"
+[[deps.SentinelArrays]]
+deps = ["Dates", "Random"]
+git-tree-sha1 = "ff11acffdb082493657550959d4feb4b6149e73a"
+uuid = "91c51154-3ec4-41a3-a24f-3f23e20d615c"
+version = "1.4.5"
+
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
@@ -542,6 +734,12 @@ version = "1.3.1"
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
+[[deps.StringManipulation]]
+deps = ["PrecompileTools"]
+git-tree-sha1 = "a04cabe79c5f01f4d723cc6704070ada0b9d46d5"
+uuid = "892a3eda-7b42-436c-8928-eab12a02cf0e"
+version = "0.3.4"
+
[[deps.StructTypes]]
deps = ["Dates", "UUIDs"]
git-tree-sha1 = "ca4bccb03acf9faaf4137a9abc1881ed1841aa70"
@@ -562,6 +760,12 @@ deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.3"
+[[deps.TZJData]]
+deps = ["Artifacts"]
+git-tree-sha1 = "1607ad46cf8d642aa779a1d45af1c8620dbf6915"
+uuid = "dc5dba14-91b3-4cab-a142-028a31da12f7"
+version = "1.2.0+2024a"
+
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
@@ -583,10 +787,20 @@ version = "1.10.0"
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+[[deps.TimeZones]]
+deps = ["Dates", "Downloads", "InlineStrings", "Mocking", "Printf", "Scratch", "TZJData", "Unicode", "p7zip_jll"]
+git-tree-sha1 = "a6ae8d7a27940c33624f8c7bde5528de21ba730d"
+uuid = "f269a46b-ccf7-5d73-abea-4c690281aa53"
+version = "1.17.0"
+weakdeps = ["RecipesBase"]
+
+ [deps.TimeZones.extensions]
+ TimeZonesRecipesBaseExt = "RecipesBase"
+
[[deps.TranscodingStreams]]
-git-tree-sha1 = "5d54d076465da49d6746c647022f3b3674e64156"
+git-tree-sha1 = "60df3f8126263c0d6b357b9a1017bb94f53e3582"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
-version = "0.10.8"
+version = "0.11.0"
weakdeps = ["Random", "Test"]
[deps.TranscodingStreams.extensions]
@@ -597,6 +811,12 @@ git-tree-sha1 = "67db6cc7b3821e19ebe75791a9dd19c9b1188f2b"
uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
version = "1.5.1"
+[[deps.UTCDateTimes]]
+deps = ["Dates", "TimeZones"]
+git-tree-sha1 = "4af3552bf0cf4a071bf3d14bd20023ea70f31b62"
+uuid = "0f7cfa37-7abf-4834-b969-a8aa512401c2"
+version = "1.6.1"
+
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
@@ -609,6 +829,17 @@ git-tree-sha1 = "c81331b3b2e60a982be57c046ec91f599ede674a"
uuid = "e17b2a0c-0bdf-430a-bd0c-3a23cae4ff39"
version = "1.0.0"
+[[deps.WeakRefStrings]]
+deps = ["DataAPI", "InlineStrings", "Parsers"]
+git-tree-sha1 = "b1be2855ed9ed8eac54e5caff2afcdb442d52c23"
+uuid = "ea10d353-3f73-51f8-a26c-33c1cb351aa5"
+version = "1.4.2"
+
+[[deps.WorkerUtilities]]
+git-tree-sha1 = "cd1659ba0d57b71a464a29e64dbc67cfe83d54e7"
+uuid = "76eceee3-57b5-4d4a-8e66-0e911cebbf60"
+version = "1.6.1"
+
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
diff --git a/Project.toml b/Project.toml
index 55e0cad..93863b9 100644
--- a/Project.toml
+++ b/Project.toml
@@ -15,5 +15,7 @@ MQTTClient = "985f35cc-2c3d-4943-b8c1-f0931d5f0959"
PrettyPrinting = "54e16d92-306c-5ea0-a30b-337be88ac337"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
+SQLLLM = "2ebc79c7-cc10-4a3a-9665-d2e1d61e63d3"
URIs = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
diff --git a/src/interface BACKUP.jl b/src/interface BACKUP.jl
new file mode 100644
index 0000000..fecabc7
--- /dev/null
+++ b/src/interface BACKUP.jl
@@ -0,0 +1,1196 @@
+module interface
+
+export addNewMessage, conversation, decisionMaker, evaluator, reflector
+ # isterminal,
+
+using JSON3, DataStructures, Dates, UUIDs, HTTP, Random, MQTTClient, PrettyPrinting
+using GeneralUtils, LLMMCTS
+using ..type, ..util, ..llmfunction
+
+# ------------------------------------------------------------------------------------------------ #
+# pythoncall setting #
+# ------------------------------------------------------------------------------------------------ #
+# Ref: https://github.com/JuliaPy/PythonCall.jl/issues/252
+# by setting the following variables, PythonCall.jl will use:
+# 1. system's python and packages installed by system (via apt install)
+# or 2. conda python and packages installed by conda
+# if these setting are not set (comment out), PythonCall will use its own python and packages that
+# installed by CondaPkg.jl (from env_preparation.jl)
+# ENV["JULIA_CONDAPKG_BACKEND"] = "Null" # set condapkg backend = none
+# systemPython = split(read(`which python`, String), "\n")[1] # system's python path
+# ENV["JULIA_PYTHONCALL_EXE"] = systemPython # find python location with $> which python ex. raw"/root/conda/bin/python"
+
+# using PythonCall
+# const py_agents = PythonCall.pynew()
+# const py_llms = PythonCall.pynew()
+# function __init__()
+# # PythonCall.pycopy!(py_cv2, pyimport("cv2"))
+
+# # equivalent to from urllib.request import urlopen in python
+# PythonCall.pycopy!(py_agents, pyimport("langchain.agents"))
+# PythonCall.pycopy!(py_llms, pyimport("langchain.llms"))
+# end
+
+# ---------------------------------------------- 100 --------------------------------------------- #
+
+
+macro executeStringFunction(functionStr, args...)
+ # Parse the function string into an expression
+ func_expr = Meta.parse(functionStr)
+
+ # Create a new function with the parsed expression
+ function_to_call = eval(Expr(:function,
+ Expr(:call, func_expr, args...), func_expr.args[2:end]...))
+
+ # Call the newly created function with the provided arguments
+ function_to_call(args...)
+end
+
+
+""" Think and choose action
+
+# Arguments
+ - `config::T1`
+ config
+ - `state::T2`
+ a game state
+
+# Return
+ - `thoughtDict::Dict`
+
+# Example
+```jldoctest
+julia> config = Dict(
+ :mqttServerInfo => Dict(
+ :description => "mqtt server info",
+ :port => 1883,
+ :broker => "mqtt.yiem.cc"
+ ),
+ :externalservice => Dict(
+ :text2textinstruct => Dict(
+ :mqtttopic => "/loadbalancer/requestingservice",
+ :description => "text to text service with instruct LLM",
+ :llminfo => Dict(
+ :name => "llama3instruct"
+ )
+ ),
+ )
+ )
+
+julia> output_thoughtDict = Dict(
+ :thought_1 => "The customer wants to buy a bottle of wine. This is a good start!",
+ :action_1 => Dict{Symbol, Any}(
+ :action=>"Chatbox",
+ :input=>"What occasion are you buying the wine for?"
+ ),
+ :observation_1 => ""
+ )
+```
+
+# TODO
+ - [] update docstring
+ - [x] implement the function
+ - [] implement RAG to pull similar experience
+ - [] use customerinfo
+ - [] user storeinfo
+ - [x] add try block. check result that it is expected before returning
+
+# Signature
+"""
+function decisionMaker(config::T1, state::T2)::Dict{Symbol, Any} where {T1<:AbstractDict, T2<:AbstractDict}
+ customerinfo =
+ """
+ I will give you the following information about customer:
+ $(JSON3.write(state[:customerinfo]))
+ """
+
+ storeinfo =
+ """
+ I will give you the following information about your store:
+ $(JSON3.write(state[:storeinfo]))
+ """
+
+ lessonDict = copy(JSON3.read("lesson.json"))
+
+ lesson =
+ if isempty(lessonDict)
+ ""
+ else
+ lessons = Dict{Symbol, Any}()
+ for (k, v) in lessonDict
+ lessons[k] = lessonDict[k][:lesson]
+ end
+
+ """
+ You have attempted to help the user before and failed, either because your reasoning for the
+ recommendation was incorrect or your response did not exactly match the user expectation.
+ The following lesson(s) give a plan to avoid failing to help the user in the same way you
+ did previously. Use them to improve your strategy to help the user.
+
+ Here are some lessons in JSON format:
+ $(JSON3.write(lessons))
+
+            When providing the thought and action for the current trial, take into account these failed
+ trajectories and make sure not to repeat the same mistakes and incorrect answers.
+ """
+ end
+
+ _prompt =
+ """
+ You are a helpful sommelier working for a wine store.
+ Your goal is to recommend the best wine from your inventory that match the user preferences.
+ You are also keen to improve your recommendation with lesson(s).
+
+ You must follow the following criteria:
+ 1) Get to know how much the user willing to spend
+ 2) Get to know type of wine the user is looking for e.g. red, white, sparkling, rose, dessert, fortified
+ 3) Get to know what occasion the user is buying wine for
+ 4) Get to know what characteristics of wine the user is looking for
+ e.g. tannin, sweetness, intensity, acidity
+ 5) Get to know what food the user will have with wine
+ 6) Check your inventory for the best wine that match the user preference
+ 7) Recommend wine to the user
+
+ You should only respond with interleaving Thought, Action, Observation steps.
+ Thought can reason about the current situation, and Action can be three types:
+ 1) winestock[query], which you can use to find wine in your inventory. The more input data the better.
+ 2) chatbox[text], which you can use to interact with the user.
+ 3) recommendbox[answer], which returns your wine recommendation to the user.
+ After each observation, provide the next Thought and next Action.
+
+        You should only respond in JSON format as described below:
+ {
+ "thought": "your reasoning",
+ "action": {"name": "action to take", "input": "action input"},
+ "observation": "result of the action"
+ }
+
+ Here are some examples:
+ {
+ "question": "I would like to buy a sedan with 8 seats.",
+ "thought_1": "Our showroom carries various vehicle model. But I'm not sure whether we have a models that fits the user demand, I need to check our inventory.",
+ "action_1": {"name": "inventory", "input": "sedan with 8 seats."},
+ "observation_1": "Several model has 8 seats. Available color are black, red green"
+ }
+ {
+ "thought": "I have a few color for the user to choose from. I will ask him what color he likes.",
+ "action": {"name": "chatbox", "input": "Which color do you like?"}
+ "observation": "I'll take black."
+ }
+
+ $lesson
+
+ Let's begin!
+
+ $(JSON3.write(state[:thoughtHistory]))
+ {"thought"
+ """
+
+ # apply LLM specific instruct format
+ externalService = config[:externalservice][:text2textinstruct]
+ llminfo = externalService[:llminfo]
+ prompt =
+ if llminfo[:name] == "llama3instruct"
+ formatLLMtext_llama3instruct("system", _prompt)
+ else
+ error("llm model name is not defied yet $(@__LINE__)")
+ end
+
+ msgMeta = GeneralUtils.generate_msgMeta(
+ externalService[:mqtttopic],
+ senderName= "decisionMaker",
+ senderId= string(uuid4()),
+ receiverName= "text2textinstruct",
+ mqttBroker= config[:mqttServerInfo][:broker],
+ mqttBrokerPort= config[:mqttServerInfo][:port],
+ )
+
+ outgoingMsg = Dict(
+ :msgMeta=> msgMeta,
+ :payload=> Dict(
+ :text=> prompt,
+ :kwargs=> Dict(
+ :max_tokens=> 512,
+ :stop=> ["<|eot_id|>"],
+ )
+ )
+ )
+ @show outgoingMsg
+
+ for attempt in 1:5
+ try
+ response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
+ _responseJsonStr = response[:response][:text]
+ expectedJsonExample =
+ """
+ Here is an expected JSON format:
+ {
+ "thought": "...",
+ "action": {"name": "...", "input": "..."},
+ "observation": "..."
+ }
+ """
+ responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
+ thoughtDict = copy(JSON3.read(responseJsonStr))
+
+ # check if dict has all required value
+ thought::AbstractString = thoughtDict[:thought]
+ actionname::AbstractString = thoughtDict[:action][:name]
+ actioninput::AbstractString = thoughtDict[:action][:input]
+ if actionname ∈ ["winestock", "chatbox", "recommendbox"]
+ # LLM use available function
+ elseif thought == ""
+ error("DecisionMaker has no thought")
+ elseif length(actioninput) == 0
+ error("DecisionMaker has no actioninput")
+ else
+ error("DecisionMaker use wrong function")
+ end
+
+ return thoughtDict
+ catch e
+ io = IOBuffer()
+ showerror(io, e)
+ errorMsg = String(take!(io))
+ st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
+ println("")
+ @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
+ println("")
+ end
+ end
+ error("DecisionMaker failed to generate a thought")
+end
+
+
+""" Assigns a scalar value to each new child node to be used for selec-
+tion and backpropagation. This value effectively quantifies the agent’s progress in task completion,
+serving as a heuristic to steer the search algorithm towards the most promising regions of the tree.
+
+# Arguments
+ - `a::T1`
+ one of Yiem's agent
+ - `state::T2`
+ a game state
+
+# Return
+ - `evaluation::Tuple{String, Integer}`
+ evaluation and score
+
+# Example
+```jldoctest
+julia>
+```
+
+# Signature
+"""
+function evaluator(config::T1, state::T2
+ )::Tuple{String, Integer} where {T1<:AbstractDict, T2<:AbstractDict}
+
+ systemmsg =
+ """
+ Analyze the trajectories of a solution to a question answering task. The trajectories are
+ labeled by environmental observations about the situation, thoughts that can reason about
+ the current situation and actions that can be three types:
+ 1) winestock[query], which you can use to find wine in your inventory.
+ 2) chatbox[text], which you can use to interact with the user.
+ 3) recommendbox[answer], which returns your wine recommendation to the user.
+
+ Given a question and a trajectory, evaluate its correctness and provide your reasoning and
+ analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
+ can be correct if the thoughts and actions so far are correct, even if the answer is not found
+ yet. Do not generate additional thoughts or actions. Then ending with the correctness score s
+ where s is an integer from 0 to 10.
+
+        You should only respond in JSON format as described below:
+ {"evaluation": "your evaluation", "score": "your evaluation score"}
+
+ Here are some examples:
+ user:
+ {
+ "question": "I'm looking for a sedan with an automatic driving feature.",
+ "thought_1": "I have many types of sedans in my inventory, each with diverse features.",
+ "thought_2": "But there is only 1 model that has the feature customer wanted.",
+ "thought_3": "I should check our inventory first to see if we have it.",
+ "action_1": {"name": "inventory", "input": "Yiem model A"},
+ "observation_1": "Yiem model A is in stock."
+ }
+ assistant
+ {
+ "evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
+ It is also better to have simple searches corresponding to a single entity, making this the best action.",
+ "score": 10
+ }
+
+ user:
+ {
+ "question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
+ "thought_1": "Let me check our inventory first to see if I have it.",
+ "action_1": {"name": "inventory", "input": "pen with 4 color and a pencil."},
+ "observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotting pencil"}",
+ "thought_2": "Ok, I have what the user is asking. Let's tell the user.",
+ "action_2": {"name": "chatbox", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotting pencil"},
+ "observation_1": "This is not what I wanted."
+ }
+ assistant:
+ {
+ "evaluation": "This trajectory is incorrect as my search term should be related to a 4-colors pen with a pencil in it,
+        not a pen and a pencil separately. A better search term should have been a 4-colors pen with a pencil, all-in-one.",
+ "score": 0
+ }
+
+ Let's begin!
+ """
+
+ usermsg =
+ """
+ $(JSON3.write(state[:thoughtHistory]))
+ """
+
+ chathistory =
+ [
+ Dict(:name=> "system", :text=> systemmsg),
+ Dict(:name=> "user", :text=> usermsg)
+ ]
+
+ # put in model format
+ prompt = formatLLMtext(chathistory, "llama3instruct")
+ prompt *=
+ """
+ <|start_header_id|>assistant<|end_header_id|>
+ {
+ """
+
+ pprint(prompt)
+ externalService = config[:externalservice][:text2textinstruct]
+
+
+ # apply LLM specific instruct format
+ externalService = config[:externalservice][:text2textinstruct]
+
+ msgMeta = GeneralUtils.generate_msgMeta(
+ externalService[:mqtttopic],
+ senderName= "evaluator",
+ senderId= string(uuid4()),
+ receiverName= "text2textinstruct",
+ mqttBroker= config[:mqttServerInfo][:broker],
+ mqttBrokerPort= config[:mqttServerInfo][:port],
+ )
+
+ outgoingMsg = Dict(
+ :msgMeta=> msgMeta,
+ :payload=> Dict(
+ :text=> prompt,
+ :kwargs=> Dict(
+ :max_tokens=> 512,
+ :stop=> ["<|eot_id|>"],
+ )
+ )
+ )
+
+ for attempt in 1:5
+ try
+ response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
+ _responseJsonStr = response[:response][:text]
+ expectedJsonExample =
+ """
+ Here is an expected JSON format:
+ {"evaluation": "...", "score": "..."}
+ """
+ responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
+ evaluationDict = copy(JSON3.read(responseJsonStr))
+
+ # check if dict has all required value
+ dummya::AbstractString = evaluationDict[:evaluation]
+ dummyb::Integer = evaluationDict[:score]
+
+ return (evaluationDict[:evaluation], evaluationDict[:score])
+ catch e
+ io = IOBuffer()
+ showerror(io, e)
+ errorMsg = String(take!(io))
+ st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
+ println("")
+ @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
+ println("")
+ end
+ end
+ error("evaluator failed to generate an evaluation")
+end
+
+
+"""
+Generate a reflection for a failed trajectory by prompting the LLM service.
+
+The trajectory in `state[:thoughtHistory]` is embedded into a sommelier-role
+prompt; the model is asked to diagnose the failure and propose a new,
+high-level plan that avoids repeating it.
+
+# Arguments
+ - `config::T1`
+    agent configuration; provides `:externalservice` and `:mqttServerInfo`
+ - `state::T2`
+    current game state; only `state[:thoughtHistory]` is read
+
+# Return
+ - `reflection::String`
+    the model-generated reflection text
+
+# Example
+```jldoctest
+julia>
+```
+
+# TODO
+ - [] update docstring
+ - [x] implement the function
+ - [x] add try block. check result that it is expected before returning
+
+# Signature
+"""
+function reflector(config::T1, state::T2)::String where {T1<:AbstractDict, T2<:AbstractDict}
+    # prompt design follows:
+    # https://github.com/andyz245/LanguageAgentTreeSearch/blob/main/hotpot/hotpot.py
+
+    _prompt =
+    """
+    You are a helpful sommelier working for a wine store.
+    Your goal is to recommend the best wine from your inventory that match the user preferences.
+    You will be given a question and a trajectory of the previous help you've done for a user.
+    You were unsuccessful in helping the user either because you guessed the wrong answer with Finish[answer], or you didn't know the user enough.
+    In a few sentences, Diagnose a possible reason for failure and devise a new, concise, high level plan that aims to mitigate the same failure.
+    Use complete sentences.
+
+    You should only respond in JSON format as describe below:
+    {"reflection": "your relection"}
+
+    Here are some examples:
+    Previous Trial:
+    {
+    "question": "Hello, I would like a get a bottle of wine",
+    "thought_1": "A customer wants to buy a bottle of wine. Before making a recommendation, I need to know more about their preferences.",
+    "action_1": {"name": "chatbox", "input": "What is the occasion for which you're buying this wine?"},
+    "observation_1": "We are holding a wedding party",
+
+    "thought_2": "A wedding party, that's a great occasion! The customer might be looking for a celebratory drink. Let me ask some more questions to narrow down the options.",
+    "action_2": {"name": "chatbox", "input": "What type of food will you be serving at the wedding?"},
+    "observation_2": "It will be Thai dishes.",
+
+    "thought_3": "With Thai food, I should recommend a wine that complements its spicy and savory flavors. And since it's a celebratory occasion, the customer might prefer a full-bodied wine.",
+    "action_3": {"name": "chatbox", "input": "What is your budget for this bottle of wine?"},
+    "observation_3": "I would spend up to 50 bucks.",
+
+    "thought_4": "Now that I have some more information, it's time to narrow down the options.",
+    "action_4": {"name": "winestock", "input": "red wine with full body, pairs well with spicy food, budget \$50"},
+    "observation_4": "I found the following wines in our stock: \n{\n 1: El Enemigo Cabernet Franc 2019\n2: Tantara Chardonnay 2017\n\n}\n",
+
+    "thought_5": "Now that I have a list of potential wines, I need to know more about the customer's taste preferences.",
+    "action_5": {"name": "chatbox", "input": "What type of wine characteristics are you looking for? (e.g. t.e.g. tannin level, sweetness, intensity, acidity)"},
+    "observation_5": "I like full-bodied red wine with low tannin.",
+
+    "thought_6": "Now that I have more information about the customer's preferences, it's time to make a recommendation.",
+    "action_6": {"name": "recommendbox", "input": "El Enemigo Cabernet Franc 2019"},
+    "observation_6": "I don't like the one you recommend. I want dry wine."
+    }
+
+    {
+    "reflection": "I asked the user about the occasion, food type, and budget, and then searched for wine in the inventory right away. However, I should have asked the user for the specific wine type and their preferences in order to gather more information before making a recommendation."
+    }
+
+    Let's begin!
+
+    Previous trial:
+    $(JSON3.write(state[:thoughtHistory]))
+    {"reflection"
+    """
+
+    # apply LLM specific instruct format
+    externalService = config[:externalservice][:text2textinstruct]
+    llminfo = externalService[:llminfo]
+    prompt =
+        if llminfo[:name] == "llama3instruct"
+            formatLLMtext_llama3instruct("system", _prompt)
+        else
+            error("llm model name is not defined yet $(@__LINE__)")
+        end
+
+    msgMeta = GeneralUtils.generate_msgMeta(
+        # bugfix: was `a.config[:externalservice][:text2textinstruct][:mqtttopic]`,
+        # but `a` is not in scope here (the function takes `config` directly);
+        # reuse `externalService`, consistent with evaluator()
+        externalService[:mqtttopic],
+        senderName= "reflector",
+        senderId= string(uuid4()),
+        receiverName= "text2textinstruct",
+        mqttBroker= config[:mqttServerInfo][:broker],
+        mqttBrokerPort= config[:mqttServerInfo][:port],
+    )
+
+    outgoingMsg = Dict(
+        :msgMeta=> msgMeta,
+        :payload=> Dict(
+            :text=> prompt,
+            :kwargs=> Dict(
+                :max_tokens=> 512,
+                :stop=> ["<|eot_id|>"],
+            )
+        )
+    )
+
+    # retry up to 5 times: the LLM may return malformed JSON on any given attempt
+    for attempt in 1:5
+        try
+            response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
+            _responseJsonStr = response[:response][:text]
+            expectedJsonExample =
+            """
+            Here is an expected JSON format:
+            {"reflection": "..."}
+            """
+            responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
+            reflectionDict = copy(JSON3.read(responseJsonStr))
+
+            # check if dict has all required value (typed assignment throws if missing/mistyped)
+            dummya::AbstractString = reflectionDict[:reflection]
+
+            return reflectionDict[:reflection]
+        catch e
+            io = IOBuffer()
+            showerror(io, e)
+            errorMsg = String(take!(io))
+            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
+            println("")
+            @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
+            println("")
+        end
+    end
+    error("reflector failed to generate a reflection")
+end
+
+
+""" Get a new state
+
+Execute the next decided action against the environment (virtual customer or
+inventory), build the successor state, and score it. When the successor's
+reward is negative, an evaluation and a reflection ("lesson") are generated
+and persisted to `lesson.json` for later reuse by the decision maker.
+
+# Arguments
+ - `state::T2`
+    current game state
+ - `config::T1`
+    agent configuration
+ - `decisionMaker::Function`
+    `(config, state) -> thoughtDict` containing the next Thought/Action
+ - `evaluator::Function`
+    `(config, state) -> (evaluation::String, score)` state scorer
+ - `reflector::Function`
+    `(config, state) -> lesson::String` for failed states
+
+# Return
+ - `(newNodeKey, newstate, progressvalue)::Tuple{String, Dict{Symbol, <:Any}, Integer}`
+
+# Example
+```jldoctest
+julia> state = Dict{Symbol, Dict{Symbol, Any}}(
+           :thoughtHistory => Dict(:question => "Hello, I want to buy a bottle of wine."),
+           :storeinfo => Dict(),
+           :customerinfo => Dict()
+       )
+julia> thoughtDict = Dict(
+           :question=> "I want to buy a bottle of wine.",
+           :thought_1=> "The customer wants to buy a bottle of wine.",
+           :action_1=> Dict{Symbol, Any}(
+               :name=>"Chatbox",
+               :input=>"What occasion are you buying the wine for?",
+           ),
+           :observation_1 => ""
+       )
+```
+
+# TODO
+ - [] add other actions
+ - [WORKING] add embedding of newstate and store in newstate[:embedding]
+
+# Signature
+"""
+function transition(state::T2, config::T1, decisionMaker::Function, evaluator::Function,
+        reflector::Function
+    )::Tuple{String, Dict{Symbol, <:Any}, Integer} where {T1<:AbstractDict, T2<:AbstractDict}
+
+    thoughtDict = decisionMaker(config, state)
+
+    actionname = thoughtDict[:action][:name]
+    actioninput = thoughtDict[:action][:input]
+
+    # map action and input() to llm function
+    response, select, reward, isterminal =
+        if actionname == "chatbox"
+            # deepcopy(state[:virtualCustomerChatHistory]) because I want to keep it clean
+            # so that other simulation start from this same node is not contaminated with actioninput
+            virtualWineUserChatbox(config, actioninput, deepcopy(state[:virtualCustomerChatHistory])) # virtual customer
+        elseif actionname == "winestock"
+            winestock(config, actioninput)
+        elseif actionname == "recommendbox"
+            virtualWineUserRecommendbox(config, actioninput)
+        else
+            error("undefined LLM function. Requesting $actionname")
+        end
+
+    newNodeKey, newstate = LLMMCTS.makeNewState(state, thoughtDict, response, select, reward,
+        isterminal)
+    # mirror chatbox turns into the virtual customer's chat history
+    if actionname == "chatbox"
+        push!(newstate[:virtualCustomerChatHistory], Dict(:name=>"assistant", :text=> actioninput) )
+        push!(newstate[:virtualCustomerChatHistory], Dict(:name=>"user", :text=> response))
+    end
+
+    stateevaluation, progressvalue = evaluator(config, newstate)
+
+    # a negative reward marks a failed trajectory: record why and learn a lesson
+    if newstate[:reward] < 0
+        pprint(newstate[:thoughtHistory])
+        newstate[:evaluation] = stateevaluation
+        newstate[:lesson] = reflector(config, newstate)
+
+        # store new lesson for later use
+        # bugfix: `JSON3.read("lesson.json")` parses the literal string
+        # "lesson.json" as JSON (which always fails); read the file contents
+        # instead, and start from an empty dict when the file does not exist yet
+        lessonDict = isfile("lesson.json") ?
+            copy(JSON3.read(read("lesson.json", String))) : Dict{Symbol, Any}()
+        latestLessonKey, latestLessonIndice =
+            GeneralUtils.findHighestIndexKey(lessonDict, "lesson")
+        nextIndice = latestLessonKey == :NA ? 1 : latestLessonIndice + 1
+        newLessonKey = Symbol("lesson_$(nextIndice)")
+        lessonDict[newLessonKey] = newstate
+        open("lesson.json", "w") do io
+            JSON3.pretty(io, lessonDict)
+        end
+        print("---> reflector()")
+    end
+
+    return (newNodeKey, newstate, progressvalue)
+end
+
+
+
+# """ Chat with llm.
+
+# # Arguments
+# `a::agent`
+# an agent
+
+# # Return
+# None
+
+# # Example
+# ```jldoctest
+# julia> using JSON3, UUIDs, Dates, FileIO, MQTTClient, ChatAgent
+# julia> const mqttBroker = "mqtt.yiem.cc"
+# julia> mqttclient, connection = MakeConnection(mqttBroker, 1883)
+# julia> tools=Dict( # update input format
+# "askbox"=>Dict(
+# :description => "Useful for when you need to ask the user for more context. Do not ask the user their own question.",
+# :input => "Input is a text in JSON format.{\"Q1\": \"How are you doing?\", \"Q2\": \"How may I help you?\"}",
+# :output => "" ,
+# :func => nothing,
+# ),
+# )
+# julia> msgMeta = Dict(
+# :msgPurpose=> "updateStatus",
+# :from=> "agent",
+# :to=> "llmAI",
+# :requestresponse=> "request",
+# :sendto=> "", # destination topic
+# :replyTo=> "agent/api/v0.1.0/txt/response", # requester ask responseer to send reply to this topic
+# :repondToMsgId=> "", # responseer is responseing to this msg id
+# :taskstatus=> "", # "complete", "fail", "waiting" or other status
+# :timestamp=> Dates.now(),
+# :msgId=> "$(uuid4())",
+# )
+# julia> a = ChatAgent.agentReflex(
+# "Jene",
+# mqttclient,
+# msgMeta,
+# agentConfigTopic, # I need a function to send msg to config topic to get load balancer
+# role=:sommelier,
+# tools=tools
+# )
+# julia> newAgent = ChatAgent.agentReact(agent)
+# julia> response = ChatAgent.conversation(newAgent, "Hi! how are you?")
+# ```
+
+# # TODO
+# - [] update docstring
+# - [x] MCTS() for planning
+# - [] add recap to initialState for earlier completed question
+# - [WORKING] conversation loop
+
+# # Signature
+# """
+# function conversation(a::T, userinput::Dict) where {T<:agent}
+# config = deepcopy(a.config)
+# pprint(config)
+# if userinput[:text] == "newtopic"
+# clearhistory(a)
+# return "Okay. What shall we talk about?"
+# else
+# # add usermsg to a.chathistory
+# addNewMessage(a, "user", userinput[:text])
+
+# if isempty(a.plan[:currenttrajectory])
+
+# # initial state
+# a.plan[:currenttrajectory] = Dict{Symbol, Any}(
+# # deepcopy the info to prevent modifying the info unintentionally during MCTS planning
+# :customerinfo=> deepcopy(a.keywordinfo[:customerinfo]),
+# :storeinfo=> deepcopy(a.keywordinfo[:storeinfo]),
+# :userselect=> nothing,
+# :reward=> 0,
+# :isterminal=> false,
+# :evaluation=> nothing,
+# :lesson=> nothing,
+
+# :totalTrajectoryReward=> nothing,
+
+# # contain question, thought_1, action_1, observation_1, thought_2, ...
+# :thoughtHistory=> OrderedDict{Symbol, Any}(
+# #[] :recap=>,
+# :question=> userinput[:text],
+# ),
+
+# # store conversation for virtual customer because the virtual customer agent is just
+# # a function and stateless.
+# :virtualCustomerChatHistory=> Vector{Dict{Symbol, Any}}(
+# [Dict(:name=> "user", :text=> userinput[:text])]
+# ),
+# )
+# else
+# _, a.plan[:currenttrajectory] = makeNewState(a.plan[:currenttrajectory],
+# a.plan[:activeplan][:thoughtHistory], userinput[:text], userinput[:select],
+# userinput[:reward], userinput[:isterminal])
+# end
+# end
+
+# while true
+# bestNextState, besttrajectory = LLMMCTS.runMCTS(a.plan[:currenttrajectory],
+# transition, config, decisionMaker, evaluator, reflector;
+# totalsample=2, maxDepth=3, maxiterations=3, explorationweight=1.0)
+# a.plan[:activeplan] = bestNextState
+
+# latestActionKey, latestActionIndice =
+# GeneralUtils.findHighestIndexKey(bestNextState[:thoughtHistory], "action")
+# actionname = bestNextState[:thoughtHistory][latestActionKey][:name]
+# actioninput = bestNextState[:thoughtHistory][latestActionKey][:input]
+
+# # transition
+# if actionname == "chatbox"
+# # add usermsg to a.chathistory
+# addNewMessage(a, "assistant", actioninput)
+# return actioninput
+# elseif actionname == "recommendbox"
+# # add usermsg to a.chathistory
+# addNewMessage(a, "assistant", actioninput)
+# return actioninput
+# else
+# _, a.plan[:currenttrajectory] = transition(a, a.plan[:currenttrajectory], a.plan[:activeplan])
+# end
+# end
+# end
+
+
+
+""" Chat with llm.
+
+Handle one user turn. The command "newtopic" wipes the chat history and asks
+for a fresh subject; any other input is recorded, a thought is planned via
+`think`, and a reply is produced by the chat model.
+
+# Arguments
+ - `a::agent`
+    an agent
+ - `userinput::Dict`
+    must contain `:text`, the user's message
+
+# Return
+ - the assistant's reply `String`
+
+# TODO
+ - [] update docstring
+
+# Signature
+"""
+function conversation(a::T, userinput::Dict) where {T<:agent}
+    # debug: dump the agent configuration (deepcopy so pprint cannot mutate it)
+    pprint(deepcopy(a.config))
+
+    # reset command: clear history and prompt for a new topic
+    if userinput[:text] == "newtopic"
+        clearhistory(a)
+        return "Okay. What shall we talk about?"
+    end
+
+    # record the user turn, plan a thought, and answer via the chat model
+    # (the thought is injected into the chat model as context)
+    addNewMessage(a, "user", userinput[:text])
+    return generatechat(a, think(a))
+end
+# function conversation(a::T, userinput::Dict) where {T<:agent}
+# config = deepcopy(a.config)
+# pprint(config)
+# if userinput[:text] == "newtopic"
+# clearhistory(a)
+# return "Okay. What shall we talk about?"
+# else
+# # add usermsg to a.chathistory
+# addNewMessage(a, "user", userinput[:text])
+
+# if isempty(a.plan[:currenttrajectory])
+
+# # initial state
+# a.plan[:currenttrajectory] = Dict{Symbol, Any}(
+# # deepcopy the info to prevent modifying the info unintentionally during MCTS planning
+# :customerinfo=> deepcopy(a.keywordinfo[:customerinfo]),
+# :storeinfo=> deepcopy(a.keywordinfo[:storeinfo]),
+# :userselect=> nothing,
+# :reward=> 0,
+# :isterminal=> false,
+# :evaluation=> nothing,
+# :lesson=> nothing,
+
+# :totalTrajectoryReward=> nothing,
+
+# # contain question, thought_1, action_1, observation_1, thought_2, ...
+# :thoughtHistory=> OrderedDict{Symbol, Any}(
+# #[] :recap=>,
+# :question=> userinput[:text],
+# ),
+
+# # store conversation for virtual customer because the virtual customer agent is just
+# # a function and stateless.
+# :virtualCustomerChatHistory=> Vector{Dict{Symbol, Any}}(
+# [Dict(:name=> "user", :text=> userinput[:text])]
+# ),
+# )
+# else
+# _, a.plan[:currenttrajectory] = makeNewState(a.plan[:currenttrajectory],
+# a.plan[:activeplan][:thoughtHistory], userinput[:text], userinput[:select],
+# userinput[:reward], userinput[:isterminal])
+# end
+# end
+
+# while true
+# bestNextState, besttrajectory = LLMMCTS.runMCTS(a.plan[:currenttrajectory],
+# transition, config, decisionMaker, evaluator, reflector;
+# totalsample=2, maxDepth=3, maxiterations=3, explorationweight=1.0)
+# a.plan[:activeplan] = bestNextState
+
+# latestActionKey, latestActionIndice =
+# GeneralUtils.findHighestIndexKey(bestNextState[:thoughtHistory], "action")
+# actionname = bestNextState[:thoughtHistory][latestActionKey][:name]
+# actioninput = bestNextState[:thoughtHistory][latestActionKey][:input]
+
+# # transition
+# if actionname == "chatbox"
+# # add usermsg to a.chathistory
+# addNewMessage(a, "assistant", actioninput)
+# return actioninput
+# elseif actionname == "recommendbox"
+# # add usermsg to a.chathistory
+# addNewMessage(a, "assistant", actioninput)
+# return actioninput
+# else
+# _, a.plan[:currenttrajectory] = transition(a, a.plan[:currenttrajectory], a.plan[:activeplan])
+# end
+# end
+# end
+
+
+"""
+Plan the agent's next action with MCTS and return the chosen action input.
+
+NOTE(review): this body references `userinput`, which is neither a parameter
+of `think(a)` nor defined locally — as written, calling this function raises
+`UndefVarError`. The body appears copied from the older
+`conversation(a, userinput)` implementation; confirm where `userinput` should
+come from (e.g. the latest entry of the agent's chat history) before use.
+
+# Arguments
+ - `a::T`
+    an agent; supplies `config`, `plan`, and `keywordinfo`
+
+# Return
+ - `actioninput` for "chatbox"/"recommendbox" actions; other actions keep the
+   planning loop running
+
+# Example
+```jldoctest
+julia>
+```
+
+# TODO
+ - [] update docstring
+ - [x] implement the function
+ - [x] add try block. check result that it is expected before returning
+
+# Signature
+"""
+function think(a::T) where {T<:agent}
+    # deepcopy so MCTS planning below cannot mutate the live agent config
+    config = deepcopy(a.config)
+    pprint(config)
+    # NOTE(review): `userinput` is undefined in this scope — see docstring
+    if userinput[:text] == "newtopic"
+        clearhistory(a)
+        return "Okay. What shall we talk about?"
+    else
+        # add usermsg to a.chathistory
+        addNewMessage(a, "user", userinput[:text])
+
+        # first turn of a trajectory: build the initial planning state
+        if isempty(a.plan[:currenttrajectory])
+
+            # initial state
+            a.plan[:currenttrajectory] = Dict{Symbol, Any}(
+                # deepcopy the info to prevent modifying the info unintentionally during MCTS planning
+                :customerinfo=> deepcopy(a.keywordinfo[:customerinfo]),
+                :storeinfo=> deepcopy(a.keywordinfo[:storeinfo]),
+                :userselect=> nothing,
+                :reward=> 0,
+                :isterminal=> false,
+                :evaluation=> nothing,
+                :lesson=> nothing,
+
+                :totalTrajectoryReward=> nothing,
+
+                # contain question, thought_1, action_1, observation_1, thought_2, ...
+                :thoughtHistory=> OrderedDict{Symbol, Any}(
+                    #[] :recap=>,
+                    :question=> userinput[:text],
+                ),
+
+                # store conversation for virtual customer because the virtual customer agent is just
+                # a function and stateless.
+                :virtualCustomerChatHistory=> Vector{Dict{Symbol, Any}}(
+                    [Dict(:name=> "user", :text=> userinput[:text])]
+                ),
+            )
+        else
+            # continuing trajectory: fold the user's answer into the active plan
+            _, a.plan[:currenttrajectory] = makeNewState(a.plan[:currenttrajectory],
+                a.plan[:activeplan][:thoughtHistory], userinput[:text], userinput[:select],
+                userinput[:reward], userinput[:isterminal])
+        end
+    end
+
+    # plan/act loop: run MCTS, then execute the best action until one of them
+    # needs to surface a message to the user
+    while true
+        bestNextState, besttrajectory = LLMMCTS.runMCTS(a.plan[:currenttrajectory],
+            transition, config, decisionMaker, evaluator, reflector;
+            totalsample=2, maxDepth=3, maxiterations=3, explorationweight=1.0)
+        a.plan[:activeplan] = bestNextState
+
+        # pick the most recent action_N entry from the chosen state
+        latestActionKey, latestActionIndice =
+            GeneralUtils.findHighestIndexKey(bestNextState[:thoughtHistory], "action")
+        actionname = bestNextState[:thoughtHistory][latestActionKey][:name]
+        actioninput = bestNextState[:thoughtHistory][latestActionKey][:input]
+
+        # transition
+        if actionname == "chatbox"
+            # add usermsg to a.chathistory
+            addNewMessage(a, "assistant", actioninput)
+            return actioninput
+        elseif actionname == "recommendbox"
+            # add usermsg to a.chathistory
+            addNewMessage(a, "assistant", actioninput)
+            return actioninput
+        else
+            # NOTE(review): `transition` is defined above with 5 positional
+            # arguments (state, config, decisionMaker, evaluator, reflector);
+            # this 3-argument call does not match that signature — confirm
+            _, a.plan[:currenttrajectory] = transition(a, a.plan[:currenttrajectory], a.plan[:activeplan])
+        end
+    end
+end
+
+
+
+
+
+# """
+
+# # Arguments
+# - `a::T1`
+# one of Yiem's agent
+# - `state::T2`
+# a game state
+
+# # Return
+# - `evaluation::Tuple{String, Integer}`
+# evaluation and score
+
+# # Example
+# ```jldoctest
+# julia>
+# ```
+
+# # TODO
+# - [] update docs
+# - [] implement the function
+
+# # Signature
+# """
+# function comparer(a::T1, state::T2)::Tuple{String, Integer} where {T1<:agent, T2<:AbstractDict}
+
+# _prompt =
+# """
+# Analyze the trajectories of a solution to a question answering task. The trajectories are
+# labeled by environmental observations about the situation, thoughts that can reason about
+# the current situation and actions that can be three types:
+# 1) winestock[query], which you can use to find wine in your inventory.
+# 2) chatbox[text], which you can use to interact with the user.
+# 3) recommendbox[answer], which returns your wine recommendation to the user.
+
+# Given a question and a trajectory, evaluate its correctness and provide your reasoning and
+# analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
+# can be correct if the thoughts and actions so far are correct, even if the answer is not found
+# yet. Do not generate additional thoughts or actions. Then ending with the correctness score s
+# where s is an integer from 0 to 10.
+
+# You should only respond in JSON format as describe below:
+# {"evaluation": "your evaluation", "score": "your evaluation score"}
+
+# Here are some examples:
+# {
+# "question": "I'm looking for a sedan with an automatic driving feature.",
+# "thought_1": "I have many types of sedans in my inventory, each with diverse features.",
+# "thought_2": "But there is only 1 model that has the feature customer wanted.",
+# "thought_3": "I should check our inventory first to see if we have it.",
+# "action_1": {"name": "inventory", "input": "Yiem model A"},
+# "observation_1": "Yiem model A is in stock."
+# }
+# {"evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
+# It is also better to have simple searches corresponding to a single entity, making this the best action.",
+# "score": 10
+# }
+
+# {
+# "question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
+# "thought_1": "Let me check our inventory first to see if I have it.",
+# "action_1": {"name": "inventory", "input": "pen with 4 color and a pencil."},
+# "observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotting pencil"}",
+# "thought_2": "Ok, I have what the user is asking. Let's tell the user.",
+# "action_2": {"name": "chatbox", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotting pencil"},
+# "observation_1": "This is not what I wanted."
+# }
+# {"evaluation": "This trajectory is incorrect as my search term should be related to a 4-colors pen with a pencil in it,
+# not a pen and a pencil seperately. A better search term should have been a 4-colors pen with a pencil, all-in-one.",
+# "score": 0
+# }
+
+# Let's begin!:
+# $(JSON3.write(state[:thoughtHistory]))
+# {"evaluation"
+# """
+
+# # apply LLM specific instruct format
+# externalService = a.config[:externalservice][:text2textinstruct]
+# llminfo = externalService[:llminfo]
+# prompt =
+# if llminfo[:name] == "llama3instruct"
+# formatLLMtext_llama3instruct("system", _prompt)
+# else
+# error("llm model name is not defied yet $(@__LINE__)")
+# end
+
+# msgMeta = GeneralUtils.generate_msgMeta(
+# a.config[:externalservice][:text2textinstruct][:mqtttopic],
+# senderName= "evaluator",
+# senderId= a.id,
+# receiverName= "text2textinstruct",
+# mqttBroker= a.config[:mqttServerInfo][:broker],
+# mqttBrokerPort= a.config[:mqttServerInfo][:port],
+# )
+
+# outgoingMsg = Dict(
+# :msgMeta=> msgMeta,
+# :payload=> Dict(
+# :text=> prompt,
+# :kwargs=> Dict(
+# :max_tokens=> 512,
+# :stop=> ["<|eot_id|>"],
+# )
+# )
+# )
+
+# for attempt in 1:5
+# try
+# response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
+# _responseJsonStr = response[:response][:text]
+# expectedJsonExample =
+# """
+# Here is an expected JSON format:
+# {"evaluation": "...", "score": "..."}
+# """
+# responseJsonStr = jsoncorrection(a, _responseJsonStr, expectedJsonExample)
+# evaluationDict = copy(JSON3.read(responseJsonStr))
+
+# # check if dict has all required value
+# dummya::AbstractString = evaluationDict[:evaluation]
+# dummyb::Integer = evaluationDict[:score]
+
+# return (evaluationDict[:evaluation], evaluationDict[:score])
+# catch e
+# io = IOBuffer()
+# showerror(io, e)
+# errorMsg = String(take!(io))
+# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
+# println("")
+# @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
+# println("")
+# end
+# end
+# error("evaluator failed to generate an evaluation")
+# end
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+end # module interface
\ No newline at end of file
diff --git a/src/interface.jl b/src/interface.jl
index b71f105..6dca850 100644
--- a/src/interface.jl
+++ b/src/interface.jl
@@ -39,8 +39,8 @@ macro executeStringFunction(functionStr, args...)
func_expr = Meta.parse(functionStr)
# Create a new function with the parsed expression
- function_to_call = eval(Expr(:function, Expr(:call, func_expr, args...),
- func_expr.args[2:end]...))
+ function_to_call = eval(Expr(:function,
+ Expr(:call, func_expr, args...), func_expr.args[2:end]...))
# Call the newly created function with the provided arguments
function_to_call(args...)
@@ -97,169 +97,328 @@ julia> output_thoughtDict = Dict(
# Signature
"""
-function decisionMaker(config::T1, state::T2)::Dict{Symbol, Any} where {T1<:AbstractDict, T2<:AbstractDict}
- customerinfo =
+function decisionMaker(a::T)::Dict{Symbol, Any} where {T<:agent}
+
+ # lessonDict = copy(JSON3.read("lesson.json"))
+
+ # lesson =
+ # if isempty(lessonDict)
+ # ""
+ # else
+ # lessons = Dict{Symbol, Any}()
+ # for (k, v) in lessonDict
+ # lessons[k] = lessonDict[k][:lesson]
+ # end
+
+ # """
+ # You have attempted to help the user before and failed, either because your reasoning for the
+ # recommendation was incorrect or your response did not exactly match the user expectation.
+ # The following lesson(s) give a plan to avoid failing to help the user in the same way you
+ # did previously. Use them to improve your strategy to help the user.
+
+ # Here are some lessons in JSON format:
+ # $(JSON3.write(lessons))
+
+    #         When providing the thought and action for the current trial, take into account these failed
+ # trajectories and make sure not to repeat the same mistakes and incorrect answers.
+ # """
+ # end
+
+ # _prompt =
+ # """
+ # You are a helpful sommelier working for a wine store.
+ # Your goal is to recommend the best wine from your inventory that match the user preferences.
+ # You are also keen to improve your recommendation with lesson(s).
+
+ # You must follow the following criteria:
+ # 1) Get to know how much the user willing to spend
+ # 2) Get to know type of wine the user is looking for e.g. red, white, sparkling, rose, dessert, fortified
+ # 3) Get to know what occasion the user is buying wine for
+ # 4) Get to know what characteristics of wine the user is looking for
+ # e.g. tannin, sweetness, intensity, acidity
+ # 5) Get to know what food the user will have with wine
+ # 6) Check your inventory for the best wine that match the user preference
+ # 7) Recommend wine to the user
+
+ # You should only respond with interleaving Thought, Action, Observation steps.
+ # Thought can reason about the current situation, and Action can be three types:
+ # 1) winestock[query], which you can use to find wine in your inventory. The more input data the better.
+ # 2) chatbox[text], which you can use to interact with the user.
+ # After each observation, provide the next Thought and next Action.
+
+    #     You should only respond in JSON format as described below:
+ # {
+ # "thought": "your reasoning",
+ # "action": {"name": "action to take", "input": "action input"},
+ # "observation": "result of the action"
+ # }
+
+ # Here are some examples:
+ # {
+ # "question": "I would like to buy a sedan with 8 seats.",
+ # "thought_1": "Our showroom carries various vehicle model. But I'm not sure whether we have a models that fits the user demand, I need to check our inventory.",
+ # "action_1": {"name": "inventory", "input": "sedan with 8 seats."},
+ # "observation_1": "Several model has 8 seats. Available color are black, red green"
+ # }
+ # {
+ # "thought": "I have a few color for the user to choose from. I will ask him what color he likes.",
+ # "action": {"name": "chatbox", "input": "Which color do you like?"}
+ # "observation": "I'll take black."
+ # }
+
+ # $lesson
+
+ # Let's begin!
+
+ # $(JSON3.write(state[:thoughtHistory]))
+ # {"thought"
+ # """
+
+ systemmsg =
"""
- I will give you the following information about customer:
- $(JSON3.write(state[:customerinfo]))
+ You are a helpful sommelier working for a wine store.
+ Your task is to help the user choose the best wine that match the user preferences from your inventory.
+ You are also eager to improve your helpfulness.
+
+ You must follow the following guidelines:
+ - Get to know how much the user willing to spend
+ - Get to know type of wine the user is looking for e.g. red, white, sparkling, rose, dessert, fortified
+ - Get to know what occasion the user is buying wine for
+ - Get to know what characteristics of wine the user is looking for e.g. tannin, sweetness, intensity, acidity
+ - Get to know what food the user will have with wine
+
+ At each round of conversation, the user will give you the current situation:
+ Context: ...
+ Your earlier conversation with the user: ...
+
+ You should then respond to the user with interleaving Thought, Plan, Action and Observation:
+ - thought:
+ 1) State your reasoning about the current situation.
+ - plan: Based on the current situation, what would you do to complete the task? Be specific.
+ - action (Must be aligned with your plan): Can be one of the following functions:
+ 1) CHATBOX[text], which you can use to talk with the user. "text" is in verbal English.
+ 2) WINESTOCK[query], which you can use to find info about wine in your inventory. "query" is a search term in verbal English.
+ - observation: result of the action.
+
+ You should only respond in format as described below:
+ thought: ...
+ plan: ...
+ action_name: ...
+ action_input: ...
+ observation: ...
+
+ Let's begin!
"""
- storeinfo =
+ usermsg =
"""
- I will give you the following information about your store:
- $(JSON3.write(state[:storeinfo]))
+ Context: None
+ Your earlier conversation with the user: $(chatHistoryToString(a))
"""
- lessonDict = copy(JSON3.read("lesson.json"))
-
- lesson =
- if isempty(lessonDict)
- ""
- else
- lessons = Dict{Symbol, Any}()
- for (k, v) in lessonDict
- lessons[k] = lessonDict[k][:lesson]
- end
-
- """
- You have attempted to help the user before and failed, either because your reasoning for the
- recommendation was incorrect or your response did not exactly match the user expectation.
- The following lesson(s) give a plan to avoid failing to help the user in the same way you
- did previously. Use them to improve your strategy to help the user.
-
- Here are some lessons in JSON format:
- $(JSON3.write(lessons))
-
- When providing the thought and action for the current trial, that into account these failed
- trajectories and make sure not to repeat the same mistakes and incorrect answers.
- """
- end
-
_prompt =
- """
- You are a helpful sommelier working for a wine store.
- Your goal is to recommend the best wine from your inventory that match the user preferences.
- You are also keen to improve your recommendation with lesson(s).
-
- You must follow the following criteria:
- 1) Get to know how much the user willing to spend
- 2) Get to know type of wine the user is looking for e.g. red, white, sparkling, rose, dessert, fortified
- 3) Get to know what occasion the user is buying wine for
- 4) Get to know what characteristics of wine the user is looking for
- e.g. tannin, sweetness, intensity, acidity
- 5) Get to know what food the user will have with wine
- 6) Check your inventory for the best wine that match the user preference
- 7) Recommend wine to the user
-
- You should only respond with interleaving Thought, Action, Observation steps.
- Thought can reason about the current situation, and Action can be three types:
- 1) winestock[query], which you can use to find wine in your inventory. The more input data the better.
- 2) chatbox[text], which you can use to interact with the user.
- 3) recommendbox[answer], which returns your wine recommendation to the user.
- After each observation, provide the next Thought and next Action.
-
- You should only respond in JSON format as describe below:
- {
- "thought": "your reasoning",
- "action": {"name": "action to take", "input": "action input"},
- "observation": "result of the action"
- }
-
- Here are some examples:
- {
- "question": "I would like to buy a sedan with 8 seats.",
- "thought_1": "Our showroom carries various vehicle model. But I'm not sure whether we have a models that fits the user demand, I need to check our inventory.",
- "action_1": {"name": "inventory", "input": "sedan with 8 seats."},
- "observation_1": "Several model has 8 seats. Available color are black, red green"
- }
- {
- "thought": "I have a few color for the user to choose from. I will ask him what color he likes.",
- "action": {"name": "chatbox", "input": "Which color do you like?"}
- "observation": "I'll take black."
- }
+ [
+ Dict(:name=> "system", :text=> systemmsg),
+ Dict(:name=> "user", :text=> usermsg)
+ ]
- $lesson
-
- Let's begin!
-
- $(JSON3.write(state[:thoughtHistory]))
- {"thought"
- """
-
- # apply LLM specific instruct format
- externalService = config[:externalservice][:text2textinstruct]
- llminfo = externalService[:llminfo]
- prompt =
- if llminfo[:name] == "llama3instruct"
- formatLLMtext_llama3instruct("system", _prompt)
- else
- error("llm model name is not defied yet $(@__LINE__)")
- end
-
- msgMeta = GeneralUtils.generate_msgMeta(
- externalService[:mqtttopic],
- senderName= "decisionMaker",
- senderId= string(uuid4()),
- receiverName= "text2textinstruct",
- mqttBroker= config[:mqttServerInfo][:broker],
- mqttBrokerPort= config[:mqttServerInfo][:port],
- )
-
- outgoingMsg = Dict(
- :msgMeta=> msgMeta,
- :payload=> Dict(
- :text=> prompt,
- :kwargs=> Dict(
- :max_tokens=> 512,
- :stop=> ["<|eot_id|>"],
- )
- )
- )
- @show outgoingMsg
-
- for attempt in 1:5
+ # put in model format
+ prompt = GeneralUtils.formatLLMtext(_prompt, "llama3instruct")
+ prompt *=
+ """
+ <|start_header_id|>assistant<|end_header_id|>
+ """
+ response = nothing # store for show when error msg show up
+ for attempt in 1:10
try
- response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
- _responseJsonStr = response[:response][:text]
- expectedJsonExample =
- """
- Here is an expected JSON format:
- {
- "thought": "...",
- "action": {"name": "...", "input": "..."},
- "observation": "..."
- }
- """
- responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
- thoughtDict = copy(JSON3.read(responseJsonStr))
+ response = a.text2textInstructLLM(prompt)
+ responsedict = GeneralUtils.textToDict(response,
+ ["thought", "plan", "action_name", "action_input", "observation"],
+ rightmarker=":", symbolkey=true)
- # check if dict has all required value
- thought::AbstractString = thoughtDict[:thought]
- actionname::AbstractString = thoughtDict[:action][:name]
- actioninput::AbstractString = thoughtDict[:action][:input]
- if actionname ∈ ["winestock", "chatbox", "recommendbox"]
- # LLM use available function
- elseif thought == ""
- error("DecisionMaker has no thought")
- elseif length(actioninput) == 0
- error("DecisionMaker has no actioninput")
- else
- error("DecisionMaker use wrong function")
+ if responsedict[:action_name] ∉ ["CHATBOX", "WINESTOCK"]
+ error("decisionMaker didn't use the given functions ", @__LINE__)
end
- return thoughtDict
+ for i ∈ [:thought, :plan, :action_name]
+ if length(JSON3.write(responsedict[i])) == 0
+ error("$i is empty ", @__LINE__)
+ end
+ end
+
+ # check if there are more than 1 key per categories
+ for i ∈ [:thought, :plan, :action_name, :action_input, :observation]
+ matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
+ if length(matchkeys) > 1
+ error("DecisionMaker has more than one key per categories")
+ end
+ end
+ println("--> 1")
+ pprintln(responsedict)
+ return responsedict
catch e
io = IOBuffer()
showerror(io, e)
errorMsg = String(take!(io))
st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
println("")
- @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
+ println("Attempt $attempt. Error occurred: $errorMsg\n$st")
println("")
end
end
- error("DecisionMaker failed to generate a thought")
+ error("DecisionMaker failed to generate a thought ", response)
end
+# function decisionMaker(a::T)::Dict{Symbol, Any} where {T<:agent}
+
+# # lessonDict = copy(JSON3.read("lesson.json"))
+
+# # lesson =
+# # if isempty(lessonDict)
+# # ""
+# # else
+# # lessons = Dict{Symbol, Any}()
+# # for (k, v) in lessonDict
+# # lessons[k] = lessonDict[k][:lesson]
+# # end
+
+# # """
+# # You have attempted to help the user before and failed, either because your reasoning for the
+# # recommendation was incorrect or your response did not exactly match the user expectation.
+# # The following lesson(s) give a plan to avoid failing to help the user in the same way you
+# # did previously. Use them to improve your strategy to help the user.
+
+# # Here are some lessons in JSON format:
+# # $(JSON3.write(lessons))
+
+# #         When providing the thought and action for the current trial, take into account these failed
+# # trajectories and make sure not to repeat the same mistakes and incorrect answers.
+# # """
+# # end
+
+# _prompt =
+# """
+# You are a helpful sommelier working for a wine store.
+# Your goal is to recommend the best wine from your inventory that match the user preferences.
+# You are also keen to improve your recommendation with lesson(s).
+
+# You must follow the following criteria:
+# 1) Get to know how much the user willing to spend
+# 2) Get to know type of wine the user is looking for e.g. red, white, sparkling, rose, dessert, fortified
+# 3) Get to know what occasion the user is buying wine for
+# 4) Get to know what characteristics of wine the user is looking for
+# e.g. tannin, sweetness, intensity, acidity
+# 5) Get to know what food the user will have with wine
+# 6) Check your inventory for the best wine that match the user preference
+# 7) Recommend wine to the user
+
+# You should only respond with interleaving Thought, Action, Observation steps.
+# Thought can reason about the current situation, and Action can be three types:
+# 1) winestock[query], which you can use to find wine in your inventory. The more input data the better.
+# 2) chatbox[text], which you can use to interact with the user.
+# After each observation, provide the next Thought and next Action.
+
+# #     You should only respond in JSON format as described below:
+# {
+# "thought": "your reasoning",
+# "action": {"name": "action to take", "input": "action input"},
+# "observation": "result of the action"
+# }
+
+# Here are some examples:
+# {
+# "question": "I would like to buy a sedan with 8 seats.",
+# "thought_1": "Our showroom carries various vehicle model. But I'm not sure whether we have a models that fits the user demand, I need to check our inventory.",
+# "action_1": {"name": "inventory", "input": "sedan with 8 seats."},
+# "observation_1": "Several model has 8 seats. Available color are black, red green"
+# }
+# {
+# "thought": "I have a few color for the user to choose from. I will ask him what color he likes.",
+# "action": {"name": "chatbox", "input": "Which color do you like?"}
+# "observation": "I'll take black."
+# }
+
+# $lesson
+
+# Let's begin!
+
+# $(JSON3.write(state[:thoughtHistory]))
+# {"thought"
+# """
+
+# # apply LLM specific instruct format
+# externalService = config[:externalservice][:text2textinstruct]
+# llminfo = externalService[:llminfo]
+# prompt =
+# if llminfo[:name] == "llama3instruct"
+# formatLLMtext_llama3instruct("system", _prompt)
+# else
+#             error("llm model name is not defined yet $(@__LINE__)")
+# end
+
+# msgMeta = GeneralUtils.generate_msgMeta(
+# externalService[:mqtttopic],
+# senderName= "decisionMaker",
+# senderId= string(uuid4()),
+# receiverName= "text2textinstruct",
+# mqttBroker= config[:mqttServerInfo][:broker],
+# mqttBrokerPort= config[:mqttServerInfo][:port],
+# )
+
+# outgoingMsg = Dict(
+# :msgMeta=> msgMeta,
+# :payload=> Dict(
+# :text=> prompt,
+# :kwargs=> Dict(
+# :max_tokens=> 512,
+# :stop=> ["<|eot_id|>"],
+# )
+# )
+# )
+# @show outgoingMsg
+
+# for attempt in 1:5
+# try
+# response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
+# _responseJsonStr = response[:response][:text]
+# expectedJsonExample =
+# """
+# Here is an expected JSON format:
+# {
+# "thought": "...",
+# "action": {"name": "...", "input": "..."},
+# "observation": "..."
+# }
+# """
+# responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
+# thoughtDict = copy(JSON3.read(responseJsonStr))
+
+# # check if dict has all required value
+# thought::AbstractString = thoughtDict[:thought]
+# actionname::AbstractString = thoughtDict[:action][:name]
+# actioninput::AbstractString = thoughtDict[:action][:input]
+# if actionname ∈ ["winestock", "chatbox", "recommendbox"]
+# # LLM use available function
+# elseif thought == ""
+# error("DecisionMaker has no thought")
+# elseif length(actioninput) == 0
+# error("DecisionMaker has no actioninput")
+# else
+# error("DecisionMaker use wrong function")
+# end
+
+# return thoughtDict
+# catch e
+# io = IOBuffer()
+# showerror(io, e)
+# errorMsg = String(take!(io))
+# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
+# println("")
+# @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
+# println("")
+# end
+# end
+# error("DecisionMaker failed to generate a thought")
+# end
""" Assigns a scalar value to each new child node to be used for selec-
@@ -551,97 +710,132 @@ function reflector(config::T1, state::T2)::String where {T1<:AbstractDict, T2<:A
end
-""" Get a new state
-# Arguments
- - `a::T1`
- one of YiemAgent's agent
- - `state::T2`
- current game state
- - `thoughtDict::T3`
- contain Thought, Action, Observation
- - `isterminal::Function`
- a function to determine terminal state
-# Return
- - `(newNodeKey, newstate, isterminalstate, reward)::Tuple{String, Dict{Symbol, <:Any}, Bool, <:Number}`
-# Example
-```jldoctest
-julia> state = Dict{Symbol, Dict{Symbol, Any}}(
- :thoughtHistory => Dict(:question => "Hello, I want to buy a bottle of wine."),
- :storeinfo => Dict(),
- :customerinfo => Dict()
- )
-julia> thoughtDict = Dict(
- :question=> "I want to buy a bottle of wine.",
- :thought_1=> "The customer wants to buy a bottle of wine.",
- :action_1=> Dict{Symbol, Any}(
- :name=>"Chatbox",
- :input=>"What occasion are you buying the wine for?",
- ),
- :observation_1 => ""
- )
-```
+# """ Chat with llm.
-# TODO
- - [] add other actions
- - [WORKING] add embedding of newstate and store in newstate[:embedding]
+# # Arguments
+# `a::agent`
+# an agent
+
+# # Return
+# None
-# Signature
-"""
-function transition(state::T2, config::T1, decisionMaker::Function, evaluator::Function,
- reflector::Function
- )::Tuple{String, Dict{Symbol, <:Any}, Integer} where {T1<:AbstractDict, T2<:AbstractDict}
+# # Example
+# ```jldoctest
+# julia> using JSON3, UUIDs, Dates, FileIO, MQTTClient, ChatAgent
+# julia> const mqttBroker = "mqtt.yiem.cc"
+# julia> mqttclient, connection = MakeConnection(mqttBroker, 1883)
+# julia> tools=Dict( # update input format
+# "askbox"=>Dict(
+# :description => "Useful for when you need to ask the user for more context. Do not ask the user their own question.",
+# :input => "Input is a text in JSON format.{\"Q1\": \"How are you doing?\", \"Q2\": \"How may I help you?\"}",
+# :output => "" ,
+# :func => nothing,
+# ),
+# )
+# julia> msgMeta = Dict(
+# :msgPurpose=> "updateStatus",
+# :from=> "agent",
+# :to=> "llmAI",
+# :requestresponse=> "request",
+# :sendto=> "", # destination topic
+#     :replyTo=> "agent/api/v0.1.0/txt/response", # requester asks responder to send reply to this topic
+#     :repondToMsgId=> "", # responder is responding to this msg id
+# :taskstatus=> "", # "complete", "fail", "waiting" or other status
+# :timestamp=> Dates.now(),
+# :msgId=> "$(uuid4())",
+# )
+# julia> a = ChatAgent.agentReflex(
+# "Jene",
+# mqttclient,
+# msgMeta,
+# agentConfigTopic, # I need a function to send msg to config topic to get load balancer
+# role=:sommelier,
+# tools=tools
+# )
+# julia> newAgent = ChatAgent.agentReact(agent)
+# julia> response = ChatAgent.conversation(newAgent, "Hi! how are you?")
+# ```
- thoughtDict = decisionMaker(config, state)
+# # TODO
+# - [] update docstring
+# - [x] MCTS() for planning
+# - [] add recap to initialState for earlier completed question
+# - [WORKING] conversation loop
- actionname = thoughtDict[:action][:name]
- actioninput = thoughtDict[:action][:input]
+# # Signature
+# """
+# function conversation(a::T, userinput::Dict) where {T<:agent}
+# config = deepcopy(a.config)
+# pprint(config)
+# if userinput[:text] == "newtopic"
+# clearhistory(a)
+# return "Okay. What shall we talk about?"
+# else
+# # add usermsg to a.chathistory
+# addNewMessage(a, "user", userinput[:text])
- # map action and input() to llm function
- response, select, reward, isterminal =
- if actionname == "chatbox"
- # deepcopy(state[:virtualCustomerChatHistory]) because I want to keep it clean
- # so that other simulation start from this same node is not contaminated with actioninput
- virtualWineUserChatbox(config, actioninput, deepcopy(state[:virtualCustomerChatHistory])) # virtual customer
- elseif actionname == "winestock"
- winestock(config, actioninput)
- elseif actionname == "recommendbox"
- virtualWineUserRecommendbox(config, actioninput)
- else
- error("undefined LLM function. Requesting $actionname")
- end
+# if isempty(a.plan[:currenttrajectory])
- newNodeKey, newstate = LLMMCTS.makeNewState(state, thoughtDict, response, select, reward,
- isterminal)
- if actionname == "chatbox"
- push!(newstate[:virtualCustomerChatHistory], Dict(:name=>"assistant", :text=> actioninput) )
- push!(newstate[:virtualCustomerChatHistory], Dict(:name=>"user", :text=> response))
- end
+# # initial state
+# a.plan[:currenttrajectory] = Dict{Symbol, Any}(
+# # deepcopy the info to prevent modifying the info unintentionally during MCTS planning
+# :customerinfo=> deepcopy(a.keywordinfo[:customerinfo]),
+# :storeinfo=> deepcopy(a.keywordinfo[:storeinfo]),
+# :userselect=> nothing,
+# :reward=> 0,
+# :isterminal=> false,
+# :evaluation=> nothing,
+# :lesson=> nothing,
- stateevaluation, progressvalue = evaluator(config, newstate)
+# :totalTrajectoryReward=> nothing,
- if newstate[:reward] < 0
- pprint(newstate[:thoughtHistory])
- newstate[:evaluation] = stateevaluation
- newstate[:lesson] = reflector(config, newstate)
+# # contain question, thought_1, action_1, observation_1, thought_2, ...
+# :thoughtHistory=> OrderedDict{Symbol, Any}(
+# #[] :recap=>,
+# :question=> userinput[:text],
+# ),
- # store new lesson for later use
- lessonDict = copy(JSON3.read("lesson.json"))
- latestLessonKey, latestLessonIndice =
- GeneralUtils.findHighestIndexKey(lessonDict, "lesson")
- nextIndice = latestLessonKey == :NA ? 1 : latestLessonIndice + 1
- newLessonKey = Symbol("lesson_$(nextIndice)")
- lessonDict[newLessonKey] = newstate
- open("lesson.json", "w") do io
- JSON3.pretty(io, lessonDict)
- end
- print("---> reflector()")
- end
+# # store conversation for virtual customer because the virtual customer agent is just
+# # a function and stateless.
+# :virtualCustomerChatHistory=> Vector{Dict{Symbol, Any}}(
+# [Dict(:name=> "user", :text=> userinput[:text])]
+# ),
+# )
+# else
+# _, a.plan[:currenttrajectory] = makeNewState(a.plan[:currenttrajectory],
+# a.plan[:activeplan][:thoughtHistory], userinput[:text], userinput[:select],
+# userinput[:reward], userinput[:isterminal])
+# end
+# end
- return (newNodeKey, newstate, progressvalue)
-end
+# while true
+# bestNextState, besttrajectory = LLMMCTS.runMCTS(a.plan[:currenttrajectory],
+# transition, config, decisionMaker, evaluator, reflector;
+# totalsample=2, maxDepth=3, maxiterations=3, explorationweight=1.0)
+# a.plan[:activeplan] = bestNextState
+
+# latestActionKey, latestActionIndice =
+# GeneralUtils.findHighestIndexKey(bestNextState[:thoughtHistory], "action")
+# actionname = bestNextState[:thoughtHistory][latestActionKey][:name]
+# actioninput = bestNextState[:thoughtHistory][latestActionKey][:input]
+
+# # transition
+# if actionname == "chatbox"
+# # add usermsg to a.chathistory
+# addNewMessage(a, "assistant", actioninput)
+# return actioninput
+# elseif actionname == "recommendbox"
+# # add usermsg to a.chathistory
+# addNewMessage(a, "assistant", actioninput)
+# return actioninput
+# else
+# _, a.plan[:currenttrajectory] = transition(a, a.plan[:currenttrajectory], a.plan[:activeplan])
+# end
+# end
+# end
@@ -700,8 +894,7 @@ julia> response = ChatAgent.conversation(newAgent, "Hi! how are you?")
# Signature
"""
function conversation(a::T, userinput::Dict) where {T<:agent}
- config = deepcopy(a.config)
- pprint(config)
+
if userinput[:text] == "newtopic"
clearhistory(a)
return "Okay. What shall we talk about?"
@@ -709,64 +902,246 @@ function conversation(a::T, userinput::Dict) where {T<:agent}
# add usermsg to a.chathistory
addNewMessage(a, "user", userinput[:text])
- if isempty(a.plan[:currenttrajectory])
+ thought = think(a)
- # initial state
- a.plan[:currenttrajectory] = Dict{Symbol, Any}(
- # deepcopy the info to prevent modifying the info unintentionally during MCTS planning
- :customerinfo=> deepcopy(a.keywordinfo[:customerinfo]),
- :storeinfo=> deepcopy(a.keywordinfo[:storeinfo]),
- :userselect=> nothing,
- :reward=> 0,
- :isterminal=> false,
- :evaluation=> nothing,
- :lesson=> nothing,
+ # thought will be added to chat model via context
+ chatresponse = generatechat(a, thought)
- :totalTrajectoryReward=> nothing,
-
- # contain question, thought_1, action_1, observation_1, thought_2, ...
- :thoughtHistory=> OrderedDict{Symbol, Any}(
- #[] :recap=>,
- :question=> userinput[:text],
- ),
-
- # store conversation for virtual customer because the virtual customer agent is just
- # a function and stateless.
- :virtualCustomerChatHistory=> Vector{Dict{Symbol, Any}}(
- [Dict(:name=> "user", :text=> userinput[:text])]
- ),
- )
- else
- _, a.plan[:currenttrajectory] = makeNewState(a.plan[:currenttrajectory],
- a.plan[:activeplan][:thoughtHistory], userinput[:text], userinput[:select],
- userinput[:reward], userinput[:isterminal])
- end
+ return chatresponse
end
- while true
- bestNextState, besttrajectory = LLMMCTS.runMCTS(a.plan[:currenttrajectory],
- transition, config, decisionMaker, evaluator, reflector;
- totalsample=2, maxDepth=3, maxiterations=3, explorationweight=1.0)
- a.plan[:activeplan] = bestNextState
- latestActionKey, latestActionIndice =
- GeneralUtils.findHighestIndexKey(bestNextState[:thoughtHistory], "action")
- actionname = bestNextState[:thoughtHistory][latestActionKey][:name]
- actioninput = bestNextState[:thoughtHistory][latestActionKey][:input]
+end
+# function conversation(a::T, userinput::Dict) where {T<:agent}
+# config = deepcopy(a.config)
+# pprint(config)
+# if userinput[:text] == "newtopic"
+# clearhistory(a)
+# return "Okay. What shall we talk about?"
+# else
+# # add usermsg to a.chathistory
+# addNewMessage(a, "user", userinput[:text])
- # transition
- if actionname == "chatbox"
- # add usermsg to a.chathistory
- addNewMessage(a, "assistant", actioninput)
- return actioninput
- elseif actionname == "recommendbox"
- # add usermsg to a.chathistory
- addNewMessage(a, "assistant", actioninput)
- return actioninput
+# if isempty(a.plan[:currenttrajectory])
+
+# # initial state
+# a.plan[:currenttrajectory] = Dict{Symbol, Any}(
+# # deepcopy the info to prevent modifying the info unintentionally during MCTS planning
+# :customerinfo=> deepcopy(a.keywordinfo[:customerinfo]),
+# :storeinfo=> deepcopy(a.keywordinfo[:storeinfo]),
+# :userselect=> nothing,
+# :reward=> 0,
+# :isterminal=> false,
+# :evaluation=> nothing,
+# :lesson=> nothing,
+
+# :totalTrajectoryReward=> nothing,
+
+# # contain question, thought_1, action_1, observation_1, thought_2, ...
+# :thoughtHistory=> OrderedDict{Symbol, Any}(
+# #[] :recap=>,
+# :question=> userinput[:text],
+# ),
+
+# # store conversation for virtual customer because the virtual customer agent is just
+# # a function and stateless.
+# :virtualCustomerChatHistory=> Vector{Dict{Symbol, Any}}(
+# [Dict(:name=> "user", :text=> userinput[:text])]
+# ),
+# )
+# else
+# _, a.plan[:currenttrajectory] = makeNewState(a.plan[:currenttrajectory],
+# a.plan[:activeplan][:thoughtHistory], userinput[:text], userinput[:select],
+# userinput[:reward], userinput[:isterminal])
+# end
+# end
+
+# while true
+# bestNextState, besttrajectory = LLMMCTS.runMCTS(a.plan[:currenttrajectory],
+# transition, config, decisionMaker, evaluator, reflector;
+# totalsample=2, maxDepth=3, maxiterations=3, explorationweight=1.0)
+# a.plan[:activeplan] = bestNextState
+
+# latestActionKey, latestActionIndice =
+# GeneralUtils.findHighestIndexKey(bestNextState[:thoughtHistory], "action")
+# actionname = bestNextState[:thoughtHistory][latestActionKey][:name]
+# actioninput = bestNextState[:thoughtHistory][latestActionKey][:input]
+
+# # transition
+# if actionname == "chatbox"
+# # add usermsg to a.chathistory
+# addNewMessage(a, "assistant", actioninput)
+# return actioninput
+# elseif actionname == "recommendbox"
+# # add usermsg to a.chathistory
+# addNewMessage(a, "assistant", actioninput)
+# return actioninput
+# else
+# _, a.plan[:currenttrajectory] = transition(a, a.plan[:currenttrajectory], a.plan[:activeplan])
+# end
+# end
+# end
+
+
+"""
+
+# Arguments
+
+# Return
+
+# Example
+```jldoctest
+julia>
+```
+
+# TODO
+ - [] update docstring
+ - [x] implement the function
+ - [x] add try block. check result that it is expected before returning
+
+# Signature
+"""
+function think(a::T) where {T<:agent}
+ thoughtDict = decisionMaker(a)
+ actionname = thoughtDict[:action_name]
+ actioninput = thoughtDict[:action_input]
+
+ # map action and input() to llm function
+ response =
+ if actionname == "CHATBOX"
+ (result=actioninput, errormsg=nothing, success=true)
+ elseif actionname == "WINESTOCK"
+ DBconnection = LibPQ.Connection("host=192.168.88.12 port=5432 dbname=yiem_wine_assistant user=yiem password=yiem@Postgres_0.0")
+ winestock(actioninput, DBconnection, a.text2textInstructLLM)
else
- _, a.plan[:currenttrajectory] = transition(a, a.plan[:currenttrajectory], a.plan[:activeplan])
+ error("undefined LLM function. Requesting $actionname")
+ end
+
+ # this section allow LLM functions above to have different return values.
+ result = haskey(response, :result) ? response[:result] : nothing
+ select = haskey(response, :select) ? response[:select] : nothing
+ reward::Integer = haskey(response, :reward) ? response[:reward] : 0
+ isterminal::Bool = haskey(response, :isterminal) ? response[:isterminal] : false
+ errormsg::Union{AbstractString, Nothing} = haskey(response, :errormsg) ? response[:errormsg] : nothing
+ success::Bool = haskey(response, :success) ? response[:success] : false
+ a.shortmem
+ return result
+end
+
+
+"""
+
+# Arguments
+ - `a::T1`
+ one of ChatAgent's agent.
+ - `input::T2`
+# Return
+ A JSON string of available wine
+
+# Example
+```jldoctest
+julia>
+```
+
+# TODO
+ - [] update docs
+ - [WORKING] implement the function
+
+# Signature
+"""
+function generatechat(a::T1, input::T2) where {T1<:agent, T2<:AbstractString}
+ systemmsg =
+ """
+ You are a helpful sommelier working for a wine store.
+ Your task is to help the user choose the best wine that match the user preferences from your inventory.
+ You are also eager to improve your helpfulness.
+
+ You must follow the following guidelines:
+ - Get to know how much the user willing to spend
+ - Get to know type of wine the user is looking for e.g. red, white, sparkling, rose, dessert, fortified
+ - Get to know what occasion the user is buying wine for
+ - Get to know what characteristics of wine the user is looking for e.g. tannin, sweetness, intensity, acidity
+ - Get to know what food the user will have with wine
+
+ At each round of conversation, the user will give you:
+ Context: ...
+ Your thoughts: Your current thinking in your mind
+ Your earlier conversation with the user: ...
+
+ You should then respond to the user with:
+ - chat: what do you want to say to the user
+
+ You should only respond in format as described below:
+ chat: ...
+
+ Let's begin!
+ """
+
+ usermsg =
+ """
+ Context: None
+ Your thoughts: $input
+ Your earlier conversation with the user: $(chatHistoryToString(a))
+ """
+
+ _prompt =
+ [
+ Dict(:name=> "system", :text=> systemmsg),
+ Dict(:name=> "user", :text=> usermsg)
+ ]
+
+ # put in model format
+ prompt = GeneralUtils.formatLLMtext(_prompt, "llama3instruct")
+ prompt *=
+ """
+ <|start_header_id|>assistant<|end_header_id|>
+ """
+
+ for attempt in 1:5
+ try
+ response = text2textInstructLLM(prompt)
+ responsedict = GeneralUtils.textToDict(response,
+ ["chat"],
+ rightmarker=":", symbolkey=true)
+
+            # check if dict has all required values — NOTE(review): these keys (:evaluation, :score, ...) don't match the parsed ["chat"] key above; looks copy-pasted from the evaluator — verify
+ evaluationtext::AbstractString = responsedict[:evaluation]
+ responsedict[:score] = parse(Int, responsedict[:score]) # convert string "5" into integer 5
+ score::Integer = responsedict[:score]
+ accepted_as_answer::AbstractString = responsedict[:accepted_as_answer]
+ suggestion::AbstractString = responsedict[:suggestion]
+
+            # add to state here instead of in transition() because the latter causes julia extension crash (a bug in julia extension)
+ state[:evaluation] = responsedict[:evaluation]
+ state[:evaluationscore] = responsedict[:score]
+ state[:accepted_as_answer] = responsedict[:accepted_as_answer]
+ state[:suggestion] = responsedict[:suggestion]
+
+ # mark as terminal state when the answer is achieved
+ if accepted_as_answer == "Yes"
+ state[:isterminal] = true
+ state[:reward] = 1
+ end
+
+ return responsedict[:score]
+ catch e
+ io = IOBuffer()
+ showerror(io, e)
+ errorMsg = String(take!(io))
+ st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
+ println("")
+ println("Attempt $attempt. Error occurred: $errorMsg\n$st")
+ println("")
end
end
+ error("evaluator failed to generate an evaluation")
+
+
+
+
+
+
+
end
diff --git a/src/llmfunction.jl b/src/llmfunction.jl
index 06f15dc..1a75d3d 100644
--- a/src/llmfunction.jl
+++ b/src/llmfunction.jl
@@ -4,7 +4,7 @@ export virtualWineUserChatbox, jsoncorrection, winestock,
virtualWineUserRecommendbox, userChatbox, userRecommendbox
using HTTP, JSON3, URIs, Random, PrettyPrinting, UUIDs
-using GeneralUtils
+using GeneralUtils, SQLLLM
using ..type, ..util
# ---------------------------------------------- 100 --------------------------------------------- #
@@ -357,13 +357,13 @@ julia> result = winestock(agent, input)
```
# TODO
- [] update docs
- [WORKING] implement the function
+ - [] update docs
+ - [WORKING] implement the function
# Signature
"""
-function winestock(config::T1, input::T2
- )::Union{Tuple{String, Number, Number, Bool}, Tuple{String, Nothing, Number, Bool}} where {T1<:AbstractDict, T2<:AbstractString}
+function winestock(input::T, DBconnection, text2textInstructLLM::Function
+ )::Union{Tuple{String, Number, Number, Bool}, Tuple{String, Nothing, Number, Bool}} where {T<:AbstractString}
# SELECT *
# FROM food
diff --git a/src/type.jl b/src/type.jl
index f55ad1a..be7de20 100644
--- a/src/type.jl
+++ b/src/type.jl
@@ -80,12 +80,8 @@ julia> agent = YiemAgent.bsommelier(
mutable struct sommelier <: agent
name::String # agent name
id::String # agent id
- config::Dict # agent config
tools::Dict
- maxiterations::Integer # how many thinking round
- totalsample::Integer # how many sample in each thinking round
- maxDepth::Integer # how many step ahead to be simulated start from current state into the future
- maxHistoryMsg::Integer # 21th and earlier messages will get summarized
+    maxHistoryMsg::Integer  # e.g. 21st and earlier messages will get summarized
""" Memory
Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3
@@ -97,76 +93,48 @@ mutable struct sommelier <: agent
"""
chathistory::Vector{Dict{Symbol, Any}}
- keywordinfo::Dict{Symbol, Any}
-
- # 1-historyPoint is in Dict{Symbol, Any} and compose of:
- # state, statevalue, thought, action, observation
- plan::Dict{Symbol, Any}
+ shortmem::Dict{Symbol, Any}
+
+ # communication function
+ text2textInstructLLM::Function
end
function sommelier(
- config::Dict = Dict(
- :mqttServerInfo=> Dict(
- :broker=> nothing,
- :port=> nothing,
- ),
- :receivemsg=> Dict(
- :prompt=> nothing, # topic to receive prompt i.e. frontend send msg to this topic
- :internal=> nothing,
- ),
- :thirdPartyService=> Dict(
- :text2textinstruct=> nothing,
- :text2textchat=> nothing,
- ),
- )
+ text2textInstructLLM::Function
;
name::String= "Assistant",
id::String= string(uuid4()),
- tools::Dict= Dict(
- :chatbox=> Dict(
- :name => "chatbox",
- :description => "Useful for when you need to communicate with the user.",
- :input => "Input should be a conversation to the user.",
- :output => "" ,
- :func => nothing,
- ),
- ),
- maxiterations::Integer= 3,
- totalsample::Integer= 3,
- maxDepth::Integer= 3,
maxHistoryMsg::Integer= 20,
chathistory::Vector{Dict{Symbol, Any}} = Vector{Dict{Symbol, Any}}(),
- keywordinfo::Dict{Symbol, Any} = Dict{Symbol, Any}(
- :customerinfo => Dict{Symbol, Any}(),
- :storeinfo => Dict{Symbol, Any}(),
- ),
- plan::Dict{Symbol, Any} = Dict{Symbol, Any}(
-
- # store 3 to 5 best plan AI frequently used to avoid having to search MCTS all the time
- # each plan is in [historyPoint_1, historyPoint_2, ...] format
- :existingplan => Vector(),
-
- :activeplan => Dict{Symbol, Any}(), # current using plan
- :currenttrajectory=> Dict{Symbol, Any}(), # store question, thought, action, observation, ...
- )
)
- #[NEXTVERSION] publish to a.config[:configtopic] to get a config.
- #[NEXTVERSION] get a config message in a.mqttMsg_internal
- #[NEXTVERSION] set agent according to config
+ tools = Dict( # update input format
+ "chatbox"=> Dict(
+ :description => "Useful for when you need to ask the user for more context. Do not ask the user their own question.",
+ :input => """Input is a text in JSON format.{\"Q1\": \"How are you doing?\", \"Q2\": \"How may I help you?\"}""",
+ :output => "" ,
+ ),
+ "winestock"=> Dict(
+ :description => "A handy tool for searching wine in your inventory that match the user preferences.",
+ :input => """Input is a JSON-formatted string that contains a detailed and precise search query.{\"wine type\": \"rose\", \"price\": \"max 35\", \"sweetness level\": \"sweet\", \"intensity level\": \"light bodied\", \"Tannin level\": \"low\", \"Acidity level\": \"low\"}""",
+ :output => """